parameters/0000755000175000017500000000000014167565473012571 5ustar nileshnileshparameters/MD50000644000175000017500000005366414167565473013117 0ustar nileshnilesh9da9251108acef72de78e8957e328bb0 *DESCRIPTION 2af7e9d6c0941577ae8c4de55e1459bb *NAMESPACE 31a5acd69860562a72270af3dd384139 *NEWS.md 1bc6dabd1a8579ea3f11471242a4c738 *R/1_model_parameters.R 0665fe439a867792a9ed06489e6739c6 *R/2_ci.R b0e4cb08387c53ad5fbaaeaeac208298 *R/3_p_value.R e4d6eb1cf1f6f5d20bfa36bbe392f196 *R/4_standard_error.R dc32e81f572c0526d3a9c4a4adb9be32 *R/5_simulate_model.R bf51304a14b6f7997659a2fff1f1573d *R/backports.R 6e2c7b62e3e9b1d4e4a9d375f1edac94 *R/bootstrap_model-emmeans.R 902036517902d8938e1abdcf522f3e95 *R/bootstrap_model.R 21c1cd920e8244dd0a1767e15ed5b89a *R/bootstrap_parameters.R f29c04a5851cb945fd85fa2c79373539 *R/check_clusterstructure.R 7e22a24c40397191e6e4aeeba09c2670 *R/check_factorstructure.R 9677c6353bdf7496dcd0dd1534ea0fad *R/check_heterogeneity.R 17ba2d58e73d6ecbc498ab7240d02272 *R/ci_betwithin.R e0661c1e3e44444e13238117dc9f5297 *R/ci_generic.R 36dca1b2c7b684f2599f098f4b8154b8 *R/ci_kenward.R 9fbbb98bb9420f4f7abed9f198a33105 *R/ci_ml1.R f7ee8a57475ecbecadd970baf98e04c0 *R/ci_profile_boot.R b7370d101181a2db41d82b248cdc991d *R/ci_satterthwaite.R ee5ac50186e709751e26c758f0a3dd63 *R/cluster_analysis.R 87fc063fe5400f7f92ee6aa230f3a4cc *R/cluster_centers.R fa5f8cd83bc332c88c18e3bb79e999f7 *R/cluster_discrimination.R bd5053ced7a3433e38b01ef3a16fe7a8 *R/cluster_meta.R 1d6334f9f6f685cdacbd8f430b93fa58 *R/cluster_performance.R 7b0c21b4df6bf9e73fa55abb91bc49ac *R/compare_parameters.R 838316b11b87082072b6625b2d593807 *R/convert_efa_to_cfa.R 55f0da00f816533dcd93dc5130a404da *R/datasets.R 08f97779636d757dad680c6b93f17029 *R/display.R b4378f614ad67312dbdc0851c1b2f3d2 *R/dof.R 457939ac2e889037a0bd09a43f793826 *R/dof_betwithin.R ba61cbdbaaec470a4bc63ead1101eb89 *R/dof_kenward.R a0d6493d14b280c23b4ad1c41dffc6cc *R/dof_ml1.R 4bfc86cf6675b64f75e6f9705be8e016 *R/dof_satterthwaite.R 
b692f44dbeb67a54936609c1feabe585 *R/equivalence_test.R a2ac930fec8d96aa02af3e5ce2de2ac1 *R/extract_parameters.R 3c43058a89e64d2dc1c1ede3969f2106 *R/extract_parameters_anova.R 3c3a97cc8b11bbc4bc65ab1be41ceded *R/extract_random_parameters.R fb5c0f4f6f7256ab07d322ad2509fbda *R/extract_random_variances.R 0e7848283c9e0273beb5bb627d3392f1 *R/factor_analysis.R 3871ae01ef8c1c7c283bdc415d7611d5 *R/format.R a3a89babcbe48d2342736a518982ecaf *R/format_df_adjust.R 8c4e277b0c493677d174b363a43c4d8c *R/format_order.R 95d47e86d70c1a11f03fef0addec9c7c *R/format_p_adjust.R 78bd307d222f11afc213f7d2297aa64d *R/format_parameters.R 2a81a6614cf3c5107dc5b7945394e5d1 *R/get_scores.R 4eecf1ad1f11817a893222a8022cbefe *R/methods_AER.R b56b8f3971ea3df280990b043c882ae8 *R/methods_BBMM.R 8ef6650195c30cda9820ca2276579430 *R/methods_BayesFM.R dbe68729ebbf87746d421daf6edfb8de *R/methods_BayesFactor.R 9a32574d6b1bdc8c1ce568197e28a6c3 *R/methods_BayesX.R cb4b2c51d9d2f30c25c0aff58ecc5c8a *R/methods_DirichletReg.R 7e2e23f70c0f22841b4a547a8c1cf7fc *R/methods_FactoMineR.R 6aacff0546fc1164526345ce770e65d1 *R/methods_MCMCglmm.R 23298eb9344ec951b286019637207868 *R/methods_PMCMRplus.R c558eca4ab0220fd293ee1c4cda6329c *R/methods_aod.R a07a396f14b78f9941a1cc3b90958a87 *R/methods_aov.R b1e5a544b07cbdfbb9d30928995f741a *R/methods_averaging.R cc2aaed5f15a24d9d2c9b9130ff704a1 *R/methods_bamlss.R 6f784ec177972bb5a62eff88d0c36a99 *R/methods_base.R 5643f5e0ccb82270c58b3f2b5de665ac *R/methods_bayesQR.R 079ada60c0566403a1903c42790d79d5 *R/methods_bayestestR.R 59f8cf34979cfabbb1630473742764ab *R/methods_bbmle.R 4b774729109b5c2abfb0c2c3365feadf *R/methods_betareg.R dbb838872b786fd0c878fe51005fb7cc *R/methods_bfsl.R de1cccfd5b748ec0a487eb689f7ab661 *R/methods_bggm.R 9449575264b756af39b39225bf2fc553 *R/methods_bife.R 5d72c90f7bc8de484f6269c8af26477b *R/methods_biglm.R 7cbd891e0c3e5b85c14427350ac6ddb0 *R/methods_brglm2.R 9fde72e4ec574657357a255817195b30 *R/methods_brms.R 702b25f9bdf2f87dac4572dd0524e02e *R/methods_censReg.R 
c47e4bb9afaa5a1bebd217b0bcdc862b *R/methods_cgam.R b37757fbeb09ed92449676646828a5dc *R/methods_coda.R 7fa96b9399af1fb364d3cfb6c86406bf *R/methods_coxme.R 8c5bd7595fcbd8cadd2929de291d9c63 *R/methods_coxrobust.R cca5cdc9cb22c8a37c9492050377c9a8 *R/methods_cplm.R 66acde97a92a56af6d335dab18756f35 *R/methods_crch.R dab4efac71b5b1c60198024e03fdd8df *R/methods_dbscan.R eb326802da7fb42c6d9f7bddd80d3fea *R/methods_effect_size.R 7ff045ef7750c54be0b047c7e39488e6 *R/methods_eflm.R cae0d677f38f2d498a216563f776c17a *R/methods_emmeans.R 16b3b7c26c677363a3e6f2976208a653 *R/methods_epi2x2.R db2ae63070c392a3a47fa61c860c00b1 *R/methods_ergm.R cae553f026ea4d5c01fa7908f4f86fe9 *R/methods_estimatr.R 0a87bf084a41f8acfdd97af534641545 *R/methods_fitdistr.R f71f096be0ad5404f5560a0fee8785c0 *R/methods_fixest.R 351b198d7c43c6cf68e3a0df73d7fd65 *R/methods_flexsurvreg.R c2689bdb600701cd043715b21a431530 *R/methods_gam.R be966c46e860a2434b1a9d878dcc6023 *R/methods_gamlss.R a1d19201b05be5d37c59bb98e127e42d *R/methods_gamm4.R 75f152876fc54356179f1b79e91d7226 *R/methods_gee.R f1f6b25377065d040064504fec599da3 *R/methods_ggeffects.R ad46fb608d44f2808165dcf3d72fe044 *R/methods_gjrm.R 5897a0dbd65ca63b53c03458027eda9b *R/methods_glm.R 7f2f9c2ab08f39b3bde4297b44b7fa09 *R/methods_glmm.R 0d69f514d547cc1c15fae4173af24be8 *R/methods_glmmTMB.R e8f62a9931561d2d886dd264951a7aa1 *R/methods_glmx.R 477e6b9dc8dca17b29c912f3422be003 *R/methods_gmnl.R 176090f1160a194937244a7bb085392d *R/methods_hclust.R 0de993d6f9de8175ef38b21008c19ca4 *R/methods_htest.R 6599658518e104f3599b968b01a704c2 *R/methods_ivfixed.R a411a7d9a49e3cba4912228675d08bdd *R/methods_ivprobit.R 55960685d785d71fdaa71a0156b1d955 *R/methods_ivreg.R 3d6f20869a717ec83bed840f2e113185 *R/methods_kmeans.R c6a79eaa44248464596c55c95c5fd5ad *R/methods_lavaan.R f061b4e9bd1ee483a1363be30acb1bad *R/methods_lm.R b7e8dc5337154323977ccc208ec41752 *R/methods_lme4.R ede3efc40f5551adba1ac1055fb280ad *R/methods_lmodel2.R 875618786ccef9b9f9800b82c5afcde8 
*R/methods_lmtest.R 06f89312c1dff28e7bf4bd96b17266cd *R/methods_logistf.R 1c10e1a13ca1035188b2d753746a731d *R/methods_lqmm.R ea4d9653b5623e8191d10450783b926d *R/methods_lrm.R cdee27f9caaf66137aa156c85664d817 *R/methods_margins.R e2a5c79d9ccaa29b48366d00646412ed *R/methods_mass.R bb90294c2cfef72e27d30eb0bc8dde99 *R/methods_maxLik.R 3dedc27710359ea605f1dc906792b509 *R/methods_mclust.R de90f533bb91137ea8a2a0052e90b9c2 *R/methods_mcmc.R 251bb0ad75af79cdb2f4df881d6a9701 *R/methods_mediate.R f3ed30c073b2a7736007934d2a35c98b *R/methods_merTools.R 4afcba3e4b25bb9647cc582fec9162a7 *R/methods_metafor.R 8af2bac958fe573a5972e5b2031f24ac *R/methods_metaplus.R e5a8d50920e41e6a327d933d36ca6132 *R/methods_mfx.R b49996c06971bf3690be382ab1f9f910 *R/methods_mgcv.R 188b11d38c47104c61189369a7ca05c1 *R/methods_mhurdle.R eb6c8dc3c655cd8a2e1e212c46703615 *R/methods_mice.R 341b31ce1cb3c0f8718d85b6a80160f4 *R/methods_mixed.R 5f549f7f795fa2eefe6543839dee1ed5 *R/methods_mixmod.R b46a6eb2ff928a0e5b221ea3d84c36ad *R/methods_mixor.R 88e30658df2cfb55fe1ae45d9aff77fd *R/methods_mjoint.R 6a863f4b62ba34317fa27f3585fd386a *R/methods_mlm.R cb07af02ae7245294f1b47332e827d59 *R/methods_model_fit.R 82a5e49b45ed1da0dfa39137d07372ee *R/methods_multcomp.R bdb4b914688bf0fec3b37c8876e96369 *R/methods_multgee.R 0a512967af0a0b77a077e75477bc25fb *R/methods_mvord.R 202b7a804547fa59f3c491166fb7b3b0 *R/methods_nlme.R 786dad7461ad8f3ddfcfdd30e688961b *R/methods_ordinal.R 3753750647f31bfeaae7267d11eb7e40 *R/methods_other.R 169bec0664dd783b4cd53bdd6e0297e5 *R/methods_pam.R 7e1418e918a68f65013e0898dc4569fc *R/methods_panelr.R 1d3b9a6ff9f725a708c660c7b908e310 *R/methods_pglm.R cd8eabb9eb368e702db551f3d84ff673 *R/methods_plm.R d2a8465b213792f0b8b5476aae0f159f *R/methods_pscl.R 5fe53c20a02b901e6eb195e731e33532 *R/methods_psych.R 93d1bee4abaa2c3a36030b0a3703791d *R/methods_quantreg.R e56a2638bc7dc1285411160a71a0e46f *R/methods_robmixglm.R 23f350a2e4e2cdb17b415a23110fb1dc *R/methods_robustlmm.R 
9f81e85dcf10b5ff30163984eb36bd16 *R/methods_rstan.R 1d56e0b44f8ab7b94cc369ff1107fed9 *R/methods_rstanarm.R 9688613a91224a7b3615d862be846485 *R/methods_sarlm.R 1af7bd2ed7e113584cd12be4ce9e4072 *R/methods_scam.R 05e6b63161bc80fe831cbc445321d1c9 *R/methods_selection.R 1655ceecef10821bf388bbca1b0ac3a8 *R/methods_sem.R 0549bf0dbf62cb2915c1cb390fec9147 *R/methods_skewness_kurtosis.R b8b7a258108b46860bc256b1c6e7cf20 *R/methods_spaMM.R 9e597c496caf42dc8c8372a7e6dc52af *R/methods_speedglm.R 9effb1792046cb9ec4fbc158c5b3a086 *R/methods_stats4.R b8ce5d410c6b549afc9d76b544f6a566 *R/methods_survey.R 0802945c9a1adcb9a2ff765b86045b89 *R/methods_survival.R 9090387e05f90c760015a5e669bec1cb *R/methods_systemfit.R 85bfcc830a2f3a2a7a7309f3548b7d45 *R/methods_truncreg.R 1bdb365470150a97fd1fabb3b2ff89b7 *R/methods_tseries.R 31561c9ea89e3a2180fcfbd384d23f9e *R/methods_varest.R 027ae7d99d32d59ceb6950a15e2186de *R/methods_vgam.R a8b0815d223c37d0a97436c20badab03 *R/methods_wrs2.R 235315dbe40f338d74b279b9e7184de1 *R/n_clusters.R fd17656ff20d20c4c2764818a6fba798 *R/n_clusters_easystats.R 8454f58da7f0d7da9dfbcb9676b351c5 *R/n_factors.R 13d57d06de27880265635121c2c7bc0e *R/n_parameters.R 2061475d7893de1a78e203439ecd3659 *R/p_value_betwithin.R 6e1c984a101d86d5102a4efdf7f68590 *R/p_value_kenward.R 70cb2ab2aa4bc9b43c3a04cf6346ae6e *R/p_value_ml1.R 6d2d77865f4066b3a5d34aa53983611f *R/p_value_satterthwaite.R 435e232197a8038120d5c329b717a9c5 *R/parameters_type.R 08ce280f17beada99ad7673c0cb6b007 *R/plot.R eb62184949c0f1068e2387544d73cec1 *R/pool_parameters.R f0f5bdaa95255550eadc7007c1182ba1 *R/principal_components.R 155916816b17c3c57af5e0a41c5b6645 *R/print.compare_parameters.R f2744e7601d0ba4a4cd28b33d7644ab5 *R/print.parameters_model.R 3284e004edcb1e4947a41b2950ab2548 *R/print_html.R b1bb8403b4e4c331d4b6bf881c557515 *R/print_md.R 261ed70d014895d75221bd99a1664a7d *R/random_parameters.R 791671b8894eeb537e11f622015daa09 *R/reduce_parameters.R a154033615dfd5b289a8054406868e98 *R/reexports.R 
d2e858106318865766f7919f7024e8b6 *R/reshape_loadings.R 9c78ef92b855ee3d8c4a5b70ad25818c *R/robust_estimation.R cda5fe51702ffe5e0969c97649c62c79 *R/select_parameters.R 6cd4eeb2f109fb53bf559c49d6b140d6 *R/select_parameters.stanreg.R e1f6f2599d7a6fec57238e37e278c53b *R/simulate_parameters.R 4580b00a2472cf843c6e5821998752c1 *R/standard_error_kenward.R 24a5f5202a30bbdf08270e1a2f5104b7 *R/standard_error_satterthwaite.R a97024e8da15f723a17c714bedd7c689 *R/utils.R 6778b9923b997044748bead170a7f06f *R/utils_cleaners.R 7cc263fd2733a188311f29581c2cbc2a *R/utils_clustering.R 3d7038852b25f5c0416a355106aa37f4 *R/utils_format.R 2d4640b7152ee1acbad277450c4b3a7e *R/utils_model_parameters.R 7c7d7a7041156e4af3fb9b35d0c1abec *R/utils_pca_efa.R 4a591a55483a2a811a5fc73bc862a7c6 *R/zzz.R da5ad0b0566d06b10fc613698b67f975 *README.md d9d26ee35082dc830e2caf1c8aba3947 *build/partial.rdb e61da601bb04bd548c145d562ad653ca *build/vignette.rds fed293a745c7a75939e2f2156aaa9afe *data/fish.RData fca1e9b681b9f432165601e6510c483c *data/qol_cancer.RData d9a675761b0b4ec7816a274c92857f5e *inst/CITATION 87830b83858cf567acd1e6f1efa75f29 *inst/WORDLIST 7342a6d63f4cc4501afc55be1d148740 *inst/doc/overview_of_vignettes.R 0790e38c288e7ad9b24ab34a2123bb16 *inst/doc/overview_of_vignettes.Rmd d254ef97d6390ffe6d902c1d083cf92f *inst/doc/overview_of_vignettes.html 690778fb94b8827795bcf627c8309dde *man/bootstrap_model.Rd 692ebd7bf2077d0cd70ae6ef91c92043 *man/bootstrap_parameters.Rd 043f32d86cb7006e8177e4dc0d03f668 *man/check_clusterstructure.Rd 785df66e1cfb49e7d1bf0b8c84a0b3c3 *man/check_factorstructure.Rd 3ff0976e40647d099d3e1b2aff669d80 *man/check_heterogeneity.Rd 54d23d408782d240cfb6880bfaf2feb0 *man/check_kmo.Rd a5b188f86e90af17f5c5e2a8a78272f0 *man/check_sphericity_bartlett.Rd 3ce672ed5ab026b890c41e5c9ede36cf *man/ci.default.Rd b42dabc3b6a3b3b582625b771aac3df1 *man/cluster_analysis.Rd a6e41576548a1d095129e7b451fa8acf *man/cluster_centers.Rd 8a713f108ae7cdcf161fe835e82ae2f9 *man/cluster_discrimination.Rd 
92c511c32fb0c2e827d47dfc2025a522 *man/cluster_meta.Rd c78f121ee3a14c9edd473ef7afe20aec *man/cluster_performance.Rd 16ac9cd1d8af5c67c9e7d8f866319bc2 *man/compare_parameters.Rd afc3bdd2ad5370b270034125b51a8d75 *man/convert_efa_to_cfa.Rd 4e6fcd72b67221be6ed69b47248021fc *man/degrees_of_freedom.Rd f13536371b470836f3ce0001793fcd59 *man/display.parameters_model.Rd f2f62d5994d78723e9f702bec23eea59 *man/dot-compact_character.Rd 39cc0b840df819fcacaf0df7279f31fd *man/dot-compact_list.Rd ada4525e7f51696872af8e67a11a8a03 *man/dot-data_frame.Rd d497a17727c64a877268d9e06f0cd046 *man/dot-factor_to_dummy.Rd 6c3815c9740a529c0b5dd0c2ea7f7f46 *man/dot-factor_to_numeric.Rd 6f88971941994882a00c6f6fe9b59ea2 *man/dot-filter_component.Rd b9af8cc94728c5f8388a662363b06ea8 *man/dot-find_most_common.Rd 113e7b9f091f711d4cdd5a0a25141bf7 *man/dot-flatten_list.Rd d345ca758850004bcc8f2dfbfa129a77 *man/dot-n_factors_bartlett.Rd d773be8d96a78e87754d7ffd0e64102b *man/dot-n_factors_bentler.Rd 8d7187c3a82dbaaea1fa0368bea3914a *man/dot-n_factors_cng.Rd 9545ed998c17d115ed152a3f269530fd *man/dot-n_factors_mreg.Rd 1e126d6996539eff78565cf11aeb80ec *man/dot-n_factors_scree.Rd 4e979353266706b75a554fe0b05420ae *man/dot-n_factors_sescree.Rd 9c4b49f415f73a87ddc293092fc49a24 *man/dot-recode_to_zero.Rd a46c381e25ea47ca4aec1b73cd3abf4c *man/equivalence_test.lm.Rd 80c2c196a70f7211c90bf2c510d39d41 *man/figures/figure1.png 763d72787cad1fca6fc76de99da06308 *man/figures/figure2.png 34f97573ccc6dab523d52cbd156882dd *man/figures/logo.png dfb7d2691aa5df65c196bdd538860c75 *man/fish.Rd ed5f06e2c08127ed5b98439ac44b725f *man/format_df_adjust.Rd 2c1c882e67918da57dcf3a5aca85cf6e *man/format_order.Rd 0d3085434ec747b5f23d015056783737 *man/format_p_adjust.Rd c9163448b1e93d86c7dc5b4ae86bc5bf *man/format_parameters.Rd 974a04c093a407848dc7a5269c61710d *man/get_scores.Rd 5b15eff06bc403e17e4b1f71f00e1884 *man/model_parameters.BFBayesFactor.Rd 8c1d40408d1524329ac2d435df97278e *man/model_parameters.Rd af365f4e0ebfab3ffff16afaa65dcce0 
*man/model_parameters.aov.Rd ec1d1db290426c81bfa76833e80dfb2a *man/model_parameters.averaging.Rd 7638b16d3caadc8b9026275363d86a26 *man/model_parameters.befa.Rd 8a906e519f84abcd281bfddb21a1d338 *man/model_parameters.cgam.Rd 96b5821deb6c4e2ef546131f8bd5796d *man/model_parameters.default.Rd 5f3b6d63bd05efeb595928d64d421055 *man/model_parameters.glht.Rd 49b95669109223ee3278e96d0961d0be *man/model_parameters.htest.Rd 2e57ba9550dcf06cfb46a67e1f14435d *man/model_parameters.kmeans.Rd 57b8130b9f6772ec8450b979a478e88c *man/model_parameters.lavaan.Rd 4fa7751d4df6c9a501e2fbb1bebbe5d3 *man/model_parameters.merMod.Rd a9ad67840d1c866b840130acc06bfd5f *man/model_parameters.mira.Rd 9c58d8354078724be99bc986de9d8e81 *man/model_parameters.mlm.Rd 2a8100c17397a9258b1849a97a84fcbb *man/model_parameters.principal.Rd b6247a71f030d91f19ad22305faac77d *man/model_parameters.rma.Rd 8acfca9cbba887af0b69264313982641 *man/model_parameters.stanreg.Rd 6263ffbfad14d845cee2c4d3a1794c46 *man/model_parameters.t1way.Rd d470ab123ae9e94e55ad990008e3ab73 *man/model_parameters.zcpglm.Rd cc76d3c83527382d3452a20957cf45c9 *man/n_clusters.Rd c603d05286da3437b1488ae2a6c78e01 *man/n_factors.Rd 9effff3e6383ef46c7298c41236d7e8b *man/p_value.BFBayesFactor.Rd fbf898effa37b8487550a4a6c0e10cde *man/p_value.DirichletRegModel.Rd bd90c8e1be51ed1725512adc507e26ff *man/p_value.Rd b18eae5eb0e24db61106950d4291048c *man/p_value.poissonmfx.Rd 8fda70cf6b9575c68ade8a739c8fd03a *man/p_value.zcpglm.Rd 3ca5f44f708e125be0a5ccc5567c1a10 *man/p_value_betwithin.Rd 65ccfae37b489d48ea75973ea2468fa2 *man/p_value_kenward.Rd 2f656f6a3b7c9b53939788af36d854e3 *man/p_value_ml1.Rd dd066ec08009a9b91f1175538a3f8432 *man/p_value_satterthwaite.Rd fe31fb663bdf80533d7e94f995c788b2 *man/parameters_type.Rd 7e8747eb816465edcb8d01596e679bee *man/pool_parameters.Rd 5be3b72e464e0a9b6e5f6016209518f4 *man/principal_components.Rd 58e681c7fb857223e99051e1816e988d *man/print.parameters_model.Rd d1e2d2ee9e66ebab0de28c9f432defb9 *man/qol_cancer.Rd 
9c3b37c7d4c2e90a16ea1886cfa56b61 *man/random_parameters.Rd 8b4305fb07b122f4dcdab5526e2de665 *man/reduce_parameters.Rd 3a178c6ebec913c0def6af61efdb1ac9 *man/reexports.Rd aabc04dc6060455245b4a395043e4713 *man/reshape_loadings.Rd 0ed92e79ccab61f1419e3ef629d0c665 *man/select_parameters.Rd cfb0b6e63edeb3d93134b6dd43248948 *man/simulate_model.Rd 1244f6d07bc3f71c647311e1b171d6f6 *man/simulate_parameters.Rd 014792f13ca72ca567c9e533626841b6 *man/standard_error.Rd 0abf17a219fe77122262ab6623fa437b *man/standard_error_robust.Rd dbd9bab057163c01c320d0b9d3bb9620 *tests/spelling.R 5286b5bf3be98885225e552c042cbcc7 *tests/testthat.R c89c8592c1f0c04b91935a5892bc6460 *tests/testthat/helper-requiet.R bc2237e45327531caca8e5e3c67d4753 *tests/testthat/test-GLMMadaptive.R 17297cb40fe75094c91b3a7b7a5876e1 *tests/testthat/test-MCMCglmm.R beeff864f26c9756ddcbc41deeac9a3a *tests/testthat/test-PMCMRplus.R 3e908991ea6d353b5f29d99bbbf0415f *tests/testthat/test-backticks.R 7517a5c3f8994988dabde1680dab6199 *tests/testthat/test-betareg.R afa473c6db15c7881a205a72d4c5b068 *tests/testthat/test-bootstrap_emmeans.R 1f2119c280267d6ed7288ddc4fb47492 *tests/testthat/test-bracl.R 6ca0c2d6f252ba9dcc0b31670ab33772 *tests/testthat/test-checks.R 1f7e858ecbddee991ef05b9c5fc4692d *tests/testthat/test-ci.R 10fdc649c3ce749687d050cbf37795f5 *tests/testthat/test-compare_parameters.R 8a18d412999611329609a49c29173b0b *tests/testthat/test-coxph.R 067befa14a9260cba7498580fce947f0 *tests/testthat/test-emmGrid-df_colname.R 8e74eacde142d525768ba7c06778af24 *tests/testthat/test-equivalence_test.R 9f08b9d7d35427da651db4775ee23118 *tests/testthat/test-format.R 287eae87ee15e6b5ff045c1be8ee3c3a *tests/testthat/test-format_model_parameters.R 0aef709a76bccc3369bb3b946a749e87 *tests/testthat/test-format_p_adjust.R 59e893a2a42283d69d52e962b9d91014 *tests/testthat/test-format_parameters.R 498790cf3f87d34a23cc52fa17881db3 *tests/testthat/test-gam.R f31f9aac92f8f791dd21897eef54801c *tests/testthat/test-gamm.R 
05ef7057dd85a4d46cca99847b641137 *tests/testthat/test-gee.R 23ee23665f3c56b021fca611a771b081 *tests/testthat/test-geeglm.R 214e6346ff4b1c79cc857fc830ce0a0a *tests/testthat/test-get_scores.R 1e970b753c755525295f180d1947d1b4 *tests/testthat/test-glmer.R ae8aab1bd8ef6964229a7bba661521d8 *tests/testthat/test-glmmTMB-2.R e7414aeb785780ec9670bb5ebd87239e *tests/testthat/test-glmmTMB.R dfc1ae1daf7aadc143c747895e9ec1aa *tests/testthat/test-gls.R 7bac2f289cff126263ad946041331644 *tests/testthat/test-ivreg.R 9bf154da80dcb9eed53be626d149287b *tests/testthat/test-lavaan.R c6fbd3df6b2650b518dc50e37dfbac07 *tests/testthat/test-lme.R 2d0d439a0efe2e67319fcc06ec6359e5 *tests/testthat/test-lmerTest.R 6493dd671c60dc0b059f143d4a7c5ff6 *tests/testthat/test-mira.R 9e9614c961f157f9e5c47255cda8baf8 *tests/testthat/test-mlm.R e3ace8a5f7f8f1a2271db6dba72e0ea0 *tests/testthat/test-model_parameters.BFBayesFactor.R 20f836a239bf1be79bc8c33d10984b96 *tests/testthat/test-model_parameters.MASS.R 59f360a6c50711c793596f18cf03e06b *tests/testthat/test-model_parameters.afex_aov.R b14f3d03eb74ac5f6231aa01b9b88c2a *tests/testthat/test-model_parameters.anova.R 712ae46b4aca1926edb21dd7bdd5b1e7 *tests/testthat/test-model_parameters.aov.R 92ab58660fca6dce39b3a9e5e1ee4378 *tests/testthat/test-model_parameters.aov_es_ci.R 80fc4b1f7e8a216245c15b451239085b *tests/testthat/test-model_parameters.blmerMod.R 2c0856779df4d32586b841a563e55a45 *tests/testthat/test-model_parameters.cgam.R 81b5a49b5c93aa4ed08377311711864b *tests/testthat/test-model_parameters.cpglmm.R 3a7ac589ad6655d64bd1ba153675d489 *tests/testthat/test-model_parameters.efa_cfa.R d268c5984e56985e36a954fd1fee9c3a *tests/testthat/test-model_parameters.gam.R 23b49cba5b055a394c0b8a65b392b298 *tests/testthat/test-model_parameters.glht.R a1247863e3e87ba7d1bc279a3777ca3b *tests/testthat/test-model_parameters.glm.R ae98704a4d3406cdcbe887937f8a9b9d *tests/testthat/test-model_parameters.htest.R 6debb526f544a3b23f8d452dd5b3d967 
*tests/testthat/test-model_parameters.hurdle.R 9059933ea8e48b5a44a1c360b78700d4 *tests/testthat/test-model_parameters.lme.R 738f6515ea0cab8e98fce123e4eac75d *tests/testthat/test-model_parameters.lqmm.R ee9d229fe607fc0d1e6b915c3fcd5282 *tests/testthat/test-model_parameters.maov.R b51fb140d69da9b45c6769b168c0055a *tests/testthat/test-model_parameters.mediate.R 3aeead1e2c4969e5539559c834eaee77 *tests/testthat/test-model_parameters.metaBMA.R 2192a4ed6eec6ff5e4cb9c7ccc4e6353 *tests/testthat/test-model_parameters.metafor.R f8f8c3ac9527a830d025e03a15e26a73 *tests/testthat/test-model_parameters.mfx.R 31491a85b9d11ca29d496a31e3589e43 *tests/testthat/test-model_parameters.mixed.R f0f7e60a3e658fcadf5512d6555012c8 *tests/testthat/test-model_parameters.mle2.R 4dc5741699fe4ec36de92320338645b9 *tests/testthat/test-model_parameters.pairwise.htest.R 1ac63a1051d4b9ea575b5f265ab9215e *tests/testthat/test-model_parameters.truncreg.R 1a47bb45e2ce895675dcb13122a7ba65 *tests/testthat/test-model_parameters.vgam.R bde079fb1ee57eb4f8febdba0575aa2e *tests/testthat/test-model_parameters_df.R 8bd6e9431c9f00cb2dcc7c8c51461efb *tests/testthat/test-model_parameters_df_method.R 82e434b0f26139a9a918244bdb9c9326 *tests/testthat/test-model_parameters_labels.R eb6d5fe19c7fa4aa0fcc699471697a1c *tests/testthat/test-model_parameters_mixed_coeforder.R 8268d49d6ebc42cbb5b9ea844a477a97 *tests/testthat/test-model_parameters_ordinal.R 73031a8ea2d25a313eda0755b9a82098 *tests/testthat/test-model_parameters_robust.R 62c7d0491c4f55f3f898326030396f3c *tests/testthat/test-model_parameters_std.R 4a3f550502725370320993f8b42f08bc *tests/testthat/test-model_parameters_std_mixed.R f7e4937abbbf73a49e29381fd5ea1bdf *tests/testthat/test-n_factors.R 364dfd6261222dc1f4fb212c2e6ef4bc *tests/testthat/test-p_adjust.R e23fe72f4dd4d3f25c6382235dae54a9 *tests/testthat/test-p_value.R 86e438742c9c658796b76eab320fc248 *tests/testthat/test-panelr.R 5383d4a4acc50eaade4ad95d2618010f *tests/testthat/test-parameters_selection.R 
800ccb8448f3990488e0422ca928133f *tests/testthat/test-parameters_table.R 5cdd92ac8d8f8b189912def0dafdb171 *tests/testthat/test-parameters_type-2.R b541af43f09e0de8b2ffdea4de4ed30a *tests/testthat/test-parameters_type.R 5b066e6eaf6ece1cfea9734f5e5df886 *tests/testthat/test-pca.R 0afc134631e279965b934cfb243782fb *tests/testthat/test-plm.R 53e635f333bcb66ff318bac5ea7c91ae *tests/testthat/test-quantreg.R 80c05b574e2a697573442b601a229e64 *tests/testthat/test-rank_deficienty.R 6e23726092a10d2d3e8f55cf4869bd1c *tests/testthat/test-rstanarm.R ca59909eab73a72bddee7ac8a6268061 *tests/testthat/test-tobit.R e412ec2750617560212d739e5495bf35 *tests/testthat/test-wrs2.R bc366c1d2c1229b60224b58ee1013682 *tests/testthat/test-zeroinfl.R 0790e38c288e7ad9b24ab34a2123bb16 *vignettes/overview_of_vignettes.Rmd parameters/NEWS.md0000644000175000017500000004617514166656741013702 0ustar nileshnilesh# parameters 0.15.1 ## General * Improved speed performance for `model_parameters()`, in particular for glm's and mixed models where random effect variances were calculated. * Added more options for printing `model_parameters()`. See also revised vignette: https://easystats.github.io/parameters/articles/model_parameters_print.html ## Changes to functions ### `model_parameters()` * `model_parameters()` for mixed models gains an `include_sigma` argument. If `TRUE`, adds the residual variance, computed from the random effects variances, as an attribute to the returned data frame. Including sigma was the default behaviour, but now defaults to `FALSE` and is only included when `include_sigma = TRUE`, because the calculation was very time consuming. * `model_parameters()` for `merMod` models now also computes CIs for the random SD parameters when `ci_method="boot"` (previously, this was only possible when `ci_method` was `"profile"`). * `model_parameters()` for `glmmTMB` models now computes CIs for the random SD parameters. Note that these are based on a Wald-z-distribution. 
* Similar to `model_parameters.htest()`, the `model_parameters.BFBayesFactor()` method gains `cohens_d` and `cramers_v` arguments to control if you need to add frequentist effect size estimates to the returned summary data frame. Previously, this was done by default. * Column names for coefficients from *emmeans* objects are now more specific. * `model_parameters()` for `MixMod` objects (package *GLMMadaptive*) gains a `robust` argument, to compute robust standard errors. ## Bug fixes * Fixed bug with `ci()` for class `merMod` when `method="boot"`. * Fixed issue with correct association of components for ordinal models of classes `clm` and `clm2`. * Fixed issues in `random_parameters()` and `model_parameters()` for mixed models without random intercept. * Confidence intervals for random parameters in `model_parameters()` failed for (some?) `glmer` models. * Fixed issue with default `ci_type` in `compare_parameters()` for Bayesian models. # parameters 0.15.0 ## Breaking changes * Following functions were moved to the new *datawizard* package and are now re-exported from *parameters* package: - `center()` - `convert_data_to_numeric()` - `data_partition()` - `demean()` (and its aliases `degroup()` and `detrend()`) - `kurtosis()` - `rescale_weights()` - `skewness()` - `smoothness()` Note that these functions will be removed in the next release of *parameters* package and they are currently being re-exported only as a convenience for the package developers. This release should provide them with time to make the necessary changes before this breaking change is implemented. * Following functions were moved to the *performance* package: - `check_heterogeneity()` - `check_multimodal()` ## General * The handling to approximate the degrees of freedom in `model_parameters()`, `ci()` and `p_value()` was revised and should now be more consistent. Some bugs related to the previous computation of confidence intervals and p-values have been fixed. 
Now it is possible to change the method to approximate degrees of freedom for CIs and p-values using the `ci_method`, resp. `method` argument. This change has been documented in detail in `?model_parameters`, and online here: https://easystats.github.io/parameters/reference/model_parameters.html * Minor changes to `print()` for *glmmTMB* with dispersion parameter. * Added vignette on printing options for model parameters. ## Changes to functions ### `model_parameters()` * The `df_method` argument in `model_parameters()` is deprecated. Please use `ci_method` now. * `model_parameters()` with `standardize = "refit"` now returns random effects from the standardized model. * `model_parameters()` and `ci()` for `lmerMod` models gain a `"residuals"` option for the `ci_method` (resp. `method`) argument, to explicitly calculate confidence intervals based on the residual degrees of freedom, when present. * `model_parameters()` supports the following new objects: `trimcibt`, `wmcpAKP`, `dep.effect` (in *WRS2* package), `systemfit` * `model_parameters()` gains a new argument `table_wide` for ANOVA tables. This can be helpful for users who may wish to report an ANOVA table in wide format (i.e., with numerator and denominator degrees of freedom on the same row). * `model_parameters()` gains two new arguments, `keep` and `drop`. `keep` is the new name for the former `parameters` argument and can be used to filter parameters. While `keep` selects those parameters whose names match the regular expression pattern defined in `keep`, `drop` is the counterpart and excludes matching parameter names. * When `model_parameters()` is called with `verbose = TRUE`, and `ci_method` is not the default value, the printed output includes a message indicating which approximation-method for degrees of freedom was used. * `model_parameters()` for mixed models with `ci_method = "profile"` computes (profiled) confidence intervals for both fixed and random effects. 
Thus, `ci_method = "profile"` allows adding confidence intervals to the random effect variances. * `model_parameters()` should no longer fail for supported model classes when robust standard errors are not available. ### Other functions * `n_factors()`: the methods based on fit indices have been fixed and can be included separately (`package = "fit"`). Also added a `n_max` argument to crop the output. * `compare_parameters()` now also accepts a list of model objects. * `describe_distribution()` gets a `verbose` argument to toggle warnings and messages. * `format_parameters()` removes dots and underscores from parameter names, to make these more "human readable". * The experimental calculation of p-values in `equivalence_test()` was replaced by a proper calculation of p-values. The argument `p_value` was removed and p-values are now always included. * Minor improvements to `print()`, `print_html()` and `print_md()`. ## Bug fixes * The random effects returned by `model_parameters()` mistakenly displayed the residuals standard deviation as square-root of the residual SD. * Fixed issue with `model_parameters()` for *brmsfit* objects that model standard errors (i.e. for meta-analysis). * Fixed issue in `model_parameters` for `lmerMod` models that, by default, returned residual degrees of freedom in the statistic column, but confidence intervals were based on `Inf` degrees of freedom instead. * Fixed issue in `ci_satterthwaite()`, which used `Inf` degrees of freedom instead of the Satterthwaite approximation. * Fixed issue in `model_parameters.mlm()` when model contained interaction terms. * Fixed issue in `model_parameters.rma()` when model contained interaction terms. * Fixed sign error for `model_parameters.htest()` for objects created with `t.test.formula()` (issue #552) * Fixed issue when computing random effect variances in `model_parameters()` for mixed models with categorical random slopes. 
# parameters 0.14.0 ## Breaking changes * `check_sphericity()` has been renamed into `check_sphericity_bartlett()`. * Removed deprecated arguments. * `model_parameters()` for bootstrapped samples used in *emmeans* now treats the bootstrap samples as samples from posterior distributions (Bayesian models). ## New supported model classes * `SemiParBIV` (*GJRM*), `selection` (*sampleSelection*), `htest` from the *survey* package, `pgmm` (*plm*). ## General * Performance improvements for models from package *survey*. ## New functions * Added a `summary()` method for `model_parameters()`, which is a convenient shortcut for `print(..., select = "minimal")`. ## Changes to functions ### `model_parameters()` * `model_parameters()` gains a `parameters` argument, which takes a regular expression as string, to select specific parameters from the returned data frame. * `print()` for `model_parameters()` and `compare_parameters()` gains a `groups` argument, to group parameters in the output. Furthermore, `groups` can be used directly as argument in `model_parameters()` and `compare_parameters()` and will be passed to the `print()` method. * `model_parameters()` for ANOVAs now saves the type as attribute and prints this information as footer in the output as well. * `model_parameters()` for *htest*-objects now saves the alternative hypothesis as attribute and prints this information as footer in the output as well. * `model_parameters()` passes arguments `type`, `parallel` and `n_cpus` down to `bootstrap_model()` when `bootstrap = TRUE`. ### other * `bootstrap_models()` for *merMod* and *glmmTMB* objects gains further arguments to set the type of bootstrapping and to allow parallel computing. * `bootstrap_parameters()` gains the `ci_method` type `"bci"`, to compute bias-corrected and accelerated bootstrapped intervals. * `ci()` for `svyglm` gains a `method` argument. ## Bug fixes * Fixed issue in `model_parameters()` for *emmGrid* objects with Bayesian models. 
* Arguments `digits`, `ci_digits` and `p_digits` were ignored for `print()` and only worked when used in the call to `model_parameters()` directly. # parameters 0.13.0 ## General * Revised and improved the `print()` method for `model_parameters()`. ## New supported model classes * `blrm` (*rmsb*), `AKP`, `med1way`, `robtab` (*WRS2*), `epi.2by2` (*epiR*), `mjoint` (*joineRML*), `mhurdle` (*mhurdle*), `sarlm` (*spatialreg*), `model_fit` (*tidymodels*), `BGGM` (*BGGM*), `mvord` (*mvord*) ## Changes to functions ### `model_parameters()` * `model_parameters()` for `blavaan` models is now fully treated as Bayesian model and thus relies on the functions from *bayestestR* (i.e. ROPE, Rhat or ESS are reported) . * The `effects`-argument from `model_parameters()` for mixed models was revised and now shows the random effects variances by default (same functionality as `random_parameters()`, but mimicking the behaviour from `broom.mixed::tidy()`). When the `group_level` argument is set to `TRUE`, the conditional modes (BLUPs) of the random effects are shown. * `model_parameters()` for mixed models now returns an `Effects` column even when there is just one type of "effects", to mimic the behaviour from `broom.mixed::tidy()`. In conjunction with `standardize_names()` users can get the same column names as in `tidy()` for `model_parameters()` objects. * `model_parameters()` for t-tests now uses the group values as column names. * `print()` for `model_parameters()` gains a `zap_small` argument, to avoid scientific notation for very small numbers. Instead, `zap_small` forces to round to the specified number of digits. * To be internally consistent, the degrees of freedom column for `lqm(m)` and `cgam(m)` objects (with *t*-statistic) is called `df_error`. * `model_parameters()` gains a `summary` argument to add summary information about the model to printed outputs. * Minor improvements for models from *quantreg*. 
* `model_parameters` supports rank-biserial, rank epsilon-squared, and Kendall's *W* as effect size measures for `wilcox.test()`, `kruskal.test`, and `friedman.test`, respectively. ### Other functions * `describe_distribution()` gets a `quartiles` argument to include 25th and 75th quartiles of a variable. ## Bug fixes * Fixed issue with non-initialized argument `style` in `display()` for `compare_parameters()`. * Make `print()` for `compare_parameters()` work with objects that have "simple" column names for confidence intervals with missing CI-level (i.e. when column is named `"CI"` instead of, say, `"95% CI"`). * Fixed issue with `p_adjust` in `model_parameters()`, which did not work for adjustment-methods `"BY"` and `"BH"`. * Fixed issue with `show_sigma` in `print()` for `model_parameters()`. * Fixed issue in `model_parameters()` with incorrect order of degrees of freedom. # parameters 0.12.0 ## General * Roll-back R dependency to R >= 3.4. * Bootstrapped estimates (from `bootstrap_model()` or `bootstrap_parameters()`) can be passed to `emmeans` to obtain bootstrapped estimates, contrasts, simple slopes (etc) and their CIs. * These can then be passed to `model_parameters()` and related functions to obtain standard errors, p-values, etc. ## Breaking changes * `model_parameters()` now always returns the confidence level as an additional `CI` column. * The `rule` argument in `equivalence_test()` defaults to `"classic"`. ## New supported model classes * `crr` (*cmprsk*), `leveneTest()` (*car*), `varest` (*vars*), `ergm` (*ergm*), `btergm` (*btergm*), `Rchoice` (*Rchoice*), `garch` (*tseries*) ## New functions * `compare_parameters()` (and its alias `compare_models()`) to show / print parameters of multiple models in one table. ## Changes to functions * Estimation of bootstrapped *p*-values has been re-written to be more accurate. * `model_parameters()` for mixed models gains an `effects`-argument, to return fixed, random or both fixed and random effects parameters. 
* Revised printing for `model_parameters()` for *metafor* models. * `model_parameters()` for *metafor* models now recognizes confidence levels specified in the function call (via argument `level`). * Improved support for effect sizes in `model_parameters()` from *anova* objects. ## Bug fixes * Fixed edge case when formatting parameters from polynomial terms with many degrees. * Fixed issue with random sampling and dropped factor levels in `bootstrap_model()`. # parameters 0.11.0 ## New supported model classes * `coxr` (*coxrobust*), `coeftest` (*lmtest*), `ivfixed` (*ivfixed*), `ivprobit` (*ivprobit*), `riskRegression` (*riskRegression*), `fitdistr` (*MASS*), `yuen`, `t1way`, `onesampb`, `mcp1` and `mcp2` (*WRS2*), `Anova.mlm` (*car*), `rqs` (*quantreg*), `lmodel2` (*lmodel2*), `summary.lm`, `PMCMR`, `osrt` and `trendPMCMR` (*PMCMRplus*), `bamlss` (*bamlss*). ## New functions ### Printing and table Formatting * `print_html()` as an alias for `display(format = "html")`. This allows to print tabular outputs from data frames (as returned by most functions in _parameters_) into nicely rendered HTML markdown tables. ## Changes to functions * Added more effect size measures to `model_parameters()` for `htest` objects. * `model_parameters()` for anova objects gains a `power` argument, to calculate the power for each parameter. * `ci()` for models from *lme4* and *glmmTMB* can now compute profiled confidence intervals, using `method = "profile"`. Consequently, `model_parameters()` with `df_method = "profile"` also computes profiled confidence intervals. For models of class `glmmTMB`, option `"uniroot"` is also available. ## Bug fixes * `model_parameters()` for t-tests when `standardize_d = TRUE`, did not return columns for the group-specific means. * Fixed issue in `p_value()` for `fixest::feols()`. * Fixed issue in `model_parameters()` for `glmer()` models with p-values that were calculated with `df_method = "ml1"` or `df_method = "betwithin"`. 
* Fixed issue in `model_parameters()` for multinomial models when response was a character vector (and no factor). * Fixed issue in `print_md()` for model-parameters objects from Bayesian models. * Fixed issues with printing of model parameters for multivariate response models from *brms*. * Fixed issue with paired t-tests and `model_parameters()`. # parameters 0.10.1 ## New functions * `format_p_adjust()`, to create pretty names for p-adjustment methods. ## Bug fixes * Fixed breaking code / failing tests due to latest _effectsize_ update. * Fixed issue with `model_parameters()` for models of class `mlm`. * Undocumented arguments `digits`, `ci_digits` and `p_digits` worked for `print()`, but not when directly called inside `model_parameters()`. Now, `model_parameters(model, digits = 5, ci_digits = 8)` works again. * Fixed some minor printing-issues. # parameters 0.10.0 ## Breaking changes * The default-method for effect sizes in `model_parameters()` for Anova-models (i.e. when arguments `omega_squared`, `eta_squared` or `epsilon_squared` are set to `TRUE`) is now `"partial"`, as initially intended. * Column names for degrees of freedom were revised. `"df_residual"` was replaced by the more generic `"df_error"`. Moreover, models of class `htest` now also have the column name `"df_error"` and no longer `"df"` (where applicable). * Some re-exports for functions that were moved to *insight* longer ago, were now removed. ## New supported model classes * `Glm` (*rms*), `mediate` (*mediation*). * `model_parameters()` supports `Gam` models (*gam*), `ridgelm` (*MASS*), `htest` objects from `oneway.test()`, `chisq.test()`, `prop.test()`, `mcnemar.test()` and `pairwise.htest` objects, `mcmc.list` (e.g. from *bayesGARCH*). ## New functions ### Printing and table Formatting * `display()`, to format output from package-functions into different formats. * `print_md()` as an alias for `display(format = "markdown")`. 
This allows to print tabular outputs from data frames (as returned by most functions in _parameters_) into nicely rendered markdown tables. * `format()`, to create a "pretty data frame" with nicer column names and formatted values. This is one of the worker-functions behind `print()` or `print_md()`. ## Changes to functions ### `model_parameters()` * `model_parameters()` for Anova-models (of class `aov`, `anova` etc.) gains a `ci`-argument, to add confidence intervals to effect size parameters. * `model_parameters()` for `htest` objects gains a `cramers_v` and `phi` argument, to compute effect size parameters for objects from `chisq.test()`, and a `standardized_D` argument, to compute effect size parameters for objects from `t.test()`. * `model_parameters()` for `metafor`-models is more stable when called from inside functions. * `model_parameters()` for *metaBMA*-models now includes prior information for the meta-parameters. * `model_parameters()` for meta-analysis-models gains a `include_studies`-argument, to include or remove studies from the output. * `model_parameters()` for gam-models now includes the residual df for smooth terms, and no longer the reference df. * Slightly revised and improved the `print()` method for `model_parameters()`. ### Other functions * `describe_distribution()` now includes the name of the centrality index in the `CI`-column, when `centrality = "all"`. * `pool_parameters()` gains a `details`-argument. For mixed models, and if `details = TRUE`, random effect variances will also be pooled. ## Bug fixes * Fixed issue in `ci()` for *lme* models with non-positive definite variance-covariance. * Fixed issue in `model_parameters()` for `nnet::multinom()`, `lqmm::lqm()`, `mgcv::gam()`, and `margins::margins()` models, and models from package *blme*. 
parameters/DESCRIPTION0000644000175000017500000001165214167565473014304 0ustar nileshnileshType: Package Package: parameters Title: Processing of Model Parameters Version: 0.16.0 Authors@R: c(person(given = "Daniel", family = "Lüdecke", role = c("aut", "cre"), email = "d.luedecke@uke.de", comment = c(ORCID = "0000-0002-8895-3206", Twitter = "@strengejacke")), person(given = "Dominique", family = "Makowski", role = "aut", email = "dom.makowski@gmail.com", comment = c(ORCID = "0000-0001-5375-9967", Twitter = "@Dom_Makowski")), person(given = "Mattan S.", family = "Ben-Shachar", role = "aut", email = "matanshm@post.bgu.ac.il", comment = c(ORCID = "0000-0002-4287-4801")), person(given = "Indrajeet", family = "Patil", role = "aut", email = "patilindrajeet.science@gmail.com", comment = c(ORCID = "0000-0003-1995-6531", Twitter = "@patilindrajeets")), person(given = "Søren", family = "Højsgaard", role = "aut", email = "sorenh@math.aau.dk"), person(given = "Brenton M.", family = "Wiernik", role = "aut", email = "brenton@wiernik.org", comment = c(ORCID = "0000-0001-9560-6336", Twitter = "@bmwiernik")), person(given = "Zen J.", family = "Lau", role = "ctb", email = "zenjuen.lau@ntu.edu.sg"), person(given = "Vincent", family = "Arel-Bundock", role = "ctb", email = "vincent.arel-bundock@umontreal.ca", comment = c(ORCID = "0000-0003-1995-6531", Twitter = "@vincentab")), person(given = "Jeffrey", family = "Girard", role = "ctb", email = "me@jmgirard.com", comment = c(ORCID = "0000-0002-7359-3746", Twitter = "@jeffreymgirard")), person(given = "Christina", family = "Maimone", role = "rev", email = "christina.maimone@northwestern.edu"), person(given = "Niels", family = "Ohlsen", role = "rev", comment = c(Twitter = "@Niels_Bremen"))) Maintainer: Daniel Lüdecke Description: Utilities for processing the parameters of various statistical models. 
Beyond computing p values, CIs, and other indices for a wide variety of models (see list of supported models using the function 'insight::supported_models()'), this package implements features like bootstrapping or simulating of parameters and models, feature reduction (feature extraction and variable selection) as well as functions to describe data and variable characteristics (e.g. skewness, kurtosis, smoothness or distribution). License: GPL-3 URL: https://easystats.github.io/parameters/ BugReports: https://github.com/easystats/parameters/issues Depends: R (>= 3.4) Imports: bayestestR (>= 0.11.5), datawizard (>= 0.2.1), insight (>= 0.14.5), graphics, methods, stats, utils Suggests: AER, afex, aod, BayesFactor, BayesFM, bbmle, betareg, biglm, blme, boot, brglm2, brms, broom, cAIC4, car, cgam, ClassDiscovery, clubSandwich, cluster, cplm, dbscan, drc, DRR, effectsize (>= 0.5.0), EGAnet (>= 0.7), emmeans (>= 1.7.0), factoextra, FactoMineR, fastICA, fpc, gam, gamlss, gee, geepack, ggplot2, GLMMadaptive, glmmTMB, GPArotation, gt, ivprobit, ivreg, knitr, lavaan, lavaSearch2, lfe, lm.beta, lme4, lmerTest, logspline, lqmm, M3C, magrittr, MASS, Matrix, mclust, MCMCglmm, mediation, metaBMA, metafor, mfx, mgcv, mice, multcomp, MuMIn, NbClust, nFactors, nlme, nnet, openxlsx, ordinal, panelr, pbkrtest, PCDimension, performance (>= 0.8.0), plm, PMCMRplus, poorman, PROreg, projpred, pscl, psych, pvclust, quantreg, randomForest, rmarkdown, rstanarm, sandwich, see (>= 0.6.8), sjstats, spelling, survey, survival, testthat, TMB, tripack, truncreg, VGAM, WRS2 VignetteBuilder: knitr Encoding: UTF-8 Language: en-US RoxygenNote: 7.1.2 Config/testthat/edition: 3 NeedsCompilation: no Packaged: 2022-01-12 12:43:41 UTC; Daniel Author: Daniel Lüdecke [aut, cre] (, @strengejacke), Dominique Makowski [aut] (, @Dom_Makowski), Mattan S. Ben-Shachar [aut] (), Indrajeet Patil [aut] (, @patilindrajeets), Søren Højsgaard [aut], Brenton M. Wiernik [aut] (, @bmwiernik), Zen J. 
Lau [ctb], Vincent Arel-Bundock [ctb] (, @vincentab), Jeffrey Girard [ctb] (, @jeffreymgirard), Christina Maimone [rev], Niels Ohlsen [rev] (@Niels_Bremen) Repository: CRAN Date/Publication: 2022-01-12 14:52:43 UTC parameters/README.md0000644000175000017500000003155014160547734014045 0ustar nileshnilesh # parameters [![DOI](https://joss.theoj.org/papers/10.21105/joss.02445/status.svg)](https://doi.org/10.21105/joss.02445) [![downloads](http://cranlogs.r-pkg.org/badges/parameters)](https://cran.r-project.org/package=parameters) [![total](https://cranlogs.r-pkg.org/badges/grand-total/parameters)](https://cranlogs.r-pkg.org/) [![status](https://tinyverse.netlify.com/badge/parameters)](https://CRAN.R-project.org/package=parameters) ------------------------------------------------------------------------ :warning: For Bayesian models, we changed the default the CI width! Please make an [informed decision](https://easystats.github.io/bayestestR/articles/credible_interval.html) and set it explicitly (`ci = 0.89`, `ci = 0.95`, or anything else that you decide) :warning: ------------------------------------------------------------------------ ***Describe and understand your model’s parameters!*** **parameters**’ primary goal is to provide utilities for processing the parameters of various statistical models (see [here](https://easystats.github.io/insight/) for a list of supported models). Beyond computing *p-values*, *CIs*, *Bayesian indices* and other measures for a wide variety of models, this package implements features like *bootstrapping* of parameters and models, *feature reduction* (feature extraction and variable selection), or tools for data reduction like functions to perform cluster, factor or principal component analysis. 
Another important goal of the **parameters** package is to facilitate and streamline the process of reporting results of statistical models, which includes the easy and intuitive calculation of standardized estimates or robust standard errors and p-values. **parameters** therefore offers a simple and unified syntax to process a large variety of (model) objects from many different packages. ## Installation [![CRAN](http://www.r-pkg.org/badges/version/parameters)](https://cran.r-project.org/package=parameters) [![parameters status badge](https://easystats.r-universe.dev/badges/parameters)](https://easystats.r-universe.dev) [![R-check](https://github.com/easystats/parameters/workflows/R-check/badge.svg?branch=main)](https://github.com/easystats/parameters/actions) Run the following to install the stable release of **parameters** from CRAN: ``` r install.packages("parameters") ``` Or this one to install the latest development version from R-universe… ``` r install.packages("parameters", repos = "https://easystats.r-universe.dev") ``` …or from GitHub: ``` r install.packages("remotes") remotes::install_github("easystats/parameters") ``` ## Documentation [![Documentation](https://img.shields.io/badge/documentation-parameters-orange.svg?colorB=E91E63)](https://easystats.github.io/parameters/) [![Blog](https://img.shields.io/badge/blog-easystats-orange.svg?colorB=FF9800)](https://easystats.github.io/blog/posts/) [![Features](https://img.shields.io/badge/features-parameters-orange.svg?colorB=2196F3)](https://easystats.github.io/parameters/reference/index.html) Click on the buttons above to access the package [documentation](https://easystats.github.io/parameters/) and the [easystats blog](https://easystats.github.io/blog/posts/), and check-out these vignettes: - [Summary of Model Parameters](https://easystats.github.io/parameters/articles/model_parameters.html) - [Standardized Model 
Parameters](https://easystats.github.io/parameters/articles/model_parameters_standardized.html) - [Robust Estimation of Standard Errors, Confidence Intervals and p-values](https://easystats.github.io/parameters/articles/model_parameters_robust.html) - [Model Parameters and Missing Data](https://easystats.github.io/parameters/articles/model_parameters_mice.html) - [Feature reduction (PCA, cMDS, ICA…)](https://easystats.github.io/parameters/articles/parameters_reduction.html) - [Structural models (EFA, CFA, SEM…)](https://easystats.github.io/parameters/articles/efa_cfa.html) - [Parameters selection](https://easystats.github.io/parameters/articles/parameters_selection.html) - [A Practical Guide for Panel Data Analysis](https://easystats.github.io/datawizard/articles/demean.html) ## Contributing and Support In case you want to file an issue or contribute in another way to the package, please follow [this guide](https://github.com/easystats/parameters/blob/master/.github/CONTRIBUTING.md). For questions about the functionality, you may either contact us via email or also file an issue. # Features ## Model’s parameters description The [`model_parameters()`](https://easystats.github.io/parameters/articles/model_parameters.html) function (that can be accessed via the `parameters()` shortcut) allows you to extract the parameters and their characteristics from various models in a consistent way. It can be considered as a lightweight alternative to [`broom::tidy()`](https://github.com/tidymodels/broom), with some notable differences: - The column names of the returned data frame are *specific* to their content. For instance, the column containing the statistic is named following the statistic name, i.e., *t*, *z*, etc., instead of a generic name such as *statistic* (however, you can get standardized (generic) column names using [`standardize_names()`](https://easystats.github.io/insight/reference/standardize_names.html)). 
- It is able to compute or extract indices not available by default, such as *p-values*, *CIs*, etc. - It includes *feature engineering* capabilities, including parameters [bootstrapping](https://easystats.github.io/parameters/reference/bootstrap_parameters.html). ### Classical Regression Models ``` r model <- lm(Sepal.Width ~ Petal.Length * Species + Petal.Width, data = iris) # regular model parameters model_parameters(model) #> Parameter | Coefficient | SE | 95% CI | t(143) | p #> ------------------------------------------------------------------------------------------- #> (Intercept) | 2.89 | 0.36 | [ 2.18, 3.60] | 8.01 | < .001 #> Petal Length | 0.26 | 0.25 | [-0.22, 0.75] | 1.07 | 0.287 #> Species [versicolor] | -1.66 | 0.53 | [-2.71, -0.62] | -3.14 | 0.002 #> Species [virginica] | -1.92 | 0.59 | [-3.08, -0.76] | -3.28 | 0.001 #> Petal Width | 0.62 | 0.14 | [ 0.34, 0.89] | 4.41 | < .001 #> Petal Length * Species [versicolor] | -0.09 | 0.26 | [-0.61, 0.42] | -0.36 | 0.721 #> Petal Length * Species [virginica] | -0.13 | 0.26 | [-0.64, 0.38] | -0.50 | 0.618 # standardized parameters model_parameters(model, standardize = "refit") #> Parameter | Coefficient | SE | 95% CI | t(143) | p #> ------------------------------------------------------------------------------------------- #> (Intercept) | 3.59 | 1.30 | [ 1.01, 6.17] | 2.75 | 0.007 #> Petal Length | 1.07 | 1.00 | [-0.91, 3.04] | 1.07 | 0.287 #> Species [versicolor] | -4.62 | 1.31 | [-7.21, -2.03] | -3.53 | < .001 #> Species [virginica] | -5.51 | 1.38 | [-8.23, -2.79] | -4.00 | < .001 #> Petal Width | 1.08 | 0.24 | [ 0.59, 1.56] | 4.41 | < .001 #> Petal Length * Species [versicolor] | -0.38 | 1.06 | [-2.48, 1.72] | -0.36 | 0.721 #> Petal Length * Species [virginica] | -0.52 | 1.04 | [-2.58, 1.54] | -0.50 | 0.618 ``` ### Mixed Models ``` r library(lme4) model <- lmer(Sepal.Width ~ Petal.Length + (1|Species), data = iris) # model parameters with CI, df and p-values based on Wald approximation 
model_parameters(model, effects = "all") #> # Fixed Effects #> #> Parameter | Coefficient | SE | 95% CI | t(146) | p #> ------------------------------------------------------------------ #> (Intercept) | 2.00 | 0.56 | [0.89, 3.11] | 3.56 | < .001 #> Petal Length | 0.28 | 0.06 | [0.16, 0.40] | 4.75 | < .001 #> #> # Random Effects #> #> Parameter | Coefficient #> ------------------------------------- #> SD (Intercept: Species) | 0.89 #> SD (Residual) | 0.32 # model parameters with CI, df and p-values based on Kenward-Roger approximation model_parameters(model, df_method = "kenward") #> # Fixed Effects #> #> Parameter | Coefficient | SE | 95% CI | t | df | p #> ------------------------------------------------------------------------- #> (Intercept) | 2.00 | 0.57 | [0.07, 3.93] | 3.53 | 2.67 | 0.046 #> Petal Length | 0.28 | 0.06 | [0.16, 0.40] | 4.58 | 140.98 | < .001 #> #> # Random Effects #> #> Parameter | Coefficient #> ------------------------------------- #> SD (Intercept: Species) | 0.89 #> SD (Residual) | 0.32 ``` ### Structural Models Besides many types of regression models and packages, it also works for other types of models, such as [**structural models**](https://easystats.github.io/parameters/articles/efa_cfa.html) (EFA, CFA, SEM…). 
``` r library(psych) model <- psych::fa(attitude, nfactors = 3) model_parameters(model) #> # Rotated loadings from Factor Analysis (oblimin-rotation) #> #> Variable | MR1 | MR2 | MR3 | Complexity | Uniqueness #> ------------------------------------------------------------ #> rating | 0.90 | -0.07 | -0.05 | 1.02 | 0.23 #> complaints | 0.97 | -0.06 | 0.04 | 1.01 | 0.10 #> privileges | 0.44 | 0.25 | -0.05 | 1.64 | 0.65 #> learning | 0.47 | 0.54 | -0.28 | 2.51 | 0.24 #> raises | 0.55 | 0.43 | 0.25 | 2.35 | 0.23 #> critical | 0.16 | 0.17 | 0.48 | 1.46 | 0.67 #> advance | -0.11 | 0.91 | 0.07 | 1.04 | 0.22 #> #> The 3 latent factors (oblimin rotation) accounted for 66.60% of the total variance of the original data (MR1 = 38.19%, MR2 = 22.69%, MR3 = 5.72%). ``` ## Variable and parameters selection [`select_parameters()`](https://easystats.github.io/parameters/articles/parameters_selection.html) can help you quickly select and retain the most relevant predictors using methods tailored for the model type. 
``` r library(poorman) lm(disp ~ ., data = mtcars) %>% select_parameters() %>% model_parameters() #> Parameter | Coefficient | SE | 95% CI | t(26) | p #> ----------------------------------------------------------------------- #> (Intercept) | 141.70 | 125.67 | [-116.62, 400.02] | 1.13 | 0.270 #> cyl | 13.14 | 7.90 | [ -3.10, 29.38] | 1.66 | 0.108 #> hp | 0.63 | 0.20 | [ 0.22, 1.03] | 3.18 | 0.004 #> wt | 80.45 | 12.22 | [ 55.33, 105.57] | 6.58 | < .001 #> qsec | -14.68 | 6.14 | [ -27.31, -2.05] | -2.39 | 0.024 #> carb | -28.75 | 5.60 | [ -40.28, -17.23] | -5.13 | < .001 ``` ## Miscellaneous This packages also contains a lot of [other useful functions](https://easystats.github.io/parameters/reference/index.html): ### Describe a Distribution ``` r data(iris) describe_distribution(iris) #> Variable | Mean | SD | IQR | Range | Skewness | Kurtosis | n | n_Missing #> ---------------------------------------------------------------------------------------- #> Sepal.Length | 5.84 | 0.83 | 1.30 | [4.30, 7.90] | 0.31 | -0.55 | 150 | 0 #> Sepal.Width | 3.06 | 0.44 | 0.52 | [2.00, 4.40] | 0.32 | 0.23 | 150 | 0 #> Petal.Length | 3.76 | 1.77 | 3.52 | [1.00, 6.90] | -0.27 | -1.40 | 150 | 0 #> Petal.Width | 1.20 | 0.76 | 1.50 | [0.10, 2.50] | -0.10 | -1.34 | 150 | 0 ``` ### Citation In order to cite this package, please use the following command: ``` r citation("parameters") Lüdecke D, Ben-Shachar M, Patil I, Makowski D (2020). "Extracting, Computing and Exploring the Parameters of Statistical Models using R." _Journal of Open Source Software_, *5*(53), 2445. doi: 10.21105/joss.02445 (URL: https://doi.org/10.21105/joss.02445). A BibTeX entry for LaTeX users is @Article{, title = {Extracting, Computing and Exploring the Parameters of Statistical Models using {R}.}, volume = {5}, doi = {10.21105/joss.02445}, number = {53}, journal = {Journal of Open Source Software}, author = {Daniel Lüdecke and Mattan S. 
Ben-Shachar and Indrajeet Patil and Dominique Makowski}, year = {2020}, pages = {2445}, } ``` parameters/man/0000755000175000017500000000000014166656741013342 5ustar nileshnileshparameters/man/p_value_ml1.Rd0000644000175000017500000000663214160324505016024 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/ci_ml1.R, R/dof_ml1.R, R/p_value_ml1.R \name{ci_ml1} \alias{ci_ml1} \alias{dof_ml1} \alias{p_value_ml1} \title{"m-l-1" approximation for SEs, CIs and p-values} \usage{ ci_ml1(model, ci = 0.95, robust = FALSE, ...) dof_ml1(model) p_value_ml1(model, dof = NULL, robust = FALSE, ...) } \arguments{ \item{model}{A mixed model.} \item{ci}{Confidence Interval (CI) level. Default to \code{0.95} (\verb{95\%}).} \item{robust}{Logical, if \code{TRUE}, computes confidence intervals (or p-values) based on robust standard errors. See \code{\link[=standard_error_robust]{standard_error_robust()}}.} \item{...}{Arguments passed down to \code{\link[=standard_error_robust]{standard_error_robust()}} when confidence intervals or p-values based on robust standard errors should be computed.} \item{dof}{Degrees of Freedom.} } \value{ A data frame. } \description{ Approximation of degrees of freedom based on a "m-l-1" heuristic as suggested by Elff et al. (2019). } \details{ \subsection{Small Sample Cluster corrected Degrees of Freedom}{ Inferential statistics (like p-values, confidence intervals and standard errors) may be biased in mixed models when the number of clusters is small (even if the sample size of level-1 units is high). In such cases it is recommended to approximate a more accurate number of degrees of freedom for such inferential statistics (see \cite{Li and Redden 2015}). The \emph{m-l-1} heuristic is such an approach that uses a t-distribution with fewer degrees of freedom (\code{dof_ml1()}) to calculate p-values (\code{p_value_ml1()}) and confidence intervals (\code{ci(method = "ml1")}). 
} \subsection{Degrees of Freedom for Longitudinal Designs (Repeated Measures)}{ In particular for repeated measure designs (longitudinal data analysis), the \emph{m-l-1} heuristic is likely to be more accurate than simply using the residual or infinite degrees of freedom, because \code{dof_ml1()} returns different degrees of freedom for within-cluster and between-cluster effects. } \subsection{Limitations of the "m-l-1" Heuristic}{ Note that the "m-l-1" heuristic is not applicable (or at least less accurate) for complex multilevel designs, e.g. with cross-classified clusters. In such cases, more accurate approaches like the Kenward-Roger approximation (\code{dof_kenward()}) is recommended. However, the "m-l-1" heuristic also applies to generalized mixed models, while approaches like Kenward-Roger or Satterthwaite are limited to linear mixed models only. } } \examples{ \donttest{ if (require("lme4")) { model <- lmer(Petal.Length ~ Sepal.Length + (1 | Species), data = iris) p_value_ml1(model) } } } \references{ \itemize{ \item Elff, M.; Heisig, J.P.; Schaeffer, M.; Shikano, S. (2019). Multilevel Analysis with Few Clusters: Improving Likelihood-based Methods to Provide Unbiased Estimates and Accurate Inference, British Journal of Political Science. \item Li, P., Redden, D. T. (2015). Comparing denominator degrees of freedom approximations for the generalized linear mixed model in analyzing binary outcome in small sample cluster-randomized trials. BMC Medical Research Methodology, 15(1), 38. \doi{10.1186/s12874-015-0026-x} } } \seealso{ \code{dof_ml1()} is a small helper-function to calculate approximated degrees of freedom of model parameters, based on the "m-l-1" heuristic. 
} parameters/man/dot-flatten_list.Rd0000644000175000017500000000057313636467450017110 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/utils.R \name{.flatten_list} \alias{.flatten_list} \title{Flatten a list} \usage{ .flatten_list(object, name = "name") } \arguments{ \item{object}{A list.} \item{name}{Name of column of keys in the case the output is a dataframe.} } \description{ Flatten a list } \keyword{internal} parameters/man/ci.default.Rd0000644000175000017500000002466414160324505015643 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/2_ci.R, R/methods_glmmTMB.R, R/methods_lme4.R \name{ci.default} \alias{ci.default} \alias{ci.glmmTMB} \alias{ci.merMod} \title{Confidence Intervals (CI)} \usage{ \method{ci}{default}(x, ci = 0.95, dof = NULL, method = NULL, robust = FALSE, ...) \method{ci}{glmmTMB}( x, ci = 0.95, dof = NULL, method = "wald", robust = FALSE, component = "all", verbose = TRUE, ... ) \method{ci}{merMod}( x, ci = 0.95, dof = NULL, method = "wald", robust = FALSE, iterations = 500, ... ) } \arguments{ \item{x}{A statistical model.} \item{ci}{Confidence Interval (CI) level. Default to \code{0.95} (\verb{95\%}).} \item{dof}{Number of degrees of freedom to be used when calculating confidence intervals. If \code{NULL} (default), the degrees of freedom are retrieved by calling \code{\link[=degrees_of_freedom]{degrees_of_freedom()}} with approximation method defined in \code{method}. If not \code{NULL}, use this argument to override the default degrees of freedom used to compute confidence intervals.} \item{method}{Method for computing degrees of freedom for confidence intervals (CI) and the related p-values. 
Allowed are following options (which vary depending on the model class): \code{"residual"}, \code{"normal"}, \code{"likelihood"}, \code{"satterthwaite"}, \code{"kenward"}, \code{"wald"}, \code{"profile"}, \code{"boot"}, \code{"uniroot"}, \code{"ml1"}, \code{"betwithin"}, \code{"hdi"}, \code{"quantile"}, \code{"ci"}, \code{"eti"}, \code{"si"}, \code{"bci"}, or \code{"bcai"}. See section \emph{Confidence intervals and approximation of degrees of freedom} in \code{\link[=model_parameters]{model_parameters()}} for further details.} \item{robust}{Logical, if \code{TRUE}, computes confidence intervals (or p-values) based on robust standard errors. See \code{\link[=standard_error_robust]{standard_error_robust()}}.} \item{...}{Arguments passed down to \code{\link[=standard_error_robust]{standard_error_robust()}} when confidence intervals or p-values based on robust standard errors should be computed.} \item{component}{Model component for which parameters should be shown. See the documentation for your object's class in \code{\link[=model_parameters]{model_parameters()}} for further details.} \item{verbose}{Toggle warnings and messages.} \item{iterations}{The number of bootstrap replicates. Only applies to models of class \code{merMod} when \code{method=boot}.} } \value{ A data frame containing the CI bounds. } \description{ Compute confidence intervals (CI) for frequentist models. } \note{ \code{ci_robust()} resp. \code{ci(robust=TRUE)} rely on the \pkg{sandwich} or \pkg{clubSandwich} package (the latter if \code{vcov_estimation="CR"} for cluster-robust standard errors) and will thus only work for those models supported by those packages. } \section{Confidence intervals and approximation of degrees of freedom}{ There are different ways of approximating the degrees of freedom depending on different assumptions about the nature of the model and its sampling distribution. 
The \code{ci_method} argument modulates the method for computing degrees of freedom (df) that are used to calculate confidence intervals (CI) and the related p-values. Following options are allowed, depending on the model class: \strong{Classical methods:} Classical inference is generally based on the \strong{Wald method}. The Wald approach to inference computes a test statistic by dividing the parameter estimate by its standard error (Coefficient / SE), then comparing this statistic against a t- or normal distribution. This approach can be used to compute CIs and p-values. \code{"wald"}: \itemize{ \item Applies to \emph{non-Bayesian models}. For \emph{linear models}, CIs computed using the Wald method (SE and a \emph{t-distribution with residual df}); p-values computed using the Wald method with a \emph{t-distribution with residual df}. For other models, CIs computed using the Wald method (SE and a \emph{normal distribution}); p-values computed using the Wald method with a \emph{normal distribution}. } \code{"normal"} \itemize{ \item Applies to \emph{non-Bayesian models}. Compute Wald CIs and p-values, but always use a normal distribution. } \code{"residual"} \itemize{ \item Applies to \emph{non-Bayesian models}. Compute Wald CIs and p-values, but always use a \emph{t-distribution with residual df} when possible. If the residual df for a model cannot be determined, a normal distribution is used instead. } \strong{Methods for mixed models:} Compared to fixed effects (or single-level) models, determining appropriate df for Wald-based inference in mixed models is more difficult. See \href{https://bbolker.github.io/mixedmodels-misc/glmmFAQ.html#what-are-the-p-values-listed-by-summaryglmerfit-etc.-are-they-reliable}{the R GLMM FAQ} for a discussion. Several approximate methods for computing df are available, but you should also consider instead using profile likelihood (\code{"profile"}) or bootstrap ("\verb{boot"}) CIs and p-values instead. 
\code{"satterthwaite"} \itemize{ \item Applies to \emph{linear mixed models}. CIs computed using the Wald method (SE and a \emph{t-distribution with Satterthwaite df}); p-values computed using the Wald method with a \emph{t-distribution with Satterthwaite df}. } \code{"kenward"} \itemize{ \item Applies to \emph{linear mixed models}. CIs computed using the Wald method (\emph{Kenward-Roger SE} and a \emph{t-distribution with Kenward-Roger df}); p-values computed using the Wald method with \emph{Kenward-Roger SE and t-distribution with Kenward-Roger df}. } \code{"ml1"} \itemize{ \item Applies to \emph{linear mixed models}. CIs computed using the Wald method (SE and a \emph{t-distribution with m-l-1 approximated df}); p-values computed using the Wald method with a \emph{t-distribution with m-l-1 approximated df}. See \code{\link[=ci_ml1]{ci_ml1()}}. } \code{"betwithin"} \itemize{ \item Applies to \emph{linear mixed models} and \emph{generalized linear mixed models}. CIs computed using the Wald method (SE and a \emph{t-distribution with between-within df}); p-values computed using the Wald method with a \emph{t-distribution with between-within df}. See \code{\link[=ci_betwithin]{ci_betwithin()}}. } \strong{Likelihood-based methods:} Likelihood-based inference is based on comparing the likelihood for the maximum-likelihood estimate to the likelihood for models with one or more parameter values changed (e.g., set to zero or a range of alternative values). Likelihood ratios for the maximum-likelihood and alternative models are compared to a \eqn{\chi}-squared distribution to compute CIs and p-values. \code{"profile"} \itemize{ \item Applies to \emph{non-Bayesian models} of class \code{glm}, \code{polr} or \code{glmmTMB}. 
CIs computed by \emph{profiling the likelihood curve for a parameter}, using linear interpolation to find where likelihood ratio equals a critical value; p-values computed using the Wald method with a \emph{normal-distribution} (note: this might change in a future update!) } \code{"uniroot"} \itemize{ \item Applies to \emph{non-Bayesian models} of class \code{glmmTMB}. CIs computed by \emph{profiling the likelihood curve for a parameter}, using root finding to find where likelihood ratio equals a critical value; p-values computed using the Wald method with a \emph{normal-distribution} (note: this might change in a future update!) } \strong{Methods for bootstrapped or Bayesian models:} Bootstrap-based inference is based on \strong{resampling} and refitting the model to the resampled datasets. The distribution of parameter estimates across resampled datasets is used to approximate the parameter's sampling distribution. Depending on the type of model, several different methods for bootstrapping and constructing CIs and p-values from the bootstrap distribution are available. For Bayesian models, inference is based on drawing samples from the model posterior distribution. \code{"quantile"} (or \code{"eti"}) \itemize{ \item Applies to \emph{all models (including Bayesian models)}. For non-Bayesian models, only applies if \code{bootstrap = TRUE}. CIs computed as \emph{equal tailed intervals} using the quantiles of the bootstrap or posterior samples; p-values are based on the \emph{probability of direction}. See \code{\link[bayestestR:eti]{bayestestR::eti()}}. } \code{"hdi"} \itemize{ \item Applies to \emph{all models (including Bayesian models)}. For non-Bayesian models, only applies if \code{bootstrap = TRUE}. CIs computed as \emph{highest density intervals} for the bootstrap or posterior samples; p-values are based on the \emph{probability of direction}. See \code{\link[bayestestR:hdi]{bayestestR::hdi()}}. 
} \code{"bci"} (or \code{"bcai"}) \itemize{ \item Applies to \emph{all models (including Bayesian models)}. For non-Bayesian models, only applies if \code{bootstrap = TRUE}. CIs computed as \emph{bias corrected and accelerated intervals} for the bootstrap or posterior samples; p-values are based on the \emph{probability of direction}. See \code{\link[bayestestR:bci]{bayestestR::bci()}}. } \code{"si"} \itemize{ \item Applies to \emph{Bayesian models} with proper priors. CIs computed as \emph{support intervals} comparing the posterior samples against the prior samples; p-values are based on the \emph{probability of direction}. See \code{\link[bayestestR:si]{bayestestR::si()}}. } \code{"boot"} \itemize{ \item Applies to \emph{non-Bayesian models} of class \code{merMod}. CIs computed using \emph{parametric bootstrapping} (simulating data from the fitted model); p-values computed using the Wald method with a \emph{normal-distribution} (note: this might change in a future update!). } For all iteration-based methods other than \code{"boot"} (\code{"hdi"}, \code{"quantile"}, \code{"ci"}, \code{"eti"}, \code{"si"}, \code{"bci"}, \code{"bcai"}), p-values are based on the probability of direction (\code{\link[bayestestR:p_direction]{bayestestR::p_direction()}}), which is converted into a p-value using \code{\link[bayestestR:pd_to_p]{bayestestR::pd_to_p()}}. } \examples{ \donttest{ library(parameters) if (require("glmmTMB")) { model <- glmmTMB( count ~ spp + mined + (1 | site), ziformula = ~mined, family = poisson(), data = Salamanders ) ci(model) ci(model, component = "zi") } } } parameters/man/cluster_meta.Rd0000644000175000017500000000432214131075434016303 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/cluster_meta.R \name{cluster_meta} \alias{cluster_meta} \title{Metaclustering} \usage{ cluster_meta(list_of_clusters, rownames = NULL, ...) 
} \arguments{ \item{list_of_clusters}{A list of vectors with the clustering assignments from various methods.} \item{rownames}{An optional vector of row.names for the matrix.} \item{...}{Currently not used.} } \value{ A matrix containing all the pairwise (between each observation) probabilities of being clustered together by the methods. } \description{ One of the core "issues" of statistical clustering is that, in many cases, different methods will give different results. The \strong{metaclustering} approach proposed by \emph{easystats} (that finds echoes in \emph{consensus clustering}; see Monti et al., 2003) consists of treating the unique clustering solutions as an ensemble, from which we can derive a probability matrix. This matrix contains, for each pair of observations, the probability of being in the same cluster. For instance, if the 6th and the 9th rows of a dataframe have been assigned to a similar cluster by 5 out of 10 clustering methods, then their probability of being grouped together is 0.5. \cr\cr Metaclustering is based on the hypothesis that, as each clustering algorithm embodies a different prism by which it sees the data, running an infinite number of algorithms would result in the emergence of the "true" clusters. As the number of algorithms and parameters is finite, the probabilistic perspective is a useful proxy. This method is interesting where there are no obvious reasons to prefer one over another clustering method, as well as to investigate how robust some clusters are under different algorithms. 
} \examples{ \dontrun{ data <- iris[1:4] rez1 <- cluster_analysis(data, n = 2, method = "kmeans") rez2 <- cluster_analysis(data, n = 3, method = "kmeans") rez3 <- cluster_analysis(data, n = 6, method = "kmeans") list_of_clusters <- list(rez1, rez2, rez3) m <- cluster_meta(list_of_clusters) # Visualize matrix without reordering heatmap(m, Rowv = NA, Colv = NA, scale = "none") # Without reordering # Reordered heatmap heatmap(m, scale = "none") } } parameters/man/dot-factor_to_numeric.Rd0000644000175000017500000000053313741777636020125 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/utils.R \name{.factor_to_numeric} \alias{.factor_to_numeric} \title{Safe transformation from factor/character to numeric} \usage{ .factor_to_numeric(x, lowest = NULL) } \description{ Safe transformation from factor/character to numeric } \keyword{internal} parameters/man/check_kmo.Rd0000644000175000017500000000376214160324505015544 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/check_factorstructure.R \name{check_kmo} \alias{check_kmo} \title{Kaiser, Meyer, Olkin (KMO) Measure of Sampling Adequacy (MSA) for Factor Analysis} \usage{ check_kmo(x, ...) } \arguments{ \item{x}{A dataframe.} \item{...}{Arguments passed to or from other methods.} } \value{ A list of indices related to KMO. } \description{ Kaiser (1970) introduced a Measure of Sampling Adequacy (MSA), later modified by Kaiser and Rice (1974). The Kaiser-Meyer-Olkin (KMO) statistic, which can vary from 0 to 1, indicates the degree to which each variable in a set is predicted without error by the other variables. } \details{ A value of 0 indicates that the sum of partial correlations is large relative to the sum correlations, indicating factor analysis is likely to be inappropriate. 
A KMO value close to 1 indicates that the sum of partial correlations is not large relative to the sum of correlations and so factor analysis should yield distinct and reliable factors. Kaiser (1975) suggested that KMO > .9 were marvelous, in the .80s, meritorious, in the .70s, middling, in the .60s, mediocre, in the .50s, miserable, and less than .5, unacceptable. Hair et al. (2006) suggest accepting a value > 0.5. Values between 0.5 and 0.7 are mediocre, and values between 0.7 and 0.8 are good. This function is strongly inspired by the \code{KMO} function in the \code{psych} package (Revelle, 2016). All credit goes to its author. } \examples{ library(parameters) check_kmo(mtcars) } \references{ \itemize{ \item Revelle, W. (2016). How To: Use the psych package for Factor Analysis and data reduction. \item Kaiser, H. F. (1970). A second generation little jiffy. Psychometrika, 35(4), 401-415. \item Kaiser, H. F., & Rice, J. (1974). Little jiffy, mark IV. Educational and psychological measurement, 34(1), 111-117. \item Kaiser, H. F. (1974). An index of factorial simplicity. Psychometrika, 39(1), 31-36. } } parameters/man/fish.Rd0000644000175000017500000000036613754201615014553 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/datasets.R \docType{data} \name{fish} \alias{fish} \title{Sample data set} \description{ A sample data set, used in tests and some examples. } \keyword{data} parameters/man/model_parameters.principal.Rd0000644000175000017500000001040714160324505021116 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/methods_FactoMineR.R, R/methods_psych.R \name{model_parameters.PCA} \alias{model_parameters.PCA} \alias{model_parameters.principal} \alias{model_parameters.omega} \title{Parameters from Structural Models (PCA, EFA, ...)} \usage{ \method{model_parameters}{PCA}( model, sort = FALSE, threshold = NULL, labels = NULL, verbose = TRUE, ... 
) \method{model_parameters}{principal}( model, sort = FALSE, threshold = NULL, labels = NULL, verbose = TRUE, ... ) \method{model_parameters}{omega}(model, verbose = TRUE, ...) } \arguments{ \item{model}{PCA or FA created by the \pkg{psych} or \pkg{FactoMineR} packages (e.g. through \code{psych::principal}, \code{psych::fa} or \code{psych::omega}).} \item{sort}{Sort the loadings.} \item{threshold}{A value between 0 and 1 indicates which (absolute) values from the loadings should be removed. An integer higher than 1 indicates the n strongest loadings to retain. Can also be \code{"max"}, in which case it will only display the maximum loading per variable (the most simple structure).} \item{labels}{A character vector containing labels to be added to the loadings data. Usually, the question related to the item.} \item{verbose}{Toggle warnings and messages.} \item{...}{Arguments passed to or from other methods.} } \value{ A data frame of loadings. } \description{ Format structural models from the \pkg{psych} or \pkg{FactoMineR} packages. } \details{ For the structural models obtained with \pkg{psych}, the following indices are present: \itemize{ \item \strong{Complexity} (\cite{Hoffman's, 1978; Pettersson and Turkheimer, 2010}) represents the number of latent components needed to account for the observed variables. Whereas a perfect simple structure solution has a complexity of 1 in that each item would only load on one factor, a solution with evenly distributed items has a complexity greater than 1. \item \strong{Uniqueness} represents the variance that is 'unique' to the variable and not shared with other variables. It is equal to \verb{1 – communality} (variance that is shared with other variables). A uniqueness of \code{0.20} suggests that \verb{20\%} of that variable's variance is not shared with other variables in the overall factor model. The greater the 'uniqueness', the lower the relevance of the variable in the factor model. 
\item \strong{MSA} represents the Kaiser-Meyer-Olkin Measure of Sampling Adequacy (\cite{Kaiser and Rice, 1974}) for each item. It indicates whether there is enough data for each factor to give reliable results for the PCA. The value should be > 0.6, and desirable values are > 0.8 (\cite{Tabachnick and Fidell, 2013}). } } \examples{ \donttest{ library(parameters) if (require("psych", quietly = TRUE)) { # Principal Component Analysis (PCA) --------- pca <- psych::principal(attitude) model_parameters(pca) pca <- psych::principal(attitude, nfactors = 3, rotate = "none") model_parameters(pca, sort = TRUE, threshold = 0.2) principal_components(attitude, n = 3, sort = TRUE, threshold = 0.2) # Exploratory Factor Analysis (EFA) --------- efa <- psych::fa(attitude, nfactors = 3) model_parameters(efa, threshold = "max", sort = TRUE, labels = as.character(1:ncol(attitude))) # Omega --------- omega <- psych::omega(mtcars, nfactors = 3) params <- model_parameters(omega) params summary(params) } # FactoMineR --------- if (require("FactoMineR", quietly = TRUE)) { model <- FactoMineR::PCA(iris[, 1:4], ncp = 2) model_parameters(model) attributes(model_parameters(model))$scores model <- FactoMineR::FAMD(iris, ncp = 2) model_parameters(model) } } } \references{ \itemize{ \item Kaiser, H.F. and Rice. J. (1974). Little jiffy, mark iv. Educational and Psychological Measurement, 34(1):111–117 \item Pettersson, E., \& Turkheimer, E. (2010). Item selection, evaluation, and simple structure in personality data. Journal of research in personality, 44(4), 407-420. \item Revelle, W. (2016). How To: Use the psych package for Factor Analysis and data reduction. \item Tabachnick, B. G., and Fidell, L. S. (2013). Using multivariate statistics (6th ed.). Boston: Pearson Education. 
} } parameters/man/model_parameters.lavaan.Rd0000644000175000017500000001406214160324505020400 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/methods_lavaan.R \name{model_parameters.lavaan} \alias{model_parameters.lavaan} \title{Parameters from CFA/SEM models} \usage{ \method{model_parameters}{lavaan}( model, ci = 0.95, standardize = FALSE, component = c("regression", "correlation", "loading", "defined"), keep = NULL, drop = NULL, parameters = keep, verbose = TRUE, ... ) } \arguments{ \item{model}{CFA or SEM created by the \code{lavaan::cfa} or \code{lavaan::sem} functions.} \item{ci}{Confidence Interval (CI) level. Default to \code{0.95} (\verb{95\%}).} \item{standardize}{Return standardized parameters (standardized coefficients). Can be \code{TRUE} (or \code{"all"} or \code{"std.all"}) for standardized estimates based on both the variances of observed and latent variables; \code{"latent"} (or \code{"std.lv"}) for standardized estimates based on the variances of the latent variables only; or \code{"no_exogenous"} (or \code{"std.nox"}) for standardized estimates based on both the variances of observed and latent variables, but not the variances of exogenous covariates. See \code{lavaan::standardizedsolution} for details.} \item{component}{What type of links to return. Can be \code{"all"} or some of \code{c("regression", "correlation", "loading", "variance", "mean")}.} \item{keep}{Character containing a regular expression pattern that describes the parameters that should be included (for \code{keep}) or excluded (for \code{drop}) in the returned data frame. \code{keep} may also be a named list of regular expressions. All non-matching parameters will be removed from the output. 
If \code{keep} is a character vector, every parameter name in the \emph{"Parameter"} column that matches the regular expression in \code{keep} will be selected from the returned data frame (and vice versa, all parameter names matching \code{drop} will be excluded). Furthermore, if \code{keep} has more than one element, these will be merged with an \code{OR} operator into a regular expression pattern like this: \code{"(one|two|three)"}. If \code{keep} is a named list of regular expression patterns, the names of the list-element should equal the column name where selection should be applied. This is useful for model objects where \code{model_parameters()} returns multiple columns with parameter components, like in \code{\link[=model_parameters.lavaan]{model_parameters.lavaan()}}. Note that the regular expression pattern should match the parameter names as they are stored in the returned data frame, which can be different from how they are printed. Inspect the \verb{$Parameter} column of the parameters table to get the exact parameter names.} \item{drop}{Character containing a regular expression pattern that describes the parameters that should be included (for \code{keep}) or excluded (for \code{drop}) in the returned data frame. \code{keep} may also be a named list of regular expressions. All non-matching parameters will be removed from the output. If \code{keep} is a character vector, every parameter name in the \emph{"Parameter"} column that matches the regular expression in \code{keep} will be selected from the returned data frame (and vice versa, all parameter names matching \code{drop} will be excluded). Furthermore, if \code{keep} has more than one element, these will be merged with an \code{OR} operator into a regular expression pattern like this: \code{"(one|two|three)"}. If \code{keep} is a named list of regular expression patterns, the names of the list-element should equal the column name where selection should be applied. 
This is useful for model objects where \code{model_parameters()} returns multiple columns with parameter components, like in \code{\link[=model_parameters.lavaan]{model_parameters.lavaan()}}. Note that the regular expression pattern should match the parameter names as they are stored in the returned data frame, which can be different from how they are printed. Inspect the \verb{$Parameter} column of the parameters table to get the exact parameter names.} \item{parameters}{Deprecated, alias for \code{keep}.} \item{verbose}{Toggle warnings and messages.} \item{...}{Arguments passed to or from other methods.} } \value{ A data frame of indices related to the model's parameters. } \description{ Format CFA/SEM objects from the lavaan package (Rosseel, 2012; Merkle and Rosseel 2018). } \note{ There is also a \href{https://easystats.github.io/see/articles/parameters.html}{\code{plot()}-method} implemented in the \href{https://easystats.github.io/see/}{\pkg{see}-package}. } \examples{ library(parameters) # lavaan ------------------------------------- if (require("lavaan", quietly = TRUE)) { # Confirmatory Factor Analysis (CFA) --------- structure <- " visual =~ x1 + x2 + x3 textual =~ x4 + x5 + x6 speed =~ x7 + x8 + x9 " model <- lavaan::cfa(structure, data = HolzingerSwineford1939) model_parameters(model) model_parameters(model, standardize = TRUE) # filter parameters model_parameters( model, parameters = list( To = "^(?!visual)", From = "^(?!(x7|x8))" ) ) # Structural Equation Model (SEM) ------------ structure <- " # latent variable definitions ind60 =~ x1 + x2 + x3 dem60 =~ y1 + a*y2 + b*y3 + c*y4 dem65 =~ y5 + a*y6 + b*y7 + c*y8 # regressions dem60 ~ ind60 dem65 ~ ind60 + dem60 # residual correlations y1 ~~ y5 y2 ~~ y4 + y6 y3 ~~ y7 y4 ~~ y8 y6 ~~ y8 " model <- lavaan::sem(structure, data = PoliticalDemocracy) model_parameters(model) model_parameters(model, standardize = TRUE) } } \references{ \itemize{ \item Rosseel Y (2012). 
lavaan: An R Package for Structural Equation Modeling. Journal of Statistical Software, 48(2), 1-36. \item Merkle EC , Rosseel Y (2018). blavaan: Bayesian Structural Equation Models via Parameter Expansion. Journal of Statistical Software, 85(4), 1-30. http://www.jstatsoft.org/v85/i04/ } } parameters/man/simulate_model.Rd0000644000175000017500000000475214131356650016630 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/5_simulate_model.R, R/methods_glmmTMB.R \name{simulate_model} \alias{simulate_model} \alias{simulate_model.glmmTMB} \title{Simulated draws from model coefficients} \usage{ simulate_model(model, iterations = 1000, ...) \method{simulate_model}{glmmTMB}( model, iterations = 1000, component = c("all", "conditional", "zi", "zero_inflated", "dispersion"), verbose = FALSE, ... ) } \arguments{ \item{model}{Statistical model (no Bayesian models).} \item{iterations}{The number of draws to simulate/bootstrap.} \item{...}{Arguments passed to or from other methods.} \item{component}{Should all parameters, parameters for the conditional model, or for the zero-inflated part of the model be returned? Applies to models with zero-inflated component. \code{component} may be one of \code{"conditional"}, \code{"zi"}, \code{"zero-inflated"}, \code{"dispersion"} or \code{"all"} (default). May be abbreviated.} \item{verbose}{Toggle warnings and messages.} } \value{ A data frame. } \description{ Simulate draws from a statistical model to return a data frame of estimates. } \details{ \subsection{Technical Details}{ \code{simulate_model()} is a computationally faster alternative to \code{bootstrap_model()}. Simulated draws for coefficients are based on a multivariate normal distribution (\code{MASS::mvrnorm()}) with mean \code{mu = coef(model)} and variance \code{Sigma = vcov(model)}. 
} \subsection{Models with Zero-Inflation Component}{ For models from packages \pkg{glmmTMB}, \pkg{pscl}, \pkg{GLMMadaptive} and \pkg{countreg}, the \code{component} argument can be used to specify which parameters should be simulated. For all other models, parameters from the conditional component (fixed effects) are simulated. This may include smooth terms, but not random effects. } } \examples{ library(parameters) model <- lm(Sepal.Length ~ Species * Petal.Width + Petal.Length, data = iris) head(simulate_model(model)) \donttest{ if (require("glmmTMB", quietly = TRUE)) { model <- glmmTMB( count ~ spp + mined + (1 | site), ziformula = ~mined, family = poisson(), data = Salamanders ) head(simulate_model(model)) head(simulate_model(model, component = "zero_inflated")) } } } \seealso{ \code{\link[=simulate_parameters]{simulate_parameters()}}, \code{\link[=bootstrap_model]{bootstrap_model()}}, \code{\link[=bootstrap_parameters]{bootstrap_parameters()}} } parameters/man/cluster_performance.Rd0000644000175000017500000000267714131014354017663 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/cluster_performance.R \name{cluster_performance} \alias{cluster_performance} \alias{cluster_performance.kmeans} \alias{cluster_performance.hclust} \alias{cluster_performance.dbscan} \alias{cluster_performance.parameters_clusters} \title{Performance of clustering models} \usage{ cluster_performance(model, ...) \method{cluster_performance}{kmeans}(model, ...) \method{cluster_performance}{hclust}(model, data, clusters, ...) \method{cluster_performance}{dbscan}(model, data, ...) \method{cluster_performance}{parameters_clusters}(model, ...) } \arguments{ \item{model}{Cluster model.} \item{...}{Arguments passed to or from other methods.} \item{data}{A data.frame.} \item{clusters}{A vector with clusters assignments (must be same length as rows in data).} } \description{ Compute performance indices for clustering solutions. 
} \examples{ # kmeans model <- kmeans(iris[1:4], 3) cluster_performance(model) # hclust data <- iris[1:4] model <- hclust(dist(data)) clusters <- cutree(model, 3) rez <- cluster_performance(model, data, clusters) rez # DBSCAN if (require("dbscan", quietly = TRUE)) { model <- dbscan::dbscan(iris[1:4], eps = 1.45, minPts = 10) rez <- cluster_performance(model, iris[1:4]) rez } # Retrieve performance from parameters params <- model_parameters(kmeans(iris[1:4], 3)) cluster_performance(params) } parameters/man/n_factors.Rd0000644000175000017500000001377614131244425015605 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/n_factors.R \name{n_factors} \alias{n_factors} \alias{n_components} \title{Number of components/factors to retain in PCA/FA} \usage{ n_factors( x, type = "FA", rotation = "varimax", algorithm = "default", package = c("nFactors", "psych"), cor = NULL, safe = TRUE, n_max = NULL, ... ) n_components( x, type = "PCA", rotation = "varimax", algorithm = "default", package = c("nFactors", "psych"), cor = NULL, safe = TRUE, ... ) } \arguments{ \item{x}{A data frame.} \item{type}{Can be \code{"FA"} or \code{"PCA"}, depending on what you want to do.} \item{rotation}{Only used for VSS (Very Simple Structure criterion, see \code{\link[psych:VSS]{psych::VSS()}}). The rotation to apply. Can be \code{"none"}, \code{"varimax"}, \code{"quartimax"}, \code{"bentlerT"}, \code{"equamax"}, \code{"varimin"}, \code{"geominT"} and \code{"bifactor"} for orthogonal rotations, and \code{"promax"}, \code{"oblimin"}, \code{"simplimax"}, \code{"bentlerQ"}, \code{"geominQ"}, \code{"biquartimin"} and \code{"cluster"} for oblique transformations.} \item{algorithm}{Factoring method used by VSS. Can be \code{"pa"} for Principal Axis Factor Analysis, \code{"minres"} for minimum residual (OLS) factoring, \code{"mle"} for Maximum Likelihood FA and \code{"pc"} for Principal Components. 
\code{"default"} will select \code{"minres"} if \code{type = "FA"} and \code{"pc"} if \code{type = "PCA"}.} \item{package}{Package from which respective methods are used. Can be \code{"all"} or a vector containing \code{"nFactors"}, \code{"psych"}, \code{"PCDimension"}, \code{"fit"} or \code{"EGAnet"}. Note that \code{"fit"} (which actually also relies on the \code{psych} package) and \code{"EGAnet"} can be very slow for bigger datasets. Thus, the default is \code{c("nFactors", "psych")}. You must have the respective packages installed for the methods to be used.} \item{cor}{An optional correlation matrix that can be used (note that the data must still be passed as the first argument). If \code{NULL}, will compute it by running \code{cor()} on the passed data.} \item{safe}{If \code{TRUE}, the function will run all the procedures in try blocks, and will only return those that work and silently skip the ones that may fail.} \item{n_max}{If set to a value (e.g., \code{10}), will drop from the results all methods that suggest a higher number of components. The interpretation becomes 'from all the methods that suggested a number lower than n_max, the results are ...'.} \item{...}{Arguments passed to or from other methods.} } \value{ A data frame. } \description{ This function runs many existing procedures for determining how many factors to retain/extract from factor analysis (FA) or dimension reduction (PCA). It returns the number of factors based on the maximum consensus between methods. In case of ties, it will keep the simplest model and select the solution with the fewer factors. } \details{ \code{n_components} is actually an alias for \code{n_factors}, with different defaults for the function arguments. } \note{ There is also a \href{https://easystats.github.io/see/articles/parameters.html}{\code{plot()}-method} implemented in the \href{https://easystats.github.io/see/}{\pkg{see}-package}. 
\code{n_components()} is a convenient short for \code{n_factors(type = "PCA")}. } \examples{ library(parameters) if (require("nFactors", quietly = TRUE) && require("EGAnet", quietly = TRUE)) { n_factors(mtcars, type = "PCA") result <- n_factors(mtcars[1:5], type = "FA") as.data.frame(result) summary(result) \dontrun{ if (require("PCDimension", quietly = TRUE)) { # Setting package = 'all' will increase the number of methods (but is slow) n_factors(mtcars, type = "PCA", package = "all") n_factors(mtcars, type = "FA", algorithm = "mle", package = "all") } } } } \references{ \itemize{ \item Bartlett, M. S. (1950). Tests of significance in factor analysis. British Journal of statistical psychology, 3(2), 77-85. \item Bentler, P. M., & Yuan, K. H. (1996). Test of linear trend in eigenvalues of a covariance matrix with application to data analysis. British Journal of Mathematical and Statistical Psychology, 49(2), 299-312. \item Cattell, R. B. (1966). The scree test for the number of factors. Multivariate behavioral research, 1(2), 245-276. \item Finch, W. H. (2019). Using Fit Statistic Differences to Determine the Optimal Number of Factors to Retain in an Exploratory Factor Analysis. Educational and Psychological Measurement. \item Zoski, K. W., & Jurs, S. (1996). An objective counterpart to the visual scree test for factor analysis: The standard error scree. Educational and Psychological Measurement, 56(3), 443-451. \item Zoski, K., & Jurs, S. (1993). Using multiple regression to determine the number of factors to retain in factor analysis. Multiple Linear Regression Viewpoints, 20(1), 5-9. \item Nasser, F., Benson, J., & Wisenbaker, J. (2002). The performance of regression-based variations of the visual scree for determining the number of common factors. Educational and psychological measurement, 62(3), 397-419. \item Golino, H., Shi, D., Garrido, L. E., Christensen, A. P., Nieto, M. D., Sadana, R., & Thiyagarajan, J. A. (2018). 
Investigating the performance of Exploratory Graph Analysis and traditional techniques to identify the number of latent factors: A simulation and tutorial. \item Golino, H. F., & Epskamp, S. (2017). Exploratory graph analysis: A new approach for estimating the number of dimensions in psychological research. PloS one, 12(6), e0174035. \item Revelle, W., & Rocklin, T. (1979). Very simple structure: An alternative procedure for estimating the optimal number of interpretable factors. Multivariate Behavioral Research, 14(4), 403-414. \item Velicer, W. F. (1976). Determining the number of components from the matrix of partial correlations. Psychometrika, 41(3), 321-327. } } parameters/man/pool_parameters.Rd0000644000175000017500000000712314141743726017021 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/pool_parameters.R \name{pool_parameters} \alias{pool_parameters} \title{Pool Model Parameters} \usage{ pool_parameters( x, exponentiate = FALSE, effects = "fixed", component = "conditional", verbose = TRUE, ... ) } \arguments{ \item{x}{A list of \code{parameters_model} objects, as returned by \code{\link[=model_parameters]{model_parameters()}}, or a list of model-objects that is supported by \code{model_parameters()}.} \item{exponentiate}{Logical, indicating whether or not to exponentiate the the coefficients (and related confidence intervals). This is typical for logistic regression, or more generally speaking, for models with log or logit links. \strong{Note:} Delta-method standard errors are also computed (by multiplying the standard errors by the transformed coefficients). This is to mimic behaviour of other software packages, such as Stata, but these standard errors poorly estimate uncertainty for the transformed coefficient. The transformed confidence interval more clearly captures this uncertainty. 
For \code{compare_parameters()}, \code{exponentiate = "nongaussian"} will only exponentiate coefficients from non-Gaussian families.} \item{effects}{Should parameters for fixed effects (\code{"fixed"}), random effects (\code{"random"}), or both (\code{"all"}) be returned? Only applies to mixed models. May be abbreviated. If the calculation of random effects parameters takes too long, you may use \code{effects = "fixed"}.} \item{component}{Model component for which parameters should be shown. May be one of \code{"conditional"}, \code{"precision"} (\pkg{betareg}), \code{"scale"} (\pkg{ordinal}), \code{"extra"} (\pkg{glmx}), \code{"marginal"} (\pkg{mfx}), \code{"conditional"} or \code{"full"} (for \code{MuMIn::model.avg()}) or \code{"all"}.} \item{verbose}{Toggle warnings and messages.} \item{...}{Currently not used.} } \value{ A data frame of indices related to the model's parameters. } \description{ This function "pools" (i.e. combines) model parameters in a similar fashion as \code{mice::pool()}. However, this function pools parameters from \code{parameters_model} objects, as returned by \code{\link[=model_parameters]{model_parameters()}}. } \details{ Averaging of parameters follows Rubin's rules (\cite{Rubin, 1987, p. 76}). The pooled degrees of freedom is based on the Barnard-Rubin adjustment for small samples (\cite{Barnard and Rubin, 1999}). } \note{ Models with multiple components, (for instance, models with zero-inflation, where predictors appear in the count and zero-inflated part) may fail in case of identical names for coefficients in the different model components, since the coefficient table is grouped by coefficient names for pooling. In such cases, coefficients of count and zero-inflated model parts would be combined. Therefore, the \code{component} argument defaults to \code{"conditional"} to avoid this. 
} \examples{ # example for multiple imputed datasets if (require("mice")) { data("nhanes2") imp <- mice(nhanes2, printFlag = FALSE) models <- lapply(1:5, function(i) { lm(bmi ~ age + hyp + chl, data = complete(imp, action = i)) }) pool_parameters(models) # should be identical to: m <- with(data = imp, exp = lm(bmi ~ age + hyp + chl)) summary(pool(m)) } } \references{ Barnard, J. and Rubin, D.B. (1999). Small sample degrees of freedom with multiple imputation. Biometrika, 86, 948-955. Rubin, D.B. (1987). Multiple Imputation for Nonresponse in Surveys. New York: John Wiley and Sons. } parameters/man/dot-n_factors_bentler.Rd0000644000175000017500000000052213636467450020103 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/n_factors.R \name{.n_factors_bentler} \alias{.n_factors_bentler} \title{Bentler and Yuan's Procedure} \usage{ .n_factors_bentler(eigen_values = NULL, model = "factors", nobs = NULL) } \description{ Bentler and Yuan's Procedure } \keyword{internal} parameters/man/model_parameters.glht.Rd0000644000175000017500000000463414077615701020110 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/methods_PMCMRplus.R, R/methods_multcomp.R \name{model_parameters.PMCMR} \alias{model_parameters.PMCMR} \alias{model_parameters.glht} \title{Parameters from Hypothesis Testing} \usage{ \method{model_parameters}{PMCMR}(model, ...) \method{model_parameters}{glht}(model, ci = 0.95, exponentiate = FALSE, verbose = TRUE, ...) } \arguments{ \item{model}{Object of class \code{\link[multcomp:glht]{multcomp::glht()}} (\pkg{multcomp}) or of class \code{PMCMR}, \code{trendPMCMR} or \code{osrt} (\pkg{PMCMRplus}).} \item{...}{Arguments passed to or from other methods. 
\item{exponentiate}{Logical, indicating whether or not to exponentiate the coefficients (and related confidence intervals).
} \examples{ \donttest{ if (require("multcomp", quietly = TRUE)) { # multiple linear model, swiss data lmod <- lm(Fertility ~ ., data = swiss) mod <- glht( model = lmod, linfct = c( "Agriculture = 0", "Examination = 0", "Education = 0", "Catholic = 0", "Infant.Mortality = 0" ) ) model_parameters(mod) } if (require("PMCMRplus", quietly = TRUE)) { model <- kwAllPairsConoverTest(count ~ spray, data = InsectSprays) model_parameters(model) } } } parameters/man/check_heterogeneity.Rd0000644000175000017500000000207714134317337017635 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/check_heterogeneity.R \name{check_heterogeneity} \alias{check_heterogeneity} \title{Check model predictor for heterogeneity bias} \usage{ check_heterogeneity(x, select = NULL, group = NULL) } \arguments{ \item{x}{A data frame or a mixed model object.} \item{select}{Character vector (or formula) with names of variables to select that should be checked. If \code{x} is a mixed model object, this argument will be ignored.} \item{group}{Character vector (or formula) with the name of the variable that indicates the group- or cluster-ID. If \code{x} is a model object, this argument will be ignored.} } \description{ \code{check_heterogeneity()} checks if model predictors or variables may cause a heterogeneity bias, i.e. if variables have a within- and/or between-effect. } \note{ This function will be removed in a future update. Please use \code{performance::check_heterogeneity_bias()}. } \seealso{ For further details, see documentation for \code{?datawizard::demean}. 
} parameters/man/p_value_kenward.Rd0000644000175000017500000000422414077615701016771 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/ci_kenward.R, R/dof_kenward.R, % R/p_value_kenward.R, R/standard_error_kenward.R \name{ci_kenward} \alias{ci_kenward} \alias{dof_kenward} \alias{p_value_kenward} \alias{se_kenward} \title{Kenward-Roger approximation for SEs, CIs and p-values} \usage{ ci_kenward(model, ci = 0.95) dof_kenward(model) p_value_kenward(model, dof = NULL) se_kenward(model) } \arguments{ \item{model}{A statistical model.} \item{ci}{Confidence Interval (CI) level. Default to \code{0.95} (\verb{95\%}).} \item{dof}{Degrees of Freedom.} } \value{ A data frame. } \description{ An approximate F-test based on the Kenward-Roger (1997) approach. } \details{ Inferential statistics (like p-values, confidence intervals and standard errors) may be biased in mixed models when the number of clusters is small (even if the sample size of level-1 units is high). In such cases it is recommended to approximate a more accurate number of degrees of freedom for such inferential statistics. Unlike simpler approximation heuristics like the "m-l-1" rule (\code{dof_ml1}), the Kenward-Roger approximation is also applicable in more complex multilevel designs, e.g. with cross-classified clusters. However, the "m-l-1" heuristic also applies to generalized mixed models, while approaches like Kenward-Roger or Satterthwaite are limited to linear mixed models only. } \examples{ \donttest{ if (require("lme4", quietly = TRUE)) { model <- lmer(Petal.Length ~ Sepal.Length + (1 | Species), data = iris) p_value_kenward(model) } } } \references{ Kenward, M. G., & Roger, J. H. (1997). Small sample inference for fixed effects from restricted maximum likelihood. Biometrics, 983-997. 
} \seealso{ \code{dof_kenward()} and \code{se_kenward()} are small helper-functions to calculate approximated degrees of freedom and standard errors for model parameters, based on the Kenward-Roger (1997) approach. \cr \cr \code{\link[=dof_satterthwaite]{dof_satterthwaite()}} and \code{\link[=dof_ml1]{dof_ml1()}} approximate degrees of freedom based on Satterthwaite's method or the "m-l-1" rule. } parameters/man/model_parameters.t1way.Rd0000644000175000017500000000420114133472610020176 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/methods_wrs2.R \name{model_parameters.t1way} \alias{model_parameters.t1way} \title{Parameters from robust statistical objects in \code{WRS2}} \usage{ \method{model_parameters}{t1way}(model, keep = NULL, verbose = TRUE, ...) } \arguments{ \item{model}{Object from \code{WRS2} package.} \item{keep}{Character containing a regular expression pattern that describes the parameters that should be included (for \code{keep}) or excluded (for \code{drop}) in the returned data frame. \code{keep} may also be a named list of regular expressions. All non-matching parameters will be removed from the output. If \code{keep} is a character vector, every parameter name in the \emph{"Parameter"} column that matches the regular expression in \code{keep} will be selected from the returned data frame (and vice versa, all parameter names matching \code{drop} will be excluded). Furthermore, if \code{keep} has more than one element, these will be merged with an \code{OR} operator into a regular expression pattern like this: \code{"(one|two|three)"}. If \code{keep} is a named list of regular expression patterns, the names of the list-element should equal the column name where selection should be applied. This is useful for model objects where \code{model_parameters()} returns multiple columns with parameter components, like in \code{\link[=model_parameters.lavaan]{model_parameters.lavaan()}}. 
Note that the regular expression pattern should match the parameter names as they are stored in the returned data frame, which can be different from how they are printed. Inspect the \verb{$Parameter} column of the parameters table to get the exact parameter names.} \item{verbose}{Toggle warnings and messages.} \item{...}{Arguments passed to or from other methods.} } \value{ A data frame of indices related to the model's parameters. } \description{ Parameters from robust statistical objects in \code{WRS2} } \examples{ if (require("WRS2") && packageVersion("WRS2") >= "1.1.3") { model <- t1way(libido ~ dose, data = viagra) model_parameters(model) } } parameters/man/format_df_adjust.Rd0000644000175000017500000000134714074637002017134 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/format_df_adjust.R \name{format_df_adjust} \alias{format_df_adjust} \title{Format the name of the degrees-of-freedom adjustment methods} \usage{ format_df_adjust( method, approx_string = "-approximated", dof_string = " degrees of freedom" ) } \arguments{ \item{method}{Name of the method.} \item{approx_string, dof_string}{Suffix added to the name of the method in the returned string.} } \value{ A formatted string. } \description{ Format the name of the degrees-of-freedom adjustment methods. 
} \examples{ library(parameters) format_df_adjust("kenward") format_df_adjust("kenward", approx_string = "", dof_string = " DoF") } parameters/man/compare_parameters.Rd0000644000175000017500000002216714141743726017503 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/compare_parameters.R \name{compare_parameters} \alias{compare_parameters} \alias{compare_models} \title{Compare model parameters of multiple models} \usage{ compare_parameters( ..., ci = 0.95, effects = "fixed", component = "conditional", standardize = NULL, exponentiate = FALSE, ci_method = "wald", p_adjust = NULL, style = NULL, column_names = NULL, keep = NULL, drop = NULL, parameters = keep, verbose = TRUE, df_method = ci_method ) compare_models( ..., ci = 0.95, effects = "fixed", component = "conditional", standardize = NULL, exponentiate = FALSE, ci_method = "wald", p_adjust = NULL, style = NULL, column_names = NULL, keep = NULL, drop = NULL, parameters = keep, verbose = TRUE, df_method = ci_method ) } \arguments{ \item{...}{One or more regression model objects, or objects returned by \code{model_parameters()}. Regression models may be of different model types. Model objects may be passed comma separated, or as a list. If model objects are passed with names or the list has named elements, these names will be used as column names.} \item{ci}{Confidence Interval (CI) level. Default to \code{0.95} (\verb{95\%}).} \item{effects}{Should parameters for fixed effects (\code{"fixed"}), random effects (\code{"random"}), or both (\code{"all"}) be returned? Only applies to mixed models. May be abbreviated. If the calculation of random effects parameters takes too long, you may use \code{effects = "fixed"}.} \item{component}{Model component for which parameters should be shown. See documentation for related model class in \code{\link[=model_parameters]{model_parameters()}}.} \item{standardize}{The method used for standardizing the parameters. 
\item The \code{"refit"} method does \emph{not} standardize categorical predictors (i.e. factors), which may be a different behaviour compared to other R packages (such as \pkg{lm.beta}) or other software packages (like SPSS). To mimic such behaviours, either use \code{standardize="basic"} or standardize the data with \code{datawizard::standardize(force=TRUE)} \emph{before} fitting the model.
See \code{\link[stats:p.adjust]{stats::p.adjust()}} for details. Further possible adjustment methods are \code{"tukey"}, \code{"scheffe"}, \code{"sidak"} and \code{"none"} to explicitly disable adjustment for \code{emmGrid} objects (from \pkg{emmeans}).} \item{style}{String, indicating which style of output is requested. Following templates are possible: \itemize{ \item \code{"ci"}: Estimate and confidence intervals, no asterisks for p-values. \item \code{"se"}: Estimate and standard errors, no asterisks for p-values. \item \code{"ci_p"}: Estimate, confidence intervals and asterisks for p-values. \item \code{"se_p"}: Estimate, standard errors and asterisks for p-values. \item \code{"ci_p2"}: Estimate, confidence intervals and numeric p-values, in two columns. \item \code{"se_p2"}: Estimate, standard errors and numeric p-values, in two columns. }} \item{column_names}{Character vector with strings that should be used as column headers. Must be of same length as number of models in \code{...}.} \item{keep}{Character containing a regular expression pattern that describes the parameters that should be included (for \code{keep}) or excluded (for \code{drop}) in the returned data frame. \code{keep} may also be a named list of regular expressions. All non-matching parameters will be removed from the output. If \code{keep} is a character vector, every parameter name in the \emph{"Parameter"} column that matches the regular expression in \code{keep} will be selected from the returned data frame (and vice versa, all parameter names matching \code{drop} will be excluded). Furthermore, if \code{keep} has more than one element, these will be merged with an \code{OR} operator into a regular expression pattern like this: \code{"(one|two|three)"}. If \code{keep} is a named list of regular expression patterns, the names of the list-element should equal the column name where selection should be applied. 
This is useful for model objects where \code{model_parameters()} returns multiple columns with parameter components, like in \code{\link[=model_parameters.lavaan]{model_parameters.lavaan()}}. Note that the regular expression pattern should match the parameter names as they are stored in the returned data frame, which can be different from how they are printed. Inspect the \verb{$Parameter} column of the parameters table to get the exact parameter names.} \item{drop}{Character containing a regular expression pattern that describes the parameters that should be included (for \code{keep}) or excluded (for \code{drop}) in the returned data frame. \code{keep} may also be a named list of regular expressions. All non-matching parameters will be removed from the output. If \code{keep} is a character vector, every parameter name in the \emph{"Parameter"} column that matches the regular expression in \code{keep} will be selected from the returned data frame (and vice versa, all parameter names matching \code{drop} will be excluded). Furthermore, if \code{keep} has more than one element, these will be merged with an \code{OR} operator into a regular expression pattern like this: \code{"(one|two|three)"}. If \code{keep} is a named list of regular expression patterns, the names of the list-element should equal the column name where selection should be applied. This is useful for model objects where \code{model_parameters()} returns multiple columns with parameter components, like in \code{\link[=model_parameters.lavaan]{model_parameters.lavaan()}}. Note that the regular expression pattern should match the parameter names as they are stored in the returned data frame, which can be different from how they are printed. Inspect the \verb{$Parameter} column of the parameters table to get the exact parameter names.} \item{parameters}{Deprecated, alias for \code{keep}.} \item{verbose}{Toggle warnings and messages.} \item{df_method}{Deprecated. 
Please use \code{ci_method}.} } \value{ A data frame of indices related to the model's parameters. } \description{ Compute and extract model parameters of multiple regression models. See \code{\link[=model_parameters]{model_parameters()}} for further details. } \details{ This function is in an early stage and does not yet cope with more complex models, and probably does not yet properly render all model components. It should also be noted that when including models with interaction terms, not only do the values of the parameters change, but so does their meaning (from main effects, to simple slopes), thereby making such comparisons hard. Therefore, you should not use this function to compare models with interaction terms with models without interaction terms. } \examples{ data(iris) lm1 <- lm(Sepal.Length ~ Species, data = iris) lm2 <- lm(Sepal.Length ~ Species + Petal.Length, data = iris) compare_parameters(lm1, lm2) data(mtcars) m1 <- lm(mpg ~ wt, data = mtcars) m2 <- glm(vs ~ wt + cyl, data = mtcars, family = "binomial") compare_parameters(m1, m2) \dontrun{ # exponentiate coefficients, but not for lm compare_parameters(m1, m2, exponentiate = "nongaussian") # change column names compare_parameters("linear model" = m1, "logistic reg." = m2) compare_parameters(m1, m2, column_names = c("linear model", "logistic reg.")) # or as list compare_parameters(list(m1, m2)) compare_parameters(list("linear model" = m1, "logistic reg." 
= m2)) } } parameters/man/dot-n_factors_scree.Rd0000644000175000017500000000051313636467450017551 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/n_factors.R \name{.n_factors_scree} \alias{.n_factors_scree} \title{Non Graphical Cattell's Scree Test} \usage{ .n_factors_scree(eigen_values = NULL, model = "factors") } \description{ Non Graphical Cattell's Scree Test } \keyword{internal} parameters/man/simulate_parameters.Rd0000644000175000017500000000773514100203332017657 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/methods_glmmTMB.R, R/simulate_parameters.R \name{simulate_parameters.glmmTMB} \alias{simulate_parameters.glmmTMB} \alias{simulate_parameters} \alias{simulate_parameters.default} \title{Simulate Model Parameters} \usage{ \method{simulate_parameters}{glmmTMB}( model, iterations = 1000, centrality = "median", ci = 0.95, ci_method = "quantile", test = "p-value", ... ) simulate_parameters(model, ...) \method{simulate_parameters}{default}( model, iterations = 1000, centrality = "median", ci = 0.95, ci_method = "quantile", test = "p-value", ... ) } \arguments{ \item{model}{Statistical model (no Bayesian models).} \item{iterations}{The number of draws to simulate/bootstrap.} \item{centrality}{The point-estimates (centrality indices) to compute. Character (vector) or list with one or more of these options: \code{"median"}, \code{"mean"}, \code{"MAP"} or \code{"all"}.} \item{ci}{Value or vector of probability of the CI (between 0 and 1) to be estimated. Default to \code{.95} (\verb{95\%}).} \item{ci_method}{The type of index used for Credible Interval. Can be \code{"HDI"} (default, see \code{\link[bayestestR:hdi]{hdi()}}), \code{"ETI"} (see \code{\link[bayestestR:eti]{eti()}}), \code{"BCI"} (see \code{\link[bayestestR:bci]{bci()}}) or \code{"SI"} (see \code{\link[bayestestR:si]{si()}}).} \item{test}{The indices of effect existence to compute. 
Character (vector) or list with one or more of these options: \code{"p_direction"} (or \code{"pd"}), \code{"rope"}, \code{"p_map"}, \code{"equivalence_test"} (or \code{"equitest"}), \code{"bayesfactor"} (or \code{"bf"}) or \code{"all"} to compute all tests. For each "test", the corresponding \pkg{bayestestR} function is called (e.g. \code{\link[bayestestR:rope]{rope()}} or \code{\link[bayestestR:p_direction]{p_direction()}}) and its results included in the summary output.} \item{...}{Arguments passed to or from other methods.} } \value{ A data frame with simulated parameters. } \description{ Compute simulated draws of parameters and their related indices such as Confidence Intervals (CI) and p-values. Simulating parameter draws can be seen as a (computationally faster) alternative to bootstrapping. } \details{ \subsection{Technical Details}{ \code{simulate_parameters()} is a computationally faster alternative to \code{bootstrap_parameters()}. Simulated draws for coefficients are based on a multivariate normal distribution (\code{MASS::mvrnorm()}) with mean \code{mu = coef(model)} and variance \code{Sigma = vcov(model)}. } \subsection{Models with Zero-Inflation Component}{ For models from packages \pkg{glmmTMB}, \pkg{pscl}, \pkg{GLMMadaptive} and \pkg{countreg}, the \code{component} argument can be used to specify which parameters should be simulated. For all other models, parameters from the conditional component (fixed effects) are simulated. This may include smooth terms, but not random effects. } } \note{ There is also a \href{https://easystats.github.io/see/articles/parameters.html}{\code{plot()}-method} implemented in the \href{https://easystats.github.io/see/}{\pkg{see}-package}. 
} \examples{ library(parameters) model <- lm(Sepal.Length ~ Species * Petal.Width + Petal.Length, data = iris) simulate_parameters(model) \dontrun{ if (require("glmmTMB")) { model <- glmmTMB( count ~ spp + mined + (1 | site), ziformula = ~mined, family = poisson(), data = Salamanders ) simulate_parameters(model, centrality = "mean") simulate_parameters(model, ci = c(.8, .95), component = "zero_inflated") } } } \references{ Gelman A, Hill J. Data analysis using regression and multilevel/hierarchical models. Cambridge; New York: Cambridge University Press 2007: 140-143 } \seealso{ \code{\link[=bootstrap_model]{bootstrap_model()}}, \code{\link[=bootstrap_parameters]{bootstrap_parameters()}}, \code{\link[=simulate_model]{simulate_model()}} } parameters/man/reexports.Rd0000644000175000017500000000322314104230351015635 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/display.R, R/equivalence_test.R, % R/methods_bayestestR.R, R/n_parameters.R, R/print_md.R, R/reexports.R \docType{import} \name{reexports} \alias{reexports} \alias{display} \alias{equivalence_test} \alias{ci} \alias{n_parameters} \alias{print_md} \alias{standardize_names} \alias{supported_models} \alias{print_html} \alias{describe_distribution} \alias{demean} \alias{rescale_weights} \alias{data_to_numeric} \alias{convert_data_to_numeric} \alias{skewness} \alias{kurtosis} \alias{smoothness} \alias{center} \alias{visualisation_recipe} \title{Objects exported from other packages} \keyword{internal} \description{ These objects are imported from other packages. Follow the links below to see their documentation. 
\describe{ \item{bayestestR}{\code{\link[bayestestR]{ci}}, \code{\link[bayestestR]{equivalence_test}}} \item{datawizard}{\code{\link[datawizard]{center}}, \code{\link[datawizard]{convert_data_to_numeric}}, \code{\link[datawizard:convert_data_to_numeric]{data_to_numeric}}, \code{\link[datawizard]{demean}}, \code{\link[datawizard]{describe_distribution}}, \code{\link[datawizard:skewness]{kurtosis}}, \code{\link[datawizard]{rescale_weights}}, \code{\link[datawizard]{skewness}}, \code{\link[datawizard]{smoothness}}, \code{\link[datawizard]{visualisation_recipe}}} \item{insight}{\code{\link[insight]{display}}, \code{\link[insight]{n_parameters}}, \code{\link[insight:display]{print_html}}, \code{\link[insight:display]{print_md}}, \code{\link[insight]{standardize_names}}, \code{\link[insight:is_model_supported]{supported_models}}} }} parameters/man/display.parameters_model.Rd0000644000175000017500000001737014142761501020611 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/display.R, R/format.R, R/print_html.R, % R/print_md.R \name{display.parameters_model} \alias{display.parameters_model} \alias{display.parameters_sem} \alias{display.parameters_efa_summary} \alias{display.parameters_efa} \alias{display.equivalence_test_lm} \alias{format.parameters_model} \alias{print_html.parameters_model} \alias{print_md.parameters_model} \title{Print tables in different output formats} \usage{ \method{display}{parameters_model}( object, format = "markdown", pretty_names = TRUE, split_components = TRUE, select = NULL, caption = NULL, subtitle = NULL, footer = NULL, align = NULL, digits = 2, ci_digits = 2, p_digits = 3, footer_digits = 3, ci_brackets = c("(", ")"), show_sigma = FALSE, show_formula = FALSE, zap_small = FALSE, verbose = TRUE, ... ) \method{display}{parameters_sem}( object, format = "markdown", digits = 2, ci_digits = 2, p_digits = 3, ci_brackets = c("(", ")"), ... 
) \method{display}{parameters_efa_summary}(object, format = "markdown", digits = 3, ...) \method{display}{parameters_efa}( object, format = "markdown", digits = 2, sort = FALSE, threshold = NULL, labels = NULL, ... ) \method{display}{equivalence_test_lm}(object, format = "markdown", digits = 2, ...) \method{format}{parameters_model}( x, pretty_names = TRUE, split_components = TRUE, select = NULL, digits = 2, ci_digits = 2, p_digits = 3, ci_width = NULL, ci_brackets = NULL, zap_small = FALSE, format = NULL, groups = NULL, ... ) \method{print_html}{parameters_model}( x, pretty_names = TRUE, split_components = TRUE, select = NULL, caption = NULL, subtitle = NULL, footer = NULL, align = NULL, digits = 2, ci_digits = 2, p_digits = 3, footer_digits = 3, ci_brackets = c("(", ")"), show_sigma = FALSE, show_formula = FALSE, zap_small = FALSE, groups = NULL, verbose = TRUE, ... ) \method{print_md}{parameters_model}( x, pretty_names = TRUE, split_components = TRUE, select = NULL, caption = NULL, subtitle = NULL, footer = NULL, align = NULL, digits = 2, ci_digits = 2, p_digits = 3, footer_digits = 3, ci_brackets = c("(", ")"), show_sigma = FALSE, show_formula = FALSE, zap_small = FALSE, groups = NULL, verbose = TRUE, ... ) } \arguments{ \item{object}{An object returned by \code{\link[=model_parameters]{model_parameters()}}, \code{\link[=simulate_parameters]{simulate_parameters()}}, \code{\link[=equivalence_test.lm]{equivalence_test()}} or \code{\link[=principal_components]{principal_components()}}.} \item{format}{String, indicating the output format. Can be \code{"markdown"} or \code{"html"}.} \item{pretty_names}{Return "pretty" (i.e. more human readable) parameter names.} \item{split_components}{Logical, if \code{TRUE} (default), For models with multiple components (zero-inflation, smooth terms, ...), each component is printed in a separate table. 
If \code{FALSE}, model parameters are printed in a single table and a \code{Component} column is added to the output.} \item{select}{Character vector (or numeric index) of column names that should be printed. If \code{NULL} (default), all columns are printed. The shortcut \code{select = "minimal"} prints coefficient, confidence intervals and p-values, while \code{select = "short"} prints coefficient, standard errors and p-values.} \item{caption}{Table caption as string. If \code{NULL}, no table caption is printed.} \item{subtitle}{Table title (same as caption) and subtitle, as strings. If \code{NULL}, no title or subtitle is printed, unless it is stored as attributes (\code{table_title}, or its alias \code{table_caption}, and \code{table_subtitle}). If \code{x} is a list of data frames, \code{caption} may be a list of table captions, one for each table.} \item{footer}{Table footer, as string. For markdown-formatted tables, table footers, due to the limitation in markdown rendering, are actually just a new text line under the table. If \code{x} is a list of data frames, \code{footer} may be a list of table captions, one for each table.} \item{align}{Only applies to HTML tables. May be one of \code{"left"}, \code{"right"} or \code{"center"}.} \item{digits, ci_digits, p_digits}{Number of digits for rounding or significant figures. May also be \code{"signif"} to return significant figures or \code{"scientific"} to return scientific notation. Control the number of digits by adding the value as suffix, e.g. 
\code{digits = "scientific4"} to have scientific notation with 4 decimal places, or \code{digits = "signif5"} for 5 significant figures (see also \code{\link[=signif]{signif()}}).} \item{footer_digits}{Number of decimal places for values in the footer summary.} \item{ci_brackets}{Logical, if \code{TRUE} (default), CI-values are encompassed in square brackets (else in parentheses).} \item{show_sigma}{Logical, if \code{TRUE}, adds information about the residual standard deviation.} \item{show_formula}{Logical, if \code{TRUE}, adds the model formula to the output.} \item{zap_small}{Logical, if \code{TRUE}, small values are rounded after \code{digits} decimal places. If \code{FALSE}, values with more decimal places than \code{digits} are printed in scientific notation.} \item{verbose}{Toggle messages and warnings.} \item{...}{Arguments passed to or from other methods.} \item{sort}{Sort the loadings.} \item{threshold}{A value between 0 and 1 indicates which (absolute) values from the loadings should be removed. An integer higher than 1 indicates the n strongest loadings to retain. Can also be \code{"max"}, in which case it will only display the maximum loading per variable (the most simple structure).} \item{labels}{A character vector containing labels to be added to the loadings data. Usually, the question related to the item.} \item{x}{An object returned by \code{\link[=model_parameters]{model_parameters()}}.} \item{ci_width}{Minimum width of the returned string for confidence intervals. If not \code{NULL} and width is larger than the string's length, leading whitespaces are added to the string. If \code{width="auto"}, width will be set to the length of the longest string.} \item{groups}{Named list, can be used to group parameters in the printed output. List elements may either be character vectors that match the name of those parameters that belong to one group, or list elements can be row numbers of those parameter rows that should belong to one group. 
In a regression model, the parameters do not all have the same meaning. For instance, the intercept has to be interpreted as the theoretical outcome value under some conditions (when predictors are set to 0), whereas other coefficients are to be interpreted as amounts of change. Others, such as interactions, represent changes in the effect of another parameter.
The \code{parameters_type} function attempts to retrieve information and meaning of parameters. It outputs a data frame of information for each parameter, such as the \code{Type} (whether the parameter corresponds to a factor or a numeric predictor, or whether it is a (regular) interaction or a nested one), the \code{Link} (whether the parameter can be interpreted as a mean value, the slope of an association or a difference between two levels) and, in the case of interactions, which other parameter is impacted by which parameter. } \examples{ library(parameters) model <- lm(Sepal.Length ~ Petal.Length + Species, data = iris) parameters_type(model) model <- lm(Sepal.Length ~ Species + poly(Sepal.Width, 2), data = iris) parameters_type(model) model <- lm(Sepal.Length ~ Species + poly(Sepal.Width, 2, raw = TRUE), data = iris) parameters_type(model) # Interactions model <- lm(Sepal.Length ~ Sepal.Width * Species, data = iris) parameters_type(model) model <- lm(Sepal.Length ~ Sepal.Width * Species * Petal.Length, data = iris) parameters_type(model) model <- lm(Sepal.Length ~ Species * Sepal.Width, data = iris) parameters_type(model) model <- lm(Sepal.Length ~ Species / Sepal.Width, data = iris) parameters_type(model) # Complex interactions data <- iris data$fac2 <- ifelse(data$Sepal.Width > mean(data$Sepal.Width), "A", "B") model <- lm(Sepal.Length ~ Species / fac2 / Petal.Length, data = data) parameters_type(model) model <- lm(Sepal.Length ~ Species / fac2 * Petal.Length, data = data) parameters_type(model) } parameters/man/standard_error_robust.Rd0000644000175000017500000001005314140567204020222 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/robust_estimation.R \name{standard_error_robust} \alias{standard_error_robust} \alias{p_value_robust} \alias{ci_robust} \title{Robust estimation} \usage{ standard_error_robust( model, vcov_estimation = "HC", vcov_type = NULL, vcov_args = NULL, component = "conditional", ... 
) p_value_robust( model, vcov_estimation = "HC", vcov_type = NULL, vcov_args = NULL, component = "conditional", method = NULL, ... ) ci_robust( model, ci = 0.95, method = NULL, vcov_estimation = "HC", vcov_type = NULL, vcov_args = NULL, component = "conditional", ... ) } \arguments{ \item{model}{A model.} \item{vcov_estimation}{String, indicating the suffix of the \verb{vcov*()}-function from the \pkg{sandwich} or \pkg{clubSandwich} package, e.g. \code{vcov_estimation = "CL"} (which calls \code{\link[sandwich:vcovCL]{sandwich::vcovCL()}} to compute clustered covariance matrix estimators), or \code{vcov_estimation = "HC"} (which calls \code{\link[sandwich:vcovHC]{sandwich::vcovHC()}} to compute heteroskedasticity-consistent covariance matrix estimators).} \item{vcov_type}{Character vector, specifying the estimation type for the robust covariance matrix estimation (see \code{\link[sandwich:vcovHC]{sandwich::vcovHC()}} or \code{clubSandwich::vcovCR()} for details). Passed down as \code{type} argument to the related \verb{vcov*()}-function from the \pkg{sandwich} or \pkg{clubSandwich} package and hence will be ignored if there is no \code{type} argument (e.g., \code{sandwich::vcovHAC()} will ignore that argument).} \item{vcov_args}{List of named vectors, used as additional arguments that are passed down to the \pkg{sandwich}-function specified in \code{vcov_estimation}.} \item{component}{Should all parameters or parameters for specific model components be returned?} \item{...}{Arguments passed to or from other methods. For \code{standard_error()}, if \code{method = "robust"}, arguments \code{vcov_estimation}, \code{vcov_type} and \code{vcov_args} can be passed down to \code{standard_error_robust()}.} \item{method}{Method for computing degrees of freedom for confidence intervals (CI) and the related p-values. 
Allowed are following options (which vary depending on the model class): \code{"residual"}, \code{"normal"}, \code{"likelihood"}, \code{"satterthwaite"}, \code{"kenward"}, \code{"wald"}, \code{"profile"}, \code{"boot"}, \code{"uniroot"}, \code{"ml1"}, \code{"betwithin"}, \code{"hdi"}, \code{"quantile"}, \code{"ci"}, \code{"eti"}, \code{"si"}, \code{"bci"}, or \code{"bcai"}. See section \emph{Confidence intervals and approximation of degrees of freedom} in \code{\link[=model_parameters]{model_parameters()}} for further details.} \item{ci}{Confidence Interval (CI) level. Default to \code{0.95} (\verb{95\%}).} } \value{ A data frame. } \description{ \code{standard_error_robust()}, \code{ci_robust()} and \code{p_value_robust()} attempt to return indices based on robust estimation of the variance-covariance matrix, using the packages \pkg{sandwich} and \pkg{clubSandwich}. } \note{ These functions rely on the \pkg{sandwich} or \pkg{clubSandwich} package (the latter if \code{vcov_estimation = "CR"} for cluster-robust standard errors) and will thus only work for those models supported by those packages. } \examples{ if (require("sandwich", quietly = TRUE)) { # robust standard errors, calling sandwich::vcovHC(type="HC3") by default model <- lm(Petal.Length ~ Sepal.Length * Species, data = iris) standard_error_robust(model) } \dontrun{ if (require("clubSandwich", quietly = TRUE)) { # cluster-robust standard errors, using clubSandwich iris$cluster <- factor(rep(LETTERS[1:8], length.out = nrow(iris))) standard_error_robust( model, vcov_type = "CR2", vcov_args = list(cluster = iris$cluster) ) } } } \seealso{ Working examples can be found \href{https://easystats.github.io/parameters/articles/model_parameters_robust.html}{in this vignette}. 
} parameters/man/model_parameters.cgam.Rd0000644000175000017500000002261614135322113020044 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/methods_cgam.R, R/methods_gam.R, % R/methods_quantreg.R \name{model_parameters.cgam} \alias{model_parameters.cgam} \alias{model_parameters.gam} \alias{model_parameters.rqss} \title{Parameters from Generalized Additive (Mixed) Models} \usage{ \method{model_parameters}{cgam}( model, ci = 0.95, ci_method = "residual", bootstrap = FALSE, iterations = 1000, standardize = NULL, exponentiate = FALSE, robust = FALSE, p_adjust = NULL, keep = NULL, drop = NULL, parameters = keep, verbose = TRUE, ... ) \method{model_parameters}{gam}( model, ci = 0.95, ci_method = "residual", bootstrap = FALSE, iterations = 1000, standardize = NULL, exponentiate = FALSE, robust = FALSE, p_adjust = NULL, keep = NULL, drop = NULL, parameters = keep, verbose = TRUE, ... ) \method{model_parameters}{rqss}( model, ci = 0.95, ci_method = "residual", bootstrap = FALSE, iterations = 1000, standardize = NULL, exponentiate = FALSE, robust = FALSE, p_adjust = NULL, keep = NULL, drop = NULL, parameters = keep, verbose = TRUE, ... ) } \arguments{ \item{model}{A gam/gamm model.} \item{ci}{Confidence Interval (CI) level. Default to \code{0.95} (\verb{95\%}).} \item{ci_method}{Method for computing degrees of freedom for confidence intervals (CI) and the related p-values. Allowed are following options (which vary depending on the model class): \code{"residual"}, \code{"normal"}, \code{"likelihood"}, \code{"satterthwaite"}, \code{"kenward"}, \code{"wald"}, \code{"profile"}, \code{"boot"}, \code{"uniroot"}, \code{"ml1"}, \code{"betwithin"}, \code{"hdi"}, \code{"quantile"}, \code{"ci"}, \code{"eti"}, \code{"si"}, \code{"bci"}, or \code{"bcai"}. See section \emph{Confidence intervals and approximation of degrees of freedom} in \code{\link[=model_parameters]{model_parameters()}} for further details. 
When \code{ci_method=NULL}, in most cases \code{"wald"} is used then.} \item{bootstrap}{Should estimates be based on bootstrapped model? If \code{TRUE}, then arguments of \link[=model_parameters.stanreg]{Bayesian regressions} apply (see also \code{\link[=bootstrap_parameters]{bootstrap_parameters()}}).} \item{iterations}{The number of bootstrap replicates. This only applies in the case of bootstrapped frequentist models.} \item{standardize}{The method used for standardizing the parameters. Can be \code{NULL} (default; no standardization), \code{"refit"} (for re-fitting the model on standardized data) or one of \code{"basic"}, \code{"posthoc"}, \code{"smart"}, \code{"pseudo"}. See 'Details' in \code{\link[effectsize:standardize_parameters]{effectsize::standardize_parameters()}}. \strong{Important:} \itemize{ \item The \code{"refit"} method does \emph{not} standardize categorical predictors (i.e. factors), which may be a different behaviour compared to other R packages (such as \pkg{lm.beta}) or other software packages (like SPSS). To mimic such behaviours, either use \code{standardize="basic"} or standardize the data with \code{datawizard::standardize(force=TRUE)} \emph{before} fitting the model. \item For mixed models, when using methods other than \code{"refit"}, only the fixed effects will be returned. \item Robust estimation (i.e. \code{robust=TRUE}) of standardized parameters only works when \code{standardize="refit"}. }} \item{exponentiate}{Logical, indicating whether or not to exponentiate the coefficients (and related confidence intervals). This is typical for logistic regression, or more generally speaking, for models with log or logit links. \strong{Note:} Delta-method standard errors are also computed (by multiplying the standard errors by the transformed coefficients). This is to mimic behaviour of other software packages, such as Stata, but these standard errors poorly estimate uncertainty for the transformed coefficient. 
The transformed confidence interval more clearly captures this uncertainty. For \code{compare_parameters()}, \code{exponentiate = "nongaussian"} will only exponentiate coefficients from non-Gaussian families.} \item{robust}{Logical, if \code{TRUE}, robust standard errors are calculated (if possible), and confidence intervals and p-values are based on these robust standard errors. Additional arguments like \code{vcov_estimation} or \code{vcov_type} are passed down to other methods, see \code{\link[=standard_error_robust]{standard_error_robust()}} for details and \href{https://easystats.github.io/parameters/articles/model_parameters_robust.html}{this vignette} for working examples.} \item{p_adjust}{Character vector, if not \code{NULL}, indicates the method to adjust p-values. See \code{\link[stats:p.adjust]{stats::p.adjust()}} for details. Further possible adjustment methods are \code{"tukey"}, \code{"scheffe"}, \code{"sidak"} and \code{"none"} to explicitly disable adjustment for \code{emmGrid} objects (from \pkg{emmeans}).} \item{keep}{Character containing a regular expression pattern that describes the parameters that should be included (for \code{keep}) or excluded (for \code{drop}) in the returned data frame. \code{keep} may also be a named list of regular expressions. All non-matching parameters will be removed from the output. If \code{keep} is a character vector, every parameter name in the \emph{"Parameter"} column that matches the regular expression in \code{keep} will be selected from the returned data frame (and vice versa, all parameter names matching \code{drop} will be excluded). Furthermore, if \code{keep} has more than one element, these will be merged with an \code{OR} operator into a regular expression pattern like this: \code{"(one|two|three)"}. If \code{keep} is a named list of regular expression patterns, the names of the list-element should equal the column name where selection should be applied. 
This is useful for model objects where \code{model_parameters()} returns multiple columns with parameter components, like in \code{\link[=model_parameters.lavaan]{model_parameters.lavaan()}}. Note that the regular expression pattern should match the parameter names as they are stored in the returned data frame, which can be different from how they are printed. Inspect the \verb{$Parameter} column of the parameters table to get the exact parameter names.} \item{drop}{Character containing a regular expression pattern that describes the parameters that should be included (for \code{keep}) or excluded (for \code{drop}) in the returned data frame. \code{keep} may also be a named list of regular expressions. All non-matching parameters will be removed from the output. If \code{keep} is a character vector, every parameter name in the \emph{"Parameter"} column that matches the regular expression in \code{keep} will be selected from the returned data frame (and vice versa, all parameter names matching \code{drop} will be excluded). Furthermore, if \code{keep} has more than one element, these will be merged with an \code{OR} operator into a regular expression pattern like this: \code{"(one|two|three)"}. If \code{keep} is a named list of regular expression patterns, the names of the list-element should equal the column name where selection should be applied. This is useful for model objects where \code{model_parameters()} returns multiple columns with parameter components, like in \code{\link[=model_parameters.lavaan]{model_parameters.lavaan()}}. Note that the regular expression pattern should match the parameter names as they are stored in the returned data frame, which can be different from how they are printed. Inspect the \verb{$Parameter} column of the parameters table to get the exact parameter names.} \item{parameters}{Deprecated, alias for \code{keep}.} \item{verbose}{Toggle warnings and messages.} \item{...}{Arguments passed to or from other methods. 
For instance, when \code{bootstrap = TRUE}, arguments like \code{type} or \code{parallel} are passed down to \code{bootstrap_model()}, and arguments like \code{ci_method} are passed down to \code{\link[bayestestR:describe_posterior]{bayestestR::describe_posterior()}}.} } \value{ A data frame of indices related to the model's parameters. } \description{ Extract and compute indices and measures to describe parameters of generalized additive models (GAM(M)s). } \details{ The reporting of degrees of freedom \emph{for the spline terms} slightly differs from the output of \code{summary(model)}, for example in the case of \code{mgcv::gam()}. The \emph{estimated degrees of freedom}, column \code{edf} in the summary-output, is named \code{df} in the returned data frame, while the column \code{df_error} in the returned data frame refers to the residual degrees of freedom that are returned by \code{df.residual()}. Hence, the values in the column \code{df_error} differ from the column \code{Ref.df} from the summary, which is intentional, as these reference degrees of freedom \dQuote{is not very interpretable} (\href{https://stat.ethz.ch/pipermail/r-help/2019-March/462135.html}{web}). } \examples{ library(parameters) if (require("mgcv")) { dat <- gamSim(1, n = 400, dist = "normal", scale = 2) model <- gam(y ~ s(x0) + s(x1) + s(x2) + s(x3), data = dat) model_parameters(model) } } \seealso{ \code{\link[insight:standardize_names]{insight::standardize_names()}} to rename columns into a consistent, standardized naming scheme. 
} parameters/man/random_parameters.Rd0000644000175000017500000000526014125102513017311 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/random_parameters.R \name{random_parameters} \alias{random_parameters} \title{Summary information from random effects} \usage{ random_parameters(model, component = "conditional") } \arguments{ \item{model}{A mixed effects model (including \code{stanreg} models).} \item{component}{Should all parameters, parameters for the conditional model, or for the zero-inflated part of the model be returned? Applies to models with zero-inflated component. \code{component} may be one of \code{"conditional"} (default), \code{"zi"} or \code{"zero_inflated"}. May be abbreviated.} } \value{ A data frame with random effects statistics for the variance components, including number of levels per random effect group, as well as complete observations in the model. } \description{ This function extracts the different variance components of a mixed model and returns the result as a data frame. } \details{ The variance components are obtained from \code{\link[insight:get_variance]{insight::get_variance()}} and are denoted as following: \subsection{Within-group (or residual) variance}{ The residual variance, \ifelse{html}{\out{σ2ε}}{\eqn{\sigma^2_\epsilon}}, is the sum of the distribution-specific variance and the variance due to additive dispersion. It indicates the \emph{within-group variance}. } \subsection{Between-group random intercept variance}{ The random intercept variance, or \emph{between-group} variance for the intercept (\ifelse{html}{\out{τ00}}{\eqn{\tau_{00}}}), is obtained from \code{VarCorr()}. It indicates how much groups or subjects differ from each other. } \subsection{Between-group random slope variance}{ The random slope variance, or \emph{between-group} variance for the slopes (\ifelse{html}{\out{τ11}}{\eqn{\tau_{11}}}) is obtained from \code{VarCorr()}. 
This measure is only available for mixed models with random slopes. It indicates how much groups or subjects differ from each other according to their slopes. } \subsection{Random slope-intercept correlation}{ The random slope-intercept correlation (\ifelse{html}{\out{ρ01}}{\eqn{\rho_{01}}}) is obtained from \code{VarCorr()}. This measure is only available for mixed models with random intercepts and slopes. } \strong{Note:} For the within-group and between-group variance, variance and standard deviations (which are simply the square root of the variance) are shown. } \examples{ if (require("lme4")) { data(sleepstudy) model <- lmer(Reaction ~ Days + (1 + Days | Subject), data = sleepstudy) random_parameters(model) } } parameters/man/reshape_loadings.Rd0000644000175000017500000000234614012467214017126 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/reshape_loadings.R \name{reshape_loadings} \alias{reshape_loadings} \alias{reshape_loadings.parameters_efa} \alias{reshape_loadings.data.frame} \title{Reshape loadings between wide/long formats} \usage{ reshape_loadings(x, ...) \method{reshape_loadings}{parameters_efa}(x, threshold = NULL, ...) \method{reshape_loadings}{data.frame}(x, threshold = NULL, loadings_columns = NULL, ...) } \arguments{ \item{x}{A data frame or a statistical model.} \item{...}{Arguments passed to or from other methods.} \item{threshold}{A value between 0 and 1 indicates which (absolute) values from the loadings should be removed. An integer higher than 1 indicates the n strongest loadings to retain. Can also be \code{"max"}, in which case it will only display the maximum loading per variable (the most simple structure).} \item{loadings_columns}{Vector indicating the columns corresponding to loadings.} } \description{ Reshape loadings between wide/long formats. 
} \examples{ library(parameters) library(psych) pca <- model_parameters(psych::fa(attitude, nfactors = 3)) loadings <- reshape_loadings(pca) loadings reshape_loadings(loadings) } parameters/man/model_parameters.stanreg.Rd0000644000175000017500000004460714160324505020611 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/methods_base.R, R/methods_brms.R, % R/methods_rstanarm.R \name{model_parameters.data.frame} \alias{model_parameters.data.frame} \alias{model_parameters.brmsfit} \alias{model_parameters.stanreg} \title{Parameters from Bayesian Models} \usage{ \method{model_parameters}{data.frame}( model, centrality = "median", dispersion = FALSE, ci = 0.95, ci_method = "hdi", test = c("pd", "rope"), rope_range = "default", rope_ci = 0.95, keep = NULL, drop = NULL, parameters = keep, verbose = TRUE, ... ) \method{model_parameters}{brmsfit}( model, centrality = "median", dispersion = FALSE, ci = 0.95, ci_method = "hdi", test = c("pd", "rope"), rope_range = "default", rope_ci = 0.95, bf_prior = NULL, diagnostic = c("ESS", "Rhat"), priors = FALSE, effects = "fixed", component = "all", exponentiate = FALSE, standardize = NULL, group_level = FALSE, keep = NULL, drop = NULL, parameters = keep, verbose = TRUE, ... ) \method{model_parameters}{stanreg}( model, centrality = "median", dispersion = FALSE, ci = 0.95, ci_method = "hdi", test = c("pd", "rope"), rope_range = "default", rope_ci = 0.95, bf_prior = NULL, diagnostic = c("ESS", "Rhat"), priors = TRUE, effects = "fixed", exponentiate = FALSE, standardize = NULL, group_level = FALSE, keep = NULL, drop = NULL, parameters = keep, verbose = TRUE, ... ) } \arguments{ \item{model}{Bayesian model (including SEM from \pkg{blavaan}. May also be a data frame with posterior samples.} \item{centrality}{The point-estimates (centrality indices) to compute. 
Character (vector) or list with one or more of these options: \code{"median"}, \code{"mean"}, \code{"MAP"} or \code{"all"}.} \item{dispersion}{Logical, if \code{TRUE}, computes indices of dispersion related to the estimate(s) (\code{SD} and \code{MAD} for \code{mean} and \code{median}, respectively).} \item{ci}{Credible Interval (CI) level. Default to \code{0.95} (\verb{95\%}). See \code{\link[bayestestR:ci]{bayestestR::ci()}} for further details.} \item{ci_method}{Method for computing degrees of freedom for confidence intervals (CI) and the related p-values. Allowed are following options (which vary depending on the model class): \code{"residual"}, \code{"normal"}, \code{"likelihood"}, \code{"satterthwaite"}, \code{"kenward"}, \code{"wald"}, \code{"profile"}, \code{"boot"}, \code{"uniroot"}, \code{"ml1"}, \code{"betwithin"}, \code{"hdi"}, \code{"quantile"}, \code{"ci"}, \code{"eti"}, \code{"si"}, \code{"bci"}, or \code{"bcai"}. See section \emph{Confidence intervals and approximation of degrees of freedom} in \code{\link[=model_parameters]{model_parameters()}} for further details. When \code{ci_method=NULL}, in most cases \code{"wald"} is used then.} \item{test}{The indices of effect existence to compute. Character (vector) or list with one or more of these options: \code{"p_direction"} (or \code{"pd"}), \code{"rope"}, \code{"p_map"}, \code{"equivalence_test"} (or \code{"equitest"}), \code{"bayesfactor"} (or \code{"bf"}) or \code{"all"} to compute all tests. For each "test", the corresponding \pkg{bayestestR} function is called (e.g. \code{\link[bayestestR:rope]{rope()}} or \code{\link[bayestestR:p_direction]{p_direction()}}) and its results included in the summary output.} \item{rope_range}{ROPE's lower and higher bounds. Should be a list of two values (e.g., \code{c(-0.1, 0.1)}) or \code{"default"}. 
If \code{"default"}, the bounds are set to \code{x +- 0.1*SD(response)}.} \item{rope_ci}{The Credible Interval (CI) probability, corresponding to the proportion of HDI, to use for the percentage in ROPE.} \item{keep}{Character containing a regular expression pattern that describes the parameters that should be included (for \code{keep}) or excluded (for \code{drop}) in the returned data frame. \code{keep} may also be a named list of regular expressions. All non-matching parameters will be removed from the output. If \code{keep} is a character vector, every parameter name in the \emph{"Parameter"} column that matches the regular expression in \code{keep} will be selected from the returned data frame (and vice versa, all parameter names matching \code{drop} will be excluded). Furthermore, if \code{keep} has more than one element, these will be merged with an \code{OR} operator into a regular expression pattern like this: \code{"(one|two|three)"}. If \code{keep} is a named list of regular expression patterns, the names of the list-element should equal the column name where selection should be applied. This is useful for model objects where \code{model_parameters()} returns multiple columns with parameter components, like in \code{\link[=model_parameters.lavaan]{model_parameters.lavaan()}}. Note that the regular expression pattern should match the parameter names as they are stored in the returned data frame, which can be different from how they are printed. Inspect the \verb{$Parameter} column of the parameters table to get the exact parameter names.} \item{drop}{Character containing a regular expression pattern that describes the parameters that should be included (for \code{keep}) or excluded (for \code{drop}) in the returned data frame. \code{keep} may also be a named list of regular expressions. All non-matching parameters will be removed from the output. 
If \code{keep} is a character vector, every parameter name in the \emph{"Parameter"} column that matches the regular expression in \code{keep} will be selected from the returned data frame (and vice versa, all parameter names matching \code{drop} will be excluded). Furthermore, if \code{keep} has more than one element, these will be merged with an \code{OR} operator into a regular expression pattern like this: \code{"(one|two|three)"}. If \code{keep} is a named list of regular expression patterns, the names of the list-element should equal the column name where selection should be applied. This is useful for model objects where \code{model_parameters()} returns multiple columns with parameter components, like in \code{\link[=model_parameters.lavaan]{model_parameters.lavaan()}}. Note that the regular expression pattern should match the parameter names as they are stored in the returned data frame, which can be different from how they are printed. Inspect the \verb{$Parameter} column of the parameters table to get the exact parameter names.} \item{parameters}{Deprecated, alias for \code{keep}.} \item{verbose}{Toggle messages and warnings.} \item{...}{Currently not used.} \item{bf_prior}{Distribution representing a prior for the computation of Bayes factors / SI. Used if the input is a posterior, otherwise (in the case of models) ignored.} \item{diagnostic}{Diagnostic metrics to compute. Character (vector) or list with one or more of these options: \code{"ESS"}, \code{"Rhat"}, \code{"MCSE"} or \code{"all"}.} \item{priors}{Add the prior used for each parameter.} \item{effects}{Should results for fixed effects, random effects or both be returned? Only applies to mixed models. May be abbreviated.} \item{component}{Which type of parameters to return, such as parameters for the conditional model, the zero-inflated part of the model, the dispersion term, or other auxiliary parameters be returned? 
Applies to models with zero-inflated and/or dispersion formula, or if parameters such as \code{sigma} should be included. May be abbreviated. Note that the \emph{conditional} component is also called \emph{count} or \emph{mean} component, depending on the model. There are three convenient shortcuts: \code{component = "all"} returns all possible parameters. If \code{component = "location"}, location parameters such as \code{conditional}, \code{zero_inflated}, or \code{smooth_terms}, are returned (everything that are fixed or random effects - depending on the \code{effects} argument - but no auxiliary parameters). For \code{component = "distributional"} (or \code{"auxiliary"}), components like \code{sigma}, \code{dispersion}, or \code{beta} (and other auxiliary parameters) are returned.} \item{exponentiate}{Logical, indicating whether or not to exponentiate the coefficients (and related confidence intervals). This is typical for logistic regression, or more generally speaking, for models with log or logit links. \strong{Note:} Delta-method standard errors are also computed (by multiplying the standard errors by the transformed coefficients). This is to mimic behaviour of other software packages, such as Stata, but these standard errors poorly estimate uncertainty for the transformed coefficient. The transformed confidence interval more clearly captures this uncertainty. For \code{compare_parameters()}, \code{exponentiate = "nongaussian"} will only exponentiate coefficients from non-Gaussian families.} \item{standardize}{The method used for standardizing the parameters. Can be \code{NULL} (default; no standardization), \code{"refit"} (for re-fitting the model on standardized data) or one of \code{"basic"}, \code{"posthoc"}, \code{"smart"}, \code{"pseudo"}. See 'Details' in \code{\link[effectsize:standardize_parameters]{effectsize::standardize_parameters()}}. 
\strong{Important:} \itemize{ \item The \code{"refit"} method does \emph{not} standardize categorical predictors (i.e. factors), which may be a different behaviour compared to other R packages (such as \pkg{lm.beta}) or other software packages (like SPSS). To mimic such behaviours, either use \code{standardize="basic"} or standardize the data with \code{datawizard::standardize(force=TRUE)} \emph{before} fitting the model. \item For mixed models, when using methods other than \code{"refit"}, only the fixed effects will be returned. \item Robust estimation (i.e. \code{robust=TRUE}) of standardized parameters only works when \code{standardize="refit"}. }} \item{group_level}{Logical, for multilevel models (i.e. models with random effects) and when \code{effects = "all"} or \code{effects = "random"}, include the parameters for each group level from random effects. If \code{group_level = FALSE} (the default), only information on SD and COR are shown.} } \value{ A data frame of indices related to the model's parameters. } \description{ Parameters from Bayesian models. } \note{ When \code{standardize = "refit"}, columns \code{diagnostic}, \code{bf_prior} and \code{priors} refer to the \emph{original} \code{model}. If \code{model} is a data frame, arguments \code{diagnostic}, \code{bf_prior} and \code{priors} are ignored. \cr \cr There is also a \href{https://easystats.github.io/see/articles/parameters.html}{\code{plot()}-method} implemented in the \href{https://easystats.github.io/see/}{\pkg{see}-package}. } \section{Confidence intervals and approximation of degrees of freedom}{ There are different ways of approximating the degrees of freedom depending on different assumptions about the nature of the model and its sampling distribution. The \code{ci_method} argument modulates the method for computing degrees of freedom (df) that are used to calculate confidence intervals (CI) and the related p-values. 
Following options are allowed, depending on the model class: \strong{Classical methods:} Classical inference is generally based on the \strong{Wald method}. The Wald approach to inference computes a test statistic by dividing the parameter estimate by its standard error (Coefficient / SE), then comparing this statistic against a t- or normal distribution. This approach can be used to compute CIs and p-values. \code{"wald"}: \itemize{ \item Applies to \emph{non-Bayesian models}. For \emph{linear models}, CIs computed using the Wald method (SE and a \emph{t-distribution with residual df}); p-values computed using the Wald method with a \emph{t-distribution with residual df}. For other models, CIs computed using the Wald method (SE and a \emph{normal distribution}); p-values computed using the Wald method with a \emph{normal distribution}. } \code{"normal"} \itemize{ \item Applies to \emph{non-Bayesian models}. Compute Wald CIs and p-values, but always use a normal distribution. } \code{"residual"} \itemize{ \item Applies to \emph{non-Bayesian models}. Compute Wald CIs and p-values, but always use a \emph{t-distribution with residual df} when possible. If the residual df for a model cannot be determined, a normal distribution is used instead. } \strong{Methods for mixed models:} Compared to fixed effects (or single-level) models, determining appropriate df for Wald-based inference in mixed models is more difficult. See \href{https://bbolker.github.io/mixedmodels-misc/glmmFAQ.html#what-are-the-p-values-listed-by-summaryglmerfit-etc.-are-they-reliable}{the R GLMM FAQ} for a discussion. Several approximate methods for computing df are available, but you should also consider instead using profile likelihood (\code{"profile"}) or bootstrap ("\verb{boot"}) CIs and p-values instead. \code{"satterthwaite"} \itemize{ \item Applies to \emph{linear mixed models}. 
CIs computed using the Wald method (SE and a \emph{t-distribution with Satterthwaite df}); p-values computed using the Wald method with a \emph{t-distribution with Satterthwaite df}. } \code{"kenward"} \itemize{ \item Applies to \emph{linear mixed models}. CIs computed using the Wald method (\emph{Kenward-Roger SE} and a \emph{t-distribution with Kenward-Roger df}); p-values computed using the Wald method with \emph{Kenward-Roger SE and t-distribution with Kenward-Roger df}. } \code{"ml1"} \itemize{ \item Applies to \emph{linear mixed models}. CIs computed using the Wald method (SE and a \emph{t-distribution with m-l-1 approximated df}); p-values computed using the Wald method with a \emph{t-distribution with m-l-1 approximated df}. See \code{\link[=ci_ml1]{ci_ml1()}}. } \code{"betwithin"} \itemize{ \item Applies to \emph{linear mixed models} and \emph{generalized linear mixed models}. CIs computed using the Wald method (SE and a \emph{t-distribution with between-within df}); p-values computed using the Wald method with a \emph{t-distribution with between-within df}. See \code{\link[=ci_betwithin]{ci_betwithin()}}. } \strong{Likelihood-based methods:} Likelihood-based inference is based on comparing the likelihood for the maximum-likelihood estimate to the likelihood for models with one or more parameter values changed (e.g., set to zero or a range of alternative values). Likelihood ratios for the maximum-likelihood and alternative models are compared to a \eqn{\chi}-squared distribution to compute CIs and p-values. \code{"profile"} \itemize{ \item Applies to \emph{non-Bayesian models} of class \code{glm}, \code{polr} or \code{glmmTMB}. CIs computed by \emph{profiling the likelihood curve for a parameter}, using linear interpolation to find where likelihood ratio equals a critical value; p-values computed using the Wald method with a \emph{normal-distribution} (note: this might change in a future update!) 
} \code{"uniroot"} \itemize{ \item Applies to \emph{non-Bayesian models} of class \code{glmmTMB}. CIs computed by \emph{profiling the likelihood curve for a parameter}, using root finding to find where likelihood ratio equals a critical value; p-values computed using the Wald method with a \emph{normal-distribution} (note: this might change in a future update!) } \strong{Methods for bootstrapped or Bayesian models:} Bootstrap-based inference is based on \strong{resampling} and refitting the model to the resampled datasets. The distribution of parameter estimates across resampled datasets is used to approximate the parameter's sampling distribution. Depending on the type of model, several different methods for bootstrapping and constructing CIs and p-values from the bootstrap distribution are available. For Bayesian models, inference is based on drawing samples from the model posterior distribution. \code{"quantile"} (or \code{"eti"}) \itemize{ \item Applies to \emph{all models (including Bayesian models)}. For non-Bayesian models, only applies if \code{bootstrap = TRUE}. CIs computed as \emph{equal tailed intervals} using the quantiles of the bootstrap or posterior samples; p-values are based on the \emph{probability of direction}. See \code{\link[bayestestR:eti]{bayestestR::eti()}}. } \code{"hdi"} \itemize{ \item Applies to \emph{all models (including Bayesian models)}. For non-Bayesian models, only applies if \code{bootstrap = TRUE}. CIs computed as \emph{highest density intervals} for the bootstrap or posterior samples; p-values are based on the \emph{probability of direction}. See \code{\link[bayestestR:hdi]{bayestestR::hdi()}}. } \code{"bci"} (or \code{"bcai"}) \itemize{ \item Applies to \emph{all models (including Bayesian models)}. For non-Bayesian models, only applies if \code{bootstrap = TRUE}. CIs computed as \emph{bias corrected and accelerated intervals} for the bootstrap or posterior samples; p-values are based on the \emph{probability of direction}. 
See \code{\link[bayestestR:bci]{bayestestR::bci()}}. } \code{"si"} \itemize{ \item Applies to \emph{Bayesian models} with proper priors. CIs computed as \emph{support intervals} comparing the posterior samples against the prior samples; p-values are based on the \emph{probability of direction}. See \code{\link[bayestestR:si]{bayestestR::si()}}. } \code{"boot"} \itemize{ \item Applies to \emph{non-Bayesian models} of class \code{merMod}. CIs computed using \emph{parametric bootstrapping} (simulating data from the fitted model); p-values computed using the Wald method with a \emph{normal-distribution} (note: this might change in a future update!). } For all iteration-based methods other than \code{"boot"} (\code{"hdi"}, \code{"quantile"}, \code{"ci"}, \code{"eti"}, \code{"si"}, \code{"bci"}, \code{"bcai"}), p-values are based on the probability of direction (\code{\link[bayestestR:p_direction]{bayestestR::p_direction()}}), which is converted into a p-value using \code{\link[bayestestR:pd_to_p]{bayestestR::pd_to_p()}}. } \examples{ \dontrun{ library(parameters) if (require("rstanarm")) { model <- stan_glm( Sepal.Length ~ Petal.Length * Species, data = iris, iter = 500, refresh = 0 ) model_parameters(model) } } } \seealso{ \code{\link[insight:standardize_names]{insight::standardize_names()}} to rename columns into a consistent, standardized naming scheme. 
} parameters/man/dot-recode_to_zero.Rd0000644000175000017500000000052313636467450017415 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/utils.R \name{.recode_to_zero} \alias{.recode_to_zero} \title{Recode a variable so its lowest value is beginning with zero} \usage{ .recode_to_zero(x) } \description{ Recode a variable so its lowest value is beginning with zero } \keyword{internal} parameters/man/model_parameters.befa.Rd0000644000175000017500000000445014104713406020033 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/methods_BayesFM.R \name{model_parameters.befa} \alias{model_parameters.befa} \title{Parameters from Bayesian Exploratory Factor Analysis} \usage{ \method{model_parameters}{befa}( model, sort = FALSE, centrality = "median", dispersion = FALSE, ci = 0.95, ci_method = "hdi", test = NULL, verbose = TRUE, ... ) } \arguments{ \item{model}{Bayesian EFA created by the \code{BayesFM::befa}.} \item{sort}{Sort the loadings.} \item{centrality}{The point-estimates (centrality indices) to compute. Character (vector) or list with one or more of these options: \code{"median"}, \code{"mean"}, \code{"MAP"} or \code{"all"}.} \item{dispersion}{Logical, if \code{TRUE}, computes indices of dispersion related to the estimate(s) (\code{SD} and \code{MAD} for \code{mean} and \code{median}, respectively).} \item{ci}{Value or vector of probability of the CI (between 0 and 1) to be estimated. Default to \code{.95} (\verb{95\%}).} \item{ci_method}{The type of index used for Credible Interval. Can be \code{"HDI"} (default, see \code{\link[bayestestR:hdi]{hdi()}}), \code{"ETI"} (see \code{\link[bayestestR:eti]{eti()}}), \code{"BCI"} (see \code{\link[bayestestR:bci]{bci()}}) or \code{"SI"} (see \code{\link[bayestestR:si]{si()}}).} \item{test}{The indices of effect existence to compute. 
Character (vector) or list with one or more of these options: \code{"p_direction"} (or \code{"pd"}), \code{"rope"}, \code{"p_map"}, \code{"equivalence_test"} (or \code{"equitest"}), \code{"bayesfactor"} (or \code{"bf"}) or \code{"all"} to compute all tests. For each "test", the corresponding \pkg{bayestestR} function is called (e.g. \code{\link[bayestestR:rope]{rope()}} or \code{\link[bayestestR:p_direction]{p_direction()}}) and its results included in the summary output.} \item{verbose}{Toggle off warnings.} \item{...}{Arguments passed to or from other methods.} } \value{ A data frame of loadings. } \description{ Format Bayesian Exploratory Factor Analysis objects from the BayesFM package. } \examples{ library(parameters) \donttest{ if (require("BayesFM")) { efa <- BayesFM::befa(mtcars, iter = 1000) results <- model_parameters(efa, sort = TRUE) results efa_to_cfa(results) } } } parameters/man/format_parameters.Rd0000644000175000017500000000437214077615701017342 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/format_parameters.R \name{format_parameters} \alias{format_parameters} \alias{format_parameters.default} \title{Parameter names formatting} \usage{ format_parameters(model, ...) \method{format_parameters}{default}(model, brackets = c("[", "]"), ...) } \arguments{ \item{model}{A statistical model.} \item{...}{Currently not used.} \item{brackets}{A character vector of length two, indicating the opening and closing brackets.} } \value{ A (names) character vector with formatted parameter names. The value names refer to the original names of the coefficients. } \description{ This functions formats the names of model parameters (coefficients) to make them more human-readable. } \section{Interpretation of Interaction Terms}{ Note that the \emph{interpretation} of interaction terms depends on many characteristics of the model. 
The number of parameters, and overall performance of the model, can differ \emph{or not} between \code{a * b}, \code{a : b}, and \code{a / b}, suggesting that sometimes interaction terms give different parameterizations of the same model, but other times it gives completely different models (depending on \code{a} or \code{b} being factors or covariates, included as main effects or not, etc.). Their interpretation depends on the full context of the model, which should not be inferred from the parameters table alone - rather, we recommend using packages that calculate estimated marginal means or marginal effects, such as \CRANpkg{modelbased}, \CRANpkg{emmeans} or \CRANpkg{ggeffects}. To raise awareness for this issue, you may use \code{print(...,show_formula=TRUE)} to add the model-specification to the output of the \code{\link[=print.parameters_model]{print()}} method for \code{model_parameters()}. } \examples{ model <- lm(Sepal.Length ~ Species * Sepal.Width, data = iris) format_parameters(model) model <- lm(Sepal.Length ~ Petal.Length + (Species / Sepal.Width), data = iris) format_parameters(model) model <- lm(Sepal.Length ~ Species + poly(Sepal.Width, 2), data = iris) format_parameters(model) model <- lm(Sepal.Length ~ Species + poly(Sepal.Width, 2, raw = TRUE), data = iris) format_parameters(model) } parameters/man/model_parameters.kmeans.Rd0000644000175000017500000001014314135275207020416 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/methods_dbscan.R, R/methods_hclust.R, % R/methods_kmeans.R, R/methods_mclust.R, R/methods_pam.R \name{model_parameters.dbscan} \alias{model_parameters.dbscan} \alias{model_parameters.hclust} \alias{model_parameters.pvclust} \alias{model_parameters.kmeans} \alias{model_parameters.hkmeans} \alias{model_parameters.Mclust} \alias{model_parameters.pam} \title{Parameters from Cluster Models (k-means, ...)} \usage{ \method{model_parameters}{dbscan}(model, data = NULL, clusters = NULL, 
...) \method{model_parameters}{hclust}(model, data = NULL, clusters = NULL, ...) \method{model_parameters}{pvclust}(model, data = NULL, clusters = NULL, ci = 0.95, ...) \method{model_parameters}{kmeans}(model, ...) \method{model_parameters}{hkmeans}(model, ...) \method{model_parameters}{Mclust}(model, data = NULL, clusters = NULL, ...) \method{model_parameters}{pam}(model, data = NULL, clusters = NULL, ...) } \arguments{ \item{model}{Cluster model.} \item{data}{A data.frame.} \item{clusters}{A vector with clusters assignments (must be same length as rows in data).} \item{...}{Arguments passed to or from other methods.} \item{ci}{Confidence Interval (CI) level. Default to \code{0.95} (\verb{95\%}).} } \description{ Format cluster models obtained for example by \code{\link[=kmeans]{kmeans()}}. } \examples{ \donttest{ # DBSCAN --------------------------- if (require("dbscan", quietly = TRUE)) { model <- dbscan::dbscan(iris[1:4], eps = 1.45, minPts = 10) rez <- model_parameters(model, iris[1:4]) rez # Get clusters predict(rez) # Clusters centers in long form attributes(rez)$means # Between and Total Sum of Squares attributes(rez)$Sum_Squares_Total attributes(rez)$Sum_Squares_Between # HDBSCAN model <- dbscan::hdbscan(iris[1:4], minPts = 10) model_parameters(model, iris[1:4]) } } # # Hierarchical clustering (hclust) --------------------------- data <- iris[1:4] model <- hclust(dist(data)) clusters <- cutree(model, 3) rez <- model_parameters(model, data, clusters) rez # Get clusters predict(rez) # Clusters centers in long form attributes(rez)$means # Between and Total Sum of Squares attributes(rez)$Total_Sum_Squares attributes(rez)$Between_Sum_Squares \donttest{ # # pvclust (finds "significant" clusters) --------------------------- if (require("pvclust", quietly = TRUE)) { data <- iris[1:4] # NOTE: pvclust works on transposed data model <- pvclust::pvclust(datawizard::data_transpose(data), method.dist = "euclidean", nboot = 50, quiet = TRUE ) rez <- 
model_parameters(model, data, ci = 0.90) rez # Get clusters predict(rez) # Clusters centers in long form attributes(rez)$means # Between and Total Sum of Squares attributes(rez)$Sum_Squares_Total attributes(rez)$Sum_Squares_Between } } \dontrun{ # # K-means ------------------------------- model <- kmeans(iris[1:4], centers = 3) rez <- model_parameters(model) rez # Get clusters predict(rez) # Clusters centers in long form attributes(rez)$means # Between and Total Sum of Squares attributes(rez)$Sum_Squares_Total attributes(rez)$Sum_Squares_Between } \dontrun{ # # Hierarchical K-means (factoextra::hkclust) ---------------------- if (require("factoextra", quietly = TRUE)) { data <- iris[1:4] model <- factoextra::hkmeans(data, k = 3) rez <- model_parameters(model) rez # Get clusters predict(rez) # Clusters centers in long form attributes(rez)$means # Between and Total Sum of Squares attributes(rez)$Sum_Squares_Total attributes(rez)$Sum_Squares_Between } } if (require("mclust", quietly = TRUE)) { model <- mclust::Mclust(iris[1:4], verbose = FALSE) model_parameters(model) } \dontrun{ # # K-Medoids (PAM and HPAM) ============== if (require("cluster", quietly = TRUE)) { model <- cluster::pam(iris[1:4], k = 3) model_parameters(model) } if (require("fpc", quietly = TRUE)) { model <- fpc::pamk(iris[1:4], criterion = "ch") model_parameters(model) } } } parameters/man/dot-n_factors_mreg.Rd0000644000175000017500000000047613636467450017412 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/n_factors.R \name{.n_factors_mreg} \alias{.n_factors_mreg} \title{Multiple Regression Procedure} \usage{ .n_factors_mreg(eigen_values = NULL, model = "factors") } \description{ Multiple Regression Procedure } \keyword{internal} parameters/man/bootstrap_parameters.Rd0000644000175000017500000000762714131751777020102 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/bootstrap_parameters.R 
\name{bootstrap_parameters} \alias{bootstrap_parameters} \title{Parameters bootstrapping} \usage{ bootstrap_parameters( model, iterations = 1000, centrality = "median", ci = 0.95, ci_method = "quantile", test = "p-value", ... ) } \arguments{ \item{model}{Statistical model.} \item{iterations}{The number of draws to simulate/bootstrap.} \item{centrality}{The point-estimates (centrality indices) to compute. Character (vector) or list with one or more of these options: \code{"median"}, \code{"mean"}, \code{"MAP"} or \code{"all"}.} \item{ci}{Value or vector of probability of the CI (between 0 and 1) to be estimated. Default to \code{.95} (\verb{95\%}).} \item{ci_method}{The type of index used for Credible Interval. Can be \code{"HDI"} (default, see \code{\link[bayestestR:hdi]{hdi()}}), \code{"ETI"} (see \code{\link[bayestestR:eti]{eti()}}), \code{"BCI"} (see \code{\link[bayestestR:bci]{bci()}}) or \code{"SI"} (see \code{\link[bayestestR:si]{si()}}).} \item{test}{The indices to compute. Character (vector) with one or more of these options: \code{"p-value"} (or \code{"p"}), \code{"p_direction"} (or \code{"pd"}), \code{"rope"}, \code{"p_map"}, \code{"equivalence_test"} (or \code{"equitest"}), \code{"bayesfactor"} (or \code{"bf"}) or \code{"all"} to compute all tests. For each "test", the corresponding \pkg{bayestestR} function is called (e.g. \code{\link[bayestestR:rope]{bayestestR::rope()}} or \code{\link[bayestestR:p_direction]{bayestestR::p_direction()}}) and its results included in the summary output.} \item{...}{Arguments passed to or from other methods.} } \value{ A data frame summarizing the bootstrapped parameters. } \description{ Compute bootstrapped parameters and their related indices such as Confidence Intervals (CI) and p-values. } \details{ This function first calls \code{\link[=bootstrap_model]{bootstrap_model()}} to generate bootstrapped coefficients. 
The resulting replicates for each coefficient are treated as "distributions", and are passed to \code{\link[bayestestR:describe_posterior]{bayestestR::describe_posterior()}} to calculate the related indices defined in the \code{"test"} argument. \cr\cr Note that the p-values returned here are estimated under the assumption of \emph{translation equivariance}: that the shape of the sampling distribution is unaffected by the null being true or not. If this assumption does not hold, p-values can be biased, and it is suggested to use proper permutation tests to obtain non-parametric p-values. } \section{Using with \strong{emmeans}}{ The output can be passed directly to the various functions from the \strong{emmeans} package, to obtain bootstrapped estimates, contrasts, simple slopes, etc. and their confidence intervals. These can then be passed to \code{model_parameters()} to obtain standard errors, p-values, etc. (see example). \cr\cr Note that the p-values returned here are estimated under the assumption of \emph{translation equivariance}: that the shape of the sampling distribution is unaffected by the null being true or not. If this assumption does not hold, p-values can be biased, and it is suggested to use proper permutation tests to obtain non-parametric p-values. } \examples{ \dontrun{ if (require("boot", quietly = TRUE)) { set.seed(2) model <- lm(Sepal.Length ~ Species * Petal.Width, data = iris) b <- bootstrap_parameters(model) print(b) if (require("emmeans")) { est <- emmeans(b, trt.vs.ctrl ~ Species) print(model_parameters(est)) } } } } \references{ Davison, A. C., & Hinkley, D. V. (1997). Bootstrap methods and their application (Vol. 1). Cambridge University Press. 
} \seealso{ \code{\link[=bootstrap_model]{bootstrap_model()}}, \code{\link[=simulate_parameters]{simulate_parameters()}}, \code{\link[=simulate_model]{simulate_model()}} } parameters/man/equivalence_test.lm.Rd0000644000175000017500000001766214141743726017605 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/equivalence_test.R \name{equivalence_test.lm} \alias{equivalence_test.lm} \alias{equivalence_test.merMod} \title{Equivalence test} \usage{ \method{equivalence_test}{lm}( x, range = "default", ci = 0.95, rule = "classic", verbose = TRUE, ... ) \method{equivalence_test}{merMod}( x, range = "default", ci = 0.95, rule = "classic", effects = c("fixed", "random"), verbose = TRUE, ... ) } \arguments{ \item{x}{A statistical model.} \item{range}{The range of practical equivalence of an effect. May be \code{"default"}, to automatically define this range based on properties of the model's data.} \item{ci}{Confidence Interval (CI) level. Default to \code{0.95} (\verb{95\%}).} \item{rule}{Character, indicating the rules when testing for practical equivalence. Can be \code{"bayes"}, \code{"classic"} or \code{"cet"}. See 'Details'.} \item{verbose}{Toggle warnings and messages.} \item{...}{Arguments passed to or from other methods.} \item{effects}{Should parameters for fixed effects (\code{"fixed"}), random effects (\code{"random"}), or both (\code{"all"}) be returned? Only applies to mixed models. May be abbreviated. If the calculation of random effects parameters takes too long, you may use \code{effects = "fixed"}.} } \value{ A data frame. } \description{ Compute the (conditional) equivalence test for frequentist models. } \details{ In classical null hypothesis significance testing (NHST) within a frequentist framework, it is not possible to accept the null hypothesis, H0 - unlike in Bayesian statistics, where such probability statements are possible. 
\dQuote{\link{...} one can only reject the null hypothesis if the test statistics falls into the critical region(s), or fail to reject this hypothesis. In the latter case, all we can say is that no significant effect was observed, but one cannot conclude that the null hypothesis is true.} (\cite{Pernet 2017}). One way to address this issue without Bayesian methods is \emph{Equivalence Testing}, as implemented in \code{equivalence_test()}. While you either can reject the null hypothesis or claim an inconclusive result in NHST, the equivalence test adds a third category, \emph{"accept"}. Roughly speaking, the idea behind equivalence testing in a frequentist framework is to check whether an estimate and its uncertainty (i.e. confidence interval) falls within a region of "practical equivalence". Depending on the rule for this test (see below), statistical significance does not necessarily indicate whether the null hypothesis can be rejected or not, i.e. the classical interpretation of the p-value may differ from the results returned from the equivalence test. \subsection{Calculation of equivalence testing}{ \describe{ \item{"bayes" - Bayesian rule (Kruschke 2018)}{ This rule follows the \dQuote{HDI+ROPE decision rule} \cite{(Kruschke, 2014, 2018)} used for the \code{\link[bayestestR:equivalence_test]{Bayesian counterpart()}}. This means, if the confidence intervals are completely outside the ROPE, the "null hypothesis" for this parameter is "rejected". If the ROPE completely covers the CI, the null hypothesis is accepted. Else, it's undecided whether to accept or reject the null hypothesis. Desirable results are low proportions inside the ROPE (the closer to zero the better). } \item{"classic" - The TOST rule (Lakens 2017)}{ This rule follows the \dQuote{TOST rule}, i.e. a two one-sided test procedure (\cite{Lakens 2017}). Following this rule, practical equivalence of an effect (i.e. 
H0) is \emph{rejected}, when the coefficient is statistically significant \emph{and} the narrow confidence intervals (i.e. \code{1-2*alpha}) \emph{include} or \emph{exceed} the ROPE boundaries. Practical equivalence is assumed (i.e. H0 accepted) when the narrow confidence intervals are completely inside the ROPE, no matter if the effect is statistically significant or not. Else, the decision whether to accept or reject H0 is undecided. } \item{"cet" - Conditional Equivalence Testing (Campbell/Gustafson 2018)}{ The Conditional Equivalence Testing as described by \cite{Campbell and Gustafson 2018}. According to this rule, practical equivalence is rejected when the coefficient is statistically significant. When the effect is \emph{not} significant and the narrow confidence intervals are completely inside the ROPE, we accept H0, else it is undecided. } } } \subsection{Levels of Confidence Intervals used for Equivalence Testing}{ For \code{rule = "classic"}, "narrow" confidence intervals are used for equivalence testing. "Narrow" means the intervals are not 1 - alpha, but 1 - 2 * alpha. Thus, if \code{ci = .95}, alpha is assumed to be 0.05 and internally a ci-level of 0.90 is used. \code{rule = "cet"} uses both regular and narrow confidence intervals, while \code{rule = "bayes"} only uses the regular intervals. } \subsection{p-Values}{ The equivalence p-value is the area of the (cumulative) confidence distribution that is outside of the region of equivalence. It can be interpreted as p-value for \emph{rejecting} the alternative hypothesis and \emph{accepting} the null hypothesis. } \subsection{Second Generation p-Value (SGPV)}{ Second generation p-values (SGPV) were proposed as a statistic that represents \dQuote{the proportion of data-supported hypotheses that are also null hypotheses} \cite{(Blume et al. 2018)}. 
This statistic is actually computed in the same way as the percentage inside the ROPE as returned by \code{equivalence_test()} (see \cite{Lakens and Delacre 2020} for details on computation of the SGPV). Thus, the \code{"inside ROPE"} column reflects the SGPV. } \subsection{ROPE range}{ Some attention is required for finding suitable values for the ROPE limits (argument \code{range}). See 'Details' in \code{\link[bayestestR:rope_range]{bayestestR::rope_range()}} for further information. } } \note{ There is also a \href{https://easystats.github.io/see/articles/parameters.html}{\code{plot()}-method} implemented in the \href{https://easystats.github.io/see/}{\pkg{see}-package}. } \examples{ data(qol_cancer) model <- lm(QoL ~ time + age + education, data = qol_cancer) # default rule equivalence_test(model) # conditional equivalence test equivalence_test(model, rule = "cet") # plot method if (require("see", quietly = TRUE)) { result <- equivalence_test(model) plot(result) } } \references{ \itemize{ \item Blume, J. D., D'Agostino McGowan, L., Dupont, W. D., & Greevy, R. A. (2018). Second-generation p-values: Improved rigor, reproducibility, & transparency in statistical analyses. PLOS ONE, 13(3), e0188299. https://doi.org/10.1371/journal.pone.0188299 \item Campbell, H., & Gustafson, P. (2018). Conditional equivalence testing: An alternative remedy for publication bias. PLOS ONE, 13(4), e0195145. doi: 10.1371/journal.pone.0195145 \item Kruschke, J. K. (2014). Doing Bayesian data analysis: A tutorial with R, JAGS, and Stan. Academic Press \item Kruschke, J. K. (2018). Rejecting or accepting parameter values in Bayesian estimation. Advances in Methods and Practices in Psychological Science, 1(2), 270-280. doi: 10.1177/2515245918771304 \item Lakens, D. (2017). Equivalence Tests: A Practical Primer for t Tests, Correlations, and Meta-Analyses. Social Psychological and Personality Science, 8(4), 355–362. doi: 10.1177/1948550617697177 \item Lakens, D., & Delacre, M. (2020). 
Equivalence Testing and the Second Generation P-Value. Meta-Psychology, 4. https://doi.org/10.15626/MP.2018.933 \item Pernet, C. (2017). Null hypothesis significance testing: A guide to commonly misunderstood concepts and recommendations for good practice. F1000Research, 4, 621. doi: 10.12688/f1000research.6963.5 } } \seealso{ For more details, see \code{\link[bayestestR:equivalence_test]{bayestestR::equivalence_test()}}. Further readings can be found in the references. } parameters/man/select_parameters.Rd0000644000175000017500000000652414077615701017332 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/select_parameters.R, % R/select_parameters.stanreg.R \name{select_parameters} \alias{select_parameters} \alias{select_parameters.lm} \alias{select_parameters.merMod} \alias{select_parameters.stanreg} \title{Automated selection of model parameters} \usage{ select_parameters(model, ...) \method{select_parameters}{lm}(model, direction = "both", steps = 1000, k = 2, ...) \method{select_parameters}{merMod}(model, direction = "backward", steps = 1000, ...) \method{select_parameters}{stanreg}(model, method = NULL, cross_validation = FALSE, ...) } \arguments{ \item{model}{A statistical model (of class \code{lm}, \code{glm}, \code{merMod}, \code{stanreg} or \code{brmsfit}).} \item{...}{Arguments passed to or from other methods.} \item{direction}{ the mode of stepwise search, can be one of \code{"both"}, \code{"backward"}, or \code{"forward"}, with a default of \code{"both"}. If the \code{scope} argument is missing the default for \code{direction} is \code{"backward"}. Values can be abbreviated. } \item{steps}{ the maximum number of steps to be considered. The default is 1000 (essentially as many as required). It is typically used to stop the process early. } \item{k}{ the multiple of the number of degrees of freedom used for the penalty. 
Only \code{k = 2} gives the genuine AIC: \code{k = log(n)} is sometimes referred to as BIC or SBC. } \item{method}{The method used in the variable selection. Can be \code{NULL} (default), \code{"forward"} or \code{"L1"}. See \code{projpred::varsel}.} \item{cross_validation}{Select with cross-validation.} } \value{ The model refitted with optimal number of parameters. } \description{ This function performs an automated selection of the 'best' parameters, updating and returning the "best" model. } \details{ \subsection{Classical lm and glm}{ For frequentist GLMs, \code{select_parameters()} performs an AIC-based stepwise selection. } \subsection{Mixed models}{ For mixed-effects models of class \code{merMod}, stepwise selection is based on \code{\link[cAIC4:stepcAIC]{cAIC4::stepcAIC()}}. This step function only searches the "best" model based on the random-effects structure, i.e. \code{select_parameters()} adds or excludes random-effects until the cAIC can't be improved further. } \subsection{Bayesian models}{ For Bayesian models, it uses the \pkg{projpred} package. 
} } \examples{ model <- lm(mpg ~ ., data = mtcars) select_parameters(model) model <- lm(mpg ~ cyl * disp * hp * wt, data = mtcars) select_parameters(model) \donttest{ # lme4 ------------------------------------------- if (require("lme4")) { model <- lmer( Sepal.Width ~ Sepal.Length * Petal.Width * Petal.Length + (1 | Species), data = iris ) select_parameters(model) } } \dontrun{ # rstanarm ------------------------------------------- if (require("rstanarm") && require("projpred")) { model <- stan_glm( mpg ~ ., data = mtcars, iter = 500, refresh = 0, verbose = FALSE ) select_parameters(model, cross_validation = TRUE) model <- stan_glm( mpg ~ cyl * disp * hp, data = mtcars, iter = 500, refresh = 0, verbose = FALSE ) select_parameters(model, cross_validation = FALSE) } } } parameters/man/cluster_centers.Rd0000644000175000017500000000201514111120042016775 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/cluster_centers.R \name{cluster_centers} \alias{cluster_centers} \title{Find the cluster centers in your data} \usage{ cluster_centers(data, clusters, fun = mean, ...) } \arguments{ \item{data}{A data.frame.} \item{clusters}{A vector with clusters assignments (must be same length as rows in data).} \item{fun}{What function to use, \code{mean} by default.} \item{...}{Other arguments to be passed to or from other functions.} } \value{ A dataframe containing the cluster centers. Attributes include performance statistics and distance between each observation and its respective cluster centre. } \description{ For each cluster, computes the mean (or other indices) of the variables. Can be used to retrieve the centers of clusters. Also returns the within Sum of Squares. 
} \examples{ k <- kmeans(iris[1:4], 3) cluster_centers(iris[1:4], clusters = k$cluster) cluster_centers(iris[1:4], clusters = k$cluster, fun = median) } parameters/man/p_value.BFBayesFactor.Rd0000644000175000017500000000177214160324505017664 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/methods_BayesFactor.R \name{p_value.BFBayesFactor} \alias{p_value.BFBayesFactor} \title{p-values for Bayesian Models} \usage{ \method{p_value}{BFBayesFactor}(model, ...) } \arguments{ \item{model}{A statistical model.} \item{...}{Arguments passed down to \code{standard_error_robust()} when confidence intervals or p-values based on robust standard errors should be computed. Only available for models where \code{method = "robust"} is supported.} } \value{ The p-values. } \description{ This function attempts to return, or compute, p-values of Bayesian models. } \details{ For Bayesian models, the p-values corresponds to the \emph{probability of direction} (\code{\link[bayestestR:p_direction]{bayestestR::p_direction()}}), which is converted to a p-value using \code{bayestestR::convert_pd_to_p()}. } \examples{ data(iris) model <- lm(Petal.Length ~ Sepal.Length + Species, data = iris) p_value(model) } parameters/man/model_parameters.htest.Rd0000644000175000017500000000721214107211212020253 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/methods_htest.R \name{model_parameters.htest} \alias{model_parameters.htest} \alias{model_parameters.pairwise.htest} \title{Parameters from hypothesis tests} \usage{ \method{model_parameters}{htest}( model, cramers_v = NULL, phi = NULL, standardized_d = NULL, hedges_g = NULL, omega_squared = NULL, eta_squared = NULL, epsilon_squared = NULL, cohens_g = NULL, rank_biserial = NULL, rank_epsilon_squared = NULL, kendalls_w = NULL, ci = 0.95, alternative = NULL, bootstrap = FALSE, verbose = TRUE, ... 
) \method{model_parameters}{pairwise.htest}(model, verbose = TRUE, ...) } \arguments{ \item{model}{Object of class \code{htest} or \code{pairwise.htest}.} \item{cramers_v, phi}{Compute Cramer's V or phi as index of effect size. Can be \code{"raw"} or \code{"adjusted"} (effect size will be bias-corrected). Only applies to objects from \code{chisq.test()}.} \item{standardized_d}{If \code{TRUE}, compute standardized d as index of effect size. Only applies to objects from \code{t.test()}. Calculation of \code{d} is based on the t-value (see \code{\link[effectsize:t_to_r]{effectsize::t_to_d()}} for details).} \item{hedges_g}{If \code{TRUE}, compute Hedges' g as index of effect size. Only applies to objects from \code{t.test()}.} \item{omega_squared, eta_squared, epsilon_squared}{Logical, if \code{TRUE}, returns the non-partial effect size Omega, Eta or Epsilon squared. Only applies to objects from \code{oneway.test()}.} \item{cohens_g}{If \code{TRUE}, compute Cohen's g as index of effect size. Only applies to objects from \code{mcnemar.test()}.} \item{rank_biserial}{If \code{TRUE}, compute the rank-biserial correlation as effect size measure. Only applies to objects from \code{wilcox.test()}.} \item{rank_epsilon_squared}{If \code{TRUE}, compute the rank epsilon squared as effect size measure. Only applies to objects from \code{kruskal.test()}.} \item{kendalls_w}{If \code{TRUE}, compute Kendall's coefficient of concordance as effect size measure. Only applies to objects from \code{friedman.test()}.} \item{ci}{Level of confidence intervals for effect size statistic. Currently only applies to objects from \code{chisq.test()} or \code{oneway.test()}.} \item{alternative}{A character string specifying the alternative hypothesis; controls the type of CI returned: \code{"two.sided"} (default, two-sided CI), \code{"greater"} or \code{"less"} (one-sided CI). Partial matching is allowed (e.g., \code{"g"}, \code{"l"}, \code{"two"}...).
See section \emph{One-Sided CIs} in the \href{https://easystats.github.io/effectsize/}{effectsize_CIs vignette}.} \item{bootstrap}{Should estimates be bootstrapped?} \item{verbose}{Toggle warnings and messages.} \item{...}{Arguments passed to or from other methods.} } \value{ A data frame of indices related to the model's parameters. } \description{ Parameters of h-tests (correlations, t-tests, chi-squared, ...). } \examples{ model <- cor.test(mtcars$mpg, mtcars$cyl, method = "pearson") model_parameters(model) model <- t.test(iris$Sepal.Width, iris$Sepal.Length) model_parameters(model) model <- t.test(mtcars$mpg ~ mtcars$vs) model_parameters(model) model <- t.test(iris$Sepal.Width, mu = 1) model_parameters(model) data(airquality) airquality$Month <- factor(airquality$Month, labels = month.abb[5:9]) model <- pairwise.t.test(airquality$Ozone, airquality$Month) model_parameters(model) smokers <- c(83, 90, 129, 70) patients <- c(86, 93, 136, 82) model <- pairwise.prop.test(smokers, patients) model_parameters(model) } parameters/man/model_parameters.averaging.Rd0000644000175000017500000001154314135322113021075 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/methods_averaging.R, R/methods_betareg.R, % R/methods_glmx.R \name{model_parameters.averaging} \alias{model_parameters.averaging} \alias{model_parameters.betareg} \alias{model_parameters.glmx} \title{Parameters from special models} \usage{ \method{model_parameters}{averaging}( model, ci = 0.95, component = c("conditional", "full"), exponentiate = FALSE, p_adjust = NULL, verbose = TRUE, ... ) \method{model_parameters}{betareg}( model, ci = 0.95, bootstrap = FALSE, iterations = 1000, component = c("conditional", "precision", "all"), standardize = NULL, exponentiate = FALSE, p_adjust = NULL, verbose = TRUE, ... 
) \method{model_parameters}{glmx}( model, ci = 0.95, bootstrap = FALSE, iterations = 1000, component = c("all", "conditional", "extra"), standardize = NULL, exponentiate = FALSE, p_adjust = NULL, verbose = TRUE, ... ) } \arguments{ \item{model}{Model object.} \item{ci}{Confidence Interval (CI) level. Default to \code{0.95} (\verb{95\%}).} \item{component}{Model component for which parameters should be shown. May be one of \code{"conditional"}, \code{"precision"} (\pkg{betareg}), \code{"scale"} (\pkg{ordinal}), \code{"extra"} (\pkg{glmx}), \code{"marginal"} (\pkg{mfx}), \code{"conditional"} or \code{"full"} (for \code{MuMIn::model.avg()}) or \code{"all"}.} \item{exponentiate}{Logical, indicating whether or not to exponentiate the the coefficients (and related confidence intervals). This is typical for logistic regression, or more generally speaking, for models with log or logit links. \strong{Note:} Delta-method standard errors are also computed (by multiplying the standard errors by the transformed coefficients). This is to mimic behaviour of other software packages, such as Stata, but these standard errors poorly estimate uncertainty for the transformed coefficient. The transformed confidence interval more clearly captures this uncertainty. For \code{compare_parameters()}, \code{exponentiate = "nongaussian"} will only exponentiate coefficients from non-Gaussian families.} \item{p_adjust}{Character vector, if not \code{NULL}, indicates the method to adjust p-values. See \code{\link[stats:p.adjust]{stats::p.adjust()}} for details. Further possible adjustment methods are \code{"tukey"}, \code{"scheffe"}, \code{"sidak"} and \code{"none"} to explicitly disable adjustment for \code{emmGrid} objects (from \pkg{emmeans}).} \item{verbose}{Toggle warnings and messages.} \item{...}{Arguments passed to or from other methods. 
For instance, when \code{bootstrap = TRUE}, arguments like \code{type} or \code{parallel} are passed down to \code{bootstrap_model()}, and arguments like \code{ci_method} are passed down to \code{\link[bayestestR:describe_posterior]{bayestestR::describe_posterior()}}.} \item{bootstrap}{Should estimates be based on bootstrapped model? If \code{TRUE}, then arguments of \link[=model_parameters.stanreg]{Bayesian regressions} apply (see also \code{\link[=bootstrap_parameters]{bootstrap_parameters()}}).} \item{iterations}{The number of bootstrap replicates. This only applies in the case of bootstrapped frequentist models.} \item{standardize}{The method used for standardizing the parameters. Can be \code{NULL} (default; no standardization), \code{"refit"} (for re-fitting the model on standardized data) or one of \code{"basic"}, \code{"posthoc"}, \code{"smart"}, \code{"pseudo"}. See 'Details' in \code{\link[effectsize:standardize_parameters]{effectsize::standardize_parameters()}}. \strong{Important:} \itemize{ \item The \code{"refit"} method does \emph{not} standardize categorical predictors (i.e. factors), which may be a different behaviour compared to other R packages (such as \pkg{lm.beta}) or other software packages (like SPSS). To mimic such behaviours, either use \code{standardize="basic"} or standardize the data with \code{datawizard::standardize(force=TRUE)} \emph{before} fitting the model. \item For mixed models, when using methods other than \code{"refit"}, only the fixed effects will be returned. \item Robust estimation (i.e. \code{robust=TRUE}) of standardized parameters only works when \code{standardize="refit"}. }} } \value{ A data frame of indices related to the model's parameters. } \description{ Parameters from special regression models not listed under one of the previous categories yet.
} \examples{ library(parameters) if (require("brglm2", quietly = TRUE)) { data("stemcell") model <- bracl( research ~ as.numeric(religion) + gender, weights = frequency, data = stemcell, type = "ML" ) model_parameters(model) } } \seealso{ \code{\link[insight:standardize_names]{insight::standardize_names()}} to rename columns into a consistent, standardized naming scheme. } parameters/man/p_value.poissonmfx.Rd0000644000175000017500000000270414100573643017456 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/methods_mfx.R \name{p_value.poissonmfx} \alias{p_value.poissonmfx} \alias{p_value.betaor} \alias{p_value.betamfx} \title{p-values for Marginal Effects Models} \usage{ \method{p_value}{poissonmfx}(model, component = c("all", "conditional", "marginal"), ...) \method{p_value}{betaor}(model, component = c("all", "conditional", "precision"), ...) \method{p_value}{betamfx}( model, component = c("all", "conditional", "precision", "marginal"), ... ) } \arguments{ \item{model}{A statistical model.} \item{component}{Should all parameters, parameters for the conditional model, precision-component or marginal effects be returned? \code{component} may be one of \code{"conditional"}, \code{"precision"}, \code{"marginal"} or \code{"all"} (default).} \item{...}{Currently not used.} } \value{ A data frame with at least two columns: the parameter names and the p-values. Depending on the model, may also include columns for model components etc. } \description{ This function attempts to return, or compute, p-values of marginal effects models from package \pkg{mfx}. 
} \examples{ if (require("mfx", quietly = TRUE)) { set.seed(12345) n <- 1000 x <- rnorm(n) y <- rnegbin(n, mu = exp(1 + 0.5 * x), theta = 0.5) d <- data.frame(y, x) model <- poissonmfx(y ~ x, data = d) p_value(model) p_value(model, component = "marginal") } } parameters/man/cluster_discrimination.Rd0000644000175000017500000000247514135724575020413 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/cluster_discrimination.R \name{cluster_discrimination} \alias{cluster_discrimination} \title{Compute a linear discriminant analysis on classified cluster groups} \usage{ cluster_discrimination(x, cluster_groups = NULL, ...) } \arguments{ \item{x}{A data frame} \item{cluster_groups}{Group classification of the cluster analysis, which can be retrieved from the \code{\link[=cluster_analysis]{cluster_analysis()}} function.} \item{...}{Other arguments to be passed to or from.} } \description{ Computes linear discriminant analysis (LDA) on classified cluster groups, and determines the goodness of classification for each cluster group. See \code{MASS::lda()} for details. } \examples{ if (requireNamespace("MASS", quietly = TRUE)) { # Retrieve group classification from hierarchical cluster analysis clustering <- cluster_analysis(iris[, 1:4], n = 3) # Goodness of group classification cluster_discrimination(clustering) } } \seealso{ \code{\link[=n_clusters]{n_clusters()}} to determine the number of clusters to extract, \code{\link[=cluster_analysis]{cluster_analysis()}} to compute a cluster analysis and \code{\link[=check_clusterstructure]{check_clusterstructure()}} to check suitability of data for clustering. 
} parameters/man/dot-n_factors_bartlett.Rd0000644000175000017500000000055513636467450020277 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/n_factors.R \name{.n_factors_bartlett} \alias{.n_factors_bartlett} \title{Bartlett, Anderson and Lawley Procedures} \usage{ .n_factors_bartlett(eigen_values = NULL, model = "factors", nobs = NULL) } \description{ Bartlett, Anderson and Lawley Procedures } \keyword{internal} parameters/man/dot-find_most_common.Rd0000644000175000017500000000042513636467450017746 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/utils.R \name{.find_most_common} \alias{.find_most_common} \title{Find most common occurrence} \usage{ .find_most_common(x) } \description{ Find most common occurrence } \keyword{internal} parameters/man/model_parameters.mira.Rd0000644000175000017500000000551714077615701020103 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/methods_mice.R \name{model_parameters.mira} \alias{model_parameters.mira} \title{Parameters from multiply imputed repeated analyses} \usage{ \method{model_parameters}{mira}( model, ci = 0.95, exponentiate = FALSE, p_adjust = NULL, verbose = TRUE, ... ) } \arguments{ \item{model}{An object of class \code{mira}.} \item{ci}{Confidence Interval (CI) level. Default to \code{0.95} (\verb{95\%}).} \item{exponentiate}{Logical, indicating whether or not to exponentiate the coefficients (and related confidence intervals). This is typical for logistic regression, or more generally speaking, for models with log or logit links. \strong{Note:} Delta-method standard errors are also computed (by multiplying the standard errors by the transformed coefficients). This is to mimic behaviour of other software packages, such as Stata, but these standard errors poorly estimate uncertainty for the transformed coefficient.
The transformed confidence interval more clearly captures this uncertainty. For \code{compare_parameters()}, \code{exponentiate = "nongaussian"} will only exponentiate coefficients from non-Gaussian families.} \item{p_adjust}{Character vector, if not \code{NULL}, indicates the method to adjust p-values. See \code{\link[stats:p.adjust]{stats::p.adjust()}} for details. Further possible adjustment methods are \code{"tukey"}, \code{"scheffe"}, \code{"sidak"} and \code{"none"} to explicitly disable adjustment for \code{emmGrid} objects (from \pkg{emmeans}).} \item{verbose}{Toggle warnings and messages.} \item{...}{Arguments passed to or from other methods.} } \description{ Format models of class \code{mira}, obtained from \code{mice::with.mids()}. } \details{ \code{model_parameters()} for objects of class \code{mira} works similarly to \code{summary(mice::pool())}, i.e. it generates the pooled summary of multiple imputed repeated regression analyses. } \examples{ library(parameters) if (require("mice", quietly = TRUE)) { data(nhanes2) imp <- mice(nhanes2) fit <- with(data = imp, exp = lm(bmi ~ age + hyp + chl)) model_parameters(fit) } \dontrun{ # model_parameters() also works for models that have no "tidy"-method in mice if (require("mice", quietly = TRUE) && require("gee", quietly = TRUE)) { data(warpbreaks) set.seed(1234) warpbreaks$tension[sample(1:nrow(warpbreaks), size = 10)] <- NA imp <- mice(warpbreaks) fit <- with(data = imp, expr = gee(breaks ~ tension, id = wool)) # does not work: # summary(pool(fit)) model_parameters(fit) } } # and it works with pooled results if (require("mice")) { data("nhanes2") imp <- mice(nhanes2) fit <- with(data = imp, exp = lm(bmi ~ age + hyp + chl)) pooled <- pool(fit) model_parameters(pooled) } } parameters/man/dot-factor_to_dummy.Rd0000644000175000017500000000050613636467450017607 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/utils.R \name{.factor_to_dummy} \alias{.factor_to_dummy}
\title{Safe transformation from factor/character to numeric} \usage{ .factor_to_dummy(x) } \description{ Safe transformation from factor/character to numeric } \keyword{internal} parameters/man/bootstrap_model.Rd0000644000175000017500000000607314133733476017027 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/bootstrap_model.R \name{bootstrap_model} \alias{bootstrap_model} \alias{bootstrap_model.default} \alias{bootstrap_model.merMod} \title{Model bootstrapping} \usage{ bootstrap_model(model, iterations = 1000, ...) \method{bootstrap_model}{default}( model, iterations = 1000, type = "ordinary", parallel = c("no", "multicore", "snow"), n_cpus = 1, verbose = FALSE, ... ) \method{bootstrap_model}{merMod}( model, iterations = 1000, type = "parametric", parallel = c("no", "multicore", "snow"), n_cpus = 1, verbose = FALSE, ... ) } \arguments{ \item{model}{Statistical model.} \item{iterations}{The number of draws to simulate/bootstrap.} \item{...}{Arguments passed to or from other methods.} \item{type}{Character string specifying the type of bootstrap. For mixed models of class \code{merMod} or \code{glmmTMB}, may be \code{"parametric"} (default) or \code{"semiparametric"} (see \code{?lme4::bootMer} for details). For all other models, see argument \code{sim} in \code{?boot::boot} (defaults to \code{"ordinary"}).} \item{parallel}{The type of parallel operation to be used (if any).} \item{n_cpus}{Number of processes to be used in parallel operation.} \item{verbose}{Toggle warnings and messages.} } \value{ A data frame of bootstrapped estimates. } \description{ Bootstrap a statistical model n times to return a data frame of estimates. } \details{ By default, \code{boot::boot()} is used to generate bootstraps from the model data, which are then used to \code{update()} the model, i.e. refit the model with the bootstrapped samples. 
For \code{merMod} objects (\strong{lme4}) or models from \strong{glmmTMB}, the \code{lme4::bootMer()} function is used to obtain bootstrapped samples. \code{bootstrap_parameters()} summarizes the bootstrapped model estimates. } \section{Using with \strong{emmeans}}{ The output can be passed directly to the various functions from the \strong{emmeans} package, to obtain bootstrapped estimates, contrasts, simple slopes, etc. and their confidence intervals. These can then be passed to \code{model_parameters()} to obtain standard errors, p-values, etc. (see example). \cr\cr Note that the p-values returned here are estimated under the assumption of \emph{translation equivariance}: that the shape of the sampling distribution is unaffected by the null being true or not. If this assumption does not hold, p-values can be biased, and it is suggested to use proper permutation tests to obtain non-parametric p-values. } \examples{ \dontrun{ if (require("boot", quietly = TRUE)) { model <- lm(mpg ~ wt + factor(cyl), data = mtcars) b <- bootstrap_model(model) print(head(b)) if (require("emmeans", quietly = TRUE)) { est <- emmeans(b, consec ~ cyl) print(model_parameters(est)) } } } } \seealso{ \code{\link[=bootstrap_parameters]{bootstrap_parameters()}}, \code{\link[=simulate_model]{simulate_model()}}, \code{\link[=simulate_parameters]{simulate_parameters()}} } parameters/man/p_value.DirichletRegModel.Rd0000644000175000017500000000307114160324505020572 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/methods_DirichletReg.R, R/methods_averaging.R, % R/methods_betareg.R, R/methods_cgam.R, R/methods_ordinal.R \name{p_value.DirichletRegModel} \alias{p_value.DirichletRegModel} \alias{p_value.averaging} \alias{p_value.betareg} \alias{p_value.cgam} \alias{p_value.clm2} \title{p-values for Models with Special Components} \usage{ \method{p_value}{DirichletRegModel}(model, component = c("all", "conditional", "precision"), ...)
\method{p_value}{averaging}(model, component = c("conditional", "full"), ...) \method{p_value}{betareg}(model, component = c("all", "conditional", "precision"), ...) \method{p_value}{cgam}(model, component = c("all", "conditional", "smooth_terms"), ...) \method{p_value}{clm2}(model, component = c("all", "conditional", "scale"), ...) } \arguments{ \item{model}{A statistical model.} \item{component}{Should all parameters, parameters for the conditional model, precision- or scale-component or smooth_terms be returned? \code{component} may be one of \code{"conditional"}, \code{"precision"}, \code{"scale"}, \code{"smooth_terms"}, \code{"full"} or \code{"all"} (default).} \item{...}{Arguments passed down to \code{standard_error_robust()} when confidence intervals or p-values based on robust standard errors should be computed. Only available for models where \code{method = "robust"} is supported.} } \value{ The p-values. } \description{ This function attempts to return, or compute, p-values of models with special model components. } parameters/man/dot-compact_list.Rd0000644000175000017500000000042313636467450017073 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/utils.R \name{.compact_list} \alias{.compact_list} \title{remove NULL elements from lists} \usage{ .compact_list(x) } \description{ remove NULL elements from lists } \keyword{internal} parameters/man/convert_efa_to_cfa.Rd0000644000175000017500000000262014100573643017422 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/convert_efa_to_cfa.R \name{convert_efa_to_cfa} \alias{convert_efa_to_cfa} \alias{convert_efa_to_cfa.fa} \alias{efa_to_cfa} \title{Conversion between EFA results and CFA structure} \usage{ convert_efa_to_cfa(model, ...) \method{convert_efa_to_cfa}{fa}(model, threshold = "max", names = NULL, ...) efa_to_cfa(model, ...) 
} \arguments{ \item{model}{An EFA model (e.g., a \code{psych::fa} object).} \item{...}{Arguments passed to or from other methods.} \item{threshold}{A value between 0 and 1 indicates which (absolute) values from the loadings should be removed. An integer higher than 1 indicates the n strongest loadings to retain. Can also be \code{"max"}, in which case it will only display the maximum loading per variable (the most simple structure).} \item{names}{Vector containing dimension names.} } \value{ Converted index. } \description{ Enables a conversion between Exploratory Factor Analysis (EFA) and Confirmatory Factor Analysis (CFA) \code{lavaan}-ready structure. } \examples{ \donttest{ library(parameters) if (require("psych") && require("lavaan")) { efa <- psych::fa(attitude, nfactors = 3) model1 <- efa_to_cfa(efa) model2 <- efa_to_cfa(efa, threshold = 0.3) anova( lavaan::cfa(model1, data = attitude), lavaan::cfa(model2, data = attitude) ) } } } parameters/man/format_order.Rd0000644000175000017500000000130014100062343016257 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/format_order.R \name{format_order} \alias{format_order} \title{Order (first, second, ...) formatting} \usage{ format_order(order, textual = TRUE, ...) } \arguments{ \item{order}{value or vector of orders.} \item{textual}{Return number as words. If \code{FALSE}, will run \code{\link[insight:format_value]{insight::format_value()}}.} \item{...}{Arguments to be passed to \code{\link[insight:format_value]{format_value()}} if \code{textual} is \code{FALSE}.} } \value{ A formatted string. } \description{ Format order. 
} \examples{ format_order(2) format_order(8) format_order(25, textual = FALSE) } parameters/man/model_parameters.rma.Rd0000644000175000017500000001022114135322113017701 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/methods_metafor.R \name{model_parameters.rma} \alias{model_parameters.rma} \title{Parameters from Meta-Analysis} \usage{ \method{model_parameters}{rma}( model, ci = 0.95, bootstrap = FALSE, iterations = 1000, standardize = NULL, exponentiate = FALSE, include_studies = TRUE, verbose = TRUE, ... ) } \arguments{ \item{model}{Model object.} \item{ci}{Confidence Interval (CI) level. Default to \code{0.95} (\verb{95\%}).} \item{bootstrap}{Should estimates be based on bootstrapped model? If \code{TRUE}, then arguments of \link[=model_parameters.stanreg]{Bayesian regressions} apply (see also \code{\link[=bootstrap_parameters]{bootstrap_parameters()}}).} \item{iterations}{The number of bootstrap replicates. This only apply in the case of bootstrapped frequentist models.} \item{standardize}{The method used for standardizing the parameters. Can be \code{NULL} (default; no standardization), \code{"refit"} (for re-fitting the model on standardized data) or one of \code{"basic"}, \code{"posthoc"}, \code{"smart"}, \code{"pseudo"}. See 'Details' in \code{\link[effectsize:standardize_parameters]{effectsize::standardize_parameters()}}. \strong{Important:} \itemize{ \item The \code{"refit"} method does \emph{not} standardized categorical predictors (i.e. factors), which may be a different behaviour compared to other R packages (such as \pkg{lm.beta}) or other software packages (like SPSS). to mimic such behaviours, either use \code{standardize="basic"} or standardize the data with \code{datawizard::standardize(force=TRUE)} \emph{before} fitting the model. \item For mixed models, when using methods other than \code{"refit"}, only the fixed effects will be returned. \item Robust estimation (i.e. 
\code{robust=TRUE}) of standardized parameters only works when \code{standardize="refit"}. }} \item{exponentiate}{Logical, indicating whether or not to exponentiate the the coefficients (and related confidence intervals). This is typical for logistic regression, or more generally speaking, for models with log or logit links. \strong{Note:} Delta-method standard errors are also computed (by multiplying the standard errors by the transformed coefficients). This is to mimic behaviour of other software packages, such as Stata, but these standard errors poorly estimate uncertainty for the transformed coefficient. The transformed confidence interval more clearly captures this uncertainty. For \code{compare_parameters()}, \code{exponentiate = "nongaussian"} will only exponentiate coefficients from non-Gaussian families.} \item{include_studies}{Logical, if \code{TRUE} (default), includes parameters for all studies. Else, only parameters for overall-effects are shown.} \item{verbose}{Toggle warnings and messages.} \item{...}{Arguments passed to or from other methods. For instance, when \code{bootstrap = TRUE}, arguments like \code{type} or \code{parallel} are passed down to \code{bootstrap_model()}, and arguments like \code{ci_method} are passed down to \code{\link[bayestestR:describe_posterior]{bayestestR::describe_posterior()}}.} } \value{ A data frame of indices related to the model's parameters. } \description{ Extract and compute indices and measures to describe parameters of meta-analysis models. 
} \examples{ library(parameters) mydat <<- data.frame( effectsize = c(-0.393, 0.675, 0.282, -1.398), stderr = c(0.317, 0.317, 0.13, 0.36) ) if (require("metafor", quietly = TRUE)) { model <- rma(yi = effectsize, sei = stderr, method = "REML", data = mydat) model_parameters(model) } \dontrun{ # with subgroups if (require("metafor", quietly = TRUE)) { data(dat.bcg) dat <- escalc( measure = "RR", ai = tpos, bi = tneg, ci = cpos, di = cneg, data = dat.bcg ) dat$alloc <- ifelse(dat$alloc == "random", "random", "other") model <- rma(yi, vi, mods = ~alloc, data = dat, digits = 3, slab = author) model_parameters(model) } if (require("metaBMA", quietly = TRUE)) { data(towels) m <- meta_random(logOR, SE, study, data = towels) model_parameters(m) } } } parameters/man/degrees_of_freedom.Rd0000644000175000017500000000730714160324505017423 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/dof.R \name{degrees_of_freedom} \alias{degrees_of_freedom} \alias{degrees_of_freedom.default} \alias{dof} \title{Degrees of Freedom (DoF)} \usage{ degrees_of_freedom(model, ...) \method{degrees_of_freedom}{default}(model, method = "analytical", ...) dof(model, ...) } \arguments{ \item{model}{A statistical model.} \item{...}{Currently not used.} \item{method}{Can be \code{"analytical"} (default, DoFs are estimated based on the model type), \code{"residual"} in which case they are directly taken from the model if available (for Bayesian models, the goal (looking for help to make it happen) would be to refit the model as a frequentist one before extracting the DoFs), \code{"ml1"} (see \code{\link[=dof_ml1]{dof_ml1()}}), \code{"betwithin"} (see \code{\link[=dof_betwithin]{dof_betwithin()}}), \code{"satterthwaite"} (see \code{\link[=dof_satterthwaite]{dof_satterthwaite()}}), \code{"kenward"} (see \code{\link[=dof_kenward]{dof_kenward()}}) or \code{"any"}, which tries to extract DoF by any of those methods, whichever succeeds. 
See 'Details'.} } \description{ Estimate or extract degrees of freedom of model parameters. } \details{ Methods for calculating degrees of freedom: \itemize{ \item \code{"analytical"} for models of class \code{lmerMod}, Kenward-Roger approximated degrees of freedom are calculated; for other models, \code{n-k} (number of observations minus number of parameters). \item \code{"residual"} tries to extract residual degrees of freedom, and returns \code{Inf} if residual degrees of freedom could not be extracted. \item \code{"any"} first tries to extract residual degrees of freedom, and if these are not available, extracts analytical degrees of freedom. \item \code{"nokr"} same as \code{"analytical"}, but does not apply the Kenward-Roger approximation for models of class \code{lmerMod}. Instead, always uses \code{n-k} to calculate df for any model. \item \code{"normal"} returns \code{Inf}. \item \code{"wald"} returns residual df for models with t-statistic, and \code{Inf} for all other models. \item \code{"kenward"} calls \code{\link[=dof_kenward]{dof_kenward()}}. \item \code{"satterthwaite"} calls \code{\link[=dof_satterthwaite]{dof_satterthwaite()}}. \item \code{"ml1"} calls \code{\link[=dof_ml1]{dof_ml1()}}. \item \code{"betwithin"} calls \code{\link[=dof_betwithin]{dof_betwithin()}}. } For models with z-statistic, the returned degrees of freedom for model parameters is \code{Inf} (unless \code{method = "ml1"} or \code{method = "betwithin"}), because there is only one distribution for the related test statistic. } \note{ In many cases, \code{degrees_of_freedom()} returns the same as \code{df.residual()}, or \code{n-k} (number of observations minus number of parameters). However, \code{degrees_of_freedom()} refers to the model's \emph{parameters} degrees of freedom of the distribution for the related test statistic. Thus, for models with z-statistic, results from \code{degrees_of_freedom()} and \code{df.residual()} differ.
Furthermore, for other approximation methods like \code{"kenward"} or \code{"satterthwaite"}, each model parameter can have a different degree of freedom. } \examples{ model <- lm(Sepal.Length ~ Petal.Length * Species, data = iris) dof(model) model <- glm(vs ~ mpg * cyl, data = mtcars, family = "binomial") dof(model) \dontrun{ if (require("lme4", quietly = TRUE)) { model <- lmer(Sepal.Length ~ Petal.Length + (1 | Species), data = iris) dof(model) } if (require("rstanarm", quietly = TRUE)) { model <- stan_glm( Sepal.Length ~ Petal.Length * Species, data = iris, chains = 2, refresh = 0 ) dof(model) } } } parameters/man/cluster_analysis.Rd0000644000175000017500000001450514160324505017202 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/cluster_analysis.R \name{cluster_analysis} \alias{cluster_analysis} \title{Cluster Analysis} \usage{ cluster_analysis( x, n = NULL, method = "kmeans", include_factors = FALSE, standardize = TRUE, verbose = TRUE, distance_method = "euclidean", hclust_method = "complete", kmeans_method = "Hartigan-Wong", dbscan_eps = 15, iterations = 100, ... ) } \arguments{ \item{x}{A data frame.} \item{n}{Number of clusters used for supervised cluster methods. If \code{NULL}, the number of clusters to extract is determined by calling \code{\link[=n_clusters]{n_clusters()}}. Note that this argument does not apply for unsupervised clustering methods like \code{dbscan}, \code{mixture}, \code{pvclust}, or \code{pamk}.} \item{method}{Method for computing the cluster analysis. 
Can be \code{"kmeans"} (default; k-means using \code{kmeans()}), \code{"hkmeans"} (hierarchical k-means using \code{factoextra::hkmeans()}), \code{pam} (K-Medoids using \code{cluster::pam()}), \code{pamk} (K-Medoids that finds out the number of clusters), \code{"hclust"} (hierarchical clustering using \code{hclust()} or \code{pvclust::pvclust()}), \code{dbscan} (DBSCAN using \code{dbscan::dbscan()}), \code{hdbscan} (Hierarchical DBSCAN using \code{dbscan::hdbscan()}), or \code{mixture} (Mixture modelling using \code{mclust::Mclust()}, which requires the user to run \code{library(mclust)} before).} \item{include_factors}{Logical, if \code{TRUE}, factors are converted to numerical values in order to be included in the data for determining the number of clusters. By default, factors are removed, because most methods that determine the number of clusters need numeric input only.} \item{standardize}{Standardize the dataframe before clustering (default).} \item{verbose}{Toggle warnings and messages.} \item{distance_method}{Distance measure to be used for methods based on distances (e.g., when \code{method = "hclust"} for hierarchical clustering. For other methods, such as \code{"kmeans"}, this argument will be ignored). Must be one of \code{"euclidean"}, \code{"maximum"}, \code{"manhattan"}, \code{"canberra"}, \code{"binary"} or \code{"minkowski"}. See \code{\link[=dist]{dist()}} and \code{pvclust::pvclust()} for more information.} \item{hclust_method}{Agglomeration method to be used when \code{method = "hclust"} or \code{method = "hkmeans"} (for hierarchical clustering). This should be one of \code{"ward"}, \code{"ward.D2"}, \code{"single"}, \code{"complete"}, \code{"average"}, \code{"mcquitty"}, \code{"median"} or \code{"centroid"}. Default is \code{"complete"} (see \code{\link[=hclust]{hclust()}}).} \item{kmeans_method}{Algorithm used for calculating kmeans cluster. Only applies, if \code{method = "kmeans"}. 
May be one of \code{"Hartigan-Wong"} (default), \code{"Lloyd"} (used by SPSS), or \code{"MacQueen"}. See \code{\link[=kmeans]{kmeans()}} for details on this argument.} \item{dbscan_eps}{The 'eps' argument for DBSCAN method. See \code{\link[=n_clusters_dbscan]{n_clusters_dbscan()}}.} \item{iterations}{The number of replications.} \item{...}{Arguments passed to or from other methods.} } \value{ The group classification for each observation as vector. The returned vector includes missing values, so it has the same length as \code{nrow(x)}. } \description{ Compute hierarchical or kmeans cluster analysis and return the group assignment for each observation as vector. } \details{ The \code{print()} and \code{plot()} methods show the (standardized) mean value for each variable within each cluster. Thus, a higher absolute value indicates that a certain variable characteristic is more pronounced within that specific cluster (as compared to other cluster groups with lower absolute mean values). } \note{ There is also a \href{https://easystats.github.io/see/articles/parameters.html}{\code{plot()}-method} implemented in the \href{https://easystats.github.io/see/}{\pkg{see}-package}. 
} \examples{ set.seed(33) # K-Means ==================================================== rez <- cluster_analysis(iris[1:4], n = 3, method = "kmeans") rez # Show results predict(rez) # Get clusters summary(rez) # Extract the centers values (can use 'plot()' on that) cluster_discrimination(rez) # Perform LDA # Hierarchical k-means (more robust k-means) if (require("factoextra", quietly = TRUE)) { rez <- cluster_analysis(iris[1:4], n = 3, method = "hkmeans") rez # Show results predict(rez) # Get clusters } # Hierarchical Clustering (hclust) =========================== rez <- cluster_analysis(iris[1:4], n = 3, method = "hclust") rez # Show results predict(rez) # Get clusters # K-Medoids (pam) ============================================ if (require("cluster", quietly = TRUE)) { rez <- cluster_analysis(iris[1:4], n = 3, method = "pam") rez # Show results predict(rez) # Get clusters } # PAM with automated number of clusters if (require("fpc", quietly = TRUE)) { rez <- cluster_analysis(iris[1:4], method = "pamk") rez # Show results predict(rez) # Get clusters } # DBSCAN ==================================================== if (require("dbscan", quietly = TRUE)) { # Note that you can assimilate more outliers (cluster 0) to neighbouring # clusters by setting borderPoints = TRUE. rez <- cluster_analysis(iris[1:4], method = "dbscan", dbscan_eps = 1.45) rez # Show results predict(rez) # Get clusters } # Mixture ==================================================== if (require("mclust", quietly = TRUE)) { library(mclust) # Needs the package to be loaded rez <- cluster_analysis(iris[1:4], method = "mixture") rez # Show results predict(rez) # Get clusters } } \references{ \itemize{ \item Maechler M, Rousseeuw P, Struyf A, Hubert M, Hornik K (2014) cluster: Cluster Analysis Basics and Extensions. R package. 
} } \seealso{ \itemize{ \item \code{\link[=n_clusters]{n_clusters()}} to determine the number of clusters to extract, \code{\link[=cluster_discrimination]{cluster_discrimination()}} to determine the accuracy of cluster group classification via linear discriminant analysis (LDA) and \code{\link[=check_clusterstructure]{check_clusterstructure()}} to check suitability of data for clustering. \item https://www.datanovia.com/en/lessons/ } } parameters/man/model_parameters.Rd0000644000175000017500000004123614160324505017142 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/1_model_parameters.R \name{model_parameters} \alias{model_parameters} \alias{parameters} \title{Model Parameters} \usage{ model_parameters(model, ...) parameters(model, ...) } \arguments{ \item{model}{Statistical Model.} \item{...}{Arguments passed to or from other methods. Non-documented arguments are \code{digits}, \code{p_digits}, \code{ci_digits} and \code{footer_digits} to set the number of digits for the output. \code{group} can also be passed to the \code{print()} method. See details in \code{\link[=print.parameters_model]{print.parameters_model()}} and 'Examples' in \code{\link[=model_parameters.default]{model_parameters.default()}}.} } \value{ A data frame of indices related to the model's parameters. } \description{ Compute and extract model parameters. 
See the documentation for your object's class: \itemize{ \item{\link[=model_parameters.htest]{Correlations, t-tests, ...} (\code{htest}, \code{pairwise.htest})} \item{\link[=model_parameters.aov]{ANOVAs} (\code{aov}, \code{anova}, \strong{afex}, ...)} \item{\link[=model_parameters.default]{Regression models} (\code{lm}, \code{glm}, \strong{survey}, ...)} \item{\link[=model_parameters.cgam]{Additive models} (\code{gam}, \code{gamm}, ...)} \item{\link[=model_parameters.zcpglm]{Zero-inflated models} (\code{hurdle}, \code{zeroinfl}, \code{zerocount})} \item{\link[=model_parameters.mlm]{Multinomial, ordinal and cumulative link models} (\code{bracl}, \code{multinom}, \code{mlm}, ...)} \item{\link[=model_parameters.averaging]{Other special models} (\code{model.avg}, \code{betareg}, \code{glmx}, ...)} \item{\link[=model_parameters.merMod]{Mixed models} (\pkg{lme4}, \pkg{nlme}, \pkg{glmmTMB}, \pkg{afex}, ...)} \item{\link[=model_parameters.BFBayesFactor]{Bayesian tests} (\pkg{BayesFactor})} \item{\link[=model_parameters.stanreg]{Bayesian models} (\pkg{rstanarm}, \pkg{brms}, \pkg{MCMCglmm}, \pkg{blavaan}, ...)} \item{\link[=model_parameters.principal]{PCA and FA} (\pkg{psych})} \item{\link[=model_parameters.lavaan]{CFA and SEM} (\pkg{lavaan})} \item{\link[=model_parameters.kmeans]{Cluster models} (k-means, ...)} \item{\link[=model_parameters.rma]{Meta-Analysis via linear (mixed) models} (\code{rma}, \code{metaplus}, \pkg{metaBMA}, ...)} \item{\link[=model_parameters.glht]{Hypothesis testing} (\code{glht}, \pkg{PMCMRplus})} \item{\link[=model_parameters.t1way]{Robust statistical tests} (\pkg{WRS2})} \item{\link[=model_parameters.mira]{Multiply imputed repeated analyses} (\code{mira})} } } \note{ The \code{\link[=print.parameters_model]{print()}} method has several arguments to tweak the output. 
There is also a \href{https://easystats.github.io/see/articles/parameters.html}{\code{plot()}-method} implemented in the \href{https://easystats.github.io/see/}{\strong{see}-package}, and a dedicated method for use inside rmarkdown files, \code{\link[=print_md.parameters_model]{print_md()}}. } \section{Standardization of model coefficients}{ Standardization is based on \code{\link[effectsize:standardize_parameters]{effectsize::standardize_parameters()}}. In case of \code{standardize = "refit"}, the data used to fit the model will be standardized and the model is completely refitted. In such cases, standard errors and confidence intervals refer to the standardized coefficient. The default, \code{standardize = "refit"}, never standardizes categorical predictors (i.e. factors), which may be a different behaviour compared to other R packages or other software packages (like SPSS). To mimic behaviour of SPSS or packages such as \pkg{lm.beta}, use \code{standardize = "basic"}. } \section{Standardization Methods}{ \itemize{ \item \strong{refit}: This method is based on a complete model re-fit with a standardized version of the data. Hence, this method is equal to standardizing the variables before fitting the model. It is the "purest" and the most accurate (Neter et al., 1989), but it is also the most computationally costly and long (especially for heavy models such as Bayesian models). This method is particularly recommended for complex models that include interactions or transformations (e.g., polynomial or spline terms). The \code{robust} (default to \code{FALSE}) argument enables a robust standardization of data, i.e., based on the \code{median} and \code{MAD} instead of the \code{mean} and \code{SD}. 
\strong{See \code{\link[=standardize]{standardize()}} for more details.} \strong{Note} that \code{standardize_parameters(method = "refit")} may not return the same results as fitting a model on data that has been standardized with \code{standardize()}; \code{standardize_parameters()} uses the data used by the model fitting function, which might not be the same data if there are missing values. See the \code{remove_na} argument in \code{standardize()}. \item \strong{posthoc}: Post-hoc standardization of the parameters, aiming at emulating the results obtained by "refit" without refitting the model. The coefficients are divided by the standard deviation (or MAD if \code{robust}) of the outcome (which becomes their expression 'unit'). Then, the coefficients related to numeric variables are additionally multiplied by the standard deviation (or MAD if \code{robust}) of the related terms, so that they correspond to changes of 1 SD of the predictor (e.g., "A change in 1 SD of \code{x} is related to a change of 0.24 of the SD of \code{y}). This does not apply to binary variables or factors, so the coefficients are still related to changes in levels. This method is not accurate and tends to give aberrant results when interactions are specified. \item \strong{basic}: This method is similar to \code{method = "posthoc"}, but treats all variables as continuous: it also scales the coefficient by the standard deviation of model's matrix' parameter of factors levels (transformed to integers) or binary predictors. Although being inappropriate for these cases, this method is the one implemented by default in other software packages, such as \code{\link[lm.beta:lm.beta]{lm.beta::lm.beta()}}. \item \strong{smart} (Standardization of Model's parameters with Adjustment, Reconnaissance and Transformation - \emph{experimental}): Similar to \code{method = "posthoc"} in that it does not involve model refitting. 
The difference is that the SD (or MAD if \code{robust}) of the response is computed on the relevant section of the data. For instance, if a factor with 3 levels A (the intercept), B and C is entered as a predictor, the effect corresponding to B vs. A will be scaled by the variance of the response at the intercept only. As a result, the coefficients for effects of factors are similar to a Glass' delta. \item \strong{pseudo} (\emph{for 2-level (G)LMMs only}): In this (post-hoc) method, the response and the predictor are standardized based on the level of prediction (levels are detected with \code{\link[performance:check_heterogeneity_bias]{performance::check_heterogeneity_bias()}}): Predictors are standardized based on their SD at level of prediction (see also \code{\link[datawizard:demean]{datawizard::demean()}}); The outcome (in linear LMMs) is standardized based on a fitted random-intercept-model, where \code{sqrt(random-intercept-variance)} is used for level 2 predictors, and \code{sqrt(residual-variance)} is used for level 1 predictors (Hoffman 2015, page 342). A warning is given when a within-group variable is found to have excess between-group variance. } } \section{Labeling the Degrees of Freedom}{ Throughout the \pkg{parameters} package, we decided to label the residual degrees of freedom \emph{df_error}. The reason for this is that these degrees of freedom do not always refer to the residuals. For certain models, they refer to the estimate error - in a linear model these are the same, but in - for instance - any mixed effects model, this isn't strictly true. Hence, we think that \code{df_error} is the most generic label for these degrees of freedom. } \section{Confidence intervals and approximation of degrees of freedom}{ There are different ways of approximating the degrees of freedom depending on different assumptions about the nature of the model and its sampling distribution. 
The \code{ci_method} argument modulates the method for computing degrees of freedom (df) that are used to calculate confidence intervals (CI) and the related p-values. The following options are allowed, depending on the model class: \strong{Classical methods:} Classical inference is generally based on the \strong{Wald method}. The Wald approach to inference computes a test statistic by dividing the parameter estimate by its standard error (Coefficient / SE), then comparing this statistic against a t- or normal distribution. This approach can be used to compute CIs and p-values. \code{"wald"}: \itemize{ \item Applies to \emph{non-Bayesian models}. For \emph{linear models}, CIs computed using the Wald method (SE and a \emph{t-distribution with residual df}); p-values computed using the Wald method with a \emph{t-distribution with residual df}. For other models, CIs computed using the Wald method (SE and a \emph{normal distribution}); p-values computed using the Wald method with a \emph{normal distribution}. } \code{"normal"} \itemize{ \item Applies to \emph{non-Bayesian models}. Compute Wald CIs and p-values, but always use a normal distribution. } \code{"residual"} \itemize{ \item Applies to \emph{non-Bayesian models}. Compute Wald CIs and p-values, but always use a \emph{t-distribution with residual df} when possible. If the residual df for a model cannot be determined, a normal distribution is used instead. } \strong{Methods for mixed models:} Compared to fixed effects (or single-level) models, determining appropriate df for Wald-based inference in mixed models is more difficult. See \href{https://bbolker.github.io/mixedmodels-misc/glmmFAQ.html#what-are-the-p-values-listed-by-summaryglmerfit-etc.-are-they-reliable}{the R GLMM FAQ} for a discussion. Several approximate methods for computing df are available, but you should also consider using profile likelihood (\code{"profile"}) or bootstrap (\code{"boot"}) CIs and p-values instead. 
\code{"satterthwaite"} \itemize{ \item Applies to \emph{linear mixed models}. CIs computed using the Wald method (SE and a \emph{t-distribution with Satterthwaite df}); p-values computed using the Wald method with a \emph{t-distribution with Satterthwaite df}. } \code{"kenward"} \itemize{ \item Applies to \emph{linear mixed models}. CIs computed using the Wald method (\emph{Kenward-Roger SE} and a \emph{t-distribution with Kenward-Roger df}); p-values computed using the Wald method with \emph{Kenward-Roger SE and t-distribution with Kenward-Roger df}. } \code{"ml1"} \itemize{ \item Applies to \emph{linear mixed models}. CIs computed using the Wald method (SE and a \emph{t-distribution with m-l-1 approximated df}); p-values computed using the Wald method with a \emph{t-distribution with m-l-1 approximated df}. See \code{\link[=ci_ml1]{ci_ml1()}}. } \code{"betwithin"} \itemize{ \item Applies to \emph{linear mixed models} and \emph{generalized linear mixed models}. CIs computed using the Wald method (SE and a \emph{t-distribution with between-within df}); p-values computed using the Wald method with a \emph{t-distribution with between-within df}. See \code{\link[=ci_betwithin]{ci_betwithin()}}. } \strong{Likelihood-based methods:} Likelihood-based inference is based on comparing the likelihood for the maximum-likelihood estimate to the likelihood for models with one or more parameter values changed (e.g., set to zero or a range of alternative values). Likelihood ratios for the maximum-likelihood and alternative models are compared to a \eqn{\chi}-squared distribution to compute CIs and p-values. \code{"profile"} \itemize{ \item Applies to \emph{non-Bayesian models} of class \code{glm}, \code{polr} or \code{glmmTMB}. 
CIs computed by \emph{profiling the likelihood curve for a parameter}, using linear interpolation to find where likelihood ratio equals a critical value; p-values computed using the Wald method with a \emph{normal-distribution} (note: this might change in a future update!) } \code{"uniroot"} \itemize{ \item Applies to \emph{non-Bayesian models} of class \code{glmmTMB}. CIs computed by \emph{profiling the likelihood curve for a parameter}, using root finding to find where likelihood ratio equals a critical value; p-values computed using the Wald method with a \emph{normal-distribution} (note: this might change in a future update!) } \strong{Methods for bootstrapped or Bayesian models:} Bootstrap-based inference is based on \strong{resampling} and refitting the model to the resampled datasets. The distribution of parameter estimates across resampled datasets is used to approximate the parameter's sampling distribution. Depending on the type of model, several different methods for bootstrapping and constructing CIs and p-values from the bootstrap distribution are available. For Bayesian models, inference is based on drawing samples from the model posterior distribution. \code{"quantile"} (or \code{"eti"}) \itemize{ \item Applies to \emph{all models (including Bayesian models)}. For non-Bayesian models, only applies if \code{bootstrap = TRUE}. CIs computed as \emph{equal tailed intervals} using the quantiles of the bootstrap or posterior samples; p-values are based on the \emph{probability of direction}. See \code{\link[bayestestR:eti]{bayestestR::eti()}}. } \code{"hdi"} \itemize{ \item Applies to \emph{all models (including Bayesian models)}. For non-Bayesian models, only applies if \code{bootstrap = TRUE}. CIs computed as \emph{highest density intervals} for the bootstrap or posterior samples; p-values are based on the \emph{probability of direction}. See \code{\link[bayestestR:hdi]{bayestestR::hdi()}}. 
} \code{"bci"} (or \code{"bcai"}) \itemize{ \item Applies to \emph{all models (including Bayesian models)}. For non-Bayesian models, only applies if \code{bootstrap = TRUE}. CIs computed as \emph{bias corrected and accelerated intervals} for the bootstrap or posterior samples; p-values are based on the \emph{probability of direction}. See \code{\link[bayestestR:bci]{bayestestR::bci()}}. } \code{"si"} \itemize{ \item Applies to \emph{Bayesian models} with proper priors. CIs computed as \emph{support intervals} comparing the posterior samples against the prior samples; p-values are based on the \emph{probability of direction}. See \code{\link[bayestestR:si]{bayestestR::si()}}. } \code{"boot"} \itemize{ \item Applies to \emph{non-Bayesian models} of class \code{merMod}. CIs computed using \emph{parametric bootstrapping} (simulating data from the fitted model); p-values computed using the Wald method with a \emph{normal distribution} (note: this might change in a future update!). } For all iteration-based methods other than \code{"boot"} (\code{"hdi"}, \code{"quantile"}, \code{"ci"}, \code{"eti"}, \code{"si"}, \code{"bci"}, \code{"bcai"}), p-values are based on the probability of direction (\code{\link[bayestestR:p_direction]{bayestestR::p_direction()}}), which is converted into a p-value using \code{\link[bayestestR:pd_to_p]{bayestestR::pd_to_p()}}. } \section{Interpretation of Interaction Terms}{ Note that the \emph{interpretation} of interaction terms depends on many characteristics of the model. The number of parameters, and overall performance of the model, can differ \emph{or not} between \code{a * b}, \code{a : b}, and \code{a / b}, suggesting that sometimes interaction terms give different parameterizations of the same model, but other times it gives completely different models (depending on \code{a} or \code{b} being factors or covariates, included as main effects or not, etc.). 
Their interpretation depends on the full context of the model, which should not be inferred from the parameters table alone - rather, we recommend using packages that calculate estimated marginal means or marginal effects, such as \CRANpkg{modelbased}, \CRANpkg{emmeans} or \CRANpkg{ggeffects}. To raise awareness for this issue, you may use \code{print(...,show_formula=TRUE)} to add the model-specification to the output of the \code{\link[=print.parameters_model]{print()}} method for \code{model_parameters()}. } \references{ \itemize{ \item Hoffman, L. (2015). Longitudinal analysis: Modeling within-person fluctuation and change. Routledge. \item Neter, J., Wasserman, W., & Kutner, M. H. (1989). Applied linear regression models. } } \seealso{ \code{\link[insight:standardize_names]{insight::standardize_names()}} to rename columns into a consistent, standardized naming scheme. } parameters/man/format_p_adjust.Rd0000644000175000017500000000110613762372716017006 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/format_p_adjust.R \name{format_p_adjust} \alias{format_p_adjust} \title{Format the name of the p-value adjustment methods} \usage{ format_p_adjust(method) } \arguments{ \item{method}{Name of the method.} } \value{ A string with the full surname(s) of the author(s), including year of publication, for the adjustment-method. } \description{ Format the name of the p-value adjustment methods. 
} \examples{ library(parameters) format_p_adjust("holm") format_p_adjust("bonferroni") } parameters/man/print.parameters_model.Rd0000644000175000017500000001613314143162015020270 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/print.parameters_model.R \name{print.parameters_model} \alias{print.parameters_model} \alias{summary.parameters_model} \title{Print model parameters} \usage{ \method{print}{parameters_model}( x, pretty_names = TRUE, split_components = TRUE, select = NULL, caption = NULL, digits = 2, ci_digits = 2, p_digits = 3, footer_digits = 3, show_sigma = FALSE, show_formula = FALSE, zap_small = FALSE, groups = NULL, column_width = NULL, ci_brackets = c("[", "]"), ... ) \method{summary}{parameters_model}(object, ...) } \arguments{ \item{x, object}{An object returned by \code{\link[=model_parameters]{model_parameters()}}.} \item{pretty_names}{Return "pretty" (i.e. more human readable) parameter names.} \item{split_components}{Logical, if \code{TRUE} (default), For models with multiple components (zero-inflation, smooth terms, ...), each component is printed in a separate table. If \code{FALSE}, model parameters are printed in a single table and a \code{Component} column is added to the output.} \item{select}{Character vector (or numeric index) of column names that should be printed. If \code{NULL} (default), all columns are printed. The shortcut \code{select = "minimal"} prints coefficient, confidence intervals and p-values, while \code{select = "short"} prints coefficient, standard errors and p-values.} \item{caption}{Table caption as string. If \code{NULL}, no table caption is printed.} \item{digits, ci_digits, p_digits}{Number of digits for rounding or significant figures. May also be \code{"signif"} to return significant figures or \code{"scientific"} to return scientific notation. Control the number of digits by adding the value as suffix, e.g. 
\code{digits = "scientific4"} to have scientific notation with 4 decimal places, or \code{digits = "signif5"} for 5 significant figures (see also \code{\link[=signif]{signif()}}).} \item{footer_digits}{Number of decimal places for values in the footer summary.} \item{show_sigma}{Logical, if \code{TRUE}, adds information about the residual standard deviation.} \item{show_formula}{Logical, if \code{TRUE}, adds the model formula to the output.} \item{zap_small}{Logical, if \code{TRUE}, small values are rounded after \code{digits} decimal places. If \code{FALSE}, values with more decimal places than \code{digits} are printed in scientific notation.} \item{groups}{Named list, can be used to group parameters in the printed output. List elements may either be character vectors that match the name of those parameters that belong to one group, or list elements can be row numbers of those parameter rows that should belong to one group. The names of the list elements will be used as group names, which will be inserted as "header row". A possible use case might be to emphasize focal predictors and control variables, see 'Examples'. Parameters will be re-ordered according to the order used in \code{groups}, while all non-matching parameters will be added to the end.} \item{column_width}{Width of table columns. Can be either \code{NULL}, a named numeric vector, or \code{"fixed"}. If \code{NULL}, the width for each table column is adjusted to the minimum required width. If a named numeric vector, value names are matched against column names, and for each match, the specified width is used. If \code{"fixed"}, and table is split into multiple components, columns across all table components are adjusted to have the same width.} \item{ci_brackets}{Logical, if \code{TRUE} (default), CI-values are encompassed in square brackets (else in parentheses).} \item{...}{Arguments passed to or from other methods.} } \value{ Invisibly returns the original input object. 
} \description{ A \code{print()}-method for objects from \code{\link[=model_parameters]{model_parameters()}}. } \details{ \code{summary()} is a convenient shortcut for \code{print(object, select = "minimal", show_sigma = TRUE, show_formula = TRUE)}. } \section{Interpretation of Interaction Terms}{ Note that the \emph{interpretation} of interaction terms depends on many characteristics of the model. The number of parameters, and overall performance of the model, can differ \emph{or not} between \code{a * b}, \code{a : b}, and \code{a / b}, suggesting that sometimes interaction terms give different parameterizations of the same model, but other times it gives completely different models (depending on \code{a} or \code{b} being factors or covariates, included as main effects or not, etc.). Their interpretation depends on the full context of the model, which should not be inferred from the parameters table alone - rather, we recommend using packages that calculate estimated marginal means or marginal effects, such as \CRANpkg{modelbased}, \CRANpkg{emmeans} or \CRANpkg{ggeffects}. To raise awareness for this issue, you may use \code{print(...,show_formula=TRUE)} to add the model-specification to the output of the \code{\link[=print.parameters_model]{print()}} method for \code{model_parameters()}. } \section{Labeling the Degrees of Freedom}{ Throughout the \pkg{parameters} package, we decided to label the residual degrees of freedom \emph{df_error}. The reason for this is that these degrees of freedom do not always refer to the residuals. For certain models, they refer to the estimate error - in a linear model these are the same, but in - for instance - any mixed effects model, this isn't strictly true. Hence, we think that \code{df_error} is the most generic label for these degrees of freedom. 
} \examples{ \donttest{ library(parameters) if (require("glmmTMB", quietly = TRUE)) { model <- glmmTMB( count ~ spp + mined + (1 | site), ziformula = ~mined, family = poisson(), data = Salamanders ) mp <- model_parameters(model) print(mp, pretty_names = FALSE) print(mp, split_components = FALSE) print(mp, select = c("Parameter", "Coefficient", "SE")) print(mp, select = "minimal") } # group parameters ------ data(iris) model <- lm( Sepal.Width ~ Sepal.Length + Species + Petal.Length, data = iris ) # don't select "Intercept" parameter mp <- model_parameters(model, parameters = "^(?!\\\\(Intercept)") groups <- list( "Focal Predictors" = c("Speciesversicolor", "Speciesvirginica"), "Controls" = c("Sepal.Length", "Petal.Length") ) print(mp, groups = groups) # or use row indices print(mp, groups = list( "Focal Predictors" = c(1, 4), "Controls" = c(2, 3) )) # only show coefficients, CI and p, # put non-matched parameters to the end data(mtcars) mtcars$cyl <- as.factor(mtcars$cyl) mtcars$gear <- as.factor(mtcars$gear) model <- lm(mpg ~ hp + gear * vs + cyl + drat, data = mtcars) # don't select "Intercept" parameter mp <- model_parameters(model, parameters = "^(?!\\\\(Intercept)") print(mp, groups = list( "Engine" = c("cyl6", "cyl8", "vs", "hp"), "Interactions" = c("gear4:vs", "gear5:vs") )) } } \seealso{ There is a dedicated method to use inside rmarkdown files, \code{\link[=print_md.parameters_model]{print_md()}}. } parameters/man/dot-data_frame.Rd0000644000175000017500000000035513636467450016501 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/utils.R \name{.data_frame} \alias{.data_frame} \title{help-functions} \usage{ .data_frame(...) 
} \description{ help-functions } \keyword{internal} parameters/man/check_sphericity_bartlett.Rd0000644000175000017500000000255114160324505021035 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/check_factorstructure.R \name{check_sphericity_bartlett} \alias{check_sphericity_bartlett} \title{Bartlett's Test of Sphericity} \usage{ check_sphericity_bartlett(x, ...) } \arguments{ \item{x}{A dataframe.} \item{...}{Arguments passed to or from other methods.} } \value{ A list of indices related to sphericity. } \description{ Bartlett's (1951) test of sphericity tests whether a matrix (of correlations) is significantly different from an identity matrix. The test provides probability that the correlation matrix has significant correlations among at least some of the variables in a dataset, a prerequisite for factor analysis to work. In other words, before starting with factor analysis, one needs to check whether Bartlett’s test of sphericity is significant. } \details{ This function is strongly inspired by the \code{cortest.bartlett} function in the \pkg{psych} package (Revelle, 2016). All credit goes to its author. } \examples{ library(parameters) check_sphericity_bartlett(mtcars) } \references{ \itemize{ \item Revelle, W. (2016). How To: Use the psych package for Factor Analysis and data reduction. \item Bartlett, M. S. (1951). The effect of standardization on a Chi-square approximation in factor analysis. Biometrika, 38(3/4), 337-344. } } parameters/man/p_value.Rd0000644000175000017500000000610514160324505015246 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/3_p_value.R, R/methods_emmeans.R \name{p_value} \alias{p_value} \alias{p_value.default} \alias{p_value.emmGrid} \title{p-values} \usage{ p_value(model, ...) \method{p_value}{default}( model, dof = NULL, method = NULL, robust = FALSE, component = "all", verbose = TRUE, ... 
) \method{p_value}{emmGrid}(model, ci = 0.95, adjust = "none", ...) } \arguments{ \item{model}{A statistical model.} \item{...}{Arguments passed down to \code{standard_error_robust()} when confidence intervals or p-values based on robust standard errors should be computed. Only available for models where \code{method = "robust"} is supported.} \item{dof}{Number of degrees of freedom to be used when calculating confidence intervals. If \code{NULL} (default), the degrees of freedom are retrieved by calling \code{\link[=degrees_of_freedom]{degrees_of_freedom()}} with approximation method defined in \code{method}. If not \code{NULL}, use this argument to override the default degrees of freedom used to compute confidence intervals.} \item{method}{If \code{"robust"}, and if model is supported by the \pkg{sandwich} or \pkg{clubSandwich} packages, computes p-values based on robust covariance matrix estimation.} \item{robust}{Logical, if \code{TRUE}, computes confidence intervals (or p-values) based on robust standard errors. See \code{\link[=standard_error_robust]{standard_error_robust()}}.} \item{component}{Model component for which parameters should be shown. See the documentation for your object's class in \code{\link[=model_parameters]{model_parameters()}} for further details.} \item{verbose}{Toggle warnings and messages.} \item{ci}{Confidence Interval (CI) level. Default to \code{0.95} (\verb{95\%}).} \item{adjust}{Character value naming the method used to adjust p-values or confidence intervals. See \code{?emmeans::summary.emmGrid} for details.} } \value{ A data frame with at least two columns: the parameter names and the p-values. Depending on the model, may also include columns for model components etc. } \description{ This function attempts to return, or compute, p-values of a model's parameters. 
See the documentation for your object's class: \itemize{ \item{\link[=p_value.BFBayesFactor]{Bayesian models} (\pkg{rstanarm}, \pkg{brms}, \pkg{MCMCglmm}, ...)} \item{\link[=p_value.zeroinfl]{Zero-inflated models} (\code{hurdle}, \code{zeroinfl}, \code{zerocount}, ...)} \item{\link[=p_value.poissonmfx]{Marginal effects models} (\pkg{mfx})} \item{\link[=p_value.DirichletRegModel]{Models with special components} (\code{DirichletRegModel}, \code{clm2}, \code{cgam}, ...)} } } \note{ \code{p_value_robust()} resp. \code{p_value(robust = TRUE)} rely on the \pkg{sandwich} or \pkg{clubSandwich} package (the latter if \code{vcov_estimation = "CR"} for cluster-robust standard errors) and will thus only work for those models supported by those packages. } \examples{ data(iris) model <- lm(Petal.Length ~ Sepal.Length + Species, data = iris) p_value(model) } parameters/man/n_clusters.Rd0000644000175000017500000001464714135275207016014 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/n_clusters.R, R/n_clusters_easystats.R \name{n_clusters} \alias{n_clusters} \alias{n_clusters_elbow} \alias{n_clusters_gap} \alias{n_clusters_silhouette} \alias{n_clusters_dbscan} \alias{n_clusters_hclust} \title{Find number of clusters in your data} \usage{ n_clusters( x, standardize = TRUE, include_factors = FALSE, package = c("easystats", "NbClust", "mclust"), fast = TRUE, nbclust_method = "kmeans", n_max = 10, ... ) n_clusters_elbow( x, standardize = TRUE, include_factors = FALSE, clustering_function = stats::kmeans, n_max = 10, ... ) n_clusters_gap( x, standardize = TRUE, include_factors = FALSE, clustering_function = stats::kmeans, n_max = 10, gap_method = "firstSEmax", ... ) n_clusters_silhouette( x, standardize = TRUE, include_factors = FALSE, clustering_function = stats::kmeans, n_max = 10, ... ) n_clusters_dbscan( x, standardize = TRUE, include_factors = FALSE, method = c("kNN", "SS"), min_size = 0.1, eps_n = 50, eps_range = c(0.1, 3), ... 
) n_clusters_hclust( x, standardize = TRUE, include_factors = FALSE, distance_method = "correlation", hclust_method = "average", ci = 0.95, iterations = 100, ... ) } \arguments{ \item{x}{A data frame.} \item{standardize}{Standardize the dataframe before clustering (default).} \item{include_factors}{Logical, if \code{TRUE}, factors are converted to numerical values in order to be included in the data for determining the number of clusters. By default, factors are removed, because most methods that determine the number of clusters need numeric input only.} \item{package}{Package from which methods are to be called to determine the number of clusters. Can be \code{"all"} or a vector containing \code{"NbClust"}, \code{"mclust"}, \code{"cluster"} and \code{"M3C"}.} \item{fast}{If \code{FALSE}, will compute 4 more indices (sets \code{index = "alllong"} in \code{NbClust}). This has been deactivated by default as it is computationally heavy.} \item{nbclust_method}{The clustering method (passed to \code{NbClust::NbClust()} as \code{method}).} \item{n_max}{Maximal number of clusters to test.} \item{...}{Arguments passed to or from other methods.} \item{clustering_function, gap_method}{Other arguments passed to other functions. \code{clustering_function} is used by \code{fviz_nbclust} and can be \code{kmeans}, \code{cluster::pam}, \code{cluster::clara}, \code{cluster::fanny}, and more. \code{gap_method} is used by \code{cluster::maxSE} to extract the optimal numbers of clusters (see its \code{method} argument).} \item{method, min_size, eps_n, eps_range}{Arguments for DBSCAN algorithm.} \item{distance_method}{The distance method (passed to \code{\link[=dist]{dist()}}). Used by algorithms relying on the distance matrix, such as \code{hclust} or \code{dbscan}.} \item{hclust_method}{The hierarchical clustering method (passed to \code{\link[=hclust]{hclust()}}).} \item{ci}{Confidence Interval (CI) level. 
Default to \code{0.95} (\verb{95\%}).} \item{iterations}{The number of bootstrap replicates. This only applies in the case of bootstrapped frequentist models.} } \description{ Similarly to \code{\link[=n_factors]{n_factors()}} for factor / principal component analysis, \code{n_clusters} is the main function to find out the optimal numbers of clusters present in the data based on the maximum consensus of a large number of methods. \cr Essentially, there exist many methods to determine the optimal number of clusters, each with pros and cons, benefits and limitations. The main \code{n_clusters} function proposes to run all of them, and find out the number of clusters that is suggested by the majority of methods (in case of ties, it will select the most parsimonious solution with fewer clusters). \cr Note that we also implement some specific, commonly used methods, like the Elbow or the Gap method, with their own visualization functionalities. See the examples below for more details. } \note{ There is also a \href{https://easystats.github.io/see/articles/parameters.html}{\code{plot()}-method} implemented in the \href{https://easystats.github.io/see/}{\pkg{see}-package}. 
} \examples{ \dontrun{ library(parameters) # The main 'n_clusters' function =============================== if (require("mclust", quietly = TRUE) && require("NbClust", quietly = TRUE) && require("cluster", quietly = TRUE) && require("see", quietly = TRUE)) { n <- n_clusters(iris[, 1:4], package = c("NbClust", "mclust", "cluster")) n summary(n) as.data.frame(n) plot(n) # The following runs all the method but it significantly slower # n_clusters(iris[1:4], standardize = FALSE, package = "all", fast = FALSE) } } \donttest{ # # Specific Methods ========================= # Elbow method -------------------- if (require("openxlsx", quietly = TRUE) && require("see", quietly = TRUE) && require("factoextra", quietly = TRUE)) { x <- n_clusters_elbow(iris[1:4]) x as.data.frame(x) plot(x) } } \donttest{ # # Gap method -------------------- if (require("see", quietly = TRUE) && require("cluster", quietly = TRUE) && require("factoextra", quietly = TRUE)) { x <- n_clusters_gap(iris[1:4]) x as.data.frame(x) plot(x) } } \donttest{ # # Silhouette method -------------------------- if (require("factoextra", quietly = TRUE)) { x <- n_clusters_silhouette(iris[1:4]) x as.data.frame(x) plot(x) } } \donttest{ # if (require("dbscan", quietly = TRUE)) { # DBSCAN method ------------------------- # NOTE: This actually primarily estimates the 'eps' parameter, the number of # clusters is a side effect (it's the number of clusters corresponding to # this 'optimal' EPS parameter). 
x <- n_clusters_dbscan(iris[1:4], method = "kNN", min_size = 0.05) # 5 percent x head(as.data.frame(x)) plot(x) x <- n_clusters_dbscan(iris[1:4], method = "SS", eps_n = 100, eps_range = c(0.1, 2)) x head(as.data.frame(x)) plot(x) } } \donttest{ # # hclust method ------------------------------- if (require("pvclust", quietly = TRUE) && getRversion() >= "3.6.0") { # iterations should be higher for real analyses x <- n_clusters_hclust(iris[1:4], iterations = 50, ci = 0.90) x head(as.data.frame(x), n = 10) # Print 10 first rows plot(x) } } } parameters/man/model_parameters.BFBayesFactor.Rd0000644000175000017500000001116514141263004021544 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/methods_BayesFactor.R \name{model_parameters.BFBayesFactor} \alias{model_parameters.BFBayesFactor} \title{Parameters from BayesFactor objects} \usage{ \method{model_parameters}{BFBayesFactor}( model, centrality = "median", dispersion = FALSE, ci = 0.95, ci_method = "hdi", test = c("pd", "rope"), rope_range = "default", rope_ci = 0.95, priors = TRUE, cohens_d = NULL, cramers_v = NULL, include_proportions = FALSE, verbose = TRUE, ... ) } \arguments{ \item{model}{Object of class \code{BFBayesFactor}.} \item{centrality}{The point-estimates (centrality indices) to compute. Character (vector) or list with one or more of these options: \code{"median"}, \code{"mean"}, \code{"MAP"} or \code{"all"}.} \item{dispersion}{Logical, if \code{TRUE}, computes indices of dispersion related to the estimate(s) (\code{SD} and \code{MAD} for \code{mean} and \code{median}, respectively).} \item{ci}{Value or vector of probability of the CI (between 0 and 1) to be estimated. Default to \code{.95} (\verb{95\%}).} \item{ci_method}{The type of index used for Credible Interval. 
Can be \code{"HDI"} (default, see \code{\link[bayestestR:hdi]{hdi()}}), \code{"ETI"} (see \code{\link[bayestestR:eti]{eti()}}), \code{"BCI"} (see \code{\link[bayestestR:bci]{bci()}}) or \code{"SI"} (see \code{\link[bayestestR:si]{si()}}).} \item{test}{The indices of effect existence to compute. Character (vector) or list with one or more of these options: \code{"p_direction"} (or \code{"pd"}), \code{"rope"}, \code{"p_map"}, \code{"equivalence_test"} (or \code{"equitest"}), \code{"bayesfactor"} (or \code{"bf"}) or \code{"all"} to compute all tests. For each "test", the corresponding \pkg{bayestestR} function is called (e.g. \code{\link[bayestestR:rope]{rope()}} or \code{\link[bayestestR:p_direction]{p_direction()}}) and its results included in the summary output.} \item{rope_range}{ROPE's lower and higher bounds. Should be a list of two values (e.g., \code{c(-0.1, 0.1)}) or \code{"default"}. If \code{"default"}, the bounds are set to \code{x +- 0.1*SD(response)}.} \item{rope_ci}{The Credible Interval (CI) probability, corresponding to the proportion of HDI, to use for the percentage in ROPE.} \item{priors}{Add the prior used for each parameter.} \item{cohens_d}{If \code{TRUE}, compute Cohen's \emph{d} as index of effect size. Only applies to objects from \code{ttestBF()}. See \code{effectsize::cohens_d()} for details.} \item{cramers_v}{Compute Cramer's V or phi as index of effect size. Can be \code{"raw"} or \code{"adjusted"} (effect size will be bias-corrected). Only applies to objects from \code{chisq.test()}.} \item{include_proportions}{Logical that decides whether to include posterior cell proportions/counts for Bayesian contingency table analysis (from \code{BayesFactor::contingencyTableBF()}). Defaults to \code{FALSE}, as this information is often redundant.} \item{verbose}{Toggle off warnings.} \item{...}{Additional arguments to be passed to or from methods.} } \value{ A data frame of indices related to the model's parameters. 
} \description{ Parameters from \code{BFBayesFactor} objects from \code{{BayesFactor}} package. } \details{ The meaning of the extracted parameters: \itemize{ \item For \code{\link[BayesFactor:ttestBF]{BayesFactor::ttestBF()}}: \code{Difference} is the raw difference between the means. \item For \code{\link[BayesFactor:correlationBF]{BayesFactor::correlationBF()}}: \code{rho} is the linear correlation estimate (equivalent to Pearson's \emph{r}). \item For \code{\link[BayesFactor:lmBF]{BayesFactor::lmBF()}} / \code{\link[BayesFactor:generalTestBF]{BayesFactor::generalTestBF()}} / \code{\link[BayesFactor:regressionBF]{BayesFactor::regressionBF()}} / \code{\link[BayesFactor:anovaBF]{BayesFactor::anovaBF()}}: in addition to parameters of the fixed and random effects, there are: \code{mu} is the (mean-centered) intercept; \code{sig2} is the model's sigma; \code{g} / \verb{g_*} are the \emph{g} parameters; See the \emph{Bayes Factors for ANOVAs} paper (\doi{10.1016/j.jmp.2012.08.001}). } } \examples{ \donttest{ if (require("BayesFactor")) { # Bayesian t-test model <- ttestBF(x = rnorm(100, 1, 1)) model_parameters(model) model_parameters(model, cohens_d = TRUE, ci = .9) # Bayesian contingency table analysis data(raceDolls) bf <- contingencyTableBF(raceDolls, sampleType = "indepMulti", fixedMargin = "cols") model_parameters(bf, centrality = "mean", dispersion = TRUE, verbose = FALSE, cramers_v = TRUE ) } } } parameters/man/p_value_satterthwaite.Rd0000644000175000017500000000513214140570270020215 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/ci_satterthwaite.R, R/dof_satterthwaite.R, % R/p_value_satterthwaite.R, R/standard_error_satterthwaite.R \name{ci_satterthwaite} \alias{ci_satterthwaite} \alias{dof_satterthwaite} \alias{p_value_satterthwaite} \alias{se_satterthwaite} \title{Satterthwaite approximation for SEs, CIs and p-values} \usage{ ci_satterthwaite(model, ci = 0.95, robust = FALSE, ...) 
dof_satterthwaite(model) p_value_satterthwaite(model, dof = NULL, robust = FALSE, ...) se_satterthwaite(model) } \arguments{ \item{model}{A statistical model.} \item{ci}{Confidence Interval (CI) level. Default to \code{0.95} (\verb{95\%}).} \item{robust}{Logical, if \code{TRUE}, computes confidence intervals (or p-values) based on robust standard errors. See \code{\link[=standard_error_robust]{standard_error_robust()}}.} \item{...}{Arguments passed down to \code{\link[=standard_error_robust]{standard_error_robust()}} when confidence intervals or p-values based on robust standard errors should be computed.} \item{dof}{Degrees of Freedom.} } \value{ A data frame. } \description{ An approximate F-test based on the Satterthwaite (1946) approach. } \details{ Inferential statistics (like p-values, confidence intervals and standard errors) may be biased in mixed models when the number of clusters is small (even if the sample size of level-1 units is high). In such cases it is recommended to approximate a more accurate number of degrees of freedom for such inferential statistics. Unlike simpler approximation heuristics like the "m-l-1" rule (\code{dof_ml1}), the Satterthwaite approximation is also applicable in more complex multilevel designs. However, the "m-l-1" heuristic also applies to generalized mixed models, while approaches like Kenward-Roger or Satterthwaite are limited to linear mixed models only. } \examples{ \donttest{ if (require("lme4", quietly = TRUE)) { model <- lmer(Petal.Length ~ Sepal.Length + (1 | Species), data = iris) p_value_satterthwaite(model) } } } \references{ Satterthwaite FE (1946) An approximate distribution of estimates of variance components. Biometrics Bulletin 2 (6):110–4. } \seealso{ \code{dof_satterthwaite()} and \code{se_satterthwaite()} are small helper-functions to calculate approximated degrees of freedom and standard errors for model parameters, based on the Satterthwaite (1946) approach. 
\cr \cr \code{\link[=dof_kenward]{dof_kenward()}} and \code{\link[=dof_ml1]{dof_ml1()}} approximate degrees of freedom based on Kenward-Roger's method or the "m-l-1" rule. } parameters/man/model_parameters.merMod.Rd0000644000175000017500000004731214166656741020405 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/methods_cplm.R, R/methods_glmmTMB.R, % R/methods_lme4.R, R/methods_mixor.R, R/methods_ordinal.R \name{model_parameters.cpglmm} \alias{model_parameters.cpglmm} \alias{model_parameters.glmmTMB} \alias{model_parameters.merMod} \alias{model_parameters.mixor} \alias{model_parameters.clmm} \title{Parameters from Mixed Models} \usage{ \method{model_parameters}{cpglmm}( model, ci = 0.95, bootstrap = FALSE, iterations = 1000, standardize = NULL, effects = "all", group_level = FALSE, exponentiate = FALSE, ci_method = NULL, p_adjust = NULL, verbose = TRUE, df_method = ci_method, include_sigma = FALSE, ... ) \method{model_parameters}{glmmTMB}( model, ci = 0.95, bootstrap = FALSE, iterations = 1000, effects = "all", component = "all", group_level = FALSE, standardize = NULL, exponentiate = FALSE, ci_method = "wald", robust = FALSE, p_adjust = NULL, wb_component = TRUE, summary = FALSE, keep = NULL, drop = NULL, parameters = keep, verbose = TRUE, df_method = ci_method, include_sigma = FALSE, ... ) \method{model_parameters}{merMod}( model, ci = 0.95, bootstrap = FALSE, ci_method = NULL, iterations = 1000, standardize = NULL, effects = "all", group_level = FALSE, exponentiate = FALSE, robust = FALSE, p_adjust = NULL, wb_component = TRUE, summary = FALSE, keep = NULL, drop = NULL, parameters = keep, verbose = TRUE, df_method = ci_method, include_sigma = FALSE, ... ) \method{model_parameters}{mixor}( model, ci = 0.95, effects = "all", bootstrap = FALSE, iterations = 1000, standardize = NULL, exponentiate = FALSE, verbose = TRUE, include_sigma = FALSE, ... 
) \method{model_parameters}{clmm}( model, ci = 0.95, bootstrap = FALSE, iterations = 1000, standardize = NULL, effects = "all", group_level = FALSE, exponentiate = FALSE, ci_method = NULL, p_adjust = NULL, verbose = TRUE, df_method = ci_method, include_sigma = FALSE, ... ) } \arguments{ \item{model}{A mixed model.} \item{ci}{Confidence Interval (CI) level. Default to \code{0.95} (\verb{95\%}).} \item{bootstrap}{Should estimates be based on bootstrapped model? If \code{TRUE}, then arguments of \link[=model_parameters.stanreg]{Bayesian regressions} apply (see also \code{\link[=bootstrap_parameters]{bootstrap_parameters()}}).} \item{iterations}{The number of draws to simulate/bootstrap.} \item{standardize}{The method used for standardizing the parameters. Can be \code{NULL} (default; no standardization), \code{"refit"} (for re-fitting the model on standardized data) or one of \code{"basic"}, \code{"posthoc"}, \code{"smart"}, \code{"pseudo"}. See 'Details' in \code{\link[effectsize:standardize_parameters]{effectsize::standardize_parameters()}}. \strong{Important:} \itemize{ \item The \code{"refit"} method does \emph{not} standardize categorical predictors (i.e. factors), which may be a different behaviour compared to other R packages (such as \pkg{lm.beta}) or other software packages (like SPSS). To mimic such behaviours, either use \code{standardize="basic"} or standardize the data with \code{datawizard::standardize(force=TRUE)} \emph{before} fitting the model. \item For mixed models, when using methods other than \code{"refit"}, only the fixed effects will be returned. \item Robust estimation (i.e. \code{robust=TRUE}) of standardized parameters only works when \code{standardize="refit"}. }} \item{effects}{Should parameters for fixed effects (\code{"fixed"}), random effects (\code{"random"}), or both (\code{"all"}) be returned? Only applies to mixed models. May be abbreviated. 
If the calculation of random effects parameters takes too long, you may use \code{effects = "fixed"}.} \item{group_level}{Logical, for multilevel models (i.e. models with random effects) and when \code{effects = "all"} or \code{effects = "random"}, include the parameters for each group level from random effects. If \code{group_level = FALSE} (the default), only information on SD and COR are shown.} \item{exponentiate}{Logical, indicating whether or not to exponentiate the coefficients (and related confidence intervals). This is typical for logistic regression, or more generally speaking, for models with log or logit links. \strong{Note:} Delta-method standard errors are also computed (by multiplying the standard errors by the transformed coefficients). This is to mimic behaviour of other software packages, such as Stata, but these standard errors poorly estimate uncertainty for the transformed coefficient. The transformed confidence interval more clearly captures this uncertainty. For \code{compare_parameters()}, \code{exponentiate = "nongaussian"} will only exponentiate coefficients from non-Gaussian families.} \item{ci_method}{Method for computing degrees of freedom for confidence intervals (CI) and the related p-values. Allowed are following options (which vary depending on the model class): \code{"residual"}, \code{"normal"}, \code{"likelihood"}, \code{"satterthwaite"}, \code{"kenward"}, \code{"wald"}, \code{"profile"}, \code{"boot"}, \code{"uniroot"}, \code{"ml1"}, \code{"betwithin"}, \code{"hdi"}, \code{"quantile"}, \code{"ci"}, \code{"eti"}, \code{"si"}, \code{"bci"}, or \code{"bcai"}. See section \emph{Confidence intervals and approximation of degrees of freedom} in \code{\link[=model_parameters]{model_parameters()}} for further details. When \code{ci_method=NULL}, in most cases \code{"wald"} is used then.} \item{p_adjust}{Character vector, if not \code{NULL}, indicates the method to adjust p-values. 
See \code{\link[stats:p.adjust]{stats::p.adjust()}} for details. Further possible adjustment methods are \code{"tukey"}, \code{"scheffe"}, \code{"sidak"} and \code{"none"} to explicitly disable adjustment for \code{emmGrid} objects (from \pkg{emmeans}).} \item{verbose}{Toggle warnings and messages.} \item{df_method}{Deprecated. Please use \code{ci_method}.} \item{include_sigma}{Logical, if \code{TRUE}, includes the residual standard deviation. For mixed models, this is defined as the sum of the distribution-specific variance and the variance for the additive overdispersion term (see \code{\link[insight:get_variance]{insight::get_variance()}} for details). Defaults to \code{FALSE} for mixed models due to the longer computation time.} \item{...}{Arguments passed to or from other methods.} \item{component}{Should all parameters, parameters for the conditional model, or for the zero-inflated part of the model be returned? Applies to models with zero-inflated component. \code{component} may be one of \code{"conditional"}, \code{"zi"}, \code{"zero-inflated"}, \code{"dispersion"} or \code{"all"} (default). May be abbreviated.} \item{robust}{Logical, if \code{TRUE}, robust standard errors are calculated (if possible), and confidence intervals and p-values are based on these robust standard errors. Additional arguments like \code{vcov_estimation} or \code{vcov_type} are passed down to other methods, see \code{\link[=standard_error_robust]{standard_error_robust()}} for details and \href{https://easystats.github.io/parameters/articles/model_parameters_robust.html}{this vignette} for working examples.} \item{wb_component}{Logical, if \code{TRUE} and models contains within- and between-effects (see \code{datawizard::demean()}), the \code{Component} column will indicate which variables belong to the within-effects, between-effects, and cross-level interactions. 
By default, the \code{Component} column indicates, which parameters belong to the conditional or zero-inflated component of the model.} \item{summary}{Logical, if \code{TRUE}, prints summary information about the model (model formula, number of observations, residual standard deviation and more).} \item{keep}{Character containing a regular expression pattern that describes the parameters that should be included (for \code{keep}) or excluded (for \code{drop}) in the returned data frame. \code{keep} may also be a named list of regular expressions. All non-matching parameters will be removed from the output. If \code{keep} is a character vector, every parameter name in the \emph{"Parameter"} column that matches the regular expression in \code{keep} will be selected from the returned data frame (and vice versa, all parameter names matching \code{drop} will be excluded). Furthermore, if \code{keep} has more than one element, these will be merged with an \code{OR} operator into a regular expression pattern like this: \code{"(one|two|three)"}. If \code{keep} is a named list of regular expression patterns, the names of the list-element should equal the column name where selection should be applied. This is useful for model objects where \code{model_parameters()} returns multiple columns with parameter components, like in \code{\link[=model_parameters.lavaan]{model_parameters.lavaan()}}. Note that the regular expression pattern should match the parameter names as they are stored in the returned data frame, which can be different from how they are printed. Inspect the \verb{$Parameter} column of the parameters table to get the exact parameter names.} \item{drop}{Character containing a regular expression pattern that describes the parameters that should be included (for \code{keep}) or excluded (for \code{drop}) in the returned data frame. \code{keep} may also be a named list of regular expressions. All non-matching parameters will be removed from the output. 
If \code{keep} is a character vector, every parameter name in the \emph{"Parameter"} column that matches the regular expression in \code{keep} will be selected from the returned data frame (and vice versa, all parameter names matching \code{drop} will be excluded). Furthermore, if \code{keep} has more than one element, these will be merged with an \code{OR} operator into a regular expression pattern like this: \code{"(one|two|three)"}. If \code{keep} is a named list of regular expression patterns, the names of the list-element should equal the column name where selection should be applied. This is useful for model objects where \code{model_parameters()} returns multiple columns with parameter components, like in \code{\link[=model_parameters.lavaan]{model_parameters.lavaan()}}. Note that the regular expression pattern should match the parameter names as they are stored in the returned data frame, which can be different from how they are printed. Inspect the \verb{$Parameter} column of the parameters table to get the exact parameter names.} \item{parameters}{Deprecated, alias for \code{keep}.} } \value{ A data frame of indices related to the model's parameters. } \description{ Parameters from (linear) mixed models. } \note{ If the calculation of random effects parameters takes too long, you may use \code{effects = "fixed"}. There is also a \href{https://easystats.github.io/see/articles/parameters.html}{\code{plot()}-method} implemented in the \href{https://easystats.github.io/see/}{\pkg{see}-package}. } \section{Confidence intervals for random effect variances}{ For models of class \code{merMod} and \code{glmmTMB}, confidence intervals for random effect variances can be calculated. For models of class \code{lme4}, when \code{ci_method} is either \code{"profile"} or \code{"boot"}, and \code{effects} is either \code{"random"} or \code{"all"}, profiled resp. bootstrapped confidence intervals are computed for the random effects. 
For all other options of \code{ci_method}, confidence intervals for random effects will be missing. For models of class \code{glmmTMB}, confidence intervals for random effect variances always use a Wald t-distribution approximation. } \section{Confidence intervals and approximation of degrees of freedom}{ There are different ways of approximating the degrees of freedom depending on different assumptions about the nature of the model and its sampling distribution. The \code{ci_method} argument modulates the method for computing degrees of freedom (df) that are used to calculate confidence intervals (CI) and the related p-values. Following options are allowed, depending on the model class: \strong{Classical methods:} Classical inference is generally based on the \strong{Wald method}. The Wald approach to inference computes a test statistic by dividing the parameter estimate by its standard error (Coefficient / SE), then comparing this statistic against a t- or normal distribution. This approach can be used to compute CIs and p-values. \code{"wald"}: \itemize{ \item Applies to \emph{non-Bayesian models}. For \emph{linear models}, CIs computed using the Wald method (SE and a \emph{t-distribution with residual df}); p-values computed using the Wald method with a \emph{t-distribution with residual df}. For other models, CIs computed using the Wald method (SE and a \emph{normal distribution}); p-values computed using the Wald method with a \emph{normal distribution}. } \code{"normal"} \itemize{ \item Applies to \emph{non-Bayesian models}. Compute Wald CIs and p-values, but always use a normal distribution. } \code{"residual"} \itemize{ \item Applies to \emph{non-Bayesian models}. Compute Wald CIs and p-values, but always use a \emph{t-distribution with residual df} when possible. If the residual df for a model cannot be determined, a normal distribution is used instead. 
} \strong{Methods for mixed models:} Compared to fixed effects (or single-level) models, determining appropriate df for Wald-based inference in mixed models is more difficult. See \href{https://bbolker.github.io/mixedmodels-misc/glmmFAQ.html#what-are-the-p-values-listed-by-summaryglmerfit-etc.-are-they-reliable}{the R GLMM FAQ} for a discussion. Several approximate methods for computing df are available, but you should also consider instead using profile likelihood (\code{"profile"}) or bootstrap ("\verb{boot"}) CIs and p-values instead. \code{"satterthwaite"} \itemize{ \item Applies to \emph{linear mixed models}. CIs computed using the Wald method (SE and a \emph{t-distribution with Satterthwaite df}); p-values computed using the Wald method with a \emph{t-distribution with Satterthwaite df}. } \code{"kenward"} \itemize{ \item Applies to \emph{linear mixed models}. CIs computed using the Wald method (\emph{Kenward-Roger SE} and a \emph{t-distribution with Kenward-Roger df}); p-values computed using the Wald method with \emph{Kenward-Roger SE and t-distribution with Kenward-Roger df}. } \code{"ml1"} \itemize{ \item Applies to \emph{linear mixed models}. CIs computed using the Wald method (SE and a \emph{t-distribution with m-l-1 approximated df}); p-values computed using the Wald method with a \emph{t-distribution with m-l-1 approximated df}. See \code{\link[=ci_ml1]{ci_ml1()}}. } \code{"betwithin"} \itemize{ \item Applies to \emph{linear mixed models} and \emph{generalized linear mixed models}. CIs computed using the Wald method (SE and a \emph{t-distribution with between-within df}); p-values computed using the Wald method with a \emph{t-distribution with between-within df}. See \code{\link[=ci_betwithin]{ci_betwithin()}}. 
} \strong{Likelihood-based methods:} Likelihood-based inference is based on comparing the likelihood for the maximum-likelihood estimate to the likelihood for models with one or more parameter values changed (e.g., set to zero or a range of alternative values). Likelihood ratios for the maximum-likelihood and alternative models are compared to a \eqn{\chi}-squared distribution to compute CIs and p-values. \code{"profile"} \itemize{ \item Applies to \emph{non-Bayesian models} of class \code{glm}, \code{polr} or \code{glmmTMB}. CIs computed by \emph{profiling the likelihood curve for a parameter}, using linear interpolation to find where likelihood ratio equals a critical value; p-values computed using the Wald method with a \emph{normal-distribution} (note: this might change in a future update!) } \code{"uniroot"} \itemize{ \item Applies to \emph{non-Bayesian models} of class \code{glmmTMB}. CIs computed by \emph{profiling the likelihood curve for a parameter}, using root finding to find where likelihood ratio equals a critical value; p-values computed using the Wald method with a \emph{normal-distribution} (note: this might change in a future update!) } \strong{Methods for bootstrapped or Bayesian models:} Bootstrap-based inference is based on \strong{resampling} and refitting the model to the resampled datasets. The distribution of parameter estimates across resampled datasets is used to approximate the parameter's sampling distribution. Depending on the type of model, several different methods for bootstrapping and constructing CIs and p-values from the bootstrap distribution are available. For Bayesian models, inference is based on drawing samples from the model posterior distribution. \code{"quantile"} (or \code{"eti"}) \itemize{ \item Applies to \emph{all models (including Bayesian models)}. For non-Bayesian models, only applies if \code{bootstrap = TRUE}. 
CIs computed as \emph{equal tailed intervals} using the quantiles of the bootstrap or posterior samples; p-values are based on the \emph{probability of direction}. See \code{\link[bayestestR:eti]{bayestestR::eti()}}. } \code{"hdi"} \itemize{ \item Applies to \emph{all models (including Bayesian models)}. For non-Bayesian models, only applies if \code{bootstrap = TRUE}. CIs computed as \emph{highest density intervals} for the bootstrap or posterior samples; p-values are based on the \emph{probability of direction}. See \code{\link[bayestestR:hdi]{bayestestR::hdi()}}. } \code{"bci"} (or \code{"bcai"}) \itemize{ \item Applies to \emph{all models (including Bayesian models)}. For non-Bayesian models, only applies if \code{bootstrap = TRUE}. CIs computed as \emph{bias corrected and accelerated intervals} for the bootstrap or posterior samples; p-values are based on the \emph{probability of direction}. See \code{\link[bayestestR:bci]{bayestestR::bci()}}. } \code{"si"} \itemize{ \item Applies to \emph{Bayesian models} with proper priors. CIs computed as \emph{support intervals} comparing the posterior samples against the prior samples; p-values are based on the \emph{probability of direction}. See \code{\link[bayestestR:si]{bayestestR::si()}}. } \code{"boot"} \itemize{ \item Applies to \emph{non-Bayesian models} of class \code{merMod}. CIs computed using \emph{parametric bootstrapping} (simulating data from the fitted model); p-values computed using the Wald method with a \emph{normal-distribution)} (note: this might change in a future update!). } For all iteration-based methods other than \code{"boot"} (\code{"hdi"}, \code{"quantile"}, \code{"ci"}, \code{"eti"}, \code{"si"}, \code{"bci"}, \code{"bcai"}), p-values are based on the probability of direction (\code{\link[bayestestR:p_direction]{bayestestR::p_direction()}}), which is converted into a p-value using \code{\link[bayestestR:pd_to_p]{bayestestR::pd_to_p()}}. 
} \examples{ library(parameters) if (require("lme4")) { data(mtcars) model <- lmer(mpg ~ wt + (1 | gear), data = mtcars) model_parameters(model) } \donttest{ if (require("glmmTMB")) { data(Salamanders) model <- glmmTMB( count ~ spp + mined + (1 | site), ziformula = ~mined, family = poisson(), data = Salamanders ) model_parameters(model, effects = "all") } if (require("lme4")) { model <- lmer(mpg ~ wt + (1 | gear), data = mtcars) model_parameters(model, bootstrap = TRUE, iterations = 50) } } } \seealso{ \code{\link[insight:standardize_names]{insight::standardize_names()}} to rename columns into a consistent, standardized naming scheme. } parameters/man/check_clusterstructure.Rd0000644000175000017500000000421214160324505020407 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/check_clusterstructure.R \name{check_clusterstructure} \alias{check_clusterstructure} \title{Check suitability of data for clustering} \usage{ check_clusterstructure(x, standardize = TRUE, distance = "euclidean", ...) } \arguments{ \item{x}{A data frame.} \item{standardize}{Standardize the dataframe before clustering (default).} \item{distance}{Distance method used. Other methods than "euclidean" (default) are exploratory in the context of clustering tendency. See \code{\link[stats:dist]{stats::dist()}} for list of available methods.} \item{...}{Arguments passed to or from other methods.} } \value{ The H statistic (numeric) } \description{ This checks whether the data is appropriate for clustering using the Hopkins' H statistic of given data. If the value of Hopkins statistic is close to 0 (below 0.5), then we can reject the null hypothesis and conclude that the dataset is significantly clusterable. A value for H lower than 0.25 indicates a clustering tendency at the \verb{90\%} confidence level. 
The visual assessment of cluster tendency (VAT) approach (Bezdek and Hathaway, 2002) consists in investigating the heatmap of the ordered dissimilarity matrix. Following this, one can potentially detect the clustering tendency by counting the number of square shaped blocks along the diagonal. } \examples{ \donttest{ library(parameters) check_clusterstructure(iris[, 1:4]) plot(check_clusterstructure(iris[, 1:4])) } } \references{ \itemize{ \item Lawson, R. G., & Jurs, P. C. (1990). New index for clustering tendency and its application to chemical problems. Journal of chemical information and computer sciences, 30(1), 36-41. \item Bezdek, J. C., & Hathaway, R. J. (2002, May). VAT: A tool for visual assessment of (cluster) tendency. In Proceedings of the 2002 International Joint Conference on Neural Networks. IJCNN02 (3), 2225-2230. IEEE. } } \seealso{ \code{\link[=check_kmo]{check_kmo()}}, \code{\link[=check_sphericity_bartlett]{check_sphericity_bartlett()}} and \code{\link[=check_factorstructure]{check_factorstructure()}}. 
} parameters/man/dot-compact_character.Rd0000644000175000017500000000045013636467450020054 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/utils.R \name{.compact_character} \alias{.compact_character} \title{remove empty string from character} \usage{ .compact_character(x) } \description{ remove empty string from character } \keyword{internal} parameters/man/principal_components.Rd0000644000175000017500000002412314106662543020050 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/factor_analysis.R, R/principal_components.R, % R/utils_pca_efa.R \name{factor_analysis} \alias{factor_analysis} \alias{principal_components} \alias{rotated_data} \alias{predict.parameters_efa} \alias{print.parameters_efa} \alias{sort.parameters_efa} \alias{closest_component} \title{Principal Component Analysis (PCA) and Factor Analysis (FA)} \usage{ factor_analysis( x, n = "auto", rotation = "none", sort = FALSE, threshold = NULL, standardize = TRUE, cor = NULL, ... ) principal_components( x, n = "auto", rotation = "none", sort = FALSE, threshold = NULL, standardize = TRUE, ... ) rotated_data(pca_results) \method{predict}{parameters_efa}(object, newdata = NULL, names = NULL, keep_na = TRUE, ...) \method{print}{parameters_efa}(x, digits = 2, sort = FALSE, threshold = NULL, labels = NULL, ...) \method{sort}{parameters_efa}(x, ...) closest_component(pca_results) } \arguments{ \item{x}{A data frame or a statistical model.} \item{n}{Number of components to extract. If \code{n="all"}, then \code{n} is set as the number of variables minus 1 (\code{ncol(x)-1}). If \code{n="auto"} (default) or \code{n=NULL}, the number of components is selected through \code{\link[=n_factors]{n_factors()}} resp. \code{\link[=n_components]{n_components()}}. 
In \code{\link[=reduce_parameters]{reduce_parameters()}}, can also be \code{"max"}, in which case it will select all the components that are maximally pseudo-loaded (i.e., correlated) by at least one variable.} \item{rotation}{If not \code{"none"}, the PCA / FA will be computed using the \pkg{psych} package. Possible options include \code{"varimax"}, \code{"quartimax"}, \code{"promax"}, \code{"oblimin"}, \code{"simplimax"}, or \code{"cluster"} (and more). See \code{\link[psych:fa]{psych::fa()}} for details.} \item{sort}{Sort the loadings.} \item{threshold}{A value between 0 and 1 indicates which (absolute) values from the loadings should be removed. An integer higher than 1 indicates the n strongest loadings to retain. Can also be \code{"max"}, in which case it will only display the maximum loading per variable (the most simple structure).} \item{standardize}{A logical value indicating whether the variables should be standardized (centered and scaled) to have unit variance before the analysis (in general, such scaling is advisable).} \item{cor}{An optional correlation matrix that can be used (note that the data must still be passed as the first argument). If \code{NULL}, will compute it by running \code{cor()} on the passed data.} \item{...}{Arguments passed to or from other methods.} \item{pca_results}{The output of the \code{principal_components()} function.} \item{object}{An object of class \code{parameters_pca} or \code{parameters_efa}} \item{newdata}{An optional data frame in which to look for variables with which to predict. If omitted, the fitted values are used.} \item{names}{Optional character vector to name columns of the returned data frame.} \item{keep_na}{Logical, if \code{TRUE}, predictions also return observations with missing values from the original data, hence the number of rows of predicted data and original data is equal.} \item{digits, labels}{Arguments for \code{print()}.} } \value{ A data frame of loadings. 
} \description{ The functions \code{principal_components()} and \code{factor_analysis()} can be used to perform a principal component analysis (PCA) or a factor analysis (FA). They return the loadings as a data frame, and various methods and functions are available to access / display other information (see the Details section). } \details{ \subsection{Methods and Utilities}{ \itemize{ \item \code{\link[=n_components]{n_components()}} and \code{\link[=n_factors]{n_factors()}} automatically estimate the optimal number of dimensions to retain. \item \code{\link[=check_factorstructure]{check_factorstructure()}} checks the suitability of the data for factor analysis using the \code{\link[=check_sphericity_bartlett]{sphericity()}} test and the \code{\link[=check_kmo]{kmo()}} KMO measure. \item{\code{\link[performance:check_itemscale]{performance::check_itemscale()}} computes various measures of internal consistencies applied to the (sub)scales (i.e., components) extracted from the PCA.} \item{Running \code{summary} returns information related to each component/factor, such as the explained variance and the Eigenvalues.} \item{Running \code{\link[=get_scores]{get_scores()}} computes scores for each subscale.} \item{Running \code{\link[=closest_component]{closest_component()}} will return a numeric vector with the assigned component index for each column from the original data frame.} \item{Running \code{\link[=rotated_data]{rotated_data()}} will return the rotated data, including missing values, so it matches the original data frame.} \item{Running \href{https://easystats.github.io/see/articles/parameters.html#principal-component-analysis}{\code{plot()}} visually displays the loadings (that requires the \href{https://easystats.github.io/see/}{\pkg{see} package} to work).} } } \subsection{Complexity}{ Complexity represents the number of latent components needed to account for the observed variables. 
Whereas a perfect simple structure solution has a complexity of 1 in that each item would only load on one factor, a solution with evenly distributed items has a complexity greater than 1 (\cite{Hofmann, 1978; Pettersson and Turkheimer, 2010}). } \subsection{Uniqueness}{ Uniqueness represents the variance that is 'unique' to the variable and not shared with other variables. It is equal to \verb{1 – communality} (variance that is shared with other variables). A uniqueness of \code{0.20} suggests that \verb{20\%} of that variable's variance is not shared with other variables in the overall factor model. The greater 'uniqueness' the lower the relevance of the variable in the factor model. } \subsection{MSA}{ MSA represents the Kaiser-Meyer-Olkin Measure of Sampling Adequacy (\cite{Kaiser and Rice, 1974}) for each item. It indicates whether there is enough data for each factor to give reliable results for the PCA. The value should be > 0.6, and desirable values are > 0.8 (\cite{Tabachnick and Fidell, 2013}). } \subsection{PCA or FA?}{ There is a simplified rule of thumb that may help to decide whether to run a factor analysis or a principal component analysis: \itemize{ \item Run \emph{factor analysis} if you assume or wish to test a theoretical model of \emph{latent factors} causing observed variables. \item Run \emph{principal component analysis} if you want to simply \emph{reduce} your correlated observed variables to a smaller set of important independent composite variables. } (Source: \href{https://stats.stackexchange.com/q/1576/54740}{CrossValidated}) } \subsection{Computing Item Scores}{ Use \code{\link[=get_scores]{get_scores()}} to compute scores for the "subscales" represented by the extracted principal components. \code{get_scores()} takes the results from \code{principal_components()} and extracts the variables for each component found by the PCA. 
Then, for each of these "subscales", raw means are calculated (which equals adding up the single items and dividing by the number of items). This results in a sum score for each component from the PCA, which is on the same scale as the original, single items that were used to compute the PCA. One can also use \code{predict()} to back-predict scores for each component, to which one can provide \code{newdata} or a vector of \code{names} for the components. } \subsection{Explained Variance and Eigenvalues}{ Use \code{summary()} to get the Eigenvalues and the explained variance for each extracted component. The eigenvectors and eigenvalues represent the "core" of a PCA: The eigenvectors (the principal components) determine the directions of the new feature space, and the eigenvalues determine their magnitude. In other words, the eigenvalues explain the variance of the data along the new feature axes. } } \examples{ library(parameters) \donttest{ # Principal Component Analysis (PCA) ------------------- if (require("psych")) { principal_components(mtcars[, 1:7], n = "all", threshold = 0.2) principal_components(mtcars[, 1:7], n = 2, rotation = "oblimin", threshold = "max", sort = TRUE ) principal_components(mtcars[, 1:7], n = 2, threshold = 2, sort = TRUE) pca <- principal_components(mtcars[, 1:5], n = 2, rotation = "varimax") pca # Print loadings summary(pca) # Print information about the factors predict(pca, names = c("Component1", "Component2")) # Back-predict scores # which variables from the original data belong to which extracted component? 
closest_component(pca) # rotated_data(pca) # TODO: doesn't work } # Automated number of components principal_components(mtcars[, 1:4], n = "auto") } # Factor Analysis (FA) ------------------------ if (require("psych")) { factor_analysis(mtcars[, 1:7], n = "all", threshold = 0.2) factor_analysis(mtcars[, 1:7], n = 2, rotation = "oblimin", threshold = "max", sort = TRUE) factor_analysis(mtcars[, 1:7], n = 2, threshold = 2, sort = TRUE) efa <- factor_analysis(mtcars[, 1:5], n = 2) summary(efa) predict(efa) \donttest{ # Automated number of components factor_analysis(mtcars[, 1:4], n = "auto") } } } \references{ \itemize{ \item Kaiser, H.F. and Rice. J. (1974). Little jiffy, mark iv. Educational and Psychological Measurement, 34(1):111–117 \item Hofmann, R. (1978). Complexity and simplicity as objective indices descriptive of factor solutions. Multivariate Behavioral Research, 13:2, 247-250, \doi{10.1207/s15327906mbr1302_9} \item Pettersson, E., & Turkheimer, E. (2010). Item selection, evaluation, and simple structure in personality data. Journal of research in personality, 44(4), 407-420, \doi{10.1016/j.jrp.2010.03.002} \item Tabachnick, B. G., and Fidell, L. S. (2013). Using multivariate statistics (6th ed.). Boston: Pearson Education. 
} } parameters/man/dot-n_factors_cng.Rd0000644000175000017500000000050513636467450017220 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/n_factors.R \name{.n_factors_cng} \alias{.n_factors_cng} \title{Cattell-Nelson-Gorsuch CNG Indices} \usage{ .n_factors_cng(eigen_values = NULL, model = "factors") } \description{ Cattell-Nelson-Gorsuch CNG Indices } \keyword{internal} parameters/man/qol_cancer.Rd0000644000175000017500000000145714077615701015736 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/datasets.R \docType{data} \name{qol_cancer} \alias{qol_cancer} \title{Sample data set} \format{ A data frame with 564 rows and 7 variables: \describe{ \item{ID}{Patient ID} \item{QoL}{Quality of Life Score} \item{time}{Timepoint of measurement} \item{age}{Age in years} \item{phq4}{Patients' Health Questionnaire, 4-item version} \item{hospital}{Hospital ID, where patient was treated} \item{education}{Patients' educational level} } } \description{ A sample data set with longitudinal data, used in the vignette describing the \code{datawizard::demean()} function. Health-related quality of life from cancer-patients was measured at three time points (pre-surgery, 6 and 12 months after surgery). 
} \keyword{data} parameters/man/standard_error.Rd0000644000175000017500000001170714142675753016646 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/4_standard_error.R, R/methods_DirichletReg.R, % R/methods_averaging.R, R/methods_base.R, R/methods_betareg.R, % R/methods_glmmTMB.R, R/methods_lme4.R, R/methods_mfx.R, R/methods_mixmod.R, % R/methods_mixor.R, R/methods_ordinal.R, R/methods_pscl.R, % R/methods_survival.R \name{standard_error} \alias{standard_error} \alias{standard_error.default} \alias{standard_error.DirichletRegModel} \alias{standard_error.averaging} \alias{standard_error.factor} \alias{standard_error.betareg} \alias{standard_error.glmmTMB} \alias{standard_error.merMod} \alias{standard_error.poissonmfx} \alias{standard_error.betamfx} \alias{standard_error.MixMod} \alias{standard_error.mixor} \alias{standard_error.clm2} \alias{standard_error.zeroinfl} \alias{standard_error.coxph} \title{Standard Errors} \usage{ standard_error(model, ...) \method{standard_error}{default}(model, method = NULL, verbose = TRUE, ...) \method{standard_error}{DirichletRegModel}(model, component = c("all", "conditional", "precision"), ...) \method{standard_error}{averaging}(model, component = c("conditional", "full"), ...) \method{standard_error}{factor}(model, force = FALSE, verbose = TRUE, ...) \method{standard_error}{betareg}(model, component = c("all", "conditional", "precision"), ...) \method{standard_error}{glmmTMB}( model, effects = c("fixed", "random"), component = c("all", "conditional", "zi", "zero_inflated", "dispersion"), verbose = TRUE, ... ) \method{standard_error}{merMod}(model, effects = c("fixed", "random"), method = NULL, ...) \method{standard_error}{poissonmfx}(model, component = c("all", "conditional", "marginal"), ...) \method{standard_error}{betamfx}( model, component = c("all", "conditional", "precision", "marginal"), ... 
) \method{standard_error}{MixMod}( model, effects = c("fixed", "random"), component = c("all", "conditional", "zi", "zero_inflated"), robust = FALSE, verbose = TRUE, ... ) \method{standard_error}{mixor}(model, effects = "all", ...) \method{standard_error}{clm2}(model, component = c("all", "conditional", "scale"), ...) \method{standard_error}{zeroinfl}( model, component = c("all", "conditional", "zi", "zero_inflated"), method = NULL, verbose = TRUE, ... ) \method{standard_error}{coxph}(model, method = NULL, ...) } \arguments{ \item{model}{A model.} \item{...}{Arguments passed to or from other methods. For \code{standard_error()}, if \code{method = "robust"}, arguments \code{vcov_estimation}, \code{vcov_type} and \code{vcov_args} can be passed down to \code{\link[=standard_error_robust]{standard_error_robust()}}.} \item{method}{If \code{"robust"}, robust standard errors are computed by calling \code{\link[=standard_error_robust]{standard_error_robust()}}. \code{standard_error_robust()}, in turn, calls one of the \verb{vcov*()}-functions from the \pkg{sandwich} or \pkg{clubSandwich} package for robust covariance matrix estimators. For linear mixed models, \code{method} may also be \code{\link[=p_value_kenward]{"kenward"}} or \code{\link[=p_value_satterthwaite]{"satterthwaite"}}.} \item{verbose}{Toggle warnings and messages.} \item{component}{Should all parameters, parameters for the conditional model, or for the zero-inflated part of the model be returned? Applies to models with zero-inflated component. \code{component} may be one of \code{"conditional"}, \code{"zi"}, \code{"zero-inflated"}, \code{"dispersion"} or \code{"all"} (default). May be abbreviated.} \item{force}{Logical, if \code{TRUE}, factors are converted to numerical values to calculate the standard error, with the lowest level being the value \code{1} (unless the factor has numeric levels, which are converted to the corresponding numeric value). 
By default, \code{NA} is returned for factors or character vectors.} \item{effects}{Should standard errors for fixed effects or random effects be returned? Only applies to mixed models. May be abbreviated. When standard errors for random effects are requested, for each grouping factor a list of standard errors (per group level) for random intercepts and slopes is returned.} \item{robust}{Logical, if \code{TRUE}, computes confidence intervals (or p-values) based on robust standard errors. See \code{\link[=standard_error_robust]{standard_error_robust()}}.} } \value{ A data frame with at least two columns: the parameter names and the standard errors. Depending on the model, may also include columns for model components etc. } \description{ \code{standard_error()} attempts to return standard errors of model parameters, while \code{standard_error_robust()} attempts to return robust standard errors. } \note{ For Bayesian models (from \pkg{rstanarm} or \pkg{brms}), the standard error is the SD of the posterior samples. } \examples{ model <- lm(Petal.Length ~ Sepal.Length * Species, data = iris) standard_error(model) } parameters/man/model_parameters.mlm.Rd0000644000175000017500000001312314135322113017713 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/methods_DirichletReg.R, R/methods_bife.R, % R/methods_brglm2.R, R/methods_mlm.R, R/methods_ordinal.R \name{model_parameters.DirichletRegModel} \alias{model_parameters.DirichletRegModel} \alias{model_parameters.bifeAPEs} \alias{model_parameters.bracl} \alias{model_parameters.mlm} \alias{model_parameters.clm2} \title{Parameters from multinomial or cumulative link models} \usage{ \method{model_parameters}{DirichletRegModel}( model, ci = 0.95, bootstrap = FALSE, iterations = 1000, component = c("all", "conditional", "precision"), standardize = NULL, exponentiate = FALSE, verbose = TRUE, ... ) \method{model_parameters}{bifeAPEs}(model, ...) 
\method{model_parameters}{bracl}( model, ci = 0.95, bootstrap = FALSE, iterations = 1000, standardize = NULL, exponentiate = FALSE, p_adjust = NULL, verbose = TRUE, ... ) \method{model_parameters}{mlm}( model, ci = 0.95, bootstrap = FALSE, iterations = 1000, standardize = NULL, exponentiate = FALSE, p_adjust = NULL, verbose = TRUE, ... ) \method{model_parameters}{clm2}( model, ci = 0.95, bootstrap = FALSE, iterations = 1000, component = c("all", "conditional", "scale"), standardize = NULL, exponentiate = FALSE, p_adjust = NULL, verbose = TRUE, ... ) } \arguments{ \item{model}{A model with multinomial or categorical response value.} \item{ci}{Confidence Interval (CI) level. Default to \code{0.95} (\verb{95\%}).} \item{bootstrap}{Should estimates be based on bootstrapped model? If \code{TRUE}, then arguments of \link[=model_parameters.stanreg]{Bayesian regressions} apply (see also \code{\link[=bootstrap_parameters]{bootstrap_parameters()}}).} \item{iterations}{The number of bootstrap replicates. This only apply in the case of bootstrapped frequentist models.} \item{component}{Model component for which parameters should be shown. May be one of \code{"conditional"}, \code{"precision"} (\pkg{betareg}), \code{"scale"} (\pkg{ordinal}), \code{"extra"} (\pkg{glmx}), \code{"marginal"} (\pkg{mfx}), \code{"conditional"} or \code{"full"} (for \code{MuMIn::model.avg()}) or \code{"all"}.} \item{standardize}{The method used for standardizing the parameters. Can be \code{NULL} (default; no standardization), \code{"refit"} (for re-fitting the model on standardized data) or one of \code{"basic"}, \code{"posthoc"}, \code{"smart"}, \code{"pseudo"}. See 'Details' in \code{\link[effectsize:standardize_parameters]{effectsize::standardize_parameters()}}. \strong{Important:} \itemize{ \item The \code{"refit"} method does \emph{not} standardized categorical predictors (i.e. 
factors), which may be a different behaviour compared to other R packages (such as \pkg{lm.beta}) or other software packages (like SPSS). to mimic such behaviours, either use \code{standardize="basic"} or standardize the data with \code{datawizard::standardize(force=TRUE)} \emph{before} fitting the model. \item For mixed models, when using methods other than \code{"refit"}, only the fixed effects will be returned. \item Robust estimation (i.e. \code{robust=TRUE}) of standardized parameters only works when \code{standardize="refit"}. }} \item{exponentiate}{Logical, indicating whether or not to exponentiate the the coefficients (and related confidence intervals). This is typical for logistic regression, or more generally speaking, for models with log or logit links. \strong{Note:} Delta-method standard errors are also computed (by multiplying the standard errors by the transformed coefficients). This is to mimic behaviour of other software packages, such as Stata, but these standard errors poorly estimate uncertainty for the transformed coefficient. The transformed confidence interval more clearly captures this uncertainty. For \code{compare_parameters()}, \code{exponentiate = "nongaussian"} will only exponentiate coefficients from non-Gaussian families.} \item{verbose}{Toggle warnings and messages.} \item{...}{Arguments passed to or from other methods. For instance, when \code{bootstrap = TRUE}, arguments like \code{type} or \code{parallel} are passed down to \code{bootstrap_model()}, and arguments like \code{ci_method} are passed down to \code{\link[bayestestR:describe_posterior]{bayestestR::describe_posterior()}}.} \item{p_adjust}{Character vector, if not \code{NULL}, indicates the method to adjust p-values. See \code{\link[stats:p.adjust]{stats::p.adjust()}} for details. 
Further possible adjustment methods are \code{"tukey"}, \code{"scheffe"}, \code{"sidak"} and \code{"none"} to explicitly disable adjustment for \code{emmGrid} objects (from \pkg{emmeans}).} } \value{ A data frame of indices related to the model's parameters. } \description{ Parameters from multinomial or cumulative link models } \details{ Multinomial or cumulative link models, i.e. models where the response value (dependent variable) is categorical and has more than two levels, usually return coefficients for each response level. Hence, the output from \code{model_parameters()} will split the coefficient tables by the different levels of the model's response. } \examples{ library(parameters) if (require("brglm2", quietly = TRUE)) { data("stemcell") model <- bracl( research ~ as.numeric(religion) + gender, weights = frequency, data = stemcell, type = "ML" ) model_parameters(model) } } \seealso{ \code{\link[insight:standardize_names]{insight::standardize_names()}} to rename columns into a consistent, standardized naming scheme. } parameters/man/dot-filter_component.Rd0000644000175000017500000000062613636467450017766 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/utils.R \name{.filter_component} \alias{.filter_component} \title{for models with zero-inflation component, return required component of model-summary} \usage{ .filter_component(dat, component) } \description{ for models with zero-inflation component, return required component of model-summary } \keyword{internal} parameters/man/reduce_parameters.Rd0000644000175000017500000001055514077615701017321 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/reduce_parameters.R \name{reduce_parameters} \alias{reduce_parameters} \alias{reduce_data} \title{Dimensionality reduction (DR) / Features Reduction} \usage{ reduce_parameters(x, method = "PCA", n = "max", distance = "euclidean", ...) 
reduce_data(x, method = "PCA", n = "max", distance = "euclidean", ...) } \arguments{ \item{x}{A data frame or a statistical model.} \item{method}{The feature reduction method. Can be one of 'PCA', 'cMDS', 'DRR', 'ICA' (see the Details section).} \item{n}{Number of components to extract. If \code{n="all"}, then \code{n} is set as the number of variables minus 1 (\code{ncol(x)-1}). If \code{n="auto"} (default) or \code{n=NULL}, the number of components is selected through \code{\link[=n_factors]{n_factors()}} resp. \code{\link[=n_components]{n_components()}}. In \code{\link[=reduce_parameters]{reduce_parameters()}}, can also be \code{"max"}, in which case it will select all the components that are maximally pseudo-loaded (i.e., correlated) by at least one variable.} \item{distance}{The distance measure to be used. Only applies when \code{method = "cMDS"}. This must be one of "euclidean", "maximum", "manhattan", "canberra", "binary" or "minkowski". Any unambiguous substring can be given.} \item{...}{Arguments passed to or from other methods.} } \description{ This function performs a reduction in the parameter space (the number of variables). It starts by creating a new set of variables, based on the given method (the default method is "PCA", but other are available via the \code{method} argument, such as "cMDS", "DRR" or "ICA"). Then, it names this new dimensions using the original variables that correlates the most with it. For instance, a variable named 'V1_0.97/V4_-0.88' means that the V1 and the V4 variables correlate maximally (with respective coefficients of .97 and -.88) with this dimension. Although this function can be useful in exploratory data analysis, it's best to perform the dimension reduction step in a separate and dedicated stage, as this is a very important process in the data analysis workflow. \code{reduce_data()} is an alias for \code{reduce_parameters.data.frame()}. 
} \details{ The different methods available are described below: \subsection{Supervised Methods}{ \itemize{ \item \strong{PCA}: See \code{\link[=principal_components]{principal_components()}}. \item \strong{cMDS / PCoA}: Classical Multidimensional Scaling (cMDS) takes a set of dissimilarities (i.e., a distance matrix) and returns a set of points such that the distances between the points are approximately equal to the dissimilarities. \item \strong{DRR}: Dimensionality Reduction via Regression (DRR) is a very recent technique extending PCA (Laparra et al., 2015). Starting from a rotated PCA, it predicts redundant information from the remaining components using non-linear regression. Some of the most notable advantages of performing DRR are avoidance of multicollinearity between predictors and overfitting mitigation. DRR tends to perform well when the first principal component is enough to explain most of the variation in the predictors. Requires the \pkg{DRR} package to be installed. \item \strong{ICA}: Performs an Independent Component Analysis using the FastICA algorithm. Contrary to PCA, which attempts to find uncorrelated sources (through least squares minimization), ICA attempts to find independent sources, i.e., the source space that maximizes the "non-gaussianity" of all sources. Contrary to PCA, ICA does not rank each source, which makes it a poor tool for dimensionality reduction. Requires the \pkg{fastICA} package to be installed. } } See also \href{https://easystats.github.io/parameters/articles/parameters_reduction.html}{package vignette}. } \examples{ data(iris) model <- lm(Sepal.Width ~ Species * Sepal.Length + Petal.Width, data = iris) model reduce_parameters(model) out <- reduce_data(iris, method = "PCA", n = "max") head(out) } \references{ \itemize{ \item Nguyen, L. H., \& Holmes, S. (2019). Ten quick tips for effective dimensionality reduction. PLOS Computational Biology, 15(6). \item Laparra, V., Malo, J., & Camps-Valls, G. (2015). 
Dimensionality reduction via regression in hyperspectral imagery. IEEE Journal of Selected Topics in Signal Processing, 9(6), 1026-1036. } } parameters/man/model_parameters.default.Rd0000644000175000017500000004010414160324505020556 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/1_model_parameters.R, R/methods_mfx.R \name{model_parameters.default} \alias{model_parameters.default} \alias{model_parameters.glm} \alias{model_parameters.logitor} \alias{model_parameters.poissonmfx} \alias{model_parameters.betamfx} \title{Parameters from (General) Linear Models} \usage{ \method{model_parameters}{default}( model, ci = 0.95, ci_method = NULL, bootstrap = FALSE, iterations = 1000, standardize = NULL, exponentiate = FALSE, robust = FALSE, p_adjust = NULL, summary = FALSE, keep = NULL, drop = NULL, parameters = keep, verbose = TRUE, ... ) \method{model_parameters}{glm}( model, ci = 0.95, ci_method = NULL, bootstrap = FALSE, iterations = 1000, standardize = NULL, exponentiate = FALSE, robust = FALSE, p_adjust = NULL, summary = FALSE, verbose = TRUE, df_method = ci_method, ... ) \method{model_parameters}{logitor}( model, ci = 0.95, bootstrap = FALSE, iterations = 1000, standardize = NULL, exponentiate = TRUE, robust = FALSE, p_adjust = NULL, verbose = TRUE, ... ) \method{model_parameters}{poissonmfx}( model, ci = 0.95, bootstrap = FALSE, iterations = 1000, component = c("all", "conditional", "marginal"), standardize = NULL, exponentiate = FALSE, robust = FALSE, p_adjust = NULL, verbose = TRUE, ... ) \method{model_parameters}{betamfx}( model, ci = 0.95, bootstrap = FALSE, iterations = 1000, component = c("all", "conditional", "precision", "marginal"), standardize = NULL, exponentiate = FALSE, robust = FALSE, p_adjust = NULL, verbose = TRUE, ... ) } \arguments{ \item{model}{Model object.} \item{ci}{Confidence Interval (CI) level. 
Default to \code{0.95} (\verb{95\%}).} \item{ci_method}{Method for computing degrees of freedom for confidence intervals (CI) and the related p-values. Allowed are following options (which vary depending on the model class): \code{"residual"}, \code{"normal"}, \code{"likelihood"}, \code{"satterthwaite"}, \code{"kenward"}, \code{"wald"}, \code{"profile"}, \code{"boot"}, \code{"uniroot"}, \code{"ml1"}, \code{"betwithin"}, \code{"hdi"}, \code{"quantile"}, \code{"ci"}, \code{"eti"}, \code{"si"}, \code{"bci"}, or \code{"bcai"}. See section \emph{Confidence intervals and approximation of degrees of freedom} in \code{\link[=model_parameters]{model_parameters()}} for further details. When \code{ci_method=NULL}, in most cases \code{"wald"} is used then.} \item{bootstrap}{Should estimates be based on bootstrapped model? If \code{TRUE}, then arguments of \link[=model_parameters.stanreg]{Bayesian regressions} apply (see also \code{\link[=bootstrap_parameters]{bootstrap_parameters()}}).} \item{iterations}{The number of bootstrap replicates. This only apply in the case of bootstrapped frequentist models.} \item{standardize}{The method used for standardizing the parameters. Can be \code{NULL} (default; no standardization), \code{"refit"} (for re-fitting the model on standardized data) or one of \code{"basic"}, \code{"posthoc"}, \code{"smart"}, \code{"pseudo"}. See 'Details' in \code{\link[effectsize:standardize_parameters]{effectsize::standardize_parameters()}}. \strong{Important:} \itemize{ \item The \code{"refit"} method does \emph{not} standardized categorical predictors (i.e. factors), which may be a different behaviour compared to other R packages (such as \pkg{lm.beta}) or other software packages (like SPSS). to mimic such behaviours, either use \code{standardize="basic"} or standardize the data with \code{datawizard::standardize(force=TRUE)} \emph{before} fitting the model. 
\item For mixed models, when using methods other than \code{"refit"}, only the fixed effects will be returned. \item Robust estimation (i.e. \code{robust=TRUE}) of standardized parameters only works when \code{standardize="refit"}. }} \item{exponentiate}{Logical, indicating whether or not to exponentiate the the coefficients (and related confidence intervals). This is typical for logistic regression, or more generally speaking, for models with log or logit links. \strong{Note:} Delta-method standard errors are also computed (by multiplying the standard errors by the transformed coefficients). This is to mimic behaviour of other software packages, such as Stata, but these standard errors poorly estimate uncertainty for the transformed coefficient. The transformed confidence interval more clearly captures this uncertainty. For \code{compare_parameters()}, \code{exponentiate = "nongaussian"} will only exponentiate coefficients from non-Gaussian families.} \item{robust}{Logical, if \code{TRUE}, robust standard errors are calculated (if possible), and confidence intervals and p-values are based on these robust standard errors. Additional arguments like \code{vcov_estimation} or \code{vcov_type} are passed down to other methods, see \code{\link[=standard_error_robust]{standard_error_robust()}} for details and \href{https://easystats.github.io/parameters/articles/model_parameters_robust.html}{this vignette} for working examples.} \item{p_adjust}{Character vector, if not \code{NULL}, indicates the method to adjust p-values. See \code{\link[stats:p.adjust]{stats::p.adjust()}} for details. 
Further possible adjustment methods are \code{"tukey"}, \code{"scheffe"}, \code{"sidak"} and \code{"none"} to explicitly disable adjustment for \code{emmGrid} objects (from \pkg{emmeans}).} \item{summary}{Logical, if \code{TRUE}, prints summary information about the model (model formula, number of observations, residual standard deviation and more).} \item{keep, drop}{Character containing a regular expression pattern that describes the parameters that should be included (for \code{keep}) or excluded (for \code{drop}) in the returned data frame. \code{keep} may also be a named list of regular expressions. All non-matching parameters will be removed from the output. If \code{keep} is a character vector, every parameter name in the \emph{"Parameter"} column that matches the regular expression in \code{keep} will be selected from the returned data frame (and vice versa, all parameter names matching \code{drop} will be excluded). Furthermore, if \code{keep} has more than one element, these will be merged with an \code{OR} operator into a regular expression pattern like this: \code{"(one|two|three)"}. If \code{keep} is a named list of regular expression patterns, the names of the list-element should equal the column name where selection should be applied. This is useful for model objects where \code{model_parameters()} returns multiple columns with parameter components, like in \code{\link[=model_parameters.lavaan]{model_parameters.lavaan()}}. Note that the regular expression pattern should match the parameter names as they are stored in the returned data frame, which can be different from how they are printed. Inspect the \verb{$Parameter} column of the parameters table to get the exact parameter names.} \item{parameters}{Deprecated, alias for \code{keep}.} \item{verbose}{Toggle warnings and messages.} \item{...}{Arguments passed to or from other methods. 
For instance, when \code{bootstrap = TRUE}, arguments like \code{type} or \code{parallel} are passed down to \code{bootstrap_model()}, and arguments like \code{ci_method} are passed down to \code{\link[bayestestR:describe_posterior]{bayestestR::describe_posterior()}}.} \item{df_method}{Deprecated. Please use \code{ci_method}.} \item{component}{Model component for which parameters should be shown. May be one of \code{"conditional"}, \code{"precision"} (\pkg{betareg}), \code{"scale"} (\pkg{ordinal}), \code{"extra"} (\pkg{glmx}), \code{"marginal"} (\pkg{mfx}), \code{"conditional"} or \code{"full"} (for \code{MuMIn::model.avg()}) or \code{"all"}.} } \value{ A data frame of indices related to the model's parameters. } \description{ Extract and compute indices and measures to describe parameters of (general) linear models (GLMs). } \section{Confidence intervals and approximation of degrees of freedom}{ There are different ways of approximating the degrees of freedom depending on different assumptions about the nature of the model and its sampling distribution. The \code{ci_method} argument modulates the method for computing degrees of freedom (df) that are used to calculate confidence intervals (CI) and the related p-values. Following options are allowed, depending on the model class: \strong{Classical methods:} Classical inference is generally based on the \strong{Wald method}. The Wald approach to inference computes a test statistic by dividing the parameter estimate by its standard error (Coefficient / SE), then comparing this statistic against a t- or normal distribution. This approach can be used to compute CIs and p-values. \code{"wald"}: \itemize{ \item Applies to \emph{non-Bayesian models}. For \emph{linear models}, CIs computed using the Wald method (SE and a \emph{t-distribution with residual df}); p-values computed using the Wald method with a \emph{t-distribution with residual df}. 
For other models, CIs computed using the Wald method (SE and a \emph{normal distribution}); p-values computed using the Wald method with a \emph{normal distribution}. } \code{"normal"} \itemize{ \item Applies to \emph{non-Bayesian models}. Compute Wald CIs and p-values, but always use a normal distribution. } \code{"residual"} \itemize{ \item Applies to \emph{non-Bayesian models}. Compute Wald CIs and p-values, but always use a \emph{t-distribution with residual df} when possible. If the residual df for a model cannot be determined, a normal distribution is used instead. } \strong{Methods for mixed models:} Compared to fixed effects (or single-level) models, determining appropriate df for Wald-based inference in mixed models is more difficult. See \href{https://bbolker.github.io/mixedmodels-misc/glmmFAQ.html#what-are-the-p-values-listed-by-summaryglmerfit-etc.-are-they-reliable}{the R GLMM FAQ} for a discussion. Several approximate methods for computing df are available, but you should also consider instead using profile likelihood (\code{"profile"}) or bootstrap ("\verb{boot"}) CIs and p-values instead. \code{"satterthwaite"} \itemize{ \item Applies to \emph{linear mixed models}. CIs computed using the Wald method (SE and a \emph{t-distribution with Satterthwaite df}); p-values computed using the Wald method with a \emph{t-distribution with Satterthwaite df}. } \code{"kenward"} \itemize{ \item Applies to \emph{linear mixed models}. CIs computed using the Wald method (\emph{Kenward-Roger SE} and a \emph{t-distribution with Kenward-Roger df}); p-values computed using the Wald method with \emph{Kenward-Roger SE and t-distribution with Kenward-Roger df}. } \code{"ml1"} \itemize{ \item Applies to \emph{linear mixed models}. CIs computed using the Wald method (SE and a \emph{t-distribution with m-l-1 approximated df}); p-values computed using the Wald method with a \emph{t-distribution with m-l-1 approximated df}. See \code{\link[=ci_ml1]{ci_ml1()}}. 
} \code{"betwithin"} \itemize{ \item Applies to \emph{linear mixed models} and \emph{generalized linear mixed models}. CIs computed using the Wald method (SE and a \emph{t-distribution with between-within df}); p-values computed using the Wald method with a \emph{t-distribution with between-within df}. See \code{\link[=ci_betwithin]{ci_betwithin()}}. } \strong{Likelihood-based methods:} Likelihood-based inference is based on comparing the likelihood for the maximum-likelihood estimate to the the likelihood for models with one or more parameter values changed (e.g., set to zero or a range of alternative values). Likelihood ratios for the maximum-likelihood and alternative models are compared to a \eqn{\chi}-squared distribution to compute CIs and p-values. \code{"profile"} \itemize{ \item Applies to \emph{non-Bayesian models} of class \code{glm}, \code{polr} or \code{glmmTMB}. CIs computed by \emph{profiling the likelihood curve for a parameter}, using linear interpolation to find where likelihood ratio equals a critical value; p-values computed using the Wald method with a \emph{normal-distribution} (note: this might change in a future update!) } \code{"uniroot"} \itemize{ \item Applies to \emph{non-Bayesian models} of class \code{glmmTMB}. CIs computed by \emph{profiling the likelihood curve for a parameter}, using root finding to find where likelihood ratio equals a critical value; p-values computed using the Wald method with a \emph{normal-distribution} (note: this might change in a future update!) } \strong{Methods for bootstrapped or Bayesian models:} Bootstrap-based inference is based on \strong{resampling} and refitting the model to the resampled datasets. The distribution of parameter estimates across resampled datasets is used to approximate the parameter's sampling distribution. Depending on the type of model, several different methods for bootstrapping and constructing CIs and p-values from the bootstrap distribution are available. 
For Bayesian models, inference is based on drawing samples from the model posterior distribution. \code{"quantile"} (or \code{"eti"}) \itemize{ \item Applies to \emph{all models (including Bayesian models)}. For non-Bayesian models, only applies if \code{bootstrap = TRUE}. CIs computed as \emph{equal tailed intervals} using the quantiles of the bootstrap or posterior samples; p-values are based on the \emph{probability of direction}. See \code{\link[bayestestR:eti]{bayestestR::eti()}}. } \code{"hdi"} \itemize{ \item Applies to \emph{all models (including Bayesian models)}. For non-Bayesian models, only applies if \code{bootstrap = TRUE}. CIs computed as \emph{highest density intervals} for the bootstrap or posterior samples; p-values are based on the \emph{probability of direction}. See \code{\link[bayestestR:hdi]{bayestestR::hdi()}}. } \code{"bci"} (or \code{"bcai"}) \itemize{ \item Applies to \emph{all models (including Bayesian models)}. For non-Bayesian models, only applies if \code{bootstrap = TRUE}. CIs computed as \emph{bias corrected and accelerated intervals} for the bootstrap or posterior samples; p-values are based on the \emph{probability of direction}. See \code{\link[bayestestR:bci]{bayestestR::bci()}}. } \code{"si"} \itemize{ \item Applies to \emph{Bayesian models} with proper priors. CIs computed as \emph{support intervals} comparing the posterior samples against the prior samples; p-values are based on the \emph{probability of direction}. See \code{\link[bayestestR:si]{bayestestR::si()}}. } \code{"boot"} \itemize{ \item Applies to \emph{non-Bayesian models} of class \code{merMod}. CIs computed using \emph{parametric bootstrapping} (simulating data from the fitted model); p-values computed using the Wald method with a \emph{normal-distribution)} (note: this might change in a future update!). 
} For all iteration-based methods other than \code{"boot"} (\code{"hdi"}, \code{"quantile"}, \code{"ci"}, \code{"eti"}, \code{"si"}, \code{"bci"}, \code{"bcai"}), p-values are based on the probability of direction (\code{\link[bayestestR:p_direction]{bayestestR::p_direction()}}), which is converted into a p-value using \code{\link[bayestestR:pd_to_p]{bayestestR::pd_to_p()}}. } \examples{ library(parameters) model <- lm(mpg ~ wt + cyl, data = mtcars) model_parameters(model) # bootstrapped parameters model_parameters(model, bootstrap = TRUE) # standardized parameters model_parameters(model, standardize = "refit") # different p-value style in output model_parameters(model, p_digits = 5) model_parameters(model, digits = 3, ci_digits = 4, p_digits = "scientific") \donttest{ # logistic regression model model <- glm(vs ~ wt + cyl, data = mtcars, family = "binomial") model_parameters(model) # show odds ratio / exponentiated coefficients model_parameters(model, exponentiate = TRUE) } } \seealso{ \code{\link[insight:standardize_names]{insight::standardize_names()}} to rename columns into a consistent, standardized naming scheme. } parameters/man/figures/0000755000175000017500000000000014132515367014775 5ustar nileshnileshparameters/man/figures/logo.png0000644000175000017500000003612114132515367016446 0ustar nileshnileshPNG  IHDRxb]esRGBgAMA a pHYsod;IDATx^]|SU~t7i.e@Pʦe" *\ P  ZdCˆ-t#;=I @n{y;ψ#(щMH=!>qFɻ ^Lȣc-7ݙc .ƦkNd# +,q h =Hq+=h*spG H$'5<*6\v Աw+tB+te %VID/$_82ݶ .h0`mm* FPϞO'Fi::1ZXmKpQT:Y#=aIۧd.5!'/CgZ$S1GMEn;"Ãȁz^Nêeb{!֧xo LlDQ WEHMXmCp~N2; TmZ\vϠQ*s2V g CR7c|VK"h4~BGbn .ao/jr ۍB!ܺl?aЅPhͥ׀k.kph+H e;j*&{ }~aH͟Fd m)G654(9jVvb]Ѓ/&Jm3XgJ?J&0 Sf'3KIo{a^4mȉv6^Ƅx68kbMI%;ro9Nyt{c(Y:GÄz`55_>n. W`-ֆU`g@2t{S<-mg .F*8U I+N2$aݙqVPd[=\ڰHSQgȤpmdHbG,gnn9v:Kƻ⨖9 :.  ۾k7`@q&yK/c6l'I #;bo}) pc֦BaXa܆Qpo\*bV?Q֪PٶD}EdR3EBC|;%R%H=͒HkTLZ׃Bo_I])g. 
d;x7;+3Ik}v0T͌!":f}^ >GJ:z;zV'NsHv eaύU{5GܓyvD#򶫌jzkYed7{wQhg[o;;[W}Lߋu9sUSh4v V.']hY"̞">?N6g{NUGC 6QRwCRˣ==hWӋ.6KO!$ V ao>sw+ٹNc .[b4ZK.3#{ػ#0 ZaQpꭓDӎԸ h5_4$Jgi!8#wsɿ=88>9N8_TR8bg(r-{̔ubBꌶ^ D)Z{H!5`sU.ExcBy96[͛Z %qMw]cҫ YC؉{/]?Ӗ nC]1ה z8{$5\>7D#q#0\B w nGI )cﳮ0o. n7n~\%n [lb pOCCOm!J,^f-O. L5'5Y]1,=x=+ͪd_G7䇫#e_졾;)aٹTxZJ Gj""|C3rRhq؆2l v|X4STlH0"U_:n@WeĐi*\XI5BKRlV5Ԙ`yk 76 F23tNؗd]+_,Q:OqRusJḚֳ +%Imsڎ򡧋sGL^gah$:v:T_5.[Hb:9e,Hès3q1B #r6$؋d=m:yts%DsVFVs5;j8lT*u vfCg̓P,(k1YE( dY?jT̞bS+cV.D+ 9ɉ/9\ 8q5 ._:Ԙ`VA!'} ggػ\~)5΂r3 r:8A6;p??(n!FmJ֡W#\(qۼq;8EKq(6g{#$Ϧ;(aR/0r|wfnȹ6F6("r94&:#H߫>qUJjNYb X v^2FHpgQ#-z_kOA!²*lnSܐvyԫce=rxY0!]o'Y^4fFEq:efGE eꙟ#g%˜ d]Mz4ψ0c5D׹=S=P]+sSP_03~]q۠:$ծn &iJƱt1L~ `ɕH%1R;s ze6: tWRv*fD cZ2>oEg}pqZr2/ M3n;%[l+X2O v9}Zt(=r~~'`\dHH9ء]h.CZw^Cڧ+զ* :HFb+INQ'p&Yp';C}ۀo[zA7c ;;I*řR+(g1cO~vb[N.kupn?F·]#sSQʛB/qVT O^L 5vĮXpM,,#"i\Ikwе-k.$dDİtrXDS[5tַA{|冉Opi>~υҤfQ\lރj,^ºЌ\ cT3;QW~{U!F7= o ]B*4/H˻+߄t&8N݇t{`( 8vRO%jxIa3K8<qabPc]𱯨CG3>nM=Hnp  /)0Fq5:zBC/p˟jlʸ+`l=յTJ uMx6޵$2f4#y O6cìYV"9gc@ FP1!FnL@'BS(SFq=i. w ܡ,jd_C2 =S5Bal7>P:#B!ͅcF? ʚ;9ԇ!сp:w8۫nRsu9/sV.[I6!܀<0[-tU K9O_xihE*~R˦$Pxt*cBhXqmRN(=\rq\:cS'4+k+yZ{;W/ mYx^m%7NrΑ`;9z-" :ɛD + h!*Yz~a:m_"ݵ,1< <# ʂLr|ՑkA9a}*)*6Pox8^O$,fzJ^\s.-$}|BLl,EO$Hgw#scP6}&);bH6tK=4Sˑ\%A&-Ӻ t91*HJ!=K/HE~_t̆yw9@0agDUqe?դ>p殎Fj¹H!I,Fy,MF^~~WLxq( n-JNq@)v5f+O3~!ʙڂ יR ~R>WE!uoOCIo͌uyP55{'˅(ƗDH*Rrp;KiΉ$l6{~?ڥ}8*C⻧z` U1:ȭf 1(PaBH*HE?: (ݶ}}?:œ+BC e;5'5w&$#=0Ms%AyL0K ¥0]8_݇Cj`[.Yi1IA#TEʓ(KMnhɻL'D'"q*ĸuJψmކ1ajk'shT1+7pSEqpİḁ&xyzg![L;+ymyUj F'kЪ)8!8qnu=:(2hPr}+[+3`g!uD䉍^D.{zRm(5&C-LĢ;xH-T %sPW`Q>9RXR}fM}<G6"f"ٶ:?T})!!!h٢$&%hFv 5KMnnr,lq$JI6!Ѭ\U8Bf1\-*K꾟2'lF$[[(9:5J7~ENW KKrЦ|}d$F* ?R ȶ9242?Ю1X_n:?cԭMi.*QJGwÑݓ2 4lY1. 
'ӾOt tء#i2^R᭷BII1IٮNg/&>S9Ե oT:gO&c{(:LInr:S8uxgL9FIpDhRnwOvct=>V,2[s=IA WW)=@^B"(/&`3Msz;`מ;ܯQ30_3)ŕ5 b6/=&$g!ǐ$=q&Q̟R%E_Nω|?l"<͞~"—?̳ڥ 4ig~{F)SS〲$EŘp'I/I6wo/dӔKCX ܗ6nn|[Bm%gxH 'F4$: Fh TO>QID*:Ӻ&Ej{PĬ{M}(Oy_wg.T_L~kKV%HZy8,1SO=%,"kRÄ ̦/\ϟ牤:K1y^$&#85Յ߉IHT^2bv KokI#Q,.9ol9Rquz-Dl.8 GKz"䅕d*ěd"/ΣNO?e_=Q9<[A"±-#=# |+^#xxY[3Rxfg6wkHO~Y6;um{C g!y2% T3cN9}g=w)_~[8$gAU|=1#I=Y;L(*F~We̜9c2x; %yZ .vIoX~`&WMRUy2p+uX) n\Wr: 3"1vhM-$)\~/hS*]*v+ \fJ?Zz:>*>g3eÞB:|_51q/#_',y&2C|e\Ribw!bYɖsqDm]a`|s?VoU56m+arLvVۨʽpt!L].-Lj Cbc,]_XR2EA^siAΜ9sŋ@׏CSQ$D̠#XJ"C:b 2xٛ @qtx5X_\S%\/ϧrL+j(8L@[B!/$<]zSxTjRiUDQ)8'G?~X̬,"/7r\!̒>$,YlCVx:S*%HYԧYqyuۖ"Ci?>GWϱ>L:7[xi |xYy@<->z@X7oQ;񿄓fplzr{ 8Qp)FpwhE$"Ģ9a\3zYTlxUOO5&adХ+͟/ޤI4i#F ?gy9T'bx9.P(0Νy]nny_6Gh0đėԕxeCr)Ԫ_W\C*rNR]끝#N-U#]$e)ȠTRF͞E;9/9}Mqql yRP8_rb*:?ϤPz,;$&&b/_H_-[$7Rzs^b4j0W]W~5[{'cڑ9h\R[:6WH+@F$3zG)IDZt |mIыh9Ng"gRPce&ÒⵒT|Նl}YdY&jdktx#< U#ݓW(vqnIV/( ]~8zfԶ=gسQg!`^Znh)9 we⿻cCm".&mӖM],Gs1RlR|2HJx'3k憥|:kSqB{ɫ&& #΢? R:d\Afr1,Ys}:B7ATV{{hkn:oѣgOؿODٮ}:LΒ˘Ka ǿܘ=ܭ{H9q ܸb/M[8ΣPi'uϓxX9m*utkס4r/SxZ'~/GǖM=ќ>|^p >߶}zK&9wd/CVf>4I"x 19zuPUe3*'5jD N )HUx?U˚}#}":ʕn(a0P~CUae0bLæEJ4zhX >H`OAR0 ťx#,A/}gV2^bo-gvfgLwM!=#A9ɏo@tQ^}ZQvRFvs!e&}Uzo*.~&}Yu+lwnxeXU9i><|8]; WK;clyѨ>R67I-l4?Iz Ǽqʣ-g۷⥢6qx{[Szxu˼ۓr,\6\DpybYj] ÓaOv6`O8,/o\W!AM ={J]=)DIo: Vj>/l7i6^L: MZ!3=ruWg|P&+zLZS`y% ubɻ9 tSTRgw8۽QZ[4-?K;8 0{3fsRJ^mRJwD ޶z'O,PU>i.kQlUدpSf.I%UtX=%WId-&2Aڂ'э}lZBl1NO| f%e;۬(]%Q$  1FdRH>Z;gj~[iIΪ7P|x( z>/w4%H+D|c8h(~#b|9fr)t7X n:E{B"y^~@G٦*o*1j>@x <ůT޾Т\&(',rtQHԶVm{HMgJ 뎘kg2-P o&J>:l(~XNW639R%iUR&ap0IU5ae:>W◡˕SR`q^a0b[a[N0CكxcP*Z/EvaľYI(97Tbrs_k'v%[$.$vy℄xqmUnS:ne*AlnҢٳY9/wj{M5+x71ZW]YDz\e+rb-hP[M&-zÒVd5ڏ^>8 &P^7J0WƇ=E$eTϩ<洡7sڴuśD\>V*d=OUaM0 2^ ٬ֹGh;tʫ^d ۫RqJ)cZ$@ @ywG^2a!+> zH5ǰ*U"ޙF'Ŷ5ewql8gXmlOuסּ/{XnbU!$v<:#؂ :֦}@Vx% +p_nsU0_eɣbn[nC0ޝ<ۓ%yRØU^d)tEtöU&edG7Ɂ=6%լH7)NvfFvAE<+g{ ɸH8Tt"K *N{V75n{H_͸# fpHC譭yǓ;^Kq'Nc 2M [dyGT˦Y #p~h'edxD"$z0?Z*(dyIENDB`parameters/man/figures/figure2.png0000644000175000017500000012202413620074571017045 0ustar nileshnileshPNG  IHDR:DsRGBgAMA a 
pHYsodIDATx^|IƳ:Un+(9w]BŭbJKB'ߓ/4i!Iͼ3;]JP@  l@ E"n@ )!P @ B@ E "n@ )!P @ B@ E "n@ )!P @ B@ E "n@ )!P(r: Ν;L@ }7KJf'(.:xd_IӧOw%66 ޼yp·o2bРAϟ?g"_KΝqƜZ q;v=* kS5L?)+ԣGm۶Q@  M1 !!>ĉ#F{Μ9(n Jƌse޽ˤlo߾RoٲN:N̛7/,, EjժKe"Di ##{8ށ=3zPB xKH&X.((V)T+өKW6eh ȕ(\l&3c#ȜB#+i/noҤILDu1ߟ(>pb75h:zmz6mZ` 0`ڵkeu*n G۷a\x+W(͛7o|~TTWڱcG&A4@$ 6lٲ-4ϯQFm!O׿r%fw?!X@yرcժUc"*֭f1*@ >t']\\֮]ۣG:0VprҥKdQQQbtQiΞ= }ĵ=+Vxxxܸq$$$*g>x cʇ7oBzjԨQ/^4h:ށC+܀f͚-^'[i5xlVg1W>{uy&\͒]י&]&T~ܜ( ѣ?~mٲ% Rw~%K,]˗>NՉ;&Lӝ;w2/5ܵkrff&k׮ͤ=B3f̰gLZ|dg ֭[XU`ks-xuQzmfddؼys"&!t6s޼yKn۶-.;;'/(V(X)/`RZyrV<ʆk׮Ϟ=;x!C:vիW!k۴i$H$233AqغukŊH&Mcʔ)@ GAp|4hٳϟ?vZƚ?f"DfZB *59r@ hM9[<edUKrlM26)d*Qb[R\6+W[7}nPU<%[[[sϞ=.]ׯcՅizz{ʕ+7l^rÆ cߒتU+H131c1D̬Yt>uOn&͞:uj% ֪U'R( z̿ >[Qow2bJ >'P6=-Ǭ 6Qs~%xED-2U~x1m#'g5w RUookQ$nݪ_~>}vŘcǎǗ*Uj|,+Vݻ7>W ^:ͨQvvv#Gscƌzݻwͱ?s5d;uꔅ]?_3dUtܹsuƘTܩSU @ .9s0ȱ5aۙR59U8ӊݡ,k yX)/Ɣt8c[giL!( :)+q I58K(MrK,U5j0ƍ7z٬Y3ԩ-NNN;pl6W^322wk&E%hBN ФIB 'עE ^*TH4bĈcǎBc…Nyfk|lѣG1GtРA666޽[N($$$\|ɓDH]!@(RWn@ !P @ B@ E "n@ )!P @ B@ E "n@ )bX~:L ի{{{3@ 8/nӭ0@RV^=vX&B Mnnȑ#0@RzѢE &B p&@ "YPL HA @ "7@ gϞUz>0V .\l۷oS!8}tٲek֬)HaAdDFFNWn :(;;1BH$ڱc~rnBK+@ E|ӧO^]Rd&q.]0o@nLMM)իP(d D.O4 GGJް0>!tcժU&L`"ܹs۷766VV͏?jy8͛7r劍ͬYP/:fG@(p̙?ٳg3glժDTrDEVVs圜SNǗ.]Zg*$ˉ'ʕ+5k*T+88x˖-˗OMM]|9VVV|>_s_eʔE=ztݺukذa:u0r"hÆ h suus>< ZjffܹsoܸJaŋDk׮ܹ322Nm՝;wPЃ"%ڵk5j_( 0bĈ-Z4n8vp<.O:ɒ%K"HH %&&5yyyd2ylZիW8PAAAjׯ(*Kك3gPUtzwA|#V_ZAo… mSTA(33iӦxc۶m]LС۷o};w gt!;wn„ +WNHHh۶-$4MT5-̟?ɒ%8cRȀNJgϞwޅqƍC tP(6jUfMhu,Zh%JuVXXڞgS p8&Æ hٲ%%>;ALOOGj׮v0o666F:`yZG.=}4W+Pu+V O٭[7tǟu~*>|8~m3O)%>gLj4huAdff2 fpL:uҤIQfYɁ,Lm֬_uԩ'O:99!h $LչoggGU=ZMOfggSΝxb:: T^OM!nݺ֭C7onD4P,Jhˀi|8Zڥ^=]|VZʕ+quӺoJ_zɵ|r ԨQ" v=UJ&h:?R MRڿ @ !jժ͞={Сfffpݺukٲ%f-br|Tm(ԩf͚> NwC0q > ;_zE_2dÇ;6rH6vutuQWnQ=Ϧheel/^A@\pᨘz;:99!nkkcҠ<@ 6lԨQ{A \14v*SRL̹[jf&('Sjŋ9rC'N Uw^@Af8((~TT ?*Amޔ:hԠAq6mT682m48mێ;E0E۶mV ֭[Ro]v8m3O)%~R%JP?h_x o @'yzz"\p*]nݺsK$Aõ<[`;vKHHvOy׮]߾} w!EǁPBhh(} z+-- eNOOG QRJѾb^ص7.Bˈ-[Ћ9YYBԫW5Ξ0aB>}`TobŊmڴYK.f͚ᐢ0knvM낽+V {AU! 
!^xqm)CA 15kV֭m29ߞ;wyg޽ʕ;w.f>""nE(XS_~E3$ ݻpum֬m #@ qC Hq/y@״lْ~@ ?tz.@^zرL@ ?r@:+7'57@ -@ D@(RqC H[ڵk<3fezzBزes?-{( !C(M:`.]jff˘ [8tm,2#B7&&&s̹wٳgƍgeeb sss& 5Ν;QHԫWؗ/_YݰԩSffc2XfΜ㓖v#G&&&`}ZJ(z{{A@DEEYXXIGDDџ1cԩSҥK-Z֭cU}-00QFɠHIIvtt|)c54d2kJ*hǎ۶mۢ:@ #166^lYpppPPŋʆ]2q Qxq&dȈD:@h@ .i 'L~z6;  XbNC ?E >Yf7nLLLd~n޽۴iSWW]v :vm۶nݺ{tN,!!ƒk6S5kX,իW )S0 ?742 FPݻzsa~!!!Gp8۱c,^hذ!@ ?M||͛+T/o߾˗SuĉpЄǏ߿Ν>?` Hƍxyy5I0\BkUVY[[E[n(#B!!ߖ"P(hA1@ A @ "7@ D@(RqC HA @ "7@ D@(RqC HA @#/Œ@  ō@ ppp]6F$DnjciisSx!:ٳgWnggWRϟcǎbeeղeˬ,hX>}8;;NX [RCÃ|{"{*;Xr %" F dΜ9݃# 7n|Mppp||X,2dSg(((Ç52vX4y9 H4hР/_]L2sLtGIKu4*666::ޞI(R͑N*x&U͔W0Td6RluT~0=z44ͤI`_$ʔ)+?M6A@f͚8,+N&LXsG\U 2(PtA HL陈:IOO_hÛ5kqFzՂB8p <߮]*UD zԩSO< h%޽{G'C_Ϟ={+V\v-c54(zxЊӵkWMN*(2WHG>~a+7-ٯ᪦k&δ$,L G)J32Eg\1q]|' ȦMV~(b.];w.ZNNc.ܹsU?M+͛͘->|p&Mљ|}Yb1^Z L2M6m޼}t'O 4;m_ŋpl߾sδݰFDaaa!!!GWގ;`yUDDDÆ :ZT;իJR%b5l\-ݢ%DwKlؑˁđ٠$ b\&9֋/f }]p![lSw[Bѽ{9s`teLߌʕ+'&&`՞6mZ͚5t6߿7664i8p Fxxxٲeb*p ?ڵ355uQ+feeգGB:aٸqc4 9r!bҥmڴA !!nݺh#t',h8Qe2" y y544!h\u&zl HES[6e0 |U.FK{-\.?{/2OūEg_KdrڦuWT:0k5!mI_*3_,38!F/0qCܤ3/Xٳgt`|&0դ8ߡc7w)Ug"iF ZY=`b4̍޾}C7-&uqʔ)ժU4-_\=:_lmm s~c @ E||:ynB\j3ڔV^@i"fs˧G-x3 X?guM9:3& wn~!7Sz  ҄G\})Z~_!7zz44+:Ig'w} c6EX'^I_mnpr8oaA[˔)sĉڵkө'O`SM6]r%,OHR;;7v֍1&Hcǎ{}ߚ5kΝ;kW^P'PQYYY>>>2FbU@lݺUsS۷q -`#W?xÇ&En|{lhky G!K5]gAf_s/@"r"?J,[ {d\N&%(K#D@ȕg5y=|`%ޓd7)r('̂}*r纚"Wy5&=j۶-Wzq4({͛7/)PG\ r͙3g=zTSـK. 2L~{  ZbE w( x^Z< D5e˖A4hЀ淑clL\ @ B~lpw"Ae_\A޻6/:7 9P[vX =b8kaɮXcƣ:s-2A3J&ɞ&+*< >p?s$VrKdg 72,::~CWSSSH> ^-:AVt< C'b;v0aӧOwX?{bbɓ TfĉZFŋzZ "nBP.]6_,\g@\vۤ5)R0 ?J,Fbc%|`ה'r_ EQy 066ҥK@@@ZZǏftqO4QFFR*?rrr! Ȭ&Mу~PghڪUPO؏9~z: ̰_H Ƥm޽8n%J`Lg#ߡ"n?H _P2vSIǼ%o,\7 QoG#߿el[6e&A! X _ 6ł: 'X:rJqT,c-+}eCecVW**țp8߻wbذaW^ݶm[n k3k, aGGǓ'O xw'xwU\'11UVmڴYnml|ԩgӧIօF^DŽ~AyT\X&7n \b1i@1I았[P;tc+$ I]n@/ܚg7I?#~rymJq47Yi)̍QrlylA&\{TzY1|-Dݻw5 P-_|Yti58wܯ;fH(U^F6bȑt8? 
W5􄷷ܯ[ñcK*~ٳs+VsvIγ8Йj׬Y3zgΜ9L@ 4߽);UrD$T.1Bǫp7-[xnS~WG3inD*)?_e\h;P,Okvʵ5bAYS8]JsFqԹloMu9ۛQTiT |wKʻ8wF]tQZNsؚ(\-[foo=P+qƍ={"\N,++%K"PZ5:055Q̦sNNNt* ZBNa͛\333(Zj@hDsss޽w#t*vϟ?{n##=`+7!/75GŷKT]m W^C}$lGV켇&?|\ܯ{X͓1Wvڵk߻w]vyĚ5kYF_K @?LžGş_\ We}"[#ߜhSNo1D@Yc}e4nnn+V믿Zn$ 3f8::1%D2 -dȐ?2~J.\ئMS˄ԩSffw>|hԨѲeT̟A|fD.y{ G!KO5}^|Ya_7}@}P+m|9g6-hڅ֡@ L{[ s_sss蛨(|ᘘ(ϟ3Or#%%۷yx]z {ݲe 4'DtIm41!+ XWagỆ3r͓Gm5ŧi8ŵw4nAAkn=ڹsgnJ'##cթvZ|ATRp4#ɒp@D*jz}*Ο?M6A48zQJk4D\#}8/;ԝ71̞7 qIhEڏlOej8t9D :oKЁ87(*۬ H͛/2VC<,,l„ LG&_d (Ě9j?$lj:?Z OBjG"MWnӽvyW^i~=ر?)S|;vˮV WG@.iDGGWPݻw@zСC/]Θ~+7aց (ȳ+}Q&{)d+؄gͥB,Y=quadPHr3k,XWSL ߹sgvv6Ѝ7ޝ+)'~-;wN("ٳb*hr=qD++[n]p9s? 󡘌 &hۿ.$Ӵiz1±k.33N:1q]&Wq_wu~wnbb=@FFFLLLD_*$DQ׀}љ6fϞ}}iӦ_U=y3qC&<|&[RN89& XpanLMM1qիҤI^RX|:j*!&R8P͛_ra֬YР)LZDs/kR<߂sεo8..nٲe|>߳ j8sa? :СCHںukrr2~8ӺvJ̩E5kroߦ~␥q_|ـ>qST)|}}U7nMT:C`ܹ3 qݻlUVMHHw^˖-܆d&bLw7p|*g\&&# tR ;1=zt||u׽08jii _S}7lؐTBG9r\rH.D"Ο?oaá!333%%J*yR 3aGp6l*jРAK{x i ;w7 Vqa4ŋDk׮ܹ322ֳgvMa1 =x 29'NXv-6VרQCիWlPPPZy@HHJ(qҥ={]p݁: e˖ejD3Ozz:/Q0`#ckܸ1z響蠧O߽{L2Pb)z|5AMDfwq)qkt8 vծ]Fc'O`AgGGpp:Qɓ'!bc={?Yd ো!:?!C@3 **U;&RTo)J+33Dx www茺u¡"`ojΛ7r'ĉׯUQ1Oé b1\BK"DhS5%jl>eɒ%3220xFEEسgONpbApr!J0cSbbx_hޭ[<I8 P8\Æ dɒ h!A1xb$!ziӦA >|̙ƚ{vټy3\>jZU\ՙCيNwՁ\8|0BG{G}" z} h+6mS ~KnҤG'k}wޝcߨяy^znnn7C Kg0 𓲷a֭,bC}14Jk&_pb>~8S`@~?tķlfnc߂͍ 7քH*!U_7,ڰ[nW\9hР;::b۫W/8NhC tG;U&L? 
Δ)Z@[zDp(\:*u|yᚇb Gi $2VT#s>k-QveHX޼'+ODJ=Kb˝qϹ-5Q:-T TŚQ[)9gw쥆#͡fr2V*݋NaҌ Pݘ W]))GNg 6b4j#Q X=L\133DgX?Qp6cVQ Eᬁm޼yI:S eʔتU5k`ԩNتU.^f|^ZoTFP"~S3J*!36=zTmޔ: SaHM6; c$qdƌ b;[n?+Y|yQE"ܽ{~ɓ^8,:㱵[@ͱw FQ{G%5۶mߣ} <0BJҗ R;t)mv)\'e @ͪg,E^Ɔ ^V7(*M4S۱beu_Jqӄ,3**]*U~7^1.Q~U)fj;νoePB) :՘VokʦkzlQ@wd2˧˚}4^)Z'WJ)Efb ?ZTQ~=EP< 7Sft!+&Sg9Yp$Rr NUsIJD'PP$p' NEqWMaCǿ5ihs`NڗR8=qb&$$L>wHL[8CCC;Jp=׮]èKZL3 #F9koJ]"-[ĸ2~ {,V -RD ;(׬Y3.lǏ]ktܰaC06ݺuklV;ƫۣGp _l>z!;;{„ }꽷if͚e˖E@JR:?1Ts#M@zFKq uKR⧈Å"hkCPL $y^״֞͡)'^I{U-/6Vx'X7($IvI]BTڱ,w3ɌF/ 40Zv_ɾ0IXq*R&]*\ڙR_|2GZ*a%fj;`pN*y\7BӬoiދD:; ]jV ʦ;w`e(&Y{$psmn jkgOog2ՐyOwԟ15kܿ: ?POzԩS{CrC MsHPHYert&Ojr.Gy@$˖NGI&Ȋ)gEHM*"Rϡ t䟥Qilf6CjɎT,/¦Y;(+a=KW*+Ƀ%&C~9ZfeL%(NEI *aaMc يt +pTeQ,Tmܷ$W Ud\$-ΩWn~fD"ѱczef FGGC^x MHHXlB+7 ƪpvdPELl^sB63B"$+7B 7ŕUgHXo:F"M}7AAAL@ N:/8D9L Aܜ8q}L@ hGX!~$ҠKU)/~%7߈+n$:2ox/"""||~ Nz4~-$&&Xή9(޾}u۷oZZZ2Vb>|pjj*6ܹSg~"O= ϝ;>qww]`` ~J*LIIVdf6hBG˵>Еҟ7w=[#:+]+K.Smt_%~$ G[p+,Xx._gfffrlzzz2e?{n;::2&tc[lyCoC 6lpyFـN=A 2t3gP!Cȯ,>LHH6lX۶m5hN~ТW2&aaa>4'NЃ&8qv+ݻWeuP[M赻~s…؋ cU.)Էo_04k׎)P0 F*ҟ#GXg Lmm}?D0L\hR "#/70q_|W@A2qPFFF:_\Q+.+/~vr/ ,+7P(_(s`чK`}Gg|%KWo999VTB@^:**jرƌɐA׮];k,&9˗/ի=W CkժUzጺqz6B :n8 ڨW-[2qK.:t(@&^<-I.f9_Էv_\>^{{s u@($I?8EQ\S\J|@jw__ل_oJ.˗=MFFȘ~>t$ ~iJLMMss~ 0V]Gh 0ߔ|GvLvΝ 'Eb H-&A :u _W[={6m1t/^ r]v}aǎƦ_|۶m{ŋY$p1Ow-(Q[Kqذa=Bs6mڄv9lٲ!Cx<&늜]ta EbL._CYf+Z|M}ο%閞=}Ŗ4P(g1@0VI60.6rgtց);xtzN7, x|tc6/_ZϞ=K1t)>>zAs3779~Yaa(6:z@2dii9;7ϟ?mڴ:tIbwA@g]8p̒x}(wޥ" eJ8iҤ$JLZ+QK.7&AaDU\-Z`xeJzY׏lܸgϞ?+l6zsg噏Y sJ$RP\ZA@|32~87yS<{f؜}ooW&onVw4-owɓ'רQ?c2cƌݻw׭[gp88>Oj(f͛7?xv/^i#(h "gΜɁPt_ccc[lI/(^l1.(۷/K_;w.Sñc݋߰aCF sяsSNedd 4Q-['ZۮsQWD< =Ɠ_ᰢnZ{Mr|L9,궒P|M\әͷk ɼ>F6 MU-fmfZH,fdDqtxu롽[SG/m۶oݺ')N[ ȑ#? :DZ={,ZhСpU0љ =oCA@(V^AP9k׮ѢB{n1c8;;s\3++͛7IKqjۗ,Yg:@ԬY̌2TФUTQ7G 4p^t5 CЧ#RN GYF=nKzԮ'gheeE*T3|M(Ӎ=v{<ء^N\g>%Fp an}E3ZD!@*qC 73&+I[݊1XYL8{.jdu[ )&-5ϒ˥QYv#ڦ)ˆWc~G{[Nf~_z [O'~! hT&T[Ζ_sZ75e>@R呑Smi'WPªК%S\N9ORPl*A;2 /f/a,|UsK<-w\*ztBeMmk#lWޫ"+E*+KR~΁CZ}mIN7222ڷowƉtR333Ϟ=[zu;;J*}78̙s=O@@q!n͙|Wxׯ_G?7dhnݺjŋM:u$xAΝ}||r֭;w0"Q .lӦMhhhpp0(۵kl4}_zv_wQ6%. 
pIG+63BR޾':xZQVT! :F̠w*pKYKuОXؾA&byo2Kq E5+z<:&zLg3tϟ:qD od.XEdy>s=j1 %0xvׯ_3 ̙3ъ㇚mffB8((Ç5?W%R 7oѣmll&M#B+Wp^-[vxɩȯZD  U9$99Lspv7уuń( =$%%9::"m߿(l(6]H󃏪\e¾pV"pye)K_\)k8la{F/iGQl(~T]&H'W|GHFFܹsǎ{ѕ+Wj~LMMm޼Tzm)UTHH}7 3kڴiO<4"رc!홸wT,-~IzQlcK:Y)F`ѥw^GYVN$w}|%̹z[ʙpR{K6ǿPT6[+RL9nt0Pg-0&?[HJP|v)'\eq^PERbKI(G;7O}?fZ| ⬬,Hs ܾ}'NT7j:o߾ ؋Q ajP3!]=z4,,_|MzzEެY7&&&2 ٳ/^y/^>|lp@pڴmۛIPdҥ I0t⌺vsvvEaX} b$zdWsXEd fٖ]8:gcfgʚޭsJŭ Et} S"WvupAط*s=CPWsO)>fLlAiW#S,Do*WQ/R6`ݺu]w mmm1ޭE`` Ty:  xRxO +VĔ1FFFfff t466k6W^:uɓ'ѕ|DžYfb]z@ 2e 9ahAΝ;1ݸqyk=fe@ɃIII5j᧛ܠA4|M( dtMNLJ2kONO[o3;cQOjt)G R~Zխ-YWeW\ĪԞS<~Nt|ނ9M8Vջ9TȕԬq"Q_-mTF!XR1Z̹.J~KGb)q͸\>K!P E?mKΔJ9J:Mɱtf_Hؼ;(eLl¿nWEsF.6qNŴUKLΘhV.p7ַu1jJ.ξz-3OtjMDp&D\$ͣ7lcjg>z„9YYNeS7bylʖϫm]l^iחu}ǺP6hgަYyS#W>ɈW֔_ܴe n[̤e-3?EҧOeC |O!~<_GpxcNTH!gI嬴 yv\.c,ahX0f^Z|=Klֳb.cX5 $ B*v26`zų\E9]ݜsft }eX8'hᜯQmxz׫{Ovsuj* ucWqC¢6D+`afJ?D<4x=%S./ዎ!3D?89$ꌩ)Ԅ4Jذffl>ejʖXR8E399rvGSz5ŊIݽ+}'+V,T/67|ZU1Daf_ͭobmm={֬SN$W3]BP&-.~CzD$w ȑ)5:zjc[lE磾e\2|V _ug2痽'~j*Ăr!?D?l(J~&&U^ssYI2+e1?{[,6Jdw#G%|ݺ)|}jGFYx咚|K)٫ JлwoT~̨c\κ7qlMoe4|HեᨔĐX&?RŭW.VPW*-.qMKr̺Ow^qL C @pv 91u}Lge+rXak(d2?.b%^]mmF*0 d/\^LmIT6Prz乙ҏUOqC[@Ƚ%E,?K$rŚY Q5OHH~)ncmZA,fwY3ӧY9>.E*kU.}$%=N,E% kNf匜+iYrE2#۝>Fl.JDZbfCl7B |'NjL TRˈB85S+ʑfpeCŪXrԋuXNkguV0w䒤l#cyvRʶA1dI0gl}$9D O{ϝҺ8) XQ\nŊ(xҁR̼ɖ^Ѻx{#~K[KW>7φ ^HX_Lj%lK (cbn-(ʷCǼ91I,ŠFަF7jh45.ZhL DEE2l-]ڛ[e"RDಕ2&>I6^*\tr.Y#r{ְ MN-D\Ϻl{{9Gu7@E%-}Gw$"I(v(WWO_ţGFRhm-VfGHnlr|cn,T^j{˱xXݧ ?[vT~Rǿ[4OfS+xNswh`c9quB|2@(qC 29%Qtlc֣ }+)rx} cL̂B"_K<= f֧ǹe WarAFuK}X"evnHRi6ϯ꾽{ݻ0}_;#k׭{1վ1R#Y*k Pdl4¥z1X&)}rryUwCyr&Fڒ`/Bqɯ\ ;kYA؅OwBф568cDqH}+-ͱ=zb Y)= !˘:{N#~3Px]NmwQnŵP6խnng}w)>j!X.?5.k׮_151͑Jۖ5W~'O(E YC] žwFP _(0ba#JMmխ2ђNUy|~=H WPvw 楿x* RHsW/Fh,3܉L}i]9=6 qnn&+`avriBv6tﮧfz̛ۘqs`jtr+C `ժUii|6W7B=LQ}jAf6G9Z6y]F>|BF).j׮-WMƌciiP(lЩ=z( aqիUTUT*rUA=::I&lذΩoB11cgV\*T_| ].],771%dRE<ǧa[ŞCqTXJ˒GDK˶` Lun3G}6,yvFީ s1baz*:W66UmޫW/ gT(cjپxrrRNNNVVuȓ$_!QǦVRbS}E^*2]3+Wedɾ`?DsiMLLfϞK.ŊCRv|\g:),,!˗@O>pFp4SNe*ٓӇN'fiӦ1Vرcpj˖-տFAq=nqaΜ9Îq27^bčrC~E͛7H"ٰ022 CF Hׯ_Bòp6mڄOW D4xAΝc*qCc͚5 
ڭԩSff&z1=,YO7p)=˂"Ȉ/]$Hϔ,Mnw}6M,%r1RK\>Ą=6ܹs{f)cB*Z(![,.>^b$=FhYY7Jgw>0*gnnnχ8q"ߑ#GŽ=cD:y$\ &"`-Z" AI⌌ h;t v" ,Z1cǎIX{z v|/*=zͤI`_ȗryNeˎ|"p*ӧO3"M6\{a-}*))Qsi bŊL\-tLWV 7n|\C+k2;g]^N+ыp'N=.&yeuIL&zsK6 #m:+Ƽ6&%%Ϝ9Y2w "]rlfW*SϛY4P/фao{|vs8cSM,u?()|`llA?Sv͛@2mڴ'Oi>v옾QF-YDgŠJٟ{kk5k4lذe˖gVŕV,H, .x0m~? >k:7IÕ׵ZE(Mo"B"ogi>.N4m4ν7BGgHUӘsfGG67гec+n%_U%3uܮTvFT ]`z8y3ϓ4'y&}r~o9EM~g["ʰ 2rK.-UԄ  \ U]gٳ8(oh&,Po]v3 ND8jժ >ŋIHNNvqqîD{.tN:PY>""bժUt4O4fܹCվfp=z4,,,rSRR>|aA@g7n2vŊ?硔?Nyf*UjԨw^ LAQ(%T}N9FR+J+55X5gF,`iO>ݹco,6G/ |]L&z8O(BaL:Z[O><8Wqȗ7 ~]mBBB6m ߠL }za0>_^{bt"j_uօh`ccc]M p=:$&d-Zl֬ƍr҉vW^]|Ç8qqq7իSN,A[rҽhkk$N.]@M4 C :pȑSNA_WZO={ N]_(ᛣ+ BIȅʻ"?"$[6ᮺc2sLLf0W.fZU4e:Pj%m(lA"WUaLs9ůhH.Ug/Sd;flTv49U{8>[[}8aշ}kU\WέD.*=wǺ9MlvX޽'0ӧw?ճ͛_K̀cLӰ7|}qnI g UȌnݺ*U %,Ү];\H^X1 kitʡ>}>lذ¬[0bRM{??Ç3 Q=A *00PVp <̙3˔)ccc#Kڃ0]*IP @0rdB*MPw鶶 LjuVc&C\7Vlr#5I -T)حJfQ~R$q4rU&(p )mfdLHebNբysO&</x%}cm,QhBN2ј[:HQlͭ]τm)h>j{ЫNwjs^u mdl^  >ӳ6EBR˺Ǽ/aV0BԻMX*sK:Ehۯ..z9|wqQ__xiѢ 6SN%mu*` Zʆq~Wp$5RG^Al ڟU%/̨[og(*ё- Ml %?|/dffzxz2?%0W*BV)Pj*%`‡[险6.+Wv%mU*h@ĉ}E!lbSoJxisx?X/WRxl6IK9L@ DEɫ"%']/؆PlݜW;991A,^di)Wp8&^U!P0$ E`\7jUvU9w7>lFTtGp"mM,sk6)SثÓ9vik$ђ5\6W5lU2c"7!#ϟ,Mg.[9*\g:E #o&l Z'<=*K|ˊr=:}Ɯa\|sk;KS#y @]^ʗS|S,LQԐ}#v278ŵ +ˠޘB qC >YY;'<3G.7-1oedž|r?Bq痧%$[+{Ϟ}RCq7Bѐ77S3;Qs_;ֵ.ib\R,"A,y+1$d`Z]r;z17YYuh1]gv,C~@ qC oά6u1w_7Wq…-Zԭ[/5=/|H;*g)Ƹ9.(}pԱA"ZӄMQy2Sw3P{pi͔P ;l6Fmc(W~/@DE ۊk%bd_SX J!WJrf-M3mHGX@anվaGN] #l#SamE>)RܯY }Ӏ+C}~M\ܨc52&`qC 9$bÊ;%y.p,Jy(O?'t7ݭ[7uL5O(X,xwŭoes^zƲ(ۮ\Yf=aC"嫴9Jy%q,2'zD4B*QPX1{?v.,^k9p!@((R;6J/JyNr\U}v)wKT5rC%m}E8E< )S#PmxofG D+cvOV?㍚Zg^B`CQlr/}9FJI釽E~䐹ze7rA˶T)k[J  DEūՒO߈Ó/95Wu|i|W-MӷxM SlOG//p( 233vOyɴRq9*v)됛8+dU7@6*Vb GȠ{%j׮MR3fezzΝ;˗/yC4ٶm]ښX!7c :J5ʪqƴ@,"ʤUpMΞ=[zu;;J*=~bŊeʔqww4hZ\5ٳg h]tѣMNcTopʙmb+do(as(k`B*0օNٗ/,Aj$vf֡n7B?}ucZ0l+ml3к<GGDŽ(76ԩSUT6mܹsʢ?VȿH͚5!tZjuD[[!C:;;/X~2?`תU+,,Lo?Pw􎹹9Z3g+*hnݺ5bĈGq-ZcL!ѤIOOΝ;w sClD+?G~tD <G!`dwiybWrtw7}keSL?bSsrr _$D~nf-;UT(H{mc/M]k'JQ}FMQ<v쮽Y&@ Q7Bs\4=t:uT1nݺU .2xÇCfʕ+󈛴2e<{ޞ1i#˗-[{nٲ%((X˗/ @800ݻ;w 5J/]MqyCZ6Eb|:k:jSaF)ϭ3 
',o)P^4]ˑ44+AFvTnz\!NFո6|#w?}9嗞T`lnŴW.1CFS@ i荍ᕡ`/^)S"##CCCV eØT, ܹsǎ{QS6&?~N:T6PB;v7noN>/0J Fv]ve-Djݻ$dsf..[oR _54wzޟo.f|(O+eL"j5?>/>+)S( q ̧{:ִH(E~. yȶ2=y%(T6'j:#(-,G!~ Ё/_  {6o """VZE,LMM9MrۼE۷94h?ΰ &0qZtuuݳgc5 NMI qfggwҥG]VmğrU66 q adlnOw_|7N0nfYL+T4_(P(Řיx6犲&jz3jYBF]ŏ|oK{yya)_zeooP6pp۷g~Bg&;vs/_ݻlru&kٲe7nؤIƤ\zuСm;w.obcc=< 6Q,*W?>opgΣ. _Q_;{ ddg"+7fzj@0eM6߿jeC{q uΝPE7n@ hɓ8r:|eFQɓ' T6貵k&''D"P ͛7~ \]]e2$۷/ bŊ>zuްaCON1K ;~ 17eF;&H/ &嚘UeFf4 KHtǦ2nymZ4.׮T J>Q6hڂ]D?-M||<\=S~;T:DEEĸSN m͛#ko߾زeK:r ;ep(4o'gφiР{ }6!!ggv{zz2euhceenݺ-Z@t|$X,ʽgZ%.gEJ p|~\\!q8̛"s5=;c^^B1ջ27C~.{tq,J:&j1 *Pseg/A["J ~JK'D"C%ڏg,V~W\΢X,.'œGǘL,Vu+X8CʍP+]*=cagsWcma`#R6˛SSXJt(H/h{+DXYmʣ╔+zlFL|6/>EBs&l:;WGq#RWٌyx/mqD ~z!:xڸF2Lw$g]/lbǕ(.W~IaLal6F!V9%fn.H-kln ac>@ TqC A]<(! boXPJTnإۅnrҘ|*(.kj^ͼZg㲍ԍRoC_)ϗ*s(kkG i_=N 8DV-X(3Zp$Uǟwi̗APiXyn:3ٹs&T/;O(1˻3CV|7p:ta3V@"n?xNiMmxdWyF)nÿBxROt~ES>hɅ1.!EbG=l&TQ\^ciIiיͻ@ tB @bi]eNH$ҼWYh ~o :su}#+NӪwe7z `!ET"piBȚuM|gi 㒓0ъ&w q'k*RR^e@ F(Xlٲ;wFΝ;/_?~YYY7nlԨѕ+WԟOʯ2@T>L7DsW9˗/Qȫ[.mC ¦LJIIA71VL&sqqY~}n/_G(+?_݆TbQZ2 >ܟ6 _Q03cwҋelX TXXX*Uƍ0y[nծ]NBU>{,&&iuɯ322٠ō\#rJBܘ9+y <۷B2n'|I/5%/W0*lwZ(ͨA%@ [l5ekNl]vvT*?~UkralkˇTANG)J |oKޢElz W^mٲ%N:ǻkڰaC׮]ԩS4nKZim\]]߾}kkk˘T6P(lbʔ)zLG"QQodnיݣ?ˢc%DWnΟ?ewJ>]@ Μ9sIII999cƌqvvrիW777^Y6%Kl۶-Yʊ |}}+TD H4AL211qssáQ\VVրիW֭+PH݊OԮ]=ٖR@s>LnӁ]{՝·!57@ Hkn@  "n@ )!P @ B@ E "n@ )!P @ B@ E ݟ_ Wrr7wپ}{ժUe2ٱcǎ=c>Ovݻw/_^P ߹s۷]]]--- X|s3r9zʕ+ׯ_qㆺ49u=<< 4 EjԨ4mǎzyy_2/P}.88xP-C=s挙ِ!C`;wnڵ% S "HBt3(IIIGf111ժUbT̞=͛666ӦMC7ׯwtttQ; :_~辫W2&aaag͛ʕ+CO7i?Ԃ@ E E><N4iŊp waTOp88mۆ0 _ :wL[ T3&9q5b"<}rb1/[lIdu=~8 2&{@ ''i;(PwM  ?=vA0 ,Vjj*\\WZ"p'OV-"~͍@ c"z ٳg!Ir9cdľ}ЛL\p… 2-ESpPЂ B]ll,EQѿ;˅3h/U\&5t ƍgaaX5XfʹiӘ^:tǏ I=Gߐܹ~@[vmz۞V@9s&''ZK.VƶlBԩS:tr{DOch=7IIImcƌqvv^א!C޾}+ ;uaii Sdɶm|"VVVFY, *0qtt G֬YC b.%%-PztA@0[)>uqqA+TB?Ocll﹡[Xǎ={fjj_xq}G "+7@ JAOK@ D@(RqC HA @ "7@ D@(RqC HA @ "7@ ō@ ppp]\.Gt̘1 b˖-oϯΝ;(GGGͯol۶.bmmc:%22oƌtT(:u*mÞ={Є2qk~XrJH ׯXgVXήRJ72RZ <<<@((? 
@:u Κ5 e˖լYlٲʕiɓ'mmm+FgɯHRR|pƍaGq}~M>@!{(ڮVڹs0&Ahmn7o8qڮ\|:Fݷo_$ ~bXN L07o܃ͤI`{Iy\.?~<:eʔ͕d)RD ###OOOSSS+44bŊLŲlԨ%Kh̜9ssL\PW BikeeEQ Cڮ4m:i @ ~7/[,88ŋ[XX0 S@H̪UZ[[3&:ddd̝;wرG]rwP]lvuoJb*|x۷9 PQݜAZZÛ5k$(ڵӹFg8=zt6M 8vj?(˖-ӹdРAǏg" 200Id2YӦMgϞĵl]~:|ȑ a$ϲ-nذaL@ E|.̚5K,ի)S\@ȦM߿y۫JCܹ^z )d'::ɓ'D0wwwCBB]]]!bUwƍCUp`{]z9s0Og"H$*nŹsB!Ϟ=+]F_`KOO?t,@ "J~fҤIttիW2T8pL2ƍY$<<:ɭDDD̝;K*@@?baaajj@TTTdddƍ5GI?t6mEh"=|Z AuuFu 1Tt *ԫW}ڌ>n~**VxMXjT~@(PO:@ z@ K!@ B@ E_0@RFٹsg&B Mzzz³zc2@ 8/nR[0@R@ ~Qp@ E @ B@ E "n@ )!P @ kh?=>H "7@)r "7@)r 2BŒ6IENDB`parameters/man/figures/figure1.png0000644000175000017500000014362713620074530017053 0ustar nileshnileshPNG  IHDR/Q)sRGBgAMA a pHYsod,IDATx^X[gf%DJDQ;~v{mvww]! lL|J x~r980@ D#@4A Q@@ DI@ $M@ 4A Q@@ DI@ $M@ 4A Q@@ DI@ $M?Y UU@d^v޽{4ÅMToR>këJʼNzSATw$YHlm%8u`zM%cbV2Dg<֭[޽ XEiii۶m{ABByϞ=;uE.\5j>|8ԥ_~MMM9lRV>}X  iRu*A ׫aE"/&P T{kE;#>i$\z!uINT;EH ňS]U 0`Νaرk֬@```۶mCBB l`` cM* ŤI `kk9- |}}yJ5qD8t аaþ}394(lu.9=?#/)?QozpMPfHlEۣsn)WgC،oȅw5H h Hi@X@,j}bt28n`SEjF}.FQ)))˖-0`1ck8x !w9rȄ  jРA;v- =aQ3233N:ydH@[xyy={6R2!piM4DMas57oތ~ŊSti,5I>r=x@8`pS"@ %ib2Ǫ feP2:l7MA@lxu*szj*(Y|#Jb$mxܢ R dqqq{7oիoܸQQQQݺuL۷9ȵ&s۷o_dIF6lذ| Baw~aP3۷ͻveB >F!JuhuVhhaxS`o @*"4vFdso>V; 1uKɩdZ(i:r)gF٦Ɯ fwH`$ǹKA\m7P8{EQ Ķ}ұLG]#^h^[1yc|>Q Gׯ;I&NNN`HyM˗! 
&ۘ]Qy9'Yr&RQmNuOe bIn*ůK{;}W"~12Wf ƵcP~fo"V6(i9>[d!< .<~<^^^2ׯ J%<<L(b8,, ' Q@@ q֬Y3zh0!CZ`luJES@~SΝ;U*ccufJT,N@vv8YoOfC˂lȥP 3_jFppVA͂!:P K*lh/#I&v,Fpcg>5YDU_fY?PBl\]M`hu8LB?J+C}4Z>FQ3&#{mG>Q9g`˖-OvuuD5j8vD@ VaҥpA޹s-G.fv6Ϊ#iZ 2a/ԁu43H;uA}/!awaU}p7ђ[];vX蓃k=i$-:\} a5SdQ'N<|mmmymڴi„ .]_>B DaI_A&*j{߾}7d߉IJJJt!C ŋRw!JWDAKLm 6Bnef0;C"8M9s?*&8v6$p TH {p4&':{M*I'Mi ALw} g_۶mܹ3;;ݻs-'%%9s\rǎ#k׮NNN(XLJ0l׮]%K,_={ E֭Vio{nbbiΝׯGFFr *Wƅ9<$ݻVV&Mp[iKرu+]3&O̻Qx&=vʵ;m9?'Q\"+m#r߼GKnQ̓עE kצ(v YvQC߿FiLVVV\P(|%V9vEuЁwa90#G Nsqvv @fnwE $WzJO$8EQ⩲f@[e/t,VHNB5 f'^43]&3] ;οbŊ{W\v-88СCʕ㢼`oslzxnܸ26 uV:n|S5IǎCBBx@ ?]&U^3=>Cw~ DKXг<6kIIm)ulaʼn*~&R|S isӁmi6V}z @ [qqq YD D DI9s3ز24c ە.m \IP$S5lvF5ԙj( x|4DDDlݺ OOOCiذ!|1 Mmׯoccs@MSlYHR܆O *Um@Zti2릲F3bwqY3܊wd4=DVlEr{l-K$̉Q4ͼU-cM%? 41ђwK.5gΜ5ɅO]hYj{*T+W|}.!n8:t$vlpff; ӠAyMrN'RcذaM[l~jÇN*J9<.ܺuk8Ǐ VZs%@ ?n^jbҕt''^?n"txo޼d˗/j>ڵm@rP_ 8x, TrfEM; 0dȐ\gE DaDq@ҕDױ|> 1hrRRR<==xWA^J,,,ʖ-kmmͻ0˗n@j~@ A@ DI@ $M@ 4A Q@@ DI@ $M@ 4A Q@@ DI@ $M@ 4A Q@@ DIBCA +80|F^~Kq DW.8 0axyMԴi+Wp@ y]ds,5>i(j!#wMzpttϘJ3777HJJ^zvVZŻ4B~䉩)B ՠ3Voْ?Fh>L>w}+aʗ/?ɓ'X?%JO@ 5yCQLgTzGP)toޗj>RV?U!4.1a.- >Q9?ӧO v!ClܸT~0`̙3x CCCl 5RRR8 ܹsժUb֭[y@ \aaxaV?V[Ug 6VNٓXfr܊$ W h ^ kG޿ͻ>O<{3͛ C(5k֔dt 6䢀Ǘ/_7r@QIxQؠ^fo8s#t; ԕ(flWA}UL:ޱNz1 }f ~4u2RVTFyt!Jc".V6DEN֮]TR#GիW9uԈ#1MltܹToo:pѮ]6KP.5~(Tu=E 'JvMP4"<*9K x{ZIajuKi*ŧb+$rB.#{RoIw} ٳ'ңGcwPvibddsѿSLiժl2lذ$AhG B@ Y7pE:c@ ~8?Q8aa]&#W1*Y@jKSZ2lz3 nfƧ+\  R}xfllMc&33@b.*W`CHyRf[iihZ~ŗtiB ķIJq3zt@Ia%ȥPru%eo\%&5IOO'177bjj ђd2( CR\2yǷB'^ @hKg0Q&Vl8FҤje2(Mm\DKzl] 9t묞mr[䕶eߺ'bw+A2BLJ3 gT*F`b8\\⨁ܺukر͋UVYYY-X@OO qҧOӧO'%11ի=zmDATۙt?! 
n@ fLt@" HU`>qTߊ@|c\Ŝ!$9# CP2CJF}h&WXrqRzuJ׶mۤw6jԈ!\NgggHv$  aL2}]la„ gϞ}򥶏 @ݽܴ/A$8[Y&@ ~%M8x`~@dkyQz;֦Mޅ(oCUVQO P2@ ĮQNq}=銍Uy@ ?< E˖-qoذvL@ Dpy:oy-j){"@ TkQ*2tgx󛐌"lי7yMD*2+]B@hA&?bYDzGNjm(\7\޶m[@ Ν; OoTe"ަ=#o@ݿZ+Kئ#B>RRRLLLx@Ə;RIS-_x7 { kyCdޓhIw Q`@`]/J.Ԫ&=McInX2a]BoqM@| pcQ񭨼bJᬾէiK!P Tgsy/f̤f_DI2g~cQ0a!Z-@AP A(χ#A* *^r`7rjh(@ oZO'/ y:"Ҍ'1L$ t qi :<,khYU'^GwD^kAr r3Pк#ay(#]<޺WNN]P HN"ׯG`|^( iRټysϟ[@䋨@1o|(W7/TڣT(Uo'B)3hދ@ $M0aI?޴iӄޅ%?Xy+_>g &W-U$>mudۆB̶䱊Q;7c5M @.P_"Ehhhr弼ϻ4(Zj9888qw!򁢲E}ÛߊxQ0 ̶ Kj*{ 0*B\?rqŤE>TNz&  -,A#(< ii:(((..lٲlmmr/( ...R<d\yDDm! Fν;vLIIqo#P'gΜiժB b.J絇(_R;@ޫȠs0ElIEU% itúV`wjT?EAً?E jM0Q\H[[ʕ+?TǍe˖C DBi:66VNBK.VXy@܀ĹvgӧժUYSfMޅuk-6})sZF^SgOUlC&tծC >"_Tq̈́' |J{K"cqڼ @ {ݰfP$zzz8.\,ЪU+%z@ !!0@Q?8ŋ/[웧Kxazz:o&(ʝ;wC T. K &˪UƏ/ 7n8;;yJCp.`jj Bzȑ#GaÆ\.<~O8/p#ș-Lzjʬa}~,#nf9}}Q+I9,7Xݷ8a4}Tm/Վ\?KKowo?j(kHMM[y0 3a„&M@$++qׯ_㾀sz***J&-YFpfNʧ|\ۀqfnFGG2s~v֯_υ% \s0r.\uΟ?oMto.D뼲 s4hSZZ&+VWf$IrV&"[n\J<<< 62A@h~wɋ GA*&6^02<`h!mtVYK"`(@:V->5 EP/~IX01772䵧O޼y'MW\.]cxq_-dccaP9o޽;{˫9\WٳT_ l{.X`p}BJ.3224ȥ &Sjiiy!xn!iӦ 8RMtoo*Q Ӆ==~-U$4ӆ8|0x [Pʕ+a~HNN5k֖-[ Kp&=K>-|] 2sLЭ111gϞ;RaÆB*kҁu:\ .\ iN <@~ضm<K | ~6PuxxAZN} Uʇ]TݮtK1K.20? 
-U7lx>u } j |0g/^ hajj ***%%P0oYYY5#GZjyHkk׮X(/͛7IIIA|\6@;N>$9qqq!EQu6m|Ϸ&xQԨ&]9P_~ 9EUѣGkٲef '2ݻw9j@~AkΜ9pj+V<rP8*PP~xIFDD@!Js_L& أG8`('>H _+ O ֬Y{Bx޸qL2p7o6!5|r߽{?C³ n ѽr=pƒ wr PTkxyyn{sK(yD v{U.Y9*p& ᡇ6ZyǏZ-Zp)&HNի׳g'իuqh) ]2waX>}Gׯ_Wqvo߾٘={6U"D>D]zJbl| P WWeVbqtj-[|)j|C~9_>*};P{u EIP$>tdBӻurީS]RD"߻6 "&22b!;7ǎzzz\Dx;w ֭[KÙǏARpPP!z}rrիAQ@ ˗fݽ"`pM4'PF%yM(ICrVH Wéq7@A1c +yptss333{yr6%508\54b :tTA^'EGc&nn^<-xI.Y$3H>m·|@ҤH?HxJ ҰsNٳA|BQxpˎ_]*lӉ.M3*9Z7,{ ;rfcP3PIi*ElM`hhȇtMŋoI.0hР ?I>|y69۷p PBҥ,Y&NDD6%|94A@tmӃm˗AAf ={R k*B9fopw\Ӷ> 5jąvs6('GGGs'cnj8yd^ɲ]_Wpuiܥ B,heH a8lHDs^GAv0Hw3}̹m7H5@,v'I4,Y]70k,?Uoqٞ Ӆ ST$ۼb]IC\R(ĔIvwN.ekBbGGQ fPSF3.ޛ'N;w.M+M8K\At.J7nb\ \TΕ+Wj^$$$h{(ɜ>}O>,g{\iNJJ]am۶qe7nqr@9  ߽tR^t7y n gs|ݻw]ϟ?DpsnCY4)jBxo[5}6k֌n]BغdzwTqǵ+ lZpA7 \w_~¡U6wt: 㧏+F1 y̙W&ȕiӦAV1uԭ[?~\DOOo„ k׮uqqܹ3n׬Yb lr%ՃLeСCB˗'O^r%\n mu,} )cURkϞ=Q5 Vj!+W̅W_ '7ep:˖-L'wpD"nsQ Xj\w 33?>bĈ3fh~ի9΢m۶ ,v޽;h8Am,l΅:uon>y&N HMmǜP}ϾlNռMجK/ )255[_WCgK^cF Ju͜JGd[  YF,&|U‚):yCӾ}W^Y[[[Jܸ adɒ",,jիCLL 7NNNڵ);U6|3aÆ:Q(k׆v'vJٳ 88pFz-@zsg{=S -[Yڨ-ZK |urr2(T4Q=0t.B?J_^iDP=Yn'!r* Oph  *!Ǝ B$ +tHNQ]~=ۨϜd Ԡ@|4M]8E :WCh3V-έ6Y!X])}Kf6Zcj|B)cәhދ(t\Ƚ{=zĭT(h4ٽ{[n}]L5A ۴F*ۼ&ce NRa 8.(J귮sf1ԃc@,iP(>;Zrz¼K`m o\RSE 0*B|e{38P"R_M| -,#O~&i~ k53F;zek֬yf!!ƍه@ԫWʕ+DQ}зo[#㢩l#p4qSWSh _pjv 'eIz0A [&?f!(LƏ_ݻ;;+'@l޼w!SSFS g(cCdS=fmIKi\ug׭ke.$UOӃ{Rbԫ@X bŋ/]bggUf7N͛[krAB3fLTTݻk[| qȑg>>.[l`` lrHN<ٹshnl?p@nx$u8:%%ۼVݴ9[\S3 YU ?5ap6:(p :k׮urrzٕ+Winϝ;%ybϞ=}]|IryDD {pwwUs77NУW)s7OF,5XӿX՟KF@JulR e-atދ@ _C&$IP@|*`vN:qHw%7>|Fcq?&6Z0:&+$(ۄ%C+ ZoYML]+ HUԪ'fO'nT(TRzOa?yaQ(XI=BCC6m:bĈ͛7WG l.ߏ '4N/jXPp,jZ4l{|Ka~%p4 M8 RS>|aիbx/@|ѣGE@^L4ڵkb횮ߊM\\oFBBgیE^ Pۡ*wN)e9t~+bJmS۳UCIGh`g Po8\]P! 
6466uVLL?3e>wpĉmڴ</^رSv@D@W@Ι!.4`Mgm wiѧK~ ~=ʉucW!(5$)FVU&~FFFw9rܺuy̚5ȑ#a&LyǏٰaXBѷo߹srɊp+W\hQ&!!!e˖Ϭf͚Ɋ@ !s)ԩSI&=ԩSBB 066 mL_XP(ӦM"@}_詺ڵk/[ HfMJO 쪭hv9ѳGUa>~Μ9@|=HxrzYXjuBvL!I+0iU_;OWa KiTudq9hPR8M<[r=7k֮D (!C\xQP͛]fM ry=*NS11m?tC$**AFܹ30cc㌌ xȈ" [+ҙez*l~ ,{}@%daoSi0bYr uy:ZμxGCxp4v21a?Gg3$u1VAT|p p31Q˙Glp>yKI1WGLG2IYoVM'r/vT"Bö7 H&£8#ʚTD&0}SFș%C4fPc KS%&1wʥk.-r5A\wP(z%_X1(&?~R655?>IϵSKٲe~ѣG0`@lllƍꁼL077v_(ŭXN 4Mzxxq YnRJ >E}l{ZT=ᩪ`]//jժqWҷoW8~+V ħ:ur,^ EqO o`.(Fr۷r#44 ֻv{Ƴ.i}`Ǜ8!GI(54wXgUKgyQ^[1[MI)1H|^N} 2e׮]P211w| iiinnnT1cܹ3I j׮e +? Q@ԩo h_&sD`󡍳a?@loJ l~I AIlvA٢b^[NwaiD/}tB/k,}>HV AN θLst]TrP$SL9v؉'´fϞ ҥKgΜyT*tI@@H5!!!\p-ZFni˼q6RgUF9@aKڐAչ7Hմ8ֵ,Bf 8B)sHmymM쭍u.#XY3vG 9#yʎookprryɓ*ǑG/^9df/$-bnn. +VE,s3"p@ClLL H"F8PFn:QT.ir!;s ű5ʉ;f$TcNe> S4iorGO ${ @ؽ{wǎK. ~\*4 䆹rP(@FnpRFW!~ /ub|EN"wI9J9IvPO@[72//Qbw a_}jKoa(SM !Νy5ŋL& ܆`ee`ll,!N?BQ}#̳#N+*ɹT:A & /daa9%y3|]4m:=>VrMmT EIu@֐^l׀3G(Mm gϞ})7n̍8ҪU#FĐ$#.*VqΝڵkP3*B2 K6gctƗH )yi2ތWX q;0A.Lx'/BklS!Q8i"`o g^6+K;p,ólbnU>]"|(Bů@0I R 2vTjg Q/w޺@H )3[_qǖOt @:']%>GqB\2sܢ6^R/jnI%V~cpV% qc" @j4A} AFE)RV;[j> ,Zdh5&D@|i2mji^dr4)BTbsr:HII177/QDzz:/_q|ܹ766ٙЅy晙%rww/^[.8mllv9Oe.YXK Â4@2g~;w-E{,ƌ4aJ}=&1&pىԯ֯vj6 ɔ'ƮnͨAp;)A|?>&&f^^l#BCCǎ۩S7o_ bmT*}ttBʇ0di={yaÆs XWZv|DQa͇[ sҤIK.p+~ O (d&RfC#=J`.RQ߼X>RGƐkXn. 
v %\ W) jTH 11YML .ڕ*aA݌^TJAdP,իސZYYURPܳgAZZZTTBT*0`|J$=z@~V}|| |6hР^zǎ$/ 4nܸrevX8RJA G8q%өS>}&;@Q !ZoZֿ'Kxf:nVL$y|VFv#{+&5t 3|O q|gEgGpHI{5*KUI2]$KKw1zRiyr)JsӍg̘ެY3SS9sldddkk˻9xyҥE"v}rʅsā޿!^d(իWpCɇӄ0o~aXH߯NBDCRVQ &/\Q&o}V6% H1c]NRolR1ͱ&Q,UJKZßA^7}ժU?O87tɩS =z4B|Jjjj.]L 76GFF^KFpqFJJʳgV^b >Hg{J%EQRҥKΝk׮QF-YK @[fo*/Ξ=98zh~xî^ھ}/\h;Չ@rqt +&7oCrߡoyrUCӂ^𿹹_8 a0FJUL@Fv?7Ȗ-[<<< r~xY[n.: Eׯ?oh۶-yS믍7֭[wi: 8cǎ kx ;zZD快į>~BR#CɈәNg  #D@|zΖfpQ+O'U$XqU&_`],L$a1n9sT*ww&M۷wܙ9s3#5 aMbaawaXFŲyfCɓ'"3gy x`8mqs+VHLLLII={6d"(bPnڢE n_&x H]0P ym\\oƍkذay/ Er\W!C[ȨW͚5ksϞ=j;4irϘ1Avvv&a{ѥK$ЖQ䗔U%RJ*\~=S)o|t(PE\ 0f{ ,8Nֳ⽕ZHUjGk)vQ|.&-(B3*F*Qq\Iӎ2H;+&"SQj "a ;kh[teZj?4S.%|W ]@.PWbJ*nnn!xVݻw?|z`AH$JuرN:quرI ۷]Ss>EWPq֭ʯ5ɖEZ3x@g&RmmG鏙DIM&Gͩ"IK8ZO0Qt#G5]f |4M?dЬ5umhbHҹ)*q9&8[,.<o %D5pp `;98>;w\dɩS~yw [[ۯR=YkbD;DS_ѓOlɦKp(bR737X<𹽂d2ivOJ_W56k(4TNX05pa*|K0o,ӧOL2o<ƍ* P(>B!\O]G5 9Ռ^w=WJ';UQjMꏐfm )iĹ$7ͫcĚ L.%c[vSn2c<M bƦF$MS c' ,i%&E pAEt QHA100OnHUN[L3g>===ꠝt hD9 ;;oFnDDDptɠM_NhNBb/NL4fSH´k.z[[,c [̠xu;RXe&ߨ4m٘[}WS3&67b,'cUJ "2Jq%DItȂӧڵkٲe|ŋ-Y$[ G^[7>{pBZjԩIKKk޼um۶( \]]5p@\@p~I&ׯ_kz.-[ɢ3dDiL1s2%J#3^CObTZl8!Eb292֖cJҌAJBۊźm1 Ǝ˙/_Utb~̍D쀭Tl& ؕ9ʌ^p\TPSԺ^xaÆRqOnӦvѩG-kL0a5kּ}66ڼyٳ a&\bDO>}8;v-޿?. q"9yZYK#G{0}2ߎsJ3\F`^RLxSb 2|OK}Ae(C )_[؄}-x(-Zv齪SMD]$Uõ?w8|ׄ# /e%J.|ΐٛ~9Vք5#O&j["r{?Bw~ h^˕+ץK1%""imPVVVyeym$33Twzਨ($bq^ZA$%%k]O}vP?AAAGi׮*3YO&ylʗ*]LCe|$EۃIǞf-pᩩWײc rv̰XifTJJ5"W0_'(4}FoZ׾}{Nl)o7Eĵ0=ZEcXv&xc$PP,LNi";D7|MG ՠ3~y湹.ku 3g ޺ ? ;; OD^[YXX?۲g 0w\ss޽{0 :k׮:u*2$~O%Quw8wG]Dq*cK0Q>N_1@@.oU_}xI|F }ۦ8],] zU@КC@k9%`->_hPɸq O6i֯_piӦN*fG*^t_wuYYY{"ɢBOZ1,KݸHIWnq)V ?c%EתWbo%03aI8ff9cxUzٳfs}9G%&ϪWt<*:[}eS:+JO` \$ H E%Mu#FQD"9zhӦM!]&wŋH2qD Zlmm5kرcE~ˢpL Db#r2rc׮_o- L키1DjKeK=Zq<)e3P$Jdivj2JӴᣘGmMHKM>}41 }yZĮ*ʼvKO2sB2RX2bjQ ^]aԲL.A Cl߾ܼ_~rIw"#G/_RJ)2SN{?l =Br\AYr[ulp__޾cY 8#}ܹˆe*·NϳWr} s3V[R.`[+)w0q՗juC&d~""X5y]ʙĔ(Lp f$drAX?y{|9ń} -N}:9,C:lyyD59zH,,*ӣcMS@@(bi@YJ СC5kքv˽{tW 5Q41YX1t|pދɮ̛{Xw{vX#N9 T՘S]U21|IsaO;g&|M; pǷ狐Dj&_K?b ]cql[DMOW2v_vUin. 
-._%n]gF¶A@ KG&FFFA@TTo䆥%|r#$?,RgiHLLF(ĝ]g doz :fLfĔr-叏B^N&R#z;w;vM'jmܸ1ٳyg2vXD FP($*sU!?LM=H#`2JL&CbI޿utSv25޹7mX3[Phndkn,4ἒgeԂRAԮݩSǓ'_r24b%e[7R67V(߫* k`b qun+兜ƞBA?s4+Rni>6eb2eĭmGgWEqޮy!+?~|LLYl#u yf rqW^eLNNB#d|D`̙SRzԨQ|D!$ȪUp?RNI""MlmmW둑(*}EzgJ)0/n*(Vq#B@ %ݷ f3?,G`O_EGO.fYe\l6%E~PS 8gFyFRZc*m08R2%0lst"r ^1Y#^y}jJZlk@*K42/E-tbNՄDwt^_G/^=l0()Sŋ ,?< EB)ݻwCwYT:`H$=z(27n\reԫWTR 7! b4 7e/I(>|}<'Or^.TlҤIYEa?L`RR-U0Q*E?}}BOEUҶxm`ض=rL? Pg['Z#4`TZ4dGXH%\S ĉ yO'_ѵo[!֕w g]x48qoI\vmC…*<쬌%¹6Au*URZұYY7.V;?Ϙ1#==Yfs݃d<&Mҝ]c^paѢEZvĉ"A^C+5kִjՊ ʕ)Xd4S믿1/׏E$;GǛ&y`.IN(>P2I%Dq -{`0$xDD-_>Ԭ3b&0񞒿.)Q9LIMk '{?tL`TVx3K<ݻnewB0Lss.V-MǾ O/J]ơPx0];S#Td>5(]|3E& !'}e(HeB*vbYdsˣD\V&_ߋ|nh;vJ,ׇ2"r|9bdd5gg͛7ϝ;W^ (z'9\r#Gޢh5k6lؐHA,;v쀨g pĉdv"Cvp'Ok̙3ӥKF rE:!32,œ$AS 08X9lPP\+W:#.۷ԩb 'g$[_q} SkF2: 7ݲe%(ҝ([l*U [ M6TRNNN[n2GL&:u*}ŋu.Nnmjjz…kGux."oJ$1L1J} ʖpbL%cn(<}2D cd3So&X '1x//o GQрf,G@ ҹӧO >̞bjf/TR $z C=b͍u4i RI1ţq328^A݋w3"@XIU1\Bǜ9sdiҿS(yըа> w'=6zyе鄅kf4S_re͚5ܿhѢkBڵkKA| \7>y޴&&&{쁬(Owq/Cyf 8͓'OBaʊK\^;NPU&uAr_"Mj! 0Tv߇:V(+3ز8lCGʒ%N6 Sթ6۱;)ih‡v\$=ɳ+\}QLCqa@Lj#*UAAΥcܪ0]!͉SOk6!ᝣcffL*DO 8Yl=cV! Zi ĮJ6igD_7ce1hN jBNoϳ i"qD4@RjMŶ*Sg,y}dCAa`5aqTJ۶q i5Wb'gh.502_DscN:gok3}McMBYξ] 8@􏋝INNa %x%ThuD|j%eۙ.k&3?^4A tHtEeRofRyDO3wOɼw_F߻X>gK>+z1C m"1 F[saRN }W._ige+S+CZ>rdZ8~r:_KA `Y :bP3&'fnObC ⇀ )q퀏Dy$[O){w?}߁md0#.0K8p03*rff8pg >LUɥKOp"?/f5x&LիJHe _gpM6D8`==J 6;xhԻdTy em͂o3IC \PaJw&HxR;::ծS{޽JMu*}@*u7r{ kUVD}M՚f^Q:?])SqҤI z̓ 0`(BP$u%̽ Ax)[g̬_`HRȮd"$r(0}azG+T9]RB*^a%\y%q+s IƗ8|J鞖_K!AOgJReo?k[Tl/_ٳ^DE>="QMy] 1Nr+k*bRЬ@ ?$M~3 <3o#&&iӦǏXbr99;v?֚p͜ɮ€!َ% R3r%!CLR8s=9@dj 9sT|}L(p/fv嚆&c|G(f.E{ =SۚPg*K].88d&L8IM;$Xש|ףBq(Q`%*0yk5*4);w.%%eرǎ۵kצM1jԨf͚ >58;;{yy-_[y@0l>P DDŽ]BfI[CBzUW_G vR<[PVJg$ȫy%T/tydtBEgΜ}*`\ztTAjz ^Y WD"I\&P%{}Ya>g1͂dfGgd`7+EcӽSo(>MSl8疒j2Kl(~M鴿oWTPP|\gkƍ]vma̘1o߲ZN!! J\1KCƗ ZߧfQ"hy=zt?sZ?ye&ifВ+¢QagŭTqy$ !>YfPPP@@\x1B8sm.r@֪G)=Yy|j~*[$ε!T^xPU֙E&d t'3َ5ٔm+6M6 ;'s>IQ_[_O>ihj9Xf9 gSN8\No<ӹ>LIL#A)H䉌`5HiW *^Xe#~lD*%" 7bڻm2ؗ, mw8|>QWBo޼9000圙ہ. 
MIĭ;El7FE=z{ %*gMowrr(mCƯGW_lSܘ /$͘ mj=Uye9'c06&ŋǏ?w $xruXH$[+2׌ O I|6"45ssɁg.[hD@Rے.&'=[5RʄVʼn]m|Ϩ-'*Ny\`7lkN+ƀ^ٵX&=y)97OTNm4r7nԮͶ8h*E\kT\MLӄkI+U6*Vzz:⪚_u-u=ZP"_S*S3u'*' `l2W0GժUulܺu >ԩÙE ܥ,rp|u(W=w@P+ئI9\w\2Œ [K5M HBr/ C`]9G/ 4J}'{%,ڃ^(Z@px"8|BX,`׹?xͼpĬRqr獳]ZD)DiKbWLRP622ܺuy(r֡C9dff6j(66*}u„ 7o~1*4_p4)JwuuUՆzyuR&υc?VE Yu\秓C_"Yu>VD 'TY6č2q,.X℣1,RiF@XIa/IkUlY+Q!@LL̩S@4m^M+D*TЩS5kp-ժU;w\-xWхGݺQA0B ^WaЎ2?>"op$ak 11koӦDb$Yh'QHu2F+>ί󅈊;iTej_}B'}+fDaD׬>+_f@/F߻wo_xQV-$MgѢEaBQF (AjժH"֭[mhhغukEL<| S',YJH'MUsN4eQa)W )7pq;^rqƼKSy )_iу{mZ~L(B0_uM1~x Μ=cǏn )ntRu^Cu4"890,~=#1*ectf BȌVe$jw8Tp"uCO10aTIp: K$I?xRSS]]]u}vsss$Mr%,, .! 5C\͛7ׯ4hP&-[,QAnfΜ)}R իWÙN2%[B(Yfm۶ƦC&]v%K V^g? $MrIq.Bبj տtÂVXkx@9O޽CBBL ?Q@QԹmhŴ궥\Io JO&HPKr+^OC īȆSlTx@f*ƽzQ|\F/\$dYdr%(2RE K -pz /RA$Y:İm1}ر.]@TRk~  ]I^xPn9p̙ۨQ[nYXXi( T "sΞfbU*֭[MLZE\\܀{իpsk֬ B_~FFF*TxgI.2>qTysٗVD+)l3a#p-^@3gδmۖ70? &-ٻ:{\Rzw.](Rɽ+.ǭi Ś3-}( {VJyvljM8㴌"%:?y:{p @\+g2p! acxWiACCC'L`(28NShpww/⽬(5ٻw۴iN֐u֍KVY|sR$Iɓ~{vځ ?TrҤI${9x r (ܙO¾޹u `Ń+ .tu %nT9A~C}1D#7tvN3?[.Zej 6݇!ղry{G8P8Z!AD(+#=xQCﴺDdUz0\Y]fnoˈɦK@HC6.Dزe߰a@-Z(==G$͙3Sdgeeծ]%E|ށ * . ^:gB@PP|RaÆΝ;՚ _ }&e:&*γܧ<5wS"qtfJ]2| !n_ң6'4C?#bdR&Sq/9qZzH"V,tA m rǠAd[ H(hގ?36彅x/_>###$$tٳSr 0jɳgFѨQ#D~WW#G:½4j Te``͛7/^XRݻw;6..֭[ .+ɾ}@q2  >ʖ- 7kڴi3f8p7g޽P"3ZlA9s &נTЇj ?WIͅ0V" v\/bҥ74s=E~4.NJd$_{*6ġCњ jB. ߵfʔ+Vf.˻ xyyUV+맦^޾\(Jᅫݻw<-ϟ?r\.SNƍX\oB<`eeջwoX8 &8~[ bݹ o߾%w@oqgTv&Mɬ"pw24A ;z%N'0jTW>4.XRE s*!\@Sd\W[aiYW'?L$/(f%TTȺD`!J rM \oW9Pc[ZF\@ $Mo xlZME@qzUѶQn_U/+DHGbs!;FQ̗MAcetu^"sLFH&#:$>@ ?$M瘝^Ĝ-)"Zۜj_s`3\,.=f$ :V.kQ*C1" AJNL1n1U N x}hRJsQCA7&­3@^ 9sp7}JsgCNmU) ?npjeaP\N,ߤǓIjqhx5M Mm-:dȯ)TqØ󞬰͈9. 
W}™@ Br$yf"$$Z_Dl17Ь߸Pq7 gHbƖ#s`jh7}=ޝEQߙBDBDD -"/L+M,HoXf^半 ("Hxp(3v+ * xޯ^ݙl{ [s| ,eΣkٰ\7;LݙYO {GDvmwxɳ\yEKqAn?`HGYXVzM 6& B {=A`ttfA|`VͣGeC-H7r,EӊaFV5[u) 4PJ=3gFbeEw u끟4=$W,5O/ o7=i*\4!ЛNaWtJs#4M4O۶=hK(CFc5N9I>2HL$aJsP}%I ߳NְqN2fDg-gi$34%rx0n!iB7 B=f/]d/EYE6 ˺ b˸X>XtH E70/$qjnPY𢋄 Ϟs)׽w+_lnNIHGFJR*ٸ8.iJ*GN)Ns,cUy}g.f5Ͱ\vMv*ȈȾIV!XrtM1)aHq넟a>=qP5%!hu\5ޑlB@S;X_:[1vjR@29!?xNɓX1K1BH[:N˘n$vhD{05WJYԌݓs.+/ oCaG҄B)&cc.=~v l3]fi {: F67?Le[)JSp~>T\W6x0fWMY kPc&2z SB*ytw!RhjN"J:a\q*2MW+ L|{٭Jk1s o ,8sڙ"oyQl:h5:SuZbZI(K(@" \2)d9oW5:K5(;)))[jYѐTϜ9ķ Ş?ZKGꯃS5,H˲G vlyY;yRÉ?CGM F$d-d70u;)j5loo?qD=W^K.UP666o+,lRNΝ;&Xn۶gϞ=j~Ɂ7HWq,-Z%O_'Ovvv5-n߾u֎;^֬YӸJ,[Kf̙Rt֭~ׯP/cto%WqR.0}1v K$%Jʺr,lĜKSgS5#ξ6bURV]*iZZU˫y.gϞOϟNfӦMχqK,2o͚5N:M:52242GPPPtt4i*ŋg\bqd Eǎ322 !d̛7t<bժUP ҸqV ?8JW{Me.ͩ壆/Йf"*&)A {FD2*)07k:![ق)`sqRɥߥ빑jqF]æD?XHAZNOO9, amV, >w0IVZ&MO){&dtymB#3P2m<.5%.|fq{jivAK+S߭Dᣙ~Cҽ;tK֯_" @4 2 0kժh"*-]N:ƻ ကoۢiVIIIeQ*...Æ #M4-BIҥK={?B5HS9e*b:fbe]EȮQ(/U]!JCʁ %|Ԉ~(WO$J|VaGc(&Ͳ#GiݤW~P(ݻw&MvڰaCtx[ĉ5jԥKmڴ\·â ~{͚5[xqs-[S 98H0}[zG7Ž]Ip1n| 'f{N bX1*G/9r̿iN)cQP/G}yT.ɉDBq0׮];ج8p lE!X`  jn7W\ɔ|?dTJh`@e6-{TZlRfch7QMBBmr`-ZM̖Eq>]YcT0x!cIigMb?U.>{ox.H|;v>|xzz3H@o>kk={C푑E`#`۶mŰwA|9rL:tSޢ}uC{*T^ H#@ٰC<IE5gVjg -}'A'N=GDf-D?o΢I+z b *ݺu+hI&jǤnݺv#H{˗/',n:h ]& QlQf[!%%eӦM?VX3|ǤnPfcUh2]7G33J{y-<G=k]20LHi y&߰%WKyĽ+f[n&]B, )00tׯ_`˗CCCC ~ƒ^b1 $22EQ'O4kMΙ3Ʒl6ÜCmŋJr۶m3g䫼2HUE" 1N^j D]eEBJ_M@X!{mlƁY[w_1wu PA? ߼iGzUpƷ[b-vZ[֭]%0Arlܸ1666?O^fE=~6HbbWWֵ4M{yyãڶmۧOR1YfWE L0رcΤ} n^{5tPeׯdKKK7omͿZ6ڴis,D0# @{@@AM$ 5[n={w޽o߾| ~zPGe6Vz_c3OzV$r{hrnŬQO<`ٵuCsh/iz9]Æ 0`!Xtu֭ԁޢE H0L.|gH2TtcfԸO_K~ .k?ĺpLTӲOHDF믿L h;;;-_-MLLڵk8R6V<Xo}ץ1eƌ]c:r9| JZmܸ 7NNNn׮]ym۶b1ʠ ǎǏ8pر2N]kGR+p0#n)UCv%ηrtaB ?pb&5CR|1]|Nk8[VD : ?ˋ^W-*KYX ۮIթS'PxR GGG`q+Nx sbpsP1 Os.?nMOF9}W@ s`44ů_f̙3ϟ?_ֱt)rOz)"DR~J֤RYR BnWG8psӆf.j+,8\P: K%U!1Ct_s{C{[nUQ"]\eAeB/!zzϞ hF uVj! 
=uM #B]~G!]Fb0sD*eA/́=~%҃4qRAiӦZT0 ?K{O Hň.//Ӳ-Z޹s' LFÆ Q +1_~J&mMkҷD^1N#6s BU"&撓HPH!Peh/62)*ɤTBU<UkS ?2H"ͥ+!B ;?eдlV"$x@U{Bh=} p\BU w_=cIL="uB BiOSVԍ?&G*!*A% 0T 6m4aR>=['PR)i*vtL*!*Fg__pR=SL+ws6lX~Hݠm۶g&jB"6vJZO&H!PehbvyRI޽{k֬ILL$jرc?1cƐ&۷/]4**4UEG5[6Lv|!PU`ϟO=x <<<88ڵk ԪUojY<?))7n4h H2@wȰw)X[[dff߿?,,ÇtKKnܸg3gh4;wҼH@ `Æ |ז-[[nBmѢ˲|o<==!0` sҾ}{JwX)NNNv;vF% ̏sssꫯH#ԫ'BU&sM\cʕ/_ ۾};5tPXy3&..nƍ09 /_ΝG,i 2x1tA aoom7 ~D ¼AY CF?z(tyxxS{a =g:s 74|RG!kR1D E@@==={1rHhjXvƍ}} ~Dl277cvQV8888;;C -d={Ν;^^^ɧO 0g 80 eL#@RW~24 BU &n` QV|j( L<ɓNjEݺu ^9?jNNS;ֻwڵkO|[Y=b666yyyBU &gxMNNEs>2339[ɉ>V=uTQx>2;w;wjDDD@@Lpԩ/se->xPMۓ B!T50TLuݺu'NB~~>⇸ QKk.\ޮK]h:bĈN:y{{&~GKLL _5oޜpx-:4ßb'55U(lܸiӦX8%&&>yo)R~}xz*_53#D!9eKv%081 +V`njW(YYYСCmٲK:_5KROOOssB{nn.CHrww2h(///&O*F8A!PT EQ~~~nnnkܹ3~4K"XZZsٲeÆ j` 3foߞ44hdBxT||| [dr.]ǟ)R8h4#G\~}Uza`>uV|@K.y.><&8n8. !x?D@ԩC_~9sk^B&8uքӧ&3fl߾s B7 z9,,?HNNڵkO!PU:!2!!B& B!LFB&!2!MB!dB0 BȄ`4A! hB!!B& B!LFB&!2!x?dBBBrssrv\]]K8>=zt֭˗//\"25פbT෫;&uQQQӧO?{,qԹsHK={6RW\|rRG!db0TLB=mذ!00Ç=z4%%eܸq^B:D-[oY^JBfX>4 Nw9XNKΧ\cΌ3TZw>Dž%kD{M:J-s2Zil( F+'e90 ]@ bq>TI,ޜJg/fpbF`-#`!>V {feXJ$drRȿ;66i;;;hrJbbǓ7nq\jjT*577^Z} F),,ttt4LIC5&&ĉ0Xڵ »dff«VQթS m9---44̌o|KF9s&$$B*UM5He'){v2҇ C =e] !%izт>=B`|+b:I{`GZ6.2PuXY|Lw=F_Wy+* *$uݥ5CKb: o1hgw}wiw~4X%o\ٽzVG"WfGWae(t Xd˖-00v Ƀo^^^2vXO?]nߵjժiӦ(.]@ u ӧO ,zw|ƍ'O|Gǎiڴ)D%~H*{NII·{qqqHB!P͚|=O k v`.3K09$hohCÐ?uBR_VHjʨeFȯ3[Ca?|Z`M7)Lc.4đn1fGsdⲸgH"okWvn#r+`6-'kA9Üt+(&+O>uX}ڵkv'Oѣ-ZeswĈϟ>}||bbb:w_~_r%-[7>ƍ!,Ya2y-ϥK+x?c̘1P^>2s ܹ޽{I!ɨѤ-Gʁ*tV_vTV@j7`S0~\/ UP׊@d ,ųQ魾^EPRw&(a*LUb'nB?Oi*gc~tXb!Dܼx{yM6032$ kkkCg`Yfǒ͛gkk Ba$$$xxxecBf;))BjM\~#Dmj3OVeǼ :Y0CJ(u,(+P` oGaoJ݊٫Ov^ޮ~$) )Eh^L]?\.߲eK6lk.\!!!M4 8㖫K. i3f̀r =AJ,B20 2:I&W

Sl&_<"}[xN7Qۭ.H2CC)w 3 Gg O"k7liii6mILLx"+Grr!Cݻsθs璎WeeeEtBBLdd9s>3 JYтBȤ`4)?8iGn#^Efgp++~ \?clSW-&%+,פL ٜ՟~U}. 威?z1ev) HǏhٲ%]q%ʻ|Znݺ]\\^e^hŊBWeee-\?:\]]!됊+H!hRZu? ?(}Z:_U>g@s{O+GQU^"Fpvae]Ne}ڈP.#MzH.}_2j֬٥Kc+ kk:>Å0|tuP}bqxx84lIӦM!ԩSnݺ Lo>.Ys10111^RTTTzzz~H!ɨ5) Ԝ˞n.9cN>w7_X=xEma~~u=.E&m+alͨNuNP܎'jCSzŊxU:82sKTMm>/7ܭK&W+Ph;3 & өk?me%]x[)ڵkW^( VAAAH@%;;͛4MU ޽ ݺu[zJP^=h{acc05߿???~M6͚5P޺uStt4#]]][jޝGx >}`037o~/.}0/B |Y[[q,Xy),88xȐ!gΜ) y%66 hބ%K\~TJZzu:u+ww;wh^Մ 9r…{waş~)߂BȤ`4Ao_MLL/]5k֬J٥jG=eʔ'رB>y!LFBB!B& B!LFB&!2'~`IENDB`parameters/man/model_parameters.zcpglm.Rd0000644000175000017500000001716214135322113020431 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/methods_cplm.R \name{model_parameters.zcpglm} \alias{model_parameters.zcpglm} \title{Parameters from Zero-Inflated Models} \usage{ \method{model_parameters}{zcpglm}( model, ci = 0.95, bootstrap = FALSE, iterations = 1000, component = c("all", "conditional", "zi", "zero_inflated"), standardize = NULL, exponentiate = FALSE, robust = FALSE, p_adjust = NULL, keep = NULL, drop = NULL, parameters = keep, verbose = TRUE, ... ) } \arguments{ \item{model}{A model with zero-inflation component.} \item{ci}{Confidence Interval (CI) level. Default to \code{0.95} (\verb{95\%}).} \item{bootstrap}{Should estimates be based on bootstrapped model? If \code{TRUE}, then arguments of \link[=model_parameters.stanreg]{Bayesian regressions} apply (see also \code{\link[=bootstrap_parameters]{bootstrap_parameters()}}).} \item{iterations}{The number of bootstrap replicates. This only apply in the case of bootstrapped frequentist models.} \item{component}{Model component for which parameters should be shown. 
May be one of \code{"conditional"}, \code{"precision"} (\pkg{betareg}), \code{"scale"} (\pkg{ordinal}), \code{"extra"} (\pkg{glmx}), \code{"marginal"} (\pkg{mfx}), \code{"conditional"} or \code{"full"} (for \code{MuMIn::model.avg()}) or \code{"all"}.} \item{standardize}{The method used for standardizing the parameters. Can be \code{NULL} (default; no standardization), \code{"refit"} (for re-fitting the model on standardized data) or one of \code{"basic"}, \code{"posthoc"}, \code{"smart"}, \code{"pseudo"}. See 'Details' in \code{\link[effectsize:standardize_parameters]{effectsize::standardize_parameters()}}. \strong{Important:} \itemize{ \item The \code{"refit"} method does \emph{not} standardized categorical predictors (i.e. factors), which may be a different behaviour compared to other R packages (such as \pkg{lm.beta}) or other software packages (like SPSS). to mimic such behaviours, either use \code{standardize="basic"} or standardize the data with \code{datawizard::standardize(force=TRUE)} \emph{before} fitting the model. \item For mixed models, when using methods other than \code{"refit"}, only the fixed effects will be returned. \item Robust estimation (i.e. \code{robust=TRUE}) of standardized parameters only works when \code{standardize="refit"}. }} \item{exponentiate}{Logical, indicating whether or not to exponentiate the the coefficients (and related confidence intervals). This is typical for logistic regression, or more generally speaking, for models with log or logit links. \strong{Note:} Delta-method standard errors are also computed (by multiplying the standard errors by the transformed coefficients). This is to mimic behaviour of other software packages, such as Stata, but these standard errors poorly estimate uncertainty for the transformed coefficient. The transformed confidence interval more clearly captures this uncertainty. 
For \code{compare_parameters()}, \code{exponentiate = "nongaussian"} will only exponentiate coefficients from non-Gaussian families.} \item{robust}{Logical, if \code{TRUE}, robust standard errors are calculated (if possible), and confidence intervals and p-values are based on these robust standard errors. Additional arguments like \code{vcov_estimation} or \code{vcov_type} are passed down to other methods, see \code{\link[=standard_error_robust]{standard_error_robust()}} for details and \href{https://easystats.github.io/parameters/articles/model_parameters_robust.html}{this vignette} for working examples.} \item{p_adjust}{Character vector, if not \code{NULL}, indicates the method to adjust p-values. See \code{\link[stats:p.adjust]{stats::p.adjust()}} for details. Further possible adjustment methods are \code{"tukey"}, \code{"scheffe"}, \code{"sidak"} and \code{"none"} to explicitly disable adjustment for \code{emmGrid} objects (from \pkg{emmeans}).} \item{keep}{Character containing a regular expression pattern that describes the parameters that should be included (for \code{keep}) or excluded (for \code{drop}) in the returned data frame. \code{keep} may also be a named list of regular expressions. All non-matching parameters will be removed from the output. If \code{keep} is a character vector, every parameter name in the \emph{"Parameter"} column that matches the regular expression in \code{keep} will be selected from the returned data frame (and vice versa, all parameter names matching \code{drop} will be excluded). Furthermore, if \code{keep} has more than one element, these will be merged with an \code{OR} operator into a regular expression pattern like this: \code{"(one|two|three)"}. If \code{keep} is a named list of regular expression patterns, the names of the list-element should equal the column name where selection should be applied. 
This is useful for model objects where \code{model_parameters()} returns multiple columns with parameter components, like in \code{\link[=model_parameters.lavaan]{model_parameters.lavaan()}}. Note that the regular expression pattern should match the parameter names as they are stored in the returned data frame, which can be different from how they are printed. Inspect the \verb{$Parameter} column of the parameters table to get the exact parameter names.} \item{drop}{Character containing a regular expression pattern that describes the parameters that should be included (for \code{keep}) or excluded (for \code{drop}) in the returned data frame. \code{keep} may also be a named list of regular expressions. All non-matching parameters will be removed from the output. If \code{keep} is a character vector, every parameter name in the \emph{"Parameter"} column that matches the regular expression in \code{keep} will be selected from the returned data frame (and vice versa, all parameter names matching \code{drop} will be excluded). Furthermore, if \code{keep} has more than one element, these will be merged with an \code{OR} operator into a regular expression pattern like this: \code{"(one|two|three)"}. If \code{keep} is a named list of regular expression patterns, the names of the list-element should equal the column name where selection should be applied. This is useful for model objects where \code{model_parameters()} returns multiple columns with parameter components, like in \code{\link[=model_parameters.lavaan]{model_parameters.lavaan()}}. Note that the regular expression pattern should match the parameter names as they are stored in the returned data frame, which can be different from how they are printed. Inspect the \verb{$Parameter} column of the parameters table to get the exact parameter names.} \item{parameters}{Deprecated, alias for \code{keep}.} \item{verbose}{Toggle warnings and messages.} \item{...}{Arguments passed to or from other methods. 
For instance, when \code{bootstrap = TRUE}, arguments like \code{type} or \code{parallel} are passed down to \code{bootstrap_model()}, and arguments like \code{ci_method} are passed down to \code{\link[bayestestR:describe_posterior]{bayestestR::describe_posterior()}}.} } \value{ A data frame of indices related to the model's parameters. } \description{ Parameters from zero-inflated models (from packages like \pkg{pscl}, \pkg{cplm} or \pkg{countreg}). } \examples{ library(parameters) if (require("pscl")) { data("bioChemists") model <- zeroinfl(art ~ fem + mar + kid5 + ment | kid5 + phd, data = bioChemists) model_parameters(model) } } \seealso{ \code{\link[insight:standardize_names]{insight::standardize_names()}} to rename columns into a consistent, standardized naming scheme. } parameters/man/p_value_betwithin.Rd0000644000175000017500000000634014140570270017324 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/ci_betwithin.R, R/dof_betwithin.R, % R/p_value_betwithin.R \name{ci_betwithin} \alias{ci_betwithin} \alias{dof_betwithin} \alias{p_value_betwithin} \title{Between-within approximation for SEs, CIs and p-values} \usage{ ci_betwithin(model, ci = 0.95, robust = FALSE, ...) dof_betwithin(model) p_value_betwithin(model, dof = NULL, robust = FALSE, ...) } \arguments{ \item{model}{A mixed model.} \item{ci}{Confidence Interval (CI) level. Default to \code{0.95} (\verb{95\%}).} \item{robust}{Logical, if \code{TRUE}, computes confidence intervals (or p-values) based on robust standard errors. See \code{\link[=standard_error_robust]{standard_error_robust()}}.} \item{...}{Arguments passed down to \code{\link[=standard_error_robust]{standard_error_robust()}} when confidence intervals or p-values based on robust standard errors should be computed.} \item{dof}{Degrees of Freedom.} } \value{ A data frame. } \description{ Approximation of degrees of freedom based on a "between-within" heuristic. 
} \details{ \subsection{Small Sample Cluster corrected Degrees of Freedom}{ Inferential statistics (like p-values, confidence intervals and standard errors) may be biased in mixed models when the number of clusters is small (even if the sample size of level-1 units is high). In such cases it is recommended to approximate a more accurate number of degrees of freedom for such inferential statistics (see \cite{Li and Redden 2015}). The \emph{Between-within} denominator degrees of freedom approximation is recommended in particular for (generalized) linear mixed models with repeated measurements (longitudinal design). \code{dof_betwithin()} implements a heuristic based on the between-within approach. \strong{Note} that this implementation does not return exactly the same results as shown in \cite{Li and Redden 2015}, but similar. } \subsection{Degrees of Freedom for Longitudinal Designs (Repeated Measures)}{ In particular for repeated measure designs (longitudinal data analysis), the \emph{between-within} heuristic is likely to be more accurate than simply using the residual or infinite degrees of freedom, because \code{dof_betwithin()} returns different degrees of freedom for within-cluster and between-cluster effects. } } \examples{ \donttest{ if (require("lme4")) { data(sleepstudy) model <- lmer(Reaction ~ Days + (1 + Days | Subject), data = sleepstudy) dof_betwithin(model) p_value_betwithin(model) } } } \references{ \itemize{ \item Elff, M.; Heisig, J.P.; Schaeffer, M.; Shikano, S. (2019). Multilevel Analysis with Few Clusters: Improving Likelihood-based Methods to Provide Unbiased Estimates and Accurate Inference, British Journal of Political Science. \item Li, P., Redden, D. T. (2015). Comparing denominator degrees of freedom approximations for the generalized linear mixed model in analyzing binary outcome in small sample cluster-randomized trials. BMC Medical Research Methodology, 15(1), 38. 
\doi{10.1186/s12874-015-0026-x} } } \seealso{ \code{dof_betwithin()} is a small helper-function to calculate approximated degrees of freedom of model parameters, based on the "between-within" heuristic. } parameters/man/check_factorstructure.Rd0000644000175000017500000000167514100573643020221 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/check_factorstructure.R \name{check_factorstructure} \alias{check_factorstructure} \title{Check suitability of data for Factor Analysis (FA)} \usage{ check_factorstructure(x, ...) } \arguments{ \item{x}{A dataframe.} \item{...}{Arguments passed to or from other methods.} } \value{ A list of lists of indices related to sphericity and KMO. } \description{ This checks whether the data is appropriate for Factor Analysis (FA) by running the \link[=check_sphericity_bartlett]{Bartlett's Test of Sphericity} and the \link[=check_kmo]{Kaiser, Meyer, Olkin (KMO) Measure of Sampling Adequacy (MSA)}. } \examples{ library(parameters) check_factorstructure(mtcars) } \seealso{ \code{\link[=check_kmo]{check_kmo()}}, \code{\link[=check_sphericity_bartlett]{check_sphericity_bartlett()}} and \code{\link[=check_clusterstructure]{check_clusterstructure()}}. 
} parameters/man/dot-n_factors_sescree.Rd0000644000175000017500000000061513636467450020104 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/n_factors.R \name{.n_factors_sescree} \alias{.n_factors_sescree} \title{Standard Error Scree and Coefficient of Determination Procedures} \usage{ .n_factors_sescree(eigen_values = NULL, model = "factors") } \description{ Standard Error Scree and Coefficient of Determination Procedures } \keyword{internal} parameters/man/model_parameters.aov.Rd0000644000175000017500000001674314133472610017734 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/methods_aov.R \name{model_parameters.aov} \alias{model_parameters.aov} \title{Parameters from ANOVAs} \usage{ \method{model_parameters}{aov}( model, omega_squared = NULL, eta_squared = NULL, epsilon_squared = NULL, df_error = NULL, type = NULL, ci = NULL, alternative = NULL, test = NULL, power = FALSE, keep = NULL, drop = NULL, parameters = keep, table_wide = FALSE, verbose = TRUE, ... ) } \arguments{ \item{model}{Object of class \code{\link[=aov]{aov()}}, \code{\link[=anova]{anova()}}, \code{aovlist}, \code{Gam}, \code{\link[=manova]{manova()}}, \code{Anova.mlm}, \code{afex_aov} or \code{maov}.} \item{omega_squared}{Compute omega squared as index of effect size. Can be \code{"partial"} (the default, adjusted for effect size) or \code{"raw"}.} \item{eta_squared}{Compute eta squared as index of effect size. Can be \code{"partial"} (the default, adjusted for effect size), \code{"raw"} or \code{"adjusted"} (the latter option only for ANOVA-tables from mixed models).} \item{epsilon_squared}{Compute epsilon squared as index of effect size. Can be \code{"partial"} (the default, adjusted for effect size) or \code{"raw"}.} \item{df_error}{Denominator degrees of freedom (or degrees of freedom of the error estimate, i.e., the residuals). 
This is used to compute effect sizes for ANOVA-tables from mixed models. See 'Examples'. (Ignored for \code{afex_aov}.)} \item{type}{Numeric, type of sums of squares. May be 1, 2 or 3. If 2 or 3, ANOVA-tables using \code{car::Anova()} will be returned. (Ignored for \code{afex_aov}.)} \item{ci}{Confidence Interval (CI) level for effect sizes \code{omega_squared}, \code{eta_squared} etc. The default, \code{NULL}, will compute no confidence intervals. \code{ci} should be a scalar between 0 and 1.} \item{alternative}{A character string specifying the alternative hypothesis; Controls the type of CI returned: \code{"two.sided"} (default, two-sided CI), \code{"greater"} or \code{"less"} (one-sided CI). Partial matching is allowed (e.g., \code{"g"}, \code{"l"}, \code{"two"}...). See section \emph{One-Sided CIs} in the \href{https://easystats.github.io/effectsize/}{effectsize_CIs vignette}.} \item{test}{String, indicating the type of test for \code{Anova.mlm} to be returned. If \code{"multivariate"} (or \code{NULL}), returns the summary of the multivariate test (that is also given by the \code{print}-method). If \code{test = "univariate"}, returns the summary of the univariate test.} \item{power}{Logical, if \code{TRUE}, adds a column with power for each parameter.} \item{keep}{Character containing a regular expression pattern that describes the parameters that should be included (for \code{keep}) or excluded (for \code{drop}) in the returned data frame. \code{keep} may also be a named list of regular expressions. All non-matching parameters will be removed from the output. If \code{keep} is a character vector, every parameter name in the \emph{"Parameter"} column that matches the regular expression in \code{keep} will be selected from the returned data frame (and vice versa, all parameter names matching \code{drop} will be excluded). 
Furthermore, if \code{keep} has more than one element, these will be merged with an \code{OR} operator into a regular expression pattern like this: \code{"(one|two|three)"}. If \code{keep} is a named list of regular expression patterns, the names of the list-element should equal the column name where selection should be applied. This is useful for model objects where \code{model_parameters()} returns multiple columns with parameter components, like in \code{\link[=model_parameters.lavaan]{model_parameters.lavaan()}}. Note that the regular expression pattern should match the parameter names as they are stored in the returned data frame, which can be different from how they are printed. Inspect the \verb{$Parameter} column of the parameters table to get the exact parameter names.} \item{drop}{Character containing a regular expression pattern that describes the parameters that should be included (for \code{keep}) or excluded (for \code{drop}) in the returned data frame. \code{keep} may also be a named list of regular expressions. All non-matching parameters will be removed from the output. If \code{keep} is a character vector, every parameter name in the \emph{"Parameter"} column that matches the regular expression in \code{keep} will be selected from the returned data frame (and vice versa, all parameter names matching \code{drop} will be excluded). Furthermore, if \code{keep} has more than one element, these will be merged with an \code{OR} operator into a regular expression pattern like this: \code{"(one|two|three)"}. If \code{keep} is a named list of regular expression patterns, the names of the list-element should equal the column name where selection should be applied. This is useful for model objects where \code{model_parameters()} returns multiple columns with parameter components, like in \code{\link[=model_parameters.lavaan]{model_parameters.lavaan()}}. 
Note that the regular expression pattern should match the parameter names as they are stored in the returned data frame, which can be different from how they are printed. Inspect the \verb{$Parameter} column of the parameters table to get the exact parameter names.} \item{parameters}{Deprecated, alias for \code{keep}.} \item{table_wide}{Logical that decides whether the ANOVA table should be in wide format, i.e. should the numerator and denominator degrees of freedom be in the same row. Default: \code{FALSE}.} \item{verbose}{Toggle warnings and messages.} \item{...}{Arguments passed to or from other methods.} } \value{ A data frame of indices related to the model's parameters. } \description{ Parameters from ANOVAs } \note{ For ANOVA-tables from mixed models (i.e. \code{anova(lmer())}), only partial or adjusted effect sizes can be computed. Note that type 3 ANOVAs with interactions involved only give sensible and informative results when covariates are mean-centred and factors are coded with orthogonal contrasts (such as those produced by \code{contr.sum}, \code{contr.poly}, or \code{contr.helmert}, but \emph{not} by the default \code{contr.treatment}). 
} \examples{ if (requireNamespace("effectsize", quietly = TRUE)) { df <- iris df$Sepal.Big <- ifelse(df$Sepal.Width >= 3, "Yes", "No") model <- aov(Sepal.Length ~ Sepal.Big, data = df) model_parameters( model, omega_squared = "partial", eta_squared = "partial", epsilon_squared = "partial" ) model_parameters( model, omega_squared = "partial", eta_squared = "partial", ci = .9 ) model <- anova(lm(Sepal.Length ~ Sepal.Big, data = df)) model_parameters(model) model_parameters( model, omega_squared = "partial", eta_squared = "partial", epsilon_squared = "partial" ) model <- aov(Sepal.Length ~ Sepal.Big + Error(Species), data = df) model_parameters(model) \dontrun{ if (require("lme4")) { mm <- lmer(Sepal.Length ~ Sepal.Big + Petal.Width + (1 | Species), data = df ) model <- anova(mm) # simple parameters table model_parameters(model) # parameters table including effect sizes model_parameters( model, eta_squared = "partial", ci = .9, df_error = dof_satterthwaite(mm)[2:3] ) } } } } parameters/man/p_value.zcpglm.Rd0000644000175000017500000000325114160324505016540 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/methods_cplm.R, R/methods_pscl.R \name{p_value.zcpglm} \alias{p_value.zcpglm} \alias{p_value.zeroinfl} \title{p-values for Models with Zero-Inflation} \usage{ \method{p_value}{zcpglm}(model, component = c("all", "conditional", "zi", "zero_inflated"), ...) \method{p_value}{zeroinfl}( model, component = c("all", "conditional", "zi", "zero_inflated"), method = NULL, verbose = TRUE, ... ) } \arguments{ \item{model}{A statistical model.} \item{component}{Model component for which parameters should be shown. See the documentation for your object's class in \code{\link[=model_parameters]{model_parameters()}} for further details.} \item{...}{Arguments passed down to \code{standard_error_robust()} when confidence intervals or p-values based on robust standard errors should be computed. 
Only available for models where \code{method = "robust"} is supported.} \item{method}{If \code{"robust"}, and if model is supported by the \pkg{sandwich} or \pkg{clubSandwich} packages, computes p-values based on robust covariance matrix estimation.} \item{verbose}{Toggle warnings and messages.} } \value{ A data frame with at least two columns: the parameter names and the p-values. Depending on the model, may also include columns for model components etc. } \description{ This function attempts to return, or compute, p-values of hurdle and zero-inflated models. } \examples{ if (require("pscl", quietly = TRUE)) { data("bioChemists") model <- zeroinfl(art ~ fem + mar + kid5 | kid5 + phd, data = bioChemists) p_value(model) p_value(model, component = "zi") } } parameters/man/get_scores.Rd0000644000175000017500000000332514077615701015761 0ustar nileshnilesh% Generated by roxygen2: do not edit by hand % Please edit documentation in R/get_scores.R \name{get_scores} \alias{get_scores} \title{Get Scores from Principal Component Analysis (PCA)} \usage{ get_scores(x, n_items = NULL) } \arguments{ \item{x}{An object returned by \code{\link[=principal_components]{principal_components()}}.} \item{n_items}{Number of required (i.e. non-missing) items to build the sum score. If \code{NULL}, the value is chosen to match half of the number of columns in a data frame.} } \value{ A data frame with subscales, which are average sum scores for all items from each component. } \description{ \code{get_scores()} takes \code{n_items} amount of items that load the most (either by loading cutoff or number) on a component, and then computes their average. } \details{ \code{get_scores()} takes the results from \code{\link[=principal_components]{principal_components()}} and extracts the variables for each component found by the PCA. Then, for each of these "subscales", row means are calculated (which equals adding up the single items and dividing by the number of items). 
This results in a sum score for each component from the PCA, which is on the same scale as the original, single items that were used to compute the PCA. } \examples{ if (require("psych")) { pca <- principal_components(mtcars[, 1:7], n = 2, rotation = "varimax") # PCA extracted two components pca # assignment of items to each component closest_component(pca) # now we want to have sum scores for each component get_scores(pca) # compare to manually computed sum score for 2nd component, which # consists of items "hp" and "qsec" (mtcars$hp + mtcars$qsec) / 2 } } parameters/vignettes/0000755000175000017500000000000014167546375014601 5ustar nileshnileshparameters/vignettes/overview_of_vignettes.Rmd0000644000175000017500000000367714136174105021664 0ustar nileshnilesh--- title: "Overview of Vignettes" output: rmarkdown::html_vignette: vignette: > %\VignetteIndexEntry{Overview of Vignettes} \usepackage[utf8]{inputenc} %\VignetteEngine{knitr::rmarkdown} editor_options: chunk_output_type: console --- ```{r message=FALSE, warning=FALSE, include=FALSE} library(knitr) knitr::opts_chunk$set( echo = TRUE, collapse = TRUE, warning = FALSE, message = FALSE, comment = "#>", eval = TRUE ) ``` All package vignettes are available at [https://easystats.github.io/parameters/](https://easystats.github.io/parameters/). 
## Function Overview * [Function Reference](https://easystats.github.io/parameters/reference/index.html) ## Description of Parameters * [Summary of Model Parameters](https://easystats.github.io/parameters/articles/model_parameters.html) * [Standardized Model Parameters](https://easystats.github.io/parameters/articles/model_parameters_standardized.html) * [Robust Estimation of Standard Errors, Confidence Intervals, and p-values](https://easystats.github.io/parameters/articles/model_parameters_robust.html) * [Model Parameters for Multiply Imputed Repeated Analyses](https://easystats.github.io/parameters/articles/model_parameters_mice.html) ## Formatting and Printing * [Formatting Model Parameters](https://easystats.github.io/parameters/articles/model_parameters_formatting.html) * [Printing Model Parameters](https://easystats.github.io/parameters/articles/model_parameters_print.html) ## Dimension Reduction and Clustering * [Feature Reduction (PCA, cMDS, ICA...)](https://easystats.github.io/parameters/articles/parameters_reduction.html) * [Structural Models (EFA, CFA, SEM...)](https://easystats.github.io/parameters/articles/efa_cfa.html) * [Selection of Model Parameters](https://easystats.github.io/parameters/articles/parameters_selection.html) * [Clustering with easystats](https://easystats.github.io/parameters/articles/clustering.html) parameters/build/0000755000175000017500000000000014167546374013667 5ustar nileshnileshparameters/build/vignette.rds0000644000175000017500000000033114167546374016223 0ustar nileshnileshb```b`adb`b2 1# '/K-*L-O/LK-)I- MAS(USH i%9h*q t0XD90!icKŰ% 5/$~hZ8S+`zP԰Aհe ,s\ܠL t7`~΢r=xA$Gs=ʕXVr7T|?parameters/build/partial.rdb0000644000175000017500000006245614167546370016025 0ustar nileshnilesh |YzA `x_CМF I$HĀv8PjѬג-;%'d9Nla;$Vɒ;i9wr'ؒY wtw~I$mm$iO$v~~o@L; _؄@xT!Řo4)a>(Nʎg,yg%?p78{^QqJ|\ƲSlvb8= Dxer^Yka7neNӳz>׳2aȇ`FL_1ۗb}%h% r8DY(TzYs(\ gXlϡgOJT7p?ʼz^7 3 ϋ3؄-"C3Jγ8&A>L \9k>+zfhȷ\%@>.0K+cJWdn0LV|Dd6);ZX&Q 
Ve"!f;\VC}<{Wz~t3{L]S'5bu|~ȕY`\b^1V,b1dSst_f pJf <7=nel:An Cr].E5f^ocml3P /B6aҿ,.R!7]#љ G hmI6 Y}-MALZ/G!s(vBY@騺*%>rtu5Z s777_i2U\ѕ Yzal(xh  8 yV[\hOpEI{A'8#Y*|/^sL tOB>H=gw QM9mM23hi/Aw(ڜ#$oA;ݔMN7ee8 Yj :dnFh{Fn!K7"+F$\$< ly8" <Ј7vS ;λ^Rb*vMS3X!*M Z\ބ|S{+5"OC~5H7׈82m,p 6ːs~WU|b yYcc&&&} %!xZM"J LLSdFyx7{d;jMҜ$\]@+k' fS~|ђ|tBV7dw1% GϲVmg6䷛nRrI; 붇^Da8YlT;Q=ZIiJp!Q` u"wF B:/VEE+ .'r4 ȟ,42pR،]v5(L)α:U)+ <ͨڗbR @V>V˅Lp2UYQz .v鵴p7?_٬n# ˇ럩DuYXi6剻%xvchJdveNs7k@JG?j˂D YʙQhW (vr:UιhO@|2@ܩS30K!ȫpQ`v|!հ~x:Y % 3^r4(tsBXA~wH9ңřK/͒_֭uO\˅fr.KGF*Zfzi '{^Щ8MGW/?0N1~.vHd֜^%Dx15R9];e43YѪM4s 8 yT]3i6'|a Q|Rl43 Y;AXD[XJY}r%1e#L+5O<)S@NnPRG#GԽ QW!fa\,5+R8j#(vArdnR %f5 eSsITLLQr'FE7Q.B^Tҙ5]9Xjv7opvAjGOi9fW% =&Toꪣue*tm|z-׭~U|4ie5wu1A2Px&fzW_x'**Sr 5/F B>M){e5ŏK[PrCv:K쥕QZ gxED2 |P${wmX K Gc-4Q:<| c&D[F? NARϞ \-Vc:QRifa1{5)Q\S0d34׾% wYi4J &@zArCԡ'ZYe/Rgf9: 4ExTa/p48ǡ63Y.RMdŞiyhh8Nnp};4Bjˢ'9ģ){+jH;\lJEpU"D&>jYuOr⸖9nEAne~yO|0qITnեO9-}&3Q,d=^yn/Eȋڊart2%X=8кP}6oPmW(чg>I)N+G@_y:1QParRҪ.J,D#: Q| %@ .eb˽K,_;b#5sȟm3)dL6 `,U"y?pVl7ENAVmҟ@ꥣw,L[f.u<)䧭,=)eMVSP2^6K+ʌNg+aat[#|1@>|u]%{xp%j( Uެ[V, (1<S)R-xpFM.$ =7v!KkrS-(Y]rENhm~<.B^lB߅Bz~$!D._HT⬿r![Gױ Qz k&LCNk% :8>cN1G(A(QeL D:&䛭)1-ȷ& SAn^NԪV2Gn0GD)-sgB" /jS]i-Qr;3r^!Z,{ڭv(/ CЖ}ut5K|F 7v LA# GM%v'#Go=a\4'U1uS # Qma>|U,}%lʡK_WV.O]9cccKg޳mK~ G?N/GGC7CC1zxfiabjQL&ۀ 2$Q)e#xy'U:]+\d=򈏱2gOغA]Z}_t@6+]tDնRYoT(].+%xi~8MT"ivZukefNpֿt*qlm%m~z>շE&۱l9'jn拝pߺ{)j̺ 㭘l[&"0|L@`wZTqTW cܛ(y+33$<@,8ģpwlDʏU˩ CD҅"҉-~C#g2#e>U_-%[pK&٧$ XiȧIb\1wN. 
S[B3$ I;,<R.Rl?G$2:#B`O@>LjAR/~ayb8ut BL]N"զOkK=}t@PsFȘv.sd@>LKRŎ.B6#_̙O6g}tuqsTgb(Y,c>Y!N1wG#*jlhXvQrW-gWdPl4*=>'98|\Gyfk9Eh $av90i-yŹ0zUG s+OA+vī !+SiLHA7Zz 81)%IBgG&ޡ~wdյl3dԚ"FJ z!?Nd=jTY;/QH;Q"w-S?]jZyqrĽJ pK^o}}|53 >_rܟծ[;˫Zdr/ers lrBv7.\¿XPS{/zڻW`l/gW $j\vL9vmc -/}u-;9hZ./3v>ox;9X6啼].e6o ]k6u`2(5- +;Y } # +g~ibZ5ۈ~94̼aݤQoR{3ՕDZmOR뛦6o:a7Fszb^ZGs>۰6}J-C3/Bw=awdx#evҹ:]ci D|;mǽYKGŚjOcIBk:شmŶ)\v!eXUY&eerŦ}| TCy*Fq-4AIպmm_`4 k͘3YpǏwO;)A.@^Вe71Q:I !{𠄟F:zKs⻵7wBZ9l g.!BėQ B!mUh⓹G+q/nq;zyԔCOXQŋ l}rL"|+Cj rI> &O<__ sBns斲nrS.`tMУK?nV"qխ .}Yt+ٯ6ڬ`Z.}^1SfF"r | =l/;gͿ0hjh=]FCދgt[/(Sm,eC<.Ŝi8f`Y;#d- f,SojͯkAA8yJ9/EJOsG yaCȍ;Z]P HЈ:d78f1|@YdOd;(zq5 M6wB*T+ FlFob(ѤXP3(NL؇NDn,lfYhV_P;1Zhp;Jz; @f{o!6]xI9Ex_7p!3^6b+Yd; j )Tn坫]b]4Sbs4tlf(}Z6Ei {Eˆ45˃*8\Ӌ>-Vby`zh? m+<Ϻ>dl= M la;BdzpEɷqGUL:Dn eV0%C>Eȋop)w9j|[݁+ȍ}DԨt߼fe -V^ <hNTC>Ɲ; ~|7vhۥTOi3]>X\`(dR]r]ǂTJIHP0(;R}S}d@8: GMp(G# ]we3)h~<&\Ɉ|C<é%J6b OXJOipj4ߙNZ-K'] OGYp/ :xTۊHA~Ej0p򬖦-G?j~SL]zV_"ٗf!S59h^YR6RhY˹,[1C4+ڢq4qn> J0(hD ؤ.k zWO44HuKOC>|Ϧ|vC3h} ilRY"8|QC6ɡ=Mlv ( J*DX6ZUrZGV54L5lm] ֪OcJ}l,,ǵ2>@yY5s0Ռ~`(L}{8ģ١~7r9ؿ2M)̹t= 4y䔝/%Rڴh6[XkŠ f݌W}@ ,\.E7nrOr` -r4_+3\vrmg_vZmyX#;7c|GSG;k< #MOHgV{(yߪl5zXFC}dB!X%-mOI}Ɨ꣩:I.kQ7T-)ozwQ(DC< 6}CSɽׯ\,B.oW3CT׽CrWYjkcv݇Ûv$dF5fhV˅`ٵYI:X=(|Ƈ8C؞LLWǥXq uq֎䦭61M#15dGs)6螰!d;!f~=LTSU~"=Vu#uoxŏ}wgN:^MQm>7H :]V"u%P:І^,mHEF<4"JOpO+ckŷ)/ ?Z 8*P=VP ,U4D]*PѤ4+:Ⴡ@*A@{!Nvh^uD5Z$ع*sC<2ؗ 5)$(tFE'K^w HnVNtF?Œfŗ -ŹYыJv7=ǖ"\+\ѴoT) 1=E&HQmlzUˆB))H2)P&$)ujM)4HSRyu -aDLV0sC<%|vcJs]LIAmaLA 1"+Sh)\b(QDCLII )IO' 1%5"S#bJ*#&$%7\H4ĔՇpLIJ!Q!d|\C))oNDCLfN>zJ!l'#SbJ6?U,lXJ.dhA ؜^"$mv%bl)Ŕ$z 1%_H%bJM1&bJ*4o?J4ĔlVSL]j((؞a 0Y:J=q!TZrɆ}zQ9)*|`&w1%iT i)IO'bJ6| g iPCk%RR͉y]&Hu.F-[um_UTP'~3Hkexe@{d}ߘNF*ĉUy~!>%7ܺqS+,0 9)N&u,Y{#AD} 8 yR}H j3&mS.l7ĩxPrmÐէLj Cg1g}4w8y5އ|_Ys;gdn1Za:ZOǡiT&F-V8 'Z!x!EUo[!X+dfNphH;Ƚ;Q MTN5svHT,ܷUDN|r&p7Rk;4{ញ**j =xJ~2Q2|YX|-[`x.tJ-筼"̉چq¦Q9A> )D<|zj(sVXə{u+uJ"vIo Ukڻ3_|53k Qd;W DZV-! 
#f}/O<3.(e5jjVEa`Oېok0|4,p R;M>>~vM܀]>Tǃڵ4gN)8MA`QR "Է_L^SPmj c^ி45*ETA[>T!ȇZo^Q1G%!榫NuЂ*VbU%< tUDɟMQA]`Gƭ J!ƭ vc:!<D :ٓ PjyDg&g;V}AFAr^Ç?x뵳F Yۋo1*&&f}dv"n?.#;IȓZn.j|aʟkk)iHotb\(qZ0p?ʼv1*X.H0⬇$ t]Qv>qRo Et@0Do0Fkvb8rib]/κ7>ƅ89*^qFQq4N-y݌ӱ떱dR_K]:JDsx¹qYon~}>Lַp#שo9*]Mʼ~}:.J6 XbHp;>f?Oˑ8)^Pax.ˆ -+ j-(s]^Ƚ2"Ilmmng 6б\ KT꽢o|lf+/qAj3Zu*?=7Ⳃ6Z?Q? \ bs?/h^`ݐ_ӿCA߂&6ӷb3*Ų,g#Zяa9c 3+PQ!6 S ZYVOפ^Dk@*J&"o4.RfDM$6eEz3 k?jN?h2eXZB6;f%!ɶ*C<>D]#D<ڽ|doרF4rI=RjkSMViXrVnUS҃i5A@wi CTc,2JWnȸi6$*YsuGaDbs8yLc9b6&|ߙ^;Iȓ4kڛdC<>:igpXbCe&2é (YbayPvذpĆ@>rMUعW`[fD|!_oQrI 7b}q 6tRK JIad$1͌8 {!*kg7rI'G4݅qKy`r59 ~NZ<Σo[{)s93CG;]^[5#>(zs[xrboF2*M*gf=OùKX\Xͮa(QuL(|O,rp5מ%3[ΘϪUBjsīЁ|VK=oJx'2F!Ͷ"#B(e.w[%Ky#E&\ZR#6+WYݷ\ҢK}ѽH3#by* VtrFs# T2lnb+2Sf]=Bn6;YⒽ돛xZMڼ5p3 ;j;J)aZ~nտ_H~Q}lmԉi9ߨSrǀ!WdAS4eJ(;/{G܉Cr۶̺ GڗbR}**+ƚfX.qB&W {PDp0 9Mc]}{xQp_Qrt/M{ mweo'eJ):;!K)f #Z_(CJǧI1}Œ->}a9Rݢ^,ƴb .H%H%]ok5exLEL+XbNYj1F\17s綡{@~c"&]\$VW <YJƪ]#dtF.4{l ;Dίz:ȼB.4Y2 3ft+k%nZ(Rb. 񭘣T9n.[͔k{BMɯN%aU۲J\-?-X7 /v[Me+jUD&dz2/؅ZN1gl_;-C;cEjˑ ׏IX4dOzq4$]6g/nlu?Gws48KhbwoF /;Jߑߞ*ۚ/xQ{nAdrg/*hISB[?J63ױ<pzu rRȐLvv2ْ IF{K15ޔR ʾRp{F#N4dÛaLIn$)̓zG/LPk.YD%6xZM=@ͩk} up/d5zzQeݠiyO|YΕ'L\*`JM)=y;^vmh{z~?K\\V[<%ɡP|4I]$"  TDE#o>XO1Kx+x$d(KN&B%*} u^&Rro/BlǗRaw 7t )Tx #\>%q&L6ܑʶFdYb&d]47r䊹Go m0?)W^/#eRW5_.ouKKɧ8j_E ؜' a"h 쑓=եUp(͒N$k6‘~͡@R~u [wYEV:w-/K/ +fd29`;U 5[S29~~:|@[;o9k:UޯRVb[JC5*zx4O^VMԵrk / APQs@>#˻᛻C RraRe.,wcfqS\7KV+PXJ BZx'P쟭%7g.J!_R K @B BGj 3&ׁ!~sjǁ!K#xց?G bLl`s`NBg4`˛)q yv([윖$sK۬&B t46T"_$\p"x>g.8<7vB~Nu%*G+0.íH5_xq-?>繬tf)K<;YXtlXYJ"Ny%雊k\ȶhn^[\;jr.ũ Dx!T)8Lco)i5W([l IB== VDĦؓP)Fi#r<ؔ\/Mo*1pjB$&jT` q3^ӈ^8h5T6B} .Yȳo(i5OQ!m.$I6/˶Ħmկ3'c{OW ܌w[I҃o> Yd]LAAVuORg5+,QozS]~[Q_Oi|ڷ[~I7\eZbl^S{0qS[\"8ܢ %ȟ K Iȓox)^dŸAd 6h7MNܦdCSӉe !Z͉}R#w0TjH1Ǧ[Y7\ !a|OP|N;R>W'RSi5?rFCPMPތ< tui885ύ,-{1AZ VLܹq9{3KY^dR9[n "Zw܇!K$s["P9(ya9ei}tjykJ'e$G h*`s!_m1oL8P8yH_\|j8 YxN'+ƚLoB? %wAZ|i B#o!O7O\3"՚CJ~C<2;F@ ;ITB5S*JC< `2#2>;D2POBTs㺸=S6}eX"Gx$CթZ\D8xvZ)CQ TwUhPP-zy,]*^ 7W? f ;{8aέMd'p򐆖? 
zk0o urmn;}[dw-0K+1ûFjP; `K+W-|@͹hJ 7nGP!KEӉsg) ZoV=*Bpc#_v,,L8Y}gΏ" џY2?sUb'MRfn~S J%ێi0Vƿcr;1= >I8KƂ]^[g|z֬~?tK9l: N]h^~ H8 Y}d5o<ydPz4:k:VÒ>!ρ/(W?C[O>1V<+(X~ZfHAk~Ԅ ]SBn"pxVaVRceP@J &z"f'n D˹D8zff<ޏFiҥ(ràr~% n,k{wk=OA ϼ\k~,FĘ( ˭4 P3.?%Twr _pdoKz,K~g4f^ ̔sne:W\%rs-Q#&zg'i(d<37"0ڬe.&No7*J2:2Fp}k~Q"joBކL%LjnNBqHp̡zx )gӷ^(ʺ:悖ᕈ>~$J!Km>"S2NGA|=4a7ne= %<-q56J\ӕM{3w]r /?PkEL:2E$TnS.oPπgos a.Tx(8fynʬ53'׮^zP\b m}C7U=#geK-R6- %8Y}Bͩ!ڲ9t? kĶEQO}s(3AwxʿX9ڛXO=1لjc%G4sGQO 2W7vJ^ki`ls $'!K_ {!Kc#f% d.^_H˕}ŁNh")%eW*<{ĕ7nG /huP vIWDi 8Yg!/> ؔ]l=qaLe.>$ B ۛȝ^B:kT{Ύ/unw3܍zlת].TZp7H!U}5gatzRd6SfA_[w4aֿ\ yrN8;Y <{iDiBՓ;~w˩ ]A)9 we*ʾ)W>ޠ:RUyB}«R_w N&~JX{DL T<96qe*_[LY+#q9dY; J,dblЊy>h&RW>e> 0b~A:l8쫣`bX%XEo] %RB_lbYzH*x-zSNBln w2]Vni.r$#QTt@zcJ#>{!l~cL|HY1W[3]Ăq%>Tc(Q"‡w!kWc%NS~v|? ȏ2)S$7 gy #ŠAXc.鵨U_u [fټg/dR}99QCe\U4UҪ̯!9RZrY3 h]: F_ךqih?4e {bW;O]P%<*_藑࿗V_oPLs3H9)L-L( Rzxtb꿙4XbyPIzs(gȇR@duBCDs Q<+Tex"<I60eHxMeǬU֏Ff/d O@dǓE݌Qr>H_Mg}F.G۹xOAprF.n)#d3$Yj|H*VBPz%l,}Y'B\'KOg`Gxiv-DjfӹW4RSB}#-o!tr읁-kk9d :8A#B g|< M>b]"q5v?*z=c52}bkǪ?P|M6. 
gA6!Ϻi.2=nHdEbp-0bp;dpr32^N!W既bWJOM?cYt \?04 SNnL ˍժCSuΞcev}07B}^W+#/; Ct-gKcѪYq ;}x/(<.7oj1t;T-f-.=՜]3“͜pr>R >gl.#ʙj&u>NC;R֢H;D=- c?ޤd+`?p2c/^Ƚ:XE)V&̊&w*[?H!Ꙉ?G olܨ:7x@h^'oP*1^(P/|EQ"n!!ʫB퉣;`?^x$(Dp@-.]4S^z6]9Y?]˜Lyus&Z]H1}άi6-g|/ȉ?d x1~v^Qy:\N 7~px(Nz1RmH:Dza3=${ZpQ ) ό앏iC 3([|q8yN9'sYɔshpc Ir]M .yd:Qʬ{~ŕ7' u]#W\J1A: ]ӛˏW88^ʦX3WV.O]9cccKg޳mK~/81<> _2rh~4|eh͸㙥G1LBNn(Www7\ \X_a?QrfO*ٽvim0۞VV;i`EG\WJugqbnF.:e5B[$m1syaJ-:󵲳e'g8vz`lŸ1׶vZM@=oqg["XV͜u식0Sƾw2v-mKل!0է#oUSj:'Sl&bkxs L :f.ytʟyQDŽPDed[*gUVAHasYݷ%jK{/fT2Szj҃WG;@(qqJG?N\*CWCC#r?s2=,َseXv֏*bdT࠼ =(LQà\& 3i.8u̒37D8m༝1rO|qpN.GWVGo5GVZ0u,'v}*^!v:|V줱a:YCg-SVL[g=gpGcY&~_parameters/tests/0000755000175000017500000000000014166770670013727 5ustar nileshnileshparameters/tests/testthat/0000755000175000017500000000000014167565473015573 5ustar nileshnileshparameters/tests/testthat/test-mira.R0000644000175000017500000000103214122064334017574 0ustar nileshnileshif (requiet("testthat") && requiet("parameters") && requiet("mice")) { data("nhanes2") imp <- mice(nhanes2, printFlag = FALSE) fit <- with(data = imp, exp = lm(bmi ~ age + hyp + chl)) mp1 <- model_parameters(fit) mp2 <- summary(pool(fit)) test_that("param", { expect_equal(mp1$Parameter, as.vector(mp2$term)) }) test_that("coef", { expect_equal(mp1$Coefficient, mp2$estimate, tolerance = 1e-3) }) test_that("se", { expect_equal(mp1$SE, mp2$std.error, tolerance = 1e-3) }) } parameters/tests/testthat/test-gee.R0000644000175000017500000000142114122064334017406 0ustar nileshnileshif (requiet("testthat") && requiet("parameters") && requiet("gee")) { data(warpbreaks) m1 <- gee(breaks ~ tension, id = wool, data = warpbreaks) test_that("ci", { expect_equal( ci(m1)$CI_low, c(30.90044, -17.76184, -22.48406), tolerance = 1e-3 ) }) test_that("se", { expect_equal( standard_error(m1)$SE, c(2.80028, 3.96019, 3.96019), tolerance = 1e-3 ) }) test_that("p_value", { expect_equal( p_value(m1)$p, c(0, 0.01157, 2e-04), tolerance = 1e-3 ) }) mp <- suppressWarnings(model_parameters(m1)) 
test_that("model_parameters", { expect_equal( mp$Coefficient, c(36.38889, -10, -14.72222), tolerance = 1e-3 ) }) } parameters/tests/testthat/test-model_parameters.glm.R0000644000175000017500000000635014136205626022763 0ustar nileshnileshif (requiet("testthat") && requiet("parameters") && requiet("boot")) { data(mtcars) test_that("model_parameters.lm", { model <- lm(mpg ~ wt, data = mtcars) params <- model_parameters(model, verbose = FALSE) expect_equal(c(nrow(params), ncol(params)), c(2, 9)) expect_equal(params$CI_high, c(41.119752761418, -4.20263490802709), tolerance = 1e-3) expect_equal(attributes(params)$sigma, 3.045882, tolerance = 1e-3) params <- model_parameters(model, ci = c(0.8, 0.9), verbose = FALSE) expect_equal(c(nrow(params), ncol(params)), c(2, 10)) params <- model_parameters(model, dispersion = TRUE, bootstrap = TRUE, iterations = 500, verbose = FALSE) expect_equal(c(nrow(params), ncol(params)), c(2, 7)) model <- lm(mpg ~ wt + cyl, data = mtcars) params <- model_parameters(model, verbose = FALSE) expect_equal(c(nrow(params), ncol(params)), c(3, 9)) model <- lm(mpg ~ wt * cyl, data = mtcars) params <- model_parameters(model, verbose = FALSE) expect_equal(c(nrow(params), ncol(params)), c(4, 9)) params <- model_parameters(model, component = "conditional", effects = "fixed", verbose = FALSE) }) test_that("print digits model_parameters.lm", { model <- lm(mpg ~ wt, data = mtcars) params <- model_parameters(model, digits = 4, ci_digits = 5, verbose = FALSE) out <- capture.output(print(params)) expect_equal(out[3], "(Intercept) | 37.2851 | 1.8776 | [33.45050, 41.11975] | 19.8576 | < .001") }) test_that("print digits model_parameters.lm", { model <- lm(mpg ~ wt, data = mtcars) params <- model_parameters(model, summary = TRUE, verbose = FALSE) out <- capture.output(print(params)) expect_equal( out, c( "Parameter | Coefficient | SE | 95% CI | t(30) | p", "------------------------------------------------------------------", "(Intercept) | 37.29 | 1.88 | [33.45, 
41.12] | 19.86 | < .001", "wt | -5.34 | 0.56 | [-6.49, -4.20] | -9.56 | < .001", "", "Model: mpg ~ wt (32 Observations)", "Residual standard deviation: 3.046 (df = 30)", "R2: 0.753; adjusted R2: 0.745" ) ) params <- model_parameters(model, summary = FALSE, verbose = FALSE) out <- capture.output(print(params)) expect_equal( out, c( "Parameter | Coefficient | SE | 95% CI | t(30) | p", "------------------------------------------------------------------", "(Intercept) | 37.29 | 1.88 | [33.45, 41.12] | 19.86 | < .001", "wt | -5.34 | 0.56 | [-6.49, -4.20] | -9.56 | < .001" ) ) }) test_that("model_parameters.glm - binomial", { set.seed(333) model <- glm(vs ~ wt + cyl, data = mtcars, family = "binomial") params <- model_parameters(model, verbose = FALSE) expect_equal(c(nrow(params), ncol(params)), c(3, 9)) params <- suppressWarnings(model_parameters(model, bootstrap = TRUE, iterations = 500, verbose = FALSE)) expect_equal(c(nrow(params), ncol(params)), c(3, 6)) params <- model_parameters(model, component = "conditional", effects = "fixed", verbose = FALSE) }) } parameters/tests/testthat/test-panelr.R0000644000175000017500000000402614122064334020133 0ustar nileshnilesh.runThisTest <- Sys.getenv("RunAllparametersTests") == "yes" if (.runThisTest && requiet("testthat") && requiet("parameters") && requiet("panelr")) { data("WageData") wages <- panel_data(WageData, id = id, wave = t) m1 <- wbm(lwage ~ lag(union) + wks | blk + fem | blk * lag(union), data = wages) m2 <- suppressWarnings(wbm(lwage ~ lag(union) + wks | blk + fem | blk * (t | id), data = wages)) test_that("ci", { expect_equal( ci(m1)$CI_low, c(0.00807, -0.00376, 6.14479, -0.09624, -0.00507, -0.34607, -0.53918, -0.37071), tolerance = 1e-3 ) expect_equal( ci(m2)$CI_low, c(-0.01668, -0.00139, 6.01762, -0.08795, -0.0055, -0.32126, -0.54359), tolerance = 1e-3 ) }) test_that("se", { expect_equal( standard_error(m1)$SE, c(0.0256, 0.00108, 0.2313, 0.03482, 0.00482, 0.05952, 0.04971, 0.12418), tolerance = 1e-3 ) 
expect_equal( standard_error(m2)$SE, c(0.01838, 0.00073, 0.22549, 0.03394, 0.0047, 0.05803, 0.04846), tolerance = 1e-3 ) }) test_that("p_value", { expect_equal( p_value(m1)$p, c(0.02295, 0.13007, 0, 0.42167, 0.36422, 0.00013, 0, 0.30533), tolerance = 1e-3 ) expect_equal( p_value(m2)$p, c(0.29282, 0.9538, 0, 0.52805, 0.43004, 0.00038, 0), tolerance = 1e-3 ) }) test_that("model_parameters", { expect_equal( model_parameters(m1, effects = "fixed")$Coefficient, c(0.05825, -0.00164, 6.59813, -0.028, 0.00438, -0.22941, -0.44176, -0.12732), tolerance = 1e-3 ) expect_equal( model_parameters(m1, effects = "all")$Coefficient, c( 0.05825, -0.00164, 6.59813, -0.028, 0.00438, -0.22941, -0.44176, -0.12732, 0.35399, 0.23264 ), tolerance = 1e-3 ) expect_equal( model_parameters(m2, effects = "fixed")$Coefficient, c(0.01934, 4e-05, 6.45957, -0.02143, 0.00371, -0.20753, -0.44861), tolerance = 1e-3 ) }) } parameters/tests/testthat/test-lavaan.R0000644000175000017500000000333614122064334020117 0ustar nileshnileshif (requiet("testthat") && requiet("parameters") && requiet("lavaan")) { model <- " # measurement model ind60 =~ x1 + x2 + x3 dem60 =~ y1 + y2 + y3 + y4 dem65 =~ y5 + y6 + y7 + y8 # regressions dem60 ~ ind60 dem65 ~ ind60 + dem60 # residual correlations y1 ~~ y5 y2 ~~ y4 + y6 y3 ~~ y7 y4 ~~ y8 y6 ~~ y8 " m <- sem(model, data = PoliticalDemocracy, test = "Satorra-Bentler") test_that("unstandardized", { mp <- model_parameters(m, eta_squared = "raw") ml <- parameterEstimates(m, se = TRUE) ml <- ml[(ml$lhs != ml$rhs) & (ml$op != "~1"), ] expect_equal(mp$Coefficient, ml$est, tolerance = 1e-3) expect_equal(mp$SE, ml$se, tolerance = 1e-3) }) test_that("standardized", { mp <- model_parameters(m, standardize = TRUE) ml <- standardizedSolution(m, type = "std.all", se = TRUE) ml <- ml[(ml$lhs != ml$rhs) & (ml$op != "~1"), ] expect_equal(mp$Coefficient, ml$est, tolerance = 1e-3) expect_equal(mp$SE, ml$se, tolerance = 1e-3) }) test_that("standardized-lv", { mp <- model_parameters(m, 
standardize = "latent") ml <- standardizedSolution(m, type = "std.lv", se = TRUE) ml <- ml[(ml$lhs != ml$rhs) & (ml$op != "~1"), ] expect_equal(mp$Coefficient, ml$est, tolerance = 1e-3) expect_equal(mp$SE, ml$se, tolerance = 1e-3) }) test_that("standardized-nox", { mp <- model_parameters(m, standardize = "no_exogenous") ml <- standardizedSolution(m, type = "std.nox", se = TRUE) ml <- ml[(ml$lhs != ml$rhs) & (ml$op != "~1"), ] expect_equal(mp$Coefficient, ml$est, tolerance = 1e-3) expect_equal(mp$SE, ml$se, tolerance = 1e-3) }) } parameters/tests/testthat/test-format.R0000644000175000017500000000046514122064334020145 0ustar nileshnileshif (requiet("testthat") && requiet("parameters")) { test_that("format_order", { expect_equal(format_order(2), "second") expect_equal(format_order(45), "forty fifth") expect_equal(format_order(2, textual = FALSE), "2nd") expect_equal(format_order(45, textual = FALSE), "45th") }) } parameters/tests/testthat/test-format_p_adjust.R0000644000175000017500000000033114122064334022026 0ustar nileshnileshif (requiet("testthat") && requiet("parameters")) { test_that("format_p_adjust", { expect_equal(format_p_adjust("holm"), "Holm (1979)") expect_equal(format_p_adjust("bonferroni"), "Bonferroni") }) } parameters/tests/testthat/test-gamm.R0000644000175000017500000000221714122064334017573 0ustar nileshnileshif (requiet("testthat") && requiet("parameters") && requiet("mgcv")) { set.seed(123) dat <- gamSim(6, n = 200, scale = .2, dist = "poisson") m1 <- gamm( y ~ s(x0) + s(x1) + s(x2), family = poisson, data = dat, random = list(fac = ~1) ) test_that("ci", { expect_equal( ci(m1)$CI_low, c(2.361598, NA, NA, NA), tolerance = 1e-3 ) }) test_that("se", { expect_equal( standard_error(m1)$SE, c(0.3476989, NA, NA, NA), tolerance = 1e-3 ) }) test_that("p_value", { expect_equal( p_value(m1)$p, c(0, 0, 0, 0), tolerance = 1e-3 ) }) mp <- model_parameters(m1) test_that("model_parameters", { expect_equal( mp$Coefficient, c(3.0476, NA, NA, NA), tolerance = 1e-3 ) 
}) test_that("model_parameters", { expect_equal( mp$df, c(NA, 3.84696, 3.17389, 8.51855), tolerance = 1e-3 ) }) test_that("model_parameters", { expect_equal( mp$df_error, c(183.4606, NA, NA, NA), tolerance = 1e-3 ) }) } parameters/tests/testthat/test-p_adjust.R0000644000175000017500000000236214122064334020464 0ustar nileshnilesh.runThisTest <- Sys.getenv("RunAllparametersTests") == "yes" if (.runThisTest && requiet("testthat") && requiet("parameters")) { data(mtcars) model <- lm(mpg ~ wt * cyl + am + log(hp), data = mtcars) test_that("model_parameters, p-adjust", { mp <- model_parameters(model) expect_equal(mp$p, c(0, 0.00304, 0.02765, 0.65851, 0.01068, 0.02312), tolerance = 1e-3) mp <- model_parameters(model, p_adjust = "BH") expect_equal(mp$p, c(0, 0.00912, 0.03318, 0.65851, 0.02137, 0.03318), tolerance = 1e-3) mp <- model_parameters(model, p_adjust = "bonferroni") expect_equal(mp$p, c(0, 0.01824, 0.16588, 1, 0.06411, 0.13869), tolerance = 1e-3) }) if (requiet("emmeans")) { data(iris) m <- pairs(emmeans(aov(Sepal.Width ~ Species, data = iris), ~Species)) test_that("model_parameters, emmeans, p-adjust", { mp <- model_parameters(m) expect_equal(mp$p, as.data.frame(m)$p.value, tolerance = 1e-4) }) m <- pairs(emmeans(aov(Sepal.Width ~ Species, data = iris), ~Species), adjust = "scheffe") test_that("model_parameters, emmeans, p-adjust", { mp <- model_parameters(m, p_adjust = "scheffe") expect_equal(mp$p, as.data.frame(m)$p.value, tolerance = 1e-4) }) } } parameters/tests/testthat/test-parameters_selection.R0000644000175000017500000000126514122064334023064 0ustar nileshnileshif (requiet("testthat") && requiet("parameters")) { test_that("select_parameters", { model <- lm(mpg ~ ., data = mtcars) x <- select_parameters(model) expect_equal(n_parameters(model) - n_parameters(x), 7) # library(lme4) # model <- lmer(Sepal.Width ~ Sepal.Length * Petal.Width * Petal.Length + (1 | Species), data = iris) # x <- select_parameters(model) # expect_equal(n_parameters(model) - 
n_parameters(x), 0) # This is broken # library(rstanarm) # model <- stan_glm(mpg ~ ., data = mtcars, refresh = 0) # x <- select_parameters(model, cross_validation = TRUE) # expect_equal(n_parameters(model) - n_parameters(x), 9) }) } parameters/tests/testthat/test-checks.R0000644000175000017500000000065314122064334020114 0ustar nileshnileshif (requiet("testthat") && requiet("parameters")) { data(mtcars) test_that("check_factorstructure", { x <- check_factorstructure(mtcars) expect_equal(x$KMO$MSA, 0.826, tolerance = 0.01) expect_equal(x$sphericity$chisq, 408.011, tolerance = 0.01) }) test_that("check_clusterstructure", { set.seed(333) expect_equal(check_clusterstructure(iris[, 1:4])$H, 0.187, tolerance = 0.01) }) } parameters/tests/testthat/test-model_parameters.mediate.R0000644000175000017500000000457714122064334023617 0ustar nileshnilesh.runThisTest <- Sys.getenv("RunAllparametersTests") == "yes" if (requiet("testthat") && requiet("parameters") && requiet("mediation") && requiet("MASS")) { data(jobs) b <- lm(job_seek ~ treat + econ_hard + sex + age, data = jobs) c <- lm(depress2 ~ treat + job_seek + econ_hard + sex + age, data = jobs) set.seed(1234) m1 <- mediate(b, c, sims = 50, treat = "treat", mediator = "job_seek") b2 <- lm(job_seek ~ educ + sex, data = jobs) c2 <- lm(depress2 ~ educ + job_seek + sex, data = jobs) set.seed(1234) m2 <- mediate(b2, c2, treat = "educ", mediator = "job_seek", sims = 50, control.value = "gradwk", treat.value = "somcol" ) test_that("model_parameters.mediate-1", { params <- model_parameters(m1) expect_equal(params$Estimate, c(-0.01488, -0.04753, -0.06242, 0.16635), tolerance = 1e-2) expect_equal(params$Parameter, c("ACME", "ADE", "Total Effect", "Prop. Mediated")) }) test_that("model_parameters.mediate-2", { params <- model_parameters(m2) expect_equal(params$Estimate, c(0.02484, -0.05793, -0.03309, -0.27914), tolerance = 1e-2) expect_equal(params$Parameter, c("ACME", "ADE", "Total Effect", "Prop. 
Mediated")) }) if (.runThisTest) { jobs$job_disc <- as.factor(jobs$job_disc) b.ord <- MASS::polr( job_disc ~ treat + econ_hard + sex + age, data = jobs, method = "probit", Hess = TRUE ) d.bin <- glm( work1 ~ treat + job_disc + econ_hard + sex + age, data = jobs, family = binomial(link = "probit") ) set.seed(1234) m3 <- mediate(b.ord, d.bin, sims = 50, treat = "treat", mediator = "job_disc") test_that("model_parameters.mediate-3", { params <- model_parameters(m3) expect_equal(params$Estimate, c( 0.00216, 0.00231, 0.0486, 0.04875, 0.05091, 0.03981, 0.04829, 0.00223, 0.04868, 0.04405 ), tolerance = 1e-2) expect_equal(params$Parameter, c( "ACME (control)", "ACME (treated)", "ADE (control)", "ADE (treated)", "Total Effect", "Prop. Mediated (control)", "Prop. Mediated (treated)", "ACME (average)", "ADE (average)", "Prop. Mediated (average)" )) expect_equal(params$Component, c( "control", "treated", "control", "treated", "Total Effect", "control", "treated", "average", "average", "average" )) }) } } parameters/tests/testthat/test-model_parameters.MASS.R0000644000175000017500000000130414132224441022731 0ustar nileshnileshif (requiet("testthat") && requiet("insight") && requiet("parameters") && requiet("MASS")) { model <- MASS::rlm(formula = mpg ~ am * cyl, data = mtcars) s <- summary(model) test_that("model_parameters.rlm", { params <- model_parameters(model) expect_equal(params$SE, as.vector(coef(s)[, 2]), tolerance = 1e-3) expect_equal(params$Coefficient, as.vector(coef(s)[, 1]), tolerance = 1e-3) expect_equal(params$t, as.vector(coef(s)[, 3]), tolerance = 1e-3) expect_equal(params$df_error, c(28, 28, 28, 28), tolerance = 1e-3) expect_equal( colnames(params), c("Parameter", "Coefficient", "SE", "CI", "CI_low", "CI_high", "t", "df_error", "p") ) }) } parameters/tests/testthat/test-gam.R0000644000175000017500000000374614122064334017426 0ustar nileshnileshif (requiet("testthat") && requiet("parameters") && requiet("mgcv")) { set.seed(123) dat <- gamSim(1, n = 400, dist = 
"normal", scale = 2) m1 <- mgcv::gam(y ~ s(x0) + s(x1) + s(x2) + s(x3), data = dat) test_that("ci", { expect_equal( ci(m1)$CI_low, c(7.771085, NA, NA, NA, NA), tolerance = 1e-2 ) }) test_that("se", { expect_equal( standard_error(m1)$SE, c(0.1020741, NA, NA, NA, NA), tolerance = 1e-2 ) }) test_that("p_value", { expect_equal( p_value(m1)$p, c(0, 0, 0, 0, 0.00196), tolerance = 1e-2 ) }) .runThisTest <- Sys.getenv("RunAllparametersTests") == "yes" if (.runThisTest) { mp <- model_parameters(m1) test_that("model_parameters", { expect_equal( mp$Coefficient, c(7.97176, NA, NA, NA, NA), tolerance = 1e-3 ) expect_equal( mp$df, c(NA, 3.63421, 2.97192, 8.29867, 1.04607), tolerance = 1e-3 ) expect_equal( mp$df_error, c(383.04913, NA, NA, NA, NA), tolerance = 1e-3 ) }) test_that("print model_parameters", { out <- utils::capture.output(print(mp)) expect_equal( out, c( "# Fixed Effects", "", "Parameter | Coefficient | SE | 95% CI | t(383.05) | p", "--------------------------------------------------------------------", "(Intercept) | 7.97 | 0.10 | [7.77, 8.17] | 78.10 | < .001", "", "# Smooth Terms", "", "Parameter | F | df | p", "----------------------------------------", "Smooth term (x0) | 10.53 | 3.63 | < .001", "Smooth term (x1) | 87.44 | 2.97 | < .001", "Smooth term (x2) | 72.49 | 8.30 | < .001", "Smooth term (x3) | 9.58 | 1.05 | 0.002 " ) ) }) } } parameters/tests/testthat/test-plm.R0000644000175000017500000000465014133043122017437 0ustar nileshnileshif (requiet("testthat") && requiet("parameters") && requiet("stats") && requiet("plm") && getRversion() > "3.5") { data(Crime) data("Produc", package = "plm") set.seed(123) Crime$year <- as.factor(Crime$year) m1 <- plm(lcrmrte ~ lprbarr + year | . 
- lprbarr + lmix, data = Crime, model = "random") m2 <- plm::plm( formula = log(gsp) ~ log(pcap) + log(pc) + log(emp) + unemp, data = Produc, index = c("state", "year") ) test3333 <- data.frame( ID = c("TOM", "TOM", "TOM", "TOM", "MARY", "MARY", "MARY", "JOHN", "JOHN"), Year = c(1992:1995, 1991:1993, 1993:1994), ret = rnorm(9) ) test3333 <- pdata.frame(test3333) test3333["lag"] <- lag(test3333$ret) test3333 <- na.omit(test3333) test3333model <- ret ~ lag m3 <- plm::plm( test3333model, data = test3333, model = "within", effect = "individual", index = c("ID", "Year") ) test_that("ci", { expect_equal( ci(m1)$CI_low, c(-3.73825, -0.12292, -0.05971, -0.13356, -0.18381, -0.17782, -0.11688, -0.03962), tolerance = 1e-3 ) expect_equal( ci(m2)$CI_low, c(-0.08308, 0.2427, 0.70909, -0.00724), tolerance = 1e-3 ) expect_equal(ci(m3)$CI_low, -2.60478, tolerance = 1e-3) }) test_that("se", { expect_equal( standard_error(m1)$SE, c(0.13223, 0.09221, 0.02684, 0.02679, 0.02704, 0.02671, 0.02663, 0.02664), tolerance = 1e-3 ) expect_equal( standard_error(m2)$SE, c(0.029, 0.02512, 0.03009, 0.00099), tolerance = 1e-3 ) expect_equal(standard_error(m3)$SE, 0.5166726, tolerance = 1e-3) }) test_that("p_value", { expect_equal( p_value(m1)$p, c(0, 0.5285, 0.79456, 0.00262, 0, 0, 0.01558, 0.63395), tolerance = 1e-3 ) expect_equal( p_value(m2)$p, c(0.36752, 0, 0, 0), tolerance = 1e-3 ) expect_equal(p_value(m3)$p, 0.53696, tolerance = 1e-3) }) test_that("model_parameters", { expect_equal( model_parameters(m1)$Coefficient, c(-3.47857, 0.05815, -0.00699, -0.08095, -0.13071, -0.12537, -0.06458, 0.01269), tolerance = 1e-3 ) expect_equal( model_parameters(m2)$Coefficient, c(-0.02615, 0.29201, 0.76816, -0.0053), tolerance = 1e-3 ) expect_equal(model_parameters(m3)$Coefficient, -0.381721, tolerance = 1e-3) }) } parameters/tests/testthat/test-rstanarm.R0000644000175000017500000000303114122064334020474 0ustar nileshnilesh.runThisTest <- Sys.getenv("RunAllparametersTests") == "yes" osx <- tryCatch( { si <- 
Sys.info() if (!is.null(si["sysname"])) { si["sysname"] == "Darwin" || grepl("^darwin", R.version$os) } else { FALSE } }, error = function(e) { FALSE } ) if (.runThisTest && !osx && requiet("testthat") && requiet("parameters") && requiet("rstanarm")) { data(mtcars) set.seed(123) model <- stan_glm( vs ~ mpg + cyl, data = mtcars, refresh = 0, family = "binomial", seed = 123 ) mp <- model_parameters(model, centrality = "mean") s <- summary(model) test_that("mp", { expect_equal(mp$Mean, unname(s[1:3, 1]), tolerance = 1e-2, ignore_attr = TRUE) expect_equal(mp$Prior_Scale, c(2.5, 0.4148, 1.39984), tolerance = 1e-2) }) pbcLong$ybern <- as.integer(pbcLong$logBili >= mean(pbcLong$logBili)) set.seed(123) model <- stan_mvmer( formula = list( ybern ~ year + (1 | id), albumin ~ sex + year + (year | id) ), data = pbcLong, refresh = 0, seed = 123 ) mp <- suppressWarnings(model_parameters(model, centrality = "mean")) s <- summary(model) test_that("mp2", { expect_equal(mp$Mean, unname(s[c("y1|(Intercept)", "y1|year", "y2|(Intercept)", "y2|sexf", "y2|year"), 1]), tolerance = 1e-2, ignore_attr = TRUE) expect_equal(mp$Response, c("y1", "y1", "y2", "y2", "y2")) expect_equal(mp$Prior_Scale, c(4.9647, 0.3465, 5.57448, 1.39362, 0.38906), tolerance = 1e-2) }) } parameters/tests/testthat/test-gls.R0000644000175000017500000000163614133036017017442 0ustar nileshnileshif (requiet("testthat") && requiet("parameters") && requiet("nlme")) { data(Ovary) m1 <- gls(follicles ~ sin(2 * pi * Time) + cos(2 * pi * Time), data = Ovary, correlation = corAR1(form = ~ 1 | Mare) ) test_that("ci", { expect_equal( ci(m1)$CI_low, c(10.90853, -4.04402, -2.2722), tolerance = 1e-4 ) }) test_that("se", { expect_equal( standard_error(m1)$SE, c(0.664643651063474, 0.645047778144975, 0.697538308948056), tolerance = 1e-4 ) }) test_that("p_value", { expect_equal( p_value(m1)$p, c(2.6187369542827e-51, 2.28628382225752e-05, 0.198137111907874), tolerance = 1e-4 ) }) test_that("model_parameters", { expect_equal( 
model_parameters(m1)$Coefficient, c(12.2163981810227, -2.77471219793581, -0.899604717105857), tolerance = 1e-4 ) }) } parameters/tests/testthat/test-ivreg.R0000644000175000017500000000322314135275207017773 0ustar nileshnileshif (requiet("testthat") && requiet("parameters") && requiet("AER")) { data(CigarettesSW) CigarettesSW$rprice <- with(CigarettesSW, price / cpi) CigarettesSW$rincome <- with(CigarettesSW, income / population / cpi) CigarettesSW$tdiff <- with(CigarettesSW, (taxs - tax) / cpi) m1 <- ivreg( log(packs) ~ log(rprice) + log(rincome) | log(rincome) + tdiff + I(tax / cpi), data = CigarettesSW, subset = year == "1995" ) test_that("ci", { expect_equal( ci(m1, method = "normal")$CI_low, c(7.82022, -1.79328, -0.18717), tolerance = 1e-4 ) }) test_that("se", { expect_equal( standard_error(m1)$SE, c(1.05856, 0.2632, 0.23857), tolerance = 1e-4 ) }) test_that("p_value", { expect_equal( p_value(m1)$p, c(0, 1e-05, 0.24602), tolerance = 1e-4 ) }) test_that("model_parameters", { expect_equal( model_parameters(m1)$Coefficient, c(9.89496, -1.27742, 0.2804), tolerance = 1e-4 ) }) test_that("print-model_parameters", { out <- utils::capture.output(print(model_parameters(m1))) expect_equal( out, c( "# Fixed Effects", "", "Parameter | Coefficient | SE | 95% CI | t(45) | p", "--------------------------------------------------------------------", "(Intercept) | 9.89 | 1.06 | [ 7.76, 12.03] | 9.35 | < .001", "rprice [log] | -1.28 | 0.26 | [-1.81, -0.75] | -4.85 | < .001", "rincome [log] | 0.28 | 0.24 | [-0.20, 0.76] | 1.18 | 0.246 " ) ) }) } parameters/tests/testthat/test-compare_parameters.R0000644000175000017500000000475314122064334022532 0ustar nileshnileshif (requiet("testthat") && requiet("parameters") && requiet("insight")) { data(iris) m1 <- lm(Sepal.Length ~ Species, data = iris) m2 <- lm(Sepal.Length ~ Species * Petal.Length, data = iris) counts <- c(18, 17, 15, 20, 10, 20, 25, 13, 12) outcome <- gl(3, 1, 9) treatment <- gl(3, 3) m3 <- glm(counts ~ outcome + 
treatment, family = poisson()) x <- compare_parameters(m1, m2, m3) test_that("compare_parameters, default", { expect_equal( colnames(x), c( "Parameter", "Component", "Coefficient.m1", "SE.m1", "CI.m1", "CI_low.m1", "CI_high.m1", "t.m1", "df_error.m1", "p.m1", "Coefficient.m2", "SE.m2", "CI.m2", "CI_low.m2", "CI_high.m2", "t.m2", "df_error.m2", "p.m2", "Log-Mean.m3", "SE.m3", "CI.m3", "CI_low.m3", "CI_high.m3", "z.m3", "df_error.m3", "p.m3" ) ) out <- capture.output(x) expect_equal(length(out), 14) out <- format(x, style = "ci") expect_equal(colnames(out), c("Parameter", "m1", "m2", "m3")) expect_equal( out$Parameter, c( "(Intercept)", "Species (versicolor)", "Species (virginica)", "Petal Length", "Species (versicolor) * Petal Length", "Species (virginica) * Petal Length", "outcome (2)", "outcome (3)", "treatment (2)", "treatment (3)", NA, "Observations" ) ) }) x <- compare_parameters(m1, m2, m3, style = "se_p2") test_that("compare_parameters, se_p2", { expect_equal( colnames(x), c( "Parameter", "Component", "Coefficient.m1", "SE.m1", "CI.m1", "CI_low.m1", "CI_high.m1", "t.m1", "df_error.m1", "p.m1", "Coefficient.m2", "SE.m2", "CI.m2", "CI_low.m2", "CI_high.m2", "t.m2", "df_error.m2", "p.m2", "Log-Mean.m3", "SE.m3", "CI.m3", "CI_low.m3", "CI_high.m3", "z.m3", "df_error.m3", "p.m3" ) ) out <- capture.output(x) expect_equal(length(out), 14) out <- format(x, style = "se_p2") expect_equal( colnames(out), c( "Parameter", "Coefficient (m1)", "p (m1)", "Coefficient (m2)", "p (m2)", "Log-Mean (m3)", "p (m3)" ) ) expect_equal( out$Parameter, c( "(Intercept)", "Species (versicolor)", "Species (virginica)", "Petal Length", "Species (versicolor) * Petal Length", "Species (virginica) * Petal Length", "outcome (2)", "outcome (3)", "treatment (2)", "treatment (3)", NA, "Observations" ) ) }) } parameters/tests/testthat/test-format_parameters.R0000644000175000017500000003467114122064334022376 0ustar nileshnileshif (requiet("testthat") && requiet("parameters") && requiet("splines")) 
{ data(iris) set.seed(123) iris$cat <- sample(LETTERS[1:4], nrow(iris), replace = TRUE) test_that("format_parameters-1", { model <- lm(Sepal.Length ~ Species * Sepal.Width, data = iris) fp <- format_parameters(model) expect_equal(fp, c( `(Intercept)` = "(Intercept)", Speciesversicolor = "Species [versicolor]", Speciesvirginica = "Species [virginica]", Sepal.Width = "Sepal Width", `Speciesversicolor:Sepal.Width` = "Species [versicolor] * Sepal Width", `Speciesvirginica:Sepal.Width` = "Species [virginica] * Sepal Width" )) }) test_that("format_parameters-2", { model <- lm(Sepal.Length ~ Sepal.Width * Species, data = iris) fp <- format_parameters(model) expect_equal(fp, c( `(Intercept)` = "(Intercept)", Sepal.Width = "Sepal Width", Speciesversicolor = "Species [versicolor]", Speciesvirginica = "Species [virginica]", `Sepal.Width:Speciesversicolor` = "Sepal Width * Species [versicolor]", `Sepal.Width:Speciesvirginica` = "Sepal Width * Species [virginica]" )) }) test_that("format_parameters-3", { model <- lm(Sepal.Length ~ Species * Sepal.Width * Petal.Length, data = iris) fp <- format_parameters(model) expect_equal(fp, c( `(Intercept)` = "(Intercept)", Speciesversicolor = "Species [versicolor]", Speciesvirginica = "Species [virginica]", Sepal.Width = "Sepal Width", Petal.Length = "Petal Length", `Speciesversicolor:Sepal.Width` = "Species [versicolor] * Sepal Width", `Speciesvirginica:Sepal.Width` = "Species [virginica] * Sepal Width", `Speciesversicolor:Petal.Length` = "Species [versicolor] * Petal Length", `Speciesvirginica:Petal.Length` = "Species [virginica] * Petal Length", `Sepal.Width:Petal.Length` = "Sepal Width * Petal Length", `Speciesversicolor:Sepal.Width:Petal.Length` = "(Species [versicolor] * Sepal Width) * Petal Length", `Speciesvirginica:Sepal.Width:Petal.Length` = "(Species [virginica] * Sepal Width) * Petal Length" )) }) test_that("format_parameters-4", { model <- lm(Sepal.Length ~ Species * cat * Petal.Length, data = iris) fp <- 
format_parameters(model) expect_equal(fp, c( `(Intercept)` = "(Intercept)", Speciesversicolor = "Species [versicolor]", Speciesvirginica = "Species [virginica]", catB = "cat [B]", catC = "cat [C]", catD = "cat [D]", Petal.Length = "Petal Length", `Speciesversicolor:catB` = "Species [versicolor] * cat [B]", `Speciesvirginica:catB` = "Species [virginica] * cat [B]", `Speciesversicolor:catC` = "Species [versicolor] * cat [C]", `Speciesvirginica:catC` = "Species [virginica] * cat [C]", `Speciesversicolor:catD` = "Species [versicolor] * cat [D]", `Speciesvirginica:catD` = "Species [virginica] * cat [D]", `Speciesversicolor:Petal.Length` = "Species [versicolor] * Petal Length", `Speciesvirginica:Petal.Length` = "Species [virginica] * Petal Length", `catB:Petal.Length` = "cat [B] * Petal Length", `catC:Petal.Length` = "cat [C] * Petal Length", `catD:Petal.Length` = "cat [D] * Petal Length", `Speciesversicolor:catB:Petal.Length` = "(Species [versicolor] * cat [B]) * Petal Length", `Speciesvirginica:catB:Petal.Length` = "(Species [virginica] * cat [B]) * Petal Length", `Speciesversicolor:catC:Petal.Length` = "(Species [versicolor] * cat [C]) * Petal Length", `Speciesvirginica:catC:Petal.Length` = "(Species [virginica] * cat [C]) * Petal Length", `Speciesversicolor:catD:Petal.Length` = "(Species [versicolor] * cat [D]) * Petal Length", `Speciesvirginica:catD:Petal.Length` = "(Species [virginica] * cat [D]) * Petal Length" )) }) test_that("format_parameters-5", { model <- lm(Sepal.Length ~ Species / Petal.Length, data = iris) fp <- format_parameters(model) expect_equal(fp, c( `(Intercept)` = "(Intercept)", Speciesversicolor = "Species [versicolor]", Speciesvirginica = "Species [virginica]", `Speciessetosa:Petal.Length` = "Species [setosa] * Petal Length", `Speciesversicolor:Petal.Length` = "Species [versicolor] * Petal Length", `Speciesvirginica:Petal.Length` = "Species [virginica] * Petal Length" )) }) test_that("format_parameters-6", { model <- lm(Sepal.Length ~ 
Petal.Length + (Species / Sepal.Width), data = iris) fp <- format_parameters(model) expect_equal(fp, c( `(Intercept)` = "(Intercept)", Petal.Length = "Petal Length", Speciesversicolor = "Species [versicolor]", Speciesvirginica = "Species [virginica]", `Speciessetosa:Sepal.Width` = "Species [setosa] * Sepal Width", `Speciesversicolor:Sepal.Width` = "Species [versicolor] * Sepal Width", `Speciesvirginica:Sepal.Width` = "Species [virginica] * Sepal Width" )) }) test_that("format_parameters-7", { model <- lm(Sepal.Length ~ Species / Petal.Length * Sepal.Width, data = iris) fp <- format_parameters(model) expect_equal(fp, c( `(Intercept)` = "(Intercept)", Speciesversicolor = "Species [versicolor]", Speciesvirginica = "Species [virginica]", Sepal.Width = "Sepal Width", `Speciessetosa:Petal.Length` = "Species [setosa] * Petal Length", `Speciesversicolor:Petal.Length` = "Species [versicolor] * Petal Length", `Speciesvirginica:Petal.Length` = "Species [virginica] * Petal Length", `Speciesversicolor:Sepal.Width` = "Species [versicolor] * Sepal Width", `Speciesvirginica:Sepal.Width` = "Species [virginica] * Sepal Width", `Speciessetosa:Petal.Length:Sepal.Width` = "Species [setosa] * Petal Length * Sepal Width", `Speciesversicolor:Petal.Length:Sepal.Width` = "Species [versicolor] * Petal Length * Sepal Width", `Speciesvirginica:Petal.Length:Sepal.Width` = "Species [virginica] * Petal Length * Sepal Width" )) }) test_that("format_parameters-8", { model <- lm(Sepal.Length ~ Species / (Petal.Length * Sepal.Width), data = iris) fp <- format_parameters(model) expect_equal(fp, c( `(Intercept)` = "(Intercept)", Speciesversicolor = "Species [versicolor]", Speciesvirginica = "Species [virginica]", `Speciessetosa:Petal.Length` = "Species [setosa] * Petal Length", `Speciesversicolor:Petal.Length` = "Species [versicolor] * Petal Length", `Speciesvirginica:Petal.Length` = "Species [virginica] * Petal Length", `Speciessetosa:Sepal.Width` = "Species [setosa] * Sepal Width", 
`Speciesversicolor:Sepal.Width` = "Species [versicolor] * Sepal Width", `Speciesvirginica:Sepal.Width` = "Species [virginica] * Sepal Width", `Speciessetosa:Petal.Length:Sepal.Width` = "Species [setosa] * Petal Length * Sepal Width", `Speciesversicolor:Petal.Length:Sepal.Width` = "Species [versicolor] * Petal Length * Sepal Width", `Speciesvirginica:Petal.Length:Sepal.Width` = "Species [virginica] * Petal Length * Sepal Width" )) }) test_that("format_parameters-9", { model <- lm(Sepal.Length ~ Petal.Length + (Species / (Sepal.Width * Petal.Width)), data = iris) fp <- format_parameters(model) expect_equal(fp, c( `(Intercept)` = "(Intercept)", Petal.Length = "Petal Length", Speciesversicolor = "Species [versicolor]", Speciesvirginica = "Species [virginica]", `Speciessetosa:Sepal.Width` = "Species [setosa] * Sepal Width", `Speciesversicolor:Sepal.Width` = "Species [versicolor] * Sepal Width", `Speciesvirginica:Sepal.Width` = "Species [virginica] * Sepal Width", `Speciessetosa:Petal.Width` = "Species [setosa] * Petal Width", `Speciesversicolor:Petal.Width` = "Species [versicolor] * Petal Width", `Speciesvirginica:Petal.Width` = "Species [virginica] * Petal Width", `Speciessetosa:Sepal.Width:Petal.Width` = "Species [setosa] * Sepal Width * Petal Width", `Speciesversicolor:Sepal.Width:Petal.Width` = "Species [versicolor] * Sepal Width * Petal Width", `Speciesvirginica:Sepal.Width:Petal.Width` = "Species [virginica] * Sepal Width * Petal Width" )) }) test_that("format_parameters-10", { model <- lm(Sepal.Length ~ Species + poly(Sepal.Width, 2), data = iris) fp <- format_parameters(model) expect_equal(fp, c( `(Intercept)` = "(Intercept)", Speciesversicolor = "Species [versicolor]", Speciesvirginica = "Species [virginica]", `poly(Sepal.Width, 2)1` = "Sepal Width [1st degree]", `poly(Sepal.Width, 2)2` = "Sepal Width [2nd degree]" )) }) test_that("format_parameters-11", { model <- lm(Sepal.Length ~ Species + poly(Sepal.Width, 2, raw = TRUE), data = iris) fp <- 
format_parameters(model) expect_equal(fp, c( `(Intercept)` = "(Intercept)", Speciesversicolor = "Species [versicolor]", Speciesvirginica = "Species [virginica]", `poly(Sepal.Width, 2, raw = TRUE)1` = "Sepal Width [1st degree]", `poly(Sepal.Width, 2, raw = TRUE)2` = "Sepal Width [2nd degree]" )) }) test_that("format_parameters-12", { model <- lm(Sepal.Length ~ Petal.Length * bs(Petal.Width), data = iris) fp <- format_parameters(model) expect_equal(fp, c( `(Intercept)` = "(Intercept)", Petal.Length = "Petal Length", `bs(Petal.Width)1` = "Petal Width [1st degree]", `bs(Petal.Width)2` = "Petal Width [2nd degree]", `bs(Petal.Width)3` = "Petal Width [3rd degree]", `Petal.Length:bs(Petal.Width)1` = "Petal Length * Petal Width [1st degree]", `Petal.Length:bs(Petal.Width)2` = "Petal Length * Petal Width [2nd degree]", `Petal.Length:bs(Petal.Width)3` = "Petal Length * Petal Width [3rd degree]" )) }) test_that("format_parameters-13", { model <- lm(Sepal.Length ~ Petal.Length * bs(Petal.Width, degree = 4), data = iris) fp <- format_parameters(model) expect_equal(fp, c( `(Intercept)` = "(Intercept)", Petal.Length = "Petal Length", `bs(Petal.Width, degree = 4)1` = "Petal Width [1st degree]", `bs(Petal.Width, degree = 4)2` = "Petal Width [2nd degree]", `bs(Petal.Width, degree = 4)3` = "Petal Width [3rd degree]", `bs(Petal.Width, degree = 4)4` = "Petal Width [4th degree]", `Petal.Length:bs(Petal.Width, degree = 4)1` = "Petal Length * Petal Width [1st degree]", `Petal.Length:bs(Petal.Width, degree = 4)2` = "Petal Length * Petal Width [2nd degree]", `Petal.Length:bs(Petal.Width, degree = 4)3` = "Petal Length * Petal Width [3rd degree]", `Petal.Length:bs(Petal.Width, degree = 4)4` = "Petal Length * Petal Width [4th degree]" )) }) test_that("format_parameters-14", { model <- lm(Sepal.Length ~ Petal.Length * ns(Petal.Width, df = 3), data = iris) fp <- format_parameters(model) expect_equal(fp, c( `(Intercept)` = "(Intercept)", Petal.Length = "Petal Length", `ns(Petal.Width, df = 3)1` = 
"Petal Width [1st degree]", `ns(Petal.Width, df = 3)2` = "Petal Width [2nd degree]", `ns(Petal.Width, df = 3)3` = "Petal Width [3rd degree]", `Petal.Length:ns(Petal.Width, df = 3)1` = "Petal Length * Petal Width [1st degree]", `Petal.Length:ns(Petal.Width, df = 3)2` = "Petal Length * Petal Width [2nd degree]", `Petal.Length:ns(Petal.Width, df = 3)3` = "Petal Length * Petal Width [3rd degree]" )) }) test_that("format_parameters-15", { model <- lm(Sepal.Length ~ Petal.Length * I(Petal.Width^2), data = iris) fp <- format_parameters(model) expect_equal(fp, c( `(Intercept)` = "(Intercept)", Petal.Length = "Petal Length", `I(Petal.Width^2)` = "Petal Width^2", `Petal.Length:I(Petal.Width^2)` = "Petal Length * Petal Width^2" )) }) test_that("format_parameters-16", { model <- lm(Sepal.Length ~ Petal.Length * as.factor(Species), data = iris) fp <- format_parameters(model) expect_equal(fp, c( `(Intercept)` = "(Intercept)", Petal.Length = "Petal Length", `as.factor(Species)versicolor` = "Species [versicolor]", `as.factor(Species)virginica` = "Species [virginica]", `Petal.Length:as.factor(Species)versicolor` = "Petal Length * Species [versicolor]", `Petal.Length:as.factor(Species)virginica` = "Petal Length * Species [virginica]" )) }) test_that("format_parameters-17", { if (requiet("pscl")) { data("bioChemists") model <- zeroinfl(art ~ fem + mar + kid5 + ment | kid5 + phd, data = bioChemists) fp <- format_parameters(model) expect_equal(fp, c( `count_(Intercept)` = "(Intercept)", count_femWomen = "fem [Women]", count_marMarried = "mar [Married]", count_kid5 = "kid5", count_ment = "ment", `zero_(Intercept)` = "(Intercept)", zero_kid5 = "kid5", zero_phd = "phd" )) } }) test_that("format_parameters-18", { data(iris) levels(iris$Species) <- c("Species verti", "No Specieses", "Yes (Species)") model <- lm(Sepal.Length ~ Species * Petal.Width, data = iris) fp <- format_parameters(model) expect_equal(fp, c( `(Intercept)` = "(Intercept)", `SpeciesNo Specieses` = "Species [No Specieses]", 
`SpeciesYes (Species)` = "Species [Yes (Species)]", Petal.Width = "Petal Width", `SpeciesNo Specieses:Petal.Width` = "Species [No Specieses] * Petal Width", `SpeciesYes (Species):Petal.Width` = "Species [Yes (Species)] * Petal Width" )) }) test_that("format_parameters-19", { data(mtcars) m1 <- lm(mpg ~ qsec:wt + wt:drat, data = mtcars) m2 <- lm(mpg ~ qsec:wt + wt / drat, data = mtcars) m3 <- lm(mpg ~ qsec:wt + wt:drat + wt, data = mtcars) m4 <- lm(mpg ~ qsec:wt + wt / drat + wt, data = mtcars) m5 <- lm(mpg ~ qsec * wt + wt:drat + wt, data = mtcars) m6 <- lm(mpg ~ wt + qsec + wt:qsec, data = mtcars) expect_equal(format_parameters(m1), c(`(Intercept)` = "(Intercept)", `qsec:wt` = "qsec * wt", `wt:drat` = "wt * drat")) expect_equal(format_parameters(m2), c(`(Intercept)` = "(Intercept)", wt = "wt", `qsec:wt` = "qsec * wt", `wt:drat` = "wt * drat")) expect_equal(format_parameters(m3), c(`(Intercept)` = "(Intercept)", wt = "wt", `qsec:wt` = "qsec * wt", `wt:drat` = "wt * drat")) expect_equal(format_parameters(m4), c(`(Intercept)` = "(Intercept)", wt = "wt", `qsec:wt` = "qsec * wt", `wt:drat` = "wt * drat")) expect_equal(format_parameters(m5), c(`(Intercept)` = "(Intercept)", qsec = "qsec", wt = "wt", `qsec:wt` = "qsec * wt", `wt:drat` = "wt * drat")) expect_equal(format_parameters(m6), c(`(Intercept)` = "(Intercept)", wt = "wt", qsec = "qsec", `wt:qsec` = "wt * qsec")) }) } parameters/tests/testthat/test-model_parameters.vgam.R0000644000175000017500000000322314133316175023131 0ustar nileshnilesh.runThisTest <- Sys.getenv("RunAllparametersTests") == "yes" if (.runThisTest && requiet("testthat") && requiet("VGAM") && requiet("parameters")) { data("pneumo") data("hunua") set.seed(123) pneumo <- transform(pneumo, let = log(exposure.time)) m1 <- suppressWarnings(vgam( cbind(normal, mild, severe) ~ s(let) + exposure.time, cumulative(parallel = TRUE), data = pneumo, trace = FALSE )) set.seed(123) hunua$x <- rnorm(nrow(hunua)) m2 <- vgam(agaaus ~ s(altitude, df = 2) + s(x) + 
beitaw + corlae, binomialff, data = hunua) test_that("model_parameters.vgam", { params <- suppressWarnings(model_parameters(m1)) expect_equal(params$Coefficient, as.vector(m1@coefficients[params$Parameter]), tolerance = 1e-3) expect_equal(params$Parameter, c("(Intercept):1", "(Intercept):2", "exposure.time", "s(let)")) expect_equal(params$df, c(NA, NA, NA, 2.65007), tolerance = 1e-3) expect_equal(as.vector(na.omit(params$df)), as.vector(m1@nl.df), tolerance = 1e-3) }) test_that("model_parameters.vgam", { params <- suppressWarnings(model_parameters(m2)) expect_equal(params$Coefficient, as.vector(m2@coefficients[params$Parameter]), tolerance = 1e-3) expect_equal(params$Parameter, c("(Intercept)", "beitaw", "corlae", "s(altitude, df = 2)", "s(x)")) expect_equal(params$df, c(NA, NA, NA, 0.82686, 2.8054), tolerance = 1e-3) expect_equal(as.vector(na.omit(params$df)), as.vector(m2@nl.df), tolerance = 1e-3) expect_equal(colnames(params), c( "Parameter", "Coefficient", "SE", "CI", "CI_low", "CI_high", "Chi2", "df_error", "p", "Component" )) }) } parameters/tests/testthat/test-equivalence_test.R0000644000175000017500000000047714131251006022212 0ustar nileshnileshif (requiet("testthat") && requiet("parameters")) { test_that("equivalence_test", { data(mtcars) m <- lm(mpg ~ gear + wt + cyl + hp, data = mtcars) x <- equivalence_test(m) expect_equal(c(nrow(x), ncol(x)), c(5, 9)) expect_true(is.character(capture.output(equivalence_test(m)))) }) } parameters/tests/testthat/test-rank_deficienty.R0000644000175000017500000000107014122064334022004 0ustar nileshnileshif (requiet("testthat") && requiet("parameters")) { set.seed(123) data(mtcars) model <- stats::lm( formula = wt ~ am * cyl * vs, data = mtcars ) test_that("model_parameters-rank_deficiency", { expect_warning(model_parameters(model)) params <- suppressWarnings(model_parameters(model)) expect_equal(params$Parameter, c("(Intercept)", "am", "cyl", "vs", "am:cyl", "am:vs"), tolerance = 1e-3) expect_equal(params$Coefficient, 
c(2.28908, -1.37908, 0.22688, -0.26158, 0.08062, 0.14987), tolerance = 1e-3) }) } parameters/tests/testthat/test-model_parameters.lqmm.R0000644000175000017500000000563214122064334023146 0ustar nileshnilesh.runThisTest <- Sys.getenv("RunAllparametersTests") == "yes" if (FALSE && requiet("testthat") && requiet("lqmm") && requiet("parameters")) { # lqm ----------------------- test_that("model_parameters - lqm", { # data set.seed(123) n <- 500 p <- 1:3 / 4 set.seed(123) x <- runif(n, 0, 1) y <- 30 + x + rnorm(n) test <<- data.frame(x, y) # model set.seed(123) fit.lqm <- lqmm::lqm( y ~ x, data = test, tau = p, control = list(verbose = FALSE, loop_tol_ll = 1e-9), fit = TRUE ) df_lqm <- as.data.frame(model_parameters(fit.lqm)) expect_equal(df_lqm$Coefficient, c( 29.3220715172958, 1.1244506550584, 29.9547605920406, 1.1822574944936, 30.6283792821576, 1.25165747424685 ), tolerance = 0.001 ) }) # lqmm ----------------------- test_that("model_parameters - lqmm", { # setup set.seed(123) # data M <- 50 n <- 10 set.seed(123) x <- runif(n * M, 0, 1) group <- rep(1:M, each = n) y <- 10 * x + rep(rnorm(M, 0, 2), each = n) + rchisq(n * M, 3) test <<- data.frame(x, y, group) # model set.seed(123) fit.lqmm <- lqmm::lqmm( fixed = y ~ x, random = ~1, group = group, data = test, tau = 0.5, nK = 11, type = "normal" ) df_lqmm <- as.data.frame(model_parameters(fit.lqmm)) expect_equal(df_lqmm, structure( list( Parameter = c("(Intercept)", "x"), Coefficient = c( 3.44347538706013, 9.25833091219961 ), SE = c(0.491049614414579, 0.458163772053399), CI = c(0.95, 0.95), CI_low = c(2.47868633791118, 8.35815427623814), CI_high = c(4.40826443620908, 10.1585075481611), t = c( 7.01247956617455, 20.207470509302 ), df_error = c(497L, 497L), p = c( 6.34497395571023e-09, 2.05172540270515e-25 ) ), row.names = 1:2, pretty_names = c( `(Intercept)` = "(Intercept)", x = "x" ), ci = 0.95, verbose = TRUE, exponentiate = FALSE, ordinal_model = FALSE, linear_model = TRUE, mixed_model = TRUE, n_obs = 500L, model_class 
= "lqmm", bootstrap = FALSE, iterations = 1000, ignore_group = TRUE, ran_pars = TRUE, weighted_nobs = 500, model_formula = "y ~ x", coefficient_name = "Coefficient", zi_coefficient_name = "Log-Odds", digits = 2, ci_digits = 2, p_digits = 3, class = "data.frame", object_name = "fit.lqmm" ), tolerance = 0.001 ) }) } parameters/tests/testthat/test-glmmTMB-2.R0000644000175000017500000000447714122064334020322 0ustar nileshnilesh.runThisTest <- Sys.getenv("RunAllparametersTests") == "yes" if (.runThisTest && requiet("testthat") && requiet("parameters") && requiet("glmmTMB")) { data(Salamanders) model <- suppressWarnings(glmmTMB( count ~ spp + mined + spp * mined, ziformula = ~ spp + mined + spp * mined, family = truncated_poisson, data = Salamanders )) mp <- model_parameters(model, effects = "fixed", component = "conditional") test_that("model_parameters", { expect_equal(mp$Coefficient, as.vector(fixef(model)[[1]]), tolerance = 1e-3) expect_equal(mp$Parameter, names(fixef(model)[[1]])) }) mp <- model_parameters(model, effects = "fixed", component = "all") test_that("model_parameters", { expect_equal(mp$Coefficient, as.vector(unlist(fixef(model))), tolerance = 1e-3) expect_equal(mp$Parameter, gsub("^(cond\\.|zi\\.)", "", names(unlist(fixef(model))))) expect_equal( mp$Component, c( "conditional", "conditional", "conditional", "conditional", "conditional", "conditional", "conditional", "conditional", "conditional", "conditional", "conditional", "conditional", "conditional", "conditional", "zero_inflated", "zero_inflated", "zero_inflated", "zero_inflated", "zero_inflated", "zero_inflated", "zero_inflated", "zero_inflated", "zero_inflated", "zero_inflated", "zero_inflated", "zero_inflated", "zero_inflated", "zero_inflated" ) ) }) sim1 <- function(nfac = 40, nt = 100, facsd = 0.1, tsd = 0.15, mu = 0, residsd = 1) { dat <- expand.grid(fac = factor(letters[1:nfac]), t = 1:nt) n <- nrow(dat) dat$REfac <- rnorm(nfac, sd = facsd)[dat$fac] dat$REt <- rnorm(nt, sd = tsd)[dat$t] dat$x 
<- rnorm(n, mean = mu, sd = residsd) + dat$REfac + dat$REt dat } set.seed(101) d1 <- sim1(mu = 100, residsd = 10) d2 <- sim1(mu = 200, residsd = 5) d1$sd <- "ten" d2$sd <- "five" dat <- rbind(d1, d2) model <- suppressWarnings(glmmTMB(x ~ sd + (1 | t), dispformula = ~sd, data = dat)) mp <- model_parameters(model, effects = "fixed") test_that("model_parameters", { expect_equal(mp$Coefficient, as.vector(unlist(fixef(model))), tolerance = 1e-3) expect_equal(mp$Component, c("conditional", "conditional", "dispersion", "dispersion")) }) } parameters/tests/testthat/test-model_parameters.metafor.R0000644000175000017500000000152114122064334023626 0ustar nileshnileshif (requiet("testthat") && requiet("insight") && requiet("parameters") && requiet("metafor")) { test <- data.frame( estimate = c(0.111, 0.245, 0.8, 1.1, 0.03), std.error = c(0.05, 0.111, 0.001, 0.2, 0.01) ) mydat <<- test model <- metafor::rma(yi = estimate, sei = std.error, data = mydat) params <- model_parameters(model) test_that("model_parameters.metafor", { expect_equal(params$Parameter, c("Study 1", "Study 2", "Study 3", "Study 4", "Study 5", "Overall")) expect_equal( colnames(params), c("Parameter", "Coefficient", "SE", "CI", "CI_low", "CI_high", "z", "p", "Weight", "Method") ) expect_equal(params$Coefficient, c(0.111, 0.245, 0.8, 1.1, 0.03, 0.43769), tolerance = 1e-3) expect_equal(params$Weight, c(400, 81.16224, 1e+06, 25, 10000, NA), tolerance = 1e-3) }) } parameters/tests/testthat/test-model_parameters.aov_es_ci.R0000644000175000017500000003617014166770670024147 0ustar nileshnileshif (requiet("insight") && requiet("effectsize") && requiet("testthat") && requiet("lme4") && requiet("parameters") && requiet("effectsize") && utils::packageVersion("effectsize") > "0.5.0") { unloadNamespace("afex") unloadNamespace("lmerTest") data(iris) iris$Cat1 <- rep(c("X", "X", "Y"), length.out = nrow(iris)) iris$Cat2 <- rep(c("A", "B"), length.out = nrow(iris)) # aov ---------------------------------- 
test_that("model_parameters.aov", { skip_if_not_installed("effectsize", minimum_version = "0.5.1") model <- aov(Sepal.Width ~ Species, data = iris) mp <- suppressMessages(model_parameters(model, omega_squared = "partial", eta_squared = "partial", epsilon_squared = TRUE, ci = .9)) es <- suppressMessages(effectsize::omega_squared(model, partial = TRUE, ci = .9)) expect_equal(na.omit(mp$Omega2_CI_low), es$CI_low, tolerance = 1e-3, ignore_attr = TRUE) expect_equal(mp$Omega2_CI_low, c(0.3122, NA), tolerance = 1e-3, ignore_attr = TRUE) expect_equal(na.omit(mp$Omega2_CI_high), es$CI_high, tolerance = 1e-3, ignore_attr = TRUE) expect_equal(na.omit(mp$Omega2_CI_high), 1, tolerance = 1e-3, ignore_attr = TRUE) expect_equal(colnames(mp), c( "Parameter", "Sum_Squares", "df", "Mean_Square", "F", "p", "Omega2", "Omega2_CI_low", "Omega2_CI_high", "Eta2", "Eta2_CI_low", "Eta2_CI_high", "Epsilon2", "Epsilon2_CI_low", "Epsilon2_CI_high" )) model <- aov(Sepal.Length ~ Species * Cat1 * Cat2, data = iris) mp <- model_parameters(model, eta_squared = "raw", ci = .9) es <- effectsize::eta_squared(model, partial = FALSE, ci = .9) expect_equal(na.omit(mp$Eta2_CI_low), es$CI_low, tolerance = 1e-3, ignore_attr = TRUE) expect_equal(mp$Eta2_CI_low, c(0.5572, 0, 0, 0, 0, 0, 0, NA), tolerance = 1e-3, ignore_attr = TRUE) expect_equal(na.omit(mp$Eta2_CI_high), es$CI_high, tolerance = 1e-3, ignore_attr = TRUE) expect_equal(na.omit(mp$Eta2_CI_high), rep(1, 7), tolerance = 1e-3, ignore_attr = TRUE) expect_equal(colnames(mp), c( "Parameter", "Sum_Squares", "df", "Mean_Square", "F", "p", "Eta2", "Eta2_CI_low", "Eta2_CI_high" )) }) # anova --------------------- data(mtcars) test_that("model_parameters.anova", { skip_if_not_installed("effectsize", minimum_version = "0.5.1") model <- anova(lm(Sepal.Length ~ Species * Cat1 * Cat2, data = iris)) mp <- model_parameters(model, omega_squared = "partial", eta_squared = "partial", epsilon_squared = TRUE, ci = .9) es <- effectsize::omega_squared(model, partial = 
TRUE, ci = .9) expect_equal(na.omit(mp$Omega2_CI_low), es$CI_low, tolerance = 1e-3, ignore_attr = TRUE) expect_equal(na.omit(mp$Omega2_CI_high), es$CI_high, tolerance = 1e-3, ignore_attr = TRUE) expect_equal(colnames(mp), c( "Parameter", "Sum_Squares", "df", "Mean_Square", "F", "p", "Omega2_partial", "Omega2_CI_low", "Omega2_CI_high", "Eta2_partial", "Eta2_CI_low", "Eta2_CI_high", "Epsilon2_partial", "Epsilon2_CI_low", "Epsilon2_CI_high" )) }) data(mtcars) test_that("model_parameters.anova", { skip_if_not_installed("effectsize", minimum_version = "0.5.1") model <- aov(wt ~ cyl + Error(gear), data = mtcars) suppressWarnings({ mp <- model_parameters(model, omega_squared = "partial", eta_squared = "partial", epsilon_squared = TRUE, ci = .9) es <- effectsize::omega_squared(model, partial = TRUE, ci = .9, verbose = FALSE) }) expect_equal(na.omit(mp$Omega2_CI_low), es$CI_low[2], tolerance = 1e-3, ignore_attr = TRUE) expect_equal(na.omit(mp$Omega2_CI_high), es$CI_high, tolerance = 1e-3, ignore_attr = TRUE) expect_equal(colnames(mp), c( "Group", "Parameter", "Sum_Squares", "df", "Mean_Square", "F", "p", "Omega2_partial", "Omega2_CI_low", "Omega2_CI_high", "Eta2_partial", "Eta2_CI_low", "Eta2_CI_high", "Epsilon2_partial", "Epsilon2_CI_low", "Epsilon2_CI_high" )) }) # car anova --------------------------------- if (requiet("car")) { set.seed(123) data(Moore) model <- car::Anova(stats::lm( formula = conformity ~ fcategory * partner.status, data = Moore, contrasts = list(fcategory = contr.sum, partner.status = contr.sum) )) test_that("model_parameters.car-anova", { skip_if_not_installed("effectsize", minimum_version = "0.5.1") mp <- model_parameters(model, omega_squared = "partial", eta_squared = "partial", epsilon_squared = TRUE, ci = .9) es <- effectsize::omega_squared(model, partial = TRUE, ci = .9) expect_equal(na.omit(mp$Omega2_CI_low), es$CI_low, tolerance = 1e-3, ignore_attr = TRUE) expect_equal(mp$Omega2_CI_low, c(0, 0.05110, 0.00666, NA), tolerance = 1e-3, ignore_attr 
= TRUE) expect_equal(na.omit(mp$Omega2_CI_high), es$CI_high, tolerance = 1e-3, ignore_attr = TRUE) expect_equal(na.omit(mp$Omega2_CI_high), rep(1, 3), tolerance = 1e-3, ignore_attr = TRUE) expect_equal(colnames(mp), c( "Parameter", "Sum_Squares", "df", "Mean_Square", "F", "p", "Omega2_partial", "Omega2_CI_low", "Omega2_CI_high", "Eta2_partial", "Eta2_CI_low", "Eta2_CI_high", "Epsilon2_partial", "Epsilon2_CI_low", "Epsilon2_CI_high" )) }) } # maov ---------------------------------- set.seed(123) fit <- lm(cbind(mpg, disp, hp) ~ factor(cyl), data = mtcars) model <- aov(fit) test_that("model_parameters.maov", { skip_if_not_installed("effectsize", minimum_version = "0.5.1") mp <- suppressMessages(model_parameters(model, omega_squared = "partial", eta_squared = "partial", epsilon_squared = TRUE, ci = .9)) es <- suppressMessages(effectsize::omega_squared(model, partial = TRUE, ci = .9)) expect_equal(na.omit(mp$Omega2_CI_low), es$CI_low, tolerance = 1e-3, ignore_attr = TRUE) expect_equal(mp$Omega2_CI_low, c(0.58067, NA, 0.74092, NA, 0.55331, NA), tolerance = 1e-3, ignore_attr = TRUE) expect_equal(na.omit(mp$Omega2_CI_high), es$CI_high, tolerance = 1e-3, ignore_attr = TRUE) expect_equal(na.omit(mp$Omega2_CI_high), rep(1, 3), tolerance = 1e-3, ignore_attr = TRUE) expect_equal(colnames(mp), c( "Response", "Parameter", "Sum_Squares", "df", "Mean_Square", "F", "p", "Omega2", "Omega2_CI_low", "Omega2_CI_high", "Eta2", "Eta2_CI_low", "Eta2_CI_high", "Epsilon2", "Epsilon2_CI_low", "Epsilon2_CI_high" )) }) # stricter tests --------------------------------------------------------- if (requiet("car") && requiet("gam")) { # aov ------------------------------------------------ test_that("works with aov", { skip_on_cran() skip_if_not_installed("effectsize", minimum_version = "0.5.1") set.seed(123) npk.aov <- aov(yield ~ block + N * P, npk) set.seed(123) df_aov <- as.data.frame(parameters::model_parameters(npk.aov, ci = 0.95, eta_squared = "partial", omega_squared = "raw" )) 
expect_equal( df_aov, structure( list( Parameter = c("block", "N", "P", "N:P", "Residuals"), Sum_Squares = c(343.29, 189.28, 8.4, 21.28, 314.1), df = c(5, 1, 1, 1, 15), Mean_Square = c(68.66, 189.28, 8.4, 21.28, 20.94), F = c(3.28, 9.04, 0.4, 1.02, NA), p = c(0.03, 0.01, 0.54, 0.33, NA), Omega2 = c(0.27, 0.19, -0.01, 0, NA), Omega2_CI_low = c(0, 0, 0, 0, NA), Omega2_CI_high = c(1, 1, 1, 1, NA), Eta2_partial = c(0.52, 0.38, 0.03, 0.06, NA), Eta2_CI_low = c(0.04258, 0.0733, 0, 0, NA), Eta2_CI_high = c(1, 1, 1, 1, NA) ), row.names = c(NA, 5L), class = "data.frame", ci = 0.95, model_class = c("aov", "lm"), digits = 2, ci_digits = 2, p_digits = 3 ), tolerance = 0.1, ignore_attr = TRUE ) }) # aovlist ------------------------------------------------ # test_that("works with aovlist", { # skip_on_cran() # # set.seed(123) # npk.aovE <- aov(yield ~ N * P * K + Error(block), npk) # # set.seed(123) # df_aovE <- # as.data.frame(model_parameters(npk.aovE, # ci = 0.90, # eta_squared = "raw", # omega_squared = "partial" # )) # # expect_equal( # df_aovE, # structure( # list( # Group = c( # "block", # "block", # "Within", # "Within", # "Within", # "Within", # "Within", # "Within", # "Within" # ), # Parameter = c( # "N:P:K", # "Residuals", # "N", # "P", # "K", # "N:P", # "N:K", # "P:K", # "Residuals" # ), # Sum_Squares = c(37, 306.29, 189.28, 8.4, 95.2, 21.28, 33.14, 0.48, 185.29), # df = c(1, 4, 1, 1, 1, 1, 1, 1, 12), # Mean_Square = c(37, 76.57, 189.28, 8.4, 95.2, 21.28, 33.14, 0.48, 15.44), # `F` = c(0.48, NA, 12.26, 0.54, 6.17, 1.38, 2.15, 0.03, NA), # p = c(0.53, NA, 0, 0.47, 0.03, 0.26, 0.17, 0.86, NA), # Omega2_partial = c(-0.09, NA, 0.23, -0.01, 0.12, 0.01, 0.03, -0.03, NA), # Omega2_CI_low = c(0, NA, 0, 0, 0, 0, 0, 0, NA), # Omega2_CI_high = c(0, NA, 0.52, 0, 0.42, 0.22, 0.29, 0, NA), # Eta2 = c(0.04, NA, 0.22, 0.01, 0.11, 0.02, 0.04, 0, NA), # Eta2_CI_low = c(0, NA, 0, 0, 0, 0, 0, 0, NA), # Eta2_CI_high = c(0.49, NA, 0.51, 0.23, 0.41, 0.28, 0.31, 0.04, NA) # ), # row.names = 
c(NA, 9L), # class = "data.frame", # ci = 0.9, # model_class = c("aovlist", "listof"), # digits = 2, # ci_digits = 2, # p_digits = 3 # ), # tolerance = 0.1, # ignore_attr = TRUE # ) # }) # manova ------------------------------------------------ test_that("works with manova", { skip_on_cran() skip_if_not_installed("effectsize", minimum_version = "0.5.1") set.seed(123) # fake a 2nd response variable npk2 <- within(npk, foo <- rnorm(24)) # model m <- manova(cbind(yield, foo) ~ block + N * P * K, npk2) set.seed(123) df_manova <- as.data.frame(model_parameters(m, ci = 0.99, eta_squared = NULL, omega_squared = "partial", epsilon_squared = "partial" )) expect_equal( df_manova, structure( list( Parameter = c("block", "N", "P", "K", "N:P", "N:K", "P:K", "Residuals"), Pillai = c(0.88, 0.61, 0.07, 0.39, 0.11, 0.17, 0, NA), df = c(5, 1, 1, 1, 1, 1, 1, 12), F = c(1.9, 8.52, 0.39, 3.49, 0.65, 1.16, 0.02, NA), p = c(0.1, 0.01, 0.69, 0.07, 0.54, 0.35, 0.98, NA), Omega2_partial = c(0.2, 0.52, -0.1, 0.26, -0.05, 0.02, -0.16, NA), Omega2_CI_low = c(0, 0, 0, 0, 0, 0, 0, NA), Omega2_CI_high = c(1, 1, 1, 1, 1, 1, 1, NA), Epsilon2_partial = c(0.21, 0.54, -0.1, 0.28, -0.06, 0.02, -0.18, NA), Epsilon2_CI_low = c(0, 0, 0, 0, 0, 0, 0, NA), Epsilon2_CI_high = c(1, 1, 1, 1, 1, 1, 1, NA) ), row.names = c(NA, 8L), class = "data.frame", ci = 0.99, model_class = c("manova", "maov", "aov", "mlm", "lm"), digits = 2, ci_digits = 2, p_digits = 3 ), tolerance = 0.1, ignore_attr = TRUE ) }) # Gam ------------------------------------------------ test_that("works with Gam", { skip_on_cran() skip_if_not_installed("effectsize", minimum_version = "0.5.1") # setup set.seed(123) # model set.seed(123) g <- gam::gam( formula = mpg ~ gam::s(hp, 4) + am + qsec, data = mtcars ) set.seed(123) df_Gam <- as.data.frame(model_parameters(g, ci = 0.50, omega_squared = "partial" )) expect_equal( df_Gam, structure( list( Parameter = c("gam::s(hp, 4)", "am", "qsec", "Residuals"), Sum_Squares = c(678.37287, 202.23503, 
6.87905, 238.56023), df = c(1, 1, 1, 28), Mean_Square = c(678.37287, 202.23503, 6.87905, 8.52001), `F` = c(79.62115, 23.73648, 0.8074, NA), p = c(0, 4e-05, 0.37655, NA), Omega2_partial = c(0.71072, 0.41538, -0.00606, NA), Omega2_CI_low = c(0.70634, 0.41067, 0, NA), Omega2_CI_high = c(1, 1, 1, NA) ), row.names = c(NA, 4L), class = "data.frame", ci = 0.5, model_class = c("anova", "data.frame"), digits = 2, ci_digits = 2, p_digits = 3 ), tolerance = 0.1, ignore_attr = TRUE ) }) # anova ------------------------------------------------ test_that("works with anova", { skip_on_cran() skip_if_not_installed("effectsize", minimum_version = "0.5.1") set.seed(123) mod <- car::Anova(stats::lm( formula = conformity ~ fcategory * partner.status, data = Moore, contrasts = list(fcategory = contr.sum, partner.status = contr.sum) )) set.seed(123) df_car <- as.data.frame(model_parameters(mod, ci = 0.89, eta_squared = "raw", omega_squared = "partial", epsilon_squared = "raw" )) expect_equal( df_car, structure( list( Parameter = c( "fcategory", "partner.status", "fcategory:partner.status", "Residuals" ), Sum_Squares = c(11.61, 212.21, 175.49, 817.76), df = c(2, 1, 2, 39), Mean_Square = c(5.81, 212.21, 87.74, 20.97), F = c(0.28, 10.12, 4.18, NA), p = c(0.76, 0, 0.02, NA), Omega2_partial = c(-0.03, 0.17, 0.12, NA), Omega2_CI_low = c(0, 0.03, 0, NA), Omega2_CI_high = c(1, 1, 1, NA), Eta2 = c(0.01, 0.17, 0.14, NA), Eta2_CI_low = c(0, 0.03, 0, NA), Eta2_CI_high = c(1, 1, 1, NA), Epsilon2 = c(-0.02, 0.16, 0.11, NA), Epsilon2_CI_low = c(0, 0.03, 0, NA), Epsilon2_CI_high = c(1, 1, 1, NA) ), row.names = c(NA, 4L), class = "data.frame", ci = 0.89, model_class = c("anova", "data.frame"), digits = 2, ci_digits = 2, p_digits = 3 ), tolerance = 0.1, ignore_attr = TRUE ) }) } } parameters/tests/testthat/test-mlm.R0000644000175000017500000000252014122064334017434 0ustar nileshnileshif (requiet("testthat") && requiet("parameters") && getRversion() >= "3.6.0") { set.seed(123) mod <- lm(formula = 
cbind(mpg, disp) ~ wt, data = mtcars) mp <- model_parameters(mod) test_that("model_parameters,mlm", { expect_equal( mp$Coefficient, c(37.28513, -5.34447, -131.14842, 112.47814), tolerance = 1e-3 ) expect_equal( colnames(mp), c( "Parameter", "Coefficient", "SE", "CI", "CI_low", "CI_high", "t", "df_error", "p", "Response" ) ) expect_equal(mp$Response, c("mpg", "mpg", "disp", "disp")) expect_equal(mp$Parameter, c("(Intercept)", "wt", "(Intercept)", "wt")) }) model <- lm(cbind(mpg, hp) ~ cyl * disp, mtcars) mp <- model_parameters(model) test_that("model_parameters,mlm", { expect_equal( mp$Coefficient, c(49.03721, -3.40524, -0.14553, 0.01585, 23.55, 17.43527, -0.36762, 0.06174), tolerance = 1e-3 ) expect_equal( colnames(mp), c( "Parameter", "Coefficient", "SE", "CI", "CI_low", "CI_high", "t", "df_error", "p", "Response" ) ) expect_equal(mp$Response, c("mpg", "mpg", "mpg", "mpg", "hp", "hp", "hp", "hp")) expect_equal(mp$Parameter, c("(Intercept)", "cyl", "disp", "cyl:disp", "(Intercept)", "cyl", "disp", "cyl:disp")) }) } parameters/tests/testthat/test-pca.R0000644000175000017500000000345714122064334017424 0ustar nileshnileshif (requiet("testthat") && requiet("parameters") && requiet("psych") && requiet("nFactors")) { test_that("principal_components", { x <- parameters::principal_components(mtcars[, 1:7], rotation = "varimax") expect_equal( x$RC1, c( -0.836114674884308, 0.766808147590597, 0.85441780762136, 0.548502661888057, -0.889046093964722, 0.931879020871552, -0.030485507571411 ), tolerance = 0.01 ) expect_equal( colnames(x), c("Variable", "RC1", "RC2", "Complexity", "Uniqueness", "MSA") ) }) test_that("principal_components", { x <- parameters::principal_components(mtcars[, 1:7]) expect_equal( x$PC1, c( -0.930866058535747, 0.9578708009312, 0.952846253483008, 0.874493647245971, -0.746868056938478, 0.882509152331738, -0.541093678419456 ), tolerance = 0.01 ) expect_equal( colnames(x), c("Variable", "PC1", "PC2", "Complexity") ) }) # predict ---------------------- # N.B 
tests will fail if `GPArotation` package is not installed data(bfi) d <- na.omit(bfi[, 1:25]) model <- psych::fa(d, nfactors = 5) mp <- model_parameters(model, sort = TRUE, threshold = "max") test_that("predict model_parameters fa", { pr <- suppressWarnings(predict(mp, names = c("Neuroticism", "Conscientiousness", "Extraversion", "Agreeableness", "Opennness"))) out <- head(pr, 5) expect_equal( out$Neuroticism, c(-0.22242, 0.1618, 0.61907, -0.11692, -0.17372), tolerance = 0.01 ) expect_equal( out$Opennness, c(-1.6092, -0.17222, 0.23341, -1.06152, -0.66086), tolerance = 0.01 ) }) } parameters/tests/testthat/test-model_parameters.maov.R0000644000175000017500000000105714122064334023137 0ustar nileshnileshif (requiet("insight") && requiet("testthat") && requiet("parameters")) { fit <- lm(cbind(mpg, disp, hp) ~ factor(cyl), data = mtcars) m <- aov(fit) mp <- model_parameters(m) test_that("model_parameters.maov", { expect_equal( mp$Sum_Squares, as.vector(do.call(c, lapply(summary(m), function(i) as.data.frame(i)$`Sum Sq`))), tolerance = 1e-3 ) expect_equal( mp[["F"]], as.vector(do.call(c, lapply(summary(m), function(i) as.data.frame(i)[["F value"]]))), tolerance = 1e-3 ) }) } parameters/tests/testthat/test-model_parameters.afex_aov.R0000644000175000017500000000204314122064334023761 0ustar nileshnileshif (requiet("testthat") && requiet("parameters") && requiet("afex")) { data(obk.long, package = "afex") m_between <- suppressWarnings(aov_car(value ~ treatment * gender + Error(id), data = obk.long)) m_within <- suppressWarnings(aov_car(value ~ Error(id / (phase * hour)), data = obk.long)) mp1 <- model_parameters(m_between) mp2 <- model_parameters(m_within) test_that("afex_aov", { expect_equal(c(nrow(mp1), ncol(mp1)), c(5, 7)) expect_equal(mp1$Sum_Squares, c(450.62069, 11.98202, 5.56322, 8.68275, 15.2037), tolerance = 1e-3) expect_equal(c(nrow(mp2), ncol(mp2)), c(3, 9)) expect_equal(mp2$Sum_Squares, c(167.5, 106.29167, 11.08333), tolerance = 1e-3) expect_equal( colnames(mp1), 
c("Parameter", "Sum_Squares", "df", "Mean_Square", "F", "p", "Method") ) expect_equal( colnames(mp2), c("Parameter", "Sum_Squares", "Sum_Squares_Error", "df", "df_error", "Mean_Square", "F", "p", "Method") ) }) unloadNamespace("afex") unloadNamespace("lmerTest") } parameters/tests/testthat/test-model_parameters.glht.R0000644000175000017500000000143014122064334023126 0ustar nileshnileshif (requiet("testthat") && requiet("insight") && requiet("parameters") && requiet("multcomp")) { set.seed(123) lmod <- lm(Fertility ~ ., data = swiss) model <- glht( model = lmod, linfct = c( "Agriculture = 0", "Examination = 0", "Education = 0", "Catholic = 0", "Infant.Mortality = 0" ) ) test_that("model_parameters.glht", { params <- model_parameters(model) expect_equal(params$Coefficient, c(-0.1721, -0.258, -0.8709, 0.1041, 1.077), tolerance = 1e-2) expect_equal(params$SE, c(0.0703, 0.2539, 0.183, 0.0353, 0.3817), tolerance = 1e-2) expect_equal( params$Parameter, c("Agriculture == 0", "Examination == 0", "Education == 0", "Catholic == 0", "Infant.Mortality == 0") ) }) } parameters/tests/testthat/test-quantreg.R0000644000175000017500000000516314131565447020516 0ustar nileshnilesh.runThisTest <- Sys.getenv("RunAllparametersTests") == "yes" if (.runThisTest && requiet("testthat") && requiet("parameters") && requiet("tripack") && requiet("insight") && requiet("quantreg")) { # rqss --------- data("CobarOre") set.seed(123) CobarOre$w <- rnorm(nrow(CobarOre)) m1 <- rqss(z ~ w + qss(cbind(x, y), lambda = .08), data = CobarOre) mp <- suppressWarnings(model_parameters(m1)) test_that("mp_rqss", { expect_identical(mp$Parameter, c("(Intercept)", "w", "cbind(x, y)")) expect_equal(mp$Coefficient, c(17.63057, 1.12506, NA), tolerance = 1e-3) expect_equal(mp$df_error, c(15, 15, NA), tolerance = 1e-3) expect_equal(mp[["df"]], c(NA, NA, 70), tolerance = 1e-3) }) # rq --------- data(stackloss) m1 <- rq(stack.loss ~ Air.Flow + Water.Temp, data = stackloss, tau = .25) mp <- 
suppressWarnings(model_parameters(m1)) test_that("mp_rq", { expect_identical(mp$Parameter, c("(Intercept)", "Air.Flow", "Water.Temp")) expect_equal(mp$Coefficient, c(-36, 0.5, 1), tolerance = 1e-3) }) # rqs --------- set.seed(123) data("engel") m1 <- rq(foodexp ~ income, data = engel, tau = 1:9 / 10) mp <- suppressWarnings(model_parameters(m1)) test_that("mp_rqs", { expect_identical(mp$Parameter, c( "(Intercept)", "income", "(Intercept)", "income", "(Intercept)", "income", "(Intercept)", "income", "(Intercept)", "income", "(Intercept)", "income", "(Intercept)", "income", "(Intercept)", "income", "(Intercept)", "income" )) expect_equal(mp$Coefficient, c( 110.14157, 0.40177, 102.31388, 0.4469, 99.11058, 0.48124, 101.95988, 0.5099, 81.48225, 0.56018, 79.70227, 0.58585, 79.28362, 0.60885, 58.00666, 0.65951, 67.35087, 0.6863 ), tolerance = 1e-3) expect_equal(mp$SE, c( 29.39768, 0.04024, 21.42836, 0.02997, 22.18115, 0.02987, 22.06032, 0.02936, 19.25066, 0.02828, 17.61762, 0.02506, 14.25039, 0.02176, 19.21719, 0.02635, 22.39538, 0.02849 ), tolerance = 1e-3) }) # crq --------- set.seed(123) n <- 200 x <- rnorm(n) y <- 5 + x + rnorm(n) c <- 4 + x + rnorm(n) d <- (y > c) dat <- data.frame(y, x, c, d) m1 <- crq(survival::Surv(pmax(y, c), d, type = "left") ~ x, method = "Portnoy", data = dat) mp <- model_parameters(m1) test_that("mp_rq", { expect_identical(mp$Parameter, c("(Intercept)", "x", "(Intercept)", "x", "(Intercept)", "x", "(Intercept)", "x")) expect_equal(mp$Coefficient, c(4.26724, 0.97534, 4.84961, 0.92638, 5.21843, 0.98038, 5.91301, 0.97382), tolerance = 1e-3) }) } parameters/tests/testthat/test-p_value.R0000644000175000017500000000711614131531730020307 0ustar nileshnilesh.runThisTest <- Sys.getenv("RunAllparametersTests") == "yes" if (.runThisTest) { if (requiet("testthat") && requiet("parameters") && requiet("lme4") && requiet("insight")) { data(mtcars) test_that("p_value", { # h-tests model <- insight::download_model("htest_1") expect_equal(p_value(model), 
0.04136799, tolerance = 0.01) model <- insight::download_model("htest_2") expect_equal(p_value(model), 0.1518983, tolerance = 0.01) model <- insight::download_model("htest_3") expect_equal(p_value(model), 0.182921, tolerance = 0.01) model <- insight::download_model("htest_4") expect_equal(p_value(model), 0, tolerance = 0.01) model <- insight::download_model("htest_5") expect_equal(p_value(model), 0, tolerance = 0.01) model <- insight::download_model("htest_6") expect_equal(p_value(model), 0, tolerance = 0.01) model <- insight::download_model("htest_7") expect_equal(p_value(model), 0, tolerance = 0.01) model <- insight::download_model("htest_8") expect_equal(p_value(model), 0, tolerance = 0.01) # ANOVAs model <- insight::download_model("aov_1") expect_equal(p_value(model)$p, 0, tolerance = 0.01) model <- insight::download_model("anova_1") expect_equal(p_value(model)$p, 0, tolerance = 0.01) model <- insight::download_model("aovlist_1") expect_equal(p_value(model)$p, 0, tolerance = 0.01) model <- insight::download_model("aov_2") expect_equal(p_value(model)$p[1], 0, tolerance = 0.01) model <- insight::download_model("anova_2") expect_equal(p_value(model)$p[1], 0, tolerance = 0.01) model <- insight::download_model("aovlist_2") expect_equal(p_value(model)$p[1], 0, tolerance = 0.01) model <- insight::download_model("aov_3") expect_equal(p_value(model)$p[1], 0, tolerance = 0.01) model <- insight::download_model("anova_3") expect_equal(p_value(model)$p[1], 0, tolerance = 0.01) model <- insight::download_model("aovlist_3") expect_equal(p_value(model)$p[1], 0, tolerance = 0.01) model <- insight::download_model("anova_4") expect_equal(p_value(model)$p[2], 0, tolerance = 0.01) # ANOVA lmer model <- insight::download_model("anova_lmerMod_0") expect_equal(p_value(model), NA) model <- insight::download_model("anova_lmerMod_1") expect_equal(p_value(model), NA) model <- insight::download_model("anova_lmerMod_2") expect_equal(p_value(model), NA) model <- 
insight::download_model("anova_lmerMod_3") expect_equal(p_value(model), NA) model <- insight::download_model("anova_lmerMod_4") expect_equal(p_value(model), NA) model <- insight::download_model("anova_lmerMod_5") expect_equal(p_value(model), NA) model <- insight::download_model("anova_lmerMod_6") expect_equal(p_value(model)$p[2], 0, tolerance = 0.01) # Mixed models model <- lme4::lmer(wt ~ cyl + (1 | gear), data = mtcars) expect_equal(p_value(model)$p[1], 0.206219, tolerance = 0.01) expect_equal(p_value(model, method = "normal")$p[1], 0.1956467, tolerance = 0.01) expect_equal(p_value(model, method = "kr")$p[1], 0.319398, tolerance = 0.01) model <- insight::download_model("merMod_1") expect_equal(p_value(model)$p[1], 0.06578, tolerance = 0.01) model <- insight::download_model("merMod_2") expect_equal(p_value(model)$p[1], 0.29912, tolerance = 0.01) }) } } parameters/tests/testthat/test-glmer.R0000644000175000017500000000717414131531727017774 0ustar nileshnilesh.runThisTest <- Sys.getenv("RunAllparametersTests") == "yes" if (.runThisTest && requiet("testthat") && requiet("parameters") && requiet("lme4")) { data("cbpp") set.seed(123) model <- glmer( cbind(incidence, size - incidence) ~ period + (1 | herd), data = cbpp, family = binomial(), nAGQ = 0 ) params <- model_parameters(model, effects = "fixed") test_that("model_parameters.glmer", { expect_equal(params$SE, c(0.22758, 0.30329, 0.32351, 0.42445), tolerance = 1e-2) }) test_that("print model_parameters", { out <- utils::capture.output(print(params)) expect_equal( out, c( "# Fixed Effects", "", "Parameter | Log-Odds | SE | 95% CI | z | p", "---------------------------------------------------------------", "(Intercept) | -1.36 | 0.23 | [-1.81, -0.91] | -5.98 | < .001", "period [2] | -0.98 | 0.30 | [-1.57, -0.38] | -3.22 | 0.001 ", "period [3] | -1.11 | 0.32 | [-1.75, -0.48] | -3.43 | < .001", "period [4] | -1.56 | 0.42 | [-2.39, -0.73] | -3.67 | < .001" ) ) mp <- model_parameters(model, effects = "all", exponentiate = 
TRUE) out <- utils::capture.output(print(mp)) expect_equal( out, c( "# Fixed Effects", "", "Parameter | Odds Ratio | SE | 95% CI | z | p", "---------------------------------------------------------------", "(Intercept) | 0.26 | 0.06 | [0.16, 0.40] | -5.98 | < .001", "period [2] | 0.38 | 0.11 | [0.21, 0.68] | -3.22 | 0.001 ", "period [3] | 0.33 | 0.11 | [0.17, 0.62] | -3.43 | < .001", "period [4] | 0.21 | 0.09 | [0.09, 0.48] | -3.67 | < .001", "", "# Random Effects", "", "Parameter | Coefficient", "----------------------------------", "SD (Intercept: herd) | 0.64", "SD (Residual) | 1.00" ) ) }) test_that("model_parameters.glmer ml1", { params <- model_parameters(model, ci_method = "ml1", effects = "fixed") expect_equal(params$SE, c(0.22758, 0.30329, 0.32351, 0.42445), tolerance = 1e-2) expect_equal(params$df, c(54, 54, 54, 54), tolerance = 1e-2) }) test_that("model_parameters.glmer betwithin", { params <- model_parameters(model, ci_method = "betwithin", effects = "fixed") expect_equal(params$SE, c(0.23009, 0.30433, 0.32476, 0.42632), tolerance = 1e-2) expect_equal(params$df, c(822, 822, 822, 822), tolerance = 1e-2) }) set.seed(123) cbpp$time <- runif(nrow(cbpp), 1, 4) model <- glmer( cbind(incidence, size - incidence) ~ period + time + (1 + time | herd), data = cbpp, family = binomial(), nAGQ = 0 ) test_that("model_parameters.glmer", { params <- model_parameters(model, effects = "fixed") expect_equal(params$SE, c(0.66539, 0.36178, 0.36223, 0.45528, 0.2379), tolerance = 1e-2) }) test_that("model_parameters.glmer ml1", { params <- model_parameters(model, ci_method = "ml1", effects = "fixed") expect_equal(params$SE, c(0.66539, 0.36178, 0.36223, 0.45528, 0.2379), tolerance = 1e-2) expect_equal(params$df, c(53, 53, 53, 53, 53), tolerance = 1e-2) }) test_that("model_parameters.glmer betwithin", { params <- model_parameters(model, ci_method = "betwithin", effects = "fixed") expect_equal(params$SE, c(0.66539, 0.36178, 0.36223, 0.45528, 0.2379), tolerance = 1e-2) 
expect_equal(params$df, c(821, 821, 821, 821, 9), tolerance = 1e-2) }) } parameters/tests/testthat/test-model_parameters.lme.R0000644000175000017500000000203314122064334022745 0ustar nileshnileshif (requiet("testthat") && requiet("insight") && requiet("parameters") && requiet("nlme") && requiet("lme4")) { data("sleepstudy") model <- lme(Reaction ~ Days, random = ~ 1 + Days | Subject, data = sleepstudy ) test_that("model_parameters.lme", { params <- model_parameters(model, effects = "fixed") expect_equal(params$SE, c(6.8245, 1.5458), tolerance = 1e-3) expect_equal( colnames(params), c("Parameter", "Coefficient", "SE", "CI", "CI_low", "CI_high", "t", "df_error", "p", "Effects") ) }) test_that("model_parameters.lme", { params <- model_parameters(model, effects = "all") expect_equal(params$Coefficient, c(251.4051, 10.46729, 24.74024, 5.9221, 0.066, 25.59184), tolerance = 1e-3) expect_equal(params$SE, c(6.82452, 1.54578, NA, NA, NA, NA), tolerance = 1e-3) expect_equal( colnames(params), c( "Parameter", "Coefficient", "SE", "CI", "CI_low", "CI_high", "t", "df_error", "p", "Effects", "Group" ) ) }) } parameters/tests/testthat/test-model_parameters_robust.R0000644000175000017500000000425614122064334023600 0ustar nileshnileshif (requiet("testthat") && requiet("parameters") && requiet("sandwich") && requiet("clubSandwich") && requiet("effectsize")) { data(mtcars) mtcars$am <- as.factor(mtcars$am) model <- lm(mpg ~ wt * am + cyl + gear, data = mtcars) test_that("model_parameters, robust", { params <- model_parameters(model, robust = TRUE) robust_se <- unname(sqrt(diag(sandwich::vcovHC(model)))) expect_equal(params$SE, robust_se, tolerance = 1e-3) expect_equal(params$p, c(0, 0.0259, 0.01478, 0.01197, 0.95238, 0.01165), tolerance = 1e-3) }) test_that("model_parameters, robust CL", { params <- model_parameters(model, robust = TRUE, vcov_estimation = "CL", vcov_type = "HC1") robust_se <- unname(sqrt(diag(sandwich::vcovCL(model)))) expect_equal(params$SE, robust_se, tolerance = 
1e-3) expect_equal(params$p, c(0, 0.00695, 0.00322, 0.00435, 0.94471, 0.00176), tolerance = 1e-3) }) model2 <- lm(mpg ~ wt * am + cyl + gear, data = effectsize::standardize(mtcars)) test_that("model_parameters, robust", { params <- model_parameters(model, standardize = "refit", robust = TRUE) robust_se <- unname(sqrt(diag(sandwich::vcovHC(model2)))) expect_equal(params$SE, robust_se, tolerance = 1e-3) expect_equal(params$p, c(0.28624, 0.0259, 0.43611, 0.01197, 0.95238, 0.01165), tolerance = 1e-3) }) # cluster-robust standard errors, using clubSandwich data(iris) model <- lm(Petal.Length ~ Sepal.Length * Species, data = iris) iris$cluster <- factor(rep(LETTERS[1:8], length.out = nrow(iris))) test_that("model_parameters, robust CR", { params <- model_parameters(model, robust = TRUE, vcov_estimation = "CR", vcov_type = "CR1", vcov_args = list(cluster = iris$cluster)) robust_se <- unname(sqrt(diag(clubSandwich::vcovCR(model, type = "CR1", cluster = iris$cluster)))) expect_equal(params$SE, robust_se, tolerance = 1e-3) expect_equal(params$p, c(0.01246, 0.04172, 0.18895, 0.57496, 0, 0), tolerance = 1e-3) }) test_that("model_parameters, normal", { params <- model_parameters(model) expect_equal(params$p, c(0.13267, 0.21557, 0.36757, 0.77012, 3e-05, 0), tolerance = 1e-3) }) } parameters/tests/testthat/test-model_parameters.cpglmm.R0000644000175000017500000000102314122064334023445 0ustar nileshnileshif (requiet("testthat") && requiet("insight") && requiet("parameters") && requiet("cplm")) { data("FineRoot") model <- cpglmm(RLD ~ Stock + Spacing + (1 | Plant), data = FineRoot) test_that("model_parameters.cpglmm", { params <- model_parameters(model, effects = "fixed") expect_equal(params$SE, c(0.1308, 0.2514, 0.2, 0.1921), tolerance = 1e-3) expect_equal( colnames(params), c("Parameter", "Coefficient", "SE", "CI", "CI_low", "CI_high", "t", "df_error", "p", "Effects") ) }) } parameters/tests/testthat/test-emmGrid-df_colname.R0000644000175000017500000000377014141263005022325 
0ustar nileshnilesh.runThisTest <- Sys.getenv("RunAllparametersTests") == "yes" if (.runThisTest && requiet("testthat") && requiet("parameters") && requiet("emmeans") && requiet("lme4")) { data(sleep) data(fiber) m <- lm(strength ~ diameter + machine, data = fiber) emm <- emmeans(m, "machine") es1 <- eff_size(emm, sigma = sigma(m), edf = df.residual(m)) sleep$group <- as.factor(sleep$group) m2 <- lme4::lmer(extra ~ group + (1 | ID), sleep) emm2 <- emmeans(m2, ~group, df = NA) es2 <- eff_size(emm2, sigma = sigma(m2), edf = df.residual(m2)) test_that("df", { expect_equal( colnames(model_parameters(es1)), c( "contrast", "Coefficient", "SE", "CI", "CI_low", "CI_high", "t", "df_error", "p" ) ) expect_equal( colnames(model_parameters(es2)), c( "contrast", "Coefficient", "SE", "CI", "CI_low", "CI_high", "z", "df_error", "p" ) ) }) test_that("print model_parameters", { mp <- model_parameters(emm) out <- capture.output(print(mp)) expect_equal( out, c( "machine | Marginal Means | SE | 95% CI | t(11) | p", "-----------------------------------------------------------------", "A | 40.38 | 0.72 | [38.79, 41.98] | 55.81 | < .001", "B | 41.42 | 0.74 | [39.78, 43.06] | 55.64 | < .001", "C | 38.80 | 0.79 | [37.06, 40.53] | 49.24 | < .001" ) ) mp <- model_parameters(es1) out <- capture.output(print(mp)) expect_equal( out, c( "contrast | Coefficient | SE | 95% CI | t(11) | p", "-------------------------------------------------------------", "A - B | -0.65 | 0.65 | [-2.08, 0.78] | -1.00 | 0.339", "A - C | 0.99 | 0.73 | [-0.60, 2.59] | 1.37 | 0.198", "B - C | 1.64 | 0.80 | [-0.12, 3.40] | 2.05 | 0.065" ) ) }) } parameters/tests/testthat/test-model_parameters_mixed_coeforder.R0000644000175000017500000000111414122064334025406 0ustar nileshnileshif (requiet("lme4") && requiet("testthat") && requiet("parameters")) { set.seed(1) dat <- data.frame( TST.diff = runif(100, 0, 100), Exposition = as.factor(sample(0:2, 100, TRUE)), Gruppe = as.factor(sample(0:1, 100, TRUE)), Kennung = 
as.factor(sample(1:5, 100, TRUE)) ) m <- lme4::lmer(TST.diff ~ Exposition + Gruppe + Gruppe:Exposition + (1 | Kennung), data = dat) test_that("model_parameters.mixed.coeforder", { cs <- coef(summary(m)) mp <- model_parameters(m, effects = "fixed") expect_equal(mp$Parameter, rownames(cs)) }) } parameters/tests/testthat/test-glmmTMB.R0000644000175000017500000006551614165246770020201 0ustar nileshnilesh.runThisTest <- Sys.getenv("RunAllparametersTests") == "yes" if (.runThisTest && requiet("testthat") && requiet("parameters") && requiet("glmmTMB")) { data("fish") data("Salamanders") win_os <- tryCatch( { si <- Sys.info() if (!is.null(si["sysname"])) { si["sysname"] == "Windows" || grepl("^mingw", R.version$os) } else { FALSE } }, error = function(e) { FALSE } ) m1 <- suppressWarnings(glmmTMB( count ~ child + camper + (1 | persons), ziformula = ~ child + camper + (1 | persons), data = fish, family = truncated_poisson() )) m2 <- suppressWarnings(glmmTMB( count ~ child + camper + (1 | persons), data = fish, family = poisson() )) m3 <- suppressWarnings(glmmTMB( count ~ spp + mined + (1 | site), ziformula = ~ spp + mined, family = nbinom2, data = Salamanders )) test_that("ci", { expect_equal( ci(m1)$CI_low, c(0.33067, -1.32402, 0.55037, -1.66786, 1.44667, -1.64177), tolerance = 1e-3 ) expect_equal( ci(m1, component = "cond")$CI_low, c(0.33067, -1.32402, 0.55037), tolerance = 1e-3 ) expect_equal( ci(m1, component = "zi")$CI_low, c(-1.66786, 1.44667, -1.64177), tolerance = 1e-3 ) expect_equal( ci(m2)$CI_low, c(-0.47982, -1.85096, 0.76044), tolerance = 1e-3 ) expect_equal( ci(m2, component = "cond")$CI_low, c(-0.47982, -1.85096, 0.76044), tolerance = 1e-3 ) expect_null(ci(m2, component = "zi")) }) test_that("se", { expect_equal( standard_error(m1)$SE, c(0.47559, 0.09305, 0.09346, 0.65229, 0.3099, 0.32324), tolerance = 1e-3 ) expect_equal( standard_error(m1, component = "cond")$SE, c(0.47559, 0.09305, 0.09346), tolerance = 1e-3 ) expect_equal( standard_error(m1, component = 
"zi")$SE, c(0.65229, 0.3099, 0.32324), tolerance = 1e-3 ) expect_equal( standard_error(m2)$SE, c(0.62127, 0.08128, 0.08915), tolerance = 1e-3 ) expect_equal( standard_error(m2, component = "cond")$SE, c(0.62127, 0.08128, 0.08915), tolerance = 1e-3 ) expect_null(standard_error(m2, component = "zi")) }) test_that("p_value", { expect_equal( p_value(m1)$p, c(0.00792, 0, 0, 0.55054, 0, 0.00181), tolerance = 1e-3 ) expect_equal( p_value(m1, component = "cond")$p, c(0.00792, 0, 0), tolerance = 1e-3 ) expect_equal( p_value(m1, component = "zi")$p, c(0.55054, 0, 0.00181), tolerance = 1e-3 ) expect_equal( p_value(m2)$p, c(0.23497, 0, 0), tolerance = 1e-3 ) expect_equal( p_value(m2, component = "cond")$p, c(0.23497, 0, 0), tolerance = 1e-3 ) expect_null(p_value(m2, component = "zi")) }) test_that("model_parameters", { expect_equal( model_parameters(m1, effects = "fixed")$Coefficient, c(1.2628, -1.14165, 0.73354, -0.38939, 2.05407, -1.00823), tolerance = 1e-3 ) expect_equal( model_parameters(m1, effects = "all")$Coefficient, c( 1.2628, -1.14165, 0.73354, -0.38939, 2.05407, -1.00823, 0.9312, 1, 1.17399 ), tolerance = 1e-3 ) expect_equal( model_parameters(m2, effects = "fixed")$Coefficient, c(0.73785, -1.69166, 0.93516), tolerance = 1e-3 ) expect_equal( model_parameters(m3, effects = "fixed")$Coefficient, c( -0.6104, -0.9637, 0.1707, -0.3871, 0.4879, 0.5895, -0.1133, 1.4294, 0.91, 1.1614, -0.9393, 1.0424, -0.5623, -0.893, -2.5398, -2.563, 0.4132 ), tolerance = 1e-2 ) expect_equal( model_parameters(m1)$Component, c( "conditional", "conditional", "conditional", "zero_inflated", "zero_inflated", "zero_inflated", "conditional", "conditional", "zero_inflated" ) ) expect_null(model_parameters(m2, effects = "fixed")$Component) expect_equal( model_parameters(m2)$Component, c( "conditional", "conditional", "conditional", "conditional", "conditional" ) ) expect_equal( model_parameters(m3, effects = "fixed")$Component, c( "conditional", "conditional", "conditional", "conditional", 
"conditional", "conditional", "conditional", "conditional", "zero_inflated", "zero_inflated", "zero_inflated", "zero_inflated", "zero_inflated", "zero_inflated", "zero_inflated", "zero_inflated", "dispersion" ) ) expect_equal( model_parameters(m3, effects = "fixed")$SE, c( 0.4052, 0.6436, 0.2353, 0.3424, 0.2383, 0.2278, 0.2439, 0.3666, 0.6279, 1.3346, 0.8005, 0.714, 0.7263, 0.7535, 2.1817, 0.6045, NA ), tolerance = 1e-2 ) }) test_that("model_parameters.mixed-random", { params <- model_parameters(m1, effects = "random", group_level = TRUE) expect_equal(c(nrow(params), ncol(params)), c(8, 10)) expect_equal( colnames(params), c( "Parameter", "Level", "Coefficient", "SE", "CI", "CI_low", "CI_high", "Component", "Effects", "Group" ) ) expect_equal( as.vector(params$Parameter), c( "(Intercept)", "(Intercept)", "(Intercept)", "(Intercept)", "(Intercept)", "(Intercept)", "(Intercept)", "(Intercept)" ) ) expect_equal( params$Component, c( "conditional", "conditional", "conditional", "conditional", "zero_inflated", "zero_inflated", "zero_inflated", "zero_inflated" ) ) expect_equal( params$Coefficient, c(-1.24, -0.3456, 0.3617, 1.2553, 1.5719, 0.3013, -0.3176, -1.5665), tolerance = 1e-2 ) }) test_that("model_parameters.mixed-ran_pars", { params <- model_parameters(m1, effects = "random") expect_equal(c(nrow(params), ncol(params)), c(3, 9)) expect_equal( colnames(params), c("Parameter", "Coefficient", "SE", "CI", "CI_low", "CI_high", "Effects", "Group", "Component") ) expect_equal( params$Parameter, c("SD (Intercept)", "SD (Observations)", "SD (Intercept)") ) expect_equal( params$Component, c("conditional", "conditional", "zero_inflated") ) expect_equal( params$Coefficient, c(0.9312, 1, 1.17399), tolerance = 1e-2 ) }) test_that("model_parameters.mixed-all_pars", { params <- model_parameters(m1, effects = "all") expect_equal(c(nrow(params), ncol(params)), c(9, 12)) expect_equal( colnames(params), c( "Parameter", "Coefficient", "SE", "CI", "CI_low", "CI_high", "z", "df_error", 
"p", "Effects", "Group", "Component" ) ) expect_equal( params$Parameter, c( "(Intercept)", "child", "camper1", "(Intercept)", "child", "camper1", "SD (Intercept)", "SD (Observations)", "SD (Intercept)" ) ) expect_equal( params$Component, c( "conditional", "conditional", "conditional", "zero_inflated", "zero_inflated", "zero_inflated", "conditional", "conditional", "zero_inflated" ) ) expect_equal( params$Coefficient, c(1.2628, -1.14165, 0.73354, -0.38939, 2.05407, -1.00823, 0.9312, 1, 1.17399), tolerance = 1e-2 ) }) test_that("model_parameters.mixed-all", { params <- model_parameters(m1, effects = "all", group_level = TRUE) expect_equal(c(nrow(params), ncol(params)), c(14, 13)) expect_equal( colnames(params), c( "Parameter", "Level", "Coefficient", "SE", "CI", "CI_low", "CI_high", "z", "df_error", "p", "Component", "Effects", "Group" ) ) expect_equal( params$Parameter, c( "(Intercept)", "child", "camper1", "(Intercept)", "child", "camper1", "(Intercept)", "(Intercept)", "(Intercept)", "(Intercept)", "(Intercept)", "(Intercept)", "(Intercept)", "(Intercept)" ) ) expect_equal( params$Component, c( "conditional", "conditional", "conditional", "zero_inflated", "zero_inflated", "zero_inflated", "conditional", "conditional", "conditional", "conditional", "zero_inflated", "zero_inflated", "zero_inflated", "zero_inflated" ) ) expect_equal( params$Coefficient, c( 1.2628, -1.1417, 0.7335, -0.3894, 2.0541, -1.0082, -1.24, -0.3456, 0.3617, 1.2553, 1.5719, 0.3013, -0.3176, -1.5665 ), tolerance = 1e-2 ) }) m4 <- suppressWarnings(glmmTMB( count ~ child + camper + (1 + xb | persons), ziformula = ~ child + camper + (1 + zg | persons), data = fish, family = truncated_poisson() )) test_that("model_parameters.mixed-ran_pars", { params <- model_parameters(m4, effects = "random") expect_equal(c(nrow(params), ncol(params)), c(7, 9)) expect_equal( colnames(params), c("Parameter", "Coefficient", "SE", "CI", "CI_low", "CI_high", "Effects", "Group", "Component") ) expect_equal( 
params$Parameter, c( "SD (Intercept)", "SD (xb)", "Cor (Intercept~xb: persons)", "SD (Observations)", "SD (Intercept)", "SD (zg)", "Cor (Intercept~zg: persons)" ) ) expect_equal( params$Component, c( "conditional", "conditional", "conditional", "conditional", "zero_inflated", "zero_inflated", "zero_inflated" ) ) expect_equal( params$Coefficient, c(3.40563, 1.21316, -1, 1, 2.73583, 1.56833, 1), tolerance = 1e-2 ) }) # proper printing --------------------- if (win_os) { test_that("print-model_parameters glmmTMB", { mp <- model_parameters(m4, effects = "fixed", component = "conditional") out <- utils::capture.output(print(mp)) expect_equal( out[-5], c( "# Fixed Effects", "", "Parameter | Log-Mean | SE | 95% CI | z | p", "----------------------------------------------------------------", "child | -1.09 | 0.10 | [-1.28, -0.90] | -11.09 | < .001", "camper [1] | 0.27 | 0.10 | [ 0.07, 0.47] | 2.70 | 0.007 " ) ) mp <- model_parameters(m4, effects = "random", component = "conditional") out <- utils::capture.output(print(mp)) expect_equal( out, c( "# Random Effects", "", "Parameter | Coefficient | 95% CI", "---------------------------------------------------------", "SD (Intercept: persons) | 3.41 | [ 1.67, 6.93]", "SD (xb: persons) | 1.21 | [ 0.60, 2.44]", "Cor (Intercept~xb: persons) | -1.00 | [-1.00, 1.00]", "SD (Residual) | 1.00 | " ) ) mp <- model_parameters(m4, effects = "fixed", component = "zero_inflated") out <- utils::capture.output(print(mp)) expect_equal( out[-6], c( "# Fixed Effects (Zero-Inflated Model)", "", "Parameter | Log-Mean | SE | 95% CI | z | p", "-------------------------------------------------------------", "(Intercept) | 1.89 | 0.66 | [ 0.59, 3.19] | 2.84 | 0.004", "camper [1] | -0.17 | 0.39 | [-0.93, 0.59] | -0.44 | 0.663" ) ) mp <- model_parameters(m4, effects = "random", component = "zero_inflated") out <- utils::capture.output(print(mp)) expect_equal( out, c( "# Random Effects (Zero-Inflated Model)", "", "Parameter | Coefficient | 95% CI", 
"---------------------------------------------------------", "SD (Intercept: persons) | 2.74 | [ 1.16, 6.43]", "SD (zg: persons) | 1.57 | [ 0.64, 3.82]", "Cor (Intercept~zg: persons) | 1.00 | [-1.00, 1.00]" ) ) mp <- model_parameters(m4, effects = "all", component = "conditional") out <- utils::capture.output(print(mp)) expect_equal( out[-5], c( "# Fixed Effects", "", "Parameter | Log-Mean | SE | 95% CI | z | p", "----------------------------------------------------------------", "child | -1.09 | 0.10 | [-1.28, -0.90] | -11.09 | < .001", "camper [1] | 0.27 | 0.10 | [ 0.07, 0.47] | 2.70 | 0.007 ", "", "# Random Effects", "", "Parameter | Coefficient | 95% CI", "---------------------------------------------------------", "SD (Intercept: persons) | 3.41 | [ 1.67, 6.93]", "SD (xb: persons) | 1.21 | [ 0.60, 2.44]", "Cor (Intercept~xb: persons) | -1.00 | [-1.00, 1.00]", "SD (Residual) | 1.00 | " ) ) mp <- model_parameters(m4, effects = "all", component = "zero_inflated") out <- utils::capture.output(print(mp)) expect_equal( out[-6], c( "# Fixed Effects (Zero-Inflated Model)", "", "Parameter | Log-Mean | SE | 95% CI | z | p", "-------------------------------------------------------------", "(Intercept) | 1.89 | 0.66 | [ 0.59, 3.19] | 2.84 | 0.004", "camper [1] | -0.17 | 0.39 | [-0.93, 0.59] | -0.44 | 0.663", "", "# Random Effects (Zero-Inflated Model)", "", "Parameter | Coefficient | 95% CI", "---------------------------------------------------------", "SD (Intercept: persons) | 2.74 | [ 1.16, 6.43]", "SD (zg: persons) | 1.57 | [ 0.64, 3.82]", "Cor (Intercept~zg: persons) | 1.00 | [-1.00, 1.00]" ) ) mp <- model_parameters(m4, effects = "all", component = "all") out <- utils::capture.output(print(mp)) expect_equal( out[-c(5, 14)], c( "# Fixed Effects (Count Model)", "", "Parameter | Log-Mean | SE | 95% CI | z | p", "----------------------------------------------------------------", "child | -1.09 | 0.10 | [-1.28, -0.90] | -11.09 | < .001", "camper [1] | 0.27 | 0.10 | [ 
0.07, 0.47] | 2.70 | 0.007 ", "", "# Fixed Effects (Zero-Inflated Model)", "", "Parameter | Log-Odds | SE | 95% CI | z | p", "-------------------------------------------------------------", "(Intercept) | 1.89 | 0.66 | [ 0.59, 3.19] | 2.84 | 0.004", "camper [1] | -0.17 | 0.39 | [-0.93, 0.59] | -0.44 | 0.663", "", "# Random Effects Variances", "", "Parameter | Coefficient | 95% CI", "---------------------------------------------------------", "SD (Intercept: persons) | 3.41 | [ 1.67, 6.93]", "SD (xb: persons) | 1.21 | [ 0.60, 2.44]", "Cor (Intercept~xb: persons) | -1.00 | [-1.00, 1.00]", "SD (Residual) | 1.00 | ", "", "# Random Effects (Zero-Inflated Model)", "", "Parameter | Coefficient | 95% CI", "---------------------------------------------------------", "SD (Intercept: persons) | 2.74 | [ 1.16, 6.43]", "SD (zg: persons) | 1.57 | [ 0.64, 3.82]", "Cor (Intercept~zg: persons) | 1.00 | [-1.00, 1.00]" ) ) }) # proper printing of digits --------------------- test_that("print-model_parameters glmmTMB digits", { mp <- model_parameters(m4, effects = "all", component = "all") out <- utils::capture.output(print(mp, digits = 4, ci_digits = 5)) expect_equal( out[-c(5, 14)], c( "# Fixed Effects (Count Model)", "", "Parameter | Log-Mean | SE | 95% CI | z | p", "--------------------------------------------------------------------------", "child | -1.0875 | 0.0981 | [-1.27966, -0.89529] | -11.0903 | < .001", "camper [1] | 0.2723 | 0.1009 | [ 0.07460, 0.46999] | 2.6996 | 0.007 ", "", "# Fixed Effects (Zero-Inflated Model)", "", "Parameter | Log-Odds | SE | 95% CI | z | p", "-----------------------------------------------------------------------", "(Intercept) | 1.8896 | 0.6646 | [ 0.58714, 3.19213] | 2.8435 | 0.004", "camper [1] | -0.1701 | 0.3898 | [-0.93409, 0.59395] | -0.4363 | 0.663", "", "# Random Effects Variances", "", "Parameter | Coefficient | 95% CI", "---------------------------------------------------------------", "SD (Intercept: persons) | 3.4056 | [ 1.67398, 
6.92858]", "SD (xb: persons) | 1.2132 | [ 0.60210, 2.44440]", "Cor (Intercept~xb: persons) | -1.0000 | [-1.00000, 1.00000]", "SD (Residual) | 1.0000 | ", "", "# Random Effects (Zero-Inflated Model)", "", "Parameter | Coefficient | 95% CI", "---------------------------------------------------------------", "SD (Intercept: persons) | 2.7358 | [ 1.16473, 6.42622]", "SD (zg: persons) | 1.5683 | [ 0.64351, 3.82225]", "Cor (Intercept~zg: persons) | 1.0000 | [-1.00000, 1.00000]" ) ) mp <- model_parameters(m4, effects = "all", component = "all", digits = 4, ci_digits = 5) out <- utils::capture.output(print(mp)) expect_equal( out[-c(5, 14)], c( "# Fixed Effects (Count Model)", "", "Parameter | Log-Mean | SE | 95% CI | z | p", "--------------------------------------------------------------------------", "child | -1.0875 | 0.0981 | [-1.27966, -0.89529] | -11.0903 | < .001", "camper [1] | 0.2723 | 0.1009 | [ 0.07460, 0.46999] | 2.6996 | 0.007 ", "", "# Fixed Effects (Zero-Inflated Model)", "", "Parameter | Log-Odds | SE | 95% CI | z | p", "-----------------------------------------------------------------------", "(Intercept) | 1.8896 | 0.6646 | [ 0.58714, 3.19213] | 2.8435 | 0.004", "camper [1] | -0.1701 | 0.3898 | [-0.93409, 0.59395] | -0.4363 | 0.663", "", "# Random Effects Variances", "", "Parameter | Coefficient | 95% CI", "---------------------------------------------------------------", "SD (Intercept: persons) | 3.4056 | [ 1.67398, 6.92858]", "SD (xb: persons) | 1.2132 | [ 0.60210, 2.44440]", "Cor (Intercept~xb: persons) | -1.0000 | [-1.00000, 1.00000]", "SD (Residual) | 1.0000 | ", "", "# Random Effects (Zero-Inflated Model)", "", "Parameter | Coefficient | 95% CI", "---------------------------------------------------------------", "SD (Intercept: persons) | 2.7358 | [ 1.16473, 6.42622]", "SD (zg: persons) | 1.5683 | [ 0.64351, 3.82225]", "Cor (Intercept~zg: persons) | 1.0000 | [-1.00000, 1.00000]" ) ) }) # proper alignment of CIs --------------------- model_pr <- 
tryCatch( { load(url("https://github.com/d-morrison/parameters/raw/glmmTMB/data/pressure_durations.RData")) glmmTMB( formula = n_samples ~ Surface + Side + Jaw + (1 | Participant / Session), ziformula = ~ Surface + Side + Jaw + (1 | Participant / Session), dispformula = ~ 1, family = nbinom2(), data = pressure_durations ) }, error = function(e) { NULL } ) if (!is.null(model_pr)) { test_that("print-model_parameters glmmTMB CI alignment", { mp <- model_parameters(model_pr, effects = "random", component = "all") out <- utils::capture.output(print(mp)) expect_equal( out, c("# Random Effects: conditional", "", "Parameter | Coefficient | 95% CI", "----------------------------------------------------------------", "SD (Intercept: Session:Participant) | 0.27 | [0.08, 0.87]", "SD (Intercept: Participant) | 0.38 | [0.16, 0.92]", "SD (Residual) | 2.06 | [1.30, 3.27]", "", "# Random Effects: zero_inflated", "", "Parameter | Coefficient | 95% CI", "----------------------------------------------------------------", "SD (Intercept: Session:Participant) | 0.69 | [0.40, 1.19]", "SD (Intercept: Participant) | 2.39 | [1.25, 4.57]" ) ) }) } } test_that("model_parameters.mixed-all", { params <- model_parameters(m4, effects = "all") expect_equal(c(nrow(params), ncol(params)), c(13, 12)) expect_equal( colnames(params), c( "Parameter", "Coefficient", "SE", "CI", "CI_low", "CI_high", "z", "df_error", "p", "Effects", "Group", "Component" ) ) expect_equal( params$Parameter, c( "(Intercept)", "child", "camper1", "(Intercept)", "child", "camper1", "SD (Intercept)", "SD (xb)", "Cor (Intercept~xb: persons)", "SD (Observations)", "SD (Intercept)", "SD (zg)", "Cor (Intercept~zg: persons)" ) ) expect_equal( params$Component, c( "conditional", "conditional", "conditional", "zero_inflated", "zero_inflated", "zero_inflated", "conditional", "conditional", "conditional", "conditional", "zero_inflated", "zero_inflated", "zero_inflated" ) ) expect_equal( params$Coefficient, c( 2.54713, -1.08747, 0.2723, 
1.88964, 0.15712, -0.17007, 3.40563, 1.21316, -1, 1, 2.73583, 1.56833, 1 ), tolerance = 1e-2 ) }) test_that("print-model_parameters", { out <- utils::capture.output(print(model_parameters(m1, effects = "fixed"))) expect_equal( out, c( "# Fixed Effects", "", "Parameter | Log-Mean | SE | 95% CI | z | p", "----------------------------------------------------------------", "(Intercept) | 1.26 | 0.48 | [ 0.33, 2.19] | 2.66 | 0.008 ", "child | -1.14 | 0.09 | [-1.32, -0.96] | -12.27 | < .001", "camper [1] | 0.73 | 0.09 | [ 0.55, 0.92] | 7.85 | < .001", "", "# Zero-Inflated", "", "Parameter | Log-Odds | SE | 95% CI | z | p", "---------------------------------------------------------------", "(Intercept) | -0.39 | 0.65 | [-1.67, 0.89] | -0.60 | 0.551 ", "child | 2.05 | 0.31 | [ 1.45, 2.66] | 6.63 | < .001", "camper [1] | -1.01 | 0.32 | [-1.64, -0.37] | -3.12 | 0.002 " ) ) out <- utils::capture.output(print(model_parameters(m1, effects = "fixed", exponentiate = TRUE))) expect_equal( out, c( "# Fixed Effects", "", "Parameter | IRR | SE | 95% CI | z | p", "----------------------------------------------------------", "(Intercept) | 3.54 | 1.68 | [1.39, 8.98] | 2.66 | 0.008 ", "child | 0.32 | 0.03 | [0.27, 0.38] | -12.27 | < .001", "camper [1] | 2.08 | 0.19 | [1.73, 2.50] | 7.85 | < .001", "", "# Zero-Inflated", "", "Parameter | Odds Ratio | SE | 95% CI | z | p", "----------------------------------------------------------------", "(Intercept) | 0.68 | 0.44 | [0.19, 2.43] | -0.60 | 0.551 ", "child | 7.80 | 2.42 | [4.25, 14.32] | 6.63 | < .001", "camper [1] | 0.36 | 0.12 | [0.19, 0.69] | -3.12 | 0.002 " ) ) out <- utils::capture.output(print(model_parameters(m1, effects = "all"))) expect_equal( out, c( "# Fixed Effects (Count Model)", "", "Parameter | Log-Mean | SE | 95% CI | z | p", "----------------------------------------------------------------", "(Intercept) | 1.26 | 0.48 | [ 0.33, 2.19] | 2.66 | 0.008 ", "child | -1.14 | 0.09 | [-1.32, -0.96] | -12.27 | < .001", "camper [1] 
| 0.73 | 0.09 | [ 0.55, 0.92] | 7.85 | < .001", "", "# Fixed Effects (Zero-Inflated Model)", "", "Parameter | Log-Odds | SE | 95% CI | z | p", "---------------------------------------------------------------", "(Intercept) | -0.39 | 0.65 | [-1.67, 0.89] | -0.60 | 0.551 ", "child | 2.05 | 0.31 | [ 1.45, 2.66] | 6.63 | < .001", "camper [1] | -1.01 | 0.32 | [-1.64, -0.37] | -3.12 | 0.002 ", "", "# Random Effects Variances", "", "Parameter | Coefficient | 95% CI", "----------------------------------------------------", "SD (Intercept: persons) | 0.93 | [0.46, 1.89]", "SD (Residual) | 1.00 | ", "", "# Random Effects (Zero-Inflated Model)", "", "Parameter | Coefficient | 95% CI", "----------------------------------------------------", "SD (Intercept: persons) | 1.17 | [0.54, 2.57]" ) ) }) } parameters/tests/testthat/helper-requiet.R0000644000175000017500000000022014131014354020615 0ustar nileshnileshrequiet <- function(package) { suppressPackageStartupMessages( require(package, warn.conflicts = FALSE, character.only = TRUE) ) } parameters/tests/testthat/test-betareg.R0000644000175000017500000000327314122064334020266 0ustar nileshnileshif (requiet("testthat") && requiet("parameters") && requiet("betareg")) { data("GasolineYield") data("FoodExpenditure") m1 <- betareg(yield ~ batch + temp, data = GasolineYield) m2 <- betareg(I(food / income) ~ income + persons, data = FoodExpenditure) test_that("ci", { expect_equal( ci(m1)$CI_low, as.vector(confint(m1)[, 1]), tolerance = 1e-4 ) expect_equal( ci(m2)$CI_low, as.vector(confint(m2)[, 1]), tolerance = 1e-4 ) }) test_that("se", { s <- summary(m1) expect_equal( standard_error(m1)$SE, as.vector(c(s$coefficients$mean[, 2], s$coefficients$precision[, 2])), tolerance = 1e-4 ) s <- summary(m2) expect_equal( standard_error(m2)$SE, as.vector(c(s$coefficients$mean[, 2], s$coefficients$precision[, 2])), tolerance = 1e-4 ) }) test_that("p_value", { expect_equal( p_value(m1)$p, c(0, 0, 0, 0, 0, 0, 0, 0, 1e-05, 0.00114, 0, 6e-05), tolerance 
= 1e-3 ) expect_equal( p_value(m2)$p, c(0.00542, 5e-05, 8e-04, 1e-05), tolerance = 1e-3 ) }) test_that("model_parameters", { expect_equal( model_parameters(m1)$Coefficient, as.vector(coef(m1))[1:11], tolerance = 1e-4 ) expect_equal( model_parameters(m1, component = "all")$Coefficient, as.vector(coef(m1)), tolerance = 1e-4 ) expect_equal(model_parameters(m2)$Coefficient, c(-0.62255, -0.0123, 0.11846), tolerance = 1e-4) expect_equal(model_parameters(m2, component = "all")$Coefficient, c(-0.62255, -0.0123, 0.11846, 35.60975033), tolerance = 1e-4) }) } parameters/tests/testthat/test-model_parameters.metaBMA.R0000644000175000017500000000741314122064334023445 0ustar nileshnileshif (requiet("testthat") && requiet("insight") && requiet("parameters") && requiet("metaBMA") && getRversion() >= "3.6.0") { data(towels) # default set.seed(1234) m <- suppressWarnings(meta_random( logOR, SE, study, data = towels, ci = 0.95, iter = 100, logml_iter = 200 )) test_that("model_parameters.meta_random", { params <- model_parameters(m) expect_equal(params$Parameter, c( "Goldstein, Cialdini, & Griskevicius (2008), Exp. 1", "Goldstein, Cialdini, & Griskevicius (2008), Exp. 2", "Schultz, Khazian, & Zaleski (2008), Exp. 2", "Schultz, Khazian, & Zaleski (2008), Exp. 3", "Mair & Bergin-Seers (2010), Exp. 1", "Bohner & Schluter (2014), Exp. 1", "Bohner & Schluter (2014), Exp. 
2", "Overall", "tau" )) expect_equal( params$Coefficient, c(0.3806, 0.30494, 0.20554, 0.25084, 0.28768, -0.12154, -1.45792, 0.2004, 0.12107), tolerance = 1e-3 ) expect_equal( params$CI_low, c(-0.00686, 0.03816, -0.16998, -0.0825, -1.32685, -0.60772, -2.94785, 0.00824, 0.01884), tolerance = 1e-3 ) expect_equal( colnames(params), c( "Parameter", "Coefficient", "SE", "CI", "CI_low", "CI_high", "Weight", "BF", "Rhat", "ESS", "Component", "Prior_Distribution", "Prior_Location", "Prior_Scale", "Method" ) ) }) set.seed(1234) m2 <- meta_fixed( logOR, SE, study, data = towels, ci = 0.95 ) test_that("model_parameters.meta_fixed", { params <- model_parameters(m2) expect_equal(params$Parameter, c( "Goldstein, Cialdini, & Griskevicius (2008), Exp. 1", "Goldstein, Cialdini, & Griskevicius (2008), Exp. 2", "Schultz, Khazian, & Zaleski (2008), Exp. 2", "Schultz, Khazian, & Zaleski (2008), Exp. 3", "Mair & Bergin-Seers (2010), Exp. 1", "Bohner & Schluter (2014), Exp. 1", "Bohner & Schluter (2014), Exp. 2", "Overall" )) expect_equal(params$Coefficient, c(0.3806, 0.30494, 0.20554, 0.25084, 0.28768, -0.12154, -1.45792, 0.22141), tolerance = 1e-3 ) expect_equal(params$CI_low, c(-0.00686, 0.03816, -0.16998, -0.0825, -1.32685, -0.60772, -2.94785, 0.06638), tolerance = 1e-3 ) expect_equal( colnames(params), c( "Parameter", "Coefficient", "SE", "CI", "CI_low", "CI_high", "Weight", "BF", "Rhat", "ESS", "Component", "Prior_Distribution", "Prior_Location", "Prior_Scale", "Method" ) ) }) set.seed(1234) m3 <- suppressWarnings(meta_random( logOR, SE, study, data = towels, ci = 0.99, iter = 100, logml_iter = 200 )) test_that("model_parameters.meta_random", { params <- model_parameters(m3) expect_equal(params$Parameter, c( "Goldstein, Cialdini, & Griskevicius (2008), Exp. 1", "Goldstein, Cialdini, & Griskevicius (2008), Exp. 2", "Schultz, Khazian, & Zaleski (2008), Exp. 2", "Schultz, Khazian, & Zaleski (2008), Exp. 3", "Mair & Bergin-Seers (2010), Exp. 1", "Bohner & Schluter (2014), Exp. 
1", "Bohner & Schluter (2014), Exp. 2", "Overall", "tau" )) expect_equal(params$Coefficient, c(0.3806, 0.30494, 0.20554, 0.25084, 0.28768, -0.12154, -1.45792, 0.2004, 0.12107), tolerance = 1e-3) expect_equal(params$CI_low, c(-0.00686, 0.03816, -0.16998, -0.0825, -1.32685, -0.60772, -2.94785, -0.15334, 0.01884), tolerance = 1e-3) expect_equal( colnames(params), c( "Parameter", "Coefficient", "SE", "CI", "CI_low", "CI_high", "Weight", "BF", "Rhat", "ESS", "Component", "Prior_Distribution", "Prior_Location", "Prior_Scale", "Method" ) ) }) } parameters/tests/testthat/test-model_parameters_std_mixed.R0000644000175000017500000001345214131014354024235 0ustar nileshnileshif (require("testthat") && require("parameters") && require("effectsize") && require("lme4")) { data(iris) set.seed(1234) iris$grp <- as.factor(sample(1:3, nrow(iris), replace = TRUE)) # fit example model model <- lme4::lmer( Sepal.Length ~ Species * Sepal.Width + Petal.Length + (1 | grp), data = iris ) test_that("model_parameters, standardize-refit, wald-normal", { skip_on_cran() params <- model_parameters(model, ci_method = "normal", standardize = "refit", verbose = FALSE, effects = "fixed") expect_equal(c(nrow(params), ncol(params)), c(7, 10)) expect_equal(params$Coefficient, c(0.96949, -1.28631, -1.81461, 0.34791, 1.74252, -0.25421, -0.18834), tolerance = 1e-3) expect_equal(params$SE, c(0.2045, 0.2619, 0.34035, 0.05968, 0.13914, 0.09762, 0.0945), tolerance = 1e-3) expect_equal(params$CI_high, c(1.37031, -0.77301, -1.14754, 0.46488, 2.01523, -0.06287, -0.00312), tolerance = 1e-3) }) test_that("model_parameters, standardize-refit, wald-t", { skip_on_cran() params <- model_parameters(model, ci_method = "wald", standardize = "refit", verbose = FALSE, effects = "fixed") expect_equal(params$CI_high, c(1.37378, -0.76856, -1.14177, 0.4659, 2.01759, -0.06121, -0.00151), tolerance = 1e-3) }) test_that("model_parameters, standardize-refit", { skip_on_cran() params <- model_parameters(model, standardize = "refit", 
verbose = FALSE, effects = "fixed") expect_equal(c(nrow(params), ncol(params)), c(7, 10)) expect_equal(params$Coefficient, c(0.96949, -1.28631, -1.81461, 0.34791, 1.74252, -0.25421, -0.18834), tolerance = 1e-3) expect_equal(params$SE, c(0.2045, 0.2619, 0.34035, 0.05968, 0.13914, 0.09762, 0.0945), tolerance = 1e-3) expect_equal(params$CI_high, c(1.37378, -0.76856, -1.14177, 0.4659, 2.01759, -0.06121, -0.00151), tolerance = 1e-3) params <- model_parameters(model, standardize = "refit", verbose = FALSE, effects = "all") paramsZ <- model_parameters(effectsize::standardize(model), effects = "all", verbose = FALSE) expect_equal(paramsZ, params, ignore_attr = TRUE) }) test_that("model_parameters, standardize-posthoc", { skip_on_cran() params <- model_parameters(model, standardize = "posthoc", verbose = FALSE, effects = "fixed") expect_equal(c(nrow(params), ncol(params)), c(7, 10)) expect_equal(params$Std_Coefficient, c(0, 0.49679, -0.49355, 0.34791, 1.74252, -0.25421, -0.18834), tolerance = 1e-3) expect_equal(params$SE, c(0, 0.66228, 0.70202, 0.05968, 0.13914, 0.09762, 0.0945), tolerance = 1e-3) expect_equal(params$CI_high, c(0, 1.80607, 0.8943, 0.4659, 2.01759, -0.06121, -0.00151), tolerance = 1e-3) }) test_that("model_parameters, standardize-posthoc", { skip_on_cran() params <- model_parameters(model, ci_method = "normal", standardize = "posthoc", verbose = FALSE, effects = "fixed") expect_equal(c(nrow(params), ncol(params)), c(7, 10)) expect_equal(params$Std_Coefficient, c(0, 0.49679, -0.49355, 0.34791, 1.74252, -0.25421, -0.18834), tolerance = 1e-3) expect_equal(params$SE, c(0, 0.66228, 0.70202, 0.05968, 0.13914, 0.09762, 0.0945), tolerance = 1e-3) expect_equal(params$CI_high, c(0, 1.79483, 0.88238, 0.46488, 2.01523, -0.06287, -0.00312), tolerance = 1e-3) }) test_that("model_parameters, standardize-posthoc, wald-t", { skip_on_cran() params <- model_parameters(model, ci_method = "wald", standardize = "posthoc", verbose = FALSE, effects = "fixed") 
expect_equal(params$CI_high, c(0, 1.80607, 0.8943, 0.4659, 2.01759, -0.06121, -0.00151), tolerance = 1e-3) }) test_that("model_parameters, standardize-basic", { skip_on_cran() params <- model_parameters(model, ci_method = "normal", standardize = "basic", verbose = FALSE, effects = "fixed") expect_equal(c(nrow(params), ncol(params)), c(7, 10)) expect_equal(params$Std_Coefficient, c(0, 0.23497, -0.23344, 0.34791, 1.74252, -0.77129, -0.61304), tolerance = 1e-3) expect_equal(params$SE, c(0, 0.31325, 0.33204, 0.05968, 0.13914, 0.2962, 0.30761), tolerance = 1e-3) expect_equal(params$CI_high, c(0, 0.84893, 0.41735, 0.46488, 2.01523, -0.19075, -0.01014), tolerance = 1e-3) }) test_that("model_parameters, standardize-basic", { skip_on_cran() params <- model_parameters(model, ci_method = "residual", standardize = "basic", verbose = FALSE, effects = "fixed") expect_equal(c(nrow(params), ncol(params)), c(7, 10)) expect_equal(params$Std_Coefficient, c(0, 0.23497, -0.23344, 0.34791, 1.74252, -0.77129, -0.61304), tolerance = 1e-3) expect_equal(params$SE, c(0, 0.31325, 0.33204, 0.05968, 0.13914, 0.2962, 0.30761), tolerance = 1e-3) expect_equal(params$CI_high, c(0, 0.85424, 0.42299, 0.4659, 2.01759, -0.18572, -0.00492), tolerance = 1e-3) }) test_that("model_parameters, standardize-basic", { skip_on_cran() params <- model_parameters(model, standardize = "basic", verbose = FALSE, effects = "fixed") expect_equal(params$CI_high, c(0, 0.85424, 0.42299, 0.4659, 2.01759, -0.18572, -0.00492), tolerance = 1e-3) }) if (requiet("clubSandwich")) { test_that("model_parameters, standardize-refit robust", { skip_on_cran() params <- model_parameters(model, standardize = "refit", effects = "fixed", robust = TRUE, vcov_estimation = "CR", vcov_type = "CR1", vcov_args = list(cluster = iris$grp), verbose = FALSE) expect_equal(c(nrow(params), ncol(params)), c(7, 10)) expect_equal(params$Coefficient, c(0.96949, -1.28631, -1.81461, 0.34791, 1.74252, -0.25421, -0.18834), tolerance = 1e-3) 
expect_equal(params$SE, c(0.07726, 0.33406, 0.22647, 0.0524, 0.10092, 0.18537, 0.05552), tolerance = 1e-3) expect_equal(params$CI_high, c(1.12224, -0.6259, -1.36691, 0.45151, 1.94204, 0.11227, -0.07858), tolerance = 1e-3) }) } } parameters/tests/testthat/test-geeglm.R0000644000175000017500000000150114122064334020105 0ustar nileshnileshif (requiet("testthat") && requiet("parameters") && requiet("geepack")) { data(warpbreaks) m1 <- geeglm( breaks ~ tension, id = wool, data = warpbreaks, family = poisson, corstr = "ar1" ) test_that("ci", { expect_equal( ci(m1)$CI_low, c(3.28294, -0.76741, -0.64708), tolerance = 1e-4 ) }) test_that("se", { expect_equal( standard_error(m1)$SE, c(0.15931, 0.22554, 0.06598), tolerance = 1e-4 ) }) test_that("p_value", { expect_equal( p_value(m1)$p, c(0, 0.14913, 0), tolerance = 1e-4 ) }) test_that("model_parameters", { expect_equal( model_parameters(m1)$Coefficient, c(3.59517, -0.32536, -0.51776), tolerance = 1e-4 ) }) } parameters/tests/testthat/test-parameters_type-2.R0000644000175000017500000000523614122064334022221 0ustar nileshnileshif (requiet("testthat") && requiet("parameters")) { data(iris) dat <- iris m <- lm(Sepal.Length ~ Species, data = dat) test_that("parameters_type default contrasts", { p_type <- parameters_type(m) expect_equal(p_type$Type, c("intercept", "factor", "factor")) expect_equal(p_type$Level, c(NA, "versicolor", "virginica")) }) data(iris) dat <- iris dat$Species <- as.ordered(dat$Species) m <- lm(Sepal.Length ~ Species, data = dat) test_that("parameters_type ordered factor", { p_type <- parameters_type(m) expect_equal(p_type$Type, c("intercept", "ordered", "ordered")) expect_equal(p_type$Level, c(NA, "[linear]", "[quadratic]")) }) data(iris) dat <- iris dat$Species <- as.ordered(dat$Species) contrasts(dat$Species) <- contr.treatment(3) m <- lm(Sepal.Length ~ Species, data = dat) test_that("parameters_type ordered factor", { p_type <- parameters_type(m) expect_equal(p_type$Type, c("intercept", "factor", "factor")) 
expect_equal(p_type$Level, c(NA, "2", "3")) }) data(iris) dat <- iris contrasts(dat$Species) <- contr.poly(3) m <- lm(Sepal.Length ~ Species, data = dat) test_that("parameters_type poly contrasts", { p_type <- parameters_type(m) expect_equal(p_type$Type, c("intercept", "factor", "factor")) expect_equal(p_type$Level, c(NA, ".L", ".Q")) }) data(iris) dat <- iris contrasts(dat$Species) <- contr.treatment(3) m <- lm(Sepal.Length ~ Species, data = dat) test_that("parameters_type treatment contrasts", { p_type <- parameters_type(m) expect_equal(p_type$Type, c("intercept", "factor", "factor")) expect_equal(p_type$Level, c(NA, "2", "3")) }) data(iris) dat <- iris contrasts(dat$Species) <- contr.sum(3) m <- lm(Sepal.Length ~ Species, data = dat) test_that("parameters_type sum contrasts", { p_type <- parameters_type(m) expect_equal(p_type$Type, c("intercept", "factor", "factor")) expect_equal(p_type$Level, c(NA, "1", "2")) }) data(iris) dat <- iris contrasts(dat$Species) <- contr.helmert(3) m <- lm(Sepal.Length ~ Species, data = dat) test_that("parameters_type helmert contrasts", { p_type <- parameters_type(m) expect_equal(p_type$Type, c("intercept", "factor", "factor")) expect_equal(p_type$Level, c(NA, "1", "2")) }) data(iris) dat <- iris contrasts(dat$Species) <- contr.SAS(3) m <- lm(Sepal.Length ~ Species, data = dat) test_that("parameters_type SAS contrasts", { p_type <- parameters_type(m) expect_equal(p_type$Type, c("intercept", "factor", "factor")) expect_equal(p_type$Level, c(NA, "1", "2")) }) } parameters/tests/testthat/test-PMCMRplus.R0000644000175000017500000000062614122064334020436 0ustar nileshnileshif (requiet("testthat") && requiet("parameters") && requiet("PMCMRplus")) { test_that("model_parameters.PMCMR", { set.seed(123) mod <- suppressWarnings(kwAllPairsConoverTest(count ~ spray, data = InsectSprays)) df <- as.data.frame(model_parameters(mod)) # no need to add strict tests, since `toTidy` is tested in `PMCMRplus` itself expect_equal(dim(df), c(15L, 8L)) }) } 
parameters/tests/testthat/test-n_factors.R0000644000175000017500000000051514122064334020627 0ustar nileshnilesh.runThisTest <- Sys.getenv("RunAllparametersTests") == "yes" if (.runThisTest && requiet("testthat") && requiet("parameters") && requiet("nFactors") && requiet("EGAnet") && requiet("psych")) { test_that("n_factors", { set.seed(333) x <- n_factors(mtcars[, 1:4]) expect_equal(ncol(x), 3) }) } parameters/tests/testthat/test-GLMMadaptive.R0000644000175000017500000001111314142732543021125 0ustar nileshnilesh.runThisTest <- Sys.getenv("RunAllparametersTests") == "yes" if (requiet("testthat") && requiet("parameters") && requiet("lme4") && requiet("GLMMadaptive")) { data("fish") data("cbpp") m1 <- mixed_model( count ~ child + camper, random = ~ 1 | persons, zi_fixed = ~ child + livebait, data = fish, family = zi.poisson() ) m2 <- mixed_model( cbind(incidence, size - incidence) ~ period, random = ~ 1 | herd, data = cbpp, family = binomial ) test_that("ci", { expect_equal( ci(m1)$CI_low, c(0.08708, -1.35715, 0.58599, -0.99993, 0.75543, -2.1166), tolerance = 1e-3 ) expect_equal( ci(m1, component = "cond")$CI_low, c(0.08708, -1.35715, 0.58599), tolerance = 1e-3 ) expect_equal( ci(m1, component = "zi")$CI_low, c(-0.99993, 0.75543, -2.1166), tolerance = 1e-3 ) expect_equal( ci(m2)$CI_low, c(-1.8572, -1.59265, -1.76827, -2.41754), tolerance = 1e-3 ) expect_equal( ci(m2, component = "cond")$CI_low, c(-1.8572, -1.59265, -1.76827, -2.41754), tolerance = 1e-3 ) expect_null(ci(m2, component = "zi")) }) test_that("se", { expect_equal( standard_error(m1)$SE, c(0.540016, 0.094847, 0.09356, 0.468122, 0.29416, 0.507634), tolerance = 1e-3 ) expect_equal( standard_error(m1, component = "cond")$SE, c(0.540016, 0.094847, 0.09356), tolerance = 1e-3 ) expect_equal( standard_error(m1, component = "zi")$SE, c(0.468122, 0.29416, 0.507634), tolerance = 1e-3 ) expect_equal( standard_error(m2)$SE, c(0.233543, 0.306776, 0.326777, 0.427606), tolerance = 1e-3 ) expect_equal( standard_error(m2, 
component = "cond")$SE, c(0.233543, 0.306776, 0.326777, 0.427606), tolerance = 1e-3 ) expect_null(standard_error(m2, component = "zi")) }) test_that("p_value", { expect_equal( p_value(m1)$p, c(0.0339, 0, 0, 0.86023, 1e-05, 0.02713), tolerance = 1e-3 ) expect_equal( p_value(m1, component = "cond")$p, c(0.0339, 0, 0), tolerance = 1e-3 ) expect_equal( p_value(m1, component = "zi")$p, c(0.86023, 1e-05, 0.02713), tolerance = 1e-3 ) expect_equal( p_value(m2)$p, c(0, 0.00123, 0.00056, 0.00022), tolerance = 1e-3 ) expect_equal( p_value(m2, component = "cond")$p, c(0, 0.00123, 0.00056, 0.00022), tolerance = 1e-3 ) expect_null(p_value(m2, component = "zi")) }) test_that("model_parameters", { expect_equal( model_parameters(m1, effects = "fixed")$Coefficient, c(1.14549, -1.17125, 0.76937, -0.08243, 1.33197, -1.12165), tolerance = 1e-3 ) expect_equal( model_parameters(m2, effects = "fixed")$Coefficient, c(-1.39946, -0.99138, -1.1278, -1.57945), tolerance = 1e-3 ) }) win_os <- tryCatch( { si <- Sys.info() if (!is.null(si["sysname"])) { si["sysname"] == "Windows" || grepl("^mingw", R.version$os) } else { FALSE } }, error = function(e) { FALSE } ) if (.runThisTest && requiet("glmmTMB") && win_os) { data("Salamanders") model <- mixed_model( count ~ spp + mined, random = ~ DOY | site, zi_fixed = ~ spp + mined, zi_random = ~ DOP | site, family = zi.negative.binomial(), data = Salamanders, control = list(nAGQ = 1) ) test_that("model_parameters.mixed-ran_pars", { params <- model_parameters(model, effects = "random") expect_equal(c(nrow(params), ncol(params)), c(7, 9)) expect_equal( colnames(params), c("Parameter", "Coefficient", "SE", "CI", "CI_low", "CI_high", "Effects", "Group", "Component") ) expect_equal( params$Parameter, c( "SD (Intercept)", "SD (DOY)", "Cor (Intercept~DOY: site)", "SD (Observations)", "SD (Intercept)", "SD (DOP)", "Cor (Intercept~DOP: site)" ) ) expect_equal( params$Component, c( "conditional", "conditional", "conditional", "conditional", "zero_inflated", 
"zero_inflated", "zero_inflated" ) ) expect_equal( params$Coefficient, c(0.56552, 0.29951, 0.06307, 0, 1.02233, 0.38209, -0.17162), tolerance = 1e-2 ) }) } } parameters/tests/testthat/test-parameters_table.R0000644000175000017500000000237614122064334022172 0ustar nileshnileshif (requiet("testthat") && requiet("parameters") && requiet("insight") && requiet("effectsize") && requiet("lme4")) { test_that("parameters_table 1", { x <- model_parameters(lm(Sepal.Length ~ Species, data = iris), standardize = "refit") tab <- format_table(x) expect_equal(colnames(tab), c("Parameter", "Coefficient", "SE", "95% CI", "t(147)", "p")) }) test_that("parameters_table 2", { x <- model_parameters(lme4::lmer(Sepal.Length ~ Petal.Length + (1 | Species), data = iris), effects = "fixed") tab <- format_table(x) expect_true(all(names(tab) == c("Parameter", "Coefficient", "SE", "95% CI", "t(146)", "p", "Effects"))) }) test_that("parameters_table 3", { x <- effectsize::effectsize(lm(Sepal.Length ~ Species, data = iris)) tab <- format_table(x) expect_equal(colnames(tab), c("Parameter", "Std. Coef.", "95% CI")) }) test_that("parameters_table 4", { x <- model_parameters(lm(Sepal.Length ~ Species, data = iris), standardize = "posthoc") tab <- format_table(x) expect_equal(colnames(tab), c("Parameter", "Std. 
Coef.", "SE", "95% CI", "t(147)", "p")) }) # x <- report::report_table(lm(Sepal.Length ~ Species, data=iris)) # Once on CRAN # t <- format_table(x) # t } parameters/tests/testthat/test-model_parameters_std.R0000644000175000017500000000416114122064334023047 0ustar nileshnilesh.runThisTest <- Sys.getenv("RunAllparametersTests") == "yes" if (.runThisTest) { if (requiet("testthat") && requiet("parameters") && requiet("effectsize") && requiet("insight")) { data(mtcars) mtcars$am <- as.factor(mtcars$am) model <- lm(mpg ~ wt * am, data = mtcars) test_that("model_parameters, standardize-refit", { params <- model_parameters(model, standardize = "refit") expect_equal(c(nrow(params), ncol(params)), c(4, 9)) expect_equal(params$Coefficient, c(-0.14183, -0.61463, -0.35967, -0.86017), tolerance = 1e-3) expect_equal(params$SE, c(0.12207, 0.12755, 0.23542, 0.23454), tolerance = 1e-3) expect_equal(params$CI_high, c(0.10821, -0.35336, 0.12257, -0.37973), tolerance = 1e-3) }) test_that("model_parameters, standardize-posthoc", { params <- model_parameters(model, standardize = "posthoc") expect_equal(c(nrow(params), ncol(params)), c(4, 9)) expect_equal(params$Std_Coefficient, c(0, -0.61463, 2.46865, -0.87911), tolerance = 1e-3) expect_equal(params$SE, c(0, 0.12755, 0.7075, 0.23971), tolerance = 1e-3) expect_equal(params$CI_high, c(0, -0.35336, 3.91789, -0.38809), tolerance = 0.1) }) test_that("model_parameters, standardize-basic", { params <- model_parameters(model, standardize = "basic") expect_equal(c(nrow(params), ncol(params)), c(4, 9)) expect_equal(params$Std_Coefficient, c(0, -0.61463, 1.23183, -1.11016), tolerance = 1e-3) expect_equal(params$SE, c(0, 0.12755, 0.35303, 0.30271), tolerance = 1e-3) expect_equal(params$CI_high, c(0, -0.35336, 1.95499, -0.4901), tolerance = 0.1) }) test_that("model_parameters, standardize-smart", { params <- model_parameters(model, standardize = "smart") expect_equal(c(nrow(params), ncol(params)), c(4, 9)) expect_equal(params$Std_Coefficient, c(0, 
-0.61463, 2.41278, -0.85922), tolerance = 1e-3) expect_equal(params$SE, c(0, 0.12755, 0.69148, 0.23428), tolerance = 1e-3) expect_equal(params$CI_high, c(0, -0.35336, 3.82922, -0.37931), tolerance = 0.1) }) } } parameters/tests/testthat/test-bootstrap_emmeans.R0000644000175000017500000000401314122064334022370 0ustar nileshnilesh.runThisTest <- Sys.getenv("RunAllparametersTests") == "yes" if (.runThisTest && requiet("testthat") && requiet("parameters")) { test_that("emmeans | lm", { skip_if_not_installed("emmeans") skip_if_not_installed("boot") model <- lm(mpg ~ log(wt) + factor(cyl), data = mtcars) set.seed(7) b <- bootstrap_model(model, iterations = 1000) expect_equal(summary(emmeans::emmeans(b, ~cyl))$emmean, summary(emmeans::emmeans(model, ~cyl))$emmean, tolerance = 0.1 ) set.seed(7) b <- bootstrap_parameters(model, iterations = 1000) expect_equal(summary(emmeans::emmeans(b, ~cyl))$emmean, summary(emmeans::emmeans(model, ~cyl))$emmean, tolerance = 0.1 ) mp <- model_parameters(emmeans::emmeans(b, consec ~ cyl), verbose = FALSE) expect_equal( colnames(mp), c( "Parameter", "Median", "CI", "CI_low", "CI_high", "pd", "ROPE_CI", "ROPE_low", "ROPE_high", "ROPE_Percentage", "Component" ) ) expect_equal(nrow(mp), 5) }) test_that("emmeans | lmer", { skip_if_not_installed("emmeans") skip_if_not_installed("boot") skip_if_not_installed("lme4") model <- lme4::lmer(mpg ~ log(wt) + factor(cyl) + (1 | gear), data = mtcars) set.seed(7) b <- bootstrap_model(model, iterations = 1000) expect_equal(summary(emmeans::emmeans(b, ~cyl))$emmean, summary(emmeans::emmeans(model, ~cyl))$emmean, tolerance = 0.1 ) set.seed(7) b <- bootstrap_parameters(model, iterations = 1000) expect_equal(summary(emmeans::emmeans(b, ~cyl))$emmean, summary(emmeans::emmeans(model, ~cyl))$emmean, tolerance = 0.1 ) mp <- suppressWarnings(model_parameters(emmeans::emmeans(b, consec ~ cyl))) expect_equal( colnames(mp), c( "Parameter", "Median", "CI", "CI_low", "CI_high", "pd", "ROPE_CI", "ROPE_low", "ROPE_high", 
"ROPE_Percentage", "Component" ) ) expect_equal(nrow(mp), 5) }) } parameters/tests/testthat/test-model_parameters.aov.R0000644000175000017500000000736014165246770023002 0ustar nileshnilesh.runThisTest <- Sys.getenv("RunAllparametersTests") == "yes" if (.runThisTest) { if (requiet("insight") && requiet("testthat") && requiet("lme4") && requiet("parameters")) { if (requireNamespace("effectsize")) { unloadNamespace("afex") unloadNamespace("lmerTest") data(iris) iris$Cat1 <- rep(c("X", "X", "Y"), length.out = nrow(iris)) iris$Cat2 <- rep(c("A", "B"), length.out = nrow(iris)) # aov ---------------------------------- test_that("model_parameters.aov", { skip_if_not_installed("effectsize", minimum_version = "0.5.0") model <- aov(Sepal.Width ~ Species, data = iris) mp <- suppressMessages(model_parameters(model, omega_squared = "partial", eta_squared = "partial", epsilon_squared = TRUE)) expect_equal(mp$Parameter, c("Species", "Residuals")) expect_equal(mp$Sum_Squares, c(11.34493, 16.962), tolerance = 1e-3) }) test_that("model_parameters.aov", { model <- aov(Sepal.Width ~ Species, data = iris) mp <- suppressMessages(model_parameters(model, omega_squared = "partial", eta_squared = "partial", epsilon_squared = TRUE)) expect_equal(sum(mp$df), 149) expect_equal(colnames(mp), c( "Parameter", "Sum_Squares", "df", "Mean_Square", "F", "p", "Omega2", "Eta2", "Epsilon2" )) model <- aov(Sepal.Length ~ Species * Cat1 * Cat2, data = iris) expect_equal(sum(model_parameters(model, omega_squared = "raw", eta_squared = "partial", epsilon_squared = TRUE)$df), 149) model <- aov(Sepal.Length ~ Species / Cat1 * Cat2, data = iris) expect_equal(sum(model_parameters(model)$df), 149) }) data(mtcars) test_that("model_parameters.anova", { model <- anova(lm(Sepal.Width ~ Species, data = iris)) expect_equal(sum(model_parameters(model)$df), 149) model <- anova(lm(Sepal.Length ~ Species * Cat1 * Cat2, data = iris)) expect_equal(sum(model_parameters(model)$df), 149) model <- anova(lmer(wt ~ 1 + (1 | gear), 
data = mtcars)) expect_equal(nrow(model_parameters(model)), 0) model <- anova(lmer(wt ~ cyl + (1 | gear), data = mtcars)) expect_equal(sum(model_parameters(model)$df), 1) model <- anova(lmer(wt ~ drat + cyl + (1 | gear), data = mtcars)) expect_equal(sum(model_parameters(model)$df), 2) model <- anova(lmer(wt ~ drat * cyl + (1 | gear), data = mtcars)) expect_equal(sum(model_parameters(model)$df), 3) model <- anova(lmer(wt ~ drat / cyl + (1 | gear), data = mtcars)) expect_equal(sum(model_parameters(model)$df), 2) }) if (.runThisTest) { test_that("model_parameters.anova", { model <- insight::download_model("anova_3") expect_equal(sum(model_parameters(model)$df), 149) model <- insight::download_model("anova_4") expect_equal(sum(model_parameters(model)$df, na.rm = TRUE), 2) model <- insight::download_model("anova_lmerMod_5") expect_equal(sum(model_parameters(model)$df), 1) model <- insight::download_model("anova_lmerMod_6") expect_equal(sum(model_parameters(model)$df), 12) }) } data(mtcars) test_that("model_parameters.anova", { model <- aov(wt ~ cyl + Error(gear), data = mtcars) expect_equal(sum(model_parameters(model)$df), 31) model <- aov(Sepal.Length ~ Species * Cat1 + Error(Cat2), data = iris) expect_equal(sum(model_parameters(model)$df), 149) model <- aov(Sepal.Length ~ Species / Cat1 + Error(Cat2), data = iris) expect_equal(sum(model_parameters(model)$df), 149) }) } } } parameters/tests/testthat/test-bracl.R0000644000175000017500000000263614122064334017742 0ustar nileshnileshif (requiet("testthat") && requiet("parameters") && requiet("utils") && requiet("brglm2")) { data("stemcell") levels(stemcell$research) <- c("definitly", "alterly", "probably not", "definitely not") m1 <- bracl(research ~ as.numeric(religion) + gender, weights = frequency, data = stemcell, type = "ML") test_that("model_parameters", { params <- model_parameters(m1) expect_equal( params$Response, c("definitly", "alterly", "probably not", "definitly", "alterly", "probably not", "definitly", 
"alterly", "probably not") ) expect_equal( params$Parameter, c( "definitly:(Intercept)", "alterly:(Intercept)", "probably not:(Intercept)", "definitly:as.numeric(religion)", "alterly:as.numeric(religion)", "probably not:as.numeric(religion)", "definitly:genderfemale", "alterly:genderfemale", "probably not:genderfemale" ) ) expect_equal( params$Coefficient, c(-1.24836, 0.47098, 0.42741, 0.4382, 0.25962, 0.01192, -0.13683, 0.18707, -0.16093), tolerance = 1e-3 ) }) # check order of response levels test_that("print model_parameters", { out <- utils::capture.output(print(model_parameters(m1))) expect_equal(out[1], "# Response level: definitly") expect_equal(out[9], "# Response level: alterly") expect_equal(out[17], "# Response level: probably not") }) } parameters/tests/testthat/test-backticks.R0000644000175000017500000000757714131250357020630 0ustar nileshnilesh.runThisTest <- Sys.getenv("RunAllparametersTests") == "yes" if (.runThisTest) { if (requiet("testthat") && requiet("parameters")) { data(iris) iris$`a m` <<- iris$Species iris$`Sepal Width` <<- iris$Sepal.Width m1 <- lm(`Sepal Width` ~ Petal.Length + `a m` * log(Sepal.Length), data = iris) m2 <- lm(Sepal.Width ~ Petal.Length + Species * log(Sepal.Length), data = iris) test_that("standard_error, backticks", { expect_equal( standard_error(m1)$Parameter, c( "(Intercept)", "Petal.Length", "a mversicolor", "a mvirginica", "log(Sepal.Length)", "a mversicolor:log(Sepal.Length)", "a mvirginica:log(Sepal.Length)" ) ) expect_equal( standard_error(m2)$Parameter, c( "(Intercept)", "Petal.Length", "Speciesversicolor", "Speciesvirginica", "log(Sepal.Length)", "Speciesversicolor:log(Sepal.Length)", "Speciesvirginica:log(Sepal.Length)" ) ) }) test_that("ci, backticks", { expect_equal( ci(m1)$Parameter, c( "(Intercept)", "Petal.Length", "a mversicolor", "a mvirginica", "log(Sepal.Length)", "a mversicolor:log(Sepal.Length)", "a mvirginica:log(Sepal.Length)" ) ) expect_equal( ci(m2)$Parameter, c( "(Intercept)", "Petal.Length", 
"Speciesversicolor", "Speciesvirginica", "log(Sepal.Length)", "Speciesversicolor:log(Sepal.Length)", "Speciesvirginica:log(Sepal.Length)" ) ) expect_equal( ci(m1, method = "wald")$Parameter, c( "(Intercept)", "Petal.Length", "a mversicolor", "a mvirginica", "log(Sepal.Length)", "a mversicolor:log(Sepal.Length)", "a mvirginica:log(Sepal.Length)" ) ) expect_equal( ci(m2, method = "wald")$Parameter, c( "(Intercept)", "Petal.Length", "Speciesversicolor", "Speciesvirginica", "log(Sepal.Length)", "Speciesversicolor:log(Sepal.Length)", "Speciesvirginica:log(Sepal.Length)" ) ) }) test_that("p, backticks", { expect_equal( p_value(m1)$Parameter, c( "(Intercept)", "Petal.Length", "a mversicolor", "a mvirginica", "log(Sepal.Length)", "a mversicolor:log(Sepal.Length)", "a mvirginica:log(Sepal.Length)" ) ) expect_equal( p_value(m2)$Parameter, c( "(Intercept)", "Petal.Length", "Speciesversicolor", "Speciesvirginica", "log(Sepal.Length)", "Speciesversicolor:log(Sepal.Length)", "Speciesvirginica:log(Sepal.Length)" ) ) }) test_that("model_parameters, backticks", { expect_equal( model_parameters(m1)$Parameter, c( "(Intercept)", "Petal.Length", "a mversicolor", "a mvirginica", "log(Sepal.Length)", "a mversicolor:log(Sepal.Length)", "a mvirginica:log(Sepal.Length)" ) ) expect_equal( model_parameters(m2)$Parameter, c( "(Intercept)", "Petal.Length", "Speciesversicolor", "Speciesvirginica", "log(Sepal.Length)", "Speciesversicolor:log(Sepal.Length)", "Speciesvirginica:log(Sepal.Length)" ) ) }) test_that("model_parameters-2, backticks", { expect_equal( model_parameters(select_parameters(m1))$Parameter, c( "(Intercept)", "a mversicolor", "a mvirginica", "log(Sepal.Length)", "a mversicolor:log(Sepal.Length)", "a mvirginica:log(Sepal.Length)" ) ) expect_equal( model_parameters(select_parameters(m2))$Parameter, c( "(Intercept)", "Speciesversicolor", "Speciesvirginica", "log(Sepal.Length)", "Speciesversicolor:log(Sepal.Length)", "Speciesvirginica:log(Sepal.Length)" ) ) }) } } 
parameters/tests/testthat/test-model_parameters.htest.R0000644000175000017500000000706614131306310023323 0ustar nileshnileshif (requiet("testthat") && requiet("parameters") && requiet("effectsize") && utils::packageVersion("effectsize") > "0.4.5") { test_that("model_parameters.htest", { params <- model_parameters(cor.test(mtcars$mpg, mtcars$cyl, method = "pearson")) expect_equal( colnames(params), c( "Parameter1", "Parameter2", "r", "CI", "CI_low", "CI_high", "t", "df_error", "p", "Method", "Alternative" ) ) expect_equal(params$r, -0.852, tolerance = 0.05) expect_warning(params <- model_parameters(cor.test(mtcars$mpg, mtcars$cyl, method = "spearman"))) expect_equal(params$rho, -0.9108, tolerance = 0.05) expect_warning(params <- model_parameters(cor.test(mtcars$mpg, mtcars$cyl, method = "kendall"))) expect_equal(params$tau, -0.795, tolerance = 0.05) params <- model_parameters(t.test(iris$Sepal.Width, iris$Sepal.Length)) expect_equal(params$Difference, -2.786, tolerance = 0.05) params <- model_parameters(t.test(mtcars$mpg ~ mtcars$vs)) expect_equal(params$Difference, -7.940, tolerance = 0.05) params <- model_parameters(t.test(iris$Sepal.Width, mu = 1)) expect_equal(params$Difference, 2.0573, tolerance = 0.05) }) test_that("model_parameters.htest-2", { x <- c(A = 20, B = 15, C = 25) mp <- model_parameters(chisq.test(x)) expect_equal(colnames(mp), c("Chi2", "df", "p", "Method")) }) data(mtcars) mp <- model_parameters(stats::chisq.test(table(mtcars$am))) test_that("model_parameters-chisq-test NULL", { expect_equal(mp$Chi2, 1.125, tolerance = 1e-3) expect_equal(colnames(mp), c("Chi2", "df", "p", "Method")) }) mp <- model_parameters(stats::chisq.test(table(mtcars$am)), phi = "adjusted", ci = 0.95) test_that("model_parameters-chisq-test adjusted", { expect_equal(mp$Chi2, 1.125, tolerance = 1e-3) expect_equal(mp$phi_adjusted, 0.0538348, tolerance = 1e-3) expect_equal(colnames(mp), c("Chi2", "df", "phi_adjusted", "CI", "phi_CI_low", "phi_CI_high", "p", "Method")) }) params 
<- model_parameters(t.test(iris$Sepal.Width, iris$Sepal.Length), standardized_d = TRUE) test_that("model_parameters-t-test standardized d", { expect_equal(params$Cohens_d, -4.210417, tolerance = 0.05) expect_equal(params$d_CI_low, -4.655306, tolerance = 0.05) expect_equal( colnames(params), c( "Parameter1", "Parameter2", "Mean_Parameter1", "Mean_Parameter2", "Difference", "CI", "CI_low", "CI_high", "t", "df_error", "Cohens_d", "d_CI_low", "d_CI_high", "p", "Method", "Alternative" ) ) }) mp <- model_parameters(t.test(mtcars$mpg ~ mtcars$vs), standardized_d = TRUE, verbose = FALSE) test_that("model_parameters-t-test standardized d", { expect_equal(mp$Cohens_d, -1.696032, tolerance = 1e-3) expect_equal( colnames(mp), c( "Parameter", "Group", "Mean_Group1", "Mean_Group2", "Difference", "CI", "CI_low", "CI_high", "t", "df_error", "Cohens_d", "d_CI_low", "d_CI_high", "p", "Method", "Alternative" ) ) }) test_that("model_parameters-t-test reports the same unregarding of interface", { g1 <- 1:10 g2 <- 7:20 df <- data.frame(y = c(g1, g2), x = rep(c(0, 1), c(length(g1), length(g2)))) compare_only <- c("Difference", "CI", "CI_low", "CI_high", "t", "df_error", "p", "Method") default_ttest <- model_parameters(t.test(x = g1, y = g2))[compare_only] formula_ttest <- model_parameters(t.test(y ~ x, df))[compare_only] expect_equal(default_ttest, formula_ttest) }) } parameters/tests/testthat/test-zeroinfl.R0000644000175000017500000000424114122064334020501 0ustar nileshnileshif (requiet("testthat") && requiet("parameters") && requiet("pscl")) { data("bioChemists") m1 <- zeroinfl(art ~ fem + mar + kid5 + ment | kid5 + phd, data = bioChemists) test_that("ci", { expect_equal( ci(m1)$CI_low, c(0.42844, -0.34446, 0.00734, -0.26277, 0.01717, -1.77978, -0.37558, -0.51411), tolerance = 1e-4 ) }) test_that("se", { expect_equal( standard_error(m1)$SE, c(0.06797, 0.05868, 0.06593, 0.04874, 0.00212, 0.43378, 0.21509, 0.1352), tolerance = 1e-4 ) }) test_that("p_value", { expect_equal( p_value(m1)$p, 
c(0, 9e-05, 0.03833, 6e-04, 0, 0.03211, 0.83068, 0.06539), tolerance = 1e-4 ) }) test_that("model_parameters", { expect_equal( model_parameters(m1)$Coefficient, as.vector(coef(m1)), tolerance = 1e-4 ) }) m2 <- zeroinfl(formula = art ~ . | 1, data = bioChemists, dist = "negbin") test_that("model_parameters", { expect_equal( model_parameters(m2)$Coefficient, as.vector(coef(m2)), tolerance = 1e-4 ) expect_equal( model_parameters(m2)$Component, c( "conditional", "conditional", "conditional", "conditional", "conditional", "conditional", "zero_inflated" ) ) }) m3 <- zeroinfl(art ~ mar + kid5 * fem + ment | kid5 * fem + phd, data = bioChemists) test_that("model_parameters", { expect_equal( model_parameters(m3)$Coefficient, as.vector(coef(m3)), tolerance = 1e-4 ) }) test_that("parameters_type", { expect_equal( parameters_type(m3)$Type, c( "intercept", "factor", "numeric", "factor", "numeric", "interaction", "intercept", "numeric", "factor", "numeric", "interaction" ), tolerance = 1e-4 ) }) test_that("parameters_type", { expect_equal( parameters_type(m3)$Link, c( "Mean", "Difference", "Association", "Difference", "Association", "Difference", "Mean", "Association", "Difference", "Association", "Difference" ), tolerance = 1e-4 ) }) } parameters/tests/testthat/test-model_parameters.blmerMod.R0000644000175000017500000000223414131306252023732 0ustar nileshnileshif (requiet("testthat") && requiet("parameters") && requiet("blme")) { data(sleepstudy) set.seed(123) model <- blmer(Reaction ~ Days + (1 + Days | Subject), data = sleepstudy, cov.prior = NULL) test_that("model_parameters.blmerMod", { params <- model_parameters(model, effects = "fixed") expect_equal(params$SE, c(6.8246, 1.54579), tolerance = 1e-3) expect_equal( colnames(params), c("Parameter", "Coefficient", "SE", "CI", "CI_low", "CI_high", "t", "df_error", "p", "Effects") ) }) test_that("model_parameters.blmerMod-all", { params <- model_parameters(model, effects = "all") expect_equal(params$SE, c(6.8246, 1.54579, NA, NA, 
NA, NA), tolerance = 1e-3) expect_equal(params$Coefficient, c(251.4051, 10.46729, 24.74066, 5.92214, 0.06555, 25.5918), tolerance = 1e-3) expect_equal( colnames(params), c("Parameter", "Coefficient", "SE", "CI", "CI_low", "CI_high", "t", "df_error", "p", "Effects", "Group") ) expect_equal( params$Parameter, c("(Intercept)", "Days", "SD (Intercept)", "SD (Days)", "Cor (Intercept~Days: Subject)", "SD (Observations)") ) }) } parameters/tests/testthat/test-model_parameters_ordinal.R0000644000175000017500000000420614141263005023702 0ustar nileshnilesh.runThisTest <- Sys.getenv("RunAllparametersTests") == "yes" if (.runThisTest && requiet("testthat") && requiet("parameters") && requiet("ordinal")) { d <- data.frame( Stim = c( "New", "New", "New", "New", "New", "New", "Old", "Old", "Old", "Old", "Old", "Old" ), Response = c( "Confidence1", "Confidence2", "Confidence3", "Confidence4", "Confidence5", "Confidence6", "Confidence1", "Confidence2", "Confidence3", "Confidence4", "Confidence5", "Confidence6" ), w = c(320, 295, 243, 206, 174, 159, 136, 188, 208, 256, 302, 333) ) m1 <- clm(ordered(Response) ~ Stim, scale = ~Stim, link = "probit", data = d, weights = w ) m2 <- clm2(ordered(Response) ~ Stim, scale = ~Stim, link = "probit", data = d, weights = w ) test_that("model_parameters.clm", { mp <- model_parameters(m1) expect_equal( mp$Parameter, c( "Confidence1|Confidence2", "Confidence2|Confidence3", "Confidence3|Confidence4", "Confidence4|Confidence5", "Confidence5|Confidence6", "StimOld", "StimOld" ), tolerance = 1e-4 ) expect_equal( mp$Component, c("intercept", "intercept", "intercept", "intercept", "intercept", "location", "scale"), tolerance = 1e-4 ) expect_equal( mp$Coefficient, c(-0.72845, -0.15862, 0.26583, 0.69614, 1.23477, 0.55237, -0.04069), tolerance = 1e-4 ) }) test_that("model_parameters.clm2", { mp <- model_parameters(m2) expect_equal( mp$Parameter, c( "Confidence1|Confidence2", "Confidence2|Confidence3", "Confidence3|Confidence4", "Confidence4|Confidence5", 
"Confidence5|Confidence6", "StimOld", "StimOld" ), tolerance = 1e-4 ) expect_equal( mp$Component, c("intercept", "intercept", "intercept", "intercept", "intercept", "location", "scale"), tolerance = 1e-4 ) expect_equal( mp$Coefficient, c(-0.72845, -0.15862, 0.26583, 0.69614, 1.23477, 0.55237, -0.04069), tolerance = 1e-4 ) }) } parameters/tests/testthat/test-lmerTest.R0000644000175000017500000000225714131246434020460 0ustar nileshnilesh.runThisTest <- Sys.getenv("RunAllparametersTests") == "yes" if (.runThisTest && requiet("testthat") && requiet("parameters") && requiet("lmerTest") && requiet("pbkrtest")) { data("carrots", package = "lmerTest") m1 <- lmerTest::lmer(Preference ~ sens2 + Homesize + (1 + sens2 | Consumer), data = carrots) test_that("model_parameters, satterthwaite", { params <- model_parameters(m1, effects = "fixed", ci_method = "satterthwaite") s <- summary(m1) expect_equal(params$df, as.vector(s$coefficients[, "df"]), tolerance = 1e-4) expect_equal(params$t, as.vector(s$coefficients[, "t value"]), tolerance = 1e-4) expect_equal(params$p, as.vector(s$coefficients[, "Pr(>|t|)"]), tolerance = 1e-4) }) test_that("model_parameters, kenward", { params <- model_parameters(m1, effects = "fixed", ci_method = "kenward") s <- summary(m1, ddf = "Kenward-Roger") expect_equal(params$df, as.vector(s$coefficients[, "df"]), tolerance = 1e-4) expect_equal(params$t, as.vector(s$coefficients[, "t value"]), tolerance = 1e-4) expect_equal(params$p, as.vector(s$coefficients[, "Pr(>|t|)"]), tolerance = 1e-4) }) } unloadNamespace("lmerTest") parameters/tests/testthat/test-model_parameters.cgam.R0000644000175000017500000000767114135275207023123 0ustar nileshnilesh.runThisTest <- Sys.getenv("RunAllparametersTests") == "yes" if (.runThisTest && requiet("testthat") && requiet("cgam") && utils::packageVersion("insight") > "0.14.4") { test_that("model_parameters - cgam", { # cgam ----------------------- data(cubic) # model m_cgam <- cgam::cgam(formula = y ~ incr.conv(x), data = 
cubic) df_cgam <- parameters::model_parameters(m_cgam) expect_equal( df_cgam, structure(list( Parameter = "(Intercept)", Coefficient = 1.187, SE = 0.3054, CI = 0.95, CI_low = 0.569520101908619, CI_high = 1.80447989809138, t = 3.8868, df_error = 39.5, p = 4e-04 ), row.names = c(NA, -1L), sigma = 2.15946395506817, residual_df = 39.5, pretty_names = c(`(Intercept)` = "(Intercept)"), ci = 0.95, test_statistic = "t-statistic", verbose = TRUE, exponentiate = FALSE, ordinal_model = FALSE, linear_model = TRUE, mixed_model = FALSE, n_obs = 50L, model_class = "cgam", bootstrap = FALSE, iterations = 1000, robust_vcov = FALSE, ignore_group = TRUE, ran_pars = TRUE, show_summary = FALSE, weighted_nobs = 50, model_formula = "y ~ incr.conv(x)", coefficient_name = "Coefficient", zi_coefficient_name = "Log-Odds", digits = 2, ci_digits = 2, p_digits = 3, footer_digits = 3, class = c("parameters_model", "see_parameters_model", "data.frame"), object_name = "m_cgam" ), tolerance = 0.01 ) }) # cgamm ----------------------- test_that("model_parameters - cgamm", { # setup set.seed(123) # simulate a balanced data set with 30 clusters # each cluster has 30 data points n <- 30 m <- 30 # the standard deviation of between cluster error terms is 1 # the standard deviation of within cluster error terms is 2 sige <- 1 siga <- 2 # generate a continuous predictor x <- 1:(m * n) for (i in 1:m) { x[(n * (i - 1) + 1):(n * i)] <- round(runif(n), 3) } # generate a group factor group <- trunc(0:((m * n) - 1) / n) + 1 # generate the fixed-effect term mu <- 10 * exp(10 * x - 5) / (1 + exp(10 * x - 5)) # generate the random-intercept term asscosiated with each group avals <- rnorm(m, 0, siga) # generate the response y <- 1:(m * n) for (i in 1:m) { y[group == i] <- mu[group == i] + avals[i] + rnorm(n, 0, sige) } # use REML method to fit the model ans <- cgam::cgamm(formula = y ~ s.incr(x) + (1 | group), reml = TRUE) df <- suppressWarnings(parameters::model_parameters(ans)) expect_equal(df, structure( list( 
Parameter = "(Intercept)", Coefficient = 5.5174, SE = 0.3631, CI = 0.95, CI_low = 4.80573707721351, CI_high = 6.22906292278649, t = 15.1954, df_error = 890.4, p = 0 ), row.names = 1L, sigma = numeric(0), residual_df = 890, ci = 0.95, test_statistic = "t-statistic", verbose = TRUE, exponentiate = FALSE, ordinal_model = FALSE, linear_model = TRUE, mixed_model = TRUE, model_class = c( "cgamm", "cgam" ), bootstrap = FALSE, iterations = 1000, robust_vcov = FALSE, ignore_group = TRUE, ran_pars = TRUE, show_summary = FALSE, model_formula = "y ~ s.incr(x)", coefficient_name = "Coefficient", zi_coefficient_name = "Log-Odds", digits = 2, ci_digits = 2, p_digits = 3, footer_digits = 3, class = c("parameters_model", "see_parameters_model", "data.frame"), object_name = "ans" ), tolerance = 0.01 ) }) } parameters/tests/testthat/test-model_parameters.anova.R0000644000175000017500000001666114136205412023307 0ustar nileshnilesh.runThisTest <- Sys.getenv("RunAllparametersTests") == "yes" if (.runThisTest && requiet("insight") && requiet("testthat") && requiet("parameters")) { data(mtcars) m <- glm(am ~ mpg + hp + factor(cyl), data = mtcars, family = binomial() ) a <- anova(m, test = "Chisq") mp <- model_parameters(a) test_that("model_parameters.anova", { expect_equal(colnames(mp), c("Parameter", "df", "Deviance", "df_error", "Deviance_error", "p")) expect_equal(mp$Deviance_error, c(43.22973, 29.67517, 19.23255, 10.48692), tolerance = 1e-3) expect_equal(mp$p, c(NA, 0.00023, 0.00123, 0.01262), tolerance = 1e-3) }) test_that("print-model_parameters", { out <- utils::capture.output(print(mp)) expect_equal( out, c( "Parameter | df | Deviance | df (error) | Deviance (error) | p", "--------------------------------------------------------------------", "NULL | | | 31 | 43.23 | ", "mpg | 1 | 13.55 | 30 | 29.68 | < .001", "hp | 1 | 10.44 | 29 | 19.23 | 0.001 ", "factor(cyl) | 2 | 8.75 | 27 | 10.49 | 0.013 ", "", "Anova Table (Type 1 tests)" ) ) }) if (requiet("car")) { a <- car::Anova(m, type 
= 3, test.statistic = "F") mp <- model_parameters(a) test_that("model_parameters.anova", { expect_equal(colnames(mp), c("Parameter", "Sum_Squares", "df", "Mean_Square", "F", "p")) expect_equal(mp[["F"]], c(53.40138, 60.42944, 13.96887, NA), tolerance = 1e-3) }) test_that("print-model_parameters", { out <- utils::capture.output(print(mp)) expect_equal( out, c( "Parameter | Sum_Squares | df | Mean_Square | F | p", "-------------------------------------------------------------", "mpg | 16.72 | 1 | 16.72 | 53.40 | < .001", "hp | 18.92 | 1 | 18.92 | 60.43 | < .001", "factor(cyl) | 8.75 | 2 | 4.37 | 13.97 | < .001", "Residuals | 8.45 | 27 | 0.31 | | ", "", "Anova Table (Type 3 tests)" ) ) }) m <- lm(cbind(hp, mpg) ~ factor(cyl) * am, data = mtcars) a <- car::Anova(m, type = 3, test.statistic = "Pillai") mp <- model_parameters(a, verbose = FALSE) test_that("model_parameters_Anova.mlm", { expect_equal(colnames(mp), c("Parameter", "df", "Statistic", "df_num", "df_error", "F", "p")) expect_equal(mp[["F"]], c(158.2578, 6.60593, 3.71327, 3.28975), tolerance = 1e-3) expect_equal(mp$Statistic, c(0.9268, 0.67387, 0.22903, 0.4039), tolerance = 1e-3) }) if (requiet("MASS")) { data(housing) m <- polr(Sat ~ Infl + Type + Cont, weights = Freq, data = housing) a <- car::Anova(m) mp <- model_parameters(a) test_that("model_parameters_Anova.mlm", { expect_equal(colnames(mp), c("Parameter", "Chi2", "df", "p")) expect_equal(mp$Chi2, c(108.2392, 55.91008, 14.30621), tolerance = 1e-3) }) } } if (requiet("lme4") && requiet("effectsize") && utils::packageVersion("effectsize") > "0.4.3") { data(iris) df <- iris df$Sepal.Big <- ifelse(df$Sepal.Width >= 3, "Yes", "No") mm <- suppressMessages(lmer(Sepal.Length ~ Sepal.Big + Petal.Width + (1 | Species), data = df)) model <- anova(mm) # parameters table including effect sizes mp <- model_parameters( model, eta_squared = "partial", ci = .9, df_error = dof_satterthwaite(mm)[2:3] ) test_that("model_parameters_Anova-effectsize", { expect_equal( 
colnames(mp), c( "Parameter", "Sum_Squares", "df", "Mean_Square", "F", "Eta2_partial", "Eta2_CI_low", "Eta2_CI_high" ) ) expect_equal(mp$Eta2_partial, c(0.03262, 0.6778), tolerance = 1e-3) }) } } # XXX ----- if (.runThisTest && requiet("parameters") && requiet("testthat")) { test_that("anova type | lm", { m <- lm(mpg ~ factor(cyl) * hp + disp, mtcars) a1 <- aov(m) expect_equal(attr(model_parameters(a1), "anova_type"), 1) a1 <- anova(m) expect_equal(attr(model_parameters(a1), "anova_type"), 1) skip_if_not_installed("car") a2 <- car::Anova(m, type = 2) a3 <- car::Anova(m, type = 3) expect_equal(attr(model_parameters(a2), "anova_type"), 2) expect_equal(attr(model_parameters(a3), "anova_type"), 3) m <- lm(mpg ~ factor(cyl) + hp + disp, mtcars) expect_warning(model_parameters(aov(m)), regexp = NA) # no need for warning, because no interactions m <- lm(mpg ~ factor(cyl) * scale(disp, TRUE, FALSE) + scale(disp, TRUE, FALSE), mtcars, contrasts = list("factor(cyl)" = contr.helmert) ) a3 <- car::Anova(m, type = 3) expect_warning(model_parameters(a3), regexp = NA) # expect no warning }) test_that("anova type | mlm", { m <- lm(cbind(mpg, drat) ~ factor(cyl) * hp + disp, mtcars) a1 <- aov(m) expect_equal(attr(model_parameters(a1), "anova_type"), 1) a1 <- anova(m) expect_equal(attr(model_parameters(a1), "anova_type"), 1) skip_if_not_installed("car") a2 <- car::Anova(m, type = 2) a3 <- car::Anova(m, type = 3) expect_equal(attr(model_parameters(a2), "anova_type"), 2) expect_equal(attr(model_parameters(a3, verbose = FALSE), "anova_type"), 3) }) test_that("anova type | glm", { m <- suppressWarnings(glm(am ~ factor(cyl) * hp + disp, mtcars, family = binomial())) a1 <- anova(m) expect_equal(attr(model_parameters(a1), "anova_type"), 1) skip_if_not_installed("car") a2 <- suppressWarnings(car::Anova(m, type = 2)) a3 <- suppressWarnings(car::Anova(m, type = 3)) expect_equal(attr(model_parameters(a2), "anova_type"), 2) expect_equal(attr(model_parameters(a3), "anova_type"), 3) }) 
test_that("anova type | lme4", { skip_if_not_installed("lmerTest") m1 <- lme4::lmer(mpg ~ factor(cyl) * hp + disp + (1 | gear), mtcars) m2 <- lme4::glmer(carb ~ factor(cyl) * hp + disp + (1 | gear), mtcars, family = poisson() ) a1 <- anova(m1) expect_equal(attr(model_parameters(a1), "anova_type"), 1) a1 <- anova(m2) expect_equal(attr(model_parameters(a1), "anova_type"), 1) a3 <- anova(lmerTest::as_lmerModLmerTest(m1)) expect_equal(attr(model_parameters(a3), "anova_type"), 3) skip_if_not_installed("car") a2 <- car::Anova(m1, type = 2) a3 <- car::Anova(m1, type = 3) expect_equal(attr(model_parameters(a2), "anova_type"), 2) expect_equal(attr(model_parameters(a3), "anova_type"), 3) a2 <- car::Anova(m2, type = 2) a3 <- car::Anova(m2, type = 3) expect_equal(attr(model_parameters(a2), "anova_type"), 2) expect_equal(attr(model_parameters(a3), "anova_type"), 3) }) test_that("anova type | afex + Anova.mlm", { skip_if_not_installed("afex") data(obk.long, package = "afex") m <- afex::aov_ez("id", "value", obk.long, between = c("treatment", "gender"), within = c("phase", "hour"), observed = "gender" ) expect_equal(attr(model_parameters(m), "anova_type"), 3) expect_equal(attr(model_parameters(m$Anova, verbose = FALSE), "anova_type"), 3) }) } parameters/tests/testthat/test-ci.R0000644000175000017500000000215714131250707017251 0ustar nileshnileshif (requiet("testthat") && requiet("lme4") && requiet("parameters")) { data(mtcars) test_that("ci", { model <- lm(mpg ~ wt, data = mtcars) expect_equal(ci(model)[1, 3], 33.4505, tolerance = 0.01) expect_equal(ci(model, ci = c(0.7, 0.8))[1, 3], 35.30486, tolerance = 0.01) model <- glm(vs ~ wt, family = "binomial", data = mtcars) expect_equal(ci(model)[1, 3], 1.934013, tolerance = 0.01) model <- lme4::lmer(wt ~ cyl + (1 | gear), data = mtcars) expect_equal(ci(model, method = "normal")[1, 3], -0.335063, tolerance = 0.01) model <- lme4::lmer(wt ~ cyl + (1 | gear), data = mtcars) expect_equal(ci(model)[1, 3], -0.3795646, tolerance = 0.01) 
set.seed(1) val <- ci(model, method = "boot")[1, 3] expect_equal(val, -0.555424, tolerance = 0.01) model <- lme4::glmer(vs ~ cyl + (1 | gear), data = mtcars, family = "binomial") expect_equal(ci(model)[1, 3], -0.7876679, tolerance = 0.01) model <- lme4::glmer(vs ~ drat + cyl + (1 | gear), data = mtcars, family = "binomial") expect_equal(ci(model)[1, 3], -48.14195, tolerance = 0.01) }) } parameters/tests/testthat/test-model_parameters_labels.R0000644000175000017500000000675714122064334023534 0ustar nileshnileshif (requiet("insight") && requiet("testthat") && requiet("parameters") && requiet("lme4")) { test_that("model_parameters_labels", { data(mtcars) mtcars$am <- as.factor(mtcars$am) m1 <- lmer(mpg ~ hp * am + (1 | cyl), data = mtcars) expect_equal( attr(model_parameters(m1), "pretty_names"), c(`(Intercept)` = "(Intercept)", hp = "hp", am1 = "am [1]", `hp:am1` = "hp * am [1]") ) m2 <- lmer(mpg ~ hp * as.factor(am) + (1 | cyl), data = mtcars) expect_equal( attr(model_parameters(m2), "pretty_names"), c( `(Intercept)` = "(Intercept)", hp = "hp", `as.factor(am)1` = "am [1]", `hp:as.factor(am)1` = "hp * am [1]" ) ) m3 <- lmer(mpg ~ hp * log(gear) + (1 | cyl), data = mtcars) expect_equal( attr(model_parameters(m3), "pretty_names"), c( `(Intercept)` = "(Intercept)", hp = "hp", `log(gear)` = "gear [log]", `hp:log(gear)` = "hp * gear [log]" ) ) m4 <- lm(mpg ~ as.factor(cyl) + hp * log(gear), data = mtcars) expect_equal( attr(model_parameters(m4), "pretty_names"), c( `(Intercept)` = "(Intercept)", `as.factor(cyl)6` = "cyl [6]", `as.factor(cyl)8` = "cyl [8]", hp = "hp", `log(gear)` = "gear [log]", `hp:log(gear)` = "hp * gear [log]" ) ) m5 <- lm(mpg ~ as.factor(cyl) * I(wt / 10) + hp * log(gear), data = mtcars) expect_equal( attr(model_parameters(m5), "pretty_names"), c( `(Intercept)` = "(Intercept)", `as.factor(cyl)6` = "cyl [6]", `as.factor(cyl)8` = "cyl [8]", `I(wt/10)` = "wt/10", hp = "hp", `log(gear)` = "gear [log]", `as.factor(cyl)6:I(wt/10)` = "cyl [6] * wt/10", 
`as.factor(cyl)8:I(wt/10)` = "cyl [8] * wt/10", `hp:log(gear)` = "hp * gear [log]" ) ) m6 <- lm(mpg ~ as.factor(cyl) * log(wt) + hp * log(gear), data = mtcars) expect_equal( attr(model_parameters(m6), "pretty_names"), c( `(Intercept)` = "(Intercept)", `as.factor(cyl)6` = "cyl [6]", `as.factor(cyl)8` = "cyl [8]", `log(wt)` = "wt [log]", hp = "hp", `log(gear)` = "gear [log]", `as.factor(cyl)6:log(wt)` = "cyl [6] * wt [log]", `as.factor(cyl)8:log(wt)` = "cyl [8] * wt [log]", `hp:log(gear)` = "hp * gear [log]" ) ) m7 <- lm(mpg ~ as.factor(cyl) * poly(wt, 2) + hp * log(gear), data = mtcars) expect_equal( attr(model_parameters(m7), "pretty_names"), c( `(Intercept)` = "(Intercept)", `as.factor(cyl)6` = "cyl6", `as.factor(cyl)8` = "cyl8", `poly(wt, 2)1` = "wt [1st degree]", `poly(wt, 2)2` = "wt [2nd degree]", hp = "hp", `log(gear)` = "gear [log]", `as.factor(cyl)6:poly(wt, 2)1` = "cyl6 * wt [1st degree]", `as.factor(cyl)8:poly(wt, 2)1` = "cyl8 * wt [1st degree]", `as.factor(cyl)6:poly(wt, 2)2` = "cyl6 * wt [2nd degree]", `as.factor(cyl)8:poly(wt, 2)2` = "cyl8 * wt [2nd degree]", `hp:log(gear)` = "hp * gear [log]" ) ) m8 <- lm(mpg ~ as.factor(cyl) * I(wt^2) + hp * log(gear), data = mtcars) expect_equal( attr(model_parameters(m8), "pretty_names"), c( `(Intercept)` = "(Intercept)", `as.factor(cyl)6` = "cyl [6]", `as.factor(cyl)8` = "cyl [8]", `I(wt^2)` = "wt^2", hp = "hp", `log(gear)` = "gear [log]", `as.factor(cyl)6:I(wt^2)` = "cyl [6] * wt^2", `as.factor(cyl)8:I(wt^2)` = "cyl [8] * wt^2", `hp:log(gear)` = "hp * gear [log]" ) ) }) } parameters/tests/testthat/test-format_model_parameters.R0000644000175000017500000001344314122064334023550 0ustar nileshnileshif (requiet("testthat") && requiet("parameters") && requiet("splines")) { data(mtcars) m <- lm(mpg ~ qsec:wt + wt:drat, data = mtcars) test_that("format_model_parameters-1", { expect_equal(unname(format_parameters(m)), c("(Intercept)", "qsec * wt", "wt * drat")) }) m <- lm(mpg ~ qsec:wt + wt / drat, data = mtcars) 
test_that("format_model_parameters-2", { expect_equal(unname(format_parameters(m)), c("(Intercept)", "wt", "qsec * wt", "wt * drat")) }) m <- lm(mpg ~ qsec:wt + wt:drat + wt, data = mtcars) test_that("format_model_parameters-3", { expect_equal(unname(format_parameters(m)), c("(Intercept)", "wt", "qsec * wt", "wt * drat")) }) m <- lm(mpg ~ qsec:wt + wt / drat + wt, data = mtcars) test_that("format_model_parameters-4", { expect_equal(unname(format_parameters(m)), c("(Intercept)", "wt", "qsec * wt", "wt * drat")) }) m <- lm(mpg ~ qsec * wt + wt:drat + wt, data = mtcars) test_that("format_model_parameters-5", { expect_equal(unname(format_parameters(m)), c("(Intercept)", "qsec", "wt", "qsec * wt", "wt * drat")) }) m <- lm(mpg ~ wt + qsec + wt:qsec, data = mtcars) test_that("format_model_parameters-6", { expect_equal(unname(format_parameters(m)), c("(Intercept)", "wt", "qsec", "wt * qsec")) }) m <- lm(Sepal.Width ~ Species * Petal.Length, data = iris) test_that("format_model_parameters-7", { expect_equal( unname(format_parameters(m)), c( "(Intercept)", "Species [versicolor]", "Species [virginica]", "Petal Length", "Species [versicolor] * Petal Length", "Species [virginica] * Petal Length" ) ) }) m <- lm(Sepal.Width ~ Species:Petal.Length, data = iris) test_that("format_model_parameters-8", { expect_equal( unname(format_parameters(m)), c( "(Intercept)", "Species [setosa] * Petal Length", "Species [versicolor] * Petal Length", "Species [virginica] * Petal Length" ) ) }) m <- lm(Sepal.Width ~ Species / Petal.Length, data = iris) test_that("format_model_parameters-9", { expect_equal( unname(format_parameters(m)), c( "(Intercept)", "Species [versicolor]", "Species [virginica]", "Species [setosa] * Petal Length", "Species [versicolor] * Petal Length", "Species [virginica] * Petal Length" ) ) }) m <- lm(Sepal.Width ~ Species * Petal.Length + Species, data = iris) test_that("format_model_parameters-10", { expect_equal( unname(format_parameters(m)), c( "(Intercept)", "Species 
[versicolor]", "Species [virginica]", "Petal Length", "Species [versicolor] * Petal Length", "Species [virginica] * Petal Length" ) ) }) m <- lm(Sepal.Width ~ Species:Petal.Length + Species, data = iris) test_that("format_model_parameters-11", { expect_equal( unname(format_parameters(m)), c( "(Intercept)", "Species [versicolor]", "Species [virginica]", "Species [setosa] * Petal Length", "Species [versicolor] * Petal Length", "Species [virginica] * Petal Length" ) ) }) m <- lm(Sepal.Width ~ Species / Petal.Length + Species, data = iris) test_that("format_model_parameters-12", { expect_equal( unname(format_parameters(m)), c( "(Intercept)", "Species [versicolor]", "Species [virginica]", "Species [setosa] * Petal Length", "Species [versicolor] * Petal Length", "Species [virginica] * Petal Length" ) ) }) m <- lm(Sepal.Width ~ Species * Petal.Length + Petal.Length, data = iris) test_that("format_model_parameters-13", { expect_equal( unname(format_parameters(m)), c( "(Intercept)", "Species [versicolor]", "Species [virginica]", "Petal Length", "Species [versicolor] * Petal Length", "Species [virginica] * Petal Length" ) ) }) m <- lm(Sepal.Width ~ Species:Petal.Length + Petal.Length, data = iris) test_that("format_model_parameters-14", { expect_equal( unname(format_parameters(m)), c( "(Intercept)", "Petal Length", "Species [versicolor] * Petal Length", "Species [virginica] * Petal Length" ) ) }) m <- lm(Sepal.Width ~ Species / Petal.Length + Petal.Length, data = iris) test_that("format_model_parameters-15", { expect_equal( unname(format_parameters(m)), c( "(Intercept)", "Species [versicolor]", "Species [virginica]", "Petal Length", "Species [versicolor] * Petal Length", "Species [virginica] * Petal Length" ) ) }) m <- lm(Sepal.Width ~ Species * Petal.Length + Petal.Length + Species, data = iris) test_that("format_model_parameters-16", { expect_equal( unname(format_parameters(m)), c( "(Intercept)", "Species [versicolor]", "Species [virginica]", "Petal Length", "Species 
[versicolor] * Petal Length", "Species [virginica] * Petal Length" ) ) }) m <- lm(Sepal.Width ~ Species:Petal.Length + Petal.Length + Species, data = iris) test_that("format_model_parameters-17", { expect_equal( unname(format_parameters(m)), c( "(Intercept)", "Petal Length", "Species [versicolor]", "Species [virginica]", "Species [versicolor] * Petal Length", "Species [virginica] * Petal Length" ) ) }) m <- lm(Sepal.Width ~ Species / Petal.Length + Petal.Length + Species, data = iris) test_that("format_model_parameters-18", { expect_equal( unname(format_parameters(m)), c( "(Intercept)", "Species [versicolor]", "Species [virginica]", "Petal Length", "Species [versicolor] * Petal Length", "Species [virginica] * Petal Length" ) ) }) } parameters/tests/testthat/test-lme.R0000644000175000017500000000460214131246426017433 0ustar nileshnileshif (requiet("testthat") && requiet("parameters") && requiet("nlme") && requiet("lme4") && requiet("insight")) { data("sleepstudy") m1 <- nlme::lme(Reaction ~ Days, random = ~ 1 + Days | Subject, data = sleepstudy ) test_that("ci", { expect_equal( ci(m1)$CI_low, c(237.927995380985, 7.4146616764556), tolerance = 1e-4 ) }) test_that("se", { expect_equal( standard_error(m1)$SE, c(6.82451602451407, 1.54578275017725), tolerance = 1e-4 ) }) test_that("p_value", { expect_equal( p_value(m1)$p, c(2.38350215912719e-80, 2.26328050057813e-10), tolerance = 1e-4 ) }) test_that("model_parameters", { expect_equal( model_parameters(m1, effects = "fixed")$Coefficient, c(251.405104848485, 10.467285959596), tolerance = 1e-4 ) }) data("Orthodont") m2 <- nlme::lme( distance ~ age + Sex, random = ~ 1 | Subject, data = Orthodont, method = "ML" ) test_that("model_parameters", { params <- model_parameters(m2, effects = "fixed") expect_equal(params$Coefficient, c(17.70671, 0.66019, -2.32102), tolerance = 1e-4) expect_equal(params$SE, c(0.83155, 0.06209, 0.74307), tolerance = 1e-4) # expect_equal(params$df, c(80, 80, 25), tolerance = 1e-4) 
expect_equal(params$CI_low, c(16.07503, 0.53834, -3.82999), tolerance = 1e-4) }) test_that("model_parameters, satterthwaite", { params <- model_parameters(m2, ci_method = "satterthwaite", effects = "fixed") expect_equal(params$Coefficient, c(17.70671, 0.66019, -2.32102), tolerance = 1e-4) expect_equal(params$SE, c(0.83155, 0.06209, 0.74307), tolerance = 1e-4) # expect_equal(params$df, c(104.1503, 82.87867, 26.25), tolerance = 1e-4) expect_equal(params$CI_low, c(16.0391, 0.53609, -3.88541), tolerance = 1e-3) }) test_that("model_parameters, satterthwaite", { params <- model_parameters(m2, ci_method = "satterthwaite", effects = "all") expect_equal(params$Coefficient, c(17.70671, 0.66019, -2.32102, 1.73008, 1.42273), tolerance = 1e-4) expect_equal(params$SE, c(0.83155, 0.06209, 0.74307, NA, NA), tolerance = 1e-4) # expect_equal(params$df, c(104.1503, 82.87867, 26.25), tolerance = 1e-4) expect_equal(params$CI_low, c(16.0391, 0.53609, -3.88541, NA, NA), tolerance = 1e-3) }) } parameters/tests/testthat/test-model_parameters.mixed.R0000644000175000017500000001622614141263005023304 0ustar nileshnilesh.runThisTest <- Sys.getenv("RunAllparametersTests") == "yes" if (.runThisTest && requiet("testthat") && requiet("parameters") && requiet("lme4")) { data(mtcars) m1 <- lme4::lmer(wt ~ cyl + (1 | gear), data = mtcars) m2 <- lme4::glmer(vs ~ cyl + (1 | gear), data = mtcars, family = "binomial") test_that("model_parameters.mixed", { params <- model_parameters(m1, ci_method = "normal", effects = "fixed") expect_equal(c(nrow(params), ncol(params)), c(2, 10)) expect_equal(params$CI_high, c(1.6373105660317, 0.554067677205595), tolerance = 1e-3) params <- model_parameters(m1, effects = "fixed") expect_equal(c(nrow(params), ncol(params)), c(2, 10)) expect_equal(params$CI_high, c(1.68181, 0.56083), tolerance = 1e-3) params <- model_parameters(m1, ci = c(0.8, 0.9), ci_method = "normal", effects = "fixed") expect_equal(c(nrow(params), ncol(params)), c(2, 11)) 
expect_equal(params$CI_high_0.8, c(1.29595665381331, 0.502185700948862), tolerance = 1e-3) expect_equal(params$CI_high_0.9, c(1.47875781798108, 0.529969433080186), tolerance = 1e-3) params <- model_parameters(m1, ci_method = "normal", effects = "fixed") lme4_ci <- na.omit(as.data.frame(confint(m1, method = "Wald"))) expect_equal(params$CI_low, lme4_ci$`2.5 %`, tolerance = 1e-4) params <- model_parameters(m1, ci = c(0.8, 0.9), ci_method = "wald", effects = "fixed") expect_equal(c(nrow(params), ncol(params)), c(2, 11)) expect_equal(params$CI_high_0.8, c(1.31154, 0.50455), tolerance = 1e-3) expect_equal(params$CI_high_0.9, c(1.50707, 0.53427), tolerance = 1e-3) params <- model_parameters(m1, ci = c(0.8, 0.9), effects = "fixed") expect_equal(c(nrow(params), ncol(params)), c(2, 11)) expect_equal(params$CI_high_0.8, c(1.31154, 0.50455), tolerance = 1e-3) expect_equal(params$CI_high_0.9, c(1.50707, 0.53427), tolerance = 1e-3) params <- model_parameters(m2, effects = "fixed") expect_equal(c(nrow(params), ncol(params)), c(2, 10)) model <- lme4::glmer(vs ~ drat + cyl + (1 | gear), data = mtcars, family = "binomial") params <- model_parameters(model, effects = "fixed") cs <- coef(summary(model)) expect_equal(c(nrow(params), ncol(params)), c(3, 10)) expect_equal(colnames(params), c( "Parameter", "Coefficient", "SE", "CI", "CI_low", "CI_high", "z", "df_error", "p", "Effects" )) expect_equal(params$Parameter, rownames(cs)) # TODO: Not sure how to deal with bootstrapped mixed models... As it throws an unreasonable amount of singular fits... 
}) test_that("model_parameters.mixed-random", { params <- model_parameters(m1, effects = "random", group_level = TRUE) expect_equal(c(nrow(params), ncol(params)), c(3, 9)) expect_equal(as.vector(params$Parameter), c("(Intercept)", "(Intercept)", "(Intercept)")) expect_equal(as.vector(params$Level), c("3", "4", "5")) expect_equal(params$Coefficient, c(0.1692, 0.0566, -0.2259), tolerance = 1e-2) }) test_that("model_parameters.mixed-ran_pars", { params <- model_parameters(m1, effects = "random") expect_equal(c(nrow(params), ncol(params)), c(2, 8)) expect_equal( as.vector(params$Parameter), c("SD (Intercept)", "SD (Observations)") ) expect_equal(params$Coefficient, c(0.27049, 0.59385), tolerance = 1e-2) }) test_that("model_parameters.mixed-all", { params <- model_parameters(m1, effects = "all") expect_equal(c(nrow(params), ncol(params)), c(4, 11)) expect_equal( as.vector(params$Parameter), c("(Intercept)", "cyl", "SD (Intercept)", "SD (Observations)") ) expect_equal(params$Coefficient, c(0.65112, 0.40418, 0.27049, 0.59385), tolerance = 1e-2) }) test_that("model_parameters.mixed-all_pars", { params <- model_parameters(m1, effects = "all", group_level = TRUE) expect_equal(c(nrow(params), ncol(params)), c(5, 12)) expect_equal( as.vector(params$Parameter), c("(Intercept)", "cyl", "(Intercept)", "(Intercept)", "(Intercept)") ) expect_equal(as.vector(params$Level), c(NA, NA, "3", "4", "5")) expect_equal(params$Coefficient, c(0.65112, 0.40418, 0.16923, 0.05663, -0.22586), tolerance = 1e-2) }) data("qol_cancer") qol_cancer <- cbind( qol_cancer, demean(qol_cancer, select = c("phq4", "QoL"), group = "ID") ) model <- lmer( QoL ~ time + phq4_within + phq4_between + (1 | ID), data = qol_cancer ) mp <- model_parameters(model, effects = "fixed") test_that("model_parameters.mixed", { expect_equal(mp$Component, c("rewb-contextual", "rewb-contextual", "within", "between")) }) test_that("print-model_parameters", { out <- utils::capture.output(print(model_parameters(model, effects = 
"fixed"))) expect_equal( out, c( "Parameter | Coefficient | SE | 95% CI | t(558) | p", "-------------------------------------------------------------------", "(Intercept) | 71.53 | 1.56 | [68.48, 74.59] | 45.98 | < .001", "time | 1.09 | 0.64 | [-0.17, 2.34] | 1.70 | 0.089 ", "", "# Within-Effects", "", "Parameter | Coefficient | SE | 95% CI | t(558) | p", "-------------------------------------------------------------------", "phq4 within | -3.66 | 0.41 | [-4.46, -2.86] | -8.95 | < .001", "", "# Between-Effects", "", "Parameter | Coefficient | SE | 95% CI | t(558) | p", "--------------------------------------------------------------------", "phq4 between | -6.28 | 0.50 | [-7.27, -5.30] | -12.53 | < .001" ) ) }) test_that("print-model_parameters", { out <- utils::capture.output(print(model_parameters(m1, effects = "all"))) expect_equal( out, c( "# Fixed Effects", "", "Parameter | Coefficient | SE | 95% CI | t(28) | p", "-----------------------------------------------------------------", "(Intercept) | 0.65 | 0.50 | [-0.38, 1.68] | 1.29 | 0.206 ", "cyl | 0.40 | 0.08 | [ 0.25, 0.56] | 5.29 | < .001", "", "# Random Effects", "", "Parameter | Coefficient", "----------------------------------", "SD (Intercept: gear) | 0.27", "SD (Residual) | 0.59" ) ) out <- utils::capture.output(print(model_parameters(m1, effects = "fixed", summary = TRUE))) expect_equal( out, c( "# Fixed Effects", "", "Parameter | Coefficient | SE | 95% CI | t(28) | p", "-----------------------------------------------------------------", "(Intercept) | 0.65 | 0.50 | [-0.38, 1.68] | 1.29 | 0.206 ", "cyl | 0.40 | 0.08 | [ 0.25, 0.56] | 5.29 | < .001", "", "Model: wt ~ cyl (32 Observations)", "Residual standard deviation: 0.594 (df = 28)", "Conditional R2: 0.628; Marginal R2: 0.550" ) ) }) } parameters/tests/testthat/test-model_parameters.hurdle.R0000644000175000017500000000121714122064334023456 0ustar nileshnileshif (requiet("testthat") && requiet("pscl") && requiet("parameters")) { set.seed(123) 
data("bioChemists", package = "pscl") model <- hurdle(formula = art ~ ., data = bioChemists, zero = "geometric") test_that("model_parameters.hurdle", { params <- model_parameters(model) expect_equal(params$SE, c(0.12246, 0.06522, 0.07283, 0.04845, 0.0313, 0.00228, 0.29552, 0.15911, 0.18082, 0.11113, 0.07956, 0.01302), tolerance = 1e-3) expect_equal(params$Coefficient, unname(coef(model)), tolerance = 1e-3) expect_equal(params$z, unname(c(coef(summary(model))[[1]][, 3], coef(summary(model))[[2]][, 3])), tolerance = 1e-3) }) } parameters/tests/testthat/test-get_scores.R0000644000175000017500000000106014122064334021002 0ustar nileshnilesh.runThisTest <- Sys.getenv("RunAllparametersTests") == "yes" if (.runThisTest) { if (requiet("testthat") && requiet("parameters") && requiet("psych")) { data(mtcars) pca <- principal_components(mtcars[, 1:7], n = 2, rotation = "varimax") scores <- get_scores(pca) test_that("get_scores", { expect_equal(head(scores$Component_1), c(38.704, 38.755, 28.194, 58.339, 78.658, 51.064), tolerance = 1e-2) expect_equal(head(scores$Component_2), c(63.23, 63.51, 55.805, 64.72, 96.01, 62.61), tolerance = 1e-2) }) } } parameters/tests/testthat/test-model_parameters.BFBayesFactor.R0000644000175000017500000001113214166775471024625 0ustar nileshnileshif (requiet("testthat") && requiet("parameters") && requiet("BayesFactor") && requiet("logspline") && getRversion() >= "3.6") { .runThisTest <- Sys.getenv("RunAllparametersTests") == "yes" # if (.runThisTest) { # test_that("model_parameters.BFBayesFactor", { # model <- BayesFactor::ttestBF(iris$Sepal.Width, iris$Petal.Length, paired = TRUE) # expect_equal(parameters::model_parameters(model)$BF, c(492.770567186302, NA), tolerance = 1e-2) # }) # } test_that("model_parameters.BFBayesFactor", { model <- BayesFactor::correlationBF(iris$Sepal.Width, iris$Petal.Length) expect_equal(parameters::model_parameters(model)$BF, 348853.6, tolerance = 10) }) test_that("model_parameters.BFBayesFactor", { set.seed(123) model 
<- BayesFactor::anovaBF(Sepal.Length ~ Species, data = iris) expect_equal( parameters::model_parameters(model, centrality = "median")$Median, c(5.8431, -0.8266, 0.092, 0.734, 0.2681, 2.0415), tolerance = 2 ) }) df <- mtcars df$gear <- as.factor(df$gear) df$am <- as.factor(df$am) # if (.runThisTest) { # test_that("model_parameters.BFBayesFactor", { # model <- BayesFactor::ttestBF(formula = mpg ~ am, data = df) # expect_equal(model_parameters(model)$BF, c(86.58973, NA), tolerance = 1) # }) # } test_that("model_parameters.BFBayesFactor", { set.seed(123) model <- BayesFactor::anovaBF(mpg ~ gear * am, data = df) expect_equal( model_parameters(model, centrality = "mean")$Mean, c(20.7099, -3.24884, 3.24884, 26.51413, 5.30506, NA, NA, NA), tolerance = 1L ) }) if (.runThisTest && requiet("effectsize") && utils::packageVersion("effectsize") > "0.5.0") { data(raceDolls) bf <- contingencyTableBF(raceDolls, sampleType = "indepMulti", fixedMargin = "cols") mp <- suppressWarnings(model_parameters(bf, centrality = "mean", dispersion = TRUE, verbose = FALSE, cramers_v = TRUE, include_proportions = TRUE )) mp2 <- suppressWarnings(model_parameters(bf, verbose = FALSE)) test_that("model_parameters.BFBayesFactor", { expect_equal( colnames(mp), c( "Parameter", "Mean", "CI", "CI_low", "CI_high", "SD", "Cramers_v", "pd", "ROPE_Percentage", "Prior_Distribution", "Prior_Location", "Prior_Scale", "BF", "Method" ) ) expect_equal(dim(mp), c(6L, 14L)) expect_equal( colnames(mp2), c( "Parameter", "Median", "CI", "CI_low", "CI_high", "pd", "ROPE_Percentage", "Prior_Distribution", "Prior_Location", "Prior_Scale", "BF", "Method" ) ) expect_equal(dim(mp2), c(1L, 12L)) }) data(puzzles) result <- anovaBF(RT ~ shape * color + ID, data = puzzles, whichRandom = "ID", whichModels = "top", progress = FALSE ) mp <- model_parameters( result, centrality = "median", dispersion = TRUE, verbose = FALSE ) test_that("model_parameters.BFBayesFactor", { expect_equal(colnames(mp), c( "Parameter", "Median", "MAD", 
"CI", "CI_low", "CI_high", "pd", "ROPE_Percentage", "Prior_Distribution", "Prior_Location", "Prior_Scale", "Effects", "Component", "BF", "Method" )) expect_equal(mp$Effects, c( "fixed", "fixed", "fixed", "fixed", "fixed", "random", "random", "random", "random", "random", "random", "random", "random", "random", "random", "random", "random", "fixed", "fixed", "fixed", "fixed" )) }) } # one-sample t-test # without effectsize set.seed(123) df_t <- as.data.frame(parameters(ttestBF(mtcars$wt, mu = 3))) expect_identical( colnames(df_t), c( "Parameter", "Median", "CI", "CI_low", "CI_high", "pd", "ROPE_Percentage", "Prior_Distribution", "Prior_Location", "Prior_Scale", "BF", "Method" ) ) expect_equal(dim(df_t), c(1L, 12L)) if (requiet("effectsize") && utils::packageVersion("effectsize") > "0.5.0") { # with effectsize set.seed(123) df_t_es <- as.data.frame(parameters(ttestBF(mtcars$wt, mu = 3), cohens_d = TRUE)) # TODO: fix column order expect_identical( colnames(df_t_es), c( "Parameter", "Median", "CI", "CI_low", "CI_high", "Cohens_d", "d_CI_low", "d_CI_high", "pd", "ROPE_Percentage", "Prior_Distribution", "Prior_Location", "Prior_Scale", "BF", "Method" ) ) expect_equal(dim(df_t_es), c(1L, 15L)) } } parameters/tests/testthat/test-model_parameters_df_method.R0000644000175000017500000001233514131074741024213 0ustar nileshnileshif (requiet("testthat") && requiet("parameters") && requiet("lmerTest") && requiet("pbkrtest") && requiet("lme4")) { data("mtcars") mtcars$cyl <- as.factor(mtcars$cyl) model <- suppressMessages(lme4::lmer(mpg ~ as.factor(gear) * hp + as.factor(am) + wt + (1 | cyl), data = mtcars)) model2 <- suppressMessages(lmerTest::lmer(mpg ~ as.factor(gear) * hp + as.factor(am) + wt + (1 | cyl), data = mtcars)) mp0 <- model_parameters(model, digits = 5, effects = "fixed") mp1 <- model_parameters(model, digits = 5, ci_method = "normal", effects = "fixed") mp2 <- model_parameters(model, digits = 5, ci_method = "s", effects = "fixed") mp3 <- model_parameters(model, 
digits = 5, ci_method = "kr", effects = "fixed") mp4 <- model_parameters(model, digits = 5, ci_method = "wald", effects = "fixed") test_that("model_parameters, ci_method default (residual)", { expect_equal(mp0$SE, c(2.77457, 3.69574, 3.521, 0.01574, 1.58514, 0.86316, 0.02973, 0.01668), tolerance = 1e-3) expect_equal(mp0$df, c(22, 22, 22, 22, 22, 22, 22, 22), tolerance = 1e-3) expect_equal(mp0$p, c(0, 0.00258, 0.14297, 0.17095, 0.84778, 0.00578, 0.00151, 0.32653), tolerance = 1e-3) expect_equal(mp0$CI_low, c(24.54722, 4.89698, -1.95317, -0.05493, -2.97949, -4.42848, -0.16933, -0.05133), tolerance = 1e-3) }) test_that("model_parameters, ci_method normal", { expect_equal(mp1$SE, c(2.77457, 3.69574, 3.521, 0.01574, 1.58514, 0.86316, 0.02973, 0.01668), tolerance = 1e-3) expect_equal(mp1$df, c(22, 22, 22, 22, 22, 22, 22, 22), tolerance = 1e-3) expect_equal(mp1$p, c(0, 0.00068, 0.12872, 0.15695, 0.846, 0.00224, 0.00029, 0.31562), tolerance = 1e-3) expect_equal(mp1$CI_low, c(24.86326, 5.31796, -1.5521, -0.05313, -2.79893, -4.33015, -0.16595, -0.04943), tolerance = 1e-3) }) test_that("model_parameters, ci_method satterthwaite", { expect_equal(mp2$SE, c(2.77457, 3.69574, 3.521, 0.01574, 1.58514, 0.86316, 0.02973, 0.01668), tolerance = 1e-3) expect_equal(mp2$df, c(24, 24, 24, 24, 24, 24, 24, 24), tolerance = 1e-3) expect_equal(mp2$p, c(0, 0.00236, 0.14179, 0.16979, 0.84763, 0.00542, 0.00136, 0.32563), tolerance = 1e-3) expect_equal(mp2$CI_low, c(24.57489, 4.93385, -1.91805, -0.05477, -2.96368, -4.41987, -0.16904, -0.05117), tolerance = 1e-3) }) test_that("model_parameters, ci_method kenward", { expect_equal(mp3$SE, c(2.97608, 6.10454, 3.98754, 0.02032, 1.60327, 0.91599, 0.05509, 0.01962), tolerance = 1e-3) expect_equal(mp3$df, c(19.39553, 5.27602, 23.57086, 8.97297, 22.7421, 23.76299, 2.72622, 22.82714), tolerance = 1e-3) expect_equal(mp3$p, c(0, 0.09176, 0.19257, 0.30147, 0.84942, 0.00828, 0.15478, 0.40248), tolerance = 1e-3) expect_equal(mp3$CI_low, c(24.08091, -2.887, 
-2.88887, -0.06828, -3.01082, -4.5299, -0.29339, -0.05735), tolerance = 1e-3) }) test_that("model_parameters, ci_method wald (t)", { expect_equal(mp4$SE, c(2.77457, 3.69574, 3.521, 0.01574, 1.58514, 0.86316, 0.02973, 0.01668), tolerance = 1e-3) expect_equal(mp4$df, c(22, 22, 22, 22, 22, 22, 22, 22), tolerance = 1e-3) expect_equal(mp4$p, c(0, 0.00258, 0.14297, 0.17095, 0.84778, 0.00578, 0.00151, 0.32653), tolerance = 1e-3) expect_equal(mp4$CI_low, c(24.54722, 4.89698, -1.95317, -0.05493, -2.97949, -4.42848, -0.16933, -0.05133), tolerance = 1e-3) }) test_that("model_parameters, satterthwaite compare", { s <- summary(model2) expect_equal(mp2$df, as.vector(s$coefficients[, "df"]), tolerance = 1e-4) expect_equal(mp2$t, as.vector(s$coefficients[, "t value"]), tolerance = 1e-4) expect_equal(mp2$p, as.vector(s$coefficients[, "Pr(>|t|)"]), tolerance = 1e-4) expect_equal(mp2$SE, as.vector(s$coefficients[, "Std. Error"]), tolerance = 1e-4) }) test_that("model_parameters, satterthwaite Conf Int-1", { ci1 <- ci_satterthwaite(model) expect_equal(mp2$CI_low, ci1$CI_low, tolerance = 1e-4) ci2 <- ci_satterthwaite(model2) expect_equal(mp2$CI_low, ci2$CI_low, tolerance = 1e-4) }) test_that("model_parameters, satterthwaite Conf Int-2", { coef.table <- as.data.frame(summary(model2)$coefficients) coef.table$CI_low <- coef.table$Estimate - (coef.table$"Std. Error" * qt(.975, df = coef.table$df)) coef.table$CI_high <- coef.table$Estimate + (coef.table$"Std. 
Error" * qt(.975, df = coef.table$df)) expect_equal(mp2$CI_low, coef.table$CI_low, tolerance = 1e-4) expect_equal(mp2$CI_high, coef.table$CI_high, tolerance = 1e-4) }) test_that("model_parameters, Kenward-Roger compare", { s <- summary(model2, ddf = "Kenward-Roger") expect_equal(mp3$df, as.vector(s$coefficients[, "df"]), tolerance = 1e-4) expect_equal(mp3$t, as.vector(s$coefficients[, "t value"]), tolerance = 1e-4) expect_equal(mp3$p, as.vector(s$coefficients[, "Pr(>|t|)"]), tolerance = 1e-4) expect_equal(mp3$SE, as.vector(s$coefficients[, "Std. Error"]), tolerance = 1e-4) }) model <- lm(mpg ~ as.factor(gear) * hp + as.factor(am) + wt, data = mtcars) test_that("model_parameters, ci_method-lm", { expect_s3_class(model_parameters(model), "parameters_model") expect_warning(model_parameters(model, ci_method = "kenward")) }) unloadNamespace("afex") unloadNamespace("lmerTest") } parameters/tests/testthat/test-model_parameters.mle2.R0000644000175000017500000000115614122064334023034 0ustar nileshnileshif (requiet("testthat") && requiet("parameters") && requiet("bbmle")) { x <- 0:10 y <- c(26, 17, 13, 12, 20, 5, 9, 8, 5, 4, 8) d <- data.frame(x, y) LL <- function(ymax = 15, xhalf = 6) { -sum(stats::dpois(y, lambda = ymax / (1 + x / xhalf), log = TRUE)) } model <- suppressWarnings(mle2(LL)) test_that("model_parameters.mle2", { params <- model_parameters(model) expect_equal(params$SE, c(4.224444, 1.034797), tolerance = 1e-3) expect_equal( colnames(params), c("Parameter", "Coefficient", "SE", "CI", "CI_low", "CI_high", "z", "df_error", "p") ) }) } parameters/tests/testthat/test-model_parameters_df.R0000644000175000017500000003022714135275207022657 0ustar nileshnilesh.runThisTest <- Sys.getenv("RunAllparametersTests") == "yes" if (.runThisTest && requiet("testthat") && requiet("parameters")) { # glm --------------------------- set.seed(123) data(mtcars) model <- glm(vs ~ wt + cyl, data = mtcars, family = "binomial") test_that("model_parameters.glm", { params <- 
suppressWarnings(model_parameters(model)) expect_equal(params$df_error, c(Inf, Inf, Inf), tolerance = 1e-3) expect_equal(params$CI_low, c(4.7888, -0.52956, -6.91917), tolerance = 1e-3) expect_equal(params$p, c(0.01084, 0.17431, 0.03362), tolerance = 1e-3) params <- suppressWarnings(model_parameters(model, ci_method = "normal")) expect_equal(params$df_error, c(Inf, Inf, Inf), tolerance = 1e-3) expect_equal(params$CI_low, c(2.4503, -0.9299, -5.63472), tolerance = 1e-3) expect_equal(params$p, c(0.01084, 0.17431, 0.03362), tolerance = 1e-3) params <- suppressWarnings(model_parameters(model, ci_method = "residual")) expect_equal(params$df_error, c(29, 29, 29), tolerance = 1e-3) expect_equal(params$CI_low, c(2.09492, -1.06171, -5.75235), tolerance = 1e-3) expect_equal(params$p, c(0.0164, 0.18479, 0.04227), tolerance = 1e-3) }) # PROreg --------------------------- if (requiet("PROreg")) { set.seed(1234) # defining the parameters k <- 100 m <- 10 phi <- 0.5 beta <- c(1.5, -1.1) sigma <- 0.5 # simulating the covariate and random effects x <- runif(k, 0, 10) X <- model.matrix(~x) z <- as.factor(rBI(k, 4, 0.5, 2)) Z <- model.matrix(~ z - 1) u <- rnorm(5, 0, sigma) # the linear predictor and simulated response variable eta <- beta[1] + beta[2] * x + crossprod(t(Z), u) p <- 1 / (1 + exp(-eta)) y <- rBB(k, m, p, phi) dat <- data.frame(cbind(y, x, z)) dat$z <- as.factor(dat$z) # apply the model model <- PROreg::BBmm( fixed.formula = y ~ x, random.formula = ~z, m = m, data = dat ) test_that("model_parameters.BBmm", { params <- suppressWarnings(model_parameters(model)) expect_equal(params$df_error, c(96, 96), tolerance = 1e-3) expect_equal(params$CI_low, c(0.26363, -1.46645), tolerance = 1e-3) expect_equal(params$p, c(0.00811, 0), tolerance = 1e-3) params <- suppressWarnings(model_parameters(model, ci_method = "normal")) expect_equal(params$df_error, c(Inf, Inf), tolerance = 1e-3) expect_equal(params$CI_low, c(0.27359, -1.46136), tolerance = 1e-3) expect_equal(params$p, c(0.00811, 
0), tolerance = 1e-3) }) set.seed(18) # we simulate a covariate, fix the paramters of the beta-binomial # distribution and simulate a response variable. # then we apply the model, and try to get the same values. k <- 1000 m <- 10 x <- rnorm(k, 5, 3) beta <- c(-10, 2) p <- 1 / (1 + exp(-1 * (beta[1] + beta[2] * x))) phi <- 1.2 y <- PROreg::rBB(k, m, p, phi) # model model <- PROreg::BBreg(y ~ x, m) test_that("model_parameters.BBreg", { params <- suppressWarnings(model_parameters(model)) expect_equal(params$df_error, c(997, 997), tolerance = 1e-3) expect_equal(params$CI_low, c(-11.08184, 1.84727), tolerance = 1e-3) expect_equal(params$p, c(0, 0), tolerance = 1e-3) params <- suppressWarnings(model_parameters(model, ci_method = "normal")) expect_equal(params$df_error, c(Inf, Inf), tolerance = 1e-3) expect_equal(params$CI_low, c(-11.08069, 1.84749), tolerance = 1e-3) expect_equal(params$p, c(0, 0), tolerance = 1e-3) }) } # MASS / nnet --------------------------- if (requiet("MASS") && requiet("nnet")) { set.seed(123) utils::example(topic = birthwt, echo = FALSE) # model model <- nnet::multinom( formula = low ~ ., data = bwt, trace = FALSE ) test_that("model_parameters.multinom", { params <- suppressWarnings(model_parameters(model)) expect_equal(params$df_error, c(178, 178, 178, 178, 178, 178, 178, 178, 178, 178, 178), tolerance = 1e-3) expect_equal(params$CI_low, c( -1.6332, -0.11362, -0.02963, 0.13471, -0.17058, -0.08325, 0.39528, 0.49086, -0.23614, -1.38245, -0.72163 ), tolerance = 1e-3) expect_equal(params$p, c( 0.50926, 0.33729, 0.02833, 0.02736, 0.11049, 0.07719, 0.00575, 0.00866, 0.14473, 0.36392, 0.69537 ), tolerance = 1e-3) params <- suppressWarnings(model_parameters(model, ci_method = "normal")) expect_equal(params$df_error, c(Inf, Inf, Inf, Inf, Inf, Inf, Inf, Inf, Inf, Inf, Inf), tolerance = 1e-3) expect_equal(params$CI_low, c( -1.6165, -0.1131, -0.02953, 0.1419, -0.16439, -0.07755, 0.40173, 0.50053, -0.22991, -1.37601, -0.71551 ), tolerance = 1e-3) 
expect_equal(params$p, c( 0.5084, 0.33599, 0.02706, 0.0261, 0.10872, 0.07548, 0.00518, 0.00794, 0.14296, 0.36269, 0.6949 ), tolerance = 1e-3) }) } # ivprobit --------------------------- if (requiet("ivprobit")) { set.seed(123) data(eco) # model model <- ivprobit::ivprobit( formula = d2 ~ ltass + roe + div | eqrat + bonus | ltass + roe + div + gap + cfa, data = eco ) test_that("model_parameters.ivprobit", { params <- suppressWarnings(model_parameters(model)) expect_equal(params$df_error, c(789L, 789L, 789L, 789L, 789L, 789L), tolerance = 1e-3) expect_equal(params$CI_low, c(-35.96484, -0.27082, -0.15557, -1e-05, -15.95755, -1e-05), tolerance = 1e-3) expect_equal(params$p, c(0.08355, 0.12724, 0.55684, 0.63368, 0.46593, 0.61493), tolerance = 1e-3) params <- suppressWarnings(model_parameters(model, ci_method = "normal")) expect_equal(params$df_error, c(Inf, Inf, Inf, Inf, Inf, Inf), tolerance = 1e-3) expect_equal(params$CI_low, c(-35.93553, -0.26895, -0.15522, -1e-05, -15.91859, -1e-05), tolerance = 1e-3) expect_equal(params$p, c(0.08316, 0.12684, 0.55668, 0.63355, 0.46571, 0.61479), tolerance = 1e-3) }) } # biglm --------------------------- if (requiet("biglm")) { set.seed(123) data(trees) # model model <- biglm::bigglm( formula = log(Volume) ~ log(Girth) + log(Height), data = trees, chunksize = 10, sandwich = TRUE ) test_that("model_parameters.bigglm", { params <- suppressWarnings(model_parameters(model)) expect_equal(params$df_error, c(28, 28, 28), tolerance = 1e-3) expect_equal(params$CI_low, c(-8.12252, 1.86862, 0.72411), tolerance = 1e-3) expect_equal(params$p, c(0, 0, 0), tolerance = 1e-3) params <- suppressWarnings(model_parameters(model, ci_method = "normal")) expect_equal(params$df_error, c(Inf, Inf, Inf), tolerance = 1e-3) expect_equal(params$CI_low, c(-8.05815, 1.87355, 0.74108), tolerance = 1e-3) expect_equal(params$p, c(0, 0, 0), tolerance = 1e-3) }) } # ivreg --------------------------- if (requiet("ivreg")) { data(CigarettesSW) CigarettesSW$rprice <- 
with(CigarettesSW, price / cpi) CigarettesSW$rincome <- with(CigarettesSW, income / population / cpi) CigarettesSW$tdiff <- with(CigarettesSW, (taxs - tax) / cpi) model <- ivreg::ivreg( log(packs) ~ log(rprice) + log(rincome) | log(rincome) + tdiff + I(tax / cpi), data = CigarettesSW, subset = year == "1995" ) test_that("model_parameters.ivreg", { params <- suppressWarnings(model_parameters(model)) expect_equal(params$df_error, c(45L, 45L, 45L), tolerance = 1e-3) expect_equal(params$CI_low, c(7.76291, -1.80753, -0.20009), tolerance = 1e-3) expect_equal(params$p, c(0, 1e-05, 0.24602), tolerance = 1e-3) params <- suppressWarnings(model_parameters(model, ci_method = "normal")) expect_equal(params$df_error, c(Inf, Inf, Inf), tolerance = 1e-3) expect_equal(params$CI_low, c(7.82022, -1.79328, -0.18717), tolerance = 1e-3) expect_equal(params$p, c(0, 0, 0.23984), tolerance = 1e-3) }) } # plm --------------------------- if (requiet("plm") && getRversion() > "3.5") { data("Produc", package = "plm") set.seed(123) model <- plm::plm( formula = log(gsp) ~ log(pcap) + log(pc) + log(emp) + unemp, data = Produc, index = c("state", "year") ) test_that("model_parameters.plm", { params <- suppressWarnings(model_parameters(model)) expect_equal(params$df_error, c(764L, 764L, 764L, 764L), tolerance = 1e-3) expect_equal(params$CI_low, c(-0.08308, 0.2427, 0.70909, -0.00724), tolerance = 1e-3) expect_equal(params$p, c(0.36752, 0, 0, 0), tolerance = 1e-3) params <- suppressWarnings(model_parameters(model, ci_method = "normal")) expect_equal(params$df_error, c(Inf, Inf, Inf, Inf), tolerance = 1e-3) expect_equal(params$CI_low, c(-0.08299, 0.24277, 0.70918, -0.00724), tolerance = 1e-3) expect_equal(params$p, c(0.36724, 0, 0, 0), tolerance = 1e-3) }) } # nlme --------------------------- if (requiet("nlme")) { data(Ovary) model <- gls( follicles ~ sin(2 * pi * Time) + cos(2 * pi * Time), data = Ovary, correlation = corAR1(form = ~ 1 | Mare) ) test_that("model_parameters.gls", { params <- 
suppressWarnings(model_parameters(model)) expect_equal(params$df_error, c(305L, 305L, 305L), tolerance = 1e-3) expect_equal(params$CI_low, c(10.90853, -4.04402, -2.2722), tolerance = 1e-3) expect_equal(params$p, c(0, 2e-05, 0.19814), tolerance = 1e-3) params <- suppressWarnings(model_parameters(model, ci_method = "normal")) expect_equal(params$df_error, c(Inf, Inf, Inf), tolerance = 1e-3) expect_equal(params$CI_low, c(10.91372, -4.03898, -2.26675), tolerance = 1e-3) expect_equal(params$p, c(0, 2e-05, 0.19716), tolerance = 1e-3) }) } # # complmrob --------------------------- # # if (requiet("complmrob")) { # # crimes <- data.frame( # lifeExp = state.x77[, "Life Exp"], # USArrests[, c("Murder", "Assault", "Rape")] # ) # # # model # model <- complmrob::complmrob(formula = lifeExp ~ ., data = crimes) # # test_that("model_parameters.complmrob", { # params <- suppressWarnings(model_parameters(model)) # expect_equal(params$df_error, c(46L, 46L, 46L, 46L), tolerance = 1e-3) # expect_equal(params$CI_low, c(69.79492, -3.09088, -2.91019, 2.05479), tolerance = 1e-3) # expect_equal(params$p, c(0, 0, 0.26437, 0), tolerance = 1e-3) # # params <- suppressWarnings(model_parameters(model, ci_method = "normal")) # expect_equal(params$df_error, c(Inf, Inf, Inf, Inf), tolerance = 1e-3) # expect_equal(params$CI_low, c(69.81747, -3.06832, -2.86118, 2.087), tolerance = 1e-3) # expect_equal(params$p, c(0, 0, 0.25851, 0), tolerance = 1e-3) # }) # } # drc --------------------------- if (requiet("drc")) { set.seed(123) data("selenium") # model model <- drc::drm( formula = dead / total ~ conc, curveid = type, weights = total, data = selenium, fct = LL.2(), type = "binomial" ) test_that("model_parameters.drc", { params <- suppressWarnings(model_parameters(model)) expect_equal(params$df_error, c(17L, 17L, 17L, 17L, 17L, 17L, 17L, 17L), tolerance = 1e-3) expect_equal(params$CI_low, c( -1.83156, -1.13673, -2.4552, -1.80875, 223.0835, 295.39556, 107.25398, 70.62683 ), tolerance = 1e-3) 
expect_equal(params$p, c(0, 1e-05, 0, 0, 0, 0, 0, 0), tolerance = 1e-3) params <- suppressWarnings(model_parameters(model, ci_method = "normal")) expect_equal(params$df_error, c(Inf, Inf, Inf, Inf, Inf, Inf, Inf, Inf), tolerance = 1e-3) expect_equal(params$CI_low, c( -1.80826, -1.11588, -2.43449, -1.78349, 225.15547, 301.29532, 108.13891, 71.91797 ), tolerance = 1e-3) expect_equal(params$p, c(0, 0, 0, 0, 0, 0, 0, 0), tolerance = 1e-3) }) } } parameters/tests/testthat/test-tobit.R0000644000175000017500000000165014122064334017773 0ustar nileshnileshif (requiet("testthat") && requiet("parameters") && requiet("AER")) { data("Affairs", package = "AER") m1 <- AER::tobit( affairs ~ age + yearsmarried + religiousness + occupation + rating, data = Affairs ) test_that("ci", { expect_equal( ci(m1)$CI_low, c(2.80106, -0.33435, 0.29049, -2.47756, -0.17261, -3.0843), tolerance = 1e-4 ) }) test_that("se", { expect_equal( standard_error(m1)$SE, c(2.74145, 0.07909, 0.13452, 0.40375, 0.25442, 0.40783), tolerance = 1e-4 ) }) test_that("p_value", { expect_equal( p_value(m1)$p, c(0.00287, 0.02337, 4e-05, 3e-05, 0.20001, 0), tolerance = 1e-4 ) }) test_that("model_parameters", { expect_equal( model_parameters(m1)$Coefficient, c(8.1742, -0.17933, 0.55414, -1.68622, 0.32605, -2.28497), tolerance = 1e-4 ) }) } parameters/tests/testthat/test-model_parameters.pairwise.htest.R0000644000175000017500000000166214122064334025150 0ustar nileshnileshif (requiet("testthat") && requiet("parameters")) { test_that("model_parameters.pairwise.htest", { data(airquality) airquality$Month <- factor(airquality$Month, labels = month.abb[5:9]) model <- pairwise.t.test(airquality$Ozone, airquality$Month) mp <- model_parameters(model) expect_equal( mp$Group1, c("Jun", "Jul", "Jul", "Aug", "Aug", "Aug", "Sep", "Sep", "Sep", "Sep") ) expect_equal( mp$p, c(1, 0.00026, 0.05113, 0.00019, 0.04987, 1, 1, 1, 0.00488, 0.00388), tolerance = 1e-3 ) smokers <- c(83, 90, 129, 70) patients <- c(86, 93, 136, 82) model <- 
suppressWarnings(pairwise.prop.test(smokers, patients)) mp <- model_parameters(model) expect_equal( mp$Group1, c("2", "3", "3", "4", "4", "4") ) expect_equal( mp$p, c(1, 1, 1, 0.11856, 0.09322, 0.12377), tolerance = 1e-3 ) }) } parameters/tests/testthat/test-model_parameters.truncreg.R0000644000175000017500000000124414122064334024024 0ustar nileshnileshif (requiet("testthat") && requiet("truncreg") && requiet("survival") && requiet("parameters")) { set.seed(123) data("tobin", package = "survival") model <- truncreg( formula = durable ~ age + quant, data = tobin, subset = durable > 0 ) test_that("model_parameters.truncreg", { params <- model_parameters(model) expect_equal(params$SE, c(9.21875, 0.22722, 0.03259, 0.56841), tolerance = 1e-3) expect_equal(params$t, c(1.36653, 1.89693, -3.64473, 2.90599), tolerance = 1e-3) expect_equal( colnames(params), c("Parameter", "Coefficient", "SE", "CI", "CI_low", "CI_high", "t", "df_error", "p") ) }) } parameters/tests/testthat/test-wrs2.R0000644000175000017500000000340314122064334017545 0ustar nileshnileshif (requiet("testthat") && requiet("WRS2") && packageVersion("WRS2") >= "1.1.3" && getRversion() >= "3.6.0") { # model_parameters.t1way --------------------------------------------------- test_that("model_parameters.t1way", { set.seed(123) df_b <- model_parameters(t1way(libido ~ dose, data = viagra)) set.seed(123) df_w <- model_parameters(rmanova(WineTasting$Taste, WineTasting$Wine, WineTasting$Taster)) }) # model_parameters.yuen --------------------------------------------------- test_that("model_parameters.yuen", { set.seed(123) df_b <- model_parameters(yuen(Anxiety ~ Group, data = spider)) before <- c(190, 210, 300, 240, 280, 170, 280, 250, 240, 220) after <- c(210, 210, 340, 190, 260, 180, 200, 220, 230, 200) set.seed(123) df_w <- model_parameters(yuend(before, after)) }) # model_parameters.mcp and robtab --------------------------------------- test_that("model_parameters.mcp and robtab", { set.seed(123) df_b <- 
model_parameters(lincon(libido ~ dose, data = viagra)) set.seed(123) df_w <- model_parameters(rmmcp(WineTasting$Taste, WineTasting$Wine, WineTasting$Taster)) set.seed(123) df <- model_parameters(discmcp(libido ~ dose, viagra, nboot = 100)) }) # model_parameters.akp.effect ----------------------------------------------- test_that("model_parameters.AKP", { set.seed(123) mod <- WRS2::akp.effect( formula = wt ~ am, data = mtcars, EQVAR = FALSE ) }) # model_parameters.onesampb --------------------------------------------------- test_that("model_parameters.onesampb", { set.seed(123) x <- rnorm(30) set.seed(123) mod <- onesampb(x, nboot = 100) }) } parameters/tests/testthat/test-model_parameters.efa_cfa.R0000644000175000017500000000474514122064334023550 0ustar nileshnileshlinux <- tryCatch({ si <- Sys.info() if (!is.null(si["sysname"])) { si["sysname"] == "Linux" || grepl("^linux", R.version$os) } else { FALSE } }) if (requiet("testthat") && requiet("parameters") && requiet("psych") && requiet("lavaan") && requiet("BayesFM") && requiet("FactoMineR")) { test_that("principal_components", { set.seed(333) x <- principal_components(mtcars[, 1:7], n = "all", threshold = 0.2) expect_equal(c(ncol(x), nrow(x)), c(8, 7)) x <- principal_components(mtcars[, 1:7], n = 2, rotation = "oblimin", threshold = "max", sort = TRUE) expect_equal(c(ncol(x), nrow(x)), c(6, 7)) pca <- principal_components(mtcars[, 1:5], n = 2) expect_equal(c(ncol(pca), nrow(pca)), c(4, 5)) x <- summary(pca) expect_equal(c(ncol(x), nrow(x)), c(3, 4)) x <- model_parameters(pca) expect_equal(c(ncol(x), nrow(x)), c(5, 2)) x <- predict(pca) expect_equal(c(ncol(x), nrow(x)), c(2, 32)) }) test_that("efa-cfa", { efa <- psych::fa(attitude, nfactors = 3) params <- parameters::model_parameters(efa) expect_equal(c(nrow(params), ncol(params)), c(7, 6)) model1 <- efa_to_cfa(efa) model2 <- efa_to_cfa(efa, threshold = 0.3) expect_equal(nchar(model1), 109) m1 <- suppressWarnings(lavaan::cfa(model1, data = attitude)) params <- 
parameters::model_parameters(m1) expect_equal(c(nrow(params), ncol(params)), c(10, 10)) expect_warning(parameters::model_parameters(m1, ci = c(0.8, 0.9))) params <- parameters::model_parameters(m1, standardize = TRUE, component = "all") expect_equal(c(nrow(params), ncol(params)), c(20, 10)) x <- lavaan::anova(m1, lavaan::cfa(model2, data = attitude)) params <- parameters::model_parameters(x) expect_equal(c(nrow(params), ncol(params)), c(2, 6)) }) # if (!linux) { # test_that("FactoMineR", { # x <- suppressWarnings(model_parameters(FactoMineR::PCA(mtcars, ncp = 3), threshold = 0.2, sort = TRUE)) # expect_equal(c(ncol(x), nrow(x)), c(5, 11)) # # x <- suppressWarnings(model_parameters(FactoMineR::FAMD(iris, ncp = 3), threshold = 0.2, sort = TRUE)) # expect_equal(c(ncol(x), nrow(x)), c(5, 5)) # }) # } set.seed(333) befa <- BayesFM::befa(mtcars, iter = 1000, verbose = FALSE) params <- suppressWarnings(parameters::model_parameters(befa, sort = TRUE)) test_that("BayesFM", { expect_equal(nrow(params), 11) }) } parameters/tests/testthat/test-parameters_type.R0000644000175000017500000000141314122064334022053 0ustar nileshnileshif (requiet("testthat") && requiet("parameters")) { test_that("parameters_type-1", { m0 <- lm(mpg ~ am * cyl, mtcars) m1 <- lm(mpg ~ am * scale(cyl), mtcars) m2 <- lm(mpg ~ scale(am) * cyl, mtcars) m3 <- lm(mpg ~ scale(am) * scale(cyl), mtcars) expect_equal(parameters_type(m0)[4, "Type"], "interaction") expect_equal(parameters_type(m1)[4, "Type"], "interaction") expect_equal(parameters_type(m2)[4, "Type"], "interaction") expect_equal(parameters_type(m3)[4, "Type"], "interaction") }) test_that("parameters_type-2", { model <- lm(Sepal.Length ~ Petal.Width * scale(Petal.Length, TRUE, FALSE), data = iris) expect_equal(parameters_type(model)$Type, c("intercept", "numeric", "numeric", "interaction")) }) } parameters/tests/testthat/test-model_parameters.gam.R0000644000175000017500000000147014122064334022740 0ustar nileshnileshif (requiet("testthat") && 
requiet("insight") && requiet("parameters") && requiet("mgcv")) { set.seed(123) model <- mgcv::gam( formula = mpg ~ s(hp) + s(wt) + factor(cyl) + am + qsec, family = stats::quasi(), data = mtcars ) test_that("model_parameters.gam", { params <- model_parameters(model) expect_equal(params$SE, c(10.83359, 1.80704, 2.82608, 1.71366, 0.53172, NA, NA), tolerance = 1e-2) expect_equal(params$df_error, c(23.3923, 23.3923, 23.3923, 23.3923, 23.3923, NA, NA), tolerance = 1e-2) expect_equal(params$CI[[1]], .95, tolerance = 1e-2) expect_equal( colnames(params), c( "Parameter", "Coefficient", "SE", "CI", "CI_low", "CI_high", "t / F", "df", "df_error", "p", "Component" ) ) }) } parameters/tests/testthat/test-MCMCglmm.R0000644000175000017500000000200014122064334020234 0ustar nileshnileshif (requiet("testthat") && requiet("parameters") && requiet("MCMCglmm")) { data(PlodiaPO) set.seed(123) m1 <- MCMCglmm( PO ~ plate, random = ~FSfamily, data = PlodiaPO, verbose = FALSE, nitt = 1300, burnin = 300, thin = 1 ) test_that("ci", { expect_equal( ci(m1)$CI_low, c(0.97495, 0.03407), tolerance = 0.01 ) }) test_that("se", { expect_equal( standard_error(m1)$SE, c(0.02309, 0.00509), tolerance = 0.01 ) }) test_that("p_value", { expect_equal( p_value(m1)$p, c(0, 0), tolerance = 0.01 ) }) test_that("model_parameters", { expect_equal( model_parameters(m1, centrality = "mean")$Mean, c(1.0132, 0.04232), tolerance = 0.01 ) }) test_that("model_parameters", { expect_equal( model_parameters(m1, centrality = "median")$Median, c(1.01382, 0.04207), tolerance = 0.01 ) }) } parameters/tests/testthat/test-coxph.R0000644000175000017500000000303514131532463017775 0ustar nileshnileshif (requiet("testthat") && requiet("parameters") && requiet("survival") && getRversion() >= "3.6.0") { lung <- subset(lung, subset = ph.ecog %in% 0:2) lung$sex <- factor(lung$sex, labels = c("male", "female")) lung$ph.ecog <- factor(lung$ph.ecog, labels = c("good", "ok", "limited")) m1 <- coxph(Surv(time, status) ~ sex + age + 
ph.ecog, data = lung) test_that("ci", { expect_equal( ci(m1)$CI_low, c(-0.87535, -0.00747, 0.01862, 0.45527), tolerance = 1e-4 ) }) test_that("se", { expect_equal( standard_error(m1)$SE, c(0.16823, 0.00931, 0.19961, 0.22809), tolerance = 1e-4 ) }) test_that("p_value", { expect_equal( p_value(m1)$p, c(0.00118, 0.24713, 0.04005, 8e-05), tolerance = 1e-4 ) }) test_that("model_parameters", { expect_equal( model_parameters(m1)$Coefficient, c(-0.54563, 0.01078, 0.40984, 0.90232), tolerance = 1e-4 ) }) # Create the simplest test data set test1 <- list( time = c(4, 3, 1, 1, 2, 2, 3), status = c(1, 1, 1, 0, 1, 1, 0), x = c(0, 2, 1, 1, 1, 0, 0), sex = c(0, 0, 0, 0, 1, 1, 1) ) # Fit a stratified model m2 <- coxph(Surv(time, status) ~ x + strata(sex), test1) test_that("model_parameters", { expect_equal(model_parameters(m2)$Coefficient, 0.8023179, tolerance = 1e-4) expect_equal(model_parameters(m2)$z, 0.9756088, tolerance = 1e-4) expect_equal(model_parameters(m2)$p, 0.3292583, tolerance = 1e-4) }) } parameters/tests/testthat/test-model_parameters.mfx.R0000644000175000017500000000436314122064334022772 0ustar nileshnileshif (requiet("testthat") && requiet("insight") && requiet("parameters") && requiet("mfx")) { set.seed(12345) n <- 1000 x <- rnorm(n) y <- rbeta(n, shape1 = plogis(1 + 0.5 * x), shape2 = (abs(0.2 * x))) y <- (y * (n - 1) + 0.5) / n data <- data.frame(y, x) model <- betamfx(y ~ x | x, data = data) params <- suppressWarnings(model_parameters(model)) test_that("model_parameters.betamfx", { expect_equal(params$Parameter, c("x", "(Intercept)", "x", "(Intercept)", "x")) expect_equal(params$Coefficient, c(0.02259, 1.35961, 0.13947, 0.07498, 0.12071), tolerance = 1e-2) expect_equal(params$Component, c("marginal", "conditional", "conditional", "precision", "precision")) }) model <- betaor(y ~ x | x, data = data) params <- suppressWarnings(model_parameters(model)) test_that("model_parameters.betaor", { expect_equal(params$Parameter, c("(Intercept)", "x")) 
expect_equal(params$Coefficient, c(1.35961, 0.13947), tolerance = 1e-2) expect_null(params$Component) }) params <- suppressWarnings(model_parameters(model, component = "all")) test_that("model_parameters.betaor", { expect_equal(params$Parameter, c("(Intercept)", "x", "(Intercept)", "x")) expect_equal(params$Coefficient, unname(do.call(rbind, coef(summary(model$fit)))[, 1]), tolerance = 1e-2) expect_equal(params$Component, c("conditional", "conditional", "precision", "precision")) }) set.seed(12345) n <- 1000 x <- rnorm(n) y <- rnegbin(n, mu = exp(1 + 0.5 * x), theta = 0.5) data <- data.frame(y, x) model <- poissonmfx(formula = y ~ x, data = data) params <- suppressWarnings(model_parameters(model)) test_that("model_parameters.poissonmfx", { expect_equal(params$Parameter, c("x", "(Intercept)", "x")) expect_equal(params$Coefficient, c(1.46009, 0.96036, 0.54496), tolerance = 1e-2) expect_equal(params$Component, c("marginal", "conditional", "conditional")) }) params <- suppressWarnings(model_parameters(model, component = "cond")) test_that("model_parameters.poissonmfx", { expect_equal(params$Parameter, c("(Intercept)", "x")) expect_equal(params$Coefficient, c(0.96036, 0.54496), tolerance = 1e-2) expect_null(params$Component) }) } parameters/tests/testthat.R0000644000175000017500000000150714166770670015715 0ustar nileshnileshif (require("testthat")) { library(parameters) if (length(strsplit(packageDescription("parameters")$Version, "\\.")[[1]]) > 3) { Sys.setenv("RunAllparametersTests" = "yes") } else { Sys.setenv("RunAllparametersTests" = "no") } si <- Sys.info() osx <- tryCatch( { if (!is.null(si["sysname"])) { si["sysname"] == "Darwin" || grepl("^darwin", R.version$os) } else { FALSE } }, error = function(e) { FALSE } ) solaris <- tryCatch( { if (!is.null(si["sysname"])) { grepl("SunOS", si["sysname"], ignore.case = TRUE) } else { FALSE } }, error = function(e) { FALSE } ) # if (!osx && !solaris) { # test_check("parameters") # } test_check("parameters") } 
parameters/tests/spelling.R0000644000175000017500000000023314044454046015654 0ustar nileshnileshif (requireNamespace("spelling", quietly = TRUE)) { spelling::spell_check_test( vignettes = TRUE, error = FALSE, skip_on_cran = TRUE ) } parameters/R/0000755000175000017500000000000014166656750012770 5ustar nileshnileshparameters/R/methods_maxLik.R0000644000175000017500000000071714012467213016050 0ustar nileshnilesh# .maxLik, .maxim #' @export model_parameters.maxLik <- model_parameters.default #' @export model_parameters.maxim <- model_parameters.default #' @export p_value.maxLik <- function(model, ...) { p <- summary(model)$estimate[, 4] .data_frame( Parameter = .remove_backticks_from_string(names(p)), p = as.vector(p) ) } #' @export ci.maxLik <- ci.default #' @export standard_error.maxLik <- standard_error.default parameters/R/cluster_centers.R0000644000175000017500000001021414131014351016266 0ustar nileshnilesh#' Find the cluster centers in your data #' #' For each cluster, computes the mean (or other indices) of the variables. Can be used #' to retrieve the centers of clusters. Also returns the within Sum of Squares. #' #' @param data A data.frame. #' @param clusters A vector with clusters assignments (must be same length as rows in data). #' @param fun What function to use, `mean` by default. #' @param ... Other arguments to be passed to or from other functions. #' #' @return A dataframe containing the cluster centers. Attributes include performance statistics and distance between each observation and its respective cluster centre. #' #' #' @examples #' k <- kmeans(iris[1:4], 3) #' cluster_centers(iris[1:4], clusters = k$cluster) #' cluster_centers(iris[1:4], clusters = k$cluster, fun = median) #' @export cluster_centers <- function(data, clusters, fun = mean, ...) 
{ # Get n obs params <- data.frame(table(clusters)) names(params) <- c("Cluster", "n_Obs") # Get Within clusters sum of squares (WCSS) ss <- .cluster_centers_SS(data, clusters) params$Sum_Squares <- ss$WSS # Get Cluster Centers centers <- stats::aggregate(data, list(Cluster = clusters), fun) params <- merge(params, centers, by = "Cluster") # Get distance of observations from cluster # Add attributes attr(params, "Sum_Squares_Total") <- ss$TSS attr(params, "Sum_Squares_Between") <- ss$BSS attr(params, "variance") <- ss$BSS / ss$TSS attr(params, "scale") <- sapply(data, stats::sd) attr(params, "distance") <- .cluster_centers_distance(data, clusters, centers, attributes(params)$scale) params } # Performance ------------------------------------------------------------- #' @keywords internal .cluster_centers_params <- function(data, clusters, ...) { # This function actually wraps *around* the exported cluster_centers() # to be used within the different model_parameters() functions for clusters params <- cluster_centers(data = data, clusters = clusters, ...) 
# Long means means <- datawizard::reshape_longer(params, cols = 4:ncol(params), values_to = "Mean", names_to = "Variable" ) attr(params, "variance") <- attributes(params)$variance attr(params, "Sum_Squares_Between") <- attributes(params)$Sum_Squares_Between attr(params, "Sum_Squares_Total") <- attributes(params)$Sum_Squares_Total attr(params, "scale") <- attributes(params)$scale attr(params, "distance") <- attributes(params)$distance attr(params, "scores") <- attributes(params)$scores attr(params, "means") <- means class(params) <- c("parameters_clusters", class(params)) params } # Distance ---------------------------------------------------------------- #' @keywords internal .cluster_centers_distance <- function(data, clusters, centers, scale) { dis <- c() for (c in unique(clusters)) { center <- centers[centers$Cluster == c, ] center$Cluster <- NULL # Remove column d <- apply(data[clusters == c, ], 1, function(x) { z <- x - center[names(data)] z <- z / scale sqrt(sum((z)^2)) }) dis <- c(dis, d) } dis } # Performance ------------------------------------------------------------- #' @keywords internal .cluster_centers_SS <- function(data, clusters) { # https://stackoverflow.com/questions/68714612/compute-between-clusters-sum-of-squares-bcss-and-total-sum-of-squares-manually # total sum of squares TSS <- sum(scale(data, scale = FALSE)^2) # Within clusters sum of squares (WCSS) WSS <- sapply(split(data, clusters), function(x) sum(scale(x, scale = FALSE)^2)) # Between clsuters sum of squares BSS <- TSS - sum(WSS) # Compute BSS directly (without TSS to double check) gmeans <- sapply(split(data, clusters), colMeans) means <- colMeans(data) BSS2 <- sum(colSums((gmeans - means)^2) * table(clusters)) # Double check if (BSS2 - BSS > 1e-05) stop("The between sum of squares computation went wrong. 
Please open an issue at https://github.com/easystats/parameters/issues so we can fix the bug (provide an example and mention that 'BSS != BSS2').") list(WSS = WSS, BSS = BSS, TSS = TSS) } parameters/R/reexports.R0000644000175000017500000000225714104230351015125 0ustar nileshnilesh# ----------------------- insight ------------------------------------- #' @importFrom insight standardize_names #' @export insight::standardize_names #' @importFrom insight supported_models #' @export insight::supported_models #' @importFrom insight print_html #' @export insight::print_html # ----------------------- datawizard ------------------------------------- #' @importFrom datawizard describe_distribution #' @export datawizard::describe_distribution #' @importFrom datawizard demean #' @export datawizard::demean #' @importFrom datawizard rescale_weights #' @export datawizard::rescale_weights #' @importFrom datawizard data_to_numeric #' @export datawizard::data_to_numeric #' @importFrom datawizard convert_data_to_numeric #' @export datawizard::convert_data_to_numeric #' @importFrom datawizard skewness datawizard::skewness #' @importFrom datawizard kurtosis #' @export datawizard::kurtosis #' @importFrom datawizard smoothness #' @export datawizard::smoothness #' @importFrom datawizard center #' @export datawizard::center #' @importFrom datawizard visualisation_recipe #' @export datawizard::visualisation_recipe parameters/R/ci_betwithin.R0000644000175000017500000000066614140567704015563 0ustar nileshnilesh#' @rdname p_value_betwithin #' @export ci_betwithin <- function(model, ci = .95, robust = FALSE, ...) { df_bet <- dof_ml1(model) out <- lapply(ci, function(i) { .ci_dof( model = model, ci = i, effects = "fixed", component = "all", dof = df_bet, method = "betwithin", robust = robust, ... 
) }) out <- do.call(rbind, out) row.names(out) <- NULL out } parameters/R/methods_multcomp.R0000644000175000017500000000473414131014352016460 0ustar nileshnilesh#' Parameters from Hypothesis Testing #' #' Parameters from Hypothesis Testing. #' #' @param model Object of class [multcomp::glht()] (\pkg{multcomp}) #' or of class `PMCMR`, `trendPMCMR` or `osrt` (\pkg{PMCMRplus}). #' @inheritParams model_parameters.default #' #' @return A data frame of indices related to the model's parameters. #' #' @examples #' \donttest{ #' if (require("multcomp", quietly = TRUE)) { #' # multiple linear model, swiss data #' lmod <- lm(Fertility ~ ., data = swiss) #' mod <- glht( #' model = lmod, #' linfct = c( #' "Agriculture = 0", #' "Examination = 0", #' "Education = 0", #' "Catholic = 0", #' "Infant.Mortality = 0" #' ) #' ) #' model_parameters(mod) #' } #' if (require("PMCMRplus", quietly = TRUE)) { #' model <- kwAllPairsConoverTest(count ~ spray, data = InsectSprays) #' model_parameters(model) #' } #' } #' @export model_parameters.glht <- function(model, ci = .95, exponentiate = FALSE, verbose = TRUE, ...) { # p-adjustment method s <- summary(model) p_adjust <- s$test$type out <- .model_parameters_generic( model = model, ci = ci, bootstrap = FALSE, iterations = 10, merge_by = "Parameter", standardize = NULL, exponentiate = exponentiate, robust = FALSE, p_adjust = NULL, verbose = verbose, ... ) attr(out, "p_adjust") <- p_adjust attr(out, "object_name") <- deparse(substitute(model), width.cutoff = 500) out } #' @export ci.glht <- function(x, ci = .95, robust = TRUE, ...) { s <- summary(x) if (robust) { adjusted_ci <- 2 * stats::pnorm(s$test$qfunction(ci)) - 1 dof <- Inf } else { adjusted_ci <- ci dof <- x$df } out <- .ci_generic(model = x, ci = adjusted_ci, dof = dof, ...) if (robust) { out$CI <- ci } out } #' @export standard_error.glht <- function(model, ...) 
{ s <- summary(model) .data_frame( Parameter = insight::find_parameters(model, flatten = TRUE), SE = unname(s$test$sigma) ) } #' @export degrees_of_freedom.glht <- function(model, ...) { model$df } #' @export p_value.glht <- function(model, ...) { s <- summary(model) .data_frame( Parameter = insight::find_parameters(model, flatten = TRUE), p = unname(s$test$pvalues) ) } parameters/R/methods_spaMM.R0000644000175000017500000000342614131014353015633 0ustar nileshnilesh #' @export model_parameters.HLfit <- model_parameters.default #' @export ci.HLfit <- function(x, ci = 0.95, method = c("wald", "ml1", "betwithin", "profile", "boot"), iterations = 100, ...) { method <- match.arg(tolower(method)) # Wald approx if (method == "wald") { out <- .ci_generic(model = x, ci = ci, dof = Inf) # ml1 approx } else if (method == "ml1") { out <- ci_ml1(x, ci) # betwithin approx } else if (method == "betwithin") { out <- ci_betwithin(x, ci) # profiled } else if (method == "profile") { nparms <- n_parameters(x) conf <- stats::confint(x, parm = 1:nparms, level = ci, verbose = FALSE, boot_args = NULL) if (nparms == 1) { out <- as.data.frame(t(conf$interval)) } else { out <- as.data.frame(do.call(rbind, lapply(conf, function(i) i$interval))) } colnames(out) <- c("CI_low", "CI_high") out$Parameter <- insight::find_parameters(x, effects = "fixed", flatten = TRUE) out$CI <- ci out <- out[c("Parameter", "CI", "CI_low", "CI_high")] } # # bootstrapping # } else if (method == "boot") { # out <- stats::confint(x, parm = n_parameters(x), level = ci, verbose = FALSE, boot_args = list(nsim = iterations, showpbar = FALSE)) # } out } #' @export standard_error.HLfit <- function(model, method = NULL, ...) 
{ if (is.null(method)) method <- "wald" utils::capture.output(se <- summary(model)$beta_table[, 2]) .data_frame( Parameter = insight::find_parameters(model, effects = "fixed", component = "conditional", flatten = TRUE ), SE = as.vector(se) ) } #' @export p_value.HLfit <- p_value.cpglmm parameters/R/methods_gamlss.R0000644000175000017500000000120614133000425016073 0ustar nileshnilesh#################### .gamlss ------ #' @export model_parameters.gamlss <- model_parameters.gam #' @export standard_error.gamlss <- function(model, ...) { parms <- insight::get_parameters(model) utils::capture.output(cs <- summary(model)) .data_frame( Parameter = parms$Parameter, SE = as.vector(cs[, 2]), Component = parms$Component ) } #' @export p_value.gamlss <- function(model, ...) { parms <- insight::get_parameters(model) utils::capture.output(cs <- summary(model)) .data_frame( Parameter = parms$Parameter, p = as.vector(cs[, 4]), Component = parms$Component ) } parameters/R/methods_ivreg.R0000644000175000017500000000025214133000474015725 0ustar nileshnilesh#' @export p_value.ivreg <- p_value.default #' @export simulate_model.ivreg <- simulate_model.default #' @export standard_error.ivreg <- standard_error.default parameters/R/methods_htest.R0000644000175000017500000007515514137207406015766 0ustar nileshnilesh#' Parameters from hypothesis tests #' #' Parameters of h-tests (correlations, t-tests, chi-squared, ...). #' #' @param model Object of class `htest` or `pairwise.htest`. #' @param bootstrap Should estimates be bootstrapped? #' @param cramers_v,phi Compute Cramer's V or phi as index of effect size. #' Can be `"raw"` or `"adjusted"` (effect size will be bias-corrected). #' Only applies to objects from `chisq.test()`. #' @param cohens_g If `TRUE`, compute Cohen's g as index of effect size. #' Only applies to objects from `mcnemar.test()`. #' @param standardized_d If `TRUE`, compute standardized d as index of #' effect size. Only applies to objects from `t.test()`. 
Calculation of #' `d` is based on the t-value (see [effectsize::t_to_d()]) #' for details. #' @param hedges_g If `TRUE`, compute Hedge's g as index of effect size. #' Only applies to objects from `t.test()`. #' @param omega_squared,eta_squared,epsilon_squared Logical, if `TRUE`, #' returns the non-partial effect size Omega, Eta or Epsilon squared. Only #' applies to objects from `oneway.test()`. #' @param rank_biserial If `TRUE`, compute the rank-biserial correlation as #' effect size measure. Only applies to objects from `wilcox.test()`. #' @param rank_epsilon_squared If `TRUE`, compute the rank epsilon squared #' as effect size measure. Only applies to objects from `kruskal.test()`. #' @param kendalls_w If `TRUE`, compute the Kendall's coefficient of #' concordance as effect size measure. Only applies to objects from #' `friedman.test()`. #' @param ci Level of confidence intervals for effect size statistic. Currently #' only applies to objects from `chisq.test()` or `oneway.test()`. #' @inheritParams model_parameters.default #' @inheritParams model_parameters.aov #' @param ... Arguments passed to or from other methods. #' #' @examples #' model <- cor.test(mtcars$mpg, mtcars$cyl, method = "pearson") #' model_parameters(model) #' #' model <- t.test(iris$Sepal.Width, iris$Sepal.Length) #' model_parameters(model) #' #' model <- t.test(mtcars$mpg ~ mtcars$vs) #' model_parameters(model) #' #' model <- t.test(iris$Sepal.Width, mu = 1) #' model_parameters(model) #' #' data(airquality) #' airquality$Month <- factor(airquality$Month, labels = month.abb[5:9]) #' model <- pairwise.t.test(airquality$Ozone, airquality$Month) #' model_parameters(model) #' #' smokers <- c(83, 90, 129, 70) #' patients <- c(86, 93, 136, 82) #' model <- pairwise.prop.test(smokers, patients) #' model_parameters(model) #' @return A data frame of indices related to the model's parameters. 
#' @export
model_parameters.htest <- function(model,
                                   cramers_v = NULL,
                                   phi = NULL,
                                   standardized_d = NULL,
                                   hedges_g = NULL,
                                   omega_squared = NULL,
                                   eta_squared = NULL,
                                   epsilon_squared = NULL,
                                   cohens_g = NULL,
                                   rank_biserial = NULL,
                                   rank_epsilon_squared = NULL,
                                   kendalls_w = NULL,
                                   ci = .95,
                                   alternative = NULL,
                                   bootstrap = FALSE,
                                   verbose = TRUE,
                                   ...) {
  if (bootstrap) {
    stop("Bootstrapped h-tests are not yet implemented.")
  } else {
    # delegate extraction (plus optional effect sizes) to the internal helper
    parameters <- .extract_parameters_htest(
      model,
      cramers_v = cramers_v,
      phi = phi,
      standardized_d = standardized_d,
      hedges_g = hedges_g,
      omega_squared = omega_squared,
      eta_squared = eta_squared,
      epsilon_squared = epsilon_squared,
      cohens_g = cohens_g,
      rank_biserial = rank_biserial,
      rank_epsilon_squared = rank_epsilon_squared,
      kendalls_w = kendalls_w,
      ci = ci,
      alternative = alternative,
      verbose = verbose,
      ...
    )
  }

  # strip the continuity-correction note from the method label
  if (!is.null(parameters$Method)) {
    parameters$Method <- trimws(gsub("with continuity correction", "", parameters$Method))
  }

  # save alternative
  parameters$Alternative <- model$alternative

  parameters <- .add_htest_parameters_attributes(parameters, model, ci, ...)
  class(parameters) <- c("parameters_model", "see_parameters_model", class(parameters))
  parameters
}

#' @export
standard_error.htest <- function(model, ...) {
  # htest objects carry no standard errors
  NULL
}

#' @export
p_value.htest <- function(model, ...) {
  model$p.value
}

# .pairwise.htest --------------------

#' @rdname model_parameters.htest
#' @export
model_parameters.pairwise.htest <- function(model, verbose = TRUE, ...)
{
  # matrix of pairwise p-values; row/column names are the group labels
  m <- model$p.value
  parameters <- data.frame(
    Group1 = rep(rownames(m), each = ncol(m)),
    Group2 = rep(colnames(m), times = nrow(m)),
    p = as.numeric(t(m)),
    stringsAsFactors = FALSE
  )

  # drop NA rows (pair combinations not present in the p-value matrix)
  parameters <- stats::na.omit(parameters)

  parameters <- .add_htest_attributes(parameters, model, p_adjust = model$p.adjust.method)
  class(parameters) <- c("parameters_model", "see_parameters_model", class(parameters))
  parameters
}

# ==== extract parameters ====

#' @keywords internal
.extract_parameters_htest <- function(model,
                                      cramers_v = NULL,
                                      phi = NULL,
                                      standardized_d = NULL,
                                      hedges_g = NULL,
                                      omega_squared = NULL,
                                      eta_squared = NULL,
                                      epsilon_squared = NULL,
                                      cohens_g = NULL,
                                      rank_biserial = NULL,
                                      rank_epsilon_squared = NULL,
                                      kendalls_w = NULL,
                                      ci = 0.95,
                                      alternative = NULL,
                                      verbose = TRUE,
                                      ...) {
  # classify the htest object, then dispatch to the matching extractor
  m_info <- insight::model_info(model, verbose = FALSE)

  if (m_info$is_correlation) {
    # correlation ---------
    out <- .extract_htest_correlation(model)
  } else if (.is_levenetest(model)) {
    # levene's test ---------
    out <- .extract_htest_levenetest(model)
  } else if (m_info$is_ttest) {
    # t-test -----------
    out <- .extract_htest_ttest(model)
    out <- .add_effectsize_ttest(model,
      out,
      standardized_d,
      hedges_g,
      ci = ci,
      alternative = alternative,
      verbose = verbose,
      ...
    )
  } else if (m_info$is_ranktest) {
    # rank-test (kruskal / wilcox / friedman) -----------
    out <- .extract_htest_ranktest(model)
    # the method string decides which effect size applies
    if (grepl("^Wilcox", model$method)) {
      out <- .add_effectsize_rankbiserial(model,
        out,
        rank_biserial,
        ci = ci,
        verbose = verbose,
        ...
      )
    }
    if (grepl("^Kruskal", model$method)) {
      out <- .add_effectsize_rankepsilon(model,
        out,
        rank_epsilon_squared,
        ci = ci,
        verbose = verbose,
        ...
      )
    }
    if (grepl("^Friedman", model$method)) {
      out <- .add_effectsize_kendalls_w(model,
        out,
        kendalls_w,
        ci = ci,
        verbose = verbose,
        ...
) } } else if (m_info$is_onewaytest) { # one-way test ----------- out <- .extract_htest_oneway(model) out <- .add_effectsize_oneway( model, out, omega_squared, eta_squared, epsilon_squared, ci = ci, verbose = verbose ) } else if (m_info$is_chi2test) { # chi2- and mcnemar-test ----------- out <- .extract_htest_chi2(model) if (grepl("^McNemar", model$method)) { out <- .add_effectsize_mcnemar(model, out, cohens_g = cohens_g, ci = ci, verbose = verbose ) } else { out <- .add_effectsize_chi2( model, out, cramers_v = cramers_v, phi = phi, ci = ci, alternative = alternative, verbose = verbose ) } } else if (m_info$is_proptest) { # test of proportion -------------- out <- .extract_htest_prop(model) } else if (m_info$is_binomtest) { # exact binomial test -------------- out <- .extract_htest_binom(model) } else { stop("model_parameters not implemented for such h-tests yet.") } row.names(out) <- NULL out } # extract htest correlation ---------------------- .extract_htest_correlation <- function(model) { names <- unlist(strsplit(model$data.name, " (and|by) ")) out <- data.frame( "Parameter1" = names[1], "Parameter2" = names[2], stringsAsFactors = FALSE ) if (model$method == "Pearson's Chi-squared test") { out$Chi2 <- model$statistic out$df_error <- model$parameter out$p <- model$p.value } else if (grepl("Pearson", model$method, fixed = TRUE)) { out$r <- model$estimate out$t <- model$statistic out$df_error <- model$parameter out$p <- model$p.value out$CI_low <- model$conf.int[1] out$CI_high <- model$conf.int[2] } else if (grepl("Spearman", model$method, fixed = TRUE)) { out$rho <- model$estimate out$S <- model$statistic out$df_error <- model$parameter out$p <- model$p.value } else { out$tau <- model$estimate out$z <- model$statistic out$df_error <- model$parameter out$p <- model$p.value } out$Method <- model$method # reorder col_order <- c( "Parameter1", "Parameter2", "Parameter", "r", "rho", "tau", "CI_low", "CI_high", "t", "z", "S", "df_error", "p", "Method", "method" ) out 
<- out[col_order[col_order %in% names(out)]] out } # extract htest ranktest ---------------------- .extract_htest_ranktest <- function(model) { # survey if (grepl("design-based", tolower(model$method), fixed = TRUE)) { names <- gsub("~", "", unlist(strsplit(model$data.name, " + ", fixed = TRUE)), fixed = TRUE) out <- data.frame( "Parameter1" = names[1], "Parameter2" = names[2], "Statistic" = model$statistic[[1]], "df_error" = model$parameter[[1]], "Method" = model$method, "p" = model$p.value[[1]], stringsAsFactors = FALSE ) out$Method <- gsub("KruskalWallis", "Kruskal-Wallis", out$Method, fixed = TRUE) colnames(out)[colnames(out) == "Statistic"] <- names(model$statistic)[1] } else { if (grepl(" (and|by) ", model$data.name)) { names <- unlist(strsplit(model$data.name, " (and|by) ")) out <- data.frame( "Parameter1" = names[1], "Parameter2" = names[2], stringsAsFactors = FALSE ) } else { out <- data.frame( "Parameter" = model$data.name, stringsAsFactors = FALSE ) } if (grepl("Wilcoxon", model$method, fixed = TRUE)) { out$W <- model$statistic[[1]] out$df_error <- model$parameter[[1]] out$p <- model$p.value[[1]] } else if (grepl("Kruskal-Wallis", model$method, fixed = TRUE) || grepl("Friedman", model$method, fixed = TRUE)) { out$Chi2 <- model$statistic[[1]] out$df_error <- model$parameter[[1]] out$p <- model$p.value[[1]] } out$Method <- model$method } out } # extract htest leveneTest ---------------------- .extract_htest_levenetest <- function(model) { data.frame( "df" = model$Df[1], "df_error" = model$Df[2], `F` = model$`F value`[1], p = model$`Pr(>F)`[1], Method = "Levene's Test for Homogeneity of Variance", stringsAsFactors = FALSE ) } # extract htest ttest ---------------------- .extract_htest_ttest <- function(model, standardized_d = NULL, hedges_g = NULL) { # survey if (grepl("design-based", tolower(model$method), fixed = TRUE)) { names <- unlist(strsplit(model$data.name, " ~ ")) out <- data.frame( "Parameter1" = names[1], "Parameter2" = names[2], "Difference" = 
model$estimate[[1]],
      "t" = model$statistic[[1]],
      "df_error" = model$parameter[[1]],
      "Method" = model$method,
      "p" = model$p.value[[1]],
      stringsAsFactors = FALSE
    )
    out$Method <- gsub("KruskalWallis", "Kruskal-Wallis", out$Method, fixed = TRUE)
    colnames(out)[colnames(out) == "Statistic"] <- names(model$statistic)[1]
  } else {
    # a paired t-test reports a single estimate (the mean of the differences)
    paired_test <- grepl("^Paired", model$method) && length(model$estimate) == 1
    if (grepl(" and ", model$data.name) && isFALSE(paired_test)) {
      # two-sample test called with two vectors: data.name is "x and y"
      names <- unlist(strsplit(model$data.name, " and ", fixed = TRUE))
      out <- data.frame(
        "Parameter1" = names[1],
        "Parameter2" = names[2],
        "Mean_Parameter1" = model$estimate[1],
        "Mean_Parameter2" = model$estimate[2],
        "Difference" = model$estimate[1] - model$estimate[2],
        "CI_low" = model$conf.int[1],
        "CI_high" = model$conf.int[2],
        "t" = model$statistic,
        "df_error" = model$parameter,
        "p" = model$p.value,
        "Method" = model$method,
        stringsAsFactors = FALSE
      )
      attr(out, "mean_group_values") <- gsub("mean in group ", "", names(model$estimate), fixed = TRUE)
    } else if (isTRUE(paired_test)) {
      # paired test: single mean difference
      names <- unlist(strsplit(model$data.name, " (and|by) "))
      out <- data.frame(
        "Parameter" = names[1],
        "Group" = names[2],
        "Difference" = model$estimate,
        "t" = model$statistic,
        "df_error" = model$parameter,
        "p" = model$p.value,
        "CI_low" = model$conf.int[1],
        "CI_high" = model$conf.int[2],
        "Method" = model$method,
        stringsAsFactors = FALSE
      )
    } else if (grepl(" by ", model$data.name, fixed = TRUE)) {
      # formula interface: data.name is "outcome by group"
      if (length(model$estimate) == 1) {
        names <- unlist(strsplit(model$data.name, " by ", fixed = TRUE))
        # FIX: "CI_low"/"CI_high" were supplied twice in this data.frame()
        # call; data.frame()'s name-mangling turned the second pair into
        # bogus "CI_low.1"/"CI_high.1" columns. Keep only the first pair
        # (matrix-style conf.int indexing, which this branch relies on —
        # NOTE(review): confirm the conf.int shape against callers).
        out <- data.frame(
          "Parameter" = names[1],
          "Group" = names[2],
          "Difference" = model$estimate,
          "CI" = .95,
          "CI_low" = as.vector(model$conf.int[, 1]),
          "CI_high" = as.vector(model$conf.int[, 2]),
          "t" = model$statistic,
          "df_error" = model$parameter,
          "p" = model$p.value,
          "Method" = model$method,
          stringsAsFactors = FALSE
        )
      } else {
        names <- unlist(strsplit(model$data.name, " by ", fixed =
TRUE)) out <- data.frame( "Parameter" = names[1], "Group" = names[2], "Mean_Group1" = model$estimate[1], "Mean_Group2" = model$estimate[2], "Difference" = model$estimate[1] - model$estimate[2], "CI_low" = model$conf.int[1], "CI_high" = model$conf.int[2], "t" = model$statistic, "df_error" = model$parameter, "p" = model$p.value, "Method" = model$method, stringsAsFactors = FALSE ) attr(out, "mean_group_values") <- gsub("mean in group ", "", names(model$estimate), fixed = TRUE) } } else { out <- data.frame( "Parameter" = model$data.name, "Mean" = model$estimate, "mu" = model$null.value, "Difference" = model$estimate - model$null.value, "CI_low" = model$conf.int[1], "CI_high" = model$conf.int[2], "t" = model$statistic, "df_error" = model$parameter, "p" = model$p.value, "Method" = model$method, stringsAsFactors = FALSE ) } } attr(out, "htest_type") <- "ttest" out } # extract htest oneway ---------------------- .extract_htest_oneway <- function(model) { data.frame( "F" = model$statistic, "df" = model$parameter[1], "df_error" = model$parameter[2], "p" = model$p.value, "Method" = model$method, stringsAsFactors = FALSE ) } # extract htest chi2 ---------------------- .extract_htest_chi2 <- function(model) { # survey-chisq-test if (("observed" %in% names(model) && inherits(model$observed, "svytable")) || grepl("^svychisq", model$data.name)) { if (grepl("Pearson's X", model$method, fixed = TRUE)) { model$method <- gsub("(Pearson's X\\^2: )(.*)", "Pearson's Chi2 \\(\\2\\)", model$method) } if (names(model$statistic) == "F") { data.frame( "F" = model$statistic, "df" = model$parameter[1], "df_error" = model$parameter[2], "p" = model$p.value, "Method" = model$method, stringsAsFactors = FALSE ) } else { data.frame( "Chi2" = model$statistic, "df" = model$parameter, "p" = model$p.value, "Method" = model$method, stringsAsFactors = FALSE ) } } else { if (!is.null(model$estimate) && identical(names(model$estimate), "odds ratio")) { data.frame( "Odds Ratio" = model$estimate, # "CI" = 
attributes(model$conf.int)$conf.level, "CI_low" = model$conf.int[1], "CI_high" = model$conf.int[2], "p" = model$p.value, "Method" = model$method, stringsAsFactors = FALSE ) } else { data.frame( "Chi2" = model$statistic, "df" = model$parameter, "p" = model$p.value, "Method" = model$method, stringsAsFactors = FALSE ) } } } # extract htest prop ---------------------- .extract_htest_prop <- function(model) { out <- data.frame( Proportion = paste0(insight::format_value(model$estimate, as_percent = TRUE), collapse = " / "), stringsAsFactors = FALSE ) if (length(model$estimate) == 2) { out$Difference <- insight::format_value( abs(model$estimate[1] - model$estimate[2]), as_percent = TRUE ) } if (!is.null(model$conf.int)) { out$CI_low <- model$conf.int[1] out$CI_high <- model$conf.int[2] } out$Chi2 <- model$statistic out$df <- model$parameter[1] out$Null_value <- model$null.value out$p <- model$p.value out$Method <- model$method out } # extract htest binom ---------------------- .extract_htest_binom <- function(model) { out <- data.frame( "Probability" = model$estimate, "CI_low" = model$conf.int[1], "CI_high" = model$conf.int[2], "Success" = model$statistic, "Trials" = model$parameter, stringsAsFactors = FALSE ) out$Null_value <- model$null.value out$p <- model$p.value out$Method <- model$method out } # ==== effectsizes ===== .add_effectsize_chi2 <- function(model, out, cramers_v = NULL, phi = NULL, ci = .95, alternative = NULL, verbose = TRUE) { if (!requireNamespace("effectsize", quietly = TRUE) || (is.null(cramers_v) && is.null(phi))) { return(out) } if (!is.null(cramers_v)) { # Cramers V es <- effectsize::effectsize( model, type = "cramers_v", ci = ci, alternative = alternative, adjust = identical(cramers_v, "adjusted"), verbose = verbose ) es$CI <- NULL ci_cols <- grepl("^CI", names(es)) names(es)[ci_cols] <- paste0("Cramers_", names(es)[ci_cols]) out <- cbind(out, es) } if (!is.null(phi)) { # Phi es <- effectsize::effectsize( model, type = "phi", ci = ci, alternative 
= alternative, adjust = identical(phi, "adjusted"), verbose = verbose ) es$CI <- NULL ci_cols <- grepl("^CI", names(es)) names(es)[ci_cols] <- paste0("phi_", names(es)[ci_cols]) out <- cbind(out, es) } # reorder col_order <- c( "Chi2", "df", "df_error", "Cramers_v", "Cramers_v_adjusted", "Cramers_CI_low", "Cramers_CI_high", "phi", "phi_adjusted", "phi_CI_low", "phi_CI_high", "p", "Method", "method" ) out <- out[col_order[col_order %in% names(out)]] out } .add_effectsize_mcnemar <- function(model, out, cohens_g = NULL, ci = .95, verbose = TRUE) { if (is.null(cohens_g)) { return(out) } if (requireNamespace("effectsize", quietly = TRUE)) { es <- effectsize::effectsize(model, type = "cohens_g", ci = ci, verbose = verbose) es$CI <- NULL ci_cols <- grepl("^CI", names(es)) names(es)[ci_cols] <- paste0("Cohens_", names(es)[ci_cols]) out <- cbind(out, es) } # reorder col_order <- c( "Chi2", "df", "df_error", "Cohens_g", "g", "Cohens_CI_low", "Cohens_CI_high", "p", "Method", "method" ) out <- out[col_order[col_order %in% names(out)]] out } .add_effectsize_ttest <- function(model, out, standardized_d = NULL, hedges_g = NULL, ci = .95, alternative = NULL, verbose = TRUE, ...) { if (is.null(standardized_d) && is.null(hedges_g)) { return(out) } if (requireNamespace("effectsize", quietly = TRUE)) { # standardized d if (!is.null(standardized_d)) { es <- effectsize::effectsize( model, type = "cohens_d", ci = ci, alternative = alternative, verbose = verbose, ... ) es$CI <- NULL ci_cols <- grepl("^CI", names(es)) names(es)[ci_cols] <- paste0("d_", names(es)[ci_cols]) out <- cbind(out, es) } # Hedge's g if (!is.null(hedges_g)) { es <- effectsize::effectsize( model, type = "hedges_g", ci = ci, alternative = alternative, verbose = verbose, ... 
) es$CI <- NULL ci_cols <- grepl("^CI", names(es)) names(es)[ci_cols] <- paste0("g_", names(es)[ci_cols]) out <- cbind(out, es) } } # reorder col_order <- c( "Parameter1", "Parameter2", "Parameter", "Group", "Mean_Parameter1", "Mean_Parameter2", "Mean_Group1", "Mean_Group2", "mu", "Difference", "CI_low", "CI_high", "t", "df_error", "d", "Cohens_d", "d_CI_low", "d_CI_high", "g", "Hedges_g", "g_CI_low", "g_CI_high", "p", "Method", "method" ) out <- out[col_order[col_order %in% names(out)]] out } .add_effectsize_rankbiserial <- function(model, out, rank_biserial = NULL, ci = .95, verbose = TRUE, ...) { if (is.null(rank_biserial)) { return(out) } if (requireNamespace("effectsize", quietly = TRUE)) { es <- effectsize::effectsize(model, type = "r_rank_biserial", ci = ci, verbose = verbose, ... ) es$CI <- NULL ci_cols <- grepl("^CI", names(es)) names(es)[ci_cols] <- paste0("rank_biserial_", names(es)[ci_cols]) out <- cbind(out, es) } # reorder col_order <- c( "Parameter1", "Parameter2", "Parameter", "W", "r_rank_biserial", "CI", "rank_biserial_CI_low", "rank_biserial_CI_high", "p", "Method", "method" ) out <- out[col_order[col_order %in% names(out)]] out } .add_effectsize_rankepsilon <- function(model, out, rank_epsilon_squared = NULL, ci = .95, verbose = TRUE, ...) { if (is.null(rank_epsilon_squared)) { return(out) } if (requireNamespace("effectsize", quietly = TRUE)) { es <- effectsize::effectsize(model, type = "rank_epsilon_squared", ci = ci, verbose = verbose, ... ) es$CI <- NULL ci_cols <- grepl("^CI", names(es)) names(es)[ci_cols] <- paste0("rank_epsilon_squared_", names(es)[ci_cols]) out <- cbind(out, es) } # reorder col_order <- c( "Parameter1", "Parameter2", "Parameter", "Chi2", "df_error", "rank_epsilon_squared", "CI", "rank_epsilon_squared_CI_low", "rank_epsilon_squared_CI_high", "p", "Method", "method" ) out <- out[col_order[col_order %in% names(out)]] out } .add_effectsize_kendalls_w <- function(model, out, kendalls_w = NULL, ci = .95, verbose = TRUE, ...) 
{ if (is.null(kendalls_w)) { return(out) } if (requireNamespace("effectsize", quietly = TRUE)) { es <- effectsize::effectsize(model, type = "kendalls_w", ci = ci, verbose = verbose, ... ) es$CI <- NULL ci_cols <- grepl("^CI", names(es)) names(es)[ci_cols] <- paste0("Kendalls_W_", names(es)[ci_cols]) out <- cbind(out, es) } # reorder col_order <- c( "Parameter1", "Parameter2", "Parameter", "Chi2", "df_error", "Kendalls_W", "CI", "Kendalls_W_CI_low", "Kendalls_W_CI_high", "p", "Method", "method" ) out <- out[col_order[col_order %in% names(out)]] out } .add_effectsize_oneway <- function(model, out, omega_squared = NULL, eta_squared = NULL, epsilon_squared = NULL, ci = .95, verbose = TRUE) { if (is.null(omega_squared) && is.null(eta_squared) && is.null(epsilon_squared)) { return(out) } if (requireNamespace("effectsize", quietly = TRUE)) { # omega_squared if (!is.null(omega_squared)) { es <- effectsize::effectsize(model, ci = ci, type = "omega", partial = TRUE, verbose = verbose) es$CI <- NULL ci_cols <- grepl("^CI", names(es)) names(es)[ci_cols] <- paste0("Omega2_", names(es)[ci_cols]) out <- cbind(out, es) } # eta squared if (!is.null(eta_squared)) { es <- effectsize::effectsize(model, ci = ci, type = "eta", partial = TRUE, verbose = verbose) es$CI <- NULL ci_cols <- grepl("^CI", names(es)) names(es)[ci_cols] <- paste0("Eta2_", names(es)[ci_cols]) out <- cbind(out, es) } # epsilon squared if (!is.null(epsilon_squared)) { es <- effectsize::effectsize(model, ci = ci, type = "epsilon", partial = TRUE, verbose = verbose) es$CI <- NULL ci_cols <- grepl("^CI", names(es)) names(es)[ci_cols] <- paste0("Epsilon2_", names(es)[ci_cols]) out <- cbind(out, es) } } # reorder col_order <- c( "F", "df", "df_error", "Eta2", "Eta2_CI_low", "Eta2_CI_high", "Omega2", "Omega2_CI_low", "Omega2_CI_high", "Epsilon2", "Epsilon2_CI_low", "Epsilon2_CI_high", "p", "Method", "method" ) out <- out[col_order[col_order %in% names(out)]] out } # ==== add attributes ==== 
.add_htest_parameters_attributes <- function(params, model, ci = 0.95, ...) { attr(params, "title") <- unique(params$Method) attr(params, "model_class") <- class(model) attr(params, "alternative") <- model$alternative if (!is.null(model$alternative)) { h1_text <- "Alternative hypothesis: " if (!is.null(model$null.value)) { if (length(model$null.value) == 1L) { alt.char <- switch(model$alternative, two.sided = "not equal to", less = "less than", greater = "greater than" ) h1_text <- paste0(h1_text, "true ", names(model$null.value), " is ", alt.char, " ", model$null.value) } else { h1_text <- paste0(h1_text, model$alternative) } } else { h1_text <- paste0(h1_text, model$alternative) } attr(params, "text_alternative") <- h1_text } dot.arguments <- lapply(match.call(expand.dots = FALSE)$`...`, function(x) x) if ("digits" %in% names(dot.arguments)) { attr(params, "digits") <- eval(dot.arguments[["digits"]]) } else { attr(params, "digits") <- 2 } if ("ci_digits" %in% names(dot.arguments)) { attr(params, "ci_digits") <- eval(dot.arguments[["ci_digits"]]) } else { attr(params, "ci_digits") <- 2 } if ("p_digits" %in% names(dot.arguments)) { attr(params, "p_digits") <- eval(dot.arguments[["p_digits"]]) } else { attr(params, "p_digits") <- 3 } attr(params, "ci") <- ci attr(params, "ci_test") <- attributes(model$conf.int)$conf.level # add CI, and reorder if (!"CI" %in% colnames(params) && length(ci) == 1) { ci_pos <- grep("CI_low", colnames(params), fixed = TRUE) if (length(ci_pos)) { if (length(ci_pos) > 1) { ci_pos <- ci_pos[1] } params$CI <- ci a <- attributes(params) params <- params[c(1:(ci_pos - 1), ncol(params), ci_pos:(ncol(params) - 1))] attributes(params) <- utils::modifyList(a, attributes(params)) } } params } #' @keywords internal .add_htest_attributes <- function(params, model, p_adjust = NULL, verbose = TRUE, ...) 
{ dot.arguments <- lapply(match.call(expand.dots = FALSE)$`...`, function(x) x) attr(params, "p_adjust") <- p_adjust attr(params, "model_class") <- class(model) attr(params, "title") <- params$Method if ("digits" %in% names(dot.arguments)) { attr(params, "digits") <- eval(dot.arguments[["digits"]]) } else { attr(params, "digits") <- 2 } if ("ci_digits" %in% names(dot.arguments)) { attr(params, "ci_digits") <- eval(dot.arguments[["ci_digits"]]) } else { attr(params, "ci_digits") <- 2 } if ("p_digits" %in% names(dot.arguments)) { attr(params, "p_digits") <- eval(dot.arguments[["p_digits"]]) } else { attr(params, "p_digits") <- 3 } if ("s_value" %in% names(dot.arguments)) { attr(params, "s_value") <- eval(dot.arguments[["s_value"]]) } params } parameters/R/methods_ergm.R0000644000175000017500000000140114012467213015544 0ustar nileshnilesh# .ergm, btergm ----------------------- #' @export ci.btergm <- function(x, ci = .95, ...) { as.data.frame(ci(as.data.frame(x@boot$t), ci = ci, ...)) } #' @export standard_error.btergm <- function(model, ...) { cf <- model@coef bootstraps <- model@boot$t sdev <- sapply(1:ncol(bootstraps), function(i) { cur <- (bootstraps[, i] - cf[i])^2 sqrt(sum(cur) / length(cur)) }) .data_frame( Parameter = insight::find_parameters(model, flatten = TRUE), SE = as.vector(sdev) ) } #' @export p_value.btergm <- function(model, ...) 
{
  # two-sided p-values via a normal approximation of the test statistic
  stat <- insight::get_statistic(model)
  pval <- 2 * stats::pnorm(abs(stat$Statistic), lower.tail = FALSE)
  .data_frame(
    Parameter = stat$Parameter,
    p = pval
  )
}
parameters/R/methods_pscl.R0000644000175000017500000001163314131014353015566 0ustar nileshnilesh
# .zeroinfl, .hurdle, .zerocount

# model parameters -----------------

#' @export
model_parameters.zeroinfl <- model_parameters.zcpglm

#' @export
model_parameters.hurdle <- model_parameters.zcpglm

#' @export
model_parameters.zerocount <- model_parameters.zcpglm

# ci -----------------

#' @export
ci.zeroinfl <- ci.glmmTMB

#' @export
ci.hurdle <- ci.glmmTMB

#' @export
ci.zerocount <- ci.glmmTMB

# standard error -----------------

#' @rdname standard_error
#' @export
standard_error.zeroinfl <- function(model,
                                    component = c("all", "conditional", "zi", "zero_inflated"),
                                    method = NULL,
                                    verbose = TRUE,
                                    ...) {
  component <- match.arg(component)
  # bail out early when the requested component does not exist in the model
  if (is.null(.check_component(model, component, verbose = verbose))) {
    return(NULL)
  }

  robust <- !is.null(method) && method == "robust"
  if (isTRUE(robust)) {
    return(standard_error_robust(model, ...))
  }

  # coef(summary()) yields one coefficient table per component;
  # "count" maps to the conditional part, anything else to zero-inflation
  cs <- .compact_list(stats::coef(summary(model)))
  x <- lapply(names(cs), function(i) {
    comp <- ifelse(i == "count", "conditional", "zi")
    stats <- cs[[i]]
    # remove log(theta)
    theta <- grepl("Log(theta)", rownames(stats), fixed = TRUE)
    if (any(theta)) {
      stats <- stats[!theta, ]
    }

    .data_frame(
      Parameter = insight::find_parameters(model, effects = "fixed", component = comp, flatten = TRUE),
      SE = as.vector(stats[, 2]),
      Component = comp
    )
  })

  se <- do.call(rbind, x)
  se$Component <- .rename_values(se$Component, "cond", "conditional")
  se$Component <- .rename_values(se$Component, "zi", "zero_inflated")

  .filter_component(se, component)
}

#' @export
standard_error.hurdle <- standard_error.zeroinfl

#' @export
standard_error.zerocount <- standard_error.zeroinfl

# p values -----------------------

#' @rdname p_value.zcpglm
#' @export
p_value.zeroinfl <- function(model, component = c("all", "conditional",
"zi", "zero_inflated"), method = NULL, verbose = TRUE, ...) { component <- match.arg(component) if (is.null(.check_component(model, component, verbose = verbose))) { return(NULL) } robust <- !is.null(method) && method == "robust" if (isTRUE(robust)) { return(p_value_robust(model, ...)) } cs <- .compact_list(stats::coef(summary(model))) x <- lapply(names(cs), function(i) { comp <- ifelse(i == "count", "conditional", "zi") stats <- cs[[i]] # remove log(theta) theta <- grepl("Log(theta)", rownames(stats), fixed = TRUE) if (any(theta)) { stats <- stats[!theta, ] } .data_frame( Parameter = insight::find_parameters(model, effects = "fixed", component = comp, flatten = TRUE), p = as.vector(stats[, 4]), Component = comp ) }) p <- do.call(rbind, x) p$Component <- .rename_values(p$Component, "cond", "conditional") p$Component <- .rename_values(p$Component, "zi", "zero_inflated") .filter_component(p, component) } #' @export p_value.hurdle <- p_value.zeroinfl #' @export p_value.zerocount <- p_value.zeroinfl # simulate model ----------------- #' @export simulate_model.zeroinfl <- simulate_model.glmmTMB #' @export simulate_model.hurdle <- simulate_model.zeroinfl #' @export simulate_model.zerocount <- simulate_model.zeroinfl # simulate paramaters ----------------- #' @export simulate_parameters.zeroinfl <- function(model, iterations = 1000, centrality = "median", ci = .95, ci_method = "quantile", test = "p-value", ...) { data <- simulate_model(model, iterations = iterations, ...) out <- .summary_bootstrap( data = data, test = test, centrality = centrality, ci = ci, ci_method = ci_method, ... 
) params <- insight::get_parameters(model) if ("Effects" %in% colnames(params) && .n_unique(params$Effects) > 1) { out$Effects <- params$Effects } if ("Component" %in% colnames(params) && .n_unique(params$Component) > 1) { out$Component <- params$Component } if (inherits(model, c("zeroinfl", "hurdle", "zerocount"))) { out$Parameter <- gsub("^(count_|zero_)", "", out$Parameter) } class(out) <- c("parameters_simulate", "see_parameters_simulate", class(out)) attr(out, "object_name") <- deparse(substitute(model), width.cutoff = 500) attr(out, "iterations") <- iterations attr(out, "ci") <- ci out } #' @export simulate_parameters.hurdle <- simulate_parameters.zeroinfl #' @export simulate_parameters.zerocount <- simulate_parameters.zeroinfl parameters/R/4_standard_error.R0000644000175000017500000001212514133063760016333 0ustar nileshnilesh#' Standard Errors #' #' `standard_error()` attempts to return standard errors of model #' parameters, while `standard_error_robust()` attempts to return robust #' standard errors. #' #' @param model A model. #' @param force Logical, if `TRUE`, factors are converted to numerical #' values to calculate the standard error, with the lowest level being the #' value `1` (unless the factor has numeric levels, which are converted #' to the corresponding numeric value). By default, `NA` is returned for #' factors or character vectors. #' @param method If `"robust"`, robust standard errors are computed by #' calling [`standard_error_robust()`][standard_error_robust]. #' `standard_error_robust()`, in turn, calls one of the #' `vcov*()`-functions from the \pkg{sandwich} or \pkg{clubSandwich} #' package for robust covariance matrix estimators. For linear mixed models, #' `method` may also be [`"kenward"`][p_value_kenward] or #' [`"satterthwaite"`][p_value_satterthwaite]. #' @param ... Arguments passed to or from other methods. 
For #' `standard_error()`, if `method = "robust"`, arguments #' `vcov_estimation`, `vcov_type` and `vcov_args` can be passed #' down to [`standard_error_robust()`][standard_error_robust]. #' @param effects Should standard errors for fixed effects or random effects be #' returned? Only applies to mixed models. May be abbreviated. When standard #' errors for random effects are requested, for each grouping factor a list of #' standard errors (per group level) for random intercepts and slopes is #' returned. #' @inheritParams simulate_model #' @inheritParams p_value #' #' @note For Bayesian models (from \pkg{rstanarm} or \pkg{brms}), the standard #' error is the SD of the posterior samples. #' #' @return A data frame with at least two columns: the parameter names and the #' standard errors. Depending on the model, may also include columns for model #' components etc. #' #' @examples #' model <- lm(Petal.Length ~ Sepal.Length * Species, data = iris) #' standard_error(model) #' @export standard_error <- function(model, ...) { UseMethod("standard_error") } # Default methods --------------------------------------------------------- ## TODO use "robust" argument instead of method = "robust" #' @rdname standard_error #' @export standard_error.default <- function(model, method = NULL, verbose = TRUE, ...) { if (!is.null(method)) { method <- tolower(method) } else { method <- "wald" } if (method == "robust") { standard_error_robust(model, ...) 
} else { se <- tryCatch( { if (grepl("^Zelig-", class(model)[1])) { unlist(model$get_se()) } else { .get_se_from_summary(model) } }, error = function(e) { NULL } ) # if all fails, try to get se from varcov if (is.null(se)) { se <- tryCatch( { varcov <- insight::get_varcov(model) se_from_varcov <- sqrt(diag(varcov)) names(se_from_varcov) <- colnames(varcov) se_from_varcov }, error = function(e) { NULL } ) } if (is.null(se)) { if (isTRUE(verbose)) { insight::print_color("\nCould not extract standard errors from model object.\n", "red") } } else { .data_frame( Parameter = names(se), SE = as.vector(se) ) } } } # helper ----------------------------------------------------------------- .get_se_from_summary <- function(model, component = NULL) { cs <- stats::coef(summary(model)) se <- NULL if (is.list(cs) && !is.null(component)) cs <- cs[[component]] if (!is.null(cs)) { # do we have a se column? se_col <- which(colnames(cs) == "Std. Error") # if not, default to 2 if (length(se_col) == 0) se_col <- 2 se <- as.vector(cs[, se_col]) if (is.null(names(se))) { coef_names <- rownames(cs) if (length(coef_names) == length(se)) names(se) <- coef_names } } names(se) <- .remove_backticks_from_string(names(se)) se } # .ranef_se <- function(x) { # insight::check_if_installed("lme4") # # cc <- stats::coef(model) # # # get names of intercepts # inames <- names(cc) # # # variances of fixed effects # fixed.vars <- diag(as.matrix(stats::vcov(model))) # # # extract variances of conditional modes # r1 <- lme4::ranef(model, condVar = TRUE) # # # we may have multiple random intercepts, iterate all # se.merMod <- lapply(1:length(cc), function(i) { # cmode.vars <- t(apply(attr(r1[[i]], "postVar"), 3, diag)) # seVals <- sqrt(sweep(cmode.vars, 2, fixed.vars[names(r1[[i]])], "+", check.margin = FALSE)) # # if (length(r1[[i]]) == 1) { # seVals <- as.data.frame(t(seVals)) # stats::setNames(seVals, names(r1[[i]])) # } else { # seVals <- seVals[, 1:2] # stats::setNames(as.data.frame(seVals), 
#                       names(r1[[i]]))
#     }
#   })
#
#   # set names of list
#   names(se.merMod) <- inames
#
#   se.merMod
# }
parameters/R/methods_lme4.R0000644000175000017500000002631014166656741015501 0ustar  nileshnilesh############# .merMod -----------------


#' @title Parameters from Mixed Models
#' @name model_parameters.merMod
#'
#' @description Parameters from (linear) mixed models.
#'
#' @param model A mixed model.
#' @param effects Should parameters for fixed effects (`"fixed"`), random
#'   effects (`"random"`), or both (`"all"`) be returned? Only applies
#'   to mixed models. May be abbreviated. If the calculation of random effects
#'   parameters takes too long, you may use `effects = "fixed"`.
#' @param wb_component Logical, if `TRUE` and models contains within- and
#'   between-effects (see `datawizard::demean()`), the `Component` column
#'   will indicate which variables belong to the within-effects,
#'   between-effects, and cross-level interactions. By default, the
#'   `Component` column indicates, which parameters belong to the
#'   conditional or zero-inflated component of the model.
#' @param include_sigma Logical, if `TRUE`, includes the residual standard
#'   deviation. For mixed models, this is defined as the sum of the distribution-specific
#'   variance and the variance for the additive overdispersion term (see
#'   [insight::get_variance()] for details). Defaults to `FALSE` for mixed models
#'   due to the longer computation time.
#' @inheritParams model_parameters.default
#' @inheritParams model_parameters.stanreg
#'
#' @inheritSection model_parameters Confidence intervals and approximation of degrees of freedom
#'
#' @section Confidence intervals for random effect variances:
#' For models of class `merMod` and `glmmTMB`, confidence intervals for random
#' effect variances can be calculated. For models of class `lme4`, when
#' `ci_method` is either `"profile"` or `"boot"`, and `effects` is either
#' `"random"` or `"all"`, profiled resp. bootstrapped confidence intervals are
#' computed for the random effects. For all other options of `ci_method`,
#' confidence intervals for random effects will be missing. For models of class
#' `glmmTMB`, confidence intervals for random effect variances always use a
#' Wald t-distribution approximation.
#'
#' @seealso [insight::standardize_names()] to
#'   rename columns into a consistent, standardized naming scheme.
#'
#' @note If the calculation of random effects parameters takes too long, you may
#'   use `effects = "fixed"`. There is also a [`plot()`-method](https://easystats.github.io/see/articles/parameters.html) implemented in the \href{https://easystats.github.io/see/}{\pkg{see}-package}.
#'
#' @examples
#' library(parameters)
#' if (require("lme4")) {
#'   data(mtcars)
#'   model <- lmer(mpg ~ wt + (1 | gear), data = mtcars)
#'   model_parameters(model)
#' }
#' \donttest{
#' if (require("glmmTMB")) {
#'   data(Salamanders)
#'   model <- glmmTMB(
#'     count ~ spp + mined + (1 | site),
#'     ziformula = ~mined,
#'     family = poisson(),
#'     data = Salamanders
#'   )
#'   model_parameters(model, effects = "all")
#' }
#'
#' if (require("lme4")) {
#'   model <- lmer(mpg ~ wt + (1 | gear), data = mtcars)
#'   model_parameters(model, bootstrap = TRUE, iterations = 50)
#' }
#' }
#' @return A data frame of indices related to the model's parameters.
#' @export
model_parameters.merMod <- function(model,
                                    ci = .95,
                                    bootstrap = FALSE,
                                    ci_method = NULL,
                                    iterations = 1000,
                                    standardize = NULL,
                                    effects = "all",
                                    group_level = FALSE,
                                    exponentiate = FALSE,
                                    robust = FALSE,
                                    p_adjust = NULL,
                                    wb_component = TRUE,
                                    summary = FALSE,
                                    keep = NULL,
                                    drop = NULL,
                                    parameters = keep,
                                    verbose = TRUE,
                                    df_method = ci_method,
                                    include_sigma = FALSE,
                                    ...) {
  ## TODO remove later
  # `df_method` is the deprecated name of `ci_method`; warn and map it over
  if (!missing(df_method) && !identical(ci_method, df_method)) {
    warning(insight::format_message("Argument 'df_method' is deprecated. Please use 'ci_method' instead."), call.
= FALSE)
    ci_method <- df_method
  }

  # set default
  # (bootstrapped parameters default to quantile intervals; otherwise the
  # default depends on the model's test statistic)
  if (is.null(ci_method)) {
    if (isTRUE(bootstrap)) {
      ci_method <- "quantile"
    } else {
      ci_method <- switch(insight::find_statistic(model),
        "t-statistic" = "residual",
        "wald"
      )
    }
  }

  # p-values, CI and se might be based of wald, or KR
  ci_method <- tolower(ci_method)

  if (isTRUE(bootstrap)) {
    ci_method <- match.arg(ci_method, c("hdi", "quantile", "ci", "eti", "si", "bci", "bcai"))
  } else {
    ci_method <- match.arg(ci_method, choices = c("wald", "normal", "residual", "ml1", "betwithin", "satterthwaite", "kenward", "kr", "boot", "profile", "uniroot"))
  }

  # which component to return?
  effects <- match.arg(effects, choices = c("fixed", "random", "all"))
  params <- params_random <- params_variance <- NULL

  # post hoc standardize only works for fixed effects...
  if (!is.null(standardize) && standardize != "refit") {
    if (!missing(effects) && effects != "fixed" && verbose) {
      warning(insight::format_message("Standardizing coefficients only works for fixed effects of the mixed model."), call. = FALSE)
    }
    effects <- "fixed"
  }

  # for refit, we completely refit the model, than extract parameters,
  # ci etc. as usual - therefor, we set "standardize" to NULL
  if (!is.null(standardize) && standardize == "refit") {
    model <- datawizard::standardize(model, verbose = FALSE)
    standardize <- NULL
  }

  if (effects %in% c("fixed", "all")) {
    # Processing
    if (bootstrap) {
      params <- bootstrap_parameters(
        model,
        iterations = iterations,
        ci = ci,
        ...
      )
      if (effects != "fixed") {
        effects <- "fixed"
        if (verbose) {
          warning(insight::format_message("Bootstrapping only returns fixed effects of the mixed model."), call. = FALSE)
        }
      }
    } else {
      params <- .extract_parameters_mixed(
        model,
        ci = ci,
        ci_method = ci_method,
        robust = robust,
        standardize = standardize,
        p_adjust = p_adjust,
        wb_component = wb_component,
        keep_parameters = keep,
        drop_parameters = drop,
        verbose = verbose,
        include_sigma = include_sigma,
        summary = summary,
        ...
      )
    }

    params$Effects <- "fixed"

    if (isTRUE(exponentiate) || identical(exponentiate, "nongaussian")) {
      params <- .exponentiate_parameters(params, model, exponentiate)
    }
  }

  # keep a copy of the attributes set during extraction; rbind() below
  # drops them, so they are restored afterwards
  att <- attributes(params)

  if (effects %in% c("random", "all") && isTRUE(group_level)) {
    params_random <- .extract_random_parameters(model, ci = ci, effects = effects)
  }

  if (effects %in% c("random", "all") && isFALSE(group_level)) {
    params_variance <- .extract_random_variances(model, ci = ci, effects = effects, ci_method = ci_method)
  }

  # merge random and fixed effects, if necessary
  if (!is.null(params) && (!is.null(params_random) || !is.null(params_variance))) {
    params$Level <- NA
    params$Group <- ""
    # align fixed-effects columns with the random-effects column order
    if (!is.null(params_random)) {
      params <- params[match(colnames(params_random), colnames(params))]
    } else {
      params <- params[match(colnames(params_variance), colnames(params))]
    }
  }

  params <- rbind(params, params_random, params_variance)
  # remove empty column
  if (!is.null(params$Level) && all(is.na(params$Level))) {
    params$Level <- NULL
  }

  # due to rbind(), we lose attributes from "extract_parameters()",
  # so we add those attributes back here...
  if (!is.null(att)) {
    attributes(params) <- utils::modifyList(att, attributes(params))
  }

  params <- .add_model_parameters_attributes(
    params,
    model,
    ci = ci,
    exponentiate,
    bootstrap,
    iterations,
    ci_method = ci_method,
    p_adjust = p_adjust,
    verbose = verbose,
    summary = summary,
    group_level = group_level,
    wb_component = wb_component,
    ...
  )

  attr(params, "object_name") <- deparse(substitute(model), width.cutoff = 500)
  class(params) <- c("parameters_model", "see_parameters_model", class(params))

  params
}


#' @rdname ci.default
#' @export
ci.merMod <- function(x,
                      ci = 0.95,
                      dof = NULL,
                      method = "wald",
                      robust = FALSE,
                      iterations = 500,
                      ...)
{
  method <- tolower(method)
  method <- match.arg(method, choices = c(
    "wald", "ml1", "betwithin", "kr",
    "satterthwaite", "kenward", "boot",
    "profile", "residual", "normal"
  ))

  # bootstrapping
  if (method == "boot") {
    # one set of intervals per requested ci-level
    out <- lapply(ci, function(ci, x) .ci_boot_merMod(x, ci, iterations, ...), x = x)
    out <- do.call(rbind, out)
    row.names(out) <- NULL

    # profiled CIs
  } else if (method == "profile") {
    # profile the fixed effects ("beta_") once, reuse for all ci-levels
    pp <- suppressWarnings(stats::profile(x, which = "beta_"))
    out <- lapply(ci, function(i) .ci_profile_merMod(x, ci = i, profiled = pp, ...))
    out <- do.call(rbind, out)

    # all others
  } else {
    out <- .ci_generic(model = x, ci = ci, dof = dof, method = method, robust = robust, ...)
  }

  out
}


#' @rdname standard_error
#' @export
standard_error.merMod <- function(model,
                                  effects = c("fixed", "random"),
                                  method = NULL,
                                  ...) {
  effects <- match.arg(effects)

  if (effects == "random") {
    .standard_errors_random(model)
  } else {
    if (is.null(method)) method <- "wald"
    robust <- !is.null(method) && method == "robust"

    if (isTRUE(robust)) {
      standard_error_robust(model, ...)
    } else {
      # kenward approx
      if (method %in% c("kenward", "kr")) {
        se_kenward(model)
      } else {
        # Classic and Satterthwaite SE
        se_mixed_default(model)
      }
    }
  }
}


# helpers --------------

# Per-group-level SEs for random effects: square roots of the diagonal of
# the conditional variances ("postVar") returned by lme4::ranef().
# Returns a list (one element per grouping factor) of J x K matrices
# (group levels x random terms).
.standard_errors_random <- function(model) {
  insight::check_if_installed("lme4")

  rand.se <- lme4::ranef(model, condVar = TRUE)
  n.groupings <- length(rand.se)

  for (m in 1:n.groupings) {
    vars.m <- attr(rand.se[[m]], "postVar")

    K <- dim(vars.m)[1]
    J <- dim(vars.m)[3]

    names.full <- dimnames(rand.se[[m]])
    rand.se[[m]] <- array(NA, c(J, K))

    for (j in 1:J) {
      rand.se[[m]][j, ] <- sqrt(diag(as.matrix(vars.m[, , j])))
    }
    dimnames(rand.se[[m]]) <- list(names.full[[1]], names.full[[2]])
  }
  rand.se
}


# Classic summary()-based SEs for the fixed (conditional) part of the model.
se_mixed_default <- function(model) {
  params <- insight::find_parameters(model,
    effects = "fixed",
    component = "conditional",
    flatten = TRUE
  )
  .data_frame(Parameter = params, SE = .get_se_from_summary(model))
}


#' @export
p_value.merMod <- p_value.cpglmm
parameters/R/methods_bggm.R0000644000175000017500000000016414030147726015547 0ustar  nileshnilesh#' @export
model_parameters.BGGM <- model_parameters.bayesQR

#' @export
p_value.BGGM <- p_value.BFBayesFactor
parameters/R/methods_coxme.R0000644000175000017500000000104514133047714015745 0ustar  nileshnilesh
#' @export
standard_error.coxme <- function(model, ...) {
  beta <- model$coefficients

  if (length(beta) > 0) {
    .data_frame(
      Parameter = .remove_backticks_from_string(names(beta)),
      SE = sqrt(diag(stats::vcov(model)))
    )
  }
}


## TODO add ci_method later?

#' @export
p_value.coxme <- function(model, ...) {
  stat <- insight::get_statistic(model)

  if (!is.null(stat)) {
    # Wald p-value from the squared z-statistic (chi-squared with df = 1)
    .data_frame(
      Parameter = stat$Parameter,
      p = as.vector(1 - stats::pchisq(stat$Statistic^2, df = 1))
    )
  }
}
parameters/R/equivalence_test.R0000644000175000017500000005736214131014351016451 0ustar  nileshnilesh#' @importFrom bayestestR equivalence_test
#' @export
bayestestR::equivalence_test


#' @title Equivalence test
#'
#' @description Compute the (conditional) equivalence test for frequentist models.
#' #' @param x A statistical model. #' @param range The range of practical equivalence of an effect. May be #' `"default"`, to automatically define this range based on properties of the #' model's data. #' @param ci Confidence Interval (CI) level. Default to `0.95` (`95%`). #' @param rule Character, indicating the rules when testing for practical #' equivalence. Can be `"bayes"`, `"classic"` or `"cet"`. See #' 'Details'. #' @param verbose Toggle warnings and messages. #' @param ... Arguments passed to or from other methods. #' @inheritParams model_parameters.merMod #' @inheritParams p_value #' #' @seealso For more details, see [bayestestR::equivalence_test()]. #' Further readings can be found in the references. #' #' @details #' In classical null hypothesis significance testing (NHST) within a frequentist #' framework, it is not possible to accept the null hypothesis, H0 - unlike #' in Bayesian statistics, where such probability statements are possible. #' \dQuote{[...] one can only reject the null hypothesis if the test #' statistics falls into the critical region(s), or fail to reject this #' hypothesis. In the latter case, all we can say is that no significant effect #' was observed, but one cannot conclude that the null hypothesis is true.} #' (\cite{Pernet 2017}). One way to address this issues without Bayesian methods #' is *Equivalence Testing*, as implemented in `equivalence_test()`. #' While you either can reject the null hypothesis or claim an inconclusive result #' in NHST, the equivalence test adds a third category, *"accept"*. Roughly #' speaking, the idea behind equivalence testing in a frequentist framework is #' to check whether an estimate and its uncertainty (i.e. confidence interval) #' falls within a region of "practical equivalence". Depending on the rule for #' this test (see below), statistical significance does not necessarily indicate #' whether the null hypothesis can be rejected or not, i.e. 
the classical #' interpretation of the p-value may differ from the results returned from #' the equivalence test. #' #' \subsection{Calculation of equivalence testing}{ #' \describe{ #' \item{"bayes" - Bayesian rule (Kruschke 2018)}{ #' This rule follows the \dQuote{HDI+ROPE decision rule} \cite{(Kruschke, #' 2014, 2018)} used for the #' [`Bayesian counterpart()`][bayestestR::equivalence_test]. This #' means, if the confidence intervals are completely outside the ROPE, the #' "null hypothesis" for this parameter is "rejected". If the ROPE #' completely covers the CI, the null hypothesis is accepted. Else, it's #' undecided whether to accept or reject the null hypothesis. Desirable #' results are low proportions inside the ROPE (the closer to zero the #' better). #' } #' \item{"classic" - The TOST rule (Lakens 2017)}{ #' This rule follows the \dQuote{TOST rule}, i.e. a two one-sided test #' procedure (\cite{Lakens 2017}). Following this rule, practical #' equivalence of an effect (i.e. H0) is *rejected*, when the #' coefficient is statistically significant *and* the narrow #' confidence intervals (i.e. `1-2*alpha`) *include* or #' *exceed* the ROPE boundaries. Practical equivalence is assumed #' (i.e. H0 accepted) when the narrow confidence intervals are completely #' inside the ROPE, no matter if the effect is statistically significant #' or not. Else, the decision whether to accept or reject H0 is undecided. #' } #' \item{"cet" - Conditional Equivalence Testing (Campbell/Gustafson 2018)}{ #' The Conditional Equivalence Testing as described by \cite{Campbell and #' Gustafson 2018}. According to this rule, practical equivalence is #' rejected when the coefficient is statistically significant. When the #' effect is *not* significant and the narrow confidence intervals are #' completely inside the ROPE, we accept H0, else it is undecided. 
#'   }
#'  }
#' }
#' \subsection{Levels of Confidence Intervals used for Equivalence Testing}{
#' For `rule = "classic"`, "narrow" confidence intervals are used for
#' equivalence testing. "Narrow" means, the intervals are not 1 - alpha,
#' but 1 - 2 * alpha. Thus, if `ci = .95`, alpha is assumed to be 0.05
#' and internally a ci-level of 0.90 is used. `rule = "cet"` uses
#' both regular and narrow confidence intervals, while `rule = "bayes"`
#' only uses the regular intervals.
#' }
#' \subsection{p-Values}{
#' The equivalence p-value is the area of the (cumulative) confidence
#' distribution that is outside of the region of equivalence. It can be
#' interpreted as p-value for *rejecting* the alternative hypothesis
#' and *accepting* the null hypothesis.
#' }
#' \subsection{Second Generation p-Value (SGPV)}{
#' Second generation p-values (SGPV) were proposed as a statistic
#' that represents \dQuote{the proportion of data-supported hypotheses
#' that are also null hypotheses} \cite{(Blume et al. 2018)}. This statistic
#' is actually computed in the same way as the percentage inside the ROPE as
#' returned by `equivalence_test()` (see \cite{Lakens and Delacre 2020}
#' for details on computation of the SGPV). Thus, the `"inside ROPE"`
#' column reflects the SGPV.
#' }
#' \subsection{ROPE range}{
#' Some attention is required for finding suitable values for the ROPE limits
#' (argument `range`). See 'Details' in [bayestestR::rope_range()]
#' for further information.
#' }
#'
#' @note There is also a [`plot()`-method](https://easystats.github.io/see/articles/parameters.html) implemented in the \href{https://easystats.github.io/see/}{\pkg{see}-package}.
#'
#' @references
#' \itemize{
#'   \item Blume, J. D., D'Agostino McGowan, L., Dupont, W. D., & Greevy, R. A.
#'   (2018). Second-generation p-values: Improved rigor, reproducibility, &
#'   transparency in statistical analyses. PLOS ONE, 13(3), e0188299.
#'   https://doi.org/10.1371/journal.pone.0188299
#'
#'   \item Campbell, H., & Gustafson, P. (2018). Conditional equivalence
#'   testing: An alternative remedy for publication bias. PLOS ONE, 13(4),
#'   e0195145. doi: 10.1371/journal.pone.0195145
#'
#'   \item Kruschke, J. K. (2014). Doing Bayesian data analysis: A tutorial with
#'   R, JAGS, and Stan. Academic Press
#'
#'   \item Kruschke, J. K. (2018). Rejecting or accepting parameter values in
#'   Bayesian estimation. Advances in Methods and Practices in Psychological
#'   Science, 1(2), 270-280. doi: 10.1177/2515245918771304
#'
#'   \item Lakens, D. (2017). Equivalence Tests: A Practical Primer for t Tests,
#'   Correlations, and Meta-Analyses. Social Psychological and Personality
#'   Science, 8(4), 355–362. doi: 10.1177/1948550617697177
#'
#'   \item Lakens, D., & Delacre, M. (2020). Equivalence Testing and the Second
#'   Generation P-Value. Meta-Psychology, 4.
#'   https://doi.org/10.15626/MP.2018.933
#'
#'   \item Pernet, C. (2017). Null hypothesis significance testing: A guide to
#'   commonly misunderstood concepts and recommendations for good practice.
#'   F1000Research, 4, 621. doi: 10.12688/f1000research.6963.5
#' }
#'
#' @return A data frame.
#' @examples
#' data(qol_cancer)
#' model <- lm(QoL ~ time + age + education, data = qol_cancer)
#'
#' # default rule
#' equivalence_test(model)
#'
#' # conditional equivalence test
#' equivalence_test(model, rule = "cet")
#'
#' # plot method
#' if (require("see", quietly = TRUE)) {
#'   result <- equivalence_test(model)
#'   plot(result)
#' }
#' @export
equivalence_test.lm <- function(x,
                                range = "default",
                                ci = .95,
                                rule = "classic",
                                verbose = TRUE,
                                ...) {
  rule <- match.arg(tolower(rule), choices = c("bayes", "classic", "cet"))
  out <- .equivalence_test_frequentist(x, range, ci, rule, verbose, ...)

  # attach pretty parameter labels, unless the helper already set them
  if (is.null(attr(out, "pretty_names", exact = TRUE))) {
    attr(out, "pretty_names") <- format_parameters(x)
  }

  attr(out, "object_name") <- .safe_deparse(substitute(x))
  attr(out, "rule") <- rule

  class(out) <- c("equivalence_test_lm", "see_equivalence_test_lm", class(out))
  out
}


# standard models, only fixed effects ----------------------

#' @export
equivalence_test.glm <- equivalence_test.lm

#' @export
equivalence_test.wbm <- equivalence_test.lm

#' @export
equivalence_test.lme <- equivalence_test.lm

#' @export
equivalence_test.gee <- equivalence_test.lm

#' @export
equivalence_test.gls <- equivalence_test.lm

#' @export
equivalence_test.feis <- equivalence_test.lm

#' @export
equivalence_test.felm <- equivalence_test.lm

#' @export
equivalence_test.mixed <- equivalence_test.lm

#' @export
equivalence_test.hurdle <- equivalence_test.lm

#' @export
equivalence_test.zeroinfl <- equivalence_test.lm

#' @export
equivalence_test.rma <- equivalence_test.lm


# mixed models, also random effects ----------------------

#' @rdname equivalence_test.lm
#' @export
equivalence_test.merMod <- function(x,
                                    range = "default",
                                    ci = .95,
                                    rule = "classic",
                                    effects = c("fixed", "random"),
                                    verbose = TRUE,
                                    ...) {
  # ==== argument matching ====

  rule <- match.arg(tolower(rule), choices = c("bayes", "classic", "cet"))
  effects <- match.arg(effects)

  # ==== equivalent testing for fixed or random effects ====

  if (effects == "fixed") {
    out <- .equivalence_test_frequentist(x, range, ci, rule, verbose, ...)
  } else {
    out <- .equivalence_test_frequentist_random(x, range, ci, rule, verbose, ...)
  }

  # ==== result ====

  if (is.null(attr(out, "pretty_names", exact = TRUE))) {
    attr(out, "pretty_names") <- format_parameters(x)
  }

  attr(out, "object_name") <- .safe_deparse(substitute(x))
  attr(out, "rule") <- rule

  class(out) <- c("equivalence_test_lm", "see_equivalence_test_lm", class(out))
  out
}


#' @export
equivalence_test.glmmTMB <- equivalence_test.merMod

#' @export
equivalence_test.MixMod <- equivalence_test.merMod


# Special classes -------------------------

#' @export
equivalence_test.parameters_simulate_model <- function(x,
                                                       range = "default",
                                                       ci = .95,
                                                       verbose = TRUE,
                                                       ...) {
  # ==== retrieve model, to define rope range for simulated model parameters ====

  model <- .get_object(x)

  if (all(range == "default") && !is.null(model)) {
    range <- bayestestR::rope_range(model, verbose = verbose)
  } else if (!all(is.numeric(range)) | length(range) != 2) {
    stop("`range` should be 'default' or a vector of 2 numeric values (e.g., c(-0.1, 0.1)).")
  }

  # ==== classical equivalent testing for data frames ====

  out <- equivalence_test(as.data.frame(x), range = range, ci = ci, verbose = verbose, ...)

  if (is.null(attr(out, "pretty_names", exact = TRUE))) {
    attr(out, "pretty_names") <- format_parameters(x)
  }

  attr(out, "object_name") <- attr(x, "object_name")
  attr(out, "data") <- x

  class(out) <- unique(c("equivalence_test", "see_equivalence_test", "equivalence_test_simulate_model", class(out)))
  out
}


# helper -------------------


#' @keywords internal
.equivalence_test_frequentist <- function(x,
                                          range = "default",
                                          ci = .95,
                                          rule = "classic",
                                          verbose = TRUE,
                                          ...) {
  # ==== define rope range ====

  if (all(range == "default")) {
    range <- bayestestR::rope_range(x, verbose = verbose)
    # rope_range() may return one range per component; pick the widest
    if (is.list(range)) {
      range <- range[[which.max(sapply(range, diff))]]
    }
  } else if (!all(is.numeric(range)) | length(range) != 2) {
    stop("`range` should be 'default' or a vector of 2 numeric values (e.g., c(-0.1, 0.1)).")
  }

  if (length(ci) > 1) {
    warning("`ci` may only be of length 1. Using first ci-value now.", call.
= FALSE)
    ci <- ci[1]
  }

  # ==== requested confidence intervals ====

  params <- conf_int <- .ci_generic(x, ci = ci)
  conf_int <- as.data.frame(t(conf_int[, c("CI_low", "CI_high")]))

  # ==== the "narrower" intervals (1-2*alpha) for CET-rules. ====

  alpha <- 1 - ci
  conf_int2 <- .ci_generic(x, ci = (ci - alpha))
  conf_int2 <- as.data.frame(t(conf_int2[, c("CI_low", "CI_high")]))

  # ==== equivalence test for each parameter ====

  l <- mapply(
    function(ci_wide, ci_narrow) {
      .equivalence_test_numeric(
        ci_wide,
        ci_narrow,
        range_rope = range,
        rule = rule,
        verbose = verbose
      )
    },
    conf_int,
    conf_int2,
    SIMPLIFY = FALSE
  )

  dat <- do.call(rbind, l)
  if ("Component" %in% colnames(params)) dat$Component <- params$Component

  out <- data.frame(
    Parameter = params$Parameter,
    CI = ifelse(rule == "bayes", ci, ci - alpha),
    dat,
    stringsAsFactors = FALSE
  )

  # ==== (adjusted) p-values for tests ====

  out$p <- .add_p_to_equitest(x, ci, range)

  attr(out, "rope") <- range
  out
}


# Equivalence test for random-effect (group-level) intercepts of mixed
# models; builds normal-approximation CIs from the random-effects SEs.
#' @keywords internal
.equivalence_test_frequentist_random <- function(x,
                                                 range = "default",
                                                 ci = .95,
                                                 rule = "classic",
                                                 verbose = TRUE,
                                                 ...) {
  if (all(range == "default")) {
    range <- bayestestR::rope_range(x, verbose = verbose)
  } else if (!all(is.numeric(range)) | length(range) != 2) {
    stop("`range` should be 'default' or a vector of 2 numeric values (e.g., c(-0.1, 0.1)).")
  }

  if (length(ci) > 1) {
    if (isTRUE(verbose)) {
      warning("`ci` may only be of length 1. Using first ci-value now.", call.
= FALSE)
    }
    ci <- ci[1]
  }

  params <- insight::get_parameters(x, effects = "random", component = "conditional", verbose = FALSE)
  se <- standard_error(x, effects = "random", component = "conditional")

  # normal quantiles for the regular and the "narrow" (1-2*alpha) intervals
  alpha <- (1 + ci) / 2
  fac <- stats::qnorm(alpha)

  alpha_narrow <- (1 + ci - (1 - ci)) / 2
  fac_narrow <- stats::qnorm(alpha_narrow)

  # one block of results per grouping factor
  out <- do.call(rbind, lapply(names(params), function(np) {
    est <- params[[np]][, "(Intercept)"]
    stderr <- se[[np]][, "(Intercept)"]

    d <- data.frame(
      Parameter = rownames(params[[np]]),
      Estimate = est,
      CI = ifelse(rule == "bayes", ci, ci - (1 - ci)),
      Group = np,
      stringsAsFactors = FALSE
    )

    conf_int <- as.data.frame(t(data.frame(
      CI_low = est - stderr * fac,
      CI_high = est + stderr * fac
    )))

    conf_int2 <- as.data.frame(t(data.frame(
      CI_low = est - stderr * fac_narrow,
      CI_high = est + stderr * fac_narrow
    )))

    l <- mapply(
      function(ci_wide, ci_narrow) {
        .equivalence_test_numeric(
          ci_wide,
          ci_narrow,
          range_rope = range,
          rule = rule,
          verbose = verbose
        )
      },
      conf_int,
      conf_int2,
      SIMPLIFY = FALSE
    )

    dat <- do.call(rbind, l)
    cbind(d, dat)
  }))

  attr(out, "rope") <- range
  out
}


# Core decision logic for a single parameter: classifies the CI against the
# ROPE as "Accepted" / "Rejected" / "Undecided" according to `rule`.
#' @keywords internal
.equivalence_test_numeric <- function(ci_wide, ci_narrow, range_rope, rule, verbose) {
  final_ci <- NULL

  # ==== HDI+ROPE decision rule, by Kruschke ====

  if (rule == "bayes") {
    final_ci <- ci_wide
    if (min(ci_wide) > max(range_rope) || max(ci_wide) < min(range_rope)) {
      decision <- "Rejected"
    } else if (max(ci_wide) <= max(range_rope) && min(ci_wide) >= min(range_rope)) {
      decision <- "Accepted"
    } else {
      decision <- "Undecided"
    }
  }

  # ==== Lakens' rule ====

  if (rule == "classic") {
    final_ci <- ci_narrow
    # significant result?
    if (min(ci_narrow) > 0 || max(ci_narrow) < 0) {
      # check if CI are entirely inside ROPE.
      # If CI crosses ROPE, reject H0, else accept
      if (min(abs(ci_narrow)) < max(abs(range_rope)) && max(abs(ci_narrow)) < max(abs(range_rope))) {
        decision <- "Accepted"
      } else {
        decision <- "Rejected"
      }
      # non-significant results
    } else {
      # check if CI are entirely inside ROPE. If CI crosses ROPE, reject H0, else accept
      if (min(abs(ci_narrow)) < max(abs(range_rope)) && max(abs(ci_narrow)) < max(abs(range_rope))) {
        decision <- "Accepted"
      } else {
        decision <- "Undecided"
      }
    }
  }

  # ==== CET rule ====

  if (rule == "cet") {
    final_ci <- ci_narrow
    # significant result?
    if (min(ci_wide) > 0 || max(ci_wide) < 0) {
      decision <- "Rejected"
      # non-significant results, all narrow CI inside ROPE
    } else if (min(abs(ci_narrow)) < max(abs(range_rope)) && max(abs(ci_narrow)) < max(abs(range_rope))) {
      decision <- "Accepted"
    } else {
      decision <- "Undecided"
    }
  }

  data.frame(
    CI_low = final_ci[1],
    CI_high = final_ci[2],
    ROPE_low = range_rope[1],
    ROPE_high = range_rope[2],
    ROPE_Percentage = .rope_coverage(range_rope, final_ci),
    ROPE_Equivalence = decision,
    stringsAsFactors = FALSE
  )
}


# helper ---------------------


# Proportion of the confidence interval that lies inside the ROPE
# (a value between 0 and 1); cases cover full containment, no overlap,
# CI covering the ROPE, and partial overlap on either side.
.rope_coverage <- function(rope, ci) {
  diff_rope <- abs(diff(rope))
  diff_ci <- abs(diff(ci))

  # inside?
  if (min(ci) >= min(rope) && max(ci) <= max(rope)) {
    coverage <- 1

    # outside?
  } else if (max(ci) < min(rope) || min(ci) > max(rope)) {
    coverage <- 0

    # CI covers completely rope?
  } else if (max(ci) > max(rope) && min(ci) < min(rope)) {
    coverage <- diff_rope / diff_ci

    # CI inside rope and outside max rope?
  } else if (min(ci) >= min(rope) && max(ci) > max(rope)) {
    diff_in_rope <- max(rope) - min(ci)
    coverage <- diff_in_rope / diff_ci

    # CI inside rope and outside min rope?
  } else if (max(ci) <= max(rope) && min(ci) < min(rope)) {
    diff_in_rope <- max(ci) - min(rope)
    coverage <- diff_in_rope / diff_ci
  }

  coverage
}


# Equivalence p-value: sum of the two one-sided tail areas of the confidence
# distribution falling outside the ROPE (see roxygen "p-Values" section).
# Returns NULL when any ingredient (parameters, df, SEs) cannot be extracted.
.add_p_to_equitest <- function(model, ci, range) {
  tryCatch(
    {
      params <- insight::get_parameters(model)

      # degrees of freedom
      df <- degrees_of_freedom(model, method = "any")

      # mu
      params$mu <- params$Estimate * -1

      # se
      se <- standard_error(model)

      stats::pt((range[1] - params$mu) / se$SE, df, lower.tail = TRUE) +
        stats::pt((range[2] - params$mu) / se$SE, df, lower.tail = FALSE)
    },
    error = function(e) {
      NULL
    }
  )
}


# methods ----------------


#' @export
format.equivalence_test_lm <- function(x,
                                       digits = 2,
                                       ci_digits = 2,
                                       p_digits = 3,
                                       ci_width = NULL,
                                       ci_brackets = NULL,
                                       format = "text",
                                       zap_small = FALSE,
                                       ...) {
  # default brackets are parenthesis for HTML / MD
  if ((is.null(ci_brackets) || isTRUE(ci_brackets)) && (identical(format, "html") || identical(format, "markdown"))) {
    ci_brackets <- c("(", ")")
  } else if (is.null(ci_brackets) || isTRUE(ci_brackets)) {
    ci_brackets <- c("[", "]")
  }

  # main formatting
  out <- insight::format_table(
    x,
    digits = digits,
    ci_digits = ci_digits,
    p_digits = p_digits,
    ci_width = ci_width,
    ci_brackets = ci_brackets,
    zap_small = zap_small,
    ...
  )

  # format column names
  colnames(out)[which(colnames(out) == "Equivalence (ROPE)")] <- "H0"
  out$ROPE <- NULL

  # only show supported components
  if ("Component" %in% colnames(out)) {
    out <- out[out$Component %in% c("conditional", "count"), ]
  }

  out
}


#' @export
print.equivalence_test_lm <- function(x,
                                      digits = 2,
                                      ci_digits = 2,
                                      p_digits = 3,
                                      ci_brackets = NULL,
                                      zap_small = FALSE,
                                      ...)
{
  orig_x <- x

  # headline depends on the decision rule stored by equivalence_test()
  rule <- attributes(x)$rule
  if (!is.null(rule)) {
    if (rule == "cet") {
      insight::print_color("# Conditional Equivalence Testing\n\n", "blue")
    } else if (rule == "classic") {
      insight::print_color("# TOST-test for Practical Equivalence\n\n", "blue")
    } else {
      insight::print_color("# Test for Practical Equivalence\n\n", "blue")
    }
  } else {
    insight::print_color("# Test for Practical Equivalence\n\n", "blue")
  }

  .rope <- attr(x, "rope", exact = TRUE)
  cat(sprintf(" ROPE: [%.*f %.*f]\n\n", digits, .rope[1], digits, .rope[2]))

  # formatting
  x <- format(x,
    digits = digits,
    ci_digits = ci_digits,
    p_digits = p_digits,
    ci_width = "auto",
    ci_brackets = ci_brackets,
    format = "text",
    zap_small = zap_small,
    ...
  )

  # random-effects results carry a "Group" column; print one table per group
  if ("Group" %in% colnames(x)) {
    out <- split(x, x$Group)
    for (i in names(out)) {
      insight::print_color(sprintf("Group: %s\n\n", i), "red")
      cat(insight::export_table(out[[i]]))
    }
  } else {
    cat(insight::export_table(x))
  }

  invisible(orig_x)
}


#' @export
plot.equivalence_test_lm <- function(x, ...) {
  insight::check_if_installed("see")
  NextMethod()
}


#'
#'
#'
#' @export
#' print_md.equivalence_test_lm <- function(x,
#'                                          digits = 2,
#'                                          ci_digits = 2,
#'                                          p_digits = 3,
#'                                          ci_brackets = NULL,
#'                                          zap_small = FALSE,
#'                                          ...) {
#'   orig_x <- x
#'
#'   rule <- attributes(x)$rule
#'   if (!is.null(rule)) {
#'     if (rule == "cet") {
#'       title <- "Conditional Equivalence Testing"
#'     } else if (rule == "classic") {
#'       title <- "TOST-test for Practical Equivalence"
#'     } else {
#'       title <- "Test for Practical Equivalence"
#'     }
#'   } else {
#'     title <- "Test for Practical Equivalence"
#'   }
#'
#'   .rope <- attr(x, "rope", exact = TRUE)
#'   subtitle <- sprintf(" ROPE: [%.*f %.*f]\n\n", digits, .rope[1], digits, .rope[2])
#'
#'   # formatting
#'   x <- format(x,
#'     digits = digits,
#'     ci_digits = ci_digits,
#'     p_digits = p_digits,
#'     ci_width = NULL,
#'     ci_brackets = ci_brackets,
#'     format = "md",
#'     zap_small = zap_small,
#'     ...)
#'
#'   if ("Group" %in% colnames(x)) {
#'     group_by <- "Group"
#'   } else {
#'     group_by <- NULL
#'   }
#'
#'   cat(insight::export_table(x,
#'     format = "md",
#'     title = title,
#'     subtitle = subtitle,
#'     group_by = group_by))
#'   invisible(orig_x)
#' }
parameters/R/extract_parameters_anova.R0000644000175000017500000002450214107206235020167 0ustar  nileshnilesh#' @keywords internal
# Dispatches on the anova object's class, then renames the many
# class-specific column names onto this package's standardized scheme
# (p, Sum_Squares, Mean_Square, F, Chi2, df, df_error, ...).
.extract_parameters_anova <- function(model, test = "multivariate") {

  # Processing
  if ("manova" %in% class(model)) {
    parameters <- .extract_anova_manova(model)
  } else if ("maov" %in% class(model)) {
    parameters <- .extract_anova_maov(model)
  } else if ("aov" %in% class(model)) {
    parameters <- .extract_anova_aov(model)
  } else if ("anova" %in% class(model)) {
    parameters <- .extract_anova_anova(model)
  } else if ("Anova.mlm" %in% class(model)) {
    parameters <- .extract_anova_mlm(model, test)
  } else if ("aovlist" %in% class(model)) {
    parameters <- .extract_anova_aovlist(model)
  } else if ("anova.rms" %in% class(model)) {
    parameters <- .extract_anova_aov_rms(model)
  }

  # Rename

  # p-values
  names(parameters) <- gsub("(Pr|P)\\(>.*\\)", "p", names(parameters))
  names(parameters) <- gsub("Pr..Chisq.", "p", names(parameters), fixed = TRUE)
  names(parameters) <- gsub("Pr..Chi.", "p", names(parameters), fixed = TRUE)
  names(parameters) <- gsub("p.value", "p", names(parameters), fixed = TRUE)
  names(parameters) <- gsub("^P$", "p", names(parameters))

  # squares
  names(parameters) <- gsub("Sum Sq", "Sum_Squares", names(parameters), fixed = TRUE)
  names(parameters) <- gsub("Error SS", "Sum_Squares_Error", names(parameters), fixed = TRUE)
  names(parameters) <- gsub("Partial.SS", "Sum_Squares_Partial", names(parameters), fixed = TRUE)
  names(parameters) <- gsub("Sum of Sq", "Sum_Squares", names(parameters), fixed = TRUE)
  names(parameters) <- gsub("Mean Sq", "Mean_Square", names(parameters), fixed = TRUE)
  names(parameters) <- gsub("MSE", "Mean_Square", names(parameters), fixed = TRUE)
  names(parameters) <- gsub("MS", "Mean_Square",
names(parameters), fixed = TRUE) # statistic names(parameters) <- gsub("approx F", "F", names(parameters), fixed = TRUE) names(parameters) <- gsub("F values", "F", names(parameters), fixed = TRUE) names(parameters) <- gsub("F value", "F", names(parameters), fixed = TRUE) names(parameters) <- gsub("LR.Chisq", "Chi2", names(parameters), fixed = TRUE) names(parameters) <- gsub("LR Chisq", "Chi2", names(parameters), fixed = TRUE) names(parameters) <- gsub("Chisq", "Chi2", names(parameters), fixed = TRUE) names(parameters) <- gsub("Chi.sq", "Chi2", names(parameters), fixed = TRUE) names(parameters) <- gsub("Chi-Square", "Chi2", names(parameters), fixed = TRUE) # other names(parameters) <- gsub("logLik", "Log_Likelihood", names(parameters), fixed = TRUE) names(parameters) <- gsub("deviance", "Deviance", names(parameters), fixed = TRUE) names(parameters) <- gsub("Resid. Dev", "Deviance_error", names(parameters), fixed = TRUE) # error-df if (!"df_error" %in% names(parameters)) { names(parameters) <- gsub("den Df", "df_error", names(parameters), fixed = TRUE) names(parameters) <- gsub("Res.Df", "df_error", names(parameters), fixed = TRUE) names(parameters) <- gsub("Resid. 
Df", "df_error", names(parameters), fixed = TRUE) names(parameters) <- gsub("Res.DoF", "df_error", names(parameters), fixed = TRUE) } # df if (!"df" %in% names(parameters)) { names(parameters) <- gsub("npar", "df", names(parameters), fixed = TRUE) names(parameters) <- gsub("NumDF", "df", names(parameters), fixed = TRUE) names(parameters) <- gsub("num Df", "df", names(parameters), fixed = TRUE) names(parameters) <- gsub("d.f.", "df", names(parameters), fixed = TRUE) names(parameters) <- gsub("Df", "df", names(parameters), fixed = TRUE) } # other df names(parameters) <- gsub("Chi.Df", "Chi2_df", names(parameters), fixed = TRUE) names(parameters) <- gsub("Chi DoF", "Chi2_df", names(parameters), fixed = TRUE) # Reorder row.names(parameters) <- NULL order <- c("Response", "Group", "Parameter", "Pillai", "AIC", "BIC", "Log_Likelihood", "Chi2", "Chi2_df", "RSS", "Sum_Squares", "Sum_Squares_Partial", "Sum_Squares_Error", "df", "Deviance", "Statistic", "df_num", "df_error", "Deviance_error", "Mean_Square", "F", "Rao", "p") parameters <- parameters[order[order %in% names(parameters)]] .remove_backticks_from_parameter_names(parameters) } # helpers ----- # aov ----- .extract_anova_aov <- function(model) { parameters <- as.data.frame(summary(model)[[1]]) parameters$Parameter <- trimws(row.names(parameters)) parameters } # manova ----- .extract_anova_manova <- function(model) { parameters <- as.data.frame(summary(model)$stats) parameters$Parameter <- trimws(row.names(parameters)) parameters[["den Df"]] <- NULL parameters[["num Df"]] <- NULL parameters } # maov ----- .extract_anova_maov <- function(model) { s <- summary(model) out <- do.call(rbind, lapply(names(s), function(i) { parameters <- as.data.frame(s[[i]]) parameters$Parameter <- trimws(row.names(parameters)) parameters$Response <- gsub("\\s*Response ", "", i) parameters })) out } # aov.rms ----- .extract_anova_aov_rms <- function(model) { parameters <- data.frame(model) parameters$Parameter <- rownames(parameters) 
parameters$Parameter[parameters$Parameter == "ERROR"] <- "Residuals" parameters$Parameter[parameters$Parameter == "TOTAL"] <- "Total" parameters } # aovlist ----- .extract_anova_aovlist <- function(model) { if (names(model)[1L] == "(Intercept)") { model <- model[-1L] } parameters <- Reduce(function(x, y) merge(x, y, all = TRUE, sort = FALSE), lapply(names(model), function(i) { aov_summary <- summary(model[[i]]) if (inherits(aov_summary, "summary.manova")) { temp <- as.data.frame(aov_summary$stats) } else { temp <- as.data.frame(aov_summary[[1]]) } temp$Parameter <- trimws(row.names(temp)) temp$Group <- i temp })) # parameters <- parameters[order(parameters$Group), ] parameters } # anova ----- .extract_anova_anova <- function(model) { parameters <- as.data.frame(model) parameters$Parameter <- trimws(row.names(parameters)) # Deal with anovas of models if (length(attributes(model)$heading) == 2) { info <- attributes(model)$heading[[2]] if (grepl("Model", info)) { parameters$Parameter <- unlist(strsplit(info, "\n", fixed = TRUE)) } } else if (length(attributes(model)$heading) > 2) { p_names <- attributes(model)$heading[-1:-2] if (nrow(parameters) == length(p_names)) { parameters$Parameter <- p_names } } # If mixed models... 
sumsq <- names(parameters)[names(parameters) %in% c("Sum Sq", "Sum of Sq")] df_num <- names(parameters)[names(parameters) %in% c("npar", "Df", "NumDF", "num Df")] mean_sq <- names(parameters)[names(parameters) %in% c("Mean Sq", "MSE")] if (length(sumsq) != 0 && length(df_num) != 0) { parameters$Mean_Square <- parameters[[sumsq]] / parameters[[df_num]] } else if (length(mean_sq) != 0) { parameters$Mean_Square <- parameters[[mean_sq]] } if (length(df_num) == 0 && length(sumsq) != 0 && "Mean_Square" %in% colnames(parameters) && !("Df" %in% colnames(parameters))) { parameters$Df <- round(parameters[[sumsq]] / parameters$Mean_Square) } parameters } # Anova.mlm ------------- .extract_anova_mlm <- function(model, test = NULL) { if (identical(test, "univariate")) { ut <- unclass(summary(model)$univariate.tests) out <- data.frame(Parameter = rownames(ut), stringsAsFactors = FALSE) out <- cbind(out, as.data.frame(ut)) } else { out <- lapply(1:length(model$terms), function(i) { if (model$repeated) { qr_value <- qr(model$SSPE[[i]]) } else { qr_value <- qr(model$SSPE) } eigs <- Re(eigen(qr.coef(qr_value, model$SSP[[i]]), symmetric = FALSE)$values) test <- switch(model$test, "Pillai" = .pillai_test(eigs, model$df[i], model$error.df), "Wilks" = .wilks_test(eigs, model$df[i], model$error.df), "Hotelling-Lawley" = .hl_test(eigs, model$df[i], model$error.df), "Roy" = .roy_test(eigs, model$df[i], model$error.df) ) data.frame( Parameter = model$terms[i], df = model$df[i], Statistic = test[1], `F` = test[2], df_num = test[3], df_error = test[4], p = stats::pf(test[2], test[3], test[4], lower.tail = FALSE), stringsAsFactors = FALSE ) }) out <- do.call(rbind, out) } out } # test helper ------------- .pillai_test <- function(eig, q, df.res) { test <- sum(eig / (1 + eig)) p <- length(eig) s <- min(p, q) n <- 0.5 * (df.res - p - 1) m <- 0.5 * (abs(p - q) - 1) tmp1 <- 2 * m + s + 1 tmp2 <- 2 * n + s + 1 c(test, (tmp2 / tmp1 * test) / (s - test), s * tmp1, s * tmp2) } .roy_test <- 
function(eig, q, df.res) { p <- length(eig) test <- max(eig) tmp1 <- max(p, q) tmp2 <- df.res - tmp1 + q c(test, (tmp2 * test) / tmp1, tmp1, tmp2) } .hl_test <- function(eig, q, df.res) { test <- sum(eig) p <- length(eig) m <- 0.5 * (abs(p - q) - 1) n <- 0.5 * (df.res - p - 1) s <- min(p, q) tmp1 <- 2 * m + s + 1 tmp2 <- 2 * (s * n + 1) c(test, (tmp2 * test) / s / s / tmp1, s * tmp1, tmp2) } .wilks_test <- function(eig, q, df.res) { test <- prod(1 / (1 + eig)) p <- length(eig) tmp1 <- df.res - 0.5 * (p - q + 1) tmp2 <- (p * q - 2) / 4 tmp3 <- p^2 + q^2 - 5 tmp3 <- if (tmp3 > 0) { sqrt(((p * q)^2 - 4) / tmp3) } else { 1 } c( test, ((test^(-1 / tmp3) - 1) * (tmp1 * tmp3 - 2 * tmp2)) / p / q, p * q, tmp1 * tmp3 - 2 * tmp2 ) } # parameter-power ---------------- .power_for_aov <- function(model, params) { if (requireNamespace("effectsize", quietly = TRUE)) { power <- tryCatch( { cohens_f2 <- effectsize::cohens_f_squared(model, partial = TRUE) f2 <- cohens_f2$Cohens_f2[match(cohens_f2$Parameter, params$Parameter)] u <- params$df[params$Parameter != "Residuals"] v <- params$df[params$Parameter == "Residuals"] lambda <- f2 * (u + v + 1) cohens_f2$Power <- stats::pf(stats::qf(0.05, u, v, lower.tail = FALSE), u, v, lambda, lower.tail = FALSE) cohens_f2 }, error = function(e) { NULL } ) } if (!is.null(power)) { params <- merge(params, cohens_f2[c("Parameter", "Power")], sort = FALSE, all = TRUE) } params } parameters/R/methods_cplm.R0000644000175000017500000002176514142723210015560 0ustar nileshnilesh# classes: .cpglm, .bcpglm, .zcpglm, .cpglmm ########## .zcpglm --------------- #' @title Parameters from Zero-Inflated Models #' @name model_parameters.zcpglm #' #' @description #' Parameters from zero-inflated models (from packages like \pkg{pscl}, #' \pkg{cplm} or \pkg{countreg}). #' #' @param model A model with zero-inflation component. 
#' @inheritParams model_parameters.default #' @inheritParams simulate_model #' #' @seealso [insight::standardize_names()] to rename #' columns into a consistent, standardized naming scheme. #' #' @examples #' library(parameters) #' if (require("pscl")) { #' data("bioChemists") #' model <- zeroinfl(art ~ fem + mar + kid5 + ment | kid5 + phd, data = bioChemists) #' model_parameters(model) #' } #' @return A data frame of indices related to the model's parameters. #' @inheritParams simulate_model #' @export model_parameters.zcpglm <- function(model, ci = .95, bootstrap = FALSE, iterations = 1000, component = c("all", "conditional", "zi", "zero_inflated"), standardize = NULL, exponentiate = FALSE, robust = FALSE, p_adjust = NULL, keep = NULL, drop = NULL, parameters = keep, verbose = TRUE, ...) { component <- match.arg(component) # fix argument, if model has no zi-part if (!insight::model_info(model, verbose = FALSE)$is_zero_inflated && component != "conditional") { component <- "conditional" } # Processing if (bootstrap) { params <- bootstrap_parameters(model, iterations = iterations, ci = ci, ...) } else { params <- .extract_parameters_generic( model, ci = ci, component = component, standardize = standardize, robust = robust, p_adjust = p_adjust, keep_parameters = keep, drop_parameters = drop, ... ) } if (isTRUE(exponentiate) || identical(exponentiate, "nongaussian")) { params <- .exponentiate_parameters(params, model, exponentiate) } params <- .add_model_parameters_attributes( params, model, ci, exponentiate, p_adjust = p_adjust, verbose = verbose, ... ) attr(params, "object_name") <- deparse(substitute(model), width.cutoff = 500) class(params) <- c("parameters_model", "see_parameters_model", class(params)) params } #' @export standard_error.zcpglm <- function(model, component = c("all", "conditional", "zi", "zero_inflated"), ...) 
{ insight::check_if_installed("cplm") component <- match.arg(component) junk <- utils::capture.output(stats <- cplm::summary(model)$coefficients) params <- insight::get_parameters(model) tweedie <- .data_frame( Parameter = params$Parameter[params$Component == "conditional"], SE = as.vector(stats$tweedie[, "Std. Error"]), Component = "conditional" ) zero <- .data_frame( Parameter = params$Parameter[params$Component == "zero_inflated"], SE = as.vector(stats$zero[, "Std. Error"]), Component = "zero_inflated" ) out <- .filter_component(rbind(tweedie, zero), component) out } #' p-values for Models with Zero-Inflation #' #' This function attempts to return, or compute, p-values of hurdle and #' zero-inflated models. #' #' @param model A statistical model. #' @inheritParams p_value #' @inheritParams simulate_model #' @inheritParams standard_error #' #' @return #' A data frame with at least two columns: the parameter names and the p-values. #' Depending on the model, may also include columns for model components etc. #' #' @examples #' if (require("pscl", quietly = TRUE)) { #' data("bioChemists") #' model <- zeroinfl(art ~ fem + mar + kid5 | kid5 + phd, data = bioChemists) #' p_value(model) #' p_value(model, component = "zi") #' } #' @export p_value.zcpglm <- function(model, component = c("all", "conditional", "zi", "zero_inflated"), ...) 
{ insight::check_if_installed("cplm") component <- match.arg(component) junk <- utils::capture.output(stats <- cplm::summary(model)$coefficients) params <- insight::get_parameters(model) tweedie <- .data_frame( Parameter = params$Parameter[params$Component == "conditional"], p = as.vector(stats$tweedie[, "Pr(>|z|)"]), Component = "conditional" ) zero <- .data_frame( Parameter = params$Parameter[params$Component == "zero_inflated"], p = as.vector(stats$zero[, "Pr(>|z|)"]), Component = "zero_inflated" ) out <- .filter_component(rbind(tweedie, zero), component) out } ########## .bcpglm --------------- #' @export model_parameters.bcplm <- model_parameters.bayesQR #' @export p_value.bcplm <- p_value.brmsfit ########## .cpglm --------------- #' @export p_value.cpglm <- function(model, ...) { insight::check_if_installed("cplm") junk <- utils::capture.output(stats <- cplm::summary(model)$coefficients) params <- insight::get_parameters(model) .data_frame( Parameter = params$Parameter, p = as.vector(stats[, "Pr(>|t|)"]) ) } #' @export standard_error.cpglm <- function(model, ...) { insight::check_if_installed("cplm") junk <- utils::capture.output(stats <- cplm::summary(model)$coefficients) params <- insight::get_parameters(model) .data_frame( Parameter = params$Parameter, SE = as.vector(stats[, "Std. Error"]) ) } ########## .cpglmm --------------- #' @rdname model_parameters.merMod #' @export model_parameters.cpglmm <- function(model, ci = .95, bootstrap = FALSE, iterations = 1000, standardize = NULL, effects = "all", group_level = FALSE, exponentiate = FALSE, ci_method = NULL, p_adjust = NULL, verbose = TRUE, df_method = ci_method, include_sigma = FALSE, ...) { ## TODO remove later if (!missing(df_method) && !identical(ci_method, df_method)) { warning(insight::format_message("Argument 'df_method' is deprecated. Please use 'ci_method' instead."), call. 
= FALSE) ci_method <- df_method } # p-values, CI and se might be based on different df-methods ci_method <- .check_df_method(ci_method) effects <- match.arg(effects, choices = c("fixed", "random", "all")) # standardize only works for fixed effects... if (!is.null(standardize) && standardize != "refit") { if (!missing(effects) && effects != "fixed" && verbose) { warning(insight::format_message("Standardizing coefficients only works for fixed effects of the mixed model."), call. = FALSE) } effects <- "fixed" } params <- .mixed_model_parameters_generic( model = model, ci = ci, bootstrap = bootstrap, iterations = iterations, merge_by = "Parameter", standardize = standardize, exponentiate = exponentiate, effects = effects, robust = FALSE, p_adjust = p_adjust, group_level = group_level, ci_method = ci_method, include_sigma = include_sigma, ... ) attr(params, "object_name") <- deparse(substitute(model), width.cutoff = 500) class(params) <- c("parameters_model", "see_parameters_model", "data.frame") params } #' @export p_value.cpglmm <- function(model, method = "wald", ...) { p_value.default(model, method = method, ...) } #' @export standard_error.cpglmm <- function(model, ...) { insight::check_if_installed("cplm") stats <- cplm::summary(model)$coefs params <- insight::get_parameters(model) .data_frame( Parameter = params$Parameter, SE = as.vector(stats[, "Std. Error"]) ) } # tools -------------------- .check_df_method <- function(df_method) { if (!is.null(df_method)) { df_method <- tolower(df_method) if (df_method %in% c("satterthwaite", "kenward", "kr")) { warning(insight::format_message("Satterthwaite or Kenward-Rogers approximation of degrees of freedom is only available for linear mixed models."), call. 
= FALSE) df_method <- "wald" } df_method <- match.arg(df_method, choices = c("wald", "normal", "residual", "ml1", "betwithin", "profile", "boot", "uniroot")) } df_method } parameters/R/n_factors.R0000644000175000017500000006251514131244363015063 0ustar nileshnilesh#' Number of components/factors to retain in PCA/FA #' #' This function runs many existing procedures for determining how many factors #' to retain/extract from factor analysis (FA) or dimension reduction (PCA). It #' returns the number of factors based on the maximum consensus between methods. #' In case of ties, it will keep the simplest model and select the solution #' with the fewer factors. #' #' @param x A data frame. #' @param type Can be `"FA"` or `"PCA"`, depending on what you want to #' do. #' @param rotation Only used for VSS (Very Simple Structure criterion, see #' [psych::VSS()]). The rotation to apply. Can be `"none"`, #' `"varimax"`, `"quartimax"`, `"bentlerT"`, `"equamax"`, #' `"varimin"`, `"geominT"` and `"bifactor"` for orthogonal #' rotations, and `"promax"`, `"oblimin"`, `"simplimax"`, #' `"bentlerQ"`, `"geominQ"`, `"biquartimin"` and #' `"cluster"` for oblique transformations. #' @param algorithm Factoring method used by VSS. Can be `"pa"` for #' Principal Axis Factor Analysis, `"minres"` for minimum residual (OLS) #' factoring, `"mle"` for Maximum Likelihood FA and `"pc"` for #' Principal Components. `"default"` will select `"minres"` if #' `type = "FA"` and `"pc"` if `type = "PCA"`. #' @param package Package from which respective methods are used. Can be #' `"all"` or a vector containing `"nFactors"`, `"psych"`, `"PCDimension"`, `"fit"` or #' `"EGAnet"`. Note that `"fit"` (which actually also relies on the `psych` #' package) and `"EGAnet"` can be very slow for bigger #' datasets. Thus, the default is `c("nFactors", "psych")`. You must have #' the respective packages installed for the methods to be used. 
#' @param safe If `TRUE`, the function will run all the procedures in try #' blocks, and will only return those that work and silently skip the ones #' that may fail. #' @param cor An optional correlation matrix that can be used (note that the #' data must still be passed as the first argument). If `NULL`, will #' compute it by running `cor()` on the passed data. #' @param n_max If set to a value (e.g., `10`), will drop from the results all #' methods that suggest a higher number of components. The interpretation becomes #' 'from all the methods that suggested a number lower than n_max, the results #' are ...'. #' @param ... Arguments passed to or from other methods. #' #' @details `n_components` is actually an alias for `n_factors`, with #' different defaults for the function arguments. #' #' @note There is also a #' [`plot()`-method](https://easystats.github.io/see/articles/parameters.html) #' implemented in the #' \href{https://easystats.github.io/see/}{\pkg{see}-package}. #' `n_components()` is a convenient short for `n_factors(type = #' "PCA")`. #' #' @examples #' library(parameters) #' if (require("nFactors", quietly = TRUE) && require("EGAnet", quietly = TRUE)) { #' n_factors(mtcars, type = "PCA") #' #' result <- n_factors(mtcars[1:5], type = "FA") #' as.data.frame(result) #' summary(result) #' \dontrun{ #' if (require("PCDimension", quietly = TRUE)) { #' # Setting package = 'all' will increase the number of methods (but is slow) #' n_factors(mtcars, type = "PCA", package = "all") #' n_factors(mtcars, type = "FA", algorithm = "mle", package = "all") #' } #' } #' } #' @return A data frame. #' #' @references \itemize{ #' \item Bartlett, M. S. (1950). Tests of significance in factor analysis. #' British Journal of statistical psychology, 3(2), 77-85. #' #' \item Bentler, P. M., & Yuan, K. H. (1996). Test of linear trend in #' eigenvalues of a covariance matrix with application to data analysis. 
#' British Journal of Mathematical and Statistical Psychology, 49(2), 299-312. #' #' \item Cattell, R. B. (1966). The scree test for the number of factors. #' Multivariate behavioral research, 1(2), 245-276. #' #' \item Finch, W. H. (2019). Using Fit Statistic Differences to Determine the #' Optimal Number of Factors to Retain in an Exploratory Factor Analysis. #' Educational and Psychological Measurement. #' #' \item Zoski, K. W., & Jurs, S. (1996). An objective counterpart to the #' visual scree test for factor analysis: The standard error scree. #' Educational and Psychological Measurement, 56(3), 443-451. #' #' \item Zoski, K., & Jurs, S. (1993). Using multiple regression to determine #' the number of factors to retain in factor analysis. Multiple Linear #' Regression Viewpoints, 20(1), 5-9. #' #' \item Nasser, F., Benson, J., & Wisenbaker, J. (2002). The performance of #' regression-based variations of the visual scree for determining the number #' of common factors. Educational and psychological measurement, 62(3), #' 397-419. #' #' \item Golino, H., Shi, D., Garrido, L. E., Christensen, A. P., Nieto, M. #' D., Sadana, R., & Thiyagarajan, J. A. (2018). Investigating the performance #' of Exploratory Graph Analysis and traditional techniques to identify the #' number of latent factors: A simulation and tutorial. #' #' \item Golino, H. F., & Epskamp, S. (2017). Exploratory graph analysis: A #' new approach for estimating the number of dimensions in psychological #' research. PloS one, 12(6), e0174035. #' #' \item Revelle, W., & Rocklin, T. (1979). Very simple structure: An #' alternative procedure for estimating the optimal number of interpretable #' factors. Multivariate Behavioral Research, 14(4), 403-414. #' #' \item Velicer, W. F. (1976). Determining the number of components from the #' matrix of partial correlations. Psychometrika, 41(3), 321-327. 
#' } #' @export n_factors <- function(x, type = "FA", rotation = "varimax", algorithm = "default", package = c("nFactors", "psych"), cor = NULL, safe = TRUE, n_max = NULL, ...) { if (all(package == "all")) { package <- c("nFactors", "EGAnet", "psych", "fit", "pcdimension") } # Get number of observations if(!is.data.frame(x)) { if(is.numeric(x) && !is.null(cor)) { nobs <- x package <- package[!package %in% c("pcdimension", "PCDimension")] } else if(is.matrix(x) || inherits(x, "easycormatrix")) { stop("Please input the correlation matrix via the `cor = ...` argument and the number of rows / observations via the first argument.") } } else { nobs <- nrow(x) } # Correlation matrix if (is.null(cor)) { cor <- stats::cor(x, use = "pairwise.complete.obs", ...) } eigen_values <- eigen(cor)$values # Smooth matrix if negative eigen values if(any(eigen_values < 0)) { insight::check_if_installed("psych") cor <- psych::cor.smooth(cor, ...) eigen_values <- eigen(cor)$values } # Initialize dataframe out <- data.frame() # nFactors ------------------------------------------- if ("nFactors" %in% package) { insight::check_if_installed("nFactors") # Model if (tolower(type) %in% c("fa", "factor", "efa")) { model <- "factors" } else { model <- "components" } # Compute all if (safe) { out <- rbind( out, tryCatch(.n_factors_bartlett(eigen_values, model, nobs), warning = function(w) data.frame(), error = function(e) data.frame() ) ) out <- rbind( out, tryCatch(.n_factors_bentler(eigen_values, model, nobs), warning = function(w) data.frame(), error = function(e) data.frame() ) ) out <- rbind( out, tryCatch(.n_factors_cng(eigen_values, model), warning = function(w) data.frame(), error = function(e) data.frame() ) ) out <- rbind( out, tryCatch(.n_factors_mreg(eigen_values, model), warning = function(w) data.frame(), error = function(e) data.frame() ) ) out <- rbind( out, tryCatch(.n_factors_scree(eigen_values, model), warning = function(w) data.frame(), error = function(e) data.frame() ) ) out 
<- rbind( out, tryCatch(.n_factors_sescree(eigen_values, model), warning = function(w) data.frame(), error = function(e) data.frame() ) ) } else { out <- rbind( out, .n_factors_bartlett(eigen_values, model, nobs) ) out <- rbind( out, .n_factors_bentler(eigen_values, model, nobs) ) out <- rbind( out, .n_factors_cng(eigen_values, model) ) out <- rbind( out, .n_factors_mreg(eigen_values, model) ) out <- rbind( out, .n_factors_scree(eigen_values, model) ) out <- rbind( out, .n_factors_sescree(eigen_values, model) ) } } # EGAnet ------------------------------------------- if ("EGAnet" %in% package) { insight::check_if_installed("EGAnet") if (safe) { out <- rbind( out, tryCatch(.n_factors_ega(x, cor, nobs, eigen_values, type), warning = function(w) data.frame(), error = function(e) data.frame() ) ) } else { out <- rbind( out, .n_factors_ega(x, cor, nobs, eigen_values, type) ) } } # psych ------------------------------------------- if ("psych" %in% package) { insight::check_if_installed("psych") if (safe) { out <- rbind( out, tryCatch(.n_factors_vss(x, cor, nobs, type, rotation, algorithm), warning = function(w) data.frame(), error = function(e) data.frame() ) ) } else { out <- rbind( out, .n_factors_vss(x, cor, nobs, type, rotation, algorithm) ) } } # fit ------------------------------------------- if ("fit" %in% package) { insight::check_if_installed("psych") if (safe) { out <- rbind( out, tryCatch(.n_factors_fit(x, cor, nobs, type, rotation, algorithm), warning = function(w) data.frame(), error = function(e) data.frame() ) ) } else { out <- rbind( out, .n_factors_fit(x, cor, nobs, type, rotation, algorithm) ) } } # fit ------------------------------------------- if ("pcdimension" %in% tolower(package)) { insight::check_if_installed("PCDimension") if (safe) { out <- rbind( out, tryCatch(.n_factors_PCDimension(x, type), warning = function(w) data.frame(), error = function(e) data.frame() ) ) } else { out <- rbind( out, .n_factors_PCDimension(x, type) ) } } # OUTPUT 
---------------------------------------------- # TODO created weighted composite score out <- out[!is.na(out$n_Factors), ] # Remove empty methods out <- out[order(out$n_Factors), ] # Arrange by n factors row.names(out) <- NULL # Reset row index if(!is.null(n_max)) { out <- out[out$n_Factors <= n_max, ] } # Add summary by_factors <- .data_frame( n_Factors = as.numeric(unique(out$n_Factors)), n_Methods = as.numeric(by(out, as.factor(out$n_Factors), function(out) n <- nrow(out))) ) attr(out, "summary") <- by_factors attr(out, "n") <- min(as.numeric(as.character(by_factors[by_factors$n_Methods == max(by_factors$n_Methods), c("n_Factors")]))) class(out) <- c("n_factors", "see_n_factors", class(out)) out } #' @rdname n_factors #' @export n_components <- function(x, type = "PCA", rotation = "varimax", algorithm = "default", package = c("nFactors", "psych"), cor = NULL, safe = TRUE, ...) { n_factors( x, type = type, rotation = rotation, algorithm = algorithm, package = package, cor = cor, safe = safe, ... ) } #' @export print.n_factors <- function(x, ...) { results <- attributes(x)$summary # Extract info max_methods <- max(results$n_Methods) best_n <- attributes(x)$n # Extract methods if ("n_Factors" %in% names(x)) { type <- "factor" methods_text <- paste0(as.character(x[x$n_Factors == best_n, "Method"]), collapse = ", ") } else { type <- "cluster" methods_text <- paste0(as.character(x[x$n_Clusters == best_n, "Method"]), collapse = ", ") } # Text text <- paste0( "The choice of ", as.character(best_n), ifelse(type == "factor", " dimensions ", " clusters "), "is supported by ", max_methods, " (", sprintf("%.2f", max_methods / nrow(x) * 100), "%) methods out of ", nrow(x), " (", methods_text, ").\n" ) insight::print_color("# Method Agreement Procedure:\n\n", "blue") cat(text) invisible(x) } #' @export summary.n_factors <- function(object, ...) { attributes(object)$summary } #' @export as.numeric.n_factors <- function(x, ...) 
{ attributes(x)$n } #' @export as.double.n_factors <- as.numeric.n_factors #' @export summary.n_clusters <- summary.n_factors #' @export as.numeric.n_clusters <- as.numeric.n_factors #' @export as.double.n_clusters <- as.double.n_factors #' @export print.n_clusters <- print.n_factors # Methods ----------------------------------------------------------------- #' Bartlett, Anderson and Lawley Procedures #' @keywords internal .n_factors_bartlett <- function(eigen_values = NULL, model = "factors", nobs = NULL) { nfac <- nFactors::nBartlett( eigen_values, N = nobs, cor = TRUE, alpha = 0.05, details = FALSE )$nFactors data.frame( n_Factors = as.numeric(nfac), Method = .capitalize(names(nfac)), Family = "Barlett" ) } #' Bentler and Yuan's Procedure #' @keywords internal .n_factors_bentler <- function(eigen_values = NULL, model = "factors", nobs = NULL) { nfac <- .nBentler( x = eigen_values, N = nobs, model = model, alpha = 0.05, details = FALSE )$nFactors data.frame( n_Factors = as.numeric(nfac), Method = "Bentler", Family = "Bentler" ) } #' Cattell-Nelson-Gorsuch CNG Indices #' @keywords internal .n_factors_cng <- function(eigen_values = NULL, model = "factors") { if (length(eigen_values) < 6) { nfac <- NA } else { nfac <- nFactors::nCng(x = eigen_values, cor = TRUE, model = model)$nFactors } data.frame( n_Factors = as.numeric(nfac), Method = "CNG", Family = "CNG" ) } #' Multiple Regression Procedure #' @keywords internal .n_factors_mreg <- function(eigen_values = NULL, model = "factors") { if (length(eigen_values) < 6) { nfac <- NA } else { nfac <- nFactors::nMreg(x = eigen_values, cor = TRUE, model = model)$nFactors } data.frame( n_Factors = as.numeric(nfac), Method = c("beta", "t", "p"), Family = "Multiple_regression" ) } #' Non Graphical Cattell's Scree Test #' @keywords internal .n_factors_scree <- function(eigen_values = NULL, model = "factors") { nfac <- unlist(nFactors::nScree(x = eigen_values, cor = TRUE, model = model)$Components) data.frame( n_Factors = 
as.numeric(nfac), Method = c("Optimal coordinates", "Acceleration factor", "Parallel analysis", "Kaiser criterion"), Family = "Scree" ) } #' Standard Error Scree and Coefficient of Determination Procedures #' @keywords internal .n_factors_sescree <- function(eigen_values = NULL, model = "factors") { nfac <- nFactors::nSeScree(x = eigen_values, cor = TRUE, model = model)$nFactors data.frame( n_Factors = as.numeric(nfac), Method = c("Scree (SE)", "Scree (R2)"), Family = "Scree_SE" ) } # EGAnet ------------------------ #' @keywords internal .n_factors_ega <- function(x = NULL, cor = NULL, nobs = NULL, eigen_values = NULL, type = "FA") { # Replace with own correlation matrix junk <- utils::capture.output(suppressWarnings(suppressMessages(nfac_glasso <- EGAnet::EGA(cor, n = nobs, model = "glasso", plot.EGA = FALSE)$n.dim))) junk <- utils::capture.output(suppressWarnings(suppressMessages(nfac_TMFG <- EGAnet::EGA(cor, n = nobs, model = "TMFG", plot.EGA = FALSE)$n.dim))) data.frame( n_Factors = as.numeric(c(nfac_glasso, nfac_TMFG)), Method = c("EGA (glasso)", "EGA (TMFG)"), Family = "EGA" ) } # psych ------------------------ #' @keywords internal .n_factors_vss <- function(x = NULL, cor = NULL, nobs = NULL, type = "FA", rotation = "varimax", algorithm = "default") { if (algorithm == "default") { if (tolower(type) %in% c("fa", "factor", "efa")) { algorithm <- "minres" } else { algorithm <- "pc" } } # Compute VSS vss <- psych::VSS( cor, n = ncol(x) - 1, n.obs = nobs, rotate = rotation, fm = algorithm, plot = FALSE ) # Format results stats <- vss$vss.stats stats$map <- vss$map stats$n_Factors <- seq_len(nrow(stats)) names(stats) <- gsub("cfit.", "VSS_Complexity_", names(stats)) # Indices vss_1 <- which.max(stats$VSS_Complexity_1) vss_2 <- which.max(stats$VSS_Complexity_2) velicer_MAP <- which.min(stats$map) BIC_reg <- which.min(stats$BIC) BIC_adj <- which.min(stats$SABIC) BIC_reg <- ifelse(length(BIC_reg) == 0, NA, BIC_reg) BIC_adj <- ifelse(length(BIC_adj) == 0, NA, BIC_adj) 
data.frame( n_Factors = as.numeric(c(vss_1, vss_2, velicer_MAP, BIC_reg, BIC_adj)), Method = c("VSS complexity 1", "VSS complexity 2", "Velicer's MAP", "BIC", "BIC (adjusted)"), Family = c("VSS", "VSS", "Velicers_MAP", "BIC", "BIC") ) } #' @keywords internal .n_factors_fit <- function(x = NULL, cor = NULL, nobs = NULL, type = "FA", rotation = "varimax", algorithm = "default", threshold = 0.1) { if (algorithm == "default") { if (tolower(type) %in% c("fa", "factor", "efa")) { algorithm <- "minres" } else { algorithm <- "pc" } } rez <- data.frame() for (n in 1:(ncol(cor) - 1)) { if (tolower(type) %in% c("fa", "factor", "efa")) { factors <- tryCatch(suppressWarnings(psych::fa(cor, nfactors = n, n.obs = nobs, rotate = rotation, fm = algorithm )), error = function(e) NA ) } else { factors <- tryCatch(suppressWarnings(psych::pca(cor, nfactors = n, n.obs = nobs, rotate = rotation )), error = function(e) NA ) } if (all(is.na(factors))) { next } rmsea <- ifelse(is.null(factors$RMSEA), NA, factors$RMSEA[1]) rmsr <- ifelse(is.null(factors$rms), NA, factors$rms) crms <- ifelse(is.null(factors$crms), NA, factors$crms) bic <- ifelse(is.null(factors$BIC), NA, factors$BIC) tli <- ifelse(is.null(factors$TLI), NA, factors$TLI) rez <- rbind( rez, data.frame( n = n, Fit = factors$fit.off, TLI = tli, RMSEA = rmsea, RMSR = rmsr, CRMS = crms, BIC = bic ) ) } # For fit indices that constantly increase / decrease, we need to find # an "elbow"/"knee". Here we take the first value that reaches 90 percent # of the range between the max and the min (when 'threshold = 0.1'). 
# Fit if(all(is.na(rez$Fit))) { fit_off <- NA } else { target <- max(rez$Fit, na.rm = TRUE) - threshold * diff(range(rez$Fit, na.rm = TRUE)) fit_off <- rez[!is.na(rez$Fit) & rez$Fit >= target, "n"][1] } # TLI if(all(is.na(rez$TLI))) { TLI <- NA } else { target <- max(rez$TLI, na.rm = TRUE) - threshold * diff(range(rez$TLI, na.rm = TRUE)) TLI <- rez[!is.na(rez$TLI) & rez$TLI >= target, "n"][1] } # RMSEA if(all(is.na(rez$RMSEA))) { RMSEA <- NA } else { target <- min(rez$RMSEA, na.rm = TRUE) + threshold * diff(range(rez$RMSEA, na.rm = TRUE)) RMSEA <- rez[!is.na(rez$RMSEA) & rez$RMSEA <= target, "n"][1] } # RMSR if(all(is.na(rez$RMSR))) { RMSR <- NA } else { target <- min(rez$RMSR, na.rm = TRUE) + threshold * diff(range(rez$RMSR, na.rm = TRUE)) RMSR <- rez[!is.na(rez$RMSR) & rez$RMSR <= target, "n"][1] } # CRMS if(all(is.na(rez$CRMS))) { CRMS <- NA } else { target <- min(rez$CRMS, na.rm = TRUE) + threshold * diff(range(rez$CRMS, na.rm = TRUE)) CRMS <- rez[!is.na(rez$CRMS) & rez$CRMS <= target, "n"][1] } # BIC (this is a penalized method so we can just take the one that minimizes it) BIC <- ifelse(all(is.na(rez$BIC)), NA, rez[!is.na(rez$BIC) & rez$BIC == min(rez$BIC, na.rm = TRUE), "n"]) data.frame( n_Factors = c(fit_off, TLI, RMSEA, RMSR, CRMS, BIC), Method = c("Fit_off", "TLI", "RMSEA", "RMSR", "CRMS", "BIC"), Family = c("Fit", "Fit", "Fit", "Fit", "Fit", "Fit") ) } # PCDimension ------------------------ #' @keywords internal .n_factors_PCDimension <- function(x = NULL, type = "PCA") { # This package is a strict dependency of PCDimension so if users have the # former they should have it insight::check_if_installed("ClassDiscovery") # Only applies to PCA with full data if(tolower(type) %in% c("fa", "factor", "efa") || !is.data.frame(x)) { return(data.frame()) } # Randomization-Based Methods rez_rnd <- PCDimension::rndLambdaF(x) # Broken-Stick spca <- ClassDiscovery::SamplePCA(t(x)) lambda <- spca@variances[1:(ncol(x)-1)] rez_bokenstick <- 
PCDimension::bsDimension(lambda) # Auer-Gervini ag <- PCDimension::AuerGervini(spca) agfuns <- list(twice=PCDimension::agDimTwiceMean, specc=PCDimension::agDimSpectral, km=PCDimension::agDimKmeans, km3=PCDimension::agDimKmeans3, # tt=PCDimension::agDimTtest, # known to overestimate # cpm=PCDimension::makeAgCpmFun("Exponential"), # known to overestimate tt2=PCDimension::agDimTtest2, cpt=PCDimension::agDimCPT) rez_ag <- PCDimension::compareAgDimMethods(ag, agfuns) data.frame( n_Factors = as.numeric(c(rez_rnd, rez_bokenstick, rez_ag)), Method = c("Random (lambda)", "Random (F)", "Broken-Stick", "Auer-Gervini (twice)", "Auer-Gervini (spectral)", "Auer-Gervini (kmeans-2)", "AuerGervini (kmeans-3)", "Auer-Gervini (T)", "AuerGervini (CPT)"), Family = "PCDimension" ) } # Re-implementation of nBentler in nFactors ------------------------ #' @keywords internal .nBentler <- function(x, N, model = model, log = TRUE, alpha = 0.05, cor = TRUE, details = TRUE, ...) { insight::check_if_installed("nFactors") lambda <- nFactors::eigenComputes(x, cor = cor, model = model, ...) if (length(which(lambda < 0)) > 0) { stop("These indices are only valid with a principal component solution. 
So, only positive eigenvalues are permitted.") } minPar <- c(min(lambda) - abs(min(lambda)) + .001, 0.001) maxPar <- c(max(lambda), stats::lm(lambda ~ I(length(lambda):1))$coef[2]) n <- N significance <- alpha min.k <- 3 LRT <- data.frame( q = numeric(length(lambda) - min.k), k = numeric(length(lambda) - min.k), LRT = numeric(length(lambda) - min.k), a = numeric(length(lambda) - min.k), b = numeric(length(lambda) - min.k), p = numeric(length(lambda) - min.k), convergence = numeric(length(lambda) - min.k) ) bentler.n <- 0 for (i in 1:(length(lambda) - min.k)) { temp <- nFactors::bentlerParameters( x = lambda, N = n, nFactors = i, log = log, cor = cor, minPar = minPar, maxPar = maxPar, graphic = FALSE ) LRT[i, 3] <- temp$lrt LRT[i, 4] <- ifelse(is.null(temp$coef[1]), NA, temp$coef[1]) LRT[i, 5] <- ifelse(is.null(temp$coef[2]), NA, temp$coef[2]) LRT[i, 6] <- ifelse(is.null(temp$p.value), NA, temp$p.value) LRT[i, 7] <- ifelse(is.null(temp$convergence), NA, temp$convergence) LRT[i, 2] <- i LRT[i, 1] <- length(lambda) - i } # LRT <- LRT[order(LRT[,1],decreasing = TRUE),] for (i in 1:(length(lambda) - min.k)) { if (i == 1) bentler.n <- bentler.n + as.numeric(LRT$p[i] <= significance) if (i > 1) { if (LRT$p[i - 1] <= 0.05) bentler.n <- bentler.n + as.numeric(LRT$p[i] <= significance) } } if (bentler.n == 0) bentler.n <- length(lambda) if (details == TRUE) details <- LRT else details <- NULL res <- list(detail = details, nFactors = bentler.n) class(res) <- c("nFactors", "list") res } parameters/R/utils_pca_efa.R0000644000175000017500000002611514131014354015672 0ustar nileshnilesh# model parameters ----------------------------------------------------------------- #' @export model_parameters.parameters_efa <- function(model, ...) 
{ x <- attributes(model)$summary if ("parameters_efa" %in% class(model)) { class(x) <- c("parameters_efa_summary", class(model)) } else { class(x) <- c("parameters_pca_summary", class(model)) } x } #' @export model_parameters.parameters_pca <- model_parameters.parameters_efa # summary ----------------------------------------------------------------- #' @export summary.parameters_efa <- function(object, ...) { x <- attributes(object)$summary cols <- intersect( c("Std_Dev", "Eigenvalues", "Variance", "Variance_Cumulative", "Variance_Proportion"), colnames(x) ) x <- as.data.frame(t(x[, cols])) x <- cbind(data.frame("Parameter" = row.names(x), stringsAsFactors = FALSE), x) names(x) <- c("Parameter", attributes(object)$summary$Component) row.names(x) <- NULL if ("parameters_efa" %in% class(object)) { class(x) <- c("parameters_efa_summary", class(object)) } else { class(x) <- c("parameters_pca_summary", class(object)) } x } #' @export summary.parameters_pca <- summary.parameters_efa #' @export summary.parameters_omega <- function(object, ...) { table_var <- attributes(object)$summary class(table_var) <- c("parameters_omega_summary", class(table_var)) table_var } # predict ----------------------------------------------------------------- #' @rdname principal_components #' @export predict.parameters_efa <- function(object, newdata = NULL, names = NULL, keep_na = TRUE, ...) 
{ if (is.null(newdata)) { out <- as.data.frame(attributes(object)$scores) if (isTRUE(keep_na)) { out <- .merge_na(object, out) } } else { out <- as.data.frame(stats::predict(attributes(object)$model, newdata = newdata, ...)) } if (!is.null(names)) { names(out)[1:length(c(names))] <- names } else { names(out) <- names(get_scores(object)) } row.names(out) <- NULL out } #' @export predict.parameters_pca <- predict.parameters_efa .merge_na <- function(object, out) { compl_cases <- attributes(object)$complete_cases if (is.null(compl_cases)) { warning(insight::format_message("Could not retrieve information about missing data. Returning only complete cases."), call. = FALSE) } else { original_data <- data.frame(.parameters_merge_id = 1:length(compl_cases)) out$.parameters_merge_id <- (1:nrow(original_data))[compl_cases] out <- merge(original_data, out, by = ".parameters_merge_id", all = TRUE, sort = TRUE) out$.parameters_merge_id <- NULL } out } # print ------------------------------------------------------------------- #' @export print.parameters_efa_summary <- function(x, digits = 3, ...) { if ("Parameter" %in% names(x)) { x$Parameter <- c("Eigenvalues", "Variance Explained", "Variance Explained (Cumulative)", "Variance Explained (Proportion)") } else if ("Component" %in% names(x)) { names(x) <- c("Component", "Eigenvalues", "Variance Explained", "Variance Explained (Cumulative)", "Variance Explained (Proportion)") } cat(insight::export_table(x, digits = digits, caption = c("# (Explained) Variance of Components", "blue"), format = "text", ...)) invisible(x) } #' @export print.parameters_pca_summary <- print.parameters_efa_summary #' @rdname principal_components #' @export print.parameters_efa <- function(x, digits = 2, sort = FALSE, threshold = NULL, labels = NULL, ...) { cat( .print_parameters_cfa_efa( x, threshold = threshold, sort = sort, format = "text", digits = digits, labels = labels, ... 
) ) invisible(x) } #' @export print.parameters_pca <- print.parameters_efa #' @export print.parameters_omega <- function(x, ...) { orig_x <- x names(x) <- c("Composite", "Omega (total)", "Omega (hierarchical)", "Omega (group)") cat(insight::export_table(x)) invisible(orig_x) } #' @export print.parameters_omega_summary <- function(x, ...) { orig_x <- x names(x) <- c("Composite", "Total Variance (%)", "Variance due to General Factor (%)", "Variance due to Group Factor (%)") cat(insight::export_table(x)) invisible(orig_x) } # print-helper ---------------------- .print_parameters_cfa_efa <- function(x, threshold, sort, format, digits, labels, ...) { # Method if (inherits(x, "parameters_pca")) { method <- "Principal Component Analysis" } else { method <- "Factor Analysis" } # Rotation rotation_name <- attr(x, "rotation", exact = TRUE) # Labels if (!is.null(labels)) { x$Label <- labels x <- x[c("Variable", "Label", names(x)[!names(x) %in% c("Variable", "Label")])] } # Sorting if (isTRUE(sort)) { x <- .sort_loadings(x) } # Replace by NA all cells below threshold if (!is.null(threshold)) { x <- .filter_loadings(x, threshold = threshold) } # table caption if (is.null(rotation_name) || rotation_name == "none") { if (format == "markdown") { table_caption <- sprintf("Loadings from %s (no rotation)", method) } else { table_caption <- c(sprintf("# Loadings from %s (no rotation)", method), "blue") } } else { if (format == "markdown") { table_caption <- sprintf("Rotated loadings from %s (%s-rotation)", method, rotation_name) } else { table_caption <- c(sprintf("# Rotated loadings from %s (%s-rotation)", method, rotation_name), "blue") } } # footer if (!is.null(attributes(x)$type)) { footer <- c(.text_components_variance(x, sep = ifelse(format == "markdown", "", "\n")), "yellow") } else { footer <- NULL } insight::export_table( x, digits = digits, format = format, caption = table_caption, footer = footer, align = "firstleft", ... 
) } #' @keywords internal .text_components_variance <- function(x, sep = "") { type <- attributes(x)$type if (type %in% c("prcomp", "principal", "pca")) { type <- "principal component" } else if (type %in% c("fa")) { type <- "latent factor" } else if (type %in% c("kmeans", "hclust", "pvclust", "dbscan", "mixture", "pam")) { type <- "cluster" } else { type <- paste0(type, " component") } if (type == "cluster") { summary <- as.data.frame(x) variance <- attributes(x)$variance * 100 } else { summary <- attributes(x)$summary variance <- max(summary$Variance_Cumulative) * 100 } if (nrow(summary) == 1) { text <- paste0("The unique ", type) } else { text <- paste0("The ", nrow(summary), " ", type, "s") } # rotation if (!is.null(attributes(x)$rotation) && attributes(x)$rotation != "none") { text <- paste0(text, " (", attributes(x)$rotation, " rotation)") } text <- paste0( text, " accounted for ", sprintf("%.2f", variance), "% of the total variance of the original data" ) if (type == "cluster" || nrow(summary) == 1) { text <- paste0(text, ".") } else { text <- paste0( text, " (", paste0(summary$Component, " = ", sprintf("%.2f", summary$Variance * 100), "%", collapse = ", " ), ")." ) } paste0(sep, text, sep) } # sort -------------------------------------------------------------------- #' @rdname principal_components #' @export sort.parameters_efa <- function(x, ...) 
{ .sort_loadings(x) } #' @export sort.parameters_pca <- sort.parameters_efa #' @keywords internal .sort_loadings <- function(loadings, cols = NULL) { if (is.null(cols)) { cols <- attributes(loadings)$loadings_columns } # Remove variable name column x <- loadings[, cols, drop = FALSE] row.names(x) <- NULL # Initialize clusters nitems <- nrow(x) loads <- data.frame(item = seq(1:nitems), cluster = rep(0, nitems)) # first sort them into clusters: Find the maximum for each row and assign it to that cluster loads$cluster <- apply(abs(x), 1, which.max) ord <- sort(loads$cluster, index.return = TRUE) x[1:nitems, ] <- x[ord$ix, ] rownames(x)[1:nitems] <- rownames(x)[ord$ix] total.ord <- ord$ix # now sort column wise so that the loadings that have their highest loading on each cluster items <- table(loads$cluster) # how many items are in each cluster? first <- 1 item <- loads$item for (i in 1:length(items)) { if (items[i] > 0) { last <- first + items[i] - 1 ord <- sort(abs(x[first:last, i]), decreasing = TRUE, index.return = TRUE) x[first:last, ] <- x[item[ord$ix + first - 1], ] loads[first:last, 1] <- item[ord$ix + first - 1] rownames(x)[first:last] <- rownames(x)[ord$ix + first - 1] total.ord[first:last] <- total.ord[ord$ix + first - 1] first <- first + items[i] } } order <- row.names(x) loadings <- loadings[as.numeric(as.character(order)), ] # Arrange by max row.names(loadings) <- NULL loadings } # Filter -------------------------------------------------------------------- #' @keywords internal .filter_loadings <- function(loadings, threshold = 0.2, loadings_columns = NULL) { if (is.null(loadings_columns)) { loadings_columns <- attributes(loadings)$loadings_columns } if (threshold == "max" | threshold >= 1) { if (threshold == "max") { for (row in 1:nrow(loadings)) { maxi <- max(abs(loadings[row, loadings_columns, drop = FALSE])) loadings[row, loadings_columns][abs(loadings[row, loadings_columns]) < maxi] <- NA } } else { for (col in loadings_columns) { 
loadings[utils::tail(order(abs(loadings[, col]), decreasing = TRUE), -round(threshold)), col] <- NA } } } else { loadings[, loadings_columns][abs(loadings[, loadings_columns]) < threshold] <- NA } loadings } # closest_component ------------------------------------------------------- #' @rdname principal_components #' @export closest_component <- function(pca_results) { if ("closest_component" %in% names(attributes(pca_results))) { attributes(pca_results)$closest_component } else { .closest_component(pca_results) } } .closest_component <- function(loadings, loadings_columns = NULL, variable_names = NULL) { if (is.matrix(loadings)) loadings <- as.data.frame(loadings) if (is.null(loadings_columns)) loadings_columns <- 1:ncol(loadings) if (is.null(variable_names)) variable_names <- row.names(loadings) component_columns <- apply(loadings[loadings_columns], 1, function(i) which.max(abs(i))) stats::setNames(component_columns, variable_names) } parameters/R/methods_coda.R0000644000175000017500000000010713766351403015532 0ustar nileshnilesh#' @export model_parameters.mcmc.list <- model_parameters.data.frame parameters/R/standard_error_satterthwaite.R0000644000175000017500000000133114036353021021047 0ustar nileshnilesh#' @rdname p_value_satterthwaite #' @export se_satterthwaite <- function(model) { UseMethod("se_satterthwaite") } #' @export se_satterthwaite.default <- function(model) { standard_error(model) } #' @export se_satterthwaite.lme <- function(model) { if (!requireNamespace("lavaSearch2", quietly = TRUE)) { stop("Package `lavaSearch2` required for Satterthwaite approximation.", call. 
= FALSE) } params <- insight::get_parameters(model, effects = "fixed") lavaSearch2::sCorrect(model) <- TRUE s <- lavaSearch2::summary2(model) .data_frame( Parameter = params$Parameter, SE = as.vector(s$tTable[, "Std.Error"]) ) } #' @export se_satterthwaite.gls <- se_satterthwaite.lme parameters/R/methods_aov.R0000644000175000017500000004535214166656741015434 0ustar nileshnilesh# classes: .aov, .anova, aovlist, anova.rms, maov, afex_aov # .aov ------ #' Parameters from ANOVAs #' #' @param model Object of class [aov()], [anova()], #' `aovlist`, `Gam`, [manova()], `Anova.mlm`, #' `afex_aov` or `maov`. #' @param omega_squared Compute omega squared as index of effect size. Can be #' `"partial"` (the default, adjusted for effect size) or `"raw"`. #' @param eta_squared Compute eta squared as index of effect size. Can be #' `"partial"` (the default, adjusted for effect size), `"raw"` or #' `"adjusted"` (the latter option only for ANOVA-tables from mixed #' models). #' @param epsilon_squared Compute epsilon squared as index of effect size. Can #' be `"partial"` (the default, adjusted for effect size) or #' `"raw"`. #' @param df_error Denominator degrees of freedom (or degrees of freedom of the #' error estimate, i.e., the residuals). This is used to compute effect sizes #' for ANOVA-tables from mixed models. See 'Examples'. (Ignored for #' `afex_aov`.) #' @param type Numeric, type of sums of squares. May be 1, 2 or 3. If 2 or 3, #' ANOVA-tables using `car::Anova()` will be returned. (Ignored for #' `afex_aov`.) #' @param ci Confidence Interval (CI) level for effect sizes #' `omega_squared`, `eta_squared` etc. The default, `NULL`, #' will compute no confidence intervals. `ci` should be a scalar between #' 0 and 1. #' @param test String, indicating the type of test for `Anova.mlm` to be #' returned. If `"multivariate"` (or `NULL`), returns the summary of #' the multivariate test (that is also given by the `print`-method). 
If #' `test = "univariate"`, returns the summary of the univariate test. #' @param power Logical, if `TRUE`, adds a column with power for each #' parameter. #' @param table_wide Logical that decides whether the ANOVA table should be in #' wide format, i.e. should the numerator and denominator degrees of freedom #' be in the same row. Default: `FALSE`. #' @param alternative A character string specifying the alternative hypothesis; #' Controls the type of CI returned: `"two.sided"` (default, two-sided CI), #' `"greater"` or `"less"` (one-sided CI). Partial matching is allowed #' (e.g., `"g"`, `"l"`, `"two"`...). See section *One-Sided CIs* in #' the [effectsize_CIs vignette](https://easystats.github.io/effectsize/). #' @inheritParams model_parameters.default #' @param ... Arguments passed to or from other methods. #' #' @return A data frame of indices related to the model's parameters. #' #' @note For ANOVA-tables from mixed models (i.e. `anova(lmer())`), only #' partial or adjusted effect sizes can be computed. Note that type 3 ANOVAs #' with interactions involved only give sensible and informative results when #' covariates are mean-centred and factors are coded with orthogonal contrasts #' (such as those produced by `contr.sum`, `contr.poly`, or #' `contr.helmert`, but *not* by the default `contr.treatment`). 
#' #' @examples #' if (requireNamespace("effectsize", quietly = TRUE)) { #' df <- iris #' df$Sepal.Big <- ifelse(df$Sepal.Width >= 3, "Yes", "No") #' #' model <- aov(Sepal.Length ~ Sepal.Big, data = df) #' model_parameters( #' model, #' omega_squared = "partial", #' eta_squared = "partial", #' epsilon_squared = "partial" #' ) #' #' model_parameters( #' model, #' omega_squared = "partial", #' eta_squared = "partial", #' ci = .9 #' ) #' #' model <- anova(lm(Sepal.Length ~ Sepal.Big, data = df)) #' model_parameters(model) #' model_parameters( #' model, #' omega_squared = "partial", #' eta_squared = "partial", #' epsilon_squared = "partial" #' ) #' #' model <- aov(Sepal.Length ~ Sepal.Big + Error(Species), data = df) #' model_parameters(model) #' #' \dontrun{ #' if (require("lme4")) { #' mm <- lmer(Sepal.Length ~ Sepal.Big + Petal.Width + (1 | Species), #' data = df #' ) #' model <- anova(mm) #' #' # simple parameters table #' model_parameters(model) #' #' # parameters table including effect sizes #' model_parameters( #' model, #' eta_squared = "partial", #' ci = .9, #' df_error = dof_satterthwaite(mm)[2:3] #' ) #' } #' } #' } #' @export model_parameters.aov <- function(model, omega_squared = NULL, eta_squared = NULL, epsilon_squared = NULL, df_error = NULL, type = NULL, ci = NULL, alternative = NULL, test = NULL, power = FALSE, keep = NULL, drop = NULL, parameters = keep, table_wide = FALSE, verbose = TRUE, ...) { # save model object, for later checks original_model <- model object_name <- deparse(substitute(model), width.cutoff = 500) if (inherits(model, "aov") && !is.null(type) && type > 1) { if (!requireNamespace("car", quietly = TRUE)) { warning(insight::format_message("Package 'car' required for type-2 or type-3 anova. Defaulting to type-1."), call. 
= FALSE) } else { model <- car::Anova(model, type = type) } } # try to extract type of anova table if (is.null(type)) { type <- .anova_type(model) } # exceptions if (.is_levenetest(model)) { return(model_parameters.htest(model, ...)) } # check contrasts if (verbose) { .check_anova_contrasts(original_model, type) } # extract standard parameters params <- .extract_parameters_anova(model, test) # add effect sizes, if available params <- .effectsizes_for_aov( model, parameters = params, omega_squared = omega_squared, eta_squared = eta_squared, epsilon_squared = epsilon_squared, df_error = df_error, ci = ci, alternative = alternative, verbose = verbose ) # add power, if possible if (isTRUE(power)) { params <- .power_for_aov(model, params) } # filter parameters if (!is.null(keep) || !is.null(drop)) { params <- .filter_parameters(params, keep = keep, drop = drop, verbose = verbose) } # wide or long? if (table_wide) { params <- .anova_table_wide(params) } # add attributes params <- .add_anova_attributes(params, model, ci, test = test, ...) class(params) <- c("parameters_model", "see_parameters_model", class(params)) attr(params, "object_name") <- object_name params } #' @export standard_error.aov <- function(model, ...) { params <- model_parameters(model) .data_frame( Parameter = params$Parameter, SE = params$SE ) } #' @export p_value.aov <- function(model, ...) 
{ params <- model_parameters(model) if (nrow(params) == 0) { return(NA) } if ("Group" %in% names(params)) { params <- params[params$Group == "Within", ] } if ("Residuals" %in% params$Parameter) { params <- params[params$Parameter != "Residuals", ] } if (!"p" %in% names(params)) { return(NA) } .data_frame( Parameter = params$Parameter, p = params$p ) } # .anova ------ #' @export standard_error.anova <- standard_error.aov #' @export p_value.anova <- p_value.aov #' @export model_parameters.anova <- model_parameters.aov # .aov.list ------ #' @export standard_error.aovlist <- standard_error.aov #' @export p_value.aovlist <- p_value.aov #' @export model_parameters.aovlist <- model_parameters.aov # .afex_aov ------ #' @export model_parameters.afex_aov <- function(model, omega_squared = NULL, eta_squared = NULL, epsilon_squared = NULL, df_error = NULL, type = NULL, keep = NULL, drop = NULL, parameters = keep, verbose = TRUE, ...) { if (inherits(model$Anova, "Anova.mlm")) { params <- model$anova_table with_df_and_p <- summary(model$Anova)$univariate.tests params$`Sum Sq` <- with_df_and_p[-1, 1] params$`Error SS` <- with_df_and_p[-1, 3] out <- .extract_parameters_anova(params, test = NULL) } else { out <- .extract_parameters_anova(model$Anova, test = NULL) } out <- .effectsizes_for_aov( model, parameters = out, omega_squared = omega_squared, eta_squared = eta_squared, epsilon_squared = epsilon_squared, df_error = df_error, verbose = verbose, ... ) # add attributes out <- .add_anova_attributes(out, model, ci, test = NULL, ...) 
# filter parameters if (!is.null(keep) || !is.null(drop)) { out <- .filter_parameters(out, keep = keep, drop = drop, verbose = verbose) } if (!"Method" %in% names(out)) { out$Method <- "ANOVA estimation for factorial designs using 'afex'" } attr(out, "title") <- unique(out$Method) attr(out, "object_name") <- deparse(substitute(model), width.cutoff = 500) class(out) <- unique(c("parameters_model", "see_parameters_model", class(out))) out } # others ------ #' @export model_parameters.anova.rms <- model_parameters.aov #' @export model_parameters.Anova.mlm <- model_parameters.aov #' @export model_parameters.maov <- model_parameters.aov # helper ------------------------------ .anova_type <- function(model, type = NULL) { if (is.null(type)) { type_to_numeric <- function(type) { if (is.numeric(type)) { return(type) } switch( type, "1" = , "I" = 1, "2" = , "II" = 2, "3" = , "III" = 3, 1 ) } # default to 1 type <- 1 if (!is.null(attr(model, "type", exact = TRUE))) { type <- type_to_numeric(attr(model, "type", exact = TRUE)) } else if (!is.null(attr(model, "heading"))) { heading <- attr(model, "heading")[1] if (grepl("(.*)Type (.*) Wald(.*)", heading)) { type <- type_to_numeric(trimws(gsub("(.*)Type (.*) Wald(.*)", "\\2", heading))) } else if (grepl("Type (.*) Analysis(.*)", heading)) { type <- type_to_numeric(trimws(gsub("Type (.*) Analysis(.*)", "\\1", heading))) } else if (grepl("(.*)Type (.*) tests(.*)", heading)) { type <- type_to_numeric(trimws(gsub("(.*)Type (.*) tests(.*)", "\\2", heading))) } } else if ("type" %in% names(model) && !is.null(model$type)) { type <- type_to_numeric(model$type) } } type } .check_anova_contrasts <- function(model, type) { # check only valid for anova tables of type III if (!is.null(type) && type == 3) { # check for interaction terms interaction_terms <- tryCatch( { insight::find_interactions(model, flatten = TRUE) }, error = function(e) { if (is.data.frame(model)) { if (any(grepl(":", row.names(model), fixed = TRUE))) { TRUE } else { NULL 
} } } ) # try to access data of model predictors predictors <- tryCatch( { insight::get_predictors(model) }, error = function(e) { NULL } ) # if data available, check contrasts and mean centering if (!is.null(predictors)) { treatment_contrasts_or_not_centered <- sapply(predictors, function(i) { if (is.factor(i)) { cn <- stats::contrasts(i) if (is.null(cn) || (all(cn %in% c(0, 1)))) { return(TRUE) } } else { if (abs(mean(i, na.rm = TRUE)) > 1e-2) { return(TRUE) } } return(FALSE) }) } else { treatment_contrasts_or_not_centered <- FALSE } # successfully checked predictors, or if not possible, at least found interactions? if (!is.null(interaction_terms) && (any(treatment_contrasts_or_not_centered) || is.null(predictors))) { message(insight::format_message("Type 3 ANOVAs only give sensible and informative results when covariates are mean-centered and factors are coded with orthogonal contrasts (such as those produced by 'contr.sum', 'contr.poly', or 'contr.helmert', but *not* by the default 'contr.treatment').")) } } } .effectsizes_for_aov <- function(model, parameters, omega_squared, eta_squared, epsilon_squared, df_error = NULL, ci = NULL, alternative = NULL, verbose = TRUE, ...) { # user actually does not want to compute effect sizes if (is.null(omega_squared) && is.null(eta_squared) && is.null(epsilon_squared)) { return(parameters) } insight::check_if_installed("effectsize", minimum_version = "0.5.0") # set error-df, when provided. 
if (!is.null(df_error) && is.data.frame(model) && !any(c("DenDF", "den Df", "denDF", "df_error") %in% colnames(model))) { if (length(df_error) > nrow(model)) { stop("Number of degrees of freedom in argument 'df_error' is larger than number of parameters.") } model$df_error <- df_error } # set defaults if (isTRUE(omega_squared)) { omega_squared <- "partial" } if (isTRUE(eta_squared)) { eta_squared <- "partial" } if (isTRUE(epsilon_squared)) { epsilon_squared <- "partial" } # Omega squared if (!is.null(omega_squared)) { fx <- effectsize::omega_squared(model, partial = omega_squared == "partial", ci = ci, alternative = alternative, verbose = verbose) parameters <- .add_effectsize_to_parameters(fx, parameters) } # Eta squared if (!is.null(eta_squared)) { fx <- effectsize::eta_squared(model, partial = eta_squared == "partial", ci = ci, alternative = alternative, verbose = verbose) parameters <- .add_effectsize_to_parameters(fx, parameters) } # Epsilon squared if (!is.null(epsilon_squared)) { fx <- effectsize::epsilon_squared(model, partial = epsilon_squared == "partial", ci = ci, alternative = alternative, verbose = verbose) parameters <- .add_effectsize_to_parameters(fx, parameters) } parameters } # internals -------------------------- .fix_effectsize_rows <- function(fx, parameters) { stat_column <- colnames(parameters)[colnames(parameters) %in% c("F", "t", "z", "statistic")] if (nrow(parameters) > length(fx)) { es <- rep_len(NA, length.out = nrow(parameters)) es[!is.na(parameters[[stat_column]])] <- fx fx <- es } fx } # retrieves those rows in a "model_parameters" object where # the statistic column is not missing .valid_effectsize_rows <- function(parameters, fx_params) { stat_column <- colnames(parameters)[colnames(parameters) %in% c("F", "t", "z", "statistic")] out <- !is.na(parameters[[stat_column]]) if (sum(out) > length(fx_params)) { out <- out & !is.na(match(parameters$Parameter, fx_params)) } out } # add effect size column and related CI to the parameters # 
data frame, automatically detecting the effect size name .add_effectsize_to_parameters <- function(fx, params) { if (!is.null(fx$CI_low)) { # find name of current effect size es <- effectsize::get_effectsize_name(colnames(fx)) # and add CI-name to effect size, to have specific # CI columns for this particular effect size ci_low <- paste0(gsub("_partial$", "", es), "_CI_low") ci_high <- paste0(gsub("_partial$", "", es), "_CI_high") # rename columns fx[[ci_low]] <- fx$CI_low fx[[ci_high]] <- fx$CI_high # delete old or duplicated columns fx$CI_low <- NULL fx$CI_high <- NULL fx$CI <- NULL } params$.id <- 1:nrow(params) params <- merge(params, fx, all.x = TRUE, sort = FALSE, by = intersect(c("Response", "Group", "Parameter"), intersect(colnames(params), colnames(fx)))) params <- params[order(params$.id), ] params$.id <- NULL params # fx_params <- fx$Parameter # if (is.null(fx_params)) { # fx_params <- params$Parameter # } # fx$Parameter <- NULL # fx$Response <- NULL # fx$Group <- NULL # es <- colnames(fx)[1] # valid_rows <- .valid_effectsize_rows(params, fx_params) # params[[es]][valid_rows] <- fx[[es]] # # if (!is.null(fx$CI_low)) { # ci_low <- paste0(gsub("_partial$", "", es), "_CI_low") # ci_high <- paste0(gsub("_partial$", "", es), "_CI_high") # params[[ci_low]][valid_rows] <- fx$CI_low # params[[ci_high]][valid_rows] <- fx$CI_high # } # # params } .is_levenetest <- function(x) { inherits(x, "anova") && !is.null(attributes(x)$heading) && all(isTRUE(grepl("Levene's Test", attributes(x)$heading, fixed = TRUE))) } # data: A dataframe from `model_parameters` # ... Currently ignored .anova_table_wide <- function(data, ...) 
{ wide_anova <- function(x) { # creating numerator and denominator degrees of freedom if (length(idxResid <- x$Parameter == "Residuals")) { x$df_error <- x$df[idxResid] x$Sum_Squares_Error <- x$Sum_Squares[idxResid] x$Mean_Square_Error <- x$Sum_Squares[idxResid] x <- x[!idxResid, ] } x } if ("Group" %in% colnames(data)) { data <- split(data, data$Group) data <- lapply(data, wide_anova) data <- do.call(rbind, data) } else { data <- wide_anova(data) } # reorder columns col_order <- union(c('Parameter', 'F', 'df', 'df_error', 'p'), names(data)) data[, col_order] } parameters/R/pool_parameters.R0000644000175000017500000002213014077615700016274 0ustar nileshnilesh#' Pool Model Parameters #' #' This function "pools" (i.e. combines) model parameters in a similar fashion #' as `mice::pool()`. However, this function pools parameters from #' `parameters_model` objects, as returned by #' [model_parameters()]. #' #' @param x A list of `parameters_model` objects, as returned by #' [model_parameters()], or a list of model-objects that is #' supported by `model_parameters()`. #' @param ... Currently not used. #' @inheritParams model_parameters.default #' @inheritParams bootstrap_model #' @inheritParams model_parameters.merMod #' #' @note Models with multiple components, (for instance, models with zero-inflation, #' where predictors appear in the count and zero-inflated part) may fail in #' case of identical names for coefficients in the different model components, #' since the coefficient table is grouped by coefficient names for pooling. In #' such cases, coefficients of count and zero-inflated model parts would be #' combined. Therefore, the `component` argument defaults to #' `"conditional"` to avoid this. #' #' @details Averaging of parameters follows Rubin's rules (\cite{Rubin, 1987, p. 76}). #' The pooled degrees of freedom is based on the Barnard-Rubin adjustment for #' small samples (\cite{Barnard and Rubin, 1999}). #' #' @references #' Barnard, J. and Rubin, D.B. (1999). 
#' @export
pool_parameters <- function(x,
                            exponentiate = FALSE,
                            effects = "fixed",
                            component = "conditional",
                            verbose = TRUE,
                            ...) {
  # Pools (combines) model parameters across multiply imputed data sets,
  # following Rubin's rules, with Barnard-Rubin small-sample adjustment
  # of the degrees of freedom.

  # check input, save original model -----
  original_model <- random_params <- NULL
  obj_name <- deparse(substitute(x), width.cutoff = 500)

  # accept a list of raw (supported) models and convert them first
  if (all(sapply(x, insight::is_model)) && all(sapply(x, insight::is_model_supported))) {
    original_model <- x[[1]]
    x <- lapply(x, model_parameters, effects = effects, component = component, ...)
  }

  if (!all(sapply(x, inherits, "parameters_model"))) {
    stop("'x' must be a list of 'parameters_model' objects, as returned by the 'model_parameters()' function.", call. = FALSE)
  }

  if (is.null(original_model)) {
    original_model <- .get_object(x[[1]])
  }

  # pooling should happen on the original (link) scale
  if (isTRUE(attributes(x[[1]])$exponentiate)) {
    warning(insight::format_message("Pooling on exponentiated parameters is not recommended. Please call 'model_parameters()' with 'exponentiate = FALSE', and then call 'pool_parameters(..., exponentiate = TRUE)'."), call. = FALSE)
  }

  # only pool for specific component -----
  original_x <- x
  if ("Component" %in% colnames(x[[1]]) && !.is_empty_object(component) && component != "all") {
    x <- lapply(x, function(i) {
      i <- i[i$Component == component, ]
      i$Component <- NULL
      i
    })
    warning(paste0("Pooling applied to the ", component, " model component."), call. = FALSE)
  }

  # preparation ----
  params <- do.call(rbind, x)
  len <- length(x)
  ci <- attributes(original_x[[1]])$ci
  if (is.null(ci)) ci <- .95
  parameter_values <- x[[1]]$Parameter

  # split multiply (imputed) datasets by parameters,
  # but only for fixed effects. Filter random effects,
  # and save parameter names from fixed effects for later use...
  if (effects == "all" && "Effects" %in% colnames(params) && "random" %in% params$Effects) {
    random_params <- params[params$Effects == "random", ]
    params <- params[params$Effects != "random", ]
    parameter_values <- x[[1]]$Parameter[x[[1]]$Effects != "random"]
  }
  estimates <- split(params, factor(params$Parameter, levels = unique(parameter_values)))

  # pool estimates etc. -----
  pooled_params <- do.call(rbind, lapply(estimates, function(i) {
    # pooled estimate (Rubin's rules)
    pooled_estimate <- mean(i$Coefficient)

    # pooled standard error: within- plus between-imputation variance
    ubar <- mean(i$SE^2)
    tmp <- ubar + (1 + 1 / len) * stats::var(i$Coefficient)
    pooled_se <- sqrt(tmp)

    # pooled degrees of freedom, Barnard-Rubin adjustment for small samples
    df_column <- colnames(i)[grepl("(\\bdf\\b|\\bdf_error\\b)", colnames(i))][1]
    # FIX: "[1]" indexing always yields a length-1 result (possibly NA), so
    # the original "length(df_column)" check was always TRUE and indexing
    # with NA would error for models without a df column. Check for NA.
    if (!is.na(df_column)) {
      dfcom <- unique(i[[df_column]])
      # FIX: guard against differing df across imputations, which would
      # vectorize .barnad_rubin() and break the result data frame
      if (length(dfcom) > 1) {
        dfcom <- round(mean(i[[df_column]], na.rm = TRUE))
      }
      pooled_df <- .barnad_rubin(m = nrow(i), b = stats::var(i$Coefficient), t = tmp, dfcom = dfcom)
    } else {
      pooled_df <- Inf
    }

    # pooled statistic
    pooled_statistic <- pooled_estimate / pooled_se

    # confidence intervals
    alpha <- (1 + ci) / 2
    fac <- suppressWarnings(stats::qt(alpha, df = pooled_df))

    data.frame(
      Coefficient = pooled_estimate,
      SE = pooled_se,
      CI_low = pooled_estimate - pooled_se * fac,
      CI_high = pooled_estimate + pooled_se * fac,
      Statistic = pooled_statistic,
      df_error = pooled_df,
      p = 2 * stats::pt(abs(pooled_statistic), df = pooled_df, lower.tail = FALSE)
    )
  }))

  # pool random effect variances -----
  pooled_random <- NULL
  if (!is.null(random_params)) {
    estimates <- split(random_params, factor(random_params$Parameter, levels = unique(random_params$Parameter)))
    pooled_random <- do.call(rbind, lapply(estimates, function(i) {
      pooled_estimate <- mean(i$Coefficient, na.rm = TRUE)
      data.frame(
        Parameter = unique(i$Parameter),
        Coefficient = pooled_estimate,
        Effects = "random",
        stringsAsFactors = FALSE
      )
    }))
  }

  # reorder ------
  pooled_params$Parameter <- parameter_values
  pooled_params <- pooled_params[c("Parameter", "Coefficient", "SE", "CI_low", "CI_high", "Statistic", "df_error", "p")]

  # final attributes -----
  if (isTRUE(exponentiate) || identical(exponentiate, "nongaussian")) {
    pooled_params <- .exponentiate_parameters(pooled_params, NULL, exponentiate)
  }

  if (!is.null(pooled_random)) {
    pooled_params <- merge(pooled_params, pooled_random, all = TRUE, sort = FALSE)
  }

  # this needs to be done extra here, cannot call ".add_model_parameters_attributes()"
  pooled_params <- .add_pooled_params_attributes(
    pooled_params,
    model_params = original_x[[1]],
    model = original_model,
    ci,
    exponentiate,
    verbose = verbose
  )
  attr(pooled_params, "object_name") <- obj_name

  # pool sigma ----
  sig <- unlist(.compact_list(lapply(original_x, function(i) {
    attributes(i)$sigma
  })))
  if (!.is_empty_object(sig)) {
    attr(pooled_params, "sigma") <- mean(sig, na.rm = TRUE)
  }

  class(pooled_params) <- c("parameters_model", "see_parameters_model", class(pooled_params))
  pooled_params
}
.barnad_rubin <- function(m, b, t, dfcom = 999999) {
  # Barnard-Rubin (1999) small-sample adjusted degrees of freedom for
  # pooled estimates from m imputations, with between-imputation variance
  # b, total variance t, and complete-data df 'dfcom'.
  # No finite complete-data df (e.g. z-statistics): return Inf.
  no_finite_df <- is.null(dfcom) || all(is.na(dfcom)) || all(is.infinite(dfcom))
  if (no_finite_df) {
    return(Inf)
  }
  # fraction of variance attributable to missingness, floored to avoid
  # division blow-up when the between-imputation variance is ~0
  frac_missing <- pmax((1 + 1 / m) * b / t, 1e-04)
  df_old <- (m - 1) / frac_missing^2
  df_obs <- (dfcom + 1) / (dfcom + 3) * dfcom * (1 - frac_missing)
  df_old * df_obs / (df_old + df_obs)
}
#' @export
model_parameters.pam <- function(model, data = NULL, clusters = NULL, ...) {
  # Parameters for K-medoids (cluster::pam / fpc::pamk) models: cluster
  # centers plus model metadata, computed by the shared centers helper.
  if (is.null(data)) {
    data <- as.data.frame(model$data)
  }
  if (is.null(clusters)) {
    clusters <- model$clustering
  }
  out <- .cluster_centers_params(data, clusters, ...)
  attr(out, "model") <- model
  attr(out, "type") <- "pam"
  attr(out, "title") <- "K-Medoids"
  out
}
However, the "m-l-1" #' heuristic also applies to generalized mixed models, while approaches like #' Kenward-Roger or Satterthwaite are limited to linear mixed models only. #' #' @seealso `dof_satterthwaite()` and `se_satterthwaite()` are small helper-functions #' to calculate approximated degrees of freedom and standard errors for model #' parameters, based on the Satterthwaite (1946) approach. #' \cr \cr #' [`dof_kenward()`][dof_kenward] and [`dof_ml1()`][dof_ml1] #' approximate degrees of freedom based on Kenward-Roger's method or the "m-l-1" rule. #' #' @examples #' \donttest{ #' if (require("lme4", quietly = TRUE)) { #' model <- lmer(Petal.Length ~ Sepal.Length + (1 | Species), data = iris) #' p_value_satterthwaite(model) #' } #' } #' @return A data frame. #' @references Satterthwaite FE (1946) An approximate distribution of estimates of variance components. Biometrics Bulletin 2 (6):110–4. #' @export p_value_satterthwaite <- function(model, dof = NULL, robust = FALSE, ...) { if (is.null(dof)) { dof <- dof_satterthwaite(model) } .p_value_dof(model, dof, method = "satterthwaite", robust = robust, ...) } parameters/R/ci_profile_boot.R0000644000175000017500000000720614133736412016242 0ustar nileshnilesh.ci_profiled <- function(model, ci) { glm_ci <- tryCatch( { out <- as.data.frame(stats::confint(model, level = ci), stringsAsFactors = FALSE) names(out) <- c("CI_low", "CI_high") out$CI <- ci out$Parameter <- insight::get_parameters(model, effects = "fixed", component = "conditional", verbose = FALSE )$Parameter out <- out[c("Parameter", "CI", "CI_low", "CI_high")] rownames(out) <- NULL out }, error = function(e) { NULL } ) if (is.null(glm_ci)) { glm_ci <- .ci_generic(model, ci = ci) } glm_ci } # we need this function for models where confint and get_parameters return # different length (e.g. 
as for "polr" models) .ci_profiled2 <- function(model, ci) { glm_ci <- tryCatch( { out <- as.data.frame(stats::confint(model, level = ci), stringsAsFactors = FALSE) names(out) <- c("CI_low", "CI_high") out$CI <- ci out$Parameter <- .remove_backticks_from_string(rownames(out)) out <- out[c("Parameter", "CI", "CI_low", "CI_high")] rownames(out) <- NULL out }, error = function(e) { NULL } ) if (is.null(glm_ci)) { glm_ci <- .ci_generic(model, ci = ci) } glm_ci } #' @keywords internal .ci_profile_merMod <- function(x, ci, profiled, ...) { out <- as.data.frame(suppressWarnings(stats::confint(profiled, level = ci, ...))) rownames(out) <- gsub("`", "", rownames(out), fixed = TRUE) out <- out[rownames(out) %in% insight::find_parameters(x, effects = "fixed")$conditional, ] names(out) <- c("CI_low", "CI_high") # Clean up out$Parameter <- row.names(out) out$CI <- ci out <- out[c("Parameter", "CI", "CI_low", "CI_high")] row.names(out) <- NULL out } #' @keywords internal .ci_profile_glmmTMB <- function(x, ci, profiled, component, ...) { out <- as.data.frame(stats::confint(profiled, level = ci, ...)) .process_glmmTMB_CI(x, out, ci, component) } #' @keywords internal .ci_uniroot_glmmTMB <- function(x, ci, component, ...) 
#' @keywords internal
.ci_boot_merMod <- function(x, ci, iterations = 500, effects = "fixed", ...) {
  # Bootstrapped confidence intervals for the fixed effects of a merMod
  # model, using lme4's parametric bootstrap ("boot" method).
  # NOTE(review): the 'effects' argument is not referenced in this body -
  # TODO confirm whether it is intentionally unused.
  insight::check_if_installed("lme4")
  # Compute
  out <- as.data.frame(lme4::confint.merMod(x, level = ci, method = "boot", nsim = iterations, ...))
  # confint() may backtick-quote parameter names; strip the quotes
  rownames(out) <- gsub("`", "", rownames(out), fixed = TRUE)
  # keep rows for fixed effects only (confint also returns rows for
  # random-effect parameters)
  out <- out[rownames(out) %in% insight::find_parameters(x, effects = "fixed")$conditional, ]
  names(out) <- c("CI_low", "CI_high")
  # Clean up
  out$Parameter <- row.names(out)
  out$CI <- ci
  out <- out[c("Parameter", "CI", "CI_low", "CI_high")]
  row.names(out) <- NULL
  out
}
taken from the "pbkrtest" package and slightly modified #' @author Søren Højsgaard, \email{sorenh@@math.aau.dk} .kenward_adjusted_ddf <- function(model, linear_coef, adjusted_vcov) { .adjusted_ddf(adjusted_vcov, linear_coef, stats::vcov(model)) } .adjusted_ddf <- function(adjusted_vcov, linear_coef, unadjusted_vcov = adjusted_vcov) { insight::check_if_installed("Matrix") if (!is.matrix(linear_coef)) { linear_coef <- matrix(linear_coef, ncol = 1) } vlb <- sum(linear_coef * (unadjusted_vcov %*% linear_coef)) theta <- Matrix::Matrix(as.numeric(outer(linear_coef, linear_coef) / vlb), nrow = length(linear_coef)) P <- attr(adjusted_vcov, "P") W <- attr(adjusted_vcov, "W") A1 <- A2 <- 0 theta_unadjusted_vcov <- theta %*% unadjusted_vcov n.ggamma <- length(P) for (ii in 1:n.ggamma) { for (jj in c(ii:n.ggamma)) { e <- ifelse(ii == jj, 1, 2) ui <- as.matrix(theta_unadjusted_vcov %*% P[[ii]] %*% unadjusted_vcov) uj <- as.matrix(theta_unadjusted_vcov %*% P[[jj]] %*% unadjusted_vcov) A1 <- A1 + e * W[ii, jj] * (sum(diag(ui)) * sum(diag(uj))) A2 <- A2 + e * W[ii, jj] * sum(ui * t(uj)) } } B <- (A1 + 6 * A2) / 2 g <- (2 * A1 - 5 * A2) / (3 * A2) c1 <- g / (3 + 2 * (1 - g)) c2 <- (1 - g) / (3 + 2 * (1 - g)) c3 <- (3 - g) / (3 + 2 * (1 - g)) EE <- 1 + A2 VV <- 2 * (1 + B) EEstar <- 1 / (1 - A2) VVstar <- 2 * ((1 + c1 * B) / ((1 - c2 * B)^2 * (1 - c3 * B))) V0 <- 1 + c1 * B V1 <- 1 - c2 * B V2 <- 1 - c3 * B V0 <- ifelse(abs(V0) < 1e-10, 0, V0) rho <- (.divZero(1 - A2, V1))^2 * V0 / V2 df2 <- 4 + 3 / (rho - 1) df2 } .divZero <- function(x, y, tol = 1e-14) { ## ratio x/y is set to 1 if both |x| and |y| are below tol if (abs(x) < tol & abs(y) < tol) { 1 } else { x / y } } .vcov_kenward_ajusted <- function(model) { insight::check_if_installed("lme4") if (!(lme4::getME(model, "is_REML"))) { model <- stats::update(model, . 
~ ., REML = TRUE) } .vcovAdj16_internal(stats::vcov(model), .get_SigmaG(model), lme4::getME(model, "X")) } .get_SigmaG <- function(model) { insight::check_if_installed("lme4") insight::check_if_installed("Matrix") GGamma <- lme4::VarCorr(model) SS <- .shgetME(model) ## Put covariance parameters for the random effects into a vector: ## TODO: It is a bit ugly to throw everything into one long vector here; a list would be more elegant ggamma <- NULL for (ii in 1:(SS$n.RT)) { Lii <- GGamma[[ii]] ggamma <- c(ggamma, Lii[lower.tri(Lii, diag = TRUE)]) } ggamma <- c(ggamma, stats::sigma(model)^2) ## Extend ggamma by the residuals variance n.ggamma <- length(ggamma) ## Find G_r: G <- NULL Zt <- lme4::getME(model, "Zt") for (ss in 1:SS$n.RT) { ZZ <- .shget_Zt_group(ss, Zt, SS$Gp) n.lev <- SS$n.lev.by.RT2[ss] ## ; cat(sprintf("n.lev=%i\n", n.lev)) Ig <- Matrix::sparseMatrix(1:n.lev, 1:n.lev, x = 1) for (rr in 1:SS$n.parm.by.RT[ss]) { ## This is takes care of the case where there is random regression and several matrices have to be constructed. ## FIXME: I am not sure this is correct if there is a random quadratic term. The '2' below looks suspicious. 
.index2UpperTriEntry <- function(k, N) {
  ## Inverse of .indexSymmat2vec: map the k-th element of the row-major
  ## vector of upper-triangular entries of an N x N symmetric matrix back
  ## to its (row, column) index pair with row <= column.
  ## Example (N = 3): k = 1 -> (1,1), k = 2 -> (1,2), k = 4 -> (2,2).
  row_ends <- cumsum(N:1)
  row_starts <- c(0, row_ends[-N])
  i <- which(row_starts < k & k <= row_ends)
  j <- k - N * i + N - i * (3 - i) / 2 + i
  c(i, j)
}
n.ggamma) for (ii in 1:n.ggamma) { Phi.P.ii <- Phi %*% PP[[ii]] for (jj in c(ii:n.ggamma)) { www <- .indexSymmat2vec(ii, jj, n.ggamma) IE2[ii, jj] <- IE2[jj, ii] <- Ktrace[ii, jj] - 2 * sum(Phi * QQ[[www]]) + sum(Phi.P.ii * (PP[[jj]] %*% Phi)) } } eigenIE2 <- eigen(IE2, only.values = TRUE)$values condi <- min(abs(eigenIE2)) WW <- if (condi > 1e-10) { as.matrix(Matrix::forceSymmetric(2 * solve(IE2))) } else { as.matrix(Matrix::forceSymmetric(2 * MASS::ginv(IE2))) } UU <- matrix(0, nrow = ncol(X), ncol = ncol(X)) for (ii in 1:(n.ggamma - 1)) { for (jj in c((ii + 1):n.ggamma)) { www <- .indexSymmat2vec(ii, jj, n.ggamma) UU <- UU + WW[ii, jj] * (QQ[[www]] - PP[[ii]] %*% Phi %*% PP[[jj]]) } } UU <- as.matrix(UU) UU <- UU + t(UU) for (ii in 1:n.ggamma) { www <- .indexSymmat2vec(ii, ii, n.ggamma) UU <- UU + WW[ii, ii] * (QQ[[www]] - PP[[ii]] %*% Phi %*% PP[[ii]]) } GGAMMA <- Phi %*% UU %*% Phi PhiA <- Phi + 2 * GGAMMA attr(PhiA, "P") <- PP attr(PhiA, "W") <- WW attr(PhiA, "condi") <- condi PhiA # n.ggamma <- SigmaG$n.ggamma # # M <- cbind(do.call(cbind, SigmaG$G), X) # SinvM <- chol2inv(chol(Matrix::forceSymmetric(SigmaG$Sigma))) %*% M # # v <- c(rep(1:length(SigmaG$G), each = nrow(SinvM)), rep(length(SigmaG$G) + 1, ncol(X))) # idx <- lapply(unique.default(v), function(i) { # which(v == i) # }) # SinvG <- lapply(idx, function(z) { # SinvM[, z] # }) # # SinvX <- SinvG[[length(SinvG)]] # SinvG[length(SinvG)] <- NULL # # OO <- lapply(1:n.ggamma, function(i) { # SigmaG$G[[i]] %*% SinvX # }) # # PP <- vector("list", n.ggamma) # QQ <- vector("list", n.ggamma * (n.ggamma + 1) / 2) # index <- 1 # for (r in 1:n.ggamma) { # OOt.r <- t(OO[[r]]) # PP[[r]] <- -1 * (OOt.r %*% SinvX) # for (s in r:n.ggamma) { # QQ[[index]] <- OOt.r %*% (SinvG[[s]] %*% SinvX) # index <- index + 1 # } # } # # Ktrace <- matrix(NA, nrow = n.ggamma, ncol = n.ggamma) # for (r in 1:n.ggamma) { # HHr <- SinvG[[r]] # for (s in r:n.ggamma) { # Ktrace[r, s] <- Ktrace[s, r] <- sum(HHr * SinvG[[s]]) # } # } # # ## 
.indexSymmat2vec <- function(i, j, N) {
  ## Index of S[i, j] within the row-major vector r of upper-triangular
  ## elements of a symmetric N x N matrix S:
  ## r = c(S[1,1], S[1,2], ..., S[1,N], S[2,2], ..., S[N,N]).
  ## Symmetry: (i, j) and (j, i) map to the same index.
  # FIX: the original assigned the result to a local 'k' as the last
  # expression, which is redundant and makes the return value invisible;
  # return the expression directly.
  if (i <= j) {
    (i - 1) * (N - i / 2) + j
  } else {
    (j - 1) * (N - j / 2) + i
  }
}
## Alternative to .get_Zt_group
.shget_Zt_group <- function(ii.group, Zt, Gp, ...) {
  # Rows of the transposed random-effects design matrix Zt belonging to
  # random term ii.group, delimited by the group-index pointers Gp.
  zrows <- (Gp[ii.group] + 1):Gp[ii.group + 1]
  as.matrix(Zt[zrows, ])
}
params <- bayestestR::describe_posterior( model, centrality = centrality, dispersion = dispersion, ci = ci, ci_method = ci_method, test = test, rope_range = rope_range, rope_ci = rope_ci, bf_prior = NULL, diagnostic = NULL, priors = NULL, verbose = verbose, ... ) statistic <- NULL } # Renaming if (!is.null(statistic)) { names(params) <- gsub("Statistic", gsub("-statistic", "", attr(statistic, "statistic", exact = TRUE), fixed = TRUE), names(params)) } names(params) <- gsub("Std. Error", "SE", names(params)) names(params) <- gsub(estName <- attr(s, "estName"), "Estimate", names(params)) names(params) <- gsub("lower.CL", "CI_low", names(params)) names(params) <- gsub("upper.CL", "CI_high", names(params)) names(params) <- gsub("asymp.LCL", "CI_low", names(params)) names(params) <- gsub("asymp.UCL", "CI_high", names(params)) names(params) <- gsub("lower.HPD", "CI_low", names(params)) names(params) <- gsub("upper.HPD", "CI_high", names(params)) # check if we have CIs if (!any(grepl("^CI_", colnames(params)))) { df_column <- grep("(df|df_error)", colnames(params)) if (length(df_column) > 0) { df <- params[[df_column[1]]] } else { df <- Inf } fac <- stats::qt((1 + ci) / 2, df = df) params$CI_low <- params$Estimate - fac * params$SE params$CI_high <- params$Estimate + fac * params$SE } # rename if necessary if ("df" %in% colnames(params)) { colnames(params)[colnames(params) == "df"] <- "df_error" } # Reorder estimate_pos <- which(colnames(s) == estName) parameter_names <- colnames(params)[seq_len(estimate_pos - 1)] order <- c( parameter_names, "Estimate", "Median", "Mean", "SE", "SD", "MAD", "CI_low", "CI_high", "F", "t", "z", "df", "df_error", "p", "pd", "ROPE_CI", "ROPE_low", "ROPE_high", "ROPE_Percentage" ) params <- params[order[order %in% names(params)]] # rename names(params) <- gsub("Estimate", "Coefficient", names(params)) if (isTRUE(exponentiate) || identical(exponentiate, "nongaussian")) { params <- .exponentiate_parameters(params, model, exponentiate) } # filter 
#' @export
standard_error.emmGrid <- function(model, ...) {
  # Standard errors for emmeans grid objects. For bootstrapped results the
  # SE is computed from the bootstrap draws instead of the summary table.
  if (!is.null(model@misc$is_boot) && model@misc$is_boot) {
    return(boot_em_standard_error(model))
  }
  s <- summary(model)
  # locate the estimate column; everything before it is grid variables
  estimate_pos <- which(colnames(s) == attr(s, "estName"))
  if (length(estimate_pos) && !is.null(s$SE)) {
    out <- .data_frame(
      Parameter = .pretty_emmeans_Parameter_names(model),
      SE = unname(s$SE)
    )
  } else {
    # no estimate/SE column found - nothing to report
    out <- NULL
  }
  out
}
#' @export
degrees_of_freedom.emm_list <- function(model, ...) {
  # Degrees of freedom for a list of emmeans objects. Bootstrapped results
  # have no analytic df and are handled separately.
  if (!is.null(model[[1]]@misc$is_boot) && model[[1]]@misc$is_boot) {
    return(boot_em_df(model))
  }
  s <- summary(model)
  # one df per row of each summary component; Inf when a component reports
  # no df column
  unname(unlist(lapply(s, function(i) {
    if (is.null(i$df)) {
      rep(Inf, nrow(i))
    } else {
      i$df
    }
  })))
}
boot_em_pval <- function(model, adjust) {
  # p-values for bootstrapped emmeans results: one p-value per parameter
  # computed from the bootstrap draws, then multiplicity-adjusted with the
  # requested method.
  est <- insight::get_parameters(model, summary = FALSE)
  p <- sapply(est, p_value)
  p <- stats::p.adjust(p, method = adjust)
  # emm_list summaries are lists; record which component each row belongs to
  Component <- NULL
  if (inherits(s <- summary(model), "list")) {
    Component <- .pretty_emmeans_Component_names(s)
  }
  out <- .data_frame(
    Parameter = .pretty_emmeans_Parameter_names(model),
    p = unname(p)
  )
  if (!is.null(Component)) out$Component <- Component
  out
}
{ NULL } # Utils ------------------------------------------------------------------- .pretty_emmeans_Parameter_names <- function(model) { s <- summary(model) if (inherits(s, "list")) { parnames <- lapply(seq_along(s), function(i) .pretty_emmeans_Parameter_names(model[[i]])) parnames <- unlist(parnames) } else { estimate_pos <- which(colnames(s) == attr(s, "estName")) params <- s[, 1:(estimate_pos - 1), drop = FALSE] if (ncol(params) >= 2) { r <- apply(params, 1, function(i) paste0(colnames(params), " [", i, "]")) parnames <- unname(sapply(as.data.frame(r), paste, collapse = ", ")) } else { parnames <- as.vector(params[[1]]) } } parnames } .pretty_emmeans_Component_names <- function(s) { Component <- lapply(seq_along(s), function(i) { rep(names(s)[[i]], nrow(s[[i]])) }) Component <- unlist(Component) } .is_bayesian_emmeans <- function(model) { is_frq <- isTRUE(all.equal(dim(model@post.beta), c(1, 1))) && isTRUE(is.na(model@post.beta)) && is.null(model@misc$is_boot) isFALSE(is_frq) } parameters/R/plot.R0000644000175000017500000000313614135724575014071 0ustar nileshnilesh#' @export plot.parameters_sem <- function(x, ...) { insight::check_if_installed("see") NextMethod() } #' @export plot.parameters_model <- function(x, ...) { insight::check_if_installed("see") NextMethod() } #' @export plot.compare_parameters <- function(x, ...) { insight::check_if_installed("see") NextMethod() } #' @export plot.parameters_stan <- function(x, ...) { insight::check_if_installed("see") NextMethod() } #' @export plot.parameters_simulate <- function(x, ...) { insight::check_if_installed("see") NextMethod() } #' @export plot.parameters_brms_meta <- function(x, ...) { insight::check_if_installed("see") NextMethod() } #' @export plot.n_factors <- function(x, ...) { insight::check_if_installed("see") NextMethod() } #' @export plot.parameters_distribution <- function(x, ...) { insight::check_if_installed("see") NextMethod() } #' @export plot.n_clusters <- function(x, ...) 
{ insight::check_if_installed("see") NextMethod() } #' @export plot.parameters_pca <- function(x, ...) { insight::check_if_installed("see") NextMethod() } #' @export plot.parameters_efa <- function(x, ...) { insight::check_if_installed("see") NextMethod() } #' @importFrom graphics plot #' @export plot.cluster_analysis <- function(x, ...) { insight::check_if_installed("see") plot(datawizard::visualisation_recipe(x, ...)) } #' @export plot.cluster_analysis_summary <- function(x, ...) { insight::check_if_installed("see") plot(datawizard::visualisation_recipe(x, ...)) } parameters/R/methods_kmeans.R0000644000175000017500000000773214133316742016111 0ustar nileshnilesh#' Parameters from Cluster Models (k-means, ...) #' #' Format cluster models obtained for example by [kmeans()]. #' #' @param model Cluster model. #' @inheritParams model_parameters.default #' @param ... Arguments passed to or from other methods. #' #' @examples #' \dontrun{ #' # #' # K-means ------------------------------- #' model <- kmeans(iris[1:4], centers = 3) #' rez <- model_parameters(model) #' rez #' #' # Get clusters #' predict(rez) #' #' # Clusters centers in long form #' attributes(rez)$means #' #' # Between and Total Sum of Squares #' attributes(rez)$Sum_Squares_Total #' attributes(rez)$Sum_Squares_Between #' } #' @export model_parameters.kmeans <- function(model, ...) 
{
  # one row per cluster: label, cluster size, within-cluster sum of squares,
  # followed by the cluster centers (one column per variable)
  params <- cbind(
    data.frame(
      Cluster = row.names(model$centers),
      n_Obs = model$size,
      Sum_Squares = model$withinss
    ),
    model$centers
  )

  # Long means: reshape the center columns (everything after the three
  # meta columns above) into a Cluster x Variable x Mean table
  means <- datawizard::reshape_longer(params,
    cols = 4:ncol(params),
    values_to = "Mean",
    names_to = "Variable"
  )

  # Attributes: keep variance explained, sums of squares, the long-format
  # means, the original model and its cluster assignments for later methods
  # (print(), predict(), ...)
  attr(params, "variance") <- model$betweenss / model$totss
  attr(params, "Sum_Squares_Between") <- model$betweenss
  attr(params, "Sum_Squares_Total") <- model$totss
  attr(params, "means") <- means
  attr(params, "model") <- model
  attr(params, "iterations") <- model$iter
  attr(params, "scores") <- model$cluster
  attr(params, "type") <- "kmeans"

  class(params) <- c("parameters_clusters", class(params))
  params
}



# factoextra::hkmeans -----------------------------------------------------

#' @rdname model_parameters.kmeans
#' @inheritParams cluster_centers
#'
#' @examples
#' \dontrun{
#' #
#' # Hierarchical K-means (factoextra::hkclust) ----------------------
#' if (require("factoextra", quietly = TRUE)) {
#'   data <- iris[1:4]
#'   model <- factoextra::hkmeans(data, k = 3)
#'
#'   rez <- model_parameters(model)
#'   rez
#'
#'   # Get clusters
#'   predict(rez)
#'
#'   # Clusters centers in long form
#'   attributes(rez)$means
#'
#'   # Between and Total Sum of Squares
#'   attributes(rez)$Sum_Squares_Total
#'   attributes(rez)$Sum_Squares_Between
#' }
#' }
#' @export
model_parameters.hkmeans <- model_parameters.kmeans



# Methods -------------------------------------------------------------------

#' @export
print.parameters_clusters <- function(x, digits = 2, ...) {
  title <- "# Clustering Solution"
  # use a custom title when one was attached as an attribute.
  # FIX: the previous check ('"title" %in% attributes(x)') compared against
  # the attribute *values*, not their names, so a "title" attribute was
  # never found; look it up by name instead.
  if (!is.null(attributes(x)$title)) title <- attributes(x)$title

  insight::print_color(title, "blue")
  cat("\n\n")
  insight::print_colour(.text_components_variance(x), "yellow")
  cat("\n\n")
  cat(insight::export_table(x, digits = digits, ...))
  invisible(x)
}



# Predict -----------------------------------------------------------------

#' @export
predict.parameters_clusters <- function(object, newdata = NULL, names = NULL, ...)
{ if (is.null(newdata)) { out <- attributes(object)$scores } else { out <- stats::predict(attributes(object)$model, newdata = newdata, ...) } # Add labels if (!is.null(names)) { # List if (is.list(names)) { out <- as.factor(out) for (i in names(names)) { levels(out)[levels(out) == i] <- names[[i]] } # Vector } else if (is.character(names)) { out <- names[as.numeric(out)] } else { stop("'names' must be a character vector or a list.") } out <- as.character(out) } out } #' @export predict.kmeans <- function(object, newdata = NULL, ...) { if (is.null(newdata)) { return(object$cluster) } # compute squared euclidean distance from each sample to each cluster center centers <- object$centers sumsquares_by_center <- apply(centers, 1, function(x) { colSums((t(newdata) - x)^2) }) if (is.null(nrow(sumsquares_by_center))) { as.vector(which.min(sumsquares_by_center)) } else { as.vector(apply(as.data.frame(sumsquares_by_center), 1, which.min)) } } parameters/R/dof_ml1.R0000644000175000017500000000424214137207406014422 0ustar nileshnilesh#' @rdname p_value_ml1 #' @export dof_ml1 <- function(model) { if (!insight::model_info(model, verbose = FALSE)$is_mixed) { stop("Model must be a mixed model.") } re_groups <- insight::get_random(model) parameters <- insight::find_parameters(model, effects = "fixed")[["conditional"]] predictors <- insight::find_predictors(model, effects = "fixed", component = "conditional", flatten = TRUE) predictors <- setdiff(predictors, names(re_groups)) model_data <- insight::get_data(model, verbose = FALSE)[predictors] has_intcp <- insight::has_intercept(model) term_assignment <- .find_term_assignment(model_data, predictors, parameters) ddf <- sapply(model_data, function(.x) { min(sapply(re_groups, .get_df_ml1_approx, x = .x)) }) ltab <- table(ddf) ltab <- list(m = as.integer(names(ltab)), l = as.vector(ltab)) ltab$ddf <- ltab$m - ltab$l if (has_intcp) ltab$ddf <- ltab$ddf - 1 ii <- match(ddf, ltab$m) ddf[] <- ltab$ddf[ii] out <- numeric(length = 
length(parameters)) ## TODO number of items to replace is not a multiple of replacement length suppressWarnings(out[which("(Intercept)" != parameters)] <- ddf[term_assignment]) if (has_intcp) out[which("(Intercept)" == parameters)] <- min(ddf) stats::setNames(out, parameters) } .get_df_ml1_approx <- function(x, g) { if (!is.factor(g)) { g <- as.factor(g) } m <- nlevels(g) n <- length(x) if (is.character(x)) { x <- as.numeric(as.factor(x)) } else { x <- as.numeric(x) } x.bar <- stats::ave(x, g) var.within <- stats::var(x - x.bar) var.between <- stats::var(x.bar) if (var.within >= var.between) { return(n) } else { return(m) } } .find_term_assignment <- function(model_data, predictors, parameters) { parms <- unlist(lapply(1:length(predictors), function(i) { p <- predictors[i] if (is.factor(model_data[[p]])) { ps <- paste0(p, levels(model_data[[p]])) names(ps)[1:length(ps)] <- i ps } else { names(p) <- i p } })) stats::na.omit(as.numeric(names(parms)[match(insight::clean_names(parameters), parms)])) } parameters/R/methods_gmnl.R0000644000175000017500000000171414012467213015556 0ustar nileshnilesh #' @export standard_error.gmnl <- function(model, ...) { cs <- summary(model)$CoefTable se <- cs[, 2] pv <- .data_frame( Parameter = .remove_backticks_from_string(names(se)), SE = as.vector(se) ) # rename intercepts intercepts <- grepl(":(intercept)", pv$Parameter, fixed = TRUE) pv$Parameter[intercepts] <- sprintf( "(Intercept: %s)", sub(":(intercept)", replacement = "", pv$Parameter[intercepts], fixed = TRUE) ) pv } #' @export p_value.gmnl <- function(model, ...) 
{ cs <- summary(model)$CoefTable p <- cs[, 4] # se <- cs[, 2] pv <- .data_frame( Parameter = .remove_backticks_from_string(rownames(cs)), p = as.vector(p) ) # rename intercepts intercepts <- grepl(":(intercept)", pv$Parameter, fixed = TRUE) pv$Parameter[intercepts] <- sprintf( "(Intercept: %s)", sub(":(intercept)", replacement = "", pv$Parameter[intercepts], fixed = TRUE) ) pv } parameters/R/methods_logistf.R0000644000175000017500000000110713765377240016302 0ustar nileshnilesh #' @export ci.logistf <- ci.glm #' @export standard_error.logistf <- function(model, ...) { utils::capture.output(s <- summary(model)) se <- sqrt(diag(s$var)) .data_frame( Parameter = .remove_backticks_from_string(names(s$coefficients)), SE = as.vector(se) ) } #' @export p_value.logistf <- function(model, ...) { utils::capture.output(s <- summary(model)) .data_frame( Parameter = .remove_backticks_from_string(names(s$prob)), p = as.vector(s$prob) ) } #' @export model_parameters.logistf <- model_parameters.glm parameters/R/get_scores.R0000644000175000017500000000421114077615700015235 0ustar nileshnilesh#' Get Scores from Principal Component Analysis (PCA) #' #' `get_scores()` takes `n_items` amount of items that load the most #' (either by loading cutoff or number) on a component, and then computes their #' average. #' #' @param x An object returned by [principal_components()]. #' @param n_items Number of required (i.e. non-missing) items to build the sum #' score. If `NULL`, the value is chosen to match half of the number of #' columns in a data frame. #' #' @details `get_scores()` takes the results from #' [principal_components()] and extracts the variables for each #' component found by the PCA. Then, for each of these "subscales", row means #' are calculated (which equals adding up the single items and dividing by the #' number of items). 
This results in a sum score for each component from the #' PCA, which is on the same scale as the original, single items that were #' used to compute the PCA. #' #' @examples #' if (require("psych")) { #' pca <- principal_components(mtcars[, 1:7], n = 2, rotation = "varimax") #' #' # PCA extracted two components #' pca #' #' # assignment of items to each component #' closest_component(pca) #' #' # now we want to have sum scores for each component #' get_scores(pca) #' #' # compare to manually computed sum score for 2nd component, which #' # consists of items "hp" and "qsec" #' (mtcars$hp + mtcars$qsec) / 2 #' } #' @return A data frame with subscales, which are average sum scores for all #' items from each component. #' @export get_scores <- function(x, n_items = NULL) { subscales <- closest_component(x) data_set <- attributes(x)$data_set out <- lapply(sort(unique(subscales)), function(.subscale) { columns <- names(subscales)[subscales == .subscale] items <- data_set[columns] if (is.null(n_items)) { .n_items <- round(ncol(items) / 2) } else { .n_items <- n_items } apply(items, 1, function(i) ifelse(sum(!is.na(i)) >= .n_items, mean(i, na.rm = TRUE), NA)) }) out <- as.data.frame(do.call(cbind, out)) colnames(out) <- sprintf("Component_%i", 1:ncol(out)) out } parameters/R/methods_mfx.R0000644000175000017500000003001014131014352015374 0ustar nileshnilesh# model parameters --------------------- #' @rdname model_parameters.default #' @export model_parameters.logitor <- function(model, ci = .95, bootstrap = FALSE, iterations = 1000, standardize = NULL, exponentiate = TRUE, robust = FALSE, p_adjust = NULL, verbose = TRUE, ...) { model_parameters.default( model$fit, ci = ci, bootstrap = bootstrap, iterations = iterations, standardize = standardize, exponentiate = exponentiate, robust = robust, p_adjust = p_adjust, ... 
) } #' @export model_parameters.poissonirr <- model_parameters.logitor #' @export model_parameters.negbinirr <- model_parameters.logitor #' @rdname model_parameters.default #' @export model_parameters.poissonmfx <- function(model, ci = .95, bootstrap = FALSE, iterations = 1000, component = c("all", "conditional", "marginal"), standardize = NULL, exponentiate = FALSE, robust = FALSE, p_adjust = NULL, verbose = TRUE, ...) { component <- match.arg(component) out <- .model_parameters_generic( model = model, ci = ci, bootstrap = bootstrap, iterations = iterations, merge_by = c("Parameter", "Component"), standardize = standardize, exponentiate = exponentiate, component = component, robust = robust, p_adjust = p_adjust, ... ) attr(out, "object_name") <- deparse(substitute(model), width.cutoff = 500) out } #' @export model_parameters.logitmfx <- model_parameters.poissonmfx #' @export model_parameters.probitmfx <- model_parameters.poissonmfx #' @export model_parameters.negbinmfx <- model_parameters.poissonmfx #' @export model_parameters.betaor <- function(model, ci = .95, bootstrap = FALSE, iterations = 1000, component = c("conditional", "precision", "all"), standardize = NULL, exponentiate = FALSE, p_adjust = NULL, verbose = TRUE, ...) { component <- match.arg(component) model_parameters.betareg( model$fit, ci = ci, bootstrap = bootstrap, iterations = iterations, component = component, standardize = standardize, exponentiate = exponentiate, p_adjust = p_adjust, ... ) } #' @rdname model_parameters.default #' @export model_parameters.betamfx <- function(model, ci = .95, bootstrap = FALSE, iterations = 1000, component = c("all", "conditional", "precision", "marginal"), standardize = NULL, exponentiate = FALSE, robust = FALSE, p_adjust = NULL, verbose = TRUE, ...) 
{ component <- match.arg(component) out <- .model_parameters_generic( model = model, ci = ci, bootstrap = bootstrap, iterations = iterations, merge_by = c("Parameter", "Component"), standardize = standardize, exponentiate = exponentiate, component = component, robust = robust, p_adjust = p_adjust, ... ) attr(out, "object_name") <- deparse(substitute(model), width.cutoff = 500) out } # ci ------------------ #' @export ci.logitor <- function(x, ci = .95, method = NULL, robust = FALSE, ...) { .ci_generic(model = x$fit, ci = ci, method = method, robust = robust, ...) } #' @export ci.poissonirr <- ci.logitor #' @export ci.negbinirr <- ci.logitor #' @export ci.poissonmfx <- function(x, ci = .95, component = c("all", "conditional", "marginal"), method = NULL, robust = FALSE, ...) { component <- match.arg(component) .ci_generic(model = x, ci = ci, component = component, method = method, robust = robust, ...) } #' @export ci.negbinmfx <- ci.poissonmfx #' @export ci.logitmfx <- ci.poissonmfx #' @export ci.probitmfx <- ci.poissonmfx #' @export ci.betaor <- function(x, ci = .95, component = c("all", "conditional", "precision"), ...) { component <- match.arg(component) .ci_generic(model = x$fit, ci = ci, dof = Inf, component = component) } #' @export ci.betamfx <- function(x, ci = .95, method = NULL, robust = FALSE, component = c("all", "conditional", "precision", "marginal"), ...) { component <- match.arg(component) .ci_generic(model = x, ci = ci, component = component, method = method, robust = robust, ...) } # standard error ------------------ #' @export standard_error.negbin <- standard_error.default #' @export standard_error.logitor <- function(model, ...) { standard_error.lm(model$fit, ...) } #' @export standard_error.poissonirr <- standard_error.logitor #' @export standard_error.negbinirr <- standard_error.logitor #' @rdname standard_error #' @export standard_error.poissonmfx <- function(model, component = c("all", "conditional", "marginal"), ...) 
{ parms <- insight::get_parameters(model, component = "all") cs <- stats::coef(summary(model$fit)) se <- c(as.vector(model$mfxest[, 2]), as.vector(cs[, 2])) out <- .data_frame( Parameter = parms$Parameter, SE = se, Component = parms$Component ) component <- match.arg(component) if (component != "all") { out <- out[out$Component == component, ] } out } #' @export standard_error.logitmfx <- standard_error.poissonmfx #' @export standard_error.probitmfx <- standard_error.poissonmfx #' @export standard_error.negbinmfx <- standard_error.poissonmfx #' @export standard_error.betaor <- function(model, component = c("all", "conditional", "precision"), ...) { component <- match.arg(component) standard_error.betareg(model$fit, component = component, ...) } #' @rdname standard_error #' @export standard_error.betamfx <- function(model, component = c("all", "conditional", "precision", "marginal"), ...) { parms <- insight::get_parameters(model, component = "all") cs <- do.call(rbind, stats::coef(summary(model$fit))) se <- c(as.vector(model$mfxest[, 2]), as.vector(cs[, 2])) out <- .data_frame( Parameter = parms$Parameter, SE = se, Component = parms$Component ) component <- match.arg(component) if (component != "all") { out <- out[out$Component == component, ] } out } # degrees of freedom ------------------ #' @export degrees_of_freedom.logitor <- function(model, ...) { degrees_of_freedom.default(model$fit, ...) 
}

# df for all mfx wrappers is taken from the underlying fitted model in `$fit`
# (see degrees_of_freedom.logitor above)

#' @export
degrees_of_freedom.poissonirr <- degrees_of_freedom.logitor

#' @export
degrees_of_freedom.negbinirr <- degrees_of_freedom.logitor

#' @export
degrees_of_freedom.poissonmfx <- degrees_of_freedom.logitor

#' @export
degrees_of_freedom.logitmfx <- degrees_of_freedom.logitor

#' @export
degrees_of_freedom.negbinmfx <- degrees_of_freedom.logitor

#' @export
degrees_of_freedom.probitmfx <- degrees_of_freedom.logitor

#' @export
degrees_of_freedom.betaor <- degrees_of_freedom.logitor

#' @export
degrees_of_freedom.betamfx <- degrees_of_freedom.logitor



# p values ------------------


#' p-values for Marginal Effects Models
#'
#' This function attempts to return, or compute, p-values of marginal effects
#' models from package \pkg{mfx}.
#'
#' @param model A statistical model.
#' @param component Should all parameters, parameters for the conditional model,
#'   precision-component or marginal effects be returned? `component` may be one
#'   of `"conditional"`, `"precision"`, `"marginal"` or `"all"` (default).
#' @param ... Currently not used.
#'
#' @return A data frame with at least two columns: the parameter names and the
#'   p-values. Depending on the model, may also include columns for model
#'   components etc.
#'
#' @examples
#' if (require("mfx", quietly = TRUE)) {
#'   set.seed(12345)
#'   n <- 1000
#'   x <- rnorm(n)
#'   y <- rnegbin(n, mu = exp(1 + 0.5 * x), theta = 0.5)
#'   d <- data.frame(y, x)
#'   model <- poissonmfx(y ~ x, data = d)
#'
#'   p_value(model)
#'   p_value(model, component = "marginal")
#' }
#' @export
p_value.poissonmfx <- function(model, component = c("all", "conditional", "marginal"), ...) {
  parms <- insight::get_parameters(model, component = "all")
  cs <- stats::coef(summary(model$fit))
  # combine marginal-effect p-values (column 4 of mfxest; assumed to be the
  # p column - matches the @return contract above) with the p-values of the
  # underlying fit (column 4 of the coefficient table)
  p <- c(as.vector(model$mfxest[, 4]), as.vector(cs[, 4]))
  out <- .data_frame(
    Parameter = parms$Parameter,
    p = p,
    Component = parms$Component
  )
  component <- match.arg(component)
  # optionally keep only the requested component
  if (component != "all") {
    out <- out[out$Component == component, ]
  }
  out
}


#' @export
p_value.logitor <- function(model, method = NULL, ...) {
  # odds-ratio wrappers delegate to the underlying model in `$fit`
  p_value.default(model$fit, method = method, ...)
}

#' @export
p_value.poissonirr <- p_value.logitor

#' @export
p_value.negbinirr <- p_value.logitor

#' @export
p_value.logitmfx <- p_value.poissonmfx

#' @export
p_value.probitmfx <- p_value.poissonmfx

#' @export
p_value.negbinmfx <- p_value.poissonmfx


#' @rdname p_value.poissonmfx
#' @export
p_value.betaor <- function(model, component = c("all", "conditional", "precision"), ...) {
  component <- match.arg(component)
  # beta-regression odds-ratio wrapper: delegate to the betareg method
  p_value.betareg(model$fit, component = component, ...)
}


#' @rdname p_value.poissonmfx
#' @export
p_value.betamfx <- function(model, component = c("all", "conditional", "precision", "marginal"), ...) {
  parms <- insight::get_parameters(model, component = "all")
  # betareg summaries return one coefficient table per component; stack them
  cs <- do.call(rbind, stats::coef(summary(model$fit)))
  # marginal-effect p-values first, then the underlying model's p-values
  p <- c(as.vector(model$mfxest[, 4]), as.vector(cs[, 4]))
  out <- .data_frame(
    Parameter = parms$Parameter,
    p = p,
    Component = parms$Component
  )
  component <- match.arg(component)
  if (component != "all") {
    out <- out[out$Component == component, ]
  }
  out
}



# simulate model ------------------


#' @export
simulate_model.betaor <- function(model, iterations = 1000, component = c("all", "conditional", "precision"), ...) {
  component <- match.arg(component)
  # simulate from the wrapped betareg fit
  simulate_model.betareg(model$fit,
    iterations = iterations,
    component = component,
    ...
) } #' @export simulate_model.betamfx <- simulate_model.betaor parameters/R/methods_selection.R0000644000175000017500000000571114044454046016614 0ustar nileshnilesh#' @export model_parameters.selection <- function(model, ci = .95, component = c("all", "selection", "outcome", "auxiliary"), bootstrap = FALSE, iterations = 1000, standardize = NULL, exponentiate = FALSE, p_adjust = NULL, verbose = TRUE, ...) { component <- match.arg(component) out <- .model_parameters_generic( model = model, ci = ci, bootstrap = bootstrap, iterations = iterations, component = component, merge_by = c("Parameter", "Component"), standardize = standardize, exponentiate = exponentiate, p_adjust = p_adjust, ... ) attr(out, "object_name") <- deparse(substitute(model), width.cutoff = 500) out } #' @export p_value.selection <- function(model, component = c("all", "selection", "outcome", "auxiliary"), ...) { component <- match.arg(component) s <- summary(model) rn <- row.names(s$estimate) estimates <- as.data.frame(s$estimate, row.names = FALSE) params <- data.frame( Parameter = rn, p = estimates[[4]], Component = "auxiliary", stringsAsFactors = FALSE, row.names = NULL ) params$Component[s$param$index$betaS] <- "selection" params$Component[s$param$index$betaO] <- "outcome" if (component != "all") { params <- params[params$Component == component, , drop = FALSE] } .remove_backticks_from_parameter_names(params) } #' @export standard_error.selection <- function(model, component = c("all", "selection", "outcome", "auxiliary"), ...) 
{ component <- match.arg(component) s <- summary(model) rn <- row.names(s$estimate) estimates <- as.data.frame(s$estimate, row.names = FALSE) params <- data.frame( Parameter = rn, SE = estimates[[2]], Component = "auxiliary", stringsAsFactors = FALSE, row.names = NULL ) params$Component[s$param$index$betaS] <- "selection" params$Component[s$param$index$betaO] <- "outcome" if (component != "all") { params <- params[params$Component == component, , drop = FALSE] } .remove_backticks_from_parameter_names(params) } #' @export simulate_model.selection <- function(model, iterations = 1000, component = c("all", "selection", "outcome", "auxiliary"), ...) { component <- match.arg(component) out <- .simulate_model(model, iterations, component = component, effects = "fixed") class(out) <- c("parameters_simulate_model", class(out)) attr(out, "object_name") <- .safe_deparse(substitute(model)) out } #' @export ci.selection <- ci.default #' @export degrees_of_freedom.selection <- function(model, ...) { s <- summary(model) s$param$df } parameters/R/utils_format.R0000644000175000017500000007056214164651314015622 0ustar nileshnilesh# output-format helper ------------------------- # this function does the main composition of columns for the output .format_output_style <- function(x, style, format, modelname) { linesep <- " " if (style %in% c("se", "ci")) { x$p_stars <- "" } if (style == "minimal") { ci_col <- colnames(x)[grepl(" CI$", colnames(x)) | colnames(x) == "CI"] param_col <- colnames(x)[1] x[[param_col]] <- trimws(paste0(x[[param_col]], linesep, x[[ci_col]])) x <- x[c(param_col, "p")] colnames(x) <- paste0(colnames(x), " (", modelname, ")") } else if (style %in% c("ci_p", "ci")) { ci_col <- colnames(x)[grepl(" CI$", colnames(x)) | colnames(x) == "CI"] param_col <- colnames(x)[1] x[[param_col]] <- trimws(paste0(x[[param_col]], x$p_stars, linesep, x[[ci_col]])) x <- x[param_col] colnames(x) <- modelname } else if (style %in% c("se_p", "se")) { param_col <- colnames(x)[1] 
x[[param_col]] <- trimws(paste0(x[[param_col]], x$p_stars, linesep, "(", x$SE, ")")) x <- x[param_col] colnames(x) <- modelname } else if (style %in% c("ci_p2")) { ci_col <- colnames(x)[grepl(" CI$", colnames(x)) | colnames(x) == "CI"] param_col <- colnames(x)[1] x[[param_col]] <- trimws(paste0(x[[param_col]], linesep, x[[ci_col]])) x <- x[c(param_col, "p")] colnames(x) <- paste0(colnames(x), " (", modelname, ")") } else if (style %in% c("se_p2")) { param_col <- colnames(x)[1] x[[param_col]] <- trimws(paste0(x[[param_col]], linesep, "(", x$SE, ")")) x <- x[c(param_col, "p")] colnames(x) <- paste0(colnames(x), " (", modelname, ")") } x[[1]][x[[1]] == "()"] <- "" x } .add_obs_row <- function(x, att, style) { observations <- unlist(lapply(att, function(i) { if (is.null(i$n_obs)) { NA } else { i$n_obs } })) weighted_observations <- unlist(lapply(att, function(i) { if (is.null(i$weighted_nobs)) { NA } else { i$weighted_nobs } })) # check if model had weights, and if due to missing values n of weighted # observations differs from "raw" observations if (!all(is.na(weighted_observations)) && !all(is.na(observations))) { if (!isTRUE(all.equal(as.vector(weighted_observations), as.vector(observations)))) { message(insight::format_message("Number of weighted observations differs from number of unweighted observations.")) } observations <- weighted_observations } if (!all(is.na(observations))) { # add empty row, as separator empty_row <- do.call(data.frame, as.list(rep(NA, ncol(x)))) colnames(empty_row) <- colnames(x) x <- rbind(x, empty_row) # add observations steps <- (ncol(x) - 1) / length(observations) empty_row[[1]] <- "Observations" insert_at <- seq(2, ncol(x), by = steps) for (i in 1:length(insert_at)) { empty_row[[insert_at[i]]] <- observations[i] } x <- rbind(x, empty_row) } x } # other helper ------------------------ .format_columns_single_component <- function(x, pretty_names, digits = 2, ci_digits = 2, p_digits = 3, ci_width = "auto", ci_brackets = TRUE, format = 
NULL, coef_name = NULL, zap_small = FALSE, ...) { # default brackets are parenthesis for HTML / MD if ((is.null(ci_brackets) || isTRUE(ci_brackets)) && (identical(format, "html") || identical(format, "markdown"))) { brackets <- c("(", ")") } else if (is.null(ci_brackets) || isTRUE(ci_brackets)) { brackets <- c("[", "]") } else { brackets <- ci_brackets } # fix coefficient column name for random effects if (!is.null(x$Effects) && all(x$Effects == "random") && any(colnames(x) %in% .all_coefficient_types())) { colnames(x)[colnames(x) %in% .all_coefficient_types()] <- "Coefficient" } # fix coefficient column name for mixed count and zi pars if (!is.null(x$Component) && sum(c("conditional", "zero_inflated", "dispersion") %in% x$Component) >= 2 && any(colnames(x) %in% .all_coefficient_types())) { colnames(x)[colnames(x) %in% .all_coefficient_types()] <- "Coefficient" } # random pars with level? combine into parameter column if (all(c("Parameter", "Level") %in% colnames(x))) { x$Parameter <- paste0(x$Parameter, " ", brackets[1], x$Level, brackets[2]) x$Level <- NULL } insight::format_table( x, pretty_names = pretty_names, digits = digits, ci_width = ci_width, ci_brackets = ci_brackets, ci_digits = ci_digits, p_digits = p_digits, zap_small = zap_small, ... ) } # helper to format the header / subheader of different model components -------------- .format_model_component_header <- function(x, type, split_column, is_zero_inflated, is_ordinal_model, is_multivariate = FALSE, ran_pars, formatted_table = NULL) { component_name <- switch(type, "mu" = , "fixed" = , "fixed." = , "conditional" = , "conditional." = "Fixed Effects", "random." = , "random" = "Random Effects", "conditional.fixed" = , "conditional.fixed." 
= ifelse(is_zero_inflated, "Fixed Effects (Count Model)", "Fixed Effects"), "conditional.random" = ifelse(ran_pars, "Random Effects Variances", ifelse(is_zero_inflated, "Random Effects (Count Model)", "Random Effects" ) ), "zero_inflated" = "Zero-Inflated", "zero_inflated.fixed" = , "zero_inflated.fixed." = "Fixed Effects (Zero-Inflated Model)", "zero_inflated.random" = "Random Effects (Zero-Inflated Model)", "survival" = , "survival.fixed" = "Survival", "dispersion.fixed" = , "dispersion.fixed." = , "dispersion" = "Dispersion", "marginal" = "Marginal Effects", "emmeans" = "Estimated Marginal Means", "contrasts" = "Contrasts", "simplex.fixed" = , "simplex" = "Monotonic Effects", "smooth_sd" = "Smooth Terms (SD)", "smooth_terms" = "Smooth Terms", "sigma.fixed" = , "sigma.fixed." = , "sigma" = "Sigma", "thresholds" = "Thresholds", "correlation" = "Correlation", "SD/Cor" = "SD / Correlation", "Loading" = "Loading", "location" = , "location.fixed" = , "location.fixed." = "Location Parameters", "scale" = , "scale.fixed" = , "scale.fixed." = "Scale Parameters", "extra" = , "extra.fixed" = , "extra.fixed." = "Extra Parameters", "nu" = "Nu", "tau" = "Tau", "meta" = "Meta-Parameters", "studies" = "Studies", "within" = "Within-Effects", "between" = "Between-Effects", "interactions" = "(Cross-Level) Interactions", "precision" = , "precision." 
= "Precision", "infrequent_purchase" = "Infrequent Purchase", "auxiliary" = "Auxiliary", "residual" = "Residual", "intercept" = "Intercept", "regression" = "Regression", "latent" = "Latent", "time_dummies" = "Time Dummies", type ) if (grepl("^conditional\\.(r|R)andom_variances", component_name)) { component_name <- trimws(gsub("^conditional\\.(r|R)andom_variances(\\.)*", "", component_name)) if (nchar(component_name) == 0) { component_name <- "Random Effects Variances" } else { component_name <- paste0("Random Effects Variances: ", component_name) } } if (grepl("^conditional\\.(r|R)andom", component_name)) { component_name <- trimws(gsub("^conditional\\.(r|R)andom(\\.)*", "", component_name)) if (nchar(component_name) == 0) { component_name <- ifelse(ran_pars, "Random Effects Variances", "Random Effects (Count Model)") } else { component_name <- paste0("Random Effects (Count Model): ", component_name) } } if (grepl("^zero_inflated\\.(r|R)andom", component_name)) { component_name <- trimws(gsub("^zero_inflated\\.(r|R)andom(\\.)*", "", component_name)) if (nchar(component_name) == 0) { component_name <- "Random Effects (Zero-Inflated Model)" } else { component_name <- paste0("Random Effects (Zero-Inflated Model): ", component_name) } } if (grepl("^random\\.(.*)", component_name)) { component_name <- paste0("Random Effects: ", gsub("^random\\.", "", component_name)) } # if we show ZI component only, make sure this appears in header if (!grepl("(Zero-Inflated Model)", component_name, fixed = TRUE) && !is.null(formatted_table$Component) && all(formatted_table$Component == "zero_inflated")) { component_name <- paste0(component_name, " (Zero-Inflated Model)") } # tweaking of sub headers if (isTRUE(attributes(x)$is_ggeffects)) { s1 <- gsub("(.*)\\.(.*) = (.*)", "\\1 (\\2 = \\3)", component_name) s2 <- "" } else if ("DirichletRegModel" %in% attributes(x)$model_class) { if (grepl("^conditional\\.", component_name) || split_column == "Response") { s1 <- "Response level:" s2 
<- gsub("^conditional\\.(.*)", "\\1", component_name) } else { s1 <- component_name s2 <- "" } } else if (length(split_column) > 1 && "Response" %in% split_column && is_multivariate) { # This here only applies to brms multivariate response models component_name <- gsub("^conditional\\.(.*)", "Response level: \\1", component_name) component_name <- gsub("^sigma\\.(.*)", "Auxilliary parameters, response level: \\1", component_name) component_name <- gsub("(.*)fixed\\.(.*)", "\\1\\2", component_name) component_name <- gsub("(.*)random\\.(.*)", "Random effects, \\1\\2", component_name) s1 <- component_name s2 <- "" } else if (length(split_column) > 1 || split_column %in% c("Subgroup", "Type", "Group") || grepl(tolower(split_column), tolower(component_name), fixed = TRUE) || component_name %in% c("Within-Effects", "Between-Effects", "(Cross-Level) Interactions")) { s1 <- component_name s2 <- "" } else if (split_column == "Response" && is_ordinal_model) { s1 <- "Response level:" s2 <- component_name } else { s1 <- component_name s2 <- ifelse(tolower(split_column) == "component", "", split_column) } list(name = component_name, subheader1 = s1, subheader2 = s2) } # helper grouping parameters ------------------- .parameter_groups <- function(x, groups) { # only apply to conditional component for now if ("Component" %in% colnames(x) && sum(x$Component == "conditional") == 0) { return(x) } if ("Component" %in% colnames(x)) { row_index <- which(x$Component == "conditional") } else { row_index <- 1:nrow(x) } x_other <- x[-row_index, ] x <- x[row_index, ] att <- attributes(x) indent_rows <- NULL indent_parameters <- NULL if (is.list(groups)) { # find parameter names and replace by rowindex group_rows <- lapply(groups, function(i) { if (is.character(i)) { i <- match(i, x$Parameter) } i }) # sanity check - check if all parameter names in the # group list are spelled correctly misspelled <- sapply(group_rows, function(i) { any(is.na(i)) }) if (any(misspelled)) { # remove invalid 
groups group_rows[misspelled] <- NULL # tell user warning(insight::format_message( "Couldn't find one or more parameters specified in following groups:", paste0(names(misspelled[misspelled]), collapse = ", "), "Maybe you misspelled parameter names?" ), call. = FALSE) } # sort parameters according to grouping selected_rows <- unlist(group_rows) indent_parameters <- x$Parameter[selected_rows] x <- rbind(x[selected_rows, ], x[-selected_rows, ]) # set back correct indices groups <- 1 for (i in 2:length(group_rows)) { groups <- c(groups, groups[i - 1] + length(group_rows[[i - 1]])) } names(groups) <- names(group_rows) } else { # find parameter names and replace by rowindex group_names <- names(groups) groups <- match(groups, x$Parameter) names(groups) <- group_names # order groups groups <- groups[order(groups)] } empty_row <- x[1, ] for (i in 1:ncol(empty_row)) { empty_row[[i]] <- NA } for (i in length(groups):1) { x[seq(groups[i] + 1, nrow(x) + 1), ] <- x[seq(groups[i], nrow(x)), ] x[groups[i], ] <- empty_row x$Parameter[groups[i]] <- paste0("# ", names(groups[i])) } # find row indices of indented parameters if (!is.null(indent_parameters)) { indent_rows <- match(indent_parameters, x$Parameter) } # add other rows back if (nrow(x_other) > 0) { x <- rbind(x, x_other) } attributes(x) <- utils::modifyList(att, attributes(x)) attr(x, "indent_rows") <- indent_rows attr(x, "indent_groups") <- "# " x } # .insert_row <- function(x, newrow, r) { # existingDF[seq(r+1,nrow(existingDF)+1),] <- existingDF[seq(r,nrow(existingDF)),] # existingDF[r,] <- newrow # existingDF # } .prepare_x_for_print <- function(x, select, coef_name, s_value) { # minor fix for nested Anovas if ("Group" %in% colnames(x) && sum(x$Parameter == "Residuals") > 1) { colnames(x)[which(colnames(x) == "Group")] <- "Subgroup" } if (!is.null(select)) { if (all(select == "minimal")) { select <- c("Parameter", "Coefficient", "Std_Coefficient", "CI", "CI_low", "CI_high", "p") } else if (all(select == "short")) { 
select <- c("Parameter", "Coefficient", "Std_Coefficient", "SE", "p") } else if (is.numeric(select)) { select <- colnames(x)[select] } select <- union(select, c("Parameter", "Component", "Effects", "Response", "Subgroup")) # for emmGrid objects, we save specific parameter names as attribute parameter_names <- attributes(x)$parameter_names if (!is.null(parameter_names)) { select <- c(parameter_names, select) } to_remove <- setdiff(colnames(x), select) x[to_remove] <- NULL } # remove columns that have only NA or Inf to_remove <- sapply(x, function(col) all(is.na(col) | is.infinite(col))) if (any(to_remove)) x[to_remove] <- NULL # For Bayesian models, we need to prettify parameter names here... mc <- attributes(x)$model_class cp <- attributes(x)$cleaned_parameters if (!is.null(mc) && !is.null(cp) && mc %in% c("stanreg", "stanmvreg", "brmsfit")) { if (length(cp) == length(x$Parameter)) { x$Parameter <- cp } pretty_names <- FALSE } # for bayesian meta, remove ROPE_CI if (isTRUE(attributes(x)$is_bayes_meta)) { x$CI <- NULL x$ROPE_CI <- NULL x$ROPE_low <- NULL x$ROPE_high <- NULL } if (!is.null(coef_name)) { colnames(x)[which(colnames(x) == "Coefficient")] <- coef_name colnames(x)[which(colnames(x) == "Std_Coefficient")] <- paste0("Std_", coef_name) } if (isTRUE(s_value) && "p" %in% colnames(x)) { colnames(x)[colnames(x) == "p"] <- "s" x[["s"]] <- log2(1 / x[["s"]]) } x } .prepare_splitby_for_print <- function(x) { if (!is.null(attributes(x)$model_class) && any(attributes(x)$model_class == "mvord")) { x$Response <- NULL } split_by <- "" split_by <- c(split_by, ifelse("Component" %in% names(x) && .n_unique(x$Component) > 1, "Component", "")) split_by <- c(split_by, ifelse("Effects" %in% names(x) && .n_unique(x$Effects) > 1, "Effects", "")) split_by <- c(split_by, ifelse("Response" %in% names(x) && .n_unique(x$Response) > 1, "Response", "")) split_by <- c(split_by, ifelse("Group" %in% names(x) && .n_unique(x$Group) > 1, "Group", "")) split_by <- c(split_by, 
ifelse("Subgroup" %in% names(x) && .n_unique(x$Subgroup) > 1, "Subgroup", "")) split_by <- split_by[nchar(split_by) > 0] split_by } # this function is actually similar to "insight::print_parameters()", but more # sophisticated, to ensure nicely outputs even for complicated or complex models, # or edge cases... #' @keywords internal .format_columns_multiple_components <- function(x, pretty_names, split_column = "Component", digits = 2, ci_digits = 2, p_digits = 3, coef_column = NULL, format = NULL, ci_width = "auto", ci_brackets = TRUE, zap_small = FALSE, ...) { final_table <- list() ignore_group <- isTRUE(attributes(x)$ignore_group) ran_pars <- isTRUE(attributes(x)$ran_pars) is_ggeffects <- isTRUE(attributes(x)$is_ggeffects) # name of "Parameter" column - usually the first column, however, for # ggeffects objects, this column has the name of the focal term if (is_ggeffects) { parameter_column <- colnames(x)[1] } else { parameter_column <- "Parameter" } # default brackets are parenthesis for HTML / MD if ((is.null(ci_brackets) || isTRUE(ci_brackets)) && (identical(format, "html") || identical(format, "markdown"))) { ci_brackets <- c("(", ")") } else if (is.null(ci_brackets) || isTRUE(ci_brackets)) { ci_brackets <- c("[", "]") } # check ordinal / multivariate is_ordinal_model <- isTRUE(attributes(x)$ordinal_model) is_multivariate <- isTRUE(attributes(x)$multivariate_response) # zero-inflated stuff is_zero_inflated <- (!is.null(x$Component) & "zero_inflated" %in% x$Component) zi_coef_name <- attributes(x)$zi_coefficient_name # other special model-components, like emm_list coef_name2 <- attributes(x)$coefficient_name2 # make sure we have correct order of levels from split-factor if (!is.null(attributes(x)$model_class) && all(attributes(x)$model_class == "mediate")) { x$Component <- factor(x$Component, levels = c("control", "treated", "average", "Total Effect")) x$Parameter <- trimws(gsub("(.*)\\((.*)\\)$", "\\1", x$Parameter)) } else { x[split_column] <- 
lapply(x[split_column], function(i) { if (!is.factor(i)) i <- factor(i, levels = unique(i)) i }) } # fix column output if (inherits(attributes(x)$model, c("lavaan", "blavaan")) && "Label" %in% colnames(x)) { x$From <- ifelse(x$Label == "" | x$Label == x$To, x$From, paste0(x$From, " (", x$Label, ")")) x$Label <- NULL } if (inherits(attributes(x)$model, c("lavaan", "blavaan")) && !"Parameter" %in% colnames(x)) { parameter_column <- colnames(x)[1] } if (inherits(attributes(x)$model, c("lavaan", "blavaan")) && "Defined" %in% x$Component) { x$From[x$Component == "Defined"] <- "" x$Operator[x$Component == "Defined"] <- "" x$To <- ifelse(x$Component == "Defined", paste0("(", x$To, ")"), x$To) } # set up split-factor if (length(split_column) > 1) { split_by <- lapply(split_column, function(i) x[[i]]) } else { split_by <- list(x[[split_column]]) } names(split_by) <- split_column # make sure we have correct sorting here... tables <- split(x, f = split_by) # sanity check - only preserve tables with any data in data frames tables <- tables[sapply(tables, nrow) > 0] # fix table names for random effects, when we only have random # effects. in such cases, the wrong header (fixed effects) is chosen # to prevent this, we "fake" the name of the splitted components by # prefixing them with "random." if (!is.null(x$Effects) && all(x$Effects == "random") && !all(grepl("^random\\.", names(tables)))) { wrong_names <- !grepl("^random\\.", names(tables)) names(tables)[wrong_names] <- paste0("random.", names(tables)[wrong_names]) } for (type in names(tables)) { # do we have emmeans emlist? and contrasts? 
model_class <- attributes(tables[[type]])$model_class em_list_coef_name <- (!is.null(model_class) && "emm_list" %in% model_class && "contrasts" %in% tables[[type]]$Component) # Don't print Component column for (i in split_column) { tables[[type]][[i]] <- NULL } # Smooth terms statistics if ("t / F" %in% names(tables[[type]])) { if (type == "smooth_terms") { names(tables[[type]])[names(tables[[type]]) == "t / F"] <- "F" } if (type == "conditional") { names(tables[[type]])[names(tables[[type]]) == "t / F"] <- "t" } } else if (type == "smooth_terms" && "t" %in% names(tables[[type]])) { names(tables[[type]])[names(tables[[type]]) == "t"] <- "F" } if ("z / Chi2" %in% names(tables[[type]])) { if (type == "smooth_terms") { names(tables[[type]])[names(tables[[type]]) == "z / Chi2"] <- "Chi2" } if (type == "conditional") { names(tables[[type]])[names(tables[[type]]) == "z / Chi2"] <- "z" } } # Don't print se and ci if all are missing if (all(is.na(tables[[type]]$SE))) tables[[type]]$SE <- NULL if (all(is.na(tables[[type]]$CI_low)) && all(is.na(tables[[type]]$CI_high))) { tables[[type]]$CI_low <- NULL tables[[type]]$CI_high <- NULL } # if (all(is.na(tables[[type]]$CI_low))) tables[[type]]$CI_low <- NULL # if (all(is.na(tables[[type]]$CI_high))) tables[[type]]$CI_high <- NULL # Don't print if empty col tables[[type]][sapply(colnames(tables[[type]]), function(x) { col <- tables[[type]][[x]] (all(col == "") | all(is.na(col))) && !grepl("_CI_(high|low)$", x) })] <- NULL attr(tables[[type]], "digits") <- digits attr(tables[[type]], "ci_digits") <- ci_digits attr(tables[[type]], "p_digits") <- p_digits # random pars with level? 
combine into parameter column if (all(c("Parameter", "Level") %in% colnames(tables[[type]]))) { tables[[type]]$Parameter <- paste0(tables[[type]]$Parameter, " ", ci_brackets[1], tables[[type]]$Level, ci_brackets[2]) tables[[type]]$Level <- NULL } # rename columns for emmeans contrast part if (em_list_coef_name && !is.null(coef_column)) { colnames(tables[[type]])[which(colnames(tables[[type]]) == coef_column)] <- coef_name2 } # rename columns for zero-inflation part if (grepl("^zero", type) && !is.null(zi_coef_name) && !is.null(coef_column)) { colnames(tables[[type]])[which(colnames(tables[[type]]) == coef_column)] <- zi_coef_name colnames(tables[[type]])[which(colnames(tables[[type]]) == paste0("Std_", coef_column))] <- paste0("Std_", zi_coef_name) } # rename columns for correlation part if (type == "correlation" && !is.null(coef_column)) { colnames(tables[[type]])[which(colnames(tables[[type]]) == coef_column)] <- "Estimate" } # rename columns for dispersion part if (grepl("^dispersion", type) && !is.null(coef_column)) { colnames(tables[[type]])[which(colnames(tables[[type]]) == coef_column)] <- "Coefficient" } # rename columns for random part if (grepl("random", type) && any(colnames(tables[[type]]) %in% .all_coefficient_types())) { colnames(tables[[type]])[colnames(tables[[type]]) %in% .all_coefficient_types()] <- "Coefficient" } if (grepl("random", type) && isTRUE(ran_pars)) { tables[[type]]$CI <- NULL } # for ggeffects objects, only choose selected lines, to have # a more compact output if (is_ggeffects && is.numeric(tables[[type]][[1]])) { n_rows <- nrow(tables[[type]]) row_steps <- round(sqrt(n_rows)) sample_rows <- round(c(1, stats::quantile(seq_len(n_rows), seq_len(row_steps - 2) / row_steps), n_rows)) tables[[type]] <- tables[[type]][sample_rows, ] tables[[type]][[1]] <- insight::format_value(tables[[type]][[1]], digits = digits, protect_integers = TRUE) } formatted_table <- insight::format_table(tables[[type]], digits = digits, ci_digits = ci_digits, 
p_digits = p_digits, pretty_names = pretty_names, ci_width = ci_width, ci_brackets = ci_brackets, zap_small = zap_small, ...) component_header <- .format_model_component_header(x, type, split_column, is_zero_inflated, is_ordinal_model, is_multivariate, ran_pars, formatted_table) # exceptions for random effects if (.n_unique(formatted_table$Group) == 1) { component_header$subheader1 <- paste0(component_header$subheader1, " (", formatted_table$Group, ")") formatted_table$Group <- NULL } # remove non-necessary columns if (.n_unique(formatted_table$Component) == 1) { formatted_table$Component <- NULL } # no column with CI-level in output if (!is.null(formatted_table$CI) && .n_unique(formatted_table$CI) == 1) { formatted_table$CI <- NULL } table_caption <- NULL if (is.null(format) || format == "text") { # Print if (component_header$name != "rewb-contextual") { table_caption <- c(sprintf("# %s %s", component_header$subheader1, tolower(component_header$subheader2)), "blue") } } else if (format %in% c("markdown", "html")) { # Print if (component_header$name != "rewb-contextual") { table_caption <- sprintf("%s %s", component_header$subheader1, tolower(component_header$subheader2)) } # replace brackets by parenthesis if (!is.null(parameter_column) && parameter_column %in% colnames(formatted_table)) { formatted_table[[parameter_column]] <- gsub("[", ci_brackets[1], formatted_table[[parameter_column]], fixed = TRUE) formatted_table[[parameter_column]] <- gsub("]", ci_brackets[2], formatted_table[[parameter_column]], fixed = TRUE) } } if (identical(format, "html")) { formatted_table$Component <- table_caption } else { attr(formatted_table, "table_caption") <- table_caption } # remove unique columns if (.n_unique(formatted_table$Effects) == 1) formatted_table$Effects <- NULL if (.n_unique(formatted_table$Group) == 1) formatted_table$Group <- NULL final_table <- c(final_table, list(formatted_table)) } if (identical(format, "html")) { # fix non-equal length of columns final_table 
<- .fix_nonmatching_columns(final_table, is_lavaan = inherits(attributes(x)$model, c("lavaan", "blavaan"))) do.call(rbind, final_table) } else { .compact_list(final_table) } } # helper to fix unequal number of columns for list of data frames, # when used for HTML printing .fix_nonmatching_columns <- function(final_table, is_lavaan = FALSE) { # fix for lavaan here if (is_lavaan) { for (i in 1:length(final_table)) { if (!is.null(final_table[[i]]$Link) && !is.null(final_table[[i]]$To)) { if (all(is.na(final_table[[i]]$Link))) { final_table[[i]]$Link <- final_table[[i]]$To final_table[[i]]$To <- NA } } colnames(final_table[[i]])[1] <- "Parameter" if (!is.null(final_table[[i]]$To) && all(is.na(final_table[[i]]$To))) { final_table[[i]]$To <- NULL } } } # then check for correct column length col_len <- sapply(final_table, function(i) length(colnames(i))) # remove non matching columns if (!all(col_len) == max(col_len)) { all_columns <- unique(unlist(lapply(final_table, colnames))) for (i in 1:length(final_table)) { missing_columns <- setdiff(all_columns, colnames(final_table[[i]])) if (length(missing_columns)) { a <- attributes(final_table[[i]]) final_table[[i]][missing_columns] <- NA final_table[[i]] <- final_table[[i]][match(all_columns, colnames(final_table[[i]]))] attributes(final_table[[i]]) <- utils::modifyList(a, attributes(final_table[[i]])) } } } final_table } parameters/R/methods_lqmm.R0000644000175000017500000000622714032160360015566 0ustar nileshnilesh#' @export model_parameters.lqmm <- function(model, ci = .95, bootstrap = FALSE, iterations = 1000, p_adjust = NULL, verbose = TRUE, ...) { # Processing if (bootstrap) { parameters <- bootstrap_parameters( model, iterations = iterations, ci = ci, ... ) } else { parameters <- .extract_parameters_lqmm( model, ci = ci, p_adjust = p_adjust, verbose = verbose, ... ) } parameters <- .add_model_parameters_attributes( parameters, model, ci, exponentiate = FALSE, p_adjust = p_adjust, verbose = verbose, ... 
) attr(parameters, "object_name") <- deparse(substitute(model), width.cutoff = 500) class(parameters) <- c("parameters_model", "see_parameters_model", class(parameters)) parameters } #' @export model_parameters.lqm <- model_parameters.lqmm #' @export ci.lqmm <- function(x, ...) { out <- model_parameters(x, ...) as.data.frame(out[c("Parameter", "CI_low", "CI_high")]) } #' @export ci.lqm <- ci.lqmm #' @export standard_error.lqmm <- function(model, ...) { out <- model_parameters(model, ...) as.data.frame(out[c("Parameter", "SE")]) } #' @export standard_error.lqm <- standard_error.lqmm #' @export degrees_of_freedom.lqmm <- function(model, ...) { out <- model_parameters(model, ...) out$df_error } #' @export degrees_of_freedom.lqm <- degrees_of_freedom.lqmm #' @export p_value.lqmm <- function(model, ...) { out <- model_parameters(model, ...) as.data.frame(out[c("Parameter", "p")]) } #' @export p_value.lqm <- p_value.lqmm # helper ------------------ .extract_parameters_lqmm <- function(model, ci, p_adjust, verbose = TRUE, ...) 
{ cs <- summary(model) parameters <- insight::get_parameters(model) if (is.list(cs$tTable)) { summary_table <- do.call(rbind, cs$tTable) } else { summary_table <- cs$tTable } # ==== Coefficient, SE and test statistic parameters$Coefficient <- parameters$Estimate parameters$SE <- summary_table[, 2] parameters$t <- parameters$Estimate / parameters$SE # ==== DF parameters$df_error <- tryCatch( { if (!is.null(cs$rdf)) { cs$rdf } else { attr(cs$B, "R") - 1 } }, error = function(e) { Inf } ) # ==== Conf Int parameters$CI_low <- parameters$Coefficient - stats::qt((1 + ci) / 2, df = parameters$df_error) * parameters$SE parameters$CI_high <- parameters$Coefficient + stats::qt((1 + ci) / 2, df = parameters$df_error) * parameters$SE # ==== p-value parameters$p <- summary_table[, 5] if (!is.null(p_adjust)) { parameters <- .p_adjust(parameters, p_adjust, model, verbose) } # ==== Reorder col_order <- c("Parameter", "Coefficient", "SE", "CI_low", "CI_high", "t", "df_error", "p", "Component") parameters[col_order[col_order %in% names(parameters)]] } parameters/R/p_value_kenward.R0000644000175000017500000001145514131531727016253 0ustar nileshnilesh#' @title Kenward-Roger approximation for SEs, CIs and p-values #' @name p_value_kenward #' #' @description An approximate F-test based on the Kenward-Roger (1997) approach. #' #' @param model A statistical model. #' @param dof Degrees of Freedom. #' @inheritParams ci.default #' #' @details Inferential statistics (like p-values, confidence intervals and #' standard errors) may be biased in mixed models when the number of clusters #' is small (even if the sample size of level-1 units is high). In such cases #' it is recommended to approximate a more accurate number of degrees of freedom #' for such inferential statistics. Unlike simpler approximation heuristics #' like the "m-l-1" rule (`dof_ml1`), the Kenward-Roger approximation is #' also applicable in more complex multilevel designs, e.g. with cross-classified #' clusters. 
However, the "m-l-1" heuristic also applies to generalized #' mixed models, while approaches like Kenward-Roger or Satterthwaite are limited #' to linear mixed models only. #' #' @seealso `dof_kenward()` and `se_kenward()` are small helper-functions #' to calculate approximated degrees of freedom and standard errors for model #' parameters, based on the Kenward-Roger (1997) approach. #' \cr \cr #' [`dof_satterthwaite()`][dof_satterthwaite] and #' [`dof_ml1()`][dof_ml1] approximate degrees #' of freedom based on Satterthwaite's method or the "m-l-1" rule. #' #' @examples #' \donttest{ #' if (require("lme4", quietly = TRUE)) { #' model <- lmer(Petal.Length ~ Sepal.Length + (1 | Species), data = iris) #' p_value_kenward(model) #' } #' } #' @return A data frame. #' @references Kenward, M. G., & Roger, J. H. (1997). Small sample inference for #' fixed effects from restricted maximum likelihood. Biometrics, 983-997. #' @export p_value_kenward <- function(model, dof = NULL) { UseMethod("p_value_kenward") } #' @export p_value_kenward.lmerMod <- function(model, dof = NULL) { if (is.null(dof)) { dof <- dof_kenward(model) } .p_value_dof(model, dof, method = "kenward") } # helper ------------------------------ .p_value_dof <- function(model, dof, method = NULL, statistic = NULL, se = NULL, robust = FALSE, component = c("all", "conditional", "zi", "zero_inflated", "dispersion", "precision", "scale", "smooth_terms", "full", "marginal"), effects = c("fixed", "random", "all"), verbose = TRUE, ...) 
{
  component <- match.arg(component)
  effects <- match.arg(effects)

  # return early if the requested component does not exist for this model
  if (is.null(.check_component(model, component, verbose = verbose))) {
    return(NULL)
  }
  params <- insight::get_parameters(model, component = component)

  # check if all estimates are non-NA
  params <- .check_rank_deficiency(params, verbose = FALSE)

  # when no test statistic was supplied, look it up from the model and
  # align it with the parameter rows via merge()
  if (is.null(statistic)) {
    statistic <- insight::get_statistic(model, component = component)
    params <- merge(params, statistic, sort = FALSE)
    statistic <- params$Statistic
  }

  # different SE for kenward and robust
  if (identical(method, "kenward") || identical(method, "kr")) {
    if (is.null(se)) {
      se <- se_kenward(model)$SE
    }
  } else if (isTRUE(robust)) {
    se <- standard_error_robust(model, component = component, ...)$SE
  }

  # overwrite statistic, based on robust or kenward standard errors
  if (identical(method, "kenward") || identical(method, "kr") || isTRUE(robust)) {
    estimate <- if ("Coefficient" %in% colnames(params)) {
      params$Coefficient
    } else {
      params$Estimate
    }
    statistic <- estimate / se
  }

  # two-sided p-value from the t-distribution, using the supplied df
  p <- 2 * stats::pt(abs(statistic), df = dof, lower.tail = FALSE)

  out <- .data_frame(
    Parameter = params$Parameter,
    p = unname(p)
  )

  # keep grouping columns when present, so callers can merge results back
  if ("Component" %in% names(params)) out$Component <- params$Component
  if ("Effects" %in% names(params) && effects != "fixed") out$Effects <- params$Effects
  if ("Response" %in% names(params)) out$Response <- params$Response

  out
}


# Compute two-sided Wald-t p-values from a data frame of estimates ("params")
# and a data frame with Kenward-Roger df/SE ("dof"), merged by parameter name.
.p_value_dof_kr <- function(model, params, dof) {
  # prefer the SE column delivered in "dof" over the one in "params"
  if ("SE" %in% colnames(params) && "SE" %in% colnames(dof)) {
    params$SE <- NULL
  }
  params <- merge(params, dof, by = "Parameter")
  p <- 2 * stats::pt(abs(params$Estimate / params$SE), df = params$df_error, lower.tail = FALSE)

  .data_frame(
    Parameter = params$Parameter,
    p = unname(p)
  )
}


# helper -------------------------

# Warn when a merMod model was not fitted by REML (the warning text notes
# that p-values/df may then be unreliable).
.check_REML_fit <- function(model) {
  insight::check_if_installed("lme4")
  if (!(lme4::getME(model, "is_REML"))) {
    warning(insight::format_message("Model was not fitted by REML. Re-fitting model now, but p-values, df, etc. still might be unreliable."), call.
= FALSE) } } parameters/R/methods_BayesFactor.R0000644000175000017500000002342214142156524017027 0ustar nileshnilesh# classes: .BFBayesFactor #' Parameters from BayesFactor objects #' #' Parameters from `BFBayesFactor` objects from `{BayesFactor}` package. #' #' @param model Object of class `BFBayesFactor`. #' @param cohens_d If `TRUE`, compute Cohens' *d* as index of effect size. Only #' applies to objects from `ttestBF()`. See `effectsize::cohens_d()` for #' details. #' @param include_proportions Logical that decides whether to include posterior #' cell proportions/counts for Bayesian contingency table analysis (from #' `BayesFactor::contingencyTableBF()`). Defaults to `FALSE`, as this #' information is often redundant. #' @inheritParams bayestestR::describe_posterior #' @inheritParams p_value #' @inheritParams model_parameters.htest #' #' @details #' The meaning of the extracted parameters: #' \itemize{ #' \item For [BayesFactor::ttestBF()]: `Difference` is the raw #' difference between the means. \item For #' [BayesFactor::correlationBF()]: `rho` is the linear #' correlation estimate (equivalent to Pearson's *r*). \item For #' [BayesFactor::lmBF()] / [BayesFactor::generalTestBF()] #' / [BayesFactor::regressionBF()] / #' [BayesFactor::anovaBF()]: in addition to parameters of the fixed #' and random effects, there are: `mu` is the (mean-centered) intercept; #' `sig2` is the model's sigma; `g` / `g_*` are the *g* #' parameters; See the *Bayes Factors for ANOVAs* paper #' (\doi{10.1016/j.jmp.2012.08.001}). 
#' } #' #' @examples #' \donttest{ #' if (require("BayesFactor")) { #' # Bayesian t-test #' model <- ttestBF(x = rnorm(100, 1, 1)) #' model_parameters(model) #' model_parameters(model, cohens_d = TRUE, ci = .9) #' #' # Bayesian contingency table analysis #' data(raceDolls) #' bf <- contingencyTableBF(raceDolls, sampleType = "indepMulti", fixedMargin = "cols") #' model_parameters(bf, #' centrality = "mean", #' dispersion = TRUE, #' verbose = FALSE, #' cramers_v = TRUE #' ) #' } #' } #' @return A data frame of indices related to the model's parameters. #' @export model_parameters.BFBayesFactor <- function(model, centrality = "median", dispersion = FALSE, ci = 0.95, ci_method = "hdi", test = c("pd", "rope"), rope_range = "default", rope_ci = 0.95, priors = TRUE, cohens_d = NULL, cramers_v = NULL, include_proportions = FALSE, verbose = TRUE, ...) { if (any(grepl("^Null", names(model@numerator)))) { if (isTRUE(verbose)) { insight::print_color("Nothing to compute for point-null models.\nSee github.com/easystats/parameters/issues/226\n", "red") } return(NULL) } if (is.null(insight::get_parameters(model, verbose = verbose))) { if (isTRUE(verbose)) { warning("Can't extract model parameters.", call. = FALSE) } return(NULL) } out <- bayestestR::describe_posterior( model, centrality = centrality, dispersion = dispersion, ci = ci, ci_method = ci_method, test = test, rope_range = rope_range, rope_ci = rope_ci, priors = priors, verbose = verbose, ... 
) bf_type <- .classify_BFBayesFactor(model) # Add components and effects columns cleaned_params <- NULL tryCatch( { cleaned_params <- insight::clean_parameters(model) out <- merge(out, cleaned_params[, c("Parameter", "Effects", "Component")], sort = FALSE) }, error = function(e) { NULL } ) # Extract BF tryCatch( { bfm <- as.data.frame(bayestestR::bayesfactor_models(model)[-1, ]) if (!is.null(bfm$log_BF)) { out$BF <- exp(bfm$log_BF) } else { out$BF <- bfm$BF } }, error = function(e) { NULL } ) # leave out redundant posterior cell proportions/counts if (bf_type == "xtable" && isFALSE(include_proportions)) { out <- out[which(!grepl("^cell\\[", out$Parameter)), , drop = FALSE] } # Effect size? if (bf_type %in% c("ttest1", "ttest2") && !is.null(cohens_d) || bf_type == "xtable" && !is.null(cramers_v)) { # needs {effectsize} to be installed insight::check_if_installed("effectsize") tryCatch( { effsize <- effectsize::effectsize(model, centrality = centrality, dispersion = dispersion, ci = ci, ci_method = ci_method, rope_ci = rope_ci ) if (bf_type == "xtable" && isTRUE(include_proportions)) { out <- merge(out, effsize, sort = FALSE, all = TRUE) } else { if (bf_type == "xtable") { prefix <- "Cramers_" } else { prefix <- "d_" } ci_cols <- grepl("^CI_", colnames(effsize)) colnames(effsize)[ci_cols] <- paste0(prefix, colnames(effsize)[ci_cols]) out$CI <- NULL out <- cbind(out, effsize) } }, error = function(e) { NULL } ) } # # Remove unnecessary columns # if ("CI" %in% names(out) && length(stats::na.omit(unique(out$CI))) == 1) { # out$CI <- NULL # } if ("ROPE_CI" %in% names(out) && length(stats::na.omit(unique(out$ROPE_CI))) == 1) { out$ROPE_CI <- NULL } if ("ROPE_low" %in% names(out)) { out$ROPE_low <- NULL out$ROPE_high <- NULL } # ==== remove Component column if not needed if (!is.null(out$Component) && .n_unique(out$Component) == 1) out$Component <- NULL if (!is.null(out$Effects) && .n_unique(out$Effects) == 1) out$Effects <- NULL # ==== pretty parameter names cp <- 
out$Parameter if (!is.null(cleaned_params) && length(cleaned_params$Cleaned_Parameter) == length(cp) && bf_type == "linear") { match_params <- stats::na.omit(match(cp, cleaned_params$Parameter)) cp <- cleaned_params$Cleaned_Parameter[match_params] } pretty_names <- stats::setNames( gsub("Cohens_d", "Cohen's D", gsub("Cramers_v", "Cramer's V", cp, fixed = TRUE), fixed = TRUE), out$Parameter ) if (!"Method" %in% names(out)) { out$Method <- .method_BFBayesFactor(model) } # reorder col_order <- c( "Parameter", "Mean", "Median", "MAD", "CI", "CI_low", "CI_high", "SD", "Cohens_d", "Cramers_v", "d_CI_low", "d_CI_high", "Cramers_CI_low", "Cramers_CI_high", "pd", "ROPE_Percentage", "Prior_Distribution", "Prior_Location", "Prior_Scale", "Effects", "Component", "BF", "Method" ) out <- out[col_order[col_order %in% names(out)]] attr(out, "title") <- unique(out$Method) attr(out, "object_name") <- deparse(substitute(model), width.cutoff = 500) attr(out, "pretty_names") <- pretty_names attr(out, "ci_test") <- ci out <- .add_model_parameters_attributes( params = out, model = model, ci = ci, ci_method = ci_method, verbose = verbose ) class(out) <- c("parameters_model", "see_parameters_model", class(out)) out } #' p-values for Bayesian Models #' #' This function attempts to return, or compute, p-values of Bayesian models. #' #' @param model A statistical model. #' @inheritParams p_value #' #' @details #' #' For Bayesian models, the p-values corresponds to the *probability of #' direction* ([bayestestR::p_direction()]), which is converted to a p-value #' using `bayestestR::convert_pd_to_p()`. #' #' @return The p-values. #' #' @examples #' data(iris) #' model <- lm(Petal.Length ~ Sepal.Length + Species, data = iris) #' p_value(model) #' @export p_value.BFBayesFactor <- function(model, ...) 
{ p <- bayestestR::p_direction(model) .data_frame( Parameter = .remove_backticks_from_string(p$Parameter), p = sapply(p$pd, bayestestR::convert_pd_to_p, simplify = TRUE) ) } # helper ------- .classify_BFBayesFactor <- function(x) { if (!requireNamespace("BayesFactor", quietly = TRUE)) { stop("This function needs `BayesFactor` to be installed.") } if (any(class(x@denominator) %in% c("BFcorrelation"))) { "correlation" } else if (any(class(x@denominator) %in% c("BFoneSample"))) { "ttest1" } else if (any(class(x@denominator) %in% c("BFindepSample"))) { "ttest2" } else if (any(class(x@denominator) %in% c("BFmetat"))) { "meta" } else if (any(class(x@denominator) %in% c("BFlinearModel"))) { "linear" } else if (any(class(x@denominator) %in% c("BFcontingencyTable"))) { "xtable" } else if (any(class(x@denominator) %in% c("BFproportion"))) { "proptest" } else { class(x@denominator) } } .method_BFBayesFactor <- function(x) { if (!requireNamespace("BayesFactor", quietly = TRUE)) { stop("This function needs `BayesFactor` to be installed.") } if (any(class(x@denominator) %in% c("BFcorrelation"))) { "Bayesian correlation analysis" } else if (any(class(x@denominator) %in% c("BFoneSample", "BFindepSample"))) { "Bayesian t-test" } else if (any(class(x@denominator) %in% c("BFmetat"))) { "Meta-analytic Bayes factors" } else if (any(class(x@denominator) %in% c("BFlinearModel"))) { "Bayes factors for linear models" } else if (any(class(x@denominator) %in% c("BFcontingencyTable"))) { "Bayesian contingency table analysis" } else if (any(class(x@denominator) %in% c("BFproportion"))) { "Bayesian proportion test" } else { NA_character_ } } parameters/R/methods_merTools.R0000644000175000017500000000254314131354760016432 0ustar nileshnilesh#' @export model_parameters.merModList <- function(model, ci = .95, exponentiate = FALSE, p_adjust = NULL, verbose = TRUE, ...) 
{
  # delegate to the generic workhorse; bootstrapping is disabled here
  out <- .model_parameters_generic(
    model = model,
    ci = ci,
    bootstrap = FALSE,
    # presumably ignored since bootstrap = FALSE - TODO confirm
    iterations = 10,
    merge_by = "Parameter",
    standardize = NULL,
    exponentiate = exponentiate,
    robust = FALSE,
    p_adjust = p_adjust,
    ...
  )
  attr(out, "object_name") <- deparse(substitute(model), width.cutoff = 500)
  out
}

#' @export
ci.merModList <- function(x, ci = .95, ...) {
  # Wald intervals for the conditional (fixed effects) component only
  .ci_generic(model = x, ci = ci, dof = NULL, robust = FALSE, component = "conditional")
}

#' @export
standard_error.merModList <- function(model, ...) {
  # summary() of the model list exposes fixed-effects terms in "$fe"
  s <- suppressWarnings(summary(model))
  out <- .data_frame(
    Parameter = s$fe$term,
    SE = s$fe$std.error
  )
  .remove_backticks_from_parameter_names(out)
}

#' @export
degrees_of_freedom.merModList <- function(model, ...) {
  s <- suppressWarnings(summary(model))
  s$fe$df
}

#' @export
format_parameters.merModList <- function(model, brackets = c("[", "]"), ...) {
  # assumes all list elements share the same parameters, so the first
  # element is representative - TODO confirm
  .format_parameter_default(model[[1]], brackets = brackets)
}
parameters/R/utils_cleaners.R0000644000175000017500000000454614036353021016115 0ustar nileshnilesh
#' @keywords internal
.clean_parameter_names <- function(x, full = FALSE) {
  # return if x is empty
  if (is.null(x) || length(x) == 0) {
    return("")
  }

  # here we need to capture only those patterns that we do *not* want to format
  # in a particular style. However, these patterns will not be shown in the output
  # from "model_parameters()". If certain patterns contain useful information,
  # remove them here and clean/prepare them in ".parameters_type_basic()".
  # for formatting / printing, refer to ".format_parameter()".
pattern <- if (full) {
    c(
      "as.factor", "as.numeric", "factor", "offset", "lag", "diff", "catg",
      "asis", "matrx", "pol", "strata", "strat", "scale", "scored",
      "interaction", "lsp", "pb", "lo", "t2", "te", "ti", "tt", "mi", "mo", "gp"
    )
  } else {
    c("as.factor", "as.numeric", "factor", "catg", "asis", "interaction")
  }

  # remove possible namespace prefix once, up-front. sub() is greedy, so a
  # single application already keeps only the text after the last "::" and
  # the call is idempotent - hoisted out of the loop below, where it was
  # re-applied on every iteration with no additional effect.
  x <- sub("(.*)::(.*)", "\\2", x)

  for (j in seq_along(pattern)) {
    if (pattern[j] == "offset") {
      x <- trimws(sub("offset\\(([^-+ )]*)\\)(.*)", "\\1\\2", x))
    } else if (pattern[j] == "I") {
      # NOTE(review): "I" is not contained in either pattern vector above,
      # so this branch looks unreachable - confirm before removing
      if (full) {
        x <- trimws(sub("I\\(((\\w|\\.)*).*", "\\1", x))
      } else {
        x <- trimws(sub("I\\((.*)\\)(.*)", "\\1", x))
      }
      # some exceptions here...
    } else if (full && pattern[j] == "scale" && any(grepl("scale\\(", x))) {
      x[grepl("scale\\(", x)] <- insight::clean_names(x[grepl("scale\\(", x)])
    } else {
      p <- paste0(pattern[j], "\\(((\\w|\\.)*)\\)(.*)")
      x <- trimws(sub(p, "\\1\\3", x))
    }
  }

  gsub("`", "", x, fixed = TRUE)
}


#' @keywords internal
.remove_backticks_from_string <- function(x) {
  # strip backticks from a character vector; other types pass through unchanged
  if (is.character(x)) {
    x <- gsub("`", "", x, fixed = TRUE)
  }
  x
}


#' @keywords internal
.remove_backticks_from_parameter_names <- function(x) {
  # strip backticks from the "Parameter" column of a data frame, if present
  if (is.data.frame(x) && "Parameter" %in% colnames(x)) {
    x$Parameter <- gsub("`", "", x$Parameter, fixed = TRUE)
  }
  x
}


#' @keywords internal
.intercepts <- function() {
  # all (lower-case) spellings used for intercept terms across model classes
  c(
    "(intercept)_zi",
    "intercept (zero-inflated)",
    "intercept",
    "zi_intercept",
    "(intercept)",
    "b_intercept",
    "b_zi_intercept"
  )
}


#' @keywords internal
.in_intercepts <- function(x) {
  # logical vector: which elements of x name an intercept term
  tolower(x) %in% .intercepts() | grepl("^intercept", tolower(x))
}
parameters/R/3_p_value.R0000644000175000017500000001162614160324505014756 0ustar nileshnilesh
#' p-values
#'
#' This function attempts to return, or compute, p-values of a model's
#' parameters. See the documentation for your object's class:
#' \itemize{
#'   \item{[Bayesian models][p_value.BFBayesFactor] (\pkg{rstanarm}, \pkg{brms}, \pkg{MCMCglmm}, ...)}
#'   \item{[Zero-inflated models][p_value.zeroinfl] (`hurdle`, `zeroinfl`, `zerocount`, ...)}
#'   \item{[Marginal effects models][p_value.poissonmfx] (\pkg{mfx})}
#'   \item{[Models with special components][p_value.DirichletRegModel] (`DirichletRegModel`, `clm2`, `cgam`, ...)}
#' }
#'
#' @param model A statistical model.
#' @param method If `"robust"`, and if model is supported by the \pkg{sandwich}
#'   or \pkg{clubSandwich} packages, computes p-values based on robust
#'   covariance matrix estimation.
#' @param adjust Character value naming the method used to adjust p-values or
#'   confidence intervals. See `?emmeans::summary.emmGrid` for details.
#' @param ... Arguments passed down to `standard_error_robust()` when confidence
#'   intervals or p-values based on robust standard errors should be computed.
#'   Only available for models where `method = "robust"` is supported.
#' @inheritParams ci.default
#'
#' @note `p_value_robust()` resp. `p_value(robust = TRUE)`
#'   rely on the \pkg{sandwich} or \pkg{clubSandwich} package (the latter if
#'   `vcov_estimation = "CR"` for cluster-robust standard errors) and will
#'   thus only work for those models supported by those packages.
#'
#' @return A data frame with at least two columns: the parameter names and the
#'   p-values. Depending on the model, may also include columns for model
#'   components etc.
#'
#' @examples
#' data(iris)
#' model <- lm(Petal.Length ~ Sepal.Length + Species, data = iris)
#' p_value(model)
#' @export
p_value <- function(model, ...) {
  UseMethod("p_value")
}


# p-Values from Standard Models -----------------------------------------------

#' @rdname p_value
#' @export
p_value.default <- function(model,
                            dof = NULL,
                            method = NULL,
                            robust = FALSE,
                            component = "all",
                            verbose = TRUE,
                            ...)
{ if (!is.null(method)) { method <- tolower(method) } else { method <- "wald" } # default p-value method for profiled or uniroot CI if (method %in% c("uniroot", "profile", "likelihood", "boot")) { method <- "normal" } p <- NULL if (method == "ml1") { return(p_value_ml1(model)) } else if (method == "betwithin") { return(p_value_betwithin(model)) } else if (method %in% c("residual", "wald", "normal", "satterthwaite", "kenward", "kr")) { if (is.null(dof)) { dof <- degrees_of_freedom(model, method = method, verbose = FALSE) } return( .p_value_dof( model, dof = dof, method = method, component = component, verbose = verbose, robust = robust, ... ) ) } else if (method %in% c("hdi", "eti", "si", "bci", "bcai", "quantile")) { return(bayestestR::p_direction(model, ...)) } else { # first, we need some special handling for Zelig-models p <- tryCatch( { if (grepl("^Zelig-", class(model)[1])) { unlist(model$get_pvalue()) } else { # try to get p-value from classical summary for default models .get_pval_from_summary(model) } }, error = function(e) { NULL } ) } # if all fails, try to get p-value from test-statistic if (is.null(p)) { p <- tryCatch( { stat <- insight::get_statistic(model) p_from_stat <- 2 * stats::pt(abs(stat$Statistic), df = Inf, lower.tail = FALSE) names(p_from_stat) <- stat$Parameter p_from_stat }, error = function(e) { NULL } ) } if (is.null(p)) { if (isTRUE(verbose)) { warning("Could not extract p-values from model object.", call. = FALSE) } } else { .data_frame( Parameter = names(p), p = as.vector(p) ) } } # helper -------------------------------------------------------- .get_pval_from_summary <- function(model, cs = NULL) { if (is.null(cs)) cs <- stats::coef(summary(model)) p <- NULL if (ncol(cs) >= 4) { # do we have a p-value column based on t? pvcn <- which(colnames(cs) == "Pr(>|t|)") # if not, do we have a p-value column based on z? 
if (length(pvcn) == 0) { pvcn <- which(colnames(cs) == "Pr(>|z|)") } # if not, default to 4 if (length(pvcn) == 0) pvcn <- 4 p <- cs[, pvcn] if (is.null(names(p))) { coef_names <- rownames(cs) if (length(coef_names) == length(p)) names(p) <- coef_names } } names(p) <- .remove_backticks_from_string(names(p)) p } parameters/R/methods_mediate.R0000644000175000017500000001201214137207406016226 0ustar nileshnilesh#' @export model_parameters.mediate <- function(model, ci = .95, exponentiate = FALSE, verbose = TRUE, ...) { # Parameters, Estimate and CI params <- insight::get_parameters(model) # CI params <- merge(params, ci(model, ci = ci), by = "Parameter", sort = FALSE) params$CI <- NULL # p-value params <- merge(params, p_value(model), by = "Parameter", sort = FALSE) # ==== Renaming if (any(grepl("\\(control\\)$", params$Parameter))) { params$Component <- gsub("(.*)\\((.*)\\)$", "\\2", params$Parameter) } if (isTRUE(exponentiate) || identical(exponentiate, "nongaussian")) { params <- .exponentiate_parameters(params, model, exponentiate) } attr(params, "object_name") <- deparse(substitute(model), width.cutoff = 500) params <- .add_model_parameters_attributes(params, model, ci, exponentiate, verbose = verbose, ...) class(params) <- c("parameters_model", "see_parameters_model", class(params)) params } #' @export ci.mediate <- function(x, ci = .95, ...) { info <- insight::model_info(x$model.y, verbose = FALSE) alpha <- (1 + ci) / 2 if (info$is_linear && !x$INT) { out <- data.frame( Parameter = c("ACME", "ADE", "Total Effect", "Prop. 
Mediated"), CI = ci, CI_low = c( stats::quantile(x$d0.sims, probs = 1 - alpha, names = FALSE), stats::quantile(x$z0.sims, probs = 1 - alpha, names = FALSE), stats::quantile(x$tau.sims, probs = 1 - alpha, names = FALSE), stats::quantile(x$n0.sims, probs = 1 - alpha, names = FALSE) ), CI_high = c( stats::quantile(x$d0.sims, probs = alpha, names = FALSE), stats::quantile(x$z0.sims, probs = alpha, names = FALSE), stats::quantile(x$tau.sims, probs = alpha, names = FALSE), stats::quantile(x$n0.sims, probs = alpha, names = FALSE) ), stringsAsFactors = FALSE ) } else { out <- data.frame( Parameter = c( "ACME (control)", "ACME (treated)", "ADE (control)", "ADE (treated)", "Total Effect", "Prop. Mediated (control)", "Prop. Mediated (treated)", "ACME (average)", "ADE (average)", "Prop. Mediated (average)" ), CI = ci, CI_low = c( stats::quantile(x$d0.sims, probs = 1 - alpha, names = FALSE), stats::quantile(x$d1.sims, probs = 1 - alpha, names = FALSE), stats::quantile(x$z0.sims, probs = 1 - alpha, names = FALSE), stats::quantile(x$z1.sims, probs = 1 - alpha, names = FALSE), stats::quantile(x$tau.sims, probs = 1 - alpha, names = FALSE), stats::quantile(x$n0.sims, probs = 1 - alpha, names = FALSE), stats::quantile(x$n1.sims, probs = 1 - alpha, names = FALSE), stats::quantile(x$d.avg.sims, probs = 1 - alpha, names = FALSE), stats::quantile(x$z.avg.sims, probs = 1 - alpha, names = FALSE), stats::quantile(x$n.avg.sims, probs = 1 - alpha, names = FALSE) ), CI_high = c( stats::quantile(x$d0.sims, probs = alpha, names = FALSE), stats::quantile(x$d1.sims, probs = alpha, names = FALSE), stats::quantile(x$z0.sims, probs = alpha, names = FALSE), stats::quantile(x$z1.sims, probs = alpha, names = FALSE), stats::quantile(x$tau.sims, probs = alpha, names = FALSE), stats::quantile(x$n0.sims, probs = alpha, names = FALSE), stats::quantile(x$n1.sims, probs = alpha, names = FALSE), stats::quantile(x$d.avg.sims, probs = alpha, names = FALSE), stats::quantile(x$z.avg.sims, probs = alpha, names = 
FALSE), stats::quantile(x$n.avg.sims, probs = alpha, names = FALSE) ), stringsAsFactors = FALSE ) } out } #' @export standard_error.mediate <- function(model, ...) { NULL } #' @export degrees_of_freedom.mediate <- function(model, ...) { NULL } #' @export p_value.mediate <- function(model, ...) { info <- insight::model_info(model$model.y, verbose = FALSE) if (info$is_linear && !model$INT) { out <- data.frame( Parameter = c("ACME", "ADE", "Total Effect", "Prop. Mediated"), p = c(model$d0.p, model$z0.p, model$tau.p, model$n0.p), stringsAsFactors = FALSE ) } else { out <- data.frame( Parameter = c( "ACME (control)", "ACME (treated)", "ADE (control)", "ADE (treated)", "Total Effect", "Prop. Mediated (control)", "Prop. Mediated (treated)", "ACME (average)", "ADE (average)", "Prop. Mediated (average)" ), p = c( model$d0.p, model$d1.p, model$z0.p, model$z1.p, model$tau.p, model$n0.p, model$n1.p, model$d.avg.p, model$z.avg.p, model$n.avg.p ), stringsAsFactors = FALSE ) } out } #' @export format_parameters.mediate <- function(model, ...) { params <- insight::find_parameters(model, flatten = TRUE) params <- trimws(gsub("(.*)\\((.*)\\)$", "\\1", params)) names(params) <- params params[params == "ACME"] <- "Indirect Effect (ACME)" params[params == "ADE"] <- "Direct Effect (ADE)" params } parameters/R/extract_parameters.R0000644000175000017500000007332414142723012016775 0ustar nileshnilesh# generic function ------------------------------------------------------ #' @keywords internal .extract_parameters_generic <- function(model, ci, component, merge_by = c("Parameter", "Component"), standardize = NULL, effects = "fixed", robust = FALSE, ci_method = NULL, p_adjust = NULL, wb_component = FALSE, verbose = TRUE, keep_component_column = FALSE, keep_parameters = NULL, drop_parameters = NULL, include_sigma = TRUE, summary = FALSE, ...) 
{ # ==== check if standardization is required and package available if (isTRUE(standardize)) { if (verbose) { insight::format_message(warning("'standardize' must be on of 'refit', 'posthoc', 'basic', 'smart' or 'pseudo'.", call. = FALSE)) } standardize <- NULL } if (!is.null(standardize) && !requireNamespace("effectsize", quietly = TRUE)) { if (verbose) { insight::format_message(warning("Package 'effectsize' required to calculate standardized coefficients. Please install it.", call. = FALSE)) } standardize <- NULL } # ==== model exceptions if (inherits(model, c("crq", "crqs"))) { merge_by <- c("Parameter", "Component") } # ==== for refit, we completely refit the model, than extract parameters, ci etc. as usual if (!is.null(standardize) && standardize == "refit") { model <- datawizard::standardize(model, verbose = FALSE, ...) standardize <- NULL } parameters <- insight::get_parameters(model, effects = effects, component = component, verbose = FALSE ) statistic <- insight::get_statistic(model, component = component) # check if all estimates are non-NA parameters <- .check_rank_deficiency(parameters) # ==== check if we really have a component column if (!("Component" %in% names(parameters)) && "Component" %in% merge_by) { merge_by <- setdiff(merge_by, "Component") } # ==== check Degrees of freedom if (!.dof_method_ok(model, ci_method, type = "ci_method")) { ci_method <- NULL } # ==== for ordinal models, first, clean parameter names and then indicate # intercepts (alpha-coefficients) in the component column if (inherits(model, "polr")) { intercept_groups <- which(grepl("^Intercept:", parameters$Parameter)) parameters$Parameter <- gsub("Intercept: ", "", parameters$Parameter, fixed = TRUE) } else if (inherits(model, "clm") && !is.null(model$alpha)) { intercept_groups <- rep(c("intercept", "location", "scale"), vapply(model[c("alpha", "beta", "zeta")], length, numeric(1))) } else if (inherits(model, "clm2") && !is.null(model$Alpha)) { intercept_groups <- 
rep(c("intercept", "location", "scale"), vapply(model[c("Alpha", "beta", "zeta")], length, numeric(1))) } else { intercept_groups <- NULL } original_order <- parameters$.id <- 1:nrow(parameters) # column name for coefficients, non-standardized coef_col <- "Coefficient" # ==== CI - only if we don't already have CI for std. parameters if (!is.null(ci)) { if (isTRUE(robust)) { ci_df <- suppressMessages(ci_robust( model, ci = ci, method = ci_method, component = component, verbose = verbose, ... )) } else if (!is.null(ci_method)) { ci_df <- suppressMessages( ci( model, ci = ci, effects = effects, component = component, method = ci_method, verbose = verbose ) ) } else { ci_df <- suppressMessages(ci( model, ci = ci, effects = effects, component = component, verbose = verbose )) } if (!is.null(ci_df)) { if (length(ci) > 1) ci_df <- datawizard::reshape_ci(ci_df) ci_cols <- names(ci_df)[!names(ci_df) %in% c("CI", merge_by)] parameters <- merge(parameters, ci_df, by = merge_by, sort = FALSE) } else { ci_cols <- c() } } else { ci_cols <- c() } # ==== p value if (isTRUE(robust)) { pval <- p_value_robust( model, method = ci_method, component = component, ... ) } else { pval <- p_value( model, effects = effects, component = component, method = ci_method, verbose = verbose ) } if (!is.null(pval)) { parameters <- merge(parameters, pval, by = merge_by, sort = FALSE) } # ==== standard error - only if we don't already have SE for std. parameters std_err <- NULL if (isTRUE(robust)) { std_err <- standard_error_robust(model, component = component, ...) 
} else if (!is.null(ci_method)) { std_err <- standard_error( model, effects = effects, component = component, method = ci_method, verbose = verbose ) } else { std_err <- standard_error(model, effects = effects, component = component, verbose = verbose ) } if (!is.null(std_err)) { parameters <- merge(parameters, std_err, by = merge_by, sort = FALSE) } # ==== test statistic - fix values for robust estimation if (isTRUE(robust)) { parameters$Statistic <- parameters$Estimate / parameters$SE } else if (!is.null(statistic)) { parameters <- merge(parameters, statistic, by = merge_by, sort = FALSE) } # ==== degrees of freedom if (!is.null(ci_method)) { df_error <- degrees_of_freedom(model, method = ci_method, verbose = FALSE) } else { df_error <- degrees_of_freedom(model, method = "any", verbose = FALSE) } if (!is.null(df_error) && (length(df_error) == 1 || length(df_error) == nrow(parameters))) { if (length(df_error) == 1) { parameters$df_error <- df_error } else { # order may have changed due to merging, so make sure # df are in correct order. 
parameters$df_error <- df_error[order(parameters$.id)] } } # ==== Rematch order after merging parameters <- parameters[match(original_order, parameters$.id), ] # ==== Renaming if ("Statistic" %in% names(parameters)) { stat_type <- attr(statistic, "statistic", exact = TRUE) if (!is.null(stat_type)) { names(parameters) <- gsub("Statistic", gsub("(-|\\s)statistic", "", stat_type), names(parameters)) names(parameters) <- gsub("chi-squared", "Chi2", names(parameters)) } } names(parameters) <- gsub("(c|C)hisq", "Chi2", names(parameters)) names(parameters) <- gsub("Estimate", "Coefficient", names(parameters)) # ==== add intercept groups for ordinal models if (inherits(model, "polr") && !is.null(intercept_groups)) { parameters$Component <- "beta" parameters$Component[intercept_groups] <- "alpha" } else if (inherits(model, c("clm", "clm2")) && !is.null(intercept_groups)) { parameters$Component <- intercept_groups } # ==== remove Component column if not needed if (!is.null(parameters$Component) && .n_unique(parameters$Component) == 1 && !keep_component_column) parameters$Component <- NULL if ((!is.null(parameters$Effects) && .n_unique(parameters$Effects) == 1) || effects == "fixed") parameters$Effects <- NULL # ==== adjust p-values? if (!is.null(p_adjust)) { parameters <- .p_adjust(parameters, p_adjust, model, verbose) } # ==== remove all complete-missing cases parameters <- parameters[apply(parameters, 1, function(i) !all(is.na(i))), ] # ==== add within/between attributes if (inherits(model, c("glmmTMB", "MixMod")) && isTRUE(wb_component)) { parameters <- .add_within_between_effects(model, parameters) } # ==== Std Coefficients for other methods than "refit" if (!is.null(standardize) && !isFALSE(standardize)) { # give minimal attributes required for standardization temp_pars <- parameters class(temp_pars) <- c("parameters_model", class(temp_pars)) attr(temp_pars, "ci") <- ci attr(temp_pars, "object_name") <- model # pass the model as is (this is a cheat - teehee!) 
std_parms <- effectsize::standardize_parameters(temp_pars, method = standardize) parameters$Std_Coefficient <- std_parms$Std_Coefficient parameters$SE <- attr(std_parms, "standard_error") if (!is.null(ci)) { parameters$CI_low <- std_parms$CI_low parameters$CI_high <- std_parms$CI_high } coef_col <- "Std_Coefficient" } # ==== Reorder col_order <- c( "Parameter", coef_col, "SE", ci_cols, "t", "z", "t / F", "t/F", "z / Chisq", "z/Chisq", "z / Chi2", "z/Chi2", "F", "Chi2", "chisq", "chi-squared", "Statistic", "df", "df_error", "p", "Component", "Response", "Effects" ) parameters <- parameters[col_order[col_order %in% names(parameters)]] # ==== add sigma and residual df if (isTRUE(include_sigma) || isTRUE(summary)) { parameters <- .add_sigma_residual_df(parameters, model) } # ==== filter parameters, if requested if (!is.null(keep_parameters) || !is.null(drop_parameters)) { parameters <- .filter_parameters(parameters, keep = keep_parameters, drop = drop_parameters, verbose = verbose ) } rownames(parameters) <- NULL parameters } # helper ---------------- .add_sigma_residual_df <- function(params, model) { if (is.null(params$Component) || !"sigma" %in% params$Component) { sig <- tryCatch( { suppressWarnings(insight::get_sigma(model, ci = NULL, verbose = FALSE)) }, error = function(e) { NULL } ) attr(params, "sigma") <- as.numeric(sig) resdf <- tryCatch( { suppressWarnings(insight::get_df(model, type = "residual")) }, error = function(e) { NULL } ) attr(params, "residual_df") <- as.numeric(resdf) } params } .filter_parameters <- function(params, keep = NULL, drop = NULL, verbose = TRUE) { if (!is.null(keep) && is.list(keep)) { for (i in names(keep)) { params <- .filter_parameters_vector(params, keep[[i]], drop = NULL, column = i, verbose = verbose ) } } else { params <- .filter_parameters_vector(params, keep, drop, column = NULL, verbose = verbose ) } params } .filter_parameters_vector <- function(params, keep = NULL, drop = NULL, column = NULL, verbose = TRUE) { # check 
pattern if (!is.null(keep) && length(keep) > 1) { keep <- paste0("(", paste0(keep, collapse = "|"), ")") if (verbose) { message(insight::format_message(sprintf("The 'keep' argument has more than 1 element. Merging into following regular expression: '%s'.", keep))) } } # check pattern if (!is.null(drop) && length(drop) > 1) { drop <- paste0("(", paste0(drop, collapse = "|"), ")") if (verbose) { message(insight::format_message(sprintf("The 'drop' argument has more than 1 element. Merging into following regular expression: '%s'.", drop))) } } if (is.null(column) || !column %in% colnames(params)) { if ("Parameter" %in% colnames(params)) { column <- "Parameter" } else { column <- 1 } } # row to keep and drop if (!is.null(keep)) { rows_to_keep <- grepl(keep, params[[column]], perl = TRUE) } else { rows_to_keep <- rep_len(TRUE, nrow(params)) } if (!is.null(drop)) { rows_to_drop <- !grepl(drop, params[[column]], perl = TRUE) } else { rows_to_drop <- rep_len(TRUE, nrow(params)) } out <- params[rows_to_keep & rows_to_drop, ] if (nrow(out) == 0) { if (verbose) { warning(insight::format_message("The pattern defined in the 'keep' (and 'drop') arguments would remove all parameters from the output. Thus, selecting specific parameters will be ignored."), call. = FALSE) } return(params) } out } # mixed models function ------------------------------------------------------ #' @keywords internal .extract_parameters_mixed <- function(model, ci = .95, ci_method = "wald", standardize = NULL, robust = FALSE, p_adjust = NULL, wb_component = FALSE, keep_parameters = NULL, drop_parameters = NULL, include_sigma = FALSE, summary = FALSE, verbose = TRUE, ...) 
{ special_ci_methods <- c("betwithin", "satterthwaite", "ml1", "kenward", "kr") # get parameters and statistic parameters <- insight::get_parameters(model, effects = "fixed", component = "all", verbose = FALSE) statistic <- insight::get_statistic(model, component = "all") # check if all estimates are non-NA parameters <- .check_rank_deficiency(parameters) # sometimes, due to merge(), row-order messes up, so we save this here original_order <- parameters$.id <- 1:nrow(parameters) # remove SE column parameters <- .remove_columns(parameters, c("SE", "Std. Error")) # column name for coefficients, non-standardized coef_col <- "Coefficient" # Degrees of freedom if (.dof_method_ok(model, ci_method)) { df <- degrees_of_freedom(model, method = ci_method, verbose = FALSE) } else { df <- Inf } df_error <- data.frame( Parameter = parameters$Parameter, df_error = as.vector(df), stringsAsFactors = FALSE ) # for KR-dof, we have the SE as well, to save computation time df_error$SE <- attr(df, "se", exact = TRUE) # CI - only if we don't already have CI for std. parameters if (!is.null(ci)) { if (isTRUE(robust)) { ci_df <- suppressMessages(ci_robust(model, ci = ci, ...)) } else if (ci_method %in% c("kenward", "kr")) { # special handling for KR-CIs, where we already have computed SE ci_df <- .ci_kenward_dof(model, ci = ci, df_kr = df_error) } else { ci_df <- ci(model, ci = ci, method = ci_method, effects = "fixed") } if (length(ci) > 1) ci_df <- datawizard::reshape_ci(ci_df) ci_cols <- names(ci_df)[!names(ci_df) %in% c("CI", "Parameter")] parameters <- merge(parameters, ci_df, by = "Parameter", sort = FALSE) } else { ci_cols <- c() } # standard error - only if we don't already have SE for std. 
parameters if (!("SE" %in% colnames(parameters))) { if (isTRUE(robust)) { parameters <- merge(parameters, standard_error_robust(model, ...), by = "Parameter", sort = FALSE) # special handling for KR-SEs, which we already have computed from dof } else if ("SE" %in% colnames(df_error)) { se_kr <- df_error se_kr$df_error <- NULL parameters <- merge(parameters, se_kr, by = "Parameter", sort = FALSE) } else { parameters <- merge(parameters, standard_error(model, method = ci_method, effects = "fixed"), by = "Parameter", sort = FALSE) } } # p value if (isTRUE(robust)) { parameters <- merge(parameters, p_value_robust(model, ...), by = "Parameter", sort = FALSE) } else { if ("Pr(>|z|)" %in% names(parameters)) { names(parameters)[grepl("Pr(>|z|)", names(parameters), fixed = TRUE)] <- "p" } else if (ci_method %in% special_ci_methods) { # special handling for KR-p, which we already have computed from dof # parameters <- merge(parameters, .p_value_dof_kr(model, params = parameters, dof = df_error), by = "Parameter") parameters <- merge(parameters, .p_value_dof(model, dof = df_error$df_error, method = ci_method, se = df_error$SE), by = "Parameter", sort = FALSE) } else { parameters <- merge(parameters, p_value(model, dof = df, effects = "fixed"), by = "Parameter", sort = FALSE) } } # adjust standard errors and test-statistic as well if (isFALSE(robust) && ci_method %in% special_ci_methods) { parameters$Statistic <- parameters$Estimate / parameters$SE } else { parameters <- merge(parameters, statistic, by = "Parameter", sort = FALSE) } # dof if (!"df" %in% names(parameters)) { if (!ci_method %in% special_ci_methods) { df_error <- data.frame( Parameter = parameters$Parameter, df_error = degrees_of_freedom(model, method = "any"), stringsAsFactors = FALSE ) } if (!is.null(df_error) && nrow(df_error) == nrow(parameters)) { if ("SE" %in% colnames(df_error)) { df_error$SE <- NULL } parameters <- merge(parameters, df_error, by = "Parameter", sort = FALSE) } } # Rematch order after 
merging parameters <- parameters[match(original_order, parameters$.id), ] # Renaming names(parameters) <- gsub("Statistic", gsub("-statistic", "", attr(statistic, "statistic", exact = TRUE), fixed = TRUE), names(parameters)) names(parameters) <- gsub("Std. Error", "SE", names(parameters)) names(parameters) <- gsub("Estimate", "Coefficient", names(parameters)) names(parameters) <- gsub("t value", "t", names(parameters)) names(parameters) <- gsub("z value", "z", names(parameters)) # adjust p-values? if (!is.null(p_adjust)) { parameters <- .p_adjust(parameters, p_adjust, model, verbose) } # if we have within/between effects (from demean()), we can add a component # column for nicer printing... if (isTRUE(wb_component)) { parameters <- .add_within_between_effects(model, parameters) } # Std Coefficients for other methods than "refit" if (!is.null(standardize)) { temp_pars <- parameters class(temp_pars) <- c("parameters_model", class(temp_pars)) attr(temp_pars, "ci") <- ci attr(temp_pars, "object_name") <- model # pass the model as is (this is a cheat - teehee!) 
std_parms <- effectsize::standardize_parameters(temp_pars, method = standardize) parameters$Std_Coefficient <- std_parms$Std_Coefficient parameters$SE <- attr(std_parms, "standard_error") if (!is.null(ci)) { parameters$CI_low <- std_parms$CI_low parameters$CI_high <- std_parms$CI_high } coef_col <- "Std_Coefficient" } # Reorder order <- c("Parameter", coef_col, "SE", ci_cols, "t", "z", "df", "df_error", "p", "Component") parameters <- parameters[order[order %in% names(parameters)]] # add sigma if (isTRUE(include_sigma) || isTRUE(summary)) { parameters <- .add_sigma_residual_df(parameters, model) } # filter parameters, if requested if (!is.null(keep_parameters) || !is.null(drop_parameters)) { parameters <- .filter_parameters(parameters, keep = keep_parameters, drop = drop_parameters, verbose = verbose ) } rownames(parameters) <- NULL parameters } .add_within_between_effects <- function(model, parameters) { # This function checks whether the model contains predictors that were # "demeaned" using the "demean()" function. If so, these columns have an # attribute indicating the within or between effect, and in such cases, # this effect is used as "Component" value. by this, we get a nicer print # for model parameters... 
# extract attributes that indicate within and between effects within_effects <- .find_within_between(model, "within-effect") between_effects <- .find_within_between(model, "between-effect") # if there are no attributes, return if (is.null(within_effects) && is.null(between_effects)) { return(parameters) } if (is.null(parameters$Component)) { parameters$Component <- "rewb-contextual" } if (!is.null(within_effects)) { index <- unique(unlist(sapply(within_effects, function(i) { grep(i, parameters$Parameter, fixed = TRUE) }))) parameters$Component[index] <- "within" } if (!is.null(between_effects)) { index <- unique(unlist(sapply(between_effects, function(i) { grep(i, parameters$Parameter, fixed = TRUE) }))) parameters$Component[index] <- "between" } interactions <- grep(":", parameters$Parameter, fixed = TRUE) if (length(interactions)) { parameters$Component[interactions] <- "interactions" } if (((!("within" %in% parameters$Component) || !("between" %in% parameters$Component)) && inherits(model, "merMod")) || all(parameters$Component == "rewb-contextual")) { parameters$Component <- NULL } parameters } .find_within_between <- function(model, which_effect) { mf <- stats::model.frame(model) unlist(sapply(names(mf), function(i) { if (!is.null(attr(mf[[i]], which_effect, exact = TRUE))) { i } })) } # Bayes function ------------------------------------------------------ #' @keywords internal .extract_parameters_bayesian <- function(model, centrality = "median", dispersion = FALSE, ci = .95, ci_method = "hdi", test = c("pd", "rope"), rope_range = "default", rope_ci = 0.95, bf_prior = NULL, diagnostic = c("ESS", "Rhat"), priors = FALSE, standardize = NULL, keep_parameters = NULL, drop_parameters = NULL, verbose = TRUE, ...) { # check if standardization is required and package available if (!is.null(standardize) && !requireNamespace("effectsize", quietly = TRUE)) { insight::print_color("Package 'effectsize' required to calculate standardized coefficients. 
Please install it.\n", "red") standardize <- NULL } # no ROPE for multi-response models if (insight::is_multivariate(model)) { test <- setdiff(test, c("rope", "p_rope")) warning(insight::format_message("Multivariate response models are not yet supported for tests 'rope' and 'p_rope'."), call. = FALSE) } # MCMCglmm need special handling if (inherits(model, "MCMCglmm")) { parameters <- bayestestR::describe_posterior( model, centrality = centrality, dispersion = dispersion, ci = ci, ci_method = ci_method, test = test, rope_range = rope_range, rope_ci = rope_ci, diagnostic = "ESS", verbose = verbose, ... ) } else if (!is.null(standardize)) { parameters <- bayestestR::describe_posterior( model, centrality = centrality, dispersion = dispersion, ci = ci, ci_method = ci_method, test = test, rope_range = rope_range, rope_ci = rope_ci, bf_prior = bf_prior, diagnostic = diagnostic, priors = priors, verbose = verbose, ... ) # Don't test BF on standardized params test_no_BF <- test[!test %in% c("bf", "bayesfactor", "bayes_factor")] if (length(test_no_BF) == 0) test_no_BF <- NULL std_post <- effectsize::standardize_posteriors(model, method = standardize) std_parameters <- bayestestR::describe_posterior( std_post, centrality = centrality, dispersion = dispersion, ci = ci, ci_method = ci_method, test = test_no_BF, rope_range = rope_range, rope_ci = rope_ci, verbose = verbose, ... ) parameters <- merge(std_parameters, parameters[c("Parameter", setdiff(colnames(parameters), colnames(std_parameters)))], sort = FALSE) } else { parameters <- bayestestR::describe_posterior( model, centrality = centrality, dispersion = dispersion, ci = ci, ci_method = ci_method, test = test, rope_range = rope_range, rope_ci = rope_ci, bf_prior = bf_prior, diagnostic = diagnostic, priors = priors, verbose = verbose, ... 
) } if (length(ci) > 1) { parameters <- datawizard::reshape_ci(parameters) } # Remove unnecessary columns if ("CI" %in% names(parameters) && .n_unique(parameters$CI) == 1) { parameters$CI <- NULL } if ("ROPE_CI" %in% names(parameters) && .n_unique(parameters$ROPE_CI) == 1) { parameters$ROPE_CI <- NULL } if ("ROPE_low" %in% names(parameters) & "ROPE_high" %in% names(parameters)) { parameters$ROPE_low <- NULL parameters$ROPE_high <- NULL } # filter parameters, if requested if (!is.null(keep_parameters) || !is.null(drop_parameters)) { parameters <- .filter_parameters(parameters, keep = keep_parameters, drop = drop_parameters, verbose = verbose ) } rownames(parameters) <- NULL parameters } # SEM function ------------------------------------------------------ #' @keywords internal .extract_parameters_lavaan <- function(model, ci = 0.95, standardize = FALSE, keep_parameters = NULL, drop_parameters = NULL, verbose = TRUE, ...) { insight::check_if_installed("lavaan") # set proper default if (is.null(standardize)) { standardize <- FALSE } # check for valid parameters if (!is.logical(standardize)) { if (!(standardize %in% c("all", "std.all", "latent", "std.lv", "no_exogenous", "std.nox"))) { if (verbose) { warning(insight::format_message("'standardize' should be one of TRUE, 'all', 'std.all', 'latent', 'std.lv', 'no_exogenous' or 'std.nox'. Returning unstandardized solution."), call. = FALSE) } standardize <- FALSE } } # CI if (length(ci) > 1) { ci <- ci[1] if (verbose) { warning(insight::format_message(paste0("lavaan models only accept one level of CI :( Keeping the first one: `ci = ", ci, "`.")), call. = FALSE) } } # collect dots dot_args <- list(...) 
# list all argument names from the `lavaan` function dot_args <- dot_args[names(dot_args) %in% c( "zstat", "pvalue", "standardized", "fmi", "level", "boot.ci.type", "cov.std", "fmi.options", "rsquare", "remove.system.eq", "remove.eq", "remove.ineq", "remove.def", "remove.nonfree", "add.attributes", "output", "header" )] # Get estimates data <- do.call(lavaan::parameterEstimates, c( list(object = model, se = TRUE, ci = TRUE, level = ci), dot_args )) label <- data$label # check if standardized estimates are requested, and if so, which type if (isTRUE(standardize) || !is.logical(standardize)) { if (is.logical(standardize)) { standardize <- "all" } type <- switch(standardize, "all" = , "std.all" = "std.all", "latent" = , "std.lv" = "std.lv", "no_exogenous" = , "std.nox" = "std.nox", "std.all" ) data <- lavaan::standardizedsolution(model, se = TRUE, level = ci, type = type, ... ) names(data)[names(data) == "est.std"] <- "est" } params <- data.frame( To = data$lhs, Operator = data$op, From = data$rhs, Coefficient = data$est, SE = data$se, CI_low = data$ci.lower, CI_high = data$ci.upper, z = data$z, p = data$pvalue, stringsAsFactors = FALSE ) if (!is.null(label)) { params$Label <- label } params$Component <- ifelse(params$Operator == "=~", "Loading", ifelse(params$Operator == "~", "Regression", ifelse(params$Operator == "~~", "Correlation", ifelse(params$Operator == ":=", "Defined", ifelse(params$Operator == "~1", "Mean", NA) ) ) ) ) params$Component <- ifelse(as.character(params$From) == as.character(params$To), "Variance", params$Component) if ("p" %in% colnames(params)) { params$p <- ifelse(is.na(params$p), 0, params$p) } if ("group" %in% names(data)) { params$Group <- data$group } # filter parameters, if requested if (!is.null(keep_parameters) || !is.null(drop_parameters)) { params <- .filter_parameters(params, keep = keep_parameters, drop = drop_parameters, verbose = verbose ) } params } # tools ------------------------- .check_rank_deficiency <- function(p, verbose 
= TRUE) {
  # Drop parameters that could not be estimated (rank-deficient model
  # matrix produces NA estimates) and optionally warn which ones were lost.
  # `p` is a data frame with at least `Parameter` and `Estimate` columns;
  # returns `p` with NA-estimate rows removed.
  if (anyNA(p$Estimate)) {
    if (isTRUE(verbose)) warning(insight::format_message(sprintf("Model matrix is rank deficient. Parameters %s were not estimable.", paste(p$Parameter[is.na(p$Estimate)], collapse = ", "))), call. = FALSE)
    p <- p[!is.na(p$Estimate), ]
  }
  p
}
parameters/R/methods_robustlmm.R0000644000175000017500000000016614020472756016654 0ustar nileshnilesh#' @export
# robustlmm: rlmerMod objects reuse the cpglmm methods unchanged
model_parameters.rlmerMod <- model_parameters.cpglmm

#' @export
p_value.rlmerMod <- p_value.cpglmm
parameters/R/methods_rstan.R0000644000175000017500000000444514104713406015754 0ustar nileshnilesh#' @export
# model_parameters() method for raw rstan "stanfit" objects.
# Delegates the heavy lifting to .extract_parameters_bayesian(); afterwards
# optionally drops group-level (random-effect level) rows and exponentiates.
model_parameters.stanfit <- function(model,
                                     centrality = "median",
                                     dispersion = FALSE,
                                     ci = .95,
                                     ci_method = "hdi",
                                     test = c("pd", "rope"),
                                     rope_range = "default",
                                     rope_ci = 0.95,
                                     diagnostic = c("ESS", "Rhat"),
                                     effects = "fixed",
                                     exponentiate = FALSE,
                                     standardize = NULL,
                                     group_level = FALSE,
                                     keep = NULL,
                                     drop = NULL,
                                     parameters = keep,
                                     verbose = TRUE,
                                     ...) {
  # Processing
  # note: bf_prior is NULL and priors = FALSE -- prior summaries are not
  # extracted for plain stanfit objects (no model-prior information here)
  params <- .extract_parameters_bayesian(
    model,
    centrality = centrality,
    dispersion = dispersion,
    ci = ci,
    ci_method = ci_method,
    test = test,
    rope_range = rope_range,
    rope_ci = rope_ci,
    bf_prior = NULL,
    diagnostic = diagnostic,
    priors = FALSE,
    effects = effects,
    standardize = standardize,
    keep_parameters = keep,
    drop_parameters = drop,
    verbose = verbose,
    ...
  )

  if (effects != "fixed") {
    # random-effect *levels* are all random-effect rows except the
    # "Sigma[...]" variance parameters (negative lookahead on the name);
    # these level rows are removed unless group_level = TRUE
    random_effect_levels <- which(params$Effects %in% "random" & grepl("^(?!Sigma\\[)(.*)", params$Parameter, perl = TRUE))
    if (length(random_effect_levels) && isFALSE(group_level)) params <- params[-random_effect_levels, ]
  }

  if (isTRUE(exponentiate) || identical(exponentiate, "nongaussian")) {
    params <- .exponentiate_parameters(params, model, exponentiate)
  }

  params <- .add_model_parameters_attributes(
    params,
    model,
    ci,
    exponentiate,
    ci_method = ci_method,
    verbose = verbose,
    ...
) attr(params, "parameter_info") <- insight::clean_parameters(model) attr(params, "object_name") <- deparse(substitute(model), width.cutoff = 500) class(params) <- c("parameters_stan", "parameters_model", "see_parameters_model", class(params)) params } parameters/R/methods_metaplus.R0000644000175000017500000002651714040537567016476 0ustar nileshnilesh# metaplus ###### .metaplus ------------------- #' @export model_parameters.metaplus <- function(model, ci = .95, bootstrap = FALSE, iterations = 1000, standardize = NULL, exponentiate = FALSE, include_studies = TRUE, verbose = TRUE, ...) { if (!missing(ci)) { if (isTRUE(verbose)) { message(insight::format_message("'metaplus' models do not support other levels for confidence intervals than 0.95. Argument 'ci' is ignored.")) } ci <- .95 } meta_analysis_overall <- suppressWarnings(.model_parameters_generic( model = model, ci = ci, bootstrap = bootstrap, iterations = iterations, merge_by = "Parameter", standardize = standardize, exponentiate = exponentiate, ... 
)) rma_parameters <- if (!is.null(model$slab) && !is.numeric(model$slab)) { sprintf("%s", model$slab) } else if (is.null(model$k) && !is.null(model$slab) && is.numeric(model$slab)) { sprintf("Study %i", model$slab) } else if (!is.null(model$k)) { sprintf("Study %i", 1:model[["k"]]) } else { sprintf("Study %i", 1:length(model$yi)) } alpha <- (1 + ci) / 2 rma_coeffients <- as.vector(model$yi) rma_se <- as.vector(model$sei) rma_ci_low <- rma_coeffients - rma_se * stats::qt(alpha, df = Inf) rma_ci_high <- rma_coeffients + rma_se * stats::qt(alpha, df = Inf) rma_statistic <- rma_coeffients / rma_se rma_ci_p <- 2 * stats::pt(abs(rma_statistic), df = Inf, lower.tail = FALSE) meta_analysis_studies <- data.frame( Parameter = rma_parameters, Coefficient = rma_coeffients, SE = rma_se, CI_low = rma_ci_low, CI_high = rma_ci_high, z = rma_statistic, df_error = NA, p = rma_ci_p, Weight = 1 / as.vector(model$sei), stringsAsFactors = FALSE ) original_attributes <- attributes(meta_analysis_overall) out <- merge(meta_analysis_studies, meta_analysis_overall, all = TRUE, sort = FALSE) # fix intercept name out$Parameter[out$Parameter == "(Intercept)"] <- "Overall" out <- out[!(out$Parameter %in% c("tau2", "vinv")), ] # filter studies? if (isFALSE(include_studies)) { out <- out[out$Parameter == "Overall", ] } original_attributes$names <- names(out) original_attributes$row.names <- 1:nrow(out) original_attributes$pretty_names <- stats::setNames(out$Parameter, out$Parameter) attributes(out) <- original_attributes # no df out$df_error <- NULL attr(out, "object_name") <- .safe_deparse(substitute(model)) attr(out, "measure") <- "Estimate" if (!"Method" %in% names(out)) { out$Method <- "Robust meta-analysis using 'metaplus'" } attr(out, "title") <- unique(out$Method) out } #' @export standard_error.metaplus <- function(model, ...) 
{ ci_low <- as.vector(model$results[, "95% ci.lb"]) ci_high <- as.vector(model$results[, "95% ci.ub"]) cis <- apply(cbind(ci_low, ci_high), MARGIN = 1, diff) out <- .data_frame( Parameter = .remove_backticks_from_string(rownames(model$results)), SE = cis / (2 * stats::qnorm(.975)) ) out$Parameter[grepl("muhat", out$Parameter)] <- "(Intercept)" out } #' @export p_value.metaplus <- function(model, ...) { out <- .data_frame( Parameter = .remove_backticks_from_string(rownames(model$results)), p = as.vector(model$results[, "pvalue"]) ) out$Parameter[grepl("muhat", out$Parameter)] <- "(Intercept)" out } #' @export ci.metaplus <- function(x, ...) { out <- .data_frame( Parameter = .remove_backticks_from_string(rownames(x$results)), CI_low = as.vector(x$results[, "95% ci.lb"]), CI_high = as.vector(x$results[, "95% ci.ub"]) ) out$Parameter[grepl("muhat", out$Parameter)] <- "(Intercept)" out } ###### .meta_random ------------------- #' @export model_parameters.meta_random <- function(model, ci = .95, ci_method = "hdi", exponentiate = FALSE, include_studies = TRUE, verbose = TRUE, ...) 
{ # process arguments params <- as.data.frame(model$estimates) ci_method <- match.arg(ci_method, choices = c("hdi", "eti")) # parameters of studies included study_params <- model$data fac <- stats::qnorm((1 + ci) / 2, lower.tail = TRUE) out_study <- data.frame( Parameter = study_params$labels, Coefficient = study_params$y, SE = study_params$SE, CI_low = study_params$y - fac * study_params$SE, CI_high = study_params$y + fac * study_params$SE, Weight = 1 / study_params$SE^2, BF = NA, Rhat = NA, ESS = NA, Component = "studies", Prior_Distribution = NA, Prior_Location = NA, Prior_Scale = NA, stringsAsFactors = FALSE ) # extract ci-level and find ci-columns ci <- .meta_bma_extract_ci(params) ci_cols <- .metabma_ci_columns(ci_method, ci) # parameters of overall / tau out <- data.frame( Parameter = rownames(params), Coefficient = params$mean, SE = params$sd, CI_low = params[[ci_cols[1]]], CI_high = params[[ci_cols[2]]], Weight = NA, BF = NA, Rhat = params$Rhat, ESS = params$n_eff, Component = "meta", stringsAsFactors = FALSE ) # add prior information priors <- insight::get_priors(model) out$Prior_Distribution <- priors$Distribution out$Prior_Location <- priors$Location out$Prior_Scale <- priors$Scale # fix intercept name out$Parameter[out$Parameter == "d"] <- "Overall" # add BF out$BF[1] <- model$BF[2, 1] # merge out <- rbind(out_study, out) # filter studies? if (isFALSE(include_studies)) { out <- out[out$Parameter %in% c("Overall", "tau"), ] } if (isTRUE(exponentiate) || identical(exponentiate, "nongaussian")) { out <- .exponentiate_parameters(out, model, exponentiate) } out <- .add_model_parameters_attributes( params = out, model = model, ci = ci, exponentiate = exponentiate, ci_method = ci_method, verbose = verbose, ... 
) # final atributes attr(out, "measure") <- "Estimate" attr(out, "object_name") <- .safe_deparse(substitute(model)) class(out) <- c("parameters_model", "see_parameters_model", class(params)) if (!"Method" %in% names(out)) { out$Method <- "Bayesian meta-analysis using 'metaBMA'" } attr(out, "title") <- unique(out$Method) out } #' @export standard_error.meta_random <- function(model, ...) { params <- as.data.frame(model$estimates) out <- data.frame( Parameter = .remove_backticks_from_string(rownames(params)), SE = params$sd, stringsAsFactors = FALSE ) out$Parameter[grepl("d", out$Parameter)] <- "(Intercept)" out } #' @export ci.meta_random <- function(x, method = "hdi", ...) { # process arguments params <- as.data.frame(x$estimates) ci_method <- match.arg(method, choices = c("hdi", "eti")) # extract ci-level and find ci-columns ci <- .meta_bma_extract_ci(params) ci_cols <- .metabma_ci_columns(ci_method, ci) out <- data.frame( Parameter = rownames(params), CI = .95, CI_low = params[[ci_cols[1]]], CI_high = params[[ci_cols[2]]], stringsAsFactors = FALSE ) out$Parameter[grepl("d", out$Parameter)] <- "(Intercept)" out } ###### .meta_fixed ------------------- #' @export model_parameters.meta_fixed <- model_parameters.meta_random #' @export standard_error.meta_fixed <- standard_error.meta_random #' @export ci.meta_fixed <- ci.meta_random ###### .meta_bma ------------------- #' @export model_parameters.meta_bma <- function(model, ci = .95, ci_method = "hdi", exponentiate = FALSE, include_studies = TRUE, verbose = TRUE, ...) 
{ # process arguments params <- as.data.frame(model$estimates) ci_method <- match.arg(ci_method, choices = c("hdi", "eti")) # parameters of studies included study_params <- model$meta$fixed$data fac <- stats::qnorm((1 + ci) / 2, lower.tail = TRUE) out_study <- data.frame( Parameter = study_params$labels, Coefficient = study_params$y, SE = study_params$SE, CI_low = study_params$y - fac * study_params$SE, CI_high = study_params$y + fac * study_params$SE, Weight = 1 / study_params$SE^2, BF = NA, Rhat = NA, ESS = NA, Component = "studies", stringsAsFactors = FALSE ) # extract ci-level and find ci-columns ci <- .meta_bma_extract_ci(params) ci_cols <- .metabma_ci_columns(ci_method, ci) out <- data.frame( Parameter = rownames(params), Coefficient = params$mean, SE = params$sd, CI_low = params[[ci_cols[1]]], CI_high = params[[ci_cols[2]]], Weight = NA, BF = NA, Rhat = params$Rhat, ESS = params$n_eff, Component = "meta", stringsAsFactors = FALSE ) # add BF out$BF <- c(NA, model$BF[2, 1], model$BF[4, 1]) # merge out <- rbind(out_study, out) # filter studies? if (isFALSE(include_studies)) { out <- out[out$Parameter %in% c("averaged", "fixed", "random"), ] } if (isTRUE(exponentiate) || identical(exponentiate, "nongaussian")) { out <- .exponentiate_parameters(out, model, exponentiate) } out <- .add_model_parameters_attributes( params = out, model = model, ci = ci, exponentiate = exponentiate, ci_method = ci_method, verbose = verbose, ... 
) # final attributes attr(out, "measure") <- "Estimate" attr(out, "object_name") <- .safe_deparse(substitute(model)) class(out) <- c("parameters_model", "see_parameters_model", class(params)) if (!"Method" %in% names(out)) { out$Method <- "Bayesian meta-analysis using 'metaBMA'" } attr(out, "title") <- unique(out$Method) out } #' @export standard_error.meta_bma <- standard_error.meta_random #' @export ci.meta_bma <- ci.meta_random # helper ------ .meta_bma_extract_ci <- function(params) { hpd_col <- colnames(params)[grepl("hpd(\\d+)_lower", colnames(params))] as.numeric(gsub("hpd(\\d+)_lower", "\\1", hpd_col)) / 100 } .metabma_ci_columns <- function(ci_method, ci) { switch(toupper(ci_method), "HDI" = sprintf(c("hpd%i_lower", "hpd%i_upper"), 100 * ci), c(sprintf("%g%%", (100 * (1 - ci)) / 2), sprintf("%g%%", 100 - (100 * (1 - ci)) / 2)) ) } # format_parameters ----------------------------------- #' @export format_parameters.meta_random <- function(model, ...) { params <- insight::find_parameters(model, flatten = TRUE) names(params) <- params params } #' @export format_parameters.meta_fixed <- format_parameters.meta_random #' @export format_parameters.meta_bma <- format_parameters.meta_random parameters/R/methods_MCMCglmm.R0000644000175000017500000000423214104713406016213 0ustar nileshnilesh#' @export standard_error.MCMCglmm <- function(model, ...) { nF <- model$Fixed$nfl parms <- as.data.frame(model$Sol[, 1:nF, drop = FALSE]) .data_frame( Parameter = .remove_backticks_from_string(colnames(parms)), SE = unname(sapply(parms, stats::sd)) ) } #' @export p_value.MCMCglmm <- function(model, ...) 
{ nF <- model$Fixed$nfl p <- 1 - colSums(model$Sol[, 1:nF, drop = FALSE] > 0) / dim(model$Sol)[1] .data_frame( Parameter = insight::find_parameters(model, effects = "fixed", flatten = TRUE), p = p ) } #' @export model_parameters.MCMCglmm <- function(model, centrality = "median", dispersion = FALSE, ci = .95, ci_method = "hdi", test = c("pd", "rope"), rope_range = "default", rope_ci = 0.95, bf_prior = NULL, diagnostic = c("ESS", "Rhat"), priors = TRUE, keep = NULL, drop = NULL, parameters = keep, verbose = TRUE, ...) { # Processing params <- .extract_parameters_bayesian( model, centrality = centrality, dispersion = dispersion, ci = ci, ci_method = ci_method, test = test, rope_range = rope_range, rope_ci = rope_ci, bf_prior = bf_prior, diagnostic = diagnostic, priors = priors, keep_parameters = keep, drop_parameters = drop, verbose = verbose, ... ) attr(params, "pretty_names") <- format_parameters(model) attr(params, "ci") <- ci attr(params, "object_name") <- deparse(substitute(model), width.cutoff = 500) class(params) <- c("parameters_model", "see_parameters_model", class(params)) params } parameters/R/methods_nlme.R0000644000175000017500000000360514133044610015551 0ustar nileshnilesh# Package nlme; .lme, .gls ############### .lme -------------- #' @export model_parameters.lme <- model_parameters.merMod #' @export ci.lme <- function(x, ci = .95, method = "wald", ...) { method <- tolower(method) method <- match.arg(method, choices = c("wald", "normal", "residual", "betwithin", "ml1", "satterthwaite")) if (method %in% c("wald", "residual", "normal")) { if (!requireNamespace("nlme", quietly = TRUE)) { .ci_generic(model = x, ci = ci, method = method) } else { out <- lapply(ci, function(i) { ci_list <- tryCatch( { nlme::intervals(x, level = i, ...) }, error = function(e) { nlme::intervals(x, level = i, which = "fixed", ...) 
}
      )
      # one CI data frame per requested confidence level `i`
      .data_frame(
        Parameter = rownames(ci_list$fixed),
        CI = i,
        CI_low = as.vector(ci_list$fixed[, "lower"]),
        CI_high = as.vector(ci_list$fixed[, "upper"])
      )
    })
    # stack the per-level results and strip backticks from parameter names
    .remove_backticks_from_parameter_names(do.call(rbind, out))
  }
  # ml1 approx
} else if (method == "ml1") {
  ci_ml1(x, ci)
  # betwithin approx
} else if (method == "betwithin") {
  ci_betwithin(x, ci)
  # Satterthwaite
} else if (method == "satterthwaite") {
  ci_satterthwaite(x, ci)
}
}

#' @export
# p-values for nlme::lme models, read from the summary coefficient matrix.
# p-values are taken from its fifth column.
p_value.lme <- function(model, ...) {
  cs <- stats::coef(summary(model))
  p <- cs[, 5]
  .data_frame(
    Parameter = .remove_backticks_from_string(rownames(cs)),
    p = as.vector(p)
  )
}

#' @export
standard_error.lme <- standard_error.default

############### .gls --------------

#' @export
standard_error.gls <- standard_error.default

#' @export
p_value.gls <- p_value.default

#' @export
# gls models have no residual-df based method; delegate to the fallback
degrees_of_freedom.gls <- function(model, method = NULL, ...) {
  .degrees_of_freedom_no_dfresid_method(model, method)
}
parameters/R/print.compare_parameters.R0000644000175000017500000000230214143236432020102 0ustar nileshnilesh#' @export
# Print method for "compare_parameters" objects: resolves digits/style from
# the object's attributes when not supplied, formats the comparison table,
# writes it to the console, and returns the original input invisibly.
print.compare_parameters <- function(x,
                                     digits = 2,
                                     ci_digits = 2,
                                     p_digits = 3,
                                     style = NULL,
                                     groups = NULL,
                                     ...) {
  # save original input
  orig_x <- x

  # check if user supplied digits attributes
  if (missing(digits)) {
    digits <- .additional_arguments(x, "digits", digits)
  }
  if (missing(ci_digits)) {
    ci_digits <- .additional_arguments(x, "ci_digits", ci_digits)
  }
  if (missing(p_digits)) {
    p_digits <- .additional_arguments(x, "p_digits", p_digits)
  }

  # get attributes
  if (missing(style)) {
    style <- attributes(x)$output_style
  }

  formatted_table <- format(
    x,
    style,
    split_components = TRUE,
    digits = digits,
    ci_digits = ci_digits,
    p_digits = p_digits,
    ci_width = "auto",
    ci_brackets = c("(", ")"),
    format = "text",
    groups = groups
  )
  cat(insight::export_table(formatted_table, format = "text", footer = NULL, empty_line = "-", ...))

  invisible(orig_x)
}
parameters/R/methods_vgam.R0000644000175000017500000000505114133000627015547 0ustar nileshnilesh# classes: .vglm, .vgam

########### .vgam ---------------

#' @export
model_parameters.vgam <- model_parameters.gam

#' @export
# SEs from the square root of the diagonal of the variance-covariance matrix,
# re-ordered to match the parameter order returned by insight
standard_error.vgam <- function(model, ...) {
  params <- insight::get_parameters(model)
  se <- sqrt(diag(insight::get_varcov(model)))
  # sort
  se <- se[params$Parameter]
  .data_frame(
    Parameter = .remove_backticks_from_string(names(se)),
    SE = as.vector(se),
    Component = params$Component
  )
}

#' @export
# df are NA for all parameters except those with an entry in the model's
# `nl.df` slot (nonlinear df of the smooth terms)
degrees_of_freedom.vgam <- function(model, ...) {
  params <- insight::get_parameters(model)
  out <- stats::setNames(rep(NA, nrow(params)), params$Parameter)
  out[names(model@nl.df)] <- model@nl.df
  out
}

#' @export
# chi-squared test of the model statistic against the df from above
p_value.vgam <- function(model, ...) {
  stat <- insight::get_statistic(model)
  stat$p <- as.vector(stats::pchisq(stat$Statistic, df = degrees_of_freedom(model), lower.tail = FALSE))
  stat[c("Parameter", "p", "Component")]
}

#' @export
# simulate draws for all components; returns a "parameters_simulate_model"
simulate_model.vgam <- function(model, iterations = 1000, ...) {
  out <- .simulate_model(model, iterations, component = "all")
  class(out) <- c("parameters_simulate_model", class(out))
  out
}

########### .vglm ---------------

#' @export
p_value.vglm <- function(model, ...)
{
  # p-values for VGAM::vglm models, taken from column 4 of the summary
  # coefficient table (slot "coef3" of the VGAM summary object)
  if (!requireNamespace("VGAM", quietly = TRUE)) {
    stop("Package `VGAM` required.", call. = FALSE)
  }
  cs <- VGAM::summary(model)@coef3
  p <- cs[, 4]
  .data_frame(
    Parameter = .remove_backticks_from_string(names(p)),
    p = as.vector(p)
  )
}

#' @export
# SEs from the diagonal of the variance-covariance matrix
standard_error.vglm <- function(model, ...) {
  se <- sqrt(diag(insight::get_varcov(model)))
  .data_frame(
    Parameter = .remove_backticks_from_string(names(se)),
    SE = as.vector(se)
  )
}

# ci.vgam <- function(x, ci = .95, component = c("all", "conditional", "smooth"), ...) {
#   component <- match.arg(component)
#
#   # dof and SE
#   dof <- degrees_of_freedom(x)
#   se <- standard_error(x)$SE
#   params <- insight::get_parameters(x)
#
#   se <- se[!is.na(dof)]
#   dof <- dof[!is.na(dof)]
#   params_names <- names(dof)
#
#   # Wald CI for non-chisq parameters
#   out <- .ci_generic(model = x, ci = ci, dof = Inf)
#
#   chisq_fac <- stats::qchisq(se, df = dof, lower.tail = FALSE)
#   for (i in 1:length(params_names)) {
#     out$CI_low[out$Parameter == params_names[i]] <- params$Estimate[params$Parameter == params_names[i]] - se[i] * chisq_fac[i]
#     out$CI_high[out$Parameter == params_names[i]] <- params$Estimate[params$Parameter == params_names[i]] + se[i] * chisq_fac[i]
#   }
#
#   out
# }
parameters/R/methods_glmx.R0000644000175000017500000000414314133000470015563 0ustar nileshnilesh#' @rdname model_parameters.averaging
#' @export
# model_parameters() for glmx models: generic extraction, merging by
# Parameter + Component when both model components ("conditional"/"extra")
# are requested
model_parameters.glmx <- function(model,
                                  ci = .95,
                                  bootstrap = FALSE,
                                  iterations = 1000,
                                  component = c("all", "conditional", "extra"),
                                  standardize = NULL,
                                  exponentiate = FALSE,
                                  p_adjust = NULL,
                                  verbose = TRUE,
                                  ...) {
  component <- match.arg(component)
  if (component == "all") {
    merge_by <- c("Parameter", "Component")
  } else {
    merge_by <- "Parameter"
  }

  out <- .model_parameters_generic(
    model = model,
    ci = ci,
    component = component,
    bootstrap = bootstrap,
    iterations = iterations,
    merge_by = merge_by,
    standardize = standardize,
    exponentiate = exponentiate,
    p_adjust = p_adjust,
    ...
  )

  attr(out, "object_name") <- deparse(substitute(model), width.cutoff = 500)
  out
}

#' @export
# SEs are the concatenation of the "glm" and "extra" components of the
# summary coefficient tables, in that order (matching insight's parameters)
standard_error.glmx <- function(model, ...) {
  stats <- stats::coef(summary(model))
  params <- insight::get_parameters(model)
  .data_frame(
    Parameter = params$Parameter,
    SE = c(as.vector(stats$glm[, "Std. Error"]), as.vector(stats$extra[, "Std. Error"])),
    Component = params$Component
  )
}

#' @export
# p-values, same two-component layout as standard_error.glmx
p_value.glmx <- function(model, ...) {
  stats <- stats::coef(summary(model))
  params <- insight::get_parameters(model)
  .data_frame(
    Parameter = params$Parameter,
    p = c(as.vector(stats$glm[, "Pr(>|z|)"]), as.vector(stats$extra[, "Pr(>|z|)"])),
    Component = params$Component
  )
}

#' @export
simulate_model.glmx <- function(model, iterations = 1000, component = c("all", "conditional", "extra"), ...) {
  component <- match.arg(component)
  out <- .simulate_model(model, iterations, component = component)
  class(out) <- c("parameters_simulate_model", class(out))
  attr(out, "object_name") <- .safe_deparse(substitute(model))
  out
}
parameters/R/methods_fixest.R0000644000175000017500000000217614133047772016136 0ustar nileshnilesh# .fixest -----------------------

#' @export
# SEs from the summary object -- presumably `$se` holds the (possibly
# cluster-robust) standard errors; TODO confirm against fixest docs
standard_error.fixest <- function(model, ...) {
  stats <- summary(model)
  params <- insight::get_parameters(model)
  .data_frame(
    Parameter = params$Parameter,
    SE = as.vector(stats$se)
  )
}

## TODO add ci_method later?

#' @export
# p-values from the summary coefficient table; the p-value column is named
# "Pr(>|t|)" or "Pr(>|z|)" depending on the reported test statistic
p_value.fixest <- function(model, ...) {
  stats <- summary(model)$coeftable
  params <- insight::get_parameters(model)
  stat_col <- which(colnames(stats) %in% c("Pr(>|t|)", "Pr(>|z|)"))
  # FIX: use column extraction `[, stat_col]`, not `[[stat_col]]` --
  # `coeftable` may be a matrix, and `m[[i]]` on a matrix returns only the
  # single i-th *element*, not the i-th column. `[, ]` works for both
  # matrices and data frames.
  .data_frame(
    Parameter = params$Parameter,
    p = as.vector(stats[, stat_col])
  )
}

# .feglm -----------------------

#' @export
# SEs for fixest::feglm models from the "Std. error" column
standard_error.feglm <- function(model, ...) {
  stats <- stats::coef(summary(model))
  params <- insight::get_parameters(model)
  .data_frame(
    Parameter = params$Parameter,
    SE = as.vector(stats[, "Std. error"])
  )
}

## TODO add ci_method later?

#' @export
p_value.feglm <- function(model, ...)
{
  # p-values for feglm models: column 4 of the summary coefficient table
  stats <- stats::coef(summary(model))
  params <- insight::get_parameters(model)
  .data_frame(
    Parameter = params$Parameter,
    p = as.vector(stats[, 4])
  )
}
parameters/R/check_heterogeneity.R0000644000175000017500000000547514135275207017124 0ustar nileshnilesh#' Check model predictor for heterogeneity bias
#'
#' `check_heterogeneity()` checks if model predictors or variables may
#' cause a heterogeneity bias, i.e. if variables have a within- and/or
#' between-effect.
#'
#' @param x A data frame or a mixed model object.
#' @param select Character vector (or formula) with names of variables to select
#'   that should be checked. If `x` is a mixed model object, this argument
#'   will be ignored.
#' @param group Character vector (or formula) with the name of the variable that
#'   indicates the group- or cluster-ID. If `x` is a model object, this
#'   argument will be ignored.
#'
#' @seealso
#' For further details, see documentation for `?datawizard::demean`.
#'
#' @note
#' This function will be removed in a future update. Please use
#' `performance::check_heterogeneity_bias()`.
#'
#' @export
check_heterogeneity <- function(x, select = NULL, group = NULL) {
  .Deprecated("performance::check_heterogeneity_bias()")

  # resolve data, predictors and grouping variable(s), either from the
  # model object or from the user-supplied data frame and arguments
  if (insight::is_model(x)) {
    group <- insight::find_random(x, split_nested = TRUE, flatten = TRUE)
    if (is.null(group)) {
      stop("Model is no mixed model.
Please provide a mixed model, or a data frame and arguments 'select' and 'group'.")
    }
    data <- insight::get_data(x)
    select <- insight::find_predictors(x, effects = "fixed", component = "conditional", flatten = TRUE)
  } else {
    if (inherits(select, "formula")) {
      select <- all.vars(select)
    }
    if (inherits(group, "formula")) {
      group <- all.vars(group)
    }
    data <- x
  }
  # NOTE(fix): removed unused local `unique_groups` (was computed via
  # .n_unique() but never read, and errored early when `group` was NULL)

  combinations <- expand.grid(select, group)

  # demean each predictor within each grouping variable; a predictor whose
  # within-group component differs from zero (beyond numerical noise) has a
  # within-effect and may cause heterogeneity bias
  result <- mapply(function(predictor, id) {
    # demean predictor
    d <- datawizard::demean(data, select = predictor, group = id, verbose = FALSE)

    # get new names
    within_name <- paste0(predictor, "_within")

    # check if any within-variable differs from zero. if yes, we have
    # a within-subject effect (simplified from redundant `any(sum(...) > 0)`;
    # `sum()` already yields a scalar)
    if (sum(abs(d[[within_name]]) > 1e-5, na.rm = TRUE) > 0) {
      predictor
    } else {
      NULL
    }
  }, as.character(combinations[[1]]), as.character(combinations[[2]]), SIMPLIFY = FALSE)

  out <- unname(unlist(.compact_list(result)))

  if (is.null(out)) {
    message("No predictor found that could cause heterogeneity bias.")
    return(invisible(NULL))
  }

  class(out) <- c("check_heterogeneity", class(out))
  out
}

#' @export
# Print method: lists the problematic predictors in red; returns input invisibly
print.check_heterogeneity <- function(x, ...) {
  cat("Possible heterogeneity bias due to following predictors: ")
  insight::print_color(paste(x, collapse = ", "), "red")
  cat("\n")
  invisible(x)
}

#' @keywords internal
# Number of unique values in `x`; NULL counts as 0, NAs dropped by default
.n_unique <- function(x, na.rm = TRUE) {
  if (is.null(x)) {
    return(0)
  }
  if (isTRUE(na.rm)) x <- stats::na.omit(x)
  length(unique(x))
}
parameters/R/methods_coxrobust.R0000644000175000017500000000103014133047726016653 0ustar nileshnilesh
#' @export
# SEs for coxrobust::coxr models from the varcov diagonal
standard_error.coxr <- function(model, ...) {
  params <- insight::get_parameters(model)
  vc <- insight::get_varcov(model)
  .data_frame(
    Parameter = params$Parameter,
    SE = as.vector(sqrt(diag(vc)))
  )
}

## TODO add ci_method later?

#' @export
p_value.coxr <- function(model, ...)
{ stat <- insight::get_statistic(model) if (!is.null(stat)) { .data_frame( Parameter = stat$Parameter, p = as.vector(2 * stats::pnorm(abs(stat$Statistic), lower.tail = FALSE)) ) } } parameters/R/extract_random_variances.R0000644000175000017500000006474114166665556020200 0ustar nileshnilesh .extract_random_variances <- function(model, ...) { UseMethod(".extract_random_variances") } .extract_random_variances.default <- function(model, ci = .95, effects = "random", component = "conditional", ci_method = NULL, verbose = FALSE, ...) { out <- suppressWarnings( .extract_random_variances_helper( model, ci = ci, effects = effects, component = component, ci_method = ci_method, verbose = verbose, ... ) ) # check for errors if (is.null(out)) { if (isTRUE(verbose)) { warning(insight::format_message("Something went wrong when calculating random effects parameters. Only showing model's fixed effects now. You may use `effects=\"fixed\"` to speed up the call to `model_parameters()`."), call. = FALSE) } } out } .extract_random_variances.glmmTMB <- function(model, ci = .95, effects = "random", component = "all", ci_method = NULL, verbose = FALSE, ...) { component <- match.arg(component, choices = c("all", "conditional", "zero_inflated", "zi", "dispersion")) out <- suppressWarnings( .extract_random_variances_helper( model, ci = ci, effects = effects, component = "conditional", ci_method = ci_method, verbose = verbose, ... ) ) # check for errors if (is.null(out)) { if (isTRUE(verbose)) { warning(insight::format_message("Something went wrong when calculating random effects parameters. Only showing model's fixed effects now. You may use `effects=\"fixed\"` to speed up the call to `model_parameters()`."), call. 
= FALSE) } return(NULL) } out$Component <- "conditional" if (insight::model_info(model, verbose = FALSE)$is_zero_inflated && !is.null(insight::find_random(model)$zero_inflated_random)) { zi_var <- suppressWarnings( .extract_random_variances_helper( model, ci = ci, effects = effects, component = "zi", ci_method = ci_method, verbose = FALSE, ... ) ) # bind if any zi-components could be extracted if (!is.null(zi_var)) { zi_var$Component <- "zero_inflated" out <- rbind(out, zi_var) } } # filter if (component != "all") { if (component == "zi") { component <- "zero_inflated" } out <- out[out$Component == component, ] } out } .extract_random_variances.MixMod <- .extract_random_variances.glmmTMB # workhorse ------------------------ .extract_random_variances_helper <- function(model, ci = .95, effects = "random", component = "conditional", ci_method = NULL, verbose = FALSE, ...) { varcorr <- .get_variance_information(model, component) ran_intercept <- tryCatch( { data.frame(.random_intercept_variance(varcorr)) }, error = function(e) { NULL } ) ran_slope <- tryCatch( { data.frame(.random_slope_variance(model, varcorr)) }, error = function(e) { NULL } ) ran_corr <- tryCatch( { data.frame(.random_slope_intercept_corr(model, varcorr)) }, error = function(e) { NULL } ) # sigma/dispersion only once if (component == "conditional") { ran_sigma <- data.frame(insight::get_sigma(model, ci = NULL, verbose = FALSE)) } else { ran_sigma <- NULL } # random intercept - tau00 if (!is.null(ran_intercept) && nrow(ran_intercept) > 0) { colnames(ran_intercept) <- "Coefficient" ran_intercept$Group <- rownames(ran_intercept) ran_intercept$Parameter <- "SD (Intercept)" } ran_groups <- ran_intercept$Group # random slope - tau11 if (!is.null(ran_slope) && nrow(ran_slope) > 0) { colnames(ran_slope) <- "Coefficient" ran_slope$Group <- rownames(ran_slope) if (is.null(ran_groups)) { ran_groups <- gsub("\\..*", "", ran_slope$Group) } for (i in unique(ran_groups)) { slopes <- which(grepl(paste0("^\\Q", i, 
"\\E"), ran_slope$Group)) if (length(slopes)) { ran_slope$Parameter[slopes] <- paste0( "SD (", gsub("^\\.", "", gsub(i, "", ran_slope$Group[slopes], fixed = TRUE)), ")" ) ran_slope$Group[slopes] <- i } } } # random slope-intercept correlation - rho01 if (!is.null(ran_corr) && nrow(ran_corr) > 0) { if (!is.null(ran_intercept$Group) && colnames(ran_corr)[1] == ran_intercept$Group[1]) { colnames(ran_corr)[1] <- "Coefficient" ran_corr$Parameter <- paste0("Cor (Intercept~", row.names(ran_corr), ")") ran_corr$Group <- ran_intercept$Group[1] } else { colnames(ran_corr) <- "Coefficient" ran_corr$Group <- rownames(ran_corr) for (i in unique(ran_groups)) { corrs <- which(grepl(paste0("^\\Q", i, "\\E"), ran_corr$Group)) if (length(corrs)) { param_name <- i cor_slopes <- which(grepl(paste0("^\\Q", i, "\\E"), ran_slope$Group)) if (length(cor_slopes)) { param_name <- paste0( gsub("SD \\((.*)\\)", "\\1", ran_slope$Parameter[cor_slopes]), ": ", i ) } ran_corr$Parameter[corrs] <- paste0("Cor (Intercept~", param_name, ")") ran_corr$Group[corrs] <- i } } } } # residuals - sigma if (!is.null(ran_sigma) && nrow(ran_sigma) > 0) { colnames(ran_sigma) <- "Coefficient" ran_sigma$Group <- "Residual" ran_sigma$Parameter <- "SD (Observations)" } # row bind all random effect variances, if possible out <- tryCatch( { out_list <- .compact_list(list(ran_intercept, ran_slope, ran_corr, ran_sigma)) do.call(rbind, out_list) }, error = function(e) { NULL } ) if (is.null(out)) { return(NULL) } rownames(out) <- NULL # variances to SD (sqrt), except correlations and Sigma corr_param <- grepl("Cor (Intercept~", out$Parameter, fixed = TRUE) sigma_param <- out$Parameter == "SD (Observations)" not_cor_and_sigma <- !corr_param & !sigma_param if (any(not_cor_and_sigma)) { out$Coefficient[not_cor_and_sigma] <- sqrt(out$Coefficient[not_cor_and_sigma]) } stat_column <- gsub("-statistic", "", insight::find_statistic(model), fixed = TRUE) # to match rbind out[[stat_column]] <- NA out$SE <- NA out$df_error <- NA 
out$p <- NA out$Level <- NA out$CI <- NA out$Effects <- "random" if (length(ci) == 1) { ci_cols <- c("CI_low", "CI_high") } else { ci_cols <- c() for (i in ci) { ci_low <- paste0("CI_low_", i) ci_high <- paste0("CI_high_", i) ci_cols <- c(ci_cols, ci_low, ci_high) } } out[ci_cols] <- NA # add confidence intervals? if (!is.null(ci) && !all(is.na(ci)) && length(ci) == 1) { out <- .random_sd_ci(model, out, ci_method, ci, corr_param, sigma_param, component) } out <- out[c("Parameter", "Level", "Coefficient", "SE", ci_cols, stat_column, "df_error", "p", "Effects", "Group")] if (effects == "random") { out[c(stat_column, "df_error", "p", "CI")] <- NULL } rownames(out) <- NULL out } # extract CI for random SD ------------------------ .random_sd_ci <- function(model, out, ci_method, ci, corr_param, sigma_param, component = NULL) { if (inherits(model, c("merMod", "glmerMod", "lmerMod"))) { if (!is.null(ci_method) && ci_method %in% c("profile", "boot")) { var_ci <- as.data.frame(suppressWarnings(stats::confint(model, parm = "theta_", oldNames = FALSE, method = ci_method, level = ci))) colnames(var_ci) <- c("CI_low", "CI_high") rn <- row.names(var_ci) rn <- gsub("sd_(.*)(\\|)(.*)", "\\1: \\3", rn) rn <- gsub("|", ":", rn, fixed = TRUE) rn <- gsub("[\\(\\)]", "", rn) rn <- gsub("cor_(.*)\\.(.*)", "cor \\2", rn) var_ci_corr_param <- grepl("^cor ", rn) var_ci_sigma_param <- rn == "sigma" out$CI_low[!corr_param & !sigma_param] <- var_ci$CI_low[!var_ci_corr_param & !var_ci_sigma_param] out$CI_high[!corr_param & !sigma_param] <- var_ci$CI_high[!var_ci_corr_param & !var_ci_sigma_param] if (any(sigma_param) && any(var_ci_sigma_param)) { out$CI_low[sigma_param] <- var_ci$CI_low[var_ci_sigma_param] out$CI_high[sigma_param] <- var_ci$CI_high[var_ci_sigma_param] } if (any(corr_param) && any(var_ci_corr_param)) { out$CI_low[corr_param] <- var_ci$CI_low[var_ci_corr_param] out$CI_high[corr_param] <- var_ci$CI_high[var_ci_corr_param] } } } else if (inherits(model, "glmmTMB")) { ## TODO 
"profile" seems to be less stable, so only wald? out <- tryCatch( { var_ci <- rbind( as.data.frame(suppressWarnings(stats::confint(model, parm = "theta_", method = "wald", level = ci))), as.data.frame(suppressWarnings(stats::confint(model, parm = "sigma", method = "wald", level = ci))) ) colnames(var_ci) <- c("CI_low", "CI_high", "not_used") var_ci$Component <- "conditional" # # regex-pattern to find conditional and ZI components group_factor <- insight::find_random(model, flatten = TRUE) group_factor2 <- paste0("(", paste(group_factor, collapse = "|"), ")") var_ci$Parameter <- row.names(var_ci) pattern <- paste0("^(zi\\.|", group_factor2, "\\.zi\\.)") zi_rows <- grepl(pattern, var_ci$Parameter) if (any(zi_rows)) { var_ci$Component[zi_rows] <- "zi" } # add Group var_ci$Group <- NA if (length(group_factor) > 1) { var_ci$Group[var_ci$Component == "conditional"] <- gsub(paste0("^", group_factor2, "\\.cond\\.(.*)"), "\\1", var_ci$Parameter[var_ci$Component == "conditional"]) var_ci$Group[var_ci$Component == "zi"] <- gsub(paste0("^", group_factor2, "\\.zi\\.(.*)"), "\\1", var_ci$Parameter[var_ci$Component == "zi"]) } else { var_ci$Group <- group_factor } var_ci$Group[var_ci$Group == "sigma"] <- "Residual" # remove cond/zi prefix pattern <- paste0("^(cond\\.|zi\\.|", group_factor, "\\.cond\\.|", group_factor, "\\.zi\\.)(.*)") for (p in pattern) { var_ci$Parameter <- gsub(p, "\\2", var_ci$Parameter) } # fix SD and Cor names var_ci$Parameter <- gsub(".Intercept.", "(Intercept)", var_ci$Parameter, fixed = TRUE) var_ci$Parameter <- gsub("^(Std\\.Dev\\.)(.*)", "SD \\(\\2\\)", var_ci$Parameter) var_ci$Parameter <- gsub("^Cor\\.(.*)\\.(.*)", "Cor \\(\\2~\\1:", var_ci$Parameter) # minor cleaning var_ci$Parameter <- gsub("((", "(", var_ci$Parameter, fixed = TRUE) var_ci$Parameter <- gsub("))", ")", var_ci$Parameter, fixed = TRUE) var_ci$Parameter <- gsub(")~", "~", var_ci$Parameter, fixed = TRUE) # fix sigma var_ci$Parameter[var_ci$Parameter == "sigma"] <- "SD (Observations)" # 
add name of group factor to cor cor_params <- grepl("^Cor ", var_ci$Parameter) if (any(cor_params)) { var_ci$Parameter[cor_params] <- paste0(var_ci$Parameter[cor_params], " ", group_factor, ")") } # remove unused columns (that are added back after merging) out$CI_low <- NULL out$CI_high <- NULL # filter component var_ci <- var_ci[var_ci$Component == component, ] var_ci$not_used <- NULL var_ci$Component <- NULL merge(out, var_ci, sort = FALSE, all.x = TRUE) # # groups <- utils::stack(insight::find_random(model, flatten = FALSE)) # colnames(groups) <- c("Group", "Component") # groups$Component <- ifelse(groups$Component == "random", "conditional", "zi") # # # regex-pattern to find conditional and ZI components # group_factor <- insight::find_random(model, flatten = TRUE) # group_factor2 <- paste0("(", paste(group_factor, collapse = "|"), ")") # # thetas <- as.data.frame(suppressWarnings(stats::confint(model, parm = "theta_", method = "wald", level = ci))) # thetas$Parameter <- row.names(thetas) # thetas$Component <- "conditional" # # find zi-prefix, to set correct component value # pattern <- paste0("^(zi\\.|", group_factor2, "\\.zi\\.)") # thetas$Component[grepl(pattern, row.names(thetas))] <- "zi" # # if (nrow(thetas) == nrow(groups)) { # thetas <- cbind(thetas, groups) # } else { # thetas <- merge(thetas, groups, sort = FALSE) # } # # # reorder columns # thetas <- datawizard::data_relocate(thetas, cols = "Component", after = "Group") # thetas <- datawizard::data_relocate(thetas, cols = "Parameter") # # sigma <- as.data.frame(suppressWarnings(stats::confint(model, parm = "sigma", method = "wald", level = ci))) # # # check for sigma component # if (nrow(sigma) > 0) { # sigma$Parameter <- row.names(sigma) # sigma$Group <- "Residual" # sigma$Component <- "conditional" # sigma <- datawizard::data_relocate(sigma, cols = "Parameter") # var_ci <- rbind(thetas, sigma) # } else { # var_ci <- thetas # } # # colnames(var_ci) <- c("Parameter", "CI_low", "CI_high", "not_used", 
"Group", "Component") # # # remove cond/zi prefix # pattern <- paste0("^(cond\\.|zi\\.|", group_factor2, "\\.cond\\.|", group_factor2, "\\.zi\\.)") # var_ci$Parameter <- gsub(pattern, "", var_ci$Parameter) # # fix SD and Cor names # var_ci$Parameter <- gsub(".Intercept.", "(Intercept)", var_ci$Parameter, fixed = TRUE) # var_ci$Parameter <- gsub("^(Std\\.Dev\\.)(.*)", "SD \\(\\2\\)", var_ci$Parameter) # var_ci$Parameter <- gsub("^Cor\\.(.*)\\.(.*)", "Cor \\(\\2~\\1:", var_ci$Parameter) # # minor cleaning # var_ci$Parameter <- gsub("((", "(", var_ci$Parameter, fixed = TRUE) # var_ci$Parameter <- gsub("))", ")", var_ci$Parameter, fixed = TRUE) # var_ci$Parameter <- gsub(")~", "~", var_ci$Parameter, fixed = TRUE) # # fix sigma # var_ci$Parameter[var_ci$Parameter == "sigma"] <- "SD (Observations)" # # add name of group factor to cor # cor_params <- grepl("^Cor ", var_ci$Parameter) # if (any(cor_params)) { # # this might break if length(group_factor) > 1; I don't have a test case handy # var_ci$Parameter[cor_params] <- paste0(var_ci$Parameter[cor_params], " ", group_factor, ")") # } # # # remove unused columns (that are added back after merging) # out$CI_low <- NULL # out$CI_high <- NULL # # # filter component # var_ci <- var_ci[var_ci$Component == component, ] # var_ci$not_used <- NULL # var_ci$Component <- NULL # # merge(out, var_ci, sort = FALSE, all.x = TRUE) }, error = function(e) { out } ) } out } # Extract Variance and Correlation Components ---- # store essential information about variance components... # basically, this function should return lme4::VarCorr(x) .get_variance_information <- function(model, model_component = "conditional") { # reason to be installed reason <- "to compute random effect variances for mixed models" # installed? 
insight::check_if_installed("lme4", reason = reason) if (inherits(model, "lme")) { insight::check_if_installed("nlme", reason = reason) } if (inherits(model, "clmm")) { insight::check_if_installed("ordinal", reason = reason) } if (inherits(model, "brmsfit")) { insight::check_if_installed("brms", reason = reason) } if (inherits(model, "cpglmm")) { insight::check_if_installed("cplm", reason = reason) } if (inherits(model, "rstanarm")) { insight::check_if_installed("rstanarm", reason = reason) } # stanreg # --------------------------- if (inherits(model, "stanreg")) { varcorr <- lme4::VarCorr(model) # GLMMapdative # --------------------------- } else if (inherits(model, "MixMod")) { vc1 <- vc2 <- NULL re_names <- insight::find_random(model) vc_cond <- !grepl("^zi_", colnames(model$D)) if (any(vc_cond)) { vc1 <- model$D[vc_cond, vc_cond, drop = FALSE] attr(vc1, "stddev") <- sqrt(diag(vc1)) attr(vc1, "correlation") <- stats::cov2cor(model$D[vc_cond, vc_cond, drop = FALSE]) } vc_zi <- grepl("^zi_", colnames(model$D)) if (any(vc_zi)) { colnames(model$D) <- gsub("^zi_(.*)", "\\1", colnames(model$D)) rownames(model$D) <- colnames(model$D) vc2 <- model$D[vc_zi, vc_zi, drop = FALSE] attr(vc2, "stddev") <- sqrt(diag(vc2)) attr(vc2, "correlation") <- stats::cov2cor(model$D[vc_zi, vc_zi, drop = FALSE]) } vc1 <- list(vc1) names(vc1) <- re_names[[1]] attr(vc1, "sc") <- sqrt(insight::get_deviance(model, verbose = FALSE) / insight::get_df(model, type = "residual", verbose = FALSE)) if (!is.null(vc2)) { vc2 <- list(vc2) names(vc2) <- re_names[[2]] attr(vc2, "sc") <- sqrt(insight::get_deviance(model, verbose = FALSE) / insight::get_df(model, type = "residual", verbose = FALSE)) } varcorr <- .compact_list(list(vc1, vc2)) names(varcorr) <- c("cond", "zi")[1:length(varcorr)] # joineRML # --------------------------- } else if (inherits(model, "mjoint")) { re_names <- insight::find_random(model, flatten = TRUE) varcorr <- summary(model)$D attr(varcorr, "stddev") <- sqrt(diag(varcorr)) 
attr(varcorr, "correlation") <- stats::cov2cor(varcorr) varcorr <- list(varcorr) names(varcorr) <- re_names[1] attr(varcorr, "sc") <- model$coef$sigma2[[1]] # nlme # --------------------------- } else if (inherits(model, "lme")) { re_names <- insight::find_random(model, split_nested = TRUE, flatten = TRUE) if (.is_nested_lme(model)) { varcorr <- .get_nested_lme_varcorr(model) } else { varcorr <- list(nlme::getVarCov(model)) } names(varcorr) <- re_names # ordinal # --------------------------- } else if (inherits(model, "clmm")) { varcorr <- ordinal::VarCorr(model) # glmmadmb # --------------------------- } else if (inherits(model, "glmmadmb")) { varcorr <- lme4::VarCorr(model) # brms # --------------------------- } else if (inherits(model, "brmsfit")) { varcorr <- lapply(names(lme4::VarCorr(model)), function(i) { element <- lme4::VarCorr(model)[[i]] if (i != "residual__") { if (!is.null(element$cov)) { out <- as.matrix(drop(element$cov[, 1, ])) colnames(out) <- rownames(out) <- gsub("Intercept", "(Intercept)", rownames(element$cov), fixed = TRUE) } else { out <- as.matrix(drop(element$sd[, 1])^2) colnames(out) <- rownames(out) <- gsub("Intercept", "(Intercept)", rownames(element$sd), fixed = TRUE) } attr(out, "sttdev") <- element$sd[, 1] } else { out <- NULL } out }) varcorr <- .compact_list(varcorr) names(varcorr) <- setdiff(names(lme4::VarCorr(model)), "residual__") attr(varcorr, "sc") <- lme4::VarCorr(model)$residual__$sd[1, 1] # cpglmm # --------------------------- } else if (inherits(model, "cpglmm")) { varcorr <- cplm::VarCorr(model) # lme4 / glmmTMB # --------------------------- } else { varcorr <- lme4::VarCorr(model) } # for glmmTMB, tell user that dispersion model is ignored if (inherits(model, c("glmmTMB", "MixMod"))) { if (is.null(model_component) || model_component == "conditional") { varcorr <- .collapse_cond(varcorr) } else { varcorr <- .collapse_zi(varcorr) } } varcorr } # Caution! this is somewhat experimental... 
# It retrieves the variance-covariance matrix of random effects
# from nested lme-models.
#
# lme4::VarCorr() on a nested lme model returns one stacked (character)
# matrix for all grouping levels; this helper splits that stack back into
# one numeric variance-covariance matrix per (nested) grouping factor.
# Returns a named list of matrices, each carrying the raw slope-intercept
# correlation(s) in attribute "cor_slope_intercept".
.get_nested_lme_varcorr <- function(model) {
  # installed?
  insight::check_if_installed("lme4")

  vcor <- lme4::VarCorr(model)
  class(vcor) <- "matrix"

  # each grouping level starts with an "(Intercept)" row; the row *before*
  # every later intercept marks the boundary between two levels
  re_index <- (which(rownames(vcor) == "(Intercept)") - 1)[-1]

  # split the stacked matrix (and its rownames) into per-level chunks
  vc_list <- split(data.frame(vcor, stringsAsFactors = FALSE), findInterval(1:nrow(vcor), re_index))
  vc_rownames <- split(rownames(vcor), findInterval(1:nrow(vcor), re_index))

  # names of random parameters and of the (nested) grouping factors
  re_pars <- unique(unlist(insight::find_parameters(model)["random"]))
  re_names <- insight::find_random(model, split_nested = TRUE, flatten = TRUE)
  names(vc_list) <- re_names

  mapply(
    function(x, y) {
      # slope-intercept correlation(s), if estimated for this level
      if ("Corr" %in% colnames(x)) {
        g_cor <- suppressWarnings(stats::na.omit(as.numeric(x[, "Corr"])))
      } else {
        g_cor <- NULL
      }
      row.names(x) <- as.vector(y)
      # keep only rows/columns that are actual random parameters
      vl <- rownames(x) %in% re_pars
      x <- suppressWarnings(apply(x[vl, vl, drop = FALSE], MARGIN = c(1, 2), FUN = as.numeric))
      # rebuild a square numeric matrix; the first column of `x` holds the
      # variances, which end up on the diagonal of `m1`
      m1 <- matrix(, nrow = nrow(x), ncol = ncol(x))
      m1[1:nrow(m1), 1:ncol(m1)] <- as.vector(x[, 1])
      rownames(m1) <- rownames(x)
      colnames(m1) <- rownames(x)
      if (!is.null(g_cor)) {
        # convert correlation into covariance and write it off-diagonal.
        # NOTE(review): only `m1_cov[1]` is used and it is written along the
        # anti-diagonal - this presumably assumes a single random slope per
        # level; verify for models with several correlated slopes
        m1_cov <- sqrt(prod(diag(m1))) * g_cor
        for (j in 1:ncol(m1)) {
          m1[j, nrow(m1) - j + 1] <- m1_cov[1]
        }
      }
      # stash the raw correlation(s); read later via attr(., "cor_slope_intercept")
      attr(m1, "cor_slope_intercept") <- g_cor
      m1
    },
    vc_list, vc_rownames,
    SIMPLIFY = FALSE
  )
}


# returns, per grouping factor, whether the random effects are nested;
# nesting is indicated by a ":" in the group name
.is_nested_lme <- function(model) {
  sapply(insight::find_random(model), function(i) any(grepl(":", i, fixed = TRUE)))
}


# glmmTMB returns a list of model information, one for conditional
# and one for zero-inflated part, so here we "unlist" it, returning
# only the conditional part.
# return the conditional ("cond") part if `x` is such a per-component list
# of variance information, otherwise return `x` unchanged
.collapse_cond <- function(x) {
  if (is.list(x) && "cond" %in% names(x)) {
    x[["cond"]]
  } else {
    x
  }
}

# return the zero-inflated ("zi") part if present, otherwise `x` unchanged
.collapse_zi <- function(x) {
  if (is.list(x) && "zi" %in% names(x)) {
    x[["zi"]]
  } else {
    x
  }
}


#### helper to extract various random effect variances -----------------------

# random slope-variances (tau 11) ----
# ----------------------------------------------
# `varcorr` is a named list with one variance-covariance matrix per grouping
# factor. The slope variances are the diagonal elements after the first one
# (the first diagonal element being the intercept variance).
.random_slope_variance <- function(model, varcorr) {
  if (inherits(model, "lme")) {
    unlist(lapply(varcorr, function(x) diag(x)[-1]))
  } else {
    out <- unlist(lapply(varcorr, function(x) diag(x)[-1]))
    # check for uncorrelated random slopes-intercept
    # (if the first dimname is not "(Intercept)", the term is a slope-only
    # component, whose variance `diag(x)[-1]` above did not pick up)
    non_intercepts <- which(sapply(varcorr, function(i) !grepl("^\\(Intercept\\)", dimnames(i)[[1]][1])))
    if (length(non_intercepts)) {
      dn <- unlist(lapply(varcorr, function(i) dimnames(i)[1])[non_intercepts])
      rndslopes <- unlist(lapply(varcorr, function(i) i[1])[non_intercepts])
      # unlist() appends ".1", ".2", ... to duplicated names - strip that
      # numeric suffix again. NOTE(review): assumes the group names themselves
      # do not end in ".<digits>" - TODO confirm
      names(rndslopes) <- gsub("(.*)\\.\\d+$", "\\1", names(rndslopes))
      # label as "<group>.<slope-term>", matching the correlated case above
      out <- c(out, stats::setNames(rndslopes, paste0(names(rndslopes), ".", dn)))
    }
    out
  }
}

# random intercept-variances, i.e.
# between-subject-variance (tau 00) ---- # ---------------------------------------------- .random_intercept_variance <- function(varcorr) { vars <- lapply(varcorr, function(i) i[1]) # check for uncorrelated random slopes-intercept non_intercepts <- which(sapply(varcorr, function(i) !grepl("^\\(Intercept\\)", dimnames(i)[[1]][1]))) if (length(non_intercepts)) { vars <- vars[-non_intercepts] } sapply(vars, function(i) i) } # slope-intercept-correlations (rho 01) ---- # ---------------------------------------------- .random_slope_intercept_corr <- function(model, varcorr) { if (inherits(model, "lme")) { rho01 <- unlist(sapply(varcorr, function(i) attr(i, "cor_slope_intercept"))) if (is.null(rho01)) { vc <- lme4::VarCorr(model) if ("Corr" %in% colnames(vc)) { re_name <- insight::find_random(model, split_nested = FALSE, flatten = TRUE) rho01 <- as.vector(suppressWarnings(stats::na.omit(as.numeric(vc[, "Corr"])))) if (length(re_name) == length(rho01)) { names(rho01) <- re_name } } } rho01 } else { corrs <- lapply(varcorr, attr, "correlation") rho01 <- sapply(corrs, function(i) { if (!is.null(i)) { i[-1, 1] } else { NULL } }) unlist(rho01) } } # slope-slope-correlations (rho 00) ---- # ---------------------------------------------- .random_slopes_corr <- function(model, varcorr) { corrs <- lapply(varcorr, attr, "correlation") rnd_slopes <- unlist(insight::find_random_slopes(model)) if (length(rnd_slopes) < 2) { return(NULL) } rho00 <- tryCatch( { lapply(corrs, function(d) { d[upper.tri(d, diag = TRUE)] <- NA d <- as.data.frame(d) d <- datawizard::reshape_longer(d, colnames_to = "Parameter1", rows_to = "Parameter2") d <- d[stats::complete.cases(d), ] d <- d[!d$Parameter1 %in% c("Intercept", "(Intercept)"), ] d$Parameter <- paste0(d$Parameter1, "-", d$Parameter2) d$Parameter1 <- d$Parameter2 <- NULL stats::setNames(d$Value, d$Parameter) }) }, error = function(e) { NULL } ) # rho01 <- tryCatch( # { # sapply(corrs, function(i) { # if (!is.null(i)) { # slope_pairs <- 
utils::combn(x = rnd_slopes, m = 2, simplify = FALSE) # lapply(slope_pairs, function(j) { # stats::setNames(i[j[1], j[2]], paste0("..", paste0(j, collapse = "-"))) # }) # } else { # NULL # } # }) # }, # error = function(e) { # NULL # } # ) unlist(rho00) } parameters/R/methods_betareg.R0000644000175000017500000000621214135275207016236 0ustar nileshnilesh## TODO add ci_method later? #' @rdname model_parameters.averaging #' @export model_parameters.betareg <- function(model, ci = .95, bootstrap = FALSE, iterations = 1000, component = c("conditional", "precision", "all"), standardize = NULL, exponentiate = FALSE, p_adjust = NULL, verbose = TRUE, ...) { component <- match.arg(component) if (component == "all") { merge_by <- c("Parameter", "Component") } else { merge_by <- "Parameter" } ## TODO check merge by out <- .model_parameters_generic( model = model, ci = ci, component = component, bootstrap = bootstrap, iterations = iterations, merge_by = c("Parameter", "Component"), standardize = standardize, exponentiate = exponentiate, p_adjust = p_adjust, ... ) attr(out, "object_name") <- deparse(substitute(model), width.cutoff = 500) out } #' @export ci.betareg <- function(x, ci = .95, component = "all", ...) { component <- match.arg(component, choices = c("all", "conditional", "precision")) .ci_generic(model = x, ci = ci, dof = Inf, component = component) } #' @rdname standard_error #' @export standard_error.betareg <- function(model, component = c("all", "conditional", "precision"), ...) { component <- match.arg(component) params <- insight::get_parameters(model) cs <- do.call(rbind, stats::coef(summary(model))) se <- cs[, 2] out <- .data_frame( Parameter = .remove_backticks_from_string(names(se)), Component = params$Component, SE = as.vector(se) ) if (component != "all") { out <- out[out$Component == component, ] } out } #' @rdname p_value.DirichletRegModel #' @export p_value.betareg <- function(model, component = c("all", "conditional", "precision"), ...) 
{ component <- match.arg(component) params <- insight::get_parameters(model) cs <- do.call(rbind, stats::coef(summary(model))) p <- cs[, 4] out <- .data_frame( Parameter = params$Parameter, Component = params$Component, p = as.vector(p) ) if (component != "all") { out <- out[out$Component == component, ] } out } #' @export simulate_model.betareg <- function(model, iterations = 1000, component = c("all", "conditional", "precision"), ...) { component <- match.arg(component) out <- .simulate_model(model, iterations, component = component) class(out) <- c("parameters_simulate_model", class(out)) attr(out, "object_name") <- .safe_deparse(substitute(model)) out } parameters/R/methods_margins.R0000644000175000017500000000415614131014352016256 0ustar nileshnilesh#' @export model_parameters.margins <- function(model, ci = .95, exponentiate = FALSE, p_adjust = NULL, verbose = TRUE, ...) { # Parameters, Estimate and CI params <- insight::get_parameters(model) params <- .data_frame( params, SE = summary(model)$SE ) # CI params <- merge(params, ci(model, ci = ci), by = "Parameter", sort = FALSE) # Statistic statistic <- insight::get_statistic(model) params <- merge(params, statistic, by = "Parameter", sort = FALSE) # p-value params <- .data_frame(params, p = summary(model)$p) # ==== Renaming if ("Statistic" %in% names(params)) { names(params) <- gsub("Statistic", gsub("(-|\\s)statistic", "", attr(statistic, "statistic", exact = TRUE)), names(params)) names(params) <- gsub("chi-squared", "Chi2", names(params)) } names(params) <- gsub("(c|C)hisq", "Chi2", names(params)) names(params) <- gsub("Estimate", "Coefficient", names(params)) # ==== adjust p-values? 
if (!is.null(p_adjust)) { params <- .p_adjust(params, p_adjust, model, verbose) } if (isTRUE(exponentiate) || identical(exponentiate, "nongaussian")) { params <- .exponentiate_parameters(params, model, exponentiate) } params <- .add_model_parameters_attributes( params, model, ci, exponentiate, p_adjust = p_adjust, verbose = verbose, ... ) attr(params, "object_name") <- deparse(substitute(model), width.cutoff = 500) class(params) <- c("parameters_model", "see_parameters_model", class(params)) params } #' @export ci.margins <- function(x, ci = .95, ...) { .ci_generic(model = x, ci = ci, dof = Inf, ...) } #' @export standard_error.margins <- function(model, ...) { params <- insight::get_parameters(model) .data_frame( Parameter = params$Parameter, SE = summary(model)$SE ) } #' @export p_value.margins <- function(model, ...) { params <- insight::get_parameters(model) .data_frame( Parameter = params$Parameter, p = summary(model)$p ) } #' @export format_parameters.margins <- function(model, ...) { NULL } parameters/R/methods_survival.R0000644000175000017500000000554314135275207016506 0ustar nileshnilesh# classes: .coxph, .aareg, .survreg, .riskRegression #################### .coxph ------ #' @rdname standard_error #' @export standard_error.coxph <- function(model, method = NULL, ...) { robust <- !is.null(method) && method == "robust" if (isTRUE(robust)) { return(standard_error_robust(model, ...)) } params <- insight::get_parameters(model) cs <- stats::coef(summary(model)) se <- cs[, 3] # check if (length(se) > nrow(params)) { se <- se[match(params$Parameter, .remove_backticks_from_string(rownames(cs)))] } .data_frame( Parameter = params$Parameter, SE = as.vector(se) ) } #' @export p_value.coxph <- function(model, ...) 
{ params <- insight::get_parameters(model) stats <- insight::get_statistic(model) params <- merge(params, stats, sort = FALSE) statistic <- attributes(stats)$statistic # convert in case of z if (identical(statistic, "z-statistic")) { params$Statistic <- params$Statistic^2 } .data_frame( Parameter = params$Parameter, p = as.vector(1 - stats::pchisq(params$Statistic, df = 1)) ) } #################### .aareg ------ #' @export standard_error.aareg <- function(model, ...) { s <- summary(model) se <- s$table[, "se(coef)"] .data_frame( Parameter = .remove_backticks_from_string(names(se)), SE = as.vector(se) ) } #' @export p_value.aareg <- function(model, ...) { s <- summary(model) p <- s$table[, "p"] .data_frame( Parameter = .remove_backticks_from_string(names(p)), p = as.vector(p) ) } #################### .survreg ------ #' @export standard_error.survreg <- function(model, method = NULL, ...) { robust <- !is.null(method) && method == "robust" if (isTRUE(robust)) { return(standard_error_robust(model, ...)) } s <- summary(model) se <- s$table[, 2] .data_frame( Parameter = .remove_backticks_from_string(names(se)), SE = as.vector(se) ) } #' @export p_value.survreg <- function(model, method = NULL, robust = FALSE, ...) { if (isTRUE(robust)) { return(p_value_robust(model, ...)) } s <- summary(model) p <- s$table[, "p"] .data_frame( Parameter = .remove_backticks_from_string(names(p)), p = as.vector(p) ) } #################### .riskRegression ------ #' @export standard_error.riskRegression <- function(model, ...) { junk <- utils::capture.output(cs <- stats::coef(model)) .data_frame( Parameter = .remove_backticks_from_string(as.vector(cs[, 1])), SE = as.numeric(cs[, "StandardError"]) ) } #' @export p_value.riskRegression <- function(model, ...) 
{ junk <- utils::capture.output(cs <- stats::coef(model)) .data_frame( Parameter = .remove_backticks_from_string(as.vector(cs[, 1])), p = as.numeric(cs[, "Pvalue"]) ) } parameters/R/methods_BayesFM.R0000644000175000017500000001030214104713406016100 0ustar nileshnilesh#' Parameters from Bayesian Exploratory Factor Analysis #' #' Format Bayesian Exploratory Factor Analysis objects from the BayesFM package. #' #' @param model Bayesian EFA created by the `BayesFM::befa`. #' @inheritParams principal_components #' @inheritParams bayestestR::describe_posterior #' @inheritParams model_parameters.default #' @param ... Arguments passed to or from other methods. #' #' @examples #' library(parameters) #' \donttest{ #' if (require("BayesFM")) { #' efa <- BayesFM::befa(mtcars, iter = 1000) #' results <- model_parameters(efa, sort = TRUE) #' results #' efa_to_cfa(results) #' } #' } #' @return A data frame of loadings. #' @export model_parameters.befa <- function(model, sort = FALSE, centrality = "median", dispersion = FALSE, ci = .95, ci_method = "hdi", test = NULL, verbose = TRUE, ...) 
{ if (!attr(model, "post.column.switch") | !attr(model, "post.sign.switch")) { insight::check_if_installed("BayesFM") if (!attr(model, "post.column.switch")) model <- BayesFM::post.column.switch(model) if (!attr(model, "post.sign.switch")) model <- BayesFM::post.sign.switch(model) } loadings <- as.data.frame(model$alpha) names(loadings) <- gsub("alpha:", "", names(loadings)) loadings <- stats::reshape( loadings, direction = "long", varying = list(names(loadings)), sep = "_", timevar = "Variable", v.names = "Loading", idvar = "Draw", times = names(loadings) ) components <- as.data.frame(model$dedic) names(components) <- gsub("dedic:", "", names(components)) components <- stats::reshape( components, direction = "long", varying = list(names(components)), sep = "_", timevar = "Variable", v.names = "Component", idvar = "Draw", times = names(components) ) loadings <- merge(components, loadings) # Compute posterior by dedic long_loadings <- data.frame() for (var in unique(loadings$Variable)) { for (comp in unique(loadings$Component)) { chunk <- loadings[loadings$Variable == var & loadings$Component == comp, ] if (nrow(chunk) == 0) { rez <- bayestestR::describe_posterior( loadings$Loading, centrality = centrality, dispersion = dispersion, ci = ci, ci_method = ci_method, test = test, ... ) rez[1, ] <- NA } else { rez <- bayestestR::describe_posterior( chunk$Loading, centrality = centrality, dispersion = dispersion, ci = ci, ci_method = ci_method, test = test, ... 
) } long_loadings <- rbind( long_loadings, cbind(data.frame(Component = comp, Variable = var), rez) ) } } long_loadings$Component <- paste0("F", long_loadings$Component) # Clean long_loadings$Parameter <- NULL if ("CI" %in% names(long_loadings) && .n_unique(long_loadings$CI) == 1) { long_loadings$CI <- NULL } long_loadings <- long_loadings[long_loadings$Component != 0, ] loadings <- .wide_loadings(long_loadings, loadings_columns = names(long_loadings)[3], component_column = "Component", variable_column = "Variable") # Add attributes attr(loadings, "model") <- model attr(loadings, "additional_arguments") <- list(...) attr(loadings, "n") <- .n_unique(long_loadings$Component) attr(loadings, "loadings_columns") <- names(loadings)[2:ncol(loadings)] attr(loadings, "ci") <- ci # Sorting if (isTRUE(sort)) { loadings <- .sort_loadings(loadings) } # Add some more attributes long_loadings <- stats::na.omit(long_loadings) row.names(long_loadings) <- NULL attr(loadings, "loadings_long") <- long_loadings # add class-attribute for printing class(loadings) <- c("parameters_efa", class(loadings)) loadings } parameters/R/methods_epi2x2.R0000644000175000017500000000347414064172570015745 0ustar nileshnilesh#' @export model_parameters.epi.2by2 <- function(model, verbose = TRUE, ...) 
{ # get parameter estimates params <- insight::get_parameters(model) colnames(params)[2] <- "Coefficient" # get coefficients including CI coef_names <- grepl("^([^NNT]*)(\\.strata\\.wald)", names(model$massoc.detail), perl = TRUE) cf <- model$massoc.detail[coef_names] names(cf) <- gsub(".strata.wald", "", names(cf), fixed = TRUE) # extract CI cis <- do.call(rbind, cf) cis$Parameter <- rownames(cis) cis$est <- NULL colnames(cis) <- c("CI_low", "CI_high", "Parameter") # merge params <- merge(params, cis, sort = FALSE) # find fraction estimates, multiply by 100 to get percentages fractions <- params$Parameter %in% c("AFRisk", "PAFRisk") params[fractions, c("Coefficient", "CI_low", "CI_high")] <- 100 * params[fractions, c("Coefficient", "CI_low", "CI_high")] # pretty names pretty_names <- params$Parameter pretty_names[pretty_names == "PR"] <- "Prevalence Ratio" pretty_names[pretty_names == "OR"] <- "Odds Ratio" pretty_names[pretty_names == "ARisk"] <- "Attributable Risk" pretty_names[pretty_names == "PARisk"] <- "Attributable Risk in Population" pretty_names[pretty_names == "AFRisk"] <- "Attributable Fraction in Exposed (%)" pretty_names[pretty_names == "PAFRisk"] <- "Attributable Fraction in Population (%)" stats <- model$massoc.detail$chi2.strata.uncor attr(params, "footer_text") <- paste0("Test that Odds Ratio = 1: Chi2(", stats[["df"]], ") = ", insight::format_value(stats[["test.statistic"]]), ", ", insight::format_p(stats[["p.value.2s"]])) attr(params, "pretty_names") <- stats::setNames(pretty_names, params$Parameter) attr(params, "no_caption") <- TRUE class(params) <- c("parameters_model", "see_parameters_model", class(params)) params } parameters/R/zzz.R0000644000175000017500000000027214017731457013742 0ustar nileshnilesh.onLoad <- function(libname, pkgname) { if (requireNamespace("emmeans", quietly = TRUE)) { emmeans::.emm_register(c("bootstrap_model", "bootstrap_parameters"), pkgname) } } 
parameters/R/cluster_discrimination.R0000644000175000017500000000576614141263004017661 0ustar nileshnilesh#' Compute a linear discriminant analysis on classified cluster groups
#'
#' Computes linear discriminant analysis (LDA) on classified cluster groups,
#' and determines the goodness of classification for each cluster group. See
#' `MASS::lda()` for details.
#'
#' @param x A data frame
#' @param cluster_groups Group classification of the cluster analysis, which can
#'   be retrieved from the [cluster_analysis()] function.
#' @param ... Other arguments to be passed to or from.
#'
#' @seealso [n_clusters()] to determine the number of clusters to extract, [cluster_analysis()] to compute a cluster analysis and [check_clusterstructure()] to check suitability of data for clustering.
#'
#' @examples
#' if (requireNamespace("MASS", quietly = TRUE)) {
#'   # Retrieve group classification from hierarchical cluster analysis
#'   clustering <- cluster_analysis(iris[, 1:4], n = 3)
#'
#'   # Goodness of group classification
#'   cluster_discrimination(clustering)
#' }
#' @export
cluster_discrimination <- function(x, cluster_groups = NULL, ...) {
  UseMethod("cluster_discrimination")
}


#' @export
cluster_discrimination.cluster_analysis <- function(x, cluster_groups = NULL, ...) {
  # default to the cluster assignments stored in the cluster_analysis object
  if (is.null(cluster_groups)) {
    cluster_groups <- stats::predict(x)
  }
  cluster_discrimination(attributes(x)$data, cluster_groups, ...)
}


#' @export
cluster_discrimination.default <- function(x, cluster_groups = NULL, ...) {
  if (is.null(cluster_groups)) {
    stop("Please provide cluster assignments via 'cluster_groups'.")
  }

  # Drop incomplete cases *jointly*, so rows of `x` stay aligned with their
  # cluster assignment. (Calling stats::na.omit() on `x` and `cluster_groups`
  # independently could pair observations with the wrong group whenever the
  # missings do not occur in the same rows.)
  complete_rows <- stats::complete.cases(x) & !is.na(cluster_groups)
  x <- x[complete_rows, , drop = FALSE]
  cluster_groups <- cluster_groups[complete_rows]

  # compute discriminant analysis of groups on original data frame, using
  # leave-one-out cross-validated class predictions (CV = TRUE)
  insight::check_if_installed("MASS")
  disc <- MASS::lda(cluster_groups ~ ., data = x, na.action = "na.omit", CV = TRUE)

  # Assess the accuracy of the prediction
  # percent correct for each category of groups
  classification_table <- table(cluster_groups, disc$class)
  correct <- diag(prop.table(classification_table, 1))

  # total correct percentage
  total_correct <- sum(diag(prop.table(classification_table)))

  # Use the table's row labels so each accuracy value is paired with the group
  # it was computed for. (`unique(cluster_groups)` is in order of first
  # appearance and need not match the table's sorted row order, which is the
  # order of `correct`.)
  out <- data.frame(
    Group = rownames(classification_table),
    Accuracy = correct,
    stringsAsFactors = FALSE
  )

  # Sort by group label (note: the code always sorted by group, not by accuracy)
  out <- out[order(out$Group), ]

  attr(out, "Overall_Accuracy") <- total_correct
  class(out) <- c("cluster_discrimination", class(out))
  out
}


# Utils -------------------------------------------------------------------

#' @export
print.cluster_discrimination <- function(x, ...) {
  orig_x <- x
  insight::print_color("# Accuracy of Cluster Group Classification via Linear Discriminant Analysis (LDA)\n\n", "blue")

  total_accuracy <- attributes(x)$Overall_Accuracy
  # format per-group and overall accuracy as percentages for display
  x$Accuracy <- sprintf("%.2f%%", 100 * x$Accuracy)
  total <- sprintf("%.2f%%", 100 * total_accuracy)

  print.data.frame(x, row.names = FALSE, ...)
  insight::print_color(sprintf("\nOverall accuracy of classification: %s\n", total), "yellow")
  # return the unmodified object invisibly, as print methods should
  invisible(orig_x)
}
parameters/R/convert_efa_to_cfa.R0000644000175000017500000000521514100573643016707 0ustar nileshnilesh#' Conversion between EFA results and CFA structure
#'
#' Enables a conversion between Exploratory Factor Analysis (EFA) and
#' Confirmatory Factor Analysis (CFA) `lavaan`-ready structure.
#'
#' @param model An EFA model (e.g., a `psych::fa` object).
#' @inheritParams principal_components
#' @param names Vector containing dimension names.
#' #' @examples #' \donttest{ #' library(parameters) #' if (require("psych") && require("lavaan")) { #' efa <- psych::fa(attitude, nfactors = 3) #' #' model1 <- efa_to_cfa(efa) #' model2 <- efa_to_cfa(efa, threshold = 0.3) #' #' anova( #' lavaan::cfa(model1, data = attitude), #' lavaan::cfa(model2, data = attitude) #' ) #' } #' } #' @return Converted index. #' @export convert_efa_to_cfa <- function(model, ...) { UseMethod("convert_efa_to_cfa") } #' @rdname convert_efa_to_cfa #' @inheritParams model_parameters.principal #' @export convert_efa_to_cfa.fa <- function(model, threshold = "max", names = NULL, ...) { .efa_to_cfa(model_parameters(model, threshold = threshold, ...), names = names, ...) } #' @export convert_efa_to_cfa.fa.ci <- convert_efa_to_cfa.fa #' @export convert_efa_to_cfa.parameters_efa <- function(model, threshold = NULL, names = NULL, ...) { if (!is.null(threshold)) { model <- model_parameters(attributes(model)$model, threshold = threshold, ...) } .efa_to_cfa(model, names = names, ...) } #' @export convert_efa_to_cfa.parameters_pca <- convert_efa_to_cfa.parameters_efa #' @rdname convert_efa_to_cfa #' @export efa_to_cfa <- convert_efa_to_cfa #' @keywords internal .efa_to_cfa <- function(loadings, names = NULL, ...) { loadings <- attributes(loadings)$loadings_long # Get dimension names if (is.null(names)) { names <- unique(loadings$Component) } # Catch error if (length(names) != .n_unique(loadings$Component)) { stop(paste("The `names` vector must be of same length as the number of dimensions, in this case", length(unique(loadings$Component)))) } cfa <- c() # Iterate over dimensions for (i in 1:length(names)) { cfa <- c( cfa, paste0(names[i], " =~ ", paste(as.character(loadings[loadings$Component == unique(loadings$Component)[i], "Variable"]), collapse = " + ")) ) } cfa <- paste0(cfa, collapse = "\n") cfa <- paste0("# Latent variables\n", cfa) class(cfa) <- c("cfa_model", class(cfa)) cfa } #' @export print.cfa_model <- function(x, ...) 
{ cat(x) invisible(x) } parameters/R/format_p_adjust.R0000644000175000017500000000761414140434006016260 0ustar nileshnilesh#' Format the name of the p-value adjustment methods #' #' Format the name of the p-value adjustment methods. #' #' @param method Name of the method. #' #' @examples #' library(parameters) #' #' format_p_adjust("holm") #' format_p_adjust("bonferroni") #' @return A string with the full surname(s) of the author(s), including year of publication, for the adjustment-method. #' @export format_p_adjust <- function(method) { method <- tolower(method) switch(method, "holm" = "Holm (1979)", "hochberg" = "Hochberg (1988)", "hommel" = "Hommel (1988)", "bonferroni" = "Bonferroni", "fdr" = "Benjamini & Hochberg (1995)", "bh" = "Benjamini & Hochberg (1995)", "by" = "Benjamini & Yekutieli (2001)", "tukey" = "Tukey", "scheffe" = "Scheffe", "sidak" = "Sidak", method ) } .p_adjust <- function(params, p_adjust, model = NULL, verbose = TRUE) { # check if we have any adjustment at all, and a p-column if (!is.null(p_adjust) && "p" %in% colnames(params) && p_adjust != "none") { ## TODO add "mvt" method from emmeans # prepare arguments all_methods <- c(tolower(stats::p.adjust.methods), "tukey", "scheffe", "sidak") # for interaction terms, e.g. for "by" argument in emmeans # pairwise comparison, we have to adjust the rank resp. 
the # number of estimates in a comparison family rank_adjust <- tryCatch( { correction <- 1 by_vars <- model@misc$by.vars if (!is.null(by_vars) && by_vars %in% colnames(params)) { correction <- .n_unique(params[[by_vars]]) } correction }, error = function(e) { 1 } ) # only proceed if valid argument-value if (tolower(p_adjust) %in% all_methods) { # save old values, to check if p-adjustment worked old_p_vals <- params$p # find statistic column stat_column <- stats::na.omit(match(c("F", "t", "Statistic"), colnames(params))) if (tolower(p_adjust) %in% tolower(stats::p.adjust.methods)) { # base R adjustments params$p <- stats::p.adjust(params$p, method = p_adjust) } else if (tolower(p_adjust) == "tukey") { # tukey adjustment if ("df" %in% colnames(params) && length(stat_column) > 0) { params$p <- stats::ptukey(sqrt(2) * abs(params[[stat_column]]), nrow(params) / rank_adjust, params$df, lower.tail = FALSE) } } else if (tolower(p_adjust) == "scheffe" && !is.null(model)) { # scheffe adjustment if ("df" %in% colnames(params) && length(stat_column) > 0) { # 1st try scheffe_ranks <- try(qr(model@linfct)$rank, silent = TRUE) # 2nd try if (inherits(scheffe_ranks, "try-error") || is.null(scheffe_ranks)) { scheffe_ranks <- try(model$qr$rank, silent = TRUE) } if (inherits(scheffe_ranks, "try-error") || is.null(scheffe_ranks)) { scheffe_ranks <- nrow(params) } scheffe_ranks <- scheffe_ranks / rank_adjust params$p <- stats::pf(params[[stat_column]]^2 / scheffe_ranks, df1 = scheffe_ranks, df2 = params$df, lower.tail = FALSE ) } } else if (tolower(p_adjust) == "sidak") { # sidak adjustment params$p <- 1 - (1 - params$p)^(nrow(params) / rank_adjust) } if (isTRUE(all.equal(old_p_vals, params$p)) && !identical(p_adjust, "none")) { if (verbose) { warning(insight::format_message(paste0("Could not apply ", p_adjust, "-adjustment to p-values. Either something went wrong, or the non-adjusted p-values were already very large.")), call. 
= FALSE) } } } else if (verbose) { warning(paste0("'p_adjust' must be one of ", paste0(all_methods, collapse = ", ")), call. = FALSE) } } params } parameters/R/methods_pglm.R0000644000175000017500000000030314012467213015551 0ustar nileshnilesh#' @export p_value.pglm <- function(model, ...) { p <- summary(model)$estimate[, 4] .data_frame( Parameter = .remove_backticks_from_string(names(p)), p = as.vector(p) ) } parameters/R/methods_tseries.R0000644000175000017500000000013114132765437016303 0ustar nileshnilesh# classes: .garch #' @export degrees_of_freedom.garch <- degrees_of_freedom.mhurdle parameters/R/methods_systemfit.R0000644000175000017500000000523514131306004016642 0ustar nileshnilesh#' @export model_parameters.systemfit <- function(model, ci = .95, ci_method = NULL, bootstrap = FALSE, iterations = 1000, standardize = NULL, exponentiate = FALSE, robust = FALSE, p_adjust = NULL, summary = FALSE, verbose = TRUE, ...) { out <- .model_parameters_generic( model = model, ci = ci, ci_method = ci_method, bootstrap = bootstrap, iterations = iterations, merge_by = c("Parameter", "Component"), standardize = standardize, exponentiate = exponentiate, robust = robust, p_adjust = p_adjust, summary = summary, ... ) attr(out, "object_name") <- deparse(substitute(model), width.cutoff = 500) out } #' @export standard_error.systemfit <- function(model, ...) { cf <- stats::coef(summary(model)) f <- insight::find_formula(model) system_names <- names(f) parameter_names <- row.names(cf) out <- lapply(system_names, function(i) { pattern <- paste0("^", i, "_(.*)") params <- grepl(pattern, parameter_names) data.frame( Parameter = gsub(pattern, "\\1", parameter_names[params]), SE = as.vector(cf[params, 2]), Component = i, stringsAsFactors = FALSE ) }) do.call(rbind, out) } #' @export p_value.systemfit <- function(model, ...) 
{ cf <- stats::coef(summary(model)) f <- insight::find_formula(model) system_names <- names(f) parameter_names <- row.names(cf) out <- lapply(system_names, function(i) { pattern <- paste0("^", i, "_(.*)") params <- grepl(pattern, parameter_names) data.frame( Parameter = gsub(pattern, "\\1", parameter_names[params]), p = as.vector(cf[params, 4]), Component = i, stringsAsFactors = FALSE ) }) do.call(rbind, out) } #' @export degrees_of_freedom.systemfit <- function(model, ...) { df <- c() s <- summary(model)$eq params <- insight::find_parameters(model) f <- insight::find_formula(model) system_names <- names(f) for (i in 1:length(system_names)) { dfs <- rep(s[[i]]$df[2], length(params[[i]])) df_names <- rep(names(params[i]), length(params[[i]])) df <- c(df, stats::setNames(dfs, df_names)) } df } #' @export ci.systemfit <- ci.lm parameters/R/methods_quantreg.R0000644000175000017500000001657314132746504016466 0ustar nileshnilesh# quantreg: .rq, .rqss, .crq, .nlrq, .rqs # model parameters --------------------- #' @rdname model_parameters.cgam #' @export model_parameters.rqss <- model_parameters.cgam #' @export model_parameters.rqs <- function(model, ci = .95, bootstrap = FALSE, iterations = 1000, standardize = NULL, exponentiate = FALSE, verbose = TRUE, ...) { out <- .model_parameters_generic( model = model, ci = ci, bootstrap = bootstrap, iterations = iterations, merge_by = c("Parameter", "Component"), standardize = standardize, exponentiate = exponentiate, ... ) attr(out, "object_name") <- deparse(substitute(model), width.cutoff = 500) out } # ci --------------------- #' @export ci.rq <- ci.default #' @export ci.rqss <- ci.default #' @export ci.crq <- ci.default #' @export ci.nlrq <- ci.default #' @export ci.rqs <- ci.default # standard errors --------------------- #' @export standard_error.rq <- function(model, ...) 
{ se <- .get_quantreg_se(model) if (is.null(se)) { vc <- insight::get_varcov(model) se <- as.vector(sqrt(diag(vc))) } params <- insight::get_parameters(model) params$SE <- se params[intersect(colnames(params), c("Parameter", "SE", "Component"))] } #' @export standard_error.rqs <- function(model, ...) { se <- tryCatch( { s <- suppressWarnings(summary(model, covariance = TRUE)) cs <- do.call(rbind, lapply(s, stats::coef)) cs[, "Std. Error"] }, error = function(e) { NULL } ) params <- insight::get_parameters(model) data.frame( Parameter = params$Parameter, SE = se, Component = params$Component, stringsAsFactors = FALSE, row.names = NULL ) } #' @export standard_error.crq <- standard_error.rq #' @export standard_error.nlrq <- standard_error.rq #' @export standard_error.rqss <- function(model, component = c("all", "conditional", "smooth_terms"), ...) { component <- match.arg(component) cs <- summary(model)$coef se_column <- intersect(c("Std Error", "Std. Error"), colnames(cs)) se <- cs[, se_column] params_cond <- insight::get_parameters(model, component = "conditional") params_smooth <- insight::get_parameters(model, component = "smooth_terms") out_cond <- .data_frame( Parameter = params_cond$Parameter, SE = se, Component = "conditional" ) out_smooth <- .data_frame( Parameter = params_smooth$Parameter, SE = NA, Component = "smooth_terms" ) switch(component, "all" = rbind(out_cond, out_smooth), "conditional" = out_cond, "smooth_terms" = out_smooth ) } .get_quantreg_se <- function(model) { se <- tryCatch( { cs <- suppressWarnings(stats::coef(summary(model))) se_column <- intersect(c("Std Error", "Std. 
Error"), colnames(cs)) if (length(se_column)) { cs[, se_column] } else { vc <- insight::get_varcov(model) as.vector(sqrt(diag(vc))) } }, error = function(e) { NULL } ) if (is.null(se)) { se <- tryCatch( { sc <- summary(model) if (all(unlist(lapply(sc, is.list)))) { list_sc <- lapply(sc, function(i) { .x <- as.data.frame(i) .x$Parameter <- rownames(.x) .x }) out <- do.call(rbind, list_sc) se <- stats::setNames(out$coefficients.Std.Error, sprintf("tau (%g)", out$tau)) } else { se <- stats::setNames(unname(sc$coefficients[, 4]), names(sc$coefficients[, 4])) } }, error = function(e) { NULL } ) } se } # p values --------------------- #' @export p_value.rq <- function(model, ...) { p <- .get_quantreg_p(model) params <- insight::get_parameters(model) params$p <- p params[intersect(colnames(params), c("Parameter", "p", "Component"))] } #' @export p_value.rqs <- function(model, ...) { p <- tryCatch( { s <- suppressWarnings(summary(model, covariance = TRUE)) cs <- do.call(rbind, lapply(s, stats::coef)) cs[, "Pr(>|t|)"] }, error = function(e) { NULL } ) params <- insight::get_parameters(model) data.frame( Parameter = params$Parameter, p = p, Component = params$Component, stringsAsFactors = FALSE, row.names = NULL ) } #' @export p_value.crq <- p_value.rq #' @export p_value.nlrq <- p_value.rq #' @export p_value.rqss <- function(model, component = c("all", "conditional", "smooth_terms"), ...) 
{ component <- match.arg(component) cs <- summary(model)$coef p_column <- intersect(c("Pr(>|t|)", "Pr(>|z|)"), colnames(cs)) p_cond <- cs[, p_column] cs <- summary(model)$qsstab p_smooth <- cs[, "Pr(>F)"] params_cond <- insight::get_parameters(model, component = "conditional") params_smooth <- insight::get_parameters(model, component = "smooth_terms") out_cond <- .data_frame( Parameter = params_cond$Parameter, p = as.vector(p_cond), Component = "conditional" ) out_smooth <- .data_frame( Parameter = params_smooth$Parameter, p = as.vector(p_smooth), Component = "smooth_terms" ) switch(component, "all" = rbind(out_cond, out_smooth), "conditional" = out_cond, "smooth_terms" = out_smooth ) } .get_quantreg_p <- function(model) { p <- tryCatch( { cs <- suppressWarnings(stats::coef(summary(model))) cs[, "Pr(>|t|)"] }, error = function(e) { NULL } ) if (is.null(p)) { p <- tryCatch( { .get_pval_from_summary( model, cs = suppressWarnings(stats::coef(summary(model, covariance = TRUE))) ) }, error = function(e) { NULL } ) } if (is.null(p)) { p <- tryCatch( { sc <- summary(model) if (all(unlist(lapply(sc, is.list)))) { list_sc <- lapply(sc, function(i) { .x <- as.data.frame(i) .x$Parameter <- rownames(.x) .x }) out <- do.call(rbind, list_sc) p <- stats::setNames(out[[grep("^coefficients\\.Pr", colnames(out))]], sprintf("tau (%g)", out$tau)) } else { p <- stats::setNames(unname(sc$coefficients[, 6]), names(sc$coefficients[, 6])) } }, error = function(e) { NULL } ) } p } # degrees of freedom --------------------- #' @export degrees_of_freedom.rqs <- function(model, ...) 
{ tryCatch( { s <- suppressWarnings(summary(model, covariance = TRUE)) cs <- lapply(s, function(i) i$rdf) unique(unlist(cs)) }, error = function(e) { NULL } ) } #' @export degrees_of_freedom.rqss <- degrees_of_freedom.multinom #' @export degrees_of_freedom.rq <- degrees_of_freedom.rqs #' @export degrees_of_freedom.nlrq <- degrees_of_freedom.mhurdle parameters/R/methods_hclust.R0000644000175000017500000000767514133317132016135 0ustar nileshnilesh#' @rdname model_parameters.kmeans #' @inheritParams cluster_centers #' #' @examples #' # #' # Hierarchical clustering (hclust) --------------------------- #' data <- iris[1:4] #' model <- hclust(dist(data)) #' clusters <- cutree(model, 3) #' #' rez <- model_parameters(model, data, clusters) #' rez #' #' # Get clusters #' predict(rez) #' #' # Clusters centers in long form #' attributes(rez)$means #' #' # Between and Total Sum of Squares #' attributes(rez)$Total_Sum_Squares #' attributes(rez)$Between_Sum_Squares #' @export model_parameters.hclust <- function(model, data = NULL, clusters = NULL, ...) { if (is.null(data)) { stop("This function requires the data used to compute the clustering to be provided via 'data' as it is not accessible from the clustering object itself.") } if (is.null(clusters)) { stop("This function requires a vector of clusters assignments of same length as data to be passed, as it is not contained in the clustering object itself.") } params <- cluster_centers(data, clusters, ...) 
# Long means means <- datawizard::reshape_longer(params, cols = 4:ncol(params), values_to = "Mean", names_to = "Variable" ) attr(params, "variance") <- attributes(params)$variance attr(params, "Sum_Squares_Between") <- attributes(params)$Sum_Squares_Between attr(params, "Sum_Squares_Total") <- attributes(params)$Sum_Squares_Total attr(params, "means") <- means attr(params, "model") <- model attr(params, "scores") <- clusters attr(params, "type") <- "hclust" class(params) <- c("parameters_clusters", class(params)) params } #' @inheritParams n_clusters #' @rdname model_parameters.kmeans #' @examples #' \donttest{ #' # #' # pvclust (finds "significant" clusters) --------------------------- #' if (require("pvclust", quietly = TRUE)) { #' data <- iris[1:4] #' # NOTE: pvclust works on transposed data #' model <- pvclust::pvclust(datawizard::data_transpose(data), #' method.dist = "euclidean", #' nboot = 50, #' quiet = TRUE #' ) #' #' rez <- model_parameters(model, data, ci = 0.90) #' rez #' #' # Get clusters #' predict(rez) #' #' # Clusters centers in long form #' attributes(rez)$means #' #' # Between and Total Sum of Squares #' attributes(rez)$Sum_Squares_Total #' attributes(rez)$Sum_Squares_Between #' } #' } #' @export model_parameters.pvclust <- function(model, data = NULL, clusters = NULL, ci = 0.95, ...) { if (is.null(data)) { stop("This function requires the data used to compute the clustering to be provided via 'data' as it is not accessible from the clustering object itself.") } if (is.null(clusters)) { clusters <- .model_parameters_pvclust_clusters(model, data, ci)$Cluster } params <- .cluster_centers_params(data, clusters, ...) 
attr(params, "model") <- model attr(params, "type") <- "pvclust" attr(params, "title") <- "Bootstrapped Hierarchical Clustering (PVCLUST)" params } # Utils ------------------------------------------------------------------- #' @keywords internal .model_parameters_pvclust_clusters <- function(model, data, ci = 0.95) { insight::check_if_installed("pvclust") rez <- pvclust::pvpick(model, alpha = ci, pv = "si") # Assign clusters out <- data.frame() for (cluster in 1:length(rez$clusters)) { out <- rbind(out, data.frame(Cluster = cluster, Row = rez$clusters[[cluster]], stringsAsFactors = FALSE), make.row.names = FALSE, stringsAsFactors = FALSE) } # Add points not in significant clusters remaining_rows <- row.names(data)[!row.names(data) %in% out$Row] if (length(remaining_rows) > 0) out <- rbind(out, data.frame(Cluster = 0, Row = remaining_rows, stringsAsFactors = FALSE), make.row.names = FALSE, stringsAsFactors = FALSE) # Reorder according to original order of rows out <- out[order(match(out$Row, row.names(data))), ] row.names(out) <- NULL out } parameters/R/methods_crch.R0000644000175000017500000000101014133000346015517 0ustar nileshnilesh #' @export standard_error.crch <- function(model, ...) { cs <- do.call(rbind, stats::coef(summary(model), model = "full")) params <- insight::get_parameters(model) .data_frame( Parameter = params$Parameter, SE = as.vector(cs[, 2]) ) } #' @export p_value.crch <- function(model, ...) 
{ cs <- do.call(rbind, stats::coef(summary(model), model = "full")) params <- insight::get_parameters(model) .data_frame( Parameter = params$Parameter, p = as.vector(cs[, 4]) ) } parameters/R/backports.R0000644000175000017500000000050414012467213015062 0ustar nileshnilesh.str2lang <- function(s) { stopifnot(length(s) == 1L) ex <- parse(text = s, keep.source = FALSE) stopifnot(length(ex) == 1L) ex[[1L]] } isTRUE <- function(x) { is.logical(x) && length(x) == 1L && !is.na(x) && x } isFALSE <- function(x) { is.logical(x) && length(x) == 1L && !is.na(x) && !x } parameters/R/methods_stats4.R0000644000175000017500000000023113765377240016052 0ustar nileshnilesh #' @export ci.mle <- ci.glm #' @export standard_error.mle <- standard_error.mle2 #' @export model_parameters.mle <- model_parameters.glm parameters/R/methods_gamm4.R0000644000175000017500000000065513766405552015646 0ustar nileshnilesh #' @export ci.gamm4 <- function(x, ci = .95, ...) { x <- x$gam class(x) <- c("gam", "lm", "glm") ci(x, ci = ci, ...) } #' @export standard_error.gamm4 <- function(model, ...) { model <- model$gam class(model) <- c("gam", "lm", "glm") standard_error(model) } #' @export p_value.gamm4 <- function(model, ...) { model <- model$gam class(model) <- c("gam", "lm", "glm") p_value(model) } parameters/R/bootstrap_model-emmeans.R0000644000175000017500000000227514054473310017722 0ustar nileshnilesh#' @keywords emmeans_methods emm_basis.bootstrap_model <- function(object, trms, xlev, grid, ...) { insight::check_if_installed("emmeans") model <- attr(object, "original_model") emb <- emmeans::emm_basis(model, trms, xlev, grid, ...) if (ncol(object) != ncol(emb$V) || !all(colnames(object) == colnames(emb$V))) { stop("Oops! Cannot create the reference grid. Please open an issue at github.com/easystats/parameters/issues.") } emb$post.beta <- as.matrix(object) emb$misc$is_boot <- TRUE emb } #' @keywords emmeans_methods recover_data.bootstrap_model <- function(object, ...) 
{ insight::check_if_installed("emmeans") model <- attr(object, "original_model") emmeans::recover_data(model, ...) } #' @keywords emmeans_methods emm_basis.bootstrap_parameters <- function(object, trms, xlev, grid, ...) { insight::check_if_installed("emmeans") model <- attr(object, "boot_samples") emmeans::emm_basis(model, trms, xlev, grid, ...) } #' @keywords emmeans_methods recover_data.bootstrap_parameters <- function(object, ...) { insight::check_if_installed("emmeans") model <- attr(object, "boot_samples") emmeans::recover_data(model, ...) } parameters/R/ci_generic.R0000644000175000017500000001010614135275405015166 0ustar nileshnilesh.ci_generic <- function(model, ci = .95, method = "wald", dof = NULL, effects = c("fixed", "random", "all"), component = c("all", "conditional", "zi", "zero_inflated", "dispersion", "precision", "scale", "smooth_terms", "full", "marginal"), robust = FALSE, ...) { # check method if (is.null(method)) { method <- "wald" } method <- match.arg(tolower(method), choices = c( "wald", "ml1", "betwithin", "kr", "satterthwaite", "kenward", "boot", "profile", "residual", "normal" )) effects <- match.arg(effects) component <- match.arg(component) if (method == "ml1") { return(ci_ml1(model, ci = ci)) } else if (method == "betwithin") { return(ci_betwithin(model, ci = ci)) } else if (method == "satterthwaite") { return(ci_satterthwaite(model, ci = ci)) } else if (method %in% c("kenward", "kr")) { return(ci_kenward(model, ci = ci)) } out <- lapply(ci, function(i) { .ci_dof( model = model, ci = i, dof = dof, effects = effects, component = component, robust = robust, method = method, ... ) }) out <- do.call(rbind, out) row.names(out) <- NULL out } #' @keywords internal .ci_dof <- function(model, ci, dof, effects, component, robust = FALSE, method = "wald", se = NULL, ...) 
{ if (inherits(model, "emmGrid")) { params <- insight::get_parameters( model, effects = effects, component = component, merge_parameters = TRUE ) } else { params <- insight::get_parameters(model, effects = effects, component = component, verbose = FALSE ) } # check if all estimates are non-NA params <- .check_rank_deficiency(params, verbose = FALSE) # sanity check... if (is.null(method)) { method <- "wald" } method <- tolower(method) # if we have adjusted SE, e.g. from kenward-roger, don't recompute # standard errors to save time... if (is.null(se)) { stderror <- if (isTRUE(robust)) { standard_error_robust(model, component = component, ...) } else { switch(method, "kenward" = , "kr" = se_kenward(model), "satterthwaite" = se_satterthwaite(model), standard_error(model, component = component) ) } if (.is_empty_object(stderror)) { return(NULL) } # filter non-matching parameters if (nrow(stderror) != nrow(params)) { params <- stderror <- merge(stderror, params, sort = FALSE) } se <- stderror$SE } if (is.null(dof)) { # residual df dof <- degrees_of_freedom(model, method = method, verbose = FALSE) # make sure we have a value for degrees of freedom if (is.null(dof) || length(dof) == 0 || .is_chi2_model(model, dof)) { dof <- Inf } else if (length(dof) > nrow(params)) { # filter non-matching parameters dof <- dof[1:nrow(params)] } } alpha <- (1 + ci) / 2 fac <- suppressWarnings(stats::qt(alpha, df = dof)) out <- cbind( CI_low = params$Estimate - se * fac, CI_high = params$Estimate + se * fac ) out <- as.data.frame(out) out$CI <- ci out$Parameter <- params$Parameter out <- out[c("Parameter", "CI", "CI_low", "CI_high")] if ("Component" %in% names(params)) out$Component <- params$Component if ("Effects" %in% names(params) && effects != "fixed") out$Effects <- params$Effects if ("Response" %in% names(params)) out$Response <- params$Response if (anyNA(params$Estimate)) { out[stats::complete.cases(out), ] } else { out } } .is_chi2_model <- function(model, dof) { statistic <- 
insight::find_statistic(model) (all(dof == 1) && identical(statistic, "chi-squared statistic")) } parameters/R/1_model_parameters.R0000644000175000017500000007052614160324505016650 0ustar nileshnilesh# default methods, glm (almost default) #################### .default ---------------------- #' Model Parameters #' #' Compute and extract model parameters. See the documentation for your object's class: #' \itemize{ #' \item{[Correlations, t-tests, ...][model_parameters.htest] (`htest`, `pairwise.htest`)} #' \item{[ANOVAs][model_parameters.aov] (`aov`, `anova`, **afex**, ...)} #' \item{[Regression models][model_parameters.default] (`lm`, `glm`, **survey**, ...)} #' \item{[Additive models][model_parameters.cgam] (`gam`, `gamm`, ...)} #' \item{[Zero-inflated models][model_parameters.zcpglm] (`hurdle`, `zeroinfl`, `zerocount`)} #' \item{[Multinomial, ordinal and cumulative link models][model_parameters.mlm] (`bracl`, `multinom`, `mlm`, ...)} #' \item{[Other special models][model_parameters.averaging] (`model.avg`, `betareg`, `glmx`, ...)} #' \item{[Mixed models][model_parameters.merMod] (\pkg{lme4}, \pkg{nlme}, \pkg{glmmTMB}, \pkg{afex}, ...)} #' \item{[Bayesian tests][model_parameters.BFBayesFactor] (\pkg{BayesFactor})} #' \item{[Bayesian models][model_parameters.stanreg] (\pkg{rstanarm}, \pkg{brms}, \pkg{MCMCglmm}, \pkg{blavaan}, ...)} #' \item{[PCA and FA][model_parameters.principal] (\pkg{psych})} #' \item{[CFA and SEM][model_parameters.lavaan] (\pkg{lavaan})} #' \item{[Cluster models][model_parameters.kmeans] (k-means, ...)} #' \item{[Meta-Analysis via linear (mixed) models][model_parameters.rma] (`rma`, `metaplus`, \pkg{metaBMA}, ...)} #' \item{[Hypothesis testing][model_parameters.glht] (`glht`, \pkg{PMCMRplus})} #' \item{[Robust statistical tests][model_parameters.t1way] (\pkg{WRS2})} #' \item{[Multiply imputed repeated analyses][model_parameters.mira] (`mira`)} #' } #' #' @param model Statistical Model. #' @param ... Arguments passed to or from other methods. 
Non-documented #' arguments are `digits`, `p_digits`, `ci_digits` and #' `footer_digits` to set the number of digits for the output. #' `group` can also be passed to the `print()` method. See details #' in [print.parameters_model()] and 'Examples' in #' [model_parameters.default()]. #' #' @seealso [insight::standardize_names()] to #' rename columns into a consistent, standardized naming scheme. #' #' @note The [`print()`][print.parameters_model] method has several #' arguments to tweak the output. There is also a #' [`plot()`-method](https://easystats.github.io/see/articles/parameters.html) #' implemented in the #' [**see**-package](https://easystats.github.io/see/), and a dedicated #' method for use inside rmarkdown files, #' [`print_md()`][print_md.parameters_model]. #' #' @section Standardization of model coefficients: #' Standardization is based on [effectsize::standardize_parameters()]. In case #' of `standardize = "refit"`, the data used to fit the model will be #' standardized and the model is completely refitted. In such cases, standard #' errors and confidence intervals refer to the standardized coefficient. The #' default, `standardize = "refit"`, never standardizes categorical predictors #' (i.e. factors), which may be a different behaviour compared to other R #' packages or other software packages (like SPSS). To mimic behaviour of SPSS #' or packages such as \pkg{lm.beta}, use `standardize = "basic"`. #' #' @section #' #' Standardization Methods: #' #' - **refit**: This method is based on a complete model re-fit with a #' standardized version of the data. Hence, this method is equal to #' standardizing the variables before fitting the model. It is the "purest" and #' the most accurate (Neter et al., 1989), but it is also the most #' computationally costly and long (especially for heavy models such as Bayesian #' models). 
This method is particularly recommended for complex models that #' include interactions or transformations (e.g., polynomial or spline terms). #' The `robust` (default to `FALSE`) argument enables a robust standardization #' of data, i.e., based on the `median` and `MAD` instead of the `mean` and #' `SD`. **See [standardize()] for more details.** #' **Note** that `standardize_parameters(method = "refit")` may not return #' the same results as fitting a model on data that has been standardized with #' `standardize()`; `standardize_parameters()` used the data used by the model #' fitting function, which might not be same data if there are missing values. #' see the `remove_na` argument in `standardize()`. #' #' - **posthoc**: Post-hoc standardization of the parameters, aiming at #' emulating the results obtained by "refit" without refitting the model. The #' coefficients are divided by the standard deviation (or MAD if `robust`) of #' the outcome (which becomes their expression 'unit'). Then, the coefficients #' related to numeric variables are additionally multiplied by the standard #' deviation (or MAD if `robust`) of the related terms, so that they correspond #' to changes of 1 SD of the predictor (e.g., "A change in 1 SD of `x` is #' related to a change of 0.24 of the SD of `y`). This does not apply to binary #' variables or factors, so the coefficients are still related to changes in #' levels. This method is not accurate and tend to give aberrant results when #' interactions are specified. #' #' - **basic**: This method is similar to `method = "posthoc"`, but treats all #' variables as continuous: it also scales the coefficient by the standard #' deviation of model's matrix' parameter of factors levels (transformed to #' integers) or binary predictors. Although being inappropriate for these cases, #' this method is the one implemented by default in other software packages, #' such as [lm.beta::lm.beta()]. 
#' #' - **smart** (Standardization of Model's parameters with Adjustment, #' Reconnaissance and Transformation - *experimental*): Similar to `method = #' "posthoc"` in that it does not involve model refitting. The difference is #' that the SD (or MAD if `robust`) of the response is computed on the relevant #' section of the data. For instance, if a factor with 3 levels A (the #' intercept), B and C is entered as a predictor, the effect corresponding to B #' vs. A will be scaled by the variance of the response at the intercept only. #' As a results, the coefficients for effects of factors are similar to a Glass' #' delta. #' #' - **pseudo** (*for 2-level (G)LMMs only*): In this (post-hoc) method, the #' response and the predictor are standardized based on the level of prediction #' (levels are detected with [performance::check_heterogeneity_bias()]): Predictors #' are standardized based on their SD at level of prediction (see also #' [datawizard::demean()]); The outcome (in linear LMMs) is standardized based #' on a fitted random-intercept-model, where `sqrt(random-intercept-variance)` #' is used for level 2 predictors, and `sqrt(residual-variance)` is used for #' level 1 predictors (Hoffman 2015, page 342). A warning is given when a #' within-group variable is found to have access between-group variance. #' #' @section Labeling the Degrees of Freedom: #' Throughout the \pkg{parameters} package, we decided to label the residual #' degrees of freedom *df_error*. The reason for this is that these degrees #' of freedom not always refer to the residuals. For certain models, they refer #' to the estimate error - in a linear model these are the same, but in - for #' instance - any mixed effects model, this isn't strictly true. Hence, we #' think that `df_error` is the most generic label for these degrees of #' freedom. 
#' #' @section Confidence intervals and approximation of degrees of freedom: #' There are different ways of approximating the degrees of freedom depending #' on different assumptions about the nature of the model and its sampling #' distribution. The `ci_method` argument modulates the method for computing degrees #' of freedom (df) that are used to calculate confidence intervals (CI) and the #' related p-values. Following options are allowed, depending on the model #' class: #' #' **Classical methods:** #' #' Classical inference is generally based on the **Wald method**. #' The Wald approach to inference computes a test statistic by dividing the #' parameter estimate by its standard error (Coefficient / SE), #' then comparing this statistic against a t- or normal distribution. #' This approach can be used to compute CIs and p-values. #' #' `"wald"`: #' - Applies to *non-Bayesian models*. For *linear models*, CIs #' computed using the Wald method (SE and a *t-distribution with residual df*); #' p-values computed using the Wald method with a *t-distribution with residual df*. #' For other models, CIs computed using the Wald method (SE and a *normal distribution*); #' p-values computed using the Wald method with a *normal distribution*. #' #' `"normal"` #' - Applies to *non-Bayesian models*. Compute Wald CIs and p-values, #' but always use a normal distribution. #' #' `"residual"` #' - Applies to *non-Bayesian models*. Compute Wald CIs and p-values, #' but always use a *t-distribution with residual df* when possible. If the #' residual df for a model cannot be determined, a normal distribution is #' used instead. #' #' **Methods for mixed models:** #' #' Compared to fixed effects (or single-level) models, determining appropriate #' df for Wald-based inference in mixed models is more difficult. #' See [the R GLMM FAQ](https://bbolker.github.io/mixedmodels-misc/glmmFAQ.html#what-are-the-p-values-listed-by-summaryglmerfit-etc.-are-they-reliable) #' for a discussion. 
#' #' Several approximate methods for computing df are available, but you should #' also consider instead using profile likelihood (`"profile"`) or bootstrap ("`boot"`) #' CIs and p-values instead. #' #' `"satterthwaite"` #' - Applies to *linear mixed models*. CIs computed using the #' Wald method (SE and a *t-distribution with Satterthwaite df*); p-values #' computed using the Wald method with a *t-distribution with Satterthwaite df*. #' #' `"kenward"` #' - Applies to *linear mixed models*. CIs computed using the Wald #' method (*Kenward-Roger SE* and a *t-distribution with Kenward-Roger df*); #' p-values computed using the Wald method with *Kenward-Roger SE and t-distribution with Kenward-Roger df*. #' #' `"ml1"` #' - Applies to *linear mixed models*. CIs computed using the Wald #' method (SE and a *t-distribution with m-l-1 approximated df*); p-values #' computed using the Wald method with a *t-distribution with m-l-1 approximated df*. #' See [`ci_ml1()`]. #' #' `"betwithin"` #' - Applies to *linear mixed models* and *generalized linear mixed models*. #' CIs computed using the Wald method (SE and a *t-distribution with between-within df*); #' p-values computed using the Wald method with a *t-distribution with between-within df*. #' See [`ci_betwithin()`]. #' #' **Likelihood-based methods:** #' #' Likelihood-based inference is based on comparing the likelihood for the #' maximum-likelihood estimate to the the likelihood for models with one or more #' parameter values changed (e.g., set to zero or a range of alternative values). #' Likelihood ratios for the maximum-likelihood and alternative models are compared #' to a \eqn{\chi}-squared distribution to compute CIs and p-values. #' #' `"profile"` #' - Applies to *non-Bayesian models* of class `glm`, `polr` or `glmmTMB`. 
#' CIs computed by *profiling the likelihood curve for a parameter*, using #' linear interpolation to find where likelihood ratio equals a critical value; #' p-values computed using the Wald method with a *normal-distribution* (note: #' this might change in a future update!) #' #' `"uniroot"` #' - Applies to *non-Bayesian models* of class `glmmTMB`. CIs #' computed by *profiling the likelihood curve for a parameter*, using root #' finding to find where likelihood ratio equals a critical value; p-values #' computed using the Wald method with a *normal-distribution* (note: this #' might change in a future update!) #' #' **Methods for bootstrapped or Bayesian models:** #' #' Bootstrap-based inference is based on **resampling** and refitting the model #' to the resampled datasets. The distribution of parameter estimates across #' resampled datasets is used to approximate the parameter's sampling #' distribution. Depending on the type of model, several different methods for #' bootstrapping and constructing CIs and p-values from the bootstrap #' distribution are available. #' #' For Bayesian models, inference is based on drawing samples from the model #' posterior distribution. #' #' `"quantile"` (or `"eti"`) #' - Applies to *all models (including Bayesian models)*. #' For non-Bayesian models, only applies if `bootstrap = TRUE`. CIs computed #' as *equal tailed intervals* using the quantiles of the bootstrap or #' posterior samples; p-values are based on the *probability of direction*. #' See [`bayestestR::eti()`]. #' #' `"hdi"` #' - Applies to *all models (including Bayesian models)*. For non-Bayesian #' models, only applies if `bootstrap = TRUE`. CIs computed as *highest density intervals* #' for the bootstrap or posterior samples; p-values are based on the *probability of direction*. #' See [`bayestestR::hdi()`]. #' #' `"bci"` (or `"bcai"`) #' - Applies to *all models (including Bayesian models)*. #' For non-Bayesian models, only applies if `bootstrap = TRUE`. 
CIs computed
#' as *bias corrected and accelerated intervals* for the bootstrap or
#' posterior samples; p-values are based on the *probability of direction*.
#' See [`bayestestR::bci()`].
#'
#' `"si"`
#' - Applies to *Bayesian models* with proper priors. CIs computed as
#' *support intervals* comparing the posterior samples against the prior samples;
#' p-values are based on the *probability of direction*. See [`bayestestR::si()`].
#'
#' `"boot"`
#' - Applies to *non-Bayesian models* of class `merMod`. CIs computed
#' using *parametric bootstrapping* (simulating data from the fitted model);
#' p-values computed using the Wald method with a *normal-distribution*
#' (note: this might change in a future update!).
#'
#' For all iteration-based methods other than `"boot"`
#' (`"hdi"`, `"quantile"`, `"ci"`, `"eti"`, `"si"`, `"bci"`, `"bcai"`),
#' p-values are based on the probability of direction ([`bayestestR::p_direction()`]),
#' which is converted into a p-value using [`bayestestR::pd_to_p()`].
#'
#' @inheritSection format_parameters Interpretation of Interaction Terms
#'
#' @references
#'
#' - Hoffman, L. (2015). Longitudinal analysis: Modeling within-person
#' fluctuation and change. Routledge.
#'
#' - Neter, J., Wasserman, W., & Kutner, M. H. (1989). Applied linear
#' regression models.
#'
#' @return A data frame of indices related to the model's parameters.
#' @export
model_parameters <- function(model, ...) {
  UseMethod("model_parameters")
}


# DF naming convention --------------------
# Column naming convention for degrees of freedom, depending on the
# test statistic of the model (see easystats/parameters#455):
# - F has df, df_error
# - t has df_error
# - z has df_error = Inf
# - Chisq has df
# https://github.com/easystats/parameters/issues/455


#' @rdname model_parameters
#' @export
parameters <- model_parameters


#' Parameters from (General) Linear Models
#'
#' Extract and compute indices and measures to describe parameters of (general)
#' linear models (GLMs).
#'
#' @param model Model object.
#' @param ci Confidence Interval (CI) level. Default to `0.95` (`95%`).
#' @param bootstrap Should estimates be based on bootstrapped model? If
#' `TRUE`, then arguments of [Bayesian
#' regressions][model_parameters.stanreg] apply (see also
#' [`bootstrap_parameters()`][bootstrap_parameters]).
#' @param iterations The number of bootstrap replicates. This only applies in the
#' case of bootstrapped frequentist models.
#' @param standardize The method used for standardizing the parameters. Can be
#' `NULL` (default; no standardization), `"refit"` (for re-fitting the model
#' on standardized data) or one of `"basic"`, `"posthoc"`, `"smart"`,
#' `"pseudo"`. See 'Details' in [effectsize::standardize_parameters()].
#' **Important:**
#' - The `"refit"` method does *not* standardize categorical predictors (i.e.
#' factors), which may be a different behaviour compared to other R packages
#' (such as \pkg{lm.beta}) or other software packages (like SPSS). To mimic
#' such behaviours, either use `standardize="basic"` or standardize the data
#' with `datawizard::standardize(force=TRUE)` *before* fitting the model.
#' - For mixed models, when using methods other than `"refit"`, only the fixed
#' effects will be returned.
#' - Robust estimation (i.e. `robust=TRUE`) of standardized parameters only
#' works when `standardize="refit"`.
#' @param exponentiate Logical, indicating whether or not to exponentiate
#' the coefficients (and related confidence intervals). This is typical for
#' logistic regression, or more generally speaking, for models with log
#' or logit links. **Note:** Delta-method standard errors are also
#' computed (by multiplying the standard errors by the transformed
#' coefficients). This is to mimic behaviour of other software packages, such
#' as Stata, but these standard errors poorly estimate uncertainty for the
#' transformed coefficient. The transformed confidence interval more clearly
#' captures this uncertainty.
For `compare_parameters()`, #' `exponentiate = "nongaussian"` will only exponentiate coefficients #' from non-Gaussian families. #' @param robust Logical, if `TRUE`, robust standard errors are calculated #' (if possible), and confidence intervals and p-values are based on these #' robust standard errors. Additional arguments like `vcov_estimation` or #' `vcov_type` are passed down to other methods, see #' [`standard_error_robust()`][standard_error_robust] for details #' and [this vignette](https://easystats.github.io/parameters/articles/model_parameters_robust.html) #' for working examples. #' @param component Model component for which parameters should be shown. May be #' one of `"conditional"`, `"precision"` (\pkg{betareg}), #' `"scale"` (\pkg{ordinal}), `"extra"` (\pkg{glmx}), #' `"marginal"` (\pkg{mfx}), `"conditional"` or `"full"` (for #' `MuMIn::model.avg()`) or `"all"`. #' @param p_adjust Character vector, if not `NULL`, indicates the method to #' adjust p-values. See [stats::p.adjust()] for details. Further #' possible adjustment methods are `"tukey"`, `"scheffe"`, #' `"sidak"` and `"none"` to explicitly disable adjustment for #' `emmGrid` objects (from \pkg{emmeans}). #' @param ci_method Method for computing degrees of freedom for #' confidence intervals (CI) and the related p-values. Allowed are following #' options (which vary depending on the model class): `"residual"`, #' `"normal"`, `"likelihood"`, `"satterthwaite"`, `"kenward"`, `"wald"`, #' `"profile"`, `"boot"`, `"uniroot"`, `"ml1"`, `"betwithin"`, `"hdi"`, #' `"quantile"`, `"ci"`, `"eti"`, `"si"`, `"bci"`, or `"bcai"`. See section #' _Confidence intervals and approximation of degrees of freedom_ in #' [`model_parameters()`] for further details. When `ci_method=NULL`, in most #' cases `"wald"` is used then. #' @param df_method Deprecated. Please use `ci_method`. 
#' @param summary Logical, if `TRUE`, prints summary information about the #' model (model formula, number of observations, residual standard deviation #' and more). #' @param keep,drop Character containing a regular expression pattern that #' describes the parameters that should be included (for `keep`) or excluded #' (for `drop`) in the returned data frame. `keep` may also be a #' named list of regular expressions. All non-matching parameters will be #' removed from the output. If `keep` is a character vector, every parameter #' name in the *"Parameter"* column that matches the regular expression in #' `keep` will be selected from the returned data frame (and vice versa, #' all parameter names matching `drop` will be excluded). Furthermore, if #' `keep` has more than one element, these will be merged with an `OR` #' operator into a regular expression pattern like this: `"(one|two|three)"`. #' If `keep` is a named list of regular expression patterns, the names of the #' list-element should equal the column name where selection should be #' applied. This is useful for model objects where `model_parameters()` #' returns multiple columns with parameter components, like in #' [model_parameters.lavaan()]. Note that the regular expression pattern #' should match the parameter names as they are stored in the returned data #' frame, which can be different from how they are printed. Inspect the #' `$Parameter` column of the parameters table to get the exact parameter #' names. #' @param parameters Deprecated, alias for `keep`. #' @param verbose Toggle warnings and messages. #' @param ... Arguments passed to or from other methods. For instance, when #' `bootstrap = TRUE`, arguments like `type` or `parallel` are #' passed down to `bootstrap_model()`, and arguments like `ci_method` #' are passed down to [bayestestR::describe_posterior()]. #' #' @seealso [insight::standardize_names()] to #' rename columns into a consistent, standardized naming scheme. 
#' #' @inheritSection model_parameters Confidence intervals and approximation of degrees of freedom #' #' @examples #' library(parameters) #' model <- lm(mpg ~ wt + cyl, data = mtcars) #' #' model_parameters(model) #' #' # bootstrapped parameters #' model_parameters(model, bootstrap = TRUE) #' #' # standardized parameters #' model_parameters(model, standardize = "refit") #' #' # different p-value style in output #' model_parameters(model, p_digits = 5) #' model_parameters(model, digits = 3, ci_digits = 4, p_digits = "scientific") #' \donttest{ #' # logistic regression model #' model <- glm(vs ~ wt + cyl, data = mtcars, family = "binomial") #' model_parameters(model) #' #' # show odds ratio / exponentiated coefficients #' model_parameters(model, exponentiate = TRUE) #' } #' @return A data frame of indices related to the model's parameters. #' @export model_parameters.default <- function(model, ci = .95, ci_method = NULL, bootstrap = FALSE, iterations = 1000, standardize = NULL, exponentiate = FALSE, robust = FALSE, p_adjust = NULL, summary = FALSE, keep = NULL, drop = NULL, parameters = keep, verbose = TRUE, ...) { out <- tryCatch( { .model_parameters_generic( model = model, ci = ci, ci_method = ci_method, bootstrap = bootstrap, iterations = iterations, merge_by = "Parameter", standardize = standardize, exponentiate = exponentiate, robust = robust, p_adjust = p_adjust, summary = summary, keep_parameters = keep, drop_parameters = drop, verbose = verbose, ... ) }, error = function(e) { fail <- NA attr(fail, "error") <- gsub(" ", " ", gsub("\\n", "", e$message), fixed = TRUE) fail } ) if (length(out) == 1 && isTRUE(is.na(out))) { msg <- insight::format_message( paste0("Sorry, `model_parameters()` failed with the following error (possible class '", class(model)[1], "' not supported):\n"), attr(out, "error") ) stop(msg, call. 
= FALSE) } else if (is.null(out)) { stop(paste0("Sorry, `model_parameters()` does currently not work for objects of class '", class(model)[1], "'."), call. = FALSE) } attr(out, "object_name") <- deparse(substitute(model), width.cutoff = 500) out } .model_parameters_generic <- function(model, ci = .95, bootstrap = FALSE, iterations = 1000, merge_by = "Parameter", standardize = NULL, exponentiate = FALSE, effects = "fixed", component = "conditional", robust = FALSE, ci_method = NULL, p_adjust = NULL, summary = FALSE, keep_parameters = NULL, drop_parameters = NULL, verbose = TRUE, df_method = ci_method, ...) { ## TODO remove later if (!missing(df_method) && !identical(ci_method, df_method)) { warning(insight::format_message("Argument 'df_method' is deprecated. Please use 'ci_method' instead."), call. = FALSE) ci_method <- df_method } # Processing if (bootstrap) { # set default method for bootstrapped CI if (is.null(ci_method) || missing(ci_method)) { ci_method <- "quantile" } params <- bootstrap_parameters( model, iterations = iterations, ci = ci, ci_method = ci_method, ... ) } else { # set default method for CI if (is.null(ci_method) || missing(ci_method)) { ci_method <- "wald" } params <- .extract_parameters_generic( model, ci = ci, component = component, merge_by = merge_by, standardize = standardize, effects = effects, robust = robust, ci_method = ci_method, p_adjust = p_adjust, keep_parameters = keep_parameters, drop_parameters = drop_parameters, verbose = verbose, ... ) } if (isTRUE(exponentiate) || identical(exponentiate, "nongaussian")) { params <- .exponentiate_parameters(params, model, exponentiate) } params <- .add_model_parameters_attributes( params, model, ci, exponentiate, bootstrap, iterations, ci_method = ci_method, p_adjust = p_adjust, robust = robust, summary = summary, verbose = verbose, ... 
) class(params) <- c("parameters_model", "see_parameters_model", class(params)) params } #################### .glm ---------------------- #' @rdname model_parameters.default #' @export model_parameters.glm <- function(model, ci = .95, ci_method = NULL, bootstrap = FALSE, iterations = 1000, standardize = NULL, exponentiate = FALSE, robust = FALSE, p_adjust = NULL, summary = FALSE, verbose = TRUE, df_method = ci_method, ...) { # set default if (is.null(ci_method)) { ci_method <- ifelse(isTRUE(bootstrap), "quantile", "profile") } ## TODO remove later if (!missing(df_method) && !identical(ci_method, df_method)) { warning(insight::format_message("Argument 'df_method' is deprecated. Please use 'ci_method' instead."), call. = FALSE) ci_method <- df_method } if (insight::n_obs(model) > 1e4 && identical(ci_method, "profile")) { message(insight::format_message("Profiled confidence intervals may take longer time to compute. Use 'ci_method=\"wald\"' for faster computation of CIs.")) } out <- .model_parameters_generic( model = model, ci = ci, ci_method = ci_method, bootstrap = bootstrap, iterations = iterations, merge_by = "Parameter", standardize = standardize, exponentiate = exponentiate, robust = robust, p_adjust = p_adjust, summary = summary, ... ) attr(out, "object_name") <- deparse(substitute(model), width.cutoff = 500) out } parameters/R/methods_glm.R0000644000175000017500000000057713766364050015420 0ustar nileshnilesh# classes: .glm #################### .glm #' @export standard_error.glm <- function(model, method = NULL, ...) { robust <- !is.null(method) && method == "robust" if (isTRUE(robust)) { standard_error_robust(model, ...) 
} else { se <- .get_se_from_summary(model) .data_frame( Parameter = names(se), SE = as.vector(se) ) } } parameters/R/methods_mhurdle.R0000644000175000017500000000552114133044516016262 0ustar nileshnilesh#' @export model_parameters.mhurdle <- function(model, ci = .95, component = c("all", "conditional", "zi", "zero_inflated", "infrequent_purchase", "ip", "auxiliary"), exponentiate = FALSE, p_adjust = NULL, verbose = TRUE, ...) { component <- match.arg(component) params <- .model_parameters_generic( model, ci = ci, merge_by = c("Parameter", "Component"), exponentiate = exponentiate, effects = "fixed", component = component, p_adjust = p_adjust, verbose = verbose, ... ) params$Parameter <- gsub("^(h1|h2|h3)\\.(.*)", "\\2", params$Parameter) attr(params, "object_name") <- deparse(substitute(model), width.cutoff = 500) params } #' @export p_value.mhurdle <- function(model, component = c("all", "conditional", "zi", "zero_inflated", "infrequent_purchase", "ip", "auxiliary"), ...) { component <- match.arg(component) s <- summary(model) params <- insight::get_parameters(model, component = "all") pvals <- data.frame( Parameter = rownames(s$coefficients), p = as.vector(s$coefficients[, 4]), stringsAsFactors = FALSE ) params <- merge(params, pvals, sort = FALSE) if (component != "all") { params <- params[params$Component == component, , drop = FALSE] } params[c("Parameter", "p", "Component")] } #' @export ci.mhurdle <- function(x, ci = .95, ...) { .ci_generic(model = x, ci = ci, ...) } #' @export degrees_of_freedom.mhurdle <- function(model, method = NULL, ...) { .degrees_of_freedom_no_dfresid_method(model, method) } #' @export standard_error.mhurdle <- function(model, component = c("all", "conditional", "zi", "zero_inflated", "infrequent_purchase", "ip", "auxiliary"), ...) 
{ component <- match.arg(component) s <- summary(model) params <- insight::get_parameters(model, component = "all") se <- data.frame( Parameter = rownames(s$coefficients), SE = as.vector(s$coefficients[, 2]), stringsAsFactors = FALSE ) params <- merge(params, se, sort = FALSE) if (component != "all") { params <- params[params$Component == component, , drop = FALSE] } params[c("Parameter", "SE", "Component")] } #' @export simulate_model.mhurdle <- function(model, iterations = 1000, component = c("all", "conditional", "zi", "zero_inflated", "infrequent_purchase", "ip", "auxiliary"), ...) { component <- match.arg(component) out <- .simulate_model(model, iterations, component = component, effects = "fixed") class(out) <- c("parameters_simulate_model", class(out)) attr(out, "object_name") <- .safe_deparse(substitute(model)) out } parameters/R/ci_ml1.R0000644000175000017500000000064414140570005014237 0ustar nileshnilesh#' @rdname p_value_ml1 #' @export ci_ml1 <- function(model, ci = .95, robust = FALSE, ...) { df_ml1 <- dof_ml1(model) out <- lapply(ci, function(i) { .ci_dof( model = model, ci = i, effects = "fixed", component = "all", dof = df_ml1, method = "ml1", robust = robust, ... ) }) out <- do.call(rbind, out) row.names(out) <- NULL out } parameters/R/methods_robmixglm.R0000644000175000017500000000057714036353021016624 0ustar nileshnilesh #' @export standard_error.robmixglm <- function(model, ...) { se <- stats::na.omit(.get_se_from_summary(model)) .data_frame( Parameter = names(se), SE = as.vector(se) ) } #' @export p_value.robmixglm <- function(model, ...) 
{ p <- stats::na.omit(.get_pval_from_summary(model)) .data_frame( Parameter = names(p), p = as.vector(p) ) } parameters/R/utils_model_parameters.R0000644000175000017500000002651414166656741017666 0ustar nileshnilesh#' @keywords internal .add_model_parameters_attributes <- function(params, model, ci, exponentiate = FALSE, bootstrap = FALSE, iterations = 1000, ci_method = NULL, p_adjust = NULL, robust = FALSE, summary = FALSE, verbose = TRUE, group_level = FALSE, wb_component = FALSE, ...) { # capture additional arguments dot.arguments <- lapply(match.call(expand.dots = FALSE)$`...`, function(x) x) # model info info <- tryCatch( { suppressWarnings(insight::model_info(model, verbose = FALSE)) }, error = function(e) { NULL } ) ## TODO remove is.list() when insight 0.8.3 on CRAN if (is.null(info) || !is.list(info)) { info <- list(family = "unknown", link_function = "unknown") } if (!is.null(info) && insight::is_multivariate(model) && !"is_zero_inflated" %in% names(info)) { info <- info[[1]] } # add regular attributes if (isFALSE(dot.arguments$pretty_names)) { attr(params, "pretty_names") <- params$Parameter } else if (is.null(attr(params, "pretty_names", exact = TRUE))) { attr(params, "pretty_names") <- suppressWarnings(format_parameters(model, model_info = info)) } attr(params, "ci") <- ci attr(params, "ci_method") <- ci_method attr(params, "df_method") <- ci_method attr(params, "test_statistic") <- insight::find_statistic(model) attr(params, "verbose") <- verbose attr(params, "exponentiate") <- exponentiate attr(params, "ordinal_model") <- isTRUE(info$is_ordinal) | isTRUE(info$is_multinomial) attr(params, "linear_model") <- isTRUE(info$is_linear) attr(params, "mixed_model") <- isTRUE(info$is_mixed) attr(params, "n_obs") <- info$n_obs attr(params, "model_class") <- class(model) attr(params, "bootstrap") <- bootstrap attr(params, "iterations") <- iterations attr(params, "p_adjust") <- p_adjust attr(params, "robust_vcov") <- isTRUE(robust) attr(params, "ignore_group") <- 
isFALSE(group_level) attr(params, "ran_pars") <- isFALSE(group_level) attr(params, "show_summary") <- isTRUE(summary) # save if model is multivariate response model if (isTRUE(info$is_multivariate)) { attr(params, "multivariate_response") <- TRUE } # if we have a complex random-within-between model, don't show first title element if (isTRUE(wb_component) && !is.null(params$Component) && any(c("within", "between") %in% params$Component)) { attr(params, "no_caption") <- TRUE } # for summaries, add R2 if (isTRUE(summary)) { if (requireNamespace("performance", quietly = TRUE)) { rsq <- tryCatch( { suppressWarnings(performance::r2(model)) }, error = function(e) { NULL } ) attr(params, "r2") <- rsq } } # Models for which titles should be removed - # here we add exceptions for objects that should # not have a table headline if (inherits(model, c("emmGrid", "emm_list", "lm", "glm", "coxph", "bfsl"))) { attr(params, "title") <- "" } # weighted nobs weighted_nobs <- tryCatch( { w <- insight::get_weights(model, na_rm = TRUE, null_as_ones = TRUE) round(sum(w)) }, error = function(e) { NULL } ) attr(params, "weighted_nobs") <- weighted_nobs # model formula model_formula <- tryCatch( { .safe_deparse(insight::find_formula(model)$conditional) }, error = function(e) { NULL } ) attr(params, "model_formula") <- model_formula # column name for coefficients - for emm_list, we can have # multiple different names for the parameter column. for other # models, check whether we have coefficient, odds ratios, IRR etc. 
if (inherits(model, "emm_list")) { coef_col1 <- .find_coefficient_type(info, exponentiate, model[[1]]) coef_col2 <- .find_coefficient_type(info, exponentiate, model[[2]]) attr(params, "coefficient_name") <- coef_col1 attr(params, "coefficient_name2") <- coef_col2 } else { coef_col <- .find_coefficient_type(info, exponentiate, model) attr(params, "coefficient_name") <- coef_col attr(params, "zi_coefficient_name") <- ifelse(isTRUE(exponentiate), "Odds Ratio", "Log-Odds") } # special handling for meta analysis. we need additional # information about study weights if (inherits(model, c("rma", "rma.uni"))) { rma_data <- tryCatch( { insight::get_data(model, verbose = FALSE) }, error = function(e) { NULL } ) attr(params, "data") <- rma_data attr(params, "study_weights") <- 1 / model$vi } # special handling for meta analysis again, but these objects save the # inverse weighting information in a different column. if (inherits(model, c("meta_random", "meta_fixed", "meta_bma"))) { rma_data <- tryCatch( { insight::get_data(model, verbose = FALSE) }, error = function(e) { NULL } ) attr(params, "data") <- rma_data attr(params, "study_weights") <- 1 / params$SE^2 } # should coefficients be grouped? if ("groups" %in% names(dot.arguments)) { attr(params, "coef_groups") <- eval(dot.arguments[["groups"]]) } # now comes all the digits stuff... 
if ("digits" %in% names(dot.arguments)) { attr(params, "digits") <- eval(dot.arguments[["digits"]]) } else { attr(params, "digits") <- 2 } if ("ci_digits" %in% names(dot.arguments)) { attr(params, "ci_digits") <- eval(dot.arguments[["ci_digits"]]) } else { attr(params, "ci_digits") <- 2 } if ("p_digits" %in% names(dot.arguments)) { attr(params, "p_digits") <- eval(dot.arguments[["p_digits"]]) } else { attr(params, "p_digits") <- 3 } if ("footer_digits" %in% names(dot.arguments)) { attr(params, "footer_digits") <- eval(dot.arguments[["footer_digits"]]) } else { attr(params, "footer_digits") <- 3 } if ("s_value" %in% names(dot.arguments)) { attr(params, "s_value") <- eval(dot.arguments[["s_value"]]) } # add CI, and reorder if (!"CI" %in% colnames(params) && length(ci) == 1) { params$CI <- ci ci_pos <- grep("CI_low", colnames(params)) if (length(ci_pos)) { if (length(ci_pos) > 1) { ci_pos <- ci_pos[1] } a <- attributes(params) params <- params[c(1:(ci_pos - 1), ncol(params), ci_pos:(ncol(params) - 1))] attributes(params) <- utils::modifyList(a, attributes(params)) } } row.names(params) <- NULL params } .find_coefficient_type <- function(info, exponentiate, model = NULL) { # column name for coefficients coef_col <- "Coefficient" if (!is.null(model) && inherits(model, "emmGrid")) { s <- summary(model) name <- attributes(s)$estName if (!is.null(name)) { coef_col <- switch(name, "prob" = "Probability", "odds.ratio" = "Odds Ratio", "emmean" = "Marginal Means", "rate" = "Estimated Counts", "ratio" = "Ratio", "Coefficient" ) } } else if (!is.null(info)) { if (!info$family == "unknown") { if (isTRUE(exponentiate)) { if ((info$is_binomial && info$is_logit) || info$is_ordinal || info$is_multinomial || info$is_categorical) { coef_col <- "Odds Ratio" } else if (info$is_binomial && !info$is_logit) { coef_col <- "Risk Ratio" } else if (info$is_count) { coef_col <- "IRR" } } else { if (info$is_binomial || info$is_ordinal || info$is_multinomial || info$is_categorical) { coef_col <- 
"Log-Odds" } else if (info$is_count) { coef_col <- "Log-Mean" } } } } coef_col } .all_coefficient_types <- function() { c("Odds Ratio", "Risk Ratio", "IRR", "Log-Odds", "Log-Mean", "Probability", "Marginal Means", "Estimated Counts", "Ratio") } #' @keywords internal .exponentiate_parameters <- function(params, model = NULL, exponentiate = TRUE) { if (!is.null(model) && insight::model_info(model, verbose = FALSE)$is_linear && identical(exponentiate, "nongaussian")) { return(params) } columns <- grepl(pattern = "^(Coefficient|Mean|Median|MAP|Std_Coefficient|CI_|Std_CI)", colnames(params)) if (any(columns)) { if (inherits(model, "mvord")) { rows <- params$Component != "correlation" params[rows, columns] <- exp(params[rows, columns]) if (all(c("Coefficient", "SE") %in% names(params))) { params$SE[rows] <- params$Coefficient[rows] * params$SE[rows] } } else { params[columns] <- exp(params[columns]) if (all(c("Coefficient", "SE") %in% names(params))) { params$SE <- params$Coefficient * params$SE } } } params } .add_pretty_names <- function(params, model) { attr(params, "model_class") <- class(model) cp <- insight::clean_parameters(model) clean_params <- cp[cp$Parameter %in% params$Parameter, ] attr(params, "cleaned_parameters") <- stats::setNames(clean_params$Cleaned_Parameter[match(params$Parameter, clean_params$Parameter)], params$Parameter) attr(params, "pretty_names") <- stats::setNames(clean_params$Cleaned_Parameter[match(params$Parameter, clean_params$Parameter)], params$Parameter) params } #' @keywords internal .add_anova_attributes <- function(params, model, ci, test = NULL, ...) 
{ dot.arguments <- lapply(match.call(expand.dots = FALSE)$`...`, function(x) x) attr(params, "ci") <- ci attr(params, "model_class") <- class(model) attr(params, "anova_type") <- .anova_type(model) if (inherits(model, "Anova.mlm") && !identical(test, "univariate")) { attr(params, "anova_test") <- model$test } # here we add exception for objects that should not have a table headline if (inherits(model, c("aov", "anova", "lm"))) { attr(params, "title") <- "" } if ("digits" %in% names(dot.arguments)) { attr(params, "digits") <- eval(dot.arguments[["digits"]]) } else { attr(params, "digits") <- 2 } if ("ci_digits" %in% names(dot.arguments)) { attr(params, "ci_digits") <- eval(dot.arguments[["ci_digits"]]) } else { attr(params, "ci_digits") <- 2 } if ("p_digits" %in% names(dot.arguments)) { attr(params, "p_digits") <- eval(dot.arguments[["p_digits"]]) } else { attr(params, "p_digits") <- 3 } if ("s_value" %in% names(dot.arguments)) { attr(params, "s_value") <- eval(dot.arguments[["s_value"]]) } params } .additional_arguments <- function(x, value, default) { args <- attributes(x)$additional_arguments if (length(args) > 0 && value %in% names(args)) { out <- args[[value]] } else { out <- attributes(x)[[value]] } if (is.null(out)) { out <- default } out } parameters/R/methods_rstanarm.R0000644000175000017500000001616314133474167016466 0ustar nileshnilesh#' Parameters from Bayesian Models #' #' Parameters from Bayesian models. #' #' @param model Bayesian model (including SEM from \pkg{blavaan}. May also be #' a data frame with posterior samples. #' @param ci Credible Interval (CI) level. Default to `0.95` (`95%`). See #' [bayestestR::ci()] for further details. #' @param group_level Logical, for multilevel models (i.e. models with random #' effects) and when `effects = "all"` or `effects = "random"`, #' include the parameters for each group level from random effects. If #' `group_level = FALSE` (the default), only information on SD and COR #' are shown. 
#' @param component Which type of parameters to return, such as parameters for the #' conditional model, the zero-inflated part of the model, the dispersion #' term, or other auxiliary parameters be returned? Applies to models with #' zero-inflated and/or dispersion formula, or if parameters such as `sigma` #' should be included. May be abbreviated. Note that the *conditional* #' component is also called *count* or *mean* component, depending on the #' model. There are three convenient shortcuts: `component = "all"` returns #' all possible parameters. If `component = "location"`, location parameters #' such as `conditional`, `zero_inflated`, or `smooth_terms`, are returned #' (everything that are fixed or random effects - depending on the `effects` #' argument - but no auxiliary parameters). For `component = "distributional"` #' (or `"auxiliary"`), components like `sigma`, `dispersion`, or `beta` #' (and other auxiliary parameters) are returned. #' #' @inheritParams model_parameters.default #' @inheritParams bayestestR::describe_posterior #' @inheritParams insight::get_parameters #' #' @seealso [insight::standardize_names()] to #' rename columns into a consistent, standardized naming scheme. #' #' @note When `standardize = "refit"`, columns `diagnostic`, #' `bf_prior` and `priors` refer to the *original* #' `model`. If `model` is a data frame, arguments `diagnostic`, #' `bf_prior` and `priors` are ignored. \cr \cr There is also a #' [`plot()`-method](https://easystats.github.io/see/articles/parameters.html) #' implemented in the #' \href{https://easystats.github.io/see/}{\pkg{see}-package}. 
#' #' @inheritSection model_parameters Confidence intervals and approximation of degrees of freedom #' #' @examples #' \dontrun{ #' library(parameters) #' if (require("rstanarm")) { #' model <- stan_glm( #' Sepal.Length ~ Petal.Length * Species, #' data = iris, iter = 500, refresh = 0 #' ) #' model_parameters(model) #' } #' } #' @return A data frame of indices related to the model's parameters. #' @export model_parameters.stanreg <- function(model, centrality = "median", dispersion = FALSE, ci = .95, ci_method = "hdi", test = c("pd", "rope"), rope_range = "default", rope_ci = 0.95, bf_prior = NULL, diagnostic = c("ESS", "Rhat"), priors = TRUE, effects = "fixed", exponentiate = FALSE, standardize = NULL, group_level = FALSE, keep = NULL, drop = NULL, parameters = keep, verbose = TRUE, ...) { # Processing params <- .extract_parameters_bayesian( model, centrality = centrality, dispersion = dispersion, ci = ci, ci_method = ci_method, test = test, rope_range = rope_range, rope_ci = rope_ci, bf_prior = bf_prior, diagnostic = diagnostic, priors = priors, effects = effects, standardize = standardize, keep_parameters = keep, drop_parameters = drop, verbose = verbose, ... ) if (effects != "fixed") { random_effect_levels <- which(params$Effects %in% "random" & grepl("^(?!Sigma\\[)(.*)", params$Parameter, perl = TRUE)) if (length(random_effect_levels) && isFALSE(group_level)) params <- params[-random_effect_levels, ] } params <- .add_pretty_names(params, model) if (isTRUE(exponentiate) || identical(exponentiate, "nongaussian")) { params <- .exponentiate_parameters(params, model, exponentiate) } params <- .add_model_parameters_attributes( params, model, ci, exponentiate, ci_method = ci_method, verbose = verbose, ... 
) attr(params, "parameter_info") <- insight::clean_parameters(model) attr(params, "object_name") <- deparse(substitute(model), width.cutoff = 500) class(params) <- c("parameters_stan", "parameters_model", "see_parameters_model", class(params)) params } #' @export model_parameters.stanmvreg <- function(model, centrality = "median", dispersion = FALSE, ci = .95, ci_method = "hdi", test = "pd", rope_range = "default", rope_ci = 0.95, bf_prior = NULL, diagnostic = c("ESS", "Rhat"), priors = TRUE, effects = "fixed", standardize = NULL, keep = NULL, drop = NULL, parameters = keep, verbose = TRUE, ...) { # Processing params <- .extract_parameters_bayesian( model, centrality = centrality, dispersion = dispersion, ci = ci, ci_method = ci_method, test = test, rope_range = rope_range, rope_ci = rope_ci, bf_prior = bf_prior, diagnostic = diagnostic, priors = priors, effects = effects, standardize = standardize, keep_parameters = keep, drop_parameters = drop, verbose = verbose, ... ) params$Parameter <- gsub("^(.*)\\|(.*)", "\\2", params$Parameter) params <- .add_pretty_names(params, model) attr(params, "ci") <- ci attr(params, "object_name") <- deparse(substitute(model), width.cutoff = 500) class(params) <- c("parameters_model", "see_parameters_model", class(params)) params } #' @export standard_error.stanreg <- standard_error.brmsfit #' @export standard_error.mvstanreg <- standard_error.brmsfit #' @export p_value.stanreg <- p_value.BFBayesFactor parameters/R/dof.R0000644000175000017500000002431214160324505013645 0ustar nileshnilesh#' Degrees of Freedom (DoF) #' #' Estimate or extract degrees of freedom of models parameters. #' #' @param model A statistical model. 
#' @param method Can be `"analytical"` (default, DoFs are estimated based #' on the model type), `"residual"` in which case they are directly taken #' from the model if available (for Bayesian models, the goal (looking for #' help to make it happen) would be to refit the model as a frequentist one #' before extracting the DoFs), `"ml1"` (see [dof_ml1()]), `"betwithin"` #' (see [dof_betwithin()]), `"satterthwaite"` (see [`dof_satterthwaite()`]), #' `"kenward"` (see [`dof_kenward()`]) or `"any"`, which tries to extract DoF #' by any of those methods, whichever succeeds. See 'Details'. #' @param ... Currently not used. #' #' @details Methods for calculating degrees of freedom: #' \itemize{ #' \item `"analytical"` for models of class `lmerMod`, Kenward-Roger approximated degrees of freedoms are calculated, for other models, `n-k` (number of observations minus number of parameters). #' \item `"residual"` tries to extract residual degrees of freedom, and returns `Inf` if residual degrees of freedom could not be extracted. #' \item `"any"` first tries to extract residual degrees of freedom, and if these are not available, extracts analytical degrees of freedom. #' \item `"nokr"` same as `"analytical"`, but does not Kenward-Roger approximation for models of class `lmerMod`. Instead, always uses `n-k` to calculate df for any model. #' \item `"normal"` returns `Inf`. #' \item `"wald"` returns residual df for models with t-statistic, and `Inf` for all other models. #' \item `"kenward"` calls [`dof_kenward()`]. #' \item `"satterthwaite"` calls [`dof_satterthwaite()`]. #' \item `"ml1"` calls [`dof_ml1()`]. #' \item `"betwithin"` calls [`dof_betwithin()`]. #' } #' For models with z-statistic, the returned degrees of freedom for model parameters is `Inf` (unless `method = "ml1"` or `method = "betwithin"`), because there is only one distribution for the related test statistic. 
#' #' @note #' #' In many cases, `degrees_of_freedom()` returns the same as `df.residuals()`, #' or `n-k` (number of observations minus number of parameters). However, #' `degrees_of_freedom()` refers to the model's *parameters* degrees of freedom #' of the distribution for the related test statistic. Thus, for models with #' z-statistic, results from `degrees_of_freedom()` and `df.residuals()` differ. #' Furthermore, for other approximation methods like `"kenward"` or #' `"satterthwaite"`, each model parameter can have a different degree of #' freedom. #' #' @examples #' model <- lm(Sepal.Length ~ Petal.Length * Species, data = iris) #' dof(model) #' #' model <- glm(vs ~ mpg * cyl, data = mtcars, family = "binomial") #' dof(model) #' \dontrun{ #' if (require("lme4", quietly = TRUE)) { #' model <- lmer(Sepal.Length ~ Petal.Length + (1 | Species), data = iris) #' dof(model) #' } #' #' if (require("rstanarm", quietly = TRUE)) { #' model <- stan_glm( #' Sepal.Length ~ Petal.Length * Species, #' data = iris, #' chains = 2, #' refresh = 0 #' ) #' dof(model) #' } #' } #' @export degrees_of_freedom <- function(model, ...) { UseMethod("degrees_of_freedom") } #' @rdname degrees_of_freedom #' @export degrees_of_freedom.default <- function(model, method = "analytical", ...) { if (is.null(method)) { method <- "wald" } method <- tolower(method) method <- match.arg(method, choices = c( "analytical", "any", "fit", "ml1", "betwithin", "satterthwaite", "kenward", "nokr", "wald", "kr", "profile", "boot", "uniroot", "residual", "normal", "likelihood" )) if (!.dof_method_ok(model, method, ...) 
|| method %in% c("profile", "likelihood", "boot", "uniroot")) { method <- "any" } stat <- insight::find_statistic(model) # for z-statistic, always return Inf if (!is.null(stat) && stat == "z-statistic" && !(method %in% c("ml1", "betwithin"))) { if (method == "residual") { return(.degrees_of_freedom_residual(model, verbose = FALSE)) } else { return(Inf) } } # Chi2-distributions usually have 1 df if (!is.null(stat) && stat == "chi-squared statistic") { if (method == "residual") { return(.degrees_of_freedom_residual(model, verbose = FALSE)) } else { return(1) } } if (method == "any") { dof <- .degrees_of_freedom_residual(model, verbose = FALSE) if (is.null(dof) || all(is.infinite(dof)) || anyNA(dof)) { dof <- .degrees_of_freedom_analytical(model, kenward = FALSE) } } else if (method == "ml1") { dof <- dof_ml1(model) } else if (method == "wald") { dof <- .degrees_of_freedom_residual(model, verbose = FALSE) } else if (method == "normal") { dof <- Inf } else if (method == "satterthwaite") { dof <- dof_satterthwaite(model) } else if (method == "betwithin") { dof <- dof_betwithin(model) } else if (method %in% c("kenward", "kr")) { dof <- dof_kenward(model) } else if (method == "analytical") { dof <- .degrees_of_freedom_analytical(model, kenward = TRUE) } else if (method == "nokr") { dof <- .degrees_of_freedom_analytical(model, kenward = FALSE) } else { dof <- .degrees_of_freedom_residual(model) } if (!is.null(dof) && length(dof) > 0 && all(dof == 0)) { warning("Model has zero degrees of freedom!", call. 
= FALSE)
  }

  dof
}

#' @rdname degrees_of_freedom
#' @export
dof <- degrees_of_freedom




# Analytical approach ------------------------------

#' Compute "analytical" degrees of freedom.
#'
#' For linear mixed models (class `lmerMod`), uses the Kenward-Roger
#' approximation (one df per parameter) when `kenward = TRUE`; for all other
#' models, returns `n - k` (observations minus parameters), replicated once
#' per parameter.
#' @keywords internal
.degrees_of_freedom_analytical <- function(model, kenward = TRUE) {
  nparam <- n_parameters(model)
  n <- insight::n_obs(model)

  # no (countable) observations, e.g. for some model objects - treat as infinite
  if (is.null(n)) {
    n <- Inf
  }

  if (isTRUE(kenward) && inherits(model, "lmerMod")) {
    dof <- as.numeric(dof_kenward(model))
  } else {
    dof <- rep(n - nparam, nparam)
  }

  dof
}



# Model approach (Residual df) ------------------------------

#' Extract residual degrees of freedom from a fitted model.
#'
#' Tries several strategies in order: `stats::df.residual()`, the `df` element
#' of `summary()`, and (for nlme) `model$fixDF$X`. Returns `Inf` (with an
#' optional warning) when none succeeds. Bayesian models are first refitted as
#' frequentist via `bayestestR::bayesian_as_frequentist()`.
#' @keywords internal
.degrees_of_freedom_residual <- function(model, verbose = TRUE) {
  if (.is_bayesian_model(model) && !inherits(model, c("bayesx", "blmerMod", "bglmerMod"))) {
    model <- bayestestR::bayesian_as_frequentist(model)
  }

  # 1st try
  dof <- try(stats::df.residual(model), silent = TRUE)

  # 2nd try
  if (inherits(dof, "try-error") || is.null(dof) || all(is.na(dof))) {
    # FIX: use "<-" here - "dof = try(...)" passed "dof" as a *named argument*
    # to capture.output(), so "dof" was never assigned and this fallback was a
    # no-op. capture.output() is only used to silence printed output.
    junk <- utils::capture.output(dof <- try(summary(model)$df[2], silent = TRUE))
  }

  # 3rd try, nlme
  if (inherits(dof, "try-error") || is.null(dof) || all(is.na(dof))) {
    dof <- try(unname(model$fixDF$X), silent = TRUE)
  }

  # last try - give up and return Inf, warning if requested
  if (inherits(dof, "try-error") || is.null(dof) || all(is.na(dof))) {
    dof <- Inf
    if (verbose) {
      warning("Could not extract degrees of freedom.", call. = FALSE)
    }
  }

  # special cases
  # if (inherits(model, "gam")) {
  #   dof <- .dof_fit_gam(model, dof)
  # }

  dof
}


# residual df - for models with residual df, but no "df.residual()" method --------------

#' Fallback residual-df for models lacking a `df.residual()` method.
#' @keywords internal
.degrees_of_freedom_no_dfresid_method <- function(model, method = NULL) {
  if (identical(method, "normal")) {
    return(Inf)
  } else if (!is.null(method) && method %in% c("ml1", "satterthwaite", "betwithin")) {
    degrees_of_freedom.default(model, method = method)
  } else {
    .degrees_of_freedom_analytical(model, kenward = FALSE)
  }
}


# helper --------------

# Expand df for gam models: replicate df for conditional parameters and append
# the reference df ("Ref.df") of each smooth term from the model summary.
.dof_fit_gam <- function(model, dof) {
  params <- insight::find_parameters(model)
  if (!is.null(params$conditional)) {
    dof <- rep(dof, length(params$conditional))
  }
  if (!is.null(params$smooth_terms)) {
    s <- summary(model)
    dof <- c(dof, s$s.table[, "Ref.df"])
  }
  dof
}


# Helper, check args ------------------------------

# Validate that the requested df approximation method is available for this
# model class; warns and returns FALSE when it is not (callers then fall back
# to a default method).
.dof_method_ok <- function(model, method, type = "df_method", verbose = TRUE, ...) {
  if (is.null(method)) {
    return(TRUE)
  }
  method <- tolower(method)
  if (inherits(model, c("polr", "glm", "svyglm"))) {
    if (method %in% c(
      "analytical", "any", "fit", "profile",
      "residual", "wald", "nokr", "likelihood", "normal"
    )) {
      return(TRUE)
    } else {
      if (verbose) {
        warning(insight::format_message(sprintf("'%s' must be one of 'wald', 'residual' or 'profile'. Using 'wald' now.", type)), call. = FALSE)
      }
      return(FALSE)
    }
  }

  info <- insight::model_info(model, verbose = FALSE)

  if (!is.null(info) && isFALSE(info$is_mixed) && method == "boot") {
    if (verbose) {
      warning(insight::format_message(sprintf("'%s=boot' only works for mixed models of class 'merMod'. To bootstrap this model, use `bootstrap=TRUE, ci_method=\"bcai\"`.", type)), call.
= FALSE)
    }
    return(TRUE)
  }

  if (is.null(info) || !info$is_mixed) {
    # non-mixed models: Satterthwaite/Kenward-Roger are not available
    if (!(method %in% c("analytical", "any", "fit", "betwithin", "nokr", "wald", "ml1", "profile", "boot", "uniroot", "residual", "normal"))) {
      if (verbose) {
        # FIX: message had a broken quote ("'wald', normal'") - add the
        # missing opening quote around 'normal'
        warning(insight::format_message(sprintf("'%s' must be one of 'residual', 'wald', 'normal', 'profile', 'boot', 'uniroot', 'betwithin' or 'ml1'. Using 'wald' now.", type)), call. = FALSE)
      }
      return(FALSE)
    }
    return(TRUE)
  }

  # mixed models: additionally allow Satterthwaite and Kenward-Roger
  if (!(method %in% c("analytical", "any", "fit", "satterthwaite", "betwithin", "kenward", "kr", "nokr", "wald", "ml1", "profile", "boot", "uniroot", "residual", "normal"))) {
    if (verbose) {
      warning(insight::format_message(sprintf("'%s' must be one of 'residual', 'wald', 'normal', 'profile', 'boot', 'uniroot', 'kenward', 'satterthwaite', 'betwithin' or 'ml1'. Using 'wald' now.", type)), call. = FALSE)
    }
    return(FALSE)
  }

  # Satterthwaite and Kenward-Roger require *linear* mixed models
  if (!info$is_linear && method %in% c("satterthwaite", "kenward", "kr")) {
    if (verbose) {
      warning(sprintf("'%s'-degrees of freedoms are only available for linear mixed models.", method), call. = FALSE)
    }
    return(FALSE)
  }

  return(TRUE)
}


# helper

# Does this model object come from a Bayesian fitting framework?
.is_bayesian_model <- function(x) {
  inherits(x, c(
    "brmsfit", "stanfit", "MCMCglmm", "stanreg",
    "stanmvreg", "bmerMod", "BFBayesFactor", "bamlss",
    "bayesx", "mcmc", "bcplm", "bayesQR", "BGGM", "meta_random",
    "meta_fixed", "meta_bma", "blavaan", "blrm"
  ))
}
parameters/R/methods_mlm.R0000644000175000017500000001174014131014352015400 0ustar nileshnilesh# classes: .mlm


#################### .mlm


#' Parameters from multinomial or cumulative link models
#'
#' Parameters from multinomial or cumulative link models
#'
#' @param model A model with multinomial or categorical response value.
#' @inheritParams model_parameters.default
#' @inheritParams simulate_model
#'
#' @details Multinomial or cumulative link models, i.e. models where the
#' response value (dependent variable) is categorical and has more than two
#' levels, usually return coefficients for each response level.
Hence, the #' output from `model_parameters()` will split the coefficient tables #' by the different levels of the model's response. #' #' @seealso [insight::standardize_names()] to rename #' columns into a consistent, standardized naming scheme. #' #' @examples #' library(parameters) #' if (require("brglm2", quietly = TRUE)) { #' data("stemcell") #' model <- bracl( #' research ~ as.numeric(religion) + gender, #' weights = frequency, #' data = stemcell, #' type = "ML" #' ) #' model_parameters(model) #' } #' @return A data frame of indices related to the model's parameters. #' @inheritParams simulate_model #' @export model_parameters.mlm <- function(model, ci = .95, bootstrap = FALSE, iterations = 1000, standardize = NULL, exponentiate = FALSE, p_adjust = NULL, verbose = TRUE, ...) { out <- .model_parameters_generic( model = model, ci = ci, bootstrap = bootstrap, iterations = iterations, merge_by = c("Parameter", "Response"), standardize = standardize, exponentiate = exponentiate, robust = FALSE, p_adjust = p_adjust, ... ) attr(out, "object_name") <- deparse(substitute(model), width.cutoff = 500) out } #' @export standard_error.mlm <- function(model, ...) { cs <- stats::coef(summary(model)) se <- lapply(names(cs), function(x) { params <- cs[[x]] .data_frame( Parameter = rownames(params), SE = params[, "Std. Error"], Response = gsub("^Response (.*)", "\\1", x) ) }) .remove_backticks_from_parameter_names(do.call(rbind, se)) } #' @export p_value.mlm <- function(model, ...) { cs <- stats::coef(summary(model)) p <- lapply(names(cs), function(x) { params <- cs[[x]] .data_frame( Parameter = rownames(params), p = params[, "Pr(>|t|)"], Response = gsub("^Response (.*)", "\\1", x) ) }) .remove_backticks_from_parameter_names(do.call(rbind, p)) } #' @export ci.mlm <- function(x, ci = .95, ...) { if (is.null(insight::find_weights(x))) { out <- lapply(ci, function(i) { .ci <- stats::confint(x, level = i, ...) 
rn <- rownames(.ci) .data_frame( Parameter = gsub("([^\\:]+)(\\:)(.*)", "\\3", rn), CI = i, CI_low = .ci[, 1], CI_high = .ci[, 2], Response = gsub("([^\\:]+)(\\:)(.*)", "\\1", rn) ) }) out <- .remove_backticks_from_parameter_names(do.call(rbind, out)) } else { out <- .data_frame(.ci_generic(x, ci = ci, ...), Response = insight::get_parameters(x)$Response) } out } #' @export simulate_model.mlm <- function(model, iterations = 1000, ...) { responses <- insight::find_response(model, combine = FALSE) out <- .simulate_model(model, iterations, component = "conditional", effects = "fixed") cn <- paste0(colnames(out), rep(responses, each = length(colnames(out)) / length(responses))) colnames(out) <- cn class(out) <- c("parameters_simulate_model", class(out)) attr(out, "object_name") <- .safe_deparse(substitute(model)) out } #' @export simulate_parameters.mlm <- function(model, iterations = 1000, centrality = "median", ci = .95, ci_method = "quantile", test = "p-value", ...) { data <- simulate_model(model, iterations = iterations, ...) out <- .summary_bootstrap( data = data, test = test, centrality = centrality, ci = ci, ci_method = ci_method, ... ) out$Response <- NA responses <- insight::find_response(model, combine = FALSE) for (i in responses) { out$Response[grepl(paste0(i, "$"), out$Parameter)] <- i out$Parameter <- gsub(paste0(i, "$"), "", out$Parameter) } class(out) <- c("parameters_simulate", "see_parameters_simulate", class(out)) attr(out, "object_name") <- deparse(substitute(model), width.cutoff = 500) attr(out, "object_class") <- class(model) attr(out, "iterations") <- iterations attr(out, "ci") <- ci out } parameters/R/methods_plm.R0000644000175000017500000000550214133043222015402 0ustar nileshnilesh# plm package: .plm, .pgmm, .pggls # plm --------------------------- #' @export degrees_of_freedom.plm <- function(model, method = "wald", ...) 
{ if (identical(method, "normal")) { return(Inf) } else { model$df.residual } } #' @export standard_error.plm <- function(model, ...) { se <- stats::coef(summary(model)) .data_frame( Parameter = .remove_backticks_from_string(rownames(se)), SE = as.vector(se[, 2]) ) } #' @export p_value.plm <- p_value.default # pggls ------------------------ #' @export p_value.pggls <- function(model, ...) { cs <- summary(model)$CoefTable p <- cs[, 4] .data_frame( Parameter = .remove_backticks_from_string(rownames(cs)), p = as.vector(p) ) } # pgmm -------------------- #' @export model_parameters.pgmm <- function(model, ci = .95, component = c("conditional", "all"), exponentiate = FALSE, robust = TRUE, p_adjust = NULL, keep = NULL, drop = NULL, parameters = keep, verbose = TRUE, ...) { component <- match.arg(component) params <- .extract_parameters_generic( model, merge_by = c("Parameter", "Component"), ci = ci, component = component, robust = robust, p_adjust = p_adjust, keep_parameters = keep, drop_parameters = drop, ... ) if (isTRUE(exponentiate) || identical(exponentiate, "nongaussian")) { params <- .exponentiate_parameters(params, model, exponentiate) } params <- .add_model_parameters_attributes( params, model, ci, exponentiate, p_adjust = p_adjust, verbose = verbose, ... ) attr(params, "object_name") <- deparse(substitute(model), width.cutoff = 500) class(params) <- c("parameters_model", "see_parameters_model", class(params)) params } #' @export standard_error.pgmm <- function(model, component = c("conditional", "all"), ...) { component <- match.arg(component) params <- insight::get_parameters(model, component = component, ...) se <- sqrt(diag(insight::get_varcov(model, component = component, ...))) .data_frame( Parameter = params$Parameter, SE = as.vector(se) ) } #' @export ci.pgmm <- function(x, ci = .95, dof = Inf, method = NULL, robust = FALSE, component = "conditional", ...) 
{
  if (!is.null(method)) {
    method <- tolower(method)
  } else {
    method <- "wald"
  }
  .ci_generic(model = x, ci = ci, dof = dof, robust = robust, method = method, component = component)
}
parameters/R/cluster_performance.R0000644000175000017500000000555514131014351017140 0ustar nileshnilesh#' Performance of clustering models
#'
#' Compute performance indices for clustering solutions.
#'
#' @inheritParams model_parameters.kmeans
#'
#' @examples
#' # kmeans
#' model <- kmeans(iris[1:4], 3)
#' cluster_performance(model)
#' @export
cluster_performance <- function(model, ...) {
  UseMethod("cluster_performance")
}


#' @rdname cluster_performance
#' @export
cluster_performance.kmeans <- function(model, ...) {
  # total, between- and within-cluster sum of squares, as stored on the
  # "kmeans" object itself
  out <- as.data.frame(model[c("totss", "betweenss", "tot.withinss")])
  colnames(out) <- c("Sum_Squares_Total", "Sum_Squares_Between", "Sum_Squares_Within")

  # proportion of total variance accounted for by the cluster solution
  out$R2 <- out$Sum_Squares_Between / out$Sum_Squares_Total

  row.names(out) <- NULL
  class(out) <- c("performance_model", class(out))
  out
}


#' @rdname cluster_performance
#' @examples
#' # hclust
#' data <- iris[1:4]
#' model <- hclust(dist(data))
#' clusters <- cutree(model, 3)
#'
#' rez <- cluster_performance(model, data, clusters)
#' rez
#' @export
cluster_performance.hclust <- function(model, data = NULL, clusters = NULL, ...) {
  # FIX: "data" and "clusters" now default to NULL, so that the informative
  # errors below are actually reachable. Without defaults, R raised a bare
  # "argument is missing" error before the is.null() checks could run.
  if (is.null(data)) {
    stop("This function requires the data used to compute the clustering to be provided via 'data' as it is not accessible from the clustering object itself.")
  }
  if (is.null(clusters)) {
    stop("This function requires a vector of clusters assignments of same length as data to be passed, as it is not contained in the clustering object itself.")
  }
  # delegate to the "parameters_clusters" method via model_parameters()
  params <- model_parameters(model, data = data, clusters = clusters, ...)
  cluster_performance(params)
}


#' @rdname cluster_performance
#' @examples
#' # DBSCAN
#' if (require("dbscan", quietly = TRUE)) {
#'   model <- dbscan::dbscan(iris[1:4], eps = 1.45, minPts = 10)
#'
#'   rez <- cluster_performance(model, iris[1:4])
#'   rez
#' }
#' @export
cluster_performance.dbscan <- function(model, data = NULL, ...) {
  # FIX: NULL default makes the informative error below reachable
  # (see cluster_performance.hclust)
  if (is.null(data)) {
    stop("This function requires the data used to compute the clustering to be provided via 'data' as it is not accessible from the clustering object itself.")
  }
  params <- model_parameters(model, data = data, ...)
  cluster_performance(params)
}

# Base --------------------------------------------------------------------

#' @rdname cluster_performance
#' @examples
#' # Retrieve performance from parameters
#' params <- model_parameters(kmeans(iris[1:4], 3))
#' cluster_performance(params)
#' @export
cluster_performance.parameters_clusters <- function(model, ...) {
  valid <- model$Cluster != 0 & model$Cluster != "0" # Valid clusters

  # sums of squares are carried as attributes on the parameters table
  out <- data.frame(
    Sum_Squares_Total = attributes(model)$Sum_Squares_Total,
    Sum_Squares_Between = attributes(model)$Sum_Squares_Between,
    Sum_Squares_Within = sum(model$Sum_Squares[valid], na.rm = TRUE)
  )
  out$R2 <- out$Sum_Squares_Between / out$Sum_Squares_Total

  class(out) <- c("performance_model", class(out))
  out
}
parameters/R/print.parameters_model.R0000644000175000017500000003440214143162001017554 0ustar nileshnilesh#' @title Print model parameters
#' @name print.parameters_model
#'
#' @description A `print()`-method for objects from [`model_parameters()`][model_parameters].
#'
#' @param x,object An object returned by [`model_parameters()`][model_parameters].
#' @param split_components Logical, if `TRUE` (default), for models with
#' multiple components (zero-inflation, smooth terms, ...), each component is
#' printed in a separate table. If `FALSE`, model parameters are printed
#' in a single table and a `Component` column is added to the output.
#' @param select Character vector (or numeric index) of column names that should #' be printed. If `NULL` (default), all columns are printed. The shortcut #' `select = "minimal"` prints coefficient, confidence intervals and p-values, #' while `select = "short"` prints coefficient, standard errors and p-values. #' @param show_sigma Logical, if `TRUE`, adds information about the residual #' standard deviation. #' @param show_formula Logical, if `TRUE`, adds the model formula to the output. #' @param caption Table caption as string. If `NULL`, no table caption is printed. #' @param footer_digits Number of decimal places for values in the footer summary. #' @param groups Named list, can be used to group parameters in the printed output. #' List elements may either be character vectors that match the name of those #' parameters that belong to one group, or list elements can be row numbers #' of those parameter rows that should belong to one group. The names of the #' list elements will be used as group names, which will be inserted as "header #' row". A possible use case might be to emphasize focal predictors and control #' variables, see 'Examples'. Parameters will be re-ordered according to the #' order used in `groups`, while all non-matching parameters will be added #' to the end. #' @param column_width Width of table columns. Can be either `NULL`, a named #' numeric vector, or `"fixed"`. If `NULL`, the width for each table column is #' adjusted to the minimum required width. If a named numeric vector, value #' names are matched against column names, and for each match, the specified #' width is used. If `"fixed"`, and table is split into multiple components, #' columns across all table components are adjusted to have the same width. #' @param digits,ci_digits,p_digits Number of digits for rounding or #' significant figures. May also be `"signif"` to return significant #' figures or `"scientific"` to return scientific notation. 
Control the #' number of digits by adding the value as suffix, e.g. `digits = "scientific4"` #' to have scientific notation with 4 decimal places, or `digits = "signif5"` #' for 5 significant figures (see also [signif()]). #' @inheritParams insight::format_table #' #' @inheritSection format_parameters Interpretation of Interaction Terms #' @inheritSection model_parameters Labeling the Degrees of Freedom #' #' @details `summary()` is a convenient shortcut for #' `print(object, select = "minimal", show_sigma = TRUE, show_formula = TRUE)`. #' #' @return Invisibly returns the original input object. #' #' @seealso There is a dedicated method to use inside rmarkdown files, #' [`print_md()`][print_md.parameters_model]. #' #' @examples #' \donttest{ #' library(parameters) #' if (require("glmmTMB", quietly = TRUE)) { #' model <- glmmTMB( #' count ~ spp + mined + (1 | site), #' ziformula = ~mined, #' family = poisson(), #' data = Salamanders #' ) #' mp <- model_parameters(model) #' #' print(mp, pretty_names = FALSE) #' #' print(mp, split_components = FALSE) #' #' print(mp, select = c("Parameter", "Coefficient", "SE")) #' #' print(mp, select = "minimal") #' } #' #' #' # group parameters ------ #' #' data(iris) #' model <- lm( #' Sepal.Width ~ Sepal.Length + Species + Petal.Length, #' data = iris #' ) #' # don't select "Intercept" parameter #' mp <- model_parameters(model, parameters = "^(?!\\(Intercept)") #' groups <- list( #' "Focal Predictors" = c("Speciesversicolor", "Speciesvirginica"), #' "Controls" = c("Sepal.Length", "Petal.Length") #' ) #' print(mp, groups = groups) #' #' # or use row indices #' print(mp, groups = list( #' "Focal Predictors" = c(1, 4), #' "Controls" = c(2, 3) #' )) #' #' # only show coefficients, CI and p, #' # put non-matched parameters to the end #' #' data(mtcars) #' mtcars$cyl <- as.factor(mtcars$cyl) #' mtcars$gear <- as.factor(mtcars$gear) #' model <- lm(mpg ~ hp + gear * vs + cyl + drat, data = mtcars) #' #' # don't select "Intercept" parameter 
#' mp <- model_parameters(model, parameters = "^(?!\\(Intercept)") #' print(mp, groups = list( #' "Engine" = c("cyl6", "cyl8", "vs", "hp"), #' "Interactions" = c("gear4:vs", "gear5:vs") #' )) #' } #' @export print.parameters_model <- function(x, pretty_names = TRUE, split_components = TRUE, select = NULL, caption = NULL, digits = 2, ci_digits = 2, p_digits = 3, footer_digits = 3, show_sigma = FALSE, show_formula = FALSE, zap_small = FALSE, groups = NULL, column_width = NULL, ci_brackets = c("[", "]"), ...) { # save original input orig_x <- x # check if user supplied digits attributes if (missing(digits)) { digits <- .additional_arguments(x, "digits", digits) } if (missing(ci_digits)) { ci_digits <- .additional_arguments(x, "ci_digits", ci_digits) } if (missing(p_digits)) { p_digits <- .additional_arguments(x, "p_digits", p_digits) } if (missing(footer_digits)) { footer_digits <- .additional_arguments(x, "footer_digits", footer_digits) } # table caption table_caption <- .print_caption(x, caption, format = "text") # main table formatted_table <- .print_core( x = x, pretty_names = pretty_names, split_components = split_components, select = select, digits = digits, ci_digits = ci_digits, p_digits = p_digits, zap_small = zap_small, ci_width = "auto", ci_brackets = ci_brackets, format = "text", groups = groups, ... ) # if we have multiple components, we can align colum width across components here if (!is.null(column_width) && all(column_width == "fixed") && is.list(formatted_table)) { column_width <- .find_min_colwidth(formatted_table) } # footer footer <- .print_footer( x, digits = footer_digits, show_sigma = show_sigma, show_formula = show_formula ) # get attributes verbose <- .additional_arguments(x, "verbose", TRUE) # print main table cat(insight::export_table( formatted_table, format = "text", caption = table_caption, footer = footer, width = column_width, ... )) # inform about CI and df approx. 
if (isTRUE(verbose)) { .print_footer_cimethod(x) } invisible(orig_x) } #' @rdname print.parameters_model #' @export summary.parameters_model <- function(object, ...) { print( x = object, select = "minimal", show_sigma = TRUE, show_formula = TRUE, ... ) } #' @export print.parameters_simulate <- print.parameters_model #' @export print.parameters_brms_meta <- print.parameters_model # Random effects ------------------ #' @export print.parameters_random <- function(x, digits = 2, ...) { .print_random_parameters(x, digits = digits) invisible(x) } # Stan models ------------------ #' @export print.parameters_stan <- print.parameters_model #' @export summary.parameters_stan <- function(object, ...) { print(x = object, select = "minimal", show_formula = TRUE, ...) } # helper -------------------- .print_core <- function(x, pretty_names = TRUE, split_components = TRUE, select = NULL, digits = 2, ci_digits = 2, p_digits = 3, zap_small = FALSE, ci_width = "auto", ci_brackets = TRUE, format = "text", group = NULL, ...) { format( x, pretty_names = pretty_names, split_components = split_components, select = select, digits = digits, ci_digits = ci_digits, p_digits = p_digits, ci_width = ci_width, ci_brackets = ci_brackets, zap_small = zap_small, format = format, group = group, ... ) } .print_footer <- function(x, digits = 3, show_sigma = FALSE, show_formula = FALSE, format = "text") { # get attributes sigma <- attributes(x)$sigma show_summary <- isTRUE(attributes(x)$show_summary) verbose <- .additional_arguments(x, "verbose", TRUE) # override defaults. if argument "summary" is called in "model_parameters()", # this overrides the defaults... 
show_sigma <- ifelse(show_summary, TRUE, show_sigma) show_formula <- ifelse(show_summary, TRUE, show_formula) show_r2 <- .additional_arguments(x, "show_summary", FALSE) # set defaults, if necessary if (is.null(sigma)) { show_sigma <- FALSE } .format_footer( x, digits = digits, verbose = verbose, show_sigma = show_sigma, show_formula = show_formula, show_r2 = show_r2, format = format ) } .print_caption <- function(x, caption = NULL, format = "text") { no_caption <- attributes(x)$no_caption if (isTRUE(no_caption)) { return(NULL) } title_attribute <- attributes(x)$title[1] # check effects and component parts if (!is.null(x$Effects) && all(x$Effects == "random")) { eff_name <- "Random" } else { eff_name <- "Fixed" } if (!is.null(x$Component) && all(x$Component == "zero_inflated")) { zero_inflated <- " (Zero-Inflated Model)" } else { zero_inflated <- "" } if (identical(format, "html") && is.null(caption)) { if (isTRUE(attributes(x)$is_ggeffects)) { table_caption <- title_attribute } else { table_caption <- "Model Summary" } } else if (isTRUE(attributes(x)$ordinal_model)) { table_caption <- "" } else if (!is.null(title_attribute) && is.null(caption)) { if (length(title_attribute) == 1 && title_attribute == "") { table_caption <- NULL } else { table_caption <- title_attribute } } else if (!is.null(caption) && caption != "") { table_caption <- caption } else if (!is.null(caption) && caption == "") { table_caption <- NULL } else if (identical(format, "text")) { table_caption <- c(paste0("# ", eff_name, " Effects", zero_inflated), "blue") } else { table_caption <- paste0(eff_name, " Effects", zero_inflated) } table_caption } #' @keywords internal .print_random_parameters <- function(random_params, digits = 2) { insight::print_color("# Random Effects\n\n", "blue") # create SD random_params$SD <- NA var_components <- random_params$Description %in% c("Within-Group Variance", "Between-Group Variance") random_params$SD[var_components] <- sqrt(random_params$Value[var_components]) 
# format values random_params$Value <- format(sprintf("%g", round(random_params$Value, digits = digits)), justify = "right") random_params$SD[var_components] <- format(sprintf("(%g)", round(random_params$SD[var_components], digits = digits)), justify = "right") # create summary-information for each component random_params$Line <- "" random_params$Term[is.na(random_params$Term)] <- "" random_params$SD[is.na(random_params$SD)] <- "" non_empty <- random_params$Term != "" & random_params$Type != "" random_params$Line[non_empty] <- sprintf("%s (%s)", random_params$Type[non_empty], random_params$Term[non_empty]) non_empty <- random_params$Term != "" & random_params$Type == "" random_params$Line[non_empty] <- sprintf("%s", random_params$Term[non_empty]) # final fix, indentions random_params$Line <- sprintf(" %s", format(random_params$Line)) max_len <- max(nchar(random_params$Line)) + 2 out <- split(random_params, factor(random_params$Description, levels = unique(random_params$Description))) for (i in out) { if ("Within-Group Variance" %in% i$Description) { insight::print_color(format("Within-Group Variance", width = max_len), color = "blue") cat(sprintf("%s %s\n", i$Value, i$SD)) } else if ("Between-Group Variance" %in% i$Description) { insight::print_color("Between-Group Variance\n", "blue") for (j in 1:nrow(i)) { cat(sprintf("%s %s %s\n", i$Line[j], i$Value[j], i$SD[j])) } } else if ("Correlations" %in% i$Description) { insight::print_color("Correlations\n", "blue") for (j in 1:nrow(i)) { cat(sprintf("%s %s\n", i$Line[j], i$Value[j])) } } else if ("N" %in% i$Description) { insight::print_color("N (groups per factor)\n", "blue") for (j in 1:nrow(i)) { cat(sprintf(" %s%s\n", format(i$Term[j], width = max_len - 2), i$Value[j])) } } else if ("Observations" %in% i$Description) { insight::print_color(format("Observations", width = max_len), color = "blue") cat(sprintf("%s\n", i$Value)) } } } .find_min_colwidth <- function(formatted_table) { shared_cols <- 
unique(unlist(lapply(formatted_table, colnames)))
  col_width <- rep(NA, length(shared_cols))
  # seq_along() is safe when no column names are shared across tables;
  # the original 1:length(shared_cols) would yield c(1, 0) for an empty
  # vector and index shared_cols out of bounds.
  for (i in seq_along(shared_cols)) {
    # widest cell (in characters) of this column across all tables;
    # tables lacking the column contribute NA
    col_width[i] <- max(unlist(lapply(formatted_table, function(j) {
      col <- j[[shared_cols[i]]]
      if (!is.null(col)) {
        max(nchar(col))
      } else {
        NA
      }
    })))
  }
  # named vector of minimum column widths, NA entries dropped
  stats::na.omit(stats::setNames(col_width, shared_cols))
}
parameters/R/select_parameters.stanreg.R0000644000175000017500000000440714077615700020253 0ustar nileshnilesh#' @param method The method used in the variable selection. Can be `NULL`
#' (default), `"forward"` or `"L1"`. See `projpred::varsel`.
#' @param cross_validation Select with cross-validation.
#' @rdname select_parameters
#' @export
select_parameters.stanreg <- function(model,
                                      method = NULL,
                                      cross_validation = FALSE,
                                      ...) {
  insight::check_if_installed("projpred")

  if (cross_validation) {
    message("Cross-validating best parameters...")
    # capture.output() silences projpred's progress output; only the
    # assigned `selection` object is kept
    junk <- utils::capture.output(selection <- projpred::cv_varsel(model, method = method, ...))
  } else {
    selection <- projpred::varsel(model, method = method, ...)
  }

  # Visualise
  # varsel_plot(selection, stats = c('elpd', 'rmse'), deltas = TRUE)

  # Extract parameters
  projection <- projpred::project(selection, nv = projpred::suggest_size(selection), ...)
parameters <- row.names(projection$beta) # Reconstruct formula formula <- .reconstruct_formula(parameters, model) # Update model junk <- utils::capture.output(best <- suppressWarnings(stats::update(model, formula = formula, ...))) best } #' @export select_parameters.brmsfit <- select_parameters.stanreg #' @keywords internal .reconstruct_formula <- function(parameters, model) { # # Clean # if (utils::tail(parameters, 1) == "sigma") { # parameters <- parameters[1:length(parameters) - 1] # } # if (parameters[1] == "(Intercept)") { # parameters <- parameters[2:length(parameters)] # } # # # Detect interactions # interactions <- parameters[grepl(":", parameters)] # if (length(interactions) > 0) { # for (interaction in interactions) { # terms <- unlist(strsplit(interaction, ":", fixed = TRUE)) # if (length(terms) == 2) { # if (all(terms %in% parameters)) { # # replace interactions components by interactions # parameters <- parameters[!parameters %in% c(terms, interaction)] # parameters <- c(parameters, paste0(terms, collapse = " * ")) # } # } # } # } formula <- paste(parameters, collapse = " + ") formula <- paste(insight::find_response(model), "~", formula) formula } parameters/R/methods_mixmod.R0000644000175000017500000000446614142675703016136 0ustar nileshnilesh #' @export model_parameters.MixMod <- model_parameters.glmmTMB #' @export ci.MixMod <- function(x, ci = .95, component = c("all", "conditional", "zi", "zero_inflated"), robust = FALSE, verbose = TRUE, ...) { component <- match.arg(component) if (is.null(.check_component(x, component, verbose = verbose))) { return(NULL) } .ci_generic( model = x, ci = ci, dof = Inf, component = component, robust = robust ) } #' @rdname standard_error #' @export standard_error.MixMod <- function(model, effects = c("fixed", "random"), component = c("all", "conditional", "zi", "zero_inflated"), robust = FALSE, verbose = TRUE, ...) 
{ component <- match.arg(component) effects <- match.arg(effects) if (effects == "random") { insight::check_if_installed("lme4") rand.se <- lme4::ranef(model, post_vars = TRUE) vars.m <- attr(rand.se, "post_vars") all_names <- attributes(rand.se)$dimnames if (dim(vars.m[[1]])[1] == 1) { rand.se <- sqrt(unlist(vars.m)) } else { rand.se <- do.call( rbind, lapply(vars.m, function(.x) t(as.data.frame(sqrt(diag(.x))))) ) rownames(rand.se) <- all_names[[1]] colnames(rand.se) <- all_names[[2]] rand.se <- list(rand.se) names(rand.se) <- insight::find_random(model, flatten = TRUE) } rand.se } else { if (is.null(.check_component(model, component, verbose = verbose))) { return(NULL) } vc <- insight::get_varcov(model, effects = "fixed", component = "all", robust = robust) se <- sqrt(diag(vc)) x <- .data_frame( Parameter = names(se), SE = as.vector(se), Component = "conditional" ) zi_parms <- grepl("^zi_", x$Parameter) if (any(zi_parms)) { x$Component[zi_parms] <- "zero_inflated" x$Parameter[zi_parms] <- gsub("^zi_(.*)", "\\1", x$Parameter[zi_parms]) } .filter_component(x, component) } } #' @export simulate_model.MixMod <- simulate_model.glmmTMB parameters/R/methods_mixed.R0000644000175000017500000000010014111757432015720 0ustar nileshnilesh#' @export model_parameters.mixed <- model_parameters.glmmTMB parameters/R/methods_bayesQR.R0000644000175000017500000000305614104713406016170 0ustar nileshnilesh#' @export model_parameters.bayesQR <- function(model, centrality = "median", dispersion = FALSE, ci = .95, ci_method = "hdi", test = c("pd", "rope"), rope_range = "default", rope_ci = 0.95, bf_prior = NULL, diagnostic = c("ESS", "Rhat"), priors = TRUE, keep = NULL, drop = NULL, parameters = keep, verbose = TRUE, ...) 
{ # Processing params <- .extract_parameters_bayesian( model, centrality = centrality, dispersion = dispersion, ci = ci, ci_method = ci_method, test = test, rope_range = rope_range, rope_ci = rope_ci, bf_prior = bf_prior, diagnostic = diagnostic, priors = priors, keep_parameters = keep, drop_parameters = drop, verbose = verbose, ... ) attr(params, "ci") <- ci attr(params, "object_name") <- deparse(substitute(model), width.cutoff = 500) class(params) <- c("parameters_model", "see_parameters_model", class(params)) params } #' @export p_value.bayesQR <- p_value.BFBayesFactor parameters/R/methods_wrs2.R0000644000175000017500000002363414106662225015527 0ustar nileshnilesh#' Parameters from robust statistical objects in `WRS2` #' #' @param model Object from `WRS2` package. #' @param ... Arguments passed to or from other methods. #' @inheritParams model_parameters.default #' #' @examples #' if (require("WRS2") && packageVersion("WRS2") >= "1.1.3") { #' model <- t1way(libido ~ dose, data = viagra) #' model_parameters(model) #' } #' @return A data frame of indices related to the model's parameters. #' @export # anova ---------------------- model_parameters.t1way <- function(model, keep = NULL, verbose = TRUE, ...) { parameters <- .extract_wrs2_t1way(model) parameters <- .add_htest_parameters_attributes(parameters, model, ...) 
class(parameters) <- c("parameters_model", "see_parameters_model", class(parameters)) parameters } .extract_wrs2_t1way <- function(model) { fcall <- .safe_deparse(model$call) # effect sizes are by default contained for `t1way` but not `rmanova` if (grepl("^(t1way|WRS2::t1way)", fcall)) { data.frame( "F" = model$test, "df" = model$df1, "df_error" = model$df2, "p" = model$p.value, "Method" = "A heteroscedastic one-way ANOVA for trimmed means", "Estimate" = model$effsize, "CI" = 1 - model$alpha, "CI_low" = model$effsize_ci[1], "CI_high" = model$effsize_ci[2], "Effectsize" = "Explanatory measure of effect size", stringsAsFactors = FALSE ) } else if (grepl("^(rmanova|WRS2::rmanova)", fcall)) { data.frame( "F" = model$test, "df" = model$df1, "df_error" = model$df2, "p" = model$p.value, "Method" = "A heteroscedastic one-way repeated measures ANOVA for trimmed means", stringsAsFactors = FALSE ) } } #' @export model_parameters.med1way <- function(model, verbose = TRUE, ...) { parameters <- .extract_wrs2_med1way(model) parameters <- .add_htest_parameters_attributes(parameters, model, ...) class(parameters) <- c("parameters_model", "see_parameters_model", class(parameters)) parameters } .extract_wrs2_med1way <- function(model) { data.frame( "F" = model$test, "Critical value" = model$crit.val, "p" = model$p.value, "Method" = "Heteroscedastic one-way ANOVA for medians", stringsAsFactors = FALSE ) } #' @export model_parameters.dep.effect <- function(model, keep = NULL, verbose = TRUE, ...) { parameters <- .extract_wrs2_dep.effect(model, keep = keep) class(parameters) <- c("parameters_model", "see_parameters_model", class(parameters)) parameters } .extract_wrs2_dep.effect <- function(model, keep = NULL, ...) 
{ out <- as.data.frame(model) out$Parameter <- c(attributes(out)$row.names) # effectsize descriptions out$Effectsize <- c( "Algina-Keselman-Penfield robust standardized difference", # AKP "Quantile shift based on the median of the distribution of difference scores", # QS (median) "Quantile shift based on the trimmed mean of the distribution of X-Y", # QStr "P(X= "3.6.0") { #' # iterations should be higher for real analyses #' x <- n_clusters_hclust(iris[1:4], iterations = 50, ci = 0.90) #' x #' head(as.data.frame(x), n = 10) # Print 10 first rows #' plot(x) #' } #' } #' @export n_clusters_hclust <- function(x, standardize = TRUE, include_factors = FALSE, distance_method = "correlation", hclust_method = "average", ci = 0.95, iterations = 100, ...) { insight::check_if_installed("pvclust") x <- .prepare_data_clustering(x, include_factors = include_factors, standardize = standardize, ...) # pvclust works on columns, so we need to pivot the dataframe model <- pvclust::pvclust(datawizard::data_transpose(x), method.hclust = hclust_method, method.dist = distance_method, nboot = iterations, quiet = TRUE) out <- .model_parameters_pvclust_clusters(model, x, ci) attr(out, "model") <- model attr(out, "ci") <- ci attr(out, "n") <- length(unique(out$Cluster)[unique(out$Cluster) != 0]) class(out) <- c("n_clusters_hclust", class(out)) out } # Utils ------------------------------------------------------------------- #' @keywords internal .n_clusters_factoextra <- function(x, method = "wss", standardize = TRUE, include_factors = FALSE, clustering_function = stats::kmeans, n_max = 10, ...) { x <- .prepare_data_clustering(x, include_factors = include_factors, standardize = standardize, ...) insight::check_if_installed("factoextra") factoextra::fviz_nbclust(x, clustering_function, method = method, k.max = n_max, verbose = FALSE)$data } # Printing ---------------------------------------------------------------- #' @export print.n_clusters_elbow <- function(x, ...) 
{
  # one-sentence summary of the Elbow result, printed in green
  insight::print_color(paste0("The Elbow method, that aims at minimizing the total intra-cluster variation (i.e., the total within-cluster sum of square), suggests that the optimal number of clusters is ", attributes(x)$n, "."), "green")
  invisible(x)
}

#' @export
print.n_clusters_gap <- function(x, ...) {
  insight::print_color(paste0("The Gap method, that compares the total intracluster variation of k clusters with their expected values under null reference distribution of the data, suggests that the optimal number of clusters is ", attributes(x)$n, "."), "green")
  invisible(x)
}

#' @export
print.n_clusters_silhouette <- function(x, ...) {
  insight::print_color(paste0("The Silhouette method, based on the average quality of clustering, suggests that the optimal number of clusters is ", attributes(x)$n, "."), "green")
  invisible(x)
}

#' @export
print.n_clusters_dbscan <- function(x, ...) {
  insight::print_color(paste0("The DBSCAN method, based on the total clusters sum of squares, suggests that the optimal eps = ", attributes(x)$eps, " (with min. cluster size set to ", attributes(x)$min_size, "), which corresponds to ", attributes(x)$n, " clusters."), "green")
  invisible(x)
}

#' @export
print.n_clusters_hclust <- function(x, ...) {
  # typo fixed in user-facing message: "hierachical" -> "hierarchical"
  insight::print_color(paste0("The bootstrap analysis of hierarchical clustering highlighted ", attributes(x)$n, " significant clusters."), "green")
  invisible(x)
}



# Plotting ----------------------------------------------------------------

#' @export
visualisation_recipe.n_clusters_elbow <- function(x, ...)
{
  data <- as.data.frame(x)
  # Rescale the gradient onto the WSS range so both curves share one axis.
  # BUG FIX: the range must be c(min(data$WSS), max(data$WSS)); the original
  # c(min(data$WSS, max(data$WSS))) collapsed to a single value (min of
  # everything), passing a length-1 "range" to change_scale(). The sibling
  # dbscan recipe shows the intended c(min(...), max(...)) form.
  data$Gradient <- datawizard::change_scale(attributes(x)$gradient, c(min(data$WSS), max(data$WSS)))

  layers <- list()

  # Layers -----------------------
  layers[["l1"]] <- list(
    geom = "line",
    data = data,
    aes = list(x = "n_Clusters", y = "WSS", group = 1),
    size = 1
  )
  layers[["l2"]] <- list(
    geom = "point",
    data = data,
    aes = list(x = "n_Clusters", y = "WSS")
  )
  layers[["l3"]] <- list(
    geom = "line",
    data = data,
    aes = list(x = "n_Clusters", y = "Gradient", group = 1),
    size = 0.5,
    color = "red",
    linetype = "dashed"
  )
  # dotted vertical line at the suggested number of clusters
  layers[["l4"]] <- list(
    geom = "vline",
    data = data,
    xintercept = attributes(x)$n,
    linetype = "dotted"
  )
  layers[["l5"]] <- list(
    geom = "labs",
    x = "Number of Clusters",
    y = "Total Within-Clusters Sum of Squares",
    title = "Elbow Method"
  )

  # Out
  class(layers) <- c("visualisation_recipe", "see_visualisation_recipe", class(layers))
  attr(layers, "data") <- data
  layers
}

#' @export
visualisation_recipe.n_clusters_gap <- function(x, ...) {
  data <- as.data.frame(x)
  # ymin/ymax carry the Gap statistic's uncertainty band
  data$ymin <- attributes(x)$ymin
  data$ymax <- attributes(x)$ymax

  layers <- list()

  # Layers -----------------------
  layers[["l1"]] <- list(
    geom = "line",
    data = data,
    aes = list(x = "n_Clusters", y = "Gap", group = 1)
  )
  layers[["l2"]] <- list(
    geom = "pointrange",
    data = data,
    aes = list(x = "n_Clusters", y = "Gap", ymin = "ymin", ymax = "ymax")
  )
  layers[["l4"]] <- list(
    geom = "vline",
    data = data,
    xintercept = attributes(x)$n,
    linetype = "dotted"
  )
  layers[["l5"]] <- list(
    geom = "labs",
    x = "Number of Clusters",
    y = "Gap statistic",
    title = "Gap Method"
  )

  # Out
  class(layers) <- c("visualisation_recipe", "see_visualisation_recipe", class(layers))
  attr(layers, "data") <- data
  layers
}

#' @export
visualisation_recipe.n_clusters_silhouette <- function(x, ...)
{ data <- as.data.frame(x) layers <- list() # Layers ----------------------- layers[["l1"]] <- list( geom = "line", data = data, aes = list(x = "n_Clusters", y = "Silhouette", group = 1) ) layers[["l2"]] <- list( geom = "point", data = data, aes = list(x = "n_Clusters", y = "Silhouette") ) layers[["l4"]] <- list( geom = "vline", data = data, xintercept = attributes(x)$n, linetype = "dotted" ) layers[["l5"]] <- list( geom = "labs", x = "Number of Clusters", y = "Average Silhouette Width", title = "Silhouette Method" ) # Out class(layers) <- c("visualisation_recipe", "see_visualisation_recipe", class(layers)) attr(layers, "data") <- data layers } #' @export visualisation_recipe.n_clusters_dbscan <- function(x, ...) { data <- as.data.frame(x) layers <- list() # Layers ----------------------- if ("gradient" %in% names(attributes(x))) { data$gradient <- datawizard::change_scale(attributes(x)$gradient, c(min(data$eps), max(data$eps))) layers[["l1"]] <- list( geom = "line", data = data, aes = list(x = "n_Obs", y = "eps"), size = 1 ) layers[["l2"]] <- list( geom = "line", data = data, aes = list(x = "n_Obs", y = "gradient"), color = "red", linetype = "dashed" ) layers[["l3"]] <- list( geom = "hline", data = data, yintercept = attributes(x)$eps, linetype = "dotted" ) layers[["l4"]] <- list( geom = "labs", x = "Observations", y = paste0("EPS Value (min. size = ", attributes(x)$min_size, ")"), title = "DBSCAN Method" ) } else { data$y <- datawizard::change_scale(data$total_SS, c(min(data$n_Clusters), max(data$n_Clusters))) layers[["l1"]] <- list( geom = "line", data = data, aes = list(x = "eps", y = "n_Clusters"), size = 1 ) layers[["l2"]] <- list( geom = "line", data = data, aes = list(x = "eps", y = "y"), color = "red", linetype = "dashed" ) layers[["l3"]] <- list( geom = "vline", data = data, xintercept = attributes(x)$eps, linetype = "dotted" ) layers[["l4"]] <- list( geom = "labs", x = paste0("EPS Value (min. 
size = ", attributes(x)$min_size, ")"),
      # typo fixed in axis label: "CLusters" -> "Clusters"
      # (redundant single-argument paste0() dropped, value unchanged)
      y = "Number of Clusters",
      title = "DBSCAN Method"
    )
  }

  # Out
  class(layers) <- c("visualisation_recipe", "see_visualisation_recipe", class(layers))
  attr(layers, "data") <- data
  layers
}

#' @export
plot.n_clusters_elbow <- function(x, ...) {
  graphics::plot(visualisation_recipe(x, ...))
}

#' @export
plot.n_clusters_gap <- plot.n_clusters_elbow

#' @export
plot.n_clusters_silhouette <- plot.n_clusters_elbow

#' @export
plot.n_clusters_dbscan <- plot.n_clusters_elbow

#' @export
plot.n_clusters_hclust <- function(x, ...) {
  insight::check_if_installed("pvclust")
  # pvclust draws the dendrogram; pvrect highlights significant clusters
  # at the confidence level stored on the object
  graphics::plot(attributes(x)$model)
  pvclust::pvrect(attributes(x)$model, alpha = attributes(x)$ci, pv = "si")
}
parameters/R/methods_model_fit.R0000644000175000017500000000364514131305757016576 0ustar nileshnilesh## tidymodels (.model_fit)

# model parameters ---------------------

#' @export
model_parameters.model_fit <- function(model,
                                       ci = .95,
                                       effects = "fixed",
                                       component = "conditional",
                                       ci_method = "profile",
                                       bootstrap = FALSE,
                                       iterations = 1000,
                                       standardize = NULL,
                                       exponentiate = FALSE,
                                       robust = FALSE,
                                       p_adjust = NULL,
                                       verbose = TRUE,
                                       ...) {
  # tidymodels wraps the underlying engine fit in `$fit`; delegate everything
  model_parameters(
    model$fit,
    ci = ci,
    effects = effects,
    component = component,
    ci_method = ci_method,
    bootstrap = bootstrap,
    iterations = iterations,
    standardize = standardize,
    exponentiate = exponentiate,
    robust = robust,
    p_adjust = p_adjust,
    verbose = verbose,
    ...
  )
}


# ci ------------------

#' @export
ci.model_fit <- function(x, ci = .95, method = NULL, ...) {
  ci(x$fit, ci = ci, method = method, ...)
}


# standard error ------------------

#' @export
standard_error.model_fit <- function(model, ...) {
  standard_error(model$fit, ...)
}


# degrees of freedom ------------------

#' @export
degrees_of_freedom.model_fit <- function(model, ...) {
  degrees_of_freedom(model$fit, ...)
}


# p values ------------------

#' @export
p_value.model_fit <- function(model, ...) {
  p_value(model$fit, ...)
} # simulate model ------------------ #' @export simulate_model.model_fit <- function(model, iterations = 1000, ...) { simulate_model(model$fit, iterations = iterations, ...) } parameters/R/select_parameters.R0000644000175000017500000001003114077615700016577 0ustar nileshnilesh#' Automated selection of model parameters #' #' This function performs an automated selection of the 'best' parameters, #' updating and returning the "best" model. #' #' @param model A statistical model (of class `lm`, `glm`, #' `merMod`, `stanreg` or `brmsfit`). #' @param ... Arguments passed to or from other methods. #' #' @details #' \subsection{Classical lm and glm}{ #' For frequentist GLMs, `select_parameters()` performs an AIC-based #' stepwise selection. #' } #' #' \subsection{Mixed models}{ #' For mixed-effects models of class `merMod`, stepwise selection is #' based on [cAIC4::stepcAIC()]. This step function #' only searches the "best" model based on the random-effects structure, #' i.e. `select_parameters()` adds or excludes random-effects until #' the cAIC can't be improved further. #' } #' #' \subsection{Bayesian models}{ #' For Bayesian models, it uses the \pkg{projpred} package. 
#' } #' #' @examples #' model <- lm(mpg ~ ., data = mtcars) #' select_parameters(model) #' #' model <- lm(mpg ~ cyl * disp * hp * wt, data = mtcars) #' select_parameters(model) #' \donttest{ #' # lme4 ------------------------------------------- #' if (require("lme4")) { #' model <- lmer( #' Sepal.Width ~ Sepal.Length * Petal.Width * Petal.Length + (1 | Species), #' data = iris #' ) #' select_parameters(model) #' } #' } #' #' \dontrun{ #' # rstanarm ------------------------------------------- #' if (require("rstanarm") && require("projpred")) { #' model <- stan_glm( #' mpg ~ ., #' data = mtcars, #' iter = 500, refresh = 0, verbose = FALSE #' ) #' select_parameters(model, cross_validation = TRUE) #' #' model <- stan_glm( #' mpg ~ cyl * disp * hp, #' data = mtcars, #' iter = 500, refresh = 0, verbose = FALSE #' ) #' select_parameters(model, cross_validation = FALSE) #' } #' } #' #' @return The model refitted with optimal number of parameters. #' @export select_parameters <- function(model, ...) { UseMethod("select_parameters") } #' @rdname select_parameters #' @inheritParams stats::step #' @export select_parameters.lm <- function(model, direction = "both", steps = 1000, k = 2, ...) { junk <- utils::capture.output(best <- stats::step(model, trace = 0, direction = direction, steps = steps, k = k, ... )) best } #' @rdname select_parameters #' @export select_parameters.merMod <- function(model, direction = "backward", steps = 1000, ...) 
{ insight::check_if_installed("cAIC4") # Find slope and group candidates # data <- insight::get_data(model) # factors <- names(data[sapply(data, is.factor)]) # if(length(factors) == 0){ # factors <- NULL # } # nums <- names(data[sapply(data, is.numeric)]) # if(length(nums) == 0){ # nums <- NULL # } factors <- unique(c( insight::find_random(model, split_nested = FALSE, flatten = TRUE), insight::find_random(model, split_nested = TRUE, flatten = TRUE) )) factors <- gsub(":", "/", factors, fixed = TRUE) best <- suppressMessages(suppressWarnings(cAIC4::stepcAIC(model, # slopeCandidates = nums, groupCandidates = factors, direction = direction, steps = steps, allowUseAcross = TRUE )$finalModel)) # Using MuMIn's dredge(): works nicely BUT throws unnecessary warnings and requires to set global options for na.action even tho no NaNs. # The code is here: https://github.com/cran/MuMIn/blob/master/R/dredge.R Maybe it could be reimplemented? # insight::check_if_installed("MuMIn") # model <- lmer(Sepal.Width ~ Sepal.Length * Petal.Width * Petal.Length + (1 | Species), data = iris, na.action = na.fail) # summary(MuMIn::get.models(MuMIn::dredge(model), 1)[[1]]) best } parameters/R/parameters_type.R0000644000175000017500000003240114151115065016276 0ustar nileshnilesh#' Type of model parameters #' #' In a regression model, the parameters do not all have the meaning. For #' instance, the intercept has to be interpreted as theoretical outcome value #' under some conditions (when predictors are set to 0), whereas other #' coefficients are to be interpreted as amounts of change. Others, such as #' interactions, represent changes in another of the parameter. The #' `parameters_type` function attempts to retrieve information and meaning #' of parameters. 
It outputs a dataframe of information for each parameters, #' such as the `Type` (whether the parameter corresponds to a factor or a #' numeric predictor, or whether it is a (regular) interaction or a nested #' one), the `Link` (whether the parameter can be interpreted as a mean #' value, the slope of an association or a difference between two levels) and, #' in the case of interactions, which other parameters is impacted by which #' parameter. #' #' @param model A statistical model. #' @param ... Arguments passed to or from other methods. #' #' @examples #' library(parameters) #' #' model <- lm(Sepal.Length ~ Petal.Length + Species, data = iris) #' parameters_type(model) #' #' model <- lm(Sepal.Length ~ Species + poly(Sepal.Width, 2), data = iris) #' parameters_type(model) #' #' model <- lm(Sepal.Length ~ Species + poly(Sepal.Width, 2, raw = TRUE), data = iris) #' parameters_type(model) #' #' # Interactions #' model <- lm(Sepal.Length ~ Sepal.Width * Species, data = iris) #' parameters_type(model) #' #' model <- lm(Sepal.Length ~ Sepal.Width * Species * Petal.Length, data = iris) #' parameters_type(model) #' #' model <- lm(Sepal.Length ~ Species * Sepal.Width, data = iris) #' parameters_type(model) #' #' model <- lm(Sepal.Length ~ Species / Sepal.Width, data = iris) #' parameters_type(model) #' #' #' # Complex interactions #' data <- iris #' data$fac2 <- ifelse(data$Sepal.Width > mean(data$Sepal.Width), "A", "B") #' model <- lm(Sepal.Length ~ Species / fac2 / Petal.Length, data = data) #' parameters_type(model) #' #' model <- lm(Sepal.Length ~ Species / fac2 * Petal.Length, data = data) #' parameters_type(model) #' @return A data frame. #' @export parameters_type <- function(model, ...) 
{ # Get info params <- data.frame( Parameter = insight::find_parameters(model, effects = "fixed", flatten = TRUE), stringsAsFactors = FALSE ) # Special case if (inherits(model, "polr")) { params$Parameter <- gsub("Intercept: ", "", params$Parameter, fixed = TRUE) } # Special case if (inherits(model, "bracl")) { params$Parameter <- gsub("(.*):(.*)", "\\2", params$Parameter) } # Special case if (inherits(model, "DirichletRegModel")) { cf <- stats::coef(model) if (model$parametrization == "common") { pattern <- paste0("(", paste(model$varnames, collapse = "|"), ")\\.(.*)") params$Parameter <- gsub(pattern, "\\2", names(unlist(cf))) } else { params$Parameter <- gsub("(.*)\\.(.*)\\.(.*)", "\\3", names(unlist(cf))) } } # Remove "as.factor()", "log()" etc. from parameter names but save original parameter before original_parameter <- params$Parameter params$Parameter <- .clean_parameter_names(params$Parameter, full = TRUE) ## TODO can we get rid of the count_ / zero_ prefix here? if (inherits(model, c("zeroinfl", "hurdle", "zerocount"))) { params$Parameter <- gsub("^(count_|zero_)", "", params$Parameter) } data <- insight::get_data(model, verbose = FALSE) if (is.null(data) || inherits(data, "ts") || nrow(data) == 0) { return(NULL) } # convert on-the-fly-factors back from numeric to factors data[] <- lapply(data, function(i) { if (isTRUE(attributes(i)$factor)) { as.factor(i) } else { i } }) reference <- .list_factors_numerics(data, model) # Get types main <- .parameters_type_table(names = params$Parameter, data, reference) secondary <- .parameters_type_table(names = main$Secondary_Parameter, data, reference) names(secondary) <- paste0("Secondary_", names(secondary)) names(secondary)[names(secondary) == "Secondary_Secondary_Parameter"] <- "Tertiary_Parameter" out <- cbind(params, main, secondary) # Deal with nested interactions for (i in unique(paste0(out[out$Type == "interaction", "Variable"], out[out$Type == "interaction", "Secondary_Variable"]))) { interac <- 
out[paste0(out$Variable, out$Secondary_Variable) == i, ] if (!all(interac$Term %in% out$Parameter)) { out[paste0(out$Variable, out$Secondary_Variable) == i, "Type"] <- "nested" } if (all(interac$Term %in% out$Parameter)) { interac_sec_term <- interac$Secondary_Term[!is.na(interac$Secondary_Term)] if (length(interac_sec_term) && !all(interac_sec_term %in% out$Parameter)) { out[paste0(out$Variable, out$Secondary_Variable) == i, "Type"] <- "simple" } } } for (i in unique(out$Secondary_Parameter)) { if (!is.na(i) && i %in% out$Parameter) { .param_type <- out[!is.na(out$Parameter) & out$Parameter == i, "Type"] .param_secondary_type <- out[!is.na(out$Secondary_Parameter) & out$Secondary_Parameter == i, "Secondary_Type"] if (length(.param_type) == length(.param_secondary_type) || length(.param_type) == 1) { out[!is.na(out$Secondary_Parameter) & out$Secondary_Parameter == i, "Secondary_Type"] <- .param_type } } } out$Parameter <- original_parameter out } #' @keywords internal .parameters_type_table <- function(names, data, reference) { out <- lapply(names, .parameters_type, data = data, reference = reference) out <- as.data.frame(do.call(rbind, out), stringsAsFactors = FALSE) names(out) <- c("Type", "Link", "Term", "Variable", "Level", "Secondary_Parameter") out } #' @keywords internal .parameters_type <- function(name, data, reference) { if (grepl(":", name, fixed = TRUE)) { # Split var <- unlist(strsplit(name, ":", fixed = TRUE)) if (length(var) > 2) { var <- c(utils::tail(var, 1), paste0(utils::head(var, -1), collapse = ":")) } else { var <- rev(var) } # Check if any is factor types <- unlist(lapply(var, function(x, data, reference) .parameters_type_basic(x, data, reference)[1], data = data, reference = reference)) link <- ifelse(any("factor" %in% types), "Difference", "Association") # Get type main <- .parameters_type_basic(var[1], data, reference) return(c("interaction", link, main[3], main[4], main[5], var[2])) } else { .parameters_type_basic(name, data, reference) } 
} #' @keywords internal .parameters_type_basic <- function(name, data, reference, brackets = c("[", "]")) { if (is.na(name)) { return(c(NA, NA, NA, NA, NA, NA)) } # parameter type is determined here. for formatting / printing, # refer to ".format_parameter()". Make sure that pattern # processed here are not "cleaned" (i.e. removed) in # ".clean_parameter_names()" cleaned_name <- .clean_parameter_names(name, full = TRUE) cleaned_ordered_name <- gsub("(.*)((\\.|\\^).*)", "\\1", cleaned_name) # Intercept if (.in_intercepts(cleaned_name)) { return(c("intercept", "Mean", "(Intercept)", NA, NA, NA)) # Numeric } else if (cleaned_name %in% reference$numeric) { return(c("numeric", "Association", name, name, NA, NA)) # Ordered factors } else if (is.ordered(data[[cleaned_ordered_name]])) { fac <- reference$levels_parent[match(cleaned_name, reference$levels)] return(c( "ordered", "Association", name, fac, .format_ordered(gsub(fac, "", name, fixed = TRUE), brackets = brackets), NA )) # Factors } else if (cleaned_name %in% reference$levels) { fac <- reference$levels_parent[match(cleaned_name, reference$levels)] return(c( "factor", "Difference", name, fac, gsub(fac, "", name, fixed = TRUE), NA )) # Polynomials } else if (grepl("poly(", name, fixed = TRUE)) { if (grepl(", raw = TRUE", name, fixed = TRUE)) { name <- gsub(", raw = TRUE", "", name, fixed = TRUE) type <- "poly_raw" } else { type <- "poly" } var <- .poly_info(name, "name") degree <- .poly_info(name, "degree") return(c(type, "Association", name, var, degree, NA)) # Splines } else if (grepl("(bs|ns|psline|lspline|rcs|mSpline)\\(", name)) { type <- "spline" var <- gsub("(bs|ns|psline|lspline|rcs|mSpline)\\((.*)\\)(\\d)", "\\2", name) if (grepl(",", var, fixed = TRUE)) { var <- substr(var, start = 0, stop = regexpr(",", var, fixed = TRUE) - 1) } degree <- gsub("(bs|ns|psline|lspline|rcs|mSpline)\\((.*)\\)(\\d)", "\\3", name) return(c(type, "Association", name, var, degree, NA)) # log-transformation } else if 
(grepl("(log|logb|log1p|log2|log10)\\(", name)) { type <- "logarithm" var <- gsub("(log|logb|log1p|log2|log10)\\((.*)\\)", "\\2", name) if (grepl(",", var, fixed = TRUE)) { var <- substr(var, start = 0, stop = regexpr(",", var, fixed = TRUE) - 1) } return(c(type, "Association", name, var, NA, NA)) # exp-transformation } else if (grepl("(exp|expm1)\\(", name)) { type <- "exponentiation" var <- gsub("(exp|expm1)\\((.*)\\)", "\\2", name) if (grepl(",", var, fixed = TRUE)) { var <- substr(var, start = 0, stop = regexpr(",", var, fixed = TRUE) - 1) } return(c(type, "Association", name, var, NA, NA)) # sqrt-transformation } else if (grepl("sqrt\\(", name)) { type <- "squareroot" var <- gsub("sqrt\\((.*)\\)", "\\1", name) if (grepl(",", var, fixed = TRUE)) { var <- substr(var, start = 0, stop = regexpr(",", var, fixed = TRUE) - 1) } return(c(type, "Association", name, var, NA, NA)) # As Is } else if (grepl("^I\\(", name)) { type <- "asis" var <- gsub("^I\\((.*)\\)", "\\1", name) return(c(type, "Association", name, var, NA, NA)) # Smooth } else if (grepl("^s\\(", name)) { return(c("smooth", "Association", name, NA, NA, NA)) # Smooth } else if (grepl("^smooth_", name)) { return(c("smooth", "Association", gsub("^smooth_(.*)\\[(.*)\\]", "\\2", name), NA, NA, NA)) } else { return(c("unknown", NA, NA, NA, NA, NA)) } } #' @keywords internal .poly_info <- function(x, what = "degree") { if (what == "degree") { subs <- "\\4" } else { subs <- "\\2" } p <- "(.*)poly\\((.*),\\s(.*)\\)(.*)" tryCatch( { trimws(sub(p, replacement = subs, x)) }, error = function(x) { 1 } ) } #' @keywords internal .list_factors_numerics <- function(data, model) { out <- list() # retrieve numerics .check_for_numerics <- function(x) { is.numeric(x) && !isTRUE(attributes(x)$factor) } out$numeric <- names(data[sapply(data, .check_for_numerics)]) # get contrast coding contrast_coding <- tryCatch( { model$contrasts }, error = function(e) { NULL } ) # if contrasts are given as matrix, find related contrast name 
if (!is.null(contrast_coding)) { contrast_coding <- lapply(contrast_coding, function(i) { if (is.array(i)) { cn <- colnames(i) if (is.null(cn)) { if (rowMeans(i)[1] == -1) { i <- "contr.helmert" } else { i <- "contr.sum" } } else if (cn[1] == ".L") { i <- "contr.poly" } else if (cn[1] == "2") { i <- "contr.treatment2" } else if (cn[1] == "1") { i <- "contr.SAS2" } else { i <- "contr.custom" attr(i, "column_names") <- cn } } i }) } # Ordered factors out$ordered <- names(data[sapply(data, is.ordered)]) # Factors out$factor <- names(data[sapply(data, is.factor) | sapply(data, is.character)]) out$levels <- NA out$levels_parent <- NA for (fac in out$factor) { if ((fac %in% out$ordered && is.null(contrast_coding[[fac]])) || (!is.null(contrast_coding[[fac]]) && any(contrast_coding[[fac]] %in% "contr.poly"))) { levels <- paste0(fac, c(".L", ".Q", ".C", paste0("^", 4:1000))[1:length(unique(data[[fac]]))]) } else if (!is.null(contrast_coding[[fac]]) && any(contrast_coding[[fac]] %in% c("contr.SAS2", "contr.sum", "contr.bayes", "contr.helmert"))) { levels <- paste0(fac, 1:length(unique(data[[fac]]))) } else if (!is.null(contrast_coding[[fac]]) && any(contrast_coding[[fac]] %in% c("contr.treatment2"))) { levels <- paste0(fac, 2:length(unique(data[[fac]]))) } else if (!is.null(contrast_coding[[fac]]) && any(contrast_coding[[fac]] %in% c("contr.SAS"))) { levels <- paste0(fac, rev(unique(data[[fac]]))) } else if (!is.null(contrast_coding[[fac]]) && any(contrast_coding[[fac]] %in% c("contr.custom"))) { levels <- paste0(fac, attributes(contrast_coding[[fac]])$column_names) } else { levels <- paste0(fac, unique(data[[fac]])) } out$levels_parent <- c(out$levels_parent, rep(fac, length(levels))) out$levels <- c(out$levels, levels) } out$levels <- out$levels[!is.na(out$levels)] out$levels_parent <- out$levels_parent[!is.na(out$levels_parent)] out } parameters/R/methods_gjrm.R0000644000175000017500000000351714044454046015570 0ustar nileshnilesh#' @export model_parameters.SemiParBIV <- 
function(model, ci = .95, bootstrap = FALSE, iterations = 1000, standardize = NULL, exponentiate = FALSE, p_adjust = NULL, verbose = TRUE, ...) { out <- .model_parameters_generic( model = model, ci = ci, bootstrap = bootstrap, iterations = iterations, component = "all", merge_by = c("Parameter", "Component"), standardize = standardize, exponentiate = exponentiate, p_adjust = p_adjust, ... ) attr(out, "object_name") <- deparse(substitute(model), width.cutoff = 500) out } #' @export p_value.SemiParBIV <- function(model, ...) { s <- summary(model) s <- .compact_list(s[grepl("^tableP", names(s))]) params <- do.call(rbind, lapply(1:length(s), function(i) { out <- as.data.frame(s[[i]]) out$Parameter <- rownames(out) out$Component <- paste0("Equation", i) out })) colnames(params)[4] <- "p" rownames(params) <- NULL .remove_backticks_from_parameter_names(params[c("Parameter", "p", "Component")]) } #' @export standard_error.SemiParBIV <- function(model, ...) { s <- summary(model) s <- .compact_list(s[grepl("^tableP", names(s))]) params <- do.call(rbind, lapply(1:length(s), function(i) { out <- as.data.frame(s[[i]]) out$Parameter <- rownames(out) out$Component <- paste0("Equation", i) out })) colnames(params)[2] <- "SE" rownames(params) <- NULL .remove_backticks_from_parameter_names(params[c("Parameter", "SE", "Component")]) } parameters/R/methods_lmodel2.R0000644000175000017500000000331614073076665016175 0ustar nileshnilesh# lmodel2 #' @export model_parameters.lmodel2 <- function(model, ci = .95, exponentiate = FALSE, p_adjust = NULL, verbose = TRUE, ...) { if (!missing(ci)) { if (isTRUE(verbose)) { message(insight::format_message("'lmodel2' models do not support other levels for confidence intervals than 0.95. 
Argument 'ci' is ignored.")) } ci <- .95 } out <- .model_parameters_generic( model = model, ci = ci, bootstrap = FALSE, iterations = 10, merge_by = c("Parameter", "Component"), standardize = NULL, exponentiate = exponentiate, robust = FALSE, p_adjust = p_adjust, verbose = verbose, ... ) attr(out, "object_name") <- deparse(substitute(model), width.cutoff = 500) out } #' @export standard_error.lmodel2 <- function(model, ...) { NULL } #' @export p_value.lmodel2 <- function(model, ...) { res <- model$regression.results data.frame( Parameter = rep(c("Intercept", "Slope"), each = nrow(res)), p = utils::stack(res, select = 5)[[1]], Component = rep(res$Method, 2), stringsAsFactors = FALSE, row.names = NULL ) } #' @export ci.lmodel2 <- function(x, ...) { res <- x$confidence.intervals data.frame( Parameter = rep(c("Intercept", "Slope"), each = nrow(res)), CI = 95, CI_low = utils::stack(res, select = c(2, 4))[[1]], CI_high = utils::stack(res, select = c(3, 5))[[1]], Component = rep(res$Method, 2), stringsAsFactors = FALSE, row.names = NULL ) } parameters/R/methods_sem.R0000644000175000017500000000141514133000560015373 0ustar nileshnilesh #' @export model_parameters.sem <- model_parameters.default #' @export standard_error.sem <- function(model, ...) { if (!.is_semLme(model)) { return(NULL) } if (is.null(model$se)) { warning(insight::format_message("Model has no standard errors. Please fit model again with bootstrapped standard errors."), call. = FALSE) return(NULL) } .data_frame( Parameter = names(model$se), SE = unname(model$se) ) } #' @export p_value.sem <- function(model, ...) 
# body of p_value.sem() (signature on the previous line): p-values for
# "sem" objects, based on a normal approximation of the test statistic
{
  # only semLme-objects are supported here; everything else returns NULL
  if (!.is_semLme(model)) {
    return(NULL)
  }
  stat <- insight::get_statistic(model)
  if (is.null(stat)) {
    return(NULL)
  }
  # two-sided p-value from the standard normal distribution
  .data_frame(
    Parameter = stat$Parameter,
    p = 2 * stats::pnorm(abs(stat$Statistic), lower.tail = FALSE)
  )
}
parameters/R/methods_fitdistr.R0000644000175000017500000000157514013143275016456 0ustar nileshnilesh#' @export
model_parameters.fitdistr <- function(model, exponentiate = FALSE, verbose = TRUE, ...) {
  # build the coefficient table directly from the fitdistr-object:
  # the "estimate" element holds the coefficients, "sd" their standard errors
  out <- data.frame(
    Parameter = names(model$estimate),
    Coefficient = as.vector(model$estimate),
    SE = as.vector(model$sd),
    stringsAsFactors = FALSE
  )
  # exponentiate coefficients on request ("nongaussian" also triggers it)
  if (isTRUE(exponentiate) || identical(exponentiate, "nongaussian")) {
    out <- .exponentiate_parameters(out, model, exponentiate)
  }
  class(out) <- c("parameters_model", "see_parameters_model", class(out))
  # remember the name of the model object, used later when printing
  attr(out, "object_name") <- deparse(substitute(model), width.cutoff = 500)
  out
}

#' @export
standard_error.fitdistr <- function(model, ...) {
  # the "sd" element of the fitdistr-object is used as standard error
  data.frame(
    Parameter = names(model$estimate),
    SE = as.vector(model$sd),
    stringsAsFactors = FALSE
  )
}
parameters/R/methods_dbscan.R0000644000175000017500000000246614135275207016066 0ustar nileshnilesh#' @rdname model_parameters.kmeans
#' @inheritParams cluster_centers
#'
#' @examples
#' \donttest{
#' # DBSCAN ---------------------------
#' if (require("dbscan", quietly = TRUE)) {
#' model <- dbscan::dbscan(iris[1:4], eps = 1.45, minPts = 10)
#'
#' rez <- model_parameters(model, iris[1:4])
#' rez
#'
#' # Get clusters
#' predict(rez)
#'
#' # Clusters centers in long form
#' attributes(rez)$means
#'
#' # Between and Total Sum of Squares
#' attributes(rez)$Sum_Squares_Total
#' attributes(rez)$Sum_Squares_Between
#'
#' # HDBSCAN
#' model <- dbscan::hdbscan(iris[1:4], minPts = 10)
#' model_parameters(model, iris[1:4])
#' }
#' }
#' @export
model_parameters.dbscan <- function(model, data = NULL, clusters = NULL, ...)
{ if (is.null(data)) { stop("This function requires the data used to compute the clustering to be provided via 'data' as it is not accessible from the clustering object itself.") } if (is.null(clusters)) { clusters <- model$cluster } params <- .cluster_centers_params(data, clusters, ...) attr(params, "model") <- model attr(params, "type") <- "dbscan" attr(params, "title") <- ifelse(inherits(model, "hdbscan"), "HDBSCAN", "DBSCAN") params } #' @export model_parameters.hdbscan <- model_parameters.dbscan parameters/R/methods_estimatr.R0000644000175000017500000000024413766351257016465 0ustar nileshnilesh #' @export ci.lm_robust <- ci.default #' @export standard_error.lm_robust <- standard_error.default #' @export p_value.lm_robust <- p_value.default parameters/R/methods_multgee.R0000644000175000017500000000016214133000510016242 0ustar nileshnilesh #' @export standard_error.LORgee <- standard_error.default #' @export p_value.LORgee <- p_value.default parameters/R/standard_error_kenward.R0000644000175000017500000000050514036353021017614 0ustar nileshnilesh#' @rdname p_value_kenward #' @export se_kenward <- function(model) { .check_REML_fit(model) vcov_adj <- .vcov_kenward_ajusted(model) params <- insight::get_parameters(model, effects = "fixed") .data_frame( Parameter = params$Parameter, SE = abs(as.vector(sqrt(diag(as.matrix(vcov_adj))))) ) } parameters/R/n_parameters.R0000644000175000017500000000011014036353021015540 0ustar nileshnilesh#' @importFrom insight n_parameters #' @export insight::n_parameters parameters/R/print_md.R0000644000175000017500000002113414123312055014704 0ustar nileshnilesh# normal print ---------------------------- #' @rdname display.parameters_model #' @export print_md.parameters_model <- function(x, pretty_names = TRUE, split_components = TRUE, select = NULL, caption = NULL, subtitle = NULL, footer = NULL, align = NULL, digits = 2, ci_digits = 2, p_digits = 3, footer_digits = 3, ci_brackets = c("(", ")"), show_sigma = FALSE, show_formula = FALSE, 
zap_small = FALSE, groups = NULL, verbose = TRUE, ...) { # check if user supplied digits attributes if (missing(digits)) { digits <- .additional_arguments(x, "digits", digits) } if (missing(ci_digits)) { ci_digits <- .additional_arguments(x, "ci_digits", ci_digits) } if (missing(p_digits)) { p_digits <- .additional_arguments(x, "p_digits", p_digits) } if (missing(footer_digits)) { footer_digits <- .additional_arguments(x, "footer_digits", footer_digits) } # table caption table_caption <- .print_caption(x, caption, format = "markdown") # main table formatted_table <- .print_core( x = x, pretty_names = pretty_names, split_components = split_components, select = select, digits = digits, ci_digits = ci_digits, p_digits = p_digits, zap_small = zap_small, ci_width = NULL, ci_brackets = ci_brackets, format = "markdown", groups = groups, ... ) # replace brackets by parenthesis if (!is.null(ci_brackets) && "Parameter" %in% colnames(formatted_table)) { formatted_table$Parameter <- gsub("[", ci_brackets[1], formatted_table$Parameter, fixed = TRUE) formatted_table$Parameter <- gsub("]", ci_brackets[2], formatted_table$Parameter, fixed = TRUE) } # footer footer <- .print_footer( x, digits = footer_digits, show_sigma = show_sigma, show_formula = show_formula, format = "markdown" ) insight::export_table( formatted_table, format = "markdown", caption = table_caption, subtitle = subtitle, footer = footer, align = "firstleft", ... ) } #' @export print_md.parameters_stan <- print_md.parameters_model #' @export print_md.parameters_brms_meta <- print_md.parameters_model #' @export print_md.parameters_simulate <- print_md.parameters_model # compare parameters ------------------------- #' @export print_md.compare_parameters <- function(x, digits = 2, ci_digits = 2, p_digits = 3, caption = NULL, subtitle = NULL, footer = NULL, style = NULL, ...) 
{ # check if user supplied digits attributes if (missing(digits)) { digits <- .additional_arguments(x, "digits", digits) } if (missing(ci_digits)) { ci_digits <- .additional_arguments(x, "ci_digits", ci_digits) } if (missing(p_digits)) { p_digits <- .additional_arguments(x, "p_digits", p_digits) } # get attributes if (missing(style) || is.null(style)) { style <- attributes(x)$output_style } formatted_table <- format( x, style, split_components = TRUE, digits = digits, ci_digits = ci_digits, p_digits = p_digits, ci_width = NULL, ci_brackets = c("(", ")"), format = "markdown" ) insight::export_table( formatted_table, format = "markdown", caption = caption, subtitle = subtitle, footer = footer ) } # SEM print ---------------------------- #' @export print_md.parameters_sem <- function(x, digits = 2, ci_digits = 2, p_digits = 3, ci_brackets = c("(", ")"), ...) { # check if user supplied digits attributes # check if user supplied digits attributes if (missing(digits)) { digits <- .additional_arguments(x, "digits", digits) } if (missing(ci_digits)) { ci_digits <- .additional_arguments(x, "ci_digits", ci_digits) } if (missing(p_digits)) { p_digits <- .additional_arguments(x, "p_digits", p_digits) } formatted_table <- format(x = x, digits = digits, ci_digits, p_digits = p_digits, format = "markdown", ci_width = NULL, ci_brackets = ci_brackets, ...) insight::export_table(formatted_table, format = "markdown", align = "firstleft", ...) } # PCA / EFA / CFA ---------------------------- #' @export print_md.parameters_efa_summary <- function(x, digits = 3, ...) 
{ table_caption <- "(Explained) Variance of Components" if ("Parameter" %in% names(x)) { x$Parameter <- c("Eigenvalues", "Variance Explained", "Variance Explained (Cumulative)", "Variance Explained (Proportion)") } else if ("Component" %in% names(x)) { names(x) <- c("Component", "Eigenvalues", "Variance Explained", "Variance Explained (Cumulative)", "Variance Explained (Proportion)") } insight::export_table(x, digits = digits, format = "markdown", caption = table_caption, align = "firstleft") } #' @export print_md.parameters_pca_summary <- print_md.parameters_efa_summary #' @export print_md.parameters_efa <- function(x, digits = 2, sort = FALSE, threshold = NULL, labels = NULL, ...) { .print_parameters_cfa_efa(x, threshold = threshold, sort = sort, format = "markdown", digits = digits, labels = labels, ...) } #' @export print_md.parameters_pca <- print_md.parameters_efa # Equivalence test ---------------------------- #' @export print_md.equivalence_test_lm <- function(x, digits = 2, ci_brackets = c("(", ")"), zap_small = FALSE, ...) { rule <- attributes(x)$rule rope <- attributes(x)$rope if (!is.null(rule)) { if (rule == "cet") { table_caption <- "Conditional Equivalence Testing" } else if (rule == "classic") { table_caption <- "TOST-test for Practical Equivalence" } else { table_caption <- "Test for Practical Equivalence" } } else { table_caption <- "Test for Practical Equivalence" } if ("Component" %in% colnames(x)) { x <- x[x$Component %in% c("conditional", "count"), ] } formatted_table <- insight::format_table(x, pretty_names = TRUE, digits = digits, ci_width = NULL, ci_brackets = ci_brackets, zap_small = zap_small, ...) 
colnames(formatted_table)[which(colnames(formatted_table) == "Equivalence (ROPE)")] <- "H0" formatted_table$ROPE <- NULL # col_order <- c("Parameter", "H0", "% in ROPE", colnames(formatted_table)[grepl(" CI$", colnames(formatted_table))]) # col_order <- c(col_order, setdiff(colnames(formatted_table), col_order)) # formatted_table <- formatted_table[col_order] # replace brackets by parenthesis if (!is.null(ci_brackets) && "Parameter" %in% colnames(formatted_table)) { formatted_table$Parameter <- gsub("[", ci_brackets[1], formatted_table$Parameter, fixed = TRUE) formatted_table$Parameter <- gsub("]", ci_brackets[2], formatted_table$Parameter, fixed = TRUE) } if (!is.null(rope)) { names(formatted_table)[names(formatted_table) == "% in ROPE"] <- sprintf("%% in ROPE (%.*f, %.*f)", digits, rope[1], digits, rope[2]) } insight::export_table(formatted_table, format = "markdown", caption = table_caption, align = "firstleft") } # distribution print ---------------------------- #' @export print_md.parameters_distribution <- function(x, digits = 2, ci_brackets = c("(", ")"), ...) { formatted_table <- format(x = x, digits = digits, format = "markdown", ci_width = NULL, ci_brackets = ci_brackets, ...) insight::export_table(formatted_table, format = "markdown", align = "firstleft", ...) } # Reexports models ------------------------ #' @importFrom insight print_md #' @export insight::print_md parameters/R/methods_mgcv.R0000644000175000017500000000152613770346453015572 0ustar nileshnilesh#' @export model_parameters.gamm <- function(model, ci = .95, bootstrap = FALSE, iterations = 1000, verbose = TRUE, ...) { model <- model$gam class(model) <- c("gam", "lm", "glm") model_parameters( model, ci = ci, bootstrap = bootstrap, iterations = iterations, robust = FALSE, ... ) } #' @export ci.gamm <- ci.gamm4 #' @export standard_error.gamm <- standard_error.gamm4 #' @export p_value.gamm <- p_value.gamm4 #' @export simulate_model.gamm <- function(model, iterations = 1000, ...) 
{ model <- model$gam class(model) <- c("gam", "lm", "glm") simulate_model(model, iterations = iterations, ...) } parameters/R/methods_brms.R0000644000175000017500000001741014142156524015570 0ustar nileshnilesh#' @rdname model_parameters.stanreg #' @inheritParams insight::get_parameters #' @export model_parameters.brmsfit <- function(model, centrality = "median", dispersion = FALSE, ci = .95, ci_method = "hdi", test = c("pd", "rope"), rope_range = "default", rope_ci = 0.95, bf_prior = NULL, diagnostic = c("ESS", "Rhat"), priors = FALSE, effects = "fixed", component = "all", exponentiate = FALSE, standardize = NULL, group_level = FALSE, keep = NULL, drop = NULL, parameters = keep, verbose = TRUE, ...) { modelinfo <- insight::model_info(model, verbose = FALSE) # Bayesian meta analysis if (!insight::is_multivariate(model) && isTRUE(modelinfo$is_meta)) { params <- .model_parameters_brms_meta( model, centrality = centrality, dispersion = dispersion, ci = ci, ci_method = ci_method, test = test, rope_range = rope_range, rope_ci = rope_ci, diagnostic = diagnostic, priors = priors, exponentiate = exponentiate, standardize = standardize, keep_parameters = keep, drop_parameters = drop, ... ) } else { # Processing params <- .extract_parameters_bayesian( model, centrality = centrality, dispersion = dispersion, ci = ci, ci_method = ci_method, test = test, rope_range = rope_range, rope_ci = rope_ci, bf_prior = bf_prior, diagnostic = diagnostic, priors = priors, effects = effects, component = component, standardize = standardize, keep_parameters = keep, drop_parameters = drop, verbose = verbose, ... 
) if (!(effects == "fixed" && component == "conditional")) { random_effect_levels <- which(params$Effects == "random" & grepl("^(?!sd_|cor_)(.*)", params$Parameter, perl = TRUE) & !(params$Parameter %in% c("car", "sdcar"))) if (length(random_effect_levels) && isFALSE(group_level)) params <- params[-random_effect_levels, ] } params <- .add_pretty_names(params, model) if (isTRUE(exponentiate) || identical(exponentiate, "nongaussian")) { params <- .exponentiate_parameters(params, model, exponentiate) } params <- .add_model_parameters_attributes(params, model, ci, exponentiate, ci_method = ci_method, verbose = verbose, ... ) attr(params, "parameter_info") <- insight::clean_parameters(model) attr(params, "object_name") <- deparse(substitute(model), width.cutoff = 500) class(params) <- unique(c("parameters_stan", "see_parameters_model", "parameters_model", class(params))) } params } # brms meta analysis ------- .model_parameters_brms_meta <- function(model, centrality = "median", dispersion = FALSE, ci = .95, ci_method = "hdi", test = c("pd", "rope"), rope_range = "default", rope_ci = 0.95, diagnostic = c("ESS", "Rhat"), priors = FALSE, exponentiate = FALSE, standardize = NULL, keep_parameters = NULL, drop_parameters = NULL, verbose = TRUE, ...) { # parameters smd <- insight::get_parameters(model, effects = "fixed", component = "conditional") studies <- insight::get_parameters(model, effects = "random", parameters = "^(?!sd_)") studies[] <- lapply(studies, function(i) i + smd[[1]]) tau <- insight::get_parameters(model, effects = "random", parameters = "^sd_") params <- bayestestR::describe_posterior( cbind(studies, smd), centrality = centrality, dispersion = dispersion, ci = ci, ci_method = ci_method, test = test, rope_range = rope_range, rope_ci = rope_ci, ... ) params_diagnostics <- bayestestR::diagnostic_posterior( model, effects = "all", diagnostic = diagnostic, ... 
) params_tau <- bayestestR::describe_posterior( tau, centrality = centrality, dispersion = dispersion, ci = ci, ci_method = ci_method, test = test, rope_range = rope_range, rope_ci = rope_ci, ... ) # add weights params$Weight <- 1 / c(insight::get_response(model)[[2]], NA) # merge description with diagnostic params <- merge(params, params_diagnostics, by = "Parameter", all.x = TRUE, sort = FALSE) # Renaming re_name <- insight::find_random(model, flatten = TRUE) study_names <- gsub(sprintf("r_%s\\[(.*)\\]", re_name[1]), "\\1", colnames(studies)) # replace dots by white space study_names <- gsub(".", " ", study_names, fixed = TRUE) # remove "Intercept" study_names <- trimws(gsub(",Intercept", "", study_names, fixed = TRUE)) cleaned_parameters <- c(study_names, "Overall", "tau") # components params$Component <- "Studies" params_tau$Component <- "tau" # merge with tau params <- merge(params, params_tau, all = TRUE, sort = FALSE) # reorder columns ci_column <- which(colnames(params) == "CI_high") weight_column <- which(colnames(params) == "Weight") first_cols <- c(1:ci_column, weight_column) params <- params[, c(first_cols, seq_len(ncol(params))[-first_cols])] # filter parameters, if requested if (!is.null(keep_parameters) || !is.null(drop_parameters)) { params <- .filter_parameters(params, keep = keep_parameters, drop = drop_parameters, verbose = verbose ) } # add attributes attr(params, "tau") <- params_tau attr(params, "pretty_names") <- cleaned_parameters attr(params, "cleaned_parameters") <- cleaned_parameters attr(params, "ci") <- ci attr(params, "ci_method") <- ci_method attr(params, "exponentiate") <- exponentiate attr(params, "model_class") <- class(model) attr(params, "is_bayes_meta") <- TRUE attr(params, "study_weights") <- params$Weight attr(params, "data") <- cbind(studies, smd, tau) class(params) <- unique(c("parameters_brms_meta", "see_parameters_brms_meta", class(params))) params } #' @export standard_error.brmsfit <- function(model, effects = c("fixed", 
"random"), component = c("all", "conditional", "zi", "zero_inflated"), ...) { effects <- match.arg(effects) component <- match.arg(component) params <- insight::get_parameters(model, effects = effects, component = component, ...) .data_frame( Parameter = colnames(params), SE = unname(sapply(params, stats::sd, na.rm = TRUE)) ) } #' @export p_value.brmsfit <- p_value.BFBayesFactor parameters/R/format_df_adjust.R0000644000175000017500000000175614132744765016434 0ustar nileshnilesh#' Format the name of the degrees-of-freedom adjustment methods #' #' Format the name of the degrees-of-freedom adjustment methods. #' #' @param method Name of the method. #' @param approx_string,dof_string Suffix added to the name of the method in #' the returned string. #' #' @examples #' library(parameters) #' #' format_df_adjust("kenward") #' format_df_adjust("kenward", approx_string = "", dof_string = " DoF") #' @return A formatted string. #' @export format_df_adjust <- function(method, approx_string = "-approximated", dof_string = " degrees of freedom") { method <- tolower(method) out <- switch(method, "kr" = , "kenward-rogers" = , "kenward-roger" = , "kenward" = "Kenward-Roger", "ml1" = "m-l-1", "betwithin" = , "bw" = "Between-within", "fit" = "Residual", "boot" = "Bootstrapped", .capitalize(method) ) paste0(out, approx_string, dof_string) } parameters/R/methods_flexsurvreg.R0000644000175000017500000000125114133050002017155 0ustar nileshnilesh #' @export standard_error.flexsurvreg <- function(model, ...) { params <- insight::find_parameters(model, flatten = TRUE) se <- model$res[rownames(model$res) %in% params, "se"] .data_frame( Parameter = .remove_backticks_from_string(names(se)), SE = as.vector(se) ) } ## TODO add ci_method later? #' @export p_value.flexsurvreg <- function(model, ...) 
# body of p_value.flexsurvreg() (signature on the previous line):
# two-sided Wald-type p-values based on the t-distribution
{
  params <- insight::get_parameters(model)
  est <- params$Estimate
  se <- standard_error(model)$SE
  # test statistic |estimate / SE| against t-distribution with
  # model-specific degrees of freedom
  p <- 2 * stats::pt(abs(est / se), df = degrees_of_freedom(model, method = "any"), lower.tail = FALSE)
  .data_frame(
    Parameter = params$Parameter,
    p = as.vector(p)
  )
}
parameters/R/methods_AER.R0000644000175000017500000000127214133000251015224 0ustar nileshnilesh# classes: .tobit

# The `AER::ivreg` is being spun off to a separate package. The methods in
# `methods_ivreg.R` should work for objects produced by `AER`.

#################### .tobit ------

#' @export
p_value.tobit <- function(model, method = NULL, ...) {
  # compute default p-values, then keep only rows for the parameters that
  # insight::get_parameters() returns (NOTE(review): presumably this drops
  # auxiliary parameters such as "Log(scale)" -- verify)
  params <- insight::get_parameters(model)
  p <- p_value.default(model, method = method, ...)
  p[p$Parameter %in% params$Parameter, ]
}

#' @export
simulate_model.tobit <- simulate_model.default

#' @export
standard_error.tobit <- function(model, ...) {
  # same filtering logic as in p_value.tobit(): restrict default standard
  # errors to the parameters returned by insight::get_parameters()
  params <- insight::get_parameters(model)
  std.error <- standard_error.default(model, ...)
  std.error[std.error$Parameter %in% params$Parameter, ]
}
parameters/R/methods_gee.R0000644000175000017500000000315514142675377015374 0ustar nileshnilesh
#' @export
standard_error.geeglm <- standard_error.default

#' @export
standard_error.gee <- function(model, method = NULL, robust = FALSE, ...) {
  # the coefficient table from summary(gee-object) provides both naive and
  # robust (sandwich) standard errors; "robust" switches between the columns
  cs <- stats::coef(summary(model))
  if (isTRUE(robust)) {
    se <- as.vector(cs[, "Robust S.E."])
  } else {
    se <- as.vector(cs[, "Naive S.E."])
  }
  .data_frame(Parameter = .remove_backticks_from_string(rownames(cs)), SE = se)
}

#' @export
p_value.gee <- function(model, method = NULL, robust = FALSE, ...)
{ cs <- stats::coef(summary(model)) if (is.null(method)) { method <- "any" } if (isTRUE(robust)) { p <- 2 * stats::pt(abs(cs[, "Estimate"] / cs[, "Robust S.E."]), df = degrees_of_freedom(model, method = method), lower.tail = FALSE) } else { p <- 2 * stats::pt(abs(cs[, "Estimate"] / cs[, "Naive S.E."]), df = degrees_of_freedom(model, method = method), lower.tail = FALSE) } .data_frame( Parameter = .remove_backticks_from_string(rownames(cs)), p = as.vector(p) ) } #' @export ci.geeglm <- function(x, ci = .95, method = "wald", ...) { .ci_generic(x, ci = ci, method = method, ...) } #' @export p_value.geeglm <- function(model, method = "wald", ...) { stat <- insight::get_statistic(model) if (!is.null(stat)) { if (identical(method, "residual")) { dof <- degrees_of_freedom(model, method = "residual") p <- as.vector(2 * stats::pt(sqrt(abs(stat$Statistic)), df = dof, lower.tail = FALSE)) } else { p <- as.vector(1 - stats::pchisq(stat$Statistic, df = 1)) } .data_frame( Parameter = stat$Parameter, p = p ) } } parameters/R/methods_psych.R0000644000175000017500000001720114160324505015745 0ustar nileshnilesh#' Parameters from Structural Models (PCA, EFA, ...) #' #' Format structural models from the \pkg{psych} or \pkg{FactoMineR} packages. #' #' @param model PCA or FA created by the \pkg{psych} or \pkg{FactoMineR} #' packages (e.g. through `psych::principal`, `psych::fa` or `psych::omega`). #' @inheritParams principal_components #' @param labels A character vector containing labels to be added to the #' loadings data. Usually, the question related to the item. #' @param ... Arguments passed to or from other methods. #' #' @details #' For the structural models obtained with \pkg{psych}, the following indices #' are present: #' #' - **Complexity** (\cite{Hoffman's, 1978; Pettersson and Turkheimer, #' 2010}) represents the number of latent components needed to account for #' the observed variables. 
Whereas a perfect simple structure solution has a
#' complexity of 1 in that each item would only load on one factor, a
#' solution with evenly distributed items has a complexity greater than 1.
#'
#' - **Uniqueness** represents the variance that is 'unique' to the
#' variable and not shared with other variables. It is equal to `1 –
#' communality` (variance that is shared with other variables). A uniqueness
#' of `0.20` suggests that `20%` of that variable's variance is not shared
#' with other variables in the overall factor model. The greater 'uniqueness'
#' the lower the relevance of the variable in the factor model.
#'
#' - **MSA** represents the Kaiser-Meyer-Olkin Measure of Sampling
#' Adequacy (\cite{Kaiser and Rice, 1974}) for each item. It indicates
#' whether there is enough data for each factor to give reliable results for the
#' PCA. The value should be > 0.6, and desirable values are > 0.8
#' (\cite{Tabachnick and Fidell, 2013}).
#'
#' @examples
#' \donttest{
#' library(parameters)
#' if (require("psych", quietly = TRUE)) {
#' # Principal Component Analysis (PCA) ---------
#' pca <- psych::principal(attitude)
#' model_parameters(pca)
#'
#' pca <- psych::principal(attitude, nfactors = 3, rotate = "none")
#' model_parameters(pca, sort = TRUE, threshold = 0.2)
#'
#' principal_components(attitude, n = 3, sort = TRUE, threshold = 0.2)
#'
#'
#' # Exploratory Factor Analysis (EFA) ---------
#' efa <- psych::fa(attitude, nfactors = 3)
#' model_parameters(efa, threshold = "max", sort = TRUE, labels = as.character(1:ncol(attitude)))
#'
#'
#' # Omega ---------
#' omega <- psych::omega(mtcars, nfactors = 3)
#' params <- model_parameters(omega)
#' params
#' summary(params)
#' }
#'
#' # FactoMineR ---------
#' if (require("FactoMineR", quietly = TRUE)) {
#' model <- FactoMineR::PCA(iris[, 1:4], ncp = 2)
#' model_parameters(model)
#' attributes(model_parameters(model))$scores
#'
#' model <- FactoMineR::FAMD(iris, ncp = 2)
#' model_parameters(model)
#' }
#' }
#'
#' 
@return A data frame of loadings. #' @references #' - Kaiser, H.F. and Rice. J. (1974). Little jiffy, mark iv. Educational and #' Psychological Measurement, 34(1):111–117 #' #' - Pettersson, E., \& Turkheimer, E. (2010). Item selection, evaluation, and #' simple structure in personality data. Journal of research in personality, #' 44(4), 407-420. #' #' - Revelle, W. (2016). How To: Use the psych package for Factor Analysis and #' data reduction. #' #' - Tabachnick, B. G., and Fidell, L. S. (2013). Using multivariate statistics #' (6th ed.). Boston: Pearson Education. #' #' @export model_parameters.principal <- function(model, sort = FALSE, threshold = NULL, labels = NULL, verbose = TRUE, ...) { # n n <- model$factors # Get summary variance <- as.data.frame(unclass(model$Vaccounted)) data_summary <- .data_frame( Component = names(variance), Eigenvalues = model$values[1:n], Variance = as.numeric(variance["Proportion Var", ]) ) if ("Cumulative Var" %in% row.names(variance)) { data_summary$Variance_Cumulative <- as.numeric(variance["Cumulative Var", ]) } else { if (ncol(variance) == 1) { data_summary$Variance_Cumulative <- as.numeric(variance["Proportion Var", ]) } else { data_summary$Variance_Cumulative <- NA } } data_summary$Variance_Proportion <- data_summary$Variance / sum(data_summary$Variance) # Get loadings loadings <- as.data.frame(unclass(model$loadings)) # Format loadings <- cbind(data.frame(Variable = row.names(loadings)), loadings) row.names(loadings) <- NULL # Labels if (!is.null(labels)) { loadings$Label <- labels loadings <- loadings[c("Variable", "Label", names(loadings)[!names(loadings) %in% c("Variable", "Label")])] loading_cols <- 3:(n + 2) } else { loading_cols <- 2:(n + 1) } # Add information loadings$Complexity <- model$complexity loadings$Uniqueness <- model$uniquenesses loadings$MSA <- attributes(model)$MSA # Add attributes attr(loadings, "summary") <- data_summary attr(loadings, "model") <- model attr(loadings, "rotation") <- model$rotation 
attr(loadings, "scores") <- model$scores attr(loadings, "additional_arguments") <- list(...) attr(loadings, "n") <- n attr(loadings, "type") <- model$fn attr(loadings, "loadings_columns") <- loading_cols # Sorting if (isTRUE(sort)) { loadings <- .sort_loadings(loadings) } # Replace by NA all cells below threshold if (!is.null(threshold)) { loadings <- .filter_loadings(loadings, threshold = threshold) } # Add some more attributes attr(loadings, "loadings_long") <- .long_loadings(loadings, threshold = threshold, loadings_columns = loading_cols) # here we match the original columns in the data set with the assigned components # for each variable, so we know which column in the original data set belongs # to which extracted component... attr(loadings, "closest_component") <- .closest_component(loadings, loadings_columns = loading_cols, variable_names = rownames(model$loadings)) # add class-attribute for printing if (model$fn == "principal") { class(loadings) <- unique(c("parameters_pca", "see_parameters_pca", class(loadings))) } else { class(loadings) <- unique(c("parameters_efa", "see_parameters_efa", class(loadings))) } loadings } #' @export model_parameters.fa <- model_parameters.principal #' @export model_parameters.fa.ci <- model_parameters.fa #' @rdname model_parameters.principal #' @export model_parameters.omega <- function(model, verbose = TRUE, ...) 
{ # Table of omega coefficients table_om <- model$omega.group colnames(table_om) <- c("Omega_Total", "Omega_Hierarchical", "Omega_Group") table_om$Composite <- row.names(table_om) row.names(table_om) <- NULL table_om <- table_om[c("Composite", names(table_om)[names(table_om) != "Composite"])] # Get summary: Table of Variance table_var <- as.data.frame(unclass(model$omega.group)) table_var$Composite <- rownames(model$omega.group) table_var$Total <- table_var$total * 100 table_var$General <- table_var$general * 100 table_var$Group <- table_var$group * 100 table_var <- table_var[c("Composite", "Total", "General", "Group")] # colnames(table_var) <- c("Composite", "Total Variance (%)", "Variance due to General Factor (%)", "Variance due to Group Factor (%)") # cor.plot(psych::fa.sort(om), main = title) out <- table_om attr(out, "summary") <- table_var class(out) <- c("parameters_omega", class(out)) out } parameters/R/methods_brglm2.R0000644000175000017500000001230514133047657016016 0ustar nileshnilesh# classes: .bracl, .multinom, .brmultinom ## TODO add ci_method later? ############# .bracl -------------- #' @rdname model_parameters.mlm #' @export model_parameters.bracl <- function(model, ci = .95, bootstrap = FALSE, iterations = 1000, standardize = NULL, exponentiate = FALSE, p_adjust = NULL, verbose = TRUE, ...) { # detect number of levels of response nl <- tryCatch( { nlevels(factor(insight::get_response(model))) }, error = function(e) { 0 } ) # merge by response as well if more than 2 levels if (nl > 2) { merge_by <- c("Parameter", "Response") } else { merge_by <- "Parameter" } out <- .model_parameters_generic( model = model, ci = ci, bootstrap = bootstrap, iterations = iterations, merge_by = merge_by, standardize = standardize, exponentiate = exponentiate, robust = FALSE, p_adjust = p_adjust, ... ) attr(out, "object_name") <- deparse(substitute(model), width.cutoff = 500) out } #' @export ci.bracl <- function(x, ci = .95, method = NULL, robust = FALSE, ...) 
{
  # parameter frame is used to recover the "Response" column (the outcome
  # category each coefficient belongs to) for multi-category models
  params <- insight::get_parameters(x)
  out <- .ci_generic(model = x, ci = ci, method = method, robust = robust, ...)
  if ("Response" %in% colnames(params)) {
    out$Response <- params$Response
  }
  out
}


#' @export
standard_error.bracl <- function(model, ...) {
  # standard errors are in the second column of the summary coefficient matrix
  smry <- suppressMessages(as.data.frame(stats::coef(summary(model))))
  se <- smry[[2]]
  names(se) <- rownames(smry)

  params <- insight::get_parameters(model)

  .data_frame(
    Parameter = params$Parameter,
    SE = as.vector(se),
    Response = params$Response
  )
}


#' @export
p_value.bracl <- function(model, ...) {
  # p-values are in the fourth column of the summary coefficient matrix
  smry <- suppressMessages(as.data.frame(stats::coef(summary(model))))
  p <- smry[[4]]
  names(p) <- rownames(smry)

  params <- insight::get_parameters(model)

  .data_frame(
    Parameter = params$Parameter,
    p = as.vector(p),
    Response = params$Response
  )
}


############# .multinom --------------


#' @export
model_parameters.multinom <- model_parameters.bracl

#' @export
ci.multinom <- ci.bracl

#' @export
degrees_of_freedom.multinom <- function(model, method = NULL, ...) {
  # method = "normal" requests z-statistics, i.e. infinite df; otherwise
  # residual df: number of observations minus the model's effective df
  if (identical(method, "normal")) {
    Inf
  } else {
    insight::n_obs(model) - model$edf
  }
}

#' @export
degrees_of_freedom.nnet <- degrees_of_freedom.multinom


#' @export
standard_error.multinom <- function(model, ...) {
  se <- tryCatch(
    {
      stderr <- summary(model)$standard.errors
      if (is.null(stderr)) {
        # no standard errors in the summary - fall back to the square root
        # of the diagonal of the variance-covariance matrix
        vc <- insight::get_varcov(model)
        stderr <- as.vector(sqrt(diag(vc)))
      } else {
        if (is.matrix(stderr)) {
          # multi-category response: summary() returns one row per response
          # level; flatten row-wise (same order as the former element-by-element
          # loop) so values line up with insight::get_parameters()
          stderr <- as.vector(t(stderr))
        } else {
          stderr <- as.vector(stderr)
        }
      }
      stderr
    },
    error = function(e) {
      # summary() failed entirely - use the variance-covariance matrix
      vc <- insight::get_varcov(model)
      as.vector(sqrt(diag(vc)))
    }
  )
  params <- insight::get_parameters(model)

  if ("Response" %in% colnames(params)) {
    .data_frame(
      Parameter = params$Parameter,
      SE = se,
      Response = params$Response
    )
  } else {
    .data_frame(
      Parameter = params$Parameter,
      SE = se
    )
  }
}


#' @export
p_value.multinom <- function(model, method = "residual", ...)
{ stat <- insight::get_statistic(model) out <- p_value.default(model, method = method, ...) if (!is.null(stat$Response)) { out$Response <- stat$Response } out } #' @export simulate_parameters.multinom <- function(model, iterations = 1000, centrality = "median", ci = .95, ci_method = "quantile", test = "p-value", ...) { data <- simulate_model(model, iterations = iterations, ...) out <- .summary_bootstrap( data = data, test = test, centrality = centrality, ci = ci, ci_method = ci_method, ... ) params <- insight::get_parameters(model) out$Parameter <- params$Parameter if ("Response" %in% colnames(params)) { out$Response <- params$Response } class(out) <- c("parameters_simulate", "see_parameters_simulate", class(out)) attr(out, "object_name") <- deparse(substitute(model), width.cutoff = 500) attr(out, "iterations") <- iterations attr(out, "ci") <- ci out } ############# .brmultinom -------------- #' @export model_parameters.brmultinom <- model_parameters.bracl #' @export ci.brmultinom <- ci.bracl #' @export standard_error.brmultinom <- standard_error.multinom #' @export p_value.brmultinom <- p_value.multinom parameters/R/factor_analysis.R0000644000175000017500000000476114057212550016265 0ustar nileshnilesh#' @rdname principal_components #' @export factor_analysis <- function(x, n = "auto", rotation = "none", sort = FALSE, threshold = NULL, standardize = TRUE, cor = NULL, ...) { UseMethod("factor_analysis") } #' @export factor_analysis.data.frame <- function(x, n = "auto", rotation = "none", sort = FALSE, threshold = NULL, standardize = TRUE, cor = NULL, ...) { # Standardize if (standardize && is.null(cor)) { x <- as.data.frame(scale(x)) } # N factors n <- .get_n_factors(x, n = n, type = "FA", rotation = rotation, cor = cor) .factor_analysis_rotate( x, n, rotation = rotation, sort = sort, threshold = threshold, cor = cor, ... ) } #' @keywords internal .factor_analysis_rotate <- function(x, n, rotation, sort = FALSE, threshold = NULL, cor = NULL, ...) 
{
  # validate the requested rotation method before touching the data
  if (!(rotation %in% c("varimax", "quartimax", "promax", "oblimin", "simplimax", "cluster", "none"))) {
    stop("`rotation` must be one of \"varimax\", \"quartimax\", \"promax\", \"oblimin\", \"simplimax\", \"cluster\" or \"none\".")
  }

  if (!inherits(x, "data.frame")) {
    stop("`x` must be a data frame.", call. = FALSE)
  }

  # rotate loadings - psych::fa() performs both extraction and rotation
  if (!requireNamespace("psych", quietly = TRUE)) {
    stop(sprintf("Package `psych` required for `%s`-rotation.", rotation), call. = FALSE)
  }

  # Pass cor if available: fit on the correlation matrix (with n.obs taken
  # from the raw data) rather than on the raw data itself
  if (!is.null(cor)) {
    out <- model_parameters(
      psych::fa(
        cor,
        nfactors = n,
        rotate = rotation,
        n.obs = nrow(x),
        ...
      ),
      sort = sort,
      threshold = threshold
    )
  } else {
    out <- model_parameters(
      psych::fa(x, nfactors = n, rotate = rotation, ...),
      sort = sort,
      threshold = threshold
    )
  }

  # keep the original data around for downstream methods
  attr(out, "data_set") <- x
  out
}
parameters/R/methods_biglm.R0000644000175000017500000000066314133044475015722 0ustar nileshnilesh
#' @export
standard_error.biglm <- function(model, ...) {
  # column 4 of summary(model)$mat holds the standard errors
  # (NOTE(review): presumed layout coef/CI low/CI high/SE/p -- confirm against biglm docs)
  cs <- summary(model)$mat
  params <- insight::get_parameters(model)

  .data_frame(
    Parameter = params$Parameter,
    SE = as.vector(cs[, 4])
  )
}


#' @export
degrees_of_freedom.biglm <- function(model, method = NULL, ...) {
  # biglm objects carry no residual df component; delegate to the fallback helper
  .degrees_of_freedom_no_dfresid_method(model, method)
}

#' @export
degrees_of_freedom.bigglm <- degrees_of_freedom.biglm
parameters/R/methods_bbmle.R0000644000175000017500000000147413770346766015720 0ustar nileshnilesh
#' @export
model_parameters.mle2 <- model_parameters.glm

#' @export
ci.mle2 <- ci.glm

#' @export
standard_error.mle2 <- function(model, ...) {
  if (!requireNamespace("bbmle", quietly = TRUE)) {
    stop("Package `bbmle` needs to be installed to extract standard errors.", call. = FALSE)
  }
  # column 2 of the S4 summary's coefficient matrix holds the standard errors
  s <- bbmle::summary(model)
  .data_frame(
    Parameter = names(s@coef[, 2]),
    SE = unname(s@coef[, 2])
  )
}


#' @export
p_value.mle2 <- function(model, ...) {
  if (!requireNamespace("bbmle", quietly = TRUE)) {
    stop("Package `bbmle` needs to be installed to extract p-values.", call.
= FALSE)
  }
  # column 4 of the S4 summary's coefficient matrix holds the p-values
  s <- bbmle::summary(model)
  .data_frame(
    Parameter = names(s@coef[, 4]),
    p = unname(s@coef[, 4])
  )
}


#' @export
format_parameters.mle2 <- function(model, ...) {
  # no pretty-name formatting implemented for mle2 objects
  NULL
}
parameters/R/methods_lm.R0000644000175000017500000000212114131014351015223 0ustar nileshnilesh
# lm: .lm, .summary.lm


# .lm ---------------------

#' @export
standard_error.lm <- standard_error.glm

#' @export
p_value.lm <- p_value.default

#' @export
ci.lm <- function(x, ci = .95, method = "residual", robust = FALSE, ...) {
  # residual-df Wald intervals via the generic CI back-end
  .ci_generic(model = x, ci = ci, method = method, robust = robust, ...)
}


# .summary.lm ---------------------

#' @export
standard_error.summary.lm <- function(model, ...) {
  # column 2 of the coefficient matrix holds the standard errors
  cs <- stats::coef(model)

  data.frame(
    Parameter = rownames(cs),
    SE = as.vector(cs[, 2]),
    stringsAsFactors = FALSE,
    row.names = NULL
  )
}

#' @export
p_value.summary.lm <- function(model, ...) {
  # column 4 of the coefficient matrix holds the p-values
  cs <- stats::coef(model)
  data.frame(
    Parameter = rownames(cs),
    p = as.vector(cs[, 4]),
    stringsAsFactors = FALSE,
    row.names = NULL
  )
}

#' @export
ci.summary.lm <- function(x, ci = .95, method = "residual", ...) {
  # summary.lm objects do not respond to the usual df extractors, so pass
  # the residual df (from the F-statistic, see below) explicitly
  .ci_generic(model = x, ci = ci, method = method, dof = degrees_of_freedom(x), ...)
}

#' @export
degrees_of_freedom.summary.lm <- function(model, ...) {
  # third element of the F-statistic vector is the residual ("dendf") df
  model$fstatistic[3]
}
parameters/R/methods_glmm.R0000644000175000017500000000416014131014351015554 0ustar nileshnilesh
#' @export
model_parameters.glmm <- function(model,
                                  ci = .95,
                                  effects = c("all", "fixed", "random"),
                                  bootstrap = FALSE,
                                  iterations = 1000,
                                  standardize = NULL,
                                  exponentiate = FALSE,
                                  verbose = TRUE,
                                  ...) {
  effects <- match.arg(effects)
  # delegate to the shared extractor; rows are matched on Parameter + Effects
  # because fixed and random terms may share names
  out <- .model_parameters_generic(
    model = model,
    ci = ci,
    bootstrap = bootstrap,
    iterations = iterations,
    merge_by = c("Parameter", "Effects"),
    standardize = standardize,
    exponentiate = exponentiate,
    effects = effects,
    robust = FALSE,
    ...
  )

  attr(out, "object_name") <- deparse(substitute(model), width.cutoff = 500)
  out
}


#' @export
ci.glmm <- function(x, ci = .95, effects = c("all", "fixed", "random"), ...)
{ effects <- match.arg(effects) .ci_generic(model = x, ci = ci, dof = Inf, effects = effects, robust = FALSE) } #' @export standard_error.glmm <- function(model, effects = c("all", "fixed", "random"), ...) { effects <- match.arg(effects) out <- insight::get_parameters(model, effects = "all") out$SE <- sqrt(diag(insight::get_varcov(model, effects = "all"))) out <- out[, c("Parameter", "SE", "Effects")] if (effects != "all") { out <- out[out$Effects == effects, , drop = FALSE] out$Effects <- NULL } out } #' @export p_value.glmm <- function(model, effects = c("all", "fixed", "random"), ...) { effects <- match.arg(effects) s <- summary(model) out <- insight::get_parameters(model, effects = "all") out$p <- c(s$coefmat[, 4], s$nucoefmat[, 4]) out <- out[, c("Parameter", "p", "Effects")] if (effects != "all") { out <- out[out$Effects == effects, , drop = FALSE] out$Effects <- NULL } out } #' @export format_parameters.glmm <- function(model, brackets = c("[", "]"), ...) { .format_parameter_default(model, effects = "all", brackets = brackets) } parameters/R/display.R0000644000175000017500000001475614077615700014564 0ustar nileshnilesh#' @title Print tables in different output formats #' @name display.parameters_model #' #' @description Prints tables (i.e. data frame) in different output formats. #' `print_md()` is a alias for `display(format = "markdown")`. #' #' @param object An object returned by [`model_parameters()`][model_parameters], #' [`simulate_parameters()`][simulate_parameters], #' [`equivalence_test()`][equivalence_test.lm] or #' [`principal_components()`][principal_components]. #' @param format String, indicating the output format. Can be `"markdown"` #' or `"html"`. #' @param align Only applies to HTML tables. May be one of `"left"`, #' `"right"` or `"center"`. #' @param digits,ci_digits,p_digits Number of digits for rounding or #' significant figures. May also be `"signif"` to return significant #' figures or `"scientific"` to return scientific notation. 
Control the #' number of digits by adding the value as suffix, e.g. `digits = "scientific4"` #' to have scientific notation with 4 decimal places, or `digits = "signif5"` #' for 5 significant figures (see also [signif()]). #' @inheritParams print.parameters_model #' @inheritParams insight::format_table #' @inheritParams insight::export_table #' #' @return If `format = "markdown"`, the return value will be a character #' vector in markdown-table format. If `format = "html"`, an object of #' class `gt_tbl`. #' #' @details `display()` is useful when the table-output from functions, #' which is usually printed as formatted text-table to console, should #' be formatted for pretty table-rendering in markdown documents, or if #' knitted from rmarkdown to PDF or Word files. See #' [vignette](https://easystats.github.io/parameters/articles/model_parameters_formatting.html) #' for examples. #' #' @examples #' model <- lm(mpg ~ wt + cyl, data = mtcars) #' mp <- model_parameters(model) #' display(mp) #' @export display.parameters_model <- function(object, format = "markdown", pretty_names = TRUE, split_components = TRUE, select = NULL, caption = NULL, subtitle = NULL, footer = NULL, align = NULL, digits = 2, ci_digits = 2, p_digits = 3, footer_digits = 3, ci_brackets = c("(", ")"), show_sigma = FALSE, show_formula = FALSE, zap_small = FALSE, verbose = TRUE, ...) { if (identical(format, "html")) { print_html(x = object, pretty_names = pretty_names, split_components = split_components, select = select, digits = digits, caption = caption, subtitle = subtitle, footer = footer, ci_digits = ci_digits, p_digits = p_digits, footer_digits = footer_digits, align = align, ci_brackets = ci_brackets, show_sigma = show_sigma, show_formula = show_formula, zap_small = zap_small, verbose = verbose, ...) 
} else { print_md(x = object, pretty_names = pretty_names, split_components = split_components, select = select, digits = digits, caption = caption, subtitle = subtitle, footer = footer, ci_digits = ci_digits, p_digits = p_digits, footer_digits = footer_digits, ci_brackets = ci_brackets, show_sigma = show_sigma, show_formula = show_formula, zap_small = zap_small, verbose = verbose, ...) } } #' @export display.parameters_stan <- display.parameters_model #' @export display.parameters_simulate <- display.parameters_model #' @export display.parameters_brms_meta <- display.parameters_model # Compare Parameters ------------------------ #' @export display.compare_parameters <- function(object, format = "markdown", digits = 2, ci_digits = 2, p_digits = 3, style = NULL, ...) { if (identical(format, "html")) { print_html(x = object, digits = digits, ci_digits = ci_digits, p_digits = p_digits, style = style, ...) } else { print_md(x = object, digits = digits, ci_digits = ci_digits, p_digits = p_digits, style = style, ...) } } # SEM models ------------------------ #' @rdname display.parameters_model #' @export display.parameters_sem <- function(object, format = "markdown", digits = 2, ci_digits = 2, p_digits = 3, ci_brackets = c("(", ")"), ...) { print_md(x = object, digits = digits, ci_digits = ci_digits, p_digits = p_digits, ci_brackets = ci_brackets, ...) } # PCA /EFA models ------------------------ #' @rdname display.parameters_model #' @export display.parameters_efa_summary <- function(object, format = "markdown", digits = 3, ...) { print_md(x = object, digits = digits, ...) } #' @export display.parameters_pca_summary <- display.parameters_efa_summary #' @inheritParams model_parameters.principal #' @rdname display.parameters_model #' @export display.parameters_efa <- function(object, format = "markdown", digits = 2, sort = FALSE, threshold = NULL, labels = NULL, ...) { print_md(x = object, digits = digits, sort = sort, threshold = threshold, labels = labels, ...) 
} #' @export display.parameters_pca <- display.parameters_efa # Equivalence tests ------------------------ #' @rdname display.parameters_model #' @export display.equivalence_test_lm <- function(object, format = "markdown", digits = 2, ...) { print_md(x = object, digits = digits, ...) } # Other functions ------------------------ #' @export display.parameters_distribution <- function(object, format = "markdown", digits = 2, ...) { print_md(x = object, digits = digits, ...) } # Reexports models ------------------------ #' @importFrom insight display #' @export insight::display parameters/R/methods_truncreg.R0000644000175000017500000000033114133000622016433 0ustar nileshnilesh# classes: .truncreg #' @export standard_error.truncreg <- standard_error.default #' @export p_value.truncreg <- p_value.default #' @export degrees_of_freedom.truncreg <- degrees_of_freedom.mhurdle parameters/R/bootstrap_model.R0000644000175000017500000002027314133733462016302 0ustar nileshnilesh#' Model bootstrapping #' #' Bootstrap a statistical model n times to return a data frame of estimates. #' #' @param model Statistical model. #' @param iterations The number of draws to simulate/bootstrap. #' @param type Character string specifying the type of bootstrap. For mixed models #' of class `merMod` or `glmmTMB`, may be `"parametric"` (default) or #' `"semiparametric"` (see `?lme4::bootMer` for details). For all #' other models, see argument `sim` in `?boot::boot` (defaults to #' `"ordinary"`). #' @param parallel The type of parallel operation to be used (if any). #' @param n_cpus Number of processes to be used in parallel operation. #' @param ... Arguments passed to or from other methods. #' @inheritParams p_value #' #' @return A data frame of bootstrapped estimates. #' #' @details By default, `boot::boot()` is used to generate bootstraps from #' the model data, which are then used to `update()` the model, i.e. refit #' the model with the bootstrapped samples. 
For `merMod` objects (**lme4**) #' or models from **glmmTMB**, the `lme4::bootMer()` function is used to #' obtain bootstrapped samples. `bootstrap_parameters()` summarizes the #' bootstrapped model estimates. #' #' @section Using with **emmeans**: #' The output can be passed directly to the various functions from the #' **emmeans** package, to obtain bootstrapped estimates, contrasts, simple #' slopes, etc. and their confidence intervals. These can then be passed to #' `model_parameter()` to obtain standard errors, p-values, etc. (see #' example). #' \cr\cr #' Note that that p-values returned here are estimated under the assumption of #' *translation equivariance*: that shape of the sampling distribution is #' unaffected by the null being true or not. If this assumption does not hold, #' p-values can be biased, and it is suggested to use proper permutation tests #' to obtain non-parametric p-values. #' #' @seealso [bootstrap_parameters()], [simulate_model()], [simulate_parameters()] #' #' @examples #' \dontrun{ #' if (require("boot", quietly = TRUE)) { #' model <- lm(mpg ~ wt + factor(cyl), data = mtcars) #' b <- bootstrap_model(model) #' print(head(b)) #' #' if (require("emmeans", quietly = TRUE)) { #' est <- emmeans(b, consec ~ cyl) #' print(model_parameters(est)) #' } #' } #' } #' @export bootstrap_model <- function(model, iterations = 1000, ...) { UseMethod("bootstrap_model") } #' @rdname bootstrap_model #' @export bootstrap_model.default <- function(model, iterations = 1000, type = "ordinary", parallel = c("no", "multicore", "snow"), n_cpus = 1, verbose = FALSE, ...) 
{ insight::check_if_installed("boot") type <- match.arg(type, choices = c("ordinary", "parametric", "balanced", "permutation", "antithetic")) parallel <- match.arg(parallel) model_data <- data <- insight::get_data(model) model_response <- insight::find_response(model) boot_function <- function(model, data, indices) { d <- data[indices, ] # allows boot to select sample if (inherits(model, "biglm")) { fit <- suppressMessages(stats::update(model, moredata = d)) } else { if (verbose) { fit <- stats::update(model, data = d) } else { fit <- suppressMessages(stats::update(model, data = d)) } } params <- insight::get_parameters(fit, verbose = FALSE) n_params <- insight::n_parameters(model) if (nrow(params) != n_params) { params <- stats::setNames(rep.int(NA, n_params), params$Parameter) } else { params <- stats::setNames(params$Estimate, params$Parameter) # Transform to named vector } return(params) } if (type == "parametric") { f <- function(x, mle) { out <- model_data resp <- stats::simulate(x, nsim = 1) out[[model_response]] <- resp return(out) } results <- boot::boot( data = data, statistic = boot_function, R = iterations, sim = type, parallel = parallel, ncpus = n_cpus, model = model, ran.gen = f ) } else { results <- boot::boot( data = data, statistic = boot_function, R = iterations, sim = type, parallel = parallel, ncpus = n_cpus, model = model ) } out <- as.data.frame(results$t) out <- out[stats::complete.cases(out), ] names(out) <- insight::get_parameters(model, verbose = FALSE)$Parameter class(out) <- unique(c("bootstrap_model", "see_bootstrap_model", class(out))) attr(out, "original_model") <- model out } #' @rdname bootstrap_model #' @export bootstrap_model.merMod <- function(model, iterations = 1000, type = "parametric", parallel = c("no", "multicore", "snow"), n_cpus = 1, verbose = FALSE, ...) 
{ insight::check_if_installed("lme4") type <- match.arg(type, choices = c("parametric", "semiparametric")) parallel <- match.arg(parallel) boot_function <- function(model) { params <- insight::get_parameters(model, verbose = FALSE) n_params <- insight::n_parameters(model) if (nrow(params) != n_params) { params <- stats::setNames(rep.int(NA, n_params), params$Parameter) } else { params <- stats::setNames(params$Estimate, params$Parameter) # Transform to named vector } return(params) } if (verbose) { results <- lme4::bootMer( model, boot_function, nsim = iterations, type = type, parallel = parallel, ncpus = n_cpus ) } else { results <- suppressMessages(lme4::bootMer( model, boot_function, nsim = iterations, verbose = FALSE, type = type, parallel = parallel, ncpus = n_cpus )) } out <- as.data.frame(results$t) out <- out[stats::complete.cases(out), ] names(out) <- insight::find_parameters(model, effects = "fixed")$conditional class(out) <- unique(c("bootstrap_model", "see_bootstrap_model", class(out))) attr(out, "original_model") <- model out } #' @export bootstrap_model.glmmTMB <- bootstrap_model.merMod # bootstrap_model.htest <- function(model, n = 1000, verbose = FALSE, ...) { # data <- insight::get_data(model) # # boot_function <- function(model, data, indices) { # d <- data[indices, ] # allows boot to select sample # # if (verbose) { # fit <- suppressMessages(update(model, data = d)) # } else { # fit <- update(model, data = d) # } # # return(model$estimate) # } # # results <- boot::boot(data = data, statistic = boot_function, R = n, model = model) # # return(results) # } #' @export as.data.frame.lm <- function(x, row.names = NULL, optional = FALSE, iterations = 1000, verbose = FALSE, ...) { bootstrap_model(x, iterations = iterations, verbose = verbose, ...) } #' @export as.data.frame.merMod <- function(x, row.names = NULL, optional = FALSE, iterations = 1000, verbose = FALSE, ...) { bootstrap_model(x, iterations = iterations, verbose = verbose, ...) 
} #' @export as.data.frame.glmmTMB <- function(x, row.names = NULL, optional = FALSE, iterations = 1000, verbose = FALSE, ...) { bootstrap_model(x, iterations = iterations, verbose = verbose, ...) } parameters/R/utils_clustering.R0000644000175000017500000000333514135551265016505 0ustar nileshnilesh # #' @export # print.cluster_analysis <- function(x, digits = 2, ...) { # # retrieve data # dat <- attr(x, "data", exact = TRUE) # # if (is.null(dat)) { # stop("Could not find data frame that was used for cluster analysis.", call. = FALSE) # } # # # save output from cluster_discrimination() # accuracy <- attributes(x)$accuracy # # # headline # insight::print_color("# Cluster Analysis (mean z-score by cluster)\n\n", "blue") # # # round numeric variables (i.e. all but first term column) # dat[2:ncol(dat)] <- sapply(dat[2:ncol(dat)], round, digits = digits) # print.data.frame(dat, row.names = FALSE) # # if (!is.null(accuracy)) { # cat("\n") # print(accuracy) # } # invisible(x) # } # Utils ------------------------------------------------------------------- #' @keywords internal .prepare_data_clustering <- function(x, include_factors = FALSE, standardize = FALSE, preprocess = TRUE, ...) { if (preprocess == FALSE) { return(x) } # Convert factors to numeric # include factors? if (include_factors) { # ordered factors to numeric factors <- sapply(x, is.ordered) if (any(factors)) { x[factors] <- sapply(x[factors], .factor_to_numeric) } # character and factors to dummies factors <- sapply(x, function(i) is.character(i) | is.factor(i)) if (any(factors)) { dummies <- lapply(x[factors], .factor_to_dummy) x <- cbind(x[!factors], dummies) } } else { # remove factors x <- x[sapply(x, is.numeric)] } # Remove all missing values from data, only use numerics x <- stats::na.omit(x) if (standardize == TRUE) { x <- datawizard::standardize(x, ...) 
} x } parameters/R/methods_glmmTMB.R0000644000175000017500000003311614166656750016141 0ustar nileshnilesh# Package glmmTMB # model_parameters ----- #' @inheritParams simulate_model #' @rdname model_parameters.merMod #' @export model_parameters.glmmTMB <- function(model, ci = .95, bootstrap = FALSE, iterations = 1000, effects = "all", component = "all", group_level = FALSE, standardize = NULL, exponentiate = FALSE, ci_method = "wald", robust = FALSE, p_adjust = NULL, wb_component = TRUE, summary = FALSE, keep = NULL, drop = NULL, parameters = keep, verbose = TRUE, df_method = ci_method, include_sigma = FALSE, ...) { ## TODO remove later if (!missing(df_method) && !identical(ci_method, df_method)) { warning(insight::format_message("Argument 'df_method' is deprecated. Please use 'ci_method' instead."), call. = FALSE) ci_method <- df_method } # p-values, CI and se might be based on different df-methods ci_method <- .check_df_method(ci_method) # which components to return? effects <- match.arg(effects, choices = c("fixed", "random", "all")) component <- match.arg(component, choices = c("all", "conditional", "zi", "zero_inflated", "dispersion")) # standardize only works for fixed effects... if (!is.null(standardize) && standardize != "refit") { if (!missing(effects) && effects != "fixed" && verbose) { warning(insight::format_message("Standardizing coefficients only works for fixed effects of the mixed model."), call. = FALSE) } effects <- "fixed" } # fix argument, if model has only conditional component cs <- stats::coef(summary(model)) has_zeroinf <- insight::model_info(model, verbose = FALSE)$is_zero_inflated has_disp <- is.list(cs) && !is.null(cs$disp) if (!has_zeroinf && !has_disp && component != "conditional") { component <- "conditional" } params <- params_random <- params_variance <- NULL if (effects %in% c("fixed", "all")) { # Processing if (bootstrap) { params <- bootstrap_parameters( model, iterations = iterations, ci = ci, ... 
) if (effects != "fixed") { effects <- "fixed" if (verbose) { warning(insight::format_message("Bootstrapping only returns fixed effects of the mixed model."), call. = FALSE) } } } else { params <- .extract_parameters_generic( model, ci = ci, component = component, standardize = standardize, robust = robust, ci_method = ci_method, p_adjust = p_adjust, wb_component = wb_component, keep_parameters = NULL, drop_parameters = NULL, keep_component_column = component != "conditional", include_sigma = include_sigma, summary = summary, ... ) } # add dispersion parameter if (inherits(model, "glmmTMB") && !is.null(params$Component) && !"dispersion" %in% params$Component) { dispersion_param <- insight::get_parameters(model, component = "dispersion") if (!is.null(dispersion_param)) { params[nrow(params) + 1, ] <- NA params[nrow(params), "Parameter"] <- dispersion_param$Parameter[1] params[nrow(params), "Coefficient"] <- dispersion_param$Estimate[1] params[nrow(params), "Component"] <- dispersion_param$Component[1] } } if (isTRUE(exponentiate) || identical(exponentiate, "nongaussian")) { params <- .exponentiate_parameters(params, model, exponentiate) } params$Effects <- "fixed" } att <- attributes(params) if (effects %in% c("random", "all") && isTRUE(group_level)) { params_random <- .extract_random_parameters(model, ci = ci, effects = effects, component = component) if (length(insight::find_random(model, flatten = TRUE)) > 1) { warning(insight::format_message("Cannot extract confidence intervals for random variance parameters from models with more than one grouping factor."), call. 
= FALSE) } } if (effects %in% c("random", "all") && isFALSE(group_level)) {
    # summary of random effect variances (not group-level estimates)
    params_variance <- .extract_random_variances(model, ci = ci, effects = effects, component = component, ci_method = ci_method, verbose = verbose)
  }

  # merge random and fixed effects, if necessary
  if (!is.null(params) && (!is.null(params_random) || !is.null(params_variance))) {
    # add placeholder columns so fixed effects rows align with random effects rows
    params$Level <- NA
    params$Group <- ""

    # add component column
    if (!"Component" %in% colnames(params)) {
      params$Component <- ifelse(component %in% c("zi", "zero_inflated"), "zero_inflated", "conditional")
    }

    # reorder columns of the fixed effects data frame to match the random
    # effects data frame, so rbind() below works
    if (!is.null(params_random)) {
      params <- params[match(colnames(params_random), colnames(params))]
    } else {
      params <- params[match(colnames(params_variance), colnames(params))]
    }
  }

  params <- rbind(params, params_random, params_variance)

  # remove empty column
  if (!is.null(params$Level) && all(is.na(params$Level))) {
    params$Level <- NULL
  }

  # filter parameters
  if (!is.null(keep) || !is.null(drop)) {
    params <- .filter_parameters(params, keep, drop, verbose = verbose)
  }

  # due to rbind(), we lose attributes from "extract_parameters()",
  # so we add those attributes back here...
  if (!is.null(att)) {
    attributes(params) <- utils::modifyList(att, attributes(params))
  }

  params <- .add_model_parameters_attributes(
    params,
    model,
    ci = ci,
    exponentiate,
    ci_method = ci_method,
    p_adjust = p_adjust,
    verbose = verbose,
    group_level = group_level,
    summary = summary,
    wb_component = wb_component,
    ...
  )

  attr(params, "object_name") <- deparse(substitute(model), width.cutoff = 500)
  class(params) <- c("parameters_model", "see_parameters_model", class(params))
  params
}


# ci -----

#' @rdname ci.default
#' @export
ci.glmmTMB <- function(x,
                       ci = .95,
                       dof = NULL,
                       method = "wald",
                       robust = FALSE,
                       component = "all",
                       verbose = TRUE,
                       ...) { method <- tolower(method)
  method <- match.arg(method, choices = c("wald", "normal", "ml1", "betwithin", "profile", "uniroot"))
  component <- match.arg(component, choices = c("all", "conditional", "zi", "zero_inflated", "dispersion"))

  # return NULL when the requested component does not exist in the model
  if (is.null(.check_component(x, component, verbose = verbose))) {
    return(NULL)
  }

  # profiled CIs (one call per requested CI level, results row-bound)
  if (method == "profile") {
    pp <- stats::profile(x)
    out <- lapply(ci, function(i) .ci_profile_glmmTMB(x, ci = i, profiled = pp, component = component, ...))
    do.call(rbind, out)

    # uniroot CIs
  } else if (method == "uniroot") {
    out <- lapply(ci, function(i) .ci_uniroot_glmmTMB(x, ci = i, component = component, ...))
    do.call(rbind, out)
  } else {
    # all other (wald, normal, ml1, betwithin)
    .ci_generic(model = x, ci = ci, dof = dof, method = method, robust = robust, component = component, ...)
  }
}


# standard_error -----

#' @rdname standard_error
#' @export
standard_error.glmmTMB <- function(model,
                                   effects = c("fixed", "random"),
                                   component = c("all", "conditional", "zi", "zero_inflated", "dispersion"),
                                   verbose = TRUE,
                                   ...)
{ component <- match.arg(component) effects <- match.arg(effects) if (effects == "random") { if (requireNamespace("TMB", quietly = TRUE) && requireNamespace("glmmTMB", quietly = TRUE)) { s1 <- TMB::sdreport(model$obj, getJointPrecision = TRUE) s2 <- sqrt(s1$diag.cov.random) rand.ef <- glmmTMB::ranef(model)[[1]] rand.se <- lapply(rand.ef, function(.x) { cnt <- nrow(.x) * ncol(.x) s3 <- s2[1:cnt] s2 <- s2[-(1:cnt)] d <- as.data.frame(matrix(sqrt(s3), ncol = ncol(.x), byrow = TRUE)) colnames(d) <- colnames(.x) d }) rand.se } else { return(NULL) } } else { if (is.null(.check_component(model, component, verbose = verbose))) { return(NULL) } cs <- .compact_list(stats::coef(summary(model))) x <- lapply(names(cs), function(i) { .data_frame( Parameter = insight::find_parameters(model, effects = "fixed", component = i, flatten = TRUE), SE = as.vector(cs[[i]][, 2]), Component = i ) }) se <- do.call(rbind, x) se$Component <- .rename_values(se$Component, "cond", "conditional") se$Component <- .rename_values(se$Component, "zi", "zero_inflated") se$Component <- .rename_values(se$Component, "disp", "dispersion") .filter_component(se, component) } } # simulate model ----- #' @rdname simulate_model #' @export simulate_model.glmmTMB <- function(model, iterations = 1000, component = c("all", "conditional", "zi", "zero_inflated", "dispersion"), verbose = FALSE, ...) { component <- match.arg(component) info <- insight::model_info(model, verbose = FALSE) ## TODO remove is.list() when insight 0.8.3 on CRAN if (!is.list(info)) { info <- NULL } has_zeroinflated <- !is.null(info) && isTRUE(info$is_zero_inflated) has_dispersion <- !is.null(info) && isTRUE(info$is_dispersion) # check component-argument ---- if (component == "all") { if (!has_zeroinflated && !has_dispersion) { if (verbose) insight::print_color("No zero-inflation and dispersion components. 
Simulating from conditional parameters.\n", "red") component <- "conditional" } else if (!has_zeroinflated && has_dispersion) { if (verbose) insight::print_color("No zero-inflation component. Simulating from conditional and dispersion parameters.\n", "red") component <- c("conditional", "dispersion") } else if (has_zeroinflated && !has_dispersion) { if (verbose) insight::print_color("No dispersion component. Simulating from conditional and zero-inflation parameters.\n", "red") component <- c("conditional", "zero_inflated") } } else if (component %in% c("zi", "zero_inflated") && !has_zeroinflated) { stop("No zero-inflation model found.") } else if (component == "dispersion" && !has_dispersion) { stop("No dispersion model found.") } if (is.null(iterations)) iterations <- 1000 if (all(component == c("conditional", "zero_inflated"))) { d1 <- .simulate_model(model, iterations, component = "conditional") d2 <- .simulate_model(model, iterations, component = "zero_inflated") colnames(d2) <- paste0(colnames(d2), "_zi") d <- cbind(d1, d2) } else if (all(component == c("conditional", "dispersion"))) { d1 <- .simulate_model(model, iterations, component = "conditional") d2 <- .simulate_model(model, iterations, component = "dispersion") colnames(d2) <- paste0(colnames(d2), "_disp") d <- cbind(d1, d2) } else if (all(component == "all")) { d1 <- .simulate_model(model, iterations, component = "conditional") d2 <- .simulate_model(model, iterations, component = "zero_inflated") d3 <- .simulate_model(model, iterations, component = "dispersion") colnames(d2) <- paste0(colnames(d2), "_zi") colnames(d3) <- paste0(colnames(d3), "_disp") d <- cbind(d1, d2, d3) } else if (all(component == "conditional")) { d <- .simulate_model(model, iterations, component = "conditional") } else if (all(component %in% c("zi", "zero_inflated"))) { d <- .simulate_model(model, iterations, component = "zero_inflated") } else { d <- .simulate_model(model, iterations, component = "dispersion") } class(d) <- 
c("parameters_simulate_model", class(d)) attr(d, "object_name") <- .safe_deparse(substitute(model)) d } # simulate_parameters ----- #' @rdname simulate_parameters #' @export simulate_parameters.glmmTMB <- function(model, iterations = 1000, centrality = "median", ci = .95, ci_method = "quantile", test = "p-value", ...) { data <- simulate_model(model, iterations = iterations, ...) out <- .summary_bootstrap( data = data, test = test, centrality = centrality, ci = ci, ci_method = ci_method, ... ) params <- insight::get_parameters(model, ...) if ("Effects" %in% colnames(params) && .n_unique(params$Effects) > 1) { out$Effects <- params$Effects } if ("Component" %in% colnames(params) && .n_unique(params$Component) > 1) { out$Component <- params$Component } class(out) <- c("parameters_simulate", "see_parameters_simulate", class(out)) attr(out, "object_name") <- deparse(substitute(model), width.cutoff = 500) attr(out, "iterations") <- iterations attr(out, "ci") <- ci out } parameters/R/simulate_parameters.R0000644000175000017500000000634014077615700017153 0ustar nileshnilesh#' Simulate Model Parameters #' #' Compute simulated draws of parameters and their related indices such as Confidence Intervals (CI) and p-values. Simulating parameter draws can be seen as a (computationally faster) alternative to bootstrapping. #' #' @inheritParams simulate_model #' @inheritParams bayestestR::describe_posterior #' #' @return A data frame with simulated parameters. #' #' @references Gelman A, Hill J. Data analysis using regression and multilevel/hierarchical models. Cambridge; New York: Cambridge University Press 2007: 140-143 #' #' @seealso [bootstrap_model()], [bootstrap_parameters()], [simulate_model()] #' #' @note There is also a [`plot()`-method](https://easystats.github.io/see/articles/parameters.html) implemented in the \href{https://easystats.github.io/see/}{\pkg{see}-package}. 
#' #' @details #' \subsection{Technical Details}{ #' `simulate_parameters()` is a computationally faster alternative #' to `bootstrap_parameters()`. Simulated draws for coefficients are based #' on a multivariate normal distribution (`MASS::mvrnorm()`) with mean #' `mu = coef(model)` and variance `Sigma = vcov(model)`. #' } #' \subsection{Models with Zero-Inflation Component}{ #' For models from packages \pkg{glmmTMB}, \pkg{pscl}, \pkg{GLMMadaptive} and #' \pkg{countreg}, the `component` argument can be used to specify #' which parameters should be simulated. For all other models, parameters #' from the conditional component (fixed effects) are simulated. This may #' include smooth terms, but not random effects. #' } #' #' @examples #' library(parameters) #' #' model <- lm(Sepal.Length ~ Species * Petal.Width + Petal.Length, data = iris) #' simulate_parameters(model) #' \dontrun{ #' if (require("glmmTMB")) { #' model <- glmmTMB( #' count ~ spp + mined + (1 | site), #' ziformula = ~mined, #' family = poisson(), #' data = Salamanders #' ) #' simulate_parameters(model, centrality = "mean") #' simulate_parameters(model, ci = c(.8, .95), component = "zero_inflated") #' } #' } #' @export simulate_parameters <- function(model, ...) { UseMethod("simulate_parameters") } #' @rdname simulate_parameters #' @export simulate_parameters.default <- function(model, iterations = 1000, centrality = "median", ci = .95, ci_method = "quantile", test = "p-value", ...) { data <- simulate_model(model, iterations = iterations, ...) out <- .summary_bootstrap( data = data, test = test, centrality = centrality, ci = ci, ci_method = ci_method, ... 
) params <- insight::get_parameters(model, verbose = FALSE) if ("Effects" %in% colnames(params) && .n_unique(params$Effects) > 1) { out$Effects <- params$Effects } class(out) <- c("parameters_simulate", "see_parameters_simulate", class(out)) attr(out, "object_name") <- deparse(substitute(model), width.cutoff = 500) attr(out, "iterations") <- iterations attr(out, "ci") <- ci out } parameters/R/print_html.R0000644000175000017500000001046114074636764015276 0ustar nileshnilesh# normal print ---------------------------- #' @rdname display.parameters_model #' @export print_html.parameters_model <- function(x, pretty_names = TRUE, split_components = TRUE, select = NULL, caption = NULL, subtitle = NULL, footer = NULL, align = NULL, digits = 2, ci_digits = 2, p_digits = 3, footer_digits = 3, ci_brackets = c("(", ")"), show_sigma = FALSE, show_formula = FALSE, zap_small = FALSE, groups = NULL, verbose = TRUE, ...) { # check if user supplied digits attributes if (missing(digits)) { digits <- .additional_arguments(x, "digits", digits) } if (missing(ci_digits)) { ci_digits <- .additional_arguments(x, "ci_digits", ci_digits) } if (missing(p_digits)) { p_digits <- .additional_arguments(x, "p_digits", p_digits) } if (missing(footer_digits)) { footer_digits <- .additional_arguments(x, "footer_digits", footer_digits) } # table caption table_caption <- .print_caption(x, caption, format = "html") # main table formatted_table <- .print_core( x = x, pretty_names = pretty_names, split_components = split_components, select = select, digits = digits, ci_digits = ci_digits, p_digits = p_digits, zap_small = zap_small, ci_width = NULL, ci_brackets = ci_brackets, format = "html", groups = groups, ... 
) # replace brackets by parenthesis if (!is.null(ci_brackets) && "Parameter" %in% colnames(formatted_table)) { formatted_table$Parameter <- gsub("[", ci_brackets[1], formatted_table$Parameter, fixed = TRUE) formatted_table$Parameter <- gsub("]", ci_brackets[2], formatted_table$Parameter, fixed = TRUE) } # footer footer <- .print_footer( x, digits = footer_digits, show_sigma = show_sigma, show_formula = show_formula, format = "html" ) insight::export_table( formatted_table, format = "html", caption = table_caption, subtitle = subtitle, footer = footer, align = align, ... ) } #' @export print_html.parameters_brms_meta <- print_html.parameters_model #' @export print_html.parameters_simulate <- print_html.parameters_model #' @export print_html.parameters_sem <- print_html.parameters_model #' @export print_html.compare_parameters <- function(x, digits = 2, ci_digits = 2, p_digits = 3, caption = NULL, subtitle = NULL, footer = NULL, style = NULL, ...) { # check if user supplied digits attributes if (missing(digits)) { digits <- .additional_arguments(x, "digits", digits) } if (missing(ci_digits)) { ci_digits <- .additional_arguments(x, "ci_digits", ci_digits) } if (missing(p_digits)) { p_digits <- .additional_arguments(x, "p_digits", p_digits) } # get attributes if (missing(style) || is.null(style)) { style <- attributes(x)$output_style } formatted_table <- format( x, style, split_components = TRUE, digits = digits, ci_digits = ci_digits, p_digits = p_digits, ci_width = NULL, ci_brackets = c("(", ")"), format = "html" ) insight::export_table( formatted_table, format = "html", caption = caption, # TODO: get rid of NOTE subtitle = subtitle, footer = footer, ... 
) } parameters/R/methods_survey.R0000644000175000017500000001051014133000604016137 0ustar nileshnilesh# model_parameters ----------------------------------------- #' @export model_parameters.svyglm <- function(model, ci = .95, ci_method = "wald", bootstrap = FALSE, iterations = 1000, standardize = NULL, exponentiate = FALSE, robust = FALSE, p_adjust = NULL, verbose = TRUE, ...) { if (insight::n_obs(model) > 1e4 && ci_method == "likelihood") { message(insight::format_message("Likelihood confidence intervals may take longer time to compute. Use 'df_method=\"wald\"' for faster computation of CIs.")) } out <- .model_parameters_generic( model = model, ci = ci, ci_method = ci_method, bootstrap = bootstrap, iterations = iterations, merge_by = "Parameter", standardize = standardize, exponentiate = exponentiate, robust = robust, p_adjust = p_adjust, ... ) attr(out, "object_name") <- deparse(substitute(model), width.cutoff = 500) out } # simulate_model ----------------------------------------- #' @export simulate_model.svyglm.nb <- simulate_model.default #' @export simulate_model.svyglm.zip <- simulate_model.default # standard erors ----------------------------------------- #' @export standard_error.svyglm.nb <- function(model, ...) { if (!isNamespaceLoaded("survey")) { requireNamespace("survey", quietly = TRUE) } se <- sqrt(diag(stats::vcov(model, stderr = "robust"))) .data_frame( Parameter = .remove_backticks_from_string(names(se)), SE = as.vector(se) ) } #' @export standard_error.svyglm.zip <- standard_error.svyglm.nb #' @export standard_error.svyglm <- function(model, ...) { vc <- insight::get_varcov(model) .data_frame( Parameter = .remove_backticks_from_string(row.names(vc)), SE = as.vector(sqrt(diag(vc))) ) } #' @export standard_error.svyolr <- standard_error.svyglm # confidence intervals ----------------------------------- #' @export ci.svyglm <- function(x, ci = .95, method = "wald", ...) 
{ method <- match.arg(method, choices = c("wald", "residual", "normal", "likelihood")) if (method == "likelihood") { out <- lapply(ci, function(i) .ci_likelihood(model = x, ci = i)) out <- do.call(rbind, out) } else { out <- .ci_generic(model = x, ci = ci, method = method, ...) } row.names(out) <- NULL out } #' @export ci.svyolr <- ci.svyglm # p values ----------------------------------------------- ## TODO how to calculate p when ci-method is "likelihood"? #' @export p_value.svyglm <- function(model, verbose = TRUE, ...) { statistic <- insight::get_statistic(model) df <- insight::get_df(model, type = "residual") p <- 2 * stats::pt(-abs(statistic$Statistic), df = df) .data_frame( Parameter = statistic$Parameter, p = as.vector(p) ) } #' @export p_value.svyolr <- p_value.svyglm #' @export p_value.svyglm.nb <- function(model, ...) { if (!isNamespaceLoaded("survey")) { requireNamespace("survey", quietly = TRUE) } est <- stats::coef(model) se <- sqrt(diag(stats::vcov(model, stderr = "robust"))) p <- 2 * stats::pt(abs(est / se), df = degrees_of_freedom(model, method = "any"), lower.tail = FALSE) .data_frame( Parameter = .remove_backticks_from_string(names(p)), p = as.vector(p) ) } #' @export p_value.svyglm.zip <- p_value.svyglm.nb # helper -------------------- .ci_likelihood <- function(model, ci) { glm_ci <- tryCatch( { out <- as.data.frame(stats::confint(model, level = ci, method = "likelihood"), stringsAsFactors = FALSE) names(out) <- c("CI_low", "CI_high") out$CI <- ci out$Parameter <- insight::get_parameters(model, effects = "fixed", component = "conditional")$Parameter out <- out[c("Parameter", "CI", "CI_low", "CI_high")] rownames(out) <- NULL out }, error = function(e) { NULL } ) if (is.null(glm_ci)) { glm_ci <- .ci_generic(model, ci = ci) } glm_ci } parameters/R/n_clusters.R0000644000175000017500000002053614131531727015266 0ustar nileshnilesh#' Find number of clusters in your data #' #' Similarly to [n_factors()] for factor / principal component analysis, #' 
`n_clusters` is the main function to find out the optimal numbers of clusters
#' present in the data based on the maximum consensus of a large number of
#' methods.
#' \cr
#' Essentially, there exist many methods to determine the optimal number of
#' clusters, each with pros and cons, benefits and limitations. The main
#' `n_clusters` function proposes to run all of them, and find out the number of
#' clusters that is suggested by the majority of methods (in case of ties, it
#' will select the most parsimonious solution with fewer clusters).
#' \cr
#' Note that we also implement some specific, commonly used methods, like the
#' Elbow or the Gap method, with their own visualization functionalities. See
#' the examples below for more details.
#'
#' @inheritParams check_clusterstructure
#' @param include_factors Logical, if `TRUE`, factors are converted to numerical
#'   values in order to be included in the data for determining the number of
#'   clusters. By default, factors are removed, because most methods that
#'   determine the number of clusters need numeric input only.
#' @param package Package from which methods are to be called to determine the
#'   number of clusters. Can be `"all"` or a vector containing
#'   `"NbClust"`, `"mclust"`, `"cluster"` and `"M3C"`.
#' @param fast If `FALSE`, will compute 4 more indices (sets `index = "allong"`
#'   in `NbClust`). This has been deactivated by default as it is
#'   computationally heavy.
#' @param n_max Maximal number of clusters to test.
#' @param clustering_function,gap_method Other arguments passed to other
#'   functions. `clustering_function` is used by `fviz_nbclust` and
#'   can be `kmeans`, `cluster::pam`, `cluster::clara`,
#'   `cluster::fanny`, and more. `gap_method` is used by
#'   `cluster::maxSE` to extract the optimal numbers of clusters (see its
#'   `method` argument).
#' @param method,min_size,eps_n,eps_range Arguments for DBSCAN algorithm.
#' @param distance_method The distance method (passed to [dist()]).
Used by #' algorithms relying on the distance matrix, such as `hclust` or `dbscan`. #' @param hclust_method The hierarchical clustering method (passed to [hclust()]). #' @param nbclust_method The clustering method (passed to `NbClust::NbClust()` #' as `method`). #' @inheritParams model_parameters.glm #' #' #' @note There is also a [`plot()`-method](https://easystats.github.io/see/articles/parameters.html) implemented in the \href{https://easystats.github.io/see/}{\pkg{see}-package}. #' #' @examples #' \dontrun{ #' library(parameters) #' #' # The main 'n_clusters' function =============================== #' if (require("mclust", quietly = TRUE) && require("NbClust", quietly = TRUE) && #' require("cluster", quietly = TRUE) && require("see", quietly = TRUE)) { #' n <- n_clusters(iris[, 1:4], package = c("NbClust", "mclust", "cluster")) #' n #' summary(n) #' as.data.frame(n) #' plot(n) #' #' # The following runs all the method but it significantly slower #' # n_clusters(iris[1:4], standardize = FALSE, package = "all", fast = FALSE) #' } #' } #' @export n_clusters <- function(x, standardize = TRUE, include_factors = FALSE, package = c("easystats", "NbClust", "mclust"), fast = TRUE, nbclust_method = "kmeans", n_max = 10, ...) { if (all(package == "all")) { package <- c("easystats", "NbClust", "mclust", "M3C") } x <- .prepare_data_clustering(x, include_factors = include_factors, standardize = standardize, ...) 
out <- data.frame() if ("easystats" %in% tolower(package)) { out <- rbind(out, .n_clusters_easystats(x, n_max = n_max, ...)) } if ("nbclust" %in% tolower(package)) { out <- rbind(out, .n_clusters_NbClust(x, fast = fast, nbclust_method = nbclust_method, n_max = n_max, ...)) } if ("mclust" %in% tolower(package)) { out <- rbind(out, .n_clusters_mclust(x, ...)) } if ("M3C" %in% tolower(package)) { out <- rbind(out, .n_clusters_M3C(x, fast = fast)) } # Drop Nans out <- out[!is.na(out$n_Clusters), ] # Clean out <- out[order(out$n_Clusters), ] # Arrange by n clusters row.names(out) <- NULL # Reset row index out$Method <- as.character(out$Method) # Remove duplicate methods starting with the smallest dupli <- c() for (i in 1:nrow(out)) { if (i > 1 && out[i, "Method"] %in% out$Method[1:i - 1]) { dupli <- c(dupli, i) } } if (!is.null(dupli)) { out <- out[-dupli, ] } # Add summary by_clusters <- .data_frame( n_Clusters = as.numeric(unique(out$n_Clusters)), n_Methods = as.numeric(by(out, as.factor(out$n_Clusters), function(out) n <- nrow(out))) ) attr(out, "summary") <- by_clusters attr(out, "n") <- min(as.numeric(as.character(by_clusters[by_clusters$n_Methods == max(by_clusters$n_Methods), c("n_Clusters")]))) class(out) <- c("n_clusters", "see_n_clusters", class(out)) out } #' @keywords internal .n_clusters_mclust <- function(x, ...) { insight::check_if_installed("mclust") mclustBIC <- mclust::mclustBIC # this is needed as it is internally required by the following function BIC <- mclust::mclustBIC(x, verbose = FALSE) out <- data.frame(unclass(BIC)) n <- which(out == max(out, na.rm = TRUE), arr.ind = TRUE)[1] data.frame(n_Clusters = n, Method = "Mixture", Package = "mclust") } # Methods ----------------------------------------------------------------- #' @keywords internal .n_clusters_easystats <- function(x, n_max = 10, ...) { elb <- n_clusters_elbow(x, preprocess = FALSE, n_max = n_max, ...) sil <- n_clusters_silhouette(x, preprocess = FALSE, n_max = n_max, ...) 
gap1 <- n_clusters_gap(x, preprocess = FALSE, gap_method = "firstSEmax", n_max = n_max, ...) gap2 <- n_clusters_gap(x, preprocess = FALSE, gap_method = "globalSEmax", n_max = n_max, ...) data.frame( n_Clusters = c(attributes(elb)$n, attributes(sil)$n, attributes(gap1)$n, attributes(gap2)$n), Method = c("Elbow", "Silhouette", "Gap_Maechler2012", "Gap_Dudoit2002"), Package = "easystats" ) } #' @keywords internal .n_clusters_NbClust <- function(x, fast = TRUE, nbclust_method = "kmeans", n_max = 15, ...) { insight::check_if_installed("NbClust") indices <- c("kl", "Ch", "Hartigan", "CCC", "Scott", "Marriot", "trcovw", "Tracew", "Friedman", "Rubin", "Cindex", "DB", "Silhouette", "Duda", "Pseudot2", "Beale", "Ratkowsky", "Ball", "PtBiserial", "Frey", "Mcclain", "Dunn", "SDindex", "SDbw") # c("hubert", "dindex") are graphical methods if (fast == FALSE) { indices <- c(indices, c("gap", "gamma", "gplus", "tau")) } out <- data.frame() for (idx in indices) { tryCatch( expr = { n <- NbClust::NbClust( x, index = tolower(idx), method = nbclust_method, max.nc = n_max, ... ) out <- rbind(out, data.frame( n_Clusters = n$Best.nc[["Number_clusters"]], Method = idx, Package = "NbClust" )) }, error = function(e) { NULL }, warning = function(w) { NULL } ) } out } #' @keywords internal .n_clusters_M3C <- function(x, fast = TRUE, ...) { if (!requireNamespace("M3C", quietly = TRUE)) { stop("Package 'M3C' required for this function to work. 
Please install it from Bioconductor by first running `remotes::install_github('https://github.com/crj32/M3C')`.") # Not on CRAN (but on github and bioconductor) } data <- data.frame(t(x)) colnames(data) <- paste0("x", seq(1, ncol(data))) # Add columns names as required by the package suppressMessages(out <- M3C::M3C(data, method = 2)) out <- data.frame( n_Clusters = which.max(out$scores$PCSI), Method = "Consensus clustering algorithm (penalty term)", Package = "M3C" ) # Doesn't work # if (fast == FALSE){ # suppressMessages(out <- M3C::M3C(data, method=1)) # out <- rbind(out, data.frame(n_Clusters = which.max(out$scores$RCSI), Method = "Consensus clustering algorithm (Monte Carlo)", Package = "M3C")) # } out } parameters/R/methods_metafor.R0000644000175000017500000001445714137207406016272 0ustar nileshnilesh# package metafor ####### .rma ----------------- #' Parameters from Meta-Analysis #' #' Extract and compute indices and measures to describe parameters of meta-analysis models. #' #' @param include_studies Logical, if `TRUE` (default), includes parameters #' for all studies. Else, only parameters for overall-effects are shown. 
#' @inheritParams model_parameters.default #' #' @examples #' library(parameters) #' mydat <<- data.frame( #' effectsize = c(-0.393, 0.675, 0.282, -1.398), #' stderr = c(0.317, 0.317, 0.13, 0.36) #' ) #' if (require("metafor", quietly = TRUE)) { #' model <- rma(yi = effectsize, sei = stderr, method = "REML", data = mydat) #' model_parameters(model) #' } #' \dontrun{ #' # with subgroups #' if (require("metafor", quietly = TRUE)) { #' data(dat.bcg) #' dat <- escalc( #' measure = "RR", #' ai = tpos, #' bi = tneg, #' ci = cpos, #' di = cneg, #' data = dat.bcg #' ) #' dat$alloc <- ifelse(dat$alloc == "random", "random", "other") #' model <- rma(yi, vi, mods = ~alloc, data = dat, digits = 3, slab = author) #' model_parameters(model) #' } #' #' if (require("metaBMA", quietly = TRUE)) { #' data(towels) #' m <- meta_random(logOR, SE, study, data = towels) #' model_parameters(m) #' } #' } #' #' @return A data frame of indices related to the model's parameters. #' @export model_parameters.rma <- function(model, ci = .95, bootstrap = FALSE, iterations = 1000, standardize = NULL, exponentiate = FALSE, include_studies = TRUE, verbose = TRUE, ...) { # handle ci-level that was defined in function call... ci_level <- parse(text = .safe_deparse(model$call))[[1]]$level if (!is.null(ci_level) && missing(ci)) { ci <- ci_level / 100 } meta_analysis_overall <- .model_parameters_generic( model = model, ci = ci, bootstrap = bootstrap, iterations = iterations, merge_by = "Parameter", standardize = standardize, exponentiate = exponentiate, ... ) subgroups <- NULL group_variable <- NULL # subgroup analyses? 
if (!is.null(model$formula.mods)) { group_variable <- deparse(model$formula.mods[[2]])[1] model_data <- insight::get_data(model) if (group_variable %in% colnames(model_data)) { subgroups <- sort(unique(model_data[[group_variable]])) } } if (nrow(meta_analysis_overall) > 1 && !is.null(subgroups)) { meta_analysis_overall$Subgroup <- subgroups meta_analysis_overall$Parameter <- "(Intercept)" } alpha <- (1 + ci) / 2 rma_parameters <- if (!is.null(model$slab) && !is.numeric(model$slab)) { sprintf("%s", model$slab) } else { sprintf("Study %i", 1:model[["k"]]) } # find missing if (!is.null(model$yi.f) && anyNA(model$yi.f)) { rma_parameters <- rma_parameters[match(model$yi, model$yi.f)] } rma_coeffients <- as.vector(model$yi) rma_se <- as.vector(sqrt(model$vi)) rma_ci_low <- rma_coeffients - rma_se * stats::qt(alpha, df = Inf) rma_ci_high <- rma_coeffients + rma_se * stats::qt(alpha, df = Inf) rma_statistic <- rma_coeffients / rma_se rma_ci_p <- 2 * stats::pt(abs(rma_statistic), df = Inf, lower.tail = FALSE) meta_analysis_studies <- data.frame( Parameter = rma_parameters, Coefficient = rma_coeffients, SE = rma_se, CI = ci, CI_low = rma_ci_low, CI_high = rma_ci_high, z = rma_statistic, df_error = NA, p = rma_ci_p, Weight = 1 / as.vector(model$vi), stringsAsFactors = FALSE ) # subgroup analyses? if (!is.null(subgroups)) { meta_analysis_studies$Subgroup <- insight::get_data(model, verbose = FALSE)[[group_variable]] } original_attributes <- attributes(meta_analysis_overall) out <- merge(meta_analysis_studies, meta_analysis_overall, all = TRUE, sort = FALSE) # fix intercept name out$Parameter[out$Parameter == "(Intercept)"] <- "Overall" # filter studies? 
if (isFALSE(include_studies)) { out <- out[out$Parameter == "Overall", ] } original_attributes$names <- names(out) original_attributes$row.names <- 1:nrow(out) original_attributes$pretty_names <- stats::setNames(out$Parameter, out$Parameter) attributes(out) <- original_attributes # no df out$df_error <- NULL attr(out, "object_name") <- .safe_deparse(substitute(model)) attr(out, "measure") <- model$measure if (!"Method" %in% names(out)) { out$Method <- "Meta-analysis using 'metafor'" } attr(out, "title") <- unique(out$Method) out } #' @export p_value.rma <- function(model, ...) { params <- insight::get_parameters(model) .data_frame( Parameter = .remove_backticks_from_string(params$Parameter), p = model$pval ) } #' @export ci.rma <- function(x, ci = .95, ...) { params <- insight::get_parameters(x) out <- tryCatch( { tmp <- lapply(ci, function(i) { model <- stats::update(x, level = i) .data_frame( Parameter = params$Parameter, CI = i, CI_low = as.vector(model$ci.lb), CI_high = as.vector(model$ci.ub) ) }) .remove_backticks_from_parameter_names(do.call(rbind, tmp)) }, error = function(e) { NULL } ) if (is.null(out)) { se <- standard_error(x) out <- lapply(ci, function(i) { alpha <- (1 + i) / 2 fac <- stats::qnorm(alpha) .data_frame( Parameter = params$Parameter, CI = i, CI_low = params$Estimate - as.vector(se$SE) * fac, CI_high = params$Estimate + as.vector(se$SE) * fac ) }) out <- .remove_backticks_from_parameter_names(do.call(rbind, out)) } out } #' @export standard_error.rma <- function(model, ...) { params <- insight::get_parameters(model) .data_frame( Parameter = .remove_backticks_from_string(params$Parameter), SE = model[["se"]] ) } #' @export format_parameters.rma <- function(model, ...) { params <- insight::find_parameters(model, flatten = TRUE) names(params) <- params params } parameters/R/methods_bife.R0000644000175000017500000000164214100573643015531 0ustar nileshnilesh #' @export standard_error.bife <- function(model, ...) 
{ cs <- summary(model) se <- cs$cm[, 2] .data_frame( Parameter = .remove_backticks_from_string(rownames(cs$cm)), SE = as.vector(se) ) } #' @export p_value.bife <- function(model, ...) { cs <- summary(model) p <- cs$cm[, 4] .data_frame( Parameter = .remove_backticks_from_string(rownames(cs$cm)), p = as.vector(p) ) } #' @rdname model_parameters.mlm #' @export model_parameters.bifeAPEs <- function(model, ...) { est <- model[["delta"]] se <- sqrt(diag(model[["vcov"]])) z <- est / se p <- 2 * stats::pnorm(-abs(z)) nms <- names(est) out <- data.frame(nms, est, se, z, p) colnames(out) <- c("Parameter", "Coefficient", "Std. error", "z value", "p") rownames(out) <- NULL out <- as.data.frame(out) class(out) <- c("parameters_model", "see_parameters_model", class(out)) out } parameters/R/methods_effect_size.R0000644000175000017500000000237314036353021017106 0ustar nileshnilesh#' @export ci.effectsize_std_params <- function(x, ci = .95, verbose = TRUE, ...) { se <- attr(x, "standard_error") if (is.null(se)) { if (isTRUE(verbose)) { insight::print_color("\nCould not extract standard errors of standardized coefficients.\n", "red") } return(NULL) } # for "refit" method if (is.data.frame(se) && "SE" %in% colnames(se)) { se <- se$SE } # check if we have model. 
if so, use df from model model <- .get_object(x) if (!is.null(model)) { df <- degrees_of_freedom(model, method = "any") if (!is.null(df)) { if (length(df) > 1 && length(df) != nrow(x)) { df <- Inf } } else { df <- Inf } } else { df <- Inf } out <- lapply(ci, function(i) { alpha <- (1 + i) / 2 fac <- stats::qt(alpha, df = df) data.frame( Parameter = x$Parameter, CI = i, CI_low = x$Std_Coefficient - se * fac, CI_high = x$Std_Coefficient + se * fac, stringsAsFactors = FALSE ) }) .remove_backticks_from_parameter_names(do.call(rbind, out)) } #' @export ci.effectsize_table <- ci.effectsize_std_params #' @export standard_error.effectsize_table <- standard_error.effectsize_std_params parameters/R/methods_censReg.R0000644000175000017500000000027214133000261016173 0ustar nileshnilesh #' @export model_parameters.censReg <- model_parameters.default #' @export standard_error.censReg <- standard_error.default #' @export p_value.censReg <- p_value.default parameters/R/methods_aod.R0000644000175000017500000000367514133047502015373 0ustar nileshnilesh# classes: .glimML ## TODO add ci_method later? #################### .glimML ------ #' @export model_parameters.glimML <- function(model, ci = .95, bootstrap = FALSE, iterations = 1000, component = c("conditional", "random", "dispersion", "all"), standardize = NULL, exponentiate = FALSE, robust = FALSE, p_adjust = NULL, verbose = TRUE, ...) { component <- match.arg(component) if (component == "all") { merge_by <- c("Parameter", "Component") } else { merge_by <- "Parameter" } # dispersion is just an alias... if (component == "dispersion") { component <- "random" } out <- .model_parameters_generic( model = model, ci = ci, component = component, bootstrap = bootstrap, iterations = iterations, merge_by = "Parameter", standardize = standardize, exponentiate = exponentiate, robust = robust, p_adjust = p_adjust, verbose = verbose, ... 
) attr(out, "object_name") <- deparse(substitute(model), width.cutoff = 500) out } #' @export standard_error.glimML <- function(model, ...) { insight::check_if_installed("aod") s <- methods::slot(aod::summary(model), "Coef") se <- s[, 2] .data_frame( Parameter = .remove_backticks_from_string(rownames(s)), SE = as.vector(se) ) } #' @export p_value.glimML <- function(model, ...) { insight::check_if_installed("aod") s <- methods::slot(aod::summary(model), "Coef") p <- s[, 4] .data_frame( Parameter = .remove_backticks_from_string(rownames(s)), p = as.vector(p) ) } parameters/R/dof_betwithin.R0000644000175000017500000000173114142732056015726 0ustar nileshnilesh#' @rdname p_value_betwithin #' @export dof_betwithin <- function(model) { if (!insight::model_info(model, verbose = FALSE)$is_mixed) { stop("Model must be a mixed model.") } ngrps <- sum(.n_randomeffects(model)) parameters <- insight::find_parameters(model, effects = "fixed")[["conditional"]] within_effects <- unlist(insight::find_random_slopes(model)) has_intcp <- insight::has_intercept(model) ddf_within <- ngrps - n_parameters(model) ddf_between <- insight::n_obs(model, disaggregate = TRUE) - ngrps - n_parameters(model) if (has_intcp) { ddf_between <- ddf_between - 1 ddf_within <- ddf_within - 1 } within_index <- match(within_effects, parameters) ddf <- stats::setNames(1:length(parameters), parameters) if (length(within_index) > 0) { ddf[match(within_effects, parameters)] <- ddf_within ddf[-match(within_effects, parameters)] <- ddf_between } else { ddf <- ddf_between } ddf } parameters/R/methods_panelr.R0000644000175000017500000001125114142707642016106 0ustar nileshnilesh# .wbm, .wbgee # model parameters ------------------- #' @inheritParams model_parameters.merMod #' @export model_parameters.wbm <- function(model, ci = .95, effects = "all", group_level = FALSE, bootstrap = FALSE, iterations = 1000, exponentiate = FALSE, p_adjust = NULL, verbose = TRUE, include_sigma = FALSE, ...) 
{ effects <- match.arg(effects, choices = c("fixed", "random", "all")) params <- .mixed_model_parameters_generic( model = model, ci = ci, bootstrap = bootstrap, iterations = iterations, merge_by = c("Parameter", "Component"), standardize = NULL, exponentiate = exponentiate, effects = effects, robust = FALSE, p_adjust = p_adjust, group_level = group_level, ci_method = NULL, include_sigma = include_sigma, ... ) attr(params, "object_name") <- deparse(substitute(model), width.cutoff = 500) class(params) <- c("parameters_model", "see_parameters_model", "data.frame") params } #' @export model_parameters.wbgee <- model_parameters.wbm # standard errors ------------------- #' @export standard_error.wbm <- function(model, ...) { s <- summary(model) se <- c( s$within_table[, "S.E."], s$between_table[, "S.E."], s$ints_table[, "S.E."] ) params <- insight::get_parameters(model, effects = "fixed") .data_frame( Parameter = params$Parameter, SE = as.vector(se), Component = params$Component ) } #' @export standard_error.wbgee <- standard_error.wbm # p values ------------------- #' @export p_value.wbm <- function(model, ...) { s <- summary(model) p <- c( s$within_table[, "p"], s$between_table[, "p"], s$ints_table[, "p"] ) params <- insight::get_parameters(model, effects = "fixed") .data_frame( Parameter = params$Parameter, p = as.vector(p), Component = params$Component ) } #' @export p_value.wbgee <- p_value.wbm # utils ------------------- .mixed_model_parameters_generic <- function(model, ci, bootstrap, iterations, merge_by, standardize, exponentiate, effects, robust, p_adjust, group_level, ci_method, include_sigma = FALSE, ...) 
{ params <- params_random <- params_variance <- att <- NULL if (effects %in% c("fixed", "all")) { params <- .model_parameters_generic( model = model, ci = ci, bootstrap = bootstrap, iterations = iterations, merge_by = merge_by, standardize = standardize, exponentiate = exponentiate, effects = "fixed", robust = FALSE, p_adjust = p_adjust, ci_method = ci_method, include_sigma = include_sigma, ... ) params$Effects <- "fixed" att <- attributes(params) } if (effects %in% c("random", "all") && isTRUE(group_level)) { params_random <- .extract_random_parameters(model, ci = ci, effects = effects) } if (effects %in% c("random", "all") && isFALSE(group_level)) { params_variance <- .extract_random_variances(model, ci = ci, effects = effects, ci_method = ci_method) } # merge random and fixed effects, if necessary if (!is.null(params) && (!is.null(params_random) || !is.null(params_variance))) { params$Level <- NA params$Group <- "" # reorder if (!is.null(params_random)) { params <- params[match(colnames(params_random), colnames(params))] } else { params <- params[match(colnames(params_variance), colnames(params))] } } params <- rbind(params, params_random, params_variance) if (!is.null(att)) { attributes(params) <- utils::modifyList(att, attributes(params)) } # remove empty column if (!is.null(params$Level) && all(is.na(params$Level))) { params$Level <- NULL } params } parameters/R/ci_satterthwaite.R0000644000175000017500000000072214140570131016433 0ustar nileshnilesh#' @rdname p_value_satterthwaite #' @export ci_satterthwaite <- function(model, ci = .95, robust = FALSE, ...) { df_satter <- dof_satterthwaite(model) out <- lapply(ci, function(i) { .ci_dof( model = model, ci = i, dof = df_satter, effects = "fixed", component = "all", method = "satterthwaite", robust = robust, ... 
) }) out <- do.call(rbind, out) row.names(out) <- NULL out } parameters/R/methods_FactoMineR.R0000644000175000017500000000501714106662543016617 0ustar nileshnilesh#' @inheritParams model_parameters.default #' @rdname model_parameters.principal #' @export model_parameters.PCA <- function(model, sort = FALSE, threshold = NULL, labels = NULL, verbose = TRUE, ...) { loadings <- as.data.frame(model$var$coord) n <- model$call$ncp # Get summary eig <- as.data.frame(model$eig[1:n, ]) data_summary <- .data_frame( Component = names(loadings), Eigenvalues = eig$eigenvalue, Variance = eig$`percentage of variance` / 100, Variance_Cumulative = eig$`cumulative percentage of variance` / 100 ) data_summary$Variance_Proportion <- data_summary$Variance / sum(data_summary$Variance) # Format loadings <- cbind(data.frame(Variable = row.names(loadings)), loadings) row.names(loadings) <- NULL # Labels if (!is.null(labels)) { loadings$Label <- labels loadings <- loadings[c("Variable", "Label", names(loadings)[!names(loadings) %in% c("Variable", "Label")])] loading_cols <- 3:(n + 2) } else { loading_cols <- 2:(n + 1) } loadings$Complexity <- (apply(loadings[, loading_cols, drop = FALSE], 1, function(x) sum(x^2)))^2 / apply(loadings[, loading_cols, drop = FALSE], 1, function(x) sum(x^4)) # Add attributes attr(loadings, "summary") <- data_summary attr(loadings, "model") <- model attr(loadings, "rotation") <- "none" attr(loadings, "scores") <- as.data.frame(model$ind$coord) attr(loadings, "additional_arguments") <- list(...) 
attr(loadings, "n") <- n attr(loadings, "loadings_columns") <- loading_cols # Sorting if (isTRUE(sort)) { loadings <- .sort_loadings(loadings) } # Replace by NA all cells below threshold if (!is.null(threshold)) { loadings <- .filter_loadings(loadings, threshold = threshold) } # Add some more attributes attr(loadings, "loadings_long") <- .long_loadings(loadings, threshold = threshold, loadings_columns = loading_cols) # add class-attribute for printing if ("PCA" %in% class(model)) { attr(loadings, "type") <- "pca" class(loadings) <- unique(c("parameters_pca", "see_parameters_pca", class(loadings))) } else if ("FAMD" %in% class(model)) { attr(loadings, "type") <- "fa" class(loadings) <- unique(c("parameters_efa", "see_parameters_efa", class(loadings))) } loadings } #' @export model_parameters.FAMD <- model_parameters.PCA parameters/R/methods_BayesX.R0000644000175000017500000000107514131014351016005 0ustar nileshnilesh#' @export standard_error.bayesx <- function(model, ...) { .data_frame( Parameter = insight::find_parameters(model, component = "conditional", flatten = TRUE), SE = model$fixed.effects[, 2] ) } #' @export ci.bayesx <- function(x, ci = .95, ...) { .ci_generic(model = x, ci = ci, dof = Inf, robust = FALSE, component = "conditional") } #' @export p_value.bayesx <- function(model, ...) { .data_frame( Parameter = insight::find_parameters(model, component = "conditional", flatten = TRUE), p = model$fixed.effects[, 4] ) } parameters/R/methods_ordinal.R0000644000175000017500000000641214133000527016244 0ustar nileshnilesh# model parameters ------------------- #' @rdname model_parameters.mlm #' @export model_parameters.clm2 <- function(model, ci = .95, bootstrap = FALSE, iterations = 1000, component = c("all", "conditional", "scale"), standardize = NULL, exponentiate = FALSE, p_adjust = NULL, verbose = TRUE, ...) 
{ component <- match.arg(component) if (component == "all") { merge_by <- c("Parameter", "Component") } else { merge_by <- "Parameter" } ## TODO check merge by out <- .model_parameters_generic( model = model, ci = ci, component = component, bootstrap = bootstrap, iterations = iterations, merge_by = c("Parameter", "Component"), standardize = standardize, exponentiate = exponentiate, p_adjust = p_adjust, ... ) attr(out, "object_name") <- deparse(substitute(model), width.cutoff = 500) out } #' @export model_parameters.clmm2 <- model_parameters.clm2 #' @rdname model_parameters.merMod #' @export model_parameters.clmm <- model_parameters.cpglmm # CI --------------------- ## TODO residual df? #' @export ci.clm2 <- function(x, ci = .95, component = c("all", "conditional", "scale"), ...) { component <- match.arg(component) .ci_generic(model = x, ci = ci, dof = Inf, component = component) } #' @export ci.clmm2 <- ci.clm2 # standard errors ----------------- #' @rdname standard_error #' @export standard_error.clm2 <- function(model, component = c("all", "conditional", "scale"), ...) { component <- match.arg(component) stats <- .get_se_from_summary(model) parms <- insight::get_parameters(model, component = component) .data_frame( Parameter = parms$Parameter, SE = stats[parms$Parameter], Component = parms$Component ) } #' @export standard_error.clmm2 <- standard_error.clm2 # p values ---------------- #' @rdname p_value.DirichletRegModel #' @export p_value.clm2 <- function(model, component = c("all", "conditional", "scale"), ...) 
{ component <- match.arg(component) params <- insight::get_parameters(model) cs <- stats::coef(summary(model)) p <- cs[, 4] out <- .data_frame( Parameter = params$Parameter, Component = params$Component, p = as.vector(p) ) if (component != "all") { out <- out[out$Component == component, ] } out } #' @export p_value.clmm2 <- p_value.clm2 # simulate model ------------------- #' @export simulate_model.clm2 <- function(model, iterations = 1000, component = c("all", "conditional", "scale"), ...) { component <- match.arg(component) out <- .simulate_model(model, iterations, component = component) class(out) <- c("parameters_simulate_model", class(out)) attr(out, "object_name") <- .safe_deparse(substitute(model)) out } #' @export simulate_model.clmm2 <- simulate_model.clm2 parameters/R/ci_kenward.R0000644000175000017500000000144314122135652015204 0ustar nileshnilesh#' @rdname p_value_kenward #' @export ci_kenward <- function(model, ci = .95) { .check_REML_fit(model) df_kr <- dof_kenward(model) out <- lapply(ci, function(i) { .ci_dof( model = model, ci = i, dof = df_kr, effects = "fixed", component = "all", method = "kenward", se = attr(df_kr, "se", exact = TRUE) ) }) out <- do.call(rbind, out) row.names(out) <- NULL out } .ci_kenward_dof <- function(model, ci = .95, df_kr) { out <- lapply(ci, function(i) { .ci_dof( model = model, ci = i, dof = df_kr$df_error, effects = "fixed", component = "all", method = "kenward", se = df_kr$SE ) }) out <- do.call(rbind, out) row.names(out) <- NULL out } parameters/R/methods_mcmc.R0000644000175000017500000000010213766351057015543 0ustar nileshnilesh#' @export model_parameters.mcmc <- model_parameters.data.frame parameters/R/methods_BBMM.R0000644000175000017500000000332214133047612015334 0ustar nileshnilesh #' @export ci.BBmm <- ci.default #' @export ci.BBreg <- ci.default #' @export standard_error.BBmm <- function(model, ...) 
{ .data_frame( Parameter = insight::find_parameters(model, effects = "fixed", component = "conditional", flatten = TRUE ), SE = as.data.frame(summary(model)$fixed.coefficients)$StdErr ) } #' @export standard_error.BBreg <- function(model, ...) { .data_frame( Parameter = insight::find_parameters(model, effects = "fixed", component = "conditional", flatten = TRUE ), SE = as.data.frame(summary(model)$coefficients)$StdErr ) } ## TODO add ci_method later? ## TODO BBmm only has p based on normal distribution assumptions? #' @export p_value.BBmm <- function(model, ...) { .data_frame( Parameter = insight::find_parameters(model, effects = "fixed", component = "conditional", flatten = TRUE ), p = as.data.frame(summary(model)$fixed.coefficients)$p.value ) } ## TODO add ci_method later? ## TODO BBreg only has p based on normal distribution assumptions? #' @export p_value.BBreg <- function(model, ...) { .data_frame( Parameter = insight::find_parameters(model, effects = "fixed", component = "conditional", flatten = TRUE ), p = as.data.frame(summary(model)$coefficients)$p.value ) } #' @export degrees_of_freedom.BBmm <- function(model, method = "residual", ...) { if (method %in% c("residual", "wald")) { return(model$df) } else { return(degrees_of_freedom.default(model = model, method = method, ...)) } } #' @export degrees_of_freedom.BBreg <- degrees_of_freedom.BBmm parameters/R/check_clusterstructure.R0000644000175000017500000001125314160324505017674 0ustar nileshnilesh#' Check suitability of data for clustering #' #' This checks whether the data is appropriate for clustering using the Hopkins' #' H statistic of given data. If the value of Hopkins statistic is close to 0 #' (below 0.5), then we can reject the null hypothesis and conclude that the #' dataset is significantly clusterable. A value for H lower than 0.25 indicates #' a clustering tendency at the `90%` confidence level. 
The visual assessment of #' cluster tendency (VAT) approach (Bezdek and Hathaway, 2002) consists in #' investigating the heatmap of the ordered dissimilarity matrix. Following #' this, one can potentially detect the clustering tendency by counting the #' number of square shaped blocks along the diagonal. #' #' @param x A data frame. #' @param standardize Standardize the dataframe before clustering (default). #' @param distance Distance method used. Other methods than "euclidean" #' (default) are exploratory in the context of clustering tendency. See #' [stats::dist()] for list of available methods. #' @param ... Arguments passed to or from other methods. #' #' @examples #' \donttest{ #' library(parameters) #' check_clusterstructure(iris[, 1:4]) #' plot(check_clusterstructure(iris[, 1:4])) #' } #' @return The H statistic (numeric) #' #' @seealso [check_kmo()], [check_sphericity_bartlett()] and [check_factorstructure()]. #' #' @references #' - Lawson, R. G., & Jurs, P. C. (1990). New index for clustering #' tendency and its application to chemical problems. Journal of chemical #' information and computer sciences, 30(1), 36-41. #' #' - Bezdek, J. C., & Hathaway, R. J. (2002, May). VAT: A tool for visual #' assessment of (cluster) tendency. In Proceedings of the 2002 International #' Joint Conference on Neural Networks. IJCNN02 (3), 2225-2230. IEEE. #' @export check_clusterstructure <- function(x, standardize = TRUE, distance = "euclidean", ...) 
{ if (standardize) { x <- as.data.frame(scale(x)) } H <- .clusterstructure_hopkins(x, distance = distance) if (H < 0.5) { text <- paste0( "The dataset is suitable for clustering (Hopkins' H = ", insight::format_value(H), ").\n" ) color <- "green" } else { text <- paste0( "The dataset is not suitable for clustering (Hopkins' H = ", insight::format_value(H), ").\n" ) color <- "red" } out <- list( H = H, dissimilarity_matrix = .clusterstructure_dm(x, distance = distance, method = "ward.D2") ) attr(out, "text") <- text attr(out, "color") <- color attr(out, "title") <- "Clustering tendency" class(out) <- c("see_check_clusterstructure", "check_clusterstructure", "easystats_check", class(out)) out } #' @export plot.check_clusterstructure <- function(x, ...) { # Can be reimplemented with ggplot in see stats::heatmap( x$dissimilarity_matrix, Rowv = NA, Colv = NA, labRow = FALSE, labCol = FALSE, col = grDevices::colorRampPalette(c("#2196F3", "#FAFAFA", "#E91E63"))(100) ) } #' @keywords internal .clusterstructure_dm <- function(x, distance = "euclidean", method = "ward.D2") { d <- stats::dist(x, method = distance) hc <- stats::hclust(d, method = method) as.matrix(d)[hc$order, hc$order] } #' @keywords internal .clusterstructure_hopkins <- function(x, distance = "euclidean") { # This is based on the hopkins() function from the clustertend package if (is.data.frame(x)) { x <- as.matrix(x) } n <- nrow(x) - 1 c <- apply(x, 2, min) # minimum value per column d <- apply(x, 2, max) p <- matrix(0, ncol = ncol(x), nrow = n) # n vectors of space for (i in 1:ncol(x)) { p[, i] <- stats::runif(n, min = c[i], max = d[i]) } k <- round(stats::runif(n, 1, nrow(x))) q <- as.matrix(x[k, ]) distp <- rep(0, nrow(x)) # distq=rep(0,nrow(x)-1) distq <- 0 minp <- rep(0, n) minq <- rep(0, n) for (i in 1:n) { distp[1] <- stats::dist(rbind(p[i, ], x[1, ]), method = distance) minqi <- stats::dist(rbind(q[i, ], x[1, ]), method = distance) for (j in 2:nrow(x)) { distp[j] <- stats::dist(rbind(p[i, ], x[j, 
]), method = distance) error <- q[i, ] - x[j, ] if (sum(abs(error)) != 0) { # distq[j]<-stats::dist(rbind(q[i,],x[j,])) distq <- stats::dist(rbind(q[i, ], x[j, ]), method = distance) if (distq < minqi) { minqi <- distq } } } minp[i] <- min(distp) # minq[i]<-apply(distq,1,min) minq[i] <- minqi } H <- (sum(minq) / (sum(minp) + sum(minq))) H } parameters/R/methods_bayestestR.R0000644000175000017500000000007214036353021016737 0ustar nileshnilesh#' @importFrom bayestestR ci #' @export bayestestR::ci parameters/R/5_simulate_model.R0000644000175000017500000001477614077615700016350 0ustar nileshnilesh#' Simulated draws from model coefficients #' #' Simulate draws from a statistical model to return a data frame of estimates. #' #' @param model Statistical model (no Bayesian models). #' @param component Should all parameters, parameters for the conditional model, #' or for the zero-inflated part of the model be returned? Applies to models #' with zero-inflated component. `component` may be one of `"conditional"`, #' `"zi"`, `"zero-inflated"`, `"dispersion"` or `"all"` #' (default). May be abbreviated. #' @inheritParams bootstrap_model #' @inheritParams p_value #' #' @return A data frame. #' #' @seealso [`simulate_parameters()`][simulate_parameters], #' [`bootstrap_model()`][bootstrap_model], #' [`bootstrap_parameters()`][bootstrap_parameters] #' #' @details #' \subsection{Technical Details}{ #' `simulate_model()` is a computationally faster alternative #' to `bootstrap_model()`. Simulated draws for coefficients are based #' on a multivariate normal distribution (`MASS::mvrnorm()`) with mean #' `mu = coef(model)` and variance `Sigma = vcov(model)`. #' } #' \subsection{Models with Zero-Inflation Component}{ #' For models from packages \pkg{glmmTMB}, \pkg{pscl}, \pkg{GLMMadaptive} and #' \pkg{countreg}, the `component` argument can be used to specify #' which parameters should be simulated. 
For all other models, parameters #' from the conditional component (fixed effects) are simulated. This may #' include smooth terms, but not random effects. #' } #' #' @examples #' library(parameters) #' model <- lm(Sepal.Length ~ Species * Petal.Width + Petal.Length, data = iris) #' head(simulate_model(model)) #' \donttest{ #' if (require("glmmTMB", quietly = TRUE)) { #' model <- glmmTMB( #' count ~ spp + mined + (1 | site), #' ziformula = ~mined, #' family = poisson(), #' data = Salamanders #' ) #' head(simulate_model(model)) #' head(simulate_model(model, component = "zero_inflated")) #' } #' } #' @export simulate_model <- function(model, iterations = 1000, ...) { UseMethod("simulate_model") } # Models with single component only ----------------------------------------- #' @export simulate_model.default <- function(model, iterations = 1000, ...) { out <- .simulate_model(model, iterations, component = "conditional", effects = "fixed") class(out) <- c("parameters_simulate_model", class(out)) attr(out, "object_name") <- .safe_deparse(substitute(model)) out } #' @export simulate_model.lm <- simulate_model.default #' @export simulate_model.glmmadmb <- simulate_model.default #' @export simulate_model.cglm <- simulate_model.default #' @export simulate_model.cpglm <- simulate_model.default #' @export simulate_model.cpglmm <- simulate_model.default #' @export simulate_model.feglm <- simulate_model.default #' @export simulate_model.fixest <- simulate_model.default #' @export simulate_model.iv_robust <- simulate_model.default #' @export simulate_model.rq <- simulate_model.default #' @export simulate_model.crq <- simulate_model.default #' @export simulate_model.nlrq <- simulate_model.default #' @export simulate_model.speedglm <- simulate_model.default #' @export simulate_model.speedlm <- simulate_model.default #' @export simulate_model.glm <- simulate_model.default #' @export simulate_model.glmRob <- simulate_model.default #' @export simulate_model.lmRob <- 
simulate_model.default #' @export simulate_model.gls <- simulate_model.default #' @export simulate_model.lme <- simulate_model.default #' @export simulate_model.crch <- simulate_model.default #' @export simulate_model.biglm <- simulate_model.default #' @export simulate_model.plm <- simulate_model.default #' @export simulate_model.flexsurvreg <- simulate_model.default #' @export simulate_model.LORgee <- simulate_model.default #' @export simulate_model.feis <- simulate_model.default #' @export simulate_model.lmrob <- simulate_model.default #' @export simulate_model.glmrob <- simulate_model.default #' @export simulate_model.merMod <- simulate_model.default #' @export simulate_model.gamlss <- simulate_model.default #' @export simulate_model.lm_robust <- simulate_model.default #' @export simulate_model.coxme <- simulate_model.default #' @export simulate_model.geeglm <- simulate_model.default #' @export simulate_model.gee <- simulate_model.default #' @export simulate_model.clm <- simulate_model.default #' @export simulate_model.polr <- simulate_model.default #' @export simulate_model.coxph <- simulate_model.default #' @export simulate_model.logistf <- simulate_model.default #' @export simulate_model.truncreg <- simulate_model.default #' @export simulate_model.glimML <- simulate_model.default #' @export simulate_model.lrm <- simulate_model.default #' @export simulate_model.psm <- simulate_model.default #' @export simulate_model.ols <- simulate_model.default #' @export simulate_model.rms <- simulate_model.default #' @export simulate_model.vglm <- simulate_model.default #' @export simulate_model.censReg <- simulate_model.default #' @export simulate_model.survreg <- simulate_model.default #' @export simulate_model.multinom <- simulate_model.default #' @export simulate_model.brmultinom <- simulate_model.default #' @export simulate_model.bracl <- simulate_model.default # helper ----------------------------------------- .simulate_model <- function(model, iterations, component = 
"conditional", effects = "fixed") { insight::check_if_installed("MASS") if (is.null(iterations)) iterations <- 1000 params <- insight::get_parameters(model, effects = effects, component = component, verbose = FALSE) beta <- stats::setNames(params$Estimate, params$Parameter) # Transform to named vector varcov <- insight::get_varcov(model, component = component, effects = effects) as.data.frame(MASS::mvrnorm(n = iterations, mu = beta, Sigma = varcov)) ## Alternative approach, similar to arm::sim() # k <- length(insight::find_parameters(model, effects = "fixed", component = "conditional", flatten = TRUE)) # n <- insight::n_obs(model) # beta.cov <- stats::vcov(model) / stats::sigma(model) # s <- vector("double", iterations) # b <- array(NA, c(100, k)) # for (i in 1:iterations) { # s[i] <- stats::sigma(model) * sqrt((n - k) / rchisq(1, n - k)) # b[i,] <- MASS::mvrnorm(n = 1, mu = beta, Sigma = beta.cov * s[i] ^ 2) # } } parameters/R/methods_varest.R0000644000175000017500000000634614022455433016135 0ustar nileshnilesh# .varest #' @export model_parameters.varest <- function(model, ci = .95, bootstrap = FALSE, iterations = 1000, standardize = NULL, exponentiate = FALSE, robust = FALSE, p_adjust = NULL, verbose = TRUE, ...) { params <- lapply(names(model$varresult), function(i) { out <- model_parameters( model = model$varresult[[i]], ci = ci, bootstrap = bootstrap, iterations = iterations, standardize = standardize, exponentiate = exponentiate, robust = robust, p_adjust = p_adjust, verbose = verbose, ... ) out$Group <- paste0("Equation ", i) out }) params <- do.call(rbind, params) attr(params, "object_name") <- deparse(substitute(model), width.cutoff = 500) params } #' @export ci.varest <- function(x, ci = .95, method = NULL, ...) { params <- lapply(names(x$varresult), function(i) { out <- ci(x = x$varresult[[i]], ci = ci, method = method, ...) 
out$Group <- paste0("Equation ", i) out }) do.call(rbind, params) } #' @export standard_error.varest <- function(model, method = NULL, ...) { params <- lapply(names(model$varresult), function(i) { out <- standard_error(model = model$varresult[[i]], method = method, ...) out$Group <- paste0("Equation ", i) out }) do.call(rbind, params) } #' @export p_value.varest <- function(model, ...) { params <- lapply(names(model$varresult), function(i) { out <- p_value(model = model$varresult[[i]], ...) out$Group <- paste0("Equation ", i) out }) do.call(rbind, params) } #' @export simulate_model.varest <- function(model, iterations = 1000, ...) { out <- lapply(names(model$varresult), function(i) { simulate_model(model = model$varresult[[i]], iterations = iterations, ...) }) names(out) <- paste0("Equation ", names(model$varresult)) attr(out, "object_name") <- .safe_deparse(substitute(model)) out } #' @export simulate_parameters.varest <- function(model, iterations = 1000, centrality = "median", ci = .95, ci_method = "quantile", test = "p-value", ...) { data <- simulate_model(model, iterations = iterations, ...) out <- lapply(names(data), function(i) { x <- .summary_bootstrap( data = data[[i]], test = test, centrality = centrality, ci = ci, ci_method = ci_method, ... ) x$Group <- i x }) out <- do.call(rbind, out) class(out) <- c("parameters_simulate", "see_parameters_simulate", class(out)) attr(out, "object_name") <- deparse(substitute(model), width.cutoff = 500) attr(out, "iterations") <- iterations attr(out, "ci") <- ci out } parameters/R/methods_ggeffects.R0000644000175000017500000000550614046421300016553 0ustar nileshnilesh#' @export model_parameters.ggeffects <- function(model, parameters = NULL, verbose = TRUE, ...) 
{ ci <- attributes(model)$ci.lvl terms <- attributes(model)$terms[-1] focal_term <- attributes(model)$terms[1] constant_values <- attributes(model)$constant.values title <- attr(model, "title") # exception for survival if (attributes(model)$type %in% c("surv", "survival", "cumhaz", "cumulative_hazard")) { focal_term <- "Time" } model <- as.data.frame(model) # rename columns new_colnames <- colnames(model) new_colnames[new_colnames == "predicted"] <- "Predicted" new_colnames[new_colnames == "std.error"] <- "SE" new_colnames[new_colnames == "conf.low"] <- "CI_low" new_colnames[new_colnames == "conf.high"] <- "CI_high" new_colnames[new_colnames == "group"] <- "Component" new_colnames[new_colnames == "facet"] <- "Group" new_colnames[new_colnames == "response"] <- "Subgroup" colnames(model) <- new_colnames model$SE <- NULL if (.n_unique(model$Component) == 1) { model$Component <- NULL } if (!is.null(focal_term)) { colnames(model)[1] <- focal_term } if (length(terms) >= 1) { model$Component <- paste0(terms[1], " = ", model$Component) } if (length(terms) >= 2) { model$Group <- paste0(terms[2], " = ", model$Group) } if (length(terms) >= 3) { model$Subgroup <- paste0(terms[3], " = ", model$Subgroup) } # filter parameters if (!is.null(parameters)) { model <- .filter_parameters(model, parameters, verbose = verbose) } model <- .add_model_parameters_attributes(model, model, ci = ci, verbose = verbose) # special attributes attr(model, "is_ggeffects") <- TRUE attr(model, "footer_text") <- .generate_ggeffects_footer(constant_values) attr(model, "title") <- c(title, "blue") attr(model, "object_name") <- deparse(substitute(model), width.cutoff = 500) class(model) <- c("parameters_model", "data.frame") model } .generate_ggeffects_footer <- function(constant_values) { cv <- lapply(constant_values, function(.x) { if (is.numeric(.x)) { sprintf("%.2f", .x) } else { as.character(.x) } }) footer <- NULL if (!.is_empty_object(cv)) { cv.names <- names(cv) cv.space <- max(nchar(cv.names)) # 
ignore this string when determining maximum length poplev <- which(cv %in% c("NA (population-level)", "0 (population-level)")) if (!.is_empty_object(poplev)) { mcv <- cv[-poplev] } else { mcv <- cv } if (!.is_empty_object(mcv)) { cv.space2 <- max(nchar(mcv)) } else { cv.space2 <- 0 } adjusted_predictors <- paste0(sprintf("* %*s = %*s", cv.space, cv.names, cv.space2, cv), collapse = "\n") footer <- paste0("Adjusted for:\n", adjusted_predictors) } footer } parameters/R/methods_lrm.R0000644000175000017500000000227114133000504015400 0ustar nileshnilesh## from rms / rmsb package # model parameters ------------- #' @export model_parameters.blrm <- model_parameters.bayesQR # standard error ------------- #' @export standard_error.lrm <- function(model, ...) { se <- sqrt(diag(stats::vcov(model))) # psm-models returns vcov-matrix w/o dimnames if (is.null(names(se))) names(se) <- names(stats::coef(model)) .data_frame( Parameter = .remove_backticks_from_string(names(se)), SE = as.vector(se) ) } #' @export standard_error.ols <- standard_error.lrm #' @export standard_error.rms <- standard_error.lrm #' @export standard_error.psm <- standard_error.lrm # p-values ----------------------- #' @export p_value.lrm <- function(model, ...) { stat <- insight::get_statistic(model) p <- 2 * stats::pt(abs(stat$Statistic), df = degrees_of_freedom(model, method = "any"), lower.tail = FALSE) .data_frame( Parameter = .remove_backticks_from_string(stat$Parameter), p = as.vector(p) ) } #' @export p_value.ols <- p_value.lrm #' @export p_value.rms <- p_value.lrm #' @export p_value.psm <- p_value.lrm #' @export p_value.blrm <- p_value.BFBayesFactor parameters/R/cluster_meta.R0000644000175000017500000000733714131075422015573 0ustar nileshnilesh#' Metaclustering #' #' One of the core "issue" of statistical clustering is that, in many cases, different methods will give different results. 
#' @export
cluster_meta <- function(list_of_clusters, rownames = NULL, ...) {
  x <- list()

  # Sanitize input: label each solution and extract its cluster assignments.
  # seq_along() (instead of 1:length()) avoids the 1:0 loop on empty input.
  for (i in seq_along(list_of_clusters)) {
    # Get name; unnamed elements yield NULL (fully unnamed list) or ""
    # (partially named list) - both get a default label, since x[[""]] errors
    name <- names(list_of_clusters[i])
    if (is.null(name) || !nzchar(name)) name <- paste0("Solution", i)

    solution <- list_of_clusters[[i]]
    if (inherits(solution, "cluster_analysis")) {
      # append the clustering method to default labels for readability
      if (name == paste0("Solution", i)) {
        name <- paste0(name, "_", attributes(solution)$method)
      }
      solution <- stats::predict(solution, ...)
    }
    # "0" marks unclustered observations; treat them as missing
    solution[solution == "0"] <- NA
    x[[name]] <- solution
  }

  if (length(x) == 0) {
    stop("No clustering solutions were provided.")
  }

  # Sanity check: all solutions must cover the same number of observations
  if (length(unique(sapply(x, length))) != 1) {
    stop("The clustering solutions are not of equal lengths.")
  }

  # Convert to dataframe (one column per solution)
  data <- as.data.frame(x)
  if (!is.null(names(solution))) row.names(data) <- names(solution)
  if (!is.null(rownames)) row.names(data) <- rownames

  # Get pairwise co-clustering probability matrix
  .cluster_meta_matrix(data)
}
#' @export
degrees_of_freedom.bfsl <- function(model, method = "residual", ...) {
  # a NULL method falls back to the Wald approach
  if (is.null(method)) method <- "wald"

  valid_methods <- c("analytical", "any", "fit", "wald", "residual", "normal")
  method <- match.arg(tolower(method), choices = valid_methods)

  # these methods all map to the residual degrees of freedom stored on the model
  if (method %in% c("wald", "residual", "fit")) {
    return(model$df.residual)
  }
  degrees_of_freedom.default(model, method = method, ...)
}
#' @export
p_value.Sarlm <- function(model, ...) {
  # z-statistics for all coefficients
  stat <- insight::get_statistic(model)
  # two-sided p-value from the standard normal distribution
  p_vals <- 2 * stats::pnorm(abs(stat$Statistic), lower.tail = FALSE)
  .data_frame(
    Parameter = stat$Parameter,
    p = p_vals
  )
}
#' Safe transformation from factor/character to numeric
#' @keywords internal
.factor_to_numeric <- function(x, lowest = NULL) {
  # numeric input passes through untouched; logicals become 0/1
  if (is.numeric(x)) return(x)
  if (is.logical(x)) return(as.numeric(x))

  # if the labels are not parseable as numbers, replace the (dropped)
  # levels by consecutive integer codes 1..n
  parsed <- suppressWarnings(as.numeric(as.character(stats::na.omit(x))))
  if (anyNA(parsed)) {
    if (is.character(x)) x <- as.factor(x)
    x <- droplevels(x)
    levels(x) <- 1:nlevels(x)
  }

  out <- as.numeric(as.character(x))

  # optionally shift values so the minimum equals `lowest`
  if (!is.null(lowest)) {
    out <- out - (min(out) - lowest)
  }
  out
}
#' Safe transformation from factor/character to dummy (one-hot) columns
#'
#' Numeric input is returned unchanged. Otherwise one 0/1 indicator column
#' is created per value, and rows with missing input stay NA.
#' @keywords internal
.factor_to_dummy <- function(x) {
  if (is.numeric(x)) {
    return(x)
  }

  # get unique levels / values
  values <- if (is.factor(x)) {
    levels(x)
  } else {
    stats::na.omit(unique(x))
  }

  dummy <- as.data.frame(do.call(cbind, lapply(values, function(i) {
    out <- rep(0, length(x))
    out[is.na(x)] <- NA
    # use which() so NA results of `x == i` are dropped; a logical subscript
    # containing NA would error ("NAs are not allowed in subscripted assignments")
    out[which(x == i)] <- 1
    out
  })))
  colnames(dummy) <- values
  dummy
}
#' Count distinct values; NULL counts as zero
#' @keywords internal
.n_unique <- function(x, na.rm = TRUE) {
  if (is.null(x)) return(0)
  values <- if (isTRUE(na.rm)) stats::na.omit(x) else x
  length(unique(values))
}
#' Can be `TRUE` (or `"all"` or `"std.all"`) for standardized #' estimates based on both the variances of observed and latent variables; #' `"latent"` (or `"std.lv"`) for standardized estimates based #' on the variances of the latent variables only; or `"no_exogenous"` #' (or `"std.nox"`) for standardized estimates based on both the #' variances of observed and latent variables, but not the variances of #' exogenous covariates. See `lavaan::standardizedsolution` for details. #' @inheritParams model_parameters.default #' @param component What type of links to return. Can be `"all"` or some of `c("regression", "correlation", "loading", "variance", "mean")`. #' @param ... Arguments passed to or from other methods. #' #' @note There is also a [`plot()`-method](https://easystats.github.io/see/articles/parameters.html) implemented in the \href{https://easystats.github.io/see/}{\pkg{see}-package}. #' #' @examples #' library(parameters) #' #' # lavaan ------------------------------------- #' if (require("lavaan", quietly = TRUE)) { #' #' # Confirmatory Factor Analysis (CFA) --------- #' #' structure <- " visual =~ x1 + x2 + x3 #' textual =~ x4 + x5 + x6 #' speed =~ x7 + x8 + x9 " #' model <- lavaan::cfa(structure, data = HolzingerSwineford1939) #' model_parameters(model) #' model_parameters(model, standardize = TRUE) #' #' # filter parameters #' model_parameters( #' model, #' parameters = list( #' To = "^(?!visual)", #' From = "^(?!(x7|x8))" #' ) #' ) #' #' # Structural Equation Model (SEM) ------------ #' #' structure <- " #' # latent variable definitions #' ind60 =~ x1 + x2 + x3 #' dem60 =~ y1 + a*y2 + b*y3 + c*y4 #' dem65 =~ y5 + a*y6 + b*y7 + c*y8 #' # regressions #' dem60 ~ ind60 #' dem65 ~ ind60 + dem60 #' # residual correlations #' y1 ~~ y5 #' y2 ~~ y4 + y6 #' y3 ~~ y7 #' y4 ~~ y8 #' y6 ~~ y8 #' " #' model <- lavaan::sem(structure, data = PoliticalDemocracy) #' model_parameters(model) #' model_parameters(model, standardize = TRUE) #' } #' @return A data frame of 
#' @export
model_parameters.lavaan <- function(model,
                                    ci = 0.95,
                                    standardize = FALSE,
                                    component = c("regression", "correlation", "loading", "defined"),
                                    keep = NULL,
                                    drop = NULL,
                                    parameters = keep,
                                    verbose = TRUE,
                                    ...) {
  # extract the (optionally standardized) parameter table
  params <- .extract_parameters_lavaan(model,
    ci = ci,
    standardize = standardize,
    keep_parameters = keep,
    drop_parameters = drop,
    verbose = verbose,
    ...
  )

  # Filter: "all" expands to every supported component type
  if (all(component == "all")) {
    component <- c("regression", "correlation", "loading", "variance", "defined", "mean")
  }
  keep_rows <- tolower(params$Component) %in% component
  params <- params[keep_rows, ]

  # add class-attribute for printing
  class(params) <- c("parameters_sem", "see_parameters_sem", class(params))
  attr(params, "ci") <- ci
  attr(params, "model") <- model
  params
}
#' @export
standard_error.blavaan <- function(model, ...) {
  # posterior draws: one column per parameter
  draws <- insight::get_parameters(model, ...)
  # SE of a Bayesian parameter is the SD of its posterior samples
  std_errors <- sapply(draws, stats::sd, na.rm = TRUE)
  .data_frame(
    Parameter = colnames(draws),
    SE = unname(std_errors)
  )
}
#' @export
predict.parameters_sem <- function(object, newdata = NULL, ...) {
  insight::check_if_installed("lavaan")
  # the fitted lavaan model is stored as an attribute of the parameters table
  fitted_model <- attributes(object)$model
  predictions <- lavaan::lavPredict(
    fitted_model,
    newdata = newdata,
    method = "EBM",
    ...
  )
  as.data.frame(predictions)
}
#' #' @examples #' library(parameters) #' if (require("mgcv")) { #' dat <- gamSim(1, n = 400, dist = "normal", scale = 2) #' model <- gam(y ~ s(x0) + s(x1) + s(x2) + s(x3), data = dat) #' model_parameters(model) #' } #' @export model_parameters.cgam <- function(model, ci = .95, ci_method = "residual", bootstrap = FALSE, iterations = 1000, standardize = NULL, exponentiate = FALSE, robust = FALSE, p_adjust = NULL, keep = NULL, drop = NULL, parameters = keep, verbose = TRUE, ...) { # Processing if (bootstrap) { params <- bootstrap_parameters( model, iterations = iterations, ci = ci, ... ) } else { params <- .extract_parameters_generic( model, ci = ci, ci_method = ci_method, component = "all", merge_by = c("Parameter", "Component"), standardize = standardize, robust = robust, p_adjust = p_adjust, keep_parameters = keep, drop_parameters = drop, ... ) } # fix statistic column if ("t" %in% names(params) && !is.null(params$Component) && "smooth_terms" %in% params$Component) { names(params)[names(params) == "t"] <- "t / F" } # fix estimated df column if (inherits(model, c("gam", "cgam", "scam", "rqss")) && "smooth_terms" %in% params$Component && !("df" %in% names(params))) { params$df <- params$Coefficient params$df[params$Component != "smooth_terms"] <- NA params$df_error[params$Component == "smooth_terms"] <- NA params$Coefficient[params$Component == "smooth_terms"] <- NA # reorder insert_column <- which(names(params) == "df_error") if (!length(insert_column)) { insert_column <- which(names(params) == "p") } if (length(insert_column)) { n_col <- ncol(params) params <- params[c(1:(insert_column - 1), n_col, insert_column:(n_col - 1))] } } else if (all(c("df", "df_error") %in% names(params)) && "smooth_terms" %in% params$Component) { params$df_error[params$Component == "smooth_terms"] <- NA } if (isTRUE(exponentiate) || identical(exponentiate, "nongaussian")) { params <- .exponentiate_parameters(params, model, exponentiate) } params <- .add_model_parameters_attributes( 
#' @rdname p_value.DirichletRegModel
#' @export
p_value.cgam <- function(model, component = c("all", "conditional", "smooth_terms"), ...) {
  component <- match.arg(component)
  params <- insight::get_parameters(model, component = "all")
  model_summary <- summary(model)

  # p-values for parametric terms first, then (if present) smooth terms
  p <- as.vector(model_summary$coefficients[, 4])
  if (!is.null(model_summary$coefficients2)) {
    p <- c(p, as.vector(model_summary$coefficients2[, "p.value"]))
  }

  out <- .data_frame(
    Parameter = params$Parameter,
    Component = params$Component,
    p = as.vector(p)
  )

  if (component == "all") {
    return(out)
  }
  out[out$Component == component, ]
}
#' @export
standard_error.mvord <- function(model, component = c("all", "conditional", "thresholds", "correlation"), ...) {
  component <- match.arg(component)
  params <- insight::get_parameters(model, component = "all")

  # capture.output() suppresses the summary's printed output
  junk <- utils::capture.output(s <- summary(model))

  # SEs in the same order as get_parameters(): thresholds,
  # coefficients, then error structure
  params$SE <- c(
    unname(s$thresholds[, "Std. Error"]),
    unname(s$coefficients[, "Std. Error"]),
    unname(s$error.structure[, "Std. Error"])
  )
  params <- params[c("Parameter", "SE", "Component", "Response")]

  # drop the Response column when there is only a single response
  if (.n_unique(params$Response) == 1) {
    params$Response <- NULL
  }
  if (component != "all") {
    params <- params[params$Component == component, , drop = FALSE]
  }
  .remove_backticks_from_parameter_names(params)
}
{ component <- match.arg(component) out <- .simulate_model(model, iterations, component = component) class(out) <- c("parameters_simulate_model", class(out)) attr(out, "object_name") <- .safe_deparse(substitute(model)) out } parameters/R/2_ci.R0000644000175000017500000000727714140567646013741 0ustar nileshnilesh#' @title Confidence Intervals (CI) #' @name ci.default #' #' @description Compute confidence intervals (CI) for frequentist models. #' #' @param x A statistical model. #' @param ci Confidence Interval (CI) level. Default to `0.95` (`95%`). #' @param dof Number of degrees of freedom to be used when calculating #' confidence intervals. If `NULL` (default), the degrees of freedom are #' retrieved by calling [`degrees_of_freedom()`] with #' approximation method defined in `method`. If not `NULL`, use this argument #' to override the default degrees of freedom used to compute confidence #' intervals. #' @param method Method for computing degrees of freedom for #' confidence intervals (CI) and the related p-values. Allowed are following #' options (which vary depending on the model class): `"residual"`, #' `"normal"`, `"likelihood"`, `"satterthwaite"`, `"kenward"`, `"wald"`, #' `"profile"`, `"boot"`, `"uniroot"`, `"ml1"`, `"betwithin"`, `"hdi"`, #' `"quantile"`, `"ci"`, `"eti"`, `"si"`, `"bci"`, or `"bcai"`. See section #' _Confidence intervals and approximation of degrees of freedom_ in #' [`model_parameters()`] for further details. #' @param robust Logical, if `TRUE`, computes confidence intervals (or p-values) #' based on robust standard errors. See [`standard_error_robust()`]. #' @param component Model component for which parameters should be shown. See #' the documentation for your object's class in [`model_parameters()`] for #' further details. #' @param iterations The number of bootstrap replicates. Only applies to models #' of class `merMod` when `method=boot`. #' @param verbose Toggle warnings and messages. #' @param ... 
#' @export
ci.glm <- function(x, ci = .95, dof = NULL, method = "profile", robust = FALSE, ...) {
  method <- match.arg(method, choices = c("profile", "wald", "normal", "residual"))

  if (method == "profile") {
    # profile-likelihood CIs: one table per requested level, stacked
    profiled <- lapply(ci, function(level) .ci_profiled(model = x, ci = level))
    out <- do.call(rbind, profiled)
  } else {
    out <- .ci_generic(model = x, ci = ci, dof = dof, method = method, robust = robust, ...)
  }

  row.names(out) <- NULL
  out
}
#' @export
degrees_of_freedom.ivFixed <- function(model, method = "wald", ...) {
  # a NULL method falls back to the Wald approach
  if (is.null(method)) method <- "wald"
  method <- match.arg(
    tolower(method),
    choices = c("analytical", "any", "fit", "wald", "residual", "normal")
  )

  # anything other than Wald-type methods is delegated to the default method
  if (!method %in% c("wald", "residual", "fit")) {
    return(degrees_of_freedom.default(model, method = method, ...))
  }
  # Wald-type methods use the degrees of freedom stored on the model
  as.vector(model$df)
}
#' @export
standard_error.mipo <- function(model, ...) {
  # compute the pooled summary once instead of twice
  s <- summary(model)
  .data_frame(
    Parameter = as.vector(s$term),
    SE = as.vector(s$std.error)
  )
}
#' #' @examples #' library(parameters) #' if (require("mice", quietly = TRUE)) { #' data(nhanes2) #' imp <- mice(nhanes2) #' fit <- with(data = imp, exp = lm(bmi ~ age + hyp + chl)) #' model_parameters(fit) #' } #' \dontrun{ #' # model_parameters() also works for models that have no "tidy"-method in mice #' if (require("mice", quietly = TRUE) && require("gee", quietly = TRUE)) { #' data(warpbreaks) #' set.seed(1234) #' warpbreaks$tension[sample(1:nrow(warpbreaks), size = 10)] <- NA #' imp <- mice(warpbreaks) #' fit <- with(data = imp, expr = gee(breaks ~ tension, id = wool)) #' #' # does not work: #' # summary(pool(fit)) #' #' model_parameters(fit) #' } #' } #' #' #' #' # and it works with pooled results #' if (require("mice")) { #' data("nhanes2") #' imp <- mice(nhanes2) #' fit <- with(data = imp, exp = lm(bmi ~ age + hyp + chl)) #' pooled <- pool(fit) #' #' model_parameters(pooled) #' } #' @export model_parameters.mira <- function(model, ci = .95, exponentiate = FALSE, p_adjust = NULL, verbose = TRUE, ...) { insight::check_if_installed("mice") out <- .model_parameters_generic( model = mice::pool(model), ci = ci, bootstrap = FALSE, iterations = 10, merge_by = "Parameter", standardize = NULL, exponentiate = exponentiate, robust = FALSE, p_adjust = p_adjust, ... ) attr(out, "object_name") <- deparse(substitute(model), width.cutoff = 500) out } parameters/R/format_order.R0000644000175000017500000000270614077615700015572 0ustar nileshnilesh#' Order (first, second, ...) formatting #' #' Format order. #' #' @param order value or vector of orders. #' @param textual Return number as words. If `FALSE`, will run [insight::format_value()]. #' @inherit insight::format_number #' #' @return A formatted string. #' @examples #' format_order(2) #' format_order(8) #' format_order(25, textual = FALSE) #' @export format_order <- function(order, textual = TRUE, ...) 
{
  if (textual) {
    # Spell the number out as words ("8" -> "eight"), then replace the
    # last word by its ordinal form ("eight" -> "eighth").
    order <- insight::format_number(order)
    parts <- unlist(strsplit(order, " ", fixed = TRUE))
    parts[length(parts)] <- switch(utils::tail(parts, 1),
      "one" = "first",
      "two" = "second",
      "three" = "third",
      "four" = "fourth",
      "five" = "fifth",
      "six" = "sixth",
      "seven" = "seventh",
      "eight" = "eighth", # FIX: was misspelled "eigth"
      "nine" = "ninth"
    )
    out <- paste(parts, collapse = " ")
  } else {
    # Numeric ordinal suffix: 1st, 2nd, 3rd, 4th, ... based on the last
    # digit, with 11/12/13 handled first since they always take "th".
    number <- insight::format_value(order, digits = 0, ...)
    last <- substr(number, nchar(number), nchar(number))
    last_two <- substr(number, nchar(number) - 1, nchar(number))
    # exceptions
    if (last_two %in% c(11, 12, 13)) {
      out <- paste0(number, "th")
    } else {
      out <- paste0(number, switch(last,
        "1" = "st",
        "2" = "nd",
        "3" = "rd",
        "4" = "th",
        "5" = "th",
        "6" = "th",
        "7" = "th",
        "8" = "th",
        "9" = "th",
        "0" = "th"
      ))
    }
  }
  out
}
parameters/R/robust_estimation.R0000644000175000017500000001703614142676661016671 0ustar nileshnilesh#' @title Robust estimation
#' @name standard_error_robust
#'
#' @description `standard_error_robust()`, `ci_robust()` and `p_value_robust()`
#' attempt to return indices based on robust estimation of the variance-covariance
#' matrix, using the packages \pkg{sandwich} and \pkg{clubSandwich}.
#'
#' @param model A model.
#' @param vcov_estimation String, indicating the suffix of the
#' `vcov*()`-function from the \pkg{sandwich} or \pkg{clubSandwich}
#' package, e.g. `vcov_estimation = "CL"` (which calls
#' [sandwich::vcovCL()] to compute clustered covariance matrix
#' estimators), or `vcov_estimation = "HC"` (which calls
#' [sandwich::vcovHC()] to compute
#' heteroskedasticity-consistent covariance matrix estimators).
#' @param vcov_type Character vector, specifying the estimation type for the
#' robust covariance matrix estimation (see
#' [sandwich::vcovHC()] or `clubSandwich::vcovCR()`
#' for details).
Passed down as `type` argument to the related `vcov*()`-function #' from the \pkg{sandwich} or \pkg{clubSandwich} package and hence will be #' ignored if there is no `type` argument (e.g., `sandwich::vcovHAC()` will #' ignore that argument). #' @param vcov_args List of named vectors, used as additional arguments that are #' passed down to the \pkg{sandwich}-function specified in #' `vcov_estimation`. #' @param component Should all parameters or parameters for specific model #' components be returned? #' @param ... Arguments passed to or from other methods. For #' `standard_error()`, if `method = "robust"`, arguments #' `vcov_estimation`, `vcov_type` and `vcov_args` can be passed #' down to `standard_error_robust()`. #' @inheritParams ci.default #' #' @note These functions rely on the \pkg{sandwich} or \pkg{clubSandwich} package #' (the latter if `vcov_estimation = "CR"` for cluster-robust standard errors) #' and will thus only work for those models supported by those packages. #' #' @seealso Working examples cam be found [in this vignette](https://easystats.github.io/parameters/articles/model_parameters_robust.html). #' #' @examples #' if (require("sandwich", quietly = TRUE)) { #' # robust standard errors, calling sandwich::vcovHC(type="HC3") by default #' model <- lm(Petal.Length ~ Sepal.Length * Species, data = iris) #' standard_error_robust(model) #' } #' \dontrun{ #' if (require("clubSandwich", quietly = TRUE)) { #' # cluster-robust standard errors, using clubSandwich #' iris$cluster <- factor(rep(LETTERS[1:8], length.out = nrow(iris))) #' standard_error_robust( #' model, #' vcov_type = "CR2", #' vcov_args = list(cluster = iris$cluster) #' ) #' } #' } #' @return A data frame. #' @export standard_error_robust <- function(model, vcov_estimation = "HC", vcov_type = NULL, vcov_args = NULL, component = "conditional", ...) 
{ # exceptions if (inherits(model, "gee")) { return(standard_error(model, robust = TRUE, ...)) } if (inherits(model, "MixMod")) { return(standard_error(model, robust = TRUE, ...)) } # check for existing vcov-prefix if (!grepl("^(vcov|kernHAC|NeweyWest)", vcov_estimation)) { vcov_estimation <- paste0("vcov", vcov_estimation) } robust <- .robust_covariance_matrix( model, vcov_fun = vcov_estimation, vcov_type = vcov_type, vcov_args = vcov_args, component = component ) if ("Component" %in% colnames(robust) && .n_unique(robust$Component) > 1) { cols <- c("Parameter", "SE", "Component") } else { cols <- c("Parameter", "SE") } robust[, cols] } #' @rdname standard_error_robust #' @export p_value_robust <- function(model, vcov_estimation = "HC", vcov_type = NULL, vcov_args = NULL, component = "conditional", method = NULL, ...) { # exceptions if (inherits(model, "gee")) { return(p_value(model, robust = TRUE, ...)) } if (inherits(model, "MixMod")) { return(p_value(model, robust = TRUE, ...)) } # check for existing vcov-prefix if (!grepl("^(vcov|kernHAC|NeweyWest)", vcov_estimation)) { vcov_estimation <- paste0("vcov", vcov_estimation) } robust <- .robust_covariance_matrix( model, vcov_fun = vcov_estimation, vcov_type = vcov_type, vcov_args = vcov_args, component = component, method = method ) if ("Component" %in% colnames(robust) && .n_unique(robust$Component) > 1) { cols <- c("Parameter", "p", "Component") } else { cols <- c("Parameter", "p") } robust[, cols] } #' @rdname standard_error_robust #' @export ci_robust <- function(model, ci = 0.95, method = NULL, vcov_estimation = "HC", vcov_type = NULL, vcov_args = NULL, component = "conditional", ...) 
{ out <- .ci_generic( model = model, ci = ci, method = method, component = component, robust = TRUE, vcov_estimation = vcov_estimation, vcov_type = vcov_type, vcov_args = vcov_args ) if ("Component" %in% colnames(out) && .n_unique(out$Component) == 1) { out$Component <- NULL } out } .robust_covariance_matrix <- function(x, vcov_fun = "vcovHC", vcov_type = NULL, vcov_args = NULL, component = "conditional", method = "any") { # fix default, if necessary if (!is.null(vcov_type) && vcov_type %in% c("CR0", "CR1", "CR1p", "CR1S", "CR2", "CR3")) { vcov_fun <- "vcovCR" } # set default for clubSandwich if (vcov_fun == "vcovCR" && is.null(vcov_type)) { vcov_type <- "CR0" } # check if required package is available if (vcov_fun == "vcovCR") { insight::check_if_installed("clubSandwich", reason = "to get cluster-robust standard errors") .vcov <- do.call(clubSandwich::vcovCR, c(list(obj = x, type = vcov_type), vcov_args)) } else { insight::check_if_installed("sandwich", reason = "to get robust standard errors") vcov_fun <- get(vcov_fun, asNamespace("sandwich")) .vcov <- do.call(vcov_fun, c(list(x = x, type = vcov_type), vcov_args)) } # get coefficients params <- insight::get_parameters(x, component = component, verbose = FALSE) if (!is.null(component) && component != "all" && nrow(.vcov) > nrow(params)) { keep <- match(insight::find_parameters(x)[[component]], rownames(.vcov)) .vcov <- .vcov[keep, keep, drop = FALSE] } if (is.null(method)) { method <- "any" } se <- sqrt(diag(.vcov)) dendf <- degrees_of_freedom(x, method = method) t.stat <- params$Estimate / se if (is.null(dendf)) { p.value <- 2 * stats::pnorm(abs(t.stat), lower.tail = FALSE) } else { p.value <- 2 * stats::pt(abs(t.stat), df = dendf, lower.tail = FALSE) } out <- .data_frame( Parameter = params$Parameter, Estimate = params$Estimate, SE = se, Statistic = t.stat, p = p.value ) if (!is.null(params$Component) && nrow(params) == nrow(out)) { out$Component <- params$Component } out } 
parameters/R/methods_lmtest.R0000644000175000017500000000105713766404764016152 0ustar nileshnilesh
# Residual degrees of freedom for "coeftest" objects (lmtest package)
# are stored as the "df" attribute of the object itself.
#' @export
degrees_of_freedom.coeftest <- function(model, ...) {
  attributes(model)$df
}

#' @export
ci.coeftest <- ci.default

# "coeftest" objects are coefficient matrices; column 4 holds p-values.
#' @export
p_value.coeftest <- function(model, ...) {
  .data_frame(
    Parameter = .remove_backticks_from_string(row.names(model)),
    p = model[, 4]
  )
}

# Standard errors are read from the "Std. Error" column of the matrix.
#' @export
standard_error.coeftest <- function(model, ...) {
  .data_frame(
    Parameter = .remove_backticks_from_string(row.names(model)),
    SE = model[, "Std. Error"]
  )
}

#' @export
model_parameters.coeftest <- model_parameters.ivFixed
parameters/R/methods_other.R0000644000175000017500000000303414133046532015740 0ustar nileshnilesh############# .complmrob --------------

# Standard errors for robust compositional regression ("complmrob"):
# taken from the "Std. Error" column of the model's summary statistics.
#' @export
standard_error.complmrob <- function(model, ...) {
  stats <- summary(model)$stats
  params <- insight::get_parameters(model)
  .data_frame(
    Parameter = params$Parameter,
    SE = as.vector(stats[, "Std. Error"])
  )
}

#' @export
p_value.complmrob <- p_value.default

#' @export
ci.complmrob <- ci.default

#' @export
degrees_of_freedom.complmrob <- function(model, method = "wald", ...) {
  .degrees_of_freedom_no_dfresid_method(model, method)
}

############# .Gam --------------

# Parameters for "Gam" models (gam package): delegate to the anova
# method, operating on the parametric ANOVA table from summary().
#' @export
model_parameters.Gam <- function(model,
                                 omega_squared = NULL,
                                 eta_squared = NULL,
                                 epsilon_squared = NULL,
                                 df_error = NULL,
                                 type = NULL,
                                 table_wide = FALSE,
                                 verbose = TRUE,
                                 ...) {
  model_parameters(
    summary(model)$parametric.anova,
    omega_squared = omega_squared,
    eta_squared = eta_squared,
    epsilon_squared = epsilon_squared,
    df_error = df_error,
    type = type,
    table_wide = table_wide,
    verbose = verbose,
    ...
  )
}

#' @export
p_value.Gam <- function(model, ...) 
{ p.aov <- stats::na.omit(summary(model)$parametric.anova) .data_frame( Parameter = .remove_backticks_from_string(rownames(p.aov)), p = as.vector(p.aov[, 5]) ) } parameters/R/p_value_ml1.R0000644000175000017500000000572314160324505015306 0ustar nileshnilesh#' @title "m-l-1" approximation for SEs, CIs and p-values #' @name p_value_ml1 #' #' @description Approximation of degrees of freedom based on a "m-l-1" heuristic #' as suggested by Elff et al. (2019). #' #' @param model A mixed model. #' @param dof Degrees of Freedom. #' @inheritParams ci.default #' #' @details \subsection{Small Sample Cluster corrected Degrees of Freedom}{ #' Inferential statistics (like p-values, confidence intervals and #' standard errors) may be biased in mixed models when the number of clusters #' is small (even if the sample size of level-1 units is high). In such cases #' it is recommended to approximate a more accurate number of degrees of freedom #' for such inferential statistics (see \cite{Li and Redden 2015}). The #' *m-l-1* heuristic is such an approach that uses a t-distribution with #' fewer degrees of freedom (`dof_ml1()`) to calculate p-values #' (`p_value_ml1()`) and confidence intervals (`ci(method = "ml1")`). #' } #' \subsection{Degrees of Freedom for Longitudinal Designs (Repeated Measures)}{ #' In particular for repeated measure designs (longitudinal data analysis), #' the *m-l-1* heuristic is likely to be more accurate than simply using the #' residual or infinite degrees of freedom, because `dof_ml1()` returns #' different degrees of freedom for within-cluster and between-cluster effects. #' } #' \subsection{Limitations of the "m-l-1" Heuristic}{ #' Note that the "m-l-1" heuristic is not applicable (or at least less accurate) #' for complex multilevel designs, e.g. with cross-classified clusters. In such cases, #' more accurate approaches like the Kenward-Roger approximation (`dof_kenward()`) #' is recommended. 
However, the "m-l-1" heuristic also applies to generalized #' mixed models, while approaches like Kenward-Roger or Satterthwaite are limited #' to linear mixed models only. #' } #' @seealso `dof_ml1()` is a small helper-function to calculate approximated #' degrees of freedom of model parameters, based on the "m-l-1" heuristic. #' #' @examples #' \donttest{ #' if (require("lme4")) { #' model <- lmer(Petal.Length ~ Sepal.Length + (1 | Species), data = iris) #' p_value_ml1(model) #' } #' } #' @return A data frame. #' @references #' - Elff, M.; Heisig, J.P.; Schaeffer, M.; Shikano, S. (2019). Multilevel #' Analysis with Few Clusters: Improving Likelihood-based Methods to Provide #' Unbiased Estimates and Accurate Inference, British Journal of Political #' Science. #' #' - Li, P., Redden, D. T. (2015). Comparing denominator degrees of freedom #' approximations for the generalized linear mixed model in analyzing binary #' outcome in small sample cluster-randomized trials. BMC Medical Research #' Methodology, 15(1), 38. \doi{10.1186/s12874-015-0026-x} #' #' @export p_value_ml1 <- function(model, dof = NULL, robust = FALSE, ...) { if (is.null(dof)) { dof <- dof_ml1(model) } .p_value_dof(model, dof, method = "ml1", robust = robust, ...) } parameters/R/format_parameters.R0000644000175000017500000002555014133514053016614 0ustar nileshnilesh#' @title Parameter names formatting #' @name format_parameters #' #' @description This functions formats the names of model parameters (coefficients) #' to make them more human-readable. #' #' @param model A statistical model. #' @param brackets A character vector of length two, indicating the opening and closing brackets. #' @param ... Currently not used. #' #' @section Interpretation of Interaction Terms: #' Note that the *interpretation* of interaction terms depends on many #' characteristics of the model. 
The number of parameters, and overall #' performance of the model, can differ *or not* between `a * b` #' `a : b`, and `a / b`, suggesting that sometimes interaction terms #' give different parameterizations of the same model, but other times it gives #' completely different models (depending on `a` or `b` being factors #' of covariates, included as main effects or not, etc.). Their interpretation #' depends of the full context of the model, which should not be inferred #' from the parameters table alone - rather, we recommend to use packages #' that calculate estimated marginal means or marginal effects, such as #' \CRANpkg{modelbased}, \CRANpkg{emmeans} or \CRANpkg{ggeffects}. To raise #' awareness for this issue, you may use `print(...,show_formula=TRUE)` #' to add the model-specification to the output of the #' [`print()`][print.parameters_model] method for `model_parameters()`. #' #' @examples #' model <- lm(Sepal.Length ~ Species * Sepal.Width, data = iris) #' format_parameters(model) #' #' model <- lm(Sepal.Length ~ Petal.Length + (Species / Sepal.Width), data = iris) #' format_parameters(model) #' #' model <- lm(Sepal.Length ~ Species + poly(Sepal.Width, 2), data = iris) #' format_parameters(model) #' #' model <- lm(Sepal.Length ~ Species + poly(Sepal.Width, 2, raw = TRUE), data = iris) #' format_parameters(model) #' @return A (names) character vector with formatted parameter names. The value names refer to the original names of the coefficients. #' @export format_parameters <- function(model, ...) { UseMethod("format_parameters") } #' @rdname format_parameters #' @export format_parameters.default <- function(model, brackets = c("[", "]"), ...) { tryCatch( { .format_parameter_default(model, brackets = brackets, ...) }, error = function(e) { NULL } ) } #' @export format_parameters.parameters_model <- function(model, ...) 
{ if (!is.null(attributes(model)$pretty_names)) { model$Parameter <- attributes(model)$pretty_names[model$Parameter] } model } # Utilities --------------------------------------------------------------- .format_parameter_default <- function(model, effects = "fixed", brackets = c("[", "]"), ...) { original_names <- names <- insight::find_parameters(model, effects = effects, flatten = TRUE) # save some time, if model info is passed as argument dot_args <- list(...) if (!is.null(dot_args$model_info)) { info <- dot_args$model_info } else { info <- insight::model_info(model, verbose = FALSE) } ## TODO remove is.list() when insight 0.8.3 on CRAN if (is.null(info) || !is.list(info)) { info <- list(family = "unknown", link_function = "unknown") } # quick fix, for multivariate response models, we use # info from first model only if (insight::is_multivariate(model) && !"is_zero_inflated" %in% names(info)) { info <- info[[1]] } # special handling hurdle- and zeroinfl-models --------------------- if (isTRUE(info$is_zero_inflated) | isTRUE(info$is_hurdle)) { names <- gsub("^(count_|zero_)", "", names) } # special handling polr --------------------- if (inherits(model, "polr")) { original_names <- gsub("Intercept: ", "", original_names, fixed = TRUE) names <- gsub("Intercept: ", "", names, fixed = TRUE) } # special handling bracl --------------------- if (inherits(model, "bracl")) { names <- gsub("(.*):(.*)", "\\2", names) } # special handling DirichletRegModel --------------------- dirich_names <- NULL if (inherits(model, "DirichletRegModel")) { cf <- stats::coef(model) if (model$parametrization == "common") { pattern <- paste0("(", paste(model$varnames, collapse = "|"), ")\\.(.*)") dirich_names <- names <- gsub(pattern, "\\2", names(unlist(cf))) } else { dirich_names <- names <- gsub("(.*)\\.(.*)\\.(.*)", "\\3", names(unlist(cf))) } original_names <- names } # remove "as.factor()", "log()" etc. 
from parameter names names <- .clean_parameter_names(names) # Type-specific changes types <- parameters_type(model) if (is.null(types)) { return(NULL) } types$Parameter <- .clean_parameter_names(types$Parameter, full = TRUE) # hurdle- and zeroinfl-models if (isTRUE(info$is_zero_inflated) | isTRUE(info$is_hurdle)) { types$Parameter <- gsub("^(count_|zero_)", "", types$Parameter) } # special handling DirichletRegModel if (inherits(model, "DirichletRegModel") && !is.null(dirich_names)) { types$Parameter <- dirich_names } for (i in 1:nrow(types)) { name <- types$Parameter[i] # No interaction if (!types$Type[i] %in% c("interaction", "nested", "simple")) { type <- types[i, ] names[i] <- .format_parameter(name, variable = type$Variable, type = type$Type, level = type$Level, brackets = brackets) # Interaction or nesting } else { components <- unlist(strsplit(name, ":", fixed = TRUE)) is_nested <- types$Type[i] == "nested" is_simple <- types$Type[i] == "simple" for (j in 1:length(components)) { if (components[j] %in% types$Parameter) { type <- types[types$Parameter == components[j], ] ## TODO check if this is ok... # for models with multiple response categories, we might have same # variable for each response, thus we have multiple rows here, # where only one row is required. if (nrow(type) > 1) type <- type[1, ] components[j] <- .format_parameter(components[j], variable = type$Variable, type = type$Type, level = type$Level, brackets = brackets) } else if (components[j] %in% types$Secondary_Parameter) { type <- types[!is.na(types$Secondary_Parameter) & types$Secondary_Parameter == components[j], ] components[j] <- .format_parameter(components[j], variable = type[1, ]$Secondary_Variable, type = type[1, ]$Secondary_Type, level = type[1, ]$Secondary_Level, brackets = brackets) } } names[i] <- .format_interaction(components, type = types[i, "Type"], is_nested = is_nested, is_simple = is_simple) } } # do some final formatting, like replacing underscores or dots with whitespace. 
names <- gsub("(\\.|_)", " ", names) # "types$Parameter" here is cleaned, i.e. patterns like "log()", "as.factor()" # etc. are removed. However, these patterns are needed in "format_table()", # code-line x$Parameter <- attributes(x)$pretty_names[x$Parameter] # when we use "types$Parameter" here, matching of pretty names does not work, # so output will be NA resp. blank fields... Thus, I think we should use # the original parameter-names here. names(names) <- original_names # types$Parameter names } #' @keywords internal .format_parameter <- function(name, variable, type, level, brackets = brackets) { # Factors if (type == "factor") { name <- .format_factor(name = name, variable = variable, brackets = brackets) } # Polynomials if (type %in% c("poly", "poly_raw")) { name <- .format_poly(name = name, variable = variable, type = type, degree = level, brackets = brackets) } # Splines if (type == "spline") { name <- .format_poly(name = name, variable = variable, type = type, degree = level, brackets = brackets) } # log-transformation if (type == "logarithm") { name <- .format_log(name = name, variable = variable, type = type, brackets = brackets) } # exp-transformation if (type == "exponentiation") { name <- .format_log(name = name, variable = variable, type = type, brackets = brackets) } # log-transformation if (type == "squareroot") { name <- .format_log(name = name, variable = variable, type = type, brackets = brackets) } # As Is if (type == "asis") { name <- variable } # Smooth if (type == "smooth") { name <- gsub("^smooth_(.*)\\[(.*)\\]", "\\2", name) name <- gsub("s(", "Smooth term (", name, fixed = TRUE) } # Ordered if (type == "ordered") { name <- paste(variable, level) } name } #' @keywords internal .format_interaction <- function(components, type, is_nested = FALSE, is_simple = FALSE) { # sep <- ifelse(is_nested | is_simple, " : ", " * ") # sep <- ifelse(is_nested, " / ", " * ") # sep <- ifelse(is_simple, " : ", ifelse(is_nested, " / ", " * ")) sep <- " * " if 
(length(components) > 2) { if (type == "interaction") { components <- paste0("(", paste0(utils::head(components, -1), collapse = " * "), ")", sep, utils::tail(components, 1)) } else { components <- paste0(components, collapse = sep) } } else { components <- paste0(components, collapse = sep) } components } #' @keywords internal .format_factor <- function(name, variable, brackets = c("[", "]")) { level <- sub(variable, "", name) # special handling for "cut()" pattern_cut_right <- "^\\((.*),(.*)\\]$" pattern_cut_left <- "^\\[(.*),(.*)\\)$" if (all(grepl(pattern_cut_right, level))) { lower_bounds <- gsub(pattern_cut_right, "\\1", level) upper_bounds <- gsub(pattern_cut_right, "\\2", level) level <- paste0(as.numeric(lower_bounds) + 1, "-", upper_bounds) } else if (all(grepl(pattern_cut_left, level))) { lower_bounds <- gsub(pattern_cut_left, "\\1", level) upper_bounds <- gsub(pattern_cut_left, "\\2", level) level <- paste0(lower_bounds, "-", as.numeric(upper_bounds) - 1) } paste0(variable, " ", brackets[1], level, brackets[2]) } #' @keywords internal .format_poly <- function(name, variable, type, degree, brackets = c("[", "]")) { paste0(variable, " ", brackets[1], format_order(as.numeric(degree), textual = FALSE), " degree", brackets[2]) } #' @keywords internal .format_log <- function(name, variable, type, brackets = c("[", "]")) { paste0(variable, " ", brackets[1], gsub("(.*)\\((.*)\\)", "\\1", name), brackets[2]) } #' @keywords internal .format_ordered <- function(degree, brackets = c("[", "]")) { switch(degree, ".L" = paste0(brackets[1], "linear", brackets[2]), ".Q" = paste0(brackets[1], "quadratic", brackets[2]), ".C" = paste0(brackets[1], "cubic", brackets[2]), paste0(brackets[1], parameters::format_order(as.numeric(gsub("^", "", degree, fixed = TRUE)), textual = FALSE), " degree", brackets[2]) ) } parameters/R/methods_mass.R0000644000175000017500000000625714133044507015574 0ustar nileshnilesh# degrees of freedom ----------------- #' @export degrees_of_freedom.rlm 
<- function(model, method = "residual", ...) { .degrees_of_freedom_no_dfresid_method(model, method) } # ci ----------------- #' @export ci.negbin <- ci.glm #' @export ci.polr <- function(x, ci = .95, dof = NULL, method = "profile", robust = FALSE, ...) { method <- match.arg(method, choices = c("profile", "wald")) if (method == "profile") { out <- lapply(ci, function(i) .ci_profiled2(model = x, ci = i)) out <- do.call(rbind, out) } else { out <- .ci_generic(model = x, ci = ci, dof = dof, method = method, robust = robust, ...) } # for polr, profiled CI do not return CI for response levels # thus, we also calculate Wald CI and add missing rows to result out_missing <- .ci_generic(model = x, ci = ci) missing_rows <- out_missing$Parameter %in% setdiff(out_missing$Parameter, out$Parameter) out <- rbind(out, out_missing[missing_rows, ]) # fix names, to match standard error and p_value out$Parameter <- gsub("Intercept: ", "", out$Parameter, fixed = TRUE) row.names(out) <- NULL out } # SE ----------------- #' @export standard_error.polr <- function(model, method = NULL, ...) { robust <- !is.null(method) && method == "robust" if (isTRUE(robust)) { return(standard_error_robust(model, ...)) } smry <- suppressMessages(as.data.frame(stats::coef(summary(model)))) se <- smry[[2]] names(se) <- rownames(smry) .data_frame( Parameter = .remove_backticks_from_string(names(se)), SE = as.vector(se) ) } # p ----------------- #' @export p_value.negbin <- p_value.default #' @export p_value.rlm <- function(model, ...) { cs <- stats::coef(summary(model)) p <- 2 * stats::pt(abs(cs[, 3]), df = degrees_of_freedom(model, method = "any"), lower.tail = FALSE) .data_frame( Parameter = .remove_backticks_from_string(names(p)), p = as.vector(p) ) } #' @export p_value.polr <- function(model, method = NULL, ...) 
{
  robust <- !is.null(method) && method == "robust"
  if (isTRUE(robust)) {
    # FIX: previously returned standard_error_robust() here, i.e. standard
    # errors instead of p-values. The parallel robust branch in
    # standard_error.polr correctly calls standard_error_robust(); this
    # method must call p_value_robust().
    return(p_value_robust(model, ...))
  }
  # p-values from the t-statistic column (3rd) of the summary coefficient
  # table, using two-sided t-tests with model-appropriate df.
  smry <- suppressMessages(as.data.frame(stats::coef(summary(model))))
  tstat <- smry[[3]]
  p <- 2 * stats::pt(abs(tstat), df = degrees_of_freedom(model, method = "any"), lower.tail = FALSE)
  names(p) <- rownames(smry)
  .data_frame(
    Parameter = .remove_backticks_from_string(names(p)),
    p = as.vector(p)
  )
}

# parameters -----------------

# Parameters for ridge regression ("ridgelm"): coefficients plus the
# scaling factors; rows that are completely missing are dropped.
#' @export
model_parameters.ridgelm <- function(model, verbose = TRUE, ...) {
  parameters <- insight::get_parameters(model)
  parameters$Scale <- as.vector(model$scales)

  # remove all complete-missing cases
  parameters <- parameters[apply(parameters, 1, function(i) !all(is.na(i))), ]
  rownames(parameters) <- NULL

  class(parameters) <- c("parameters_model", "see_parameters_model", class(parameters))
  attr(parameters, "object_name") <- deparse(substitute(model), width.cutoff = 500)
  parameters
}

#' @export
model_parameters.polr <- model_parameters.glm

#' @export
model_parameters.negbin <- model_parameters.glm
parameters/R/methods_skewness_kurtosis.R0000644000175000017500000000027713767107546020445 0ustar nileshnilesh# Standard errors for skewness/kurtosis result objects are stored as an
# attribute on the object by the computing function.
#' @export
standard_error.parameters_skewness <- function(model, ...) {
  attributes(model)$SE
}

#' @export
standard_error.parameters_kurtosis <- standard_error.parameters_skewness
parameters/R/methods_gam.R0000644000175000017500000000506414131625712015371 0ustar nileshnilesh# classes: .gam, .list


#################### .gam ------


#' @rdname model_parameters.cgam
#' @export
model_parameters.gam <- model_parameters.cgam

#' @export
ci.gam <- function(x, ci = .95, method = NULL, ...) {
  .ci_generic(model = x, ci = ci, method = "wald", ...)
}

#' @export
standard_error.gam <- function(model, ...) 
{ p.table <- summary(model)$p.table s.table <- summary(model)$s.table n_cond <- nrow(p.table) n_smooth <- nrow(s.table) .data_frame( Parameter = .remove_backticks_from_string(c(rownames(p.table), rownames(s.table))), SE = c(as.vector(p.table[, 2]), rep(NA, n_smooth)), Component = c(rep("conditional", n_cond), rep("smooth_terms", n_smooth)) ) } #' @export p_value.gam <- function(model, ...) { p.table <- summary(model)$p.table s.table <- summary(model)$s.table d1 <- .data_frame( Parameter = rownames(p.table), p = as.vector(p.table[, 4]), Component = "conditional" ) d2 <- .data_frame( Parameter = rownames(s.table), p = as.vector(s.table[, 4]), Component = "smooth_terms" ) .remove_backticks_from_parameter_names(rbind(d1, d2)) } #' @export simulate_model.gam <- function(model, iterations = 1000, ...) { insight::check_if_installed("MASS") if (is.null(iterations)) iterations <- 1000 beta <- stats::coef(model) varcov <- insight::get_varcov(model, component = "all") out <- as.data.frame(MASS::mvrnorm(n = iterations, mu = beta, Sigma = varcov)) class(out) <- c("parameters_simulate_model", class(out)) attr(out, "object_name") <- .safe_deparse(substitute(model)) out } #################### .list ------ #' @export model_parameters.list <- function(model, ...) { if ("gam" %in% names(model)) { model <- model$gam class(model) <- c("gam", "lm", "glm") model_parameters(model, ...) } else if ("pamobject" %in% names(model)) { model <- model$pamobject model_parameters(model, ...) } else { stop("We don't recognize this object of class 'list'. Please raise an issue.") } } #' @export ci.list <- function(x, ci = .95, ...) { if ("gam" %in% names(x)) { x <- x$gam class(x) <- c("gam", "lm", "glm") ci(x, ci = ci, ...) } else { return(NULL) } } #' @export simulate_model.list <- function(model, iterations = 1000, ...) { if ("gam" %in% names(model)) { model <- model$gam class(model) <- c("gam", "lm", "glm") simulate_model(model, iterations = iterations, ...) 
} } parameters/R/bootstrap_parameters.R0000644000175000017500000001014614133733270017340 0ustar nileshnilesh#' Parameters bootstrapping #' #' Compute bootstrapped parameters and their related indices such as Confidence Intervals (CI) and p-values. #' #' #' @param test The indices to compute. Character (vector) with one or more of these options: `"p-value"` (or `"p"`), `"p_direction"` (or `"pd"`), `"rope"`, `"p_map"`, `"equivalence_test"` (or `"equitest"`), `"bayesfactor"` (or `"bf"`) or `"all"` to compute all tests. For each "test", the corresponding \pkg{bayestestR} function is called (e.g. [bayestestR::rope()] or [bayestestR::p_direction()]) and its results included in the summary output. #' @inheritParams bootstrap_model #' @inheritParams bayestestR::describe_posterior #' #' @return A data frame summarizing the bootstrapped parameters. #' #' @inheritSection bootstrap_model Using with **emmeans** #' #' @references Davison, A. C., & Hinkley, D. V. (1997). Bootstrap methods and their application (Vol. 1). Cambridge university press. #' #' @seealso [bootstrap_model()], [simulate_parameters()], [simulate_model()] #' #' @details This function first calls [bootstrap_model()] to generate #' bootstrapped coefficients. The resulting replicated for each coefficient #' are treated as "distribution", and is passed to [bayestestR::describe_posterior()] #' to calculate the related indices defined in the `"test"` argument. #' \cr\cr #' Note that that p-values returned here are estimated under the assumption of #' *translation equivariance*: that shape of the sampling distribution is #' unaffected by the null being true or not. If this assumption does not hold, #' p-values can be biased, and it is suggested to use proper permutation tests #' to obtain non-parametric p-values. 
#' #' @examples #' \dontrun{ #' if (require("boot", quietly = TRUE)) { #' set.seed(2) #' model <- lm(Sepal.Length ~ Species * Petal.Width, data = iris) #' b <- bootstrap_parameters(model) #' print(b) #' #' if (require("emmeans")) { #' est <- emmeans(b, trt.vs.ctrl ~ Species) #' print(model_parameters(est)) #' } #' } #' } #' @export bootstrap_parameters <- function(model, iterations = 1000, centrality = "median", ci = .95, ci_method = "quantile", test = "p-value", ...) { data <- bootstrap_model(model, iterations = iterations, ...) out <- .summary_bootstrap( data = data, test = test, centrality = centrality, ci = ci, ci_method = ci_method, ... ) class(out) <- c("bootstrap_parameters", "parameters_model", class(out)) attr(out, "boot_samples") <- data out } #' @keywords internal .summary_bootstrap <- function(data, test, centrality, ci, ci_method, ...) { # Is the p-value requested? if (any(test %in% c("p-value", "p", "pval"))) { p_value <- TRUE test <- setdiff(test, c("p-value", "p", "pval")) if (length(test) == 0) test <- NULL } else { p_value <- FALSE } parameters <- bayestestR::describe_posterior( data, centrality = centrality, ci = ci, ci_method = ci_method, test = test, ... ) # Remove unnecessary columns if ("CI" %in% names(parameters) && .n_unique(parameters$CI) == 1) { parameters$CI <- NULL } else if ("CI" %in% names(parameters) && .n_unique(parameters$CI) > 1) { parameters <- datawizard::reshape_ci(parameters) } # Coef if (length(c(centrality)) == 1) { names(parameters)[names(parameters) == .capitalize(centrality)] <- "Coefficient" } # p-value if (p_value) { parameters$.row_order <- 1:nrow(parameters) # calculate probability of direction, then convert to p. p <- bayestestR::p_direction(data, null = 0, ...) 
p$p <- bayestestR::pd_to_p(p$pd) p$pd <- NULL parameters <- merge(parameters, p, all = TRUE) parameters <- parameters[order(parameters$.row_order), ] parameters$.row_order <- NULL } rownames(parameters) <- NULL attr(parameters, "ci") <- ci parameters } parameters/R/format.R0000644000175000017500000005660114142761341014376 0ustar nileshnilesh# usual models --------------------------------- #' @inheritParams print.parameters_model #' @rdname display.parameters_model #' @export format.parameters_model <- function(x, pretty_names = TRUE, split_components = TRUE, select = NULL, digits = 2, ci_digits = 2, p_digits = 3, ci_width = NULL, ci_brackets = NULL, zap_small = FALSE, format = NULL, groups = NULL, ...) { # save attributes coef_name <- attributes(x)$coefficient_name coef_name2 <- attributes(x)$coefficient_name2 s_value <- attributes(x)$s_value m_class <- attributes(x)$model_class htest_type <- attributes(x)$htest_type mixed_model <- attributes(x)$mixed_model random_variances <- isTRUE(attributes(x)$ran_pars) mean_group_values <- attributes(x)$mean_group_values # is information about grouped parameters stored as attribute? 
if (is.null(groups) && !is.null(attributes(x)$coef_groups)) { groups <- attributes(x)$coef_groups } if (identical(format, "html")) { coef_name <- NULL coef_name2 <- NULL attr(x, "coefficient_name") <- NULL attr(x, "coefficient_name2") <- NULL attr(x, "zi_coefficient_name") <- NULL } # remove method for htest if (!is.null(m_class) && any(m_class %in% c("BFBayesFactor", "htest", "rma", "t1way", "yuen", "PMCMR", "osrt", "trendPMCMR", "anova", "afex_aov"))) { x$Method <- NULL x$Alternative <- NULL } # remove response for mvord if (!is.null(m_class) && any(m_class == "mvord")) { x$Response <- NULL } # rename columns for t-tests if (!is.null(htest_type) && htest_type == "ttest" && !is.null(mean_group_values)) { if (all(c("Mean_Group1", "Mean_Group2") %in% colnames(x))) { colnames(x)[which(colnames(x) == "Mean_Group1")] <- paste0(x$Group, " = ", mean_group_values[1]) colnames(x)[which(colnames(x) == "Mean_Group2")] <- paste0(x$Group, " = ", mean_group_values[2]) } } # Special print for mcp from WRS2 if (!is.null(m_class) && any(m_class %in% c("mcp1", "mcp2"))) { x$Group1 <- paste(x$Group1, x$Group2, sep = " vs. 
") x$Group2 <- NULL colnames(x)[1] <- "Group" } # check if we have mixed models with random variance parameters # in such cases, we don't need the group-column, but we rather # merge it with the parameter column if (isTRUE(random_variances)) { if (!is.null(x$Group) && !is.null(x$Effects)) { ran_pars <- which(x$Effects == "random") stddevs <- grepl("^SD \\(", x$Parameter[ran_pars]) x$Parameter[ran_pars[stddevs]] <- paste0(gsub("(.*)\\)", "\\1", x$Parameter[ran_pars[stddevs]]), ": ", x$Group[ran_pars[stddevs]], ")") x$Parameter[x$Parameter == "SD (Observations: Residual)"] <- "SD (Residual)" x$Group <- NULL } } # group parameters if (!is.null(groups)) { x <- .parameter_groups(x, groups) } indent_groups <- attributes(x)$indent_groups indent_rows <- attributes(x)$indent_rows # prepare output, to have in shape for printing x <- .prepare_x_for_print(x, select, coef_name, s_value) # check whether to split table by certain factors/columns (like component, response...) split_by <- .prepare_splitby_for_print(x) # print everything now... if (split_components && !is.null(split_by) && length(split_by)) { formatted_table <- .format_columns_multiple_components(x, pretty_names, split_column = split_by, digits = digits, ci_digits = ci_digits, p_digits = p_digits, coef_column = coef_name, format = format, ci_width = ci_width, ci_brackets = ci_brackets, zap_small = zap_small, ...) } else { formatted_table <- .format_columns_single_component(x, pretty_names = pretty_names, digits = digits, ci_width = ci_width, ci_brackets = ci_brackets, ci_digits = ci_digits, p_digits = p_digits, format = format, coef_name = coef_name, zap_small = zap_small, ...) 
} # remove unique columns if (.n_unique(formatted_table$Component) == 1) formatted_table$Component <- NULL if (.n_unique(formatted_table$Effects) == 1) formatted_table$Effects <- NULL if (.n_unique(formatted_table$Group) == 1 && isTRUE(mixed_model)) formatted_table$Group <- NULL # no column with CI-level in output if (!is.null(formatted_table$CI) && .n_unique(formatted_table$CI) == 1) { formatted_table$CI <- NULL } if (!is.null(indent_rows)) { attr(formatted_table, "indent_rows") <- indent_rows attr(formatted_table, "indent_groups") <- NULL } else if (!is.null(indent_groups)) { attr(formatted_table, "indent_groups") <- indent_groups } formatted_table } #' @export format.parameters_simulate <- format.parameters_model #' @export format.parameters_brms_meta <- format.parameters_model # Compare parameters ---------------------- #' @inheritParams print.parameters_model #' @export format.compare_parameters <- function(x, style = NULL, split_components = TRUE, digits = 2, ci_digits = 2, p_digits = 3, ci_width = NULL, ci_brackets = NULL, zap_small = FALSE, format = NULL, groups = NULL, ...) { m_class <- attributes(x)$model_class x$Method <- NULL # remove response for mvord if (!is.null(m_class) && any(m_class == "mvord")) { x$Response <- NULL } out <- data.frame( Parameter = x$Parameter, Component = x$Component, stringsAsFactors = FALSE ) # save model names models <- attributes(x)$model_names # save model parameters attributes parameters_attributes <- attributes(x)$all_attributes # is information about grouped parameters stored as attribute? if (is.null(groups) && !is.null(parameters_attributes[[1]]$coef_groups)) { groups <- parameters_attributes[[1]]$coef_groups } for (i in models) { # each column is suffixed with ".model_name", so we extract # columns for each model separately here pattern <- paste0("\\.", i, "$") cols <- x[grepl(pattern, colnames(x))] # since we now have the columns for a single model, we clean the # column names (i.e. 
remove suffix), so we can use "format_table" function colnames(cols) <- gsub(pattern, "", colnames(cols)) # save p-stars in extra column cols$p_stars <- insight::format_p(cols$p, stars = TRUE, stars_only = TRUE) cols <- insight::format_table(cols, digits = digits, ci_width = ci_width, ci_brackets = ci_brackets, ci_digits = ci_digits, p_digits = p_digits, zap_small = zap_small) out <- cbind(out, .format_output_style(cols, style, format, i)) } # group parameters if (!is.null(groups)) { out <- .parameter_groups(out, groups) } indent_groups <- attributes(x)$indent_groups indent_rows <- attributes(x)$indent_rows # check whether to split table by certain factors/columns (like component, response...) split_by <- split_column <- .prepare_splitby_for_print(x) if (length(split_by) > 0) { # set up split-factor if (length(split_column) > 1) { split_by <- lapply(split_column, function(i) x[[i]]) } else { split_by <- list(x[[split_column]]) } names(split_by) <- split_column # make sure we have correct sorting here... formatted_table <- split(out, f = split_by) formatted_table <- lapply(formatted_table, function(i) { # remove unique columns if (.n_unique(i$Component) == 1) i$Component <- NULL if (.n_unique(i$Effects) == 1) i$Effects <- NULL i }) } else { formatted_table <- out # remove unique columns if (.n_unique(formatted_table$Component) == 1) formatted_table$Component <- NULL if (.n_unique(formatted_table$Effects) == 1) formatted_table$Effects <- NULL # add line with info about observations formatted_table <- .add_obs_row(formatted_table, parameters_attributes, style) } formatted_table } # stan models ---------------------------- #' @export format.parameters_stan <- function(x, split_components = TRUE, select = NULL, digits = 2, ci_digits = 2, p_digits = 3, ci_width = NULL, ci_brackets = NULL, zap_small = FALSE, format = NULL, table_caption = NULL, ...) 
{ cp <- attributes(x)$parameter_info att <- attributes(x) final_table <- list() if (!split_components || is.null(cp)) { NextMethod() } else { if (!is.null(select)) { if (all(select == "minimal")) { select <- c("Parameter", "Coefficient", "Median", "Mean", "CI", "CI_low", "CI_high", "pd") } else if (all(select == "short")) { select <- c("Parameter", "Coefficient", "Median", "Mean", "MAD", "SD", "pd") } else { if (is.numeric(select)) select <- colnames(x)[select] } select <- union(select, c("Parameter", "Component", "Effects", "Response", "Subgroup")) to_remove <- setdiff(colnames(x), select) x[to_remove] <- NULL } out <- insight::print_parameters(cp, x, keep_parameter_column = FALSE, format = format) final_table <- lapply(out, function(i) { if (identical(format, "markdown")) { attr(i, "table_caption") <- attributes(i)$main_title } attributes(i) <- utils::modifyList(att, attributes(i)) param_table <- insight::format_table( i, ci_width = ci_width, ci_brackets = ci_brackets, zap_small = zap_small, digits = digits, ci_digits = ci_digits, p_digits = p_digits, preserve_attributes = TRUE ) param_table$Group <- NULL param_table$Response <- NULL param_table$Function <- NULL param_table }) } final_table <- .compact_list(final_table) # modify table title, if requested if (length(final_table) == 1 && !is.null(table_caption)) { attr(final_table[[1]], "table_caption") <- table_caption } else if (length(final_table) == 1 && attr(final_table[[1]], "table_caption")[1] == "# Fixed effects") { attr(final_table[[1]], "table_caption") <- "" } final_table } # sem-models --------------------------------- #' @export format.parameters_sem <- function(x, digits = 2, ci_digits = 2, p_digits = 3, format = NULL, ci_width = NULL, ci_brackets = TRUE, pretty_names = TRUE, ...) 
{ if (missing(digits)) digits <- .additional_arguments(x, "digits", 2) if (missing(ci_digits)) ci_digits <- .additional_arguments(x, "ci_digits", 2) if (missing(p_digits)) p_digits <- .additional_arguments(x, "p_digits", 3) .format_columns_multiple_components(x, pretty_names = TRUE, split_column = "Component", digits = digits, ci_digits = ci_digits, p_digits = p_digits, format = format, ci_width = ci_width, ci_brackets = ci_brackets, ...) } # distribution --------------------------------- #' @export format.parameters_distribution <- function(x, digits = 2, format = NULL, ci_width = "auto", ci_brackets = TRUE, ...) { if (all(c("Min", "Max") %in% names(x))) { x$Min <- insight::format_ci(x$Min, x$Max, ci = NULL, digits = digits, width = ci_width, brackets = ci_brackets) x$Max <- NULL colnames(x)[which(colnames(x) == "Min")] <- "Range" } if (all(c("Q1", "Q3") %in% names(x))) { x$Q1 <- insight::format_ci(x$Q1, x$Q3, ci = NULL, digits = digits, width = ci_width, brackets = FALSE) x$Q3 <- NULL colnames(x)[which(colnames(x) == "Q1")] <- "Quartiles" } if (all(c("CI_low", "CI_high") %in% names(x))) { x$CI_low <- insight::format_ci(x$CI_low, x$CI_high, ci = NULL, digits = digits, width = ci_width, brackets = ci_brackets) x$CI_high <- NULL ci_lvl <- attributes(x)$ci centrality_ci <- attributes(x)$first_centrality if (!is.null(centrality_ci)) { ci_suffix <- paste0(" (", centrality_ci, ")") } else { ci_suffix <- "" } if (!is.null(ci_lvl)) { colnames(x)[which(colnames(x) == "CI_low")] <- sprintf("%i%% CI%s", round(100 * ci_lvl), ci_suffix) } else { colnames(x)[which(colnames(x) == "CI_low")] <- sprintf("CI%s", ci_suffix) } } if ("Trimmed_Mean" %in% colnames(x)) { threshold <- attributes(x)$threshold if (is.null(threshold)) { trim_name <- "Trimmed" } else { trim_name <- sprintf("Trimmed (%g%%)", round(100 * threshold)) } colnames(x)[which(colnames(x) == "Trimmed_Mean")] <- trim_name } if (".group" %in% colnames(x)) { final_table <- list() grps <- split(x, x[[".group"]]) for (i in 
names(grps)) { grps[[i]][[".group"]] <- NULL table_caption <- NULL if (is.null(format) || format == "text") { table_caption <- c(sprintf("# %s", i), "blue") } else if (format == "markdown") { table_caption <- sprintf("%s", i) } attr(grps[[i]], "table_caption") <- table_caption final_table <- c(final_table, list(grps[[i]])) } } else { final_table <- x } final_table } # footer functions ------------------ .format_footer <- function(x, digits = 3, verbose = TRUE, show_sigma = FALSE, show_formula = FALSE, show_r2 = FALSE, format = "text") { # prepare footer footer <- NULL type <- tolower(format) sigma <- attributes(x)$sigma r2 <- attributes(x)$r2 residual_df <- attributes(x)$residual_df p_adjust <- attributes(x)$p_adjust model_formula <- attributes(x)$model_formula anova_test <- attributes(x)$anova_test anova_type <- attributes(x)$anova_type footer_text <- attributes(x)$footer_text text_alternative <- attributes(x)$text_alternative n_obs <- attributes(x)$n_obs # footer: model formula if (isTRUE(show_formula)) { footer <- .add_footer_formula(footer, model_formula, n_obs, type) } # footer: residual standard deviation if (isTRUE(show_sigma)) { footer <- .add_footer_sigma(footer, digits, sigma, residual_df, type) } # footer: r-squared if (isTRUE(show_r2)) { footer <- .add_footer_r2(footer, digits, r2, type) } # footer: p-adjustment if ("p" %in% colnames(x) && isTRUE(verbose)) { footer <- .add_footer_padjust(footer, p_adjust, type) } # footer: anova test if (!is.null(anova_test)) { footer <- .add_footer_anova_test(footer, anova_test, type) } # footer: anova test if (!is.null(anova_type)) { footer <- .add_footer_anova_type(footer, anova_type, type) } # footer: htest alternative if (!is.null(text_alternative)) { footer <- .add_footer_alternative(footer, text_alternative, type) } # footer: generic text if (!is.null(footer_text)) { footer <- .add_footer_text(footer, footer_text, type) } # add color code, if we have a footer if (!is.null(footer) && type == "text") { footer <- 
c(footer, "blue") } # if we have two trailing newlines, remove one if (identical(type, "text") && !is.null(footer) && grepl("\n\n$", footer[1])) { footer[1] <- substr(footer[1], 0, nchar(x) - 1) } footer } # footer: generic text .add_footer_text <- function(footer = NULL, text, type = "text") { if (!is.null(text)) { if (type == "text") { if (is.null(footer)) { fill <- "\n" } else { fill <- "" } footer <- paste0(footer, sprintf("%s%s\n", fill, text)) } else if (type == "html") { footer <- c(footer, gsub("\n", "", text)) } } footer } # footer: residual standard deviation .add_footer_sigma <- function(footer = NULL, digits, sigma, residual_df = NULL, type = "text") { if (!is.null(sigma)) { # format residual df if (!is.null(residual_df)) { res_df <- paste0(" (df = ", residual_df, ")") } else { res_df <- "" } if (type == "text") { if (is.null(footer)) { fill <- "\n" } else { fill <- "" } footer <- paste0(footer, sprintf("%sResidual standard deviation: %.*f%s\n", fill, digits, sigma, res_df)) } else if (type == "html") { footer <- c(footer, trimws(sprintf("Residual standard deviation: %.*f%s", digits, sigma, res_df))) } } footer } # footer: r-squared .add_footer_r2 <- function(footer = NULL, digits, r2 = NULL, type = "text") { if (!is.null(r2)) { rsq <- tryCatch( { paste0(unlist(lapply(r2, function(i) { paste0(attributes(i)$names, ": ", insight::format_value(i, digits = digits)) })), collapse = "; ") }, error = function(e) { NULL } ) if (!is.null(rsq)) { if (type == "text") { if (is.null(footer)) { fill <- "\n" } else { fill <- "" } footer <- paste0(footer, fill, rsq, "\n") } else if (type == "html") { footer <- c(footer, rsq) } } } footer } # footer: anova type .add_footer_anova_type <- function(footer = NULL, aov_type, type = "text") { if (!is.null(aov_type)) { if (type == "text") { if (is.null(footer)) { fill <- "\n" } else { fill <- "" } footer <- paste0(footer, sprintf("%sAnova Table (Type %s tests)\n", fill, aov_type)) } else if (type == "html") { footer <- 
c(footer, sprintf("Anova Table (Type %s tests)", aov_type)) } } footer } # footer: anova test .add_footer_anova_test <- function(footer = NULL, test, type = "text") { if (!is.null(test)) { if (type == "text") { if (is.null(footer)) { fill <- "\n" } else { fill <- "" } footer <- paste0(footer, sprintf("%s%s test statistic\n", fill, test)) } else if (type == "html") { footer <- c(footer, sprintf("%s test statistic", test)) } } footer } # footer: htest alternative .add_footer_alternative <- function(footer = NULL, text_alternative, type = "text") { if (!is.null(text_alternative)) { if (type == "text") { if (is.null(footer)) { fill <- "\n" } else { fill <- "" } footer <- paste0(footer, sprintf("%s%s\n", fill, text_alternative)) } else if (type == "html") { footer <- c(footer, text_alternative) } } footer } # footer: p-adjustment .add_footer_padjust <- function(footer = NULL, p_adjust, type = "text") { if (!is.null(p_adjust) && p_adjust != "none") { if (type == "text") { if (is.null(footer)) { fill <- "\n" } else { fill <- "" } footer <- paste0(footer, fill, "p-value adjustment method: ", format_p_adjust(p_adjust), "\n") } else if (type == "html") { footer <- c(footer, paste0("p-value adjustment method: ", format_p_adjust(p_adjust))) } } footer } # footer: model formula .add_footer_formula <- function(footer = NULL, model_formula, n_obs = NULL, type = "text") { if (!is.null(model_formula)) { # format n of observations if (!is.null(n_obs)) { n <- paste0(" (", n_obs, " Observations)") } else { n <- "" } if (type == "text") { if (is.null(footer)) { fill <- "\n" } else { fill <- "" } footer <- paste0(footer, fill, "Model: ", model_formula, n, "\n") } else if (type == "html") { footer <- c(footer, trimws(paste0("Model: ", model_formula, n))) } } footer } # footer: type of uncertainty interval .print_footer_cimethod <- function(x) { # get attributes ci_method <- .additional_arguments(x, "ci_method", NULL) test_statistic <- .additional_arguments(x, "test_statistic", NULL) 
bootstrap <- .additional_arguments(x, "bootstrap", FALSE) residual_df <- .additional_arguments(x, "residual_df", NULL) random_variances <- .additional_arguments(x, "ran_pars", FALSE) model_class <- .additional_arguments(x, "model_class", NULL) # prepare strings if (!is.null(ci_method)) { # in case of glm's that have df.residual(), and where residual df where requested if (ci_method == "residual" && test_statistic == "z-statistic" && !is.null(residual_df) && !is.infinite(residual_df) && !is.na(residual_df)) { test_statistic <- "t-statistic" } string_tailed <- switch(toupper(ci_method), "HDI" = "highest-density", "UNIROOT" = , "PROFILE" = "profile-likelihood", "equal-tailed" ) string_method <- switch(toupper(ci_method), "BCI" = , "BCAI" = "bias-corrected accelerated bootstrap", "SI" = , "CI" = , "QUANTILE" = , "ETI" = , "HDI" = ifelse(isTRUE(bootstrap), "na\u0131ve bootstrap", "MCMC"), "NORMAL" = "Wald normal", "BOOT" = "parametric bootstrap", "Wald" ) if (toupper(ci_method) %in% c("KENWARD", "KR", "KENWARD-ROGER", "KENWARD-ROGERS", "SATTERTHWAITE")) { string_approx <- paste0("with ", format_df_adjust(ci_method, approx_string = "", dof_string = ""), " ") } else { string_approx <- "" } if (!is.null(test_statistic) && !ci_method %in% c("normal") && !isTRUE(bootstrap)) { string_statistic <- switch(tolower(test_statistic), "t-statistic" = "t", "chi-squared statistic" = , "z-statistic" = "z", "" ) string_method <- paste0(string_method, " ", string_statistic, "-") } else { string_method <- paste0(string_method, " ") } # bootstrapped intervals if (isTRUE(bootstrap)) { msg <- paste0("\nUncertainty intervals (", string_tailed, ") are ", string_method, "intervals.") } else { msg <- paste0("\nUncertainty intervals (", string_tailed, ") and p values (two-tailed) computed using a ", string_method, "distribution ", string_approx, "approximation.") } # do we have random effect variances from glmmTMB? 
if (identical(model_class, "glmmTMB") && isTRUE(random_variances) && !is.null(x$Effects) && "random" %in% x$Effects && (string_method != "Wald z-" || ci_method != "wald")) { msg <- paste(msg, "Uncertainty intervals for random effect variances computed using a Wald z-distribution approximation.") } message(insight::format_message(msg)) } } parameters/R/methods_scam.R0000644000175000017500000000031113766411061015541 0ustar nileshnilesh #' @export ci.scam <- ci.gam #' @export standard_error.scam <- standard_error.gam #' @export p_value.scam <- p_value.gam #' @export model_parameters.scam <- model_parameters.cgam parameters/R/check_factorstructure.R0000644000175000017500000001466714160324505017505 0ustar nileshnilesh#' Check suitability of data for Factor Analysis (FA) #' #' This checks whether the data is appropriate for Factor Analysis (FA) by #' running the [Bartlett's Test of Sphericity][check_sphericity_bartlett] and #' the [Kaiser, Meyer, Olkin (KMO) Measure of Sampling Adequacy #' (MSA)][check_kmo]. #' #' @inheritParams check_sphericity_bartlett #' @examples #' library(parameters) #' check_factorstructure(mtcars) #' @return A list of lists of indices related to sphericity and KMO. #' @seealso #' [check_kmo()], [check_sphericity_bartlett()] and [check_clusterstructure()]. #' @export check_factorstructure <- function(x, ...) { # TODO: detect (and remove?) factors # TODO: This could be improved using the correlation package to use different correlation methods kmo <- check_kmo(x, ...) sphericity <- check_sphericity_bartlett(x, ...) text <- paste0(" - KMO: ", attributes(kmo)$text, "\n - Sphericity: ", attributes(sphericity)$text) if (attributes(kmo)$color == "red" | attributes(sphericity)$color == "red") { color <- "red" } else { color <- "green" } out <- list(KMO = kmo, sphericity = sphericity) attr(out, "text") <- text attr(out, "color") <- color attr(out, "title") <- "Is the data suitable for Factor Analysis?" 
class(out) <- c("easystats_check", class(out)) out } #' Kaiser, Meyer, Olkin (KMO) Measure of Sampling Adequacy (MSA) for Factor Analysis #' #' Kaiser (1970) introduced a Measure of Sampling Adequacy (MSA), later modified #' by Kaiser and Rice (1974). The Kaiser-Meyer-Olkin (KMO) statistic, which can #' vary from 0 to 1, indicates the degree to which each variable in a set is #' predicted without error by the other variables. #' #' A value of 0 indicates that the sum of partial correlations is large relative #' to the sum correlations, indicating factor analysis is likely to be #' inappropriate. A KMO value close to 1 indicates that the sum of partial #' correlations is not large relative to the sum of correlations and so factor #' analysis should yield distinct and reliable factors. #' #' Kaiser (1975) suggested that KMO > .9 were marvelous, in the .80s, #' meritorious, in the .70s, middling, in the .60s, mediocre, in the .50s, #' miserable, and less than .5, unacceptable. Hair et al. (2006) suggest #' accepting a value > 0.5. Values between 0.5 and 0.7 are mediocre, and values #' between 0.7 and 0.8 are good. #' #' @inheritParams check_sphericity_bartlett #' #' @examples #' library(parameters) #' check_kmo(mtcars) #' @return A list of indices related to KMO. #' #' @details This function is strongly inspired by the `KMO` function in the #' `psych` package (Revelle, 2016). All credit goes to its author. #' #' @references #' - Revelle, W. (2016). How To: Use the psych package for Factor Analysis #' and data reduction. #' #' - Kaiser, H. F. (1970). A second generation little jiffy. #' Psychometrika, 35(4), 401-415. #' #' - Kaiser, H. F., & Rice, J. (1974). Little jiffy, mark IV. Educational #' and psychological measurement, 34(1), 111-117. #' #' - Kaiser, H. F. (1974). An index of factorial simplicity. #' Psychometrika, 39(1), 31-36. #' @export check_kmo <- function(x, ...) { cormatrix <- stats::cor(x, use = "pairwise.complete.obs", ...) 
Q <- solve(cormatrix) Q <- stats::cov2cor(Q) diag(Q) <- 0 diag(cormatrix) <- 0 sumQ2 <- sum(Q^2) sumr2 <- sum(cormatrix^2) MSA <- sumr2 / (sumr2 + sumQ2) MSA_variable <- colSums(cormatrix^2) / (colSums(cormatrix^2) + colSums(Q^2)) out <- list(MSA = MSA, MSA_variable = MSA_variable) if (MSA < 0.5) { text <- sprintf("The Kaiser, Meyer, Olkin (KMO) measure of sampling adequacy suggests that factor analysis is likely to be inappropriate (KMO = %.2f).", MSA) color <- "red" } else { text <- sprintf("The Kaiser, Meyer, Olkin (KMO) measure of sampling adequacy suggests that data seems appropriate for factor analysis (KMO = %.2f).", MSA) color <- "green" } attr(out, "text") <- text attr(out, "color") <- color attr(out, "title") <- "KMO Measure of Sampling Adequacy" class(out) <- c("easystats_check", class(out)) out } #' Bartlett's Test of Sphericity #' #' Bartlett's (1951) test of sphericity tests whether a matrix (of correlations) #' is significantly different from an identity matrix. The test provides #' probability that the correlation matrix has significant correlations among at #' least some of the variables in a dataset, a prerequisite for factor analysis #' to work. In other words, before starting with factor analysis, one needs to #' check whether Bartlett’s test of sphericity is significant. #' #' @param x A dataframe. #' @param ... Arguments passed to or from other methods. #' #' @examples #' library(parameters) #' check_sphericity_bartlett(mtcars) #' @details This function is strongly inspired by the `cortest.bartlett` #' function in the \pkg{psych} package (Revelle, 2016). All credit goes to its #' author. #' #' @return A list of indices related to sphericity. #' #' @references #' - Revelle, W. (2016). How To: Use the psych package for Factor Analysis #' and data reduction. #' #' - Bartlett, M. S. (1951). The effect of standardization on a Chi-square #' approximation in factor analysis. Biometrika, 38(3/4), 337-344. 
#' @export
check_sphericity_bartlett <- function(x, ...) {
  # This could be improved using the correlation package to use different correlation methods
  r_matrix <- stats::cor(x, use = "pairwise.complete.obs", ...)

  n_obs <- nrow(x)
  n_vars <- dim(r_matrix)[2]

  # Bartlett's chi-squared approximation for H0: the correlation matrix is an identity matrix
  chisq_stat <- -log(det(r_matrix)) * (n_obs - 1 - (2 * n_vars + 5) / 6)
  dof <- n_vars * (n_vars - 1) / 2
  p_val <- stats::pchisq(chisq_stat, dof, lower.tail = FALSE)

  out <- list(chisq = chisq_stat, p = p_val, dof = dof)

  # significant test: correlations are present, factor analysis is sensible
  if (p_val < 0.001) {
    status_color <- "green"
    msg <- sprintf("Bartlett's test of sphericity suggests that there is sufficient significant correlation in the data for factor analysis (Chisq(%i) = %.2f, %s).", dof, chisq_stat, insight::format_p(p_val))
  } else {
    status_color <- "red"
    msg <- sprintf("Bartlett's test of sphericity suggests that there is not enough significant correlation in the data for factor analysis (Chisq(%i) = %.2f, %s).", dof, chisq_stat, insight::format_p(p_val))
  }

  attr(out, "text") <- msg
  attr(out, "color") <- status_color
  attr(out, "title") <- "Test of Sphericity"
  class(out) <- c("easystats_check", class(out))
  out
}
parameters/R/random_parameters.R0000644000175000017500000001701014137207406016601 0ustar nileshnilesh#' @title Summary information from random effects
#' @name random_parameters
#'
#' @description This function extracts the different variance components of a
#' mixed model and returns the result as a data frame.
#'
#' @param model A mixed effects model (including `stanreg` models).
#' @param component Should all parameters, parameters for the conditional model,
#' or for the zero-inflated part of the model be returned? Applies to models
#' with zero-inflated component. `component` may be one of
#' `"conditional"` (default), `"zi"` or `"zero_inflated"`.
#' May be abbreviated.
#'
#' @return A data frame with random effects statistics for the variance components,
#' including number of levels per random effect group, as well as complete
#' observations in the model.
#' #' @details The variance components are obtained from #' [insight::get_variance()] and are denoted as following: #' \subsection{Within-group (or residual) variance}{ #' The residual variance, \ifelse{html}{\out{σ2ε}}{\eqn{\sigma^2_\epsilon}}, #' is the sum of the distribution-specific variance and the variance due to additive dispersion. #' It indicates the *within-group variance*. #' } #' \subsection{Between-group random intercept variance}{ #' The random intercept variance, or *between-group* variance #' for the intercept (\ifelse{html}{\out{τ00}}{\eqn{\tau_{00}}}), #' is obtained from `VarCorr()`. It indicates how much groups #' or subjects differ from each other. #' } #' \subsection{Between-group random slope variance}{ #' The random slope variance, or *between-group* variance #' for the slopes (\ifelse{html}{\out{τ11}}{\eqn{\tau_{11}}}) #' is obtained from `VarCorr()`. This measure is only available #' for mixed models with random slopes. It indicates how much groups #' or subjects differ from each other according to their slopes. #' } #' \subsection{Random slope-intercept correlation}{ #' The random slope-intercept correlation #' (\ifelse{html}{\out{ρ01}}{\eqn{\rho_{01}}}) #' is obtained from `VarCorr()`. This measure is only available #' for mixed models with random intercepts and slopes. #' } #' **Note:** For the within-group and between-group variance, variance #' and standard deviations (which are simply the square root of the variance) #' are shown. 
#' #' @examples #' if (require("lme4")) { #' data(sleepstudy) #' model <- lmer(Reaction ~ Days + (1 + Days | Subject), data = sleepstudy) #' random_parameters(model) #' } #' @export random_parameters <- function(model, component = "conditional") { component <- match.arg(component, choices = c("conditional", "zi", "zero_inflated")) out <- .randomeffects_summary(model, component) class(out) <- c("parameters_random", class(out)) out } # helper ----------------------------------- .n_randomeffects <- function(model) { sapply(insight::get_data(model, verbose = FALSE)[insight::find_random(model, split_nested = TRUE, flatten = TRUE)], function(i) .n_unique(i)) } .randomeffects_summary <- function(model, component = "conditional") { out <- list() re_variances <- suppressWarnings(insight::get_variance(model, model_component = component)) model_re <- insight::find_random(model, split_nested = FALSE, flatten = TRUE) model_rs <- unlist(insight::find_random_slopes(model)) if (length(re_variances) && sum(!is.na(re_variances)) > 0 && !is.null(re_variances)) { # Residual Variance (Sigma^2) out$Sigma2 <- re_variances$var.residual # Random Intercept Variance if (!.is_empty_object(re_variances$var.intercept)) { var_intercept <- as.list(re_variances$var.intercept) names(var_intercept) <- paste0("tau00_", names(re_variances$var.intercept)) out <- c(out, var_intercept) } # Random Slope Variance if (!.is_empty_object(re_variances$var.slope) && !.is_empty_object(model_rs)) { var_slope <- as.list(re_variances$var.slope) names(var_slope) <- paste0("tau11_", names(re_variances$var.slope)) out <- c(out, var_slope) } # Slope-Intercept Correlation if (!.is_empty_object(re_variances$cor.slope_intercept) && !.is_empty_object(model_rs)) { cor_slope_intercept <- as.list(re_variances$cor.slope_intercept) csi_names <- gsub("(.*)(\\.\\d)(.*)", "\\1\\3", names(re_variances$var.slope)) # csi_names <- names(re_variances$var.slope) names(cor_slope_intercept) <- paste0("rho01_", csi_names) out <- c(out, 
cor_slope_intercept) } # Slopes Correlation if (!.is_empty_object(re_variances$cor.slopes) && !.is_empty_object(model_rs)) { cor_slopes <- as.list(re_variances$cor.slopes) names(cor_slopes) <- paste0("rho00_", names(cor_slopes)) out <- c(out, cor_slopes) } } # Number of levels per random-effect groups n_re <- as.list(.n_randomeffects(model)) if (.is_empty_object(n_re)) { n_re <- stats::setNames(as.numeric(NA), "N") } else { names(n_re) <- paste0("N_", names(n_re)) out <- c(out, n_re) } # number of observations out$Observations <- insight::n_obs(model) # make nice data frame out <- as.data.frame(do.call(rbind, out), stringsAsFactors = FALSE) out$Description <- rownames(out) rownames(out) <- NULL colnames(out) <- c("Value", "Description") # Additional information out$Component <- "" out$Component[out$Description == "Sigma2"] <- "sigma2" out$Component[grepl("^tau00_", out$Description)] <- "tau00" out$Component[grepl("^tau11_", out$Description)] <- "tau11" out$Component[grepl("^rho01_", out$Description)] <- "rho01" out$Component[grepl("^rho00_", out$Description)] <- "rho00" # Additional information out$Term <- "" out$Term[out$Component == "tau00"] <- gsub("^tau00_(.*)", "\\1", out$Description[out$Component == "tau00"]) out$Term[out$Component == "tau11"] <- gsub("^tau11_(.*)", "\\1", out$Description[out$Component == "tau11"]) out$Term[out$Component == "rho01"] <- gsub("^rho01_(.*)", "\\1", out$Description[out$Component == "rho01"]) out$Term[out$Component == "rho00"] <- gsub("^rho00_(.*)(\\.\\.\\.)(.*)", "\\3", out$Description[out$Component == "rho00"]) # renaming out$Type <- "" # Within-Group Variance out$Type[out$Description == "Sigma2"] <- "" out$Description[out$Description == "Sigma2"] <- "Within-Group Variance" # Between-Group Variance out$Type[grepl("^tau00_", out$Description)] <- "Random Intercept" out$Description <- gsub("^tau00_(.*)", "Between-Group Variance", out$Description) out$Type[grepl("^tau11_", out$Description)] <- "Random Slope" out$Description <- 
gsub("^tau11_(.*)", "Between-Group Variance", out$Description) # correlations out$Type[grepl("^rho01_", out$Description)] <- "" out$Description <- gsub("^rho01_(.*)", "Correlations", out$Description) out$Type[grepl("^rho00_", out$Description)] <- "" out$Description <- gsub("^rho00_(.*)", "Correlations", out$Description) out$Type[grepl("N_(.*)", out$Description)] <- "" out$Term[grepl("N_(.*)", out$Description)] <- gsub("N_(.*)", "\\1", out$Description[grepl("N_(.*)", out$Description)]) out$Description <- gsub("_(.*)", "", out$Description) out$Type[grepl("^X", out$Description)] <- "" out$Description[grepl("^X", out$Description)] <- NA out$Component[out$Component == ""] <- NA out$Term[out$Term == ""] <- NA out[c("Description", "Component", "Type", "Term", "Value")] } parameters/R/methods_base.R0000644000175000017500000001127114131355272015535 0ustar nileshnilesh#' @rdname model_parameters.stanreg #' @export model_parameters.data.frame <- function(model, centrality = "median", dispersion = FALSE, ci = .95, ci_method = "hdi", test = c("pd", "rope"), rope_range = "default", rope_ci = 0.95, keep = NULL, drop = NULL, parameters = keep, verbose = TRUE, ...) { # Processing params <- .extract_parameters_bayesian( model, centrality = centrality, dispersion = dispersion, ci = ci, ci_method = ci_method, test = test, rope_range = rope_range, rope_ci = rope_ci, bf_prior = NULL, diagnostic = NULL, priors = FALSE, keep_parameters = keep, drop_parameters = drop, verbose = verbose, ... ) attr(params, "ci") <- ci attr(params, "object_name") <- deparse(substitute(model), width.cutoff = 500) class(params) <- c("parameters_model", "see_parameters_model", class(params)) params } # Standard Errors from standard classes --------------------------------------------- #' @rdname standard_error #' @export standard_error.factor <- function(model, force = FALSE, verbose = TRUE, ...) { if (force) { standard_error(as.numeric(model), ...) 
} else { if (verbose) { warning("Can't compute standard error of non-numeric variables.", call. = FALSE) } return(NA) } } #' @export standard_error.character <- standard_error.factor #' @export standard_error.numeric <- function(model, ...) { sqrt(stats::var(model, na.rm = TRUE) / length(stats::na.omit(model))) } #' @export standard_error.data.frame <- function(model, verbose = TRUE, ...) { unlist(sapply(model, standard_error, verbose = verbose)) } #' @export standard_error.list <- function(model, verbose = TRUE, ...) { if ("gam" %in% names(model)) { model <- model$gam class(model) <- c("gam", "lm", "glm") standard_error(model) } else { if (isTRUE(verbose)) { insight::print_color("\nCould not extract standard errors from model object.\n", "red") } } } #' @export standard_error.table <- function(model, ...) { # compute standard error of proportions if (length(dim(model)) == 1) { total.n <- as.vector(sum(model)) rel.frq <- as.vector(model) / total.n out <- .data_frame( Value = names(model), Proportion = rel.frq, SE = suppressWarnings(sqrt(rel.frq * (1 - rel.frq) / total.n)) ) } else { out <- NA } out } #' @export standard_error.xtabs <- standard_error.table #' @export standard_error.effectsize_std_params <- function(model, verbose = TRUE, ...) { se <- attr(model, "standard_error") if (is.null(se)) { if (isTRUE(verbose)) { insight::print_color("\nCould not extract standard errors of standardized coefficients.\n", "red") } return(NULL) } # for "refit" method if (is.data.frame(se) && "SE" %in% colnames(se)) { se <- se$SE } out <- .data_frame( Parameter = model$Parameter, SE = as.vector(se) ) .remove_backticks_from_parameter_names(out) } # p-Values from standard classes --------------------------------------------- #' @export p_value.numeric <- function(model, null = 0, ...) 
{ # k_lt0 <- sum(model <= 0) # k_gt0 <- sum(model >= 0) # k <- 2 * min(k_lt0, k_gt0) # N <- length(model) # https://blogs.sas.com/content/iml/2011/11/02/how-to-compute-p-values-for-a-bootstrap-distribution.html # https://stats.stackexchange.com/a/28725/293056 x <- stats::na.omit(model) xM <- mean(x) if (is.null(null) || all(is.na(null))) { x0 <- x - xM } else { x0 <- null } k <- sum(x > x0) N <- length(x) (k + 1) / (N + 1) } #' @export p_value.data.frame <- function(model, ...) { data <- model[sapply(model, is.numeric)] .data_frame( Parameter = names(data), p = sapply(data, p_value) ) } #' @export p_value.list <- function(model, method = NULL, verbose = TRUE, ...) { if ("gam" %in% names(model)) { model <- model$gam class(model) <- c("gam", "lm", "glm") p_value(model, method = method) } else { if (isTRUE(verbose)) { warning("Could not extract p-values from model object.", call. = FALSE) } } } parameters/R/reduce_parameters.R0000644000175000017500000001736314160324505016577 0ustar nileshnilesh#' Dimensionality reduction (DR) / Features Reduction #' #' This function performs a reduction in the parameter space (the number of #' variables). It starts by creating a new set of variables, based on the given #' method (the default method is "PCA", but other are available via the #' `method` argument, such as "cMDS", "DRR" or "ICA"). Then, it names this #' new dimensions using the original variables that correlates the most with it. #' For instance, a variable named 'V1_0.97/V4_-0.88' means that the V1 and the #' V4 variables correlate maximally (with respective coefficients of .97 and #' -.88) with this dimension. Although this function can be useful in #' exploratory data analysis, it's best to perform the dimension reduction step #' in a separate and dedicated stage, as this is a very important process in the #' data analysis workflow. `reduce_data()` is an alias for #' `reduce_parameters.data.frame()`. 
#' #' @inheritParams principal_components #' @param method The feature reduction method. Can be one of 'PCA', 'cMDS', #' 'DRR', 'ICA' (see the Details section). #' @param distance The distance measure to be used. Only applies when #' `method = "cMDS"`. This must be one of "euclidean", "maximum", #' "manhattan", "canberra", "binary" or "minkowski". Any unambiguous substring #' can be given. #' #' @details The different methods available are described below: #' \subsection{Supervised Methods}{ #' \itemize{ #' \item **PCA**: See [principal_components()]. #' #' \item **cMDS / PCoA**: Classical Multidimensional Scaling (cMDS) takes a #' set of dissimilarities (i.e., a distance matrix) and returns a set of points #' such that the distances between the points are approximately equal to the #' dissimilarities. #' #' \item **DRR**: Dimensionality Reduction via Regression (DRR) is a very #' recent technique extending PCA (Laparra et al., 2015). Starting from a #' rotated PCA, it predicts redundant information from the remaining components #' using non-linear regression. Some of the most notable advantages of #' performing DRR are avoidance of multicollinearity between predictors and #' overfitting mitigation. DRR tends to perform well when the first principal #' component is enough to explain most of the variation in the predictors. #' Requires the \pkg{DRR} package to be installed. #' #' \item **ICA**: Performs an Independent Component Analysis using the #' FastICA algorithm. Contrary to PCA, which attempts to find uncorrelated #' sources (through least squares minimization), ICA attempts to find #' independent sources, i.e., the source space that maximizes the #' "non-gaussianity" of all sources. Contrary to PCA, ICA does not rank each #' source, which makes it a poor tool for dimensionality reduction. Requires the #' \pkg{fastICA} package to be installed. #' } #' } #' See also [package vignette](https://easystats.github.io/parameters/articles/parameters_reduction.html). 
#' #' @references #' - Nguyen, L. H., \& Holmes, S. (2019). Ten quick tips for effective #' dimensionality reduction. PLOS Computational Biology, 15(6). #' #' - Laparra, V., Malo, J., & Camps-Valls, G. (2015). Dimensionality #' reduction via regression in hyperspectral imagery. IEEE Journal of Selected #' Topics in Signal Processing, 9(6), 1026-1036. #' #' @examples #' data(iris) #' model <- lm(Sepal.Width ~ Species * Sepal.Length + Petal.Width, data = iris) #' model #' reduce_parameters(model) #' #' out <- reduce_data(iris, method = "PCA", n = "max") #' head(out) #' @export reduce_parameters <- function(x, method = "PCA", n = "max", distance = "euclidean", ...) { UseMethod("reduce_parameters") } #' @rdname reduce_parameters #' @export reduce_data <- function(x, method = "PCA", n = "max", distance = "euclidean", ...) { if (!is.data.frame(x)) { stop("Only works on data frames.") } reduce_parameters(x, method = method, n = n, distance = distance, ...) } #' @export reduce_parameters.data.frame <- function(x, method = "PCA", n = "max", distance = "euclidean", ...) { x <- datawizard::convert_data_to_numeric(x) # N factors if (n == "max") { nfac <- ncol(x) - 1 } else { nfac <- .get_n_factors(x, n = n, type = "PCA", rotation = "none") } # compute new features if (tolower(method) %in% c("pca", "principal")) { features <- principal_components(x, n = nfac, ...) features <- as.data.frame(attributes(features)$scores) } else if (tolower(method) %in% c("cmds", "pcoa")) { features <- .cmds(x, n = nfac, distance = distance, ...) } else if (tolower(method) %in% c("drr")) { features <- .drr(x, n = nfac, ...) } else if (tolower(method) %in% c("ica")) { features <- .ica(x, n = nfac, ...) 
} else { stop("'method' must be one of 'PCA', 'cMDS', 'DRR' or 'ICA'.") } # Get weights / pseudo-loadings (correlations) cormat <- as.data.frame(stats::cor(x = x, y = features)) cormat <- cbind(data.frame(Variable = row.names(cormat)), cormat) weights <- as.data.frame(.sort_loadings(cormat, cols = 2:ncol(cormat))) if (n == "max") { weights <- .filter_loadings(weights, threshold = "max", 2:ncol(weights)) non_empty <- sapply(weights[2:ncol(weights)], function(x) !all(is.na(x))) weights <- weights[c(TRUE, non_empty)] features <- features[, non_empty] weights[is.na(weights)] <- 0 weights <- .filter_loadings(.sort_loadings(weights, cols = 2:ncol(weights)), threshold = "max", 2:ncol(weights)) } # Create varnames varnames <- sapply(weights[2:ncol(weights)], function(x) { name <- weights$Variable[!is.na(x)] weight <- insight::format_value(x[!is.na(x)]) paste0(paste(name, weight, sep = "_"), collapse = "/") }) names(features) <- as.character(varnames) # Attributes attr(features, "loadings") <- weights class(features) <- c("parameters_reduction", class(features)) # Out features } #' @export reduce_parameters.lm <- function(x, method = "PCA", n = "max", distance = "euclidean", ...) { data <- reduce_parameters(datawizard::convert_data_to_numeric(insight::get_predictors(x, ...), ...), method = method, n = n, distance = distance) y <- data.frame(.row = 1:length(insight::get_response(x))) y[insight::find_response(x)] <- insight::get_response(x) y$.row <- NULL formula <- paste(insight::find_response(x), "~", paste(paste0("`", names(data), "`"), collapse = " + ")) stats::update(x, formula = formula, data = cbind(data, y)) } #' @export reduce_parameters.merMod <- reduce_parameters.lm #' @export principal_components.lm <- function(x, ...) { reduce_parameters(x, method = "PCA", ...) } #' @export principal_components.merMod <- principal_components.lm #' @keywords internal .cmds <- function(x, n = "all", distance = "euclidean", ...) 
{ n <- .get_n_factors(x, n = n, type = "PCA", rotation = "none") d <- stats::dist(x, method = distance) cmd <- stats::cmdscale(d, k = n, eig = TRUE) features <- as.data.frame(cmd$points) names(features) <- paste0("CMDS", 1:ncol(features)) features } #' @keywords internal .drr <- function(x, n = "all", ...) { n <- .get_n_factors(x, n = n, type = "PCA", rotation = "none") insight::check_if_installed("DRR") junk <- utils::capture.output(suppressMessages(rez <- DRR::drr(x, n))) features <- as.data.frame(rez$fitted.data) names(features) <- paste0("DRR", 1:ncol(features)) features } #' @keywords internal .ica <- function(x, n = "all", ...) { n <- .get_n_factors(x, n = n, type = "PCA", rotation = "none") insight::check_if_installed("fastICA") rez <- fastICA::fastICA(x, n.comp = ncol(x) - 1) features <- as.data.frame(rez$S) names(features) <- paste0("ICA", 1:ncol(features)) features } parameters/R/methods_averaging.R0000644000175000017500000000517414131014351016561 0ustar nileshnilesh# classes: .averaging #################### .averaging #' Parameters from special models #' #' Parameters from special regression models not listed under one of the previous categories yet. #' #' @inheritParams model_parameters.default #' @inheritParams simulate_model #' #' @seealso [insight::standardize_names()] to rename #' columns into a consistent, standardized naming scheme. #' #' @examples #' library(parameters) #' if (require("brglm2", quietly = TRUE)) { #' data("stemcell") #' model <- bracl( #' research ~ as.numeric(religion) + gender, #' weights = frequency, #' data = stemcell, #' type = "ML" #' ) #' model_parameters(model) #' } #' @return A data frame of indices related to the model's parameters. #' @export model_parameters.averaging <- function(model, ci = .95, component = c("conditional", "full"), exponentiate = FALSE, p_adjust = NULL, verbose = TRUE, ...) 
{ component <- match.arg(component) out <- .model_parameters_generic( model = model, ci = ci, merge_by = "Parameter", exponentiate = exponentiate, component = component, p_adjust = p_adjust, ... ) attr(out, "object_name") <- deparse(substitute(model), width.cutoff = 500) out } #' @rdname standard_error #' @export standard_error.averaging <- function(model, component = c("conditional", "full"), ...) { component <- match.arg(component) params <- insight::get_parameters(model, component = component) if (component == "full") { s <- summary(model)$coefmat.full } else { s <- summary(model)$coefmat.subset } .data_frame( Parameter = .remove_backticks_from_string(params$Parameter), SE = as.vector(s[, 3]) ) } #' @rdname p_value.DirichletRegModel #' @export p_value.averaging <- function(model, component = c("conditional", "full"), ...) { component <- match.arg(component) params <- insight::get_parameters(model, component = component) if (component == "full") { s <- summary(model)$coefmat.full } else { s <- summary(model)$coefmat.subset } .data_frame( Parameter = .remove_backticks_from_string(params$Parameter), p = as.vector(s[, 5]) ) } #' @export ci.averaging <- function(x, ci = .95, component = c("conditional", "full"), ...) { component <- match.arg(component) .ci_generic(model = x, ci = ci, dof = Inf, component = component) } parameters/R/methods_ivprobit.R0000644000175000017500000000061114132772717016465 0ustar nileshnilesh #' @export ci.ivprobit <- ci.default #' @export degrees_of_freedom.ivprobit <- degrees_of_freedom.ivFixed #' @export standard_error.ivprobit <- function(model, ...) { .data_frame( Parameter = model$names, SE = as.vector(model$se) ) } #' @export p_value.ivprobit <- p_value.default #' @export model_parameters.ivprobit <- model_parameters.ivFixed parameters/R/methods_eflm.R0000644000175000017500000000042414026373110015536 0ustar nileshnilesh# eflm (.eglm) ----------------- #' @export p_value.eglm <- function(model, ...) 
{ stats <- stats::coef(summary(model)) params <- insight::get_parameters(model) .data_frame( Parameter = params$Parameter, p = as.numeric(as.vector(stats[, 4])) ) } parameters/R/methods_DirichletReg.R0000644000175000017500000000741114131014351017157 0ustar nileshnilesh #' @rdname model_parameters.mlm #' @export model_parameters.DirichletRegModel <- function(model, ci = .95, bootstrap = FALSE, iterations = 1000, component = c("all", "conditional", "precision"), standardize = NULL, exponentiate = FALSE, verbose = TRUE, ...) { component <- match.arg(component) if (component == "all") { merge_by <- c("Parameter", "Component", "Response") } else { merge_by <- c("Parameter", "Response") } ## TODO check merge by junk <- utils::capture.output(out <- .model_parameters_generic( model = model, ci = ci, component = component, bootstrap = bootstrap, iterations = iterations, merge_by = merge_by, standardize = standardize, exponentiate = exponentiate, robust = FALSE, ... )) out$Response[is.na(out$Response)] <- "" attr(out, "object_name") <- deparse(substitute(model), width.cutoff = 500) out } #' @export ci.DirichletRegModel <- function(x, ci = .95, component = c("all", "conditional", "precision"), ...) { component <- match.arg(component) params <- insight::get_parameters(x, component = component) out <- .ci_generic(model = x, ci = ci, dof = Inf, ...) if (is.null(out$Component)) { component <- "all" } if ("Response" %in% colnames(params)) { out$Response <- params$Response } if (component != "all") { out <- out[out$Component == component, ] } out } #' @rdname standard_error #' @export standard_error.DirichletRegModel <- function(model, component = c("all", "conditional", "precision"), ...) 
{ component <- match.arg(component) params <- insight::get_parameters(model) out <- .data_frame( Parameter = params$Parameter, Response = params$Response, SE = as.vector(model$se) ) if (!is.null(params$Component)) { out$Component <- params$Component } else { component <- "all" } if (component != "all") { out <- out[out$Component == component, ] } out } #' @title p-values for Models with Special Components #' @name p_value.DirichletRegModel #' #' @description This function attempts to return, or compute, p-values of models #' with special model components. #' #' @param model A statistical model. #' @param component Should all parameters, parameters for the conditional model, #' precision- or scale-component or smooth_terms be returned? `component` #' may be one of `"conditional"`, `"precision"`, `"scale"`, #' `"smooth_terms"`, `"full"` or `"all"` (default). #' @inheritParams p_value #' #' @return The p-values. #' @export p_value.DirichletRegModel <- function(model, component = c("all", "conditional", "precision"), ...) 
{ component <- match.arg(component) params <- insight::get_parameters(model) out <- .data_frame( Parameter = params$Parameter, Response = params$Response, p = as.vector(2 * stats::pnorm(-abs(params$Estimate / model$se))) ) if (!is.null(params$Component)) { out$Component <- params$Component } else { component <- "all" } if (component != "all") { out <- out[out$Component == component, ] } out } parameters/R/dof_satterthwaite.R0000644000175000017500000000144514057212550016620 0ustar nileshnilesh#' @rdname p_value_satterthwaite #' @export dof_satterthwaite <- function(model) { UseMethod("dof_satterthwaite") } #' @export dof_satterthwaite.lmerMod <- function(model) { insight::check_if_installed("lmerTest") parameters <- insight::find_parameters(model, effects = "fixed", flatten = TRUE) lmerTest_model <- lmerTest::as_lmerModLmerTest(model) s <- summary(lmerTest_model) stats::setNames(as.vector(s$coefficients[, 3]), parameters) } #' @export dof_satterthwaite.lme <- function(model) { insight::check_if_installed("lavaSearch2") parameters <- insight::find_parameters(model, effects = "fixed", flatten = TRUE) lavaSearch2::sCorrect(model) <- TRUE s <- lavaSearch2::summary2(model) stats::setNames(as.vector(s$tTable[, "df"]), parameters) } parameters/R/methods_mjoint.R0000644000175000017500000000761014131260734016123 0ustar nileshnilesh#' @export model_parameters.mjoint <- function(model, ci = .95, effects = "fixed", component = c("all", "conditional", "survival"), exponentiate = FALSE, p_adjust = NULL, keep = NULL, drop = NULL, parameters = keep, verbose = TRUE, ...) { effects <- match.arg(effects, choices = c("fixed", "random", "all")) component <- match.arg(component) params <- params_variance <- NULL if (effects %in% c("fixed", "all")) { # Processing params <- .extract_parameters_generic( model, ci = ci, component = component, standardize = FALSE, robust = FALSE, p_adjust = p_adjust, keep_parameters = keep, drop_parameters = drop, ... 
) if (isTRUE(exponentiate) || identical(exponentiate, "nongaussian")) { params <- .exponentiate_parameters(params, model, exponentiate) } params$Effects <- "fixed" } if (effects %in% c("random", "all")) { params_variance <- .extract_random_variances(model, ci = ci, effects = effects, ci_method = NULL) params_variance$Component <- "conditional" } # merge random and fixed effects, if necessary if (!is.null(params) && !is.null(params_variance)) { params$Level <- NA params$Group <- "" # add component column if (!"Component" %in% colnames(params)) { params$Component <- "conditional" } # reorder params <- params[match(colnames(params_variance), colnames(params))] } params <- rbind(params, params_variance) # remove empty column if (!is.null(params$Level) && all(is.na(params$Level))) { params$Level <- NULL } params <- .add_model_parameters_attributes( params, model, ci = ifelse(effects == "random", NA, ci), exponentiate, ci_method = NULL, p_adjust = p_adjust, verbose = verbose, group_level = FALSE, ... ) attr(params, "object_name") <- deparse(substitute(model), width.cutoff = 500) class(params) <- c("parameters_model", "see_parameters_model", class(params)) params } #' @export p_value.mjoint <- function(model, component = c("all", "conditional", "survival"), ...) { component <- match.arg(component) s <- summary(model) params <- rbind( data.frame( Parameter = rownames(s$coefs.long), p = unname(s$coefs.long[, 4]), Component = "conditional", stringsAsFactors = FALSE, row.names = NULL ), data.frame( Parameter = rownames(s$coefs.surv), p = unname(s$coefs.surv[, 4]), Component = "survival", stringsAsFactors = FALSE, row.names = NULL ) ) if (component != "all") { params <- params[params$Component == component, , drop = FALSE] } params } #' @export ci.mjoint <- function(x, ci = .95, ...) { .ci_generic(model = x, ci = ci, dof = Inf, ...) } #' @export standard_error.mjoint <- function(model, component = c("all", "conditional", "survival"), ...) 
{ component <- match.arg(component) s <- summary(model) params <- rbind( data.frame( Parameter = rownames(s$coefs.long), SE = unname(s$coefs.long[, 2]), Component = "conditional", stringsAsFactors = FALSE, row.names = NULL ), data.frame( Parameter = rownames(s$coefs.surv), SE = unname(s$coefs.surv[, 2]), Component = "survival", stringsAsFactors = FALSE, row.names = NULL ) ) if (component != "all") { params <- params[params$Component == component, , drop = FALSE] } params } parameters/R/methods_PMCMRplus.R0000644000175000017500000000117514057212550016405 0ustar nileshnilesh#' @rdname model_parameters.glht #' @export model_parameters.PMCMR <- function(model, ...) { insight::check_if_installed("PMCMRplus") parameters <- PMCMRplus::toTidy(model) names(parameters) <- c( "Group1", "Group2", "Statistic", "p", "alternative", "Method", "Distribution", "p_adjustment" ) parameters <- .add_htest_parameters_attributes(parameters, model) class(parameters) <- c("parameters_model", "see_parameters_model", class(parameters)) parameters } #' @export model_parameters.osrt <- model_parameters.PMCMR #' @export model_parameters.trendPMCMR <- model_parameters.PMCMR parameters/R/compare_parameters.R0000644000175000017500000002526314160324505016754 0ustar nileshnilesh#' @title Compare model parameters of multiple models #' @name compare_parameters #' #' @description Compute and extract model parameters of multiple regression #' models. See [model_parameters()] for further details. #' #' @param ... One or more regression model objects, or objects returned by #' `model_parameters()`. Regression models may be of different model #' types. Model objects may be passed comma separated, or as a list. #' If model objects are passed with names or the list has named elements, #' these names will be used as column names. #' @param component Model component for which parameters should be shown. See #' documentation for related model class in [model_parameters()]. 
#' @param column_names Character vector with strings that should be used as #' column headers. Must be of same length as number of models in `...`. #' @param ci_method Method for computing degrees of freedom for p values #' and confidence intervals (CI). See documentation for related model class #' in [model_parameters()]. #' @param style String, indicating which style of output is requested. Following #' templates are possible: #' #' - `"ci"`: Estimate and confidence intervals, no asterisks for p-values. #' - `"se"`: Estimate and standard errors, no asterisks for p-values. #' - `"ci_p"`: Estimate, confidence intervals and asterisks for p-values. #' - `"se_p"`: Estimate, standard errors and asterisks for p-values. #' - `"ci_p2"`: Estimate, confidence intervals and numeric p-values, in two columns. #' - `"se_p2"`: Estimate, standard errors and numeric p-values, in two columns. #' #' @inheritParams model_parameters.default #' @inheritParams model_parameters.cpglmm #' @inheritParams print.parameters_model #' #' @details #' #' This function is in an early stage and does not yet cope with more complex #' models, and probably does not yet properly render all model components. It #' should also be noted that when including models with interaction terms, not #' only do the values of the parameters change, but so does their meaning (from #' main effects, to simple slopes), thereby making such comparisons hard. #' Therefore, you should not use this function to compare models with #' interaction terms with models without interaction terms. #' #' @return A data frame of indices related to the model's parameters. 
#' #' @examples #' data(iris) #' lm1 <- lm(Sepal.Length ~ Species, data = iris) #' lm2 <- lm(Sepal.Length ~ Species + Petal.Length, data = iris) #' compare_parameters(lm1, lm2) #' #' data(mtcars) #' m1 <- lm(mpg ~ wt, data = mtcars) #' m2 <- glm(vs ~ wt + cyl, data = mtcars, family = "binomial") #' compare_parameters(m1, m2) #' \dontrun{ #' # exponentiate coefficients, but not for lm #' compare_parameters(m1, m2, exponentiate = "nongaussian") #' #' # change column names #' compare_parameters("linear model" = m1, "logistic reg." = m2) #' compare_parameters(m1, m2, column_names = c("linear model", "logistic reg.")) #' #' # or as list #' compare_parameters(list(m1, m2)) #' compare_parameters(list("linear model" = m1, "logistic reg." = m2)) #' } #' @export compare_parameters <- function(..., ci = .95, effects = "fixed", component = "conditional", standardize = NULL, exponentiate = FALSE, ci_method = "wald", p_adjust = NULL, style = NULL, column_names = NULL, keep = NULL, drop = NULL, parameters = keep, verbose = TRUE, df_method = ci_method) { models <- list(...) ## TODO remove later if (!missing(df_method) && !identical(ci_method, df_method)) { warning(insight::format_message("Argument 'df_method' is deprecated. Please use 'ci_method' instead."), call. 
= FALSE) ci_method <- df_method } if (length(models) == 1) { if (insight::is_model(models[[1]]) || inherits(models[[1]], "parameters_model")) { modellist <- FALSE } else { models <- models[[1]] modellist <- TRUE } } else { modellist <- FALSE } if (isTRUE(modellist)) { model_names <- names(models) if (length(model_names) == 0) { model_names <- paste("Model", seq_along(models), sep = " ") names(models) <- model_names } } else { model_names <- match.call(expand.dots = FALSE)$`...` if (length(names(model_names)) > 0) { model_names <- names(model_names) } else if (any(sapply(model_names, is.call))) { model_names <- paste("Model", seq_along(models), sep = " ") } else { model_names <- sapply(model_names, as.character) names(models) <- model_names } } supported_models <- sapply(models, function(i) insight::is_model_supported(i) | inherits(i, "lavaan") | inherits(i, "parameters_model")) if (!all(supported_models)) { warning(insight::format_message( sprintf("Following objects are not supported: %s", paste0(model_names[!supported_models], collapse = ", ")), "Dropping unsupported models now." ), call. = FALSE) models <- models[supported_models] model_names <- model_names[supported_models] } # set default if (is.null(style)) { style <- "ci" } # provide own names if (!is.null(column_names)) { if (length(column_names) != length(model_names)) { if (isTRUE(verbose)) { warning("Number of column names does not match number of models.", call. = FALSE) } } else { model_names <- column_names } } # iterate all models and create list of model parameters m <- lapply(1:length(models), function(i) { model <- models[[i]] model_name <- model_names[[i]] if (inherits(model, "parameters_model")) { # we already have model parameters object... 
dat <- model } else { # set default-ci_type for Bayesian models if (.is_bayesian_model(model) && !ci_method %in% c("hdi", "quantile", "ci", "eti", "si", "bci", "bcai")) { ci_method_tmp <- "hdi" } else { ci_method_tmp <- ci_method } # here we have a model object that needs to be passed to model_parameters dat <- model_parameters( model, ci = ci, effects = effects, component = component, standardize = standardize, exponentiate = exponentiate, ci_method = ci_method_tmp, p_adjust = p_adjust, keep = keep, drop = drop, verbose = verbose ) } # set specific names for coefficient column coef_name <- attributes(dat)$coefficient_name if (!is.null(coef_name)) { colnames(dat)[colnames(dat) == "Coefficient"] <- coef_name } # set pretty parameter names dat <- .set_pretty_names(dat) # make sure we have a component-column, for merging if (!"Component" %in% colnames(dat)) { dat$Component <- "conditional" } # add zi-suffix to parameter names if (any(dat$Component == "zero_inflated")) { dat$Parameter[dat$Component == "zero_inflated"] <- paste0(dat$Parameter[dat$Component == "zero_inflated"], " (zi)") } # add suffix ignore <- colnames(dat) %in% c("Parameter", "Component") colnames(dat)[!ignore] <- paste0(colnames(dat)[!ignore], ".", model_name) # save model number, for sorting dat$model <- i dat$model[.in_intercepts(dat$Parameter)] <- 0 ## NOT SURE why we needed this? It duplicates parameters when ## these are in different order in different models # add index for row order. needed later, because "merge()" sometimes # messes up the correct row sorting, despite setting "sort = FALSE" # dat$.row_index <- 1:nrow(dat) dat }) object_attributes <- lapply(m, attributes) names(object_attributes) <- model_names ## NOT SURE why we needed this? 
It duplicates parameters when ## these are in different order in different models # # merge all data frames # all_models <- suppressWarnings(Reduce(function(x, y) merge(x, y, all = TRUE, sort = FALSE, by = c("Parameter", "Component", ".row_index")), m)) # # # fix row order # row_order <- order(all_models$.row_index) # all_models <- all_models[row_order, ] # all_models$.row_index <- NULL # merge all data frames all_models <- suppressWarnings(Reduce(function(x, y) merge(x, y, all = TRUE, sort = FALSE, by = c("Parameter", "Component")), m)) # find columns with model numbers and create new variable "params_order", # which is pasted together of all model-column indices. Take lowest index of # all model-column indices, which then indicates order of parameters/rows. model_cols <- which(grepl("^model", colnames(all_models))) params_order <- as.numeric(substr(gsub("NA", "", do.call(paste0, all_models[model_cols]), fixed = TRUE), 0, 1)) all_models <- all_models[order(params_order), ] all_models[model_cols] <- NULL attr(all_models, "model_names") <- gsub("\"", "", unlist(lapply(model_names, .safe_deparse)), fixed = TRUE) attr(all_models, "output_style") <- style attr(all_models, "all_attributes") <- object_attributes class(all_models) <- c("compare_parameters", "see_compare_parameters", unique(class(all_models))) all_models } #' @rdname compare_parameters #' @export compare_models <- compare_parameters # helper ---------------------------- .set_pretty_names <- function(x) { att <- attributes(x) if (!is.null(att$pretty_names)) { # remove strings with NA names att$pretty_names <- att$pretty_names[!is.na(names(att$pretty_names))] if (length(att$pretty_names) != length(x$Parameter)) { match_pretty_names <- stats::na.omit(match(names(att$pretty_names), x$Parameter)) if (length(match_pretty_names)) { x$Parameter[match_pretty_names] <- att$pretty_names[x$Parameter[match_pretty_names]] } } else { match_pretty_names <- att$pretty_names[x$Parameter] if (!anyNA(match_pretty_names)) { 
x$Parameter <- att$pretty_names[x$Parameter] } else { match_pretty_names <- stats::na.omit(match(names(att$pretty_names), x$Parameter)) if (length(match_pretty_names)) { x$Parameter[match_pretty_names] <- att$pretty_names[x$Parameter[match_pretty_names]] } } } } if (!is.null(x$Parameter)) { x$Parameter <- gsub("]", ")", gsub("[", "(", x$Parameter, fixed = TRUE), fixed = TRUE) } x } parameters/R/principal_components.R0000644000175000017500000004006414106662543017334 0ustar nileshnilesh#' Principal Component Analysis (PCA) and Factor Analysis (FA) #' #' The functions `principal_components()` and `factor_analysis()` can #' be used to perform a principal component analysis (PCA) or a factor analysis #' (FA). They return the loadings as a data frame, and various methods and #' functions are available to access / display other information (see the #' Details section). #' #' @param x A data frame or a statistical model. #' @param n Number of components to extract. If `n="all"`, then `n` is #' set as the number of variables minus 1 (`ncol(x)-1`). If #' `n="auto"` (default) or `n=NULL`, the number of components is #' selected through [n_factors()] resp. [n_components()]. #' In [reduce_parameters()], can also be `"max"`, in which case #' it will select all the components that are maximally pseudo-loaded (i.e., #' correlated) by at least one variable. #' @param rotation If not `"none"`, the PCA / FA will be computed using the #' \pkg{psych} package. Possible options include `"varimax"`, #' `"quartimax"`, `"promax"`, `"oblimin"`, `"simplimax"`, #' or `"cluster"` (and more). See [psych::fa()] for details. #' @param sort Sort the loadings. #' @param threshold A value between 0 and 1 indicates which (absolute) values #' from the loadings should be removed. An integer higher than 1 indicates the #' n strongest loadings to retain. Can also be `"max"`, in which case it #' will only display the maximum loading per variable (the most simple #' structure). 
#' @param standardize A logical value indicating whether the variables should be #' standardized (centered and scaled) to have unit variance before the #' analysis (in general, such scaling is advisable). #' @param object An object of class `parameters_pca` or #' `parameters_efa` #' @param newdata An optional data frame in which to look for variables with #' which to predict. If omitted, the fitted values are used. #' @param names Optional character vector to name columns of the returned data #' frame. #' @param keep_na Logical, if `TRUE`, predictions also return observations #' with missing values from the original data, hence the number of rows of #' predicted data and original data is equal. #' @param ... Arguments passed to or from other methods. #' @param pca_results The output of the `principal_components()` function. #' @param digits,labels Arguments for `print()`. #' @inheritParams n_factors #' #' @details #' \subsection{Methods and Utilities}{ #' \itemize{ #' \item [n_components()] and [n_factors()] automatically #' estimates the optimal number of dimensions to retain. #' #' \item [check_factorstructure()] checks the suitability of the #' data for factor analysis using the #' [`sphericity()`][check_sphericity_bartlett] and the #' [`sphericity()`][check_kmo] KMO measure. 
#' #' \item{[performance::check_itemscale()] computes various measures #' of internal consistencies applied to the (sub)scales (i.e., components) #' extracted from the PCA.} #' #' \item{Running `summary` returns information related to each #' component/factor, such as the explained variance and the Eivenvalues.} #' #' \item{Running [get_scores()] computes scores for each subscale.} #' #' \item{Running [closest_component()] will return a numeric vector #' with the assigned component index for each column from the original data #' frame.} #' #' \item{Running [rotated_data()] will return the rotated data, #' including missing values, so it matches the original data frame.} #' #' \item{Running #' [`plot()`](https://easystats.github.io/see/articles/parameters.html#principal-component-analysis) #' visually displays the loadings (that requires the #' \href{https://easystats.github.io/see/}{\pkg{see} package} to work).} #' } #' } #' #' \subsection{Complexity}{ #' Complexity represents the number of latent components needed to account #' for the observed variables. Whereas a perfect simple structure solution #' has a complexity of 1 in that each item would only load on one factor, #' a solution with evenly distributed items has a complexity greater than 1 #' (\cite{Hofman, 1978; Pettersson and Turkheimer, 2010}) . #' } #' #' \subsection{Uniqueness}{ #' Uniqueness represents the variance that is 'unique' to the variable and #' not shared with other variables. It is equal to `1 – communality` #' (variance that is shared with other variables). A uniqueness of `0.20` #' suggests that `20%` or that variable's variance is not shared with other #' variables in the overall factor model. The greater 'uniqueness' the lower #' the relevance of the variable in the factor model. #' } #' #' \subsection{MSA}{ #' MSA represents the Kaiser-Meyer-Olkin Measure of Sampling Adequacy #' (\cite{Kaiser and Rice, 1974}) for each item. 
It indicates whether there #' is enough data for each factor give reliable results for the PCA. The #' value should be > 0.6, and desirable values are > 0.8 #' (\cite{Tabachnick and Fidell, 2013}). #' } #' #' \subsection{PCA or FA?}{ #' There is a simplified rule of thumb that may help do decide whether to run #' a factor analysis or a principal component analysis: #' \itemize{ #' \item Run *factor analysis* if you assume or wish to test a #' theoretical model of *latent factors* causing observed variables. #' #' \item Run *principal component analysis* If you want to simply #' *reduce* your correlated observed variables to a smaller set of #' important independent composite variables. #' } #' (Source: [CrossValidated](https://stats.stackexchange.com/q/1576/54740)) #' } #' #' \subsection{Computing Item Scores}{ #' Use [get_scores()] to compute scores for the "subscales" #' represented by the extracted principal components. `get_scores()` #' takes the results from `principal_components()` and extracts the #' variables for each component found by the PCA. Then, for each of these #' "subscales", raw means are calculated (which equals adding up the single #' items and dividing by the number of items). This results in a sum score #' for each component from the PCA, which is on the same scale as the #' original, single items that were used to compute the PCA. #' One can also use `predict()` to back-predict scores for each component, #' to which one can provide `newdata` or a vector of `names` for the #' components. #' } #' #' \subsection{Explained Variance and Eingenvalues}{ #' Use `summary()` to get the Eigenvalues and the explained variance #' for each extracted component. The eigenvectors and eigenvalues represent #' the "core" of a PCA: The eigenvectors (the principal components) #' determine the directions of the new feature space, and the eigenvalues #' determine their magnitude. 
In other words, the eigenvalues explain the #' variance of the data along the new feature axes. #' } #' #' #' @examples #' #' library(parameters) #' #' \donttest{ #' # Principal Component Analysis (PCA) ------------------- #' if (require("psych")) { #' principal_components(mtcars[, 1:7], n = "all", threshold = 0.2) #' principal_components(mtcars[, 1:7], #' n = 2, rotation = "oblimin", #' threshold = "max", sort = TRUE #' ) #' principal_components(mtcars[, 1:7], n = 2, threshold = 2, sort = TRUE) #' #' pca <- principal_components(mtcars[, 1:5], n = 2, rotation = "varimax") #' pca # Print loadings #' summary(pca) # Print information about the factors #' predict(pca, names = c("Component1", "Component2")) # Back-predict scores #' #' # which variables from the original data belong to which extracted component? #' closest_component(pca) #' # rotated_data(pca) # TODO: doesn't work #' } #' #' # Automated number of components #' principal_components(mtcars[, 1:4], n = "auto") #' } #' #' #' #' # Factor Analysis (FA) ------------------------ #' if (require("psych")) { #' factor_analysis(mtcars[, 1:7], n = "all", threshold = 0.2) #' factor_analysis(mtcars[, 1:7], n = 2, rotation = "oblimin", threshold = "max", sort = TRUE) #' factor_analysis(mtcars[, 1:7], n = 2, threshold = 2, sort = TRUE) #' #' efa <- factor_analysis(mtcars[, 1:5], n = 2) #' summary(efa) #' predict(efa) #' \donttest{ #' # Automated number of components #' factor_analysis(mtcars[, 1:4], n = "auto") #' } #' } #' @return A data frame of loadings. #' @references \itemize{ #' \item Kaiser, H.F. and Rice. J. (1974). Little jiffy, mark iv. Educational #' and Psychological Measurement, 34(1):111–117 #' #' \item Hofmann, R. (1978). Complexity and simplicity as objective indices #' descriptive of factor solutions. Multivariate Behavioral Research, 13:2, #' 247-250, \doi{10.1207/s15327906mbr1302_9} #' #' \item Pettersson, E., & Turkheimer, E. (2010). 
Item selection, evaluation, #' and simple structure in personality data. Journal of research in #' personality, 44(4), 407-420, \doi{10.1016/j.jrp.2010.03.002} #' #' \item Tabachnick, B. G., and Fidell, L. S. (2013). Using multivariate #' statistics (6th ed.). Boston: Pearson Education. #' } #' @export principal_components <- function(x, n = "auto", rotation = "none", sort = FALSE, threshold = NULL, standardize = TRUE, ...) { UseMethod("principal_components") } #' @rdname principal_components #' @export rotated_data <- function(pca_results) { original_data <- attributes(pca_results)$data_set rotated_matrix <- insight::get_predicted(attributes(pca_results)$model) out <- NULL if (!is.null(original_data) && !is.null(rotated_matrix)) { compl_cases <- attributes(pca_results)$complete_cases if (is.null(compl_cases) && nrow(original_data) != nrow(rotated_matrix)) { warning("Could not retrieve information about missing data.", call. = FALSE) return(NULL) } original_data$.parameters_merge_id <- 1:nrow(original_data) rotated_matrix$.parameters_merge_id <- (1:nrow(original_data))[compl_cases] out <- merge(original_data, rotated_matrix, by = ".parameters_merge_id", all = TRUE, sort = FALSE) out$.parameters_merge_id <- NULL } else { warning(insight::format_message("Either the original or the rotated data could not be retrieved."), call. = FALSE) return(NULL) } out } #' @export principal_components.data.frame <- function(x, n = "auto", rotation = "none", sort = FALSE, threshold = NULL, standardize = TRUE, ...) { # save name of data set data_name <- deparse(substitute(x)) # original data original_data <- x # remove missings x <- stats::na.omit(x) # PCA model <- stats::prcomp(x, retx = TRUE, center = standardize, scale. = standardize, ...) # N factors n <- .get_n_factors(x, n = n, type = "PCA", rotation = rotation) # Rotation if (rotation != "none") { loadings <- .pca_rotate( x, n, rotation = rotation, sort = sort, threshold = threshold, original_data = original_data, ... 
) attr(loadings, "data") <- data_name return(loadings) } # Re-add centers and scales # if (standardize) { # model$center <- attributes(x)$center # model$scale <- attributes(x)$scale # } # Summary (cumulative variance etc.) eigenvalues <- model$sdev^2 data_summary <- .data_frame( Component = sprintf("PC%i", seq_len(length(model$sdev))), Eigenvalues = eigenvalues, Variance = eigenvalues / sum(eigenvalues), Variance_Cumulative = cumsum(eigenvalues / sum(eigenvalues)) ) data_summary$Variance_Proportion <- data_summary$Variance / sum(data_summary$Variance) model$sdev <- model$sdev[1:n] model$rotation <- model$rotation[, 1:n, drop = FALSE] model$x <- model$x[, 1:n, drop = FALSE] data_summary <- data_summary[1:n, , drop = FALSE] # Compute loadings if (length(model$sdev) > 1) { loadings <- as.data.frame(model$rotation %*% diag(model$sdev)) } else { loadings <- as.data.frame(model$rotation %*% model$sdev) } names(loadings) <- data_summary$Component # Format loadings <- cbind(data.frame(Variable = row.names(loadings)), loadings) row.names(loadings) <- NULL # Add information loading_cols <- 2:(n + 1) loadings$Complexity <- (apply(loadings[, loading_cols, drop = FALSE], 1, function(x) sum(x^2)))^2 / apply(loadings[, loading_cols, drop = FALSE], 1, function(x) sum(x^4)) # Add attributes attr(loadings, "summary") <- data_summary attr(loadings, "model") <- model attr(loadings, "rotation") <- "none" attr(loadings, "scores") <- model$x attr(loadings, "standardize") <- standardize attr(loadings, "additional_arguments") <- list(...) 
attr(loadings, "n") <- n attr(loadings, "type") <- "prcomp" attr(loadings, "loadings_columns") <- loading_cols attr(loadings, "complete_cases") <- stats::complete.cases(original_data) # Sorting if (isTRUE(sort)) { loadings <- .sort_loadings(loadings) } # Replace by NA all cells below threshold if (!is.null(threshold)) { loadings <- .filter_loadings(loadings, threshold = threshold) } # Add some more attributes attr(loadings, "loadings_long") <- .long_loadings(loadings, threshold = threshold) # here we match the original columns in the data set with the assigned components # for each variable, so we know which column in the original data set belongs # to which extracted component... attr(loadings, "closest_component") <- .closest_component(loadings, loadings_columns = loading_cols, variable_names = colnames(x)) attr(loadings, "data") <- data_name attr(loadings, "data_set") <- original_data # add class-attribute for printing class(loadings) <- unique(c("parameters_pca", "see_parameters_pca", class(loadings))) loadings } #' @keywords internal .get_n_factors <- function(x, n = NULL, type = "PCA", rotation = "varimax", ...) { # N factors if (is.null(n) || n == "auto") { n <- as.numeric(n_factors(x, type = type, rotation = rotation, ...)) } else if (n == "all") { n <- ncol(x) - 1 } else if (n >= ncol(x)) { n <- ncol(x) - 1 } n } #' @keywords internal .pca_rotate <- function(x, n, rotation, sort = FALSE, threshold = NULL, original_data = NULL, ...) { if (!(rotation %in% c("varimax", "quartimax", "promax", "oblimin", "simplimax", "cluster", "none"))) { stop("`rotation` must be one of \"varimax\", \"quartimax\", \"promax\", \"oblimin\", \"simplimax\", \"cluster\" or \"none\".") } if (!inherits(x, c("prcomp", "data.frame"))) { stop("`x` must be of class `prcomp` or a data frame.", call. = FALSE) } if (!inherits(x, "data.frame") && rotation != "varimax") { stop(sprintf("`x` must be a data frame for `%s`-rotation.", rotation), call. 
= FALSE) } # rotate loadings if (!requireNamespace("psych", quietly = TRUE)) { stop(sprintf("Package `psych` required for `%s`-rotation.", rotation), call. = FALSE) } pca <- psych::principal(x, nfactors = n, rotate = rotation, ...) msa <- psych::KMO(x) attr(pca, "MSA") <- msa$MSAi out <- model_parameters(pca, sort = sort, threshold = threshold) attr(out, "data_set") <- original_data attr(out, "complete_cases") <- stats::complete.cases(original_data) out } parameters/R/reshape_loadings.R0000644000175000017500000000762314036353021016407 0ustar nileshnilesh#' Reshape loadings between wide/long formats #' #' Reshape loadings between wide/long formats. #' #' #' @examples #' library(parameters) #' library(psych) #' #' pca <- model_parameters(psych::fa(attitude, nfactors = 3)) #' loadings <- reshape_loadings(pca) #' #' loadings #' reshape_loadings(loadings) #' @export reshape_loadings <- function(x, ...) { UseMethod("reshape_loadings") } #' @rdname reshape_loadings #' @inheritParams principal_components #' @export reshape_loadings.parameters_efa <- function(x, threshold = NULL, ...) { current_format <- attributes(x)$loadings_format if (is.null(current_format) || current_format == "wide") { .long_loadings(x, threshold = threshold) } else { .wide_loadings(x) } } #' @rdname reshape_loadings #' @param loadings_columns Vector indicating the columns corresponding to loadings. #' @export reshape_loadings.data.frame <- function(x, threshold = NULL, loadings_columns = NULL, ...) { if (is.null(loadings_columns)) loadings_columns <- 1:ncol(x) if (length(loadings_columns) > 1) { .long_loadings(x, threshold = threshold, loadings_columns = loadings_columns) } } #' @keywords internal .wide_loadings <- function(loadings, loadings_columns = "Loading", component_column = "Component", variable_column = "Variable", ...) 
{ if (is.numeric(loadings[[component_column]])) { loadings[[component_column]] <- paste0("F", loadings[[component_column]]) } complexity_column <- if ("Complexity" %in% colnames(loadings)) "Complexity" else NULL uniqueness_column <- if ("Uniqueness" %in% colnames(loadings)) "Uniqueness" else NULL reshape_columns <- c(loadings_columns, component_column, variable_column, complexity_column, uniqueness_column) loadings <- stats::reshape( loadings[reshape_columns], idvar = variable_column, timevar = component_column, direction = "wide", v.names = c(loadings_columns), sep = "_" ) names(loadings) <- gsub(paste0(loadings_columns, "_"), "", names(loadings)) attr(loadings, "loadings_format") <- "wide" class(loadings) <- unique(c("parameters_loadings", class(loadings))) # clean-up, column-order row.names(loadings) <- NULL column_order <- c(setdiff(colnames(loadings), c("Complexity", "Uniqueness")), c("Complexity", "Uniqueness")) loadings[column_order[column_order %in% colnames(loadings)]] } #' @keywords internal .long_loadings <- function(loadings, threshold = NULL, loadings_columns = NULL) { if (is.null(loadings_columns)) { loadings_columns <- attributes(loadings)$loadings_columns } if (!is.null(threshold)) { loadings <- .filter_loadings(loadings, threshold = threshold, loadings_columns = loadings_columns) } # Reshape to long long <- stats::reshape(loadings, direction = "long", varying = list(names(loadings)[loadings_columns]), v.names = "Loading", timevar = "Component", idvar = "Variable" ) # Restore component names for (i in 1:.n_unique(long$Component)) { component <- unique(long$Component)[[i]] name <- names(loadings)[loadings_columns][[i]] long[long$Component == component, "Component"] <- name } # Filtering long <- long[!is.na(long$Loading), ] row.names(long) <- NULL # Reorder columns loadings <- long[, c( "Component", "Variable", "Loading", names(loadings)[-loadings_columns][!names(loadings)[-loadings_columns] %in% c("Component", "Variable", "Loading")] )] 
attr(loadings, "loadings_format") <- "long" class(loadings) <- unique(c("parameters_loadings", class(loadings))) loadings } #' @export print.parameters_loadings <- function(x, ...) { formatted_table <- insight::format_table(x) cat(insight::export_table(formatted_table)) invisible(x) } parameters/R/methods_mclust.R0000644000175000017500000000117014131014352016116 0ustar nileshnilesh#' @rdname model_parameters.kmeans #' #' @examples #' if (require("mclust", quietly = TRUE)) { #' model <- mclust::Mclust(iris[1:4], verbose = FALSE) #' model_parameters(model) #' } #' @export model_parameters.Mclust <- function(model, data = NULL, clusters = NULL, ...) { if (is.null(data)) data <- as.data.frame(model$data) if (is.null(clusters)) clusters <- model$classification params <- .cluster_centers_params(data, clusters, ...) attr(params, "model") <- model attr(params, "type") <- "mixture" attr(params, "title") <- "Gaussian finite mixture model fitted by EM algorithm" params } parameters/R/cluster_analysis.R0000644000175000017500000003655114160324505016471 0ustar nileshnilesh#' Cluster Analysis #' #' Compute hierarchical or kmeans cluster analysis and return the group #' assignment for each observation as vector. #' #' @references #' - Maechler M, Rousseeuw P, Struyf A, Hubert M, Hornik K (2014) cluster: Cluster #' Analysis Basics and Extensions. R package. #' #' @param x A data frame. #' @param n Number of clusters used for supervised cluster methods. If `NULL`, #' the number of clusters to extract is determined by calling [n_clusters()]. Note #' that this argument does not apply for unsupervised clustering methods like #' `dbscan`, `mixture`, `pvclust`, or `pamk`. #' @param method Method for computing the cluster analysis. 
Can be `"kmeans"` #' (default; k-means using `kmeans()`), `"hkmeans"` (hierarchical k-means #' using `factoextra::hkmeans()`), `pam` (K-Medoids using `cluster::pam()`), #' `pamk` (K-Medoids that finds out the number of clusters), `"hclust"` #' (hierarchical clustering using `hclust()` or `pvclust::pvclust()`), #' `dbscan` (DBSCAN using `dbscan::dbscan()`), `hdbscan` (Hierarchical DBSCAN #' using `dbscan::hdbscan()`), or `mixture` (Mixture modelling using #' `mclust::Mclust()`, which requires the user to run `library(mclust)` #' before). #' @param distance_method Distance measure to be used for methods based on #' distances (e.g., when `method = "hclust"` for hierarchical clustering. For #' other methods, such as `"kmeans"`, this argument will be ignored). Must be #' one of `"euclidean"`, `"maximum"`, `"manhattan"`, `"canberra"`, `"binary"` #' or `"minkowski"`. See [dist()] and `pvclust::pvclust()` for more #' information. #' @param hclust_method Agglomeration method to be used when `method = "hclust"` #' or `method = "hkmeans"` (for hierarchical clustering). This should be one #' of `"ward"`, `"ward.D2"`, `"single"`, `"complete"`, `"average"`, #' `"mcquitty"`, `"median"` or `"centroid"`. Default is `"complete"` (see #' [hclust()]). #' @param kmeans_method Algorithm used for calculating kmeans cluster. Only applies, #' if `method = "kmeans"`. May be one of `"Hartigan-Wong"` (default), #' `"Lloyd"` (used by SPSS), or `"MacQueen"`. See [kmeans()] for details on #' this argument. #' @param iterations The number of replications. #' @param dbscan_eps The 'eps' argument for DBSCAN method. See [n_clusters_dbscan()]. #' #' @inheritParams equivalence_test.lm #' @inheritParams n_clusters #' #' @return The group classification for each observation as vector. The #' returned vector includes missing values, so it has the same length #' as `nrow(x)`. 
#' #' @note #' There is also a [`plot()`-method](https://easystats.github.io/see/articles/parameters.html) implemented in the \href{https://easystats.github.io/see/}{\pkg{see}-package}. #' #' @details #' The `print()` and `plot()` methods show the (standardized) mean value for #' each variable within each cluster. Thus, a higher absolute value indicates #' that a certain variable characteristic is more pronounced within that #' specific cluster (as compared to other cluster groups with lower absolute #' mean values). #' #' @seealso #' - [n_clusters()] to determine the number of clusters to extract, #' [cluster_discrimination()] to determine the accuracy of cluster group #' classification via linear discriminant analysis (LDA) and #' [check_clusterstructure()] to check suitability of data #' for clustering. #' - https://www.datanovia.com/en/lessons/ #' #' @examples #' set.seed(33) #' # K-Means ==================================================== #' rez <- cluster_analysis(iris[1:4], n = 3, method = "kmeans") #' rez # Show results #' predict(rez) # Get clusters #' summary(rez) # Extract the centers values (can use 'plot()' on that) #' cluster_discrimination(rez) # Perform LDA #' #' # Hierarchical k-means (more robust k-means) #' if (require("factoextra", quietly = TRUE)) { #' rez <- cluster_analysis(iris[1:4], n = 3, method = "hkmeans") #' rez # Show results #' predict(rez) # Get clusters #' } #' #' # Hierarchical Clustering (hclust) =========================== #' rez <- cluster_analysis(iris[1:4], n = 3, method = "hclust") #' rez # Show results #' predict(rez) # Get clusters #' #' # K-Medoids (pam) ============================================ #' if (require("cluster", quietly = TRUE)) { #' rez <- cluster_analysis(iris[1:4], n = 3, method = "pam") #' rez # Show results #' predict(rez) # Get clusters #' } #' #' # PAM with automated number of clusters #' if (require("fpc", quietly = TRUE)) { #' rez <- cluster_analysis(iris[1:4], method = "pamk") #' rez # Show results 
#' predict(rez) # Get clusters #' } #' #' # DBSCAN ==================================================== #' if (require("dbscan", quietly = TRUE)) { #' # Note that you can assimilate more outliers (cluster 0) to neighbouring #' # clusters by setting borderPoints = TRUE. #' rez <- cluster_analysis(iris[1:4], method = "dbscan", dbscan_eps = 1.45) #' rez # Show results #' predict(rez) # Get clusters #' } #' #' # Mixture ==================================================== #' if (require("mclust", quietly = TRUE)) { #' library(mclust) # Needs the package to be loaded #' rez <- cluster_analysis(iris[1:4], method = "mixture") #' rez # Show results #' predict(rez) # Get clusters #' } #' @export cluster_analysis <- function(x, n = NULL, method = "kmeans", include_factors = FALSE, standardize = TRUE, verbose = TRUE, distance_method = "euclidean", hclust_method = "complete", kmeans_method = "Hartigan-Wong", dbscan_eps = 15, iterations = 100, ...) { # Sanity checks ----------------------------------------------------------- insight::check_if_installed("performance") # match arguments method <- match.arg(method, choices = c("kmeans", "hkmeans", "pam", "pamk", "hclust", "dbscan", "hdbscan", "mixture"), several.ok = TRUE) # Preparation ------------------------------------------------------------- # Preprocess data data <- .prepare_data_clustering(x, include_factors = include_factors, standardize = standardize, ...) # Get number of clusters if (is.null(n) && any(method %in% c("kmeans", "hkmeans", "pam"))) { n <- tryCatch( { nc <- n_clusters(data, standardize = FALSE, ...) n <- attributes(nc)$n if (verbose) { insight::print_color(sprintf("Using solution with %i clusters, supported by %i out of %i methods.\n", n, max(summary(nc)$n_Methods), sum(summary(nc)$n_Methods)), "blue") } n }, error = function(e) { if (isTRUE(verbose)) { stop(insight::format_message("Could not extract number of cluster. Please provide argument 'n'."), call. 
= FALSE) } 2 } ) } # Apply clustering -------------------------------------------------------- if (any(method == "kmeans")) { rez <- .cluster_analysis_kmeans(data, n = n, kmeans_method = kmeans_method, iterations = iterations, ...) } else if (any(method %in% c("hkmeans"))) { rez <- .cluster_analysis_hkmeans(data, n = n, kmeans_method = kmeans_method, hclust_method = hclust_method, iterations = iterations, ...) } else if (any(method %in% c("pam"))) { rez <- .cluster_analysis_pam(data, n = n, distance_method = distance_method, ...) } else if (any(method %in% c("pamk"))) { rez <- .cluster_analysis_pamk(data, distance_method = distance_method, ...) } else if (any(method %in% c("hclust"))) { rez <- .cluster_analysis_hclust(data, n = n, distance_method = distance_method, hclust_method = hclust_method, iterations = iterations, ...) } else if (any(method == "dbscan")) { rez <- .cluster_analysis_dbscan(data, dbscan_eps = dbscan_eps, ...) } else if (any(method == "hdbscan")) { rez <- .cluster_analysis_hdbscan(data, ...) } else if (any(method %in% c("mixture", "mclust"))) { rez <- .cluster_analysis_mixture(data, ...) } else { stop("Did not find 'method' argument. Could be misspecified.") } # Assign clusters to observations # Create NA-vector of same length as original data frame clusters <- rep(NA, times = nrow(x)) # Create vector with cluster group classification (with missing) complete_cases <- stats::complete.cases(x[names(data)]) clusters[complete_cases] <- rez$clusters # Get clustering parameters out <- model_parameters(rez$model, data = data, clusters = clusters, ...) 
performance <- cluster_performance(out) attr(out, "model") <- rez$model attr(out, "method") <- method attr(out, "clusters") <- clusters attr(out, "data") <- data attr(out, "performance") <- performance class(out) <- c("cluster_analysis", class(out)) out } # Clustering Methods -------------------------------------------------------- #' @keywords internal .cluster_analysis_kmeans <- function(data, n = 2, kmeans_method = "Hartigan-Wong", iterations = 100, ...) { model <- stats::kmeans(data, centers = n, algorithm = kmeans_method, iter.max = iterations, ...) list(model = model, clusters = model$cluster) } #' @keywords internal .cluster_analysis_hkmeans <- function(data, n = 2, kmeans_method = "Hartigan-Wong", hclust_method = "complete", iterations = 100, ...) { insight::check_if_installed("factoextra") model <- factoextra::hkmeans(data, k = n, km.algorithm = kmeans_method, iter.max = iterations, hc.method = hclust_method, ...) list(model = model, clusters = model$cluster) } #' @keywords internal .cluster_analysis_pam <- function(data = NULL, n = 2, distance_method = "euclidean", ...) { insight::check_if_installed("cluster") model <- cluster::pam(data, k = n, metric = distance_method, ...) list(model = model, clusters = model$clustering) } #' @keywords internal .cluster_analysis_pamk <- function(data = NULL, distance_method = "euclidean", pamk_method = "ch", ...) { insight::check_if_installed("fpc") model <- fpc::pamk(data, metric = distance_method, criterion = pamk_method, ...) list(model = model$pamobject, clusters = model$pamobject$clustering) } #' @keywords internal .cluster_analysis_hclust <- function(data, n = 2, distance_method = "euclidean", hclust_method = "complete", iterations = 100, ...) { if (is.null(n)) { rez <- n_clusters_hclust(data, preprocess = FALSE, distance_method = distance_method, hclust_method = hclust_method, iterations = iterations, ...) 
out <- list(model = attributes(rez)$model, clusters = rez$Cluster) } else { if (distance_method %in% c("correlation", "uncentered", "abscor")) { warning(paste0("method '", distance_method, "' not supported by regular hclust(). Please specify another one or set n = NULL to use pvclust.")) } dist <- dist(data, method = distance_method, ...) model <- stats::hclust(dist, method = hclust_method, ...) out <- list(model = model, clusters = stats::cutree(model, k = n)) } out } #' @keywords internal .cluster_analysis_dbscan <- function(data = NULL, dbscan_eps = 0.15, min_size = 0.1, borderPoints = FALSE, ...) { insight::check_if_installed("dbscan") if (min_size < 1) min_size <- round(min_size * nrow(data)) model <- dbscan::dbscan(data, eps = dbscan_eps, minPts = min_size, borderPoints = borderPoints, ...) list(model = model, clusters = model$cluster) } #' @keywords internal .cluster_analysis_hdbscan <- function(data = NULL, min_size = 0.1, ...) { insight::check_if_installed("dbscan") if (min_size < 1) min_size <- round(min_size * nrow(data)) model <- dbscan::hdbscan(data, minPts = min_size, ...) list(model = model, clusters = model$cluster) } #' @keywords internal .cluster_analysis_mixture <- function(data = NULL, ...) { insight::check_if_installed("mclust") model <- mclust::Mclust(data, verbose = FALSE, ...) list(model = model, clusters = model$classification) } # Methods ---------------------------------------------------------------- #' @export predict.cluster_analysis <- function(object, newdata = NULL, ...) { if (is.null(newdata)) { attributes(object)$clusters } else { NextMethod() } } #' @export print.cluster_analysis <- function(x, ...) { NextMethod() cat("\n") print(attributes(x)$performance) insight::print_color("\n# You can access the predicted clusters via 'predict()'.\n", "yellow") invisible(x) } #' @export summary.cluster_analysis <- function(object, ...) 
{ data <- as.data.frame(object) cols <- names(attributes(object)$data) data <- data[names(data) %in% c(cols, "Cluster")] # Keep only data class(data) <- c("cluster_analysis_summary", class(data)) data } # Plotting ---------------------------------------------------------------- #' @export visualisation_recipe.cluster_analysis_summary <- function(x, ...) { data <- datawizard::data_to_long( x, cols = names(x)[-1], # skip 'Cluster' column names_to = "Group", values_to = "Center" ) layers <- list() # Layers ----------------------- layers[["l1"]] <- list( geom = "bar", data = data, aes = list(x = "Cluster", y = "Center", fill = "Group"), position = "dodge" ) layers[["l2"]] <- list( geom = "hline", data = data, aes = list(yintercept = 0), linetype = "dotted" ) layers[["l3"]] <- list( geom = "labs", x = "Cluster Group", y = "Center", fill = "Variable", title = "Cluster Centers" ) # Out class(layers) <- c("visualisation_recipe", "see_visualisation_recipe", class(layers)) attr(layers, "data") <- data layers } #' @export visualisation_recipe.cluster_analysis <- function(x, show_data = "text", ...) 
{ ori_data <- stats::na.omit(attributes(x)$data) # Get 2 PCA Components pca <- principal_components(ori_data, n = 2) data <- stats::predict(pca) names(data) <- c("x", "y") data$Cluster <- as.character(stats::na.omit(attributes(x)$clusters)) data$label <- row.names(ori_data) if (!is.null(show_data) && show_data %in% c("label", "text")) { label <- "label" } else { label <- NULL } # Centers data (also on the PCA scale) data_centers <- stats::predict(pca, newdata = as.data.frame(x)[names(ori_data)], names = c("x", "y")) data_centers$Cluster <- as.character(as.data.frame(x)$Cluster) # Outliers data$Cluster[data$Cluster == "0"] <- NA data_centers <- data_centers[data_centers$Cluster != "0", ] layers <- list() # Layers ----------------------- layers[["l1"]] <- list( geom = show_data, data = data, aes = list(x = "x", y = "y", label = label, color = "Cluster") ) layers[["l2"]] <- list( geom = "point", data = data_centers, aes = list(x = "x", y = "y", color = "Cluster"), shape = "+", size = 10 ) layers[["l3"]] <- list( geom = "labs", x = "PCA - 1", y = "PCA - 2", title = "Clustering Solution" ) # Out class(layers) <- c("visualisation_recipe", "see_visualisation_recipe", class(layers)) attr(layers, "data") <- data layers } #' @export plot.cluster_analysis <- function(x, ...) { plot(visualisation_recipe(x, ...)) } parameters/R/extract_random_parameters.R0000644000175000017500000000716114100206044020324 0ustar nileshnilesh .extract_random_parameters <- function(model, ...) { UseMethod(".extract_random_parameters") } .extract_random_parameters.merMod <- function(model, ci = .95, effects = "random", ...) 
{ insight::check_if_installed("lme4") out <- as.data.frame(lme4::ranef(model, condVar = TRUE), stringsAsFactors = FALSE) colnames(out) <- c("Group", "Parameter", "Level", "Coefficient", "SE") # coerce to character out$Parameter <- as.character(out$Parameter) out$Level <- as.character(out$Level) out$Group <- as.character(out$Group) out$Effects <- "random" if (length(ci) == 1) { fac <- stats::qnorm((1 + ci) / 2) out$CI_low <- out$Coefficient - fac * out$SE out$CI_high <- out$Coefficient + fac * out$SE ci_cols <- c("CI_low", "CI_high") } else { ci_cols <- c() for (i in ci) { fac <- stats::qnorm((1 + i) / 2) ci_low <- paste0("CI_low_", i) ci_high <- paste0("CI_high_", i) out[[ci_low]] <- out$Coefficient - fac * out$SE out[[ci_high]] <- out$Coefficient + fac * out$SE ci_cols <- c(ci_cols, ci_low, ci_high) } } stat_column <- gsub("-statistic", "", insight::find_statistic(model), fixed = TRUE) # to match rbind out[[stat_column]] <- NA out$df_error <- NA out$p <- NA out <- out[c("Parameter", "Level", "Coefficient", "SE", ci_cols, stat_column, "df_error", "p", "Effects", "Group")] if (effects == "random") { out[c(stat_column, "df_error", "p")] <- NULL } out } .extract_random_parameters.glmmTMB <- function(model, ci = .95, effects = "random", component = "conditional", ...) 
{ insight::check_if_installed("lme4") out <- as.data.frame(lme4::ranef(model, condVar = TRUE)) colnames(out) <- c("Component", "Group", "Parameter", "Level", "Coefficient", "SE") # filter component out <- switch(component, "zi" = , "zero_inflated" = out[out$Component == "zi", ], "conditional" = out[out$Component == "cond", ], out ) # coerce to character out$Parameter <- as.character(out$Parameter) out$Level <- as.character(out$Level) out$Group <- as.character(out$Group) out$Effects <- "random" # rename out$Component[out$Component == "zi"] <- "zero_inflated" out$Component[out$Component == "cond"] <- "conditional" if (length(ci) == 1) { fac <- stats::qnorm((1 + ci) / 2) out$CI_low <- out$Coefficient - fac * out$SE out$CI_high <- out$Coefficient + fac * out$SE ci_cols <- c("CI_low", "CI_high") } else { ci_cols <- c() for (i in ci) { fac <- stats::qnorm((1 + i) / 2) ci_low <- paste0("CI_low_", i) ci_high <- paste0("CI_high_", i) out[[ci_low]] <- out$Coefficient - fac * out$SE out[[ci_high]] <- out$Coefficient + fac * out$SE ci_cols <- c(ci_cols, ci_low, ci_high) } } stat_column <- gsub("-statistic", "", insight::find_statistic(model), fixed = TRUE) # to match rbind out[[stat_column]] <- NA out$df_error <- NA out$p <- NA out <- out[c("Parameter", "Level", "Coefficient", "SE", ci_cols, stat_column, "df_error", "p", "Component", "Effects", "Group")] if (effects == "random") { out[c(stat_column, "df_error", "p")] <- NULL } out } .extract_random_parameters.MixMod <- function(model, ...) { NULL } parameters/R/methods_bamlss.R0000644000175000017500000000471314137207406016110 0ustar nileshnilesh#' @inheritParams insight::get_parameters #' @export model_parameters.bamlss <- function(model, centrality = "median", dispersion = FALSE, ci = .95, ci_method = "hdi", test = c("pd", "rope"), rope_range = "default", rope_ci = 0.95, component = "all", exponentiate = FALSE, standardize = NULL, keep = NULL, drop = NULL, parameters = keep, verbose = TRUE, ...) 
{ # Processing params <- .extract_parameters_bayesian( model, centrality = centrality, dispersion = dispersion, ci = ci, ci_method = ci_method, test = test, rope_range = rope_range, rope_ci = rope_ci, bf_prior = NULL, diagnostic = NULL, priors = FALSE, effects = "all", component = component, standardize = standardize, keep_parameters = keep, drop_parameters = drop, verbose = verbose, ... ) params <- .add_pretty_names(params, model) if (isTRUE(exponentiate) || identical(exponentiate, "nongaussian")) { params <- .exponentiate_parameters(params, model, exponentiate) } params <- .add_model_parameters_attributes(params, model, ci, exponentiate, ci_method = ci_method, verbose = verbose, ...) attr(params, "parameter_info") <- insight::clean_parameters(model) attr(params, "object_name") <- deparse(substitute(model), width.cutoff = 500) class(params) <- unique(c("parameters_stan", "see_parameters_model", "parameters_model", class(params))) params } #' @export standard_error.bamlss <- function(model, component = c("all", "conditional", "location", "distributional", "auxilliary"), ...) { component <- match.arg(component) params <- insight::get_parameters(model, component = component, ...) .data_frame( Parameter = colnames(params), SE = unname(sapply(params, stats::sd, na.rm = TRUE)) ) } #' @export p_value.bamlss <- p_value.BFBayesFactor parameters/R/methods_mixor.R0000644000175000017500000000540214142707732015764 0ustar nileshnilesh #' @rdname model_parameters.merMod #' @export model_parameters.mixor <- function(model, ci = .95, effects = "all", bootstrap = FALSE, iterations = 1000, standardize = NULL, exponentiate = FALSE, verbose = TRUE, include_sigma = FALSE, ...) { effects <- match.arg(effects, choices = c("all", "fixed", "random")) # standardize only works for fixed effects... 
if (!is.null(standardize) && standardize != "refit") { if (!missing(effects) && effects != "fixed" && verbose) { warning(insight::format_message("Standardizing coefficients only works for fixed effects of the mixed model."), call. = FALSE) } effects <- "fixed" } out <- .model_parameters_generic( model = model, ci = ci, bootstrap = bootstrap, iterations = iterations, merge_by = c("Parameter", "Effects"), standardize = standardize, exponentiate = exponentiate, effects = effects, robust = FALSE, include_sigma = include_sigma, ... ) attr(out, "object_name") <- deparse(substitute(model), width.cutoff = 500) out } #' @export ci.mixor <- function(x, ci = .95, effects = "all", ...) { effects <- match.arg(effects, choices = c("all", "fixed", "random")) .ci_generic(model = x, ci = ci, dof = Inf, effects = effects, robust = FALSE, ...) } #' @rdname standard_error #' @export standard_error.mixor <- function(model, effects = "all", ...) { effects <- match.arg(effects, choices = c("all", "fixed", "random")) stats <- model$Model[, "Std. Error"] parms <- insight::get_parameters(model, effects = effects) .data_frame( Parameter = parms$Parameter, SE = stats[parms$Parameter], Effects = parms$Effects ) } #' @export p_value.mixor <- function(model, effects = "all", ...) { effects <- match.arg(effects, choices = c("all", "fixed", "random")) stats <- model$Model[, "P(>|z|)"] parms <- insight::get_parameters(model, effects = effects) .data_frame( Parameter = parms$Parameter, p = stats[parms$Parameter], Effects = parms$Effects ) } #' @export simulate_model.mixor <- function(model, iterations = 1000, effects = "all", ...) 
{ effects <- match.arg(effects, choices = c("all", "fixed", "random")) out <- .simulate_model(model, iterations, component = "conditional", effects = effects) class(out) <- c("parameters_simulate_model", class(out)) attr(out, "object_name") <- .safe_deparse(substitute(model)) out } parameters/inst/0000755000175000017500000000000014167546374013545 5ustar nileshnileshparameters/inst/CITATION0000644000175000017500000000064714053135315014670 0ustar nileshnileshbibentry( bibtype = "article", title = "Extracting, Computing and Exploring the Parameters of Statistical Models using {R}.", volume = "5", doi = "10.21105/joss.02445", number = "53", journal = "Journal of Open Source Software", author = c(person("Daniel", "Lüdecke"), person("Mattan S.", "Ben-Shachar"), person("Indrajeet", "Patil"), person("Dominique", "Makowski")), year = "2020", pages = "2445" ) parameters/inst/WORDLIST0000644000175000017500000000373114141263004014715 0ustar nileshnileshAnova BGGM BLUPs BMC BayesFM BayesFactor Bentler Bezdek Biometrics Biometrika Blume CFA CNG Cattell Cattell's Cohens Cramer's CrossValidated D'Agostino DBSCAN DOI DRR Davison Delacre DoF DoFs Dupont EFA ESS Eingenvalues Eivenvalues Elff Epskamp FastICA Fidell GJRM GLMM GLMMadaptive Garrido Gelman Golino Gorsuch Greevy Gustafson HC HDI HEXACO Heisig Hinkley Hofman Hofmann Hornik ICA IJCNN Jurs KMO Kenward Kruschke Kutner LMMs Lakens Laparra Lawley MSA Maechler Malo Merkle Metaclustering Modelling Monti NHST Neter Nieto Nonresponse Olkin PCoA PLOS PMCMRplus Pernet Pettersson PloS Psychometrika ROPE's Rchoice Recode Revelle Rhat Rocklin Rosseel Rousseeuw Routledge SBC SEM SEs SGPV Sadana Satterthwaite Satterthwaite's Schaeffer Shachar Shi Shikano Sphericity Stata Struyf TOST Tabachnick Thiyagarajan Timepoint Turkheimer VSS Valls Velicer WRS Wasserman Wisenbaker Zoski afex al anova bamlss bayesGARCH bayestestR behaviour behaviours biserial blavaan blme brms brmsfit btergm cAIC cMDS canberra centre centred centroid ci clara 
clubSandwich clusterable cmprsk coxrobust datanovia datawizard df doi easystats effectsize emmGrid emmeans epiR eps equivariance ergm et exponentiate gam gamm gaussianity github glm glm's glmmTMB heteroskedasticity homoscedasticity htest http https hyperspectral interpretable io ivfixed ivprobit joineRML jstatsoft kmeans lavaan lm lme lmodel lmtest loadings merMod metaBMA metacluster metaclustering metafor mhurdle minkowski modelling modelsummary multicollinearity mvord nubmer occurence pam pamk performant plm posthoc pre priori quantreg quartiles reproducibility riskRegression rmarkdown rms rmsb rownumbers rstanarm sampleSelection setosa spatialreg sphericity statitics subclusters subscale subscales th tidymodels tseries varimax www ’ parameters/inst/doc/0000755000175000017500000000000014167546374014312 5ustar nileshnileshparameters/inst/doc/overview_of_vignettes.html0000644000175000017500000002161714167546374021631 0ustar nileshnilesh Overview of Vignettes

Overview of Vignettes

All package vignettes are available at https://easystats.github.io/parameters/.

Function Overview

parameters/inst/doc/overview_of_vignettes.R0000644000175000017500000000035514167546372021060 0ustar nileshnilesh## ----message=FALSE, warning=FALSE, include=FALSE------------------------------ library(knitr) knitr::opts_chunk$set( echo = TRUE, collapse = TRUE, warning = FALSE, message = FALSE, comment = "#>", eval = TRUE ) parameters/inst/doc/overview_of_vignettes.Rmd0000644000175000017500000000367714136174105021376 0ustar nileshnilesh--- title: "Overview of Vignettes" output: rmarkdown::html_vignette: vignette: > %\VignetteIndexEntry{Overview of Vignettes} \usepackage[utf8]{inputenc} %\VignetteEngine{knitr::rmarkdown} editor_options: chunk_output_type: console --- ```{r message=FALSE, warning=FALSE, include=FALSE} library(knitr) knitr::opts_chunk$set( echo = TRUE, collapse = TRUE, warning = FALSE, message = FALSE, comment = "#>", eval = TRUE ) ``` All package vignettes are available at [https://easystats.github.io/parameters/](https://easystats.github.io/parameters/). ## Function Overview * [Function Reference](https://easystats.github.io/parameters/reference/index.html) ## Description of Parameters * [Summary of Model Parameters](https://easystats.github.io/parameters/articles/model_parameters.html) * [Standardized Model Parameters](https://easystats.github.io/parameters/articles/model_parameters_standardized.html) * [Robust Estimation of Standard Errors, Confidence Intervals, and p-values](https://easystats.github.io/parameters/articles/model_parameters_robust.html) * [Model Parameters for Multiply Imputed Repeated Analyses](https://easystats.github.io/parameters/articles/model_parameters_mice.html) ## Formatting and Printing * [Formatting Model Parameters](https://easystats.github.io/parameters/articles/model_parameters_formatting.html) * [Printing Model Parameters](https://easystats.github.io/parameters/articles/model_parameters_print.html) ## Dimension Reduction and Clustering * [Feature Reduction (PCA, cMDS, 
ICA...)](https://easystats.github.io/parameters/articles/parameters_reduction.html) * [Structural Models (EFA, CFA, SEM...)](https://easystats.github.io/parameters/articles/efa_cfa.html) * [Selection of Model Parameters](https://easystats.github.io/parameters/articles/parameters_selection.html) * [Clustering with easystats](https://easystats.github.io/parameters/articles/clustering.html) parameters/data/0000755000175000017500000000000013741777636013505 5ustar nileshnileshparameters/data/fish.RData0000644000175000017500000000673113617043573015346 0ustar nileshnileshZ TW%+1c͉N\D61f4D D4Hb4.51c xԈG(%J%b 4 jZ9{ޯ_eFL}{7q'z :Nuq^.lj玾m=yk5WwjU.WWxꯥ=oM+[-Xrm-rn]ѝzZڷasʶ^(<:qYe<cuqf6jVT[y\A#+W85GswG̓^̏lkeiwɫ6nۜCպѺ=[Uj s l-Zt's&pwMily륵== V1z\|t_眎{Z+C۪GGť1|X-8Ը9b51ǵr@ *1i:i},\Z1V-t\VnX4W9z׭1֢ڵӰqt j/2}ao^+6CsQæseW;Aie *6t\j]|W~l~kmv=E٪Bкoa[Aաƕ_Ikٔ8 nԚH{Km xVvE2nv.њB2%}cf'_$8ƚNA} -;C-0F[R y,dF@%BܦdĵxftO9׀SF[_< Ec,Q1yԥ,ԍOZ:\~ŝ:{q>o3#!!N~Ua5. vByB71}9pM}Q?qĭ]]ٞOO>׆wk9b[9%?U"9?+| Ik!:zYr/=^yW}Q=/΂wu@Ƴ }U3Ghk8p z,ԥ_tcxB< s5zy;^^>=-{& Yct@YOx&!FmcgO%˒z<[s]Kg+-Ek;hR^48ܵGb|w}(VwX.oys+⬧Pd%qLmU~Ͱ;rm3+<?HtH'~[ *E? )z|.o jЖ_Z ;I?)eOމ8؋Cz|~mQ%]o>uuget%\錼XfGlm@{uUѵfJA7!^WTx$ _&oY.-&*jWB6]#zy3Nt)1oF[3V[? vO*ր=- v~Ɉ[?=.| ;t[g=w}n}'+;?c |EיIhV""}yÒu8׎ AV1e14f& U2f%7#Ɇ/^A~@Q0;9^C_fVax qGm̧}ۿ _a۶rjSOJ(^tTGz "T605;=:3 )U93NO7s,WtF~ 32tz*~cY[ i=x#x0;8K6Gc|{A>/$~ݵ%]5~']x/ x 3QO#.A/&/0g?prנ}ޓoCy{4JzCG۱d^*}qL]VcV.u@R<J1o;/dWAl+s/egڢRyO*LKwG+ < !_"Kˇn{e9&},u.G_T8tطn>WLp=!%qd4~zo0:R}-+KMGLx>%3- ~-kX78^9F/  ՜E궯~FY/^1d\ D<_)U?DēorA4 yzuQyXQN)J)e'sG!>WDTT E>t[xn`F=%)?ȣSyn4G"b@R7)C}}¬o+B|MߪWiN{#q%KwY~ ׮|T2? 
ӓ0SaWmݧ_(y9?&oEk\" 9W-7~C՟~8^i[#z}P"4N_$%%wJC;T7a*ǩ#jswV|NtO>Ծay;vva13 ז87tNx,l^̓scJ(|fhTs6,tμ=OilX'^ػQѳ;3gK"7Y8 %Z''1 v޳BBD,ppʟQm9-parameters/data/qol_cancer.RData0000644000175000017500000000646413741777636016542 0ustar nileshnilesh]iW~LL j6%T2OE4рFH\p]q7-%wKLY,*e[,5_uyHYUv9sν=E۴~k4uP(/7} rpFl_(_|2)/l۽ ^-qpk^pp#otaaN]corxøn={9w8pN98mowxG.w;{pÇ>:sÅw'>i8<:pswx%tCB9|txao9|a#q:?qx?sb~a2g8\a3.qaeV;\YvpxW:\p5uxykur(:D%ءPq8ߡPs;</tXpðCaË^/ux78lprW8p+^pFMi'Go Gb'[7 \?P8Y|?8 ?K/01o9?K3O=gړ{z ? ױ8 \q7gz$fͣyb#?Cu ؟l\~Aҗw.1x<>ZCI9̋#ğu BпX6ۚ$@;4/ɬizn;\Ȓ'o=|?d|Ï|(WދEח_[xv+c:`^b+ޫ>?ڕ|u>wwk??_2^IICǣ?^:ȸ7 ؤ~WﶿN<N>乮oO^'a|*~}x4ٸǩ]—8x Q}wvځNj^/yI^]o#Ŏi>'xXܮ%ͳ^^&Ruq>;S'dFKLkfS7O`wjuIyuYYѣ|7Zy{X5%1 NeVuOrN~sKC7!OSoh?kX7$>-<ϝYqO GcI'|z}JƩOS" Xپ"I>!y