# ==== mutoss/tests/testthat/test-bonferroni.R ====
context("test-bonferroni.R")
test_that("bonferroni works", {
result <- bonferroni(c(0.1,0.2,0.3,0.4))
expect_equal(result$adjPValues, c(0.4, 0.8, 1.0, 1.0))
})
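
# A further sketch in the same style (an illustrative addition, not from the
# package): the Sidak adjustment from R/wrapperFunctions.R on the same input.
test_that("sidak works", {
	result <- sidak(c(0.1, 0.2, 0.3, 0.4))
	expect_equal(result$adjPValues, 1 - (1 - c(0.1, 0.2, 0.3, 0.4))^4)
})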
# ==== mutoss/tests/testthat.R ====
library(testthat)
library(mutoss)
test_check("mutoss")
# ==== mutoss/MD5 ====
6bdc604ad7c6b7645f6545d1c550a79f *DESCRIPTION
30ddf833ae2c4f2d2e79ef7607772125 *NAMESPACE
0f6e99a92ef48e1ecd5ab214d38a5900 *NEWS
0486311855fe5f2d057a567cead65ed5 *R/Augmentation.R
efe0d23bfdcb5867c31852e818071948 *R/CompareObjects.R
734ae11bce0bb8135917578206f4e84c *R/Hommel.R
fee03b2be8177e89bfc5c262802c0fbd *R/MuTossObject.R
9dd35d17353f758e72fdbf08ec037bab *R/REGWQ.R
9f281478f4181538658af06792b2e2fc *R/Rank_Truncated.R
7cbefee0c54a44be7803328ec738cf08 *R/SNK.R
6b33c0f21c3a754ec17b2526028c7ef4 *R/SUD.R
cc28228cc99be053e71307f762dfbc78 *R/SUDProcedures.R
048a16243ad293fb5f51c59294d0d7bc *R/Tukey.R
7e241cdffdf9e9f869a50632cb8910c2 *R/fisher.R
659031b4129eef957b099a8a0e7a0b48 *R/gao.R
246b17b3084e00ed9df8c22a5f79b404 *R/helperfunctions.R
9e34df88c79357f8020e6b762c2d0511 *R/localfdr.R
906494f7583b130b04c4f7b00a7f6c2b *R/marginal.R
e53810ac73a1105a00297bcd86a3fc7d *R/moreSUDprocedures.R
2bea527ab0ab59a6e33258bc032891f9 *R/multcomp.R
3ba32eb84449084cd182e7288fbca54b *R/multtest.R
c2b94f20263a4e4c9f20e907bfb7545f *R/mutoss.plotCI.R
9d994b8fb7ff65858eae79ba0eafdb8c *R/mutossApply.R
f6cc3b2bacd0aa6cb2ead2d0c4654db7 *R/nparcomp1.R
0424bec2459e0a0778b104a5b5a97daf *R/onLoad.R
bcac1e8d141c72d641ef44b8a997ddb9 *R/pi0Est.R
139d1502c0098adfdf4425095074d7a7 *R/simulationList.R
7c3ea508c548cb0b5fe8101fda31aa61 *R/testFunctionsForTheGUI.R
da1b4df398bb8936c28a52c9b249ca4e *R/wrapperFunctions.R
f923283492d5e015cf28e2de436a1648 *build/vignette.rds
1f02923db0a094b86c734267c84930c5 *data/T.Test.tumor.vs.normal.rda
ff97ffe42424af068e86989a4b837da3 *data/notterman.grpLabel.rda
cdeaa07e767165c8561b4b5b8d856f67 *data/notterman.rda
570a381a5e7e5e721241a1d5a80fdd24 *inst/doc/quickstart.Rnw
33426bb5ba6c5ee80f6305fefe07a802 *inst/doc/quickstart.pdf
2c1001a58203a371714181b25b5f55e3 *inst/doc/simToolManual.Rnw
c84d43002076807f01859d31e2e6132f *inst/doc/simToolManual.pdf
f34cd7e467600e4f88a01c91f4fe0794 *man/ABH_pi0_est.Rd
cc78dc7fb9848c3dc3ce230fc6ec301d *man/BH.Rd
c5693535822f99a3278d0c3b3028c815 *man/BL.Rd
51d15f4a3ed343c70133f36aec3ed1e7 *man/BR_pi0_est.Rd
6401e9252287b9588a0459bf9578b92b *man/BY.Rd
b32329c2098364302a147b5259ec8ffe *man/BlaRoq.Rd
353c298262bada952a962f345d54953e *man/ErrorControl.Rd
ca2e74bdf88572e36561baafb0c11139 *man/Mutoss.Rd
ecde017a3b22af727493ca63c4ece060 *man/MutossMethod.Rd
5993e3b93b2ff933aa203c7cbbbc9240 *man/Qvalue.Rd
1dee178bd29782694c541d7727cd6abb *man/SD.Rd
817e3a76652c3099f1eeebdbad5f3273 *man/SU.Rd
ebc7551d3d257f72646e4ffe6eec32f2 *man/SUD.Rd
8f6fc5c5e047f9bd96d8fe7c52a8b5d0 *man/SidakSD.Rd
73beeb1d5250a66c67e660ec21974b7c *man/TSBKY_pi0_est.Rd
eddb94202ddeadde5277da236dff66da *man/adaptiveBH.Rd
10aff53b8d5615fbaeb8a0c6d010887c *man/adaptiveSTS.Rd
3e6b87e8d70253ed18e2e770dc953d0e *man/adjPValuesPlot.Rd
87c1138671a3aac3925f48930c5f79f0 *man/aorc.Rd
93b846592b80cad04ff9a4f841c6ec5a *man/augmentation.Rd
8255108b9ed55ee381711e70fe4f65ca *man/bonferroni.Rd
0b3d1e066211ecba3a10c07fc1b6ae05 *man/calculateBetaAdjustment.Rd
c8c03e7980c125ef8e80eef34b42be58 *man/compareMutoss.Rd
7838a6404a4ffb44323c73d150affd75 *man/fisher22.marginal.Rd
72a405f68d87af73789895c2a0868686 *man/fisher22_fast.Rd
ac4be68b85e2285718feb355ad01af6f *man/fisher23.marginal.Rd
c001f8b60f5a975d4a09480f7b656188 *man/fisher23_fast.Rd
b0f9198077e24043bb85ef83f133c5fd *man/ftest.marginal.Rd
0beab1d5a01c00f357440baa874527ee *man/gao.Rd
6bb1486f3d6c272aca96caaf51bd390c *man/gatherParameters.Rd
6e0a23f81f12706b784a6566b5448260 *man/gatherStatistics.Rd
cd71460a29b347425615a9c0d72030c2 *man/hochberg.Rd
aefd43018e31dfe30435411cbab40d16 *man/holm.Rd
78a722882a74290871126d5e62cd24e0 *man/hommel.Rd
10143d872e44e3258e1b24fbafa8d5ec *man/indepBR.Rd
0a7777639c90a1feb362cf4433c293ce *man/jointCDF.orderedUnif.Rd
26105a1a8ddd50d6d4341d0cb30f6f75 *man/linearStepUp.Rd
92a4dd7c0263f38f912345299adca53f *man/mu.test.class.Rd
445e69d88d61aea988db2bd0239727d0 *man/mu.test.name.Rd
63189f641861eff9c57acda00961f355 *man/mu.test.rates.Rd
7288d808a012c87246a042c2468a3f4d *man/mu.test.same.data.Rd
2ddacd10608a4a44a7734d1c865b4757 *man/mu.test.type.Rd
51391afa1da000f7af6610b35b3db505 *man/multcomp.wrapper.Rd
fb124ea63f80be0b27ae391756f0ce57 *man/multiple.down.Rd
cfd230490fb03adb3a9f8e7f74dfbccb *man/multiple.down.adjust.Rd
e6ffbf57ff449b0e81e603cf1efa39c8 *man/mutoss.apply.Rd
40a66bc8127361fb7cca8148b1d396cb *man/mutoss.models.Rd
beb0768868003afaa2c3bdd518169fe4 *man/mutoss.plotCI.Rd
72ef19fd2e870d0a675eaf587847e482 *man/notterman.Rd
7727d9b319e85c7c6c7b9413949df9ec *man/nparcomp.Rd
5c41cfa586bfbb0898ea803e510d3f9e *man/nparcomp.wrapper.Rd
b91a8a192b97ca36af7bb7715e932087 *man/onesamp.marginal.Rd
707d37f14b05817033c3469bd639fadb *man/oracleBH.Rd
08568eb6719f94c7fc73db8078a40b0c *man/pValuesPlot.Rd
d8161644042e14647de2552771d067c8 *man/paired.marginal.Rd
107aba3697fc0b4ff548f7e7922c3cb1 *man/printRejected.Rd
1282e3f764ce0f6e5b6e932ce8027c0c *man/pval2locfdr.Rd
4920d203fc361d0d1b9e45cb3421fa11 *man/pval2qval.Rd
d25f9bd1b627db897ee4e8550aac6588 *man/ranktruncated.Rd
b90a99e8abaae5db4f01482a70219e10 *man/regwq.Rd
d75f42e604947b8369c1adec8e2823bb *man/reject.Rd
5014f7fe13124312937b0753fc2a7646 *man/requireLibrary.Rd
ea03f92190fa36ede309343c46031dff *man/rom.Rd
20790c73fc0b7cde8976b876b18a10c1 *man/sidak.Rd
2314868a8eb0b3ed89989a57754c46fa *man/simulation.Rd
167aba4e41217b718b0dde847b83bbcb *man/snk.Rd
ac26c199cf1200e585ed5be2502d01b4 *man/storey_pi0_est.Rd
4308881e861e11e96e0bb55e60f6f9db *man/tukey.wrapper.Rd
72afa55ac480bc1cc86ea8972ef0c990 *man/two.stage.Rd
08733a11896904340358cd81cccb023e *man/twosamp.marginal.Rd
d6b477803a7e00c2185c453482bf6d97 *man/twostageBR.Rd
0ebf11de3c4372cc9361ea0f4a61e9f2 *tests/testthat.R
a347228da0db5d2812d99a7ae35a4c9f *tests/testthat/test-bonferroni.R
345a5b9517af229114da8246d0c454a7 *vignettes/index.html
570a381a5e7e5e721241a1d5a80fdd24 *vignettes/quickstart.Rnw
2c1001a58203a371714181b25b5f55e3 *vignettes/simToolManual.Rnw
5c8e035b58499cb525600f038eb75e84 *vignettes/simToolManual.actuallyRnw
b284f220c77983628c0b80a2b15c99c0 *vignettes/simToolVignette.pdf
# ==== mutoss/R/localfdr.R ====
#
# Author: JonathanRosenblatt
###############################################################################
pval2qval<- function(pValues, cutoff){
requireLibrary('fdrtool')
fdrtool <- get("fdrtool", envir=asNamespace("fdrtool"))
qvals<-fdrtool(
pValues,
statistic= 'pvalue',
plot=FALSE,verbose=FALSE)$qval
if (missing(cutoff)) {
return(list(qValues=qvals))
}
return(list(qValues=qvals, rejected= qvals<=cutoff ))
}
pval2locfdr<- function(pValues, cutoff){
requireLibrary('fdrtool')
fdrtool <- get("fdrtool", envir=asNamespace("fdrtool"))
locfdr<-fdrtool(
pValues,
statistic= 'pvalue',
plot=FALSE,verbose=FALSE)$lfdr
if (missing(cutoff)) {
return(list(locFDR=locfdr))
}
return(list(locFDR=locfdr, rejected= locfdr<=cutoff ))
}
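# Usage sketch (illustration only; assumes the 'fdrtool' package is installed):
# q-values and local fdr for a mix of null and non-null p-values.
if (FALSE) {
	set.seed(1)
	pv <- c(runif(900), rbeta(100, 0.5, 10))	# ~10% true signals
	res <- pval2qval(pv, cutoff = 0.1)		# reject where qValue <= 0.1
	sum(res$rejected)
	head(pval2locfdr(pv)$locFDR)
}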
mutoss.locfdr <- function() {
return(new(Class="MutossMethod",
label="Local FDR (fdr)",
callFunction="pval2locfdr",
output=c("locFDR", "rejected"),
info=
"
Name:
Local fdr.\n
Also known as:
fdr, empirical posterior probability of the null. \n
Error Type:
Motivated by Bayesian considerations. Does not guarantee control of frequentist error types like FWER or FDR.\n
Recommended Usage:
Typically used when a massive number of hypotheses is being tested, as in microarray analyses.\n
Related procedures:
See FDR methods for similar procedures for frequentist error control.\n
References:
\n
- Efron B., Tibshirani R., Storey J. D. and Tusher, V. (2001). Empirical Bayes Analysis of a Microarray Experiment. \n
Journal of the American Statistical Association 96(456):1151-1160.
",
parameters=list(
pValues=list(type="numeric"),
cutoff=list(type="numeric", label="Local fdr cutoff for rejection", optional=TRUE))))
}
mutoss.qvalues <- function() {
return(new(Class="MutossMethod",
label="q Values (Fdr)",
callFunction="pval2qval",
output=c("qValues", "rejected"),
info=
" Name:
q-Values.\n
Also known as:
\n
- Estimated pFDR
\n
- Estimated Positive FDR
\n
- Empirical tail-area posterior probability of the null
\n
Error Type:
Motivated by Bayesian considerations. Guarantees FDR control only when a large number of hypotheses is tested.\n
Recommended Usage:
Typically used when a massive number of hypotheses is being tested, as in microarray analyses.\n
Related procedures:
See FDR methods for similar procedures with frequentist error control.\n
References:
\n
- Storey, J. D. (2003). The Positive False Discovery Rate: A Bayesian Interpretation and the q-Value.
The Annals of Statistics 31(6): 2013-2035.
",
parameters=list(
pValues=list(type="numeric"),
cutoff=list(type="numeric", label="q-value (pFDR) cutoff for rejection", optional=TRUE))))
}
# ==== mutoss/R/SUD.R ====
# Implements the elementary functions for a general
# step-up-down test.
# Step-Up and step-down are derived from that.
#
# Author: MarselScheer
###############################################################################
SUD <- function(pValues, criticalValues, startIDX_SUD)
{
len <- length(criticalValues)
# +++++++++++++++++ Plausis ++++++++++++++
if (len == 1)
stop("SUD(): There is only 1 critical Value. Use the function SS()!")
if (len != length(pValues))
stop("SUD(): Length of critical Values and pValues need to be of the same length!")
if (startIDX_SUD < 1 || len < startIDX_SUD )
stop("SUD(): startIDX out of bound. criticalValues[startIDX] will not exist.")
# ----------------- Plausis ---------------
rejected <- rep(FALSE, times = len)
# need to work with ordered pValues
sortedPV <- sort(pValues, index.return = TRUE)
suspiciousPV <- (sortedPV$x <= criticalValues)
if (suspiciousPV[startIDX_SUD])
{# Suspicious pValue.
# Actually doing now a StepDown on startIDX:len.
# Additionally reject anything before startIDX
nonSuspAboveStartIDX <- which(!suspiciousPV[startIDX_SUD:len])
# Note: which() indexes into the subset startIDX:len, so the indices are
# shifted by startIDX_SUD - 1; this is corrected below.
# Perhaps every pValue from startIDX to the end is suspicious; then reject all.
if (length(nonSuspAboveStartIDX) == 0)
return(rep(TRUE, times = len))
# Correcting the shift
nonSuspAboveStartIDX <- nonSuspAboveStartIDX + startIDX_SUD - 1
# There must be some pValue between startIDX and the end that is not suspicious
# Searching the first one. Anything immediately BEFORE that pValue will be rejected.
minIDX <- min(nonSuspAboveStartIDX) - 1
rejected[sortedPV$ix[1:minIDX]] <- TRUE
}
else
{# not suspicious pValue
# Actually doing now a StepUp on 1:startIDX
# Only the hypotheses rejected by this step-up are rejected overall.
suspiciousIDX <- which(suspiciousPV[1:startIDX_SUD])
# perhaps no pValue is suspicious, thus we do not reject anything
if (length(suspiciousIDX) == 0)
return(rep(FALSE, times = len))
# There must be some pValue between 1 and startIDX that is suspicious
# Searching the last one. Anything before (including the maximum) will be rejected.
maxIDX <- max(suspiciousIDX)
rejected[sortedPV$ix[1:maxIDX]] <- TRUE
}
return(rejected)
}
SD <- function(pValues, criticalValues)
{
SUD(criticalValues = criticalValues,
pValues = pValues,
startIDX_SUD = 1)
}
SU <- function(pValues, criticalValues)
{
SUD(criticalValues = criticalValues,
pValues = pValues,
startIDX_SUD = length(criticalValues))
}
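# Usage sketch (illustration only): Holm-type step-down vs. Hochberg-type
# step-up rejections obtained from the same critical values alpha/(m - i + 1).
if (FALSE) {
	p <- c(0.001, 0.013, 0.02, 0.3)
	m <- length(p)
	crit <- 0.05 / (m:1)	# 0.0125, 0.0167, 0.025, 0.05
	SD(p, crit)		# step-down starts at the smallest p-value
	SU(p, crit)		# step-up starts at the largest p-value
}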
# ==== mutoss/R/testFunctionsForTheGUI.R ====
# A file for temporary functions to test the GUI.
#
# Author: MarselScheer
###############################################################################
pValuesPlot = function(pValues) {
plot(ecdf(pValues), do.points=FALSE, verticals = TRUE, main="ecdf", ylim=c(0,1))
abline(0,1, col=2)
}
adjPValuesPlot = function(adjPValues, alpha) {
plot(sort(adjPValues), main="Adjusted p-values", ylab="adjusted p-values", xlab="ordered index", ylim=c(0,1))
if (!missing(alpha)) {
abline(alpha,0, col=2)
}
}
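# Usage sketch (illustration only): under the global null the empirical cdf of
# the p-values should follow the diagonal drawn by pValuesPlot().
if (FALSE) {
	pValuesPlot(runif(1000))
	adjPValuesPlot(p.adjust(runif(1000), "holm"), alpha = 0.05)
}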
# ==== mutoss/R/fisher.R ====
#############
# Fisher 23 #
#############
fisher23.model <- function() {
return(list(model=list(typ="Fisher 2-by-3")))
}
mutoss.fisher23.model <- function() { return(new(Class="MutossMethod",
label="Fisher's exact test in (2x3) tables",
callFunction="fisher23.model",
output=c("model"),
info="
(Marginal) Fisher's exact test in (2x3) tables
Reference:
- Fisher, R. A. (1922). \"On the interpretation of Chi^2 from contingency tables, and the calculation of P.\" Journal of the Royal Statistical Society, 85 (1):87-94.
",
parameters=list(
)
)) }
fisher23.marginal <- function(data, model) {
#m <- dim(data)[3]
#result <- vector(mode="numeric",length=m)
result <- apply(data, 3, function(x) {fisher23_fast(x,2.0e-16)$rand_p} )
return(list(pValues=result))
}
fisher23_fast <- function(obs, epsilon){
# obs = observations = a 2x3 table
#build marginals = (n1., n2., n.1, n.2, n.3)
marginals <- c(sum(obs[1, ]), sum(obs[2, ]), sum(obs[, 1]), sum(obs[, 2]), sum(obs[, 3]))
n <- sum(marginals) / 2
x <- array(data=rep.int(0.0, 2*3*max(marginals)*max(marginals)), c(2, 3, max(marginals)*max(marginals)))
#build log numerator statistic
log_nom <- sum(log(gamma(marginals+1))) - log(gamma(n+1))
#build log denominator statistic
log_denom <- sum(log(gamma(obs+1)))
#compute probability of observed table
prob_table <- exp(log_nom - log_denom)
nonrand_p <- 0.0
rand_count <- 0
dim1 <- min(marginals[1], marginals[3])
#traverse all possible tables with given marginals
counter <- 0
for (k in 0:dim1)
{
for (l in max(0,marginals[1]-marginals[5]-k):min(marginals[1]-k, marginals[4]))
{
counter <- counter+1
x[1, 1, counter] <- k
x[1, 2, counter] <- l
x[1, 3, counter] <- marginals[1] - x[1, 1, counter] - x[1, 2, counter]
x[2, 1, counter] <- marginals[3] - x[1, 1, counter]
x[2, 2, counter] <- marginals[4] - x[1, 2, counter]
x[2, 3, counter] <- marginals[5] - x[1, 3, counter]
}
}
log_denom_iter <- rep.int(0.0, times=counter)
for (k in 1:counter)
{
log_denom_iter[k] <- sum(log(gamma(x[, , k]+1)))
}
prob_lauf <- exp(log_nom - log_denom_iter)
nonrand_p <- sum(prob_lauf[prob_lauf <= prob_table])
rand_count <- sum(abs(exp(prob_lauf) - exp(prob_table)) < epsilon)
u <- runif(1)
rand_p <- max(0.0, nonrand_p - u*rand_count*prob_table)
return(list(nonrand_p=nonrand_p, rand_p=rand_p, prob_table=prob_table))
}
#############
# Fisher 22 #
#############
fisher22.model <- function() {
return(list(model=list(typ="Fisher 2-by-2")))
}
mutoss.fisher22.model <- function() { return(new(Class="MutossMethod",
label="Fisher's exact test in (2x2) tables",
callFunction="fisher22.model",
output=c("model"),
info="
(Marginal) Fisher's exact test in (2x2) tables
Reference:
- Fisher, R. A. (1922). \"On the interpretation of Chi^2 from contingency tables, and the calculation of P.\" Journal of the Royal Statistical Society, 85 (1):87-94.
",
parameters=list(
)
)) }
fisher22.marginal <- function(data, model) {
#m <- dim(data)[3]
#result <- vector(mode="numeric",length=m)
result <- apply(data, 3, function(x) {fisher22_fast(x,2.0e-16)$rand_p} )
return(list(pValues=result))
}
fisher22_fast <- function(obs, epsilon){
# obs = observations = a 2x2 table
#build marginals = (n1., n2., n.1, n.2)
marginals <- c(sum(obs[1, ]), sum(obs[2, ]), sum(obs[, 1]), sum(obs[, 2]))
n <- sum(obs)
x <- array(data=rep.int(0.0, 2*2*max(marginals)*max(marginals)), c(2, 2, max(marginals)*max(marginals)))
#build log numerator statistic
log_nom <- sum(log(gamma(marginals+1))) - log(gamma(n+1))
#build log denominator statistic
log_denom <- sum(log(gamma(obs+1)))
#compute probability of observed table
prob_table <- exp(log_nom - log_denom)
nonrand_p <- 0.0
rand_count <- 0
dim1 <- min(marginals[1], marginals[3])
#traverse all possible tables with given marginals
counter <- 0
for (k in (marginals[1]-marginals[4]):dim1)
{
counter <- counter+1
x[1, 1, counter] <- k
x[1, 2, counter] <- marginals[1] - k
x[2, 1, counter] <- marginals[3] - k
x[2, 2, counter] <- marginals[2] - x[2, 1, counter]
}
log_denom_iter <- rep.int(0.0, times=counter)
for (k in 1:counter)
{
log_denom_iter[k] <- sum(log(gamma(x[, , k]+1)))
}
prob_lauf <- exp(log_nom - log_denom_iter)
nonrand_p <- sum(prob_lauf[prob_lauf <= prob_table])
rand_count <- sum(abs(exp(prob_lauf) - exp(prob_table)) < epsilon)
u <- runif(1)
rand_p <- max(0.0, nonrand_p - u*rand_count*prob_table)
return(list(nonrand_p=nonrand_p, rand_p=rand_p, prob_table=prob_table))
}
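# Usage sketch (illustration only): for a single 2x2 table the non-randomized
# p-value should essentially agree with stats::fisher.test().
if (FALSE) {
	tab <- matrix(c(8, 2, 1, 5), nrow = 2)
	fisher22_fast(tab, 2.0e-16)$nonrand_p
	fisher.test(tab)$p.value
}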
# ==== mutoss/R/wrapperFunctions.R ====
setClass("MutossMethod",
representation = representation(
label = "character", # this label will be shown in the menus
errorControl = "character", # FWER, FWER.weak, FDR, FDX, gFWER, perComparison (?)
callFunction = "character", # the function to call
output = "character", # this is the character vector of the _possible_ output of the function
info = "character", # info text, should contain small description, author, reference etc.
assumptions = "character", # assumptions for this method
parameters = "list", # optional description of parameters - see MuToss developer handbook
misc = "list" # a list where you can put all your miscellaneous stuff
)
)
bonferroni <- function(pValues, alpha, silent=FALSE) {
adjPValues <- pmin(pValues * length(pValues), 1)
if (missing(alpha)) {
return(list(adjPValues=adjPValues))
} else {
rejected <- (adjPValues<=alpha)
if (! silent)
{
cat("\n\n\t\tBonferroni correction\n\n")
printRejected(rejected, pValues, adjPValues)
}
return(list(adjPValues=adjPValues, rejected=rejected,
errorControl = new(Class='ErrorControl', type="FWER", alpha=alpha)))
}
}
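# Usage sketch (illustration only):
if (FALSE) {
	bonferroni(c(0.01, 0.02, 0.5), alpha = 0.05, silent = TRUE)
	# adjusted p-values: 0.03, 0.06, 1.00; only the first hypothesis is rejected
}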
mutoss.bonferroni <- function() { return(new(Class="MutossMethod",
label="Bonferroni correction",
errorControl="FWER",
callFunction="bonferroni",
output=c("adjPValues", "rejected", "errorControl"),
info="Bonferroni correction
\n\n\
The classical Bonferroni correction outputs adjusted p-values, ensuring strong FWER control under arbitrary
dependence of the input p-values. It simply multiplies each input p-value by the total number of hypotheses
(capping the result at 1).
It is recommended to use Holm's step-down instead, which is valid under the exact same assumptions and more powerful.
Reference:
- Bonferroni, C. E. \"Il calcolo delle assicurazioni su gruppi di teste.\" In Studi in Onore del Professore Salvatore Ortu Carboni. Rome: Italy, pp. 13-60, 1935.
\n
- Bonferroni, C. E. \"Teoria statistica delle classi e calcolo delle probabilita.\" Pubblicazioni del R Istituto Superiore di Scienze Economiche e Commerciali di Firenze 8, 3-62, 1936.
",
parameters=list(pValues=list(type="numeric"), alpha=list(type="numeric", optional=TRUE))
)) }
sidak <- function(pValues, alpha, silent=FALSE) {
adjPValues <- sapply(1-(1-pValues)^length(pValues), function(x){min(x,1)})
if (missing(alpha)) {
return(list(adjPValues=adjPValues))
} else {
rejected <- (adjPValues <= alpha)
if (! silent)
{
cat("\n\n\t\tSidak correction\n\n")
printRejected(rejected, pValues, adjPValues)
}
return(list(adjPValues=adjPValues, rejected=rejected,
errorControl = new(Class='ErrorControl',type="FWER",alpha=alpha)))
}
}
mutoss.sidak <- function() { return(new(Class="MutossMethod",
label="Sidak correction",
errorControl="FWER",
callFunction="sidak",
output=c("adjPValues", "rejected", "errorControl"),
assumptions=c("test independence"),
info="Sidak correction
\n\n\
The classical Sidak correction returns adjusted p-values, ensuring strong FWER control under
the assumption of independence of the input p-values. It only uses the fact that the probability of no incorrect
rejection is the product over true nulls of those marginal probabilities (using the assumed independence of p-values).
The procedure is more generally valid for positive orthant dependent test statistics.
It is recommended to use the step-down version of the Sidak correction instead,
which is valid under the exact same assumptions and more powerful.
Reference:
- Sidak, Z. (1967). Rectangular confidence regions for the means of multivariate normal distributions.
Journal of the American Statistical Association, 62:626-633.
\n",
parameters=list(pValues=list(type="numeric"), alpha=list(type="numeric", optional=TRUE))
)) }
# ==== mutoss/R/simulationList.R ====
# Generates a list of lists. Each of these lists, denoted for now
# by L, can be evaluated as a function call by eval(as.call(L))
# listFunAndParameter MUST have the following form:
# list(
# funName = "UnifRandom", # Description/Label of the function to be used
# fun = runif, # A real function, not only the name
# n = 1 # "n" is integer
# )
# Example for the argument:
# Arguments of runif are: n, min, max.
# listFunAndParameter = list(funName="UnifRandomVariable", fun=runif, n=2, min=c(1:2), max=c(1.1, 2.1))
# sapply(generateFunctionStack(listFunAndParameter), function(fc) eval(as.call(fc)))
# is same as
# runif(2, 1, 1.1); runif(2, 2, 1.1); runif(2, 1, 2.1); runif(2, 2, 2.1)
# Of course the second and third call do not make sense.
generateFunctionStack <- function(listFunAndParameter)
{
# special case: the procedure does not need any further parameter
if (length(listFunAndParameter) == 2)
return(list(noParameters=list(fun=listFunAndParameter$fun)))
# listFunAndParameter[[3]] is the first real parameter. The first 2 are the
# function to be called and a description.
outerPar <- 1:length(listFunAndParameter[[3]])
# Actually I want to build the outer product of the parameters,
# but instead of this I use index numbers indicating the position
# of the used parameter. If n=c("a", "b"), n0=c(1:5), alpha=c(0.1, 0.2)
# then 2, 3, 2 stands for n="b", n0=3, alpha=0.2
for(par in listFunAndParameter[-3:-1])
outerPar <- outer(outerPar, 1:length(par), paste)
# special case of only ONE parameter
if (length(listFunAndParameter) == 3)
outerPar <- outer(outerPar, "", paste)
# build now for every parameter constellation
# a list that can be casted into a function call.
fcStack <- list()
for (parIDX in outerPar)
{
idx <- as.numeric(unlist(strsplit(parIDX, " ")))
parameter <- list()
for(i in 1:length(listFunAndParameter[-2:-1]))
# listFunAndParameter[-2:-1][[i]] is the i-th parameter in the list.
# from the 1st parameter we want the idx[1]-th entry from the 2nd parameter
# we want the idx[2]-th entry and so on.
parameter <- c(parameter, listFunAndParameter[-2:-1][[i]][idx[i]])
stackPosName <- paste(listFunAndParameter$funName, parIDX)
fcStack[[stackPosName]] <- c(listFunAndParameter$fun, parameter)
names(fcStack[[stackPosName]]) <- names(listFunAndParameter[-1])
}
return(fcStack)
}
gatherParameters <- function(simObject)
{
#+++++++++++++++++++ Subfunctions +++++++++++++++++++++++
# extract from resultVector ( = simObject$results ) all values of the
# parameter with the name paraName.
getParamWithName <- function(resultVector, paraName)
{
unlist(
lapply(resultVector,
function(mts)
{
val <- mts$parameters[[paraName]]
if (is.null(val))
return("")
val
}
)
)
}
#------------------- Subfunctions -----------------------
# gathering all parameters used in the simObject$results
parNames <- unique(unlist(lapply(simObject$results, function(obj) names(obj$parameters))))
return(data.frame(sapply(parNames, function(pN) getParamWithName(simObject$results, pN))))
# # calling data.frame(sapply(parNames, function(pN) getParamWithName(simObject$results, pN)))
# # is not good, because numeric parameters can be converted to characters and then to factors
# # and it is possible that the original order is lost. For example the order of c(64, 128)
# # will be 128 < 64.
# ret = data.frame(getParamWithName(simObject$results, parNames[1]))
#
# for (pN in parNames[2:length(parNames)]) # {"funName", "method"} subset of parNames; that is length(parName) >= 2
# ret = data.frame(ret, factor(getParamWithName(simObject$results, pN)))
#
# names(ret) <- parNames
# return(ret)
}
gatherStatistics <- function(simObject, listOfStatisticFunctions, listOfAvgFunctions)
{
#+++++++++++++++++++++++++++ Subfunctions ++++++++++++++++++++++++++
# calculates the intersection of all elements given in aList
listIntersect <- function(aList)
{
nn <- length(aList)
if(nn == 1)
return(aList[[1]])
intersect(aList[[1]], listIntersect(aList[-1]))
}
# actually the whole work is done by this subfunction.
gatherStatisticsOneAvgFun <- function(simObject, listOfStatisticFunctions, avgFun, avgFunName = deparse(substitute(avgFun)))
{
# extract the parameter constellations from the object returned by simulation()
paraNameDF <- gatherParameters(simObject)
unqParaNameDF <- unique(paraNameDF)
rownames(unqParaNameDF) <- 1:length(rownames(unqParaNameDF))
# this will be the data.frame containing the parameter constellation and the calculated (averaged) statistics
statDF <- data.frame()
data.set.numbers <- sapply(simObject$results, function(res) res$data.set.number)
for (i in rownames(unqParaNameDF))
{
# search which objects in simObject$results belong to
# parameter configuration in unqParaNameDF[i, ]
idxs <- listIntersect(
lapply(names(paraNameDF),
function(pN) which(unqParaNameDF[i, pN] == paraNameDF[ , pN])
)
)
# applying any given statistic to the objects with the same
# parameter constellation
if (missing(avgFun))
{ # no avgFun, thus the resulting data.frame will have one row for every simObject$results
tmp <- sapply(listOfStatisticFunctions,
function(fun) sapply(idxs, function(idx) fun(simObject$data[[data.set.numbers[idx]]], simObject$results[[idx]]))
)
statDF <- rbind(statDF, cbind(paraNameDF[idxs,], tmp))
}else
{ # avgFun supplied, thus the resulting data.frame will have only one row for
# every parameter constellation
statDF <- rbind(statDF,
sapply(listOfStatisticFunctions,
function(fun) avgFun(sapply(idxs, function(idx)
fun(simObject$data[[data.set.numbers[idx]]],
simObject$results[[idx]])
)
)
)
)
}
}
if (missing(avgFun))
{
# number the rows consecutively
rownames(statDF) <- 1:length(rownames(statDF))
return(
list(
statisticDF = statDF,
name.parameters = names(paraNameDF),
name.statistics = names(listOfStatisticFunctions),
name.avgFun = ""
)
)
}
# label the columns of the resulting data.frame
names(statDF) <- paste(names(listOfStatisticFunctions), avgFunName, sep=".")
statDF <- cbind(unqParaNameDF, statDF)
list(
statisticDF = statDF,
name.parameters = names(paraNameDF),
name.statistics = paste(names(listOfStatisticFunctions), avgFunName, sep="."),
name.avgFun = avgFunName
)
}
#--------------------------- Subfunctions --------------------------
# if no average function is given
# the resulting data.frame will have one row
# for every object in simObject$results
if (missing(listOfAvgFunctions))
return(gatherStatisticsOneAvgFun(simObject, listOfStatisticFunctions))
# the average function is a function, pass this directly to
# gatherStatisticsOneAvgFun
if (is.function(listOfAvgFunctions))
{
return(gatherStatisticsOneAvgFun(
simObject,
listOfStatisticFunctions,
listOfAvgFunctions,
deparse(substitute(listOfAvgFunctions))
)
)
}
# call gatherStatisticsOneAvgFun for every function in
# listOfAvgFunctions
if (length(listOfAvgFunctions) > 0)
{
if (sum(names(listOfAvgFunctions) != "") != length(listOfAvgFunctions))
warning("The functions in listOfAvgFunctions should have a name!")
tmp <- list()
# cnt is needed to determine the name of "fun"
cnt <- 0
for (fun in listOfAvgFunctions)
{
cnt <- cnt + 1
tmp[[cnt]] <- gatherStatisticsOneAvgFun(
simObject,
listOfStatisticFunctions,
fun,
names(listOfAvgFunctions)[cnt]
)
}
# We have gathered many statistics, now join the information
ret <- tmp[[1]]
ret$statisticDF <- ret$statisticDF[ret$name.parameters]
for (i in seq(along.with = listOfAvgFunctions))
{
ret$statisticDF <- cbind(ret$statisticDF, tmp[[i]]$statisticDF[tmp[[i]]$name.statistics])
ret$name.statistics <- c(ret$name.statistics, tmp[[i]]$name.statistics)
ret$name.avgFun <- c(ret$name.avgFun, tmp[[i]]$name.avgFun)
}
ret$name.statistics <- unique(ret$name.statistics)
ret$name.avgFun <- unique(ret$name.avgFun)
return(ret)
}
}
simulation <- function(replications, DataGen, listOfProcedures, discardProcInput=FALSE)
{
paraNameDataGen <- names(DataGen)
if (length(paraNameDataGen) != length(unique(paraNameDataGen)))
{
cat("Parameter of data generating function:\n\t", paraNameDataGen, "\n")
stop("Parameternames of the data generating function are not unique")
}
# check if parameter of the procedures are unique
nameProblems <- FALSE
for( i in seq(along.with = listOfProcedures) )
{
paraNameProc <- names(listOfProcedures[[i]])
if (length(paraNameProc) != length(unique(paraNameProc)))
{
nameProblems <- TRUE
cat("Parameters of procedure", listOfProcedures[[i]]$funName, "are not unique:\n\t", paraNameProc, "\n")
}
}
if (nameProblems) stop("Parameters of some procedures are not unique.\n")
# no intersection between parameters of the data generating function and the
# procedures are allowed.
nameProblems <- FALSE
for( i in seq(along.with = listOfProcedures) )
{
paraNameProc <- names(listOfProcedures[[i]])
equalNames = sort(intersect(paraNameDataGen, paraNameProc))
if (length(equalNames)!= 2 || !all(equalNames == c("fun", "funName")))
{
nameProblems <- TRUE
cat("Common names of the data generating function and multiple test procedure", listOfProcedures[[i]]$funName, "are:\n\t", equalNames, "\n")
}
}
if (nameProblems) stop("The only common name of data generating function and multiple test procedure should be 'fun' and 'funName'.\n")
# TODO: MS print progress of the simulation on the console!
# generating all data generating functions
dataGenStack <- generateFunctionStack(DataGen)
# a bunch of stacks full of procedures
# for example for every method (bonferroni and holm) there
# is a stack for bonferroni with the different parameter configurations
# and a stack for holm with the different parameter configurations
procedureStacks <- lapply(listOfProcedures, function(procs) generateFunctionStack(procs))
names(procedureStacks) <- sapply(listOfProcedures, function(procs) procs$funName)
ret = list()
# cnt is used as an identifier. So every list with
# the same $data.set.number is based on the same generated data
cnt <- 0
for( dataGenCall in dataGenStack )
{
# This is probably the right place for grid computation.
# genOneDataSetAndApplyProcedures calls dataGenCall ONE time; every
# procedure in procedureStacks is applied to this one dataset.
# It returns $data and $results.
genOneDataSetAndApplyProcedures <- function(dummy)
{
# generating data
data <- eval(as.call(dataGenCall))
# cnt is a global variable that has to be increased
# each time a new "dataSet" is generated.
assign("cnt", get("cnt", envir=sys.frame(-2)) + 1, envir = sys.frame(-2))
# every procedure will be applied to the generated Dataset and the results
# will be stored in the following list
procs.results <- vector("list", sum(sapply(procedureStacks, function(stack) length(stack))))
procs.results.idx <- 0
for (pS in seq(along.with=procedureStacks))
{
# procedureStacks consists of stacks, go through one by one
# this means applying every procedure to the given dataset
procStack <- procedureStacks[[pS]]
for (proc in procStack)
{
procs.results.idx <- procs.results.idx + 1
result <- list()
# every dataset gets a unique number
result$data.set.number <- get("cnt", envir = sys.frame(-2))
# saving the parameter constellation of the used data generating function
paramDataGen <- c(DataGen$funName, dataGenCall[-1])
names(paramDataGen)[1] <- "funName"
# saving the parameter constellation of the used procedure
paramProc <- names(procedureStacks)[pS]
# if (length(proc) == 1)
# #proc uses only the parameter from the output of the data generating function
# paramProc <- c(paramProc)#, dummy="")
# else
if(length(proc)>1)
paramProc <- c(paramProc, proc[-1])
names(paramProc)[1] <- "method"
# saving the parameter constellation of the data generating function and the procedure
result$parameters <- c(paramDataGen, paramProc)
# next step is to assign the inputdata for the procedure parameters that was generated by
# the data generating function. But at first I check if this will overwrite
# other parameters already specified for the procedure.
inter <- intersect(names(data$procInput), names(proc))
if (length(inter) != 0)
warning("\n\n\tSome of the parameter of one procedure are already specified,\n\t",
"and the data generating function now provides new values for these",
"parameters :\n\n\t",
"Affected procedure : ", listOfProcedures[[pS]]$funName, "\n\t",
"Affected parameters: ", paste(inter, collapse=" "), "\n")
# assign inputdata generated by the data generating function to the procedure parameters.
for(paraInputName in names(data$procInput))
proc[[paraInputName]] <- data$procInput[[paraInputName]]
# calling the procedure
procOutput <- eval(as.call(proc))
# writing the output of the procedure into the result
for(name in names(procOutput))
result[[name]] <- procOutput[[name]]
# append the new result
procs.results[[procs.results.idx]] <- result
}
}
if (discardProcInput)
data$procInput = NULL
# return the used dataset, the output of the procedure with the used
# parameter constellation
return(list(data=data, results=procs.results))
}
ret <- c(ret, lapply(1:replications, genOneDataSetAndApplyProcedures))
}
# I want to have $data for data and $results for the output of
# the procedures and parameter constellations.
only.results <- sapply(ret, function(obj) obj$results)
dim(only.results) <- NULL
return(
list(data=lapply(ret, function(obj) obj$data),
results=only.results
)
)
}
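# Usage sketch (illustration only; the list layout follows the comments above
# on generateFunctionStack): Bonferroni at two alpha levels on uniform p-values.
if (FALSE) {
	unifGen <- function(n) list(procInput = list(pValues = runif(n)))
	sim <- simulation(replications = 10,
			DataGen = list(funName = "unifP", fun = unifGen, n = 100),
			listOfProcedures = list(
					list(funName = "bonferroni", fun = bonferroni,
							alpha = c(0.05, 0.1), silent = TRUE)))
	length(sim$results)		# 10 replications x 2 alpha settings = 20
	sum(sim$results[[1]]$rejected)
}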
# ==== mutoss/R/mutoss.plotCI.R ====
############################################
# mutoss.plotCI
mutoss.plotCI<-function(mat){
diff<-max(mat[,1])-min(mat[,1])
if(any(is.na(mat[,2]))) mat[,2]<-min(mat[,1])-diff*2
if(any(is.na(mat[,3]))) mat[,3]<-max(mat[,1])+diff*2
k<-nrow(mat)
plotCI(1:k,mat[,1],abs(mat[,3]-mat[,1]),abs(mat[,2]-mat[,1]),lwd=2,col="red",scol="blue",
main="CI plot",xaxt="n",xlab="Parameters",
ylab="Values")
axis(1, at=c(1:k), labels=rownames(mat))
}
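# Usage sketch (illustration only; plotCI() is expected on the search path,
# e.g. from the 'plotrix' package): rows are comparisons, columns are
# estimate, lower confidence bound, upper confidence bound.
if (FALSE) {
	est <- matrix(c(1.2, -0.4, 0.3,		# estimates
					0.2, -1.1, -0.5,	# lower bounds
					2.2, 0.3, 1.1),		# upper bounds
			nrow = 3, dimnames = list(c("A-B", "A-C", "B-C"), NULL))
	mutoss.plotCI(est)
}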
# ==== mutoss/R/gao.R ====
#
#
# Author: FrankKonietschke
###############################################################################
gao<-function(formula, data, alpha = 0.05, control = NULL , silent = FALSE){
dat <- model.frame(formula, data)
if (ncol(dat) != 2) {
stop("Specify one response and only one class variable in the formula !")
}
if (is.numeric(dat[, 1]) == FALSE) {
stop("Response variable must be numeric !")
}
response <- dat[, 1]
group <- as.factor(dat[, 2])
fl <- levels(group)
a <- nlevels(group)
N <- length(response)
n <- aggregate(response,list(group),FUN="length")$x
if (any(n <= 2)) {
warn <- paste("The factor level", fl[n <= 2], "has fewer than three observations!")
stop(warn)
}
if (is.null(control)) {
cont <- 1
}
if(! is.null(control)){
if (!any(fl == control)) {
stop("The dataset doesn't contain this control group!")
}
cont <- which(fl == control)
}
C<-contrMat(1:a,"Dunnett",base=cont)
# ------------- Compute the pseudo-ranks------------------ #
rx <- c()
for (i in 1:N){
help <- expand.grid(response[i],response)
help1 <- (help[,1]>help[,2])+1/2*(help[,1]== help[,2])
help2 <- data.frame(h1=help1,h2=group)
samples2 <- split(help2$h1, help2$h2)
pseudo <- sapply(1:a, function(arg) {
mean(samples2[[arg]])
})
rx[i] <-N*mean(pseudo)
}
new.data <-data.frame(res=rx,group=group)
# ------------------ Point estimators ---------------------#
pd <- 1/N*aggregate(new.data$res,list(group), FUN="mean")$x
Cpd <- C%*%pd
# ------------ Compute the variance estimators ----------- #
v1 <- 1/N^2*aggregate(new.data$res,list(group),FUN="var")$x
lambda <- N/n
v11 <-c(v1*lambda)
v2 <- diag(v1*lambda)
Cv <- C%*%v2%*%t(C)
# ------------------ Test Statistics ----------------------#
T <-sqrt(N)*Cpd / sqrt(c(diag(Cv)))
# ------------------ Degrees of freedom--------------------#
ncont <-which((1:a)!= cont)
numerator <- c(diag(Cv))^2
denu1<-v1[cont]^2/(n[cont]^2*(n[cont]-1))
denu2 <- v1[ncont]^2 /(n[ncont]^2*(n[ncont]-1))
denu <- N^2*(denu1 + denu2)
df <- numerator / denu
#-------------------------p-Values ------------------------#
pv<- c()
for (h in 1:(a-1)){
pv[h]<- min(2*pt(T[h],df[h]),2-2*pt(T[h],df[h]))
}
adj.p <- p.adjust(pv,"hochberg")
Rejected <- (adj.p<=alpha)
#------------------- Build the output ---------------------#
vj <- which((1:a) != cont)
vi <- rep(cont, a - 1)
cmpid <- sapply(1:(a-1), function(arg) {
i <- vi[arg]
j <- vj[arg]
paste("F", "(", fl[j], ")", "-","F","(" ,fl[i],")", sep = "")
})
result <- data.frame(Comparison=cmpid, Estimator = Cpd, df=df, Statistic = T, P.Raw=pv,P.Adj=adj.p,Rejected = Rejected )
rownames(result)<-1:(a-1)
output = list(Info=data.frame(Sample=fl, Size=n, Single.Effects=pd),
Analysis=result)
if (! silent)
{
cat("#----Xin Gao's (2008) Non-Parametric Multiple Test Procedure","\n")
cat("#----Type of Adjustment: Hochberg", "\n")
cat("#----Level of significance", "=", alpha ,"\n")
cat("#----The procedure compares if the distribution functions F() are equal. The FWER is strongly controlled", "\n")
print(result)
}
return(output)
}
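# Usage sketch (illustration only; contrMat() comes from the 'multcomp' package):
# three unbalanced groups, Dunnett-type comparisons against "ctrl".
if (FALSE) {
	set.seed(42)
	dat <- data.frame(resp = c(rnorm(6), rnorm(8, 1), rnorm(5, 2)),
			grp = factor(rep(c("ctrl", "lo", "hi"), c(6, 8, 5))))
	gao(resp ~ grp, data = dat, alpha = 0.05, control = "ctrl")
}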
gao.wrapper <- function(model, data, alpha, control) {
control <- NULL # always compare against the first factor level; the 'control' argument is ignored here
result <- gao(formula=formula(model),
data=data,
alpha = alpha,control)
pvalues <- result$Analysis$P.Adj
estimates <- result$Analysis$Estimator
confint <- cbind(estimates, rep(NA, length(estimates)),rep(NA,length(estimates)))
rownames(confint)<-result$Analysis$Comparison
rejected1 <- result$Analysis$Rejected
return(list(adjPValues=pvalues,rejected=rejected1,confIntervals= confint,
errorControl = new(Class='ErrorControl',type="FWER",alpha=alpha)))
}
mutoss.gao <- function() { return(new(Class="MutossMethod",
label="Nonparametric Multiple contrast tests",
errorControl="FWER",
callFunction="gao.wrapper",
output=c("adjPValues", "rejected","confIntervals","errorControl"),
info="Nonparametric multiple contrast tests
This function computes Xin Gao's nonparametric multiple test procedures in an unbalanced one way layout.
Reference:
- Gao, X. et al. \"Nonparametric multiple comparison procedures for unbalanced one-way factorial designs.\" Journal of Statistical Planning and Inference, 138, 2574-2591, 2008.
",
parameters=list(model=list(type="ANY"),
data=list(type="data.frame"),
alpha=list(type="numeric")
)
)) }
# ==== mutoss/R/Augmentation.R ====
augmentation <- function(adjPValues, newErrorControl, newK, newQ, silent=FALSE) {
if (newErrorControl == "gFWER") {
out = fwer2gfwer(adjPValues, k = newK)
if(!silent) cat('\n\n\t\tGeneralized Family-Wise Error Rate\n\n')
return(list(adjPValues=as.numeric(out), rejected=NULL, errorControl = new(Class='ErrorControl',type="gFWER")))
} else if (newErrorControl == "FDX") {
out = fwer2tppfp(adjPValues, q = newQ)
if(!silent) cat('\n\n\t\tTail Probability of the Proportion of False Positives\n\n')
return(list(adjPValues=as.numeric(out), rejected=NULL, errorControl = new(Class='ErrorControl',type="FDX")))
} else if (newErrorControl == "FDR") {
out = fwer2fdr(adjPValues, method = "restricted")
if(!silent) cat('\n\n\t\tFalse Discovery Rate\n\n')
return(list(adjPValues=as.numeric(out$adjp), rejected=NULL, errorControl = new(Class='ErrorControl',type="FDR")))
	} else {
		stop("augmentation(): unknown newErrorControl method '", newErrorControl, "'")
	}
}
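# Usage sketch (illustration only; the fwer2* functions come from 'multtest'):
# turn FWER-adjusted p-values into gFWER(k = 2)-adjusted ones.
if (FALSE) {
	adjp <- bonferroni(runif(50))$adjPValues
	augmentation(adjp, newErrorControl = "gFWER", newK = 2, silent = TRUE)$adjPValues
}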
mutoss.augmentation <- function() { return(new(Class="MutossMethod",
label="Augmentation MTP adjusted p-values",
errorControl=c("FWER"),
callFunction="augmentation",
output=c("adjPValues", "rejected", "errorControl"),
info="Augmentation MTP adjusted p-values
Wrapper function to the augmentation methods of the multtest package.
The augmentation method turns a vector of p-values which are already adjusted for FWER control
into p-values that are adjusted for gFWER, FDX or FDR. The underlying idea (for gFWER and FDX)
is that the set of hypotheses rejected at a given level alpha under FWER can be 'augmented'
by rejecting some additional hypotheses while still ensuring (strong) control of the desired weaker type I criterion.
For FDR, it uses the fact that FDX control for q=alpha=1-sqrt(1-beta) entails FDR control at level beta.
Use of these augmentation methods is recommended only in the situation where FWER-controlled p-values are
directly available from the data (using some specific method). When only marginal p-values are available,
it is generally preferable to use other adjustment methods directly aimed at the intended criterion
(as opposed to first adjusting for FWER, then augmenting).
Note: In the multtest package, two methods ('restricted' and 'conservative') are available for FDR augmentation.
Here the 'restricted' method is forced for FDR augmentation since it is in fact always valid and better than
'conservative' (M. van der Laan, personal communication) with respect to power.
Reference:
- S. Dudoit, M.J. van der Laan. \" Multiple Testing Procedures with Applications to Genomics\", Springer, 2008. (chapter 6)
\n\
",
# TODO: <- Add the possibility of filling the rejected slot? (need additional input alpha)
parameters=list(adjPValues=list(type="numeric"),
newErrorControl=list(type="character", label="New error control", choices=c("FDR","FDX","gFWER")),
newK=list(type="numeric", optional=TRUE, label="For gFWER set k"),
newQ=list(type="numeric", optional=TRUE, label="For FDX set q")
))) }
# ==== mutoss/R/REGWQ.R ====
# REGWQ - Ryan / Einot and Gabriel / Welsch test procedure
#
# Author: FrankKonietschke
###############################################################################
regwq <- function(formula, data,alpha, MSE=NULL, df=NULL, silent = FALSE){
dat <- model.frame(formula, data)
if (ncol(dat) != 2) {
stop("Specify one response and only one class variable in the formula")
}
if (is.numeric(dat[, 1]) == FALSE) {
stop("Response variable must be numeric")
}
response <- dat[, 1]
group <- as.factor(dat[, 2])
fl <- levels(group)
a <-nlevels(group)
N <- length(response)
samples <- split(response,group)
n <- sapply(samples,"length")
mm <- sapply(samples,"mean")
vv <- sapply(samples,"var")
if (is.null(MSE)){
MSE <- sum((n-1)*vv)/(N-a)
}
if (is.null(df)){
df <- N-a
}
nc <- a*(a-1)/2
order.h1 <- data.frame(Sample=fl, Size=n, Means=mm,Variance=vv)
ordered <- order.h1[order(order.h1$Means,decreasing=FALSE), ]
rownames(ordered) <- 1:a
#---------------- Compute helping indices ----------#
i <- 1:(a-1)
h1 <- list()
for(s in 1:(a-1)){
h1[[s]]<- i[1:s]
}
vi <- unlist(h1)
j <- a:2
h2 <-list()
for (s in 1:(a-1)){
h2[[s]] <- j[s:1]
}
vj <- unlist(h2)
h3 <- list()
h4 <- list()
for (s in 1:(a-1)){
h3[[s]] <- rep(j[s],s)
h4[[s]] <- rep(i[s],s)
}
Nmean <- unlist(h3)
Step <- unlist(h4)
#--------Compute the Mean Differences---------#
mean.difference <- sapply(1:nc,function(arg){
i <- vi[arg]
j <- vj[arg]
(ordered$Means[j]-ordered$Means[i])
})
	mean.difference <- round(mean.difference, 4)
	#------ Comparison labels (needed for the output below; built like cmpid in gao.R) ------#
	names.ordered <- sapply(1:nc, function(arg){
		i <- vi[arg]
		j <- vj[arg]
		paste(ordered$Sample[j], "-", ordered$Sample[i], sep = "")
	})
# ------- Compute the test statistics --------#
T <- sapply(1:nc,function(arg){
i<-vi[arg]
j<-vj[arg]
(ordered$Means[j]-ordered$Means[i])/sqrt(MSE/2*(1/ordered$Size[i] + 1/ordered$Size[j]))
})
T <- round(T, 4)
#-------Compute the adjusted p-Values-------#
pvalues <- ptukey(T,Nmean,df,lower.tail=FALSE)
#------Compute the adjusted alpha-levels----#
alpha.level <- 1-(1-alpha)^(Nmean/a)
level1 <- (Nmean==a)
level2 <- (Nmean==a-1)
level3 <- level1 + level2
alpha.level[level3==1] <- alpha
alpha.level <- round(alpha.level,4)
# ----- Compute now the critical value -----#
quantiles <- qtukey(1-alpha.level,Nmean,df)
for (h in 1:(nc-1)){
if (quantiles[h+1] >=quantiles[h]){
quantiles[h+1] <- quantiles[h]
}
}
#---- Calculate the rejected Hypotheses ------#
	Rejected1 <- (pvalues < alpha.level)
	for (s in 1:nc){
		if (Rejected1[s] == FALSE){
			# a non-rejected span also protects every comparison it contains
			Under1 <- (vj[s] >= vj)
			Under2 <- (vi[s] <= vi)
			Under3 <- Under1 * Under2
			Under4 <- which(Under3 == 1)
			Rejected1[Under4] <- FALSE
		}
	}
#-----Prepare the pValues for the Output----#
Out1 <- (pvalues < alpha.level)
Out2 <- (Rejected1 == FALSE)
Out3 <- Out1 * Out2
Out4 <- (Out3 == 1)
pvalues <- round(pvalues,4)
quantiles <- round(quantiles,4)
pvalues[Out4] <- paste(">",alpha.level[Out4])
quantiles[Out4] <- paste(">", T[Out4])
variances.output <- data.frame(Overall=MSE, df=df)
Comparison <- data.frame(Comparison=names.ordered,Diff=mean.difference, Statistic=T,Quantiles=quantiles, Adj.P=pvalues, Alpha.Level=alpha.level, Rejected=Rejected1, Layer = Step)
if (!silent)
{
cat("#----REGWQ - Ryan / Einot and Gabriel / Welsch test procedure \n\n")
printRejected(Comparison$Rejected, pvalues, Comparison$Adj.P)
}
#result <- list(Ordered.Means = ordered, Variances=variances.output,
# REGWQ = Comparison)
#diffm<-matrix(c(Comparison["Diff"],rep(NA,length(Comparison["Diff"])*2)),nrow=length(Comparison["Diff"]))
diffm<-cbind(Comparison$Diff,rep(NA,length(Comparison$Diff)),rep(NA,length(Comparison$Diff)))
diffm<-matrix(diffm,nrow=length(Comparison$Diff))
rownames(diffm)<-Comparison$Comparison
return(list(adjPValues=Comparison$Adj.P, rejected=Comparison$Rejected, statistic=Comparison$Statistic,
confIntervals=diffm,errorControl = new(Class='ErrorControl',type="FWER",alpha=alpha)))
}
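# Usage sketch (illustration only): three groups with increasing means.
if (FALSE) {
	set.seed(1)
	dat <- data.frame(y = c(rnorm(10), rnorm(10, 1), rnorm(10, 3)),
			g = factor(rep(c("A", "B", "C"), each = 10)))
	regwq(y ~ g, data = dat, alpha = 0.05)
}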
mutoss.regwq <- function() { return(new(Class="MutossMethod",
label="Ryan / Einot and Gabriel / Welsch test",
errorControl="FWER",
callFunction="regwq",
output=c("adjPValues","rejected","statistic","confIntervals","errorControl"),
info="Ryan / Einot and Gabriel / Welsch test procedure.
The procedure controls the FWER.
\n\n\
It is based on a stepwise or \
layer approach to significance testing. Sample means are \
ordered from the smallest to the largest. The largest \
difference, which involves means that are r = p steps apart, \
is tested first at alpha level of significance; if significant, \
means that are r < p steps apart are tested at an adjusted alpha level \
of significance and so on. \
The alpha levels are adjusted for the p-1 different\
layers by the formula alpha_p= alpha, if p=k or p=k-1,\
alpha_p = 1-(1-alpha)^{p/k} otherwise.
\n\
Reference:
\
\
- Hochberg, Y., Yamhane, A.C. (1987). \" Multiple Comparison Procedures \
\" Wiley, New York.
\n\
",
parameters=list(formula=list(type="formula"), data=list(type="ANY"), alpha=list(type="numeric"),
MSE=list(type="numeric"), df=list(type="numeric"))
)
) }
# ==== mutoss/R/SNK.R ====
# Student - Newman - Keuls Test
#
# Author: FrankKonietschke
###############################################################################
snk <- function(formula,data,alpha, MSE=NULL, df=NULL, silent = FALSE){
dat <- model.frame(formula, data)
if (ncol(dat) != 2) {
stop("Specify one response and only one class variable in the formula")
}
if (is.numeric(dat[, 1]) == FALSE) {
stop("Response variable must be numeric")
}
response <- dat[, 1]
group <- as.factor(dat[, 2])
fl <- levels(group)
a <-nlevels(group)
N <- length(response)
samples <- split(response,group)
n <- sapply(samples,"length")
mm <- sapply(samples,"mean")
vv <- sapply(samples,"var")
if (is.null(MSE)){
MSE <- sum((n-1)*vv)/(N-a)
}
if (is.null(df)){
df <- N-a
}
nc <- a*(a-1)/2
order.h1 <- data.frame(Sample=fl, Size=n, Means=mm,Variance=vv)
ordered <- order.h1[order(order.h1$Means,decreasing=FALSE), ]
rownames(ordered) <- 1:a
#---------------- Compute helping indices ----------#
i <- 1:(a-1)
h1 <- list()
for(s in 1:(a-1)){
h1[[s]]<- i[1:s]
}
vi <- unlist(h1)
j <- a:2
h2 <-list()
for (s in 1:(a-1)){
h2[[s]] <- j[s:1]
}
vj <- unlist(h2)
h3 <- list()
h4 <- list()
for (s in 1:(a-1)){
h3[[s]] <- rep(j[s],s)
h4[[s]] <- rep(i[s],s)
}
Nmean <- unlist(h3)
Step <- unlist(h4)
#--------Compute the Mean Differences---------#
mean.difference <- sapply(1:nc,function(arg){
i <- vi[arg]
j <- vj[arg]
(ordered$Means[j]-ordered$Means[i])
})
	mean.difference <- round(mean.difference, 4)
	#------ Comparison labels (needed for the output below; built like cmpid in gao.R) ------#
	names.ordered <- sapply(1:nc, function(arg){
		i <- vi[arg]
		j <- vj[arg]
		paste(ordered$Sample[j], "-", ordered$Sample[i], sep = "")
	})
# ------- Compute the test statistics --------#
T <- sapply(1:nc,function(arg){
i<-vi[arg]
j<-vj[arg]
(ordered$Means[j]-ordered$Means[i])/sqrt(MSE/2*(1/ordered$Size[i] + 1/ordered$Size[j]))
})
T <- round(T, 4)
# ----- Compute now the critical value -----#
quantiles <- qtukey(1-alpha,Nmean,df)
pvalues <- ptukey(T,Nmean,df,lower.tail=FALSE)
#---- Calculate the rejected Hypotheses ------#
	Rejected1 <- (pvalues < alpha)
	for (s in 1:nc){
		if (Rejected1[s] == FALSE){
			# a non-rejected span also protects every comparison it contains
			Under1 <- (vj[s] >= vj)
			Under2 <- (vi[s] <= vi)
			Under3 <- Under1 * Under2
			Under4 <- which(Under3 == 1)
			Rejected1[Under4] <- FALSE
		}
	}
#-----Prepare the pValues for the Output----#
Out1 <- (pvalues < alpha)
Out2 <- (Rejected1 == FALSE)
Out3 <- Out1 * Out2
Out4 <- (Out3 == 1)
pvalues <- round(pvalues,4)
pvalues[Out4] <- paste(">",alpha)
variances.output <- data.frame(Overall=MSE, df=df)
Comparison <- data.frame(Comparison=names.ordered,Diff=mean.difference, Statistic=T, Adj.P=pvalues, Rejected=Rejected1, Layer = Step)
if (! silent)
{
cat("#----Student-Newman-Keuls (1927; 1939; 1952) rejective Multiple Test Procedure \n\n")
cat("#----Attention: The SNK test controls the FWER only in the WEAK sense \n\n")
}
result <- list(Ordered.Means = ordered, Variances=variances.output,
SNK = Comparison)
return(result)
}
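# Usage sketch (illustration only), mirroring the regwq example above:
if (FALSE) {
	set.seed(1)
	dat <- data.frame(y = c(rnorm(10), rnorm(10, 1), rnorm(10, 3)),
			g = factor(rep(c("A", "B", "C"), each = 10)))
	snk(y ~ g, data = dat, alpha = 0.05)
}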
snk.wrapper <- function(model, data, alpha, silent=FALSE) {
result <- snk(formula=formula(model),
data,
alpha = alpha)
difference <- result$SNK$Diff#, result$SNK$Statistic, result$SNK$Layer)
diffm<-cbind(difference,rep(NA,length(difference)),rep(NA,length(difference)))
diffm<-matrix(diffm,nrow=length(difference))
rownames(diffm)<-result$SNK$Comparison
return(list(adjPValues=result$SNK$Adj.P,rejected=result$SNK$Rejected,statistics=result$SNK$Statistic,
confIntervals= diffm,errorControl = new(Class='ErrorControl',type="FWER",alpha=alpha)))
}
mutoss.snk <- function() { return(new(Class="MutossMethod",
label="Student-Newman-Keuls Test",
errorControl="FWER.weak",
callFunction="snk.wrapper",
output=c("adjPValues", "rejected", "confIntervals", "errorControl"),
info="Student - Newman - Keuls rejective test procedure.\
The procedure controls the FWER in the WEAK sense.
\n\n\
The Newman-Keuls procedure is based on a stepwise or \
layer approach to significance testing. Sample means are \
ordered from the smallest to the largest. The largest \
difference, which involves means that are r = p steps apart, \
is tested first at alpha level of significance; if significant, \
means that are r = p - 1 steps apart are tested at alpha level \
of significance and so on.\
\n\
Reference:
\
\
- Keuls M (1952). \" The use of the studentized range in \
connection with an analysis of variance
\" Euphytica 1 37, 112-122.
\n\
",
parameters=list(
data=list(type="data.frame"),
model=list(type="ANY"),
alpha=list(type="numeric")
)
)) }
# ==== mutoss/R/Hommel.R ====
hommel <- function(pValues, alpha, silent=FALSE) {
m <- length(pValues)
adjPValues <- p.adjust(pValues, "hommel")
rejected <- adjPValues<=alpha
if (! silent)
{
cat("\n\n\t\tHommel's (1988) step-up Procedure\n\n")
printRejected(rejected, pValues, NULL)
}
return(list(adjPValues=adjPValues, rejected=rejected,
errorControl = new(Class='ErrorControl',type="FWER",alpha=alpha)))
}
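# Usage sketch (illustration only):
if (FALSE) {
	hommel(c(0.01, 0.02, 0.04, 0.2), alpha = 0.05)
}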
mutoss.hommel <- function() { return(new(Class="MutossMethod",
label="Hommel (1988) adjustment",
errorControl="FWER",
callFunction="hommel",
output=c("adjPValues", "criticalValues", "rejected", "errorControl"),
assumptions=c("any dependency structure"),
info="Hommel (1988) adjustment
\n\n\
Reference:
\
\
- Hommel, Gerhard. \" A stagewise rejective multiple test procedure based on a modified Bonferroni test. \" Biometrika 75, pp. 383-386, 1988.
\n\
The method is applied to pValues. It controls \
the FWER in the strong sense when the hypothesis tests are independent \
or when they are non-negatively associated. \
The method is based upon the closure principle to assure FWER control at level alpha; \
the critical values of the underlying procedure are given by alpha/n, \
alpha/(n-1), ..., alpha/2, alpha/1.
\n",
parameters=list(pValues=list(type="numeric"), alpha=list(type="numeric"))
)) }
oracleBH <- function(pValues, alpha, pi0, silent=FALSE) {
m <- length(pValues)
adjPValues <- p.adjust(pValues,"BH")*pi0
rejected <- (adjPValues <= alpha)
criticalValues <- sapply(1:m, function(i) (i*alpha)/(pi0*m))
if (! silent)
{
cat("\n\n\t\tBenjamini-Hochberg's (1995) oracle linear-step-up Procedure\n\n")
printRejected(rejected, pValues, adjPValues)
}
return(list(adjPValues=adjPValues, criticalValues=criticalValues, rejected=rejected,
errorControl = new(Class='ErrorControl',type="FDR",alpha=alpha)))
}
mutoss.oracleBH <- function() { return(new(Class="MutossMethod",
label="Benjamini-Hochberg (1995) oracle linear-step-up",
errorControl="FDR",
callFunction="oracleBH",
output=c("adjPValues", "criticalValues", "rejected", "errorControl"),
assumptions=c("test independence or positive regression dependency"),
info="Benjamini-Hochberg (1995) oracle linear step-up Procedure
\n\n\
Reference:
\
\
- Benjamini, Yoav and Hochberg, Yosef. \" Controlling the false discovery rate: a practical and powerful approach to multiple testing.
\" J. Roy. Statist. Soc. Ser. B 57 289-300, 1995.
\n\
Knowledge of the number of true null hypotheses (m0) can be very useful to improve upon the performance of the FDR controlling procedure. \
For the oracle linear step-up procedure we assume that m0 were given to us by an `oracle', the linear step-up procedure with q0 = q*m/m0 \
would control the FDR at precisely the desired level q in the independent and continuous case, and \
would then be more powerful in rejecting hypotheses for which the alternative holds.
\n",
parameters=list(pValues=list(type="numeric"), alpha=list(type="numeric"), pi0=list(type="numeric"))
)) }
Qvalue <- function(pValues, lambda=seq(0,.90,.05), pi0.method="smoother", fdr.level=NULL, robust=FALSE, smooth.df=3, smooth.log.pi0=FALSE, silent=FALSE) {
requireLibrary("qvalue")
qvalue <- get("qvalue", envir=asNamespace("qvalue"))
out <- qvalue(pValues, lambda=lambda, pi0.method=pi0.method, fdr.level=fdr.level, robust=robust, smooth.df=smooth.df, smooth.log.pi0=smooth.log.pi0)
qValues<-out$qvalues
pi0<-out$pi0
if (! silent)
{
cat("\n\n\t\tStorey's (2001) q-value Procedure\n\n")
cat("Number of hyp.:\t", length(pValues), "\n")
cat("Estimate of the prop. of null hypotheses:\t", pi0, "\n")
}
return(list(qValues=qValues, pi0=pi0, errorControl = new(Class='ErrorControl', type="pFDR")))
}
mutoss.Qvalue <- function() { return(new(Class="MutossMethod",
label="Storey's (2001) q-value Procedure",
errorControl="pFDR",
callFunction="Qvalue",
output=c("qValues", "pi0", "errorControl"),
info="Storey (2001) qvalue Procedure
\n\n\
Reference:
\
\
- Storey, John \" The Positive False Discovery Rate: A Baysian Interpretation and the Q-Value.
\" The Annals of Statistics 2001, Vol. 31, No. 6, 2013-2035, 2001.
\n\
The Qvalue procedure estimates the q-values for a given set of p-values. The q-value of a test measures the \
proportion of false positives incurred when that particular test is called significant. \
It gives the scientist a hypothesis testing error measure for each observed statistic with respect to the pFDR. \
Note: If no options are selected, then the method used to estimate pi0 is the smoother method described in Storey and Tibshirani (2003). \
The bootstrap method is described in Storey, Taylor and Siegmund (2004).
\n",
parameters=list(pValues=list(type="numeric"), lambda=list(type="numeric",optional=TRUE,label="Tuning parameter lambda"), pi0.method=list(type="character",optional=TRUE,choices=c("smoother","bootstrap"),label="Tuning parameter for the estimation of pi_0"),
fdr.level=list(type="numeric",optional=TRUE,label="Level at which to control the FDR"),robust=list(type="logical",optional=TRUE,label="Robust estimate"),
smooth.df=list(type="integer",optional=TRUE,label="Number of degrees-of-freedom"),smooth.log.pi0=list(type="logical",optional=TRUE))
)) }
BlaRoq<-function(pValues, alpha, pii, silent=FALSE){
k <- length(pValues)
if (missing(pii)) {
pii = sapply( 1:k, function(i) exp(-i/(0.15*k)) ) # a default choice different from BY: an exponentially decreasing prior
}
if (any(pii<0)) {
stop("BlaRoq(): Prior pii can only have positive elements")
}
if ( length(pii) != k) {
stop("BlaRoq(): Prior pii must have the same length as pValues")
}
pii <- pii / sum(pii)
precriticalValues <- cumsum(sapply(1:k, function(i) (i*pii[i])/k))
# The following code is inspired by p.adjust
i <- k:1
o <- order(pValues, decreasing = TRUE)
ro <- order(o)
adjPValues <- pmin(1, cummin( pValues[o] / precriticalValues[i] ))[ro]
rejected <- (adjPValues <= alpha)
if (! silent)
{
cat("\n\n\t\t Blanchard-Roquain/Sarkar (2008) step-up for arbitrary dependence\n\n")
printRejected(rejected, pValues, adjPValues)
}
return(list(adjPValues=adjPValues, criticalValues=alpha*precriticalValues, rejected=rejected,
errorControl = new(Class='ErrorControl',type="FDR",alpha=alpha)))
}
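# Usage sketch (not run): step-up under arbitrary dependence with the default
# exponentially decreasing prior pii; the p-values are simulated.
#pvals <- c(runif(90), rbeta(10, 1, 50))
#BlaRoq(pvals, alpha=0.1)$rejected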
mutoss.BlaRoq <- function() { return(new(Class="MutossMethod",
label="Blanchard-Roquain/Sarkar (2008) step-up",
errorControl="FDR",
callFunction="BlaRoq",
output=c("adjPValues", "criticalValues", "rejected", "errorControl"),
info="Blanchard-Roquain (2008) step-up Procedure for arbitrary dependent p-Values.
\n\
(Also proposed independently by Sarkar (2008))\n\
References:
\
\
- Blanchard, G. and Roquain, E. (2008).\" Two simple sufficient conditions for FDR control.\
\" Electronic Journal of Statistics, 2:963-992.
\n\
- Sarkar, S. K. (2008) \"On methods controlling the false discovery rate.\"\
Sankhya, Series A, 70:135-168
\n\
\
A generalization of the Benjamini-Yekutieli procedure, taking as an additional parameter
a distribution pi on [1..k] (k is the number of hypotheses)
representing prior belief on the number of hypotheses that will be rejected.
The procedure is a step-up with critical values C_i defined as alpha/k times
the sum for j in [1..i] of j*pi[j]. For any fixed prior pii, the FDR is controlled at
level alpha for arbitrary dependence structure of the p-Values. The particular case of the
Benjamini-Yekutieli step-up is recovered by taking pii[i] proportional to 1/i.
If pii is missing, a default prior distribution proportional to exp( -i/(0.15*k) ) is taken.
It should perform better than the BY procedure if more than about 5% to 10% of hypotheses are rejected,
and worse otherwise.
Note: the procedure automatically normalizes the prior pii to sum to one if this is not the case.
\n",
parameters=list(pValues=list(type="numeric"), alpha=list(type="numeric"), pii=list(type="RObject",label="Prior pi",optional=TRUE))
)) }
mutoss/R/moreSUDprocedures.R 0000644 0001762 0000144 00000017423 15123457163 015560 0 ustar ligges users #
# Author: JonathanRosenblatt
###############################################################################
#---------- Service Functions---------#
reject<- function(sorted, criticals){
m<- length(sorted)
stopifnot( length(criticals) == m )
indicators<- sorted < criticals #marking p-values below their critical values
if(!any(indicators)) return(list(cutoff=0, cut.index=0))
cut.index<- max(which(indicators))
cutoff<- sorted[cut.index] #the largest rejected p-value
return(list(cutoff=cutoff, cut.index=cut.index))
}# Close reject function
# NOTE: the original lines from reject()'s body down to solve.q()'s opening were
# lost in the archive; the tail of reject above, linearStepUp() and the head of
# solve.q below are reconstructions inferred from their call sites in this file.
linearStepUp<- function(sorted, q, m, adjust=FALSE, m0=m, pi0, constant=1){
if(!missing(pi0)) m0<- pi0*m
criticals<- (1:m)*q/(m0*constant) #BH-type critical values with plug-in estimate m0
cutoff<- reject(sorted, criticals)
rejected<- rep(FALSE, m)
if(cutoff$cut.index > 0) rejected[1:cutoff$cut.index]<- TRUE
adjusted<- rep(NA, m)
if(adjust) adjusted<- sapply(1:m, function(i) min(min(m0*constant*sorted[i:m]/(i:m)), 1))
multiple.pvals<- data.frame(
original.pvals=sorted,
criticals=criticals,
rejected=rejected,
adjusted.pvals=adjusted)
return(list(Cutoff=cutoff, Pvals=multiple.pvals))
}# Close linearStepUp function
solve.q<- function(sorted, m, j, r){
stopifnot( m>0 , j>=1 , j<=m , r>=0 , r<=m)
a<- sorted*(m-r)/(1:m) #level at which each sorted p-value is exactly rejected; q = a/(1-a) inverts q' = q/(1+q)
adjusted<- ifelse(a>0.5, 1 , a/(1-a) )
temp.min<- adjusted[m]
for(i in m:j){
if(adjusted[i]<=temp.min) temp.min<- adjusted[i]
else adjusted[i]<- temp.min
}
return(adjusted)
}# Close solve.q function
two.stage.adjust<- function(sorted, r=0, patience=4, m){
adjusted<- rep(0,m)
# Adjusting sorted p-values
adjusted.q<- solve.q(sorted=sorted,m=m,j=1,r=0)
checking<- adjusted.q
#Has the procedure rejected everything at the first stage?
if(sum(linearStepUp(sorted,adjusted.q[1]/(1+adjusted.q[1]),m=m)$Pvals[['rejected']])==m){
adjusted.q<- rep(adjusted.q[1],m)
return(adjusted.q)
}
else{
for (j in 1:m) {
delta.r<- 1
delta.q<- 1
new.q<- adjusted.q[j]
r.new<- sum(linearStepUp(sorted,new.q/(1+new.q),m=m)$Pvals[['rejected']])
counter<- 0
max.q<- 0
while(delta.r>0 & delta.q>0){
old.q<- new.q
r.old<- r.new
new.q<- solve.q(sorted=sorted,m=m,j=j,r=r.old)[j]
r.new<- sum(linearStepUp(sorted,new.q/(1+new.q),m=m)$Pvals[['rejected']])
delta.r<- abs(r.new-r.old)
delta.q<- abs(new.q-old.q)
counter<- counter+1
if(counter>patience & max.q!=new.q) max.q<- max(max.q,new.q)
else if(counter>patience & max.q==new.q ) break
} #Close iterations inside q[j]
adjusted.q[j]<- min(new.q,1)
adjusted.q[min(j+1,m)]<- adjusted.q[j]
stopifnot(any(adjusted.q[j]<=checking[j]))
}#Close looping over j.
temp.min<- adjusted.q[m]
for(i in m:1){
if(adjusted.q[i]<=temp.min) temp.min<- adjusted.q[i]
else adjusted.q[i]<- temp.min
}
return(adjusted.q)
}#Close 'else' clause
}# Close two.stage.adjust
two.stage<- function(pValues, alpha){
ranks<- rank(pValues)
sorted<-sort(pValues)
m<- length(sorted)
#Stage I- estimating m0
q1<- alpha/(1+alpha)
stage.one<- linearStepUp(sorted, q1, adjust=TRUE, m=m)
r<- sum(stage.one$Pvals[['rejected']]) #count rejection
if (r==0) { #if nothing is rejected, return the results of the linear step up
stage.one$Pvals[['adjusted.pvals']]<- 1
return(stage.one)
}
else if (r==m) {
stage.one$Pvals[['adjusted.pvals']]<- stage.one$Pvals[['adjusted.pvals']][1]
return(stage.one)
}
#Stage II- updating alpha using m0
else {
m0<- m-r
output<- linearStepUp(sorted=sorted,q=q1,m0=m0,m=m)
output$Pvals[['adjusted.pvals']]<- two.stage.adjust(sorted, alpha, m=m)
output<-output$Pvals[ranks,]
output.2<- list(
criticalValues=output$criticals,
rejected=output$rejected,
adjPValues=output$adjusted.pvals,
errorControl=new(Class='ErrorControl',type="FDR",alpha=alpha),
pi0= m0/m
)
return(output.2)
}
}
#pvals<- runif(100,0,0.1)
#two.stage(pvals,0.1)
mutoss.two.stage<- function() { return(new(Class="MutossMethod",
label="B.K.Y. (2006) Two-Stage Step-Up",
errorControl="FDR",
callFunction="two.stage",
output=c("adjPValues", "criticalValues", "rejected", "pi0", "errorControl"),
assumptions=c("Independent test statistics"),
info="Benjamini-Krieger-Yekutieli (2006) Two-Stage Step-Up Procedure
\n\n
A p-value procedure which controls the FDR at level α for independent test statistics, in which case it is more powerful than non-adaptive procedures such as the linear step-up procedure (BH). When the test statistics are not independent, no error control is guaranteed.
The linear step-up procedure is used in the first stage to estimate the number of true null hypotheses (m0), which is then plugged into a linear step-up
procedure at the second stage.
Reference:
- Benjamini, Y., Krieger, A. and Yekutieli, D. \" Adaptive linear step-up procedures that control the false
discovery rate. \" Biometrika, 93(3):491-507, 2006.
\n
",
parameters=list(pValues=list(type="numeric"), alpha=list(type="numeric"))
)) }
#---------------------Multistage Step-Down-------------------#
multiple.down.adjust<- function(sorted, m){
adjusted<- rep(NA,m)
temp.max<- sorted[1]
max.ind<- rep(0,m)
for (i in 1:m) {
temp<- min(sorted[i]*(m+1-i)/(i*(1-sorted[i])),1)
if ( temp >= temp.max ) {
temp.max <- temp
max.ind[i] <- 1
}
adjusted[i]<- temp.max
}
return(adjusted)
}
multiple.down=function(pValues, alpha){
sorted<- sort(pValues)
ranks<- rank(pValues)
m<- length(pValues)
if(alpha>0.5) warning('FDR is not controlled when q>0.5')
criticals<- sapply(1:m,function(i) alpha*i/(m-i*(1-alpha)+1))
indicators<- sorted < criticals #step-down: reject as long as the sorted p-values stay below their critical values
cut.index<- ifelse(any(!indicators), min(which(!indicators)) - 1, m)
rejected<- (ranks <= cut.index)
adjusted<- multiple.down.adjust(sorted, m)[ranks]
return(list(criticalValues=criticals,
rejected=rejected,
adjPValues=adjusted,
errorControl=new(Class='ErrorControl',type="FDR",alpha=alpha)))
}
# NOTE: the original lines from the body of multiple.down down to the reference
# inside the following MutossMethod object were lost in the archive; they are
# reconstructed here from the surrounding context.
mutoss.multiple.down<- function() { return(new(Class="MutossMethod",
label="B.K.Y. (2006) Multi-Stage Step-Down",
errorControl="FDR",
callFunction="multiple.down",
output=c("adjPValues", "criticalValues", "rejected", "errorControl"),
assumptions=c("Independent test statistics"),
info="Benjamini-Krieger-Yekutieli (2006) Multi-Stage Step-Down Procedure
\n\n
An adaptive step-down procedure designed to control the FDR at level alpha for independent test statistics.
The critical values c(i) = alpha*i/(m - i*(1-alpha) + 1) build the estimation of the number of true null
hypotheses into every step of the procedure.
Reference:
- Benjamini, Y., Krieger, A. and Yekutieli, D. \" Adaptive linear step-up procedures that control the false
discovery rate. \" Biometrika, 93(3):491-507, 2006. \n
",
parameters=list(pValues=list(type="numeric"), alpha=list(type="numeric"))
)) }
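#For testing purposes
#pvals<- runif(100,0,0.1)
#multiple.down(pvals,0.1)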
mutoss/R/CompareObjects.R 0000644 0001762 0000144 00000017365 15123457163 015053 0 ustar ligges users #
# Author: JonathanRosenblatt
###############################################################################
#----------- Validating input of compare procedure------------#
mu.test.class<- function(classes){
if (any(classes!='Mutoss')) stop('Input is not a "Mutoss" class object')
##TODO: will this cause problems for inherited class objects?
}
mu.test.type<- function(types){
if( any(types != types[1]) ){
message(' Notice: You are comparing methods for different error types. \n These should not be compared! \n Output will be generated nevertheless. \n')
}
}
mu.test.rates<-function(rates){
if( any(rates != rates[1]) ){
message(' Notice: You are comparing methods with different error rates. \n These should not be compared! \n Output will be generated nevertheless. \n')
}
}
mu.test.same.data<- function(pvals){
pvals.different<- any( apply(pvals,1, function(x) any(x!=x[1])))
if(pvals.different) stop('Different data was used for the supplied procedures.')
}
mu.test.name<- function(hyp.names){
names.different<- any( apply(hyp.names,1, function(x) any(x!=x[1])))
if(names.different) message('Notice: Hypotheses have different names. Can they be compared?')
}
#-------- Create comparison list for Mutoss objects--------------#
compareMutoss<-function(...){
objects<-list(...)
classes<- sapply(objects, function(x) class(x) )#getting object classes
mu.test.class(classes) #testing for compatible object classes
types<- sapply(objects, function(x) x@errorControl@type)#getting error control types
mu.test.type(types) #testing for compatible error control types
rates<-sapply(objects, function(x) x@errorControl@alpha)#extracting error rates
pi.nulls<- as.numeric(lapply(objects, function(x) x@pi0))#extracting pi0 estimates
pvalues<- sapply(objects, function(x) x@pValues )# getting raw pvals
mu.test.same.data(pvalues)
m<- nrow(pvalues)
raw.hyp.names<- lapply(objects, function(x) x@hypNames )# getting hypothesis names
if(all(sapply(raw.hyp.names, function(x) identical(x, character(0))))){
hyp.names<- paste('hyp', 1:m, sep='')
}
else if(all(sapply(raw.hyp.names, function(x) length(x)==m))) {
mu.test.name(do.call(cbind, raw.hyp.names)) #coerce the list of name vectors to a matrix
hyp.names<- raw.hyp.names[[1]]
}
##TODO: [JR] Deal with missing hyp names only in a subset of objects
# Preparing Raw Pvalues
raw.pvals<- pvalues[,1]
pval.order<- order(raw.pvals)
pval.ranks<- rank(raw.pvals)
raw.pvals.frame<-data.frame(
pValue=raw.pvals,
order=pval.order,
ranks=pval.ranks)
row.names(raw.pvals.frame)<-hyp.names
#Preparing adjusted pvalues
adj.pvals<- lapply(objects, function(x) x@adjPValues )
method.names<- unlist(lapply(adj.pvals, function(x) attributes(x)[1]))
adj.pvals.frame<- data.frame(adj.pvals)
colnames(adj.pvals.frame)<- method.names
row.names(adj.pvals.frame)<- hyp.names
#Preparing critical values
critical<- lapply(objects, function(x) x@criticalValues )
method.names<- unlist(lapply(critical, function(x) attributes(x)[1]))
critical.frame<- data.frame(critical)
colnames(critical.frame)<- method.names
row.names(critical.frame)<- hyp.names
#Preparing decisions
rejections<- lapply(objects, function(x) x@rejected )
method.names<- unlist(lapply(rejections, function(x) attributes(x)[1]))
rejections.frame<- data.frame(rejections)
colnames(rejections.frame)<- method.names
row.names(rejections.frame)<- hyp.names
##TODO: [JR] Add ground truth to comparison method
comparing<- list(
types=types,
rates=rates,
pi.nulls=pi.nulls,
raw.pValues=raw.pvals.frame,
adjusted.pvals=adj.pvals.frame,
criticalValue=critical.frame,
rejections=rejections.frame
)
return(comparing)
}
#For testing purposes
#source('~/workspace/mutoss/src/BasicFunctions/DummyBigObjects.R')
#test<- list(mu.test.obj.1, mu.test.obj.2)
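# Usage sketch (not run), assuming mu.test.obj.1 and mu.test.obj.2 are filled Mutoss objects:
#compare.2<- compareMutoss(mu.test.obj.1, mu.test.obj.2)
#mu.compare.summary(compare.2)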
#-------------- Comparison of adjusted p values -----------------#
mu.compare.adjusted<- function(comparison.list, identify.check=F){
adjPValues<- comparison.list[['adjusted.pvals']]
hyp.num<- nrow(adjPValues)
method.num<- ncol(adjPValues)
method.names<- factor(colnames(adjPValues))
method.index<- as.numeric(method.names)
raw.pValues<-comparison.list[['raw.pValues']]
pvalue.ranks<-raw.pValues$ranks
method.type<-comparison.list[['types']]
stacked.adjPValues<- unlist(adjPValues, use.names=F)
x<- rep(pvalue.ranks, method.num)
method.labels<- rep(method.names, each=hyp.num)
hyp.labels<- rep(row.names(adjPValues), method.num)
point.characters<- rep(method.index, each=hyp.num) #for plotting purposes only
point.size<- hyp.num^(-0.1)
plot(stacked.adjPValues~x,
pch=point.characters,
ylim=c(0,1),
cex=point.size,
xlab='')
the.title<- paste('Adjusted p-values for ',unique(method.type),' controlling procedures')
title(the.title)
par(xpd=T)
legend(x=0, y=-0.15,
horiz=T,
legend=method.names,
pch=method.index,
cex=method.num ^ (-1/4) )
par(xpd=F) #reset par to default value
if(identify.check) {
identify(stacked.adjPValues~x, labels=hyp.labels )
}
##TODO: [JR] Plotting method using colors?
}
#For testing purposes
#source('~/workspace/mutoss/src/BasicFunctions/DummyBigObjects.R')
#----------- Comparison of critical values---------- #
mu.compare.critical<- function(comparison.list, identify.check=F){
method.type<-comparison.list[['types']] #extracting method type
criticalValues<- comparison.list[['criticalValue']]#extracting critical values
hyp.num<- nrow(criticalValues)
method.num<- ncol(criticalValues)
method.names<- factor(colnames(criticalValues))
method.index<- as.numeric(method.names)
raw.pValues<-comparison.list[['raw.pValues']] #extracting raw pvalues
pvalue.ranks<-raw.pValues$ranks
stacked.criticalValues<- unlist(criticalValues, use.names=F)
x<- rep(pvalue.ranks, method.num)
method.labels<- rep(method.names, each=hyp.num)
hyp.labels<- rep(row.names(criticalValues), method.num)
point.characters<- rep(method.index, each=hyp.num) #for plotting purposes only
point.size<- hyp.num^(-0.1)
plot(stacked.criticalValues~x,
pch=point.characters,
ylim=c(0,1),
xlab='',
cex=point.size)
the.title<- paste('Critical Values for ',unique(method.type),' controlling procedures')
title(the.title)
par(xpd=T)
legend(
x=0,
y=-0.15,
horiz=T,
legend=method.names,
pch=method.index,
cex=method.num ^ (-1/4) )
par(xpd=F) #reset par to default value
if(identify.check) {
identify(stacked.criticalValues~x, labels=hyp.labels )
}
}
#For testing purposes:
#source('~/workspace/mutoss/src/BasicFunctions/DummyBigObjects.R')
#mu.compare.critical(1)
#mu.compare.critical(compare.3, T)
#----- Summary of comparison-----------#
mu.compare.summary<- function(comparison.list){
method.type<-comparison.list[['types']] #extracting method type
error.rates<-comparison.list[['rates']] #extracting error rates
pi.nulls<-comparison.list[['pi.nulls']] #extracting pi0
rejections<-comparison.list[['rejections']]
hyp.num<- nrow(rejections)
method.num<- ncol(rejections)
method.names<- factor(colnames(rejections))
method.index<- as.numeric(method.names)
count.rejections<-apply(rejections, 2, sum)
separate<- rep('|', method.num)
summary<-data.frame(
method.type, separate,
error.rates, separate,
count.rejections, separate,
pi.nulls)
colnames(summary) <-c(
'Error Type'," ",
'Error Rate'," ",
'Rejections Count', " ",
'pi_0')
cat('\n Comparing multiple hypothesis procedures.\n',
hyp.num, 'hypotheses tested.\n\n')
print(summary)
}
#For testing purposes :
#source('~/workspace/mutoss/src/BasicFunctions/DummyBigObjects.R')
#mu.compare.summary(1)
#mu.compare.summary(compare.3) mutoss/R/helperfunctions.R 0000644 0001762 0000144 00000000616 15123457163 015352 0 ustar ligges users
requireLibrary <- function(package) {
if(!require(package, character.only=TRUE)) {
answer <- readline(paste("Package ",package," is required - should we install it (y/N)? ",sep=""))
if (substr(answer, 1, 1) %in% c("y","Y")) {
install.packages(package)
require(package, character.only=TRUE)
} else {
stop(paste("Required package",package,"should not be installed"))
}
}
}
mutoss/R/mutossApply.R 0000644 0001762 0000144 00000002254 15123457163 014502 0 ustar ligges users mutoss.apply <- function(mutossObj, f, label = deparse(substitute(f)), recordHistory = TRUE, ...) {
params <- list()
for (param in names(formals(f))) { #runs over all parameters of f
if (param %in% slotNames(mutossObj)) { #checks if parameter name corresponds to a mutoss slot
paramTail <- list(slot(mutossObj, param)) #extracts values from appropriate slot
names(paramTail) <- param #attaches names for parameter values
params <- c(params, paramTail) #concatenates parameters with and without data from objects
}
}
result <- eval(as.call(c(f,params,...))) #evaluates function call with extracted parameters
for (param in names(result)) {
if (param %in% slotNames(mutossObj)) {
value <- result[param][[1]]
attr(value, "method.name") <- label #attaches attributes to
slot(mutossObj, param) <- value #writes result in corresponding mutoss slots
}
}
if ( recordHistory ) {
mutossObj@commandHistory = c( mutossObj@commandHistory,
paste(format( match.call( call = sys.call( sys.parent(1) ))), collapse='')) #write command into history
}
return(mutossObj)
}
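# Usage sketch (not run): applies e.g. holm() to the pValues slot of a filled
# Mutoss object 'mt' and writes the results back into the matching slots.
#mt <- mutoss.apply(mt, holm, alpha=0.05)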
mutoss/R/nparcomp1.R 0000644 0001762 0000144 00000075202 15123457163 014045 0 ustar ligges users # Simultaneous confidence intervals for relative contrast effects
#
# Author: FrankKonietschke
###############################################################################
nparcomp <- function (formula, data, type = c("UserDefined", "Tukey", "Dunnett", "Sequen",
"Williams", "Changepoint", "AVE", "McDermott", "Marcus","UmbrellaWilliams"),
control = NULL, conflevel = 0.95, alternative = c("two.sided",
"less", "greater"), rounds = 3, correlation = FALSE,
asy.method = c("logit", "probit", "normal", "mult.t"), plot.simci = FALSE,
info = TRUE, contrastMatrix = NULL)
{
corr.mat <- function(m, nc) {
rho <- matrix(c(0), ncol = nc, nrow = nc)
for (i in 1:nc) {
for (j in 1:nc) {
rho[i, j] <- m[i, j]/sqrt(m[i, i] * m[j, j])
}
}
return(rho)
}
ssq <- function(x) {
sum(x * x)
}
logit <- function(p) {
log(p/(1 - p))
}
probit <- function(p) {
qnorm(p)
}
expit <- function(G) {
exp(G)/(1 + exp(G))
}
index <- function(char, test) {
nc <- length(char)
for (i in 1:nc) {
if (char[i] == test) {
return(i)
}
}
}
z.quantile <- function(conflevel = conflevel, corr, a, df = df.sw,
dbs) {
if (dbs == "n") {
if (a == "two.sided") {
z <- qmvnorm(conflevel, corr = corr, tail = "both")$quantile
}
if (a == "less" || a == "greater") {
z <- qmvnorm(conflevel, corr = corr, tail = "lower")$quantile
}
}
if (dbs == "t") {
if (a == "two.sided") {
z <- qmvt(conflevel, df = df.sw, interval = c(-10,
10), corr = corr, tail = "both")$quantile
}
if (a == "less" || a == "greater") {
z <- qmvt(conflevel, df = df.sw, interval = c(-10,
10), corr = corr, tail = "lower")$quantile
}
}
return(z)
}
if (conflevel >= 1 || conflevel <= 0) {
stop("The confidence level must be between 0 and 1!")
}
if (is.null(alternative)) {
stop("Please declare the alternative! (two.sided, less, greater)")
}
type <- match.arg(type)
alternative <- match.arg(alternative)
asy.method <- match.arg(asy.method)
if (length(formula) != 3) {
stop("You can only analyse one-way layouts!")
}
dat <- model.frame(formula, data)
if (ncol(dat) != 2) {
stop("Specify one response and only one class variable in the formula")
}
if (is.numeric(dat[, 1]) == FALSE) {
stop("Response variable must be numeric")
}
response <- dat[, 1]
factorx <- as.factor(dat[, 2])
fl <- levels(factorx)
a <- nlevels(factorx)
if (a <= 2) {
stop("You want to perform a two-sample test. Please use the function npar.t.test")
}
samples <- split(response, factorx)
n <- sapply(samples, length)
if (any(n <= 1)) {
warn <- paste("The factor level", fl[n <= 1], "has got only one observation!")
stop(warn)
}
ntotal <- sum(n)
a <- length(n)
tmp <- expand.grid(1:a, 1:a)
ind <- tmp[[1]] > tmp[[2]]
vi <- tmp[[2]][ind]
vj <- tmp[[1]][ind]
nc <- length(vi)
gn <- n[vi] + n[vj]
intRanks <- lapply(samples, rank)
pairRanks <- lapply(1:nc, function(arg) {
rank(c(samples[[vi[arg]]], samples[[vj[arg]]]))
})
pd <- sapply(1:nc, function(arg) {
i <- vi[arg]
j <- vj[arg]
(sum(pairRanks[[arg]][(n[i] + 1):gn[arg]])/n[j] - (n[j] +
1)/2)/n[i]
})
dij <- dji <- list(0)
sqij <- sapply(1:nc, function(arg) {
i <- vi[arg]
j <- vj[arg]
pr <- pairRanks[[arg]][(n[i] + 1):gn[arg]]
dij[[arg]] <<- pr - sum(pr)/n[j] - intRanks[[j]] + (n[j] +
1)/2
ssq(dij[[arg]])/(n[i] * n[i] * (n[j] - 1))
})
sqji <- sapply(1:nc, function(arg) {
i <- vi[arg]
j <- vj[arg]
pr <- pairRanks[[arg]][1:n[i]]
dji[[arg]] <<- pr - sum(pr)/n[i] - intRanks[[i]] + (n[i] +
1)/2
ssq(dji[[arg]])/(n[j] * n[j] * (n[i] - 1))
})
vd.bf <- ntotal * (sqij/n[vj] + sqji/n[vi])
singular.bf <- (vd.bf == 0)
vd.bf[singular.bf] <- 1e-05
df.sw <- (n[vi] * sqij + n[vj] * sqji)^2/((n[vi] * sqij)^2/(n[vj] -
1) + (n[vj] * sqji)^2/(n[vi] - 1))
lambda <- sqrt(n[vi]/(gn + 1))
cov.bf1 <- diag(nc)
rho.bf <- diag(nc)
for (x in 1:(nc - 1)) {
for (y in (x + 1):nc) {
i <- vi[x]
j <- vj[x]
v <- vi[y]
w <- vj[y]
p <- c(i == v, j == w, i == w, j == v)
if (sum(p) == 1) {
cl <- list(function() (t(dji[[x]]) %*% dji[[y]])/(n[j] *
n[w] * n[i] * (n[i] - 1)), function() (t(dij[[x]]) %*%
dij[[y]])/(n[i] * n[v] * n[j] * (n[j] - 1)),
function() -(t(dji[[x]]) %*% dij[[y]])/(n[v] *
n[j] * n[i] * (n[i] - 1)), function() -(t(dij[[x]]) %*%
dji[[y]])/(n[i] * n[w] * n[j] * (n[j] - 1)))
case <- (1:4)[p]
rho.bf[x, y] <- rho.bf[y, x] <- sqrt(ntotal *
ntotal)/sqrt(vd.bf[x] * vd.bf[y]) * cl[[case]]()
cov.bf1[x, y] <- cov.bf1[y, x] <- sqrt(vd.bf[x] *
vd.bf[y])
}
}
}
V <- (cov.bf1 + diag(vd.bf - 1)) * rho.bf
cov.bf1 <- cbind(V,-1*V)
cov.bf2<-cbind(-1*V,V)
cov.bf <- rbind(cov.bf1,cov.bf2)
switch(type,
UserDefined = {
if (is.null(contrastMatrix)){stop("Give a contrast matrix with the contrastMatrix = <> option or choose a contrast!")}
if (is.null(control)) {
nc <- nrow(contrastMatrix)
weights.help <- weightMatrix(n, contrast.matrix = contrastMatrix)
weight<-weights.help$weight.matrix
weight.help <- weights.help$weight.help
cmpid <- paste("C", 1:nc)
type.of.contrast <- "User Defined"
}
else {
stop("Please declare the control group via your contrast matrix!")
}
type.of.contrast <- "User Defined"
},
Tukey = {
if (is.null(control)) {
if (alternative!="two.sided"){stop("The Tukey contrast can only be tested two-sided!")}
nc <- a * (a - 1)/2
cmpid <- sapply(1:nc, function(arg) {
i <- vi[arg]
j <- vj[arg]
paste("p", "(", fl[i], ",", fl[j], ")", sep = "")
})
weights.help <- weightMatrix(n, "Tukey")
weight<-weights.help$weight.matrix
weight.help <- weights.help$weight.help
}
else {
stop("The Tukey contrast hasn't got a control group!")
}
type.of.contrast <- "Tukey"
}, Dunnett = {
nc <- a - 1
if (is.null(control)) {
cont <- 1
}
else {
if (!any(fl == control)) {
stop("The dataset doesn't contain this control group!")
}
cont <- which(fl == control)
}
vj <- which((1:a) != cont)
vi <- rep(cont, a - 1)
weights.help <- weightMatrix(n, "Dunnett",cont)
weight<-weights.help$weight.matrix
weight.help <- weights.help$weight.help
cmpid <- sapply(1:nc, function(arg) {
i <- vi[arg]
j <- vj[arg]
paste("p", "(", fl[i], ",", fl[j], ")", sep = "")
})
type.of.contrast <- "Dunnett"
}, Sequen = {
if (is.null(control)) {
nc <- a - 1
vi <- 1:(a - 1)
vj <- 2:a
weights.help <- weightMatrix(n, "Sequen")
weight<-weights.help$weight.matrix
weight.help <- weights.help$weight.help
cmpid <- sapply(1:nc, function(arg) {
i <- vj[arg]
j <- vi[arg]
paste("p", "(", fl[j], ",", fl[i], ")", sep = "")
})
type.of.contrast <- "Sequen"
}
else {
stop("The Sequen-Contrast hasn't got a control group!")
}
},
Williams = {
if (is.null(control)) {
nc <- a - 1
weights.help <- weightMatrix(n, "Williams")
weight<-weights.help$weight.matrix
weight.help <- weights.help$weight.help
cmpid <- paste("C", 1:(a - 1))
type.of.contrast <- "Williams"
}
else {
stop("The Williams contrast hasn't got a control group!")
}
}, Changepoint = {
if (is.null(control)) {
nc <- a - 1
weights.help <- weightMatrix(n, "Changepoint")
weight<-weights.help$weight.matrix
weight.help <- weights.help$weight.help
cmpid <- paste("C", 1:(a - 1))
type.of.contrast <- "Changepoint"
}
else {
stop("The Changepoint-Contrast hasn't got a control group!")
}
}, AVE = {
if (is.null(control)) {
nc <- a
weights.help <- weightMatrix(n, "AVE")
weight<-weights.help$weight.matrix
weight.help <- weights.help$weight.help
cmpid <- paste("C", 1:a)
type.of.contrast <- "Average"
}
else {
stop("The Average-Contrast hasn't got a control group!")
}
}, McDermott = {
if (is.null(control)) {
nc <- a - 1
weights.help <- weightMatrix(n, "McDermott")
weight<-weights.help$weight.matrix
weight.help <- weights.help$weight.help
cmpid <- paste("C", 1:(a - 1))
type.of.contrast <- "McDermott"
}
else {
stop("The McDermott-Contrast hasn't got a control group!")
}
}, UmbrellaWilliams = {
if (is.null(control)) {
nc <- a * (a - 1)/2
weights.help <- weightMatrix(n, "UmbrellaWilliams")
weight<-weights.help$weight.matrix
weight.help <- weights.help$weight.help
cmpid <- paste("C", 1:(a * (a - 1)/2))
type.of.contrast <- "Umbrella Williams"
}
else {
stop("The Umbrella Williams-Contrast hasn't got a control group!")
}
},
Marcus = {
if (is.null(control)) {
nc <- a * (a - 1)/2
weights.help <- weightMatrix(n, "Marcus")
weight<-weights.help$weight.matrix
weight.help <- weights.help$weight.help
cmpid <- paste("C", 1:(a * (a - 1)/2))
type.of.contrast <- "Marcus"
}
else {
stop("The Marcus-Contrast hasn't got a control group!")
}
}
)
pd1 <- (pd == 1)
pd0 <- (pd == 0)
pd[pd1] <- 0.999
pd[pd0] <- 0.001
pd.help1 <- c(pd, 1-pd)
pd <- c(weight%*%pd.help1)
cov.bf <- weight %*% cov.bf %*% t(weight)
for (i in 1:nc) {
if (cov.bf[i, i] == 0) {
cov.bf[i, i] <- 0.001
}
}
vd.bf <- c(diag(cov.bf))
vd.bf <- c(vd.bf)
rho.bf <- corr.mat(cov.bf, nc)
t.bf <- sqrt(ntotal) * (pd - 1/2)/sqrt(vd.bf)
rownames(weight) <- paste("C", 1:nc)
ncomp <- a * (a - 1)/2
tmp <- expand.grid(1:a, 1:a)
ind <- tmp[[1]] > tmp[[2]]
v2 <- tmp[[2]][ind]
v1 <- tmp[[1]][ind]
namen1 <- sapply(1:ncomp, function(arg) {
i <- v2[arg]
j <- v1[arg]
paste("p", "(", fl[i], ",", fl[j], ")", sep = "")
})
namen2 <- sapply(1:ncomp, function(arg) {
i <- v2[arg]
j <- v1[arg]
paste("p", "(", fl[j], ",", fl[i], ")", sep = "")
})
colnames(weight) <- c(namen1,namen2)
df.sw[is.nan(df.sw)] <- 1000
df.sw <- weight %*% c(df.sw,df.sw)
df.sw <- max(4, min(df.sw))
pd <- c(pd)
logit.pd <- logit(pd)
logit.dev <- diag(1/(pd * (1 - pd)))
logit.cov <- logit.dev %*% cov.bf %*% t(logit.dev)
vd.logit <- c(diag(logit.cov))
t.logit <- (logit.pd) * sqrt(ntotal/vd.logit)
probit.pd <- qnorm(pd)
probit.dev <- diag(sqrt(2 * pi)/(exp(-0.5 * qnorm(pd) * qnorm(pd))))
probit.cov <- probit.dev %*% cov.bf %*% t(probit.dev)
vd.probit <- c(diag(probit.cov))
t.probit <- (probit.pd) * sqrt(ntotal/vd.probit)
p.bfn = p.bft = p.bflogit = p.bfprobit = c()
p.n = p.t = p.logit = p.probit = c()
if (alternative == "two.sided") {
z.bft <- z.quantile(conflevel = conflevel, corr = rho.bf,
"two.sided", df = df.sw, dbs = "t")
z.bfn <- z.quantile(conflevel = conflevel, corr = rho.bf,
"two.sided", df = 0, dbs = "n")
lower.bft <- pd - sqrt(vd.bf/ntotal) * z.bft
upper.bft <- pd + sqrt(vd.bf/ntotal) * z.bft
lower.bfn <- pd - sqrt(vd.bf/ntotal) * z.bfn
upper.bfn <- pd + sqrt(vd.bf/ntotal) * z.bfn
lower.logit <- expit(logit.pd - sqrt(vd.logit/ntotal) *
z.bfn)
upper.logit <- expit(logit.pd + sqrt(vd.logit/ntotal) *
z.bfn)
lower.probit <- pnorm(probit.pd - sqrt(vd.probit/ntotal) *
z.bfn)
upper.probit <- pnorm(probit.pd + sqrt(vd.probit/ntotal) *
z.bfn)
#---------------------------------------------------------#
#-------------- Computation of p-values ------------------#
for (i in 1:nc) {
p.bft[i] <- 1-pmvt(lower=-abs(t.bf[i]), abs(t.bf[i]), df=df.sw, corr=rho.bf,delta=rep(0,nc))
p.bfn[i] <- 1-pmvnorm(lower=-abs(t.bf[i]), abs(t.bf[i]),corr=rho.bf,mean=rep(0,nc))
p.bflogit[i] <- 1-pmvnorm(lower=-abs(t.logit[i]), abs(t.logit[i]),corr=rho.bf ,mean=rep(0,nc))
p.bfprobit[i] <- 1-pmvnorm(lower=-abs(t.probit[i]), abs(t.probit[i]),corr=rho.bf,mean=rep(0,nc))
p.tt <- pt(t.bf[i], df.sw)
p.t[i] <- min(2*p.tt,2-2*p.tt)
p.nn <- pnorm(t.bf[i])
p.n[i] <- min(2*p.nn, 2-2*p.nn)
p.ll <- pnorm(t.logit[i])
p.logit[i] <- min(2*p.ll, 2-2*p.ll)
p.pp <- pnorm(t.probit[i])
p.probit[i] <- min(2*p.pp, 2-2*p.pp)
}
text.output.p <- "H_0: p(i,j)=1/2"
text.output.KI <- paste(100 * conflevel, "%", "2-sided",
"Simultaneous-Confidence-Intervals for Relative Effects")
upper <- "]"
lower <- "["
}
if (alternative == "greater") {
z.bft <- z.quantile(conflevel = conflevel, corr = rho.bf,
"less", df = df.sw, dbs = "t")
z.bfn <- qmvnorm(conflevel, corr = rho.bf, tail = "lower")$quantile
lower.bft <- pd - sqrt(vd.bf/ntotal) * z.bft
lower.bfn <- pd - sqrt(vd.bf/ntotal) * z.bfn
lower.logit <- expit(logit.pd - sqrt(vd.logit/ntotal) *
z.bfn)
lower.probit <- pnorm(probit.pd - sqrt(vd.probit/ntotal) *
z.bfn)
upper.bft = upper.probit = upper.logit = upper.bfn = 1
for (i in 1:nc) {
p.bfn[i] <- 1 - pmvnorm(lower = -Inf, upper = t.bf[i],
mean = rep(0, nc), corr = rho.bf)
p.bft[i] <- 1 - pmvt(lower = -Inf, upper = t.bf[i],
delta = rep(0, nc), df = df.sw, corr = rho.bf)
p.bflogit[i] <- 1 - pmvnorm(lower = -Inf, upper = t.logit[i],
mean = rep(0, nc), corr = rho.bf)
p.bfprobit[i] <- 1 - pmvnorm(lower = -Inf, upper = t.probit[i],
mean = rep(0, nc), corr = rho.bf)
p.t[i] <-1-pt(t.bf[i], df.sw)
p.n[i] <- 1-pnorm(t.bf[i])
p.logit[i] <- 1- pnorm(t.logit[i])
p.probit[i] <-1- pnorm(t.probit[i])
}
text.output.p <- "H_0: p(i,j)<=1/2"
text.output.KI <- paste(100 * conflevel, "%", "1-sided",
"Simultaneous-Confidence-Intervals for Relative Effects")
upper <- "]"
lower <- "("
}
if (alternative == "less") {
z.bft <- z.quantile(conflevel = conflevel, corr = rho.bf,
"less", df = df.sw, dbs = "t")
z.bfn <- z.quantile(conflevel = conflevel, corr = rho.bf,
"less", df = 0, dbs = "n")
upper.bft <- pd + sqrt(vd.bf/ntotal) * z.bft
upper.bfn <- pd + sqrt(vd.bf/ntotal) * z.bfn
upper.logit <- expit(logit.pd + sqrt(vd.logit/ntotal) *
z.bfn)
upper.probit <- pnorm(probit.pd + sqrt(vd.probit/ntotal) *
z.bfn)
lower.bft = lower.probit = lower.logit = lower.bfn = 0
for (i in 1:nc) {
p.bfn[i] <- 1 - pmvnorm(lower = t.bf[i], upper = Inf,
mean = rep(0, nc), corr = rho.bf)
p.bft[i] <- 1 - pmvt(lower = t.bf[i], upper = Inf,
delta = rep(0, nc), df = df.sw, corr = rho.bf)
p.bflogit[i] <- 1 - pmvnorm(lower = t.logit[i], upper = Inf,
mean = rep(0, nc), corr = rho.bf)
p.bfprobit[i] <- 1 - pmvnorm(lower = t.probit[i],
upper = Inf, mean = rep(0, nc), corr = rho.bf)
p.t[i] <- pt(t.bf[i], df.sw)
p.n[i] <- pnorm(t.bf[i])
p.logit[i] <- pnorm(t.logit[i])
p.probit[i] <- pnorm(t.probit[i])
}
text.output.p <- " H_0: p(i,j)>=1/2"
text.output.KI <- paste(100 * conflevel, "%", "1-sided",
"Simultaneous-Confidence-Intervals for Relative Effects")
upper <- ")"
lower <- "["
}
bfn.lower <- round(lower.bfn, rounds)
bfn.upper <- round(upper.bfn, rounds)
bft.lower <- round(lower.bft, rounds)
bft.upper <- round(upper.bft,rounds)
logit.lower <- round(lower.logit, rounds)
logit.upper <- round(upper.logit,rounds)
probit.lower <- round(lower.probit, rounds)
probit.upper <- round(upper.probit,rounds)
p.bflogit <- round(p.bflogit, rounds)
p.bfprobit <- round(p.bfprobit, rounds)
p.bft <- round(p.bft, rounds)
p.bfn <- round(p.bfn, rounds)
p.logit <- round(p.logit, rounds)
p.probit <- round(p.probit, rounds)
p.t <- round(p.t, rounds)
p.n <- round(p.n, rounds)
pd <- round(pd, rounds)
if (correlation == TRUE) {
Correlation <- list(Correlation.matrix.N = rho.bf, Covariance.matrix.N = cov.bf,
Warning = paste("Attention! The covariance matrix is multiplied with N",
"=", ntotal))
}
else {
Correlation <- NA
}
data.info <- data.frame(row.names = 1:a, Sample = fl, Size = n)
switch(asy.method, logit = {
x.werte = cbind(lower.logit, pd, upper.logit)
result <- list(weight.matrix = weight, Data.Info = data.info,
Analysis.of.relative.effects = data.frame(row.names = c(1:nc),
comparison = cmpid, rel.effect = pd, lower= logit.lower, upper=logit.upper,
t.value = t.logit, p.adj = p.bflogit, p.raw= p.logit), Mult.Distribution = data.frame(Quantile = z.bfn,
p.Value.global = min(p.bflogit)), Correlation = Correlation)
Asymptotic.Method <- "Multivariate Delta-Method (Logit)"
}, probit = {
x.werte = cbind(lower.probit, pd, upper.probit)
result <- list(weight.matrix = weight, Data.Info = data.info,
Analysis.of.relative.effects = data.frame(row.names = c(1:nc),
comparison = cmpid, rel.effect = pd, lower = probit.lower, upper = probit.upper,
t.value = t.probit, p.adj = p.bfprobit, p.raw = p.probit), Mult.Distribution = data.frame(Quantile = z.bfn,
p.Value.global = min(p.bfprobit)), Correlation = Correlation)
Asymptotic.Method <- "Multivariate Delta-Method (Probit)"
}, normal = {
x.werte = cbind(lower.bfn, pd, upper.bfn)
result <- list(weight.matrix = weight, Data.Info = data.info,
Analysis.of.relative.effects = data.frame(row.names = c(1:nc),
comparison = cmpid, rel.effect = pd, lower = bfn.lower, upper=bfn.upper,
t.value = t.bf, p.adj = p.bfn, p.raw = p.n), Mult.Distribution = data.frame(Quantile = z.bfn,
p.Value.global = min(p.bfn)), Correlation = Correlation)
Asymptotic.Method <- "Multivariate Normal Distribution"
}, mult.t = {
x.werte = cbind(lower.bft, pd, upper.bft)
result <- list(weight.matrix = weight, Data.Info = data.info,
Analysis.of.relative.effects = data.frame(row.names = c(1:nc),
comparison = cmpid, rel.effect = pd, lower = bft.lower, upper=bft.upper,
t.value = t.bf, p.adj = p.bft,p.raw = p.t), Mult.Distribution = data.frame(Quantile = z.bft,
p.Value.global = min(p.bft), d.f. = df.sw), Correlation = Correlation)
Asymptotic.Method <- paste("Multi t - Distribution with d.f.= ",
round(df.sw, 4))
})
if (plot.simci == TRUE) {
test <- matrix(c(1:nc), ncol = nc, nrow = nc)
angaben <- c(cmpid)
angaben <- matrix(c(angaben), ncol = nc, nrow = nc)
k <- c(1:nc)
plot(x.werte[, 2], k, xlim = c(0, 1), axes = FALSE, type = "p",
pch = 15, xlab = "", ylab = "")
abline(v = 0.5, col = "red", lty = 1, lwd = 2)
axis(1, at = seq(0, 1, 0.1))
axis(2, at = test, labels = angaben)
axis(4, at = test, labels = test)
points(x = x.werte[, 3], y = test[, 1], pch = upper)
points(x = x.werte[, 1], y = test[, 1], pch = lower)
for (i in 1:nc) {
polygon(c(x.werte[i, 1], x.werte[i, 3]), c(i, i))
}
box()
title(main = c(text.output.KI, paste("Type of Contrast:",
"", type.of.contrast, sep = ""), paste("Method:",
"", Asymptotic.Method, sep = "")), ylab = "Comparison",
xlab = paste("lower", lower, "-----", "p", "------",
upper, "upper"))
}
if (info == TRUE) {
cat("\n", "", "Nonparametric Multiple Comparison Procedure based on relative contrast effects",
",", "Type of Contrast", ":", type.of.contrast, "\n",
"NOTE:", "\n", "*-------------------Weight Matrix------------------*",
"\n", "-", "Weight matrix for choosen contrast based on all-pairs comparisons",
"\n", "\n", "*-----------Analysis of relative effects-----------*",
"\n", "-", "Simultaneous Confidence Intervals for relative effects p(i,j)\n with confidence level",
conflevel, "\n", "-", "Method", "=", Asymptotic.Method,
"\n", "-", "p-Values for ", text.output.p, "\n",
"\n", "*----------------Interpretation--------------------*",
"\n", "p(a,b)", ">", "1/2", ":", "b tends to be larger than a",
"\n", "*--------------Mult.Distribution-------------------*",
"\n", "-", "Equicoordinate Quantile", "\n", "-", "Global p-Value",
"\n", "*--------------------------------------------------*",
"\n")
}
return(result)
}
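# Usage sketch (not run); 'y' and 'grp' are made-up names for a simulated one-way layout:
#dat <- data.frame(y = rnorm(30), grp = gl(3, 10))
#nparcomp(y ~ grp, dat, type = "Tukey", asy.method = "logit")$Analysis.of.relative.effects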
weightMatrix <- function(n,type = c("UserDefined","Tukey","AVE","Dunnett", "Sequen",
"Changepoint", "Marcus",
"McDermott", "Williams", "UmbrellaWilliams"), base = 1, contrast.matrix=NULL) {
a <- length (n)
n.col <- a*(a-1)/2
tmp <- expand.grid(1:a, 1:a)
ind <- tmp[[1]] > tmp[[2]]
vi <- tmp[[2]][ind]
vj <- tmp[[1]][ind]
type <- match.arg(type)
switch (type,
UserDefined = {
if (is.null(contrast.matrix)){stop("Choose a contrast or give a contrast matrix by using 'contrast.matrix = matrix'")}
#if(any(abs(contrast.matrix)>1)){stop("The contrast weights must be between 0 and 1!")}
if (ncol(contrast.matrix)!=a){stop("The contrast matrix has more or less columns than samples!")}
nc<-nrow(contrast.matrix)
for (i in 1:nc){
places_pos<-(contrast.matrix[i,]>0)
places_neg<-(contrast.matrix[i,]<0)
sum_pos<-sum(contrast.matrix[i,][places_pos])
sum_neg<-sum(contrast.matrix[i,][places_neg])
if (abs(sum_neg)!= sum_pos) {
stop("Wrong contrast matrix! The sums of negative and positive weights must be equal") }
#rescale each row so that the positive weights sum to 1 and the negative weights sum to -1
contrast.matrix[i,][places_neg]<- contrast.matrix[i,][places_neg]/abs(sum_neg)
contrast.matrix[i,][places_pos]<- contrast.matrix[i,][places_pos]/sum_pos
}
n.row <- nc
ch <- contrast.matrix
},
AVE = { n.row <- a; ch <- contrMat(n, type="AVE") },
Changepoint = { n.row <- a - 1; ch <- contrMat(n, type = "Changepoint") },
Dunnett = { n.row <- a - 1; ch <- contrMat(n, type = "Dunnett", base = base) },
Sequen = { n.row <- a - 1; ch <- contrMat(n, type = "Sequen", base) },
Marcus = { n.row <- a*(a-1)/2; ch <- contrMat(n, type = "Marcus", base) },
McDermott = { n.row <- a - 1; ch <- contrMat(n, type = "McDermott", base) },
Williams = { n.row <- a - 1; ch <- contrMat(n, type = "Williams") },
Tukey = { n.row <- a*(a-1)/2; ch <- contrMat(n, type = "Tukey", base) },
UmbrellaWilliams = { n.row <- a*(a-1)/2; ch <- contrMat(n, type = "UmbrellaWilliams", base) }
)
# The pairwise weight construction is identical for every contrast type:
# each contrast row is decomposed over all ordered pairs (vi[j], vj[j]),
# keeping only products of contrast weights with opposite signs.
w<-matrix(0,nrow = n.row , ncol=n.col )
weight.help2<-matrix(0,nrow = n.row , ncol=n.col )
weight.help1<-matrix(0,nrow = n.row , ncol=n.col )
for (i in 1:n.row){
for (j in 1:n.col){
b <- vi[j]
d <- vj[j]
w[i,j] <- ch[i,b] * ch[i,d]
if (w[i,j]>0){w[i,j]<-0}
if (ch[i,b] > 0 && ch[i,d] < 0) {weight.help2[i,j]<-1}
if (ch[i,b] < 0 && ch[i,d] > 0) {weight.help1[i,j]<-1}
}
}
w1<-w*weight.help1
w2<-w*weight.help2
w<- -1*cbind(w1,w2)
ch.out <- matrix(c(ch),nrow=n.row)
result <- list(weight.matrix = w, weight.help1 = weight.help1,weight.help2 = weight.help2,contrast.matrix = ch.out)
result
}
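# Example (not run): pairwise decomposition weights of the Dunnett contrast for
# three balanced groups; requires contrMat() from the 'multcomp' package.
#weightMatrix(c(10,10,10), type="Dunnett")$weight.matrix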
nparcomp.wrapper <- function(model, data, hypotheses, alpha, alternative, asy.method) {
control <- NULL
type <- NULL
contrastMatrix <- NULL
if (is.character(hypotheses) && length(hypotheses) == 1 && hypotheses %in% c("Tukey", "Dunnett", "Sequen",
"Williams", "Changepoint", "AVE", "McDermott", "Marcus", "UmbrellaWilliams")) {
type <- hypotheses
} else {
type <- "UserDefined"
contrastMatrix <- hypotheses
}
result <- nparcomp(formula=formula(model),
data,
type = type,
control = control,
conflevel = 1-alpha,
alternative,
rounds = 3,
correlation = TRUE,
asy.method,
plot.simci = FALSE,
info = TRUE,
contrastMatrix = contrastMatrix)
pvalues <- result$Analysis.of.relative.effects$p.adj
rejected1 <- (pvalues < alpha)
confi <- cbind(result$Analysis.of.relative.effects$rel.effect, result$Analysis.of.relative.effects$lower, result$Analysis.of.relative.effects$upper)
rownames(confi)<-result$Analysis.of.relative.effects$comparison
temp <- cbind(confi,pvalues)
colnames(temp) <- c("Estimate", "Lower","Upper","pValue")
print(temp)
return(list(adjPValues=pvalues,
rejected=rejected1, confIntervals= confi,
errorControl = new(Class='ErrorControl',type="FWER",alpha=alpha)))
}
mutoss.nparcomp <- function() { return(new(Class="MutossMethod",
label="Nonparametric relative contrast effects",
errorControl="FWER",
callFunction="nparcomp.wrapper",
output=c("adjPValues", "rejected","confIntervals", "errorControl"),
info="Nonparametric relative contrast effects
With this function, it is possible to compute nonparametric simultaneous confidence\
intervals for relative contrast effects in the unbalanced one way layout. Moreover, it computes\
adjusted p-values. The simultaneous confidence intervals can be computed using\
multivariate normal distribution, multivariate t-distribution with a Satterthwaite Approximation\
of the degree of freedom or using multivariate range preserving transformations with Logit or\
Probit as transformation function. There is no assumption on the underlying distribution function, only\
that the data have to be at least ordinal numbers.
Reference:
- Konietschke, F. \"Simultane Konfidenzintervalle fuer nichtparametrische relative Kontrasteffekte.\" Dissertation, University of Goettingen, 2009.
- Konietschke, F., Brunner, E., Hothorn, L.A. \"Simultaneous confidence intervals for nonparametric relative contrast effects.\" Research report at the University of Hannover, 2009.
",
parameters=list(
data=list(type="data.frame"),
model=list(type="ANY"),
hypotheses=list(type="ANY"),
alpha=list(type="numeric"),
alternative=list(type="character", label="Alternative", choices=c("two.sided", "less", "greater")),
asy.method=list(type="character", label="Asymptotic approx. method", choices=c("logit", "probit", "normal", "mult.t"))
)
)) }
mutoss/R/pi0Est.R 0000644 0001762 0000144 00000014354 15123457163 013312 0 ustar ligges users # Collection of elementary functions calculating an estimate
# of the number m0, or equivalently the proportion pi0, of true null hypotheses
# in a finite family of hypotheses.
#
# Author: MarselScheer and WiebkeWerft
###############################################################################
storey_pi0_est = function(pValues, lambda)
{
pi0 = (sum(pValues > lambda) + 1) / (1 - lambda) / length(pValues)
return(list(pi0 = pi0, lambda = lambda))
}
mutoss.storey_pi0_est <- function() { return(new(Class="MutossMethod",
label="Storey-Taylor-Siegmund (2004) Procedure",
callFunction="storey_pi0_est",
output=c("pi0", "lambda"),
info="Storey-Taylor-Siegmund procedure
\n\n\
The Storey-Taylor-Siegmund procedure for estimating pi0 is applied to pValues.\
The formula is equivalent to that in Schweder and Spjotvoll (1982),\
page 497, except the additional '+1' in the numerator that\
introduces a conservative bias which is proven to be sufficiently large\
for FDR control in finite families of hypotheses if the estimation\
is used for adjusting the nominal level of a linear step-up test.
\n\
Reference:
\
\
- Storey, J. D., Taylor, J. E. and Siegmund, D. (2004). \" Strong control, conservative point estimation and simultaneous conservative consistency of false discovery rates: a unified approach. \" JRSS B 66, 1, 187-205.
\n\
- Schweder, T. and Spjotvoll, E. (1982). \" Plots of P-values to evaluate many tests simultaneously. \" Biometrika 69, 3, 493-502.
",
parameters=list(pValues=list(type="numeric"), lambda=list(type="numeric"))
)) }
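# Example (not run): for purely uniform (null) p-values the estimate is close to 1.
#storey_pi0_est(runif(1000), lambda=0.5)$pi0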
ABH_pi0_est <- function(pValues)
{
m <- length(pValues)
index <- order(pValues)
spval <- pValues[index]
m0.m <- rep(0, m)
for (k in 1:m) {
m0.m[k] <- (m + 1 - k)/(1 - spval[k])
}
idx <- which(diff(m0.m) > 0)
if (length(idx) == 0)
grab <- 2
else grab <- min(idx, na.rm = TRUE)+1
pi0.ABH <- (ceiling(min(m0.m[grab], m))/m)
return(list(pi0 = pi0.ABH))
}
mutoss.ABH_pi0_est <- function() { return(new(Class="MutossMethod",
label="Hochberg-Benjamini (1990) lowest slope line method",
callFunction="ABH_pi0_est",
output=c("pi0"),
info="Hochberg-Benjamini lowest slope line method
\n\n\
The Lowest Slope Line (LSL) method of Hochberg and Benjamini for estimating pi0 is applied to pValues.\
This method for estimating pi0 is motivated by the graphical approach proposed\
by Schweder and Spjotvoll (1982), as developed and presented in Hochberg and Benjamini (1990).
\n\
Reference:
\
\
- Hochberg, Y. and Benjamini, Y. (1990). \" More powerful procedures for multiple significance testing. \" Statistics in Medicine 9, 811-818.
\n\
- Schweder, T. and Spjotvoll, E. (1982). \" Plots of P-values to evaluate many tests simultaneously.\" Biometrika 69, 3, 493-502.
",
parameters=list(pValues=list(type="numeric"))
)) }
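# Example (not run): simulated p-values, 900 nulls and 100 alternatives.
#ABH_pi0_est(c(runif(900), rbeta(100, 1, 50)))$pi0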
TSBKY_pi0_est <- function(pValues, alpha)
{
m <- length(pValues)
adjp <- p.adjust(pValues,"BH")
pi0.TSBKY <- ((m - sum(adjp < alpha/(1 + alpha), na.rm = TRUE)) / m)
return(list(pi0=pi0.TSBKY))
}
mutoss.TSBKY_pi0_est <- function() { return(new(Class="MutossMethod",
label="Benjamini, Krieger and Yekutieli (2006) two-step estimation method",
callFunction="TSBKY_pi0_est",
output=c("pi0"),
info="Hochberg-Benjamini lowest slope line method
\n\n\
The two-step estimation method of Benjamini, Krieger and Yekutieli for estimating pi0 is applied to pValues.
It consists of the following two steps:
\
Step 1. Use the linear step-up procedure at level alpha' =alpha/(1+alpha). Let r1 be the number of\
rejected hypotheses. If r1=0 do not reject any hypothesis and stop; if r1=m reject all m\
hypotheses and stop; otherwise continue.
\n\
Reference:
\
\
- Benjamini, Y., Krieger, A. and Yekutieli, D. (2006). \" Adaptive linear step-up procedures that control the false discovery rate. \" Biometrika 93, 3, page 495.
\n\
",
parameters=list(pValues=list(type="numeric"),alpha=list(type="numeric"))
)) }
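# Example (not run): the two-step estimate on a simulated null/alternative mixture.
#TSBKY_pi0_est(c(runif(900), rbeta(100, 1, 50)), alpha=0.05)$pi0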
# TODO: GB (MS) This help will look terrible!
BR_pi0_est <- function(pValues, alpha, lambda=1, truncate = TRUE)
{
if ( lambda <= 0 || lambda >= 1/alpha) {
stop('BR_pi0_est() : lambda should belong to (0, 1/alpha)')
}
m <- length(pValues)
stage1 <- indepBR( pValues, alpha, lambda, silent = TRUE)
pi0 <- ( m + 1 - sum(stage1$rejected) ) / ( m * ( 1 - lambda*alpha ) )
if (truncate) {
pi0 = min(1,pi0)
}
return(list(pi0=pi0))
}
mutoss.BR_pi0_est <- function() { return(new(Class="MutossMethod",
label="Blanchard-Roquain (2009) estimation method",
callFunction="BR_pi0_est",
output=c("pi0"),
info=" Blanchard-Roquain estimation under independence
\n\n\
Reference:
\
\
- Blanchard, G. and Roquain, E. \" Adaptive False Discovery Rate Control under Independence and Dependence.\"
Journal of Machine Learning Research 10:2837-2871, 2009.
\n\
\
The proportion of true\
nulls is estimated using the Blanchard-Roquain 1-stage procedure with parameter lambda,\
via the formula\n\
estimated pi0 = ( m - R(alpha,lambda) + 1) / ( m*( 1 - lambda * alpha ) )\n\
where R(alpha,lambda) is the number of hypotheses rejected by the BR 1-stage procedure,
alpha is FDR level control for this procedure and lambda a
parameter belonging to (0, 1/alpha) with default value 1. Independence of p-values is assumed.
This estimate may in some cases be larger than 1; it is truncated to 1 if the parameter truncate=TRUE.
The estimate is used in the Blanchard-Roquain 2-stage step-up (with the non-truncated version).
",
parameters=list(pValues=list(type="numeric"),alpha=list(type="numeric"),
lambda=list(type="numeric", default=1), truncate=list(type="logical", default=TRUE))
)) }
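# Example (not run); indepBR() is defined elsewhere in the package:
#BR_pi0_est(c(runif(900), rbeta(100, 1, 50)), alpha=0.05, lambda=1)$pi0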
mutoss/R/onLoad.R 0000755 0001762 0000144 00000001036 15123457163 013356 0 ustar ligges users .onLoad <- function(libname, pkgname) {
# if(!require("multtest", character.only=TRUE)) {
# if (interactive()) {
# answer <- readline("Multtest is missing - do you want to install it (y/N)? ")
# if (substr(answer, 1, 1) %in% c("y","Y")) {
# source("http://bioconductor.org/biocLite.R")
# biocLite("multtest")
# require("multtest")
# } else {
# warning("Package multtest is not avaible - please install it!")
# }
# } else {
# warning("Package multtest is not avaible - please install it!")
# }
# }
}
mutoss/R/SUDProcedures.R 0000644 0001762 0000144 00000115166 15123457163 014640 0 ustar ligges users # Here all implemented concrete SUD-Procedures can be found.
#
# Author: MarselScheer and WerftWiebke
###############################################################################
#++++++++++++++++++++++++++++ OutputFkt +++++++++++++++++++++
printRejected = function(rejected, pValues = NULL, adjPValues = NULL)
{
cat("Number of hyp.:\t", length(rejected), "\n")
cat("Number of rej.:\t", sum(rejected), "\n")
idx <- which(rejected)
if (length(idx) != 0)
{
output <- data.frame(rejected = idx)
if (!is.null(pValues))
{
output <- data.frame(output, pValues[idx])
names(output)[length(names(output))] <- "pValues"
}
if (!is.null(adjPValues))
{
output <- data.frame(output, adjPValues[idx])
names(output)[length(names(output))] <- "adjPValues"
}
if (!is.null(pValues))
# sorting by pValues
output <- output[order(output$pValues), ]
else
{
if (!is.null(adjPValues))
# no pValues availible, sorting by adjPValues
output <- output[order(output$adjPValues), ]
}
rownames(output) <- 1:length(idx)
print(output)
}
}
#---------------------------- OutputFkt ---------------------
#Rom_simpleImplementation <- function(pValues, alpha)
#{
# # ROM, D. M. (1990). A sequentially rejective test procedure based
# # on a modified Bonferroni inequality. Biometrika 77, 663-665.
#
# # Formula for the critical values is taken from
# # FINNER, H. and ROTERS, M. (2002). Multiple hypotheses testing and
# # expected type I errors. Ann. Statist. 30, 220-238.
# # Notice: The smallest critical value in this paper is alpha_1!!
# # Thus the critical values are calculated in this manner, and
# # at the end the order is reversed.
#
# # ++++++ Calculating critical values
#
# # TODO: Perhaps there are computational problems if too many hypotheses are tested.
# len <- length(pValues)
# criticalValues <- rep(0, times=len)
# criticalValues[1] <- alpha
#
# # TODO: !! firstSum_k[14:len] is constant for example if alpha=5%, len=200
# # firstSum_k := SUM(alpha^i, i=1..(k-1))
# firstSum_k <- cumsum(c(0, sapply(1:(len-1), function(i) alpha^i)))
#
# criticalValues[2] <- 1/2 * (firstSum_k[2] - 0)
# secondSummand <- function(i) choose(k,i) * criticalValues[i+1]^(k-i)
# for (k in 3:len)
# {
# # TODO: secondSum can be calculated faster!
# secondSum <- sum(sapply(1:(k-2), secondSummand))
# criticalValues[k] <- 1 / k * (firstSum_k[k] - secondSum)
# #cat("1 ", secondSum, "\n")
# }
# criticalValues <- criticalValues[len:1]
#
# # ------ Calculating critical Values
#
# SU(pValues, criticalValues)
#
#}
# TODO: MS !! Discussion about big n !!
rom <- function(pValues, alpha, silent = FALSE)
{
#
# Remark: The critical values calculated by this procedure were
# compared with the critical values calculated by Rom
# himself in his paper and they are the same.
# Formula for the critical values is taken from
# FINNER, H. and ROTERS, M. (2002). Multiple hypotheses testing and
# expected type I errors. Ann. Statist. 30, 220-238.
# Notice: The smallest critical value in this paper is alpha_1!!
# Thus the critical values are calculated in this manner, and
# at the end the order is reversed.
# ++++++ Calculating critical Values
# TODO: MS perhaps there are computational problems if too many hypotheses are tested.
len <- length(pValues)
criticalValues <- rep(0, times=len)
criticalValues[1] <- alpha
# TODO: MS !! firstSum_k[14:len] is constant for example if alpha=5%, len=200
# firstSum_k := SUM(alpha^i, i=1..(k-1))
firstSum_k <- cumsum(c(0, sapply(1:(len-1), function(i) alpha^i)))
criticalValues[2] <- 1/2 * (firstSum_k[2] - 0)
# SUM(binomial(k,i) * alpha_{i+1}^{k-i}, i=1..(k-2))
# = SUM(aki, i=1..(k-2))
# = SUM(binomial(k-1, i) * k / (k-i) * a(k-1)i * alpha_{i+1}, i=1..(k-2))
# for k = 3 and i = 1
# aki and binKoef actually has 2 dimensions, the k-dimension and the i-dimension.
# But in this code we will only work with the i-dimension. In every step of
# the for-loop aki[i] will be updated.
aki <- rep(0, times = (len-2))
binKoef <- rep(1, times = (len-2))
binKoef[1] <- choose(3,1)
aki[1] <- criticalValues[2]^(3-1)
for (k in 3:len)
{
secondSummand <- sum(binKoef[1:(k-2)] * aki[1:(k-2)])
criticalValues[k] <- 1/k * (firstSum_k[k] - secondSummand)
# updating the vectors for the next step
binKoef[k-1] <- (k+1) * k / 2 #choose(k+1, k-1)
binKoef[1:(k-2)] <- binKoef[1:(k-2)] * (k+1) / (k:3)
aki[k-1] <- criticalValues[k]^2
aki[1:(k-2)] <- aki[1:(k-2)] * criticalValues[2:(k-1)]
}
criticalValues <- criticalValues[len:1]
# ------ Calculating critical Values
rejected <- SU(pValues, criticalValues)
if (! silent)
{
cat("\n\n\t\tRom's (1990) step-up procedure\n\n")
printRejected(rejected, pValues, NULL)
}
# TODO: MS calculating adjustedPValues for ROM numerically
return(list(rejected = rejected, criticalValues = criticalValues,
errorControl = new(Class='ErrorControl',type="FWER",alpha=alpha))
)
}
mutoss.rom <- function() { return(new(Class="MutossMethod",
label="Rom's (1990) step-up",
errorControl="FWER",
callFunction="rom",
output=c("rejected", "criticalValues","errorControl"),
info="Rom's step-up procedure
\n\n\
Rom's step-up-procedure is applied to pValues. The procedure
controls the FWER in the strong sense if the pValues are
stochastically independent.
This function calculates the critical values by the formula given
in Finner, H. and Roters, M. (2002) based on the joint distribution
of order statistics. After that a step-up test
is performed to reject hypotheses associated with pValues.
Since the formula for the critical values is recursive,
the calculation of adjusted pValues is far from obvious and is
not implemented here.
Reference:
\
\
- Rom, D. M. (1990). \" A sequentially rejective test procedure based on a modified Bonferroni inequality. \" Biometrika 77, 663-665.
\n\
",
parameters=list(pValues=list(type="numeric"), alpha=list(type="numeric"))
)) }
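# Example (not run): Rom's step-up on ten independent p-values.
#rom(c(0.001, 0.005, 0.02, 0.04, 0.06, 0.1, 0.3, 0.5, 0.7, 0.9), alpha=0.05)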
#-------------------- Holm's Step-down--------------------#
holm <- function(pValues, alpha, silent = FALSE)
{
m <- length(pValues)
criticalValues <- sapply(1:m, function(i) alpha/(m-i+1))
adjPValues <- p.adjust(pValues, "holm")
rejected <- (adjPValues <= alpha)
if (! silent)
{
cat("\n\n\t\tHolm's (1979) step-down Procedure\n\n")
printRejected(rejected, pValues, adjPValues)
}
return(list(adjPValues = adjPValues,
rejected = rejected, criticalValues=criticalValues,
errorControl = new(Class='ErrorControl',type="FWER",alpha=alpha))
)
}
mutoss.holm <- function() { return(new(Class="MutossMethod",
label="Holm's (1979) step-down",
errorControl="FWER",
callFunction="holm",
output=c("adjPValues", "rejected", "criticalValues","errorControl"),
info="Holm's step-down-procedure
\n\n\
Holm's step-down-procedure is applied to pValues. It controls
the FWER in the strong sense under arbitrary dependency.
\n\
Holm's procedure uses the same critical values as the Hochberg's procedure, namely c(i)=α/(m-i+1),
but is a step-down version while Hochberg's method is a step-up version of the Bonferroni test.
Holm's method is based on the Bonferroni inequality and is valid regardless of the joint
distribution of the test statistics, whereas Hochberg's method relies on the assumption that
Simes' inequality holds for the joint null distribution of the test statistics. If this assumption is met, Hochberg's
step-up procedure is more powerful than Holm's step-down procedure.
\n\
Reference:
\
\
- Holm, S. (1979). \" A simple sequentially rejective multiple test procedure. \" Scand. J. Statist. Vol. 6, 65-70.
\n\
- Huang, Y. and Hsu, J. (2007). \" Hochberg's step-up method: cutting corners off Holm's step-down method. \" Biometrika, 94(4):965-975.
",
parameters=list(pValues=list(type="numeric"), alpha=list(type="numeric"))
)) }
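# Illustrative check (hypothetical p-values): holm() wraps stats::p.adjust, so
# holm(c(0.001, 0.01, 0.03, 0.2), 0.05, silent = TRUE)$adjPValues
# equals p.adjust(c(0.001, 0.01, 0.03, 0.2), "holm"), i.e. c(0.004, 0.03, 0.06, 0.2).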
# TODO: MS jointCDF.unif should probably be moved to some math.R or so.
# TODO: MS !! jointCDF.unif: There are numerical issues because of the limited accuracy of double precision.
# TODO: MS !! How to communicate numerical issues to the user?
jointCDF.orderedUnif = function(vec)
{
# vec is not ordered. Thus the probability must be 0
if (!all(order(vec) == 1:length(vec)))
{
print("ORDER!")
print(vec)
print(order(vec))
vecName <- deparse(substitute(vec))
warning(paste("jointCDF.unif(): The variable", vecName, "is not ordered. Thus the probability is 0!"))
return(0)
}
if (min(vec) <= 0) return(0)
vec[ vec > 1 ] <- 1
if (100 < length(vec))
warning("Length of the argument is longer than 100. Calculated value may not be useable!")
# By Bolshev's recursion
# P(U_{1:n} <= vec[1], ..., U_{n:n} <= vec[n])
# = Fn(vec[1], ..., vec[n])
# = 1 - sum( binom(n, j) * Fj(vec[1], ..., vec[j]) * (1-vec[j+1])^(n-j), j=0..n-1)
# with F0 = 1.
# The variable Fj[k+1] used in this function will correspond to
# Fk(vec[1], ..., vec[k]) for all k = 0 .. n.
# So Fj[1] is F0 = 1, Fj[2] is F1(vec[1]) and so on.
Fj <- rep(0, times = length(vec) + 1)
Fj[1] <- 1 # F0
# consider k;
# Fj[k+1];
# Fk(vec[1], ..., vec[k])
# = 1 - sum( binom(k, s) * Fs(vec[1], ..., vec[s]) * (1-vec[s+1])^(k-s) , s=0..k-1 )
# = 1 - sum( choose(k,s) * Fj[s+1] * (1 - vec[s+1])^(k-s), s=0..k-1 )
summand <- function(s) choose(k,s) * Fj[s+1] * (1 - vec[s+1])^(k-s)
for(k in 1:length(vec))
Fj[k+1] <- 1 - sum( sapply(0:(k-1), summand))
return(Fj[length(vec)+1])
}
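# Sanity check (illustrative): for two ordered uniforms the closed form is
# P(U_(1) <= a, U_(2) <= b) = 2ab - a^2 for a <= b, so
# jointCDF.orderedUnif(c(0.3, 0.6))  # = 2*0.3*0.6 - 0.3^2 = 0.27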
calculateBetaAdjustment = function(n, startIDX_SUD, alpha, silent = FALSE, initialBeta = 1, maxBinarySteps = 50, tolerance = 0.0001)
{
#+++++++++++++++++++++++++++ Subfunctions +++++++++++++++++++++++++
#probability mass function
pmf <- function(criticalValues, startIDX_SUD, n, n0, j)
{# Calculates P_{n,n0}(V = j) for a set of critical Values.
# Formulas are from Finner, Gontscharuk, Dickhaus: FDR controlling step-up-down tests
# related to the asymptotically optimal rejection curve. (to appear)
if (n0 < j) return(0)
n1 <- n - n0
if (startIDX_SUD <= n1)
{
if (j == 0)
return( choose(n0,j)
* 1
* (1-criticalValues[n1 + j + 1])^(n0-j)
)
return( choose(n0,j)
* jointCDF.orderedUnif(criticalValues[(n1 + 1):(n1 + j)])
* (1-criticalValues[n1 + j + 1])^(n0-j)
)
}
if ((n1 < startIDX_SUD) && (j < startIDX_SUD - n1 - 1))
{
if (n1 + j == 0) # <=> n1 == 0 and j == 0
return(choose(n0,j)
* jointCDF.orderedUnif(c(rep(1 - criticalValues[startIDX_SUD], times = (n - startIDX_SUD + 1)), 1 - criticalValues[(startIDX_SUD - 1):(n1 + j + 1)]))
* 1
)
return(choose(n0,j)
* jointCDF.orderedUnif(c(rep(1 - criticalValues[startIDX_SUD], times = (n - startIDX_SUD + 1)), 1 - criticalValues[(startIDX_SUD - 1):(n1 + j + 1)]))
* criticalValues[n1 + j]^j
)
}
if ((n1 < startIDX_SUD) && (j == startIDX_SUD - n1 - 1))
{
if (n1 + j == 0) # <=> n1 == 0 and j == 0
return(choose(n0,j)
* jointCDF.orderedUnif(rep(1 - criticalValues[startIDX_SUD], times = (n - startIDX_SUD + 1)))
* 1
)
return(choose(n0,j)
* jointCDF.orderedUnif(rep(1 - criticalValues[startIDX_SUD], times = (n - startIDX_SUD + 1)))
* criticalValues[n1 + j]^j
)
}
if ((n1 < startIDX_SUD) && (j == startIDX_SUD - n1))
return(choose(n0,j)
* jointCDF.orderedUnif(rep(criticalValues[startIDX_SUD], times=j))
* (1 - criticalValues[n1 + j + 1])^(n0 - j)
)
if ((n1 < startIDX_SUD) && (startIDX_SUD - n1 < j))
return(choose(n0,j)
* jointCDF.orderedUnif(c(rep(criticalValues[startIDX_SUD], times=(startIDX_SUD - n1)), criticalValues[(startIDX_SUD + 1):(n1 + j)]))
* (1 - criticalValues[n1 + j + 1])^(n0 - j)
)
}
calculateMaximumUpperFDRBound <- function(criticalValues, n, startIDX_SUD)
{
# Formulas are from Finner, Gontscharuk, Dickhaus: FDR controlling step-up-down tests
# related to the asymptotically optimal rejection curve. (to appear)
# Calculating the probability mass function of V under a DU(n_0, n) model
# n = Number of hypotheses
# n0 = Number of true hypotheses
# pm[j+1,n0+1] = P_{n,n0}(V = j)
pm <- sapply( 1:n,
function(n0)
{
sapply(0:n, # we would only need to go up to n0, but this way pm will be a matrix
function(j)
{
pmf(criticalValues, startIDX_SUD, n, n0, j)
}
)
}
)
# now the special case n0 = 0, then P_{n,n0}(V=0) = 1
pm <- cbind(c(1, rep(0, times = n)), pm)
# plausibility check: verify that P_{n,n0}(V in {0, ..., n}) = 1 for every n0 = 1, ..., n
rng <- range(colSums(pm))
if (rng[2]-rng[1] > 0.01)
warning("Maximum upper bounds of FDR probably not accurate!")
# Calculating b(n,n0|startIDX_SUD) for every n0,
# which is an upper bound for the FDR according to Finner, Gontscharuk, Dickhaus.
bn <- sapply(1:n,
function(n0)
{
n1 <- n - n0
n0 * sum(sapply(1:n0, function(j) criticalValues[n1 + j]/(n1 + j) * pm[j, n0]))
}
)
return(max(bn))
}
SearchInitialBetaInterval <- function(n, startIDX_SUD, alpha, initialBeta)
{
# searches two betas, beta1 and beta2 with beta2 = beta1 + 1, such that the
# beta2-adjusted AORC controls the FDR while the beta1-adjusted AORC does not
criticalValues <- sapply(1:n, function(i) i * alpha / (n + initialBeta - i * (1 - alpha)))
UpperFDRBound <- calculateMaximumUpperFDRBound(criticalValues, n, startIDX_SUD)
# the initialBeta controls the FDR, thus beta must be reduced!
if (UpperFDRBound < alpha)
{
FDRControlOfInitialBeta <- TRUE
step <- -1
}
else
{
FDRControlOfInitialBeta <- FALSE
step <- 1
}
beta <- initialBeta + step
intervalFound <- FALSE
while (!intervalFound)
{
criticalValues <- sapply(1:n, function(i) i * alpha / (n + beta - i * (1 - alpha)))
UpperFDRBound <- calculateMaximumUpperFDRBound(criticalValues, n, startIDX_SUD)
# if the initialBeta controls the FDR but beta does not, then we are done!
# Likewise, if the initialBeta does not control the FDR but beta does, then we are done!
if (xor(UpperFDRBound < alpha, FDRControlOfInitialBeta))
intervalFound <- TRUE
else
# initialBeta and beta both control the FDR or both do not control the FDR
beta <- beta + step
}
if (FDRControlOfInitialBeta)
return(c(beta, beta + 1))
return(c(beta - 1, beta))
}
#--------------------------- Subfunctions -------------------------
if (!silent)
cat("Searching initial interval to start the bisection approach.\n")
betaInt <- SearchInitialBetaInterval(n, startIDX_SUD, alpha, initialBeta)
beta <- betaInt[2]
step <- (betaInt[2] - betaInt[1]) / 2
numberOfSteps <- 0
if (!silent)
cat("Starting the bisection approach.\n")
lastFeasibleBeta <- beta
lastFeasibleUpperFDRBound <- -Inf
while(numberOfSteps < maxBinarySteps)
{
numberOfSteps <- numberOfSteps + 1
criticalValues <- sapply(1:n, function(i) i * alpha / (n + beta - i * (1 - alpha)))
UpperFDRBound <- calculateMaximumUpperFDRBound(criticalValues, n, startIDX_SUD)
if (!silent)
cat("Step ", numberOfSteps, ": beta =", beta, " => Upper FDR bound =", UpperFDRBound, "\n")
if (alpha - tolerance <= UpperFDRBound && UpperFDRBound <= alpha)
{
if (!silent)
{
cat("\nUpper FDR bound element in [alpha - tolerance, alpha]\n")
cat("Returned beta =", beta, " => Upper FDR bound =", UpperFDRBound, "\n")
}
return(beta)
}
if (UpperFDRBound > alpha)
beta <- beta + step
else
{
lastFeasibleBeta <- min(beta, lastFeasibleBeta)
lastFeasibleUpperFDRBound <- max(UpperFDRBound, lastFeasibleUpperFDRBound)
beta <- beta - step
}
step <- step / 2
}
if(!silent)
cat("\nReturned beta =", lastFeasibleBeta, " => Upper FDR bound =", lastFeasibleUpperFDRBound, "\n" )
return(lastFeasibleBeta)
}
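# Illustrative call (a small n keeps the exact computation fast): compute the
# adjustment beta_n for 10 hypotheses, a pure step-up start index and level 0.05:
# calculateBetaAdjustment(n = 10, startIDX_SUD = 10, alpha = 0.05)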
#------------------------ AORC---------------------#
aorc <- function(pValues, alpha, startIDX_SUD = length(pValues), betaAdjustment, silent = FALSE)
{
len <- length(pValues)
if (missing(betaAdjustment))
{
if (!silent)
cat("Using calculateBetaAdjustment() to set the missing parameter betaAdjustment.\n")
betaAdjustment = calculateBetaAdjustment(len, startIDX_SUD, alpha, silent)
}
criticalValues <- sapply(1:len, function(i) i * alpha / (len + betaAdjustment - i * (1 - alpha)))
rejected <- SUD(pValues, criticalValues, startIDX_SUD)
if (! silent)
{
cat("\n\n\t\tAsymptotically optimal rejection curve (2009)\n\n")
printRejected(rejected, pValues)
}
return(list(rejected = rejected, criticalValues = criticalValues,
errorControl = new(Class='ErrorControl',type="FDR",alpha=alpha)))
}
mutoss.aorc <- function() { return(new(Class="MutossMethod",
label="Asymptotically optimal rejection curve (2009)",
errorControl="FDR",
callFunction="aorc",
output=c("criticalValues", "rejected", "errorControl"),
info="Step-up-down procedure based on the asymptotically optimal rejection curve
\n\n\
The graph of the function f(t) = t / (t * (1 - alpha) + alpha) is called the asymptotically \
optimal rejection curve. Denote by finv(t) the inverse of f(t). Using the \
critical values finv(i/n) for i = 1, ..., n yields asymptotic FDR control. \
To ensure finite FDR control it is possible to adjust f(t) by a factor. The \
function calculateBetaAdjustment() calculates a beta such that (1 + beta / n) * f(t) \
can be used to control the FDR for a given finite sample size. If beta is not provided,
calculateBetaAdjustment() will be called automatically.\
\n\
Reference:
\
\
- Finner, H., Dickhaus, T. & Roters, M. (2009). \" On the false discovery rate and an asymptotically \
optimal rejection curve. \" The Annals of Statistics 37, 596-618.
\n\
",
parameters=list(pValues=list(type="numeric"), alpha=list(type="numeric"),
startIDX_SUD=list(type="integer", label="Start Index for Step-Up-Down", optional=TRUE),
betaAdjustment=list(type="numeric", label="Adjustment factor beta_n", optional=TRUE))
)) }
#aorc( runif(30), 0.05, 5)
#----------------- Benjamini Liu----------------#
BL <- function(pValues, alpha, silent=FALSE) {
m <- length(pValues)
criticalValues <- sapply(1:m, function(i) 1-(1-min(1, (m*alpha)/(m-i+1)))^(1/(m-i+1)))
rejected <- SD(pValues, criticalValues)
index <- order(pValues) # index for sorting pValues
rindex <- order(index) # reversed index to obtain the original order
spval <- pValues[index]
adjPValues <- vector(mode="numeric",length=m)
adjPValues[1] <- min(1 - (1 - spval[1])^m, 1)
for (i in 2:m) adjPValues[i] <- max(adjPValues[i - 1], ifelse((alpha*m)/(m-i+1)<=1, ((m-i+1)/m)*(1 - (1 - spval[i])^(m - i + 1)), 0)) # TODO: is 0 the right value when m*alpha/(m-i+1) > 1 ?
adjPValues <- adjPValues[rindex] # obtain the original order
#rejected <- (adjPValues <= alpha) # either this or the step-down SD() yields the rejections
if (! silent)
{
cat("\n\n\t\tBenjamini-Liu's (1999) step-down procedure\n\n")
printRejected(rejected, pValues, adjPValues)
}
return(list(adjPValues=adjPValues, criticalValues=criticalValues, rejected=rejected,
errorControl = new(Class='ErrorControl',type="FDR",alpha=alpha)))
}
mutoss.BL <- function() { return(new(Class="MutossMethod",
label="Benjamini-Liu (1999) step-down",
errorControl="FDR",
callFunction="BL",
output=c("adjPValues", "criticalValues", "rejected", "errorControl"),
assumptions=c("Independent test statistics."),
info="Benjamini-Liu (1999) step-down
\n\n
Reference:
- Benjamini, Y. and Liu, W. (1999). \" A step-down multiple hypotheses testing procedure that controls the false discovery rate under independence. \" Journal of Statistical Planning and Inference Vol. 82(1-2): 163-170.
\n
Benjamini-Liu's step-down procedure is applied to pValues.
The procedure controls the FDR if the corresponding test statistics are stochastically independent.
In Benjamini and Liu (1999) a large simulation study concerning the power of the two procedures suggested that the BL step-down procedure is more powerful than the linear step-up (BH) procedure when the number of hypotheses is small.
This is also the case when most hypotheses are far from the null. The BL step-down method calculates critical values according to Benjamini and Liu (1999), i.e.
ci = 1 - (1 - min(1, m*α/(m-i+1)))^(1/(m-i+1)) for i = 1,...,m,
where m is the number of hypotheses tested.
Then, let k be the smallest i for which P(i) > ci and reject associated hypotheses H(1),...,H(k-1).",
parameters=list(pValues=list(type="numeric"), alpha=list(type="numeric"))
)) }
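# Illustrative call (hypothetical p-values):
# BL(c(0.002, 0.01, 0.2, 0.4, 0.9), alpha = 0.05)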
#-------------------- BH Linear Step Up--------------------#
BH <- function(pValues, alpha, silent=FALSE) {
m <- length(pValues)
criticalValues <- sapply(1:m, function(i) (i*alpha)/m)
adjPValues <- p.adjust(pValues, "BH")
rejected <- (adjPValues <= alpha)
if (! silent)
{
cat("\n\n\t\tBenjamini-Hochberg's (1995) step-up procedure\n\n")
printRejected(rejected, pValues, adjPValues)
}
return(list(
adjPValues=adjPValues,
criticalValues=criticalValues,
rejected=rejected,
errorControl = new(Class='ErrorControl',type="FDR",alpha=alpha)))
}
mutoss.BH <- function() { return(new(Class="MutossMethod",
label="Benjamini-Hochberg (1995) step-up",
errorControl="FDR",
callFunction="BH",
output=c("adjPValues", "criticalValues", "rejected", "errorControl"),
assumptions=c("independence or positive regression dependency"),
info="
Benjamini-Hochberg (1995) Linear Step-Up Procedure
\n\n\
The Benjamini-Hochberg (BH) linear step-up procedure controls the FDR if the test statistics are stochastically independent or satisfy positive regression dependency (see Benjamini and Yekutieli 2001 for details).
In their seminal paper, Benjamini and Hochberg (1995) suggest the False Discovery Rate (FDR) as an alternative error criterion to the Family-Wise-Error-Rate and show that for 0<=m0<=m independent pValues corresponding to true null hypotheses
and for any joint distribution of the m1=m-m0 p-values corresponding to the non-null hypotheses the FDR is controlled at level (m0/m)*α.
Benjamini and Yekutieli show (2001) that this procedure controls the FDR in a much more general setting i.e. when the PRDS condition is satisfied.
Benjamini et al. (2006) improved this by adaptive procedures which use an estimate of m0 and apply the BH method at level α'=α*m/m0, to fully exhaust the desired level α.
\n
References:
\
\
- Benjamini, Y. and Hochberg, Y. (1995). \" Controlling the false discovery rate: A practical and powerful approach to mulitple testing. \" Journal of the Royal Statistical Society, Series B, 57:289-300.
\n\
- Benjamini, Y. and Yekutieli, D. (2001). \" The control of the false discovery rate in multiple testing under dependency. \" Annals of Statistics, 29(4):1165-1188.
\n\
- Benjamini, Y., Krieger, A. and Yekutieli, D. \" Adaptive linear step-up procedures that control the false
discovery rate. \" Biometrika, 93(3):491-507, 2006.
\n\
",
parameters=list(pValues=list(type="numeric"), alpha=list(type="numeric"))
)) }
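# Illustrative check (hypothetical p-values): BH() wraps stats::p.adjust(., "BH");
# pv <- c(0.001, 0.008, 0.039, 0.041, 0.042, 0.06, 0.074, 0.205)
# BH(pv, alpha = 0.05, silent = TRUE)$rejected  # only the two smallest are rejected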
BY <- function(pValues, alpha, silent=FALSE) {
m <- length(pValues)
a <- sum(1/(1:m))
criticalValues <- sapply(1:m, function(i) (i*alpha)/(a*m))
#rejected <- SU(pValues, criticalValues)
adjPValues <- p.adjust(pValues, "BY")
rejected <- (adjPValues <= alpha)
if (! silent)
{
cat("\n\n\t\tBenjamini-Yekutieli's (2001) step-up procedure\n\n")
printRejected(rejected, pValues, adjPValues)
}
return(list(adjPValues=adjPValues, criticalValues=criticalValues, rejected=rejected,
errorControl = new(Class='ErrorControl',type="FDR",alpha=alpha)))
}
mutoss.BY <- function() { return(new(Class="MutossMethod",
label="Benjamini-Yekutieli (2001) step-up",
errorControl="FDR",
callFunction="BY",
output=c("adjPValues", "criticalValues", "rejected", "errorControl"),
assumptions=c("any dependency structure"),
info="Benjamini-Yekutieli (2001) step-up procedure
\n\n\
Reference:
\
\
- Benjamini, Y. and Yekutieli, D. (2001). \" The control of the false discovery rate in multiple testing under dependency. \" Annals of Statistics, 29(4):1165-1188.
\n\
The Benjamini-Yekutieli step-up procedure is applied to pValues.
The procedure ensures FDR control for any dependency structure.
The critical values of the Benjamini-Yekutieli (BY) procedure are calculated by
replacing the α of the Benjamini-Hochberg procedure by α/(∑1/i), i.e.
c(i)=i*α/(m*∑1/i) for i=1,...,m. For a large number m of hypotheses the critical values of the BY procedure and the
BH procedure differ by a factor log(m). Benjamini and Yekutieli (2001) showed that this step-up procedure controls
the FDR at level α*m0/m for any test statistics dependency structure.
\n",
parameters=list(pValues=list(type="numeric"), alpha=list(type="numeric"))
)) }
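# Illustrative size of the BY penalty: the harmonic sum grows like log(m) + 0.5772,
# so the BY critical values lie roughly a log(m) factor below the BH ones, e.g.
# sum(1 / (1:100))  # approx. 5.19 for m = 100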
hochberg <- function(pValues, alpha, silent=FALSE) {
m <- length(pValues)
criticalValues <- sapply(1:m, function(i) alpha/(m-i+1))
#rejected <- SU(pValues, criticalValues)
adjPValues <- p.adjust(pValues, "hochberg")
rejected <- (adjPValues <= alpha)
if (! silent)
{
cat("\n\n\t\tHochberg's (1988) step-up procedure\n\n")
printRejected(rejected, pValues, adjPValues)
}
return(list(adjPValues=adjPValues, criticalValues=criticalValues, rejected=rejected,
errorControl = new(Class='ErrorControl',type="FWER",alpha=alpha)))
}
mutoss.hochberg <- function() { return(new(Class="MutossMethod",
label="Hochberg (1988) step-up",
errorControl="FWER",
callFunction="hochberg",
output=c("adjPValues", "criticalValues", "rejected", "errorControl"),
assumptions=c("independent tests"),
info="Hochberg (1988) step-up procedure
\n\n\
Reference:
\
\
- Hochberg, Y. (1988). A sharper Bonferroni procedure for multiple tests of significance. Biometrika, 75:800-802.
\n\
- Huang, Y. and Hsu, J. (2007). Hochberg's step-up method: cutting corners off Holm's step-down method. Biometrika, 94(4):965-975.
\n
The Hochberg step-up procedure is based on marginal p-values. It controls the FWER in the strong
sense under joint null distributions of the test statistics that satisfy Simes' inequality.
The Hochberg procedure is more powerful than Holm's (1979) procedure, but the test statistics need to be
independent or have a distribution with multivariate total positivity of order two or a scale mixture
thereof for its validity (Sarkar, 1998).
Both procedures use the same set of critical values c(i)=α/(m-i+1): Holm's procedure is a step-down
version of the Bonferroni test, whereas Hochberg's is a step-up version of the Bonferroni test.
Note that Holm's method is based on the Bonferroni inequality and is valid regardless of the joint
distribution of the test statistics.
\n",
parameters=list(pValues=list(type="numeric"), alpha=list(type="numeric"))
)) }
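# Illustrative comparison (hypothetical p-values): with identical critical values
# the step-up Hochberg procedure rejects at least as much as the step-down Holm:
# pv <- c(0.001, 0.01, 0.03, 0.045)
# holm(pv, 0.05, silent = TRUE)$rejected      # TRUE TRUE FALSE FALSE
# hochberg(pv, 0.05, silent = TRUE)$rejected  # TRUE TRUE TRUE TRUE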
adaptiveBH <- function(pValues, alpha, silent=FALSE) {
m <- length(pValues)
pi0.ABH <- ABH_pi0_est(pValues)$pi0
criticalValues <- sapply(1:m, function(i) (i*alpha)/(m*pi0.ABH))
adjPValues <- p.adjust(pValues,"BH")*pi0.ABH
rejected <- (adjPValues <= alpha)
if (! silent)
{
cat("\n\n\t\tBenjamini-Hochberg (2000) adaptive step-up procedure\n\n")
printRejected(rejected, pValues, adjPValues)
}
return(list(adjPValues=adjPValues, criticalValues=criticalValues, rejected=rejected, pi0=pi0.ABH,
errorControl = new(Class='ErrorControl',type="FDR",alpha=alpha)))
}
mutoss.adaptiveBH <- function() { return(new(Class="MutossMethod",
label="Benjamini-Hochberg (2000) adaptive step-up",
errorControl="FDR",
callFunction="adaptiveBH",
output=c("adjPValues", "criticalValues", "rejected", "pi0", "errorControl"),
assumptions=c("independence or positive regression dependency"),
info="Benjamini-Hochberg (2000) adaptive linear step-up procedure
\n\n\
Reference:
\
\
- Benjamini, Y. and Hochberg, Y. (2000). On the Adaptive Control of the False Discovery Rate
in Multiple Testing With Independent Statistics. Journal of Educational and Behavioral Statistics, 25(1): 60-83.
\n\
- Benjamini, Y. and Hochberg, Y. (1995). Controlling the false discovery rate: A practical and powerful approach to multiple testing.
Journal of the Royal Statistical Society, Series B, 57:289-300.
\n\
- Hochberg, Y. and Benjamini, Y. (1990). More powerful procedures for multiple significance testing.
Statistics in Medicine 9, 811-818.
\n
The adaptive Benjamini-Hochberg step-up procedure is applied to pValues.
It controls the FDR at level alpha for independent or positive regression dependent test statistics.
In the adaptive Benjamini-Hochberg step-up procedure the number of true null hypotheses is estimated first as in Hochberg and
Benjamini (1990), and this estimate is used in the procedure of Benjamini and
Hochberg (1995) with alpha'=alpha*m/m0. The method for estimating m0 is motivated by
the graphical approach proposed by Schweder and Spjotvoll (1982),
as developed and presented in Hochberg and Benjamini (1990).
\n",
parameters=list(pValues=list(type="numeric"), alpha=list(type="numeric"))
)) }
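# Illustrative call (hypothetical p-values with a block of very small values);
# the returned pi0 is the ABH estimate of the fraction of true nulls:
# pv <- c(runif(20, 0, 0.005), runif(80))
# adaptiveBH(pv, alpha = 0.05, silent = TRUE)$pi0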
#---------------------- Adaptive STS-----------------#
adaptiveSTS <- function(pValues, alpha, lambda=0.5, silent=FALSE) {
m <- length(pValues)
adjP <- p.adjust(pValues,"BH")
pi0 <- storey_pi0_est(pValues, lambda)$pi0
criticalValues <- sapply(1:m, function(i) (i*alpha)/(m*pi0))
adjPValues <- adjP*min(pi0, 1)
rejected <- (adjPValues <= alpha)
if (! silent)
{
cat("\n\n\t\tStorey-Taylor-Siegmund (2004) adaptive step-up procedure\n\n")
printRejected(rejected, pValues, adjPValues)
}
return(list(adjPValues=adjPValues, criticalValues=criticalValues, rejected=rejected, pi0=pi0,
errorControl = new(Class='ErrorControl',type="FDR",alpha=alpha)))
}
mutoss.adaptiveSTS <- function() { return(new(Class="MutossMethod",
label="Storey-Taylor-Siegmund (2004) adaptive step-up",
errorControl="FDR",
callFunction="adaptiveSTS",
output=c("adjPValues", "criticalValues", "rejected", "pi0", "errorControl"),
assumptions=c("test independence or positive regression dependency"),
info="Storey-Taylor-Siegmund (2004) adaptive step-up procedure
\n\n
The adaptive STS method uses a conservative estimate of pi0 which is plugged into a linear step-up procedure. The estimation of pi0 requires a parameter λ which is set to 0.5 by default.
Note that the estimated pi0 is truncated at 1 as suggested by the authors, so the implementation of the procedure is not entirely supported by the proof in the reference.
\n
Reference:
- Storey, J.D., Taylor, J.E. and Siegmund, D. \" Strong control, conservative point estimation and
simultaneous conservative consistency of false discovery rates: a unified approach.\" Journal of the Royal Statistical Society, B 66(1):187-205, 2004.
\n
",
parameters=list(pValues=list(type="numeric"), alpha=list(type="numeric"), lambda=list(type="numeric", default = 0.5))
)) }
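# Illustrative call (hypothetical p-values); lambda tunes the Storey pi0 estimate:
# pv <- c(runif(20, 0, 0.005), runif(80))
# adaptiveSTS(pv, alpha = 0.05, lambda = 0.5, silent = TRUE)$pi0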
#---------------------------- Sidak Step Down--------------------------------#
SidakSD <- function(pValues, alpha, silent=FALSE) {
m <- length(pValues)
criticalValues <- sapply(1:m, function(i) 1-(1-alpha)^(1/(m-i+1)))
#rejected <- SD(pValues, criticalValues)
tmp <- mt.rawp2adjp(pValues, "SidakSD")
adjPValues <- tmp$adjp[order(tmp$index),2]
rejected <- (adjPValues <= alpha)
if (! silent)
{
cat("\n\n\tSidak-like (1987) step-down procedure\n\n")
printRejected(rejected, pValues, adjPValues)
}
return(list(adjPValues=adjPValues, criticalValues=criticalValues, rejected=rejected,
errorControl = new(Class='ErrorControl',type="FWER",alpha=alpha)))
}
mutoss.SidakSD <- function() { return(new(Class="MutossMethod",
label="Sidak-like (1987) step-down",
errorControl="FWER",
callFunction="SidakSD",
output=c("adjPValues", "criticalValues", "rejected", "errorControl"),
assumptions=c("test independence","positive orthant dependent test statistics"),
info="Sidak-like (1987) step-down procedure
\n\n\
Reference:
\
\
- Holland, B.S. and Copenhaver, M.D. \" An Improved Sequentially Rejective Bonferroni Test Procedure.\" Biometrics, 43(2):417-423, 1987.
\n\
The Sidak-like step-down procedure is an improvement over Holm's (1979) step-down procedure. The improvement is analogous to the Sidak's correction over the original Bonferroni procedure. This Sidak-like step-down procedure assumes positive orthant dependent test statistics.
\n",
parameters=list(pValues=list(type="numeric"), alpha=list(type="numeric"))
)) }
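# Illustrative comparison: the Sidak-like critical values 1-(1-alpha)^(1/(m-i+1))
# are slightly larger than Holm's alpha/(m-i+1); for m = 4, alpha = 0.05, i = 1:
# 1 - (1 - 0.05)^(1/4)  # approx. 0.0127 versus Holm's 0.0125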
#----------------------------------Blanchard Roquain 2008 ----------------------#
indepBR <- function(pValues, alpha, lambda=1, silent = FALSE)
{
if ( lambda <= 0 || lambda >= 1/alpha) {
stop('indepBR() : lambda should belong to (0, 1/alpha)')
}
len <- length(pValues)
criticalValues <- sapply( 1:len, function(i) alpha * min( i * ( 1 - lambda * alpha) / (len - i + 1) , lambda ) )
rejected <- SU(pValues, criticalValues)
if (! silent)
{
cat("\n\n\t\t Blanchard-Roquain 1-stage step-up under independence (2009)\n\n")
printRejected(rejected, pValues)
}
return(list(rejected = rejected, criticalValues = criticalValues,
errorControl = new(Class='ErrorControl',type="FDR",alpha=alpha)))
}
mutoss.indepBR <- function() { return(new(Class="MutossMethod",
label="Blanchard-Roquain adaptive step-up (2009)",
errorControl="FDR",
callFunction="indepBR",
output=c("criticalValues", "rejected", "errorControl"),
assumptions=c("p-value independence"),
info=" Blanchard-Roquain (2009) 1-stage adaptive step-up
\n\n\
Reference:
\
\
- Blanchard, G. and Roquain, E. \" Adaptive False Discovery Rate Control under Independence and Dependence.\"
Journal of Machine Learning Research 10:2837-2871, 2009.
\n\
This is a step-up procedure with critical values\n\
Ci = alpha * min( i * ( 1 - lambda * alpha) / (m - i + 1) , lambda )\n\
where alpha is the level at which FDR should be controlled and lambda an \
arbitrary parameter belonging to (0, 1/alpha) with default value 1. \
This procedure controls FDR at the desired level when the p-values are independent.
",
parameters=list(pValues=list(type="numeric"), alpha=list(type="numeric"), lambda=list(type="numeric", default=1))
)) }
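# Illustrative look at the BR 1-stage critical values for m = 5, alpha = 0.05,
# lambda = 1; the linear part is capped at lambda * alpha:
# sapply(1:5, function(i) 0.05 * min(i * (1 - 0.05) / (5 - i + 1), 1))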
#----------------------------Blanchard Roquain 2009---------------------#
twostageBR <- function(pValues, alpha, lambda=1, silent = FALSE)
{
if ( lambda <= 0 || lambda >= 1/alpha) {
stop('twostageBR() : lambda should belong to (0, 1/alpha)')
}
m <- length(pValues)
stage1 <- indepBR( pValues, alpha, lambda, silent = TRUE)
pi0inv <- ( 1 - lambda*alpha )*m / ( m + 1 - sum(stage1$rejected) )
BHadjPValues <- p.adjust(pValues,"BH")
rejected <- ( BHadjPValues <= alpha*pi0inv )
if (! silent)
{
cat("\n\n\t\tBlanchard-Roquain (2009) 2-stage step-up Procedure\n\n")
printRejected(rejected, pValues)
}
return(list(rejected=rejected, errorControl = new(Class='ErrorControl',type="FDR",alpha=alpha)))
}
mutoss.twostageBR <- function() { return(new(Class="MutossMethod",
label="Blanchard-Roquain 2-stage adaptive step-up (2009)",
errorControl="FDR",
callFunction="twostageBR",
output=c("rejected", "errorControl"),
assumptions=c("p-value independence"),
info=" Blanchard-Roquain 2-stage step-up under independence
\n\n\
Reference:
\
\
- Blanchard, G. and Roquain, E. \" Adaptive False Discovery Rate Control under Independence and Dependence.\"
Journal of Machine Learning Research 10:2837-2871, 2009.
\n\
\
This is an adaptive linear step-up procedure where the proportion of true\
nulls is estimated using the Blanchard-Roquain 1-stage procedure with parameter lambda,\
via the formula\n\
estimated pi0 = ( m - R(alpha,lambda) + 1) / ( m*( 1 - lambda * alpha ) )\n\
where R(alpha,lambda) is the number of hypotheses rejected by the BR 1-stage procedure,
alpha is the level at which FDR should be controlled and lambda an
arbitrary parameter belonging to (0, 1/alpha) with default value 1.
This procedure controls FDR at the desired level when the p-values are independent.
",
parameters=list(pValues=list(type="numeric"), alpha=list(type="numeric"), lambda=list(type="numeric",default=1))
)) }
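# Illustrative call (hypothetical p-values): stage 1 runs indepBR() to estimate
# pi0, stage 2 applies BH at the correspondingly inflated level:
# twostageBR(c(runif(10, 0, 0.002), runif(40)), alpha = 0.05, lambda = 1)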
mutoss/R/MuTossObject.R 0000644 0001762 0000144 00000003572 15123457163 014527 0 ustar ligges users setClass("ErrorControl",
representation = representation(
type = "character", # Type of error rate controlled for (FWER, FWER.weak, FDR, FDX, gFWER, perComparison (?))
alpha = "numeric", # Error rate of the procedure
k = "numeric", # Additional parameter for generalized FWE control
q = "numeric" # Additional paramter for FDX control
)
)
##TODO: add slots for "model": formula, link, family
##TODO: write some header, it is the main object that comes out after four weeks of hard work!!
setClass("Mutoss",
representation = representation(
data = "ANY", # Raw data used in model
model = "ANY", # link function,error family and design
description = "character",
statistic = "numeric", # For Z, T or F statistics (maybe different slots?)
hypotheses = "ANY",
hypNames = "character", # Identifiers for the hypotheses tested
criticalValues = "numeric", # Procedure-specific critical values
pValues = "numeric", # Raw p-values. Either imported or calculated with data and model
adjPValues = "numeric", # Procedure-specific adjusted p-values
errorControl = "ErrorControl", # Details of the multiplicity control procedure used.
rejected = "logical", # Logical vector of the output of a procedure at a given error rate
qValues = "numeric", # Storey's estimates of the supremum of the pFDR
locFDR = "numeric", # Efron's local fdr estimates (by which method?)
pi0 = "numeric", # Estimate of the proportion of null hypotheses (by which method?)
confIntervals = "matrix", # Confidence intervals for selected parameters (of which kind? selected how?)
commandHistory = "character"
)
)
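# Minimal sketch (illustrative) of filling a Mutoss object by hand; slots that
# are not supplied keep their prototype values:
# mt <- new("Mutoss", pValues = runif(10), description = "toy example")
# mt@errorControl <- new("ErrorControl", type = "FDR", alpha = 0.05)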
mutoss/R/multcomp.R 0000644 0001762 0000144 00000004114 15123457163 013777 0 ustar ligges users # Simultaneous confidence intervals for linear contrasts
#
# Author: FrankKonietschke
###############################################################################
multcomp.wrapper <- function(model, hypotheses, alternative, rhs=0, alpha, factorC ) {
type<-""
if (any(factorC== c("Tukey", "Dunnett", "Sequen",
"Williams", "Changepoint", "AVE", "McDermott", "Marcus", "UmbrellaWilliams"))) {
eval(parse(text=paste("type <- mcp(",factorC,"=hypotheses)")))
} else {
eval(parse(text=paste("type <- mcp(",factorC,"=hypotheses)")))
}
glhtObj <- glht(model, linfct = type, rhs=rhs, alternative=alternative)
summaryGLHT <- summary(glhtObj)
pvalues <- summaryGLHT$test$pvalues
estimates <- summaryGLHT$test$coefficients
confint <- confint(glhtObj,level=(1-alpha))$confint
rejected1 <- (pvalues < alpha)
confi <- cbind(confint)
print(cbind(confi,pvalues))
return(list(adjPValues=pvalues,rejected=rejected1,confIntervals= confi,
errorControl = new(Class='ErrorControl',type="FWER",alpha=alpha)))
}
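# Illustrative call (assumes package multcomp is attached; warpbreaks ships with R):
# multcomp.wrapper(aov(breaks ~ tension, data = warpbreaks),
#                  hypotheses = "Tukey", alternative = "two.sided", rhs = 0,
#                  alpha = 0.05, factorC = "tension")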
mutoss.multcomp<- function() { return(new(Class="MutossMethod",
label="Multiple Contrast Tests",
errorControl="FWER",
callFunction="multcomp.wrapper",
output=c("adjPValues", "rejected","confIntervals","errorControl"),
info="Parametric multiple contrast tests and simultaneous confidence intervals
With this function, it is possible to compute simultaneous tests and confidence intervals for general linear hypotheses in parametric models.
Reference:
- Bretz, F., Genz, A. and Hothorn, L.A. \"On the numerical availability of multiple comparison procedures.\" Biometrical Journal, 43(5), 645-656, 2001.
",
parameters=list(model=list(type="ANY"),
hypotheses=list(type="ANY"),
alpha=list(type="numeric"),
alternative=list(type="character", label="Alternative", choices=c("two.sided", "less", "greater")),
factorC=list(type="character", label="Factor for Comparison", fromR="FactorVar")
)
)) }
mutoss/R/marginal.R 0000644 0001762 0000144 00000031753 15123457163 013742 0 ustar ligges users # TODO: Add comment
#
# Author: wiebke
###############################################################################
#### one sample model ####
onesamp.model <- function() {
return(list(model=list(typ="onesamp")))
}
mutoss.onesamp.model <- function() { return(new(Class="MutossMethod",
label="One-sample test",
callFunction="onesamp.model",
output=c("model"),
info="One sample model
The input for this one sample model is a data matrix whose columns represent the samples and the rows represent the multiple endpoints.
E.g. for genomics this would be a gene matrix, where each row gives the expression for a single gene.
For the next steps you have the following choices:
Either marginal hypothesis tests (Wilcoxon if robust, otherwise t-test) can be performed on each row of the
data matrix to obtain raw p-values which then need to be adjusted for multiplicity to control a chosen error rate.
Or resampling based methods could be performed based on Dudoit and van der Laan (2007) to obtain adjusted p-values which control the
FWER. Afterwards it is possible to use augmentation procedures to get adjusted p-values for control of FDR, FDX or gFWER.
Reference:
- Dudoit, S. and van der Laan, M.J. (2007). Multiple Testing Procedures with Applications to Genomics. Springer Series in Statistics.
",
parameters=list(
)
)) }
onesamp.marginal <- function(data, robust, alternative, psi0) {
result <- NULL
if (robust) {
result <- apply(data, 1, function(x) {wilcox.test(x, alternative=alternative, mu=psi0)$p.value} )
} else {
result <- apply(data, 1, function(x) {t.test(x ,alternative=alternative, mu=psi0)$p.value} )
}
return(list(pValues=result))
}
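# Illustrative call: 5 hypothetical endpoints measured on 10 samples,
# one raw p-value per row:
# dat <- matrix(rnorm(50), nrow = 5)
# onesamp.marginal(dat, robust = FALSE, alternative = "two.sided", psi0 = 0)$pValues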
mutoss.onesamp.marginal.model <- function(model) {
return("typ" %in% names(model) && model$typ == "onesamp")
}
mutoss.onesamp.marginal <- function() { return(new(Class="MutossMethod",
label="One-sample test",
callFunction="onesamp.marginal",
output=c("pValues"),
info="Marginal one sample test.
The robust version uses the Wilcoxon signed rank test, otherwise a t-test will be performed.
Reference:
- Wilcoxon, F. (1945). Individual Comparisons by Ranking Methods. Biometrics Bulletin 1:80-83.
\n\
- Mann, H. and Whitney, D. (1947). On a test of whether one of two random variables is stochastically larger
than the other. Annals of Mathematical Statistics 18:50-60
\n\
- Student (1908).The probable error of a mean. Biometrika, 6(1):1-25.
\n
",
parameters=list(
data=list(type="ANY"),
robust=list(type="logical", label="Robust statistic"),
alternative=list(type="character", label="Alternative", choices=c("two.sided", "less", "greater")),
psi0=list(type="numeric", label="Hypothesized null value", default=0)
)
)) }
#### two sample model ####
twosamp.model <- function(classlabel) {
classlabel <- as.vector(classlabel)
return(list(model=list(typ="twosamp", classlabel=classlabel)))
}
mutoss.twosamp.model <- function() { return(new(Class="MutossMethod",
label="Two sample test",
callFunction="twosamp.model",
output=c("model"),
info="Two sample model
The input for this one sample model is a data matrix whose columns represent the samples and the rows represent the multiple endpoints.
E.g. for genomics this would be a gene matrix, where each row gives the expression for a single gene.
Furthermore, a classlabel needs to be provided to distinguish the two sample groups.
For the next steps you have the following choices:
Either marginal hypothesis tests (Wilcoxon if robust, otherwise t-test) can be performed on each row of the
data matrix to obtain raw p-values which then need to be adjusted for multiplicity to control a chosen error rate.
Or resampling based methods could be performed based on Dudoit and van der Laan (2007) to obtain adjusted p-values which control the
FWER. Afterwards it is possible to use augmentation procedures to get adjusted p-values for control of FDR, FDX or gFWER.
Reference:
- Dudoit, S. and van der Laan, M.J. (2007). Multiple Testing Procedures with Applications to Genomics. Springer Series in Statistics.
",
parameters=list(
classlabel=list(type="RObject", label="classlabel")
)
)) }
twosamp.marginal <- function(data, model, robust, alternative, psi0, equalvar) {
label <- as.numeric(as.factor(model$classlabel))
result <- NULL
if (robust) {
result <- apply(data, 1, function(x) {wilcox.test(x=x[ ,label==1], y=x[label==2], alternative=alternative, mu=psi0)$p.value} )
} else {
result <- apply(data, 1, function(x) {t.test(x=x[ ,label==1], y=x[label==2], alternative=alternative, mu=psi0, equal.var=equalvar)$p.value} )
}
return(list(pValues=result))
}
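# Illustrative call: 6 hypothetical endpoints, 5 samples per group; the model
# object carries the class labels:
# dat <- matrix(rnorm(60), nrow = 6)
# mod <- twosamp.model(rep(c(0, 1), each = 5))$model
# twosamp.marginal(dat, mod, robust = FALSE, alternative = "two.sided",
#                  psi0 = 0, equalvar = TRUE)$pValues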
mutoss.twosamp.marginal.model <- function(model) {
return("typ" %in% names(model) && model$typ == "twosamp")
}
mutoss.twosamp.marginal <- function() { return(new(Class="MutossMethod",
label="Two sample test",
callFunction="twosamp.marginal",
output=c("pValues"),
info="
The robust version uses the Wilcoxon-Mann-Whitney test, otherwise a two-sample t-test will be performed.
Reference:
- Wilcoxon, F. (1945). Individual Comparisons by Ranking Methods. Biometrics Bulletin 1:80-83.
\n\
- Mann, H. and Whitney, D. (1947). On a test of whether one of two random variables is stochastically larger
than the other. Annals of Mathematical Statistics 18:50-60
\n
",
parameters=list(
data=list(type="ANY"),
model=list(type="ANY"),
robust=list(type="logical", label="Robust statistic"),
alternative=list(type="character", label="Alternative", choices=c("two.sided", "less", "greater")),
psi0=list(type="numeric", label="Hypothesized null value", default=0),
equalvar=list(type="logical", label="Equal variance")
)
)) }
### paired sample model ###
paired.model <- function(classlabel) {
classlabel <- as.vector(classlabel)
return(list(model=list(typ="pairedsamp", classlabel=classlabel)))
}
mutoss.paired.model <- function() { return(new(Class="MutossMethod",
label="Paired sample test",
callFunction="paired.model",
output=c("model"),
info="Paired sample test
The robust version uses the Wilcoxon signed rank test, otherwise a paired t-test will be performed.
The input for this paired sample model is a data matrix whose columns represent the samples and the rows represent the multiple endpoints.
E.g. for genomics this would be a gene matrix, where each row gives the expression for a single gene.
Furthermore, a classlabel needs to be provided to distinguish the two paired groups. The arrangement of group indices does not matter, as long
as the columns are arranged in the same corresponding order between groups. For example, if group 1 is coded as 0 and group 2 is
coded as 1, for 3 pairs of data, it does not matter if the classlabel is coded as (0,0,0,1,1,1) or (1,1,1,0,0,0) or (0,1,0,1,0,1)
or (1,0,1,0,1,0), the paired differences between groups will be calculated as group2 - group1.
For the next steps you have the following choices:
Either marginal hypothesis tests (Wilcoxon if robust, otherwise t-test) can be performed on each row of the
data matrix to obtain raw p-values which then need to be adjusted for multiplicity to control a chosen error rate.
Or resampling based methods could be performed based on Dudoit and van der Laan (2007) to obtain adjusted p-values which control the
FWER. Afterwards it is possible to use augmentation procedures to get adjusted p-values for control of FDR, FDX or gFWER.
Reference:
- Dudoit, S. and van der Laan, M.J. (2007). Multiple Testing Procedures with Applications to Genomics. Springer Series in Statistics.
",
parameters=list(
classlabel=list(type="RObject", label="classlabel")
)
)) }
paired.marginal <- function(data, model, robust, alternative, psi0, equalvar) {
label <- as.numeric(as.factor(model$classlabel))
result <- NULL
if (robust) {
result <- apply(data, 1, function(x) {wilcox.test(x=x[label==1], y=x[label==2], alternative=alternative, mu=psi0, paired=TRUE, var.equal=equalvar)$p.value} )
} else {
result <- apply(data, 1, function(x) {t.test(x=x[label==1], y=x[label==2], alternative=alternative, mu=psi0, paired=TRUE, var.equal=equalvar)$p.value} )
}
return(list(pValues=result))
}
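# Illustrative call: 4 hypothetical endpoints, 5 pairs; the label vector marks
# the two measurements of each pair:
# dat <- matrix(rnorm(40), nrow = 4)
# mod <- paired.model(rep(c(0, 1), each = 5))$model
# paired.marginal(dat, mod, robust = FALSE, alternative = "two.sided",
#                 psi0 = 0, equalvar = TRUE)$pValues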
mutoss.paired.marginal <- function() { return(new(Class="MutossMethod",
label="Paired sample test",
callFunction="paired.marginal",
output=c("pValues"),
info="
The robust version uses the Wilcoxon test, otherwise a paired t-test will be performed.
A vector of classlabels needs to be provided to distinguish the two paired groups. The arrangement of group indices does not matter, as long
as the columns are arranged in the same corresponding order between groups. For example, if group 1 is coded as 0 and group 2 is
coded as 1, for 3 pairs of data, it does not matter if the classlabel is coded as (0,0,0,1,1,1) or (1,1,1,0,0,0) or (0,1,0,1,0,1)
or (1,0,1,0,1,0), the paired differences between groups will be calculated as group2 - group1.
You could either choose a valid R object to load as classlabels or you could provide it manually by inserting e.g. c(0,1,0,1,0,1) or rep(c(0,1), each=5) or rep(c(0,1), 5).
",
parameters=list(
data=list(type="ANY"),
model=list(type="ANY"),
robust=list(type="logical", label="Robust statistic"),
alternative=list(type="character", label="Alternative", choices=c("two.sided", "less", "greater")),
psi0=list(type="numeric", label="Hypothesized null value", default=0),
equalvar=list(type="logical", label="Equal variance")
)
)) }
mutoss.paired.marginal.model <- function(model) {
return("typ" %in% names(model) && model$typ == "pairedsamp")
}
### f test model ###
ftest.model <- function(classlabel) {
classlabel <- as.vector(classlabel)
return(list(model=list(typ="ftest", classlabel=classlabel)))
}
mutoss.ftest.model <- function() { return(new(Class="MutossMethod",
label="F test",
callFunction="ftest.model",
output=c("model"),
info="F test
The input for this F test model is a data matrix whose columns represent the samples and the rows represent the multiple endpoints.
E.g. for genomics this would be a gene matrix, where each row gives the expression for a single gene.
Furthermore, a classlabel needs to be provided to distinguish k sample groups.
For the next steps you have the following choices:
Either marginal hypothesis tests (Kruskal-Wallis if robust, otherwise F-test) can be performed on each row of the
data matrix to obtain raw p-values which then need to be adjusted for multiplicity to control a chosen error rate.
Or resampling based methods could be performed based on Dudoit and van der Laan (2007) to obtain adjusted p-values which control the
FWER. Afterwards it is possible to use augmentation procedures to get adjusted p-values for control of FDR, FDX or gFWER.
Reference:
- Dudoit, S. and van der Laan, M.J. (2007). Multiple Testing Procedures with Applications to Genomics. Springer Series in Statistics.
",
parameters=list(
classlabel=list(type="RObject", label="classlabel")
)
)) }
ftest.marginal <- function(data, model, robust) {
label <- as.numeric(as.factor(model$classlabel))
result <- NULL
if (robust) {
result <- apply(data, 1, function(x) {kruskal.test(x=x, g=label)$p.value} )
} else {
result <- apply(data, 1, function(x) { out=x
anova(lm( out ~ label ))$'Pr(>F)'[1]} )
}
return(list(pValues=result))
}
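# Illustrative call: 4 hypothetical endpoints, 3 groups of 4 samples each:
# dat <- matrix(rnorm(48), nrow = 4)
# mod <- ftest.model(rep(1:3, each = 4))$model
# ftest.marginal(dat, mod, robust = FALSE)$pValues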
mutoss.ftest.marginal <- function() { return(new(Class="MutossMethod",
label="F test",
callFunction="ftest.marginal",
output=c("pValues"),
info="
Robust = Kruskal-Wallis test. Otherwise F-test.
Reference:
- Kruskal, W.H. and Wallis, W.A. (1952). Use of ranks in one-criterion variance analysis. JASA, 47:583-621
",
parameters=list(
data=list(type="ANY"),
model=list(type="ANY"),
robust=list(type="logical", label="Robust statistic")
)
)) }
mutoss.ftest.marginal.model <- function(model) {
return("typ" %in% names(model) && model$typ == "ftest")
}
mutoss/R/multtest.R 0000644 0001762 0000144 00000041252 15123457163 014024 0 ustar ligges users #######################################################################
########### here are the multtest methods - marginal siblings can be found in marginal.R ######################
### one sample model with multtest ###
onesamp.multtest <- function(data, alternative, robust, psi0, alpha, nulldist, B=1000, method, seed=12345) {
result <- MTP(X=data, W = NULL, Y = NULL, Z = NULL, Z.incl = NULL, Z.test = NULL,
na.rm = TRUE, test = "t.onesamp", robust = robust,
standardize = TRUE, alternative = alternative, psi0 = psi0,
typeone = "fwer", k = 0, q = 0.1, fdr.method = "restricted",
alpha = alpha, smooth.null = FALSE, nulldist = nulldist,
B = B, ic.quant.trans = FALSE, MVN.method = "mvrnorm",
penalty = 1e-06, method = method, get.cr = FALSE, get.cutoff = FALSE,
get.adjp = TRUE, keep.nulldist = FALSE, keep.rawdist = FALSE,
seed = seed, cluster = 1, type = NULL, dispatch = NULL, marg.null = NULL,
marg.par = NULL, keep.margpar = TRUE, ncp = NULL, perm.mat = NULL,
keep.index = FALSE, keep.label = FALSE)
return(list(adjPValues=result@adjp, rejected=as.vector(result@reject)))
}
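# Illustrative call (requires the multtest package; the small B keeps the
# bootstrap quick, real analyses should use far more iterations):
# dat <- matrix(rnorm(200), nrow = 20)
# onesamp.multtest(dat, alternative = "two.sided", robust = FALSE, psi0 = 0,
#                  alpha = 0.05, nulldist = "boot.cs", B = 100, method = "sd.minP")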
mutoss.onesamp.multtest.model <- function(model) {
return("typ" %in% names(model) && model$typ == "onesamp")
}
mutoss.onesamp.multtest <- function() { return(new(Class="MutossMethod",
label="Resampling-based one sample test",
errorControl="FWER",
callFunction="onesamp.multtest",
output=c("adjPValues", "rejected"),
info="Resampling-based one sample test
There are different choices of resampling methods available for estimating the joint test statistics null distribution:\n\
- \"boot.cs\": non-parametric bootstrap with centering and scaling
\n\
- \"boot.ctr\": centered-only bootstrap distribution
\n\
- \"boot.qt\": quantile transformed bootstrap distribution. the default marginal t-distribution with n-1 degree of freedom is used.
\n\
- \"perm\": permutation distribution (refering to the Westfall and Young procedures)
\n\
- \"ic\": under GUI construction (available at (library(multtest))
\n
There are four adjustment methods to control the FWER:
- \"sd.minP\": step-down common-quantile procedure based on the minima of unadjusted p-values
- \"sd.maxT\": step-down common-cut-off procedure based on the maxima of test statistics
- \"ss.minP\": single-step common-quantile procedure
- \"ss.maxT\": single-step common-cut-off procedure
The default number of bootstrap iterations (or number of permutations if resampling method is \"perm\") is 1000. This can be reduced to increase the speed
of computations, at a cost to precision. However, it is recommended to use a large number of resampling iterations, e.g. 10,000.
The robust version uses the Wilcoxon-Mann-Whitney test, otherwise a t-test will be performed.
Reference:
- Dudoit, S. and van der Laan, M.J. (2007). Multiple Testing Procedures with Applications to Genomics. Springer Series in Statistics.
\n\
- Westfall, P.H. and Young, S.S. (1993). Resampling-Based Multiple Testing. Examples and Methods for p-value adjustment. Wiley Series in Probability and Mathematical Statistics.
\n
",
parameters=list(
data=list(type="ANY"),
model=list(type="ANY"),
robust=list(type="logical", label="Robust statistic"),
alternative=list(type="character", label="Alternative", choices=c("two.sided", "less", "greater")),
psi0=list(type="numeric", label="Hypothesized null value", default=0),
alpha=list(type="numeric"),
nulldist=list(type="character", label="Resampling Method", choices=c("boot.cs", "boot.ctr", "boot.qt", "perm")),
B=list(type="numeric", label="Number of Resampling Iterations", default=1000),
method=list(type="character", label="Adjustment Method", choices=c("sd.minP","sd.maxT","ss.minP","ss.maxT"),
seed=list(type="ANY", default=12345)
)
))) }
### paired sample model with multtest ###
paired.multtest <- function(data, model, alternative, robust, psi0, alpha, nulldist, B=1000, method, seed=12345) {
result <- MTP(X=data, W = NULL, Y = model$classlabel, Z = NULL, Z.incl = NULL, Z.test = NULL,
na.rm = TRUE, test = "t.pair", robust = robust,
standardize = TRUE, alternative = alternative, psi0 = psi0,
typeone = "fwer", k = 0, q = 0.1, fdr.method = "restricted",
alpha = alpha, smooth.null = FALSE, nulldist = nulldist,
B = B, ic.quant.trans = FALSE, MVN.method = "mvrnorm",
penalty = 1e-06, method = method, get.cr = FALSE, get.cutoff = FALSE,
get.adjp = TRUE, keep.nulldist = FALSE, keep.rawdist = FALSE,
seed = seed, cluster = 1, type = NULL, dispatch = NULL, marg.null = NULL,
marg.par = NULL, keep.margpar = TRUE, ncp = NULL, perm.mat = NULL,
keep.index = FALSE, keep.label = FALSE)
return(list(adjPValues=result@adjp, rejected=as.vector(result@reject)))
}
mutoss.paired.multtest.model <- function(model) {
return("typ" %in% names(model) && model$typ == "pairedsamp")
}
mutoss.paired.multtest <- function() { return(new(Class="MutossMethod",
label="Resampling-based paired sample test",
errorControl="FWER",
callFunction="paired.multtest",
output=c("adjPValues", "rejected"),
info="Resampling-based paired test
There are different choices of resampling methods available for estimating the joint test statistics null distribution:\n\
- \"boot.cs\": non-parametric bootstrap with centering and scaling
\n\
- \"boot.ctr\": centered-only bootstrap distribution
\n\
- \"boot.qt\": quantile transformed bootstrap distribution. the default marginal t-distribution with n-1 degree
of freedom is used, where n is the number of pairs.
\n\
- \"perm\": permutation distribution (refering to the Westfall and Young procedures)
\n\
- \"ic\": under GUI construction (available at (library(multtest))
\n
There are four adjustment methods to control the FWER:
- \"sd.minP\": step-down common-quantile procedure based on the minima of unadjusted p-values
- \"sd.maxT\": step-down common-cut-off procedure based on the maxima of test statistics
- \"ss.minP\": single-step common-quantile procedure
- \"ss.maxT\": single-step common-cut-off procedure
The default number of bootstrap iterations (or number of permutations if resampling method is \"perm\") is 1000. This can be reduced to increase the speed
of computations, at a cost to precision. However, it is recommended to use a large number of resampling iterations, e.g. 10,000.
The robust version uses the Wilcoxon-Mann-Whitney test, otherwise a t-test will be performed.
Reference:
- Dudoit, S. and van der Laan, M.J. (2007). Multiple Testing Procedures with Applications to Genomics. Springer Series in Statistics.
\n\
- Westfall, P.H. and Young, S.S. (1993). Resampling-Based Multiple Testing. Examples and Methods for p-value adjustment. Wiley Series in Probability and Mathematical Statistics.
\n
",
parameters=list(
data=list(type="ANY"),
model=list(type="ANY"),
robust=list(type="logical", label="Robust statistic"),
alternative=list(type="character", label="Alternative", choices=c("two.sided", "less", "greater")),
psi0=list(type="numeric", label="Hypothesized null value", default=0),
alpha=list(type="numeric"),
nulldist=list(type="character", label="Resampling Method", choices=c("boot.cs", "boot.ctr", "boot.qt", "perm")),
B=list(type="numeric", label="Number of Resampling Iterations", default=1000),
method=list(type="character", label="Adjustment Method", choices=c("sd.minP","sd.maxT","ss.minP","ss.maxT"),
seed=list(type="ANY", default=12345))
)
)) }
### two sample model with multtest ####
twosamp.multtest <- function(data, model, alternative, robust, psi0, equalvar, alpha, nulldist, B=1000, method, seed=12345) {
if (equalvar) {
result <- MTP(X=data, W = NULL, Y = model$classlabel, Z = NULL, Z.incl = NULL, Z.test = NULL,
na.rm = TRUE, test = "t.twosamp.equalvar", robust = robust,
standardize = TRUE, alternative = alternative, psi0 = psi0,
typeone = "fwer", k = 0, q = 0.1, fdr.method = "restricted",
alpha = alpha, smooth.null = FALSE, nulldist = nulldist,
B = B, ic.quant.trans = FALSE, MVN.method = "mvrnorm",
penalty = 1e-06, method = method, get.cr = FALSE, get.cutoff = FALSE,
get.adjp = TRUE, keep.nulldist = FALSE, keep.rawdist = FALSE,
seed = seed, cluster = 1, type = NULL, dispatch = NULL, marg.null = NULL,
marg.par = NULL, keep.margpar = TRUE, ncp = NULL, perm.mat = NULL,
keep.index = FALSE, keep.label = FALSE)
}
else {
result <- MTP(X=data, W = NULL, Y = model$classlabel, Z = NULL, Z.incl = NULL, Z.test = NULL,
na.rm = TRUE, test = "t.twosamp.unequalvar", robust = robust,
standardize = TRUE, alternative = alternative, psi0 = psi0,
typeone = "fwer", k = 0, q = 0.1, fdr.method = "restricted",
alpha = alpha, smooth.null = FALSE, nulldist = nulldist,
B = B, ic.quant.trans = FALSE, MVN.method = "mvrnorm",
penalty = 1e-06, method = method, get.cr = FALSE, get.cutoff = FALSE,
get.adjp = TRUE, keep.nulldist = FALSE, keep.rawdist = FALSE,
seed = seed, cluster = 1, type = NULL, dispatch = NULL, marg.null = NULL,
marg.par = NULL, keep.margpar = TRUE, ncp = NULL, perm.mat = NULL,
keep.index = FALSE, keep.label = FALSE)
}
return(list(adjPValues=result@adjp, rejected=as.vector(result@reject)))
}
mutoss.twosamp.multtest.model <- function(model) {
return("typ" %in% names(model) && model$typ == "twosamp")
}
mutoss.twosamp.multtest <- function() { return(new(Class="MutossMethod",
label="Resampling-based two sample test",
errorControl="FWER",
callFunction="twosamp.multtest",
output=c("adjPValues", "rejected"),
info="Resampling-based two sample test
There are different choices of resampling methods available for estimating the joint test statistics null distribution:\n\
- \"boot.cs\": non-parametric bootstrap with centering and scaling
\n\
- \"boot.ctr\": centered-only bootstrap distribution
\n\
- \"boot.qt\": quantile transformed bootstrap distribution. the default marginal t-distribution with n-1 degree of freedom is used.
\n\
- \"perm\": permutation distribution (refering to the Westfall and Young procedures)
\n\
- \"ic\": under GUI construction (available at (library(multtest))
\n
There are four adjustment methods to control the FWER:
- \"sd.minP\": step-down common-quantile procedure based on the minima of unadjusted p-values
- \"sd.maxT\": step-down common-cut-off procedure based on the maxima of test statistics
- \"ss.minP\": single-step common-quantile procedure
- \"ss.maxT\": single-step common-cut-off procedure
The default number of bootstrap iterations (or number of permutations if resampling method is \"perm\") is 1000. This can be reduced to increase the speed
of computations, at a cost to precision. However, it is recommended to use a large number of resampling iterations, e.g. 10,000.
The robust version uses the Wilcoxon-Mann-Whitney test, otherwise a t-test will be performed.
Reference:
- Dudoit, S. and van der Laan, M.J. (2007). Multiple Testing Procedures with Applications to Genomics. Springer Series in Statistics.
\n\
- Westfall, P.H. and Young, S.S. (1993). Resampling-Based Multiple Testing. Examples and Methods for p-value adjustment. Wiley Series in Probability and Mathematical Statistics.
\n
",
parameters=list(
data=list(type="ANY"),
model=list(type="ANY"),
robust=list(type="logical", label="Robust statistic"),
alternative=list(type="character", label="Alternative", choices=c("two.sided", "less", "greater")),
psi0=list(type="numeric", label="Hypothesized null value", default=0),
equalvar=list(type="logical", label="Equal variance"),
alpha=list(type="numeric"),
nulldist=list(type="character", label="Resampling Method", choices=c("boot.cs", "boot.ctr", "boot.qt", "perm")),
B=list(type="numeric", label="Number of Resampling Iterations", default=1000),
method=list(type="character", label="Adjustment Method", choices=c("sd.minP","sd.maxT","ss.minP","ss.maxT"),
seed=list(type="ANY", default=12345))
)
)) }
##################### F test #############################################
###########################################################################
ftest.multtest <- function(data, model, robust, alpha, nulldist, B=1000, method, seed=12345) {
result <- MTP(X=data, W = NULL, Y = model$classlabel, Z = NULL, Z.incl = NULL, Z.test = NULL,
na.rm = TRUE, test = "f", robust = robust,
standardize = TRUE, typeone = "fwer",
alpha = alpha, smooth.null = FALSE, nulldist = nulldist,
B = B, ic.quant.trans = FALSE, MVN.method = "mvrnorm",
penalty = 1e-06, method = method, get.cr = FALSE, get.cutoff = FALSE,
get.adjp = TRUE, keep.nulldist = FALSE, keep.rawdist = FALSE,
seed = seed, cluster = 1, type = NULL, dispatch = NULL, marg.null = NULL,
marg.par = NULL, keep.margpar = TRUE, ncp = NULL, perm.mat = NULL,
keep.index = FALSE, keep.label = FALSE)
return(list(adjPValues=result@adjp, rejected=as.vector(result@reject)))
}
mutoss.ftest.multtest <- function() { return(new(Class="MutossMethod",
label="Resampling-based F test",
errorControl="FWER",
callFunction="ftest.multtest",
output=c("adjPValues", "rejected"),
info="Resampling-based F test
There are different choices of resampling methods available for estimating the joint test statistics null distribution:\n\
- \"boot.cs\": non-parametric bootstrap with centering and scaling
\n\
- \"boot.ctr\": centered-only bootstrap distribution
\n\
- \"boot.qt\": quantile transformed bootstrap distribution. the default marginal F-distribution with df1=k-1, df2=n-k for k gruops is used.
\n\
- \"perm\": permutation distribution (refering to the Westfall and Young procedures)
\n\
- \"ic\": under GUI construction (available at (library(multtest))
\n
There are four adjustment methods to control the FWER:
- \"sd.minP\": step-down common-quantile procedure based on the minima of unadjusted p-values
- \"sd.maxT\": step-down common-cut-off procedure based on the maxima of test statistics
- \"ss.minP\": single-step common-quantile procedure
- \"ss.maxT\": single-step common-cut-off procedure
The default number of bootstrap iterations (or number of permutations if resampling method is \"perm\") is 1000. This can be reduced to increase the speed
of computations, at a cost to precision. However, it is recommended to use a large number of resampling iterations, e.g. 10,000.
The robust version uses the Kruskal-Wallis test, otherwise an F test will be performed.
Reference:
- Dudoit, S. and van der Laan, M.J. (2007). Multiple Testing Procedures with Applications to Genomics. Springer Series in Statistics.
\n\
- Westfall, P.H. and Young, S.S. (1993). Resampling-Based Multiple Testing: Examples and Methods for p-Value Adjustment. Wiley Series in Probability and Mathematical Statistics.
\n
",
parameters=list(
data=list(type="ANY"),
model=list(type="ANY"),
robust=list(type="logical", label="Robust statistic"),
alpha=list(type="numeric"),
nulldist=list(type="character", label="Resampling Method", choices=c("boot.cs", "boot.ctr", "boot.qt", "perm")),
B=list(type="numeric", label="Number of Resampling Iterations", default=1000),
method=list(type="character", label="Adjustment Method", choices=c("sd.minP","sd.maxT","ss.minP","ss.maxT")),
seed=list(type="ANY", default=12345)
)
)) }
mutoss.ftest.multtest.model <- function(model) {
return("typ" %in% names(model) && model$typ == "ftest")
} mutoss/R/Tukey.R 0000644 0001762 0000144 00000004047 15123457163 013245 0 ustar ligges users # Tukey HSD Test in parametric factorial designs
#
# Author: FrankKonietschke
###############################################################################
tukey.wrapper <- function(model, alpha, factorC) {
model <- aov(model)
tukeyObj <- TukeyHSD(model, conf.level = 1 - alpha, factorC)
# column 1: point estimates; columns 2/3: simultaneous lower/upper bounds; column 4: adjusted p-values
estimates <- c(tukeyObj)[[1]][,1]
confintL <- c(tukeyObj)[[1]][,2]
confintU <- c(tukeyObj)[[1]][,3]
confi <- cbind(estimates, confintL, confintU)
pvalues <- c(tukeyObj)[[1]][,4]
rejected1 <- (pvalues < alpha)
print(cbind(confi, pvalues))
return(list(adjPValues=pvalues, rejected=rejected1, confIntervals=confi,
errorControl = new(Class='ErrorControl', type="FWER", alpha=alpha)))
}
#tukey.wrapper(aov(breaks~wool*tension,data=warpbreaks),alpha=0.05,"tension")
mutoss.tukey<- function() { return(new(Class="MutossMethod",
label="Tukey HSD Test",
errorControl="FWER",
callFunction="tukey.wrapper",
output=c("adjPValues", "rejected","confIntervals","errorControl"),
info="Tukey HSD test and simultaneous confidence intervals in parametric factorial designs
With this function, it is possible to compute all-pairs comparisons of expectations and
simultaneous confidence intervals in factorial linear models. The all-pairs comparisons
can be performed for user-given effects. Both the overall variance and the degrees of freedom
used by the studentized range distribution are estimated from the linear model.
Reference:
- Tukey, J.W. (1953). \"The problem of multiple comparisons.\" Unpublished manuscript,
reprinted in: The Collected Works of John W. Tukey, Volume 8 (1994), Braun, H.I. (ed.), Chapman and Hall, New York.
",
parameters=list(model=list(type="ANY"),
alpha=list(type="numeric"),
factorC=list(type="character", label="Factor for Comparison", fromR="FactorVar")
)
)) }
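# A minimal usage sketch mirroring the commented call earlier in this file (warpbreaks
# ships with R); the confIntervals component holds the estimates with simultaneous bounds:
# res <- tukey.wrapper(aov(breaks ~ wool * tension, data = warpbreaks), alpha = 0.05, factorC = "tension")
# res$confIntervals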
mutoss/R/Rank_Truncated.R 0000644 0001762 0000144 00000003051 15123457163 015042 0 ustar ligges users #
#
# Author: FrankKonietschke
###############################################################################
ranktruncated <- function(pValues, K, silent = FALSE){
L <- length(pValues)
if (L > 1000) {
warning("Method implementation is numerically unstable for large numbers of p-values. Please do not rely on the results.")
}
if (K > L){
stop("K must not be larger than the number of p-values L")
}
#-----Compute the test statistic-----#
index <- order(pValues)
rindex <- order(index)
spval <- pValues[index]
w <- prod(spval[1:K])
#---Record the p-values entering the product--------#
p.used <- data.frame(Position=index[1:K], pValue=spval[1:K])
#--Compute the function awt as in the paper-----#
awt <- function(w,t,K){
if (w<=t^K){
s <- c(0:(K-1))
num1 <- K*log(t)-log(w)
num2 <- w * sum(num1^s/factorial(s))
}
if (w > t^K) {
num2<- t^K
}
return(num2)
}
# ----- Approximate the distribution: numerically integrate over t in (0,1) -----#
fac1 <- choose(L, K+1)*(K+1)
t <- seq(0.001, 0.999, 0.001)
terg <- c()
for (i in 1:length(t)){
terg[i] <- (1-t[i])^(L-K-1)*awt(w, t[i], K)
}
#--------------Compute the p-value-------------#
distribution <- fac1*mean(terg)
#--Clip at 1 to absorb the numerical integration error--#
p1 <- (distribution>1)
distribution[p1] <- 1
#--------------Prepare the output---------------#
if (! silent)
{
cat("#----Rank Truncated Product of P-Values (Dubridge and Koeleman; 2003) \n\n")
}
result <- data.frame(Statistic = w, p.Value=distribution)
return(list(Used.pValue=p.used, RTP=result))
}
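# A minimal usage sketch (hypothetical p-values): the statistic w is the product of the
# K smallest p-values, and RTP$p.Value is its p-value under the joint uniform null:
# pv <- c(0.001, 0.02, 0.15, 0.30, 0.70)
# ranktruncated(pv, K = 2)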
mutoss/NEWS 0000644 0001762 0000144 00000002157 15123457163 012317 0 ustar ligges users Version 0.1-14
==============
- fixed CRAN notes and warnings
Version 0.1-13
==============
- ranktruncated warns for large numbers of p-values. (Thanks to Phillip Bredahl Mogensen for notification!)
- noted that adaptiveBH is not equivalent to multcomp's ABH. (Thanks to David Swanepoel for notification!)
Version 0.1-12
==============
- changed description according to CRAN policy
Version 0.1-11
==============
- fixed CRAN notes and warnings
- switched from RUnit to testthat
Version 0.1-10
==============
- compatibility with qvalue 1.99.0
Version 0.1-9
=============
- compatibility to qvalue 1.43.0
Version 0.1-8
=============
- moving many packages from DEPENDS to IMPORTS
- fixed layout in examples
- fixed various warnings
Version 0.1-7
=============
- corrections in the Hommel documentation
- package multtest is a dependency
Version 0.1-6
=============
- Simulation tool: objects returned by simulation() use less memory and
generated data can be discarded.
Version 0.1-5
=============
- jointCDF.orderedUnif which was only a helper function is now exported
Version 0.1-4
=============
- Bug fix in ABH_pi0_est mutoss/vignettes/ 0000755 0001762 0000144 00000000000 15123457212 013616 5 ustar ligges users mutoss/vignettes/simToolManual.Rnw 0000644 0001762 0000144 00000000332 15123457163 017075 0 ustar ligges users % \VignetteIndexEntry{MuToss Simulation Tool Guide}
\documentclass[a4paper]{article}
\usepackage{hyperref}
\usepackage{pdfpages}
\begin{document}
\includepdf[fitpaper=true,pages=-]{simToolVignette.pdf}
\end{document} mutoss/vignettes/simToolVignette.pdf 0000644 0001762 0000144 00000640301 15123457163 017456 0 ustar ligges users [binary PDF payload omitted]