BradleyTerry2/0000755000176200001440000000000014776015014012751 5ustar liggesusersBradleyTerry2/tests/0000755000176200001440000000000014775715607014131 5ustar liggesusersBradleyTerry2/tests/old-tests/0000755000176200001440000000000014775237530016041 5ustar liggesusersBradleyTerry2/tests/old-tests/old-tests.R0000644000176200001440000000170414775237530020104 0ustar liggesusers# old test of BTabilities # - all parameters are estimable, not sure what this was about ## modelled by covariates where some parameters inestimable summary(chameleon.model <- BTm(player1 = winner, player2 = loser, formula = ~ prev.wins.2 + ch.res[ID] + prop.main[ID] + (1|ID), id = "ID", data = chameleons)) head(BTabilities(chameleon.model)) # old test of grouped residuals # - there is no "separate" attribute here, has behaviour changed? Whiting.model3 <- BTm(result, winner, loser, ~ throat.PC1[..] + throat.PC3[..] + head.length[..] + SVL[..] + (1|..), family = binomial(link = "probit"), data = flatlizards, trace = TRUE) residuals(Whiting.model3, "grouped") ## Note the "separate" attribute here, identifying two lizards with ## missing values of at least one predictor variableBradleyTerry2/tests/testthat/0000755000176200001440000000000014776015014015753 5ustar liggesusersBradleyTerry2/tests/testthat/test-countsToBinomial.R0000644000176200001440000000106214775237530022353 0ustar liggesuserscontext("implementation [countsToBinomial]") test_that("countstoBinomial works as expected", { ## Convert frequencies to success/failure data results <- countsToBinomial(citations) lev <- c("Biometrika", "Comm Statist", "JASA", "JRSS-B") expect_equal(results, data.frame(player1 = factor(rep(lev[1:3], 3:1), lev), player2 = factor(lev[c(2:4, 3:4, 4)], lev), win1 = c(730, 498, 221, 68, 17, 142), win2 = c(33, 320, 284, 813, 276, 325))) }) BradleyTerry2/tests/testthat/test-add1-drop1.R0000644000176200001440000000223214775237530020716 0ustar liggesuserscontext("methods [add1, drop1]") tol <- 1e-6 # flatlizards GLMM result <- rep(1, nrow(flatlizards$contests)) BTmodel1 <- BTm(result, winner, loser, ~ throat.PC1[..] + throat.PC3[..] + (1|..), data = flatlizards, tol = 1e-4, sigma = 2) # add a term BTmodel2 <- update(BTmodel1, formula = ~ . + head.length[..]) test_that("drop1 works with GLMM", { # check against expected values for single term deletions res <- drop1(BTmodel1) expect_known_value(res, file = test_path("outputs/drop1.rds"), tol = tol) # check against anova res2 <- drop1(BTmodel2, test = "Chisq") expect_equal(res2$Statistic[3], anova(BTmodel1, BTmodel2)$Statistic[2]) }) test_that("add1 with Chisq tests works with GLMM", { # check against expected values for single term additions res <- add1(BTmodel1, ~ . + head.length[..] + SVL[..], test = "Chisq") expect_known_value(res, file = test_path("outputs/add1.rds"), tol = tol) # check against anova expect_equal(res$Statistic[1], anova(BTmodel1, BTmodel2)$Statistic[2]) }) BradleyTerry2/tests/testthat/test-baseball.R0000644000176200001440000000262014775237530020630 0ustar liggesuserscontext("data sets [baseball]") ## This reproduces the analysis in Sec 10.6 of Agresti (2002). ## pp 437-438 Categorical Data Analysis (2nd Edn.) 
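## For reference (a note added here, not part of the original test file): under
## the Bradley-Terry model the probability p_ij that team i beats team j satisfies
##     logit(p_ij) = lambda_i - lambda_j,
## where lambda_i is the "ability" of team i (lambda/delta are illustrative
## notation only). Adding the "at.home" covariate gives the home side an extra
## home-advantage term delta on the logit scale:
##     logit(p_ij) = lambda_i - lambda_j + delta   (when team i is at home).
## So the at.home coefficient checked below (about 0.302) implies that a home
## team beats an equally able away team with probability
# plogis(0.302)   # = exp(0.302) / (1 + exp(0.302)), approximately 0.575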
## Simple Bradley-Terry model, ignoring home advantage: baseballModel1 <- BTm(cbind(home.wins, away.wins), home.team, away.team, data = baseball, id = "team") ## Now incorporate the "home advantage" effect baseball$home.team <- data.frame(team = baseball$home.team, at.home = 1) baseball$away.team <- data.frame(team = baseball$away.team, at.home = 0) baseballModel2 <- update(baseballModel1, formula = ~ team + at.home) test_that("baseball analysis reproducible", { # check model 1 cf1 <- coef(summary(baseballModel1)) # check against Table 10.11, column 3 expect_identical(unname(round(sort(cf1[, "Estimate"]), 2)), c(0.68, 1.11, 1.25, 1.29, 1.44, 1.58)) # check statement that standard errors are about 0.3 expect_identical(unname(round(cf1[, "Std. Error"], 1)), rep(0.3, 6)) # check model 2 abilities <- exp(BTabilities(baseballModel2)[, "ability"]) abilities <- abilities/sum(abilities) # check against Table 10.11, column 5 expect_identical(unname(round(sort(abilities), 3)), c(0.044, 0.088, 0.137, 0.157, 0.164, 0.190, 0.220)) expect_identical(unname(round(coef(baseballModel2)["at.home"], 3)), 0.302) })BradleyTerry2/tests/testthat/test-flatlizards.R0000644000176200001440000001051514775237530021404 0ustar liggesuserscontext("data sets [flatlizards]") tol <- 1e-6 ## standard BT model, using the bias-reduced maximum likelihood method: result <- rep(1, nrow(flatlizards$contests)) BTmodel <- BTm(result, winner, loser, br = TRUE, data = flatlizards$contests) ## "structured" B-T model: abilities are determined by a linear predictor. Whiting.model1 <- BTm(result, winner, loser, ~ throat.PC1[..] + throat.PC3[..] + head.length[..] + SVL[..], family = binomial, data = flatlizards) ## Equivalently, fit the same model using glmmPQL: Whiting.model1b <- BTm(result, winner, loser, ~ throat.PC1[..] + throat.PC3[..] + head.length[..] + SVL[..] + (1|..), sigma = 0, sigma.fixed = TRUE, data = flatlizards) ## Same predictor but with a normally distributed error Whiting.model2 <- BTm(result, winner, loser, ~ throat.PC1[..] + throat.PC3[..] + head.length[..] + SVL[..] + (1|..), data = flatlizards) ## Now use probit rather than logit as the link function: Whiting.model3 <- BTm(result, winner, loser, ~ throat.PC1[..] + throat.PC3[..] + head.length[..] + SVL[..] + (1|..), family = binomial(link = "probit"), data = flatlizards) test_that("standard model as expected on flatlizards", { # check standard model # ignore family: mode of initialize changes between R versions res <- summary(BTmodel) res$family <- NULL expect_known_value(res, file = test_path("outputs/flatlizards-BTmodel.rds"), tol = tol) # check structured model against Table 1 of Whiting et al. (2006) # (for coefficients of covariates only, not separate lizard effects) cf <- coef(summary(Whiting.model1))[-(1:2),] expect_equal(unname(round(cf[, "Estimate"], 2)), c(-0.09, 0.34, -1.13, 0.19)) expect_equal(unname(round(cf[, "Std. Error"], 2)), c(0.03, 0.11, 0.49, 0.1)) # reported Z stat appear to be Chi-squared stat expect_equal(unname(round(cf[, "z value"]^2, 1)), c(10.3, 9.5, 5.2, 3.6), tol = 1e-1) expect_equal(unname(signif(cf[, "Pr(>|z|)"], 1)), c(0.001, 0.002, 0.02, 0.06)) # check equiv glmmPQL against Table 1 of Whiting et al. (2006) # (for coefficients of covariates only, not separate lizard effects) cf <- coef(summary(Whiting.model1b))[-(1:2),] expect_equal(unname(round(cf[, "Estimate"], 2)), c(-0.09, 0.34, -1.13, 0.19)) expect_equal(unname(round(cf[, "Std. 
Error"], 2)), c(0.03, 0.11, 0.49, 0.1)) # reported Z stat appear to be Chi-squared stat expect_equal(unname(round(cf[, "z value"]^2, 1)), c(10.3, 9.5, 5.2, 3.6), tol = 1e-1) expect_equal(unname(signif(cf[, "Pr(>|z|)"], 1)), c(0.001, 0.002, 0.02, 0.06)) }) test_that("GLMM models as expected on flatlizards", { ## The estimated coefficients (of throat.PC1, throat.PC3, ## head.length and SVL are not changed substantially by ## the recognition of an error term in the model cf <- coef(summary(Whiting.model1b))[-(1:2),] cf2 <- summary(Whiting.model2)$fixef[-(1:2),] expect_equal(cf[, "Estimate"], cf2[, "Estimate"], tol = 0.5) ## but the estimated ## standard errors are larger, as expected. The main conclusions from ## Whiting et al. (2006) are unaffected. expect_true(all(cf2[, "Std. Error"] > cf[, "Std. Error"])) ## Modulo the usual scale change between logit and probit, the results ## are (as expected) very similar to Whiting.model2. cf3 <- summary(Whiting.model3)$fixef[-(1:2),] expect_equal(unname(cf2[, "Estimate"]/cf3[, "Estimate"]), rep(1.6, 4), tol = 0.1) ## drop lizard 996as coef not estimable !! should be 96 abilities <- BTabilities(Whiting.model3)[-55,] expect_known_value(abilities, file = test_path("outputs/flatlizards-abilities.rds"), tol = tol) resids <- residuals(Whiting.model3, "grouped") expect_known_value(resids, file = test_path("outputs/flatlizards-residuals.rds"), tol = tol) }) BradleyTerry2/tests/testthat/test-BTabilities.R0000644000176200001440000000400514775237530021255 0ustar liggesuserscontext("implementation [BTabilities]") # citations data ## Convert frequencies to success/failure data citations.sf <- countsToBinomial(citations) names(citations.sf)[1:2] <- c("journal1", "journal2") ## First fit the "standard" Bradley-Terry model citeModel <- BTm(cbind(win1, win2), journal1, journal2, data = citations.sf) ## Now the same thing with a different "reference" journal citeModel2 <- update(citeModel, refcat = "JASA") test_that("BTabilities works with changing refcat", { # standard model abilities1 <- BTabilities(citeModel) abilities2 <- BTabilities(citeModel2) ## check abilities expect_equal(abilities2[, "ability"], abilities1[, "ability"] - abilities1["JASA", "ability"]) ## check standard errors M <- diag(4) M[3, ] <- -1 M[, 3] <- 0 V <- cbind(0, rbind(0, vcov(citeModel))) expect_equal(unname(abilities2[, "s.e."]), sqrt(diag(t(M) %*% V %*% M))) }) test_that("BTabilities works with sum to zero contrasts", { # specify contrasts via contrast arg mod3 <- BTm(cbind(win1, win2), journal1, journal2, ~ journal, id = "journal", x = FALSE, contrasts = list(journal = "contr.sum"), data = citations.sf) # or as attribute of factors citations.sf$journal1 <- C(citations.sf$journal1, "contr.sum") citations.sf$journal2 <- C(citations.sf$journal2, "contr.sum") mod3b <- BTm(cbind(win1, win2), journal1, journal2, ~ journal, id = "journal", x = FALSE, data = citations.sf) # results should be the same expect_equivalent(BTabilities(mod3), BTabilities(mod3b)) # check vs deriving from model based on treatment contrasts M <- matrix(- 1/4, nrow = 4, ncol = 4) diag(M) <- 1 - 1/4 expect_equivalent(BTabilities(mod3)[, "ability"], BTabilities(citeModel)[, "ability"] %*% M) V <- cbind(0, rbind(0, vcov(citeModel))) expect_equivalent(BTabilities(mod3)[, "s.e."], sqrt(diag(t(M) %*% V %*% M))) }) BradleyTerry2/tests/testthat/outputs/0000755000176200001440000000000014775237530017506 5ustar liggesusersBradleyTerry2/tests/testthat/outputs/drop1.rds0000644000176200001440000000034214775237530021244 0ustar 
liggesusers
[Binary payloads omitted: the files under tests/testthat/outputs/ (drop1.rds, nested.rds,
flatlizards-pred1-rainy.rds, flatlizards-BTmodel.rds, flatlizards-residuals.rds,
flatlizards-pred0-rainy.rds, flatlizards-pred0-new.rds, flatlizards-abilities.rds, add1.rds)
are serialized R objects used as reference outputs by the expect_known_value() checks in the
testthat suite. Their contents are not human-readable; the files are listed in the MD5 index below.]
BradleyTerry2/tests/testthat/test-nested.R0000644000176200001440000000354014775237530020347 0ustar liggesusers
context("bugs [nested model calls]")

tol <- 1e-6

## nested use of BTm (in response to Jing Hua Zhao's bug report)

## example data
x <- matrix(c(0,0, 0, 2, 0,0, 0, 0, 0, 0, 0, 0,
              0,0, 1, 3, 0,0, 0, 2, 3, 0, 0, 0,
              2,3,26,35, 7,0, 2,10,11, 3, 4, 1,
              2,3,22,26, 6,2, 4, 4,10, 2, 2, 0,
              0,1, 7,10, 2,0, 0, 2, 2, 1, 1, 0,
              0,0, 1, 4, 0,1, 0, 1, 0, 0, 0, 0,
              0,2, 5, 4, 1,1, 0, 0, 0, 2, 0, 0,
              0,0, 2, 6, 1,0, 2, 0, 2, 0, 0, 0,
              0,3, 6,19, 6,0, 0, 2, 5, 3, 0, 0,
              0,0, 3, 1, 1,0, 0, 0, 1, 0, 0, 0,
              0,0, 0, 2, 0,0, 0, 0, 0, 0, 0, 0,
              0,0, 1, 0, 0,0, 0, 0, 0, 0, 0, 0), nrow = 12)
colnames(x) <- 1:12
rownames(x) <- 1:12

## function calling BTm, based on data created in function
fun1 <- function(x) {
    c2b <- countsToBinomial(x)
    names(c2b) <- c("allele1", "allele2", "transmitted", "nontransmitted")
    btx <- BTm(cbind(transmitted, nontransmitted), allele1, allele2,
               ~allele, id = "allele", data = c2b)
}

## function calling BTm, based on data and variables created in function
fun2 <- function(x) {
    c2b <- countsToBinomial(x)
    names(c2b) <- c("allele1", "allele2", "transmitted", "nontransmitted")
    denom <- with(c2b, transmitted + nontransmitted)
    outcome <- with(c2b, transmitted/denom)
    btx <- BTm(outcome, allele1, allele2, ~allele, id = "allele",
               weights = denom, data = c2b)
}

test_that("nested call to BTm works", {
    # ignore family: mode of initialize changes between R versions
    res <- fun1(x)
    res$family <- NULL
    expect_known_value(res, file = test_path("outputs/nested.rds"),
                       tol = tol)
    res2 <- fun2(x)
    res2$family <- NULL
    nm <- setdiff(names(res), c("call", "model"))
    expect_equal(res[nm], res2[nm])
})
BradleyTerry2/tests/testthat/test-predict.R0000644000176200001440000002311114775237530020513 0ustar liggesusers
context("methods [predict]")

tol <- 1e-6

## some awkward cases for predict
## (in response to bug reports
from Arthur Spirling and Fonti Kar) ## Case 1: The final model in example(flatlizards) Whiting.model3 <- BTm(1, winner, loser, ~ throat.PC1[..] + throat.PC3[..] + head.length[..] + SVL[..] + (1|..), family = binomial(link = "probit"), data = flatlizards) ## add new lizard (54, 59) lev <- c(levels(flatlizards$contests$winner), "lizard054", "lizard059") ## add features for new lizards (excluding factor variables for convenience) ## 59 has missing values for some model predictors features <- rbind(flatlizards$predictors[, -c(1,18)], c(1.5, 1.5, 1.5, -.2, 3, 1, -1, -1.5, -1.5, 250, 2000, 1, 0.1, 0.2, 0.5, -0.2), c(NA, 1.5, 1.5, -.2, 3, 1, -1, -1.5, -1.5, 250, 2000, 1, 0.1, 0.2, 0.5, -0.2)) ## alternatively create new data just for lizards of interest: lev must match lev2 <- c("lizard048", "lizard052", "lizard099", "lizard054", "lizard059") features2 <- rbind(flatlizards$predictors[c(27, 29, 56),-c(1,18) ], c(1.5, 1.5, 1.5, -.2, 3, 1, -1, -1.5, -1.5, 250, 2000, 1, 0.1, 0.2, 0.5, -0.2), c(NA, 1.5, 1.5, -.2, 3, 1, -1, -1.5, -1.5, 250, 2000, 1, 0.1, 0.2, 0.5, -0.2)) test_that("predict on original data same as original fit", { tmp <- predict(Whiting.model3) tmp2 <- predict(Whiting.model3, newdata = flatlizards) expect_identical(tmp, tmp2) }) test_that("predict works at level 0 only with new lizard", { newdata <- list(contests = data.frame(winner = factor("lizard054", levels = lev), loser = factor("lizard048", levels = lev)), predictors = features) pred0 <- predict(Whiting.model3, level = 0, se.fit = TRUE, newdata = newdata) expect_known_value(pred0, file = test_path("outputs/flatlizards-pred0-new.rds"), tol = tol) pred1 <- predict(Whiting.model3, level = 1, se.fit = TRUE, newdata = newdata) expect_true(all(is.na(pred1))) # use alternative newdata newdata <- list(contests = data.frame(winner = factor("lizard054", levels = lev2), loser = factor("lizard048", levels = lev2)), predictors = features2) pred0b <- predict(Whiting.model3, level = 0, se.fit = TRUE, newdata = newdata) pred1b <- predict(Whiting.model3, level = 1, se.fit = TRUE, newdata = newdata) expect_identical(pred0, pred0b) expect_identical(pred1, pred1b) }) test_that("predict works for original lizard with NA predictors", { newdata <- list(contests = data.frame(winner = factor("lizard099", levels = lev), loser = factor("lizard052", levels = lev)), predictors = features) # predict based on "new" data pred0a <- predict(Whiting.model3, level = 0, se.fit = TRUE, newdata = newdata) pred1a <- predict(Whiting.model3, level = 1, se.fit = TRUE, newdata = newdata) # should be same as original fit pred0b <- predict(Whiting.model3, level = 0, se.fit = TRUE) pred1b <- predict(Whiting.model3, level = 1, se.fit = TRUE) expect_equal(pred0a$fit, pred0b$fit[34]) expect_equal(pred0a$se.fit, pred0b$se.fit[34]) expect_equal(pred1a$fit, pred1b$fit[34]) expect_equal(pred1a$se.fit, pred1b$se.fit[34]) # use alternative newdata newdata <- list(contests = data.frame(winner = factor("lizard099", levels = lev2), loser = factor("lizard052", levels = lev2)), predictors = features2) pred0b <- predict(Whiting.model3, level = 0, se.fit = TRUE, newdata = newdata) pred1b <- predict(Whiting.model3, level = 1, se.fit = TRUE, newdata = newdata) expect_identical(pred0a, pred0b) expect_identical(pred1a, pred1b) }) test_that("predict respects na.action for new lizard with NA", { newdata <- list(contests = data.frame(winner = factor(c("lizard099", "lizard059"), levels = lev), loser = factor(c("lizard052", "lizard048"), levels = lev)), predictors = features) # keep NA where 
prediction not possible (due to NAs in predictors) pred_na_pass <- predict(Whiting.model3, level = 0:1, se.fit = TRUE, newdata = newdata, na.action = na.pass) # predictions for contest 1 should be as original fit, contest 2 NA pred <- predict(Whiting.model3, level = 0:1, se.fit = TRUE) expect_equal(pred_na_pass$population$fit[1], pred$population$fit[34]) expect_equal(pred_na_pass$population$se.fit[1], pred$population$se.fit[34]) expect_equal(pred_na_pass$individual$fit[1], pred$individual$fit[34]) expect_equal(pred_na_pass$individual$se.fit[1], pred$individual$se.fit[34]) expect_true(all(is.na(c(pred_na_pass$population$fit[2], pred_na_pass$population$se.fit[2], pred_na_pass$individual$fit[2], pred_na_pass$individual$se.fit[2])))) # remove NA with na.omit pred_na_omit <- predict(Whiting.model3, level = 0:1, se.fit = TRUE, newdata = newdata, na.action = na.omit) expect_equal(pred_na_pass$population$fit[1], pred_na_omit$population$fit[1]) expect_equal(pred_na_pass$population$se.fit[1], pred_na_omit$population$se.fit[1]) expect_equal(pred_na_pass$individual$fit[1], pred_na_omit$individual$fit[1]) expect_equal(pred_na_pass$individual$se.fit[1], pred_na_omit$individual$se.fit[1]) # use alternative newdata newdata <- list(contests = data.frame(winner = factor(c("lizard099", "lizard059"), levels = lev2), loser = factor(c("lizard052", "lizard048"), levels = lev2)), predictors = features2) pred_na_pass2 <- predict(Whiting.model3, level = 0:1, se.fit = TRUE, newdata = newdata, na.action = na.pass) pred_na_omit2 <- predict(Whiting.model3, level = 0:1, se.fit = TRUE, newdata = newdata, na.action = na.omit) expect_identical(pred_na_pass, pred_na_pass2) expect_identical(pred_na_omit, pred_na_omit2) }) ## Case 2: model in which some parameters are inestimable, e.g. contest-level ## predictor that is same for both players (interactions may be of interest in ## practice) ### set seed for consistency with historical results ### (when sampling predictor values for new hypothetical lizards) suppressWarnings(RNGversion("2.10")) set.seed(1) flatlizards$contests$rainy <- sample(c(0, 1), nrow(flatlizards$contests), replace = TRUE) ### "rainy" main effect is inestimable example.model <- BTm(1, winner, loser, ~ rainy + throat.PC1[..] + throat.PC3[..] + head.length[..] + SVL[..] 
+ (1|..), family = binomial(link = "probit"), data = flatlizards) ## create data for 4 new lizards (sample data from of old lizards) lev <- c("lizard100", "lizard101", "lizard102", "lizard103") newdata <- list(contests = data.frame( rainy = c(0, 1), winner = factor(c("lizard100", "lizard101"), levels = lev), loser = factor(c("lizard103", "lizard102"), levels = lev)), predictors = as.data.frame(lapply(flatlizards$predictors, sample, 4))) # or new data for 4 old lizards id <- 5:8 lev <- paste0("lizard0", 10:13) newcontests <- list(contests = data.frame( rainy = c(0, 1), winner = factor(c("lizard010", "lizard013"), levels = lev), loser = factor(c("lizard012", "lizard011"), levels = lev)), predictors = flatlizards$predictors[id,]) test_that("predict as expected for model with inestimable par", { ## no se pred0a <- predict(example.model, level = 0) pred1a <- predict(example.model, level = 1) ## with se pred0b <- predict(example.model, level = 0, se.fit = TRUE) pred1b <- predict(example.model, level = 1, se.fit = TRUE) ## predictions (fitted values) are the same expect_equal(pred0a, pred0b$fit) expect_equal(pred1a, pred1b$fit) }) test_that("predict works for unknown lizards at level 0 only", { pred0 <- predict(example.model, level = 0, newdata = newdata, type = "response", se.fit = TRUE) expect_known_value(pred0, file = test_path("outputs/flatlizards-pred0-rainy.rds"), tol = tol) pred1 <- predict(example.model, level = 1, newdata = newdata, type = "response", se.fit = TRUE) expect_true(all(is.na(unlist(pred1)))) }) test_that("predict works for known lizards at level 1", { pred1 <- predict(example.model, level = 1, newdata = newcontests, type = "response", se.fit = TRUE) expect_known_value(pred1, file = test_path("outputs/flatlizards-pred1-rainy.rds"), tol = tol) })BradleyTerry2/tests/testthat.R0000644000176200001440000000010514775237530016102 0ustar liggesuserslibrary(testthat) library(BradleyTerry2) test_check("BradleyTerry2")BradleyTerry2/MD50000644000176200001440000001302714776015014013264 0ustar liggesusers6250aaafb7aca6a63ee80d5ce551e08f *DESCRIPTION b4a60f3ba1f9224eddfa1ddec28cf5eb *NAMESPACE 39d3ab4bbdf781c6af8ae3258dcb4d98 *NEWS.md 08172d78e5c312907f6618a33bfdce6b *R/BTabilities.R 107896cd786f75acde51cdf613c7940a *R/BTm.R 964b111982941af22b4f7d229764a4d7 *R/BTm.setup.R 2d4943321a42fc96c20125c78f43ffc3 *R/CEMS.R 27e525264e7368f50cf3d0b150ad4136 *R/Diff.R 070b197dc1b4c515ee22b614d15b98f7 *R/GenDavidson.R 02c0e53cac09c2faaa218c44faaf808f *R/add1.BTm.R e704869faf47311257e4e190ab534c27 *R/anova.BTm.R eb276481eb9605e5dd94d9b2a79fd0e5 *R/anova.BTmlist.R ff3dc98281b3017bf721a018da9a90ce *R/baseball.R a9a4924b10faa5b2d70882d026ffc52d *R/chameleons.R 030b0ae34e782887c2fdaa60678af4d4 *R/citations.R a8c757bfd838de9a258d1cff69d2c8da *R/countsToBinomial.R cb77a329168157872f1083cb8fe7bef1 *R/drop1.BTm.R 2a1f8f25816edf6fa096b9c14d711d69 *R/flatlizards.R 7a5e66bf9864169a9b4f30eb4740c55c *R/football.R 4a6ead5401d25a3e6a2dfdf0536748e1 *R/formula.BTm.R 10d3a2f3229a4574cd7ae097df63acb1 *R/glmmPQL.R 3fa9494ca70fc2767ec48e34d9c5be11 *R/glmmPQL.control.R 9be3697e0db9dc0b7d4fbf1590fe4e45 *R/glmmPQL.fit.R cb08555cdad1523dec73d1ae2aa017f8 *R/icehockey.R ad1d3edac7e8613101ee16ad0493d223 *R/missToZero.R f2ffb551a8d3250eb7e83bd94ad9a0d1 *R/model.matrix.BTm.R af3b0525d78eddd70e5cfdca89b5365f *R/plotProportions.R 11964e277d0442dfde1ffd9a563f79f3 *R/predict.BTglmmPQL.R 8ebbe84cc6f8fd4b1c14518ce31b93f3 *R/predict.BTm.R 93e6029dccb86b3bebee46f628cf9d58 *R/print.BTglmmPQL.R 1b35359cd5a9cee5c8850a00793c5cf2 *R/print.BTm.R 
89eacfd76292016f6cc3d1e678d7d9e5 *R/print.summary.glmmPQL.R 5c5227f4048a5cb5b16e5689877c1de8 *R/qvcalc.BTabilities.R 9f32c8a38dcb0c958ee1c6c3f66c2b37 *R/residuals.BTm.R 55edfaa680490009b1582200d3656842 *R/seeds.R 6e7c170e459be043d324e46c58c58e12 *R/sound.fields.R 7c33db37fe53efdd1c6906c57c23d7d3 *R/springall.R 02998d4e723e28bbe9a2fe84500cec03 *R/summary.BTglmmPQL.R 694c54f87587c2d001e7aac9f76bc069 *R/vcov.BTglmmPQL.R 2ef48c27d37224140bf6b564ff00db04 *README.md b52aabe9a2757fc86742763db9dafc9b *build/partial.rdb 2733a4d884d0532490856cfea1ca084c *build/vignette.rds 1fc50a457f1d470b3240c6ad95bf6395 *data/CEMS.R ee0852f01caba3acc47ed1a8267062e8 *data/baseball.RData 574871743191288d58e87fdfc59fd2ae *data/chameleons.R af6fdfda88e19552edeeca8dbde50e0a *data/citations.R a8402c784331c5e6a14ded61b6d615e3 *data/flatlizards.rda 8aba0268f583bf5e16c86f094e7747e4 *data/football.RData 2984f5baded99506531e620f3608707f *data/icehockey.R 014c49b99c7580d80c4d6166b79b6c70 *data/seeds.R 7a61b76e0536fe9783aa057f7500df57 *data/sound.fields.R 266b9521161753354473b71fcade4561 *data/springall.R 2ec1296232ab80dcfab843659d294e6e *inst/CITATION e56f9affb8b951a02e7babf573beb1f6 *inst/WORDLIST 9e253b289a7e524fda128dcc905741cf *inst/doc/BradleyTerry.R 24843ba5202fe5611ad1684093decad8 *inst/doc/BradleyTerry.Rmd 87389a72580deb2339eb6cb37fa68afd *inst/doc/BradleyTerry.html 87ea788b9784b38e7103b167a4195b30 *man/BTabilities.Rd 9aeb1bfa3de2d35b0daa8f0c8fbbf527 *man/BTm.Rd 26efc898148baafa69f1fdeebb1c0e98 *man/CEMS.Rd 8c00f3eddac308814830aa66fc669abd *man/GenDavidson.Rd 450525be8b6f142ae7455e4e4a1cd1bd *man/add1.BTm.Rd 5af0710a5615975e8c2b73b1583fb32c *man/anova.BTm.Rd 096cfc5939f4bea2441fac1204a4b1b7 *man/baseball.Rd 36291d49dc0d9c27b64d01437948be98 *man/chameleons.Rd 86a5a11f3e456f626bac932364be0f13 *man/citations.Rd b605365c6595a030d90ca10684c073a9 *man/countsToBinomial.Rd 5c2d4aadc913523f6ef4038b74b0fb71 *man/flatlizards.Rd f130fbcc7e5353fb57a19164c80bc236 *man/football.Rd 73f86de43d43c70621a053530568c114 *man/glmmPQL.Rd 874fca5df890a4b3cb532cabf41007a0 *man/glmmPQL.control.Rd 28e9cbd4f44331d9f248f08c4c344987 *man/icehockey.Rd 2b61796e183a1c7ff5e7cd212ee597a3 *man/plotProportions.Rd a047b0eb282a27884c2eaff1eb8cd8f9 *man/predict.BTglmmPQL.Rd cf94eba0b718e60aa4d09564d61babb2 *man/predict.BTm.Rd 6b5e1e4d6beaa1c8a99cff912cf54b65 *man/qvcalc.BTabilities.Rd be513ac5a3744266c73c7d5df116bf24 *man/reexports.Rd f5114d52b31ea2b9b384e51545fc4764 *man/residuals.BTm.Rd 7a3a7bb54cfa07bd3705b669eb129678 *man/seeds.Rd 3b2debe60a63e00175db9c68ab2eab32 *man/sound.fields.Rd 1a3eeaea8d0d663d5481157a5e0f1f2c *man/springall.Rd 73e9893b10fbc3398cde605aa8b72700 *tests/old-tests/old-tests.R 9d36a66f3b206c50b0f8a2c1e3b8c7af *tests/testthat.R 90adc2a0be1f680c9b7980b6f25b946c *tests/testthat/outputs/add1.rds 231dcb947ba04dc567bc413eaea85acb *tests/testthat/outputs/drop1.rds 7734cfbb7e08cb5c7eeec4a21364ce95 *tests/testthat/outputs/flatlizards-BTmodel.rds 113d09ced58b8d3ca20df33a7ce6961e *tests/testthat/outputs/flatlizards-abilities.rds 74ee6523760d9d36889a0c3547ab0e86 *tests/testthat/outputs/flatlizards-pred0-new.rds 8ebe8e3e0f7a0c6b35311281e8f3a337 *tests/testthat/outputs/flatlizards-pred0-rainy.rds 0d52877d133a04f4343d179821c3b4b3 *tests/testthat/outputs/flatlizards-pred1-rainy.rds d62e73c0193ba36d3ce2a2acc8848ccb *tests/testthat/outputs/flatlizards-residuals.rds b01943e351cb7283ef994ecc9abfdd31 *tests/testthat/outputs/nested.rds d1ebf343c4e9c18e129047121f2b9827 *tests/testthat/test-BTabilities.R 74339bbeb85313d50d585541f1c53de6 
*tests/testthat/test-add1-drop1.R 872dc7aa91d4ba22ac400b0f4db69d59 *tests/testthat/test-baseball.R e4d286a3a67060f9fcd78e362e27d512 *tests/testthat/test-countsToBinomial.R 1920c8c83b5a4a6b9724e40d8c8b5e14 *tests/testthat/test-flatlizards.R c72cb1fc790bf458ccc50875f27e4f17 *tests/testthat/test-nested.R c7ef183aea7fa1c9b9183a6a3d7dacdb *tests/testthat/test-predict.R 24843ba5202fe5611ad1684093decad8 *vignettes/BradleyTerry.Rmd 342f5789fccddd1b250a75c23e78937e *vignettes/BradleyTerry.bib 77dcb58d140a7a6e8738209b5288d75b *vignettes/baseball-qvplot.png fd3a4171daed11f93b6f05b5f979cee6 *vignettes/residuals.png BradleyTerry2/R/0000755000176200001440000000000014775676207013172 5ustar liggesusersBradleyTerry2/R/anova.BTmlist.R0000644000176200001440000000661414775237530015775 0ustar liggesusers#' @importFrom stats coef fitted formula na.omit pchisq pf terms vcov anova.BTmlist <- function (object, ..., dispersion = NULL, test = NULL) { ## Pass on if no random effects fixed <- unlist(lapply(object, function(x) is.null(x$random))) if (!all(!fixed)) stop("Models must have the same random effects structure") responses <- as.character(lapply(object, function(x) { deparse(formula(terms(x))[[2]]) })) sameresp <- responses == responses[1] if (!all(sameresp)) { object <- object[sameresp] warning("models with response ", deparse(responses[!sameresp]), " removed because response differs from model 1") } ns <- vapply(object, function(x) length(fitted(x)), numeric(1)) if (any(ns != ns[1])) stop("models were not all fitted to the same size of dataset") nmodels <- length(object) ncoefs <- vapply(object, function(x) length(na.omit(coef(x))), numeric(1)) #omit aliased labels <- lapply(object, function(x) x$term.labels) stat <- numeric(nmodels) for (i in 2:nmodels) { descending <- ncoefs[i] < ncoefs[i - 1] bigger <- i - descending smaller <- i - !descending if (!all(labels[[smaller]] %in% labels[[bigger]])) stop("models are not nested") term.ind <- !(labels[[bigger]] %in% labels[[smaller]]) ind <- object[[bigger]]$assign %in% which(term.ind) stat[i] <- t(coef(object[[bigger]])[ind]) %*% chol2inv(chol(vcov(object[[bigger]], dispersion = dispersion)[ind, ind])) %*% coef(object[[bigger]])[ind] #vcov should handle dispersion != 1 } stat[1] <- NA table <- data.frame(stat, c(NA, diff(ncoefs))) variables <- lapply(object, function(x) paste(deparse(formula(x)), collapse = "\n")) dimnames(table) <- list(1:nmodels, c("Statistic", "Df")) title <- paste("Sequential Wald Tests\n\n", "Response: ", responses[1], "\n", sep = "") topnote <- paste("Model ", format(1:nmodels), ": ", variables, sep = "", collapse = "\n") if (!is.null(test)) { ## Assume dispersion fixed at one - if dispersion estimated, would use ## "residual" df from larger model in each comparison df.dispersion <- Inf if (test == "F" && df.dispersion == Inf) { fam <- object[[1]]$family$family if (fam == "binomial" || fam == "poisson") warning(gettextf( "using F test with a '%s' family is inappropriate", fam), domain = NA, call. 
= FALSE) else { warning("using F test with a fixed dispersion is inappropriate") } } table <- switch(test, Chisq = { dfs <- table[, "Df"] vals <- table[, "Statistic"] vals[dfs %in% 0] <- NA cbind(table, `P(>|Chi|)` = pchisq(vals, abs(dfs), lower.tail = FALSE)) }, F = { dfs <- table[, "Df"] Fvalue <- table[, "Statistic"]/abs(dfs) Fvalue[dfs %in% 0] <- NA cbind(table, F = Fvalue, `Pr(>F)` = pf(Fvalue, abs(dfs), df.dispersion, lower.tail = FALSE)) }) } structure(table, heading = c(title, topnote), class = c("anova", "data.frame")) } BradleyTerry2/R/CEMS.R0000755000176200001440000001612214775676207014041 0ustar liggesusers#' Dittrich, Hatzinger and Katzenbeisser (1998, 2001) Data on Management School #' Preference in Europe #' #' *Community of European management schools* (CEMS) data as used in the #' paper by Dittrich et al. (1998, 2001), re-formatted for use with #' [BTm()] #' #' The variables `win1.adj` and `win2.adj` are provided in order to #' allow a simple way of handling ties (in which a tie counts as half a win and #' half a loss), which is slightly different numerically from the Davidson #' (1970) method that is used by Dittrich et al. (1998): see the examples. #' #' @name CEMS #' @docType data #' @format A list containing three data frames, `CEMS$preferences`, #' `CEMS$students` and `CEMS$schools`. #' #' The `CEMS$preferences` data frame has `303 * 15 = 4545` #' observations (15 possible comparisons, for each of 303 students) on the #' following 8 variables: \describe{ #' \item{student}{a factor with #' levels `1:303`} #' \item{school1}{a factor with levels #' `c("Barcelona", "London", "Milano", "Paris", "St.Gallen", #' "Stockholm")`; the first management school in a comparison} #' \item{school2}{a factor with the same levels as `school1`; the #' second management school in a comparison} #' \item{win1}{integer (value #' 0 or 1) indicating whether `school1` was preferred to `school2`} #' \item{win2}{integer (value 0 or 1) indicating whether `school2` #' was preferred to `school1`} #' \item{tied}{integer (value 0 or 1) #' indicating whether no preference was expressed} #' \item{win1.adj}{numeric, equal to `win1 + tied/2`} #' \item{win2.adj}{numeric, equal to `win2 + tied/2`} } #' #' The `CEMS$students` data frame has 303 observations (one for each #' student) on the following 8 variables: \describe{ #' \item{STUD}{a #' factor with levels `c("other", "commerce")`, the student's main #' discipline of study} #' \item{ENG}{a factor with levels `c("good, #' poor")`, indicating the student's knowledge of English} #' \item{FRA}{a #' factor with levels `c("good, poor")`, indicating the student's #' knowledge of French} #' \item{SPA}{a factor with levels `c("good, #' poor")`, indicating the student's knowledge of Spanish} #' \item{ITA}{a #' factor with levels `c("good, poor")`, indicating the student's #' knowledge of Italian} #' \item{WOR}{a factor with levels `c("no", #' "yes")`, whether the student was in full-time employment while studying} #' \item{DEG}{a factor with levels `c("no", "yes")`, whether the #' student intended to take an international degree} #' \item{SEX}{a #' factor with levels `c("female", "male")` } } #' #' The `CEMS$schools` data frame has 6 observations (one for each #' management school) on the following 7 variables: \describe{ #' \item{Barcelona}{numeric (value 0 or 1)} #' \item{London}{numeric (value 0 or 1)} #' \item{Milano}{numeric #' (value 0 or 1)} \item{Paris}{numeric (value 0 or 1)} #' \item{St.Gallen}{numeric (value 0 or 1)} #' \item{Stockholm}{numeric (value
0 or 1)} #' \item{LAT}{numeric #' (value 0 or 1) indicating a 'Latin' city} } #' @author David Firth #' @references Davidson, R. R. (1970) Extending the Bradley-Terry model to #' accommodate ties in paired comparison experiments. *Journal of the #' American Statistical Association* **65**, 317--328. #' #' Dittrich, R., Hatzinger, R. and Katzenbeisser, W. (1998) Modelling the #' effect of subject-specific covariates in paired comparison studies with an #' application to university rankings. *Applied Statistics* **47**, #' 511--525. #' #' Dittrich, R., Hatzinger, R. and Katzenbeisser, W. (2001) Corrigendum: #' Modelling the effect of subject-specific covariates in paired comparison #' studies with an application to university rankings. *Applied #' Statistics* **50**, 247--249. #' #' Turner, H. and Firth, D. (2012) Bradley-Terry models in R: The BradleyTerry2 #' package. *Journal of Statistical Software*, **48**(9), 1--21. #' @source Royal Statistical Society datasets website, at #' \url{https://rss.onlinelibrary.wiley.com/hub/journal/14679876/series-c-datasets/pre_2016}. #' @keywords datasets #' @examples #' #' ## #' ## Fit the standard Bradley-Terry model, using the simple 'add 0.5' #' ## method to handle ties: #' ## #' table3.model <- BTm(outcome = cbind(win1.adj, win2.adj), #' player1 = school1, player2 = school2, #' formula = ~.. , refcat = "Stockholm", #' data = CEMS) #' ## The results in Table 3 of Dittrich et al (2001) are reproduced #' ## approximately by a simple re-scaling of the estimates: #' table3 <- summary(table3.model)$coef[, 1:2]/1.75 #' print(table3) #' ## #' ## Now fit the 'final model' from Table 6 of Dittrich et al.: #' ## #' table6.model <- BTm(outcome = cbind(win1.adj, win2.adj), #' player1 = school1, player2 = school2, #' formula = ~ .. + #' WOR[student] * Paris[..] + #' WOR[student] * Milano[..] + #' WOR[student] * Barcelona[..] + #' DEG[student] * St.Gallen[..] + #' STUD[student] * Paris[..] + #' STUD[student] * St.Gallen[..] + #' ENG[student] * St.Gallen[..] + #' FRA[student] * London[..] + #' FRA[student] * Paris[..] + #' SPA[student] * Barcelona[..] + #' ITA[student] * London[..] + #' ITA[student] * Milano[..] + #' SEX[student] * Milano[..], #' refcat = "Stockholm", #' data = CEMS) #' ## #' ## Again re-scale to reproduce approximately Table 6 of Dittrich et #' ## al. (2001): #' ## #' table6 <- summary(table6.model)$coef[, 1:2]/1.75 #' print(table6) #' ## #' \dontrun{ #' ## Now the slightly simplified model of Table 8 of Dittrich et al. (2001): #' ## #' table8.model <- BTm(outcome = cbind(win1.adj, win2.adj), #' player1 = school1, player2 = school2, #' formula = ~ .. + #' WOR[student] * LAT[..] + #' DEG[student] * St.Gallen[..] + #' STUD[student] * Paris[..] + #' STUD[student] * St.Gallen[..] + #' ENG[student] * St.Gallen[..] + #' FRA[student] * London[..] + #' FRA[student] * Paris[..] + #' SPA[student] * Barcelona[..] + #' ITA[student] * London[..] + #' ITA[student] * Milano[..] + #' SEX[student] * Milano[..], #' refcat = "Stockholm", #' data = CEMS) #' table8 <- summary(table8.model)$coef[, 1:2]/1.75 #' ## #' ## Notice some larger than expected discrepancies here (the coefficients #' ## named "..Barcelona", "..Milano" and "..Paris") from the results in #' ## Dittrich et al. (2001). Apparently a mistake was made in Table 8 of #' ## the published Corrigendum note (R. Dittrich personal communication, #' ## February 2010). 
#' ## #' print(table8) #' } #' "CEMS" BradleyTerry2/R/drop1.BTm.R0000644000176200001440000000733614775237530015024 0ustar liggesusers#' @importFrom stats coef drop.scope model.matrix formula pchisq pf terms update.formula vcov #' @export drop1.BTm <- function(object, scope, scale = 0, test = c("none", "Chisq", "F"), ...) { x <- model.matrix(object) ## Pass on if no random effects if (is.null(object$random)){ object$x <- x attr(object$x, "assign") <- object$assign object$terms <- terms(object$formula) return(NextMethod()) } form <- formula(object) if (missing(scope)) scope <- drop.scope(nobars(form)) else { if (!is.character(scope)) { srandom <- findbars(scope[[2]]) if (length(srandom)) stop("Scope should not include random effects.") scope <- attr(terms(update.formula(form, scope)), "term.labels") } if (!all(match(scope, terms(form), 0L) > 0L)) stop("scope is not a subset of term labels") } asgn <- object$assign coefs <- coef(object) if (scale == 0) dispersion <- 1 else dispersion <- scale vc <- vcov(object, dispersion = dispersion) #vcov should handle disp != 1 sTerms <- vapply(strsplit(scope, ":", fixed = TRUE), function(x) paste(sort(x), collapse = ":"), character(1)) stat <- df <- numeric(length(scope)) names(stat) <- names(df) <- as.character(lapply(scope, as.name)) tryerror <- FALSE for (i in seq(scope)) { stt <- paste(sort(strsplit(scope[i], ":")[[1]]), collapse = ":") usex <- match(asgn, match(stt, sTerms), 0) > 0 trystat <- try(t(coefs[usex]) %*% chol2inv(chol(vc[usex, usex])) %*% coefs[usex], silent = TRUE) if (inherits(trystat, "try-error")) { stat[i] <- df[i] <- NA tryerror <- TRUE } else { stat[i] <- trystat df[i] <- sum(usex) } } table <- data.frame(stat, df) dimnames(table) <- list(names(df), c("Statistic", "Df")) title <- "Single term deletions\n" topnote <- gsub("\\s+", " ", paste("Model: ", paste(deparse(as.vector(formula(object))), collapse = ""), if (scale > 0) paste("\nscale: ", format(scale), "\n"), if (tryerror) "\n\nTest statistic unestimable for at least one term"), perl = TRUE) test <- match.arg(test) if (test == "Chisq") { dfs <- table[, "Df"] vals <- table[, "Statistic"] vals[dfs %in% 0] <- NA table <- cbind(table, `P(>|Chi|)` = pchisq(vals, abs(dfs), lower.tail = FALSE)) } else if (test == "F") { ## Assume dispersion fixed at one - if dispersion estimated, would use ## "residual" df from larger model in each comparison df.dispersion <- Inf if (df.dispersion == Inf) { fam <- object[[1]]$family$family if (fam == "binomial" || fam == "poisson") warning(gettextf("using F test with a '%s' family is ", "inappropriate", fam), domain = NA, call. = FALSE) else { warning("using F test with a fixed dispersion is inappropriate") } } dfs <- table[, "Df"] Fvalue <- table[, "Statistic"]/abs(dfs) Fvalue[dfs %in% 0] <- NA table <- cbind(table, F = Fvalue, `Pr(>F)` = pf(Fvalue, abs(dfs), df.dispersion, lower.tail = FALSE)) } structure(table, heading = c(title, topnote), class = c("anova", "data.frame")) } BradleyTerry2/R/GenDavidson.R0000644000176200001440000003117714775237530015517 0ustar liggesusers#' Specify a Generalised Davidson Term in a gnm Model Formula #' #' GenDavidson is a function of class `"nonlin"` to specify a generalised #' Davidson term in the formula argument to [gnm::gnm()], providing a #' model for paired comparison data where ties are a possible outcome. #' #' `GenDavidson` specifies a generalisation of the Davidson model (1970) #' for paired comparisons where a tie is a possible outcome. 
It is designed for
#' modelling trinomial counts corresponding to the win/draw/loss outcome for
#' each contest, which are assumed Poisson conditional on the total count for
#' each match. Since this total must be one, the expected counts are
#' equivalently the probabilities for each possible outcome, which are modelled
#' on the log scale:
#' \deqn{\log(p(i \textrm{ beats } j)_k) = \theta_{ijk} + \log(\mu\alpha_i)}{
#' log(p(i beats j)_k) = theta_{ijk} + log(mu * alpha_i)}
#' \deqn{\log(p(\textrm{draw})_k) = \theta_{ijk} + \delta + c +
#' \sigma(\pi\log(\mu\alpha_i) + (1 - \pi)\log(\alpha_j)) +
#' (1 - \sigma)\log(\mu\alpha_i + \alpha_j)}{
#' log(p(draw)_k) = theta_{ijk} + delta + c +
#' sigma * (pi * log(mu * alpha_i) + (1 - pi) * log(alpha_j)) +
#' (1 - sigma) * log(mu * alpha_i + alpha_j)}
#' \deqn{\log(p(j \textrm{ beats } i)_k) = \theta_{ijk} + \log(\alpha_j)}{
#' log(p(j beats i)_k) = theta_{ijk} + log(alpha_j)}
#' Here \eqn{\theta_{ijk}}{theta_{ijk}} is a structural parameter
#' to fix the trinomial totals; \eqn{\mu}{mu} is the home advantage parameter;
#' \eqn{\alpha_i}{alpha_i} and \eqn{\alpha_j}{alpha_j} are the abilities of
#' players \eqn{i} and \eqn{j} respectively; \eqn{c}{c} is a function of the
#' parameters such that \eqn{\textrm{expit}(\delta)}{plogis(delta)} is the
#' maximum probability of a tie; \eqn{\sigma}{sigma} scales the dependence of
#' the probability of a tie on the relative abilities; and \eqn{\pi}{pi} allows
#' for asymmetry in this dependence.
#'
#' For parameters that must be positive (\eqn{\alpha_i, \sigma, \mu}{alpha,
#' sigma, mu}), the log is estimated, while for parameters that must be between
#' zero and one (\eqn{\delta, \pi}{delta, pi}), the logit is estimated, as
#' illustrated in the example.
#'
#' @param win a logical vector: `TRUE` if player1 wins, `FALSE`
#' otherwise.
#' @param tie a logical vector: `TRUE` if the outcome is a tie,
#' `FALSE` otherwise.
#' @param loss a logical vector: `TRUE` if player1 loses, `FALSE`
#' otherwise.
#' @param player1 an ID factor specifying the first player in each contest,
#' with the same set of levels as `player2`.
#' @param player2 an ID factor specifying the second player in each contest,
#' with the same set of levels as `player1`.
#' @param home.adv a formula for the parameter corresponding to the home
#' advantage effect. If `NULL`, no home advantage effect is estimated.
#' @param tie.max a formula for the parameter corresponding to the maximum tie
#' probability.
#' @param tie.scale a formula for the parameter corresponding to the scale of
#' dependence of the tie probability on the probability that `player1`
#' wins, given the outcome is not a draw.
#' @param tie.mode a formula for the parameter corresponding to the location of
#' maximum tie probability, in terms of the probability that `player1`
#' wins, given the outcome is not a draw.
#' @param at.home1 a logical vector: `TRUE` if `player1` is at home,
#' `FALSE` otherwise.
#' @param at.home2 a logical vector: `TRUE` if `player2` is at home,
#' `FALSE` otherwise.
#' @return A list with the anticipated components of a "nonlin" function: #' \item{ predictors }{ the formulae for the different parameters and the ID #' factors for player 1 and player 2. } \item{ variables }{ the outcome #' variables and the \dQuote{at home} variables, if specified. } \item{ common #' }{ an index to specify that common effects are to be estimated for the #' players. } \item{ term }{ a function to create a deparsed mathematical #' expression of the term, given labels for the predictors.} \item{ start }{ a #' function to generate starting values for the parameters.} #' @author Heather Turner #' @seealso [football()], [plotProportions()] #' @references Davidson, R. R. (1970). On extending the Bradley-Terry model to #' accommodate ties in paired comparison experiments. *Journal of the #' American Statistical Association*, **65**, 317--328. #' @keywords models nonlinear #' @examples #' #' ### example requires gnm #' if (require(gnm)) { #' ### convert to trinomial counts #' football.tri <- expandCategorical(football, "result", idvar = "match") #' head(football.tri) #' #' ### add variable to indicate whether team playing at home #' football.tri$at.home <- !logical(nrow(football.tri)) #' #' ### fit shifted & scaled Davidson model #' ### - subset to first and last season for illustration #' shifScalDav <- gnm(count ~ #' GenDavidson(result == 1, result == 0, result == -1, #' home:season, away:season, home.adv = ~1, #' tie.max = ~1, tie.scale = ~1, tie.mode = ~1, #' at.home1 = at.home, #' at.home2 = !at.home) - 1, #' eliminate = match, family = poisson, data = football.tri, #' subset = season %in% c("2008-9", "2012-13")) #' #' ### look at coefs #' coef <- coef(shifScalDav) #' ## home advantage #' exp(coef["home.adv"]) #' ## max p(tie) #' plogis(coef["tie.max"]) #' ## mode p(tie) #' plogis(coef["tie.mode"]) #' ## scale relative to Davidson of dependence of p(tie) on p(win|not a draw) #' exp(coef["tie.scale"]) #' #' ### check model fit #' alpha <- names(coef[-(1:4)]) #' plotProportions(result == 1, result == 0, result == -1, #' home:season, away:season, #' abilities = coef[alpha], home.adv = coef["home.adv"], #' tie.max = coef["tie.max"], tie.scale = coef["tie.scale"], #' tie.mode = coef["tie.mode"], #' at.home1 = at.home, at.home2 = !at.home, #' data = football.tri, subset = count == 1) #' } #' #' ### analyse all five seasons #' ### - takes a little while to run, particularly likelihood ratio tests #' \dontrun{ #' ### fit Davidson model #' Dav <- gnm(count ~ GenDavidson(result == 1, result == 0, result == -1, #' home:season, away:season, home.adv = ~1, #' tie.max = ~1, #' at.home1 = at.home, #' at.home2 = !at.home) - 1, #' eliminate = match, family = poisson, data = football.tri) #' #' ### fit scaled Davidson model #' scalDav <- gnm(count ~ GenDavidson(result == 1, result == 0, result == -1, #' home:season, away:season, home.adv = ~1, #' tie.max = ~1, tie.scale = ~1, #' at.home1 = at.home, #' at.home2 = !at.home) - 1, #' eliminate = match, family = poisson, data = football.tri) #' #' ### fit shifted & scaled Davidson model #' shifScalDav <- gnm(count ~ #' GenDavidson(result == 1, result == 0, result == -1, #' home:season, away:season, home.adv = ~1, #' tie.max = ~1, tie.scale = ~1, tie.mode = ~1, #' at.home1 = at.home, #' at.home2 = !at.home) - 1, #' eliminate = match, family = poisson, data = football.tri) #' #' ### compare models #' anova(Dav, scalDav, shifScalDav, test = "Chisq") #' #' ### diagnostic plots #' main <- c("Davidson", "Scaled Davidson", "Shifted & Scaled Davidson") 
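#' ## collect the three fitted models in a named list, ready for the
#' ## diagnostic plots of observed vs fitted proportions below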
#' mod <- list(Dav, scalDav, shifScalDav) #' names(mod) <- main #' #' ## use football.tri data so that at.home can be found, #' ## but restrict to actual match results #' par(mfrow = c(2,2)) #' for (i in 1:3) { #' coef <- parameters(mod[[i]]) #' plotProportions(result == 1, result == 0, result == -1, #' home:season, away:season, #' abilities = coef[alpha], #' home.adv = coef["home.adv"], #' tie.max = coef["tie.max"], #' tie.scale = coef["tie.scale"], #' tie.mode = coef["tie.mode"], #' at.home1 = at.home, #' at.home2 = !at.home, #' main = main[i], #' data = football.tri, subset = count == 1) #' } #' } #' #' @importFrom stats coef plogis runif #' @export GenDavidson <- function(win, # TRUE/FALSE tie, # TRUE/FALSE loss, # TRUE/FALSE player1, # player1 in each contest player2, # ditto player2 home.adv = NULL, tie.max = ~1, tie.mode = NULL, tie.scale = NULL, at.home1 = NULL, at.home2 = NULL){ call <- as.expression(sys.call()[c(1,5:6)]) extra <- NULL if (is.null(tie.max)) stop("a formula must be specified for tie.max") if (!is.null(home.adv) & is.null(at.home1)) stop("at.home1 and at.home2 must be specified") has.home.adv <- !is.null(home.adv) has.tie.mode <- !is.null(tie.mode) has.tie.scale <- !is.null(tie.scale) if (has.home.adv) extra <- c(extra, list(home.adv = home.adv)) if (has.tie.mode) extra <- c(extra, list(tie.mode = tie.mode)) if (has.tie.scale) extra <- c(extra, list(tie.scale = tie.scale)) i <- has.home.adv + has.tie.mode + has.tie.scale a <- match("home.adv", names(extra), 1) b <- match("tie.mode", names(extra), 1) c <- match("tie.scale", names(extra), 1) adv <- has.home.adv | has.tie.mode list(predictors = {c(extra, list(tie.max = tie.max, substitute(player1), # player1 & 2 are homogeneous substitute(player2)))}, ## substitutes "result" for "outcome", but also substitutes all of ## code vector variables = {c(list(loss = substitute(loss), tie = substitute(tie), win = substitute(win)), list(at.home1 = substitute(at.home1), at.home2 = substitute(at.home2))[adv])}, common = c(1[has.home.adv], 2[has.tie.mode], 3[has.tie.scale], 4, 5, 5), term = function(predLabels, varLabels){ if (has.home.adv) { ability1 <- paste("(", predLabels[a], ") * ", varLabels[4], " + ", predLabels[i + 2], sep = "") ability2 <- paste("(", predLabels[a], ") * ", varLabels[5], " + ", predLabels[i + 3], sep = "") } else { ability1 <- predLabels[i + 2] ability2 <- predLabels[i + 3] } tie.scale <- ifelse(has.tie.scale, predLabels[c], 0) scale <- paste("exp(", tie.scale, ")", sep = "") if (has.tie.mode) { psi1 <- paste("exp((", predLabels[b], ") * ", varLabels[4], ")", sep = "") psi2 <- paste("exp((", predLabels[b], ") * ", varLabels[5], ")", sep = "") weight1 <- paste(psi1, "/(", psi1, " + ", psi2, ")", sep = "") weight2 <- paste(psi2, "/(", psi1, " + ", psi2, ")", sep = "") } else { weight1 <- weight2 <- "0.5" } nu <- paste(predLabels[i + 1], " - ", scale, " * (", weight1, " * log(", weight1, ") + ", weight2, " * log(", weight2, "))", sep = "") paste(varLabels[1], " * (", ability2, ") + ", varLabels[2], " * (", nu, " + ", scale, " * ", weight1, " * (", ability1, ") + ", scale, " * ", weight2, " * (", ability2, ") + ", "(1 - ", scale, ") * ", "log(exp(", ability1, ") + exp(", ability2, "))) + ", varLabels[3], " * (", ability1, ")", sep = "") }, start = function(theta) { init <- runif(length(theta)) - 0.5 init[c] <- 0.5 } ) } class(GenDavidson) <- "nonlin" BradleyTerry2/R/citations.R0000755000176200001440000000310214775237530015301 0ustar liggesusers#' Statistics Journal Citation Data from Stigler (1994) #' #' 
Extracted from a larger table in Stigler (1994). Inter-journal citation #' counts for four journals, \dQuote{Biometrika}, \dQuote{Comm Statist.}, #' \dQuote{JASA} and \dQuote{JRSS-B}, as used on p448 of Agresti (2002). #' #' In the context of paired comparisons, the \sQuote{winner} is the cited #' journal and the \sQuote{loser} is the one doing the citing. #' #' @name citations #' @docType data #' @format A 4 by 4 contingency table of citations, cross-classified by the #' factors `cited` and `citing` each with levels `Biometrika`, #' `Comm Statist`, `JASA`, and `JRSS-B`. #' @seealso [BTm()] #' @references Firth, D. (2005) Bradley-Terry models in R. *Journal of #' Statistical Software* **12**(1), 1--12. #' #' Turner, H. and Firth, D. (2012) Bradley-Terry models in R: The BradleyTerry2 #' package. *Journal of Statistical Software*, **48**(9), 1--21. #' #' Stigler, S. (1994) Citation patterns in the journals of statistics and #' probability. *Statistical Science* **9**, 94--108. #' @source Agresti, A. (2002) *Categorical Data Analysis* (2nd ed). New #' York: Wiley. #' @keywords datasets #' @examples #' #' ## Data as a square table, as in Agresti p448 #' citations #' #' ## #' ## Convert frequencies to success/failure data: #' ## #' citations.sf <- countsToBinomial(citations) #' names(citations.sf)[1:2] <- c("journal1", "journal2") #' #' ## Standard Bradley-Terry model fitted to these data #' citeModel <- BTm(cbind(win1, win2), journal1, journal2, #' data = citations.sf) #' "citations" BradleyTerry2/R/football.R0000755000176200001440000000431214775237530015112 0ustar liggesusers#' English Premier League Football Results 2008/9 to 2012/13 #' #' The win/lose/draw results for five seasons of the English Premier League #' football results, from 2008/9 to 2012/13 #' #' In each season, there are 20 teams, each of which plays one home game and #' one away game against all the other teams in the league. The results in 380 #' games per season. #' #' @name football #' @docType data #' @format A data frame with 1881 observations on the following 4 variables. #' \describe{ #' \item{season}{a factor with levels `2008-9`, #' `2009-10`, `2010-11`, `2011-12`, `2012-13`} #' \item{home}{a factor specifying the home team, with 29 levels #' `Ars` (Arsenal), ... , `Wol` (Wolverhampton)} #' \item{away}{a factor specifying the away team, with the same levels #' as `home`.} #' \item{result}{a numeric vector giving the result #' for the home team: 1 for a win, 0 for a draw, -1 for a loss.} } #' @seealso [GenDavidson()] #' @references Davidson, R. R. (1970). On extending the Bradley-Terry model to #' accommodate ties in paired comparison experiments. *Journal of the #' American Statistical Association*, **65**, 317--328. #' @source These data were downloaded from http://soccernet.espn.go.com in #' 2013. The site has since moved and the new site does not appear to have an #' equivalent source. 
#' @keywords datasets #' @examples #' #' ### example requires gnm #' if (require(gnm)) { #' ### convert to trinomial counts #' football.tri <- expandCategorical(football, "result", idvar = "match") #' head(football.tri) #' #' ### add variable to indicate whether team playing at home #' football.tri$at.home <- !logical(nrow(football.tri)) #' #' ### fit Davidson model for ties #' ### - subset to first and last season for illustration #' Davidson <- gnm(count ~ #' GenDavidson(result == 1, result == 0, result == -1, #' home:season, away:season, #' home.adv = ~1, tie.max = ~1, #' at.home1 = at.home, at.home2 = !at.home) - 1, #' eliminate = match, family = poisson, data = football.tri, #' subset = season %in% c("2008-9", "2012-13")) #' #' ### see ?GenDavidson for further analysis #' } #' "football" BradleyTerry2/R/chameleons.R0000755000176200001440000000707014775237530015432 0ustar liggesusers#' Male Cape Dwarf Chameleons: Measured Traits and Contest Outcomes #' #' Data as used in the study by Stuart-Fox et al. (2006). Physical #' measurements made on 35 male Cape dwarf chameleons, and the results of 106 #' inter-male contests. #' #' The published paper mentions 107 contests, but only 106 contests are #' included here. Contest number 16 was deleted from the data used to fit the #' models, because it involved a male whose predictor-variables were incomplete #' (and it was the only contest involving that lizard, so it is uninformative). #' #' @name chameleons #' @docType data #' @format A list containing three data frames: `chameleons$winner`, #' `chameleons$loser` and `chameleons$predictors`. #' #' The `chameleons$winner` and `chameleons$loser` data frames each #' have 106 observations (one per contest) on the following 4 variables: #' \describe{ #' \item{ID}{a factor with 35 levels `C01`, `C02`, #' ... , `C43`, the identity of the winning (or losing) male in each #' contest} #' \item{prev.wins.1}{integer (values 0 or 1), did the #' winner/loser of this contest win in an immediately previous contest?} #' \item{prev.wins.2}{integer (values 0, 1 or 2), how many of his #' (maximum) previous 2 contests did each male win?} #' \item{prev.wins.all}{integer, how many previous contests has each #' male won?} } #' #' The `chameleons$predictors` data frame has 35 observations, one for #' each male involved in the contests, on the following 7 variables: #' \describe{ #' \item{ch.res}{numeric, residuals of casque height regression on #' `SVL`, i.e. relative height of the bony part on the top of the #' chameleons' heads} #' \item{jl.res}{numeric, residuals of jaw length #' regression on `SVL`} #' \item{tl.res}{numeric, residuals of tail #' length regression on `SVL`} #' \item{mass.res}{numeric, residuals #' of body mass regression on `SVL` (body condition)} #' \item{SVL}{numeric, snout-vent length (body size)} #' \item{prop.main}{numeric, proportion (arcsin transformed) of area of #' the flank occupied by the main pink patch on the flank} #' \item{prop.patch}{numeric, proportion (arcsin transformed) of area #' of the flank occupied by the entire flank patch} } #' @author David Firth #' @source The data were obtained by Dr Devi Stuart-Fox, #' \url{https://devistuartfox.com/}, #' and they are reproduced here with her kind permission. #' #' These are the same data that were used in #' #' Stuart-Fox, D. M., Firth, D., Moussalli, A. and Whiting, M. J. (2006) #' Multiple signals in chameleon contests: designing and analysing animal #' contests as a tournament. *Animal Behaviour* **71**, 1263--1271. 
#' @keywords datasets #' @examples #' #' ## #' ## Reproduce Table 3 from page 1268 of the above paper: #' ## #' summary(chameleon.model <- BTm(player1 = winner, player2 = loser, #' formula = ~ prev.wins.2 + ch.res[ID] + prop.main[ID] + (1|ID), id = "ID", #' data = chameleons)) #' head(BTabilities(chameleon.model)) #' ## #' ## Note that, although a per-chameleon random effect is specified as in the #' ## above [the term "+ (1|ID)"], the estimated variance for that random #' ## effect turns out to be zero in this case. The "prior experience" #' ## effect ["+ prev.wins.2"] in this analysis has explained most of the #' ## variation, leaving little for the ID-specific predictors to do. #' ## Despite that, two of the ID-specific predictors do emerge as #' ## significant. #' ## #' ## Test whether any of the other ID-specific predictors has an effect: #' ## #' add1(chameleon.model, ~ . + jl.res[ID] + tl.res[ID] + mass.res[ID] + #' SVL[ID] + prop.patch[ID]) #' "chameleons" BradleyTerry2/R/formula.BTm.R0000644000176200001440000000006614775237530015435 0ustar liggesusers#' @export formula.BTm <- function(x, ...) x$formula BradleyTerry2/R/vcov.BTglmmPQL.R0000644000176200001440000000021114775237530016012 0ustar liggesusers#' @export vcov.BTglmmPQL <- function (object, ...) { so <- summary(object, corr = FALSE, ...) so$dispersion * so$cov.unscaled } BradleyTerry2/R/predict.BTm.R0000644000176200001440000002661214775673305015433 0ustar liggesusers#' Predict Method for Bradley-Terry Models #' #' Obtain predictions and optionally standard errors of those predictions from #' a fitted Bradley-Terry model. #' #' If `newdata` is omitted the predictions are based on the data used for #' the fit. In that case how cases with missing values in the original fit are #' treated is determined by the `na.action` argument of that fit. If #' `na.action = na.omit` omitted cases will not appear in the residuals, #' whereas if `na.action = na.exclude` they will appear (in predictions #' and standard errors), with residual value `NA`. See also #' `napredict`. #' #' @param object a fitted object of class `"BTm"` #' @param newdata (optional) a data frame in which to look for variables with #' which to predict. If omitted, the fitted linear predictors are used. #' @param level for models with random effects: an integer vector giving the #' level(s) at which predictions are required. Level zero corresponds to #' population-level predictions (fixed effects only), whilst level one #' corresponds to the player-level predictions (full model) which are NA for #' contests involving players not in the original data. By default, `level = 0` #' for a fixed effects model, `1` otherwise. #' @param type the type of prediction required. The default is on the scale of #' the linear predictors; the alternative `"response"` is on the scale of #' the response variable. Thus for a default Bradley-Terry model the default #' predictions are of log-odds (probabilities on logit scale) and #' `type = "response"` gives the predicted probabilities. The `"terms"` option #' returns a matrix giving the fitted values of each term in the model formula #' on the linear predictor scale (fixed effects only). #' @param se.fit logical switch indicating if standard errors are required. #' @param dispersion a value for the dispersion, not used for models with #' random effects. If omitted, that returned by `summary` applied to the #' object is used, where applicable. #' @param terms with `type ="terms"` by default all terms are returned. 
A #' character vector specifies which terms are to be returned. #' @param na.action function determining what should be done with missing #' values in `newdata`. The default is to predict `NA`. #' @param \dots further arguments passed to or from other methods. #' @return If `se.fit = FALSE`, a vector or matrix of predictions. If #' `se = TRUE`, a list with components \item{fit }{Predictions} #' \item{se.fit }{Estimated standard errors} #' @author Heather Turner #' @seealso [predict.glm()], [MASS::predict.glmmPQL()] #' @keywords models #' @examples #' #' ## The final model in example(flatlizards) #' result <- rep(1, nrow(flatlizards$contests)) #' Whiting.model3 <- BTm(1, winner, loser, ~ throat.PC1[..] + throat.PC3[..] + #' head.length[..] + SVL[..] + (1|..), #' family = binomial(link = "probit"), #' data = flatlizards, trace = TRUE) #' #' ## `new' data for contests between four of the original lizards #' ## factor levels must correspond to original levels, but unused levels #' ## can be dropped - levels must match rows of predictors #' newdata <- list(contests = data.frame( #' winner = factor(c("lizard048", "lizard060"), #' levels = c("lizard006", "lizard011", #' "lizard048", "lizard060")), #' loser = factor(c("lizard006", "lizard011"), #' levels = c("lizard006", "lizard011", #' "lizard048", "lizard060")) #' ), #' predictors = flatlizards$predictors[c(3, 6, 27, 33), ]) #' #' predict(Whiting.model3, level = 1, newdata = newdata) #' #' ## same as #' predict(Whiting.model3, level = 1)[1:2] #' #' ## introducing a new lizard #' newpred <- rbind(flatlizards$predictors[c(3, 6, 27), #' c("throat.PC1","throat.PC3", "SVL", "head.length")], #' c(-5, 1.5, 1, 0.1)) #' rownames(newpred)[4] <- "lizard059" #' #' newdata <- list(contests = data.frame( #' winner = factor(c("lizard048", "lizard059"), #' levels = c("lizard006", "lizard011", #' "lizard048", "lizard059")), #' loser = factor(c("lizard006", "lizard011"), #' levels = c("lizard006", "lizard011", #' "lizard048", "lizard059")) #' ), #' predictors = newpred) #' #' ## can only predict at population level for contest with new lizard #' predict(Whiting.model3, level = 0:1, se.fit = TRUE, newdata = newdata) #' #' ## predicting at specific levels of covariates #' #' ## consider a model from example(CEMS) #' table6.model <- BTm(outcome = cbind(win1.adj, win2.adj), #' player1 = school1, player2 = school2, #' formula = ~ .. + #' WOR[student] * Paris[..] + #' WOR[student] * Milano[..] + #' WOR[student] * Barcelona[..] + #' DEG[student] * St.Gallen[..] + #' STUD[student] * Paris[..] + #' STUD[student] * St.Gallen[..] + #' ENG[student] * St.Gallen[..] + #' FRA[student] * London[..] + #' FRA[student] * Paris[..] + #' SPA[student] * Barcelona[..] + #' ITA[student] * London[..] + #' ITA[student] * Milano[..] 
+ #' SEX[student] * Milano[..], #' refcat = "Stockholm", #' data = CEMS) #' #' ## estimate abilities for a combination not seen in the original data #' #' ## same schools #' schools <- levels(CEMS$preferences$school1) #' ## new student data #' students <- data.frame(STUD = "other", ENG = "good", FRA = "good", #' SPA = "good", ITA = "good", WOR = "yes", DEG = "no", #' SEX = "female", stringsAsFactors = FALSE) #' ## set levels to be the same as original data #' for (i in seq_len(ncol(students))){ #' students[,i] <- factor(students[,i], levels(CEMS$students[,i])) #' } #' newdata <- list(preferences = #' data.frame(student = factor(500), # new id matching with `students[1,]` #' school1 = factor("London", levels = schools), #' school2 = factor("Paris", levels = schools)), #' students = students, #' schools = CEMS$schools) #' #' ## warning can be ignored as model specification was over-parameterized #' predict(table6.model, newdata = newdata) #' #' ## if treatment contrasts are use (i.e. one player is set as the reference #' ## category), then predicting the outcome of contests against the reference #' ## is equivalent to estimating abilities with specific covariate values #' #' ## add student with all values at reference levels #' students <- rbind(students, #' data.frame(STUD = "other", ENG = "good", FRA = "good", #' SPA = "good", ITA = "good", WOR = "no", DEG = "no", #' SEX = "female", stringsAsFactors = FALSE)) #' ## set levels to be the same as original data #' for (i in seq_len(ncol(students))){ #' students[,i] <- factor(students[,i], levels(CEMS$students[,i])) #' } #' newdata <- list(preferences = #' data.frame(student = factor(rep(c(500, 502), each = 6)), #' school1 = factor(schools, levels = schools), #' school2 = factor("Stockholm", levels = schools)), #' students = students, #' schools = CEMS$schools) #' #' predict(table6.model, newdata = newdata, se.fit = TRUE) #' #' ## the second set of predictions (elements 7-12) are equivalent to the output #' ## of BTabilities; the first set are adjust for `WOR` being equal to "yes" #' BTabilities(table6.model) #' #' @importFrom stats model.matrix na.pass reformulate #' @export predict.BTm <- function (object, newdata = NULL, level = ifelse(is.null(object$random), 0, 1), type = c("link", "response", "terms"), se.fit = FALSE, dispersion = NULL, terms = NULL, na.action = na.pass, ...) 
{ type <- match.arg(type) if (!is.null(newdata)) { ## need to define X so will work with model terms setup <- match(c("player1", "player2", "formula", "id", "separate.ability", "refcat", "weights", "subset", "offset", "contrasts"), names(object$call), 0L) setup <- do.call(BTm.setup, c(as.list(object$call)[setup], list(data = newdata)), envir = environment(object$formula)) nfix <- length(object$coefficients) newdata <- data.frame(matrix(, nrow(setup$X), 0)) keep <- as.logical(match(colnames(setup$X), names(object$coefficients), nomatch = 0)) if (any(!keep)){ ## new players with missing data - set to NA missing <- rowSums(setup$X[,!keep, drop = FALSE]) != 0 setup$X <- setup$X[, keep, drop = FALSE] setup$X[missing,] <- NA } if (ncol(setup$X) != nfix) { ## newdata does not include original players with missing data X <- matrix(0, nrow(setup$X), nfix, dimnames = list(rownames(setup$X), names(object$coefficients))) X[, colnames(setup$X)] <- setup$X newdata$X <- X } else newdata$X <- setup$X nran <- length(attr(object$coefficients, "random")) if (1 %in% level && !is.null(object$random) && type != "terms"){ if (ncol(setup$random) != nran) { ## expand to give col for every random effect Z <- matrix(0, nrow(setup$random), nran, dimnames = list(rownames(setup$random), colnames(object$random))) #ranef need names!! ## set to NA for contests with new players ## (with predictors present) miss <- !colnames(setup$random) %in% colnames(Z) Z[, colnames(setup$random)[!miss]] <- setup$random[,!miss] if (any(miss)) { miss <- rowSums(setup$random[, miss, drop = FALSE] != 0) > 0 Z[miss,] <- NA } newrandom <- Z } else newrandom <- setup$random return(NextMethod(newrandom = newrandom)) } } if (type == "terms") { object$x <- model.matrix(object) attr(object$x, "assign") <- object$assign id <- unique(object$assign) terms <- paste("X", id, sep = "") object$terms <- terms(reformulate(c(0, terms))) splitX <- function(X) { newdata <- data.frame(matrix(, nrow(X), 0)) for (i in seq(id)) newdata[terms[i]] <- X[,object$assign == id[i]] newdata } if (is.null(newdata)) newdata <- splitX(object$x) else newdata <- splitX(newdata$X) tmp <- NextMethod(newdata = newdata) #tmp$fit[tmp$se.fit == 0] <- NA tmp$se.fit[tmp$se.fit == 0] <- NA colnames(tmp$fit) <- colnames(tmp$se.fit) <- c("(separate)"[0 %in% id], object$term.labels) return(tmp) } else NextMethod() } BradleyTerry2/R/residuals.BTm.R0000644000176200001440000000632714775237530015771 0ustar liggesusers#' Residuals from a Bradley-Terry Model #' #' Computes residuals from a model object of class `"BTm"`. In additional #' to the usual options for objects inheriting from class `"glm"`, a #' `"grouped"` option is implemented to compute player-specific residuals #' suitable for diagnostic checking of a predictor involving player-level #' covariates. #' #' For `type` other than `"grouped"` see [residuals.glm()]. #' #' For `type = "grouped"` the residuals returned are weighted means of #' working residuals, with weights equal to the binomial denominators in the #' fitted model. These are suitable for diagnostic model checking, for example #' plotting against candidate predictors. #' #' @param object a model object for which `inherits(model, "BTm")` is #' `TRUE`. #' @param type the type of residuals which should be returned. The #' alternatives are: `"deviance"` (default), `"pearson"`, #' `"working"`, `"response"`, and `"partial"`. #' @param by the grouping factor to use when `type = "grouped"`. #' @param ... arguments to pass on other methods. 
#' @return A numeric vector of length equal to the number of players, with a #' `"weights"` attribute. #' @author David Firth and Heather Turner #' @seealso [BTm()], [BTabilities()] #' @references Firth, D. (2005) Bradley-Terry models in R. *Journal of #' Statistical Software* **12**(1), 1--12. #' #' Turner, H. and Firth, D. (2012) Bradley-Terry models in R: The BradleyTerry2 #' package. *Journal of Statistical Software*, **48**(9), 1--21. #' @keywords models #' @examples #' #' ## #' ## See ?springall #' ## #' springall.model <- BTm(cbind(win.adj, loss.adj), #' col, row, #' ~ flav[..] + gel[..] + #' flav.2[..] + gel.2[..] + flav.gel[..] + (1 | ..), #' data = springall) #' res <- residuals(springall.model, type = "grouped") #' with(springall$predictors, plot(flav, res)) #' with(springall$predictors, plot(gel, res)) #' ## Weighted least-squares regression of these residuals on any variable #' ## already included in the model yields slope coefficient zero: #' lm(res ~ flav, weights = attr(res, "weights"), #' data = springall$predictors) #' lm(res ~ gel, weights = attr(res, "weights"), #' data = springall$predictors) #' #' @importFrom stats as.formula model.frame model.matrix terms #' @export residuals.BTm <- function(object, type = c("deviance", "pearson", "working", "response", "partial", "grouped"), by = object$id, ...) { type <- match.arg(type) if (type != "grouped") return(NextMethod()) ## for glm, lm would just be ## X <- model.matrix(formula, data = object$data) formula <- as.formula(paste("~", by, "- 1")) mt <- terms(formula) mf1 <- model.frame(mt, data = c(object$player1, object$data)) X1 <- model.matrix(mt, data = mf1) mf2 <- model.frame(mt, data = c(object$player2, object$data)) X2 <- model.matrix(mt, data = mf2) X <- X1 - X2 r <- object$residuals ## the "working" residuals w <- object$weights total.resid <- crossprod(X, r * w) total.weight <- crossprod(abs(X), w) result <- total.resid / total.weight attr(result, "weights") <- total.weight result } BradleyTerry2/R/summary.BTglmmPQL.R0000644000176200001440000000310514775237530016537 0ustar liggesusers#' @importFrom stats coef pnorm #' @export summary.BTglmmPQL <- function(object, dispersion = NULL, correlation = FALSE, symbolic.cor = FALSE, ...) { if (identical(object$sigma, 0)){ ans <- NextMethod("summary") ans$sigma <- 0 class(ans) <- c("summary.BTglmmPQL", class(ans)) return(ans) } aliased <- is.na(coef(object)) coefs <- coef(object)[!aliased] cov.scaled <- cov.unscaled <- object$varFix # when dispersion != 1? dn <- c("Estimate", "Std. Error", "z value", "Pr(>|z|)") if (object$rank > 0) { sterr <- sqrt(diag(cov.scaled)) tvalue <- coefs/sterr pvalue <- 2 * pnorm(-abs(tvalue)) fixef.table <- cbind(coefs, sterr, tvalue, pvalue) dimnames(fixef.table) <- list(names(coefs), dn) } else { fixef.table <- matrix(, 0, 4) dimnames(fixef.table) <- list(NULL, dn) } sterr <- sqrt(object$varSigma) tvalue <- object$sigma/sterr pvalue <- 2 * pnorm(-abs(tvalue)) ranef.table <- cbind(object$sigma, sterr, tvalue, pvalue) dimnames(ranef.table) <- list("Std. 
Dev.", dn) ans <- c(object[c("call", "family", "iter", "rank", "na.action")], list(fixef = fixef.table, ranef = ranef.table, aliased = aliased, dispersion = 1, cov.unscaled = cov.unscaled)) if (correlation & object$rank > 0) { dd <- sqrt(diag(cov.unscaled)) ans$correlation <- cov.unscaled/outer(dd, dd) ans$symbolic.cor <- symbolic.cor } class(ans) <- "summary.BTglmmPQL" ans } BradleyTerry2/R/anova.BTm.R0000644000176200001440000002151314775237530015074 0ustar liggesusers#' Compare Nested Bradley Terry Models #' #' Compare nested models inheriting from class `"BTm"`. For models with no #' random effects, compute analysis of deviance table, otherwise compute Wald #' tests of additional terms. #' #' For models with no random effects, an analysis of deviance table is computed #' using [anova.glm()]. Otherwise, Wald tests are computed as #' detailed here. #' #' If a single object is specified, terms are added sequentially and a Wald #' statistic is computed for the extra parameters. If the full model includes #' player covariates and there are players with missing values over these #' covariates, then the `NULL` model will include a separate ability for #' these players. If there are missing values in any contest-level variables in #' the full model, the corresponding contests will be omitted throughout. The #' random effects structure of the full model is assumed for all sub-models. #' #' For a list of objects, consecutive pairs of models are compared by computing #' a Wald statistic for the extra parameters in the larger of the two models. #' #' The Wald statistic is always based on the variance-covariance matrix of the #' larger of the two models being compared. #' #' @param object a fitted object of class inheriting from `"BTm"`. #' @param ... additional `"BTm"` objects. #' @param dispersion a value for the dispersion. Not implemented for models #' with random effects. #' @param test optional character string (partially) matching one of #' `"Chisq"`, `"F"` or `"Cp"` to specify that p-values should be #' returned. The Chisq test is a likelihood ratio test for models with no #' random effects, otherwise a Wald test. Options `"F"` and `"Cp"` #' are only applicable to models with no random effects, see #' [stat.anova()]. #' @return An object of class `"anova"` inheriting from class #' `"data.frame"`. #' @section Warning: The comparison between two or more models will only be #' valid if they are fitted to the same dataset. This may be a problem if there #' are missing values and 's default of `na.action = na.omit` is used. An #' error will be returned in this case. #' #' The same problem will occur when separate abilities have been estimated for #' different subsets of players in the models being compared. However no #' warning is given in this case. #' @author Heather Turner #' @seealso [BTm()], [add1.BTm()] #' @keywords models #' @examples #' #' result <- rep(1, nrow(flatlizards$contests)) #' BTmodel <- BTm(result, winner, loser, ~ throat.PC1[..] + throat.PC3[..] + #' head.length[..] + (1|..), data = flatlizards, #' trace = TRUE) #' anova(BTmodel) #' #' @export anova.BTm <- function (object, ..., dispersion = NULL, test = NULL) { ## Only list models in ... dotargs <- list(...) 
named <- if (is.null(names(dotargs))) rep(FALSE, length(dotargs)) else (names(dotargs) != "") if (any(named)) warning("the following arguments to 'anova.BTm' are invalid and ", "dropped: ", paste(deparse(dotargs[named]), collapse = ", ")) dotargs <- dotargs[!named] is.BTm <- unlist(lapply(dotargs, function(x) inherits(x, "BTm"))) dotargs <- dotargs[is.BTm] ## Compare list of models models <- c(list(object), dotargs) if (length(dotargs) > 0){ fixed <- unlist(lapply(models, function(x) is.null(x$random))) if (all(fixed)) { variables <- lapply(models, function(x) paste(deparse(formula(x)), collapse = "\n")) models <- lapply(models, function(x) { x$formula <- formula(x$terms) class(x) <- setdiff(class(x), "BTm") x}) call <- match.call() anova.table <- do.call("anova", c(models, dispersion = call$dispersion, test = call$test)) attr(anova.table, "heading") <- c(paste("Analysis of Deviance Table\n\n", "Response: ", deparse(object$call$outcome, 500), "\n", sep = ""), paste("Model ", format(seq(models)), ": ", variables, sep = "", collapse = "\n")) return(anova.table) } else return(anova.BTmlist(c(list(object), dotargs), dispersion = dispersion, test = test)) } X <- model.matrix(object) Z <- object$random sep <- 0 %in% object$assign ## Passing on to glm when no random effects if (is.null(Z)) { object$x <- X attr(object$x, "assign") <- object$assign + sep attr(object$terms, "term.labels") <- c("[sep]"[sep], object$term.labels) anova.table <- NextMethod() attr(anova.table, "heading") <- paste("Analysis of Deviance Table", "\n\nModel: ", object$family$family, ", link: ", object$family$link, "\n\nResponse: ", deparse(object$call$outcome, 500), "\n\nTerms added sequentially (first to last)\n\n", sep = "") if (sep) { anova.table <- anova.table[-1,] rownames(anova.table)[1] <- "NULL" anova.table[1, 1:2] <- NA } return(anova.table) } varseq <- object$assign nvars <- max(0, varseq) stat <- df <- numeric(nvars) tryerror <- FALSE if (nvars > 1) { y <- object$y ## Extension to further methods method <- object$method if (!is.function(method)) method <- get(method, mode = "function") control <- object$control control$trace <- FALSE for (i in 1:(nvars - 1)) { fit <- method(X = X[, varseq <= i, drop = FALSE], y = y, Z = Z, weights = object$prior.weights, start = object$start, offset = object$offset, family = object$family, control = control, sigma = object$call$sigma, sigma.fixed = object$sigma.fixed) class(fit) <- oldClass(object) ind <- (varseq == i)[varseq <= i] trystat <- try(t(coef(fit)[ind]) %*% chol2inv(chol(suppressMessages( #vcov should deal with dispersion != 1 vcov(fit, dispersion = dispersion))[ind, ind])) %*% coef(fit)[ind], silent = TRUE) if (inherits(trystat, "try-error")) { stat[i] <- df[i] <- NA tryerror <- TRUE } else { stat[i] <- trystat df[i] <- sum(ind) } } } ind <- varseq == nvars trystat <- try(t(coef(object)[ind]) %*% chol2inv(chol(object$varFix[ind, ind])) %*% coef(object)[ind], silent = TRUE) if (inherits(trystat, "try-error")) { stat[nvars] <- df[nvars] <- NA tryerror <- TRUE } else { stat[nvars] <- trystat df[nvars] <- sum(ind) } table <- data.frame(c(NA, stat), c(NA, df)) dimnames(table) <- list(c("NULL", object$term.labels), c("Statistic", "Df")) title <- paste("Sequential Wald Tests", "\n\nModel: ", object$family$family, ", link: ", object$family$link, "\n\nResponse: ", deparse(object$call$outcome, 500), "\n\nPredictor: ", paste(formula(object), collapse = ""), "\n\nTerms added sequentially (first to last)", if (tryerror) "\n\nTest statistic unestimable for at least one term", "\n", 
sep = "") ## Assume dispersion fixed at one - if dispersion estimated, would use ## "residual" df from larger model in each comparison df.dispersion <- Inf if (!is.null(test)) { if (test == "F" && df.dispersion == Inf) { fam <- object$family$family if (fam == "binomial" || fam == "poisson") warning(gettextf("using F test with a %s family is ", "inappropriate", fam), domain = NA) else { warning("using F test with a fixed dispersion is inappropriate") } } table <- switch(test, Chisq = { dfs <- table[, "Df"] vals <- table[, "Statistic"] vals[dfs %in% 0] <- NA cbind(table, `P(>|Chi|)` = pchisq(vals, dfs, lower.tail = FALSE)) }, F = { dfs <- table[, "Df"] Fvalue <- table[, "Statistic"]/dfs Fvalue[dfs %in% 0] <- NA cbind(table, F = Fvalue, `Pr(>F)` = pf(Fvalue, dfs, df.dispersion, lower.tail = FALSE)) }) } structure(table, heading = title, class = c("anova", "data.frame")) } BradleyTerry2/R/seeds.R0000755000176200001440000000207214775237530014414 0ustar liggesusers#' Seed Germination Data from Crowder (1978) #' #' Data from Crowder(1978) giving the proportion of seeds germinated for 21 #' plates that were arranged according to a 2x2 factorial layout by seed #' variety and type of root extract. #' #' #' @name seeds #' @docType data #' @format A data frame with 21 observations on the following 4 variables. #' \describe{ #' \item{r}{the number of germinated seeds.} #' \item{n}{the total number of seeds.} #' \item{seed}{the seed #' variety.} #' \item{extract}{the type of root extract.} } #' @seealso [glmmPQL()] #' @references Breslow, N. E. and Clayton, D. G. (1993) Approximate inference #' in Generalized Linear Mixed Models. *Journal of the American #' Statistical Association*, **88**(421), 9--25. #' @source Crowder, M. (1978) Beta-Binomial ANOVA for proportions. #' *Applied Statistics*, **27**, 34--37. #' @keywords datasets #' @examples #' #' summary(glmmPQL(cbind(r, n - r) ~ seed + extract, #' random = diag(nrow(seeds)), #' family = binomial, #' data = seeds)) #' "seeds" BradleyTerry2/R/BTabilities.R0000755000176200001440000001715314775237530015512 0ustar liggesusers#' Estimated Abilities from a Bradley-Terry Model #' #' Computes the (baseline) ability of each player from a model object of class #' `"BTm"`. #' #' The player abilities are either directly estimated by the model, in which #' case the appropriate parameter estimates are returned, otherwise the #' abilities are computed from the terms of the fitted model that involve #' player covariates only (those indexed by `model$id` in the model #' formula). Thus parameters in any other terms are assumed to be zero. If one #' player has been set as the reference, then `predict.BTm()` can be used to #' obtain ability estimates with non-player covariates set to other values, #' see examples for [predict.BTm()]. #' #' If the abilities are structured according to a linear predictor, and if #' there are player covariates with missing values, the abilities for the #' corresponding players are estimated as separate parameters. In this event #' the resultant matrix has an attribute, named `"separate"`, which #' identifies those players whose ability was estimated separately. For an #' example, see [flatlizards()]. 
#' #' @aliases BTabilities print.BTabilities coef.BTabilities vcov.BTabilities #' @param model a model object for which `inherits(model, "BTm")` is #' `TRUE` #' @return A two-column numeric matrix of class `c("BTabilities", #' "matrix")`, with columns named `"ability"` and `"se"`; has one row #' for each player; has attributes named `"vcov"`, `"modelcall"`, #' `"factorname"` and (sometimes --- see below) `"separate"`. The #' first three attributes are not printed by the method #' `print.BTabilities`. #' #' @author David Firth and Heather Turner #' @seealso [BTm()], [residuals.BTm()] #' @references Firth, D. (2005) Bradley-Terry models in R. *Journal of #' Statistical Software*, **12**(1), 1--12. #' #' Turner, H. and Firth, D. (2012) Bradley-Terry models in R: The BradleyTerry2 #' package. *Journal of Statistical Software*, **48**(9), 1--21. #' @keywords models #' @examples #' #' ### citations example #' #' ## Convert frequencies to success/failure data #' citations.sf <- countsToBinomial(citations) #' names(citations.sf)[1:2] <- c("journal1", "journal2") #' #' ## Fit the "standard" Bradley-Terry model #' citeModel <- BTm(cbind(win1, win2), journal1, journal2, data = citations.sf) #' BTabilities(citeModel) #' #' ### baseball example #' #' data(baseball) # start with baseball data as provided by package #' #' ## Fit mode with home advantage #' baseball$home.team <- data.frame(team = baseball$home.team, at.home = 1) #' baseball$away.team <- data.frame(team = baseball$away.team, at.home = 0) #' baseballModel2 <- BTm(cbind(home.wins, away.wins), home.team, away.team, #' formula = ~ team + at.home, id = "team", #' data = baseball) #' ## Estimate abilities for each team, relative to Baltimore, when #' ## playing away from home: #' BTabilities(baseballModel2) #' #' @importFrom stats C contrasts model.frame model.matrix model.offset na.exclude na.pass terms reformulate relevel vcov #' @export BTabilities <- function (model) { if (!inherits(model, "BTm")) stop("model is not of class BTm") X0 <- model.matrix(model) player1 <- model$player1[, model$id] player.names <- levels(player1) factors <- attr(terms(model$formula), "factors") if (!(model$id %in% rownames(factors))) { players <- data.frame(factor(seq(player.names), labels = player.names)) names(players) <- model$id ## assume player covariates indexed by id fixed <- nobars(model$formula) factors <- attr(terms(fixed), "factors") vars <- rownames(factors) by.id <- grep(paste("[", model$id, "]", sep = ""), vars, fixed = TRUE) drop <- setdiff(seq(length(vars)), by.id) ## following will only work for linear terms ## (drop any term involving non-player covariate) keep <- colSums(factors[drop, , drop = FALSE]) == 0 formula <- reformulate(names(keep)[keep]) mf <- model.frame(terms(formula), data = c(players, model$data), na.action = na.pass) rownames(mf) <- player.names players <- players[, model$id] offset <- model.offset(mf) if (is.null(offset)) offset <- 0 predvars <- setdiff(seq(ncol(mf)), attr(attr(mf, "terms"), "offset")) predvars <- reformulate(colnames(mf)[predvars]) X <- model.matrix(predvars, mf) Xmiss <- is.na(rowSums(X)) | players %in% model$separate.ability X[Xmiss, ] <- 0 X <- X[, -1, drop = FALSE] separate.ability <- unique(union(players[Xmiss], model$separate.ability)) ns <- length(separate.ability) if (ns) { S <- matrix(0, nrow = nrow(X), ncol = ns) S[cbind(which(players %in% separate.ability), seq(ns))] <- 1 X <- cbind(S, X) } ## remove inestimable coef est <- !is.na(model$coef) kept <- model$assign[est] %in% c(0, which(keep)) est <- 
est[kept] X <- X[, est, drop = FALSE] sqrt.vcov <- chol(vcov(model)[kept, kept]) V <- crossprod(sqrt.vcov %*% t(X)) se <- sqrt(diag(V)) abilities <- cbind(X %*% coef(model)[est][kept] + offset, se) attr(abilities, "vcov") <- V if (length(separate.ability)) { attr(abilities, "separate") <- separate.ability } } else { ## get ability coef and corresponding vcov asgn <- model$assign if (is.null(asgn)) abilities <- TRUE else { idterm <- attr(terms(model$formula), "term.labels") == model$id if (!any(idterm)) stop("abilities not uniquely defined for this parameterization") coefs.to.include <- asgn == which(idterm) vcov.to.include <- asgn[!is.na(coef(model))] == which(idterm) } coef <- na.exclude(coef(model)[coefs.to.include]) vc <- vcov(model)[names(coef), names(coef), drop = FALSE] ## setup factor reflecting contrasts used .. fac <- factor(player.names, levels = player.names) if (!is.null(model$refcat)) { fac <- C(relevel(fac, model$refcat), "contr.treatment") } else fac <- C(fac, model$contrasts[[model$id]]) contr <- contrasts(fac)[player.names,] ## calc abilities and s.e., fill in NA as necessary if (!is.null(attr(coef, "na.action"))) { contr <- contr[, -attr(coef, "na.action"), drop = FALSE] } est <- contr %*% coef ## vc of contrasts for use with qvcalc vc <- contr %*% vc %*% t(contr) se <- sqrt(diag(vc)) if (!is.null(attr(coef, "na.action"))){ id <- match(names(attr(coef, "na.action")), paste0(model$id, rownames(contr))) est[id] <- se[id] <- NA } abilities <- cbind(est, se) rownames(abilities) <- player.names attr(abilities, "vcov") <- vc } colnames(abilities) <- c("ability", "s.e.") attr(abilities, "modelcall") <- model$call attr(abilities, "factorname") <- model$id class(abilities) <- c("BTabilities", "matrix") abilities } #' @export print.BTabilities <- function(x, ...) { attr(x, "vcov") <- attr(x, "modelcall") <- attr(x, "factorname") <- NULL class(x) <- "matrix" print(x, ...) ## ie, print without showing the messy attributes } #' @export vcov.BTabilities <- function(object, ...) { attr(object, "vcov") } #' @export coef.BTabilities <- function(object, ...) { object[, "ability"] } BradleyTerry2/R/qvcalc.BTabilities.R0000755000176200001440000000703714775673305016766 0ustar liggesusers#' Quasi Variances for Estimated Abilities #' #' A method for [qvcalc::qvcalc()] to compute a set of quasi variances (and #' corresponding quasi standard errors) for estimated abilities from a #' Bradley-Terry model as returned by [BTabilities()]. #' #' For details of the method see Firth (2000), Firth (2003) or Firth and de #' Menezes (2004). Quasi variances generalize and improve the accuracy of #' \dQuote{floating absolute risk} (Easton et al., 1991). This device for #' economical model summary was first suggested by Ridout (1989). #' #' Ordinarily the quasi variances are positive and so their square roots #' (the quasi standard errors) exist and can be used in plots, etc. #' #' @param object a `"BTabilities"` object as returned by [BTabilities()]. #' @param ... additional arguments, currently ignored. 
#' @return A list of class `"qv"`, with components #' \item{covmat}{The full variance-covariance matrix for the estimated #' abilities.} #' \item{qvframe}{A data frame with variables `estimate`, `SE`, `quasiSE` and #' `quasiVar`, the last two being a quasi standard error and quasi-variance #' for each ability.} #' \item{dispersion}{`NULL` (dispersion is fixed to 1).} #' \item{relerrs}{Relative errors for approximating the standard errors of all #' simple contrasts.} #' \item{factorname}{The name of the ID factor identifying players in the `BTm` #' formula.} #' \item{coef.indices}{`NULL` (no required for this method).} #' \item{modelcall}{The call to `BTm` to fit the Bradley-Terry model from which #' the abilities were estimated.} #' @references #' Easton, D. F, Peto, J. and Babiker, A. G. A. G. (1991) Floating absolute #' risk: an alternative to relative risk in survival and case-control analysis #' avoiding an arbitrary reference group. *Statistics in Medicine* **10**, #' 1025--1035. #' #' Firth, D. (2000) Quasi-variances in Xlisp-Stat and on the web. #' *Journal of Statistical Software* **5(4)**, 1--13. #' \doi{https://doi.org/10.18637/jss.v005.i04}. #' #' Firth, D. (2003) Overcoming the reference category problem in the #' presentation of statistical models. *Sociological Methodology* #' **33**, 1--18. #' #' Firth, D. and de Menezes, R. X. (2004) Quasi-variances. #' *Biometrika* **91**, 65--80. #' #' Menezes, R. X. de (1999) More useful standard errors for group and factor #' effects in generalized linear models. *D.Phil. Thesis*, #' Department of Statistics, University of Oxford. #' #' Ridout, M.S. (1989). Summarizing the results of fitting generalized #' linear models to data from designed experiments. In: *Statistical #' Modelling: Proceedings of GLIM89 and the 4th International #' Workshop on Statistical Modelling held in Trento, Italy, July 17--21, #' 1989* (A. Decarli et al., eds.), pp 262--269. New York: Springer. #' @author David Firth #' @seealso [qvcalc::worstErrors()], [qvcalc::plot.qv()]. #' @examples #' example(baseball) #' baseball.qv <- qvcalc(BTabilities(baseballModel2)) #' print(baseball.qv) #' plot(baseball.qv, xlab = "team", #' levelNames = c("Bal", "Bos", "Cle", "Det", "Mil", "NY", "Tor")) #' @method qvcalc BTabilities #' @importFrom qvcalc qvcalc.default #' @importFrom stats coef vcov #' @export qvcalc.BTabilities <- function(object, ...){ vc <- vcov(object) cf <- coef(object) factorname <- attr(object, "factorname") modelcall <- attr(object, "modelcall") qvcalc.default(vc, factorname = factorname, estimates = cf, modelcall = modelcall) } #' @importFrom qvcalc qvcalc #' @export qvcalc::qvcalc BradleyTerry2/R/icehockey.R0000755000176200001440000001104414775237530015253 0ustar liggesusers#' College Hockey Men's Division I 2009-10 results #' #' Game results from American College Hockey Men's Division I composite #' schedule 2009-2010. #' #' The Division I ice hockey teams are arranged in six conferences: Atlantic #' Hockey, Central Collegiate Hockey Association, College Hockey America, ECAC #' Hockey, Hockey East and the Western Collegiate Hockey Association, all part #' of the National Collegiate Athletic Association. The composite schedule #' includes within conference games and between conference games. #' #' The data set here contains only games from the regular season, the results #' of which determine the teams that play in the NCAA national tournament. 
#' There are six automatic bids that go to the conference tournament champions, #' the remaining 10 teams are selected based upon ranking under the NCAA's #' system of pairwise comparisons #' (\url{https://www.collegehockeynews.com/info/?d=pwcrpi}). Some have argued #' that Bradley-Terry rankings would be fairer #' (\url{https://www.collegehockeynews.com/info/?d=krach}). #' #' @name icehockey #' @docType data #' @format A data frame with 1083 observations on the following 6 variables. #' \describe{ #' \item{date}{a numeric vector} #' \item{visitor}{a #' factor with 58 levels `Alaska Anchorage` ... `Yale`} #' \item{v_goals}{a numeric vector} #' \item{opponent}{a factor #' with 58 levels `Alaska Anchorage` ... `Yale`} #' \item{o_goals}{a numeric vector} #' \item{conference}{a factor #' with levels `AH`, `CC`, `CH`, `EC`, `HE`, #' `NC`, `WC`} #' \item{result}{a numeric vector: 1 if visitor #' won, 0.5 for a draw and 0 if visitor lost} #' \item{home.ice}{a logical #' vector: 1 if opponent on home ice, 0 if game on neutral ground} } #' @references Schlobotnik, J. Build your own rankings: #' \url{http://www.elynah.com/tbrw/2010/rankings.diy.shtml}. #' #' College Hockey News \url{https://www.collegehockeynews.com/}. #' #' Selections for 2010 NCAA tournament: #' \url{https://www.espn.com/college-sports/news/story?id=5012918}. #' @source \url{http://www.collegehockeystats.net/0910/schedules/men}. #' @keywords datasets #' @examples #' #' ### Fit the standard Bradley-Terry model #' standardBT <- BTm(outcome = result, #' player1 = visitor, player2 = opponent, #' id = "team", data = icehockey) #' #' ## Bradley-Terry abilities #' abilities <- exp(BTabilities(standardBT)[,1]) #' #' ## Compute round-robin winning probability and KRACH ratings #' ## (scaled abilities such that KRACH = 100 for a team with #' ## round-robin winning probability of 0.5) #' rankings <- function(abilities){ #' probwin <- abilities/outer(abilities, abilities, "+") #' diag(probwin) <- 0 #' nteams <- ncol(probwin) #' RRWP <- rowSums(probwin)/(nteams - 1) #' low <- quantile(abilities, 0.45) #' high <- quantile(abilities, 0.55) #' middling <- uniroot(function(x) {sum(x/(x+abilities)) - 0.5*nteams}, #' lower = low, upper = high)$root #' KRACH <- abilities/middling*100 #' cbind(KRACH, RRWP) #' } #' #' ranks <- rankings(abilities) #' ## matches those produced by Joe Schlobotnik's Build Your Own Rankings #' head(signif(ranks, 4)[order(ranks[,1], decreasing = TRUE),]) #' #' ## At one point the NCAA rankings gave more credit for wins on #' ## neutral/opponent's ground. Home ice effects are easily #' ## incorporated into the Bradley-Terry model, comparing teams #' ## on a "level playing field" #' levelBT <- BTm(result, #' data.frame(team = visitor, home.ice = 0), #' data.frame(team = opponent, home.ice = home.ice), #' ~ team + home.ice, #' id = "team", data = icehockey) #' #' abilities <- exp(BTabilities(levelBT)[,1]) #' ranks2 <- rankings(abilities) #' #' ## Look at movement between the two rankings #' change <- factor(rank(ranks2[,1]) - rank(ranks[,1])) #' barplot(xtabs(~change), xlab = "Change in Rank", ylab = "No. 
Teams") #' #' ## Take out regional winners and look at top 10 #' regional <- c("RIT", "Alabama-Huntsville", "Michigan", "Cornell", "Boston College", #' "North Dakota") #' #' ranks <- ranks[!rownames(ranks) %in% regional] #' ranks2 <- ranks2[!rownames(ranks2) %in% regional] #' #' ## compare the 10 at-large selections under both rankings #' ## with those selected under NCAA rankings #' cbind(names(sort(ranks, decr = TRUE)[1:10]), #' names(sort(ranks2, decr = TRUE)[1:10]), #' c("Miami", "Denver", "Wisconsin", "St. Cloud State", #' "Bemidji State", "Yale", "Northern Michigan", "New Hampshire", #' "Alsaka", "Vermont")) #' #' "icehockey" BradleyTerry2/R/model.matrix.BTm.R0000755000176200001440000000016314775237530016374 0ustar liggesusers#' @importFrom stats model.frame #' @export model.matrix.BTm <- function(object, ...){ model.frame(object)$X } BradleyTerry2/R/glmmPQL.control.R0000644000176200001440000000507014775237530016277 0ustar liggesusers#' Control Aspects of the glmmPQL Algorithm #' #' Set control variables for the glmmPQL algorithm. #' #' This function provides an interface to control the PQL algorithm used by #' [BTm()] for fitting Bradley Terry models with random effects. #' #' The algorithm iterates between a series of iterated weighted least squares #' iterations to update the fixed effects and a single Fisher scoring iteration #' to update the standard deviation of the random effects. #' #' Convergence of both the inner and outer iterations are judged by comparing #' the squared components of the relevant score vector with corresponding #' elements of the diagonal of the Fisher information matrix. If, for all #' components of the relevant score vector, the ratio is less than #' `tolerance^2`, or the corresponding diagonal element of the Fisher #' information matrix is less than 1e-20, iterations cease. #' #' @param maxiter the maximum number of outer iterations. #' @param IWLSiter the maximum number of iterated weighted least squares #' iterations used to estimate the fixed effects, given the standard deviation #' of the random effects. #' @param tol the tolerance used to determine convergence in the IWLS #' iterations and over all (see details). #' @param trace logical: whether or not to print the score for the random #' effects variance at the end of each iteration. #' @return A list with the arguments as components. #' @author Heather Turner #' @seealso [glmmPQL()], [BTm()] #' @references Breslow, N. E. and Clayton, D. G. (1993), Approximate inference #' in Generalized Linear Mixed Models. *Journal of the American #' Statistical Association* **88**(421), 9--25. #' @keywords models #' @examples #' #' ## Variation on example(flatlizards) #' result <- rep(1, nrow(flatlizards$contests)) #' #' ## BTm passes arguments on to glmmPQL.control() #' args(BTm) #' BTmodel <- BTm(result, winner, loser, ~ throat.PC1[..] + throat.PC3[..] + #' head.length[..] + SVL[..] 
+ (1|..), #' data = flatlizards, tol = 1e-3, trace = TRUE) #' summary(BTmodel) #' #' @export glmmPQL.control <- function (maxiter = 50, IWLSiter = 10, tol = 1e-6, trace = FALSE) { call <- as.list(match.call()) if (length(call) > 1) { argPos <- match(c("maxiter", "IWLSiter", "tol"), names(call)) for (n in argPos[!is.na(argPos)]) { if (!is.numeric(call[[n]]) || call[[n]] <= 0) stop("value of '", names(call)[n], "' must be > 0") } } list(maxiter = maxiter, IWLSiter = IWLSiter, tol = tol, trace = trace) } BradleyTerry2/R/glmmPQL.fit.R0000644000176200001440000002115414775237530015402 0ustar liggesusers#' @importFrom utils flush.console glmmPQL.fit <- function(X, y, Z, weights = rep(1, NROW(y)), start = NULL, etastart = NULL, mustart = NULL, offset = rep(0, NROW(y)), family = gaussian(), control = glmmPQL.control(...), sigma = NULL, sigma.fixed = FALSE, ...) { matchCall <- as.list(match.call(expand.dots = FALSE)) dots <- names(matchCall[["..."]]) dots <- intersect(dots, setdiff(names(formals(glm)), "control")) fit0 <- do.call("glm.fit", c(list(X, y, weights, start = start, etastart = etastart, mustart = mustart, offset = offset, family = family, control = glm.control()), matchCall[dots])) w <- fit0$prior.weights # QR missing from glm.fit if ncol(X) = 0 QR <- qr(X) R <- qr.R(QR) rank <- QR$rank p <- ncol(R) nm <- colnames(R)[seq(length = rank)] if (rank < p) { X0 <- X[,colnames(R)[-seq(length = rank)]] X <- X[, nm] } empty <- !length(X) if (empty) { alpha <- numeric(0) Xa <- matrix(0, length(y), 1) } eta <- fit0$linear.predictors residuals <- fit0$residuals Y <- eta + residuals - offset #working response wy <- fit0$weights # iterative weights wY <- sqrt(wy) * Y wZ <- sqrt(wy) * Z ZWy <- crossprod(wZ, wY) ZWZ <- crossprod(wZ, wZ) if (!empty) { wX <- sqrt(wy) * X XWy <- crossprod(wX, wY) XWX <- crossprod(wX, wX) ZWX <- crossprod(wZ, wX) E <- chol(XWX) J <- backsolve(E, t(ZWX), transpose = TRUE) f <- backsolve(E, XWy, transpose = TRUE) ZSy <- ZWy - crossprod(J, f) ZSZ <- ZWZ - crossprod(J, J) } if (is.null(sigma)) sigma <- 0.1 logtheta <- log(sigma^2) conv <- FALSE for (i in 1:control$maxiter) { ## Update coefficients for (j in 1:control$IWLSiter) { IZWZD <- ZWZ * sigma^2 diag(IZWZD) <- 1 + diag(IZWZD) A <- chol(IZWZD) if (!empty) { IZSZD <- ZSZ * sigma^2 diag(IZSZD) <- 1 + diag(IZSZD) G <- chol(IZSZD) g <- backsolve(G, ZSy, transpose = TRUE) v <- backsolve(G, g) B <- backsolve(A, sigma * ZWX, transpose = TRUE) K <- chol(XWX - crossprod(B, B)) b <- backsolve(A, sigma * ZWy, transpose = TRUE) c <- backsolve(K, XWy - t(B) %*% b, transpose = TRUE) alpha <- backsolve(K, c) Xa <- X %*% alpha beta <- sigma^2 * v } else { g <- backsolve(A, ZWy, transpose = TRUE) v <- backsolve(A, g) beta <- sigma^2 * v } eta <- c(Xa + Z %*% beta + offset) ## Update working response & weights mu <- family$linkinv(eta) mu.eta.val <- family$mu.eta(eta) residuals <- (fit0$y - mu)/mu.eta.val Y <- eta + residuals - offset wy <- w * mu.eta.val^2/family$variance(mu) wY <- sqrt(wy) * Y wZ <- sqrt(wy) * Z ZWy <- crossprod(wZ, wY) ZWZ <- crossprod(wZ, wZ) if (!empty) { wX <- sqrt(wy) * X XWy <- crossprod(wX, wY) XWX <- crossprod(wX, wX) ZWX <- crossprod(wZ, wX) E <- chol(XWX) J <- backsolve(E, t(ZWX), transpose = TRUE) f <- backsolve(E, XWy, transpose = TRUE) ZSy <- ZWy - crossprod(J, f) ZSZ <- ZWZ - crossprod(J, J) score <- c(crossprod(X, wy * residuals), crossprod(Z, wy * residuals) - v) diagInfo <- c(diag(XWX), diag(ZWZ)) if (all(diagInfo < 1e-20) || all(abs(score) < control$tol * sqrt(control$tol + diagInfo))) { if (sigma.fixed) 
conv <- TRUE break } } else { score <- crossprod(Z, wy * residuals) - v diagInfo <- diag(ZWZ) if (all(diagInfo < 1e-20) || all(abs(score) < control$tol * sqrt(control$tol + diagInfo))) { if (sigma.fixed) conv <- TRUE break } } } if (!sigma.fixed){ ## Update sigma ## sigma^2 = exp(logtheta) ## One Fisher scoring iteration IZWZD <- ZWZ * sigma^2 diag(IZWZD) <- 1 + diag(IZWZD) A <- chol(IZWZD) if (!empty) { IZSZD <- ZSZ * sigma^2 diag(IZSZD) <- 1 + diag(IZSZD) G <- chol(IZSZD) g <- backsolve(G, ZSy, transpose = TRUE) v <- backsolve(G, g) h <- backsolve(G, ZSZ, transpose = TRUE) H <- backsolve(G, h) } else { g <- backsolve(A, ZWy, transpose = TRUE) v <- backsolve(A, g) h <- backsolve(A, ZWZ, transpose = TRUE) H <- backsolve(A, h) } ## Harville p326 score <- drop(-0.5 * sum(diag(H)) + 0.5 * crossprod(v, v)) * sigma^2 Info <- 0.5 * sum(H^2) * sigma^4 if (control$trace) { ##B & K eq 5 - still not consistently increasing cat("Iteration ", i, ". Score = ", abs(score) , "\n", sep = "") flush.console() } ## check for overall convergence if (Info < 1e-20 || abs(score) < control$tol * sqrt(control$tol + Info)){ conv <- TRUE break } ## Cannot use beta to update t(YXa) %*% Vinv %*% YXa ZWYXa <- crossprod(wZ, sqrt(wy) * (Y - Xa)) optfun <- function(logtheta) { IZWZD <- ZWZ * exp(logtheta) diag(IZWZD) <- 1 + diag(IZWZD) A <- chol(IZWZD) if (!empty) { IZSZD <- ZSZ * exp(logtheta) diag(IZSZD) <- 1 + diag(IZSZD) G <- chol(IZSZD) d <- backsolve(A, sqrt(exp(logtheta)) * ZWYXa, transpose = TRUE) sum(log(diag(G))) - 0.5 * crossprod(d, d) } else { d <- backsolve(A, sqrt(exp(logtheta)) * ZWy, transpose = TRUE) sum(log(diag(A))) - 0.5 * crossprod(d, d) } } optres <- optimize(optfun, c(-10, 10)) if (optfun(-10) < optfun(optres$minimum)) sigma <- 0 else { if (abs(optres$minimum - (logtheta + score/Info)) > 0.1) logtheta <- optres$minimum else logtheta <- logtheta + score/Info sigma <- sqrt(exp(logtheta)) } } else if (conv) break } if (!empty) varFix <- chol2inv(K) else varFix <- matrix(, 0, 0) rownames(varFix) <- colnames(varFix) <- colnames(X) fit0$coef[nm] <- alpha if (!sigma.fixed) varSigma <- sigma^2/(4 * Info) else varSigma <- NA glm <- identical(sigma, 0) if (!empty) { if (rank < p) QR <- qr(cbind(wX, sqrt(w) * X0)) else QR <- qr(wX) R <- qr.R(QR) } list(coefficients = structure(fit0$coef, random = beta), residuals = residuals, fitted.values = mu, #effect = ? R = if (!empty) R, rank = rank, qr = if (!empty) QR, family = family, linear.predictors = eta, deviance = if (glm) sum(family$dev.resids(y, mu, w)), aic = if (glm) family$aic(y, length(y), mu, w, sum(family$dev.resids(y, mu, w))) + 2 * rank, null.deviance = if (glm) { wtdmu <- family$linkinv(offset) sum(family$dev.resids(y, wtdmu, w)) }, iter = ifelse(glm, NA, i), weights = wy, prior.weights = w, df.residual = length(y) - rank, df.null = if (glm) length(y) - sum(w == 0), y = y, sigma = sigma, sigma.fixed = sigma.fixed, varFix = varFix, varSigma = varSigma, converged = conv) } BradleyTerry2/R/flatlizards.R0000755000176200001440000001532214775674213015635 0ustar liggesusers#' Augrabies Male Flat Lizards: Contest Results and Predictor Variables #' #' Data collected at Augrabies Falls National Park (South Africa) in #' September-October 2002, on the contest performance and background attributes #' of 77 male flat lizards (*Platysaurus broadleyi*). The results of #' exactly 100 contests were recorded, along with various measurements made on #' each lizard. Full details of the study are in Whiting et al. (2006). 
#' #' There were no duplicate contests (no pair of lizards was seen fighting more #' than once), and there were no tied contests (the result of each contest was #' clear). #' #' The variables `head.length`, `head.width`, `head.height` and #' `condition` were all computed as residuals (of directly measured head #' length, head width, head height and body mass index, respectively) from #' simple least-squares regressions on `SVL`. #' #' Values of some predictors are missing (`NA`) for some lizards, #' \sQuote{at random}, because of instrument problems unconnected with the #' value of the measurement being made. #' #' @name flatlizards #' @docType data #' @format This dataset is a list containing two data frames: #' `flatlizards$contests` and `flatlizards$predictors`. #' #' The `flatlizards$contests` data frame has 100 observations on the #' following 2 variables: \describe{ #' \item{winner}{a factor with 77 #' levels `lizard003` ... `lizard189`.} #' \item{loser}{a factor #' with the same 77 levels `lizard003` ... `lizard189`.} } #' #' The `flatlizards$predictors` data frame has 77 observations (one for #' each of the 77 lizards) on the following 18 variables: \describe{ #' \item{id}{factor with 77 levels (3 5 6 ... 189), the lizard #' identifiers.} #' \item{throat.PC1}{numeric, the first principal #' component of the throat spectrum.} #' \item{throat.PC2}{numeric, the #' second principal component of the throat spectrum.} #' \item{throat.PC3}{numeric, the third principal component of the #' throat spectrum.} #' \item{frontleg.PC1}{numeric, the first principal #' component of the front-leg spectrum.} #' \item{frontleg.PC2}{numeric, #' the second principal component of the front-leg spectrum.} #' \item{frontleg.PC3}{numeric, the third principal component of the #' front-leg spectrum.} #' \item{badge.PC1}{numeric, the first principal #' component of the ventral colour patch spectrum.} #' \item{badge.PC2}{numeric, the second principal component of the #' ventral colour patch spectrum.} #' \item{badge.PC3}{numeric, the third #' principal component of the ventral colour patch spectrum.} #' \item{badge.size}{numeric, a measure of the area of the ventral #' colour patch.} #' \item{testosterone}{numeric, a measure of blood #' testosterone concentration.} #' \item{SVL}{numeric, the snout-vent #' length of the lizard.} #' \item{head.length}{numeric, head length.} #' \item{head.width}{numeric, head width.} #' \item{head.height}{numeric, head height.} #' \item{condition}{numeric, a measure of body condition.} #' \item{repro.tactic}{a factor indicating reproductive tactic; levels #' are `resident` and `floater`.} } #' @seealso [BTm()] #' @references Turner, H. and Firth, D. (2012) Bradley-Terry models in R: The #' BradleyTerry2 package. *Journal of Statistical Software*, #' **48**(9), 1--21. #' #' Whiting, M. J., Stuart-Fox, D. M., O'Connor, D., Firth, D., Bennett, N. C. #' and Blomberg, S. P. (2006). Ultraviolet signals ultra-aggression in a #' lizard. *Animal Behaviour* **72**, 353--363. #' @source The data were collected by Dr Martin Whiting, #' \url{https://whitinglab.com/people/martin-whiting/}, and they appear here #' with his kind permission. 
#' @keywords datasets #' @examples #' #' ## #' ## Fit the standard Bradley-Terry model, using the bias-reduced #' ## maximum likelihood method: #' ## #' result <- rep(1, nrow(flatlizards$contests)) #' BTmodel <- BTm(result, winner, loser, br = TRUE, data = flatlizards$contests) #' summary(BTmodel) #' ## #' ## That's fairly useless, though, because of the rather small #' ## amount of data on each lizard. And really the scientific #' ## interest is not in the abilities of these particular 77 #' ## lizards, but in the relationship between ability and the #' ## measured predictor variables. #' ## #' ## So next fit (by maximum likelihood) a "structured" B-T model in #' ## which abilities are determined by a linear predictor. #' ## #' ## This reproduces results reported in Table 1 of Whiting et al. (2006): #' ## #' Whiting.model <- BTm(result, winner, loser, #' ~ throat.PC1[..] + throat.PC3[..] + #' head.length[..] + SVL[..], #' data = flatlizards) #' summary(Whiting.model) #' ## #' ## Equivalently, fit the same model using glmmPQL: #' ## #' Whiting.model <- BTm(result, winner, loser, #' ~ throat.PC1[..] + throat.PC3[..] + #' head.length[..] + SVL[..] + (1|..), #' sigma = 0, sigma.fixed = TRUE, data = flatlizards) #' summary(Whiting.model) #' ## #' ## But that analysis assumes that the linear predictor formula for #' ## abilities is _perfect_, i.e., that there is no error in the linear #' ## predictor. This will always be unrealistic. #' ## #' ## So now fit the same predictor but with a normally distributed error #' ## term --- a generalized linear mixed model --- by using the BTm #' ## function instead of glm. #' ## #' Whiting.model2 <- BTm(result, winner, loser, #' ~ throat.PC1[..] + throat.PC3[..] + #' head.length[..] + SVL[..] + (1|..), #' data = flatlizards, trace = TRUE) #' summary(Whiting.model2) #' ## #' ## The estimated coefficients (of throat.PC1, throat.PC3, #' ## head.length and SVL are not changed substantially by #' ## the recognition of an error term in the model; but the estimated #' ## standard errors are larger, as expected. The main conclusions from #' ## Whiting et al. (2006) are unaffected. #' ## #' ## With the normally distributed random error included, it is perhaps #' ## at least as natural to use probit rather than logit as the link #' ## function: #' ## #' require(stats) #' Whiting.model3 <- BTm(result, winner, loser, #' ~ throat.PC1[..] + throat.PC3[..] + #' head.length[..] + SVL[..] + (1|..), #' family = binomial(link = "probit"), #' data = flatlizards, trace = TRUE) #' summary(Whiting.model3) #' BTabilities(Whiting.model3) #' ## Note the "separate" attribute here, identifying two lizards with #' ## missing values of at least one predictor variable #' ## #' ## Modulo the usual scale change between logit and probit, the results #' ## are (as expected) very similar to Whiting.model2. 
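#' ##
#' ## A further illustration (not part of the original analysis): list the
#' ## lizards whose abilities were estimated separately because of missing
#' ## predictor values
#' ##
#' attr(BTabilities(Whiting.model3), "separate")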
#' "flatlizards" BradleyTerry2/R/Diff.R0000644000176200001440000001371414775237530014163 0ustar liggesusers#' @importFrom stats is.empty.model model.frame model.matrix model.offset na.omit na.pass reformulate relevel terms Diff <- function(player1, player2, formula = NULL, id = "..", data = NULL, separate.ability = NULL, refcat = NULL, contrasts = NULL, subset = NULL) { player.one <- player1[[id]] player.two <- player2[[id]] if (!is.factor(player.one) || !is.factor(player.two) || !identical(levels(player.one), levels(player.two))) stop("'player1$", id, "' and 'player2$", id, "' must be factors with the same levels") if (!identical(attr(player.one, "contrasts"), attr(player.two, "contrasts"))) stop("'player1$", id, "' and 'player2$", id, "' must have the same contrasts attribute") if(is.null(formula)) formula <- reformulate(id) players <- levels(player.one) nplayers <- nlevels(player.one) ncontests <- length(player.one) D <- matrix(nrow = ncontests, ncol = nplayers) D <- col(D) == as.numeric(player.one) D <- D - (col(D) == as.numeric(player.two)) colnames(D) <- paste(id, players, sep = "") fixed <- nobars(formula) X <- offset <- missing <- term.labels <- NULL saturated <- FALSE sep <- list() empty <- is.null(fixed) || is.empty.model(mt <- terms(fixed)) if (!empty) { factors <- attr(mt, "factors") term.labels <- as.character(colnames(factors)) vars <- rownames(factors) indexed <- grep("[[][^],]+[],]", vars) if (length(indexed)) { #set NAs to zero indices <- gsub("[^[]*[[]([^],]+)[],].*", "\\1", vars[indexed]) vars <- gsub("[[][^]]*[]]", "", vars[indexed]) ## assumes no overlap, e.g. no age[..]:judge.gender[judge] grp <- split(vars, indices) for (ind in names(grp)) { vars <- model.frame(terms(reformulate(grp[[ind]])), data = data, na.action = na.pass) lev <- levels(eval(as.name(ind), c(player1, data))) as.sep <- rowSums(is.na(vars)) | lev %in% separate.ability if (any(as.sep)) { sep[[ind]] <- as.sep vars[sep[[ind]], ] <- lapply(vars, function(x) max(levels(x)[1], 0)) colnames(vars) <- gsub(".*[$[],? 
?\"?([^]\"]*).*", "\\1", grp[[ind]]) labels <- gsub("([^[$]*)[[$].*", "\\1", grp[[ind]]) for (lab in intersect(labels, grp[[ind]])) data[lab] <- vars[lab] for (lab in setdiff(labels, grp[[ind]])) data[[lab]] <- vars[, labels == lab, drop = FALSE] } } if (length(sep)) { fixed <- reformulate(c(names(sep), attr(mt, "term.labels"), rownames(attr(mt, "factors"))[ attr(mt, "offset")])) mt <- terms(fixed) } } idterm <- id %in% rownames(attr(mt, "factors")) mf1 <- model.frame(mt, data = c(player1, data), na.action = na.pass) if (nrow(mf1) != ncontests) stop("Predictor variables are not of the correct length --", "they probably need indexing in 'formula'.") mf2 <- model.frame(mt, data = c(player2, data), na.action = na.pass) if (idterm){ if (!is.null(refcat)) { mf1[[id]] <- relevel(mf1[[id]], refcat) mf2[[id]] <- relevel(mf2[[id]], refcat) if (!is.null(contrasts)) contrasts[[id]] <- "contr.treatment" } else { ## 'else' defined by contrasts arg/contrasts attr of id factor ## leave refcat NULL if (is.null(contrasts) & !is.null(attr(player.one, "contrasts"))){ contrasts <- list() contrasts[[id]] <- attr(player.one, "contrasts") } } } offset <- model.offset(mf1) if (!is.null(offset)) offset <- offset - model.offset(mf2) if (length(sep)){ #create separate effect factor recode <- function(x, keep){ lev <- levels(x) ext <- make.unique(c(lev[keep], "nosep"))[sum(keep) + 1] levels(x)[!keep] <- ext relevel(x, ref = ext) } for (ind in names(grp)) { mf1[ind] <- recode(mf1[[ind]], sep[[ind]]) mf2[ind] <- recode(mf2[[ind]], sep[[ind]]) } } X1 <- model.matrix(fixed, mf1, contrasts = contrasts) X2 <- model.matrix(fixed, mf2, contrasts = contrasts) X <- X1 - X2 ## will need to check for saturation in each set of indexed var ## - however as only allowing (1|..) just consider player id for now saturated <- qr(na.omit(X))$rank == qr(na.omit(cbind(D, X)))$rank && !idterm if (all(X[,1] == 0)) X <- X[, -1, drop = FALSE] attr(X, "assign") <- attr(X1, "assign")[-1] } random <- findbars(formula[[2]]) if (!is.null(random)) { if (!is.list(random)) random <- list(random) if (length(random) > 1 || random[[1]] != parse(text = paste("1|", id, sep = ""))[[1]]) stop("Currently '(1 | ", id, ")' is the only random effects", "structure allowed.") random <- D } else if (!empty && (!idterm & !saturated)) warning("Ability modelled by predictors but no random effects", call. = FALSE) if (length(sep)) { attr(X, "assign") <- attr(X, "assign") - 1 if (!is.null(random)) random <- D[,!sep[[id]], drop = FALSE] } list(X = X, random = random, offset = offset, term.labels = term.labels, refcat = refcat, contrasts = contrasts, saturated = saturated) } BradleyTerry2/R/baseball.R0000755000176200001440000000350714775237530015062 0ustar liggesusers#' Baseball Data from Agresti (2002) #' #' Baseball results for games in the 1987 season between 7 teams in the Eastern #' Division of the American League. #' #' #' @name baseball #' @docType data #' @format A data frame with 42 observations on the following 4 variables. #' \describe{ #' \item{home.team}{a factor with levels `Baltimore`, #' `Boston`, `Cleveland`, `Detroit`, `Milwaukee`, `New York`, `Toronto`.} #' \item{away.team}{a factor with levels #' `Baltimore`, `Boston`, `Cleveland`, `Detroit`, #' `Milwaukee`, `New York`, `Toronto`.} #' \item{home.wins}{a numeric vector.} #' \item{away.wins}{a numeric vector.} } #' @note This dataset is in a simpler format than the one described in Firth #' (2005). #' @seealso [BTm()] #' @references Firth, D. (2005) Bradley-Terry models in R. 
*Journal of #' Statistical Software*, **12**(1), 1--12. #' #' Turner, H. and Firth, D. (2012) Bradley-Terry models in R: The BradleyTerry2 #' package. *Journal of Statistical Software*, **48**(9), 1--21. #' @source Page 438 of Agresti, A. (2002) *Categorical Data Analysis* (2nd #' Edn.). New York: Wiley. #' @keywords datasets #' @examples #' #' ## This reproduces the analysis in Sec 10.6 of Agresti (2002). #' data(baseball) # start with baseball data as provided by package #' #' ## Simple Bradley-Terry model, ignoring home advantage: #' baseballModel1 <- BTm(cbind(home.wins, away.wins), home.team, away.team, #' data = baseball, id = "team") #' #' ## Now incorporate the "home advantage" effect #' baseball$home.team <- data.frame(team = baseball$home.team, at.home = 1) #' baseball$away.team <- data.frame(team = baseball$away.team, at.home = 0) #' baseballModel2 <- update(baseballModel1, formula = ~ team + at.home) #' #' ## Compare the fit of these two models: #' anova(baseballModel1, baseballModel2) #' #' "baseball" BradleyTerry2/R/print.summary.glmmPQL.R0000644000176200001440000000443714775237530017455 0ustar liggesusers#' @importFrom stats naprint printCoefmat symnum #' @export print.summary.BTglmmPQL <- function(x, digits = max(3, getOption("digits") - 3), symbolic.cor = x$symbolic.cor, signif.stars = getOption("show.signif.stars"), ...) { if (identical(x$sigma, 0)){ cat("PQL algorithm converged to fixed effects model\n") return(NextMethod("print.summary")) } cat("\nCall:\n", deparse(x$call), sep = "", fill = TRUE) p <- length(x$aliased) tidy.zeros <- function(vec) ifelse(abs(vec) < 100 * .Machine$double.eps, 0, vec) if (p == 0) { cat("\nNo Fixed Effects\n") } else { if (nsingular <- p - x$rank) { cat("\nFixed Effects: (", nsingular, " not defined because of singularities)\n", sep = "") cn <- names(x$aliased) pars <- matrix(NA, p, 4, dimnames = list(cn, colnames(x$fixef))) pars[!x$aliased, ] <- tidy.zeros(x$fixef) } else { cat("\nFixed Effects:\n") pars <- tidy.zeros(x$fixef) } printCoefmat(pars, digits = digits, signif.stars = signif.stars, na.print = "NA", ...) } cat("\n(Dispersion parameter for ", x$family$family, " family taken to be 1)\n", sep = "") cat("\nRandom Effects:\n") pars <- tidy.zeros(x$ranef) printCoefmat(pars, digits = digits, signif.stars = signif.stars, na.print = "NA", ...) 
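    ## report NA handling, the number of PQL iterations and, if available,
    ## the correlation of the fixed-effect coefficient estimates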
if (nzchar(mess <- naprint(x$na.action))) cat("\n", mess, "\n", sep = "") cat("\nNumber of iterations: ", x$iter, "\n", sep = "") correl <- x$correlation if (!is.null(correl)) { if (x$rank > 1) { cat("\nCorrelation of Coefficients:\n") if (is.logical(symbolic.cor) && symbolic.cor) { print(symnum(correl, abbr.colnames = NULL)) } else { correl <- format(round(correl, 2), nsmall = 2, digits = digits) correl[!lower.tri(correl)] <- "" print(correl[-1, -x$rank, drop = FALSE], quote = FALSE) } } } cat("\n") invisible(x) } BradleyTerry2/R/plotProportions.R0000644000176200001440000003403014775237530016542 0ustar liggesusers## P(win|not tie) in terms of expit(lambda_i - lambda_j) GenDavidsonTie <- function(p){ scale <- match("tie.scale", substring(names(coef), 1, 9), 0) if (scale != 0) scale <- exp(coef[scale]) else scale <- 1 tie.mode <- match("tie.mode", substring(names(coef), 1, 8), 0) if (tie.mode != 0) tie.mode <- coef["tie.mode"] delta <- coef[match("tie.max", substring(names(coef), 1, 7))] ## first player is at home weight1 <- plogis(tie.mode) weight2 <- 1 - weight1 ## plogis = expit plogis(delta - scale * (weight1 * log(weight1) + weight2 * log(weight2)) + scale * (weight1 * log(p) + weight2 * log(1-p))) } #tmp <- eval(substitute(player1), data, parent.frame()) #' Plot Proportions of Tied Matches and Non-tied Matches Won #' #' Plot proportions of tied matches and non-tied matches won by the first #' player, within matches binned by the relative player ability, as expressed #' by the probability that the first player wins, given the match is not a tie. #' Add fitted lines for each set of matches, as given by the generalized #' Davidson model. #' #' If `home.adv` is specified, the results are re-ordered if necessary so #' that the home player comes first; any matches played on neutral ground are #' omitted. #' #' First the probability that the first player wins given that the match is not #' a tie is computed: \deqn{expit(home.adv + abilities[player1] - #' abilities[player2])} where `home.adv` and `abilities` are #' parameters from a generalized Davidson model that have been estimated on the #' log scale. #' #' The matches are then binned according to this probability, grouping together #' matches with similar relative ability between the first player and the #' second player. Within each bin, the proportion of tied matches is computed #' and these proportions are plotted against the mid-point of the bin. Then the #' bins are re-computed omitting the tied games and the proportion of non-tied #' matches won by the first player is found and plotted against the new #' mid-point. #' #' Finally curves are added for the probability of a tie and the conditional #' probability of win given the match is not a tie, under a generalized #' Davidson model with parameters as specified by `tie.max`, #' `tie.scale` and `tie.mode`. #' #' The function can also be used to plot the proportions of wins along with the #' fitted probability of a win under the Bradley-Terry model. #' #' @param win a logical vector: `TRUE` if player1 wins, `FALSE` #' otherwise. #' @param tie a logical vector: `TRUE` if the outcome is a tie, #' `FALSE` otherwise (`NULL` if there are no ties). #' @param loss a logical vector: `TRUE` if player1 loses, `FALSE` #' otherwise. #' @param player1 an ID factor specifying the first player in each contest, #' with the same set of levels as `player2`. #' @param player2 an ID factor specifying the second player in each contest, #' with the same set of levels as `player2`. 
#' @param abilities the fitted abilities from a generalized Davidson model (or #' a Bradley-Terry model). #' @param home.adv if applicable, the fitted home advantage parameter from a #' generalized Davidson model (or a Bradley-Terry model). #' @param tie.max the fitted parameter from a generalized Davidson model #' corresponding to the maximum tie probability. #' @param tie.scale if applicable, the fitted parameter from a generalized #' Davidson model corresponding to the scale of dependence of the tie #' probability on the probability that `player1` wins, given the outcome #' is not a draw. #' @param tie.mode if applicable, the fitted parameter from a generalized #' Davidson model corresponding to the location of maximum tie probability, in #' terms of the probability that `player1` wins, given the outcome is not #' a draw. #' @param at.home1 a logical vector: `TRUE` if `player1` is at home, #' `FALSE` otherwise. #' @param at.home2 a logical vector: `TRUE` if `player2` is at home, #' `FALSE` otherwise. #' @param data an optional data frame providing variables required by the #' model, with one observation per match. #' @param subset an optional logical or numeric vector specifying a subset of #' observations to include in the plot. #' @param bin.size the approximate number of matches in each bin. #' @param xlab the label to use for the x-axis. #' @param ylab the label to use for the y-axis. #' @param legend text to use for the legend. #' @param col a vector specifying colours to use for the proportion of non-tied #' matches won and the proportion of tied matches. #' @param \dots further arguments passed to plot. #' @return A list of data frames: \item{win}{ a data frame comprising #' `prop.win`, the proportion of non-tied matches won by the first player #' in each bin and `bin.win`, the mid-point of each bin. } \item{tie}{ #' (when ties are present) a data frame comprising `prop.tie`, the #' proportion of tied matches in each bin and `bin.tie`, the mid-point of #' each bin. } #' @note This function is designed for single match outcomes, therefore data #' aggregated over player pairs will need to be expanded. 
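#'
#' For example, aggregated binomial data could be expanded to one row per
#' match along the following lines (an illustrative sketch only: `agg`,
#' `win1` and `win2` are hypothetical names for a data frame of paired
#' counts and its columns):
#' ```
#' counts <- agg$win1 + agg$win2
#' expanded <- agg[rep(seq_len(nrow(agg)), counts), ]
#' ## TRUE for the first win1 replicates of each pair, FALSE otherwise
#' expanded$win <- sequence(counts) <= rep(agg$win1, counts)
#' ```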
#' @author Heather Turner #' @seealso [GenDavidson()], [BTm()] #' @keywords models nonlinear #' @examples #' #' #### A Bradley-Terry example using icehockey data #' #' ## Fit the standard Bradley-Terry model, ignoring home advantage #' standardBT <- BTm(outcome = result, #' player1 = visitor, player2 = opponent, #' id = "team", data = icehockey) #' #' ## comparing teams on a "level playing field" #' levelBT <- BTm(result, #' data.frame(team = visitor, home.ice = 0), #' data.frame(team = opponent, home.ice = home.ice), #' ~ team + home.ice, #' id = "team", data = icehockey) #' #' ## compare fit to observed proportion won #' ## exclude tied matches as not explicitly modelled here #' par(mfrow = c(1, 2)) #' plotProportions(win = result == 1, loss = result == 0, #' player1 = visitor, player2 = opponent, #' abilities = BTabilities(standardBT)[,1], #' data = icehockey, subset = result != 0.5, #' main = "Without home advantage") #' #' plotProportions(win = result == 1, loss = result == 0, #' player1 = visitor, player2 = opponent, #' home.adv = coef(levelBT)["home.ice"], #' at.home1 = 0, at.home2 = home.ice, #' abilities = BTabilities(levelBT)[,1], #' data = icehockey, subset = result != 0.5, #' main = "With home advantage") #' #' #### A generalized Davidson example using football data #' if (require(gnm)) { #' #' ## subset to first and last season for illustration #' football <- subset(football, season %in% c("2008-9", "2012-13")) #' #' ## convert to trinomial counts #' football.tri <- expandCategorical(football, "result", idvar = "match") #' #' ## add variable to indicate whether team playing at home #' football.tri$at.home <- !logical(nrow(football.tri)) #' #' ## fit Davidson model #' Dav <- gnm(count ~ GenDavidson(result == 1, result == 0, result == -1, #' home:season, away:season, home.adv = ~1, #' tie.max = ~1, #' at.home1 = at.home, #' at.home2 = !at.home) - 1, #' eliminate = match, family = poisson, data = football.tri) #' #' ## fit shifted & scaled Davidson model #' shifScalDav <- gnm(count ~ #' GenDavidson(result == 1, result == 0, result == -1, #' home:season, away:season, home.adv = ~1, #' tie.max = ~1, tie.scale = ~1, tie.mode = ~1, #' at.home1 = at.home, #' at.home2 = !at.home) - 1, #' eliminate = match, family = poisson, data = football.tri) #' #' ## diagnostic plots #' main <- c("Davidson", "Shifted & Scaled Davidson") #' mod <- list(Dav, shifScalDav) #' names(mod) <- main #' alpha <- names(coef(Dav)[-(1:2)]) #' #' ## use football.tri data so that at.home can be found, #' ## but restrict to actual match results #' par(mfrow = c(1,2)) #' for (i in 1:2) { #' coef <- parameters(mod[[i]]) #' plotProportions(result == 1, result == 0, result == -1, #' home:season, away:season, #' abilities = coef[alpha], #' home.adv = coef["home.adv"], #' tie.max = coef["tie.max"], #' tie.scale = coef["tie.scale"], #' tie.mode = coef["tie.mode"], #' at.home1 = at.home, #' at.home2 = !at.home, #' main = main[i], #' data = football.tri, subset = count == 1) #' } #' } #' #' @importFrom graphics curve plot points #' @importFrom stats na.omit #' @export plotProportions <- function(win, tie = NULL, loss, player1, player2, abilities = NULL, home.adv = NULL, tie.max = NULL, tie.scale = NULL, tie.mode = NULL, at.home1 = NULL, at.home2 = NULL, data = NULL, subset = NULL, bin.size = 20, xlab = "P(player1 wins | not a tie)", ylab = "Proportion", legend = NULL, col = 1:2, ...){ call <- as.list(match.call()) var <- intersect(names(call), c("win", "tie", "loss", "player1", "player2", "at.home1", "at.home2")) var <- 
var[!vapply(call[var], is.null, logical(1))] dat <- with(data, do.call("data.frame", call[var])) if (!missing(subset)){ subset <- eval(substitute(subset), data, parent.frame()) dat <- subset(dat, subset) } if (!missing(tie) && sum(dat$tie) == 0) dat$tie <- NULL if (!is.null(home.adv) && (missing(at.home1) || missing(at.home2))) stop("at.home1 and at.home2 must be specified") if (!is.null(home.adv)){ ## exclude neutral contests, make sure home player is first dat <- subset(dat, at.home1 | at.home2) swap <- which(as.logical(dat$at.home2)) if (length(swap)) { dat$win[swap] <- dat$loss[swap] if (is.null(dat$tie)) dat$loss[swap] <- !dat$win[swap] else dat$loss[swap] <- !(dat$win[swap] | dat$tie[swap]) tmp <- dat$player1[swap] dat$player1[swap] <- dat$player2[swap] dat$player2[swap] <- tmp dat$at.home1[swap] <- TRUE dat$at.home2[swap] <- FALSE } } else home.adv <- 0 ### get proportions p <- with(dat, plogis(home.adv + abilities[as.character(player1)] - abilities[as.character(player2)])) ## Depending on the distribution of p_ij (across all matches), ## divide the range of probabilities p_ij into discrete "bins", each ## of which has at least (say) 20 matches in it getBins <- function(p, bin.size) { ## alternatively estimate bins to same size intervals ## at least bin.size - distribute extra evenly over range min.size <- bin.size n <- length(p) r <- n %% min.size size <- rep(min.size, n %/% min.size) if (r > 0) { step <- length(size)/r extra <- round(seq(from = step/2 + 0.01, to = step/2 + 0.01 + (r - 1)*step, by = step)) size[extra] <- min.size + 1 } bin <- factor(rep(seq(length(size)), size))[match(p, sort(p))] low <- sort(p)[cumsum(c(1, size[-length(size)]))] #first high <- sort(p)[cumsum(size)] #last mid <- (high - low)/2 + low list(bin = bin, mid = mid) } winBin <- getBins(p, bin.size) ## Within each bin b, calculate ## d_b = proportion of matches in that bin that were drawn if (!is.null(dat$tie)) { tieBin <- winBin tri <- with(dat, win - (!win & !tie)) d_b <- tapply(tri, tieBin$bin, function(x) sum(x == 0)/length(x)) ## recompute bins omitting ties winBin <- getBins(p[!dat$tie], bin.size) } ## h_b = proportion of *non-drawn* matches in that bin that were won ## by the home team if (!is.null(dat$tie)) { h_b <- tapply(tri[!dat$tie], winBin$bin, function(x) sum(x == 1)/length(x)) } else h_b <- tapply(dat$win, winBin$bin, function(x) sum(x == 1)/length(x)) ## Plot d_b and h_b against the bin midpoints, in a plot with ## axis limits both (0,1) plot(h_b ~ winBin$mid, xlim = c(0, 1), ylim = c(0, 1), xlab = xlab, ylab = ylab, ...) if (missing(legend)) { if (is.null(dat$tie)) legend <- "Matches won" else legend <- c("Non-tied matches won", "Matches tied") } legend("topleft", legend, col = col[c(1, 2[!missing(tie)])], pch = 1) if (!is.null(dat$tie)) points(d_b ~ tieBin$mid, col = col[2]) ## Add to the plot the lines/curves ## y = x ## y = expit(log(nu * sqrt(p_ij * (1 - p_ij)))) ## The d_b should lie around the latter curve, and the h_b should ## lie around the former line. Any clear patterns of departure are ## of interest. 
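    ## draw the identity line for h_b, then the fitted tie-probability curve:
    ## GenDavidsonTie() reads the generalized Davidson parameters from the
    ## environment assigned to it below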
curve(I, 0, 1, add = TRUE) env <- new.env() environment(GenDavidsonTie) <- env coef <- na.omit(c(home.adv = unname(home.adv), tie.max = unname(tie.max), tie.scale = unname(tie.scale), tie.mode = unname(tie.mode))) assign("coef", coef, envir=env) curve(GenDavidsonTie, 0, 1, col = col[2], add = TRUE) out <- list(win = data.frame(prop.win = h_b, bin.win = winBin$mid)) if (!is.null(dat$tie)) out <- c(out, tie = data.frame(prop.tie = d_b, bin.tie = tieBin$mid)) invisible(out) } BradleyTerry2/R/countsToBinomial.R0000644000176200001440000000354614775237530016606 0ustar liggesusers#' Convert Contingency Table of Wins to Binomial Counts #' #' Convert a contingency table of wins to a four-column data frame containing #' the number of wins and losses for each pair of players. #' #' #' @param xtab a contingency table of wins cross-classified by \dQuote{winner} #' and \dQuote{loser} #' @return A data frame with four columns \item{player1 }{ the first player in #' the contest. } \item{player2 }{ the second player in the contest. } #' \item{win1 }{ the number of times `player1` won. } \item{win2 }{ the #' number of times `player2` won. } #' @author Heather Turner #' @seealso [BTm()] #' @keywords models #' @examples #' #' ######################################################## #' ## Statistics journal citation data from Stigler (1994) #' ## -- see also Agresti (2002, p448) #' ######################################################## #' citations #' #' ## Convert frequencies to success/failure data #' citations.sf <- countsToBinomial(citations) #' names(citations.sf)[1:2] <- c("journal1", "journal2") #' citations.sf #' #' @importFrom gtools combinations #' @export countsToBinomial <- function(xtab) { ## make square if necessary if (nrow(xtab) != ncol(xtab) || !all(rownames(xtab) == colnames(xtab))) { dat <- as.data.frame(xtab) lev <- union(rownames(xtab), colnames(xtab)) dat[,1] <- factor(dat[,1], levels = lev) dat[,2] <- factor(dat[,2], levels = lev) xtab <- tapply(dat[,3], dat[1:2], sum) xtab[is.na(xtab)] <- 0 } ##assumes square players <- rownames(xtab) comb <- combinations(nrow(xtab), 2) won <- xtab[comb] lost <- t(xtab)[comb] res <- !(won == 0 & lost == 0) player1 <- factor(players[comb[,1]], levels = players)[res] player2 <- factor(players[comb[,2]], levels = players)[res] data.frame(player1, player2, win1 = won[res], win2 = lost[res]) } BradleyTerry2/R/print.BTglmmPQL.R0000644000176200001440000000124214775237530016176 0ustar liggesusers#' @importFrom stats coef naprint #' @export print.BTglmmPQL <- function (x, digits = max(3, getOption("digits") - 3), ...) { if (identical(x$sigma, 0)){ cat("PQL algorithm converged to fixed effects model\n") return(NextMethod()) } cat("\nCall: ", deparse(x$call), "\n", sep = "", fill = TRUE) if (length(coef(x))) { cat("Fixed effects:\n\n") print.default(format(x$coefficients, digits = digits), print.gap = 2, quote = FALSE) } else cat("No fixed effects\n\n") cat("\nRandom Effects Std. Dev.:", x$sigma, "\n") if (nzchar(mess <- naprint(x$na.action))) cat("\n", mess, "\n", sep = "") } BradleyTerry2/R/missToZero.R0000644000176200001440000000015414775237530015423 0ustar liggesusersmissToZero <- function(x, miss, dim = 1) { if (dim == 1) x[miss, ] <- 0 else x[, miss] <- 0 x } BradleyTerry2/R/print.BTm.R0000644000176200001440000000017614775237530015126 0ustar liggesusers#' @export print.BTm <- function (x, ...) 
{ cat("Bradley Terry model fit by ") cat(x$method, "\n") NextMethod() } BradleyTerry2/R/sound.fields.R0000755000176200001440000000715714775237530015717 0ustar liggesusers#' Kousgaard (1984) Data on Pair Comparisons of Sound Fields #' #' The results of a series of factorial subjective room acoustic experiments #' carried out at the Technical University of Denmark by A C Gade. #' #' The variables `win1.adj` and `win2.adj` are provided in order to #' allow a simple way of handling ties (in which a tie counts as half a win and #' half a loss), which is slightly different numerically from the Davidson #' (1970) method that is used by Kousgaard (1984): see the examples. #' #' @name sound.fields #' @docType data #' @format A list containing two data frames, `sound.fields$comparisons`, #' and `sound.fields$design`. #' #' The `sound.fields$comparisons` data frame has 84 observations on the #' following 8 variables: \describe{ #' \item{field1}{a factor with levels #' `c("000", "001", "010", "011", "100", "101", "110", "111")`, the first #' sound field in a comparison} #' \item{field2}{a factor with the same #' levels as `field1`; the second sound field in a comparison} #' \item{win1}{integer, the number of times that `field1` was #' preferred to `field2`} #' \item{tie}{integer, the number of times #' that no preference was expressed when comparing `field1` and #' `field2`} #' \item{win2}{integer, the number of times that #' `field2` was preferred to `field1`} #' \item{win1.adj}{numeric, equal to `win1 + tie/2`} #' \item{win2.adj}{numeric, equal to `win2 + tie/2`} #' \item{instrument}{a factor with 3 levels, `c("cello", "flute", #' "violin")`} } #' #' The `sound.fields$design` data frame has 8 observations (one for each #' of the sound fields compared in the experiment) on the following 3 #' variables: \describe{ #' \item{a")}{a factor with levels `c("0", #' "1")`, the *direct sound* factor (0 for *obstructed sight line*, 1 #' for *free sight line*); contrasts are sum contrasts} #' \item{b}{a #' factor with levels `c("0", "1")`, the *reflection* factor (0 for #' *-26dB*, 1 for *-20dB*); contrasts are sum contrasts} #' \item{c}{a factor with levels `c("0", "1")`, the #' *reverberation* factor (0 for *-24dB*, 1 for *-20dB*); #' contrasts are sum contrasts} } #' @author David Firth #' @references Davidson, R. R. (1970) Extending the Bradley-Terry model to #' accommodate ties in paired comparison experiments. *Journal of the #' American Statistical Association* **65**, 317--328. #' @source Kousgaard, N. (1984) Analysis of a Sound Field Experiment by a Model #' for Paired Comparisons with Explanatory Variables. *Scandinavian #' Journal of Statistics* **11**, 51--57. 
#' @keywords datasets #' @examples #' #' ## #' ## Fit the Bradley-Terry model to data for flutes, using the simple #' ## 'add 0.5' method to handle ties: #' ## #' flutes.model <- BTm(cbind(win1.adj, win2.adj), field1, field2, ~ field, #' id = "field", #' subset = (instrument == "flute"), #' data = sound.fields) #' ## #' ## This agrees (after re-scaling) quite closely with the estimates given #' ## in Table 3 of Kousgaard (1984): #' ## #' table3.flutes <- c(-0.581, -1.039, 0.347, 0.205, 0.276, 0.347, 0.311, 0.135) #' plot(c(0, coef(flutes.model)), table3.flutes) #' abline(lm(table3.flutes ~ c(0, coef(flutes.model)))) #' ## #' ## Now re-parameterise that model in terms of the factorial effects, as #' ## in Table 5 of Kousgaard (1984): #' ## #' flutes.model.reparam <- update(flutes.model, #' formula = ~ a[field] * b[field] * c[field] #' ) #' table5.flutes <- c(.267, .250, -.088, -.294, .062, .009, -0.070) #' plot(coef(flutes.model.reparam), table5.flutes) #' abline(lm(table5.flutes ~ coef(flutes.model.reparam))) #' "sound.fields" BradleyTerry2/R/springall.R0000755000176200001440000000565114775237530015312 0ustar liggesusers#' Springall (1973) Data on Subjective Evaluation of Flavour Strength #' #' Data from Section 7 of the paper by Springall (1973) on Bradley-Terry #' response surface modelling. An experiment to assess the effects of gel and #' flavour concentrations on the subjective assessment of flavour strength by #' pair comparisons. #' #' The variables `win.adj` and `loss.adj` are provided in order to #' allow a simple way of handling ties (in which a tie counts as half a win and #' half a loss), which is slightly different numerically from the Rao and #' Kupper (1967) model that Springall (1973) uses. #' #' @name springall #' @docType data #' @format A list containing two data frames, `springall$contests` and #' `springall$predictors`. #' #' The `springall$contests` data frame has 36 observations (one for each #' possible pairwise comparison of the 9 treatments) on the following 7 #' variables: \describe{ #' \item{row}{a factor with levels `1:9`, #' the row number in Springall's dataset} # #' \item{col}{a factor with #' levels `1:9`, the column number in Springall's dataset} #' \item{win}{integer, the number of wins for column treatment over row #' treatment} #' \item{loss}{integer, the number of wins for row treatment #' over column treatment} #' \item{tie}{integer, the number of ties #' between row and column treatments} #' \item{win.adj}{numeric, equal to #' `win + tie/2`} #' \item{loss.adj}{numeric, equal to `loss + tie/2`} } #' #' The `predictors` data frame has 9 observations (one for each treatment) #' on the following 5 variables: \describe{ #' \item{flav}{numeric, the #' flavour concentration} #' \item{gel}{numeric, the gel concentration} #' \item{flav.2}{numeric, equal to `flav^2`} #' \item{gel.2}{numeric, equal to `gel^2`} #' \item{flav.gel}{numeric, equal to `flav * gel`} } #' @author David Firth #' @references Rao, P. V. and Kupper, L. L. (1967) Ties in paired-comparison #' experiments: a generalization of the Bradley-Terry model. *Journal of #' the American Statistical Association*, **63**, 194--204. #' @source Springall, A (1973) Response surface fitting using a generalization #' of the Bradley-Terry paired comparison method. *Applied Statistics* #' **22**, 59--68. #' @keywords datasets #' @examples #' #' ## #' ## Fit the same response-surface model as in section 7 of #' ## Springall (1973). 
#' ## #' ## Differences from Springall's fit are minor, arising from the #' ## different treatment of ties. #' ## #' ## Springall's model in the paper does not include the random effect. #' ## In this instance, however, that makes no difference: the random-effect #' ## variance is estimated as zero. #' ## #' summary(springall.model <- BTm(cbind(win.adj, loss.adj), col, row, #' ~ flav[..] + gel[..] + #' flav.2[..] + gel.2[..] + flav.gel[..] + #' (1 | ..), #' data = springall)) #' "springall" BradleyTerry2/R/BTm.setup.R0000644000176200001440000000525114775237530015131 0ustar liggesusers#' @importFrom stats reformulate BTm.setup <- function(outcome = 1, player1, player2, formula = NULL, id = "..", separate.ability = NULL, refcat = NULL, data = NULL, weights = NULL, subset = NULL, offset = NULL, contrasts = NULL, ...){ if (!is.data.frame(data)){ keep <- names(data) %in% c(deparse(substitute(player1)), deparse(substitute(player2))) if (!length(keep)) keep <- FALSE ## save row names for checking against index variables (in Diff) data <- lapply(data, as.data.frame) nm <- lapply(data, rownames) data <- c(data[keep], unlist(unname(data[!keep]), recursive = FALSE)) if (any(dup <- duplicated(names(data)))) warning("'data' argument specifies duplicate variable names: ", paste(names(data)[dup], collapse = " ")) } ## (will take first occurence of replicated names) withIfNecessary <- function(x, formula, data = NULL, as.data.frame = TRUE) { if (as.data.frame) expr <- substitute(data.frame(x), list(x = x)) else expr <- x eval(expr, data, enclos = environment(formula)) } player1 <- withIfNecessary(substitute(player1), formula, data) player2 <- withIfNecessary(substitute(player2), formula, data) if (ncol(player1) == 1) colnames(player1) <- colnames(player2) <- id Y <- withIfNecessary(substitute(outcome), formula, c(player1, player2, data), as.data.frame = FALSE) weights <- withIfNecessary(substitute(weights), formula, data, FALSE) subset1 <- withIfNecessary(substitute(subset), formula, c(player1 = list(player1), player2 = list(player2), player1, data), FALSE) subset2 <- withIfNecessary(substitute(subset), formula, c(player1 = list(player1), player2 = list(player2), player2, data), FALSE) if (is.logical(subset1)) subset <- subset1 | subset2 else subset <- c(subset1, subset2) diffModel <- Diff(player1, player2, formula, id, data, separate.ability, refcat, contrasts, nm) # offset is contest level offset <- withIfNecessary(substitute(offset), formula, data, FALSE) if (!is.null(offset)) { if (is.null(diffModel$offset)) diffModel$offset <- offset else diffModel$offset <- diffModel$offset + offset } res <- c(diffModel, list(data = data, player1 = player1, player2 = player2, Y = Y, weights = weights, subset = subset, formula = formula)) } BradleyTerry2/R/add1.BTm.R0000644000176200001440000002200114775237530014572 0ustar liggesusers#' Add or Drop Single Terms to/from a Bradley Terry Model #' #' Add or drop single terms within the limit specified by the `scope` #' argument. For models with no random effects, compute an analysis of deviance #' table, otherwise compute the Wald statistic of the parameters that have been #' added to or dropped from the model. #' #' The hierarchy is respected when considering terms to be added or dropped: #' all main effects contained in a second-order interaction must remain, and so #' on. #' #' In a scope formula \samp{.} means \sQuote{what is already there}. #' #' For `drop1`, a missing `scope` is taken to mean that all terms in #' the model may be considered for dropping. 
#' #' If `scope` includes player covariates and there are players with #' missing values over these covariates, then a separate ability will be #' estimated for these players in *all* fitted models. Similarly if there #' are missing values in any contest-level variables in `scope`, the #' corresponding contests will be omitted from all models. #' #' If `formula` includes random effects, the same random effects structure #' will apply to all models. #' #' @aliases add1.BTm drop1.BTm #' @param object a fitted object of class inheriting from `"BTm"`. #' @param scope a formula specifying the model including all terms to be #' considered for adding or dropping. #' @param scale an estimate of the dispersion. Not implemented for models with #' random effects. #' @param test should a p-value be returned? The F test is only appropriate for #' models with no random effects for which the dispersion has been estimated. #' The Chisq test is a likelihood ratio test for models with no random effects, #' otherwise a Wald test. #' @param x a model matrix containing columns for all terms in the scope. #' Useful if `add1` is to be called repeatedly. **Warning:** no checks #' are done on its validity. #' @param \dots further arguments passed to [add1.glm()]. #' @return An object of class `"anova"` summarizing the differences in fit #' between the models. #' @author Heather Turner #' @seealso [BTm()], [anova.BTm()] #' @keywords models #' @examples #' #' result <- rep(1, nrow(flatlizards$contests)) #' BTmodel1 <- BTm(result, winner, loser, #' ~ throat.PC1[..] + throat.PC3[..] + (1|..), #' data = flatlizards, #' tol = 1e-4, sigma = 2, trace = TRUE) #' #' drop1(BTmodel1) #' #' add1(BTmodel1, ~ . + head.length[..] + SVL[..], test = "Chisq") #' #' BTmodel2 <- update(BTmodel1, formula = ~ . + head.length[..]) #' #' drop1(BTmodel2, test = "Chisq") #' #' @importFrom stats add.scope coef model.frame model.offset model.response model.weights formula pchisq pf reformulate terms update update.formula vcov #' @importFrom lme4 findbars nobars #' @export add1.BTm <- function(object, scope, scale = 0, test = c("none", "Chisq", "F"), x = NULL, ...) { old.form <- formula(object) new.form <- update.formula(old.form, scope) if (!is.character(scope)){ orandom <- findbars(old.form[[2]]) srandom <- findbars(new.form[[2]]) if (length(srandom) && !identical(orandom, srandom)) stop("Random effects structure of object and scope must be ", "identical.") scope <- add.scope(old.form, new.form) } if (!length(scope)) stop("no terms in scope for adding to object") if (is.null(x)) { # create model.matrix for maximum scope model <- Diff(object$player1, object$player2, new.form, object$id, object$data, object$separate.ability, object$refcat) if (sum(model$offset) > 0) warning("ignoring offset terms in scope") x <- model$X asgn <- attr(x, "assign") ## add dummy term for any separate effects oTerms <- c("sep"[0 %in% asgn], object$term.labels) object$terms <- terms(reformulate(oTerms)) y <- object$y dummy <- y ~ x - 1 if (!is.null(model$random)) { dummy <- update(dummy, .~ . 
+ Z) Z <- model$random } argPos <- match(c("weights", "subset", "na.action"), names(object$call), 0) mf <- as.call(c(model.frame, as.list(object$call)[argPos], list(formula = dummy, offset = object$offset))) mf <- eval(mf, parent.frame()) x <- mf$x y <- model.response(mf) Z <- mf$Z wt <- model.weights(mf) if (is.null(wt)) wt <- rep.int(1, length(y)) offset <- model.offset(mf) } else { asgn <- attr(x, "assign") y <- object$y wt <- object$prior.weights offset <- object$offset Z <- object$random } if (is.null(object$random)){ attr(x, "assign") <- asgn + 1 object$formula <- formula(object$terms) object$x <- x object$y <- y object$random <- Z object$prior.weights <- wt object$offset <- offset stat.table <- NextMethod(x = x) attr(stat.table, "heading")[3] <- deparse(old.form) if (newsep <- sum(asgn == 0) - sum(object$assign ==0)) attr(stat.table, "heading") <- c(attr(stat.table, "heading"), paste("\n", newsep, " separate effects added\n", sep = "")) attr(stat.table, "separate.abilities") <- colnames(x)[asgn == 0] return(stat.table) } ## use original term labels: no sep effects or backticks (typically) oTerms <- attr(terms(nobars(old.form)), "term.labels") Terms <- attr(terms(nobars(new.form)), "term.labels") ousex <- asgn %in% c(0, which(Terms %in% oTerms)) sTerms <- vapply(strsplit(Terms, ":", fixed = TRUE), function(x) paste(sort(x), collapse = ":"), character(1)) method <- switch(object$method, glmmPQL.fit) control <- object$control control$trace <- FALSE if (scale == 0) dispersion <- 1 else dispersion <- scale ns <- length(scope) stat <- df <- numeric(ns) # don't add in original as don't need for tests names(stat) <- names(df) <- as.character(scope) tryerror <- FALSE for (i in seq(scope)) { stt <- paste(sort(strsplit(scope[i], ":")[[1]]), collapse = ":") usex <- match(asgn, match(stt, sTerms), 0) > 0 | ousex fit <- method(X = x[, usex, drop = FALSE], y = y, Z = Z, weights = wt, offset = offset, family = object$family, control = control, sigma = object$call$sigma, sigma.fixed = object$sigma.fixed) class(fit) <- oldClass(object) ind <- (usex & !ousex)[usex] trystat <- try(t(coef(fit)[ind]) %*% chol2inv(chol(vcov(fit, dispersion = dispersion)[ind, ind])) %*% coef(fit)[ind], silent = TRUE) #vcov should handle disp != 1 if (inherits(trystat, "try-error")) { stat[i] <- df[i] <- NA tryerror <- TRUE } else { stat[i] <- trystat df[i] <- sum(ind) } } table <- data.frame(stat, df) dimnames(table) <- list(names(df), c("Statistic", "Df")) title <- "Single term additions\n" topnote <- paste("Model: ", deparse(as.vector(formula(object))), if (scale > 0) paste("\nscale: ", format(scale), "\n"), if (tryerror) "\n\nTest statistic unestimable for at least one term") test <- match.arg(test) if (test == "Chisq") { dfs <- table[, "Df"] vals <- table[, "Statistic"] vals[dfs %in% 0] <- NA table <- cbind(table, `P(>|Chi|)` = pchisq(vals, abs(dfs), lower.tail = FALSE)) } else if (test == "F") { ## Assume dispersion fixed at one - if dispersion estimated, would use ## "residual" df from larger model in each comparison df.dispersion <- Inf if (df.dispersion == Inf) { fam <- object[[1]]$family$family if (fam == "binomial" || fam == "poisson") warning(gettextf( "using F test with a '%s' family is inappropriate", fam), domain = NA, call. 
= FALSE) else { warning("using F test with a fixed dispersion is inappropriate") } } dfs <- table[, "Df"] Fvalue <- table[, "Statistic"]/abs(dfs) Fvalue[dfs %in% 0] <- NA table <- cbind(table, F = Fvalue, `Pr(>F)` = pf(Fvalue, abs(dfs), df.dispersion, lower.tail = FALSE)) } if (newsep <- sum(asgn == 0) - sum(object$assign ==0)) heading <- c(heading, paste("\n", newsep, " separate effects added\n", sep = "")) structure(table, heading = c(title, topnote), class = c("anova", "data.frame"), separate.abilities = colnames(x)[asgn == 0]) } BradleyTerry2/R/glmmPQL.R0000644000176200001440000002506314775237530014624 0ustar liggesusers#' PQL Estimation of Generalized Linear Mixed Models #' #' Fits GLMMs with simple random effects structure via Breslow and Clayton's #' PQL algorithm. #' The GLMM is assumed to be of the form \ifelse{html}{\out{g(μ) = #' + Ze}}{\deqn{g(\boldsymbol{\mu}) = \boldsymbol{X\beta} #' + \boldsymbol{Ze}}{ g(mu) = X * beta + Z * e}} where \eqn{g} is the link #' function, \ifelse{html}{\out{μ}}{\eqn{\boldsymbol{\mu}}{mu}} is the #' vector of means and \ifelse{html}{\out{X, Z}}{\eqn{\boldsymbol{X}, #' \boldsymbol{Z}}{X,Z}} are design matrices for the fixed effects #' \ifelse{html}{\out{β}}{\eqn{\boldsymbol{\beta}}{beta}} and random #' effects \ifelse{html}{\out{e}}{\eqn{\boldsymbol{e}}{e}} respectively. #' Furthermore the random effects are assumed to be i.i.d. #' \ifelse{html}{\out{N(0, σ2)}}{\eqn{N(0, \sigma^2)}{ #' N(0, sigma^2)}}. #' #' @param fixed a formula for the fixed effects. #' @param random a design matrix for the random effects, with number of rows #' equal to the length of variables in `formula`. #' @param family a description of the error distribution and link function to #' be used in the model. This can be a character string naming a family #' function, a family function or the result of a call to a family function. #' (See [family()] for details of family functions.) #' @param data an optional data frame, list or environment (or object coercible #' by [as.data.frame()] to a data frame) containing the variables in #' the model. If not found in `data`, the variables are taken from #' `environment(formula)`, typically the environment from which #' `glmmPQL` called. #' @param subset an optional logical or numeric vector specifying a subset of #' observations to be used in the fitting process. #' @param weights an optional vector of \sQuote{prior weights} to be used in #' the fitting process. #' @param offset an optional numeric vector to be added to the linear predictor #' during fitting. One or more `offset` terms can be included in the #' formula instead or as well, and if more than one is specified their sum is #' used. See [model.offset()]. #' @param na.action a function which indicates what should happen when the data #' contain `NA`s. The default is set by the `na.action` setting of #' [options()], and is [na.fail()] if that is unset. #' @param start starting values for the parameters in the linear predictor. #' @param etastart starting values for the linear predictor. #' @param mustart starting values for the vector of means. #' @param control a list of parameters for controlling the fitting process. #' See the [glmmPQL.control()] for details. #' @param sigma a starting value for the standard deviation of the random #' effects. #' @param sigma.fixed logical: whether or not the standard deviation of the #' random effects should be fixed at its starting value. #' @param model logical: whether or not the model frame should be returned. 
#' @param x logical: whether or not the design matrix for the fixed effects
#' should be returned.
#' @param contrasts an optional list. See the `contrasts.arg` argument of
#' [model.matrix()].
#' @param \dots arguments to be passed to [glmmPQL.control()].
#' @return An object of class `"BTglmmPQL"` which inherits from
#' `"glm"` and `"lm"`: \item{coefficients}{ a named vector of
#' coefficients, with a `"random"` attribute giving the estimated random
#' effects.} \item{residuals}{ the working residuals from the final iteration
#' of the IWLS loop.} \item{random}{the design matrix for the random effects.}
#' \item{fitted.values}{ the fitted mean values, obtained by transforming the
#' linear predictors by the inverse of the link function.} \item{rank}{the
#' numeric rank of the fitted linear model.} \item{family}{the `family`
#' object used.} \item{linear.predictors}{the linear fit on link scale.}
#' \item{deviance}{up to a constant, minus twice the maximized log-likelihood.}
#' \item{aic}{a version of Akaike's *An Information Criterion*, minus
#' twice the maximized log-likelihood plus twice the number of parameters,
#' computed by the `aic` component of the family.}
#' \item{null.deviance}{the deviance for the null model, comparable with
#' `deviance`.} \item{iter}{the number of iterations of the PQL algorithm.}
#' \item{weights}{the working weights, that is the weights in the final
#' iteration of the IWLS loop.} \item{prior.weights}{the weights initially
#' supplied, a vector of `1`'s if none were.} \item{df.residual}{the
#' residual degrees of freedom.} \item{df.null}{the residual degrees of freedom
#' for the null model.} \item{y}{if requested (the default) the `y` vector
#' used. (It is a vector even for a binomial model.)} \item{x}{if requested,
#' the model matrix.} \item{model}{if requested (the default), the model
#' frame.} \item{converged}{logical. Was the PQL algorithm judged to have
#' converged?} \item{call}{the matched call.} \item{formula}{the formula
#' supplied.} \item{terms}{the `terms` object used.} \item{data}{the
#' `data` argument used.} \item{offset}{the offset vector used.}
#' \item{control}{the value of the `control` argument used.}
#' \item{contrasts}{(where relevant) the contrasts used.} \item{xlevels}{(where
#' relevant) a record of the levels of the factors used in fitting.}
#' \item{na.action}{(where relevant) information returned by `model.frame`
#' on the special handling of `NA`s.} \item{sigma}{the estimated standard
#' deviation of the random effects} \item{sigma.fixed}{logical: whether or not
#' `sigma` was fixed} \item{varFix}{the variance-covariance matrix of the
#' fixed effects} \item{varSigma}{the variance of `sigma`}
#' @author Heather Turner
#' @seealso
#' [predict.BTglmmPQL()],[glmmPQL.control()],[BTm()]
#' @references Breslow, N. E. and Clayton, D. G. (1993) Approximate inference
#' in Generalized Linear Mixed Models. *Journal of the American
#' Statistical Association* **88**(421), 9--25.
#'
#' Harville, D. A. (1977) Maximum likelihood approaches to variance component
#' estimation and to related problems. *Journal of the American
#' Statistical Association* **72**(358), 320--338.
#' @keywords models #' @examples #' #' ############################################### #' ## Crowder seeds example from Breslow & Clayton #' ############################################### #' #' summary(glmmPQL(cbind(r, n - r) ~ seed + extract, #' random = diag(nrow(seeds)), #' family = "binomial", data = seeds)) #' #' summary(glmmPQL(cbind(r, n - r) ~ seed*extract, #' random = diag(nrow(seeds)), #' family = "binomial", data = seeds)) #' #' @importFrom stats gaussian .getXlevels glm.control is.empty.model glm.control glm.fit model.frame model.matrix model.offset model.response model.weights optimize terms #' @export glmmPQL <- function(fixed, random = NULL, family = "binomial", data = NULL, subset = NULL, weights = NULL, offset = NULL, na.action = NULL, start = NULL, etastart = NULL, mustart = NULL, control = glmmPQL.control(...), sigma = 0.1, sigma.fixed = FALSE, model = TRUE, x = FALSE, contrasts = NULL, ...) { call <- match.call() nm <- names(call)[-1] if (is.null(random)) { keep <- is.element(nm, c("family", "data", "subset", "weights", "offset", "na.action")) for (i in nm[!keep]) call[[i]] <- NULL call$formula <- fixed environment(call$formula) <- environment(fixed) call[[1]] <- as.name("glm") return(eval.parent(call)) } modelTerms <- terms(fixed, data = data) modelCall <- as.list(match.call(expand.dots = FALSE)) argPos <- match(c("data", "subset", "na.action", "weights", "offset"), names(modelCall), 0) modelData <- as.call(c(model.frame, list(formula = modelTerms, drop.unused.levels = TRUE), modelCall[argPos])) modelData <- eval(modelData, parent.frame()) if (!is.matrix(random) || nrow(random) != nrow(modelData)) { stop("`random` should be a matrix object, with ", nrow(modelData), " rows.") } if (!is.null(modelCall$subset)) Z <- random[eval(modelCall$subset, data, parent.frame()),] else Z <- random if (!is.null(attr(modelData, "na.action"))) Z <- Z[-attr(modelData, "na.action"),] nObs <- nrow(modelData) y <- model.response(modelData, "numeric") if (is.null(y)) y <- rep(0, nObs) weights <- as.vector(model.weights(modelData)) if (!is.null(weights) && any(weights < 0)) stop("negative weights are not allowed") if (is.null(weights)) weights <- rep.int(1, nObs) offset <- as.vector(model.offset(modelData)) if (is.null(offset)) offset <- rep.int(0, nObs) if (is.character(family)) family <- get(family, mode = "function", envir = parent.frame()) if (is.function(family)) family <- family() if (is.null(family$family)) { print(family) stop("`family' not recognized") } if (family$family == "binomial") { if (is.factor(y) && NCOL(y) == 1) y <- y != levels(y)[1] else if (NCOL(y) == 2) { n <- y[, 1] + y[, 2] y <- ifelse(n == 0, 0, y[, 1]/n) weights <- weights * n } } ## Use GLM to estimate fixed effects empty <- is.empty.model(modelTerms) if (!empty) X <- model.matrix(formula(modelTerms), data = modelData, contrasts) else X <- matrix(, nObs, 0) fit <- glmmPQL.fit(X = X, y = y, Z = Z, weights = weights, start = start, etastart = etastart, mustart = mustart, offset = offset, family = family, control = control, sigma = sigma, sigma.fixed = sigma.fixed, ...) 
if (sum(offset) && attr(modelTerms, "intercept") > 0) { fit$null.deviance <- glm.fit(x = X[, "(Intercept)", drop = FALSE], y = y, weights = weights, offset = offset, family = family, control = glm.control(), intercept = TRUE)$deviance } if (model) fit$model <- modelData fit$na.action <- attr(modelData, "na.action") if (x) fit$x <- X fit <- c(fit, list(call = call, formula = fixed, random = random, terms = modelTerms, data = data, offset = offset, control = control, method = "glmmPQL.fit", contrasts = attr(X, "contrasts"), xlevels = .getXlevels(modelTerms, modelData))) class(fit) <- c("BTglmmPQL", "glm", "lm") fit } BradleyTerry2/R/BTm.R0000644000176200001440000003252314775237530013774 0ustar liggesusers#' Bradley-Terry Model and Extensions #' #' Fits Bradley-Terry models for pair comparison data, including models with #' structured scores, order effect and missing covariate data. Fits by either #' maximum likelihood or maximum penalized likelihood (with Jeffreys-prior #' penalty) when abilities are modelled exactly, or by penalized #' quasi-likelihood when abilities are modelled by covariates. #' #' In each comparison to be modelled there is a 'first player' and a 'second #' player' and it is assumed that one player wins while the other loses (no #' allowance is made for tied comparisons). #' #' The [countsToBinomial()] function is provided to convert a #' contingency table of wins into a data frame of wins and losses for each pair #' of players. #' #' The `formula` argument specifies the model for player ability and #' applies to both the first player and the second player in each contest. If #' `NULL` a separate ability is estimated for each player, equivalent to #' setting `formula = reformulate(id)`. #' #' Contest-level variables can be specified in the formula in the usual manner, #' see [formula()]. Player covariates should be included as variables #' indexed by `id`, see examples. Thus player covariates must be ordered #' according to the levels of the ID factor. #' #' If `formula` includes player covariates and there are players with #' missing values over these covariates, then a separate ability will be #' estimated for those players. #' #' When player abilities are modelled by covariates, then random player effects #' should be added to the model. These should be specified in the formula using #' the vertical bar notation of [lme4::lmer()], see examples. #' #' When specified, it is assumed that random player effects arise from a #' \eqn{N(0, }{N(0, sigma^2)}\eqn{ \sigma^2)}{N(0, sigma^2)} distribution and #' model parameters, including \eqn{\sigma}{sigma}, are estimated using PQL #' (Breslow and Clayton, 1993) as implemented in the [glmmPQL()] #' function. #' #' @param outcome the binomial response: either a numeric vector, a factor in #' which the first level denotes failure and all others success, or a #' two-column matrix with the columns giving the numbers of successes and #' failures. #' @param player1 either an ID factor specifying the first player in each #' contest, or a data.frame containing such a factor and possibly other #' contest-level variables that are specific to the first player. If given in a #' data.frame, the ID factor must have the name given in the `id` #' argument. If a factor is specified it will be used to create such a #' data.frame. #' @param player2 an object corresponding to that given in `player1` for #' the second player in each contest, with identical structure -- in particular #' factors must have identical levels. 
#' @param formula a formula with no left-hand-side, specifying the model for #' player ability. See details for more information. #' @param id the name of the ID factor. #' @param separate.ability (if `formula` does not include the ID factor as #' a separate term) a character vector giving the names of players whose #' abilities are to be modelled individually rather than using the #' specification given by `formula`. #' @param refcat (if `formula` includes the ID factor as a separate term) #' a character specifying which player to use as a reference, with the first #' level of the ID factor as the default. Overrides any other contrast #' specification for the ID factor. #' @param family a description of the error distribution and link function to #' be used in the model. Only the binomial family is implemented, with #' either`"logit"`, `"probit"` , or `"cauchit"` link. (See #' [stats::family()] for details of family functions.) #' @param data an optional object providing data required by the model. This #' may be a single data frame of contest-level data or a list of data frames. #' Names of data frames are ignored unless they refer to data frames specified #' by `player1` and `player2`. The rows of data frames that do not #' contain contest-level data must correspond to the levels of a factor used #' for indexing, i.e. row 1 corresponds to level 1, etc. Note any rownames are #' ignored. Objects are searched for first in the `data` object if #' provided, then in the environment of `formula`. If `data` is a #' list, the data frames are searched in the order given. #' @param weights an optional numeric vector of \sQuote{prior weights}. #' @param subset an optional logical or numeric vector specifying a subset of #' observations to be used in the fitting process. #' @param na.action a function which indicates what should happen when any #' contest-level variables contain `NA`s. The default is the #' `na.action` setting of `options`. See details for the handling of #' missing values in other variables. #' @param start a vector of starting values for the fixed effects. #' @param etastart a vector of starting values for the linear predictor. #' @param mustart a vector of starting values for the vector of means. #' @param offset an optional offset term in the model. A vector of length equal #' to the number of contests. #' @param br logical. If `TRUE` fitting will be by penalized maximum #' likelihood as in Firth (1992, 1993), using [brglm::brglm()], #' rather than maximum likelihood using [glm()], when abilities are #' modelled exactly or when the abilities are modelled by covariates and the #' variance of the random effects is estimated as zero. #' @param model logical: whether or not to return the model frame. #' @param x logical: whether or not to return the design matrix for the fixed #' effects. #' @param contrasts an optional list specifying contrasts for the factors in #' `formula`. See the `contrasts.arg` of [model.matrix()]. #' @param \dots other arguments for fitting function (currently either #' [glm()], [brglm::brglm()], or [glmmPQL()]) #' @return An object of class `c("BTm", "x")`, where `"x"` is the #' class of object returned by the model fitting function (e.g. `glm`). 
#' Components are as for objects of class `"x"`, with additionally #' \item{id}{the `id` argument.} \item{separate.ability}{the #' `separate.ability` argument.} \item{refcat}{the `refcat` #' argument.} \item{player1}{a data frame for the first player containing the #' ID factor and any player-specific contest-level variables.} \item{player2}{a #' data frame corresponding to that for `player1`.} \item{assign}{a #' numeric vector indicating which coefficients correspond to which terms in #' the model.} \item{term.labels}{labels for the model terms.} #' \item{random}{for models with random effects, the design matrix for the #' random effects. } #' @author Heather Turner, David Firth #' @seealso [countsToBinomial()], [glmmPQL()], #' [BTabilities()], [residuals.BTm()], #' [add1.BTm()], [anova.BTm()] #' @references #' #' Agresti, A. (2002) *Categorical Data Analysis* (2nd ed). New York: #' Wiley. #' #' Firth, D. (1992) Bias reduction, the Jeffreys prior and GLIM. In #' *Advances in GLIM and Statistical Modelling*, Eds. Fahrmeir, L., #' Francis, B. J., Gilchrist, R. and Tutz, G., pp91--100. New York: Springer. #' #' Firth, D. (1993) Bias reduction of maximum likelihood estimates. #' *Biometrika* **80**, 27--38. #' #' Firth, D. (2005) Bradley-Terry models in R. *Journal of Statistical #' Software*, **12**(1), 1--12. #' #' Stigler, S. (1994) Citation patterns in the journals of statistics and #' probability. *Statistical Science* **9**, 94--108. #' #' Turner, H. and Firth, D. (2012) Bradley-Terry models in R: The BradleyTerry2 #' package. *Journal of Statistical Software*, **48**(9), 1--21. #' @keywords models #' @examples #' #' ######################################################## #' ## Statistics journal citation data from Stigler (1994) #' ## -- see also Agresti (2002, p448) #' ######################################################## #' #' ## Convert frequencies to success/failure data #' citations.sf <- countsToBinomial(citations) #' names(citations.sf)[1:2] <- c("journal1", "journal2") #' #' ## First fit the "standard" Bradley-Terry model #' citeModel <- BTm(cbind(win1, win2), journal1, journal2, data = citations.sf) #' #' ## Now the same thing with a different "reference" journal #' citeModel2 <- update(citeModel, refcat = "JASA") #' BTabilities(citeModel2) #' #' ################################################################## #' ## Now an example with an order effect -- see Agresti (2002) p438 #' ################################################################## #' data(baseball) # start with baseball data as provided by package #' #' ## Simple Bradley-Terry model, ignoring home advantage: #' baseballModel1 <- BTm(cbind(home.wins, away.wins), home.team, away.team, #' data = baseball, id = "team") #' #' ## Now incorporate the "home advantage" effect #' baseball$home.team <- data.frame(team = baseball$home.team, at.home = 1) #' baseball$away.team <- data.frame(team = baseball$away.team, at.home = 0) #' baseballModel2 <- update(baseballModel1, formula = ~ team + at.home) #' #' ## Compare the fit of these two models: #' anova(baseballModel1, baseballModel2) #' #' ## #' ## For a more elaborate example with both player-level and contest-level #' ## predictor variables, see help(chameleons). 
#' ## #' #' @importFrom brglm brglm #' @export BTm <- function(outcome = 1, player1, player2, formula = NULL, id = "..", separate.ability = NULL, refcat = NULL, family = "binomial", data = NULL, weights = NULL, subset = NULL, na.action = NULL, start = NULL, etastart = NULL, mustart = NULL, offset = NULL, br = FALSE, model = TRUE, x = FALSE, contrasts = NULL, ...){ call <- match.call() if (is.character(family)) family <- get(family, mode = "function", envir = parent.frame()) if (is.function(family)) family <- family() if (is.null(family$family)) { print(family) stop("`family' not recognized") } if (family$family != "binomial") stop("`family' must be binomial") if (!family$link %in% c("logit", "probit", "cauchit")) stop("link for binomial family must be one of \"logit\", \"probit\"", "or \"cauchit\"") fcall <- as.list(match.call(expand.dots = FALSE)) if (is.null(formula)) { formula <- reformulate(id) environment(formula) <- parent.frame() fcall$formula <- formula } setup <- match(c("outcome", "player1", "player2", "formula", "id", "separate.ability", "refcat", "data", "weights", "subset", "offset", "contrasts"), names(fcall), 0L) setup <- do.call(BTm.setup, fcall[setup], envir = parent.frame()) if (setup$saturated) warning("Player ability saturated - equivalent to fitting ", "separate abilities.") mf <- data.frame(X = setup$player1) #just to get length if (!is.null(setup$X)) { mf$X <- setup$X formula <- Y ~ X - 1 } else formula <- Y ~ 0 mf$Y <- setup$Y argPos <- match(c("na.action", "start", "etastart", "mustart", "control", "model", "x"), names(fcall), 0) dotArgs <- fcall$"..." if (is.null(setup$random)) { method <- get(ifelse(br, "brglm", "glm"), mode = "function") fit <- as.call(c(method, fcall[argPos], list(formula = formula, family = family, data = mf, offset = setup$offset, subset = setup$subset, weights = setup$weights), dotArgs)) fit <- eval(fit, parent.frame()) } else { method <- get("glmmPQL", mode = "function") fit <- as.call(c(method, fcall[argPos], list(formula, setup$random, family = family, data = mf, offset = setup$offset, subset = setup$subset, weights = setup$weights), dotArgs)) fit <- eval(fit, parent.frame()) if (br) { if (identical(fit$sigma, 0)){ argPos <- match(c("na.action", "model", "x"), names(fcall), 0) method <- get("brglm", mode = "function") fit <- as.call(c(method, fcall[argPos], list(formula, family = family, data = mf, offset = setup$offset, subset = setup$subset, weights = setup$weights, etastart = fit$linear.predictors))) fit <- eval(fit, parent.frame()) fit$class <- c("glmmPQL", class(fit)) } else warning("'br' argument ignored for models with random effects", call. = FALSE) } } if (length(fit$coefficients)) { if (ncol(setup$X) > 1) names(fit$coefficients) <- substring(names(fit$coefficients), 2) else names(fit$coefficients) <- colnames(setup$X) fit$assign <- attr(setup$X, "assign") } fit$call <- call fit$id <- id fit$separate.ability <- separate.ability fit$contrasts <- setup$contrasts fit$refcat <- setup$refcat fit$formula <- setup$formula fit$player1 <- setup$player1 fit$player2 <- setup$player2 fit$term.labels <- setup$term.labels fit$data <- setup$data fit$random <- setup$random class(fit) <- c("BTm", class(fit)) fit } BradleyTerry2/R/predict.BTglmmPQL.R0000644000176200001440000002147114775237530016502 0ustar liggesusers#' Predict Method for BTglmmPQL Objects #' #' Obtain predictions and optionally standard errors of those predictions from #' a `"BTglmmPQL"` object. 
#'
#' If `newdata` is omitted the predictions are based on the data used for
#' the fit. In that case how cases with missing values in the original fit are
#' treated is determined by the `na.action` argument of that fit. If
#' `na.action = na.omit` omitted cases will not appear in the residuals,
#' whereas if `na.action = na.exclude` they will appear (in predictions
#' and standard errors), with residual value `NA`. See also
#' `napredict`.
#'
#' Standard errors for the predictions are approximated assuming the variance
#' of the random effects is known, see Booth and Hobert (1998).
#'
#' @param object a fitted object of class `"BTglmmPQL"`
#' @param newdata (optional) a data frame in which to look for variables with
#' which to predict. If omitted, the fitted linear predictors are used.
#' @param newrandom if `newdata` is provided, a corresponding design
#' matrix for the random effects, with columns corresponding to the random
#' effects estimated in the original model.
#' @param level an integer vector giving the level(s) at which predictions are
#' required. Level zero corresponds to population-level predictions (fixed
#' effects only), whilst level one corresponds to the individual-level
#' predictions (full model) which are NA for contests involving individuals not
#' in the original data. By default `level = 0` if the model converged to a
#' fixed effects model, `1` otherwise.
#' @param type the type of prediction required. The default is on the scale of
#' the linear predictors; the alternative `"response"` is on the scale of
#' the response variable. Thus for a default binomial model the default
#' predictions are of log-odds (probabilities on logit scale) and `type =
#' "response"` gives the predicted probabilities. The `"terms"` option
#' returns a matrix giving the fitted values of each term in the model formula
#' on the linear predictor scale (fixed effects only).
#' @param se.fit logical switch indicating if standard errors are required.
#' @param terms with `type = "terms"` by default all terms are returned. A
#' character vector specifies which terms are to be returned.
#' @param na.action function determining what should be done with missing
#' values in `newdata`. The default is to predict `NA`.
#' @param \dots further arguments passed to or from other methods.
#' @return If `se.fit = FALSE`, a vector or matrix of predictions. If
#' `se = TRUE`, a list with components \item{fit }{Predictions}
#' \item{se.fit }{Estimated standard errors}
#' @author Heather Turner
#' @seealso [predict.glm()], [predict.BTm()]
#' @references Booth, J. G. and Hobert, J. P. (1998). Standard errors of
#' prediction in Generalized Linear Mixed Models. *Journal of the American
#' Statistical Association* **93**(441), 262--272.
#' @keywords models
#' @examples
#'
#' seedsModel <- glmmPQL(cbind(r, n - r) ~ seed + extract,
#'                       random = diag(nrow(seeds)),
#'                       family = binomial,
#'                       data = seeds)
#'
#' pred <- predict(seedsModel, level = 0)
#' predTerms <- predict(seedsModel, type = "terms")
#'
#' all.equal(pred, rowSums(predTerms) + attr(predTerms, "constant"))
#'
#' @importFrom stats .checkMFClasses coef delete.response family model.frame model.matrix na.exclude na.pass napredict
#' @export
predict.BTglmmPQL <- function(object, newdata = NULL, newrandom = NULL,
                              level = ifelse(object$sigma == 0, 0, 1),
                              type = c("link", "response", "terms"),
                              se.fit = FALSE, terms = NULL,
                              na.action = na.pass, ...)
{ ## only pass on if a glm if (object$sigma == 0) { if (level != 0) warning("Fixed effects model: setting level to 0") return(NextMethod()) } if (!all(level %in% c(0, 1))) stop("Only level %in% c(0, 1) allowed") type <- match.arg(type) if (!is.null(newdata) || type == "terms") tt <- terms(object) if (!is.null(newdata)) { ## newdata should give variables in terms formula Terms <- delete.response(tt) m <- model.frame(Terms, newdata, na.action = na.action, xlev = object$xlevels) na.action <- attr(m, "na.action") if (!is.null(cl <- attr(Terms, "dataClasses"))) .checkMFClasses(cl, m) D <- model.matrix(Terms, m, contrasts.arg = object$contrasts) np <- nrow(D) # n predictions offset <- rep(0, np) if (!is.null(off.num <- attr(tt, "offset"))) for (i in off.num) offset <- offset + eval(attr(tt, "variables")[[i + 1]], newdata) if (!is.null(object$call$offset)) offset <- offset + eval(object$call$offset, newdata) } else { D <- model.matrix(object) newrandom <- object$random na.action <- object$na.action offset <- object$offset } cf <- coef(object) keep <- !is.na(cf) aa <- attr(D, "assign")[keep] cf <- cf[keep] D <- D[, keep, drop = FALSE] if (se.fit == TRUE) { sigma <- object$sigma w <- sqrt(object$weights) wX <- w * model.matrix(object)[, keep] wZ <- w * object$random XWX <- crossprod(wX) XWZ <- crossprod(wX, wZ) ZWZ <- crossprod(wZ, wZ) diag(ZWZ) <- diag(ZWZ) + 1/sigma^2 K <- cbind(XWX, XWZ) K <- chol(rbind(K, cbind(t(XWZ), ZWZ))) if (type == "terms" || 0 %in% level){ ## work out (chol of inverse of) topleft of K-inv directly A <- backsolve(chol(ZWZ), t(XWZ), transpose = TRUE) A <- chol(XWX - t(A) %*% A) } } if (type == "terms") { # ignore level if (1 %in% level) warning("type = \"terms\": setting level to 0", call. = FALSE) ll <- attr(tt, "term.labels") if (!is.null(terms)) { include <- ll %in% terms ll <- ll[include] } hasintercept <- attr(tt, "intercept") > 0L if (hasintercept) { avx <- colMeans(model.matrix(object)) termsconst <- sum(avx * cf) #NA coefs? 
D <- sweep(D, 2, avx) } pred0 <- matrix(ncol = length(ll), nrow = NROW(D)) colnames(pred0) <- ll if (se.fit) { A <- chol2inv(A) se.pred0 <- pred0 } for (i in seq(length.out = length(ll))){ ind <- aa == which(attr(tt, "term.labels") == ll[i]) pred0[, i] <- D[, ind, drop = FALSE] %*% cf[ind] if (se.fit) { se.pred0[, i] <- sqrt(diag(D[, ind] %*% tcrossprod(A[ind, ind], D[, ind]))) } } if (hasintercept) attr(pred0, "constant") <- termsconst if (se.fit) return(list(fit = pred0, se.fit = se.pred0)) return(pred0) } if (0 %in% level) { pred0 <- napredict(na.action, c(D %*% cf) + offset) if (type == "response") pred0 <- family(object)$linkinv(pred0) if (se.fit == TRUE) { na.act <- attr(na.exclude(pred0), "na.action") H <- backsolve(A, t(na.exclude(D)), transpose = TRUE) ## se.pred0 <- ## sqrt(diag(D %*% chol2inv(K)[1:ncol(D), 1:ncol(D)] %*% t(D))) se.pred0 <- napredict(na.action, napredict(na.act, sqrt(colSums(H^2)))) if (type == "response") se.pred0 <- se.pred0*abs(family(object)$mu.eta(pred0)) pred0 <- list(fit = pred0, se.fit = se.pred0) } if (identical(level, 0)) return(pred0) } r <- nrow(D) ## newrandom should give new design matrix for original random effects if (!is.null(newdata)){ if(is.null(newrandom)) stop("newdata specified without newrandom") if (!is.null(na.action)) newrandom <- newrandom[-na.action, , drop = FALSE] } if (!identical(dim(newrandom), c(r, ncol(object$random)))) stop("newrandom should have ", r, " rows and ", ncol(object$random), " columns") D <- cbind(D, newrandom) cf <- c(cf, attr(coef(object), "random")) pred <- napredict(na.action, c(D %*% cf) + offset) if (type == "response") pred <- family(object)$linkinv(pred) if (se.fit == TRUE) { ##se.pred <- sqrt(diag(D %*% chol2inv(K) %*% t(D))) na.act <- attr(na.exclude(pred), "na.action") H <- backsolve(K, t(na.exclude(D)), transpose = TRUE) se.pred <- napredict(na.action, napredict(na.act, sqrt(colSums(H^2)))) if (type == "response") se.pred <- se.pred*abs(family(object)$mu.eta(pred)) pred <- list(fit = pred, se.fit = se.pred) } if (0 %in% level) list(population = pred0, individual = pred) else pred } BradleyTerry2/vignettes/0000755000176200001440000000000014775715607014777 5ustar liggesusersBradleyTerry2/vignettes/BradleyTerry.bib0000644000176200001440000001662514775700575020075 0ustar liggesusers@Manual{r, title = {R: A Language and Environment for Statistical Computing}, author = {{R Development Core Team}}, organization = {{R} Foundation for Statistical Computing}, address = {Vienna, Austria}, year = {2012}, note = {{ISBN} 3-900051-07-0}, url = {https://www.R-project.org/}, } @Article{bock:01, author = {U B\"ockenholt}, year = {2001}, title = {Hierarchical Modeling of Paired Comparison Data}, journal = {Psychological Methods}, volume = {6}, number = {1}, pages = {49-66}, } @Book{agre:02, year = {2002}, title = {Categorical Data Analysis}, edition = {2nd}, publisher = {John Wiley \& Sons}, author = {A. Agresti}, } @InCollection{brad:84, title = {Paired Comparisons: Some Basic Procedures and Examples}, editor = {P. R. Krishnaiah and P. K. Sen}, booktitle = {Nonparametric Methods}, publisher = {Elsevier}, year = {1984}, volume = {4}, pages = {299 - 326}, series = {Handbook of Statistics}, author = {R. A. Bradley}, } @Article{brad:terr:52, journal = {Biometrika}, year = {1952}, title = {Rank Analysis of Incomplete Block Designs {I}: {T}he Method of Paired Comparisons}, pages = {324--45}, author = {R. A. Bradley and M. E. 
Terry}, volume = {39}, } @Article{sham:curt:95, journal = {Annals of Human Genetics}, year = {1995}, title = {An Extended Transmission/Disequilibrium Test ({TDT}) for Multi-Allele Marker Loci}, number = {3}, pages = {323--336}, author = {P. C. Sham and D. Curtis}, volume = {59}, } @Article{ihak:gent:96, journal = {Journal of Computational and Graphical Statistics}, year = {1996}, title = {{R}: A Language for Data Analysis and Graphics}, number = {3}, pages = {299--314}, author = {Ross Ihaka and Robert Gentleman}, volume = {5}, } @article{spring:73, Journal = {Applied Statistics}, Year = {1973}, Title = {Response Surface Fitting Using a Generalization of the {B}radley-{T}erry Paired Comparisons Model}, Pages = {59--68}, Author = {Springall, A}, Volume = {22}} @article{crit:flig:91, Journal = {Psychometrika}, Year = {1991}, Title = {Paired Comparison, Triple Comparison, and Ranking Experiments as Generalized Linear Models, and Their Implementation in {GLIM}}, Pages = {517--533}, Author = {Critchlow, D E and Fligner, M A}, Volume = {56}} @Article{firt:93, journal = {Biometrika}, year = {1993}, title = {Bias Reduction of Maximum Likelihood Estimates}, pages = {27--38}, author = {David Firth}, volume = {80}, } @Article{hein:sche:02, journal = {Statistics in Medicine}, year = {2002}, title = {A Solution to the Problem of Separation in Logistic Regression}, author = {G Heinze and M Schemper}, pages = {2409--2419}, volume = {21}, } @Article{stig:94, journal = {Statistical Science}, year = {1994}, title = {Citation Patterns in the Journals of Statistics and Probability}, pages = {94--108}, author = {S Stigler}, volume = {9}, } @Article{bres:93, journal = {Journal of the American Statistical Association}, year = {1993}, title = {Approximate Inference in Generalized Linear Mixed Models}, pages = {9--25}, author = {N E Breslow and D G Clayton}, volume = {88}, number = {421}, } @article{spri:73, author = {Springall, A}, title = {Response Surface Fitting Using a Generalization of the {B}radley-{T}erry Paired Comparison Model}, year = {1973}, journal = {Applied Statistics}, volume = {22}, pages = {59--68} } @article{ditt:98, author = {Dittrich, R and Hatzinger, R and Katzenbeisser, W}, title = {Modelling the Effect of Subject-specific Covariates in Paired Comparison Studies with an Application to University Rankings}, year = {1998}, journal = {Applied Statistics}, volume = {47}, pages = {511--525}, keywords = {Bradley-Terry model; Log-linear model} } @Article{ditt:01, author = {R Dittrich and R Hatzinger and W Katzenbeisser}, title = {Corrigendum: {M}odelling the Effect of Subject-Specific Covariates in Paired Comparison Studies with an Application to University Rankings}, year = {2001}, journal = {Applied Statistics}, volume = {50}, pages = {247--249}, } @Article{davi:70, author = {R. R. Davidson}, title = {On Extending the {B}radley-{T}erry Model to Accommodate Ties in Paired Comparison Experiments}, year = {1970}, journal = {Journal of the American Statistical Association}, volume = {65}, pages = {317--328}, } @Article{rao:kupp:67, author = {P. V. Rao and L. L. Kupper}, title = {Ties in Paired-Comparison Experiments: {A} Generalization of the {B}radley-{T}erry Model}, year = {1967}, journal = {Journal of the American Statistical Association}, volume = {62}, pages = {194--204}, } @Article{whit:06, author = {Martin J. Whiting and Devi M. Stuart-Fox and David O'Connor and David Firth and Nigel C. Bennett and Simon P. 
Blomberg}, title = {{Ultraviolet Signals Ultra-Aggression in a Lizard}}, journal = {Animal Behaviour}, year = {{2006}}, volume = {{72}}, pages = {353--363}, }
@article{stua:06, Author = {Stuart-Fox, D M and David Firth and Moussalli, A and Whiting, M J}, Title = {Multiple Signals in Chameleon Contests: Designing and Analysing Animal Contests as a Tournament}, Journal = {Animal Behaviour}, Year = {{2006}}, Volume = {{71}}, Pages = {1263--1271}, DOI = {10.1016/j.anbehav.2005.07.028} }
@article{kous:84, author = {Kousgaard, N}, title = {Analysis of a Sound Field Experiment by a Model for Paired Comparisons with Explanatory Variables}, year = {1984}, journal = {Scandinavian Journal of Statistics}, volume = {11}, pages = {51--57}, keywords = {Bradley-Terry model} }
@Article{firt:04, author = {David Firth and R. X. {de Menezes}}, title = {Quasi-Variances}, journal = {Biometrika}, volume = {91}, year = {2004}, pages = {65--80}, }
@Article{firt:05, author = {David Firth}, title = {Bradley-Terry Models in {R}}, journal = {Journal of Statistical Software}, year = {2005}, volume = {12}, number = {1}, pages = {1--12}, doi = {10.18637/jss.v012.i01} }
@Manual{kosm:07, title = {{brglm}: Bias Reduction in Binary-Response GLMs}, author = {Ioannis Kosmidis}, year = {2007}, note = {{R}~package version~0.5-6}, url = {https://CRAN.R-project.org/package=brglm}, }
@Manual{bate:11, title = {{lme4}: Linear Mixed-Effects Models Using {S}4 Classes}, author = {Douglas Bates and Martin M\"achler and Ben Bolker}, year = {2011}, note = {{R}~package version~0.999375-42}, url = {https://CRAN.R-project.org/package=lme4}, }
@Manual{firt:10, title = {{qvcalc}: Quasi-Variances for Factor Effects in Statistical Models}, author = {David Firth}, year = {2010}, note = {{R}~package version~0.8-7}, url = {https://CRAN.R-project.org/package=qvcalc}, }
@Article{hatz:12, author = {Reinhold Hatzinger and Regina Dittrich}, title = {{prefmod}: An {R} Package for Modeling Preferences Based on Paired Comparisons, Rankings, or Ratings}, journal = {Journal of Statistical Software}, year = {2012}, volume = {48}, number = {10}, pages = {1--31}, doi = {10.18637/jss.v048.i10} }
@Article{turn:12, author = {Heather Turner and David Firth}, title = {Bradley-Terry Models in {R}: The {BradleyTerry2} Package}, journal = {Journal of Statistical Software}, year = {2012}, volume = {48}, number = {9}, pages = {1--21}, doi = {10.18637/jss.v048.i09} }
BradleyTerry2/vignettes/baseball-qvplot.png0000644000176200001440000011210114775673305020567 0ustar liggesusers[binary PNG image data omitted]
BradleyTerry2/vignettes/BradleyTerry.Rmd0000644000176200001440000011474014775700220020043 0ustar liggesusers--- title: |- Bradley-Terry Models in R abstract: | This is a short overview of the R add-on package **BradleyTerry2**, which facilitates the specification and fitting of Bradley-Terry logit, probit or cauchit models to pair-comparison data.
Included are the standard 'unstructured' Bradley-Terry model, structured versions in which the parameters are related through a linear predictor to explanatory variables, and the possibility of an order or 'home advantage' effect or other 'contest-specific' effects. Model fitting is either by maximum likelihood, by penalized quasi-likelihood (for models which involve a random effect), or by bias-reduced maximum likelihood in which the first-order asymptotic bias of parameter estimates is eliminated. Also provided are a simple and efficient approach to handling missing covariate data, and suitably-defined residuals for diagnostic checking of the linear predictor. date: |- For **BradleyTerry2** version `r packageDescription("BradleyTerry2")[["Version"]]`, `r Sys.Date()` vignette: |- %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} %\VignetteIndexEntry{Bradley-Terry Models in R} %\VignetteDepends{} output: function(){ if (requireNamespace('bookdown', quietly = TRUE)) { function(...){ bookdown::html_document2(..., base_format = rmarkdown::html_vignette, number_sections = TRUE, math_method = "mathjax") } } else function(...){ rmarkdown::html_vignette(..., number_sections = TRUE, math_method = "mathjax") }}() link-citations: yes bibliography: BradleyTerry.bib --- ``` {r include=FALSE} library <- function(...) suppressPackageStartupMessages(base::library(...)) library(knitr) opts_chunk$set( tidy=FALSE ) ``` ``` {r set_options, echo = FALSE} options(prompt = "R> ", continue = "+ ", width = 70, useFancyQuotes = FALSE, digits = 7) ``` ## Contents { .unnumbered} \@ref(sec:intro) [Introduction] \@ref(sec:BTmodel) [Standard Bradley-Terry model]     \@ref(sec:citations) [Example: Analysis of journal citations]     \@ref(sec:bias-reduced) [Bias-reduced estimates] \@ref(sec:covariates) [Abilities predicted by explanatory variables]     \@ref(sec:player-specific) ['Player-specific' predictor variables]     \@ref(sec:missing) [Missing values]     \@ref(sec:order) [Order effect]     \@ref(sec:CEMS) [More general (contest-specific) predictors] \@ref(sec:ability) [Ability scores] \@ref(sec:residuals) [Residuals] \@ref(sec:model) [Model search] \@ref(sec:data) [Setting up the data]     \@ref(sec:contest) [Contest-specific data]     \@ref(sec:non-contest) [Non contest-specific data]     \@ref(sec:wide) [Converting data from a 'wide' format]     \@ref(sec:BradleyTerry) [Converting data from the format required by the earlier **BradleyTerry** package] \@ref(sec:functions) [A list of the functions provided in **BradleyTerry2**] \@ref(sec:finalremarks) [Some final remarks]     \@ref(sec:ties) [A note on the treatment of ties]     \@ref(sec:random-effects) [A note on 'contest-specific' random effects] [Acknowledgments] [References] ## Introduction {#sec:intro} The Bradley-Terry model [@brad:terr:52] assumes that in a 'contest' between any two 'players', say player $i$ and player $j$ $(i, j \in \{1,\ldots,K\})$, the odds that $i$ beats $j$ are $\alpha_i/\alpha_j$, where $\alpha_i$ and $\alpha_j$ are positive-valued parameters which might be thought of as representing 'ability'. A general introduction can be found in @brad:84 or @agre:02. Applications are many, ranging from experimental psychology to the analysis of sports tournaments to genetics [for example, the allelic transmission/disequilibrium test of @sham:curt:95 is based on a Bradley-Terry model in which the 'players' are alleles]. 
In typical psychometric applications the 'contests' are comparisons, made by different human subjects, between pairs of items. The model can alternatively be expressed in the logit-linear form $$\mathop{\rm logit}[\mathop{\rm pr}(i\ \mathrm{beats}\ j)]=\lambda_i-\lambda_j, \label{eq:unstructured} (\#eq:unstructured)$$ where $\lambda_i=\log\alpha_i$ for all $i$. Thus, assuming independence of all contests, the parameters $\{\lambda_i\}$ can be estimated by maximum likelihood using standard software for generalized linear models, with a suitably specified model matrix. The primary purpose of the **BradleyTerry2** package [@turn:12], implemented in the R statistical computing environment [@ihak:gent:96;@r], is to facilitate the specification and fitting of such models and some extensions. The **BradleyTerry2** package supersedes the earlier **BradleyTerry** package [@firt:05], providing a more flexible user interface to allow a wider range of models to be fitted. In particular, **BradleyTerry2** allows the inclusion of simple random effects so that the ability parameters can be related to available explanatory variables through a linear predictor of the form $$\lambda_i=\sum_{r=1}^p\beta_rx_{ir} + U_i. (\#eq:autonumber2) $$ The inclusion of the prediction error $U_i$ allows for variability between players with equal covariate values and induces correlation between comparisons with a common player. **BradleyTerry2** also allows for general contest-specific effects to be included in the model and allows the logit link to be replaced, if required, by a different symmetric link function (probit or cauchit). The remainder of the paper is organised as follows. Section \@ref(sec:BTmodel) demonstrates how to use the **BradleyTerry2** package to fit a standard (i.e., unstructured) Bradley-Terry model, with a separate ability parameter estimated for each player, including the use of bias-reduced estimation for such models. Section \@ref(sec:covariates) considers variations of the standard model, including the use of player-specific variables to model ability and allowing for contest-specific effects such as an order effect or judge effects. Sections \@ref(sec:ability) and \@ref(sec:residuals) explain how to obtain important information about a fitted model, in particular the estimates of ability and their standard errors, and player-level residuals, whilst Section \@ref(sec:model) notes the functions available to aid model search. Section \@ref(sec:data) explains in more detail how set up data for use with the **BradleyTerry2** package, Section \@ref(sec:functions) lists the functions provided by the package and finally Section \@ref(sec:finalremarks) comments on two directions for further development of the software. ## Standard Bradley-Terry model {#sec:BTmodel} ### Example: Analysis of journal citations {#sec:citations} The following data come from page 448 of @agre:02, extracted from the larger table of @stig:94. The data are counts of citations among four prominent journals of statistics and are included the **BradleyTerry2** package as the data set `citations`: ``` {r LoadBradleyTerry2} library("BradleyTerry2") ``` ``` {r CitationData} data("citations", package = "BradleyTerry2") ``` ``` {r CitationData2} citations ``` Thus, for example, *Biometrika* was cited 498 times by papers in *Journal of the American Statistical Association* (JASA) during the period under study. 
In order to fit a Bradley-Terry model to these data using `BTm` from the **BradleyTerry2** package, the data must first be converted to binomial frequencies. That is, the data need to be organised into pairs (`player1`, `player2`) and corresponding frequencies of wins and losses for `player1` against `player2`. The **BradleyTerry2** package provides the utility function `countsToBinomial` to convert a contingency table of wins to the format just described: ``` {r countsToBinomial} citations.sf <- countsToBinomial(citations) names(citations.sf)[1:2] <- c("journal1", "journal2") citations.sf ``` Note that the self-citation counts are ignored -- these provide no information on the ability parameters, since the abilities are relative rather than absolute quantities. The binomial response can then be modelled by the difference in player abilities as follows: ``` {r citeModel} citeModel <- BTm(cbind(win1, win2), journal1, journal2, ~ journal, id = "journal", data = citations.sf) citeModel ``` The coefficients here are maximum likelihood estimates of $\lambda_2, \lambda_3, \lambda_4$, with $\lambda_1$ (the log-ability for *Biometrika*) set to zero as an identifying convention. The one-sided model formula ``` r ~ journal ``` specifies the model for player ability, in this case the 'citeability' of the journal. The `id` argument specifies that `"journal"` is the name to be used for the factor that identifies the player -- the values of which are given here by `journal1` and `journal2` for the first and second players respectively. Therefore in this case a separate citeability parameter is estimated for each journal. If a different 'reference' journal is required, this can be achieved using the optional `refcat` argument: for example, making use of `update` to avoid re-specifying the whole model, ``` {r citeModelupdate} update(citeModel, refcat = "JASA") ``` -- the same model in a different parameterization. The use of the standard Bradley-Terry model for this application might perhaps seem rather questionable -- for example, citations within a published paper can hardly be considered independent, and the model discards potentially important information on self-citation. @stig:94 provides arguments to defend the model's use despite such concerns. ### Bias-reduced estimates {#sec:bias-reduced} Estimation of the standard Bradley-Terry model in `BTm` is by default computed by maximum likelihood, using an internal call to the `glm` function. An alternative is to fit by bias-reduced maximum likelihood [@firt:93]: this requires additionally the **brglm** package [@kosm:07], and is specified by the optional argument `br = TRUE`. The resultant effect, namely removal of first-order asymptotic bias in the estimated coefficients, is often quite small. One notable feature of bias-reduced fits is that all estimated coefficients and standard errors are necessarily finite, even in situations of 'complete separation' where maximum likelihood estimates take infinite values [@hein:sche:02]. For the citation data, the parameter estimates are only very slightly changed in the bias-reduced fit: ``` {r citeModelupdate2} update(citeModel, br = TRUE) ``` Here the bias of maximum likelihood is small because the binomial counts are fairly large. In more sparse arrangements of contests -- that is, where there is less or no replication of the contests -- the effect of bias reduction would typically be more substantial than the insignificant one seen here. 
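As a concrete, if artificial, illustration of the 'complete separation' phenomenon mentioned above, consider the following toy data set (invented here purely for illustration; it is not part of the package): player A wins every contest in which it appears and player C loses every contest. Ordinary maximum likelihood then drives the estimates towards infinity (signalled by very large coefficients and standard errors, and convergence warnings from the back-end `glm`), whereas the bias-reduced fit -- which requires the **brglm** package -- remains finite:

``` r
toy <- data.frame(winner = factor(c("A", "A", "B"), levels = c("A", "B", "C")),
                  loser  = factor(c("B", "C", "C"), levels = c("A", "B", "C")))
## ordinary maximum likelihood: the estimates diverge (complete separation)
BTm(1, winner, loser, data = toy)
## bias-reduced maximum likelihood: finite estimates and standard errors
BTm(1, winner, loser, br = TRUE, data = toy)
```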
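It may also help to make concrete the remark in the Introduction that the unstructured model can be fitted by standard generalized linear model software 'with a suitably specified model matrix'. The following sketch is not how `BTm` is implemented, but for the citation data it specifies the same model as `citeModel`: each row of the hand-built matrix has $+1$ in the column of the first journal and $-1$ in the column of the second, and dropping the first column fixes the log-ability of *Biometrika* at zero.

``` r
## signed "difference" model matrix: +1 for journal1, -1 for journal2
X <- model.matrix(~ journal1 - 1, data = citations.sf) -
     model.matrix(~ journal2 - 1, data = citations.sf)
colnames(X) <- levels(citations.sf$journal1)
## drop the first column so that Biometrika is the reference journal
glm(cbind(win1, win2) ~ X[, -1] - 1, family = binomial, data = citations.sf)
```

The estimated coefficients agree with those of `citeModel`, consistent with the fact that `BTm` fits this model through an internal call to `glm`.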
## Abilities predicted by explanatory variables {#sec:covariates} ### 'Player-specific' predictor variables {#sec:player-specific} In some application contexts there may be 'player-specific' explanatory variables available, and it is then natural to consider model simplification of the form $$\lambda_i=\sum_{r=1}^p\beta_rx_{ir} + U_i, (\#eq:autonumber3) $$ in which ability of each player $i$ is related to explanatory variables $x_{i1},\ldots,x_{ip}$ through a linear predictor with coefficients $\beta_1,\ldots,\beta_p$; the $\{U_i\}$ are independent errors. Dependence of the player abilities on explanatory variables can be specified via the `formula` argument, using the standard *S*-language model formulae. The difference in the abilities of player $i$ and player $j$ is modelled by $$\sum_{r=1}^p\beta_rx_{ir} - \sum_{r=1}^p\beta_rx_{jr} + U_i - U_j, \label{eq:structured} (\#eq:structured)$$ where $U_i \sim N(0, \sigma^2)$ for all $i$. The Bradley-Terry model is then a generalized linear mixed model, which the `BTm` function currently fits by using the penalized quasi-likelihood algorithm of @bres:93. As an illustration, consider the following simple model for the `flatlizards` data, which predicts the fighting ability of Augrabies flat lizards by body size (snout to vent length): ``` {r lizModel} options(show.signif.stars = FALSE) data("flatlizards", package = "BradleyTerry2") lizModel <- BTm(1, winner, loser, ~ SVL[..] + (1|..), data = flatlizards) ``` Here the winner of each fight is compared to the loser, so the outcome is always 1. The special name '`..`' appears in the formula as the default identifier for players, in the absence of a user-specified `id` argument. The values of this factor are given by `winner` for the winning lizard and `loser` for the losing lizard in each contest. These factors are provided in the data frame `contests` that is the first element of the list object `flatlizards`. The second element of `flatlizards` is another data frame, `predictors`, containing measurements on the observed lizards, including `SVL`, which is the snout to vent length. Thus `SVL[..]` represents the snout to vent length indexed by lizard (`winner` or `loser` as appropriate). Finally a random intercept for each lizard is included using the bar notation familiar to users of the **lme4** package [@bate:11]. (Note that a random intercept is the only random effect structure currently implemented in **BradleyTerry2**.) The fitted model is summarized below: ``` {r summarize_lizModel} summary(lizModel) ``` The coefficient of snout to vent length is weakly significant; however, the standard deviation of the random effect is quite large, suggesting that this simple model has fairly poor explanatory power. A more appropriate model is considered in the next section. ### Missing values {#sec:missing} The contest data may include all possible pairs of players and hence rows of missing data corresponding to players paired with themselves. Such rows contribute no information to the Bradley-Terry model and are simply discarded by `BTm`. Where there are missing values in player-specific *predictor* (or *explanatory*) variables which appear in the formula, it will typically be very wasteful to discard all contests involving players for which some values are missing. Instead, such cases are accommodated by the inclusion of one or more parameters in the model. 
If, for example, player $1$ has one or more of its predictor values $x_{11},\ldots,x_{1p}$ missing, then the combination of Equations \@ref(eq:unstructured) and \@ref(eq:structured) above yields $$\mathop{\rm logit}[\mathop{\rm pr}(1\ \mathrm{beats}\ j)]=\lambda_1 - \left(\sum_{r=1}^p\beta_rx_{jr} + U_j\right), (\#eq:autonumber5) $$ for all other players $j$. This results in the inclusion of a 'direct' ability parameter for each player having missing predictor values, in addition to the common coefficients $\beta_1,\ldots,\beta_p$ -- an approach which will be appropriate when the missingness mechanism is unrelated to contest success. The same device can be used also to accommodate any user-specified departures from a structured Bradley-Terry model, whereby some players have their abilities determined by the linear predictor but others do not. In the original analysis of the `flatlizards` data [@whit:06], the final model included the first and third principal components of the spectral reflectance from the throat (representing brightness and UV intensity respectively) as well as head length and the snout to vent length seen in our earlier model. The spectroscopy data was missing for two lizards, therefore the ability of these lizards was estimated directly. The following fits this model, with the addition of a random intercept as before: ``` {r lizModel2} lizModel2 <- BTm(1, winner, loser, ~ throat.PC1[..] + throat.PC3[..] + head.length[..] + SVL[..] + (1|..), data = flatlizards) summary(lizModel2) ``` Note that `BTm` detects that lizards 96 and 99 have missing values in the specified predictors and automatically includes separate ability parameters for these lizards. This model was found to be the single best model based on the principal components of reflectance and the other predictors available and indeed the standard deviation of the random intercept is much reduced, but still highly significant. Allowing for this significant variation between lizards with the same predictor values produces more realistic (i.e., larger) standard errors for the parameters when compared to the original analysis of @whit:06. Although this affects the significance of the morphological variables, it does not affect the significance of the principal components, so in this case does not affect the main conclusions of the study. ### Order effect {#sec:order} In certain types of application some or all contests have an associated 'bias', related to the order in which items are presented to a judge or with the location in which a contest takes place, for example. A natural extension of the Bradley-Terry model (Equation \@ref(eq:unstructured)) is then $$\mathop{\rm logit}[\mathop{\rm pr}(i\ \mathrm{beats}\ j)]=\lambda_i-\lambda_j + \delta z, (\#eq:autonumber6) $$ where $z=1$ if $i$ has the supposed advantage and $z=-1$ if $j$ has it. (If the 'advantage' is in fact a disadvantage, $\delta$ will be negative.) The scores $\lambda_i$ then relate to ability in the absence of any such advantage. As an example, consider the baseball data given in @agre:02, page 438: ``` {r baseball} data("baseball", package = "BradleyTerry2") head(baseball) ``` The data set records the home wins and losses for each baseball team against each of the 6 other teams in the data set. The `head` function is used to show the first 6 records, which are the Milwaukee home games. We see for example that Milwaukee played 7 home games against Detroit and won 4 of them. 
The 'standard' Bradley-Terry model without a home-advantage parameter will be fitted if no formula is specified in the call to `BTm`: ``` {r baseballModel} baseballModel1 <- BTm(cbind(home.wins, away.wins), home.team, away.team, data = baseball, id = "team") summary(baseballModel1) ``` The reference team is Baltimore, estimated to be the weakest of these seven, with Milwaukee and Detroit the strongest. In the above, the ability of each team is modelled simply as `~ team` where the values of the factor `team` are given by `home.team` for the first team and `away.team` for the second team in each game. To estimate the home-advantage effect, an additional variable is required to indicate whether the team is at home or not. Therefore data frames containing both the team factor and this new indicator variable are required in place of the factors `home.team` and `away.team` in the call to `BTm`. This is achieved here by over-writing the `home.team` and `away.team` factors in the `baseball` data frame: ``` {r baseballDataUpdate} baseball$home.team <- data.frame(team = baseball$home.team, at.home = 1) baseball$away.team <- data.frame(team = baseball$away.team, at.home = 0) ``` The `at.home` variable is needed for both the home team and the away team, so that it can be differenced as appropriate in the linear predictor. With the data organised in this way, the ability formula can now be updated to include the `at.home` variable as follows: ``` {r baseballModelupdate} baseballModel2 <- update(baseballModel1, formula = ~ team + at.home) summary(baseballModel2) ``` This reproduces the results given on page 438 of @agre:02: the home team has an estimated odds-multiplier of $\exp(0.3023) = 1.35$ in its favour. ### More general (contest-specific) predictors {#sec:CEMS} The 'home advantage' effect is a simple example of a contest-specific predictor. Such predictors are necessarily interactions, between aspects of the contest and (aspects of) the two 'players' involved. For more elaborate examples of such effects, see `?chameleons` and `?CEMS`. The former includes an 'experience' effect, which changes through time, on the fighting ability of male chameleons. The latter illustrates a common situation in psychometric applications of the Bradley-Terry model, where *subjects* express preference for one of two *objects* (the 'players'), and it is the influence on the results of subject attributes that is of primary interest. As an illustration of the way in which such effects are specified, consider the following model specification taken from the examples in `?CEMS`, where data on students' preferences in relation to six European management schools is analysed. ``` {r CEMSmodel} data("CEMS", package = "BradleyTerry2") table8.model <- BTm(outcome = cbind(win1.adj, win2.adj), player1 = school1, player2 = school2, formula = ~ .. + WOR[student] * LAT[..] + DEG[student] * St.Gallen[..] + STUD[student] * Paris[..] + STUD[student] * St.Gallen[..] + ENG[student] * St.Gallen[..] + FRA[student] * London[..] + FRA[student] * Paris[..] + SPA[student] * Barcelona[..] + ITA[student] * London[..] + ITA[student] * Milano[..] + SEX[student] * Milano[..], refcat = "Stockholm", data = CEMS) ``` This model reproduces results from Table 8 of @ditt:01 apart from minor differences due to the different treatment of ties. Here the outcome is the binomial frequency of preference for `school1` over `school2`, with ties counted as half a 'win' and half a 'loss'. The formula specifies the model for school 'ability' or worth.
In this formula, the default label '`..`' represents the school (with values given by `school1` or `school2` as appropriate) and `student` is a factor specifying the student that made the comparison. The remaining variables in the formula use [R]{.sans-serif}'s standard indexing mechanism to include student-specific variables, e.g., `WOR`: whether or not the student was in full-time employment, and school-specific variables, e.g., `LAT`: whether the school was in a 'Latin' city. Thus there are three types of variables: contest-specific (`school1`, `school2`, `student`), subject-specific (`WOR`, `DEG`, ...) and object-specific (`LAT`, `St.Gallen`, ...). These three types of variables are provided in three data frames, contained in the list object `CEMS`. ## Ability scores {#sec:ability} The function `BTabilities` extracts estimates and standard errors for the log-ability scores $\lambda_1, \ldots,\lambda_K$. These will either be 'direct' estimates, in the case of the standard Bradley-Terry model or for players with one or more missing predictor values, or 'model-based' estimates of the form $\hat\lambda_i=\sum_{r=1}^p\hat\beta_rx_{ir}$ for players whose ability is predicted by explanatory variables. As a simple illustration, team ability estimates in the home-advantage model for the `baseball` data are obtained by: ``` {r BTabilities} BTabilities(baseballModel2) ``` This gives, for each team, the estimated ability when the team enjoys no home advantage. Similarly, estimates of the fighting ability of each lizard in the `flatlizards` data under the model based on the principal components of the spectral reflectance from the throat are obtained as follows: ``` {r BTabilities2} head(BTabilities(lizModel2), 4) ``` The ability estimates in an unstructured Bradley-Terry model are particularly well suited to presentation using the device of *quasi-variances* [@firt:04]. The **qvcalc** package [@firt:10, version 0.8-5 or later] contains a function of the same name which does the necessary work: ``` r > library("qvcalc") > baseball.qv <- qvcalc(BTabilities(baseballModel2)) > plot(baseball.qv, + levelNames = c("Bal", "Bos", "Cle", "Det", "Mil", "NY", "Tor")) ``` ```{r figqvplot, echo=FALSE , fig.cap="Estimated relative abilities of baseball teams.", fig.alt="The ability for Baltimore is fixed at zero, with an interval ranging from -0.5 to 0.5. Boston has a relative ability near 1.2; Cleveland around 0.7. The remaining teams have relative abilities around 1.3 to 1.6. The intervals are based on quasi standard errors and all have length of approximately 1. Therefore, aside from Cleveland, all teams are clearly significantly stronger than Baltimore as the intervals do not overlap.", fig.show='hold', fig.align="center", out.width="67.0%"} knitr::include_graphics(c("baseball-qvplot.png")) ``` The 'comparison intervals' as shown in Figure \@ref(fig:figqvplot) are based on 'quasi standard errors', and can be interpreted as if they refer to *independent* estimates of ability for the teams. This has the advantage that comparison between any pair of teams is readily made (i.e., not only comparisons with the 'reference' team). For details of the theory and method of calculation see @firt:04. ## Residuals {#sec:residuals} There are two main types of residuals available for a Bradley-Terry model object. First, there are residuals obtained by the standard methods for models of class `"glm"`. These all deliver one residual for each contest or type of contest.
For example, Pearson residuals for the model `lizModel2` can be obtained simply by ``` {r residuals} res.pearson <- round(residuals(lizModel2), 3) head(cbind(flatlizards$contests, res.pearson), 4) ``` More useful for diagnostics on the linear predictor $\sum\beta_rx_{ir}$ are 'player'-level residuals, obtained by using the function `residuals` with argument `type = "grouped"`. These residuals can then be plotted against other player-specific variables. ``` {r BTresiduals} res <- residuals(lizModel2, type = "grouped") # with(flatlizards$predictors, plot(throat.PC2, res)) # with(flatlizards$predictors, plot(head.width, res)) ``` These residuals estimate the error in the linear predictor; they are obtained by suitable aggregation of the so-called 'working' residuals from the model fit. The `weights` attribute indicates the relative information in these residuals -- weight is roughly inversely proportional to variance -- which may be useful for plotting and/or interpretation; for example, a large residual may be of no real concern if based on very little information. Weighted least-squares regression of these residuals on any variable already in the model is null. For example: ``` {r residualWLS} lm(res ~ throat.PC1, weights = attr(res, "weights"), data = flatlizards$predictors) lm(res ~ head.length, weights = attr(res, "weights"), data = flatlizards$predictors) ``` As an illustration of evident *non-null* residual structure, consider the unrealistically simple model `lizModel` that was fitted in Section \@ref(sec:covariates) above. That model lacks the clearly significant predictor variable `throat.PC3`, and the plot shown in Figure \@ref(fig:figresiduals) demonstrates this fact graphically: ``` r lizModel.residuals <- residuals(lizModel, type = "grouped") plot(flatlizards$predictors$throat.PC3, lizModel.residuals) ``` ```{r figresiduals, echo=FALSE , fig.cap="Lizard residuals for the simple model lizModel, plotted against throat.PC3.", fig.alt="The residuals are quite spread out over the range -2 to 2, but the distribution is clearly not uniform over the range of the predictor variable, throat.PC3. Residuals between -2 and -1 range correspond to values throat.PC3 between -6 and 4; residuals between -1 and 1 correspond to throat.PC3 values of -4 to 4, and residuals from 1 to 2 correspond to throat.PC3 values between -3 and 6. Thus there is an overall positive correlation bewteen the residuals and throat.PC3.", fig.show='hold', fig.align="center", out.width="69.0%"} knitr::include_graphics(c("residuals.png")) ``` The residuals in the plot exhibit a strong, positive regression slope in relation to the omitted predictor variable `throat.PC3`. ## Model search {#sec:model} In addition to `update()` as illustrated in preceding sections, methods for the generic functions `add1()`, `drop1()` and `anova()` are provided. These can be used to investigate the effect of adding or removing a variable, whether that variable is contest-specific, such as an order effect, or player-specific; and to compare the fit of nested models. ## Setting up the data {#sec:data} ### Contest-specific data {#sec:contest} The `outcome` argument of `BTm` represents a binomial response and can be supplied in any of the formats allowed by the `glm` function. That is, either a two-column matrix with the columns giving the number of wins and losses (for `player1` vs. `player2`), a factor where the first level denotes a loss and all other levels denote a win, or a binary variable where 0 denotes a loss and 1 denotes a win. 
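As a small illustration of this equivalence, the fixed-effects part of the `lizModel` fit of Section \@ref(sec:player-specific) can be specified either with a recycled binary outcome or with a two-column matrix of wins and losses; the following sketch (which omits the random intercept purely to keep the example minimal, and is shown for illustration only) gives identical fits:

``` r
n <- nrow(flatlizards$contests)
## binary outcome: a single 1 is recycled to the number of contests
fit1 <- BTm(1, winner, loser, ~ SVL[..], data = flatlizards)
## two-column matrix: one win and no losses for the first-named lizard
fit2 <- BTm(cbind(rep(1, n), 0), winner, loser, ~ SVL[..], data = flatlizards)
```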
Each row represents either a single contest or a set of contests between the same two players. The `player1` and `player2` arguments are either factors specifying the two players in each contest, or data frames containing such factors, along with any contest-specific variables that are also player-specific, such as the `at.home` variable seen in Section \@ref(sec:order). If given in data frames, the factors identifying the players should be named as specified by the `id` argument and should have identical levels, since they represent a particular sample of the full set of players. Thus for the model `baseballModel2`, which was specified by the following call: ``` {r baseballModel2_call} baseballModel2$call ``` the data are provided in the `baseball` data frame, which has the following structure: ``` {r str_baseball} str(baseball, vec.len = 2) ``` In this case `home.team` and `away.team` are both data frames, with the factor `team` specifying the team and the variable `at.home` specifying whether or not the team was at home. So the first comparison ``` {r first_comparison} baseball$home.team[1,] baseball$away.team[1,] ``` is Milwaukee playing at home against Detroit. The outcome is given by ``` {r first_outcome} baseball[1, c("home.wins", "away.wins")] ``` Contest-specific variables that are *not* player-specific -- for example, whether it rained or not during a contest -- should only be used in interactions with variables that *are* player-specific, otherwise the effect on ability would be the same for both players and would cancel out. Such variables can conveniently be provided in a single data frame along with the `outcome`, `player1` and `player2` data. An offset in the model can be specified by using the `offset` argument to `BTm`. This facility is provided for completeness: the authors have not yet encountered an application where it is needed. To use only certain rows of the contest data in the analysis, the `subset` argument may be used in the call to `BTm`. This should either be a logical vector of the same length as the binomial response, or a numeric vector containing the indices of rows to be used. ### Non contest-specific data {#sec:non-contest} Some variables do not vary by contest directly, but rather vary by a factor that is contest-specific, such as the player ID or the judge making the paired comparison. For such variables, it is more economical to store the data by the levels of the contest-specific factor and use indexing to obtain the values for each contest. The `CEMS` example in Section \@ref(sec:CEMS) provides an illustration of such variables. In this example student-specific variables are indexed by `student` and school-specific variables are indexed by `..`, i.e., the first or second school in the comparison as appropriate. There are then two extra sets of variables in addition to the usual contest-specific data as described in the last section. A good way to provide these data to `BTm` is as a list of data frames, one for each set of variables, e.g., ``` {r str_CEMS} str(CEMS, vec.len = 2) ``` The names of the data frames are only used by `BTm` if they match the names specified in the `player1` and `player2` arguments, in which case it is assumed that these are data frames providing the data for the first and second player respectively. The rows of data frames in the list should either correspond to the contests or the levels of the factor used for indexing. Player-specific offsets should be included in the formula by using the `offset` function. 
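For example, the following sketch adds a player-specific offset to a simple flat lizards model. The `zero.off` column is invented here purely for illustration -- it is identically zero, so the fit is the same as without the offset -- but a genuine player-specific offset would be supplied in exactly the same way:

``` r
## add an (all-zero) offset variable to the player-specific data frame
flatlizards$predictors$zero.off <- 0
## the offset is indexed by player, like any other player-specific variable
BTm(1, winner, loser, ~ SVL[..] + offset(zero.off[..]),
    data = flatlizards)
```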
### Converting data from a 'wide' format {#sec:wide} The `BTm` function requires data in a 'long' format, with one row per contest, provided either directly as in Section \@ref(sec:contest) or via indexing as in Section \@ref(sec:non-contest). In studies where the same set of paired comparisons are made by several judges, as in a questionnaire for example, the data may be stored in a 'wide' format, with one row per judge. As an example, consider the `cemspc` data from the **prefmod** package [@hatz:12], which provides data from the CEMS study in a wide format. Each row corresponds to one student; the first 15 columns give the outcome of all pairwise comparisons between the 6 schools in the study and the last two columns correspond to two of the student-specific variables: `ENG` (indicating the student's knowledge of English) and `SEX` (indicating the student's gender). The following steps convert these data into a form suitable for analysis with `BTm`. First a new data frame is created from the student-specific variables and these variables are converted to factors: ``` {r student-specific_data} library("prefmod") student <- cemspc[c("ENG", "SEX")] student$ENG <- factor(student$ENG, levels = 1:2, labels = c("good", "poor")) student$SEX <- factor(student$SEX, levels = 1:2, labels = c("female", "male")) ``` This data frame is put into a list, which will eventually hold all the necessary data. Then a `student` factor is created for indexing the student data to produce contest-level data. This is put in a new data frame that will hold the contest-specific data. ``` {r student_factor} cems <- list(student = student) student <- gl(303, 1, 303 * 15) #303 students, 15 comparisons contest <- data.frame(student = student) ``` Next the outcome data is converted to a binomial response, adjusted for ties. The result is added to the `contest` data frame. ``` {r binomial_response} win <- cemspc[, 1:15] == 0 lose <- cemspc[, 1:15] == 2 draw <- cemspc[, 1:15] == 1 contest$win.adj <- c(win + draw/2) contest$lose.adj <- c(lose + draw/2) ``` Then two factors are created identifying the first and second school in each comparison. The comparisons are in the order 1 vs. 2, 1 vs. 3, 2 vs. 3, 1 vs. 4, ..., so the factors can be created as follows: ``` {r school_factors} lab <- c("London", "Paris", "Milano", "St. Gallen", "Barcelona", "Stockholm") contest$school1 <- factor(sequence(1:5), levels = 1:6, labels = lab) contest$school2 <- factor(rep(2:6, 1:5), levels = 1:6, labels = lab) ``` Note that both factors have exactly the same levels, even though only five of the six players are represented in each case. In other words, the numeric factor levels refer to the same players in each case, so that the player is unambiguously identified. This ensures that player-specific parameters and player-specific covariates are correctly specified. Finally the `contest` data frame is added to the main list: ``` {r cems_data} cems$contest <- contest ``` This creates a single data object that can be passed to the `data` argument of `BTm`. Of course, such a list could be created on-the-fly as in `data = list(contest, student)`, which may be more convenient in practice. ### Converting data from the format required by the earlier **BradleyTerry** package {#sec:BradleyTerry} The **BradleyTerry** package described in @firt:05 required contest/comparison results to be in a data frame with columns named `winner`, `loser` and `Freq`. 
The following example shows how `xtabs` and `countsToBinomial` can be used to convert such data for use with the `BTm` function in **BradleyTerry2**: ``` r library("BradleyTerry") ## the /old/ BradleyTerry package ## load data frame with columns "winner", "loser", "Freq" data("citations", package = "BradleyTerry") ## convert to 2-way table of counts citations <- xtabs(Freq ~ winner + loser, citations) ## convert to a data frame of binomial observations citations.sf <- countsToBinomial(citations) ``` The `citations.sf` data frame can then be used with `BTm` as shown in Section \@ref(sec:citations). ## A list of the functions provided in **BradleyTerry2** {#sec:functions} The standard R help files provide the definitive reference. Here we simply list the main user-level functions and their arguments, as a convenient overview: ``` {r functions, echo = FALSE} ## cf. prompt options(width = 55) for (fn in getNamespaceExports("BradleyTerry2")) { name <- as.name(fn) args <- formals(fn) n <- length(args) arg.names <- arg.n <- names(args) arg.n[arg.n == "..."] <- "\\dots" is.missing.arg <- function(arg) typeof(arg) == "symbol" && deparse(arg) == "" Call <- paste(name, "(", sep = "") for (i in seq_len(n)) { Call <- paste(Call, arg.names[i], if (!is.missing.arg(args[[i]])) paste(" = ", paste(deparse(args[[i]]), collapse = "\n"), sep = ""), sep = "") if (i != n) Call <- paste(Call, ", ", sep = "") } Call <- paste(Call, ")", sep = "") cat(deparse(parse(text = Call)[[1]], width.cutoff = 50), fill = TRUE) } options(width = 60) ``` ## Some final remarks {#sec:finalremarks} ### A note on the treatment of ties {#sec:ties} The present version of **BradleyTerry2** provides no sophisticated facilities for handling tied contests/comparisons; the well-known models of @rao:kupp:67 and @davi:70 are not implemented here. At present the `BTm` function requires a binary or binomial response variable; the third ('tied') category of response is not allowed. In several of the data examples (e.g., `?CEMS`, `?springall`, `?sound.fields`), ties are handled by the crude but simple device of adding half of a 'win' to the tally for each player involved; in each of the examples where this has been done it is found that the result is very similar, after a simple re-scaling, to the more sophisticated analyses that have appeared in the literature. Note that this device when used with `BTm` typically gives rise to warnings produced by the back-end `glm` function, about non-integer 'binomial' counts; such warnings are of no consequence and can be safely ignored. It is likely that a future version of **BradleyTerry2** will have a more general method for handling ties. ### A note on 'contest-specific' random effects {#sec:random-effects} The current version of **BradleyTerry2** provides facilities for fitting models with random effects in 'player-specific' predictor functions, as illustrated in Section \@ref(sec:covariates). For more general, 'contest-specific' random-effect structures, such as random 'judge' effects in psychological studies [e.g., @bock:01], **BradleyTerry2** provides (through `BTm`) the necessary user interface but as yet no back-end calculation. It is hoped that this important generalization can be made successfully in a future version of **BradleyTerry2**. ## Acknowledgments {#sec:acknowledgments .unnumbered} This work was supported by the UK Engineering and Physical Sciences Research Council.
## References {#sec:references .unnumbered} BradleyTerry2/vignettes/residuals.png0000644000176200001440000015662414775673305017504 0ustar liggesusers
׸A,?t8³}xdu+..NMM}^`%Cڰax<-""Wnܸk.ܔ?(S,-=u͹T۵{nFrW2Ƃ!0`-wܽ{waa8Yz5>ᙜBBBh￯־uƍtB`m޼YZgcvqq:ulDDx/?W_}Ujhh?z(Fz[ީTXVYYI7jͣ~!2RJxwуSSSSVVVVVvκ({{{Mrtt4ݑ}FsssJwqvwxx8u7~X6""B{TUUK{/ցd9sZ[[{nݺ+Whߑj ]x&}}Tݝlmm]~}xx*S]]+AAA|HppJ:u* ѻ4HNlr1p¹s p={>((1&Hhʨȑ#r0'/RVX;vٳ + TT>kjjL[)4]FF5?\555uvvV(|D"y&oGmmmWZn:ዛ񣼼&MdnnN[,,,'sh[&;88L2W3)_ Or<88/(ݸq= gtǃ5Ӛ>hL##cǎ wQTh7\|9?=Mh5k:˪DXtذajΞ=?ȶSLd<511mtSR˝bHSSwy_O99Y\~'UTJr̙vvv.((lmmմИzW\Q;sA,?{3ƦLr5qLmγݻk{:=皾Q#JQTj㖥]ĪTwXs*Hk}:%%t/xPJJJ^^^yyݻw|IS?ؘ{E\sM0]-zWR͛7ھ~wcƌ XUWW'$$ϵRۛgxΉUUUi֬Y \xĉi'OJ2**&&&#FXrpoݺedd4x`///L1mWAAAzzzYYYee!Cd2ƌG .dgg''''%%5jڴi~~~={g!777))ƍMMM2[[NKKϞ=Dqu\J2===++Q"X[[d2ݵv?~;wXZZ1b̘1/Xx֯_F-ńA,o̙3&MZf͆ ;X%J'L \6 }0aBW"L'N /X0Fɓ'ׯr@Wtbtb@  @ b bXbA,X A,X@ A, @  @  @ b +}\w&IENDB`BradleyTerry2/data/0000755000176200001440000000000014775237530013672 5ustar liggesusersBradleyTerry2/data/CEMS.R0000644000176200001440000044206514775237530014557 0ustar liggesusersCEMS <- structure(list(preferences = structure(list(student = c(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 19, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 34, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 35, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 36, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 37, 38, 38, 38, 38, 38, 38, 38, 38, 38, 38, 38, 38, 38, 38, 38, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 39, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 43, 44, 44, 44, 44, 44, 44, 44, 44, 44, 44, 44, 44, 44, 44, 44, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 46, 46, 
46, 46, 46, 46, 46, 46, 46, 46, 46, 46, 46, 46, 46, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 47, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 49, 49, 49, 49, 49, 49, 49, 49, 49, 49, 49, 49, 49, 49, 49, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 53, 53, 53, 53, 53, 53, 53, 53, 53, 53, 53, 53, 53, 53, 53, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 55, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 58, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, 68, 68, 68, 68, 68, 68, 68, 68, 68, 68, 68, 68, 68, 68, 68, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 70, 70, 70, 70, 70, 70, 70, 70, 70, 70, 70, 70, 70, 70, 70, 71, 71, 71, 71, 71, 71, 71, 71, 71, 71, 71, 71, 71, 71, 71, 72, 72, 72, 72, 72, 72, 72, 72, 72, 72, 72, 72, 72, 72, 72, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 73, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, 75, 75, 75, 75, 75, 75, 75, 75, 75, 75, 75, 75, 75, 75, 75, 76, 76, 76, 76, 76, 76, 76, 76, 76, 76, 76, 76, 76, 76, 76, 77, 77, 77, 77, 77, 77, 77, 77, 77, 77, 77, 77, 77, 77, 77, 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, 78, 79, 79, 79, 79, 79, 79, 79, 79, 79, 79, 79, 79, 79, 79, 79, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 80, 81, 81, 81, 81, 81, 81, 81, 81, 81, 81, 81, 81, 81, 81, 81, 82, 82, 82, 82, 82, 82, 82, 82, 82, 82, 82, 82, 82, 82, 82, 83, 83, 83, 83, 83, 83, 83, 83, 83, 83, 83, 83, 83, 83, 83, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 84, 85, 85, 85, 85, 85, 85, 85, 85, 85, 85, 85, 85, 85, 85, 85, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 86, 87, 87, 87, 87, 87, 87, 87, 87, 87, 87, 87, 87, 87, 87, 87, 88, 88, 88, 88, 88, 88, 88, 88, 88, 88, 88, 88, 88, 88, 88, 89, 89, 89, 89, 89, 89, 89, 89, 89, 89, 89, 89, 89, 89, 89, 90, 90, 90, 90, 90, 90, 90, 90, 90, 90, 90, 90, 90, 90, 90, 91, 91, 91, 91, 91, 91, 91, 91, 91, 91, 91, 91, 91, 91, 91, 92, 92, 92, 92, 92, 92, 92, 92, 92, 92, 92, 92, 92, 92, 92, 93, 93, 93, 93, 93, 93, 93, 93, 93, 93, 93, 93, 93, 93, 93, 94, 94, 94, 94, 94, 94, 94, 94, 94, 94, 94, 94, 94, 94, 94, 95, 95, 95, 95, 95, 95, 95, 95, 95, 95, 95, 95, 95, 95, 95, 96, 96, 96, 96, 96, 96, 96, 96, 96, 96, 96, 96, 96, 96, 96, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 97, 98, 98, 98, 98, 98, 98, 98, 98, 98, 98, 98, 98, 98, 98, 98, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 99, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 101, 101, 101, 101, 101, 101, 101, 101, 101, 101, 101, 101, 101, 101, 101, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 102, 103, 103, 103, 103, 103, 103, 103, 103, 103, 103, 103, 103, 103, 103, 103, 104, 104, 104, 104, 
104, 104, 104, 104, 104, 104, 104, 104, 104, 104, 104, 105, 105, 105, 105, 105, 105, 105, 105, 105, 105, 105, 105, 105, 105, 105, 106, 106, 106, 106, 106, 106, 106, 106, 106, 106, 106, 106, 106, 106, 106, 107, 107, 107, 107, 107, 107, 107, 107, 107, 107, 107, 107, 107, 107, 107, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 108, 109, 109, 109, 109, 109, 109, 109, 109, 109, 109, 109, 109, 109, 109, 109, 110, 110, 110, 110, 110, 110, 110, 110, 110, 110, 110, 110, 110, 110, 110, 111, 111, 111, 111, 111, 111, 111, 111, 111, 111, 111, 111, 111, 111, 111, 112, 112, 112, 112, 112, 112, 112, 112, 112, 112, 112, 112, 112, 112, 112, 113, 113, 113, 113, 113, 113, 113, 113, 113, 113, 113, 113, 113, 113, 113, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 114, 115, 115, 115, 115, 115, 115, 115, 115, 115, 115, 115, 115, 115, 115, 115, 116, 116, 116, 116, 116, 116, 116, 116, 116, 116, 116, 116, 116, 116, 116, 117, 117, 117, 117, 117, 117, 117, 117, 117, 117, 117, 117, 117, 117, 117, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 118, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 119, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 121, 121, 121, 121, 121, 121, 121, 121, 121, 121, 121, 121, 121, 121, 121, 122, 122, 122, 122, 122, 122, 122, 122, 122, 122, 122, 122, 122, 122, 122, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 123, 124, 124, 124, 124, 124, 124, 124, 124, 124, 124, 124, 124, 124, 124, 124, 125, 125, 125, 125, 125, 125, 125, 125, 125, 125, 125, 125, 125, 125, 125, 126, 126, 126, 126, 126, 126, 126, 126, 126, 126, 126, 126, 126, 126, 126, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 127, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128, 129, 129, 129, 129, 129, 129, 129, 129, 129, 129, 129, 129, 129, 129, 129, 130, 130, 130, 130, 130, 130, 130, 130, 130, 130, 130, 130, 130, 130, 130, 131, 131, 131, 131, 131, 131, 131, 131, 131, 131, 131, 131, 131, 131, 131, 132, 132, 132, 132, 132, 132, 132, 132, 132, 132, 132, 132, 132, 132, 132, 133, 133, 133, 133, 133, 133, 133, 133, 133, 133, 133, 133, 133, 133, 133, 134, 134, 134, 134, 134, 134, 134, 134, 134, 134, 134, 134, 134, 134, 134, 135, 135, 135, 135, 135, 135, 135, 135, 135, 135, 135, 135, 135, 135, 135, 136, 136, 136, 136, 136, 136, 136, 136, 136, 136, 136, 136, 136, 136, 136, 137, 137, 137, 137, 137, 137, 137, 137, 137, 137, 137, 137, 137, 137, 137, 138, 138, 138, 138, 138, 138, 138, 138, 138, 138, 138, 138, 138, 138, 138, 139, 139, 139, 139, 139, 139, 139, 139, 139, 139, 139, 139, 139, 139, 139, 140, 140, 140, 140, 140, 140, 140, 140, 140, 140, 140, 140, 140, 140, 140, 141, 141, 141, 141, 141, 141, 141, 141, 141, 141, 141, 141, 141, 141, 141, 142, 142, 142, 142, 142, 142, 142, 142, 142, 142, 142, 142, 142, 142, 142, 143, 143, 143, 143, 143, 143, 143, 143, 143, 143, 143, 143, 143, 143, 143, 144, 144, 144, 144, 144, 144, 144, 144, 144, 144, 144, 144, 144, 144, 144, 145, 145, 145, 145, 145, 145, 145, 145, 145, 145, 145, 145, 145, 145, 145, 146, 146, 146, 146, 146, 146, 146, 146, 146, 146, 146, 146, 146, 146, 146, 147, 147, 147, 147, 147, 147, 147, 147, 147, 147, 147, 147, 147, 147, 147, 148, 148, 148, 148, 148, 148, 148, 148, 148, 148, 148, 148, 148, 148, 148, 149, 149, 149, 149, 149, 149, 149, 149, 149, 149, 149, 149, 149, 149, 149, 150, 150, 150, 150, 150, 150, 150, 150, 150, 150, 150, 150, 150, 150, 150, 151, 151, 151, 151, 151, 151, 151, 151, 151, 151, 
151, 151, 151, 151, 151, 152, 152, 152, 152, 152, 152, 152, 152, 152, 152, 152, 152, 152, 152, 152, 153, 153, 153, 153, 153, 153, 153, 153, 153, 153, 153, 153, 153, 153, 153, 154, 154, 154, 154, 154, 154, 154, 154, 154, 154, 154, 154, 154, 154, 154, 155, 155, 155, 155, 155, 155, 155, 155, 155, 155, 155, 155, 155, 155, 155, 156, 156, 156, 156, 156, 156, 156, 156, 156, 156, 156, 156, 156, 156, 156, 157, 157, 157, 157, 157, 157, 157, 157, 157, 157, 157, 157, 157, 157, 157, 158, 158, 158, 158, 158, 158, 158, 158, 158, 158, 158, 158, 158, 158, 158, 159, 159, 159, 159, 159, 159, 159, 159, 159, 159, 159, 159, 159, 159, 159, 160, 160, 160, 160, 160, 160, 160, 160, 160, 160, 160, 160, 160, 160, 160, 161, 161, 161, 161, 161, 161, 161, 161, 161, 161, 161, 161, 161, 161, 161, 162, 162, 162, 162, 162, 162, 162, 162, 162, 162, 162, 162, 162, 162, 162, 163, 163, 163, 163, 163, 163, 163, 163, 163, 163, 163, 163, 163, 163, 163, 164, 164, 164, 164, 164, 164, 164, 164, 164, 164, 164, 164, 164, 164, 164, 165, 165, 165, 165, 165, 165, 165, 165, 165, 165, 165, 165, 165, 165, 165, 166, 166, 166, 166, 166, 166, 166, 166, 166, 166, 166, 166, 166, 166, 166, 167, 167, 167, 167, 167, 167, 167, 167, 167, 167, 167, 167, 167, 167, 167, 168, 168, 168, 168, 168, 168, 168, 168, 168, 168, 168, 168, 168, 168, 168, 169, 169, 169, 169, 169, 169, 169, 169, 169, 169, 169, 169, 169, 169, 169, 170, 170, 170, 170, 170, 170, 170, 170, 170, 170, 170, 170, 170, 170, 170, 171, 171, 171, 171, 171, 171, 171, 171, 171, 171, 171, 171, 171, 171, 171, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 172, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 173, 174, 174, 174, 174, 174, 174, 174, 174, 174, 174, 174, 174, 174, 174, 174, 175, 175, 175, 175, 175, 175, 175, 175, 175, 175, 175, 175, 175, 175, 175, 176, 176, 176, 176, 176, 176, 176, 176, 176, 176, 176, 176, 176, 176, 176, 177, 177, 177, 177, 177, 177, 177, 177, 177, 177, 177, 177, 177, 177, 177, 178, 178, 178, 178, 178, 178, 178, 178, 178, 178, 178, 178, 178, 178, 178, 179, 179, 179, 179, 179, 179, 179, 179, 179, 179, 179, 179, 179, 179, 179, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 180, 181, 181, 181, 181, 181, 181, 181, 181, 181, 181, 181, 181, 181, 181, 181, 182, 182, 182, 182, 182, 182, 182, 182, 182, 182, 182, 182, 182, 182, 182, 183, 183, 183, 183, 183, 183, 183, 183, 183, 183, 183, 183, 183, 183, 183, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 184, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 185, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 186, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 187, 188, 188, 188, 188, 188, 188, 188, 188, 188, 188, 188, 188, 188, 188, 188, 189, 189, 189, 189, 189, 189, 189, 189, 189, 189, 189, 189, 189, 189, 189, 190, 190, 190, 190, 190, 190, 190, 190, 190, 190, 190, 190, 190, 190, 190, 191, 191, 191, 191, 191, 191, 191, 191, 191, 191, 191, 191, 191, 191, 191, 192, 192, 192, 192, 192, 192, 192, 192, 192, 192, 192, 192, 192, 192, 192, 193, 193, 193, 193, 193, 193, 193, 193, 193, 193, 193, 193, 193, 193, 193, 194, 194, 194, 194, 194, 194, 194, 194, 194, 194, 194, 194, 194, 194, 194, 195, 195, 195, 195, 195, 195, 195, 195, 195, 195, 195, 195, 195, 195, 195, 196, 196, 196, 196, 196, 196, 196, 196, 196, 196, 196, 196, 196, 196, 196, 197, 197, 197, 197, 197, 197, 197, 197, 197, 197, 197, 197, 197, 197, 197, 198, 198, 198, 198, 198, 198, 198, 198, 198, 198, 198, 198, 198, 198, 198, 199, 
199, 199, 199, 199, 199, 199, 199, 199, 199, 199, 199, 199, 199, 199, 200, 200, 200, 200, 200, 200, 200, 200, 200, 200, 200, 200, 200, 200, 200, 201, 201, 201, 201, 201, 201, 201, 201, 201, 201, 201, 201, 201, 201, 201, 202, 202, 202, 202, 202, 202, 202, 202, 202, 202, 202, 202, 202, 202, 202, 203, 203, 203, 203, 203, 203, 203, 203, 203, 203, 203, 203, 203, 203, 203, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 204, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 205, 206, 206, 206, 206, 206, 206, 206, 206, 206, 206, 206, 206, 206, 206, 206, 207, 207, 207, 207, 207, 207, 207, 207, 207, 207, 207, 207, 207, 207, 207, 208, 208, 208, 208, 208, 208, 208, 208, 208, 208, 208, 208, 208, 208, 208, 209, 209, 209, 209, 209, 209, 209, 209, 209, 209, 209, 209, 209, 209, 209, 210, 210, 210, 210, 210, 210, 210, 210, 210, 210, 210, 210, 210, 210, 210, 211, 211, 211, 211, 211, 211, 211, 211, 211, 211, 211, 211, 211, 211, 211, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 212, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 213, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 214, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 215, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 216, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 217, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 218, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 219, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 221, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 222, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 224, 225, 225, 225, 225, 225, 225, 225, 225, 225, 225, 225, 225, 225, 225, 225, 226, 226, 226, 226, 226, 226, 226, 226, 226, 226, 226, 226, 226, 226, 226, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 227, 228, 228, 228, 228, 228, 228, 228, 228, 228, 228, 228, 228, 228, 228, 228, 229, 229, 229, 229, 229, 229, 229, 229, 229, 229, 229, 229, 229, 229, 229, 230, 230, 230, 230, 230, 230, 230, 230, 230, 230, 230, 230, 230, 230, 230, 231, 231, 231, 231, 231, 231, 231, 231, 231, 231, 231, 231, 231, 231, 231, 232, 232, 232, 232, 232, 232, 232, 232, 232, 232, 232, 232, 232, 232, 232, 233, 233, 233, 233, 233, 233, 233, 233, 233, 233, 233, 233, 233, 233, 233, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 234, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 235, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 236, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 237, 238, 238, 238, 238, 238, 238, 238, 238, 238, 238, 238, 238, 238, 238, 238, 239, 239, 239, 239, 239, 239, 239, 239, 239, 239, 239, 239, 239, 239, 239, 240, 240, 240, 240, 240, 240, 240, 240, 240, 240, 240, 240, 240, 240, 240, 241, 241, 241, 241, 241, 241, 241, 241, 241, 241, 241, 241, 241, 241, 241, 242, 242, 242, 242, 242, 242, 242, 242, 242, 242, 242, 242, 242, 242, 242, 243, 243, 243, 243, 243, 243, 243, 243, 243, 243, 243, 243, 243, 243, 243, 244, 244, 244, 244, 244, 244, 244, 244, 244, 244, 244, 244, 244, 244, 244, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 245, 246, 246, 246, 246, 246, 246, 246, 
246, 246, 246, 246, 246, 246, 246, 246, 247, 247, 247, 247, 247, 247, 247, 247, 247, 247, 247, 247, 247, 247, 247, 248, 248, 248, 248, 248, 248, 248, 248, 248, 248, 248, 248, 248, 248, 248, 249, 249, 249, 249, 249, 249, 249, 249, 249, 249, 249, 249, 249, 249, 249, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 250, 251, 251, 251, 251, 251, 251, 251, 251, 251, 251, 251, 251, 251, 251, 251, 252, 252, 252, 252, 252, 252, 252, 252, 252, 252, 252, 252, 252, 252, 252, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 253, 254, 254, 254, 254, 254, 254, 254, 254, 254, 254, 254, 254, 254, 254, 254, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 256, 256, 256, 256, 256, 256, 256, 256, 256, 256, 256, 256, 256, 256, 256, 257, 257, 257, 257, 257, 257, 257, 257, 257, 257, 257, 257, 257, 257, 257, 258, 258, 258, 258, 258, 258, 258, 258, 258, 258, 258, 258, 258, 258, 258, 259, 259, 259, 259, 259, 259, 259, 259, 259, 259, 259, 259, 259, 259, 259, 260, 260, 260, 260, 260, 260, 260, 260, 260, 260, 260, 260, 260, 260, 260, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 261, 262, 262, 262, 262, 262, 262, 262, 262, 262, 262, 262, 262, 262, 262, 262, 263, 263, 263, 263, 263, 263, 263, 263, 263, 263, 263, 263, 263, 263, 263, 264, 264, 264, 264, 264, 264, 264, 264, 264, 264, 264, 264, 264, 264, 264, 265, 265, 265, 265, 265, 265, 265, 265, 265, 265, 265, 265, 265, 265, 265, 266, 266, 266, 266, 266, 266, 266, 266, 266, 266, 266, 266, 266, 266, 266, 267, 267, 267, 267, 267, 267, 267, 267, 267, 267, 267, 267, 267, 267, 267, 268, 268, 268, 268, 268, 268, 268, 268, 268, 268, 268, 268, 268, 268, 268, 269, 269, 269, 269, 269, 269, 269, 269, 269, 269, 269, 269, 269, 269, 269, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 270, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 271, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 272, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, 274, 274, 274, 274, 274, 274, 274, 274, 274, 274, 274, 274, 274, 274, 274, 275, 275, 275, 275, 275, 275, 275, 275, 275, 275, 275, 275, 275, 275, 275, 276, 276, 276, 276, 276, 276, 276, 276, 276, 276, 276, 276, 276, 276, 276, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 285, 285, 285, 285, 285, 285, 285, 285, 285, 285, 285, 285, 285, 285, 285, 286, 286, 286, 286, 286, 286, 286, 286, 286, 286, 286, 286, 286, 286, 286, 287, 287, 287, 287, 287, 287, 287, 287, 287, 287, 287, 287, 287, 287, 287, 288, 288, 288, 288, 288, 288, 288, 288, 288, 288, 288, 288, 288, 288, 288, 289, 289, 289, 289, 289, 289, 289, 289, 289, 289, 289, 289, 289, 289, 289, 290, 290, 290, 290, 290, 290, 290, 290, 290, 290, 290, 290, 290, 290, 290, 291, 291, 291, 291, 291, 291, 291, 291, 291, 291, 291, 291, 291, 291, 291, 292, 292, 292, 292, 292, 292, 292, 292, 292, 292, 292, 292, 292, 292, 292, 293, 293, 293, 293, 293, 293, 293, 293, 293, 293, 293, 293, 293, 
293, 293, 294, 294, 294, 294, 294, 294, 294, 294, 294, 294, 294, 294, 294, 294, 294, 295, 295, 295, 295, 295, 295, 295, 295, 295, 295, 295, 295, 295, 295, 295, 296, 296, 296, 296, 296, 296, 296, 296, 296, 296, 296, 296, 296, 296, 296, 297, 297, 297, 297, 297, 297, 297, 297, 297, 297, 297, 297, 297, 297, 297, 298, 298, 298, 298, 298, 298, 298, 298, 298, 298, 298, 298, 298, 298, 298, 299, 299, 299, 299, 299, 299, 299, 299, 299, 299, 299, 299, 299, 299, 299, 300, 300, 300, 300, 300, 300, 300, 300, 300, 300, 300, 300, 300, 300, 300, 301, 301, 301, 301, 301, 301, 301, 301, 301, 301, 301, 301, 301, 301, 301, 302, 302, 302, 302, 302, 302, 302, 302, 302, 302, 302, 302, 302, 302, 302, 303, 303, 303, 303, 303, 303, 303, 303, 303, 303, 303, 303, 303, 303, 303), school1 = structure(c(2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 
2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 
2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 
5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 
3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 
2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L, 2L, 2L, 4L, 2L, 4L, 3L, 2L, 4L, 3L, 5L, 2L, 4L, 3L, 5L, 1L), .Label = c("Barcelona", "London", "Milano", "Paris", "St.Gallen", "Stockholm"), class = "factor"), school2 = structure(c(4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 
5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 
1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 
1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 
6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 
4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L, 4L, 3L, 3L, 5L, 5L, 5L, 1L, 1L, 1L, 1L, 6L, 6L, 6L, 6L, 6L), .Label = c("Barcelona", "London", "Milano", "Paris", "St.Gallen", "Stockholm"), class = "factor"), win1 = c(1, 1, NA, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, NA, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, NA, 1, 1, 0, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, NA, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, NA, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, NA, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, NA, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, NA, 1, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 1, NA, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, NA, 1, 1, 1, 0, 1, 0, 0, 0, 1, 1, 0, 1, 1, 1, NA, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, NA, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, NA, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0, NA, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, NA, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, NA, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, NA, 1, 1, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 1, 1, NA, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 1, 1, 1, NA, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, NA, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, NA, 0, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 0, 1, NA, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, NA, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, NA, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, NA, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, NA, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, NA, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, NA, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 1, 0, 0, NA, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, NA, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, NA, 1, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, NA, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, NA, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, NA, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, NA, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, NA, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, NA, 1, 1, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 0, 
1, NA, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, NA, 1, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, NA, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, NA, 1, 1, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, NA, 0, 0, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 0, 0, NA, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1, NA, 0, 1, 0, 1, 1, 0, 1, 0, 1, 0, 0, 0, 1, 1, NA, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, NA, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, NA, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 1, 0, 0, 1, NA, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 1, 0, 0, 1, NA, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1, NA, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, NA, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, NA, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, NA, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 1, NA, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, NA, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, NA, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, NA, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 0, 1, NA, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, NA, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, NA, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 1, NA, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0, NA, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, NA, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, NA, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, NA, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, NA, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, NA, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, NA, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, NA, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, NA, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, NA, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, NA, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 0, 0, 1, 0, NA, 1, 0, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, NA, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1, NA, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, NA, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, NA, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, NA, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, NA, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, NA, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, NA, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, NA, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, NA, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 1, 0, 1, 1, NA, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 1, NA, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, NA, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, NA, 1, 1, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, NA, 1, 1, 0, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, NA, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, NA, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 1, NA, 1, 1, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 
0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 
0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1), win2 = c(0, 0, NA, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, NA, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, NA, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, NA, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, NA, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, NA, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, NA, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, NA, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, NA, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, NA, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, NA, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, NA, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, NA, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, NA, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, NA, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, NA, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, NA, 0, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, NA, 0, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, NA, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, NA, 1, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, NA, 1, 1, 1, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 0, NA, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0, 1, 1, 1, NA, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, NA, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, NA, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, NA, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, NA, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, NA, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 1, 0, 1, 1, NA, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, NA, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 1, 0, 0, NA, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, NA, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, NA, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, NA, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, NA, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0, NA, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, NA, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, NA, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, NA, 0, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, NA, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, NA, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 0, NA, 1, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, NA, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, NA, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 1, 1, 0, 0, NA, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, NA, 1, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, NA, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, NA, 1, 1, 1, 0, 0, 
1, 0, 1, 1, 1, 0, 1, 0, 0, NA, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, NA, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, NA, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, NA, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, NA, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, NA, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 0, NA, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, NA, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, NA, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, NA, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, NA, 0, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, NA, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, NA, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 1, 1, NA, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, NA, 1, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, NA, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, NA, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, NA, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, NA, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, NA, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, NA, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, NA, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, NA, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, NA, 0, 0, 1, 0, 0, 1, 0, 1, 0, 1, 1, 1, 0, 1, NA, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, NA, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, NA, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, NA, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, NA, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, NA, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, NA, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, NA, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, NA, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, NA, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, NA, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 1, 0, 0, NA, 0, 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 0, NA, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, NA, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, NA, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, NA, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, NA, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, NA, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, NA, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 
1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0), tied = c(0, 0, NA, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, NA, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, NA, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, NA, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, NA, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, NA, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, NA, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, NA, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, NA, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, NA, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, NA, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, NA, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, NA, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, NA, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, NA, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, NA, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, NA, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, NA, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, NA, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, NA, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, NA, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, NA, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, NA, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, NA, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, NA, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, NA, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, NA, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, NA, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, NA, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, NA, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, NA, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, NA, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, NA, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, NA, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, NA, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, NA, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, NA, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, NA, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, NA, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, NA, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, NA, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, NA, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, NA, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, NA, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, NA, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, NA, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, NA, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, NA, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, NA, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, NA, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, NA, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, NA, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, NA, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, NA, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, NA, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, NA, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, NA, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, NA, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, NA, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, NA, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, NA, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, NA, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, NA, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, NA, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, NA, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, NA, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, NA, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, NA, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, NA, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, NA, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, NA, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, NA, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, NA, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, NA, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, NA, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, NA, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, NA, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, NA, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, NA, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, NA, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, NA, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, NA, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, NA, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, NA, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, NA, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, NA, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, NA, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, NA, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, NA, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, NA, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, NA, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0), win1.adj = c(1, 1, NA, 0, 0, 0, 1, 1, 0.5, 1, 1, 1, 0.5, 1, 0.5, 1, 1, NA, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 0.5, 1, NA, 1, 1, 0, 1, 1, 0.5, 1, 1, 1, 0.5, 1, 0.5, 1, 1, NA, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, NA, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, NA, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, NA, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, NA, 1, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 1, NA, 1, 1, 1, 0, 0, 0, 0, 0, 0.5, 0, 0, 1, 0, 0, NA, 1, 1, 1, 0, 1, 0.5, 0, 0.5, 1, 1, 0, 1, 1, 1, NA, 1, 0, 0, 1, 1, 0.5, 1, 1, 1, 0, 1, 0.5, 1, 1, NA, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, NA, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0.5, NA, 1, 1, 1, 1, 1, 1, 0.5, 1, 1, 1, 0.5, 0.5, 1, 1, NA, 0, 0, 0, 1, 0.5, 0.5, 1, 0, 0, 0, 0, 0, 1, 1, NA, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, NA, 1, 1, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 1, 1, NA, 1, 0, 1, 0, 0, 0, 0, 1, 0.5, 1, 0, 1, 1, 1, NA, 1, 0, 0, 1, 0.5, 0.5, 1, 1, 0, 0, 1, 0, 0, 0, NA, 0, 0, 1, 0, 0, 0.5, 0, 1, 1, 1, 1, 1, 1, 1, NA, 0, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 0, 1, NA, 0, 0, 0, 1, 1, 1, 1, 0, 0.5, 0, 0.5, 0, 0, 0, NA, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 0, 0.5, 1, 1, NA, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, NA, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, NA, 1, 0, 0, 1, 0.5, 0.5, 1, 0, 0, 0, 0, 0, 1, 1, NA, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, NA, 1, 1, 1, 0, 0, 0.5, 0, 1, 1, 1, 0, 1, 0, 0, NA, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, NA, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0.5, 0, 0, 1, 0.5, NA, 1, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, NA, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, NA, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, NA, 1, 1, 0, 0, 0, 0, 0, 1, 0.5, 0, 0, 1, 0, 0, NA, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0.5, NA, 0, 0, 0, 0.5, 1, 0, 1, 0.5, 1, 0.5, 1, 1, 1, 1, NA, 1, 1, 0.5, 1, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, NA, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0.5, NA, 1, 0, 1, 1, 0, 0.5, 0, 0, 0, 1, 0, 0.5, 1, 0.5, NA, 1, 1, 1, 0.5, 0.5, 0.5, 0, 1, 0.5, 0.5, 0, 1, 1, 1, NA, 1, 1, 0.5, 1, 1, 0.5, 0, 0, 1, 0, 0, 0, 0.5, 1, NA, 0, 0.5, 1, 0, 0, 1, 0, 1, 1, 1, 0.5, 1, 0, 0, NA, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1, NA, 0, 1, 0, 1, 1, 0, 1, 0, 1, 0, 0, 0, 1, 1, NA, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0.5, 0, 0, 1, 0.5, NA, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, NA, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 1, 0, 0, 1, NA, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 1, 0, 0.5, 1, NA, 1, 1, 1, 0, 0.5, 0, 0, 1, 1, 1, 0, 1, 0.5, 1, NA, 1, 1, 0.5, 0, 0.5, 0, 0, 1, 1, 0, 0, 1, 1, 1, NA, 1, 0, 0, 0.5, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, NA, 0, 1, 0, 1, 1, 0.5, 1, 1, 1, 0, 1, 0, 1, 1, NA, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0.5, 1, 0, 0, 1, NA, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, NA, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, NA, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0.5, NA, 0, 0, 0.5, 0.5, 0, 0.5, 0.5, 1, 0.5, 1, 1, 1, 0, 1, NA, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, NA, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, NA, 0, 1, 0, 1, 1, 0.5, 1, 1, 1, 0.5, 1, 0.5, 0, 1, NA, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0, NA, 0, 0, 0, 0, 0, 0.5, 1, 1, 1, 1, 1, 1, 0, 0, NA, 0, 1, 1, 0, 0, 0.5, 0, 0.5, 1, 1, 0.5, 1, 1, 1, NA, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, NA, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, NA, 
0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, NA, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, NA, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, NA, 1, 0, 1, 1, 0, 1, 0.5, 1, 0, 1, 0, 1, 1, 1, NA, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0.5, 0.5, NA, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0, 1, NA, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 0, 0, 1, 0, NA, 1, 0, 1, 1, 0, 1, 0, 1, 0, 0.5, 0, 0.5, 0, 0, NA, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1, NA, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, NA, 1, 0, 1, 1, 0, 1, 0, 1, 0.5, 1, 1, 1, 1, 1, NA, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, NA, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, NA, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, NA, 0.5, 0, 0, 1, 1, 0.5, 1, 0.5, 0, 0, 0.5, 0, 1, 1, NA, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0.5, 1, 0, 0.5, NA, 0.5, 0.5, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, NA, 0, 0, 0, 1, 1, 0.5, 1, 0, 0, 0, 1, 0, 1, 1, NA, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 1, NA, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, NA, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, NA, 1, 1, 0.5, 1, 0, 0, 0, 1, 0.5, 0.5, 1, 1, 1, 1, NA, 1, 1, 0, 1, 0, 0.5, 0, 1, 1, 0.5, 0, 1, 1, 0.5, NA, 1, 0, 1, 1, 0, 1, 0, 0.5, 0, 0.5, 0, 0.5, 0, 1, NA, 0.5, 1, 0, 1, 1, 0.5, 1, 1, 1, 0.5, 1, 0, 0, 1, NA, 1, 1, 0.5, 1, 1, 0.5, 0.5, 1, 1, 0.5, 0.5, 0.5, 1, 0.5, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1, 0.5, 1, 0.5, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0.5, 0, 0.5, 1, 0, 0.5, 0.5, 1, 0, 0, 0.5, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0.5, 0, 0, 1, 1, 0.5, 0, 1, 0.5, 1, 1, 0.5, 1, 0.5, 1, 0.5, 1, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0.5, 1, 0, 0, 1, 0.5, 0.5, 1, 1, 0, 0.5, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0.5, 1, 1, 0.5, 0.5, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0.5, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0.5, 0.5, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0.5, 0, 1, 0, 0, 0.5, 0, 0.5, 1, 0, 1, 1, 1, 0.5, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0.5, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0.5, 0.5, 0.5, 0.5, 1, 0.5, 0.5, 0.5, 1, 0.5, 0.5, 0.5, 1, 0.5, 1, 1, 0, 0, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0.5, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0.5, 0.5, 0, 0, 1, 0.5, 0.5, 1, 1, 0.5, 0, 1, 0, 1, 1, 1, 1, 0, 0, 1, 0.5, 0.5, 1, 1, 0, 0, 1, 0, 0.5, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0.5, 0, 0.5, 0, 1, 1, 1, 1, 0, 0, 1, 0.5, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0.5, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0.5, 1, 1, 0.5, 1, 0, 1, 1, 0.5, 1, 0.5, 0.5, 0, 0.5, 0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0.5, 0.5, 0, 0, 0, 0.5, 1, 0.5, 1, 0, 1, 0.5, 1, 0.5, 0, 1, 1, 0, 1, 1, 0.5, 0.5, 0.5, 0, 1, 1, 1, 0.5, 1, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0.5, 1, 1, 1, 0.5, 0.5, 1, 1, 0.5, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0.5, 1, 0.5, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0.5, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 
0, 0, 1, 1, 0.5, 1, 1, 1, 0.5, 1, 0.5, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0.5, 1, 1, 1, 0, 1, 0, 1, 1, 0.5, 1, 1, 1, 0.5, 1, 1, 1, 1, 1, 0, 0.5, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0.5, 1, 1, 1, 1, 1, 1, 0, 1, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 1, 0.5, 0.5, 1, 1, 1, 1, 0.5, 0.5, 0.5, 1, 1, 1, 1, 1, 0.5, 0, 0, 0.5, 0.5, 0.5, 0, 0, 0.5, 0.5, 0.5, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0.5, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0.5, 1, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0.5, 0, 0, 1, 1, 0.5, 1, 1, 1, 1, 1, 0.5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0.5, 0, 0, 0, 0.5, 0, 0, 0, 0.5, 0, 0, 0.5, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0.5, 0, 1, 1, 0.5, 1, 0, 0, 1, 0.5, 0.5, 1, 1, 0.5, 0, 1, 1, 1, 1, 0.5, 1, 0.5, 0.5, 1, 0, 0, 0, 1, 0.5, 0.5, 0.5, 1, 1, 0.5, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0.5, 0, 1, 0, 0.5, 0.5, 0.5, 0, 1, 1, 0.5, 0.5, 0.5, 0.5, 1, 1, 1, 1, 1, 1, 0.5, 0.5, 1, 1, 1, 0.5, 0.5, 0.5, 0, 0.5, 0.5, 0.5, 0, 0.5, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0.5, 1, 1, 0.5, 0.5, 0, 1, 1, 0.5, 1, 1, 1, 0.5, 1, 0.5, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0.5, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0.5, 1, 1, 0, 0, 0, 0.5, 0, 1, 1, 0, 0, 0, 1, 1, 0.5, 1, 1, 1, 1, 1, 0.5, 0, 1, 1, 0.5, 1, 0.5, 1, 1, 0.5, 0.5, 1, 1, 0.5, 0.5, 0.5, 1, 1, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0.5, 1, 1, 1, 0.5, 1, 1, 0.5, 0, 0, 0, 0.5, 1, 1, 0, 0, 1, 0, 0.5, 1, 1, 0.5, 1, 1, 1, 0.5, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0.5, 1, 1, 1, 0.5, 1, 1, 0.5, 1, 0.5, 0, 0, 1, 1, 0.5, 0, 1, 1, 1, 0.5, 0, 0, 0, 1, 0.5, 0.5, 1, 0, 0, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0.5, 1, 1, 1, 1, 0.5, 0, 0, 0, 1, 1, 0.5, 1, 1, 1, 0.5, 1, 0.5, 1, 1, 0.5, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0.5, 0, 0, 0, 0, 0.5, 1, 0.5, 0.5, 1, 0, 1, 1, 0.5, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0.5, 1, 1, 0.5, 0.5, 1, 1, 0.5, 0.5, 0.5, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0.5, 1, 0.5, 1, 1, 0.5, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0.5, 1, 0, 0, 0, 0, 0, 0, 1, 0.5, 0.5, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0.5, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 0.5, 1, 0.5, 0.5, 1, 0.5, 0.5, 0.5, 1, 0.5, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0.5, 1, 0, 0.5, 0, 1, 1, 1, 0.5, 0.5, 0.5, 0, 1, 1, 1, 0, 0.5, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0.5, 1, 1, 0, 0.5, 0, 1, 1, 0.5, 1, 0.5, 0.5, 1, 0.5, 0.5, 0.5, 1, 0.5, 0.5, 0.5, 0.5, 0.5, 1, 1, 0.5, 1, 0, 1, 1, 1, 1, 1, 1, 0.5, 1, 0.5, 0, 1, 1, 0, 0, 0, 1, 1, 0.5, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0.5, 1, 1, 1, 1, 0.5, 1, 0.5, 0.5, 1, 0.5, 0.5, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0.5, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0.5, 1, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0.5, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 0, 
0.5, 0.5, 0.5, 1, 1, 1, 0.5, 0.5, 0.5, 0, 1, 1, 1, 0.5, 1, 0.5, 1, 1, 1, 1, 1, 0, 0.5, 0, 0, 0.5, 1, 1, 0, 1, 1, 1, 0.5, 1, 0.5, 1, 1, 0.5, 0.5, 1, 1, 0.5, 1, 0.5, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0.5, 1, 1, 1, 0, 1, 0, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0.5, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0.5, 1, 1, 1, 1, 1, 1, 1, 1, 0.5, 1, 1, 1, 0.5, 0.5, 0, 1, 1, 1, 1, 0.5, 0, 1, 0, 0, 1, 1, 0.5, 0.5, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0.5, 1, 1, 0.5, 0.5, 1, 1, 0.5, 0.5, 0.5, 1, 1, 0, 1, 0.5, 0.5, 1, 0, 0, 0, 0.5, 0, 0, 0, 0.5, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0.5, 0.5, 1, 1, 0.5, 1, 0.5, 1, 1, 1, 0.5, 0.5, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0.5, 1, 0.5, 0.5, 0.5, 1, 1, 1, 1, 0, 0.5, 0.5, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0.5, 1, 1, 0.5, 0.5, 1, 1, 0, 0.5, 0.5, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0.5, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0.5, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0.5, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0.5, 1, 1, 1, 0.5, 1, 0.5, 0, 1, 1, 0, 1, 0, 1, 1, 0.5, 1, 1, 1, 0.5, 1, 0.5, 0.5, 1, 1, 1, 1, 0, 1, 1, 0.5, 1, 1, 1, 0, 0.5, 0, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0.5, 0, 0, 1, 1, 1, 0.5, 1, 1, 1, 1, 1, 1, 0.5, 1, 1, 1, 1, 1, 1, 1, 1, 0.5, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0.5, 1, 1, 0.5, 0.5, 0, 1, 1, 0.5, 1, 1, 1, 0.5, 1, 0.5, 0, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 0.5, 1, 1, 0, 0, 1, 1, 0.5, 0.5, 1, 0.5, 0.5, 1, 0, 0, 0, 0.5, 1, 0.5, 1, 0, 0, 0, 0.5, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0.5, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0.5, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0.5, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0.5, 1, 0, 0, 1, 0.5, 0.5, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 
1, 1, 0.5, 0, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 0.5, 0.5, 0.5, 0, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 0.5, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0.5, 1, 0, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0.5, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0.5, 0.5, 1, 0, 1, 0.5, 0.5, 0.5, 0, 0.5, 0, 0.5, 0, 0.5, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0.5, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 1, 0.5, 0, 0, 0.5, 0.5, 0.5, 0, 0, 0.5, 0, 0.5, 1, 1, 1, 1), win2.adj = c(0, 0, NA, 1, 1, 1, 0, 0, 0.5, 0, 0, 0, 0.5, 0, 0.5, 0, 0, NA, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0, 0, 0.5, 0, NA, 0, 0, 1, 0, 0, 0.5, 0, 0, 0, 0.5, 0, 0.5, 0, 0, NA, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, NA, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, NA, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, NA, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, NA, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, NA, 0, 0, 0, 1, 1, 1, 1, 1, 0.5, 1, 1, 0, 1, 1, NA, 0, 0, 0, 1, 0, 0.5, 1, 0.5, 0, 0, 1, 0, 0, 0, NA, 0, 1, 1, 0, 0, 0.5, 0, 0, 0, 1, 0, 0.5, 0, 0, NA, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, NA, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0.5, NA, 0, 0, 0, 0, 0, 0, 0.5, 0, 0, 0, 0.5, 0.5, 0, 0, NA, 1, 1, 1, 0, 0.5, 0.5, 0, 1, 1, 1, 1, 1, 0, 0, NA, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, NA, 0, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, NA, 0, 1, 0, 1, 1, 1, 1, 0, 0.5, 0, 1, 0, 0, 0, NA, 0, 1, 1, 0, 0.5, 0.5, 0, 0, 1, 1, 0, 1, 1, 1, NA, 1, 1, 0, 1, 1, 0.5, 1, 0, 0, 0, 0, 0, 0, 0, NA, 1, 1, 1, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 0, NA, 1, 1, 1, 0, 0, 0, 0, 1, 0.5, 1, 0.5, 1, 1, 1, NA, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0.5, 0, 0, NA, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, NA, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, NA, 0, 1, 1, 0, 0.5, 0.5, 0, 1, 1, 1, 1, 1, 0, 0, NA, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, NA, 0, 0, 0, 1, 1, 0.5, 1, 0, 0, 0, 1, 0, 1, 1, NA, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, NA, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0.5, 1, 1, 0, 0.5, NA, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, NA, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, NA, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, NA, 0, 0, 1, 1, 1, 1, 1, 0, 0.5, 1, 1, 0, 1, 1, NA, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0.5, NA, 1, 1, 1, 0.5, 0, 1, 0, 0.5, 0, 0.5, 0, 0, 0, 0, NA, 0, 0, 0.5, 0, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, NA, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.5, NA, 0, 1, 0, 0, 1, 0.5, 1, 1, 1, 0, 1, 0.5, 0, 0.5, NA, 0, 0, 0, 0.5, 0.5, 0.5, 1, 0, 0.5, 0.5, 1, 0, 0, 0, NA, 0, 0, 0.5, 0, 0, 0.5, 1, 1, 0, 1, 1, 1, 0.5, 0, NA, 1, 0.5, 0, 1, 1, 0, 1, 0, 0, 0, 0.5, 0, 1, 1, NA, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, NA, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 1, 1, 0, 0, NA, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.5, 1, 1, 0, 0.5, NA, 1, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, NA, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, NA, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 0.5, 0, NA, 0, 0, 0, 1, 0.5, 1, 1, 0, 0, 0, 1, 0, 0.5, 0, NA, 0, 0, 0.5, 1, 0.5, 1, 1, 0, 0, 1, 1, 0, 0, 0, NA, 0, 1, 1, 0.5, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, NA, 1, 0, 1, 0, 0, 0.5, 0, 0, 0, 1, 0, 1, 0, 0, NA, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0.5, 0, 1, 1, 0, NA, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 0, NA, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 
0, 1, 1, NA, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0.5, NA, 1, 1, 0.5, 0.5, 1, 0.5, 0.5, 0, 0.5, 0, 0, 0, 1, 0, NA, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, NA, 0, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, NA, 1, 0, 1, 0, 0, 0.5, 0, 0, 0, 0.5, 0, 0.5, 1, 0, NA, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 1, 1, NA, 1, 1, 1, 1, 1, 0.5, 0, 0, 0, 0, 0, 0, 1, 1, NA, 1, 0, 0, 1, 1, 0.5, 1, 0.5, 0, 0, 0.5, 0, 0, 0, NA, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, NA, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, NA, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, NA, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, NA, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, NA, 0, 1, 0, 0, 1, 0, 0.5, 0, 1, 0, 1, 0, 0, 0, NA, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0.5, 0.5, NA, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 1, 0, NA, 0, 0, 1, 0, 0, 1, 0, 1, 0, 1, 1, 1, 0, 1, NA, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0.5, 1, 0.5, 1, 1, NA, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, NA, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, NA, 0, 1, 0, 0, 1, 0, 1, 0, 0.5, 0, 0, 0, 0, 0, NA, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, NA, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, NA, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, NA, 0.5, 1, 1, 0, 0, 0.5, 0, 0.5, 1, 1, 0.5, 1, 0, 0, NA, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0.5, 0, 1, 0.5, NA, 0.5, 0.5, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, NA, 1, 1, 1, 0, 0, 0.5, 0, 1, 1, 1, 0, 1, 0, 0, NA, 0, 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 0, NA, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, NA, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, NA, 0, 0, 0.5, 0, 1, 1, 1, 0, 0.5, 0.5, 0, 0, 0, 0, NA, 0, 0, 1, 0, 1, 0.5, 1, 0, 0, 0.5, 1, 0, 0, 0.5, NA, 0, 1, 0, 0, 1, 0, 1, 0.5, 1, 0.5, 1, 0.5, 1, 0, NA, 0.5, 0, 1, 0, 0, 0.5, 0, 0, 0, 0.5, 0, 1, 1, 0, NA, 0, 0, 0.5, 0, 0, 0.5, 0.5, 0, 0, 0.5, 0.5, 0.5, 0, 0.5, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0.5, 0, 0.5, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 0.5, 1, 0.5, 0, 1, 0.5, 0.5, 0, 1, 1, 0.5, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0.5, 1, 1, 0, 0, 0.5, 1, 0, 0.5, 0, 0, 0.5, 0, 0.5, 0, 0.5, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0.5, 0, 1, 1, 0, 0.5, 0.5, 0, 0, 1, 0.5, 0, 1, 0, 1, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0.5, 0, 0, 0.5, 0.5, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0.5, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0.5, 0.5, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0.5, 1, 0, 1, 1, 0.5, 1, 0.5, 0, 1, 0, 0, 0, 0.5, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0.5, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0.5, 0.5, 0.5, 0.5, 0, 0.5, 0.5, 0.5, 0, 0.5, 0.5, 0.5, 0, 0.5, 0, 0, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0.5, 0, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0.5, 0.5, 1, 1, 0, 0.5, 0.5, 0, 0, 0.5, 1, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0.5, 0.5, 0, 0, 1, 1, 0, 1, 0.5, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0.5, 1, 0.5, 1, 0, 0, 0, 0, 1, 1, 0, 0.5, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0.5, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0.5, 0, 
0, 0.5, 0, 1, 0, 0, 0.5, 0, 0.5, 0.5, 1, 0.5, 1, 1, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0.5, 0.5, 1, 1, 1, 0.5, 0, 0.5, 0, 1, 0, 0.5, 0, 0.5, 1, 0, 0, 1, 0, 0, 0.5, 0.5, 0.5, 1, 0, 0, 0, 0.5, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0.5, 0, 0, 0, 0.5, 0.5, 0, 0, 0.5, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0.5, 0, 0.5, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0.5, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0.5, 0, 0, 0, 0.5, 0, 0.5, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0.5, 0, 0, 0, 1, 0, 1, 0, 0, 0.5, 0, 0, 0, 0.5, 0, 0, 0, 0, 0, 1, 0.5, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0.5, 0, 0, 0, 0, 0, 0, 1, 0, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0, 0.5, 0.5, 0, 0, 0, 0, 0.5, 0.5, 0.5, 0, 0, 0, 0, 0, 0.5, 1, 1, 0.5, 0.5, 0.5, 1, 1, 0.5, 0.5, 0.5, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0.5, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0.5, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0.5, 1, 1, 0, 0, 0.5, 0, 0, 0, 0, 0, 0.5, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0.5, 1, 1, 1, 0.5, 1, 1, 1, 0.5, 1, 1, 0.5, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0.5, 1, 0, 0, 0.5, 0, 1, 1, 0, 0.5, 0.5, 0, 0, 0.5, 1, 0, 0, 0, 0, 0.5, 0, 0.5, 0.5, 0, 1, 1, 1, 0, 0.5, 0.5, 0.5, 0, 0, 0.5, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0.5, 1, 0, 1, 0.5, 0.5, 0.5, 1, 0, 0, 0.5, 0.5, 0.5, 0.5, 0, 0, 0, 0, 0, 0, 0.5, 0.5, 0, 0, 0, 0.5, 0.5, 0.5, 1, 0.5, 0.5, 0.5, 1, 0.5, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0.5, 0, 0, 0.5, 0.5, 1, 0, 0, 0.5, 0, 0, 0, 0.5, 0, 0.5, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0.5, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0.5, 0, 0, 1, 1, 1, 0.5, 1, 0, 0, 1, 1, 1, 0, 0, 0.5, 0, 0, 0, 0, 0, 0.5, 1, 0, 0, 0.5, 0, 0.5, 0, 0, 0.5, 0.5, 0, 0, 0.5, 0.5, 0.5, 0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 0.5, 0, 0, 0, 0.5, 0, 0, 0.5, 1, 1, 1, 0.5, 0, 0, 1, 1, 0, 1, 0.5, 0, 0, 0.5, 0, 0, 0, 0.5, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0.5, 0, 0, 0, 0.5, 0, 0, 0.5, 0, 0.5, 1, 1, 0, 0, 0.5, 1, 0, 0, 0, 0.5, 1, 1, 1, 0, 0.5, 0.5, 0, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0.5, 0, 0, 0, 0, 0.5, 1, 1, 1, 0, 0, 0.5, 0, 0, 0, 0.5, 0, 0.5, 0, 0, 0.5, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0.5, 1, 1, 1, 1, 0.5, 0, 0.5, 0.5, 0, 1, 0, 0, 0.5, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.5, 0, 0, 0.5, 0.5, 0, 0, 0.5, 0.5, 0.5, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0.5, 0, 0.5, 0, 0, 0.5, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0.5, 0, 1, 1, 1, 1, 1, 1, 0, 0.5, 0.5, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0.5, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0.5, 0, 0.5, 0.5, 0, 0.5, 0.5, 0.5, 0, 0.5, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0.5, 0, 1, 0.5, 1, 0, 0, 0, 0.5, 0.5, 0.5, 1, 0, 0, 0, 1, 0.5, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0.5, 0, 0, 1, 0.5, 1, 0, 0, 0.5, 0, 0.5, 0.5, 0, 0.5, 0.5, 0.5, 0, 0.5, 0.5, 0.5, 0.5, 0.5, 0, 0, 0.5, 0, 1, 0, 
0, 0, 0, 0, 0, 0.5, 0, 0.5, 1, 0, 0, 1, 1, 1, 0, 0, 0.5, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0.5, 0, 0, 0, 0, 0.5, 0, 0.5, 0.5, 0, 0.5, 0.5, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0.5, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0.5, 0, 1, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0.5, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0.5, 0.5, 0.5, 0, 0, 0, 0.5, 0.5, 0.5, 1, 0, 0, 0, 0.5, 0, 0.5, 0, 0, 0, 0, 0, 1, 0.5, 1, 1, 0.5, 0, 0, 1, 0, 0, 0, 0.5, 0, 0.5, 0, 0, 0.5, 0.5, 0, 0, 0.5, 0, 0.5, 0, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0.5, 0, 0, 0, 1, 0, 1, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 1, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0.5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0.5, 0, 0, 0, 0, 0, 0, 0, 0, 0.5, 0, 0, 0, 0.5, 0.5, 1, 0, 0, 0, 0, 0.5, 1, 0, 1, 1, 0, 0, 0.5, 0.5, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0.5, 0, 0, 0.5, 0.5, 0, 0, 0.5, 0.5, 0.5, 0, 0, 1, 0, 0.5, 0.5, 0, 1, 1, 1, 0.5, 1, 1, 1, 0.5, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0.5, 0.5, 0, 0, 0.5, 0, 0.5, 0, 0, 0, 0.5, 0.5, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0.5, 0, 0.5, 0.5, 0.5, 0, 0, 0, 0, 1, 0.5, 0.5, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.5, 0, 0, 0.5, 0.5, 0, 0, 1, 0.5, 0.5, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0.5, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0.5, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0.5, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0.5, 0, 0, 0, 0.5, 0, 0.5, 1, 0, 0, 1, 0, 1, 0, 0, 0.5, 0, 0, 0, 0.5, 0, 0.5, 0.5, 0, 0, 0, 0, 1, 0, 0, 0.5, 0, 0, 0, 1, 0.5, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.5, 1, 1, 0, 0, 0, 0.5, 0, 0, 0, 0, 0, 0, 0.5, 0, 0, 0, 0, 0, 0, 0, 0, 0.5, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0.5, 0, 0, 0.5, 0.5, 1, 0, 0, 0.5, 0, 0, 0, 0.5, 0, 0.5, 1, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0.5, 0, 0, 1, 1, 0, 0, 0.5, 0.5, 0, 0.5, 0.5, 0, 1, 1, 1, 0.5, 0, 0.5, 0, 1, 1, 1, 0.5, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0.5, 1, 1, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0.5, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0.5, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 0.5, 0, 1, 1, 0, 0.5, 0.5, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0.5, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0.5, 0.5, 0.5, 1, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0.5, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0.5, 0, 1, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0.5, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0.5, 0.5, 0, 1, 0, 0.5, 0.5, 0.5, 1, 0.5, 1, 0.5, 1, 0.5, 1, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0.5, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0.5, 1, 1, 0.5, 0.5, 0.5, 1, 1, 0.5, 1, 0.5, 0, 0, 0, 0)), .Names = c("student", "school1", "school2", "win1", "win2", "tied", "win1.adj", "win2.adj" ), row.names = c(NA, -4545L), class = "data.frame"), students = structure(list( STUD = structure(c(1L, 2L, 1L, 2L, 1L, 1L, 1L, 2L, 2L, 2L, 1L, 1L, 2L, 1L, 1L, 1L, 1L, 2L, 2L, 1L, 1L, 2L, 2L, 2L, 2L, 1L, 2L, 1L, 1L, 1L, 1L, 1L, 2L, 2L, 2L, 2L, 2L, 2L, 1L, 1L, 1L, 1L, 2L, 2L, 1L, 1L, 1L, 2L, 2L, 1L, 1L, 1L, 1L, 2L, 1L, 1L, 2L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 2L, 1L, 1L, 1L, 1L, 2L, 2L, 2L, 1L, 1L, 1L, 2L, 1L, 1L, 1L, 2L, 1L, 1L, 1L, 2L, 2L, 1L, 1L, 2L, 2L, 1L, 2L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 2L, 1L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 1L, 1L, 2L, 1L, 1L, 1L, 1L, 1L, 1L, 2L, 2L, 2L, 2L, 2L, 1L, 1L, 1L, 1L, 2L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 2L, 1L, 2L, 1L, 1L, 2L, 1L, 1L, 1L, 1L, 1L, 2L, 2L, 1L, 1L, 1L, 2L, 2L, 1L, 2L, 2L, 2L, 1L, 2L, 1L, 1L, 1L, 1L, 1L, 2L, 1L, 2L, 1L, 2L, 2L, 2L, 1L, 1L, 1L, 2L, 1L, 1L, 2L, 1L, 1L, 2L, 2L, 1L, 1L, 1L, 2L, 1L, 1L, 1L, 1L, 1L, 2L, 2L, 2L, 2L, 2L, 1L, 2L, 1L, 1L, 2L, 1L, 2L, 1L, 2L, 1L, 2L, 2L, 1L, 1L, 1L, 2L, 2L, 1L, 2L, 1L, 1L, 1L, 2L, 2L, 1L, 1L, 1L, 2L, 2L, 2L, 1L, 1L, 1L, 2L, 1L, 1L, 2L, 2L, 1L, 2L, 2L, 2L, 2L, 2L, 1L, 1L, 2L, 1L, 2L, 1L, 1L, 2L, 1L, 2L, 1L, 2L, 1L, 1L, 2L, 1L, 2L, 1L, 1L, 1L, 2L, 1L, 2L, 1L, 1L, 1L), .Label = c("other", "commerce" ), class = "factor"), ENG = structure(c(1L, 1L, 1L, 1L, 2L, 1L, 1L, 1L, 2L, 1L, 1L, 1L, 1L, 2L, 2L, 1L, 1L, 1L, 1L, 2L, 1L, 1L, 1L, 1L, 1L, 2L, 1L, 2L, 2L, 1L, 1L, 2L, 1L, 1L, 2L, 2L, 1L, 2L, 1L, 2L, 1L, 1L, 1L, 1L, 1L, 2L, 2L, 1L, 1L, 1L, 1L, 2L, 2L, 2L, 2L, 2L, 1L, 1L, 1L, 1L, 1L, 2L, 1L, 2L, 1L, 2L, 2L, 1L, 1L, 2L, 2L, 1L, 1L, 1L, 1L, 1L, 1L, 2L, 1L, 1L, 2L, 2L, 1L, 1L, 1L, 1L, 1L, 1L, 2L, 1L, 1L, 1L, 2L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 2L, 2L, 2L, 1L, 1L, 1L, 1L, 1L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 2L, 1L, 1L, 1L, 1L, 2L, 1L, 1L, 1L, 2L, 2L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 2L, 2L, 2L, 2L, 2L, 2L, 1L, 2L, 1L, 1L, 1L, 2L, 1L, 2L, 1L, 1L, 2L, 2L, 1L, 1L, 
1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 2L, 2L, 1L, 1L, 1L, 1L, 2L, 1L, 2L, 1L, 1L, 1L, 2L, 1L, 2L, 1L, 1L, 2L, 1L, 1L, 1L, 2L, 1L, 1L, 2L, 1L, 2L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 2L, 2L, 2L, 1L, 1L, 1L, 1L, 1L, 2L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 2L, 1L, 1L, 1L, 1L, 1L, 1L, 2L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 2L, 1L, 1L, 1L, 1L, 1L, 2L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 2L, 2L, 1L, 2L, 1L, 2L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 2L, 1L, 2L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 2L), .Label = c("good", "poor"), class = "factor"), FRA = structure(c(1L, 2L, 1L, 1L, 2L, 2L, 1L, 2L, 1L, 1L, 2L, 1L, 2L, 1L, 2L, 1L, 2L, 1L, 1L, 2L, 2L, 2L, 1L, 2L, 2L, 2L, 1L, 2L, 1L, 1L, 2L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 2L, 2L, 1L, 2L, 1L, 1L, 1L, 2L, 2L, 2L, 2L, 1L, 2L, 1L, 2L, 2L, 2L, 2L, 1L, 1L, 2L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 2L, 2L, 1L, 1L, 2L, 1L, 1L, 1L, 1L, 1L, 2L, 2L, 1L, 2L, 2L, 1L, 1L, 2L, 1L, 1L, 1L, 2L, 2L, 1L, 1L, 2L, 1L, 2L, 2L, 1L, 1L, 1L, 1L, 1L, 1L, 2L, 2L, 1L, 2L, 1L, 1L, 2L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 2L, 1L, 1L, 2L, 1L, 2L, 2L, 2L, 1L, 2L, 2L, 1L, 1L, 1L, 1L, 1L, 1L, 2L, 1L, 1L, 1L, 1L, 1L, 2L, 2L, 2L, 2L, 2L, 2L, 1L, 1L, 1L, 1L, 1L, 2L, 2L, 2L, 2L, 1L, 2L, 1L, 1L, 1L, 2L, 2L, 1L, 2L, 1L, 2L, 1L, 2L, 2L, 1L, 2L, 1L, 1L, 1L, 1L, 2L, 2L, 1L, 2L, 2L, 1L, 2L, 2L, 2L, 2L, 2L, 1L, 1L, 1L, 1L, 2L, 1L, 2L, 2L, 1L, 1L, 1L, 1L, 1L, 2L, 2L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 2L, 1L, 1L, 1L, 2L, 2L, 2L, 1L, 1L, 1L, 2L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 2L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 2L, 1L, 1L, 1L, 2L, 1L, 1L, 1L, 1L, 1L, 1L, 2L, 1L, 1L, 2L, 1L, 1L, 1L, 2L, 1L, 1L, 1L, 1L, 2L, 2L, 2L, 2L, 2L, 1L, 1L, 1L, 2L, 1L, 2L, 2L, 2L, 2L, 1L, 2L, 2L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 2L, 1L ), .Label = c("good", "poor"), class = "factor"), SPA = structure(c(2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 1L, 2L, 1L, 2L, 1L, 2L, 2L, 1L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 1L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 1L, 2L, 1L, 2L, 2L, 2L, 2L, 2L, 1L, 2L, 1L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 1L, 2L, 2L, 1L, 2L, 2L, 2L, 1L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 1L, 2L, 2L, 2L, 2L, 1L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 1L, 2L, 2L, 1L, 2L, 2L, 2L, 2L, 2L, 2L, 1L, 2L, 1L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 1L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 1L, 2L, 2L, 2L, 1L, 2L, 1L, 2L, 2L, 2L, 1L, 2L, 1L, 1L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 1L, 2L, 2L, 2L, 2L, 2L, 2L, 1L, 1L, 2L, 2L, 2L, 2L, 2L, 1L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 1L, 2L, 2L, 2L, 2L, 1L, 2L, 2L, 2L, 1L, 1L, 2L, 2L, 2L, 1L, 2L, 2L, 2L, 2L, 2L, 1L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 1L, 2L, 2L, 1L, 2L, 2L, 2L, 1L, 2L, 2L, 2L, 2L, 2L, 2L, 1L, 2L, 2L, 2L, 1L, 2L, 2L, 2L, 2L, 2L), .Label = c("good", "poor"), class = "factor"), ITA = structure(c(2L, 2L, 2L, 1L, 2L, 2L, 2L, 1L, 2L, 2L, 2L, 2L, 1L, 2L, 2L, 2L, 1L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 1L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 1L, 2L, 2L, 2L, 2L, 2L, 1L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 1L, 2L, 1L, 2L, 2L, 2L, 2L, 2L, 1L, 2L, 2L, 2L, 2L, 2L, 2L, 1L, 2L, 1L, 1L, 2L, 2L, 1L, 2L, 2L, 2L, 2L, 2L, 1L, 2L, 2L, 2L, 
1L, 2L, 2L, 2L, 2L, 2L, 2L, 1L, 1L, 1L, 2L, 2L, 2L, 1L, 2L, 2L, 2L, 2L, 2L, 1L, 2L, 2L, 2L, 2L, 2L, 2L, 1L, 2L, 2L, 2L, 2L, 2L, 1L, 1L, 1L, 2L, 2L, 2L, 2L, 2L, 1L, 2L, 2L, 2L, 1L, 2L, 1L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 1L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 1L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 1L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 1L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 1L, 1L, 2L, 2L, 2L, 2L, 2L, 1L, 1L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 1L, 2L, 2L, 2L, 2L, 1L, 1L, 2L, 2L, 1L, 1L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 1L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 1L, 1L, 2L, 2L, 2L, 2L, 1L, 2L, 1L, 1L, 2L, 1L, 2L, 2L, 2L, 2L, 2L, 1L, 2L, 2L, 2L, 2L, 2L), .Label = c("good", "poor"), class = "factor"), WOR = structure(c(1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 2L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 2L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 2L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 2L, 1L, 1L, 2L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 2L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 2L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 2L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 2L, 1L, 1L, 1L, 2L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 2L, 2L, 1L, 1L, 1L, 1L, 1L, 2L, 1L), .Label = c("no", "yes"), class = "factor"), DEG = structure(c(2L, 1L, 2L, 1L, 1L, 1L, 2L, 2L, 2L, 2L, 1L, 2L, 1L, 1L, 1L, 2L, 2L, 2L, 1L, 1L, 2L, 2L, 2L, 2L, 2L, 2L, 1L, 1L, 1L, 1L, 1L, 2L, 2L, 1L, 1L, 1L, 1L, 1L, 2L, 1L, 2L, 1L, 2L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 2L, 1L, 1L, 1L, 1L, 1L, 2L, 2L, 1L, 1L, 1L, 1L, 1L, 2L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 2L, 1L, 2L, 1L, 1L, 1L, 1L, 2L, 2L, 1L, 1L, 1L, 1L, 2L, 2L, 2L, 1L, 2L, 1L, 1L, 1L, 2L, 1L, 1L, 2L, 2L, 2L, 2L, 1L, 2L, 1L, 2L, 2L, 2L, 1L, 1L, 2L, 1L, 2L, 2L, 2L, 2L, 2L, 1L, 1L, 1L, 1L, 2L, 2L, 2L, 2L, 1L, 1L, 1L, 1L, 1L, 2L, 1L, 1L, 1L, 1L, 2L, 2L, 2L, 1L, 1L, 1L, 2L, 2L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 2L, 1L, 2L, 1L, 1L, 2L, 1L, 1L, 1L, 1L, 1L, 1L, 2L, 1L, 1L, 2L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 2L, 1L, 2L, 1L, 2L, 1L, 1L, 1L, 1L, 1L, 1L, 2L, 1L, 1L, 1L, 1L, 1L, 1L, 2L, 2L, 1L, 1L, 2L, 1L, 2L, 1L, 2L, 1L, 2L, 1L, 1L, 2L, 2L, 1L, 1L, 1L, 2L, 1L, 2L, 1L, 2L, 2L, 2L, 1L, 2L, 1L, 2L, 2L, 2L, 1L, 1L, 1L, 2L, 2L, 2L, 1L, 1L, 2L, 1L, 2L, 2L, 1L, 2L, 1L, 1L, 1L, 2L, 2L, 2L, 1L, 2L, 1L, 2L, 2L, 1L, 2L, 2L, 1L, 1L, 2L, 1L, 1L, 1L, 2L, 1L, 1L, 1L, 1L, 2L, 1L, 2L, 2L, 2L, 2L, 1L, 2L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 2L, 2L, 1L, 2L), .Label = c("no", "yes"), class = "factor"), SEX = structure(c(2L, 1L, 2L, 1L, 2L, 2L, 2L, 1L, 1L, 1L, 1L, 2L, 1L, 1L, 2L, 2L, 2L, 1L, 2L, 2L, 2L, 2L, 1L, 2L, 2L, 2L, 2L, 2L, 1L, 2L, 2L, 2L, 
2L, 2L, 2L, 2L, 1L, 1L, 2L, 2L, 1L, 2L, 1L, 1L, 2L, 2L, 2L, 2L, 1L, 2L, 2L, 2L, 1L, 1L, 2L, 2L, 1L, 1L, 1L, 1L, 1L, 2L, 2L, 1L, 1L, 1L, 1L, 1L, 2L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 2L, 1L, 1L, 1L, 2L, 2L, 2L, 1L, 2L, 2L, 1L, 2L, 1L, 1L, 1L, 1L, 1L, 1L, 2L, 1L, 1L, 1L, 2L, 2L, 1L, 2L, 1L, 1L, 1L, 2L, 1L, 1L, 1L, 2L, 1L, 1L, 1L, 1L, 1L, 2L, 2L, 1L, 1L, 1L, 2L, 2L, 2L, 2L, 2L, 1L, 2L, 1L, 2L, 2L, 2L, 1L, 1L, 2L, 1L, 2L, 1L, 2L, 1L, 1L, 2L, 1L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 1L, 2L, 2L, 2L, 1L, 2L, 2L, 2L, 2L, 2L, 2L, 1L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 1L, 1L, 1L, 2L, 1L, 1L, 1L, 2L, 1L, 1L, 2L, 1L, 2L, 1L, 1L, 1L, 2L, 1L, 2L, 2L, 1L, 2L, 1L, 1L, 2L, 1L, 2L, 2L, 2L, 2L, 1L, 1L, 1L, 1L, 2L, 2L, 2L, 2L, 1L, 2L, 2L, 1L, 2L, 2L, 2L, 2L, 2L, 2L, 1L, 1L, 1L, 2L, 1L, 1L, 2L, 1L, 1L, 1L, 1L, 1L, 2L, 1L, 1L, 1L, 1L, 1L, 2L, 2L, 2L, 1L, 1L, 2L, 1L, 1L, 1L, 1L, 1L, 2L, 2L, 1L, 2L, 2L, 2L, 1L, 1L, 2L, 1L, 1L, 2L, 2L, 2L, 2L, 1L, 1L, 2L, 2L, 2L, 2L, 2L, 2L, 1L, 1L, 2L, 2L, 2L, 2L, 1L, 2L, 1L, 1L, 2L, 1L, 1L, 2L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 2L, 1L), .Label = c("female", "male"), class = "factor")), .Names = c("STUD", "ENG", "FRA", "SPA", "ITA", "WOR", "DEG", "SEX"), row.names = c(NA, -303L), class = "data.frame"), schools = structure(list(Barcelona = c(1, 0, 0, 0, 0, 0), London = c(0, 1, 0, 0, 0, 0), Milano = c(0, 0, 1, 0, 0, 0), Paris = c(0, 0, 0, 1, 0, 0), St.Gallen = c(0, 0, 0, 0, 1, 0), Stockholm = c(0, 0, 0, 0, 0, 1), LAT = c(1, 0, 1, 1, 0, 0)), .Names = c("Barcelona", "London", "Milano", "Paris", "St.Gallen", "Stockholm", "LAT"), row.names = c("Barcelona", "London", "Milano", "Paris", "St.Gallen", "Stockholm"), class = "data.frame")), .Names = c("preferences", "students", "schools")) BradleyTerry2/data/citations.R0000644000176200001440000000051614775237530016014 0ustar liggesuserscitations <- structure(c(714, 33, 320, 284, 730, 425, 813, 276, 498, 68, 1072, 325, 221, 17, 142, 188), .Dim = c(4L, 4L), .Dimnames = structure(list( cited = c("Biometrika", "Comm Statist", "JASA", "JRSS-B"), citing = c("Biometrika", "Comm Statist", "JASA", "JRSS-B" )), .Names = c("cited", "citing")), class = "table") BradleyTerry2/data/chameleons.R0000644000176200001440000002073314775237530016140 0ustar liggesuserschameleons <- structure(list(winner = structure(list(ID = structure(c(22L, 4L, 24L, 30L, 17L, 26L, 4L, 23L, 24L, 8L, 16L, 21L, 22L, 17L, 26L, 23L, 24L, 12L, 31L, 28L, 33L, 15L, 10L, 18L, 22L, 20L, 26L, 28L, 33L, 5L, 19L, 8L, 25L, 12L, 31L, 15L, 18L, 8L, 16L, 9L, 12L, 10L, 19L, 11L, 18L, 28L, 5L, 19L, 8L, 12L, 28L, 26L, 34L, 18L, 31L, 16L, 12L, 17L, 34L, 15L, 16L, 25L, 31L, 34L, 15L, 18L, 31L, 12L, 21L, 26L, 33L, 8L, 20L, 34L, 24L, 25L, 33L, 16L, 21L, 15L, 10L, 12L, 10L, 18L, 8L, 13L, 12L, 33L, 28L, 34L, 26L, 25L, 9L, 23L, 22L, 10L, 10L, 9L, 33L, 34L, 26L, 31L, 23L, 13L, 25L, 30L), .Label = c("C01", "C02", "C03", "C04", "C05", "C06", "C08", "C10", "C11", "C12", "C13", "C14", "C15", "C16", "C17", "C19", "C20", "C21", "C22", "C24", "C25", "C26", "C28", "C29", "C30", "C31", "C32", "C34", "C35", "C36", "C37", "C38", "C39", "C40", "C41"), class = "factor"), prev.wins.1 = c(0L, 0L, 0L, 0L, 0L, 0L, 1L, 0L, 1L, 0L, 0L, 0L, 1L, 1L, 1L, 1L, 1L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 1L, 0L, 1L, 1L, 1L, 0L, 0L, 1L, 0L, 1L, 1L, 1L, 1L, 1L, 0L, 0L, 1L, 0L, 1L, 0L, 1L, 1L, 0L, 1L, 1L, 1L, 1L, 1L, 0L, 1L, 0L, 1L, 1L, 0L, 1L, 0L, 1L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 1L, 0L, 0L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 0L, 1L, 1L, 1L, 1L, 0L, 1L, 1L, 0L, 1L, 1L, 1L, 0L, 0L, 0L, 1L, 1L, 1L, 1L, 1L, 1L, 0L, 1L, 1L, 1L, 
0L), prev.wins.2 = c(0L, 0L, 0L, 0L, 0L, 0L, 1L, 0L, 1L, 0L, 0L, 0L, 1L, 1L, 1L, 1L, 2L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 2L, 0L, 2L, 1L, 1L, 0L, 0L, 1L, 0L, 1L, 1L, 1L, 1L, 2L, 1L, 0L, 2L, 1L, 1L, 0L, 2L, 2L, 1L, 2L, 2L, 2L, 2L, 2L, 0L, 2L, 1L, 1L, 2L, 1L, 1L, 1L, 2L, 1L, 1L, 2L, 1L, 2L, 2L, 2L, 1L, 2L, 1L, 1L, 0L, 2L, 2L, 1L, 1L, 2L, 1L, 1L, 1L, 2L, 1L, 2L, 1L, 0L, 2L, 2L, 1L, 2L, 2L, 2L, 1L, 1L, 1L, 2L, 2L, 1L, 2L, 2L, 2L, 1L, 1L, 1L, 2L, 0L), prev.wins.all = c(0L, 0L, 0L, 0L, 0L, 0L, 1L, 0L, 1L, 0L, 0L, 0L, 1L, 1L, 1L, 1L, 2L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 2L, 0L, 2L, 1L, 1L, 0L, 0L, 1L, 0L, 1L, 1L, 1L, 1L, 2L, 1L, 0L, 2L, 1L, 1L, 0L, 2L, 2L, 1L, 2L, 3L, 3L, 3L, 3L, 0L, 3L, 2L, 2L, 4L, 2L, 1L, 2L, 3L, 1L, 3L, 2L, 3L, 4L, 4L, 5L, 1L, 4L, 2L, 4L, 1L, 3L, 3L, 2L, 3L, 4L, 2L, 4L, 2L, 6L, 3L, 5L, 5L, 0L, 7L, 4L, 4L, 4L, 5L, 3L, 1L, 2L, 3L, 4L, 5L, 2L, 5L, 5L, 6L, 5L, 3L, 1L, 4L, 1L)), .Names = c("ID", "prev.wins.1", "prev.wins.2", "prev.wins.all"), row.names = c(1L, 2L, 3L, 4L, 5L, 6L, 7L, 8L, 9L, 10L, 11L, 12L, 13L, 14L, 15L, 17L, 18L, 19L, 20L, 21L, 22L, 23L, 24L, 25L, 26L, 27L, 28L, 29L, 30L, 31L, 32L, 33L, 34L, 35L, 36L, 37L, 38L, 39L, 40L, 41L, 42L, 43L, 44L, 45L, 46L, 47L, 48L, 49L, 50L, 51L, 52L, 53L, 54L, 55L, 56L, 57L, 58L, 59L, 60L, 61L, 62L, 63L, 64L, 65L, 66L, 67L, 68L, 69L, 70L, 71L, 72L, 73L, 74L, 75L, 76L, 77L, 78L, 79L, 80L, 81L, 82L, 83L, 84L, 85L, 86L, 87L, 88L, 89L, 90L, 91L, 92L, 93L, 94L, 95L, 96L, 97L, 98L, 99L, 100L, 101L, 102L, 103L, 104L, 105L, 106L, 107L), class = "data.frame"), loser = structure(list(ID = structure(c(2L, 7L, 23L, 35L, 29L, 34L, 6L, 30L, 35L, 13L, 25L, 2L, 20L, 34L, 29L, 35L, 30L, 9L, 27L, 3L, 1L, 32L, 11L, 11L, 21L, 2L, 17L, 1L, 3L, 10L, 14L, 16L, 13L, 27L, 9L, 11L, 32L, 25L, 13L, 27L, 31L, 14L, 5L, 32L, 15L, 33L, 14L, 10L, 27L, 3L, 20L, 4L, 6L, 23L, 8L, 27L, 28L, 22L, 7L, 30L, 9L, 27L, 3L, 4L, 14L, 5L, 1L, 20L, 29L, 15L, 31L, 2L, 7L, 35L, 19L, 3L, 4L, 29L, 14L, 17L, 30L, 21L, 17L, 24L, 4L, 27L, 17L, 20L, 30L, 11L, 19L, 6L, 7L, 5L, 5L, 15L, 12L, 14L, 28L, 20L, 24L, 11L, 15L, 3L, 29L, 14L), .Label = c("C01", "C02", "C03", "C04", "C05", "C06", "C08", "C10", "C11", "C12", "C13", "C14", "C15", "C16", "C17", "C19", "C20", "C21", "C22", "C24", "C25", "C26", "C28", "C29", "C30", "C31", "C32", "C34", "C35", "C36", "C37", "C38", "C39", "C40", "C41"), class = "factor"), prev.wins.1 = c(0L, 0L, 0L, 0L, 0L, 0L, 0L, 1L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 1L, 0L, 1L, 0L, 0L, 1L, 0L, 1L, 0L, 0L, 0L, 0L, 0L, 1L, 0L, 0L, 1L, 0L, 1L, 0L, 1L, 1L, 0L, 1L, 0L, 0L, 1L, 1L, 0L, 1L, 1L, 0L, 1L, 1L, 0L, 0L, 1L, 0L, 0L, 0L, 0L, 1L, 0L, 0L, 0L, 1L, 1L, 0L, 0L, 0L, 1L, 0L, 0L, 0L, 0L, 1L, 0L, 1L, 0L, 1L, 0L, 0L, 0L, 1L, 0L, 1L, 0L, 0L, 0L, 0L, 0L, 1L, 1L, 0L, 1L, 0L, 0L, 0L, 0L, 0L, 0L, 0L), prev.wins.2 = c(0L, 0L, 0L, 0L, 0L, 0L, 0L, 1L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 1L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 1L, 0L, 2L, 0L, 0L, 1L, 0L, 1L, 0L, 0L, 0L, 0L, 0L, 1L, 0L, 0L, 2L, 0L, 1L, 0L, 2L, 2L, 0L, 1L, 0L, 0L, 1L, 2L, 0L, 2L, 2L, 0L, 2L, 2L, 0L, 0L, 1L, 0L, 0L, 1L, 0L, 1L, 0L, 1L, 0L, 2L, 2L, 0L, 0L, 0L, 2L, 0L, 0L, 0L, 0L, 1L, 0L, 2L, 1L, 2L, 0L, 0L, 0L, 1L, 0L, 1L, 1L, 0L, 0L, 1L, 0L, 1L, 2L, 0L, 1L, 1L, 1L, 1L, 1L, 0L, 0L, 0L), prev.wins.all = c(0L, 0L, 0L, 0L, 0L, 0L, 0L, 1L, 0L, 0L, 0L, 0L, 3L, 0L, 0L, 0L, 1L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 1L, 0L, 2L, 0L, 0L, 1L, 0L, 1L, 0L, 0L, 0L, 0L, 0L, 1L, 0L, 0L, 2L, 0L, 1L, 0L, 2L, 2L, 0L, 2L, 0L, 0L, 1L, 3L, 0L, 2L, 4L, 0L, 4L, 3L, 0L, 1L, 1L, 0L, 0L, 3L, 0L, 2L, 0L, 1L, 0L, 4L, 5L, 0L, 0L, 0L, 3L, 0L, 3L, 0L, 0L, 
3L, 1L, 3L, 3L, 4L, 3L, 0L, 3L, 2L, 1L, 1L, 3L, 0L, 0L, 2L, 2L, 5L, 8L, 0L, 5L, 2L, 4L, 1L, 5L, 0L, 0L, 0L)), .Names = c("ID", "prev.wins.1", "prev.wins.2", "prev.wins.all"), row.names = c(1L, 2L, 3L, 4L, 5L, 6L, 7L, 8L, 9L, 10L, 11L, 12L, 13L, 14L, 15L, 17L, 18L, 19L, 20L, 21L, 22L, 23L, 24L, 25L, 26L, 27L, 28L, 29L, 30L, 31L, 32L, 33L, 34L, 35L, 36L, 37L, 38L, 39L, 40L, 41L, 42L, 43L, 44L, 45L, 46L, 47L, 48L, 49L, 50L, 51L, 52L, 53L, 54L, 55L, 56L, 57L, 58L, 59L, 60L, 61L, 62L, 63L, 64L, 65L, 66L, 67L, 68L, 69L, 70L, 71L, 72L, 73L, 74L, 75L, 76L, 77L, 78L, 79L, 80L, 81L, 82L, 83L, 84L, 85L, 86L, 87L, 88L, 89L, 90L, 91L, 92L, 93L, 94L, 95L, 96L, 97L, 98L, 99L, 100L, 101L, 102L, 103L, 104L, 105L, 106L, 107L), class = "data.frame"), predictors = structure(list(ch.res = c(0.94, 0.85, 0.02, -1.76, 0.02, -0.37, -0.87, -0.01, 0.63, 1.63, -0.58, 1.46, -0.54, -0.11, 0.11, -0.21, 0.49, 0.24, 0.02, 0.18, 0.45, -0.59, 0.25, 0.24, 0.18, 0.39, -1.11, -0.4, -1.02, 1.63, -0.47, -0.3, 0.26, -1.88, -1.08), jl.res = c(0.52, 1.41, -0.16, -0.79, 0.25, -0.24, -0.72, -0.21, -0.04, -1.2, -0.19, -1.43, -0.1, 0.33, -0.44, -0.23, 0.73, -0.12, 0.65, 0.72, 0.19, 0.22, -0.03, 0.12, 0.58, 0.22, -0.45, -0.4, -0.83, 1.3, 0.02, -0.26, 0.24, -0.48, -0.14), tl.res = c(-1.58, 5.04, 3.43, -5.63, 2.97, -4.1, 1.83, -3.8, -0.15, -8.68, -1.22, -1.85, 1.59, 3.24, -2.48, 0.74, 4.73, 3.07, -6.44, -0.41, -7.03, -0.84, 8.08, -9.25, -1.48, -1.66, 1.22, -1.84, -2, 11.13, 1.25, 11.95, 5.39, 1.09, 4.31), mass.res = c(-0.45, 0.81, 0.31, -1.4, -0.19, -1.21, -1.46, 0.28, 0.9, 0.52, -0.42, 0.09, 0.24, 0.77, -0.86, -0.16, 0.29, -0.04, 1.19, 0.01, 0.79, -0.11, 0.21, 0.2, 0.24, 0.91, 0.19, 1.29, -0.42, 2.39, 0.07, -0.45, 0.19, -0.91, -0.62), SVL = c(72.8, 74.57, 74.66, 79.17, 89.52, 80.26, 80.31, 72.22, 70.62, 85.91, 81.84, 73.74, 69.32, 90.43, 81.29, 68.47, 77.04, 81.99, 88.7, 76.03, 76.48, 77.83, 82.73, 85.96, 70.12, 78.45, 72.2, 73.74, 77.95, 82.7, 70.32, 81.34, 75.8, 79, 83.3), prop.main = c(35.66853756, 21.13429221, 25.84193276, 33.21091076, 25.10409025, 25.10409025, 27.97210113, 24.3500636, 24.3500636, 31.30644625, 30.65729899, 31.94805943, 34.44990199, 31.30644625, 33.21091076, 22.786498, 31.30644625, 33.21091076, 27.27472868, 27.97210113, 37.46496893, 31.94805943, 27.97210113, 22.786498, 25.10409025, 45, 35.66853756, 33.21091076, 21.13429221, 26.56505118, 27.27472868, 25.10409025, 32.58270626, 31.30644625, 31.30644625), prop.patch = c(50.1848799, 30, 28.65818058, 41.55394871, 38.05672982, 39.8151201, 43.28009362, 34.44990199, 36.86989765, 31.30644625, 39.23152048, 48.44605129, 42.13041476, 38.6454835, 49.02392312, 29.33387425, 38.6454835, 39.8151201, 33.83315867, 30, 49.60344811, 43.85377861, 42.70571713, 27.27472868, 40.97607688, 50.76847952, 47.29428287, 42.13041476, 21.97275978, 44.427004, 37.46496893, 34.44990199, 45, 53.72880156, 43.28009362)), .Names = c("ch.res", "jl.res", "tl.res", "mass.res", "SVL", "prop.main", "prop.patch"), row.names = c("C01", "C02", "C03", "C04", "C05", "C06", "C08", "C10", "C11", "C12", "C13", "C14", "C15", "C16", "C17", "C19", "C20", "C21", "C22", "C24", "C25", "C26", "C28", "C29", "C30", "C31", "C32", "C34", "C35", "C36", "C37", "C38", "C39", "C40", "C41"), class = "data.frame")), .Names = c("winner", "loser", "predictors")) BradleyTerry2/data/flatlizards.rda0000644000176200001440000002067614775237530016714 0ustar liggesusers{wTIs@@DIb@S슂a ( (M19ŀšA*# ( "I$D~]5 ۺws9=5Uw[{3u @V "WNr+/+GF Ж <}܂}C݃$Srҫ..1Dt$b#kF0V"$b$CX2jCt $I"=$W"]U S+T߾Ł[>ց9(%%lZmmCҚJk~E<Ze5?P@Ye @Y;u0quLTRa*f%VF< H a**S b"s*TK׿SI@EKŶQ 
"*L5b|TTP*aR}!]S{՘dF<uS1jpPahYza@oأxoZ5X`9Y>ԩ- CzR)jǏYm4sr˛ufJ7߃ zsVw1ֺcqoTcJna __(@ zDϠbQja|wXcwn@h&sVnJ1 B37]ey鉗lǞ^,$`vjOxF(<& OA>nm^ MJ>ZΙuZMJۉM @4@ާ~0Tzq!!4}cD}dٰKZA,嵻^'yP6nmַ%d};_b|A%*NFʥe`nhx4k9t<9$^֞c_Xe2l}bԀkUC+&/ Cgz2l5]`=5=*ۺ(& 5ƲD~ͳ@.}f xhj64m`0Y?v G3:e\9 ׾Wi{T}C+3@?dHEI[MwJ&vى*J_lV¶z7/c3M>}%)/Vb2;Cѓ|Q[mS|3ǝRzʕrKUx1)l|cԔ>Zn<±"vfNU:- +UVVw_ qzSl>mwq Szop060Tu0ۯ 4xagOD֖eu,O`~w8-\5=3@\P͈S{P_C &LGmKb6^q 5vy1_l9)nӋ 㳴CH׫Cֆ_S 3 ,BNbɗ{e,%a@i͛%\# *Y5^N5T͏0d *~y rKs/^UIQn$#k.Wጪz3ȉE5Vu[|3 Lz9{&z TlQ 1Wڸ_"qWðfľL(׫lu__Q~.8sȷ@U+'&zm=X ُ>޶_az o['1up*&,CE_~ x+XXV.gzzM%o .ls㷲x1Adɘ~ gq`͗(6vԙ'Η^`מ4M=wQ8V;_ܐZ%ߚ&c$KR_Y-"oxp4 x]36YC#,+X([]<M>+48AXE|bz-XQ{"g?{-g_V Uk]23t3(fvMt2ϰdwW@MC0io̲O[6I-{dF; ֯9t/%9~ʺk-eݮI. j&džV:~+YI`:eX;\0'M9XgsYS2^d?ªn7j0j}vB!oY󖢖o9nP'\S5?(?^H/X5Hms6^P uiaE Ɨ]sb2{z$PG4ugetjԾNɒ̛x£`%ܩ؄藗8AbVc6H,_ Q͞W-c\ mHuHhՇ}aag6v!Elm$H^*w( z'2T;$|N3% 9Va;ШM ,c: 3^ͪHe8A#9r:kna׃׬[C%dK[8=Ь+S_+s`yl칆OTOsL}=dH;†w("=/ oyj_;$/Q)79oX-?i؈ĝdXO؅}CṈ A}IT$:6?L I:n_K˜Hj#W,Kx ES~_H@E=Ԧ~Ы"9 :T`R_XR>-Q%ƳSQ7u~VȦy?Ah:qEX8(V~zC-wM|?=oPFPI8~Dϳ&PO+;l='-^a!'@w&>f0B=634wuAC_*cq4M_lk ֡uY'ބiޫ^18i"S]?nGZձ$ kczlꔒ>Ao[P8Z񇚄w%«ŤBYt*[K&",砂~ vRM`m2zu@PGтMI+S. y!]eqW7Ii=@ zUȾ1=&IM}؈>7 L@!.?-ZIc9hPM.9JyNڧ[Y hA?8Fb V?3M؝1?} H /Bs P#T|L @>mLҷj tm5hӵrKoBU,oroh~@5U򭕥Ѩ:$_W o@]+6O?MdLI(R|(Crh@h֝AI< s3hvԱ}5_eHfROzE *"(5{| *(,d%yWBr1:mZ߀,.$0J؍J󚉐|SA99Eʹ%˯a7Qj?} 69/V'Z|J |~lx2]V'~:ili@RE]'D׿< M|_\Zʮ2I L̒l?}>6wapy. Lyw/Vr~9+&W:}# \OېYG|AߕtR9^C"X13Ȁc*54 _&$MDbUP8#9O7H#~˵(9?othR!{>` /lv3HpxG}nK ^w? JqW$}?vpYMl?J>Fg \W%ww!=*F>0*?) D_h+H(~xyl3ȗkD9'HT5ѻGgsjֹ4AIιg u{1T}657O~,~/:7ONHܕ2T,^u+VęW=Rx~=$qqn sgG3\\sHKۼN+ ([Jv'UgH8i:>[2XߜDc~k&M +ƭ $ (E4ON?n[şOPVRf;\yh޸ayQAy)G@nf\P(=;.bJgty%@DxyPZ[4Dׂ) vK6_wI5zrŻ8f oٝ: =5˙ɡ}q~4^sn/^_vg [R{dM3=@Nܵط) lktq3dJI?t-Ԏ|l0 (ݼraˢj@V3Arv[Z?DúħkV7-iChn^0#]w=me_*`SWw#-uY7ߵ! m(|28C8?24&ob9*e~ } uPccv'r&Gw71r@Jc>色`OS!,|uy0:z%t8PS4닦|3BUWF^7EmǽA5S Ji$GW3kp%dЁz٭F;ՠTݔg7CwϽJR&i4Ag֢|}r)K (s`PzV!H[$&$] :mtz9+^(ʮM*nٻ$hiP(t|opϖa @bڷ4M{]G%3m IOY|x h<Pڃw2>VeE$ eԜ+&ҜE=ÐW%|ugQ r_@QKQo ?T[K ^-55+4A]v9 G>8=&a%ţFGOYI AC9egG%#y O+O8q˃ʖu=+A~IBy*E7=#A9m3R t$nOn= ޯ 6:a>x2 ciUU 5iYt+"eAz13@LFF6@o܇n;p擒n1:63ȯ{7{9m|u:ǧO| =Wr+/Nl^e̳ J,9ٖύj9c~vlG9 *vzvpqS_+Tm9x{P-}[s3nQ՚&wS?j.cz{|Z(/PX9xhUP6̦ @@a# [Y9 / Wp]zznFI__*V>?g֢)7k[Ğ@Br,ƀ"f=5tTչxmz9ވ>~y?y4 dH1]ҧvYF'Oy7Qj*7LrN#b& 8*Fgߔn<+t‰iP6g=qʗUTZo=ddo{T?r>PEM_u%VP֧mO%XJ óʶQکWɫPIPpoI7n#^1{X( cA~~:tU{@RZ¤hfgWU:+$/u--Pָ( d;o.D vjԬdV#]oԏ rŜR\P[gْ9ĩfܐA3lq >d-TTbƃqg!_r6WqjWrz6"g. 2$l `dUL+wa7.]?Qbv=2<]2:ˬawrGo0?FdfKgҟQ{Jt2F?P Xa]1q?YȣBn? 
]JC<=$>9Lj^n}<|{IuC+ݹ2{Ա* |ݽ|8~%^HO?''?wf>H?ݛ)jAd .jvFBradleyTerry2/data/seeds.R0000644000176200001440000000105314775237530015117 0ustar liggesusersseeds <- structure(list(r = c(10, 23, 23, 26, 17, 5, 53, 55, 32, 46, 10, 8, 10, 8, 23, 0, 3, 22, 15, 32, 3), n = c(39, 62, 81, 51, 39, 6, 74, 72, 51, 79, 13, 16, 30, 28, 45, 4, 12, 41, 30, 51, 7), seed = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1), extract = c(0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1)), .Names = c("r", "n", "seed", "extract" ), row.names = c("1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11", "12", "13", "14", "15", "16", "17", "18", "19", "20", "21"), class = "data.frame") BradleyTerry2/data/baseball.RData0000644000176200001440000000054214775237530016355 0ustar liggesusersRN0 vxzƮp@2Q-m4Pq)yQ5g'u~,0֍="!vVlPJ~w Z{cE@@@ Mc4O*ŗng6_4Ujj(4fw!0ZUϕ EtJ(Mi5l%{wt&H7><믪.򓂂+(!u,x1ϺOx֡Ŧ%;rN>x<8#'u9ʟz桙 `-hcUGĐUM;/.vhk[k!ת+}^7BradleyTerry2/data/icehockey.R0000644000176200001440000015317614775237530015775 0ustar liggesusersicehockey <- structure(list(date = c(20091008L, 20091008L, 20091009L, 20091009L, 20091009L, 20091009L, 20091009L, 20091009L, 20091009L, 20091009L, 20091009L, 20091009L, 20091009L, 20091009L, 20091009L, 20091009L, 20091009L, 20091009L, 20091009L, 20091009L, 20091010L, 20091010L, 20091010L, 20091010L, 20091010L, 20091010L, 20091010L, 20091010L, 20091010L, 20091010L, 20091010L, 20091010L, 20091010L, 20091010L, 20091010L, 20091010L, 20091010L, 20091011L, 20091011L, 20091015L, 20091015L, 20091016L, 20091016L, 20091016L, 20091016L, 20091016L, 20091016L, 20091016L, 20091016L, 20091016L, 20091016L, 20091016L, 20091016L, 20091016L, 20091016L, 20091016L, 20091016L, 20091016L, 20091016L, 20091016L, 20091016L, 20091017L, 20091017L, 20091017L, 20091017L, 20091017L, 20091017L, 20091017L, 20091017L, 20091017L, 20091017L, 20091017L, 20091017L, 20091017L, 20091017L, 20091017L, 20091017L, 20091017L, 20091018L, 20091020L, 20091022L, 20091023L, 20091023L, 20091023L, 20091023L, 20091023L, 20091023L, 20091023L, 20091023L, 20091023L, 20091023L, 20091023L, 20091023L, 20091023L, 20091023L, 20091023L, 20091023L, 20091023L, 20091023L, 20091023L, 20091023L, 20091024L, 20091024L, 20091024L, 20091024L, 20091024L, 20091024L, 20091024L, 20091024L, 20091024L, 20091024L, 20091024L, 20091024L, 20091024L, 20091024L, 20091024L, 20091024L, 20091024L, 20091024L, 20091024L, 20091024L, 20091025L, 20091025L, 20091025L, 20091027L, 20091030L, 20091030L, 20091030L, 20091030L, 20091030L, 20091030L, 20091030L, 20091030L, 20091030L, 20091030L, 20091030L, 20091030L, 20091030L, 20091030L, 20091030L, 20091030L, 20091030L, 20091030L, 20091030L, 20091030L, 20091030L, 20091030L, 20091030L, 20091030L, 20091030L, 20091031L, 20091031L, 20091031L, 20091031L, 20091031L, 20091031L, 20091031L, 20091031L, 20091031L, 20091031L, 20091031L, 20091031L, 20091031L, 20091031L, 20091031L, 20091031L, 20091031L, 20091031L, 20091031L, 20091031L, 20091031L, 20091031L, 20091101L, 20091101L, 20091101L, 20091101L, 20091105L, 20091106L, 20091106L, 20091106L, 20091106L, 20091106L, 20091106L, 20091106L, 20091106L, 20091106L, 20091106L, 20091106L, 20091106L, 20091106L, 20091106L, 20091106L, 20091106L, 20091106L, 20091106L, 20091106L, 20091106L, 20091106L, 20091106L, 20091106L, 20091106L, 20091106L, 20091106L, 20091107L, 20091107L, 20091107L, 20091107L, 20091107L, 20091107L, 20091107L, 20091107L, 20091107L, 20091107L, 20091107L, 20091107L, 20091107L, 20091107L, 20091107L, 20091107L, 20091107L, 20091107L, 20091107L, 20091107L, 20091107L, 20091107L, 20091108L, 
20091108L, 20091108L, 20091108L, 20091113L, 20091113L, 20091113L, 20091113L, 20091113L, 20091113L, 20091113L, 20091113L, 20091113L, 20091113L, 20091113L, 20091113L, 20091113L, 20091113L, 20091113L, 20091113L, 20091113L, 20091113L, 20091113L, 20091113L, 20091113L, 20091113L, 20091113L, 20091113L, 20091114L, 20091114L, 20091114L, 20091114L, 20091114L, 20091114L, 20091114L, 20091114L, 20091114L, 20091114L, 20091114L, 20091114L, 20091114L, 20091114L, 20091114L, 20091114L, 20091114L, 20091114L, 20091114L, 20091114L, 20091114L, 20091114L, 20091114L, 20091114L, 20091114L, 20091114L, 20091114L, 20091114L, 20091115L, 20091115L, 20091115L, 20091115L, 20091117L, 20091118L, 20091119L, 20091119L, 20091120L, 20091120L, 20091120L, 20091120L, 20091120L, 20091120L, 20091120L, 20091120L, 20091120L, 20091120L, 20091120L, 20091120L, 20091120L, 20091120L, 20091120L, 20091120L, 20091120L, 20091120L, 20091120L, 20091120L, 20091120L, 20091120L, 20091120L, 20091121L, 20091121L, 20091121L, 20091121L, 20091121L, 20091121L, 20091121L, 20091121L, 20091121L, 20091121L, 20091121L, 20091121L, 20091121L, 20091121L, 20091121L, 20091121L, 20091121L, 20091121L, 20091121L, 20091121L, 20091121L, 20091121L, 20091121L, 20091121L, 20091121L, 20091122L, 20091124L, 20091124L, 20091124L, 20091124L, 20091124L, 20091124L, 20091124L, 20091125L, 20091125L, 20091127L, 20091127L, 20091127L, 20091127L, 20091127L, 20091127L, 20091127L, 20091127L, 20091127L, 20091127L, 20091127L, 20091127L, 20091127L, 20091127L, 20091127L, 20091127L, 20091127L, 20091127L, 20091128L, 20091128L, 20091128L, 20091128L, 20091128L, 20091128L, 20091128L, 20091128L, 20091128L, 20091128L, 20091128L, 20091128L, 20091128L, 20091128L, 20091128L, 20091128L, 20091128L, 20091128L, 20091128L, 20091128L, 20091129L, 20091130L, 20091201L, 20091204L, 20091204L, 20091204L, 20091204L, 20091204L, 20091204L, 20091204L, 20091204L, 20091204L, 20091204L, 20091204L, 20091204L, 20091204L, 20091204L, 20091204L, 20091204L, 20091204L, 20091204L, 20091204L, 20091204L, 20091204L, 20091204L, 20091204L, 20091204L, 20091204L, 20091204L, 20091204L, 20091204L, 20091205L, 20091205L, 20091205L, 20091205L, 20091205L, 20091205L, 20091205L, 20091205L, 20091205L, 20091205L, 20091205L, 20091205L, 20091205L, 20091205L, 20091205L, 20091205L, 20091205L, 20091205L, 20091205L, 20091205L, 20091205L, 20091205L, 20091205L, 20091205L, 20091205L, 20091205L, 20091205L, 20091206L, 20091208L, 20091209L, 20091209L, 20091210L, 20091211L, 20091211L, 20091211L, 20091211L, 20091211L, 20091211L, 20091211L, 20091211L, 20091211L, 20091211L, 20091211L, 20091211L, 20091211L, 20091211L, 20091211L, 20091212L, 20091212L, 20091212L, 20091212L, 20091212L, 20091212L, 20091212L, 20091212L, 20091212L, 20091212L, 20091212L, 20091212L, 20091212L, 20091212L, 20091212L, 20091212L, 20091212L, 20091212L, 20091213L, 20091213L, 20091215L, 20091218L, 20091218L, 20091219L, 20091219L, 20091220L, 20091229L, 20091229L, 20091229L, 20091229L, 20091229L, 20091229L, 20091229L, 20091230L, 20091230L, 20091230L, 20091230L, 20091230L, 20091230L, 20100101L, 20100101L, 20100101L, 20100102L, 20100102L, 20100102L, 20100102L, 20100102L, 20100102L, 20100102L, 20100102L, 20100102L, 20100102L, 20100102L, 20100102L, 20100102L, 20100102L, 20100102L, 20100102L, 20100102L, 20100102L, 20100102L, 20100103L, 20100103L, 20100103L, 20100103L, 20100103L, 20100103L, 20100103L, 20100103L, 20100103L, 20100103L, 20100103L, 20100103L, 20100103L, 20100103L, 20100103L, 20100104L, 20100108L, 20100108L, 20100108L, 20100108L, 20100108L, 20100108L, 20100108L, 20100108L, 20100108L, 
20100108L, 20100108L, 20100108L, 20100108L, 20100108L, 20100108L, 20100108L, 20100108L, 20100108L, 20100108L, 20100108L, 20100108L, 20100108L, 20100109L, 20100109L, 20100109L, 20100109L, 20100109L, 20100109L, 20100109L, 20100109L, 20100109L, 20100109L, 20100109L, 20100109L, 20100109L, 20100109L, 20100109L, 20100109L, 20100109L, 20100109L, 20100109L, 20100109L, 20100109L, 20100110L, 20100110L, 20100110L, 20100110L, 20100110L, 20100112L, 20100112L, 20100112L, 20100115L, 20100115L, 20100115L, 20100115L, 20100115L, 20100115L, 20100115L, 20100115L, 20100115L, 20100115L, 20100115L, 20100115L, 20100115L, 20100115L, 20100115L, 20100115L, 20100115L, 20100115L, 20100115L, 20100115L, 20100115L, 20100115L, 20100115L, 20100116L, 20100116L, 20100116L, 20100116L, 20100116L, 20100116L, 20100116L, 20100116L, 20100116L, 20100116L, 20100116L, 20100116L, 20100116L, 20100116L, 20100116L, 20100116L, 20100116L, 20100116L, 20100116L, 20100116L, 20100116L, 20100116L, 20100116L, 20100116L, 20100116L, 20100116L, 20100118L, 20100119L, 20100122L, 20100122L, 20100122L, 20100122L, 20100122L, 20100122L, 20100122L, 20100122L, 20100122L, 20100122L, 20100122L, 20100122L, 20100122L, 20100122L, 20100122L, 20100122L, 20100122L, 20100122L, 20100122L, 20100122L, 20100122L, 20100122L, 20100122L, 20100122L, 20100122L, 20100123L, 20100123L, 20100123L, 20100123L, 20100123L, 20100123L, 20100123L, 20100123L, 20100123L, 20100123L, 20100123L, 20100123L, 20100123L, 20100123L, 20100123L, 20100123L, 20100123L, 20100123L, 20100123L, 20100123L, 20100123L, 20100123L, 20100123L, 20100123L, 20100123L, 20100123L, 20100124L, 20100125L, 20100126L, 20100129L, 20100129L, 20100129L, 20100129L, 20100129L, 20100129L, 20100129L, 20100129L, 20100129L, 20100129L, 20100129L, 20100129L, 20100129L, 20100129L, 20100129L, 20100129L, 20100129L, 20100129L, 20100129L, 20100129L, 20100129L, 20100129L, 20100129L, 20100129L, 20100129L, 20100129L, 20100130L, 20100130L, 20100130L, 20100130L, 20100130L, 20100130L, 20100130L, 20100130L, 20100130L, 20100130L, 20100130L, 20100130L, 20100130L, 20100130L, 20100130L, 20100130L, 20100130L, 20100130L, 20100130L, 20100130L, 20100130L, 20100130L, 20100130L, 20100201L, 20100201L, 20100203L, 20100204L, 20100205L, 20100205L, 20100205L, 20100205L, 20100205L, 20100205L, 20100205L, 20100205L, 20100205L, 20100205L, 20100205L, 20100205L, 20100205L, 20100205L, 20100205L, 20100205L, 20100205L, 20100205L, 20100205L, 20100205L, 20100205L, 20100205L, 20100205L, 20100205L, 20100206L, 20100206L, 20100206L, 20100206L, 20100206L, 20100206L, 20100206L, 20100206L, 20100206L, 20100206L, 20100206L, 20100206L, 20100206L, 20100206L, 20100206L, 20100206L, 20100206L, 20100206L, 20100206L, 20100206L, 20100206L, 20100206L, 20100206L, 20100208L, 20100208L, 20100209L, 20100209L, 20100212L, 20100212L, 20100212L, 20100212L, 20100212L, 20100212L, 20100212L, 20100212L, 20100212L, 20100212L, 20100212L, 20100212L, 20100212L, 20100212L, 20100212L, 20100212L, 20100212L, 20100212L, 20100212L, 20100212L, 20100212L, 20100212L, 20100212L, 20100212L, 20100212L, 20100213L, 20100213L, 20100213L, 20100213L, 20100213L, 20100213L, 20100213L, 20100213L, 20100213L, 20100213L, 20100213L, 20100213L, 20100213L, 20100213L, 20100213L, 20100213L, 20100213L, 20100213L, 20100213L, 20100213L, 20100213L, 20100213L, 20100213L, 20100216L, 20100219L, 20100219L, 20100219L, 20100219L, 20100219L, 20100219L, 20100219L, 20100219L, 20100219L, 20100219L, 20100219L, 20100219L, 20100219L, 20100219L, 20100219L, 20100219L, 20100219L, 20100219L, 20100219L, 20100219L, 20100219L, 20100219L, 20100219L, 
20100219L, 20100219L, 20100219L, 20100219L, 20100220L, 20100220L, 20100220L, 20100220L, 20100220L, 20100220L, 20100220L, 20100220L, 20100220L, 20100220L, 20100220L, 20100220L, 20100220L, 20100220L, 20100220L, 20100220L, 20100220L, 20100220L, 20100220L, 20100220L, 20100220L, 20100220L, 20100220L, 20100220L, 20100220L, 20100220L, 20100220L, 20100220L, 20100221L, 20100221L, 20100221L, 20100223L, 20100225L, 20100226L, 20100226L, 20100226L, 20100226L, 20100226L, 20100226L, 20100226L, 20100226L, 20100226L, 20100226L, 20100226L, 20100226L, 20100226L, 20100226L, 20100226L, 20100226L, 20100226L, 20100226L, 20100226L, 20100226L, 20100226L, 20100226L, 20100226L, 20100226L, 20100226L, 20100226L, 20100226L, 20100227L, 20100227L, 20100227L, 20100227L, 20100227L, 20100227L, 20100227L, 20100227L, 20100227L, 20100227L, 20100227L, 20100227L, 20100227L, 20100227L, 20100227L, 20100227L, 20100227L, 20100227L, 20100227L, 20100227L, 20100227L, 20100227L, 20100227L, 20100227L, 20100227L, 20100227L, 20100228L, 20100228L, 20100305L, 20100305L, 20100305L, 20100305L, 20100305L, 20100305L, 20100305L, 20100305L, 20100305L, 20100305L, 20100305L, 20100305L, 20100305L, 20100305L, 20100305L, 20100305L, 20100305L, 20100305L, 20100305L, 20100305L, 20100305L, 20100306L, 20100306L, 20100306L, 20100306L, 20100306L, 20100306L, 20100306L, 20100306L, 20100306L, 20100306L, 20100306L, 20100306L, 20100306L, 20100306L, 20100306L, 20100306L, 20100306L, 20100306L, 20100306L, 20100306L, 20100307L, 20100307L, 20100307L, 20100307L, 20100312L, 20100312L, 20100312L, 20100312L, 20100312L, 20100312L, 20100312L, 20100312L, 20100312L, 20100312L, 20100312L, 20100312L, 20100312L, 20100312L, 20100312L, 20100312L, 20100312L, 20100312L, 20100312L, 20100312L, 20100312L, 20100312L, 20100312L, 20100313L, 20100313L, 20100313L, 20100313L, 20100313L, 20100313L, 20100313L, 20100313L, 20100313L, 20100313L, 20100313L, 20100313L, 20100313L, 20100313L, 20100313L, 20100313L, 20100313L, 20100313L, 20100313L, 20100313L, 20100313L, 20100313L, 20100313L, 20100314L, 20100314L, 20100314L, 20100314L, 20100314L, 20100314L, 20100314L, 20100314L, 20100314L, 20100318L, 20100319L, 20100319L, 20100319L, 20100319L, 20100319L, 20100319L, 20100319L, 20100319L, 20100319L, 20100319L, 20100320L, 20100320L, 20100320L, 20100320L, 20100320L, 20100320L, 20100320L, 20100320L), visitor = structure(c(47L, 49L, 2L, 3L, 6L, 8L, 16L, 15L, 21L, 24L, 27L, 28L, 34L, 29L, 38L, 41L, 47L, 51L, 31L, 55L, 2L, 3L, 8L, 16L, 15L, 21L, 23L, 27L, 28L, 34L, 29L, 39L, 38L, 51L, 53L, 31L, 55L, 24L, 41L, 20L, 46L, 3L, 12L, 11L, 13L, 14L, 18L, 20L, 23L, 28L, 32L, 35L, 33L, 30L, 42L, 40L, 46L, 49L, 50L, 48L, 54L, 3L, 6L, 12L, 13L, 14L, 18L, 28L, 32L, 35L, 33L, 30L, 42L, 40L, 49L, 50L, 48L, 54L, 7L, 43L, 40L, 4L, 10L, 7L, 15L, 18L, 20L, 27L, 35L, 36L, 26L, 37L, 38L, 44L, 46L, 50L, 48L, 52L, 55L, 57L, 56L, 4L, 10L, 20L, 23L, 27L, 28L, 29L, 35L, 36L, 26L, 37L, 38L, 44L, 46L, 50L, 48L, 52L, 53L, 57L, 56L, 18L, 23L, 28L, 4L, 1L, 6L, 11L, 8L, 9L, 16L, 18L, 21L, 22L, 23L, 25L, 28L, 34L, 32L, 29L, 30L, 39L, 40L, 44L, 47L, 49L, 52L, 51L, 55L, 57L, 2L, 4L, 6L, 10L, 8L, 16L, 18L, 21L, 28L, 32L, 29L, 35L, 30L, 39L, 40L, 44L, 46L, 47L, 52L, 51L, 31L, 58L, 2L, 1L, 10L, 7L, 42L, 4L, 6L, 12L, 7L, 11L, 8L, 9L, 13L, 16L, 19L, 20L, 22L, 23L, 24L, 32L, 33L, 26L, 42L, 40L, 37L, 41L, 43L, 48L, 53L, 31L, 58L, 4L, 6L, 12L, 8L, 9L, 16L, 19L, 20L, 22L, 24L, 28L, 32L, 33L, 26L, 37L, 38L, 41L, 43L, 48L, 52L, 53L, 58L, 11L, 25L, 39L, 46L, 2L, 5L, 1L, 4L, 11L, 15L, 14L, 17L, 19L, 21L, 22L, 24L, 27L, 35L, 36L, 39L, 49L, 50L, 48L, 52L, 51L, 
31L, 54L, 57L, 2L, 5L, 1L, 6L, 10L, 15L, 14L, 18L, 17L, 19L, 21L, 22L, 24L, 27L, 34L, 29L, 36L, 39L, 40L, 41L, 49L, 50L, 52L, 51L, 31L, 54L, 55L, 57L, 10L, 18L, 41L, 55L, 9L, 49L, 25L, 43L, 2L, 3L, 5L, 6L, 7L, 11L, 8L, 13L, 16L, 23L, 32L, 26L, 30L, 42L, 37L, 44L, 45L, 46L, 47L, 48L, 52L, 53L, 56L, 2L, 3L, 5L, 6L, 7L, 13L, 16L, 23L, 34L, 32L, 29L, 26L, 30L, 42L, 39L, 37L, 38L, 44L, 45L, 47L, 48L, 52L, 53L, 56L, 58L, 35L, 12L, 11L, 15L, 18L, 25L, 52L, 31L, 40L, 45L, 1L, 12L, 8L, 13L, 16L, 24L, 27L, 28L, 32L, 36L, 33L, 41L, 44L, 46L, 51L, 55L, 57L, 56L, 1L, 11L, 8L, 13L, 18L, 24L, 25L, 28L, 32L, 36L, 33L, 39L, 41L, 44L, 51L, 53L, 54L, 57L, 56L, 58L, 19L, 58L, 9L, 3L, 5L, 4L, 12L, 7L, 16L, 15L, 18L, 17L, 20L, 24L, 29L, 35L, 36L, 30L, 42L, 40L, 37L, 38L, 43L, 45L, 46L, 47L, 50L, 52L, 51L, 31L, 55L, 3L, 5L, 4L, 12L, 7L, 15L, 14L, 18L, 17L, 24L, 25L, 34L, 29L, 35L, 36L, 33L, 42L, 40L, 37L, 43L, 45L, 46L, 47L, 50L, 52L, 51L, 53L, 39L, 4L, 7L, 54L, 47L, 14L, 20L, 21L, 28L, 32L, 35L, 33L, 30L, 42L, 39L, 41L, 43L, 45L, 49L, 56L, 4L, 7L, 8L, 14L, 20L, 28L, 34L, 33L, 30L, 42L, 40L, 38L, 41L, 44L, 45L, 53L, 57L, 56L, 29L, 52L, 48L, 36L, 42L, 19L, 30L, 15L, 12L, 14L, 35L, 45L, 47L, 49L, 54L, 17L, 27L, 25L, 29L, 49L, 54L, 42L, 50L, 53L, 2L, 3L, 10L, 7L, 8L, 9L, 16L, 15L, 21L, 23L, 24L, 25L, 28L, 34L, 42L, 40L, 38L, 50L, 54L, 2L, 10L, 9L, 16L, 15L, 17L, 23L, 24L, 28L, 34L, 26L, 37L, 38L, 41L, 58L, 47L, 3L, 5L, 1L, 4L, 6L, 11L, 8L, 9L, 13L, 14L, 18L, 22L, 23L, 24L, 32L, 29L, 36L, 30L, 49L, 53L, 31L, 54L, 3L, 5L, 1L, 4L, 6L, 8L, 13L, 14L, 18L, 21L, 22L, 23L, 24L, 25L, 34L, 36L, 30L, 53L, 31L, 54L, 57L, 21L, 25L, 49L, 48L, 58L, 41L, 46L, 58L, 2L, 3L, 5L, 1L, 4L, 6L, 10L, 11L, 8L, 15L, 17L, 27L, 28L, 32L, 35L, 26L, 42L, 37L, 38L, 50L, 51L, 31L, 56L, 2L, 3L, 5L, 1L, 6L, 10L, 8L, 9L, 15L, 17L, 19L, 23L, 27L, 25L, 28L, 34L, 32L, 26L, 42L, 37L, 38L, 43L, 49L, 50L, 51L, 56L, 19L, 46L, 12L, 11L, 9L, 13L, 14L, 18L, 19L, 20L, 21L, 22L, 23L, 34L, 32L, 33L, 26L, 39L, 40L, 37L, 41L, 43L, 44L, 50L, 48L, 57L, 58L, 10L, 12L, 7L, 9L, 13L, 14L, 18L, 19L, 20L, 22L, 23L, 32L, 29L, 39L, 40L, 37L, 41L, 43L, 44L, 46L, 50L, 48L, 51L, 55L, 57L, 58L, 55L, 18L, 52L, 2L, 5L, 12L, 16L, 20L, 21L, 27L, 25L, 29L, 33L, 30L, 42L, 39L, 40L, 45L, 46L, 47L, 49L, 48L, 52L, 51L, 53L, 31L, 54L, 57L, 56L, 2L, 5L, 12L, 16L, 20L, 21L, 27L, 25L, 34L, 35L, 33L, 30L, 42L, 40L, 45L, 49L, 48L, 52L, 51L, 53L, 54L, 57L, 56L, 22L, 38L, 48L, 29L, 2L, 1L, 6L, 12L, 7L, 11L, 13L, 16L, 15L, 17L, 19L, 22L, 24L, 28L, 35L, 26L, 39L, 38L, 41L, 43L, 44L, 50L, 53L, 55L, 1L, 6L, 12L, 13L, 16L, 15L, 17L, 19L, 20L, 22L, 24L, 28L, 29L, 35L, 26L, 39L, 41L, 43L, 44L, 50L, 53L, 31L, 55L, 7L, 22L, 8L, 18L, 3L, 5L, 10L, 7L, 9L, 21L, 23L, 27L, 28L, 34L, 32L, 29L, 33L, 30L, 37L, 38L, 45L, 46L, 47L, 49L, 50L, 52L, 54L, 57L, 58L, 5L, 4L, 10L, 12L, 9L, 13L, 21L, 27L, 25L, 34L, 32L, 29L, 33L, 30L, 37L, 45L, 47L, 49L, 50L, 31L, 54L, 57L, 58L, 17L, 2L, 1L, 6L, 12L, 7L, 11L, 16L, 15L, 14L, 17L, 23L, 24L, 25L, 35L, 36L, 26L, 42L, 40L, 41L, 43L, 44L, 45L, 47L, 51L, 53L, 31L, 55L, 2L, 1L, 4L, 6L, 16L, 15L, 14L, 18L, 17L, 21L, 24L, 34L, 36L, 26L, 42L, 40L, 41L, 43L, 44L, 45L, 46L, 47L, 48L, 52L, 51L, 53L, 31L, 55L, 4L, 38L, 48L, 34L, 43L, 3L, 5L, 6L, 10L, 12L, 11L, 8L, 9L, 13L, 19L, 20L, 22L, 24L, 27L, 25L, 28L, 33L, 39L, 37L, 44L, 46L, 49L, 52L, 54L, 57L, 56L, 58L, 3L, 1L, 4L, 10L, 9L, 13L, 19L, 20L, 21L, 22L, 23L, 24L, 27L, 28L, 32L, 29L, 35L, 33L, 37L, 38L, 49L, 52L, 31L, 54L, 56L, 58L, 3L, 11L, 4L, 10L, 7L, 8L, 9L, 16L, 14L, 19L, 22L, 24L, 25L, 34L, 36L, 26L, 40L, 38L, 43L, 51L, 55L, 
57L, 56L, 10L, 11L, 8L, 9L, 16L, 18L, 19L, 20L, 22L, 24L, 25L, 36L, 26L, 30L, 39L, 43L, 46L, 48L, 55L, 57L, 9L, 16L, 19L, 56L, 3L, 5L, 1L, 6L, 9L, 13L, 14L, 18L, 22L, 23L, 25L, 34L, 29L, 36L, 33L, 30L, 42L, 40L, 44L, 47L, 53L, 31L, 55L, 5L, 1L, 6L, 9L, 13L, 14L, 18L, 22L, 23L, 25L, 34L, 29L, 36L, 33L, 30L, 42L, 40L, 44L, 47L, 48L, 53L, 31L, 55L, 9L, 14L, 34L, 33L, 30L, 44L, 47L, 31L, 55L, 26L, 2L, 9L, 13L, 27L, 29L, 37L, 41L, 51L, 53L, 55L, 9L, 21L, 27L, 29L, 37L, 52L, 54L, 56L), .Label = c("Alaska Anchorage", "Air Force", "Alab-Huntsville", "American Int'l", "Alaska", "Army", "Boston College", "Bowling Green", "Brown", "Bemidji State", "Boston University", "Bentley", "Canisius", "Colorado College", "Colgate", "Clarkson", "Cornell", "Connecticut", "Dartmouth", "Denver", "Ferris State", "Harvard", "Holy Cross", "Lake Superior", "Massachusetts", "Minnesota Duluth", "Maine", "Mercyhurst", "Michigan", "Minnesota State", "UMass Lowell", "Miami", "Minnesota", "Merrimack", "Michigan State", "Michigan Tech", "North Dakota", "Northeastern", "New Hampshire", "Niagara", "Northern Michigan", "Nebraska-Omaha", "Notre Dame", "Ohio State", "Princeton", "Providence", "Quinnipiac", "Robert Morris", "Rensselaer", "RIT", "St. Cloud State", "Sacred Heart", "St. Lawrence", "Union", "Vermont", "Wisconsin", "Western Michigan", "Yale"), class = "factor"), v_goals = c(4L, 2L, 1L, 3L, 4L, 2L, 1L, 4L, 5L, 3L, 1L, 3L, 2L, 0L, 2L, 3L, 3L, 2L, 3L, 4L, 3L, 1L, 1L, 3L, 3L, 0L, 1L, 3L, 1L, 2L, 6L, 1L, 4L, 0L, 5L, 3L, 6L, 3L, 3L, 2L, 3L, 4L, 2L, 2L, 4L, 3L, 1L, 0L, 1L, 1L, 6L, 5L, 0L, 2L, 1L, 3L, 0L, 0L, 1L, 2L, 3L, 4L, 3L, 2L, 0L, 1L, 1L, 3L, 5L, 3L, 3L, 2L, 4L, 3L, 1L, 3L, 5L, 1L, 1L, 3L, 2L, 2L, 3L, 3L, 3L, 3L, 3L, 3L, 1L, 1L, 3L, 5L, 2L, 2L, 8L, 4L, 4L, 3L, 2L, 2L, 6L, 2L, 5L, 3L, 3L, 2L, 3L, 2L, 3L, 5L, 2L, 1L, 1L, 4L, 3L, 2L, 1L, 6L, 1L, 2L, 2L, 1L, 3L, 4L, 3L, 1L, 4L, 5L, 3L, 0L, 1L, 2L, 1L, 5L, 0L, 5L, 2L, 3L, 4L, 5L, 3L, 1L, 2L, 1L, 5L, 4L, 3L, 1L, 1L, 1L, 1L, 4L, 2L, 2L, 1L, 2L, 0L, 2L, 6L, 2L, 6L, 5L, 4L, 1L, 2L, 2L, 2L, 5L, 3L, 3L, 3L, 5L, 4L, 1L, 2L, 3L, 0L, 1L, 1L, 3L, 4L, 0L, 1L, 3L, 6L, 2L, 1L, 3L, 4L, 4L, 3L, 3L, 2L, 4L, 3L, 1L, 4L, 1L, 3L, 0L, 5L, 3L, 2L, 1L, 1L, 0L, 2L, 1L, 1L, 2L, 3L, 3L, 1L, 3L, 5L, 5L, 2L, 4L, 1L, 4L, 1L, 1L, 5L, 2L, 3L, 2L, 4L, 3L, 0L, 1L, 1L, 1L, 1L, 3L, 5L, 1L, 2L, 1L, 2L, 1L, 0L, 6L, 3L, 3L, 3L, 5L, 2L, 5L, 3L, 2L, 3L, 4L, 2L, 2L, 5L, 2L, 0L, 1L, 3L, 3L, 4L, 6L, 2L, 1L, 3L, 3L, 2L, 4L, 0L, 1L, 4L, 3L, 3L, 1L, 3L, 1L, 3L, 4L, 5L, 1L, 2L, 6L, 0L, 2L, 3L, 3L, 1L, 4L, 1L, 2L, 1L, 3L, 5L, 4L, 2L, 4L, 7L, 3L, 1L, 3L, 4L, 8L, 1L, 0L, 1L, 2L, 1L, 5L, 3L, 2L, 3L, 1L, 3L, 3L, 3L, 3L, 3L, 4L, 2L, 1L, 3L, 1L, 4L, 3L, 1L, 3L, 3L, 2L, 3L, 5L, 4L, 3L, 1L, 3L, 2L, 4L, 6L, 1L, 3L, 6L, 2L, 0L, 6L, 2L, 2L, 5L, 1L, 0L, 2L, 1L, 1L, 5L, 0L, 3L, 8L, 2L, 2L, 0L, 3L, 1L, 2L, 3L, 3L, 3L, 7L, 3L, 3L, 4L, 2L, 1L, 2L, 4L, 2L, 5L, 2L, 2L, 5L, 4L, 2L, 3L, 1L, 5L, 2L, 2L, 7L, 6L, 0L, 4L, 2L, 1L, 3L, 5L, 3L, 3L, 1L, 0L, 2L, 2L, 2L, 3L, 1L, 2L, 3L, 0L, 2L, 4L, 1L, 0L, 1L, 1L, 4L, 1L, 1L, 3L, 1L, 3L, 4L, 4L, 2L, 3L, 4L, 5L, 4L, 1L, 2L, 5L, 3L, 1L, 2L, 2L, 0L, 6L, 1L, 2L, 2L, 0L, 1L, 1L, 1L, 3L, 2L, 3L, 4L, 5L, 2L, 3L, 5L, 1L, 4L, 3L, 3L, 3L, 6L, 3L, 3L, 5L, 3L, 4L, 1L, 1L, 1L, 5L, 3L, 2L, 3L, 1L, 4L, 3L, 3L, 1L, 3L, 3L, 5L, 3L, 1L, 3L, 2L, 3L, 0L, 3L, 4L, 0L, 4L, 5L, 1L, 1L, 3L, 1L, 6L, 4L, 4L, 10L, 6L, 1L, 4L, 3L, 2L, 3L, 1L, 5L, 1L, 7L, 0L, 1L, 5L, 1L, 3L, 0L, 3L, 1L, 1L, 0L, 2L, 1L, 0L, 7L, 3L, 0L, 4L, 2L, 1L, 7L, 0L, 2L, 1L, 3L, 3L, 3L, 1L, 5L, 4L, 2L, 1L, 1L, 2L, 3L, 2L, 4L, 2L, 3L, 1L, 3L, 1L, 2L, 3L, 3L, 2L, 
4L, 4L, 2L, 1L, 2L, 2L, 2L, 1L, 4L, 2L, 1L, 4L, 7L, 2L, 7L, 3L, 1L, 2L, 3L, 0L, 3L, 3L, 1L, 2L, 5L, 1L, 0L, 3L, 2L, 2L, 1L, 2L, 3L, 6L, 2L, 1L, 4L, 4L, 4L, 2L, 4L, 1L, 1L, 2L, 6L, 2L, 0L, 6L, 1L, 2L, 4L, 1L, 0L, 0L, 3L, 1L, 3L, 4L, 2L, 2L, 1L, 3L, 3L, 4L, 3L, 2L, 4L, 3L, 2L, 3L, 5L, 4L, 4L, 3L, 5L, 6L, 1L, 2L, 0L, 2L, 2L, 2L, 4L, 5L, 4L, 4L, 1L, 2L, 4L, 1L, 1L, 3L, 5L, 1L, 1L, 4L, 5L, 2L, 2L, 6L, 0L, 1L, 3L, 0L, 4L, 6L, 4L, 1L, 3L, 1L, 3L, 6L, 0L, 2L, 6L, 2L, 5L, 1L, 4L, 3L, 5L, 3L, 1L, 3L, 5L, 1L, 2L, 3L, 3L, 3L, 3L, 5L, 2L, 4L, 3L, 3L, 3L, 1L, 4L, 1L, 4L, 3L, 4L, 1L, 2L, 2L, 3L, 1L, 6L, 2L, 2L, 1L, 1L, 2L, 3L, 6L, 2L, 2L, 7L, 1L, 5L, 2L, 2L, 2L, 2L, 3L, 3L, 1L, 4L, 5L, 1L, 1L, 3L, 1L, 5L, 3L, 2L, 5L, 3L, 4L, 3L, 4L, 4L, 3L, 4L, 1L, 3L, 2L, 2L, 5L, 4L, 5L, 3L, 4L, 3L, 2L, 1L, 0L, 0L, 1L, 4L, 2L, 0L, 4L, 3L, 5L, 7L, 3L, 3L, 2L, 3L, 2L, 2L, 5L, 0L, 1L, 0L, 1L, 2L, 5L, 3L, 2L, 5L, 5L, 3L, 5L, 0L, 4L, 2L, 5L, 1L, 0L, 3L, 5L, 2L, 3L, 4L, 3L, 2L, 1L, 3L, 3L, 4L, 1L, 3L, 4L, 2L, 1L, 1L, 4L, 1L, 0L, 2L, 3L, 4L, 2L, 1L, 3L, 3L, 6L, 4L, 3L, 2L, 3L, 3L, 1L, 3L, 3L, 2L, 3L, 2L, 5L, 5L, 3L, 4L, 5L, 2L, 6L, 3L, 3L, 5L, 0L, 2L, 4L, 2L, 2L, 3L, 3L, 10L, 1L, 1L, 4L, 8L, 2L, 1L, 3L, 5L, 1L, 3L, 1L, 2L, 6L, 2L, 2L, 4L, 1L, 2L, 4L, 2L, 6L, 0L, 3L, 1L, 0L, 1L, 1L, 2L, 2L, 2L, 2L, 3L, 3L, 3L, 1L, 3L, 5L, 5L, 4L, 4L, 0L, 3L, 2L, 2L, 4L, 4L, 4L, 2L, 4L, 2L, 1L, 3L, 3L, 1L, 4L, 1L, 4L, 1L, 3L, 7L, 1L, 3L, 3L, 5L, 4L, 2L, 4L, 3L, 3L, 1L, 4L, 0L, 0L, 3L, 7L, 8L, 3L, 1L, 3L, 2L, 3L, 0L, 3L, 3L, 1L, 1L, 3L, 1L, 5L, 3L, 4L, 3L, 2L, 0L, 2L, 3L, 1L, 2L, 5L, 7L, 1L, 2L, 2L, 2L, 3L, 4L, 3L, 4L, 1L, 3L, 1L, 0L, 5L, 2L, 0L, 3L, 2L, 0L, 3L, 1L, 1L, 1L, 2L, 2L, 5L, 3L, 3L, 2L, 2L, 3L, 3L, 1L, 3L, 2L, 2L, 2L, 4L, 2L, 5L, 5L, 1L, 2L, 3L, 4L, 1L, 2L, 2L, 0L, 3L, 2L, 4L, 1L, 1L, 4L, 2L, 6L, 7L, 3L, 0L, 4L, 2L, 6L, 2L, 2L, 2L, 1L, 2L, 1L, 1L, 3L, 2L, 1L, 1L, 1L, 3L, 1L, 0L, 3L, 5L, 2L, 3L, 1L, 0L, 5L, 2L, 5L, 1L, 0L, 5L, 1L, 5L, 2L, 3L, 2L, 2L, 4L, 1L, 2L, 2L, 3L, 5L, 5L, 1L, 0L, 3L, 2L, 3L, 5L, 2L, 4L, 2L, 2L, 2L, 5L, 1L, 3L, 4L, 0L, 1L, 1L, 0L, 0L, 1L, 2L, 1L, 1L, 2L, 1L, 0L, 1L, 0L, 0L, 5L, 5L, 4L, 5L, 2L, 1L, 0L, 3L, 1L, 6L, 2L, 5L, 1L, 0L, 6L), opponent = structure(c(44L, 25L, 10L, 43L, 42L, 30L, 35L, 40L, 13L, 26L, 54L, 1L, 37L, 5L, 14L, 36L, 44L, 32L, 53L, 20L, 10L, 43L, 30L, 35L, 50L, 13L, 46L, 54L, 5L, 37L, 1L, 49L, 14L, 32L, 6L, 42L, 20L, 36L, 26L, 44L, 43L, 2L, 38L, 25L, 24L, 56L, 21L, 44L, 34L, 57L, 39L, 27L, 37L, 26L, 15L, 16L, 43L, 1L, 53L, 5L, 51L, 2L, 34L, 47L, 24L, 56L, 21L, 57L, 39L, 27L, 37L, 26L, 15L, 53L, 5L, 16L, 1L, 51L, 55L, 11L, 29L, 54L, 41L, 43L, 31L, 12L, 33L, 25L, 32L, 14L, 51L, 1L, 39L, 24L, 8L, 2L, 21L, 49L, 34L, 3L, 30L, 49L, 41L, 33L, 13L, 39L, 6L, 11L, 32L, 14L, 51L, 1L, 31L, 24L, 8L, 2L, 21L, 54L, 16L, 3L, 30L, 34L, 13L, 6L, 52L, 33L, 15L, 31L, 42L, 45L, 26L, 50L, 5L, 19L, 4L, 46L, 12L, 7L, 41L, 24L, 20L, 56L, 17L, 43L, 48L, 54L, 53L, 36L, 27L, 35L, 13L, 23L, 49L, 3L, 42L, 26L, 50L, 5L, 12L, 41L, 24L, 57L, 20L, 56L, 15L, 43L, 25L, 48L, 53L, 36L, 11L, 45L, 13L, 33L, 3L, 34L, 35L, 18L, 50L, 2L, 39L, 38L, 21L, 54L, 28L, 47L, 17L, 1L, 15L, 52L, 57L, 29L, 56L, 14L, 35L, 25L, 36L, 44L, 5L, 10L, 45L, 55L, 49L, 18L, 50L, 2L, 21L, 49L, 45L, 15L, 1L, 17L, 57L, 13L, 29L, 56L, 14L, 36L, 7L, 44L, 5L, 10L, 23L, 47L, 54L, 27L, 40L, 31L, 55L, 23L, 8L, 56L, 6L, 34L, 9L, 30L, 58L, 45L, 32L, 47L, 42L, 38L, 29L, 26L, 25L, 16L, 28L, 40L, 12L, 37L, 3L, 53L, 44L, 23L, 8L, 56L, 4L, 33L, 58L, 30L, 13L, 9L, 47L, 32L, 45L, 42L, 38L, 11L, 35L, 26L, 25L, 48L, 43L, 53L, 28L, 12L, 37L, 3L, 16L, 7L, 44L, 33L, 13L, 43L, 7L, 46L, 
40L, 58L, 35L, 18L, 10L, 24L, 12L, 27L, 39L, 29L, 4L, 19L, 28L, 57L, 33L, 1L, 41L, 20L, 21L, 17L, 38L, 15L, 14L, 50L, 22L, 51L, 18L, 10L, 24L, 12L, 27L, 4L, 22L, 28L, 31L, 57L, 8L, 33L, 1L, 41L, 11L, 20L, 46L, 21L, 15L, 17L, 14L, 50L, 19L, 51L, 9L, 43L, 9L, 22L, 17L, 23L, 55L, 58L, 46L, 48L, 47L, 14L, 49L, 43L, 6L, 7L, 54L, 31L, 4L, 10L, 30L, 29L, 5L, 37L, 19L, 20L, 38L, 42L, 35L, 14L, 17L, 43L, 6L, 9L, 12L, 47L, 4L, 37L, 30L, 35L, 34L, 5L, 10L, 20L, 27L, 49L, 42L, 29L, 23L, 22L, 55L, 22L, 48L, 57L, 2L, 13L, 25L, 53L, 54L, 28L, 49L, 14L, 8L, 44L, 41L, 56L, 33L, 21L, 10L, 26L, 34L, 32L, 9L, 27L, 58L, 23L, 6L, 1L, 39L, 11L, 48L, 57L, 2L, 13L, 11L, 49L, 20L, 28L, 54L, 8L, 31L, 38L, 44L, 41L, 56L, 30L, 21L, 10L, 26L, 32L, 58L, 27L, 9L, 23L, 6L, 1L, 16L, 55L, 9L, 22L, 49L, 23L, 51L, 26L, 57L, 2L, 44L, 8L, 36L, 10L, 5L, 46L, 24L, 29L, 31L, 11L, 37L, 47L, 46L, 35L, 51L, 26L, 2L, 25L, 36L, 10L, 5L, 50L, 27L, 24L, 32L, 31L, 55L, 21L, 37L, 43L, 19L, 13L, 41L, 30L, 55L, 42L, 13L, 18L, 17L, 36L, 27L, 22L, 29L, 25L, 45L, 14L, 12L, 36L, 35L, 18L, 20L, 30L, 7L, 52L, 55L, 57L, 20L, 33L, 51L, 41L, 43L, 58L, 31L, 48L, 11L, 26L, 56L, 53L, 37L, 19L, 30L, 6L, 52L, 57L, 51L, 8L, 40L, 39L, 19L, 48L, 3L, 21L, 55L, 43L, 31L, 33L, 56L, 45L, 16L, 42L, 20L, 28L, 52L, 7L, 44L, 19L, 12L, 26L, 2L, 33L, 50L, 35L, 48L, 57L, 51L, 37L, 47L, 40L, 39L, 45L, 16L, 42L, 20L, 28L, 52L, 44L, 12L, 26L, 2L, 43L, 33L, 50L, 35L, 39L, 27L, 51L, 37L, 40L, 38L, 47L, 29L, 43L, 38L, 45L, 32L, 19L, 36L, 7L, 22L, 12L, 48L, 29L, 36L, 23L, 13L, 40L, 46L, 41L, 53L, 16L, 7L, 52L, 21L, 43L, 30L, 24L, 33L, 55L, 18L, 47L, 25L, 14L, 12L, 48L, 29L, 36L, 13L, 40L, 41L, 58L, 16L, 53L, 39L, 4L, 46L, 31L, 52L, 11L, 21L, 30L, 24L, 33L, 55L, 35L, 54L, 18L, 47L, 14L, 22L, 38L, 28L, 7L, 53L, 52L, 1L, 6L, 49L, 56L, 29L, 54L, 2L, 31L, 5L, 51L, 10L, 46L, 47L, 17L, 42L, 24L, 35L, 4L, 15L, 8L, 16L, 26L, 28L, 31L, 16L, 52L, 1L, 6L, 54L, 56L, 49L, 2L, 5L, 21L, 11L, 47L, 17L, 42L, 24L, 35L, 34L, 4L, 15L, 33L, 25L, 8L, 53L, 25L, 45L, 4L, 6L, 41L, 50L, 15L, 37L, 44L, 55L, 11L, 35L, 1L, 36L, 43L, 34L, 3L, 22L, 7L, 19L, 9L, 10L, 18L, 14L, 17L, 38L, 58L, 32L, 26L, 6L, 41L, 50L, 17L, 37L, 44L, 55L, 46L, 39L, 29L, 1L, 36L, 43L, 3L, 19L, 58L, 10L, 18L, 14L, 15L, 9L, 32L, 26L, 7L, 11L, 40L, 8L, 14L, 51L, 23L, 4L, 25L, 31L, 18L, 54L, 45L, 47L, 58L, 9L, 32L, 20L, 5L, 36L, 27L, 34L, 21L, 57L, 42L, 52L, 49L, 46L, 51L, 23L, 4L, 18L, 49L, 47L, 45L, 9L, 2L, 58L, 32L, 14L, 56L, 5L, 36L, 27L, 21L, 57L, 42L, 52L, 54L, 34L, 46L, 11L, 38L, 29L, 23L, 40L, 44L, 48L, 31L, 17L, 24L, 12L, 11L, 13L, 55L, 8L, 42L, 20L, 56L, 51L, 25L, 16L, 39L, 53L, 22L, 6L, 4L, 19L, 41L, 15L, 44L, 52L, 48L, 23L, 15L, 28L, 24L, 11L, 38L, 55L, 8L, 42L, 20L, 56L, 51L, 53L, 16L, 19L, 6L, 7L, 22L, 41L, 17L, 15L, 50L, 30L, 28L, 18L, 38L, 46L, 9L, 19L, 33L, 22L, 52L, 5L, 34L, 21L, 20L, 37L, 32L, 10L, 29L, 8L, 57L, 54L, 49L, 56L, 58L, 27L, 39L, 50L, 30L, 13L, 28L, 58L, 22L, 33L, 12L, 19L, 35L, 5L, 25L, 20L, 37L, 32L, 10L, 29L, 8L, 57L, 49L, 11L, 54L, 3L, 23L, 56L, 9L, 27L, 39L, 13L, 7L, 3L, 7L, 29L, 40L, 1L, 4L, 42L, 23L, 55L, 35L, 47L, 50L, 53L, 30L, 16L, 41L, 34L, 7L, 18L, 26L, 38L, 14L, 32L, 31L, 15L, 2L, 17L, 21L, 36L, 45L, 40L, 5L, 6L, 42L, 45L, 50L, 16L, 30L, 57L, 53L, 12L, 41L, 34L, 18L, 44L, 43L, 8L, 26L, 14L, 39L, 17L, 2L, 46L, 15L, 36L, 47L, 40L, 55L, 23L, 3L, 39L, 42L, 49L, 53L, 20L, 47L, 45L, 29L, 27L, 46L, 37L, 1L, 48L, 11L, 44L, 30L, 31L, 5L, 33L, 3L, 38L, 42L, 49L, 53L, 12L, 47L, 14L, 45L, 29L, 27L, 37L, 1L, 51L, 7L, 44L, 34L, 40L, 31L, 5L, 49L, 53L, 47L, 33L, 48L, 41L, 56L, 2L, 58L, 28L, 26L, 50L, 17L, 
52L, 7L, 11L, 35L, 20L, 37L, 51L, 21L, 10L, 32L, 54L, 15L, 27L, 39L, 41L, 56L, 2L, 58L, 28L, 26L, 50L, 17L, 52L, 7L, 11L, 35L, 20L, 37L, 51L, 21L, 3L, 32L, 54L, 10L, 15L, 27L, 39L, 58L, 26L, 11L, 37L, 51L, 32L, 54L, 27L, 39L, 37L, 52L, 17L, 50L, 11L, 32L, 20L, 21L, 56L, 54L, 7L, 53L, 32L, 7L, 41L, 51L, 50L, 17L, 20L), .Label = c("Alaska Anchorage", "Air Force", "Alab-Huntsville", "American Int'l", "Alaska", "Army", "Boston College", "Bowling Green", "Brown", "Bemidji State", "Boston University", "Bentley", "Canisius", "Colorado College", "Colgate", "Clarkson", "Cornell", "Connecticut", "Dartmouth", "Denver", "Ferris State", "Harvard", "Holy Cross", "Lake Superior", "Massachusetts", "Minnesota Duluth", "Maine", "Mercyhurst", "Michigan", "Minnesota State", "UMass Lowell", "Miami", "Minnesota", "Merrimack", "Michigan State", "Michigan Tech", "North Dakota", "Northeastern", "New Hampshire", "Niagara", "Northern Michigan", "Nebraska-Omaha", "Notre Dame", "Ohio State", "Princeton", "Providence", "Quinnipiac", "Robert Morris", "Rensselaer", "RIT", "St. Cloud State", "Sacred Heart", "St. Lawrence", "Union", "Vermont", "Wisconsin", "Western Michigan", "Yale"), class = "factor"), o_goals = c(2L, 5L, 3L, 2L, 6L, 3L, 6L, 4L, 1L, 4L, 4L, 5L, 5L, 2L, 4L, 5L, 1L, 3L, 0L, 5L, 7L, 3L, 4L, 4L, 2L, 1L, 2L, 6L, 5L, 3L, 1L, 3L, 3L, 2L, 2L, 4L, 4L, 1L, 1L, 0L, 2L, 2L, 3L, 3L, 5L, 2L, 2L, 4L, 3L, 5L, 3L, 3L, 4L, 5L, 1L, 4L, 2L, 3L, 3L, 4L, 3L, 2L, 6L, 3L, 3L, 1L, 6L, 4L, 5L, 5L, 3L, 3L, 1L, 4L, 1L, 5L, 3L, 3L, 4L, 0L, 3L, 4L, 3L, 2L, 5L, 4L, 0L, 5L, 2L, 4L, 3L, 0L, 4L, 3L, 2L, 5L, 5L, 4L, 5L, 1L, 0L, 3L, 0L, 0L, 2L, 5L, 3L, 3L, 2L, 8L, 4L, 2L, 3L, 3L, 1L, 3L, 4L, 5L, 4L, 1L, 3L, 4L, 2L, 1L, 5L, 5L, 4L, 4L, 3L, 1L, 4L, 6L, 3L, 3L, 3L, 3L, 4L, 4L, 3L, 1L, 4L, 4L, 3L, 3L, 0L, 3L, 6L, 3L, 4L, 2L, 3L, 4L, 1L, 1L, 3L, 4L, 7L, 3L, 2L, 1L, 3L, 3L, 4L, 6L, 4L, 2L, 1L, 8L, 3L, 1L, 2L, 2L, 1L, 4L, 1L, 5L, 3L, 3L, 6L, 3L, 4L, 1L, 5L, 3L, 4L, 4L, 5L, 2L, 5L, 2L, 3L, 1L, 4L, 3L, 3L, 4L, 2L, 0L, 2L, 2L, 2L, 3L, 5L, 4L, 2L, 3L, 3L, 3L, 4L, 3L, 7L, 6L, 2L, 4L, 1L, 2L, 6L, 1L, 5L, 5L, 3L, 6L, 4L, 5L, 3L, 3L, 2L, 6L, 3L, 0L, 3L, 5L, 2L, 6L, 3L, 0L, 4L, 2L, 2L, 5L, 4L, 2L, 2L, 2L, 4L, 2L, 1L, 3L, 6L, 4L, 1L, 3L, 4L, 2L, 3L, 6L, 2L, 4L, 3L, 2L, 4L, 0L, 4L, 1L, 3L, 1L, 5L, 6L, 2L, 8L, 2L, 3L, 2L, 2L, 2L, 1L, 2L, 0L, 1L, 7L, 4L, 2L, 1L, 2L, 2L, 4L, 4L, 3L, 1L, 2L, 6L, 4L, 1L, 3L, 4L, 2L, 0L, 2L, 5L, 0L, 3L, 2L, 5L, 1L, 8L, 5L, 4L, 3L, 4L, 3L, 2L, 4L, 1L, 6L, 2L, 4L, 3L, 0L, 2L, 4L, 6L, 1L, 1L, 2L, 4L, 6L, 3L, 3L, 3L, 6L, 5L, 2L, 4L, 5L, 3L, 1L, 5L, 4L, 2L, 5L, 4L, 5L, 2L, 8L, 4L, 0L, 5L, 5L, 5L, 2L, 4L, 6L, 0L, 1L, 2L, 3L, 5L, 6L, 3L, 4L, 4L, 5L, 2L, 4L, 3L, 2L, 3L, 4L, 7L, 8L, 2L, 5L, 0L, 5L, 3L, 1L, 4L, 4L, 1L, 2L, 10L, 4L, 4L, 3L, 3L, 2L, 1L, 1L, 2L, 3L, 5L, 4L, 1L, 3L, 3L, 5L, 1L, 1L, 1L, 5L, 0L, 8L, 1L, 2L, 8L, 2L, 3L, 1L, 3L, 3L, 7L, 1L, 2L, 1L, 2L, 3L, 2L, 4L, 6L, 9L, 1L, 3L, 4L, 3L, 2L, 2L, 5L, 2L, 1L, 3L, 6L, 2L, 3L, 3L, 3L, 4L, 4L, 3L, 2L, 2L, 4L, 2L, 3L, 2L, 5L, 2L, 4L, 1L, 4L, 2L, 0L, 3L, 0L, 2L, 2L, 1L, 4L, 3L, 5L, 4L, 4L, 3L, 3L, 5L, 1L, 4L, 1L, 6L, 1L, 4L, 2L, 2L, 3L, 2L, 5L, 6L, 4L, 2L, 3L, 4L, 3L, 2L, 1L, 5L, 2L, 1L, 4L, 2L, 7L, 1L, 2L, 1L, 6L, 3L, 3L, 4L, 3L, 2L, 4L, 3L, 6L, 0L, 7L, 6L, 2L, 4L, 4L, 0L, 4L, 3L, 5L, 4L, 5L, 6L, 3L, 6L, 7L, 6L, 5L, 2L, 3L, 0L, 3L, 1L, 5L, 0L, 5L, 4L, 5L, 2L, 2L, 0L, 4L, 5L, 5L, 3L, 1L, 2L, 2L, 4L, 3L, 1L, 1L, 7L, 3L, 2L, 7L, 9L, 5L, 5L, 2L, 5L, 5L, 4L, 3L, 3L, 4L, 4L, 1L, 2L, 2L, 3L, 3L, 2L, 3L, 5L, 5L, 4L, 2L, 4L, 2L, 2L, 3L, 2L, 3L, 7L, 4L, 5L, 3L, 4L, 5L, 1L, 6L, 2L, 1L, 1L, 1L, 2L, 
1L, 4L, 3L, 3L, 3L, 6L, 2L, 1L, 5L, 6L, 3L, 6L, 4L, 1L, 6L, 6L, 0L, 5L, 1L, 2L, 3L, 1L, 0L, 1L, 3L, 0L, 0L, 4L, 3L, 4L, 4L, 3L, 3L, 8L, 2L, 1L, 5L, 1L, 3L, 1L, 3L, 6L, 4L, 3L, 5L, 5L, 9L, 4L, 3L, 2L, 2L, 6L, 4L, 3L, 2L, 4L, 5L, 4L, 1L, 5L, 2L, 3L, 2L, 1L, 4L, 5L, 1L, 4L, 4L, 2L, 2L, 1L, 7L, 1L, 5L, 3L, 4L, 4L, 2L, 4L, 6L, 3L, 2L, 5L, 2L, 1L, 1L, 4L, 3L, 4L, 0L, 3L, 1L, 3L, 1L, 5L, 1L, 2L, 2L, 0L, 3L, 1L, 3L, 3L, 4L, 0L, 8L, 1L, 4L, 2L, 4L, 6L, 0L, 5L, 3L, 6L, 3L, 4L, 4L, 3L, 3L, 3L, 1L, 5L, 4L, 4L, 5L, 3L, 4L, 2L, 2L, 4L, 4L, 2L, 3L, 3L, 4L, 5L, 2L, 3L, 4L, 1L, 4L, 5L, 2L, 2L, 3L, 3L, 3L, 0L, 2L, 2L, 6L, 2L, 2L, 2L, 4L, 6L, 2L, 4L, 1L, 2L, 4L, 5L, 6L, 1L, 2L, 1L, 11L, 1L, 0L, 4L, 2L, 2L, 4L, 2L, 3L, 3L, 1L, 3L, 7L, 5L, 2L, 3L, 1L, 6L, 1L, 3L, 2L, 2L, 2L, 5L, 5L, 1L, 6L, 10L, 5L, 3L, 1L, 2L, 6L, 0L, 4L, 4L, 5L, 4L, 3L, 1L, 3L, 4L, 4L, 3L, 2L, 1L, 5L, 4L, 5L, 4L, 4L, 7L, 1L, 2L, 2L, 4L, 3L, 4L, 4L, 0L, 4L, 5L, 0L, 4L, 0L, 1L, 4L, 7L, 3L, 3L, 4L, 2L, 4L, 6L, 2L, 1L, 5L, 6L, 2L, 2L, 4L, 5L, 8L, 1L, 2L, 4L, 4L, 2L, 2L, 4L, 2L, 1L, 2L, 3L, 5L, 4L, 4L, 3L, 5L, 3L, 6L, 3L, 0L, 3L, 4L, 4L, 4L, 5L, 5L, 6L, 3L, 1L, 4L, 1L, 4L, 5L, 1L, 7L, 8L, 5L, 3L, 4L, 2L, 4L, 5L, 2L, 7L, 4L, 5L, 3L, 5L, 0L, 5L, 5L, 2L, 6L, 5L, 1L, 3L, 0L, 2L, 7L, 4L, 3L, 7L, 2L, 2L, 3L, 5L, 7L, 2L, 7L, 4L, 4L, 4L, 6L, 1L, 4L, 7L, 5L, 8L, 8L, 2L, 1L, 2L, 2L, 6L, 2L, 1L, 2L, 4L, 2L, 6L, 4L, 3L, 6L, 4L, 2L, 2L, 4L, 3L, 3L, 2L, 3L, 7L, 5L, 3L, 3L, 1L, 4L, 1L, 3L, 1L, 3L, 0L, 5L, 2L, 3L, 2L, 3L, 1L, 8L, 2L, 5L, 2L, 4L, 2L, 3L, 4L, 2L, 3L, 6L, 1L, 3L, 1L, 3L, 2L, 5L, 2L, 0L, 5L, 3L, 1L, 5L, 3L, 4L, 5L, 4L, 2L, 2L, 3L, 6L, 4L, 3L, 1L, 3L, 3L, 0L, 6L, 3L, 3L, 3L, 2L, 3L, 8L, 1L, 4L, 1L, 4L, 2L, 3L, 2L, 6L, 0L, 4L, 4L, 3L, 2L, 1L, 3L, 4L, 5L, 4L, 6L, 3L, 1L, 2L, 6L, 4L, 3L, 4L, 6L, 2L, 1L, 1L, 7L, 5L, 7L, 4L, 6L, 4L, 3L, 4L, 3L, 4L, 5L, 2L, 3L, 4L, 2L, 3L, 3L, 3L, 4L, 3L, 3L, 3L, 2L, 0L, 0L, 4L, 3L, 4L, 3L, 2L, 2L, 3L, 0L, 2L, 2L, 3L, 4L, 2L, 2L, 3L, 4L, 0L, 3L, 3L, 0L, 2L, 7L, 1L, 3L, 6L, 3L, 3L), conference = structure(c(6L, 6L, 6L, 6L, 6L, 6L, 6L, 6L, 6L, 6L, 6L, 6L, 6L, 6L, 6L, 6L, 6L, 6L, 6L, 6L, 6L, 6L, 6L, 6L, 6L, 6L, 6L, 6L, 6L, 6L, 6L, 6L, 6L, 6L, 6L, 6L, 6L, 6L, 6L, 6L, 6L, 6L, 6L, 5L, 6L, 7L, 6L, 6L, 6L, 6L, 6L, 6L, 7L, 7L, 6L, 6L, 6L, 6L, 6L, 6L, 6L, 6L, 6L, 6L, 6L, 7L, 6L, 6L, 6L, 6L, 7L, 7L, 6L, 6L, 6L, 6L, 6L, 6L, 5L, 6L, 6L, 6L, 6L, 6L, 6L, 1L, 7L, 5L, 2L, 7L, 7L, 7L, 5L, 2L, 6L, 1L, 6L, 6L, 5L, 6L, 7L, 6L, 6L, 7L, 1L, 5L, 1L, 6L, 2L, 7L, 7L, 7L, 5L, 2L, 6L, 1L, 6L, 6L, 6L, 6L, 7L, 6L, 1L, 1L, 1L, 7L, 6L, 5L, 2L, 6L, 6L, 1L, 2L, 4L, 1L, 5L, 1L, 5L, 2L, 2L, 7L, 6L, 6L, 2L, 6L, 6L, 6L, 7L, 5L, 2L, 1L, 1L, 6L, 3L, 2L, 6L, 1L, 2L, 1L, 2L, 2L, 2L, 7L, 6L, 6L, 2L, 5L, 6L, 6L, 7L, 5L, 6L, 1L, 7L, 3L, 5L, 2L, 1L, 1L, 1L, 5L, 5L, 2L, 4L, 1L, 4L, 4L, 7L, 4L, 1L, 2L, 2L, 7L, 7L, 2L, 6L, 7L, 2L, 2L, 3L, 4L, 5L, 4L, 1L, 1L, 1L, 2L, 4L, 4L, 4L, 7L, 4L, 2L, 1L, 2L, 7L, 7L, 7L, 5L, 2L, 2L, 3L, 1L, 4L, 4L, 5L, 6L, 5L, 5L, 1L, 2L, 7L, 1L, 5L, 4L, 7L, 4L, 4L, 2L, 4L, 2L, 5L, 2L, 7L, 5L, 4L, 1L, 3L, 1L, 7L, 6L, 4L, 2L, 1L, 2L, 7L, 1L, 6L, 4L, 7L, 1L, 4L, 4L, 2L, 4L, 2L, 5L, 5L, 2L, 7L, 5L, 3L, 2L, 4L, 1L, 1L, 7L, 6L, 4L, 5L, 2L, 6L, 1L, 2L, 5L, 6L, 6L, 6L, 2L, 1L, 3L, 2L, 1L, 5L, 5L, 2L, 1L, 4L, 1L, 2L, 7L, 7L, 2L, 7L, 2L, 4L, 5L, 4L, 6L, 1L, 4L, 7L, 1L, 3L, 2L, 1L, 5L, 1L, 4L, 1L, 5L, 2L, 2L, 7L, 7L, 2L, 5L, 7L, 5L, 2L, 4L, 4L, 6L, 1L, 4L, 7L, 4L, 2L, 6L, 6L, 4L, 1L, 5L, 6L, 5L, 3L, 4L, 7L, 6L, 2L, 1L, 6L, 6L, 5L, 1L, 6L, 7L, 6L, 2L, 6L, 6L, 7L, 5L, 2L, 6L, 7L, 6L, 2L, 1L, 6L, 6L, 6L, 1L, 6L, 7L, 6L, 5L, 2L, 6L, 7L, 6L, 6L, 2L, 6L, 6L, 6L, 
6L, 4L, 3L, 2L, 1L, 1L, 5L, 4L, 4L, 1L, 4L, 7L, 2L, 2L, 2L, 7L, 7L, 2L, 3L, 7L, 5L, 2L, 4L, 5L, 4L, 1L, 1L, 7L, 5L, 5L, 3L, 2L, 1L, 1L, 5L, 4L, 7L, 1L, 4L, 2L, 5L, 5L, 2L, 2L, 7L, 7L, 2L, 3L, 7L, 2L, 4L, 5L, 4L, 1L, 1L, 7L, 4L, 5L, 6L, 6L, 4L, 6L, 7L, 7L, 2L, 1L, 2L, 2L, 7L, 6L, 2L, 5L, 2L, 2L, 6L, 6L, 7L, 6L, 5L, 2L, 7L, 7L, 1L, 5L, 7L, 6L, 2L, 6L, 5L, 2L, 2L, 6L, 6L, 2L, 7L, 2L, 6L, 6L, 6L, 6L, 6L, 6L, 6L, 1L, 6L, 6L, 6L, 4L, 6L, 6L, 6L, 6L, 6L, 6L, 6L, 6L, 6L, 6L, 6L, 1L, 6L, 6L, 6L, 6L, 6L, 6L, 6L, 6L, 6L, 6L, 5L, 6L, 6L, 6L, 6L, 6L, 6L, 6L, 1L, 6L, 6L, 6L, 6L, 6L, 6L, 6L, 6L, 6L, 6L, 6L, 6L, 6L, 6L, 4L, 6L, 2L, 7L, 1L, 1L, 5L, 2L, 4L, 1L, 7L, 1L, 6L, 1L, 2L, 6L, 2L, 7L, 7L, 4L, 6L, 5L, 4L, 6L, 2L, 7L, 1L, 1L, 2L, 1L, 7L, 1L, 2L, 6L, 1L, 2L, 5L, 5L, 7L, 7L, 6L, 5L, 4L, 2L, 2L, 5L, 4L, 6L, 4L, 6L, 5L, 4L, 1L, 3L, 2L, 7L, 1L, 1L, 3L, 5L, 2L, 4L, 4L, 5L, 1L, 2L, 2L, 7L, 2L, 7L, 5L, 1L, 6L, 5L, 7L, 1L, 3L, 2L, 7L, 1L, 3L, 2L, 4L, 4L, 4L, 6L, 1L, 5L, 5L, 1L, 5L, 2L, 7L, 2L, 7L, 5L, 2L, 4L, 1L, 6L, 7L, 4L, 5L, 1L, 5L, 4L, 1L, 7L, 1L, 4L, 7L, 2L, 4L, 1L, 5L, 2L, 7L, 6L, 5L, 6L, 6L, 2L, 2L, 2L, 1L, 6L, 2L, 4L, 6L, 1L, 5L, 4L, 1L, 7L, 1L, 4L, 7L, 4L, 1L, 2L, 2L, 5L, 6L, 6L, 2L, 2L, 2L, 5L, 1L, 6L, 7L, 5L, 2L, 4L, 5L, 6L, 1L, 1L, 2L, 1L, 4L, 7L, 2L, 5L, 5L, 2L, 7L, 7L, 2L, 5L, 3L, 4L, 5L, 4L, 4L, 3L, 1L, 7L, 4L, 5L, 4L, 2L, 7L, 1L, 2L, 1L, 4L, 7L, 2L, 5L, 5L, 5L, 2L, 7L, 7L, 2L, 3L, 4L, 4L, 3L, 1L, 7L, 4L, 4L, 2L, 7L, 6L, 6L, 3L, 2L, 6L, 7L, 1L, 1L, 5L, 5L, 1L, 4L, 4L, 4L, 4L, 4L, 2L, 6L, 2L, 7L, 5L, 5L, 2L, 2L, 2L, 1L, 4L, 5L, 7L, 1L, 1L, 1L, 4L, 4L, 4L, 4L, 6L, 4L, 2L, 6L, 6L, 2L, 7L, 5L, 2L, 2L, 2L, 1L, 4L, 5L, 5L, 6L, 6L, 2L, 1L, 3L, 2L, 3L, 5L, 4L, 2L, 1L, 5L, 1L, 5L, 2L, 2L, 7L, 7L, 7L, 5L, 4L, 5L, 4L, 4L, 1L, 1L, 4L, 2L, 4L, 2L, 1L, 3L, 1L, 4L, 1L, 2L, 5L, 5L, 5L, 2L, 2L, 7L, 7L, 7L, 4L, 4L, 4L, 1L, 5L, 4L, 2L, 4L, 4L, 1L, 7L, 1L, 1L, 5L, 5L, 4L, 4L, 7L, 4L, 1L, 2L, 5L, 2L, 7L, 7L, 2L, 3L, 2L, 2L, 2L, 4L, 4L, 7L, 4L, 5L, 5L, 1L, 7L, 1L, 1L, 4L, 4L, 7L, 1L, 4L, 2L, 2L, 5L, 7L, 7L, 2L, 3L, 2L, 2L, 2L, 4L, 5L, 4L, 3L, 1L, 7L, 4L, 5L, 5L, 1L, 5L, 3L, 5L, 2L, 3L, 6L, 1L, 6L, 1L, 5L, 2L, 4L, 1L, 4L, 7L, 4L, 2L, 5L, 5L, 1L, 7L, 5L, 7L, 2L, 5L, 4L, 1L, 4L, 2L, 7L, 4L, 3L, 6L, 1L, 6L, 4L, 1L, 4L, 7L, 2L, 4L, 1L, 2L, 5L, 1L, 2L, 2L, 2L, 7L, 7L, 5L, 4L, 1L, 5L, 4L, 7L, 4L, 3L, 5L, 6L, 3L, 5L, 6L, 6L, 6L, 7L, 6L, 6L, 6L, 5L, 5L, 7L, 7L, 3L, 5L, 6L, 7L, 5L, 6L, 7L, 3L, 5L, 6L, 6L, 6L, 6L, 6L, 7L, 6L, 6L, 5L, 7L, 7L, 7L, 5L, 6L, 5L, 3L, 5L, 6L, 6L, 6L, 6L, 7L, 6L, 6L, 6L, 6L, 6L, 6L, 6L, 6L, 6L, 6L, 6L, 6L, 6L, 6L, 6L, 6L, 6L, 6L, 6L, 6L, 6L, 6L, 6L, 6L, 6L, 6L, 6L, 6L, 6L, 6L, 6L, 6L, 6L, 6L, 6L, 6L, 6L, 6L, 6L, 6L, 6L, 6L, 6L, 6L, 6L, 6L, 6L, 6L, 6L, 6L, 6L, 6L, 6L, 6L, 6L, 6L, 6L, 6L, 6L, 6L, 6L, 6L, 6L, 6L, 6L, 6L, 6L, 6L, 6L, 6L, 6L, 6L, 6L, 6L), .Label = c("AH", "CC", "CH", "EC", "HE", "NC", "WC"), class = "factor"), result = c(1, 0, 0, 1, 0, 0, 0, 0.5, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0.5, 0, 0, 0, 0, 0, 0.5, 1, 0, 0, 0, 0.5, 0, 0, 0.5, 0, 0.5, 0, 1, 0, 0.5, 0, 1, 0, 0, 1, 0, 0, 0.5, 1, 0, 0, 1, 0, 0, 0, 0.5, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 0.5, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0.5, 1, 0.5, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0.5, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0.5, 0, 0, 0.5, 1, 0, 0.5, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0.5, 0.5, 0, 0, 0.5, 1, 0, 0, 1, 0, 1, 0.5, 1, 0, 1, 0.5, 0, 1, 1, 1, 0, 1, 0.5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0.5, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0.5, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0.5, 1, 0, 0, 0, 0.5, 1, 0.5, 1, 0, 0.5, 0.5, 1, 0, 0, 0, 0, 1, 0.5, 1, 0, 1, 0.5, 1, 1, 1, 0, 0, 1, 0, 0.5, 1, 0, 0, 1, 0.5, 0.5, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0.5, 1, 0.5, 0, 0, 0.5, 1, 1, 0, 0, 0.5, 0, 0.5, 0, 0, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0.5, 1, 1, 0, 0, 0, 0.5, 0, 0, 0, 1, 0, 1, 1, 0.5, 0.5, 0, 0, 0.5, 0, 1, 0.5, 0, 1, 1, 0.5, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0.5, 0, 0, 1, 1, 0.5, 0, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0.5, 0, 1, 0, 0.5, 1, 0.5, 0, 0, 1, 1, 0.5, 0, 0.5, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0.5, 0.5, 1, 1, 0.5, 1, 1, 1, 1, 0, 1, 0, 0, 0, 1, 0.5, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0.5, 0, 0.5, 0, 0, 0, 1, 1, 1, 0.5, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0.5, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0.5, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0.5, 1, 1, 0.5, 0, 0, 1, 0.5, 0, 0.5, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0.5, 1, 0.5, 0, 0, 0, 0, 0, 1, 0, 0.5, 1, 0, 0, 0.5, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0.5, 0, 0, 1, 0, 0, 1, 0.5, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0.5, 1, 1, 1, 0, 1, 1, 0, 0.5, 1, 0.5, 1, 0.5, 0, 1, 0.5, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0.5, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0.5, 0, 1, 1, 0, 0.5, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0.5, 1, 1, 0, 0, 1, 0.5, 0, 1, 1, 0, 0.5, 0, 1, 0, 1, 0.5, 1, 0, 0.5, 1, 0, 1, 0.5, 1, 0, 0, 0, 1, 0, 1, 0, 0.5, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0.5, 0, 1, 0, 1, 0.5, 0.5, 1, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 1, 0.5, 0, 0, 0, 0, 0.5, 1, 0, 0.5, 0, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0.5, 0, 0.5, 1, 0.5, 1, 0, 1, 0, 1, 0, 0, 0, 0.5, 1, 0, 0, 0, 0, 0.5, 1, 0, 1, 0, 0, 0, 0, 0, 0.5, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0.5, 1, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0.5, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0.5, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0.5, 0, 0, 0, 0, 0.5, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0.5, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0.5, 0.5, 1, 0, 0, 0, 1, 0, 0.5, 1, 0.5, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0.5, 1, 0, 0, 0, 0, 0, 0.5, 1, 1, 0, 0, 0.5, 0, 0, 0, 0.5, 1, 0.5, 0, 0.5, 0, 1, 0, 0.5, 0, 0.5, 0, 1, 0, 0.5, 0, 0.5, 0, 1, 0, 1, 0, 0, 1, 0.5, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0.5, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0.5, 0, 0, 0.5, 0, 0.5, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 1, 0, 0.5, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1), home.ice = c(TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, FALSE, TRUE, TRUE, TRUE, TRUE, FALSE, TRUE, TRUE, TRUE, TRUE, TRUE, FALSE, TRUE, TRUE, TRUE, FALSE, TRUE, TRUE, TRUE, TRUE, TRUE, FALSE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, FALSE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, FALSE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, 
TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, FALSE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, FALSE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, FALSE, TRUE, TRUE, FALSE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, FALSE, TRUE, TRUE, TRUE, FALSE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, FALSE, TRUE, FALSE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, FALSE, FALSE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, TRUE, TRUE, TRUE, FALSE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, FALSE, FALSE, FALSE, FALSE, TRUE, TRUE, FALSE, TRUE, FALSE, FALSE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, FALSE, FALSE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, FALSE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, FALSE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, 
TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, FALSE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, FALSE, FALSE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, FALSE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, FALSE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, FALSE, TRUE, TRUE, FALSE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE)), .Names = c("date", "visitor", "v_goals", "opponent", "o_goals", "conference", "result", "home.ice" ), row.names = c(1L, 2L, 3L, 4L, 5L, 6L, 8L, 7L, 9L, 10L, 12L, 13L, 15L, 14L, 16L, 17L, 18L, 19L, 11L, 20L, 21L, 22L, 23L, 25L, 24L, 26L, 27L, 29L, 30L, 32L, 31L, 34L, 33L, 35L, 36L, 28L, 37L, 38L, 39L, 40L, 41L, 42L, 44L, 43L, 46L, 45L, 47L, 48L, 49L, 51L, 53L, 50L, 54L, 52L, 55L, 56L, 57L, 59L, 60L, 58L, 61L, 62L, 63L, 64L, 66L, 65L, 67L, 69L, 71L, 68L, 72L, 70L, 73L, 74L, 76L, 77L, 75L, 78L, 79L, 80L, 81L, 82L, 84L, 83L, 85L, 86L, 87L, 91L, 89L, 90L, 88L, 92L, 93L, 94L, 95L, 97L, 96L, 98L, 99L, 100L, 101L, 102L, 103L, 104L, 105L, 109L, 110L, 111L, 107L, 108L, 106L, 112L, 113L, 114L, 115L, 117L, 116L, 118L, 119L, 120L, 121L, 122L, 123L, 124L, 125L, 126L, 127L, 129L, 128L, 130L, 131L, 132L, 133L, 135L, 134L, 136L, 137L, 141L, 140L, 138L, 139L, 142L, 143L, 144L, 145L, 146L, 148L, 147L, 149L, 150L, 151L, 152L, 153L, 155L, 154L, 156L, 157L, 158L, 161L, 164L, 162L, 160L, 163L, 165L, 166L, 167L, 168L, 169L, 171L, 170L, 159L, 172L, 174L, 173L, 176L, 175L, 177L, 178L, 179L, 184L, 180L, 182L, 181L, 183L, 185L, 186L, 188L, 187L, 190L, 189L, 191L, 194L, 195L, 192L, 198L, 199L, 196L, 197L, 200L, 201L, 202L, 193L, 203L, 204L, 205L, 208L, 206L, 207L, 
209L, 211L, 210L, 212L, 213L, 215L, 216L, 217L, 214L, 218L, 219L, 220L, 221L, 222L, 223L, 224L, 225L, 226L, 227L, 228L, 229L, 231L, 233L, 230L, 232L, 234L, 236L, 235L, 237L, 238L, 239L, 240L, 241L, 245L, 243L, 244L, 246L, 248L, 249L, 247L, 251L, 250L, 242L, 252L, 253L, 255L, 256L, 254L, 257L, 258L, 260L, 259L, 262L, 261L, 263L, 264L, 265L, 266L, 269L, 271L, 270L, 268L, 272L, 274L, 273L, 275L, 276L, 278L, 277L, 267L, 279L, 280L, 281L, 282L, 283L, 284L, 285L, 286L, 287L, 288L, 289L, 290L, 291L, 292L, 293L, 294L, 296L, 295L, 297L, 298L, 299L, 302L, 300L, 301L, 304L, 303L, 305L, 306L, 307L, 308L, 309L, 310L, 311L, 312L, 313L, 314L, 315L, 316L, 317L, 318L, 319L, 320L, 325L, 324L, 322L, 321L, 323L, 329L, 328L, 326L, 327L, 330L, 331L, 332L, 333L, 334L, 335L, 336L, 337L, 338L, 340L, 339L, 341L, 342L, 343L, 345L, 344L, 346L, 347L, 348L, 350L, 349L, 351L, 352L, 353L, 355L, 356L, 357L, 354L, 358L, 359L, 360L, 361L, 362L, 363L, 364L, 365L, 366L, 368L, 367L, 369L, 370L, 371L, 372L, 374L, 375L, 373L, 376L, 377L, 378L, 379L, 380L, 381L, 382L, 383L, 384L, 385L, 386L, 387L, 388L, 389L, 391L, 390L, 393L, 392L, 395L, 394L, 397L, 396L, 398L, 399L, 403L, 401L, 402L, 404L, 407L, 408L, 405L, 406L, 409L, 410L, 411L, 412L, 413L, 415L, 414L, 400L, 416L, 417L, 419L, 418L, 421L, 420L, 423L, 422L, 425L, 424L, 426L, 427L, 432L, 430L, 428L, 429L, 431L, 434L, 435L, 433L, 436L, 437L, 438L, 439L, 440L, 442L, 441L, 443L, 444L, 445L, 446L, 447L, 448L, 449L, 450L, 451L, 453L, 455L, 452L, 456L, 454L, 459L, 457L, 458L, 460L, 461L, 462L, 463L, 464L, 465L, 466L, 467L, 468L, 469L, 472L, 471L, 470L, 475L, 476L, 473L, 474L, 477L, 478L, 479L, 480L, 481L, 482L, 483L, 484L, 485L, 486L, 487L, 488L, 489L, 490L, 491L, 492L, 493L, 494L, 495L, 496L, 497L, 499L, 498L, 500L, 501L, 502L, 503L, 504L, 505L, 506L, 507L, 510L, 508L, 509L, 511L, 513L, 512L, 514L, 515L, 516L, 517L, 518L, 519L, 521L, 522L, 520L, 523L, 524L, 525L, 526L, 527L, 529L, 528L, 530L, 531L, 532L, 534L, 535L, 533L, 536L, 537L, 538L, 539L, 540L, 542L, 544L, 541L, 543L, 545L, 547L, 546L, 548L, 550L, 549L, 551L, 553L, 552L, 554L, 559L, 557L, 556L, 558L, 560L, 561L, 555L, 562L, 564L, 566L, 563L, 565L, 567L, 568L, 570L, 569L, 571L, 572L, 574L, 573L, 575L, 576L, 580L, 578L, 579L, 581L, 577L, 582L, 583L, 584L, 585L, 587L, 586L, 588L, 589L, 590L, 591L, 593L, 594L, 596L, 592L, 595L, 597L, 599L, 600L, 598L, 601L, 602L, 606L, 607L, 608L, 605L, 603L, 611L, 609L, 610L, 612L, 613L, 604L, 614L, 616L, 617L, 618L, 615L, 619L, 621L, 620L, 622L, 623L, 624L, 625L, 626L, 629L, 627L, 630L, 632L, 631L, 628L, 635L, 633L, 634L, 636L, 637L, 638L, 639L, 640L, 641L, 642L, 645L, 643L, 644L, 647L, 646L, 648L, 650L, 649L, 651L, 653L, 652L, 657L, 655L, 656L, 654L, 659L, 661L, 658L, 660L, 662L, 663L, 665L, 664L, 666L, 667L, 669L, 671L, 668L, 670L, 673L, 672L, 674L, 676L, 675L, 678L, 677L, 680L, 679L, 682L, 684L, 681L, 683L, 685L, 686L, 687L, 689L, 688L, 690L, 691L, 692L, 693L, 694L, 695L, 696L, 697L, 698L, 699L, 700L, 701L, 702L, 705L, 703L, 706L, 708L, 707L, 710L, 709L, 711L, 712L, 713L, 714L, 716L, 715L, 718L, 717L, 719L, 704L, 720L, 721L, 722L, 723L, 724L, 725L, 726L, 727L, 728L, 731L, 729L, 734L, 730L, 733L, 732L, 735L, 736L, 737L, 739L, 738L, 741L, 740L, 742L, 743L, 744L, 745L, 746L, 747L, 748L, 749L, 751L, 750L, 752L, 755L, 753L, 754L, 756L, 758L, 757L, 759L, 760L, 761L, 762L, 765L, 764L, 763L, 767L, 766L, 768L, 769L, 770L, 771L, 772L, 773L, 774L, 775L, 776L, 777L, 779L, 778L, 780L, 782L, 781L, 783L, 784L, 788L, 789L, 787L, 785L, 790L, 791L, 792L, 793L, 794L, 795L, 786L, 796L, 797L, 798L, 799L, 800L, 
801L, 802L, 804L, 803L, 805L, 806L, 807L, 808L, 809L, 814L, 812L, 810L, 813L, 811L, 815L, 816L, 817L, 818L, 819L, 820L, 821L, 822L, 823L, 824L, 825L, 827L, 826L, 828L, 830L, 829L, 831L, 832L, 835L, 833L, 840L, 838L, 836L, 839L, 837L, 841L, 842L, 843L, 844L, 845L, 834L, 846L, 847L, 848L, 849L, 851L, 850L, 852L, 855L, 853L, 854L, 858L, 857L, 856L, 859L, 860L, 861L, 862L, 865L, 866L, 863L, 868L, 869L, 867L, 870L, 871L, 872L, 873L, 874L, 875L, 864L, 876L, 878L, 877L, 879L, 880L, 883L, 882L, 881L, 885L, 884L, 886L, 887L, 891L, 890L, 888L, 893L, 894L, 892L, 895L, 896L, 897L, 898L, 899L, 900L, 902L, 901L, 903L, 889L, 904L, 905L, 906L, 907L, 908L, 909L, 910L, 911L, 912L, 914L, 917L, 915L, 913L, 916L, 918L, 920L, 919L, 921L, 922L, 924L, 923L, 925L, 926L, 928L, 927L, 929L, 930L, 931L, 932L, 933L, 934L, 935L, 936L, 938L, 937L, 939L, 940L, 941L, 942L, 944L, 943L, 945L, 947L, 946L, 948L, 951L, 952L, 954L, 953L, 950L, 955L, 956L, 957L, 958L, 959L, 949L, 960L, 961L, 962L, 963L, 964L, 965L, 968L, 966L, 967L, 969L, 971L, 970L, 972L, 973L, 974L, 975L, 978L, 977L, 976L, 980L, 979L, 981L, 982L, 983L, 984L, 985L, 987L, 988L, 986L, 989L, 990L, 991L, 993L, 992L, 994L, 995L, 996L, 998L, 997L, 999L, 1000L, 1001L, 1002L, 1003L, 1004L, 1005L, 1006L, 1007L, 1008L, 1009L, 1011L, 1012L, 1010L, 1013L, 1014L, 1016L, 1015L, 1017L, 1019L, 1018L, 1020L, 1026L, 1023L, 1022L, 1025L, 1024L, 1027L, 1028L, 1029L, 1030L, 1031L, 1021L, 1032L, 1034L, 1033L, 1035L, 1036L, 1038L, 1037L, 1039L, 1041L, 1040L, 1042L, 1048L, 1045L, 1044L, 1047L, 1046L, 1049L, 1050L, 1051L, 1052L, 1053L, 1054L, 1043L, 1055L, 1056L, 1057L, 1061L, 1060L, 1059L, 1062L, 1063L, 1058L, 1064L, 1065L, 1066L, 1067L, 1068L, 1069L, 1070L, 1071L, 1072L, 1073L, 1074L, 1075L, 1076L, 1077L, 1078L, 1079L, 1080L, 1081L, 1082L, 1083L ), class = "data.frame") BradleyTerry2/data/football.RData0000644000176200001440000001420314775237530016411 0ustar liggesusersyW$%TdY@a}ԿII( 쿴Mh1ABη+pN=[J"@q:=SX˿nɵ'o\|Ƶqr׷zo?|xrro>}×˱˱P˱˱kQ˱˱kQn˱˱kQywgQ{Gp:7M{.:o'ev>z|/'?X~2|65\y`39坁eW˭݁7qsqu^9|>G#F#Gwu~|ӁÁptGwa2c??X^2>y56??[ٽOvO.?nowsn_v_.w|ߗ~nj:_}~v>>f{_g_~v꼾T//e]&2 ȴ,v-K/qY's}68[s18s\o\7xpsn\ I#Ϛ['WUW\o \՚u|\|\7xps\uspzoplo^ƺ|}d7~MZ~Kc\=l}8$WWU㪹ukpۃs}:8s78 eкB\[^-Xk?Z- }8$WWU㪹>\=8s}08{,[|tr|?*ׇs譹~<8gs]6_%ךN38׵Wuwp[s>qxE|?z˶s_{[skxx~:-g{غn~{-}꫎}k/zgk[zxlV_og<˶Cz~u<懵7Uk۱uqvX;[yoU/y8gΧ~:t^:r/_}^}ggm{?t>i׫-|~=?lW:ʿ~׏Z緎mzkT?|[{Wo~5_+oe]7O^lX;o_C}뺇Sz5Gk~/Zש?Wwh>_ovϫʷv>߻Ρmz_[[om[Uo/7?/mm{^?:NӲ׏~(˶[/O׎}nm{_uY;>zlϭ^?y~՛޽lq?>{\븕gxq?me[;mqozWxz]{~oԼkC,[[ڛgWǷ޶v8[߫׶w}~zv|y^νquxW+ޛڛ:_mזmwh]oZ;|W=ڿV|kۣmߺ~ӛ/[νX>Źc鍯ZmmkwZ;?m[mZ;?z<9yg7?}G˟ã?.|wk9'>miz?O_]G}q7q~ccWǡ:qsu\?ξuvw:@:P&UML5j0`Ts u&uM5kRפICMjP,` 5XB kX,UkXƚ4֤&5iISMjT,`K5Xr k\,Uk\4פ&-5iIKMZjR`+5\5\5\u5\5鼐ڧL#dD " @8p wz9yfY`fY`B 4 B 4 BV f999999999000000000V#333333333 Q3g A8p `+V[ lӚY/_[ l%J`+V[ l%J`+V/_ l%J`+V[ l%J`+V  %K`+V[ l%J`+V[ lfK/_[ l%J`+V[ l%J`+` %J`+V [2leʰa+V [2 2e˰a+V [2leʰa+V [2le>>>>>>>>555555555#3Q3Q3Q3Q3Q3Q3Q3Q%BBBBBBBBBB3QgD:u<P(CQ E2[[B(Q(CQ E2e(P(CQ E2e(po5o ~(Q(CQ E2e(P(CQ E2eHO?~(P(CQ E2e(P(CQ E2[; яE?~e(P(CQ E2e(P(CQ E-!ӏE?2e(P)CS M24ehД)CS ͇EӏM?~4ehД)CS M24ehД)CS MM?~4hД)CS M24ehД)CS M24M?~4hє)CS M24ehД)CS M24eh>,~4h)CS M24ehД)CS M24eh|X4hjtojˀ@:@u~[{"F$ IdD "`fP BP  BШU`6 f`6####j59999999999000000000V33333    g A8pZ,~4h)CS M24ehД)CS M24eh|X4hGS M24ehД)CS M24ehДhGӏ M24ehД)CS M24ehД)CaGӏy7yo5eu ԁXRuԁ: 潊L#$D2"0 @( @( AhԪl0l0@@@@@AAAAQ3g A8pF`>>>>>>>>555555555#3Q3Q3Q3Q3Q3Q3Q3Q%BBBBBBBBBB3QgD:u<P(CQ E2_; ^Gя E2e(P(CQ E2e(P(ռGяE2e(P(CQ E2e(P(CQyg!ӏE?2e(P(CQ E2e(P(CQ W^GяE?2e(P(CQ E2e(P(CQ WBE?~e(P(CS M24ehД)CS MM?~4hД)CS M24ehД)CS 
M24M?~4hє)CS M24ehД)CS M24eh>,~4h)CS M24ehД)CS M24eh|X4hGS M24ehД)CS M24ehДhy''yQz(bb"BradleyTerry2/data/sound.fields.R0000644000176200001440000001010214775237530016404 0ustar liggesuserssound.fields <- structure(list(comparisons = structure(list(field1 = structure(c(8L, 8L, 8L, 8L, 8L, 8L, 8L, 7L, 7L, 7L, 7L, 7L, 7L, 6L, 6L, 6L, 6L, 6L, 5L, 5L, 5L, 5L, 4L, 4L, 4L, 3L, 3L, 2L, 8L, 8L, 8L, 8L, 8L, 8L, 8L, 7L, 7L, 7L, 7L, 7L, 7L, 6L, 6L, 6L, 6L, 6L, 5L, 5L, 5L, 5L, 4L, 4L, 4L, 3L, 3L, 2L, 8L, 8L, 8L, 8L, 8L, 8L, 8L, 7L, 7L, 7L, 7L, 7L, 7L, 6L, 6L, 6L, 6L, 6L, 5L, 5L, 5L, 5L, 4L, 4L, 4L, 3L, 3L, 2L), .Label = c("000", "001", "010", "011", "100", "101", "110", "111"), class = "factor"), field2 = structure(c(7L, 6L, 5L, 4L, 3L, 2L, 1L, 6L, 5L, 4L, 3L, 2L, 1L, 5L, 4L, 3L, 2L, 1L, 4L, 3L, 2L, 1L, 3L, 2L, 1L, 2L, 1L, 1L, 7L, 6L, 5L, 4L, 3L, 2L, 1L, 6L, 5L, 4L, 3L, 2L, 1L, 5L, 4L, 3L, 2L, 1L, 4L, 3L, 2L, 1L, 3L, 2L, 1L, 2L, 1L, 1L, 7L, 6L, 5L, 4L, 3L, 2L, 1L, 6L, 5L, 4L, 3L, 2L, 1L, 5L, 4L, 3L, 2L, 1L, 4L, 3L, 2L, 1L, 3L, 2L, 1L, 2L, 1L, 1L), .Label = c("000", "001", "010", "011", "100", "101", "110", "111"), class = "factor"), win1 = c(4L, 4L, 6L, 3L, 8L, 7L, 7L, 6L, 6L, 7L, 3L, 7L, 7L, 5L, 5L, 4L, 6L, 7L, 5L, 3L, 7L, 3L, 6L, 5L, 8L, 6L, 6L, 5L, 0L, 3L, 2L, 2L, 4L, 4L, 2L, 1L, 1L, 4L, 2L, 4L, 4L, 1L, 2L, 1L, 3L, 3L, 1L, 2L, 5L, 4L, 0L, 2L, 4L, 3L, 4L, 1L, 1L, 0L, 0L, 2L, 3L, 5L, 5L, 3L, 1L, 2L, 2L, 4L, 3L, 3L, 2L, 3L, 3L, 3L, 1L, 1L, 5L, 4L, 2L, 4L, 1L, 5L, 4L, 2L), tie = c(4L, 2L, 2L, 4L, 0L, 2L, 2L, 2L, 2L, 3L, 3L, 3L, 1L, 2L, 3L, 2L, 1L, 1L, 2L, 2L, 2L, 4L, 1L, 4L, 0L, 1L, 2L, 2L, 3L, 1L, 1L, 2L, 0L, 0L, 3L, 3L, 1L, 1L, 3L, 0L, 1L, 2L, 3L, 3L, 2L, 2L, 3L, 1L, 0L, 0L, 3L, 3L, 0L, 2L, 1L, 2L, 1L, 1L, 1L, 2L, 1L, 0L, 0L, 0L, 2L, 1L, 1L, 1L, 1L, 0L, 0L, 0L, 2L, 1L, 0L, 1L, 0L, 0L, 0L, 1L, 2L, 0L, 1L, 2L), win2 = c(2L, 4L, 2L, 3L, 2L, 1L, 1L, 2L, 2L, 0L, 4L, 0L, 2L, 3L, 2L, 4L, 3L, 2L, 3L, 5L, 1L, 3L, 3L, 1L, 2L, 3L, 2L, 3L, 2L, 1L, 2L, 1L, 1L, 1L, 0L, 1L, 3L, 0L, 0L, 1L, 0L, 2L, 0L, 1L, 0L, 0L, 1L, 2L, 0L, 1L, 2L, 0L, 1L, 0L, 0L, 2L, 3L, 4L, 4L, 1L, 1L, 0L, 0L, 2L, 2L, 2L, 2L, 0L, 1L, 2L, 3L, 2L, 0L, 1L, 4L, 3L, 0L, 1L, 3L, 0L, 2L, 0L, 0L, 1L), win1.adj = c(6, 5, 7, 5, 8, 8, 8, 7, 7, 8.5, 4.5, 8.5, 7.5, 6, 6.5, 5, 6.5, 7.5, 6, 4, 8, 5, 6.5, 7, 8, 6.5, 7, 6, 1.5, 3.5, 2.5, 3, 4, 4, 3.5, 2.5, 1.5, 4.5, 3.5, 4, 4.5, 2, 3.5, 2.5, 4, 4, 2.5, 2.5, 5, 4, 1.5, 3.5, 4, 4, 4.5, 2, 1.5, 0.5, 0.5, 3, 3.5, 5, 5, 3, 2, 2.5, 2.5, 4.5, 3.5, 3, 2, 3, 4, 3.5, 1, 1.5, 5, 4, 2, 4.5, 2, 5, 4.5, 3), win2.adj = c(4, 5, 3, 5, 2, 2, 2, 3, 3, 1.5, 5.5, 1.5, 2.5, 4, 3.5, 5, 3.5, 2.5, 4, 6, 2, 5, 3.5, 3, 2, 3.5, 3, 4, 3.5, 1.5, 2.5, 2, 1, 1, 1.5, 2.5, 3.5, 0.5, 1.5, 1, 0.5, 3, 1.5, 2.5, 1, 1, 2.5, 2.5, 0, 1, 3.5, 1.5, 1, 1, 0.5, 3, 3.5, 4.5, 4.5, 2, 1.5, 0, 0, 2, 3, 2.5, 2.5, 0.5, 1.5, 2, 3, 2, 1, 1.5, 4, 3.5, 0, 1, 3, 0.5, 3, 0, 0.5, 2), instrument = structure(c(3L, 3L, 3L, 3L, 3L, 3L, 3L, 3L, 3L, 3L, 3L, 3L, 3L, 3L, 3L, 3L, 3L, 3L, 3L, 3L, 3L, 3L, 3L, 3L, 3L, 3L, 3L, 3L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L), .Label = c("cello", "flute", "violin"), class = "factor")), .Names = c("field1", "field2", "win1", "tie", "win2", "win1.adj", "win2.adj", "instrument" ), class = "data.frame", row.names = c(NA, -84L)), design = structure(list( a = structure(c(1L, 1L, 1L, 1L, 2L, 2L, 2L, 2L), .Label = c("0", "1"), class = "factor", contrasts = 
structure(c(-1, 1), .Dim = c(2L, 1L), .Dimnames = list(c("0", "1"), NULL))), b = structure(c(1L, 1L, 2L, 2L, 1L, 1L, 2L, 2L), .Label = c("0", "1"), class = "factor", contrasts = structure(c(-1, 1), .Dim = c(2L, 1L), .Dimnames = list(c("0", "1"), NULL))), c = structure(c(1L, 2L, 1L, 2L, 1L, 2L, 1L, 2L), .Label = c("0", "1"), class = "factor", contrasts = structure(c(-1, 1), .Dim = c(2L, 1L), .Dimnames = list(c("0", "1"), NULL)))), .Names = c("a", "b", "c"), row.names = c("000", "001", "010", "011", "100", "101", "110", "111"), class = "data.frame")), .Names = c("comparisons", "design")) BradleyTerry2/data/springall.R0000644000176200001440000000367014775237530016016 0ustar liggesusersspringall <- structure(list(contests = structure(list(row = structure(c(1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 3L, 3L, 3L, 3L, 3L, 3L, 4L, 4L, 4L, 4L, 4L, 5L, 5L, 5L, 5L, 6L, 6L, 6L, 7L, 7L, 8L), .Label = c("1", "2", "3", "4", "5", "6", "7", "8", "9"), class = "factor"), col = structure(c(2L, 3L, 4L, 5L, 6L, 7L, 8L, 9L, 3L, 4L, 5L, 6L, 7L, 8L, 9L, 4L, 5L, 6L, 7L, 8L, 9L, 5L, 6L, 7L, 8L, 9L, 6L, 7L, 8L, 9L, 7L, 8L, 9L, 8L, 9L, 9L), .Label = c("1", "2", "3", "4", "5", "6", "7", "8", "9"), class = "factor"), win = c(2L, 0L, 5L, 2L, 0L, 12L, 13L, 10L, 6L, 17L, 9L, 8L, 24L, 16L, 15L, 22L, 14L, 11L, 24L, 22L, 17L, 3L, 2L, 12L, 10L, 8L, 5L, 20L, 15L, 13L, 27L, 19L, 18L, 2L, 2L, 6L), loss = c(16L, 21L, 10L, 15L, 22L, 3L, 9L, 12L, 11L, 2L, 6L, 12L, 2L, 3L, 2L, 2L, 4L, 4L, 1L, 0L, 2L, 14L, 18L, 6L, 6L, 9L, 11L, 0L, 2L, 5L, 1L, 1L, 2L, 13L, 21L, 8L), tie = c(7L, 1L, 10L, 7L, 2L, 9L, 5L, 3L, 5L, 7L, 8L, 6L, 0L, 5L, 7L, 4L, 5L, 7L, 3L, 3L, 6L, 9L, 4L, 5L, 6L, 11L, 9L, 2L, 5L, 9L, 0L, 5L, 2L, 8L, 5L, 8L), win.adj = c(5.5, 0.5, 10, 5.5, 1, 16.5, 15.5, 11.5, 8.5, 20.5, 13, 11, 24, 18.5, 18.5, 24, 16.5, 14.5, 25.5, 23.5, 20, 7.5, 4, 14.5, 13, 13.5, 9.5, 21, 17.5, 17.5, 27, 21.5, 19, 6, 4.5, 10), loss.adj = c(19.5, 21.5, 15, 18.5, 23, 7.5, 11.5, 13.5, 13.5, 5.5, 10, 15, 2, 5.5, 5.5, 4, 6.5, 7.5, 2.5, 1.5, 5, 18.5, 20, 8.5, 9, 14.5, 15.5, 1, 4.5, 9.5, 1, 3.5, 3, 17, 23.5, 12)), .Names = c("row", "col", "win", "loss", "tie", "win.adj", "loss.adj"), row.names = c(NA, -36L), class = "data.frame"), predictors = structure(list(flav = c(0.6, 4.8, 9, 0.6, 4.8, 9, 0.6, 4.8, 9), gel = c(0, 0, 0, 2.4, 2.4, 2.4, 4.8, 4.8, 4.8), flav.2 = c(0.36, 23.04, 81, 0.36, 23.04, 81, 0.36, 23.04, 81), gel.2 = c(0, 0, 0, 5.76, 5.76, 5.76, 23.04, 23.04, 23.04), flav.gel = c(0, 0, 0, 1.44, 11.52, 21.6, 2.88, 23.04, 43.2)), .Names = c("flav", "gel", "flav.2", "gel.2", "flav.gel" ), row.names = c(NA, -9L), class = "data.frame")), .Names = c("contests", "predictors")) BradleyTerry2/NAMESPACE0000644000176200001440000000373114775237530014204 0ustar liggesusers# Generated by roxygen2: do not edit by hand S3method(add1,BTm) S3method(anova,BTm) S3method(coef,BTabilities) S3method(drop1,BTm) S3method(formula,BTm) S3method(model.matrix,BTm) S3method(predict,BTglmmPQL) S3method(predict,BTm) S3method(print,BTabilities) S3method(print,BTglmmPQL) S3method(print,BTm) S3method(print,summary.BTglmmPQL) S3method(qvcalc,BTabilities) S3method(residuals,BTm) S3method(summary,BTglmmPQL) S3method(vcov,BTabilities) S3method(vcov,BTglmmPQL) export(BTabilities) export(BTm) export(GenDavidson) export(countsToBinomial) export(glmmPQL) export(glmmPQL.control) export(plotProportions) export(qvcalc) importFrom(brglm,brglm) importFrom(graphics,curve) importFrom(graphics,plot) importFrom(graphics,points) importFrom(gtools,combinations) importFrom(lme4,findbars) 
importFrom(lme4,nobars) importFrom(qvcalc,qvcalc) importFrom(qvcalc,qvcalc.default) importFrom(stats,.checkMFClasses) importFrom(stats,.getXlevels) importFrom(stats,C) importFrom(stats,add.scope) importFrom(stats,as.formula) importFrom(stats,coef) importFrom(stats,contrasts) importFrom(stats,delete.response) importFrom(stats,drop.scope) importFrom(stats,family) importFrom(stats,fitted) importFrom(stats,formula) importFrom(stats,gaussian) importFrom(stats,glm.control) importFrom(stats,glm.fit) importFrom(stats,is.empty.model) importFrom(stats,model.frame) importFrom(stats,model.matrix) importFrom(stats,model.offset) importFrom(stats,model.response) importFrom(stats,model.weights) importFrom(stats,na.exclude) importFrom(stats,na.omit) importFrom(stats,na.pass) importFrom(stats,napredict) importFrom(stats,naprint) importFrom(stats,optimize) importFrom(stats,pchisq) importFrom(stats,pf) importFrom(stats,plogis) importFrom(stats,pnorm) importFrom(stats,printCoefmat) importFrom(stats,reformulate) importFrom(stats,relevel) importFrom(stats,runif) importFrom(stats,symnum) importFrom(stats,terms) importFrom(stats,update) importFrom(stats,update.formula) importFrom(stats,vcov) importFrom(utils,flush.console) BradleyTerry2/NEWS.md0000644000176200001440000001233014775715265014064 0ustar liggesusersChanges in BradleyTerry2 1.1.3 ============================== * fix cross-references in documentation * update references to JSS articles * convert Sweave vignette to bookdown for greater accessibility Changes in BradleyTerry2 1.1-2 ============================== * use absolute vs relative link in README so it works on CRAN Changes in BradleyTerry2 1.1-1 ============================== * improve the way `BTm` finds variables passed to `outcome`, `player1` etc, so that it works when run in a separate environment. * convert old tests to unit tests. Changes in BradleyTerry2 1.1-0 ============================== * `anova.BTm` now respects `test` and `dispersion` arguments for models that inherit from `glm`. * fix bug in `anova.BTmlist` affecting models where ability is modelled by predictors but ability is estimated separately for some players due to missing values. * fix bug in `glmmPQL` affecting models with `.` in formula and either offset or weights specified. * standardize tests to use random number generation as in R 2.10 for backwards compatibility. Changes in BradleyTerry2 1.0-9 ============================== * fix bug in setting contrasts in internal function `Diff()` that gave warning under R-devel. * update urls (using https where possible). * fix a couple of `if` statements where argument could be > 1. Changes in BradleyTerry2 1.0-8 ============================== * fix bug in `qvcalc.BTabilities` Changes in BradleyTerry2 1.0-7 ============================== Improvements ------------ * new examples of prediction added, including using `predict.BTm` to estimate abilities with non-player abilities set to non-zero values (for models with a fixed reference category). * `qvcalc.BTabilities` moved over from package **qvcalc**. * package imports rather than depends on **lme4**. Changes in behaviour -------------------- * default `level` in `predict.BTm` and `predict.glmmPQL` is 0 if a fixed effects model has been fitted, 1 otherwise. Bug fixes --------- * BTabilities now works (again) for models where the reference category is not the first player. Players are kept in their original order (levels of `player1` and `player2`), but the abilities are returned with the appropriate reference. 
* BTabilities now works when ability is modelled by covariates and some parameters are inestimable (e.g. as in `chameleons.model` on `?chameleons`). * `predict.BTglmmPQL` now works for models with inestimable parameters Changes in BradleyTerry2 1.0-6 ============================== Changes in behaviour -------------------- * `BTabilities` now returns `NA` for unidentified abilities Bug fixes --------- * BTabilities now respects contrasts argument and contrasts attributes of `player1` and `player2` factors. Also handle unidentified coefficients correctly. Changes in BradleyTerry2 1.0-5 ============================== Bug fixes --------- * no longer imports from **gnm**, so **gnm** need not be installed. Changes in BradleyTerry2 1.0-4 ============================== Bug fixes --------- * depends on **lme4** (>=1.0). Changes in BradleyTerry2 1.0-3 ============================== New Features ------------ * updated football data to include full 2011-12 season. Changes in BradleyTerry2 1.0-2 ============================== New Features ------------ * added football example presented at useR! 2013 with generalised Davidson model for ties. Changes in BradleyTerry2 1.0-1 ============================== Bug fixes --------- * renamed `glmmPQL` object `BTglmmPQL` to avoid conflict with **lme4** (which loads **MASS**). * fixed `BTm` so that it is able to find variables when called inside another function (stackoverflow.com question 14911525). Changes in BradleyTerry2 1.0-0 ============================== * updated references and CITATION to cite JSS paper on BradleyTerry2 Changes in BradleyTerry2 0.9-7 ============================== Bug fixes --------- * fixed `anova.BTmlist` to work for models with random effects * allow models to be specified with no fixed effects Improvements ------------ * updated vignette, including example of bias-reduction, a new example incorporating random effects and a new example on preparing data for use with package Changes in BradleyTerry2 0.9-6 ============================== Bug fixes --------- * fixed `offset` argument to work as documented * corrected documentation for `citations` data Improvements ------------ * updated vignette, to provide more explanation of setting up the data Changes in BradleyTerry2 0.9-5 ============================== * updated contact details Changes in BradleyTerry2 0.9-4 ============================== New Features ------------ * added ice hockey example presented at useR! 2010 Bug fixes --------- * `predict.BTm` now works for models with no random effects and handles new individuals with missing values in predictors. Changes in BradleyTerry2 0.9-3 ============================= New Features ------------ * added predict method for BTm objects. Bug fixes --------- * fixed bug in `BTm.setup` causing problems in finding variables when `BTm` nested within another function. 
BradleyTerry2/inst/0000755000176200001440000000000014775715607013744 5ustar liggesusersBradleyTerry2/inst/CITATION0000644000176200001440000000054214775677344015107 0ustar liggesusersbibentry( bibtype = "Article", title = "Bradley-Terry Models in {R}: The {BradleyTerry2} Package", author = c(person("Heather", "Turner"), person("David", "Firth")), journal = "Journal of Statistical Software", year = "2012", volume = "48", number = "9", pages = "1--21", doi = "10.18637/jss.v048.i09", ) BradleyTerry2/inst/doc/0000755000176200001440000000000014775715607014511 5ustar liggesusersBradleyTerry2/inst/doc/BradleyTerry.html0000644000176200001440000064622414775715607020025 0ustar liggesusers Bradley-Terry Models in R

Bradley-Terry Models in R

For BradleyTerry2 version 1.1.3, 2025-04-10

Abstract

This is a short overview of the R add-on package BradleyTerry2, which facilitates the specification and fitting of Bradley-Terry logit, probit or cauchit models to pair-comparison data. Included are the standard ‘unstructured’ Bradley-Terry model, structured versions in which the parameters are related through a linear predictor to explanatory variables, and the possibility of an order or ‘home advantage’ effect or other ‘contest-specific’ effects. Model fitting is either by maximum likelihood, by penalized quasi-likelihood (for models which involve a random effect), or by bias-reduced maximum likelihood in which the first-order asymptotic bias of parameter estimates is eliminated. Also provided are a simple and efficient approach to handling missing covariate data, and suitably-defined residuals for diagnostic checking of the linear predictor.

1 Introduction

The Bradley-Terry model (Bradley and Terry 1952) assumes that in a ‘contest’ between any two ‘players’, say player \(i\) and player \(j\) \((i, j \in \{1,\ldots,K\})\), the odds that \(i\) beats \(j\) are \(\alpha_i/\alpha_j\), where \(\alpha_i\) and \(\alpha_j\) are positive-valued parameters which might be thought of as representing ‘ability’. A general introduction can be found in Bradley (1984) or Agresti (2002). Applications are many, ranging from experimental psychology to the analysis of sports tournaments to genetics (for example, the allelic transmission/disequilibrium test of Sham and Curtis 1995 is based on a Bradley-Terry model in which the ‘players’ are alleles). In typical psychometric applications the ‘contests’ are comparisons, made by different human subjects, between pairs of items.

The model can alternatively be expressed in the logit-linear form

\[\mathop{\rm logit}[\mathop{\rm pr}(i\ \mathrm{beats}\ j)]=\lambda_i-\lambda_j, \label{eq:unstructured} \tag{1}\]

where \(\lambda_i=\log\alpha_i\) for all \(i\). Thus, assuming independence of all contests, the parameters \(\{\lambda_i\}\) can be estimated by maximum likelihood using standard software for generalized linear models, with a suitably specified model matrix. The primary purpose of the BradleyTerry2 package (Turner and Firth 2012), implemented in the R statistical computing environment (Ihaka and Gentleman 1996; R Development Core Team 2012), is to facilitate the specification and fitting of such models and some extensions.
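
To make this concrete, the following sketch (with made-up data, not taken from the package) shows one way to construct a suitable model matrix by hand and fit Equation (1) with glm: each contest contributes a row with +1 in the column of the first player and -1 in the column of the second, and the ability of the first player is fixed at zero by dropping its column.

players  <- c("A", "B", "C")
contests <- data.frame(player1 = c("A", "A", "B"),
                       player2 = c("B", "C", "C"),
                       win1    = c(1, 0, 1))
# signed design matrix: +1 for player1, -1 for player2 in each contest
X <- t(apply(contests[1:2], 1,
             function(p) (players == p[1]) - (players == p[2])))
colnames(X) <- players
# drop the first column so that lambda_A = 0, and omit the intercept
glm(contests$win1 ~ X[, -1] - 1, family = binomial)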

The BradleyTerry2 package supersedes the earlier BradleyTerry package (Firth 2005), providing a more flexible user interface to allow a wider range of models to be fitted. In particular, BradleyTerry2 allows the inclusion of simple random effects so that the ability parameters can be related to available explanatory variables through a linear predictor of the form

\[\lambda_i=\sum_{r=1}^p\beta_rx_{ir} + U_i. \tag{2} \]

The inclusion of the prediction error \(U_i\) allows for variability between players with equal covariate values and induces correlation between comparisons with a common player. BradleyTerry2 also allows for general contest-specific effects to be included in the model and allows the logit link to be replaced, if required, by a different symmetric link function (probit or cauchit).

The remainder of the paper is organised as follows. Section 2 demonstrates how to use the BradleyTerry2 package to fit a standard (i.e., unstructured) Bradley-Terry model, with a separate ability parameter estimated for each player, including the use of bias-reduced estimation for such models. Section 3 considers variations of the standard model, including the use of player-specific variables to model ability and allowing for contest-specific effects such as an order effect or judge effects. Sections 4 and 5 explain how to obtain important information about a fitted model, in particular the estimates of ability and their standard errors, and player-level residuals, whilst Section 6 notes the functions available to aid model search. Section 7 explains in more detail how to set up data for use with the BradleyTerry2 package, Section 8 lists the functions provided by the package, and finally Section 9 comments on two directions for further development of the software.

2 Standard Bradley-Terry model

2.1 Example: Analysis of journal citations

The following data come from page 448 of Agresti (2002), extracted from the larger table of Stigler (1994). The data are counts of citations among four prominent journals of statistics and are included in the BradleyTerry2 package as the data set citations:

library("BradleyTerry2")
data("citations", package = "BradleyTerry2")
citations
##               citing
## cited          Biometrika Comm Statist JASA JRSS-B
##   Biometrika          714          730  498    221
##   Comm Statist         33          425   68     17
##   JASA                320          813 1072    142
##   JRSS-B              284          276  325    188

Thus, for example, Biometrika was cited 498 times by papers in Journal of the American Statistical Association (JASA) during the period under study. In order to fit a Bradley-Terry model to these data using BTm from the BradleyTerry2 package, the data must first be converted to binomial frequencies. That is, the data need to be organised into pairs (player1, player2) and corresponding frequencies of wins and losses for player1 against player2. The BradleyTerry2 package provides the utility function countsToBinomial to convert a contingency table of wins to the format just described:

citations.sf <- countsToBinomial(citations)
names(citations.sf)[1:2] <- c("journal1", "journal2")
citations.sf
##       journal1     journal2 win1 win2
## 1   Biometrika Comm Statist  730   33
## 2   Biometrika         JASA  498  320
## 3   Biometrika       JRSS-B  221  284
## 4 Comm Statist         JASA   68  813
## 5 Comm Statist       JRSS-B   17  276
## 6         JASA       JRSS-B  142  325

Note that the self-citation counts are ignored – these provide no information on the ability parameters, since the abilities are relative rather than absolute quantities. The binomial response can then be modelled by the difference in player abilities as follows:

citeModel <- BTm(cbind(win1, win2), journal1, journal2, ~ journal,
    id = "journal", data = citations.sf)
citeModel
## Bradley Terry model fit by glm.fit 
## 
## Call:  BTm(outcome = cbind(win1, win2), player1 = journal1, player2 = journal2, 
##     formula = ~journal, id = "journal", data = citations.sf)
## 
## Coefficients:
## journalComm Statist          journalJASA        journalJRSS-B  
##             -2.9491              -0.4796               0.2690  
## 
## Degrees of Freedom: 6 Total (i.e. Null);  3 Residual
## Null Deviance:       1925 
## Residual Deviance: 4.293     AIC: 46.39

The coefficients here are maximum likelihood estimates of \(\lambda_2, \lambda_3, \lambda_4\), with \(\lambda_1\) (the log-ability for Biometrika) set to zero as an identifying convention.
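
Since the model is linear on the logit scale, fitted winning probabilities can be recovered directly from these coefficients. For example, the following short sketch computes the fitted probability that Biometrika ‘beats’ JASA, that is, receives the citation in a comparison between these two journals:

lambda <- c(Biometrika = 0, coef(citeModel))          # reference journal has log-ability 0
plogis(lambda["Biometrika"] - lambda["journalJASA"])  # roughly 0.62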

The one-sided model formula

  ~ journal

specifies the model for player ability, in this case the ‘citeability’ of the journal. The id argument specifies that "journal" is the name to be used for the factor that identifies the player – the values of which are given here by journal1 and journal2 for the first and second players respectively. Therefore in this case a separate citeability parameter is estimated for each journal.

If a different ‘reference’ journal is required, this can be achieved using the optional refcat argument: for example, making use of update to avoid re-specifying the whole model,

update(citeModel, refcat = "JASA")
## Bradley Terry model fit by glm.fit 
## 
## Call:  BTm(outcome = cbind(win1, win2), player1 = journal1, player2 = journal2, 
##     formula = ~journal, id = "journal", refcat = "JASA", data = citations.sf)
## 
## Coefficients:
##   journalBiometrika  journalComm Statist        journalJRSS-B  
##              0.4796              -2.4695               0.7485  
## 
## Degrees of Freedom: 6 Total (i.e. Null);  3 Residual
## Null Deviance:       1925 
## Residual Deviance: 4.293     AIC: 46.39

– the same model in a different parameterization.

The use of the standard Bradley-Terry model for this application might seem rather questionable – for example, citations within a published paper can hardly be considered independent, and the model discards potentially important information on self-citation. Stigler (1994) provides arguments to defend the model’s use despite such concerns.

2.2 Bias-reduced estimates

By default, BTm estimates the standard Bradley-Terry model by maximum likelihood, using an internal call to the glm function. An alternative is to fit by bias-reduced maximum likelihood (Firth 1993): this additionally requires the brglm package (Kosmidis 2007), and is specified by the optional argument br = TRUE. The resultant effect, namely removal of first-order asymptotic bias in the estimated coefficients, is often quite small. One notable feature of bias-reduced fits is that all estimated coefficients and standard errors are necessarily finite, even in situations of ‘complete separation’ where maximum likelihood estimates take infinite values (Heinze and Schemper 2002).

For the citation data, the parameter estimates are only very slightly changed in the bias-reduced fit:

update(citeModel, br = TRUE)
## Bradley Terry model fit by brglm.fit 
## 
## Call:  BTm(outcome = cbind(win1, win2), player1 = journal1, player2 = journal2,      formula = ~journal, id = "journal", data = citations.sf,      br = TRUE) 
## 
## Coefficients:
## journalComm Statist          journalJASA        journalJRSS-B  
##             -2.9444              -0.4791               0.2685  
## 
## Degrees of Freedom: 6 Total (i.e. Null);  3 Residual
## Deviance:        4.2957 
## Penalized Deviance: -11.4816     AIC: 46.3962

Here the bias of maximum likelihood is small because the binomial counts are fairly large. In more sparse arrangements of contests – that is, where there is little or no replication of the contests – the effect of bias reduction would typically be more substantial than the insignificant one seen here.
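
The following toy example illustrates the extreme case of complete separation; the data are made up (they are not part of the package) and the bias-reduced fit assumes that the brglm package is installed. Player C is undefeated, so its maximum likelihood ability estimate diverges, whereas the bias-reduced estimate remains finite:

toy <- data.frame(winner = factor(c("A", "B", "C"), levels = c("A", "B", "C")),
                  loser  = factor(c("B", "A", "A"), levels = c("A", "B", "C")))
# outcome = 1: the player listed in winner wins every contest
BTm(1, winner, loser, data = toy)             # ML: estimate for C is effectively infinite
BTm(1, winner, loser, data = toy, br = TRUE)  # bias-reduced: finite estimate for C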

3 Abilities predicted by explanatory variables

3.1 ‘Player-specific’ predictor variables

In some application contexts there may be ‘player-specific’ explanatory variables available, and it is then natural to consider model simplification of the form

\[\lambda_i=\sum_{r=1}^p\beta_rx_{ir} + U_i, \tag{3} \]

in which the ability of each player \(i\) is related to explanatory variables \(x_{i1},\ldots,x_{ip}\) through a linear predictor with coefficients \(\beta_1,\ldots,\beta_p\); the \(\{U_i\}\) are independent errors. Dependence of the player abilities on explanatory variables can be specified via the formula argument, using the standard S-language model formulae. The difference in the abilities of player \(i\) and player \(j\) is modelled by

\[\sum_{r=1}^p\beta_rx_{ir} - \sum_{r=1}^p\beta_rx_{jr} + U_i - U_j, \label{eq:structured} \tag{4}\]

where \(U_i \sim N(0, \sigma^2)\) for all \(i\). The Bradley-Terry model is then a generalized linear mixed model, which the BTm function currently fits by using the penalized quasi-likelihood algorithm of Breslow and Clayton (1993).

As an illustration, consider the following simple model for the flatlizards data, which predicts the fighting ability of Augrabies flat lizards by body size (snout to vent length):

options(show.signif.stars = FALSE)
data("flatlizards", package = "BradleyTerry2")
lizModel <- BTm(1, winner, loser, ~ SVL[..] + (1|..),
                data = flatlizards)

Here the winner of each fight is compared to the loser, so the outcome is always 1. The special name ‘..’ appears in the formula as the default identifier for players, in the absence of a user-specified id argument. The values of this factor are given by winner for the winning lizard and loser for the losing lizard in each contest. These factors are provided in the data frame contests that is the first element of the list object flatlizards. The second element of flatlizards is another data frame, predictors, containing measurements on the observed lizards, including SVL, which is the snout to vent length. Thus SVL[..] represents the snout to vent length indexed by lizard (winner or loser as appropriate). Finally a random intercept for each lizard is included using the bar notation familiar to users of the lme4 package (Bates, Mächler, and Bolker 2011). (Note that a random intercept is the only random effect structure currently implemented in BradleyTerry2.)
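
The data layout assumed by this call can be checked directly; the following lines (for illustration only) inspect the two components of the flatlizards list, the contests and the lizard-level predictors:

str(flatlizards$contests)                               # includes the winner and loser factors
head(flatlizards$predictors[, c("SVL", "throat.PC1")])  # one row of measurements per lizard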

The fitted model is summarized below:

summary(lizModel)
## 
## Call:
## 
## BTm(outcome = 1, player1 = winner, player2 = loser, formula = ~SVL[..] + 
##     (1 | ..), data = flatlizards)
## 
## Fixed Effects:
##         Estimate Std. Error z value Pr(>|z|)
## SVL[..]   0.2051     0.1158   1.772   0.0765
## 
## (Dispersion parameter for binomial family taken to be 1)
## 
## Random Effects:
##           Estimate Std. Error z value Pr(>|z|)
## Std. Dev.    1.126      0.261   4.314  1.6e-05
## 
## Number of iterations: 8

The coefficient of snout to vent length is weakly significant; however, the standard deviation of the random effect is quite large, suggesting that this simple model has fairly poor explanatory power. A more appropriate model is considered in the next section.

3.2 Missing values

The contest data may include all possible pairs of players and hence rows of missing data corresponding to players paired with themselves. Such rows contribute no information to the Bradley-Terry model and are simply discarded by BTm.

Where there are missing values in player-specific predictor (or explanatory) variables which appear in the formula, it will typically be very wasteful to discard all contests involving players for which some values are missing. Instead, such cases are accommodated by the inclusion of one or more parameters in the model. If, for example, player \(1\) has one or more of its predictor values \(x_{11},\ldots,x_{1p}\) missing, then the combination of Equations (1) and (4) above yields

\[\mathop{\rm logit}[\mathop{\rm pr}(1\ \mathrm{beats}\ j)]=\lambda_1 - \left(\sum_{r=1}^p\beta_rx_{jr} + U_j\right), \tag{5} \]

for all other players \(j\). This results in the inclusion of a ‘direct’ ability parameter for each player having missing predictor values, in addition to the common coefficients \(\beta_1,\ldots,\beta_p\) – an approach which will be appropriate when the missingness mechanism is unrelated to contest success. The same device can be used also to accommodate any user-specified departures from a structured Bradley-Terry model, whereby some players have their abilities determined by the linear predictor but others do not.

In the original analysis of the flatlizards data (Whiting et al. 2006), the final model included the first and third principal components of the spectral reflectance from the throat (representing brightness and UV intensity respectively) as well as head length and the snout to vent length seen in our earlier model. The spectroscopy data were missing for two lizards, so the ability of these lizards was estimated directly. The following fits this model, with the addition of a random intercept as before:

lizModel2 <- BTm(1, winner, loser,
            ~ throat.PC1[..] + throat.PC3[..] +
            head.length[..] + SVL[..] + (1|..),
            data = flatlizards)
summary(lizModel2)
## 
## Call:
## 
## BTm(outcome = 1, player1 = winner, player2 = loser, formula = ~throat.PC1[..] + 
##     throat.PC3[..] + head.length[..] + SVL[..] + (1 | ..), data = flatlizards)
## 
## Fixed Effects:
##                   Estimate Std. Error z value Pr(>|z|)
## ..lizard096      3.668e+01  3.875e+07   0.000   1.0000
## ..lizard099      9.531e-01  1.283e+00   0.743   0.4576
## throat.PC1[..]  -8.689e-02  4.120e-02  -2.109   0.0349
## throat.PC3[..]   3.735e-01  1.527e-01   2.445   0.0145
## head.length[..] -1.382e+00  7.390e-01  -1.870   0.0614
## SVL[..]          1.722e-01  1.373e-01   1.254   0.2098
## 
## (Dispersion parameter for binomial family taken to be 1)
## 
## Random Effects:
##           Estimate Std. Error z value Pr(>|z|)
## Std. Dev.   1.1099     0.3223   3.443 0.000575
## 
## Number of iterations: 8

Note that BTm detects that lizards 96 and 99 have missing values in the specified predictors and automatically includes separate ability parameters for these lizards. This model was found to be the single best model based on the principal components of reflectance and the other predictors available, and indeed the standard deviation of the random intercept is much reduced, though still highly significant. Allowing for this significant variation between lizards with the same predictor values produces more realistic (i.e., larger) standard errors for the parameters when compared to the original analysis of Whiting et al. (2006). Although this affects the significance of the morphological variables, it does not affect the significance of the principal components, so in this case it does not affect the main conclusions of the study.

3.3 Order effect

In certain types of application some or all contests have an associated ‘bias’, related to the order in which items are presented to a judge or to the location in which a contest takes place, for example. A natural extension of the Bradley-Terry model (Equation (1)) is then

\[\mathop{\rm logit}[\mathop{\rm pr}(i\ \mathrm{beats}\ j)]=\lambda_i-\lambda_j + \delta z, \tag{6} \]

where \(z=1\) if \(i\) has the supposed advantage and \(z=-1\) if \(j\) has it. (If the ‘advantage’ is in fact a disadvantage, \(\delta\) will be negative.) The scores \(\lambda_i\) then relate to ability in the absence of any such advantage.

As an example, consider the baseball data given in Agresti (2002), page 438:

data("baseball", package = "BradleyTerry2")
head(baseball)
##   home.team away.team home.wins away.wins
## 1 Milwaukee   Detroit         4         3
## 2 Milwaukee   Toronto         4         2
## 3 Milwaukee  New York         4         3
## 4 Milwaukee    Boston         6         1
## 5 Milwaukee Cleveland         4         2
## 6 Milwaukee Baltimore         6         0

The data set records the home wins and losses for each baseball team against each of the 6 other teams in the data set. The head function is used to show the first 6 records, which are the Milwaukee home games. We see for example that Milwaukee played 7 home games against Detroit and won 4 of them. The ‘standard’ Bradley-Terry model without a home-advantage parameter will be fitted if no formula is specified in the call to BTm:

baseballModel1 <- BTm(cbind(home.wins, away.wins), home.team, away.team,
                      data = baseball, id = "team")
summary(baseballModel1)
## 
## Call:
## BTm(outcome = cbind(home.wins, away.wins), player1 = home.team, 
##     player2 = away.team, id = "team", data = baseball)
## 
## Coefficients:
##               Estimate Std. Error z value Pr(>|z|)
## teamBoston      1.1077     0.3339   3.318 0.000908
## teamCleveland   0.6839     0.3319   2.061 0.039345
## teamDetroit     1.4364     0.3396   4.230 2.34e-05
## teamMilwaukee   1.5814     0.3433   4.607 4.09e-06
## teamNew York    1.2476     0.3359   3.715 0.000203
## teamToronto     1.2945     0.3367   3.845 0.000121
## 
## (Dispersion parameter for binomial family taken to be 1)
## 
##     Null deviance: 78.015  on 42  degrees of freedom
## Residual deviance: 44.053  on 36  degrees of freedom
## AIC: 140.52
## 
## Number of Fisher Scoring iterations: 4

The reference team is Baltimore, estimated to be the weakest of these seven, with Milwaukee and Detroit the strongest.
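
This ordering can be read off by sorting the estimated log-abilities, remembering that the reference team has its log-ability fixed at zero (the label teamBaltimore below is added only for display):

sort(c(teamBaltimore = 0, coef(baseballModel1)), decreasing = TRUE)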

In the above, the ability of each team is modelled simply as ~ team, where the values of the factor team are given by home.team for the first team and away.team for the second team in each game. To estimate the home-advantage effect, an additional variable is required to indicate whether the team is at home or not. Therefore data frames containing both the team factor and this new indicator variable are required in place of the factors home.team and away.team in the call to BTm. This is achieved here by over-writing the home.team and away.team factors in the baseball data frame:

baseball$home.team <- data.frame(team = baseball$home.team, at.home = 1)
baseball$away.team <- data.frame(team = baseball$away.team, at.home = 0)

The at.home variable is needed for both the home team and the away team, so that it can be differenced as appropriate in the linear predictor. With the data organised in this way, the ability formula can now be updated to include the at.home variable as follows:

baseballModel2 <- update(baseballModel1, formula = ~ team + at.home)
summary(baseballModel2)
## 
## Call:
## BTm(outcome = cbind(home.wins, away.wins), player1 = home.team, 
##     player2 = away.team, formula = ~team + at.home, id = "team", 
##     data = baseball)
## 
## Coefficients:
##               Estimate Std. Error z value Pr(>|z|)
## teamBoston      1.1438     0.3378   3.386 0.000710
## teamCleveland   0.7047     0.3350   2.104 0.035417
## teamDetroit     1.4754     0.3446   4.282 1.85e-05
## teamMilwaukee   1.6196     0.3474   4.662 3.13e-06
## teamNew York    1.2813     0.3404   3.764 0.000167
## teamToronto     1.3271     0.3403   3.900 9.64e-05
## at.home         0.3023     0.1309   2.308 0.020981
## 
## (Dispersion parameter for binomial family taken to be 1)
## 
##     Null deviance: 78.015  on 42  degrees of freedom
## Residual deviance: 38.643  on 35  degrees of freedom
## AIC: 137.11
## 
## Number of Fisher Scoring iterations: 4

This reproduces the results given on page 438 of Agresti (2002): the home team has an estimated odds-multiplier of \(\exp(0.3023) = 1.35\) in its favour.
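
An approximate Wald-type 95% confidence interval for this odds multiplier can be computed from the summary output along the following lines (a small sketch, not part of the original analysis; output not shown):

est <- coef(summary(baseballModel2))["at.home", ]
exp(est["Estimate"] + c(-1.96, 1.96) * est["Std. Error"])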

3.4 More general (contest-specific) predictors

The ‘home advantage’ effect is a simple example of a contest-specific predictor. Such predictors are necessarily interactions, between aspects of the contest and (aspects of) the two ‘players’ involved.

For more elaborate examples of such effects, see ?chameleons and ?CEMS. The former includes an ‘experience’ effect, which changes through time, on the fighting ability of male chameleons. The latter illustrates a common situation in psychometric applications of the Bradley-Terry model, where subjects express preference for one of two objects (the ‘players’), and it is the influence on the results of subject attributes that is of primary interest.

As an illustration of the way in which such effects are specified, consider the following model specification taken from the examples in ?CEMS, where data on students’ preferences in relation to six European management schools is analysed.

data("CEMS", package = "BradleyTerry2")
table8.model <-  BTm(outcome = cbind(win1.adj, win2.adj),
    player1 = school1, player2 = school2, formula = ~ .. +
    WOR[student] * LAT[..] +  DEG[student] * St.Gallen[..] +
    STUD[student] * Paris[..] + STUD[student] * St.Gallen[..] +
    ENG[student] * St.Gallen[..] + FRA[student] * London[..] +
    FRA[student] * Paris[..] + SPA[student] * Barcelona[..] +
    ITA[student] * London[..] + ITA[student] * Milano[..] +
    SEX[student] * Milano[..],
    refcat = "Stockholm", data = CEMS)
## Warning in eval(family$initialize): non-integer counts in a binomial
## glm!

This model reproduces results from Table 8 of Dittrich, Hatzinger, and Katzenbeisser (2001) apart from minor differences due to the different treatment of ties. Here the outcome is the binomial frequency of preference for school1 over school2, with ties counted as half a ‘win’ and half a ‘loss’. The formula specifies the model for school ‘ability’ or worth. In this formula, the default label ‘..’ represents the school (with values given by school1 or school2 as appropriate) and student is a factor specifying the student that made the comparison. The remaining variables in the formula use R’s standard indexing mechanism to include student-specific variables, e.g., WOR: whether or not the student was in full-time employment, and school-specific variables, e.g., LAT: whether the school was in a ‘Latin’ city. Thus there are three types of variables: contest-specific (school1, school2, student), subject-specific (WOR, DEG, …) and object-specific (LAT, St.Gallen, …). These three types of variables are provided in three data frames, contained in the list object CEMS.
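
The bracketed terms in the formula are ordinary R subscripting. As an informal sketch (not part of the original example; output not shown), a term such as WOR[student] expands, conceptually, to the student-level variable indexed by the contest-level student identifier:

head(CEMS$students$WOR[CEMS$preferences$student])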

4 Ability scores

The function BTabilities extracts estimates and standard errors for the log-ability scores \(\lambda_1, \ldots,\lambda_K\). These will either be ‘direct’ estimates, in the case of the standard Bradley-Terry model or for players with one or more missing predictor values, or ‘model-based’ estimates of the form \(\hat\lambda_i=\sum_{r=1}^p\hat\beta_rx_{ir}\) for players whose ability is predicted by explanatory variables.

As a simple illustration, team ability estimates in the home-advantage model for the baseball data are obtained by:

BTabilities(baseballModel2)
##             ability      s.e.
## Baltimore 0.0000000 0.0000000
## Boston    1.1438027 0.3378422
## Cleveland 0.7046945 0.3350014
## Detroit   1.4753572 0.3445518
## Milwaukee 1.6195550 0.3473653
## New York  1.2813404 0.3404034
## Toronto   1.3271104 0.3403222

This gives, for each team, the estimated ability when the team enjoys no home advantage.
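
If abilities are wanted on the original multiplicative scale, the estimated log-abilities can simply be exponentiated and, if desired, normalized to sum to one; a minimal sketch (output not shown):

ability <- exp(BTabilities(baseballModel2)[, "ability"])
round(ability / sum(ability), 3)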

Similarly, estimates of the fighting ability of each lizard in the flatlizards data under the model based on the principal components of the spectral reflectance from the throat are obtained as follows:

head(BTabilities(lizModel2), 4)
##             ability      s.e.
## lizard003  1.562453 0.5227564
## lizard005  0.869896 0.5643448
## lizard006 -0.243853 0.5939836
## lizard009  1.211622 0.6476100

The ability estimates in an unstructured Bradley-Terry model are particularly well suited to presentation using the device of quasi-variances (Firth and de Menezes 2004). The qvcalc package (Firth 2010, version 0.8-5 or later) contains a function of the same name which does the necessary work:

> library("qvcalc")
> baseball.qv <- qvcalc(BTabilities(baseballModel2))
> plot(baseball.qv,
+      levelNames = c("Bal", "Bos", "Cle", "Det", "Mil", "NY", "Tor"))

Figure 1: Estimated relative abilities of baseball teams.

The ‘comparison intervals’ as shown in Figure 1 are based on ‘quasi standard errors’, and can be interpreted as if they refer to independent estimates of ability for the teams. This has the advantage that comparison between any pair of teams is readily made (i.e., not only comparisons with the ‘reference’ team). For details of the theory and method of calculation see Firth and de Menezes (2004).
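
As a rough sketch of how such a pairwise comparison might be computed directly (this assumes, as in current versions of qvcalc, that the fitted object stores its results in a qvframe component with estimate and quasiVar columns; not run):

## qv  <- baseball.qv$qvframe
## est <- qv["Detroit", "estimate"] - qv["Boston", "estimate"]
## se  <- sqrt(qv["Detroit", "quasiVar"] + qv["Boston", "quasiVar"])
## est + c(-1.96, 1.96) * se   # approximate 95% comparison interval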

5 Residuals

There are two main types of residuals available for a Bradley-Terry model object.

First, there are residuals obtained by the standard methods for models of class "glm". These all deliver one residual for each contest or type of contest. For example, Pearson residuals for the model lizModel2 can be obtained simply by

res.pearson <- round(residuals(lizModel2), 3)
head(cbind(flatlizards$contests, res.pearson), 4)
##      winner     loser res.pearson
## 1 lizard048 lizard006       0.556
## 2 lizard060 lizard011       0.664
## 3 lizard023 lizard012       0.220
## 4 lizard030 lizard012       0.153

More useful for diagnostics on the linear predictor \(\sum\beta_rx_{ir}\) are ‘player’-level residuals, obtained by using the function residuals with argument type = "grouped". These residuals can then be plotted against other player-specific variables.

res <- residuals(lizModel2, type = "grouped")
#  with(flatlizards$predictors, plot(throat.PC2, res))
#  with(flatlizards$predictors, plot(head.width, res))

These residuals estimate the error in the linear predictor; they are obtained by suitable aggregation of the so-called ‘working’ residuals from the model fit. The weights attribute indicates the relative information in these residuals – weight is roughly inversely proportional to variance – which may be useful for plotting and/or interpretation; for example, a large residual may be of no real concern if based on very little information. Weighted least-squares regression of these residuals on any variable already in the model is null. For example:

lm(res ~ throat.PC1, weights = attr(res, "weights"),
   data = flatlizards$predictors)
## 
## Call:
## lm(formula = res ~ throat.PC1, data = flatlizards$predictors, 
##     weights = attr(res, "weights"))
## 
## Coefficients:
## (Intercept)   throat.PC1  
##  -3.674e-15   -2.442e-15
lm(res ~ head.length, weights = attr(res, "weights"),
   data = flatlizards$predictors)
## 
## Call:
## lm(formula = res ~ head.length, data = flatlizards$predictors, 
##     weights = attr(res, "weights"))
## 
## Coefficients:
## (Intercept)  head.length  
##  -3.663e-15   -6.708e-14

As an illustration of evident non-null residual structure, consider the unrealistically simple model lizModel that was fitted in Section 3 above. That model lacks the clearly significant predictor variable throat.PC3, and the plot shown in Figure 2 demonstrates this fact graphically:

lizModel.residuals <- residuals(lizModel, type = "grouped")
plot(flatlizards$predictors$throat.PC3, lizModel.residuals)

Figure 2: Lizard residuals for the simple model lizModel, plotted against throat.PC3.

The residuals in the plot exhibit a strong, positive regression slope in relation to the omitted predictor variable throat.PC3.

6 Model search

In addition to update() as illustrated in preceding sections, methods for the generic functions add1(), drop1() and anova() are provided. These can be used to investigate the effect of adding or removing a variable, whether that variable is contest-specific, such as an order effect, or player-specific; and to compare the fit of nested models.
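
For instance, the contribution of the home-advantage term in the baseball example could be assessed along these lines (a sketch; output not shown):

drop1(baseballModel2, test = "Chisq")
anova(baseballModel1, baseballModel2)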

7 Setting up the data

7.1 Contest-specific data

The outcome argument of BTm represents a binomial response and can be supplied in any of the formats allowed by the glm function. That is, either a two-column matrix with the columns giving the number of wins and losses (for player1 vs. player2), a factor where the first level denotes a loss and all other levels denote a win, or a binary variable where 0 denotes a loss and 1 denotes a win. Each row represents either a single contest or a set of contests between the same two players.

The player1 and player2 arguments are either factors specifying the two players in each contest, or data frames containing such factors, along with any contest-specific variables that are also player-specific, such as the at.home variable seen in Section 3.3. If given in data frames, the factors identifying the players should be named as specified by the id argument and should have identical levels, since they represent a particular sample of the full set of players.

Thus for the model baseballModel2, which was specified by the following call:

 baseballModel2$call
## BTm(outcome = cbind(home.wins, away.wins), player1 = home.team, 
##     player2 = away.team, formula = ~team + at.home, id = "team", 
##     data = baseball)

the data are provided in the baseball data frame, which has the following structure:

str(baseball, vec.len = 2)
## 'data.frame':    42 obs. of  4 variables:
##  $ home.team:'data.frame':   42 obs. of  2 variables:
##   ..$ team   : Factor w/ 7 levels "Baltimore","Boston",..: 5 5 5 5 5 ...
##   ..$ at.home: num  1 1 1 1 1 ...
##  $ away.team:'data.frame':   42 obs. of  2 variables:
##   ..$ team   : Factor w/ 7 levels "Baltimore","Boston",..: 4 7 6 2 3 ...
##   ..$ at.home: num  0 0 0 0 0 ...
##  $ home.wins: int  4 4 4 6 4 ...
##  $ away.wins: int  3 2 3 1 2 ...

In this case home.team and away.team are both data frames, with the factor team specifying the team and the variable at.home specifying whether or not the team was at home. So the first comparison

baseball$home.team[1,]
##        team at.home
## 1 Milwaukee       1
baseball$away.team[1,]
##      team at.home
## 1 Detroit       0

is Milwaukee playing at home against Detroit. The outcome is given by

 baseball[1, c("home.wins", "away.wins")]
##   home.wins away.wins
## 1         4         3

Contest-specific variables that are not player-specific – for example, whether it rained or not during a contest – should only be used in interactions with variables that are player-specific, otherwise the effect on ability would be the same for both players and would cancel out. Such variables can conveniently be provided in a single data frame along with the outcome, player1 and player2 data.
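
For instance (purely illustrative, since the baseball data contain no such variable), a contest-level indicator rain could be allowed to modify the home advantage through an interaction with the player-specific at.home variable:

## hypothetical: 'rain' would need to be a column of the contest-level data
## update(baseballModel2, formula = ~ team + at.home + at.home:rain)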

An offset in the model can be specified by using the offset argument to BTm. This facility is provided for completeness: the authors have not yet encountered an application where it is needed.

To use only certain rows of the contest data in the analysis, the subset argument may be used in the call to BTm. This should either be a logical vector of the same length as the binomial response, or a numeric vector containing the indices of rows to be used.
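
For example, a refit using only the first 21 contests might be specified as follows (an illustrative sketch, not run):

## update(baseballModel2, subset = 1:21)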

7.2 Non contest-specific data

Some variables do not vary by contest directly, but rather vary by a factor that is contest-specific, such as the player ID or the judge making the paired comparison. For such variables, it is more economical to store the data by the levels of the contest-specific factor and use indexing to obtain the values for each contest.

The CEMS example in Section 3.4 provides an illustration of such variables. In this example student-specific variables are indexed by student and school-specific variables are indexed by .., i.e., the first or second school in the comparison as appropriate. There are then two extra sets of variables in addition to the usual contest-specific data as described in the last section. A good way to provide these data to BTm is as a list of data frames, one for each set of variables, e.g.,

str(CEMS, vec.len = 2)
## List of 3
##  $ preferences:'data.frame': 4545 obs. of  8 variables:
##   ..$ student : num [1:4545] 1 1 1 1 1 ...
##   ..$ school1 : Factor w/ 6 levels "Barcelona","London",..: 2 2 4 2 4 ...
##   ..$ school2 : Factor w/ 6 levels "Barcelona","London",..: 4 3 3 5 5 ...
##   ..$ win1    : num [1:4545] 1 1 NA 0 0 ...
##   ..$ win2    : num [1:4545] 0 0 NA 1 1 ...
##   ..$ tied    : num [1:4545] 0 0 NA 0 0 ...
##   ..$ win1.adj: num [1:4545] 1 1 NA 0 0 ...
##   ..$ win2.adj: num [1:4545] 0 0 NA 1 1 ...
##  $ students   :'data.frame': 303 obs. of  8 variables:
##   ..$ STUD: Factor w/ 2 levels "other","commerce": 1 2 1 2 1 ...
##   ..$ ENG : Factor w/ 2 levels "good","poor": 1 1 1 1 2 ...
##   ..$ FRA : Factor w/ 2 levels "good","poor": 1 2 1 1 2 ...
##   ..$ SPA : Factor w/ 2 levels "good","poor": 2 2 2 2 2 ...
##   ..$ ITA : Factor w/ 2 levels "good","poor": 2 2 2 1 2 ...
##   ..$ WOR : Factor w/ 2 levels "no","yes": 1 1 1 1 1 ...
##   ..$ DEG : Factor w/ 2 levels "no","yes": 2 1 2 1 1 ...
##   ..$ SEX : Factor w/ 2 levels "female","male": 2 1 2 1 2 ...
##  $ schools    :'data.frame': 6 obs. of  7 variables:
##   ..$ Barcelona: num [1:6] 1 0 0 0 0 ...
##   ..$ London   : num [1:6] 0 1 0 0 0 ...
##   ..$ Milano   : num [1:6] 0 0 1 0 0 ...
##   ..$ Paris    : num [1:6] 0 0 0 1 0 ...
##   ..$ St.Gallen: num [1:6] 0 0 0 0 1 ...
##   ..$ Stockholm: num [1:6] 0 0 0 0 0 ...
##   ..$ LAT      : num [1:6] 1 0 1 1 0 ...

The names of the data frames are only used by BTm if they match the names specified in the player1 and player2 arguments, in which case it is assumed that these are data frames providing the data for the first and second player respectively. The rows of data frames in the list should either correspond to the contests or the levels of the factor used for indexing.

Player-specific offsets should be included in the formula by using the offset function.
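
As a sketch (the variable name here is hypothetical, not part of the flatlizards data), a player-specific offset enters the ability formula like any other indexed variable:

## BTm(1, winner, loser, ~ SVL[..] + offset(prior.score[..]) + (1|..),
##     data = flatlizards)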

7.3 Converting data from a ‘wide’ format

The BTm function requires data in a ‘long’ format, with one row per contest, provided either directly as in Section 7.1 or via indexing as in Section 7.2. In studies where the same set of paired comparisons are made by several judges, as in a questionnaire for example, the data may be stored in a ‘wide’ format, with one row per judge.

As an example, consider the cemspc data from the prefmod package (Hatzinger and Dittrich 2012), which provides data from the CEMS study in a wide format. Each row corresponds to one student; the first 15 columns give the outcome of all pairwise comparisons between the 6 schools in the study and the last two columns correspond to two of the student-specific variables: ENG (indicating the student’s knowledge of English) and SEX (indicating the student’s gender).

The following steps convert these data into a form suitable for analysis with BTm. First a new data frame is created from the student-specific variables and these variables are converted to factors:

library("prefmod")
student <- cemspc[c("ENG", "SEX")]
student$ENG <- factor(student$ENG, levels = 1:2,
                      labels = c("good", "poor"))
student$SEX <- factor(student$SEX, levels = 1:2,
                      labels = c("female", "male"))

This data frame is put into a list, which will eventually hold all the necessary data. Then a student factor is created for indexing the student data to produce contest-level data. This is put in a new data frame that will hold the contest-specific data.

cems <- list(student = student)
student <- gl(303, 1, 303 * 15) #303 students, 15 comparisons
contest <- data.frame(student = student)

Next the outcome data is converted to a binomial response, adjusted for ties. The result is added to the contest data frame.

win <- cemspc[, 1:15] == 0
lose <- cemspc[, 1:15] == 2
draw <- cemspc[, 1:15] == 1
contest$win.adj <- c(win + draw/2)
contest$lose.adj <- c(lose + draw/2)

Then two factors are created identifying the first and second school in each comparison. The comparisons are in the order 1 vs. 2, 1 vs. 3, 2 vs. 3, 1 vs. 4, …, so the factors can be created as follows:

lab <- c("London", "Paris", "Milano", "St. Gallen", "Barcelona",
         "Stockholm")
contest$school1 <- factor(sequence(1:5), levels = 1:6, labels = lab)
contest$school2 <- factor(rep(2:6, 1:5), levels = 1:6, labels = lab)

Note that both factors have exactly the same levels, even though only five of the six players are represented in each case. In other words, the numeric factor levels refer to the same players in each case, so that the player is unambiguously identified. This ensures that player-specific parameters and player-specific covariates are correctly specified.
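
A quick check that the intended pairings have been generated (not part of the original steps; output not shown):

unique(contest[, c("school1", "school2")])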

Finally the contest data frame is added to the main list:

cems$contest <- contest

This creates a single data object that can be passed to the data argument of BTm. Of course, such a list could be created on-the-fly as in data = list(contest, student), which may be more convenient in practice.
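
As a final illustration (not part of the original text), the converted data could then be used to fit the standard Bradley-Terry model directly, e.g.,

## cemsModel <- BTm(cbind(win.adj, lose.adj), school1, school2, data = cems)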

7.4 Converting data from the format required by the earlier BradleyTerry package

The BradleyTerry package described in Firth (2005) required contest/comparison results to be in a data frame with columns named winner, loser and Freq. The following example shows how xtabs and countsToBinomial can be used to convert such data for use with the BTm function in BradleyTerry2:

library("BradleyTerry")  ## the /old/ BradleyTerry package
## load data frame with columns "winner", "loser", "Freq"
data("citations", package = "BradleyTerry")
## convert to 2-way table of counts
citations <- xtabs(Freq ~ winner + loser, citations)
## convert to a data frame of binomial observations
citations.sf <- countsToBinomial(citations)

The citations.sf data frame can then be used with BTm as shown in Section 2.1.

8 A list of the functions provided in BradleyTerry2

The standard R help files provide the definitive reference. Here we simply list the main user-level functions and their arguments, as a convenient overview:

## BTabilities(model)
## glmmPQL(fixed, random = NULL, family = "binomial",  
##     data = NULL, subset = NULL, weights = NULL, offset = NULL,  
##     na.action = NULL, start = NULL, etastart = NULL,  
##     mustart = NULL, control = glmmPQL.control(...),  
##     sigma = 0.1, sigma.fixed = FALSE, model = TRUE,  
##     x = FALSE, contrasts = NULL, ...)
## countsToBinomial(xtab)
## plotProportions(win, tie = NULL, loss, player1, player2,  
##     abilities = NULL, home.adv = NULL, tie.max = NULL,  
##     tie.scale = NULL, tie.mode = NULL, at.home1 = NULL,  
##     at.home2 = NULL, data = NULL, subset = NULL, bin.size = 20,  
##     xlab = "P(player1 wins | not a tie)", ylab = "Proportion",  
##     legend = NULL, col = 1:2, ...)
## qvcalc(object, ...)
## glmmPQL.control(maxiter = 50, IWLSiter = 10, tol = 1e-06,  
##     trace = FALSE)
## BTm(outcome = 1, player1, player2, formula = NULL,  
##     id = "..", separate.ability = NULL, refcat = NULL,  
##     family = "binomial", data = NULL, weights = NULL,  
##     subset = NULL, na.action = NULL, start = NULL,  
##     etastart = NULL, mustart = NULL, offset = NULL,  
##     br = FALSE, model = TRUE, x = FALSE, contrasts = NULL,  
##     ...)
## GenDavidson(win, tie, loss, player1, player2, home.adv = NULL,  
##     tie.max = ~1, tie.mode = NULL, tie.scale = NULL,  
##     at.home1 = NULL, at.home2 = NULL)

9 Some final remarks

9.1 A note on the treatment of ties

The present version of BradleyTerry2 provides no sophisticated facilities for handling tied contests/comparisons; the well-known models of Rao and Kupper (1967) and Davidson (1970) are not implemented here. At present the BTm function requires a binary or binomial response variable; a third (‘tied’) category of response is not allowed.

In several of the data examples (e.g., ?CEMS, ?springall, ?sound.fields), ties are handled by the crude but simple device of adding half of a ‘win’ to the tally for each player involved; in each of the examples where this has been done it is found that the result is very similar, after a simple re-scaling, to the more sophisticated analyses that have appeared in the literature. Note that this device when used with BTm typically gives rise to warnings produced by the back-end glm function, about non-integer ‘binomial’ counts; such warnings are of no consequence and can be safely ignored.

It is likely that a future version of BradleyTerry2 will have a more general method for handling ties.

9.2 A note on ‘contest-specific’ random effects

The current version of BradleyTerry2 provides facilities for fitting models with random effects in ‘player-specific’ predictor functions, as illustrated in Section 3. For more general, ‘contest-specific’ random-effect structures, such as random ‘judge’ effects in psychological studies (e.g., Böckenholt 2001), BradleyTerry2 provides (through BTm) the necessary user interface but as yet no back-end calculation. It is hoped that this important generalization can be made successfully in a future version of BradleyTerry2.

Acknowledgments

This work was supported by the UK Engineering and Physical Sciences Research Council.

References

Agresti, A. 2002. Categorical Data Analysis. 2nd ed. John Wiley & Sons.
Bates, Douglas, Martin Mächler, and Ben Bolker. 2011. lme4: Linear Mixed-Effects Models Using S4 Classes. https://CRAN.R-project.org/package=lme4.
Böckenholt, U. 2001. “Hierarchical Modeling of Paired Comparison Data.” Psychological Methods 6 (1): 49–66.
Bradley, R. A. 1984. “Paired Comparisons: Some Basic Procedures and Examples.” In Nonparametric Methods, edited by P. R. Krishnaiah and P. K. Sen, 4:299–326. Handbook of Statistics. Elsevier.
Bradley, R. A., and M. E. Terry. 1952. “Rank Analysis of Incomplete Block Designs I: The Method of Paired Comparisons.” Biometrika 39: 324–45.
Breslow, N E, and D G Clayton. 1993. “Approximate Inference in Generalized Linear Mixed Models.” Journal of the American Statistical Association 88 (421): 9–25.
Davidson, R. R. 1970. “On Extending the Bradley-Terry Model to Accommodate Ties in Paired Comparison Experiments.” Journal of the American Statistical Association 65: 317–28.
Dittrich, R, R Hatzinger, and W Katzenbeisser. 2001. “Corrigendum: Modelling the Effect of Subject-Specific Covariates in Paired Comparison Studies with an Application to University Rankings.” Applied Statistics 50: 247–49.
Firth, David. 1993. “Bias Reduction of Maximum Likelihood Estimates.” Biometrika 80: 27–38.
———. 2005. “Bradley-Terry Models in R.” Journal of Statistical Software 12 (1): 1–12. https://doi.org/10.18637/jss.v012.i01.
———. 2010. qvcalc: Quasi-Variances for Factor Effects in Statistical Models. https://CRAN.R-project.org/package=qvcalc.
Firth, David, and R. X. de Menezes. 2004. “Quasi-Variances.” Biometrika 91: 65–80.
Hatzinger, Reinhold, and Regina Dittrich. 2012. “prefmod: An R Package for Modeling Preferences Based on Paired Comparisons, Rankings, or Ratings.” Journal of Statistical Software 48 (10): 1–31. https://doi.org/10.18637/jss.v048.i10.
Heinze, G, and M Schemper. 2002. “A Solution to the Problem of Separation in Logistic Regression.” Statistics in Medicine 21: 2409–19.
Ihaka, Ross, and Robert Gentleman. 1996. “R: A Language for Data Analysis and Graphics.” Journal of Computational and Graphical Statistics 5 (3): 299–314.
Kosmidis, Ioannis. 2007. brglm: Bias Reduction in Binary-Response GLMs. https://CRAN.R-project.org/package=brglm.
R Development Core Team. 2012. R: A Language and Environment for Statistical Computing. Vienna, Austria: R Foundation for Statistical Computing. https://www.R-project.org/.
Rao, P. V., and L. L. Kupper. 1967. “Ties in Paired-Comparison Experiments: A Generalization of the Bradley-Terry Model.” Journal of the American Statistical Association 62: 194–204.
Sham, P. C., and D. Curtis. 1995. “An Extended Transmission/Disequilibrium Test (TDT) for Multi-Allele Marker Loci.” Annals of Human Genetics 59 (3): 323–36.
Stigler, S. 1994. “Citation Patterns in the Journals of Statistics and Probability.” Statistical Science 9: 94–108.
Turner, Heather, and David Firth. 2012. “Bradley-Terry Models in R: The BradleyTerry2 Package.” Journal of Statistical Software 48 (9): 1–21. https://doi.org/10.18637/jss.v048.i09.
Whiting, Martin J., Devi M. Stuart-Fox, David O’Connor, David Firth, Nigel C. Bennett, and Simon P. Blomberg. 2006. “Ultraviolet Signals Ultra-Aggression in a Lizard.” Animal Behaviour 72: 353–63.
BradleyTerry2/inst/doc/BradleyTerry.R0000644000176200001440000001732114775715606017247 0ustar liggesusers## ----include=FALSE------------------------------------------------------------ library <- function(...) suppressPackageStartupMessages(base::library(...)) library(knitr) opts_chunk$set( tidy=FALSE ) ## ----set_options, echo = FALSE-------------------------------------- options(prompt = "R> ", continue = "+ ", width = 70, useFancyQuotes = FALSE, digits = 7) ## ----LoadBradleyTerry2---------------------------------------------- library("BradleyTerry2") ## ----CitationData--------------------------------------------------- data("citations", package = "BradleyTerry2") ## ----CitationData2-------------------------------------------------- citations ## ----countsToBinomial----------------------------------------------- citations.sf <- countsToBinomial(citations) names(citations.sf)[1:2] <- c("journal1", "journal2") citations.sf ## ----citeModel------------------------------------------------------ citeModel <- BTm(cbind(win1, win2), journal1, journal2, ~ journal, id = "journal", data = citations.sf) citeModel ## ----citeModelupdate------------------------------------------------ update(citeModel, refcat = "JASA") ## ----citeModelupdate2----------------------------------------------- update(citeModel, br = TRUE) ## ----lizModel------------------------------------------------------- options(show.signif.stars = FALSE) data("flatlizards", package = "BradleyTerry2") lizModel <- BTm(1, winner, loser, ~ SVL[..] + (1|..), data = flatlizards) ## ----summarize_lizModel--------------------------------------------- summary(lizModel) ## ----lizModel2------------------------------------------------------ lizModel2 <- BTm(1, winner, loser, ~ throat.PC1[..] + throat.PC3[..] + head.length[..] + SVL[..] + (1|..), data = flatlizards) summary(lizModel2) ## ----baseball------------------------------------------------------- data("baseball", package = "BradleyTerry2") head(baseball) ## ----baseballModel-------------------------------------------------- baseballModel1 <- BTm(cbind(home.wins, away.wins), home.team, away.team, data = baseball, id = "team") summary(baseballModel1) ## ----baseballDataUpdate--------------------------------------------- baseball$home.team <- data.frame(team = baseball$home.team, at.home = 1) baseball$away.team <- data.frame(team = baseball$away.team, at.home = 0) ## ----baseballModelupdate-------------------------------------------- baseballModel2 <- update(baseballModel1, formula = ~ team + at.home) summary(baseballModel2) ## ----CEMSmodel------------------------------------------------------ data("CEMS", package = "BradleyTerry2") table8.model <- BTm(outcome = cbind(win1.adj, win2.adj), player1 = school1, player2 = school2, formula = ~ .. + WOR[student] * LAT[..] + DEG[student] * St.Gallen[..] + STUD[student] * Paris[..] + STUD[student] * St.Gallen[..] + ENG[student] * St.Gallen[..] + FRA[student] * London[..] + FRA[student] * Paris[..] + SPA[student] * Barcelona[..] + ITA[student] * London[..] + ITA[student] * Milano[..] + SEX[student] * Milano[..], refcat = "Stockholm", data = CEMS) ## ----BTabilities---------------------------------------------------- BTabilities(baseballModel2) ## ----BTabilities2--------------------------------------------------- head(BTabilities(lizModel2), 4) ## ----figqvplot, echo=FALSE , fig.cap="Estimated relative abilities of baseball teams.", fig.alt="The ability for Baltimore is fixed at zero, with an interval ranging from -0.5 to 0.5. 
Boston has a relative ability near 1.2; Cleveland around 0.7. The remaining teams have relative abilities around 1.3 to 1.6. The intervals are based on quasi standard errors and all have length of approximately 1. Therefore, aside from Cleveland, all teams are clearly significantly stronger than Baltimore as the intervals do not overlap.", fig.show='hold', fig.align="center", out.width="67.0%"---- knitr::include_graphics(c("baseball-qvplot.png")) ## ----residuals------------------------------------------------------ res.pearson <- round(residuals(lizModel2), 3) head(cbind(flatlizards$contests, res.pearson), 4) ## ----BTresiduals---------------------------------------------------- res <- residuals(lizModel2, type = "grouped") # with(flatlizards$predictors, plot(throat.PC2, res)) # with(flatlizards$predictors, plot(head.width, res)) ## ----residualWLS---------------------------------------------------- lm(res ~ throat.PC1, weights = attr(res, "weights"), data = flatlizards$predictors) lm(res ~ head.length, weights = attr(res, "weights"), data = flatlizards$predictors) ## ----figresiduals, echo=FALSE , fig.cap="Lizard residuals for the simple model lizModel, plotted against throat.PC3.", fig.alt="The residuals are quite spread out over the range -2 to 2, but the distribution is clearly not uniform over the range of the predictor variable, throat.PC3. Residuals between -2 and -1 range correspond to values throat.PC3 between -6 and 4; residuals between -1 and 1 correspond to throat.PC3 values of -4 to 4, and residuals from 1 to 2 correspond to throat.PC3 values between -3 and 6. Thus there is an overall positive correlation bewteen the residuals and throat.PC3.", fig.show='hold', fig.align="center", out.width="69.0%"---- knitr::include_graphics(c("residuals.png")) ## ----baseballModel2_call-------------------------------------------- baseballModel2$call ## ----str_baseball--------------------------------------------------- str(baseball, vec.len = 2) ## ----first_comparison----------------------------------------------- baseball$home.team[1,] baseball$away.team[1,] ## ----first_outcome-------------------------------------------------- baseball[1, c("home.wins", "away.wins")] ## ----str_CEMS------------------------------------------------------- str(CEMS, vec.len = 2) ## ----student-specific_data------------------------------------------ library("prefmod") student <- cemspc[c("ENG", "SEX")] student$ENG <- factor(student$ENG, levels = 1:2, labels = c("good", "poor")) student$SEX <- factor(student$SEX, levels = 1:2, labels = c("female", "male")) ## ----student_factor------------------------------------------------- cems <- list(student = student) student <- gl(303, 1, 303 * 15) #303 students, 15 comparisons contest <- data.frame(student = student) ## ----binomial_response---------------------------------------------- win <- cemspc[, 1:15] == 0 lose <- cemspc[, 1:15] == 2 draw <- cemspc[, 1:15] == 1 contest$win.adj <- c(win + draw/2) contest$lose.adj <- c(lose + draw/2) ## ----school_factors------------------------------------------------- lab <- c("London", "Paris", "Milano", "St. Gallen", "Barcelona", "Stockholm") contest$school1 <- factor(sequence(1:5), levels = 1:6, labels = lab) contest$school2 <- factor(rep(2:6, 1:5), levels = 1:6, labels = lab) ## ----cems_data------------------------------------------------------ cems$contest <- contest ## ----functions, echo = FALSE------------------------------ ## cf. 
prompt options(width = 55) for (fn in getNamespaceExports("BradleyTerry2")) { name <- as.name(fn) args <- formals(fn) n <- length(args) arg.names <- arg.n <- names(args) arg.n[arg.n == "..."] <- "\\dots" is.missing.arg <- function(arg) typeof(arg) == "symbol" && deparse(arg) == "" Call <- paste(name, "(", sep = "") for (i in seq_len(n)) { Call <- paste(Call, arg.names[i], if (!is.missing.arg(args[[i]])) paste(" = ", paste(deparse(args[[i]]), collapse = "\n"), sep = ""), sep = "") if (i != n) Call <- paste(Call, ", ", sep = "") } Call <- paste(Call, ")", sep = "") cat(deparse(parse(text = Call)[[1]], width.cutoff = 50), fill = TRUE) } options(width = 60) BradleyTerry2/inst/doc/BradleyTerry.Rmd0000644000176200001440000011474014775700220017555 0ustar liggesusers--- title: |- Bradley-Terry Models in R abstract: | This is a short overview of the R add-on package **BradleyTerry2**, which facilitates the specification and fitting of Bradley-Terry logit, probit or cauchit models to pair-comparison data. Included are the standard 'unstructured' Bradley-Terry model, structured versions in which the parameters are related through a linear predictor to explanatory variables, and the possibility of an order or 'home advantage' effect or other 'contest-specific' effects. Model fitting is either by maximum likelihood, by penalized quasi-likelihood (for models which involve a random effect), or by bias-reduced maximum likelihood in which the first-order asymptotic bias of parameter estimates is eliminated. Also provided are a simple and efficient approach to handling missing covariate data, and suitably-defined residuals for diagnostic checking of the linear predictor. date: |- For **BradleyTerry2** version `r packageDescription("BradleyTerry2")[["Version"]]`, `r Sys.Date()` vignette: |- %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} %\VignetteIndexEntry{Bradley-Terry Models in R} %\VignetteDepends{} output: function(){ if (requireNamespace('bookdown', quietly = TRUE)) { function(...){ bookdown::html_document2(..., base_format = rmarkdown::html_vignette, number_sections = TRUE, math_method = "mathjax") } } else function(...){ rmarkdown::html_vignette(..., number_sections = TRUE, math_method = "mathjax") }}() link-citations: yes bibliography: BradleyTerry.bib --- ``` {r include=FALSE} library <- function(...) 
suppressPackageStartupMessages(base::library(...)) library(knitr) opts_chunk$set( tidy=FALSE ) ``` ``` {r set_options, echo = FALSE} options(prompt = "R> ", continue = "+ ", width = 70, useFancyQuotes = FALSE, digits = 7) ``` ## Contents { .unnumbered} \@ref(sec:intro) [Introduction] \@ref(sec:BTmodel) [Standard Bradley-Terry model]     \@ref(sec:citations) [Example: Analysis of journal citations]     \@ref(sec:bias-reduced) [Bias-reduced estimates] \@ref(sec:covariates) [Abilities predicted by explanatory variables]     \@ref(sec:player-specific) ['Player-specific' predictor variables]     \@ref(sec:missing) [Missing values]     \@ref(sec:order) [Order effect]     \@ref(sec:CEMS) [More general (contest-specific) predictors] \@ref(sec:ability) [Ability scores] \@ref(sec:residuals) [Residuals] \@ref(sec:model) [Model search] \@ref(sec:data) [Setting up the data]     \@ref(sec:contest) [Contest-specific data]     \@ref(sec:non-contest) [Non contest-specific data]     \@ref(sec:wide) [Converting data from a 'wide' format]     \@ref(sec:BradleyTerry) [Converting data from the format required by the earlier **BradleyTerry** package] \@ref(sec:functions) [A list of the functions provided in **BradleyTerry2**] \@ref(sec:finalremarks) [Some final remarks]     \@ref(sec:ties) [A note on the treatment of ties]     \@ref(sec:random-effects) [A note on 'contest-specific' random effects] [Acknowledgments] [References] ## Introduction {#sec:intro} The Bradley-Terry model [@brad:terr:52] assumes that in a 'contest' between any two 'players', say player $i$ and player $j$ $(i, j \in \{1,\ldots,K\})$, the odds that $i$ beats $j$ are $\alpha_i/\alpha_j$, where $\alpha_i$ and $\alpha_j$ are positive-valued parameters which might be thought of as representing 'ability'. A general introduction can be found in @brad:84 or @agre:02. Applications are many, ranging from experimental psychology to the analysis of sports tournaments to genetics [for example, the allelic transmission/disequilibrium test of @sham:curt:95 is based on a Bradley-Terry model in which the 'players' are alleles]. In typical psychometric applications the 'contests' are comparisons, made by different human subjects, between pairs of items. The model can alternatively be expressed in the logit-linear form $$\mathop{\rm logit}[\mathop{\rm pr}(i\ \mathrm{beats}\ j)]=\lambda_i-\lambda_j, \label{eq:unstructured} (\#eq:unstructured)$$ where $\lambda_i=\log\alpha_i$ for all $i$. Thus, assuming independence of all contests, the parameters $\{\lambda_i\}$ can be estimated by maximum likelihood using standard software for generalized linear models, with a suitably specified model matrix. The primary purpose of the **BradleyTerry2** package [@turn:12], implemented in the R statistical computing environment [@ihak:gent:96;@r], is to facilitate the specification and fitting of such models and some extensions. The **BradleyTerry2** package supersedes the earlier **BradleyTerry** package [@firt:05], providing a more flexible user interface to allow a wider range of models to be fitted. In particular, **BradleyTerry2** allows the inclusion of simple random effects so that the ability parameters can be related to available explanatory variables through a linear predictor of the form $$\lambda_i=\sum_{r=1}^p\beta_rx_{ir} + U_i. (\#eq:autonumber2) $$ The inclusion of the prediction error $U_i$ allows for variability between players with equal covariate values and induces correlation between comparisons with a common player. 
**BradleyTerry2** also allows for general contest-specific effects to be included in the model and allows the logit link to be replaced, if required, by a different symmetric link function (probit or cauchit). The remainder of the paper is organised as follows. Section \@ref(sec:BTmodel) demonstrates how to use the **BradleyTerry2** package to fit a standard (i.e., unstructured) Bradley-Terry model, with a separate ability parameter estimated for each player, including the use of bias-reduced estimation for such models. Section \@ref(sec:covariates) considers variations of the standard model, including the use of player-specific variables to model ability and allowing for contest-specific effects such as an order effect or judge effects. Sections \@ref(sec:ability) and \@ref(sec:residuals) explain how to obtain important information about a fitted model, in particular the estimates of ability and their standard errors, and player-level residuals, whilst Section \@ref(sec:model) notes the functions available to aid model search. Section \@ref(sec:data) explains in more detail how set up data for use with the **BradleyTerry2** package, Section \@ref(sec:functions) lists the functions provided by the package and finally Section \@ref(sec:finalremarks) comments on two directions for further development of the software. ## Standard Bradley-Terry model {#sec:BTmodel} ### Example: Analysis of journal citations {#sec:citations} The following data come from page 448 of @agre:02, extracted from the larger table of @stig:94. The data are counts of citations among four prominent journals of statistics and are included the **BradleyTerry2** package as the data set `citations`: ``` {r LoadBradleyTerry2} library("BradleyTerry2") ``` ``` {r CitationData} data("citations", package = "BradleyTerry2") ``` ``` {r CitationData2} citations ``` Thus, for example, *Biometrika* was cited 498 times by papers in *Journal of the American Statistical Association* (JASA) during the period under study. In order to fit a Bradley-Terry model to these data using `BTm` from the **BradleyTerry2** package, the data must first be converted to binomial frequencies. That is, the data need to be organised into pairs (`player1`, `player2`) and corresponding frequencies of wins and losses for `player1` against `player2`. The **BradleyTerry2** package provides the utility function `countsToBinomial` to convert a contingency table of wins to the format just described: ``` {r countsToBinomial} citations.sf <- countsToBinomial(citations) names(citations.sf)[1:2] <- c("journal1", "journal2") citations.sf ``` Note that the self-citation counts are ignored -- these provide no information on the ability parameters, since the abilities are relative rather than absolute quantities. The binomial response can then be modelled by the difference in player abilities as follows: ``` {r citeModel} citeModel <- BTm(cbind(win1, win2), journal1, journal2, ~ journal, id = "journal", data = citations.sf) citeModel ``` The coefficients here are maximum likelihood estimates of $\lambda_2, \lambda_3, \lambda_4$, with $\lambda_1$ (the log-ability for *Biometrika*) set to zero as an identifying convention. The one-sided model formula ``` r ~ journal ``` specifies the model for player ability, in this case the 'citeability' of the journal. The `id` argument specifies that `"journal"` is the name to be used for the factor that identifies the player -- the values of which are given here by `journal1` and `journal2` for the first and second players respectively. 
Therefore in this case a separate citeability parameter is estimated for each journal. If a different 'reference' journal is required, this can be achieved using the optional `refcat` argument: for example, making use of `update` to avoid re-specifying the whole model, ``` {r citeModelupdate} update(citeModel, refcat = "JASA") ``` -- the same model in a different parameterization. The use of the standard Bradley-Terry model for this application might perhaps seem rather questionable -- for example, citations within a published paper can hardly be considered independent, and the model discards potentially important information on self-citation. @stig:94 provides arguments to defend the model's use despite such concerns. ### Bias-reduced estimates {#sec:bias-reduced} Estimation of the standard Bradley-Terry model in `BTm` is by default computed by maximum likelihood, using an internal call to the `glm` function. An alternative is to fit by bias-reduced maximum likelihood [@firt:93]: this requires additionally the **brglm** package [@kosm:07], and is specified by the optional argument `br = TRUE`. The resultant effect, namely removal of first-order asymptotic bias in the estimated coefficients, is often quite small. One notable feature of bias-reduced fits is that all estimated coefficients and standard errors are necessarily finite, even in situations of 'complete separation' where maximum likelihood estimates take infinite values [@hein:sche:02]. For the citation data, the parameter estimates are only very slightly changed in the bias-reduced fit: ``` {r citeModelupdate2} update(citeModel, br = TRUE) ``` Here the bias of maximum likelihood is small because the binomial counts are fairly large. In more sparse arrangements of contests -- that is, where there is less or no replication of the contests -- the effect of bias reduction would typically be more substantial than the insignificant one seen here. ## Abilities predicted by explanatory variables {#sec:covariates} ### 'Player-specific' predictor variables {#sec:player-specific} In some application contexts there may be 'player-specific' explanatory variables available, and it is then natural to consider model simplification of the form $$\lambda_i=\sum_{r=1}^p\beta_rx_{ir} + U_i, (\#eq:autonumber3) $$ in which ability of each player $i$ is related to explanatory variables $x_{i1},\ldots,x_{ip}$ through a linear predictor with coefficients $\beta_1,\ldots,\beta_p$; the $\{U_i\}$ are independent errors. Dependence of the player abilities on explanatory variables can be specified via the `formula` argument, using the standard *S*-language model formulae. The difference in the abilities of player $i$ and player $j$ is modelled by $$\sum_{r=1}^p\beta_rx_{ir} - \sum_{r=1}^p\beta_rx_{jr} + U_i - U_j, \label{eq:structured} (\#eq:structured)$$ where $U_i \sim N(0, \sigma^2)$ for all $i$. The Bradley-Terry model is then a generalized linear mixed model, which the `BTm` function currently fits by using the penalized quasi-likelihood algorithm of @bres:93. As an illustration, consider the following simple model for the `flatlizards` data, which predicts the fighting ability of Augrabies flat lizards by body size (snout to vent length): ``` {r lizModel} options(show.signif.stars = FALSE) data("flatlizards", package = "BradleyTerry2") lizModel <- BTm(1, winner, loser, ~ SVL[..] + (1|..), data = flatlizards) ``` Here the winner of each fight is compared to the loser, so the outcome is always 1. 
The special name '`..`' appears in the formula as the default identifier for players, in the absence of a user-specified `id` argument. The values of this factor are given by `winner` for the winning lizard and `loser` for the losing lizard in each contest. These factors are provided in the data frame `contests` that is the first element of the list object `flatlizards`. The second element of `flatlizards` is another data frame, `predictors`, containing measurements on the observed lizards, including `SVL`, which is the snout to vent length. Thus `SVL[..]` represents the snout to vent length indexed by lizard (`winner` or `loser` as appropriate). Finally a random intercept for each lizard is included using the bar notation familiar to users of the **lme4** package [@bate:11]. (Note that a random intercept is the only random effect structure currently implemented in **BradleyTerry2**.) The fitted model is summarized below: ``` {r summarize_lizModel} summary(lizModel) ``` The coefficient of snout to vent length is weakly significant; however, the standard deviation of the random effect is quite large, suggesting that this simple model has fairly poor explanatory power. A more appropriate model is considered in the next section. ### Missing values {#sec:missing} The contest data may include all possible pairs of players and hence rows of missing data corresponding to players paired with themselves. Such rows contribute no information to the Bradley-Terry model and are simply discarded by `BTm`. Where there are missing values in player-specific *predictor* (or *explanatory*) variables which appear in the formula, it will typically be very wasteful to discard all contests involving players for which some values are missing. Instead, such cases are accommodated by the inclusion of one or more parameters in the model. If, for example, player $1$ has one or more of its predictor values $x_{11},\ldots,x_{1p}$ missing, then the combination of Equations \@ref(eq:unstructured) and \@ref(eq:structured) above yields $$\mathop{\rm logit}[\mathop{\rm pr}(1\ \mathrm{beats}\ j)]=\lambda_1 - \left(\sum_{r=1}^p\beta_rx_{jr} + U_j\right), (\#eq:autonumber5) $$ for all other players $j$. This results in the inclusion of a 'direct' ability parameter for each player having missing predictor values, in addition to the common coefficients $\beta_1,\ldots,\beta_p$ -- an approach which will be appropriate when the missingness mechanism is unrelated to contest success. The same device can be used also to accommodate any user-specified departures from a structured Bradley-Terry model, whereby some players have their abilities determined by the linear predictor but others do not. In the original analysis of the `flatlizards` data [@whit:06], the final model included the first and third principal components of the spectral reflectance from the throat (representing brightness and UV intensity respectively) as well as head length and the snout to vent length seen in our earlier model. The spectroscopy data was missing for two lizards, therefore the ability of these lizards was estimated directly. The following fits this model, with the addition of a random intercept as before: ``` {r lizModel2} lizModel2 <- BTm(1, winner, loser, ~ throat.PC1[..] + throat.PC3[..] + head.length[..] + SVL[..] + (1|..), data = flatlizards) summary(lizModel2) ``` Note that `BTm` detects that lizards 96 and 99 have missing values in the specified predictors and automatically includes separate ability parameters for these lizards. 
This model was found to be the single best model based on the principal components of reflectance and the other predictors available and indeed the standard deviation of the random intercept is much reduced, but still highly significant. Allowing for this significant variation between lizards with the same predictor values produces more realistic (i.e., larger) standard errors for the parameters when compared to the original analysis of @whit:06. Although this affects the significance of the morphological variables, it does not affect the significance of the principal components, so in this case does not affect the main conclusions of the study. ### Order effect {#sec:order} In certain types of application some or all contests have an associated 'bias', related to the order in which items are presented to a judge or with the location in which a contest takes place, for example. A natural extension of the Bradley-Terry model (Equation \@ref(eq:unstructured)) is then $$\mathop{\rm logit}[\mathop{\rm pr}(i\ \mathrm{beats}\ j)]=\lambda_i-\lambda_j + \delta z, (\#eq:autonumber6) $$ where $z=1$ if $i$ has the supposed advantage and $z=-1$ if $j$ has it. (If the 'advantage' is in fact a disadvantage, $\delta$ will be negative.) The scores $\lambda_i$ then relate to ability in the absence of any such advantage. As an example, consider the baseball data given in @agre:02, page 438: ``` {r baseball} data("baseball", package = "BradleyTerry2") head(baseball) ``` The data set records the home wins and losses for each baseball team against each of the 6 other teams in the data set. The `head` function is used to show the first 6 records, which are the Milwaukee home games. We see for example that Milwaukee played 7 home games against Detroit and won 4 of them. The 'standard' Bradley-Terry model without a home-advantage parameter will be fitted if no formula is specified in the call to `BTm`: ``` {r baseballModel} baseballModel1 <- BTm(cbind(home.wins, away.wins), home.team, away.team, data = baseball, id = "team") summary(baseballModel1) ``` The reference team is Baltimore, estimated to be the weakest of these seven, with Milwaukee and Detroit the strongest. In the above, the ability of each team is modelled simply as `  team` where the values of the factor `team` are given by `home.team` for the first team and `away.team` for the second team in each game. To estimate the home-advantage effect, an additional variable is required to indicate whether the team is at home or not. Therefore data frames containing both the team factor and this new indicator variable are required in place of the factors `home.team` and `away.team` in the call to `BTm`. This is achieved here by over-writing the `home.team` and `away.team` factors in the `baseball` data frame: ``` {r baseballDataUpdate} baseball$home.team <- data.frame(team = baseball$home.team, at.home = 1) baseball$away.team <- data.frame(team = baseball$away.team, at.home = 0) ``` The `at.home` variable is needed for both the home team and the away team, so that it can be differenced as appropriate in the linear predictor. With the data organised in this way, the ability formula can now be updated to include the `at.home` variable as follows: ``` {r baseballModelupdate} baseballModel2 <- update(baseballModel1, formula = ~ team + at.home) summary(baseballModel2) ``` This reproduces the results given on page 438 of @agre:02: the home team has an estimated odds-multiplier of $\exp(0.3023) = 1.35$ in its favour. 
### More general (contest-specific) predictors {#sec:CEMS} The 'home advantage' effect is a simple example of a contest-specific predictor. Such predictors are necessarily interactions, between aspects of the contest and (aspects of) the two 'players' involved. For more elaborate examples of such effects, see `?chameleons` and `?CEMS`. The former includes an 'experience' effect, which changes through time, on the fighting ability of male chameleons. The latter illustrates a common situation in psychometric applications of the Bradley-Terry model, where *subjects* express preference for one of two *objects* (the 'players'), and it is the influence on the results of subject attributes that is of primary interest. As an illustration of the way in which such effects are specified, consider the following model specification taken from the examples in `?CEMS`, where data on students' preferences in relation to six European management schools is analysed. ``` {r CEMSmodel} data("CEMS", package = "BradleyTerry2") table8.model <- BTm(outcome = cbind(win1.adj, win2.adj), player1 = school1, player2 = school2, formula = ~ .. + WOR[student] * LAT[..] + DEG[student] * St.Gallen[..] + STUD[student] * Paris[..] + STUD[student] * St.Gallen[..] + ENG[student] * St.Gallen[..] + FRA[student] * London[..] + FRA[student] * Paris[..] + SPA[student] * Barcelona[..] + ITA[student] * London[..] + ITA[student] * Milano[..] + SEX[student] * Milano[..], refcat = "Stockholm", data = CEMS) ``` This model reproduces results from Table 8 of @ditt:01 apart from minor differences due to the different treatment of ties. Here the outcome is the binomial frequency of preference for `school1` over `school2`, with ties counted as half a 'win' and half a 'loss'. The formula specifies the model for school 'ability' or worth. In this formula, the default label '`..`' represents the school (with values given by `school1` or `school2` as appropriate) and `student` is a factor specifying the student that made the comparison. The remaining variables in the formula use [R]{.sans-serif}'s standard indexing mechanism to include student-specific variables, e.g., `WOR`: whether or not the student was in full-time employment, and school-specific variables, e.g., `LAT`: whether the school was in a 'Latin' city. Thus there are three types of variables: contest-specific (`school1`, `school2`, `student`), subject-specific (`WOR`, `DEG`, ...) and object-specific (`LAT`, `St.Gallen`, ...). These three types of variables are provided in three data frames, contained in the list object `CEMS`. ## Ability scores {#sec:ability} The function `BTabilities` extracts estimates and standard errors for the log-ability scores $\lambda_1, \ldots,\lambda_K$. These will either be 'direct' estimates, in the case of the standard Bradley-Terry model or for players with one or more missing predictor values, or 'model-based' estimates of the form $\hat\lambda_i=\sum_{r=1}^p\hat\beta_rx_{ir}$ for players whose ability is predicted by explanatory variables. As a simple illustration, team ability estimates in the home-advantage model for the `baseball` data are obtained by: ``` {r BTabilities} BTabilities(baseballModel2) ``` This gives, for each team, the estimated ability when the team enjoys no home advantage. 
Similarly, estimates of the fighting ability of each lizard in the `flatlizards` data under the model based on the principal components of the spectral reflectance from the throat are obtained as follows: ``` {r BTabilities2} head(BTabilities(lizModel2), 4) ``` The ability estimates in an unstructured Bradley-Terry model are particularly well suited to presentation using the device of *quasi-variances* [@firt:04]. The **qvcalc** package [@firt:10,version 0.8-5 or later] contains a function of the same name which does the necessary work: ``` r > library("qvcalc") > baseball.qv <- qvcalc(BTabilities(baseballModel2)) > plot(baseball.qv, + levelNames = c("Bal", "Bos", "Cle", "Det", "Mil", "NY", "Tor")) ``` ```{r figqvplot, echo=FALSE , fig.cap="Estimated relative abilities of baseball teams.", fig.alt="The ability for Baltimore is fixed at zero, with an interval ranging from -0.5 to 0.5. Boston has a relative ability near 1.2; Cleveland around 0.7. The remaining teams have relative abilities around 1.3 to 1.6. The intervals are based on quasi standard errors and all have length of approximately 1. Therefore, aside from Cleveland, all teams are clearly significantly stronger than Baltimore as the intervals do not overlap.", fig.show='hold', fig.align="center", out.width="67.0%"} knitr::include_graphics(c("baseball-qvplot.png")) ``` The 'comparison intervals' as shown in Figure \@ref(fig:figqvplot) are based on 'quasi standard errors', and can be interpreted as if they refer to *independent* estimates of ability for the journals. This has the advantage that comparison between any pair of journals is readily made (i.e., not only comparisons with the 'reference' journal). For details of the theory and method of calculation see @firt:04. ## Residuals {#sec:residuals} There are two main types of residuals available for a Bradley-Terry model object. First, there are residuals obtained by the standard methods for models of class `"glm"`. These all deliver one residual for each contest or type of contest. For example, Pearson residuals for the model `lizModel2` can be obtained simply by ``` {r residuals} res.pearson <- round(residuals(lizModel2), 3) head(cbind(flatlizards$contests, res.pearson), 4) ``` More useful for diagnostics on the linear predictor $\sum\beta_rx_{ir}$ are 'player'-level residuals, obtained by using the function `residuals` with argument `type = "grouped"`. These residuals can then be plotted against other player-specific variables. ``` {r BTresiduals} res <- residuals(lizModel2, type = "grouped") # with(flatlizards$predictors, plot(throat.PC2, res)) # with(flatlizards$predictors, plot(head.width, res)) ``` These residuals estimate the error in the linear predictor; they are obtained by suitable aggregation of the so-called 'working' residuals from the model fit. The `weights` attribute indicates the relative information in these residuals -- weight is roughly inversely proportional to variance -- which may be useful for plotting and/or interpretation; for example, a large residual may be of no real concern if based on very little information. Weighted least-squares regression of these residuals on any variable already in the model is null. 
For example:

``` {r residualWLS}
lm(res ~ throat.PC1, weights = attr(res, "weights"),
   data = flatlizards$predictors)
lm(res ~ head.length, weights = attr(res, "weights"),
   data = flatlizards$predictors)
```

As an illustration of evident *non-null* residual structure, consider the unrealistically simple model `lizModel` that was fitted in Section \@ref(sec:covariates) above. That model lacks the clearly significant predictor variable `throat.PC3`, and the plot shown in Figure \@ref(fig:figresiduals) demonstrates this fact graphically:

``` r
lizModel.residuals <- residuals(lizModel, type = "grouped")
plot(flatlizards$predictors$throat.PC3, lizModel.residuals)
```

```{r figresiduals, echo=FALSE, fig.cap="Lizard residuals for the simple model lizModel, plotted against throat.PC3.", fig.alt="The residuals are quite spread out over the range -2 to 2, but the distribution is clearly not uniform over the range of the predictor variable, throat.PC3. Residuals between -2 and -1 correspond to throat.PC3 values between -6 and 4; residuals between -1 and 1 correspond to throat.PC3 values of -4 to 4, and residuals from 1 to 2 correspond to throat.PC3 values between -3 and 6. Thus there is an overall positive correlation between the residuals and throat.PC3.", fig.show='hold', fig.align="center", out.width="69.0%"}
knitr::include_graphics(c("residuals.png"))
```

The residuals in the plot exhibit a strong, positive regression slope in relation to the omitted predictor variable `throat.PC3`.

## Model search {#sec:model}

In addition to `update()` as illustrated in preceding sections, methods for the generic functions `add1()`, `drop1()` and `anova()` are provided. These can be used to investigate the effect of adding or removing a variable, whether that variable is contest-specific, such as an order effect, or player-specific; and to compare the fit of nested models.

## Setting up the data {#sec:data}

### Contest-specific data {#sec:contest}

The `outcome` argument of `BTm` represents a binomial response and can be supplied in any of the formats allowed by the `glm` function. That is, either a two-column matrix with the columns giving the number of wins and losses (for `player1` vs. `player2`), a factor where the first level denotes a loss and all other levels denote a win, or a binary variable where 0 denotes a loss and 1 denotes a win. Each row represents either a single contest or a set of contests between the same two players.

The `player1` and `player2` arguments are either factors specifying the two players in each contest, or data frames containing such factors, along with any contest-specific variables that are also player-specific, such as the `at.home` variable seen in Section \@ref(sec:order). If given in data frames, the factors identifying the players should be named as specified by the `id` argument and should have identical levels, since they represent a particular sample of the full set of players.

Thus for the model `baseballModel2`, which was specified by the following call:

``` {r baseballModel2_call}
baseballModel2$call
```

the data are provided in the `baseball` data frame, which has the following structure:

``` {r str_baseball}
str(baseball, vec.len = 2)
```

In this case `home.team` and `away.team` are both data frames, with the factor `team` specifying the team and the variable `at.home` specifying whether or not the team was at home. So the first comparison

``` {r first_comparison}
baseball$home.team[1,]
baseball$away.team[1,]
```

is Milwaukee playing at home against Detroit.
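As noted above, the factors identifying the players must have identical levels. A quick check along the following lines (a minimal sketch, not run here, using only base [R]{.sans-serif} functions) can catch a mis-specified pair of player factors before fitting:

``` r
stopifnot(identical(levels(baseball$home.team$team),
                    levels(baseball$away.team$team)))
```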
The outcome is given by ``` {r first_outcome} baseball[1, c("home.wins", "away.wins")] ``` Contest-specific variables that are *not* player-specific -- for example, whether it rained or not during a contest -- should only be used in interactions with variables that *are* player-specific, otherwise the effect on ability would be the same for both players and would cancel out. Such variables can conveniently be provided in a single data frame along with the `outcome`, `player1` and `player2` data. An offset in the model can be specified by using the `offset` argument to `BTm`. This facility is provided for completeness: the authors have not yet encountered an application where it is needed. To use only certain rows of the contest data in the analysis, the `subset` argument may be used in the call to `BTm`. This should either be a logical vector of the same length as the binomial response, or a numeric vector containing the indices of rows to be used. ### Non contest-specific data {#sec:non-contest} Some variables do not vary by contest directly, but rather vary by a factor that is contest-specific, such as the player ID or the judge making the paired comparison. For such variables, it is more economical to store the data by the levels of the contest-specific factor and use indexing to obtain the values for each contest. The `CEMS` example in Section \@ref(sec:CEMS) provides an illustration of such variables. In this example student-specific variables are indexed by `student` and school-specific variables are indexed by `..`, i.e., the first or second school in the comparison as appropriate. There are then two extra sets of variables in addition to the usual contest-specific data as described in the last section. A good way to provide these data to `BTm` is as a list of data frames, one for each set of variables, e.g., ``` {r str_CEMS} str(CEMS, vec.len = 2) ``` The names of the data frames are only used by `BTm` if they match the names specified in the `player1` and `player2` arguments, in which case it is assumed that these are data frames providing the data for the first and second player respectively. The rows of data frames in the list should either correspond to the contests or the levels of the factor used for indexing. Player-specific offsets should be included in the formula by using the `offset` function. ### Converting data from a 'wide' format {#sec:wide} The `BTm` function requires data in a 'long' format, with one row per contest, provided either directly as in Section \@ref(sec:contest) or via indexing as in Section \@ref(sec:non-contest). In studies where the same set of paired comparisons are made by several judges, as in a questionnaire for example, the data may be stored in a 'wide' format, with one row per judge. As an example, consider the `cemspc` data from the **prefmod** package [@hatz:12], which provides data from the CEMS study in a wide format. Each row corresponds to one student; the first 15 columns give the outcome of all pairwise comparisons between the 6 schools in the study and the last two columns correspond to two of the student-specific variables: `ENG` (indicating the student's knowledge of English) and `SEX` (indicating the student's gender). The following steps convert these data into a form suitable for analysis with `BTm`. 
First a new data frame is created from the student-specific variables and these variables are converted to factors: ``` {r student-specific_data} library("prefmod") student <- cemspc[c("ENG", "SEX")] student$ENG <- factor(student$ENG, levels = 1:2, labels = c("good", "poor")) student$SEX <- factor(student$SEX, levels = 1:2, labels = c("female", "male")) ``` This data frame is put into a list, which will eventually hold all the necessary data. Then a `student` factor is created for indexing the student data to produce contest-level data. This is put in a new data frame that will hold the contest-specific data. ``` {r student_factor} cems <- list(student = student) student <- gl(303, 1, 303 * 15) #303 students, 15 comparisons contest <- data.frame(student = student) ``` Next the outcome data is converted to a binomial response, adjusted for ties. The result is added to the `contest` data frame. ``` {r binomial_response} win <- cemspc[, 1:15] == 0 lose <- cemspc[, 1:15] == 2 draw <- cemspc[, 1:15] == 1 contest$win.adj <- c(win + draw/2) contest$lose.adj <- c(lose + draw/2) ``` Then two factors are created identifying the first and second school in each comparison. The comparisons are in the order 1 vs. 2, 1 vs. 3, 2 vs. 3, 1 vs. 4, ..., so the factors can be created as follows: ``` {r school_factors} lab <- c("London", "Paris", "Milano", "St. Gallen", "Barcelona", "Stockholm") contest$school1 <- factor(sequence(1:5), levels = 1:6, labels = lab) contest$school2 <- factor(rep(2:6, 1:5), levels = 1:6, labels = lab) ``` Note that both factors have exactly the same levels, even though only five of the six players are represented in each case. In other words, the numeric factor levels refer to the same players in each case, so that the player is unambiguously identified. This ensures that player-specific parameters and player-specific covariates are correctly specified. Finally the `contest` data frame is added to the main list: ``` {r cems_data} cems$contest <- contest ``` This creates a single data object that can be passed to the `data` argument of `BTm`. Of course, such a list could be created on-the-fly as in `data = list(contest, student)`, which may be more convenient in practice. ### Converting data from the format required by the earlier **BradleyTerry** package {#sec:BradleyTerry} The **BradleyTerry** package described in @firt:05 required contest/comparison results to be in a data frame with columns named `winner`, `loser` and `Freq`. The following example shows how `xtabs` and `countsToBinomial` can be used to convert such data for use with the `BTm` function in **BradleyTerry2**: ``` r library("BradleyTerry") ## the /old/ BradleyTerry package ## load data frame with columns "winner", "loser", "Freq" data("citations", package = "BradleyTerry") ## convert to 2-way table of counts citations <- xtabs(Freq ~ winner + loser, citations) ## convert to a data frame of binomial observations citations.sf <- countsToBinomial(citations) ``` The `citations.sf` data frame can then be used with `BTm` as shown in Section \@ref(sec:citations). ## A list of the functions provided in **BradleyTerry2** {#sec:functions} The standard R help files provide the definitive reference. Here we simply list the main user-level functions and their arguments, as a convenient overview: ``` {r functions, echo = FALSE} ## cf. 
prompt
options(width = 55)
for (fn in getNamespaceExports("BradleyTerry2")) {
    name <- as.name(fn)
    args <- formals(fn)
    n <- length(args)
    arg.names <- arg.n <- names(args)
    arg.n[arg.n == "..."] <- "\\dots"
    is.missing.arg <- function(arg) typeof(arg) == "symbol" &&
        deparse(arg) == ""
    Call <- paste(name, "(", sep = "")
    for (i in seq_len(n)) {
        Call <- paste(Call, arg.names[i],
                      if (!is.missing.arg(args[[i]]))
                          paste(" = ",
                                paste(deparse(args[[i]]), collapse = "\n"),
                                sep = ""),
                      sep = "")
        if (i != n) Call <- paste(Call, ", ", sep = "")
    }
    Call <- paste(Call, ")", sep = "")
    cat(deparse(parse(text = Call)[[1]], width.cutoff = 50), fill = TRUE)
}
options(width = 60)
```

## Some final remarks {#sec:finalremarks}

### A note on the treatment of ties {#sec:ties}

The present version of **BradleyTerry2** provides no sophisticated facilities for handling tied contests/comparisons; the well-known models of @rao:kupp:67 and @davi:70 are not implemented here. At present the `BTm` function requires a binary or binomial response variable; a third ('tied') category of response is not allowed. In several of the data examples (e.g., `?CEMS`, `?springall`, `?sound.fields`), ties are handled by the crude but simple device of adding half of a 'win' to the tally for each player involved; in each of the examples where this has been done it is found that the result is very similar, after a simple re-scaling, to the more sophisticated analyses that have appeared in the literature. Note that this device, when used with `BTm`, typically gives rise to warnings from the back-end `glm` function about non-integer 'binomial' counts; such warnings are of no consequence and can be safely ignored. It is likely that a future version of **BradleyTerry2** will have a more general method for handling ties.

### A note on 'contest-specific' random effects {#sec:random-effects}

The current version of **BradleyTerry2** provides facilities for fitting models with random effects in 'player-specific' predictor functions, as illustrated in Section \@ref(sec:covariates). For more general, 'contest-specific' random-effect structures, such as random 'judge' effects in psychological studies [e.g., @bock:01], **BradleyTerry2** provides (through `BTm`) the necessary user interface but as yet no back-end calculation. It is hoped that this important generalization can be made successfully in a future version of **BradleyTerry2**.

## Acknowledgments {#sec:acknowledgments .unnumbered}

This work was supported by the UK Engineering and Physical Sciences Research Council.
## References {#sec:references .unnumbered}

BradleyTerry2/inst/WORDLIST0000644000176200001440000000146214775237530015133 0ustar liggesusers
achler Acknowledgments Agresti anova AppVeyor Augrabies Babiker Bal baseballModel Biometrika Bolker Bos br BradleyTerry Breslow brglm broadleyi BTabilities BTglmmPQL BTm casque cauchit CEMS cemspc Chisq citeability Cle com countsToBinomial Crowder Decarli Det DG Dittrich DM doi ECAC ed Edn ENG erry espn etc Fahrmeir flatlizards Gade Gallen GenDavidson github glm GLMM glmmPQL GLMMs gnm Harville Hatzinger Heinze Hobert Ihaka JRSS JSS Katzenbeisser Kosmidis Kousgaard Krishnaiah Kupper LAT levelNames lizModel lme Menezes Mil MJ Modeling Moussalli NC NCAA's nd nonlin numer ockenholt odelling Peto Platysaurus Poisson PQL prefmod PV qv qvcalc radley refcat Ridout RR RX Schemper Schlobotnik Sen soccernet SP springall Springall Springall's stackoverflow Stat SVL TDT th Trento Tutz urls urlstyle useR WOR Xlisp xtabs
BradleyTerry2/README.md0000644000176200001440000000150714775676270014243 0ustar liggesusers

# BradleyTerry2

[![CRAN_Status_Badge](https://www.r-pkg.org/badges/version/BradleyTerry2)](https://cran.r-project.org/package=BradleyTerry2)

Specify and fit the Bradley-Terry model, including structured versions in which the parameters are related to explanatory variables through a linear predictor and versions with contest-specific effects, such as a home advantage.

## Installation

You can install **BradleyTerry2** from github with:

``` r
# install.packages("devtools")
devtools::install_github("hturner/BradleyTerry2")
```

## Code of conduct

Please note that this project is released with a [Contributor Code of Conduct](https://github.com/hturner/BradleyTerry2/blob/master/CONDUCT.md). By participating in this project you agree to abide by its terms.
BradleyTerry2/man/0000755000176200001440000000000014775676220013537 5ustar liggesusers
BradleyTerry2/man/qvcalc.BTabilities.Rd0000644000176200001440000000651714775673305017471 0ustar liggesusers
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/qvcalc.BTabilities.R
\name{qvcalc.BTabilities}
\alias{qvcalc.BTabilities}
\title{Quasi Variances for Estimated Abilities}
\usage{
\method{qvcalc}{BTabilities}(object, ...)
} \arguments{ \item{object}{a \code{"BTabilities"} object as returned by \code{\link[=BTabilities]{BTabilities()}}.} \item{...}{additional arguments, currently ignored.} } \value{ A list of class \code{"qv"}, with components \item{covmat}{The full variance-covariance matrix for the estimated abilities.} \item{qvframe}{A data frame with variables \code{estimate}, \code{SE}, \code{quasiSE} and \code{quasiVar}, the last two being a quasi standard error and quasi-variance for each ability.} \item{dispersion}{\code{NULL} (dispersion is fixed to 1).} \item{relerrs}{Relative errors for approximating the standard errors of all simple contrasts.} \item{factorname}{The name of the ID factor identifying players in the \code{BTm} formula.} \item{coef.indices}{\code{NULL} (no required for this method).} \item{modelcall}{The call to \code{BTm} to fit the Bradley-Terry model from which the abilities were estimated.} } \description{ A method for \code{\link[qvcalc:qvcalc]{qvcalc::qvcalc()}} to compute a set of quasi variances (and corresponding quasi standard errors) for estimated abilities from a Bradley-Terry model as returned by \code{\link[=BTabilities]{BTabilities()}}. } \details{ For details of the method see Firth (2000), Firth (2003) or Firth and de Menezes (2004). Quasi variances generalize and improve the accuracy of \dQuote{floating absolute risk} (Easton et al., 1991). This device for economical model summary was first suggested by Ridout (1989). Ordinarily the quasi variances are positive and so their square roots (the quasi standard errors) exist and can be used in plots, etc. } \examples{ example(baseball) baseball.qv <- qvcalc(BTabilities(baseballModel2)) print(baseball.qv) plot(baseball.qv, xlab = "team", levelNames = c("Bal", "Bos", "Cle", "Det", "Mil", "NY", "Tor")) } \references{ Easton, D. F, Peto, J. and Babiker, A. G. A. G. (1991) Floating absolute risk: an alternative to relative risk in survival and case-control analysis avoiding an arbitrary reference group. \emph{Statistics in Medicine} \strong{10}, 1025--1035. Firth, D. (2000) Quasi-variances in Xlisp-Stat and on the web. \emph{Journal of Statistical Software} \strong{5(4)}, 1--13. \doi{https://doi.org/10.18637/jss.v005.i04}. Firth, D. (2003) Overcoming the reference category problem in the presentation of statistical models. \emph{Sociological Methodology} \strong{33}, 1--18. Firth, D. and de Menezes, R. X. (2004) Quasi-variances. \emph{Biometrika} \strong{91}, 65--80. Menezes, R. X. de (1999) More useful standard errors for group and factor effects in generalized linear models. \emph{D.Phil. Thesis}, Department of Statistics, University of Oxford. Ridout, M.S. (1989). Summarizing the results of fitting generalized linear models to data from designed experiments. In: \emph{Statistical Modelling: Proceedings of GLIM89 and the 4th International Workshop on Statistical Modelling held in Trento, Italy, July 17--21, 1989} (A. Decarli et al., eds.), pp 262--269. New York: Springer. } \seealso{ \code{\link[qvcalc:worstErrors]{qvcalc::worstErrors()}}, \code{\link[qvcalc:plot.qv]{qvcalc::plot.qv()}}. 
} \author{ David Firth } BradleyTerry2/man/plotProportions.Rd0000644000176200001440000002054614775237530017267 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/plotProportions.R \name{plotProportions} \alias{plotProportions} \title{Plot Proportions of Tied Matches and Non-tied Matches Won} \usage{ plotProportions( win, tie = NULL, loss, player1, player2, abilities = NULL, home.adv = NULL, tie.max = NULL, tie.scale = NULL, tie.mode = NULL, at.home1 = NULL, at.home2 = NULL, data = NULL, subset = NULL, bin.size = 20, xlab = "P(player1 wins | not a tie)", ylab = "Proportion", legend = NULL, col = 1:2, ... ) } \arguments{ \item{win}{a logical vector: \code{TRUE} if player1 wins, \code{FALSE} otherwise.} \item{tie}{a logical vector: \code{TRUE} if the outcome is a tie, \code{FALSE} otherwise (\code{NULL} if there are no ties).} \item{loss}{a logical vector: \code{TRUE} if player1 loses, \code{FALSE} otherwise.} \item{player1}{an ID factor specifying the first player in each contest, with the same set of levels as \code{player2}.} \item{player2}{an ID factor specifying the second player in each contest, with the same set of levels as \code{player2}.} \item{abilities}{the fitted abilities from a generalized Davidson model (or a Bradley-Terry model).} \item{home.adv}{if applicable, the fitted home advantage parameter from a generalized Davidson model (or a Bradley-Terry model).} \item{tie.max}{the fitted parameter from a generalized Davidson model corresponding to the maximum tie probability.} \item{tie.scale}{if applicable, the fitted parameter from a generalized Davidson model corresponding to the scale of dependence of the tie probability on the probability that \code{player1} wins, given the outcome is not a draw.} \item{tie.mode}{if applicable, the fitted parameter from a generalized Davidson model corresponding to the location of maximum tie probability, in terms of the probability that \code{player1} wins, given the outcome is not a draw.} \item{at.home1}{a logical vector: \code{TRUE} if \code{player1} is at home, \code{FALSE} otherwise.} \item{at.home2}{a logical vector: \code{TRUE} if \code{player2} is at home, \code{FALSE} otherwise.} \item{data}{an optional data frame providing variables required by the model, with one observation per match.} \item{subset}{an optional logical or numeric vector specifying a subset of observations to include in the plot.} \item{bin.size}{the approximate number of matches in each bin.} \item{xlab}{the label to use for the x-axis.} \item{ylab}{the label to use for the y-axis.} \item{legend}{text to use for the legend.} \item{col}{a vector specifying colours to use for the proportion of non-tied matches won and the proportion of tied matches.} \item{\dots}{further arguments passed to plot.} } \value{ A list of data frames: \item{win}{ a data frame comprising \code{prop.win}, the proportion of non-tied matches won by the first player in each bin and \code{bin.win}, the mid-point of each bin. } \item{tie}{ (when ties are present) a data frame comprising \code{prop.tie}, the proportion of tied matches in each bin and \code{bin.tie}, the mid-point of each bin. } } \description{ Plot proportions of tied matches and non-tied matches won by the first player, within matches binned by the relative player ability, as expressed by the probability that the first player wins, given the match is not a tie. Add fitted lines for each set of matches, as given by the generalized Davidson model. 
} \details{ If \code{home.adv} is specified, the results are re-ordered if necessary so that the home player comes first; any matches played on neutral ground are omitted. First the probability that the first player wins given that the match is not a tie is computed: \deqn{expit(home.adv + abilities[player1] - abilities[player2])} where \code{home.adv} and \code{abilities} are parameters from a generalized Davidson model that have been estimated on the log scale. The matches are then binned according to this probability, grouping together matches with similar relative ability between the first player and the second player. Within each bin, the proportion of tied matches is computed and these proportions are plotted against the mid-point of the bin. Then the bins are re-computed omitting the tied games and the proportion of non-tied matches won by the first player is found and plotted against the new mid-point. Finally curves are added for the probability of a tie and the conditional probability of win given the match is not a tie, under a generalized Davidson model with parameters as specified by \code{tie.max}, \code{tie.scale} and \code{tie.mode}. The function can also be used to plot the proportions of wins along with the fitted probability of a win under the Bradley-Terry model. } \note{ This function is designed for single match outcomes, therefore data aggregated over player pairs will need to be expanded. } \examples{ #### A Bradley-Terry example using icehockey data ## Fit the standard Bradley-Terry model, ignoring home advantage standardBT <- BTm(outcome = result, player1 = visitor, player2 = opponent, id = "team", data = icehockey) ## comparing teams on a "level playing field" levelBT <- BTm(result, data.frame(team = visitor, home.ice = 0), data.frame(team = opponent, home.ice = home.ice), ~ team + home.ice, id = "team", data = icehockey) ## compare fit to observed proportion won ## exclude tied matches as not explicitly modelled here par(mfrow = c(1, 2)) plotProportions(win = result == 1, loss = result == 0, player1 = visitor, player2 = opponent, abilities = BTabilities(standardBT)[,1], data = icehockey, subset = result != 0.5, main = "Without home advantage") plotProportions(win = result == 1, loss = result == 0, player1 = visitor, player2 = opponent, home.adv = coef(levelBT)["home.ice"], at.home1 = 0, at.home2 = home.ice, abilities = BTabilities(levelBT)[,1], data = icehockey, subset = result != 0.5, main = "With home advantage") #### A generalized Davidson example using football data if (require(gnm)) { ## subset to first and last season for illustration football <- subset(football, season \%in\% c("2008-9", "2012-13")) ## convert to trinomial counts football.tri <- expandCategorical(football, "result", idvar = "match") ## add variable to indicate whether team playing at home football.tri$at.home <- !logical(nrow(football.tri)) ## fit Davidson model Dav <- gnm(count ~ GenDavidson(result == 1, result == 0, result == -1, home:season, away:season, home.adv = ~1, tie.max = ~1, at.home1 = at.home, at.home2 = !at.home) - 1, eliminate = match, family = poisson, data = football.tri) ## fit shifted & scaled Davidson model shifScalDav <- gnm(count ~ GenDavidson(result == 1, result == 0, result == -1, home:season, away:season, home.adv = ~1, tie.max = ~1, tie.scale = ~1, tie.mode = ~1, at.home1 = at.home, at.home2 = !at.home) - 1, eliminate = match, family = poisson, data = football.tri) ## diagnostic plots main <- c("Davidson", "Shifted & Scaled Davidson") mod <- list(Dav, shifScalDav) 
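## label the list of fitted models; the loop below uses 'mod' and 'main'
## to produce a diagnostic plot (with a matching title) for each fit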
names(mod) <- main alpha <- names(coef(Dav)[-(1:2)]) ## use football.tri data so that at.home can be found, ## but restrict to actual match results par(mfrow = c(1,2)) for (i in 1:2) { coef <- parameters(mod[[i]]) plotProportions(result == 1, result == 0, result == -1, home:season, away:season, abilities = coef[alpha], home.adv = coef["home.adv"], tie.max = coef["tie.max"], tie.scale = coef["tie.scale"], tie.mode = coef["tie.mode"], at.home1 = at.home, at.home2 = !at.home, main = main[i], data = football.tri, subset = count == 1) } } } \seealso{ \code{\link[=GenDavidson]{GenDavidson()}}, \code{\link[=BTm]{BTm()}} } \author{ Heather Turner } \keyword{models} \keyword{nonlinear} BradleyTerry2/man/anova.BTm.Rd0000644000176200001440000000555414775237530015621 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/anova.BTm.R \name{anova.BTm} \alias{anova.BTm} \title{Compare Nested Bradley Terry Models} \usage{ \method{anova}{BTm}(object, ..., dispersion = NULL, test = NULL) } \arguments{ \item{object}{a fitted object of class inheriting from \code{"BTm"}.} \item{...}{additional \code{"BTm"} objects.} \item{dispersion}{a value for the dispersion. Not implemented for models with random effects.} \item{test}{optional character string (partially) matching one of \code{"Chisq"}, \code{"F"} or \code{"Cp"} to specify that p-values should be returned. The Chisq test is a likelihood ratio test for models with no random effects, otherwise a Wald test. Options \code{"F"} and \code{"Cp"} are only applicable to models with no random effects, see \code{\link[=stat.anova]{stat.anova()}}.} } \value{ An object of class \code{"anova"} inheriting from class \code{"data.frame"}. } \description{ Compare nested models inheriting from class \code{"BTm"}. For models with no random effects, compute analysis of deviance table, otherwise compute Wald tests of additional terms. } \details{ For models with no random effects, an analysis of deviance table is computed using \code{\link[=anova.glm]{anova.glm()}}. Otherwise, Wald tests are computed as detailed here. If a single object is specified, terms are added sequentially and a Wald statistic is computed for the extra parameters. If the full model includes player covariates and there are players with missing values over these covariates, then the \code{NULL} model will include a separate ability for these players. If there are missing values in any contest-level variables in the full model, the corresponding contests will be omitted throughout. The random effects structure of the full model is assumed for all sub-models. For a list of objects, consecutive pairs of models are compared by computing a Wald statistic for the extra parameters in the larger of the two models. The Wald statistic is always based on the variance-covariance matrix of the larger of the two models being compared. } \section{Warning}{ The comparison between two or more models will only be valid if they are fitted to the same dataset. This may be a problem if there are missing values and 's default of \code{na.action = na.omit} is used. An error will be returned in this case. The same problem will occur when separate abilities have been estimated for different subsets of players in the models being compared. However no warning is given in this case. } \examples{ result <- rep(1, nrow(flatlizards$contests)) BTmodel <- BTm(result, winner, loser, ~ throat.PC1[..] + throat.PC3[..] + head.length[..] 
+ (1|..), data = flatlizards, trace = TRUE) anova(BTmodel) } \seealso{ \code{\link[=BTm]{BTm()}}, \code{\link[=add1.BTm]{add1.BTm()}} } \author{ Heather Turner } \keyword{models} BradleyTerry2/man/add1.BTm.Rd0000644000176200001440000000541214775237530015317 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/add1.BTm.R \name{add1.BTm} \alias{add1.BTm} \alias{drop1.BTm} \title{Add or Drop Single Terms to/from a Bradley Terry Model} \usage{ \method{add1}{BTm}(object, scope, scale = 0, test = c("none", "Chisq", "F"), x = NULL, ...) } \arguments{ \item{object}{a fitted object of class inheriting from \code{"BTm"}.} \item{scope}{a formula specifying the model including all terms to be considered for adding or dropping.} \item{scale}{an estimate of the dispersion. Not implemented for models with random effects.} \item{test}{should a p-value be returned? The F test is only appropriate for models with no random effects for which the dispersion has been estimated. The Chisq test is a likelihood ratio test for models with no random effects, otherwise a Wald test.} \item{x}{a model matrix containing columns for all terms in the scope. Useful if \code{add1} is to be called repeatedly. \strong{Warning:} no checks are done on its validity.} \item{\dots}{further arguments passed to \code{\link[=add1.glm]{add1.glm()}}.} } \value{ An object of class \code{"anova"} summarizing the differences in fit between the models. } \description{ Add or drop single terms within the limit specified by the \code{scope} argument. For models with no random effects, compute an analysis of deviance table, otherwise compute the Wald statistic of the parameters that have been added to or dropped from the model. } \details{ The hierarchy is respected when considering terms to be added or dropped: all main effects contained in a second-order interaction must remain, and so on. In a scope formula \samp{.} means \sQuote{what is already there}. For \code{drop1}, a missing \code{scope} is taken to mean that all terms in the model may be considered for dropping. If \code{scope} includes player covariates and there are players with missing values over these covariates, then a separate ability will be estimated for these players in \emph{all} fitted models. Similarly if there are missing values in any contest-level variables in \code{scope}, the corresponding contests will be omitted from all models. If \code{formula} includes random effects, the same random effects structure will apply to all models. } \examples{ result <- rep(1, nrow(flatlizards$contests)) BTmodel1 <- BTm(result, winner, loser, ~ throat.PC1[..] + throat.PC3[..] + (1|..), data = flatlizards, tol = 1e-4, sigma = 2, trace = TRUE) drop1(BTmodel1) add1(BTmodel1, ~ . + head.length[..] + SVL[..], test = "Chisq") BTmodel2 <- update(BTmodel1, formula = ~ . + head.length[..]) drop1(BTmodel2, test = "Chisq") } \seealso{ \code{\link[=BTm]{BTm()}}, \code{\link[=anova.BTm]{anova.BTm()}} } \author{ Heather Turner } \keyword{models} BradleyTerry2/man/CEMS.Rd0000644000176200001440000001570214775676220014562 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/CEMS.R \docType{data} \name{CEMS} \alias{CEMS} \title{Dittrich, Hatzinger and Katzenbeisser (1998, 2001) Data on Management School Preference in Europe} \format{ A list containing three data frames, \code{CEMS$preferences}, \code{CEMS$students} and \code{CEMS$schools}. 
The \code{CEMS$preferences} data frame has \code{303 * 15 = 4545} observations (15 possible comparisons for each of 303 students) on the following 8 variables:
\describe{
\item{student}{a factor with levels \code{1:303}}
\item{school1}{a factor with levels \code{c("Barcelona", "London", "Milano", "Paris", "St.Gallen", "Stockholm")}; the first management school in a comparison}
\item{school2}{a factor with the same levels as \code{school1}; the second management school in a comparison}
\item{win1}{integer (value 0 or 1) indicating whether \code{school1} was preferred to \code{school2}}
\item{win2}{integer (value 0 or 1) indicating whether \code{school2} was preferred to \code{school1}}
\item{tied}{integer (value 0 or 1) indicating whether no preference was expressed}
\item{win1.adj}{numeric, equal to \code{win1 + tied/2}}
\item{win2.adj}{numeric, equal to \code{win2 + tied/2}}
}

The \code{CEMS$students} data frame has 303 observations (one for each student) on the following 8 variables:
\describe{
\item{STUD}{a factor with levels \code{c("other", "commerce")}, the student's main discipline of study}
\item{ENG}{a factor with levels \code{c("good", "poor")}, indicating the student's knowledge of English}
\item{FRA}{a factor with levels \code{c("good", "poor")}, indicating the student's knowledge of French}
\item{SPA}{a factor with levels \code{c("good", "poor")}, indicating the student's knowledge of Spanish}
\item{ITA}{a factor with levels \code{c("good", "poor")}, indicating the student's knowledge of Italian}
\item{WOR}{a factor with levels \code{c("no", "yes")}, whether the student was in full-time employment while studying}
\item{DEG}{a factor with levels \code{c("no", "yes")}, whether the student intended to take an international degree}
\item{SEX}{a factor with levels \code{c("female", "male")} }
}

The \code{CEMS$schools} data frame has 6 observations (one for each management school) on the following 7 variables:
\describe{
\item{Barcelona}{numeric (value 0 or 1)}
\item{London}{numeric (value 0 or 1)}
\item{Milano}{numeric (value 0 or 1)}
\item{Paris}{numeric (value 0 or 1)}
\item{St.Gallen}{numeric (value 0 or 1)}
\item{Stockholm}{numeric (value 0 or 1)}
\item{LAT}{numeric (value 0 or 1) indicating a 'Latin' city}
}
}
\source{
Royal Statistical Society datasets website, at \url{https://rss.onlinelibrary.wiley.com/hub/journal/14679876/series-c-datasets/pre_2016}.
}
\usage{
CEMS
}
\description{
\emph{Community of European management schools} (CEMS) data as used in the paper by Dittrich et al. (1998, 2001), re-formatted for use with \code{\link[=BTm]{BTm()}}
}
\details{
The variables \code{win1.adj} and \code{win2.adj} are provided in order to allow a simple way of handling ties (in which a tie counts as half a win and half a loss), which is slightly different numerically from the Davidson (1970) method that is used by Dittrich et al. (1998): see the examples.
}
\examples{
##
## Fit the standard Bradley-Terry model, using the simple 'add 0.5'
## method to handle ties:
##
table3.model <- BTm(outcome = cbind(win1.adj, win2.adj),
                    player1 = school1, player2 = school2,
                    formula = ~.. , refcat = "Stockholm", data = CEMS)
## The results in Table 3 of Dittrich et al (2001) are reproduced
## approximately by a simple re-scaling of the estimates:
table3 <- summary(table3.model)$coef[, 1:2]/1.75
print(table3)
##
## Now fit the 'final model' from Table 6 of Dittrich et al.:
##
table6.model <- BTm(outcome = cbind(win1.adj, win2.adj),
                    player1 = school1, player2 = school2,
                    formula = ~ ..
+ WOR[student] * Paris[..] + WOR[student] * Milano[..] + WOR[student] * Barcelona[..] + DEG[student] * St.Gallen[..] + STUD[student] * Paris[..] + STUD[student] * St.Gallen[..] + ENG[student] * St.Gallen[..] + FRA[student] * London[..] + FRA[student] * Paris[..] + SPA[student] * Barcelona[..] + ITA[student] * London[..] + ITA[student] * Milano[..] + SEX[student] * Milano[..], refcat = "Stockholm", data = CEMS) ## ## Again re-scale to reproduce approximately Table 6 of Dittrich et ## al. (2001): ## table6 <- summary(table6.model)$coef[, 1:2]/1.75 print(table6) ## \dontrun{ ## Now the slightly simplified model of Table 8 of Dittrich et al. (2001): ## table8.model <- BTm(outcome = cbind(win1.adj, win2.adj), player1 = school1, player2 = school2, formula = ~ .. + WOR[student] * LAT[..] + DEG[student] * St.Gallen[..] + STUD[student] * Paris[..] + STUD[student] * St.Gallen[..] + ENG[student] * St.Gallen[..] + FRA[student] * London[..] + FRA[student] * Paris[..] + SPA[student] * Barcelona[..] + ITA[student] * London[..] + ITA[student] * Milano[..] + SEX[student] * Milano[..], refcat = "Stockholm", data = CEMS) table8 <- summary(table8.model)$coef[, 1:2]/1.75 ## ## Notice some larger than expected discrepancies here (the coefficients ## named "..Barcelona", "..Milano" and "..Paris") from the results in ## Dittrich et al. (2001). Apparently a mistake was made in Table 8 of ## the published Corrigendum note (R. Dittrich personal communication, ## February 2010). ## print(table8) } } \references{ Davidson, R. R. (1970) Extending the Bradley-Terry model to accommodate ties in paired comparison experiments. \emph{Journal of the American Statistical Association} \strong{65}, 317--328. Dittrich, R., Hatzinger, R. and Katzenbeisser, W. (1998) Modelling the effect of subject-specific covariates in paired comparison studies with an application to university rankings. \emph{Applied Statistics} \strong{47}, 511--525. Dittrich, R., Hatzinger, R. and Katzenbeisser, W. (2001) Corrigendum: Modelling the effect of subject-specific covariates in paired comparison studies with an application to university rankings. \emph{Applied Statistics} \strong{50}, 247--249. Turner, H. and Firth, D. (2012) Bradley-Terry models in R: The BradleyTerry2 package. \emph{Journal of Statistical Software}, \strong{48}(9), 1--21. } \author{ David Firth } \keyword{datasets} BradleyTerry2/man/BTabilities.Rd0000644000176200001440000000615714775237530016227 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/BTabilities.R \name{BTabilities} \alias{BTabilities} \alias{print.BTabilities} \alias{coef.BTabilities} \alias{vcov.BTabilities} \title{Estimated Abilities from a Bradley-Terry Model} \usage{ BTabilities(model) } \arguments{ \item{model}{a model object for which \code{inherits(model, "BTm")} is \code{TRUE}} } \value{ A two-column numeric matrix of class \code{c("BTabilities", "matrix")}, with columns named \code{"ability"} and \code{"se"}; has one row for each player; has attributes named \code{"vcov"}, \code{"modelcall"}, \code{"factorname"} and (sometimes --- see below) \code{"separate"}. The first three attributes are not printed by the method \code{print.BTabilities}. } \description{ Computes the (baseline) ability of each player from a model object of class \code{"BTm"}. 
} \details{ The player abilities are either directly estimated by the model, in which case the appropriate parameter estimates are returned, otherwise the abilities are computed from the terms of the fitted model that involve player covariates only (those indexed by \code{model$id} in the model formula). Thus parameters in any other terms are assumed to be zero. If one player has been set as the reference, then \code{predict.BTm()} can be used to obtain ability estimates with non-player covariates set to other values, see examples for \code{\link[=predict.BTm]{predict.BTm()}}. If the abilities are structured according to a linear predictor, and if there are player covariates with missing values, the abilities for the corresponding players are estimated as separate parameters. In this event the resultant matrix has an attribute, named \code{"separate"}, which identifies those players whose ability was estimated separately. For an example, see \code{\link[=flatlizards]{flatlizards()}}. } \examples{ ### citations example ## Convert frequencies to success/failure data citations.sf <- countsToBinomial(citations) names(citations.sf)[1:2] <- c("journal1", "journal2") ## Fit the "standard" Bradley-Terry model citeModel <- BTm(cbind(win1, win2), journal1, journal2, data = citations.sf) BTabilities(citeModel) ### baseball example data(baseball) # start with baseball data as provided by package ## Fit mode with home advantage baseball$home.team <- data.frame(team = baseball$home.team, at.home = 1) baseball$away.team <- data.frame(team = baseball$away.team, at.home = 0) baseballModel2 <- BTm(cbind(home.wins, away.wins), home.team, away.team, formula = ~ team + at.home, id = "team", data = baseball) ## Estimate abilities for each team, relative to Baltimore, when ## playing away from home: BTabilities(baseballModel2) } \references{ Firth, D. (2005) Bradley-Terry models in R. \emph{Journal of Statistical Software}, \strong{12}(1), 1--12. Turner, H. and Firth, D. (2012) Bradley-Terry models in R: The BradleyTerry2 package. \emph{Journal of Statistical Software}, \strong{48}(9), 1--21. } \seealso{ \code{\link[=BTm]{BTm()}}, \code{\link[=residuals.BTm]{residuals.BTm()}} } \author{ David Firth and Heather Turner } \keyword{models} BradleyTerry2/man/baseball.Rd0000644000176200001440000000370614775673305015602 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/baseball.R \docType{data} \name{baseball} \alias{baseball} \title{Baseball Data from Agresti (2002)} \format{ A data frame with 42 observations on the following 4 variables. \describe{ \item{home.team}{a factor with levels \code{Baltimore}, \code{Boston}, \code{Cleveland}, \code{Detroit}, \code{Milwaukee}, \verb{New York}, \code{Toronto}.} \item{away.team}{a factor with levels \code{Baltimore}, \code{Boston}, \code{Cleveland}, \code{Detroit}, \code{Milwaukee}, \verb{New York}, \code{Toronto}.} \item{home.wins}{a numeric vector.} \item{away.wins}{a numeric vector.} } } \source{ Page 438 of Agresti, A. (2002) \emph{Categorical Data Analysis} (2nd Edn.). New York: Wiley. } \usage{ baseball } \description{ Baseball results for games in the 1987 season between 7 teams in the Eastern Division of the American League. } \note{ This dataset is in a simpler format than the one described in Firth (2005). } \examples{ ## This reproduces the analysis in Sec 10.6 of Agresti (2002). 
data(baseball) # start with baseball data as provided by package ## Simple Bradley-Terry model, ignoring home advantage: baseballModel1 <- BTm(cbind(home.wins, away.wins), home.team, away.team, data = baseball, id = "team") ## Now incorporate the "home advantage" effect baseball$home.team <- data.frame(team = baseball$home.team, at.home = 1) baseball$away.team <- data.frame(team = baseball$away.team, at.home = 0) baseballModel2 <- update(baseballModel1, formula = ~ team + at.home) ## Compare the fit of these two models: anova(baseballModel1, baseballModel2) } \references{ Firth, D. (2005) Bradley-Terry models in R. \emph{Journal of Statistical Software}, \strong{12}(1), 1--12. Turner, H. and Firth, D. (2012) Bradley-Terry models in R: The BradleyTerry2 package. \emph{Journal of Statistical Software}, \strong{48}(9), 1--21. } \seealso{ \code{\link[=BTm]{BTm()}} } \keyword{datasets} BradleyTerry2/man/chameleons.Rd0000644000176200001440000000706314775673305016153 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/chameleons.R \docType{data} \name{chameleons} \alias{chameleons} \title{Male Cape Dwarf Chameleons: Measured Traits and Contest Outcomes} \format{ A list containing three data frames: \code{chameleons$winner}, \code{chameleons$loser} and \code{chameleons$predictors}. The \code{chameleons$winner} and \code{chameleons$loser} data frames each have 106 observations (one per contest) on the following 4 variables: \describe{ \item{ID}{a factor with 35 levels \code{C01}, \code{C02}, ... , \code{C43}, the identity of the winning (or losing) male in each contest} \item{prev.wins.1}{integer (values 0 or 1), did the winner/loser of this contest win in an immediately previous contest?} \item{prev.wins.2}{integer (values 0, 1 or 2), how many of his (maximum) previous 2 contests did each male win?} \item{prev.wins.all}{integer, how many previous contests has each male won?} } The \code{chameleons$predictors} data frame has 35 observations, one for each male involved in the contests, on the following 7 variables: \describe{ \item{ch.res}{numeric, residuals of casque height regression on \code{SVL}, i.e. relative height of the bony part on the top of the chameleons' heads} \item{jl.res}{numeric, residuals of jaw length regression on \code{SVL}} \item{tl.res}{numeric, residuals of tail length regression on \code{SVL}} \item{mass.res}{numeric, residuals of body mass regression on \code{SVL} (body condition)} \item{SVL}{numeric, snout-vent length (body size)} \item{prop.main}{numeric, proportion (arcsin transformed) of area of the flank occupied by the main pink patch on the flank} \item{prop.patch}{numeric, proportion (arcsin transformed) of area of the flank occupied by the entire flank patch} } } \source{ The data were obtained by Dr Devi Stuart-Fox, \url{https://devistuartfox.com/}, and they are reproduced here with her kind permission. These are the same data that were used in Stuart-Fox, D. M., Firth, D., Moussalli, A. and Whiting, M. J. (2006) Multiple signals in chameleon contests: designing and analysing animal contests as a tournament. \emph{Animal Behaviour} \strong{71}, 1263--1271. } \usage{ chameleons } \description{ Data as used in the study by Stuart-Fox et al. (2006). Physical measurements made on 35 male Cape dwarf chameleons, and the results of 106 inter-male contests. } \details{ The published paper mentions 107 contests, but only 106 contests are included here. 
Contest number 16 was deleted from the data used to fit the models, because it involved a male whose predictor-variables were incomplete (and it was the only contest involving that lizard, so it is uninformative). } \examples{ ## ## Reproduce Table 3 from page 1268 of the above paper: ## summary(chameleon.model <- BTm(player1 = winner, player2 = loser, formula = ~ prev.wins.2 + ch.res[ID] + prop.main[ID] + (1|ID), id = "ID", data = chameleons)) head(BTabilities(chameleon.model)) ## ## Note that, although a per-chameleon random effect is specified as in the ## above [the term "+ (1|ID)"], the estimated variance for that random ## effect turns out to be zero in this case. The "prior experience" ## effect ["+ prev.wins.2"] in this analysis has explained most of the ## variation, leaving little for the ID-specific predictors to do. ## Despite that, two of the ID-specific predictors do emerge as ## significant. ## ## Test whether any of the other ID-specific predictors has an effect: ## add1(chameleon.model, ~ . + jl.res[ID] + tl.res[ID] + mass.res[ID] + SVL[ID] + prop.patch[ID]) } \author{ David Firth } \keyword{datasets} BradleyTerry2/man/countsToBinomial.Rd0000644000176200001440000000231414775237530017314 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/countsToBinomial.R \name{countsToBinomial} \alias{countsToBinomial} \title{Convert Contingency Table of Wins to Binomial Counts} \usage{ countsToBinomial(xtab) } \arguments{ \item{xtab}{a contingency table of wins cross-classified by \dQuote{winner} and \dQuote{loser}} } \value{ A data frame with four columns \item{player1 }{ the first player in the contest. } \item{player2 }{ the second player in the contest. } \item{win1 }{ the number of times \code{player1} won. } \item{win2 }{ the number of times \code{player2} won. } } \description{ Convert a contingency table of wins to a four-column data frame containing the number of wins and losses for each pair of players. } \examples{ ######################################################## ## Statistics journal citation data from Stigler (1994) ## -- see also Agresti (2002, p448) ######################################################## citations ## Convert frequencies to success/failure data citations.sf <- countsToBinomial(citations) names(citations.sf)[1:2] <- c("journal1", "journal2") citations.sf } \seealso{ \code{\link[=BTm]{BTm()}} } \author{ Heather Turner } \keyword{models} BradleyTerry2/man/glmmPQL.Rd0000644000176200001440000001621214775237530015336 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/glmmPQL.R \name{glmmPQL} \alias{glmmPQL} \title{PQL Estimation of Generalized Linear Mixed Models} \usage{ glmmPQL( fixed, random = NULL, family = "binomial", data = NULL, subset = NULL, weights = NULL, offset = NULL, na.action = NULL, start = NULL, etastart = NULL, mustart = NULL, control = glmmPQL.control(...), sigma = 0.1, sigma.fixed = FALSE, model = TRUE, x = FALSE, contrasts = NULL, ... ) } \arguments{ \item{fixed}{a formula for the fixed effects.} \item{random}{a design matrix for the random effects, with number of rows equal to the length of variables in \code{formula}.} \item{family}{a description of the error distribution and link function to be used in the model. This can be a character string naming a family function, a family function or the result of a call to a family function. 
(See \code{\link[=family]{family()}} for details of family functions.)}

\item{data}{an optional data frame, list or environment (or object coercible by \code{\link[=as.data.frame]{as.data.frame()}} to a data frame) containing the variables in the model. If not found in \code{data}, the variables are taken from \code{environment(formula)}, typically the environment from which \code{glmmPQL} is called.}

\item{subset}{an optional logical or numeric vector specifying a subset of observations to be used in the fitting process.}

\item{weights}{an optional vector of \sQuote{prior weights} to be used in the fitting process.}

\item{offset}{an optional numeric vector to be added to the linear predictor during fitting. One or more \code{offset} terms can be included in the formula instead or as well, and if more than one is specified their sum is used. See \code{\link[=model.offset]{model.offset()}}.}

\item{na.action}{a function which indicates what should happen when the data contain \code{NA}s. The default is set by the \code{na.action} setting of \code{\link[=options]{options()}}, and is \code{\link[=na.fail]{na.fail()}} if that is unset.}

\item{start}{starting values for the parameters in the linear predictor.}

\item{etastart}{starting values for the linear predictor.}

\item{mustart}{starting values for the vector of means.}

\item{control}{a list of parameters for controlling the fitting process. See \code{\link[=glmmPQL.control]{glmmPQL.control()}} for details.}

\item{sigma}{a starting value for the standard deviation of the random effects.}

\item{sigma.fixed}{logical: whether or not the standard deviation of the random effects should be fixed at its starting value.}

\item{model}{logical: whether or not the model frame should be returned.}

\item{x}{logical: whether or not the design matrix for the fixed effects should be returned.}

\item{contrasts}{an optional list. See the \code{contrasts.arg} argument of \code{\link[=model.matrix]{model.matrix()}}.}

\item{\dots}{arguments to be passed to \code{\link[=glmmPQL.control]{glmmPQL.control()}}.}
}
\value{
An object of class \code{"BTglmmPQL"} which inherits from \code{"glm"} and \code{"lm"}:
\item{coefficients}{ a named vector of coefficients, with a \code{"random"} attribute giving the estimated random effects.}
\item{residuals}{ the working residuals from the final iteration of the IWLS loop.}
\item{random}{the design matrix for the random effects.}
\item{fitted.values}{ the fitted mean values, obtained by transforming the linear predictors by the inverse of the link function.}
\item{rank}{the numeric rank of the fitted linear model.}
\item{family}{the \code{family} object used.}
\item{linear.predictors}{the linear fit on link scale.}
\item{deviance}{up to a constant, minus twice the maximized log-likelihood.}
\item{aic}{a version of Akaike's \emph{An Information Criterion}, minus twice the maximized log-likelihood plus twice the number of parameters, computed by the \code{aic} component of the family.}
\item{null.deviance}{the deviance for the null model, comparable with \code{deviance}.}
\item{iter}{the number of iterations of the PQL algorithm.}
\item{weights}{the working weights, that is the weights in the final iteration of the IWLS loop.}
\item{prior.weights}{the weights initially supplied, a vector of \code{1}'s if none were.}
\item{df.residual}{the residual degrees of freedom.}
\item{df.null}{the residual degrees of freedom for the null model.}
\item{y}{if requested (the default) the \code{y} vector used.
(It is a vector even for a binomial model.)}
\item{x}{if requested, the model matrix.}
\item{model}{if requested (the default), the model frame.}
\item{converged}{logical. Was the PQL algorithm judged to have converged?}
\item{call}{the matched call.}
\item{formula}{the formula supplied.}
\item{terms}{the \code{terms} object used.}
\item{data}{the \code{data} argument used.}
\item{offset}{the offset vector used.}
\item{control}{the value of the \code{control} argument used.}
\item{contrasts}{(where relevant) the contrasts used.}
\item{xlevels}{(where relevant) a record of the levels of the factors used in fitting.}
\item{na.action}{(where relevant) information returned by \code{model.frame} on the special handling of \code{NA}s.}
\item{sigma}{the estimated standard deviation of the random effects}
\item{sigma.fixed}{logical: whether or not \code{sigma} was fixed}
\item{varFix}{the variance-covariance matrix of the fixed effects}
\item{varSigma}{the variance of \code{sigma}}
}
\description{
Fits GLMMs with simple random effects structure via Breslow and Clayton's PQL algorithm. The GLMM is assumed to be of the form \ifelse{html}{\out{g(μ) = Xβ + Ze}}{\deqn{g(\boldsymbol{\mu}) = \boldsymbol{X\beta} + \boldsymbol{Ze}}{ g(mu) = X * beta + Z * e}} where \eqn{g} is the link function, \ifelse{html}{\out{μ}}{\eqn{\boldsymbol{\mu}}{mu}} is the vector of means and \ifelse{html}{\out{X, Z}}{\eqn{\boldsymbol{X}, \boldsymbol{Z}}{X,Z}} are design matrices for the fixed effects \ifelse{html}{\out{β}}{\eqn{\boldsymbol{\beta}}{beta}} and random effects \ifelse{html}{\out{e}}{\eqn{\boldsymbol{e}}{e}} respectively. Furthermore the random effects are assumed to be i.i.d. \ifelse{html}{\out{N(0, σ²)}}{\eqn{N(0, \sigma^2)}{ N(0, sigma^2)}}.
}
\examples{
###############################################
## Crowder seeds example from Breslow & Clayton
###############################################
summary(glmmPQL(cbind(r, n - r) ~ seed + extract,
                random = diag(nrow(seeds)),
                family = "binomial", data = seeds))

summary(glmmPQL(cbind(r, n - r) ~ seed*extract,
                random = diag(nrow(seeds)),
                family = "binomial", data = seeds))
}
\references{
Breslow, N. E. and Clayton, D. G. (1993) Approximate inference in Generalized Linear Mixed Models. \emph{Journal of the American Statistical Association} \strong{88}(421), 9--25.

Harville, D. A. (1977) Maximum likelihood approaches to variance component estimation and to related problems. \emph{Journal of the American Statistical Association} \strong{72}(358), 320--338.
}
\seealso{
\code{\link[=predict.BTglmmPQL]{predict.BTglmmPQL()}}, \code{\link[=glmmPQL.control]{glmmPQL.control()}}, \code{\link[=BTm]{BTm()}}
}
\author{
Heather Turner
}
\keyword{models}
BradleyTerry2/man/icehockey.Rd0000644000176200001440000001066214775673305015777 0ustar liggesusers
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/icehockey.R
\docType{data}
\name{icehockey}
\alias{icehockey}
\title{College Hockey Men's Division I 2009-10 results}
\format{
A data frame with 1083 observations on the following 8 variables.
\describe{
\item{date}{a numeric vector}
\item{visitor}{a factor with 58 levels \verb{Alaska Anchorage} ... \code{Yale}}
\item{v_goals}{a numeric vector}
\item{opponent}{a factor with 58 levels \verb{Alaska Anchorage} ...
\code{Yale}} \item{o_goals}{a numeric vector} \item{conference}{a factor with levels \code{AH}, \code{CC}, \code{CH}, \code{EC}, \code{HE}, \code{NC}, \code{WC}} \item{result}{a numeric vector: 1 if visitor won, 0.5 for a draw and 0 if visitor lost} \item{home.ice}{a logical vector: 1 if opponent on home ice, 0 if game on neutral ground} } } \source{ \url{http://www.collegehockeystats.net/0910/schedules/men}. } \usage{ icehockey } \description{ Game results from American College Hockey Men's Division I composite schedule 2009-2010. } \details{ The Division I ice hockey teams are arranged in six conferences: Atlantic Hockey, Central Collegiate Hockey Association, College Hockey America, ECAC Hockey, Hockey East and the Western Collegiate Hockey Association, all part of the National Collegiate Athletic Association. The composite schedule includes within conference games and between conference games. The data set here contains only games from the regular season, the results of which determine the teams that play in the NCAA national tournament. There are six automatic bids that go to the conference tournament champions, the remaining 10 teams are selected based upon ranking under the NCAA's system of pairwise comparisons (\url{https://www.collegehockeynews.com/info/?d=pwcrpi}). Some have argued that Bradley-Terry rankings would be fairer (\url{https://www.collegehockeynews.com/info/?d=krach}). } \examples{ ### Fit the standard Bradley-Terry model standardBT <- BTm(outcome = result, player1 = visitor, player2 = opponent, id = "team", data = icehockey) ## Bradley-Terry abilities abilities <- exp(BTabilities(standardBT)[,1]) ## Compute round-robin winning probability and KRACH ratings ## (scaled abilities such that KRACH = 100 for a team with ## round-robin winning probability of 0.5) rankings <- function(abilities){ probwin <- abilities/outer(abilities, abilities, "+") diag(probwin) <- 0 nteams <- ncol(probwin) RRWP <- rowSums(probwin)/(nteams - 1) low <- quantile(abilities, 0.45) high <- quantile(abilities, 0.55) middling <- uniroot(function(x) {sum(x/(x+abilities)) - 0.5*nteams}, lower = low, upper = high)$root KRACH <- abilities/middling*100 cbind(KRACH, RRWP) } ranks <- rankings(abilities) ## matches those produced by Joe Schlobotnik's Build Your Own Rankings head(signif(ranks, 4)[order(ranks[,1], decreasing = TRUE),]) ## At one point the NCAA rankings gave more credit for wins on ## neutral/opponent's ground. Home ice effects are easily ## incorporated into the Bradley-Terry model, comparing teams ## on a "level playing field" levelBT <- BTm(result, data.frame(team = visitor, home.ice = 0), data.frame(team = opponent, home.ice = home.ice), ~ team + home.ice, id = "team", data = icehockey) abilities <- exp(BTabilities(levelBT)[,1]) ranks2 <- rankings(abilities) ## Look at movement between the two rankings change <- factor(rank(ranks2[,1]) - rank(ranks[,1])) barplot(xtabs(~change), xlab = "Change in Rank", ylab = "No. Teams") ## Take out regional winners and look at top 10 regional <- c("RIT", "Alabama-Huntsville", "Michigan", "Cornell", "Boston College", "North Dakota") ranks <- ranks[!rownames(ranks) \%in\% regional] ranks2 <- ranks2[!rownames(ranks2) \%in\% regional] ## compare the 10 at-large selections under both rankings ## with those selected under NCAA rankings cbind(names(sort(ranks, decr = TRUE)[1:10]), names(sort(ranks2, decr = TRUE)[1:10]), c("Miami", "Denver", "Wisconsin", "St. 
Cloud State", "Bemidji State", "Yale", "Northern Michigan", "New Hampshire", "Alsaka", "Vermont")) } \references{ Schlobotnik, J. Build your own rankings: \url{http://www.elynah.com/tbrw/2010/rankings.diy.shtml}. College Hockey News \url{https://www.collegehockeynews.com/}. Selections for 2010 NCAA tournament: \url{https://www.espn.com/college-sports/news/story?id=5012918}. } \keyword{datasets} BradleyTerry2/man/reexports.Rd0000755000176200001440000000062514775237530016064 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/qvcalc.BTabilities.R \docType{import} \name{reexports} \alias{reexports} \alias{qvcalc} \title{Objects exported from other packages} \keyword{internal} \description{ These objects are imported from other packages. Follow the links below to see their documentation. \describe{ \item{qvcalc}{\code{\link[qvcalc]{qvcalc}}} }} BradleyTerry2/man/sound.fields.Rd0000644000176200001440000000730414775673305016430 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/sound.fields.R \docType{data} \name{sound.fields} \alias{sound.fields} \title{Kousgaard (1984) Data on Pair Comparisons of Sound Fields} \format{ A list containing two data frames, \code{sound.fields$comparisons}, and \code{sound.fields$design}. The \code{sound.fields$comparisons} data frame has 84 observations on the following 8 variables: \describe{ \item{field1}{a factor with levels \code{c("000", "001", "010", "011", "100", "101", "110", "111")}, the first sound field in a comparison} \item{field2}{a factor with the same levels as \code{field1}; the second sound field in a comparison} \item{win1}{integer, the number of times that \code{field1} was preferred to \code{field2}} \item{tie}{integer, the number of times that no preference was expressed when comparing \code{field1} and \code{field2}} \item{win2}{integer, the number of times that \code{field2} was preferred to \code{field1}} \item{win1.adj}{numeric, equal to \code{win1 + tie/2}} \item{win2.adj}{numeric, equal to \code{win2 + tie/2}} \item{instrument}{a factor with 3 levels, \code{c("cello", "flute", "violin")}} } The \code{sound.fields$design} data frame has 8 observations (one for each of the sound fields compared in the experiment) on the following 3 variables: \describe{ \item{a")}{a factor with levels \code{c("0", "1")}, the \emph{direct sound} factor (0 for \emph{obstructed sight line}, 1 for \emph{free sight line}); contrasts are sum contrasts} \item{b}{a factor with levels \code{c("0", "1")}, the \emph{reflection} factor (0 for \emph{-26dB}, 1 for \emph{-20dB}); contrasts are sum contrasts} \item{c}{a factor with levels \code{c("0", "1")}, the \emph{reverberation} factor (0 for \emph{-24dB}, 1 for \emph{-20dB}); contrasts are sum contrasts} } } \source{ Kousgaard, N. (1984) Analysis of a Sound Field Experiment by a Model for Paired Comparisons with Explanatory Variables. \emph{Scandinavian Journal of Statistics} \strong{11}, 51--57. } \usage{ sound.fields } \description{ The results of a series of factorial subjective room acoustic experiments carried out at the Technical University of Denmark by A C Gade. } \details{ The variables \code{win1.adj} and \code{win2.adj} are provided in order to allow a simple way of handling ties (in which a tie counts as half a win and half a loss), which is slightly different numerically from the Davidson (1970) method that is used by Kousgaard (1984): see the examples. 
} \examples{ ## ## Fit the Bradley-Terry model to data for flutes, using the simple ## 'add 0.5' method to handle ties: ## flutes.model <- BTm(cbind(win1.adj, win2.adj), field1, field2, ~ field, id = "field", subset = (instrument == "flute"), data = sound.fields) ## ## This agrees (after re-scaling) quite closely with the estimates given ## in Table 3 of Kousgaard (1984): ## table3.flutes <- c(-0.581, -1.039, 0.347, 0.205, 0.276, 0.347, 0.311, 0.135) plot(c(0, coef(flutes.model)), table3.flutes) abline(lm(table3.flutes ~ c(0, coef(flutes.model)))) ## ## Now re-parameterise that model in terms of the factorial effects, as ## in Table 5 of Kousgaard (1984): ## flutes.model.reparam <- update(flutes.model, formula = ~ a[field] * b[field] * c[field] ) table5.flutes <- c(.267, .250, -.088, -.294, .062, .009, -0.070) plot(coef(flutes.model.reparam), table5.flutes) abline(lm(table5.flutes ~ coef(flutes.model.reparam))) } \references{ Davidson, R. R. (1970) Extending the Bradley-Terry model to accommodate ties in paired comparison experiments. \emph{Journal of the American Statistical Association} \strong{65}, 317--328. } \author{ David Firth } \keyword{datasets} BradleyTerry2/man/residuals.BTm.Rd0000644000176200001440000000515614775237530016506 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/residuals.BTm.R \name{residuals.BTm} \alias{residuals.BTm} \title{Residuals from a Bradley-Terry Model} \usage{ \method{residuals}{BTm}( object, type = c("deviance", "pearson", "working", "response", "partial", "grouped"), by = object$id, ... ) } \arguments{ \item{object}{a model object for which \code{inherits(model, "BTm")} is \code{TRUE}.} \item{type}{the type of residuals which should be returned. The alternatives are: \code{"deviance"} (default), \code{"pearson"}, \code{"working"}, \code{"response"}, and \code{"partial"}.} \item{by}{the grouping factor to use when \code{type = "grouped"}.} \item{...}{arguments to pass on other methods.} } \value{ A numeric vector of length equal to the number of players, with a \code{"weights"} attribute. } \description{ Computes residuals from a model object of class \code{"BTm"}. In additional to the usual options for objects inheriting from class \code{"glm"}, a \code{"grouped"} option is implemented to compute player-specific residuals suitable for diagnostic checking of a predictor involving player-level covariates. } \details{ For \code{type} other than \code{"grouped"} see \code{\link[=residuals.glm]{residuals.glm()}}. For \code{type = "grouped"} the residuals returned are weighted means of working residuals, with weights equal to the binomial denominators in the fitted model. These are suitable for diagnostic model checking, for example plotting against candidate predictors. } \examples{ ## ## See ?springall ## springall.model <- BTm(cbind(win.adj, loss.adj), col, row, ~ flav[..] + gel[..] + flav.2[..] + gel.2[..] + flav.gel[..] + (1 | ..), data = springall) res <- residuals(springall.model, type = "grouped") with(springall$predictors, plot(flav, res)) with(springall$predictors, plot(gel, res)) ## Weighted least-squares regression of these residuals on any variable ## already included in the model yields slope coefficient zero: lm(res ~ flav, weights = attr(res, "weights"), data = springall$predictors) lm(res ~ gel, weights = attr(res, "weights"), data = springall$predictors) } \references{ Firth, D. (2005) Bradley-Terry models in R. \emph{Journal of Statistical Software} \strong{12}(1), 1--12. Turner, H. 
and Firth, D. (2012) Bradley-Terry models in R: The BradleyTerry2 package. \emph{Journal of Statistical Software}, \strong{48}(9), 1--21. } \seealso{ \code{\link[=BTm]{BTm()}}, \code{\link[=BTabilities]{BTabilities()}} } \author{ David Firth and Heather Turner } \keyword{models} BradleyTerry2/man/flatlizards.Rd0000644000176200001440000001504314775674331016351 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/flatlizards.R \docType{data} \name{flatlizards} \alias{flatlizards} \title{Augrabies Male Flat Lizards: Contest Results and Predictor Variables} \format{ This dataset is a list containing two data frames: \code{flatlizards$contests} and \code{flatlizards$predictors}. The \code{flatlizards$contests} data frame has 100 observations on the following 2 variables: \describe{ \item{winner}{a factor with 77 levels \code{lizard003} ... \code{lizard189}.} \item{loser}{a factor with the same 77 levels \code{lizard003} ... \code{lizard189}.} } The \code{flatlizards$predictors} data frame has 77 observations (one for each of the 77 lizards) on the following 18 variables: \describe{ \item{id}{factor with 77 levels (3 5 6 ... 189), the lizard identifiers.} \item{throat.PC1}{numeric, the first principal component of the throat spectrum.} \item{throat.PC2}{numeric, the second principal component of the throat spectrum.} \item{throat.PC3}{numeric, the third principal component of the throat spectrum.} \item{frontleg.PC1}{numeric, the first principal component of the front-leg spectrum.} \item{frontleg.PC2}{numeric, the second principal component of the front-leg spectrum.} \item{frontleg.PC3}{numeric, the third principal component of the front-leg spectrum.} \item{badge.PC1}{numeric, the first principal component of the ventral colour patch spectrum.} \item{badge.PC2}{numeric, the second principal component of the ventral colour patch spectrum.} \item{badge.PC3}{numeric, the third principal component of the ventral colour patch spectrum.} \item{badge.size}{numeric, a measure of the area of the ventral colour patch.} \item{testosterone}{numeric, a measure of blood testosterone concentration.} \item{SVL}{numeric, the snout-vent length of the lizard.} \item{head.length}{numeric, head length.} \item{head.width}{numeric, head width.} \item{head.height}{numeric, head height.} \item{condition}{numeric, a measure of body condition.} \item{repro.tactic}{a factor indicating reproductive tactic; levels are \code{resident} and \code{floater}.} } } \source{ The data were collected by Dr Martin Whiting, \url{https://whitinglab.com/people/martin-whiting/}, and they appear here with his kind permission. } \usage{ flatlizards } \description{ Data collected at Augrabies Falls National Park (South Africa) in September-October 2002, on the contest performance and background attributes of 77 male flat lizards (\emph{Platysaurus broadleyi}). The results of exactly 100 contests were recorded, along with various measurements made on each lizard. Full details of the study are in Whiting et al. (2006). } \details{ There were no duplicate contests (no pair of lizards was seen fighting more than once), and there were no tied contests (the result of each contest was clear). The variables \code{head.length}, \code{head.width}, \code{head.height} and \code{condition} were all computed as residuals (of directly measured head length, head width, head height and body mass index, respectively) from simple least-squares regressions on \code{SVL}. 
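
A quick check of this adjustment is to confirm that the size-adjusted variables are essentially uncorrelated with \code{SVL} (a rough sketch only; lizards with missing values are dropped, so the correlations are only approximately zero):

\preformatted{
## size-adjusted measurements should be (close to) uncorrelated with SVL
with(flatlizards$predictors,
     cor(cbind(head.length, head.width, head.height, condition), SVL,
         use = "complete.obs"))
}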
Values of some predictors are missing (\code{NA}) for some lizards, \sQuote{at random}, because of instrument problems unconnected with the value of the measurement being made. } \examples{ ## ## Fit the standard Bradley-Terry model, using the bias-reduced ## maximum likelihood method: ## result <- rep(1, nrow(flatlizards$contests)) BTmodel <- BTm(result, winner, loser, br = TRUE, data = flatlizards$contests) summary(BTmodel) ## ## That's fairly useless, though, because of the rather small ## amount of data on each lizard. And really the scientific ## interest is not in the abilities of these particular 77 ## lizards, but in the relationship between ability and the ## measured predictor variables. ## ## So next fit (by maximum likelihood) a "structured" B-T model in ## which abilities are determined by a linear predictor. ## ## This reproduces results reported in Table 1 of Whiting et al. (2006): ## Whiting.model <- BTm(result, winner, loser, ~ throat.PC1[..] + throat.PC3[..] + head.length[..] + SVL[..], data = flatlizards) summary(Whiting.model) ## ## Equivalently, fit the same model using glmmPQL: ## Whiting.model <- BTm(result, winner, loser, ~ throat.PC1[..] + throat.PC3[..] + head.length[..] + SVL[..] + (1|..), sigma = 0, sigma.fixed = TRUE, data = flatlizards) summary(Whiting.model) ## ## But that analysis assumes that the linear predictor formula for ## abilities is _perfect_, i.e., that there is no error in the linear ## predictor. This will always be unrealistic. ## ## So now fit the same predictor but with a normally distributed error ## term --- a generalized linear mixed model --- by using the BTm ## function instead of glm. ## Whiting.model2 <- BTm(result, winner, loser, ~ throat.PC1[..] + throat.PC3[..] + head.length[..] + SVL[..] + (1|..), data = flatlizards, trace = TRUE) summary(Whiting.model2) ## ## The estimated coefficients (of throat.PC1, throat.PC3, ## head.length and SVL are not changed substantially by ## the recognition of an error term in the model; but the estimated ## standard errors are larger, as expected. The main conclusions from ## Whiting et al. (2006) are unaffected. ## ## With the normally distributed random error included, it is perhaps ## at least as natural to use probit rather than logit as the link ## function: ## require(stats) Whiting.model3 <- BTm(result, winner, loser, ~ throat.PC1[..] + throat.PC3[..] + head.length[..] + SVL[..] + (1|..), family = binomial(link = "probit"), data = flatlizards, trace = TRUE) summary(Whiting.model3) BTabilities(Whiting.model3) ## Note the "separate" attribute here, identifying two lizards with ## missing values of at least one predictor variable ## ## Modulo the usual scale change between logit and probit, the results ## are (as expected) very similar to Whiting.model2. } \references{ Turner, H. and Firth, D. (2012) Bradley-Terry models in R: The BradleyTerry2 package. \emph{Journal of Statistical Software}, \strong{48}(9), 1--21. Whiting, M. J., Stuart-Fox, D. M., O'Connor, D., Firth, D., Bennett, N. C. and Blomberg, S. P. (2006). Ultraviolet signals ultra-aggression in a lizard. \emph{Animal Behaviour} \strong{72}, 353--363. 
} \seealso{ \code{\link[=BTm]{BTm()}} } \keyword{datasets} BradleyTerry2/man/BTm.Rd0000644000176200001440000002345214775237530014513 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/BTm.R \name{BTm} \alias{BTm} \title{Bradley-Terry Model and Extensions} \usage{ BTm( outcome = 1, player1, player2, formula = NULL, id = "..", separate.ability = NULL, refcat = NULL, family = "binomial", data = NULL, weights = NULL, subset = NULL, na.action = NULL, start = NULL, etastart = NULL, mustart = NULL, offset = NULL, br = FALSE, model = TRUE, x = FALSE, contrasts = NULL, ... ) } \arguments{ \item{outcome}{the binomial response: either a numeric vector, a factor in which the first level denotes failure and all others success, or a two-column matrix with the columns giving the numbers of successes and failures.} \item{player1}{either an ID factor specifying the first player in each contest, or a data.frame containing such a factor and possibly other contest-level variables that are specific to the first player. If given in a data.frame, the ID factor must have the name given in the \code{id} argument. If a factor is specified it will be used to create such a data.frame.} \item{player2}{an object corresponding to that given in \code{player1} for the second player in each contest, with identical structure -- in particular factors must have identical levels.} \item{formula}{a formula with no left-hand-side, specifying the model for player ability. See details for more information.} \item{id}{the name of the ID factor.} \item{separate.ability}{(if \code{formula} does not include the ID factor as a separate term) a character vector giving the names of players whose abilities are to be modelled individually rather than using the specification given by \code{formula}.} \item{refcat}{(if \code{formula} includes the ID factor as a separate term) a character specifying which player to use as a reference, with the first level of the ID factor as the default. Overrides any other contrast specification for the ID factor.} \item{family}{a description of the error distribution and link function to be used in the model. Only the binomial family is implemented, with either\code{"logit"}, \code{"probit"} , or \code{"cauchit"} link. (See \code{\link[stats:family]{stats::family()}} for details of family functions.)} \item{data}{an optional object providing data required by the model. This may be a single data frame of contest-level data or a list of data frames. Names of data frames are ignored unless they refer to data frames specified by \code{player1} and \code{player2}. The rows of data frames that do not contain contest-level data must correspond to the levels of a factor used for indexing, i.e. row 1 corresponds to level 1, etc. Note any rownames are ignored. Objects are searched for first in the \code{data} object if provided, then in the environment of \code{formula}. If \code{data} is a list, the data frames are searched in the order given.} \item{weights}{an optional numeric vector of \sQuote{prior weights}.} \item{subset}{an optional logical or numeric vector specifying a subset of observations to be used in the fitting process.} \item{na.action}{a function which indicates what should happen when any contest-level variables contain \code{NA}s. The default is the \code{na.action} setting of \code{options}. 
See details for the handling of missing values in other variables.} \item{start}{a vector of starting values for the fixed effects.} \item{etastart}{a vector of starting values for the linear predictor.} \item{mustart}{a vector of starting values for the vector of means.} \item{offset}{an optional offset term in the model. A vector of length equal to the number of contests.} \item{br}{logical. If \code{TRUE} fitting will be by penalized maximum likelihood as in Firth (1992, 1993), using \code{\link[brglm:brglm]{brglm::brglm()}}, rather than maximum likelihood using \code{\link[=glm]{glm()}}, when abilities are modelled exactly or when the abilities are modelled by covariates and the variance of the random effects is estimated as zero.} \item{model}{logical: whether or not to return the model frame.} \item{x}{logical: whether or not to return the design matrix for the fixed effects.} \item{contrasts}{an optional list specifying contrasts for the factors in \code{formula}. See the \code{contrasts.arg} of \code{\link[=model.matrix]{model.matrix()}}.} \item{\dots}{other arguments for fitting function (currently either \code{\link[=glm]{glm()}}, \code{\link[brglm:brglm]{brglm::brglm()}}, or \code{\link[=glmmPQL]{glmmPQL()}})} } \value{ An object of class \code{c("BTm", "x")}, where \code{"x"} is the class of object returned by the model fitting function (e.g. \code{glm}). Components are as for objects of class \code{"x"}, with additionally \item{id}{the \code{id} argument.} \item{separate.ability}{the \code{separate.ability} argument.} \item{refcat}{the \code{refcat} argument.} \item{player1}{a data frame for the first player containing the ID factor and any player-specific contest-level variables.} \item{player2}{a data frame corresponding to that for \code{player1}.} \item{assign}{a numeric vector indicating which coefficients correspond to which terms in the model.} \item{term.labels}{labels for the model terms.} \item{random}{for models with random effects, the design matrix for the random effects. } } \description{ Fits Bradley-Terry models for pair comparison data, including models with structured scores, order effect and missing covariate data. Fits by either maximum likelihood or maximum penalized likelihood (with Jeffreys-prior penalty) when abilities are modelled exactly, or by penalized quasi-likelihood when abilities are modelled by covariates. } \details{ In each comparison to be modelled there is a 'first player' and a 'second player' and it is assumed that one player wins while the other loses (no allowance is made for tied comparisons). The \code{\link[=countsToBinomial]{countsToBinomial()}} function is provided to convert a contingency table of wins into a data frame of wins and losses for each pair of players. The \code{formula} argument specifies the model for player ability and applies to both the first player and the second player in each contest. If \code{NULL} a separate ability is estimated for each player, equivalent to setting \code{formula = reformulate(id)}. Contest-level variables can be specified in the formula in the usual manner, see \code{\link[=formula]{formula()}}. Player covariates should be included as variables indexed by \code{id}, see examples. Thus player covariates must be ordered according to the levels of the ID factor. If \code{formula} includes player covariates and there are players with missing values over these covariates, then a separate ability will be estimated for those players. 
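
For instance, with player covariates held in a second data frame, as in the \code{flatlizards} data supplied with this package, a structured-ability model might be specified as in the following sketch (a reduced version of the model fitted in \code{example(flatlizards)}; the object name \code{lizModel} is arbitrary):

\preformatted{
## player covariates are indexed by the default ID factor ".."
result <- rep(1, nrow(flatlizards$contests))
lizModel <- BTm(result, winner, loser,
                formula = ~ throat.PC1[..] + SVL[..],
                data = flatlizards)
}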
When player abilities are modelled by covariates, then random player effects should be added to the model. These should be specified in the formula using the vertical bar notation of \code{\link[lme4:lmer]{lme4::lmer()}}, see examples. When specified, it is assumed that random player effects arise from a \eqn{N(0, }{N(0, sigma^2)}\eqn{ \sigma^2)}{N(0, sigma^2)} distribution and model parameters, including \eqn{\sigma}{sigma}, are estimated using PQL (Breslow and Clayton, 1993) as implemented in the \code{\link[=glmmPQL]{glmmPQL()}} function. } \examples{ ######################################################## ## Statistics journal citation data from Stigler (1994) ## -- see also Agresti (2002, p448) ######################################################## ## Convert frequencies to success/failure data citations.sf <- countsToBinomial(citations) names(citations.sf)[1:2] <- c("journal1", "journal2") ## First fit the "standard" Bradley-Terry model citeModel <- BTm(cbind(win1, win2), journal1, journal2, data = citations.sf) ## Now the same thing with a different "reference" journal citeModel2 <- update(citeModel, refcat = "JASA") BTabilities(citeModel2) ################################################################## ## Now an example with an order effect -- see Agresti (2002) p438 ################################################################## data(baseball) # start with baseball data as provided by package ## Simple Bradley-Terry model, ignoring home advantage: baseballModel1 <- BTm(cbind(home.wins, away.wins), home.team, away.team, data = baseball, id = "team") ## Now incorporate the "home advantage" effect baseball$home.team <- data.frame(team = baseball$home.team, at.home = 1) baseball$away.team <- data.frame(team = baseball$away.team, at.home = 0) baseballModel2 <- update(baseballModel1, formula = ~ team + at.home) ## Compare the fit of these two models: anova(baseballModel1, baseballModel2) ## ## For a more elaborate example with both player-level and contest-level ## predictor variables, see help(chameleons). ## } \references{ Agresti, A. (2002) \emph{Categorical Data Analysis} (2nd ed). New York: Wiley. Firth, D. (1992) Bias reduction, the Jeffreys prior and GLIM. In \emph{Advances in GLIM and Statistical Modelling}, Eds. Fahrmeir, L., Francis, B. J., Gilchrist, R. and Tutz, G., pp91--100. New York: Springer. Firth, D. (1993) Bias reduction of maximum likelihood estimates. \emph{Biometrika} \strong{80}, 27--38. Firth, D. (2005) Bradley-Terry models in R. \emph{Journal of Statistical Software}, \strong{12}(1), 1--12. Stigler, S. (1994) Citation patterns in the journals of statistics and probability. \emph{Statistical Science} \strong{9}, 94--108. Turner, H. and Firth, D. (2012) Bradley-Terry models in R: The BradleyTerry2 package. \emph{Journal of Statistical Software}, \strong{48}(9), 1--21. 
} \seealso{ \code{\link[=countsToBinomial]{countsToBinomial()}}, \code{\link[=glmmPQL]{glmmPQL()}}, \code{\link[=BTabilities]{BTabilities()}}, \code{\link[=residuals.BTm]{residuals.BTm()}}, \code{\link[=add1.BTm]{add1.BTm()}}, \code{\link[=anova.BTm]{anova.BTm()}} } \author{ Heather Turner, David Firth } \keyword{models} BradleyTerry2/man/predict.BTglmmPQL.Rd0000644000176200001440000000727614775237530017227 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/predict.BTglmmPQL.R \name{predict.BTglmmPQL} \alias{predict.BTglmmPQL} \title{Predict Method for BTglmmPQL Objects} \usage{ \method{predict}{BTglmmPQL}( object, newdata = NULL, newrandom = NULL, level = ifelse(object$sigma == 0, 0, 1), type = c("link", "response", "terms"), se.fit = FALSE, terms = NULL, na.action = na.pass, ... ) } \arguments{ \item{object}{a fitted object of class \code{"BTglmmPQL"}} \item{newdata}{(optional) a data frame in which to look for variables with which to predict. If omitted, the fitted linear predictors are used.} \item{newrandom}{if \code{newdata} is provided, a corresponding design matrix for the random effects, will columns corresponding to the random effects estimated in the original model.} \item{level}{an integer vector giving the level(s) at which predictions are required. Level zero corresponds to population-level predictions (fixed effects only), whilst level one corresponds to the individual-level predictions (full model) which are NA for contests involving individuals not in the original data. By default \code{level = 0} if the model converged to a fixed effects model, \code{1} otherwise.} \item{type}{the type of prediction required. The default is on the scale of the linear predictors; the alternative \code{"response"} is on the scale of the response variable. Thus for a default binomial model the default predictions are of log-odds (probabilities on logit scale) and \code{type = "response"} gives the predicted probabilities. The \code{"terms"} option returns a matrix giving the fitted values of each term in the model formula on the linear predictor scale (fixed effects only).} \item{se.fit}{logical switch indicating if standard errors are required.} \item{terms}{with \code{type ="terms"} by default all terms are returned. A character vector specifies which terms are to be returned.} \item{na.action}{function determining what should be done with missing values in \code{newdata}. The default is to predict \code{NA}.} \item{\dots}{further arguments passed to or from other methods.} } \value{ If \code{se.fit = FALSE}, a vector or matrix of predictions. If \code{se = TRUE}, a list with components \item{fit }{Predictions} \item{se.fit }{Estimated standard errors} } \description{ Obtain predictions and optionally standard errors of those predictions from a \code{"BTglmmPQL"} object. } \details{ If \code{newdata} is omitted the predictions are based on the data used for the fit. In that case how cases with missing values in the original fit are treated is determined by the \code{na.action} argument of that fit. If \code{na.action = na.omit} omitted cases will not appear in the residuals, whereas if \code{na.action = na.exclude} they will appear (in predictions and standard errors), with residual value \code{NA}. See also \code{napredict}. Standard errors for the predictions are approximated assuming the variance of the random effects is known, see Booth and Hobert (1998). 
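
For example, approximate standard errors can be requested along with the predictions (a minimal sketch, reusing the \code{seedsModel} fitted in the Examples below):

\preformatted{
## approximate standard errors, treating the random-effect variance as known
predict(seedsModel, se.fit = TRUE)
}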
} \examples{ seedsModel <- glmmPQL(cbind(r, n - r) ~ seed + extract, random = diag(nrow(seeds)), family = binomial, data = seeds) pred <- predict(seedsModel, level = 0) predTerms <- predict(seedsModel, type = "terms") all.equal(pred, rowSums(predTerms) + attr(predTerms, "constant")) } \references{ Booth, J. G. and Hobert, J. P. (1998). Standard errors of prediction in Generalized Linear Mixed Models. \emph{Journal of the American Statistical Association} \strong{93}(441), 262 -- 272. } \seealso{ \code{\link[=predict.glm]{predict.glm()}}, \code{\link[=predict.BTm]{predict.BTm()}} } \author{ Heather Turner } \keyword{models} BradleyTerry2/man/springall.Rd0000644000176200001440000000571614775673305016033 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/springall.R \docType{data} \name{springall} \alias{springall} \title{Springall (1973) Data on Subjective Evaluation of Flavour Strength} \format{ A list containing two data frames, \code{springall$contests} and \code{springall$predictors}. The \code{springall$contests} data frame has 36 observations (one for each possible pairwise comparison of the 9 treatments) on the following 7 variables: \describe{ \item{row}{a factor with levels \code{1:9}, the row number in Springall's dataset} # \item{col}{a factor with levels \code{1:9}, the column number in Springall's dataset} \item{win}{integer, the number of wins for column treatment over row treatment} \item{loss}{integer, the number of wins for row treatment over column treatment} \item{tie}{integer, the number of ties between row and column treatments} \item{win.adj}{numeric, equal to \code{win + tie/2}} \item{loss.adj}{numeric, equal to \code{loss + tie/2}} } The \code{predictors} data frame has 9 observations (one for each treatment) on the following 5 variables: \describe{ \item{flav}{numeric, the flavour concentration} \item{gel}{numeric, the gel concentration} \item{flav.2}{numeric, equal to \code{flav^2}} \item{gel.2}{numeric, equal to \code{gel^2}} \item{flav.gel}{numeric, equal to \code{flav * gel}} } } \source{ Springall, A (1973) Response surface fitting using a generalization of the Bradley-Terry paired comparison method. \emph{Applied Statistics} \strong{22}, 59--68. } \usage{ springall } \description{ Data from Section 7 of the paper by Springall (1973) on Bradley-Terry response surface modelling. An experiment to assess the effects of gel and flavour concentrations on the subjective assessment of flavour strength by pair comparisons. } \details{ The variables \code{win.adj} and \code{loss.adj} are provided in order to allow a simple way of handling ties (in which a tie counts as half a win and half a loss), which is slightly different numerically from the Rao and Kupper (1967) model that Springall (1973) uses. } \examples{ ## ## Fit the same response-surface model as in section 7 of ## Springall (1973). ## ## Differences from Springall's fit are minor, arising from the ## different treatment of ties. ## ## Springall's model in the paper does not include the random effect. ## In this instance, however, that makes no difference: the random-effect ## variance is estimated as zero. ## summary(springall.model <- BTm(cbind(win.adj, loss.adj), col, row, ~ flav[..] + gel[..] + flav.2[..] + gel.2[..] + flav.gel[..] + (1 | ..), data = springall)) } \references{ Rao, P. V. and Kupper, L. L. (1967) Ties in paired-comparison experiments: a generalization of the Bradley-Terry model. 
\emph{Journal of the American Statistical Association}, \strong{63}, 194--204. } \author{ David Firth } \keyword{datasets} BradleyTerry2/man/predict.BTm.Rd0000644000176200001440000001771014775673305016150 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/predict.BTm.R \name{predict.BTm} \alias{predict.BTm} \title{Predict Method for Bradley-Terry Models} \usage{ \method{predict}{BTm}( object, newdata = NULL, level = ifelse(is.null(object$random), 0, 1), type = c("link", "response", "terms"), se.fit = FALSE, dispersion = NULL, terms = NULL, na.action = na.pass, ... ) } \arguments{ \item{object}{a fitted object of class \code{"BTm"}} \item{newdata}{(optional) a data frame in which to look for variables with which to predict. If omitted, the fitted linear predictors are used.} \item{level}{for models with random effects: an integer vector giving the level(s) at which predictions are required. Level zero corresponds to population-level predictions (fixed effects only), whilst level one corresponds to the player-level predictions (full model) which are NA for contests involving players not in the original data. By default, \code{level = 0} for a fixed effects model, \code{1} otherwise.} \item{type}{the type of prediction required. The default is on the scale of the linear predictors; the alternative \code{"response"} is on the scale of the response variable. Thus for a default Bradley-Terry model the default predictions are of log-odds (probabilities on logit scale) and \code{type = "response"} gives the predicted probabilities. The \code{"terms"} option returns a matrix giving the fitted values of each term in the model formula on the linear predictor scale (fixed effects only).} \item{se.fit}{logical switch indicating if standard errors are required.} \item{dispersion}{a value for the dispersion, not used for models with random effects. If omitted, that returned by \code{summary} applied to the object is used, where applicable.} \item{terms}{with \code{type ="terms"} by default all terms are returned. A character vector specifies which terms are to be returned.} \item{na.action}{function determining what should be done with missing values in \code{newdata}. The default is to predict \code{NA}.} \item{\dots}{further arguments passed to or from other methods.} } \value{ If \code{se.fit = FALSE}, a vector or matrix of predictions. If \code{se = TRUE}, a list with components \item{fit }{Predictions} \item{se.fit }{Estimated standard errors} } \description{ Obtain predictions and optionally standard errors of those predictions from a fitted Bradley-Terry model. } \details{ If \code{newdata} is omitted the predictions are based on the data used for the fit. In that case how cases with missing values in the original fit are treated is determined by the \code{na.action} argument of that fit. If \code{na.action = na.omit} omitted cases will not appear in the residuals, whereas if \code{na.action = na.exclude} they will appear (in predictions and standard errors), with residual value \code{NA}. See also \code{napredict}. } \examples{ ## The final model in example(flatlizards) result <- rep(1, nrow(flatlizards$contests)) Whiting.model3 <- BTm(1, winner, loser, ~ throat.PC1[..] + throat.PC3[..] + head.length[..] + SVL[..] 
+ (1|..), family = binomial(link = "probit"), data = flatlizards, trace = TRUE) ## `new' data for contests between four of the original lizards ## factor levels must correspond to original levels, but unused levels ## can be dropped - levels must match rows of predictors newdata <- list(contests = data.frame( winner = factor(c("lizard048", "lizard060"), levels = c("lizard006", "lizard011", "lizard048", "lizard060")), loser = factor(c("lizard006", "lizard011"), levels = c("lizard006", "lizard011", "lizard048", "lizard060")) ), predictors = flatlizards$predictors[c(3, 6, 27, 33), ]) predict(Whiting.model3, level = 1, newdata = newdata) ## same as predict(Whiting.model3, level = 1)[1:2] ## introducing a new lizard newpred <- rbind(flatlizards$predictors[c(3, 6, 27), c("throat.PC1","throat.PC3", "SVL", "head.length")], c(-5, 1.5, 1, 0.1)) rownames(newpred)[4] <- "lizard059" newdata <- list(contests = data.frame( winner = factor(c("lizard048", "lizard059"), levels = c("lizard006", "lizard011", "lizard048", "lizard059")), loser = factor(c("lizard006", "lizard011"), levels = c("lizard006", "lizard011", "lizard048", "lizard059")) ), predictors = newpred) ## can only predict at population level for contest with new lizard predict(Whiting.model3, level = 0:1, se.fit = TRUE, newdata = newdata) ## predicting at specific levels of covariates ## consider a model from example(CEMS) table6.model <- BTm(outcome = cbind(win1.adj, win2.adj), player1 = school1, player2 = school2, formula = ~ .. + WOR[student] * Paris[..] + WOR[student] * Milano[..] + WOR[student] * Barcelona[..] + DEG[student] * St.Gallen[..] + STUD[student] * Paris[..] + STUD[student] * St.Gallen[..] + ENG[student] * St.Gallen[..] + FRA[student] * London[..] + FRA[student] * Paris[..] + SPA[student] * Barcelona[..] + ITA[student] * London[..] + ITA[student] * Milano[..] + SEX[student] * Milano[..], refcat = "Stockholm", data = CEMS) ## estimate abilities for a combination not seen in the original data ## same schools schools <- levels(CEMS$preferences$school1) ## new student data students <- data.frame(STUD = "other", ENG = "good", FRA = "good", SPA = "good", ITA = "good", WOR = "yes", DEG = "no", SEX = "female", stringsAsFactors = FALSE) ## set levels to be the same as original data for (i in seq_len(ncol(students))){ students[,i] <- factor(students[,i], levels(CEMS$students[,i])) } newdata <- list(preferences = data.frame(student = factor(500), # new id matching with `students[1,]` school1 = factor("London", levels = schools), school2 = factor("Paris", levels = schools)), students = students, schools = CEMS$schools) ## warning can be ignored as model specification was over-parameterized predict(table6.model, newdata = newdata) ## if treatment contrasts are use (i.e. 
one player is set as the reference ## category), then predicting the outcome of contests against the reference ## is equivalent to estimating abilities with specific covariate values ## add student with all values at reference levels students <- rbind(students, data.frame(STUD = "other", ENG = "good", FRA = "good", SPA = "good", ITA = "good", WOR = "no", DEG = "no", SEX = "female", stringsAsFactors = FALSE)) ## set levels to be the same as original data for (i in seq_len(ncol(students))){ students[,i] <- factor(students[,i], levels(CEMS$students[,i])) } newdata <- list(preferences = data.frame(student = factor(rep(c(500, 502), each = 6)), school1 = factor(schools, levels = schools), school2 = factor("Stockholm", levels = schools)), students = students, schools = CEMS$schools) predict(table6.model, newdata = newdata, se.fit = TRUE) ## the second set of predictions (elements 7-12) are equivalent to the output ## of BTabilities; the first set are adjusted for `WOR` being equal to "yes" BTabilities(table6.model) } \seealso{ \code{\link[=predict.glm]{predict.glm()}}, \code{\link[MASS:predict.glmmPQL]{MASS::predict.glmmPQL()}} } \author{ Heather Turner } \keyword{models} BradleyTerry2/man/football.Rd0000644000176200001440000000443314775673305015631 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/football.R \docType{data} \name{football} \alias{football} \title{English Premier League Football Results 2008/9 to 2012/13} \format{ A data frame with 1881 observations on the following 4 variables. \describe{ \item{season}{a factor with levels \code{2008-9}, \code{2009-10}, \code{2010-11}, \code{2011-12}, \code{2012-13}} \item{home}{a factor specifying the home team, with 29 levels \code{Ars} (Arsenal), ... , \code{Wol} (Wolverhampton)} \item{away}{a factor specifying the away team, with the same levels as \code{home}.} \item{result}{a numeric vector giving the result for the home team: 1 for a win, 0 for a draw, -1 for a loss.} } } \source{ These data were downloaded from http://soccernet.espn.go.com in 2013. The site has since moved and the new site does not appear to have an equivalent source. } \usage{ football } \description{ The win/lose/draw results for five seasons of English Premier League football, from 2008/9 to 2012/13 } \details{ In each season, there are 20 teams, each of which plays one home game and one away game against all the other teams in the league. This results in 380 games per season. } \examples{ ### example requires gnm if (require(gnm)) { ### convert to trinomial counts football.tri <- expandCategorical(football, "result", idvar = "match") head(football.tri) ### add variable to indicate whether team playing at home football.tri$at.home <- !logical(nrow(football.tri)) ### fit Davidson model for ties ### - subset to first and last season for illustration Davidson <- gnm(count ~ GenDavidson(result == 1, result == 0, result == -1, home:season, away:season, home.adv = ~1, tie.max = ~1, at.home1 = at.home, at.home2 = !at.home) - 1, eliminate = match, family = poisson, data = football.tri, subset = season \%in\% c("2008-9", "2012-13")) ### see ?GenDavidson for further analysis } } \references{ Davidson, R. R. (1970). On extending the Bradley-Terry model to accommodate ties in paired comparison experiments. \emph{Journal of the American Statistical Association}, \strong{65}, 317--328.
} \seealso{ \code{\link[=GenDavidson]{GenDavidson()}} } \keyword{datasets} BradleyTerry2/man/seeds.Rd0000644000176200001440000000223214775673305015131 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/seeds.R \docType{data} \name{seeds} \alias{seeds} \title{Seed Germination Data from Crowder (1978)} \format{ A data frame with 21 observations on the following 4 variables. \describe{ \item{r}{the number of germinated seeds.} \item{n}{the total number of seeds.} \item{seed}{the seed variety.} \item{extract}{the type of root extract.} } } \source{ Crowder, M. (1978) Beta-Binomial ANOVA for proportions. \emph{Applied Statistics}, \strong{27}, 34--37. } \usage{ seeds } \description{ Data from Crowder(1978) giving the proportion of seeds germinated for 21 plates that were arranged according to a 2x2 factorial layout by seed variety and type of root extract. } \examples{ summary(glmmPQL(cbind(r, n - r) ~ seed + extract, random = diag(nrow(seeds)), family = binomial, data = seeds)) } \references{ Breslow, N. E. and Clayton, D. G. (1993) Approximate inference in Generalized Linear Mixed Models. \emph{Journal of the American Statistical Association}, \strong{88}(421), 9--25. } \seealso{ \code{\link[=glmmPQL]{glmmPQL()}} } \keyword{datasets} BradleyTerry2/man/glmmPQL.control.Rd0000644000176200001440000000441314775237530017015 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/glmmPQL.control.R \name{glmmPQL.control} \alias{glmmPQL.control} \title{Control Aspects of the glmmPQL Algorithm} \usage{ glmmPQL.control(maxiter = 50, IWLSiter = 10, tol = 1e-06, trace = FALSE) } \arguments{ \item{maxiter}{the maximum number of outer iterations.} \item{IWLSiter}{the maximum number of iterated weighted least squares iterations used to estimate the fixed effects, given the standard deviation of the random effects.} \item{tol}{the tolerance used to determine convergence in the IWLS iterations and over all (see details).} \item{trace}{logical: whether or not to print the score for the random effects variance at the end of each iteration.} } \value{ A list with the arguments as components. } \description{ Set control variables for the glmmPQL algorithm. } \details{ This function provides an interface to control the PQL algorithm used by \code{\link[=BTm]{BTm()}} for fitting Bradley Terry models with random effects. The algorithm iterates between a series of iterated weighted least squares iterations to update the fixed effects and a single Fisher scoring iteration to update the standard deviation of the random effects. Convergence of both the inner and outer iterations are judged by comparing the squared components of the relevant score vector with corresponding elements of the diagonal of the Fisher information matrix. If, for all components of the relevant score vector, the ratio is less than \code{tolerance^2}, or the corresponding diagonal element of the Fisher information matrix is less than 1e-20, iterations cease. } \examples{ ## Variation on example(flatlizards) result <- rep(1, nrow(flatlizards$contests)) ## BTm passes arguments on to glmmPQL.control() args(BTm) BTmodel <- BTm(result, winner, loser, ~ throat.PC1[..] + throat.PC3[..] + head.length[..] + SVL[..] + (1|..), data = flatlizards, tol = 1e-3, trace = TRUE) summary(BTmodel) } \references{ Breslow, N. E. and Clayton, D. G. (1993), Approximate inference in Generalized Linear Mixed Models. 
\emph{Journal of the American Statistical Association} \strong{88}(421), 9--25. } \seealso{ \code{\link[=glmmPQL]{glmmPQL()}}, \code{\link[=BTm]{BTm()}} } \author{ Heather Turner } \keyword{models} BradleyTerry2/man/GenDavidson.Rd0000644000176200001440000002137514775237530016224 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/GenDavidson.R \name{GenDavidson} \alias{GenDavidson} \title{Specify a Generalised Davidson Term in a gnm Model Formula} \usage{ GenDavidson( win, tie, loss, player1, player2, home.adv = NULL, tie.max = ~1, tie.mode = NULL, tie.scale = NULL, at.home1 = NULL, at.home2 = NULL ) } \arguments{ \item{win}{a logical vector: \code{TRUE} if player1 wins, \code{FALSE} otherwise.} \item{tie}{a logical vector: \code{TRUE} if the outcome is a tie, \code{FALSE} otherwise.} \item{loss}{a logical vector: \code{TRUE} if player1 loses, \code{FALSE} otherwise.} \item{player1}{an ID factor specifying the first player in each contest, with the same set of levels as \code{player2}.} \item{player2}{an ID factor specifying the second player in each contest, with the same set of levels as \code{player1}.} \item{home.adv}{a formula for the parameter corresponding to the home advantage effect. If \code{NULL}, no home advantage effect is estimated.} \item{tie.max}{a formula for the parameter corresponding to the maximum tie probability.} \item{tie.mode}{a formula for the parameter corresponding to the location of maximum tie probability, in terms of the probability that \code{player1} wins, given the outcome is not a draw.} \item{tie.scale}{a formula for the parameter corresponding to the scale of dependence of the tie probability on the probability that \code{player1} wins, given the outcome is not a draw.} \item{at.home1}{a logical vector: \code{TRUE} if \code{player1} is at home, \code{FALSE} otherwise.} \item{at.home2}{a logical vector: \code{TRUE} if \code{player2} is at home, \code{FALSE} otherwise.} } \value{ A list with the anticipated components of a "nonlin" function: \item{ predictors }{ the formulae for the different parameters and the ID factors for player 1 and player 2. } \item{ variables }{ the outcome variables and the \dQuote{at home} variables, if specified. } \item{ common }{ an index to specify that common effects are to be estimated for the players. } \item{ term }{ a function to create a deparsed mathematical expression of the term, given labels for the predictors.} \item{ start }{ a function to generate starting values for the parameters.} } \description{ GenDavidson is a function of class \code{"nonlin"} to specify a generalised Davidson term in the formula argument to \code{\link[gnm:gnm]{gnm::gnm()}}, providing a model for paired comparison data where ties are a possible outcome. } \details{ \code{GenDavidson} specifies a generalisation of the Davidson model (1970) for paired comparisons where a tie is a possible outcome. It is designed for modelling trinomial counts corresponding to the win/draw/loss outcome for each contest, which are assumed Poisson conditional on the total count for each match.
Since this total must be one, the expected counts are equivalently the probabilities for each possible outcome, which are modelled on the log scale: \deqn{\log(p(i \textrm{beats} j)_k) = \theta_{ijk} + \log(\mu\alpha_i}{log(p(i beats j)_k) = theta_{ijk} + log(mu * alpha_i)} \deqn{\log(p(draw)_k) = \theta_{ijk} + \delta + c + }{ log(p(draw)_k) = theta_{ijk} + log(delta) + c + sigma * (pi * log(mu * alpha_i) + (1 - pi) * log(alpha_j)) + (1 - sigma) * log(mu * alpha_i + alpha_j) }\deqn{ \sigma(\pi\log(\mu\alpha_i) - (1 - \pi)log(\alpha_j)) + }{ log(p(draw)_k) = theta_{ijk} + log(delta) + c + sigma * (pi * log(mu * alpha_i) + (1 - pi) * log(alpha_j)) + (1 - sigma) * log(mu * alpha_i + alpha_j) }\deqn{ (1 - \sigma)(\log(\mu\alpha_i + \alpha_j))}{ log(p(draw)_k) = theta_{ijk} + log(delta) + c + sigma * (pi * log(mu * alpha_i) + (1 - pi) * log(alpha_j)) + (1 - sigma) * log(mu * alpha_i + alpha_j) } \deqn{\log(p(j \textrm{beats} i)_k) = \theta_{ijk} + }{log(p(j beats i)_k) = theta_{ijk} + log(alpha_j)}\deqn{ log(\alpha_j)}{log(p(j beats i)_k) = theta_{ijk} + log(alpha_j)} Here \eqn{\theta_{ijk}}{theta_{ijk}} is a structural parameter to fix the trinomial totals; \eqn{\mu}{mu} is the home advantage parameter; \eqn{\alpha_i}{alpha_i} and \eqn{\alpha_j}{alpha_j} are the abilities of players \eqn{i} and \eqn{j} respectively; \eqn{c}{c} is a function of the parameters such that \eqn{\textrm{expit}(\delta)}{plogis(delta)} is the maximum probability of a tie, \eqn{\sigma}{sigma} scales the dependence of the probability of a tie on the relative abilities and \eqn{\pi}{pi} allows for asymmetry in this dependence. For parameters that must be positive (\eqn{\alpha_i, \sigma, \mu}{alpha, sigma, mu}), the log is estimated, while for parameters that must be between zero and one (\eqn{\delta, \pi}), the logit is estimated, as illustrated in the example. 
} \examples{ ### example requires gnm if (require(gnm)) { ### convert to trinomial counts football.tri <- expandCategorical(football, "result", idvar = "match") head(football.tri) ### add variable to indicate whether team playing at home football.tri$at.home <- !logical(nrow(football.tri)) ### fit shifted & scaled Davidson model ### - subset to first and last season for illustration shifScalDav <- gnm(count ~ GenDavidson(result == 1, result == 0, result == -1, home:season, away:season, home.adv = ~1, tie.max = ~1, tie.scale = ~1, tie.mode = ~1, at.home1 = at.home, at.home2 = !at.home) - 1, eliminate = match, family = poisson, data = football.tri, subset = season \%in\% c("2008-9", "2012-13")) ### look at coefs coef <- coef(shifScalDav) ## home advantage exp(coef["home.adv"]) ## max p(tie) plogis(coef["tie.max"]) ## mode p(tie) plogis(coef["tie.mode"]) ## scale relative to Davidson of dependence of p(tie) on p(win|not a draw) exp(coef["tie.scale"]) ### check model fit alpha <- names(coef[-(1:4)]) plotProportions(result == 1, result == 0, result == -1, home:season, away:season, abilities = coef[alpha], home.adv = coef["home.adv"], tie.max = coef["tie.max"], tie.scale = coef["tie.scale"], tie.mode = coef["tie.mode"], at.home1 = at.home, at.home2 = !at.home, data = football.tri, subset = count == 1) } ### analyse all five seasons ### - takes a little while to run, particularly likelihood ratio tests \dontrun{ ### fit Davidson model Dav <- gnm(count ~ GenDavidson(result == 1, result == 0, result == -1, home:season, away:season, home.adv = ~1, tie.max = ~1, at.home1 = at.home, at.home2 = !at.home) - 1, eliminate = match, family = poisson, data = football.tri) ### fit scaled Davidson model scalDav <- gnm(count ~ GenDavidson(result == 1, result == 0, result == -1, home:season, away:season, home.adv = ~1, tie.max = ~1, tie.scale = ~1, at.home1 = at.home, at.home2 = !at.home) - 1, eliminate = match, family = poisson, data = football.tri) ### fit shifted & scaled Davidson model shifScalDav <- gnm(count ~ GenDavidson(result == 1, result == 0, result == -1, home:season, away:season, home.adv = ~1, tie.max = ~1, tie.scale = ~1, tie.mode = ~1, at.home1 = at.home, at.home2 = !at.home) - 1, eliminate = match, family = poisson, data = football.tri) ### compare models anova(Dav, scalDav, shifScalDav, test = "Chisq") ### diagnostic plots main <- c("Davidson", "Scaled Davidson", "Shifted & Scaled Davidson") mod <- list(Dav, scalDav, shifScalDav) names(mod) <- main ## use football.tri data so that at.home can be found, ## but restrict to actual match results par(mfrow = c(2,2)) for (i in 1:3) { coef <- parameters(mod[[i]]) plotProportions(result == 1, result == 0, result == -1, home:season, away:season, abilities = coef[alpha], home.adv = coef["home.adv"], tie.max = coef["tie.max"], tie.scale = coef["tie.scale"], tie.mode = coef["tie.mode"], at.home1 = at.home, at.home2 = !at.home, main = main[i], data = football.tri, subset = count == 1) } } } \references{ Davidson, R. R. (1970). On extending the Bradley-Terry model to accommodate ties in paired comparison experiments. \emph{Journal of the American Statistical Association}, \strong{65}, 317--328. 
} \seealso{ \code{\link[=football]{football()}}, \code{\link[=plotProportions]{plotProportions()}} } \author{ Heather Turner } \keyword{models} \keyword{nonlinear} BradleyTerry2/man/citations.Rd0000644000176200001440000000330014775673305016020 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/citations.R \docType{data} \name{citations} \alias{citations} \title{Statistics Journal Citation Data from Stigler (1994)} \format{ A 4 by 4 contingency table of citations, cross-classified by the factors \code{cited} and \code{citing} each with levels \code{Biometrika}, \verb{Comm Statist}, \code{JASA}, and \code{JRSS-B}. } \source{ Agresti, A. (2002) \emph{Categorical Data Analysis} (2nd ed). New York: Wiley. } \usage{ citations } \description{ Extracted from a larger table in Stigler (1994). Inter-journal citation counts for four journals, \dQuote{Biometrika}, \dQuote{Comm Statist.}, \dQuote{JASA} and \dQuote{JRSS-B}, as used on p448 of Agresti (2002). } \details{ In the context of paired comparisons, the \sQuote{winner} is the cited journal and the \sQuote{loser} is the one doing the citing. } \examples{ ## Data as a square table, as in Agresti p448 citations ## ## Convert frequencies to success/failure data: ## citations.sf <- countsToBinomial(citations) names(citations.sf)[1:2] <- c("journal1", "journal2") ## Standard Bradley-Terry model fitted to these data citeModel <- BTm(cbind(win1, win2), journal1, journal2, data = citations.sf) } \references{ Firth, D. (2005) Bradley-Terry models in R. \emph{Journal of Statistical Software} \strong{12}(1), 1--12. Turner, H. and Firth, D. (2012) Bradley-Terry models in R: The BradleyTerry2 package. \emph{Journal of Statistical Software}, \strong{48}(9), 1--21. Stigler, S. (1994) Citation patterns in the journals of statistics and probability. \emph{Statistical Science} \strong{9}, 94--108. } \seealso{ \code{\link[=BTm]{BTm()}} } \keyword{datasets} BradleyTerry2/DESCRIPTION0000644000176200001440000000204214776015014014455 0ustar liggesusersPackage: BradleyTerry2 Title: Bradley-Terry Models Version: 1.1.3 Authors@R: c( person("Heather", "Turner", , "ht@heatherturner.net", role = c("aut", "cre")), person("David", "Firth", role = "aut") ) Description: Specify and fit the Bradley-Terry model, including structured versions in which the parameters are related to explanatory variables through a linear predictor and versions with contest-specific effects, such as a home advantage. License: GPL (>= 2) URL: https://github.com/hturner/BradleyTerry2 BugReports: https://github.com/hturner/BradleyTerry2/issues Depends: R (>= 2.10) Imports: brglm, gtools, lme4 (>= 1.0), qvcalc, stats Suggests: bookdown, knitr, litedown, prefmod, testthat Enhances: gnm VignetteBuilder: knitr, litedown Encoding: UTF-8 Language: en-GB LazyData: yes RoxygenNote: 7.3.2 NeedsCompilation: no Packaged: 2025-04-10 10:20:56 UTC; stspao Author: Heather Turner [aut, cre], David Firth [aut] Maintainer: Heather Turner <ht@heatherturner.net> Repository: CRAN Date/Publication: 2025-04-10 19:20:44 UTC