# Study 1.5 Results: Chi-squared test with 5 df, heterogeneity in sample size only.

> rm(list=ls())
> options(scipen=999) # To avoid scientific notation!
> # read.table chokes on large data sets
> simdata = scan("Study1.5.Data.txt") # .1=1df, .3=3df, .5=5df
Read 1600000 items
> nrows = 20*10000; ncols = 8
> dim(simdata) = c(ncols,nrows) # Reversed: R reads by columns, rows
> simdata = t(simdata)
> colnames(simdata) = c("Simulation", "Nstudies", "PopMeanPower", "SampleMeanPower",
+                       "Pcurve", "Puniform", "MaxLike", "Zcurve")
> simdata = data.frame(simdata)
> head(simdata)
  Simulation Nstudies PopMeanPower SampleMeanPower     Pcurve   Puniform
1          1       15         0.05            0.05 0.05024723 0.05315979
2          2       15         0.05            0.05 0.07723133 0.06400400
3          3       15         0.05            0.05 0.05023118 0.05023118
4          4       15         0.05            0.05 0.05024138 0.10573080
5          5       15         0.05            0.05 0.09878703 0.09631671
6          6       15         0.05            0.05 0.12926320 0.06571485
     MaxLike     Zcurve
1 0.05277425 0.03777508
2 0.07438012 0.13293790
3 0.05000000 0.04232557
4 0.08449641 0.18478580
5 0.10818550 0.15665700
6 0.07353902 0.10684770
> # tail(simdata)
> # attach(simdata)
> # Assembling a data frame in univariate format for nice easy tables
> # It's much faster to bind columns together.
> nlines = dim(simdata)[1] # Will be bigger for full data set.
> est = as.matrix(simdata[,5:8]) # Estimates
> unidata = cbind(rep(simdata$Nstudies,each=4), rep(simdata$PopMeanPower,each=4),
+                 rep(simdata$SampleMeanPower,each=4), rep(1:4,times=nlines),
+                 as.numeric(t(est)) )
> colnames(unidata) = c("Nstudies", "PopMeanPower", "SampleMeanPower", "Method",
+                       "Estimate")
> unidata = data.frame(unidata)
> Methods = c("Pcurve","Puniform","MaxLike","Zcurve")
> unidata$Method = factor(unidata$Method ,labels = Methods)
> attach(unidata)
> AbsError = abs(Estimate-SampleMeanPower) # This is the target for a meta-analysis
> Bias = Estimate - PopMeanPower
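A quick aside on the read-in step above: scan() returns one long vector, and R fills a matrix column by column, which is why the dimensions are assigned as c(ncols, nrows) and then transposed. A minimal self-contained check of that idiom (not part of the original session; the toy vector and sizes are made up):

x = 1:12                  # pretend scan() returned 12 numbers: 4 records of 3 fields
ncols = 3; nrows = 4
dim(x) = c(ncols, nrows)  # filled by columns, so each column is one record
x = t(x)                  # transpose so each row is one record
x[1, ]                    # first record: 1 2 3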
> # Call the estimate close enough if it's within 5% of the target.
> CloseEnough = numeric(length(AbsError)); CloseEnough[AbsError<0.05] = 1
> 
> 
> ######################### Sample sizes #############################
> kounts = table(Method,Nstudies, PopMeanPower)
> cat("\n Sample sizes \n\n"); print(kounts)

 Sample sizes 

, , PopMeanPower = 0.05

          Nstudies
Method        15    25    50   100   250
  Pcurve   10000 10000 10000 10000 10000
  Puniform 10000 10000 10000 10000 10000
  MaxLike  10000 10000 10000 10000 10000
  Zcurve   10000 10000 10000 10000 10000

, , PopMeanPower = 0.25

          Nstudies
Method        15    25    50   100   250
  Pcurve   10000 10000 10000 10000 10000
  Puniform 10000 10000 10000 10000 10000
  MaxLike  10000 10000 10000 10000 10000
  Zcurve   10000 10000 10000 10000 10000

, , PopMeanPower = 0.5

          Nstudies
Method        15    25    50   100   250
  Pcurve   10000 10000 10000 10000 10000
  Puniform 10000 10000 10000 10000 10000
  MaxLike  10000 10000 10000 10000 10000
  Zcurve   10000 10000 10000 10000 10000

, , PopMeanPower = 0.75

          Nstudies
Method        15    25    50   100   250
  Pcurve   10000 10000 10000 10000 10000
  Puniform 10000 10000 10000 10000 10000
  MaxLike  10000 10000 10000 10000 10000
  Zcurve   10000 10000 10000 10000 10000

> 
> ###################### Average Performance ######################
> estframe = aggregate(Estimate,by=list(Method,Nstudies, PopMeanPower),FUN=mean, na.rm=T)
> meanest = estframe$x; dim(meanest) = c(4,5,4) # Goes down col1 first
> MeanEstimatedPower = kounts
> for(j in 1:4) MeanEstimatedPower[,,j] = meanest[,,j]
> cat("\n Mean Estimated Power \n\n")

 Mean Estimated Power 

> print(round(MeanEstimatedPower,3))
, , PopMeanPower = 0.05

          Nstudies
Method        15    25    50   100   250
  Pcurve   0.085 0.076 0.066 0.060 0.056
  Puniform 0.077 0.070 0.063 0.058 0.055
  MaxLike  0.077 0.070 0.063 0.058 0.055
  Zcurve   0.083 0.071 0.056 0.047 0.039

, , PopMeanPower = 0.25

          Nstudies
Method        15    25    50   100   250
  Pcurve   0.264 0.258 0.255 0.252 0.250
  Puniform 0.253 0.251 0.251 0.250 0.250
  MaxLike  0.255 0.253 0.252 0.251 0.250
  Zcurve   0.316 0.307 0.295 0.282 0.269

, , PopMeanPower = 0.5

          Nstudies
Method        15    25    50   100   250
  Pcurve   0.486 0.492 0.494 0.498 0.500
  Puniform 0.477 0.487 0.492 0.496 0.499
  MaxLike  0.482 0.490 0.494 0.497 0.500
  Zcurve   0.532 0.535 0.532 0.529 0.525

, , PopMeanPower = 0.75

          Nstudies
Method        15    25    50   100   250
  Pcurve   0.729 0.738 0.744 0.746 0.749
  Puniform 0.724 0.734 0.742 0.746 0.748
  MaxLike  0.729 0.738 0.744 0.746 0.749
  Zcurve   0.722 0.730 0.735 0.739 0.743

> 
> ########################## Standard deviations #########################
> vframe = aggregate(Estimate,by=list(Method,Nstudies, PopMeanPower),FUN=var, na.rm=T)
> vest = vframe$x; dim(vest) = c(4,5,4)
> SDestimatedPower = kounts
> for(j in 1:4) SDestimatedPower[,,j] = sqrt(vest[,,j])
> cat("\n Standard deviations of estimated power \n\n")

 Standard deviations of estimated power 

> print(round(SDestimatedPower,3))
, , PopMeanPower = 0.05

          Nstudies
Method        15    25    50   100   250
  Pcurve   0.059 0.043 0.026 0.017 0.009
  Puniform 0.048 0.034 0.021 0.013 0.008
  MaxLike  0.048 0.035 0.021 0.014 0.008
  Zcurve   0.087 0.067 0.044 0.030 0.018

, , PopMeanPower = 0.25

          Nstudies
Method        15    25    50   100   250
  Pcurve   0.147 0.120 0.087 0.063 0.041
  Puniform 0.136 0.110 0.081 0.057 0.037
  MaxLike  0.136 0.109 0.080 0.057 0.037
  Zcurve   0.159 0.128 0.095 0.069 0.045

, , PopMeanPower = 0.5

          Nstudies
Method        15    25    50   100   250
  Pcurve   0.164 0.130 0.095 0.067 0.043
  Puniform 0.155 0.123 0.090 0.063 0.040
  MaxLike  0.153 0.120 0.087 0.061 0.039
  Zcurve   0.150 0.120 0.091 0.068 0.045

, , PopMeanPower = 0.75

          Nstudies
Method        15    25    50   100   250
  Pcurve   0.120 0.092 0.064 0.044 0.028
  Puniform 0.117 0.090 0.063 0.043 0.027
  MaxLike  0.113 0.086 0.060 0.042 0.026
  Zcurve   0.099 0.080 0.061 0.047 0.034
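Throughout these sections, the 4 x 5 x 4 arrays are rebuilt from aggregate() output with a bare dim() assignment ("Goes down col1 first"). That relies on aggregate() returning its rows with the first grouping variable varying fastest. A small self-contained check of that ordering (not from the original session; the toy data frame and names are made up):

d = expand.grid(m = 1:4, n = 1:5, p = 1:4)   # stand-ins for Method, Nstudies, PopMeanPower
d$y = with(d, 100*p + 10*n + m)              # cell values we can recognize by eye
a = aggregate(d$y, by = list(d$m, d$n, d$p), FUN = mean)
head(a)                      # Group.1 (m) runs 1..4 before Group.2 (n) increments
arr = a$x; dim(arr) = c(4, 5, 4)
arr[2, 3, 4]                 # 432, i.e. the cell with m = 2, n = 3, p = 4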
> 
> ########################## Sample mean true power #########################
> TPframe = aggregate(SampleMeanPower,by=list(Method,Nstudies, PopMeanPower),FUN=mean, na.rm=T)
> tp = TPframe$x; dim(tp) = c(4,5,4)
> TPOW = kounts
> for(j in 1:4) TPOW[,,j] = tp[,,j]
> cat("\n Sample mean true power \n\n")

 Sample mean true power 

> print(round(TPOW,3))
, , PopMeanPower = 0.05

          Nstudies
Method       15   25   50  100  250
  Pcurve   0.05 0.05 0.05 0.05 0.05
  Puniform 0.05 0.05 0.05 0.05 0.05
  MaxLike  0.05 0.05 0.05 0.05 0.05
  Zcurve   0.05 0.05 0.05 0.05 0.05

, , PopMeanPower = 0.25

          Nstudies
Method       15   25   50  100  250
  Pcurve   0.25 0.25 0.25 0.25 0.25
  Puniform 0.25 0.25 0.25 0.25 0.25
  MaxLike  0.25 0.25 0.25 0.25 0.25
  Zcurve   0.25 0.25 0.25 0.25 0.25

, , PopMeanPower = 0.5

          Nstudies
Method       15   25   50  100  250
  Pcurve   0.50 0.50 0.50 0.50 0.50
  Puniform 0.50 0.50 0.50 0.50 0.50
  MaxLike  0.50 0.50 0.50 0.50 0.50
  Zcurve   0.50 0.50 0.50 0.50 0.50

, , PopMeanPower = 0.75

          Nstudies
Method       15   25   50  100  250
  Pcurve   0.75 0.75 0.75 0.75 0.75
  Puniform 0.75 0.75 0.75 0.75 0.75
  MaxLike  0.75 0.75 0.75 0.75 0.75
  Zcurve   0.75 0.75 0.75 0.75 0.75

> 
> ###################### Bias ######################
> biasframe = aggregate(Bias,by=list(Method,Nstudies, PopMeanPower),FUN=mean, na.rm=T)
> meanbias = biasframe$x; dim(meanbias) = c(4,5,4) # Goes down col1 first
> MeanBias = kounts
> for(j in 1:4) MeanBias[,,j] = meanbias[,,j]
> cat("\n Mean Bias \n\n")

 Mean Bias 

> print(round(MeanBias,3))
, , PopMeanPower = 0.05

          Nstudies
Method        15    25    50    100    250
  Pcurve   0.035 0.026 0.016  0.010  0.006
  Puniform 0.027 0.020 0.013  0.008  0.005
  MaxLike  0.027 0.020 0.013  0.008  0.005
  Zcurve   0.033 0.021 0.006 -0.003 -0.011

, , PopMeanPower = 0.25

          Nstudies
Method        15    25    50   100   250
  Pcurve   0.014 0.008 0.005 0.002 0.000
  Puniform 0.003 0.001 0.001 0.000 0.000
  MaxLike  0.005 0.003 0.002 0.001 0.000
  Zcurve   0.066 0.057 0.045 0.032 0.019

, , PopMeanPower = 0.5

          Nstudies
Method         15     25     50    100    250
  Pcurve   -0.014 -0.008 -0.006 -0.002  0.000
  Puniform -0.023 -0.013 -0.008 -0.004 -0.001
  MaxLike  -0.018 -0.010 -0.006 -0.003  0.000
  Zcurve    0.032  0.035  0.032  0.029  0.025

, , PopMeanPower = 0.75

          Nstudies
Method         15     25     50    100    250
  Pcurve   -0.021 -0.012 -0.006 -0.004 -0.001
  Puniform -0.026 -0.016 -0.008 -0.004 -0.002
  MaxLike  -0.021 -0.012 -0.006 -0.004 -0.001
  Zcurve   -0.028 -0.020 -0.015 -0.011 -0.007

> 
> ########################## Z-tests for bias #########################
> # Calculate Z
> ZforBias = kounts
> for(j in 1:4) ZforBias[,,j] =
+     sqrt(kounts[1,1,1])*(MeanEstimatedPower[,,j] - TPOW[,,j])/SDestimatedPower[,,j]
> cat("\n Z-tests for bias \n",
+     " Any Z with |Z|>4.37 is significant.\n\n")

 Z-tests for bias 
  Any Z with |Z|>4.37 is significant.

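A note on what the Z statistics in the table below are: with n = 10,000 simulations per cell and the standard deviations above computed with the usual n-1 denominator, sqrt(n)*(mean - target)/sd is exactly the one-sample t statistic for that cell. A minimal self-contained check (not part of the original session; the beta draws are a made-up stand-in for one cell's 10,000 estimates):

set.seed(4)
x   = rbeta(10000, 2, 8)                            # fake power estimates for one cell
mu0 = 0.2                                           # plays the role of the cell's true power
Z   = sqrt(length(x)) * (mean(x) - mu0) / sd(x)     # the formula used for ZforBias
c(Z, t.test(x, mu = mu0)$statistic)                 # identical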
> print(round(ZforBias,2))
, , PopMeanPower = 0.05

          Nstudies
Method        15    25    50    100    250
  Pcurve   59.51 59.80 60.90  62.63  66.21
  Puniform 55.93 57.25 60.31  62.94  65.70
  MaxLike  56.21 57.22 59.59  61.84  63.72
  Zcurve   38.19 31.91 14.03 -10.42 -60.56

, , PopMeanPower = 0.25

          Nstudies
Method        15    25    50   100   250
  Pcurve    9.49  7.03  5.46  3.78  0.81
  Puniform  2.02  0.81  1.63  0.00 -0.26
  MaxLike   3.90  2.44  2.92  1.28  0.20
  Zcurve   41.25 44.31 47.44 46.86 42.93

, , PopMeanPower = 0.5

          Nstudies
Method         15     25     50    100   250
  Pcurve    -8.68  -5.86  -5.94  -3.52  0.28
  Puniform -14.69 -10.31  -8.81  -6.51 -1.62
  MaxLike  -11.49  -7.99  -7.43  -4.85 -0.52
  Zcurve    21.08  29.04  35.05  43.43 54.93

, , PopMeanPower = 0.75

          Nstudies
Method         15     25     50    100    250
  Pcurve   -17.07 -13.58 -10.20  -8.68  -4.46
  Puniform -22.17 -17.76 -13.33 -10.65  -6.79
  MaxLike  -18.33 -14.41 -10.86  -9.36  -5.38
  Zcurve   -27.87 -25.31 -24.75 -24.36 -20.10

> 
> # Protect the 80 tests in this table with Bonferroni
> # Two-tailed at the 0.001 significance level
> # 0.0005/80 = 0.00000625, and qnorm(0.00000625) = -4.36868
> # So any Z with |Z|>4.37 is significant.
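A hedged check (not part of the original session) of the two Bonferroni-protected critical values quoted in this output; bonfZ is just a hypothetical helper for the arithmetic:

bonfZ = function(alpha, ntests) qnorm(1 - (alpha/ntests)/2)   # two-sided, family-wise alpha
bonfZ(0.001, 80)     # 4.36868  -- the 80 bias Z-tests above
bonfZ(0.001, 120)    # 4.456436 -- the 120 pairwise tests further down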
> 
> ###################### Mean Absolute Error ######################
> # Take a look at marginals to see roughly who won overall
> cat(" Marginal means for method \n")
 Marginal means for method 
> print(aggregate(round(100*AbsError,2),by=list(Method),FUN=mean, na.rm=T))
   Group.1        x
1   Pcurve 5.681586
2 Puniform 5.293300
3  MaxLike 5.186936
4   Zcurve 6.446785
> 
> errframe = aggregate(AbsError,by=list(Method,Nstudies, PopMeanPower),FUN=mean, na.rm=T)
> meanerr = 100*errframe$x; dim(meanerr) = c(4,5,4)
> MeanAbsoluteError = kounts
> for(j in 1:4) MeanAbsoluteError[,,j] = meanerr[,,j]
> cat("\n Mean Absolute Error of Prediction \n\n")

 Mean Absolute Error of Prediction 

> print(round(MeanAbsoluteError,2))
, , PopMeanPower = 0.05

          Nstudies
Method       15   25   50  100  250
  Pcurve   3.51 2.57 1.59 1.05 0.63
  Puniform 2.68 1.97 1.27 0.85 0.52
  MaxLike  2.72 1.98 1.26 0.84 0.50
  Zcurve   6.49 4.98 3.36 2.42 1.82

, , PopMeanPower = 0.25

          Nstudies
Method        15    25   50  100  250
  Pcurve   12.13  9.77 7.01 5.09 3.26
  Puniform 11.16  8.91 6.50 4.63 2.95
  MaxLike  11.13  8.87 6.43 4.58 2.92
  Zcurve   14.00 11.29 8.43 6.09 3.91

, , PopMeanPower = 0.5

          Nstudies
Method        15    25   50  100  250
  Pcurve   13.26 10.44 7.58 5.34 3.39
  Puniform 12.58  9.89 7.15 4.99 3.19
  MaxLike  12.36  9.69 6.99 4.87 3.11
  Zcurve   12.57 10.28 7.83 5.99 4.17

, , PopMeanPower = 0.75

          Nstudies
Method       15   25   50  100  250
  Pcurve   9.18 7.12 5.03 3.50 2.22
  Puniform 9.11 7.01 4.91 3.43 2.16
  MaxLike  8.71 6.71 4.71 3.29 2.08
  Zcurve   7.49 6.27 4.94 3.83 2.79

> 
> ###################### Is estimate close enough? ######################
> 
> closeframe =
+     aggregate(CloseEnough,by=list(Method,Nstudies, PopMeanPower),FUN=mean, na.rm=T)
> klose = closeframe$x; dim(klose) = c(4,5,4)
> ProportionWithin.05 = kounts
> for(j in 1:4) ProportionWithin.05[,,j] = klose[,,j]
> cat("\n Proportion of estimates within 0.05 of target \n\n")

 Proportion of estimates within 0.05 of target 

> print(round(ProportionWithin.05,3))
, , PopMeanPower = 0.05

          Nstudies
Method        15    25    50   100   250
  Pcurve   0.752 0.805 0.898 0.962 0.997
  Puniform 0.807 0.856 0.929 0.980 0.999
  MaxLike  0.802 0.855 0.930 0.980 0.999
  Zcurve   0.679 0.734 0.847 0.934 0.994

, , PopMeanPower = 0.25

          Nstudies
Method        15    25    50   100   250
  Pcurve   0.235 0.300 0.422 0.561 0.780
  Puniform 0.259 0.336 0.447 0.610 0.822
  MaxLike  0.256 0.334 0.453 0.616 0.830
  Zcurve   0.215 0.266 0.363 0.489 0.690

, , PopMeanPower = 0.5

          Nstudies
Method        15    25    50   100   250
  Pcurve   0.232 0.291 0.397 0.546 0.760
  Puniform 0.240 0.310 0.422 0.576 0.787
  MaxLike  0.248 0.314 0.435 0.588 0.798
  Zcurve   0.230 0.280 0.374 0.481 0.654

, , PopMeanPower = 0.75

          Nstudies
Method        15    25    50   100   250
  Pcurve   0.346 0.437 0.577 0.750 0.927
  Puniform 0.349 0.440 0.592 0.761 0.935
  MaxLike  0.363 0.456 0.608 0.780 0.943
  Zcurve   0.448 0.502 0.595 0.709 0.851

> ##################################################################################
> # Need to do all 6 pairwise matched t-tests for mean absolute error, for each
> # combination of Nstudies and PopMeanPower,
> # with a Bonferroni correction for 120 tests in the table.
> # alpha = 0.001; a = alpha/120; critZ = qnorm(1-a/2); critZ # 4.456436
> # Start with simdata, multivariate format. It's best to re-start R.
> ######################################################################################
> 
> rm(list=ls())
> options(scipen=999) # To avoid scientific notation
> alpha = 0.001; a = alpha/120; critZ = qnorm(1-a/2)
> # read.table chokes on large data sets
> simdata = scan("Study1.5.Data.txt") # .1=1df, .3=3df, .5=5df
Read 1600000 items
> nrows = 20*10000; ncols = 8
> dim(simdata) = c(ncols,nrows) # Reversed: R reads by columns, rows
> simdata = t(simdata)
> colnames(simdata) = c("Simulation", "Nstudies", "PopMeanPower", "SampleMeanPower",
+                       "Pcurve", "Puniform", "MaxLike", "Zcurve")
> simdata = data.frame(simdata)
> attach(simdata)
> ae = as.matrix(simdata[,5:8]) # The estimates
> for(kol in 1:4) ae[,kol] = abs(ae[,kol]-SampleMeanPower) # Now they are absolute errors.
> 
> # table(Nstudies, SampleMeanPower) # 5 by 4
> Methods = c("Pcurve","Puniform","MaxLike","Zcurve")
> nstudies = unique(Nstudies); truepower = unique(PopMeanPower)
> nlines = dim(simdata)[1]; index = 1:nlines
> 
> pairwise = matrix(0,4,4); rownames(pairwise) = colnames(pairwise) = Methods
> keepscore = pairwise # Count winners in this matrix
> 
> # This does not have to be efficient, and it isn't.
> 
> cat("\n\n Z-tests for pairwise differences in mean absolute error \n",
+     "Positive value means the row method is less accurate on average. \n",
+     "Critical value Bonferroni protected at the 0.001 level is",critZ,".\n\n")


 Z-tests for pairwise differences in mean absolute error 
 Positive value means the row method is less accurate on average. 
 Critical value Bonferroni protected at the 0.001 level is 4.456436 .

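Each entry in the tables produced by the loop below is a matched comparison: a one-sample t on the per-simulation difference in absolute error between two methods. A minimal self-contained sketch of one such comparison (not from the original session; errA and errB are fabricated stand-ins for two methods' absolute errors across 10,000 simulated meta-analyses):

set.seed(101)
errA = abs(rnorm(10000, 0.00, 0.10))   # absolute errors of an unbiased method
errB = abs(rnorm(10000, 0.05, 0.10))   # a method biased upward by 0.05
differ = errA - errB
t.test(differ)$statistic               # clearly negative here: the first method is more accurate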
> 
> for(i in 1:5) # Looping over Nstudies
+     {
+     for(j in 1:4) # Looping over PopMeanPower
+         {
+         cat("\n Nstudies =",nstudies[i]," and PopMeanPower =",truepower[j],"\n\n")
+         In = (Nstudies==nstudies[i]) * (PopMeanPower==truepower[j])
+         pick = index[In==1]
+         # Fill matrix of pairwise tests in a double loop
+         for(K in 1:4)
+             {
+             for(L in 1:4)
+                 {
+                 differ = (ae[,K]-ae[,L])[pick]
+                 Z = 0
+                 if(K != L) Z = t.test(differ)$statistic
+                 if(K < L) pairwise[K,L] = Z
+                 if(Z < -critZ) keepscore[K,L] = keepscore[K,L]+1
+                 } # Next L
+             } # Next K
+         print(round(pairwise[1:3,2:4],2))
+         } # Next j (PopMeanPower)
+     } # Next i (Nstudies)

 Nstudies = 15  and PopMeanPower = 0.05 

         Puniform MaxLike  Zcurve
Pcurve       23.8   26.30  -77.49
Puniform      0.0   -4.66 -104.17
MaxLike       0.0    0.00 -112.13

 Nstudies = 15  and PopMeanPower = 0.25 

         Puniform MaxLike Zcurve
Pcurve      14.44   18.40 -28.94
Puniform     0.00    1.29 -33.55
MaxLike      0.00    0.00 -39.05

 Nstudies = 15  and PopMeanPower = 0.5 

         Puniform MaxLike Zcurve
Pcurve       9.04   15.58  10.60
Puniform     0.00    7.36   0.07
MaxLike      0.00    0.00  -2.82

 Nstudies = 15  and PopMeanPower = 0.75 

         Puniform MaxLike Zcurve
Pcurve       1.25   11.55  36.54
Puniform     0.00   14.66  22.45
MaxLike      0.00    0.00  20.42

 Nstudies = 25  and PopMeanPower = 0.05 

         Puniform MaxLike  Zcurve
Pcurve      23.23   25.86  -74.70
Puniform     0.00   -1.89 -104.40
MaxLike      0.00    0.00 -109.07

 Nstudies = 25  and PopMeanPower = 0.25 

         Puniform MaxLike Zcurve
Pcurve      15.65   20.15 -26.70
Puniform     0.00    2.33 -33.63
MaxLike      0.00    0.00 -38.60

 Nstudies = 25  and PopMeanPower = 0.5 

         Puniform MaxLike Zcurve
Pcurve       8.88   15.98   2.82
Puniform     0.00    8.55  -4.95
MaxLike      0.00    0.00  -8.98

 Nstudies = 25  and PopMeanPower = 0.75 

         Puniform MaxLike Zcurve
Pcurve       2.48    13.2  21.51
Puniform     0.00    13.6  12.43
MaxLike      0.00     0.0   8.96

 Nstudies = 50  and PopMeanPower = 0.05 

         Puniform MaxLike Zcurve
Pcurve      20.16   23.03 -70.01
Puniform     0.00    4.16 -96.65
MaxLike      0.00    0.00 -98.77

 Nstudies = 50  and PopMeanPower = 0.25 

         Puniform MaxLike Zcurve
Pcurve      12.56   17.57 -30.29
Puniform     0.00    5.91 -35.96
MaxLike      0.00    0.00 -41.33

 Nstudies = 50  and PopMeanPower = 0.5 

         Puniform MaxLike Zcurve
Pcurve       9.43   17.29  -5.60
Puniform     0.00    9.34 -11.20
MaxLike      0.00    0.00 -16.21

 Nstudies = 50  and PopMeanPower = 0.75 

         Puniform MaxLike Zcurve
Pcurve       3.71   15.39   2.84
Puniform     0.00   12.96  -0.53
MaxLike      0.00    0.00  -5.94

 Nstudies = 100  and PopMeanPower = 0.05 

         Puniform MaxLike Zcurve
Pcurve      19.72   22.48 -65.55
Puniform     0.00    5.72 -88.48
MaxLike      0.00    0.00 -88.82

 Nstudies = 100  and PopMeanPower = 0.25 

         Puniform MaxLike Zcurve
Pcurve      15.44   20.56 -28.50
Puniform     0.00    4.93 -36.59
MaxLike      0.00    0.00 -41.41

 Nstudies = 100  and PopMeanPower = 0.5 

         Puniform MaxLike Zcurve
Pcurve      10.82   19.27 -18.28
Puniform     0.00    9.24 -21.71
MaxLike      0.00    0.00 -27.77

 Nstudies = 100  and PopMeanPower = 0.75 

         Puniform MaxLike Zcurve
Pcurve        3.1   14.77 -13.45
Puniform      0.0   12.83 -11.81
MaxLike       0.0    0.00 -18.61

 Nstudies = 250  and PopMeanPower = 0.05 

         Puniform MaxLike Zcurve
Pcurve      19.21   22.61 -71.74
Puniform     0.00   12.25 -88.10
MaxLike      0.00    0.00 -88.21

 Nstudies = 250  and PopMeanPower = 0.25 

         Puniform MaxLike Zcurve
Pcurve      15.92    21.5 -27.67
Puniform     0.00     5.9 -37.29
MaxLike      0.00     0.0 -41.81

 Nstudies = 250  and PopMeanPower = 0.5 

         Puniform MaxLike Zcurve
Pcurve       9.66   18.28 -30.14
Puniform     0.00    9.78 -30.52
MaxLike      0.00    0.00 -37.01

 Nstudies = 250  and PopMeanPower = 0.75 

         Puniform MaxLike Zcurve
Pcurve       4.21   15.97 -30.36
Puniform     0.00   12.14 -26.02
MaxLike      0.00    0.00 -33.29

> 
> cat("\nTotal number of significant comparisons: \n")

Total number of significant comparisons: 
> print(sum(keepscore))
[1] 106
> 
> cat("\n\n Number of times row method is significantly more accurate than column method \n\n"); print(addmargins(keepscore,2))


 Number of times row method is significantly more accurate than column method 

         Pcurve Puniform MaxLike Zcurve Sum
Pcurve        0        0       0     15  15
Puniform     15        0       1     16  32
MaxLike      20       15       0     17  52
Zcurve        3        2       2      0   7
> ###############################################################################
> 
> cat("\n\n Now just the case where H0 is false \n",
+     "Positive value means the row method is less accurate on average. \n",
+     "Critical value Bonferroni protected at the 0.001 level is still ",critZ,".\n\n")


 Now just the case where H0 is false 
 Positive value means the row method is less accurate on average. 
 Critical value Bonferroni protected at the 0.001 level is still  4.456436 .

> keepscore2 = matrix(0,4,4) # Count winners in this matrix
> rownames(keepscore2) = colnames(keepscore2) = Methods
> 
> 
> for(i in 1:5) # Looping over Nstudies
+     {
+     for(j in 2:4) # Looping over PopMeanPower
+         {
+         # cat("\n Nstudies =",nstudies[i]," and PopMeanPower =",truepower[j],"\n\n")
+         In = (Nstudies==nstudies[i]) * (PopMeanPower==truepower[j])
+         pick = index[In==1]
+         # Fill matrix of pairwise tests in a double loop
+         for(K in 1:4)
+             {
+             for(L in 1:4)
+                 {
+                 differ = (ae[,K]-ae[,L])[pick]
+                 Z = 0
+                 if(K != L) Z = t.test(differ)$statistic
+                 if(K < L) pairwise[K,L] = Z
+                 if(Z < -critZ) keepscore2[K,L] = keepscore2[K,L]+1
+                 } # Next L
+             } # Next K
+         # print(round(pairwise[1:3,2:4],2))
+         } # Next j (PopMeanPower)
+     } # Next i (Nstudies)
> 
> cat("\nTotal number of significant comparisons: \n")

Total number of significant comparisons: 
> print(sum(keepscore2))
[1] 78
> 
> cat("\n\n Number of times row method is significantly more accurate than column method out of 15\n\n"); print(addmargins(keepscore2,2))


 Number of times row method is significantly more accurate than column method out of 15

         Pcurve Puniform MaxLike Zcurve Sum
Pcurve        0        0       0     10  10
Puniform     10        0       0     11  21
MaxLike      15       13       0     12  40
Zcurve        3        2       2      0   7