Calculate within and between variances and confidence intervals in R

I need to calculate the within-run and between-run variances from some data as part of developing a new analytical chemistry method. I also need confidence intervals from this data, using the R language.
I assume I need to use ANOVA or something?
My data is like
> variance
   Run Rep Value
1    1   1  9.85
2    1   2  9.95
3    1   3 10.00
4    2   1  9.90
5    2   2  8.80
6    2   3  9.50
7    3   1 11.20
8    3   2 11.10
9    3   3  9.80
10   4   1  9.70
11   4   2 10.10
12   4   3 10.00

You have four groups of three observations:
> run1 = c(9.85, 9.95, 10.00)
> run2 = c(9.90, 8.80, 9.50)
> run3 = c(11.20, 11.10, 9.80)
> run4 = c(9.70, 10.10, 10.00)
> runs = c(run1, run2, run3, run4)
> runs
[1] 9.85 9.95 10.00 9.90 8.80 9.50 11.20 11.10 9.80 9.70 10.10 10.00
Make some labels:
> n = rep(3, 4)
> group = rep(1:4, n)
> group
[1] 1 1 1 2 2 2 3 3 3 4 4 4
Calculate within-run stats:
> withinRunStats = function(x) c(sum = sum(x), mean = mean(x), var = var(x), n = length(x))
> tapply(runs, group, withinRunStats)
$`1`
         sum         mean          var            n
29.800000000  9.933333333  0.005833333  3.000000000

$`2`
  sum  mean   var     n
28.20  9.40  0.31  3.00

$`3`
  sum  mean   var     n
32.10 10.70  0.61  3.00

$`4`
        sum        mean         var           n
29.80000000  9.93333333  0.04333333  3.00000000
You can do some ANOVA here:
> data = data.frame(y = runs, group = factor(group))
> data
       y group
1   9.85     1
2   9.95     1
3  10.00     1
4   9.90     2
5   8.80     2
6   9.50     2
7  11.20     3
8  11.10     3
9   9.80     3
10  9.70     4
11 10.10     4
12 10.00     4
> fit = lm(runs ~ group, data)
> fit
Call:
lm(formula = runs ~ group, data = data)
Coefficients:
(Intercept)       group2       group3       group4
  9.933e+00   -5.333e-01    7.667e-01   -2.448e-15
> anova(fit)
Analysis of Variance Table
Response: runs
          Df  Sum Sq Mean Sq F value  Pr(>F)
group      3 2.57583 0.85861  3.5437 0.06769 .
Residuals  8 1.93833 0.24229
---
Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
> degreesOfFreedom = anova(fit)[, "Df"]
> names(degreesOfFreedom) = c("treatment", "error")
> degreesOfFreedom
treatment error
3 8
Error or within-group variance:
> anova(fit)["Residuals", "Mean Sq"]
[1] 0.2422917
Treatment or between-group variance:
> anova(fit)["group", "Mean Sq"]
[1] 0.8586111
This should give you enough confidence to do confidence intervals.
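If you want a concrete interval for the within-run variance, here is a minimal sketch (my addition, not part of the original answer) that applies the standard chi-squared interval to the residual mean square from the fit above:

# 95% CI for the within-run variance: df * s^2 / chi-squared quantiles
msWithin <- anova(fit)["Residuals", "Mean Sq"]   # 0.2422917
dfWithin <- anova(fit)["Residuals", "Df"]        # 8
ciWithin <- dfWithin * msWithin / qchisq(c(0.975, 0.025), df = dfWithin)
ciWithin   # lower and upper 95% bounds for the within-run variance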

If you want to apply a function (such as var) across a factor such as Run or Rep, you can use tapply:
> with(variance, tapply(Value, Run, var))
1 2 3 4
0.005833333 0.310000000 0.610000000 0.043333333
> with(variance, tapply(Value, Rep, var))
1 2 3
0.48562500 0.88729167 0.05583333
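If you prefer the result as a data frame rather than a named vector, aggregate() gives the same per-Run variances:

# Equivalent to the tapply call above, but returns a data frame
aggregate(Value ~ Run, data = variance, FUN = var)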

I'm going to take a crack at this when I have more time, but meanwhile, here's the dput() for Kiar's data structure:
structure(list(Run = c(1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4), Rep = c(1,
2, 3, 1, 2, 3, 1, 2, 3, 1, 2, 3), Value = c(9.85, 9.95, 10, 9.9,
8.8, 9.5, 11.2, 11.1, 9.8, 9.7, 10.1, 10)), .Names = c("Run",
"Rep", "Value"), row.names = c(NA, -12L), class = "data.frame")
... in case you'd like to take a quick shot at it.
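Assigning it to a name recreates the data frame (the next answer refers to it as kiar):

kiar <- structure(list(Run = c(1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4),
                       Rep = c(1, 2, 3, 1, 2, 3, 1, 2, 3, 1, 2, 3),
                       Value = c(9.85, 9.95, 10, 9.9, 8.8, 9.5,
                                 11.2, 11.1, 9.8, 9.7, 10.1, 10)),
                  .Names = c("Run", "Rep", "Value"),
                  row.names = c(NA, -12L), class = "data.frame")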

I've been looking at a similar problem. I've found a reference to calculating confidence intervals in Burdick and Graybill (Burdick, R. and Graybill, F. 1992, Confidence Intervals on Variance Components, CRC Press).
Using some code I've been trying, I get these values:
> kiaraov = aov(Value~Run+Error(Run),data=kiar)
> summary(kiaraov)
Error: Run
    Df  Sum Sq Mean Sq
Run  3 2.57583 0.85861

Error: Within
          Df  Sum Sq Mean Sq F value Pr(>F)
Residuals  8 1.93833 0.24229
> confint = 95
> a = (1-(confint/100))/2
> grandmean = as.vector(kiaraov$"(Intercept)"[[1]][1]) # Grand Mean (I think)
> within = summary(kiaraov)$"Error: Within"[[1]]$"Mean Sq" # S2^2 Mean Square Value for Within Run
> dfRun = summary(kiaraov)$"Error: Run"[[1]]$"Df"
> dfWithin = summary(kiaraov)$"Error: Within"[[1]]$"Df"
> Run = summary(kiaraov)$"Error: Run"[[1]]$"Mean Sq" # S1^2 Mean Square for between Run
> between = (Run-within)/((dfWithin/(dfRun+1))+1) # (S1^2-S2^2)/J
> total = between+within
> between # Between Run Variance
[1] 0.2054398
> within # Within Run Variance
[1] 0.2422917
> total # Total Variance
[1] 0.4477315
> betweenCV = sqrt(between)/grandmean * 100 # Between Run CV%
> withinCV = sqrt(within)/grandmean * 100 # Within Run CV%
> totalCV = sqrt(total)/grandmean * 100 # Total CV%
> #within confidence intervals
> withinLCB = within/qf(1-a,8,Inf) # Within LCB
> withinUCB = within/qf(a,8,Inf) # Within UCB
> #Between Confidence Intervals
> J = 3 # replicates per run, needed in the formulas below
> n1 = dfRun
> n2 = dfWithin
> G1 = 1-(1/qf(1-a,n1,Inf)) # According to Burdick and Graybill this should be a
> G2 = 1-(1/qf(1-a,n2,Inf))
> H1 = (1/qf(a,n1,Inf))-1 # and this should be 1-a, but my results don't agree
> H2 = (1/qf(a,n2,Inf))-1
> G12 = ((qf(1-a,n1,n2)-1)^2-(G1^2*qf(1-a,n1,n2)^2)-(H2^2))/qf(1-a,n1,n2) # again, should be a, not 1-a
> H12 = ((1-qf(a,n1,n2))^2-H1^2*qf(a,n1,n2)^2-G2^2)/qf(a,n1,n2) # again, should be 1-a, not a
> Vu = H1^2*Run^2+G2^2*within^2+H12*Run*within
> Vl = G1^2*Run^2+H2^2*within^2+G12*within*Run
> betweenLCB = (Run-within-sqrt(Vl))/J # Between LCB
> betweenUCB = (Run-within+sqrt(Vu))/J # Between UCB
> #Total Confidence Intervals
> y = (Run+(J-1)*within)/J
> totalLCB = y-(sqrt(G1^2*Run^2+G2^2*(J-1)^2*within^2)/J) # Total LCB
> totalUCB = y+(sqrt(H1^2*Run^2+H2^2*(J-1)^2*within^2)/J) # Total UCB
> result = data.frame(Name=c("within", "between", "total"),CV=c(withinCV,betweenCV,totalCV),LCB=c(sqrt(withinLCB)/grandmean*100,sqrt(betweenLCB)/grandmean*100,sqrt(totalLCB)/grandmean*100),UCB=c(sqrt(withinUCB)/grandmean*100,sqrt(betweenUCB)/grandmean*100,sqrt(totalUCB)/grandmean*100))
> result
Name CV LCB UCB
1 within 4.926418 3.327584 9.43789
2 between 4.536327 NaN 19.73568
3 total 6.696855 4.846030 20.42647
Here the lower confidence bound for the between-run variance is negative, so the between-run LCB is reported as NaN.
I'd love to have a better way to do this. If I get time I might try to create a function to do this.
Paul.
--
Edit: I did eventually write a function, here it is (caveat emptor). It uses ddply and colwise, so it requires the plyr package.
#' avar Function
#'
#' Calculate the within, between and total %CV of a dataset by ANOVA, and the
#' associated confidence intervals
#'
#' @param dataf The data frame to use, in long format
#' @param afactor Character string representing the column in dataf that contains the factor
#' @param aresponse Character string representing the column in dataf that contains the response value
#' @param aconfidence What confidence limits to use, default = 95%
#' @param digits Significant digits to report to, default = 3
#' @param debug Boolean, should debug messages be displayed, default = FALSE
#' @return A data frame containing the Mean, Within, Between and Total %CV and the LCB and UCB for each
#' @author Paul Hurley
#' @export
#' @examples
#' # Using the BGBottles data from Burdick and Graybill page 62
#' avar(dataf=BGBottles, afactor="Machine", aresponse="weight")
avar<-function(dataf, afactor, aresponse, aconfidence=95, digits=3, debug=FALSE){
dataf<-subset(dataf,!is.na(with(dataf,get(aresponse))))
nmissing<-function(x) sum(!is.na(x))
n<-nrow(subset(dataf,is.numeric(with(dataf,get(aresponse)))))
datadesc<-ddply(dataf, afactor, colwise(nmissing,aresponse))
I<-nrow(datadesc)
if(debug){print(datadesc)}
if(min(datadesc[,2])==max(datadesc[,2])){
balance<-TRUE
J<-min(datadesc[,2])
if(debug){message(paste("Dataset is balanced, J=",J,"I is ",I,sep=""))}
} else {
balance<-FALSE
Jh<-I/(sum(1/datadesc[,2], na.rm = TRUE))
J<-Jh
m<-min(datadesc[,2])
M<-max(datadesc[,2])
if(debug){message(paste("Dataset is unbalanced, like me, I is ",I,sep=""))}
if(debug){message(paste("Jh is ",Jh, ", m is ",m, ", M is ",M, sep=""))}
}
if(debug){message(paste("Call afactor=",afactor,", aresponse=",aresponse,sep=""))}
formulatext<-paste(as.character(aresponse)," ~ 1 + Error(",as.character(afactor),")",sep="")
if(debug){message(paste("formula text is ",formulatext,sep=""))}
aovformula<-formula(formulatext)
if(debug){message(paste("Formula is ",as.character(aovformula),sep=""))}
assayaov<-aov(formula=aovformula,data=dataf)
if(debug){
print(assayaov)
print(summary(assayaov))
}
a<-1-((1-(aconfidence/100))/2)
if(debug){message(paste("confidence is ",aconfidence,", alpha is ",a,sep=""))}
grandmean<-as.vector(assayaov$"(Intercept)"[[1]][1]) # Grand Mean (I think)
if(debug){message(paste("n is",n,sep=""))}
#This line commented out, seems to choke with an aov object built from an external formula
#grandmean<-as.vector(model.tables(assayaov,type="means")[[1]]$`Grand mean`) # Grand Mean (I think)
within<-summary(assayaov)[[2]][[1]]$"Mean Sq" # d2e, S2^2 Mean Square Value for Within Machine = 0.1819
dfRun<-summary(assayaov)[[1]][[1]]$"Df" # DF for within = 3
dfWithin<-summary(assayaov)[[2]][[1]]$"Df" # DF for within = 8
Run<-summary(assayaov)[[1]][[1]]$"Mean Sq" # S1^2Mean Square for Machine
if(debug){message(paste("mean square for Run ?",Run,sep=""))}
#Was between<-(Run-within)/((dfWithin/(dfRun+1))+1) but my comment suggests this should be just J, so I'll use J !
between<-(Run-within)/J # d2a (S1^2-S2^2)/J
if(debug){message(paste("S1^2 mean square machine is ",Run,", S2^2 mean square within is ",within))}
total<-between+within
between # Between Run Variance
within # Within Run Variance
total # Total Variance
if(debug){message(paste("between is ",between,", within is ",within,", Total is ",total,sep=""))}
betweenCV<-sqrt(between)/grandmean * 100 # Between Run CV%
withinCV<-sqrt(within)/grandmean * 100 # Within Run CV%
totalCV<-sqrt(total)/grandmean * 100 # Total CV%
n1<-dfRun
n2<-dfWithin
if(debug){message(paste("n1 is ",n1,", n2 is ",n2,sep=""))}
#within confidence intervals
if(balance){
withinLCB<-within/qf(a,n2,Inf) # Within LCB
withinUCB<-within/qf(1-a,n2,Inf) # Within UCB
} else {
withinLCB<-within/qf(a,n2,Inf) # Within LCB
withinUCB<-within/qf(1-a,n2,Inf) # Within UCB
}
#Mean Confidence Intervals
if(debug){message(paste(grandmean,"+/-(sqrt(",Run,"/",n,")*qt(",a,",df=",I-1,"))",sep=""))}
meanLCB<-grandmean+(sqrt(Run/n)*qt(1-a,df=I-1)) # wrong
meanUCB<-grandmean-(sqrt(Run/n)*qt(1-a,df=I-1)) # wrong
if(debug){message(paste("Grandmean is ",grandmean,", meanLCB = ",meanLCB,", meanUCB = ",meanUCB,aresponse,sep=""))}
if(debug){print(summary(assayaov))}
#Between Confidence Intervals
G1<-1-(1/qf(a,n1,Inf))
G2<-1-(1/qf(a,n2,Inf))
H1<-(1/qf(1-a,n1,Inf))-1
H2<-(1/qf(1-a,n2,Inf))-1
G12<-((qf(a,n1,n2)-1)^2-(G1^2*qf(a,n1,n2)^2)-(H2^2))/qf(a,n1,n2)
H12<-((1-qf(1-a,n1,n2))^2-H1^2*qf(1-a,n1,n2)^2-G2^2)/qf(1-a,n1,n2)
if(debug){message(paste("G1 is ",G1,", G2 is ",G2,sep=""))
message(paste("H1 is ",H1,", H2 is ",H2,sep=""))
message(paste("G12 is ",G12,", H12 is ",H12,sep=""))
}
if(balance){
Vu<-H1^2*Run^2+G2^2*within^2+H12*Run*within
Vl<-G1^2*Run^2+H2^2*within^2+G12*within*Run
betweenLCB<-(Run-within-sqrt(Vl))/J # Between LCB
betweenUCB<-(Run-within+sqrt(Vu))/J # Between UCB
} else {
#Burdick and Graybill seem to suggest calculating anova of mean values to find n1S12u/Jh
meandataf<-ddply(.data=dataf,.variable=afactor, .fun=function(df){mean(with(df, get(aresponse)), na.rm=TRUE)})
meandataaov<-aov(formula(paste("V1~",afactor,sep="")), data=meandataf)
sumsquare<-summary(meandataaov)[[1]]$`Sum Sq`
#so maybe S12u is just that bit ?
Runu<-(sumsquare*Jh)/n1
if(debug){message(paste("n1S12u/Jh is ",sumsquare,", so S12u is ",Runu,sep=""))}
Vu<-H1^2*Runu^2+G2^2*within^2+H12*Runu*within
Vl<-G1^2*Runu^2+H2^2*within^2+G12*within*Runu
betweenLCB<-(Runu-within-sqrt(Vl))/Jh # Between LCB
betweenUCB<-(Runu-within+sqrt(Vu))/Jh # Between UCB
if(debug){message(paste("betweenLCB is ",betweenLCB,", between UCB is ",betweenUCB,sep=""))}
}
#Total Confidence Intervals
if(balance){
y<-(Run+(J-1)*within)/J
if(debug){message(paste("y is ",y,sep=""))}
totalLCB<-y-(sqrt(G1^2*Run^2+G2^2*(J-1)^2*within^2)/J) # Total LCB
totalUCB<-y+(sqrt(H1^2*Run^2+H2^2*(J-1)^2*within^2)/J) # Total UCB
} else {
y<-(Runu+(Jh-1)*within)/Jh
if(debug){message(paste("y is ",y,sep=""))}
totalLCB<-y-(sqrt(G1^2*Runu^2+G2^2*(Jh-1)^2*within^2)/Jh) # Total LCB
totalUCB<-y+(sqrt(H1^2*Runu^2+H2^2*(Jh-1)^2*within^2)/Jh) # Total UCB
}
if(debug){message(paste("totalLCB is ",totalLCB,", total UCB is ",totalUCB,sep=""))}
# result<-data.frame(Name=c("within", "between", "total"),CV=c(withinCV,betweenCV,totalCV),
# LCB=c(sqrt(withinLCB)/grandmean*100,sqrt(betweenLCB)/grandmean*100,sqrt(totalLCB)/grandmean*100),
# UCB=c(sqrt(withinUCB)/grandmean*100,sqrt(betweenUCB)/grandmean*100,sqrt(totalUCB)/grandmean*100))
result<-data.frame(Mean=grandmean,MeanLCB=meanLCB, MeanUCB=meanUCB, Within=withinCV,WithinLCB=sqrt(withinLCB)/grandmean*100, WithinUCB=sqrt(withinUCB)/grandmean*100,
Between=betweenCV, BetweenLCB=sqrt(betweenLCB)/grandmean*100, BetweenUCB=sqrt(betweenUCB)/grandmean*100,
Total=totalCV, TotalLCB=sqrt(totalLCB)/grandmean*100, TotalUCB=sqrt(totalUCB)/grandmean*100)
if(!digits=="NA"){
result$Mean<-signif(result$Mean,digits=digits)
result$MeanLCB<-signif(result$MeanLCB,digits=digits)
result$MeanUCB<-signif(result$MeanUCB,digits=digits)
result$Within<-signif(result$Within,digits=digits)
result$WithinLCB<-signif(result$WithinLCB,digits=digits)
result$WithinUCB<-signif(result$WithinUCB,digits=digits)
result$Between<-signif(result$Between,digits=digits)
result$BetweenLCB<-signif(result$BetweenLCB,digits=digits)
result$BetweenUCB<-signif(result$BetweenUCB,digits=digits)
result$Total<-signif(result$Total,digits=digits)
result$TotalLCB<-signif(result$TotalLCB,digits=digits)
result$TotalUCB<-signif(result$TotalUCB,digits=digits)
}
return(result)
}
assayvar<-function(adata, aresponse, afactor, anominal, aconfidence=95, digits=3, debug=FALSE){
result<-ddply(adata,anominal,function(df){
resul<-avar(dataf=df,afactor=afactor,aresponse=aresponse,aconfidence=aconfidence, digits=digits, debug=debug)
resul$n<-nrow(subset(df, !is.na(with(df, get(aresponse)))))
return(resul)
})
return(result)
}
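A usage sketch on the question's data (assuming the dput structure above has been assigned to kiar); this call is my illustration, not output from the original post:

library(plyr)   # avar() uses ddply() and colwise()
avar(dataf = kiar, afactor = "Run", aresponse = "Value")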

Related

How to optimize my function by dropping loops

I have the following function that uses nested loops, and honestly I'm not sure how to make the code run more efficiently. It runs fine for 100 sims, but when I ran it for 2000 sims it took almost 12 seconds.
This code will generate any n Brownian motion simulations and works well; the issue is that once the simulation size is increased to, say, 500+ it starts to bog down, and when it hits 2k it's pretty slow, i.e. about 12 seconds.
Here is the function:
ts_brownian_motion <- function(.time = 100, .num_sims = 10, .delta_time = 1,
.initial_value = 0) {
# TidyEval ----
T <- as.numeric(.time)
N <- as.numeric(.num_sims)
delta_t <- as.numeric(.delta_time)
initial_value <- as.numeric(.initial_value)
# Checks ----
if (!is.numeric(T) | !is.numeric(N) | !is.numeric(delta_t) | !is.numeric(initial_value)){
rlang::abort(
message = "All parameters must be numeric values.",
use_cli_format = TRUE
)
}
# Initialize empty data.frame to store the simulations
sim_data <- data.frame()
# Generate N simulations
for (i in 1:N) {
# Initialize the current simulation with a starting value of 0
sim <- c(initial_value)
# Generate the brownian motion values for each time step
for (t in 1:(T / delta_t)) {
sim <- c(sim, sim[t] + rnorm(1, mean = 0, sd = sqrt(delta_t)))
}
# Bind the time steps, simulation values, and simulation number together in a data.frame and add it to the result
sim_data <- rbind(
sim_data,
data.frame(
t = seq(0, T, delta_t),
y = sim,
sim_number = i
)
)
}
# Clean up
sim_data <- sim_data %>%
dplyr::as_tibble() %>%
dplyr::mutate(sim_number = forcats::as_factor(sim_number)) %>%
dplyr::select(sim_number, t, y)
# Return ----
attr(sim_data, ".time") <- .time
attr(sim_data, ".num_sims") <- .num_sims
attr(sim_data, ".delta_time") <- .delta_time
attr(sim_data, ".initial_value") <- .initial_value
return(sim_data)
}
Here is some output of the function:
> ts_brownian_motion(.time = 10, .num_sims = 25)
# A tibble: 275 × 3
sim_number t y
<fct> <dbl> <dbl>
1 1 0 0
2 1 1 -2.13
3 1 2 -1.08
4 1 3 0.0728
5 1 4 0.562
6 1 5 0.255
7 1 6 -1.28
8 1 7 -1.76
9 1 8 -0.770
10 1 9 -0.536
# … with 265 more rows
# ℹ Use `print(n = ...)` to see more rows
As suggested in the comments, if you want speed, you should use cumsum. You need to be clear what type of Brownian Motion you want (arithmetic, geometric). For geometric Brownian motion, you'll need to correct the approximation error by adjusting the mean. As an example, the NMOF package (which I maintain), contains a function gbm that implements geometric Brownian Motion through cumsum. Here is an example call for 2000 paths with 100 timesteps each.
library("NMOF")
library("zoo") ## for plotting
timesteps <- 100
system.time(b <- NMOF::gbm(2000, tau = 1, timesteps = 100, r = 0, v = 1))
## user system elapsed
## 0.013 0.000 0.013
dim(b) ## each column is one path, starting at time zero
## [1] 101 2000
plot(zoo(b[, 1:5], 0:timesteps), plot.type = "single")
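If you only need arithmetic Brownian motion, here is a minimal vectorised sketch (my own illustration, not part of the original function or of NMOF) that replaces both loops with cumsum while keeping the same arguments:

ts_brownian_motion_fast <- function(.time = 100, .num_sims = 10, .delta_time = 1,
                                    .initial_value = 0) {
  steps <- .time / .delta_time
  # one matrix of normal increments: rows = time steps, columns = simulations
  increments <- matrix(rnorm(steps * .num_sims, mean = 0, sd = sqrt(.delta_time)),
                       nrow = steps, ncol = .num_sims)
  # cumulative sums give the paths; prepend the initial value as the t = 0 row
  paths <- rbind(.initial_value, .initial_value + apply(increments, 2, cumsum))
  data.frame(
    sim_number = factor(rep(seq_len(.num_sims), each = steps + 1)),
    t = rep(seq(0, .time, by = .delta_time), times = .num_sims),
    y = as.vector(paths)
  )
}
# e.g. ts_brownian_motion_fast(.time = 10, .num_sims = 25)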

How to plot correlation matrix in R with already derived correlation values between variables?

I have an output table which looks like:
Vars Corr SE
1_2 0.51 0.003
1_3 0.32 0.001
...
49_50 0.23 0.006
where the correlation values were derived in other software for the variable pairs stated in Vars (1_2 refers to the correlation between Variable 1 and Variable 2). What is the best way to convert this into a format which could display the correlation matrix between all 50 variables?
I'm assuming there needs to be a way to make the diagonals 1 as well?
Thanks!
So suppose you have the data in a single column; you can restructure it and use corrplot:
library(dplyr)    # for %>% and mutate()
library(corrplot)
cordata = data.frame(
  Vars = paste0(rep(1:50, times = 50), "_",
                rep(1:50, each = 50)),
  Corr = rnorm(n = 50*50, mean = 0, sd = .3)
) %>%
  # for the sake of demonstration, set Corrs beyond -1 and 1 to 0
  mutate(Corr = replace(Corr, Corr > 1 | Corr < -1, 0))
head(cordata)
Vars Corr
1 1_1 0.453807195
2 2_1 0.237179163
3 3_1 0.303635874
4 4_1 -0.314318833
5 5_1 0.008682918
6 6_1 -0.067164730
cormat = matrix(cordata$Corr, byrow = TRUE, ncol = 50)
# You can use corrplot::corrplot
corrplot(cormat)
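If, as in the question, each pair appears only once (1_2 but not 2_1), a sketch along these lines fills a symmetric matrix and sets the diagonal to 1 (assuming your table is a data frame named cordata with columns Vars and Corr; this is my illustration, not the answer's code):

# split the "i_j" labels into numeric row/column indices
idx <- do.call(rbind, strsplit(as.character(cordata$Vars), "_"))
i <- as.integer(idx[, 1])
j <- as.integer(idx[, 2])
cormat <- matrix(NA_real_, nrow = 50, ncol = 50)
cormat[cbind(i, j)] <- cordata$Corr   # fill one triangle
cormat[cbind(j, i)] <- cordata$Corr   # mirror into the other triangle
diag(cormat) <- 1                     # a variable's correlation with itself
corrplot::corrplot(cormat)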

What is wrong with my implementation of AdaBoost?

I tried to implement the AdaBoost algorithm of Freund and Schapire as close to the original as possible (see p. 2 here: http://rob.schapire.net/papers/explaining-adaboost.pdf):
library(rpart)
library(OneR)
maxdepth <- 1
T <- 100 # number of rounds
# Given: (x_1, y_1),...,(x_m, y_m) where x_i element of X, y_i element of {-1, +1}
myocarde <- read.table("http://freakonometrics.free.fr/myocarde.csv", head = TRUE, sep = ";")
#myocarde <- read.table("data/myocarde.csv", header = TRUE, sep = ";")
y <- (myocarde[ , "PRONO"] == "SURVIE") * 2 - 1
x <- myocarde[ , 1:7]
m <- nrow(x)
data <- data.frame(x, y)
# Initialize: D_1(i) = 1/m for i = 1,...,m
D <- rep(1/m, m)
H <- replicate(T, list())
a <- vector(mode = "numeric", T)
set.seed(123)
# For t = 1,...,T
for(t in 1:T) {
# Train weak learner using distribution D_t
# Get weak hypothesis h_t: X -> {-1, +1}
data_D_t <- data[sample(m, 10*m, replace = TRUE, prob = D), ]
H[[t]] <- rpart(y ~., data = data_D_t, maxdepth = maxdepth, method = "class")
# Aim: select h_t with low weighted error: e_t = Pr_i~D_t[h_t(x_i) != y_i]
h <- predict(H[[t]], x, type = "class")
e <- sum(h != y) / m
# Choose a_t = 0.5 * log((1-e) / e)
a[t] <- 0.5 * log((1-e) / e)
# Update for i = 1,...,m: D_t+1(i) = (D_t(i) * exp(-a_t * y_i * h_t(x_i))) / Z_t
# where Z_t is a normalization factor (chosen so that Dt+1 will be a distribution)
D <- D * exp(-a[t] * y * as.numeric(h))
D <- D / sum(D)
}
# Output the final hypothesis: H(x) = sign(sum of a_t * h_t(x) for t=1 to T)
newdata <- x
H_x <- sapply(H, function(x) as.numeric(as.character(predict(x, newdata = newdata, type = "class"))))
H_x <- t(a * t(H_x))
pred <- sign(rowSums(H_x))
#H
#a
eval_model(pred, y)
##
## Confusion matrix (absolute):
## Actual
## Prediction -1 1 Sum
## -1 0 1 1
## 1 29 41 70
## Sum 29 42 71
##
## Confusion matrix (relative):
## Actual
## Prediction -1 1 Sum
## -1 0.00 0.01 0.01
## 1 0.41 0.58 0.99
## Sum 0.41 0.59 1.00
##
## Accuracy:
## 0.5775 (41/71)
##
## Error rate:
## 0.4225 (30/71)
##
## Error rate reduction (vs. base rate):
## -0.0345 (p-value = 0.6436)
As can be seen the accuracy of the model is horrible compared to other AdaBoost implementations, e.g.:
library(JOUSBoost)
## JOUSBoost 2.1.0
boost <- adaboost(as.matrix(x), y, tree_depth = maxdepth, n_rounds = T)
pred <- predict(boost, x)
eval_model(pred, y)
##
## Confusion matrix (absolute):
## Actual
## Prediction -1 1 Sum
## -1 29 0 29
## 1 0 42 42
## Sum 29 42 71
##
## Confusion matrix (relative):
## Actual
## Prediction -1 1 Sum
## -1 0.41 0.00 0.41
## 1 0.00 0.59 0.59
## Sum 0.41 0.59 1.00
##
## Accuracy:
## 1 (71/71)
##
## Error rate:
## 0 (0/71)
##
## Error rate reduction (vs. base rate):
## 1 (p-value < 2.2e-16)
My question
Could you please give me a hint about what went wrong in my implementation? Thank you.
Edit
The final and corrected code can be found in my blog post: Understanding AdaBoost – or how to turn Weakness into Strength
There are quite a few contributing factors as to why your implementation is not working.
1. You were not using rpart correctly. The AdaBoost algorithm does not mention upsampling with the weights, but rpart itself can accept weights. My example below shows how rpart should be used for this purpose.
2. The calculation of the weighted error was wrong. You were calculating the error proportion (number of samples classified incorrectly divided by the number of samples). AdaBoost uses the sum of the weights of the incorrectly predicted samples (sum(D[y != yhat])).
3. The final predictions also seemed to be incorrect; I just ended up doing a simple loop.
Next time I recommend diving into the source code of the other implementations you are comparing against.
https://github.com/cran/JOUSBoost/blob/master/R/adaboost.R uses almost identical code to my example below, and probably would have helped guide you originally.
Additionally, using T as a variable can clash with T, the shorthand for the logical TRUE, so I'd avoid it.
### packages ###
library(rpart)
library(OneR)
### parameters ###
maxdepth <- 1
rounds <- 100
set.seed(123)
### data ###
myocarde <- read.table("http://freakonometrics.free.fr/myocarde.csv", head = TRUE, sep = ";")
y <- (myocarde[ , "PRONO"] == "SURVIE") * 2 - 1
x <- myocarde[ , 1:7]
m <- nrow(x)
dataset <- data.frame(x, y)
### initialisation ###
D <- rep(1/m, m)
H <- list()
a <- vector(mode = "numeric", length = rounds)
for (i in seq.int(rounds)) {
# train weak learner
H[[i]] = rpart(y ~ ., data = dataset, weights = D, maxdepth = maxdepth, method = "class")
# predictions
yhat <- predict(H[[i]], x, type = "class")
yhat <- as.numeric(as.character(yhat))
# weighted error
e <- sum(D[yhat != y])
# alpha coefficient
a[i] <- 0.5 * log((1 - e) / e)
# updating weights (D)
D <- D * exp(-a[i] * y * yhat)
D <- D / sum(D)
}
# predict with each weak learner on dataset
y_hat_final <- vector(mode = "numeric", length = m)
for (i in seq(rounds)) {
pred = predict(H[[i]], dataset, type = "class")
pred = as.numeric(as.character(pred))
y_hat_final = y_hat_final + (a[i] * pred)
}
pred <- sign(y_hat_final)
eval_model(pred, y)
> eval_model(pred, y)
Confusion matrix (absolute):
Actual
Prediction -1 1 Sum
-1 29 0 29
1 0 42 42
Sum 29 42 71
Confusion matrix (relative):
Actual
Prediction -1 1 Sum
-1 0.41 0.00 0.41
1 0.00 0.59 0.59
Sum 0.41 0.59 1.00
Accuracy:
1 (71/71)
Error rate:
0 (0/71)
Error rate reduction (vs. base rate):
1 (p-value < 2.2e-16)

Expected return and covariance from return time series

I’m trying to simulate the Matlab ewstats function here defined:
https://it.mathworks.com/help/finance/ewstats.html
The results given by Matlab are the following ones:
> ExpReturn = 1×2
0.1995 0.1002
> ExpCovariance = 2×2
0.0032 -0.0017
-0.0017 0.0010
I’m trying to replicate the example with the RiskPortfolios R package:
https://cran.r-project.org/web/packages/RiskPortfolios/RiskPortfolios.pdf
The R code I’m using is this one:
library(RiskPortfolios)
rets <- as.matrix(cbind(c(0.24, 0.15, 0.27, 0.14), c(0.08, 0.13, 0.06, 0.13)))
w <- 0.98
rets
w
meanEstimation(rets, control = list(type = 'ewma', lambda = w))
covEstimation(rets, control = list(type = 'ewma', lambda = w))
The mean estimation is the same of the one in the example, but the covariance matrix is different:
> rets
[,1] [,2]
[1,] 0.24 0.08
[2,] 0.15 0.13
[3,] 0.27 0.06
[4,] 0.14 0.13
> w
[1] 0.98
>
> meanEstimation(rets, control = list(type = 'ewma', lambda = w))
[1] 0.1995434 0.1002031
>
> covEstimation(rets, control = list(type = 'ewma', lambda = w))
[,1] [,2]
[1,] 0.007045044 -0.003857217
[2,] -0.003857217 0.002123827
Am I missing something?
Thanks
They give the same answer if type = "lw" is used:
round(covEstimation(rets, control = list(type = 'lw')), 4)
## 0.0032 -0.0017
## -0.0017 0.0010
They are using different algorithms. From the RiskPortfolios manual:
ewma ... See RiskMetrics (1996)
From the Matlab help page:
There is no relationship between ewstats function and the RiskMetrics® approach for determining the expected return and covariance from a return time series.
Unfortunately Matlab does not tell us which algorithm is used.
For those who eventually need an equivalent ewstats function in R, here is the code I wrote:
ewstats <- function(RetSeries, DecayFactor=NULL, WindowLength=NULL){
#EWSTATS Expected return and covariance from return time series.
# Optional exponential weighting emphasizes more recent data.
#
# [ExpReturn, ExpCovariance, NumEffObs] = ewstats(RetSeries, ...
# DecayFactor, WindowLength)
#
# Inputs:
# RetSeries : NUMOBS by NASSETS matrix of equally spaced incremental
# return observations. The first row is the oldest observation, and the
# last row is the most recent.
#
# DecayFactor : Controls how much less each observation is weighted than its
# successor. The k'th observation back in time has weight DecayFactor^k.
# DecayFactor must lie in the range: 0 < DecayFactor <= 1.
# The default is DecayFactor = 1, which is the equally weighted linear
# moving average Model (BIS).
#
# WindowLength: The number of recent observations used in
# the computation. The default is all NUMOBS observations.
#
# Outputs:
# ExpReturn : 1 by NASSETS estimated expected returns.
#
# ExpCovariance : NASSETS by NASSETS estimated covariance matrix.
#
# NumEffObs: The number of effective observations is given by the formula:
# NumEffObs = (1-DecayFactor^WindowLength)/(1-DecayFactor). Smaller
# DecayFactors or WindowLengths emphasize recent data more strongly, but
# use less of the available data set.
#
# The standard deviations of the asset return processes are given by:
# STDVec = sqrt(diag(ECov)). The correlation matrix is :
# CorrMat = VarMat./( STDVec*STDVec' )
#
# See also MEAN, COV, COV2CORR.
NumObs <- dim(RetSeries)[1]
NumSeries <- dim(RetSeries)[2]
# size the series and the window
if (is.null(WindowLength)) {
WindowLength <- NumObs
}
if (is.null(DecayFactor)) {
DecayFactor = 1
}
if (DecayFactor <= 0 | DecayFactor > 1) {
stop('Must have 0< decay factor <= 1.')
}
if (WindowLength > NumObs){
stop(sprintf('Window Length %d must be <= number of observations %d',
WindowLength, NumObs))
}
# ------------------------------------------------------------------------
# size the data to the window
RetSeries <- RetSeries[(NumObs-WindowLength+1):NumObs, ]  # note the parentheses: ':' binds tighter than '-'
# Calculate decay coefficients
DecayPowers <- seq(WindowLength-1, 0, by = -1)
VarWts <- sqrt(DecayFactor)^DecayPowers
RetWts <- (DecayFactor)^DecayPowers
NEff = sum(RetWts) # number of equivalent values in computation
# Compute the exponentially weighted mean return
WtSeries <- matrix(rep(RetWts, times = NumSeries),
nrow = length(RetWts), ncol = NumSeries) * RetSeries
ERet <- colSums(WtSeries)/NEff;
# Subtract the weighted mean from the original Series
CenteredSeries <- RetSeries - matrix(rep(ERet, each = WindowLength),
nrow = WindowLength, ncol = length(ERet))
# Compute the weighted variance
WtSeries <- matrix(rep(VarWts, times = NumSeries),
nrow = length(VarWts), ncol = NumSeries) * CenteredSeries
ECov <- t(WtSeries) %*% WtSeries / NEff
list(ExpReturn = ERet, ExpCovariance = ECov, NumEffObs = NEff)
}
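A usage sketch with the return matrix from the question (my illustration; I haven't verified the numbers here):

rets <- cbind(c(0.24, 0.15, 0.27, 0.14), c(0.08, 0.13, 0.06, 0.13))
ewstats(rets, DecayFactor = 0.98)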

Carrying out a PBIB.test

I have a data set from an incomplete lattice design study that I have imported into R from Excel and would like to conduct a PBIB.test. However, after running the function as shown below, the output shows object 'Area' not found, even after repeated attempts.
library("agricolae", lib.loc = "~/R/win-library/3.3")
Rdata2 <- PBIB.test("BlockNo", "AccNo", "Rep", Area, k = 9, c("REML"), console = TRUE)
Error in data.frame(v1 = 1, y) : object 'Area' not found
What is the problem?
See below for a sample application of PBIB.test, based on the agricolae tutorial.
First, create some sample data.
# Construct the alpha design with 30 treatments, 2 repetitions, and block size = 3
Genotype <- c(paste("gen0", 1:9, sep= ""), paste("gen", 10:30, sep= ""));
r <- 2;
k <- 3;
s <- 10;
b <- s * r;
book <- design.alpha(Genotype, k, r,seed = 5);
# Source dataframe
df <- book$book;
Create a vector of response values.
# Response variable
response <- c(
5,2,7,6,4,9,7,6,7,9,6,2,1,1,3,2,4,6,7,9,8,7,6,4,3,2,2,1,1,2,
1,1,2,4,5,6,7,8,6,5,4,3,1,1,2,5,4,2,7,6,6,5,6,4,5,7,6,5,5,4);
Run PBIB.test
model <- with(df, PBIB.test(block, Genotype, replication, response, k = 3, method="REML"))
head(model);
#$ANOVA
#Analysis of Variance Table
#
#Response: yield
# Df Sum Sq Mean Sq F value Pr(>F)
#Genotype 29 72.006 2.4830 1.2396 0.3668
#Residuals 11 22.034 2.0031
#
#$method
#[1] "Residual (restricted) maximum likelihood"
#
#$parameters
# test name.t treatments blockSize blocks r alpha
# PBIB-lsd Genotype 30 3 10 2 0.05
#
#$statistics
# Efficiency Mean CV
# 0.6170213 4.533333 31.22004
#
#$model
#Linear mixed-effects model fit by REML
# Data: NULL
# Log-restricted-likelihood: -73.82968
# Fixed: y ~ trt.adj
# (Intercept) trt.adjgen02 trt.adjgen03 trt.adjgen04 trt.adjgen05 trt.adjgen06
# 6.5047533 -3.6252940 -0.7701618 -2.5264354 -3.1633495 -1.9413054
#trt.adjgen07 trt.adjgen08 trt.adjgen09 trt.adjgen10 trt.adjgen11 trt.adjgen12
# -3.0096514 -4.0648738 -3.5051139 -2.8765561 -1.7111335 -1.6308755
#trt.adjgen13 trt.adjgen14 trt.adjgen15 trt.adjgen16 trt.adjgen17 trt.adjgen18
# -2.2187974 -2.3393290 -2.0807215 -0.3122845 -3.4526453 -1.0320169
#trt.adjgen19 trt.adjgen20 trt.adjgen21 trt.adjgen22 trt.adjgen23 trt.adjgen24
# -3.1257616 0.2101325 -1.7632411 -1.9177848 -1.0500345 -2.5612960
#trt.adjgen25 trt.adjgen26 trt.adjgen27 trt.adjgen28 trt.adjgen29 trt.adjgen30
# -4.3184716 -2.3071359 1.2239927 -1.3643068 -1.4354599 -0.4726870
#
#Random effects:
# Formula: ~1 | replication
# (Intercept)
#StdDev: 8.969587e-05
#
# Formula: ~1 | block.adj %in% replication
# (Intercept) Residual
#StdDev: 1.683459 1.415308
#
#Number of Observations: 60
#Number of Groups:
# replication block.adj %in% replication
# 2 20
#
#$Fstat
# Fit Statistics
#AIC 213.65937
#BIC 259.89888
#-2 Res Log Likelihood -73.82968
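Back to the original error: the example above passes the actual column vectors to PBIB.test() via with(df, ...). A hypothetical adaptation of your call, assuming your columns live in a data frame named mydata, would be:

# Pass the columns themselves, not their names as strings
Rdata2 <- with(mydata, PBIB.test(BlockNo, AccNo, Rep, Area, k = 9, method = "REML"))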
