Multirow contingency table in R

Let's consider this data set:
df <- data.frame(age= sample(c(20:90), 20, rep=T),
sex = sample(c('m', 'f'), 20, rep=T),
smoker=sample(c("never", "former", "active"), 20, rep=T),
size= sample (c(8:40), 20, rep=T),
fac = as.factor(sample(c("neg","lo","med","hi"), 20, rep=T)),
outcome = sample(c(0,1), 20, rep=T)
)
# let's introduce some missing data
for (i in (1:3)) {df[sample(c(1:20),1), sample(c(1:6),1)] <- NA}
In a medical manuscript the first table summarizes the population (or its subgroups as appropriate); here the rows would be age, sex, smoking status, etc., and the two outcomes would be listed in separate columns. The continuous variables are reported as means; the categorical variables as counts.
I was wondering if there is a function I am missing that creates such contingency tables. I can do it manually, but I would love for the table to update automatically if the data set changes. Ultimately I need to output to LaTeX.
The function would need to ignore missing data, but not delete those rows.
Asking too much?!
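Not asking too much at all: there are packages that build exactly this kind of "Table 1". Below is a minimal sketch using the tableone package (not part of the question's code; the variable names come from the df generated above). It reports means for continuous variables and counts for categorical ones, drops missing values per variable rather than per row, and its printed matrix can be handed to a LaTeX converter.
# install.packages("tableone")
library(tableone)
# one column per outcome level; factorVars marks the categorical variables
tab1 <- CreateTableOne(vars = c("age", "sex", "smoker", "size", "fac"),
strata = "outcome",
factorVars = c("sex", "smoker", "fac"),
data = df)
# printToggle = FALSE returns the formatted character matrix instead of printing it
tab1_mat <- print(tab1, printToggle = FALSE)
# hand the matrix to a LaTeX converter
# xtable::xtable(tab1_mat)
# knitr::kable(tab1_mat, format = "latex")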

In medical articles, 'Table 1' summarizes the demographics of the study population, usually broken down by subgroup.
Generate a data set
n <- 100
df <- data.frame(
age = sample(c(20:90), n, rep = T),
sex = sample(c("m", "f"), 20, rep = T, prob = c(0.55, 0.45)),
smoker = sample(c("never", "former", "active"), n, rep = T, prob = c(0.4, 0.45, 0.15)),
size = abs(rnorm(n, 20, 8)),
logitest = sample(c(TRUE, FALSE), n, rep = T, prob = c(0.1, 0.9)),
labtest = as.factor(sample(c("neg", "lo", quot;med",quot;hi"), n, rep = T, prob = c(0.4, 0.3, 0.2, 0.1))),
outcome = sample(c(0, 1), n, rep = T, prob = c(0.8, 0.2))
)
# let's introduce some missing data
for (i in (1:floor(n/6))) {
df[sample(c(1:n), 1), sample(c(1:ncol(df)), 1)] <- NA
}
head(df)
## age sex smoker size logitest labtest outcome
## 1 70 m former 39.17 NA med NA
## 2 51 f former 33.64 FALSE hi 1
## 3 58 f former 10.10 FALSE neg 1
## 4 30 m former 43.24 FALSE med 0
## 5 54 m former 22.78 FALSE lo 0
## 6 86 f former 8.20 FALSE neg 0
If working with a real data set, use it instead:
# df <- read.csv()
#you may need to eliminate some columns
#colnames(df)
#df0<-df #backup
#df <- df[,-c(1,...,27:38)]
Change this as needed: the column with the diagnosis has to be removed from the variables list!
dx <- 7 #index of outcome/diagnosis
####################################
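As a small hedged alternative to hard-coding the index, the outcome column can also be looked up by name (assuming it is called "outcome", as in the generated data):
dx <- which(colnames(df) == "outcome") #index of the outcome/diagnosis column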
summary(df[, -dx])
## age sex smoker size logitest
## Min. :20.0 f :44 active:19 Min. : 0.91 Mode :logical
## 1st Qu.:42.5 m :54 former:49 1st Qu.:15.00 FALSE:85
## Median :58.0 NA's: 2 never :30 Median :20.12 TRUE :12
## Mean :57.3 NA's : 2 Mean :20.44 NA's :3
## 3rd Qu.:74.0 3rd Qu.:27.10
## Max. :88.0 Max. :43.24
## NA's :1 NA's :2
## labtest
## hi : 4
## lo :29
## med :20
## neg :45
## NA's: 2
##
##
attach(df)
Build list of vars
vars <- colnames(df)
vars
## [1] "age" "sex" "smoker" "size" "logitest" "labtest"
## [7] "outcome"
catvars <- NULL #categorical variables
contvars <- NULL #continuous variables
logivars <- NULL #logic variables
vars <- vars[-dx]
vars
## [1] "age" "sex" "smoker" "size" "logitest" "labtest"
for (i in 1:length(vars)) { # classify each variable by type; index by name so the position of the outcome column does not matter
ifelse(is.factor(df[, vars[i]]), catvars <- c(catvars, vars[i]), ifelse(is.logical(df[,
vars[i]]), logivars <- c(logivars, vars[i]), contvars <- c(contvars, vars[i])))
}
contvars
## [1] "age" "size"
catvars
## [1] "sex" "smoker" "labtest"
logivars
## [1] "logitest"
Create the subgroups
bg <- df[df[, dx] == 0 & !is.na(df[, dx]), ]
nrow(bg) #; bg
## [1] 73
mg <- df[df[, dx] == 1 & !is.na(df[, dx]), ]
nrow(mg) #; mg
## [1] 23
indet <- df[is.na(df[, dx]), ]
nrow(indet)
## [1] 4
indet
## age sex smoker size logitest labtest outcome
## 1 70 m former 39.173 NA med NA
## 9 87 m former 23.621 FALSE lo NA
## 18 65 m former 2.466 FALSE <NA> NA
## 67 88 f former 17.575 FALSE med NA
For continuous variables
Normality testing
normality <- NULL
for (i in 1:length(contvars)) {
j <- which(colnames(df) == contvars[i]) #position of the variable in the original data frame and its subsets
st <- shapiro.test(df[, j]) #normality testing on all patients, bg and mg alike
normality <- c(normality, st$p.value) #normality testing on all patients, bg and mg alike
}
normality
## [1] 0.00125 0.73602
Comparing the means of the two subgroups: if normal, use a t-test, otherwise the Wilcoxon test
ttpvalue <- NULL
for (i in 1:length(contvars)) {
j <- which(colnames(df) == contvars[i]) #position of the variable in the original data frame and its subsets
## if normal, use t-test, otherwise wilcoxon if shapiro p<.05 then pop
## likely NOT normally dist
ifelse(normality[i] < 0.05, tt <- wilcox.test(bg[, j], mg[, j]), tt <- t.test(bg[,
j], mg[, j]))
ttpvalue <- c(ttpvalue, tt$p.value) ##if t-test p<.05 then pop likely have different means
}
ttpvalue
## [1] 0.6358 0.3673
contvarlist <- list(variables = contvars, normality = normality, ttest.by.subgroup = ttpvalue)
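Since the table ultimately reports continuous variables as means, here is a hedged sketch that formats mean (SD) overall and per outcome subgroup from the objects above; the column labels are just illustrative:
fmt <- function(x) sprintf("%.1f (%.1f)", mean(x, na.rm = TRUE), sd(x, na.rm = TRUE))
cont_rows <- t(sapply(contvars, function(v) c(all = fmt(df[[v]]), outcome0 = fmt(bg[[v]]), outcome1 = fmt(mg[[v]]))))
cont_rows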
For categorical variables
chisqpvalue <- NULL
for (i in 1:length(catvars)) {
j <- which(colnames(df) == catvars[i]) #position of the variable in the original data frame and its subsets
tbl <- table(df[, j], df[, dx])
chisqtest <- summary(tbl)
chisqpvalue <- c(chisqpvalue, chisqtest$p.value)
}
chisqpvalue
## [1] 0.01579 0.77116 0.39484
catvarlist <- list(variables = catvars, chisq.by.subgroup = chisqpvalue)
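Likewise, a hedged sketch of the level counts per subgroup that the manuscript table would report for the categorical variables (missing values are ignored by default):
cat_counts <- lapply(catvars, function(v) table(df[[v]], df[[dx]], dnn = c(v, "outcome")))
names(cat_counts) <- catvars
cat_counts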
For logical variables
proppvalue <- NULL
for (i in 1:length(logivars)) {
j <- which(colnames(df) == logivars[i]) #position of the variable in the original data frame and its subsets
tbl <- table(df[, j], df[, dx])
chisqtest <- summary(tbl)
proppvalue <- c(proppvalue, chisqtest$p.value)
}
proppvalue
## [1] 0.5551
logivarlist = list(variables = logivars, chisq.by.subgroup = proppvalue)
And now, the results!
str(contvarlist) #if shapiro p<.05 then pop likely NOT normally dist; if t-test p<.05 then pop likely have different means
## List of 3
## $ variables : chr [1:2] "age" "size"
## $ normality : num [1:2] 0.00125 0.73602
## $ ttest.by.subgroup: num [1:2] 0.636 0.367
str(catvarlist) #if chisq p<.05 then variables are likely NOT independent
## List of 2
## $ variables : chr [1:3] "sex" "smoker" "labtest"
## $ chisq.by.subgroup: num [1:3] 0.0158 0.7712 0.3948
str(logivarlist) #if chisq p<.05 then variables are likely NOT independent
## List of 2
## $ variables : chr "logitest"
## $ chisq.by.subgroup: num 0.555
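Finally, the original question asks for LaTeX output. One hedged way to finish is to collect the pieces into a data frame and pass it to xtable (knitr::kable with format = "latex" would work as well); the layout below is only illustrative:
# install.packages("xtable")
library(xtable)
results <- data.frame(variable = c(contvars, catvars, logivars),
p.value = c(ttpvalue, chisqpvalue, proppvalue))
print(xtable(results, caption = "Comparison by outcome subgroup"), include.rownames = FALSE)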

Related

How to store summary function into a vector then for loop in R?

I have the following dataset. For each month and site, I'm trying to use a particular package (EcoSimR) to estimate overlap (RA4model).
I want to achieve two things:
install.packages("EcoSimR")
library(EcoSimR)
set.seed(111)
month <- rep(c("J","J","J","F"), each = 4)
site <- rep(c("1","2","3","1"), each = 4)
species <- rep(c("A","B","C","D"), rep = 4)
q1 <- rnorm(16,5,1)
q2 <- rnorm(16,5,1)
q3 <- rnorm(16,5,1)
q4 <- rnorm(16,5,1)
q5 <- rnorm(16,5,1)
df <- data.frame(month, site, species,q1,q2,q3,q4,q5)
df.site <- df[df$month == "J" & df$site == "1",]
df.site <- df.site[,-c(1,2,3)]
RA4model <- niche_null_model(speciesData=df.site,
algo="ra4", metric="pianka",
suppressProg=TRUE,nReps=5000)
summary(RA4model)
First, I want to store certain values from the summary into the corresponding rows in R
Here is the output of summary(RA4model) for month "J" and site "1":
# Time Stamp: Sun Jan 8 11:43:05 2023
# Reproducible: FALSE
# Number of Replications: 5000
# Elapsed Time: 1 secs
# Metric: pianka
# Algorithm: ra4
# Observed Index: 0.96516
# Mean Of Simulated Index: 0.95101
# Variance Of Simulated Index: 9.4906e-05
# Lower 95% (1-tail): 0.93823
# Upper 95% (1-tail): 0.96979
# Lower 95% (2-tail): 0.93729
# Upper 95% (2-tail): 0.97272
# Lower-tail P = 0.8982
# Upper-tail P = 0.1018
# Observed metric > 4491 simulated metrics
# Observed metric < 509 simulated metrics
# Observed metric = 0 simulated metrics
# Standardized Effect Size (SES): 1.453
Storing this output into a vector: I don't know how to store the lower-1tailP and SES from the summary output into the dataset.
df.out <- df[,c(1,2,3)]
df.out$Obs <- RA4model$Obs
df.out$Sim <- mean(RA4model$Sim)
df.out$lower-1tailP <- #This should be 0.93
df.out$SES <- #This should be 1.453
Next, I want to loop this so that it does it for each unique(month,site). So the final dataframe looks something like this:
month site Obs Sim lower-1tailP SES
J 1 .. .. .. ..
J 2 .. .. .. ..
J 3 .. .. .. ..
F 1 .. .. .. ..
You can create a helper function get_eco_sim_result(), which returns a list of the parameters of interest:
get_eco_sim_result <- function(spd, algo= "ra4", metric = "pianka", nReps=500) {
model = niche_null_model(speciesData = spd,
algo = algo,metric =metric, nReps = nReps, suppressProg = TRUE
)
return(list(
Obs = model$Obs,
Sim = mean(model$Sim),
lower_1tailp = quantile(model$Sim,0.05),
SES = (model$Obs - mean(model$Sim))/sd(model$Sim)
))
}
Then use lapply() to apply that helper function to each subset of the data; here I obtain the subsets of the data using split(). By wrapping the returned list in data.frame(), you can subsequently use do.call() with rbind():
do.call(
rbind, lapply(split(df, list(month,site), drop=T), \(d) {
data.frame(get_eco_sim_result(d[,-c(1,2,3)], nReps=5000))
})
)
Output:
Obs Sim lower_1tailp SES
F.1 0.9641760 0.9546306 0.9429722 1.0966176
J.1 0.9651613 0.9508969 0.9381335 1.4635265
J.2 0.9931026 0.9842322 0.9800247 2.9524328
J.3 0.9726413 0.9674799 0.9586858 0.7669562
Your original question, however, is perhaps less about the helper function and the loop than about how the parameters in summary(RA4model) are estimated. Use getAnywhere(summary.nichenullmod) to see how these are computed.

What is wrong with my implementation of AdaBoost?

I tried to implement the AdaBoost algorithm of Freund and Schapire as close to the original as possible (see p. 2 here: http://rob.schapire.net/papers/explaining-adaboost.pdf):
library(rpart)
library(OneR)
maxdepth <- 1
T <- 100 # number of rounds
# Given: (x_1, y_1),...,(x_m, y_m) where x_i element of X, y_i element of {-1, +1}
myocarde <- read.table("http://freakonometrics.free.fr/myocarde.csv", head = TRUE, sep = ";")
#myocarde <- read.table("data/myocarde.csv", header = TRUE, sep = ";")
y <- (myocarde[ , "PRONO"] == "SURVIE") * 2 - 1
x <- myocarde[ , 1:7]
m <- nrow(x)
data <- data.frame(x, y)
# Initialize: D_1(i) = 1/m for i = 1,...,m
D <- rep(1/m, m)
H <- replicate(T, list())
a <- vector(mode = "numeric", T)
set.seed(123)
# For t = 1,...,T
for(t in 1:T) {
# Train weak learner using distribution D_t
# Get weak hypothesis h_t: X -> {-1, +1}
data_D_t <- data[sample(m, 10*m, replace = TRUE, prob = D), ]
H[[t]] <- rpart(y ~., data = data_D_t, maxdepth = maxdepth, method = "class")
# Aim: select h_t with low weighted error: e_t = Pr_i~D_t[h_t(x_i) != y_i]
h <- predict(H[[t]], x, type = "class")
e <- sum(h != y) / m
# Choose a_t = 0.5 * log((1-e) / e)
a[t] <- 0.5 * log((1-e) / e)
# Update for i = 1,...,m: D_t+1(i) = (D_t(i) * exp(-a_t * y_i * h_t(x_i))) / Z_t
# where Z_t is a normalization factor (chosen so that Dt+1 will be a distribution)
D <- D * exp(-a[t] * y * as.numeric(h))
D <- D / sum(D)
}
# Output the final hypothesis: H(x) = sign(sum of a_t * h_t(x) for t=1 to T)
newdata <- x
H_x <- sapply(H, function(x) as.numeric(as.character(predict(x, newdata = newdata, type = "class"))))
H_x <- t(a * t(H_x))
pred <- sign(rowSums(H_x))
#H
#a
eval_model(pred, y)
##
## Confusion matrix (absolute):
## Actual
## Prediction -1 1 Sum
## -1 0 1 1
## 1 29 41 70
## Sum 29 42 71
##
## Confusion matrix (relative):
## Actual
## Prediction -1 1 Sum
## -1 0.00 0.01 0.01
## 1 0.41 0.58 0.99
## Sum 0.41 0.59 1.00
##
## Accuracy:
## 0.5775 (41/71)
##
## Error rate:
## 0.4225 (30/71)
##
## Error rate reduction (vs. base rate):
## -0.0345 (p-value = 0.6436)
As can be seen, the accuracy of the model is horrible compared to other AdaBoost implementations, e.g.:
library(JOUSBoost)
## JOUSBoost 2.1.0
boost <- adaboost(as.matrix(x), y, tree_depth = maxdepth, n_rounds = T)
pred <- predict(boost, x)
eval_model(pred, y)
##
## Confusion matrix (absolute):
## Actual
## Prediction -1 1 Sum
## -1 29 0 29
## 1 0 42 42
## Sum 29 42 71
##
## Confusion matrix (relative):
## Actual
## Prediction -1 1 Sum
## -1 0.41 0.00 0.41
## 1 0.00 0.59 0.59
## Sum 0.41 0.59 1.00
##
## Accuracy:
## 1 (71/71)
##
## Error rate:
## 0 (0/71)
##
## Error rate reduction (vs. base rate):
## 1 (p-value < 2.2e-16)
My question
Could you please give me a hint what went wrong in my implementation? Thank you
Edit
The final and corrected code can be found in my blog post: Understanding AdaBoost – or how to turn Weakness into Strength
There are quite a few contributing factors as to why your implementation is not working.
You were not using rpart correctly. The AdaBoost algorithm does not call for upsampling with the weights; rpart itself can accept case weights. My example below shows how rpart should be used for this purpose.
The calculation of the weighted error was wrong. You were calculating the error proportion (number of samples classified incorrectly divided by the number of samples). AdaBoost uses the sum of the weights of the incorrectly predicted samples: sum(D[y != yhat]).
The final predictions seemed to be incorrect too, so I just ended up doing a simple loop.
Next time I recommend diving into the source code of the other implementations you are comparing against.
https://github.com/cran/JOUSBoost/blob/master/R/adaboost.R uses almost identical code to my example below, and probably would have helped guide you originally.
Additionally, using T as a variable name can shadow T, the shorthand for the logical TRUE, so I'd avoid it.
### packages ###
library(rpart)
library(OneR)
### parameters ###
maxdepth <- 1
rounds <- 100
set.seed(123)
### data ###
myocarde <- read.table("http://freakonometrics.free.fr/myocarde.csv", head = TRUE, sep = ";")
y <- (myocarde[ , "PRONO"] == "SURVIE") * 2 - 1
x <- myocarde[ , 1:7]
m <- nrow(x)
dataset <- data.frame(x, y)
### initialisation ###
D <- rep(1/m, m)
H <- list()
a <- vector(mode = "numeric", length = rounds)
for (i in seq.int(rounds)) {
# train weak learner
H[[i]] = rpart(y ~ ., data = dataset, weights = D, maxdepth = maxdepth, method = "class")
# predictions
yhat <- predict(H[[i]], x, type = "class")
yhat <- as.numeric(as.character(yhat))
# weighted error
e <- sum(D[yhat != y])
# alpha coefficient
a[i] <- 0.5 * log((1 - e) / e)
# updating weights (D)
D <- D * exp(-a[i] * y * yhat)
D <- D / sum(D)
}
# predict with each weak learner on dataset
y_hat_final <- vector(mode = "numeric", length = m)
for (i in seq(rounds)) {
pred = predict(H[[i]], dataset, type = "class")
pred = as.numeric(as.character(pred))
y_hat_final = y_hat_final + (a[i] * pred)
}
pred <- sign(y_hat_final)
eval_model(pred, y)
> eval_model(pred, y)
Confusion matrix (absolute):
Actual
Prediction -1 1 Sum
-1 29 0 29
1 0 42 42
Sum 29 42 71
Confusion matrix (relative):
Actual
Prediction -1 1 Sum
-1 0.41 0.00 0.41
1 0.00 0.59 0.59
Sum 0.41 0.59 1.00
Accuracy:
1 (71/71)
Error rate:
0 (0/71)
Error rate reduction (vs. base rate):
1 (p-value < 2.2e-16)

Finding corresponding values from already subsetted vectors in R?

Background: I have my dataset as a csv file called D (please load it to your R):
D = read.csv("https://docs.google.com/uc?id=0B5V8AyEFBTmXWU40TUZGTURQWlU&export=download")
I use the following function to obtain 2 quantities from my data (please source to your R):
source("https://docs.google.com/uc?id=0B5V8AyEFBTmXWTk0LWhaMkY2b3M&export=download")
The 2 quantities are obtained as follows:
b = BF.d.pvalue(t = D$t.value, n1 = D$n1, n2 = D$n2)
BF = b[1, ] ; p.value = b[2, ]
Subsetting Details: I have subsetted p.value larger than .05 and their corresponding BFs:
pvalue.05_1 = p.value[p.value > .05]
BF.pvalue.05_1 = BF[p.value > .05]
I have further subsetted BF.pvalue.05_1 that are between 1/10 and 1/3:
BF.pvalue.05_1_.1_.33 = BF.pvalue.05_1[BF.pvalue.05_1 > 1/10 & BF.pvalue.05_1 <= 1/3]
Question: Now I'm wondering how I can find the corresponding p.value for BF.pvalue.05_1_.1_.33 above?
The preferred way to do this is to combine your data into a data frame and then use the subset command for filtering.
myDf = data.frame(p = p.value, BF = BF)
head(myDf)
# p BF
# 1 2.274873e-06 6.241835e+03
# 2 3.811612e-02 1.736017e+00
# 3 0.000000e+00 2.592434e+147
# 4 0.000000e+00 1.982820e+130
# 5 0.000000e+00 1.315152e+29
# 6 9.992007e-15 4.442134e+11
Now, whenever you subset your data rowwise, you will have access to both the p value and the BF value.
firstSubset = subset(myDf, p > .05)
dim(firstSubset)
# [1] 175 2
secondSubset = subset(firstSubset, BF > .1 & BF < 1/3)
dim(secondSubset)
# [1] 76 2
head(secondSubset)
# p BF
# 28 0.8518770 0.3131790
# 34 0.9358011 0.2910234
# 35 0.9302671 0.2911639
# 52 0.6825720 0.3101911
# 88 0.7201547 0.2770751
# 96 0.6472360 0.2868055
Alternatively, you can apply all conditions simultaneously:
secondSubset = subset(myDf, (BF > .1) & (BF < 1/3) & (p > .05))
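If you would rather keep the separate vectors than build a data frame, a hedged sketch with which() keeps the original positions, so the matching p-values can be pulled out directly (the final object names are just illustrative):
idx <- which(p.value > .05)
idx <- idx[BF[idx] > 1/10 & BF[idx] <= 1/3]
BF.pvalue.05_1_.1_.33 <- BF[idx] # the BF subset
p.pvalue.05_1_.1_.33 <- p.value[idx] # the corresponding p-values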

Using rollapply and lm over multiple columns of data

I have a data frame similar to the following with a total of 500 columns:
Probes <- data.frame(Days=seq(0.01, 4.91, 0.01), B1=5:495,B2=-100:390, B3=10:500,B4=-200:290)
I would like to calculate a rolling window linear regression where my window size is 12 data points and each sequential regression is separated by 6 data points. For each regression, "Days" will always be the x component of the model, and the y's would be each of the other columns (B1, followed by B2, B3, etc.). I would then like to save the coefficients as a dataframe with the existing column titles (B1, B2, etc.).
I think my code is close, but it is not quite working. I used rollapply from the zoo library.
slopedata<-rollapply(zoo(Probes), width=12, function(Probes) {
coef(lm(formula=y~Probes$Days, data = Probes))[2]
}, by = 6, by.column=TRUE, align="right")
If possible, I would also like to have the "xmins" saved to a vector to add to the dataframe. This would mean the smallest x value used in each regression (basically it would be every 6 numbers in the "Days" column.)
Thanks for your help.
1) Define a zoo object z whose data contains Probes and whose index is taken from the first column of Probes, i.e. Days. Noting that lm allows y to be a matrix, define a coefs function which computes the regression coefficients. Finally, rollapply over z. Note that the index of the returned object gives xmin.
library(zoo)
z <- zoo(Probes, Probes[[1]])
coefs <- function(z) c(unlist(as.data.frame(coef(lm(z[,-1] ~ z[,1])))))
rz <- rollapply(z, 12, by = 6, coefs, by.column = FALSE, align = "left")
giving:
> head(rz)
B11 B12 B21 B22 B31 B32 B41 B42
0.01 4 100 -101 100 9 100 -201 100
0.07 4 100 -101 100 9 100 -201 100
0.13 4 100 -101 100 9 100 -201 100
0.19 4 100 -101 100 9 100 -201 100
0.25 4 100 -101 100 9 100 -201 100
0.31 4 100 -101 100 9 100 -201 100
Note that DF <- fortify.zoo(rz) could be used if you needed a data frame representation of rz.
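For the xmin column the question asks about, a hedged sketch of that conversion, renaming the index column (the name xmin is just a choice here):
DF <- fortify.zoo(rz) # the index becomes the first column
names(DF)[1] <- "xmin" # smallest Days value used in each regression
head(DF)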
2) An alternative, somewhat similar approach would be to rollapply over the row numbers:
library(zoo)
y <- as.matrix(Probes[-1])
Days <- Probes$Days
n <- nrow(Probes)
coefs <- function(ix) c(unlist(as.data.frame(coef(lm(y ~ Days, subset = ix)))),
xmins = Days[ix][1])
r <- rollapply(1:n, 12, by = 6, coefs)
Try this:
# here are the xmin values you wanted
xmins <- Probes$Days[seq(1,nrow(Probes),6)]
# here we build a function that will run regressions across the columns
# y1 vs x, y2 vs x, y3 vs x...
# you enter the window and by (12/6) in order to limit the interval being
# regressed. this is later called in do.call
runreg <- function(Probes,m,window=12,by=6){
# beg,end are used to specify the interval
beg <- seq(1,nrow(Probes),by)[m]
end <- beg+window-1
# this is used to go through all the columns
N <- ncol(Probes)-1
tmp <- numeric(N)
# go through each column and store the coefficients in tmp
for(i in 1:N){
y <- Probes[[i+1]][beg:end]
x <- Probes$Days[beg:end]
tmp[i] <- coef(lm(y~x))[2][[1]]
}
# put all our column regressions into a dataframe
res <- rbind('coeff'=tmp)
colnames(res) <- colnames(Probes)[-1]
return(res)
}
# now that we've built the function to do the column regressions
# we just need to go through all the window-ed regressions (row regressions)
res <- do.call(rbind,lapply(1:length(xmins),function(m) runreg(Probes,m)))
# these rownames are the index of the xmin values
rownames(res) <- seq(1,nrow(Probes),6)
res <- data.frame(res,xmins)
You can also use the rollRegres package as follows:
# setup data
Probes <- data.frame(
# I changed the days to be integers
Days=seq(1L, 491L, 1L),
B1=5:495, B2=-100:390, B3=10:500 , B4=-200:290)
# setup grp argument
grp_arg <- as.integer((Probes$Days - 1L) %/% 6)
# estimate coefs. width argument is relative in grp units
library(rollRegres)
X <- cbind(1, Probes$Days / 100)
Ys <- as.matrix(Probes[, 2:5])
out <- lapply(1:ncol(Ys), function(i)
roll_regres.fit(x = X, y = Ys[, i], width = 2L, grp = grp_arg)$coefs)
out <- do.call(cbind, out)
# only keep the complete.cases and the unique values
colnames(out) <- sapply(1:4, function(i) paste0("B", i, 0:1))
out <- out[c(T, grp_arg[-1] != head(grp_arg, -1)), ]
out <- out[complete.cases(out), ]
head(out)
#R B10 B11 B20 B21 B30 B31 B40 B41
#R [1,] 4 100 -101 100 9 100 -201 100
#R [2,] 4 100 -101 100 9 100 -201 100
#R [3,] 4 100 -101 100 9 100 -201 100
#R [4,] 4 100 -101 100 9 100 -201 100
#R [5,] 4 100 -101 100 9 100 -201 100
#R [6,] 4 100 -101 100 9 100 -201 100
This solution is a lot faster than, e.g., the zoo solution:
library(zoo)
coefs <- function(z) c(unlist(as.data.frame(coef(lm(z[,-1] ~ z[,1])))))
microbenchmark::microbenchmark(
rollapply = {
z <- zoo(Probes, Probes[[1]])
rz <- rollapply(z, 12, by = 6, coefs, by.column = FALSE, align = "left")
},
roll_regres = {
grp_arg <- as.integer((Probes$Days - 1L) %/% 6)
X <- cbind(1, Probes$Days / 100)
Ys <- as.matrix(Probes[, 2:5])
out <- lapply(1:ncol(Ys), function(i)
roll_regres.fit(x = X, y = Ys[, i], width = 2L, grp = grp_arg)$coefs)
out <- do.call(cbind, out)
colnames(out) <- sapply(1:4, function(i) paste0("B", i, 0:1))
out <- out[c(T, grp_arg[-1] != head(grp_arg, -1)), ]
out <- out[complete.cases(out), ]
head(out)
}
)
#R Unit: microseconds
#R expr min lq mean median uq max neval
#R rollapply 53392.614 56330.492 59793.106 58363.2825 60902.938 119206.76 100
#R roll_regres 865.186 920.297 1074.161 983.9015 1047.705 5071.41 100
At present, though, you need to install the package from GitHub due to an error in the validation in version 0.1.0. Thus, run:
devtools::install_github("boennecd/rollRegres", upgrade_dependencies = FALSE,
build_vignettes = TRUE)

Splitting Dataframe into Confirmatory and Exploratory Samples

I have a very large dataframe (N = 107,251), that I wish to split into relatively equal halves (~53,625). However, I would like the split to be done such that three variables are kept in equal proportion in the two sets (pertaining to Gender, Age Category with 6 levels, and Region with 5 levels).
I can generate the proportions for the variables independently (e.g., via prop.table(xtabs(~dat$Gender))) or in combination (e.g., via prop.table(xtabs(~dat$Gender + dat$Region + dat$Age))), but I'm not sure how to utilise this information to actually do the sampling.
Sample dataset:
set.seed(42)
Gender <- sample(c("M", "F"), 1000, replace = TRUE)
Region <- sample(c("1","2","3","4","5"), 1000, replace = TRUE)
Age <- sample(c("1","2","3","4","5","6"), 1000, replace = TRUE)
X1 <- rnorm(1000)
dat <- data.frame(Gender, Region, Age, X1)
Probabilities:
round(prop.table(xtabs(~dat$Gender)), 3) # 48.5% Female; 51.5% Male
round(prop.table(xtabs(~dat$Age)), 3) # 16.8, 18.2, ..., 16.0%
round(prop.table(xtabs(~dat$Region)), 3) # 21.5%, 17.7, ..., 21.9%
# Multidimensional probabilities:
round(prop.table(xtabs(~dat$Gender + dat$Age + dat$Region)), 3)
The end goal for this dummy example would be two data frames with ~500 observations in each (completely independent, no participant appearing in both), and approximately equivalent in terms of gender/region/age splits. In the real analysis, there is more disparity between the age and region weights, so doing a single random split-half isn't appropriate. In real world applications, I'm not sure if every observation needs to be used or if it is better to get the splits more even.
I have been reading over the documentation from package:sampling but I'm not sure it is designed to do exactly what I require.
You can check out my stratified function, which you should be able to use like this:
set.seed(1) ## just so you can reproduce this
## Take your first group
sample1 <- stratified(dat, c("Gender", "Region", "Age"), .5)
## Then select the remainder
sample2 <- dat[!rownames(dat) %in% rownames(sample1), ]
summary(sample1)
# Gender Region Age X1
# F:235 1:112 1:84 Min. :-2.82847
# M:259 2: 90 2:78 1st Qu.:-0.69711
# 3: 94 3:82 Median :-0.03200
# 4: 97 4:80 Mean :-0.01401
# 5:101 5:90 3rd Qu.: 0.63844
# 6:80 Max. : 2.90422
summary(sample2)
# Gender Region Age X1
# F:238 1:114 1:85 Min. :-2.76808
# M:268 2: 92 2:81 1st Qu.:-0.55173
# 3: 97 3:83 Median : 0.02559
# 4: 99 4:83 Mean : 0.05789
# 5:104 5:91 3rd Qu.: 0.74102
# 6:83 Max. : 3.58466
Compare the following and see if they are within your expectations.
x1 <- round(prop.table(
xtabs(~dat$Gender + dat$Age + dat$Region)), 3)
x2 <- round(prop.table(
xtabs(~sample1$Gender + sample1$Age + sample1$Region)), 3)
x3 <- round(prop.table(
xtabs(~sample2$Gender + sample2$Age + sample2$Region)), 3)
It should be able to work fine with data of the size you describe, but a "data.table" version is in the works that promises to be much more efficient.
Update:
stratified now has a new logical argument "bothSets" which lets you keep both sets of samples as a list.
set.seed(1)
Samples <- stratified(dat, c("Gender", "Region", "Age"), .5, bothSets = TRUE)
lapply(Samples, summary)
# $SET1
# Gender Region Age X1
# F:235 1:112 1:84 Min. :-2.82847
# M:259 2: 90 2:78 1st Qu.:-0.69711
# 3: 94 3:82 Median :-0.03200
# 4: 97 4:80 Mean :-0.01401
# 5:101 5:90 3rd Qu.: 0.63844
# 6:80 Max. : 2.90422
#
# $SET2
# Gender Region Age X1
# F:238 1:114 1:85 Min. :-2.76808
# M:268 2: 92 2:81 1st Qu.:-0.55173
# 3: 97 3:83 Median : 0.02559
# 4: 99 4:83 Mean : 0.05789
# 5:104 5:91 3rd Qu.: 0.74102
# 6:83 Max. : 3.58466
The following code basically creates a key based on the group membership, then loops through each group, putting half in one set and half (roughly) in the other. If you compare the resulting probabilities, they are within 0.001 of each other. The downside is that it is biased toward a larger sample size for the second group because of how the rounding of odd-sized groups is handled. In this case the first sample is 488 observations and the second is 512. You can probably throw in some logic to account for that and even it out better.
EDIT: Added that logic, and it now splits evenly.
set.seed(42)
Gender <- sample(c("M", "F"), 1000, replace = TRUE)
Region <- sample(c("1","2","3","4","5"), 1000, replace = TRUE)
Age <- sample(c("1","2","3","4","5","6"), 1000, replace = TRUE)
X1 <- rnorm(1000)
dat <- data.frame(Gender, Region, Age, X1)
dat$group <- with(dat, paste(Gender, Region, Age))
groups <- unique(dat$group)
setA <- dat[NULL,]
setB <- dat[NULL,]
for (i in 1:length(groups)){
temp <- dat[dat$group==groups[i],]
if (nrow(setA) > nrow(setB)){
tempA <- temp[1:floor(nrow(temp)/2),]
tempB <- temp[(1+floor(nrow(temp)/2)):nrow(temp),]
} else {
tempA <- temp[1:ceiling(nrow(temp)/2),]
tempB <- temp[(1+ceiling(nrow(temp)/2)):nrow(temp),]
}
setA <- rbind(setA, tempA)
setB <- rbind(setB, tempB)
}
