Optimizing replacement in a data frame - r

This is an extension of Update pairs of columns based on pattern in their names. It is thus partially motivated by curiosity and partially for entertainment.
While developing an answer to that question, it occurred to me that this may be one of those cases where a for loop is more efficient than an *apply function (and I've been looking for a good illustration of the fact that *apply is not necessarily "more efficient" than a well-constructed for loop). So I'd like to pose the question again, and ask if anyone is able to write a solution using an *apply function (or purrr, if that's your thing) that performs better than the for loop I've written below. Performance will be judged on execution time as evaluated via microbenchmark on my laptop (a cheap Windows box running R 3.3.2).
data.table and dplyr suggestions are welcome as well. (I'm already making plans for what I'll do with all the microseconds I save).
The Challenge
Consider the data frame:
col_1 <- c(1,2,NA,4,5)
temp_col_1 <-c(12,2,2,3,4)
col_2 <- c(1,23,423,NA,23)
temp_col_2 <-c(1,2,23,4,5)
df_test <- data.frame(col_1, temp_col_1, col_2, temp_col_2)
set.seed(pi)
df_test <- df_test[sample(1:nrow(df_test), 1000, replace = TRUE), ]
For each col_x, replace the missing values with the corresponding value in temp_col_x. So, for example:
  col_1 temp_col_1 col_2 temp_col_2
1     1         12     1          1
2     2          2    23          2
3    NA          2   423         23
4     4          3    NA          4
5     5          4    23          5
becomes
  col_1 temp_col_1 col_2 temp_col_2
1     1         12     1          1
2     2          2    23          2
3     2          2   423         23
4     4          3     4          4
5     5          4    23          5
Existing Solutions
The for loop I've already written
temp_cols <- names(df_test)[grepl("^temp", names(df_test))]
cols <- sub("^temp_", "", temp_cols)
for (i in seq_along(temp_cols)){
  row_to_replace <- which(is.na(df_test[[cols[i]]]))
  df_test[[cols[i]]][row_to_replace] <- df_test[[temp_cols[i]]][row_to_replace]
}
My best apply function so far is:
lapply(names(df_test)[grepl("^temp_", names(df_test))],
       function(tc){
         col <- sub("^temp_", "", tc)
         row_to_replace <- which(is.na(df_test[[col]]))
         df_test[[col]][row_to_replace] <<- df_test[[tc]][row_to_replace]
       })
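For completeness, a roughly equivalent purrr::walk version (a sketch only, not benchmarked below; like the lapply version it relies on <<- to modify df_test in the calling environment):
library(purrr)
temp_cols <- names(df_test)[grepl("^temp_", names(df_test))]
walk(temp_cols, function(tc){
  col <- sub("^temp_", "", tc)
  row_to_replace <- which(is.na(df_test[[col]]))
  df_test[[col]][row_to_replace] <<- df_test[[tc]][row_to_replace]
})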
Benchmarking
As (if) suggestions come in, I will begin showing benchmarks in edits to this question. (edit: code is now a copy of Frank's answer, but run 100 times on my machine, as promised)
library(magrittr)
library(data.table)
library(microbenchmark)
set.seed(pi)
nc = 1e3
nr = 1e2
df_m0 = sample(c(1:10, NA_integer_), nc*nr, replace = TRUE) %>% matrix(nr, nc) %>% data.frame
df_r = sample(c(1:10), nc*nr, replace = TRUE) %>% matrix(nr, nc) %>% data.frame
microbenchmark(times = 100,
  for_vec = {
    df_m <- df_m0
    for (col in 1:nc){
      w <- which(is.na(df_m[[col]]))
      df_m[[col]][w] <- df_r[[col]][w]
    }
  }, lapply_vec = {
    df_m <- df_m0
    lapply(seq_along(df_m),
           function(i){
             w <- which(is.na(df_m[[i]]))
             df_m[[i]][w] <<- df_r[[i]][w]
           })
  }, for_df = {
    df_m <- df_m0
    for (col in 1:nc){
      w <- which(is.na(df_m[[col]]))
      df_m[w, col] <- df_r[w, col]
    }
  }, lapply_df = {
    df_m <- df_m0
    lapply(seq_along(df_m),
           function(i){
             w <- which(is.na(df_m[[i]]))
             df_m[w, i] <<- df_r[w, i]
           })
  }, mat = { # in lmo's answer
    df_m <- df_m0
    bah = is.na(df_m)
    df_m[bah] = df_r[bah]
  }, set = {
    df_m <- copy(df_m0)
    for (col in 1:nc){
      w = which(is.na(df_m[[col]]))
      set(df_m, i = w, j = col, v = df_r[w, col])
    }
  }
)
Results:
Unit: milliseconds
expr min lq mean median uq max neval cld
for_vec 135.83875 157.84548 175.23005 166.60090 176.81839 502.0616 100 b
lapply_vec 135.67322 158.99496 179.53474 165.11883 178.06968 551.7709 100 b
for_df 173.95971 204.16368 222.30677 212.76608 224.78188 446.6050 100 c
lapply_df 181.46248 205.57069 220.38911 215.08505 223.98406 381.1006 100 c
mat 129.27835 154.01248 173.11378 159.83070 169.67439 453.0888 100 b
set 66.86402 81.08138 86.32626 85.51029 89.58331 123.1926 100 a

Data.table provides the set function to modify data.tables or data.frames by reference.
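As a minimal sketch of what that looks like on the question's df_test (assuming the col_*/temp_* naming above; set also accepts a plain data.frame):
library(data.table)
temp_cols <- names(df_test)[grepl("^temp_", names(df_test))]
cols <- sub("^temp_", "", temp_cols)
for (i in seq_along(cols)){
  w <- which(is.na(df_test[[cols[i]]]))
  set(df_test, i = w, j = cols[i], value = df_test[[temp_cols[i]]][w])
}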
Here's a benchmark that is more flexible with respect to numbers of cols and rows and that sidesteps the awkward column-name stuff in the OP:
library(magrittr)
nc = 1e3
nr = 1e2
df_m0 = sample(c(1:10, NA_integer_), nc*nr, replace = TRUE) %>% matrix(nr, nc) %>% data.frame
df_r = sample(c(1:10), nc*nr, replace = TRUE) %>% matrix(nr, nc) %>% data.frame
library(data.table)
library(microbenchmark)
microbenchmark(times = 10,
  for_vec = {
    df_m <- df_m0
    for (col in 1:nc){
      w <- which(is.na(df_m[[col]]))
      df_m[[col]][w] <- df_r[[col]][w]
    }
  }, lapply_vec = {
    df_m <- df_m0
    lapply(seq_along(df_m), function(i){
      w <- which(is.na(df_m[[i]]))
      df_m[[i]][w] <<- df_r[[i]][w]
    })
  }, for_df = {
    df_m <- df_m0
    for (col in 1:nc){
      w <- which(is.na(df_m[[col]]))
      df_m[w, col] <- df_r[w, col]
    }
  }, lapply_df = {
    df_m <- df_m0
    lapply(seq_along(df_m), function(i){
      w <- which(is.na(df_m[[i]]))
      df_m[w, i] <<- df_r[w, i]
    })
  }, mat = { # in lmo's answer
    df_m <- df_m0
    bah = is.na(df_m)
    df_m[bah] = df_r[bah]
  }, set = {
    df_m <- copy(df_m0)
    for (col in 1:nc){
      w = which(is.na(df_m[[col]]))
      set(df_m, i = w, j = col, v = df_r[w, col])
    }
  }
)
Which gives...
Unit: milliseconds
expr min lq mean median uq max neval
for_vec 77.06501 89.53430 100.10051 96.33764 106.13486 142.1329 10
lapply_vec 77.67366 89.04438 98.81510 99.08863 108.86491 117.2956 10
for_df 103.79097 130.33134 140.95398 144.46526 157.11335 161.4507 10
lapply_df 97.04616 114.17825 126.10633 131.20382 137.64375 149.7765 10
mat 73.47691 84.51473 100.16745 103.44476 112.58006 128.6166 10
set 44.32578 49.58586 62.52712 56.30460 71.63432 101.3517 10
Comments:
If we adjust nc and nr or the frequency of NAs, the ranking of these options might change. I guess the more cols there are, the better the mat way (from lmo's answer) and the set way look.
The copy in the set test takes some extra time beyond what we'd see in practice, since the set function just modifies the table by reference (unlike the other options, I think).

Here is a readable solution. Probably slower than some.
df_test[c(TRUE, FALSE)][is.na(df_test[c(TRUE, FALSE)])] <-
df_test[c(FALSE, TRUE)][is.na(df_test[c(TRUE, FALSE)])]
This could be sped up a bit by computing the index of missing values once up front, so the is.na() lookup is only performed once:
filler <- is.na(df_test[c(TRUE, FALSE)])
df_test[c(TRUE, FALSE)][filler] <- df_test[c(FALSE, TRUE)][filler]
In a two data.frame scenario, df1 and df2, this logic would be
filler <- is.na(df1)
df1[filler] <- df2[filler]
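For example, with two small made-up frames:
df1 <- data.frame(a = c(1, NA, 3), b = c(NA, 5, 6))
df2 <- data.frame(a = c(9, 9, 9), b = c(9, 9, 9))
filler <- is.na(df1)
df1[filler] <- df2[filler]
df1
#   a b
# 1 1 9
# 2 9 5
# 3 3 6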

Maybe this is naive, but how about neither? I think it's still in the spirit of things if you're just looking for the fastest method. I suspect this won't be it though.
col_1 <- c(1,2,NA,4,5)
temp_col_1 <-c(12,2,2,3,4)
col_2 <- c(1,23,423,NA,23)
temp_col_2 <-c(1,2,23,4,5)
df_test <- data.frame(col_1, temp_col_1, col_2, temp_col_2)
set.seed(pi)
df_test <- df_test[sample(1:nrow(df_test), 1000, replace = TRUE), ]
df_test$col_1 <- ifelse(is.na(df_test$col_1), df_test$temp_col_1, df_test$col_1)
df_test$col_2 <- ifelse(is.na(df_test$col_2), df_test$temp_col_2, df_test$col_2)
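A tidyverse variant of the same idea, not benchmarked here, is dplyr::coalesce, which keeps the first non-NA value element-wise (a sketch):
library(dplyr)
df_test <- df_test %>%
  mutate(col_1 = coalesce(col_1, temp_col_1),
         col_2 = coalesce(col_2, temp_col_2))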

Related

Quickly apply & operation to pairs of columns in R

Let’s say I have two large data.tables and need to combine their columns pairwise using the & operation. The combinations are dictated by grid (combine dt1 column1 with dt2 column2, etc.)
Right now I'm using a mclapply loop and the script takes hours when I run the full dataset. I tried converting the data to a matrix and using a vectorized approach but that took even longer. Is there a faster and/or more elegant way to do this?
mx1 <- replicate(10, sample(c(T,F), size = 1e6, replace = T)) # 1e6 rows x 10 columns
mx1 <- as.data.table(mx1)
colnames(mx1) <- LETTERS[1:10]
mx2 <- replicate(10, sample(c(T,F), size = 1e6, replace = T)) # 1e6 rows x 10 columns
mx2 <- as.data.table(mx2)
colnames(mx2) <- letters[1:10]
grid <- expand.grid(col1 = colnames(mx1), col2 = colnames(mx2)) # the combinations I want to evaluate
out <- new_layer <- mapply(grid$col1, grid$col2, FUN = function(col1, col2) { # <--- mclapply loop
  mx1[[col1]] & mx2[[col2]]
}, SIMPLIFY = F)
setDT(out) # convert output into data table
colnames(out) <- paste(grid$col1, grid$col2, sep = "_")
For context, this data is from a gene expression matrix where 1 row = 1 cell
This can be done directly with no mapply: just ensure that the with argument is FALSE, i.e.:
mx1[, grid$col1, with = FALSE] & mx2[, grid$col2, with=FALSE]
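One caveat: expand.grid() returns factor columns by default, so converting them to character before using them as column selectors is safer. A sketch with the output names added (colnames<- is used because the result of & on two tables may be a matrix):
cols1 <- as.character(grid$col1)
cols2 <- as.character(grid$col2)
out <- mx1[, cols1, with = FALSE] & mx2[, cols2, with = FALSE]
colnames(out) <- paste(cols1, cols2, sep = "_")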
After some digging around I found a package called bit that is specifically designed for fast boolean operations. Converting each column of my data.table from logical to bit gave me a 100-fold increase in compute speed.
# Load libraries.
library(data.table)
library(bit)
# Create data set.
mx1 <- replicate(10, sample(c(T,F), size = 5e6, replace = T)) # 5e6 rows x 10 columns
colnames(mx1) <- LETTERS[1:10]
mx2 <- replicate(10, sample(c(T,F), size = 5e6, replace = T)) # 5e6 rows x 10 columns
colnames(mx2) <- letters[1:10]
grid <- expand.grid(col1 = colnames(mx1), col2 = colnames(mx2)) # combinations I want to evaluate
# Single operation with logical matrix.
system.time({
  out <- mx1[, grid$col1] & mx2[, grid$col2]
}) # 26.014s
# Loop with logical matrix.
system.time({
  out <- mapply(grid$col1, grid$col2, FUN = function(col1, col2) {
    mx1[, col1] & mx2[, col2]
  })
}) # 31.914s
# Single operation with logical data.table.
mx1.dt <- as.data.table(mx1)
mx2.dt <- as.data.table(mx2)
system.time({
  out <- mx1.dt[, grid$col1, with = F] & mx2.dt[, grid$col2, with = F]
}) # 32.349s
# Loop with logical data.table.
system.time({
  out <- mapply(grid$col1, grid$col2, FUN = function(col1, col2) {
    mx1.dt[[col1]] & mx2.dt[[col2]]
  })
}) # 15.031s <---- SECOND FASTEST TIME, ~2X IMPROVEMENT
# Loop with bit data.table.
mx1.bit <- mx1.dt[, lapply(.SD, as.bit)]
mx2.bit <- mx2.dt[, lapply(.SD, as.bit)]
system.time({
  out <- mapply(grid$col1, grid$col2, FUN = function(col1, col2) {
    mx1.bit[[col1]] & mx2.bit[[col2]]
  })
}) # 0.383s <---- FASTEST TIME, ~100X IMPROVEMENT
# Convert back to logical table.
out <- setDT(out)
colnames(out) <- paste(grid$col1, grid$col2, sep = "_")
out <- out[, lapply(.SD, as.logical)]
There are also special functions like sum.bit and ri that you can use to aggregate data without converting it back to logical.
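As a rough sketch of what that looks like (hedged; see ?sum.bit and ?ri in the bit package for the details):
library(bit)
b <- as.bit(c(TRUE, FALSE, TRUE, TRUE))
sum(b)        # dispatches to the bit method, counts TRUEs without converting back
as.logical(b) # only if you do need the plain logical vector again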

Efficient way to add numbers to alphanumeric strings in R

I have a data.frame with ids composed of sequences of alphanumeric characters (e.g., id = c("A001", "A002", "B013")). I was looking for an easy function in stringr or stringi that would do math with these strings (id + 1 should return c("A002", "A003", "B014")).
I made a custom function that does the trick; however, I have a feeling that there must be a better/more efficient/within-package way to achieve this.
str_add_n <- function(df, string, n, width = 3){ # needs dplyr, tidyr and stringr
  string <- enquo(string)
  ## split the string using pattern
  df <- df %>%
    separate(!!string,
             into = c("text", "num"),
             sep = "(?<=[A-Za-z])(?=[0-9])",
             remove = FALSE) %>%
    mutate(num = as.numeric(num),
           num = num + n,
           num = stringr::str_pad(as.character(num),
                                  width = width,
                                  side = "left",
                                  pad = 0)) %>%
    unite(next_string, text:num, sep = "")
  return(df)
}
Let's make a toy df
df <- data.frame(id = c("A001", "A002", "B013"))
str_add_n(df, id, 1)
id next_string
1 A001 A002
2 A002 A003
3 B013 B014
Again, this works, I'm wondering if there's a better way to do this, all tweaks welcome!
UPDATE
Based on the suggested answers I ran some benchmarks, and it appears that both come very close. I would be inclined to go with str_add_n_2 (I changed the name so I could run both, and took the suggestion of x <- as.character(x)).
microbenchmark::microbenchmark(
  question = str_add_n(df, id, 1),
  answer = df %>% mutate_at(vars(id), funs(str_add_n_2(., 1))),
  string_add = df %>% mutate_at(vars(id), funs(string_add(as.character(.)))))
Which yields
Unit: milliseconds
       expr      min       lq     mean   median       uq      max neval cld
   question 4.312094 4.448391 4.695276 4.570860 4.755748 10.29253   100   c
     answer 2.932146 3.017874 3.191262 3.117627 3.240688  8.24967   100   a
 string_add 3.388442 3.466466 3.699363 3.534416 3.682762  9.05441   100   b
More tweaks are welcome!
Here is a way with gsubfn
id <- c("A001", "A002", "B013")
library(gsubfn)
gsubfn("([0-9]+)", function(x) sprintf("%03.0f", as.numeric(x) + 1), id)
#[1] "A002" "A003" "B014"
You could make it a function
string_add <- function(string, add = 1, width = 3) {
  gsubfn::gsubfn("([0-9]+)", function(x) sprintf(paste0("%0", width, ".0f"), as.numeric(x) + add), string)
}
string_add(id, add = 10, width = 5)
#"A00011" "A00012" "B00023"
I'd suggest it's easier to define the function based on a vector of strings rather than hard-coding it to look for columns in the frame; for the latter, you can always use something like mutate_at(vars(id,...), funs(str_add_n)).
str_add_n <- function(x, n = 1L) {
  gr <- gregexpr("\\d+", x)
  reg <- regmatches(x, gr)
  widths <- nchar(reg)
  regmatches(x, gr) <- sprintf(paste0("%0", widths, "d"), as.integer(reg) + n)
  x
}
vec <- c("A001", "A002", "B013")
str_add_n(vec)
# [1] "A002" "A003" "B014"
If in a frame:
df <- data.frame(id = c("A001", "A002", "B013"), x = 1:3,
stringsAsFactors = FALSE)
library(dplyr)
df %>%
mutate_at(vars(id), funs(str_add_n(., 3)))
# id x
# 1 A004 1
# 2 A005 2
# 3 B016 3
Caveat: this silently requires true character, not factor ... a possible defensive tactic might be to add x <- as.character(x) in the function definition.
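That defensive version would look something like this (a sketch; only the first line differs from the function above):
str_add_n <- function(x, n = 1L) {
  x <- as.character(x)  # defensively coerce factors to character
  gr <- gregexpr("\\d+", x)
  reg <- regmatches(x, gr)
  widths <- nchar(reg)
  regmatches(x, gr) <- sprintf(paste0("%0", widths, "d"), as.integer(reg) + n)
  x
}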

Fast ways to subset categorical data in R with multiple conditions

I have a large dataset in R (say >40,000 rows and >20 categorical columns) that I repeatedly subset, so I would like to speed this up as much as possible. It needs to be a general function (each categorical column has a discrete number of possible values, say in string format).
Each time I subset, I need to identify the subset of rows that satisfy multiple logical set membership conditions (e.g. >10 conditions). I.e., I need to check several columns and check if values in that column match a certain set membership (hence the use of %in%).
# simple dataset example
library(dplyr)
num_col <- 15
num_row <- 100000
dat_list <- list()
for (i in 1:num_col) {
  dat_list[[i]] <- data_frame(sample(letters[1:10], size = num_row, r = T))
}
dat <- bind_cols(dat_list)
names(dat) <- paste0("col", seq(15))
I've looked around the internet and SO a lot, but haven't found the discussion of performance I'm looking for. I mostly code using dplyr, so apologies if there's a clear performance improvement here in data.table; I've tried some simple benchmarks between the two (but without using any data.table indexing, etc.) and it's not obvious whether one is faster.
Example options I've considered (since I'm not great at data.table, I've excluded data.table options from here):
library(lazyeval) # interp() below comes from lazyeval
base_filter <- function(dat) {
  for (i in 1:7) {
    col_name <- paste0('col', i)
    dat <- dat[dat[[col_name]] %in% sample(letters[1:10], size = 4), ]
  }
  dat
}
dplyr_filter1 <- function(dat) {
  for (i in 1:7) {
    col_name <- paste0('col', i)
    dat <- filter_(dat,
                   .dots = interp(~ colname %in% vals,
                                  colname = as.name(col_name),
                                  vals = sample(letters[1:10], size = 4)))
  }
  dat
}
dplyr_filter2 <- function(dat) {
  dots_filter <- list()
  for (i in 1:7) {
    col_name <- paste0('col', i)
    dots_filter[[i]] <- interp(~ colname %in% vals,
                               colname = as.name(col_name),
                               vals = sample(letters[1:10], size = 4))
  }
  filter_(dat, .dots = dots_filter)
}
Note: In practice, on my real datasets, dplyr_filter2 actually works fastest. I've also tried dtplyr or converting my data to a data.table, but this seems slower than without.
Note: On the other hand, in practice, the base R function outperforms the dplyr examples when data has fewer rows and fewer columns (perhaps due to copying speed?).
Thus, I'd like to ask SO what the most efficient general way(s) to subset a categorical data frame under multiple set-membership conditions are, and, if possible, to explain the mechanics of why. Does the answer differ for smaller datasets? Does it depend on copying time or search time?
Useful related links
fast lookup for one key
using hash tables in R for key-value pairs
I understand that you prefer not to use data.table; I'm just providing some timings for reference below. With indexing, subsetting can be performed much faster, and an inner join of the two tables can also be done easily in data.table.
# simple dataset example
library(dplyr)
library(lazyeval)
set.seed(0L)
num_col <- 15
num_row <- 100000
dat_list <- list()
for (i in 1:num_col) {
  dat_list[[i]] <- data_frame(sample(letters[1:10], size = num_row, r = T))
}
dat <- bind_cols(dat_list)
names(dat) <- paste0("col", seq(15))
selection <- lapply(1:7, function(n) sample(letters[1:10], size = 4))
base_filter <- function(df) {
  for (i in 1:7) {
    col_name <- paste0('col', i)
    df <- df[df[[col_name]] %in% selection[[i]], ]
  }
  df
}
dplyr_filter1 <- function(df) {
  for (i in 1:7) {
    col_name <- paste0('col', i)
    df <- filter_(df,
                  .dots = interp(~ colname %in% vals,
                                 colname = as.name(col_name),
                                 vals = selection[[i]]))
  }
  df
}
dplyr_filter2 <- function(df) {
  dots_filter <- list()
  for (i in 1:7) {
    col_name <- paste0('col', i)
    dots_filter[[i]] <- interp(~ colname %in% vals,
                               colname = as.name(col_name),
                               vals = selection[[i]])
  }
  filter_(df, .dots = dots_filter)
}
library(data.table)
#convert data.frame into data.table
dt <- data.table(dat, key=names(dat)[1:7])
#create the sets of selection
dtSelection <- data.table(expand.grid(selection, stringsAsFactors=FALSE))
library(microbenchmark)
microbenchmark(
  base_filter(dat),
  dplyr_filter1(dat),
  dplyr_filter2(dat),
  dt[dtSelection, nomatch=0], # perform inner join between dataset and selection
  times=5L)
#Unit: milliseconds
# expr min lq mean median uq max neval
# base_filter(dat) 27.084801 27.870702 35.849261 32.045900 32.872601 59.372301 5
# dplyr_filter1(dat) 23.130100 24.114301 26.922081 24.860701 29.804301 32.701002 5
# dplyr_filter2(dat) 29.641101 30.686002 32.363681 31.103000 31.884701 38.503601 5
# dt[dtSelection, nomatch = 0] 3.626001 3.646201 3.829341 3.686601 3.687001 4.500901 5
In addition to chinsoon12's alternatives, one thing to consider is to avoid subsetting the data.frame in each iteration. So, instead of
f0 = function(x, cond)
{
  for(j in seq_along(x)) x = x[x[[j]] %in% cond[[j]], ]
  return(x)
}
one alternative is to accumulate a logical vector of whether to include each row in the final subset:
f1 = function(x, cond)
{
  i = rep_len(TRUE, nrow(x))
  for(j in seq_along(x)) i = i & (x[[j]] %in% cond[[j]])
  return(x[i, ])
}
Another alternative is to iteratively reduce the number of comparisons, but by shrinking the row indices instead of the data.frame itself:
f2 = function(x, cond)
{
  i = 1:nrow(x)
  for(j in seq_along(x)) i = i[x[[j]][i] %in% cond[[j]]]
  return(x[i, ])
}
And a comparison with data:
set.seed(1821)
dat = as.data.frame(replicate(30, sample(c(letters, LETTERS), 5e5, TRUE), FALSE),
stringsAsFactors = FALSE)
conds = replicate(ncol(dat), sample(c(letters, LETTERS), 48), FALSE)
system.time({ ans0 = f0(dat, conds) })
# user system elapsed
# 3.44 0.28 3.86
system.time({ ans1 = f1(dat, conds) })
# user system elapsed
# 0.66 0.01 0.68
system.time({ ans2 = f2(dat, conds) })
# user system elapsed
# 0.34 0.01 0.39
identical(ans0, ans1)
#[1] TRUE
identical(ans1, ans2)
#[1] TRUE
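A related one-step variant (a sketch, not benchmarked above) builds the whole logical index with Map and Reduce, since a data.frame is just a list of columns; it should select the same rows as f1:
f3 = function(x, cond)
{
  # each Map() element is one column's %in% test; Reduce() ANDs them together
  i = Reduce(`&`, Map(`%in%`, x, cond))
  return(x[i, ])
}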

Adding a new column for each existing one in data frame (for outlier detection)

I have a dataframe df. For each column I want to add another column indicating whether the value is inside or outside my simple "outlier detection thresholds" by writing TRUE (= outlier) or FALSE (= no outlier).
Here's the code:
df <- read.csv("<FILE>", header=TRUE, sep=";")
column_names <- colnames(df[,-1]) # first column is actually row name
for(name in column_names) {
  med <- median(df[[name]], na.rm = TRUE)
  std <- sd(df[[name]], na.rm = TRUE)
  max <- med + 3 * std
  min <- med - 3 * std
  newcol <- paste(name, "outlier", sep="_") # create new column name
  df <- within(df, newcol <- ifelse(name < max & name > min,"FALSE","TRUE"))
}
Instead of adding a new column for every existing one, just one column named "newcol" is added. How do I access the actual value of the variable newcol in this case? I already tried get(newcol) and [[newcol]].
Thank you so much for your help!
EDIT:
Solution looks like this
df <- read.csv("<FILE>", header=TRUE, sep=";")
column_names <- colnames(df[,-1]) # first column is actually row name
for(name in column_names) {
  med <- median(df[[name]], na.rm = TRUE)
  std <- sd(df[[name]], na.rm = TRUE)
  max <- med + 3 * std
  min <- med - 3 * std
  newcol <- paste(name, "outlier", sep="_")
  df[[newcol]] <- with(df, ifelse(df[[name]] < max & df[[name]] > min,"FALSE","TRUE"))
}
Your last line should read:
df[[newcol]] <- with(df, ifelse(...))
The <- operator assumes that newcol is the actual name of the column, not a variable that contains this name.
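In other words, with newcol holding a string (a tiny made-up illustration):
newcol <- "some_col_outlier"
df <- within(df, newcol <- NA) # adds a column literally named "newcol"
df[[newcol]] <- NA             # adds/overwrites the column named "some_col_outlier"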
This is an approach using data.table
require(data.table)
outlier <- function(x) {
  med <- median(x, na.rm = TRUE)
  std <- sd(x, na.rm = TRUE)
  max <- med + 3 * std
  min <- med - 3 * std
  return(!(x < max & x > min))
}
# df <- fread("<FILE>")
df <- data.table(x = rt(10, 5), y = rt(10, 5))
df[3, x := 100]
df[7, y := 100]
df[, paste(names(df), "outlier", sep="_") := lapply(.SD, outlier)]
df
You could assign everything at once:
is_outlier <- function(x) {
  med <- median(x, na.rm = TRUE)
  std <- sd(x, na.rm = TRUE)
  max <- med + 3 * std
  min <- med - 3 * std
  !(x < max & x > min)
}
column_names <- names(df)[-1]
column_names_outlier <- paste(column_names, "outlier", sep="_")
df[column_names_outlier] <- lapply(df[column_names], is_outlier)

How do I sub sample data by group efficiently?

I have a problem similar to the one explained in this question. Like that question, I have a data frame with 3 columns (id, group, value). I want to take n samples with replacement from each group and produce a smaller data frame with n samples from each group.
However, I am doing hundreds of subsamples in a simulation, and the ddply-based solution is too slow to be used in my code. I tried to rewrite it as simpler code to see if I could get better performance, but it is still slow (no better than the ddply solution, if not worse). Below is my code; I am wondering if it can be improved for performance.
#Producing example DataFrame
dfsize <- 10
groupsize <- 7
test.frame.1 <- data.frame(id = 1:dfsize, group = rep(1:groupsize,each = ceiling(dfsize/groupsize))[1:dfsize], junkdata = sample(1:10000, size =dfsize))
#Main function for subsampling
sample.from.group <- function(df, dfgroup, size, replace){
  outputsize <- 1
  newdf <- df # assuming a sample cannot be larger than the original
  uniquegroups <- unique(dfgroup)
  for (uniquegroup in uniquegroups){
    dataforgroup <- which(dfgroup==uniquegroup)
    mysubsample <- df[sample(dataforgroup, size, replace),]
    sizeofsample <- nrow(mysubsample)
    newdf[outputsize:(outputsize+sizeofsample-1), ] <- mysubsample
    outputsize <- outputsize + sizeofsample
  }
  return(newdf[1:(outputsize-1),])
}
#Using the function
sample.from.group(test.frame.1, test.frame.1$group, 100, replace = TRUE)
Here are two plyr-based solutions:
library(plyr)
dfsize <- 1e4
groupsize <- 7
testdf <- data.frame(
  id = seq_len(dfsize),
  group = rep(1:groupsize, length = dfsize),
  junkdata = sample(1:10000, size = dfsize))
sample_by_group_1 <- function(df, dfgroup, size, replace) {
  ddply(df, dfgroup, function(x) {
    x[sample(nrow(x), size = size, replace = replace), , drop = FALSE] # sample within the group x
  })
}
sample_by_group_2 <- function(df, dfgroup, size, replace) {
  idx <- split_indices(df[[dfgroup]])
  subs <- lapply(idx, sample, size = size, replace = replace)
  df[unlist(subs, use.names = FALSE), , drop = FALSE]
}
library(microbenchmark)
microbenchmark(
  ddply = sample_by_group_1(testdf, "group", 100, replace = TRUE),
  plyr = sample_by_group_2(testdf, "group", 100, replace = TRUE)
)
# Unit: microseconds
# expr min lq median uq max neval
# ddply 4488 4723 5059 5360 36606 100
# plyr 443 487 507 536 31343 100
The second approach is much faster because it does the subsetting in a single step; if you can figure out how to do it in one step, it's usually an easy way to get better performance.
I think this is cleaner and possibly faster:
z <- sapply(unique(test.frame.1$group), FUN = function(x){
  sample(which(test.frame.1$group==x), 100, TRUE)
})
out <- test.frame.1[z,]
out
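If data.table is an option, a by-group sample is also a one-liner (a sketch, not benchmarked here):
library(data.table)
dt <- as.data.table(test.frame.1)
out <- dt[, .SD[sample(.N, 100, replace = TRUE)], by = group]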
