R parallel execution

I have a data frame containing 5 columns:
COL1 | COL2 | COL3 | COL4 | COL5
I need to aggregate on COL1 and apply 4 different functions to columns COL2 through COL5:
a1 <- aggregate(COL2 ~ COL1, data = dataframe, sum)
a2 <- aggregate(COL3 ~ COL1, data = dataframe, length)
a3 <- aggregate(COL4 ~ COL1, data = dataframe, max)
a4 <- aggregate(COL5 ~ COL1, data = dataframe, min)
finalDF <- Reduce(function(x, y) merge(x, y, all = TRUE), list(a1, a2, a3, a4))
1) I have 24 cores on the machine. How can I execute the above 4 lines of code (a1, a2, a3, a4) in parallel? I want to use 4 cores simultaneously and then use Reduce to compute finalDF.
2) Can I apply a different function to each column in a single aggregate call? (I can apply one function to multiple columns, and multiple functions to one column, but I was unable to apply a different function to each column: COL2-sum, COL3-length, COL4-max, COL5-min.)
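One way to tackle question 1 is sketched below with the base parallel package (an illustrative sketch, not from the original thread; mclapply() forks, so it only runs in parallel on Unix-alikes, and on Windows you would build a cluster with makeCluster() and use parLapply() instead). The column/function pairs are taken straight from the question:
library(parallel)
tasks <- list(c("COL2", "sum"), c("COL3", "length"),
              c("COL4", "max"), c("COL5", "min"))
agg_list <- mclapply(tasks, function(tk) {
  # reformulate("COL1", "COL2") builds the formula COL2 ~ COL1
  aggregate(reformulate("COL1", tk[1]), data = dataframe, FUN = match.fun(tk[2]))
}, mc.cores = 4)
finalDF <- Reduce(function(x, y) merge(x, y, all = TRUE), agg_list)
That said, the timing in the dplyr answer below suggests a single grouped pass is usually fast enough that forking four aggregate() calls buys little.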

This is an example of how you might do it with dplyr, as suggested by @Roland:
set.seed(2)
df <- data.frame(COL1 = sample(LETTERS, 1e6, replace = TRUE),
                 COL2 = rnorm(1e6),
                 COL3 = runif(1e6, 100, 1000),
                 COL4 = rnorm(1e6, 25, 100),
                 COL5 = runif(1e6, -100, 10))
#> head(df)
# COL1 COL2 COL3 COL4 COL5
#1 E 1.0579823 586.2360 -3.157057 -14.462318
#2 S 0.1238110 872.3868 129.579090 9.525772
#3 O 0.4902512 498.0537 93.063487 1.910506
#4 E 1.7215843 200.7077 126.716256 -5.865204
#5 Y 0.6515853 275.3369 12.554218 -26.301225
#6 Y 0.7959678 134.4977 54.789415 -33.145334
require(dplyr)
df <- df %>%
  group_by(COL1) %>%
  summarize(a1 = sum(COL2),
            a2 = length(COL3),
            a3 = max(COL4),
            a4 = min(COL5)) # add as many calculations as you like
On my machine this took 0.064 seconds.
#> head(df)
#Source: local data frame [6 x 5]
#
# COL1 a1 a2 a3 a4
#1 A -0.9068368 38378 403.4208 -99.99943
#2 B 6.0557452 38551 419.0970 -99.99449
#3 C 108.5680251 38673 491.8061 -99.99382
#4 D -34.1217133 38469 481.0626 -99.99697
#5 E -68.2998926 38168 452.8280 -99.99602
#6 F -185.9059338 38159 417.2271 -99.99995
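As for question 2: aggregate() applies a single function to all of its response columns, so it cannot mix sum/length/max/min in one call. Besides dplyr, data.table expresses the same one-pass, multi-function summary; a sketch, assuming the original (pre-summarize) df:
library(data.table)
dt <- as.data.table(df) # df as first constructed above, before the summarize
dt[, .(a1 = sum(COL2), a2 = .N, a3 = max(COL4), a4 = min(COL5)), by = COL1]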

Related

Simple and efficient way to subset a data frame using values and names in a vector

Given a dataset (say, stored as a data frame) of the form:
n <- 10
set.seed(123)
ds.df <- data.frame(col1 = round(rnorm(n, 2, 4), digits = 1),
                    col2 = sample.int(2, n, replace = TRUE),
                    col3 = sample.int(n*10, n),
                    col4 = sample(letters, n, replace = TRUE))
is there a simple and efficient way to subset it, using a vector of values that defines the equalities the subset should respect? Something like:
subset_v <- c(col1 = -0.2, col4 = "i")
ds.subset <- subset(ds.df, subset_v)
ds.subset
#   col1 col2 col3 col4
# 1 -0.2    1    9    i
where the function subset(ds.df, subset_v) should return the subset that respects:
ds.df[ds.df$col1 == subset_v["col1"] & ds.df$col4 == subset_v["col4"], ]
But this last expression isn't very handy, and I would like to be able to use any columns without knowing them in advance.
I did something that works:
subset <- function(ds.df, subset_v) {
  sub <- rep(TRUE, nrow(ds.df))
  for (cn in names(subset_v)) {
    sub <- sub & (ds.df[, cn] == subset_v[[cn]])
  }
  ds.df[sub, ]
}
But I feel like there is a much better and more efficient way to do it (maybe removing the for loop somehow).
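One loop-free variant in base R builds one logical vector per constraint with Map() and folds them together with Reduce() (a sketch; note that the for loop above is already vectorized over rows, so the gain is mostly stylistic):
subset2 <- function(ds.df, subset_v) {
  # one logical vector per named constraint, AND-ed together
  keep <- Reduce(`&`, Map(function(cn, v) ds.df[[cn]] == v,
                          names(subset_v), subset_v))
  ds.df[keep, , drop = FALSE]
}
subset2(ds.df, c(col1 = -0.2, col4 = "i"))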
Personally, I wonder whether it is a good idea to use a named vector to subset a data frame, since it can only express equality (==); greater-than and less-than conditions cannot be written this way. I would recommend using a quoted expression instead of a named vector (see the approach below).
However, I figured out a tidyverse way to write a function with said functionality:
library(tidyverse)
set.seed(123)
n <- 10
ds.df <- data.frame(col1 = round(rnorm(n, 2, 4), digits = 1),
                    col2 = sample.int(2, n, replace = TRUE),
                    col3 = sample.int(n*10, n),
                    col4 = sample(letters, n, replace = TRUE))
new_filter <- function(data, expr) {
  exprs_ls <- purrr::imap(expr, ~ rlang::exprs(!!rlang::sym(.y) == !!.x))
  filter(data, !!!unname(unlist(exprs_ls)))
}
new_filter(ds.df, c(col1 = -0.2, col4 = "i"))
#>   col1 col2 col3 col4
#> 1 -0.2    1    9    i
Below is my alternative approach.
In base R you can use quote to quote the subset expression (instead of creating a vector) and then you can use eval to evaluate it inside subset.
n <- 10
ds.df <- data.frame(col1 = round(rnorm(n, 2, 4), digits = 1),
                    col2 = sample.int(2, n, replace = TRUE),
                    col3 = sample.int(n*10, n),
                    col4 = sample(letters, n, replace = TRUE))
subset_v <- quote(col1 > 2 & col3 > 40)
subset(ds.df, eval(subset_v))
#> col1 col2 col3 col4
#> 1 6.6 1 93 m
#> 2 7.0 2 62 j
#> 4 3.9 1 94 t
#> 7 4.5 1 46 r
#> 8 2.8 2 98 h
#> 10 4.9 1 78 p
The same approach, but using dplyr::filter:
library(dplyr)
n <- 10
ds.df <- data.frame(col1 = round(rnorm(n, 2, 4), digits = 1),
                    col2 = sample.int(2, n, replace = TRUE),
                    col3 = sample.int(n*10, n),
                    col4 = sample(letters, n, replace = TRUE))
filter_v <- expr(col1 > 2 & col3 > 40)
filter(ds.df, !!filter_v)
#> col1 col2 col3 col4
#> 1 3.3 1 70 a
#> 2 2.5 2 82 q
#> 3 3.6 1 51 z
In data.table you could do this:
library(data.table)
setDT(ds.df)
subset_v <- list(col1 = -3.3, col2 = 1, col4 = "e")
ds.df[subset_v, on = names(subset_v)]
# col1 col2 col3 col4
# 1: -3.3 1 29 e
Reproducible data:
set.seed(20)
n <- 10
ds.df <- data.frame(
col1 = round(rnorm(n, 2, 4), digits = 1),
col2 = sample.int(2, n, replace = TRUE),
col3 = sample.int(n*10, n),
col4 = sample(letters, n, replace = TRUE)
)
I think you're looking for merge:
subset <- function(ds.df, subset_v) {
  filter <- data.frame(as.list(subset_v))
  merge(ds.df, filter, by = names(filter), all = FALSE)
}
This works on both data.frame and data.table, and with data.table it should be the same underneath as @sindri_baldur's answer; so if you're already using data.table, the primary difference is whether you prefer typing merge(x, y, by = z, all = FALSE) or x[y, on = z].
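For example, reusing the set.seed(20) data and the lookup values from the data.table answer above (passing a list rather than a named vector keeps the column types intact):
subset(ds.df, list(col1 = -3.3, col2 = 1, col4 = "e"))
# should return the single row with col3 == 29, with the by-columns first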

resample each two columns together in a data frame in R

I have a very large data frame that contains 100 rows and 400000 columns.
To sample each column, I can simply do:
df <- apply(df, 2, sample)
But I want every two columns to be sampled together. For example, if col1 is originally c(1,2,3,4,5) and col2 is c(6,7,8,9,10), and after resampling col1 becomes c(1,3,2,4,5), then I want col2 to become c(6,8,7,9,10), following the resampling pattern of col1. The same goes for col3 & col4, col5 & col6, etc.
I wrote a for loop to do this, which takes forever. Is there a better way? Thanks!
You might try this: split the data frame every two columns with split.default; for each sub data frame, sample the rows; then bind them back together:
df <- data.frame(col1 = 1:5, col2 = 6:10, col3 = 11:15)
index <- seq_len(nrow(df))
cbind.data.frame(
  setNames(lapply(
    split.default(df, (seq_along(df) - 1) %/% 2),
    function(sdf) sdf[sample(index), , drop = FALSE]),
    NULL)
)
# col1 col2 col3
#5 5 10 12
#4 4 9 11
#1 1 6 15
#2 2 7 14
#3 3 8 13
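At 400000 columns, split.default() creates 200000 small data frames, and that bookkeeping can dominate the runtime. If all columns share one type, a matrix-based sketch (illustrative, not benchmarked here) avoids the per-pair data frame overhead:
m <- as.matrix(df) # assumes a common column type, e.g. all numeric
pairs <- split(seq_len(ncol(m)), (seq_len(ncol(m)) - 1) %/% 2)
res <- do.call(cbind, lapply(pairs, function(p) m[sample(nrow(m)), p, drop = FALSE]))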

How do you clear column elements from an R data frame based on another column's elements in the same data frame?

I have the following data frame
>data.frame
col1 col2
A
x B
C
D
y E
I need a new data frame that looks like:
>new.data.frame
col1 col2
A
x
C
D
y
I just need a method that reads from col1 and, if there are ANY characters in col1, clears the corresponding row value of col2. I was thinking about using an if statement and data.table for this, but am unsure how to express deleting col2's values based on ANY characters being present in col1.
Something like this works:
# Create data frame
dat <- data.frame(col1=c(NA,"x", NA, NA, "y"), col2=c("A", "B", "C", "D", "E"))
# Create new data frame
dat_new <- dat
dat_new$col2[!is.na(dat_new$col1)] <- NA
# Check that it worked
dat
dat_new
This depends on what you mean by 'remove'. Here I'm assuming a blank string "". However, the same principle applies for NAs:
## create data frame
df <- data.frame(col1 = c("", "x", "", "", "y"),
                 col2 = LETTERS[1:5],
                 stringsAsFactors = FALSE)
df
# col1 col2
# 1 A
# 2 x B
# 3 C
# 4 D
# 5 y E
## subset by blank values in col1, and replace the values in col2
df[df$col1 != "",]$col2 <- ""
## or df$col2[df$col1 != ""] <- ""
df
# col1 col2
# 1 A
# 2 x
# 3 C
# 4 D
# 5 y
And as you mentioned data.table, the code for this would be
library(data.table)
setDT(df)
## filter by blank entries in col1, and update col2 by-reference (:=)
df[col1 != "", col2 := ""]
df
Using dplyr
library(dplyr)
df %>%
  mutate(col2 = replace(col2, col1 != "", ""))
# col1 col2
#1 A
#2 x
#3 C
#4 D
#5 y

Remove rows from a data frame that contain duplicate information across the columns

col1 <- c('A','B','C', 'D')
col2 <- c('B','A','C', 'C')
col3 <- c('B','C','C', 'A')
dat <- data.frame(cbind(col1, col2, col3))
dat
col1 col2 col3
1 A B B
2 B A C
3 C C C
4 D C A
I would like to remove rows 1 and 3 from dat as the letter B is present more than once in row 1 and the letter C is present more than once in row 3.
EDIT:
My actual data contains over 1 million rows and 14 columns, all of which contain character data. The solution that runs the fastest is preferred, as I am using the data frame in a live setting to make decisions, and the underlying data changes every few minutes.
You could try this (but I'm sure there is a better way)
cols <- ncol(dat)
indx <- apply(dat, 1, function(x) length(unique(x)) == cols)
dat[indx, ]
# col1 col2 col3
# 2 B A C
# 4 D C A
Another way (if your columns are characters and if you don't have too many columns) is something like the following (which is vectorized)
indx <- with(dat, (col1 == col2) | (col1 == col3) | (col2 == col3))
dat[!indx, ]
# col1 col2 col3
# 2 B A C
# 4 D C A
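With the question's 14 columns there are choose(14, 2) = 91 pairwise comparisons, too many to write out by hand; a sketch that generates them with combn() keeps the row-wise vectorization:
pair_idx <- combn(ncol(dat), 2) # all column pairs, one per matrix column
indx <- Reduce(`|`, lapply(seq_len(ncol(pair_idx)),
                           function(i) dat[[pair_idx[1, i]]] == dat[[pair_idx[2, i]]]))
dat[!indx, ]
# col1 col2 col3
# 2 B A C
# 4 D C A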
You could do this in dplyr, if you don't mind specifying the columns:
library(dplyr)
dat %>%
rowwise() %>%
mutate(repeats = max(table(c(col1, col2, col3))) - 1) %>%
filter(repeats == 0) %>%
select(-repeats) # if you don't want that column to appear in results
Source: local data frame [2 x 3]
col1 col2 col3
1 B A C
2 D C A
Here is an alternative. I haven't tested it on a big dataset:
library(data.table) #devel version v1.9.5
dat[setDT(melt(as.matrix(dat)))[,uniqueN(value)==.N , Var1]$V1,]
# col1 col2 col3
#2 B A C
#4 D C A
Or use anyDuplicated
dat[!apply(dat, 1, anyDuplicated),]
# col1 col2 col3
#2 B A C
#4 D C A
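Given the ~1 million rows and the live-setting speed requirement, it's worth timing the candidates on realistically sized data before committing; a sketch with the microbenchmark package (row count and repetitions are illustrative):
library(microbenchmark)
big <- dat[sample(nrow(dat), 1e5, replace = TRUE), ]
microbenchmark(
  unique_len = big[apply(big, 1, function(x) length(unique(x)) == ncol(big)), ],
  vectorized = big[!with(big, (col1 == col2) | (col1 == col3) | (col2 == col3)), ],
  any_dup    = big[!apply(big, 1, anyDuplicated), ],
  times = 5
)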

Only Keep Certain Combinations of Predictors in a Dataframe

Imagine that I have a data frame like this:
> col1 <- rep(1:3,10)
> col2 <- rep(c("a","b"),15)
> col3 <- rnorm(30,10,2)
> sample_df <- data.frame(col1 = col1, col2 = col2, col3 = col3)
> head(sample_df)
col1 col2 col3
1 1 a 13.460322
2 2 b 3.404398
3 3 a 8.952066
4 1 b 11.148271
5 2 a 9.808366
6 3 b 9.832299
I only want to keep combinations of predictors which, together, have a col3 standard deviation below 2. I can find the combinations using ddply, but I don't know how to backtrack to the original DF and select the correct levels.
> sample_df_summ <- ddply(sample_df, .(col1, col2), summarize, sd = sd(col3), count = length(col3))
> head(sample_df_summ)
col1 col2 sd count
1 1 a 2.702328 5
2 1 b 1.032371 5
3 2 a 2.134151 5
4 2 b 3.348726 5
5 3 a 2.444884 5
6 3 b 1.409477 5
For clarity, in this example I'd like the rows where col1 = 1 & col2 = b and where col1 = 3 & col2 = b (the two groups whose sd is below 2). How would I do this?
You can add a "keep" column that is TRUE only if the standard deviation is below 2. Then use a left join (merge) to add the "keep" column to the initial data frame. In the end, you just select the rows where keep is TRUE.
# add the keep column
sample_df_summ$keep <- sample_df_summ$sd < 2
sample_df_summ$sd <- NULL
sample_df_summ$count <- NULL
# join and select the rows
sample_df_keep <- merge(sample_df, sample_df_summ, by = c("col1", "col2"), all.x = TRUE, all.y = FALSE)
sample_df_keep <- sample_df_keep[sample_df_keep$keep, ]
sample_df_keep$keep <- NULL
Using dplyr:
library(dplyr)
sample_df %>%
  group_by(col1, col2) %>%
  mutate(sd = sd(col3)) %>%
  filter(sd < 2)
You get:
#Source: local data frame [6 x 4]
#Groups: col1, col2
#
# col1 col2 col3 sd
#1 1 a 10.516437 1.4984853
#2 1 b 11.124843 0.8652206
#3 2 a 7.585740 1.8781241
#4 3 b 9.806124 1.6644076
#5 1 a 7.381209 1.4984853
#6 1 b 9.033093 0.8652206
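A base R equivalent that needs no packages is also possible with ave(), which computes the group-wise sd and recycles it back onto the rows (a sketch):
sample_df[ave(sample_df$col3, sample_df$col1, sample_df$col2, FUN = sd) < 2, ]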
