R: changing data frame values based on a secondary data frame

I'm looking for a more efficient way to do some replacements/lookups.
My current method uses paste0 to create a lookup value and then matches on that to filter.
Given x,
x <- data.frame(var1 = c("AA","BB","CC","DD"),
                var2 = c("--","AA","AA","--"),
                val1 = c(1,2,1,4),
                val2 = c(5,5,7,8))
  var1 var2 val1 val2
1   AA   --    1    5
2   BB   AA    2    5
3   CC   AA    1    7
4   DD   --    4    8
var1 is the primary column and var2 is the secondary column. val1 and val2 are value columns.
If a row's var2 appears in var1 and the corresponding values match, we want to replace that value with NA, and we want to do this independently for each value column.
The approach I've come up with loops over the value columns and essentially builds a lookup key for each.
library(dplyr)

lookup.df <- x %>% filter(var2 == "--")

x[, c("val1","val2")] <- lapply(c("val1","val2"), function(column) {
  var2.lookup <- paste0(x$var2, x[[column]])
  var1.lookup <- paste0(lookup.df$var1, lookup.df[[column]])
  x[[column]][var2.lookup %in% var1.lookup] <- NA
  return(x[[column]])
})
which does return what I would expect.
> x
  var1 var2 val1 val2
1   AA   --    1    5
2   BB   AA    2   NA
3   CC   AA   NA    7
4   DD   --    4    8
However, in practice, profiling shows that the majority of the time is spent in paste0, and it just doesn't feel like the most efficient way to do this.
My real data set has millions of rows and about 25 columns and runs in around 60 seconds. I'd think there'd be a way to do a logical matrix replacement instead of accessing each column individually, but I can't figure it out.
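Roughly, the kind of single-assignment replacement I have in mind looks like this (just a sketch; it still builds keys with paste0 over a matrix via recycling, so it may well not be any faster):
val.cols <- c("val1", "val2")

# one key per cell: the row's var2 pasted with that cell's value
cell.keys   <- paste0(x$var2, as.matrix(x[val.cols]))
lookup.keys <- paste0(lookup.df$var1, as.matrix(lookup.df[val.cols]))

mask <- matrix(cell.keys %in% lookup.keys, nrow = nrow(x))
x[val.cols][mask] <- NA   # one logical-matrix assignment across all value columns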
Any help is greatly appreciated. Thanks!
Edit -- benchmarks
na.replace.orig <- function(x) {
  lookup.df <- x %>% filter(var2 == "--")
  x[, c("val1","val2")] <- lapply(c("val1","val2"), function(column) {
    var2.lookup <- paste0(x$var2, x[[column]])
    var1.lookup <- paste0(lookup.df$var1, lookup.df[[column]])
    x[[column]][var2.lookup %in% var1.lookup] <- NA
    return(x[[column]])
  })
  return(x)
}
# pulled out the lookup table since it causes a lot of overhead
na.replace.orig.no.lookup <- function(x) {
  x[, c("val1","val2")] <- lapply(c("val1","val2"), function(column) {
    var2.lookup <- paste0(x$var2, x[[column]])
    var1.lookup <- paste0(lookup.df$var1, lookup.df[[column]])
    x[[column]][var2.lookup %in% var1.lookup] <- NA
    return(x[[column]])
  })
  return(x)
}
na.replace.1 <- function(x) {
  inx <- match(x$var2, x$var1)
  jnx <- which(!is.na(inx))
  inx <- inx[!is.na(inx)]
  knx <- grep("^val", names(x))
  for (i in seq_along(inx))
    for (k in knx)
      if (x[[k]][inx[i]] == x[[k]][jnx[i]]) x[[k]][jnx[i]] <- NA
  return(x)
}
na.replace.2 <- function(x) {
  for (col in c("val1","val2")) {
    x[x[,'var2'] %in% x[,'var1'] & x[,col] %in% lookup.df[,col], col] <- NA
  }
  return(x)
}
> microbenchmark::microbenchmark(na.replace.orig(x), na.replace.orig.no.lookup(x), na.replace.1(x), na.replace.2(x), times = 10)
Unit: microseconds
                         expr     min     lq   mean median     uq    max neval
           na.replace.orig(x) 1267.23 1274.2 1441.9 1408.8 1609.8 1762.8    10
 na.replace.orig.no.lookup(x)  217.43  228.9  270.9  239.2  296.6  394.2    10
              na.replace.1(x)   98.46  106.3  133.0  123.9  136.6  239.2    10
              na.replace.2(x)  117.74  147.7  162.9  166.6  183.0  189.9    10
Edit - 3rd Variable Required
I realized that I have a 3rd variable I need to check against.
x <- data.frame(var1 = c("AA","BB","CC","DD"),
                var2 = c("--","AA","AA","--"),
                var3 = c("Y","Y","N","N"),
                val1 = c(1,2,1,4),
                val2 = c(5,5,7,8))
  var1 var2 var3 val1 val2
1   AA   --    Y    1    5
2   BB   AA    Y    2    5
3   CC   AA    N    1    7
4   DD   --    N    4    8
with the expected result
  var1 var2 var3 val1 val2
1   AA   --    Y    1    5
2   BB   AA    Y    2   NA
3   CC   AA    N    1    7
4   DD   --    N    4    8
My code still works for this case, with var3 added to both lookup keys (a vectorized sketch follows the snippet):
x[,c("val1","val2")] <- lapply(c("val1","val2"), function(column) {
var2.lookup <- paste0(x$var2, x$var3, x[[column]])
var1.lookup <- paste0(lookup.df$var1, x$var3, lookup.df[[column]])
x[[column]][var2.lookup %in% var1.lookup] <- NA
return(x[[column]])
})
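For reference, here is a vectorized sketch in the spirit of the match() answer below, extended with the var3 check. It is only a sketch: it assumes the column names var1/var2/var3 and the "val" prefix from the example, and that each var2 matches at most one var1.
na.replace.var3 <- function(x) {
  inx <- match(x$var2, x$var1)   # row whose var1 equals this row's var2
  jnx <- which(!is.na(inx))      # rows that found a match
  inx <- inx[!is.na(inx)]
  same.var3 <- x$var3[inx] == x$var3[jnx]
  for (col in grep("^val", names(x), value = TRUE)) {
    hit <- same.var3 & x[[col]][inx] == x[[col]][jnx]
    x[[col]][jnx[hit]] <- NA
  }
  x
}

na.replace.var3(x)   # on the fresh example x above, this reproduces the expected result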

The following solution uses only vectorized logic. It uses the lookup table you already made. I think it'll be even faster than Rui's solution.
library(dplyr)
x <- data.frame(var1 = c("AA","BB","CC","DD"),
                var2 = c("--","AA","AA","--"),
                val1 = c(1,2,1,4),
                val2 = c(5,5,7,8))
lookup.df <- x[ x[,'var2'] == "--", ]
x[x[,'var2'] %in% x[,'var1'] & x[,'val1'] %in% lookup.df[,'val1'] , 'val1'] <- NA
x[x[,'var2'] %in% x[,'var1'] & x[,'val2'] %in% lookup.df[,'val2'] , 'val2'] <- NA
x
#>   var1 var2 val1 val2
#> 1   AA   --    1    5
#> 2   BB   AA    2   NA
#> 3   CC   AA   NA    7
#> 4   DD   --    4    8
EDIT:
It might or might not be faster.
set.seed(4)
microbenchmark::microbenchmark(na.replace.orig(x), na.replace.1(x), na.replace.2(x), times = 50)
#> Unit: microseconds
#>                expr     min      lq     mean   median      uq      max neval
#>  na.replace.orig(x) 184.348 192.410 348.4430 202.1615 223.375 6206.546    50
#>     na.replace.1(x)  68.127  86.621 281.3503  89.8715  93.381 9693.029    50
#>     na.replace.2(x)  95.885 105.858 210.7638 113.2060 118.668 4993.849    50
OP, you'll need to test it on your dataset to see how the two approaches scale on larger data frames.
Edit 2: Implemented Rui's suggestion for the lookup table. In order from slowest to fastest benchmark (a quick timing sketch follows the list):
lookup.df <- x %>% filter(var2 == "--")
lookup.df <- filter(x, var2 == "--")
lookup.df <- x[x[,'var2'] == "--", ]
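A minimal way to check that ordering yourself (a sketch; it only times the lookup construction, on the small example x):
library(dplyr)
library(microbenchmark)

microbenchmark(
  piped  = x %>% filter(var2 == "--"),
  direct = filter(x, var2 == "--"),
  base   = x[x[,'var2'] == "--", ],
  times  = 100
)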

I find the following solution a bit confusing (and I came up with it!), but it works.
And, contrary to popular belief, for loops are not much slower than the *apply family.
inx <- match(x$var2, x$var1)
jnx <- which(!is.na(inx))
inx <- inx[!is.na(inx)]
knx <- grep("^val", names(x))
for (i in seq_along(inx))
  for (k in knx)
    if (x[[k]][inx[i]] == x[[k]][jnx[i]]) x[[k]][jnx[i]] <- NA
x
#  var1 var2 val1 val2
#1   AA   --    1    5
#2   BB   AA    2   NA
#3   CC   AA   NA    7
#4   DD   --    4    8

Related

Removing groups from dataframe if variable has repeated values

I would like to ask if there is a way of removing a group from a data frame using dplyr (or any other way, for that matter) in the following situation. Let's say I have a data frame in the following form, grouped by Variable 1:
Variable 1   Variable 2
1            a
1            b
2            a
2            a
2            b
3            a
3            c
3            a
...          ...
I would like to remove only the groups that have two consecutive identical values in Variable 2. In the table above that would remove group 2, because its values are a, a, b, but not group 3, where the values are a, c, a. So I would get the table below:
Variable 1   Variable 2
1            a
1            b
3            a
3            c
3            a
...          ...
To test for consecutive identical values, you can compare each value to the previous value in its column. In dplyr, this is possible with lag. (You could do the same thing by comparing to the next value with lead; the result comes out the same, see the sketch after the output below.)
Group the data by variable1, get the lag of variable2, then add up how many of these duplicates there are in each group. Then filter for just the groups with no duplicates. After that, feel free to remove the dupesInGroup column.
library(tidyverse)
df %>%
  group_by(variable1) %>%
  mutate(dupesInGroup = sum(variable2 == lag(variable2), na.rm = T)) %>%
  filter(dupesInGroup == 0)
#> # A tibble: 5 x 3
#> # Groups:   variable1 [2]
#>   variable1 variable2 dupesInGroup
#>       <int>     <chr>        <int>
#> 1         1         a            0
#> 2         1         b            0
#> 3         3         a            0
#> 4         3         c            0
#> 5         3         a            0
Created on 2018-05-10 by the reprex package (v0.2.0).
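For completeness, here is the lead-based variant mentioned above (a sketch; it keeps the same assumed column names variable1/variable2, and because the filter condition is length 1 per group it applies to the whole group, so no helper column is needed):
library(dplyr)

# keep only groups in which no value equals the next value
df %>%
  group_by(variable1) %>%
  filter(!any(variable2 == lead(variable2), na.rm = TRUE)) %>%
  ungroup()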
prepare data frame:
df <- data.frame("Variable 1" = c(1, 1, 2, 2, 2, 3, 3, 3), "Variable 2" = unlist(strsplit("abaabaca", "")))
write functions to test if consecutive repetitions are there or not:
any.consecutive.p <- function(v) {
  for (i in 1:(length(v) - 1)) {
    if (v[i] == v[i + 1]) {
      return(TRUE)
    }
  }
  return(FALSE)
}

any.consecutive.in.col.p <- function(df, col) {
  any.consecutive.p(df[, col])
}
any.consecutive.p() returns TRUE as soon as it finds the first consecutive repetition in a vector v.
any.consecutive.in.col.p() looks for consecutive repetitions in a column of a data frame.
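As an aside (not part of the original answer), the same check can be written without an explicit loop using run-length encoding:
# TRUE if any run of identical consecutive values is longer than 1
any.consecutive.rle.p <- function(v) {
  any(rle(as.character(v))$lengths > 1)
}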
split data frame by values of Variable.1
df.l <- split(df, df$Variable.1)
df.l
$`1`
  Variable.1 Variable.2
1          1          a
2          1          b

$`2`
  Variable.1 Variable.2
3          2          a
4          2          a
5          2          b

$`3`
  Variable.1 Variable.2
6          3          a
7          3          c
8          3          a
Finally, go over this list of data frames and test whether each one contains consecutive duplicates in the Variable.2 column.
If it does, don't collect it.
Bind the collected data frames together by rows.
Reduce(rbind, lapply(df.l, function(df) if(!any.consecutive.in.col.p(df, "Variable.2")) {df}))
  Variable.1 Variable.2
1          1          a
2          1          b
6          3          a
7          3          c
8          3          a
Say you want to remove all groups of df, grouped by a, where column b has repeated values. You can do that as below (dplyr and data.table versions; a base R sketch follows at the end).
set.seed(0)
df <- data.frame(a = rep(1:3, rep(3, 3)), b = sample(1:5, 9, T))

# dplyr
library(dplyr)
df %>%
  group_by(a) %>%
  filter(all(b != lag(b), na.rm = T))

# data.table
library(data.table)
setDT(df)
df[, if (all(b != shift(b), na.rm = T)) .SD, by = a]
Benchmark shows data.table is faster
# Results
# Unit: milliseconds
#         expr       min        lq     mean    median        uq      max neval
#  use_dplyr() 141.46819 165.03761 201.0975 179.48334 205.82301 539.5643   100
#     use_DT()  36.27936  50.23011  64.9218  53.87114  66.73943 345.2863   100
# Method
library(microbenchmark)
set.seed(0)
df <- data.table(a = rep(1:2000, rep(1e3, 2000)), b = sample(1:1e3, 2e6, T))

use_dplyr <- function(x){
  df %>%
    group_by(a) %>%
    filter(all(b != lag(b), na.rm = T))
}

use_DT <- function(x){
  df[, if (all(b != shift(b), na.rm = T)) .SD, a]
}

microbenchmark(use_dplyr(), use_DT())
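A base R variant of the same per-group test is also possible (a sketch, run against the small example df from the top of this answer, where b is numeric):
set.seed(0)
df <- data.frame(a = rep(1:3, rep(3, 3)), b = sample(1:5, 9, T))

# keep groups of 'a' in which no two consecutive values of 'b' are equal
df[!as.logical(ave(df$b, df$a, FUN = function(x) any(diff(x) == 0))), ]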

Two-way contingency table in R

I have a dataframe and I want to output a two-way contingency table from two of the columns. They both have values "Too Little", "About Right" or "Too Much".
I'm typing
df %>%
  filter(!is.na(col1)) %>%
  group_by(col1) %>%
  summarise(count = n())
for both of them separately and get something like this:
        col1 count
      <fctr> <int>
  Too Little 19259
 About Right  9539
    Too Much  2816
What I would like to achieve is this:
       Too Little About Right Too Much Total
col1        19259        9539     2816 31614
col2        20619        9374     2262 32255
Total       39878       18913     5078 63869
I've been trying to use table function
addmargins(table(df$col1, df$col2))
But the result is not what I want
            Too Little About Right Too Much   Sum
Too Little       13770        4424      740 18934
About Right       4901        3706      700  9307
Too Much          1250         800      679  2729
Sum              19921        8930     2119 30970
I'd give tabulate a try, which is the foundation for table (see ?tabulate). For example, given
set.seed(123)
vals <- LETTERS[1:3]
df <- as.data.frame(replicate(3, sample(vals, 5, T)))
df <- data.frame(lapply(df, "levels<-", vals))
then you could do
m <- t(sapply(df, tabulate, nbins = length(vals)))
colnames(m) <- vals
addmargins(m)
#     A B C Sum
# V1  1 1 3   5
# V2  1 3 1   5
# V3  1 2 2   5
# Sum 3 6 6  15
Or (via #thelatemail) just
addmargins(t(sapply(df, table)))
#     A B C Sum
# V1  1 1 3   5
# V2  1 3 1   5
# V3  1 2 2   5
# Sum 3 6 6  15
We can apply table to each column and then rbind the results:
# Using dummy data from @lukeA's answer
addmargins(do.call(rbind, lapply(df, table)))
#     A B C Sum
# V1  1 1 3   5
# V2  1 3 1   5
# V3  1 2 2   5
# Sum 3 6 6  15
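Applied to the question's own two columns (a sketch; it assumes the OP's data frame is named df and that col1 and col2 are factors sharing the same three levels), the same idea gives the desired layout, with Sum margins:
# one row per column, one count per level, plus Sum margins
addmargins(do.call(rbind, lapply(df[c("col1", "col2")], table)))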
Benchmarking
# bigger data
set.seed(123)
vals <- LETTERS[1:20]
df1 <- as.data.frame(replicate(20, sample(vals, 100000, T)))
df1 <- data.frame(lapply(df1, "levels<-", vals))

microbenchmark::microbenchmark(
  lukeA = {
    m1 <- t(sapply(df1, tabulate, nbins = length(vals)))
    colnames(m1) <- vals
    m1 <- addmargins(m1)
  },
  # as vals only used for luke's solution, keep it in.
  lukeA_1 = {
    vals <- LETTERS[1:20]
    m2 <- t(sapply(df1, tabulate, nbins = length(vals)))
    colnames(m2) <- vals
    m2 <- addmargins(m2)
  },
  thelatemail = { m3 <- addmargins(t(sapply(df1, table))) },
  zx8754 = { m4 <- addmargins(do.call(rbind, lapply(df1, table))) }
)
# Unit: milliseconds
#         expr       min        lq      mean    median        uq        max neval
#        lukeA  2.349969  2.371922  2.518447  2.473839  2.558653   3.363738   100
#      lukeA_1  2.351680  2.377196  2.523473  2.473839  2.542831   3.459242   100
#  thelatemail 38.316506 42.054136 43.785777 42.674912 44.234193  90.287809   100
#       zx8754 38.695101 41.979728 44.933602 42.762006 44.244314 110.834292   100

Counting amount of zeros within a "melted" data frame

Hi, I'm learning R and I'm trying to count how many zeros there are within the melted data. That is, I want to know how many zeros correspond to columns b and c and print the two results out.
I generated an example:
library(reshape)
library(plyr)
library(dplyr)

id = c(1,2,3,4,5,6,7,8,9,10)
b  = c(0,0,5,6,3,7,2,8,1,8)
c  = c(0,4,9,87,0,87,0,4,5,0)

test = data.frame(id, b, c)
test_melt = melt(test, id.vars = "id")
test_melt
I imagine I should create an if statement for that, something like if (test$value == 0) { print() }, but how can I tell R to count the zeros for columns that have been melted?
With your data:
test_melt %>%
  group_by(variable) %>%
  summarize(zeroes = sum(value == 0))
# # A tibble: 2 x 2
#   variable zeroes
#     <fctr>  <int>
# 1        b      2
# 2        c      4
Base R:
aggregate(test_melt$value, by = list(variable = test_melt$variable),
          FUN = function(x) sum(x == 0))
#   variable x
# 1        b 2
# 2        c 4
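Another base R one-liner in the same spirit (a sketch, not from the original answers) counts the zeros per variable with table:
# counts of rows with value == 0, split by the 'variable' factor
table(test_melt$variable[test_melt$value == 0])
# b c
# 2 4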
... and, out of curiosity:
library(microbenchmark)
microbenchmark(
  dplyr = group_by(test_melt, variable) %>% summarize(zeroes = sum(value == 0)),
  base1 = aggregate(test_melt$value, by = list(variable = test_melt$variable),
                    FUN = function(x) sum(x == 0)),
  # @PankajKaundal's suggested "formula" notation reads easier
  base2 = aggregate(value ~ variable, test_melt, function(x) sum(x == 0))
)
# Unit: microseconds
#   expr     min      lq      mean   median        uq      max neval
#  dplyr 916.421 986.985 1069.7000 1022.176 1094.7460 2272.636   100
#  base1 647.658 682.302  783.2065  715.304  765.9940 1905.411   100
#  base2 813.219 867.737  950.3247  897.093  959.8175 2017.001   100
sum(test_melt$value==0)
This should do it.
This might help. Is this what you're looking for?
> test_melt[4] <- 1
> test_melt2 <- aggregate(V4 ~ value + variable, test_melt, sum)
> test_melt2
   value variable V4
1      0        b  2
2      1        b  1
3      2        b  1
4      3        b  1
5      5        b  1
6      6        b  1
7      7        b  1
8      8        b  2
9      0        c  4
10     4        c  2
11     5        c  1
12     9        c  1
13    87        c  2
V4 is the count

Select groups based on number of unique / distinct values

I have a data frame like below
sample <- data.frame(ID = 1:9,
                     Group = c('AA','AA','AA','BB','BB','CC','CC','BB','CC'),
                     Value = c(1,1,1,2,2,2,3,2,3))
ID Group Value
 1    AA     1
 2    AA     1
 3    AA     1
 4    BB     2
 5    BB     2
 6    CC     2
 7    CC     3
 8    BB     2
 9    CC     3
I want to select groups according to the number of distinct (unique) values within each group. For example, select groups where all values within the group are the same (one distinct value per group). If you look at the group CC, it has more than one distinct value (2 and 3) and should thus be removed. The other groups, with only one distinct value, should be kept. Desired output:
ID Group Value
 1    AA     1
 2    AA     1
 3    AA     1
 4    BB     2
 5    BB     2
 8    BB     2
Could you suggest simple and fast R code that solves this problem?
Here's a solution using dplyr:
library(dplyr)
sample <- data.frame(
  ID = 1:9,
  Group = c('AA', 'AA', 'AA', 'BB', 'BB', 'CC', 'CC', 'BB', 'CC'),
  Value = c(1, 1, 1, 2, 2, 2, 3, 2, 3)
)
sample %>%
  group_by(Group) %>%
  filter(n_distinct(Value) == 1)
We group the data by Group, and then only select groups where the number of distinct values of Value is 1.
data.table version:
library(data.table)
sample <- as.data.table(sample)
sample[ , if(uniqueN(Value) == 1) .SD, by = Group]
#    Group ID Value
# 1:    AA  1     1
# 2:    AA  2     1
# 3:    AA  3     1
# 4:    BB  4     2
# 5:    BB  5     2
# 6:    BB  8     2
An alternative using ave, if the data is numeric, is to check whether the variance is 0:
sample[with(sample, ave(Value, Group, FUN = var)) == 0, ]
An alternative solution that could be faster on large data is:
setkey(sample, Group, Value)
ans <- sample[unique(sample)[, .N, by=Group][N==1, Group]]
The point is that computing the unique values for each group can be time consuming when there are many groups. Instead, we can set the key on the data.table, take the unique rows by key (which is extremely fast), and then count how many unique Group-Value rows remain for each group. We then keep only the groups where that count is 1 and perform a join (which is once again very fast). Here's a benchmark on large data:
require(data.table)
set.seed(1L)
sample <- data.table(ID = 1:1e7,
                     Group = sample(rep(paste0("id", 1:1e5), each = 100)),
                     Value = sample(2, 1e7, replace = TRUE, prob = c(0.9, 0.1)))

system.time(
  ans1 <- sample[, if (length(unique(Value)) == 1) .SD, by = Group]
)
# minimum of three runs
#    user  system elapsed
#  14.328   0.066  14.382

system.time({
  setkey(sample, Group, Value)
  ans2 <- sample[unique(sample)[, .N, by = Group][N == 1, Group]]
})
# minimum of three runs
#   user  system elapsed
#  5.661   0.219   5.877
setkey(ans1, Group, ID)
setkey(ans2, Group, ID)
identical(ans1, ans2) # [1] TRUE
You can make a selector for sample using ave many different ways.
sample[ ave( sample$Value, sample$Group, FUN = function(x) length(unique(x)) ) == 1,]
or
sample[ ave( sample$Value, sample$Group, FUN = function(x) sum(x - x[1]) ) == 0,]
or
sample[ ave( sample$Value, sample$Group, FUN = function(x) diff(range(x)) ) == 0,]
Here's an approach
> ind <- aggregate(Value~Group, FUN=function(x) length(unique(x))==1, data=sample)[,2]
> sample[sample[,"Group"] %in% levels(sample[,"Group"])[ind], ]
  ID Group Value
1  1    AA     1
2  2    AA     1
3  3    AA     1
4  4    BB     2
5  5    BB     2
8  8    BB     2
