Add relative complement of two data.tables with rbind

I have a keyed data.table to which I would like to add rows from another table with the same key:
library(data.table)
key.cols <- c("ID", "Code")
set.seed(1)
DT1 = data.table(
  ID = c("b","b","b","a","a","c"),
  Code = LETTERS[seq(1,6)],
  Number = runif(6)
); DT1
DT2 = data.table(
  ID = c("a","a","c","b","b","b"),
  Code = LETTERS[seq(4,9)],
  Number = runif(6)
); DT2
I would like to add to DT1 only those rows from DT2 whose keys do not occur in DT1, i.e. rbind a relative complement:
https://en.wikipedia.org/wiki/Complement_(set_theory)#Relative_complement
I can use the set operations (fsetdiff) to add just the key columns, letting the non-key columns be filled with NA, and then join the values in afterwards:
DT1 <- rbind(DT1, fsetdiff(DT2[,(key.cols), with=FALSE], DT1[,(key.cols), with=FALSE]), fill=TRUE)
DT1[DT2, Number:=ifelse(is.na(Number), i.Number, Number), on = key.cols];DT1
Is there a less cumbersome way to do it?

Slightly less cumbersome is:
rbind(DT1, DT2[!DT1, on = .(ID, Code)])
ID Code Number
1: b A 0.26550866
2: b B 0.37212390
3: b C 0.57285336
4: a D 0.90820779
5: a E 0.20168193
6: c F 0.89838968
7: b G 0.06178627
8: b H 0.20597457
9: b I 0.17655675
Perhaps more tractable would be to use unique():
unique(rbind(DT1, DT2), by = c("ID", "Code"))
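One small caveat, assuming DT1 and DT2 are still the original, unmodified tables from the question: unique() keeps the first occurrence, so listing DT1 first means its Number values win whenever a key appears in both tables. A quick check that the two answers then agree:
res1 <- rbind(DT1, DT2[!DT1, on = .(ID, Code)])
res2 <- unique(rbind(DT1, DT2), by = c("ID", "Code"))
fsetequal(res1, res2)  # TRUE: same rows either way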

Related

Merging a sum by reference with data.table

Let's say I have two data.tables, dt_a and dt_b, defined below.
library(data.table)
set.seed(20201111L)
dt_a <- data.table(
  foo = c("a", "b", "c")
)
dt_b <- data.table(
  bar = sample(c("a", "b", "c"), 10L, replace = TRUE),
  value = runif(10L)
)
dt_b[]
## bar value
## 1: c 0.4904536
## 2: c 0.9067509
## 3: b 0.1831664
## 4: c 0.0203943
## 5: c 0.8707686
## 6: a 0.4224133
## 7: a 0.6025349
## 8: b 0.4916672
## 9: a 0.4566726
## 10: b 0.8841110
I want to left join dt_b onto dt_a by reference, summing over the multiple matches. One way to do so is to first create a summary of dt_b (thus resolving the multiple-match issue) and merge it afterwards.
dt_b_summary <- dt_b[, .(value=sum(value)), bar]
dt_a[dt_b_summary, value_good:=value, on=c(foo="bar")]
dt_a[]
## foo value_good
## 1: a 1.481621
## 2: b 1.558945
## 3: c 2.288367
However, this allocates memory for the intermediate object dt_b_summary, which is inefficient.
I would like to get the same result by joining directly on dt_b and summing over the multiple matches. I'm looking for something like the code below, but it doesn't work.
dt_a[dt_b, value_bad:=sum(value), on=c(foo="bar")]
dt_a[]
## foo value_good value_bad
## 1: a 1.481621 5.328933
## 2: b 1.558945 5.328933
## 3: c 2.288367 5.328933
Does anyone know if this is possible?
We can use by = .EACHI:
library(data.table)
dt_b[dt_a, .(value = sum(value)), on = .(bar = foo), by = .EACHI]
# bar value
#1: a 1.481621
#2: b 1.558945
#3: c 2.288367
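by = .EACHI evaluates j once for each row of i (here each row of dt_a), so sum(value) is taken within each matching group of dt_b. Without it, j is evaluated once over all matched rows, which is why value_bad in the question came out as the grand total for every row:
dt_b[dt_a, sum(value), on = .(bar = foo)]
## [1] 5.328933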
If we want to update the original object 'dt_a'
dt_a[, value := dt_b[.SD, sum(value), on = .(bar = foo), by = .EACHI]$V1]
dt_a
# foo value
#1: a 1.481621
#2: b 1.558945
#3: c 2.288367
For multiple columns
dt_b$value1 <- dt_b$value
nm1 <- c('value', 'value1')
dt_a[, (nm1) := dt_b[.SD, lapply(.SD, sum),
                     on = .(bar = foo), by = .EACHI][, .SD, .SDcols = nm1]]

What is happening when using data.table with the tail command

Create the data.table DT
set.seed(1L)
DT <- data.table(A = rep(letters[2:1], each = 4L),
                 B = rep(1:4, each = 2L),
                 C = sample(8))
Create the new data.table, DT2
DT2 <- DT[, .(C = cumsum(C)), by = .(A, B)]
Select from DT2 the last two values from C while you group by A
DT2[, .(C = tail(C,2)), by = A]
A C
1: b 4
2: b 9
3: a 2
4: a 8
Can anyone explain what is happening in the last command? I am selecting 2 values using tail(C, 2), but it is giving me 4 values.
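In short: by = A splits DT2 into one group per distinct value of A ("b" and "a"), and tail(C, 2) is evaluated once per group, so 2 groups x 2 values = 4 rows. A quick way to see the group sizes:
DT2[, .N, by = A]
##    A N
## 1: b 4
## 2: a 4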

Data table operations with multiple group by variable sets

I have a data.table that I would like to perform group-by operations on, but would like to retain the null variables and use different group-by variable sets.
A toy example:
library(data.table)
set.seed(1)
DT <- data.table(
  id = sample(c("US", "Other"), 25, replace = TRUE),
  loc = sample(LETTERS[1:5], 25, replace = TRUE),
  index = runif(25)
)
I would like to find the sum of index over all combinations of the key variables (including the null set). The concept is analogous to "grouping sets" in Oracle SQL; here is an example of my current workaround:
rbind(
  DT[, list(id = "", loc = "", sindex = sum(index)), by = NULL],
  DT[, list(loc = "", sindex = sum(index)), by = "id"],
  DT[, list(id = "", sindex = sum(index)), by = "loc"],
  DT[, list(sindex = sum(index)), by = c("id", "loc")]
)[order(id, loc)]
id loc sindex
1: 11.54218399
2: A 2.82172063
3: B 0.98639578
4: C 2.89149433
5: D 3.93292900
6: E 0.90964424
7: Other 6.19514146
8: Other A 1.12107080
9: Other B 0.43809711
10: Other C 2.80724742
11: Other D 1.58392886
12: Other E 0.24479728
13: US 5.34704253
14: US A 1.70064983
15: US B 0.54829867
16: US C 0.08424691
17: US D 2.34900015
18: US E 0.66484697
Is there a preferred "data table" way to accomplish this?
As of this commit, this is now possible with the dev version of data.table with cube or groupingsets:
library("data.table")
# data.table 1.10.5 IN DEVELOPMENT built 2017-08-08 18:31:51 UTC
cube(DT, list(sindex = sum(index)), by = c("id", "loc"))
# id loc sindex
# 1: US B 0.54829867
# 2: US A 1.70064983
# 3: Other B 0.43809711
# 4: Other E 0.24479728
# 5: Other C 2.80724742
# 6: Other A 1.12107080
# 7: US E 0.66484697
# 8: US D 2.34900015
# 9: Other D 1.58392886
# 10: US C 0.08424691
# 11: NA B 0.98639578
# 12: NA A 2.82172063
# 13: NA E 0.90964424
# 14: NA C 2.89149433
# 15: NA D 3.93292900
# 16: US NA 5.34704253
# 17: Other NA 6.19514146
# 18: NA NA 11.54218399
groupingsets(DT, j = list(sindex = sum(index)), by = c("id", "loc"), sets = list(character(), "id", "loc", c("id", "loc")))
# id loc sindex
# 1: NA NA 11.54218399
# 2: US NA 5.34704253
# 3: Other NA 6.19514146
# 4: NA B 0.98639578
# 5: NA A 2.82172063
# 6: NA E 0.90964424
# 7: NA C 2.89149433
# 8: NA D 3.93292900
# 9: US B 0.54829867
# 10: US A 1.70064983
# 11: Other B 0.43809711
# 12: Other E 0.24479728
# 13: Other C 2.80724742
# 14: Other A 1.12107080
# 15: US E 0.66484697
# 16: US D 2.34900015
# 17: Other D 1.58392886
# 18: US C 0.08424691
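To reproduce the question's exact presentation (blank strings instead of NA, and the same ordering), the NA placeholders can be replaced afterwards; a sketch, storing the groupingsets() result in a variable res purely for illustration:
res <- groupingsets(DT, j = list(sindex = sum(index)), by = c("id", "loc"),
                    sets = list(character(), "id", "loc", c("id", "loc")))
res[is.na(id), id := ""][is.na(loc), loc := ""][order(id, loc)]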
I have a generic function that you can feed a data.table and a vector of dimensions you wish to group by, and it will return the sum of all numeric fields grouped by those dimensions.
rollSum = function(input, dimensions){
  # cast dimension inputs to character in case a dimension input is numeric
  for (x in 1:length(dimensions)){
    input[[eval(dimensions[x])]] = as.character(input[[eval(dimensions[x])]])
  }
  numericColumns = which(lapply(input, class) %in% c("integer", "numeric"))
  output = input[, lapply(.SD, sum, na.rm = TRUE), by = eval(dimensions),
                 .SDcols = numericColumns]
  return(output)
}
So then you can create a list of your different group-by vectors:
groupings = list(c("id"),c("loc"),c("id","loc"))
And then use it with lapply and rbindlist like so:
groupedSets = rbindlist(lapply(groupings, function(x){
  return(rollSum(DT, x))
}), fill = TRUE)
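With fill = TRUE, rbindlist() pads the dimensions a given grouping does not use with NA, much like the NA placeholders in the cube()/groupingsets() output above; note that this list of groupings does not include the grand total (the empty set). Viewing the result in the question's ordering:
# NA marks the dimensions a grouping did not use; NAs sort last with order()
groupedSets[order(id, loc)]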
Using dplyr, an adaptation of this should work, if I understand your question correctly.
sum <- mtcars %>%
group_by(vs, am) %>%
summarise(Sum=sum(mpg))
I didn't check how it treats the missing values though, but it should just make another group of them (the last group).

Inconsistent data.table assignment by reference behaviour

When assigning by reference with a data.table using a column from a second data.table, the results are inconsistent. When there are no matches on the key columns of the two data.tables, it appears the assignment expression y := y is ignored entirely; not even NAs are returned.
library(data.table)
dt1 <- data.table(id = 1:2, x = 3:4, key = "id")
dt2 <- data.table(id = 3:4, y = 5:6, key = "id")
print(dt1[dt2, y := y])
## id x # Would have also expected column: y
## 1: 1 3 # NA
## 2: 2 4 # NA
However, when there is a partial match, the non-matching rows get a placeholder NA.
dt2[, id := 2:3]
print(dt1[dt2, y := y])
## id x y
## 1: 1 3 NA # <-- placeholder NA here
## 2: 2 4 5
This wreaks havoc on later code that assumes a y column exists in all cases. Otherwise I keep having to write cumbersome additional checks to take into account both cases.
Is there an elegant way around this inconsistency?
With this recent commit, this issue, #759, is now fixed in v1.9.7. It works as expected when nomatch=NA (the current default).
require(data.table)
dt1 <- data.table(id = 1:2, x = 3:4, key = "id")
dt2 <- data.table(id = 3:4, y = 5:6, key = "id")
dt1[dt2, y := y][]
# id x y
# 1: 1 3 NA
# 2: 2 4 NA
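For data.table versions before that fix, one defensive pattern (a sketch, not part of the fix itself) is to create the column up front, so the join-assign can only fill it and can never silently skip creating it:
dt1[, y := NA_integer_]   # y now exists regardless of whether the join matches
dt1[dt2, y := i.y]        # with no matches this simply leaves the NAs in place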
Using merge works:
> dt3 <- merge(dt1, dt2, by='id', all.x=TRUE)
> dt3
id x y
1: 1 3 NA
2: 2 4 NA

Cross-correlation with multiple groups in one data.table

I'd like to calculate the cross-correlations between groups of time series within one data.table. I have time series data in this format:
data = data.table(group = c(rep("a", 5), rep("b", 5), rep("c", 5)), Y = rnorm(15))
group Y
1: a 0.90855520
2: a -0.12463737
3: a -0.45754652
4: a 0.65789709
5: a 1.27632196
6: b 0.98483700
7: b -0.44282527
8: b -0.93169070
9: b -0.21878359
10: b -0.46713392
11: c -0.02199363
12: c -0.67125826
13: c 0.29263953
14: c -0.65064603
15: c -1.41143837
Each group has the same number of observations. What I am looking for is a way to obtain the cross-correlations between the groups:
group.1 group.2 correlation
a b 0.xxx
a c 0.xxx
b c 0.xxx
I am working on a script to subset each group and append the cross-correlations, but the data size is fairly large. Is there any efficient / zen way to do this?
Does this help?
data[, id := rep(1:5, 3)]
dtw = dcast.data.table(data, id ~ group, value.var = "Y")[, id := NULL]
cor(dtw)
See Correlation between groups in R data.table
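If the long three-row format from the question is needed, it can be pulled out of the correlation matrix; a sketch using upper.tri() so each pair appears once (m and idx are just illustrative names):
m <- cor(dtw)
idx <- which(upper.tri(m), arr.ind = TRUE)   # row/column indices of each pair
data.table(group.1 = rownames(m)[idx[, 1]],
           group.2 = colnames(m)[idx[, 2]],
           correlation = m[idx])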
Another way would be:
# data
set.seed(45L)
data = data.table( group = c(rep("a", 5),rep("b",5),rep("c",5)) , Y = rnorm(15) )
# method 2
setkey(data, "group")
data2 = data[J(c("b", "c", "a"))][, list(group2=group, Y2=Y)]
data[, c(names(data2)) := data2]
data[, cor(Y, Y2), by=list(group, group2)]
# group group2 V1
# 1: a b -0.2997090
# 2: b c 0.6427463
# 3: c a -0.6922734
And to generalize this "other" way to more than three groups...
data = data.table(group = c(rep("a", 5), rep("b", 5), rep("c", 5), rep("d", 5)),
                  Y = rnorm(20))
setkey(data, "group")
groups = unique(data$group)
ngroups = length(groups)
library(gtools)
pairs = combinations(ngroups,2,groups)
d1 = data[pairs[, 1], , allow.cartesian = TRUE]
d2 = data[pairs[, 2], , allow.cartesian = TRUE]
d1[, c("group2", "Y2") := d2]
d1[, cor(Y, Y2), by = list(group, group2)]
# group group2 V1
# 1: a b 0.10742799
# 2: a c 0.52823511
# 3: a d 0.04424170
# 4: b c 0.65407400
# 5: b d 0.32777779
# 6: c d -0.02425053
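As an aside, the gtools dependency is optional here; base R's combn() produces the same pairs (a sketch, assuming groups is sorted as above):
pairs = t(combn(groups, 2))   # same n-choose-2 x 2 matrix as gtools::combinations()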
