Inner join using an inequality expression - r

Background
(Not required for the question, but may be useful to read)
Rolling join on data.table with duplicate keys
Odd behaviour when joining with multiple conditions
Data
library(data.table) ## using version 1.9.6
## arrival timetable
dt_arrive <- structure(list(txn_id = c(1L, 1L, 1L, 1L, 1L), place = c("place_a",
"place_a", "place_a", "place_a", "place_a"), arrival_minutes = c(515,
534, 547, 561, 581), journey_id = 1:5), .Names = c("txn_id",
"place", "arrival_minutes", "journey_id"), class = c("data.table",
"data.frame"), row.names = c(NA, -5L), sorted = c("txn_id",
"place"))
## departure timetable
dt_depart <- structure(list(txn_id = c(1L, 1L, 1L, 1L), place = c("place_a",
"place_a", "place_a", "place_a"), arrival_minutes = c(489, 507,
519, 543), journey_id = 10:13), .Names = c("txn_id", "place",
"arrival_minutes", "journey_id"), sorted = c("txn_id", "place"
), class = c("data.table", "data.frame"), row.names = c(NA, -4L
))
> dt_arrive
txn_id place arrival_minutes journey_id
1: 1 place_a 515 1
2: 1 place_a 534 2
3: 1 place_a 547 3
4: 1 place_a 561 4
5: 1 place_a 581 5
> dt_depart
txn_id place arrival_minutes journey_id
1: 1 place_a 489 10
2: 1 place_a 507 11
3: 1 place_a 519 12
4: 1 place_a 543 13
Question
I would like to join the arrivals to the departures, keeping only those dt_depart$journey_id that occur after a dt_arrive$journey_id in terms of arrival_minutes (i.e. an inner join on txn_id and place).
For example, the output I would like is:
txn_id place journey_in_id journey_out_id journey_place_arrive journey_place_depart
1 place_a 1 12 515 519
1 place_a 1 13 515 543
1 place_a 2 13 534 543
Attempts
Using the method from the two linked questions, I have constructed:
setkey(dt_arrive, txn_id, place)
setkey(dt_depart, txn_id, place)
dt_join <- dt_arrive[dt_depart,
                     {
                       idx = (i.arrival_minutes > arrival_minutes)
                       .(journey_in_id = journey_id[idx],
                         journey_out_id = i.journey_id,
                         journey_place_arrive = arrival_minutes[idx],
                         journey_place_depart = i.arrival_minutes)
                     },
                     by = .EACHI]
But this gives everything from dt_depart, so it includes NAs in the result, which suggests a 'right join':
txn_id place journey_in_id journey_out_id journey_place_arrive journey_place_depart
1: 1 place_a NA 10 NA 489
2: 1 place_a NA 11 NA 507
3: 1 place_a 1 12 515 519
4: 1 place_a 1 13 515 543
5: 1 place_a 2 13 534 543
I've tried using nomatch=0 to force it to 'inner join', but this hasn't worked.
I can use complete.cases to remove the NA rows, but I was wondering if there's a way of doing this within the query itself?

Here's the unclever approach: take the cross/Cartesian join, and then filter.
merge(dt_arrive, dt_depart, allow.cartesian=TRUE)[arrival_minutes.y > arrival_minutes.x]
# txn_id place arrival_minutes.x journey_id.x arrival_minutes.y journey_id.y
# 1: 1 place_a 515 1 519 12
# 2: 1 place_a 515 1 543 13
# 3: 1 place_a 534 2 543 13
By taking the Cartesian join, we're liable to eat up a lot of memory.
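A lighter alternative is a non-equi join, sketched below on the assumption that you can use data.table 1.9.8 or later (the question uses 1.9.6; non-equi joins via on= arrived in 1.9.8). The inequality is applied during the join itself, so no full Cartesian product is materialised:
dt_arrive[dt_depart,
          .(txn_id, place,
            journey_in_id        = x.journey_id,
            journey_out_id       = i.journey_id,
            journey_place_arrive = x.arrival_minutes,
            journey_place_depart = i.arrival_minutes),
          on = .(txn_id, place, arrival_minutes < arrival_minutes),
          nomatch = 0L]
This should reproduce the three desired rows directly.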

A potential solution is to use foverlaps by making some arbitrary interval columns:
setDT(dt_arrive)
setDT(dt_depart)
dt_arrive[, `:=`(arrival_minutes_copy = arrival_minutes)]
## reorder columns
dt_arrive <- dt_arrive[, .(txn_id, place, journey_id, arrival_minutes, arrival_minutes_copy)]
dt_depart[, `:=`(arrival_minutes_copy = min(arrival_minutes))]
## reorder columns
dt_depart <- dt_depart[, .(txn_id, place, journey_id, arrival_minutes_copy, arrival_minutes)]
setkey(dt_arrive, arrival_minutes, arrival_minutes_copy)
setkey(dt_depart, arrival_minutes_copy, arrival_minutes)
foverlaps(dt_arrive,
          dt_depart,
          type = "within",
          nomatch = 0L)
# place txn_id journey_id arrival_minutes_copy arrival_minutes i.txn_id i.journey_id i.arrival_minutes i.arrival_minutes_copy
# 1: place_a 1 12 489 519 1 1 515 515
# 2: place_a 1 13 489 543 1 1 515 515
# 3: place_a 1 13 489 543 1 2 534 534
Benchmarking
library(microbenchmark)
fun_foverlap <- function(dt_a, dt_d){
  dt <- foverlaps(dt_a, dt_d, type = "within", nomatch = 0L)
  return(dt)
}
fun_merge <- function(dt_a, dt_d){
  dt <- merge(dt_a, dt_d, allow.cartesian = TRUE)[arrival_minutes.y > arrival_minutes.x]
  return(dt)
}
fun_nomatch <- function(dt_a, dt_d){
  dt <- dt_a[dt_d, nomatch = 0, allow.cartesian = TRUE][i.arrival_minutes > arrival_minutes]
  return(dt)
}
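## Note: the *_merge, *_nomatch and *_foverlap objects are not defined in the
## post; presumably they are per-method copies of dt_arrive / dt_depart --
## the merge/nomatch pairs keyed on (txn_id, place) as in the question, and
## the foverlap pair prepared with the interval columns and keys from the
## foverlaps answer above.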
microbenchmark(fun_foverlap(dt_arrive_foverlap, dt_depart_foverlap),
               fun_merge(dt_arrive_merge, dt_depart_merge),
               fun_nomatch(dt_arrive_nomatch, dt_depart_nomatch))
# Unit: microseconds
# expr min lq mean median uq max neval cld
# fun_foverlap(dt_arrive_foverlap, dt_depart_foverlap) 3538.189 3717.077 3967.6648 3872.586 4006.7205 5812.355 100 c
# fun_merge(dt_arrive_merge, dt_depart_merge) 883.697 925.655 980.4159 942.877 967.9745 2223.147 100 b
# fun_nomatch(dt_arrive_nomatch, dt_depart_nomatch) 593.082 625.471 682.8975 643.034 665.4125 2077.748 100 a

Related

Adding a second column as a function of first with data.table's (`:=`) [duplicate]

I want to create a new data.table, or maybe just add some columns to an existing data.table. It is easy to specify multiple new columns, but what happens if I want a third column whose value is calculated from one of the columns I am creating? I think the plyr package can do something like that. Can we perform such iterative (sequential) column creation in data.table?
I want to do as follows
dt <- data.table(shop = 1:10, income = 10:19*70)
dt[ , list(hope = income * 1.05, hopemore = income * 1.20, hopemorerealistic = hopemore - 100)]
or maybe
dt[ , `:=`(hope = income*1.05, hopemore = income*1.20, hopemorerealistic = hopemore-100)]
You can also use <- within the call to list, e.g.
DT <- data.table(a=1:5)
DT[, c('b','d') := list(b1 <- a*2, b1*3)]
DT
a b d
1: 1 2 6
2: 2 4 12
3: 3 6 18
4: 4 8 24
5: 5 10 30
Or
DT[, `:=`(hope = hope <- a+1, z = hope-1)]
DT
a b d hope z
1: 1 2 6 2 1
2: 2 4 12 3 2
3: 3 6 18 4 3
4: 4 8 24 5 4
5: 5 10 30 6 5
It is possible by using curly braces and semicolons in j.
There are multiple ways to go about it; here are two examples:
# If you simply want to output:
dt[ ,
{hope=income*1.05;
hopemore=income*1.20;
list(hope=hope, hopemore=hopemore, hopemorerealistic=hopemore-100)}
]
# if you want to save the values
dt[ , c("hope", "hopemore", "hopemorerealistic") :=
{hope=income*1.05;
hopemore=income*1.20;
list(hope, hopemore, hopemore-100)}
]
dt
# shop income hope hopemore hopemorerealistic
# 1: 1 700 735.0 840 740
# 2: 2 770 808.5 924 824
# 3: 3 840 882.0 1008 908
# 4: 4 910 955.5 1092 992
# 5: 5 980 1029.0 1176 1076
# 6: 6 1050 1102.5 1260 1160
# 7: 7 1120 1176.0 1344 1244
# 8: 8 1190 1249.5 1428 1328
# 9: 9 1260 1323.0 1512 1412
# 10: 10 1330 1396.5 1596 1496
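Another minimal option (a sketch): chain two := calls, so that the second call can see the columns created by the first:
dt[, c("hope", "hopemore") := .(income * 1.05, income * 1.20)
   ][, hopemorerealistic := hopemore - 100][]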

replace data.frame using specified data set

I want to replace values in a data.frame using a specified data set.
> test_data
support count
1 0.01235235 663
2 0.01373104 737
3 0.01393598 748
4 0.01265045 679
5 0.01548236 831
6 0.01565004 840
> replace_support
2 3 4 6
-0.008884196 -0.007991622 -0.011675116 -0.013086012
The names of replace_support correspond to the row names of test_data.
My expectation is to replace the support column:
support count
1 0.01235235 663
2 -0.008884196 737
3 -0.007991622 748
4 -0.011675116 679
5 0.01548236 831
6 -0.013086012 840
Here are the example data:
test_data <- structure(list(support = c(0.0123523493684093, 0.0137310429630734,
0.0139359839028207, 0.0126504452807691, 0.0154823564481872, 0.0156500353988896
), count = c(663, 737, 748, 679, 831, 840)), .Names = c("support",
"count"), row.names = c(NA, 6L), class = "data.frame")
replace_support <- structure(c(-0.00888419577036815, -0.00799162193023339, -0.0116751160488589,
-0.0130860121134779), .Names = c("2", "3", "4", "6"))
You can use the replace function:
indexes <- as.integer(names(replace_support))
test_data$support <- replace(test_data$support,indexes,replace_support)
test_data
support count
1 0.012352349 663
2 -0.008884196 737
3 -0.007991622 748
4 -0.011675116 679
5 0.015482356 831
6 -0.013086012 840
If the names of replace_support don't match the appropriate indexes, you can supply them manually.
How about:
test_data$support[as.integer(names(replace_support))] <- replace_support
test_data
#> support count
#> 1 0.012352349 663
#> 2 -0.008884196 737
#> 3 -0.007991622 748
#> 4 -0.011675116 679
#> 5 0.015482356 831
#> 6 -0.013086012 840
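Both approaches above treat the names of replace_support as row positions. If they are instead row names that need not equal the positions (e.g. after subsetting), matching them explicitly is safer; a minimal sketch:
idx <- match(names(replace_support), rownames(test_data))
test_data$support[idx] <- replace_support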

R: sum rows from column A until conditioned value in column B

I'm pretty new to R and can't seem to figure out how to deal with what seems to be a relatively simple problem. I want to sum the rows of the column 'DURATION' per 'TRIAL_INDEX', but only those first rows where the values of 'X_POSITION' are increasing. I only want to sum the first round within a trial where X increases.
The first rows of a simplified dataframe:
TRIAL_INDEX DURATION X_POSITION
1 1 204 314.5
2 1 172 471.6
3 1 186 570.4
4 1 670 539.5
5 1 186 503.6
6 2 134 306.8
7 2 182 503.3
8 2 806 555.7
9 2 323 490.0
So, for TRIAL_INDEX 1, only the first three values of DURATION should be added (204+172+186), as this is where X has the highest value so far (going through the dataframe row by row).
The desired output should look something like:
TRIAL_INDEX DURATION X_POSITION FIRST_PASS_TIME
1 1 204 314.5 562
2 1 172 471.6 562
3 1 186 570.4 562
4 1 670 539.5 562
5 1 186 503.6 562
6 2 134 306.8 1122
7 2 182 503.3 1122
8 2 806 555.7 1122
9 2 323 490.0 1122
I tried to use dplyr, to generate a new dataframe that can be merged with my original dataframe.
However, the code doesn't work, and also I'm not sure on how to make sure it's only adding the first rows per trial that have increasing values for X_POSITION.
FirstPassRT = dat %>%
group_by(TRIAL_INDEX) %>%
filter(dplyr::lag(dat$X_POSITION,1) > dat$X_POSITION) %>%
summarise(FIRST_PASS_TIME=sum(DURATION))
Any help and suggestions are greatly appreciated!
library(data.table)
dt = as.data.table(df) # or setDT to convert in place
# find the rows that will be used for summing DURATION
idx = dt[, .I[1]:.I[min(.N, which(diff(X_POSITION) < 0), na.rm = T)], by = TRIAL_INDEX]$V1
# sum the DURATION for those rows
dt[idx, time := sum(DURATION), by = TRIAL_INDEX][, time := time[1], by = TRIAL_INDEX]
dt
# TRIAL_INDEX DURATION X_POSITION time
#1: 1 204 314.5 562
#2: 1 172 471.6 562
#3: 1 186 570.4 562
#4: 1 670 539.5 562
#5: 1 186 503.6 562
#6: 2 134 306.8 1122
#7: 2 182 503.3 1122
#8: 2 806 555.7 1122
#9: 2 323 490.0 1122
Here is something you can try with the dplyr package:
library(dplyr);
dat %>% group_by(TRIAL_INDEX) %>%
mutate(IncLogic = X_POSITION > lag(X_POSITION, default = 0)) %>%
mutate(FIRST_PASS_TIME = sum(DURATION[IncLogic])) %>%
select(-IncLogic)
Source: local data frame [9 x 4]
Groups: TRIAL_INDEX [2]
TRIAL_INDEX DURATION X_POSITION FIRST_PASS_TIME
(int) (int) (dbl) (int)
1 1 204 314.5 562
2 1 172 471.6 562
3 1 186 570.4 562
4 1 670 539.5 562
5 1 186 503.6 562
6 2 134 306.8 1122
7 2 182 503.3 1122
8 2 806 555.7 1122
9 2 323 490.0 1122
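Note that IncLogic flags every row where X_POSITION rises above its immediate predecessor, not only the first unbroken run of increases. If X can rise again after a dip within a trial and only the first pass should count, one option (a sketch) is to keep just the rows before the first decrease:
dat %>% group_by(TRIAL_INDEX) %>%
  mutate(in_first_pass = cumsum(X_POSITION < lag(X_POSITION,
                                                 default = first(X_POSITION))) == 0,
         FIRST_PASS_TIME = sum(DURATION[in_first_pass])) %>%
  select(-in_first_pass)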
If you want to summarize it down to one row per trial you can use summarize like this:
library(dplyr)
df <- data_frame(TRIAL_INDEX = c(1,1,1,1,1,2,2,2,2),
DURATION = c(204,172,186,670, 186,134,182,806, 323),
X_POSITION = c(314.5, 471.6, 570.4, 539.5, 503.6, 306.8, 503.3, 555.7, 490.0))
res <- df %>%
group_by(TRIAL_INDEX) %>%
mutate(x.increasing = ifelse(X_POSITION > lag(X_POSITION), TRUE, FALSE),
x.increasing = ifelse(is.na(x.increasing), TRUE, x.increasing)) %>%
filter(x.increasing == TRUE) %>%
summarize(FIRST_PASS_TIME = sum(DURATION))
res
#Source: local data frame [2 x 2]
#
# TRIAL_INDEX FIRST_PASS_TIME
# (dbl) (dbl)
#1 1 562
#2 2 1122
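For completeness, here is a base-R sketch of the same idea (assuming the df built above): within each trial, keep only the rows before the first decrease of X_POSITION and sum their DURATION.
df$FIRST_PASS_TIME <- ave(seq_len(nrow(df)), df$TRIAL_INDEX,
                          FUN = function(i) {
                            # rows before the first decrease form the first pass
                            keep <- cumsum(c(FALSE, diff(df$X_POSITION[i]) < 0)) == 0
                            sum(df$DURATION[i][keep])
                          })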

Apply a rule to calculate sum of specific

Hi, I have a data set like this:
Num C Pr Value Volume
111 aa Alen 111 222
111 aa Paul 100 200
222 vv Iva 444 555
222 vv John 333 444
I would like to filter the data according to Num and add a new row that takes the sum of the Value and Volume columns, keeps the information from the Num and C columns, and puts 'Total' in the Pr column. It should look like this:
Num C Pr Value Volume
222 vv Total 777 999
Could you suggest how to do it? I would like it only for Num 222.
When I try the res command (from the base R answer below) I end up with this result:
# Num C Pr Value Volume
1: 111 aa Alen 111 222
2: 111 aa Paul 100 200
3: 111 aa Total NA NA
4: 222 vv Iva 444 555
5: 222 vv John 333 444
6: 222 vv Total NA NA
What causes this?
The structure of my data is the following:
'data.frame': 4 obs. of 5 variables:
$ Num : Factor w/ 2 levels "111","222": 1 1 2 2
$ C : Factor w/ 2 levels "aa","vv": 1 1 2 2
$ Pr : Factor w/ 4 levels "Alen","Iva","John",..: 1 4 2 3
$ Value : Factor w/ 4 levels "100","111","333",..: 2 1 4 3
$ Volume: Factor w/ 4 levels "200","222","444",..: 2 1 4 3
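The str() output shows that Value and Volume are stored as factors, which is the likely cause of the NA totals; converting them to numeric before aggregating should fix it (a minimal sketch, using df1 as the data name, as in the answers below):
## go through as.character() first, otherwise you get the underlying
## level codes rather than the original numbers
df1$Value  <- as.numeric(as.character(df1$Value))
df1$Volume <- as.numeric(as.character(df1$Volume))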
We could use data.table. We convert the 'data.frame' to a 'data.table' (setDT(df1)), group by the 'Num' and 'C' columns, and specify the columns to sum in .SDcols; we loop over those columns with lapply, get the sum, and create the 'Pr' column. We can then rbind the original dataset with the new summarised output ('DT1') and order the result by 'Num'.
library(data.table)#v1.9.5+
DT1 <- setDT(df1)[,lapply(.SD, sum) , by = .(Num,C),
.SDcols=Value:Volume][,Pr:='Total'][]
rbind(df1, DT1)[order(Num)]
# Num C Pr Value Volume
#1: 111 aa Alen 111 222
#2: 111 aa Paul 100 200
#3: 111 aa Total 211 422
#4: 222 vv Iva 444 555
#5: 222 vv John 333 444
#6: 222 vv Total 777 999
This can be done using base R methods as well. We get the sum of the 'Value' and 'Volume' columns grouped by 'Num' and 'C' using the formula method of aggregate, transform the output by creating the 'Pr' column, rbind with the original dataset, and order the output ('res') by 'Num'.
res <- rbind(df1,transform(aggregate(.~Num+C, df1[-3], FUN=sum), Pr='Total'))
res[order(res$Num),]
# Num C Pr Value Volume
#1 111 aa Alen 111 222
#2 111 aa Paul 100 200
#5 111 aa Total 211 422
#3 222 vv Iva 444 555
#4 222 vv John 333 444
#6 222 vv Total 777 999
EDIT: I noticed that the OP mentioned filter. If this is for a single 'Num', we subset the data and then do the aggregate/transform steps.
transform(aggregate(.~Num+C, subset(df1, Num==222)[-3], FUN=sum), Pr='Total')
# Num C Value Volume Pr
#1 222 vv 777 999 Total
Or we may not need aggregate at all. After subsetting the data, we convert 'Num' to a factor, loop through the columns of the output dataset ('df2'), take the sum if the column is numeric and the first element otherwise, and wrap the result with data.frame.
df2 <- transform(subset(df1, Num==222), Num=factor(Num))
data.frame(c(lapply(df2[-3], function(x) if(is.numeric(x))
sum(x) else x[1]), Pr='Total'))
# Num C Value Volume Pr
#1 222 vv 777 999 Total
data
df1 <- structure(list(Num = c(111L, 111L, 222L, 222L), C = c("aa", "aa",
"vv", "vv"), Pr = c("Alen", "Paul", "Iva", "John"), Value = c(111L,
100L, 444L, 333L), Volume = c(222L, 200L, 555L, 444L)), .Names = c("Num",
"C", "Pr", "Value", "Volume"), class = "data.frame",
row.names = c(NA, -4L))
Or using dplyr:
library(dplyr)
df1 %>%
filter(Num == 222) %>%
summarise(Value = sum(Value),
Volume = sum(Volume),
Pr = 'Total',
Num = Num[1],
C = C[1])
# Value Volume Pr Num C
# 1 777 999 Total 222 vv
where we first filter to keep only Num == 222, and then use summarise to obtain the sums and the values for Num and C. This assumes that:
- You do not want to get the result for each unique Num (I select one here, you could select multiple). If you need this, use group_by.
- There is only ever one C for every unique Num.
You can also use the dplyr package:
df %>%
filter(Num == 222) %>%
group_by(Num, C) %>%
summarise(
Pr = "Total"
, Value = sum(Value)
, Volume = sum(Volume)
) %>%
rbind(df, .)
# Num C Pr Value Volume
# 1 111 aa Alen 111 222
# 2 111 aa Paul 100 200
# 3 222 vv Iva 444 555
# 4 222 vv John 333 444
# 5 222 vv Total 777 999
If you want the total for each Num value, just comment out the filter line.

Count number of occurrences of a string in R under different conditions

I have a dataframe called "data" with multiple columns, which looks like this:
Preferences Status Gender
8a 8b 9a Employed Female
10b 11c 9b Unemployed Male
11a 11c 8e Student Female
That is, each customer selected 3 preferences and specified other information such as Status and Gender. Each preference is given by a [number][letter] combination, and there are c. 30 possible preferences. The possible preferences are:
8[a - c]
9[a - k]
10[a - d]
11[a - c]
12[a - i]
I want to count the number of occurrences of each preference, under certain conditions on the other columns, e.g. for all women.
The output will ideally be a dataframe that looks like this:
Preference Female Male Employed Unemployed Student
8a 1034 934 234 495 203
8b 539 239 609 394 235
8c 124 395 684 94 283
9a 120 999 895 945 345
9b 978 385 596 923 986
etc.
What's the most efficient way to achieve this?
Thanks.
I am assuming you are starting with something that looks like this:
mydf <- structure(list(
Preferences = c("8a 8b 9a", "10b 11c 9b", "11a 11c 8e"),
Status = c("Employed", "Unemployed", "Student"),
Gender = c("Female", "Male", "Female")),
.Names = c("Preferences", "Status", "Gender"),
class = c("data.frame"), row.names = c(NA, -3L))
mydf
# Preferences Status Gender
# 1 8a 8b 9a Employed Female
# 2 10b 11c 9b Unemployed Male
# 3 11a 11c 8e Student Female
If that's the case, you need to "split" the "Preferences" column (by spaces), transform the data into a "long" form, and then reshape it to a wide form, tabulating while you do so.
With the right tools, this is pretty straightforward.
library(devtools)
library(data.table)
library(reshape2)
source_gist(11380733) # for `cSplit`
dcast.data.table( # Step 3--aggregate to wide form
melt( # Step 2--convert to long form
cSplit(mydf, "Preferences", " ", "long"), # Step 1--split "Preferences"
id.vars = "Preferences"),
Preferences ~ value, fun.aggregate = length)
# Preferences Employed Female Male Student Unemployed
# 1: 10b 0 0 1 0 1
# 2: 11a 0 1 0 1 0
# 3: 11c 0 1 1 1 1
# 4: 8a 1 1 0 0 0
# 5: 8b 1 1 0 0 0
# 6: 8e 0 1 0 1 0
# 7: 9a 1 1 0 0 0
# 8: 9b 0 0 1 0 1
I also tried a dplyr + tidyr approach, which looks like the following:
library(dplyr)
library(tidyr)
mydf %>%
separate(Preferences, c("P_1", "P_2", "P_3")) %>% ## splitting things
gather(Pref, Pvals, P_1:P_3) %>% # stack the preference columns
gather(Var, Val, Status:Gender) %>% # stack the status/gender columns
group_by(Pvals, Val) %>% # group by these new columns
summarise(count = n()) %>% # aggregate the numbers of each
spread(Val, count) # spread the values out
# Source: local data table [8 x 6]
# Groups:
#
# Pvals Employed Female Male Student Unemployed
# 1 10b NA NA 1 NA 1
# 2 11a NA 1 NA 1 NA
# 3 11c NA 1 1 1 1
# 4 8a 1 1 NA NA NA
# 5 8b 1 1 NA NA NA
# 6 8e NA 1 NA 1 NA
# 7 9a 1 1 NA NA NA
# 8 9b NA NA 1 NA 1
Both approaches are actually pretty quick. Test it with some better sample data than what you shared, like this:
preferences <- c(paste0(8, letters[1:3]),
paste0(9, letters[1:11]),
paste0(10, letters[1:4]),
paste0(11, letters[1:3]),
paste0(12, letters[1:9]))
set.seed(1)
nrow <- 10000
mydf <- data.frame(
Preferences = vapply(replicate(nrow,
sample(preferences, 3, FALSE),
FALSE),
function(x) paste(x, collapse = " "),
character(1L)),
Status = sample(c("Employed", "Unemployed", "Student"), nrow, TRUE),
Gender = sample(c("Male", "Female"), nrow, TRUE)
)
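For reference, the split-and-tabulate idea can also be sketched in base R without cSplit (which, incidentally, now also ships in the splitstackshape package); this is a minimal sketch using the mydf above:
## split each row's preferences, repeat Status/Gender accordingly,
## then cross-tabulate preference against each grouping column
prefs <- strsplit(as.character(mydf$Preferences), " ", fixed = TRUE)
long  <- data.frame(Preference = unlist(prefs),
                    mydf[rep(seq_len(nrow(mydf)), lengths(prefs)),
                         c("Status", "Gender")],
                    row.names = NULL)
cbind(table(long$Preference, long$Status),
      table(long$Preference, long$Gender))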

Resources