R - split a value into the amounts within each layer

I'm stumped with this one. I have a starting dataset with 2 columns: an ID and a value.
df <- data.frame(id = c('ABC', 'XYZ'),
                 value = c(150, 300))
I then define how I want to 'layer' the values (in this case, I want to split the value into layers of 100).
cut <- seq(0, 300, 100)
So, for the first record of the dataset, the value is 150. I want to split that into the amounts that fall within the ranges 0-100, 100-200 and 200-300.
Starting dataset
id   value
ABC  150
XYZ  300
Ending dataset (after defining cut)
id   value  val_0_100  val_100_200  val_200_300
ABC  150    100        50           0
XYZ  300    100        100          100

You can do it like this:
df <- data.frame(id = c('ABC', 'XYZ'),
                 value = c(150, 300))
initial_value <- 0
final_value <- 300
step <- 100
number_of_columns <- ceiling(final_value / step)
for (i in 1:number_of_columns) {
  new_col_name <- paste0("val_", step * (i - 1), "_", step * i)
  # amount above the lower bound of this layer, capped at the layer width ...
  df[, new_col_name] <- apply(df["value"] - (step * (i - 1)), 1, FUN = min, step)
  # ... and floored at 0 for values that never reach this layer
  df[, new_col_name] <- apply(df[new_col_name], 1, FUN = max, 0)
}
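For reference, the same clipping idea can be written in vectorised form with pmin() and pmax(); a minimal sketch, assuming the same df and a layer width (step) of 100 as above:
step <- 100
for (lower in seq(0, 300 - step, by = step)) {
  col <- paste0("val_", lower, "_", lower + step)
  # portion of value that falls into this layer, clipped to the layer width
  df[[col]] <- pmin(pmax(df$value - lower, 0), step)
}
This avoids the row-wise apply() calls but produces the same columns.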

Here is another way using data.table and dcast
library(data.table)
df <- data.frame(id = c('ABC', 'XYZ'),
                 value = c(160, 230))
# Data table
dt <- data.table(df)
# Append Data multiple times based on its value
dt <- dt[rep(seq_len(nrow(dt)), ceiling(dt$value/100)), ]
# cumulative sum to be used in splitting into columns in dcast
dt[, csum := 100]
dt[, csum := cumsum(csum), by = "id"]
# Adding extra column to split into 100s and remainder
dt[, value2 := 100]
dt[csum > value, value2 := value %% 100]
dt[value < 100, value2 := value]
dt_dcast <- dcast(dt, id + value ~ csum, value.var = "value2", fill = 0)
# Rename columns as per the example shown above
colstart <- seq(0, max(dt$csum) - 100, 100)
colend <- seq(100, max(dt$csum), 100)
newname <- c("id", "value", paste0("val_", colstart, "_", colend))
setnames(dt_dcast, names(dt_dcast), newname)
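If I trace this through with the value = c(160, 230) data used here, dt_dcast should end up looking like this (exact print format aside):
   id value val_0_100 val_100_200 val_200_300
1: ABC   160       100          60           0
2: XYZ   230       100         100          30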

Related

How can I subtract 1 column from another in R based on 2 aggregate columns

The first data frame, total_coming_in, has the columns 'LocationID', 'PartNumber', 'Quantity'.
The second data frame, total_going_out, has the same columns: 'LocationID', 'PartNumber', 'Quantity'.
I want the output total_data with the columns 'LocationID', 'PartNumber', 'Quantity_subtract', where
Quantity_subtract = total_coming_in$Quantity - total_going_out$Quantity, grouped by 'LocationID' and 'PartNumber'.
I tried this:
matchingCols <- c('LocationID', 'PartNumber')
mergingCols <- names(total_coming_in)[3]
total_coming_in[total_going_out, on = matchingCols,
                lapply(
                  setNames(mergingCols, mergingCols),
                  function(x) get(x) - get(paste0("i.", x))
                ),
                nomatch = 0L,
                by = .EACHI
]
Using data.table, as you seem to want to, I would first cleanly merge the two tables and then do the subtraction on just the rows that make sense (i.e. rows in total_coming_in which have matching values in total_going_out and vice-versa):
library(data.table)
M <- merge(total_coming_in, total_going_out, by = c('LocationID', 'PartNumber'))
# i.e. all.x = FALSE, all.y = FALSE,
# thereby eliminating rows in x without a matching row in y and vice-versa
M[, Quantity_subtract := Quantity.x - Quantity.y,
  by = c('LocationID', 'PartNumber')]
Now, for completeness: your question might also be read as allowing a value of 0 for Quantity.y for rows of total_coming_in that have no match in total_going_out (and vice-versa). In that case you could do:
M <- merge(total_coming_in, total_going_out, all = TRUE, by = c('LocationID', 'PartNumber'))
# i.e. all.x = TRUE, all.y = TRUE,
# thereby keeping rows in x without a matching row in y and vice-versa
M[is.na(Quantity.x), Quantity.x := 0]
M[is.na(Quantity.y), Quantity.y := 0]
M[, Quantity_subtract := Quantity.x - Quantity.y,
  by = c('LocationID', 'PartNumber')]
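For what it's worth, the join with by = .EACHI that the question was aiming for can also be written directly; a minimal sketch, assuming both tables are data.tables and only matching rows are wanted (the inner-join case above):
library(data.table)
matchingCols <- c('LocationID', 'PartNumber')
total_data <- total_coming_in[total_going_out, on = matchingCols,
                              .(Quantity_subtract = Quantity - i.Quantity),  # x column minus i. column
                              by = .EACHI, nomatch = 0L]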
So you want to have a column that gives you the difference of total_coming_in and total_going_out for each combination of PartNumber and LocationID, correct?
If so, the following will do:
library(dplyr)
matchingCols <- c("LocationID", "PartNumber")
total_data <- full_join(total_coming_in, total_going_out, by=matchingCols)
total_data <- mutate(total_data, Quantity_subtract = Quantity.x - Quantity.y)
total_data <- select(total_data, -Quantity.x, -Quantity.y) #if you want to get rid of these columns
I used this example data:
total_coming_in <- list(LocationID = round(runif(26, 1000, 9000)),
                        PartNumber = paste(runif(26, 10000, 20000), LETTERS, sep = "-"),
                        Quantity = round(runif(26, 2, 4))) %>% as_tibble()
random_integers <- sample(1:26, 26, FALSE)
total_going_out <- list(LocationID = total_coming_in$LocationID[random_integers],
                        PartNumber = total_coming_in$PartNumber[random_integers],
                        Quantity = round(runif(26, 1, 3))) %>% as_tibble()
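If you also want rows that appear in only one of the two tables to contribute (treating the missing side as 0, as in the data.table answer above), one way is to coalesce the NAs that full_join produces before subtracting; a sketch under that assumption:
total_data <- full_join(total_coming_in, total_going_out, by = matchingCols) %>%
  mutate(Quantity.x = coalesce(Quantity.x, 0),
         Quantity.y = coalesce(Quantity.y, 0),
         Quantity_subtract = Quantity.x - Quantity.y) %>%
  select(-Quantity.x, -Quantity.y)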

How to get counter incremented in apply loop

I'm trying to count the rows of a data frame in which column 1 equals "vsrv11" and column 3 is a date whose year is 2017.
So I wrote this code. The counter increments inside the if statement, but with every iteration of the loop the counter seems to become 0 again.
count <- 0
funcao.teste <- function(x) {
  if (x[1] == "vsrv11" && substring(x[3], 0, 4) == "2017") {
    count <<- count + 1
  }
}
apply(vpnsessions, 1, funcao.teste, count)
Generally, I'd advise against using global variables; besides, you can get this count with simple filtering.
df <- data.frame(x = sample(c("vsrv11", rnorm(10)), 100, replace = TRUE),
                 y = rnorm(100),
                 z = as.character(sample(c(2017, 2018), 100, replace = TRUE)))
nrow(df[df[, 1] == "vsrv11" & grepl("2017", df[, 3]), ])
or just
sum(df[, 1] == "vsrv11" & grepl("2017", df[, 3]))
In the tidyverse you can perform such an operation using dplyr::count:
# Sample data
vpnsessions <- data.frame(
  srv = "vsrv11",
  id = c(rep("2017_abc", 10), rep("2018_def", 8)),
  stringsAsFactors = FALSE)
library(dplyr)
count(vpnsessions, year = substr(id, 1, 4))
## A tibble: 2 x 2
# year n
# <chr> <int>
#1 2017 10
#2 2018 8
Note how count tallies the number of occurrences of each derived year. It's easy to extract the relevant rows from the resulting data.frame/tibble.
To nitpick, in R indexing starts with 1 not with 0, so substring(..., 0, 4) from your code should be substring(..., 1, 4).
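If you prefer to keep the apply() structure from the question, a small sketch that avoids the global counter entirely is to have the function return TRUE/FALSE per row and sum the result (same assumed column layout as in the question):
funcao.teste <- function(x) x[1] == "vsrv11" && substring(x[3], 1, 4) == "2017"
count <- sum(apply(vpnsessions, 1, funcao.teste))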

Join 2 data frames using data.table with conditions

I have these two data frames:
library(data.table)
set.seed(42)
A <- data.table(station = sample(1:10, 1000, replace = TRUE),
                hash = sample(letters[1:5], 1000, replace = TRUE),
                point = sample(1:24, 1000, replace = TRUE))
B <- data.table(station = sample(1:10, 100, replace = TRUE),
                card = sample(letters[6:10], 100, replace = TRUE),
                point = sample(1:24, 100, replace = TRUE))
In my real data, A contains more than 1M rows.
For each card (from B) I try to find the matching hash (from A), subject to some conditions: the station and point in A must lie within a range of B's values (for station +/- 1, and for point just + 2).
I group B by card and, for each group, run a function that binds together the rows of A satisfying those conditions and then keeps the hash with the highest frequency.
detect <- function(x) {
  am0 <- data.frame(station = 0,
                    hash = 0,
                    point = 0)
  for (i in 1:nrow(x)) {
    am1 <- A %>%
      filter(station %in% (B$station[i] - 1):(B$station[i] + 1) &
               point > B$point[i] & point < B$point[i] + 2)
    am0 <- rbind(am0, am1)
  }
  t <- as.data.frame(table(am0$hash))
  t <- t %>%
    arrange(-Freq) %>%
    filter(row_number() == 1)
  return(t)
}
And then just:
library(dplyr)
B %>%
  group_by(card) %>%
  do(detect(.)) %>%
  ungroup()
But I don't know how to make the function work per group: the [i] indices refer to B as a whole rather than to the current group, so I get a wrong result.
# A tibble: 5 x 3
card Var1 Freq
<chr> <fctr> <int>
1 f c 46
2 g c 75
3 h c 41
4 i c 64
5 j c 62
I'm a beginner, but I know the data.table library is usually the best choice for joining two datasets of this size. Can you help me find a solution?
I think what you want to do is:
#### Prepare join limits
B[, point_limit := as.integer(point + 2)]
B[, station_lower := as.integer(station - 1)]
B[, station_upper := as.integer(station + 1)]
## Join A on B; creates all combinations of rows in A and B fulfilling the conditions
joined_table <- B[A,
                  on = .(point_limit >= point, point <= point,
                         station_lower <= station, station_upper >= station),
                  nomatch = 0,
                  allow.cartesian = TRUE]
## Count the occurrences of the combinations
counted_table <- joined_table[, .N, by = .(card, hash)][order(card, -N)]
## Select the top row for each group
counted_table[, head(.SD, 1), by = .(card)][order(card)]
This will create a full table with all the matching combinations and then do the counting on that. It relies purely on data.table in order to take full advantage of the speed gains from that package. The data.table vignettes are a good read if you are unfamiliar with the syntax. The nomatch = 0 condition ensures that we are doing an inner join.
This will probably be fine if A is only 1M rows and B stays about the same size, depending on how your data are distributed. We can, however, also split B in a way similar to your do statement, using the purrr package. I'm not sure how this interacts with R's garbage collection, though.
frame_list <- purrr::map(unique(B$card),
                         ~ B[card == .x][A,
                                         on = .(point_limit >= point,
                                                point <= point,
                                                station_lower <= station,
                                                station_upper >= station),
                                         nomatch = 0,
                                         allow.cartesian = TRUE][, .N, by = .(card, hash)])
counted_table_mem <- rbindlist(frame_list)
One thing to note here is that I use rbindlist instead of repeated calls to rbind. Calling rbind repeatedly would be very slow, because new memory has to be allocated each time.
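As a quick sanity check (assuming both variants were run on the same A and B), the per-card winners from the two approaches should agree, barring ties in N:
top_joined <- counted_table[, head(.SD, 1), by = .(card)][order(card)]
top_split  <- counted_table_mem[order(card, -N)][, head(.SD, 1), by = .(card)][order(card)]
all.equal(top_joined, top_split)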

merge data.frames based on year and fill in missing values

I have two data.frames that I want to merge together. The first is:
datess <- seq(as.Date('2005-01-01'), as.Date('2009-12-31'), 'days')
sample <- data.frame(matrix(ncol = 3, nrow = length(datess)))
colnames(sample) <- c('Date', 'y', 'Z')
sample$Date <- datess
The second:
a <- data.frame(matrix(ncol = 3, nrow = 5))
colnames(a) <- c('a', 'y', 'Z')
a$Z <- c(1, 3, 4, 5, 2)
a$a <- c(2005, 2006, 2007, 2008, 2009)
a$y <- c('abc', 'def', 'ijk', 'xyz', 'thanks')
And I'd like the merged one to match the year and then fill in the rest of the values for every day of that year.
Date y Z
2005-01-01 abc 1
2005-01-02 abc 1
2005-01-03 abc 1
{cont}
2009-12-31 thanks 2
So far, three different approaches have been posted:
using match()
using dplyr
using merge()
There is a fourth approach called update join suggested by Frank in chat:
library(data.table)
setDT(sample)[, yr := year(Date)][setDT(a), on = .(yr = a), `:=`(y = i.y, Z = i.Z)]
which turned out to be the fastest and most concise of the four.
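For readers unfamiliar with the syntax, here is the same update join spread over several commented lines (a restatement of the one-liner above, not a different method):
library(data.table)
setDT(sample)                      # convert sample to a data.table by reference
sample[, yr := year(Date)]         # derive the join key: the year of each Date
sample[setDT(a), on = .(yr = a),   # for each row of sample whose yr matches a$a ...
       `:=`(y = i.y, Z = i.Z)]     # ... fill y and Z from a (the i. prefix refers to a's columns)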
Benchmark results:
To decide which of the approaches is the most efficient in terms of speed, I've set up a benchmark using the microbenchmark package.
Unit: microseconds
expr min lq mean median uq max neval
create_data 248.827 291.116 316.240 302.0655 323.588 665.298 100
match 4488.685 4545.701 4752.226 4649.5355 4810.763 6881.418 100
dplyr 6086.609 6275.588 6513.997 6385.2760 6625.229 8535.979 100
merge 2871.883 2942.490 3183.712 3004.6025 3168.096 5616.898 100
update_join 1484.272 1545.063 1710.651 1659.8480 1733.476 3434.102 100
As sample is modified, it has to be created anew before each benchmark run. This is done by a function which is included in the benchmark as well (create_data). The times for create_data need to be subtracted from the other timings.
So, even for this small data set of about 1800 rows, the update join is the fastest, nearly twice as fast as the runner-up merge, followed by match, with dplyr last, more than 4 times slower than the update join (with the time for create_data subtracted).
Benchmark code
library(data.table)
library(magrittr)
datess <- seq(as.Date('2005-01-01'), as.Date('2009-12-31'), 'days')
a <- data.frame(Z = c(1, 3, 4, 5, 2),
                a = 2005:2009,
                y = c('abc', 'def', 'ijk', 'xyz', 'thanks'),
                stringsAsFactors = FALSE)
setDT(a)
make_sample <- function() data.frame(Date = datess, y = NA_character_, Z = NA_real_)
microbenchmark::microbenchmark(
  create_data = make_sample(),
  match = {
    sample <- make_sample()
    matched <- match(format(sample$Date, "%Y"), a$a)
    sample$y <- a$y[matched]
    sample$Z <- a$Z[matched]
  },
  dplyr = {
    sample <- make_sample()
    sample <- sample %>%
      dplyr::mutate(a = format(Date, "%Y") %>% as.numeric) %>%
      dplyr::inner_join(a %>% dplyr::select(a), by = "a")
  },
  merge = {
    sample <- make_sample()
    sample2 <- data.frame(Date = datess)
    sample2$a <- lubridate::year(sample2$Date)
    sample <- base::merge(sample2, a, by = "a")
  },
  update_join = {
    sample <- make_sample()
    setDT(sample)[, yr := year(Date)][a, on = .(yr = a), `:=`(y = i.y, Z = i.Z)]
  }
)
You can use match:
matched <- match(format(sample$Date, "%Y"), a$a)
sample$y <- a$y[matched]
sample$Z <- a$Z[matched]
If y and Z in sample are always empty, you do not need them there, so all you have to do is join on the year, like this:
library(dplyr)
sample %>%
  mutate(a = format(Date, "%Y") %>% as.numeric) %>%
  inner_join(a %>% select(a))
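Note that, as written, a %>% select(a) keeps only the join key, so y and Z are not actually brought across. A sketch that also carries them over (assuming the empty y and Z columns are dropped from sample first):
sample %>%
  select(Date) %>%                                # drop the empty y and Z columns
  mutate(a = as.numeric(format(Date, "%Y"))) %>%  # derive the year join key
  inner_join(a, by = "a") %>%                     # bring y and Z across from a
  select(-a)                                      # drop the helper column if not wanted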
Is there anything speaking against having a year column in your new data frame? If not, you could generate one in 'sample' and use the merge function:
require(lubridate)  # to make generating the year easy
sample2 <- data.frame(Date = datess)
sample2$a <- year(sample2$Date)
df <- merge(sample2, a, by = "a")
This will result in something like this:
head(df)
a Date y Z
1 2005 2005-01-01 abc 1
2 2005 2005-01-02 abc 1
3 2005 2005-01-03 abc 1
4 2005 2005-01-04 abc 1
5 2005 2005-01-05 abc 1
6 2005 2005-01-06 abc 1
You could then remove the year column again if it bothers you.
