I am trying to merge two data frames. The original data frame is much larger than the data frame it is being merged with; however, there is only one possible match for each row. The match is found by comparing the type (a factor) and the level. The level is an integer that falls into one of several buckets (the example only has two).
My current method works but uses sapply and is slow for large numbers of rows. How can I vectorise this operation?
set.seed(123)
sample <- 100
data <- data.frame(type = sample(LETTERS[1:4], sample, replace = TRUE),
                   level = round(runif(sample, 1, sample)),
                   value = round(runif(sample, 200, 1000)))
data2 <- data.frame(type = rep(LETTERS[1:4], 2),
                    lower = c(rep(1, 4), rep(51, 4)),
                    upper = c(rep(50, 4), rep(sample, 4)),
                    cost1 = runif(8, 0, 1),
                    cost2 = runif(8, 0, 1),
                    cost3 = runif(8, 0, 1))
data2[,4:6] <- data2[,4:6]/rowSums(data2[,4:6]) # turns the cost variables into percentages; not necessary on real data
x <- unlist(sapply(1:sample, function(n) which(ll <-data$type[n]==data2$type & data$level[n] >= data2$lower & data$level[n] <= data2$upper)))
data3 <- cbind(data, percentage= data2[x, -c(1:3)])
If I understand the matching problem you've set up, the following code seems to speed things up a bit by splitting data by type and then using cut to find the proper bucket. I think it will accommodate larger numbers of lower/upper pairs, but I haven't checked that carefully.
library(plyr)
percents <- function(value, cost) {
  # keep only the cost rows for this type, ordered by lower bound
  cost <- cost[cost[, 1] == value[1, 1], ]
  cost <- cost[order(cost[, 2]), ]
  # cut() against the interleaved lower/upper breaks; ceiling(ints/2) maps an interval back to its bucket row
  ints <- cut(value[, 2], breaks = c(t(cost[, 2:3])), labels = FALSE, include.lowest = TRUE)
  cbind(value, percentage = cost[ceiling(ints/2), -(1:3)])
}
data4 <- rbind.fill(mapply(percents, value=split(data, data$type), cost=list(data2), SIMPLIFY=FALSE) )
Setting
sample <- 10000
gives the following execution time comparisons
microbenchmark({x <- unlist(sapply(1:sample, function(n) which(ll <-data$type[n]==data2$type & data$level[n] >= data2$lower & data$level[n] <= data2$upper)));
data3 <- cbind(data, percentage= data2[x, -c(1:3)])} ,
data4 <- rbind.fill(mapply(percents, value=split(data, data$type), cost=list(data2), SIMPLIFY=FALSE) ),
times=10)
Unit: milliseconds
expr
{ x <- unlist(sapply(1:sample, function(n) which(ll <- data$type[n] == data2$type & data$level[n] >= data2$lower & data$level[n] <= data2$upper))) data3 <- cbind(data, percentage = data2[x, -c(1:3)]) }
data4 <- rbind.fill(mapply(percents, value = split(data, data$type), cost = list(data2), SIMPLIFY = FALSE))
min lq mean median uq max neval
1198.18269 1214.10560 1225.85117 1226.79838 1234.2671 1258.63122 10
20.81022 20.93255 21.50001 21.24237 22.1305 22.65291 10
where the first row of numbers is for the code shown in your question and the second row is for the code in my post. For this case, the new code is almost 60 times faster.
Edit
To use rbind_all and avoid mapply, use the following:
microbenchmark({x <- unlist(sapply(1:sample, function(n) which(ll <-data$type[n]==data2$type & data$level[n] >= data2$lower & data$level[n] <= data2$upper)));
data3 <- cbind(data, percentage= data2[x, -c(1:3)])} ,
data4 <- rbind_all(lapply(split(data, data$type), percents, cost=data2 )),
times=10)
which gives slightly improved execution times
min lq mean median uq max neval
1271.57023 1289.17614 1297.68572 1301.84540 1308.31476 1313.56822 10
18.33819 18.57373 23.28578 19.53742 19.95132 58.96143 10
Edit 2
Modification to use only the data2$lower values for setting the intervals:
percents <- function(value, cost) {
  cost <- cost[cost[, "type"] == value[1, "type"], ]
  cost <- cost[order(cost[, "lower"]), ]
  # with right = FALSE, include.lowest = TRUE makes the highest break inclusive
  ints <- cut(value[, "level"], breaks = c(cost[, "lower"], max(cost[, "upper"])),
              labels = FALSE, right = FALSE, include.lowest = TRUE)
  cbind(value, percentage = cost[ints, -(1:3)])
}
to use with
data4 <- rbind_all(lapply(split(data, data$type), percents, cost=data2 ))
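As a further alternative (not benchmarked above, and assuming data.table version 1.9.8 or later is available), a non-equi join can express the same type/level-range match in a single step:

library(data.table)
dt  <- as.data.table(data)
dt2 <- as.data.table(data2)
# join each row of data to the data2 row with matching type and lower <= level <= upper;
# the cost columns come along for the ride
data5 <- dt2[dt, on = .(type, lower <= level, upper >= level)]

Here data5 carries the cost columns next to each row of data; the lower and upper columns in the result hold the matched level values, which is how data.table reports non-equi join keys.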
How to subtract 2 columns in a .csv file uploaded in R?
I have tried to create the new column using reading <- $started_time - $ended_time
Since you did not post any example data, here is an example based on the built-in iris dataset. You can simply use - to subtract vectors of the same length (if the lengths differ, the shorter vector will be recycled).
You can select a column from your dataset with the $ operator or with the [] operator:
data(iris)
#assigning the result to a new column
iris$subtraction <- iris$Sepal.Length-iris$Sepal.Width
iris$subtraction <- iris[,1]-iris[,2]
#assigning the result to a new variable
subtraction <- iris[,1]-iris[,2]
subtraction <- iris$Sepal.Length-iris$Sepal.Width
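For reference, here are the data.table and dplyr equivalents of the same subtraction (the other two approaches benchmarked in the EDIT below); a minimal sketch assuming both packages are installed:

library(data.table)
library(dplyr)
# data.table: add the column by reference
iris.dt <- as.data.table(iris)
iris.dt[, subtraction := Sepal.Length - Sepal.Width]
# dplyr: mutate returns a new data frame with the extra column
iris_dplyr <- iris %>% mutate(subtraction = Sepal.Length - Sepal.Width)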
EDIT
A microbenchmark of 3 equivalent solutions:
library(microbenchmark)
library(data.table)
library(dplyr)
library(ggplot2)
#prepare simulation ------------------------------------------------------------
#number of rows to be tested
nr <- seq(100000,10000000,100000)
# initialize a list to store results
time <- as.list(rep(NA, 100))
# benchmark
for (i in 1:length(nr)) {
  set.seed(5)
  # create data
  df <- data.frame(x = rnorm(nr[i]), y = rnorm(nr[i]))
  dt <- data.table(x = rnorm(nr[i]), y = rnorm(nr[i]))
  # benchmark
  x <- print(microbenchmark(
    base  = df$new.col <- df$x - df$y,
    DT    = dt <- dt[, new.col := x - y],
    dplyr = df %>% mutate(new.col = x - y),
    times = 10
  ))
  # store results
  time[[i]] <- x[, c(1, 4)]
}
# discard the first 4 elements because they run in microseconds
bench <- do.call(rbind, time[5:100])
# add the number of rows as a column
bench$nrow <- rep(nr[5:100], each = 3)
ggplot(bench, aes(x = nrow, y = mean, group = expr, col = expr)) +
  geom_smooth(se = F) +
  theme_minimal() +
  xlab("# rows") +
  ylab("time (milliseconds)")
As you can see, for this simple task the base and data.table solutions are equivalent, while the mutate solution is a bit slower. However, the entire simulation runs in about a minute, and the single operations take a few milliseconds.
My PC has 16 GB of RAM and 12 cores.
EDIT
After the OP asked about the date case, here is a small example with dates as the POSIXct class:
library(chron)
day <- Sys.Date()
# last7days is not defined in the original post; assuming it means the last 7 days
last7days <- day - 0:6
hm <- merge(0:23, seq(0, 45, by = 15))
datetime <- merge(last7days, chron(time = paste(hm$x, ':', hm$y, ':', 0)))
colnames(datetime) <- c('date', 'time')
# create datetime
dt <- as.POSIXct(paste(datetime$date, datetime$time))
df <- data.frame(x=sample(dt,200000,replace = T),y=sample(dt,200000,replace = T))
microbenchmark(df$x-df$y)
The operation runs in a few milliseconds, as expected:
Unit: milliseconds
expr min lq mean median uq max neval
df$x - df$y 1.459801 1.544301 2.755227 1.624501 1.845401 62.7416 100
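If you need the difference in specific units rather than the ones "-" picks automatically, base R's difftime() lets you state them explicitly; a minimal sketch using the df created above:

# explicit units instead of the automatic choice made by "-"
head(difftime(df$x, df$y, units = "hours"))
# convert a difftime to plain numeric seconds
head(as.numeric(df$x - df$y, units = "secs"))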
I am trying to identify the most probable group that an observation belongs to, for several thousand large datasets. It is possible that some of the data is incorrectly classified and I am trying to work out the most likely "true" value. I have tried to use knn3 from the caret package but the predictions take too long to compute. In researching alternatives I came across the nn2 function from the RANN package, which performs a nearest neighbour search that is significantly faster than k-nearest neighbours.
library(RANN)
library(tidyverse)
iris.scaled <- iris %>%
mutate_if(is.numeric, scale)
iris.nn2 <- nn2(iris.scaled[1:4])
The result of the nn2 function is a list of two matrices, one of indices and one of distances. I want to use the indices matrix to work out the most likely grouping of each observation; however, it returns the row number of the observation and not its group. I need to replace this with the group it belongs to (in this case, the Species column).
distance.index <- iris.nn2$nn.idx[,-1]
target = iris.scaled$Species
I have removed the first column as the first nearest neighbour is always the observation itself.
matrix(target[distance.index[,]], nrow = nrow(distance.index), ncol = ncol(distance.index))
This code gives me the output I want, but is there a tidier way of creating this table and then calculating the most common response for each row? Speed of calculation is key.
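For context, one straightforward (but slow) way to get that most common response per row is a per-row table(); a minimal sketch:

category_neighbours <- matrix(target[distance.index],
                              nrow = nrow(distance.index),
                              ncol = ncol(distance.index))
# majority vote: most frequent class among each observation's neighbours
class <- apply(category_neighbours, 1, function(x) names(which.max(table(x))))
head(data.frame(iris, class))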
Your scaling can be a real bottleneck when you have more columns (tested on a 200 x 22216 gene expression matrix). My version might not seem that impressive with the iris dataset, but on the larger dataset I get 1.3 sec vs. 32.8 sec execution time.
Using tabulate instead of table gives an additional improvement, which is dwarfed, however, by the matrix scaling.
I used a custom scale function here, but using base::scale on a matrix would already be a major improvement.
I also addressed the issue raised by M. Papenberg of "self" not being considered the nearest neighbor by setting those to NA.
invisible(lapply(c("tidyverse", "matrixStats", "RANN", "microbenchmark", "compiler"),
                 require, character.only = TRUE))
enableJIT(3)
# faster column scaling (modified from https://www.r-bloggers.com/author/strictlystat/)
colScale <- function(x, center = TRUE, scale = TRUE, rows = NULL, cols = NULL) {
  if (!is.null(rows) && !is.null(cols)) {
    x <- x[rows, cols, drop = FALSE]
  } else if (!is.null(rows)) {
    x <- x[rows, , drop = FALSE]
  } else if (!is.null(cols)) {
    x <- x[, cols, drop = FALSE]
  }
  cm <- colMeans(x, na.rm = TRUE)
  if (scale) {
    csd <- matrixStats::colSds(x, center = cm, na.rm = TRUE)
  } else {
    csd <- rep(1, length = length(cm))
  }
  if (!center) cm <- rep(0, length = length(cm))
  x <- t((t(x) - cm) / csd)
  return(x)
}
# your posted version (mostly):
oldv <- function() {
  iris.scaled <- iris %>%
    mutate_if(is.numeric, scale)
  iris.nn2 <- nn2(iris.scaled[1:4])
  distance.index <- iris.nn2$nn.idx[, -1]
  target <- iris.scaled$Species
  category_neighbours <- matrix(target[distance.index[, ]],
                                nrow = nrow(distance.index), ncol = ncol(distance.index))
  class <- apply(category_neighbours, 1, function(x) {
    x1 <- table(x)
    names(x1)[which.max(x1)]
  })
  cbind(iris, class)
}
## my version:
myv <- function() {
  iris.scaled <- colScale(data.matrix(iris[, 1:(dim(iris)[2] - 1)]))
  iris.nn2 <- nn2(iris.scaled)
  # set self neighbors to NA
  iris.nn2$nn.idx[iris.nn2$nn.idx - seq_len(dim(iris.nn2$nn.idx)[1]) == 0] <- NA
  # match up categories
  category_neighbours <- matrix(iris$Species[iris.nn2$nn.idx[, ]],
                                nrow = dim(iris.nn2$nn.idx)[1], ncol = dim(iris.nn2$nn.idx)[2])
  # turn category_neighbours into numeric for tabulate
  cn <- matrix(as.numeric(factor(category_neighbours, exclude = NULL)),
               nrow = dim(iris.nn2$nn.idx)[1], ncol = dim(iris.nn2$nn.idx)[2])
  cnl <- levels(factor(category_neighbours, exclude = NULL))
  # tabulate frequencies and match up with factor levels
  class <- apply(cn, 1, function(x) {
    cnl[which.max(tabulate(x, nbins = length(cnl))[!is.na(cnl)])]
  })
  cbind(iris, class)
}
microbenchmark(oldv(), myv(), times=100L)
#> Unit: milliseconds
#> expr min lq mean median uq max neval cld
#> oldv() 11.015986 11.679337 12.806252 12.064935 12.745082 33.89201 100 b
#> myv() 2.430544 2.551342 3.020262 2.612714 2.691179 22.41435 100 a
The following code runs a loop, but the problem is the speed; it takes several hours to finish and I am looking for an alternative so that I don't have to wait so long.
Basically, the code does the following calculations:
1. It calculates the mean of the values over the 60 days.
2. It gets the standard deviation of the values over the 60 days.
3. It gets the max of the values over the 60 days.
4. It gets the min of the values over the 60 days.
5. Then, using the previous calculations, the code "smooths" the peaks up and down.
6. Then the code simply gets the means over 60, 30, 15 and 7 days.
So the purpose of this code is to remove the peaks in the data using the method just described.
Here is the code:
options(stringsAsFactors=F)
DAT <- data.frame(ITEM = "x", CLIENT = as.numeric(1:100000), matrix(sample(1:1000, 60, replace=T), ncol=60, nrow=100000, dimnames=list(NULL,paste0('DAY_',1:60))))
DATT <- DAT
nRow <- nrow(DAT)
TMP <- NULL
for (iROW in 1:nRow) { # iROW <- 1
  print(c(iROW, nRow))
  Demand <- NULL
  for (iCOL in 3:ncol(DAT)) { # iCOL <- 1
    Demand <- c(Demand, DAT[iROW, iCOL])
  }
  ww <- which(!is.na(Demand))
  if (length(ww) > 0) {
    Average <- round(mean(Demand[ww]), digits = 4)
    DesvEst <- round(sd(Demand, na.rm = T), digits = 4)
    Max <- round(Average + (1 * DesvEst), digits = 4)
    Min <- round(max(Average - (1 * DesvEst), 0), digits = 4)
    Demand <- round(ifelse(is.na(Demand), Demand, ifelse(Demand > Max, Max, ifelse(Demand < Min, Min, Demand))))
    Prom60 <- round(mean(Demand[ww]), digits = 4)
    Prom30 <- round(mean(Demand[intersect(ww, (length(Demand) - 29):length(Demand))]), digits = 4)
    Prom15 <- round(mean(Demand[intersect(ww, (length(Demand) - 14):length(Demand))]), digits = 4)
    Prom07 <- round(mean(Demand[intersect(ww, (length(Demand) - 6):length(Demand))]), digits = 4)
  } else {
    Average <- DesvEst <- Max <- Min <- Prom60 <- Prom30 <- Prom15 <- Prom07 <- NA
  }
  DAT[iROW, 3:ncol(DAT)] <- Demand
  TMP <- rbind(TMP, cbind(DAT[iROW, ], Average, DesvEst, Max, Min, Prom60, Prom30, Prom15, Prom07))
}
DAT <- TMP
If one runs your code (with a smaller number of rows) through a profiler, one sees that the main issue is the rbind at the end, followed by the growing of Demand with c() mentioned by @Riverarodrigoa.
We can focus on these two by creating numeric matrices of suitable size and working with those. Only at the end is the final data.frame created:
options(stringsAsFactors=F)
N <- 1000
set.seed(42)
DAT <- data.frame(ITEM = "x",
                  CLIENT = as.numeric(1:N),
                  matrix(sample(1:1000, 60, replace = T), ncol = 60, nrow = N,
                         dimnames = list(NULL, paste0('DAY_', 1:60))))
nRow <- nrow(DAT)
TMP <- matrix(0, ncol = 8, nrow = N,
              dimnames = list(NULL, c("Average", "DesvEst", "Max", "Min",
                                      "Prom60", "Prom30", "Prom15", "Prom07")))
DemandMat <- as.matrix(DAT[, 3:ncol(DAT)])
for (iROW in 1:nRow) {
  Demand <- DemandMat[iROW, ]
  ww <- which(!is.na(Demand))
  if (length(ww) > 0) {
    Average <- round(mean(Demand[ww]), digits = 4)
    DesvEst <- round(sd(Demand, na.rm = T), digits = 4)
    Max <- round(Average + (1 * DesvEst), digits = 4)
    Min <- round(max(Average - (1 * DesvEst), 0), digits = 4)
    Demand <- round(ifelse(is.na(Demand), Demand, ifelse(Demand > Max, Max, ifelse(Demand < Min, Min, Demand))))
    Prom60 <- round(mean(Demand[ww]), digits = 4)
    Prom30 <- round(mean(Demand[intersect(ww, (length(Demand) - 29):length(Demand))]), digits = 4)
    Prom15 <- round(mean(Demand[intersect(ww, (length(Demand) - 14):length(Demand))]), digits = 4)
    Prom07 <- round(mean(Demand[intersect(ww, (length(Demand) - 6):length(Demand))]), digits = 4)
  } else {
    Average <- DesvEst <- Max <- Min <- Prom60 <- Prom30 <- Prom15 <- Prom07 <- NA
  }
  DemandMat[iROW, ] <- Demand
  TMP[iROW, ] <- c(Average, DesvEst, Max, Min, Prom60, Prom30, Prom15, Prom07)
}
DAT <- cbind(DAT[,1:2], DemandMat, TMP)
For 1,000 rows this takes about 0.2 s instead of over 4 s. For 10,000 rows I get 2 s instead of 120 s.
Obviously, this is not particularly pretty code. One could do this much more nicely using the tidyverse or data.table. I just find it worth noting that for loops are not necessarily slow in R, but dynamically growing data structures inside them is.
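For completeness, the per-row statistics can also be computed with no loop at all. Below is a sketch using matrixStats::rowSds (not benchmarked here); it assumes the original, unprocessed DAY_1..DAY_60 columns of DAT as created above and reproduces the same clamping and means.

library(matrixStats)

# day columns of DAT (columns whose names start with DAY_), before any other processing
DemandMat <- as.matrix(DAT[, grep("^DAY_", names(DAT))])
Average <- round(rowMeans(DemandMat, na.rm = TRUE), 4)
DesvEst <- round(rowSds(DemandMat, na.rm = TRUE), 4)
Max     <- round(Average + DesvEst, 4)
Min     <- round(pmax(Average - DesvEst, 0), 4)
# clamp every value into [Min, Max]; pmin/pmax recycle the per-row limits column-wise and keep NAs
Clamped <- round(pmin(pmax(DemandMat, Min), Max))
nDay    <- ncol(Clamped)
Prom60  <- round(rowMeans(Clamped, na.rm = TRUE), 4)
Prom30  <- round(rowMeans(Clamped[, (nDay - 29):nDay], na.rm = TRUE), 4)
Prom15  <- round(rowMeans(Clamped[, (nDay - 14):nDay], na.rm = TRUE), 4)
Prom07  <- round(rowMeans(Clamped[, (nDay - 6):nDay], na.rm = TRUE), 4)
DATvec  <- cbind(DAT[, 1:2], Clamped, Average, DesvEst, Max, Min, Prom60, Prom30, Prom15, Prom07)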
I was hoping someone could help point me in the right direction to create a vector in R containing a defined number of randomly generated numbers. I am a complete newbie to R, and I have learned that the concatenate function is used for creating vectors. However, I wish to populate the vector with 50 random numbers. I do not wish to specify a range or any other conditions for the numbers.
MyVectorObject <- c(...)
Any suggestions would be greatly appreciated!
It depends on which kind of numbers you want to generate. Here are some options:
x1 <- rpois(n = 50, lambda = 10)                        # 50 random counts from a Poisson distribution
x2 <- runif(n = 50, min = 1, max = 10)                  # 50 numbers uniformly distributed between 1 and 10
x3 <- sample(x = c(1, 3, 5), size = 50, replace = TRUE) # 50 values sampled from {1, 3, 5} with replacement
If we are talking about integers, you want to generate numbers in the interval [-.Machine$integer.max, .Machine$integer.max], which on my computer is [-2147483647, 2147483647].
Implementation
You can use base::sample to generate positive numbers from 1 to .Machine$integer.max:
random.pos <- function(N) {
  int.max <- .Machine$integer.max
  return(sample(int.max, N, replace = TRUE))
}
If you also want negative numbers, you can use:
random.posneg <- function(N) {
  int.max <- .Machine$integer.max
  random.numbers <- sample(int.max, N, replace = TRUE)
  random.signs <- sample(c(1, -1), N, replace = TRUE)
  return(random.numbers * random.signs)
}
NOTE: Neither of the functions above generates 0 (zero).
The best approach (in my opinion) is to use the base::runif function:
random.runif <- function(N) {
  int.max <- .Machine$integer.max
  int.min <- -int.max
  return(as.integer(runif(N, int.min, int.max)))
}
This will be a little bit slower than using base::sample, but you get uniformly distributed random numbers that can include zero.
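A quick usage example of the three helpers (a sketch; results differ between runs unless you fix the seed):

set.seed(42)
random.pos(3)     # three positive integers
random.posneg(3)  # three integers with random signs
random.runif(3)   # three uniformly drawn integers; zero is possible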
Benchmark
library(microbenchmark)
require(compiler)
random.runif <- cmpfun(random.runif)
random.pos <- cmpfun(random.pos)
random.posneg <- cmpfun(random.posneg)
N <- 500
op <- microbenchmark(
  RUNIF  = random.runif(N),
  POS    = random.pos(N),
  POSNEG = random.posneg(N),
  times  = 10000
)
print(op)
## library(ggplot2)
## boxplot(op)
## qplot(y=time, data=op, colour=expr) + scale_y_log10()
and the results of the benchmark above:
Unit: microseconds
expr min lq mean median uq max neval
RUNIF 13.423 14.251 15.197122 14.482 14.694 2425.290 10000
POS 4.174 5.043 5.613292 5.317 5.645 2436.909 10000
POSNEG 11.673 12.845 13.383194 13.285 13.800 60.304 10000
I have a similar problem to the one explained in this question. Like that question, I have a data frame with 3 columns (id, group, value). I want to take n samples with replacement from each group and produce a smaller data frame with n samples from each group.
However, I am taking hundreds of subsamples in a simulation, and the ddply-based solution is too slow to use in my code. I tried to rewrite a simpler version to see if I could get better performance, but it is still slow (no better than the ddply solution, if not worse). Below is my code. I am wondering if it can be improved for performance.
#Producing example DataFrame
dfsize <- 10
groupsize <- 7
test.frame.1 <- data.frame(id = 1:dfsize, group = rep(1:groupsize,each = ceiling(dfsize/groupsize))[1:dfsize], junkdata = sample(1:10000, size =dfsize))
#Main function for subsampling
sample.from.group <- function(df, dfgroup, size, replace) {
  outputsize <- 1
  newdf <- df # assuming a sample cannot be larger than the original
  uniquegroups <- unique(dfgroup)
  for (uniquegroup in uniquegroups) {
    dataforgroup <- which(dfgroup == uniquegroup)
    mysubsample <- df[sample(dataforgroup, size, replace), ]
    sizeofsample <- nrow(mysubsample)
    newdf[outputsize:(outputsize + sizeofsample - 1), ] <- mysubsample
    outputsize <- outputsize + sizeofsample
  }
  return(newdf[1:(outputsize - 1), ])
}
#Using the function
sample.from.group(test.frame.1, test.frame.1$group, 100, replace = TRUE)
Here are two plyr-based solutions:
library(plyr)
dfsize <- 1e4
groupsize <- 7
testdf <- data.frame(
  id = seq_len(dfsize),
  group = rep(1:groupsize, length = dfsize),
  junkdata = sample(1:10000, size = dfsize))
sample_by_group_1 <- function(df, dfgroup, size, replace) {
  ddply(df, dfgroup, function(x) {
    x[sample(nrow(x), size = size, replace = replace), , drop = FALSE]
  })
}
sample_by_group_2 <- function(df, dfgroup, size, replace) {
  idx <- split_indices(df[[dfgroup]])
  subs <- lapply(idx, sample, size = size, replace = replace)
  df[unlist(subs, use.names = FALSE), , drop = FALSE]
}
library(microbenchmark)
microbenchmark(
  ddply = sample_by_group_1(testdf, "group", 100, replace = TRUE),
  plyr  = sample_by_group_2(testdf, "group", 100, replace = TRUE)
)
# Unit: microseconds
# expr min lq median uq max neval
# ddply 4488 4723 5059 5360 36606 100
# plyr 443 487 507 536 31343 100
The second approach is much faster because it does the subsetting in a single step; if you can figure out how to do it in one step, it's usually an easy way to get better performance.
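The same one-step idea also works in base R without plyr; a sketch (not part of the benchmark above, using a hypothetical sample_by_group_base helper):

sample_by_group_base <- function(df, dfgroup, size, replace) {
  # sample row indices within each group, then subset once
  idx <- unlist(lapply(split(seq_len(nrow(df)), df[[dfgroup]]),
                       sample, size = size, replace = replace),
                use.names = FALSE)
  df[idx, , drop = FALSE]
}
sample_by_group_base(testdf, "group", 100, replace = TRUE)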
I think this is cleaner and possibly faster:
z <- sapply(unique(test.frame.1$group), FUN = function(x) {
  sample(which(test.frame.1$group == x), 100, TRUE)
})
out <- test.frame.1[z, ]
out