column bind in R and name the column - r

I want to column bind (cbind) mydf[,"c"] onto mydf, give the new column the name newcolumn in one step, and end up with the result matrix mydf shown below. How do I do it?
mydf
# a b c
# 1 2 6
# 1 3 4
mydf
# a b c newcolumn
# 1 2 6 6
# 1 3 4 4

You can specify the new column name in the call to cbind:
mydf <- cbind(mydf, newcolumn=mydf[,"c"])
mydf
# a b c newcolumn
# [1,] 1 2 6 6
# [2,] 1 3 4 4
Data (constructed with the same approach):
mydf <- cbind(a=c(1, 1), b=c(2, 3), c=c(6, 4))
If you had a data frame instead of a matrix, you could simply do mydf$newcolumn <- mydf$c.
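For example, a quick sketch assuming a data.frame version of the same data:
mydf <- data.frame(a=c(1, 1), b=c(2, 3), c=c(6, 4))
mydf$newcolumn <- mydf$c  # adds the column in place under the new name
mydf
# a b c newcolumn
# 1 1 2 6 6
# 2 1 3 4 4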

There are many approaches you could take here:
mydf <- data.frame(a=c(1,1),b=c(2,3),c=c(6,4));
mydf;
## a b c
## 1 1 2 6
## 2 1 3 4
data.frame(mydf,newcolumn=mydf$c);
## a b c newcolumn
## 1 1 2 6 6
## 2 1 3 4 4
cbind(mydf,newcolumn=mydf$c);
## a b c newcolumn
## 1 1 2 6 6
## 2 1 3 4 4
transform(mydf,newcolumn=c);
## a b c newcolumn
## 1 1 2 6 6
## 2 1 3 4 4
within(mydf,newcolumn <- c);
## a b c newcolumn
## 1 1 2 6 6
## 2 1 3 4 4
mydf$newcolumn <- mydf$c;
mydf;
## a b c newcolumn
## 1 1 2 6 6
## 2 1 3 4 4
Of the 5 approaches shown above, only the last actually modifies mydf. For the other 4, you have to assign the return value back to mydf to replace it with the new data.frame that has the additional column.
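For example, a minimal sketch of the assign-back idiom using the transform approach:
mydf <- transform(mydf, newcolumn=c)  # transform() returns a new data.frame; assigning it back to mydf keeps the column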
library('microbenchmark');
bind.df <- function() mydf <- data.frame(mydf,newcolumn=mydf$c);
bind.cb <- function() mydf <- cbind(mydf,newcolumn=mydf$c);
bind.tr <- function() mydf <- transform(mydf,newcolumn=c);
bind.wi <- function() mydf <- within(mydf,newcolumn <- c);
bind.as1 <- function() mydf$newcolumn <- mydf$c;
bind.as2 <- function() mydf['newcolumn'] <- mydf['c'];
bind.as3 <- function() mydf[,'newcolumn'] <- mydf[,'c'];
bind.as4 <- function() mydf[['newcolumn']] <- mydf[['c']];
N <- 1e5; mydf <- data.frame(a=rep(c(1,1),N),b=rep(c(2,3),N),c=rep(c(6,4),N));
microbenchmark(bind.df(),bind.cb(),bind.tr(),bind.as1(),bind.as2(),bind.as3(),bind.as4(),times=1e4);
## Unit: microseconds
## expr min lq mean median uq max neval
## bind.df() 97.077 112.046 128.66080 121.027 134.711 1690.513 10000
## bind.cb() 86.814 100.927 117.14364 109.907 122.737 1849.172 10000
## bind.tr() 105.203 120.171 138.90802 131.290 145.830 1680.250 10000
## bind.as1() 12.402 20.100 23.35085 22.239 25.660 148.397 10000
## bind.as2() 370.776 412.686 596.47901 425.088 449.036 41799.239 10000
## bind.as3() 347.682 385.743 564.78320 396.435 419.528 42144.355 10000
## bind.as4() 17.534 26.087 30.09639 28.654 32.930 638.915 10000

If you have two separate column objects and want to bind them together column by column, convert each to a data frame first and then use cbind:
dapu <- cbind(data.frame(data_r), data.frame(data_c))
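For instance, a small sketch with made-up one-column matrices standing in for data_r and data_c (which are not defined in the original answer):
data_r <- matrix(1:4, ncol=1, dimnames=list(NULL, "r"))
data_c <- matrix(5:8, ncol=1, dimnames=list(NULL, "c"))
dapu <- cbind(data.frame(data_r), data.frame(data_c))
dapu
# r c
# 1 1 5
# 2 2 6
# 3 3 7
# 4 4 8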

Related

Post-processing of full_join output to remove multiplicity

I have two data frames (df1, df2) and performed a full_join using the common column of interest, col1.
df1 <- data.frame(col1=c('A','D','C','C','E','E','I'),col2=c(4,7,8,3,2,4,9))
df2 <- data.frame(col1=c('A','A','B','C','C','E','E','I'),col2=c(4,1,6,8,3,2,1,9))
df1 %>% full_join(df2, by = "col1")
# col1 col2.x col2.y
# 1 A 4 4
# 2 A 4 1
# 3 D 7 NA
# 4 C 8 8
# 5 C 8 3
# 6 C 3 8
# 7 C 3 3
# 8 E 2 2
# 9 E 2 1
# 10 E 4 2
# 11 E 4 1
# 12 I 9 9
# 13 B NA 6
As expected, the full_join produces multiple rows for repeated values of the joining column, which I wish to avoid. I want to arrive at the following output. What kind of post-processing approaches do you suggest?
# col1 col2.x col2.y
# 1 A 4 4
# 2 A NA 1
# 3 D 7 NA
# 4 C 8 8
# 5 C 3 3
# 6 E 2 2
# 7 E 4 1
# 8 I 9 9
# 9 B NA 6
More information:
Case 1: when both input objects contain two rows with the same value of col1, I do not need four rows in the output:
# 4 C 8 8
# 5 C 8 3
# 6 C 3 8
# 7 C 3 3
instead, I want only two as:
# 4 C 8 8
# 5 C 3 3
Case 2: similarly, when the values differ between the two inputs, I want the rows paired up rather than fully crossed:
# 8 E 2 2
# 9 E 2 1
# 10 E 4 2
# 11 E 4 1
instead, I want only two rows as below:
# 8 E 2 2
# 9 E 4 1
A possible solution in 2 steps using the data.table-package:
0) load package & convert to data.tables
library(data.table)
setDT(df1)
setDT(df2)
1) define helper function
unlistSD <- function(x) {
  # unlist each list column of .SD and pad the shorter ones with NA
  # so that every column ends up with the same length
  l <- length(x)
  ls <- sapply(x, lengths)
  m <- max(ls)
  newSD <- vector(mode = "list", length = l)
  for (i in 1:l) {
    u <- unlist(x[[i]])
    lu <- length(u)
    if (lu < m) {
      u <- c(u, rep(NA_real_, m - lu))
    }
    newSD[[i]] <- u
  }
  return(setNames(as.list(newSD), names(x)))
}
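To see what the helper does, here is a small illustration on a toy input (my own example, not part of the original answer); the shorter list column is padded with NA so both columns line up:
unlistSD(list(col2.x = list(4), col2.y = list(c(4, 1))))
# $col2.x
# [1]  4 NA
# $col2.y
# [1] 4 1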
2) merge and apply helper function
merge(df1[, .(col2 = list(col2)), by = col1],
      df2[, .(col2 = list(col2)), by = col1],
      by = "col1", all = TRUE
)[, unlistSD(.SD), by = col1]
which gives the following result:
col1 col2.x col2.y
1: A 4 4
2: A NA 1
3: C 8 8
4: C 3 3
5: D 7 NA
6: E 2 2
7: E 4 1
8: I 9 9
9: B NA 6
Another possibility with base R:
unlistDF <- function(d, groupcols) {
  ds <- split(d[, setdiff(names(d), groupcols)], d[, groupcols])
  ls <- lapply(ds, function(x) max(sapply(x, lengths)))
  dl <- lapply(ds, function(x) lapply(as.list(x), unlist))
  du <- Map(function(x, y) {
    lapply(x, function(i) {
      if (length(i) < y) {
        c(i, rep(NA_real_, y - length(i)))
      } else i
    })
  }, x = dl, y = ls)
  ld <- lapply(du, as.data.frame)
  cbind(d[rep(1:nrow(d), ls), groupcols, drop = FALSE],
        do.call(rbind.data.frame, c(ld, make.row.names = FALSE)),
        row.names = NULL)
}
Now you can use this function as follows in combination with merge:
df <- merge(aggregate(col2 ~ col1, df1, as.list),
            aggregate(col2 ~ col1, df2, as.list),
            by = "col1", all = TRUE)
unlistDF(df, "col1")

apply similar variable to multiple dataset in r

I have 6 data frames named dat1 to dat6. I want to add a variable region to each of them and label it in a similar way, like this:
dat1$region <- paste("NE-1")
dat2$region <- paste("NE-2")
dat3$region <- paste("NE-3")
dat4$region <- paste("NE-4")
dat5$region <- paste("NE-5")
How can I write this code in a more concise way? using apply or for-loop?
Thanks!!
One option is to use get and assign functions in a for-loop.
Sample data:
dat1 <- data.frame(id=1:4, region = letters[1:4])
dat2 <- data.frame(id=5:8, region = letters[5:8])
dat3 <- data.frame(id=9:12, region = letters[9:12])
dat4 <- data.frame(id=13:16, region = letters[13:16])
dat5 <- data.frame(id=17:20, region = letters[17:20])
dat1
# id region
# 1 1 a
# 2 2 b
# 3 3 c
# 4 4 d
Apply for-loop:
for (i in 1:5) {
  name <- paste("dat", i, sep = "")
  temp <- get(name)
  temp$region <- paste("NE", i, sep = "-")
  assign(name, temp)
}
Verify results:
dat1
# id region
# 1 1 NE-1
# 2 2 NE-1
# 3 3 NE-1
# 4 4 NE-1
dat5
# id region
# 1 17 NE-5
# 2 18 NE-5
# 3 19 NE-5
# 4 20 NE-5
Keep all dataframes in a list then use lapply:
# example dataframes
dat1 <- cars[1:2, ]
dat2 <- cars[3:4, ]
dat3 <- cars[5:6, ]
myList <- list(dat1, dat2, dat3)
# myList
# [[1]]
# speed dist
# 1 4 2
# 2 4 10
#
# [[2]]
# speed dist
# 3 7 4
# 4 7 22
#
# [[3]]
# speed dist
# 5 8 16
# 6 9 10
Then it is easier to do repetitive operations. Loop through the list, add region column:
res <- lapply(seq_along(myList), function(i) {
  x <- myList[[i]]
  x$region <- paste0("NE-", i)
  x
})
res
# [[1]]
# speed dist region
# 1 4 2 NE-1
# 2 4 10 NE-1
#
# [[2]]
# speed dist region
# 3 7 4 NE-2
# 4 7 22 NE-2
#
# [[3]]
# speed dist region
# 5 8 16 NE-3
# 6 9 10 NE-3
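If you then need the modified data frames back as individual objects dat1, dat2, dat3 rather than a list, one possible follow-up (a sketch; it overwrites the originals in the global environment):
names(res) <- paste0("dat", seq_along(res))
list2env(res, envir = .GlobalEnv)  # writes dat1, dat2, dat3 back, now with the region column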
How about this (assuming all your items start with dat and end with a unique identifier string):
dat_names <- ls()[grepl("^dat", ls())]
dat_ID <- gsub("dat", "", dat_names)  # gsub is vectorised, no sapply needed
for (d in seq_along(dat_names)) {
  # dat_names is a character vector, so fetch and reassign each object by name
  temp <- get(dat_names[d])
  temp$region <- paste("NE-", dat_ID[d], sep = "")
  assign(dat_names[d], temp)
}

Preserving non-numerical columns when using combn in R

I have a data frame similar to the following format:
Doc Category val
A aa 1
B ab 6
C ab 3
D cc 6.....
I am using the following code to identify all combinations of val and their sums, and then extract the rows that add up to a target sum I have already identified.
#all combinations
res <- Map(combn, list(val), seq_along(val), simplify = FALSE)
x=unlist(res, recursive = FALSE)
z=lapply(x, function(x) sum(x))
My issue is determining the best way to preserve the character columns in the data frame, since the code above only returns numerical values. The way I am doing it now is a mapping based on val, which normally works fine; however, I can run into issues when there are duplicated values.
For example, if my target sum is 7, I eventually want output that looks like this (there are other ways to get to this value, but for now just returning the first instance works):
Doc Category val
A aa 1
B ab 6
Is there a better way to map back to the non-numerical columns to achieve this output?
Would this solution work for you:
df <- data.frame(Doc = LETTERS[1:7],
                 Category = c("aa", "ab", "ab", "cc", "ca", "cb", "bb"),
                 val = c(1, 6, 3, 6, 4, 5, 2),
                 stringsAsFactors = FALSE)
df
# Doc Category val
# 1 A aa 1
# 2 B ab 6
# 3 C ab 3
# 4 D cc 6
# 5 E ca 4
# 6 F cb 5
# 7 G bb 2
target.sum=7
# create an "id" variable that is equal to the index of all rows
df$id <- seq_along(df$val)
id.res <- Map(combn, list(df$id), seq_along(df$id), simplify = FALSE)
x=unlist(id.res, recursive = FALSE)
#remove all elements in the list where the sum of
# values in column val is not equal to target value
x.list <- lapply(x,FUN=function(x){ if(sum(df$val[x]) == target.sum ) df[x,] else NA})
#remove missing values
x.list <-x.list[!is.na(x.list)]
x.list
# [[1]]
# Doc Category val id
# 1 A aa 1 1
# 2 B ab 6 2
#
# [[2]]
# Doc Category val id
# 1 A aa 1 1
# 4 D cc 6 4
#
# [[3]]
# Doc Category val id
# 3 C ab 3 3
# 5 E ca 4 5
#
# [[4]]
# Doc Category val id
# 6 F cb 5 6
# 7 G bb 2 7
#
# [[5]]
# Doc Category val id
# 1 A aa 1 1
# 5 E ca 4 5
# 7 G bb 2 7
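Since the question notes that returning the first matching combination is enough, a simple follow-up is to keep only the first element of the filtered list:
x.list[[1]]
# Doc Category val id
# 1 A aa 1 1
# 2 B ab 6 2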

How do I get the minimum or maximum of two values for each row in a data.table? [duplicate]

I need to find the row-wise minimum of many (60+) relatively large data.frames (~ 250,000 x 3) (or I can equivalently work on an xts object).
set.seed(1000)
my.df <- sample(1:5, 250000*3, replace=TRUE)
dim(my.df) <- c(250000,3)
my.df <- as.data.frame(my.df)
names(my.df) <- c("A", "B", "C")
The data frame my.df looks like this
> head(my.df)
A B C
1 2 5 2
2 4 5 5
3 1 5 3
4 4 4 3
5 3 5 5
6 1 5 3
I tried
require(data.table)
my.dt <- as.data.table(my.df)
my.dt[, row.min:=0] # without this: "Attempt to add new column(s) and set subset of rows at the same time"
system.time(
for (i in 1:dim(my.dt)[1]) my.dt[i, row.min:= min(A, B, C)]
)
On my system this takes ~400 seconds. It works, but I am not confident it is the best way to use data.table.
Am I using data.table correctly? Is there a more efficient way to do simple row-wise operations?
Or, just pmin.
my.dt <- as.data.table(my.df)
system.time(my.dt[,row.min:=pmin(A,B,C)])
# user system elapsed
# 0.02 0.00 0.01
head(my.dt)
# A B C row.min
# [1,] 2 5 2 2
# [2,] 4 5 5 4
# [3,] 1 5 3 1
# [4,] 4 4 3 3
# [5,] 3 5 5 3
# [6,] 1 5 3 1
After some discussion around row-wise first/last occurrences from column series in data.table, which suggested that melting first would be faster than a row-wise calculation, I decided to benchmark:
pmin (Matt Dowle's answer above), below as tm1
apply (Andrie's answer above), below as tm2
melting first, then min by group, below as tm3
so:
library(microbenchmark); library(data.table)
set.seed(1000)
b <- data.table(m=integer(), n=integer(), tm1 = numeric(), tm2 = numeric(), tm3 = numeric())
for (m in c(2.5,100)*1e5){
  for (n in c(3,50)){
    my.df <- sample(1:5, m*n, replace=TRUE)
    dim(my.df) <- c(m,n)
    my.df <- as.data.frame(my.df)
    names(my.df) <- c(LETTERS,letters)[1:n]
    my.dt <- as.data.table(my.df)
    tm1 <- mean(microbenchmark(my.dt[, foo := do.call(pmin, .SD)], times=30L)$time)/1e6
    my.dt <- as.data.table(my.df)
    tm2 <- mean(microbenchmark(apply(my.dt, 1, min), times=30L)$time)/1e6
    my.dt <- as.data.table(my.df)
    tm3 <- mean(microbenchmark(
      melt(my.dt[, id:=1:nrow(my.dt)], id.vars='id')[, min(value), by=id],
      times=30L
    )$time)/1e6
    b <- rbind(b, data.table(m, n, tm1, tm2, tm3))
  }
}
(I ran out of time to try more combinations), which gives us:
b
# m n tm1 tm2 tm3
# 1: 2.5e+05 3 16.20598 1000.345 39.36171
# 2: 2.5e+05 50 166.60470 1452.239 588.49519
# 3: 1.0e+07 3 662.60692 31122.386 1668.83134
# 4: 1.0e+07 50 6594.63368 50915.079 17098.96169
c <- melt(b, id.vars=c('m','n'))
library(ggplot2)
ggplot(c, aes(x=m, linetype=as.factor(n), col=variable, y=value)) + geom_line() +
ylab('Runtime (millisec)') + xlab('# of rows') +
guides(linetype=guide_legend(title='Number of columns'))
Although I knew apply (tm2) would scale poorly, I am surprised that pmin (tm1) scales so well, given that R is not really designed for row-wise operations. I couldn't identify a case where melt-min-by-group (tm3) should be preferred over pmin (tm1).
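For reference, the pmin call generalises to any number of columns without typing their names by using do.call over .SD; a small sketch (the na.rm argument is optional and only needed if missing values should be ignored):
my.dt <- as.data.table(my.df)
my.dt[, row.min := do.call(pmin, c(.SD, list(na.rm = TRUE)))]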
The classical way of doing row-wise operations in R is to use apply:
my.df$min <- apply(my.df, 1, min)
> head(my.df)
A B C min
1 2 5 4 2
2 4 3 1 1
3 1 1 5 1
4 4 1 5 1
5 3 3 4 3
6 1 1 1 1
On my machine, this operation takes about 0.25 of a second.

Count frequency of duplicated rows [duplicate]

I have a matrix with a large number of duplicates and would like to obtain a matrix with the unique rows and a frequency count for each unique row.
The example shown below solves this problem but is painfully slow.
rowsInTbl <- function(tbl, row){
  sum(apply(tbl, 1, function(x) all(x == row)))
}
colFrequency <- function(tblall){
  tbl <- unique(tblall)
  results <- matrix(nrow = nrow(tbl), ncol = ncol(tbl) + 1)
  results[, 1:ncol(tbl)] <- as.matrix(tbl)
  dimnames(results) <- list(c(rownames(tbl)), c(colnames(tbl), "Frequency"))
  freq <- apply(tbl, 1, function(x) rowsInTbl(tblall, x))
  results[, "Frequency"] <- freq
  return(results)
}
m <- matrix(c(1,2,3,4,3,4,1,2,3,4),ncol=2,byrow=T)
dimnames(m) <- list(letters[1:nrow(m)],c("c1","c2"))
print("Matrix")
print(m)
[1] "Matrix"
c1 c2
a 1 2
b 3 4
c 3 4
d 1 2
e 3 4
print("Duplicate frequency table")
print(colFrequency(m))
[1] "Duplicate frequency table"
c1 c2 Frequency
a 1 2 2
b 3 4 3
Here are the speed measurements of the answers of @Heroka and @m0h3n compared to my example. The matrix shown above was repeated 1000 times. data.table is clearly the fastest solution.
[1] "Duplicate frequency table - my example"
user system elapsed
0.372 0.000 0.371
[1] "Duplicate frequency table - data.table"
user system elapsed
0.008 0.000 0.008
[1] "Duplicate frequency table - aggregate"
user system elapsed
0.092 0.000 0.089
Looks like a job for data.table, as you need something that can aggregate quickly.
library(data.table)
m <- matrix(c(1,2,3,4,3,4,1,2,3,4),ncol=2,byrow=T)
mdt <- as.data.table(m)
res <- mdt[,.N, by=names(mdt)]
res
# > res
# V1 V2 N
# 1: 1 2 2
# 2: 3 4 3
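If a matrix (as asked for in the question) rather than a data.table is needed, one possible follow-up is simply:
as.matrix(res)
# V1 V2 N
# [1,] 1 2 2
# [2,] 3 4 3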
How about this using base R for extracting unique rows:
mat <- matrix(c(2,5,3,5,2,3,4,2,3,5,4,2,1,5,3,5), ncol = 2, byrow = T)
mat[!duplicated(mat),]
# [,1] [,2]
# [1,] 2 5
# [2,] 3 5
# [3,] 2 3
# [4,] 4 2
# [5,] 1 5
Extracting unique rows along with their frequencies:
m <- as.data.frame(mat)
aggregate(m, by=m, length)[1:(ncol(m)+1)]
# V1 V2 V1.1
# 1 4 2 2
# 2 2 3 1
# 3 1 5 1
# 4 2 5 1
# 5 3 5 3
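If a clearer name than V1.1 is wanted for the count column, a variant of the same aggregate call (a sketch, passing the counts as a named list) gives:
aggregate(list(Frequency = m$V1), by = m, length)
# V1 V2 Frequency
# 1 4 2 2
# 2 2 3 1
# 3 1 5 1
# 4 2 5 1
# 5 3 5 3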
