Custom bins per group in a data frame - R function

I would like to create bins for the variable "numbers" per category "name" inside a function, but I am having difficulties using the category name provided as a parameter inside the function. Maybe a data.table approach would be better.
set.seed(10)
b <- rnorm(10, sd = 1, mean = 10)
y <- runif(3)
pr <- y / sum(y)
names <- unlist(lapply(mapply(rep, LETTERS[1:3], 1:3), function(x) paste0(x, collapse = "")))
x <- sample(names, 10, replace = TRUE, prob = pr)
df <- data.frame(name = x, numbers = b)
df
# working, but without bin limits per category (not desired)
# and with "numbers" hard-coded in cut (not desired)
library(dplyr)
binfunction1 <- function(df, colgroup1, varcount, binsize) {
  breaks <- seq(7, 15, by = binsize)  # limits by colgroup not implemented
  new <- df %>%
    group_by_(colgroup1) %>%
    mutate(bin = cut(numbers, breaks = breaks,
                     labels = 1:(length(breaks) - 1)))
  return(new)
}
binfunction1(df, "name", "numbers", 0.5)
#      name   numbers    bin
#    <fctr>     <dbl> <fctr>
# 1      BB 10.018746      7
# 2       A  9.815747      6
# 3     CCC  8.628669      4
# 4     CCC  9.400832      5
# 5      BB 10.294545      7
# 6     CCC 10.389794      7
# 7       A  8.791924      4
# 8       A  9.636324      6
# 9       A  8.373327      3
# 10      A  9.743522      6
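As an aside, group_by_() is deprecated in recent versions of dplyr. A minimal sketch of the same idea with tidy evaluation (assuming dplyr >= 1.0; per-group limits are still not implemented here):
library(dplyr)
binfunction1b <- function(df, colgroup1, varcount, binsize) {
  breaks <- seq(7, 15, by = binsize)  # fixed limits, as in binfunction1
  df %>%
    group_by(across(all_of(colgroup1))) %>%
    mutate(bin = cut(.data[[varcount]], breaks = breaks,
                     labels = seq_len(length(breaks) - 1))) %>%
    ungroup()
}
binfunction1b(df, "name", "numbers", 0.5)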

Not the most elegant solution, but is the outcome something you are after? (I didn't quite understand your question)
binfunction3 <- function(x, colgroup1, varcount, binsize) {
  tmp <- split(x, x[[colgroup1]], drop = TRUE)
  tp <- lapply(tmp, function(k) {
    # breaks span each group's own range, padded by 10% on either side
    breaks <- seq(min(k[[varcount]]) * 0.9, max(k[[varcount]]) * 1.1, by = binsize)
    cbind(k, data.frame(bin = cut(k[[varcount]], breaks, labels = 1:(length(breaks) - 1))))
  })
  tp <- do.call(rbind, tp)
  rownames(tp) <- gsub("[[:alpha:]]*\\.", "", rownames(tp))  # strip the group prefix from row names
  return(tp[rownames(x), ])  # restore the original row order
}
binfunction3(df, "name", "numbers", 0.5)
# name numbers bin
# 1 A 10.018746 5
# 2 CCC 9.815747 5
# 3 CCC 8.628669 2
# 4 BB 9.400832 2
# 5 A 10.294545 6
# 6 BB 10.389794 4
# 7 A 8.791924 3
# 8 CCC 9.636324 4
# 9 A 8.373327 2
# 10 A 9.743522 5

My answer is based on Mikko's, but allows better control over the min and max limits of the breaks and the bin size.
binfunctionnew <- function(x, colgroup, varcount, binexp) {
  tmp <- split(x, x[colgroup], drop = TRUE)
  tp <- lapply(tmp, function(k) {
    bin <- cut(k[, varcount],
               breaks = seq(min(k[, varcount]) * (1 - 10^(-(binexp + 1))),
                            max(k[, varcount]) * (1 + 10^(-(binexp - 2))),
                            by = 10^(-binexp)),
               labels = FALSE)
    cbind(k, data.frame(bin = bin))
  })
  tp <- do.call(rbind, tp)
  return(tp)
}
# examples (outputs shown side by side):
binfunctionnew(df, "name", "numbers", 1)
binfunctionnew(df, "name", "numbers", 0)
# name numbers bin name numbers bin
# A.1 A 10.018746 18 A.1 A 10.018746 3
# A.5 A 10.294545 21 A.5 A 10.294545 3
# A.7 A 8.791924 6 A.7 A 8.791924 2
# A.9 A 8.373327 1 A.9 A 8.373327 1
# A.10 A 9.743522 15 A.10 A 9.743522 3
# BB.4 BB 9.400832 1 BB.4 BB 9.400832 1
# BB.6 BB 10.389794 11 BB.6 BB 10.389794 2
# CCC.2 CCC 9.815747 13 CCC.2 CCC 9.815747 3
# CCC.3 CCC 8.628669 1 CCC.3 CCC 8.628669 1
# CCC.8 CCC 9.636324 11 CCC.8 CCC 9.636324 2
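Since the question mentions data.table: a minimal sketch of the same per-group binning with data.table (assuming the min*0.9 / max*1.1 padding from binfunction3, with binsize 0.5 hard-coded for brevity):
library(data.table)
dt <- as.data.table(df)
dt[, bin := {
  # breaks computed per group, padded 10% on either side
  breaks <- seq(min(numbers) * 0.9, max(numbers) * 1.1, by = 0.5)
  cut(numbers, breaks, labels = FALSE)
}, by = name]
dt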

Related

apply similar variable to multiple datasets in R

I have 6 data frames named dat1 to dat6, and I want to add a variable region, labelling them in a similar way, like this:
dat1$region <- paste("NE-1")
dat2$region <- paste("NE-2")
dat3$region <- paste("NE-3")
dat4$region <- paste("NE-4")
dat5$region <- paste("NE-5")
How can I write this code in a more concise way, using apply or a for-loop?
Thanks!!
One option is to use the get and assign functions in a for-loop.
Sample data:
dat1 <- data.frame(id=1:4, region = letters[1:4])
dat2 <- data.frame(id=5:8, region = letters[5:8])
dat3 <- data.frame(id=9:12, region = letters[9:12])
dat4 <- data.frame(id=13:16, region = letters[13:16])
dat5 <- data.frame(id=17:20, region = letters[17:20])
dat1
# id region
# 1 1 a
# 2 2 b
# 3 3 c
# 4 4 d
Apply for-loop:
for (i in 1:5) {
  name <- paste("dat", i, sep = "")
  temp <- get(name)                        # fetch the data frame by name
  temp$region <- paste("NE", i, sep = "-")
  assign(name, temp)                       # write it back under the same name
}
Verify results:
dat1
# id region
# 1 1 NE-1
# 2 2 NE-1
# 3 3 NE-1
# 4 4 NE-1
dat5
# id region
# 1 17 NE-5
# 2 18 NE-5
# 3 19 NE-5
# 4 20 NE-5
Keep all data frames in a list, then use lapply:
# example dataframes
dat1 <- cars[1:2, ]
dat2 <- cars[3:4, ]
dat3 <- cars[5:6, ]
myList <- list(dat1, dat2, dat3)
# myList
# [[1]]
# speed dist
# 1 4 2
# 2 4 10
#
# [[2]]
# speed dist
# 3 7 4
# 4 7 22
#
# [[3]]
# speed dist
# 5 8 16
# 6 9 10
Then it is easier to do repetitive operations. Loop through the list, add region column:
res <- lapply(seq_along(myList), function(i) {
  x <- myList[[i]]
  x$region <- paste0("NE-", i)
  x
})
res
# [[1]]
# speed dist region
# 1 4 2 NE-1
# 2 4 10 NE-1
#
# [[2]]
# speed dist region
# 3 7 4 NE-2
# 4 7 22 NE-2
#
# [[3]]
# speed dist region
# 5 8 16 NE-3
# 6 9 10 NE-3
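If you need the modified data frames back as individual objects dat1, dat2, ... rather than as a list, list2env() can write them back to the global environment (a small follow-up sketch):
names(res) <- paste0("dat", seq_along(res))  # name each list element
list2env(res, envir = .GlobalEnv)            # recreate dat1, dat2, ...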
How about this (assuming all your items start with dat and end with a unique identifier string):
dat_names <- ls()[grepl("^dat", ls())]
dat_ID <- sapply(dat_names, function(d) gsub("dat", "", d))
for (d in seq_along(dat_names)) {
  tmp <- get(dat_names[d])   # dat_names is a character vector, so get()/assign() are needed
  tmp$region <- paste("NE-", dat_ID[d], sep = "")
  assign(dat_names[d], tmp)
}

Preserving non-numerical columns when using combn in R

I have a data frame similar to the following format:
Doc Category val
A aa 1
B ab 6
C ab 3
D cc 6.....
I am using the following code to identify all combinations of val and their sums, and then extract the rows that add up to a target sum I have already identified.
# all combinations
res <- Map(combn, list(val), seq_along(val), simplify = FALSE)
x <- unlist(res, recursive = FALSE)
z <- lapply(x, sum)
My issue is determining the best way to preserve the character columns in the data frame, as the code above only returns numeric values. The way I am doing it now is a mapping based on val, which normally works fine; however, I can run into issues when there are duplicated values.
For example, if my target sum is 7, I eventually want output that looks like this (there are other ways to get to this value, but for now just returning the first instance works):
Doc Category val
A aa 1
B ab 6
Is there a better way to map to the non-numerical columns to achieve this output?
Would this solution work for you:
df <- data.frame(Doc = LETTERS[1:7],
                 Category = c("aa", "ab", "ab", "cc", "ca", "cb", "bb"),
                 val = c(1, 6, 3, 6, 4, 5, 2),
                 stringsAsFactors = FALSE)
df
# Doc Category val
# 1 A aa 1
# 2 B ab 6
# 3 C ab 3
# 4 D cc 6
# 5 E ca 4
# 6 F cb 5
# 7 G bb 2
target.sum <- 7
# create an "id" variable that is equal to the index of all rows
df$id <- seq_along(df$val)
id.res <- Map(combn, list(df$id), seq_along(df$id), simplify = FALSE)
x <- unlist(id.res, recursive = FALSE)
# replace every index set whose values in column val
# do not sum to the target value with NA
x.list <- lapply(x, FUN = function(x) {
  if (sum(df$val[x]) == target.sum) df[x, ] else NA
})
# remove missing values
x.list <- x.list[!is.na(x.list)]
x.list
# [[1]]
# Doc Category val id
# 1 A aa 1 1
# 2 B ab 6 2
#
# [[2]]
# Doc Category val id
# 1 A aa 1 1
# 4 D cc 6 4
#
# [[3]]
# Doc Category val id
# 3 C ab 3 3
# 5 E ca 4 5
#
# [[4]]
# Doc Category val id
# 6 F cb 5 6
# 7 G bb 2 7
#
# [[5]]
# Doc Category val id
# 1 A aa 1 1
# 5 E ca 4 5
# 7 G bb 2 7
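For what it's worth, the same filtering step can be written with Filter() over the index sets, which avoids the NA placeholders (a sketch reusing the df, x, and target.sum objects defined above):
# keep only the index sets whose values sum to the target
keep <- Filter(function(idx) sum(df$val[idx]) == target.sum, x)
x.list <- lapply(keep, function(idx) df[idx, ])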

remove cases following certain other cases

I have a dataframe, say
df = data.frame(x = c("a","a","b","b","b","c","d","t","c","b","t","c","t","a","a","b","d","t","t","c"),
y = c(2,4,5,2,6,2,4,5,2,6,2,4,5,2,6,2,4,5,2,6))
I want to remove only those rows in which one or more t's are directly in between a d and a c; in all other cases I want to retain the rows. So for this example, I would like to remove the t's on rows 8, 18 and 19, but keep the others. I have thousands of cases, so doing this manually would be a true horror. Any help is very much appreciated.
One option would be to use rle to get runs of the same string, and then use sapply to check forward/backward and return all the positions you want to drop:
rle_vals <- rle(as.character(df$x))
drop <- unlist(sapply(2:(length(rle_vals$values) - 1),  # loop over interior runs
                      function(i, vals, lengths) {
                        # check if the run is "t", the previous is "d" and the next is "c"
                        if (vals[i] == "t" & vals[i - 1] == "d" & vals[i + 1] == "c") {
                          (sum(lengths[1:(i - 1)]) + 1):sum(lengths[1:i])  # get row numbers
                        }
                      }, vals = rle_vals$values, lengths = rle_vals$lengths))
drop
#[1] 8 18 19
df[-drop,]
# x y
#1 a 2
#2 a 4
#3 b 5
#4 b 2
#5 b 6
#6 c 2
#7 d 4
#9 c 2
#10 b 6
#11 t 2
#12 c 4
#13 t 5
#14 a 2
#15 a 6
#16 b 2
#17 d 4
#20 c 6
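An equivalent way to recover the row numbers from the runs, for anyone who prefers vectorised lookups over indexing inside sapply (a sketch reusing rle_vals from above):
ends <- cumsum(rle_vals$lengths)            # last row of each run
starts <- ends - rle_vals$lengths + 1       # first row of each run
is_mid <- rle_vals$values == "t" &
  c(NA, head(rle_vals$values, -1)) == "d" & # previous run is "d"
  c(tail(rle_vals$values, -1), NA) == "c"   # next run is "c"
drop2 <- unlist(Map(seq, starts[which(is_mid)], ends[which(is_mid)]))
drop2
# [1]  8 18 19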
This also works: collapse to a string, identify groups of t's between a d and a c (or between a c and a d; not sure whether you wanted that option as well), then work out where they are and remove the rows as appropriate.
df <- data.frame(x = c("a","a","b","b","b","c","d","t","c","b","t","c","t","a","a","b","d","t","t","c"),
                 y = c(2,4,5,2,6,2,4,5,2,6,2,4,5,2,6,2,4,5,2,6),
                 stringsAsFactors = FALSE)
dfs <- paste0(df$x, collapse = "")  # collapse to a string
dfs2 <- do.call(rbind, lapply(list(gregexpr("dt+c", dfs), gregexpr("ct+d", dfs)),
                              function(L) data.frame(x = L[[1]], y = attr(L[[1]], "match.length"))))
dfs2 <- dfs2[dfs2$x > 0, ]  # remove any -1 values (if string not found)
drop <- unlist(mapply(function(a, b) (a + 1):(a + b - 2), dfs2$x, dfs2$y))
df2 <- df[-drop, ]
Here is another solution with base R:
df <- data.frame(x = c("a","a","b","b","b","c","d","t","c","b","t","c","t","a","a","b","d","t","t","c"),
                 y = c(2,4,5,2,6,2,4,5,2,6,2,4,5,2,6,2,4,5,2,6))
s <- paste0(df$x, collapse = "")
L <- c(NA, NA)
while (TRUE) {
  r <- regexec("dt+c", s)[[1]]
  if (r[1] == -1) break
  L <- rbind(L, c(pos = r[1] + 1, length = attr(r, "match.length") - 2))
  s <- sub("d(t+)c", "x\\1x", s)  # mask the match so the next iteration finds the following one
}
L <- L[-1, , drop = FALSE]  # drop the NA seed row (drop = FALSE keeps a matrix even for one match)
drop <- unlist(apply(L, 1, function(x) seq(from = x[1], len = x[2])))
df[-drop, ]
# > drop
# 8 18 19
# > df[-drop, ]
# x y
# 1 a 2
# 2 a 4
# 3 b 5
# 4 b 2
# 5 b 6
# 6 c 2
# 7 d 4
# 9 c 2
# 10 b 6
# 11 t 2
# 12 c 4
# 13 t 5
# 14 a 2
# 15 a 6
# 16 b 2
# 17 d 4
# 20 c 6
With gregexpr() it is shorter:
s <- paste0(df$x, collapse = "")
g <- gregexpr("dt+c", s)[[1]]
L <- data.frame(pos = g + 1, length = attr(g, "match.length") - 2)
drop <- unlist(apply(L, 1, function(x) seq(from = x[1], len = x[2])))
df[-drop, ]

R data frame manipulation

Suppose I have a data frame that looks like this:
# start end motif
# 2 6 a
# 10 15 b
# 30 35 c
How would I create a data frame that fills in the remaining start and end locations, like so, up to a certain number Max_end:
Max_end <- 33
# start end motif
# 0 2 na # <- 0-2 are filled in because it is not in the original data frame
# 2 6 a # <- 2-6 are in the original
# 6 10 na # <- 6-10 is not
# 10 15 b # <- 10-15 is
# 15 30 na # and so on
# 30 33 c
And further, how would I calculate the distance between the start and end locations, producing a two-column data frame like this:
# Length motif
# 2 na
# 4 a
# 4 na
# 5 b
# 15 na
# 3 c
Currently this is how I am doing it; it is very inefficient:
library(data.table)
library(stringi)
f <- fread('ABC.txt', header = FALSE, skip = 1)$V1
f <- paste(f, collapse = "")
motifs <- c('GATC', 'CTGCAG', 'ACCACC', 'CC(A|T)GG', 'CCAC.{8}TGA(C|T)')
v <- na.omit(data.frame(do.call(rbind, lapply(stri_locate_all_regex(f, motifs), unlist))))
v <- v[order(v[, 1]), ]
v2difference <- "blah"
for (i in 2:nrow(v)) {
  if (v[i, 1] > v[i - 1, 2] + 2) { v2difference[i] <- v[i, 1] - v[i - 1, 2] - 2 }
}
v2difference[1] <- v[1, 1]
v2 <- data.frame(Order = seq(1, 2 * nrow(v), 2), Lengths = matrix(v2difference, ncol = 1), Motifs = "na")
v1 <- data.frame(Order = seq(2, 2 * nrow(v), 2), Lengths = (v$end - v$start + 1),
                 Motifs = na.omit(unlist(stri_extract_all_regex(f, motifs))))
V <- data.frame(Track = 1, rbind(v1, v2))
V <- V[order(V$Order), ]
B <- V[, !(names(V) %in% "Order")]
# reconstructing the example data frame from the question
dat <- data.frame(start = c(2, 10, 30), end = c(6, 15, 35), motif = c("a", "b", "c"))
Max_end <- 33
breaks <- c(0, t(as.matrix(dat[, 1:2])), Max_end)  # get endpoints
breaks <- breaks[breaks <= Max_end]
merge(dat, data.frame(start = breaks[-length(breaks)], end = breaks[-1]), all = TRUE)
# start end motif
# 1 0 2 <NA>
# 2 2 6 a
# 3 6 10 <NA>
# 4 10 15 b
# 5 15 30 <NA>
# 6 30 33 <NA>
# 7 30 35 c
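To also get the Length/motif summary from the question, you could difference the endpoints of the merged result (a sketch; note that the duplicated final interval from row 7 above carries over, so you would still need to decide how to treat the truncated "c" interval):
res <- merge(dat, data.frame(start = breaks[-length(breaks)], end = breaks[-1]), all = TRUE)
data.frame(Length = res$end - res$start, motif = res$motif)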
To specify both a start and an end point, you could do:
Max_end <- 33
Max_start <- 10
breaks <- unique(c(Max_start, t(as.matrix(dat[, 1:2])), Max_end))
breaks <- breaks[breaks <= Max_end & breaks >= Max_start]
merge(dat, data.frame(start = breaks[-length(breaks)], end = breaks[-1]), all.y = TRUE)
# start end motif
# 1 10 15 b
# 2 15 30 <NA>
# 3 30 33 <NA>
Note: this doesn't include "c" in the shortened final interval; you would need to decide whether that value gets included when the interval changes.

R: Euclidean distances between objects in a group

I want to create a matrix of similarities based on two identifiers; consider the following data frame:
x1 <- c(2,2,2,3,1,2,4,6,4)
y1 <- c(5,4,3,3,4,2,1,6,3)
x2 <- c(8,2,7,3,1,2,2,2,6)
y2 <- c(1,3,3,3,1,2,4,3,8)
x3 <- c(4,4,1,2,4,6,3,2,9)
y3 <- c(1,2,3,3,1,2,4,6,1)
id1 <- c("a","a","a","a","b","b","b","b","b")
id2 <- c(2002,2002,2003,2003,2002,2002,2003,2003,2003)
dat <- data.frame(x1,y1,x2,y2,x3,y3,id1,id2)
For the groups marked by id1 and id2, I want to compute the Euclidean distance, sqrt((x1a-x1b)^2 + (y1a-y1b)^2 + ... + (y3a-y3b)^2), between the rows of the dataset. Ideally, there would be a new variable that indicates the distance of each row to each other row with the same id1 and id2. Please note that groups can have different numbers of members; for instance, the b-group in 2003 has three cases.
Any advice would be great!!!
I think it would be a good idea first to distinguish the lines whose distances you want to calculate. For example, for id1 == "b" and id2 == 2003 you have 3 lines, and you want to calculate 3 different distances (one between each possible pair). So let's first assign each pair a unique id.
f <- function(n) {
  # Returns a vector
  # 1, 2, 1, 3, ..., 1, n, 2, 3, 2, 4, ..., 2, n, ..., (n-1), n
  m <- matrix(ncol = 2, nrow = n * (n - 1) / 2)
  m[, 1] <- rep(1:(n - 1), (n - 1):1)
  m[, 2] <- unlist(lapply(2:n, function(x) x:n))
  as.numeric(t(m))
}
# Alternatively,
# f <- function(n) {
#   d <- expand.grid(a = 1:n, b = 1:n)
#   d <- d[d$a < d$b, ]
#   unlist(d)
# }
# but this is slower
# Using plyr...
library(plyr)
dat <- ddply(dat, .(id1, id2), function(d) {
  d <- d[f(nrow(d)), ]
  d$id3 <- paste0(d$id1, rep(1:(nrow(d) / 2), each = 2))
  d
})
# ...or using base R
dat <- do.call(rbind,
               by(dat, list(dat$id1, dat$id2), function(d) {
                 d <- d[f(nrow(d)), ]
                 d$id3 <- paste0(d$id1, rep(1:(nrow(d) / 2), each = 2))
                 d
               }))
Now there will be only two lines for each (id3, id2) pair, and you can calculate the distances as follows:
# Using plyr
result <- ddply(dat, .(id3, id2), function(d) {
  d <- d[paste0(rep(c("x", "y"), 3), 1:3)]
  d$dist <- sqrt(sum((d[1, ] - d[2, ])^2))
  d
})
# Base R
result <- do.call(rbind,
                  by(dat[paste0(rep(c("x", "y"), 3), 1:3)],
                     list(dat$id3, dat$id2),
                     function(d) {
                       d$dist <- sqrt(sum((d[1, ] - d[2, ])^2))
                       d
                     }))
result[c("id3", "id2")] <- dat[c("id3", "id2")]
result
result
# x1 y2 x3 y1 x2 y3 dist id3 id2
# 1 2 1 4 5 8 1 6.480741 a1 2002
# 2 2 3 4 4 2 2 6.480741 a1 2002
# 5 1 1 4 4 1 1 3.464102 b1 2002
# 6 2 2 6 2 2 2 3.464102 b1 2002
# 3 2 3 1 3 7 3 4.242641 a1 2003
# 4 3 3 2 3 3 3 4.242641 a1 2003
# 7 4 4 3 1 2 4 5.916080 b1 2003
# 8 6 3 2 6 2 6 5.916080 b1 2003
# 7.1 4 4 3 1 2 4 9.000000 b2 2003
# 9 4 8 9 3 6 1 9.000000 b2 2003
# 8.1 6 3 2 6 2 6 11.313708 b3 2003
# 9.1 4 8 9 3 6 1 11.313708 b3 2003
Maybe this could be helpful (selecting only the six numeric columns, since dist() needs numeric input):
dist(dat[which(dat[, "id1"] == "a" & dat[, "id2"] == "2002"), 1:6], method = "euclidean")
dist(dat[which(dat[, "id1"] == "b" & dat[, "id2"] == "2003"), 1:6], method = "euclidean")
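To run dist() over every (id1, id2) group at once instead of typing each combination by hand, by() can wrap the same call (a sketch, again using only the six numeric columns):
# one distance matrix per (id1, id2) group
by(dat[, 1:6], list(dat$id1, dat$id2),
   function(d) dist(d, method = "euclidean"))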
