Bin formation in an R data.frame

I have a data.frame with two columns:
category quantity
a 20
b 30
c 100
d 10
e 1
f 23
g 3
h 200
I need to write a function with two parameters, dataframe and bin_size, which runs a cumsum over the quantity column, splits the subsequent row if the cumsum exceeds the bin_size, and adds a running bin number as an additional column.
Say, calling this:
function(dataframe, 50)
on the above example should give me:
category quantity cumsum bin_nbr
a 20 20 1
b 30 50 1
c 50 50 2
c 50 50 3
d 10 10 4
e 1 11 4
f 23 34 4
g 3 37 4
h 13 50 4
h 50 50 5
h 50 50 6
h 50 50 7
h 37 37 8
Explanation:
rows a + b sum up to 50 --> bin_nbr 1
row c is 100 -> split into 2 rows of 50 -> bin_nbr 2, bin_nbr 3
rows d, e, f, g sum up to 37 -> bin_nbr 4
I need another 13 from row h to fill bin_nbr 4 up to 50
The rest of the quantity from h is split into 4 bins -> bin_nbr 5, 6, 7, 8

I couldn't think of a clean way to do this with apply/data.table etc. since there is an inter-row dependency and the data frame changes size. You could probably do it in an iterative/recursive manner, but I felt it would be quicker to just write the loop. One challenge is that it is difficult to know the final size of your object, so this is likely to be slow. You can mitigate the problem somewhat by switching from a data frame to a matrix (the code should work fine, except for the transform bits) if performance is an issue in this application.
fun <- function(df, binsize) {
  df$cumsum <- cumsum(df$quantity)
  df$bin <- 1
  i <- 1
  repeat {
    if ((extra <- (df[i, "cumsum"] - binsize)) > 0) {  # Bin finished halfway through
      top <- if (i > 1L) df[1L:(i - 1L), ] else df[0L, ]
      mid <- transform(df[i, ], quantity = quantity - extra, cumsum = cumsum - extra)
      bot <- transform(df[i, ], quantity = extra, cumsum = extra, bin = bin + 1L)
      end <- if (i >= nrow(df)) df[0L, ] else df[(i + 1L):nrow(df), ]
      end <- transform(end, cumsum = cumsum(end$quantity) + extra, bin = bin + 1L)
      df <- rbind(top, mid, bot, end)
    } else if (extra == 0 && nrow(df) > i) {  # Bin finished cleanly
      df[(i + 1L):nrow(df), ]$cumsum <- df[(i + 1L):nrow(df), ]$cumsum - binsize
      df[(i + 1L):nrow(df), ]$bin <- df[(i + 1L):nrow(df), ]$bin + 1L
    }
    if (nrow(df) < (i <- i + 1)) break
  }
  rownames(df) <- seq(len = nrow(df))
  df
}
fun(df, 50)
# category quantity cumsum bin
# 1 a 20 20 1
# 2 b 30 50 1
# 3 c 50 50 2
# 4 c 50 50 3
# 5 d 10 10 4
# 6 e 1 11 4
# 7 f 23 34 4
# 8 g 3 37 4
# 9 h 13 50 4
# 10 h 50 50 5
# 11 h 50 50 6
# 12 h 50 50 7
# 13 h 37 37 8

Another solution with a loop:
DF <- read.table(text="category quantity
a 20
b 30
c 100
d 10
e 1
f 23
g 3
h 200", header=TRUE)
bin_size <- 50
n_bin <- ceiling(sum(DF$quantity) / bin_size)
DF$bin <- findInterval(cumsum(DF$quantity) - 1, c(0, seq_len(n_bin) * bin_size))
DF$cumsum <- cumsum(DF$quantity)
result <- lapply(seq_along(DF[, 1]), function(i, df) {
  if (i == 1) {
    d <- df[i, "bin"]
  } else {
    d <- df[i, "bin"] - df[i - 1, "bin"]
  }
  if (d > 1) {
    res <- data.frame(
      category = df[i, "category"],
      bin_nbr = df[i, "bin"] - seq_len(d + 1) + 1
    )
    res[, "quantity"] <- bin_size
    if (i != 1) {
      res[nrow(res), "quantity"] <- df[i - 1, "bin"] * bin_size - df[i - 1, "cumsum"]
    } else {
      res[nrow(res), "quantity"] <- 0
    }
    res[1, "quantity"] <- df[i, "quantity"] - sum(res[-1, "quantity"])
    return(res[res$quantity > 0, ])
  } else {
    return(data.frame(
      category = df[i, "category"],
      quantity = df[i, "quantity"],
      bin_nbr = df[i, "bin"]
    ))
  }
}, df = DF)
res <- do.call(rbind, result)
res <- res[order(res$category, res$bin_nbr), ]
library(plyr)
res <- ddply(res, .(bin_nbr), transform, cumsum = cumsum(quantity))
res
# category quantity bin_nbr cumsum
# 1 a 20 1 20
# 2 b 30 1 50
# 3 c 50 2 50
# 4 c 50 3 50
# 5 d 10 4 10
# 6 e 1 4 11
# 7 f 23 4 34
# 8 g 3 4 37
# 9 h 13 4 50
# 10 h 50 5 50
# 11 h 50 6 50
# 12 h 50 7 50
# 13 h 37 8 37

This amounts to merging the bin boundaries with the data, which gives this loop-free solution:
library(zoo)
fun <- function(DF, binsize = 50) {
  nr <- nrow(DF)
  DF2 <- data.frame(cumsum = seq(0, sum(DF$quantity), binsize) + binsize, bin_nbr = 1:nr)
  DF.cs <- transform(DF, cumsum = cumsum(DF$quantity))
  m <- na.locf(merge(DF.cs, DF2, all = TRUE), fromLast = TRUE)
  m$bin_nbr <- as.numeric(m$bin_nbr)
  cs <- as.numeric(m$cumsum)
  m$quantity <- c(cs[1], diff(cs))
  m$cumsum <- ave(m$quantity, m$bin_nbr, FUN = cumsum)
  na.omit(m)[c("category", "quantity", "cumsum", "bin_nbr")]
}
giving:
> fun(DF)
category quantity cumsum bin_nbr
1 a 20 20 1
2 b 30 50 1
3 c 50 50 2
4 c 50 50 3
5 d 10 10 4
6 e 1 11 4
7 f 23 34 4
8 g 3 37 4
9 h 13 50 4
10 h 50 50 5
11 h 50 50 6
12 h 50 50 7
13 h 37 37 8
Note: For purposes of reproducing the result above, this is the input we used:
Lines <- "category quantity
a 20
b 30
c 100
d 10
e 1
f 23
g 3
h 200
"
DF <- read.table(text = Lines, header = TRUE, as.is = TRUE)
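For comparison, here is a base R sketch of the same merge-the-boundaries idea (my own addition, not part of this answer; it relies on findInterval()'s left.open argument, available in R >= 3.3.0):
fun_base <- function(DF, binsize = 50) {
  cs <- cumsum(DF$quantity)
  # union of the original row boundaries and the bin boundaries, in order
  brk <- sort(unique(c(cs, seq(binsize, sum(DF$quantity), by = binsize))))
  out <- data.frame(
    # the row whose span (cs[i-1], cs[i]] contains each segment end
    category = DF$category[findInterval(brk, cs, left.open = TRUE) + 1L],
    quantity = diff(c(0, brk)),
    bin_nbr  = ceiling(brk / binsize)
  )
  out$cumsum <- ave(out$quantity, out$bin_nbr, FUN = cumsum)
  out[c("category", "quantity", "cumsum", "bin_nbr")]
}
fun_base(DF)  # should reproduce the table above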

Related

R: Counting the Frequencies of Coin Flips

I am working with the R programming language.
I simulated this dataset which contains 1000 coin flips - then I calculated the number of "2 Flip Sequences":
Coin <- c('H', 'T')
Results = sample(Coin,1000, replace = TRUE)
My_Data = data.frame(id = 1:1000, Results)
Pairs = data.frame(first = head(My_Data$Results, -1), second = tail(My_Data$Results, -1))
Final = as.data.frame(table(Pairs))
first second Freq
1 H H 255
2 T H 245
3 H T 246
4 T T 253
I am curious - is it possible to extend the above code for "3 Flip Sequences"?
For example - I tried modifying parts of the code to see how the results change (and hoped to stumble across the correct way to write this code):
# First Attempt
Pairs = data.frame(first = head(My_Data$Results, -1), second = head(My_Data$Results, -1) , third = tail(My_Data$Results, -1))
Final = as.data.frame(table(Pairs))
first second third Freq
1 H H H 255
2 T H H 245
3 H T H 0
4 T T H 0
5 H H T 0
6 T H T 0
7 H T T 246
8 T T T 253
# Second Attempt
Pairs = data.frame(first = head(My_Data$Results, -1), second = tail(My_Data$Results, -1) , third = tail(My_Data$Results, -1))
Final = as.data.frame(table(Pairs))
first second third Freq
1 H H H 255
2 T H H 0
3 H T H 0
4 T T H 245
5 H H T 246
6 T H T 0
7 H T T 0
8 T T T 253
I am not sure whether either of these options is correct.
In general, I am looking to understand the logic of how to adapt the above code to an "arbitrary number of coin flips" (e.g. "4 flip sequences", "5 flip sequences", etc.).
Also, this might not be the most efficient way to calculate these frequencies - I would also be interested in learning about other approaches that might be more efficient (e.g. as the overall size of the data increases).
Thanks!
It might be helpful to work with strings.
coin <- c("H", "T")
results <- sample(coin, 1000, replace = TRUE)
Then to get sequence counts (assuming overlapping sequences also count) for triples, we could do something like:
triples <- table(
  sapply(
    1:(length(results) - 2),  # all N - 2 overlapping windows (using - 3 would drop the final triple)
    function(i) sprintf(
      "%s%s%s",
      results[i],
      results[i + 1],
      results[i + 2]
    )
  )
)
which gives me something like:
HHH HHT HTH HTT THH THT TTH TTT
132 129 138 115 129 124 116 114
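A vectorized sketch of the same overlapping count (my addition, not part of the original answer): base R's embed() builds all windows at once, with columns in reverse time order, so we flip them before tabulating:
trip <- as.data.frame(embed(results, 3)[, 3:1])
names(trip) <- c("first", "second", "third")
as.data.frame(table(trip))
This also yields the first/second/third/Freq layout from the question directly.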
The sprintf idea could be generalized fairly easily, for example:
n_sequences <- function(n, results) {
  helper <- function(i, n) if (n < 1) "" else sprintf(
    "%s%s",
    helper(i, n - 1),
    results[i + n - 1]
  )
  result <- data.frame(
    table(
      sapply(
        1:(length(results) - n + 1),
        function(i) helper(i, n)
      )
    )
  )
  colnames(result) <- c("Sequence", "Frequency")
  result
}
For example:
n_sequences(5, results)
Gives me something like:
Sequence Frequency
1 HHHHH 34
2 HHHHT 31
3 HHHTH 36
4 HHHTT 31
5 HHTHH 35
6 HHTHT 36
7 HHTTH 20
8 HHTTT 37
9 HTHHH 35
10 HTHHT 34
11 HTHTH 41
12 HTHTT 27
13 HTTHH 27
14 HTTHT 24
15 HTTTH 34
16 HTTTT 30
17 THHHH 31
18 THHHT 36
19 THHTH 36
20 THHTT 26
21 THTHH 34
22 THTHT 32
23 THTTH 31
24 THTTT 27
25 TTHHH 32
26 TTHHT 28
27 TTHTH 25
28 TTHTT 31
29 TTTHH 33
30 TTTHT 31
31 TTTTH 30
32 TTTTT 20
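The recursion in helper() can also be avoided; a non-recursive sketch of the same function (my addition) pastes each window directly:
n_sequences2 <- function(n, results) {
  seqs <- vapply(
    1:(length(results) - n + 1),
    function(i) paste0(results[i:(i + n - 1)], collapse = ""),
    character(1)
  )
  setNames(as.data.frame(table(seqs)), c("Sequence", "Frequency"))
}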
You could first cut the row indices along 3 + 1 breaks and split the results along the levels. The interaction of the three pieces can then be tabled to get the result.
My_Data$cut3 <- cut(seq_len(nrow(My_Data)), seq.int(1, nrow(My_Data), length.out=3 + 1), include.lowest=TRUE)
(res <- interaction(split(My_Data$Results, My_Data$cut3)) |> table() |> as.data.frame())
# Var1 Freq
# 1 H.H.H 51
# 2 T.H.H 58
# 3 H.T.H 43
# 4 T.T.H 49
# 5 H.H.T 38
# 6 T.H.T 51
# 7 H.T.T 64
# 8 T.T.T 46
To get the desired output, we can strsplit Var1.
strsplit(as.character(res$Var1), '\\.') |> do.call(what = rbind) |>
  cbind.data.frame(res$Freq) |> setNames(c('first', 'second', 'third', 'Freq'))
# first second third Freq
# 1 H H H 51
# 2 T H H 58
# 3 H T H 43
# 4 T T H 49
# 5 H H T 38
# 6 T H T 51
# 7 H T T 64
# 8 T T T 46
Note that the nrow of your data should be divisible by 3.
Edit
To generalize, we may write a small function.
f <- \(x, n) {
  ct <- cut(seq_len(nrow(x)), seq.int(1L, nrow(x), length.out = n + 1L), include.lowest = TRUE)
  res <- interaction(split(x$Results, ct)) |> table() |> as.data.frame()
  strsplit(as.character(res$Var1), '\\.') |> do.call(what = rbind) |>
    cbind.data.frame(res$Freq) |> setNames(c(LETTERS[seq_len(n)], 'Freq'))
}
f(My_Data, 4)
# A B C D Freq
# 1 H H H H 13
# 2 T H H H 25
# 3 H T H H 18
# 4 T T H H 17
# 5 H H T H 18
# 6 T H T H 15
# 7 H T T H 21
# 8 T T T H 24
# 9 H H H T 26
# 10 T H H T 15
# 11 H T H T 16
# 12 T T H T 18
# 13 H H T T 22
# 14 T H T T 18
# 15 H T T T 10
# 16 T T T T 24
Data:
set.seed(42)
My_Data <- data.frame(id=1:1200, Results=sample(c('H', 'T'), 1200, replace=TRUE))
A slightly generalized solution with tidyverse tools. Change the sets variable for longer or shorter sequences.
coin <- c("H", "T")
sets <- 4
rolls <- 10000
results <- sample(coin, sets * rolls, rep = TRUE)
named_results <- purrr::map_chr(
  0:(rolls - 1),
  ~ paste0(results[(sets * .x + 1):(sets * .x + sets)],
           collapse = ""
  )
)
dplyr::count(tibble::tibble(x = named_results), x)
with output
# A tibble: 16 x 2
x n
<chr> <int>
1 HHHH 629
2 HHHT 627
3 HHTH 638
4 HHTT 599
5 HTHH 602
6 HTHT 633
7 HTTH 596
8 HTTT 661
9 THHH 631
10 THHT 589
11 THTH 633
12 THTT 647
13 TTHH 660
14 TTHT 637
15 TTTH 623
16 TTTT 595
sets = 8 would give something like
# A tibble: 256 x 2
x n
<chr> <int>
1 HHHHHHHH 37
2 HHHHHHHT 36
3 HHHHHHTH 43
4 HHHHHHTT 35
5 HHHHHTHH 38
6 HHHHHTHT 27
7 HHHHHTTH 32
8 HHHHHTTT 28
9 HHHHTHHH 33
10 HHHHTHHT 38
# ... with 246 more rows

Returning positive values in R using only vectorization and indexes

I have created a data frame which has strings and integers. The integers are both positive and negative.
I have to change all the ints to be positive without using for/if loops, using only vectorization and indexing. I have written a version with a for loop but I am a bit stuck on the vectorized part.
df <- data.frame(x = letters[1:5],
                 y = seq(-4, 4, 2),
                 z = c(3, 4, -5, 6, -8))
This is my loop to convert the negatives to positive values.
loop_df_fn <- function(data) {
  for (i in names(data)) {
    if (is.numeric(data[[i]])) {
      data[[i]][data[[i]] < 0] <- abs(data[[i]][data[[i]] < 0]) * 10
    }
  }
  return(data)
}
print(loop_df_fn(df))
You can use
df[] <- lapply(df, \(x) if (is.numeric(x)) abs(x) * 10 else x)
(Note that this scales every numeric value by 10, whereas the loop above only rescales the negatives.)
Output
x y z
1 a 40 30
2 b 20 40
3 c 0 50
4 d 20 60
5 e 40 80
A tidy solution:
library(dplyr)
df1 <- df %>%
  mutate(across(where(is.numeric), ~ if_else(. < 0, . * -10, .)))
Using arithmetic on logical exponents (note that zeros map to 1 here, since 0^0 is 1, as row 3 shows):
rapply(df, \(x) (x * -10)^(x < 0) * x^(x > 0), 'numeric', how = 'replace')
x y z
1 a 40 3
2 b 20 4
3 c 1 50
4 d 2 6
5 e 4 80
Or, with replace():
rapply(df, \(x) replace(x, x < 0, x[x < 0] * -10), 'numeric', how = 'replace')
x y z
1 a 40 3
2 b 20 4
3 c 0 50
4 d 2 6
5 e 4 80
Lastly, with logical indexing (closest to the vectorization-and-indexing requirement):
ind <- sapply(df, is.numeric)
df[ind][df[ind]<0] <- df[ind][df[ind]<0] * -10
df
x y z
1 a 40 3
2 b 20 4
3 c 0 50
4 d 2 6
5 e 4 80
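As a quick sanity check (my addition, assuming the original df from the question), the indexing approach reproduces the loop exactly:
ind <- sapply(df, is.numeric)
df2 <- df
df2[ind][df2[ind] < 0] <- df2[ind][df2[ind] < 0] * -10
identical(df2, loop_df_fn(df))  # TRUE: both rescale only the negative values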

Two cumsum conditions with reset in R

I have a dataframe that has two types of values. I'd like to slice it into groups.
These groups must satisfy two conditions:
Condition 1: max cumulative value of w <= 75
Condition 2: max cumulative value of n <= 15
If either criterion reaches its max cumulative value, both cumulative sums should reset
and start over for a new group.
id <- sample(1:33)
w <- c(2,1,32,5,1,1,12,1,2,32,32,32,1,3,2,12,1,1,1,1,1,1,5,3,5,1,1,1,2,7,2,32,1)
n <- c(1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1)
df <- data.frame(id, w, n)
The expected result (made manually):
w cumsum_w n cumsum_n group
2 2 1 1 1
1 3 1 2 1
32 35 1 3 1
5 40 1 4 1
1 41 1 5 1
1 42 1 6 1
12 54 1 7 1
1 55 1 8 1
2 57 1 9 1
32 32 1 1 2
32 64 1 2 2
32 32 1 1 3
1 33 1 2 3
3 36 1 3 3
2 38 1 4 3
12 50 1 5 3
1 51 1 6 3
1 52 1 7 3
1 53 1 8 3
1 54 1 9 3
1 55 1 10 3
1 56 1 11 3
5 61 1 12 3
3 64 1 13 3
5 69 1 14 3
1 70 1 15 3
1 1 1 1 4
1 2 1 2 4
2 4 1 3 4
7 11 1 4 4
2 13 1 5 4
32 45 1 6 4
1 46 1 7 4
I tried several methods:
Method 1
library(BBmisc)
chunk(df, chunk.size = 75, n.chunks = 15)
Error in chunk(df, chunk.size = 75, n.chunks = 15) :
You must provide exactly one of 'chunk.size', 'n.chunks' or 'props'
Method 2
cumsum_with_reset_group <- function(w, n, threshold_w, threshold_n) {
  cumsum_w <- 0
  cumsum_n <- 0
  group <- 1
  result <- numeric()
  for (i in 1:length(w)) {
    cumsum_w <- cumsum_w + w[i]
    cumsum_n <- cumsum_n + n[i]
    if (cumsum_w > threshold_w | cumsum_n > threshold_n) {
      group <- group + 1
      cumsum_w <- cumsum_w + w[i]
      cumsum_n <- cumsum_n + n[i]
    }
    result <- c(result, group)
  }
  return(result)
}
# cumsum with reset
cumsum_w_with_reset <- function(w, threshold_w) {
  cumsum_w <- 0
  group <- 1
  result <- numeric()
  for (i in 1:length(w)) {
    cumsum_w <- cumsum_w + w[i]
    if (cumsum_w > threshold_w) {
      group <- group + 1
      cumsum_w <- w[i]
    }
    result <- c(result, cumsum_w)
  }
  return(result)
}
# cumsum with reset
cumsum_n_with_reset <- function(n, threshold_n) {
  cumsum_n <- 0
  group <- 1
  result <- numeric()
  for (i in 1:length(n)) {
    cumsum_n <- cumsum_n + n[i]
    if (cumsum_n > threshold_n | cumsum_w > threshold_w) {
      group <- group + 1
      cumsum_n <- n[i]
    }
    result <- c(result, cumsum_n)
  }
  return(result)
}
# use functions above as window functions inside mutate statement
y <- df %>% group_by() %>%
  mutate(
    cumsum_w = cumsum_w_with_reset(w, 75),
    cumsum_n = cumsum_n_with_reset(n, 15),
    group = cumsum_with_reset_group(w, n, 75, 15)
  ) %>%
  ungroup()
Error in mutate_impl(.data, dots) :
Evaluation error: object 'cumsum_w' not found
Thanks!
Here is a hack done by repeated subsetting and binding; as such, it will be very slow with large data sets. It takes the whole data set as input.
library(dplyr)
cumsumdf <- function(df) {
  cumsum_75 <- function(x) cumsum(x) %/% 76   # 0 while the running total of w is <= 75
  cumsum_15 <- function(x) cumsum(x) %/% 16   # 0 while the running total of n is <= 15
  cumsum_w75 <- function(x) cumsum(x) %% 76
  cumsum_n15 <- function(x) cumsum(x) %% 16
  m <- nrow(df)
  df$grp <- 0
  df <- df %>%
    group_by(grp) %>%
    mutate(cumsum_w = numeric(m), cumsum_n = numeric(m))
  n <- 0
  df2 <- df[0, ]
  while (nrow(df) > 0) {
    df$cumsum_w <- cumsum_75(df$w)
    df$cumsum_n <- cumsum_15(df$n)
    n <- n + 1
    df1 <- df[df$cumsum_n == 0 & df$cumsum_w == 0, ]  # rows still inside both limits
    df <- df[df$cumsum_n != 0 | df$cumsum_w != 0, ]   # rows left for the next pass
    df1$grp <- n
    df1 <- df1 %>% group_by(grp) %>%
      mutate(cumsum_w = cumsum_w75(w), cumsum_n = cumsum_n15(n))
    df2 <- rbind(df2, df1)
  }
  return(df2)
}
cumsumdf(df)
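For reference, a minimal single-pass sketch (my own addition, not from the answer above) that keeps both running sums and resets them together whenever either limit would be exceeded:
two_limit_groups <- function(w, n, max_w = 75, max_n = 15) {
  grp <- integer(length(w))
  cw <- cn <- numeric(length(w))
  g <- 1; sw <- sn <- 0
  for (i in seq_along(w)) {
    if (sw + w[i] > max_w || sn + n[i] > max_n) {  # either limit hit: start a new group
      g <- g + 1
      sw <- 0
      sn <- 0
    }
    sw <- sw + w[i]
    sn <- sn + n[i]
    grp[i] <- g; cw[i] <- sw; cn[i] <- sn
  }
  data.frame(w = w, cumsum_w = cw, n = n, cumsum_n = cn, group = grp)
}
two_limit_groups(df$w, df$n)  # should match the expected table above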

Create a new column per group based on condition in a data frame

Although I have searched long for solutions, e.g.
Assign value to group based on condition in column
I am not able to solve the following problem and would greatly appreciate any help!
I have the following data frame (in reality, many more with thousands of rows):
df <- data.frame(ID1 = c(1,1,1,2,2,2,2,3,3,4,4,4,5,5,5,6,6,6,7,7),
                 ID2 = c(1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20),
                 Percentage = c(0,10,NA,65,79,81,52,0,0,11,12,35,0,24,89,76,0,NA,59,16),
                 Group_expected_result = c(6,6,6,7,7,7,7,1,1,3,3,3,4,4,4,5,5,5,2,2))
What I want to do is assign a group type from 1 to 7 to each group as indicated by ID1. Which group type is assigned depends on column 3, Percentage (values from 0-100), which is split into seven types:
Type 1 = 0 (a percentage of exactly 0)
Type 2 > 0 & < 10
Type 3 > 9 & < 20
Type 4 > 19 & < 30
Type 5 > 29 & < 40
Type 6 > 39 & < 50
Type 7 > 49
The combination of these types (above) defines the group type (G1-G7) below:
G1 = only T7
G2 = only T7 & T2-T6
G3 = only T2-T6
G4 = at least one T1, & one T2-T6, & one T7 (= all)
G5 = only T7 & T1
G6 = only T2-T6 & T1
G7 = only T1
The expected result is in the last column of the sample data frame, e.g.
the first group consists of types T1 and T3 and should therefore be group type G6.
So, the question is how to get the expected result in the last column? I hope I made the problem clear! Thanks in advance!
Try this (since the group definitions only distinguish T1, T2-T6, and T7, the helper collapses Percentage into three types: 1 for 0 or NA, 2 for values below 50, and 3 for values of 50 and above):
myType <- function(x) {
  if (is.na(x) || x == 0) {
    return(1L)
  } else if (x < 50) {
    return(2L)
  } else {
    return(3L)
  }
}
myGroup <- function(myDf) {
  myIds <- unique(myDf$ID1)
  myGs <- list(G1=1L, G2=2:3, G3=2L, G4=1:3, G5=c(1L,3L), G6=1:2, G7=3L)
  assignG <- vector(mode = "integer", length = nrow(myDf))
  vT <- vapply(myDf[, "Percentage"], function(x) myType(x), 1L)
  for (i in myIds) {
    myV <- which(myDf[, 1L] == i)
    testV <- sort(unique(vT[myV]))
    assignG[myV] <- which(vapply(myGs, function(x) identical(x, testV), TRUE, USE.NAMES = FALSE))
  }
  myDf$myResult <- assignG
  myDf
}
Calling it, we obtain:
myGroup(df)
ID1 ID2 Percentage Group_expected_result myResult
1 1 1 0 6 6
2 1 2 10 6 6
3 1 3 NA 6 6
4 2 4 65 7 7
5 2 5 79 7 7
6 2 6 81 7 7
7 2 7 52 7 7
8 3 8 0 1 1
9 3 9 0 1 1
10 4 10 11 3 3
11 4 11 12 3 3
12 4 12 35 3 3
13 5 13 0 4 4
14 5 14 24 4 4
15 5 15 89 4 4
16 6 16 76 5 5
17 6 17 0 5 5
18 6 18 NA 5 5
19 7 19 59 2 2
20 7 20 16 2 2
Here is a less intuitive, but more efficient solution.
myGroup2 <- function(myDf) {
  myIds <- unique(myDf$ID1)
  AltGs <- c(G1=2L, G2=7L, G3=3L, G4=9L, G5=6L, G6=5L, G7=4L)
  assignG <- vector(mode = "integer", length = nrow(myDf))
  vT <- vapply(myDf[, "Percentage"], function(x) myType(x), 1L)
  for (i in myIds) {
    myV <- which(myDf[, 1L] == i)
    testV <- unique(vT[myV])
    assignG[myV] <- which(AltGs == (length(testV) + sum(testV)))
  }
  myDf$myResult <- assignG
  myDf
}
It is about twice as fast:
library(microbenchmark)
microbenchmark(t1 = myGroup(df), t2 = myGroup2(df))
Unit: microseconds
expr min lq mean median uq max neval
t1 692.117 728.4470 779.6459 748.562 819.170 1018.060 100
t2 320.608 340.3115 390.7098 351.395 414.203 1781.195 100
You can obtain AltGs above by running the following:
myGs <- list(G1=1L, G2=2:3, G3=2L, G4=1:3, G5=c(1L,3L), G6=1:2, G7=3L)
AltGs <- vapply(myGs, function(x) length(x)+sum(x), 2L, USE.NAMES = FALSE)
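The same length-plus-sum encoding also translates to a grouped dplyr pipeline; a sketch of mine (not from the answers above), with the codes lookup derived from AltGs:
library(dplyr)
# group code (length + sum of the distinct types) -> group number
codes <- c(`2` = 1L, `7` = 2L, `3` = 3L, `9` = 4L, `6` = 5L, `5` = 6L, `4` = 7L)
df %>%
  mutate(type = ifelse(is.na(Percentage) | Percentage == 0, 1L,
                       ifelse(Percentage < 50, 2L, 3L))) %>%
  group_by(ID1) %>%
  mutate(myResult = unname(codes[as.character(n_distinct(type) + sum(unique(type)))])) %>%
  ungroup()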

Getting the maximum common words in R

I have data of the form:
ID A1 A2 A3 ... A100
1 john max karl ... kevin
2 kevin bosy lary ... rosy
3 karl lary bosy ... hale
.
.
.
10000 isha john lewis ... dave
For each ID, I want to find one other ID such that the two rows share the maximum number of common attributes (A1, A2, ..., A100).
How can I do this in R?
Edit: Let's call the output a MatchId:
ID MatchId
1 70
2 4000
.
.
10000 3000
I think this gets what you're looking for (note that rbind_all() has since been removed from dplyr; bind_rows() is the modern replacement):
library(dplyr)
# make up some data
set.seed(1492)
rbind_all(lapply(1:15, function(i) {
  x <- cbind.data.frame(stringsAsFactors = FALSE, i, t(sample(LETTERS, 10)))
  colnames(x) <- c("ID", sprintf("A%d", 1:10))
  x
})) -> dat
print(dat)
## Source: local data frame [15 x 11]
##
## ID A1 A2 A3 A4 A5 A6 A7 A8 A9 A10
## 1 1 H F E C B A R J Z N
## 2 2 Q P E M L Z C G V Y
## 3 3 Q J D N B T L K G Z
## 4 4 D Y U F V O I C A W
## 5 5 T Z D I J F R C B S
## 6 6 Q D H U P V O E R N
## 7 7 C L I M E K N S X Z
## 8 8 M J S E N O F Y X I
## 9 9 R H V N M T Q X L S
## 10 10 Q H L Y B W S M P X
## 11 11 M N J K B G S X V R
## 12 12 W X A H Y D N T Q I
## 13 13 K H V J D X Q W A U
## 14 14 M U F H S T W Z O N
## 15 15 G B U Y E L A Q W O
# get commons
rbind_all(lapply(1:15, function(i) {
  rbind_all(lapply(setdiff(1:15, i), function(j) {
    data.frame(id1 = i,
               id2 = j,
               common = length(intersect(c(t(dat[i, 2:11])),
                                         c(t(dat[j, 2:11])))))
  }))
})) -> commons
commons %>%
  group_by(id1) %>%
  top_n(1, common) %>%
  filter(row_number() == 1) %>%
  select(ID = id1, MatchId = id2)
## Source: local data frame [15 x 2]
## Groups: ID
##
## ID MatchId
## 1 1 5
## 2 2 7
## 3 3 5
## 4 4 12
## 5 5 1
## 6 6 9
## 7 7 8
## 8 8 7
## 9 9 10
## 10 10 9
## 11 11 9
## 12 12 13
## 13 13 12
## 14 14 8
## 15 15 2
Using similar data as provided by @hrbrmstr:
set.seed(1492)
dat <- do.call(rbind, lapply(1:15, function(i) {
  x <- cbind.data.frame(stringsAsFactors = FALSE, i, t(sample(LETTERS, 10)))
  colnames(x) <- c("ID", sprintf("A%d", 1:10))
  x
}))
You could achieve the same using base R only:
Res <- sapply(seq_len(nrow(dat)),
              function(x) apply(dat[-1], 1,
                                function(y) length(intersect(dat[x, -1], y))))
diag(Res) <- -1  # exclude self-matches
cbind(dat[1], MatchId = max.col(Res, ties.method = "first"))
# ID MatchId
# 1 1 5
# 2 2 7
# 3 3 5
# 4 4 12
# 5 5 1
# 6 6 9
# 7 7 8
# 8 8 7
# 9 9 10
# 10 10 9
# 11 11 9
# 12 12 13
# 13 13 12
# 14 14 8
# 15 15 2
If I understand correctly, the requirement is to obtain the attribute with the maximum frequency for each ID.
Frequency tables can be obtained using table() inside lapply(), assuming the ID column is unique - a slight modification is necessary if not (unique(df$ID) rather than df$ID in lapply()). The maximum frequency is then taken and, if there is a tie, only the first one is chosen. Finally the pieces are combined by do.call().
df <- read.table(header = T, text = "
ID A1 A2 A3 A100
1 john max karl kevin
2 kevin bosy lary rosy
3 karl lary bosy hale
10000 isha john lewis dave")
do.call(rbind, lapply(df$ID, function(x) {
  tbl <- table(unlist(df[df$ID == x, 2:ncol(df)]))
  data.frame(ID = x, MatchId = tbl[tbl == max(tbl)][1])
}))
# ID MatchId
#john 1 1
#kevin 2 1
#karl 3 1
#isha 10000 1
