How can I search for specific values in a row? E.g., for this data:
a <- c("a", "a", "b", "c")
b <- c("b", "a", "b", "d")
c <- c("c", "a", "b", "c")
d <- c("a", "a", "b", "a")
x <- data.frame(cbind(a,b,c,d))
head(x)
I would like to know whether there is an "a", "b", "c", or "d" in each row. Ideally, the result would be additional columns that indicate how many of each value occur in each row.
Thanks again!
You could also do:
library(reshape2)
df1 <- as.data.frame.matrix(table(melt(as.matrix(x))[,c(1,3)]))
cbind(x,df1)
# a b c d a b c d
#1 a b c a 2 1 1 0
#2 a a a a 4 0 0 0
#3 b b b b 0 4 0 0
#4 c d c a 1 0 2 1
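For what it's worth, reshape2 is superseded these days; roughly the same idea with tidyr might look like this (a sketch, assuming tidyr >= 1.0 is installed):
library(tidyr)
# pivot a, b, c, d into long form, tagging each value with the row it came from
long <- pivot_longer(cbind(row = seq_len(nrow(x)), x), -row)
cbind(x, as.data.frame.matrix(table(long$row, long$value)))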
Another approach sticking to base:
f<-function(x) table(factor(unlist(x),levels=letters[1:4]))
df1<-t(apply(x,1,f))
cbind(x,df1)
a b c d a b c d
1 a b c a 2 1 1 0
2 a a a a 4 0 0 0
3 b b b b 0 4 0 0
4 c d c a 1 0 2 1
Similar to J.R.'s answer, but a little bit different:
s <- split(unlist(x), rep.int(seq_len(nrow(x)), ncol(x)))
cbind(x, do.call(rbind, Map(table, s)))
# a b c d a b c d
# 1 a b c a 2 1 1 0
# 2 a a a a 4 0 0 0
# 3 b b b b 0 4 0 0
# 4 c d c a 1 0 2 1
vapply might actually be better (and faster) here:
cbind(x, t(vapply(s, table, integer(4L))))
# a b c d a b c d
# 1 a b c a 2 1 1 0
# 2 a a a a 4 0 0 0
# 3 b b b b 0 4 0 0
# 4 c d c a 1 0 2 1
Do you mean like this? It's not totally clear what you want the output to look like:
x$A <- apply(x,1,function(x) length(grep("a",x)))
x$B <- apply(x,1,function(x) length(grep("b",x)))
x$C <- apply(x,1,function(x) length(grep("c",x)))
x$D <- apply(x,1,function(x) length(grep("d",x)))
> x
a b c d A B C D
1 a b c a 2 1 1 0
2 a a a a 4 0 0 0
3 b b b b 0 4 0 0
4 c d c a 1 0 2 1
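Note that grep() matches substrings, so this only works cleanly because each cell is a single letter; for exact matching, a vectorised comparison is one alternative (a sketch, not from the original answer):
# x[1:4] == v gives a logical matrix over the original columns; rowSums counts the TRUEs per row
for (v in c("a", "b", "c", "d")) {
  x[[toupper(v)]] <- rowSums(x[1:4] == v)
}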
I would use this dataset as an example
BEZ <- c("A","A","A","A","B","B","B")
var <- c("B","B","B","B","B","B","B")
bar <- c("B","B","B","B","B","B","B")
Bez1 <- c("A","A","A","A","B","B","B")
var1 <- c("B","B","B","B","B","B","B")
bar1 <- c("B","B","B","B","B","B","B")
dat <- data.frame(BEZ, var, bar, Bez1, var1, bar1)
The tricky thing I would like to do is use some method (loops, map(), apply(), dplyr functions, and so on) to create, alongside each existing column, a new column in which the respective row value is converted into a number.
Expected result:
BEZ BEZ_num var var_num bar bar_num Bez1 BEZ1_num var1 var1_num bar1 bar1_num
A 0 B 1 B 1 A 0 B 1 B 1
A 0 B 1 B 1 A 0 B 1 B 1
A 0 B 1 C 2 A 0 B 1 A 0
A 0 B 1 B 1 A 0 C 2 B 1
B 1 B 1 B 1 B 1 C 2 C 2
B 1 B 1 B 1 A 0 B 1 B 1
B 1 B 1 B 1 A 0 B 1 B 1
This is more or less the idea I would like to hit. Any suggestions?
Thanks
Using factor
library(dplyr)
dat %>%
mutate(across(everything(), ~ as.integer(factor(.x))-1, .names = "{.col}_num"))
-output
BEZ var bar Bez1 var1 bar1 BEZ_num var_num bar_num Bez1_num var1_num bar1_num
1 A B B A B B 0 0 0 0 0 0
2 A B B A B B 0 0 0 0 0 0
3 A B B A B B 0 0 0 0 0 0
4 A B B A B B 0 0 0 0 0 0
5 B B B B B B 1 0 0 1 0 0
6 B B B B B B 1 0 0 1 0 0
7 B B B B B B 1 0 0 1 0 0
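One caveat not covered above: as.integer(factor(.x)) numbers only the levels that happen to occur in each column, in alphabetical order, so a column containing just "B" and "C" would map them to 0 and 1. If the mapping should always be A = 0, B = 1, C = 2, the levels can be pinned explicitly (a sketch along the same lines):
dat %>%
  mutate(across(everything(),
                ~ as.integer(factor(.x, levels = LETTERS)) - 1,
                .names = "{.col}_num"))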
As noted in the comments, the provided data frame and the expected output do not match. But I think we could use mutate(across(...)) with the .names argument combined with case_when:
library(dplyr)
dat %>%
mutate(across(everything(), ~case_when(
. == "A" ~ "0",
. == "B" ~ "1",
. == "C" ~ "2"), .names = "{col}_num"))
BEZ var bar Bez1 var1 bar1 BEZ_num var_num bar_num Bez1_num var1_num bar1_num
1 A B B A B B 0 1 1 0 1 1
2 A B B A B B 0 1 1 0 1 1
3 A B B A B B 0 1 1 0 1 1
4 A B B A B B 0 1 1 0 1 1
5 B B B B B B 1 1 1 1 1 1
6 B B B B B B 1 1 1 1 1 1
7 B B B B B B 1 1 1 1 1 1
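One small thing to note: with quoted right-hand sides, case_when() returns character columns ("0", "1", "2"). If numeric output is wanted, numeric right-hand sides do the same job (a minor variation on the code above):
dat %>%
  mutate(across(everything(), ~case_when(
    . == "A" ~ 0,
    . == "B" ~ 1,
    . == "C" ~ 2), .names = "{col}_num"))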
Using a for loop in base R:
dat2 <- dat[, 1, drop = FALSE]
for (col in names(dat)) {
dat2[[col]] <- dat[[col]]
dat2[[paste0(col, "_num")]] <- match(dat[[col]], LETTERS) - 1
}
dat2
# BEZ BEZ_num var var_num bar bar_num Bez1 Bez1_num var1 var1_num bar1 bar1_num
# 1 A 0 B 1 B 1 A 0 B 1 B 1
# 2 A 0 B 1 B 1 A 0 B 1 B 1
# 3 A 0 B 1 B 1 A 0 B 1 B 1
# 4 A 0 B 1 B 1 A 0 B 1 B 1
# 5 B 1 B 1 B 1 B 1 B 1 B 1
# 6 B 1 B 1 B 1 B 1 B 1 B 1
# 7 B 1 B 1 B 1 B 1 B 1 B 1
Or a (slightly convoluted) approach using dplyr::across():
library(dplyr)
dat %>%
mutate(
across(BEZ:bar1, list(TMP = identity, num = \(x) match(x, LETTERS) - 1)),
.keep = "unused"
) %>%
rename_with(\(x) gsub("_TMP$", "", x))
# same output as above
Or finally, if you don't care about the order of the output columns, you could also use dplyr::across() with the .names argument:
dat %>%
mutate(across(
BEZ:bar1,
\(x) match(x, LETTERS) - 1,
.names = "{.col}_num"
))
# BEZ var bar Bez1 var1 bar1 BEZ_num var_num bar_num Bez1_num var1_num bar1_num
# 1 A B B A B B 0 1 1 0 1 1
# 2 A B B A B B 0 1 1 0 1 1
# 3 A B B A B B 0 1 1 0 1 1
# 4 A B B A B B 0 1 1 0 1 1
# 5 B B B B B B 1 1 1 1 1 1
# 6 B B B B B B 1 1 1 1 1 1
# 7 B B B B B B 1 1 1 1 1 1
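If the interleaved column order of the expected output does matter after all, the *_num columns can be moved next to their source columns afterwards (a sketch reusing the result above):
res <- dat %>%
  mutate(across(BEZ:bar1, \(x) match(x, LETTERS) - 1, .names = "{.col}_num"))
# sort columns by the position of their base name in dat, so each *_num follows its source
res[order(match(sub("_num$", "", names(res)), names(dat)))]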
To add two further options:
With dplyr v.1.1.0 we can use consecutive_id():
library(dplyr) # v.1.1.0
dat %>%
mutate(across(everything(),
~ consecutive_id(.x)-1,
.names = "{.col}_num"))
#> BEZ var bar Bez1 var1 bar1 BEZ_num var_num bar_num Bez1_num var1_num bar1_num
#> 1 A B B A B B 0 0 0 0 0 0
#> 2 A B B A B B 0 0 0 0 0 0
#> 3 A B B A B B 0 0 0 0 0 0
#> 4 A B B A B B 0 0 0 0 0 0
#> 5 B B B B B B 1 0 0 1 0 0
#> 6 B B B B B B 1 0 0 1 0 0
#> 7 B B B B B B 1 0 0 1 0 0
Similarly, we could use data.table::rleid():
dat %>%
mutate(across(everything(),
~ data.table::rleid(.x)-1,
.names = "{.col}_num"))
#> BEZ var bar Bez1 var1 bar1 BEZ_num var_num bar_num Bez1_num var1_num bar1_num
#> 1 A B B A B B 0 0 0 0 0 0
#> 2 A B B A B B 0 0 0 0 0 0
#> 3 A B B A B B 0 0 0 0 0 0
#> 4 A B B A B B 0 0 0 0 0 0
#> 5 B B B B B B 1 0 0 1 0 0
#> 6 B B B B B B 1 0 0 1 0 0
#> 7 B B B B B B 1 0 0 1 0 0
Created on 2023-02-03 with reprex v2.0.2
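One thing to keep in mind: consecutive_id() and rleid() number runs of values in order of appearance, so they reproduce A = 0, B = 1 here only because the example data is already sorted; on a reordered column the run ids diverge from the letters (an illustration, not part of the answer above):
# run ids follow order of appearance, not the letter itself
dplyr::consecutive_id(c("B", "B", "A", "B")) - 1
#> [1] 0 0 1 2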
I've got a dataset where I'm interested in the frequencies of different pairs emerging, but it doesn't matter which order the elements occur. For example:
library(janitor)
set.seed(24601)
options <- c("a", "b", "c", "d", "e", "f")
data.frame(x = sample(options, 20, replace = TRUE),
y = sample(options, 20, replace = TRUE)) %>%
tabyl(x, y)
provides me with the output
x a b c d e f
a 1 0 1 0 1 0
b 0 2 0 1 0 0
c 2 0 1 0 0 0
d 0 0 0 0 1 0
e 1 1 2 0 0 3
f 0 0 1 1 0 1
Ideally I'd have just the top-right or bottom-left triangle of this table, where the combination of values a and c would total 3: the sum of 1 (row a, column c) and 2 (row c, column a). And so on for every other pair of values.
I'm sure there must be a simple way to do this, but I can't figure out what it is...
Edited to add (thanks #Akrun for the request): ideally I'd like the following output
x  a  b  c  d  e  f
a  1  0  3  0  2  0
b     2  0  1  1  0
c        1  0  2  1
d           0  1  1
e              0  3
f                 1
We could add (+) the transposed output (excluding the first column) to the original, then replace the upper-triangle values of 'out' (subsetting the elements with upper.tri, which returns a logical matrix) with the corresponding summed elements, and assign NA to the lower-triangle elements:
out2 <- out[-1] + t(out[-1])
out[-1][upper.tri(out[-1])] <- out2[upper.tri(out2)]
out[-1][lower.tri(out[-1])] <- NA
-output
out
# x a b c d e f
# a 1 0 3 0 2 0
# b NA 2 0 1 1 0
# c NA NA 1 0 2 1
# d NA NA NA 0 1 1
# e NA NA NA NA 0 3
# f NA NA NA NA NA 1
data
set.seed(24601)
options <- c("a", "b", "c", "d", "e", "f")
out <- data.frame(x = sample(options, 20, replace = TRUE),
y = sample(options, 20, replace = TRUE)) %>%
tabyl(x, y)
Here is another option, using igraph
out[-1] <- get.adjacency(
graph_from_data_frame(
get.data.frame(
graph_from_adjacency_matrix(
as.matrix(out[-1]), "directed"
)
), FALSE
),
type = "upper",
sparse = FALSE
)
which gives
> out
x a b c d e f
a 1 0 3 0 2 0
b 0 2 0 1 1 0
c 0 0 1 0 2 1
d 0 0 0 0 1 1
e 0 0 0 0 0 3
f 0 0 0 0 0 1
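For reference, the dotted igraph names used above (get.adjacency(), get.data.frame()) have newer equivalents; the same pipeline written with those might look like this (a sketch, assuming a reasonably recent igraph):
library(igraph)
# directed multigraph from the counts, flattened to an undirected edge list,
# then re-counted into the upper triangle only
g <- graph_from_adjacency_matrix(as.matrix(out[-1]), mode = "directed")
g_undirected <- graph_from_data_frame(as_data_frame(g), directed = FALSE)
out[-1] <- as_adjacency_matrix(g_undirected, type = "upper", sparse = FALSE)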
I have the following table in R:
df <- data.frame('a' = c(1,0,0,1,0),
'b' = c(1,0,0,1,0),
'c' = c(1,1,0,1,1))
df
a b c
1 1 1 1
2 0 0 1
3 0 0 0
4 1 1 1
5 0 0 1
What I want is to replace the value with the column name whenever it is equal to 1. The output would look like this:
a b c
1 a b c
2 0 0 c
3 0 0 0
4 a b c
5 0 0 c
How can I do this in R? Thanks.
I would use Map and replace:
df[] <- Map(function(n, x) replace(x, x == 1, n), names(df), df)
df
# a b c
# 1 a b c
# 2 0 0 c
# 3 0 0 0
# 4 a b c
# 5 0 0 c
We can use
df[] <- names(df)[(NA^!df) * col(df)]
df[is.na(df)] <- 0
df
# a b c
#1 a b c
#2 0 0 c
#3 0 0 0
#4 a b c
#5 0 0 c
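To unpack how that works (an illustration, not part of the original answer): NA^TRUE is NA while NA^FALSE is 1, so NA^!df is 1 where df is 1 and NA where df is 0; multiplying by col(df) then gives the column index for the 1s and NA everywhere else, which is used to index names(df).
df0 <- data.frame(a = c(1, 0), b = c(0, 1))  # a tiny hypothetical example
NA^!df0               # 1 where df0 is 1, NA where df0 is 0
(NA^!df0) * col(df0)  # column indices for the 1s, NA elsewhere
names(df0)[(NA^!df0) * col(df0)]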
You can try stack and unstack
a=stack(df)
a
values ind
1 1 a
2 0 a
3 0 a
4 1 a
5 0 a
6 1 b
7 0 b
8 0 b
9 1 b
10 0 b
11 1 c
12 1 c
13 0 c
14 1 c
15 1 c
a$values[a$values==1]=as.character(a$ind)[a$values==1]
unstack(a)
a b c
1 a b c
2 0 0 c
3 0 0 0
4 a b c
5 0 0 c
We can try iterating over the names of the data frame, and then handling each column, for a base R option:
df <- data.frame(a=c(1,0,0,1,0), b=c(1,0,0,1,0), c=c(1,1,0,1,1))
df <- data.frame(sapply(names(df), function(x) {
y <- df[[x]]
y[y == 1] <- x
return(y)
}))
df
a b c
1 a b c
2 0 0 c
3 0 0 0
4 a b c
5 0 0 c
You can do it with ifelse, but you have to do some intermediate transposing to account for R's column-major order processing.
data.frame(t(ifelse(t(df)==1,names(df),0)))
a b c
1 a b c
2 0 0 c
3 0 0 0
4 a b c
5 0 0 c
I have dataset "data" with 7 rows and 4 columns, as follows:
var1 var2 var3 var4
A C
A C B
B A C D
D B
B
D B
B C
I want to create the following table "Mat" based on the data I have:
A B C D
1 1
1 1 1
1 1 1 1
1 1
1
1 1
1 1 1
Basically, I have taken the unique elements from the original data and want to create a matrix "Mat" where the number of rows in "Mat" equals the number of rows in the data and the number of columns in "Mat" equals the number of unique elements in the data (that is, A, B, C, D).
I wrote the following code in R:
rule <- c("A", "B", "C", "D")
mat <- matrix(NA, nrow = dim(data)[1], ncol = length(rule))
mat <- data.frame(mat)
names(mat) <- rule
n_data<-dim(data)[1]
for(i in 1:n_data)
{
for(j in 2:dim(data)[2])
{
for(k in 1:dim(mat)[2])
{
ifelse(data[i,j]==names(mat)[k],mat[i,k]==1,0)
}
}
}
I am getting all NA in "mat". Also, the running time is far too long, because my original data set has 20,000 rows and "Mat" has 100 columns.
Any advice will be highly appreciated. Thanks!
This should be faster than the nested for loops:
> sapply(c("A", "B", "C", "D"), function(x) { rowSums(df == x, na.rm = T) })
# A B C D
# [1,] 1 0 1 0
# [2,] 1 1 1 0
# [3,] 1 1 1 1
# [4,] 0 1 0 1
# [5,] 0 1 0 0
# [6,] 0 1 0 1
# [7,] 0 1 1 0
Data
df <- read.table(text = "var1 var2 var3 var4
A C NA NA
A C B NA
B A C D
D B NA NA
NA B NA NA
D B NA NA
B C NA NA", header = T, stringsAsFactors = F)
By using table and rep
table(rep(1:nrow(df),dim(df)[2]),unlist(df))
A B C D
1 1 0 1 0
2 1 1 1 0
3 1 1 1 1
4 0 1 0 1
5 0 1 0 0
6 0 1 0 1
7 0 1 1 0
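If "Mat" is needed as a data frame with columns A-D rather than a table object, the result can be converted directly (a small follow-up, not in the original answer):
Mat <- as.data.frame.matrix(table(rep(1:nrow(df), ncol(df)), unlist(df)))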
I have a large dataframe of 34,000 rows x 24 columns, each cell of which contains a category label. I would like to go through the dataframe efficiently and count how many times each label appears in a section of each row, including 0s.
(I've used a for loop driving a length(which) statement that wasn't terribly efficient)
Example:
df.test<-as.data.frame(rbind(c("A", "B", "C","B","A","A"),c("C", "C", "C","C","C","C"), c("A", "B", "B","A","A","A")))
df.res<-as.data.frame(matrix(ncol=6, nrow=3))
Let's say columns 1:3 in df.test are from one dataset, 4:6 from the other. What is the most efficient way to generate df.res to show this:
A B C A B C
1 1 1 2 1 0
0 0 3 0 0 3
1 2 0 3 0 0
A way, using a lot of *apply functions, is the following:
#list with the different data frames
df_ls <- sapply(seq(1, ncol(df.test), 3), function(x) df.test[,x:(x+2)], simplify = F)
#count each category
df.res <- do.call(cbind,
lapply(df_ls, function(df.) { t(apply(df., 1,
function(x) { table(factor(unlist(x), levels = c("A", "B", "C"))) })) }))
#> df.res
# A B C A B C
#[1,] 1 1 1 2 1 0
#[2,] 0 0 3 0 0 3
#[3,] 1 2 0 3 0 0
Simulating a dataframe like the one you described:
DF <- data.frame(replicate(24, sample(LETTERS[1:3], 34000, T)), stringsAsFactors = F)
#> head(DF)
# X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24
#1 B C C C B A C B B A C C B C B B B C B C C B B C
#2 C B C A B C B C A B A C B B A A C A B B B C A B
#3 B C C A A A C A C A A A B B A A A C B B A C C C
#4 C C A B A B B B A A A C C A B A C C A C C C B A
#5 B B A A A A C A B B A B B A C A A A C A A C B C
#6 C A C C A B B C C C B C A B B B B B A C A A B A
#> dim(DF)
#[1] 34000 24
DF_ls <- sapply(seq(1, ncol(DF), 3), function(x) DF[,x:(x+2)], simplify = F)
system.time(
DF.res <- do.call(cbind,
lapply(DF_ls, function(df.) { t(apply(df., 1,
function(x) { table(factor(unlist(x), levels = c("A", "B", "C"))) })) })))
#user system elapsed
#59.84 0.07 60.73
#> head(DF.res)
# A B C A B C A B C A B C A B C A B C A B C A B C
#[1,] 0 1 2 1 1 1 0 2 1 1 0 2 0 2 1 0 2 1 0 1 2 0 2 1
#[2,] 0 1 2 1 1 1 1 1 1 1 1 1 1 2 0 2 0 1 0 3 0 1 1 1
#[3,] 0 1 2 3 0 0 1 0 2 3 0 0 1 2 0 2 0 1 1 2 0 0 0 3
#[4,] 1 0 2 1 2 0 1 2 0 2 0 1 1 1 1 1 0 2 1 0 2 1 1 1
#[5,] 1 2 0 3 0 0 1 1 1 1 2 0 1 1 1 3 0 0 2 0 1 0 1 2
#[6,] 1 0 2 1 1 1 0 1 2 0 1 2 1 2 0 0 3 0 2 0 1 2 1 0
EDIT: Some more comments on the approach.
I'll do the above step by step.
The first step is to subset the different dataframes that are bound together; each of those dataframes is put in a list. The function function(x) df.test[, x:(x+2)] (with simplify = F passed to sapply) subsets the whole dataframe based on the values of x in seq(1, ncol(df.test), 3). Extending this, if your different dataframes were 4 columns apart, the 3 would be changed to 4 in the above sequence.
#> df_ls <- sapply(seq(1, ncol(df.test), 3), function(x) df.test[,x:(x+2)], simplify = F)
#> df_ls
#[[1]]
# V1 V2 V3
#1 A B C
#2 C C C
#3 A B B
#[[2]]
# V4 V5 V6
#1 B A A
#2 C C C
#3 A A A
The next step is to lapply to the (previously made) list a function that counts each category in each row of one dataframe (i.e. element of the list). The function is this: t(apply(df., 1, function(x) { table(factor(unlist(x), levels = c("A", "B", "C"))) })). The inner function (function(x)) turns one row into a factor whose levels are all the categories and counts (table) how many times each category occurred in that row. apply applies this function to each row (MARGIN = 1) of the dataframe. So, now, we have counted the frequency of each category in each row of one dataframe.
#> table(factor(unlist(df_ls[[1]][3,]), levels = c("A", "B", "C")))
#df_ls[[1]][3,] is the third row of the first dataframe of df_ls
#(i.e. _one_ row of _one_ dataframe)
#A B C
#1 2 0
#> apply(df_ls[[1]], 1,
#+ function(x) { table(factor(unlist(x), levels = c("A", "B", "C"))) })
# [,1] [,2] [,3] #df_ls[[1]] is the first dataframe of df_ls (i.e. _one_ dataframe)
#A 1 0 1
#B 1 0 2
#C 1 3 0
Because the return of apply is not in the wanted form, we use t to swap rows with columns.
The next step is to lapply all of the above to each dataframe (i.e. element of the list).
#> lapply(df_ls, function(df.) { t(apply(df., 1,
#+ function(x) { table(factor(unlist(x), levels = c("A", "B", "C"))) })) })
#[[1]]
# A B C
#[1,] 1 1 1
#[2,] 0 0 3
#[3,] 1 2 0
#[[2]]
# A B C
#[1,] 2 1 0
#[2,] 0 0 3
#[3,] 3 0 0
The last step is to cbind all those elements together. The way to bind all the elements of a list by column is to do.call cbind on that list.
#NOT the expected, using only cbind
#> cbind(lapply(df_ls, function(df.) { t(apply(df., 1,
#+ function(x) { table(factor(unlist(x), levels = c("A", "B", "C"))) })) }))
# [,1]
#[1,] Integer,9
#[2,] Integer,9
#Correct!
#> do.call(cbind, lapply(df_ls, function(df.) { t(apply(df., 1,
#+ function(x) { table(factor(unlist(x), levels = c("A", "B", "C"))) })) }))
# A B C A B C
#[1,] 1 1 1 2 1 0
#[2,] 0 0 3 0 0 3
#[3,] 1 2 0 3 0 0
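As an aside, the row-wise apply is what makes the 60-second timing above slow; counting each 3-column block with a single table() call on its long form should be considerably faster for 34,000 rows (a sketch using the same simulated DF and DF_ls; count_block is a hypothetical helper name, and timings will vary):
count_block <- function(block, levs = c("A", "B", "C")) {
  # unlist() stacks the block column by column, so repeating the row index
  # ncol(block) times lines each value up with the row it came from
  tab <- table(rep(seq_len(nrow(block)), ncol(block)),
               factor(unlist(block), levels = levs))
  as.data.frame.matrix(tab)
}
DF.res2 <- do.call(cbind, lapply(DF_ls, count_block))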