Repeat a vector as rows of a data frame in R

I have a vector, let's say
k <- c(1,2,3,4)
I want to create a data frame with, say, 5 rows that uses the same vector in each row, as shown below.
X1 X2 X3 X4
1 1 2 3 4
2 1 2 3 4
3 1 2 3 4
4 1 2 3 4
5 1 2 3 4
I tried:
> rep(k, each = 5)
[1] 1 1 1 1 1 2 2 2 2 2 3 3 3 3 3 4 4 4 4 4
However, this does not give the intended result. Any suggestions?

data.frame(t(replicate(5, k)))
#OR
data.frame(matrix(rep(k, each = 5), 5))
#OR
data.frame(t(sapply(1:5, function(x) k)))
# X1 X2 X3 X4
#1 1 2 3 4
#2 1 2 3 4
#3 1 2 3 4
#4 1 2 3 4
#5 1 2 3 4
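A close cousin of the matrix() call above, shown here as a sketch: let byrow = TRUE do the recycling instead of rep().
# Recycle k across 5 rows: byrow = TRUE fills the matrix row by row,
# and data.frame() supplies the default X1..X4 column names.
data.frame(matrix(k, nrow = 5, ncol = length(k), byrow = TRUE))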

Here is one option: convert the vector to a list with as.list, change it to a data.frame with as.data.frame, and replicate the rows:
as.data.frame(as.list(k))[rep(1, 5),]
# X1 X2 X3 X4
#1 1 2 3 4
#1.1 1 2 3 4
#1.2 1 2 3 4
#1.3 1 2 3 4
#1.4 1 2 3 4
Or another option is to take the transpose of the vector to get a one-row matrix, replicate that row, and convert to a data.frame:
as.data.frame(t(k)[rep(1, 5),])
In the tidyverse, one option is to convert to a tibble and then uncount:
library(dplyr)
library(tidyr)
library(stringr)
library(purrr)   # set_names() comes from purrr/rlang
as.list(k) %>%
   set_names(str_c("X", seq_along(k))) %>%
   as_tibble %>%
   uncount(5)
# A tibble: 5 x 4
# X1 X2 X3 X4
# <dbl> <dbl> <dbl> <dbl>
#1 1 2 3 4
#2 1 2 3 4
#3 1 2 3 4
#4 1 2 3 4
#5 1 2 3 4
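A slightly shorter tidyverse route, assuming tibble >= 3.0.0 (which provides as_tibble_row()): build the one-row tibble directly from a named vector and then uncount it.
library(dplyr)
library(tidyr)
library(tibble)
setNames(k, paste0("X", seq_along(k))) %>%   # name the elements X1..X4
   as_tibble_row() %>%                       # one-row tibble
   uncount(5)                                # repeat that row 5 times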

purrr::map_dfc(k, rep, 5)
# # A tibble: 5 x 4
# V1 V2 V3 V4
# <dbl> <dbl> <dbl> <dbl>
# 1 1 2 3 4
# 2 1 2 3 4
# 3 1 2 3 4
# 4 1 2 3 4
# 5 1 2 3 4
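A hedged variant if you want X1..X4 column names rather than the defaults: name the vector first, and map_dfc() should carry the names through via bind_cols().
library(purrr)
# Naming the elements first should give columns X1..X4 instead of V1..V4.
map_dfc(setNames(k, paste0("X", seq_along(k))), rep, 5)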

Using data.table:
k = c(1,2,3,4)
n = 5 # Number of rows
df = data.table()
df = df[, lapply(1:length(k), function(x) rep(k[x], n))]
> df
V1 V2 V3 V4
1: 1 2 3 4
2: 1 2 3 4
3: 1 2 3 4
4: 1 2 3 4
5: 1 2 3 4
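A shorter data.table sketch along the same lines: build a one-row data.table from the vector and replicate that row by indexing.
library(data.table)
# t(k) is a 1 x 4 matrix, so data.table(t(k)) is a one-row table with
# columns V1..V4; indexing with rep(1, n) repeats that row n times.
data.table(t(k))[rep(1, n)]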

Related

Create new order for existing column values without reordering rows in dataframe - R

I have some cluster labels resulting from kmeans run on different ids (reprex example below). The problem is that the kmeans cluster codes are not ordered consistently across ids, although all ids have 3 clusters.
reprex = data.frame(id = rep(1:2, each = 4),
                    v1 = rep(seq(1:4), 2),
                    cluster = c(2,2,1,3,3,1,2,2))
reprex
id v1 cluster
1 1 1 2
2 1 2 2
3 1 3 1
4 1 4 3
5 2 1 3
6 2 2 1
7 2 3 2
8 2 4 2
What I want is that the variable cluster should always start at 1 within each id. Note that I don't want to reorder the dataframe by cluster; the row order needs to remain the same. So the desired result would be:
reprex_desired <- data.frame(id = rep(1:2, each = 4),
                             v1 = rep(seq(1:4), 2),
                             cluster = c(2,2,1,3,3,1,2,2),
                             what_iWant = c(1,1,2,3,1,2,3,3))
reprex_desired
id v1 cluster what_iWant
1 1 1 2 1
2 1 2 2 1
3 1 3 1 2
4 1 4 3 3
5 2 1 3 1
6 2 2 1 2
7 2 3 2 3
8 2 4 2 3
We can use match after grouping by 'id'
library(dplyr)
reprex <- reprex %>%
   group_by(id) %>%
   mutate(what_IWant = match(cluster, unique(cluster))) %>%
   ungroup()
Output:
reprex
# A tibble: 8 × 4
id v1 cluster what_IWant
<int> <int> <dbl> <int>
1 1 1 2 1
2 1 2 2 1
3 1 3 1 2
4 1 4 3 3
5 2 1 3 1
6 2 2 1 2
7 2 3 2 3
8 2 4 2 3
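The same match() idea works per group in base R via ave(), for example (a sketch using the reprex data above):
# ave() applies the function within each id; match() against unique()
# relabels clusters in order of first appearance, starting at 1.
reprex$what_IWant <- ave(reprex$cluster, reprex$id,
                         FUN = function(x) match(x, unique(x)))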
Here is a version with cumsum combined with lag:
library(dplyr)
reprex %>%
   group_by(id) %>%
   mutate(what_i_want = cumsum(cluster != lag(cluster, default = first(cluster))) + 1)
id v1 cluster what_i_want
<int> <int> <dbl> <dbl>
1 1 1 2 1
2 1 2 2 1
3 1 3 1 2
4 1 4 3 3
5 2 1 3 1
6 2 2 1 2
7 2 3 2 3
8 2 4 2 3
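The consecutive-change logic of the cumsum()/lag() version is exactly a run-length id, so a hedged data.table equivalent would be:
library(data.table)
# rleid() numbers runs of equal cluster values within each id, starting at 1.
setDT(reprex)[, what_i_want := rleid(cluster), by = id]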

R Tidyverse - Randomize by ID

I have a df like this one:
id <- c(1,1,2,2,3,3,4,4,5,5)
v1 <- c(3,1,2,3,4,5,6,1,5,4)
pos <- c(1,2,1,2,1,2,1,2,1,2)
df <- data.frame(id,v1,pos)
How can I "randomize" the values of v1 WHILE keeping the inherent order from the "Id" var and also the values of "pos" such as I get df with randomized values like this:
id v1 pos
1 1 1
1 3 2
2 2 1
2 3 2
3 5 1
3 4 2
4 6 1
4 1 2
5 5 1
5 4 2
Above is an example of the resulting df, with id and pos staying as originally created and v1 randomized.
Thanks!
Is sample what you're looking for?
library(dplyr)
df %>%
   group_by(id) %>%
   mutate(v1 = sample(v1, size = length(v1)))
# A tibble: 10 x 3
# Groups: id [5]
id v1 pos
<dbl> <dbl> <dbl>
1 1 3 1
2 1 1 2
3 2 3 1
4 2 2 2
5 3 4 1
6 3 5 2
7 4 1 1
8 4 6 2
9 5 5 1
10 5 4 2
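A base R sketch of the same shuffle uses ave() to permute v1 within each id (with the usual caveat that sample(x) on a length-one group would sample from 1:x rather than return x):
# Permute v1 within each id; id and pos stay untouched.
df$v1 <- ave(df$v1, df$id, FUN = sample)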

Remove redundant rows with a large number of variables

I have data with 33 attributes. 30 of them are variables, and the other 3 columns are the cluster number, the degree, and the sum of degree. I want to remove duplicate rows which have the same values in variables 1 through 30, and within each set of duplicates keep the row with the highest sum of degree. This code is run in R. My question is how do I simplify zz.
df_order=dfOrder(rule2,c(33),ascending=FALSE)
df_order2=as_tibble(df_order)
zz=df_order2 %>% distinct(X1,X2,X3,X4,X5,X6,X7,X8,X9,X10,X11,X12,X13,X14,X15,X16,X17,X18,X19,X20,X21,X22,X23,X24,X25,X26,X27,X28,X29,X30,.keep_all = TRUE)
Sample data:
set.seed(42)
dat <- tibble(a=rep(1:2, each=10), b=rep(1:4, each=5), x1=sample(3,size=20,replace=TRUE), x2=sample(3,size=20,replace=TRUE), x3=sample(3,size=20,replace=TRUE))
dat
# # A tibble: 20 x 5
# a b x1 x2 x3
# <int> <int> <int> <int> <int>
# 1 1 1 1 1 3
# 2 1 1 1 3 3
# 3 1 1 1 1 1
# 4 1 1 1 1 1
# 5 1 1 2 2 2
# 6 1 2 2 3 2
# ...truncated...
Brute-force to show what distinct gives you:
distinct(dat, x1, x2, x3, .keep_all = TRUE)
# # A tibble: 14 x 5
# a b x1 x2 x3
# <int> <int> <int> <int> <int>
# 1 1 1 1 1 3
# 2 1 1 1 3 3
# 3 1 1 1 1 1
# 4 1 1 2 2 2
# 5 1 2 2 3 2
# 6 1 2 1 1 2
# 7 1 2 3 2 2
# 8 1 2 3 2 3
# 9 2 3 1 3 2
# 10 2 3 1 3 1
# 11 2 3 2 2 3
# 12 2 4 3 1 2
# 13 2 4 1 2 1
# 14 2 4 3 2 1
Programmatic ways, without specifying each of x1 through x3; all of these work (depending on your preference towards "just use these" or "don't use those"). The first two work in base R and the tidyverse equally well; the third uses dplyr::select.
dat[!duplicated(subset(dat, select = -(a:b))),]
dat[!duplicated(subset(dat, select = x1:x3)),]
dat[!duplicated(select(dat, x1:x3)),] # or -(a:b), same
Or perhaps a pipe-looking method:
select(dat, x1:x3) %>%
   Negate(duplicated)(.) %>%
   which(.) %>%
   slice(dat, .)
Using the data from @r2evans' post, an option is to splice (!!!) the column names after converting them to symbols:
library(dplyr)
dat %>%
   distinct(!!! rlang::syms(names(select(., starts_with('x')))), .keep_all = TRUE)
# A tibble: 14 x 5
# a b x1 x2 x3
# <int> <int> <int> <int> <int>
# 1 1 1 1 1 3
# 2 1 1 1 3 3
# 3 1 1 1 1 1
# 4 1 1 2 2 2
# 5 1 2 2 3 2
# 6 1 2 1 1 2
# 7 1 2 3 2 2
# 8 1 2 3 2 3
# 9 2 3 1 3 2
#10 2 3 1 3 1
#11 2 3 2 2 3
#12 2 4 3 1 2
#13 2 4 1 2 1
#14 2 4 3 2 1
From dplyr version >= 1.0.0, we can also use distinct with across
dat %>%
   distinct(across(starts_with('x')), .keep_all = TRUE)
# A tibble: 14 x 5
# a b x1 x2 x3
# <int> <int> <int> <int> <int>
# 1 1 1 1 1 3
# 2 1 1 1 3 3
# 3 1 1 1 1 1
# 4 1 1 2 2 2
# 5 1 2 2 3 2
# 6 1 2 1 1 2
# 7 1 2 3 2 2
# 8 1 2 3 2 3
# 9 2 3 1 3 2
#10 2 3 1 3 1
#11 2 3 2 2 3
#12 2 4 3 1 2
#13 2 4 1 2 1
#14 2 4 3 2 1
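Since distinct() keeps the first occurrence, the "highest sum of degree" requirement from the question can be handled by arranging first. A minimal sketch, assuming the sum-of-degree column is named sum_degree (a hypothetical name; substitute your actual 33rd column):
library(dplyr)
# Sort so the highest sum of degree comes first, then keep the first row
# of every distinct combination of the 30 variable columns (positions 1:30).
zz <- rule2 %>%
   arrange(desc(sum_degree)) %>%    # sum_degree is a placeholder column name
   distinct(across(1:30), .keep_all = TRUE)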

How to recode multiple variables for a subset of a dataframe?

I'm lost, so any directions would be helpful. Let's say I have a dataframe:
df <- data.frame(
   id = 1:12,
   v1 = rep(c(1:4), 3),
   v2 = rep(c(1:3), 4),
   v3 = rep(c(1:6), 2),
   v4 = rep(c(1:2), 6))
My goal would be to recode 2=4 and 4=2 for variables v3 and v4 but only for the first 4 cases (id < 5). I'm looking for a solution that works for up to twenty variables. I know how to do basic recoding but I don't see a simple way to implement the subset condition while manipulating multiple variables.
Here is a base R solution,
df[1:5, c('v3', 'v4')] <- lapply(df[1:5, c('v3', 'v4')], function(i)
   ifelse(i == 2, 4, ifelse(i == 4, 2, i)))
which gives,
id v1 v2 v3 v4
1 1 1 1 1 1
2 2 2 2 4 4
3 3 3 3 3 1
4 4 4 1 2 4
5 5 1 2 5 1
6 6 2 3 6 2
7 7 3 1 1 1
8 8 4 2 2 2
9 9 1 3 3 1
10 10 2 1 4 2
11 11 3 2 5 1
12 12 4 3 6 2
You can try mutate_at with case_when in dplyr
library(dplyr)
df %>%
   mutate_at(vars(v3:v4), ~ case_when(id < 5 & . == 4 ~ 2L,
                                      id < 5 & . == 2 ~ 4L,
                                      TRUE ~ .))
# id v1 v2 v3 v4
#1 1 1 1 1 1
#2 2 2 2 4 4
#3 3 3 3 3 1
#4 4 4 1 2 4
#5 5 1 2 5 1
#6 6 2 3 6 2
#7 7 3 1 1 1
#8 8 4 2 2 2
#9 9 1 3 3 1
#10 10 2 1 4 2
#11 11 3 2 5 1
#12 12 4 3 6 2
With mutate_at you can specify a range of columns to apply the function to.
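Since mutate_at() is superseded in current dplyr, a hedged across() version of the same idea would be:
library(dplyr)
df %>%
   mutate(across(v3:v4, ~ case_when(id < 5 & .x == 4 ~ 2L,
                                    id < 5 & .x == 2 ~ 4L,
                                    TRUE ~ .x)))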
Another, more direct, option is to get the indices of the numbers to replace, and to replace them by 6 minus the number (6-4=2, 6-2=4):
whToChange <- which(df[1:5, c("v3", "v4")] ==2 | df[1:5, c("v3", "v4")]==4, arr.ind=TRUE)
df[, c("v3", "v4")][whToChange] <- 6-df[, c("v3", "v4")][whToChange]
head(df, 5)
# id v1 v2 v3 v4
#1 1 1 1 1 1
#2 2 2 2 4 4
#3 3 3 3 3 1
#4 4 4 1 2 4
#5 5 1 2 5 1
You can use match and a lookup table - just in case you have to recode more than two values.
rosetta <- matrix(c(2,4,4,2), 2)
df[1:4, c("v3", "v4")] <- lapply(df[1:4, c("v3", "v4")], function(x) {
i <- match(x, rosetta[1,]); j <- !is.na(i); "[<-"(x, j, rosetta[2, i[j]])})
df
# id v1 v2 v3 v4
#1 1 1 1 1 1
#2 2 2 2 4 4
#3 3 3 3 3 1
#4 4 4 1 2 4
#5 5 1 2 5 1
#6 6 2 3 6 2
#7 7 3 1 1 1
#8 8 4 2 2 2
#9 9 1 3 3 1
#10 10 2 1 4 2
#11 11 3 2 5 1
#12 12 4 3 6 2
Also have a look at R: How to recode multiple variables at once or Recoding multiple variables in R.

Count with table() and exclude 0's

I am trying to count triplets; for this I use three vectors that are packed into a dataframe:
X=c(4,4,4,4,4,4,4,4,1,1,1,1,1,1,1,1,2,2,2,2,2,2,3,3,3,3,3,3,3,3)
Y=c(1,1,1,1,1,1,1,1,1,1,1,1,2,2,3,4,2,2,2,2,3,4,1,1,2,2,3,3,4,4)
Z=c(4,4,5,4,4,4,4,4,6,1,1,1,1,1,1,1,2,2,2,2,7,2,3,3,3,3,3,3,3,3)
Count_Frame=data.frame(matrix(NA, nrow=(length(X)), ncol=3))
Count_Frame[1]=X
Count_Frame[2]=Y
Count_Frame[3]=Z
Counts=data.frame(table(Count_Frame))
There is the following problem: if I increase the value range in the vectors or use even more vectors the "Counts" dataframe quickly approaches its size limit due to the many 0-counts. Is there a way to exclude the 0-counts while generating "Counts"?
We can use data.table. Convert the 'data.frame' to a 'data.table' (setDT(Count_Frame)); grouped by all the columns (.(X1, X2, X3), the names created by the matrix() step above), we get the number of rows (.N).
library(data.table)
setDT(Count_Frame)[, .N, .(X1, X2, X3)]
#    X1 X2 X3 N
# 1: 4 1 4 7
# 2: 4 1 5 1
# 3: 1 1 6 1
# 4: 1 1 1 3
# 5: 1 2 1 2
# 6: 1 3 1 1
# 7: 1 4 1 1
# 8: 2 2 2 4
# 9: 2 3 7 1
#10: 2 4 2 1
#11: 3 1 3 2
#12: 3 2 3 2
#13: 3 3 3 2
#14: 3 4 3 2
Instead of naming all the columns, we can use names(Count_Frame) as well (if there are many columns)
setDT(Count_Frame)[, .N, names(Count_Frame)]
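For comparison, dplyr's count() gives the same positive-count summary directly (the count column it creates is named n):
library(dplyr)
# count() tabulates only combinations that actually occur, so zero counts
# never appear in the result.
count(Count_Frame, X1, X2, X3)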
You can accomplish this with aggregate:
Count_Frame$one <- 1
aggregate(one ~ X1 + X2 + X3, data=Count_Frame, FUN=sum)
This calculates the same counts as table(), but does not list the zero counts.
One solution is to create a combination of the column values and count those instead:
library(tidyr)
as.data.frame(table(unite(Count_Frame, tmp, X1, X2, X3))) %>%
   separate(Var1, c('X1', 'X2', 'X3'))
Resulting output is:
X1 X2 X3 Freq
1 1 1 1 3
2 1 1 6 1
3 1 2 1 2
4 1 3 1 1
5 1 4 1 1
6 2 2 2 4
7 2 3 7 1
8 2 4 2 1
9 3 1 3 2
10 3 2 3 2
11 3 3 3 2
12 3 4 3 2
13 4 1 4 7
14 4 1 5 1
Or using plyr:
library(plyr)
count(Count_Frame, colnames(Count_Frame))
Output:
# > count(Count_Frame, colnames(Count_Frame))
# X1 X2 X3 freq
# 1 1 1 1 3
# 2 1 1 6 1
# 3 1 2 1 2
# 4 1 3 1 1
# 5 1 4 1 1
# 6 2 2 2 4
# 7 2 3 7 1
# 8 2 4 2 1
# 9 3 1 3 2
# 10 3 2 3 2
# 11 3 3 3 2
# 12 3 4 3 2
# 13 4 1 4 7
# 14 4 1 5 1
