Consider this dataframe:
dat <- structure(list(col1 = c(1, 2, 0), col2 = c(0, 3, 2), col3 = c(1, 2, 3)), class = "data.frame", row.names = c(NA, -3L))
col1 col2 col3
1 1 0 1
2 2 3 2
3 0 2 3
How can one dummify rows? That is, whenever a row has more than one non-zero value, separate it into multiple rows with one non-zero value per row.
In this case, this would be:
col1 col2 col3
1 1 0 0
2 0 0 1
3 2 0 0
4 0 3 0
5 0 0 2
6 0 2 0
7 0 0 3
You can pivot to long format, give every value its own row id, pivot back to wide filling the gaps with 0, and drop the all-zero rows that came from the original 0s:
library(tidyverse)
dat |>
pivot_longer(everything()) |>
mutate(id = 1:n()) |>
pivot_wider(values_fill = 0) |>
filter(!if_all(-id, ~ . == 0)) |>
select(-id)
# A tibble: 7 x 3
col1 col2 col3
<dbl> <dbl> <dbl>
1 1 0 0
2 0 0 1
3 2 0 0
4 0 3 0
5 0 0 2
6 0 2 0
7 0 0 3
Another approach; here I used data.table. Each row is turned into a diagonal matrix and the all-zero rows are dropped (for a diagonal matrix, colSums(x) > 0 flags exactly the rows that contain a non-zero value), then everything is stacked back together:
library(data.table)
x <- rbindlist(apply(dat, 1, function(x) {
x <- data.table(diag(x, ncol(dat)))
x[colSums(x) > 0]
}))
setnames(x, names(dat))
x
# col1 col2 col3
# 1: 1 0 0
# 2: 0 0 1
# 3: 2 0 0
# 4: 0 3 0
# 5: 0 0 2
# 6: 0 2 0
# 7: 0 0 3
A very ugly way is:
library(tidyverse)
dat %>%
apply(1, diag) %>%
matrix(nrow = 3) %>%
t() %>%
as.data.frame() %>%
rename_with(~ names(dat), everything()) %>%
filter(rowSums(.) != 0)
col1 col2 col3
1 1 0 0
2 0 0 1
3 2 0 0
4 0 3 0
5 0 0 2
6 0 2 0
7 0 0 3
I have a dataframe with identifiers and storm categories. Right now the categories are in one column, but I want to add a column for each category with a 1 or 0 value. I don't think I want to reshape the data to wide, because the actual dataset has a number of long-format variables I want to keep. I am currently using a series of ifelse statements, but it feels like there is probably a much better way:
library(dplyr)
library(tidyr)
df <- data.frame(
ID = c("A", "B", "C", "D", "A", "B", "C", "D", "A", "B", "C", "D"),
cat = c("TS", NA, NA, "TS", "1", "1", NA, NA, "2", NA, NA, NA)
)
df$cat_TS <- ifelse(df$cat == "TS", 1, 0) %>% replace_na(., 0)
df$cat_1 <- ifelse(df$cat == "1", 1, 0) %>% replace_na(., 0)
df$cat_2 <- ifelse(df$cat == "2", 1, 0) %>% replace_na(., 0)
We may use pivot_wider: create a sequence column 'rn', then reshape to wide with values_fn = length and values_fill = 0.
library(dplyr)
library(tidyr)
library(stringr)
df %>%
mutate(rn = row_number(), cat1 = cat) %>%
pivot_wider(names_from = cat1, values_from = cat1,
values_fn = length, values_fill = 0, names_prefix = "cat_")%>%
select(-cat_NA, -rn)
-output
# A tibble: 12 × 5
ID cat cat_TS cat_1 cat_2
<chr> <chr> <int> <int> <int>
1 A TS 1 0 0
2 B <NA> 0 0 0
3 C <NA> 0 0 0
4 D TS 1 0 0
5 A 1 0 1 0
6 B 1 0 1 0
7 C <NA> 0 0 0
8 D <NA> 0 0 0
9 A 2 0 0 1
10 B <NA> 0 0 0
11 C <NA> 0 0 0
12 D <NA> 0 0 0
Or use fastDummies (with ignore_na = TRUE the dummy columns are NA for NA inputs, hence the replace_na() afterwards):
library(fastDummies)
df %>%
dummy_cols("cat", remove_selected_columns = FALSE, ignore_na = TRUE) %>%
mutate(across(starts_with('cat_'), ~ replace_na(.x, 0)))
-output
ID cat cat_1 cat_2 cat_TS
1 A TS 0 0 1
2 B <NA> 0 0 0
3 C <NA> 0 0 0
4 D TS 0 0 1
5 A 1 1 0 0
6 B 1 1 0 0
7 C <NA> 0 0 0
8 D <NA> 0 0 0
9 A 2 0 1 0
10 B <NA> 0 0 0
11 C <NA> 0 0 0
12 D <NA> 0 0 0
An idea using base R
First, get all unique category names
cats <- unique(df$cat[!is.na(df$cat)])
cats
[1] "TS" "1" "2"
Then look for matches in column cat for each entry in cats. PS: I left the cat column in to show the matching is right; to drop it, use df$ID instead of df as the first argument of cbind (see the sketch after the output below).
cbind(df, setNames(data.frame(sapply(seq_along(cats), function(x)
df$cat %in% cats[x]) * 1), cats))
ID cat TS 1 2
1 A TS 1 0 0
2 B <NA> 0 0 0
3 C <NA> 0 0 0
4 D TS 1 0 0
5 A 1 0 1 0
6 B 1 0 1 0
7 C <NA> 0 0 0
8 D <NA> 0 0 0
9 A 2 0 0 1
10 B <NA> 0 0 0
11 C <NA> 0 0 0
12 D <NA> 0 0 0
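As noted above, to drop the cat column, pass df$ID instead of df as the first argument of cbind; a quick sketch using the same cats:
cbind(ID = df$ID, setNames(data.frame(sapply(seq_along(cats), function(x)
  df$cat %in% cats[x]) * 1), cats))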
Suppose I have a data set like this:
dat <- tibble(id = 1:4,
col1 = c(0, 1, 1, 0),
col2 = c(1, 0, 1, 0),
col3 = c(1, 1, 0, 1))
> dat
# A tibble: 4 × 4
id col1 col2 col3
<int> <dbl> <dbl> <dbl>
1 1 0 1 1
2 2 1 0 1
3 3 1 1 0
4 4 0 0 1
I'd like to separate, for every unique id, the multiple 1s into multiple rows, i.e. the expected output is:
# A tibble: 7 × 4
id col1 col2 col3
<dbl> <dbl> <dbl> <dbl>
1 1 0 1 0
2 1 0 0 1
3 2 1 0 0
4 2 0 0 1
5 3 1 0 0
6 3 0 1 0
7 4 0 0 1
For the first id (id = 1), col2 and col3 are both 1, so I would like a separate row for each of them. It is kind of like one-hot encoding, but for rows.
With help from Ritchie Sacramento and RobertoT
library(tidyverse)
dat <- tibble(id = 1:4,
col1 = c(0, 1, 1, 0),
col2 = c(1, 0, 1, 0),
col3 = c(1, 1, 0, 1))
dat %>%
pivot_longer(-id) %>%
filter(value != 0) %>%
mutate(rows = 1:nrow(.)) %>%
pivot_wider(values_fill = 0,
names_sort = TRUE) %>%
select(-rows)
# A tibble: 7 × 4
id col1 col2 col3
<int> <dbl> <dbl> <dbl>
1 1 0 1 0
2 1 0 0 1
3 2 1 0 0
4 2 0 0 1
5 3 1 0 0
6 3 0 1 0
7 4 0 0 1
Here is an alternative approach using model.matrix():
From the documentation: model.matrix creates a design (or model) matrix, e.g., by expanding factors to a set of dummy variables (depending on the contrasts) and expanding interactions similarly.
library(dplyr)
library(tidyr)
dat %>%
pivot_longer(-id) %>%
filter(value == 1) %>%
cbind((model.matrix(~ name + 0, .) == 1)*1)
id name value namecol1 namecol2 namecol3
1 1 col2 1 0 1 0
2 1 col3 1 0 0 1
3 2 col1 1 1 0 0
4 2 col3 1 0 0 1
5 3 col1 1 1 0 0
6 3 col2 1 0 1 0
7 4 col3 1 0 0 1
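For reference, a minimal standalone look at what that expansion does, using a made-up character vector (model.matrix() converts it to a factor, and + 0 drops the intercept so every level gets its own indicator column):
model.matrix(~ name + 0, data.frame(name = c("col2", "col3", "col1")))
# one column per level (namecol1, namecol2, namecol3), with a single 1 per row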
You could do
arrange(bind_rows(lapply(2:4, function(x) {
d <- dat[dat[[x]] == 1,]
d[-c(1, x)] <- 0
d})), id)
#> # A tibble: 7 x 4
#> id col1 col2 col3
#> <int> <dbl> <dbl> <dbl>
#> 1 1 0 1 0
#> 2 1 0 0 1
#> 3 2 1 0 0
#> 4 2 0 0 1
#> 5 3 1 0 0
#> 6 3 0 1 0
#> 7 4 0 0 1
Created on 2022-07-14 by the reprex package (v2.0.1)
Using explicit loops:
nullrow <- rep(0, ncol(dat)-1)
data <- dat[,-1]
rowsums <- apply(data, 1, sum)
res <- data[0,]
ids <- c()
for(i in 1:nrow(data)) {
if(rowsums[i]>0) {
for(j in 1:rowsums[i]) {
thisrow <- nullrow
thiscolumn <- which(data[i,]==1)[j]
thisrow[thiscolumn] <- 1
res <- rbind(res, thisrow)
}
ids <- c(ids, rep(dat$id[i], rowsums[i]))
}
}
names(res) <- colnames(data)
res$id <- ids
> res
col1 col2 col3 id
1 0 1 0 1
2 0 0 1 1
3 1 0 0 2
4 0 0 1 2
5 1 0 0 3
6 0 1 0 3
7 0 0 1 4
A possible solution, based on purrr::pmap_dfr and on the following ideas:
Loop over all dataframe rows.
Use each row to create a diagonal matrix with the contents of the diagonal being the dataframe row.
Filter out the rows that only have zeros.
library(tidyverse)
pmap_dfr(dat, ~ data.frame(id = ..1, diag(c(...)[-1]))) %>%
filter(if_any(X1:X3, ~ .x != 0))
#> id X1 X2 X3
#> 1 1 0 1 0
#> 2 1 0 0 1
#> 3 2 1 0 0
#> 4 2 0 0 1
#> 5 3 1 0 0
#> 6 3 0 1 0
#> 7 4 0 0 1
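If the original column names are wanted back (the diagonal columns come out as X1:X3), one possible tweak of the same pipeline, assuming dat as defined above, is to rename at the end:
pmap_dfr(dat, ~ data.frame(id = ..1, diag(c(...)[-1]))) %>%
  filter(if_any(X1:X3, ~ .x != 0)) %>%
  setNames(names(dat))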
Another possible solution, based on Matrix::sparseMatrix:
First, it gets the indices of the 1s (with which).
Second, it adjusts the row indices to force one 1 per row.
Third, it creates a sparse matrix, putting the 1s where the adjusted indices specify.
library(tidyverse)
library(Matrix)
which(dat[-1] == 1, arr.ind = T) %>%
as.data.frame %>%
arrange(row) %>%
mutate(id = dat[row,"id"], row = 1:n()) %>%
{data.frame(id = .$id, as.matrix( sparseMatrix(i = .$row, j= .$col, x= 1)))}
#> id X1 X2 X3
#> 1 1 0 1 0
#> 2 1 0 0 1
#> 3 2 1 0 0
#> 4 2 0 0 1
#> 5 3 1 0 0
#> 6 3 0 1 0
#> 7 4 0 0 1
Yet another possible solution:
library(tidyverse)
f <- function(df)
{
  got <- 0
  for (i in 1:nrow(df))
  {
    # position of the first 1 to the right of the column used for the previous row
    got <- which.max(df[i, (got+1):ncol(df)]) + got
    # zero out every other column in this row
    df[i, -got] <- 0
  }
  df
}
dat %>%
  # repeat each row once per 1 it contains
  slice(map(1:nrow(dat), ~ rep(.x, rowSums(dat[-1])[.x])) %>% unlist) %>%
  group_by(id) %>%
  # within each id, keep a different single 1 in each copy
  group_modify(~ f(.)) %>%
  ungroup
#> # A tibble: 7 × 4
#> id col1 col2 col3
#> <int> <dbl> <dbl> <dbl>
#> 1 1 0 1 0
#> 2 1 0 0 1
#> 3 2 1 0 0
#> 4 2 0 0 1
#> 5 3 1 0 0
#> 6 3 0 1 0
#> 7 4 0 0 1
I have a 0/1 vector like this in R (stored as a column of a data frame):
dt = data.frame(input=c(0,0,1,1,0,0,1,0,0,0,1,1,1,0,1) )
dt
input
# 1 0
# 2 0
# 3 1
# 4 1
# 5 0
# 6 0
# 7 1
# 8 0
# 9 0
# 10 0
# 11 1
# 12 1
# 13 1
# 14 0
# 15 1
I want to replace runs of consecutive 0s whose length is less than three with 1, and save the result in a new column.
Update:
I also want the replacement to happen only when the (fewer than three) 0s are sandwiched between 1s, so in this condition the two 0s in rows 1 and 2 are ignored (the same applies when the run sits at the tail or next to an NA).
For example, I want to output:
input output
# 1 0 0
# 2 0 0
# 3 1 1
# 4 1 1
# 5 0 1
# 6 0 1
# 7 1 1
# 8 0 0
# 9 0 0
# 10 0 0
# 11 1 1
# 12 1 1
# 13 1 1
# 14 0 1
# 15 1 1
How can I write this, for example in a foreach loop? (The data has thousands of rows.)
Thanks.
Create a grouping column with rleid on the 'input' column; within each group, if the number of rows is less than 3, all values are 0, and a 1 has already occurred before the run (checked with a cumulative sum), replace with 1, otherwise return the input:
library(dplyr)
library(data.table)
dt %>%
mutate(new = cumsum(input)) %>%
group_by(grp = rleid(input)) %>%
mutate(output = if(n() <3 & all(input == 0) & all(new > 0)) 1 else input) %>%
ungroup %>%
select(-grp, -new)
-output
# A tibble: 15 × 2
input output
<dbl> <dbl>
1 0 0
2 0 0
3 1 1
4 1 1
5 0 1
6 0 1
7 1 1
8 0 0
9 0 0
10 0 0
11 1 1
12 1 1
13 1 1
14 0 1
15 1 1
Or use base R with rle: edit the run-length encoding directly (set zero runs shorter than 3 to 1, skipping a run at the very start) and rebuild the vector with inverse.rle
dt$output <- inverse.rle(within.list(rle(dt$input),
values[!values & lengths < 3 & seq_along(values) != 1] <- 1))
dt$output
#[1] 0 0 1 1 1 1 1 0 0 0 1 1 1 1 1
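For reference, a minimal sketch of the rle()/inverse.rle() round trip this one-liner relies on (a toy vector, not the question's data):
r <- rle(c(0, 0, 1, 1, 1, 0))
r$values         # the runs: 0 1 0
r$lengths        # their lengths: 2 3 1
inverse.rle(r)   # rebuilds c(0, 0, 1, 1, 1, 0)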
Update after clarification:
We could now ungroup() and check whether the zeros are preceded by a 1 with lag(input==1):
dt %>%
mutate(
x= cumsum(input != lag(input, def = first(input)))
) %>%
group_by(x) %>%
mutate(x = seq_along(input),
x = last(x)) %>%
ungroup() %>%
mutate(output = case_when(input == 0 &
lag(input==1) &
x<=2 ~ 1,
TRUE ~ as.numeric(input))) %>%
select(-x)
Output:
A tibble: 15 x 2
input output
<dbl> <dbl>
1 0 0
2 0 0
3 1 1
4 1 1
5 1 1
6 0 1
7 1 1
8 0 0
9 0 0
10 0 0
11 1 1
12 1 1
13 1 1
14 0 1
15 1 1
First answer:
Here is a suggestion. But I don't understand rows 1 and 2 in your expected output: "replace consecutive 0s whose run length is less than three with 1" also applies to rows 1 and 2.
dt %>%
mutate(
x= cumsum(input != lag(input, def = first(input)))
) %>%
group_by(x) %>%
mutate(x = seq_along(input),
x = last(x)) %>%
mutate(output = case_when(input == 0 & x<=2 ~ 1,
TRUE ~ as.numeric(input))) %>%
ungroup() %>%
select(-x)
input output
<dbl> <dbl>
1 0 1
2 0 1
3 1 1
4 1 1
5 0 1
6 0 1
7 1 1
8 0 0
9 0 0
10 0 0
11 1 1
12 1 1
13 1 1
14 0 1
15 1 1
Update following reformulation of the question: This tidyverse approach simply makes use of case_when().
library(dplyr)
mutate(dt, inputX = case_when(input == 0 &
lag(input) == 1 &
lead(input) == 1 ~ 1,
input == 0 &
lag(input) == 0 &
lag(input, n = 2) == 1 &
lead(input) == 1 ~ 1,
T ~ input))
# input inputX
# 1 0 0
# 2 0 0
# 3 1 1
# 4 1 1
# 5 1 1
# 6 0 1
# 7 1 1
# 8 0 0
# 9 0 0
# 10 0 0
# 11 1 1
# 12 1 1
# 13 1 1
# 14 0 1
# 15 1 1
Previous solution: Having understood the requirements like Tarjae did, a tidyverse option could look as follows.
library(dplyr)
dt %>%
mutate(x = cumsum(input)) %>%
group_by(x) %>%
mutate(y = +(n() %in% 2:3)) %>%
ungroup() %>%
transmute(input = input,
inputX = if_else(y == 1, 1, input))
# # A tibble: 15 x 2
# input inputX
# <dbl> <dbl>
# 1 0 1
# 2 0 1
# 3 1 1
# 4 1 1
# 5 0 1
# 6 0 1
# 7 1 1
# 8 0 0
# 9 0 0
# 10 0 0
# 11 1 1
# 12 1 1
# 13 1 1
# 14 0 1
# 15 1 1
I'm trying to add several sets of columns together.
Example df:
df <- data.frame(
key = 1:5,
ab0 = c(1,0,0,0,1),
ab1 = c(0,2,1,0,0),
ab5 = c(1,0,0,0,1),
bc0 = c(0,1,0,2,0),
bc1 = c(2,0,0,0,0),
bc5 = c(0,2,1,0,1),
df0 = c(0,0,0,1,0),
df1 = c(1,0,3,0,0),
df5 = c(1,0,0,0,6)
)
Giving me:
key ab0 ab1 ab5 bc0 bc1 bc5 df0 df1 df5
1 1 1 0 1 0 2 0 0 1 1
2 2 0 2 0 1 0 2 0 0 0
3 3 0 1 0 0 0 1 0 3 0
4 4 0 0 0 2 0 0 1 0 0
5 5 1 0 1 0 0 1 0 0 6
I want to add each pair of columns whose names end in 0 and 5 together and place the result in the 0 column.
So the end result would be:
key ab0 ab1 ab5 bc0 bc1 bc5 df0 df1 df5
1 1 2 0 1 0 2 0 1 1 1
2 2 0 2 0 3 0 2 0 0 0
3 3 0 1 0 1 0 1 0 3 0
4 4 0 0 0 2 0 0 1 0 0
5 5 2 0 1 1 0 1 6 0 6
I could add the columns together using 3 lines:
df$ab0 <- df$ab0 + df$ab5
df$bc0 <- df$bc0 + df$bc5
df$df0 <- df$df0 + df$df5
But my real example has over a hundred columns so I'd like to iterate over them and use apply.
The column names of the first set are contained in col0 and the names of the second set are in col5.
col0 <- c("ab0","bc0","df0")
col5 <- c("ab5","bc5","df5")
I created a function to add the columns together using mapply:
fun1 <- function(df,x,y) {
df[,x] <- df[,x] + df[,y]
}
mapply(fun1,df,col0,col5)
But I get an error: Error in df[, x] : incorrect number of dimensions
Thoughts?
Simply add the two subsets of columns together as data frames, assuming they are the same length. No loops needed; it is all vectorized operations.
final_df <- df[grep("0", names(df))] + df[grep("5", names(df))]
final_df <- cbind(final_df, df[grep("0", names(df), invert=TRUE)])
final_df <- final_df[order(names(final_df))]
final_df
# ab0 ab1 ab5 bc0 bc1 bc5 df0 df1 df5 key
# 1 2 0 1 0 2 0 1 1 1 1
# 2 0 2 0 3 0 2 0 0 0 2
# 3 0 1 0 1 0 1 0 3 0 3
# 4 0 0 0 2 0 0 1 0 0 4
# 5 2 0 1 1 0 1 6 0 6 5
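A related shortcut: since the question already defines col0 and col5, those name vectors can be used directly for the same vectorized addition; a minimal sketch, assuming each 0 column lines up with its matching 5 column:
df[col0] <- df[col0] + df[col5]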
You could use map2 from the purrr package to iterate over the two vectors at once:
df <- data.frame(
key = 1:5,
ab0 = c(1,0,0,0,1),
ab1 = c(0,2,1,0,0),
ab5 = c(1,0,0,0,1),
bc0 = c(0,1,0,2,0),
bc1 = c(2,0,0,0,0),
bc5 = c(0,2,1,0,1),
df0 = c(0,0,0,1,0),
df1 = c(1,0,3,0,0),
df5 = c(1,0,0,0,6)
)
col0 <- c("ab0","bc0","df0")
col5 <- c("ab5","bc5","df5")
purrr::map2(col0, col5, function(x, y) {
df[[x]] <<- df[[x]] + df[[y]]
})
> df
key ab0 ab1 ab5 bc0 bc1 bc5 df0 df1 df5
1 1 2 0 1 0 2 0 1 1 1
2 2 0 2 0 3 0 2 0 0 0
3 3 0 1 0 1 0 1 0 3 0
4 4 0 0 0 2 0 0 1 0 0
5 5 2 0 1 1 0 1 6 0 6
Here's an approach using tidyr and dplyr from the tidyverse meta-package.
First, I bring the table into long ("tidy") format, split each column name into two components, and spread by the number part of those components.
Then I do the calculation you describe.
Finally, I bring it back into the original format using the inverse of step 1.
library(tidyverse)
df_tidy <- df %>%
# Step 1
gather(col, value, -key) %>%
separate(col, into = c("grp", "num"), 2) %>%
spread(num, value) %>%
# Step 2
mutate(`0` = `0` + `5`) %>%
# Step 3, which is just the inverse of Step 1.
gather(num, value, -key, - grp) %>%
unite(col, c("grp", "num")) %>%
spread(col, value)
df_tidy
key ab_0 ab_1 ab_5 bc_0 bc_1 bc_5 df_0 df_1 df_5
1 1 2 0 1 0 2 0 1 1 1
2 2 0 2 0 3 0 2 0 0 0
3 3 0 1 0 1 0 1 0 3 0
4 4 0 0 0 2 0 0 1 0 0
5 5 2 0 1 1 0 1 6 0 6
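A small note on the names in this result: unite() uses "_" as its default separator, which is why the columns come back as ab_0, ab_1, and so on. Passing an empty separator in step 3, i.e.
unite(col, c("grp", "num"), sep = "")
would restore the original ab0-style names.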
I have a dataframe which has a column,
service-id
ids-1-2-3-4-5
ids-1-2-6
ids-5
ids-7-8
with many other columns.
I want to split data like ids-1-2-3-4-5 into separate columns 1, 2, 3, ..., 8, one-hot encoded: a column for each id, with 1 if the id is present and 0 otherwise.
col.1 col.2 col.3 col.4 col.5 col.6 ..... col.8
1 1 1 1 1 0 ..... 0 for ids-1-2-3-4-5
1 1 0 0 0 1 ...... 0 for ids-1-2-6
I tried tidyverse but it is not helpful.
A solution using base R code.
Your data
db<-data.frame("service-id"=c("ids-1-2-3-4-5","ids-1-2-6","ids-5","ids-7-8"))
Identify number of columns
ncol<-max(suppressWarnings(as.numeric(unlist(strsplit(as.character(db$service.id),"-")))),na.rm = T)
Extract numeric id list
number_list<-strsplit(as.character(db$service.id),"-")
number_list<-suppressWarnings(lapply(number_list,as.numeric))
number_list <- lapply(number_list, function(x) x[!is.na(x)])
Create output dataframe
f<-function(x,ncol)
{
return(as.numeric(seq(1:ncol) %in% x))
}
out<-t(data.frame(lapply(number_list, f, ncol=ncol)))
colnames(out)<-paste0("col.",seq(1:ncol))
rownames(out)<-NULL
Your output
out
col.1 col.2 col.3 col.4 col.5 col.6 col.7 col.8
[1,] 1 1 1 1 1 0 0 0
[2,] 1 1 0 0 0 1 0 0
[3,] 0 0 0 0 1 0 0 0
[4,] 0 0 0 0 0 0 1 1
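Since the question mentions many other columns that should stay alongside, the indicator matrix can be bound back onto the original dataframe (the row order is unchanged), for example:
cbind(db, out)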
If we need a tidyverse option, here is a way
library(tidyverse)
df1 %>%
rownames_to_column('rn') %>%
extract(service.id, into = c('id', 'col'), "^([^-]+)-(.*)") %>%
separate_rows(col) %>%
mutate(n = 1, col = paste0("col.", col)) %>%
spread(col, n, fill = 0) %>%
select(-rn, -id)
# col.1 col.2 col.3 col.4 col.5 col.6 col.7 col.8
#1 1 1 1 1 1 0 0 0
#2 1 1 0 0 0 1 0 0
#3 0 0 0 0 1 0 0 0
#4 0 0 0 0 0 0 1 1
data
df1 <- structure(list(service.id = c("ids-1-2-3-4-5", "ids-1-2-6", "ids-5",
"ids-7-8")), .Names = "service.id", class = "data.frame", row.names = c(NA,
-4L))
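For completeness, a sketch of the same idea with the newer pivot functions (gather/spread are superseded in current tidyr). This assumes the same df1; the rn column just keeps one output row per input row:
library(tidyverse)
df1 %>%
  mutate(rn = row_number()) %>%
  separate_rows(service.id, sep = "-") %>%
  filter(service.id != "ids") %>%
  mutate(value = 1, service.id = paste0("col.", service.id)) %>%
  pivot_wider(names_from = service.id, values_from = value,
              values_fill = 0, names_sort = TRUE) %>%
  select(-rn)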