How to remove rows by condition - r

I'm trying to delete rows for which a condition is not satisfied,
e.g. remove the rows of any Subject that does not have values for all Periods.
The following is the dataframe:
Subject Period
1 1
1 2
1 3
2 1
2 2
2 3
3 1
3 2
4 1
4 2
4 3
The desired output is:
Subject Period
1 1
1 2
1 3
2 1
2 2
2 3
4 1
4 2
4 3

A dplyr solution.
library(dplyr)
dat %>%
  group_by(Subject) %>%
  filter(all(unique(dat$Period) %in% Period)) %>%
  ungroup()
# # A tibble: 9 x 2
# Subject Period
# <int> <int>
# 1 1 1
# 2 1 2
# 3 1 3
# 4 2 1
# 5 2 2
# 6 2 3
# 7 4 1
# 8 4 2
# 9 4 3
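Note that dat$Period inside filter() deliberately refers to the whole data frame, so each group is checked against every Period value observed overall. An equivalent sketch (a variant of mine, not part of the original answer) that makes this explicit by computing the reference set up front:
# Compute the full set of Periods once, then keep only Subjects
# that contain every one of them.
all_periods <- unique(dat$Period)
dat %>%
  group_by(Subject) %>%
  filter(all(all_periods %in% Period)) %>%
  ungroup()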
A base R solution.
dat_list <- split(dat, f = dat$Subject)
keep_vec <- sapply(dat_list, function(x) all(unique(dat$Period) %in% x$Period))
dat_keep <- dat_list[keep_vec]
dat2 <- do.call(rbind, dat_keep)
dat2
# Subject Period
# 1.1 1 1
# 1.2 1 2
# 1.3 1 3
# 2.4 2 1
# 2.5 2 2
# 2.6 2 3
# 4.9 4 1
# 4.10 4 2
# 4.11 4 3
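If the composite row names produced by rbind (1.1, 2.4, ...) are unwanted, they can be reset afterwards:
rownames(dat2) <- NULL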
A solution using purrr and dplyr.
library(purrr)
library(dplyr)
dat2 <- dat %>%
  split(f = .$Subject) %>%
  keep(~ all(unique(dat$Period) %in% .x$Period)) %>%
  bind_rows()
dat2
# Subject Period
# 1 1 1
# 2 1 2
# 3 1 3
# 4 2 1
# 5 2 2
# 6 2 3
# 7 4 1
# 8 4 2
# 9 4 3
DATA
dat <- read.table(text = "Subject Period
1 1
1 2
1 3
2 1
2 2
2 3
3 1
3 2
4 1
4 2
4 3",
header = TRUE)

Consider ave for inline aggregation, then subset accordingly, keeping the Subjects whose maximum Period is 3:
sub_df <- subset(df, ave(Period, Subject, FUN=max) == 3)
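A more general sketch (my variant) that avoids hard-coding the maximum Period: compare each Subject's count of distinct Periods against the overall count.
# Keep Subjects whose number of distinct Periods equals the number
# of distinct Periods in the whole data frame.
n_all <- length(unique(df$Period))
sub_df <- subset(df, ave(Period, Subject,
                         FUN = function(x) length(unique(x))) == n_all)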

Nested list to grouped rows in R

I have the following nested list called l (dput below):
> l
$A
$A$`1`
[1] 1 2 3

$A$`2`
[1] 3 2 1

$B
$B$`1`
[1] 2 2 2

$B$`2`
[1] 3 4 3
I would like to convert this to a grouped dataframe where A and B are the first group column and 1 and 2 are the subgroups with respective values. The desired output should look like this:
group subgroup values
1 A 1 1
2 A 1 2
3 A 1 3
4 A 2 3
5 A 2 2
6 A 2 1
7 B 1 2
8 B 1 2
9 B 1 2
10 B 2 3
11 B 2 4
12 B 2 3
As you can see A and B are the main group and 1 and 2 are the subgroups. Using purrr::flatten(l) or unnest doesn't work. So I was wondering if anyone knows how to convert a nested list to a grouped row dataframe?
dput of l:
l <- list(A = list(`1` = c(1, 2, 3), `2` = c(3, 2, 1)), B = list(`1` = c(2,
2, 2), `2` = c(3, 4, 3)))
Using stack on each sublist and row-binding with an id column:
data.table::rbindlist(lapply(l, stack), idcol = "id")
# id values ind
# 1: A 1 1
# 2: A 2 1
# 3: A 3 1
# 4: A 3 2
# 5: A 2 2
# 6: A 1 2
# 7: B 2 1
# 8: B 2 1
# 9: B 2 1
# 10: B 3 2
# 11: B 4 2
# 12: B 3 2
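The column names differ from the desired output (id/ind instead of group/subgroup). A small follow-up sketch to rename and reorder them:
library(data.table)
res <- rbindlist(lapply(l, stack), idcol = "group")
setnames(res, "ind", "subgroup")
setcolorder(res, c("group", "subgroup", "values"))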
You can use enframe() to convert the list into a data.frame, and unnest the value column twice.
library(tidyr)
tibble::enframe(l, name = "group") %>%
  unnest_longer(value, indices_to = "subgroup") %>%
  unnest(value)
# A tibble: 12 × 3
group value subgroup
<chr> <dbl> <chr>
1 A 1 1
2 A 2 1
3 A 3 1
4 A 3 2
5 A 2 2
6 A 1 2
7 B 2 1
8 B 2 1
9 B 2 1
10 B 3 2
11 B 4 2
12 B 3 2
Turn the list directly into a data frame, then pivot it into a long format and arrange to your desired order.
library(tidyverse)
l %>%
  as.data.frame() %>%
  pivot_longer(everything(), names_to = c("group", "subgroup"),
               values_to = "values",
               names_pattern = "(.+?)\\.(.+?)") %>%
  arrange(group, subgroup)
# A tibble: 12 × 3
group subgroup values
<chr> <chr> <dbl>
1 A 1 1
2 A 1 2
3 A 1 3
4 A 2 3
5 A 2 2
6 A 2 1
7 B 1 2
8 B 1 2
9 B 1 2
10 B 2 3
11 B 2 4
12 B 2 3
You can combine rrapply with unnest, which has the benefit of working on nested lists of arbitrary depth:
library(rrapply)
library(tidyr)
rrapply(l, how = "melt") |>
  unnest(value)
# A tibble: 12 × 3
L1 L2 value
<chr> <chr> <dbl>
1 A 1 1
2 A 1 2
3 A 1 3
4 A 2 3
5 A 2 2
6 A 2 1
7 B 1 2
8 B 1 2
9 B 1 2
10 B 2 3
11 B 2 4
12 B 2 3
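As above, the default L1/L2 names can be mapped to the asked-for ones; a sketch assuming dplyr is available for rename():
library(dplyr)
rrapply(l, how = "melt") |>
  unnest(value) |>
  rename(group = L1, subgroup = L2, values = value)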

identify whenever values repeat in r

I have a dataframe like this.
data <- data.frame(Condition = c(1,1,2,3,1,1,2,2,2,3,1,1,2,3,3))
I want to populate a new variable Sequence which identifies whenever Condition starts again from 1.
So the new dataframe would look like this:
data <- data.frame(Condition = c(1,1,2,3,1,1,2,2,2,3,1,1,2,3,3),
                   Sequence = c(1,1,1,1,2,2,2,2,2,2,3,3,3,3,3))
Thanks in advance for the help!
base R
data$Sequence2 <- cumsum(c(TRUE, data$Condition[-1] == 1 & data$Condition[-nrow(data)] != 1))
data
# Condition Sequence Sequence2
# 1 1 1 1
# 2 1 1 1
# 3 2 1 1
# 4 3 1 1
# 5 1 2 2
# 6 1 2 2
# 7 2 2 2
# 8 2 2 2
# 9 2 2 2
# 10 3 2 2
# 11 1 3 3
# 12 1 3 3
# 13 2 3 3
# 14 3 3 3
# 15 3 3 3
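The base R one-liner is dense; a step-by-step sketch of the same idea marks each row that starts a new run of 1s and numbers those runs cumulatively:
# TRUE where Condition is 1 and the previous value wasn't
# (the first row counts as a start), then cumsum numbers the runs.
starts <- data$Condition == 1 & c(TRUE, head(data$Condition, -1) != 1)
data$Sequence3 <- cumsum(starts)
all(data$Sequence3 == data$Sequence)
# [1] TRUE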
dplyr
library(dplyr)
data %>%
  mutate(
    Sequence2 = cumsum(Condition == 1 & lag(Condition != 1, default = TRUE))
  )
# Condition Sequence Sequence2
# 1 1 1 1
# 2 1 1 1
# 3 2 1 1
# 4 3 1 1
# 5 1 2 2
# 6 1 2 2
# 7 2 2 2
# 8 2 2 2
# 9 2 2 2
# 10 3 2 2
# 11 1 3 3
# 12 1 3 3
# 13 2 3 3
# 14 3 3 3
# 15 3 3 3
This took a while. I finally found this solution:
library(dplyr)
data %>%
  group_by(Sequence = cumsum(
    ifelse(Condition == 1, lead(Condition) + 1, Condition)
    - Condition == 1)
  )
Condition Sequence
<dbl> <int>
1 1 1
2 1 1
3 2 1
4 3 1
5 1 2
6 1 2
7 2 2
8 2 2
9 2 2
10 3 2
11 1 3
12 1 3
13 2 3
14 3 3
15 3 3

Counter based on ID and value in a column

I have a dataframe that contains an ID and Type column. I want a counter per ID that starts at 1 and increments on the row after each "T" in Type; i.e. whenever Type is "T", the counter in the next row becomes counter + 1. Basically, the counter is the Output_Column in this example.
ID <- c(1,1,1,1,1,1,3,3,4,4,4,4)
Type <- c("A","A","T","A","A","A","A","A","T","A","T","A")
Output_Column <- c(1,1,1,2,2,2,1,1,1,2,2,3)
ID Type Output_Column
1 1 A 1
2 1 A 1
3 1 T 1
4 1 A 2
5 1 A 2
6 1 A 2
7 3 A 1
8 3 A 1
9 4 T 1
10 4 A 2
11 4 T 2
12 4 A 3
d <- data.frame(ID,Type, Output_Column)
base R solution
output_col <- as.numeric(ave(Type, ID, FUN = function(x) cumsum(c('T', x[-length(x)]) == 'T')))
output_col
[1] 1 1 1 2 2 2 1 1 1 2 2 3
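Spelled out, the ave() call shifts Type forward one row within each ID (seeding with "T" so every ID starts at 1) and cumulatively counts the "T"s seen so far; a sketch:
# Per ID: look at the previous row's Type (first row treated as "T")
# and count the "T"s cumulatively.
prev_counts <- function(x) cumsum(c("T", head(x, -1)) == "T")
as.numeric(ave(Type, ID, FUN = prev_counts))
# [1] 1 1 1 2 2 2 1 1 1 2 2 3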
Here's a data.table version:
library(data.table)
setDT(d)[, res := shift(cumsum(Type == 'T') + 1, fill = 1), ID]
d
# ID Type Output_Column res
# 1: 1 A 1 1
# 2: 1 A 1 1
# 3: 1 T 1 1
# 4: 1 A 2 2
# 5: 1 A 2 2
# 6: 1 A 2 2
# 7: 3 A 1 1
# 8: 3 A 1 1
# 9: 4 T 1 1
#10: 4 A 2 2
#11: 4 T 2 2
#12: 4 A 3 3
Here is a way to achieve it using group_by, lag, and cumsum
library(dplyr)
d %>%
  # group by ID so the calculation is done within each ID
  group_by(ID) %>%
  mutate(
    # create a counter variable that checks whether the previous Type is "T".
    # The default "T" means the first row of each ID starts at 1.
    counter = if_else(lag(Type, default = "T") == "T", 1, 0),
    # cumsum the counter, which gives the expected output column
    output_column_calculated = cumsum(counter)) %>%
  ungroup() %>%
  # remove the counter column if not needed
  select(-counter)
#> # A tibble: 12 x 4
#> ID Type Output_Column output_column_calculated
#> <dbl> <chr> <dbl> <dbl>
#> 1 1 A 1 1
#> 2 1 A 1 1
#> 3 1 T 1 1
#> 4 1 A 2 2
#> 5 1 A 2 2
#> 6 1 A 2 2
#> 7 3 A 1 1
#> 8 3 A 1 1
#> 9 4 T 1 1
#> 10 4 A 2 2
#> 11 4 T 2 2
#> 12 4 A 3 3
Created on 2021-04-26 by the reprex package (v2.0.0)

R Tidyverse - Randomize by ID

I have a df like this one:
id <- c(1,1,2,2,3,3,4,4,5,5)
v1 <- c(3,1,2,3,4,5,6,1,5,4)
pos <- c(1,2,1,2,1,2,1,2,1,2)
df <- data.frame(id,v1,pos)
How can I "randomize" the values of v1 while keeping the inherent order of the id variable and the values of pos, so that I get a df with randomized values like this:
id v1 pos
1 1 1
1 3 2
2 2 1
2 3 2
3 5 1
3 4 2
4 6 1
4 1 2
5 5 1
5 4 2
Above is an example of the resulting df, with id and pos staying as originally created and v1 randomized.
Thanks!
Is sample what you're looking for?
library(dplyr)
df %>%
  group_by(id) %>%
  mutate(v1 = sample(v1, size = length(v1)))
# A tibble: 10 x 3
# Groups: id [5]
id v1 pos
<dbl> <dbl> <dbl>
1 1 3 1
2 1 1 2
3 2 3 1
4 2 2 2
5 3 4 1
6 3 5 2
7 4 1 1
8 4 6 2
9 5 5 1
10 5 4 2
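One caveat worth noting: if a group ever has a single row, sample(v1, ...) falls back to R's sample(n) behaviour and draws from 1:v1. A defensive sketch (my variant) that permutes by index instead, with an optional seed for reproducibility:
set.seed(123)  # optional: makes the shuffle reproducible
df %>%
  group_by(id) %>%
  mutate(v1 = v1[sample.int(n())]) %>%
  ungroup()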

is there a way in R to fill missing groups absent of observations?

Say I have something like:
df <- data.frame(group = c(1,1,1,2,2,2,3,3,3,4,4,1,1,1),
                 group2 = c(1,2,3,1,2,3,1,2,3,1,3,1,2,3))
group group2
1 1 1
2 1 2
3 1 3
4 2 1
5 2 2
6 2 3
7 3 1
8 3 2
9 3 3
10 4 1
11 4 3
12 1 1
13 1 2
14 1 3
My goal is to count the number of instances of each (group, group2) combination, like so:
df1 <- df %>%
  group_by(group, group2) %>%
  mutate(want = n()) %>%
  distinct(group, group2, .keep_all = TRUE)
group group2 want
<dbl> <dbl> <int>
1 1 1 2
2 1 2 2
3 1 3 2
4 2 1 1
5 2 2 1
6 2 3 1
7 3 1 1
8 3 2 1
9 3 3 1
10 4 1 1
11 4 3 1
However, notice that group=4, group2=2 was not in my dataset to begin with. Is there some sort of autofill function that fills these non-observations with a zero, to easily get the following?
group group2 want
<dbl> <dbl> <int>
1 1 1 2
2 1 2 2
3 1 3 2
4 2 1 1
5 2 2 1
6 2 3 1
7 3 1 1
8 3 2 1
9 3 3 1
10 4 1 1
11 4 2 0
12 4 3 1
After getting the count, we can expand with complete to fill the missing combinations with 0.
library(dplyr)
library(tidyr)
df %>%
  count(group, group2) %>%
  complete(group, group2, fill = list(n = 0))
# A tibble: 12 x 3
# group group2 n
# <dbl> <dbl> <dbl>
# 1 1 1 2
# 2 1 2 2
# 3 1 3 2
# 4 2 1 1
# 5 2 2 1
# 6 2 3 1
# 7 3 1 1
# 8 3 2 1
# 9 3 3 1
#10 4 1 1
#11 4 2 0
#12 4 3 1
Or, instead of mutate followed by distinct, do the group_by and then use summarise directly:
df %>%
  group_by(group, group2) %>%
  summarise(n = n()) %>%
  ungroup() %>%
  complete(group, group2, fill = list(n = 0))
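An alternative sketch: if group and group2 are converted to factors, count() can keep the empty combinations directly via .drop = FALSE, without a separate complete() step.
library(dplyr)
df %>%
  mutate(across(c(group, group2), factor)) %>%
  count(group, group2, .drop = FALSE)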
Here is a data.table approach to this problem:
library(data.table)
setDT(df)[CJ(group, group2, unique = TRUE),
          c(.SD, .(want = .N)), .EACHI,
          on = c("group", "group2")]
# group group2 want
# 1: 1 1 2
# 2: 1 2 2
# 3: 1 3 2
# 4: 2 1 1
# 5: 2 2 1
# 6: 2 3 1
# 7: 3 1 1
# 8: 3 2 1
# 9: 3 3 1
# 10: 4 1 1
# 11: 4 2 0
# 12: 4 3 1
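Here CJ(group, group2, unique = TRUE) builds the full cross join of the observed values, and .EACHI counts the matching rows per combination, so absent combinations get 0. For comparison, a base R sketch of the same grid-then-merge idea:
# Build the complete grid, attach the observed counts, fill gaps with 0.
counts <- aggregate(list(want = df$group),
                    by = list(group = df$group, group2 = df$group2),
                    FUN = length)
grid <- expand.grid(group = unique(df$group), group2 = unique(df$group2))
out <- merge(grid, counts, all.x = TRUE)
out$want[is.na(out$want)] <- 0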
