Extract the index of the end of each repeated run - r

How do I extract specific rows of data when a column contains repeating runs of values? My data looks like the table below. I want to extract the row at the end of each ascending run of X (A 3 10, A 2 3, etc.), or equivalently the index of the last value in each run.
Name X M
A 1 1
A 2 9
A 3 10
A 1 1
A 2 3
A 1 5
A 2 6
A 3 4
A 4 5
A 5 3
B 1 1
B 2 9
B 3 10
B 1 1
B 2 3
Expected output
Index Name X M
3 A 3 10
5 A 2 3
10 A 5 3
13 B 3 10
15 B 2 3

Using base R duplicated and cumsum:
dups <- !duplicated(cumsum(df$X == 1), fromLast = TRUE)  # last row of each run
cbind(df[dups, ], Index = which(dups))
# Name X M Index
#3 A 3 10 3
#5 A 2 3 5
#10 A 5 3 10
#13 B 3 10 13
#15 B 2 3 15
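To see why this works, here are the intermediate run IDs for the example data: cumsum(df$X == 1) increments each time X restarts at 1, and !duplicated(..., fromLast = TRUE) then keeps the last row of each run ID.
cumsum(df$X == 1)
# [1] 1 1 1 2 2 3 3 3 3 3 4 4 4 5 5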

A solution using dplyr.
library(dplyr)
df2 <- df %>%
  mutate(Flag = ifelse(lead(X) < X, 1, 0)) %>%
  mutate(Index = 1:n()) %>%
  filter(Flag == 1 | is.na(Flag)) %>%
  select(Index, X, M)
df2
# Index X M
# 1 3 3 10
# 2 5 2 3
# 3 10 5 3
# 4 13 3 10
# 5 15 2 3
Flag indicates whether the next value of X is smaller than the current one: if TRUE, Flag is 1, otherwise 0. We then filter for Flag == 1 or for the NA that lead() produces on the last row. df2 is the final filtered data frame.
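A compact variant of the same idea (shown only as a sketch) keeps Name as well by filtering directly on lead() with a default, so no separate NA check is needed:
library(dplyr)
df %>%
  mutate(Index = row_number()) %>%
  filter(lead(X, default = 0) < X) %>%
  select(Index, Name, X, M)
The default = 0 makes the comparison TRUE on the last row, closing off the final run.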
DATA
df <- read.table(text = "Name X M
A 1 1
A 2 9
A 3 10
A 1 1
A 2 3
A 1 5
A 2 6
A 3 4
A 4 5
A 5 3
B 1 1
B 2 9
B 3 10
B 1 1
B 2 3",
header = TRUE, stringsAsFactors = FALSE)

Related

Keep rows with specific string and the following row

This is my data frame
df <- data.frame(
  id = 1:14,
  group_id = c(rep(1:2, each = 3), rep(3:4, each = 4)),
  type = rep("A", 14), stringsAsFactors = FALSE)
df[c(2, 4, 8, 12), "type"] <- "B"
id group_id type
1 1 1 A
2 2 1 B
3 3 1 A
4 4 2 B
5 5 2 A
6 6 2 A
7 7 3 A
8 8 3 B
9 9 3 A
10 10 3 A
11 11 4 A
12 12 4 B
13 13 4 A
14 14 4 A
I'd like to keep all rows with type B as well as the following row.
I could do...
B <- which(df$type=="B")
afterB <- B+1
df_sel <- df[c(B, afterB), ]
df_sel <- df_sel[order(df_sel$id),]
df_sel
...to get what I want.
id group_id type
2 2 1 B
3 3 1 A
4 4 2 B
5 5 2 A
8 8 3 B
9 9 3 A
12 12 4 B
13 13 4 A
How can this be done in a more generic way?
Another way, very similar to what you do but in one step and without the need to reorder:
df_sel <- df[rep(which(df$type == "B"), each = 2) + c(0, 1), ]
df_sel
# id group_id type
# 2 2 1 B
# 3 3 1 A
# 4 4 2 B
# 5 5 2 A
# 8 8 3 B
# 9 9 3 A
# 12 12 4 B
# 13 13 4 A
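The trick is that rep(..., each = 2) duplicates each B index, and adding the recycled c(0, 1) turns every pair into the index plus the index after it:
idx <- which(df$type == "B")
idx
# [1]  2  4  8 12
rep(idx, each = 2) + c(0, 1)
# [1]  2  3  4  5  8  9 12 13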
Using lag from dplyr
library(dplyr)
df[df$type == "B" | lag(df$type == "B", default = FALSE), ]
# id group_id type
#2 2 1 B
#3 3 1 A
#4 4 2 B
#5 5 2 A
#8 8 3 B
#9 9 3 A
#12 12 4 B
#13 13 4 A
Using grep gives the row indices of all instances of B (rows); concatenating (c()) these with rows + 1 and using the result to subset df does the job.
rows <- grep("B", df[, "type"])
df[sort(c(rows, rows + 1)), ]
gives:
id group_id type
2 2 1 B
3 3 1 A
4 4 2 B
5 5 2 A
8 8 3 B
9 9 3 A
12 12 4 B
13 13 4 A

r - remove first row of condition per subject in dataframe

I have a long format dataframe with multiple subjects and multiple conditions for each subject.
I want to remove the first row of each condition (except the first one) for all subjects.
My dataframe looks like this:
> df <- data.frame(subj = c(rep(1,4),rep(2,4), rep(3,4)), cond = (rep(c("A", "A", "B", "B"),times=3)), value = round(runif(12, min = 0, max = 10)))
> df
subj cond value
1 A 1
1 A 5
1 B 3
1 B 10
2 A 6
2 A 5
2 B 2
2 B 0
3 A 5
3 A 8
3 B 5
3 B 2
I have found the duplicated() function but it only removes the first row of each condition for the first subject:
df <- df[duplicated(df$cond),]
subj cond value
1 A 5
1 B 10
2 A 6
2 A 5
2 B 2
2 B 0
3 A 5
3 A 8
3 B 5
3 B 2
Is there a way to "reset" the finding of a duplicate whenever a new subject begins?
And how can I stop it from excluding the first row of the first condition?
Thank you all so much!
You could subset with the duplicated interaction of the two variables:
> df
subj cond value
1 1 A 5
2 1 A 7
3 1 B 4
4 1 B 8
5 2 A 5
6 2 A 2
7 2 B 8
8 2 B 5
9 3 A 8
10 3 A 1
11 3 B 1
12 3 B 5
df1 <- df[!duplicated(interaction(df$subj, df$cond)),]
> df1
subj cond value
1 1 A 5
3 1 B 4
5 2 A 5
7 2 B 8
9 3 A 8
11 3 B 1
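As a quick check of what drives this: interaction() pastes the two columns into a single factor, so duplicated() effectively resets for every subject/condition pair.
as.character(interaction(df$subj, df$cond))
# [1] "1.A" "1.A" "1.B" "1.B" "2.A" "2.A" "2.B" "2.B" "3.A" "3.A" "3.B" "3.B"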
Edit:
I've read your question again and it seems you want to remove the first row, not the last. In this case, use
df1 <- df[!duplicated(interaction(df$subj, df$cond), fromLast = TRUE),]
> df1
subj cond value
2 1 A 4
4 1 B 9
6 2 A 9
8 2 B 7
10 3 A 1
12 3 B 2
Alternative (though it depends on the structure of the actual df):
df <- data.frame(subj = c(rep(1, 4), rep(2, 4), rep(3, 4)),
                 cond = rep(c("A", "A", "B", "B"), times = 3),
                 value = round(runif(12, min = 0, max = 10)))
df
dummy <- as.character(df$cond) # factor to character
mask <- c(FALSE, dummy[-1] == dummy[-length(dummy)])
df[mask,]
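For the example data, mask flags every row whose cond equals the one directly above it, i.e. the second row of each pair:
mask
# [1] FALSE  TRUE FALSE  TRUE FALSE  TRUE FALSE  TRUE FALSE  TRUE FALSE  TRUE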

Count distinct values that are not the same as the current row's values

Suppose I have a data frame:
df <- data.frame(SID=sample(1:4,15,replace=T), Var1=c(rep("A",5),rep("B",5),rep("C",5)), Var2=sample(2:4,15,replace=T))
which comes out to something like this:
SID Var1 Var2
1 4 A 2
2 3 A 2
3 4 A 3
4 3 A 3
5 1 A 4
6 1 B 2
7 3 B 2
8 4 B 4
9 4 B 4
10 3 B 2
11 2 C 2
12 2 C 2
13 4 C 4
14 2 C 4
15 3 C 3
What I hope to accomplish is to find the count of unique SIDs (see the update below: this should have said the count of unique (SID, Var1) combinations) where the given row's Var1 is excluded from this count, with the count grouped on Var2. So for the example above, I would like this output:
SID Var1 Var2 Count.Excluding.Var1
1 4 A 2 3
2 3 A 2 3
3 4 A 3 1
4 3 A 3 1
5 1 A 4 3
6 1 B 2 3
7 3 B 2 3
8 4 B 4 3
9 4 B 4 3
10 3 B 2 3
11 2 C 2 4
12 2 C 2 4
13 4 C 4 2
14 2 C 4 2
15 3 C 3 2
For the 1st observation, we have a count of 3 because there are 3 unique (SID, Var1) combinations for the given Var2 value (2, in this case) where Var1 != A (the Var1 value of the 1st observation) -- specifically, the count includes observations 6, 7, and 11, but not 12, because we already accounted for (SID, Var1) = (2, C), and not row 2, because we do not want Var1 to be "A". All of these rows have the same Var2 value.
I'd preferably like to use dplyr functions and the %>% operator.
UPDATE
I apologize for the confusion and my incorrect explanation above. I have corrected what I intended to ask in the parentheses, but I am leaving my original phrasing as well because the majority of answers interpreted it that way.
As for the example, I apologize for not setting the seed. There seems to have been some confusion regarding Count.Excluding.Var1 for rows 11 and 12. With unique (SID, Var1) combinations, rows 11 and 12 should make sense, as these count rows 1, 2, 6, and 7 (or equivalently 10).
A simple mapply can do the trick. But as the OP requested a %>%-based solution, an option could be:
df %>%
  mutate(Count.Excluding.Var1 =
           mapply(function(x, y) nrow(unique(df[df$Var1 != x & df$Var2 == y, 1:2])),
                  .$Var1, .$Var2))
# SID Var1 Var2 Count.Excluding.Var1
# 1 4 A 2 3
# 2 2 A 3 3
# 3 4 A 4 3
# 4 4 A 4 3
# 5 3 A 4 3
# 6 4 B 3 1
# 7 3 B 3 1
# 8 3 B 3 1
# 9 4 B 2 3
# 10 2 B 3 1
# 11 2 C 2 2
# 12 4 C 4 2
# 13 1 C 4 2
# 14 1 C 2 2
# 15 3 C 4 2
Data:
The above results are based on the original data provided by the OP.
df <- data.frame(SID=sample(1:4,15,replace=T), Var1=c(rep("A",5),rep("B",5),rep("C",5)), Var2=sample(2:4,15,replace=T))
Could not think of a dplyr solution, but here's one with apply:
df$Count <- apply(df, 1, function(x)
  length(unique(df$SID[(df$Var1 != x['Var1']) & (df$Var2 == x['Var2'])])))
# SID Var1 Var2 Count
# 1 4 A 2 3
# 2 3 A 2 3
# 3 4 A 3 1
# 4 3 A 3 1
# 5 1 A 4 2
# 6 1 B 2 3
# 7 3 B 2 3
# 8 4 B 4 3
# 9 4 B 4 3
# 10 3 B 2 3
# 11 2 C 2 3
# 12 2 C 2 3
# 13 4 C 4 2
# 14 2 C 4 2
# 15 3 C 3 2
Here is a dplyr solution, as requested. For future reference, please use set.seed so we can reproduce your desired output with sample, else I have to enter data by hand...
I think this is your logic? You want the n_distinct(SID) for each Var2, but for each row, you want to exclude rows which have the same Var1 as the current row. So a key observation here is row 3, where a simple grouped summarise would yield a count of 2. Of the rows with Var2 = 3, row 3 has SID = 4, row 4 has SID = 3, row 15 has SID = 3, but we don't count row 3 or row 4, so final count is one unique SID.
Here we get first the count of unique SID for each Var2, then the count of unique SID for each Var1, Var2 combo. First count is too large by the amount of additional unique SID for each combo, so we subtract it and add one. There is an edge case where for a Var1, there is only one corresponding Var2. This should return 0 since you exclude all the possible values of SID. I added two rows to illustrate this.
library(tidyverse)
df <- read_table2(
"SID Var1 Var2
4 A 2
3 A 2
4 A 3
3 A 3
1 A 4
1 B 2
3 B 2
4 B 4
4 B 4
3 B 2
2 C 2
2 C 2
4 C 4
2 C 4
3 C 3
1 D 5
2 D 5"
)
df %>%
  group_by(Var2) %>%
  mutate(SID_per_Var2 = n_distinct(SID)) %>%
  group_by(Var1, Var2) %>%
  mutate(SID_per_Var1Var2 = n_distinct(SID)) %>%
  ungroup() %>%
  add_count(Var1) %>%
  add_count(Var1, Var2) %>%
  mutate(
    Count.Excluding.Var1 = if_else(
      n > nn,
      SID_per_Var2 - SID_per_Var1Var2 + 1,
      0
    )
  ) %>%
  select(SID, Var1, Var2, Count.Excluding.Var1)
#> # A tibble: 17 x 4
#> SID Var1 Var2 Count.Excluding.Var1
#> <int> <chr> <int> <dbl>
#> 1 4 A 2 3.
#> 2 3 A 2 3.
#> 3 4 A 3 1.
#> 4 3 A 3 1.
#> 5 1 A 4 3.
#> 6 1 B 2 3.
#> 7 3 B 2 3.
#> 8 4 B 4 3.
#> 9 4 B 4 3.
#> 10 3 B 2 3.
#> 11 2 C 2 4.
#> 12 2 C 2 4.
#> 13 4 C 4 2.
#> 14 2 C 4 2.
#> 15 3 C 3 2.
#> 16 1 D 5 0.
#> 17 2 D 5 0.
Created on 2018-04-12 by the reprex package (v0.2.0).
Here's a solution using purrr - you can wrap this in a mutate statement if you want, but I don't know that it adds much in this particular case.
library(purrr)
library(dplyr)  # for filter() and %>%
df$Count.Excluding.Var1 = map_int(1:nrow(df), function(n) {
  df %>% filter(Var2 == Var2[n], Var1 != Var1[n]) %>% distinct() %>% nrow()
})
(Updated with input from comments by Calum You. Thanks!)
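For reference, the mutate-wrapped form mentioned above could look like this (a sketch, using the libraries already loaded; row_number() supplies the row index):
df %>%
  mutate(Count.Excluding.Var1 = map_int(row_number(), function(n) {
    df %>% filter(Var2 == Var2[n], Var1 != Var1[n]) %>% distinct() %>% nrow()
  }))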
A 100% tidyverse solution:
library(tidyverse) # dplyr + purrr
df %>%
  group_by(Var2) %>%
  mutate(count = map_int(Var1, ~ n_distinct(SID[.x != Var1], Var1[.x != Var1])))
# # A tibble: 15 x 4
# # Groups: Var2 [3]
# SID Var1 Var2 count
# <int> <chr> <int> <int>
# 1 4 A 2 3
# 2 3 A 2 3
# 3 4 A 3 1
# 4 3 A 3 1
# 5 1 A 4 3
# 6 1 B 2 3
# 7 3 B 2 3
# 8 4 B 4 3
# 9 4 B 4 3
# 10 3 B 2 3
# 11 2 C 2 4
# 12 2 C 2 4
# 13 4 C 4 2
# 14 2 C 4 2
# 15 3 C 3 2
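A quick check of the building block: n_distinct() given several vectors counts distinct combinations, which is what makes the per-row exclusion above count (SID, Var1) pairs rather than SIDs alone.
n_distinct(c(1, 1, 2), c("a", "b", "b"))
# [1] 3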

Rolling sum from a certain position in a data frame in R

Say I have the following data, dat1:
width from by
2 1 A
3 1 A
2 2 A
3 2 A
2 1 B
3 1 B
2 2 B
3 2 B
And additionally the following, dat2:
x pos by
4 1 A
5 2 A
7 3 A
3 4 A
2 1 B
4 2 B
3 3 B
5 4 B
Say I want to create a new column on dat1 of rolling sum values from dat2, where:
The width of the rolling sum is the width given in that row
The starting position is that row's from value
The sum is taken within level A or B of by, depending on which level is in that row
So far I have (rollapply is from the zoo package):
rollapply(x = dat2$x, width = dat1$width, FUN = sum, align = "left", data = dat2)
So I need to incorporate the starting position and the factor level for that starting position.
So in this instance I want to get
width from by RS
2 1 A 9
3 1 A 16
2 2 A 12
3 2 A 15
etc
Any help would be greatly appreciated. Thanks
1) For each row i in dat1, the anonymous function subsets dat2 to the by value in dat1 and from that picks out the indicated rows of x and sums them:
transform(dat1, RS = sapply(1:nrow(dat1), function(i)
  sum(subset(dat2, dat1$by[i] == by)[seq(from[i], length = width[i]), "x"])))
giving:
width from by RS
1 2 1 A 9
2 3 1 A 16
3 2 2 A 12
4 3 2 A 15
5 2 1 B 6
6 3 1 B 9
7 2 2 B 7
8 3 2 B 12
2) An alternative would be to calculate the start values and widths for the sequences to sum in dat2 and then apply that:
st <- match(dat1$by, dat2$by) + dat1$from - 1
w <- dat1$width
Sum <- function(st, w) sum(dat2[seq(st, length = w), "x"])
transform(dat1, RS = mapply(Sum, st, w))
giving:
width from by RS
1 2 1 A 9
2 3 1 A 16
3 2 2 A 12
4 3 2 A 15
5 2 1 B 6
6 3 1 B 9
7 2 2 B 7
8 3 2 B 12
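As a quick check of how (2) lines up for the example data: match() finds the first row of each by block in dat2, and adding from - 1 gives the start positions.
match(dat1$by, dat2$by)
# [1] 1 1 1 1 5 5 5 5
st
# [1] 1 1 2 2 5 5 6 6
Note this relies on dat2 being sorted so that each by level forms one contiguous block, as it does here.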
Note
dat1 and dat2 in reproducible form are:
Lines1 <- "
width from by
2 1 A
3 1 A
2 2 A
3 2 A
2 1 B
3 1 B
2 2 B
3 2 B"
dat1 <- read.table(text = Lines1, header = TRUE)
Lines2 <- "
x pos by
4 1 A
5 2 A
7 3 A
3 4 A
2 1 B
4 2 B
3 3 B
5 4 B"
dat2 <- read.table(text = Lines2, header = TRUE)
Update
Fixed (1). Added (2).
Another option could be using dplyr and a join. The approach is to join the two data frames by "by", then filter to keep only rows whose pos falls between from and from + width - 1, and finally take the sum of the x column.
dat1 %>%
  inner_join(dat2, by = "by") %>%
  filter(from <= pos & pos < (from + width)) %>%
  group_by(by, from, width) %>%
  summarise(RS = sum(x)) %>%
  select(width, from, by, RS)
# A tibble: 8 x 4
# Groups: by, from [4]
# width from by RS
# <int> <int> <chr> <int>
# 1 2 1 A 9
# 2 3 1 A 16
# 3 2 2 A 12
# 4 3 2 A 15
# 5 2 1 B 6
# 6 3 1 B 9
# 7 2 2 B 7
# 8 3 2 B 12
data
dat1 <- read.table(text =
"width from by
2 1 A
3 1 A
2 2 A
3 2 A
2 1 B
3 1 B
2 2 B
3 2 B", header = TRUE, stringsAsFactors = FALSE)
dat2 <- read.table(text =
"x pos by
4 1 A
5 2 A
7 3 A
3 4 A
2 1 B
4 2 B
3 3 B
5 4 B", header = TRUE, stringsAsFactors = FALSE)

how to mutate a column with ID in group

My data frame looks like this:
a b c
1 a 1 1
2 a 1 2
3 a 2 3
4 b 1 4
5 b 2 5
6 b 3 6
Group by a; flag starts at 1. Within a group, if b equals the previous b the flag keeps its previous value, otherwise it increments by 1:
a b c flag
1 a 1 1 1 <- group a starts at 1
2 a 1 2 1 <- in group a, 1 (row 2) = 1 (row 1)
3 a 2 3 2 <- in group a, 2 (row 3) != 1 (row 2)
4 b 1 4 1 <- group b starts at 1
5 b 2 5 2 <- in group b, 2 (row 5) != 1 (row 4)
6 b 3 6 3 <- in group b, 3 (row 6) != 2 (row 5)
I am currently using this:
for (i in 2:nrow(x)) {
  x[i, 'flag'] = ifelse(x[i, 'a'] != x[i-1, 'a'], 1,
                        ifelse(x[i, 'b'] == x[i-1, 'b'], x[i-1, 'flag'], x[i-1, 'flag'] + 1))
}
but it is inefficient on large datasets.
UPDATE
dense_rank in dplyr gives me the answer:
> x %>% group_by(a) %>% mutate(dense_rank(b))
Source: local data frame [10 x 4]
Groups: a
a b c dense_rank(b)
1 a x 1 1
2 a x 2 1
3 a y 3 2
4 b x 4 1
5 b y 5 2
6 b z 6 3
7 c x 7 1
8 c y 8 2
9 c z 9 3
10 c z 10 3
thanks.
I am not entirely sure what you are trying to do. But it seems to me that you are trying to assign index numbers to values in b for each group (a or b).
#I modified your example here.
a <- rep(c("a","b"), each =3)
b <- c(4,4,5,11,12,13)
c <- 1:6
foo <- data.frame(a,b,c, stringsAsFactors = F)
a b c
1 a 4 1
2 a 4 2
3 a 5 3
4 b 11 4
5 b 12 5
6 b 13 6
# Since you referred to dplyr, I will use it (rbindlist below is from data.table).
library(dplyr)
library(data.table)
cats <- list()
for (i in unique(foo$a)) {
  ana <- foo %>%
    filter(a == i) %>%
    arrange(b) %>%
    mutate(indexInb = as.integer(as.factor(b)))
  cats[[i]] <- ana
}
bob <- rbindlist(cats)
a b c indexInb
1: a 4 1 1
2: a 4 2 1
3: a 5 3 2
4: b 11 4 1
5: b 12 5 2
6: b 13 6 3
Here's a quick vectorized way to solve this without using any for loops.
Base R solution using ave and transform
transform(x, flag = ave(b, a, FUN = function(x) cumsum(c(1, diff(x)))))
# a b c flag
# 1 a 1 1 1
# 2 a 1 2 1
# 3 a 2 3 2
# 4 b 1 4 1
# 5 b 2 5 2
# 6 b 3 6 3
Or a data.table solution (more efficient)
library(data.table)
setDT(x)[, flag := cumsum(c(1, diff(b))), by = a]
x
# a b c flag
# 1: a 1 1 1
# 2: a 1 2 1
# 3: a 2 3 2
# 4: b 1 4 1
# 5: b 2 5 2
# 6: b 3 6 3
Or a dplyr solution (because you tagged it)
library(dplyr)
x %>%
group_by(a) %>%
mutate(flag = cumsum(c(1, diff(b))))
# Source: local data frame [6 x 4]
# Groups: a
#
# a b c flag
# 1 a 1 1 1
# 2 a 1 2 1
# 3 a 2 3 2
# 4 b 1 4 1
# 5 b 2 5 2
# 6 b 3 6 3
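Note that cumsum(c(1, diff(b))) assumes b is numeric and steps by exactly 1 whenever it changes, as in the example data. A sketch of a dplyr variant that follows the stated rule (increment on every change) and only needs consecutive values to be comparable, so it also works for character b:
x %>%
  group_by(a) %>%
  mutate(flag = cumsum(b != lag(b, default = first(b))) + 1)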
