Consecutive value after a new level of factor in R

I have the following sample data:
id <- c("a","b","a","b","a","a","a","a","b","b","c")
SOG <- c(4,4,0,0,0,0,0,0,0,0,9)
data <- data.frame(id,SOG)
I would like a new column holding a cumulative counter for the rows where SOG == 0.
With the following code:
tmp <- rle(SOG) #run length encoding:
tmp$values <- tmp$values == 0 #turn values into logicals
tmp$values[tmp$values] <- cumsum(tmp$values[tmp$values]) #cumulative sum of TRUE values
inverse.rle(tmp) #inverse the run length encoding
I create the column "Stops":
data$Stops <- inverse.rle(tmp)
and I get:
[1] 0 0 1 1 1 1 1 1 1 1 0
But I would like to have instead
[1] 0 0 1 2 3 3 3 3 4 4 0
I mean that when the level of the factor "id" differs from the previous row, I want to move on to the next "stop" (i + 1).

Have a look at the dplyr package:
library(dplyr)
data %>%
  mutate(
    Stops = ifelse(
      SOG > 0,
      0,
      cumsum(SOG == 0 & lag(id) != id)
    )
  )
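The same logic can be written in base R without any packages. A minimal sketch, assuming the data frame from the question: a new "stop" begins wherever SOG == 0 and either the id just changed or the previous row was not part of a zero run.
is_stop <- data$SOG == 0
id_chg <- c(TRUE, head(data$id, -1) != data$id[-1])  # TRUE where id differs from the previous row
run_start <- is_stop & (id_chg | !c(FALSE, head(is_stop, -1)))
data$Stops <- cumsum(run_start) * is_stop
data$Stops
#[1] 0 0 1 2 3 3 3 3 4 4 0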

We can try
library(data.table)
setDT(data1)[, v1 := if (all(!SOG)) c(TRUE, id[-1] != id[-.N]) else
  rep(FALSE, .N), .(grp = rleid(SOG))][, cumsum(v1) * (!SOG)]
#[1] 0 0 1 2 3 3 3 3 4 4 0 0 0 0 5 5 0 6 6 0
Using the original data:
setDT(data)[, v1 := if (all(!SOG)) c(TRUE, id[-1] != id[-.N]) else
  rep(FALSE, .N), .(grp = rleid(SOG))][, cumsum(v1) * (!SOG)]
#[1] 0 0 1 2 3 3 3 3 4 4 0
data (the expanded sample used above):
id <- c("a","b","a","b","a","a","a","a","b","b","c","a","a","a","a","a","a","a","a", "a")
SOG <- c(4,4,0,0,0,0,0,0,0,0,9,1,5,3,0,0,4,0,0,1)
data1 <- data.frame(id, SOG, stringsAsFactors=FALSE)

Related

find peaks and valleys for each column of a dataframe

I have a dataframe df. I would like to find the peaks and valleys in each column and then replace those points with the value 1.
Here I made an example by applying it to only one column.
Is it possible to do this for all the columns in the dataframe?
library(quantmod)  # findPeaks() and findValleys() come from quantmod
df <- data.frame(a = sample(1:10, 10),
                 b = sample(1:10, 10),
                 c = sample(1:10, 10),
                 d = sample(1:10, 10),
                 e = sample(1:10, 10))
valleys <- findValleys(df$b, thresh = 0)
peaks <- findPeaks(df$b, thresh = 0)
df$b <- rep(0, nrow(df))
df$b <- replace(df$b, peaks, values = 1)
df$b <- replace(df$b, valleys, values = 1)
Thank you
The easiest thing is to put your code into a function.
library(quantmod)
replace_peaks_valleys <- function(x) {
  valleys <- findValleys(x, thresh = 0)
  peaks <- findPeaks(x, thresh = 0)
  new_col <- rep(0, length(x))
  new_col <- replace(new_col, peaks, values = 1)
  new_col <- replace(new_col, valleys, values = 1)
  return(new_col)
}
Then you can choose whether to do it in base R, dplyr or data.table.
base R
As you want to assign back to your original data frame, in base R you can do (note the square brackets or it will return a list):
df[] <- lapply(df, replace_peaks_valleys)
head(df)
# a b c d e
# 1 0 0 0 0 0
# 2 0 0 0 0 0
# 3 1 1 1 1 1
# 4 1 0 1 1 0
# 5 1 1 0 1 0
# 6 0 1 1 1 1
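To see why the empty square brackets matter, here is a minimal standalone illustration (not specific to this problem): assigning the bare lapply() result would replace the data frame with a plain list, whereas df[] <- overwrites the columns while keeping the data.frame structure.
d <- data.frame(x = 1:3, y = 4:6)
res <- lapply(d, function(col) col * 0)
class(res)  # "list": lapply() drops the data.frame structure
d[] <- lapply(d, function(col) col * 0)
class(d)    # "data.frame": the empty brackets keep the original structure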
dplyr
Alternatively, with dplyr you can just do:
library(dplyr)
df |>
  mutate(
    across(
      a:e, replace_peaks_valleys
    )
  )
# a b c d e
# 1 0 0 0 0 0
# 2 0 0 0 0 0
# 3 1 1 1 1 1
# 4 1 0 1 1 0
# <etc>
data.table
You can also do this with data.table:
library(data.table)
dt <- setDT(df)
dt[, lapply(.SD, replace_peaks_valleys)]
# a b c d e
# 1: 0 0 0 0 0
# 2: 0 0 0 0 0
# 3: 1 0 1 1 1
# 4: 1 1 0 0 0
# <etc>
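Note that lapply(.SD, ...) as written returns a new data.table and leaves dt unchanged. To update dt by reference instead, the standard := idiom can be used (a one-line sketch):
dt[, (names(dt)) := lapply(.SD, replace_peaks_valleys)]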
N.B. I used set.seed(1) before I ran your code - if you do this as well you should get exactly the same output.
Function definition
I just copied and pasted your code and made it into a function. You could change it so you assign 0 or 1 to the existing vector, rather than creating a new vector every time:
replace_peaks_valleys2 <- function(x) {
  valleys <- findValleys(x, thresh = 0)
  peaks <- findPeaks(x, thresh = 0)
  x[] <- 0
  x[c(peaks, valleys)] <- 1
  return(x)
}
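A quick usage check on a single vector (a sketch; assumes quantmod is loaded and set.seed(1) was called as above):
set.seed(1)
x <- sample(1:10, 10)
replace_peaks_valleys2(x)  # a 0/1 vector marking the detected peaks and valleys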

Removing a group with conditional statement in r

The condition is that, within any event, if there are two or more consecutive rows with values higher than 1, the whole group should be deleted.
For example:
Event<- c(1,1,1,1,2,2,2,2,2,2,3,3,3,3,3)
Value<- c(1,0,0,0,8,7,1,0,0,0,8,0,0,0,0)
A<- data.frame(Event, Value)
Event Value
1 1
1 0
1 0
1 0
2 8
2 7
2 1
2 0
2 0
2 0
3 8
3 0
3 0
3 0
3 0
In this example the group of event 2 should be deleted because it has two consecutive rows with values higher than 1. So it should look like:
Event Value
1 1
1 0
1 0
1 0
3 8
3 0
3 0
3 0
3 0
Any suggestions?
We can use rle by groups.
library(dplyr)
A %>%
  group_by(Event) %>%
  filter(!any(with(rle(Value > 1), lengths[values] > 1)))
# Opposite way using all
# filter(all(with(rle(Value > 1), lengths[values] < 2)))
# Event Value
# <dbl> <dbl>
#1 1 1
#2 1 0
#3 1 0
#4 1 0
#5 3 8
#6 3 0
#7 3 0
#8 3 0
#9 3 0
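To see why this works, here is what the rle() expression evaluates to for event 2 (a quick sketch):
r <- rle(c(8, 7, 1, 0, 0, 0) > 1)
r$lengths  # 2 4: a run of two TRUEs followed by a run of four FALSEs
r$values   # TRUE FALSE
r$lengths[r$values]  # 2: the length of the TRUE run
any(r$lengths[r$values] > 1)  # TRUE, so event 2 is filtered out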
The same logic can be used in base R:
subset(A, !ave(Value > 1, Event, FUN = function(x)
  any(with(rle(x), lengths[values] > 1))))
as well as data.table
library(data.table)
setDT(A)[, .SD[!any(with(rle(Value > 1), lengths[values] > 1))], Event]
Using dplyr, flagging rows whose value and previous value are both above 1:
A %>%
  group_by(Event) %>%
  mutate(consec = Value > 1 & lag(Value, default = 0) > 1,
         remove = if_else(consec, "Y", "N")) %>%
  filter(!any(remove == "Y")) %>%
  select(-c("consec", "remove"))
A base R approach:
# split the values, recorded as TRUE/FALSE for > 1, into separate lists by event
A_split <- split(A$Value > 1, A$Event)
# for each event, multiply each element by its successor; the sum is zero
# only when no two consecutive values are TRUE
keep <- sapply(A_split, function(x) sum(x[1:(length(x) - 1)] * x[2:length(x)])) == 0
# convert keep to a numeric vector of the A$Event values to retain
keep <- as.numeric(names(which(keep)))
# subset A based on keep vector
A[A$Event %in% keep, ]
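The pairwise-product trick is worth seeing in isolation: each element is multiplied by its successor, so the sum is nonzero exactly when two consecutive TRUEs occur (a quick illustration on event 2):
x <- c(8, 7, 1, 0, 0, 0) > 1           # TRUE TRUE FALSE FALSE FALSE FALSE
x[1:(length(x) - 1)] * x[2:length(x)]  # 1 0 0 0 0 -> the sum is 1, so event 2 is dropped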

R data.table subsetting by column value

I am wondering what the most efficient or cleanest way is in data.table to select rows based on the occurrence of some column values.
For example, in a 7 column data table, with each value being either 1 or 0, I want all rows where there are exactly 2 values of 1 and 5 values of 0 (1 representing "presence" and 0 "absence").
So far, here is what I am doing, assuming the following data.table (the real one is much bigger; this is just a sample):
name D2A1.var D2B3.var D3A1.var D4A3.var D5B3.var H2A3.var H4A4.var MA_ancestor.var
Chrom_1;10000034;G;A Chrom_1;10000034;G;A 1 1 1 1 1 1 1 1
Chrom_1;10000035;G;A Chrom_1;10000035;G;A 1 1 1 1 1 1 1 1
Chrom_1;10000042;C;A Chrom_1;10000042;C;A 1 1 1 1 1 1 1 1
Chrom_1;10000051;A;G Chrom_1;10000051;A;G 1 1 1 1 1 1 1 1
Chrom_1;10000070;G;A Chrom_1;10000070;G;A 1 1 1 1 1 1 1 1
Chrom_1;10000084;C;T Chrom_1;10000084;C;T 1 1 1 1 1 1 1 1
Chrom_6;9997224;AT;A Chrom_6;9997224;AT;A 0 0 0 0 0 1 0 1
Chrom_6;9998654;GTGTGTGTT;G Chrom_6;9998654;GTGTGTGTT;G 0 0 0 0 0 0 0 1
Chrom_6;9999553;TTTC;T Chrom_6;9999553;TTTC;T 0 0 0 0 0 0 0 1
and if I want all rows where every column is 1 and, let's say, rows where only D2A1.var and D3A1.var are 1, I am doing the following:
ALL = DT[DT$MA_ancestor.var == 1 & DT$D2A1.var == 1 & DT$D2B3.var == 1 & DT$D3A1.var == 1 &
         DT$D4A3.var == 1 & DT$D5B3.var == 1 & DT$H2A3.var == 1 & DT$H4A4.var == 1, ]
TWO = DT[DT$MA_ancestor.var == 0 & DT$D2A1.var == 1 & DT$D2B3.var == 0 & DT$D3A1.var == 1 &
         DT$D4A3.var == 0 & DT$D5B3.var == 0 & DT$H2A3.var == 0 & DT$H4A4.var == 0, ]
DFlist=list(TWO, ALL)
DFlong = rbindlist(DFlist, use.names = TRUE, idcol = FALSE)
This returns the expected result and is fast enough. However, with multiple conditions this means a lot of typing and a lot of data.table creations. Is there a faster, cleaner and more compact way of achieving this?
We can make use of .SDcols by specifying the columns of interest: loop through the Subset of Data.table (.SD) to create a list of logical vectors, then Reduce them to a single logical vector with &.
ALL <- DT[DT[, Reduce(`&`, lapply(.SD, `==`, 1)), .SDcols = nm1]]
TWO <- DT[DT[, Reduce(`&`, lapply(.SD, `==`, 0)), .SDcols = nm1]]
where
nm1 <- names(DT)[-1] #or change the names accordingly
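To see what the Reduce step does on its own, here is a tiny sketch on a made-up two-column example:
library(data.table)
toy <- data.table(a = c(1, 0, 1), b = c(1, 1, 0))
toy[, Reduce(`&`, lapply(.SD, `==`, 1))]       # TRUE FALSE FALSE
toy[toy[, Reduce(`&`, lapply(.SD, `==`, 1))]]  # only the row where every column is 1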
Another option using setkey:
setkeyv(DT, names(DT))
# create desired filtering conditions as lists
cond1 <- setNames(as.list(rep(1, ncol(DT))), names(DT))
cond2 <- list(MA_ancestor.var = 0, D2A1.var = 1, D2B3.var = 0, D3A1.var = 1,
              D4A3.var = 0, D5B3.var = 0, H2A3.var = 0, H4A4.var = 0)
# gather the list of conditions so that one does not have to type them one by one
scond <- grep("^cond", ls(), value = TRUE)
DT[rbindlist(mget(scond, envir = .GlobalEnv), use.names = TRUE)]
If you are worried about picking up spurious variables starting with cond, you can assign them to a dedicated environment using list2env and pass that envir into mget.
data:
DT <- fread("D2A1.var D2B3.var D3A1.var D4A3.var D5B3.var H2A3.var H4A4.var MA_ancestor.var
1 1 1 1 1 1 1 1
1 1 1 1 1 1 1 1
1 1 1 1 1 1 1 1
1 1 1 1 1 1 1 1
1 1 1 1 1 1 1 1
1 1 1 1 1 1 1 1
0 0 0 0 0 1 0 1
0 0 0 0 0 0 0 1
0 0 0 0 0 0 0 1")
Is there a faster, cleaner and more compact way of achieving this?
Doing separate queries and rbinding as you do is probably simplest.
You can simplify each query by using replace and join syntax:
# make a list of columns initially set to value 0
vec0 = lapply(DT[, .SD, .SDcols=D2A1.var:MA_ancestor.var], function(x) 0)
# helper function for semi join
subit = function(x, d = DT) d[x, on=names(x), nomatch=0]
rbind(
subit(replace(vec0, names(vec0), 1)),
subit(replace(vec0, c("D2A1.var", "D3A1.var"), 1))
)
(This code is not tested since OP's data is not easily reproducible.)
You could probably simplify further like...
subitall = function(..., d = DT, v0 = vec0)
  rbindlist(lapply(list(...), function(x) subit(replace(v0, x, 1), d = d)))
subitall(names(vec0), c("D2A1.var", "D3A1.var"))
Regarding the function subit for subsetting / semi-join, you could modify it to meet your needs based on answers in Perform a semi-join with data.table
EDIT: Oh right, following @chinsoon's answer, you could also rbind first:
subit(rbindlist(list(
replace(vec0, names(vec0), 1),
replace(vec0, c("D2A1.var", "D3A1.var"), 1)
)))
This would mean joining only once, which is simpler.
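Since the question is ultimately about how many 1s a row contains, a plain rowSums filter also covers the "exactly two 1s, in any columns" reading (a sketch on the fread data above, which has no name column):
DT[rowSums(DT) == 2]         # rows with exactly two 1s across the indicator columns
DT[rowSums(DT) == ncol(DT)]  # rows that are all 1s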

Find consecutive values in dataframe

I have a dataframe. I wish to detect consecutive numbers and populate a new column as 1 or 0.
ID Val
1 a 8
2 a 7
3 a 5
4 a 4
5 a 3
6 a 1
Expected output
ID Val outP
1 a 8 0
2 a 7 1
3 a 5 0
4 a 4 1
5 a 3 1
6 a 1 0
You could do this with the diff function in combination with abs and see whether the outcome is 1 or another value:
d$outP <- c(0, abs(diff(d$Val)) == 1)
which gives:
> d
ID Val outP
1 a 8 0
2 a 7 1
3 a 5 0
4 a 4 1
5 a 3 1
6 a 1 0
If you only want to take decreasing consecutive values into account, you can use:
c(0, diff(d$Val) == -1)
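For reference, this is what diff() produces on the sample column (a quick look):
d$Val                  # 8 7 5 4 3 1
diff(d$Val)            # -1 -2 -1 -1 -2
abs(diff(d$Val)) == 1  # TRUE FALSE TRUE TRUE FALSE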
When you want to do this for each ID, you can also do this in base R or with dplyr:
# base R
d$outP <- ave(d$Val, d$ID, FUN = function(x) c(0, abs(diff(x)) == 1))
# dplyr
library(dplyr)
d %>%
  group_by(ID) %>%
  mutate(outP = c(0, abs(diff(Val)) == 1))
We can also use a faster option, comparing the previous value with the current one:
with(df1, as.integer(c(FALSE, Val[-length(Val)] - Val[-1]) == 1))
#[1] 0 1 0 1 1 0
If we need to group by "ID", one option is data.table:
library(data.table)
setDT(df1)[, outP := as.integer((shift(Val, fill = Val[1]) - Val) == 1), by = ID]
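A quick check of the grouped version on data with two IDs (a sketch; the same values as the question, split across "a" and "b"):
library(data.table)
df1 <- data.frame(ID = rep(c("a", "b"), each = 3), Val = c(8, 7, 5, 4, 3, 1))
setDT(df1)[, outP := as.integer((shift(Val, fill = Val[1]) - Val) == 1), by = ID]
df1$outP
#[1] 0 1 0 0 1 0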

Aggregating every 10 columns in a binary matrix

I am new to R.
I would like to transform a binary matrix like this:
example:
" 1874 1875 1876 1877 1878 .... 2009
F 1 0 0 0 0 ... 0
E 1 1 0 0 0 ... 0
D 1 1 0 0 0 ... 0
C 1 1 0 0 0 ... 0
B 1 1 0 0 0 ... 0
A 1 1 0 0 0 ... 0"
Since the column names are years, I would like to aggregate them into decades and obtain something like:
"1840-1849 1850-1859 1860-1869 .... 2000-2009
F 1 0 0 0 0 ... 0
E 1 1 0 0 0 ... 0
D 1 1 0 0 0 ... 0
C 1 1 0 0 0 ... 0
B 1 1 0 0 0 ... 0
A 1 1 0 0 0 ... 0"
I am used to Python and do not know how to do this transformation without writing loops!
Thanks, isabel
It is unclear what aggregation you want, but here is some dummy data to work with:
set.seed(42)
df <- data.frame(matrix(sample(0:1, 6*25, replace = TRUE), ncol = 25))
names(df) <- 1874 + 0:24
The following counts events in each 10-year period.
Get the years as a numeric variable
years <- as.numeric(names(df))
Next we need an indicator for the start of each decade
ind <- seq(from = signif(years[1], 3), to = signif(tail(years, 1), 3), by = 10)
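Here signif(x, 3) rounds to three significant digits, which for these four-digit years snaps to a decade boundary (a quick check):
signif(1874, 3)  # 1870
signif(1898, 3)  # 1900
ind              # 1870 1880 1890 1900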
We then apply over the indices of ind (1:(length(ind)-1)), select the columns of df that fall in the current decade, and count the 1s using rowSums.
tmp <- lapply(seq_along(ind[-1]),
              function(i, inds, data) {
                rowSums(data[, names(data) %in% inds[i]:(inds[i+1] - 1)])
              }, inds = ind, data = df)
Next we cbind the resulting vectors into a data frame and fix up the column names:
out <- do.call(cbind.data.frame, tmp)
names(out) <- paste(head(ind, -1), tail(ind, -1) - 1, sep = "-")
out
This gives:
> out
1870-1879 1880-1889 1890-1899
1 4 5 6
2 4 6 6
3 2 5 5
4 5 5 7
5 3 3 7
6 5 5 4
If you want simply a binary matrix with a 1 indicating at least 1 event happened in that decade, then you can use:
tmp2 <- lapply(seq_along(ind[-1]),
               function(i, inds, data) {
                 as.numeric(rowSums(data[, names(data) %in% inds[i]:(inds[i+1] - 1)]) > 0)
               }, inds = ind, data = df)
out2 <- do.call(cbind.data.frame, tmp2)
names(out2) <- paste(head(ind, -1), tail(ind, -1) - 1, sep = "-")
out2
which gives:
> out2
1870-1879 1880-1889 1890-1899
1 1 1 1
2 1 1 1
3 1 1 1
4 1 1 1
5 1 1 1
6 1 1 1
If you want a different aggregation, then modify the function applied in the lapply call to use something other than rowSums.
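For example, to record the per-decade maximum instead of the sum (a sketch; for 0/1 data this coincides with the binary indicator above, but any row-wise aggregator fits the same slot):
tmp3 <- lapply(seq_along(ind[-1]),
               function(i, inds, data) {
                 apply(data[, names(data) %in% inds[i]:(inds[i+1] - 1)], 1, max)
               }, inds = ind, data = df)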
This is another option, using modular arithmetic to aggregate the columns.
# setup, borrowed from #GavinSimpson
set.seed(42)
df <- data.frame(matrix(sample(0:1, 6*25, replace = TRUE), ncol = 25))
names(df) <- 1874 + 0:24
result <- do.call(cbind,
                  by(t(df), as.numeric(names(df)) %/% 10 * 10, colSums))
# add -xxx9 to column names, for each decade
dimnames(result)[[2]] <- paste(colnames(result), as.numeric(colnames(result)) + 9, sep='-')
# 1870-1879 1880-1889 1890-1899
# V1 4 5 6
# V2 4 6 6
# V3 2 5 5
# V4 5 5 7
# V5 3 3 7
# V6 5 5 4
If you wanted to aggregate with something other than sum, replace the call to colSums with something like function(cols) lapply(cols, f), where f is the aggregating function, e.g., max.
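The grouping key itself is just integer division (a quick illustration with the dummy years):
years <- 1874 + 0:24
unique(years %/% 10 * 10)  # 1870 1880 1890
table(years %/% 10 * 10)   # 6 years fall in the 1870s bin, 10 in the 1880s, 9 in the 1890s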
