Create a sequential number within each group

I need to create a sequence of numbers based on another column
I have this data frame:
head(df)
id date lc lon lat gap_days gap
1 20162.03 2003-10-19 14:33:00 Tagging -39.370 -18.480 NA <NA>
2 20162.03 2003-10-21 12:19:00 1 -38.517 -18.253 1.90694444 gap
3 20162.03 2003-10-21 13:33:00 1 -38.464 -18.302 0.05138889 no
4 20162.03 2003-10-21 16:38:00 A -38.461 -18.425 0.12847222 no
5 20162.03 2003-10-21 18:50:00 A -38.322 -18.512 0.09166667 no
6 20162.03 2003-10-23 10:33:00 B -38.674 -19.824 1.65486111 gap
I indicated the gaps of more than one day in the column "gap", based on the column gap_days.
Now I need to split my data: each sequence of gaps will become a new individual data frame.
So, if ID 20162.03 has one or more gaps, its sequence will be split according to the number of gaps.
For this, I will use the move package and its functions burst() and split().
But first I need to create a new column with a sequence of numbers indicating the new separations of IDs, as in the seq column below:
id date lc lon lat gap_days gap seq
1 20162.03 2003-10-19 14:33:00 Tagging -39.370 -18.480 NA <NA> 1
2 20162.03 2003-10-21 12:19:00 1 -38.517 -18.253 1.90694444 gap 1
3 20162.03 2003-10-21 13:33:00 1 -38.464 -18.302 0.05138889 no 1
4 20162.03 2003-10-21 16:38:00 A -38.461 -18.425 0.12847222 no 1
5 20162.03 2003-10-21 18:50:00 A -38.322 -18.512 0.09166667 no 1
6 20162.03 2003-10-23 10:33:00 B -38.674 -19.824 1.65486111 gap 2
7 20162.03 2003-10-23 17:52:00 B -38.957 -19.511 0.30486111 no 2
8 20162.03 2003-11-02 08:14:00 B -42.084 -24.071 9.59861111 gap 3
9 20162.03 2003-11-02 09:36:00 A -41.999 -24.114 0.05694444 no 3
10 20687.03 2003-10-27 17:02:00 Tagging -39.320 -18.460 NA <NA> 4
11 20687.03 2003-10-27 19:44:00 2 -39.306 -18.454 0.11250000 no 4
12 20687.03 2003-10-27 21:05:00 1 -39.301 -18.458 0.05625000 no 4
But, as you can see, I have a sequence of "gap" and "no" values, but also NAs.
I can't find a solution. Does anyone have one?
EDIT:
structure(list(id = c("20162.03", "20162.03", "20162.03", "20162.03",
"20162.03", "20162.03", "20162.03", "20162.03", "20162.03", "20687.03",
"20687.03", "20687.03"), date = structure(c(1066573980, 1066738740,
1066743180, 1066754280, 1066762200, 1066905180, 1066931520, 1067760840,
1067765760, 1067274120, 1067283840, 1067288700), class = c("POSIXct",
"POSIXt"), tzone = "GMT"), lc = structure(c(4L, 1L, 1L, 2L, 2L,
3L, 3L, 3L, 2L, 4L, 6L, 1L), .Label = c("1", "A", "B", "Tagging",
"0", "2", "3", "N", "P", "Z"), class = "factor"), lon = c(-39.37,
-38.517, -38.464, -38.461, -38.322, -38.674, -38.957, -42.084,
-41.999, -39.32, -39.306, -39.301), lat = c(-18.48, -18.253,
-18.302, -18.425, -18.512, -19.824, -19.511, -24.071, -24.114,
-18.46, -18.454, -18.458), gap_days = c(NA, 1.90694444444444,
0.0513888888888889, 0.128472222222222, 0.0916666666666667, 1.65486111111111,
0.304861111111111, 9.59861111111111, 0.0569444444444444, NA,
0.1125, 0.05625), gap = c(NA, "gap", "no", "no", "no", "gap",
"no", "gap", "no", NA, "no", "no")), row.names = c(NA, 12L), class = "data.frame")

A simple solution with Base R:
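# identical() returns FALSE for NA (where == would give NA), so NA rows simply don't increment the per-id cumsum()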
df$seq <- ave(sapply(df$gap, identical, "gap"), df$id, FUN = cumsum)
df
#> id date lc lon lat gap_days gap seq
#> 1 20162.03 2003-10-19 14:33:00 Tagging -39.370 -18.480 NA <NA> 0
#> 2 20162.03 2003-10-21 12:19:00 1 -38.517 -18.253 1.90694444 gap 1
#> 3 20162.03 2003-10-21 13:33:00 1 -38.464 -18.302 0.05138889 no 1
#> 4 20162.03 2003-10-21 16:38:00 A -38.461 -18.425 0.12847222 no 1
#> 5 20162.03 2003-10-21 18:50:00 A -38.322 -18.512 0.09166667 no 1
#> 6 20162.03 2003-10-23 10:33:00 B -38.674 -19.824 1.65486111 gap 2
#> 7 20162.03 2003-10-23 17:52:00 B -38.957 -19.511 0.30486111 no 2
#> 8 20162.03 2003-11-02 08:14:00 B -42.084 -24.071 9.59861111 gap 3
#> 9 20162.03 2003-11-02 09:36:00 A -41.999 -24.114 0.05694444 no 3
#> 10 20687.03 2003-10-27 17:02:00 Tagging -39.320 -18.460 NA <NA> 0
#> 11 20687.03 2003-10-27 19:44:00 2 -39.306 -18.454 0.11250000 no 0
#> 12 20687.03 2003-10-27 21:05:00 1 -39.301 -18.458 0.05625000 no 0
And then split it:
split(df, list(df$id, df$seq), drop = TRUE)
#> $`20162.03.0`
#> id date lc lon lat gap_days gap seq
#> 1 20162.03 2003-10-19 14:33:00 Tagging -39.37 -18.48 NA <NA> 0
#>
#> $`20687.03.0`
#> id date lc lon lat gap_days gap seq
#> 10 20687.03 2003-10-27 17:02:00 Tagging -39.320 -18.460 NA <NA> 0
#> 11 20687.03 2003-10-27 19:44:00 2 -39.306 -18.454 0.11250 no 0
#> 12 20687.03 2003-10-27 21:05:00 1 -39.301 -18.458 0.05625 no 0
#>
#> $`20162.03.1`
#> id date lc lon lat gap_days gap seq
#> 2 20162.03 2003-10-21 12:19:00 1 -38.517 -18.253 1.90694444 gap 1
#> 3 20162.03 2003-10-21 13:33:00 1 -38.464 -18.302 0.05138889 no 1
#> 4 20162.03 2003-10-21 16:38:00 A -38.461 -18.425 0.12847222 no 1
#> 5 20162.03 2003-10-21 18:50:00 A -38.322 -18.512 0.09166667 no 1
#>
#> $`20162.03.2`
#> id date lc lon lat gap_days gap seq
#> 6 20162.03 2003-10-23 10:33:00 B -38.674 -19.824 1.6548611 gap 2
#> 7 20162.03 2003-10-23 17:52:00 B -38.957 -19.511 0.3048611 no 2
#>
#> $`20162.03.3`
#> id date lc lon lat gap_days gap seq
#> 8 20162.03 2003-11-02 08:14:00 B -42.084 -24.071 9.59861111 gap 3
#> 9 20162.03 2003-11-02 09:36:00 A -41.999 -24.114 0.05694444 no 3
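For completeness, an equivalent dplyr sketch (coalesce() turns the NA comparisons into FALSE so cumsum() works):
library(dplyr)
df %>%
  group_by(id) %>%
  mutate(seq = cumsum(coalesce(gap == "gap", FALSE))) %>%
  ungroup()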


Using lag function to find the last value for a specific individual

I'm trying to create a column in my spreadsheet that takes the last recorded value (IC) for a specific individual (by the Datetime column) and populates it into a column (LIC) for the current event.
A sub-sample of my data looks like this (actual dataset has 4949 rows and 37 individuals):
> head(ACdatas.scale)
Date Datetime ID.2 IC LIC
1 2019-05-25 2019-05-25 11:57 139 High NA
2 2019-06-09 2019-06-09 19:42 139 Low NA
3 2019-07-05 2019-07-05 20:12 139 Medium NA
4 2019-07-27 2019-07-27 17:27 152 Low NA
5 2019-08-04 2019-08-04 9:13 152 Medium NA
6 2019-08-04 2019-08-04 16:18 139 Medium NA
I would like to be able to populate the last value from the IC column into the current LIC column for the current event (see below)
> head(ACdatas.scale)
Date Datetime ID.2 IC LIC
1 2019-05-25 2019-05-25 11:57 139 High NA
2 2019-06-09 2019-06-09 19:42 139 Low High
3 2019-07-05 2019-07-05 20:12 139 Medium Low
4 2019-07-27 2019-07-27 17:27 152 Low NA
5 2019-08-04 2019-08-04 9:13 152 Medium Low
6 2019-08-04 2019-08-04 16:18 139 Medium Medium
I've tried the following code:
ACdatas.scale <- ACdatas.scale %>%
  arrange(ID.2, Datetime) %>%
  group_by(ID.2) %>%
  mutate(LIC = lag(IC))
This worked some of the time, but when I checked back through the data, it seemed to have a problem when the date switched: it could accurately populate the field within the same day, but not when the previous event was on the previous day. Just to make it super confusing, it only had issues with some of the day switches, and not all! Help please!
Sample data:
dat <- data.frame(id = c(rep("A", 5), rep("B", 5)), IC = c(1:5, 11:15))
dplyr
library(dplyr)
dat %>%
  group_by(id) %>%
  mutate(LIC = lag(IC)) %>%
  ungroup()
# # A tibble: 10 x 3
# id IC LIC
# <chr> <int> <int>
# 1 A 1 NA
# 2 A 2 1
# 3 A 3 2
# 4 A 4 3
# 5 A 5 4
# 6 B 11 NA
# 7 B 12 11
# 8 B 13 12
# 9 B 14 13
# 10 B 15 14
data.table
library(data.table)
as.data.table(dat)[, LIC := shift(IC, type = "lag"), by = .(id)][]
# id IC LIC
# <char> <int> <int>
# 1: A 1 NA
# 2: A 2 1
# 3: A 3 2
# 4: A 4 3
# 5: A 5 4
# 6: B 11 NA
# 7: B 12 11
# 8: B 13 12
# 9: B 14 13
# 10: B 15 14
base R
dat$LIC <- ave(dat$IC, dat$id, FUN = function(z) c(NA, z[-length(z)]))
dat
# id IC LIC
# 1 A 1 NA
# 2 A 2 1
# 3 A 3 2
# 4 A 4 3
# 5 A 5 4
# 6 B 11 NA
# 7 B 12 11
# 8 B 13 12
# 9 B 14 13
# 10 B 15 14
Using your data:
mydat <- structure(list(Date = structure(c(18041, 18056, 18082,
18104, 18112, 18112),
class = "Date"),
Datetime = structure(c(1558760220,1560084120,
1562332320, 1564223220,
1564884780, 1564910280),
class = c("POSIXct","POSIXt"),
tzone = ""),
ID.2 = c(139, 139, 139, 152, 152, 139),
IC = c("High", "Low", "Medium", "Low", "Medium", "Medium"),
LIC = c(NA, NA, NA, NA, NA, NA)), row.names = c(NA, -6L),
class = "data.frame")
mydat %>% arrange(Datetime) %>% group_by(ID.2) %>% mutate(LIC = lag(IC))
# A tibble: 6 x 5
# Groups: ID.2 [2]
Date Datetime ID.2 IC LIC
<date> <dttm> <dbl> <chr> <chr>
1 2019-05-25 2019-05-25 11:57:00 139 High NA
2 2019-06-09 2019-06-09 19:42:00 139 Low High
3 2019-07-05 2019-07-05 20:12:00 139 Medium Low
4 2019-07-27 2019-07-27 17:27:00 152 Low NA
5 2019-08-04 2019-08-04 09:13:00 152 Medium Low
6 2019-08-04 2019-08-04 16:18:00 139 Medium Medium

R: Calculate percentage of missing Values (NA) per day for a Column in a data frame using panel data and remove the days with missing data of over 25%

I have a huge panel data set with daily data. I would like to remove all days for which more than 25% of the observations in the column "Size" are missing.
I created the following data to show what my real data looks like:
structure(list(Product = c("A", "A", "A", "A", "A", "A", "A",
"A", "A", "B", "B", "B", "B", "B", "B", "B", "B", "B", "C", "C",
"C", "C", "C", "C", "C", "C", "C"), Date = c("01.09.2018", "02.09.2018",
"03.09.2018", "04.09.2018", "05.09.2018", "11.11.2020", "12.11.2020",
"13.11.2020", "14.11.2020", "01.09.2018", "02.09.2018", "03.09.2018",
"04.09.2018", "05.09.2018", "11.11.2020", "12.11.2020", "13.11.2020",
"14.11.2020", "01.09.2018", "02.09.2018", "03.09.2018", "04.09.2018",
"05.09.2018", "11.11.2020", "12.11.2020", "13.11.2020", "14.11.2020"
), Size = c(10L, 9L, NA, 3L, 4L, 5L, 3L, NA, 6L, 7L, 4L, NA,
4L, 6L, 6L, 4L, 6L, 7L, 3L, 4L, NA, 2L, 4L, NA, 7L, 7L, 5L)), class = "data.frame", row.names = c(NA,
-27L))
I already tried the following, but I got stuck on how to continue with the code:
Data %>% summarize(group_by(Date), NoData = (is.na(Size)))
Then I got the error that group_by cannot be used on an object of class "Date". Further, I don't know how I can automatically remove the days where I have more than 25% missing values in the column "Size".
Could anyone help me here with the code that works for my problem?
I appreciate your help.
If you summarize(), you lose lots of information on the individual days. Furthermore, use group_by() before the other dplyr verbs. You can calculate the percentage of NAs by dividing the number of NAs by the number of rows per day. as_tibble() is only used to better show the number of rows; it would work without it too. I added a column "CountDate" so that you know how many times the same day appears in your data frame.
Data %>%
  as_tibble() %>%
  group_by(Date) %>%
  mutate(CountDate = n(), PercNA = sum(is.na(Size)) / n() * 100)
# A tibble: 27 x 5
# Groups: Date [9]
Product Date Size CountDate PercNA
<chr> <chr> <int> <int> <dbl>
1 A 01.09.2018 10 3 0
2 A 02.09.2018 9 3 0
3 A 03.09.2018 NA 3 100
4 A 04.09.2018 3 3 0
5 A 05.09.2018 4 3 0
6 A 11.11.2020 5 3 33.3
7 A 12.11.2020 3 3 0
8 A 13.11.2020 NA 3 33.3
9 A 14.11.2020 6 3 0
10 B 01.09.2018 7 3 0
# ... with 17 more rows
To remove the dates having >25% NA, just filter():
Data %>%
  as_tibble() %>%
  group_by(Date) %>%
  mutate(CountDate = n(), PercNA = sum(is.na(Size)) / n() * 100) %>%
  filter(PercNA < 25) %>%
  ungroup()
# A tibble: 18 x 5
Product Date Size CountDate PercNA
<chr> <chr> <int> <int> <dbl>
1 A 01.09.2018 10 3 0
2 A 02.09.2018 9 3 0
3 A 04.09.2018 3 3 0
4 A 05.09.2018 4 3 0
5 A 12.11.2020 3 3 0
6 A 14.11.2020 6 3 0
7 B 01.09.2018 7 3 0
8 B 02.09.2018 4 3 0
9 B 04.09.2018 4 3 0
10 B 05.09.2018 6 3 0
11 B 12.11.2020 4 3 0
12 B 14.11.2020 7 3 0
13 C 01.09.2018 3 3 0
14 C 02.09.2018 4 3 0
15 C 04.09.2018 2 3 0
16 C 05.09.2018 4 3 0
17 C 12.11.2020 7 3 0
18 C 14.11.2020 5 3 0
@Gnueghoidune's answer is a very good one. I just want to add that one can write this directly inside filter(), and thus avoid the use of summarize() and mutate().
Below is an example (I'm using mean(is.na(x)) instead of sum(is.na(x))/n()):
library(dplyr)
Data %>%
  group_by(Date) %>%
  filter(mean(is.na(Size)) <= 0.25)
#> # A tibble: 18 × 3
#> # Groups: Date [6]
#> Product Date Size
#> <chr> <chr> <int>
#> 1 A 01.09.2018 10
#> 2 A 02.09.2018 9
#> 3 A 04.09.2018 3
#> 4 A 05.09.2018 4
#> 5 A 12.11.2020 3
#> 6 A 14.11.2020 6
#> 7 B 01.09.2018 7
#> 8 B 02.09.2018 4
#> 9 B 04.09.2018 4
#> 10 B 05.09.2018 6
#> 11 B 12.11.2020 4
#> 12 B 14.11.2020 7
#> 13 C 01.09.2018 3
#> 14 C 02.09.2018 4
#> 15 C 04.09.2018 2
#> 16 C 05.09.2018 4
#> 17 C 12.11.2020 7
#> 18 C 14.11.2020 5
Created on 2022-04-15 by the reprex package (v2.0.1)
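For reference, a data.table equivalent of the same rule (a sketch; it keeps whole days only when at most 25% of Size is NA):
library(data.table)
setDT(Data)[, if (mean(is.na(Size)) <= 0.25) .SD, by = Date]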

Binned physiological time series data in R: calculate duration spent in each bin

I have a dataset containing changes in mean arterial blood pressure (MAP) over time from multiple participants. Here is an example dataframe:
df=structure(list(ID = c(1L, 1L, 1L, 1L, 1L, 1L, 1L, 2L, 2L,
2L, 2L, 2L, 2L, 2L), Time = structure(1:14, .Label = c("11:02:00",
"11:03:00", "11:04:00", "11:05:00", "11:06:00", "11:07:00", "11:08:00",
"13:30:00", "13:31:00", "13:32:00", "13:33:00", "13:34:00", "13:35:00",
"13:36:00"), class = "factor"), MAP = c(90.27999878, 84.25, 74.81999969,
80.87000275, 99.38999939, 81.51000214, 71.51000214, 90.08999634,
88.75, 84.72000122, 83.86000061, 94.18000031, 98.54000092, 51
)), class = "data.frame", row.names = c(NA, -14L))
I have binned the data into groups: e.g. MAP 40-60, 60-80, 80-100 and added a unique flag (1, 2 or 3) in an additional column map_bin. This is my code so far:
library(dplyr)
#Mean Arterial Pressure
#Bin 1=40-60; Bin 2=60-80; Bin 3=80-100
map_bin <- c("1", "2", "3")
output <- as_tibble(df) %>%
  mutate(map_bin = case_when(
    MAP >= 40 & MAP < 60 ~ map_bin[1],
    MAP >= 60 & MAP < 80 ~ map_bin[2],
    MAP >= 80 & MAP < 100 ~ map_bin[3]
  ))
For each ID I wish to calculate, in an additional column, the total time MAP is in each bin. I expect the following output:
ID  Time      MAP          map_bin  map_bin_dur
1   11:02:00  90.27999878  3        5
1   11:03:00  84.25        3        5
1   11:04:00  74.81999969  2        2
1   11:05:00  80.87000275  3        5
1   11:06:00  99.38999939  3        5
1   11:07:00  81.51000214  3        5
1   11:08:00  71.51000214  2        2
2   13:30:00  90.08999634  3        6
2   13:31:00  88.75        3        6
2   13:32:00  84.72000122  3        6
2   13:33:00  83.86000061  3        6
2   13:34:00  94.18000031  3        6
2   13:35:00  98.54000092  3        6
2   13:36:00  51           1        1
Here map_bin_dur is the total time in minutes that each individual's MAP resided in each bin, e.g. ID 1 had a MAP in bin 3 for 5 minutes in total.
If your Time column always comes in 1-minute steps, you can use add_count:
library(dplyr)
output <- output %>% add_count(ID, map_bin, name = 'map_bin_dur')
output
# ID Time MAP map_bin map_bin_dur
# <int> <fct> <dbl> <chr> <int>
# 1 1 11:02:00 90.3 3 5
# 2 1 11:03:00 84.2 3 5
# 3 1 11:04:00 74.8 2 2
# 4 1 11:05:00 80.9 3 5
# 5 1 11:06:00 99.4 3 5
# 6 1 11:07:00 81.5 3 5
# 7 1 11:08:00 71.5 2 2
# 8 2 13:30:00 90.1 3 6
# 9 2 13:31:00 88.8 3 6
#10 2 13:32:00 84.7 3 6
#11 2 13:33:00 83.9 3 6
#12 2 13:34:00 94.2 3 6
#13 2 13:35:00 98.5 3 6
#14 2 13:36:00 51 1 1
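If the sampling interval were not always exactly 1 minute (an assumption here; add_count counts rows, not time), you could instead sum the actual time differences per bin, starting from the binned output tibble in the question. A sketch that credits the last observation of each ID with one extra minute:
library(dplyr)
output %>%
  mutate(t = as.POSIXct(as.character(Time), format = "%H:%M:%S")) %>%
  group_by(ID) %>%
  mutate(step = as.numeric(difftime(lead(t, default = last(t) + 60), t, units = "mins"))) %>%
  group_by(ID, map_bin) %>%
  mutate(map_bin_dur = sum(step)) %>%
  ungroup() %>%
  select(-t, -step)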

Stacking multiple columns using pivot longer in R

I am trying to change my data from wide to long in R using pivot_longer. There appear to be a few people with similar issues on here, but I have been unable to adapt their solutions to my data. The wide data example below shows my data, and the long data example shows what I am trying to achieve.
In summary, I have a time and a reference column which refer to all columns, and multiple columns of group, subject, ID, x coordinate and y coordinate in the form of:
group1, subject1, ID1, xcoord1, ycoord1, group2, subject2, ID2, xcoord2, ycoord2 and so on... What I want is a long table with columns:
time, reference, group, subject, ID, xcoord, ycoord,
with the 5 columns stacking their respective numbered columns, and the time and reference columns repeating for the relevant stacks.
df %>%
  pivot_longer(cols = -c(time, reference),
               names_to = c("group", "subject", "ID", "xcoord", "ycoord"))
My understanding is that I need to use the names_pattern argument, although I can't seem to get it to work, and I can't find anything clearly describing how I should be using it. I should say my data is much wider than the example data, so I can't really rely on the numbering of columns.
I appreciate any help.
wide data example
time reference group1 subject1 ID1 xcoord1 ycoord1 group2 subject2 ID2 xcoord2 ycoord2 group3 subject3 ID3 xcoord3 ycoord3
1 00:01 4097365 1 4 1 7.44 38.16 0 21 2 33.90 47.26 1 15 3 21.53 2.67
2 00:02 4097366 1 4 1 9.84 37.03 0 21 2 32.98 48.47 1 15 3 21.82 2.95
3 00:03 4097367 1 4 1 12.01 35.83 0 21 2 30.17 50.33 1 15 3 22.06 4.45
4 00:04 4097368 1 4 1 12.15 34.17 0 21 2 29.85 50.52 1 15 3 23.50 4.75
5 00:05 4097369 1 4 1 15.27 32.94 0 21 2 28.39 51.30 1 15 3 24.25 4.76
6 00:06 4097370 1 4 1 18.96 31.98 0 21 2 28.39 52.36 1 15 3 25.31 6.57
7 00:07 4097371 1 4 1 22.50 31.13 0 21 2 26.59 53.14 1 15 3 26.05 7.04
8 00:08 4097372 1 4 1 27.47 30.15 0 21 2 25.89 53.94 1 15 3 27.29 7.91
9 00:09 4097373 1 4 1 32.17 29.92 0 21 2 24.64 54.42 1 15 3 27.47 8.44
10 00:10 4097374 1 4 1 33.77 27.49 0 21 2 24.61 55.23 1 15 3 28.59 8.71
Long data example
time reference group subject ID xcoord ycoord
1 00:01 4097365 1 4 1 7.44 38.16
2 00:01 4097365 0 21 2 33.90 47.26
3 00:01 4097365 1 15 3 21.53 2.67
4 00:02 4097366 1 4 1 9.84 37.03
5 00:02 4097366 0 21 2 32.98 48.47
6 00:02 4097366 1 15 3 21.82 2.95
7 00:03 4097367 1 4 1 12.01 35.83
8 00:03 4097367 0 21 2 30.17 50.33
9 00:03 4097367 1 15 3 22.06 4.45
10 00:04 4097368 1 4 1 12.15 34.17
Edit: playing about a bit with the data, I have managed to achieve this odd result, which is a mixture of long and wide data.
dput(head(df1))
structure(list(time = c(0, 0, 0, 0, 0, 0), state = structure(c(2L,
2L, 2L, 2L, 2L, 2L), .Label = c("Alive", "Alive;:", "Dead", "Dead;:"
), class = "factor"), reference = c("1880439", "1880439", "1880439",
"1880439", "1880439", "1880439"), num = c("1", NA, "2", "3",
"4", "5"), group = c("1", NA, "1", "4", "0", "0"), X = c(NA,
NA, NA, NA, NA, NA), ID = c(1L, NA, 2L, 4L, 5L, 6L), subect = c(21L,
NA, 7L, -1L, 2L, 6L), x = c(3514L, NA, 2807L, 5550L, 3956L, 3686L
), y = c(-1644L, NA, -510L, 4400L, 1297L, -55L), speed = c("5.23",
NA, "3.24", "0.00", "2.31", "3.57"), group1 = c("0", NA, "4",
"1", "1", "0"), ID1 = c(13L, NA, 14L, 15L, 16L, 17L), subect1 = c(9L,
NA, -1L, 13L, 14L, 11L), x1 = c(882L, NA, 5550L, 3004L, 761L,
3317L), y1 = c(-1468L, NA, 4400L, 1633L, 559L, 1443L), speed1 = c("1.70",
NA, "0.00", "3.06", "2.92", "3.30"), group2 = c("4", NA, "0",
"1", "0", "0"), ID2 = c(24L, NA, 25L, 26L, 27L, 28L), subect2 = c(-1L,
NA, 1L, 18L, 5L, 10L), x2 = c(5550L, NA, 5031L, 3936L, 3972L,
3623L), y2 = c(4400L, NA, -74L, 190L, 686L, 356L), speed2 = c("0.00",
NA, "0.54", "1.06", "0.95", "2.49"), speed.group2 = c(NA, NA,
NA, NA, NA, NA)), class = c("tbl_df", "tbl", "data.frame"), row.names = c(NA,
-6L))
The data frame the code results in looks like this:
> head(df1)
# A tibble: 6 x 24
time state reference num group X ID subect x y speed group1 ID1 subect1 x1 y1 speed1 group2 ID2 subect2 x2 y2 speed2
<dbl> <fct> <chr> <chr> <chr> <lgl> <int> <int> <int> <int> <chr> <chr> <int> <int> <int> <int> <chr> <chr> <int> <int> <int> <int> <chr>
1 0 Aliv~ 1880439 1 1 NA 1 21 3514 -1644 5.23 0 13 9 882 -1468 1.70 4 24 -1 5550 4400 0.00
2 0 Aliv~ 1880439 NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA NA
3 0 Aliv~ 1880439 2 1 NA 2 7 2807 -510 3.24 4 14 -1 5550 4400 0.00 0 25 1 5031 -74 0.54
4 0 Aliv~ 1880439 3 4 NA 4 -1 5550 4400 0.00 1 15 13 3004 1633 3.06 1 26 18 3936 190 1.06
5 0 Aliv~ 1880439 4 0 NA 5 2 3956 1297 2.31 1 16 14 761 559 2.92 0 27 5 3972 686 0.95
6 0 Aliv~ 1880439 5 0 NA 6 6 3686 -55 3.57 0 17 11 3317 1443 3.30 0 28 10 3623 356 2.49
# ... with 1 more variable: speed.group2 <lgl>
I would first rename the columns to insert an underscore right before the number, then use that as the separator in pivot_longer().
library(tidyverse)
df %>%
  rename_at(-c(1:2), ~ str_replace(., "(\\w+)(\\d)", "\\1_\\2")) %>%
  pivot_longer(cols = -c(1:2), names_to = c(".value", "num"), names_sep = "_")
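Since the question mentions names_pattern: the rename step can be skipped by letting names_pattern split the letters from the trailing digits (a sketch that assumes every reshaped column name ends in its number):
df %>%
  pivot_longer(cols = -c(time, reference),
               names_to = c(".value", "num"),
               names_pattern = "([a-zA-Z]+)(\\d+)")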
Edit (2/7/20):
With your updated dataset, it appears that some of the variable column names don't have a number at the end. We can add 0 for those.
Also, I assume the variables you want repeated are group, ID, subect, x, y, and speed (with the first group in column 5 separated from its related variables in columns 7-11).
df1 %>%
  rename_at(c(5, 7:11), ~ paste0(., "0")) %>%
  rename_at(-c(1:4, 6, 24), ~ str_replace(., "(\\w+)(\\d+)", "\\1_\\2")) %>%
  pivot_longer(cols = -c(1:4, 6, 24), names_to = c(".value", "val"), names_sep = "_")
Output (Revised):
# A tibble: 18 x 13
time state reference num X speed.group2 val group ID subect x y speed
<dbl> <fct> <chr> <chr> <lgl> <lgl> <chr> <chr> <int> <int> <int> <int> <chr>
1 0 Alive;: 1880439 1 NA NA 0 1 1 21 3514 -1644 5.23
2 0 Alive;: 1880439 1 NA NA 1 0 13 9 882 -1468 1.70
3 0 Alive;: 1880439 1 NA NA 2 4 24 -1 5550 4400 0.00
4 0 Alive;: 1880439 NA NA NA 0 NA NA NA NA NA NA
5 0 Alive;: 1880439 NA NA NA 1 NA NA NA NA NA NA
6 0 Alive;: 1880439 NA NA NA 2 NA NA NA NA NA NA
7 0 Alive;: 1880439 2 NA NA 0 1 2 7 2807 -510 3.24
8 0 Alive;: 1880439 2 NA NA 1 4 14 -1 5550 4400 0.00
9 0 Alive;: 1880439 2 NA NA 2 0 25 1 5031 -74 0.54
10 0 Alive;: 1880439 3 NA NA 0 4 4 -1 5550 4400 0.00
11 0 Alive;: 1880439 3 NA NA 1 1 15 13 3004 1633 3.06
12 0 Alive;: 1880439 3 NA NA 2 1 26 18 3936 190 1.06
13 0 Alive;: 1880439 4 NA NA 0 0 5 2 3956 1297 2.31
14 0 Alive;: 1880439 4 NA NA 1 1 16 14 761 559 2.92
15 0 Alive;: 1880439 4 NA NA 2 0 27 5 3972 686 0.95
16 0 Alive;: 1880439 5 NA NA 0 0 6 6 3686 -55 3.57
17 0 Alive;: 1880439 5 NA NA 1 0 17 11 3317 1443 3.30
18 0 Alive;: 1880439 5 NA NA 2 0 28 10 3623 356 2.49

dplyr mutate function to evaluate values within columns (current, previous, next) vertically

I have scoured SO for a way to achieve what I need, without luck, so here it goes.
A while back I discovered the dplyr package and its potential. I am thinking this package can do what I want; I just don't know how. This is a small subset of my data, but it should be representative of my problem.
dummy<-structure(list(time = structure(1:20, .Label = c("2015-03-25 12:24:00",
"2015-03-25 21:08:00", "2015-03-25 21:13:00", "2015-03-25 21:47:00",
"2015-03-26 03:08:00", "2015-04-01 20:30:00", "2015-04-01 20:34:00",
"2015-04-01 20:42:00", "2015-04-01 20:45:00", "2015-09-29 18:26:00",
"2015-09-29 19:11:00", "2015-09-29 21:21:00", "2015-09-29 22:03:00",
"2015-09-29 22:38:00", "2015-09-30 00:48:00", "2015-09-30 01:38:00",
"2015-09-30 01:41:00", "2015-09-30 01:45:00", "2015-09-30 01:47:00",
"2015-09-30 01:49:00"), class = "factor"), ID = c(1L, 1L,
1L, 1L, 1L, 1L, 1L, 1L, 1L, 2L, 2L,
2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L,
2L), station = c(1L, 1L, 1L, 2L, 3,
4L, 4L, 4L, 4L, 5L, 5L, 6L,
6L, 5, 5, 5L, 7, 7, 7L,
7)), .Names = c("time", "ID", "station"), class = "data.frame", row.names = c(NA,
-20L))
I wish to evaluate rows within the time column conditional on the ID and station columns. Specifically, I would like the function (dplyr?) to evaluate each time row and compare its time to the previous time (row - 1) and the next time (row + 1). If the time of the current row is within 1 hour of the time of the previous and/or next row, and the ID and station of the current row match those of the previous and/or next row, then I would like to add a 1 in a new column, otherwise a 0.
How would I achieve this using dplyr?
The expected outcome should be like this:
time ID station new.value
1 2015-03-25 12:24:00 1 1 0
2 2015-03-25 21:08:00 1 1 1
3 2015-03-25 21:13:00 1 1 1
4 2015-03-25 21:47:00 1 2 0
5 2015-03-26 03:08:00 1 3 0
6 2015-04-01 20:30:00 1 4 1
7 2015-04-01 20:34:00 1 4 1
8 2015-04-01 20:42:00 1 4 1
9 2015-04-01 20:45:00 1 4 1
10 2015-09-29 18:26:00 2 5 1
11 2015-09-29 19:11:00 2 5 1
12 2015-09-29 21:21:00 2 6 1
13 2015-09-29 22:03:00 2 6 1
14 2015-09-29 22:38:00 2 5 0
15 2015-09-30 00:48:00 2 5 1
16 2015-09-30 01:38:00 2 5 1
17 2015-09-30 01:41:00 2 7 1
18 2015-09-30 01:45:00 2 7 1
19 2015-09-30 01:47:00 2 7 1
20 2015-09-30 01:49:00 2 7 1
Here is an option using difftime with the dplyr mutate function. First, we use a group_by operation to make sure the comparison happens within each unique combination of ID and station. difftime calculates the time difference; here the units are set to hours for convenience. The lag and lead functions, also from dplyr, shift the selected column backward or forward. Combined with the vectorised difftime, this gives the time difference between the current row and the previous/next row. abs makes sure the result is an absolute value, the condition < 1 makes sure the difference is within an hour, and as.integer converts the logical values (TRUE or FALSE) to (1 or 0) correspondingly.
library(dplyr)
dummy %>%
  group_by(ID, station) %>%
  mutate(new.value = as.integer(
    abs(difftime(time, lag(time, default = Inf), units = "hours")) < 1 |
    abs(difftime(time, lead(time, default = Inf), units = "hours")) < 1))
Source: local data frame [20 x 4]
Groups: ID, station [7]
time ID station new.value
(time) (int) (dbl) (int)
1 2015-03-25 12:24:00 1 1 0
2 2015-03-25 21:08:00 1 1 1
3 2015-03-25 21:13:00 1 1 1
4 2015-03-25 21:47:00 1 2 0
5 2015-03-26 03:08:00 1 3 0
6 2015-04-01 20:30:00 1 4 1
7 2015-04-01 20:34:00 1 4 1
8 2015-04-01 20:42:00 1 4 1
9 2015-04-01 20:45:00 1 4 1
10 2015-09-29 18:26:00 2 5 1
11 2015-09-29 19:11:00 2 5 1
12 2015-09-29 21:21:00 2 6 1
13 2015-09-29 22:03:00 2 6 1
14 2015-09-29 22:38:00 2 5 0
15 2015-09-30 00:48:00 2 5 1
16 2015-09-30 01:38:00 2 5 1
17 2015-09-30 01:41:00 2 7 1
18 2015-09-30 01:45:00 2 7 1
19 2015-09-30 01:47:00 2 7 1
20 2015-09-30 01:49:00 2 7 1
Psidom's answer is great -- here's a data.table approach.
library(data.table)
setDT(dummy)
# you do NOT want a factor for your time variable
dummy[, time := as.POSIXct(time) ]
dummy[, `:=`(lag_diff  = c(Inf, diff(as.numeric(time))),
             lead_diff = c(diff(as.numeric(time)), Inf)),
      by = .(ID, station)]
dummy[, new.value := as.integer(lag_diff < 3600 | lead_diff < 3600)]
dummy
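If you don't want to keep the helper columns afterwards, drop them by reference:
dummy[, c("lag_diff", "lead_diff") := NULL]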
Another solution using R base functions (sapply and difftime):
n <- nrow(dummy)
dummy$new.value <-
  as.numeric(sapply(1:n, function(i)
    (i < n && (dummy[i, "ID"] == dummy[i + 1, "ID"] && dummy[i, "station"] == dummy[i + 1, "station"])
       && abs(as.numeric(difftime(dummy[i, "time"], dummy[i + 1, "time"]), "hours")) <= 1)
    ||
    (i > 1 && (dummy[i, "ID"] == dummy[i - 1, "ID"] && dummy[i, "station"] == dummy[i - 1, "station"])
       && abs(as.numeric(difftime(dummy[i, "time"], dummy[i - 1, "time"]), "hours")) <= 1)
  ))
# > dummy
# time ID station new.value
# 1 2015-03-25 12:24:00 1 1 0
# 2 2015-03-25 21:08:00 1 1 1
# 3 2015-03-25 21:13:00 1 1 1
# 4 2015-03-25 21:47:00 1 2 0
# 5 2015-03-26 03:08:00 1 3 0
# 6 2015-04-01 20:30:00 1 4 1
# 7 2015-04-01 20:34:00 1 4 1
# 8 2015-04-01 20:42:00 1 4 1
# 9 2015-04-01 20:45:00 1 4 1
# 10 2015-09-29 18:26:00 2 5 1
# 11 2015-09-29 19:11:00 2 5 1
# 12 2015-09-29 21:21:00 2 6 1
# 13 2015-09-29 22:03:00 2 6 1
# 14 2015-09-29 22:38:00 2 5 0
# 15 2015-09-30 00:48:00 2 5 1
# 16 2015-09-30 01:38:00 2 5 1
# 17 2015-09-30 01:41:00 2 7 1
# 18 2015-09-30 01:45:00 2 7 1
# 19 2015-09-30 01:47:00 2 7 1
# 20 2015-09-30 01:49:00 2 7 1
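A vectorised base R variant (a sketch; it assumes the rows are already sorted by ID and time, as in the example data):
t <- as.POSIXct(dummy$time)
n <- nrow(dummy)
# TRUE where a row is within 1 hour of the previous row AND shares its ID/station
near_prev <- c(FALSE, abs(diff(as.numeric(t))) <= 3600) &
  c(FALSE, dummy$ID[-1] == dummy$ID[-n] & dummy$station[-1] == dummy$station[-n])
# a row matches its next row exactly when that next row matches its previous one
near_next <- c(near_prev[-1], FALSE)
dummy$new.value <- as.integer(near_prev | near_next)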
