Getting multiple values per group based on two criteria in R

I'm trying to group a dataset and get the first and highest values based on two separate measures of time and speed. So I need the time and speed for the earliest record in each group and then the time and speed for the fastest record in each group. I've got this far but need some help...
library(tidyverse)
group <- c(1,1,1,1,1,2,2,3,3,4,4,4,4,4,4)
time <- c(1,6,4,5,7,12,10,2,3,8,9,11,13,14,15)
speed <- c(17,6, 99, 34, 12, 5, 67, 43, 23, 12, 15, 78, 61, 78, 20)
data = data.frame(group, time, speed)
summary <- data %>%
  group_by(group) %>%
  summarise(
    firstTime = ,         # lowest time
    HighestSpeedTime = ,  # time for highest speed
    firstSpeed = ,        # speed for lowest time
    highestSpeed = max(speed)  # highest speed
  )

Update:
This should work. Note that in group 4 we have ties and therefore two rows: the highest speed occurs at two time points.
library(dplyr)
data %>%
  group_by(group) %>%
  summarise(
    firstTime = min(time),                               # lowest time
    HighestSpeedTime = time[which(speed == max(speed))], # time for highest speed
    firstSpeed = speed[which(time == min(time))],        # speed for lowest time
    highestSpeed = max(speed)                            # highest speed
  )
Output:
  group firstTime HighestSpeedTime firstSpeed highestSpeed
  <dbl>     <dbl>            <dbl>      <dbl>        <dbl>
1     1         1                4         17           99
2     2        10               10         67           67
3     3         2                2         43           43
4     4         8               11         12           78
5     4         8               14         12           78
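For what it's worth, the same result can be written with dplyr's slice helpers; a minimal sketch, assuming dplyr >= 1.0 (slice_max() keeps both tied rows for group 4):
library(dplyr)
first_rows <- data %>%
  group_by(group) %>%
  slice_min(time, n = 1) %>%                    # earliest record per group
  rename(firstTime = time, firstSpeed = speed)
fastest_rows <- data %>%
  group_by(group) %>%
  slice_max(speed, n = 1, with_ties = TRUE) %>% # fastest record(s), ties kept
  rename(HighestSpeedTime = time, highestSpeed = speed)
left_join(first_rows, fastest_rows, by = "group") # 5 rows: group 4 appears twice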

Does this work?
library(tidyverse)
group <- c(1,1,1,1,1,2,2,3,3,4,4,4,4,4,4)
time <- c(1,6,4,5,7,12,10,2,3,8,9,11,13,14,15)
speed <- c(17,6, 99, 34, 12, 5, 67, 43, 23, 12, 15, 78, 61, 78, 20)
data = data.frame(group, time, speed)
summary <- data |>
  arrange(group, time) |>
  group_by(group) |>
  summarise(
    firsttime = min(time),
    highest_speed = max(speed)
  ) |>
  left_join(data, by = c("group", "highest_speed" = "speed")) |>
  group_by(group) |>
  slice(1) |>
  rename(highest_speed_time = time) |>
  left_join(data, by = c("group", "firsttime" = "time")) |>
  rename(first_speed = speed)
summary
# group firsttime highest_speed highest_speed_time first_speed
# <dbl>     <dbl>         <dbl>              <dbl>       <dbl>
#     1         1            99                  4          17
#     2        10            67                 10          67
#     3         2            43                  2          43
#     4         8            78                 11          12

Here is a data.table approach:
library(data.table)
setDT(data)
# rows with the maximum speed per group (ties are kept)
temp <- data[data[, .I[speed == max(speed)], by = .(group)]$V1]
setnames(temp, new = c("group", "maxSpeedTime", "maxSpeed"))
# join together; use which.min(time) rather than time[1] so the result
# does not depend on the row order within each group
data[, .(firstTime = time[which.min(time)],
         firstSpeed = speed[which.min(time)]),
     by = .(group)][temp, on = .(group)]
#    group firstTime firstSpeed maxSpeedTime maxSpeed
# 1:     1         1         17            4       99
# 2:     2        10         67           10       67
# 3:     3         2         43            2       43
# 4:     4         8         12           11       78
# 5:     4         8         12           14       78

Another solution, with a chained inner_join:
library(tidyverse)
data %>%
  group_by(group) %>%
  summarise(firstTime = min(time)) %>%
  inner_join(data, by = c("group", "firstTime" = "time")) %>%
  rename(firstSpeed = speed) %>%
  inner_join(
    data %>%
      group_by(group) %>%
      summarise(highestSpeed = max(speed)) %>%
      inner_join(data, by = c("group", "highestSpeed" = "speed"))
  ) %>%
  relocate(highestTime = time, .before = "highestSpeed")
#> Joining, by = "group"
#> # A tibble: 5 × 5
#> group firstTime firstSpeed highestTime highestSpeed
#> <dbl> <dbl> <dbl> <dbl> <dbl>
#> 1 1 1 17 4 99
#> 2 2 10 67 10 67
#> 3 3 2 43 2 43
#> 4 4 8 12 11 78
#> 5 4 8 12 14 78
An alternative solution, based on purrr::map_dfr:
library(tidyverse)
data %>%
  group_split(group) %>%
  map_dfr(
    ~ data.frame(
      group = .x$group[1],
      firstTime = .x$time[min(.x$time) == .x$time],
      firstSpeed = .x$speed[min(.x$time) == .x$time],
      highestTime = .x$time[max(.x$speed) == .x$speed],
      highestSpeed = .x$speed[max(.x$speed) == .x$speed]))
#> group firstTime firstSpeed highestTime highestSpeed
#> 1 1 1 17 4 99
#> 2 2 10 67 10 67
#> 3 3 2 43 2 43
#> 4 4 8 12 11 78
#> 5 4 8 12 14 78
And more succinctly:
library(tidyverse)
data %>%
  group_split(group) %>%
  map_dfr(~ data.frame(
    group = integer(), firstTime = integer(), firstSpeed = integer(),
    highestTime = integer(), highestSpeed = integer()) %>%
    add_row(!!!setNames(c(.x$group[1], .x[min(.x$time) == .x$time, -1],
                          .x[max(.x$speed) == .x$speed, -1]), names(.))))
#> group firstTime firstSpeed highestTime highestSpeed
#> 1 1 1 17 4 99
#> 2 2 10 67 10 67
#> 3 3 2 43 2 43
#> 4 4 8 12 11 78
#> 5 4 8 12 14 78

Identify the highest number of consecutive numbers in a dataframe and add rows with NA

Here is a reproducible example of the situation I need help with. I have a database (db1) in which weekly ratings of behavioral outcomes are recorded. The variable "Week" corresponds to the number of the week from the beginning of the year (e.g., Week = 1 indicates the week between January 1st and 7th, and so on) and the variable "Score" to the value obtained by the subject on the criterion measure. In the real data set I have several participants and a different number of ratings for each subject; in this example, however, there is only one subject to make things easier.
library(magrittr)
x1 <- c(14, 18, 19, 20, 21, 23, 24, 25)
y1 <- c(34, 21, 45, 32, 56, 45, 23, 48)
db1 <- cbind(x1, y1) %>% as.data.frame() %>% setNames(c("Week", "Score"))
db1
# Week Score
#1 14 34
#2 18 21
#3 19 45
#4 20 32
#5 21 56
#6 23 45
#7 24 23
#8 25 48
What I need to do is identify the longest run of ratings in consecutive weeks in the database. In the example, the longest run is 4, because ratings were recorded in consecutive weeks from week 18 to 21. I added a column here for demonstration, but it might not be necessary for the solution.
x2 <- c(14, 18, 19, 20, 21, 23, 24, 25)
y2 <- c(34, 21, 45, 32, 56, 45, 23, 48)
z2 <- c(1, 1, 2, 3, 4, 1, 2, 3)
db2 <- cbind(x2, y2, z2) %>% as.data.frame() %>% setNames(c("Week", "Score", "Consecutive"))
db2
# Week Score Consecutive
#1 14 34 1
#2 18 21 1
#3 19 45 2
#4 20 32 3
#5 21 56 4
#6 23 45 1
#7 24 23 2
#8 25 48 3
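As a quick check, the length of the longest run can also be computed directly; a minimal base-R sketch:
runs <- rle(diff(db1$Week) == 1)   # runs of week-to-week gaps of exactly 1
max(runs$lengths[runs$values]) + 1 # 4: weeks 18 to 21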
Finally, because every subject has to have a total of five consecutive ratings, I need to add a row with a missing datum wherever the longest run of consecutive weeks is below five (so that I can impute the missing data later on). However, there might be ratings before and after the sequence. In that case, I want to add the row based on the minimal distance between the first or last week of the longest run and the nearest existing rating. In the example, this means the row with the missing datum will be added after week 21, because there are 4 missing weeks between weeks 14 and 18 but only 1 between weeks 21 and 23.
x3 <- c(14, 18, 19, 20, 21, 22, 23, 24, 25)
y3 <- c(34, 21, 45, 32, 56, NA, 45, 23, 48)
z3 <- c(1, 1, 2, 3, 4, 5, 1, 2, 3)
db3 <- cbind(x3, y3, z3) %>% as.data.frame() %>% setNames(c("Week", "Score", "Consecutive"))
db3
# Week Score Consecutive
#1 14 34 1
#2 18 21 1
#3 19 45 2
#4 20 32 3
#5 21 56 4
#6 22 NA 5
#7 23 45 1
#8 24 23 2
#9 25 48 3
For your information, this is not going to be part of the main statistical analyses but rather one of several ways I want to use to test the sensitivity of my model. So do not worry about whether it makes sense from a methodological point of view. In addition, if possible, a tidyverse solution would be greatly appreciated.
Thanks so much to anyone who will take the time.
The code is relatively simple if you only want to handle the longest run and, when there is more than one, just the first of them:
db1 %>%
  mutate(consecutive = accumulate(diff(Week), .init = 1,
                                  ~ if (.y == 1) .x + 1 else 1),
         dummy = max(consecutive) == consecutive & max(consecutive) < 5) %>%
  group_by(grp = cumsum(consecutive == 1)) %>%
  filter(sum(dummy) > 0) %>%   # keep only the group(s) containing the longest run
  ungroup() %>%
  select(-dummy) %>%
  filter(grp == min(grp)) %>%  # keep the first such group, if there is more than one
  complete(consecutive = 1:5) %>%
  select(-grp) %>%
  mutate(Week = first(Week) + consecutive - 1)
# A tibble: 5 x 3
consecutive Week Score
<dbl> <dbl> <dbl>
1 1 18 21
2 2 19 45
3 3 20 32
4 4 21 56
5 5 22 NA
OLD ANSWER: another tidyverse strategy (this can be modified to suit your additional column requirements, which you haven't given in the sample):
library(tidyverse)
db1
#> Week Score
#> 1 14 34
#> 2 18 21
#> 3 19 45
#> 4 20 32
#> 5 21 56
#> 6 23 45
#> 7 24 23
#> 8 25 48
library(data.table) # for rleid()
db1 %>%
  mutate(consecutive = accumulate(diff(Week), .init = 1,
                                  ~ if (.y == 1) .x + 1 else 1),
         dummy = max(consecutive) == consecutive & max(consecutive) < 5,
         dummy2 = rleid(dummy)) %>%
  group_split(dummy2, .keep = FALSE) %>%
  map_if(~ .x$dummy[[1]],
         ~ .x %>%
           complete(consecutive = seq(max(consecutive), 5, 1),
                    fill = list(Week = 1)) %>%
           mutate(Week = cumsum(Week))) %>%
  map_dfr(~ .x %>% select(-dummy))
#> # A tibble: 9 x 3
#> Week Score consecutive
#> <dbl> <dbl> <dbl>
#> 1 14 34 1
#> 2 18 21 1
#> 3 19 45 2
#> 4 20 32 3
#> 5 21 56 4
#> 6 22 NA 5
#> 7 23 45 1
#> 8 24 23 2
#> 9 25 48 3
Created on 2021-06-10 by the reprex package (v2.0.0)
If I understand correctly:
library(data.table)
library(tidyverse)
x1 <- c(14, 18, 19, 20, 21, 23, 24, 25)
y1 <- c(34, 21, 45, 32, 56, 45, 23, 48)
db1 <- cbind(x1, y1) %>% as.data.frame() %>% setNames(c("Week", "Score"))
db1 %>%
  mutate(grp = cumsum(c(0, diff(Week)) > 1)) %>%
  group_by(grp) %>%
  mutate(n_grp = n()) %>%
  ungroup() %>%
  filter(n_grp == max(n_grp, na.rm = TRUE)) %>%
  complete(grp,
           n_grp,
           nesting(Week = seq(from = first(Week), length = 5))) %>%
  select(-c(grp, n_grp)) %>%
  rows_upsert(db1, by = c("Week", "Score"))
#> # A tibble: 9 x 2
#> Week Score
#> <dbl> <dbl>
#> 1 18 21
#> 2 19 45
#> 3 20 32
#> 4 21 56
#> 5 22 NA
#> 6 14 34
#> 7 23 45
#> 8 24 23
#> 9 25 48
Created on 2021-06-10 by the reprex package (v2.0.0)
You can also use the following solution. Midway through, before using add_row to add the extra rows, I filter the whole data set to keep only the groups with the most observations, i.e. the longest runs of consecutive Weeks. Because we use group_split, after splitting on the grouping variable we may end up with two or more groups whose runs are equally long, and you can then choose whichever you like:
library(dplyr)
library(purrr)
library(tibble)
db1 %>%
  mutate(Consecutive = +(Week - lag(Week, default = first(Week)) == 1),
         grp = cumsum(Consecutive == 0)) %>%
  group_by(grp) %>%
  mutate(Consecutive = row_number()) %>%
  group_by(grp, .drop = TRUE) %>%
  add_count() %>%
  ungroup() -> db2 # we create our grouping variable `grp` here

db2 %>%
  filter(n == max(n)) %>%
  group_split(grp) %>%
  map_dfr(~ add_row(.x,
                    Week = .x$Week[.x$n[1]] + seq(1, 5 - .x$n[1], 1),
                    Consecutive = .x$Consecutive[.x$n[1]] + seq(1, 5 - .x$n[1], 1),
                    grp = .x$grp[1])) %>%
  bind_rows(db2 %>%
              filter(n != max(n))) %>%
  select(-c(grp, n)) %>%
  arrange(Week)
# A tibble: 9 x 3
Week Score Consecutive
<dbl> <dbl> <dbl>
1 14 34 1
2 18 21 1
3 19 45 2
4 20 32 3
5 21 56 4
6 22 NA 5
7 23 45 1
8 24 23 2
9 25 48 3

How to use R to create baseball Splits from Game Logs

I'm trying to use R to recreate Baseball Splits as found on MLB.com. The splits are created from Game Logs and provide different cuts of the data. For example: home games vs. away games, day games vs. night games, August vs. September, and many more, all in one convenient table. I believe the ratios (AVG, OBP, SLG) can all be added via mutate once the basic splits have been totaled.
My question is: what's the best and most efficient way to create these splits, and how should the data be shaped? The game log obviously has additional (hidden) columns that contain the split topics. The nature of the problem leads me to believe purrr might be a tool to employ, but I can't quite wrap my mind around how to approach this one.
Here is how I believe the data should be shaped, and a link to a sample game log. I would appreciate any thoughts, ideas, or solutions to this problem.
Links and images of Game Logs and Splits for Nationals outfielder Juan Soto are set forth below.
Game Logs: Juan Soto Game Log
Splits: Juan Soto Game Splits
I've gone through the dataset, although I'm not sure the sums or the averages match the images above.
You're right about using mutate to create the ratio values you suggest.
Hopefully my approach can help you get what you're after.
library(tidyverse)
library(data.table)
game.splits <- "https://raw.githubusercontent.com/MundyMSDS/GAMELOG/main/SAMPLE_GAME_LOG.csv"
game.splits <- fread(game.splits, fill = TRUE)

# recode each split variable to a unique integer id
game.splits.pivot <- game.splits
game.splits.pivot$Var1 <- ifelse(game.splits.pivot$Var1 %in% "HOME", 1, 0)
game.splits.pivot$Var2 <- ifelse(game.splits.pivot$Var2 %in% "NIGHT", 3, 2)
game.splits.pivot$Var3 <- ifelse(game.splits.pivot$Var3 %in% "SEPTEMBER", 5, 4)

# reshape so each stat is paired with each split id
game.splits.pivot <- game.splits.pivot %>% pivot_longer(-c(1:16, 20))
colnames(game.splits.pivot)[19] <- "name_c"
game.splits.pivot <- game.splits.pivot[, -c(17, 18)]
game.splits.pivot <- game.splits.pivot %>% pivot_longer(-c(1:3, 17))

# total each stat by split id, then spread the stats back out
game.splits.pivot_test <- game.splits.pivot[, -c(1, 2, 3)]
game.splits.pivot_test <- aggregate(value ~ name_c + name, game.splits.pivot_test, sum)
game.splits.pivot_test <- game.splits.pivot_test %>%
  pivot_wider(names_from = name, values_from = value)

# attach readable split labels and order the columns
lc_name <- tibble(name_c = 0:5,
                  split = c("HOME", "AWAY", "DAY", "NIGHT", "AUGUST", "SEPTEMBER"))
game.splits.pivot_test <- game.splits.pivot_test %>%
  inner_join(lc_name, by = "name_c") %>%
  arrange(name_c) %>%
  select(-name_c)
game.splits.pivot_test <- game.splits.pivot_test[, c(14, 3, 9, 6, 1, 2, 7, 10, 4, 8, 12, 11, 5, 13)]
A look into the dataset:
# A tibble: 6 x 14
split AB R H `2B` `3B` HR RBI BB IBB SO SB CS TB
<chr> <int> <int> <int> <int> <int> <int> <int> <int> <int> <int> <int> <int> <int>
1 HOME 88 24 32 5 0 9 23 15 5 12 1 2 64
2 AWAY 66 15 22 9 0 4 14 26 7 16 5 0 43
3 DAY 29 21 18 4 0 5 17 12 4 3 4 0 37
4 NIGHT 125 18 36 10 0 8 20 29 8 25 2 2 70
5 AUGUST 90 21 33 6 0 11 25 13 1 13 1 1 72
6 SEPTEMBER 64 18 21 8 0 2 12 28 11 15 5 1 35
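As the question notes, the rate stats can then be added with mutate; a minimal sketch on the summed table above (the OBP here is simplified to ignore HBP and SF, which the sample log doesn't carry, so it is an approximation rather than MLB's exact formula):
game.splits.pivot_test %>%
  mutate(AVG = round(H / AB, 3),                # batting average
         OBP = round((H + BB) / (AB + BB), 3),  # simplified on-base percentage
         SLG = round(TB / AB, 3))               # slugging percentage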
This turned out to be more straightforward than I had thought. The following solution relies on pivot_longer to shape the data and summarise_if to tally the splits; no rbinds or purrr needed.
library(tidyverse)
game.splits <- "https://raw.githubusercontent.com/MundyMSDS/GAMELOG/main/SAMPLE_GAME_LOG.csv"
game.splits <- read_csv(game.splits)
game.splits %>%
  pivot_longer(Var1:Var3, names_to = "split") %>%
  group_by(split) %>%
  arrange(split) %>%
  select(split, value, everything()) %>%
  ungroup() %>%
  select(-Date, -OPP) %>%
  mutate(value = str_c(split, "_", value)) %>%
  group_by(value) %>%
  summarise_if(is.numeric, sum) %>%
  mutate(value = str_replace(value, "(Var\\d_)", ""))
#> # A tibble: 6 x 14
#> value AB R H TB `2B` `3B` HR RBI BB IBB SO SB
#> <chr> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl>
#> 1 AWAY 88 24 32 64 5 0 9 23 15 5 12 1
#> 2 HOME 66 15 22 43 9 0 4 14 26 7 16 5
#> 3 DAY 29 21 18 37 4 0 5 17 12 4 3 4
#> 4 NIGHT 125 18 36 70 10 0 8 20 29 8 25 2
#> 5 AUGUST 90 21 33 72 6 0 11 25 13 1 13 1
#> 6 SEPTE~ 64 18 21 35 8 0 2 12 28 11 15 5
Created on 2021-03-03 by the reprex package (v0.3.0)

Percentage change between consecutive columns

Looking for clever ways to automatically create percentage change columns between consecutive variables with the same prefix. I would also like to create similar variables for every 2nd variable. We can assume these variables represent years and we would like percent change between each year and every second year.
library(tidyverse)
df <- data.frame(xx = c(1, 2, 3),
                 a_12 = c(10, 20, 20),
                 a_13 = c(30, 40, 10),
                 a_14 = c(23, 34, 56),
                 a_15 = c(25, 34, 56),
                 a_16 = c(23, 34, 56))
df
#   xx a_12 a_13 a_14 a_15 a_16
# 1  1   10   30   23   25   23
# 2  2   20   40   34   34   34
# 3  3   20   10   56   56   56
What I want is to automate the following:
df %>%
  mutate(a_diff_12_13 = ((a_13 - a_12) / a_12) * 100,
         a_diff_13_14 = ((a_14 - a_13) / a_13) * 100,
         a_diff_14_15 = ((a_15 - a_14) / a_14) * 100,
         # ...and every second year
         a_diff_12_14 = ((a_14 - a_12) / a_12) * 100,
         a_diff_14_16 = ((a_16 - a_14) / a_14) * 100)
# xx a_12 a_13 a_14 a_15 a_16 a_diff_12_13 a_diff_13_14 a_diff_14_15 a_diff_12_14 a_diff_14_16
# 1 1 10 30 23 25 23 200 -23.3 8.7 130 0
# 2 2 20 40 34 34 34 100 -15.0 0.0 70 0
# 3 3 20 10 56 56 56 -50 460.0 0.0 180 0
Thanks
Try it this way:
df %>%
  pivot_longer(-xx) %>%
  group_by(xx) %>%
  mutate(Diff = (value / lag(value) - 1) * 100) %>%
  pivot_wider(id_cols = xx, names_from = name,
              values_from = Diff, names_prefix = "diff_") %>%
  left_join(df) %>%
  select(xx, starts_with("a"), everything())
Joining, by = "xx"
# A tibble: 3 x 11
# Groups: xx [3]
xx a_12 a_13 a_14 a_15 a_16 diff_a_12 diff_a_13 diff_a_14 diff_a_15 diff_a_16
<dbl> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl>
1 1 10 30 23 25 23 NA 200 -23.3 8.70 -8.00
2 2 20 40 34 34 34 NA 100 -15. 0 0
3 3 20 10 56 56 56 NA -50 460. 0 0
Or you can do it like this:
n_lag <- seq(1, 2)
df_long <- df %>%
  pivot_longer(-xx) %>%
  group_by(xx)
tmp <- map_dfc(
  n_lag,
  ~ transmute(df_long, !!paste0("diff_", .x) := (value / lag(value, .x) - 1) * 100)) %>%
  select(starts_with("diff_"))
bind_cols(df_long, tmp) %>%
  pivot_wider(
    id_cols = xx,
    names_from = name,
    values_from = starts_with("diff_")) %>%
  left_join(df) %>%
  select(xx, starts_with("a"), everything())
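For reference, the same columns can also be built with a plain double loop over year offsets; a minimal base-R sketch, assuming the columns follow the a_YY pattern from the question (it creates every offset-2 pair, including a_diff_13_15, a superset of the columns shown above):
yrs <- c(12, 13, 14, 15, 16)
for (k in 1:2) { # offset 1 = consecutive years, offset 2 = every second year
  for (i in seq_len(length(yrs) - k)) {
    from <- paste0("a_", yrs[i])
    to <- paste0("a_", yrs[i + k])
    df[[paste0("a_diff_", yrs[i], "_", yrs[i + k])]] <-
      (df[[to]] - df[[from]]) / df[[from]] * 100
  }
}
df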

Different output between sum and +

I'm working on a problem that basically consists of summing all the rows by their ID and summing some specific variables to get a consolidated dataset to feed into another task. There is an issue with the sum function, though, and I'd appreciate an explanation of what is going on.
Dataset:
teste <- data.frame(ID = c(1, 1, 2, 1, 3, 3, 2),
                    VALUE = c(10, 10, 10, 10, 10, 10, 10),
                    MOD = c(1, 1, 1, 1, 1, 1, 1))
ID VALUE MOD
1 1 10 1
2 1 10 1
3 2 10 1
4 1 10 1
5 3 10 1
6 3 10 1
7 2 10 1
Using the + operator:
teste %>%
  group_by(ID) %>%
  summarise_all(sum, na.rm = TRUE) %>%
  mutate(CONS = VALUE + MOD)
# A tibble: 3 x 4
ID VALUE MOD CONS
<dbl> <dbl> <dbl> <dbl>
1 1 30 3 33
2 2 20 2 22
3 3 20 2 22
Using the sum function:
teste %>%
  group_by(ID) %>%
  summarise_all(sum, na.rm = TRUE) %>%
  mutate(CONS = sum(VALUE, MOD))
# A tibble: 3 x 4
ID VALUE MOD CONS
<dbl> <dbl> <dbl> <dbl>
1 1 30 3 77
2 2 20 2 77
3 3 20 2 77
summarise_all removes one level of grouping, so the result of the pipeline above is ungrouped and sum(VALUE, MOD) collapses every value across all rows into the single total 77. Re-group it so sum() works per ID:
teste %>%
  group_by(ID) %>%
  summarise_all(sum, na.rm = TRUE) %>%
  group_by(ID) %>% # <--------------------------
  mutate(CONS = sum(VALUE, MOD)) %>%
  ungroup()
giving:
# A tibble: 3 x 4
     ID VALUE   MOD  CONS
  <dbl> <dbl> <dbl> <dbl>
1     1    30     3    33
2     2    20     2    22
3     3    20     2    22
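The underlying difference: sum() collapses all of its inputs into a single number, while + is vectorised and works row by row. If you'd rather avoid re-grouping, rowSums() is a per-row alternative; a minimal sketch, assuming dplyr >= 1.0 for across():
teste %>%
  group_by(ID) %>%
  summarise_all(sum, na.rm = TRUE) %>%
  mutate(CONS = rowSums(across(c(VALUE, MOD)))) # row-wise sum, no re-grouping needed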

Group and compare numbers in a list

Consider the following data frame obtained after a cbind operation on two lists
> fl
x meanlist
1 1 48.5
2 2 32.5
3 3 28.0
4 4 27.0
5 5 25.5
6 6 20.5
7 7 27.0
8 8 24.0
class_median <- list(0, 15, 25, 35, 45)
class_list <- list(0:10, 10:20, 20:30, 30:40, 40:50)
The values in class_median represent the midpoints of the classes -10 to +10, 10 to 20, 20 to 30, etc.
Firstly, I am trying to group the values in fl$meanlist according to the classes in class_list. Secondly, I am trying to return one value per class, the one closest to that class's median, as follows:
> fl_subset
x meanlist cm
1 1 48.5 45
2 2 32.5 35
3 5 25.5 25
I tried using loops to compare the values, but the code gets long and unmanageable, and the result is not correct.
Here's an approach with dplyr:
library(dplyr)
# a little prep: name the classes, extract breaks, put the medians in a data frame
names(class_list) = letters[seq_along(class_list)]
breaks = c(min(class_list[[1]]), sapply(class_list, max))
med_data = data.frame(median = unlist(class_median), class = names(class_list))

fl %>%
  # assign classes
  mutate(class = cut(meanlist, breaks = breaks, labels = names(class_list))) %>%
  # get medians
  left_join(med_data) %>%
  # within each class...
  group_by(class) %>%
  # ...keep the row with the smallest absolute difference from the median
  slice(which.min(abs(meanlist - median))) %>%
  # sort in original order
  arrange(x)
# Joining, by = "class"
# # A tibble: 3 x 4
# # Groups: class [3]
# x meanlist class median
# <int> <dbl> <fct> <dbl>
# 1 1 48.5 e 45
# 2 2 32.5 d 35
# 3 5 25.5 c 25
One approach utilizing purrr and dplyr could be:
map2(.x = class_list,
     .y = class_median,
     ~ fl %>%
       mutate(cm = between(meanlist, min(.x), max(.x))) %>%
       filter(any(cm)) %>%
       mutate(cm = cm * .y)) %>%
  bind_rows(.id = "ID") %>%
  group_by(ID) %>%
  slice(which.min(abs(meanlist - cm)))
ID x meanlist cm
<chr> <int> <dbl> <dbl>
1 3 5 25.5 25
2 4 2 32.5 35
3 5 1 48.5 45
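A loop-free base-R alternative is also possible with findInterval(); a minimal sketch, assuming the contiguous breaks implied by class_list:
breaks <- c(0, 10, 20, 30, 40, 50)
fl$cm <- unlist(class_median)[findInterval(fl$meanlist, breaks)]
# per class, keep the row whose meanlist is closest to the class median
fl_subset <- do.call(rbind, lapply(split(fl, fl$cm), function(d)
  d[which.min(abs(d$meanlist - d$cm)), ]))
fl_subset[order(fl_subset$x), ]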
