Of two rows eliminate the one with more NAs - r

I am looking for a way to check whether two columns in a data frame contain the same elements for one or more rows, and then eliminate the row containing more NAs.
Let's assume we have a data frame like this:
x <- data.frame("Year" = c(2017,2017,2017,2018,2018),
"Country" = c("Sweden", "Sweden", "Norway", "Denmark", "Finland"),
"Sales" = c(15, 15, 18, 13, 12),
"Campaigns" = c(3, NA, 4, 1, 1),
"Employees" = c(15, 15, 12, 8, 9),
"Satisfaction" = c(0.8, NA, 0.9, 0.95, 0.87),
"Expenses" = c(NA, NA, 9000, 7500, 4300))
Note that the entry for Sweden in the year 2017 appears twice, but the first row contains one NA while the other contains NAs in three places. Now I would like to check whether two rows contain the same "Year" and "Country", and then eliminate the row containing the higher number of NAs, in this case the second row. I did some research but could not find a solution for this particular case.
Thank you very much in advance.

Using dplyr:
library(dplyr)
x %>%
mutate(n_na = rowSums(is.na(.))) %>% ## calculate NAs for each row
group_by(Year, Country) %>% ## for each year/country
arrange(n_na) %>% ## sort by number of NAs
slice(1) %>% ## take the first row
select(-n_na) ## remove the NA counter column
# A tibble: 4 x 7
# Groups: Year, Country [4]
Year Country Sales Campaigns Employees Satisfaction Expenses
<dbl> <fctr> <dbl> <dbl> <dbl> <dbl> <dbl>
1 2017 Norway 18 4 12 0.90 9000
2 2017 Sweden 15 3 15 0.80 NA
3 2018 Denmark 13 1 8 0.95 7500
4 2018 Finland 12 1 9 0.87 4300
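With more recent dplyr (>= 1.0.0), slice_min() can replace the arrange()/slice() pair; a minimal sketch of the same idea:
library(dplyr)
x %>%
mutate(n_na = rowSums(is.na(.))) %>% ## count NAs per row
group_by(Year, Country) %>%
slice_min(n_na, n = 1, with_ties = FALSE) %>% ## keep the row with fewest NAs
ungroup() %>%
select(-n_na)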

We can use a data.table approach
library(data.table)
ind <- setDT(x)[, {
i1 <- Reduce(`+`, lapply(.SD, is.na))
.I[i1 > 0 & (i1 == max(i1))]
}, .(Year, Country)]$V1
x[-ind]
# Year Country Sales Campaigns Employees Satisfaction Expenses
#1: 2017 Sweden 15 3 15 0.80 NA
#2: 2017 Norway 18 4 12 0.90 9000
#3: 2018 Denmark 13 1 8 0.95 7500
#4: 2018 Finland 12 1 9 0.87 4300
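The .I expression above collects, per group, the row indices holding the group's maximum (and non-zero) NA count, and x[-ind] drops them. Be aware of edge cases: if no row contains an NA, ind is empty and x[-ind] returns zero rows, and a group whose only row contains NAs loses that row entirely. A sketch of a variant that sidesteps both by keeping the most complete row per group (n_na is a helper name I introduce):
library(data.table)
x0 <- as.data.table(x)
x0[, n_na := rowSums(is.na(x0))] ## count NAs per row
setorder(x0, n_na) ## most complete rows first
res <- x0[, .SD[1L], by = .(Year, Country)] ## first (most complete) row per group
res[, n_na := NULL][]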

Base R solution:
x$nas <- rowSums(sapply(x, is.na))
do.call(rbind,
by(x, x[c("Year","Country")],
function(df) head(df[order(df$nas),,drop=FALSE], n=1)))
# Year Country Sales Campaigns Employees Satisfaction Expenses nas
# 4 2018 Denmark 13 1 8 0.95 7500 0
# 5 2018 Finland 12 1 9 0.87 4300 0
# 3 2017 Norway 18 4 12 0.90 9000 0
# 1 2017 Sweden 15 3 15 0.80 NA 1
Not too surprisingly, the data.table implementation is the fastest, though I'm a little surprised by how much faster it was than base R. The small size of the dataset could affect this. (In the benchmarking, I had to create a copy of the original, since data.table modifies the data in place, so x would no longer be a data.frame.)
library(microbenchmark)
microbenchmark(
data.table = {
x0 <- copy(x)
ind <- setDT(x0)[, {
i1 <- Reduce(`+`, lapply(.SD, is.na))
.I[i1 > 0 & (i1 == max(i1))]
}, .(Year, Country)]$V1
x0[-ind]
},
dplyr = {
x %>%
mutate(n_na = rowSums(is.na(.))) %>% ## calculate NAs for each row
group_by(Year, Country) %>% ## for each year/country
arrange(n_na) %>% ## sort by number of NAs
slice(1) %>% ## take the first row
select(-n_na) ## remove the NA counter column
},
base = {
x0 <- x
x0$nas <- rowSums(sapply(x0, is.na))
do.call(rbind,
by(x0, x0[c("Year","Country")],
function(df) head(df[order(df$nas),,drop=FALSE], n=1)))
}
)
# Unit: milliseconds
# expr min lq mean median uq max neval
# data.table 1.223477 1.441005 1.973714 1.582861 1.919090 12.837569 100
# dplyr 2.675239 2.901882 4.465172 3.079295 3.806453 42.261540 100
# base 2.039615 2.209187 2.737758 2.298714 2.570760 8.586946 100

Related

More efficient way of using group_by > mutate > slice

I have a dataframe that looks like this
df <- data.frame("Month" = c("April","April","May","May","June","June","June"),
"ID" = c(11, 11, 12, 10, 11, 11, 11),
"Region" = c("East", "West", "North", "East", "North" ,"East", "West"),
"Qty" = c(120, 110, 110, 110, 100, 90, 70),
"Sales" = c(1000, 1100, 900, 1000, 1000, 800, 650),
"Leads" = c(10, 12, 9, 8, 6, 5, 4))
Month ID Region Qty Sales Leads
April 11 East 120 1000 10
April 11 West 110 1100 12
May 12 North 110 900 9
May 10 East 110 1000 8
June 11 North 100 1000 6
June 11 East 90 800 5
June 11 West 70 650 4
I want a dataframe that looks like this
Month ID Qty Sales Leads Region
April 11 230 2100 22 East
May 12 110 900 9 North
May 10 110 1000 8 East
June 11 260 2450 15 North
I am using the following code:
result <- df %>%
group_by(Month, ID) %>%
mutate(across(.cols = Qty:Leads, ~sum(.x, na.rm = TRUE))) %>%
slice(n = 1)
result$Region <- NULL
I have over 2 million such rows and it is taking forever to calculate the aggregate.
I am using mutate and slice instead of summarize because the df is arranged in a certain way and I want to retain the Region in that first row.
However, I think there could be a more efficient way. Please help on both counts; I can't figure it out for the life of me.
summarize makes more sense to me than mutate and slice. This should save you some time.
library(dplyr)
result <- df %>%
group_by(Month, ID) %>%
summarize(across(.cols = Qty:Leads, ~sum(.x, na.rm = T)),
Region = first(Region))
result
# # A tibble: 4 x 6
# # Groups: Month [3]
# Month ID Qty Sales Leads Region
# <chr> <dbl> <dbl> <dbl> <dbl> <chr>
# 1 April 11 230 2100 22 East
# 2 June 11 260 2450 15 North
# 3 May 10 110 1000 8 East
# 4 May 12 110 900 9 North
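As an aside, with dplyr >= 1.0.0 you can drop the grouping within the same call via the .groups argument (a sketch of the same summarize):
result <- df %>%
group_by(Month, ID) %>%
summarize(across(.cols = Qty:Leads, ~sum(.x, na.rm = TRUE)),
Region = first(Region),
.groups = "drop")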
Here is a data.table solution.
library(data.table)
setDT(df)
cols <- c("Qty", "Sales", "Leads")
df[, c(lapply(.SD, sum, na.rm = TRUE),
Region = first(Region)), .SDcols = cols,
by = .(Month, ID)][]
# Month ID Qty Sales Leads Region
# 1: April 11 230 2100 22 East
# 2: May 12 110 900 9 North
# 3: May 10 110 1000 8 East
# 4: June 11 260 2450 15 North
We can apply generic speed-up strategies:
Do less
Choose an appropriate back-end
Use appropriate data structures
dplyr provides syntactic sugar for data manipulation, but may not be the most efficient when it comes to handling large data sets.
Solution 1
We could rewrite the code to be more efficient using the collapse package, which provides fast C/C++-based analogues of dplyr functions. Its verbs are the dplyr names prefixed with f, with one exception: fsubset, which is similar to dplyr::filter (or base R subset).
library(collapse)
df |>
fgroup_by(Month, ID) |>
fsummarise(Qty = fsum(Qty),
Sales = fsum(Sales),
Leads = fsum(Leads),
Region = fsubset(Region, 1L),
keep.group_vars = T) |>
as_tibble() # optional
#> # A tibble: 4 x 6
#> Month ID Qty Sales Leads Region
#> <chr> <dbl> <dbl> <dbl> <dbl> <chr>
#> 1 April 11 230 2100 22 East
#> 2 June 11 260 2450 15 North
#> 3 May 10 110 1000 8 East
#> 4 May 12 110 900 9 North
Here |> (which requires R >= 4.1) is a slightly faster pipe than %>%. The result is ungrouped.
Solution 2
data.table is often lauded for its speed, memory use and utility. The easiest conversion from existing dplyr code to data.table is via the dtplyr package, which ships with the tidyverse. We can convert the pipeline by adding two lines of code.
library(dtplyr)
df1 <- lazy_dt(df)
df1 %>%
group_by(Month, ID) %>%
summarize(across(.cols = Qty:Leads, ~sum(.x, na.rm = T)),
Region = first(Region)) %>%
as_tibble() # or data.table()
Note that this results in an ungrouped data.frame at the end.
Benchmarks
Approaches are put in wrapper functions. dplyr here is www's approach. All approaches output a tibble.
bench::mark(collapse = collapse(df), dplyr = dplyr(df), dtplyr = dtplyr(df),
time_unit = "ms", iterations = 200)[c(1, 3,5,7)]
# A tibble: 3 x 4
expression median mem_alloc n_itr
<bch:expr> <dbl> <bch:byt> <int>
1 collapse 0.316 0B 200
2 dplyr 5.42 8.73KB 195
3 dtplyr 6.67 120.21KB 196
We can see that collapse is more memory-efficient and significantly faster than dplyr. The dtplyr approach is included because its time complexity differs from dplyr's and existing code is convenient to rewrite.
Per www's request, here is a pure data.table approach, with the wrapper functions rewritten for brevity. Input/output is a data.frame for collapse and a data.table for data.table, respectively.
data.table = \(x){setDT(x); cols = c("Qty", "Sales", "Leads");x[, c(lapply(.SD, sum, na.rm = T), Region = first(Region)), .SDcols = cols, by = .(Month, ID)][]}
# retaining the `|>` pipes for readability, impact is ~4us.
collapse = \(x) x|>fgroup_by(Month, ID)|>fsummarise(Qty = fsum(Qty),Sales = fsum(Sales),Leads = fsum(Leads),Region = fsubset(Region, 1L),keep.group_vars = T)
dt <- as.data.table(df)
bench::mark(collapse(df), iterations = 10e3)[c(1,3,5,7)] ; bench::mark(data.table(dt), iterations = 10e3)[c(1,3,5,7)]
expression median mem_alloc n_itr
<bch:expr> <bch:tm> <bch:byt> <int>
1 collapse(df) 150us 0B 9988
2 data.table(dt) 796us 146KB 9939
The difference between collapse and pure data.table, for such a small dataset, is negligible. The reason for the speed increase is likely the use of fsum instead of base R sum.
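To see that effect in isolation, a minimal sketch comparing fsum() with base sum() on a vector containing NAs (note that collapse's fast functions default to na.rm = TRUE):
library(collapse)
set.seed(1)
v <- rnorm(1e6)
v[sample(1e6, 1e3)] <- NA
fsum(v) # na.rm = TRUE is the default
sum(v, na.rm = TRUE) # base R equivalent, same result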

create increment index for each group, starting row conditional on other column

Let's assume I have a dataset similar to this:
library(tidyverse)
country <- c(rep("Germany", 9), rep("Greece", 9), rep("Turkey", 9), rep("Austria", 9))
date <- rep(seq(ymd('2012-04-07'),ymd('2012-04-15'),by='days'), 4)
gyros_eaten <- c(lag(1:3, n = 2, default = 0), floor(runif(6, min=1, max=4)),
lag(1:6, n = 5, default = 0), floor(runif(3, min=1, max=4)),
lag(1:2, n = 1, default = 0), floor(runif(7, min=1, max=4)),
lag(1:3, n = 2, default = 0), floor(runif(6, min=1, max=4)))
df <- data.frame(country, date, gyros_eaten)
I want to create a new column, days_since_first_local_gyros, which would tell me how many days have passed since eating the first gyros in that particular country. That is, it would take the value of zero if no gyros has been eaten yet, or if on that day I ate my first gyros in e.g. Greece. After that, it would increase by 1 on each day. Hence each group (i.e. country) would have a distinct index, and I have a row for each possible country x day dyad in my dataset, within a particular time range.
How should I create this column? I assume it's some combination of case_when, group_by and lag, but I just can't wrap my head around it.
We can get the first date where gyros_eaten > 0 in each country and subtract it from all the dates in that country. We use pmax to floor days_since_first_local_gyros at 0.
library(dplyr)
df %>%
arrange(country, date) %>%
group_by(country) %>%
mutate(days_since_first_local_gyros = pmax(as.integer(date -
date[which.max(gyros_eaten > 0)]), 0))
# country date gyros_eaten days_since_first_local_gyros
# <chr> <date> <dbl> <dbl>
# 1 Austria 2012-04-07 0 0
# 2 Austria 2012-04-08 0 0
# 3 Austria 2012-04-09 1 0
# 4 Austria 2012-04-10 1 1
# 5 Austria 2012-04-11 1 2
# 6 Austria 2012-04-12 2 3
# 7 Austria 2012-04-13 2 4
# 8 Austria 2012-04-14 2 5
# 9 Austria 2012-04-15 3 6
#10 Germany 2012-04-07 0 0
# … with 26 more rows
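For completeness, the same which.max()/pmax() logic translates to data.table; a sketch, assuming df as created above:
library(data.table)
setDT(df)[order(country, date),
days_since_first_local_gyros := pmax(as.integer(date - date[which.max(gyros_eaten > 0)]), 0L),
by = country][]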

Is there a way to find daily maximum from hourly data with missing values

I have measured hourly data of ground-level O3, but with some missing values (marked as NA). I want to calculate daily maximums, but only when there are more than 17 hourly measurements for the date. When a date has fewer than 18 measurements, I want to write NA.
head(o3sat)
date hour O3
1/1/2010 0 50.2
1/1/2010 1 39.8
1/1/2010 2 41.8
1/1/2010 3 NA
1/1/2010 4 9.2
1/1/2010 5 6.0
Is there a way to add an argument to this function to indicate that at least 75% of the data must be available in a day for the value to be calculated, and otherwise return NA?
maximums <- aggregate(o3sat["O3"], list(Date = as.Date(o3sat$date)), max, na.rm = TRUE)
It is better to provide a reproducible example when asking a question. Here, I created an example data frame based on the information you provided. This data frame contains hourly O3 measurements from 2010-01-01 to 2010-01-03.
library(dplyr)
library(tidyr)
library(lubridate)
o3sat <- read.table(text = " date hour O3
'1/1/2010' 0 50.2
'1/1/2010' 1 39.8
'1/1/2010' 2 41.8
'1/1/2010' 3 NA
'1/1/2010' 4 9.2
'1/1/2010' 5 6.0 ",
stringsAsFactors = FALSE, header = TRUE)
set.seed(1234)
o3sat_ex <- o3sat %>%
mutate(date = mdy(date)) %>%
complete(date = seq.Date(ymd("2010-01-01"), ymd("2010-01-03"), 1), hour = 0:23) %>%
mutate(O3 = c(o3sat$O3, rnorm(66, 30, 10))) %>%
mutate(O3 = ifelse(row_number() %in% sample(7:72, 18), NA, O3))
We can count the number of non-NA values per day with the following code.
o3sat_ex %>%
group_by(date) %>%
summarize(sum(!is.na(O3)))
# # A tibble: 3 x 2
# date `sum(!is.na(O3))`
# <date> <int>
# 1 2010-01-01 18
# 2 2010-01-02 17
# 3 2010-01-03 18
Based on your description, we would like to calculate the maximum for 2010-01-01 and 2010-01-03, but not 2010-01-02 as it only contains 17 non-NA values.
Here is one way to achieve the task: we can define a function, max_helper, that returns the maximum only if the count of non-NA values is at least the given threshold.
max_helper <- function(x, threshold){
if (sum(!is.na(x)) >= threshold) {
r <- max(x, na.rm = TRUE)
} else {
r <- NA
}
return(r)
}
We can apply this function in dplyr code to get the answer.
o3sat_ex2 <- o3sat_ex %>%
group_by(date) %>%
summarize(O3 = max_helper(O3, 18))
o3sat_ex2
# # A tibble: 3 x 2
# date O3
# <date> <dbl>
# 1 2010-01-01 50.2
# 2 2010-01-02 NA
# 3 2010-01-03 47.8
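If you prefer to stay close to your original aggregate() call, the same rule can be expressed in base R; a sketch using the example data o3sat_ex (na.action = na.pass is needed so the NAs reach the function for counting):
aggregate(O3 ~ date, data = o3sat_ex, na.action = na.pass,
FUN = function(x) if (sum(!is.na(x)) >= 18) max(x, na.rm = TRUE) else NA_real_)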

Single row per id to multiple row per id

I'd like to expand observations from single row-per-id to multiple rows-per-id based on a given time interval:
> dput(df)
structure(list(id = c(123, 456, 789), gender = c(0, 1, 1), yr.start = c(2005,
2010, 2000), yr.last = c(2007, 2012, 2000)), .Names = c("id",
"gender", "yr.start", "yr.last"), class = c("tbl_df", "tbl",
"data.frame"), row.names = c(NA, -3L))
> df
# A tibble: 3 x 4
id gender yr.start yr.last
<dbl> <dbl> <dbl> <dbl>
1 123 0 2005 2007
2 456 1 2010 2012
3 789 1 2000 2000
I want to get id expanded into one row per year:
> dput(df_out)
structure(list(id = c(123, 123, 123, 456, 456, 456, 789), gender = c(0,
0, 0, 1, 1, 1, 1), yr = c(2005, 2006, 2007, 2010, 2011, 2012,
2000)), .Names = c("id", "gender", "yr"), class = c("tbl_df",
"tbl", "data.frame"), row.names = c(NA, -7L))
> df_out
# A tibble: 7 x 3
id gender yr
<dbl> <dbl> <dbl>
1 123 0 2005
2 123 0 2006
3 123 0 2007
4 456 1 2010
5 456 1 2011
6 456 1 2012
7 789 1 2000
I know how to melt/reshape, but I'm not sure how I can expand the years.
Thanks.
Here is a base R method.
# expand years to a list
yearList <- mapply(":", df$yr.start, df$yr.last)
Now, use this list to compute the number of times to repeat each ID (the second argument of rep), and then append the years as a single vector (flattened from the list with unlist) using cbind.
# get data.frame
cbind(df[rep(seq_along(df$id), lengths(yearList)), c("id", "gender")], yr=unlist(yearList))
id gender yr
1 123 0 2005
1.1 123 0 2006
1.2 123 0 2007
2 456 1 2010
2.1 456 1 2011
2.2 456 1 2012
3 789 1 2000
You could gather into long format and then fill in the missing rows via complete using tidyr.
library(dplyr)
library(tidyr)
df %>%
gather(group, yr, starts_with("yr") ) %>%
group_by(id, gender) %>%
complete(yr = full_seq(yr, period = 1) )
You can use select to get rid of the extra column.
df %>%
gather(group, yr, starts_with("yr") ) %>%
select(-group) %>%
group_by(id, gender) %>%
complete(yr = full_seq(yr, period = 1) )
# A tibble: 8 x 3
# Groups: id, gender [3]
id gender yr
<dbl> <dbl> <dbl>
1 123 0 2005
2 123 0 2006
3 123 0 2007
4 456 1 2010
5 456 1 2011
6 456 1 2012
7 789 1 2000
8 789 1 2000
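Note that row 8 duplicates id 789 for the year 2000: gathering yr.start and yr.last produces two identical rows when they coincide. Adding distinct() before completing removes the duplicate (a small amendment, not part of the original answer):
df %>%
gather(group, yr, starts_with("yr") ) %>%
select(-group) %>%
distinct() %>%
group_by(id, gender) %>%
complete(yr = full_seq(yr, period = 1) )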
Here is a tidyverse solution
library(tidyverse)
df %>%
group_by(id, gender) %>%
nest() %>%
mutate(data = map(data, ~ seq(.x$yr.start, .x$yr.last))) %>%
unnest() %>%
rename(year = data)
# A tibble: 7 x 3
id gender year
<dbl> <dbl> <int>
1 123 0 2005
2 123 0 2006
3 123 0 2007
4 456 1 2010
5 456 1 2011
6 456 1 2012
7 789 1 2000
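In current tidyr, unnest() without a cols argument is deprecated; a modernized sketch of the same nest-and-expand idea using purrr::map2:
library(tidyverse)
df %>%
mutate(yr = map2(yr.start, yr.last, seq)) %>%
select(id, gender, yr) %>%
unnest(yr)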
As the OP mentions that his production data set has more than 1 M rows and he is benchmarking the different solutions, it might be worthwhile to try a data.table version:
library(data.table) # CRAN version 1.10.4 used
data.table(DF)[, .(yr = yr.start:yr.last), by = .(id, gender)]
which returns
id gender yr
1: 123 0 2005
2: 123 0 2006
3: 123 0 2007
4: 456 1 2010
5: 456 1 2011
6: 456 1 2012
7: 789 1 2000
If there are more non-varying columns than just gender it might be more efficient to do a join rather than including all those columns in the grouping parameter by =:
data.table(DF)[DF[, .(yr = yr.start:yr.last), by = id], on = "id"]
id gender yr.start yr.last yr
1: 123 0 2005 2007 2005
2: 123 0 2005 2007 2006
3: 123 0 2005 2007 2007
4: 456 1 2010 2012 2010
5: 456 1 2010 2012 2011
6: 456 1 2010 2012 2012
7: 789 1 2000 2000 2000
Note that both approaches assume that id is unique in the input data.
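A quick guard for that assumption (my suggestion, not part of the original answers):
stopifnot(anyDuplicated(DF$id) == 0) # stops if id is not unique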
Benchmarking
The OP has noted that he is surprised that the above data.table solution is five times slower than lmo's base R solution, apparently with the OP's production data set of more than 1 M rows.
Also, the question has attracted 5 different answers plus additional suggestions. So it's worthwhile to compare the solutions in terms of processing speed.
Data
As the production data set isn't available, and problem size, among other factors like the structure of the data, is important for benchmarking, sample data sets are created.
# parameters
n_rows <- 1E2
yr_range <- 10L
start_yr <- seq(2000L, length.out = 10L, by = 1L)
# create sample data set
set.seed(123L)
library(data.table)
DT <- data.table(id = seq_len(n_rows),
gender = sample(0:1, n_rows, replace = TRUE),
yr.start = sample(start_yr, n_rows, replace = TRUE))
DT[, yr.last := yr.start + sample(0:yr_range, n_rows, replace = TRUE)]
DF <- as.data.frame(DT)
str(DT)
Classes ‘data.table’ and 'data.frame': 100 obs. of 4 variables:
$ id : int 1 2 3 4 5 6 7 8 9 10 ...
$ gender : int 0 1 0 1 1 0 1 1 1 0 ...
$ yr.start: int 2005 2003 2004 2009 2004 2008 2009 2006 2004 2001 ...
$ yr.last : int 2007 2013 2010 2014 2008 2017 2013 2009 2005 2002 ...
- attr(*, ".internal.selfref")=<externalptr>
For the first run, 100 rows are created, the start year can vary between 2000 and 2009, and the span of years an individual id can cover is between 0 and 10 years. Thus, the result set should be expected to have approximately 100 * (10 + 1) / 2 rows.
Also, only one additional column gender is included, although the OP has said that the production data may have 2 to 10 non-varying columns.
Code
library(magrittr)
bm <- microbenchmark::microbenchmark(
lmo = {
yearList <- mapply(":", DF$yr.start, DF$yr.last)
res_lmo <- cbind(DF[rep(seq_along(DF$id), lengths(yearList)), c("id", "gender")],
yr=unlist(yearList))
},
hao = {
res_hao <- DF %>%
dplyr::group_by(id, gender) %>%
tidyr::nest() %>%
dplyr::mutate(data = purrr::map(data, ~ seq(.x$yr.start, .x$yr.last))) %>%
tidyr::unnest() %>%
dplyr::rename(yr = data)
},
aosmith = {
res_aosmith <- DF %>%
tidyr::gather(group, yr, dplyr::starts_with("yr") ) %>%
dplyr::select(-group) %>%
dplyr::group_by(id, gender) %>%
tidyr::complete(yr = tidyr::full_seq(yr, period = 1) )
},
jason = {
res_jason <- DF %>%
dplyr::group_by(id, gender) %>%
dplyr::do(data.frame(yr=.$yr.start:.$yr.last))
},
uwe1 = {
res_uwe1 <- DT[, .(yr = yr.start:yr.last), by = .(id, gender)]
},
uwe2 = {
res_uwe2 <- DT[DT[, .(yr = yr.start:yr.last), by = id], on = "id"
][, c("yr.start", "yr.last") := NULL]
},
frank1 = {
res_frank1 <- DT[rep(1:.N, yr.last - yr.start + 1L),
.(id, gender, yr = DT[, unlist(mapply(":", yr.start, yr.last))])]
},
frank2 = {
res_frank2 <- DT[, {
m = mapply(":", yr.start, yr.last); c(.SD[rep(.I, lengths(m))], .(yr = unlist(m)))},
.SDcols=id:gender]
},
times = 3L
)
Note that references to tidyverse functions are explicit in order to avoid name conflicts due to a cluttered name space.
First run
Unit: microseconds
expr min lq mean median uq max neval
lmo 655.860 692.6740 968.749 729.488 1125.193 1520.899 3
hao 40610.776 41484.1220 41950.184 42357.468 42619.887 42882.307 3
aosmith 319715.984 336006.9255 371176.437 352297.867 396906.664 441515.461 3
jason 77525.784 78197.8795 78697.798 78869.975 79283.804 79697.634 3
uwe1 834.079 870.1375 894.869 906.196 925.264 944.332 3
uwe2 1796.910 1810.8810 1880.482 1824.852 1922.268 2019.684 3
frank1 981.712 1057.4170 1086.680 1133.122 1139.164 1145.205 3
frank2 994.172 1003.6115 1081.016 1013.051 1124.438 1235.825 3
For the given problem size of 100 rows, the timings clearly indicate that the dplyr/tidyr solutions are orders of magnitude slower than the base R or data.table solutions.
The results are essentially consistent:
all.equal(as.data.table(res_lmo), res_uwe1)
all.equal(res_hao, res_uwe1)
all.equal(res_jason, res_uwe1)
all.equal(res_uwe2, res_uwe1)
all.equal(res_frank1, res_uwe1)
all.equal(res_frank2, res_uwe1)
return TRUE except all.equal(res_aosmith, res_uwe1) which returns
[1] "Incompatible type for column yr: x numeric, y integer"
Second run
Due to the long execution times, the tidyverse solutions are skipped when benchmarking larger problem sizes.
With the modified parameters
n_rows <- 1E4
yr_range <- 100L
the result set is expected to consist of about 500'000 rows.
Unit: milliseconds
expr min lq mean median uq max neval
lmo 425.026101 447.716671 455.85324 470.40724 471.26681 472.12637 3
uwe1 9.555455 9.796163 10.05562 10.03687 10.30571 10.57455 3
uwe2 18.711805 18.992726 19.40454 19.27365 19.75091 20.22817 3
frank1 22.639031 23.129131 23.58424 23.61923 24.05685 24.49447 3
frank2 13.989016 14.124945 14.47987 14.26088 14.72530 15.18973 3
For the given problem size and structure, the data.table solutions are the fastest, while the base R approach is an order of magnitude slower. The most concise solution, uwe1, is also the fastest here.
Note that the results depend on the structure of the data, in particular the parameters n_rows and yr_range and the number of non-varying columns. If there are more of those columns than just gender the timings might look differently.
The benchmark results are in contradiction to the OP's observation on execution speed which needs to be further investigated.
Another way using do in dplyr, but it's slower than the base R method.
df %>%
group_by(id, gender) %>%
do(data.frame(yr=.$yr.start:.$yr.last))
# # A tibble: 7 x 3
# # Groups: id, gender [3]
# id gender yr
# <dbl> <dbl> <int>
# 1 123 0 2005
# 2 123 0 2006
# 3 123 0 2007
# 4 456 1 2010
# 5 456 1 2011
# 6 456 1 2012
# 7 789 1 2000

How to get the maximum value by group

I have a data.frame with two columns: year and score. The years go from 2000-2012 and each year can be listed multiple times. In the score column I list all the scores for each year with each row having a different score.
What I'd like to do is filter the data.frame so only the rows with the maximum scores for each year remain.
So as a tiny example if I have
year score
2000 18
2001 22
2000 21
I would want to return just
year score
2001 22
2000 21
If you know SQL, this is easier to understand:
library(sqldf)
sqldf('select year, max(score) from mydata group by year')
Update (2016-01): Now you can also use dplyr
library(dplyr)
mydata %>% group_by(year) %>% summarise(max = max(score))
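If you want to keep the entire winning rows rather than just the maximum value, newer dplyr (>= 1.0.0) also offers slice_max; a sketch:
mydata %>% group_by(year) %>% slice_max(score, n = 1) # keeps ties by default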
using plyr
require(plyr)
set.seed(45)
df <- data.frame(year=sample(2000:2012, 25, replace=T), score=sample(25))
ddply(df, .(year), summarise, max.score=max(score))
using data.table
require(data.table)
dt <- data.table(df, key="year")
dt[, list(max.score=max(score)), by=year]
using aggregate:
o <- aggregate(df$score, list(df$year) , max)
names(o) <- c("year", "max.score")
using ave:
df1 <- df
df1$max.score <- ave(df1$score, df1$year, FUN=max)
df1 <- df1[!duplicated(df1$year), ]
Edit: In case of more columns, a data.table solution would be the best (my opinion :))
set.seed(45)
df <- data.frame(year=sample(2000:2012, 25, replace=T), score=sample(25),
alpha = sample(letters[1:5], 25, replace=T), beta=rnorm(25))
# convert to data.table with key=year
dt <- data.table(df, key="year")
# get the subset of data that matches this criterion
dt[, .SD[score %in% max(score)], by=year]
# year score alpha beta
# 1: 2000 20 b 0.8675148
# 2: 2001 21 e 1.5543102
# 3: 2002 22 c 0.6676305
# 4: 2003 18 a -0.9953758
# 5: 2004 23 d 2.1829996
# 6: 2005 25 b -0.9454914
# 7: 2007 17 e 0.7158021
# 8: 2008 12 e 0.6501763
# 9: 2011 24 a 0.7201334
# 10: 2012 19 d 1.2493954
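As an aside, .SD[...] per group can be slow when there are many groups; a common and usually faster data.table idiom computes the row indices first and subsets once (a sketch, same result):
dt[dt[, .I[score == max(score)], by = year]$V1]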
using base packages
> df
year score
1 2000 18
2 2001 22
3 2000 21
> aggregate(score ~ year, data=df, max)
year score
1 2000 21
2 2001 22
EDIT
If you have additional columns that you need to keep, then you can use merge with aggregate to get those columns:
> df <- data.frame(year = c(2000, 2001, 2000), score = c(18, 22, 21) , hrs = c( 10, 11, 12))
> df
year score hrs
1 2000 18 10
2 2001 22 11
3 2000 21 12
> merge(aggregate(score ~ year, data=df, max), df, all.x=T)
year score hrs
1 2000 21 12
2 2001 22 11
data <- data.frame(year = c(2000, 2001, 2000), score = c(18, 22, 21))
new.year <- unique(data$year)
new.score <- sapply(new.year, function(y) max(data[data$year == y, ]$score))
data <- data.frame(year = new.year, score = new.score)
A one-liner:
df_2 <- data.frame(year = sort(unique(df$year)), score = tapply(df$score, df$year, max))
