summarise by group of columns using min and maintaining row number - r

I have a data frame with 3 columns
df <- data.frame(ID1=c(rep(1,4),rep(2,4)), ID2=rep(1:2,4), value=1:8)
I need to recover the min for each group (ID1, ID2) and the position (row name) of that min in the original table.
Using group_by and summarise I can obtain the min, but I can't see a way to obtain the position, since summarise drops the columns that are neither summarised nor used for grouping.
df <- data.frame(ID1=c(rep(1,4),rep(2,4)), ID2=rep(1:2,4), value=1:8)
df[['X']] <- paste0(df$ID1, '.', df$ID2)
df <- group_by(df, X)
df <- summarise(df, Objective = min(value))
Any ideas on how to solve this to get the following?
X Objective Position
1 1.1 1 1
2 1.2 2 2
3 2.1 5 5
4 2.2 6 6
Thanks in advance

If I understand correctly, and since you're already using dplyr, you could do it like this:
library(dplyr); library(tidyr)
unite(df, X, ID1:ID2, sep = ".") %>%
  mutate(Position = row_number()) %>%
  group_by(X) %>%
  slice(which.min(value))
#Source: local data frame [4 x 3]
#Groups: X
#
# X value Position
#1 1.1 1 1
#2 1.2 2 2
#3 2.1 5 5
#4 2.2 6 6
Or alternatively (dplyr only), which is the one I'd rather use:
mutate(df, Position = row_number()) %>%
  group_by(ID1, ID2) %>%
  slice(which.min(value))
#Source: local data frame [4 x 4]
#Groups: ID1, ID2
#
# ID1 ID2 value Position
#1 1 1 1 1
#2 1 2 2 2
#3 2 1 5 5
#4 2 2 6 6
data
df <- data.frame(ID1=rep(1:2, each = 4), ID2=rep(1:2,4), value=1:8)
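One small detail worth noting (my note, not part of the answer): which.min() returns the index of the first minimum, so slice(which.min(value)) keeps the earliest row when the minimum is tied:
which.min(c(2, 1, 1))  # 2 -- the first of the tied minima wins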

Here's how I would approach this using data.table (rn would be your row number).
library(data.table)
setDT(df, keep.rownames = TRUE)[, .SD[which.min(value)], list(ID1, ID2)]
# ID1 ID2 rn value
# 1: 1 1 1 1
# 2: 1 2 2 2
# 3: 2 1 5 5
# 4: 2 2 6 6
Another option is ordering by value and then picking the unique values:
unique(setorder(df, value), by = c("ID1", "ID2"))
# ID1 ID2 rn value
# 1: 1 1 1 1
# 2: 1 2 2 2
# 3: 2 1 5 5
# 4: 2 2 6 6
Neither approach requires creating the X column.
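One caveat with the ordering approach (my note): setorder() reorders df by reference, so df itself ends up sorted by value afterwards. If the original row order matters downstream, a sketch that works on a copy:
unique(setorder(copy(df), value), by = c("ID1", "ID2"))  # leaves df untouched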
Or using base R
df <- df[order(df$value), ]
df[!duplicated(df[, 1:2]), ]
# ID1 ID2 value
# 1 1 1 1
# 2 1 2 2
# 5 2 1 5
# 6 2 2 6
data
df <- data.frame(ID1=c(rep(1,4),rep(2,4)), ID2=rep(1:2,4), value=1:8)

Using Aggregate:
Data:
df <- data.frame(ID1=c(rep(1,4),rep(2,4)), ID2=rep(1:2,4), value=1:8)
df[['X']] <- paste0(df$ID1, '.', df$ID2)
df$rn <- row.names(df)  # rn is the row number
df <- df[c("X", "rn", "value")]
#> df
# X rn value
#1 1.1 1 1
#2 1.2 2 2
#3 1.1 3 3
#4 1.2 4 4
#5 2.1 5 5
#6 2.2 6 6
#7 2.1 7 7
#8 2.2 8 8
Aggregate step:
df2 <- aggregate(df, by = list(df$X), min)
#> df2
# Group.1 X rn value
#1 1.1 1.1 1 1
#2 1.2 1.2 2 2
#3 2.1 2.1 5 5
#4 2.2 2.2 6 6
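One caveat worth flagging (my note, not part of the original answer): row.names() returns a character vector, so min(rn) compares strings lexicographically; with ten or more rows, "10" would sort before "2". Storing the row number as numeric avoids this, a minimal sketch:
df$rn <- as.numeric(row.names(df))  # numeric, so min() compares numerically
df2 <- aggregate(df[c("rn", "value")], by = list(X = df$X), min)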

Related

arranging columns based on numeric values in r

I need to arrange column names based on numbering.
Here is a short version of my dataset.
df <- data.frame(id = c(1,2,3),
                 raw_score = c(10,20,30),
                 a = c(1,1,1),
                 b = c(2,3,4),
                 c = c(4,6,7))
names(df) <- c("id","raw_score","2.2","2.3","2.1")
> df
id raw_score 2.2 2.3 2.1
1 1 10 1 2 4
2 2 20 1 3 6
3 3 30 1 4 7
How can I arrange the columns as shown below?
> df
id raw_score 2.1 2.2 2.3
1 1 10 4 1 2
2 2 20 6 1 3
3 3 30 7 1 4
Maybe
df %>%
  dplyr::select(id, raw_score,
                stringr::str_sort(colnames(df)[3:ncol(df)], numeric = TRUE)) -> df
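For comparison, a base R sketch of the same idea, assuming the score columns always start at position 3 (num_cols is a hypothetical helper name):
num_cols <- names(df)[3:ncol(df)]  # the "2.x" columns
df <- df[c(names(df)[1:2], num_cols[order(as.numeric(num_cols))])]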

Merging columns while ignoring NAs

I would like to merge multiple columns. Here is what my sample dataset looks like.
df <- data.frame(
  id = c(1,2,3),
  cat.1 = c(3,4,NA,4,2),
  cat.2 = c(3,NA,1,4,NA),
  cat.3 = c(3,4,1,4,2))
> df
id cat.1 cat.2 cat.3
1 1 3 3 3
2 2 4 NA 4
3 3 NA 1 1
4 4 4 4 4
5 5 2 NA 2
I am trying to merge columns cat.1, cat.2 and cat.3. It is a little complicated for me since there are NAs.
I need a single cat variable, and even though some columns contain NA, I need those NAs to be ignored. The desired output is below:
> df
id cat
1 1 3
2 2 4
3 3 1
4 4 4
5 5 2
Any thoughts?
Another variation of Gregor's answer using dplyr::transmute:
library(dplyr)
df %>%
  transmute(id = id, cat = coalesce(cat.1, cat.2, cat.3))
#> id cat
#> 1 1 3
#> 2 2 4
#> 3 3 1
#> 4 4 4
#> 5 5 2
With dplyr:
library(dplyr)
df %>%
  mutate(cat = coalesce(cat.1, cat.2, cat.3)) %>%
  select(-cat.1, -cat.2, -cat.3)
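If there are many cat.* columns, a sketch that splices them all into coalesce() at once, using the select-inside-mutate idiom (an assumption-laden sketch, not the answerer's code):
df %>%
  mutate(cat = coalesce(!!!select(., starts_with("cat.")))) %>%
  select(id, cat)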
An option with fcoalesce from data.table
library(data.table)
setDT(df)[, .(id, cat = do.call(fcoalesce, .SD)), .SDcols = patterns('^cat')]
Output:
# id cat
#1: 1 3
#2: 2 4
#3: 3 1
#4: 4 4
#5: 5 2
Does this work?
library(dplyr)
df %>%
  rowwise() %>%
  mutate(cat = mean(c(cat.1, cat.2, cat.3), na.rm = TRUE)) %>%
  select(-(2:4))
# A tibble: 5 x 2
# Rowwise:
id cat
<dbl> <dbl>
1 1 3
2 2 4
3 3 1
4 4 4
5 5 2
Since the non-NA values within each row are identical, the row mean returns that same value; max or min would work just as well.
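Since max would work too, here is a vectorized sketch with pmax() that avoids rowwise() entirely (my variant, not part of the original answer):
library(dplyr)
df %>%
  mutate(cat = pmax(cat.1, cat.2, cat.3, na.rm = TRUE)) %>%
  select(id, cat)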
Here is a base R solution which uses apply:
# remove NAs, then drop the first remaining element (the id),
# leaving the single unique cat value in each row
df$cat <- apply(df, 1, function(x) unique(x[!is.na(x)][-1]))
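A small caveat on this one (my note): apply() converts the data frame to a matrix first, which is fine here because every column is numeric; with mixed types everything would be coerced to character. A sketch that touches only the cat.* columns avoids the coercion:
df$cat <- sapply(seq_len(nrow(df)), function(i) {
  v <- unlist(df[i, c("cat.1", "cat.2", "cat.3")])  # just the cat columns
  unique(v[!is.na(v)])
})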

Want to get the dataframe of values that are deviations from the mean based on a factor column [duplicate]

This question already has answers here:
Subtracting values group-wise by the average of each group in R
(4 answers)
Closed 2 years ago.
Example:
So let's say I have this data frame.
x <- data.frame(factor = as.factor(c('a','a','b','b','c','c')),
                value1 = c(1,3,2,4,5,3),
                value2 = c(7,9,3,4,9,3))
factor value1 value2
1 a 1 7
2 a 3 9
3 b 2 3
4 b 4 4
5 c 5 9
6 c 3 3
I know how to get the mean per factor; I use this method:
aggregate(x[, c(2, 3)], list(x$factor), mean, na.rm = TRUE)
This give me the following output:
Group.1 value1 value2
1 a 2 8.0
2 b 3 3.5
3 c 4 6.0
How do I now go about subtracting from each value in the original data frame the corresponding mean of its factor? The actual dataset I am using is big, so I need a clean approach; I managed to do it, but only with complicated for loops.
So the output that I want would be:
factor value1 value2
1 a -1 -1.0
2 a 1 1.0
3 b -1 -0.5
4 b 1 0.5
5 c 1 3.0
6 c -1 -3.0
Any help would be great. Thanks.
A dplyr solution
library(dplyr)
x %>%
  group_by(factor) %>%
  mutate(across(c(value1, value2), ~ . - mean(.)))
Output
# A tibble: 6 x 3
# Groups: factor [3]
factor value1 value2
<fct> <dbl> <dbl>
1 a -1 -1
2 a 1 1
3 b -1 -0.5
4 b 1 0.5
5 c 1 3
6 c -1 -3
You can try this dplyr approach:
library(dplyr)
#Data
x <- data.frame(factor = as.factor(c('a','a','b','b','c','c')),
                value1 = c(1,3,2,4,5,3),
                value2 = c(7,9,3,4,9,3))
#Code
x <- x %>% group_by(factor) %>%
  mutate(Mv1 = mean(value1),
         Mv2 = mean(value2),
         value1 = value1 - Mv1,
         value2 = value2 - Mv2) %>%
  select(-c(Mv1, Mv2))
Output:
# A tibble: 6 x 3
# Groups: factor [3]
factor value1 value2
<fct> <dbl> <dbl>
1 a -1 -1
2 a 1 1
3 b -1 -0.5
4 b 1 0.5
5 c 1 3
6 c -1 -3
Here is a solution with data.table
library("data.table")
setDT(x)
cols <- paste0("value", 1:2)
x[, lapply(.SD, function(x) x - mean(x)), .SDcols=cols, by=factor]
or
library("data.table")
setDT(x)
x[, sweep(.SD, 2, STATS=colMeans(.SD)), by=factor, .SDcols=2:3]
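For completeness, the same group-wise centering in base R with ave(), whose FUN defaults to mean (a sketch, assuming the value columns sit in positions 2 and 3):
x[2:3] <- lapply(x[2:3], function(v) v - ave(v, x$factor))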

Collapsing rows with dplyr

I am new to R and am trying to collapse rows based on row values with dplyr. The following example shows the sample data.
set.seed(123)
df <- data.frame(A = c(rep(1:4, 4)),
                 B = runif(16, min = 0, max = 1),
                 C = rnorm(16, mean = 1, sd = 0.5))
A B C
1 1 0.36647435 0.7485365
2 2 0.51864614 0.8654337
3 3 0.04596929 0.9858012
4 4 0.15479619 1.1294208
5 1 0.76712372 1.2460700
6 2 0.17666676 0.7402996
7 3 0.89759874 1.2699954
8 4 0.90267735 0.7101804
9 1 0.91744223 0.3451281
10 2 0.25472599 0.8604743
11 3 0.10933985 0.8696796
12 4 0.71656017 1.2648846
13 1 0.21157810 1.3170205
14 2 0.14947268 1.2789700
15 3 0.92251060 1.5696901
16 4 0.30090579 1.7642853
I want to collapse each pair of rows where column A has the values 1 and 2 into one row (taking the mean of those two rows). The final result will therefore have only 12 rows, because 4 pairs of rows have been collapsed.
I tried to use the following dplyr function but to little avail.
install.packages("tidyverse")
library(tidyverse)
df %>% summarize_each( fun(i){ for i %in% c(1,2)funs(mean) })
The expected output is something like:
A B C
1 1.5 0.4425602 0.8069851
3 3 0.04596929 0.9858012
4 4 0.15479619 1.1294208
5 1.5 0.4718952 0.9931848
7 3 0.89759874 1.2699954
8 4 0.90267735 0.7101804
9 1.5 0.5860841 0.6028012
11 3 0.10933985 0.8696796
12 4 0.71656017 1.2648846
13 1.5 0.1805254 1.297995
15 3 0.92251060 1.5696901
16 4 0.30090579 1.7642853
Thank you in advance.
By making the implicit, order-based groupings explicit, the summary can be done with a single summarise_all call.
# Generate the data
set.seed(1)
df <- data.frame(
A = c(rep(1:4, 4)),
B = runif(16, min = 0, max = 1),
C = rnorm(16, mean = 1, sd = 0.5)
)
library(dplyr)
new <- df %>%
group_by(grp = rep(
1:4, # vector containing names of groups to create
each = 4 # number of elements in each group
)) %>%
group_by(mean_grp = cumsum(A > 2) + 1, add = T) %>%
summarise_all(mean) %>%
ungroup()
new
#> # A tibble: 12 x 5
#> grp mean_grp A B C
#> <int> <dbl> <dbl> <dbl> <dbl>
#> 1 1 1 1.5 0.3188163 1.067598241
#> 2 1 2 3.0 0.5728534 1.755890584
#> 3 1 3 4.0 0.9082078 1.194921618
#> 4 2 1 1.5 0.5500358 0.291014883
#> 5 2 2 3.0 0.9446753 1.562465459
#> 6 2 3 4.0 0.6607978 0.977533195
#> 7 3 1 1.5 0.3454502 1.231911487
#> 8 3 2 3.0 0.2059746 1.410610598
#> 9 3 3 4.0 0.1765568 1.296950661
#> 10 4 1 1.5 0.5355633 1.425278418
#> 11 4 2 3.0 0.7698414 1.037282492
#> 12 4 3 4.0 0.4976992 0.005324152
I would recommend keeping the grouping variables in your data after the
summary (everything is simpler if you include them in the first place),
but if you want to, you can drop them with
new %>% select(-grp, -mean_grp).
PS. In order to avoid having "magic numbers" (such as the 1:4 and each = 4 when creating grp) included in the code, you could also create the first grouping variable as:
grp = cumsum(A < lag(A, default = A[1])) + 1
This assumes that the original data are ordered such that a new group starts each time the value of A is less than the previous value of A.
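Putting that together, a sketch of the same pipeline with the lag-based grouping spliced in (same assumptions as above; new2 is a hypothetical name):
new2 <- df %>%
  group_by(grp = cumsum(A < lag(A, default = A[1])) + 1) %>%
  group_by(mean_grp = cumsum(A > 2) + 1, add = TRUE) %>%
  summarise_all(mean) %>%
  ungroup()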
One option would be to process the rows with A equal to 1 or 2 separately from the other rows and then bind them back together:
set.seed(3)
df <- data.frame(A = c(rep(1:4, 4)),
                 B = runif(16, min = 0, max = 1),
                 c = rnorm(16, mean = 1, sd = 0.5))
df %>%
  filter(A %in% 1:2) %>%
  group_by(tmp = cumsum(A == 1)) %>%
  summarise_all(mean) %>%
  ungroup %>%
  select(-tmp) %>%
  bind_rows(df %>% filter(!A %in% 1:2))
A B c
<dbl> <dbl> <dbl>
1 1.5 0.4877790 1.0121278
2 1.5 0.6032474 0.8840735
3 1.5 0.6042946 0.5996850
4 1.5 0.5456424 0.6198039
5 3.0 0.3849424 0.6276092
6 4.0 0.3277343 0.4343907
7 3.0 0.1246334 1.0760229
8 4.0 0.2946009 0.8461718
9 3.0 0.5120159 1.6121568
10 4.0 0.5050239 1.0999058
11 3.0 0.8679195 0.8981359
12 4.0 0.8297087 0.1667626

R, dplyr: cumulative version of n_distinct

I have a dataframe as follows. It is ordered by column time.
Input -
df = data.frame(time = 1:20,
                grp = sort(rep(1:5, 4)),
                var1 = rep(c('A','B'), 10))
head(df,10)
time grp var1
1 1 1 A
2 2 1 B
3 3 1 A
4 4 1 B
5 5 2 A
6 6 2 B
7 7 2 A
8 8 2 B
9 9 3 A
10 10 3 B
I want to create another variable var2 which computes the number of distinct var1 values so far, i.e. up to that point in time, for each group grp. This is a little different from what I'd get if I were to use n_distinct.
Expected output -
time grp var1 var2
1 1 1 A 1
2 2 1 B 2
3 3 1 A 2
4 4 1 B 2
5 5 2 A 1
6 6 2 B 2
7 7 2 A 2
8 8 2 B 2
9 9 3 A 1
10 10 3 B 2
I want to create a function, say cum_n_distinct, for this and use it as:
d_out = df %>%
  arrange(time) %>%
  group_by(grp) %>%
  mutate(var2 = cum_n_distinct(var1))
A dplyr solution inspired by @akrun's answer.
The logic is basically to set the 1st occurrence of each unique value of var1 to 1 and the rest to 0 within each group grp, and then apply cumsum:
df <- df %>%
  arrange(time) %>%
  group_by(grp, var1) %>%
  mutate(var_temp = ifelse(row_number() == 1, 1, 0)) %>%
  group_by(grp) %>%
  mutate(var2 = cumsum(var_temp)) %>%
  select(-var_temp)
head(df,10)
Source: local data frame [10 x 4]
Groups: grp
time grp var1 var2
1 1 1 A 1
2 2 1 B 2
3 3 1 A 2
4 4 1 B 2
5 5 2 A 1
6 6 2 B 2
7 7 2 A 2
8 8 2 B 2
9 9 3 A 1
10 10 3 B 2
Assuming the data are already ordered by time, first define a cumulative distinct function:
dist_cum <- function(var)
  sapply(seq_along(var), function(x) length(unique(head(var, x))))
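A quick sanity check of the helper (expected result in the comment):
dist_cum(c("A", "B", "A", "B"))  # 1 2 2 2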
Then a base R solution that uses ave to create groups (note: this assumes var1 is a factor) and applies our function to each group:
transform(df, var2=ave(as.integer(var1), grp, FUN=dist_cum))
A data.table solution, basically doing the same thing:
library(data.table)
(data.table(df)[, var2:=dist_cum(var1), by=grp])
And dplyr, again, same thing:
library(dplyr)
df %>% group_by(grp) %>% mutate(var2=dist_cum(var1))
Update
With your new dataset, an approach in base R:
df$var2 <- unlist(lapply(split(df, df$grp), function(x) {
  x$var2 <- 0
  indx <- match(unique(x$var1), x$var1)  # positions of the first occurrence of each value
  x$var2[indx] <- 1                      # flag first occurrences with 1
  cumsum(x$var2)                         # running count of distinct values
}))
head(df,7)
# time grp var1 var2
# 1 1 1 A 1
# 2 2 1 B 2
# 3 3 1 A 2
# 4 4 1 B 2
# 5 5 2 A 1
# 6 6 2 B 2
# 7 7 2 A 2
Here's another solution using data.table that's pretty quick.
Generic Function
cum_n_distinct <- function(x, na.include = TRUE){
  # Given a vector x, returns a corresponding vector y
  # where the ith element of y gives the number of unique
  # elements observed up to and including index i.
  # If na.include = TRUE (default) NA is counted as an
  # additional unique element, otherwise it's essentially ignored.
  temp <- data.table(x, idx = seq_along(x))
  firsts <- temp[temp[, .I[1L], by = x]$V1]  # row of the first occurrence of each value
  if(na.include == FALSE) firsts <- firsts[!is.na(x)]
  y <- rep(0, times = length(x))
  y[firsts$idx] <- 1   # flag first occurrences
  y <- cumsum(y)       # running count of distinct values
  return(y)
}
Example Use
cum_n_distinct(c(5,10,10,15,5)) # 1 2 2 3 3
cum_n_distinct(c(5,NA,10,15,5)) # 1 2 3 4 4
cum_n_distinct(c(5,NA,10,15,5), na.include = FALSE) # 1 1 2 3 3
Solution To Your Question
d_out = df %>%
  arrange(time) %>%
  group_by(grp) %>%
  mutate(var2 = cum_n_distinct(var1))
