Sum of individual elements in a vector - r

I would like to determine the sum for each individual element in a vector.
For example, suppose I have the vector
x <- c(2,3,2,2,5,5,3,3)
and I want to find the sum for each element.
The answer would be something like
2: 6
3: 9
5: 10
This is because there are three 2's (2+2+2, i.e. 2*3), etc.
In other words, I essentially want to multiply each value by the number of times it appears in the vector.
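Put differently: multiply each unique value by its frequency. With base R's table(), for instance, that reads:
tab <- table(x)
as.numeric(names(tab)) * tab
# 2 3 5
# 6 9 10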

Using base R tapply
tapply(x, x, sum)
# 2 3 5
# 6 9 10
If you need it as a data frame, wrap it in stack
stack(tapply(x, x, sum))
# values ind
#1 6 2
#2 9 3
#3 10 5
If you convert this to a data frame, it becomes a standard group-by sum (see How to sum a variable by group)
library(dplyr)
tibble::tibble(x) %>%
group_by(x) %>%
summarise(n = sum(x))
# A tibble: 3 x 2
# x n
# <dbl> <dbl>
#1 2 6
#2 3 9
#3 5 10

A method with dplyr:
x <- c(2,3,2,2,5,5,3,3)
library(dplyr)
a <- tibble(x)
a %>% count(x) %>% mutate(xn = x*n)
# A tibble: 3 x 3
x n xn
<dbl> <int> <dbl>
1 2 3 6
2 3 3 9
3 5 2 10

Lots of ways to do this. A couple of base approaches:
with(rle(sort(x)), data.frame(val = values, freq = lengths, prod = lengths*values))
val freq prod
1 2 3 6
2 3 3 9
3 5 2 10
Or:
transform(as.data.frame(table(x), stringsAsFactors = FALSE), sum = as.numeric(x) * Freq)
x Freq sum
1 2 3 6
2 3 3 9
3 5 2 10

library(tidyverse)
x <- c(2,3,2,2,5,5,3,3)
tibble(x) %>%
count(x) %>%
mutate(xn = x*n ) %>%
pull(xn)
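# [1] 6 9 10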

We can use rowsum from base R
rowsum(x, group = x)
# [,1]
#2 6
#3 9
#5 10
Or with by
by(x, x, FUN = sum)
Or with split
sapply(split(x, x), sum)
# 2 3 5
# 6 9 10
Or another option with xtabs
xtabs(x1 ~ x, data.frame(x1 = x, x))
# 2 3 5
# 6 9 10
Or with ave
unique(data.frame(x, Sum = ave(x, x, FUN = sum)))
# x Sum
#1 2 6
#2 3 9
#5 5 10
Or using data.table
library(data.table)
data.table(grp = x, x=x)[, .(Sum = sum(x)), grp]
# grp Sum
#1: 2 6
#2: 3 9
#3: 5 10

Related

Re-ordering a factor (in a table) with hierarchical groups in R

Suppose the following table with two factor variables and one numerical variable:
df <- tibble(
x = as_factor(c("a", "a", "a", "b", "b", "b")),
y = as_factor(1:6),
val = c(10, 3, 8, 2, 6, 1)
)
> df
# A tibble: 6 x 3
x y val
<fct> <fct> <dbl>
1 a 1 10
2 a 2 3
3 a 3 8
4 b 4 2
5 b 5 6
6 b 6 1
I would like to re-order y such that the sum of val, when grouped by x, takes precedence, but y is still ordered by val within each group. To illustrate the goal:
# A tibble: 6 x 4
# Groups: x [2]
x y val sum
<fct> <fct> <dbl> <dbl>
1 a 1 10 21 # all y for which x=="a" come first, because
2 a 3 8 21 # the sum of val for x=="a" is greater than
3 a 2 3 21 # for x=="b"
4 b 5 6 9 # within each group, y is ordered by val
5 b 4 2 9
6 b 6 1 9
But how do I get there? Within tidyverse, I tried to solve it with forcats::fct_reorder(), thinking that grouping might help (df |> group_by(x) |> mutate(y = fct_reorder(y, val))), but it doesn't.
Can fct_reorder() do that at all? What other approaches could work?
Edit: I have found a solution, but it feels rather hacky:
df |>
group_by(x) |>
mutate(sum = sum(val)) |>
arrange(desc(sum), desc(val)) |> ungroup() |>
tibble::rowid_to_column() |>
mutate(across(c(x, y), \(x) fct_reorder(x, rowid)))
Perhaps we need to arrange first
library(dplyr)
library(forcats)
df %>%
arrange(desc(ave(val, x, FUN = sum)), desc(val)) %>%
mutate(across(where(is.factor), fct_inorder))
-output
# A tibble: 6 × 3
x y val
<fct> <fct> <dbl>
1 a 1 10
2 a 3 8
3 a 2 3
4 b 5 6
5 b 4 2
6 b 6 1
Or use fct_reorder/reorder in arrange
df %>%
arrange(desc(fct_reorder(x, val, .fun = sum)), desc(val)) %>%
mutate(across(where(is.factor), fct_inorder))
Probably we can use the following data.table option along with fct_inorder
library(data.table)
setorder(
setDT(df)[
,
sum := sum(val), x
],
-sum, -val
)[
,
lapply(
.SD,
function(x) if (is.factor(x)) fct_inorder(x) else x
)
]
and you will obtain
x y val sum
1: a 1 10 21
2: a 3 8 21
3: a 2 3 21
4: b 5 6 9
5: b 4 2 9
6: b 6 1 9

Label distinct combinations across multiple columns in R

I want to create a new column that labels each unique combination of values across x, y, z columns. My current work-around to achieve that is this:
> library(tidyverse)
>
> set.seed(100)
> df = tibble(x = sample.int(5, 50, replace = T), y = sample.int(5, 50, replace = T), z = sample.int(5, 50, replace = T))
> df
# A tibble: 50 x 3
x y z
<int> <int> <int>
1 2 4 4
2 3 4 4
3 1 3 5
4 2 1 4
5 4 2 5
6 4 5 2
7 2 3 4
8 3 5 4
9 2 4 1
10 5 5 2
# … with 40 more rows
>
> df2 = df %>% distinct(x,y,z) %>% rowid_to_column("unique_id") %>% left_join(df)
Joining, by = c("x", "y", "z")
> df2
# A tibble: 50 x 4
unique_id x y z
<int> <int> <int> <int>
1 1 2 4 4
2 2 3 4 4
3 3 1 3 5
4 4 2 1 4
5 4 2 1 4
6 5 4 2 5
7 5 4 2 5
8 6 4 5 2
9 6 4 5 2
10 7 2 3 4
# … with 40 more rows
What is a better/more efficient way to do this on a fairly large dataset? I'd like to stay within tidyverse but also open to other suggestions.
You could use rleidv from data.table. Note that it generates run-length ids, so identical combinations only share an id when their rows are adjacent; sort first if they may be scattered.
df$unique_id <- data.table::rleidv(df)
In dplyr, we can use the group_indices function for this purpose, which generates a unique id for each group of values.
library(dplyr)
df %>% mutate(unique_id = group_indices(., x, y, z))
In dplyr 1.0.0 and later, we can use cur_group_id
library(dplyr)
df %>%
group_by_all() %>%
mutate(unique_id = cur_group_id())
Or using .GRP from data.table
library(data.table)
setDT(df)[, unique_id := .GRP, names(df)]
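For completeness, a dependency-free sketch using base R's interaction(); note the ids follow factor-level order rather than order of first appearance:
df$unique_id <- as.integer(interaction(df$x, df$y, df$z, drop = TRUE))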

Dense Rank by Multiple Columns in R

How can I get a dense rank of multiple columns in a dataframe? For example,
# I have:
df <- data.frame(x = c(1,1,1,1,2,2,2,3,3,3),
y = c(1,2,3,4,2,2,2,1,2,3))
# I want:
res <- data.frame(x = c(1,1,1,1,2,2,2,3,3,3),
y = c(1,2,3,4,2,2,2,1,2,3),
r = c(1,2,3,4,5,5,5,6,7,8))
res
x y r
1 1 1 1
2 1 2 2
3 1 3 3
4 1 4 4
5 2 2 5
6 2 2 5
7 2 2 5
8 3 1 6
9 3 2 7
10 3 3 8
My hack approach works for this particular dataset:
df %>%
arrange(x,y) %>%
mutate(r = if_else(y - lag(y,default=0) == 0, 0, 1)) %>%
mutate(r = cumsum(r))
But there must be a more general solution, maybe using functions like dense_rank() or row_number(). But I'm struggling with this.
dplyr solutions are ideal.
Right after posting, I think I found a solution here. In my case, it would be:
mutate(df, r = dense_rank(interaction(x,y,lex.order=T)))
But if you have a better solution, please share.
data.table
data.table has you covered with frank().
library(data.table)
frank(df, x, y, ties.method = 'dense')
[1] 1 2 3 4 5 5 5 6 7 8
You can assign it with df$r <- frank(df, x, y, ties.method = 'dense') to add it as a new column.
tidyr/dplyr
Another option (though clunkier) is to use tidyr::unite to collapse your columns to one plus dplyr::dense_rank.
library(tidyverse)
df %>%
# add a single column with all the info
unite(xy, x, y) %>%
cbind(df) %>%
# dense rank on that
mutate(r = dense_rank(xy)) %>%
# now drop the helper col
select(-xy)
You can use cur_group_id:
library(dplyr)
df %>%
group_by(x, y) %>%
mutate(r = cur_group_id())
# x y r
# <dbl> <dbl> <int>
# 1 1 1 1
# 2 1 2 2
# 3 1 3 3
# 4 1 4 4
# 5 2 2 5
# 6 2 2 5
# 7 2 2 5
# 8 3 1 6
# 9 3 2 7
# 10 3 3 8
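In base R alone, the interaction() idea from the question works without dense_rank(), since the integer codes of a dropped, lexically ordered interaction are already dense (a minimal sketch, assuming the factor-level sort order is the desired ranking order):
df$r <- as.integer(interaction(df$x, df$y, lex.order = TRUE, drop = TRUE))
# 1 2 3 4 5 5 5 6 7 8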

Divide one column of data frame by condition from another column

I have a data frame with 2 columns like this:
cond val
1 5
2 18
2 18
2 18
3 30
3 30
I want to change values in val in this way:
cond val
1 5 # 5 = 5/1 (only "1" in cond column)
2 6 # 6 = 18/3 (there are three "2" in cond column)
2 6
2 6
3 15 # 15 = 30/2
3 15
How to achieve this?
A base R solution:
# method 1:
mydf$val <- ave(mydf$val, mydf$cond, FUN = function(x) x / length(x))
# method 2:
mydf <- transform(mydf, val = ave(val, cond, FUN = function(x) x / length(x)))
which gives:
cond val
1 1 5
2 2 6
3 2 6
4 2 6
5 3 15
6 3 15
Here's the dplyr way:
library(dplyr)
df %>%
group_by(cond) %>%
mutate(val = val / n())
Which gives:
#Source: local data frame [6 x 2]
#Groups: cond [3]
#
# cond val
# (int) (dbl)
#1 1 5
#2 2 6
#3 2 6
#4 2 6
#5 3 15
#6 3 15
The idea is to divide val by the number of observations in the current group (cond), obtained with n().
This seems like an appropriate situation for data.table:
library(data.table)
(dt <- data.table(df)[,val := val / .N, by = cond][])
# cond val
# 1: 1 5
# 2: 2 6
# 3: 2 6
# 4: 2 6
# 5: 3 15
# 6: 3 15
df <- read.table(
text = "cond val
1 5
2 18
2 18
2 18
3 30
3 30",
header = TRUE,
colClasses = "numeric"
)
In base R
df$result = df$val / ave(df$cond, df$cond, FUN = length)
The ave() divides up the cond column by its unique values and takes the length of each subvector, i.e., the denominator you ask for.
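The same denominator can also be read off a frequency table; an equivalent base R sketch (not from the original answer):
df$result = df$val / as.vector(table(df$cond)[as.character(df$cond)])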
Here is a base R answer using rle(); it works when equal values of cond are contiguous (e.g. an ID variable on sorted data):
# get length of repeats
temp <- rle(df$cond)
temp <- data.frame(cond=temp$values, lengths=temp$lengths)
# merge onto data.frame
df <- merge(df, temp, by="cond")
df$valNew <- df$val / df$lengths

R, dplyr: cumulative version of n_distinct

I have a dataframe as follows. It is ordered by column time.
Input -
df = data.frame(time = 1:20,
grp = sort(rep(1:5,4)),
var1 = rep(c('A','B'),10)
)
head(df,10)
time grp var1
1 1 1 A
2 2 1 B
3 3 1 A
4 4 1 B
5 5 2 A
6 6 2 B
7 7 2 A
8 8 2 B
9 9 3 A
10 10 3 B
I want to create another variable var2 which counts the number of distinct var1 values seen so far, i.e. up to that point in time, for each group grp. This is a little different from what I'd get if I were to use n_distinct.
Expected output -
time grp var1 var2
1 1 1 A 1
2 2 1 B 2
3 3 1 A 2
4 4 1 B 2
5 5 2 A 1
6 6 2 B 2
7 7 2 A 2
8 8 2 B 2
9 9 3 A 1
10 10 3 B 2
I want to create a function say cum_n_distinct for this and use it as -
d_out = df %>%
arrange(time) %>%
group_by(grp) %>%
mutate(var2 = cum_n_distinct(var1))
A dplyr solution inspired by @akrun's answer -
The logic is basically to set the 1st occurrence of each unique value of var1 to 1 and the rest to 0 for each group grp, and then apply cumsum on it -
df = df %>%
arrange(time) %>%
group_by(grp,var1) %>%
mutate(var_temp = ifelse(row_number()==1,1,0)) %>%
group_by(grp) %>%
mutate(var2 = cumsum(var_temp)) %>%
select(-var_temp)
head(df,10)
Source: local data frame [10 x 4]
Groups: grp
time grp var1 var2
1 1 1 A 1
2 2 1 B 2
3 3 1 A 2
4 4 1 B 2
5 5 2 A 1
6 6 2 B 2
7 7 2 A 2
8 8 2 B 2
9 9 3 A 1
10 10 3 B 2
Assuming stuff is ordered by time already, first define a cumulative distinct function:
dist_cum <- function(var)
sapply(seq_along(var), function(x) length(unique(head(var, x))))
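A quick sanity check on a small vector:
dist_cum(c('A', 'B', 'A', 'B')) # 1 2 2 2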
Then a base solution that uses ave to create groups (note, assumes var1 is factor), and then applies our function to each group:
transform(df, var2=ave(as.integer(var1), grp, FUN=dist_cum))
A data.table solution, basically doing the same thing:
library(data.table)
(data.table(df)[, var2:=dist_cum(var1), by=grp])
And dplyr, again, same thing:
library(dplyr)
df %>% group_by(grp) %>% mutate(var2=dist_cum(var1))
An approach in base R, splitting the data by group:
df$var2 <- unlist(lapply(split(df, df$grp), function(x) {
  x$var2 <- 0
  indx <- match(unique(x$var1), x$var1)
  x$var2[indx] <- 1
  cumsum(x$var2)
}))
head(df,7)
# time grp var1 var2
# 1 1 1 A 1
# 2 2 1 B 2
# 3 3 1 A 2
# 4 4 1 B 2
# 5 5 2 A 1
# 6 6 2 B 2
# 7 7 2 A 2
Here's another solution using data.table that's pretty quick.
Generic Function
cum_n_distinct <- function(x, na.include = TRUE){
# Given a vector x, returns a corresponding vector y
# where the ith element of y gives the number of unique
# elements observed up to and including index i
# if na.include = TRUE (default) NA is counted as an
# additional unique element, otherwise it's essentially ignored
temp <- data.table(x, idx = seq_along(x))
firsts <- temp[temp[, .I[1L], by = x]$V1]
if(na.include == FALSE) firsts <- firsts[!is.na(x)]
y <- rep(0, times = length(x))
y[firsts$idx] <- 1
y <- cumsum(y)
return(y)
}
Example Use
cum_n_distinct(c(5,10,10,15,5)) # 1 2 2 3 3
cum_n_distinct(c(5,NA,10,15,5)) # 1 2 3 4 4
cum_n_distinct(c(5,NA,10,15,5), na.include = FALSE) # 1 1 2 3 3
Solution To Your Question
d_out = df %>%
arrange(time) %>%
group_by(grp) %>%
mutate(var2 = cum_n_distinct(var1))
