This is an example we can work with:
library(tibble)
df <- tibble(y = c("a", "a", "a", "a", "a", "a"), z = c("b", "b", "b", "b", "b", "b"),
             a = c("aaa", "aaa", "aaa", "bbb", "bbb", "bbb"),
             b = c(1, 2, 3, 1, 2, 3), c = c(5, 10, 15, 100, 95, 90))
df
# A tibble: 6 x 5
y z a b c
<chr> <chr> <chr> <dbl> <dbl>
1 a b aaa 1 5
2 a b aaa 2 10
3 a b aaa 3 15
4 a b bbb 1 100
5 a b bbb 2 95
6 a b bbb 3 90
I want to group by columns y, z and a and combine columns b and c into a single string. The final result should look exactly like this:
# A tibble: 2 x 4
y z a result
<chr> <chr> <chr> <chr>
1 a b aaa {"1":5,"2":10,"3":15}
2 a b bbb {"1":100,"2":95,"3":90}
I can almost achieve this with:
b <- by(df[-1:-3], df$a, function(x)
sprintf("{%s}", toString(Reduce(paste0, c(x, "\"", "\":")[c(3, 1, 4, 2)]))))
data.frame(a=unique(df$a), result=do.call(rbind, as.list(b)), row.names=NULL)
a result
1 aaa {"1":5, "2":10, "3":15}
2 bbb {"1":100, "2":95, "3":90}
This only groups by column a, though, and not by all three columns (y, z and a). I got the hint that I can fix it with the aggregate function but am having a hard time applying it.
Using dplyr you can make use of sprintf/paste0:
library(dplyr)
df %>%
group_by(y, z, a) %>%
summarise(result = paste0('{', toString(sprintf('"%d":"%d"', b, c)), '}')) %>%
ungroup %>% data.frame()
# y z a result
#1 a b aaa {"1":"5", "2":"10", "3":"15"}
#2 a b bbb {"1":"100", "2":"95", "3":"90"}
Using by this can be written as:
do.call(rbind, by(df, list(df$y, df$z, df$a), function(x)
cbind(unique(x[1:3]),
result = paste0('{', toString(sprintf('"%d":"%d"', x$b, x$c)), '}'))))
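The aggregate hint from the question can also be made to work. Here is a minimal base R sketch, assuming df as defined above; the helper column result (added only for this sketch) holds one "key":value pair per row, which aggregate then collapses per (y, z, a) group, giving the exact target format (unquoted values, no space after the comma):
tmp <- df
tmp$result <- sprintf('"%d":%d', tmp$b, tmp$c)   # one "key":value pair per row
aggregate(result ~ y + z + a, data = tmp,
          FUN = function(p) sprintf("{%s}", paste(p, collapse = ",")))
#   y z   a                  result
# 1 a b aaa   {"1":5,"2":10,"3":15}
# 2 a b bbb {"1":100,"2":95,"3":90}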
I have a filtering question as below:
df <- data.frame(id = c(1,1,1, 2,2, 3,3, 4,4, 5, 6),
cat =c("A","B","C", "A","C", "B","C", "A","B", "A", "B"),
value = c("Y","N","Y", "Y","N", "N","Y", "N","Y", "N","Y"))
> df
id cat value
1 1 A Y
2 1 B N
3 1 C Y
4 2 A Y
5 2 C N
6 3 B N
7 3 C Y
8 4 A N
9 4 B Y
10 5 A N
11 6 B Y
Filtering rules:
1) When an id has three cat, I need to keep only cat B.
2) When an id has two cat - A and B, I need to keep only cat B.
3) When an id has two cat - A and C, I need to keep only cat C.
4) When an id has two cat - B and C, I need to keep only cat B.
5) When an id has one cat - keep the category.
In sum, the desired priority is B first, then C, then A: within each id, keep the highest-priority cat that is present.
How can I achieve this desired dataframe:
> df1
id cat value
1 1 B N
2 2 C N
3 3 B N
4 4 B Y
5 5 A N
6 6 B Y
I tried the below, but it only handles the cat B filtering. I also need to add the C filtering for when the cats are A and C.
df %>% group_by(id) %>% mutate(value = value[match("B", cat, nomatch = 1)]) %>% ungroup
In base R, using just the priority of categories:
tmp <- lapply(split(df, df$id),
function(x) x[which.min(match(x$cat, c('B', 'C', 'A'))), ])
do.call(rbind, tmp)
dplyr way:
df %>%
group_by(id) %>%
slice(which.min(match(cat, c('B', 'C', 'A'))))
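To see why the match-based priority works, here is a minimal illustration using the cats of id 2 (A and C): match returns each cat's position in the priority vector, so a lower number means a higher priority, and which.min picks the row to keep.
match(c("A", "C"), c("B", "C", "A"))
# [1] 3 2
which.min(match(c("A", "C"), c("B", "C", "A")))
# [1] 2   # the second row of the group, i.e. cat "C", is kept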
Here is an option using some suitable case_when conditions:
df %>%
group_by(id) %>%
filter(case_when(
length(cat) == 3 & all(cat %in% c("A", "B", "C")) ~ cat == "B",
length(cat) == 2 & all(cat %in% c("A", "B")) ~ cat == "B",
length(cat) == 2 & all(cat %in% c("A", "C")) ~ cat == "C",
length(cat) == 2 & all(cat %in% c("B", "C")) ~ cat == "B",
length(cat) == 1 ~ cat == unique(cat))) %>%
ungroup()
# A tibble: 6 × 3
# id cat value
# <dbl> <chr> <chr>
#1 1 B N
#2 2 C N
#3 3 B N
#4 4 B Y
#5 5 A N
#6 6 B Y
An option with ordered
library(dplyr)
df %>%
arrange(id, ordered(cat, c("B", "C", "A"))) %>%
group_by(id) %>%
slice_head(n = 1) %>%
ungroup
-output
# A tibble: 6 × 3
id cat value
<dbl> <chr> <chr>
1 1 B N
2 2 C N
3 3 B N
4 4 B Y
5 5 A N
6 6 B Y
I have a data frame containing (in random places) a character value (say "foo") that I want to replace with NA.
What's the best way to do so across the whole data frame?
This:
df[df == "foo"] <- NA
One way to nip this in the bud is to convert that character to NA at read time:
df <- read.csv("file.csv", na.strings = c("foo", "bar"))
Using dplyr::na_if, you can replace specific values with NA. In this case, that would be "foo".
library(dplyr)
set.seed(1234)
df <- data.frame(
id = 1:6,
x = sample(c("a", "b", "foo"), 6, replace = T),
y = sample(c("c", "d", "foo"), 6, replace = T),
z = sample(c("e", "f", "foo"), 6, replace = T),
stringsAsFactors = F
)
df
#> id x y z
#> 1 1 a c e
#> 2 2 b c foo
#> 3 3 b d e
#> 4 4 b d foo
#> 5 5 foo foo e
#> 6 6 b d e
na_if(df$x, "foo")
#> [1] "a" "b" "b" "b" NA "b"
If you need to do this for multiple columns, you can apply na_if to several columns at once with mutate() and across() (updated for dplyr v1.0.0+).
df %>%
mutate(across(c(x, y, z), na_if, "foo"))
#> id x y z
#> 1 1 a c e
#> 2 2 b c <NA>
#> 3 3 b d e
#> 4 4 b d <NA>
#> 5 5 <NA> <NA> e
#> 6 6 b d e
Another option is is.na<-:
is.na(df) <- df == "foo"
Note that its use may seem a bit counter-intuitive, but it actually assigns NA values to df at the index on the right hand side.
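For instance, a minimal illustration on a plain vector, using the same pattern the answer applies to the whole data frame:
x <- c("a", "foo", "b")
is.na(x) <- x == "foo"   # assigns NA wherever the right-hand side is TRUE
x
# [1] "a" NA  "b"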
This could be done with dplyr::mutate_all() and replace:
library(dplyr)
df <- data_frame(a = c('foo', 2, 3), b = c(1, 'foo', 3), c = c(1,2,'foobar'), d = c(1, 2, 3))
> df
# A tibble: 3 x 4
a b c d
<chr> <chr> <chr> <dbl>
1 foo 1 1 1
2 2 foo 2 2
3 3 3 foobar 3
df <- mutate_all(df, funs(replace(., .=='foo', NA)))
> df
# A tibble: 3 x 4
a b c d
<chr> <chr> <chr> <dbl>
1 <NA> 1 1 1
2 2 <NA> 2 2
3 3 3 foobar 3
Another dplyr option is:
df <- na_if(df, 'foo')
Assuming you do not know the column names or have a large number of columns to select, is.character() might be of use.
df <- data.frame(
id = 1:6,
x = sample(c("a", "b", "foo"), 6, replace = T),
y = sample(c("c", "d", "foo"), 6, replace = T),
z = sample(c("e", "f", "foo"), 6, replace = T),
stringsAsFactors = F
)
df
# id x y z
# 1 1 b d e
# 2 2 a foo foo
# 3 3 a d foo
# 4 4 b foo foo
# 5 5 foo foo e
# 6 6 foo foo f
df %>%
mutate_if(is.character, list(~na_if(., "foo")))
# id x y z
# 1 1 b d e
# 2 2 a <NA> <NA>
# 3 3 a d <NA>
# 4 4 b <NA> <NA>
# 5 5 <NA> <NA> e
# 6 6 <NA> <NA> f
One alternative is a plain loop over the columns:
for (i in seq_along(df)) {
  df[which(df[[i]] == "foo"), i] <- NA
}
In RStudio, I have a data frame with 4 columns, and I need to get every distinct triplet of the first 3 columns, sorted decreasingly by the sum of the 4th column. For example, with:
A B C 2
D E F 5
A B C 4
G H I 5
D E F 3
I need as a result:
D E F 8
A B C 6
G H I 5
I've tried the following approaches but I can't manage to get exactly the result I need:
df_list<-df_raw_data %>%
group_by(param1, param2, param3) %>%
summarise_all(total = sum(param4))
arrange(df_list, desc(total))
and:
df_list<-unique(df_raw_data[, c('param1', 'param2', 'param3')])
cbind(df_list, total)
for(i in 1:nrow(df_raw_data))
{
filter ???????????
}
I would prefer to use the dplyr package since it's a more elegant solution.
EDIT: Okay, thanks for your working answers. I think I lost some time figuring out that the plyr package shouldn't be loaded after dplyr...
We can use group_by_at to select the columns to group.
library(dplyr)
dat2 <- dat %>%
group_by_at(vars(-V4)) %>%
summarise(V4 = sum(V4)) %>%
ungroup()
dat2
# # A tibble: 3 x 4
# V1 V2 V3 V4
# <chr> <chr> <chr> <int>
# 1 A B C 6
# 2 D E F 8
# 3 G H I 5
Or use group_by_if to select columns to group based on column types.
dat2 <- dat %>%
group_by_if(is.character) %>%
summarise(V4 = sum(V4)) %>%
ungroup()
dat2
# # A tibble: 3 x 4
# V1 V2 V3 V4
# <chr> <chr> <chr> <int>
# 1 A B C 6
# 2 D E F 8
# 3 G H I 5
DATA
dat <- read.table(text = "A B C 2
D E F 5
A B C 4
G H I 5
D E F 3",
header = FALSE, stringsAsFactors = FALSE)
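In dplyr 1.0.0 and later the scoped verbs group_by_at()/group_by_if() are superseded; a sketch of the same idea with across(), assuming the dat from the DATA block and adding the decreasing sort the question asks for:
library(dplyr)
dat %>%
  group_by(across(where(is.character))) %>%
  summarise(V4 = sum(V4), .groups = "drop") %>%
  arrange(desc(V4))
# rows come out as D E F (8), A B C (6), G H I (5)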
Would this be what you are looking for?
df <- data_frame(var1 = c("A", "D", "A", "G", "D"),
var2 = c("B", "E", "B", "H", "E"),
var3 = c("C", "F", "C", "I", "F"),
var4 = c(2, 5, 4, 5, 3))
df %>% group_by(var1, var2, var3) %>%
summarise(sum = sum(var4)) %>%
arrange(desc(sum))
I am trying to remove rows that have offsetting values.
library(dplyr)
a <- c(1, 1, 1, 1, 2, 2, 2, 2,2,2)
b <- c("a", "b", "b", "b", "c", "c","c", "d", "d", "d")
d <- c(10, 20, -20, 50, 30, -30, 60, 40, -40, 70)
o <- c("A", "B", "C", "D", "E", "F", "G", "H", "I", "J")
df <- tibble(ID = a, SEQ = b, VALUE = d, OTHER = o)
Generates this ordered table that is grouped by ID and SEQ.
> df
# A tibble: 10 x 4
ID SEQ VALUE OTHER
<dbl> <chr> <dbl> <chr>
1 1 a 10 A
2 1 b 20 B
3 1 b -20 C
4 1 b 50 D
5 2 c 30 E
6 2 c -30 F
7 2 c 60 G
8 2 d 40 H
9 2 d -40 I
10 2 d 70 J
I want to drop the row pairs (2,3), (5,6), (8,9) because VALUE negates the VALUE in the matching previous row.
I want the resulting table to be
> df2
# A tibble: 4 x 4
ID SEQ VALUE OTHER
<dbl> <chr> <dbl> <chr>
1 1 a 10 A
2 1 b 50 D
3 2 c 60 G
4 2 d 70 J
I know that I can't use group_by %>% summarize, because I need to keep the value that is in OTHER. I've looked at the dplyr::lag() function but I don't see how that can help. I believe that I could loop through the table with some type of for each loop and generate a logical vector that can be used to drop the rows, but I was hoping for a more elegant solution.
What about:
vec <- cbind(
  c(head(df$VALUE, -1) + df$VALUE[-1], 9999),   # VALUE plus the next row's VALUE (9999 pads the last row)
  df$VALUE + c(9999, head(df$VALUE, -1))        # VALUE plus the previous row's VALUE (9999 pads the first row)
)
vec <- apply(vec, 1, prod)   # the product is 0 whenever a row offsets a neighbour
vec <- vec != 0
df[vec, ]
# A tibble: 4 x 4
ID SEQ VALUE OTHER
<dbl> <chr> <dbl> <chr>
1 1 a 10 A
2 1 b 50 D
3 2 c 60 G
4 2 d 70 J
The idea is to add VALUE to a shifted copy of itself, once against the previous row and once against the next row. Whenever one of those sums is 0, the row offsets a neighbour and is removed.
Here's another solution with dplyr. I'm not sure about the edge case you mentioned in the comments, but feel free to test it:
library(dplyr)
df %>%
group_by(ID, SEQ) %>%
mutate(diff = VALUE + lag(VALUE),
diff2 = VALUE + lead(VALUE)) %>%
mutate_at(vars(diff:diff2), funs(coalesce(., 1))) %>%
filter((diff != 0 & diff2 != 0)) %>%
select(-diff, -diff2)
Result:
# A tibble: 4 x 4
# Groups: ID, SEQ [4]
ID SEQ VALUE OTHER
<dbl> <chr> <dbl> <chr>
1 1 a 10 A
2 1 b 50 D
3 2 c 60 G
4 2 d 70 J
Note:
This solution first creates two helper columns: diff adds the lagged VALUE and diff2 adds the leading VALUE to each VALUE. Only the offsetting rows have a zero in diff or in diff2, so filtering those rows out gives the desired output.
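Since funs() has been deprecated in newer dplyr releases, a minimal sketch of the same approach in current syntax (assuming the df from the question) could be:
library(dplyr)
df %>%
  group_by(ID, SEQ) %>%
  mutate(diff  = coalesce(VALUE + lag(VALUE), 1),      # pair each row with the previous one
         diff2 = coalesce(VALUE + lead(VALUE), 1)) %>% # and with the next one
  filter(diff != 0 & diff2 != 0) %>%
  select(-diff, -diff2) %>%
  ungroup()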