I have tried to write a piece of code that calculates the mean and standard error of my data and puts them into a new tibble.
It feels very clumsy, however. Does anyone know of packages or other tricks that could make my code more elegant?
I need to calculate the mean and SE for a number of subgroups (days_incubated).
library(dplyr)
library(tibble)
library(tidyr)
library(data.table)
library(plotrix)
df2 <- df1%>%
group_by(days_incubated)%>%
summarise_each(funs(mean, se= std.error))%>% # Calculating mean and standard error
mutate_if(is.numeric, round, digits = 2) # Round off the data
df2_trans <- transpose(df2) # Transposing data table
colnames(df2_trans) <- rownames(df2) # Get row and colnames in order
rownames(df2_trans) <- colnames(df2) # Get row and colnames in order
df2_trans <- rownames_to_column(df2_trans, "mass") # Making row names into a column
df3_trans <- df2_trans%>% # Converting one column into two
separate(mass, c("mass","type"), sep = "([_])")
mean_target <- c("mean", "incubated")
mean <- df3_trans%>% # Mean table
filter(type %in% mean_target)%>%
rename("mean day 0"="1")%>%
rename("mean day 4"="2")%>%
rename("mean day 10"="3")%>%
rename("mean day 17"="4")%>%
rename("mean day 24"="5")%>%
rename("mean day 66"="6")%>%
rename("mean day 81"="7")%>%
rename("mean day 94"="8")%>%
rename("mean day 116"="9")%>%
select("mass", "mean day 0", "mean day 4", "mean day 10", "mean day 17", "mean day 24", "mean day 66", "mean day 81", "mean day 94", "mean day 116")%>%
slice(-c(1))
se_target <- c("se", "incubated")
se <- df3_trans%>% # SE table
filter(type %in% se_target)%>%
rename("se day 0"="1")%>%
rename("se day 4"="2")%>%
rename("se day 10"="3")%>%
rename("se day 17"="4")%>%
rename("se day 24"="5")%>%
rename("se day 66"="6")%>%
rename("se day 81"="7")%>%
rename("se day 94"="8")%>%
rename("se day 116"="9")%>%
select("mass", "se day 0", "se day 4", "se day 10", "se day 17", "se day 24", "se day 66", "se day 81", "se day 94", "se day 116")%>%
slice(-c(1))
# join mean and se tables
mean_se <- mean %>% #merging mean and se dataset
full_join(se, by=("mass"))%>%
select("mass","mean day 0","se day 0", "mean day 4", "se day 4", "mean day 10", "se day 10", "mean day 17", "se day 17", "mean day 24", "se day 24", "mean day 66", "se day 66", "mean day 81", "se day 81", "mean day 94", "se day 94", "mean day 116", "se day 116") # Putting columns in correct order
And here's the data:
df1 <- structure(list(days_incubated = c("0", "0", "0", "0", "0", "4",
"4", "4", "4", "4", "10", "10", "10", "10", "10", "17", "17",
"17", "17", "17", "24", "24", "24", "24", "24", "66", "66", "66",
"66", "66", "81", "81", "81", "81", "81", "94", "94", "94", "94",
"94", "116", "116", "116", "116", "116"), i.x33.031 = c(7.45,
0, 78.2, 16.49, 18.77, 104.5, 28.95, 26.05, 4.11, 62.09, 1.95,
6.75, 1.41, 3.34, 3.02, 0, 100.28, 0.2, 32.66, 0, 0, 370.57,
7.24, 133.63, 55.26, 0.16, 5.5, 25.17, 16.59, 3.3, 23.95, 30.61,
4.04, 0, 6.58, 0.08, 0.01, 0, 0.38, 0, 0, 0, 0, 0.18, 0), i.x35.034 = c(0,
0, 0.15, 0.02, 0.01, 0.04, 0.04, 0.05, 0.02, 0.09, 0.02, 0, 0.04,
0.01, 0, 0, 0.22, 0, 0.08, 0, 0, 0.66, 0.01, 0.2, 0.12, 0.01,
0.01, 0.04, 0.01, 0.01, 0.01, 0.04, 0, 0, 0, 0, 0, 0, 0.01, 0,
0, 0.02, 0, 0, 0.02), i.x36.017 = c(0.47, 0.09, 0.28, 0.02, 0.03,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.05,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.3, 0.06, 0.32, 0, 0, 0, 0, 0.12,
0, 0.02), i.x39.959 = c(0.02, 0, 0.08, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.04, 0, 0, 0, 0, 0, 0.01, 0, 0,
0, 0, 0, 0.01, 0.02, 0.06, 0.03, 0.03, 0, 0, 0.02, 0.01, 0, 0,
0), i.x40.023 = c(0.35, 0.02, 0.48, 0.06, 0, 1.25, 0.09, 0.1,
0.03, 0, 0.09, 0.07, 0.55, 0.09, 0.07, 0, 0.63, 0, 0.09, 0.07,
0.02, 1.11, 0.04, 0.59, 0.13, 0, 0.01, 0.02, 0, 0, 0, 0, 0.01,
0.02, 0.06, 0.01, 0.01, 0.01, 0.01, 0.04, 0, 0.08, 0, 0, 0.01
)), row.names = c(NA, -45L), class = "data.frame")
Does this give you what you are looking for? It seems to reproduce the output. One note: I think your result actually has the columns labeled wrong. One of your steps did a character sort on the columns, so they were not in the order you expected when you renamed them later.
What we do is first use pivot_longer() so that we have tidy data. From there, we can group and summarize to calculate the mean and standard errors. Then we pivot_wider() to move it back to the wide format of your result.
library(dplyr)
library(tidyr)
library(plotrix)
col_order <- paste0(rep(paste0(c("mean", "se"), "_day_"), length(unique(df1$days_incubated))),
                    rep(unique(df1$days_incubated), each = 2))
df1 %>%
  pivot_longer(-days_incubated, names_to = "mass") %>%
  group_by(days_incubated, mass) %>%
  summarize(mean = mean(value),
            se = std.error(value), .groups = "drop") %>%
  pivot_wider(names_from = days_incubated, values_from = c("mean", "se"),
              names_glue = "{.value}_day_{days_incubated}") %>%
  relocate(mass, all_of(col_order))
# A tibble: 5 x 19
mass mean_day_0 se_day_0 mean_day_4 se_day_4 mean_day_10 se_day_10 mean_day_17 se_day_17 mean_day_24 se_day_24 mean_day_66 se_day_66 mean_day_81
<chr> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl>
1 i.x3~ 24.2 13.9 45.1 17.5 3.29 0.932 26.6 19.5 113. 68.6 10.1 4.67 13.0
2 i.x3~ 0.036 0.0287 0.048 0.0116 0.014 0.00748 0.06 0.0429 0.198 0.121 0.016 0.006 0.01
3 i.x3~ 0.178 0.0867 0 0 0 0 0 0 0.01 0.01 0 0 0
4 i.x3~ 0.02 0.0155 0 0 0 0 0 0 0.008 0.008 0.002 0.002 0.006
5 i.x4~ 0.182 0.0978 0.294 0.240 0.174 0.0941 0.158 0.119 0.378 0.210 0.006 0.004 0.018
# ... with 5 more variables: se_day_81 <dbl>, mean_day_94 <dbl>, se_day_94 <dbl>, mean_day_116 <dbl>, se_day_116 <dbl>
Line-by-line
First, I have added in a short step to help column sorting.
col_order is a character vector we create of the correct final order we are looking for. Basically we are just concatenating strings to match the column names. You can skip this and simply type each name individually much like you did in your select() statement, but this saves typing.
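As a quick check (not part of the original answer), the first few entries of that vector, given the days in df1, should look like this:
head(col_order, 6)
# [1] "mean_day_0"  "se_day_0"    "mean_day_4"  "se_day_4"    "mean_day_10" "se_day_10"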
Now onto the pipeline.
pivot_longer() will take the data from the columns and stack them. The column names will now be in a variable called mass. By default, the values will be in a new variable called value.
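To make that concrete, here is a peek at the first rows of the long intermediate (a sketch; the values come from the first row of df1 above):
df1 %>%
  pivot_longer(-days_incubated, names_to = "mass") %>%
  head(3)
# A tibble: 3 x 3
#  days_incubated mass      value
#  <chr>          <chr>     <dbl>
#1 0              i.x33.031  7.45
#2 0              i.x35.034  0
#3 0              i.x36.017  0.47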
group_by() groups on the specified variables, which are the row and column combinations you want in the final table.
summarize() collapses the multiple rows for each mass and days_incubated combination into one new row, with two columns: the mean and se. The .groups = "drop" argument says to drop the groups (i.e., the tibble is no longer grouped, check the help file for more details).
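If you stop the pipeline right after summarize(), the first rows look roughly like this (day-0 values as in the final output above):
#  days_incubated mass       mean    se
#1 0              i.x33.031  24.2    13.9
#2 0              i.x35.034  0.036   0.0287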
pivot_wider() takes the long-format summary tibble and pivots the data out into columns, much like the format you had. It says to take the column names from days_incubated, and to take the values from the mean and the se columns. The names_glue argument specifies how to name the new columns. It uses syntax from the glue package, where the { brackets basically mean "substitute a value here". So each name becomes '[name of value column]_day_[days_incubated]'.
relocate() simply reorders the columns. This is relatively new to dplyr, and is the preferred way now to rearrange columns instead of select(). It says to first take mass, and then all_of() the columns in the col_order vector that we created earlier.
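For comparison, the same ordering could also be done with a select() call here, since mass and the col_order columns are the only columns left at that point (a sketch, not part of the original answer):
  # in place of the final relocate() line:
  select(mass, all_of(col_order))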
I'd like to offer a solution mainly in base R.
library(dplyr)
# Define a function that computes the standard error of a vector.
f.SE <- function(x) sd(x)/sqrt(length(x))
# Define a function that computes the standard error of each column (MARGIN = 2 means columns).
f.colSE <- function(mydf) apply(mydf, MARGIN = 2, FUN = f.SE)
# Define a function that combines the means and the standard errors of a column
f.colMeanSE <- function(mycol) cbind(means = colMeans(mycol), se = f.colSE(mycol))
# Apply the function to all numeric columns in your data frame by using the days_incubated subgroup as the indices. The result is a list.
my_mean_se1 <- by(data = df1[,-1], INDICES = df1$days_incubated, FUN= f.colMeanSE); my_mean_se1
#df1$days_incubated: 0
#means se
#i.x33.031 24.182 13.91245823
#i.x35.034 0.036 0.02874022
#i.x36.017 0.178 0.08668333
#i.x39.959 0.020 0.01549193
#i.x40.023 0.182 0.09779571
#------------------------------------------------------------
# df1$days_incubated: 10
#means se
#i.x33.031 3.294 0.932119091
#i.x35.034 0.014 0.007483315
#i.x36.017 0.000 0.000000000
#i.x39.959 0.000 0.000000000
#i.x40.023 0.174 0.094106323
#------------------------------------------------------------
# Bind the columns.
my_mean_se2 <- do.call(cbind, my_mean_se1); my_mean_se2
# means se means se means se means se means se means se means se means
#i.x33.031 24.182 13.91245823 3.294 0.932119091 0.036 0.036000000 26.628 19.46476571 113.340 68.5767486 45.140 17.49416646 10.144 4.665935 13.036
#i.x35.034 0.036 0.02874022 0.014 0.007483315 0.008 0.004898979 0.060 0.04289522 0.198 0.1212601 0.048 0.01157584 0.016 0.006000 0.010
#i.x36.017 0.178 0.08668333 0.000 0.000000000 0.028 0.023323808 0.000 0.00000000 0.010 0.0100000 0.000 0.00000000 0.000 0.000000 0.000
#i.x39.959 0.020 0.01549193 0.000 0.000000000 0.006 0.004000000 0.000 0.00000000 0.008 0.0080000 0.000 0.00000000 0.002 0.002000 0.006
#i.x40.023 0.182 0.09779571 0.174 0.094106323 0.018 0.015620499 0.158 0.11939012 0.378 0.2103188 0.294 0.23972067 0.006 0.004000 0.018
# se means se
#i.x33.031 6.002108463 0.094 0.07304793
#i.x35.034 0.007745967 0.002 0.00200000
#i.x36.017 0.000000000 0.136 0.07194442
#i.x39.959 0.004000000 0.024 0.01122497
#i.x40.023 0.011135529 0.016 0.00600000
# Bind the names of the columns to the names of days_incubated
names_days <- unique(df1$days_incubated)
names_meanse <- rbind(paste0("mean day ", names_days), paste0("se day ", names_days))
colnames(my_mean_se2) <- names_meanse
# Round the numbers to 2 decimal places and convert the numeric table to a data frame.
mean_se <- as.data.frame(round(my_mean_se2, digits = 2)); mean_se
# mean day 0 se day 0 mean day 4 se day 4 mean day 10 se day 10 mean day 17
#i.x33.031 24.18 13.91 3.29 0.93 0.04 0.04 26.63
#i.x35.034 0.04 0.03 0.01 0.01 0.01 0.00 0.06
#i.x36.017 0.18 0.09 0.00 0.00 0.03 0.02 0.00
#i.x39.959 0.02 0.02 0.00 0.00 0.01 0.00 0.00
#i.x40.023 0.18 0.10 0.17 0.09 0.02 0.02 0.16
# se day 17 mean day 24 se day 24 mean day 66 se day 66 mean day 81 se day 81
#i.x33.031 19.46 113.34 68.58 45.14 17.49 10.14 4.67
#i.x35.034 0.04 0.20 0.12 0.05 0.01 0.02 0.01
#i.x36.017 0.00 0.01 0.01 0.00 0.00 0.00 0.00
#i.x39.959 0.00 0.01 0.01 0.00 0.00 0.00 0.00
#i.x40.023 0.12 0.38 0.21 0.29 0.24 0.01 0.00
# mean day 94 se day 94 mean day 116 se day 116
#i.x33.031 13.04 6.00 0.09 0.07
#i.x35.034 0.01 0.01 0.00 0.00
#i.x36.017 0.00 0.00 0.14 0.07
#i.x39.959 0.01 0.00 0.02 0.01
#i.x40.023 0.02 0.01 0.02 0.01
# Convert the data frame to a tibble with rowname set as a new column
as_tibble(mean_se, rownames = "rowname")
# A tibble: 5 x 19
# rowname `mean day 0` `se day 0` `mean day 4` `se day 4` `mean day 10` `se day 10` `mean day 17` `se day 17` `mean day 24` `se day 24`
# <chr> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl>
#1 i.x33.~ 24.2 13.9 3.29 0.93 0.04 0.04 26.6 19.5 113. 68.6
#2 i.x35.~ 0.04 0.03 0.01 0.01 0.01 0 0.06 0.04 0.2 0.12
#3 i.x36.~ 0.18 0.09 0 0 0.03 0.02 0 0 0.01 0.01
#4 i.x39.~ 0.02 0.02 0 0 0.01 0 0 0 0.01 0.01
#5 i.x40.~ 0.18 0.1 0.17 0.09 0.02 0.02 0.16 0.12 0.38 0.21
# ... with 8 more variables: `mean day 66` <dbl>, `se day 66` <dbl>, `mean day 81` <dbl>, `se day 81` <dbl>, `mean day 94` <dbl>, `se day
# 94` <dbl>, `mean day 116` <dbl>, `se day 116` <dbl>
I think what you started off with was in the right direction. You can get the data in long format, change the column names the way you want, and then get the data back in wide format.
library(dplyr)
library(tidyr)
df1 %>%
  group_by(days_incubated = as.numeric(days_incubated)) %>%
  summarise(across(i.x33.031:i.x40.023, list(mean = mean, se = plotrix::std.error))) %>%
  pivot_longer(cols = -days_incubated,
               names_to = c('mass', 'col'),
               names_sep = '_') %>%
  # If you need the columns exactly as shown
  mutate(col = paste(col, 'day')) %>%
  pivot_wider(names_from = c(col, days_incubated), values_from = value, names_sep = ' ')
# mass `mean day 0` `se day 0` `mean day 4` `se day 4` `mean day 10` `se day 10`
# <chr> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl>
#1 i.x3… 24.2 13.9 45.1 17.5 3.29 0.932
#2 i.x3… 0.036 0.0287 0.048 0.0116 0.014 0.00748
#3 i.x3… 0.178 0.0867 0 0 0 0
#4 i.x3… 0.02 0.0155 0 0 0 0
#5 i.x4… 0.182 0.0978 0.294 0.240 0.174 0.0941
# … with 12 more variables: `mean day 17` <dbl>, `se day 17` <dbl>, `mean day
# 24` <dbl>, `se day 24` <dbl>, `mean day 66` <dbl>, `se day 66` <dbl>, `mean day
# 81` <dbl>, `se day 81` <dbl>, `mean day 94` <dbl>, `se day 94` <dbl>, `mean day
# 116` <dbl>, `se day 116` <dbl>
Sample data:
X_5 X_1 Y alpha_5 alpha_1 beta_5 beta_1
<dbl> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl>
1 0.21 0.02 0.61 10 5 3 0.01
2 0.01 0.02 0.37 0.4 0.01 0.8 0.5
3 0.02 0.03 0.55 0.01 0.01 0.3 0.99
4 0.04 0.05 0.29 0.01 0.005 0.03 0.55
5 0.11 0.1 -0.08 0.22 0.015 0.01 0.01
6 0.22 0.21 -0.08 0.02 0.03 0.01 0.01
I have a dataset which has columns of some variable of interest, say alpha, beta, and so on. I also have this saved as a character vector. I want to be able to mutate new columns based on these variable names, suffixed with an identifier, using the existing columns in the dataset as part of some transformation, like this:
df %>% mutate(
alpha_new = ((alpha_5-alpha_1) / (X_5-X_1) * Y),
beta_new = ((beta_5-beta_1) / (X_5-X_1) * Y)
)
X_5 X_1 Y alpha_5 alpha_1 beta_5 beta_1 alpha_new beta_new
<dbl> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl>
1 0.21 0.02 0.61 10 5 3 0.01 16.1 9.60
2 0.01 0.02 0.37 0.4 0.01 0.8 0.5 -14.4 -11.1
3 0.02 0.03 0.55 0.01 0.01 0.3 0.99 0 38.0
4 0.04 0.05 0.29 0.01 0.005 0.03 0.55 -0.145 15.1
5 0.11 0.1 -0.08 0.22 0.015 0.01 0.01 -1.64 0
6 0.22 0.21 -0.08 0.02 0.03 0.01 0.01 0.0800 0
In my real data I have many more columns like this, and I'm struggling to implement this in a "tidy" way that isn't hardcoded. What's the best practice for my situation?
Sample code:
structure(
list(
X_5 = c(0.21, 0.01, 0.02, 0.04, 0.11, 0.22),
X_1 = c(0.02,
0.02, 0.03, 0.05, 0.10, 0.21),
Y = c(0.61, 0.37, 0.55, 0.29, -0.08, -0.08),
alpha_5 = c(10, 0.4, 0.01, 0.01, 0.22, 0.02),
alpha_1 = c(5, 0.01, 0.01, 0.005, 0.015, 0.03),
beta_5 = c(3, 0.8, 0.3, 0.03, 0.01, 0.01),
beta_1 = c(0.01, 0.5, 0.99, 0.55, 0.01, 0.01)
),
row.names = c(NA, -6L),
class = c("tbl_df", "tbl", "data.frame")
) -> df
variable_of_interest <- c("alpha", "beta")
Here's another way to approach this with dynamic creation of columns. With map_dfc from purrr you can column-bind new results, creating new column names with bang-bang (!!) on the left-hand side of the := operator, and using the .data pronoun to access column values on the right-hand side.
library(tidyverse)
bind_cols(
  df,
  map_dfc(
    variable_of_interest,
    ~ transmute(df, !!paste0(.x, '_new') :=
                  (.data[[paste0(.x, '_5')]] - .data[[paste0(.x, '_1')]]) /
                  (X_5 - X_1) * Y)
  )
)
Output
X_5 X_1 Y alpha_5 alpha_1 beta_5 beta_1 alpha_new beta_new
1 0.21 0.02 0.61 10.00 5.000 3.00 0.01 16.05263 9.599474
2 0.01 0.02 0.37 0.40 0.010 0.80 0.50 -14.43000 -11.100000
3 0.02 0.03 0.55 0.01 0.010 0.30 0.99 0.00000 37.950000
4 0.04 0.05 0.29 0.01 0.005 0.03 0.55 -0.14500 15.080000
5 0.11 0.10 -0.08 0.22 0.015 0.01 0.01 -1.64000 0.000000
6 0.22 0.21 -0.08 0.02 0.030 0.01 0.01 0.08000 0.000000
Better to pivot the data first
library(dplyr)
library(tidyr)
# your data
df <- structure(list(X_5 = c(0.21, 0.01, 0.02, 0.04, 0.11, 0.22), X_1 = c(0.02,
0.02, 0.03, 0.05, 0.1, 0.21), Y = c(0.61, 0.37, 0.55, 0.29, -0.08,
-0.08), alpha_5 = c(10, 0.4, 0.01, 0.01, 0.22, 0.02), alpha_1 = c(5,
0.01, 0.01, 0.005, 0.015, 0.03), beta_5 = c(3, 0.8, 0.3, 0.03,
0.01, 0.01), beta_1 = c(0.01, 0.5, 0.99, 0.55, 0.01, 0.01)), class = "data.frame", row.names = c(NA,
-6L))
df <- df |> mutate(id = 1:n()) |>
pivot_longer(cols = -c(id, Y, X_5, X_1),
names_to = c("name", ".value"), names_sep="_") |>
mutate(new= (`5` - `1`) / (X_5 - X_1) * Y) |>
pivot_wider(id_cols = id, names_from = "name", values_from = c(`5`,`1`, `new`),
names_glue = "{name}_{.value}", values_fn = sum)
df
#> # A tibble: 6 × 7
#> id alpha_5 beta_5 alpha_1 beta_1 alpha_new beta_new
#> <int> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl>
#> 1 1 10 3 5 0.01 16.1 9.60
#> 2 2 0.4 0.8 0.01 0.5 -14.4 -11.1
#> 3 3 0.01 0.3 0.01 0.99 0 38.0
#> 4 4 0.01 0.03 0.005 0.55 -0.145 15.1
#> 5 5 0.22 0.01 0.015 0.01 -1.64 0
#> 6 6 0.02 0.01 0.03 0.01 0.0800 0
Created on 2023-02-16 with reprex v2.0.2
Note: if you want to add X_5 and X_1 in the output use id_cols = c(id, X_5, X_1) instead.
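For example, the last step would then read (a sketch of the variant just mentioned):
pivot_wider(id_cols = c(id, X_5, X_1), names_from = "name", values_from = c(`5`, `1`, `new`),
            names_glue = "{name}_{.value}", values_fn = sum)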
I modified your data to create a slightly more complicated situation; my hope is that this is close to your real situation. The condition for this idea is that the two columns you want to pair up stay next to each other. The first job is to collect the column names that begin with lowercase letters. The next job is to create a data frame: here I keep the column names in odd positions of target in the first column, and the ones in even positions in the second column. I was thinking along the same lines as Ben; I used map2_dfc to create an output data frame. In this function, I replaced all lowercase letters with X so that I could specify the two corresponding column names in the original data (i.e., the ones starting with X). Then I did the calculation as you specified. Finally, I created a column name for the outcome in the loop. If you want to add the result to the original data, you can run the final line with cbind.
library(tidyverse) # for map2_dfc() and tibble()

grep(x = names(df), pattern = "[[:lower:]]+_[0-9]+", value = TRUE) -> target

tibble(first_element = target[c(TRUE, FALSE)],
       second_element = target[c(FALSE, TRUE)]) -> mydf

map2_dfc(.x = mydf$first_element,
         .y = mydf$second_element,
         .f = function(x, y) {
           sub(x = x, pattern = "[[:lower:]]+", replacement = "X") -> foo1
           sub(x = y, pattern = "[[:lower:]]+", replacement = "X") -> foo2
           outcome <- ((df[x] - df[y]) / (df[foo1] - df[foo2]) * df["Y"])
           names(outcome) <- paste(x,
                                   sub(x = y, pattern = "[[:lower:]]+", replacement = ""),
                                   sep = "")
           return(outcome)
         }) -> result
cbind(df, result)
# alpha_5_1 alpha_2_6 beta_5_1 beta_3_4
#1 16.05263 0.10736 9.599474 0.27145
#2 -14.43000 0.10730 -11.100000 0.28564
#3 0.00000 0.28710 37.950000 0.50820
#4 -0.14500 0.21576 15.080000 0.64206
#5 -1.64000 -0.06416 0.000000 -0.61352
#6 0.08000 -0.08480 0.000000 -0.25400
DATA
structure(list(
X_5 = c(0.21, 0.01, 0.02, 0.04, 0.11, 0.22),
X_1 = c(0.02,0.02, 0.03, 0.05, 0.10, 0.21),
X_2 = 1:6,
X_6 = 6:11,
X_3 = 21:26,
X_4 = 31:36,
Y = c(0.61, 0.37, 0.55, 0.29, -0.08, -0.08),
alpha_5 = c(10, 0.4, 0.01, 0.01, 0.22, 0.02),
alpha_1 = c(5, 0.01, 0.01, 0.005, 0.015, 0.03),
alpha_2 = c(0.12, 0.55, 0.39, 0.28, 0.99, 0.7),
alpha_6 = 1:6,
beta_5 = c(3, 0.8, 0.3, 0.03, 0.01, 0.01),
beta_1 = c(0.01, 0.5, 0.99, 0.55, 0.01, 0.01),
beta_3 = c(0.55, 0.28, 0.76, 0.86, 0.31, 0.25),
beta_4 = c(5, 8, 10, 23, 77, 32)),
row.names = c(NA, -6L),
class = c("tbl_df", "tbl", "data.frame")) -> df
I have a dataset like this
Time1 Time2 Time3
A
Median 0.046 0.12 0
Q1, Q3 -0.12, 0.22 -1.67, -4.59 -0.245, 0.289
Range -2.75 -4.65 -2.20 - 1.425 -3.12, -1.928
B
Median 0.016 0.42 0.067
Q1, Q3 -0.21, 0.63 -1.17, -2.98 -0.478, 0.187
Range -2.15 -2.15 -1.12 - 1.125 -1.45, -1.478
What I want is to make this look like this
Time1 Time2 Time3
Median Q1,Q3 Range Median Q1,Q3 Range Median Q1,Q3 Range
A 0.046 -0.12, 0.22 2.75 -4.65 0.12 -1.67, -4.59 -2.20 - 1.425 0 -0.245, 0.289 -3.12, -1.928
B 0.016 -0.21, 0.63 -2.15 -2.15 0.42 -1.17, -2.98 -1.12 - 1.125 0.067 -0.478, 0.187 -1.45, -1.478
I have used the spread function before to change long to wide, but I'm not sure how to turn this into a nested wide format. Any suggestions are much appreciated.
df <- structure(list(Col1 = c("A", "Median", "Q1, Q3", "Range", "B",
"Median", "Q1, Q3", "Range"), Time1 = c("", "0.046", "-0.12, 0.22",
"-2.75 -4.65", "", "0.016", "-0.21, 0.63", "-2.15 -2.15"), Time2 = c("",
"0.12", "-1.67, -4.59", "-2.20 - 1.425", "", "0.42", "-1.17, -2.98",
"-1.12 - 1.125"), Time3 = c("", "0 ", "-0.245, 0.289 ",
"-3.12, -1.928", "", "0.067 ", "-0.478, 0.187 ", "-1.45, -1.478"
)), class = "data.frame", row.names = c(NA, -8L))
Here is a potential solution; see the comments for the step-by-step.
library(tidyr)
#find rows containing the ids
namerows <- which(df$Time1=="")
#create and fill in the id column
df$id <- ifelse(df$Time1=="", df$Col1, NA)
df <- fill(df, id, .direction="down")
#clean up the dataframe
df <- df[-namerows, ]
#pivot
pivot_wider(df, id_cols = "id", names_from = "Col1", values_from = starts_with("Time"))
The result:
# A tibble: 2 × 10
id Time1_Median `Time1_Q1, Q3` Time1_Range Time2_Median `Time2_Q1, Q3` Time2_Range Time3_Median `Time3_Q1, Q3` Time3_Range
<chr> <chr> <chr> <chr> <chr> <chr> <chr> <chr> <chr> <chr>
1 A 0.046 -0.12, 0.22 -2.75 -4.65 0.12 -1.67, -4.59 -2.20 - 1.425 "0 " "-0.245, 0.289 " -3.12, -1.928
2 B 0.016 -0.21, 0.63 -2.15 -2.15 0.42 -1.17, -2.98 -1.12 - 1.125 "0.067 " "-0.478, 0.187 " -1.45, -1.478
My data set is about forest fires and NDVI values (a value ranging from 0 to 1, indicating how green the surface is). It has an initial column which says when the forest fire of each row took place, and subsequent columns indicating the NDVI value on different dates, before and after the fire happened. NDVI values before the fire are substantially higher compared with values after the fire. Something like:
data1989 <- data.frame("date_fire" = c("1987-01-01", "1987-07-03", "1988-01-01"),
"1986-01-01" = c(0.5, 0.589, 0.66),
"1986-06-03" = c(0.56, 0.447, 0.75),
"1986-10-19" = c(0.8, NA, 0.83),
"1987-01-19" = c(0.75, 0.65,0.75),
"1987-06-19" = c(0.1, 0.55,0.811),
"1987-10-19" = c(0.15, 0.12, 0.780),
"1988-01-19" = c(0.2, 0.22,0.32),
"1988-06-19" = c(0.18, 0.21,0.23),
"1988-10-19" = c(0.21, 0.24, 0.250),
stringsAsFactors = FALSE)
> data1989
date_fire X1986.01.01 X1986.06.03 X1986.10.19 X1987.01.19 X1987.06.19 X1987.10.19 X1988.01.19 X1988.06.19 X1988.10.19
1 1987-01-01 0.500 0.560 0.80 0.75 0.100 0.15 0.20 0.18 0.21
2 1987-07-03 0.589 0.447 NA 0.65 0.550 0.12 0.22 0.21 0.24
3 1988-01-01 0.660 0.750 0.83 0.75 0.811 0.78 0.32 0.23 0.25
I would like to compute, in a new column, the average of the NDVI values PRIOR to the forest fire. In row one, that would be the average of columns 2, 3, 4 and 5: (0.5 + 0.56 + 0.8 + 0.75) / 4 ≈ 0.653.
What I need to get is:
date_fire X1986.01.01 X1986.06.03 X1986.10.19 X1987.01.19 X1987.06.19 X1987.10.19 X1988.01.19 X1988.06.19 X1988.10.19 meanPreFire
1 1987-01-01 0.500 0.560 0.80 0.75 0.100 0.15 0.20 0.18 0.21 0.653
2 1987-07-03 0.589 0.447 NA 0.65 0.550 0.12 0.22 0.21 0.24 0.559
3 1988-01-01 0.660 0.750 0.83 0.75 0.811 0.78 0.32 0.23 0.25 0.764
Thanks!
EDIT: SOLUTION
How to adapt the code when there is more than one column to exclude:
data1989 <- data.frame("date_fire" = c("1987-02-01", "1987-07-03", "1988-01-01"),
"type" = c("oak", "pine", "oak"),
"meanRainfall" = c(600, 300, 450),
"1986.01.01" = c(0.5, 0.589, 0.66),
"1986.06.03" = c(0.56, 0.447, 0.75),
"1986.10.19" = c(0.8, NA, 0.83),
"1987.01.19" = c(0.75, 0.65,0.75),
"1987.06.19" = c(0.1, 0.55,0.811),
"1987.10.19" = c(0.15, 0.12, 0.780),
"1988.01.19" = c(0.2, 0.22,0.32),
"1988.06.19" = c(0.18, 0.21,0.23),
"1988.10.19" = c(0.21, 0.24, 0.250),
check.names = FALSE,
stringsAsFactors = FALSE)
Using:
j1 <- findInterval(as.Date(data1989$date_fire), as.Date(names(data1989)[-(1:3)],format="%Y.%m.%d"))
m1 <- cbind(rep(seq_len(nrow(data1989)), j1), sequence(j1))
data1989$meanPreFire <- tapply(data1989[-(1:3)][m1], m1[,1], FUN = mean, na.rm = TRUE)
> data1989
date_fire type meanRainfall 1986.01.01 1986.06.03 1986.10.19 1987.01.19 1987.06.19 1987.10.19 1988.01.19 1988.06.19 1988.10.19 meanPreFire
1 1987-02-01 oak 600 0.500 0.560 0.80 0.75 0.100 0.15 0.20 0.18 0.21 0.6525
2 1987-07-03 pine 300 0.589 0.447 NA 0.65 0.550 0.12 0.22 0.21 0.24 0.5590
3 1988-01-01 oak 450 0.660 0.750 0.83 0.75 0.811 0.78 0.32 0.23 0.25 0.7635
Reshape data to the long form and filter dates prior to the forest fire.
library(tidyverse)
data1989 %>%
  pivot_longer(-date_fire, names_to = "date") %>%
  mutate(date_fire = as.Date(date_fire),
         date = as.Date(date, "X%Y.%m.%d")) %>%
  filter(date < date_fire) %>%
  group_by(date_fire) %>%
  summarise(meanPreFire = mean(value, na.rm = T))
# # A tibble: 3 x 2
# date_fire meanPreFire
# <date> <dbl>
# 1 1987-01-01 0.62
# 2 1987-07-03 0.559
# 3 1988-01-01 0.764
The solution would be much more concise if we kept the data in long(er) form, but this reproduces the desired output:
library(dplyr)
library(tidyr)
data1989 %>%
  pivot_longer(-date_fire, names_to = "date_NDVI", values_to = "value", names_prefix = "^X") %>%
  mutate(date_fire = as.Date(date_fire, "%Y-%m-%d"),
         date_NDVI = as.Date(date_NDVI, "%Y.%m.%d")) %>%
  group_by(date_fire) %>%
  mutate(period = ifelse(date_NDVI < date_fire, "before_fire", "after_fire")) %>%
  group_by(date_fire, period) %>%
  mutate(average_NDVI = mean(value, na.rm = TRUE)) %>%
  pivot_wider(names_from = date_NDVI, names_prefix = "X", values_from = value) %>%
  pivot_wider(names_from = period, values_from = average_NDVI) %>%
  group_by(date_fire) %>%
  summarise_all(funs(sum(., na.rm = T)))
Returns:
# A tibble: 3 x 12
date_fire `X1986-01-01` `X1986-06-03` `X1986-10-19` `X1987-01-19` `X1987-06-19` `X1987-10-19` `X1988-01-19` `X1988-06-19` `X1988-10-19` before_fire after_fire
<date> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl>
1 1987-01-01 0.5 0.56 0.8 0.75 0.1 0.15 0.2 0.18 0.21 0.62 0.265
2 1987-07-03 0.589 0.447 0 0.65 0.55 0.12 0.22 0.21 0.24 0.559 0.198
3 1988-01-01 0.66 0.75 0.83 0.75 0.811 0.78 0.32 0.23 0.25 0.764 0.267
Edit:
If we stop the expression right after calculating the averages, we can use the data in this structure to easily calculate the variance or account for a variable number of observations. I think it's OK to keep date_fire as its own column, but I'd suggest leaving the other dates as a column (because they correspond to observations), especially if we want to do more analysis with the data using ggplot2 and other tidyverse functions.
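A minimal sketch of that shorter version, stopping at the grouped averages (same packages and data as above):
data1989 %>%
  pivot_longer(-date_fire, names_to = "date_NDVI", values_to = "value", names_prefix = "^X") %>%
  mutate(date_fire = as.Date(date_fire, "%Y-%m-%d"),
         date_NDVI = as.Date(date_NDVI, "%Y.%m.%d"),
         period = ifelse(date_NDVI < date_fire, "before_fire", "after_fire")) %>%
  group_by(date_fire, period) %>%
  summarise(average_NDVI = mean(value, na.rm = TRUE), .groups = "drop")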
We can use base R by creating a row/column index. The column index can be obtained from findInterval() with the column names and date_fire.
# For each row, the number of date columns on or before the fire date
j1 <- findInterval(as.Date(data1989$date_fire), as.Date(names(data1989)[-1]))
# For each row, the indices of the date columns after the fire
l1 <- lapply(j1 + 1, `:`, ncol(data1989) - 1)
# Row/column index matrices for the pre-fire and post-fire cells
m1 <- cbind(rep(seq_len(nrow(data1989)), j1), sequence(j1))
m2 <- cbind(rep(seq_len(nrow(data1989)), lengths(l1)), unlist(l1))
# Matrix-index the date columns and take the mean within each row
data1989$meanPreFire <- tapply(data1989[-1][m1], m1[,1], FUN = mean, na.rm = TRUE)
data1989$meanPostFire <- tapply(data1989[-1][m2], m2[,1], FUN = mean, na.rm = TRUE)
data1989
# date_fire 1986-01-01 1986-06-03 1986-10-19 1987-01-19 1987-06-19 1987-10-19 1988-01-19 1988-06-19 1988-10-19
#1 1987-01-01 0.500 0.560 0.80 0.75 0.100 0.15 0.20 0.18 0.21
#2 1987-07-03 0.589 0.447 NA 0.65 0.550 0.12 0.22 0.21 0.24
#3 1988-01-01 0.660 0.750 0.83 0.75 0.811 0.78 0.32 0.23 0.25
# meanPreFire meanPostFire
#1 0.6200 0.2650000
#2 0.5590 0.1975000
#3 0.7635 0.2666667
Or using melt/dcast from data.table
library(data.table)
dcast(melt(setDT(data1989), id.var = 'date_fire')[,
.(value = mean(value, na.rm = TRUE)),
.(date_fire, grp = c('postFire', 'preFire')[1 + (as.IDate(variable) < as.IDate(date_fire))]) ], date_fire ~ grp)[data1989, on = .(date_fire)]
# date_fire postFire preFire 1986-01-01 1986-06-03 1986-10-19 1987-01-19 1987-06-19 1987-10-19 1988-01-19 1988-06-19
#1: 1987-01-01 0.2650000 0.6200 0.500 0.560 0.80 0.75 0.100 0.15 0.20 0.18
#2: 1987-07-03 0.1975000 0.5590 0.589 0.447 NA 0.65 0.550 0.12 0.22 0.21
#3: 1988-01-01 0.2666667 0.7635 0.660 0.750 0.83 0.75 0.811 0.78 0.32 0.23
# 1988-10-19
#1: 0.21
#2: 0.24
#3: 0.25
data
data1989 <- data.frame("date_fire" = c("1987-01-01", "1987-07-03", "1988-01-01"),
"1986-01-01" = c(0.5, 0.589, 0.66),
"1986-06-03" = c(0.56, 0.447, 0.75),
"1986-10-19" = c(0.8, NA, 0.83),
"1987-01-19" = c(0.75, 0.65,0.75),
"1987-06-19" = c(0.1, 0.55,0.811),
"1987-10-19" = c(0.15, 0.12, 0.780),
"1988-01-19" = c(0.2, 0.22,0.32),
"1988-06-19" = c(0.18, 0.21,0.23),
"1988-10-19" = c(0.21, 0.24, 0.250), check.names = FALSE,
stringsAsFactors = FALSE)
I have the following data.
name x1 x2 x3 x4
1 V1_3 1 0 999 999
2 V2_3 1.12 0.044 25.4 0
3 V3_3 0.917 0.045 20.4 0
4 V1_15 1 0 999 999
5 V2_15 1.07 0.036 29.8 0
6 V3_15 0.867 0.039 22.5 0
7 V1_25 1 0 999 999
8 V2_25 1.07 0.034 31.1 0
9 V3_25 0.917 0.037 24.6 0
10 V1_35 1 0 999 999
11 V2_35 1.05 0.034 31.2 0
12 V3_35 0.994 0.037 26.6 0
13 V1_47 1 0 999 999
14 V2_47 1.03 0.031 33.6 0
15 V3_47 0.937 0.034 27.4 0
16 V1_57 1 0 999 999
17 V2_57 1.13 0.036 31.9 0
18 V3_57 1.03 0.037 28.1 0
I want to convert this data to the following format. Can someone give me some suggestions, please?
name est_3 est_15 est_25 est_35 est_47 est_57
1 V2 1.12 1.07 1.07 1.05 1.03 1.13
2 V3 0.917 0.867 0.917 0.994 0.937 1.03
Here is one approach for you. Your data is called mydf here. First, you want to choose the necessary columns (i.e., name and x1) using select(). Then, you want to subset rows using filter(); you want to grab rows whose strings begin with V2 or V3, and grepl() checks whether each string has that pattern. Then, you want to split the name column into two columns (i.e., name and est). Finally, you want to convert the data to a wide format using pivot_wider().
library(dplyr)
library(tidyr)
select(mydf, name:x1) %>%
filter(grepl(x = name, pattern = "^V[2|3]")) %>%
separate(col = name, into = c("name", "est"), sep = "_") %>%
pivot_wider(names_from = "est",values_from = "x1", names_prefix = "est_")
# name est_3 est_15 est_25 est_35 est_47 est_57
# <chr> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl>
#1 V2 1.12 1.07 1.07 1.05 1.03 1.13
#2 V3 0.917 0.867 0.917 0.994 0.937 1.03
For your reference, when you ask questions, you want to provide minimal sample data and code. If you can do that, SO users can easily help you out. Please read this question.
DATA
mydf <- structure(list(name = c("V1_3", "V2_3", "V3_3", "V1_15", "V2_15",
"V3_15", "V1_25", "V2_25", "V3_25", "V1_35", "V2_35", "V3_35",
"V1_47", "V2_47", "V3_47", "V1_57", "V2_57", "V3_57"), x1 = c(1,
1.122, 0.917, 1, 1.069, 0.867, 1, 1.066, 0.917, 1, 1.048, 0.994,
1, 1.03, 0.937, 1, 1.133, 1.032), x2 = c(0, 0.044, 0.045, 0,
0.036, 0.039, 0, 0.034, 0.037, 0, 0.034, 0.037, 0, 0.031, 0.034,
0, 0.036, 0.037), x3 = c(999, 25.446, 20.385, 999, 29.751, 22.478,
999, 31.134, 24.565, 999, 31.18, 26.587, 999, 33.637, 27.405,
999, 31.883, 28.081), x4 = c(999, 0, 0, 999, 0, 0, 999, 0, 0,
999, 0, 0, 999, 0, 0, 999, 0, 0)), row.names = c(NA, -18L), class = c("tbl_df",
"tbl", "data.frame"))
When using aggregate with a compound function, the resulting data.frame has matrices inside columns.
ta = aggregate(cbind(precision, result, prPo) ~ rstx + qx + laplace, t0,
               function(x) c(x = mean(x), m = min(x), M = max(x)))
ta=head(ta)
dput(ta)
structure(list(rstx = c(3, 3, 2, 3, 2, 3), qx = c(0.2, 0.25,
0.3, 0.3, 0.33, 0.33), laplace = c(0, 0, 0, 0, 0, 0), precision = structure(c(0.174583333333333,
0.186833333333333, 0.3035, 0.19175, 0.30675, 0.193666666666667,
0.106, 0.117, 0.213, 0.101, 0.22, 0.109, 0.212, 0.235, 0.339,
0.232, 0.344, 0.232), .Dim = c(6L, 3L), .Dimnames = list(NULL,
c("x", "m", "M"))), result = structure(c(-142.333333333333,
-108.316666666667, -69.1, -85.7, -59.1666666666667, -68.5666666666667,
-268.8, -198.2, -164, -151.6, -138.2, -144.8, -30.8, -12.2, -14.2,
-3.8, -12.6, -3.4), .Dim = c(6L, 3L), .Dimnames = list(NULL,
c("x", "m", "M"))), prPo = structure(c(3.68416666666667,
3.045, 2.235, 2.53916666666667, 2.0775, 2.23666666666667, 1.6,
1, 1.02, 0.54, 0.87, 0.31, 5.04, 4.02, 2.77, 3.53, 2.63, 3.25
), .Dim = c(6L, 3L), .Dimnames = list(NULL, c("x", "m", "M")))), .Names = c("rstx",
"qx", "laplace", "precision", "result", "prPo"), row.names = c(NA,
6L), class = "data.frame")
Is there a function that transforms data.frame matrix-columns into regular columns?
Manually, for each matrix-column, column bind plus column delete works:
colnames(ta)
[1] "rstx" "qx" "laplace" "precision" "result" "prPo"
ta[,"precision"] # ta[,4]
x m M
[1,] 0.1745833 0.106 0.212
[2,] 0.1868333 0.117 0.235
[3,] 0.3035000 0.213 0.339
[4,] 0.1917500 0.101 0.232
[5,] 0.3067500 0.220 0.344
[6,] 0.1936667 0.109 0.232
#column bind + column delete
ta=cbind(ta,precision=ta[,4])
ta=ta[,-4]
colnames(ta)
[1] "rstx" "qx" "laplace" "result" "prPo" "precision.x" "precision.m"
[8] "precision.M"
ta
rstx qx laplace result.x result.m result.M prPo.x prPo.m prPo.M precision.x precision.m
1 3 0.20 0 -142.33333 -268.80000 -30.80000 3.684167 1.600000 5.040000 0.1745833 0.106
2 3 0.25 0 -108.31667 -198.20000 -12.20000 3.045000 1.000000 4.020000 0.1868333 0.117
3 2 0.30 0 -69.10000 -164.00000 -14.20000 2.235000 1.020000 2.770000 0.3035000 0.213
4 3 0.30 0 -85.70000 -151.60000 -3.80000 2.539167 0.540000 3.530000 0.1917500 0.101
5 2 0.33 0 -59.16667 -138.20000 -12.60000 2.077500 0.870000 2.630000 0.3067500 0.220
6 3 0.33 0 -68.56667 -144.80000 -3.40000 2.236667 0.310000 3.250000 0.1936667 0.109
precision.M
1 0.212
2 0.235
3 0.339
4 0.232
5 0.344
6 0.232
A matrix doesn't support matrix-columns, so as.matrix() transforms the data.frame into a plain matrix, breaking up the matrix-columns.
Here is my idea:
library(tidyverse)
ta2 <- ta %>%
as.matrix() %>%
as.data.frame()
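If I remember the behaviour of as.matrix() on data frames correctly, it uses the matrix columns' dimnames when flattening, so ta2 should end up with ordinary columns named roughly like this (a sketch, not verified against your exact data):
colnames(ta2)
# [1] "rstx"        "qx"          "laplace"     "precision.x" "precision.m" "precision.M"
# [7] "result.x"    "result.m"    "result.M"    "prPo.x"      "prPo.m"      "prPo.M"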
Somewhere on Stack Overflow I found a very simple solution:
cbind(ta[-ncol(ta)],ta[[ncol(ta)]])
rstx qx laplace precision.x precision.m precision.M result.x result.m result.M x m
1 3 0.20 0 0.1745833 0.1060000 0.2120000 -142.33333 -268.80000 -30.80000 3.684167 1.60
2 3 0.25 0 0.1868333 0.1170000 0.2350000 -108.31667 -198.20000 -12.20000 3.045000 1.00
3 2 0.30 0 0.3035000 0.2130000 0.3390000 -69.10000 -164.00000 -14.20000 2.235000 1.02
4 3 0.30 0 0.1917500 0.1010000 0.2320000 -85.70000 -151.60000 -3.80000 2.539167 0.54
5 2 0.33 0 0.3067500 0.2200000 0.3440000 -59.16667 -138.20000 -12.60000 2.077500 0.87
6 3 0.33 0 0.1936667 0.1090000 0.2320000 -68.56667 -144.80000 -3.40000 2.236667 0.31
M
1 5.04
2 4.02
3 2.77
4 3.53
5 2.63
6 3.25
Just that!
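For what it's worth, this works because cbind() on a data frame dispatches to cbind.data.frame(), and a matrix passed directly as an argument gets split into one column per matrix column (which is also why the extracted prPo ends up as plain x, m, M columns without its prefix). Passing every column of ta as its own argument flattens all matrix-columns at once and keeps the name prefixes; a sketch, not part of the original answers:
ta_flat <- do.call(data.frame, ta)  # each matrix-column becomes e.g. precision.x, precision.m, precision.M
colnames(ta_flat)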