Related
I am writing a function that uses a dataframe as filtering criteria for a big dataframe containing model outputs. These are the filtering criteria (as a df):
parameter value
1 alpha 0.1
2 beta 0.1
3 eta 0.1
4 zeta 0.1
5 lambda 0.5
6 phi 5.0
7 kappa 1.0
dput(values)
structure(list(parameter = structure(c(1L, 2L, 3L, 7L, 5L, 6L,
4L), .Label = c("alpha", "beta", "eta", "kappa", "lambda", "phi",
"zeta"), class = "factor"), value = c(0.1, 0.1, 0.1, 0.1, 0.5,
5, 1)), class = "data.frame", row.names = c(NA, -7L))
And this is what the 'outputs' df looks like:
time w x y z alpha beta eta zeta lambda phi kappa
1 0.0 10.00000 10.00000 10.000000 10.000000 0.1 0.1 0.1 0.1 0.95 5 1
1.1 0.1 10.00572 11.04680 9.896057 9.054394 0.1 0.1 0.1 0.1 0.95 5 1
1.2 0.2 10.01983 12.17827 9.592536 8.215338 0.1 0.1 0.1 0.1 0.95 5 1
1.3 0.3 10.04010 13.37290 9.112223 7.483799 0.1 0.1 0.1 0.1 0.95 5 1
1.4 0.4 10.06377 14.60353 8.489174 6.855626 0.1 0.1 0.1 0.1 0.95 5 1
1.5 0.5 10.08778 15.83982 7.764470 6.323152 0.1 0.1 0.1 0.1 0.95 5 1
dput(outputs)
structure(list(time = c(0, 0.1, 0.2, 0.3, 0.4, 0.5, 276.5, 276.6,
276.7, 276.8, 276.9, 276.961144437566), w = c(10, 10.0057192322758,
10.0198266325956, 10.040096099625, 10.0637654242843, 10.087779652849,
-1.71585943177118, -2.04004317987084, -2.56315700921588, -3.56775247519687,
-6.37643561014456, -13.828470036737), x = c(10, 11.0467963604334,
12.1782709261765, 13.3728962503142, 14.6035317074526, 15.8398164069251,
27.2774474452024, 26.3099862348669, 24.8705756934881, 22.3379071188018,
15.8960461541267, 3.62452931346518e-144), y = c(10, 9.89605687874935,
9.59253574727296, 9.11222320249057, 8.48917353431654, 7.76447036695841,
-0.604572230605542, -0.878231815857628, -1.46586965791714, -3.20623046085508,
-14.9365932475767, -3.30552834129368e+146), z = c(10, 9.05439359565339,
8.21533762023494, 7.48379901688836, 6.85562632179817, 6.3231517466183,
42.3149654949179, 43.8836626616462, 46.4372543252026, 51.7183454733949,
72.7027555440752, 3.30552834129368e+146), alpha = c(0.1, 0.1,
0.1, 0.1, 0.1, 0.1, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5), beta = c(0.1,
0.1, 0.1, 0.1, 0.1, 0.1, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5), eta = c(0.1,
0.1, 0.1, 0.1, 0.1, 0.1, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5), zeta = c(0.1,
0.1, 0.1, 0.1, 0.1, 0.1, 0.9, 0.9, 0.9, 0.9, 0.9, 0.9), lambda = c(0.9,
0.9, 0.5, 0.5, 0.9, 0.9, 0.5, 0.9, 0.5, 0.9, 0.5, 0.5
), phi = c(5, 5, 5, 5, 5, 5, 20, 20, 20, 20, 20, 20), kappa = c(1,
1, 1, 1, 1, 1, 10, 10, 10, 10, 10, 10), ode_outputs..iteration.. = c(NA,
NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA)), row.names = c("1",
"1.1", "1.2", "1.3", "1.4", "1.5", "2916.2765", "2916.2766",
"2916.2767", "2916.2768", "2916.2769", "2916.2770"), class = "data.frame")
So it should be something like:
filtered_outputs <- outputs %>% filter(all rows in column 1 == all values in column 2)
The names under the 'parameter' column correspond to column names in the 'outputs' df. I'd like this to be not hard-coded, so that I can feed in any filtering criteria as a df and the function will filter 'outputs'. I'd like to use dplyr or baseR preferably.
So you want to select all the rows in the outputs dataframe which match the values in the values dataframe?
Here is a base R approach using sweep and rowSums.
result <- outputs[rowSums(sweep(outputs[as.character(values$parameter)], 2,
values$value, `!=`)) == 0, ]
result
# time w x y z alpha beta eta zeta lambda phi kappa
#1.2 0.2 10.01983 12.17827 9.592536 8.215338 0.1 0.1 0.1 0.1 0.5 5 1
#1.3 0.3 10.04010 13.37290 9.112223 7.483799 0.1 0.1 0.1 0.1 0.5 5 1
# ode_outputs..iteration..
#1.2 NA
#1.3 NA
A possible dplyr and tidyr solution:
Create a helper data frame by turning the values data frame into wide format, and apply a semi-join to filter by the required conditions.
You could easily wrap this up in one continuous workflow but I think it's easier to understand in separate steps.
library(dplyr)
library(tidyr)
conditions <-
values %>%
pivot_wider(names_from = parameter, values_from = value)
outputs %>%
semi_join(conditions)
#> Joining, by = c("alpha", "beta", "eta", "zeta", "lambda", "phi", "kappa")
#> time w x y z alpha beta eta zeta lambda phi
#> 1.2 0.2 10.01983 12.17827 9.592536 8.215338 0.1 0.1 0.1 0.1 0.5 5
#> 1.3 0.3 10.04010 13.37290 9.112223 7.483799 0.1 0.1 0.1 0.1 0.5 5
#> kappa ode_outputs..iteration..
#> 1.2 1 NA
#> 1.3 1 NA
Created on 2021-07-08 by the reprex package (v2.0.0)
I often find these kind of things are easier when the data is in long-form format - although this is just preference:
outputs %>%
tidyr::pivot_longer(
cols = -c(time, w, x, y, z, ode_outputs..iteration..),
names_to="parameter", values_to="value_truth"
) %>%
dplyr::left_join(filter_df) %>%
dplyr::group_by(time) %>%
dplyr::filter(all(value == value_truth)) %>%
dplyr::select(-value) %>%
tidyr::pivot_wider(
names_from="parameter",
values_from="value_truth"
)
Output:
# A tibble: 2 x 13
# Groups: time [2]
time w x y z ode_outputs..iteration.. alpha beta eta zeta lambda phi kappa
<dbl> <dbl> <dbl> <dbl> <dbl> <lgl> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl>
1 0.2 10.0 12.2 9.59 8.22 NA 0.1 0.1 0.1 0.1 0.5 5 1
2 0.3 10.0 13.4 9.11 7.48 NA 0.1 0.1 0.1 0.1 0.5 5 1
Data:
outputs = structure(list(time = c(0, 0.1, 0.2, 0.3, 0.4, 0.5, 276.5, 276.6,
276.7, 276.8, 276.9, 276.961144437566), w = c(10, 10.0057192322758,
10.0198266325956, 10.040096099625, 10.0637654242843, 10.087779652849,
-1.71585943177118, -2.04004317987084, -2.56315700921588, -3.56775247519687,
-6.37643561014456, -13.828470036737), x = c(10, 11.0467963604334,
12.1782709261765, 13.3728962503142, 14.6035317074526, 15.8398164069251,
27.2774474452024, 26.3099862348669, 24.8705756934881, 22.3379071188018,
15.8960461541267, 3.62452931346518e-144), y = c(10, 9.89605687874935,
9.59253574727296, 9.11222320249057, 8.48917353431654, 7.76447036695841,
-0.604572230605542, -0.878231815857628, -1.46586965791714, -3.20623046085508,
-14.9365932475767, -3.30552834129368e+146), z = c(10, 9.05439359565339,
8.21533762023494, 7.48379901688836, 6.85562632179817, 6.3231517466183,
42.3149654949179, 43.8836626616462, 46.4372543252026, 51.7183454733949,
72.7027555440752, 3.30552834129368e+146), alpha = c(0.1, 0.1,
0.1, 0.1, 0.1, 0.1, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5), beta = c(0.1,
0.1, 0.1, 0.1, 0.1, 0.1, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5), eta = c(0.1,
0.1, 0.1, 0.1, 0.1, 0.1, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5), zeta = c(0.1,
0.1, 0.1, 0.1, 0.1, 0.1, 0.9, 0.9, 0.9, 0.9, 0.9, 0.9), lambda = c(0.9,
0.9, 0.5, 0.5, 0.9, 0.9, 0.5, 0.9, 0.5, 0.9, 0.5, 0.5
), phi = c(5, 5, 5, 5, 5, 5, 20, 20, 20, 20, 20, 20), kappa = c(1,
1, 1, 1, 1, 1, 10, 10, 10, 10, 10, 10), ode_outputs..iteration.. = c(NA,
NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA)), row.names = c("1",
"1.1", "1.2", "1.3", "1.4", "1.5", "2916.2765", "2916.2766",
"2916.2767", "2916.2768", "2916.2769", "2916.2770"), class = "data.frame")
filter_df = fread(' parameter value
1 alpha 0.1
2 beta 0.1
3 eta 0.1
4 zeta 0.1
5 lambda 0.5
6 phi 5.0
7 kappa 1.0') %>% dplyr::select(-V1)
- Example Data to work with:
To create a reduced example, this is the output of dput(df):
df <- structure(list(SubjectID = structure(c(1L, 1L, 1L, 1L, 1L, 1L,
1L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 3L, 3L, 3L, 3L, 3L, 3L,
3L, 3L), .Label = c("1", "2", "3"), class = "factor"), EventNumber = structure(c(1L,
1L, 1L, 1L, 2L, 2L, 2L, 1L, 1L, 1L, 1L, 2L, 2L, 2L, 2L, 2L, 1L,
1L, 1L, 1L, 2L, 2L, 2L, 2L), .Label = c("1", "2"), class = "factor"),
EventType = structure(c(1L, 1L, 1L, 1L, 2L, 2L, 2L, 1L, 1L,
1L, 1L, 2L, 2L, 2L, 2L, 2L, 1L, 1L, 1L, 1L, 2L, 2L, 2L, 2L
), .Label = c("A", "B"), class = "factor"), Param1 = c(0.3,
0.21, 0.87, 0.78, 0.9, 1.2, 1.4, 1.3, 0.6, 0.45, 0.45, 0.04,
0, 0.1, 0.03, 0.01, 0.09, 0.06, 0.08, 0.09, 0.03, 0.04, 0.04,
0.02), Param2 = c(45, 38, 76, 32, 67, 23, 27, 784, 623, 54,
54, 1056, 487, 341, 671, 859, 7769, 2219, 4277, 4060, 411,
440, 224, 57), Param3 = c(1.5, 1.7, 1.65, 1.32, 0.6, 0.3,
2.5, 0.4, 1.4, 0.67, 0.67, 0.32, 0.1, 0.15, 0.22, 0.29, 0.3,
0.2, 0.8, 1, 0.9, 0.8, 0.3, 0.1), Param4 = c(0.14, 0, 1,
0.86, 0, 0.6, 1, 1, 0.18, 0, 0, 0.39, 0, 1, 0.29, 0.07, 0.33,
0.53, 0.29, 0.23, 0.84, 0.61, 0.57, 0.59), Param5 = c(0.18,
0, 1, 0, 1, 0, 0.09, 1, 0.78, 0, 0, 1, 0.2, 0, 0.46, 0.72,
0.16, 0.22, 0.77, 0.52, 0.2, 0.68, 0.58, 0.17), Param6 = c(0,
1, 0.75, 0, 0.14, 0, 1, 0, 1, 0.27, 0, 1, 0, 0.23, 0.55,
0.86, 1, 0.33, 1, 1, 0.88, 0.75, 0, 0), AbsoluteTime = structure(c(1522533600,
1522533602, 1522533604, 1522533604, 1525125600, 1525125602,
1525125604, 1519254000, 1519254002, 1519254004, 1519254006,
1521759600, 1521759602, 1521759604, 1521759606, 1521759608,
1517353224, 1517353226, 1517353228, 1517353230, 1517439600,
1517439602, 1517439604, 1517439606), class = c("POSIXct",
"POSIXt"), tzone = "")), row.names = c(NA, -24L), class = "data.frame")
df
The real data has 20 subjects, EventNumbers ranging from 1 to 100, and parameters from Param1 to Param40 (depending on the experiment).
The row count is around 60,000 observations.
- What I want to achieve:
For df, create n * 40 new columns. # (40 or any number of parameters that will be chosen later.)
Think of n as "steps into the future".
Name the 40 * n newly created columns:
Param1_2, Param2_2, Param3_2, ..., Param39_2, Param40_2, ...,
Param1_3, Param2_3, Param3_3, ..., Param39_3, Param40_3, ...,
...,
Param1_n, Param2_n, Param3_n, ..., Param39_n, Param40_n
Resulting in columns
Param1_1, Param2_1, Param1_2, Param2_2, Param1_3, Param2_3, Param1_4, Param2_4, ... Param1_n, Param2_n
So every observation of subset df[X, c(4:9)] will get an additional set of variables with values from df[X+1, c(4:9)] to df[X+n, c(4:9)].
This is what the new df.extended should look like for n = 1:
df.extended <- structure(list(SubjectID = c(1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2,
2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3), EventNumber = c(1, 1,
1, 1, 2, 2, 2, 1, 1, 1, 1, 2, 2, 2, 2, 2, 1, 1, 1, 1, 2, 2, 2,
2), EventType = c("A", "A", "A", "A", "B", "B", "B", "A", "A",
"A", "A", "B", "B", "B", "B", "B", "A", "A", "A", "A", "B", "B",
"B", "B"), Param1 = c(0.3, 0.21, 0.87, 0.78, 0.9, 1.2, 1.4, 1.3,
0.6, 0.45, 0.45, 0.04, 0, 0.1, 0.03, 0.01, 0.05, 0.07, 0.06,
0.01, 0.01, 0.01, 0.07, 0.04), Param2 = c(45, 38, 76, 32, 67,
23, 27, 784, 623, 54, 54, 1056, 487, 341, 671, 859, 1858, 640,
8181, 220, 99, 86, 170, 495), Param3 = c(1.5, 1.7, 1.65, 1.32,
0.6, 0.3, 2.5, 0.4, 1.4, 0.67, 0.67, 0.32, 0.1, 0.15, 0.22, 0.29,
1.5, 0.9, 0.8, 0.9, 0.1, 0, 0.8, 0.1), Param4 = c(0.14, 0, 1,
0.86, 0, 0.6, 1, 1, 0.18, 0, 0, 0.39, 0, 1, 0.29, 0.07, 0.64,
0.11, 0.12, 0.32, 0.55, 0.67, 0.83, 0.82), Param5 = c(0.18, 0,
1, 0, 1, 0, 0.09, 1, 0.78, 0, 0, 1, 0.2, 0, 0.46, 0.72, 0.27,
0.14, 0.7, 0.67, 0.23, 0.44, 0.61, 0.76), Param6 = c(0, 1, 0.75,
0, 0.14, 0, 1, 0, 1, 0.27, 0, 1, 0, 0.23, 0.55, 0.86, 1, 0.56,
0.45, 0.5, 0, 0, 0.89, 0.11), AbsoluteTime = c("2018-04-01 00:00:00",
"2018-04-01 00:00:02", "2018-04-01 00:00:04", "2018-04-01 00:00:04",
"2018-05-01 00:00:00", "2018-05-01 00:00:02", "2018-05-01 00:00:04",
"2018-02-22 00:00:00", "2018-02-22 00:00:02", "2018-02-22 00:00:04",
"2018-02-22 00:00:06", "2018-03-23 00:00:00", "2018-03-23 00:00:02",
"2018-03-23 00:00:04", "2018-03-23 00:00:06", "2018-03-23 00:00:08",
"2018-01-31 00:00:24", "2018-01-31 00:00:26", "2018-01-31 00:00:28",
"2018-01-31 00:00:30", "2018-02-01 00:00:00", "2018-02-01 00:00:02",
"2018-02-01 00:00:04", "2018-02-01 00:00:06"), Param1_2 = c(0.21,
0.87, 0.78, NA, 1.2, 1.4, NA, 0.6, 0.45, 0.45, NA, 0, 0.1, 0.03,
0.01, NA, 0.07, 0.07, 0.08, NA, 0.09, 0.06, 0.01, NA), Param2_2 = c(38,
76, 32, NA, 23, 27, NA, 623, 54, 54, NA, 487, 341, 671, 859,
NA, 6941, 4467, 808, NA, 143, 301, 219, NA), Param3_2 = c(1.7,
1.65, 1.32, NA, 0.3, 2.5, NA, 1.4, 0.67, 0.67, NA, 0.1, 0.15,
0.22, 0.29, NA, 1, 1, 0.1, NA, 0.5, 1, 0.3, NA), Param4_2 = c(0,
1, 0.86, NA, 0.6, 1, NA, 0.18, 0, 0, NA, 0, 1, 0.29, 0.07, NA,
0.31, 0.16, 0.68, NA, 0.86, 0.47, 0.47, NA), Param5_2 = c(0,
1, 0, NA, 0, 0.09, NA, 0.78, 0, 0, NA, 0.2, 0, 0.46, 0.72, NA,
0.29, 0.26, 0.1, NA, 0.88, 0.86, 0.95, NA), Param6_2 = c(1, 0,
0, NA, 0, 1, NA, 1, 0.27, 0, NA, 0, 0.23, 0.55, 0.86, NA, 0.68,
0.66, 0, NA, 0.44, 1, 0.22, NA)), row.names = c(NA, 24L), class = "data.frame")
df.extended
How can this be solved without using loops, writing column indexes by hand etc.? Write a function for trial 2 and use doBy?
My thoughts and what I have done so far to solve this:
Trial 1:
Cycle through the SubjectIDs in a for-loop
In an inner for-loop, cycle through the EventNumber
In another inner for-loop, cycle through the rows
Get the first row by grabbing df[1, ] and save into df.temp
Merge df.temp with df[2, parameters] #
Merge df.temp with df[3, parameters] and so on
Save all resulting df.temps into df.final
Problems I ran into: Step 5:
df.temp <- df[1,]
df.temp <- merge(df.temp, df[2, !(colnames(df) == "AbsoluteTime")], by = c("SubjectID", "EventNumber", "EventType"))
df.temp <- merge(df.temp, df[3, !(colnames(df) == "AbsoluteTime")], by = c("SubjectID", "EventNumber", "EventType"))
df.temp <- merge(df.temp, df[4, !(colnames(df) == "AbsoluteTime")], by = c("SubjectID", "EventNumber", "EventType"))
Warning:
In merge.data.frame(df.temp, df[4, ], by = c("SubjectID", "EventNumber", :
column names ‘Param1.x’, ‘Param2.x’, ‘Param3.x’, ‘Param4.x’, ‘Param5.x’, ‘Param6.x’, ‘AbsoluteTime.x’, ‘Param1.y’, ‘Param2.y’,
‘Param3.y’, ‘Param4.y’, ‘Param5.y’, ‘Param6.y’, ‘AbsoluteTime.y’ are
duplicated in the result.
The column names are repeated, see the warning.
I cannot figure out how to easily create the column names / rename the new columns based on a given column name and variable.
There must be a better way than this:
n <- 3
names_vector <- c()
for (n in seq(from = c(1), to = n)) {
for (i in names(df[4:9])) {
names_vector <- c(names_vector, paste0(i, "_", c(n+1)))
}
}
names(df.temp)[c(4:9)] <- parameters
names(df.temp)[c(11:ncol(df.temp))] <- names_vector
names(df.temp)
Also, how do I prevent the last n-1 rows from breaking the script? This is a lot of work to do by hand and I think quite error prone!?
Trial 2:
Cycle through the SubjectIDs in a for-loop
In an inner for-loop, cycle through the EventNumber
Get all rows of parameters into a new data frame except the first row
Append a row with NAs
use cbind() to merge the rows
Repeat n times.
This is the code for one SubjectID and one EventNumber:
df.temp <- df[which(df$SubjectID == "1" & df$EventNumber == "1"), ]
df.temp2 <- df.temp[2:nrow(df.temp)-1, parameters]
df.temp2 <- rbind(df.temp2, NA)
df.temp <- cbind(df.temp, df.temp2)
df.temp2 <- df.temp[3:nrow(df.temp)-1, parameters]
df.temp2 <- rbind(df.temp2, NA, NA)
df.temp <- cbind(df.temp, df.temp2)
df.temp2 <- df.temp[4:nrow(df.temp)-1, parameters]
df.temp2 <- rbind(df.temp2, NA, NA, NA)
df.temp <- cbind(df.temp, df.temp2)
n <- 3
names_vector <- c()
for (n in seq(from = c(1), to = n)) {
for (i in names(df[4:9])) {
print(i)
print(n)
names_vector <- c(names_vector, paste0(i, "_", c(n+1)))
}
}
names(df.temp)[c(4:9)] <- parameters
names(df.temp)[c(11:ncol(df.temp))] <- names_vector
df.temp
That solves the problem with missing rows (NAs are acceptable in my case).
Still lots of work by hand / for loops and error prone!?
What about something like this:
You can use the developer version of the package dplyr to add and rename variables according to various subsets of interest in your data. dplyr also provides the functions lead() and lag(), which can be used to find the "next" or "previous" values in a vector (or here row). You can use lead() in combination with the function mutate_at() to extract the values from the succeeding "nth"-row and use them to create a new set of variables.
Here I use the data you provided in your example:
# load dplyr package
require(dplyr)
# create new data frame "df.extended"
df.extended <- df
# number of observations per group (e.g., SubjectID)
# or desired number of successions
obs = 3
# loop until number of successions achieved
for (i in 1:obs) {
# overwrite df.extended with new information
df.extended <- df.extended %>%
# group by subjects and events
group_by(SubjectID, EventNumber) %>%
# create new variable for each parameter
mutate_at( vars(Param1:Param6),
# using the lead function
.funs = funs(step = lead),
# for the nth following row
n = i) %>%
# rename the new variables to show the succession number
rename_at(vars(contains("_step")), funs(sub("step", as.character(i), .)))
}
This should roughly recreate the data you posted as desired result.
# Look at first part of "df.extended"
> head(df.extended)
# A tibble: 6 x 28
# Groups: SubjectID, EventNumber [2]
SubjectID EventNumber EventType Param1 Param2 Param3 Param4 Param5 Param6 AbsoluteTime Param1_1 Param2_1 Param3_1 Param4_1 Param5_1 Param6_1
<fct> <fct> <fct> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl> <dttm> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl>
1 1 1 A 0.300 45. 1.50 0.140 0.180 0. 2018-04-01 00:00:00 0.210 38. 1.70 0. 0. 1.00
2 1 1 A 0.210 38. 1.70 0. 0. 1.00 2018-04-01 00:00:02 0.870 76. 1.65 1.00 1.00 0.750
3 1 1 A 0.870 76. 1.65 1.00 1.00 0.750 2018-04-01 00:00:04 0.780 32. 1.32 0.860 0. 0.
4 1 1 A 0.780 32. 1.32 0.860 0. 0. 2018-04-01 00:00:04 NA NA NA NA NA NA
5 1 2 B 0.900 67. 0.600 0. 1.00 0.140 2018-05-01 00:00:00 1.20 23. 0.300 0.600 0. 0.
6 1 2 B 1.20 23. 0.300 0.600 0. 0. 2018-05-01 00:00:02 1.40 27. 2.50 1.00 0.0900 1.00
# ... with 12 more variables: Param1_2 <dbl>, Param2_2 <dbl>, Param3_2 <dbl>, Param4_2 <dbl>, Param5_2 <dbl>, Param6_2 <dbl>, Param1_3 <dbl>,
# Param2_3 <dbl>, Param3_3 <dbl>, Param4_3 <dbl>, Param5_3 <dbl>, Param6_3 <dbl>
For base R, consider by to slice by SubjectID, EventNumber, and EventType, and run a merge using a helper group_num. And to run across a series of params, wrap by process in an lapply for list of dataframes that you chain merge on the outside for final merge with original dataframe:
df_list <- lapply(2:3, function(i) {
# BUILD LIST OF DATAFRAMES
by_list <- by(df, df[c("SubjectID", "EventNumber", "EventType")], FUN=function(sub){
sub$grp_num <- 1:nrow(sub)
row_less_sub <- transform(sub, AbsoluteTime=NULL, grp_num=grp_num-(i-1))
merge(sub, row_less_sub, by=c("SubjectID", "EventNumber", "EventType", "grp_num"),
all.x=TRUE, suffixes = c("", paste0("_", i)))
})
# APPEND ALL DATAFRAMES IN LIST
grp_df <- do.call(rbind, by_list)
grp_df <- with(grp_df, grp_df[order(SubjectID, EventNumber),])
# KEEP NEEDED COLUMNS
grp_df <- grp_df[c("SubjectID", "EventNumber", "EventType", "grp_num",
names(grp_df)[grep("Param[0-9]_", names(grp_df))])]
row.names(grp_df) <- NULL
return(grp_df)
})
# ALL PARAMS_* CHAIN MERGE
params_df <- Reduce(function(x,y) merge(x, y, by=c("SubjectID", "EventNumber", "EventType", "grp_num")), df_list)
# ORIGINAL DF AND PARAMS MERGE
df$grp_num <- ave(df$Param1, df$SubjectID, df$EventNumber, df$EventType,
FUN=function(x) cumsum(rep(1, length(x))))
final_df <- transform(merge(df, params_df, by=c("SubjectID", "EventNumber", "EventType", "grp_num")), grp_num=NULL)
Output
head(final_df, 10)
# SubjectID EventNumber EventType Param1 Param2 Param3 Param4 Param5 Param6 AbsoluteTime Param1_2 Param2_2 Param3_2 Param4_2 Param5_2 Param6_2 Param1_3 Param2_3 Param3_3 Param4_3 Param5_3 Param6_3
# 1 1 1 A 0.30 45 1.50 0.14 0.18 0.00 2018-03-31 17:00:00 0.21 38 1.70 0.00 0.00 1.00 0.87 76 1.65 1.00 1.00 0.75
# 2 1 1 A 0.21 38 1.70 0.00 0.00 1.00 2018-03-31 17:00:02 0.87 76 1.65 1.00 1.00 0.75 0.78 32 1.32 0.86 0.00 0.00
# 3 1 1 A 0.87 76 1.65 1.00 1.00 0.75 2018-03-31 17:00:04 0.78 32 1.32 0.86 0.00 0.00 NA NA NA NA NA NA
# 4 1 1 A 0.78 32 1.32 0.86 0.00 0.00 2018-03-31 17:00:04 NA NA NA NA NA NA NA NA NA NA NA NA
# 5 1 2 B 0.90 67 0.60 0.00 1.00 0.14 2018-04-30 17:00:00 1.20 23 0.30 0.60 0.00 0.00 1.40 27 2.50 1.00 0.09 1.00
# 6 1 2 B 1.20 23 0.30 0.60 0.00 0.00 2018-04-30 17:00:02 1.40 27 2.50 1.00 0.09 1.00 NA NA NA NA NA NA
# 7 1 2 B 1.40 27 2.50 1.00 0.09 1.00 2018-04-30 17:00:04 NA NA NA NA NA NA NA NA NA NA NA NA
# 8 2 1 A 1.30 784 0.40 1.00 1.00 0.00 2018-02-21 17:00:00 0.60 623 1.40 0.18 0.78 1.00 0.45 54 0.67 0.00 0.00 0.27
# 9 2 1 A 0.60 623 1.40 0.18 0.78 1.00 2018-02-21 17:00:02 0.45 54 0.67 0.00 0.00 0.27 0.45 54 0.67 0.00 0.00 0.00
# 10 2 1 A 0.45 54 0.67 0.00 0.00 0.27 2018-02-21 17:00:04 0.45 54 0.67 0.00 0.00 0.00 NA NA NA NA NA NA
I have to loop through a list and when I index it, .Last.value stops working.
Example:
A list:
x
[[1]]
cd_entrada qx laplace r
21 1 0.65 50 16.35
141 1 0.65 0 11.24
81 1 0.80 0 2.87
181 0 0.75 25 2.68
[[2]]
cd_entrada qx penalty stepno r
1 0 0.40 10 1500 12.64
2 1 0.45 400 2000 21.58
3 1 0.35 400 750 19.38
4 1 0.45 400 1000 12.78
5 1 0.55 400 1500 4.77
6 1 0.50 400 750 4.79
7 1 0.65 400 750 1.80
8 1 0.75 100 750 1.80
9 2 0.65 100 1500 1.95
10 0 0.40 10 750 9.18
11 0 0.35 200 1000 7.90
12 1 0.45 100 1500 5.65
13 0 0.60 100 750 2.80
Filtering lines of first element
(x[[1]])[.Last.value$cd_entrada==0 & .Last.value$laplace==25,]
cd_entrada qx laplace r
181 0 0.75 25 2.68
It is ok. But when I replace 1 by i, it gives an error:
i=1
(x[[i]])[.Last.value$cd_entrada==0 & .Last.value$laplace==25,]
Error in .Last.value$cd_entrada :
$ operator is invalid for atomic vectors
But .Last.value still returns the same data.frame:
(x[[i]]); .Last.value
cd_entrada qx laplace r
21 1 0.65 50 16.35
141 1 0.65 0 11.24
81 1 0.80 0 2.87
181 0 0.75 25 2.68
cd_entrada qx laplace r
21 1 0.65 50 16.35
141 1 0.65 0 11.24
81 1 0.80 0 2.87
181 0 0.75 25 2.68
P.S.
> dput(x)
list(structure(list(cd_entrada = c(1, 1, 1, 0), qx = c(0.65,
0.65, 0.8, 0.75), laplace = c(50, 0, 0, 25), r = c(16.35, 11.24,
2.87, 2.68)), .Names = c("cd_entrada", "qx", "laplace", "r"), class = "data.frame", row.names = c(21L,
141L, 81L, 181L)), structure(list(cd_entrada = c(0, 1, 1, 1,
1, 1, 1, 1, 2, 0, 0, 1, 0), qx = c(0.4, 0.45, 0.35, 0.45, 0.55,
0.5, 0.65, 0.75, 0.65, 0.4, 0.35, 0.45, 0.6), penalty = c(10,
400, 400, 400, 400, 400, 400, 100, 100, 10, 200, 100, 100), stepno = c(1500,
2000, 750, 1000, 1500, 750, 750, 750, 1500, 750, 1000, 1500,
750), r = c(12.64, 21.58, 19.38, 12.78, 4.77, 4.79, 1.8, 1.8,
1.95, 9.18, 7.9, 5.65, 2.8)), .Names = c("cd_entrada", "qx",
"penalty", "stepno", "r"), class = "data.frame", row.names = c(NA,
-13L)))
.Last.value will change every time you do another assignment such as x=1. At that point, .Last.value will just be a length-1 vector with the value 1. You cannot use .Last.value in a function or in a subsetting operation. It is only reliable when you want to retrieve something that you might not have assigned to a name. As an example, you've just entered a complex ggplot2 call and don't want to scrape the result from your console and then edit out the extra line-continuation "+"-signs. You could just execute:
my_last_plot <- .Last.value
I think you might want tail(), or you might want to be subsetting x[[1]] with a logical expression, but your first result was almost surely a lucky accident based on your having just examined x[[1]] (but then not showing us that code sequence). See what happens when I first assign your dput output to x
x <-
list(structure(list(cd_entrada = c(1, 1, 1, 0), qx = c(0.65,
0.65, 0.8, 0.75), laplace = c(50, 0, 0, 25), r = c(16.35, 11.24,
2.87, 2.68)), .Names = c("cd_entrada", "qx", "laplace", "r"), class = "data.frame", row.names = c(21L,
141L, 81L, 181L)), structure(list(cd_entrada = c(0, 1, 1, 1,
1, 1, 1, 1, 2, 0, 0, 1, 0), qx = c(0.4, 0.45, 0.35, 0.45, 0.55,
0.5, 0.65, 0.75, 0.65, 0.4, 0.35, 0.45, 0.6), penalty = c(10,
400, 400, 400, 400, 400, 400, 100, 100, 10, 200, 100, 100), stepno = c(1500,
2000, 750, 1000, 1500, 750, 750, 750, 1500, 750, 1000, 1500,
750), r = c(12.64, 21.58, 19.38, 12.78, 4.77, 4.79, 1.8, 1.8,
1.95, 9.18, 7.9, 5.65, 2.8)), .Names = c("cd_entrada", "qx",
"penalty", "stepno", "r"), class = "data.frame", row.names = c(NA,
-13L)))
(x[[1]])[.Last.value$cd_entrada==0 & .Last.value$laplace==25,]
#[1] cd_entrada qx laplace r
#<0 rows> (or 0-length row.names)
Not the result you got, but if we first look at x[[1]]:
x[[1]]
# cd_entrada qx laplace r
#21 1 0.65 50 16.35
#141 1 0.65 0 11.24
#81 1 0.80 0 2.87
#181 0 0.75 25 2.68
(x[[1]])[.Last.value$cd_entrada==0 & .Last.value$laplace==25,]
# cd_entrada qx laplace r
#181 0 0.75 25 2.68
If you wanted to do that operation reliably use this code instead:
i=1
x[[i]][ x[[i]]$cd_entrada==0 & x[[i]]$laplace==25, ]
#-----------
cd_entrada qx laplace r
181 0 0.75 25 2.68
I have a table like this one (table 1):
X a b c d
A 1 0 1 1
B 1 0 0 1
C 0 0 1 1
D 1 1 0 1
E 0 0 1 0
And another one with an identical column "X" like this one (table 2):
X a b b.1 c d d.1
A 0.8 1.5 1.2 3 0.8 0.9
B 0.7 0.1 0.3 0.002 0.7 0.03
C 0.3 0.2 0.4 0.4 0.6 1.3
D 1.2 1.4 0.95 0.5 1.2 0.4
E 1 0.01 1.4 1.9 1.7 0.2
As you can see one column in table1 (e.g. column b) can have one or two corresponding columns in table2 (e.g. columns b and b.1)
I would like to apply the following modifications to table 1:
If the value in table1 is 1 and in the corresponding columns from table2 at least one value is > 0.9, keep the value as a 1
If the value in table1 is 1 but the corresponding value(s) in table2 are not > 0.9, replace with "NA"
If the value in table1 is 0, but at least one corresponding value(s) in table2 is > 0.9, replace with "NA"
If the value in table1 is 0, but the corresponding value(s) in table2 are not > 0.9, replace with 0
Therefore, I would get this table as a result:
X a b c d
A NA NA 1 NA
B NA 0 0 NA
C 0 0 NA 1
D 1 1 0 1
E NA NA 1 NA
Please let me know if I can clarify anything further. Thank you for your help!
Please note that the solution has to be applicable to much larger data frames!
Here is the example data:
> dput(table1)
structure(c(1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1,
1, 1, 0), .Dim = c(5L, 4L), .Dimnames = list(c("A", "B", "C",
"D", "E"), c("a", "b", "c", "d")))
> dput(table2)
structure(c(0.8, 0.7, 0.3, 1.2, 1, 1.5, 0.1, 0.2, 1.4, 0.01,
1.2, 0.3, 0.4, 0.95, 1.4, 3, 0.002, 0.4, 0.5, 1.9, 0.8, 0.7,
0.6, 1.2, 1.7, 0.9, 0.03, 1.3, 0.4, 0.2), .Dim = 5:6, .Dimnames = list(
c("A", "B", "C", "D", "E"), c("a", "b", "b.1", "c", "d",
"d.1")))
This solution requires converting the matrices to data frames and using functions from tidyverse. There is definitely a simpler way to do this. Hopefully, someone can share their answers.
# Create example data
dt1 <- structure(c(1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1,
1, 1, 0), .Dim = c(5L, 4L), .Dimnames = list(c("A", "B", "C",
"D", "E"), c("a", "b", "c", "d")))
dt2 <- structure(c(0.8, 0.7, 0.3, 1.2, 1, 1.5, 0.1, 0.2, 1.4, 0.01,
1.2, 0.3, 0.4, 0.95, 1.4, 3, 0.002, 0.4, 0.5, 1.9, 0.8, 0.7,
0.6, 1.2, 1.7, 0.9, 0.03, 1.3, 0.4, 0.2), .Dim = 5:6, .Dimnames = list(
c("A", "B", "C", "D", "E"), c("a", "b", "b.1", "c", "d",
"d.1")))
# Load package
library(tidyverse)
# Extract row names
Row <- rownames(dt1)
# Convert dt1 and dt2 to data frames
dt1 <- as_data_frame(dt1)
dt2 <- as_data_frame(dt2)
# Add the row names as a new column
dt1 <- dt1 %>% mutate(Row = Row)
dt2 <- dt2 %>% mutate(Row = Row)
# Re-organize the dataset
dt1_r <- dt1 %>% gather(Class, Value, -Row)
dt2_r <- dt2 %>% gather(Class, Value, -Row)
# Keep only the letters, remove numbers and . in Class
dt2_r <- dt2_r %>% mutate(Class = gsub("\\..*", "", Class))
# Check the value in dt2 for each combination of Row and Class
# if any value is larger than 0.9, mark as 1, otherwise 0
dt3 <- dt2_r %>%
group_by(Row, Class) %>%
summarise(Threshold = ifelse(any(Value > 0.9), 1, 0))
# Merge dt3 and dt1_r by Row and Class
dt4 <- dt1_r %>% left_join(dt3, by = c("Row", "Class"))
# Create a new column to document the result based on the condition in Value and Threshold
dt5 <- dt4 %>%
group_by(Row, Class) %>%
mutate(Value2 = ifelse(Value == 1 & Threshold == 1, 1,
ifelse(Value == 1 & Threshold != 1, NA,
ifelse(Value == 0 & Threshold == 1, NA, 0)))) %>%
select(Row, Class, Value2)
# Re-organize dt5
dt5_r <- dt5 %>% spread(Class, Value2)
# Convert dt5_r to a matrix
dt6 <- dt5_r %>%
ungroup() %>%
select(-Row) %>%
as.matrix()
# Rename the matrix, dt6 is the final output
rownames(dt6) <- Row
My apologies if this question has already been answered but I can't find it.
I have a table in R: (see below example copied from txt, the actual table has more data and NA)
I need to compute the mean and sd from column c, e, and f by the group in column b
I can calculate the mean and sd separately by group for each column individually, e.g.
mean(c[b == 1], na.rm=TRUE)
var(e[b == 2], na.rm=TRUE)
I can also calculate the mean and SD for all the columns and generate a table with the results
library(data.table)
new <- data.table(project2016)
wide <- setnames(new[, sapply(.SD, function(x) list(mean = round(mean(x), 3), sd = round(sd(x), 3))), by = b], c("b", sapply(names(new)[-1], paste0, c(".mean", ".SD"))))
wide
But I am not able to do it for only the needed columns and separated by group.
Thx in advance,
Nimby
"id" "a" "b" "c" "d" "e" "f" "g"
1 78 2 83 4 2.53 1.07 3
2 72 2 117 4 2.50 1.16 2
3 72 2 132 4 2.43 1.13 2
4 73 2 102 4 2.48 .81 2
5 73 2 114 4 2.33 1.13 2
6 73 2 88 43 2.13 .84 2
7 65 2 213 4 2.55 1.26 1
8 68 2 153 4 2.45 1.23 1
library(dplyr)
# Some reproducible data
d <- matrix(c(1, 78, 2, 83, 4, 2.53, 1.07, 3, 2, 72, 2, 117, 4, 2.50, 1.16, 2, 3, 72, 2, 132, 4, 2.43, 1.13, 2, 4, 73, 2, 102, 4, 2.48, .81, 2, 5, 73, 2, 114, 4, 2.33, 1.13, 2, 6, 73, 2, 88, 43, 2.13, .84, 2, 7, 65, 2, 213, 4, 2.55, 1.26, 1, 8, 68, 2, 153, 4, 2.45, 1.23, 1),
ncol = 8, byrow = TRUE) %>%
as.data.frame
names(d) <- c("id", "a", "b", "c", "d", "e", "f", "g")
# Your data only included one group in column b
d$b[5:8] <- 1
# Calc mean and sd for the 3 columns, grouped by b
d %>%
group_by(b) %>%
summarise(mean_c = mean(c), sd_c = sd(c),
mean_e = mean(e), sd_e = sd(e),
mean_f = mean(f), sd_f = sd(f))
d
This yields
# A tibble: 2 × 7
b mean_c sd_c mean_e sd_e mean_f sd_f
<dbl> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl>
1 1 142.0 54.35071 2.365 0.18064699 1.1150 0.1915724
2 2 108.5 20.95233 2.485 0.04203173 1.0425 0.1594522
There'll also be non dplyr ways to do it.