Function using conditions - r

I am trying to create a function that considers a determined number of samples (rivers), each with a determined number of observations. Given 10 samples, each with 12 observations from a lognormal distribution with mean=4 and sd=1.4, I would like to obtain the number of times a particular value (6 — a standard threshold for water quality measurement) is exceeded.
The following is the code for one experiment, where "limit" is the maximum number of observations allowed to exceed 6.
# Count rivers declared impaired: a river is impaired when more than
# `limit` of its observations meet or exceed the quality threshold.
set.seed(1001)
nsamp <- 10                              # number of rivers (samples)
nobs <- 12                               # observations per river
threshold <- 6                           # water-quality standard
limit <- round(0.10 * nobs, digits = 0)  # max allowed exceedances (here 1)
# NOTE(review): the question asks for a lognormal distribution, but
# rnorm() draws from a normal one — confirm whether rlnorm() was meant.
h2o <- as.data.frame(matrix(rnorm(nsamp * nobs, mean = 4, sd = 1.4),
                            ncol = nobs))
rownames(h2o) <- paste0("Riv", seq_len(nrow(h2o)))
colnames(h2o) <- paste0("Obs", seq_len(ncol(h2o)))
# Per-river count of observations at or above the threshold
# (a logical matrix row-summed directly; no ifelse() needed).
h2o$Test <- rowSums(h2o >= threshold)
# Number of rivers whose exceedance count is above the allowed limit.
impaired <- length(h2o$Test[h2o$Test > limit])
impaired
The function should reproduce the previous analysis and work for different numbers of observations and samples.
Thanks

Here's a function using dplyr and tidyr from the tidyverse.
library(tidyverse)
#' Classify rivers as impaired from a wide table of observations.
#'
#' @param data data.frame with one row per river (row names = river IDs)
#'   and one numeric column per observation.
#' @param threshold_quality an observation is "over the limit" when its
#'   value is strictly greater than this threshold (default 6).
#' @param limit a river is impaired when it has strictly more than this
#'   many over-the-limit observations (default 1).
#' @return a named list: `table_wide` (0/1 exceedance indicator per
#'   river x observation), `summary` (exceedance count per river), and
#'   `result` (one-row tibble with `num_impaired`).
test_h2o <- function(data, threshold_quality = 6, limit = 1) {
  # Long format: one row per (river, observation) pair.
  long <- data %>%
    rownames_to_column("river") %>%
    pivot_longer(-river, names_to = "observation", values_to = "value") %>%
    mutate(over_lim = value > threshold_quality)

  # Wide 0/1 indicator table. Unlike the superseded spread(),
  # pivot_wider() keeps the observation columns in their original
  # order (Obs1, Obs2, ..., not the alphabetical Obs1, Obs10, ...).
  table_wide <- long %>%
    select(river, observation, over_lim) %>%
    mutate(over_lim = as.integer(over_lim)) %>%
    pivot_wider(names_from = observation, values_from = over_lim)

  # Exceedance count per river (renamed locally so we don't shadow
  # base::summary).
  per_river <- long %>%
    group_by(river) %>%
    summarize(over_lim_count = sum(over_lim), .groups = "drop")

  # Number of rivers over the allowed exceedance count.
  result <- per_river %>%
    summarize(num_impaired = sum(over_lim_count > limit))

  # Named for readability; [[1]]/[[2]]/[[3]] access still works.
  list(table_wide = table_wide, summary = per_river, result = result)
}
Here's the output, meant to show the steps in your example:
> test_h2o(h2o)
[[1]]
river Obs1 Obs10 Obs11 Obs12 Obs2 Obs3 Obs4 Obs5 Obs6 Obs7 Obs8 Obs9
1 Riv1 0 0 0 0 0 0 0 0 0 0 0 0
2 Riv10 0 0 0 0 0 0 0 0 0 0 0 0
3 Riv2 0 0 1 0 0 0 0 0 0 0 0 0
4 Riv3 1 0 0 0 0 0 0 0 0 0 0 0
5 Riv4 0 0 0 0 0 0 0 0 0 0 0 1
6 Riv5 0 1 0 0 0 0 0 0 0 0 0 0
7 Riv6 0 1 1 0 0 1 0 0 0 0 0 0
8 Riv7 0 0 0 0 0 0 0 0 0 0 0 0
9 Riv8 0 0 0 0 0 0 0 1 0 1 0 0
10 Riv9 1 0 0 0 0 0 0 0 0 0 0 0
[[2]]
# A tibble: 10 x 2
river over_lim_count
<chr> <int>
1 Riv1 0
2 Riv10 0
3 Riv2 1
4 Riv3 1
5 Riv4 1
6 Riv5 1
7 Riv6 3
8 Riv7 0
9 Riv8 2
10 Riv9 1
[[3]]
# A tibble: 1 x 1
num_impaired
<int>
1 2

Related

Calculate amount of times a interaction has been made by the same client id when a conversion is made

I have a dataset like so :
client_id
interaction_1
interaction_2
conversion
A
1
0
0
B
0
1
0
C
0
0
1
A
0
0
1
B
0
1
0
B
0
0
1
C
0
1
0
C
0
0
1
The dataset is already ordered by an ascending timestamp. Both the interaction and conversion columns are dummies (0/1). For every conversion, I need to count how many times that client_id performed each interaction or conversion, but only counting interactions since the client's last conversion (therefore, the column "lag_conversion" can never be >1).
The output should look something like this:
client_id
interaction_1
interaction_2
conversion
lag_interaction_1
lag_interaction_2
lag_conversion
A
1
0
0
0
0
0
B
0
1
0
0
0
0
C
0
0
1
0
0
0
A
0
0
1
1
0
0
B
0
1
0
0
0
0
B
0
0
1
0
2
0
C
0
1
0
0
0
0
C
0
0
1
0
1
1
I've tried the code:
# NOTE(review): this loop cannot produce the lagged counts — two defects:
for (i in 1:nrow(mydata)) {
client_id <- mydata$client_id[i]
if (mydata$conversion[i] == 1) {
# 1) The condition `1:nrow(mydata) <= i` includes row i itself, and row i
#    has conversion == 1 (we are inside the if), so max() always returns
#    i — the "last conversion" found is the current row, never an
#    earlier one.
last_conversion_index <- max(which(mydata$client_id == client_id & mydata$conversion== 1 & 1:nrow(mydata) <= i))
# 2) `i:last_conversion_index` is therefore the single number i; `&`
#    coerces it to TRUE and the subscript collapses to the logical mask
#    `mydata$interaction_1 == 1`, so this assigns 1 where interaction_1
#    is already 1 — a no-op. It also targets interaction_1 itself rather
#    than filling a new lag_ column.
mydata$interaction_1[i:last_conversion_index & mydata$interaction_1== 1] <- 1
}
}
although this only results in a 1 in the row itself, meaning that the first row would look like:
client_id
interaction_1
interaction_2
conversion
lag_interaction_1
lag_interaction_2
lag_conversion
A
1
0
0
1
0
0
Any help is much appreciated! Thanks in advance
With dplyr:
library(dplyr) #1.1.0 and above
# For each client, lag every column by one row (first row defaults to 0)
# and accumulate; rows without a conversion get 0 in every lag_ column.
dat %>%
  mutate(
    across(
      everything(),
      \(x) ifelse(conversion == 1, cumsum(lag(x, default = 0)), 0),
      .names = "lag_{col}"
    ),
    .by = client_id
  )
client_id interaction_1 interaction_2 conversion lag_interaction_1 lag_interaction_2 lag_conversion
1 A 1 0 0 0 0 0
2 B 0 1 0 0 0 0
3 C 0 0 1 0 0 0
4 A 0 0 1 1 0 0
5 B 0 1 0 0 0 0
6 B 0 0 1 0 2 0
7 C 0 1 0 0 0 0
8 C 0 0 1 0 1 1
below 1.1.0:
# Same computation for dplyr < 1.1: persistent grouping instead of .by,
# followed by an explicit ungroup().
dat %>%
  group_by(client_id) %>%
  mutate(
    across(
      everything(),
      \(x) ifelse(conversion == 1, cumsum(lag(x, default = 0)), 0),
      .names = "lag_{col}"
    )
  ) %>%
  ungroup()

dummyVars() in r and weird column names in R

for a dataset similar to the one below, I need N level dummy variables. I use dummyVars() from caret package.
As you can see, the column names are ignoring the `sep="-"` argument, and there are dots in the column names rather than < or > signs.
# Sample data for dummyVars(): three factor predictors and a numeric
# outcome. factor() on a character vector is equivalent to as.factor().
df <- data.frame(
  fruit = factor(c("apple", "orange", "orange", "carrot", "apple")),
  st    = factor(c("CA", "MN", "MN", "NY", "NJ")),
  wt    = factor(c("<2", "2-4", ">4", "2-4", "<2")),
  buy   = c(1, 1, 0, 1, 0)
)
fruit st wt buy
1 apple CA <2 1
2 orange MN 2-4 1
3 orange MN >4 0
4 carrot NY 2-4 1
5 apple NJ <2 0
library(caret)
dmy <- dummyVars(buy~ ., data = df, sep="-")
df2 <- data.frame(predict(dmy, newdata = df))
df2
fruit.apple fruit.carrot fruit.orange st.CA st.MN st.NJ st.NY wt..2 wt..4 wt.2.4
1 1 0 0 1 0 0 0 1 0 0
2 0 0 1 0 1 0 0 0 0 1
3 0 0 1 0 1 0 0 0 1 0
4 0 1 0 0 0 0 1 0 0 1
5 1 0 0 0 0 1 0 1 0 0
I am confused why dummyVars() is not converting the actual levels into the parts of the column names and why is it ignoring the separator argument.
I would appreciate any hint on what I am doing wrong!
EDIT: for the future readers :) ! according to AKRUN's note, the argument below for dataframe() solved the problem.
df2 <- data.frame(predict(dmy, newdata = df), check.names = FALSE)
fruit-apple fruit-carrot fruit-orange st-CA st-MN st-NJ st-NY wt-<2 wt->4 wt-2-4
1 1 0 0 1 0 0 0 1 0 0
2 0 0 1 0 1 0 0 0 0 1
3 0 0 1 0 1 0 0 0 1 0
4 0 1 0 0 0 0 1 0 0 1
5 1 0 0 0 0 1 0 1 0 0

Iteration in R using tidyverse

I am trying to avoid using a for loop and instead use tidyverse for iteration. Specifically, I have a vector of values that I want to loop through a single variable from a data frame to create new variables with a prefix. I've tried using dplyr::across but am unsuccessful when the vector length is >1
Sample code:
library(tidyverse)
library(glue)
# Ten-row sample frame: integer id plus lower/upper-case letter columns.
data <- data.frame(
  id = seq_len(10),
  y  = head(letters, 10),
  z  = head(LETTERS, 10)
)
# The letters to turn into hx_* indicator columns.
letter_list <- head(letters, 10)
# Build a 0/1 column hx_<letter> flagging rows where y equals the given
# letter. NOTE(review): mutate() returns ALL columns, so mapping this
# over 10 letters and column-binding yields 10 copies of the original
# data — the 40-column symptom described below. `{list}` on the rhs is
# just `list` wrapped in braces (harmless); !!glue(...) := builds the
# dynamic column name.
var_naming <- function(dat, list){
dat %>%
mutate(!!glue("hx_{list}") := ifelse(y == {list}, 1, 0))
}
Code I've tried:
**the correct dimensions of the data frame should be 13 variables and 10 observations**
# data_b outputs the correct number of observations but has 40 variables
data_b <- map(letter_list,
~var_naming(data, .x)) %>%
as.data.frame()
# data_c gives me the correct number of variables but has 100 observations
data_c <- map_df(letter_list,
~var_naming(data, .x))
# error message from data_d when using dplyr::across:
>> Error in `mutate()`:
>> ! Problem while computing `..1 =
>> across(...)`.
>> Caused by error in `across()`:
>> ! All unnamed arguments must be length 1
>> Run `rlang::last_error()` to see where the error occurred.
# Failing attempt: across() receives a single .fns formula but ten
# .names, which triggers the length-1 error quoted above. Also,
# `y == {letter_list}` recycles the 10-letter vector element-wise
# against the column instead of testing one letter at a time.
data_d <- data %>%
mutate(
across(
.cols = y,
.fns = ~ifelse(y == {letter_list}, 1, 0),
.names = glue("hx_{letter_list}")
))
Desired output:
id y z hx_a hx_b hx_c hx_d hx_e hx_f hx_g hx_h hx_i hx_j
1 a A 1 0 0 0 0 0 0 0 0 0
2 b B 0 1 0 0 0 0 0 0 0 0
3 c C 0 0 1 0 0 0 0 0 0 0
4 d D 0 0 0 1 0 0 0 0 0 0
5 e E 0 0 0 0 1 0 0 0 0 0
6 f F 0 0 0 0 0 1 0 0 0 0
7 g G 0 0 0 0 0 0 1 0 0 0
8 h H 0 0 0 0 0 0 0 1 0 0
9 i I 0 0 0 0 0 0 0 0 1 0
10 j J 0 0 0 0 0 0 0 0 0 1
You were close with the mutate call, but what you ultimately want is a list of functions (one for each letter in letter_list) to pass to .fns. Since they are anonymous functions, name them the same as letter_list to help with across naming the columns
# One indicator closure per letter; the element names are what
# across()'s "{.fn}" template later inserts into the column names.
myFxs <- setNames(
  lapply(letter_list, function(l) {
    function(y) ifelse(y == l, 1, 0)
  }),
  letter_list
)
For whatever reason, .names seemed to be having a problem with the glue character vector (for me anyway). Since the functions are named for the letters they are matching against you can use the .fn pronoun to instead to pass a template to across
# Apply every named indicator function to y; "{.fn}" substitutes each
# function's name (the letter) into the new column names.
data %>%
  mutate(across(.cols = y, .fns = myFxs, .names = "hx_{.fn}"))
The code can be modified
Remove the {} around the list on the rhs of :=
It may be better to use transmute instead of mutate as mutate returns the whole data by default.
Once we get the column binded (_dfc) data from map, bind the original data with bind_cols
library(dplyr)
library(purrr)
# transmute() keeps only the new hx_<letter> column (unlike mutate,
# which would drag the whole frame along), so map_dfc() can
# column-bind exactly one column per letter.
var_naming <- function(dat, list) {
  new_col <- glue::glue("hx_{list}")
  dat %>%
    transmute(!!new_col := ifelse(y == list, 1, 0))
}
NOTE: list is a base R function to construct list data structure. It may be better to create functions with argument names different than the reserved words or function names already existing.
-testing
# Build one hx_ column per letter, then prepend the original columns.
bind_cols(data, map_dfc(letter_list, var_naming, dat = data))
-output
id y z hx_a hx_b hx_c hx_d hx_e hx_f hx_g hx_h hx_i hx_j
1 1 a A 1 0 0 0 0 0 0 0 0 0
2 2 b B 0 1 0 0 0 0 0 0 0 0
3 3 c C 0 0 1 0 0 0 0 0 0 0
4 4 d D 0 0 0 1 0 0 0 0 0 0
5 5 e E 0 0 0 0 1 0 0 0 0 0
6 6 f F 0 0 0 0 0 1 0 0 0 0
7 7 g G 0 0 0 0 0 0 1 0 0 0
8 8 h H 0 0 0 0 0 0 0 1 0 0
9 9 i I 0 0 0 0 0 0 0 0 1 0
10 10 j J 0 0 0 0 0 0 0 0 0 1
Another way to get the same results:
# One-hot encode y via model.matrix() (the "+ 0" drops the intercept so
# every level gets a column), then rename the y<letter> columns to
# hx_<letter>. The regex 'y\\B' only matches a "y" followed by another
# word character, so the original column y itself is left alone.
data %>%
  cbind(model.matrix(~y + 0, .)) %>%
  rename_with(\(nm) str_replace(nm, 'y\\B', 'hx_'))
id y z hx_a hx_b hx_c hx_d hx_e hx_f hx_g hx_h hx_i hx_j
1 1 a A 1 0 0 0 0 0 0 0 0 0
2 2 b B 0 1 0 0 0 0 0 0 0 0
3 3 c C 0 0 1 0 0 0 0 0 0 0
4 4 d D 0 0 0 1 0 0 0 0 0 0
5 5 e E 0 0 0 0 1 0 0 0 0 0
6 6 f F 0 0 0 0 0 1 0 0 0 0
7 7 g G 0 0 0 0 0 0 1 0 0 0
8 8 h H 0 0 0 0 0 0 0 1 0 0
9 9 i I 0 0 0 0 0 0 0 0 1 0
10 10 j J 0 0 0 0 0 0 0 0 0 1
If you only consider those in letters_list:
# Restrict the indicators to letter_list only: coerce y to a factor with
# exactly those levels, one-hot encode, keep the y<letter> columns in
# letter_list order, then rename them to hx_<letter>.
data %>%
mutate( y =factor(y, letter_list)) %>%
cbind(model.matrix(~y + 0, .) %>%
as_tibble() %>%
select(paste0('y', letter_list)) %>%
rename_with(~str_replace(., 'y', 'hx_')))

Split column of comma-separated numbers into multiple columns based on value

I have a column f in my dataframe that I would like to spread into multiple columns based on the values in that column. For example:
# Ten-row frame whose single character column f holds comma-separated
# numbers, with some NA rows (built with data.frame() instead of the
# dput-style structure() call; the result is the same).
df <- data.frame(
  f = c(NA, "18,17,10", "12,8", "17,11,6", "18",
        "12", "12", NA, "17,11", "12"),
  stringsAsFactors = FALSE
)
df
# f
# 1 <NA>
# 2 18,17,10
# 3 12,8
# 4 17,11,6
# 5 18
# 6 12
# 7 12
# 8 <NA>
# 9 17,11
# 10 12
How would I split column f into multiple columns indicating the numbers in the row. I'm interested in something like this:
6 8 10 11 12 17 18
1 0 0 0 0 0 0 0
2 0 0 1 0 0 1 1
3 0 1 0 0 1 0 0
4 1 0 0 1 0 1 0
5 0 0 0 0 0 0 1
6 0 0 0 0 1 0 0
7 0 0 0 0 1 0 0
8 0 0 0 0 0 0 0
9 0 0 0 1 0 1 0
10 0 0 0 0 1 0 0
I'm thinking I could use unique on the f column to create the separate columns based on the different numbers, and then use grepl to determine whether each specific number is in column f — but I was wondering if there is a better way, something similar to spread or separate in the tidyr package.
A solution using tidyr::separate_rows will be as:
library(tidyverse)
# Tag each row, explode the comma-separated values into one row apiece,
# recode NA rows to a "0" placeholder so they survive the count, count
# (row, value) pairs, go wide with one column per value (fill = 0 for
# absent values), then drop the placeholder column (position 2) so the
# NA rows come out as all-zero rows.
df %>% mutate(ind = row_number()) %>%
separate_rows(f, sep=",") %>%
mutate(f = ifelse(is.na(f),0, f)) %>%
count(ind, f) %>%
spread(f, n, fill = 0) %>%
select(-2) %>% as.data.frame()
# ind 10 11 12 17 18 6 8
# 1 1 0 0 0 0 0 0 0
# 2 2 1 0 0 1 1 0 0
# 3 3 0 0 1 0 0 0 1
# 4 4 0 1 0 1 0 1 0
# 5 5 0 0 0 0 1 0 0
# 6 6 0 0 1 0 0 0 0
# 7 7 0 0 1 0 0 0 0
# 8 8 0 0 0 0 0 0 0
# 9 9 0 1 0 1 0 0 0
# 10 10 0 0 1 0 0 0 0
This could be achieved by splitting on the ",", then stacking the result into a two-column data.frame and getting the frequency with table:
# Split each f on commas, name the resulting list elements by row
# number, stack into a (values, ind) frame, swap to (ind, values),
# drop the NA rows, and cross-tabulate row vs value.
df1 <- na.omit(
  stack(
    setNames(
      lapply(strsplit(df$f, ","), as.numeric),
      seq_len(nrow(df))
    )
  )[, 2:1]
)
table(df1)
# values
#ind 6 8 10 11 12 17 18
# 1 0 0 0 0 0 0 0
# 2 0 0 1 0 0 1 1
# 3 0 1 0 0 1 0 0
# 4 1 0 0 1 0 1 0
# 5 0 0 0 0 0 0 1
# 6 0 0 0 0 1 0 0
# 7 0 0 0 0 1 0 0
# 8 0 0 0 0 0 0 0
# 9 0 0 0 1 0 1 0
# 10 0 0 0 0 1 0 0

R: Generating sparse matrix with all elements as rows and columns

I have a data set of user-to-user trust records. It doesn't have all users as columns and rows. For example,
U1 U2 T
1 3 1
1 6 1
2 4 1
3 5 1
u1 and u2 represent users of the dataset. When I create a sparse matrix using following code, (df- keep all data of above dataset as a dataframe)
trustmatrix <- xtabs(T~U1+U2,df,sparse = TRUE)
3 4 5 6
1 1 0 0 1
2 0 1 0 0
3 0 0 1 0
But this matrix doesn't have all the users as rows and columns, as below:
1 2 3 4 5 6
1 0 0 1 0 0 1
2 0 0 0 1 0 0
3 0 0 0 0 1 0
4 0 0 0 0 0 0
5 0 0 0 0 0 0
6 0 0 0 0 0 0
If I want to obtain the above full matrix from the sparse matrix, how can I do so in R?
We can convert the columns to factor with levels as 1 through 6 and then use xtabs
# Make U1/U2 factors with the full 1..6 level set so xtabs() emits a
# row and a column for every user (observed or not), then densify the
# sparse cross-tabulation.
df1[1:2] <- lapply(df1[1:2], function(col) factor(col, levels = 1:6))
as.matrix(xtabs(T ~ U1 + U2, data = df1, sparse = TRUE))
# U2
#U1 1 2 3 4 5 6
# 1 0 0 1 0 0 1
# 2 0 0 0 1 0 0
# 3 0 0 0 0 1 0
# 4 0 0 0 0 0 0
# 5 0 0 0 0 0 0
# 6 0 0 0 0 0 0
Or another option is to get the expanded index filled with 0s and then use sparseMatrix
library(tidyverse)
library(Matrix)
# Build every (U1, U2) pair, attach the observed trust values, and
# zero-fill the pairs that were absent from df1.
df2 <- crossing(U1 = 1:6, U2 = 1:6) %>%
left_join(df1) %>%
mutate(T = replace(T, is.na(T), 0))
# i/j index the row/column users; x supplies the cell values.
sparseMatrix(i = df2$U1, j = df2$U2, x = df2$T)
Or use spread
spread(df2, U2, T)

Resources