Analysis of multiple response questions in R

df1 <- data.frame(c("male", "female", "male"),
                  c("1", "2", "3", "4", "5", "6"),
                  seq(141, 170))
names(df1) <- c("gender", "age", "height")
df1$age <- factor(
  df1$age,
  levels = c(1, 2, 3, 4, 5, 6),
  labels = c("16-24", "25-34", "35-44", "45-54", "55-64", "65+")
)
q1a <- c(1, 0, 1, 0, 0, 1)
q1b <- c(0, 0, 2, 2, 2, 0)
q1c <- c(0, 0, 3, 3, 0, 3)
# 1, 2 and 3 are used for compatibility with existing datasets.
# Could change all to 1 if necessary.
df2 <- data.frame(q1a = q1a, q1b = q1b, q1c = q1c)
df1 <- cbind(df1, df2)
rm(q1a, q1b, q1c, df2)
I am looking to replicate the analysis of multiple response questions from SPSS in R.
At the moment I am using this code:
# creating function for analysing questions with grouped data
multfreqtable <- function(a, b, c) {
  # number of respondents (for percent of cases)
  totrep <- sum(a == 1 | b == 2 | c == 3)
  # creating frequency table
  table_a <- data.frame("a", sum(a == 1))
  names(table_a) <- c("question", "freq")
  table_b <- data.frame("b", sum(b == 2))
  names(table_b) <- c("question", "freq")
  table_c <- data.frame("c", sum(c == 3))
  names(table_c) <- c("question", "freq")
  table_question <- rbind(table_a, table_b, table_c)
  # remove individual question tables
  rm(table_a, table_b, table_c)
  # adding total
  total <- as.data.frame("Total")
  totalsum <- sum(table_question$freq, na.rm = TRUE)
  totalrow <- cbind(total, totalsum)
  names(totalrow) <- c("question", "freq")
  table_question <- rbind(table_question, totalrow)
  # adding percentage column to frequency table
  percentcalc <- as.numeric(table_question$freq)
  percent <- (percentcalc / totalsum) * 100
  table_question <- cbind(table_question, percent)
  # adding percent of cases column to frequency table
  poccalc <- as.numeric(table_question$freq)
  percentofcases <- (poccalc / totrep) * 100
  table_question <- cbind(table_question, percentofcases)
  # export the number of respondents (percent of cases base) to the global environment
  total_respondents <<- data.frame(totrep)
  # remove all unnecessary data and values
  rm(total, totalsum, totalrow, b, c, percent,
     percentcalc, percentofcases, totrep, poccalc)
  return(table_question)
}
# calling function - must tie to data.frame using $ !!!
q1_frequency <- multfreqtable(df1$q1a, df1$q1b, df1$q1c)
# renaming total_respondents (used for percent of cases) - very important with the current method
total_respondents_q1 <- total_respondents
rm(total_respondents)
Producing a table with question, freq, percent and percentofcases columns as a result.
I am looking for a more efficient method of doing this, ideally one that would not require the function to be edited if there were more or fewer multiple choice questions.

Your function is actually far too complicated for what you need to do. I think a function like this should work and be more flexible.
multfreqtable = function(data, question.prefix) {
  # Find the columns with the questions
  a = grep(question.prefix, names(data))
  # Find the total number of responses
  b = sum(data[, a] != 0)
  # Find the totals for each question
  d = colSums(data[, a] != 0)
  # Find the number of respondents
  e = sum(rowSums(data[, a]) != 0)
  # d + b as a vector. This is your overall frequency
  f = as.numeric(c(d, b))
  data.frame(question = c(names(d), "Total"),
             freq = f,
             percent = (f / b) * 100,
             percentofcases = (f / e) * 100)
}
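One caveat, not part of the original answer: grep() matches the prefix anywhere in a column name, so a prefix like "q1" would also pick up a hypothetical column named "xq1a". Anchoring the pattern avoids that:
a = grep(paste0("^", question.prefix), names(data))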
Add another question to your example dataset:
set.seed(1); df1$q2a <- sample(c(0, 1), 30, replace = TRUE)
set.seed(2); df1$q2b <- sample(c(0, 2), 30, replace = TRUE)
set.seed(3); df1$q2c <- sample(c(0, 3), 30, replace = TRUE)
Make a table for "q1" responses:
> multfreqtable(df1, "q1")
question freq percent percentofcases
1 q1a 15 33.33333 60
2 q1b 15 33.33333 60
3 q1c 15 33.33333 60
4 Total 45 100.00000 180
Make a table for "q2" responses:
> multfreqtable(df1, "q2")
question freq percent percentofcases
1 q2a 14 31.11111 53.84615
2 q2b 13 28.88889 50.00000
3 q2c 18 40.00000 69.23077
4 Total 45 100.00000 173.07692
Tables for multiple questions
Here's a modified version of the function that allows you to create a list of tables for multiple questions at once:
multfreqtable = function(data, question.prefix) {
  z = length(question.prefix)
  temp = vector("list", z)
  for (i in 1:z) {
    a = grep(question.prefix[i], names(data))
    b = sum(data[, a] != 0)
    d = colSums(data[, a] != 0)
    e = sum(rowSums(data[, a]) != 0)
    f = as.numeric(c(d, b))
    temp[[i]] = data.frame(question = c(sub(question.prefix[i], "", names(d)), "Total"),
                           freq = f,
                           percent = (f / b) * 100,
                           percentofcases = (f / e) * 100)
    names(temp)[i] = question.prefix[i]
  }
  temp
}
Examples:
> multfreqtable(df1, "q1")
$q1
question freq percent percentofcases
1 a 15 33.33333 60
2 b 15 33.33333 60
3 c 15 33.33333 60
4 Total 45 100.00000 180
> test1 = multfreqtable(df1, c("q1", "q2"))
> test1
$q1
question freq percent percentofcases
1 a 15 33.33333 60
2 b 15 33.33333 60
3 c 15 33.33333 60
4 Total 45 100.00000 180
$q2
question freq percent percentofcases
1 a 14 31.11111 53.84615
2 b 13 28.88889 50.00000
3 c 18 40.00000 69.23077
4 Total 45 100.00000 173.07692
> test1$q1
question freq percent percentofcases
1 a 15 33.33333 60
2 b 15 33.33333 60
3 c 15 33.33333 60
4 Total 45 100.00000 180

I've noticed that this post is quite old; however, I couldn't find a more up-to-date solution. Here's my version, based on a dplyr/tidyverse approach.
library(dplyr)
library(tidyr)
mult_resp = function(df1, mv_q = c("q1a", "q1b", "q1c")) {
  df2 = df1 %>%
    mutate(id = rownames(.)) %>%  # row id for counting n_cases
    select(id, everything()) %>%
    mutate_at(mv_q, ~ ifelse(. != 0, 1, 0)) %>%
    gather(question, resp, -id, -gender, -age, -height)
  # count number of cases excluding "all zeros" cases
  n_cases = df2 %>%
    group_by(id) %>%
    summarise(n = sum(resp)) %>%
    summarise(sum(n > 0))
  # output table
  res = df2 %>%
    group_by(question) %>%
    summarise(freq = sum(resp)) %>%
    mutate(
      percent = freq / sum(freq) * 100,
      percent_of_cases = freq / as.numeric(n_cases) * 100
    ) %>%
    rbind(.,
          data.frame(question = "Total",
                     freq = sum(.$freq, na.rm = TRUE),
                     percent = sum(.$percent, na.rm = TRUE),
                     percent_of_cases = sum(.$percent_of_cases, na.rm = TRUE)))
  res
}
Example:
> mult_resp(df1, mv_q = c("q1a", "q1b", "q1c"))
# A tibble: 4 x 4
question freq percent percent_of_cases
<chr> <dbl> <dbl> <dbl>
1 q1a 15 33.3 60
2 q1b 15 33.3 60
3 q1c 15 33.3 60
4 Total 45 100. 180
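With current dplyr/tidyr, mutate_at() and gather() are superseded by across() and the pivot functions. A sketch of the same logic in that style (my own adaptation; the Total row is omitted for brevity, and selecting only the mv_q columns avoids relying on the fixed gender/age/height names):
library(dplyr)
library(tidyr)
mult_resp2 <- function(df, mv_q) {
  long <- df %>%
    mutate(id = row_number(),
           across(all_of(mv_q), ~ as.integer(. != 0))) %>%
    pivot_longer(all_of(mv_q), names_to = "question", values_to = "resp")
  # respondents with at least one non-zero answer
  n_cases <- long %>%
    group_by(id) %>%
    summarise(any_resp = any(resp > 0), .groups = "drop") %>%
    summarise(n = sum(any_resp)) %>%
    pull(n)
  long %>%
    group_by(question) %>%
    summarise(freq = sum(resp), .groups = "drop") %>%
    mutate(percent = freq / sum(freq) * 100,
           percent_of_cases = freq / n_cases * 100)
}
mult_resp2(df1, c("q1a", "q1b", "q1c"))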

It's an old question; however, you can use the userfriendlyscience package to analyze multiple-response survey data very easily.
library(userfriendlyscience)
multiResponse(data, c('v1', 'v2', 'v3'))
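Applied to the example data above, this might look as follows. This is a hypothetical sketch: it assumes multiResponse() counts a single endorsed code (1 by default), so the 2/3 codings of q1b and q1c are first recoded to 1:
library(userfriendlyscience)
df_rec <- df1
df_rec$q1b <- as.integer(df_rec$q1b != 0)  # recode 2 -> 1 (assumption: endorsed code must be 1)
df_rec$q1c <- as.integer(df_rec$q1c != 0)  # recode 3 -> 1
multiResponse(df_rec, c('q1a', 'q1b', 'q1c'))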

Related

Calculating n quantiles by group in tidyverse

I have a unique problem where I would like to add a column of percentiles for each group in a data frame. Here is what my data look like:
library(tidyverse)
set.seed(123)
df <- tibble(id = 1:100,
             group = rep(letters[1:4], 25),
             x = c(sample(1:100, 25, replace = TRUE),
                   sample(101:200, 25, replace = TRUE),
                   sample(201:300, 25, replace = TRUE),
                   sample(301:400, 25, replace = TRUE)))
> df
# A tibble: 100 x 3
id group x
<int> <chr> <int>
1 1 a 78
2 2 b 80
3 3 c 7
4 4 d 100
5 5 a 45
6 6 b 76
7 7 c 25
8 8 d 91
9 9 a 13
10 10 b 84
# ... with 90 more rows
# Function to create a table of ten percentiles for a numeric vector
percentiles_table <- function(x) {
  res <- round(quantile(x, probs = seq(from = 0.1, to = 1, by = 0.1)), 0)
  res <- data.frame(percentile = names(res), to = res)
  res %>%
    mutate(from = lag(to, default = 0)) %>%
    select(from, to, percentile)
}
# Table of percentiles
percentiles <- df %>%
  group_by(group) %>%
  summarise(percentiles_table(x)) %>%
  ungroup()
> percentiles
# A tibble: 40 x 4
group from to percentile
<chr> <dbl> <dbl> <chr>
1 a 0 25 10%
2 a 25 71 20%
3 a 71 106 30%
4 a 106 125 40%
5 a 125 198 50%
6 a 198 236 60%
7 a 236 278 70%
8 a 278 325 80%
9 a 325 379 90%
10 a 379 389 100%
I would like to add the percentile column to df for each group where the value of x falls between from and to.
There might be some way to calculate the percentile column directly, without having to calculate it in a separate data.frame and then append it back to df.
A one-liner with my santoku package:
library(santoku)
df |>
group_by(group) |>
mutate(
percentile = chop_quantiles(x, 0:100/100,
labels = lbl_endpoint())
)
# A tibble: 100 × 4
# Groups: group [4]
id group x percentile
<int> <chr> <int> <fct>
1 1 a 35 8%
2 2 b 97 20%
3 3 c 39 4%
4 4 d 20 8%
5 5 a 89 16%
...
Using data.table (setDT() needs the package loaded):
library(data.table)
setDT(df)[
  ,
  percentile := cut(
    x,
    quantile(x, seq(0, 1, 0.1)),
    include.lowest = TRUE,
    labels = paste0(seq(10, 100, 10), "%")
  ),
  by = group
]
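For comparison, the same cut()-based idea written with dplyr (my own sketch; breaks and labels mirror the data.table call above):
df %>%
  group_by(group) %>%
  mutate(percentile = cut(x,
                          quantile(x, seq(0, 1, 0.1)),
                          include.lowest = TRUE,
                          labels = paste0(seq(10, 100, 10), "%"))) %>%
  ungroup()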
install.packages("zoo")
library(zoo)
y=as.data.frame(c(0:max(percentiles$to)))
y=merge(y,unique(percentiles[,c(1)]))
y=merge(y,percentiles[,c(1,2,4)], by.x = c("group","c(0:max(percentiles$to))"), by.y = c("group","from"), all.x = TRUE)
y=na.locf(y)
df=merge(df,y, all.x = TRUE, by.x = c("group","x"), by.y = c("group","c(0:max(percentiles$to))"))
I got this working solution.
percentile_ranks <- function(x) {
  res <- trunc(rank(x)) / length(x) * 100
  floor(res / 10)
}
df <- df %>%
  group_by(group) %>%
  arrange(x) %>%
  mutate(percentile = percentile_ranks(x)) %>%
  mutate(percentile_pct = paste0(percentile * 10, "%")) %>%
  ungroup() %>%
  arrange(id)  # restore the original data.frame order

How can I delete rows which have two or more words (after each other) in a sequence?

I want to remove the rows in which the same word occurs two or more times directly after each other, like a sequence. This is for a sequential pattern mining analysis. I already tried the distinct() and duplicated() functions, but these only remove rows that are complete duplicates of each other.
r_seq_5 <- r_seq_5[!duplicated(r_seq_5), ]  # remove duplicate rows
#   Su Score result       ROI  next_roi third_roi  four_roi   five_roi
# 1  1    90   high Elsewhere Elsewhere   Teacher   Teacher    Teacher
# 2  1    90   high Elsewhere   Teacher   Teacher   Teacher    Teacher
# 3  1    90   high   Teacher       Pen   Teacher Elsewhere Smartboard
This is the table. It doesn't matter if Teacher appears two or three times in the sequence, as long as the occurrences are not directly after each other.
The desired result is:
# 1 1 90 high Teacher Pen Teacher Elsewhere Smartboard
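For reference, a compact base-R sketch of this filter (my own addition; it assumes the five ROI columns are named as in the table above and keeps only rows in which no value equals the one immediately before it):
roi_cols <- c("ROI", "next_roi", "third_roi", "four_roi", "five_roi")
m <- as.matrix(r_seq_5[, roi_cols])
no_repeat <- rowSums(m[, -1] == m[, -ncol(m)]) == 0
r_seq_5[no_repeat, ]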
To do this, I found it convenient to turn the factors into numbers, and this was my first step, because comparing matches across columns seemed less arduous that way. For this I used a for loop and the qdap package: each label is replaced with NA, and qdap::NAer() then recodes the NAs to the corresponding number.
library(dplyr)
library(qdap)
df <- data.frame(Su = rep(1, 3),
                 Score = rep(90, 3),
                 ROI = c("A", "A", "B"),
                 NETX_ROI = c("A", "B", "C"),
                 third_roi = rep("B", 3),
                 four_roi = c("B", "B", "A"),
                 five_roi = c("B", "B", "D"))
df
> df
Su Score ROI NETX_ROI third_roi four_roi five_roi
1 1 90 A A B B B
2 1 90 A B B B B
3 1 90 B C B A D
df2 <- df
roi <- c("A", "B", "C", "D")
# A = Elsewhere
# B = Teacher
# C = Pen
# D = Smartboard
n <- seq(1, length.out = length(roi))
for (i in 1:length(n)) {
  df2[df2 == roi[i]] <- NA   # blank out the current label
  df2 <- qdap::NAer(df2, i)  # recode the NAs to number i
}
> df2
Su Score ROI NETX_ROI third_roi four_roi five_roi
1 1 90 1 1 2 2 2
2 1 90 1 2 2 2 2
3 1 90 2 3 2 1 4
df2 <- df2 %>%
  dplyr::select(-c(Su, Score)) %>%
  as.matrix()
nn <- ncol(df2)
x <- matrix(nrow = nrow(df2), ncol = ncol(df2) - 1)
for (i in 1:(nn - 1)) {
  xx <- ifelse(df2[, i] == df2[, i + 1], NA, 0)  # NA marks an adjacent repeat
  x[, i] <- as.matrix(xx)
}
> x
[,1] [,2] [,3] [,4]
[1,] NA 0 NA NA
[2,] 0 NA NA NA
[3,] 0 0 0 0
Finally, I just removed the rows containing NA.
dfx <- x %>%
  as.data.frame()
df_test <- df %>%
  dplyr::bind_cols(dfx) %>%
  na.omit() %>%
  dplyr::select(1:ncol(df))
df_test
> df_test
Su Score ROI NETX_ROI third_roi four_roi five_roi
3 1 90 B C B A D
You can use gather() in order to regroup your variables, then build a loop to flag whether each value is the same as the preceding one. Finally, use spread() in order to rebuild your initial structure.
library(dplyr)
library(tidyr)
df <- data.frame(
  row = 1:4,
  Su = 1,
  Score = 90,
  result = 'high',
  ROI = c('A', 'A', 'B', 'A'),
  ROI2 = c('A', 'B', 'C', 'B'),
  ROI3 = c('B', 'B', 'A', 'C')
) %>%
  gather(-(row:result), key = roi, value = value) %>%
  arrange(row) %>%
  mutate(repeated = 0)
for (i in 2:nrow(df)) {
  if (df$row[i] == df$row[i-1] & df$value[i] == df$value[i-1])
    df$repeated[i] <- 1
}
df %>%
  group_by(row) %>%
  mutate(repeated = sum(repeated)) %>%
  filter(repeated == 0) %>%
  select(-repeated) %>%
  spread(key = roi, value = value)
#     row    Su Score result ROI   ROI2  ROI3
#   <int> <dbl> <dbl> <fct>  <chr> <chr> <chr>
# 1     3     1    90 high   B     C     A
# 2     4     1    90 high   A     B     C

Group rows in data.frame and find quantile [duplicate]

This question already has answers here: Aggregate / summarize multiple variables per group (e.g. sum, mean) (10 answers). Closed 3 years ago.
I have the following data:
set.seed(789)
df_1 = data.frame(a = 22, b = 24, c = rnorm(10))
df_2 = data.frame(a = 44, b = 24, c = rnorm(10))
df_3 = data.frame(a = 33, b = 99, c = rnorm(10))
df_all = rbind(df_1, df_2, df_3)
I need to group df_all by column a and b, and then find the 50th quantile based on column c.
This can be done individually, for each df, as follows:
df_1_q = quantile(df_1$c, probs = 0.50)
df_2_q = quantile(df_2$c, probs = 0.50)
df_3_q = quantile(df_3$c, probs = 0.50)
However my real df_all is larger than this.
And more generally, how can I group a data.frame by rows and apply a given function?
thanks
You could use dplyr for that
library(dplyr)
df_all %>%
  group_by(a, b) %>%
  summarise(quantile = quantile(c, probs = 0.5))
# A tibble: 3 x 3
# Groups: a [?]
a b quantile
<dbl> <dbl> <dbl>
1 22 24 -0.268
2 33 99 -0.234
3 44 24 -0.445
Or using data.table:
library(data.table)
dt <- data.table(df_all)
dt[, list(quantile = quantile(c, probs = 0.5)), by = c("a", "b")]
a b quantile
1: 22 24 -0.2679104
2: 44 24 -0.4450979
3: 33 99 -0.2336712
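For completeness, the same grouped quantile can also be computed in base R (my own addition, not from the original answers):
aggregate(c ~ a + b, data = df_all, FUN = quantile, probs = 0.5)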

Aggregate frequency classification table

I work with R, and I have a table xy like this:
View(xy)
X Y
21 A
33 B
24 B
16 A
25 B
31 A
17 B
14 A
Now, I want to classify X into groups in steps of 10 and tabulate the frequencies of Y, so the end result looks like this:
Class A B
I (1-10) 0 0
II (11-20) 2 1
III (21-30) 1 2
And so on
First create the labels, using either the commented-out hard-coded labels or the computed labels lab. Then use cut and table to create the resulting table.
# lab <- c("I (1-10)", "II (11-20)", "III (21-30)", "IV (31-40)")
n <- ceiling(max(DF$X) / 10)  # 4
bounds <- seq(0, 10 * n, 10)  # c(0, 10, 20, 30, 40)
lab <- sprintf("%s (%d-%d)", as.roman(1:n), head(bounds, -1) + 1, bounds[-1])
Class <- cut(DF$X, bounds, labels = lab)
table(Class, Y = DF$Y)
giving:
Y
Class A B
I (1-10) 0 0
II (11-20) 2 1
III (21-30) 1 2
IV (31-40) 1 1
Note
We assume the input data frame DF is the following shown in reproducible form:
Lines <- "
X Y
21 A
33 B
24 B
16 A
25 B
31 A
17 B
14 A"
DF <- read.table(text = Lines, header = TRUE)
One tidyverse possibility could be:
df %>%
  mutate(Class = X %/% 10) %>%
  count(Y, Class) %>%
  group_by(Y) %>%
  complete(Class = seq(0, max(Class), 1)) %>%
  spread(Y, n, fill = 0)
Class A B
<dbl> <dbl> <dbl>
1 0 0 0
2 1 2 1
3 2 1 2
4 3 1 1
Or if you also want the ranges:
df %>%
  mutate(Class = X %/% 10) %>%
  count(Y, Class) %>%
  group_by(Y) %>%
  complete(Class = seq(0, max(Class), 1)) %>%
  spread(Y, n, fill = 0) %>%
  mutate(Class = paste(Class * 10 + 1,
                       lead(Class * 10, default = (last(Class) + 1) * 10),
                       sep = "-"))
Class A B
<chr> <dbl> <dbl>
1 1-10 0 0
2 11-20 2 1
3 21-30 1 2
4 31-40 1 1
Or if you want the exact output you provided:
df %>%
  mutate(Class = X %/% 10) %>%
  count(Y, Class) %>%
  group_by(Y) %>%
  complete(Class = seq(0, max(Class), 1)) %>%
  spread(Y, n, fill = 0) %>%
  mutate(Class = paste0("(",
                        Class * 10 + 1,
                        "-",
                        lead(Class * 10, default = (last(Class) + 1) * 10),
                        ")"),
         Class = paste(as.roman(row_number()), Class, sep = " "))
Class A B
<chr> <dbl> <dbl>
1 I (1-10) 0 0
2 II (11-20) 2 1
3 III (21-30) 1 2
4 IV (31-40) 1 1
Or a possibility for the cases when X == 0:
df %>%
  filter(X > 0) %>%
  mutate(Class = X %/% 10) %>%
  count(Y, Class) %>%
  group_by(Y) %>%
  complete(Class = seq(0, max(Class), 1)) %>%
  spread(Y, n, fill = 0) %>%
  mutate(Class = paste0("(",
                        Class * 10 + 1,
                        "-",
                        lead(Class * 10, default = (last(Class) + 1) * 10),
                        ")"),
         Class = paste(as.roman(row_number()), Class, sep = " "))
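One caveat worth noting (my own observation, not part of the original answer): X %/% 10 assigns exact multiples of 10, such as 20, to the next-higher class. If such values can occur, (X - 1) %/% 10 keeps them in the intended range:
df %>% mutate(Class = (X - 1) %/% 10)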

Fast way to find min in groups after excluding observations using R

I need to do something similar to below on a very large data set (with many groups), and read somewhere that using .SD is slow. Is there any faster way to perform the following operation?
To be more precise, I need to create a new column that contains the min value for each group after having excluded a subset of observations in that group (something similar to minif in Excel).
library(data.table)
dt <- data.table(valid = c(0, 1, 1, 0, 1),
                 a = c(1, 1, 2, 3, 4),
                 groups = c("A", "A", "A", "B", "B"))
dt[, valid_min := .SD[valid == 1, min(a, na.rm = TRUE)], by = groups]
With the output:
> dt
   valid a groups valid_min
1:     0 1      A         1
2:     1 1      A         1
3:     1 2      A         1
4:     0 3      B         4
5:     1 4      B         4
To make it even more complicated, groups could have no valid entries or they could have multiple valid but missing entries. My current code is similar to this:
dt <- data.table(valid = c(0, 1, 1, 0, 1, 0, 1, 1),
                 a = c(1, 1, 2, 3, 4, 3, NA, NA),
                 k = c("A", "A", "A", "B", "B", "C", "D", "D"))
dt[, valid_min := .SD[valid == 1,
                      ifelse(all(is.na(a)), NA_real_, min(a, na.rm = TRUE))],
   by = k]
Output:
> dt
valid a k valid_min
1: 0 1 A 1
2: 1 1 A 1
3: 1 2 A 1
4: 0 3 B 4
5: 1 4 B 4
6: 0 3 C NA
7: 1 NA D NA
8: 1 NA D NA
There's...
dt[dt[valid == 1 & !is.na(a), min(a), by = k], on = .(k), the_min := i.V1]
This should be fast since the inner call to min is optimized for groups. (See ?GForce.)
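To check whether GForce actually kicks in, you can run the grouped min with data.table's verbose output (a quick diagnostic, not part of the original answer):
options(datatable.verbose = TRUE)
dt[valid == 1 & !is.na(a), min(a), by = k]  # look for a "GForce optimized j" message
options(datatable.verbose = FALSE)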
We can do the same using dplyr:
library(dplyr)
dt %>%
  group_by(groups) %>%
  mutate(valid_min = min(ifelse(valid == 1, a, NA), na.rm = TRUE))
Which gives:
valid a groups valid_min
<dbl> <dbl> <chr> <dbl>
1 0 1 A 1
2 1 1 A 1
3 1 2 A 1
4 0 3 B 4
5 1 4 B 4
Alternatively, if you are not interested in keeping the 'non-valid' rows, we can do the following:
dt %>%
  filter(valid == 1) %>%
  group_by(groups) %>%
  mutate(valid_min = min(a))
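If the non-valid rows are needed afterwards, the grouped minima can also be computed on the filtered data and joined back (a sketch of my own, using the column names from the first example):
valid_mins <- dt %>%
  filter(valid == 1, !is.na(a)) %>%
  group_by(groups) %>%
  summarise(valid_min = min(a), .groups = "drop")
left_join(dt, valid_mins, by = "groups")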
Looks like I provided the slowest approach. Comparing each approach (using a larger, replicated data frame called df) with a microbenchmark test:
library(microbenchmark)
library(ggplot2)
mbm <- microbenchmark(
  dplyr.test = suppressWarnings(df %>%
    group_by(k) %>%
    mutate(valid_min = min(ifelse(valid == 1, a, NA), na.rm = TRUE),
           valid_min = ifelse(valid_min == Inf, NA, valid_min))),
  data.table.test = df[, valid_min := .SD[valid == 1,
    ifelse(all(is.na(a)), NA_real_, min(a, na.rm = TRUE))], by = k],
  GForce.test = df[df[valid == 1 & !is.na(a), min(a), by = k], on = .(k), the_min := i.V1]
)
autoplot(mbm)
...well, I tried...
