Rowwise proportion test and add p value as new column - r

My data:
c5 = structure(list(comorbid = c("heart", "ihd", "cabg", "angio",
"cerebrovasc", "diabetes", "pvd", "amputation", "liver", "malig",
"smoke", "ulcers"), AVF_Y = c(626L, 355L, 266L, 92L, 320L, 1175L,
199L, 89L, 75L, 450L, 901L, 114L), AVG_Y = c(54L, 14L, 18L, 5L,
21L, 37L, 5L, 7L, 5L, 29L, 33L, 3L), AVF_tot = c(2755L, 1768L,
2770L, 2831L, 2844L, 2877L, 1745L, 2823L, 2831L, 2823L, 2798L,
2829L), AVG_tot = c(161L, 61L, 161L, 165L, 166L, 167L, 61L, 165L,
165L, 165L, 159L, 164L)), row.names = c(NA, -12L), class = "data.frame")
I want to perform a prop.test for each row (a two-proportion z-test) and add the p-value as a new column.
I've tried using the following code, but this gives me 24 1-sample proportion test results instead of 12 2-sample tests for equality of proportions.
Map(prop.test, x = c(c5$AVF_Y, c5$AVG_Y), n = c(c5$AVF_tot, c5$AVG_tot))

Use an anonymous (lambda) function and extract the p-value. When we concatenate the columns up front, the result is a single vector whose length is twice the number of rows of the data, so each element gets looped over individually. Instead, concatenate within the loop to create a vector of length 2 for each x and n from the corresponding '_Y' and '_tot' columns:
mapply(function(avf, avg, avf_n, avg_n)
         prop.test(c(avf, avg), c(avf_n, avg_n))$p.value,
       c5$AVF_Y, c5$AVG_Y, c5$AVF_tot, c5$AVG_tot)
-output
[1] 2.218376e-03 6.985883e-01 6.026012e-01 1.000000e+00 6.695440e-01 2.425781e-06 5.672322e-01 5.861097e-01 9.627050e-01 6.546286e-01 3.360300e-03 2.276857e-01
Or use do.call with mapply (the same idea works with Map):
do.call(mapply, c(FUN = function(x, y, n1, n2)
prop.test(c(x, y), c(n1, n2))$p.value, unname(c5[-1])))
[1] 2.218376e-03 6.985883e-01 6.026012e-01 1.000000e+00 6.695440e-01 2.425781e-06 5.672322e-01 5.861097e-01 9.627050e-01 6.546286e-01 3.360300e-03 2.276857e-01
Or with apply
apply(c5[-1], 1, function(x) prop.test(x[1:2], x[3:4])$p.value)
[1] 2.218376e-03 6.985883e-01 6.026012e-01 1.000000e+00 6.695440e-01 2.425781e-06 5.672322e-01 5.861097e-01 9.627050e-01 6.546286e-01 3.360300e-03 2.276857e-01
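Each of these returns the same numeric vector, so to actually add the p-value as a new column, assign it back to the data, e.g. with the apply version:
c5$pval <- apply(c5[-1], 1, function(x) prop.test(x[1:2], x[3:4])$p.value)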
Or use rowwise
library(dplyr)
c5 %>%
  rowwise %>%
  mutate(pval = prop.test(c(AVF_Y, AVG_Y),
                          n = c(AVF_tot, AVG_tot))$p.value) %>%
  ungroup
-output
# A tibble: 12 × 6
   comorbid    AVF_Y AVG_Y AVF_tot AVG_tot       pval
   <chr>       <int> <int>   <int>   <int>      <dbl>
 1 heart         626    54    2755     161 0.00222
 2 ihd           355    14    1768      61 0.699
 3 cabg          266    18    2770     161 0.603
 4 angio          92     5    2831     165 1.00
 5 cerebrovasc   320    21    2844     166 0.670
 6 diabetes     1175    37    2877     167 0.00000243
 7 pvd           199     5    1745      61 0.567
 8 amputation     89     7    2823     165 0.586
 9 liver          75     5    2831     165 0.963
10 malig         450    29    2823     165 0.655
11 smoke         901    33    2798     159 0.00336
12 ulcers        114     3    2829     164 0.228
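Note that prop.test applies Yates' continuity correction by default, so these p-values differ slightly from a hand-computed two-proportion z-test. If the uncorrected test is wanted, pass correct = FALSE, e.g. in the rowwise version:
c5 %>%
  rowwise %>%
  mutate(pval = prop.test(c(AVF_Y, AVG_Y),
                          n = c(AVF_tot, AVG_tot),
                          correct = FALSE)$p.value) %>%
  ungroup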

Related

Calculate average based on columns in 2 dataframes and their values via mutate in R?

I have a dataframe and a pipeline that counts each Response.Status per month with this mutate function:
DF1 <- complete_df %>%
  mutate(Month = format(as.Date(date, format = "%Y/%m/%d"), "%m/%Y"),
         UNSUBSCRIBE = if_else(UNSUBSCRIBE == "TRUE", "UNSUBSCRIBE", NA_character_)) %>%
  pivot_longer(c(Response.Status, UNSUBSCRIBE), values_to = "Response.Status") %>%
  drop_na() %>%
  count(Month, Response.Status) %>%
  pivot_wider(names_from = Month, names_sep = "/", values_from = n)
# A tibble: 7 x 16
Response.Status `01/2020` `02/2020` `03/2020` `04/2020` `05/2020` `06/2020` `07/2020` `08/2020` `09/2019` `09/2020` `10/2019` `10/2020` `11/2019` `11/2020` `12/2019`
<chr> <int> <int> <int> <int> <int> <int> <int> <int> <int> <int> <int> <int> <int> <int> <int>
1 EMAIL_OPENED 1068 3105 4063 4976 2079 1856 4249 3638 882 4140 865 2573 1167 684 862
2 NOT_RESPONDED 3187 9715 13164 15239 5458 4773 12679 10709 2798 15066 2814 8068 3641 1931 2647
3 PARTIALLY_SAVED 5 34 56 8 28 22 73 86 11 14 7 23 8 8 2
4 SUBMITTED 216 557 838 828 357 310 654 621 214 1001 233 497 264 122 194
5 SURVEY_OPENED 164 395 597 1016 245 212 513 625 110 588 123 349 202 94 120
6 UNDELIVERED_OR_BOUNCED 92 280 318 260 109 127 319 321 63 445 69 192 93 39 74
7 UNSUBSCRIBE 397 1011 1472 1568 727 737 1745 2189 372 1451 378 941 429 254 355
What I would like to do is take the values created in that table and calculate an average based on the number of people in each Response.Status group.
structure(list(Response.Status = c("EMAIL_OPENED", "NOT_RESPONDED",
"PARTIALLY_SAVED", "SUBMITTED", "SURVEY_OPENED", "UNDELIVERED_OR_BOUNCED"
), `01/2020` = c(1068L, 3187L, 5L, 216L, 164L, 92L), `02/2020` = c(3105L,
9715L, 34L, 557L, 395L, 280L), `03/2020` = c(4063L, 13164L, 56L,
838L, 597L, 318L), `04/2020` = c(4976L, 15239L, 8L, 828L, 1016L,
260L), `05/2020` = c(2079L, 5458L, 28L, 357L, 245L, 109L), `06/2020` = c(1856L,
4773L, 22L, 310L, 212L, 127L), `07/2020` = c(4249L, 12679L, 73L,
654L, 513L, 319L), `08/2020` = c(3638L, 10709L, 86L, 621L, 625L,
321L), `09/2019` = c(882L, 2798L, 11L, 214L, 110L, 63L), `09/2020` = c(4140L,
15066L, 14L, 1001L, 588L, 445L), `10/2019` = c(865L, 2814L, 7L,
233L, 123L, 69L), `10/2020` = c(2573L, 8068L, 23L, 497L, 349L,
192L), `11/2019` = c(1167L, 3641L, 8L, 264L, 202L, 93L), `11/2020` = c(684L,
1931L, 8L, 122L, 94L, 39L), `12/2019` = c(862L, 2647L, 2L, 194L,
120L, 74L)), row.names = c(NA, -6L), class = c("tbl_df", "tbl",
"data.frame"))
I made a separate table that contains sum values based on those group names:
Response.Status
EMAIL_OPENED : 451
NOT_RESPONDED : 1563
PARTIALLY_SAVED : 4
SUBMITTED : 71
SURVEY_OPENED : 53
UNDELIVERED_OR_BOUNCED: 47
UNSUBSCRIBE: 135
If I understood your problem correctly, you have 2 data.frames/tibbles: one that is shown in the "structure" part and one that gives the quantity of people/users per response status. Now you want to get the value per person. If so, this is a possible solution:
# people/users data set
df2 <- data.frame(Response.Status = c("EMAIL_OPENED", "NOT_RESPONDED", "PARTIALLY_SAVED", "SUBMITTED", "SURVEY_OPENED", "UNDELIVERED_OR_BOUNCED", "UNSUBSCRIBE"),
PEOPLE = c(451, 1563, 4, 71, 53, 47, 135))
df %>% # this is your "structure"
  tidyr::pivot_longer(-Response.Status, names_to = "DATE", values_to = "nmbr") %>%
  dplyr::group_by(Response.Status) %>%
  dplyr::summarise(SUM = sum(nmbr)) %>%
  dplyr::inner_join(df2) %>%
  dplyr::mutate(MEAN_PP = SUM / PEOPLE)
  Response.Status           SUM PEOPLE MEAN_PP
  <chr>                   <int>  <dbl>   <dbl>
1 EMAIL_OPENED            36207    451    80.3
2 NOT_RESPONDED          111889   1563    71.6
3 PARTIALLY_SAVED           385      4    96.2
4 SUBMITTED                6906     71    97.3
5 SURVEY_OPENED            5353     53   101
6 UNDELIVERED_OR_BOUNCED   2801     47    59.6
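For comparison, a rough base R sketch of the same computation (assuming df is the wide tibble from the question and df2 is the people table defined above):
sums <- data.frame(Response.Status = df$Response.Status,
                   SUM = rowSums(df[-1]))
out <- merge(sums, df2, by = "Response.Status")
out$MEAN_PP <- out$SUM / out$PEOPLE
out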

Create a new table from an existing one on a criteria in R

I've done a self-paced reading experiment in which 151 participants read 112 sentences divided into three lists and I'm having some problems cleaning the data in R. I'm not a programmer so I'm kind of struggling with all this!
I've got the results file which looks something like this:
results
part item word      n.word   rt
  51  106 *              1  382
  51  106 El             2  286
  51  106 asistente      3  327
  51  106 del            4  344
  51  106 carnicero      5  394
  51  106 que            6  274
  51  106 abaplía        7 2327
  51  106 el             8 1104
  51  106 sabor          9  409
  51  106 del           10  360
  51  106 pollo         11 1605
  51  106 envipió       12  256
  51  106 un            13 4573
  51  106 libro         14  660
  51  106 *.            15  519
part=participant; item=sentence; n.word=word number; rt=reading time.
In the results file, I have the reading times of every word of every sentence read by every participant. Every participant read roughly 40 sentences. My problem is that I am interested in the reading times of specific words, such as the main verb or the last word of each sentence. But as every sentence is a bit different, the main verb is not always in the same position in each sentence. So I've made another table with the positions of the words I'm interested in for every sentence.
rules
item v1 v2 n1 n2
106 12 7 3 5
107 11 8 3 6
108 11 8 3 6
item=sentence; v1=main verb; v2=secondary verb; n1=first noun; n2=second noun.
So this should be read: For sentence 106, the main verb is the word number 12, the secondary verb is the word number 7 and so on.
I want to have a final table that looks like this:
results2
part item v1 v2 n1 n2
51 106 256 2327 327 394
51 107 ...
52 106 ...
Does anyone know how to do this? It's kind of a long-to-wide problem, but with a more complex scenario.
If anyone could help me, I would really appreciate it! Thanks!!
You can try the following code, which joins your results data to a reshaped rules data, and then reshapes the result into a wider form.
library(tidyr)
library(dplyr)
inner_join(select(results, -word),
           pivot_longer(rules, -item),
           c("item", "n.word" = "value")) %>%
  select(-n.word) %>%
  pivot_wider(names_from = name, values_from = rt) %>%
  select(part, item, v1, v2, n1, n2)
# A tibble: 1 x 6
# part item v1 v2 n1 n2
# <int> <int> <int> <int> <int> <int>
#1 51 106 256 2327 327 394
Data:
results <- structure(list(part = c(51L, 51L, 51L, 51L, 51L, 51L, 51L, 51L,
51L, 51L, 51L, 51L, 51L, 51L, 51L), item = c(106L, 106L, 106L,
106L, 106L, 106L, 106L, 106L, 106L, 106L, 106L, 106L, 106L, 106L,
106L), word = c("*", "El", "asistente", "del", "carnicero", "que",
"abapl’a", "el", "sabor", "del", "pollo", "envipi—", "un", "libro",
"*."), n.word = 1:15, rt = c(382L, 286L, 327L, 344L, 394L, 274L,
2327L, 1104L, 409L, 360L, 1605L, 256L, 4573L, 660L, 519L)), class = "data.frame", row.names = c(NA,
-15L))
rules <- structure(list(item = 106:108, v1 = c(12L, 11L, 11L), v2 = c(7L,
8L, 8L), n1 = c(3L, 3L, 3L), n2 = c(5L, 6L, 6L)), class = "data.frame", row.names = c(NA,
-3L))
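If you prefer to stay in base R, a rough equivalent builds the long version of rules by hand, then merges and reshapes (a sketch using the same results and rules objects; the value columns come back named rt.v1, rt.v2, etc.):
# long form of rules: one row per (item, role, word position)
rules_long <- data.frame(item = rep(rules$item, 4),
                         name = rep(c("v1", "v2", "n1", "n2"), each = nrow(rules)),
                         value = c(rules$v1, rules$v2, rules$n1, rules$n2))
# keep only the words at the positions of interest, then go wide
m <- merge(results, rules_long,
           by.x = c("item", "n.word"), by.y = c("item", "value"))
reshape(m[c("part", "item", "name", "rt")], direction = "wide",
        idvar = c("part", "item"), timevar = "name")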

How to create a crosstab table using two tables in R?

I have an Excel dataset as follows:
Weight Quantity Price
72 5 460
73 8 720
75 20 830
95 2 490
91 15 680
82 14 340
88 30 250
89 6 770
78 27 820
98 24 940
99 29 825
I want to get a weight vs Quantity pivot table with sum of prices for each category as follows:
        0-10 10-20 20-30
70-80   1180   830   820
80-90    770   340   250
90-100   490   680  1765
I had created two tables for the individual categories to get the average and count using the dplyr package as follows:
table1 <- group_by(dataset, Weight = cut(Weight, breaks = c(70,80,90,100)))
result1 <- summarise(table1, Count = n(), Avg_Price = mean(Price, na.rm = T))
table2 <- group_by(dataset, Quantity = cut(Quantity, breaks = c(0,10,20,30)))
result2 <- summarise(table2, Count = n(), Avg_Price = mean(Price, na.rm = T))
Now, how do I use table1 and table2 to create a crosstab table as above?
Maybe the following is what you want. It uses cut like you have, then xtabs.
Weight = cut(dataset$Weight, breaks = c(70,80,90,100))
Quantity = cut(dataset$Quantity, breaks = c(0,10,20,30))
dt2 <- data.frame(Weight, Quantity, Price = dataset$Price)
xtabs(Price ~ Weight + Quantity, dt2)
# Quantity
#Weight (0,10] (10,20] (20,30]
# (70,80] 1180 830 820
# (80,90] 770 340 250
# (90,100] 490 680 1765
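xtabs returns a contingency-table object; if a plain data frame is preferred, the 2-dimensional table can be converted with as.data.frame.matrix:
as.data.frame.matrix(xtabs(Price ~ Weight + Quantity, dt2))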
A dplyr and tidyr solution:
library(dplyr)
library(tidyr)
df %>%
  mutate(Weight = cut(Weight, breaks = c(70,80,90,100)),
         Quantity = cut(Quantity, breaks = c(0,10,20,30))) %>%
  group_by(Weight, Quantity) %>%
  summarise(Price = sum(Price)) %>%
  spread(Quantity, Price)
# A tibble: 3 x 4
# Groups: Weight [3]
Weight `(0,10]` `(10,20]` `(20,30]`
* <fct> <int> <int> <int>
1 (70,80] 1180 830 820
2 (80,90] 770 340 250
3 (90,100] 490 680 1765
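spread still works but has been superseded; on tidyr >= 1.0 the last step can be written with pivot_wider instead (a sketch of the same pipeline; the .groups argument needs dplyr >= 1.0):
df %>%
  mutate(Weight = cut(Weight, breaks = c(70,80,90,100)),
         Quantity = cut(Quantity, breaks = c(0,10,20,30))) %>%
  group_by(Weight, Quantity) %>%
  summarise(Price = sum(Price), .groups = "drop") %>%
  pivot_wider(names_from = Quantity, values_from = Price)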
Data:
df <- structure(list(Weight = c(72L, 73L, 75L, 95L, 91L, 82L, 88L,
89L, 78L, 98L, 99L), Quantity = c(5L, 8L, 20L, 2L, 15L, 14L,
30L, 6L, 27L, 24L, 29L), Price = c(460L, 720L, 830L, 490L, 680L,
340L, 250L, 770L, 820L, 940L, 825L)), .Names = c("Weight", "Quantity",
"Price"), class = "data.frame", row.names = c(NA, -11L))

how to make a summary table [duplicate]

I have the following data frame (df) of 29 observations of 5 variables:
age height_seca1 height_chad1 height_DL weight_alog1
1 19 1800 1797 180 70
2 19 1682 1670 167 69
3 21 1765 1765 178 80
4 21 1829 1833 181 74
5 21 1706 1705 170 103
6 18 1607 1606 160 76
7 19 1578 1576 156 50
8 19 1577 1575 156 61
9 21 1666 1665 166 52
10 17 1710 1716 172 65
11 28 1616 1619 161 66
12 22 1648 1644 165 58
13 19 1569 1570 155 55
14 19 1779 1777 177 55
15 18 1773 1772 179 70
16 18 1816 1809 181 81
17 19 1766 1765 178 77
18 19 1745 1741 174 76
19 18 1716 1714 170 71
20 21 1785 1783 179 64
21 19 1850 1854 185 71
22 31 1875 1880 188 95
23 26 1877 1877 186 106
24 19 1836 1837 185 100
25 18 1825 1823 182 85
26 19 1755 1754 174 79
27 26 1658 1658 165 69
28 20 1816 1818 183 84
29 18 1755 1755 175 67
I wish to obtain the mean, standard deviation, median, minimum, maximum and sample size of each of the variables and get the output as a data frame. I tried using the code below, but the output becomes impossible for me to work with, and using tapply or aggregate seems to be beyond me as a novice R programmer. My assignment requires me not to use any 'extra' R packages.
apply(df, 2, mean)
apply(df, 2, sd)
apply(df, 2, median)
apply(df, 2, min)
apply(df, 2, max)
apply(df, 2, length)
Ideally, this is how the output data frame should look, including the row headings for each of the statistical functions:
age height_seca1 height_chad1 height_DL weight_alog1
mean 20 1737 1736 173 73
sd 3.3 91.9 92.7 9.7 14.5
median 19 1755 1755 175 71
minimum 17 1569 1570 155 50
maximum 31 1877 1880 188 106
sample size 29 29 29 29 29
Any help would be greatly appreciated.
Try basicStats from the fBasics package:
> install.packages("fBasics")
> library(fBasics)
> basicStats(df)
age height_seca1 height_chad1 height_DL weight_alog1
nobs 29.000000 29.000000 29.000000 29.000000 29.000000
NAs 0.000000 0.000000 0.000000 0.000000 0.000000
Minimum 17.000000 1569.000000 1570.000000 155.000000 50.000000
Maximum 31.000000 1877.000000 1880.000000 188.000000 106.000000
1. Quartile 19.000000 1666.000000 1665.000000 166.000000 65.000000
3. Quartile 21.000000 1816.000000 1809.000000 181.000000 80.000000
Mean 20.413793 1737.241379 1736.482759 173.379310 73.413793
Median 19.000000 1755.000000 1755.000000 175.000000 71.000000
Sum 592.000000 50380.000000 50358.000000 5028.000000 2129.000000
SE Mean 0.612910 17.069018 17.210707 1.798613 2.700354
LCL Mean 19.158305 1702.277081 1701.228224 169.695018 67.882368
UCL Mean 21.669282 1772.205677 1771.737293 177.063602 78.945219
Variance 10.894089 8449.189655 8590.044335 93.815271 211.465517
Stdev 3.300619 91.919474 92.682492 9.685828 14.541854
Skewness 1.746597 -0.355499 -0.322915 -0.430019 0.560360
Kurtosis 2.290686 -1.077820 -1.086108 -1.040182 -0.311017
You can also subset the output to get what you want:
> basicStats(df)[c("Mean", "Stdev", "Median", "Minimum", "Maximum", "nobs"),]
age height_seca1 height_chad1 height_DL weight_alog1
Mean 20.413793 1737.24138 1736.48276 173.379310 73.41379
Stdev 3.300619 91.91947 92.68249 9.685828 14.54185
Median 19.000000 1755.00000 1755.00000 175.000000 71.00000
Minimum 17.000000 1569.00000 1570.00000 155.000000 50.00000
Maximum 31.000000 1877.00000 1880.00000 188.000000 106.00000
nobs 29.000000 29.00000 29.00000 29.000000 29.00000
Another alternative is to define your own function, as in this post.
Update:
(I hadn't read the "My assignment requires me not to use any 'extra' R packages" part.)
As I said before, you can define your own function and loop over each column by using *apply family functions:
my.summary <- function(x, ...) {
  c(mean = mean(x, ...),
    sd = sd(x, ...),
    median = median(x, ...),
    min = min(x, ...),
    max = max(x, ...),
    n = length(x))
}
# all these calls should give you the same results.
apply(df, 2, my.summary)
sapply(df, my.summary)
do.call(cbind,lapply(df, my.summary))
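Note that apply(df, 2, my.summary) first coerces the data frame to a matrix, so it is only safe here because every column is numeric; sapply(df, my.summary) works column by column. Either way the result is a matrix, which can be wrapped if a data frame is wanted:
as.data.frame(sapply(df, my.summary))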
Or, using what you have already done, you just need to put those summaries into a list and use do.call:
df <- structure(list(age = c(19L, 19L, 21L, 21L, 21L, 18L, 19L, 19L, 21L, 17L, 28L, 22L, 19L, 19L, 18L, 18L, 19L, 19L, 18L, 21L, 19L, 31L, 26L, 19L, 18L, 19L, 26L, 20L, 18L), height_seca1 = c(1800L, 1682L, 1765L, 1829L, 1706L, 1607L, 1578L, 1577L, 1666L, 1710L, 1616L, 1648L, 1569L, 1779L, 1773L, 1816L, 1766L, 1745L, 1716L, 1785L, 1850L, 1875L, 1877L, 1836L, 1825L, 1755L, 1658L, 1816L, 1755L), height_chad1 = c(1797L, 1670L, 1765L, 1833L, 1705L, 1606L, 1576L, 1575L, 1665L, 1716L, 1619L, 1644L, 1570L, 1777L, 1772L, 1809L, 1765L, 1741L, 1714L, 1783L, 1854L, 1880L, 1877L, 1837L, 1823L, 1754L, 1658L, 1818L, 1755L), height_DL = c(180L, 167L, 178L, 181L, 170L, 160L, 156L, 156L, 166L, 172L, 161L, 165L, 155L, 177L, 179L, 181L, 178L, 174L, 170L, 179L, 185L, 188L, 186L, 185L, 182L, 174L, 165L, 183L, 175L), weight_alog1 = c(70L, 69L, 80L, 74L, 103L, 76L, 50L, 61L, 52L, 65L, 66L, 58L, 55L, 55L, 70L, 81L, 77L, 76L, 71L, 64L, 71L, 95L, 106L, 100L, 85L, 79L, 69L, 84L, 67L)), class = "data.frame", row.names = c("1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11", "12", "13", "14", "15", "16", "17", "18", "19", "20", "21", "22", "23", "24", "25", "26", "27", "28", "29"))
tmp <- do.call(data.frame,
list(mean = apply(df, 2, mean),
sd = apply(df, 2, sd),
median = apply(df, 2, median),
min = apply(df, 2, min),
max = apply(df, 2, max),
n = apply(df, 2, length)))
tmp
mean sd median min max n
age 20.41379 3.300619 19 17 31 29
height_seca1 1737.24138 91.919474 1755 1569 1877 29
height_chad1 1736.48276 92.682492 1755 1570 1880 29
height_DL 173.37931 9.685828 175 155 188 29
weight_alog1 73.41379 14.541854 71 50 106 29
or...
data.frame(t(tmp))
age height_seca1 height_chad1 height_DL weight_alog1
mean 20.413793 1737.24138 1736.48276 173.379310 73.41379
sd 3.300619 91.91947 92.68249 9.685828 14.54185
median 19.000000 1755.00000 1755.00000 175.000000 71.00000
min 17.000000 1569.00000 1570.00000 155.000000 50.00000
max 31.000000 1877.00000 1880.00000 188.000000 106.00000
n 29.000000 29.00000 29.00000 29.000000 29.00000
You can use lapply to go over each column and an anonymous function to do each of your calculations:
res <- lapply(mydf, function(x) rbind(mean = mean(x),
                                      sd = sd(x),
                                      median = median(x),
                                      minimum = min(x),
                                      maximum = max(x),
                                      s.size = length(x)))
data.frame(res)
# age height_seca1 height_chad1 height_DL weight_alog1
#mean 20.413793 1737.24138 1736.48276 173.379310 73.41379
#sd 3.300619 91.91947 92.68249 9.685828 14.54185
#median 19.000000 1755.00000 1755.00000 175.000000 71.00000
#minimum 17.000000 1569.00000 1570.00000 155.000000 50.00000
#maximum 31.000000 1877.00000 1880.00000 188.000000 106.00000
#s.size 29.000000 29.00000 29.00000 29.000000 29.00000
Adding a few more options for quick Exploratory Data Analysis (EDA):
1) skimr package:
install.packages("skimr")
library(skimr)
skim(df)
2) ExPanDaR package:
install.packages("ExPanDaR")
library(ExPanDaR)
# export data and code to a notebook
ExPanD(df, export_nb_option = TRUE)
# open a shiny app
ExPanD(df)
3) DescTools package:
install.packages("DescTools")
library(DescTools)
Desc(df, plotit = TRUE)
#> ------------------------------------------------------------------------------
#> Describe df (data.frame):
#>
#> data frame: 29 obs. of 5 variables
#> 29 complete cases (100.0%)
#>
#> Nr ColName Class NAs Levels
#> 1 age integer .
#> 2 height_seca1 integer .
#> 3 height_chad1 integer .
#> 4 height_DL integer .
#> 5 weight_alog1 integer .
#>
#>
#> ------------------------------------------------------------------------------
#> 1 - age (integer)
#>
#> length n NAs unique 0s mean meanCI
#> 29 29 0 9 0 20.41 19.16
#> 100.0% 0.0% 0.0% 21.67
#>
#> .05 .10 .25 median .75 .90 .95
#> 18.00 18.00 19.00 19.00 21.00 26.00 27.20
#>
#> range sd vcoef mad IQR skew kurt
#> 14.00 3.30 0.16 1.48 2.00 1.75 2.29
#>
#>
#> level freq perc cumfreq cumperc
#> 1 17 1 3.4% 1 3.4%
#> 2 18 6 20.7% 7 24.1%
#> 3 19 11 37.9% 18 62.1%
#> 4 20 1 3.4% 19 65.5%
#> 5 21 5 17.2% 24 82.8%
#> 6 22 1 3.4% 25 86.2%
#> 7 26 2 6.9% 27 93.1%
#> 8 28 1 3.4% 28 96.6%
#> 9 31 1 3.4% 29 100.0%
#>
#> heap(?): remarkable frequency (37.9%) for the mode(s) (= 19)
Results from Desc can be saved to a Microsoft Word docx file
### RDCOMClient package is needed
install.packages("RDCOMClient", repos = "http://www.omegahat.net/R")
# or
devtools::install_github("omegahat/RDCOMClient")
# create a new word instance and insert title and contents
wrd <- GetNewWrd(header = TRUE)
DescTools::Desc(df, plotit = TRUE, wrd = wrd)
Created on 2020-01-17 by the reprex package (v0.3.0)
I had the same problem and wrote the following:
h <- function(x, flist) {
  f <- function(f, ...) f(...)
  g <- function(x, flist) vapply(flist, f, x, FUN.VALUE = numeric(1))
  df <- as.data.frame(lapply(x, g, flist))
  row.names(df) <- names(flist)
  df
}
h(cars, flist = list(mean = mean, median = median, std_dev = sd))
It should work with any function specified in flist as long as the function returns a single value; i.e. it won't work with range.
Note that the elements of flist should be named; otherwise, you'll get strange row.names in the resulting data.frame.
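As a usage sketch, a multi-value function like range can still be used by wrapping each component so it returns a single value:
h(cars, flist = list(min = function(x) range(x)[1],
                     max = function(x) range(x)[2]))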

Subset column and compute operations for each subset [duplicate]

This question already has answers here:
How to sum a variable by group
(18 answers)
Closed 5 years ago.
Here is a minimal example dataframe to reproduce the problem.
df <- structure(list(Gene = structure(c(147L, 147L, 148L, 148L, 148L,
87L, 87L, 87L, 87L, 87L), .Label = c("genome", "k141_1189_101",
"k141_1189_104", "k141_1189_105", "k141_1189_116", "k141_1189_13",
"k141_1189_14", "k141_1189_146", "k141_1189_150", "k141_1189_18",
"k141_1189_190", "k141_1189_194", "k141_1189_215", "k141_1189_248",
"k141_1189_251", "k141_1189_252", "k141_1189_259", "k141_1189_274",
"k141_1189_283", "k141_1189_308", "k141_1189_314", "k141_1189_322",
"k141_1189_353", "k141_1189_356", "k141_1189_372", "k141_1189_373",
"k141_1189_43", "k141_1189_45", "k141_1189_72", "k141_1597_15",
"k141_1597_18", "k141_1597_23", "k141_1597_41", "k141_1597_55",
"k141_1597_66", "k141_1597_67", "k141_1597_68", "k141_1597_69",
"k141_2409_34", "k141_2409_8", "k141_3390_69", "k141_3390_83",
"k141_3390_84", "k141_3726_25", "k141_3726_31", "k141_3726_49",
"k141_3726_50", "k141_3726_62", "k141_3726_8", "k141_3726_80",
"k141_3790_1", "k141_3993_114", "k141_3993_122", "k141_3993_162",
"k141_3993_172", "k141_3993_183", "k141_3993_186", "k141_3993_188",
"k141_3993_24", "k141_3993_25", "k141_3993_28", "k141_3993_32",
"k141_3993_44", "k141_3993_47", "k141_3993_53", "k141_3993_57",
"k141_3993_68", "k141_4255_80", "k141_4255_81", "k141_4255_87",
"k141_5079_107", "k141_5079_110", "k141_5079_130", "k141_5079_14",
"k141_5079_141", "k141_5079_16", "k141_5079_184", "k141_5079_185",
"k141_5079_202", "k141_5079_24", "k141_5079_39", "k141_5079_63",
"k141_5079_65", "k141_5079_70", "k141_5079_77", "k141_5079_87",
"k141_5079_9", "k141_5313_16", "k141_5313_17", "k141_5313_20",
"k141_5313_23", "k141_5313_39", "k141_5313_5", "k141_5313_51",
"k141_5313_52", "k141_5313_78", "k141_5545_101", "k141_5545_103",
"k141_5545_104", "k141_5545_105", "k141_5545_106", "k141_5545_107",
"k141_5545_108", "k141_5545_109", "k141_5545_110", "k141_5545_111",
"k141_5545_112", "k141_5545_113", "k141_5545_114", "k141_5545_119",
"k141_5545_128", "k141_5545_130", "k141_5545_139", "k141_5545_141",
"k141_5545_145", "k141_5545_16", "k141_5545_169", "k141_5545_17",
"k141_5545_172", "k141_5545_6", "k141_5545_60", "k141_5545_62",
"k141_5545_63", "k141_5545_86", "k141_5545_87", "k141_5545_88",
"k141_5545_89", "k141_5545_91", "k141_5545_92", "k141_5545_93",
"k141_5545_94", "k141_5545_96", "k141_5545_97", "k141_5545_98",
"k141_5545_99", "k141_5734_13", "k141_5734_2", "k141_5734_4",
"k141_5734_5", "k141_5734_6", "k141_6014_124", "k141_6014_2",
"k141_6014_34", "k141_6014_75", "k141_6014_96", "k141_908_14",
"k141_908_2", "k141_908_5", "k141_957_126", "k141_957_135", "k141_957_136",
"k141_957_14", "k141_957_140", "k141_957_141", "k141_957_148",
"k141_957_179", "k141_957_191", "k141_957_35", "k141_957_47",
"k141_957_55", "k141_957_57", "k141_957_59", "k141_957_6", "k141_957_63",
"k141_957_65", "k141_957_68", "k141_957_77", "k141_957_95"), class = "factor"),
depth = c(9L, 10L, 9L, 10L, 11L, 14L, 15L, 16L, 17L, 18L),
bases_covered = c(6L, 3L, 4L, 7L, 4L, 59L, 54L, 70L, 34L,
17L), gene_length = c(1140L, 1140L, 591L, 591L, 591L, 690L,
690L, 690L, 690L, 690L), regioncoverage = c(54L, 30L, 36L,
70L, 44L, 826L, 810L, 1120L, 578L, 306L)), .Names = c("Gene",
"depth", "bases_covered", "gene_length", "regioncoverage"), row.names = c(1L,
2L, 33L, 34L, 35L, 78L, 79L, 80L, 81L, 82L), class = "data.frame")
The dataframe looks like this:
          Gene depth bases_covered gene_length regioncoverage
1   k141_908_2     9             6        1140             54
2   k141_908_2    10             3        1140             30
33  k141_908_5     9             4         591             36
34  k141_908_5    10             7         591             70
35  k141_908_5    11             4         591             44
78 k141_5079_9    14            59         690            826
79 k141_5079_9    15            54         690            810
80 k141_5079_9    16            70         690           1120
81 k141_5079_9    17            34         690            578
82 k141_5079_9    18            17         690            306
What I want is, for each Gene (e.g. k141_908_2), to sum regioncoverage and divide by unique(gene_length). In fact gene_length is always the same value within each gene.
For example for Gene K141_908_2 i would do: (54+30)/1140 = 0.07
For example for Gene K141_908_5 i would do: (36+70+44)/591 = 0.25
The final dataframe should report two columns.
Gene Newcoverage
1 k141_908_2 0.07
2 k141_908_5 0.25
3 ......
and so on.
Thanks for your help!
This is straightforward with dplyr:
library(dplyr)
df_final <- df %>%
  group_by(Gene) %>%
  summarize(Newcoverage = sum(regioncoverage) / first(gene_length))
df_final
# # A tibble: 3 × 2
# Gene Newcoverage
# <fctr> <dbl>
# 1 k141_5079_9 5.27536232
# 2 k141_908_2 0.07368421
# 3 k141_908_5 0.25380711
I needed to set the first column to character and others to numeric. But after that you can just split the df by gene and then do the necessary calculations.
df[,2:5] = lapply(df[,2:5], as.numeric)
df$Gene = as.character(df$Gene)
sapply(split(df, df$Gene), function(x) sum(x[,5]/x[1,4]))
#k141_5079_9 k141_908_2 k141_908_5
# 5.27536232 0.07368421 0.25380711
We can use the tidyverse:
library(tidyverse)
df %>%
  group_by(Gene) %>%
  summarise(Newcoverage = sum(regioncoverage)/gene_length[1])
# A tibble: 3 × 2
# Gene Newcoverage
# <fctr> <dbl>
#1 k141_5079_9 5.27536232
#2 k141_908_2 0.07368421
#3 k141_908_5 0.25380711
Or a base R option is
by(df[4:5], list(as.character(df[,'Gene'])), FUN= function(x) sum(x[,2])/x[1,1])
A quick approach is with data.table:
require(data.table)
DT <- setDT(df)
#just to output unique rows
DT[, .(New_Coverage = unique(sum(regioncoverage)/gene_length)), by = .(Gene)]
output
Gene New_Coverage
1: k141_908_2 0.07368421
2: k141_908_5 0.25380711
3: k141_5079_9 5.27536232
I use dplyr a lot. So here's one way:
library(dplyr)
df %>%
  group_by(Gene) %>%
  mutate(Newcoverage = sum(regioncoverage)/unique(gene_length))
If you want only unique values per Gene:
df %>%
  group_by(Gene) %>%
  transmute(Newcoverage = sum(regioncoverage)/unique(gene_length)) %>%
  unique()
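Since gene_length is constant within each gene, sum(regioncoverage)/unique(gene_length) equals sum(regioncoverage/gene_length), so another base R option is a one-line aggregate (a sketch using the df from the question):
aggregate(regioncoverage / gene_length ~ Gene, data = df, FUN = sum)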
