Reshaping data from long to wide with multiple categories - r

I'm trying to reshape my data from a long format into a wide format based on multiple groupings, without success. Given this data:
id <- 1:20
month <- rep(4:7, 50)
name <- rep(c("sam", "mike", "tim", "jill", "max"), 40)
cost <- sample(1:100, 200, replace=TRUE)
df <- data.frame(id, month, name, cost)
df.mo.mean <- aggregate(df$cost ~ df$name + df$month, FUN="mean")
df.mo.sd <- aggregate(df$cost ~ df$name + df$month, FUN="sd")
df.mo <- data.frame(df.mo.mean, df.mo.sd)
df.mo <- df.mo[,-c(4,5)]
df.mo[3:4] <- round(df.mo[3:4],2)
head(df)
id month name cost
1 1 4 sam 29
2 2 5 mike 93
3 3 6 tim 27
4 4 7 jill 67
5 5 4 max 28
6 6 5 sam 69
I'm trying to get my data to look like the example below, and to generalize it for an unknown number of names (but fewer than 15):
month name1.cost.mean name1.cost.sd name2.cost.mean name2.cost.sd
1 45 4 40 6
2 ...
I've tried reshape and do.call with rbind without success. The only other way I can think of doing it is with a loop, which means I'm probably doing something wrong. I don't have any experience with plyr and would prefer to solve this problem with base packages (for learning purposes), but if that's not possible, any other suggestions would be very helpful.

set.seed(1)
library(plyr)
kk <- ddply(df, .(month, name), summarize, mean = mean(cost), sd = sd(cost))
reshape(kk, timevar = "name", idvar = "month", direction = "wide")
month mean.jill sd.jill mean.max sd.max mean.mike sd.mike mean.sam sd.sam mean.tim sd.tim
1 4 55.3 34.62834 63.3 23.35261 57.6 22.91627 63.4 28.89906 43.3 25.42112
6 5 49.3 25.00689 51.1 27.85059 48.4 23.16223 43.0 24.33562 47.6 32.13928
11 6 60.4 23.61826 52.1 29.74503 38.6 34.39703 53.0 23.28567 52.4 20.88700
16 7 50.0 30.76073 62.7 23.98634 51.7 32.10763 52.8 32.27589 49.5 23.00845

> means <- with( df, tapply(cost, list(month, name), FUN=mean) )
> sds <- with( df, tapply(cost, list(month, name), FUN=sd) )
> colnames(means) <- paste0(colnames(means), ".mean")
> colnames(sds) <- paste0(colnames(sds), ".sd")
> comb.df <- as.data.frame( cbind(means, sds) )
> comb.df <- comb.df[order(names(comb.df))]
> comb.df
jill.mean jill.sd max.mean max.sd mike.mean mike.sd
4 62.1 22.29823 39.7 25.53016 39.6 30.11164
5 40.7 30.72838 44.4 29.12502 54.2 23.91095
6 47.3 31.54556 46.9 32.30910 65.3 30.05569
7 55.5 33.16038 45.9 28.13637 59.7 31.79815
sam.mean sam.sd tim.mean tim.sd
4 40.9 23.54877 58.5 21.69613
5 51.5 30.76163 34.2 32.16900
6 69.1 18.26016 55.2 32.99764
7 46.9 29.90150 55.8 27.17352

I'm not sure what you are asking for, but maybe something like this could be useful
> set.seed(1)
> df <- data.frame(id=1:20, month=rep(4:7, 50),
+ name=rep(c("sam", "mike", "tim", "jill", "max"), 40),
+ cost= sample(1:100, 200, replace=TRUE))
>
> DF.mean <- aggregate(cost ~ name + month, FUN=mean, data=df) ## mean
> DF.sd <- aggregate(cost ~ name + month, FUN=sd, data=df) ## sd
>
> x1 <- as.data.frame.matrix(xtabs(cost~month+name, data=DF.mean)) # reshaping mean
> colnames(x1) <- paste0(colnames(x1), ".mean")
> x2 <- as.data.frame.matrix(xtabs(cost~month+name, data=DF.sd)) # reshaping sd
> colnames(x2) <- paste0(colnames(x2), ".sd")
>
> cbind(x1, x2)
jill.mean max.mean mike.mean sam.mean tim.mean jill.sd max.sd mike.sd sam.sd tim.sd
4 55.3 63.3 57.6 63.4 43.3 34.62834 23.35261 22.91627 28.89906 25.42112
5 49.3 51.1 48.4 43.0 47.6 25.00689 27.85059 23.16223 24.33562 32.13928
6 60.4 52.1 38.6 53.0 52.4 23.61826 29.74503 34.39703 23.28567 20.88700
7 50.0 62.7 51.7 52.8 49.5 30.76073 23.98634 32.10763 32.27589 23.00845
Also, note that @Metrics' approach can be done using base R functions without any extra packages:
> kk <- aggregate(cost ~ name + month, FUN=function(x) c(mean=mean(x), sd=sd(x)), data=df)
> reshape(kk,timevar="name",idvar="month",direction="wide")
month cost.jill.mean cost.jill.sd cost.max.mean cost.max.sd cost.mike.mean cost.mike.sd cost.sam.mean cost.sam.sd cost.tim.mean cost.tim.sd
1 4 55.30000 34.62834 63.30000 23.35261 57.60000 22.91627 63.40000 28.89906 43.30000 25.42112
6 5 49.30000 25.00689 51.10000 27.85059 48.40000 23.16223 43.00000 24.33562 47.60000 32.13928
11 6 60.40000 23.61826 52.10000 29.74503 38.60000 34.39703 53.00000 23.28567 52.40000 20.88700
16 7 50.00000 30.76073 62.70000 23.98634 51.70000 32.10763 52.80000 32.27589 49.50000 23.00845

You can cast the data twice (once for the mean, once for the sd) and then merge the results:
library(reshape2)
> dcast(df, month ~ name, mean, value.var="cost")
month jill max mike sam tim
1 4 39.5 54.6 45.6 48.4 57.4
2 5 45.1 61.7 45.4 54.5 50.8
3 6 41.9 45.7 56.4 43.1 52.1
4 7 51.6 38.6 43.6 65.1 51.5
> dcast(df, month ~ name, sd, value.var="cost")
month jill max mike sam tim
1 4 29.31154 25.25954 28.96051 31.32695 29.82989
2 5 31.02848 27.96049 34.32589 30.08599 23.95273
3 6 32.09517 32.50316 37.16988 27.03681 30.42094
4 7 19.56300 31.50026 28.65969 36.53750 26.73429
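The merge step itself isn't shown above; here is a minimal sketch of one way to do it (assuming df as defined in the question), renaming the per-name columns first so the means and sds stay distinguishable:
library(reshape2)
mean.wide <- dcast(df, month ~ name, mean, value.var = "cost")
sd.wide   <- dcast(df, month ~ name, sd,   value.var = "cost")
# tag the value columns before merging so the two sets of columns don't collide
names(mean.wide)[-1] <- paste0(names(mean.wide)[-1], ".mean")
names(sd.wide)[-1]   <- paste0(names(sd.wide)[-1], ".sd")
merge(mean.wide, sd.wide, by = "month")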


R error: the condition has length > 1 [duplicate]

This question already has answers here:
Interpreting "condition has length > 1" warning from `if` function
(7 answers)
Closed 7 months ago.
This is my first time asking a question on Stack Overflow and also my first time coding in R, so please understand if my explanation is unclear :(
I now have a data frame (data2000) that is 1092 x 6
The headers are year, month, predictive horizon, name of the company, GDP Price Index, and Consumer Price Index
I want to create vectors of gdppi and cpi for each month.
My ultimate goal is to get the mean, median, interquartile range, and 90th-10th percentile range for each month, and I thought this would be the first step.
This is the code that I have written so far:
library(tidyverse)
data2000 <- read.csv("")
for (i in 1:12) {
  i_gdppi <- c()
  i_cpi <- c()
}
for (i in 1:12) {
  if (data2000$month == i) {
    append(i_gdppi, data2000[, gdppi])
    append(i_cpi, data2000[, cpi])
  }
}
Unfortunately, I got an error message saying that
Error in if (data2000$month == 1) { : the condition has length > 1
I googled it myself and found that I cannot use a vector as the condition of an if statement.
How can I solve this problem?
Thank you so much and have a nice day!
If you use the group_by() function, then it takes care of subsetting your data:
library(dplyr)
data2000 <- data.frame(month = rep(c(1:12), times = 2), gdppi = runif(24) * 100) # Dummy data
data2000 |>
  group_by(month) |>
  summarise(mean = mean(gdppi),
            q10 = quantile(gdppi, probs = .10),
            q25 = quantile(gdppi, probs = .25)) # Add the other percentiles, as needed
Gives this
# A tibble: 12 x 4
month mean q10 q25
<int> <dbl> <dbl> <dbl>
1 1 12.5 3.44 6.83
2 2 34.7 7.15 17.5
3 3 37.8 22.1 28.0
4 4 30.3 19.0 23.2
5 5 65.7 62.2 63.5
6 6 60.7 38.7 47.0
7 7 43.0 38.2 40.0
8 8 77.9 60.7 67.1
9 9 56.3 44.0 48.6
10 10 53.1 19.6 32.2
11 11 63.8 40.6 49.3
12 12 59.0 49.2 52.9
If you have years and months, group by both: group_by(year, month).
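Since the question also asks for the median, interquartile range, and 90th-10th percentile range, here is a sketch of how the same summarise() call could be extended (using the dummy data above):
data2000 |>
  group_by(month) |>
  summarise(mean   = mean(gdppi),
            median = median(gdppi),
            iqr    = IQR(gdppi),                                            # 75th minus 25th percentile
            p90_10 = unname(quantile(gdppi, .90) - quantile(gdppi, .10)))   # 90th minus 10th percentile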

I'm having difficulty getting the right data into the matrices of my 4x3x21 array

What I'm having trouble with is this: I'd like the first row of mat.a to be the first row of matrix 1 in my array, the second row of mat.a to be the first row of matrix 2, and so on. Likewise, the first row of mat.b should be the second row of the first matrix in the array, the second row of mat.b the second row of the second matrix, and so on; the same pattern continues for mat.c. The fourth row of each matrix should be the averages of the values in each column. Also, I'm not allowed to use a for loop.
mat.a <- matrix(c(scores$A1, scores$A2, scores$avgA), ncol = 3, byrow = FALSE)
mat.b <- matrix(c(scores$B1, scores$B2, scores$avgB), ncol = 3, byrow = FALSE)
mat.c <- matrix(c(scores$C1, scores$C2, scores$avgC), ncol = 3, byrow = FALSE)
scores.array <- array(c(mat.a, mat.b, mat.c), dim = c(3, 3, 21))
> dim(mat.a)
[1] 21 3
> dim(scores)
[1] 21 10
> dim(mat.b)
[1] 21 3
> dim(mat.c)
[1] 21 3
Here is a natural (I think) approach to this problem:
Use array to construct an array with A, B, and C lying along the third dimension.
Use aperm to transpose the array so that A, B, and C lie along the first dimension.
Use colMeans to compute means over the first dimension ("columnwise").
Use abind to attach the means to the transposed array.
nms <- c("A1", "A2", "avgA", "B1", "B2", "avgB", "C1", "C2", "avgC")
z <- array(unlist(scores[nms]), dim = c(21L, 3L, 3L))
zz <- aperm(z, 3:1)
zzz <- abind::abind(zz, colMeans(zz, dims = 1L), along = 1L)
zzz[, , 1:2]
, , 1
[,1] [,2] [,3]
[1,] 28.75775 69.28034 49.01905
[2,] 41.37243 27.43836 34.40540
[3,] 10.28646 89.03502 49.66074
[4,] 26.80555 61.91791 44.36173
, , 2
[,1] [,2] [,3]
[1,] 78.83051 64.05068 71.44060
[2,] 36.88455 81.46400 59.17427
[3,] 43.48927 91.44382 67.46655
[4,] 53.06811 78.98617 66.02714
I have used scores as (very helpfully!) defined by @langtang.
Try this:
library(tidyverse)
# add the averages
scores <- scores %>%
  rowwise() %>%
  mutate(avg1 = mean(c_across(ends_with("1"))),
         avg2 = mean(c_across(ends_with("2"))),
         avg3 = mean(c_across(starts_with("avg")))) %>%
  # relocate the columns
  relocate(ini, A1, B1, C1, avg1, A2, B2, C2, avg2, avgA, avgB, avgC, avg3)
# create the scores array
scores.array <- array(scores %>% pivot_longer(cols = A1:avg3) %>% pull(value), dim = c(4, 3, 21))
# add dim names
dimnames(scores.array) <- list(c("A", "B", "C", "mean"), c("Midterm", "Final", "mean"), scores$ini)
Output (first two):
> scores.array[,,1:2]
, , ZO
Midterm Final mean
A 28.75775 69.28034 49.01905
B 41.37243 27.43836 34.40540
C 10.28646 89.03502 49.66074
mean 26.80555 61.91791 44.36173
, , UE
Midterm Final mean
A 78.83051 64.05068 71.44060
B 36.88455 81.46400 59.17427
C 43.48927 91.44382 67.46655
mean 53.06811 78.98617 66.02714
Input Data (fake data):
set.seed(123)
scores <- data.frame(
  A1 = runif(21) * 100,
  A2 = runif(21) * 100,
  B1 = runif(21) * 100,
  B2 = runif(21) * 100,
  C1 = runif(21) * 100,
  C2 = runif(21) * 100
)
scores <- scores %>%
  rowwise() %>%
  mutate(ini = paste0(sample(LETTERS, 2), collapse = "")) %>%
  relocate(ini)
scores$avgA <- apply(scores[, c("A1", "A2")], 1, mean)
scores$avgB <- apply(scores[, c("B1", "B2")], 1, mean)
scores$avgC <- apply(scores[, c("C1", "C2")], 1, mean)
ini A1 A2 B1 B2 C1 C2 avgA avgB avgC
<chr> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl>
1 ZO 28.8 69.3 41.4 27.4 10.3 89.0 49.0 34.4 49.7
2 UE 78.8 64.1 36.9 81.5 43.5 91.4 71.4 59.2 67.5
3 HS 40.9 99.4 15.2 44.9 98.5 60.9 70.2 30.0 79.7
4 JR 88.3 65.6 13.9 81.0 89.3 41.1 76.9 47.4 65.2
5 JL 94.0 70.9 23.3 81.2 88.6 14.7 82.4 52.3 51.7
6 BJ 4.56 54.4 46.6 79.4 17.5 93.5 29.5 63.0 55.5
7 VL 52.8 59.4 26.6 44.0 13.1 30.1 56.1 35.3 21.6
8 TN 89.2 28.9 85.8 75.4 65.3 6.07 59.1 80.6 35.7
9 QN 55.1 14.7 4.58 62.9 34.4 94.8 34.9 33.8 64.6
10 VC 45.7 96.3 44.2 71.0 65.7 72.1 71.0 57.6 68.9
# ... with 11 more rows
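One payoff of setting dimnames, shown here as a small usage sketch on the array built above: elements can be pulled out by name rather than by position.
scores.array[, , "ZO"]           # one student's full 4 x 3 matrix
scores.array["A", "Midterm", ]   # the A midterm score for every student
scores.array["mean", "mean", ]   # each student's overall mean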

How to create a data summary function?

I'm trying to create a function that summarizes several vectors, and the prompt is:
Write a function data_summary which takes three inputs:
`dataset`: A data frame
`vars`: A character vector whose elements are names of columns from dataset which the user wants summaries for
`group.name`: A length one character vector which gives the name of the column from dataset which contains the factor which will be used as a grouping variable
`var.names`: A character vector of the same length as vars which gives the names that the user would like used as the entries under “Variable” in the resulting output. This should be set equal to vars by default, so the default behavior is to use the column names from dataset.
The output of the function should be a data frame with the following structure:
Column names of the data frame will be:
`Variable`
`Missing`
The `first` level of the factor group.name
The `second` level of the factor group.name
…
The `kth` level of the factor group.name
`p-value`
I've set up the code already,
data_summary <- function(dataset,vars,group.name,var.names) {
}
but I'm unsure how to proceed because I do not understand what this is trying to accomplish and what the output should look like. There is an example that shows
#data_summary<-function(dataset, vars,group.name, var.name){}
#example
#data_summary(titanic4, c("survived", "female", "age", "sibsp", "parch", "fare", "cabin"), "pclass")
#data_summary(titanic4, c("survived", "female", "age", "sibsp", "parch", "fare", "cabin"), "pclass", c("Survival rate", "% Female", "Age", "# siblings/spouses aboard", "# children/parents aboard", "Fare ($)", "Cabin"))
But it really did not help me outside of inputting the arguments for the function.
You can use the dplyr package for this. I don't know which functions you want to summarise your data frame with, so I use the statistics that the summary() function from the base package returns.
My data:
> NewSKUMatrix
# A tibble: 268,918 x 4
LagerID FilialID CSBID Price
<int> <int> <int> <dbl>
1 233 2578 1005 38.3
2 333 2543 NA 61.0
3 334 2543 NA 15.0
4 335 2543 NA 11.0
5 337 2301 NA 71.0
6 338 2031 NA 37.0
7 338 2044 NA 35.0
8 338 2054 NA 36.0
9 338 2060 NA 37.0
10 338 2063 NA 36.0
# ... with 268,908 more rows
Function:
data_summary <- function(data,
                         variables,
                         values,
                         names = NULL) {
  if (is.null(x = names)) {
    names <- variables
  }
  data %>%
    group_by_at(.vars = variables) %>%
    summarise_at(
      .vars = values,
      .funs = list(
        Min. = min,
        `1st Qu.` = ~ quantile(x = ., probs = 0.25),
        Median = median,
        Mean = mean,
        `3rd Qu.` = ~ quantile(x = ., probs = 0.75),
        Max. = max
      )
    ) %>%
    rename_at(.vars = variables,
              .funs = ~ names)
}
Output:
data_summary(NewSKUMatrix,
             c('LagerID'),
             c('Price'),
             c('SKU'))
# A tibble: 32,454 x 7
SKU Min. `1st Qu.` Median Mean `3rd Qu.` Max.
<int> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl>
1 17 39.0 39.0 39.0 39.0 39.0 39.0
2 18 120. 120. 120. 121. 120. 140.
3 21 289. 289. 289. 289. 289. 289.
4 24 37.0 37.0 37.0 45.2 45.2 70.0
5 25 14.0 14.0 14.0 14.0 14.0 14.0
6 55 30.9 30.9 30.9 30.9 30.9 30.9
7 117 26.9 26.9 26.9 26.9 26.9 26.9
8 118 24.8 24.9 24.9 25.1 25.1 25.7
9 119 24.8 24.8 24.9 25.1 25.3 25.7
10 158 104. 108. 108. 107. 108. 108.
# ... with 32,444 more rows
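Note that group_by_at(), summarise_at(), and rename_at() still work but have been superseded in newer dplyr versions by across(). A rough equivalent of the function above could look like the sketch below (not tested against the NewSKUMatrix data; same arguments as before):
library(dplyr)

data_summary <- function(data, variables, values, names = NULL) {
  if (is.null(names)) {
    names <- variables
  }
  data %>%
    group_by(across(all_of(variables))) %>%
    summarise(across(all_of(values),
                     list(Min.      = min,
                          `1st Qu.` = ~ quantile(.x, probs = 0.25),
                          Median    = median,
                          Mean      = mean,
                          `3rd Qu.` = ~ quantile(.x, probs = 0.75),
                          Max.      = max),
                     .names = "{.fn}"),  # with a single value column this reproduces the headers above
              .groups = "drop") %>%
    rename_with(~ names, all_of(variables))
}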

Removing a column from a matrix

I'm a bit new to R and want to remove a column from a matrix by the name of that column. I know that X[,2] gives the second column and X[,-2] gives every column except the second one. What I really want to know is whether there's a similar command using column names. I've got a matrix and want to remove the "sales" column, but X[,-"sales"] doesn't seem to work. How should I do this? I would just use the column number, except I want to be able to reuse the code for other matrices later, which have different dimensions. Any help would be much appreciated.
I'm not sure why all the answers are solutions for data frames and not matrices.
Per @Sotos's and @Moody_Mudskipper's comments, here is an example with the built-in state.x77 data matrix.
dat <- head(state.x77)
dat
#> Population Income Illiteracy Life Exp Murder HS Grad Frost Area
#> Alabama 3615 3624 2.1 69.05 15.1 41.3 20 50708
#> Alaska 365 6315 1.5 69.31 11.3 66.7 152 566432
#> Arizona 2212 4530 1.8 70.55 7.8 58.1 15 113417
#> Arkansas 2110 3378 1.9 70.66 10.1 39.9 65 51945
#> California 21198 5114 1.1 71.71 10.3 62.6 20 156361
#> Colorado 2541 4884 0.7 72.06 6.8 63.9 166 103766
# for removing one column
dat[, colnames(dat) != "Area"]
#> Population Income Illiteracy Life Exp Murder HS Grad Frost
#> Alabama 3615 3624 2.1 69.05 15.1 41.3 20
#> Alaska 365 6315 1.5 69.31 11.3 66.7 152
#> Arizona 2212 4530 1.8 70.55 7.8 58.1 15
#> Arkansas 2110 3378 1.9 70.66 10.1 39.9 65
#> California 21198 5114 1.1 71.71 10.3 62.6 20
#> Colorado 2541 4884 0.7 72.06 6.8 63.9 166
# for removing more than one column
dat[, !colnames(dat) %in% c("Area", "Life Exp")]
#> Population Income Illiteracy Murder HS Grad Frost
#> Alabama 3615 3624 2.1 15.1 41.3 20
#> Alaska 365 6315 1.5 11.3 66.7 152
#> Arizona 2212 4530 1.8 7.8 58.1 15
#> Arkansas 2110 3378 1.9 10.1 39.9 65
#> California 21198 5114 1.1 10.3 62.6 20
#> Colorado 2541 4884 0.7 6.8 63.9 166
#be sure to use `colnames` and not `names`
names(state.x77)
#> NULL
Created on 2020-06-27 by the reprex package (v0.3.0)
My favorite way:
# create data
df <- data.frame(x = runif(100),
                 y = runif(100),
                 remove_me = runif(100),
                 remove_me_too = runif(100))
# remove column
df <- df[,!names(df) %in% c("remove_me", "remove_me_too")]
so this dataframe:
> df
x y remove_me remove_me_too
1 0.731124508 0.535219259 0.33209113 0.736142042
2 0.612017350 0.404128030 0.84923974 0.624543223
3 0.415403559 0.369818154 0.53817387 0.661263087
4 0.199780006 0.679946936 0.58782429 0.085624708
5 0.343304259 0.892128112 0.02827132 0.038203599
becomes this:
> df
x y
1 0.731124508 0.535219259
2 0.612017350 0.404128030
3 0.415403559 0.369818154
4 0.199780006 0.679946936
5 0.343304259 0.892128112
As always in R there are many potential solutions. You can use the package dplyr and select() to easily remove or select columns in a data frame.
df <- data.frame(x = runif(100),
                 y = runif(100),
                 remove_me = runif(100),
                 remove_me_too = runif(100))
library(dplyr)
select(df, -remove_me, -remove_me_too) %>% head()
#> x y
#> 1 0.35113636 0.134590652
#> 2 0.72545356 0.165608839
#> 3 0.81000067 0.090696049
#> 4 0.29882204 0.004602398
#> 5 0.93492918 0.256870750
#> 6 0.03007377 0.395614901
You can read more about dplyr and its verbs here.
As a general case, if you remove so many columns that only one column remains, R will convert it to a numeric vector. You can prevent it by setting drop = FALSE.
(df <- data.frame(x = runif(6),
                  y = runif(6),
                  remove_me = runif(6),
                  remove_me_too = runif(6)))
# x y remove_me remove_me_too
# 1 0.4839869 0.18672217 0.0973506 0.72310641
# 2 0.2467426 0.37950878 0.2472324 0.80133920
# 3 0.4449471 0.58542547 0.8185943 0.57900456
# 4 0.9119014 0.12089776 0.2153147 0.05584816
# 5 0.4979701 0.04890334 0.7420666 0.44906667
# 6 0.3266374 0.37110822 0.6809380 0.29091746
df[, -c(3, 4)]
# x y
# 1 0.4839869 0.18672217
# 2 0.2467426 0.37950878
# 3 0.4449471 0.58542547
# 4 0.9119014 0.12089776
# 5 0.4979701 0.04890334
# 6 0.3266374 0.37110822
# Result is a numeric vector
df[, -c(2, 3, 4)]
# [1] 0.4839869 0.2467426 0.4449471 0.9119014 0.4979701 0.3266374
# Keep the data frame structure with drop = FALSE
df[, -c(2, 3, 4), drop = FALSE]
# x
# 1 0.4839869
# 2 0.2467426
# 3 0.4449471
# 4 0.9119014
# 5 0.4979701
# 6 0.3266374
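The same drop = FALSE idea applies to actual matrices, which is what the question asked about; here is a quick sketch using the state.x77 example from earlier in this thread:
m <- head(state.x77)
m[, !colnames(m) %in% c("Area", "Life Exp")]   # still a matrix: several columns remain
m[, "Area"]                                    # a single column comes back as a plain named vector
m[, "Area", drop = FALSE]                      # drop = FALSE keeps it a one-column matrix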

Why subsetting rows with 'apply' on a data frame doesn't work in R

I have data that looks like this:
Name|ID|p72|p78|p51|p49|c36.1|c32.1|c32.2|c36.2|c37
hsa-let-7a-5p|MIMAT0000062|9.1|38|12.7|185|8|4.53333333333333|17.9|23|63.3
hsa-let-7b-5p|MIMAT0000063|11.3|58.6|27.5|165.6|20.4|8.5|21|30.2|92.6
hsa-let-7c|MIMAT0000064|7.8|40.2|9.6|147.8|11.8|4.53333333333333|15.4|17.7|62.3
hsa-let-7d-5p|MIMAT0000065|4.53333333333333|27.7|13.4|158.1|8.5|4.53333333333333|14.2|13.5|50.5
hsa-let-7e-5p|MIMAT0000066|6.2|4.53333333333333|4.53333333333333|28|4.53333333333333|4.53333333333333|5.6|4.7|12.8
hsa-let-7f-5p|MIMAT0000067|4.53333333333333|4.53333333333333|4.53333333333333|78.2|4.53333333333333|4.53333333333333|6.8|4.53333333333333|8.9
hsa-miR-15a-5p|MIMAT0000068|4.53333333333333|70.3|10.3|147.6|4.53333333333333|4.53333333333333|21.1|30.2|100.8
hsa-miR-16-5p|MIMAT0000069|9.5|562.6|60.5|757|25.1|4.53333333333333|89.4|142.9|613.9
hsa-miR-17-5p|MIMAT0000070|10.5|71.6|27.4|335.1|6.3|10.1|51|51|187.1
hsa-miR-17-3p|MIMAT0000071|4.53333333333333|4.53333333333333|4.53333333333333|17.2|4.53333333333333|4.53333333333333|9.5|4.53333333333333|7.3
hsa-miR-18a-5p|MIMAT0000072|4.53333333333333|14.6|4.53333333333333|53.4|4.53333333333333|4.53333333333333|9.5|25.5|29.7
hsa-miR-19a-3p|MIMAT0000073|4.53333333333333|11.6|4.53333333333333|42.8|4.53333333333333|4.53333333333333|4.53333333333333|5.5|17.9
hsa-miR-19b-3p|MIMAT0000074|8.3|93.3|15.8|248.3|4.53333333333333|6.3|44.7|53.2|135
hsa-miR-20a-5p|MIMAT0000075|4.53333333333333|75.2|23.4|255.7|6.6|4.53333333333333|43.8|38|130.3
hsa-miR-21-5p|MIMAT0000076|6.2|19.7|18|299.5|6.8|4.53333333333333|49.9|68.5|48
hsa-miR-22-3p|MIMAT0000077|40.4|128.4|65.4|547.1|56.5|33.4|104.9|84.1|248.3
hsa-miR-23a-3p|MIMAT0000078|58.3|99.3|58.6|617.9|36.6|21.4|107.1|125.5|120.9
hsa-miR-24-1-5p|MIMAT0000079|4.53333333333333|4.53333333333333|4.53333333333333|9.2|4.53333333333333|4.53333333333333|4.53333333333333|4.9|4.53333333333333
hsa-miR-24-3p|MIMAT0000080|638.2|286.9|379.5|394.4|307.8|240.4|186|234.2|564
What I want to do is to simply pick rows where all the values are greater than 10.
But why does this code of mine only report the last one?
The data clearly show that there are more rows that satisfy this condition.
> dat<-read.delim("http://dpaste.com/1215552/plain/",sep="|",na.strings="",header=TRUE,blank.lines.skip=TRUE,fill=FALSE)
> dat[apply(dat[, -1], MARGIN = 1, function(x) all(x > 10)), ]
Name ID p72 p78 p51 p49 c36.1 c32.1 c32.2 c36.2 c37
19 hsa-miR-24-3p MIMAT0000080 638.2 286.9 379.5 394.4 307.8 240.4 186 234.2 564
What is the right way to do it?
Update:
@alexwhan's solution works. But I wonder how I can generalize his approach so that it can handle data with missing values (NA):
dat <- read.delim("http://dpaste.com/1215354/plain/", sep="\t", na.strings="", header=FALSE, blank.lines.skip=TRUE, fill=FALSE)
Since you're including your ID column (which is a factor) in the all(), it's getting messed up. Try:
dat[apply(dat[, -c(1,2)], MARGIN = 1, function(x) all(x > 10)), ]
# Name ID p72 p78 p51 p49 c36.1 c32.1 c32.2 c36.2 c37
# 16 hsa-miR-22-3p MIMAT0000077 40.4 128.4 65.4 547.1 56.5 33.4 104.9 84.1 248.3
# 17 hsa-miR-23a-3p MIMAT0000078 58.3 99.3 58.6 617.9 36.6 21.4 107.1 125.5 120.9
# 19 hsa-miR-24-3p MIMAT0000080 638.2 286.9 379.5 394.4 307.8 240.4 186.0 234.2 564.0
EDIT
For the case where you have NA, you can just use the na.rm argument for all(). Using your new data (from the comment):
dat<-read.delim("http://dpaste.com/1215354/plain/",sep="\t",na.strings="",header=FALSE,blank.lines.skip=TRUE,fill=FALSE)
dat[apply(dat[, -c(1,2)], MARGIN = 1, function(x) all(x > 10, na.rm = T)), ]
# V1 V2 V3 V4 V5 V6 V7 V8 V9 V10 V11
# 7 hsa-miR-15a-5p MIMAT0000068 NA 70.3 10.3 147.6 NA NA 21.1 30.2 100.8
# 16 hsa-miR-22-3p MIMAT0000077 40.4 128.4 65.4 547.1 56.5 33.4 104.9 84.1 248.3
# 17 hsa-miR-23a-3p MIMAT0000078 58.3 99.3 58.6 617.9 36.6 21.4 107.1 125.5 120.9
# 19 hsa-miR-24-3p MIMAT0000080 638.2 286.9 379.5 394.4 307.8 240.4 186.0 234.2 564.0
# 20 hsa-miR-25-3p MIMAT0000081 19.3 78.6 25.6 84.3 14.9 16.9 19.1 27.2 113.8
# 21 hsa-miR-26a-5p MIMAT0000082 NA 22.8 31.0 561.2 12.4 NA 67.0 55.8 48.9
Another idea is to transform your data to long (molten) format. I think it is even better for avoiding the missing-values problem:
library(reshape2)
dat.m <- melt(dat,id.vars=c('Name','ID'))
dat.m$value <- as.numeric(dat.m$value)
library(plyr)
res <- ddply(dat.m,.(Name,ID), summarise, keepme = all(value > 10))
res[res$keepme,]
# Name ID keepme
# 16 hsa-miR-22-3p MIMAT0000077 TRUE
# 17 hsa-miR-23a-3p MIMAT0000078 TRUE
# 19 hsa-miR-24-3p MIMAT0000080 TRUE
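For completeness, here is a base-R sketch without apply() or reshaping (assuming dat as read above): rowSums() on a logical matrix gives the same filter and handles NAs via na.rm.
vals <- dat[, -c(1, 2)]                          # drop the Name and ID columns
dat[rowSums(vals <= 10, na.rm = TRUE) == 0, ]    # keep rows with no value at or below 10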
