Weighted table data frame with plyr in R

I'm working with survey data consisting of integer value responses for multiple questions (y1, y2, y3, ...) and a weighted count assigned to each respondent, like this:
foo <- data.frame(wcount = c(10, 1, 2, 3),             # weighted counts
                  y1 = sample(1:5, 4, replace = TRUE), # numeric responses
                  y2 = sample(1:5, 4, replace = TRUE),
                  y3 = sample(1:5, 4, replace = TRUE))
> foo
wcount y1 y2 y3
1 10 5 5 5
2 1 1 4 4
3 2 1 2 5
4 3 2 5 3
and I'd like to transform this into a consolidated data frame version of a weighted table, with the first column representing the response values, and the next 3 columns representing the weighted counts. This can be done explicitly by column using:
library(Hmisc)
ty1 <- wtd.table(foo$y1, foo$wcount)
ty2 <- wtd.table(foo$y2, foo$wcount)
ty3 <- wtd.table(foo$y3, foo$wcount)
bar <- merge(ty1, ty2, all=T, by="x")
bar <- merge(bar, ty3, all=T, by="x")
names(bar) <- c("x", "ty1", "ty2", "ty3")
bar[is.na(bar)] <- 0
> bar
x ty1 ty2 ty3
1 1 3 0 0
2 2 3 2 0
3 3 0 0 3
4 4 0 1 1
5 5 10 13 12
I suspect there is a way of automating this with plyr and numcolwise or ddply. For instance, the following comes close, but I'm not sure what else is needed to finish the job:
library(plyr)
bar2 <- numcolwise(wtd.table)(foo[c("y1","y2","y3")], foo$wcount)
> bar2
y1 y2 y3
1 1, 2, 5 2, 4, 5 3, 4, 5
2 3, 3, 10 2, 1, 13 3, 1, 12
Any thoughts?

Not a plyr answer, but this struck me as a reshaping/aggregating problem that could be tackled straightforwardly using functions from the reshape2 package.
First, melt the dataset, gathering the response values into a single column, which can be named x (the values from y1-y3).
library(reshape2)
dat2 <- melt(foo, id.vars = "wcount", value.name = "x")
Now this can be cast back wide with dcast, using sum as the aggregation function. This puts y1-y3 back as columns with the sum of wcount for each value of x.
# Cast back wide using the values within y1-y3 as response values
# and filling with the sum of "wcount"
dcast(dat2, x ~ variable, value.var = "wcount", fun.aggregate = sum)
Giving
x y1 y2 y3
1 1 3 0 0
2 2 3 2 0
3 3 0 0 3
4 4 0 1 1
5 5 10 13 12
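
For completeness, the column-by-column approach from the question can also be automated without plyr, using lapply() and Reduce(). This is only a sketch, reusing Hmisc::wtd.table() and the foo data from above:
# Build one weighted table per response column, then merge them all on "x"
tabs <- lapply(foo[c("y1", "y2", "y3")],
               function(y) as.data.frame(wtd.table(y, foo$wcount)))
bar <- Reduce(function(a, b) merge(a, b, by = "x", all = TRUE), tabs)
names(bar) <- c("x", "ty1", "ty2", "ty3")
bar[is.na(bar)] <- 0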

You are describing a survey data set that uses replicate weights. See http://asdfree.com/ for many, many examples, but for RECS, do something like this:
library(survey)
x <- read.csv( "http://www.eia.gov/consumption/residential/data/2009/csv/recs2009_public.csv" )
rw <- read.csv( "http://www.eia.gov/consumption/residential/data/2009/csv/recs2009_public_repweights.csv" )
y <- merge( x , rw )
# create a replicate-weighted survey design object
z <- svrepdesign( data = y , weights = ~NWEIGHT , repweights = "brr_weight_[0-9]" )
# now run all of your analyses on the object `z` ...
# see the `survey` package homepage for details
# distribution
svymean( ~ factor( BASEHEAT ) , z )
# mean
svymean( ~ TOTHSQFT , z )
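As an aside (my addition, not part of the original answer), if the goal is a weighted frequency table like the one in the first question, the survey package also provides svytable(), which tabulates a categorical variable using the design's weights:
# weighted table of a categorical variable under the design `z`
svytable( ~ BASEHEAT , z )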

Convert 3D array into list of dataframes

Basically, I want to group a 3D array by its columns, transform each group into a data frame, and bind to it a new column whose value equals the sum of all existing columns.
For example, consider the following 3D array
> (src <- array(1:8, c(2, 2, 2), dimnames=list(c('X1', 'X2'), c('Y1', 'Y2'), 1:2)))
, , 1
Y1 Y2
X1 1 3
X2 2 4
, , 2
Y1 Y2
X1 5 7
X2 6 8
I would like to convert it to
> (dest <- list(Y1=data.frame(X1=c(1, 5), X2=c(2, 6), Y1=c(1, 5)+c(2, 6)),
Y2=data.frame(X1=c(3, 7), X2=c(4, 8), Y2=c(3, 7)+c(4, 8))))
$Y1
X1 X2 Y1
1 1 2 3
2 5 6 11
$Y2
X1 X2 Y2
1 3 4 7
2 7 8 15
I know how to do the transformation for each individual column in the original array, but have no idea how to handle multiple columns simultaneously.
> library(dplyr)
> as.data.frame(t(src[, 'Y1', ])) %>% mutate(Y1=X1+X2)
X1 X2 Y1
1 1 2 3
2 5 6 11
Feel free to use base R, dplyr, data.table, or whatever package you prefer, as long as it's fast enough. In the real-world application, dim(src) tends to be something like c(hundreds, tens, tens of thousands).
We can first apply a data.frame transformation over margin 2 of the transposed array, transposing with aperm(). We then do the same with the column sums. To get the right names "Y1" and "Y2", we take an interim step that lists the column sums as separate one-column data frames. Finally, Map() walks both lists (the X* data frames and the column sums of the Y* slices) element by element, cbind-ing them together.
dest <- Map(cbind,
            apply(aperm(src, c(3, 2, 1)), 2, data.frame),
            {tmp <- data.frame(apply(src, 2, colSums)); list(tmp[1], tmp[2])})
dest
dest
# $Y1
# X1 X2 Y1
# 1 1 2 3
# 2 5 6 11
#
# $Y2
# X1 X2 Y2
# 1 3 4 7
# 2 7 8 15
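A more general base-R sketch (my addition, not from the answer above) that scales to any number of Y columns instead of hard-coding tmp[1] and tmp[2]: loop over the names of the second dimension, transpose each slice as in the question, and append the row sums under the slice's name.
ys <- dimnames(src)[[2]]              # "Y1", "Y2", ...
dest2 <- lapply(setNames(ys, ys), function(y) {
  d <- as.data.frame(t(src[, y, ]))   # rows = third dimension, cols = X1, X2, ...
  d[[y]] <- rowSums(d)                # total of the X columns, named after the slice
  d
})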

Remove rows with zero-variance in R

I have a dataframe of survey responses (rows = participants, columns = question responses). Participants responded to 50 questions on a 5-point Likert scale. I would like to remove participants who answered 5 across all 50 questions, as they have zero variance and are likely to bias my results.
I have seen the nearZeroVar() function from caret, but was wondering if there's a way to do this in base R?
If you had this dataframe:
df <- data.frame(col1 = rep(1, 10),
col2 = 1:10,
col3 = rep(1:2, 5))
You could calculate the variance of each column and keep only those columns whose variance is nonzero, or at least some threshold, which is close to what nearZeroVar() does:
df[, sapply(df, var) != 0]
df[, sapply(df, var) >= 0.3]
If you wanted to exclude rows, you could do something similar, but loop through the rows instead and then subset:
df[apply(df, 1, var) != 0, ]
df[apply(df, 1, var) >= 0.3, ]
Assuming you have data like this:
survey <- data.frame(participants = c(1:10),
q1 = c(1,2,5,5,5,1,2,3,4,2),
q2 = c(1,2,5,5,5,1,2,3,4,3),
q3 = c(3,2,5,4,5,5,2,3,4,5))
You can do the following:
idx <- which(apply(survey[, -1], 1, function(x) all(x == 5)))
survey[-idx,]
This will remove rows where all values equal 5.
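One caveat (my note, not from the original answer): if no participant answered all 5s, idx is empty and survey[-idx, ] drops every row instead of none. Logical indexing avoids that edge case:
all5 <- apply(survey[, -1], 1, function(x) all(x == 5))
survey[!all5, ] # safe even when no row matches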
# Dummy data:
df <- data.frame(
matrix(
sample(1:5, 100000, replace = TRUE),
ncol = 5
)
)
names(df) <- paste0("likert", 1:5)
df$id <- 1:nrow(df)
head(df)
likert1 likert2 likert3 likert4 likert5 id
1 1 2 4 4 5 1
2 5 4 2 2 1 2
3 2 1 2 1 5 3
4 5 1 3 3 2 4
5 4 3 3 5 1 5
6 1 3 3 2 3 6
dim(df)
[1] 20000 6
# Clean out rows where all likert values are 5
df <- df[rowSums(df[grepl("likert", names(df))] == 5) != 5, ]
nrow(df)
[1] 19995
Stealing @AshOfFire's data, with a small modification since you say you only have answers in your columns, not participants:
survey <- data.frame(q1 = c(1,2,5,5,5,1,2,3,4,2),
q2 = c(1,2,5,5,5,1,2,3,4,3),
q3 = c(3,2,5,4,5,5,2,3,4,5))
survey[!apply(survey == survey[[1]], 1, all), ]
# q1 q2 q3
# 1 1 1 3
# 4 5 5 4
# 6 1 1 5
# 10 2 3 5
The equality test builds a data.frame of logicals; with apply() we then keep only the rows that aren't all TRUE.

R: Find the Variance of all Non-Zero Elements in Each Row

I have a dataframe d like this:
ID Value1 Value2 Value3
1 20 25 0
2 2 0 0
3 15 32 16
4 0 0 0
What I would like to do is calculate the variance for each person (ID), based only on non-zero values, and to return NA where this is not possible.
So for instance, in this example the variance for ID 1 would be var(20, 25),
for ID 2 it would return NA because you can't calculate a variance from a single entry, for ID 3 the var would be var(15, 32, 16), and for ID 4 it would again return NA because it has no nonzero values to calculate a variance on.
How would I go about this? I currently have the following (incomplete) code, but this might not be the best way to go about it:
len <- nrow(d)
variances <- numeric(len)
for (i in 1:len) {
  # get all nonzero values in the ith row of the data into a vector nonzerodat here
  currentvar <- var(nonzerodat)
  variances[i] <- currentvar
}
Note this is a toy example, but the dataset I'm actually working with has over 40 different columns of values to calculate variance on, so something that easily scales would be great.
Data <- data.frame(ID = 1:4, Value1 = c(20, 2, 15, 0), Value2 = c(25, 0, 32, 0), Value3 = c(0, 0, 16, 0))
var_nonzero <- function(x) var(x[x != 0])
apply(Data[, -1], 1, var_nonzero)
[1] 12.5 NA 91.0 NA
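Since the real dataset has over 40 columns, a fully vectorized base-R variant may also be worth sketching (my addition, using the variance identity var = (sum(x^2) - sum(x)^2/n) / (n - 1) instead of per-row apply() calls):
m  <- as.matrix(Data[, -1])
n  <- rowSums(m != 0)           # nonzero entries per row
s1 <- rowSums(m)                # sum of values (zeros contribute nothing)
s2 <- rowSums(m^2)              # sum of squared values
v  <- (s2 - s1^2 / n) / (n - 1)
v[n < 2] <- NA                  # variance undefined for 0 or 1 values
v
# [1] 12.5   NA 91.0   NA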
This seems overwrought, but it works, and it gives you back an object with the ids attached to the statistics:
library(reshape2)
library(dplyr)
variances <- df %>%
  melt(., id.vars = "id") %>%
  group_by(id) %>%
  summarise(variance = var(value[value != 0]))
Here's the toy data I used to test it:
df <- data.frame(id = seq(4), X1 = c(3, 0, 1, 7), X2 = c(10, 5, 0, 0), X3 = c(4, 6, 0, 0))
> df
id X1 X2 X3
1 1 3 10 4
2 2 0 5 6
3 3 1 0 0
4 4 7 0 0
And here's the result:
id variance
1 1 14.33333
2 2 0.50000
3 3 NA
4 4 NA

Order data frame by columns with different calling schemes

Say I have the following data frame:
df <- data.frame(x1 = c(2, 2, 2, 1),
x2 = c(3, 3, 2, 1),
let = c("B", "A", "A", "A"))
df
x1 x2 let
1 2 3 B
2 2 3 A
3 2 2 A
4 1 1 A
If I want to order df by x1, then x2 then let, I do this:
df2 <- df[with(df, order(x1, x2, let)), ]
df2
x1 x2 let
4 1 1 A
3 2 2 A
2 2 3 A
1 2 3 B
However, x1 and x2 have actually been saved as an id <- c("x1", "x2") vector earlier in the code, which I use for other purposes.
So my problem is that I want to reference id instead of x1 and x2 in my order() call, but unfortunately anything like df[order(df[id], df$let), ] results in an "argument lengths differ" error.
From what I can tell (and this has been addressed in another SO thread), the problem is that length(df[id]) == 2 while length(df$let) == 4.
I have been able to work around it like this:
df3 <- df[order(df[, id[1]], df[, id[2]], df[, "let"]), ]
df3
x1 x2 let
4 1 1 A
3 2 2 A
2 2 3 A
1 2 3 B
But it looks ugly and depends on knowing the length of id.
Is there a more elegant solution to sorting my data frame by id then let?
I would suggest using do.call(order, ...), combining id and "let" with c(). Since a data frame is a list of its columns, do.call() passes each column of df[c(id, "let")] to order() as a separate argument:
id <- c("x1", "x2")
df[do.call(order, df[c(id, "let")]), ]
# x1 x2 let
# 4 1 1 A
# 3 2 2 A
# 2 2 3 A
# 1 2 3 B
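As a follow-up, if some columns need to sort in the opposite direction, order() accepts a vector for decreasing when method = "radix" (since R 3.3), so the same do.call() pattern still works. A sketch, sorting x1 descending and the rest ascending:
args <- c(df[c(id, "let")],
          list(decreasing = c(TRUE, FALSE, FALSE), method = "radix"))
df[do.call(order, args), ]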

Groupby bins and aggregate in R

I have data like (a,b,c)
a b c
1 2 1
2 3 1
9 2 2
1 6 2
where the range of 'a' is divided into n (say 3) equal parts and an aggregate function (say max) summarizes the b values, also grouped by 'c'.
So the output looks like
a_bin b_m(c=1) b_m(c=2)
1-3 3 6
4-6 NaN NaN
7-9 NaN 2
The result is M x N, where M is the number of a bins and N is the number of unique c values (or the full range).
How do I approach this? Can any R package help with this?
A combination of aggregate, cut and reshape seems to work:
df <- data.frame(a = c(1,2,9,1),
b = c(2,3,2,6),
c = c(1,1,2,2))
breaks <- c(0, 3, 6, 9)
# Aggregate data
ag <- aggregate(df$b, FUN = max,
                by = list(a = cut(df$a, breaks, include.lowest = TRUE), c = df$c))
# Reshape data
res <- reshape(ag, idvar="a", timevar="c", direction="wide")
There are probably easier ways.
If your dataset is dat:
res <- sapply(split(dat[, -3], dat$c), function(x) {
  a_bin <- with(x, cut(a, breaks = c(1, 3, 6, 9), include.lowest = TRUE,
                       labels = c("1-3", "4-6", "7-9")))
  c(by(x$b, a_bin, FUN = max))
})
res1 <- setNames(data.frame(row.names(res), res),
c("a_bin", "b_m(c=1)", "b_m(c=2)"))
row.names(res1) <- 1:nrow(res1)
res1
a_bin b_m(c=1) b_m(c=2)
1 1-3 3 6
2 4-6 NA NA
3 7-9 NA 2
I would use a combination of data.table and reshape2, which are both optimized for speed.
The output won't return the unused bins.
temp <- data.frame(a = c(1, 2, 9, 1), b = c(2, 3, 2, 6), c = c(1, 1, 2, 2))
v <- c(1, 4, 7, 10) # bin breaks
temp$int <- findInterval(temp$a, v)
library(data.table)
temp <- setDT(temp)[, list(b_m = max(b)), by = c("c", "int")]
library(reshape2)
temp <- dcast.data.table(temp, int ~ c, value.var = "b_m")
## colnames(temp) <- c("a_bin", "b_m(c=1)", "b_m(c=2)") # optional, for a prettier table
## temp$a_bin <- c("1-3", "7-9")                        # optional, for a prettier table
## a_bin b_m(c=1) b_m(c=2)
## 1 1-3 3 6
## 2 7-9 NA 2
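For a compact base-R alternative (my addition) that keeps the empty 4-6 bin as a row of NAs, tapply() with two grouping factors builds the full bins-by-c matrix directly:
dat <- data.frame(a = c(1, 2, 9, 1), b = c(2, 3, 2, 6), c = c(1, 1, 2, 2))
a_bin <- cut(dat$a, breaks = c(0, 3, 6, 9), labels = c("1-3", "4-6", "7-9"))
tapply(dat$b, list(a_bin, dat$c), max)
#      1  2
# 1-3  3  6
# 4-6 NA NA
# 7-9 NA  2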
