Identify missing values in an R data table - r

I want to identify missing values in an R data table
In order to get the id, column "id" of each column in your dataset
I use apply(is.na(dt_tb), 2, which); this script tells me the position, but I would like to replace the position with the id number (id column)
dt_tb <- data.table(id = c(5, 6, 7, 15),
coll = c("this", NA,"NA", "text"),
cyy = c(TRUE, FALSE, TRUE, TRUE),
hhh = c(2.5, 4.2, 3.2, NA),
stringsAsFactors = FALSE)
apply(is.na(dt_tb), 2, which)
example
$id
integer(0)
$coll
[1] 2
$cyy
integer(0)
$hhh
[1] 4
I want
id
integer(0)
coll
6 7
cyy
integer(0)
hhh
15

You can use unlist to get id from dt_tb$id and relist to come back to the origin structure.
i <- apply(is.na(dt_tb) | dt_tb=="NA", 2, which)
relist(dt_tb$id[unlist(i)], i)
#$id
#numeric(0)
#
#$coll
#[1] 6 7
#
#$cyy
#numeric(0)
#
#$hhh
#[1] 15

You can use which with arr.ind = TRUE to get row and column index where NA or "NA" is present. You can then use split to get a named list.
mat <- which(is.na(dt_tb) | dt_tb == 'NA', arr.ind = TRUE)
split(dt_tb$id[mat[, 1]], names(dt_tb)[mat[, 2]])
#$coll
#[1] 6 7
#$hhh
#[1] 15

you can use complete.cases(dt_tb)
install.packages("devtools")
install.packages("data.table")
library(devtools)
library(data.table)
dt_tb <- data.table(id = c(5, 6, 7, 15),
coll = c("this", NA,"NA", "text"),
cyy = c(TRUE, FALSE, TRUE, TRUE),
hhh = c(2.5, 4.2, 3.2, NA),
stringsAsFactors = FALSE)
complete.cases(dt_tb) # returns: TRUE FALSE TRUE FALSE
which(!complete.cases(dt_tb)) # return row numbers: 2 4
dt_tb[!complete.cases(dt_tb),] # returns: rows with missing data/na's
update:
dt_tb[which(!complete.cases(dt_tb)),1] #to return ID's
id
1: 6
2: 15

Related

R: if statement inside function (lapply)

I have a large list of dataframes with environmental variables from different localities. For each of the dataframes in the list, I want to summarize the values across locality (= group measurements of the same locality into one), using the name of the dataframes as a condition for which variables need to be summarized. For example, for a dataframe with the name 'salinity' I want to only summarize across salinity, and not the other environmental variables. Note that the different dataframes contain data from different localities, so I cannot simply merge them into one dataframe.
Let's do this with a dummy dataset:
#create list of dataframes
df1 = data.frame(locality = c(1, 2, 2, 5, 7, 7, 9),
Temp = c(14, 15, 16, 18, 20, 18, 21),
Sal = c(16, NA, NA, 12, NA, NA, 9))
df2 = data.frame(locality = c(1, 1, 3, 6, 8, 9, 9),
Temp = c(1, 2, 4, 5, 0, 2, -1),
Sal = c(18, NA, NA, NA, 36, NA, NA))
df3 = data.frame(locality = c(1, 3, 4, 4, 5, 5, 9),
Temp = c(14, NA, NA, NA, 17, 18, 21),
Sal = c(16, 8, 24, 23, 11, 12, 9))
df4 = data.frame(locality = c(1, 1, 1, 4, 7, 8, 10),
Temp = c(1, NA, NA, NA, NA, 0, 2),
Sal = c(18, 17, 13, 16, 20, 36, 30))
df_list = list(df1, df2, df3, df4)
names(df_list) = c("Summer_temperature", "Winter_temperature",
"Summer_salinity", "Winter_salinity")
Next, I used lapply to summarize environmental variables:
#select only those dataframes in the list that have either 'salinity' or 'temperature' in the dataframe names
df_sal = df_list[grep("salinity", names(df_list))]
df_temp = df_list[grep("temperature", names(df_list))]
#use apply to summarize salinity or temperature values in each dataframe
##salinity
df_sal2 = lapply(df_sal, function(x) {
x %>%
group_by(locality) %>%
summarise(Sal = mean(Sal, na.rm = TRUE))
})
##temperature
df_temp2 = lapply(df_temp, function(x) {
x %>%
group_by(locality) %>%
summarise(Temp = mean(Temp, na.rm = TRUE))
})
Now, this code is repetitive, so I want to downsize this by combining everything into one function. This is what I tried:
df_env = lapply(df_list, function(x) {
if (grepl("salinity", names(x)) == TRUE) {x %>% group_by(locality) %>% summarise(Sal = mean(Sal, na.rm = TRUE))}
if (grepl("temperature", names(x)) == TRUE) {x %>% group_by(locality) %>% summarise(Temp = mean(Temp, na.rm = TRUE))}
})
But I am getting the following output:
$Summer_temperature
NULL
$Winter_temperature
NULL
$Summer_salinity
NULL
$Winter_salinity
NULL
And the following warning messages:
Warning messages:
1: In if (grepl("salinity", names(x)) == TRUE) { :
the condition has length > 1 and only the first element will be used
2: In if (grepl("temperature", names(x)) == TRUE) { :
the condition has length > 1 and only the first element will be used
3: In if (grepl("salinity", names(x)) == TRUE) { :
the condition has length > 1 and only the first element will be used
4: In if (grepl("temperature", names(x)) == TRUE) { :
the condition has length > 1 and only the first element will be used
5: In if (grepl("salinity", names(x)) == TRUE) { :
the condition has length > 1 and only the first element will be used
6: In if (grepl("temperature", names(x)) == TRUE) { :
the condition has length > 1 and only the first element will be used
7: In if (grepl("salinity", names(x)) == TRUE) { :
the condition has length > 1 and only the first element will be used
8: In if (grepl("temperature", names(x)) == TRUE) { :
the condition has length > 1 and only the first element will be used
Now, I read here that this warning message can potentially be solved by using ifelse. However, in the final dataset I will have more than two environmental variables, so I will have to add many more if statements - for this reason I believe ifelse is not a solution here. Does anyone have an elegant solution to my problem? I am new to using both functions and lapply, and would appreciate any help you can give me.
EDIT:
I tried using the else if option suggested in one of the answers, but this still returns NULL values. I also tried the return and assigning output to x but both have the same problem as below code - any ideas?
#else if
df_env = lapply(df_list, function(x) {
if (grepl("salinity", names(x)) == TRUE) {
x %>% group_by(locality) %>%
summarise(Sal = mean(Sal, na.rm = TRUE))}
else if (grepl("temperature", names(x)) == TRUE) {
x %>% group_by(locality) %>%
summarise(Temp = mean(Temp, na.rm = TRUE))}
})
df_env
What I think is happening is that my if argument does not get passed to the summarize function, so nothing is being summarized.
Several things going on here, including
as akrun said, if statements must have a condition with a length of 1. Yours are not.
grepl("locality", names(df1))
# [1] TRUE FALSE FALSE
That must be reduced so that it is always exactly length 1. Frankly, grepl is the wrong tool here, since technically a column named notlocality would match and then it would error. I suggest you change to
"locality" %in% names(df1)
# [1] TRUE
You need to return something. Always. You shifted from if ...; if ...; to if ... else if ..., which is a good start, but really if you meet neither condition, then nothing is returned. I suggest one of the following: either add one more } else x, or reassign as if (..) { x <- x %>% ...; } else if (..) { x <- x %>% ... ; } and then end the anon-func with just x (to return it).
However, I think ultimately the problem is that you are looking for "temperature" or "salinity" which are in the names of the list-objects, not in the frames themselves. For instance, your reference to names(x) is returning c("locality", "Temp", "Sal"), the names of the frame x itself.
I think this is what you want?
Map(function(x, nm) {
if (grepl("salinity", nm)) {
x %>%
group_by(locality) %>%
summarize(Sal = mean(Sal, na.rm = TRUE))
} else if (grepl("temperature", nm)) {
x %>%
group_by(locality) %>%
summarize(Temp = mean(Temp, na.rm = TRUE))
} else x
}, df_list, names(df_list))
# $Summer_temperature
# # A tibble: 5 x 2
# locality Temp
# <dbl> <dbl>
# 1 1 14
# 2 2 15.5
# 3 5 18
# 4 7 19
# 5 9 21
# $Winter_temperature
# # A tibble: 5 x 2
# locality Temp
# <dbl> <dbl>
# 1 1 1.5
# 2 3 4
# 3 6 5
# 4 8 0
# 5 9 0.5
# $Summer_salinity
# # A tibble: 5 x 2
# locality Sal
# <dbl> <dbl>
# 1 1 16
# 2 3 8
# 3 4 23.5
# 4 5 11.5
# 5 9 9
# $Winter_salinity
# # A tibble: 5 x 2
# locality Sal
# <dbl> <dbl>
# 1 1 16
# 2 4 16
# 3 7 20
# 4 8 36
# 5 10 30

How does the table and $freq function work in R

I want a function for the mode of a vector. Abhiroop Sarkar's answer to This question works, but I want to understand why.
Here is the code
# Statistical mode of a vector: the value(s) occurring most often.
# Returns a factor (possibly of length > 1 when there are ties), because
# table() names are carried through data.frame() as the factor column Var1.
Mode <- function(x) {
  freq_table <- data.frame(table(x))
  is_top <- freq_table$Freq == max(freq_table$Freq)
  freq_table[is_top, 1]
}
1) Why do we need to put the table in a data frame?
2) in this line
y[y$Freq == max(y$Freq),1]
what does the y$Freq do? is frequency a default columns in the table?
When we convert a table output to data.frame, it creates a two column data.frame
set.seed(24)
v1 <- table(sample(1:5, 100, replace = TRUE))
y <- data.frame(v1)
y
# Var1 Freq
#1 1 19
#2 2 24
#3 3 22
#4 4 16
#5 5 19
The first column 'Var1' is the names of the frequency output from table and the 'Freq' is the actual frequency of those names
y[y$Freq == max(y$Freq), 1]
#[1] 2
#Levels: 1 2 3 4 5
Now, we are subsetting the first column 'Var1' based on the max value of 'Freq', and it returns a vector because of the drop = TRUE in [ when there is a single column
If we want to return a data.frame with a single column, add drop = FALSE at the end
y[y$Freq == max(y$Freq), 1, drop = FALSE]
# Var1
#2 2
Regarding the default name Freq, it is created from the as.data.frame.table method
as.data.frame.table
function (x, row.names = NULL, ..., responseName = "Freq", stringsAsFactors = TRUE,
sep = "", base = list(LETTERS))
{
ex <- quote(data.frame(do.call("expand.grid", c(dimnames(provideDimnames(x,
sep = sep, base = base)), KEEP.OUT.ATTRS = FALSE, stringsAsFactors = stringsAsFactors)),
Freq = c(x), row.names = row.names))
names(ex)[3L] <- responseName
eval(ex)
}

Find the latest sequence of TRUEs with length equal or greater than n

I've got a data like below:
library(dplyr)
ex <- data.frame(bool = c(rep(FALSE, 2), rep(TRUE, 3), rep(FALSE, 2), rep(TRUE, 5),
FALSE, FALSE, rep(TRUE, 6), FALSE, FALSE, FALSE)) %>%
mutate(seq = data.table::rleid(bool)) %>%
group_by(seq) %>%
mutate(n = n()) %>%
ungroup() %>%
mutate(expected_output = c(4, 4, NA, NA, NA, 4, 4, rep(NA,5), 4, 4, rep(NA, 6), rep(6, 3)))
For every FALSE I need to find a latest sequence of TRUE with length at least of 4. But if there's no such a sequence before (like for rows 1:2 or 6:7), we should check forward, i.e. find the first sequence of length 4 or more that appears after the observation.
The last column of ex contains expected output. How can I do that (at best with tidyverse)?
Edit
A solution using tidyverse would still be much appreciated.
The following should work using base R.
# NOTE(review): this function is never assigned a name as posted, so it must
# be bound (e.g. f <- function(...)) before it can be called.
#
# For every FALSE position in the logical vector `col`, return the run index
# (in rle order) of the nearest preceding run of TRUEs with length >= min_seq;
# FALSE positions before the first such run are mapped forward to the first
# qualifying run. TRUE positions are set to NA.
# Assumes at least one qualifying TRUE run exists — TODO confirm: with no
# qualifying run, `sn` is empty and the rep()/indexing below returns a
# wrong-length result instead of all-NA.
function(col,min_seq =4)
{
# last index of each run: positions where the value changes, plus the final index
end = c(which(c(col[-1],NA)!=col),length(col))
# run lengths, derived from consecutive run-end positions
num = diff(c(0,end))
# first index of each run
start = end-num+1
# run sequence numbers (1, 2, ... in rle order)
seq_n = seq_along(start)
# the logical value of each run (TRUE run or FALSE run)
v=col[end]
# qualifying runs: TRUE runs at least min_seq long
accept = num >= min_seq & v
st = start[accept]
sn = seq_n[accept]
en = end[accept]
# en_ defines the boundaries over which each qualifying run's number is
# spread; resetting the first boundary to 1 makes positions before the
# first qualifying run map forward onto it
en_ = en
en_[1]=1
place = rep(sn, diff(c(en_,length(col) + 1 ))) # If row with start of sequence is wanted instead of sequence number sn can be replaced with st
# TRUE positions carry no answer — blank them out
place[col]=NA
return(place)
}
You can do:
define function: (robust and with error handling)
# For every FALSE entry in `vec`, return the rle run index of the latest
# preceding run of TRUEs with length >= min_rep, falling back to the first
# qualifying run that follows when none precedes. TRUE entries are NA.
# Input is validated: `vec` must be non-empty and contain only 0/1 (or
# FALSE/TRUE) values; returns all-NA for the FALSE runs when no run qualifies.
fun1<-
function(vec, min_rep = 4) {
stopifnot(length(vec)>0, all(vec %in% 0:1))
# 2-row matrix: row 1 = run lengths, row 2 = run values (from rle)
runL <- do.call(rbind,rle(vec))
lngth<- ncol(runL)
# row 3 (seq): run index; row 4 (seq2): NA^value — 1 for FALSE runs
# (NA^0 == 1), NA for TRUE runs, i.e. a slot only FALSE runs can fill
runL <- rbind(runL, seq = 1:lngth, seq2 = NA^runL[2,])
# keep the run index only for qualifying runs (TRUE and long enough)
runL[3,] <- ifelse(!runL[2,]|runL[1,]<min_rep, NA, runL[3,])
cases <- na.omit(runL[3,])
if(length(cases)>0) {
# backward pass (descending): FALSE runs before a qualifying run get the
# nearest FOLLOWING qualifying run index (earlier iterations overwrite)
for(i in rev(cases)) {
runL[4,1:i][!is.na(runL[4,1:i])] <- i
}
# forward pass (ascending): FALSE runs after a qualifying run get the
# nearest PRECEDING qualifying run index, overriding the backward pass
for(i in cases) {
runL[4,i:lngth][!is.na(runL[4,i:lngth])] <- i
}
} else { runL[4,] <- NA }
# expand per-run answers back to one entry per element of `vec`
return(rep(runL[4,],runL[1,]))
}
call function:
vec = c(rep(FALSE, 2), rep(TRUE, 3), rep(FALSE, 2), rep(TRUE, 5),
FALSE, FALSE, rep(TRUE, 6), FALSE, FALSE, FALSE)
cbind(vec,fun1(vec))
vec = rep(T,5)
cbind(vec,fun1(vec))
vec = rep(F,5)
cbind(vec,fun1(vec))
vec = c(rep(F,5),T)
cbind(vec,fun1(vec))
vec = c()
cbind(vec,fun1(vec))
vec = 1:3
cbind(vec,fun1(vec))
If OP strictly does not want a data.table solution, I can take down this post.
Here is a possible data.table approach:
#aggregate the dataset by bool and rleid
agg <- DT[, .(rn=.GRP, N=.N), by=.(bool, seq=rleid(bool))]
#extract all the TRUE sequences with length >= 4
true4s <- agg[(bool) & N >= 4L]
#for rows that are FALSE
agg[(!bool), expOut := {
prev <- NA
#find the previous sequence of TRUEs by using data.table non-equi join
#(a rolling join will work too here)
#in addition, do the match in reverse so that we can fill NA with prev value
ans <- true4s[.SD[order(-rn)], {
if (.N > 0L) {
prev <- seq[.N]
}
prev
#for each row in i (see ?data.table for i argument and also ?.EACHI)
#non equi join where earlier row in x to be join with later row in i
}, by=.EACHI, on=.(rn<rn)]$V1
#for the rolling version
#}, by=.EACHI, on=.(rn), roll=Inf]$V1
rev(ans)
}]
#add expected output to original dataset
DT[, expected_output := inverse.rle(list(values=agg$expOut, lengths=agg$N))]
output:
bool expected_output
1: FALSE 4
2: FALSE 4
3: TRUE NA
4: TRUE NA
5: TRUE NA
6: FALSE 4
7: FALSE 4
8: TRUE NA
9: TRUE NA
10: TRUE NA
11: TRUE NA
12: TRUE NA
13: FALSE 4
14: FALSE 4
15: TRUE NA
16: TRUE NA
17: TRUE NA
18: TRUE NA
19: TRUE NA
20: TRUE NA
21: FALSE 6
22: FALSE 6
23: FALSE 6
bool expected_output
data:
library(data.table)
DT <- data.table(bool = c(rep(FALSE, 2), rep(TRUE, 3), rep(FALSE, 2), rep(TRUE, 5),
FALSE, FALSE, rep(TRUE, 6), FALSE, FALSE, FALSE))

Parse unexpected symbol error in function applied over list

I'm trying to check the "pin" numbers of cases with missing data for each variable of interest in my dataset.
Here are some fake data:
c <- data.frame(pin = c(1, 2, 3, 4), type = c(1, 1, 2, 2), v1 = c(1, NA, NA,
NA), v2 = c(NA, NA, 1, 1))
I wrote a function "m.pin" to do this:
# NOTE(review): fragile by design — this reconstructs source text of the
# caller's argument with deparse(substitute(x)) and re-evaluates it via
# eval(parse(...)). It only works when the argument literally contains a
# "[...]" subset expression (e.g. c$v1[c$type == 1]); called through
# lapply/loops the argument deparses differently (e.g. "x[[i]]"), which is
# exactly the "unexpected ']'" parse error reported in this question.
# eval(parse(text = ...)) is almost always the wrong tool; see the answers
# below for string-based column indexing instead.
m.pin <- function(x, data = "c", return = "$pin") {
# text of the subsetting part of the caller's expression, e.g. "[c$type == 1]"
sect <- gsub("^.*\\[", "\\[", deparse(substitute(x)))
# rebuild and evaluate e.g. c$pin[c$type == 1] to get matching id values
vect <- eval(parse(text = paste(data, return, sect, sep = "")))
# keep only the ids at positions where x is missing
return(vect[is.na(x)])
}
And I use it like so:
m.pin(c$v1[c$type == 1])
[1] 2
I wrote a function to apply "m.pin" over a list of variables to only return pins with missing data:
# Apply `fun` to every element of the list `x` and keep only the
# non-empty results (elements for which `fun` found something).
#
# x   list of vectors to inspect
# fun lookup function applied to each element (defaults to m.pin)
return.m.pin <- function(x, fun = m.pin) {
  results <- lapply(x, fun)
  Filter(function(v) length(v) > 0, results)
}
But when I apply it, I get this error:
l <- lst(c$v1[c$type == 1], c$v2[c$type == 2])
return.m.pin(l)
Error in parse(text = paste(data, return, sect, sep = "")) :
<text>:1:9: unexpected ']'
1: c$pin[i]]
^
How can I rewrite my function(s) to avoid this issue?
Many thanks!
Please see Gregor's comment for the most critical issues with your code (to add: don't use return as a variable name as it is the name of a base R function).
It's not clear to me why you want to define a specific function m.pin, nor what you ultimately are trying to do, but I am assuming this is a critical design component.
Rewriting m.pin as
# Row indices of `df` where column "type" equals `type` AND the column
# named by `vcol` is NA.
m.pin <- function(df, type, vcol) {
  type_match <- df[, "type"] == type
  value_missing <- is.na(df[, vcol])
  which(type_match & value_missing)
}
we get
m.pin(df, 1, "v1")
#[1] 2
Or to identify rows with NA in "v1" for all types
lapply(unique(df$type), function(x) m.pin(df, x, "v1"))
#[[1]]
#[1] 2
#
#[[2]]
#[1] 3 4
Update
In response to Gregor's comment, perhaps this is what you're after?
by(df, df$type, function(x)
list(v1 = x$pin[which(is.na(x$v1))], v2 = x$pin[which(is.na(x$v2))]))
# df$type: 1
# $v1
# [1] 2
#
# $v2
# [1] 1 2
#
# ------------------------------------------------------------
# df$type: 2
# $v1
# [1] 3 4
#
# $v2
# integer(0)
This returns a list of the pin numbers for every type and NA entries in v1/v2.
Sample data
df <- data.frame(
pin = c(1, 2, 3, 4),
type = c(1, 1, 2, 2),
v1 = c(1, NA, NA, NA),
v2 = c(NA, NA, 1, 1))
I would suggest rewriting like this (if this approach is to be taken at all). I call your data d because c is already the name of an extremely common function.
# string column names, pass in the data frame as an object
# means no need for eval, parse, substitute, etc.
# Return the values of `return_col` for rows where `na_col` is NA,
# optionally restricted to rows where `filter_col` equals `filter_val`.
#
# data       data frame to inspect
# na_col     string name of the column checked for NA
# return_col string name of the column whose values are returned ("pin")
# filter_col optional string name of a column to filter on
# filter_val optional value that `filter_col` must equal
foo <- function(data, na_col, return_col = "pin", filter_col, filter_val) {
  # `&&`, not `&`: an `if` condition must be length 1 (R >= 4.3 errors on
  # longer `&&` operands), and missing() is always scalar anyway; `&&` also
  # short-circuits so filter_val isn't probed when filter_col is missing.
  if (!missing(filter_col) && !missing(filter_val)) {
    data <- data[data[, filter_col] == filter_val, ]
  }
  data[is.na(data[, na_col]), return_col]
}
# working on the whole data frame
foo(d, na_col = "v1", return_col = "pin")
# [1] 2 3 4
# passing in a subset of the data
foo(d[d$type == 1, ], "v1", "pin")
# [1] 2
# using function arguments to subset the data
foo(d, "v1", "pin", filter_col = "type", filter_val = 1)
# [1] 2
# calling it with changing arguments:
# you could use `Map` or `mapply` to be fancy, but this for loop is nice and clear
inputs = data.frame(na_col = c("v1", "v2"), filter_val = c(1, 2), stringsAsFactors = FALSE)
result = list()
for (i in 1:nrow(inputs)) {
result[[i]] = foo(d, na_col = inputs$na_col[i], return_col = "pin",
filter_col = "type", filter_val = inputs$filter_val[i])
}
result
# [[1]]
# [1] 2
#
# [[2]]
# numeric(0)
A different approach I would suggest is melting your data into a long format, and simply taking a subset of the NA values, hence getting all combinations of type and the v* columns that have NA values at once. Do this once, and no function is needed to look up individual combinations.
d_long = reshape2::melt(d, id.vars = c("pin", "type"))
library(dplyr)
d_long %>% filter(is.na(value)) %>%
arrange(variable, type)
# pin type variable value
# 1 2 1 v1 NA
# 2 3 2 v1 NA
# 3 4 2 v1 NA
# 4 1 1 v2 NA
# 5 2 1 v2 NA

how to handle vector as element in data frame? [R]

How do I store a few numbers in one element of data frame?
For example I want a summary of my data, including the class and values in each column.
dat = data.frame(STATE = 1:5,
MONTH = 1:5)
should yield:
var class values
STATE numeric c(1,2,3,4,5)
MONTH numeric c(1,2,3,4,5)
Now I try:
dat = data.frame(STATE = 1:5,
MONTH = 1:5)
vars = data.frame(var = colnames(dat), class = NA, values = NA,
stringsAsFactors = F)
vars$class = sapply(dat, class)
vars
# var class values
# 1 STATE integer NA
# 2 MONTH integer NA
vars$values = sapply(dat, function(x) unique(x))
# Error in `$<-.data.frame`(`*tmp*`, "values", value = c(1L, 2L, 3L, 4L, :
# replacement has 5 rows, data has 2
# UPDATE: #jMathew 's answer:
vars$values = sapply(dat, function(x) list(unique(x)))
vars
# var class values
# 1 STATE integer 1, 2, 3, 4, 5
# 2 MONTH integer 1, 2, 3, 4, 5
It doesn't work because unique(dat$STATE) = c(1,2,3,4,5), and R thinks it should be 5 elements in data frame, and can't fit in one element.
But the above code works for many data sets I work with, e.g.:
library(foreign)
dat = read.xport('LLCP2013.XPT')
# download from http://www.cdc.gov/brfss/annual_data/2013/files/LLCP2013XPT.ZIP
dat = dat[1:5, 1:3]
dat
# X_STATE FMONTH IDATE
# 1 1 1 01092013
# 2 1 1 01192013
# 3 1 1 01192013
# 4 1 1 01112013
# 5 1 2 02062013
vars = data.frame(var = colnames(dat), class = NA, values = NA,
stringsAsFactors = F)
vars$class = sapply(dat, class)
vars$values = sapply(dat, function(x) unique(x))
vars
# var class values
# 1 X_STATE numeric 1
# 2 FMONTH numeric 1, 2
# 3 IDATE factor 16, 36, 20, 70
# UPDATE:
class(vars[3,3])
# [1] "list"
# #jMathew was right, it was somehow coerced to list
Can somebody tells me why this works in the second case but not in the first? Thanks
I suspect that in your second case, the vector is being coerced to a list
Try, this on your first example
vars$values = sapply(dat, function(x) list(unique(x)))
We could try
do.call(rbind,lapply(seq_along(dat), function(i)
data.frame(var=names(dat)[i], class=class(dat[,i]),
values= sprintf('c(%s)', toString(unique(dat[,i]))))))
# var class values
#1 STATE integer c(1, 2, 3, 4, 5)
#2 MONTH integer c(1, 2, 3, 4, 5)

Resources