Syntax in R for prediction

How do I perform this loop efficiently in R? My dataset has over 100,000 rows, so I need syntax that produces the same result as the for loop below without iterating row by row. The goal is to use the previous row's value to predict column d for every subsequent row where data are not available.
# df is a data frame with columns b, c, d, p.
d <- c(1, 2, 4, NA, NA)
b <- c(1, 1, 1, 2, 2)
c <- c(1, 1, 1, 1, 1)
df <- data.frame(b, c, d)
df$p <- c(0.1, 0.2, 0.1, 0.1, 0.3)

for (i in 1:(nrow(df) - 1)) {
  if (df$b[i + 1] > df$c[i + 1]) {
    df$d[i + 1] <- df$d[i] * (1 - df$p[i + 1])
  } else {
    df$d[i + 1] <- df$d[i + 1]  # no-op: keep the existing value
  }
}

This vectorized code is much faster. Note that it stores each row's one-step prediction from the previous row in a new column a and always reads the original d, so updates do not chain: row 5 below stays NA, whereas the loop, feeding its freshly computed d[4] = 3.6 forward, would give 2.52.
inx <- seq_len(nrow(df))[-1]  # row indices 2..n (seq_along(df$a) would be empty: a doesn't exist yet)
b_greater <- df$b[inx] > df$c[inx]
df$a[inx] <- df$d[inx - 1]
df$a[inx][b_greater] <- df$d[inx - 1][b_greater] * (1 - df$p[inx][b_greater])
df
# b c d a p
#1 1 1 1 NA 0.1
#2 1 1 2 1.0 0.2
#3 1 1 4 2.0 0.1
#4 2 1 NA 3.6 0.1
#5 2 1 NA NA 0.3
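If the loop's fully chained semantics are what you need on the real data, here is a sketch using base Reduce() with accumulate = TRUE; it is still a sequential fold, but it avoids growing a data frame row by row and matches the loop's output (d_chained is a made-up column name):

df$d_chained <- Reduce(
  function(prev, i) {
    # carry the newly computed value forward whenever b > c
    if (df$b[i] > df$c[i]) prev * (1 - df$p[i]) else df$d[i]
  },
  seq_len(nrow(df))[-1],
  init = df$d[1],
  accumulate = TRUE
)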

Inserting NA rows when missing data

I have a data set with some missing values in a sequence:
Seq <- c(1, 2, 3, 4, 6, 7, 10, 11, 12, 18, 19, 20)
Data <- c(3, 4, 5, 4, 3, 2, 1, 2, 3, 5, 4, 3)
DF <- data.frame(Seq, Data)
I'd like to add rows to this data set approximating where I have missing values, and filling in the data with NA. So any time where I have a gap larger than 2, I add a NA row (or multiple rows if the gap is large). The result would look something like this:
NewSeq <- c(1, 2, 3, 4, 6, 7, 8.5, 10, 11, 12, 14, 16, 18, 19, 20)
NewData <- c(3, 4, 5, 4, 3, 2, NA, 1, 2, 3, NA, NA, 5, 4, 3)
NewDF <- data.frame(NewSeq, NewData)
So I ignore gaps of 2 or less, but I add an NA row any time there is a gap greater than 2. If there is still a gap greater than 2 after adding an NA row, I add another, until the gap is filled.
Not very elegant, but this is how I would do it:
Seq <- c(1, 2, 3, 4, 6, 7, 10, 11, 12, 18, 19, 20)
Data <- c(3, 4, 5, 4, 3, 2, 1, 2, 3, 5, 4, 3)
DF <- data.frame(Seq, Data)

first <- DF$Seq
second <- DF$Data
for (i in length(first):2) {
  gap <- first[i] - first[i - 1]
  if (gap > 2) {
    # number of NA rows needed to bring every sub-gap down to 2 or less
    steps <- ifelse(gap %% 2 == 1, gap %/% 2, (gap %/% 2) - 1)
    new_values_gap <- gap / (steps + 1)
    new_values <- vector('numeric')
    for (j in 1:steps) {
      new_values <- c(new_values, first[i - 1] + j * new_values_gap)
    }
    first <- c(first[1:(i - 1)], new_values, first[i:length(first)])
    second <- c(second[1:(i - 1)], rep(NA, length(new_values)), second[i:length(second)])
  }
}
NewDF <- data.frame(NewSeq = first, NewData = second)
> NewDF
## NewSeq NewData
## 1 1.0 3
## 2 2.0 4
## 3 3.0 5
## 4 4.0 4
## 5 6.0 3
## 6 7.0 2
## 7 8.5 NA
## 8 10.0 1
## 9 11.0 2
## 10 12.0 3
## 11 14.0 NA
## 12 16.0 NA
## 13 18.0 5
## 14 19.0 4
## 15 20.0 3
Seems to work for your example, but I'm not sure how it will perform on data I haven't seen. You'll need to adjust the intervals in the ifelse statement depending on how you want to account for different gap sizes.
Seq <- c(1, 2, 3, 4, 6, 7, 10, 11, 12, 18, 19, 20)
Data <- c(3, 4, 5, 4, 3, 2, 1, 2, 3, 5, 4, 3)
DF <- data.frame(Seq, Data)

diffs <- diff(Seq)
inds <- which(diffs > 2)  # positions where a gap needs filling
new.vals <- sapply(inds, function(x)
  if (diffs[x] %% 2 != 0) {
    seq(Seq[x] + 1.5, Seq[x + 1] - 1.5, 1.5)
  } else {
    seq(Seq[x] + 2, Seq[x + 1] - 2, 2)
  })
add.length <- unlist(lapply(new.vals, length))
Seq.new <- c(Seq, unlist(new.vals))
# interleave: fractional ids make inserted values sort between existing rows
id <- c(seq_along(Seq), rep(inds + 0.5, add.length))
Seq.new <- Seq.new[order(id)]
Data.new <- c(Data, rep(NA, sum(add.length)))
Data.new <- Data.new[order(id)]
NewDF <- data.frame(Seq.new, Data.new)
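For comparison, the same rule can be wrapped in a small function. This is only a sketch (fill_gaps() is a made-up name, not from either answer); ceiling(gap / 2) - 1 gives the same number of NA rows as the ifelse logic above:

fill_gaps <- function(Seq, Data) {
  pieces <- lapply(seq_along(Seq), function(i) {
    if (i == 1 || Seq[i] - Seq[i - 1] <= 2) {
      data.frame(NewSeq = Seq[i], NewData = Data[i])
    } else {
      g <- Seq[i] - Seq[i - 1]
      k <- ceiling(g / 2) - 1                    # NA rows needed for this gap
      mids <- Seq[i - 1] + (1:k) * g / (k + 1)   # evenly spaced fill positions
      rbind(data.frame(NewSeq = mids, NewData = NA),
            data.frame(NewSeq = Seq[i], NewData = Data[i]))
    }
  })
  do.call(rbind, pieces)
}
fill_gaps(Seq, Data)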

R: Is it possible to use mutate+lag with the same column?

I'm trying to replicate the following formula in R:
x[t] = x[t-1] * b + z[t] * (1 - b)
I'm using the following code:
t %>%
  mutate(x = ifelse(week == 1, z, NaN)) %>%  # initial value for the first lag
  mutate(x = ifelse(week != 1, lag(x, 1, default = 0) * b + z, z))
But I get NA for everything after the second element:
z b x
(dbl) (dbl) (dbl)
1 168.895 0.9 168.8950
2 20.304 0.9 131.7472
3 14.943 0.9 NA
4 11.028 0.9 NA
5 8.295 0.9 NA
6 8.024 0.9 NA
7 6.872 0.9 NA
8 7.035 0.9 NA
9 4.399 0.9 NA
10 4.158 0.9 NA
This is fairly simple in Excel, but I must do it in R. Do you have any approaches?
Reproducible example:
set.seed(2)
t <- data.frame(week = 1:52,
                z = runif(52, 0, 100),
                b = 0.2)
I found the solution by running the following loop, thanks to @Frank and @docendo discimus:
for (row in 2:dim(t)[1]) {
  # note: assumes column x already exists (e.g. initialised as in the attempt above)
  t[row, ] <- mutate(t[1:row, ], x = lag(x, 1) * b + z * (1 - b))[row, ]
}
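A loop-free sketch of the same recursion, using purrr::accumulate() and assuming b is constant within the data (as in the reproducible example): x starts at z for week 1, then x[t] = x[t-1] * b + z[t] * (1 - b).

library(purrr)
b_const <- 0.2  # assumed constant; take it from the data if it varies
t$x <- accumulate(t$z[-1],
                  function(x_prev, z_t) x_prev * b_const + z_t * (1 - b_const),
                  .init = t$z[1])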

Interpolating a spline within dplyr

I am trying to interpolate splines for the following example data:
trt depth root carbon
A 2 1 14
A 4 2 18
A 6 3 18
A 8 3 17
A 10 1 12
B 2 3 16
B 4 4 18
B 6 4 17
B 8 2 15
B 10 1 12
in the following way:
new_df <- df %>%
  group_by(trt) %>%
  summarise_each(funs(splinefun(., x = depth, method = "natural")))
I get Error: not a vector, but I don't see why. Am I not expressing the function in the right way?
Do you want a dataset that contains the interpolated values? If so, the code below expands the dataset to contain the desired x locations before the splines are calculated.
The resolution of those points is determined by the depth = seq(...) line of the expand.grid() call. Just make sure the original depth points are a subset of the expanded depth points (e.g., don't use something uneven like by=.732).
library(magrittr)
ds <- readr::read_csv("trt,depth,root,carbon\nA,2,1,14\nA,4,2,18\nA,6,3,18\nA,8,3,17\nA,10,1,12\nB,2,3,16\nB,4,4,18\nB,6,4,17\nB,8,2,15\nB,10,1,12")

ds_depths_possible <- expand.grid(
  depth = seq(from = min(ds$depth), max(ds$depth), by = .5),  # decide resolution here
  trt = c("A", "B"),
  stringsAsFactors = FALSE
)

ds_intpolated <- ds %>%
  dplyr::right_join(ds_depths_possible, by = c("trt", "depth")) %>%  # incorporate locations to interpolate
  dplyr::group_by(trt) %>%
  dplyr::mutate(
    root_interpolated   = spline(x = depth, y = root,   xout = depth)$y,
    carbon_interpolated = spline(x = depth, y = carbon, xout = depth)$y
  ) %>%
  dplyr::ungroup()
ds_intpolated
Output:
Source: local data frame [34 x 6]
trt depth root carbon root_interpolated carbon_interpolated
(chr) (dbl) (int) (int) (dbl) (dbl)
1 A 2.0 1 14 1.000000 14.00000
2 A 2.5 NA NA 1.195312 15.57031
3 A 3.0 NA NA 1.437500 16.72917
4 A 3.5 NA NA 1.710938 17.52344
5 A 4.0 2 18 2.000000 18.00000
6 A 4.5 NA NA 2.289062 18.21094
7 A 5.0 NA NA 2.562500 18.22917
8 A 5.5 NA NA 2.804688 18.13281
9 A 6.0 3 18 3.000000 18.00000
10 A 6.5 NA NA 3.132812 17.88281
.. ... ... ... ... ... ...
In the graphs produced by the code below, the little points and lines are interpolated; the big fat points are observed.
library(ggplot2)
ggplot(ds_intpolated, aes(x = depth, y = root_interpolated, color = trt)) +
  geom_line() +
  geom_point(shape = 1) +
  geom_point(aes(y = root), size = 5, alpha = .3, na.rm = TRUE) +
  theme_bw()
ggplot(ds_intpolated, aes(x = depth, y = carbon_interpolated, color = trt)) +
  geom_line() +
  geom_point(shape = 1) +
  geom_point(aes(y = carbon), size = 5, alpha = .3, na.rm = TRUE) +
  theme_bw()
If you want an additional example, here's some recent code and slides. We needed a rolling median for some missing points, and linear stats::approx() for some others. Another option is stats::loess(), but its arguments aren't as similar to those of approx() and spline().
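To illustrate the linear option just mentioned, here is a sketch (not from the original answer) that reuses the ds and ds_depths_possible objects above; root_linear is a made-up column name, and it assumes approx()'s default NA handling drops the missing pairs introduced by the join:

ds_linear <- ds %>%
  dplyr::right_join(ds_depths_possible, by = c("trt", "depth")) %>%
  dplyr::group_by(trt) %>%
  dplyr::mutate(root_linear = approx(x = depth, y = root, xout = depth)$y) %>%
  dplyr::ungroup()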
I gave up trying to get dplyr::summarise_each to work (and also tried dplyr::summarise, since your choice of functions didn't seem to match your desire for multiple-column input returning only two functions). I'm not sure it's possible in dplyr. Here's what might be called the canonical method of approaching this:
lapply(split(df, df$trt), function(d) splinefun(x = d$depth, y = d$carbon))
#-------------
$A
function (x, deriv = 0L)
{
    deriv <- as.integer(deriv)
    if (deriv < 0L || deriv > 3L)
        stop("'deriv' must be between 0 and 3")
    if (deriv > 0L) {
        z0 <- double(z$n)
        z[c("y", "b", "c")] <- switch(deriv,
            list(y = z$b, b = 2 * z$c, c = 3 * z$d),
            list(y = 2 * z$c, b = 6 * z$d, c = z0),
            list(y = 6 * z$d, b = z0, c = z0))
        z[["d"]] <- z0
    }
    res <- .splinefun(x, z)
    if (deriv > 0 && z$method == 2 && any(ind <- x <= z$x[1L]))
        res[ind] <- ifelse(deriv == 1, z$y[1L], 0)
    res
}
<bytecode: 0x7fe56e4853f8>
<environment: 0x7fe56efd3d80>

$B
function (x, deriv = 0L)
{
    deriv <- as.integer(deriv)
    if (deriv < 0L || deriv > 3L)
        stop("'deriv' must be between 0 and 3")
    if (deriv > 0L) {
        z0 <- double(z$n)
        z[c("y", "b", "c")] <- switch(deriv,
            list(y = z$b, b = 2 * z$c, c = 3 * z$d),
            list(y = 2 * z$c, b = 6 * z$d, c = z0),
            list(y = 6 * z$d, b = z0, c = z0))
        z[["d"]] <- z0
    }
    res <- .splinefun(x, z)
    if (deriv > 0 && z$method == 2 && any(ind <- x <= z$x[1L]))
        res[ind] <- ifelse(deriv == 1, z$y[1L], 0)
    res
}
<bytecode: 0x7fe56e4853f8>
<environment: 0x7fe56efc4db8>
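Each element of the returned list is itself a function, so a small usage sketch (the new depth values here are chosen arbitrarily):

fits <- lapply(split(df, df$trt), function(d) splinefun(x = d$depth, y = d$carbon))
fits$A(seq(2, 10, by = 0.5))  # interpolated carbon values for trt A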

How can I vectorize this task in R?

For a specific task, I have written the following R script:
pred <- c(0.1, 0.1, 0.1, 0.2, 0.2, 0.3, 0.3)
grp <- as.factor(c(1, 1, 2, 2, 1, 1, 1))
cut <- unique(pred)
cut_n <- length(cut)
n <- length(pred)
class_1 <- numeric(cut_n)
class_2 <- numeric(cut_n)
curr_cut <- cut[1]
class_1_c <- 0
class_2_c <- 0
j <- 1
for (i in 1:n) {
  if (curr_cut != pred[i]) {
    j <- j + 1
    curr_cut <- pred[i]
  }
  if (grp[i] == levels(grp)[1])
    class_1_c <- class_1_c + 1
  else
    class_2_c <- class_2_c + 1
  class_1[j] <- class_1_c
  class_2[j] <- class_2_c
}
cat("index:", cut, "\n")
cat("class1:", class_1, "\n")
cat("class2:", class_2, "\n")
My goal above was to compute the cumulative number of times the factors in grp appear for each unique value in pred. For example, I get the following output for above:
index: 0.1 0.2 0.3
class1: 2 3 5
class2: 1 2 2
I am a beginner in R and I have few questions about this:
How can I make this code faster and simpler?
Is it possible to vectorize this and avoid the for loop?
Is there a different "R-esque" way of doing this?
Any help would be greatly appreciated. Thanks!
You can start by getting the group/pred counts using a table:
table(grp, pred)
# pred
# grp 0.1 0.2 0.3
# 1 2 1 2
# 2 1 1 0
Of course this isn't exactly what you wanted. You want cumulative totals, so we can adjust this result by applying a cumulative sum across each row (transposed to better match your data layout)
t(apply(table(grp, pred), 1, cumsum))
# grp 0.1 0.2 0.3
# 1 2 3 5
# 2 1 2 2
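If you want the exact printout the original loop produced, the matrix can be unpacked directly; a small usage sketch:

res <- t(apply(table(grp, pred), 1, cumsum))
cat("index:", colnames(res), "\n")   # the unique pred values
cat("class1:", res[1, ], "\n")       # cumulative counts for group 1
cat("class2:", res[2, ], "\n")       # cumulative counts for group 2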

Combining vectors of unequal length and non-unique values

I would like to combine into a data frame two vectors that:
- have different lengths
- contain sequences also found in the other vector
- contain sequences not found in the other vector
- have non-shared sequences that are never longer than 3 elements
- always have the same first element
The data frame should show the equal sequences in the two vectors aligned, with NA in a column where that vector lacks a sequence present in the other vector.
For example:
vector 1   vector 2           vector 1   vector 2
   1          1                  a          a
   2          2                  g          g
   3          3                  b          b
   4          1         or      h          a
   1          2                  a          g
   2          3                  g          b
   5          4                  c          h
              5                             c
should be combined into the data frame:
   1          1                  a          a
   2          2                  g          g
   3          3                  b          b
   4         NA                  h         NA
   1          1         or      a          a
   2          2                  g          g
  NA          3                 NA          b
  NA          4                 NA          h
   5          5                  c          c
What I did was search for merge, combine, cbind, and plyr examples, but I was not able to find a solution. I am afraid I will need to write a function with nested for loops to solve this problem.
Note - this was proposed as an answer to the first version of the OP. The question has been modified since then but the problem is still not well-defined in my opinion.
Here is a solution that works with your integer example and would also work with numeric vectors. I am also assuming that:
- both vectors contain the same number of sequences
- a new sequence starts where value[i+1] <= value[i]
If your vectors are non-numeric or if one of my assumptions does not fit your problem, you'll have to clarify.
v1 <- c(1, 2, 3, 4, 1, 2, 5)
v2 <- c(1, 2, 3, 1, 2, 3, 4, 5)

# split each vector into its increasing sequences
v1.sequences <- split(v1, cumsum(c(TRUE, diff(v1) <= 0)))
v2.sequences <- split(v2, cumsum(c(TRUE, diff(v2) <= 0)))

align.fun <- function(s1, s2) {  # aligns two sequences
  s12 <- sort(unique(c(s1, s2)))
  cbind(ifelse(s12 %in% s1, s12, NA),
        ifelse(s12 %in% s2, s12, NA))
}

do.call(rbind, mapply(align.fun, v1.sequences, v2.sequences))
# [,1] [,2]
# [1,] 1 1
# [2,] 2 2
# [3,] 3 3
# [4,] 4 NA
# [5,] 1 1
# [6,] 2 2
# [7,] NA 3
# [8,] NA 4
# [9,] 5 5
I maintain that your problem might be solved in terms of the shortest common supersequence. It assumes that your two vectors each represent one sequence. Please give the code below a try.
If it still does not solve your problem, you'll have to explain exactly what you mean by "my vector contains not one but many sequences": define what you mean by a sequence and tell us how sequences can be identified by scanning through your two vectors.
Part I: given two sequences, find the longest common subsequence
LongestCommonSubsequence <- function(X, Y) {
  m <- length(X)
  n <- length(Y)
  C <- matrix(0, 1 + m, 1 + n)
  for (i in seq_len(m)) {
    for (j in seq_len(n)) {
      if (X[i] == Y[j]) {
        C[i + 1, j + 1] <- C[i, j] + 1
      } else {
        C[i + 1, j + 1] <- max(C[i + 1, j], C[i, j + 1])
      }
    }
  }
  backtrack <- function(C, X, Y, i, j) {
    if (i == 1 | j == 1) {
      return(data.frame(I = c(), J = c(), LCS = c()))
    } else if (X[i - 1] == Y[j - 1]) {
      return(rbind(backtrack(C, X, Y, i - 1, j - 1),
                   data.frame(LCS = X[i - 1], I = i - 1, J = j - 1)))
    } else if (C[i, j - 1] > C[i - 1, j]) {
      return(backtrack(C, X, Y, i, j - 1))
    } else {
      return(backtrack(C, X, Y, i - 1, j))
    }
  }
  return(backtrack(C, X, Y, m + 1, n + 1))
}
Part II: given two sequences, find the shortest common supersequence
ShortestCommonSupersequence <- function(X, Y) {
  LCS <- LongestCommonSubsequence(X, Y)[c("I", "J")]
  X.df <- data.frame(X = X, I = seq_along(X), stringsAsFactors = FALSE)
  Y.df <- data.frame(Y = Y, J = seq_along(Y), stringsAsFactors = FALSE)
  ALL <- merge(LCS, X.df, by = "I", all = TRUE)
  ALL <- merge(ALL, Y.df, by = "J", all = TRUE)
  ALL <- ALL[order(pmax(ifelse(is.na(ALL$I), 0, ALL$I),
                        ifelse(is.na(ALL$J), 0, ALL$J))), ]
  ALL$SCS <- ifelse(is.na(ALL$X), ALL$Y, ALL$X)
  ALL
}
Your Example:
ShortestCommonSupersequence(X = c("a","g","b","h","a","g","c"),
Y = c("a","g","b","a","g","b","h","c"))
# J I X Y SCS
# 1 1 1 a a a
# 2 2 2 g g g
# 3 3 3 b b b
# 9 NA 4 h <NA> h
# 4 4 5 a a a
# 5 5 6 g g g
# 6 6 NA <NA> b b
# 7 7 NA <NA> h h
# 8 8 7 c c c
(where the two updated vectors are in columns X and Y.)
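To recover the two-column layout requested in the question, the X and Y columns can be pulled out directly; a small follow-up sketch:

aligned <- ShortestCommonSupersequence(X = c("a", "g", "b", "h", "a", "g", "c"),
                                       Y = c("a", "g", "b", "a", "g", "b", "h", "c"))
# NA marks positions where one vector lacks the other's subsequence
data.frame(vector1 = aligned$X, vector2 = aligned$Y)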
