Diffusion-reaction model using ReacTran in R

I am trying to implement a reaction-diffusion PDE using the ReacTran package and solving it with deSolve. However, the time-dependent reaction term is not working. Any suggestions on how to implement this would be greatly appreciated!
library(ReacTran)
library(deSolve)
N <- 1000
xgrid <- setup.grid.1D(x.up = 0, x.down = 10, N = N)
x <- xgrid$x.mid
D.coeff <- 1
k <- 1
Diffusion <- function(t, Y, parms){
  tran <- tran.1D(C = Y, C.up = 0, C.down = 0, D = D.coeff, dx = xgrid) - k*t
  reac <- -kt
  return(list(tran$dC + reac))
}
# Set initial conditions as a Gaussian distribution
C0 <- 10    #Initial concentration (mg/L)
X0 <- 5     #Location of initial concentration (m)
sig <- 0.2  #Spread of Gaussian distribution
C <- rep(0, N) #baseline concentration vector
Yini <- C + C0*exp(-((x - X0)/sig)^2)
parms1 <- list(D = D.coeff, k = k)
times <- seq(from = 0, to = 5, by = 0.01)
print(system.time(
  out <- ode.1D(y = Yini, times = times, func = Diffusion,
                parms = parms1, dimens = N)))
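For what it's worth, here is a minimal sketch (my own, not the asker's code) of one way the time-dependent reaction term could be written so the model runs: the reaction is added to tran$dC inside the derivative function rather than subtracted from the tran.1D() call, and -kt is assumed to be a typo for -k*t applied uniformly over the grid. The names Diffusion2 and out2 are only for this sketch.
library(ReacTran)
library(deSolve)
N <- 1000
xgrid <- setup.grid.1D(x.up = 0, x.down = 10, N = N)
x <- xgrid$x.mid
Diffusion2 <- function(t, Y, parms){
  with(as.list(parms), {
    # transport term from ReacTran
    tran <- tran.1D(C = Y, C.up = 0, C.down = 0, D = D, dx = xgrid)
    # assumed time-dependent sink, -k*t, the same in every grid cell
    reac <- -k * t
    list(tran$dC + reac)
  })
}
Yini <- 10 * exp(-((x - 5)/0.2)^2)  # same Gaussian initial condition as above
parms1 <- c(D = 1, k = 1)
times <- seq(from = 0, to = 5, by = 0.01)
out2 <- ode.1D(y = Yini, times = times, func = Diffusion2,
               parms = parms1, dimens = N)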

Related

How can I use tryCatch for the nls function in R

I am doing a regression for a quadratic-linear function. I have two options: nlsLM and nls2. However, for some datasets the use of nlsLM causes problems such as "singular gradient matrix at initial parameter estimates", or it runs into an infinite loop. I want to use tryCatch to deal with this issue. Can anyone help me out? Thanks everyone in advance.
Here is the full code:
# Packages needed for estimation of Ideal trajectory - nonlinear regression
#-------------------------------------------------------------------------------
library("minpack.lm")
library("nlstools")
library("nlsMicrobio")
library("stats")
library("tseries") #runs test for auto correlation
#Use NLS2
library(proto)
library(nls2)
################################################################
# Set working directory
setwd("C:/Users/Kevin Le/PycharmProjects/Pig Data Black Box - Copy")
#load dataset
load("Data/JRPData_TTC.Rdata") #load dataset created in MissingData.step
ID <- 5470
#Create a new dataframe which will store Data after ITC estimation
#Dataframe contains ITC parameters
ITC.param.pos2 <- data.frame(ANIMAL_ID=factor(),
X0=double(),
Y1=double(),
Y2=double(),
Ylast=double(),
a=double(),
b=double(),
c=double(),
d=double(),
stringsAsFactors=FALSE)
#Dataframe contains data points on the ITC
Data.remain <- data.frame(ANIMAL_ID=character(),
Age=double(),
obs.CFI=double(),
tt=double(),
ttt=double(),
stringsAsFactors=FALSE)
#===============================================================
# For loop for automatically estimating ITC of all pigs
#===============================================================
IDC <- seq_along(ID) # 17, 23, 52, 57, 116
for (idc in IDC){
# idc = 1
i <- ID[idc]
Data <- No.NA.Data.1[No.NA.Data.1$ANIMAL_ID == i,]
idc1 <- unique(as.numeric(Data$idc.1))
####### Create data frame of x (Age) and y (CFI) ########
x <- as.numeric(Data$Age.plot)
Y <- as.numeric(Data$CFI.plot)
Z <- as.numeric(Data$DFI.plot)
Data.xy <- as.data.frame(cbind(x,Y))
#Initial parameters for parameter estimation
X0.0 <- x[1]
Xlast <- x[length(x)]
##################################################################
# 1. reparametrization CFI at X0 = 0
#function used for reparametrization in MAPLE
# solve({
# 0=a+b*X_0+c*X_0**2,
# DFIs=b+2*c*Xs,CFIs=a+b*Xs+c*Xs**2},
# {a,b,c});
# a = -X0*(2*CFIs*Xs-CFIs*X0-Xs^2*DFIs+Xs*DFIs*X0)/(Xs^2-2*X0*Xs+X0^2)
# b = (-Xs^2*DFIs+DFIs*X0^2+2*CFIs*Xs)/(Xs^2-2*X0*Xs+X0^2)
# c = -(CFIs-Xs*DFIs+X0*DFIs)/(Xs^2-2*X0*Xs+X0^2)
# 2. with the source of the function abcd and pred
##################################################################
#Provide set of initial parameters
Xs.1 <- round(seq(X0.0 + 1, Xlast - 1, len = 30), digits = 0)
X0.1 <- rep(X0.0, length(Xs.1))
DFIs.1 <- NULL
CFIs.1 <- NULL
for(A in seq_along(Xs.1)){
DFIs2 <- Data[Data$Age.plot == Xs.1[A],]$DFI.plot
CFIs2 <- Data[Data$Age.plot == Xs.1[A],]$CFI.plot
DFIs.1 <- c(DFIs.1, DFIs2)
CFIs.1 <- c(CFIs.1, CFIs2)
}
st1 <- data.frame(cbind(X0.1, Xs.1, DFIs.1, CFIs.1))
names(st1) <- c("X0","Xs", "DFIs","CFIs")
#RUN NLS2 to find optimal initial parameters
st2 <- nls2(Y ~ nls.func.2(X0, Xs, DFIs, CFIs),
Data.xy,
start = st1,
# weights = weight,
# trace = T,
algorithm = "brute-force")
par_init <- coef(st2); par_init
#--------------------------------------------
# Create empty lists to store data after loop
#--------------------------------------------
par <- list()
AC.res <- list()
AC.pvalue <- NULL
data2 <- list()
data3 <- list()
param <- data.frame(rbind(par_init))
par.abcd <- data.frame(rbind(abcd.2(as.vector(par_init))))
param.2 <- data.frame(X0=double(),
Xs=double(),
DFIs=double(),
CFIs=double(),
a=double(),
b=double(),
c=double(),
stringsAsFactors=FALSE)
j <- 2
AC_pvalue <- 0
AC.pvalue[1] <- AC_pvalue
datapointsleft <- as.numeric(dim(Data)[1])
dpl <- datapointsleft #vector of data points left at each step
#-------------------------------------------------------------------------------
# Start the procedure of Non Linear Regression
#-------------------------------------------------------------------------------
while ((AC_pvalue<=0.05) && datapointsleft >= 20){
weight <- 1/Y^2
# ---------------- NON linear reg applied to log(Y) ---------------------------------
st2 <- nls2(Y ~ nls.func.2(X0, Xs, DFIs, CFIs),
Data.xy,
start = st1,
weights = weight,
trace = F,
algorithm = "brute-force")
par_init <- coef(st2)
par_init
# st1 <- st1[!(st1$Xs == par_init[2]),]
nls.CFI <- nlsLM(Y ~ nls.func.2(X0, Xs, DFIs, CFIs),
Data.xy,
control = list(tol = 1e-2, printEval = TRUE, maxiter = 1024),
start = list(X0 = par_init[1], Xs = par_init[2],
DFIs = par_init[3], CFIs = par_init[4]),
weights = weight,
algorithm = "port",
lower = c(-10000,X0.0+1, -10000, -10000),
upper = c(10000, Xlast-1, 10000, 10000),
trace = F)
# nls.CFI <- nls2(Y ~ nls.func.2(X0, Xs, DFIs, CFIs),
# Data.xy,
# start = list(X0 = par_init[1], Xs = par_init[2],
# DFIs = par_init[3], CFIs = par_init[4]),
# weights = weight,
# control = nls.control(warnOnly = TRUE),
# trace = T,
# algorithm = "port",
# lower = c(-100000000,X0.0+1, -1000000000, -1000000000),
# upper = c(1000000000, Xlast-1, 1000000000, 1000000000))
# nls.CFI <- nlsLM(Y ~ nls.func.2(X0, Xs, DFIs, CFIs),
# Data.xy,
# control = nls.control(warnOnly = TRUE),
# start = list(X0 = par_init[1], Xs = par_init[2],
# DFIs = par_init[3], CFIs = par_init[4]),
# weights = weight,
# algorithm = "port",
# lower = c(-1000000000,X0.0+1, -1000000000, -1000000000),
# upper = c(1000000000, Xlast-1, 1000000000, 1000000000),
# trace = F)
#--------RESULTS analysis GOODNESS of fit
#estimate params
par[[j]] <- coef(nls.CFI)
par.abcd[j,] <- abcd.2(as.vector(coef(nls.CFI) )) #calculation of a, b, c and d
param[j,] <- par[[j]]
param.2[j-1,] <- cbind(param[j,], par.abcd[j,])
#summary
# summ = overview((nls.CFI)) #summary
#residuals
res1 <- nlsResiduals(nls.CFI) #residuals
res2 <- nlsResiduals(nls.CFI)$resi1
res <- res2[, 2]
AC.res <- test.nlsResiduals(res1)
AC.pvalue[j] <- AC.res$p.value
#---------Check for negative residuals----------
#Add filtration step order to data
Step <- rep(j - 1, length(x))
#create a new dataset with predicted CFI included
Data.new <- data.frame(cbind(x, Z, Y, pred.func.2(par[[j]],x)[[1]], res, Step))
names(Data.new) <- c("Age", "Observed_DFI","Observed_CFI", "Predicted_CFI", "Residual", "Step")
# plot(Data.new$Age, Data.new$Predicted_CFI, type = "l", col = "black",lwd = 2,
# ylim = c(0, max(Data.new$Predicted_CFI, Data.new$Observed_CFI)))
# lines(Data.new$Age, Data.new$Observed_CFI, type = "p", cex = 1.5)
#
#remove negative res
Data.pos <- Data.new[!Data.new$Residual<0,]
# lines(Data.pos$Age, Data.pos$Predicted_CFI, type = "l", col = j-1, lwd = 2)
# lines(Data.pos$Age, Data.pos$Observed_CFI, type = "p", col = j, cex = 1.5)
#restart
#Criteria to stop the loop when the estimated parameters are equal to initial parameters
# Crite <- sum(param.2[dim(param.2)[1],c(1:4)] == par_init)
datapointsleft <- as.numeric(dim(Data.pos)[1])
par_init <- par[[j]]
AC_pvalue <- AC.pvalue[j]
j <- j+1
x <- Data.pos$Age
Y <- Data.pos$Observed_CFI
Z <- Data.pos$Observed_DFI
Data.xy <- as.data.frame(cbind(x,Y))
dpl <- c(dpl, datapointsleft)
dpl
#Create again the grid
X0.0 <- x[1]
Xlast <- x[length(x)]
#Xs
if(par_init[2] -15 <= X0.0){
Xs.1 <- round(seq(X0.0 + 5, Xlast - 5, len = 30), digits = 0)
} else if(par_init[2] + 5 >= Xlast){
Xs.1 <- round(seq(par_init[2]-10, par_init[2]-1, len = 6), digits = 0)
} else{
Xs.1 <- round(seq(par_init[2]-5, par_init[2] + 5, len = 6), digits = 0)
}
#
X0.1 <- rep(X0.0, length(Xs.1))
DFIs.1 <- NULL
CFIs.1 <- NULL
for(A in seq_along(Xs.1)){
DFIs2 <- Data[Data$Age.plot == Xs.1[A],]$DFI.plot
CFIs2 <- Data[Data$Age.plot == Xs.1[A],]$CFI.plot
DFIs.1 <- c(DFIs.1, DFIs2)
CFIs.1 <- c(CFIs.1, CFIs2)
}
st1 <- data.frame(cbind(X0.1, Xs.1, DFIs.1, CFIs.1))
if(X0.0 <= par_init[2] && Xlast >=par_init[2]){
st1 <- rbind(st1, par_init)
}
names(st1) <- c("X0","Xs", "DFIs","CFIs")
}
} # end FOR loop
Here is the data file; I have exported my data to .Rdata for easier import: https://drive.google.com/file/d/1GVMarNKWMEyz-noSp1dhzKQNtu2uPS3R/view?usp=sharing
In this file, the set with id 5470 produces the error "singular gradient matrix at initial parameter estimates" in this part:
nls.CFI <- nlsLM(Y ~ nls.func.2(X0, Xs, DFIs, CFIs),
Data.xy,
control = list(tol = 1e-2, printEval = TRUE, maxiter = 1024),
start = list(X0 = par_init[1], Xs = par_init[2],
DFIs = par_init[3], CFIs = par_init[4]),
weights = weight,
algorithm = "port",
lower = c(-10000,X0.0+1, -10000, -10000),
upper = c(10000, Xlast-1, 10000, 10000),
trace = F)
The complementary functions (file Function.R):
abcd.2 <- function(P){
  X0   <- P[1]
  Xs   <- P[2]
  DFIs <- P[3]
  CFIs <- P[4]
  a <- -X0*(2*CFIs*Xs - CFIs*X0 - Xs^2*DFIs + Xs*DFIs*X0)/(Xs^2 - 2*X0*Xs + X0^2)
  b <- (-Xs^2*DFIs + DFIs*X0^2 + 2*CFIs*Xs)/(Xs^2 - 2*X0*Xs + X0^2)
  c <- -(CFIs - Xs*DFIs + X0*DFIs)/(Xs^2 - 2*X0*Xs + X0^2)
  pp <- as.vector(c(a, b, c))
  return(pp)
}
#--------------------------------------------------------------
# NLS function
#--------------------------------------------------------------
nls.func.2 <- function(X0, Xs, DFIs, CFIs){
  pp <- c(X0, Xs, DFIs, CFIs)
  #calculation of a, b and c using these new parameters
  c <- abcd.2(pp)[3]
  b <- abcd.2(pp)[2]
  a <- abcd.2(pp)[1]
  #note: x (the ages) is taken from the calling environment, not passed as an argument
  ind1 <- as.numeric(x < Xs)
  return(ind1*(a + b*x + c*x^2) + (1 - ind1)*((a + b*Xs + c*Xs^2) + (b + 2*c*Xs)*(x - Xs)))
}
#--------------------------------------------------------------
# Fit new parameters to a quadratic-linear function of CFI
#--------------------------------------------------------------
pred.func.2 <- function(pr, age){
  X0   <- pr[1]
  Xs   <- pr[2]
  DFIs <- pr[3]
  CFIs <- pr[4]
  x <- age
  #calculation of a, b and c using these new parameters
  c <- abcd.2(pr)[3]
  b <- abcd.2(pr)[2]
  a <- abcd.2(pr)[1]
  ind1 <- as.numeric(x < Xs)
  results <- list()
  cfi <- ind1*(a + b*x + c*x^2) + (1 - ind1)*((a + b*Xs + c*Xs^2) + (b + 2*c*Xs)*(x - Xs)) #CFI
  dfi <- ind1*(b + 2*c*x) + (1 - ind1)*(b + 2*c*Xs) #DFI
  results[[1]] <- cfi
  results[[2]] <- dfi
  return(results)
}
#---------------------------------------------------------------------------------------------------------------
# Quadratic-linear function of CFI curve and its 1st derivative (DFI) with original parameters (only a, b and c)
#---------------------------------------------------------------------------------------------------------------
pred.abcd.2 <- function(pr, age){
  a <- pr[1]
  b <- pr[2]
  c <- pr[3]
  x <- age
  #note: Xs is taken from the calling environment, not passed as an argument
  ind1 <- as.numeric(x < Xs)
  results <- list()
  cfi <- ind1*(a + b*x + c*x^2) + (1 - ind1)*((a + b*Xs + c*Xs^2) + (b + 2*c*Xs)*(x - Xs)) #CFI
  dfi <- ind1*(b + 2*c*x) + (1 - ind1)*(b + 2*c*Xs) #DFI
  results[[1]] <- cfi
  results[[2]] <- dfi
  return(results)
}
Updated: I reviewed my logic from the previous step and found that it had left my data slightly messed up; I have fixed that. The case where a dataset ran into an infinite loop no longer occurs, but this error is still there: singular gradient matrix at initial parameter estimates.
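For reference, here is a minimal, self-contained sketch of the tryCatch pattern around nlsLM, using a toy model and toy data rather than the CFI model above: the call is wrapped so that an error such as "singular gradient matrix at initial parameter estimates" is caught, reported, and turned into a NULL result that the surrounding loop can test.
library(minpack.lm)
set.seed(1)
toy <- data.frame(x = 1:20)
toy$y <- 2*exp(0.1*toy$x) + rnorm(20, sd = 0.1)
fit <- tryCatch(
  nlsLM(y ~ a*exp(b*x), data = toy, start = list(a = 1, b = 1)),
  error = function(e){
    message("nlsLM failed: ", conditionMessage(e))
    NULL  # return NULL instead of stopping the script
  }
)
if (is.null(fit)){
  message("skip this dataset and continue")
} else {
  print(coef(fit))
}
In the posted loop, the same wrapper could go around the existing nlsLM(...) call; when the result is NULL one could, for example, break out of the while loop and move on to the next ANIMAL_ID.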

In R: FME/ deSolve - SIR fitting (time varying parameters)

What I am trying to do: I have a simple SIR model with a time-varying transmission rate beta, which I have already implemented in R (thanks to @tpetzoldt). We have a population of N = 10000; gamma is also fixed.
sir_1 <- function(f_beta, S0, I0, R0, times) {
  # the differential equations
  sir_equations <- function(time, variables, parameters) {
    beta <- f_beta(time)
    gamma <- f_gamma(time)
    with(as.list(variables), {
      dS <- -beta * I * S/10000
      dI <- beta * I * S/10000 - 1/5 * I
      dR <- 1/5 * I
      return(list(c(dS, dI, dR), beta = beta))
    })
  }
  # time dependent parameter functions
  parameters_values <- list(
    f_beta = f_beta
  )
  # the initial values of variables
  initial_values <- c(S = S0, I = I0, R = R0)
  out <- ode(initial_values, times, sir_equations, parameters)
}
times <- seq(0, 19)
f_beta <- approxfun(x=times, y=seq(0.901, 0.92, by=0.001), rule=2)
out <- as.data.frame(sir_1(f_beta=f_beta, S0 = 9990, I0 = 10, R0 = 0, times = times))
Now I have some "real" data. With the FME package I want to estimate the optimal beta parameter for each time step:
datareal <- cbind(time = times, I=c(10,32,120,230,480,567,1040,1743,2300,2619,3542,4039,4231,6378,
5356, 4987, 3421, 2789, 1789,1156))
sir_cost <- function(f_beta) {
  outsir <- as.data.frame(sir_1(f_beta = f_beta, S0 = 9990, I0 = 10, R0 = 0, times = times))
  costf <- modCost(model = outsir, obs = datareal)
}
p <- rep(0.8, 20)
Fit <- modFit(f = sir_cost, p = p)
Fit
$par
[1] 0.8 0.8 0.8 0.8 0.8 0.8 0.8 0.8 0.8 0.8 0.8 0.8 0.8 0.8 0.8 0.8 0.8 0.8 0.8 0.8
My issues:
For the initial values I took 0.8 at each time step, but the fit does nothing; it only returns 0.8 for each time step (even if I take a very high value like 800, it says that this is already the best fit). My guess is that for time-varying values of the same parameter (beta) I have to approach this in a different way than shown in the documentation.
Any help is highly appreciated.
I don't think that estimating beta per time step is a good idea. This is inherent in the problem and not a fault of deSolve or FME. If a dynamic model is to be used to estimate time-dependent parameters, I would recommend a suitable function with fewer knots, e.g. a time-dependent linear, quadratic or spline function, with for example 3-5 knots instead of 20. Then replace approxfun with that function and plug it in. Model fitting is an art, so play with start values and solvers. And read the books.
Note that the following is just a technical demonstration:
library("deSolve")
library("FME")
sir_1 <- function(f_beta, S0, I0, R0, times) {
  # the differential equations
  sir_equations <- function(time, variables, parameters) {
    beta <- parameters$f_beta(time)
    with(as.list(variables), {
      dS <- -beta * I * S/10000
      dI <- beta * I * S/10000 - 1/5 * I
      dR <- 1/5 * I
      return(list(c(dS, dI, dR), beta = beta))
    })
  }
  initial_values <- c(S = S0, I = I0, R = R0)
  parameters <- list(f_beta = f_beta)
  out <- ode(initial_values, times, sir_equations, parameters)
}
times <- seq(0, 19)
# use method "constant" to leave beta constant over time step
f_beta <- approxfun(x=times, y=seq(0.901, 0.92, by=0.001), method="constant", rule=2)
out <- sir_1(f_beta=f_beta, S0 = 9990, I0 = 10, R0 = 0, times = times)
plot(out)
datareal <- cbind(time = times, I=c(10,32,120,230,480,567,1040,1743,2300,2619,3542,4039,4231,6378,
5356, 4987, 3421, 2789, 1789,1156))
plot(out, obs=datareal)
sir_cost <- function(p) {
  f_beta <- approxfun(x = times, y = p, method = "constant", rule = 2)
  outsir <- sir_1(f_beta = f_beta, S0 = 9990, I0 = 10, R0 = 0, times = times)
  modCost(model = outsir, obs = datareal)
}
# Play with start values!!!
p <- rep(0.8, 20)
# e.g.: consider random start values
set.seed(123)
p <- runif(20, min=0.8, max=1.2)
# try other solvers, especially such with true box constraints
Fit <- modFit(f = sir_cost, p = p,
lower=rep(0.2, 20), upper=rep(5, 20), # box constraints
method="Port")
summary(Fit) # system is singular (that is what we expected)
# use another solver. Note: it takes a while
Fit <- modFit(f = sir_cost, p = p,
lower=rep(0.2, 20), upper=rep(5, 20), # box constraints
method="L-BFGS-B")
# goes in a surprisingly good direction
Fit$par
f_beta <- approxfun(x=times, y=Fit$par, method="constant", rule=2)
out2 <- sir_1(f_beta=f_beta, S0 = 9990, I0 = 10, R0 = 0, times = times)
# compare with data
plot(out, out2, obs=datareal)
# but see how unstable beta is
plot(out2)
Fitting a model with time-dependent parameters may or may not be a good idea, but if there are reasons to do so, I would suggest restricting the number of parameters and using a smooth function of some kind.
The following example shows how to use a spline for this purpose, but it is of course also possible (and may be preferable) to use a function with some mechanistic meaning.
As a side effect, it was also possible to identify gamma instead of fixing it a priori. Nevertheless, this is still a technical demonstration; I leave open the scientific question of whether a time-dependent beta makes any sense.
library("FME")
sir_1 <- function(f_beta, gamma, S0, I0, R0, times) {
  # the differential equations
  sir_equations <- function(time, variables, parameters) {
    beta <- parameters$f_beta(time)
    gamma <- parameters$gamma
    with(as.list(variables), {
      dS <- -beta * I * S / 10000
      dI <- beta * I * S / 10000 - gamma * I
      dR <- gamma * I
      # return vector of derivatives, and beta as auxiliary variable
      return(list(c(dS, dI, dR), beta = beta))
    })
  }
  initial_values <- c(S = S0, I = I0, R = R0)
  # pass constant parameter and parameter function together as a list
  parameters <- list(
    f_beta = f_beta,
    gamma = gamma
  )
  ode(initial_values, times, sir_equations, parameters)
}
times <- seq(0, 19)
datareal <- data.frame(
time = times,
I = c(10, 32, 120, 230, 480, 567, 1040, 1743, 2300,
2619, 3542, 4039, 4231, 6378,
5356, 4987, 3421, 2789, 1789, 1156)
)
## define parameter as a vector: gamma and beta
t_beta <- c(0, 12, 16, 19) # consider more or less knots
n_beta <- length(t_beta)
y_beta <- rep(1, n_beta)
p <- c(gamma = 1/5, y_beta) # combine all parameters in one vector
## a small helper function for parameter selection
select <- function(p, which, exclude = FALSE) {
  parnames <- names(p)
  p[(which == parnames) != exclude]
}
## check the helper function
select(p, "gamma")
select(p, "gamma", excl=TRUE)
## cost function, see ?modCost help page
sir_cost <- function(p) {
  gamma  <- select(p, "gamma")
  y_beta <- select(p, "gamma", exclude = TRUE)
  f_beta <- splinefun(x = t_beta, y = y_beta)
  outsir <- sir_1(f_beta = f_beta, gamma = gamma,
                  S0 = 9990, I0 = 10, R0 = 0, times = times)
  modCost(model = outsir, obs = datareal)
}
## model calibration, see ?modFit
Fit <- modFit(f = sir_cost, p = p,
# lower bound to avoid negative values of beta
lower = c(gamma = 0, rep(0.0, n_beta)),
# note: high sensitivity wrt. upper bound
upper = c(gamma=1, rep(2.0, n_beta)),
# an algorithm that supports box constraints
method = "Port")
## all parameters were identifiable
summary(Fit)
## smaller time steps to obtain smooth curves
times <- seq(0, 19, 0.1)
## split components of fitted parameters
gamma <- select(Fit$par, "gamma")
y_beta <- select(Fit$par, "gamma", exclude = TRUE)
out2 <- sir_1(f_beta = splinefun(x = t_beta, y = y_beta), gamma,
S0 = 9990, I0 = 10, R0 = 0, times = times)
## show fitted curves and compare simulation with data
## see ?plot.deSolve help page
plot(out2, obs = datareal, which = c("S", "R", "I", "beta"),
las = 1, obspar = list(pch = 16, col = "red"))

Why random effect estimators are not correct

I'm trying to simulate glmmLasso using binomial data,
but the random-effect estimates are not close to the value of 5 (sigmaB) that I specified.
Is something wrong in my code?
If not, why do the random effects come out like that?
makedata <- function(I, J, p, sigmaB){
  N <- I*J
  # fixed effect generation
  beta0 <- runif(1, 0, 1)
  beta <- sort(runif(p, 0, 1))
  # x generation
  x <- matrix(runif(N*p, -1, 1), N, p)
  # random effect generation
  b0 <- rep(rnorm(I, 0, sigmaB), each = J)
  # group
  group <- as.factor(rep(1:I, each = J))
  # y generation
  k <- exp(-(beta0 + x %*% beta + b0))
  y <- rbinom(n = length(k), size = 1, prob = 1/(1 + k))
  # standardization
  sx <- scale(x, center = TRUE, scale = TRUE)
  simuldata <- data.frame(y = y, x = sx, group)
  res <- list(simuldata = simuldata)
  return(res)
}
# I : number of groups
I <- 20
# J : number of observation in group
J <- 10
# p : number of variables
p <- 20
# sigmaB : sd of random effect b0
sigmaB <- 5
set.seed(231233)
simdata <- makedata(I, J, p, sigmaB)
lam <- 10
xnam <- paste("x", 1:p, sep=".")
fmla <- as.formula(paste("y ~ ", paste(xnam, collapse= "+")))
library(glmmLasso)
glmm <- glmmLasso(fmla, rnd = list(group=~1), data = simdata, lambda = lam, control = list(scale = T, center = T))
summary(glmm)
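As a hedged aside (not a diagnosis of glmmLasso itself): with only I = 20 groups, the realized standard deviation of the simulated random effects can already differ noticeably from sigmaB = 5, so part of any gap may be sampling variability. A quick illustration on the data-generating side:
b0_groups <- rnorm(I, 0, sigmaB)  # a fresh draw of I = 20 group effects (not the exact ones inside makedata)
sd(b0_groups)                     # the realized SD of only 20 draws can sit well away from sigmaB = 5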

Start the ODE-based curve at a time that is different from 0

I have a system of ordinary differential equations (ODEs) built with the R package deSolve. Is it possible to run the model so that the variable y2 starts at time = 5 (instead of time = 0) in the example below?
library(deSolve)
test <- function(t, y, parms) {
  dy1 <- -2 * y[2]
  dy2 <- 1.25 * y[1]
  list(c(dy1, dy2))
}
yini <- c(y1 = 1, y2 = 0)
times <- seq(from = 0, to = 20, by = 0.01)
out <- ode (times = times, y = yini, func = test, parms = NULL)
plot(out)
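One possible reading of "y2 starts at time = 5" (a sketch only, and an assumption about what is meant): keep y2 frozen at its initial value until t = 5 by gating its derivative with an indicator. The name test_delayed is only for this sketch.
library(deSolve)
test_delayed <- function(t, y, parms) {
  active <- as.numeric(t >= 5)  # 0 before t = 5, 1 afterwards
  dy1 <- -2 * y[2]
  dy2 <- active * 1.25 * y[1]   # y2 stays at its initial value until t = 5
  list(c(dy1, dy2))
}
yini <- c(y1 = 1, y2 = 0)
times <- seq(from = 0, to = 20, by = 0.01)
out_delayed <- ode(times = times, y = yini, func = test_delayed, parms = NULL)
plot(out_delayed)
If, instead, the intent is simply to solve the unchanged system on a shifted time axis, passing times <- seq(from = 5, to = 20, by = 0.01) to ode() would do that.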

ReacTran 2D Diffusion model with non-conformable arrays error in R, matrix and setup.grid.1D and setup.grid.2D

I am trying to model diffusion in 2D in R, with the diffusion rate depending on the density y. I have completed this model in 1D, but when I try to change it to 2D I keep getting this error:
Error in -VF.grid$x.int * D.grid$x.int * diff(rbind(C.x.up, C, C.x.down, non-conformable arrays
I have no data, as it is a simulation. My code is as follows:
library(ReacTran)
N <- 50 # number of grid cells
Nx <-50
Ny <-50
XX <- 10 # total size
dy <- dx <- XX/N # grid size
Dy <- Dx <- 0.1 # diffusion coeff, X- and Y-direction
r <- 0.005 # growth rate
ini <- 10 # initial value at x=0
N2 <- ceiling(N/2)
K <- 100 #Carrying Capacity
A0<- 2 #pop ini size
x.grid <- setup.grid.1D(x.up = 0, x.down = 1, N = N)
y.grid <- setup.grid.1D(x.up = 0, x.down = 1, N = N)
grid2D <- setup.grid.2D(x.grid, y.grid)
D.grid <- setup.prop.2D(value = Dx, y.value = Dy, grid = grid2D) #diffusion coefficient on cell interfaces
v.grid <- setup.prop.2D(value = 0, y.value=0, grid = grid2D) #advection velocity
A.grid <- setup.prop.2D(value = 1, y.value=1, grid = grid2D) #interface area
AFDW.grid <- setup.prop.2D(value = 0, y.value=0, grid = grid2D) #advective finite difference weighting
VF.grid <- setup.prop.2D(value = 0, y.value=1, grid = grid2D) #volume fraction
# The model equations - using the grids
Diff2Db <- function(t, y, parms) {
  U <- matrix(nrow = N, ncol = N, data = y)
  dCONC <- tran.2D(C = y, C.x.up = 0, C.x.down = 0,
                   C.y.up = 0, C.y.down = 0,
                   grid = grid2D, D.grid = D.grid,
                   D.x = (y-1)^2 + 1, D.y = (y-1)^2 + 1, dx = dx, dy = dy,
                   A.grid = A.grid,
                   VF.grid = VF.grid, AFDW.grid = AFDW.grid, v.grid = v.grid
                   )$dC
  return(list(dCONC))
}
# initial condition: 0 everywhere, except in central point
y <- matrix(nrow = N, ncol = N, data = 0)
y[N2,N2] <- ini # initial concentration in the central point...
times <- 0:8
outb <- ode.2D (y = y, func = Diff2Db, t = times, parms = NULL,
dim = c(49, N), lrw = 160000)
I am out of ideas for how to fix it. Any help would be greatly appreciated.
Thank you in advance
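For reference, two things stand out in the posted code: the matrix U is built from y but the flat vector y is passed to tran.2D, and D.grid, D.x/D.y, dx/dy and grid are all supplied at the same time. Below is a reduced sketch (my own, with a constant diffusion coefficient only) that runs on a 50 x 50 grid over a domain of size 10; a density-dependent coefficient would additionally have to be supplied on the cell interfaces (an (Nx+1) x Ny matrix for D.x and an Nx x (Ny+1) matrix for D.y), which is not shown here.
library(ReacTran)
library(deSolve)
Nx <- Ny <- 50
x.grid <- setup.grid.1D(x.up = 0, x.down = 10, N = Nx)
y.grid <- setup.grid.1D(x.up = 0, x.down = 10, N = Ny)
grid2D <- setup.grid.2D(x.grid, y.grid)
D.grid <- setup.prop.2D(value = 0.1, y.value = 0.1, grid = grid2D)  # constant diffusion coefficient
Diff2D <- function(t, y, parms) {
  U <- matrix(y, nrow = Nx, ncol = Ny)  # tran.2D expects the matrix, not the flat state vector
  dU <- tran.2D(C = U,
                C.x.up = 0, C.x.down = 0, C.y.up = 0, C.y.down = 0,
                grid = grid2D, D.grid = D.grid)$dC
  list(as.vector(dU))
}
U0 <- matrix(0, nrow = Nx, ncol = Ny)
U0[Nx/2, Ny/2] <- 10  # initial concentration in the central cell
times <- 0:8
outb2 <- ode.2D(y = U0, times = times, func = Diff2D, parms = NULL,
                dimens = c(Nx, Ny), lrw = 160000)
image(outb2, ask = FALSE)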
