I am trying to set up an "interface model for dosage adjustment that connects hematotoxicity to pharmacokinetics". The model contains a feedback loop, and the influx must not only be amplified but also delayed. Is there a good starting point for implementing such models in R?
The equations are shown below.
dCc/dt = -k12 * c_central + k21 * r * c_peripheral - k10 * c_central
dCp/dt =  k12 / r * c_central - k21 * c_peripheral
dy/dt  = -alpha * exp(-beta * y) * y + (c_central - gamma) * H(c_central - gamma),   y(0) = 0
dg/dt  = ks * kw * w0 * phi(w, param) * m(y, u) - (ks + n(y, v)) * g
dw/dt  = g(t - delay) - kw * w
where
m(y(t), u)       = exp(-u * y(t))
n(y(t), v)       = v * y(t)
phi(w(t), param) = (w(t) / W0)^(-param)
library(deSolve)

# Heaviside step function
H <- function(x) {
  ifelse(x < 0, 0, 1)
}

diffeq <- function(t, state, params) {
  with(as.list(c(state, params)), {
    dCc <- -k12 * c_central + k21 * r * c_peripheral - k10 * c_central  # r as in the equations above
    dCp <- k12 / r * c_central - k21 * c_peripheral
    dy  <- -alpha * exp(-beta * y) * y + (c_central - gamma) * H(c_central - gamma)
    # w0 is picked up from the global environment
    ds  <- ks * kw * ((w / w0)^-0.2) * (1 - exp(-u * y)) - ks * s
    # crude stand-in for the delayed influx g(t - delay)
    dw  <- -kw * w + (t - k) * s
    # derivatives must be returned in the same order as the state vector:
    # y, s, w, c_central, c_peripheral
    return(list(c(dy, ds, dw, dCc, dCp)))
  })
}
# Set the initial conditions and parameter values
y0 <- 0
s0 <- 10
w0 <- 3
c_central0 <- 500 / 5
c_peripheral0 <- 0
params <- c(Rs = 1, d = 0.1, k = 1.65, kw = 1, ks = 0.5, theta = 1, W = 100, K = 100,
            alpha = 0.85, beta = 1.5e-4, gamma = 0.2, u = 5.6e-4,
            k12 = 1.263e-1, k21 = 3.547e-1, k10 = 9.461e-2, r = 10000/42000)
# Solve the system with an ODE solver (the delay is not handled yet)
result <- ode(y = c(y = y0, s = s0, w = w0, c_central = c_central0, c_peripheral = c_peripheral0),
              times = seq(0, 40, by = 0.1), func = diffeq, parms = params)
# Plot the solution
plot(result[, "time"], result[, "c_central"], type = "l", col = "blue",
     xlab = "Time (t)", ylab = "Concentration / cell count")
lines(result[, "time"], result[, "s"], col = "red")
lines(result[, "time"], result[, "c_peripheral"], col = "green")
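For the delay term, I am wondering whether deSolve's dede() together with lagvalue() is the right starting point. Below is a minimal toy sketch of the kind of thing I mean (a single state x whose production depends on its own value delay time units ago; the names and the fall-back to the initial value before t = delay are just my assumptions):

library(deSolve)

# toy delayed-feedback model: dx/dt = a * x(t - delay) - b * x(t)
delayed_model <- function(t, state, parms) {
  with(as.list(c(state, parms)), {
    # no history exists before t = delay, so fall back to the initial value x0
    x_lag <- if (t < delay) x0 else lagvalue(t - delay, 1)
    dx <- a * x_lag - b * x
    list(c(dx))
  })
}

parms <- c(a = 0.5, b = 1, delay = 2, x0 = 1)
out_dede <- dede(y = c(x = 1), times = seq(0, 40, by = 0.1),
                 func = delayed_model, parms = parms)
plot(out_dede)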
I am new to R. I want to do some parameter estimation using maximum likelihood estimation.
Here is my attempt:
The data are
my_data = c(0.1,0.2,1,1,1,1,1,2,3,6,7,11,12,18,18,18,18,18,21,32,36,40,
45,45,47,50,55,60,63,63,67,67,67,67,72,75,79,82,82,83,
84,84,84,85,85,85,85,85,86,86)
and
lx <- function(p, x){
  l <- p[1]
  b <- p[2]
  a <- p[3]
  n <- length(x)
  lnL <- n*log(l) + n*log(b) + n*log(a) + (b-1)*sum(log(x)) +
    (a-1)*sum(log(1+l*x^b)) + n - sum(1+l*x^b)
  return(-lnL)
}
Note: l is λ, b is β, and a is α.
And here is the optim call:
optim(p=c(1,1,1),fn = lx, method = "L-BFGS-B",
lower = c(0.0001, 0.0001, 0.0001),
control = list(), hessian = FALSE, x = my_data)
After I run this code, I get an error message:
Error in optim(p = c(1, 1, 1), fn = lx, method = "L-BFGS-B", lower = c(1e-04, :
objective function in optim evaluates to length 50 not 1
What's wrong with my code? Can you help me to fix it? Thanks in advance!
Instead of hand-coding the log-likelihood, you can pass the density function straight to MASS::fitdistr.
#
# Power Generalized Weibull distribution
#
# x > 0, alpha, beta, lambda > 0
#
dpowergweibull <- function(x, alpha, beta, lambda){
  f1 <- lambda * beta * alpha
  f2 <- x^(beta - 1)
  f3 <- (1 + lambda * x^beta)^(alpha - 1)
  f4 <- exp(1 - (1 + lambda * x^beta)^alpha)
  f1 * f2 * f3 * f4
}
ppowergweibull <- function(q, alpha, beta, lambda){
  1 - exp(1 - (1 + lambda * q^beta)^alpha)
}
my_data <- c(0.1,0.2,1,1,1,1,1,2,3,6,7,11,12,18,18,18,18,18,21,32,36,40,
45,45,47,50,55,60,63,63,67,67,67,67,72,75,79,82,82,83,
84,84,84,85,85,85,85,85,86,86)
start_par <- list(alpha = 0.1, beta = 0.1, lambda = 0.1)
y1 <- MASS::fitdistr(my_data, dpowergweibull, start = start_par)

start_par2 <- list(shape = 1, rate = 1)
y2 <- MASS::fitdistr(my_data, "gamma", start = start_par2)
hist(my_data, freq = FALSE)
curve(dpowergweibull(x, y1$estimate[1], y1$estimate[2], y1$estimate[3]),
from = 0.1, to = 90, col = "red", add = TRUE)
curve(dgamma(x, y2$estimate[1], y2$estimate[2]),
from = 0.1, to = 90, col = "blue", add = TRUE)
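If you want a quick numerical comparison on top of the histogram, fitdistr objects also have a logLik method, so (assuming both fits converged) something like this works:

logLik(y1)
logLik(y2)
AIC(y1, y2)  # lower AIC = better trade-off between fit and number of parameters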
I get an error from the nls function in R. I have searched similar questions, but they did not solve this problem; for example, I tried nlsLM from the minpack.lm package and it also fails. So I have to ask for help here. Here is the code:
tt = c(10, 30, 50, 90, 180, 360, 720, 1440, 2880, 4320, 8640, 12960)
x = c(
1.53901e-06,
1.22765e-06,
1.11200e-06,
9.25185e-07,
8.71809e-07,
8.80705e-07,
8.36225e-07,
7.82849e-07,
8.18433e-07,
6.04928e-07,
3.46944e-07,
4.44800e-07
)
y = c(
3.81639e-06,
5.00623e-06,
4.62815e-06,
5.10631e-06,
4.48359e-06,
3.30487e-06,
2.64879e-06,
2.13727e-06,
8.02865e-07,
1.91487e-06,
3.73855e-06,
2.32631e-06
)
nt = length(tt)
L0 = 0.005
y0 = 0.000267681
model = function(K, Kd, k1) {
  eta = 5 / (4 * Kd + 40)
  eta1 = 1 - eta
  eta1_seq = eta1 ^ c(0:(nt - 1))
  Lt = L0 * eta * cumsum(eta1_seq)
  b = K * x - K * Lt + 1
  L = (-b + sqrt(b ^ 2.0 + 4 * K * Lt)) / (2 * K)
  cx = x * K * L / (K * L + 1)
  qx = Kd * cx
  q1 = y0 * (1 - k1 * sqrt(tt))
  y = qx + q1
  return(y)
}
fit <- nls(
  y ~ model(K, Kd, k1),
  start = list(K = 1e+15, Kd = 10, k1 = 1e-5),
  lower = c(1e+13, 1, 1e-10),
  upper = c(1e+20, 200, 1e-3),
  algorithm = "port"
)
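For reference, this is the nlsLM attempt I mentioned above (same model, start values and bounds), which also fails for me:

library(minpack.lm)
fit_lm <- nlsLM(
  y ~ model(K, Kd, k1),
  start = list(K = 1e+15, Kd = 10, k1 = 1e-5),
  lower = c(1e+13, 1, 1e-10),
  upper = c(1e+20, 200, 1e-3)
)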
Thanks in advance for your help!
Consider the following example of a nonlinear optimization problem. The procedure is too slow to use in simulation studies; in my case a single replication takes about 2.5 hours. How can I speed up the process?
library(mvtnorm)
library(alabama)
n = 200
X <- matrix(0, nrow = n, ncol = 2)
X[,1:2] <- rmvnorm(n = n, mean = c(0,0), sigma = matrix(c(1,1,1,4),
ncol = 2))
x0 = matrix(c(X[1,1:2]), nrow = 1)
y0 = x0 - 0.5 * log(n) * (colMeans(X) - x0)
X = rbind(X, y0)
x01 = y0[1]
x02 = y0[2]
x1 = X[,1]
x2 = X[,2]
pInit = matrix(rep(0.1, n + 1), nrow = n + 1)
outopt = list(kkt2.check=FALSE, "trace" = FALSE)
f1 <- function(p) sum(sqrt(pmax(0, p)))/sqrt(n+1)
heq1 <- function(p) c(sum(x1 * p) - x01, sum(x2 * p) - x02, sum(p) - 1)
hin1 <- function(p) p - 1e-06
sol <- alabama::auglag(pInit, fn = function(p) -f1(p),
heq = heq1, hin = hin1,
control.outer = outopt)
-1 * sol$value
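One thing I have been wondering about is whether supplying analytic gradients and Jacobians (alabama::auglag accepts gr, heq.jac and hin.jac) would cut down the run time, since otherwise they are approximated numerically. A rough sketch of what I mean; the small floor inside the square root is just my way of avoiding division by zero:

# analytic gradient of -f1 and (constant) Jacobians of the constraints
gr1 <- function(p) -1 / (2 * sqrt(pmax(p, 1e-12)) * sqrt(n + 1))
heq1.jac <- function(p) rbind(x1, x2, rep(1, n + 1))   # 3 x (n+1) Jacobian of heq1
hin1.jac <- function(p) diag(n + 1)                    # Jacobian of p - 1e-06 is the identity

sol2 <- alabama::auglag(pInit, fn = function(p) -f1(p), gr = gr1,
                        heq = heq1, heq.jac = heq1.jac,
                        hin = hin1, hin.jac = hin1.jac,
                        control.outer = outopt)
-1 * sol2$value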
I keep getting the error:
Error in checkFunc(Func2, times, y, rho) :
The number of derivatives returned by func() (175) must equal the length of the initial conditions vector (51)
I am trying to create a model based on Brigatti et al. (2009), a predator-prey model with a spatial component.
x<-c(1:40000)
left_shift = function(x) {
x[c(2:length(x), 1)]
}
right_shift = function(x) {
x[c(length(x), 1:(length(x) - 1))]
}
laplace = function(x) {
return(c(left_shift(x) + right_shift(x) - 2 * x))
}
dxdt <- function(time, state, pars) {
prey = state[1:length(state) / 2]
pred = state[(length(state) / 2 + 1):length(state)]
dprey = pars[5] * laplace(prey) + pars[1] * prey - x[2] * prey * pred
dpred = pars[5] * laplace(pred) + pars[3] * prey * pred - pars[4] * pred
list(c(prey, pred, dprey, dpred))
}
time <- seq(0, 600, by = 1)
pars <- c(alpha=1,
beta = .5,
gamma = .2,
delta = .6,
D = 0.000008 #(0.004*0.004/2), #diffusion coefficient
)
state <- rep(0.1, 51)
out <- as.data.frame(ode(func = dxdt, y = state, parms = pars, times = time))
A few problems. First, missing parentheses:
prey = state[1:length(state) / 2]
should read
prey = state[1:(length(state) / 2)]
Second, your initial conditions are an odd number in length. state should specify the initial conditions for both prey and predator (in that order), so there should be two values per location and the vector should always be even in length.
Third, your function dxdt should return list(c(dprey, dpred)). There is no reason to return the values of the state variables themselves; the ODE solver calculates those.
Also, x[2] in the dprey line picks the second element of your global x vector (i.e. the constant 2); presumably you meant pars[2] (beta), so I have changed that below.
Fix those and this is what you get:
library(deSolve)

left_shift = function(x) {
  x[c(2:length(x), 1)]
}
right_shift = function(x) {
  x[c(length(x), 1:(length(x) - 1))]
}
# discrete 1-D Laplacian on a ring (periodic boundary)
laplace = function(x) {
  left_shift(x) + right_shift(x) - 2 * x
}
dxdt <- function(time, state, pars) {
  prey = state[1:(length(state) / 2)]
  pred = state[(length(state) / 2 + 1):length(state)]
  dprey = pars[5] * laplace(prey) + pars[1] * prey - pars[2] * prey * pred  # pars[2] = beta
  dpred = pars[5] * laplace(pred) + pars[3] * prey * pred - pars[4] * pred
  list(c(dprey, dpred))
}
time <- seq(0, 600, by = 1)
pars <- c(alpha = 1,
          beta = .5,
          gamma = .2,
          delta = .6,
          D = 0.000008) # (0.004*0.004/2), diffusion coefficient
state <- rep(0.1, 50)   # 25 prey values followed by 25 predator values
out <- as.data.frame(ode(func = dxdt, y = state, parms = pars, times = time))
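To get a quick look at the dynamics you can plot, for example, the prey density at every site over time (with the ordering above, columns 2 to 26 of out hold the prey sites and columns 27 to 51 the predators):

# prey density at each of the 25 sites over time
matplot(out[, 1], out[, 2:26], type = "l", lty = 1,
        xlab = "time", ylab = "prey density")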
I'm trying to use the nlsBoot function from the nlstools package on a model fitted inside a function I created. I get an error when calling nlsBoot on the fit returned by my function, but if I call nls directly on the data it works. What is happening, and is there a way around this?
# Nonlinear function to generate data
NEE <- function(GPmax, alpha, resp, PAR) {
((alpha * PAR * GPmax)/((alpha * PAR)+ GPmax)) - resp
}
# some data
library(data.table)
plot <- rep(c(1, 2), each = 2000)
PAR <- 1:2000
dat <- data.table(plot, PAR)
dat[, GPP := NEE(12, 0.73, -2, PAR) + rnorm(length(PAR), sd = 2)]
library(nlstools)
# Function I created
model.fun <- function(df){
  fit <- nls(GPP ~ ((alpha * PAR * GPmax) / ((alpha * PAR) + GPmax)) - resp,
             start = list(GPmax = 12, alpha = 0.73, resp = -2), data = df)
  return(list(b = summary(nlsBoot(fit))))
}
models <- dat[, list(model.fun(.SD)), by = .(plot)]
# Error
# Error in data2[, var1] <- fitted1 + sample(scale(resid1, scale = FALSE), :
# object of type 'closure' is not subsettable
# Using nls directly outside of the function I created for plot 1.
mod1 <- nls(GPP ~ ((alpha * PAR * GPmax)/((alpha * PAR)+ GPmax)) - resp,
start = list(GPmax = 12, alpha = 0.73, resp = -2), data = dat[plot==1])
# Bootstrap of residuals
summary(nlsBoot(mod1,niter=5))
mod2 <- nls(GPP ~ ((alpha * PAR * GPmax)/((alpha * PAR)+ GPmax)) - resp,
start = list(GPmax = 12, alpha = 0.73, resp = -2), data = dat[plot==2])
# Bootstrap of residuals
summary(nlsBoot(mod2,niter=5))
# works
I would write the bootstrapping myself. That gives you more control, and you avoid this kind of scoping issue:
library(boot)
model.fun <- function(dt){
  fit <- nls(GPP ~ ((alpha * PAR * GPmax) / ((alpha * PAR) + GPmax)) - resp,
             start = list(GPmax = 12, alpha = 0.73, resp = -2), data = dt)
  # this copy should be avoided if you have big data, but I don't have time right now:
  df <- copy(dt)
  df[, fitted := fitted(fit)]
  df[, resid := residuals(fit)]
  fun <- function(df, inds) {
    df[, bootGPP := fitted + resid[inds]]
    tryCatch(coef(nls(bootGPP ~ ((alpha * PAR * GPmax) / ((alpha * PAR) + GPmax)) - resp,
                      start = list(GPmax = 12, alpha = 0.73, resp = -2), data = df)),
             error = function(e) c("GPmax" = NA, "alpha" = NA, "resp" = NA))
  }
  b <- boot(df, fun, R = 1000)
  res0 <- b$t0
  res1 <- apply(b$t, 2, sd, na.rm = TRUE)
  res2 <- res0 - colMeans(b$t, na.rm = TRUE)
  return(as.list(setNames(c(sum(!is.na(b$t[, 1])), res0, res1, res2),
                          c("n", "GPmax", "alpha", "resp",
                            "SEGPmax", "SEalpha", "SEresp",
                            "BiasGPmax", "Biasalpha", "Biasresp"))))
}
set.seed(42)
models <- dat[, model.fun(.SD) , by = .(plot)]
# plot n GPmax alpha resp SEGPmax SEalpha SEresp BiasGPmax Biasalpha Biasresp
#1: 1 1000 12.60382 0.9308744 -1.3579906 1.249449 0.2729928 1.263640 -0.11642169 -0.04376221 -0.11526536
#2: 2 1000 13.58702 0.8660450 -0.5081954 1.109599 0.2085150 1.125234 -0.06664517 -0.02241303 -0.06447617
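If 1000 resamples per plot ever gets slow on bigger data, boot() can also parallelise the resampling; swapping this in for the b <- boot(df, fun, R = 1000) line above should work:

# "multicore" forks and is not available on Windows; use parallel = "snow" there
b <- boot(df, fun, R = 1000, parallel = "multicore", ncpus = 4)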