I recently started with Julia and wanted to implement one of my usual problems: time-dependent events.
For now I have:
# Packages
using Plots
using DifferentialEquations
# Parameters
k21 = 0.14*24
k12 = 0.06*24
ke = 1.14*24
α = 0.5
β = 0.05
η = 0.477
μ = 0.218
k1 = 0.5
V1 = 6
# Time
maxtime = 10
tspan = (0.0, maxtime)
# Dose
stim = 100
# Initial conditions
x0 = [0 0 2e11 8e11]
# Model equations
function system(dy, y, p, t)
    dy[1] = k21*y[2] - (k12 + ke)*y[1]
    dy[2] = k12*y[1] - k21*y[2]
    dy[3] = (α - μ - η)*y[3] + β*y[4] - k1/V1*y[1]*y[3]
    dy[4] = μ*y[3] - β*y[4]
end
# Events
eventtimes = [2, 5]
function condition(y, t, integrator)
    t - eventtimes
end
function affect!(integrator)
    x0[1] = stim
end
cb = ContinuousCallback(condition, affect!)
# Solve
prob = ODEProblem(system, x0, tspan)
sol = solve(prob, Rodas4(), callback = cb)
# Plotting
plot(sol, layout = (2, 2))
But the output it gives is not correct. More specifically, the events are not taken into account, and the initial condition for y1 seems to be stim rather than 0.
Any help would be greatly appreciated.
t - eventtimes doesn't work because one's a scalar and the other is a vector. But for this case, it's much easier to just use a DiscreteCallback. When you make it a DiscreteCallback, you should pre-set the stop times so that the integrator hits exactly 2 and 5 for the callback. Here's an example:
# Packages
using Plots
using DifferentialEquations
# Parameters
k21 = 0.14*24
k12 = 0.06*24
ke = 1.14*24
α = 0.5
β = 0.05
η = 0.477
μ = 0.218
k1 = 0.5
V1 = 6
# Time
maxtime = 10
tspan = (0.0, maxtime)
# Dose
stim = 100
# Initial conditions
x0 = [0 0 2e11 8e11]
# Model equations
function system(dy, y, p, t)
    dy[1] = k21*y[2] - (k12 + ke)*y[1]
    dy[2] = k12*y[1] - k21*y[2]
    dy[3] = (α - μ - η)*y[3] + β*y[4] - k1/V1*y[1]*y[3]
    dy[4] = μ*y[3] - β*y[4]
end
# Events
eventtimes = [2.0, 5.0]
function condition(y, t, integrator)
    t ∈ eventtimes
end
function affect!(integrator)
    integrator.u[1] = stim
end
cb = DiscreteCallback(condition, affect!)
# Solve
prob = ODEProblem(system, x0, tspan)
sol = solve(prob, Rodas4(), callback = cb, tstops = eventtimes)
# Plotting
plot(sol, layout = (2, 2))
This avoids rootfinding altogether, so it should be a much nicer solution than hacking time choices into a rootfinding system.
Either way, notice that the affect was changed to
function affect!(integrator)
    integrator.u[1] = stim
end
It needs to modify the current u value; otherwise it won't do anything.
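As a side note, DiffEqCallbacks.jl also provides a PresetTimeCallback that bundles this DiscreteCallback-plus-tstops pattern into one call. A minimal sketch, reusing stim, eventtimes, and prob from the example above:

using DiffEqCallbacks

# PresetTimeCallback fires the affect! at exactly the given times and
# registers them as stop times automatically, so no tstops kwarg is needed
dose!(integrator) = integrator.u[1] = stim
cb = PresetTimeCallback(eventtimes, dose!)
sol = solve(prob, Rodas4(), callback = cb)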
I am solving a differential equation in Julia; the corresponding equations are given in the code below. Solving the differential equation gives me only the two variables. But for my work I also want the derivatives of the variables at each time step after the integration is done, and this is where I am unable to proceed further.
using DelimitedFiles
using LightGraphs
using LinearAlgebra
using Random
using Statistics   # for mean
using PyPlot
using BenchmarkTools
using SparseArrays
const N= 100;
Adj=readdlm("sf_simplicial_100.txt")
G=Graph(Adj)
global A = adjacency_matrix(G)
global deg=degree(G)
global omega=deg
mean(deg)
A2=zeros(N,N,N);
for i in 1:N
    for j in 1:N
        for k in 1:N
            if (A[i,j]==1 && A[j,k]==1 && A[k,i]==1)
                A2[i,j,k]=1
                A2[i,k,j]=1
                A2[j,k,i]=1
                A2[j,i,k]=1
                A2[k,i,j]=1
                A2[k,j,i]=1
            end
        end
    end
end
K = sum(p -> A2[:,:,p], 1:N)
deg_sim = sum(j -> K[:,j], 1:N)/2;
deg_sim2 = 2*deg_sim;
function kuramoto(du, u, pp, t)
    u1 = @view u[1:N]        # θ
    u2 = @view u[N+1:2*N]    # λ
    du1 = @view du[1:N]      # dθ
    du2 = @view du[N+1:2*N]  # dλ
    α1 = 0.08
    β1 = 0.04
    σ1 = 1.0
    σ2 = 1.0
    λ0 = pp
    # local order
    z1 = Array{Complex{Float64},1}(undef, N)
    mul!(z1, A, exp.((u1)im))
    z1 = z1 ./ deg
    # generalized local order
    z2 = Array{Complex{Float64},1}(undef, N)
    z2 = diag(A*Diagonal(exp.((u1)im))*A*Diagonal(exp.((u1)im))*A)
    z2 = z2 ./ deg_sim2
    # equations of motion
    @. du1 = omega + u2 * (σ1 * deg * imag(z1 * exp((-1im) * u1)) + σ2 * deg_sim * imag(z2 * exp((-1im) * 2*u1)))
    @. du2 = α1 * (λ0 - u2) - β1 * (abs(z1) + abs(z2)) / 2.0
    return nothing
end;
using DifferentialEquations
# setting up time steps and integration intervals
dt = 0.01 # time step
dts = 0.1 # save time
ti = 0.0
tt = 1000.0
tf = 5000.0
nt = Int(div(tt,dts))
nf = Int(div(tf,dts))
tspan = (ti, tf); # time interval
pp=0.75
ini=readdlm("N=100/initial_condition.txt")
u0=[ini;pp*ones(N)];
du = similar(u0);
prob = ODEProblem(kuramoto,u0, tspan, pp)
sol = solve(prob, RK4(), reltol=1e-4, saveat=dts,maxiters=1e10,progress=true)
Use the derivative of the interpolation, sol(t, Val{1}). To do this you'll want to not use saveat. Otherwise, you can use the SavingCallback.
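A minimal sketch of the interpolation approach on a toy problem (the system here is illustrative, not the Kuramoto model above):

using DifferentialEquations

# Toy in-place ODE, just to show the interface
f!(du, u, p, t) = (du .= -u)
prob = ODEProblem(f!, [1.0, 2.0], (0.0, 10.0))

# No saveat, so the dense interpolation is kept
sol = solve(prob, Tsit5())

ts = 0.0:0.1:10.0
us = sol(ts)          # interpolated solution values
dus = sol(ts, Val{1}) # first derivative of the interpolation at each t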
I am struggling to plot an evaluated function and its Chebyshev approximation.
I am using Julia 1.2.0.
EDIT: Sorry, added complete code.
using Plots
using Printf
pyplot()
mutable struct Cheb_struct
    c::Vector{Float64}
    min::Float64
    max::Float64
end
function cheb_coeff(min::Float64, max::Float64, n::Int, fn::Function)::Cheb_struct
    struc = Cheb_struct(Vector{Float64}(undef, n), min, max)
    f = Vector{Float64}(undef, n)
    p = Vector{Float64}(undef, n)
    max_plus_min = (max + min) / 2
    max_minus_min = (max - min) / 2
    for k in 0:n-1
        p[k+1] = pi * ((k+1) - 0.5) / n
        f[k+1] = fn(max_plus_min + cos(p[k+1])*max_minus_min)
    end
    n2 = 2 / n
    for j in 0:n-1
        s = 0
        for i in 0:n-1
            s += f[i+1]*cos(j*p[i+1])
        end
        struc.c[j+1] = s * n2
    end
    return struc
end
function approximate(struc::Cheb_struct, x::Float64)::Float64
    x1 = (2*x - struc.max - struc.min) / (struc.max - struc.min)
    x2 = 2*x1
    t = s = 0
    for j in length(struc.c):-1:2
        pom = s
        s = x2 * s - t + struc.c[j]
        t = pom
    end
    return (x1 * s - t + struc.c[1] / 2)
end
fn = sin
struc = cheb_coeff(0.0, 1.0, 10, fn)
println("coeff:")
for x in struc.c
    @printf("% .15f\n", x)
end
println("\n x eval approx eval-approx")
for x in struc.min:0.1:struc.max
    eval = fn(x)
    approx = approximate(struc, x)
    @printf("%11.8f %12.8f %12.8f % .3e\n", x, eval, approx, eval - approx)
    display(plot(x=eval, y=approx))
end
I am getting an empty plot window.
I would be very grateful if someone could show me how to plot these two functions.
You should provide working code as an example. However, the code below shows how to plot:
using Plots
pyplot()
fn = sin
approxf(x) = sin(x)+rand()/10
x = 0:0.1:1
evalv = fn.(x)
approxv = approxf.(x)
p = plot(evalv,approxv)
using PyPlot
PyPlot.display_figs() #needed when running in IDE such as Atom
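Applied to the code in the question, the same pattern would look roughly like this (a sketch, assuming the fn, struc, and approximate definitions from above):

xs = collect(struc.min:0.1:struc.max)
evals = fn.(xs)                        # exact values
approxs = approximate.(Ref(struc), xs) # Chebyshev approximation
# plot both curves against x in one call instead of inside the loop
display(plot(xs, [evals approxs], label = ["eval" "approx"]))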
I've tried to reproduce the model from a PyMC3 and Stan comparison, but it seems to run slowly, and when I look at @code_warntype there are some things (K and N, I think) which the compiler seemingly calls Any.
I've tried adding types, though I can't add types to turing_model's arguments, and things are complicated within turing_model because it uses autodiff variables and not the usual types. I put all the code into the function do_it to avoid globals, because they say that globals can slow things down. (It actually seems slower, though.)
Any suggestions as to what's causing the problem? The turing_model code is what's iterating, so that should make the most difference.
using Turing, StatsPlots, Random, Statistics   # Statistics for median
sigmoid(x) = 1.0 / (1.0 + exp(-x))
function scale(w0::Float64, w1::Array{Float64,1})
    scale = √(w0^2 + sum(w1 .^ 2))
    return w0 / scale, w1 ./ scale
end
function do_it(iterations::Int64)
    K = 10                        # predictor dimension
    N = 1000                      # number of data samples
    X = rand(N, K)                # predictors (1000, 10)
    w1 = rand(K)                  # weights (10,)
    w0 = -median(X * w1)          # 50% of elements for each class (number)
    w0, w1 = scale(w0, w1)        # unit length (euclidean)
    w_true = [w0, w1...]
    y = (w0 .+ (X * w1)) .> 0.0   # labels
    y = [Float64(x) for x in y]
    σ = 5.0
    σm = [x == y ? σ : 0.0 for x in 1:K, y in 1:K]
    @model turing_model(X, y, σ, σm) = begin
        w0_pred ~ Normal(0.0, σ)
        w1_pred ~ MvNormal(σm)
        p = sigmoid.(w0_pred .+ (X * w1_pred))
        @inbounds for n in 1:length(y)
            y[n] ~ Bernoulli(p[n])
        end
    end
    @time chain = sample(turing_model(X, y, σ, σm), NUTS(iterations, 200, 0.65));
    # ϵ = 0.5
    # τ = 10
    # @time chain = sample(turing_model(X, y, σ), HMC(iterations, ϵ, τ));
    return (w_true=w_true, chains=chain::Chains)
end
chain = do_it(1000)
I'm trying to reproduce a system of ODEs (Fig. 3B in Tilman, 1994, Ecology, Vol. 75, No. 1, pp. 2-16), but Julia's integration method fails to give a solution.
The error is dt <= dtmin. Aborting.
using DifferentialEquations
TFour = @ode_def TilmanFour begin
    dp1 = c1*p1*(1-p1) - m*p1
    dp2 = c2*p2*(1-p1-p2) - m*p2 - c1*p1*p2
    dp3 = c3*p3*(1-p1-p2-p3) - m*p3 - c1*p1*p2 - c2*p2*p3
    dp4 = c4*p4*(1-p1-p2-p3-p4) - m*p4 - c1*p1*p2 - c2*p2*p3 - c3*p3*p4
end c1 c2 c3 c4 m
u0 = [0.05,0.05,0.05,0.05]
p = (0.333,3.700,41.150,457.200,0.100)
tspan = (0.0,300.0)
prob = ODEProblem(TFour,u0,tspan,p)
sol = solve(prob,alg_hints=[:stiff])
I think that you read the equations wrong. The last term in the paper is
sum(c[j]*p[j]*p[i] for j<i)
Note that every term in the equation for dp[i] has a factor p[i].
Thus your equations should read
dp1 = p1 * (c1*(1-p1) - m)
dp2 = p2 * (c2*(1-p1-p2) - m - c1*p1)
dp3 = p3 * (c3*(1-p1-p2-p3) - m - c1*p1 -c2*p2)
dp4 = p4 * (c4*(1-p1-p2-p3-p4) - m - c1*p1 - c2*p2 - c3*p3)
where I also made explicit that dpk is a multiple of pk. This is necessary as it ensures that the dynamics stay in the octant of positive variables.
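In the @ode_def form from the question, the corrected system would look like this (a sketch; note that on recent versions @ode_def lives in ParameterizedFunctions.jl):

using DifferentialEquations

# Corrected Tilman system: every term in dpk carries the factor pk
TFour = @ode_def TilmanFour begin
    dp1 = p1 * (c1*(1-p1) - m)
    dp2 = p2 * (c2*(1-p1-p2) - m - c1*p1)
    dp3 = p3 * (c3*(1-p1-p2-p3) - m - c1*p1 - c2*p2)
    dp4 = p4 * (c4*(1-p1-p2-p3-p4) - m - c1*p1 - c2*p2 - c3*p3)
end c1 c2 c3 c4 m

u0 = [0.05, 0.05, 0.05, 0.05]
p = (0.333, 3.700, 41.150, 457.200, 0.100)
prob = ODEProblem(TFour, u0, (0.0, 300.0), p)
sol = solve(prob, alg_hints = [:stiff])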
Using Python, the plot looks like the one in the paper:
import numpy as np
import matplotlib.pyplot as plt
from scipy.integrate import odeint

def p_ode(p, c, m):
    return [p[i]*(c[i]*(1-sum(p[j] for j in range(i+1))) - m[i] - sum(c[j]*p[j] for j in range(i))) for i in range(len(p))]

c = [0.333, 3.700, 41.150, 457.200]; m = 4*[0.100]
u0 = [0.05, 0.05, 0.05, 0.05]
t = np.linspace(0, 60, 601)
p = odeint(lambda u, t: p_ode(u, c, m), u0, t)
for k in range(4): plt.plot(t, p[:,k], label='$p_%d$' % (k+1))
plt.grid(); plt.legend(); plt.show()
I am trying to implement a gradient descent algorithm from scratch to find the slope and intercept values for my linear fit line.
Using a package to calculate the slope and intercept, I get slope = 0.04 and intercept = 7.2, but when I use my gradient descent algorithm for the same problem, I get slope and intercept values of (-Inf, -Inf).
Here is my code:
x= [1,2,3,4,5,6,7,8,9,10,11,12,13,141,5,16,17,18,19,20]
y=[2,3,4,5,6,7,8,9,10,11,12,13,141,5,16,17,18,19,20,21]
function GradientDescent()
    m = 0
    c = 0
    for i = 1:10000
        for k = 1:length(x)
            Yp = m*x[k] + c
            E = y[k] - Yp      # error in predicted value
            dm = 2*E*(-x[k])   # partial derivative of cost function w.r.t. slope (m)
            dc = 2*E*(-1)      # partial derivative of cost function w.r.t. intercept (c)
            m = m + (dm * 0.001)
            c = c + (dc * 0.001)
        end
    end
    return m, c
end
Values = GradientDescent() # after running values = (-inf,-inf)
I have not done the math, but instead wrote the tests. It seems you got a sign error when assigning m and c.
Also, writing the tests really helps, and Julia makes it simple :)
function GradientDescent(x, y)
    m = 0.0
    c = 0.0
    for i = 1:10000
        for k = 1:length(x)
            Yp = m*x[k] + c
            E = y[k] - Yp
            dm = 2*E*(-x[k])
            dc = 2*E*(-1)
            m = m - (dm * 0.001)
            c = c - (dc * 0.001)
        end
    end
    return m, c
end
using Base.Test
@testset "gradient descent" begin
    @testset "slope $slope" for slope in [0, 1, 2]
        @testset "intercept for $intercept" for intercept in [0, 1, 2]
            x = 1:20
            y = broadcast(x -> slope * x + intercept, x)
            computed_slope, computed_intercept = GradientDescent(x, y)
            @test slope ≈ computed_slope atol=1e-8
            @test intercept ≈ computed_intercept atol=1e-8
        end
    end
end
I can't get your exact numbers, but this is close. Perhaps it helps?
# 141 ?
datax = [1,2,3,4,5,6,7,8,9,10,11,12,13,141,5,16,17,18,19,20]
datay = [2,3,4,5,6,7,8,9,10,11,12,13,141,5,16,17,18,19,20,21]
function gradientdescent()
    m = 0
    b = 0
    learning_rate = 0.00001
    for n in 1:10000
        for i in 1:length(datay)
            x = datax[i]
            y = datay[i]
            guess = m * x + b
            error = y - guess
            dm = 2error * x
            dc = 2error
            m += dm * learning_rate
            b += dc * learning_rate
        end
    end
    return m, b
end
gradientdescent()
(-0.04, 17.35)
It seems that adjusting the learning rate is critical...
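For reference, the least-squares slope and intercept can also be computed directly with a linear solve, which gives a baseline to check the descent against (a sketch, reusing datax and datay from above):

# closed-form least squares: fit y ≈ b + m*x by solving [1 x] \ y
X = [ones(length(datax)) datax]
b, m = X \ datay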