I'm trying to solve a typical example from the DifferentialEquations package, following the guide on their page.
Here is the example:
using DifferentialEquations
using Plots
function lorenz(t, u, du)
    du[1] = 10.0(u[2] - u[1])
    du[2] = u[1]*(28.0 - u[3]) - u[2]
    du[3] = u[1]*u[2] - (8/3)*u[3]
end
u0 = [1.0;0.0;0.0]
tspan = (0.0,100.0)
prob = ODEProblem(lorenz,u0,tspan)
sol = solve(prob)
plot(sol,vars=(1,2,3))
Then I get:
ERROR: LoadError: Parameters were indexed but the parameters are nothing. You likely forgot to pass in parameters to the DEProblem!
What is wrong here?
Thanks in advance!
You could try the following example, which builds on what you've done. The error comes from the function signature: current versions of DifferentialEquations.jl expect the in-place form f(du, u, p, t) with an explicit parameter argument p, not the old f(t, u, du) order:
using DifferentialEquations
using Plots

# Lorenz system with parameters p = (σ, ρ, β)
function lorenz(du, u, p, t)
    du[1] = p[1]*(u[2] - u[1])
    du[2] = u[1]*(p[2] - u[3]) - u[2]
    du[3] = u[1]*u[2] - p[3]*u[3]
end

u0 = [1.0; 0.0; 0.0]
tspan = (0.0, 100.0)
p = (10.0, 28.0, 8/3)
prob = ODEProblem(lorenz, u0, tspan, p)
sol = solve(prob)

# time series, phase-plane, and 3D views of the solution
xyzt = plot(sol, plotdensity = 10000, lw = 1.5)
xy = plot(sol, plotdensity = 10000, vars = (1, 2))
xz = plot(sol, plotdensity = 10000, vars = (1, 3))
yz = plot(sol, plotdensity = 10000, vars = (2, 3))
xyz = plot(sol, plotdensity = 10000, vars = (1, 2, 3))
plot(plot(xyzt, xyz), plot(xy, xz, yz, layout = (1, 3), w = 1), layout = (2, 1))
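If you later want to re-solve with other parameter values, you don't have to rebuild the problem. A minimal sketch, assuming the setup above, using remake from the SciML problem interface:

# reuse the same problem with a different third parameter (β)
prob2 = remake(prob, p = (10.0, 28.0, 3.0))
sol2 = solve(prob2)
plot(sol2, vars = (1, 2, 3))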
My interactive plot (topoplot) reacts to mouse signals, but how can I make it react to keyboard signals?
Here is my code:
f = Figure()
xs = 1:1:193 # range(-30, 120, length = size(dat_e, 2))
sg = SliderGrid(f[2, 1],
    (label = "time", range = xs, format = "{:d} ms", startvalue = 100),
)
time = sg.sliders[1].value
str = lift(t -> "[$t ms]", time)
topo_slice = lift((t, data) -> mean(data[1:30, t, :], dims = 2)[:, 1], time, dat_e)
topo_axis = Axis(f[1, 1], aspect = DataAspect()) # aspect ratio: keep height and width proportional
topo = eeg_topoplot!(topo_axis, topo_slice,
    raw.ch_names[1:30];
    positions = pos, # produced automatically from ch_names
    label_text = true)
text!(topo_axis, 1, 1, text = str, align = (:center, :center))
#topo_slice = lift((t, data) -> data[:, :, t], time, topo)
xlims!(-0.2, 1.1)
ylims!(-0.2, 1.2)
hidedecorations!(topo_axis)
hidespines!(topo_axis)
f
There is official documentation at https://docs.juliahub.com/AbstractPlotting/6fydZ/0.12.11/interaction.html, but, as usual with Julia documentation, there is no example, and I have no idea how to implement it in my code.
This is how my plot looks:
Expanding on the answer from before:
T = 10
pts = range(-1, 1, length = 100)
ts = reshape(1:T, 1, 1, :)
topo = cos.(pts) .+ cos.(ts .* pts') # dummy 100×100×T data cube

fig = Figure()
ax = Axis(fig[1, 1])
sg = SliderGrid(fig[2, 1],
    (label = "time", range = 1:T))
time = sg.sliders[1].value
str = lift(t -> "[$t ms]", time)
text!(ax, str)
topo_slice = lift((t, data) -> data[:, :, t], time, topo)

# decrement/increment slider with left/right keys
on(events(fig).keyboardbutton) do btn
    if btn.action in (Keyboard.press, Keyboard.repeat)
        if btn.key == Keyboard.left
            set_close_to!(sg.sliders[1], time[] - 1)
        elseif btn.key == Keyboard.right
            set_close_to!(sg.sliders[1], time[] + 1)
        end
    end
end

contour!(ax, topo_slice)
hidedecorations!(ax)
hidespines!(ax)
fig
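The handler should drop into your code from the question essentially unchanged, up to the figure name (a sketch reusing your f, sg, and time):

# hook the arrow keys to the existing "time" slider
on(events(f).keyboardbutton) do btn
    if btn.action in (Keyboard.press, Keyboard.repeat)
        btn.key == Keyboard.left  && set_close_to!(sg.sliders[1], time[] - 1)
        btn.key == Keyboard.right && set_close_to!(sg.sliders[1], time[] + 1)
    end
end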
I have a complicated equation for which I have written the code as follows:
sigma = 1.336449027; # degree of product substitutability
f_t = 0.500185113;
alpha = 0.364; # elasticity of capital
beta = 0.115; # elasticity of labor
R = 3.131696599;
chi = 0.5;
M = log(1056);
W = log(29448.08908);
P = 3.0686;
aval = 1.25;
c = 0.5;
# kval, kprimeval, dval must be set before calling f; example values from the 1:10000 range:
kval = 1; kprimeval = 1; dval = 1;
f = function(b) {
  loutpow = sigma/(beta*(sigma - 1) - sigma);
  lconst1 = sigma/(beta*(sigma - 1));
  lconst2 = (aval*kval^alpha)^((1 - sigma)/sigma);
  lconst3 = (R*P^(sigma - 1))^(1/sigma);
  lval = (W/b*lconst2/lconst3*lconst1)^loutpow;
  profit_first_term = (R*P^(sigma - 1))^(1/sigma)*(aval*kval^alpha*lval^beta)^(1 - (1/sigma));
  profit_middle_terms = kval - kprimeval - f_t*kprimeval - c*(kval - kprimeval)^2; # c*(...), not c(...): c is a scalar
  profit_last_term = W/b*lval;
  profit = profit_first_term + profit_middle_terms - profit_last_term;
  profit - chi*dval # the bankruptcy condition whose root in b we want
}
For kval, kprimeval, dval each ranging from 1 to 10000, I want to find the roots of this equation, that is, the value of b. It is possible that no root exists for some values of kval, kprimeval, dval.
Apparently your function does not have a zero; plotting it over a wide range shows no sign change:
curve(f(x), -1, 1e9)
I have been trying to replicate https://diffeqflux.sciml.ai/dev/examples/BayesianNODE_NUTS/ with a different ODE equation, but I got the result below without any uncertainty quantification. Is it because the initial value u0 is high?
Could you please tell me what is wrong?
using DiffEqFlux, OrdinaryDiffEq, Flux, Optim, Plots, AdvancedHMC, MCMCChains
using JLD, StatsPlots

# Arps decline-curve ODE with parameters p = (n, tau)
function Arps!(du, u, p, t)
    y = u[1]
    n, tau = p
    du[1] = -(n*((t^n)/tau)*y/t)
end

tspan = (1.0, 50.0)
tsteps = 1:1:50
u0 = [16382.9]
p = [0.48, 15.92]

# generate the ground-truth data
prob_trueode = ODEProblem(Arps!, u0, tspan, p)
ode_data = Array(solve(prob_trueode, Tsit5(), saveat = tsteps))
ode_data = ode_data[1, :]

dudt = FastChain(FastDense(1, 30, tanh),
                 FastDense(30, 1))
prob_neuralode = NeuralODE(dudt, tspan, Tsit5(), saveat = tsteps)

function predict_neuralode(p)
    Array(prob_neuralode(u0, p))
end

function loss_neuralode(p)
    pred = predict_neuralode(p)
    loss = sum(abs2, ode_data .- vec(pred)) # vec: pred is 1×50 while ode_data is a 50-vector
    return loss, pred
end

# log joint density (Gaussian likelihood plus Gaussian prior on θ)
l(θ) = -sum(abs2, ode_data .- vec(predict_neuralode(θ))) - sum(θ .* θ)

function dldθ(θ)
    x, lambda = Flux.Zygote.pullback(l, θ)
    grad = first(lambda(1))
    return x, grad
end

metric = DiagEuclideanMetric(length(prob_neuralode.p))
h = Hamiltonian(metric, l, dldθ)
integrator = Leapfrog(find_good_stepsize(h, Float64.(prob_neuralode.p)))
prop = AdvancedHMC.NUTS{MultinomialTS, GeneralisedNoUTurn}(integrator)
adaptor = StanHMCAdaptor(MassMatrixAdaptor(metric), StepSizeAdaptor(0.45, prop.integrator))
samples, stats = sample(h, prop, Float64.(prob_neuralode.p), 500, adaptor, 500; progress = true)
losses = map(x -> x[1], [loss_neuralode(samples[i]) for i in 1:length(samples)])

################### RETRODICTED PLOTS: TIME SERIES #################
pl = scatter(tsteps, ode_data, color = :red, label = "Data: Var1", xlabel = "t", title = "Spiral Neural ODE")
for k in 1:300
    resol = predict_neuralode(samples[100:end][rand(1:400)])
    plot!(tsteps, resol[1, :], alpha = 0.04, color = :red, label = "")
end
idx = findmin(losses)[2]
prediction = predict_neuralode(samples[idx])
plot!(tsteps, prediction[1, :], color = :black, w = 2, label = "")
The most likely reason for this is that the loss magnitude is too high for the posterior samples, so the sampled trajectories fall outside the plotted range and are not visible.
This can possibly be fixed by (a) adding a scaling factor to the Neural ODE output, so that the loss function does not start at a very high magnitude, or (b) increasing the number of layers in the neural network architecture or changing the activation function.
By adding a scaling factor to the Neural ODE, I got good results, as shown in the figure below:
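For reference, option (a) can be as simple as rescaling the data and the initial condition before sampling (a minimal sketch against the setup above; scale is a hypothetical constant, here just the initial value):

# hypothetical scaling: train the Neural ODE on data of order one
scale = u0[1]                                   # ≈ 16382.9
scaled_data = ode_data ./ scale
predict_scaled(p) = vec(Array(prob_neuralode(u0 ./ scale, p)))

# the log density now starts at a much smaller magnitude
l(θ) = -sum(abs2, scaled_data .- predict_scaled(θ)) - sum(θ .* θ)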
mlr3 is really cool. I am trying to tune the regularisation parameter
searchspace_glmnet_trafo = ParamSet$new(list(
  ParamDbl$new("regr.glmnet.lambda", log(0.01), log(10))
))
searchspace_glmnet_trafo$trafo = function(x, param_set) {
  x$regr.glmnet.lambda = exp(x$regr.glmnet.lambda)
  x
}
but get the error
Error in glmnet::cv.glmnet(x = data, y = target, family = "gaussian", :
Need more than one value of lambda for cv.glmnet
A minimal non-working example is below. Any help is greatly appreciated.
library(mlr3verse)
data("kc_housing", package = "mlr3data")
library(anytime)
dates = anytime(kc_housing$date)
kc_housing$date = as.numeric(difftime(dates, min(dates), units = "days"))
kc_housing$zipcode = as.factor(kc_housing$zipcode)
kc_housing$renovated = as.numeric(!is.na(kc_housing$yr_renovated))
kc_housing$has_basement = as.numeric(!is.na(kc_housing$sqft_basement))
kc_housing$id = NULL
kc_housing$price = kc_housing$price / 1000
kc_housing$yr_renovated = NULL
kc_housing$sqft_basement = NULL

lrnglm = lrn("regr.glmnet")
tsk = TaskRegr$new("sales", kc_housing, target = "price")
fencoder = po("encode", method = "treatment",
              affect_columns = selector_type("factor"))
pipe = fencoder %>>% lrnglm
glearner = GraphLearner$new(pipe)
glearner$train(tsk)

searchspace_glmnet_trafo = ParamSet$new(list(
  ParamDbl$new("regr.glmnet.lambda", log(0.01), log(10))
))
searchspace_glmnet_trafo$trafo = function(x, param_set) {
  x$regr.glmnet.lambda = exp(x$regr.glmnet.lambda)
  x
}

inst = TuningInstance$new(
  tsk, glearner,
  rsmp("cv"), msr("regr.mse"),
  searchspace_glmnet_trafo, term("evals", n_evals = 100)
)
gsearch = tnr("grid_search", resolution = 100)
gsearch$tune(inst)
lambda needs to be a vector param, not a single value (as the message tells you).
I suggest not tuning cv.glmnet at all: this algorithm performs an internal 10-fold CV optimization and relies on its own sequence of lambda values.
Consult the help page of the learner for more information.
You can apply your own tuning (of the param s, not lambda) to glmnet::glmnet(); however, that algorithm is not (yet) available for use with {mlr3}.
I'm looking for custom zticklabels, and a custom fontsize, on the z-axis. Most notably, the intuitive approach of zticks([-(R+r),0,R+r],["-R-r","0","R+r"],fontsize=16) does not work. I am using Julia 0.4.3 because this is an older project which I cannot fully convert to a newer version at this time. The commented lines below include additional commands I tried that were unsuccessful.
My final goal is to have the -0.8, 0, and 0.8 values on the z-axis read "-r", "0", and "r" respectively.
using PyPlot
colormapp = "nipy_spectral"
R = 1.6;
r = 0.8;
N = 256;
dx = 2*pi/(N-1);
y = zeros(N,1); # y = phi (col) toroidal
x = y.'; # x = theta (row) poloidal
for ix = 2:N; y[ix] = (ix-1)*dx; x[ix] = (ix-1)*dx; end
cosxsqr = cos(x) .+ 0.0*y;
sinxsqr = sin(x) .+ 0.0*y;
sinysqr = 0.0*x .+ sin(y);
cosysqr = 0.0*x .+ cos(y);
Rrcosxsqr = R+r*cosxsqr;
rRrcosx = r*Rrcosxsqr[:];
Xsqr = Rrcosxsqr.*cosysqr;
Ysqr = Rrcosxsqr.*sinysqr;
Zsqr = r*sinxsqr;
figure(98)
clf()
pmeshtor = pcolormesh(x,y,Zsqr+r,cmap=colormapp);
cb = colorbar();
colorvals = Zsqr+r;
colorvals = colorvals/maximum(colorvals[:])
ax = figure(99)
clf()
srf = surf(Xsqr,Ysqr,Zsqr,cstride=10,rstride=10,facecolors=get_cmap(colormapp).o((colorvals)))
cb = colorbar(pmeshtor,ticks=[0,0.8,1.6])
cb[:ax][:set_yticklabels](["-r","0","r"], fontsize=16)
xlabel("x",fontsize=16)
ylabel("y",fontsize=16)
zlabel("z",fontsize=16)
xlim([-(R+r)-0.3,R+r+0.3])
ylim([-(R+r)-0.3,R+r+0.3])
zlim([-(R+r)-0.3,R+r+0.3])
xticks([-(R+r),0,R+r],["-R-r","0","R+r"],fontsize=16)
yticks([-(R+r),0,R+r],["-R-r","0","R+r"],fontsize=16)
zticks([-r,0,r])
#zticklabels([-r,0,r],["-r","0","r"])
#setp(ax[:get_zticklabels](),fontsize=16);
#setp(ax[:set_zticklabels](["-r","0","r"]))#,fontsize=16);
Here is the resulting image.
The commented command
setp(ax[:set_zticklabels](["-r","0","r"]), fontsize=16)
does work, but only if the missing projection option is inserted when creating Figure 99:
figure(99)
ax = subplot(111, projection="3d")
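Putting it together, the relevant part of Figure 99 becomes (a sketch based on the code above, keeping the old ax[:...] PyCall syntax):

figure(99)
ax = subplot(111, projection="3d") # 3D axes, so the z-axis methods exist
srf = surf(Xsqr, Ysqr, Zsqr, cstride=10, rstride=10,
           facecolors=get_cmap(colormapp).o(colorvals))
zticks([-r, 0, r])
setp(ax[:set_zticklabels](["-r", "0", "r"]), fontsize=16)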