I'm trying to write a Fortran subroutine that does some matrix multiplication. I use R to create the inputs:
set.seed(7232015)
#############
# meta data #
#############
B <- 200 # (actually millions)
D <- 100 # number of markov chain monte carlo draws (actually 4,000)
T <- 8 # number of quarters
#########
# input #
#########
input <- data.frame(
treat = sample(0:1, B, T), # treatment indicator
time = sample(1:T, B, T), # time
weight = rnorm(B), # weight
pred = rnorm(B), # predictions (x \hat\beta)
eresid = exp(rnorm(B))
) # exp(resid) exp(y - x \hat\beta)
thetaTime <- matrix(rnorm(T * D), T, D) # time-by-treatment intrxn
theta <-
thetaTime[input$time,] # pull off the relevant value for ea obs
rm(list=setdiff(ls(), c("input", "theta", "T")))
And I have an R function that does what I need:
test2 <- function(d, DF, theta, T){
D <- ncol(theta)
B <- nrow(DF)
DF$epredC <- exp(DF$pred + theta[,d] * (DF$treat == 1))
DF$epredT <- exp(DF$pred - theta[,d] * (DF$treat == 0))
DF$diff <-
as.vector(
DF$eresid %*% matrix(
DF$epredT, nrow = B, ncol = B, byrow = T
)
)
return(DF$diff)
}
x <- test2(d = 1, DF = input, theta = theta, T = T)
The Fortran subroutine should create exactly the vector x of size 200 in this example.
This is the Fortran code I wrote:
subroutine test3(d, i, nMCd, DF, theta, C)
integer, intent(in) :: d, i, nMCd
double precision, intent(in), dimension(i,5) :: DF
double precision, intent(in), dimension(i,nMCd) :: theta
double precision, dimension(i) :: epredC, epredT
double precision, intent(out), dimension(i) :: C
C=0.0d0
epredC = exp(DF(:,4) + (theta(:,d) * DF(:,1)))
epredT = exp(DF(:,4) + (theta(:,d) * (1-DF(:,1))))
do jj=1, i
do j=1, i
C(jj) = C(jj) + DF(j,5)*epredT(jj)
end do
end do
end subroutine test3
I am able to compile it and run it from R:
system("R CMD SHLIB ./Fortran/mytest.f90")
dyn.load("./Fortran/mytest.so")
X <- .Fortran("test3", d = as.integer(1), i = nrow(input),
nMCd = nrow(theta), DF = unlist(input),
theta = unlist(theta),
C = numeric(nrow(input)))
But in R, x[1] = 415.937, while in Fortran, X$C[1] = 10414.94.
What am I doing wrong? Thanks!
I know my do loop is wrong, but I cannot figure out why...
If I change my Fortran subroutine to this, it works as it should:
subroutine test5(d, i, nMCd, DF, theta, C)
integer, intent(in) :: d, i, nMCd
double precision, intent(in), dimension(i,5) :: DF
double precision, intent(in), dimension(i,nMCd) :: theta
double precision, dimension(i) :: epredC, epredT
double precision, intent(out), dimension(i) :: C
double precision, dimension(i,i) :: B
C=0.0d0
B=0.0d0
epredC = exp(DF(:,4) + (theta(:,d) * DF(:,1)))
epredT = exp(DF(:,4) + (theta(:,d) * (1-DF(:,1))))
do j=1,i
B(:,j)=epredT(j)
end do
C = matmul(DF(:,5), B)
end subroutine test5
Is the matmul version or the explicit loop more efficient in Fortran?
This is my Julia code to simulate data and sample from a Turing.jl model:
using LinearAlgebra, Distributions, StatsBase
using Turing, FillArrays, DynamicHMC, LabelledArrays
using NNlib, GLM
using CSV, DataFrames
function generate_hmnl_data(R::Int=100, S::Int=30, C::Int=3,
Theta::Array{Float64, 2}=ones(2, 4),
Sigma::Array{Float64, 2}=Matrix(Diagonal(fill(0.1, 4))))
K = size(Theta, 2)
G = size(Theta, 1)
Y = Array{Int64}(undef, R, S)
X = randn(R, S, C, K)
Z = Array{Float64}(undef, G, R)
Z[1, :] .= 1
if G > 1
Z[2:G, :] = randn(R * (G-1))
end
Beta = Array{Float64}(undef, K, R)
for r in 1:R
println(Z[:, r])
println(Theta)
Beta[:, r] = rand(MvNormal(Theta' * Z[:, r], Sigma))
for s in 1:S
Y[r, s] = sample(1:C, Weights(exp.(X[r, s, :, :] * Beta[:, r])))
end
end
return (R=R, S=S, C=C, K=K, G=G, Y=Y, X=X, Z=Z,
beta_true=Beta, Theta_true=Theta, Sigma_true=Sigma)
end
d1 = generate_hmnl_data()
@model function hmnl(G::Int, Y::Matrix{Int64}, X::Array{Float64}, Z::Matrix{Float64})
R, S, C, K = size(X)
Theta = zeros(K, G)
for k in 1:K
for g in 1:G
Theta[k, g] ~ Normal(0, 10)
end
end
Sigma ~ InverseWishart(K, diagm(ones(K)))
Beta = zeros(K, R)
println(eltype(Beta))
for r in 1:R
Beta[:, r] ~ MvNormal(Theta * Z[:, r], Sigma)
println(typeof(Beta[:, r]))
for s in 1:S
beta_r = copy(Beta[:, r])
beta_r = convert(Vector{Float64}, beta_r)
ut_rs = X[r, s, :, :] * beta_r
v = softmax(ut_rs)
Y[r, s] ~ Categorical(v)
end
end
end
sampler = HMC(.05, 10)
test_mod = hmnl(d1.G, d1.Y, d1.X, d1.Z)
chains = sample(test_mod, sampler, 1_000)
I get this error when I try to sample from the model: MethodError: no method matching float(::Type{Any}). The sampling statement Beta[:, r] ~ MvNormal(Theta * Z[:, r], Sigma) changes Beta[:, r] to type Vector{Any}.
I have tried
beta_r = copy(Beta[:, r])
beta_r = convert(Vector{Float64}, beta_r)
ut_rs = X[r, s, :, :] * beta_r
But then I get this error instead:
ERROR: TypeError: in typeassert, expected Float64, got a value of type ForwardDiff.Dual{Nothing, Float64, 12}
So it seems to interfere with Turing's automatic differentiation somehow. I'm new to Turing and can't work out the right way to do this.
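To make the type issue concrete, here is a toy example of my own (not from the model above): a buffer created with zeros(n) is locked to Float64 and cannot hold ForwardDiff.Dual numbers, whereas zeros(eltype(x), n) adapts to whatever number type the AD pass feeds in.
using ForwardDiff

g(x) = (v = zeros(2); v[1] = x[1]; sum(v))             # buffer hard-wired to Float64
h(x) = (v = zeros(eltype(x), 2); v[1] = x[1]; sum(v))  # buffer eltype follows the input

# ForwardDiff.gradient(g, [1.0, 2.0])  # errors: a Dual cannot be stored in a Vector{Float64}
ForwardDiff.gradient(h, [1.0, 2.0])    # works and returns [1.0, 0.0]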
I'm reposting an answer from Tor Fjelde (https://github.com/torfjelde) which I received on GitHub. For Turing to work, you need to ensure that the types in your model can be inferred; I wasn't doing that. See https://turing.ml/v0.22/docs/using-turing/performancetips#ensure-that-types-in-your-model-can-be-inferred
This function worked:
@model function hmnl(G::Int, Y::Matrix{Int64}, X::Array{Float64}, Z::Matrix{Float64}, ::Type{T} = Float64) where {T}
R, S, C, K = size(X)
Theta = zeros(T, K, G)
for k in 1:K
for g in 1:G
Theta[k, g] ~ Normal(0, 10)
end
end
Sigma ~ InverseWishart(K, diagm(ones(K)))
Beta = zeros(T, K, R)
println(eltype(Beta))
for r in 1:R
Beta[:, r] ~ MvNormal(Theta * Z[:, r], Sigma)
println(typeof(Beta[:, r]))
for s in 1:S
ut_rs = X[r, s, :, :] * Beta[:, r]
v = softmax(ut_rs)
Y[r, s] ~ Categorical(v)
end
end
end
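With this change, the same sampling call from the question should run as-is (a usage sketch reusing d1 and the HMC settings defined above; the exact results will of course depend on the draws):
test_mod = hmnl(d1.G, d1.Y, d1.X, d1.Z)
chains = sample(test_mod, HMC(0.05, 10), 1_000)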
I am trying to implement a simple line-search algorithm in Julia. I am new to Julia programming, so I am learning it as I go. I'd like to ask for some help, if possible, to correct an error I get when running the code.
Source code.
using LinearAlgebra
function bracket_minimum(f, x = 0, s = 1e-2, k = 2.0)
a, fa = x, f(x)
b, fb = x + s, f(x + s)
if(fb > fa)
a, b = b, a
fa, fb = fb, fa
s = -s
end
while(true)
c, fc = b + s, f(b + s)
if(fb < fc)
return a < c ? (a, c) : (c, a)
else
a, fa, b, fb = b, fb, c, fc
s *= k
end
end
end
function bisection(f, a₀, b₀, ϵ)
function D(f,a)
# Approximate the first derivative using central differences
h = 0.001
return (f(a + h) - f(a - h))/(2 * h)
end
a = a₀
b = b₀
while((b - a) > ϵ)
c = (a + b)/2.0
if D(f,c) > 0
b = c
else
a = c
end
end
return (a,b)
end
function line_search(f::Function, x::Vector{Float64}, d::Vector{Float64})
println("Hello")
objective = α -> f(x + α*d)
a, b = bracket_minimum(objective)
α = bisection(objective, a, b, 1e-5)
return α, x + α*d
end
f(x) = sin(x[1] * x[2]) + exp(x[2] + x[3]) - x[3]
x = [1,2,3]
d = [0, -1, -1]
α, x_min = line_search(f, x, d)
I am getting a linear-algebra error, so I think I must not be passing vectors correctly, or perhaps I am not doing scalar-vector multiplication correctly, but I am having a hard time figuring it out. If I step through the code, it fails on the call line_search(f, x, d) and does not even enter the function body.
Error description.
ERROR: MethodError: no method matching *(::Tuple{Float64,Float64}, ::Array{Int64,1})
Closest candidates are:
*(::Any, ::Any, ::Any, ::Any...) at operators.jl:538
*(::Adjoint{var"#s828",var"#s8281"} where var"#s8281"<:(AbstractArray{T,1} where T) where var"#s828"<:Number, ::AbstractArray{var"#s827",1} where var"#s827"<:Number) at C:\buildbot\worker\package_win64\build\usr\share\julia\stdlib\v1.5\LinearAlgebra\src\adjtrans.jl:283
*(::Transpose{T,var"#s828"} where var"#s828"<:(AbstractArray{T,1} where T), ::AbstractArray{T,1}) where T<:Real at C:\buildbot\worker\package_win64\build\usr\share\julia\stdlib\v1.5\LinearAlgebra\src\adjtrans.jl:284
Here is a fix for the code (I have cleaned up several stylistic things, but the key problem was that your bisection returned a tuple rather than a single value; I have changed it to return the center of the bracketing interval):
function bracket_minimum(f, x = 0.0, s = 1e-2, k = 2.0)
a, fa = x, f(x)
b, fb = x + s, f(x + s)
if fb > fa
a, b = b, a
fa, fb = fb, fa
s = -s
end
while true
s *= k
c, fc = b + s, f(b + s)
if fb < fc
return minmax(a, c)
else
a, fa, b, fb = b, fb, c, fc
end
end
end
function bisection(f, a₀, b₀, ϵ)
function D(f, a)
# Approximate the first derivative using central differences
h = 0.001
return (f(a + h) - f(a - h)) / (2 * h)
end
a = a₀
b = b₀
while (b - a) > ϵ
c = (a + b) / 2.0
if D(f, c) > 0
b = c
else
a = c
end
end
return (a + b) / 2 # this was changed
end
function line_search(f::Function, x::Vector{Float64}, d::Vector{Float64})
@assert length(x) == length(d)
objective(α) = f(x .+ α .* d)
a, b = bracket_minimum(objective)
α = bisection(objective, a, b, 1e-5)
return α, x .+ α .* d
end
f(x) = sin(x[1] * x[2]) + exp(x[2] + x[3]) - x[3]
x = [1.0, 2.0, 3.0]
d = [0.0, -1.0, -1.0]
α, x_min = line_search(f, x, d)
I am not commenting on the algorithm itself, as I assume you are writing this as a programming exercise and are not trying to write the fastest and most robust algorithm.
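For reference, the original MethodError arises from exactly the kind of expression the old code produced: once bisection returned the tuple (a, b), that tuple was used as α in x + α*d, and Julia has no method for multiplying a Tuple by a Vector. A tiny illustration with made-up values:
α = (0.1, 0.2)         # a bracketing interval, as the old bisection returned
d = [0.0, -1.0, -1.0]
# α * d                # MethodError: no method matching *(::Tuple{Float64, Float64}, ::Vector{Float64})
0.15 * d               # a scalar step size works fine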
I'm trying to solve a life-cycle problem in economics using Julia, but I'm having trouble with NLsolve. The model boils down to solving a two-equation system to find optimal leisure hours and capital stock in each working period. After retirement the economic agent sets leisure = 1 and I only need to solve a single nonlinear equation for capital; this part works fine. It's solving the two-equation system that seems to break down.
As I'm fairly new to Julia and to programming in general, any advice would be very helpful, and recommendations on any aspect of the code are greatly appreciated. The model is solved backwards from the final time period.
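For context, the NLsolve pattern I'm using for the two-equation blocks below is the in-place residual interface; here is a minimal sketch on a toy system (not the model's actual equations):
using NLsolve

function toy_f!(F, x)
    F[1] = x[1]^2 + x[2] - 2.0   # first residual
    F[2] = x[1] - x[2]           # second residual
end

res = nlsolve(toy_f!, [0.5, 0.5], autodiff = :forward)
res.zero                         # converges to approximately [1.0, 1.0]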
My attempt
using Parameters
using Roots
using Plots
using NLsolve
using ForwardDiff
Model = @with_kw (α = 0.66,
δ = 0.02,
τ = 0.015,
β = 1/1.01,
T = 70,
Ret = 40,
);
function du_c(c, l, η=2, γ=2)
if c>0 && l>0
return (c+1e-6)^(-η) * l^((1-η)*γ)
else
return Inf
end
end
function du_l(c, l, η=2, γ=2)
if l>0 && c>0
return γ * (c+1e-6)^(1-η) * l^(γ*(1-η)-1)
else
return Inf
end
end
function create_euler_work(x, y, m, k, l, r, w, t)
# x = todays capital, y = leisure
@unpack α, β, τ, δ, T, Ret = m
c_1 = x*(1+r) + (1-τ)*w*(1-y) - k[t+1]
c_2 = k[t+1]*(1+r) + (1-τ)*w*(1-l[t+1]) - k[t+2]
return du_c(c_1,y) - β*(1+r)*du_c(c_2,l[t+1])
end
function create_euler_retire(x, m, k, r, b, t)
# Holds at time periods Ret onwards
@unpack α, β, τ, δ, T, Ret = m
c_1 = x*(1+r) + b - k[t+1]
c_2 = k[t+1]*(1+r) + b - k[t+2]
return du_c(c_1,1) - β*(1+r)*du_c(c_2,1)
end
function create_euler_lyw(x, y, m, k, r, w, b, t)
# x = todays capital, y = leisure
@unpack α, β, τ, δ, T, Ret = m
c_1 = x*(1+r) + (1-τ)*w*(1-y) - k[t+1]
c_2 = k[t+1]*(1+r) + b - k[t+2]
return du_c(c_1,y) - β*(1+r)*du_c(c_2,1)
end
function create_foc(x, y, m, k, r, w, t)
# x = todays capital, l= leisure
@unpack α, β, τ, δ, T = m
c = x*(1+r) + (1-τ)*w*(1-y) - k[t+1]
return du_l(c,y) - (1-τ)*w*du_c(c,y)
end
function life_cycle(m, guess, r, w, b, initial)
@unpack α, β, τ, δ, T, Ret = m
k = zeros(T+1);
l = zeros(T);
k[T] = guess
println("Period t = $(T+1) Retirment, k = $(k[T+1]), l.0 = NA")
println("Period t = $T Retirment, k = $(k[T]), l = 1.0")
########################## Retirment ################################
for t in T-1:-1:Ret+1
euler(x) = create_euler_retire(x, m, k, r, b, t)
k[t] = find_zero(euler, (0,100))
l[t] = 1
println("Period t = $t Retirment, k = $(k[t]), l = $(l[t])")
end
###################### Retirement Year #############################
for t in Ret:Ret
euler(x,y) = create_euler_lyw(x, y, m, k, r, w, b, t)
foc(x,y) = create_foc(x, y, m, k, r, w, t)
function f!(F, x)
F[1] = euler(x[1], x[2])
F[2] = foc(x[1], x[2])
end
res = nlsolve(f!, [5; 0.7], autodiff = :forward)
k[t] = res.zero[1]
l[t] = res.zero[2]
println("Period t = $t Working, k = $(k[t]), l = $(l[t])")
end
############################ Working ###############################
for t in Ret-1:-1:1
euler(x,y) = create_euler_work(x, y, m, k, l, r, w, t)
foc(x,y) = create_foc(x, y, m, k, r, w, t)
function f!(F, x)
F[1] = euler(x[1], x[2])
F[2] = foc(x[1], x[2])
end
res = nlsolve(f!, [5; 0.7], autodiff = :forward)
k[t] = res.zero[1]
l[t] = res.zero[2]
println("Period t = $t Working, k = $(k[t]), l = $(l[t])")
end
#####################################################################
return k[1] - initial, k, l
end
m = Model();
residual, k, l = life_cycle(m, 0.3, 0.03, 1.0, 0.0, 0.0)
The code seems to break at period 35 with the error "During the resolution of the nonlinear system, the evaluation of following equations resulted in a non-finite number: [1,2]". However, the solutions already seem to go weird at period 37.
I've tried to reproduce the model from a PyMC3 and Stan comparison, but it seems to run slowly, and when I look at @code_warntype there are some things (K and N, I think) which the compiler seemingly infers as Any.
I've tried adding types, though I can't add types to turing_model's arguments, and things are complicated within turing_model because it uses autodiff variables rather than the usual ones. I put all the code into the function do_it to avoid globals, because they say that globals can slow things down. (It actually seems slower, though.)
Any suggestions as to what's causing the problem? The turing_model code is what's iterating, so that should make the most difference.
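As an aside on the globals point, here is a generic illustration of my own (not specific to this model) of why non-constant globals hurt: their type can change at any time, so the compiler cannot specialize code that reads them, whereas values passed as function arguments are type-stable inside the function.
data = rand(1_000)                # non-constant global: its type could change later
slow_sum() = sum(data[i] for i in eachindex(data))   # data's type is unknown at compile time
fast_sum(v) = sum(v[i] for i in eachindex(v))        # v's type is known once the call is made
fast_sum(data)                    # pass the global in as an argument instead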
using Turing, StatsPlots, Random
sigmoid(x) = 1.0 / (1.0 + exp(-x))
function scale(w0::Float64, w1::Array{Float64,1})
scale = √(w0^2 + sum(w1 .^ 2))
return w0 / scale, w1 ./ scale
end
function do_it(iterations::Int64)
K = 10 # predictor dimension
N = 1000 # number of data samples
X = rand(N, K) # predictors (1000, 10)
w1 = rand(K) # weights (10,)
w0 = -median(X * w1) # 50% of elements for each class (number)
w0, w1 = scale(w0, w1) # unit length (euclidean)
w_true = [w0, w1...]
y = (w0 .+ (X * w1)) .> 0.0 # labels
y = [Float64(x) for x in y]
σ = 5.0
σm = [x == y ? σ : 0.0 for x in 1:K, y in 1:K]
@model turing_model(X, y, σ, σm) = begin
w0_pred ~ Normal(0.0, σ)
w1_pred ~ MvNormal(σm)
p = sigmoid.(w0_pred .+ (X * w1_pred))
@inbounds for n in 1:length(y)
y[n] ~ Bernoulli(p[n])
end
end
@time chain = sample(turing_model(X, y, σ, σm), NUTS(iterations, 200, 0.65));
# ϵ = 0.5
# τ = 10
# @time chain = sample(turing_model(X, y, σ), HMC(iterations, ϵ, τ));
return (w_true=w_true, chains=chain::Chains)
end
chain = do_it(1000)
A function programmed in Fortran 95 to compute values of the Gamma function is not producing the correct values.
I am trying to implement a recursive function in Fortran 95 that computes values of the Gamma function using the Lanczos approximation (yes, I know there is an intrinsic function for this in the 2003 standard and later). I've followed the standard formula very closely, so I'm not certain what is wrong. Correct values of the Gamma function are crucial for some other numerical computations I am doing, which involve computing the Jacobi polynomials by means of a recursion relation.
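For reference, the standard g = 7, n = 9 Lanczos form that the code below is meant to implement (with c_0, ..., c_8 the coefficients stored in the array q), together with the reflection formula used for small arguments, is
Γ(x) ≈ √(2π) · (x + 6.5)^(x − 1/2) · e^(−(x + 6.5)) · [ c_0 + Σ_{k=1}^{8} c_k / (x − 1 + k) ],   for x ≥ 1/2
Γ(x) Γ(1 − x) = π / sin(πx),   used to handle x < 1/2 (reflection formula)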
program testGam
implicit none
integer, parameter :: dp = selected_real_kind(15,307)
real(dp), parameter :: pi = 3.14159265358979324
real(dp), dimension(10) :: xGam, Gam
integer :: n
xGam = (/ -3.5, -2.5, -1.5, -0.5, 0.5, 1.5, 2.5, 3.5, 4.5, 5.5 /)
do n = 1,10
Gam(n) = GammaFun(xGam(n))
end do
do n = 1,10
write(*,*) xGam(n), Gam(n)
end do
contains
recursive function GammaFun(x) result(G)
real(dp), intent(in) :: x
real(dp) :: G
real(dp), dimension(0:8), parameter :: q = &
(/ 0.99999999999980993, 676.5203681218851, -1259.1392167224028, &
771.32342877765313, -176.61502916214059, 12.507343278686905, &
-0.13857109526572012, 9.9843695780195716e-6, 1.5056327351493116e-7 /)
real(dp) :: t, w, xx
integer :: n
xx = x
if ( xx < 0.5_dp ) then
G = pi / ( sin(pi*xx)*GammaFun(1.0_dp - xx) )
else
xx = xx - 1.0_dp
t = q(0)
do n = 1,9
t = t + q(n) / (xx + real(n, dp))
end do
w = xx + 7.5_dp
G = sqrt(2.0_dp*pi)*(w**(xx + 0.5_dp))*exp(-w)*t
end if
end function GammaFun
end program testGam
Whereas this code should produce correct values of the Gamma function over the whole real line, it only seems to produce a constant value close to 122 regardless of the input. I suspect that there is some weird floating-point arithmetic issue that I am not seeing.
There are two obvious issues with your code:
Most seriously, the code accesses the array q out of bounds: q is declared with bounds 0:8, but the loop index n runs up to 9, i.e. in the loop
do n = 1,9
t = t + q(n) / (xx + real(n, dp))
end do
You have also mixed up your precisions somewhat, with some of the constants being of kind dp and others being of default kind.
Making what I believe are the appropriate fixes to these, your program compiles, links and runs correctly, at least as far as I can see. See below:
ian@eris:~/work/stackoverflow$ cat g.f90
program testGam
implicit none
integer, parameter :: dp = selected_real_kind(15,307)
real(dp), parameter :: pi = 3.14159265358979324_dp
real(dp), dimension(10) :: xGam, Gam
integer :: n
xGam = (/ -3.5_dp, -2.5_dp, -1.5_dp, -0.5_dp, 0.5_dp, 1.5_dp, 2.5_dp, 3.5_dp, 4.5_dp, 5.5_dp /)
do n = 1,10
Gam(n) = GammaFun(xGam(n))
end do
do n = 1,10
write(*,*) xGam(n), Gam(n), gamma( xGam( n ) ), Abs( Gam( n ) - gamma( xGam( n ) ) )
end do
contains
recursive function GammaFun(x) result(G)
real(dp), intent(in) :: x
real(dp) :: G
real(dp), dimension(0:8), parameter :: q = &
(/ 0.99999999999980993_dp, 676.5203681218851_dp, -1259.1392167224028_dp, &
771.32342877765313_dp, -176.61502916214059_dp, 12.507343278686905_dp, &
-0.13857109526572012_dp, 9.9843695780195716e-6_dp, 1.5056327351493116e-7_dp /)
real(dp) :: t, w, xx
integer :: n
xx = x
if ( xx < 0.5_dp ) then
G = pi / ( sin(pi*xx)*GammaFun(1.0_dp - xx) )
else
xx = xx - 1.0_dp
t = q(0)
do n = 1,8
t = t + q(n) / (xx + real(n, dp))
end do
w = xx + 7.5_dp
G = sqrt(2.0_dp*pi)*(w**(xx + 0.5_dp))*exp(-w)*t
end if
end function GammaFun
end program testGam
ian@eris:~/work/stackoverflow$ gfortran -O -std=f2008 -Wall -Wextra -fcheck=all g.f90
ian@eris:~/work/stackoverflow$ ./a.out
-3.5000000000000000 0.27008820585226917 0.27008820585226906 1.1102230246251565E-016
-2.5000000000000000 -0.94530872048294168 -0.94530872048294179 1.1102230246251565E-016
-1.5000000000000000 2.3632718012073521 2.3632718012073548 2.6645352591003757E-015
-0.50000000000000000 -3.5449077018110295 -3.5449077018110318 2.2204460492503131E-015
0.50000000000000000 1.7724538509055159 1.7724538509055161 2.2204460492503131E-016
1.5000000000000000 0.88622692545275861 0.88622692545275805 5.5511151231257827E-016
2.5000000000000000 1.3293403881791384 1.3293403881791370 1.3322676295501878E-015
3.5000000000000000 3.3233509704478430 3.3233509704478426 4.4408920985006262E-016
4.5000000000000000 11.631728396567446 11.631728396567450 3.5527136788005009E-015
5.5000000000000000 52.342777784553583 52.342777784553519 6.3948846218409017E-014
ian@eris:~/work/stackoverflow$