I wanted to try the Julia GpABC package. There are some examples on GitHub: https://github.com/tanhevg/GpABC.jl/blob/master/examples/abc-example.ipynb
I used In[1] and In[2] from the notebook above and then tried to run the simulator function, but I get an error.
Here is the exact code I used:
# ~/GIT/GIT_Schistoxpkg.jl_1.2.15/src/ABCexample.jl
#
# ABC settings
#
using GpABC
using OrdinaryDiffEq
using Distances
using Distributions
using Plots
using StatsBase
using Printf
pyplot()
true_params = [2.0, 1.0, 15.0, 1.0, 1.0, 1.0, 100.0, 1.0, 1.0, 1.0] # nominal parameter values
priors = [Uniform(0.2, 5.), Uniform(0.2, 5.), Uniform(10., 20.),
          Uniform(0.2, 2.), Uniform(0.2, 2.), Uniform(0.2, 2.),
          Uniform(75., 125.), Uniform(0.2, 2.), Uniform(0.2, 2.),
          Uniform(0.2, 2.)]
param_indices = [1, 3, 9] # indices of the parameters we want to estimate
priors = priors[param_indices]
#
# ODE solver settings
#
Tspan = (0.0, 10.0)
x0 = [3.0, 2.0, 1.0]
solver = RK4()
saveat = 0.1
#
# Returns the solution to the toy model as solved by OrdinaryDiffEq
#
GeneReg = function(params::AbstractArray{Float64,1},
                   Tspan::Tuple{Float64,Float64}, x0::AbstractArray{Float64,1},
                   solver::OrdinaryDiffEq.OrdinaryDiffEqAlgorithm, saveat::Float64)
    if size(params, 1) != 10
        throw(ArgumentError("GeneReg needs 10 parameters, $(size(params,1)) were provided"))
    end
    function ODE_3GeneReg(dx, x, par, t)
        dx[1] = par[1]/(1+par[7]*x[3]) - par[4]*x[1]
        dx[2] = par[2]*par[8]*x[1]/(1+par[8]*x[1]) - par[5]*x[2]
        dx[3] = par[3]*par[9]*x[1]*par[10]*x[2]./(1+par[9]*x[1])./(1+par[10]*x[2]) - par[6]*x[3]
    end
    prob = ODEProblem(ODE_3GeneReg, x0, Tspan, params)
    Obs = solve(prob, solver, saveat=saveat)
    return Array{Float64, 2}(Obs)
end
#
# A function that simulates the model
#
function simulator_function(var_params)
    params = copy(true_params)
    params[param_indices] .= var_params
    GeneReg(params, Tspan, x0, solver, saveat)
end
simulator_function([2.0, 15.0, 1.0])
It leads to an error message:
ERROR: MethodError: Cannot `convert` an object of type ODESolution{Float64, 2, Vector{Vector{Float64}}, Nothing, Nothing, Vector{Float64}, Vector{Vector{Vector{Float64}}}, ODEProblem{Vector{Float64}, Tuple{Float64, Float64}, true, Vector{Float64}, ODEFunction{true, var"#ODE_3GeneReg#7", LinearAlgebra.UniformScaling{Bool}, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, typeof(SciMLBase.DEFAULT_OBSERVED), Nothing}, Base.Pairs{Symbol, Union{}, Tuple{}, NamedTuple{(), Tuple{}}}, SciMLBase.StandardODEProblem}, RK4, OrdinaryDiffEq.InterpolationData{ODEFunction{true, var"#ODE_3GeneReg#7", LinearAlgebra.UniformScaling{Bool}, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, typeof(SciMLBase.DEFAULT_OBSERVED), Nothing}, Vector{Vector{Float64}}, Vector{Float64}, Vector{Vector{Vector{Float64}}}, OrdinaryDiffEq.RK4Cache{Vector{Float64}, Vector{Float64}, Vector{Float64}}}, DiffEqBase.DEStats} to an object of type Float64
Closest candidates are:
convert(::Type{T}, ::Static.StaticFloat64{N}) where {N, T<:AbstractFloat} at ~/.julia/packages/Static/pkxBE/src/float.jl:26
convert(::Type{T}, ::LLVM.GenericValue, ::LLVM.LLVMType) where T<:AbstractFloat at ~/.julia/packages/LLVM/YSJ2s/src/execution.jl:39
convert(::Type{T}, ::LLVM.ConstantFP) where T<:AbstractFloat at ~/.julia/packages/LLVM/YSJ2s/src/core/value/constant.jl:111
...
Stacktrace:
[1] setindex!(A::Matrix{Float64}, x::ODESolution{Float64, 2, Vector{Vector{Float64}}, Nothing, Nothing, Vector{Float64}, Vector{Vector{Vector{Float64}}}, ODEProblem{Vector{Float64}, Tuple{Float64, Float64}, true, Vector{Float64}, ODEFunction{true, var"#ODE_3GeneReg#7", LinearAlgebra.UniformScaling{Bool}, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, typeof(SciMLBase.DEFAULT_OBSERVED), Nothing}, Base.Pairs{Symbol, Union{}, Tuple{}, NamedTuple{(), Tuple{}}}, SciMLBase.StandardODEProblem}, RK4, OrdinaryDiffEq.InterpolationData{ODEFunction{true, var"#ODE_3GeneReg#7", LinearAlgebra.UniformScaling{Bool}, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, typeof(SciMLBase.DEFAULT_OBSERVED), Nothing}, Vector{Vector{Float64}}, Vector{Float64}, Vector{Vector{Vector{Float64}}}, OrdinaryDiffEq.RK4Cache{Vector{Float64}, Vector{Float64}, Vector{Float64}}}, DiffEqBase.DEStats}, i1::Int64)
@ Base ./array.jl:903
[2] copyto_unaliased!
@ ./abstractarray.jl:1024 [inlined]
[3] copyto!(dest::Matrix{Float64}, src::ODESolution{Float64, 2, Vector{Vector{Float64}}, Nothing, Nothing, Vector{Float64}, Vector{Vector{Vector{Float64}}}, ODEProblem{Vector{Float64}, Tuple{Float64, Float64}, true, Vector{Float64}, ODEFunction{true, var"#ODE_3GeneReg#7", LinearAlgebra.UniformScaling{Bool}, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, typeof(SciMLBase.DEFAULT_OBSERVED), Nothing}, Base.Pairs{Symbol, Union{}, Tuple{}, NamedTuple{(), Tuple{}}}, SciMLBase.StandardODEProblem}, RK4, OrdinaryDiffEq.InterpolationData{ODEFunction{true, var"#ODE_3GeneReg#7", LinearAlgebra.UniformScaling{Bool}, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, typeof(SciMLBase.DEFAULT_OBSERVED), Nothing}, Vector{Vector{Float64}}, Vector{Float64}, Vector{Vector{Vector{Float64}}}, OrdinaryDiffEq.RK4Cache{Vector{Float64}, Vector{Float64}, Vector{Float64}}}, DiffEqBase.DEStats})
@ Base ./abstractarray.jl:998
[4] copyto_axcheck!
@ ./abstractarray.jl:1104 [inlined]
[5] Matrix{Float64}(x::ODESolution{Float64, 2, Vector{Vector{Float64}}, Nothing, Nothing, Vector{Float64}, Vector{Vector{Vector{Float64}}}, ODEProblem{Vector{Float64}, Tuple{Float64, Float64}, true, Vector{Float64}, ODEFunction{true, var"#ODE_3GeneReg#7", LinearAlgebra.UniformScaling{Bool}, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, typeof(SciMLBase.DEFAULT_OBSERVED), Nothing}, Base.Pairs{Symbol, Union{}, Tuple{}, NamedTuple{(), Tuple{}}}, SciMLBase.StandardODEProblem}, RK4, OrdinaryDiffEq.InterpolationData{ODEFunction{true, var"#ODE_3GeneReg#7", LinearAlgebra.UniformScaling{Bool}, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, Nothing, typeof(SciMLBase.DEFAULT_OBSERVED), Nothing}, Vector{Vector{Float64}}, Vector{Float64}, Vector{Vector{Vector{Float64}}}, OrdinaryDiffEq.RK4Cache{Vector{Float64}, Vector{Float64}, Vector{Float64}}}, DiffEqBase.DEStats})
@ Base ./array.jl:563
[6] (::var"#5#6")(params::Vector{Float64}, Tspan::Tuple{Float64, Float64}, x0::Vector{Float64}, solver::RK4, saveat::Float64)
@ Main ~/GIT/GIT_Schistoxpkg.jl_1.2.15/src/ABCexample.jl:50
[7] simulator_function(var_params::Vector{Float64})
@ Main ~/GIT/GIT_Schistoxpkg.jl_1.2.15/src/ABCexample.jl:59
[8] top-level scope
@ ~/GIT/GIT_Schistoxpkg.jl_1.2.15/src/ABCexample.jl:62
Any idea why the example fails?
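Judging from the stack trace, the failure is in the last line of GeneReg, where the ODESolution is passed to the Matrix{Float64} constructor; on newer SciMLBase/OrdinaryDiffEq releases that conversion path no longer works. A minimal sketch of a likely fix (an assumption based on the error, not verified against GpABC):
# Inside GeneReg, convert the solution explicitly instead of calling
# the Array{Float64, 2} constructor on the ODESolution object:
Obs = solve(prob, solver, saveat=saveat)
return Array(Obs)              # SciMLBase's solution-to-array conversion
# or, equivalently:
# return reduce(hcat, Obs.u)   # 3×N matrix of the saved states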
Related
I have the following function call:
error = prior_error(data, s_vals, ones(20)/20)
This executes fine. I now package the arguments up as follows:
inputs = (data, s_vals, ones(20)/20)
and then try to get the gradient:
test = gradient(prior_error, inputs)
using the gradient function from the ReverseDiff package. The type signature of the prior_error function is as follows:
prior_error(data::Matrix, sample_vals::Vector, prior::Vector)
But I get the following error when I try to compute that gradient:
MethodError: no method matching prior_error(::ReverseDiff.TrackedArray{Float64, Float64, 2, Matrix{Float64}, Matrix{Float64}}, ::ReverseDiff.TrackedArray{Float64, Float64, 1, Vector{Float64}, Vector{Float64}}, ::ReverseDiff.TrackedArray{Float64, Float64, 1, Vector{Float64}, Vector{Float64}})
ReverseDiff.GradientTape(::Function, ::Tuple{Matrix{Float64}, Vector{Float64}, Vector{Float64}}, ::ReverseDiff.GradientConfig{Tuple{ReverseDiff.TrackedArray{Float64, Float64, 2, Matrix{Float64}, Matrix{Float64}}, ReverseDiff.TrackedArray{Float64, Float64, 1, Vector{Float64}, Vector{Float64}}, ReverseDiff.TrackedArray{Float64, Float64, 1, Vector{Float64}, Vector{Float64}}}}) @ tape.jl:207
gradient(::Function, ::Tuple{Matrix{Float64}, Vector{Float64}, Vector{Float64}}, ::ReverseDiff.GradientConfig{Tuple{ReverseDiff.TrackedArray{Float64, Float64, 2, Matrix{Float64}, Matrix{Float64}}, ReverseDiff.TrackedArray{Float64, Float64, 1, Vector{Float64}, Vector{Float64}}, ReverseDiff.TrackedArray{Float64, Float64, 1, Vector{Float64}, Vector{Float64}}}}) @ gradients.jl:22
top-level scope @ Local: 5
I'm afraid that, being relatively new to both Julia and the ReverseDiff package, I don't even know where to start with this error. Any advice on how to fix it would be greatly appreciated!
UPDATE
I figured out it's because I'm assigning too strict a type signature: leaving the functions without type signatures allows the autodiff to progress, UNTIL I try to do a matrix multiplication, where I get this error:
MethodError: *(::ReverseDiff.TrackedArray{Float64, Float64, 2, Matrix{Float64}, Matrix{Float64}}, ::LinearAlgebra.Diagonal{ReverseDiff.TrackedReal{Float64, Float64, ReverseDiff.TrackedArray{Float64, Float64, 1, Vector{Float64}, Vector{Float64}}}, ReverseDiff.TrackedArray{Float64, Float64, 1, Vector{Float64}, Vector{Float64}}}) is ambiguous. Candidates:
*(A::AbstractMatrix, D::LinearAlgebra.Diagonal) in LinearAlgebra at /home/peter/julia-1.7.3/share/julia/stdlib/v1.7/LinearAlgebra/src/diagonal.jl:222
*(x::ReverseDiff.TrackedArray{V, D, 2}, y::AbstractMatrix) where {V, D} in ReverseDiff at /home/peter/.julia/packages/ReverseDiff/GtPeW/src/derivatives/linalg/arithmetic.jl:213
*(x::ReverseDiff.TrackedArray{V, D, 2}, y::AbstractArray) where {V, D} in ReverseDiff at /home/peter/.julia/packages/ReverseDiff/GtPeW/src/derivatives/linalg/arithmetic.jl:213
*(x::ReverseDiff.TrackedArray{V, D}, y::AbstractMatrix) where {V, D} in ReverseDiff at /home/peter/.julia/packages/ReverseDiff/GtPeW/src/derivatives/linalg/arithmetic.jl:213
*(A::AbstractMatrix, B::AbstractMatrix) in LinearAlgebra at /home/peter/julia-1.7.3/share/julia/stdlib/v1.7/LinearAlgebra/src/matmul.jl:151
*(x::ReverseDiff.TrackedArray{V, D}, y::AbstractArray) where {V, D} in ReverseDiff at /home/peter/.julia/packages/ReverseDiff/GtPeW/src/derivatives/linalg/arithmetic.jl:213
Possible fix, define
*(::ReverseDiff.TrackedArray{V, D, 2}, ::LinearAlgebra.Diagonal) where {V, D}
But I don't understand what this means. Am I literally supposed to write out a function with that type signature? Do I need to manually code a method for matrix multiplication?
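For what it's worth, here is a minimal sketch of the usual way around such an ambiguity (my own toy column-scaling example, not the prior_error code): avoid building a Diagonal of tracked values at all, since right-multiplying by Diagonal(v) is just column scaling, which broadcasting expresses in an AD-friendly way.
using ReverseDiff, LinearAlgebra

# A * Diagonal(v) scales column j of A by v[j]; writing it as a broadcast
# never constructs a Diagonal of tracked numbers, so the ambiguous method
# is never hit.
col_scale(A, v) = A .* v'

g(A, v) = sum(col_scale(A, v))
gA, gv = ReverseDiff.gradient(g, (ones(3, 3), [1.0, 2.0, 3.0]))
# gv == [3.0, 3.0, 3.0]: each v[j] scales a column of three ones
Alternatively, the "Possible fix, define ..." line means exactly what it says: you can define that one method yourself, e.g. forwarding to a dense multiply with Base.:*(x::ReverseDiff.TrackedArray{V,D,2}, y::LinearAlgebra.Diagonal) where {V,D} = x * Matrix(y) (untested), but that is piracy on types you don't own, so the broadcasting route is usually cleaner.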
So I tried to make a minimal example to ask questions, based on a more complicated piece of code I have written:
A HUGE common error I'm getting is expecting float64 and instead got ForwardDiff.Dual - can someone give me a tip how in general I always make sure I avoid this bug. I feel like every time I do a new optimization problem I have to reinvent the wheel to try to make this go away
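In general, the culprit behind that error is a container whose element type is hard-coded to Float64, since ForwardDiff needs to push Dual numbers through the function. A minimal sketch of the failure and the cure (a toy function of my own, not the code below):
using ForwardDiff

# Fails under ForwardDiff: zeros(n) allocates a Vector{Float64}, and writing
# Dual numbers into it throws "Cannot convert ... Dual ... to Float64".
f_bad(x)  = (buf = zeros(length(x));            buf .= x .^ 2; sum(buf))

# Works: the buffer's element type follows the element type of the input,
# so it holds Duals during differentiation and Float64s otherwise.
f_good(x) = (buf = zeros(eltype(x), length(x)); buf .= x .^ 2; sum(buf))

ForwardDiff.gradient(f_good, [1.0, 2.0])   # [2.0, 4.0]
# ForwardDiff.gradient(f_bad, [1.0, 2.0])  # MethodError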
Apparently you cannot autodiff Julia's exp() function on a matrix? Does anyone know how to make it work?
As a workaround I approximated it with a finite Taylor series. In one of my functions the autodiff worked with 20 terms, but that wasn't accurate enough, so I went to 40 terms; then Julia told me to use factorial(big(k)), and with that change the autodiff no longer works. Does anyone have a fix for this?
Any advice would be greatly appreciated!
using Cubature
using Juniper
using Ipopt
using JuMP
using LinearAlgebra
using Base.Threads
using Cbc
using DifferentialEquations
using Trapz
function mat_exp(x::AbstractVector{T}, dim, num_terms, A) where T
    sum = zeros(Complex{T}, (dim, dim))
    A[1,1] = A[1,1]*x[1]
    A[2,2] = A[2,2]*x[2]
    return exp(A)-1
end
function exp_approx_no_big(x::AbstractVector{T}, dim, num_terms, A) where T
    sum = zeros(Complex{T}, (dim, dim))
    A[1,1] = A[1,1]*x[1]
    A[2,2] = A[2,2]*x[2]
    for k = 0:num_terms-1
        sum = sum + (1.0/factorial(k))*A^k
    end
    return norm(sum)-1
end
function exp_approx_big(x::AbstractVector{T}, dim, num_terms, A) where T
    sum = zeros(Complex{T}, (dim, dim))
    A[1,1] = A[1,1]*x[1]
    A[2,2] = A[2,2]*x[2]
    for k = 0:num_terms-1
        sum = sum + (1.0/factorial(big(k)))*A^k
    end
    return norm(sum)-1
end
optimizer = Juniper.Optimizer
nl_solver= optimizer_with_attributes(Ipopt.Optimizer, "print_level" => 0)
mip_solver = optimizer_with_attributes(Cbc.Optimizer, "logLevel" => 0, "threads"=>nthreads())
m = Model(optimizer_with_attributes(optimizer, "nl_solver"=>nl_solver, "mip_solver"=>mip_solver))
@variable(m, 0.0<=x[1:2]<=1.0)
dim=5
A=zeros(Complex,(dim,dim))
for k=1:dim
    A[k,k]=1.0
end
println(A)
f(x...) = exp_approx_no_big(collect(x),dim,20,A)
g(x...) = exp_approx_big(collect(x),dim,40,A)
h(x...) = mat_exp(collect(x),dim,20,A)
register(m, :f, 2, f; autodiff = true)
@NLobjective(m, Min, f(x...))
optimize!(m)
println(JuMP.value.(x))
println(JuMP.objective_value(m))
println(JuMP.termination_status(m))
There are quite a few problems with your mat_exp function:
It modifies A in-place, so repeated calls will not do what you think.
It returns exp(A) - 1, which is a matrix; JuMP's user-defined functions only support scalar returns.
You probably meant norm(exp(A)) - 1.
But ForwardDiff doesn't support differentiating through the matrix exponential exp(::Matrix):
julia> using ForwardDiff
julia> function mat_exp(x::AbstractVector{T}) where {T}
           A = zeros(Complex{T}, (dim, dim))
           for k = 1:dim
               A[k, k] = one(T)
           end
           A[1, 1] = A[1, 1] * x[1]
           A[2, 2] = A[2, 2] * x[2]
           return norm(exp(A)) - one(T)
       end
mat_exp (generic function with 3 methods)
julia> ForwardDiff.gradient(mat_exp, [0.5, 0.5])
ERROR: MethodError: no method matching exp(::Matrix{Complex{ForwardDiff.Dual{ForwardDiff.Tag{typeof(mat_exp), Float64}, Float64, 2}}})
Closest candidates are:
exp(::StridedMatrix{var"#s832"} where var"#s832"<:Union{Float32, Float64, ComplexF32, ComplexF64}) at /Users/julia/buildbot/worker/package_macos64/build/usr/share/julia/stdlib/v1.6/LinearAlgebra/src/dense.jl:557
exp(::StridedMatrix{var"#s832"} where var"#s832"<:Union{Integer, Complex{var"#s831"} where var"#s831"<:Integer}) at /Users/julia/buildbot/worker/package_macos64/build/usr/share/julia/stdlib/v1.6/LinearAlgebra/src/dense.jl:558
exp(::Diagonal) at /Users/julia/buildbot/worker/package_macos64/build/usr/share/julia/stdlib/v1.6/LinearAlgebra/src/diagonal.jl:603
...
Stacktrace:
[1] mat_exp(x::Vector{ForwardDiff.Dual{ForwardDiff.Tag{typeof(mat_exp), Float64}, Float64, 2}})
@ Main ./REPL[34]:8
[2] vector_mode_dual_eval!(f::typeof(mat_exp), cfg::ForwardDiff.GradientConfig{ForwardDiff.Tag{typeof(mat_exp), Float64}, Float64, 2, Vector{ForwardDiff.Dual{ForwardDiff.Tag{typeof(mat_exp), Float64}, Float64, 2}}}, x::Vector{Float64})
@ ForwardDiff ~/.julia/packages/ForwardDiff/jJIvy/src/apiutils.jl:37
[3] vector_mode_gradient(f::typeof(mat_exp), x::Vector{Float64}, cfg::ForwardDiff.GradientConfig{ForwardDiff.Tag{typeof(mat_exp), Float64}, Float64, 2, Vector{ForwardDiff.Dual{ForwardDiff.Tag{typeof(mat_exp), Float64}, Float64, 2}}})
@ ForwardDiff ~/.julia/packages/ForwardDiff/jJIvy/src/gradient.jl:106
[4] gradient(f::Function, x::Vector{Float64}, cfg::ForwardDiff.GradientConfig{ForwardDiff.Tag{typeof(mat_exp), Float64}, Float64, 2, Vector{ForwardDiff.Dual{ForwardDiff.Tag{typeof(mat_exp), Float64}, Float64, 2}}}, ::Val{true})
@ ForwardDiff ~/.julia/packages/ForwardDiff/jJIvy/src/gradient.jl:19
[5] gradient(f::Function, x::Vector{Float64}, cfg::ForwardDiff.GradientConfig{ForwardDiff.Tag{typeof(mat_exp), Float64}, Float64, 2, Vector{ForwardDiff.Dual{ForwardDiff.Tag{typeof(mat_exp), Float64}, Float64, 2}}}) (repeats 2 times)
@ ForwardDiff ~/.julia/packages/ForwardDiff/jJIvy/src/gradient.jl:17
[6] top-level scope
@ REPL[35]:1
I also don't know why you're using Juniper, or why you have a bunch of other packages loaded.
If you want to have a discussion on this, come join the community forum: https://discourse.julialang.org/c/domain/opt/13. (It's much better for back-and-forth than Stack Overflow.) Someone might have suggestions, but I don't know of an AD tool in Julia that can differentiate through a matrix exponential.
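If derivatives of the matrix exponential are truly needed, one pragmatic workaround (an editor's sketch, not part of the original answer) is to fall back to finite differences for this one function, e.g. with FiniteDiff.jl, and hand JuMP the gradient explicitly:
using FiniteDiff, LinearAlgebra

dim = 5
A0 = Matrix{ComplexF64}(I, dim, dim)

# Scalar-valued objective; copies A0 on every call so nothing is mutated in place.
function mat_exp_obj(x::AbstractVector)
    A = copy(A0)
    A[1, 1] *= x[1]
    A[2, 2] *= x[2]
    return norm(exp(A)) - 1
end

f(x...) = mat_exp_obj(collect(x))

# Gradient by finite differences, in the mutating form JuMP's register expects.
∇f(g, x...) = (g .= FiniteDiff.finite_difference_gradient(mat_exp_obj, collect(x)); nothing)

# register(m, :f, 2, f, ∇f)   # then: @NLobjective(m, Min, f(x...))
Finite differences are less accurate than AD, but they sidestep the Dual-through-exp problem entirely.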
While trying to run code paraphrasing the DifferentialEquations.jl tutorial on SDEs, I'm getting the following stack trace (only the first few lines):
Bridging distribution is unknown. Cannot use adapativity
Stacktrace:
[1] error(s::String)
@ Base ./error.jl:33
[2] (::DiffEqNoiseProcess.var"#106#108")(rand_vec::Vector{Float64}, W::NoiseProcess{Float64, 2, Float64, Vector{Float64}, Nothing, Nothing, DiffEqNoiseProcess.var"#105#107"{Vector{Float64}, Matrix{Float64}}, DiffEqNoiseProcess.var"#106#108", true, ResettableStacks.ResettableStack{Tuple{Float64, Vector{Float64}, Nothing}, true}, ResettableStacks.ResettableStack{Tuple{Float64, Vector{Float64}, Nothing}, true}, RSWM{Float64}, Nothing, RandomNumbers.Xorshifts.Xoroshiro128Plus}, W0::Int64, Wh::Vector{Float64}, q::Float64, h::Float64, u::Vector{Float64}, p::SciMLBase.NullParameters, t::Float64, rng::RandomNumbers.Xorshifts.Xoroshiro128Plus)
@ DiffEqNoiseProcess ~/.julia/packages/DiffEqNoiseProcess/9NzQP/src/correlated_noisefunc.jl:28
[3] reject_step!
@ ~/.julia/packages/DiffEqNoiseProcess/9NzQP/src/noise_interfaces/noise_process_interface.jl:278 [inlined]
[4] reject_step! (repeats 2 times)
@ ~/.julia/packages/StochasticDiffEq/Ysmjy/src/integrators/integrator_utils.jl:7 [inlined]
I suspect this has to do with the way I am defining the correlated wiener processes.
Here is the block of code I'm trying to run:
# Correlated Brownian motions
# e.g. Heston model
heston_tspan = (0.0,1.0)
μ = 1.0
κ = 1.0
Θ = 1.0
σ = 1.0
ρ = 0.333
function heston_drift!(du,u,p,t)
    du[1] = μ*u[1]
    du[2] = κ*(Θ-u[2])
end
function heston_sigma!(du,u,p,t)
    du[1] = √u[2]*u[1]
    du[2] = σ*√u[2]
end
correl = [1 ρ;ρ 1]
heston_noise = CorrelatedWienerProcess!(correl, heston_tspan[1], zeros(2), zeros(2))
heston_problem = SDEProblem(heston_drift!, heston_sigma!, ones(2), heston_tspan, noise=heston_noise)
heston_sol = solve(heston_problem)
plot(heston_sol)
EDIT:
The solver works if I explicitly tell it not to use adaptive methods.
For example,
heston_sol = solve(heston_problem, adaptive=false, dt=0.01)
However, I don't understand why:
1. there's no bridging distribution defined for this CorrelatedWienerProcess! "object" (mathematically, it's similar to the default WienerProcess), and
2. the solve function's auto-selector does not try a non-adaptive method when it fails to find a bridging distribution.
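One way to sidestep the missing bridge entirely (a sketch of the standard Cholesky trick, not from the original post) is to keep two independent Wiener processes and move the correlation into a non-diagonal diffusion matrix: if L is the Cholesky factor of the correlation matrix, then applying σ(u)·L to independent Brownian increments produces increments with correlation ρ, and plain Wiener noise has a known bridging distribution, so adaptivity is available again.
using StochasticDiffEq

# Cholesky factor of [1 ρ; ρ 1] is [1 0; ρ √(1-ρ²)]: scaling the two
# independent Brownian motions by this matrix reproduces correlation ρ.
function heston_sigma_chol!(du, u, p, t)
    du[1, 1] = √u[2]*u[1]
    du[1, 2] = 0.0
    du[2, 1] = σ*√u[2]*ρ
    du[2, 2] = σ*√u[2]*√(1 - ρ^2)
end

heston_problem2 = SDEProblem(heston_drift!, heston_sigma_chol!, ones(2), heston_tspan;
                             noise_rate_prototype = zeros(2, 2))
heston_sol2 = solve(heston_problem2, LambaEM())  # adaptive solver for non-diagonal noise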
I am trying to get Avro working in Julia and am having some real issues. It is important for my application that I use a row-oriented data format to which I can append a hierarchical data structure row by row as rows are generated.
Avro seems like a good fit, but I am having issues in Julia. I have things working in a Python test, but I need to be in Julia because the main code is in Julia.
Here are my simplified test examples, which show my issue. The first one works; the rest don't: the second gives the wrong answer, and the others give errors. Any help would be appreciated.
import Avro
v1=Dict("RUTHERFORD" => 7, "DURHAM" => 11)
buf=Avro.write(v1)
Avro.read(buf,typeof(v1))
output:
Dict{String, Int64} with 2 entries:
"DURHAM" => 11
"RUTHERFORD" => 7
example 2:
@show v3=Dict((5,2) => 7, (5,4) => 11)
@show typeof(v3)
buf=Avro.write(v3)
Avro.read(buf,typeof(v3))
output:
v3 = Dict((5, 2) => 7, (5, 4) => 11) = Dict((5, 2) => 7, (5, 4) => 11)
typeof(v3) = Dict{Tuple{Int64, Int64}, Int64}
Dict{Tuple{Int64, Int64}, Int64} with 1 entry:
(40, 53) => 11
example 3:
@show v2=Dict(("jcm",2) => 7, ("sem",4) => 11)
@show typeof(v2)
buf=Avro.write(v2)
v2o=Avro.read(buf,typeof(v2))
output:
v2 = Dict(("jcm", 2) => 7, ("sem", 4) => 11) = Dict(("sem", 4) => 11, ("jcm", 2) => 7)
typeof(v2) = Dict{Tuple{String, Int64}, Int64}
MethodError: Cannot `convert` an object of type Char to an object of type String
Closest candidates are:
convert(::Type{String}, ::String) at essentials.jl:210
convert(::Type{T}, ::T) where T<:AbstractString at strings/basic.jl:231
convert(::Type{T}, ::AbstractString) where T<:AbstractString at strings/basic.jl:232
...
Stacktrace:
[1] _totuple
@ ./tuple.jl:316 [inlined]
[2] Tuple{String, Int64}(itr::String)
@ Base ./tuple.jl:303
[3] construct(T::Type, args::String; kw::Base.Iterators.Pairs{Union{}, Union{}, Tuple{}, NamedTuple{(), Tuple{}}})
@ StructTypes ~/.julia/packages/StructTypes/NJXhA/src/StructTypes.jl:310
[4] construct(T::Type, args::String)
@ StructTypes ~/.julia/packages/StructTypes/NJXhA/src/StructTypes.jl:310
[5] construct(::Type{Tuple{String, Int64}}, ptr::Ptr{UInt8}, len::Int64; kw::Base.Iterators.Pairs{Union{}, Union{}, Tuple{}, NamedTuple{(), Tuple{}}})
@ StructTypes ~/.julia/packages/StructTypes/NJXhA/src/StructTypes.jl:435
[6] construct(::Type{Tuple{String, Int64}}, ptr::Ptr{UInt8}, len::Int64)
@ StructTypes ~/.julia/packages/StructTypes/NJXhA/src/StructTypes.jl:435
[7] readvalue(B::Avro.Binary, #unused#::Avro.StringType, #unused#::Type{Tuple{String, Int64}}, buf::Vector{UInt8}, pos::Int64, len::Int64, opts::Base.Iterators.Pairs{Union{}, Union{}, Tuple{}, NamedTuple{(), Tuple{}}})
@ Avro ~/.julia/packages/Avro/JEoRa/src/types/binary.jl:247
[8] readvalue(B::Avro.Binary, MT::Avro.MapType, #unused#::Type{Dict{Tuple{String, Int64}, Int64}}, buf::Vector{UInt8}, pos::Int64, buflen::Int64, opts::Base.Iterators.Pairs{Union{}, Union{}, Tuple{}, NamedTuple{(), Tuple{}}})
@ Avro ~/.julia/packages/Avro/JEoRa/src/types/maps.jl:63
[9] read(buf::Vector{UInt8}, ::Type{Dict{Tuple{String, Int64}, Int64}}; schema::Avro.MapType, jsonencoding::Bool, kw::Base.Iterators.Pairs{Union{}, Union{}, Tuple{}, NamedTuple{(), Tuple{}}})
@ Avro ~/.julia/packages/Avro/JEoRa/src/types/binary.jl:58
[10] read(buf::Vector{UInt8}, ::Type{Dict{Tuple{String, Int64}, Int64}})
@ Avro ~/.julia/packages/Avro/JEoRa/src/types/binary.jl:58
[11] top-level scope
@ In[209]:5
[12] eval
@ ./boot.jl:360 [inlined]
[13] include_string(mapexpr::typeof(REPL.softscope), mod::Module, code::String, filename::String)
@ Base ./loading.jl:1094
Last example:
v=Dict(("RUTHERFORD", "05A", "371619611022065") => 7, ("DURHAM", "28","jcm") => 11)
buf=Avro.write(v)
vo=Avro.read(buf,typeof(v))
output:
MethodError: Cannot `convert` an object of type Char to an object of type String
Closest candidates are:
convert(::Type{String}, ::String) at essentials.jl:210
convert(::Type{T}, ::T) where T<:AbstractString at strings/basic.jl:231
convert(::Type{T}, ::AbstractString) where T<:AbstractString at strings/basic.jl:232
...
Stacktrace:
[1] _totuple
@ ./tuple.jl:316 [inlined]
[2] Tuple{String, String, String}(itr::String)
@ Base ./tuple.jl:303
[3] construct(T::Type, args::String; kw::Base.Iterators.Pairs{Union{}, Union{}, Tuple{}, NamedTuple{(), Tuple{}}})
@ StructTypes ~/.julia/packages/StructTypes/NJXhA/src/StructTypes.jl:310
[4] construct(T::Type, args::String)
@ StructTypes ~/.julia/packages/StructTypes/NJXhA/src/StructTypes.jl:310
[5] construct(::Type{Tuple{String, String, String}}, ptr::Ptr{UInt8}, len::Int64; kw::Base.Iterators.Pairs{Union{}, Union{}, Tuple{}, NamedTuple{(), Tuple{}}})
@ StructTypes ~/.julia/packages/StructTypes/NJXhA/src/StructTypes.jl:435
[6] construct(::Type{Tuple{String, String, String}}, ptr::Ptr{UInt8}, len::Int64)
@ StructTypes ~/.julia/packages/StructTypes/NJXhA/src/StructTypes.jl:435
[7] readvalue(B::Avro.Binary, #unused#::Avro.StringType, #unused#::Type{Tuple{String, String, String}}, buf::Vector{UInt8}, pos::Int64, len::Int64, opts::Base.Iterators.Pairs{Union{}, Union{}, Tuple{}, NamedTuple{(), Tuple{}}})
@ Avro ~/.julia/packages/Avro/JEoRa/src/types/binary.jl:247
[8] readvalue(B::Avro.Binary, MT::Avro.MapType, #unused#::Type{Dict{Tuple{String, String, String}, Int64}}, buf::Vector{UInt8}, pos::Int64, buflen::Int64, opts::Base.Iterators.Pairs{Union{}, Union{}, Tuple{}, NamedTuple{(), Tuple{}}})
@ Avro ~/.julia/packages/Avro/JEoRa/src/types/maps.jl:63
[9] read(buf::Vector{UInt8}, ::Type{Dict{Tuple{String, String, String}, Int64}}; schema::Avro.MapType, jsonencoding::Bool, kw::Base.Iterators.Pairs{Union{}, Union{}, Tuple{}, NamedTuple{(), Tuple{}}})
@ Avro ~/.julia/packages/Avro/JEoRa/src/types/binary.jl:58
[10] read(buf::Vector{UInt8}, ::Type{Dict{Tuple{String, String, String}, Int64}})
@ Avro ~/.julia/packages/Avro/JEoRa/src/types/binary.jl:58
[11] top-level scope
@ In[210]:3
[12] eval
@ ./boot.jl:360 [inlined]
[13] include_string(mapexpr::typeof(REPL.softscope), mod::Module, code::String, filename::String)
@ Base ./loading.jl:1094
What is going wrong?
Avro.jl is unable to properly read from the buffer into a Dict (or, as Avro calls it, a "Map") that uses a Tuple as a key because, according to the Avro specification:
Map keys are assumed to be strings.
This assumption is hard-coded into Avro.jl: no matter what the actual type of the Dict keys is, the code forces the key to be a String. Avro.jl does not bother to check that the key is actually a subtype of String because, as long as the type can be converted to a String via the Base.string method, the code can write that string representation to the buffer. And that is exactly what happens when you write a Dict with Tuple keys:
v = Dict((1,2) => 3)
buf = Avro.write(v)
Char.(buf)
This decodes the bytes in buf as ASCII/Unicode characters and prints them to the REPL. You should see the string representation of the Tuple (1,2) in there encoded as "(1, 2)":
11-element Vector{Char}:
'\x01': ASCII/Unicode U+0001 (category Cc: Other, control)
'\x10': ASCII/Unicode U+0010 (category Cc: Other, control)
'\f': ASCII/Unicode U+000C (category Cc: Other, control)
'(': ASCII/Unicode U+0028 (category Ps: Punctuation, open)
'1': ASCII/Unicode U+0031 (category Nd: Number, decimal digit)
',': ASCII/Unicode U+002C (category Po: Punctuation, other)
' ': ASCII/Unicode U+0020 (category Zs: Separator, space)
'2': ASCII/Unicode U+0032 (category Nd: Number, decimal digit)
')': ASCII/Unicode U+0029 (category Pe: Punctuation, close)
'\x06': ASCII/Unicode U+0006 (category Cc: Other, control)
'\0': ASCII/Unicode U+0000 (category Cc: Other, control)
The problem arises when you try to read that key back into a Tuple. When reading a key of a Map element, Avro.jl will try to read whatever is in the buffer as a String and stuff it into whatever type the key is. If the type is a Tuple of N types that can be constructed from UInt8 values (eltype(buf)), then the next N UInt8 values in the buffer will be used to create the key:
Avro.read(buf, typeof(v))
# Dict{Tuple{Int64, Int64}, Int64} with 1 entry:
# (40, 49) => 3
Why 40 and 49? Because those are the Int64 representations of the Chars '(' and '1', respectively:
Char(40)
# '(': ASCII/Unicode U+0028 (category Ps: Punctuation, open)
Char(49)
# '1': ASCII/Unicode U+0031 (category Nd: Number, decimal digit)
Note that this is also why your second example reads back only one element in the Dict even though two were written. The two-element Tuple being parsed as the key reads only the first two characters of the string representation, which are '(' and '5' for both of your keys, so both keys decode to (40, 53). A Dict cannot have duplicate keys, so the second value simply overwrites the first.
How to fix it
Avoid using non-strings as keys
Because the Avro specification specifically states that the key of a Map is assumed to be a string, you should probably follow the specification and avoid using non-strings as keys; a sketch of this route follows below. In my opinion, Avro.jl should not let the user write a Dict whose keys are not subtypes of AbstractString. Maybe that's a design choice, or maybe it's a bug, but it might be worth filing an issue on the project page just in case.
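For instance (an editor's sketch assuming plain Int tuple keys, not from the original answer), you can flatten each key to a String before writing and rebuild the Tuples after reading:
import Avro

v = Dict((5, 2) => 7, (5, 4) => 11)

# Serialize with String keys that we control, rather than relying on
# Avro.jl's string(key) fallback.
vs  = Dict(join(k, ',') => val for (k, val) in v)    # "5,2" => 7, "5,4" => 11
buf = Avro.write(vs)

# Read back and rebuild the Tuple keys explicitly.
vs2 = Avro.read(buf, typeof(vs))
v2  = Dict(Tuple(parse.(Int, split(k, ','))) => val for (k, val) in vs2)
# Dict((5, 2) => 7, (5, 4) => 11)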
Use a custom type as a key
If you really, really want to use something other than a String as a key: Avro.jl will always convert the key to a String (via the Base.string method) when it serializes a Map to a buffer. During deserialization, if the code recognizes the key type as a struct, it will try to pass the serialized String to that struct's constructor. Therefore, all you have to do is define a custom struct with a constructor that takes a String and does the right thing (and optionally overload the Base.string method). Here's an example:
struct XY
    x::Int64
    y::Int64
end

function XY(s::String)
    # parse the default string representation of an XY value
    # very inefficient: for demonstration purposes only
    m = match(r"XY\((\d+), (\d+)\)", s)
    XY(parse.(Int64, m.captures)...)
end
v2 = Dict(XY(1,2) => 3)
buf2 = Avro.write(v2)
Avro.read(buf2, typeof(v2))
# Dict{XY, Int64} with 1 entry:
# XY(1, 2) => 3
Write your own Tuple construct method
If you really, really, really want to use a Tuple as a key, you can take advantage of StructTypes.StringType and define your own StructTypes.construct method. Because Avro.jl uses the unsafe pointer version, you're stuck defining the same for your Tuple. Here is an awkward example:
using StructTypes

function StructTypes.construct(::Type{Tuple{Int64,Int64}}, ptr::Ptr{UInt8}, len::Int; kw...)
    arr = unsafe_wrap(Vector{UInt8}, ptr, len)
    s = join(Char.(arr))
    m = findall(r"\d+", s)
    (parse(Int64, s[m[1]]), parse(Int64, s[m[2]]))
end
Avro.read(buf, typeof(v))
# Dict{Tuple{Int64, Int64}, Int64} with 1 entry:
# (1, 2) => 3
For the curious: why does Avro.jl get the value right, even if the key is parsed incorrectly?
In Avro's binary encoding scheme, strings are serialized with their lengths stored at the beginning of the string. This allows Avro.jl to pass the known length of the string key to the pointer-based StructTypes.construct method, which passes an Array{UInt8,1} to the Tuple constructor. A fun fact about Julia is that the iterable-based constructor for a Tuple will only read as many elements from the iterable as necessary to construct the Tuple, then stop. Example:
Tuple{Int64, Int64}([1,2,3,4])
# (1, 2)
So Avro.jl passes a 6-element Array{UInt8,1} (['(', '1', ',', ' ', '2', ')']) to the constructor of Tuple{Int64,Int64} which in turn reads only the first two elements, then returns the Tuple for Avro.jl to use as the key of the Map element. Avro.jl then skips ahead to where it knows the string ends (remember: it stores the length of the string in the buffer) and starts reading there for the value of the Map element. Avro.jl knows that value should be an Int64, and it knows how to parse an Int64, so it reads the appropriate value. Neat!
The following fails on load. It says the problem is with the to_index method, but it only happens when using the colon operator.
S = InputOhlcSeries{Int}(5)
push!(S, 0,0,0,1)
push!(S, 0,0,0,2)
push!(S, 0,0,0,3)
push!(S, 0,0,0,4)
push!(S, 0,0,0,5)
push!(S, 0,0,0,6)
@assert S[end, :close] == 6 # Works fine
@assert S[:, 4] == [2,3,4,5,6] # Works fine
@assert S[:, :close] == [2,3,4,5,6] # Will fail here
ERROR: LoadError: LoadError: ArgumentError: invalid index: close of type Symbol
Stacktrace:
[1] to_index(::Symbol) at ./indices.jl:270
[2] to_index(::InputOhlcSeries{Int64}, ::Symbol) at ./indices.jl:247
[3] to_indices at ./indices.jl:298 [inlined]
[4] to_indices at ./indices.jl:294 [inlined]
[5] getindex(::InputOhlcSeries{Int64}, ::Function, ::Symbol) at ./abstractarray.jl:927
[6] top-level scope at none:0
[7] include at ./boot.jl:326 [inlined]
using Match
using DataStructures # provides CircularBuffer

mutable struct InputOhlcSeries{T} <: AbstractArray{T,2}
    data::CircularBuffer{Vector{T}}
    function InputOhlcSeries{T}(length::Int) where T
        data = CircularBuffer{Vector{T}}(length)
        fill!(data, [0,0,0,0])
        new{T}(data)
    end
end

@inline Base.getindex(S::InputOhlcSeries, i::Int, j::Int) = S.data[i][j]
@inline Base.setindex!(S::InputOhlcSeries, value, i::Int, j::Int) = S.data[i][j] = value
Base.size(S::InputOhlcSeries) = (length(S.data), 4)
Base.eltype(::Type{InputOhlcSeries{T}}) where {T} = T
@inline Base.push!(S::InputOhlcSeries, open, high, low, close) = push!(S.data, [open, high, low, close])
@inline function Base.getindex(S::InputOhlcSeries, r::Int, c::Symbol)
    S[r, to_index(c)]
end
@inline function Base.setindex!(S::InputOhlcSeries, value, r::Int, c::Symbol)
    S[r, to_index(c)] = value
end
@inline function to_index(r::Symbol)::Int
    @match r begin
        :open => 1
        :high => 2
        :low => 3
        :close => 4
        _ => throw(ArgumentError("Expected one of :open, :high, :low, :close"))
    end
end
Made the following mods:
import Base: to_index
@inline Base.getindex(S::InputOhlcSeries, r::Int, c::Symbol) = S[r, to_index(S, c)]
@inline function Base.setindex!(S::InputOhlcSeries, value, r::Int, c::Symbol)
    S[r, to_index(S, c)] = value
end
@inline Base.push!(S::InputOhlcSeries, open, high, low, close) = push!(S.data, [open, high, low, close])
function to_index(S::InputOhlcSeries, s::Symbol)
    @match s begin
        :open => 1
        :high => 2
        :low => 3
        :close => 4
        _ => throw(ArgumentError("Expected one of :open, :high, :low, :close"))
    end
end
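With to_index(::InputOhlcSeries, ::Symbol) defined (note the import Base: to_index), the colon form now resolves through Base's generic to_indices machinery, which calls to_index(A, i) on each non-standard index. A quick check (an untested sketch, rebuilding the series from the top of the question):
S = InputOhlcSeries{Int}(5)
for c in 1:6
    push!(S, 0, 0, 0, c)
end
@assert S[end, :close] == 6
@assert S[:, :close] == [2, 3, 4, 5, 6] # colon + Symbol now dispatches correctly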