Error while using GPU on UNet with Flux in Julia

I'm using Julia to train a UNet on the GPU (the same code runs without issues on the CPU).
The error occurs in NNlib's function conv_im2col!():
function conv_im2col!(
        y::AbstractArray{T,5}, x::AbstractArray{T,5},
        w::AbstractArray{T,5}, cdims::DenseConvDims;
        col::AbstractArray{T,3}=similar(x, im2col_dims(cdims)),
        alpha::T=T(1), beta::T=T(0)) where {T}
    check_dims(size(x), size(w), size(y), cdims)

    # COL * W -> Y
    # [M x K] * [K x N] -> [M x N]
    #
    #  M: output spatial resolution
    #  N: output channels
    #  K: size of input "patch" (kernel size and input channels combined)
    #
    # In english, we're grabbing each input patch and laying them out along
    # the M dimension in `col`, so that the GEMM call below multiplies each
    # kernel (which is kernel_h * kernel_w * channels_in elements long) is
    # dotproducted with that input patch, effectively computing a convolution
    # in a somewhat memory-wasteful but easily-computed way (since we already
    # have an extremely highly-optimized GEMM call available in BLAS).
    M = prod(output_size(cdims))
    N = channels_out(cdims)
    K = prod(kernel_size(cdims))*channels_in(cdims)

    @threads for batch_idx in 1:size(x,5)
        # col_slice is a thread-local workspace
        col_slice = view(col, :, :, threadid())

        im2col!(col_slice, view(x, :, :, :, :, batch_idx), cdims)
        GC.@preserve col_slice w y begin
            col_ptr = pointer(col_slice)
            w_ptr = pointer(w)
            y_ptr = pointer(y, (batch_idx - 1)*M*N + 1)
            gemm!(Val(false), Val(false), M, N, K, alpha, col_ptr, w_ptr, beta, y_ptr)
        end
    end
    return y
end
and the detailed error is:
[ Info: training...
[ Info: Using Device gpu
Epoch  Minibatch Loss  Test Dice Coef
----------------------------------------------
1
ERROR: LoadError: TaskFailedException
nested task error: TaskFailedException
Stacktrace:
[1] wait
# ./task.jl:322 [inlined]
[2] threading_run(func::Function)
# Base.Threads ./threadingconstructs.jl:34
[3] macro expansion
# ./threadingconstructs.jl:93 [inlined]
[4] conv_im2col!(y::SubArray{Float32, 5, Array{Float32, 5}, Tuple{Base.Slice{Base.OneTo{Int64}}, Base.Slice{Base.OneTo{Int64}},
Base.Slice{Base.OneTo{Int64}}, UnitRange{Int64},
Base.Slice{Base.OneTo{Int64}}}, false}, x::SubArray{Float32, 5,
Array{Float32, 5}, Tuple{Base.Slice{Base.OneTo{Int64}},
Base.Slice{Base.OneTo{Int64}}, Base.Slice{Base.OneTo{Int64}},
UnitRange{Int64}, Base.Slice{Base.OneTo{Int64}}}, false},
w::CuArray{Float32, 5, CUDA.Mem.DeviceBuffer}, cdims::DenseConvDims{3,
3, 3, 6, 3}; col::Array{Float32, 3}, alpha::Float32, beta::Float32)
# NNlib ~/.julia/packages/NNlib/0QnJJ/src/impl/conv_im2col.jl:47
[5] conv_im2col! # ~/.julia/packages/NNlib/0QnJJ/src/impl/conv_im2col.jl:28 [inlined]
[6] (::NNlib.var"#262#266"{Base.Iterators.Pairs{Union{}, Union{}, Tuple{}, NamedTuple{(), Tuple{}}}, DenseConvDims{3, 3, 3, 6, 3},
SubArray{Float32, 5, Array{Float32, 5},
Tuple{Base.Slice{Base.OneTo{Int64}}, Base.Slice{Base.OneTo{Int64}},
Base.Slice{Base.OneTo{Int64}}, UnitRange{Int64},
Base.Slice{Base.OneTo{Int64}}}, false}, CuArray{Float32, 5,
CUDA.Mem.DeviceBuffer}, SubArray{Float32, 5, Array{Float32, 5},
Tuple{Base.Slice{Base.OneTo{Int64}}, Base.Slice{Base.OneTo{Int64}},
Base.Slice{Base.OneTo{Int64}}, UnitRange{Int64},
Base.Slice{Base.OneTo{Int64}}}, false}})()
# NNlib ./threadingconstructs.jl:169
nested task error: MethodError: no method matching gemm!(::Val{false}, ::Val{false}, ::Int64, ::Int64, ::Int64,::Float32, ::Ptr{Float32}, ::CuPtr{Float32}, ::Float32, ::Ptr{Float32})
Closest candidates are:
gemm!(::Val, ::Val, ::Int64, ::Int64, ::Int64, ::Float32, ::Ptr{Float32}, ::Ptr{Float32}, ::Float32, ::Ptr{Float32}) at
.julia/packages/NNlib/0QnJJ/src/gemm.jl:29
gemm!(::Val, ::Val, ::Int64, ::Int64, ::Int64, ::Float64, ::Ptr{Float64}, ::Ptr{Float64}, ::Float64, ::Ptr{Float64}) at
.julia/packages/NNlib/0QnJJ/src/gemm.jl:29
gemm!(::Val, ::Val, ::Int64, ::Int64, ::Int64, ::ComplexF64, ::Ptr{ComplexF64}, ::Ptr{ComplexF64}, ::ComplexF64, ::Ptr{ComplexF64})
at .julia/packages/NNlib/0QnJJ/src/gemm.jl:29
...
Stacktrace:
[1] macro expansion
# ~/.julia/packages/NNlib/0QnJJ/src/impl/conv_im2col.jl:56 [inlined]
[2] (::NNlib.var"#909#threadsfor_fun#504"{Array{Float32, 3}, Float32, Float32, SubArray{Float32, 5, Array{Float32, 5},
Tuple{Base.Slice{Base.OneTo{Int64}}, Base.Slice{Base.OneTo{Int64}},
Base.Slice{Base.OneTo{Int64}}, UnitRange{Int64},
Base.Slice{Base.OneTo{Int64}}}, false}, SubArray{Float32, 5,
Array{Float32, 5}, Tuple{Base.Slice{Base.OneTo{Int64}},
Base.Slice{Base.OneTo{Int64}}, Base.Slice{Base.OneTo{Int64}},
UnitRange{Int64}, Base.Slice{Base.OneTo{Int64}}}, false},
CuArray{Float32, 5, CUDA.Mem.DeviceBuffer}, DenseConvDims{3, 3, 3, 6,
3}, Int64, Int64, Int64, UnitRange{Int64}})(onethread::Bool)
# NNlib ./threadingconstructs.jl:81
[3] (::NNlib.var"#909#threadsfor_fun#504"{Array{Float32, 3}, Float32, Float32, SubArray{Float32, 5, Array{Float32, 5},
Tuple{Base.Slice{Base.OneTo{Int64}}, Base.Slice{Base.OneTo{Int64}},
Base.Slice{Base.OneTo{Int64}}, UnitRange{Int64},
Base.Slice{Base.OneTo{Int64}}}, false}, SubArray{Float32, 5,
Array{Float32, 5}, Tuple{Base.Slice{Base.OneTo{Int64}},
Base.Slice{Base.OneTo{Int64}}, Base.Slice{Base.OneTo{Int64}},
UnitRange{Int64}, Base.Slice{Base.OneTo{Int64}}}, false},
CuArray{Float32, 5, CUDA.Mem.DeviceBuffer}, DenseConvDims{3, 3, 3, 6,
3}, Int64, Int64, Int64, UnitRange{Int64}})()
# NNlib ./threadingconstructs.jl:48
Stacktrace:
[1] sync_end(c::Channel{Any})
# Base ./task.jl:369
[2] macro expansion
# ./task.jl:388 [inlined]
[3] conv!(out::Array{Float32, 5}, in1::Array{Float32, 5}, in2::CuArray{Float32, 5,
CUDA.Mem.DeviceBuffer}, cdims::DenseConvDims{3, 3, 3, 6, 3};
kwargs::Base.Iterators.Pairs{Union{}, Union{}, Tuple{}, NamedTuple{(),
Tuple{}}})
# NNlib ~/.julia/packages/NNlib/0QnJJ/src/conv.jl:195 [4] conv!
# ~/.julia/packages/NNlib/0QnJJ/src/conv.jl:186 [inlined] [5] conv!(y::Array{Float32, 4}, x::Array{Float32, 4}, w::CuArray{Float32,
4, CUDA.Mem.DeviceBuffer}, cdims::DenseConvDims{2, 2, 2, 4, 2};
kwargs::Base.Iterators.Pairs{Union{}, Union{}, Tuple{}, NamedTuple{(),
Tuple{}}})
# NNlib ~/.julia/packages/NNlib/0QnJJ/src/conv.jl:145 [6] conv!
# ~/.julia/packages/NNlib/0QnJJ/src/conv.jl:145 [inlined] [7] conv(x::Array{Float32, 4}, w::CuArray{Float32, 4,
CUDA.Mem.DeviceBuffer}, cdims::DenseConvDims{2, 2, 2, 4, 2};
kwargs::Base.Iterators.Pairs{Union{}, Union{}, Tuple{}, NamedTuple{(),
Tuple{}}})
# NNlib ~/.julia/packages/NNlib/0QnJJ/src/conv.jl:88 [8] conv
# ~/.julia/packages/NNlib/0QnJJ/src/conv.jl:86 [inlined] [9] #rrule#312
# ~/.julia/packages/NNlib/0QnJJ/src/conv.jl:313 [inlined] [10] rrule
# ~/.julia/packages/NNlib/0QnJJ/src/conv.jl:304 [inlined] [11] rrule
# ~/.julia/packages/ChainRulesCore/ctmSK/src/rules.jl:134 [inlined] [12] chain_rrule
# ~/.julia/packages/Zygote/xGkZ5/src/compiler/chainrules.jl:218 [inlined] [13] macro expansion
# ~/.julia/packages/Zygote/xGkZ5/src/compiler/interface2.jl:0 [inlined] [14] _pullback
# ~/.julia/packages/Zygote/xGkZ5/src/compiler/interface2.jl:9 [inlined] [15] _pullback
# ~/.julia/packages/Flux/KkC79/src/layers/conv.jl:200 [inlined] [16] macro expansion
# ~/.julia/packages/Flux/KkC79/src/layers/basic.jl:53 [inlined] [17] _pullback
# ~/.julia/packages/Flux/KkC79/src/layers/basic.jl:53 [inlined]
WARNING: both Losses and NNlib export "ctc_loss"; uses of it in module Flux must be qualified
[18] _pullback(::Zygote.Context{true}, ::typeof(Flux._applychain),
::Tuple{Conv{2, 2, typeof(identity),
CuArray{Float32, 4, CUDA.Mem.DeviceBuffer}, CuArray{Float32, 1,
CUDA.Mem.DeviceBuffer}}, BatchNorm{typeof(selu), CuArray{Float32, 1,
CUDA.Mem.DeviceBuffer}, Float32, CuArray{Float32, 1,
CUDA.Mem.DeviceBuffer}}, Conv{2, 2, typeof(identity), CuArray{Float32,
4, CUDA.Mem.DeviceBuffer}, CuArray{Float32, 1,
CUDA.Mem.DeviceBuffer}}, BatchNorm{typeof(selu), CuArray{Float32, 1,
CUDA.Mem.DeviceBuffer}, Float32, CuArray{Float32, 1,
CUDA.Mem.DeviceBuffer}}}, ::Array{Float32, 4})
# Zygote ~/.julia/packages/Zygote/xGkZ5/src/compiler/interface2.jl:0 [19]
_pullback
# ~/.julia/packages/Flux/KkC79/src/layers/basic.jl:51 [inlined] [20] _pullback(ctx::Zygote.Context{true}, f::Chain{Tuple{Conv{2, 2,
typeof(identity), CuArray{Float32, 4, CUDA.Mem.DeviceBuffer},
CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}}, BatchNorm{typeof(selu),
CuArray{Float32, 1, CUDA.Mem.DeviceBuffer}, Float32, CuArray{Float32,
1, CUDA.Mem.DeviceBuffer}}, Conv{2, 2, typeof(identity),
CuArray{Float32, 4, CUDA.Mem.DeviceBuffer}, CuArray{Float32, 1,
CUDA.Mem.DeviceBuffer}}, BatchNorm{typeof(selu), CuArray{Float32, 1,
CUDA.Mem.DeviceBuffer}, Float32, CuArray{Float32, 1,
CUDA.Mem.DeviceBuffer}}}}, args::Array{Float32, 4})
# Zygote ~/.julia/packages/Zygote/xGkZ5/src/compiler/interface2.jl:0 [21]
_pullback
# ~/.julia/dev/UNet/src/model.jl:93 [inlined] [22] _pullback(ctx::Zygote.Context{true}, f::Unet, args::Array{Float32, 4})
# Zygote ~/.julia/packages/Zygote/xGkZ5/src/compiler/interface2.jl:0 [23]
_pullback
# ~/.julia/dev/UNet/src/train.jl:37 [inlined] [24] _pullback(::Zygote.Context{true}, ::UNet.var"#loss#75"{typeof(Flux.Losses.logitcrossentropy), Unet},
::Array{Float32, 4}, ::Array{Int32, 4})
# Zygote ~/.julia/packages/Zygote/xGkZ5/src/compiler/interface2.jl:0 [25]
_apply
# ./boot.jl:804 [inlined] [26] adjoint
# ~/.julia/packages/Zygote/xGkZ5/src/lib/lib.jl:203 [inlined] [27] _pullback
# ~/.julia/packages/ZygoteRules/AIbCs/src/adjoint.jl:65 [inlined] [28] _pullback
# ~/.julia/packages/Flux/KkC79/src/optimise/train.jl:120 [inlined] [29] _pullback(::Zygote.Context{true},
::Flux.Optimise.var"#37#40"{UNet.var"#loss#75"{typeof(Flux.Losses.logitcrossentropy),
Unet}, Tuple{Array{Float32, 4}, Array{Int32, 4}}})
# Zygote ~/.julia/packages/Zygote/xGkZ5/src/compiler/interface2.jl:0 [30]
pullback(f::Function, ps::Zygote.Params{Zygote.Buffer{Any,
Vector{Any}}})
# Zygote ~/.julia/packages/Zygote/xGkZ5/src/compiler/interface.jl:373 [31]
gradient(f::Function, args::Zygote.Params{Zygote.Buffer{Any,
Vector{Any}}})
# Zygote ~/.julia/packages/Zygote/xGkZ5/src/compiler/interface.jl:96 [32]
macro expansion
# ~/.julia/packages/Flux/KkC79/src/optimise/train.jl:119 [inlined] [33] macro expansion
# ~/.julia/packages/ProgressLogging/6KXlp/src/ProgressLogging.jl:328
[inlined] [34] train!(loss::Function,
ps::Zygote.Params{Zygote.Buffer{Any, Vector{Any}}},
data::MLUtils.DataLoader{Tuple{Array{Float32, 4}, Array{Int32, 4}},
Random._GLOBAL_RNG}, opt::RMSProp; cb::UNet.var"#77#78"{Unet,
MLUtils.DataLoader{Tuple{Array{Float32, 4}, Array{Int32, 4}},
Random._GLOBAL_RNG}})
# Flux.Optimise ~/.julia/packages/Flux/KkC79/src/optimise/train.jl:117 [35]
train(train_dataset::ImageDataset, test_dataset::ImageDataset,
model::Unet, opt::RMSProp,
loss::UNet.var"#loss#75"{typeof(Flux.Losses.logitcrossentropy), Unet};
batch_size::Int64, num_epochs::Int64,
learning_rate_drop_rate::Float64, learning_rate_step::Int64,
device::Function, save_intermediate_models::Bool, save_model::Bool,
out_dir::String, model_file_name::String)
# UNet ~/.julia/dev/UNet/src/train.jl:123 [36] train(train_dataset::ImageDataset, test_dataset::ImageDataset,
num_channels::Int64, num_labels::Int64;
loss_function::typeof(Flux.Losses.logitcrossentropy),
learning_rate::Float64, momentum::Float64, device::Function,
kwargs::Base.Iterators.Pairs{Symbol, Any, NTuple{6, Symbol},
NamedTuple{(:num_epochs, :batch_size, :save_intermediate_models,
:save_model, :out_dir, :model_file_name), Tuple{Int64, Int64, Bool,
Bool, String, String}}})
# UNet ~/.julia/dev/UNet/src/train.jl:40 [37] top-level scope
# /train_images.jl:70 [38] include(fname::String)
# Base.MainInclude ./client.jl:444 [39] top-level scope
# REPL[2]:1 in expression starting at train_images.jl:70
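For what it's worth, the MethodError at the heart of the nested task error shows gemm! being handed CPU pointers (Ptr{Float32}) for the im2col buffer and the output, but a GPU pointer (CuPtr{Float32}) for the weights: the Conv weights are CuArrays while the minibatch x and output y are plain Arrays, so NNlib takes its CPU im2col path and fails at the BLAS gemm! call. A minimal sketch, with hypothetical names (model, train_loader), of keeping model and data on the same device with Flux:

using Flux, CUDA

model = gpu(model)                 # weights become CuArrays
for (x, y) in train_loader         # the DataLoader here yields CPU Arrays
    x, y = gpu(x), gpu(y)          # move each minibatch to the GPU as well
    # ... compute loss / gradients / update as before ...
end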

Related

How can I put a slice of a Matrix into a 3D Array with SMatrix type of inner structure?

Suppose I have this Matrix:
julia> mat = [
1 2 3 4
5 6 7 8
9 8 7 6
];
Then I want to put slices of this Matrix into a 3D Array with types of SMatrix{Int64}, like below:
julia> using StaticArrays
julia> arr = Array{SMatrix{Int64}, 3}(undef, 3, 2, 3);
julia> col_idx = [1, 2, 3];
julia> foreach(x->arr[:, :, x] = mat[:, x:x+1], col_idx)
ERROR: MethodError: Cannot `convert` an object of type
Int64 to an object of type
SMatrix{Int64}
Closest candidates are:
convert(::Type{T}, ::LinearAlgebra.Factorization) where T<:AbstractArray at C:\Users\JUL\.julia\juliaup\julia-1.8.3+0.x64\share\julia\stdlib\v1.8\LinearAlgebra\src\factorization.jl:58
convert(::Type{SA}, ::Tuple) where SA<:StaticArray at C:\Users\JUL\.julia\packages\StaticArrays\x7lS0\src\convert.jl:179
convert(::Type{SA}, ::SA) where SA<:StaticArray at C:\Users\JUL\.julia\packages\StaticArrays\x7lS0\src\convert.jl:178
...
Stacktrace:
[1] setindex!
# .\array.jl:968 [inlined]
[2] macro expansion
# .\multidimensional.jl:946 [inlined]
[3] macro expansion
# .\cartesian.jl:64 [inlined]
[4] macro expansion
# .\multidimensional.jl:941 [inlined]
[5] _unsafe_setindex!(::IndexLinear, ::Array{SMatrix{Int64}, 3}, ::Matrix{Int64}, ::Base.Slice{Base.OneTo{Int64}}, ::Base.Slice{Base.OneTo{Int64}}, ::Int64)
# Base .\multidimensional.jl:953
[6] _setindex!
# .\multidimensional.jl:930 [inlined]
[7] setindex!(::Array{SMatrix{Int64}, 3}, ::Matrix{Int64}, ::Function, ::Function, ::Int64)
# Base .\abstractarray.jl:1344
[8] (::var"#5#6")(x::Int64)
# Main .\REPL[20]:1
[9] foreach(f::var"#5#6", itr::Vector{Int64})
# Base .\abstractarray.jl:2774
[10] top-level scope
# REPL[20]:1
How can I achieve it?
P.S.:
This is just a minimal and reproducible example. In the practical sense, I have a size of (10, 10, 2000) for arr and a big size for mat as well (10x2000, I guess)!
If I understood correctly, do you want an Array of SMatrices?
mat = [ 1 2 3 4
5 6 7 8
9 8 7 6 ];
using StaticArrays
col_idx = [1, 2, 3];
arr = [SMatrix{3,2}(mat[:, x:x+1]) for x in col_idx]
3-element Vector{SMatrix{3, 2, Int64, 6}}:
[1 2; 5 6; 9 8]
[2 3; 6 7; 8 7]
[3 4; 7 8; 7 6]
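A quick usage note on the result above (not part of the original answer): arr is now a plain Vector whose elements are the SMatrix slices, so each slice is accessed with a single index instead of arr[:, :, k]:

arr[2]          # the SMatrix built from mat[:, 2:3], i.e. [2 3; 6 7; 8 7]
size(arr[2])    # (3, 2)
eltype(arr)     # SMatrix{3, 2, Int64, 6}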
Then, what if I say:
julia> using StaticArrays
julia> mat = [
1 2 3 4
5 6 7 8
9 8 7 6
];
julia> arr = Array{Int64, 3}(undef, 3, 2, 3);
julia> foreach(x->arr[:, :, x] = mat[:, x:x+1], [1, 2, 3]);
julia> sarr = SArray{Tuple{3, 2, 3}}(arr)
3×2×3 SArray{Tuple{3, 2, 3}, Int64, 3, 18} with indices SOneTo(3)×SOneTo(2)×SOneTo(3):
[:, :, 1] =
1 2
5 6
9 8
[:, :, 2] =
2 3
6 7
8 7
[:, :, 3] =
3 4
7 8
7 6
julia> typeof(sarr[:, :, 1])
SMatrix{3, 2, Int64, 6} (alias for SArray{Tuple{3, 2}, Int64, 2, 6})
First, I created a regular 3D Array, then constructed a SArray based on it.
However, in the case of your practical situation, I tried the following:
julia> mat = rand(10, 2000);
julia> arr = Array{Float64, 3}(undef, 10, 2, 1999);
julia> foreach(x->arr[:, :, x] = mat[:, x:x+1], 1:1999);
julia> sarr = SArray{Tuple{10, 2, 1999}}(arr);
But it takes too much time to construct such a container (I canceled it before it finished, so I don't know its runtime); an SArray with 10*2*1999 ≈ 40,000 elements is far beyond the small sizes StaticArrays is designed for, so compile time blows up. Hence, in these cases, it's better to take @AboAmmar's advice.
Inspired by @Shayan and @AboAmmar, this answer explores using the BlockArrays.jl package to construct the desired result. BlockArrays puts existing arrays into a 'meta-array', and the sub-arrays can be of SMatrix type.
In code:
using StaticArrays, BlockArrays
mat = rand(10,2000) # random demo matrix
# make all the slice SArrays
arr = [SArray{Tuple{10,2,1}, Float64, 3}(mat[:,i:i+1])
for i=1:1999]
arr = reshape(arr,1,1,1999)
# glue them into a BlockArray
bricked = mortar(arr)
After that:
julia> size(bricked)
(10, 2, 1999)
julia> bricked[:,:,25]
1×1-blocked 10×2 BlockMatrix{Float64}:
0.265972 0.258414
0.396142 0.863366
0.41708 0.648276
0.960283 0.773064
0.62513 0.268989
0.132796 0.0493077
0.844674 0.791772
0.59638 0.0769661
0.221536 0.388623
0.595742 0.50732
Hopefully this method gets the performance trade-off you wanted (or at least introduces some new ideas).
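One follow-up worth knowing (a sketch, assuming the bricked array built above): the individual SArray blocks can be pulled back out with BlockArrays' blocks function:

using BlockArrays
b = blocks(bricked)    # 1×1×1999 array holding the SArray blocks
b[1, 1, 25]            # the 10×2×1 SArray that was built from mat[:, 25:26]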

Is it possible to get the value of an @NLexpression?

How is it possible to obtain the value of an @NLexpression when the variables are fixed? In the following code the variables are fixed, but the value of k1 cannot be retrieved.
using JuMP, Distributions, Juniper, Ipopt, Gurobi
#-----Model parameters--------------------------------------------------------
sig, C1, c0 = 2, 300, 10;
E, landa, T0, T1, T2, gam1, gam2, a1, a2, a3, ap = 0.05, 0.01, 0, 2, 2, 1, 1, 0.5, 0.1, 50, 25;
xhat = [2.807064523673271; 23.0; 1.3349699464500042];
f(x) = cdf(Normal(0, 1), x);
#------------------------------------------------------------------------------
ALT = Model(optimizer_with_attributes(Juniper.Optimizer,
    "nl_solver" => optimizer_with_attributes(Ipopt.Optimizer, "print_level" => 0),
    "mip_solver" => optimizer_with_attributes(Gurobi.Optimizer, "logLevel" => 0),
    "registered_functions" => [Juniper.register(:f, 1, f; autodiff = true)])
);
# variables--------------------------------------------------------------------
JuMP.register(ALT, :f, 1, f; autodiff = true);
@variable(ALT, hp == xhat[3]);
@variable(ALT, Lp == xhat[1]);
@variable(ALT, np == xhat[2], Int);
#------------------------------------------------------------------------------
C1 = rand(100:100:300);
sig = rand(0.5:0.5:2);
#------------------------------------------------------------------------------
k1 = @NLexpression(ALT, hp / (1 - f(Lp - sig*sqrt(np)) + f(-Lp - sig*sqrt(np))));
JuMP.value(k1);
the error is this:
julia> JuMP.value(k1)
ERROR: type Nothing has no field status
Stacktrace:
[1] getproperty(::Nothing, ::Symbol) at .\Base.jl:33
[2] get at C:\Users\admin\.julia\packages\Juniper\dNHnx\src\MOI_wrapper\results.jl:4 [inlined]
[3] get(::MathOptInterface.Bridges.LazyBridgeOptimizer{Juniper.Optimizer}, ::MathOptInterface.TerminationStatus) at C:\Users\admin\.julia\packages\MathOptInterface\bygN7\src\Bridges\bridge_optimizer.jl:587
[4] get(::MathOptInterface.Utilities.CachingOptimizer{MathOptInterface.AbstractOptimizer,MathOptInterface.Utilities.UniversalFallback{MathOptInterface.Utilities.Model{Float64}}}, ::MathOptInterface.TerminationStatus) at C:\Users\admin\.julia\packages\MathOptInterface\bygN7\src\Utilities\cachingoptimizer.jl:553
[5] _moi_get_result(::MathOptInterface.Utilities.CachingOptimizer{MathOptInterface.AbstractOptimizer,MathOptInterface.Utilities.UniversalFallback{MathOptInterface.Utilities.Model{Float64}}}, ::MathOptInterface.VariablePrimal, ::Vararg{Any,N} where N) at C:\Users\admin\.julia\packages\JuMP\YXK4e\src\JuMP.jl:844
[6] get(::Model, ::MathOptInterface.VariablePrimal, ::VariableRef) at C:\Users\admin\.julia\packages\JuMP\YXK4e\src\JuMP.jl:877
[7] value(::VariableRef; result::Int64) at C:\Users\admin\.julia\packages\JuMP\YXK4e\src\variables.jl:767
[8] #103 at C:\Users\admin\.julia\packages\JuMP\YXK4e\src\nlp.jl:1159 [inlined]
[9] value(::NonlinearExpression, ::JuMP.var"#103#104"{Int64}) at C:\Users\admin\.julia\packages\JuMP\YXK4e\src\nlp.jl:1102
[10] #value#102 at C:\Users\admin\.julia\packages\JuMP\YXK4e\src\nlp.jl:1159 [inlined]
[11] value(::NonlinearExpression) at C:\Users\admin\.julia\packages\JuMP\YXK4e\src\nlp.jl:1159
[12] top-level scope at none:1
Would you please help me understand how to resolve this error?
Thanks.
Please update to Juniper v0.8.0. I fixed this issue a few days ago.
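For completeness, a minimal sketch of doing that update from the package manager (assuming Juniper is installed in the active environment):

using Pkg
Pkg.update("Juniper")                          # pull in the latest release
# or pin the fixed release explicitly:
Pkg.add(name = "Juniper", version = "0.8.0")
Pkg.status("Juniper")                          # confirm which version was resolved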
P.S.: in the future, please consider posting on the JuMP community forum: https://discourse.julialang.org/c/domain/opt/13. There are more readers of JuMP-specific questions there.

LinearAlgebra.SingularException(7) while executing radial basis interpolation using Surrogates in Julia?

I am trying to perform radial basis interpolation in Julia using the Surrogates package.
I have developed the following code:
using Surrogates
function t(train, test)
    xy = zip(train[:,1], train[:,2]) |> collect
    lb = vcat(minimum(train[:,1]), minimum(train[:,2]))
    ub = vcat(maximum(train[:,1]), maximum(train[:,2]))
    rbf = Surrogates.RadialBasis(xy, train[:,3], lb, ub)
    testing = [rbf(i) for i in test]
    return rbf, testing
end
train = [0.0 0.0 4;
0.0 0.0 2;
0.0 0.0 2;
0.0 0.0 2;
0.0 0.0 1;
0.0 0.0 3;
0.0 0.0 6;
0.0 0.0 7]
test = [(1.0, 1.0),
(1.0, 1.0),
(2.0, 2.0),
(2.0, 2.0),
(2.0, 2.0),
(2.0, 2.0),
(2.0, 2.0),
(2.0, 2.0)]
t(train, test)
However, upon executing this above code I am getting the following error:
LinearAlgebra.SingularException(7)
Stacktrace:
[1] checknonsingular at C:\buildbot\worker\package_win64\build\usr\share\julia\stdlib\v1.5\LinearAlgebra\src\factorization.jl:19 [inlined]
[2] checknonsingular at C:\buildbot\worker\package_win64\build\usr\share\julia\stdlib\v1.5\LinearAlgebra\src\factorization.jl:21 [inlined]
[3] bunchkaufman!(::LinearAlgebra.Symmetric{Float64,Array{Float64,2}}, ::Bool; check::Bool) at C:\buildbot\worker\package_win64\build\usr\share\julia\stdlib\v1.5\LinearAlgebra\src\bunchkaufman.jl:99
[4] #bunchkaufman#142 at C:\buildbot\worker\package_win64\build\usr\share\julia\stdlib\v1.5\LinearAlgebra\src\bunchkaufman.jl:186 [inlined]
[5] #_factorize#94 at C:\buildbot\worker\package_win64\build\usr\share\julia\stdlib\v1.5\LinearAlgebra\src\symmetric.jl:638 [inlined]
[6] _factorize at C:\buildbot\worker\package_win64\build\usr\share\julia\stdlib\v1.5\LinearAlgebra\src\symmetric.jl:636 [inlined]
[7] factorize at C:\buildbot\worker\package_win64\build\usr\share\julia\stdlib\v1.5\LinearAlgebra\src\symmetric.jl:634 [inlined]
[8] \(::LinearAlgebra.Symmetric{Float64,Array{Float64,2}}, ::Array{Float64,1}) at C:\buildbot\worker\package_win64\build\usr\share\julia\stdlib\v1.5\LinearAlgebra\src\symmetric.jl:648
[9] _calc_coeffs(::Array{Tuple{Float64,Float64},1}, ::Array{Float64,1}, ::Array{Float64,1}, ::Array{Float64,1}, ::Function, ::Int64, ::Float64, ::Bool) at C:\Users\user\.julia\packages\Surrogates\wJbFN\src\Radials.jl:61
[10] RadialBasis(::Array{Tuple{Float64,Float64},1}, ::Array{Float64,1}, ::Array{Float64,1}, ::Array{Float64,1}; rad::Surrogates.RadialFunction{Int64,Surrogates.var"#1#2"}, scale_factor::Float64, sparse::Bool) at C:\Users\user\.julia\packages\Surrogates\wJbFN\src\Radials.jl:51
[11] RadialBasis(::Array{Tuple{Float64,Float64},1}, ::Array{Float64,1}, ::Array{Float64,1}, ::Array{Float64,1}) at C:\Users\user\.julia\packages\Surrogates\wJbFN\src\Radials.jl:49
Please suggest a solution to resolve this issue!
Thanks!
Your training data contains only a single sampling point, (0.0, 0.0), with multiple different values. Replace it with sounder data and your code works:
julia> train = Float64[
1 2 3
4 5 6
7 8 9
]
3×3 Matrix{Float64}:
1.0 2.0 3.0
4.0 5.0 6.0
7.0 8.0 9.0
julia> t(train, test)
(RadialBasis{Surrogates.var"#1#2", Int64, Vector{Tuple{Float64, Float64}}, Vector{Float64}, Vector{Float64}, Vector{Float64}, Vector{Float64}, Float64, Bool}(Surrogates.var"#1#2"(), 0, [(1.0, 2.0), (4.0, 5.0), (7.0, 8.0)], [3.0, 6.0, 9.0], [1.0, 2.0], [7.0, 8.0], [0.3535533905932738, 0.0, -0.3535533905932738, 6.0], 1.0, false), [3.0939521879919494, 3.0939521879919494, 3.5922131362964587, 3.5922131362964587, 3.5922131362964587, 3.5922131362964587, 3.5922131362964587, 3.5922131362964587])
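As a quick sanity check (a sketch, not part of the answer above): duplicated sample points are what make the radial-basis system singular, so it can help to verify that the points are distinct before constructing the surrogate:

xy = zip(train[:,1], train[:,2]) |> collect
@assert allunique(xy) "duplicate sample points will make the RBF interpolation matrix singular"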

Reshaping a circular buffer to the third dimension

We can reshape a regular matrix to the third dimension using:
julia> data = rand(4,2)
4×2 Array{Float64,2}:
0.89585 0.328315
0.77878 0.619666
0.232389 0.132091
0.48543 0.829476
julia> reshape(data, 4, 1, 2)
4×1×2 Array{Float64,3}:
[:, :, 1] =
0.895850499952602
0.7787804133322247
0.23238945917674037
0.4854297310447009
[:, :, 2] =
0.3283154491436233
0.6196660556881552
0.13209084702809903
0.8294762758800456
But if we try the same with a CircularBuffer we get an error:
using DataStructures
julia> data = CircularBuffer{Vector{Float64}}(4)
0-element CircularBuffer{Array{Float64,1}}
julia> push!(data, rand(2))
1-element CircularBuffer{Array{Float64,1}}:
[0.0271144, 0.131345]
julia> push!(data, rand(2))
2-element CircularBuffer{Array{Float64,1}}:
[0.0271144, 0.131345]
[0.0483998, 0.384114]
julia> push!(data, rand(2))
3-element CircularBuffer{Array{Float64,1}}:
[0.0271144, 0.131345]
[0.0483998, 0.384114]
[0.856657, 0.239313]
julia> push!(data, rand(2))
4-element CircularBuffer{Array{Float64,1}}:
[0.0271144, 0.131345]
[0.0483998, 0.384114]
[0.856657, 0.239313]
[0.573953, 0.0423042]
julia> reshape(data, 4, 1, 2)
ERROR: DimensionMismatch("parent has 4 elements, which is incompatible with size (4, 1, 2)")
Stacktrace:
[1] _throw_dmrs(::Int64, ::String, ::Tuple{Int64,Int64,Int64}) at ./reshapedarray.jl:180
[2] _reshape at ./reshapedarray.jl:175 [inlined]
[3] reshape(::CircularBuffer{Array{Float64,1}}, ::Tuple{Int64,Int64,Int64}) at ./reshapedarray.jl:112
[4] reshape(::CircularBuffer{Array{Float64,1}}, ::Int64, ::Int64, ::Vararg{Int64,N} where N) at ./reshapedarray.jl:115
[5] top-level scope at none:0
Your CircularBuffer is a Vector with 4 elements (each element is itself a length-2 Vector), so any reshape must keep that element count:
julia> CircularBuffer <: AbstractVector
true
julia> reshape(data,2,1,2)
2×1×2 reshape(::CircularBuffer{Array{Float64,1}}, 2, 1, 2) with eltype Array{Float64,1}:
[:, :, 1] =
[0.8553997132170639, 0.823601762586583]
[0.16126832224468735, 0.11158333622955818]
[:, :, 2] =
[0.3559049470946831, 0.8110608687196386]
[0.7729569142023647, 0.9370118594277839]
Another option is to use an 8-element CircularBuffer{Float64} and then reshape it; see the Julia session below:
julia> data2 = CircularBuffer{Float64}(8);
julia> push!.(Ref(data2), rand(8));
julia> reshape(data2, 4, 1, 2)
4×1×2 reshape(::CircularBuffer{Float64}, 4, 1, 2) with eltype Float64:
[:, :, 1] =
0.016467979685045098
0.14558440901833336
0.11701214175525476
0.7006868771470229
[:, :, 2] =
0.03545592972243128
0.19139585572379736
0.5295403356035531
0.5409464215739019
I guess what you want is a CircularArrayBuffer
julia> using ReinforcementLearningCore
julia> b = CircularArrayBuffer{Float64}(4, 2)
4×0 CircularArrayBuffer{Float64,2}
julia> push!(b, rand(4));
julia> push!(b, rand(4));
julia> reshape(b, 4, 1, 2)
4×1×2 reshape(::CircularArrayBuffer{Float64,2}, 4, 1, 2) with eltype Float64:
[:, :, 1] =
0.09621058339946265
0.19652636521722577
0.14816367263437913
0.5415815617368629
[:, :, 2] =
0.38976815167466494
0.9344752986999203
0.43187275186834295
0.7951761882018082
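If the end goal is really a 4×1×2 numeric array like in the first example, one more option (a sketch, assuming the 4-element data buffer of length-2 vectors from the question) is to materialize the buffer into a Matrix first and reshape that:

m = reduce(hcat, data)              # 2×4 Matrix{Float64}; column i is the i-th pushed vector
reshape(permutedims(m), 4, 1, 2)    # 4×1×2 Array, analogous to reshape(rand(4,2), 4, 1, 2)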

How to generate a list of certain subsets of x in R?

I want to generate a set in R which contains all of its subsets but not the set itself.
For example, say I have the set
{1, 2, 3}
I want to generate the following list in R
{{}, {1}, {2}, {3}, {1, 2}, {1, 3}, {2, 3}}
However, the commands I am trying in R (powerset and set_power) give
{{}, {1}, {2}, {3}, {1, 2}, {1, 3}, {2, 3}, {1, 2, 3}}
I want to create a set which contains all of its subsets.
Any help would be appreciated!
You can use the sets package.
First, create set A:
library(sets)
A <- set(1,2,3)
A
{1, 2, 3}
Find the power set:
PS <- 2^A
PS
{{}, {1}, {2}, {3}, {1, 2}, {1, 3}, {2, 3}, {1, 2, 3}}
And subtract A from the power set:
B <- set_symdiff(PS,set(A))
B
{{}, {1}, {2}, {3}, {1, 2}, {1, 3}, {2, 3}}
I have managed to do it with:
s <- c(1,2,3)
powerSet(s)[-length(powerSet(s))]
Thanks!
