I am trying to estimate the following model
where I provide uniform priors and I code the likelihood. The latter comes from this paper and goes as follows:
In the theano/pymc3 implementation I am computing the first and second term on the rhs in first_term and second_term. Finally, logp sums over the entire sample.
Theano, on its own, produces some output, yet when I integrate it into a pymc3 model the following error appears:
TypeError: ('Bad input argument to theano function with name "<ipython-input-90-a5304bf41c50>:27" at index 0(0-based)', 'Expected an array-like object, but found a Variable: maybe you are trying to call a function on a (possibly shared) variable instead of a numeric array?')
I think the problem is how to supply pymc3 variables to theano.
from pymc3 import Model, Uniform, DensityDist
import theano.tensor as T
import theano
import numpy as np

p_test, theta_test = .1, .1
X = np.asarray([[1, 2, 3], [1, 2, 3]])

### theano test
theano.config.compute_test_value = 'off'
obss = T.matrix('obss')
p, theta = T.scalar('p'), T.scalar('theta')

def first_term(obs, p, theta):
    x, tx, n = obs[0], obs[1], obs[2]
    first_comp = p ** x * (1 - p) ** (n - x) * (1 - theta) ** n
    return first_comp

def second_term(obs, p, theta):
    x, tx, n = obs[0], obs[1], obs[2]
    components, updates = theano.scan(
        lambda t, p, theta, x, tx: p ** x * (1 - theta) ** (tx - x + t) * theta * (1 - theta) ** (tx + t),
        sequences=theano.tensor.arange(n), non_sequences=[p, theta, x, tx]
    )
    return components

def logp(X, p_hat, theta_hat):
    contributions, updates = theano.scan(
        lambda obs, p, theta: first_term(obs, p, theta) + T.sum(second_term(obs, p, theta)),
        sequences=obss, non_sequences=[p, theta]
    )
    ll = contributions.sum()
    get_ll = theano.function(inputs=[obss, p, theta], outputs=ll)
    return get_ll(X, p_hat, theta_hat)

print(logp(X, p_test, theta_test))  # It works!
### pymc3 implementation
with Model() as bg_model:
    p = Uniform('p', lower=0, upper=1)
    theta = Uniform('theta', lower=0, upper=.2)

    def first_term(obs, p, theta):
        x, tx, n = obs[0], obs[1], obs[2]
        first_comp = p ** x * (1 - p) ** (n - x) * (1 - theta) ** n
        return first_comp

    def second_term(obs, p, theta):
        x, tx, n = obs[0], obs[1], obs[2]
        components, updates = theano.scan(
            lambda t, p, theta, x, tx: p ** x * (1 - theta) ** (tx - x + t) * theta * (1 - theta) ** (tx + t),
            sequences=theano.tensor.arange(n), non_sequences=[p, theta, x, tx]
        )
        return components

    def logp(X):
        contributions, updates = theano.scan(
            lambda obs, p, theta: first_term(obs, p, theta) + T.sum(second_term(obs, p, theta)),
            sequences=obss, non_sequences=[p, theta]
        )
        ll = contributions.sum()
        get_ll = theano.function(inputs=[obss, p, theta], outputs=ll)
        return get_ll(X, p, theta)

    y = DensityDist('y', logp, observed=X)  # Nx4: y = f(f, x, tx, n | p, theta)
My first guess was modifying logp to return get_ll(X, p.eval(), theta.eval()), but then theano complains about some mysterious p_interval missing from the graph. Any clue?
I figured it out by: (i) simplifying things, (ii) avoiding theano operators when coding the likelihood, and (iii) using the built-in wrapper Deterministic for deterministic transformations of variables (my lifetime). To speed up computation I vectorised the likelihood by writing the second term on the rhs as the closed-form solution of a geometric series. Here is the code in case someone wants to test it on their own lifetime application.
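As a sketch of the algebra (my reconstruction, assuming the truncated sum runs over t = 0, ..., n - tx - 1, so that the common ratio is (1 - p) * (1 - theta)):

sum_{t=0}^{n-tx-1} (1-p)^(tx-x+t) * (1-theta)^(tx+t)
    = (1-p)^(tx-x) * (1-theta)^tx * sum_{t=0}^{n-tx-1} [(1-p) * (1-theta)]^t
    = [ (1-p)^(tx-x) * (1-theta)^tx - (1-p)^(n-x) * (1-theta)^n ] / (1 - (1-p) * (1-theta))

which is exactly the bracketed second term inside logp below.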
from pymc3 import Model, Uniform, Deterministic
import pymc3
import numpy as np
from scipy import optimize
import theano.tensor as T

X = np.array([[5, 64, 8, 13], [4, 71, 23, 41], [7, 70, 4, 19]])
# f, n, x, tx where f stands for the frequency of the triple (n, x, tx)

class CustomDist(pymc3.distributions.Discrete):
    def __init__(self, p, theta, *args, **kwargs):
        super(CustomDist, self).__init__(*args, **kwargs)
        self.p = p
        self.theta = theta

    def logp(self, X):
        p = self.p
        theta = self.theta
        f, n, x, tx = X[0], (X[1] + 1), X[2], (X[3] + 1)  # time indexed at 0, x > n
        ll = f * T.log(p ** x * (1 - p) ** (n - x) * (1 - theta) ** n +
                       ((1 - p) ** (tx - x) * (1 - theta) ** tx - (1 - p) ** (n - x) * (1 - theta) ** n) / (1 - (1 - p) * (1 - theta)))
        # eliminated the -1 since it would result in a negative ll
        return T.sum(ll)
with Model() as bg_model:
    p = Uniform('p', lower=0, upper=1)
    theta = Uniform('theta', lower=0, upper=1)
    like = CustomDist('like', p=p, theta=theta, observed=X.T)  # likelihood
    lt = Deterministic('lt', p / theta)

    # start = {'p': .5, 'theta': .1}
    start = pymc3.find_MAP(fmin=optimize.fmin_powell)
    step = pymc3.Slice([p, theta])  # step methods take free RVs, not Deterministics
    trace = pymc3.sample(5000, start=start, step=step)

pymc3.traceplot(trace[2000:])
This is my Julia code to simulate data and sample from a Turing.jl model:
using LinearAlgebra, Distributions, StatsBase
using Turing, FillArrays, DynamicHMC, LabelledArrays
using NNlib, GLM
using CSV, DataFrames
function generate_hmnl_data(R::Int=100, S::Int=30, C::Int=3,
                            Theta::Array{Float64, 2}=ones(2, 4),
                            Sigma::Array{Float64, 2}=Matrix(Diagonal(fill(0.1, 4))))
    K = size(Theta, 2)
    G = size(Theta, 1)
    Y = Array{Int64}(undef, R, S)
    X = randn(R, S, C, K)
    Z = Array{Float64}(undef, G, R)
    Z[1, :] .= 1
    if G > 1
        Z[2:G, :] = randn(R * (G-1))
    end
    Beta = Array{Float64}(undef, K, R)
    for r in 1:R
        println(Z[:, r])
        println(Theta)
        Beta[:, r] = rand(MvNormal(Theta' * Z[:, r], Sigma))
        for s in 1:S
            Y[r, s] = sample(1:C, Weights(exp.(X[r, s, :, :] * Beta[:, r])))
        end
    end
    return (R=R, S=S, C=C, K=K, G=G, Y=Y, X=X, Z=Z,
            beta_true=Beta, Theta_true=Theta, Sigma_true=Sigma)
end
d1 = generate_hmnl_data()
@model function hmnl(G::Int, Y::Matrix{Int64}, X::Array{Float64}, Z::Matrix{Float64})
    R, S, C, K = size(X)
    Theta = zeros(K, G)
    for k in 1:K
        for g in 1:G
            Theta[k, g] ~ Normal(0, 10)
        end
    end
    Sigma ~ InverseWishart(K, diagm(ones(K)))
    Beta = zeros(K, R)
    println(eltype(Beta))
    for r in 1:R
        Beta[:, r] ~ MvNormal(Theta * Z[:, r], Sigma)
        println(typeof(Beta[:, r]))
        for s in 1:S
            beta_r = copy(Beta[:, r])
            beta_r = convert(Vector{Float64}, beta_r)
            ut_rs = X[r, s, :, :] * beta_r
            v = softmax(ut_rs)
            Y[r, s] ~ Categorical(v)
        end
    end
end

sampler = HMC(.05, 10)
test_mod = hmnl(d1.G, d1.Y, d1.X, d1.Z)
chains = sample(test_mod, sampler, 1_000)
I get this error when I try to sample from the model: MethodError: no method matching float(::Type{Any}). The sampling statement Beta[:, r] ~ MvNormal(Theta * Z[:, r], Sigma) changes Beta[:, r] to type Vector{Any}.
I have tried
beta_r = copy(Beta[:, r])
beta_r = convert(Vector{Float64}, beta_r)
ut_rs = X[r, s, :, :] * beta_r
But then I get this error instead:
ERROR: TypeError: in typeassert, expected Float64, got a value of type ForwardDiff.Dual{Nothing, Float64, 12}
So it's messing with Turing AD somehow. I'm new to Turing and can't understand the right way to do this.
I'm reposting an answer from Tor Fjelde (https://github.com/torfjelde) which I received on GitHub. For Turing to work, you need to ensure that the types in your model can be inferred. I wasn't doing that. See: https://turing.ml/v0.22/docs/using-turing/performancetips#ensure-that-types-in-your-model-can-be-inferred
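The gist, as a minimal sketch of my own (the demo model below is hypothetical, not from the thread): any container you later fill via ~ statements must be allocated with an element type the AD backend can write into, which is exactly what the extra ::Type{T} = Float64 argument provides.

using Turing

# Hypothetical minimal model illustrating the pattern. A hard-coded
# zeros(K) container is a Vector{Float64} that cannot hold ForwardDiff.Dual
# numbers, so sampling degrades to an untyped container (the Vector{Any}
# from the question); zeros(T, K) lets Turing substitute a Dual-compatible
# element type during HMC/NUTS.
@model function demo(K::Int, ::Type{T} = Float64) where {T}
    beta = zeros(T, K)
    for k in 1:K
        beta[k] ~ Normal(0, 1)
    end
end

chain = sample(demo(3), NUTS(), 100)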
This function worked:
@model function hmnl(G::Int, Y::Matrix{Int64}, X::Array{Float64}, Z::Matrix{Float64}, ::Type{T} = Float64) where {T}
    R, S, C, K = size(X)
    Theta = zeros(T, K, G)
    for k in 1:K
        for g in 1:G
            Theta[k, g] ~ Normal(0, 10)
        end
    end
    Sigma ~ InverseWishart(K, diagm(ones(K)))
    Beta = zeros(T, K, R)
    println(eltype(Beta))
    for r in 1:R
        Beta[:, r] ~ MvNormal(Theta * Z[:, r], Sigma)
        println(typeof(Beta[:, r]))
        for s in 1:S
            ut_rs = X[r, s, :, :] * Beta[:, r]
            v = softmax(ut_rs)
            Y[r, s] ~ Categorical(v)
        end
    end
end
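With that change, the driver code from the question should run unchanged:

sampler = HMC(.05, 10)
test_mod = hmnl(d1.G, d1.Y, d1.X, d1.Z)
chains = sample(test_mod, sampler, 1_000)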
I am trying to minimize a nonlinear function with nonlinear inequality constraints with NLopt and JuMP.
In my test code below, I am minimizing a function with a known global minimum.
Local optimizers such as LD_MMA fail to find this global minimum, so I am trying to use NLopt's global optimizers, which allow nonlinear inequality constraints.
However, when I check my termination status, it says “termination_status(model) = MathOptInterface.OTHER_ERROR”. I am not sure which part of my code to check for this error.
What could be the cause?
I am using JuMP since I plan to use other solvers such as KNITRO in the future, but should I rather use the NLopt syntax directly?
Below is my code:
# THIS IS A CODE TO SOLVE FOR THE TOYMODEL
# THE EQUILIBRIUM IS CHARACTERIZED BY A NONLINEAR SYSTEM OF ODEs OF INCREASING FUNCTIONS B(x) AND S(y)
# THE GOAL IS TO APPROXIMATE B(x) AND S(y) WITH POLYNOMIALS
# FIND THE POLYNOMIAL COEFFICIENTS THAT MINIMIZE THE LEAST SQUARES OF THE EQUILIBRIUM EQUATIONS

# load packages
using Roots, NLopt, JuMP

# model primitives and other parameters
k = .5           # equal split
d = 1            # degree of polynomial
nparam = 2*d+2   # number of parameters to estimate
m = 10           # number of grids
m -= 1
vGrid = range(0, 1, m)  # discretize values
c1 = 0           # lower bound for B'() and S'()
c2 = 2           # lower and upper bounds for offers
c3 = 1           # lower and upper bounds for the parameters to be estimated

# objective function to be minimized
function obj(α::T...) where {T<:Real}
    # split parameters
    αb = α[1:d+1]    # coefficients for B(x)
    αs = α[d+2:end]  # coefficients for S(y)

    # define B(x), B'(x), S(y), and S'(y)
    B(v) = sum([αb[i] * v .^ (i-1) for i in 1:d+1])
    B1(v) = sum([αb[i] * (i-1) * v ^ (i-2) for i in 2:d+1])
    S(v) = sum([αs[i] * v .^ (i-1) for i in 1:d+1])
    S1(v) = sum([αs[i] * (i-1) * v ^ (i-2) for i in 2:d+1])

    # the equilibrium is characterized by the following first order conditions
    # FOCb(y) = B(k * y * S1(y) + S(y)) - S(y)
    # FOCs(x) = S(- (1-k) * (1-x) * B1(x) + B(x)) - B(x)
    function FOCb(y)
        sy = S(y)
        binv = find_zero(q -> B(q) - sy, (-c2, c2))
        return k * y * S1(y) + sy - binv
    end
    function FOCs(x)
        bx = B(x)
        sinv = find_zero(q -> S(q) - bx, (-c2, c2))
        return (1-k) * (1-x) * B1(x) - B(x) + sinv
    end

    # evaluate the FOCs at each grid point and return the sum of squares
    Eb = [FOCb(y) for y in vGrid]
    Es = [FOCs(x) for x in vGrid]
    E = [Eb; Es]
    return E' * E
end

# this is the actual global minimum
αa = [1/12, 2/3, 1/4, 2/3]
obj(αa...)

# do optimization
model = Model(NLopt.Optimizer)
set_optimizer_attribute(model, "algorithm", :GN_ISRES)
@variable(model, -c3 <= α[1:nparam] <= c3)
@NLconstraint(model, [j = 1:m], sum(α[i] * (i-1) * vGrid[j] ^ (i-2) for i in 2:d+1) >= c1)      # B should be increasing
@NLconstraint(model, [j = 1:m], sum(α[d+1+i] * (i-1) * vGrid[j] ^ (i-2) for i in 2:d+1) >= c1)  # S should be increasing
register(model, :obj, nparam, obj, autodiff=true)
@NLobjective(model, Min, obj(α...))

println("")
println("Initial values:")
for i in 1:nparam
    set_start_value(α[i], αa[i]+rand()*.1)
    println(start_value(α[i]))
end

JuMP.optimize!(model)

println("")
@show termination_status(model)
@show objective_value(model)
println("")
println("Solution:")
sol = [value(α[i]) for i in 1:nparam]
My output:
Initial values:
0.11233072522513032
0.7631843020124309
0.3331559403539963
0.7161240026812674
termination_status(model) = MathOptInterface.OTHER_ERROR
objective_value(model) = 0.19116585196576466
Solution:
4-element Vector{Float64}:
0.11233072522513032
0.7631843020124309
0.3331559403539963
0.7161240026812674
I answered on the Julia forum: https://discourse.julialang.org/t/mathoptinterface-other-error-when-trying-to-use-isres-of-nlopt-through-jump/87420/2.
Posting my answer for posterity:
You have multiple issues:
range(0,1,m) should be range(0, 1; length = m) (how did this work otherwise?). This is true for Julia 1.6; the positional range(start, stop, length) method was only added in Julia v1.8.
Sometimes your objective function errors because the root doesn't exist. If I run with Ipopt, I get
ERROR: ArgumentError: The interval [a,b] is not a bracketing interval.
You need f(a) and f(b) to have different signs (f(a) * f(b) < 0).
Consider a different bracket or try fzero(f, c) with an initial guess c.
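To make the bracketing point concrete (a hedged illustration of my own; the rewrite below switches to Roots.fzero with an initial guess for exactly this reason):

using Roots
# A bracketing call needs f(a) * f(b) < 0; with no real root in the
# interval it throws the ArgumentError quoted above:
find_zero(x -> x^2 + 1, (-2, 2))   # ERROR: not a bracketing interval
# A point-based call only needs an initial guess:
fzero(x -> x^2 - 2, 1.0)           # 1.4142135623730951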
Here's what I would do:
using JuMP
import Ipopt
import Roots

function main()
    k, d, c1, c2, c3, m = 0.5, 1, 0, 2, 1, 10
    nparam = 2 * d + 2
    m -= 1
    vGrid = range(0, 1; length = m)
    function obj(α::T...) where {T<:Real}
        αb, αs = α[1:d+1], α[d+2:end]
        B(v) = sum(αb[i] * v^(i-1) for i in 1:d+1)
        B1(v) = sum(αb[i] * (i-1) * v^(i-2) for i in 2:d+1)
        S(v) = sum(αs[i] * v^(i-1) for i in 1:d+1)
        S1(v) = sum(αs[i] * (i-1) * v^(i-2) for i in 2:d+1)
        function FOCb(y)
            sy = S(y)
            binv = Roots.fzero(q -> B(q) - sy, zero(T))
            return k * y * S1(y) + sy - binv
        end
        function FOCs(x)
            bx = B(x)
            sinv = Roots.fzero(q -> S(q) - bx, zero(T))
            return (1-k) * (1-x) * B1(x) - B(x) + sinv
        end
        return sum(FOCb(x)^2 + FOCs(x)^2 for x in vGrid)
    end
    αa = [1/12, 2/3, 1/4, 2/3]
    model = Model(Ipopt.Optimizer)
    @variable(model, -c3 <= α[i=1:nparam] <= c3, start = αa[i] + 0.1 * rand())
    @constraints(model, begin
        [j = 1:m], sum(α[i] * (i-1) * vGrid[j]^(i-2) for i in 2:d+1) >= c1
        [j = 1:m], sum(α[d+1+i] * (i-1) * vGrid[j]^(i-2) for i in 2:d+1) >= c1
    end)
    register(model, :obj, nparam, obj; autodiff = true)
    @NLobjective(model, Min, obj(α...))
    optimize!(model)
    print(solution_summary(model))
    return value.(α)
end

main()
I have written a surrogate function called GEKPLS and I am trying to make the code work with an optimizer to find the optimal theta parameter values.
As a first step, I'm trying to make it work with Zygote and have the following code:
function min_rlfv(theta)
    g = GEKPLS(X, y, grads, n_comp, delta_x, xlimits, extra_points, theta)
    return -g.reduced_likelihood_function_value
end

Zygote.gradient(min_rlfv, [0.01, 0.1])
Running the above code results in the following error message:
ERROR: Need an adjoint for constructor LinearAlgebra.QRCompactWYQ{Float64, Matrix{Float64}}. Gradient is of type LinearAlgebra.Transpose{Float64, Matrix{Float64}}
The stacktrace leads to the following function. The highlighted line in my code editor is Q, G = qr(Ft) from the following code block:
function _reduced_likelihood_function(theta, kernel_type, d, nt, ij, y_norma, noise = 0.0)
    reduced_likelihood_function_value = -Inf
    nugget = 1000000.0 * eps()  # a jitter for numerical stability
    if kernel_type == "squar_exp"
        r = squar_exp(theta, d)
    end
    R = (I + zeros(nt, nt)) .* (1.0 + nugget + noise)
    for k in 1:size(ij)[1]
        R[ij[k, 1], ij[k, 2]] = r[k]
        R[ij[k, 2], ij[k, 1]] = r[k]
    end
    C = cholesky(R).L
    F = ones(nt, 1)
    Ft = C \ F
    Q, G = qr(Ft)
    Q = Array(Q)
    Yt = C \ y_norma
    beta = G \ [(transpose(Q) ⋅ Yt)]
    rho = Yt .- (Ft .* beta)
    gamma = transpose(C) \ rho
    sigma2 = sum((rho) .^ 2, dims = 1) / nt
    detR = prod(diag(C) .^ (2.0 / nt))
    reduced_likelihood_function_value = -nt * log10(sum(sigma2)) - nt * log10(detR)
    return beta, gamma, reduced_likelihood_function_value
end
Any pointers on how this can be fixed?
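For what it's worth, one possible direction (an untested sketch on my part, not a confirmed fix): since F = ones(nt, 1), the matrix Ft = C \ F has a single column, and the thin QR factorization of a one-column matrix is available in closed form from AD-friendly primitives, which would sidestep the missing QRCompactWYQ adjoint entirely.

using LinearAlgebra

# Untested sketch: for a one-column matrix A, the thin QR factors are
# Q = A / norm(A) (size nt×1) and R = [norm(A)] (1×1), up to sign.
function qr_one_column(A::AbstractMatrix)
    g = norm(A)
    return A ./ g, g
end

# ...which might replace `Q, G = qr(Ft); Q = Array(Q)` in the function
# above, since both norm and broadcasting have Zygote adjoints.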
https://en.wikipedia.org/wiki/Superellipse
I have read the SO questions on how to point-pick from a circle and an ellipse.
How would one uniformly select random points from the interior of a super-ellipse?
More generally, how would one uniformly select random points from the interior of the curve described by an arbitrary super-formula?
https://en.wikipedia.org/wiki/Superformula
The discarding method is not considered a solution, as it is mathematically unenlightening.
In order to sample the superellipse, let's assume without loss of generality that a = b = 1. The general case can then be obtained by rescaling the corresponding axis.
The points in the first quadrant (positive x-coordinate and positive y-coordinate) can then be parametrized as:
x = r * ( cos(t) )^(2/n)
y = r * ( sin(t) )^(2/n)
with 0 <= r <= 1 and 0 <= t <= pi/2. Note that x^n + y^n = r^n * (cos(t)^2 + sin(t)^2) = r^n, so r plays the role of a radial coordinate.
Now we need to sample r and t so that the resulting sampling in x, y is uniform. To this end, let's calculate the Jacobian of this transform:
dx*dy = (2/n) * r * (sin(2*t)/2)^(2/n - 1) dr*dt
= (1/n) * d(r^2) * d(f(t))
Here we see that for the variable r it is sufficient to sample the value of r^2 uniformly and then transform back with a square root. The dependence on t is a bit more complicated: we need an antiderivative f(t) of (sin(2*t)/2)^(2/n - 1). With some effort, one gets
f(t) = -(n/2) * 2F1(1/n, (n-1)/n, 1 + 1/n, cos(t)^2) * cos(t)^(2/n)
where 2F1 is the hypergeometric function.
In order to obtain uniform sampling in x,y, we need now to sample uniformly the range of f(t) for t in [0, pi/2] and then find the t which corresponds to this sampled value, i.e., to solve for t the equation u = f(t) where u is a uniform random variable sampled from [f(0), f(pi/2)]. This is essentially the same method as for r, nevertheless in that case one can calculate the inverse directly.
One small issue with this approach is that the function f is not that well-behaved near zero: the infinite slope makes it quite challenging to find a root of u = f(t). To circumvent this, we can sample only the "upper part" of the first quadrant (i.e., the area between the lines x=y and x=0) and then obtain all the other points by symmetry (not only in the first quadrant but also in all the other ones).
An implementation of this method in Python could look like:
import numpy as np
from numpy.random import uniform, randint, seed
from scipy.optimize import brenth, ridder, bisect, newton
from scipy.special import gamma, hyp2f1
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
seed(100)
def superellipse_area(n):
#https://en.wikipedia.org/wiki/Superellipse#Mathematical_properties
inv_n = 1. / n
return 4 * ( gamma(1 + inv_n)**2 ) / gamma(1 + 2*inv_n)
def sample_superellipse(n, num_of_points = 2000):
def f(n, x):
inv_n = 1. / n
return -(n/2)*hyp2f1(inv_n, 1 - inv_n, 1 + inv_n, x)*(x**inv_n)
lb = f(n, 0.5)
ub = f(n, 0.0)
points = [None for idx in range(num_of_points)]
for idx in range(num_of_points):
r = np.sqrt(uniform())
v = uniform(lb, ub)
w = bisect(lambda w: f(n, w**n) - v, 0.0, 0.5**(1/n))
z = w**n
x = r * z**(1/n)
y = r * (1 - z)**(1/n)
if uniform(-1, 1) < 0:
y, x = x, y
x = (2*randint(0, 2) - 1)*x
y = (2*randint(0, 2) - 1)*y
points[idx] = [x, y]
return points
def plot_superellipse(ax, n, points):
coords_x = [p[0] for p in points]
coords_y = [p[1] for p in points]
ax.set_xlim(-1.25, 1.25)
ax.set_ylim(-1.25, 1.25)
ax.text(-1.1, 1, '{n:.1f}'.format(n = n), fontsize = 12)
ax.scatter(coords_x, coords_y, s = 0.6)
params = np.array([[0.5, 1], [2, 4]])
fig = plt.figure(figsize = (6, 6))
gs = gridspec.GridSpec(*params.shape, wspace = 1/32., hspace = 1/32.)
n_rows, n_cols = params.shape
for i in range(n_rows):
for j in range(n_cols):
n = params[i, j]
ax = plt.subplot(gs[i, j])
if i == n_rows-1:
ax.set_xticks([-1, 0, 1])
else:
ax.set_xticks([])
if j == 0:
ax.set_yticks([-1, 0, 1])
else:
ax.set_yticks([])
#ensure that the ellipses have similar point density
num_of_points = int(superellipse_area(n) / superellipse_area(2) * 4000)
points = sample_superellipse(n, num_of_points)
plot_superellipse(ax, n, points)
fig.savefig('fig.png')
This produces the following figure: a 2x2 grid of scatter plots of the sampled points for n = 0.5, 1, 2, and 4.
I’m trying to optimize a function using one of the algorithms that require a gradient. Basically I’m trying to learn how to optimize a function using a gradient in Julia. I’m fairly confident that my gradient is specified correctly. I know this because the similarly defined Matlab function for the gradient gives me the same values as in Julia for some test values of the arguments. Also, the Matlab version using fminunc with the gradient seems to optimize the function fine.
However when I run the Julia script, I seem to get the following error:
julia> include("ex2b.jl")
ERROR: `g!` has no method matching g!(::Array{Float64,1}, ::Array{Float64,1})
while loading ...\ex2b.jl, in expression starting on line 64
I'm running Julia 0.3.2 on a Windows 7 32-bit machine. Here is the code (basically a translation of some Matlab to Julia):
using Optim

function mapFeature(X1, X2)
    degrees = 5
    out = ones(size(X1)[1])
    for i in range(1, degrees+1)
        for j in range(0, i+1)
            term = reshape((X1.^(i-j) .* X2.^(j)), size(X1.^(i-j))[1], 1)
            out = hcat(out, term)
        end
    end
    return out
end

function sigmoid(z)
    return 1 ./ (1 + exp(-z))
end

function costFunc_logistic(theta, X, y, lam)
    m = length(y)
    regularization = sum(theta[2:end].^2) * lam / (2 * m)
    return sum((-y .* log(sigmoid(X * theta)) - (1 - y) .* log(1 - sigmoid(X * theta)))) ./ m + regularization
end

function costFunc_logistic_gradient!(theta, X, y, lam, m)
    grad = X' * (sigmoid(X * theta) .- y) ./ m
    grad[2:end] = grad[2:end] + theta[2:end] .* lam / m
    return grad
end

data = readcsv("ex2data2.txt")
X = mapFeature(data[:, 1], data[:, 2])
m, n = size(data)
y = data[:, end]
theta = zeros(size(X)[2])
lam = 1.0

f(theta::Array) = costFunc_logistic(theta, X, y, lam)
g!(theta::Array) = costFunc_logistic_gradient!(theta, X, y, lam, m)
optimize(f, g!, theta, method = :l_bfgs)
And here is some of the data:
0.051267,0.69956,1
-0.092742,0.68494,1
-0.21371,0.69225,1
-0.375,0.50219,1
-0.51325,0.46564,1
-0.52477,0.2098,1
-0.39804,0.034357,1
-0.30588,-0.19225,1
0.016705,-0.40424,1
0.13191,-0.51389,1
0.38537,-0.56506,1
0.52938,-0.5212,1
0.63882,-0.24342,1
0.73675,-0.18494,1
0.54666,0.48757,1
0.322,0.5826,1
0.16647,0.53874,1
-0.046659,0.81652,1
-0.17339,0.69956,1
-0.47869,0.63377,1
-0.60541,0.59722,1
-0.62846,0.33406,1
-0.59389,0.005117,1
-0.42108,-0.27266,1
-0.11578,-0.39693,1
0.20104,-0.60161,1
0.46601,-0.53582,1
0.67339,-0.53582,1
-0.13882,0.54605,1
-0.29435,0.77997,1
-0.26555,0.96272,1
-0.16187,0.8019,1
-0.17339,0.64839,1
-0.28283,0.47295,1
-0.36348,0.31213,1
-0.30012,0.027047,1
-0.23675,-0.21418,1
-0.06394,-0.18494,1
0.062788,-0.16301,1
0.22984,-0.41155,1
0.2932,-0.2288,1
0.48329,-0.18494,1
0.64459,-0.14108,1
0.46025,0.012427,1
0.6273,0.15863,1
0.57546,0.26827,1
0.72523,0.44371,1
0.22408,0.52412,1
0.44297,0.67032,1
0.322,0.69225,1
0.13767,0.57529,1
-0.0063364,0.39985,1
-0.092742,0.55336,1
-0.20795,0.35599,1
-0.20795,0.17325,1
-0.43836,0.21711,1
-0.21947,-0.016813,1
-0.13882,-0.27266,1
0.18376,0.93348,0
0.22408,0.77997,0
Let me know if you guys need additional details. Btw, this relates to a coursera machine learning course if curious.
The gradient should not be a function that computes and returns the gradient, but a function that stores it in its second argument (hence the exclamation mark in the function name, and the second array argument in the error message). The following seems to work.
function g!(theta::Array, storage::Array)
    storage[:] = costFunc_logistic_gradient!(theta, X, y, lam, m)
end
optimize(f, g!, theta, method = :l_bfgs)
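As a hedged aside (not part of the original answer): in later versions of Optim.jl the in-place convention flipped, with the storage array first and the solver passed as a type, so the equivalent would look like:

function g!(storage::Array, theta::Array)
    storage[:] = costFunc_logistic_gradient!(theta, X, y, lam, m)
end
optimize(f, g!, theta, LBFGS())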
The same using closures and currying (a version for those who are used to a function that returns both the cost and the gradient):
function cost_gradient(θ, X, y, λ)
    m = length(y)
    return (θ::Array) -> begin
        h = sigmoid(X * θ)  # (m,n+1)*(n+1,1) -> (m,1)
        J = (1 / m) * sum(-y .* log(h) .- (1 - y) .* log(1 - h)) + λ / (2 * m) * sum(θ[2:end] .^ 2)
    end, (θ::Array, storage::Array) -> begin
        h = sigmoid(X * θ)  # (m,n+1)*(n+1,1) -> (m,1)
        storage[:] = (1 / m) * (X' * (h .- y)) + (λ / m) * [0; θ[2:end]]
    end
end
Then, somewhere in the code:
initialθ = zeros(n, 1)
f, g! = cost_gradient(initialθ, X, y, λ)
res = optimize(f, g!, initialθ, method = :cg, iterations = your_iterations)
θ = res.minimum