JuMP: How to get multiple solutions from getvalue(x)

I'm solving this multi-objective problem:
using JuMP, Ipopt
f1(x,y) = x
f2(x,y) = (2.0 - exp(-((y-0.2)/0.004)^2) - 0.8*exp(-((y-0.6)/0.4)^2)) / x
isdefined(:f1) || JuMP.register(:f1, 2, f1, autodiff=true)
isdefined(:f2) || JuMP.register(:f2, 2, f2, autodiff=true)
m = Model(solver=IpoptSolver(print_level=0))
@variable(m, 0.1 <= x <= 1.0)
@variable(m, 0.0 <= y <= 1.0)
@variable(m, alpha1)
@variable(m, alpha2)
@NLobjective(m, Min, alpha1 + alpha2)
@constraint(m, f1(x,y) - z1_id >= -alpha1)
@constraint(m, f1(x,y) - z1_id <= alpha1)
@NLconstraint(m, f2(x,y) - z2_id >= -alpha2)
@NLconstraint(m, f2(x,y) - z2_id <= alpha2)
solve(m)
x_opt = getvalue(x)
y_opt = getvalue(y)
println("GOAL Programming (p=1): F1 = $(f1(x_opt, y_opt)), F2 = $(f2(x_opt, y_opt))")
It should have two solutions, but getvalue(x) gives me only the first one. How can I get all the others?

I ran into something similar, but in integer programming. Nevertheless, since my search for an answer took me here, this seems like a good place to share my ideas.
To me it seems the way to go about this is to first solve the system and then add a constraint that makes the previously found solution infeasible. My idea would be to append the following:
solve(m)
x_opt = getvalue(x)
y_opt = getvalue(y)
println("GOAL Programming (p=1): F1 = $(f1(x_opt, y_opt)), F2 = $(f2(x_opt, y_opt))")
eps = 1e-15
# intended to cut off the previously found solution
@constraint(m, x - x_opt <= eps)
@constraint(m, x - x_opt >= eps)
@constraint(m, y - y_opt <= eps)
@constraint(m, y - y_opt >= eps)
solve(m)
x_opt = getvalue(x)
y_opt = getvalue(y)
println("GOAL Programming (p=1): F1 = $(f1(x_opt, y_opt)), F2 = $(f2(x_opt, y_opt))")
I've done the same within a loop for my integer programming, i.e. wrapping the last lines in while solve(m) == :Optimal, to keep finding more options; a sketch of that loop follows.
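For the integer case, the loop can exclude each incumbent with a "no-good" cut. Below is a minimal sketch for a 0-1 program, written against the same old JuMP 0.18-era API (solve, getvalue) as the code above; the model itself is made up for illustration:
using JuMP, GLPKMathProgInterface
m = Model(solver = GLPKSolverMIP())
@variable(m, x[1:4], Bin)
@constraint(m, sum(x[i] for i=1:4) == 2)
@objective(m, Max, sum(i * x[i] for i=1:4))
while solve(m; suppress_warnings = true) == :Optimal
    sol = getvalue(x)
    println("found solution: ", sol)
    ones_idx = [i for i in 1:4 if sol[i] > 0.5]
    zeros_idx = [i for i in 1:4 if sol[i] <= 0.5]
    # no-good cut: at least one binary must flip relative to this solution
    @constraint(m, sum(1 - x[i] for i in ones_idx) + sum(x[i] for i in zeros_idx) >= 1)
end
Each pass prints one solution and then forbids it, so the loop terminates once the accumulated cuts make the model infeasible.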

Julia JuMP feasibility slack of constraints

In Julia, I am using JuMP to set up a simple optimization problem (an MWE; the real problem is much bigger).
model = Model()
set_optimizer(model, MosekTools.Optimizer)
@variable(model, 0 <= x[1:2])
@constraint(model, sum(x) <= 2)
@constraint(model, 1 <= sum(x))
@objective(model, Min, sum(x))
print(model)
Which gives this model:
Min x[1] + x[2]
Subject to
x[1] + x[2] ≤ 2.0
-x[1] - x[2] ≤ -1.0
x[1] ≥ 0.0
x[2] ≥ 0.0
I optimize this model via optimize!(model).
Now, obviously, the constraint x[1] + x[2] <= 2 is redundant and it has a feasibility slack of "3". My goal is to determine all the constraints that have slacks larger than 0 and display the slacks. Then I will delete those from the model.
To this end, I iterate over the constraints which are not variable bounds and print their values.
for (F, S) in list_of_constraint_types(model)
    # iterate over constraint types, skipping variable bounds
    if F != JuMP.VariableRef
        for ci in all_constraints(model, F, S)
            println(value(ci))
        end
    end
end
However, because I print the value of the constraints, I get the left-hand sides:
1.0
-1.0
I want to instead see the slacks as
0
3
How may I do this? Note that I am not necessarily interested in linear programs, so things like shadow_value are not useful for me.
Based on the accepted answer, I am adding a MWE that solves this problem.
model = Model()
set_optimizer(model, MosekTools.Optimizer)
@variable(model, 0 <= x[1:2])
@constraint(model, sum(x) <= 2)
@constraint(model, 1 <= sum(x))
@constraint(model, 0.9 <= sum(x))
@objective(model, Min, sum(x))
print(model)
optimize!(model)
constraints_to_delete = []
for (F, S) in list_of_constraint_types(model)
    if F != JuMP.VariableRef
        for ci in all_constraints(model, F, S)
            slack = normalized_rhs(ci) - value(ci)
            if slack > 1e-5
                push!(constraints_to_delete, ci)
                println(slack)
                # don't delete(model, ci) here: deleting while iterating invalidates the iterator
            end
        end
    end
end
for c in constraints_to_delete
    delete(model, c)
end
print(model)
Read this (hot off the press) tutorial: https://jump.dev/JuMP.jl/dev/tutorials/linear/lp_sensitivity/.
Although focused on LPs, it shows how to compute slacks and related quantities using normalized_rhs(ci) - value(ci).
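One detail to watch: value(ci) returns the constraint function's value after normalization, and the sign of normalized_rhs(ci) - value(ci) depends on whether the set is LessThan or GreaterThan. A small self-contained sketch (HiGHS is used here only so the example runs without a license; any LP solver works):
using JuMP, HiGHS
model = Model(HiGHS.Optimizer)
set_silent(model)
@variable(model, 0 <= x[1:2])
@constraint(model, sum(x) <= 2)  # LessThan: slack = rhs - value
@constraint(model, 1 <= sum(x))  # GreaterThan: slack = value - rhs
@objective(model, Min, sum(x))
optimize!(model)
for (F, S) in list_of_constraint_types(model)
    F == VariableRef && continue  # skip variable bounds
    for ci in all_constraints(model, F, S)
        raw = normalized_rhs(ci) - value(ci)
        # flip the sign for >= constraints so feasible slack is nonnegative
        slack = S <: MOI.GreaterThan ? -raw : raw
        println(ci, "  slack = ", slack)
    end
end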

What do multiple objective functions mean in Julia JuMP?

I have multiple objective functions for the same model in Julia JuMP, created using @objective in a for loop. What does it mean to have multiple objective functions in JuMP? Which objective is minimized, or are all the objectives minimized jointly? How would they be minimized jointly?
using JuMP
using MosekTools
K = 3
N = 2
penalties = [1.0, 3.9, 8.7]
function fac1(r::Number, i::Number, l::Number)
    fac1 = 1.0
    for m in 0:r-1
        fac1 *= (i - m) * (l - m)
    end
    return fac1
end
function fac2(r::Number, i::Number, l::Number, tau::Float64)
    return tau ^ (i + l - 2r + 1) / (i + l - 2r + 1)
end
function Q_r(i::Number, l::Number, r::Number, tau::Float64)
    if i >= r && l >= r
        return 2 * fac1(r, i, l) * fac2(r, i, l, tau)
    else
        return 0.0
    end
end
function Q(i::Number, l::Number, tau::Number)
    elem = 0
    for r in 0:N
        elem += penalties[r + 1] * Q_r(i, l, r, tau)
    end
    return elem
end
# discrete segment starting times
mat = Array{Float64, 3}(undef, K, N+1, N+1)
function Q_mat()
    for k in 0:K-1
        for i in 1:N+1
            for j in 1:N+1
                mat[k+1, i, j] = Q(i, j, convert(Float64, k))
            end
        end
        return mat
    end
end
function A_tau(r::Number, n::Number, tau::Float64)
    fac = 1
    for m in 1:r
        fac *= (n - (m - 1))
    end
    if n >= r
        return fac * tau ^ (n - r)
    else
        return 0.0
    end
end
function A_tau_mat(tau::Float64)
    mat = Array{Float64, 2}(undef, N+1, N+1)
    for i in 1:N+1
        for j in 1:N+1
            mat[i, j] = A_tau(i, j, tau)
        end
    end
    return mat
end
function A_0(r::Number, n::Number)
    if r == n
        fac = 1
        for m in 1:r
            fac *= r - (m - 1)
        end
        return fac
    else
        return 0.0
    end
end
m = Model(optimizer_with_attributes(Mosek.Optimizer, "QUIET" => false, "INTPNT_CO_TOL_DFEAS" => 1e-7))
@variable(m, A[i=1:K+1, j=1:K, k=1:N+1, l=1:N+1])
@variable(m, p[i=1:K+1, j=1:N+1])
# constraint difference might be a small fractional difference.
# assuming that time difference is 1 second starting from 0.
for i in 1:K
    @constraint(m, -A_tau_mat(convert(Float64, i-1)) * p[i] .+ A_tau_mat(convert(Float64, i-1)) * p[i+1] .== [0.0, 0.0, 0.0])
end
for i in 1:K+1
    @constraint(m, A_tau_mat(convert(Float64, i-1)) * p[i] .== [1.0 12.0 13.0])
end
@constraint(m, A_tau_mat(convert(Float64, K+1)) * p[K+1] .== [0.0 0.0 0.0])
for i in 1:K+1
    @objective(m, Min, p[i]' * Q_mat()[i] * p[i])
end
optimize!(m)
println("p value is ", value.(p))
println(A_tau_mat(0.0), A_tau_mat(1.0), A_tau_mat(2.0))
With standard JuMP you can have only one objective function at a time; running another @objective macro simply overwrites the previous one.
Consider the following code:
julia> m = Model(GLPK.Optimizer);
julia> @variable(m, x >= 0)
x
julia> @objective(m, Max, 2x)
2 x
julia> @objective(m, Min, 2x)
2 x
julia> println(m)
Min 2 x
Subject to
x >= 0.0
As you can see, only one objective function remains.
However, there is indeed an area of optimization called multi-criteria optimization, where the goal is to find the Pareto frontier.
There is a Julia package for handling multi-criteria models named MultiJuMP. Here is a sample code:
using MultiJuMP, JuMP
using Clp
const mmodel = multi_model(Clp.Optimizer, linear = true)
const y = @variable(mmodel, 0 <= y <= 10.0)
const z = @variable(mmodel, 0 <= z <= 10.0)
@constraint(mmodel, y + z <= 15.0)
const exp_obj1 = @expression(mmodel, -y + 0.05 * z)
const exp_obj2 = @expression(mmodel, 0.05 * y - z)
const obj1 = SingleObjective(exp_obj1)
const obj2 = SingleObjective(exp_obj2)
const multim = get_multidata(mmodel)
multim.objectives = [obj1, obj2]
optimize!(mmodel, method = WeightedSum())
This library also supports plotting the Pareto frontier.
The disadvantage is that, as of today, it does not seem to be actively maintained (although it still works with current Julia and JuMP versions).
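If all you need is a joint minimization, you can also scalarize by hand in plain JuMP with a weighted sum; sweeping the weight traces out part of the Pareto frontier for convex problems. A sketch using the same toy model as above (the weights are arbitrary):
using JuMP, Clp
m = Model(Clp.Optimizer)
@variable(m, 0 <= y <= 10.0)
@variable(m, 0 <= z <= 10.0)
@constraint(m, y + z <= 15.0)
obj1 = @expression(m, -y + 0.05 * z)
obj2 = @expression(m, 0.05 * y - z)
for w in (0.1, 0.5, 0.9)
    @objective(m, Min, w * obj1 + (1 - w) * obj2)  # overwrites the previous objective
    optimize!(m)
    println("w = $w: y = $(value(y)), z = $(value(z))")
end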

Element-wise multiplication in JuMP environment

I'm trying to implement the following constraint in a JuMP environment:
@constraint(m, ((c*x) + (p*o)) + (r.*z) - d .== g')
Unfortunately, I get the following error: ERROR: MethodError: no method matching append
Trying the element-wise multiplication alone, however, does not return any error and is added to the model correctly.
Here is the minimal example I'm working with.
m = Model(solver = GLPKSolverLP());
np = 3; # number of products
c = [  3   7   5;
       6   5   7;
       3   6   5;
     -28 -40 -32];
g = [200 200 200 -1500];
n = length(g);
o = [1 1 1]';
@variable(m, x[1:np] >= 0);
@variable(m, d[1:n] >= 0);
@variable(m, z[1:n] >= 0);
@variable(m, r[1:n] >= 0);
@variable(m, p[1:n, 1:np] >= 0);
@objective(m, Min, sum(d));
@constraint(m, ((c*x) + (p*o)) + (r.*z) - d .== g')
It seems there is a problem when you add a quadratic term to a linear term and the quadratic term appears on the right-hand side of the addition inside the @constraint macro.
There are two workarounds:
A. Write the quadratic term first:
@constraint(m, (r.*z) + ((c*x) + (p*o)) - d .== g')
B. Define the LHS of the equation outside the macro (then the order of the terms does not matter):
constr = ((c*x) + (p*o)) + (r.*z) - d
@constraint(m, constr .== g')
As a side note: your problem is quadratic, so GLPKSolverLP will not solve it, since it does not support such constraints.

Minimize the maximum variable

I have a mixed-integer programming problem. The objective is to minimize the maximum value among a vector of variables, each of which has an upper bound of 5. The problem looks like this:
m = Model(solver = GLPKSolverMIP())
@objective(m, Min, max(x[i] for i=1:12))
@variable(m, 0 <= x[i=1:12] <= 5, Int)
@constraint(m, sum(x[i] for i=1:12) == 12)
status = solve(m)
max is not part of the JuMP syntax, so I modified the problem to:
t = 1
status = :NotSolved
while t <= 5 && (status == :NotSolved || status == :Infeasible)
    m = Model(solver = GLPKSolverMIP())
    @variable(m, 0 <= x[i=1:12] <= t, Int)
    @constraint(m, sum(x[i] for i=1:12) == 12)
    status = solve(m)
    t += 1
end
This solution does the job by solving the problem iteratively, starting with an upper bound of 1 on the variables and increasing it by one until the solution is feasible. Is this really the best way to do this?
The question asks to minimize a maximum; that maximum can be held in an auxiliary variable, which we then minimize. To make this work, add constraints that force the new variable to actually be an upper bound on x. In code:
using GLPKMathProgInterface
using JuMP
m = Model(solver = GLPKSolverMIP())
@variable(m, 0 <= x[i=1:3] <= 5, Int)     # define variables
@variable(m, 0 <= t <= 12)                # define auxiliary variable
@constraint(m, t .>= x)                   # constrain t to be the max
@constraint(m, sum(x[i] for i=1:3) == 12) # the meat of the constraints
@objective(m, Min, t)                     # we wish to minimize the max
status = solve(m)
Now we can inspect the solution:
julia> getvalue(t)
4.0
julia> getvalue(x)
3-element Array{Float64,1}:
4.0
4.0
4.0
The actual problem the poster wanted to solve is probably more complex than this, but it can be solved by a variation on this framework.
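For reference, here is the same auxiliary-variable trick in current JuMP 1.x syntax, sized for the 12-variable problem from the question (HiGHS stands in for GLPK here; any MIP solver works):
using JuMP, HiGHS
m = Model(HiGHS.Optimizer)
@variable(m, 0 <= x[1:12] <= 5, Int)
@variable(m, 0 <= t <= 5)
@constraint(m, [i in 1:12], t >= x[i])  # t is an upper bound on every x[i]
@constraint(m, sum(x) == 12)
@objective(m, Min, t)
optimize!(m)
println("max = ", value(t), ", x = ", value.(x))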

Julia LoadError: MethodError: no method matching addconstraint(::Int64, ::JuMP.GenericRangeConstraint{JuMP.GenericAffExpr{Float64,JuMP.Variable}})

I am trying to run the following file. I tried uploading it piece by piece; that is why some of the components are commented out. No matter which component is active, I receive this error for the constraints part.
using JuMP
using Gurobi
pset = ["packaging1", "packaging2"]
size_pset = 2
fset = ["filling1", "filling2"]
size_fset = 2
mset = ["manufacturing1","manufacturing2"]
size_mset = 2
timeset = [1,2,3,4,5,6,7,8,9,10]
size_timeset = 10
fgset = ["product1","product2"]
size_fgset = 2
bulkset = ["bulk1","bulk2"]
size_bulkset = size_fgset
apiset = ["api1","api2"]
size_apiset = 2
scenarioset = ["s1","s2","s3","s4","s5","s6","s7","s8","s9","s10","s11","s12","s13","s14","s15","s16"]
size_scenarioset = 16
CFF = 0.7
CFP = 0.5
SR = 2
RW = 2
KGS = 2
ESTputP= 100
ESTputF= 100
ESTputM= 100
m = Model(solver=GurobiSolver())
@variable(m, ThputM[1:size_mset, 1:size_timeset, 1:size_apiset, 1:size_scenarioset] >= 0)
@variable(m, InThputM[1:size_fset, 1:size_timeset, 1:size_apiset, 1:size_bulkset, 1:size_scenarioset] >= 0)
@variable(m, XP_p[1:size_pset, 1:size_timeset, 1:size_fgset] >= 0)
@variable(m, XP_n[1:size_pset, 1:size_timeset, 1:size_fgset] >= 0)
@variable(m, XF_p[1:size_fset, 1:size_timeset, 1:size_bulkset] >= 0)
@variable(m, XF_n[1:size_fset, 1:size_timeset, 1:size_bulkset] >= 0)
@variable(m, XM_p[1:size_mset, 1:size_timeset, 1:size_apiset] >= 0)
@variable(m, XM_n[1:size_mset, 1:size_timeset, 1:size_apiset] >= 0)
# objective
@objective(m, Min, sum(XM_p[m,t,a]+XM_n[m,t,a] for m=1:size_mset for t=1:size_timeset for a=1:size_apiset)+
    sum(XF_p[f,t,b]+XF_n[f,t,b] for f=1:size_fset for t=1:size_timeset for b=1:size_bulkset)+
    sum(XP_p[p,t,f]+XP_n[p,t,f] for p=1:size_pset for t=1:size_timeset for f=1:size_fgset))
# Constraints:
# @constraint(m, thput_relateM[m=1:size_mset, t=1:size_timeset, a=1:size_apiset, s=1:size_scenarioset], ThputM[m,t,a,s] == SR*RW*KGS)
@constraint(m, thput_relate_M_Bulk_API[m=1:size_mset, t=1:size_timeset, a=1:size_apiset, s=1:size_scenarioset],
    sum(InThputM[m,t,a,b,s] for b=1:size_bulkset) == ThputM[m,t,a,s])
writeLP(m, "Sifomodel.lp"; genericnames=false)
status = solve(m)
println("Solve status: ", status)
println("Objective value: ", getobjectivevalue(m))
What could be the reason that adding the constraints, in particular, fails?
If you use m as an index, it will overwrite the model object m. Note that the error message says you are trying to call addconstraint with (Int64, constraint) arguments instead of (Model, constraint). A condensed sketch of the collision and the fix follows.
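For illustration, in the same old-JuMP style as the question (the model below is made up):
using JuMP, GLPKMathProgInterface
m = Model(solver = GLPKSolverLP())
@variable(m, x[1:2, 1:2] >= 0)
@constraint(m, sum(x[i, t] for i in 1:2, t in 1:2) >= 4)
# bad: reusing m as the summation index rebinds the name m,
# so later calls like @constraint(m, ...) no longer see the model:
#   @objective(m, Min, sum(x[m, t] for m in 1:2, t in 1:2))
# good: pick index names that do not collide with the model
@objective(m, Min, sum(x[i, t] for i in 1:2, t in 1:2))
status = solve(m)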
It seems that JuMP wants all constants on the right-hand side, with the variables collected on the left. When the constraints are rearranged in this manner, it accepts the model.
