Asymmetric Subspace Optimization (ASO) - openmdao

I'm trying to implement ASO for the Sellar problem, but the subproblem is not working as I expected, and I have no idea how to solve this.
Although the Sellar problem is not a good example for understanding the ASO architecture (both disciplines have pretty much the same computational cost), it's a simple formulation and I'm using it as a benchmark before applying the approach to the problem I'm studying.
My code is (OpenMDAO 1.7):
class low_order(Group):

    def __init__(self):
        super(low_order, self).__init__()

        self.add('plx', IndepVarComp('x', 1.0), promotes=['x'])
        self.add('plz', IndepVarComp('z', np.array([5.0, 2.0])), promotes=['z'])

        self.add('d1', SellarDis1(), promotes=['x', 'z', 'y1', 'y2'])
        # self.add('d2', SellarDis2(), promotes=['z', 'y1', 'y2'])

        self.add('obj_cmp', ExecComp('obj = x**2 + z[1] + y1 + exp(-y2)',
                                     z=np.array([0.0, 0.0])),
                 promotes=['obj', 'x', 'z', 'y1', 'y2'])

        self.add('con_cmp1', ExecComp('con1 = 3.16 - y1'), promotes=['con1', 'y1'])

        self.deriv_options['type'] = 'fd'
        self.deriv_options['form'] = 'central'


class SellarDerivatives(Group):

    def __init__(self):
        super(SellarDerivatives, self).__init__()

        self.add('ppx', IndepVarComp('x', 1.0), promotes=['x'])
        self.add('pz', IndepVarComp('z', np.array([5.0, 2.0])), promotes=['z'])

        self.add('d2', SellarDis2(), promotes=['z', 'y1', 'y2'])

        self.add('obj_cmp1', ExecComp('obj1 = x**2 + z[1] + y1 + exp(-y2)',
                                      z=np.array([0.0, 0.0])),
                 promotes=['obj1', 'x', 'z', 'y1', 'y2'])

        # self.add('con_cmp1', ExecComp('con1 = 3.16 - y1'), promotes=['con1', 'y1'])
        self.add('con_cmp2', ExecComp('con2 = y2 - 24.0'), promotes=['con2', 'y2'])

        self.nl_solver = NLGaussSeidel()
        self.nl_solver.options['atol'] = 1.0e-12
        self.ln_solver = ScipyGMRES()

        self.deriv_options['type'] = 'fd'
        self.deriv_options['form'] = 'central'


if __name__ == '__main__':
    from openmdao.api import Problem, ScipyOptimizer, SqliteRecorder

    sub = Problem()
    sub.root = low_order()
    sub.driver = ScipyOptimizer()  # pyOptSparseDriver()
    sub.driver.options['optimizer'] = 'SLSQP'
    sub.driver.options['disp'] = False

    sub.driver.add_desvar('x', lower=0.0, upper=10.0)
    sub.driver.add_objective('obj')
    sub.driver.add_constraint('con1', upper=0.0)
    # sub.driver.add_constraint('con2', upper=0.0)

    top = Problem()
    top.root = SellarDerivatives()
    top.driver = ScipyOptimizer()  # pyOptSparseDriver()
    top.driver.options['optimizer'] = 'SLSQP'

    top.driver.add_desvar('z', lower=np.array([-10.0, 0.0]),
                          upper=np.array([10.0, 10.0]))

    top.root.add('subprob1', SubProblem(sub, params=['z'], unknowns=['y1', 'x']))
    top.root.connect('z', 'subprob1.z')

    top.driver.add_objective('obj1')
    # top.driver.add_constraint('con1', upper=0.0)
    top.driver.add_constraint('con2', upper=0.0)

    top.setup()
    top.run()
When I look at the iterations, I see that only Z1 and Z2 are varying, as I expected, but I have no idea what is happening with X.
The lines below are: X | Z1 | Z2
(8.881784197001252e-16, 5.000916977558285, 1.000912901563544)
(1.0000000008881784e-06, 5.000916977558285, 1.000912901563544)
(-9.999999991118215e-07, 5.000916977558285, 1.000912901563544)
(8.881784197001252e-16, 5.000912478379066, 1.0009120015635442)
(8.881784197001252e-16, 5.000912478379066, 1.0009120015635442)
(1.0000000008881784e-06, 5.000912478379066, 1.0009120015635442)
(-9.999999991118215e-07, 5.000912478379066, 1.0009120015635442)
And the final answer is ( Z1 , Z2 , X ):
Minimum found at (5.000912, 1.000912, 1.000000)
Coupling vars: 0.000000, 6.001824
('Minimum objective: ', 2.0033861370124493)
P.S. As the Sellar problem is not my main objective, I'm using FD across the whole model, just to make things easier for me.
Thanks!

I got this to work with a few changes (it didn't work at all with the code you sent, but we may have tightened some error checking between 1.7 and 1.7.3.)
You need to pass y2 into the subproblem to complete the cycle:
top.root.add('subprob1', SubProblem(sub, params=['z', 'y2'], unknowns=['y1', 'x']), promotes=['*'])
And you need to remove the extra IndepVarComp for variable 'x' in the top "SellarDerivatives" group because you are letting the subproblem be the ultimate source.
#self.add('ppx', IndepVarComp('x', 1.0), promotes=['x'])
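Putting those two changes together, the top-level part of the script would look roughly like this. This is only a sketch: it assumes sub is built exactly as in your script, that SubProblem is imported from openmdao.api in your 1.7.x install, and that promoting everything from the subproblem replaces the explicit connect('z', 'subprob1.z').

# Sketch of the corrected top-level setup -- import location and the dropped
# connect call are assumptions; adjust to match your install.
from openmdao.api import Problem, ScipyOptimizer, SubProblem
import numpy as np

top = Problem()
top.root = SellarDerivatives()   # with the 'ppx' IndepVarComp removed
top.driver = ScipyOptimizer()
top.driver.options['optimizer'] = 'SLSQP'

# 'y2' is now a param of the subproblem, which closes the y1 <-> y2 cycle,
# and the subproblem's 'x' and 'y1' become the sources at the top level.
top.root.add('subprob1',
             SubProblem(sub, params=['z', 'y2'], unknowns=['y1', 'x']),
             promotes=['*'])

top.driver.add_desvar('z', lower=np.array([-10.0, 0.0]),
                      upper=np.array([10.0, 10.0]))
top.driver.add_objective('obj1')
top.driver.add_constraint('con2', upper=0.0)

top.setup()
top.run()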
With those changes and using pyoptsparse, I get for the sub:
Objectives:
    Name    Value      Optimum
    obj     3.18339    0

Variables (c - continuous, i - integer, d - discrete):
    Name    Type    Value       Lower Bound    Upper Bound
    x_0     c       0.000004    0.00e+00       1.00e+01
and for the main problem:
Objectives:
    Name    Value      Optimum
    obj1    3.18339    0

Variables (c - continuous, i - integer, d - discrete):
    Name    Type    Value       Lower Bound    Upper Bound
    z_0     c       1.977638    -1.00e+01      1.00e+01
    z_1     c       0.000000    0.00e+00       1.00e+01

Related

Can't get performant Julia Turing model

I've tried to reproduce the model from a PyMC3 and Stan comparison, but it seems to run slowly, and when I look at @code_warntype there are some things -- K and N, I think -- which the compiler seemingly infers as Any.
I've tried adding types -- though I can't add types to turing_model's arguments, and things are complicated within turing_model because it's using autodiff variables rather than the usual types. I put all the code into the function do_it to avoid globals, because they say that globals can slow things down. (It actually seems slower, though.)
Any suggestions as to what's causing the problem? The turing_model code is what's iterating, so that should make the most difference.
using Turing, StatsPlots, Random

sigmoid(x) = 1.0 / (1.0 + exp(-x))

function scale(w0::Float64, w1::Array{Float64,1})
    scale = √(w0^2 + sum(w1 .^ 2))
    return w0 / scale, w1 ./ scale
end

function do_it(iterations::Int64)::Chains
    K = 10                       # predictor dimension
    N = 1000                     # number of data samples
    X = rand(N, K)               # predictors (1000, 10)
    w1 = rand(K)                 # weights (10,)
    w0 = -median(X * w1)         # 50% of elements for each class (number)
    w0, w1 = scale(w0, w1)       # unit length (euclidean)
    w_true = [w0, w1...]
    y = (w0 .+ (X * w1)) .> 0.0  # labels
    y = [Float64(x) for x in y]
    σ = 5.0
    σm = [x == y ? σ : 0.0 for x in 1:K, y in 1:K]

    @model turing_model(X, y, σ, σm) = begin
        w0_pred ~ Normal(0.0, σ)
        w1_pred ~ MvNormal(σm)
        p = sigmoid.(w0_pred .+ (X * w1_pred))
        @inbounds for n in 1:length(y)
            y[n] ~ Bernoulli(p[n])
        end
    end

    @time chain = sample(turing_model(X, y, σ, σm), NUTS(iterations, 200, 0.65));

    # ϵ = 0.5
    # τ = 10
    # @time chain = sample(turing_model(X, y, σ), HMC(iterations, ϵ, τ));

    return (w_true=w_true, chains=chain::Chains)
end

chain = do_it(1000)

How to fix TypeError: in setindex! in DifferentialEquations.jl

Recently, I got started with Julia's (v1.0.3) DifferentialEquations.jl package. I tried solving a simple ODE system, with the same structure as my real model, but much smaller.
Depending on which solver I use, the example either solves or throws an error. Consider this MWE, a chemical engineering model of a consecutive / parallel reaction in a CSTR:
using DifferentialEquations
using Plots

# Modeling a consecutive / parallel reaction in a CSTR
# A --> 2B --> C, C --> 2B, B --> D

# PETERSEN-Matrix
# No. |  A   B   C   D | Rate
#  1  | -1   2         | k1*A
#  2  |     -2   1     | k2*B*B
#  3  |      2  -1     | k3*C
#  4  |     -1       1 | k4*B

function fpr(dx, x, params, t)
    k_1, k_2, k_3, k_4, q_in, V_liq, A_in, B_in, C_in, D_in = params

    # Rate equations
    rate = Array{Float64}(undef, 4)
    rate[1] = k_1*x[1]
    rate[2] = k_2*x[2]*x[2]
    rate[3] = k_3*x[3]
    rate[4] = k_4*x[2]

    dx[1] = -rate[1] + q_in/V_liq*(A_in - x[1])
    dx[2] = 2*rate[1] - 2*rate[2] + 2*rate[3] - rate[4] + q_in/V_liq*(B_in - x[2])
    dx[3] = rate[2] - rate[3] + q_in/V_liq*(C_in - x[3])
    dx[4] = rate[4] + q_in/V_liq*(D_in - x[4])
end

u0 = [1.5, 0.1, 0, 0]
params = [1.0, 1.5, 0.75, 0.15, 3, 15, 0.5, 0, 0, 0]
tspan = (0.0, 15.0)

prob = ODEProblem(fpr, u0, tspan, params)
sol = solve(prob)
plot(sol)
This works perfectly.
However, if I choose a different solver, say Rosenbrock23() or Rodas4(), the ODE is not solved and I get the following error:
ERROR: LoadError: TypeError: in setindex!, in typeassert, expected Float64,
got ForwardDiff.Dual{Nothing,Float64,4}
I won't paste the whole stacktrace here, since it is very long, but you can easily reproduce this by changing sol = solve(prob) to sol = solve(prob, Rosenbrock23()). It seems to me that the error occurs when the solver tries to compute Jacobians, but I have no clue why. And why does the default solver work, but others don't?
Please, could anyone tell me why this error occurs and how it can be fixed?
Automatic differentiation works by passing Dual types through your function, instead of the floats you would normally use it with. So the problem arises because you fix the internal variable rate to be of type Vector{Float64} (see the third point here, and this advice). Fortunately, that's easy to fix (and even better looking, IMHO):
function fpr(dx, x, params, t)
    k_1, k_2, k_3, k_4, q_in, V_liq, A_in, B_in, C_in, D_in = params

    # Rate equations
    # should actually be rate = [k_1*x[1], k_2*x[2]*x[2], k_3*x[3], k_4*x[2]],
    # as per @LutzL's comment
    rate = [k_1*x[1], k_2*x[2], k_3*x[3], k_4*x[2]]

    dx[1] = -rate[1] + q_in/V_liq*(A_in - x[1])
    dx[2] = 2*rate[1] - 2*rate[2] + 2*rate[3] - rate[4] + q_in/V_liq*(B_in - x[2])
    dx[3] = rate[2] - rate[3] + q_in/V_liq*(C_in - x[3])
    dx[4] = rate[4] + q_in/V_liq*(D_in - x[4])
end
That works with both Rosenbrock23 and Rodas4.
Alternatively, you can turn off AD with Rosenbrock23(autodiff=false) (which, I think, will use finite differences instead), or supply a Jacobian.

Semi-total approx of a Group in a Group does not use the same input

Here is a sample abbreviated N2 diagram. I have one group (gr1) attached to linear and nonlinear solvers (DirectSolver and NonlinearBlockGS).
If I use the setup shown in version 1, with a gradient-based optimizer for the whole problem, the finite difference is applied to the variables D1 and D2 (which are floats).
If I wrap gr1 with gr2, with the same setup (except the approx_totals is inside gr2 now), the finite difference is applied to t and d, which are ndarrays, resulting in a minimum of n*2 function evaluations.
Of course it is not necessary to have gr2 in this setup, but my aim is to have the version tagged as "future desired" in the image below.
I cannot replicate the same problem with the Sellar MDA, which is weird, as I tried to set it up identically. Nevertheless, I add the Sellar MDA code that illustrates my problem. The variable called "ver1", set to True or False, changes the setup.
ver1=False --> is the case with a single cycle group. In my setup the FD is applied to the global design variable, which is what I want.
ver1=True --> is the case with a group in a group. Here the FD is applied to the large arrays, which are the coupling parameters.
from openmdao.api import Problem, ScipyOptimizeDriver, ExecComp, IndepVarComp, \
    DirectSolver, ExplicitComponent, NonlinearBlockGS, Group
import numpy as np


class SellarDis1(ExplicitComponent):
    """
    Component containing Discipline 1 -- no derivatives version.
    """

    def setup(self):
        # Global Design Variable
        self.add_input('z', val=np.zeros(2))

        # Coupling parameter
        self.add_input('y2', val=1.0)

        # Coupling output
        self.add_output('y1', val=1.0)

        # Finite difference all partials.
        self.declare_partials('*', '*', method='fd')

    def compute(self, inputs, outputs):
        """
        Evaluates the equation
        y1 = z1**2 + z2 + x1 - 0.2*y2
        """
        z1 = inputs['z'][0]
        z2 = inputs['z'][1]
        y2 = inputs['y2']

        print(inputs['z'])
        outputs['y1'] = z1**2 + z2 - 0.2*y2


class SellarDis2(ExplicitComponent):
    """
    Component containing Discipline 2 -- no derivatives version.
    """

    def setup(self):
        # Global Design Variable
        self.add_input('z', val=np.zeros(2))

        # Coupling parameter
        self.add_input('y1', val=1.0)

        # Coupling output
        self.add_output('y2', val=1.0)

        # Finite difference all partials.
        self.declare_partials('*', '*', method='fd')

    def compute(self, inputs, outputs):
        """
        Evaluates the equation
        y2 = y1**(.5) + z1 + z2
        """
        z1 = inputs['z'][0]
        z2 = inputs['z'][1]
        y1 = inputs['y1']

        # Note: this may cause some issues. However, y1 is constrained to be
        # above 3.16, so lets just let it converge, and the optimizer will
        # throw it out
        if y1.real < 0.0:
            y1 *= -1

        outputs['y2'] = y1**.5 + z1 + z2


class SellarMDA(Group):
    """
    Group containing the Sellar MDA.
    """

    def setup(self):
        ver1 = False
        if ver1:
            cycle = self.add_subsystem('cycle', Group(), promotes=['*'])
            cycle.add_subsystem('d1', SellarDis1(), promotes_inputs=['z', 'y2'],
                                promotes_outputs=['y1'])
            cycle.add_subsystem('d2', SellarDis2(), promotes_inputs=['z', 'y1'],
                                promotes_outputs=['y2'])

            # Nonlinear Block Gauss Seidel is a gradient free solver
            cycle.nonlinear_solver = NonlinearBlockGS()
        else:
            self.add_subsystem('d1', SellarDis1(), promotes_inputs=['z', 'y2'],
                               promotes_outputs=['y1'])
            self.add_subsystem('d2', SellarDis2(), promotes_inputs=['z', 'y1'],
                               promotes_outputs=['y2'])
            self.nonlinear_solver = NonlinearBlockGS()

        self.approx_totals()


prob = Problem()
indeps = prob.model.add_subsystem('indeps', IndepVarComp(), promotes=['*'])
indeps.add_output('z', np.array([5.0, 2.0]))

SellarMDA11 = SellarMDA()
prob.model.add_subsystem('SellarMDA', SellarMDA11, promotes=['*'])
# SellarMDA11.approx_totals()

prob.model.add_subsystem('obj_cmp', ExecComp('obj = z[1] + y1 + exp(-y2)',
                                             z=np.array([0.0, 0.0])),
                         promotes=['z', 'y1', 'y2', 'obj'])

prob.model.add_subsystem('con_cmp1', ExecComp('con1 = 3.16 - y1'), promotes=['con1', 'y1'])
prob.model.add_subsystem('con_cmp2', ExecComp('con2 = y2 - 24.0'), promotes=['con2', 'y2'])

prob.driver = ScipyOptimizeDriver()
prob.driver.options['optimizer'] = 'SLSQP'
# prob.driver.options['maxiter'] = 100
prob.driver.options['tol'] = 1e-8

prob.model.add_design_var('z', lower=0, upper=10)
prob.model.add_objective('obj')
prob.model.add_constraint('con1', upper=0)
prob.model.add_constraint('con2', upper=0)

prob.setup()
prob.set_solver_print(level=0)

# Ask OpenMDAO to finite-difference across the model to compute the gradients for the optimizer
# prob.model.approx_totals()

prob.run_driver()

print('minimum found at')
print(prob['z'])
print('minimum objective')
print(prob['obj'][0])
We examined the code and the example you gave closely and were not able to replicate any of the problems you described. Without a test case, we can't make any more progress on this.

Time-dependent events in ODE

I recently started with Julia and wanted to implement one of my usual problems: time-dependent events.
For now I have:
# Packages
using Plots
using DifferentialEquations
# Parameters
k21 = 0.14*24
k12 = 0.06*24
ke = 1.14*24
α = 0.5
β = 0.05
η = 0.477
μ = 0.218
k1 = 0.5
V1 = 6
# Time
maxtime = 10
tspan = (0.0, maxtime)
# Dose
stim = 100
# Initial conditions
x0 = [0 0 2e11 8e11]
# Model equations
function system(dy, y, p, t)
    dy[1] = k21*y[2] - (k12 + ke)*y[1]
    dy[2] = k12*y[1] - k21*y[2]
    dy[3] = (α - μ - η)*y[3] + β*y[4] - k1/V1*y[1]*y[3]
    dy[4] = μ*y[3] - β*y[4]
end
# Events
eventtimes = [2, 5]
function condition(y, t, integrator)
    t - eventtimes
end
function affect!(integrator)
    x0[1] = stim
end
cb = ContinuousCallback(condition, affect!)
# Solve
prob = ODEProblem(system, x0, tspan)
sol = solve(prob, Rodas4(), callback = cb)
# Plotting
plot(sol, layout = (2, 2))
But the output it gives is not correct. More specifically, the events are not taken into account, and the initial condition for y1 doesn't seem to be 0 but stim.
Any help would be greatly appreciated.
t - eventtimes doesn't work because one is a scalar and the other is a vector. But for this case, it's much easier to just use a DiscreteCallback. When you make it a DiscreteCallback, you should pre-set the stop times so that it hits 2 and 5 for the callback. Here's an example:
# Packages
using Plots
using DifferentialEquations
# Parameters
k21 = 0.14*24
k12 = 0.06*24
ke = 1.14*24
α = 0.5
β = 0.05
η = 0.477
μ = 0.218
k1 = 0.5
V1 = 6
# Time
maxtime = 10
tspan = (0.0, maxtime)
# Dose
stim = 100
# Initial conditions
x0 = [0 0 2e11 8e11]
# Model equations
function system(dy, y, p, t)
    dy[1] = k21*y[2] - (k12 + ke)*y[1]
    dy[2] = k12*y[1] - k21*y[2]
    dy[3] = (α - μ - η)*y[3] + β*y[4] - k1/V1*y[1]*y[3]
    dy[4] = μ*y[3] - β*y[4]
end
# Events
eventtimes = [2.0, 5.0]
function condition(y, t, integrator)
    t ∈ eventtimes
end
function affect!(integrator)
    integrator.u[1] = stim
end
cb = DiscreteCallback(condition, affect!)
# Solve
prob = ODEProblem(system, x0, tspan)
sol = solve(prob, Rodas4(), callback = cb, tstops = eventtimes)
# Plotting
plot(sol, layout = (2, 2))
This avoids rootfinding altogether, so it should be a much nicer solution than hacking time choices into a rootfinding system.
Either way, notice that the affect was changed to
function affect!(integrator)
    integrator.u[1] = stim
end
It needs to modify the current u value, otherwise it won't do anything.

OpenMDAO1+: variable trees in parallel

I have a large set of model parameters controlling several different components. The model is being run in parallel. The model parameters are held constant during the run. The problem is that I have to add an IndepVarComp() for every model parameter when running in parallel, even though I would like to pass them all by object. I need to be able to edit the values in my run script before running the model (between setup and run). Is there a good way of doing this? I recognize the data passing issue due to running under MPI without a "source" for the parameters.
It works if I add an IndepVarComp() for each model parameter, as long as I don't pass by object. This makes sense, if I tell OpenMDAO I want to be able to change the value and track how the model changes, then passing by object is contradictory. However, I need to be able to pass in the parameter values after setup and I can't do that under MPI without making an IndepVarComp() for each model parameter.
I have attached an example based on the Sellar problem from the OpenMDAO docs of what I want to do. By uncommenting line 28, commenting out line 27, and uncommenting line 139 in src.py, the example functions fine in parallel.
run with $ mpirun -np 4 python call.py
call.py
from __future__ import print_function
from openmdao.api import Problem, ScipyOptimizer
from src import SellarDerivativesSuperGroup
import numpy as np

if __name__ == "__main__":

    ######################### for MPI functionality #########################
    from openmdao.core.mpi_wrap import MPI

    # if MPI: # pragma: no cover
    # if you called this script with 'mpirun', then use the petsc data passing
    if MPI:
        from openmdao.core.petsc_impl import PetscImpl as impl
    else:
        from openmdao.api import BasicImpl as impl
    # else:
    #     # if you didn't use 'mpirun', then use the numpy data passing
    #     from openmdao.api import BasicImpl as impl

    def mpi_print(prob, *args):
        """ helper function to only print on rank 0 """
        if prob.root.comm.rank == 0:
            print(*args)
    ##################

    nProblems = 4
    datasize = 10
    top = Problem(impl=impl)
    top.root = SellarDerivativesSuperGroup(nProblems=nProblems, datasize=datasize)

    top.driver = ScipyOptimizer()
    top.driver.options['optimizer'] = 'SLSQP'
    top.driver.options['tol'] = 1.0e-8

    top.driver.add_desvar('z', lower=np.array([-10.0, 0.0]),
                          upper=np.array([10.0, 10.0]))
    top.driver.add_desvar('x', lower=0.0, upper=10.0)

    top.driver.add_objective('obj')
    top.driver.add_constraint('con1', upper=0.0)
    top.driver.add_constraint('con2', upper=0.0)

    top.setup(check=True)

    # Setting initial values for design variables
    top['x'] = 1.0
    top['z'] = np.array([5.0, 2.0])
    top['varTree:leaf1'] = np.ones(datasize)

    top.run()

    if top.root.comm.rank == 0:
        print("\n")
        print("Minimum found at (%f, %f, %f)" % (top['z'][0],
                                                 top['z'][1],
                                                 top['x']))
        print("Coupling vars: %f, %f" % (top['y1_0'], top['y2_0']))
        print("Minimum objective: ", top['obj']/nProblems)
src.py
from __future__ import print_function
from openmdao.api import ExecComp, IndepVarComp, Group, NLGaussSeidel, \
    Component, ParallelGroup, ScipyGMRES
import numpy as np


class SellarDis1(Component):
    """Component containing Discipline 1."""

    def __init__(self, problem_id=0, datasize=0):
        super(SellarDis1, self).__init__()

        self.problem_id = problem_id

        # Global Design Variable
        self.add_param('z', val=np.zeros(2))

        # Local Design Variable
        self.add_param('x', val=0.)

        # Coupling parameter
        self.add_param('y2_%i' % problem_id, val=1.0)

        # Dummy variable tree element
        self.add_param('varTree:leaf1', val=np.zeros(datasize), pass_by_obj=True)
        # self.add_param('varTree:leaf1', val=np.zeros(datasize), pass_by_obj=False)

        # Coupling output
        self.add_output('y1_%i' % problem_id, val=1.0)

    def solve_nonlinear(self, params, unknowns, resids):
        """Evaluates the equation
        y1 = z1**2 + z2 + x1 - 0.2*y2"""

        problem_id = self.problem_id

        z1 = params['z'][0]
        z2 = params['z'][1]
        x1 = params['x']
        y2 = params['y2_%i' % problem_id]

        unknowns['y1_%i' % problem_id] = z1**2 + z2 + x1 - 0.2*y2

    def linearize(self, params, unknowns, resids):
        """ Jacobian for Sellar discipline 1."""

        problem_id = self.problem_id

        J = {}
        J['y1_%i' % problem_id, 'y2_%i' % problem_id] = -0.2
        J['y1_%i' % problem_id, 'z'] = np.array([[2*params['z'][0], 1.0]])
        J['y1_%i' % problem_id, 'x'] = 1.0
        return J


class SellarDis2(Component):
    """Component containing Discipline 2."""

    def __init__(self, problem_id=0):
        super(SellarDis2, self).__init__()

        self.problem_id = problem_id

        # Global Design Variable
        self.add_param('z', val=np.zeros(2))

        # Coupling parameter
        self.add_param('y1_%i' % problem_id, val=1.0)

        # Coupling output
        self.add_output('y2_%i' % problem_id, val=1.0)

    def solve_nonlinear(self, params, unknowns, resids):
        """Evaluates the equation
        y2 = y1**(.5) + z1 + z2"""

        problem_id = self.problem_id

        z1 = params['z'][0]
        z2 = params['z'][1]
        y1 = params['y1_%i' % problem_id]

        # Note: this may cause some issues. However, y1 is constrained to be
        # above 3.16, so lets just let it converge, and the optimizer will
        # throw it out
        y1 = abs(y1)

        unknowns['y2_%i' % problem_id] = y1**.5 + z1 + z2

    def linearize(self, params, unknowns, resids):
        """ Jacobian for Sellar discipline 2."""

        problem_id = self.problem_id

        J = {}
        J['y2_%i' % problem_id, 'y1_%i' % problem_id] = .5*params['y1_%i' % problem_id]**-.5
        J['y2_%i' % problem_id, 'z'] = np.array([[1.0, 1.0]])
        return J


class SellarDerivativesSubGroup(Group):

    def __init__(self, problem_id=0, datasize=0):
        super(SellarDerivativesSubGroup, self).__init__()

        self.add('d1', SellarDis1(problem_id=problem_id, datasize=datasize), promotes=['*'])
        self.add('d2', SellarDis2(problem_id=problem_id), promotes=['*'])

        self.nl_solver = NLGaussSeidel()
        self.nl_solver.options['atol'] = 1.0e-12
        self.ln_solver = ScipyGMRES()


class SellarDerivatives(Group):
    """ Group containing the Sellar MDA. This version uses the disciplines
    with derivatives."""

    def __init__(self, problem_id=0, datasize=0):
        super(SellarDerivatives, self).__init__()

        self.add('d', SellarDerivativesSubGroup(problem_id=problem_id, datasize=datasize),
                 promotes=['*'])


class SellarDerivativesSuperGroup(Group):

    def __init__(self, nProblems=0, datasize=0):
        super(SellarDerivativesSuperGroup, self).__init__()

        self.add('px', IndepVarComp('x', 1.0), promotes=['*'])
        self.add('pz', IndepVarComp('z', np.array([5.0, 2.0])), promotes=['*'])
        # self.add('vt', IndepVarComp('varTree:leaf1', val=np.zeros(datasize)), promotes=['*'])

        pg = self.add('manySellars', ParallelGroup(), promotes=['*'])
        print(nProblems)
        for problem_id in np.arange(0, nProblems):
            pg.add('Sellar%i' % problem_id,
                   SellarDerivatives(problem_id=problem_id, datasize=datasize),
                   promotes=['*'])

        self.add('obj_cmp', ExecComp('obj = (x**2 + z[1] + y1_0 + exp(-y2_0)) + (x**2 + z[1] + y1_1 + exp(-y2_1)) + '
                                     '(x**2 + z[1] + y1_2 + exp(-y2_2)) + (x**2 + z[1] + y1_3 + exp(-y2_3))',
                                     z=np.array([0.0, 0.0]), x=0.0,
                                     y1_0=0.0, y2_0=0.0,
                                     y1_1=0.0, y2_1=0.0,
                                     y1_2=0.0, y2_2=0.0,
                                     y1_3=0.0, y2_3=0.0),
                 promotes=['*'])

        self.add('con_cmp1', ExecComp('con1 = 3.16 - y1_0'), promotes=['*'])
        self.add('con_cmp2', ExecComp('con2 = y2_0 - 24.0'), promotes=['*'])
If these parameters will never be used as optimization design variables, you don't have to declare them as OpenMDAO variables. You could just declare these things as regular Python attributes in the __init__ methods, then write a small method that loops over the hierarchy and sets the attribute values to whatever you want.
That might be a little simpler than adding IndepVarComps with pass-by-object, though your own proposed solution also works.
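For illustration, here is a rough sketch of that plain-attribute idea (hypothetical names, not your full model; the traversal call subsystems(recurse=True) is my assumption for the OpenMDAO 1.x API, so swap in whatever iterator your version actually provides):

import numpy as np
from openmdao.api import Component


class Disc(Component):
    def __init__(self, datasize=10):
        super(Disc, self).__init__()
        self.add_param('x', val=0.0)
        self.add_output('y', val=0.0)
        # Held-constant model data as a plain Python attribute: the framework
        # never sees it, so no IndepVarComp and no MPI data passing is needed.
        self.model_params = np.zeros(datasize)

    def solve_nonlinear(self, params, unknowns, resids):
        unknowns['y'] = params['x'] + self.model_params.sum()


def set_model_params(group, values):
    """Small helper that loops over the hierarchy and sets the attribute on
    every Disc it finds (traversal API assumed -- see note above)."""
    for s in group.subsystems(recurse=True):
        if isinstance(s, Disc):
            s.model_params = values

You would then call something like set_model_params(top.root, my_values) in the run script between setup() and run().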
On further investigation, I found that I can specify pass_by_obj in an IndepVarComp(). That solves part of the problem. The other part I solved by creating a function that adds the params, rather than having a large list of parameters in my constructor, which would decrease readability.
My solution is below. If someone else has a better one, I would definitely be interested.
src.py
from __future__ import print_function
from openmdao.api import ExecComp, IndepVarComp, Group, NLGaussSeidel, \
Component, ParallelGroup, ScipyGMRES
import numpy as np
class SellarDis1(Component):
"""Component containing Discipline 1."""
def __init__(self, problem_id=0, datasize=0):
super(SellarDis1, self).__init__()
self.problem_id = problem_id
# Global Design Variable
self.add_param('z', val=np.zeros(2))
# Local Design Variable
self.add_param('x', val=0.)
# Coupling parameter
self.add_param('y2_%i' % problem_id, val=1.0)
# Dummy variable tree element
# self.add_param('varTree:leaf1', val=np.zeros(datasize), pass_by_obj=True)
self.add_param('varTree:leaf1', val=np.zeros(datasize), pass_by_obj=True)
# Coupling output
self.add_output('y1_%i' % problem_id, val=1.0)
def solve_nonlinear(self, params, unknowns, resids):
"""Evaluates the equation
y1 = z1**2 + z2 + x1 - 0.2*y2"""
problem_id = self.problem_id
z1 = params['z'][0]
z2 = params['z'][1]
x1 = params['x']
y2 = params['y2_%i' % problem_id]
unknowns['y1_%i' % problem_id] = z1**2 + z2 + x1 - 0.2*y2
def linearize(self, params, unknowns, resids):
""" Jacobian for Sellar discipline 1."""
problem_id = self.problem_id
J = {}
J['y1_%i' % problem_id, 'y2_%i' % problem_id] = -0.2
J['y1_%i' % problem_id, 'z'] = np.array([[2*params['z'][0], 1.0]])
J['y1_%i' % problem_id, 'x'] = 1.0
return J
class SellarDis2(Component):
"""Component containing Discipline 2."""
def __init__(self, problem_id=0):
super(SellarDis2, self).__init__()
self.problem_id = problem_id
# Global Design Variable
self.add_param('z', val=np.zeros(2))
# Coupling parameter
self.add_param('y1_%i' % problem_id, val=1.0)
# Coupling output
self.add_output('y2_%i' % problem_id, val=1.0)
def solve_nonlinear(self, params, unknowns, resids):
"""Evaluates the equation
y2 = y1**(.5) + z1 + z2"""
problem_id = self.problem_id
z1 = params['z'][0]
z2 = params['z'][1]
y1 = params['y1_%i' % problem_id]
# Note: this may cause some issues. However, y1 is constrained to be
# above 3.16, so lets just let it converge, and the optimizer will
# throw it out
y1 = abs(y1)
unknowns['y2_%i' % problem_id] = y1**.5 + z1 + z2
def linearize(self, params, unknowns, resids):
""" Jacobian for Sellar discipline 2."""
problem_id = self.problem_id
J = {}
J['y2_%i' % problem_id, 'y1_%i' % problem_id] = .5*params['y1_%i' % problem_id]**-.5
J['y2_%i' % problem_id, 'z'] = np.array([[1.0, 1.0]])
return J
class SellarDerivativesSubGroup(Group):
def __init__(self, problem_id=0, datasize=0):
super(SellarDerivativesSubGroup, self).__init__()
self.add('d1', SellarDis1(problem_id=problem_id, datasize=datasize), promotes=['*'])
self.add('d2', SellarDis2(problem_id=problem_id), promotes=['*'])
self.nl_solver = NLGaussSeidel()
self.nl_solver.options['atol'] = 1.0e-12
self.ln_solver = ScipyGMRES()
class SellarDerivatives(Group):
""" Group containing the Sellar MDA. This version uses the disciplines
with derivatives."""
def __init__(self, problem_id=0, datasize=0):
super(SellarDerivatives, self).__init__()
self.add('d', SellarDerivativesSubGroup(problem_id=problem_id, datasize=datasize), promotes=['*'])
class SellarDerivativesSuperGroup(Group):
def __init__(self, nProblems=0, datasize=0):
super(SellarDerivativesSuperGroup, self).__init__()
self.add('px', IndepVarComp('x', 1.0), promotes=['*'])
self.add('pz', IndepVarComp('z', np.array([5.0, 2.0])), promotes=['*'])
# self.add('vt', MyIndepVarComp(datasize=datasize), promotes=['*'])
# self.add('vt', IndepVarComp('varTree:leaf1', val=np.zeros(datasize), pass_by_obj=True), promotes=['*'])
addVariableTree(self, datasize=datasize)
pg = self.add('manySellars', ParallelGroup(), promotes=['*'])
print(nProblems)
for problem_id in np.arange(0, nProblems):
pg.add('Sellar%i' % problem_id, SellarDerivatives(problem_id=problem_id, datasize=datasize), promotes=['*'])
self.add('obj_cmp', ExecComp('obj = (x**2 + z[1] + y1_0 + exp(-y2_0)) + (x**2 + z[1] + y1_1 + exp(-y2_1)) + '
'(x**2 + z[1] + y1_2 + exp(-y2_2)) + (x**2 + z[1] + y1_3 + exp(-y2_3))',
z=np.array([0.0, 0.0]), x=0.0,
y1_0=0.0, y2_0=0.0,
y1_1=0.0, y2_1=0.0,
y1_2=0.0, y2_2=0.0,
y1_3=0.0, y2_3=0.0),
promotes=['*'])
self.add('con_cmp1', ExecComp('con1 = 3.16 - y1_0'), promotes=['*'])
self.add('con_cmp2', ExecComp('con2 = y2_0 - 24.0'), promotes=['*'])
def addVariableTree(openmdao_class, datasize=0):
openmdao_class.add('vt', IndepVarComp('varTree:leaf1', val=np.zeros(datasize), pass_by_obj=True), promotes=['*'])
