Implicit and explicit components with same inputs - openmdao

How can you define implicit and explicit outputs in the same component when they share the same inputs?
I tried to do this with an ImplicitComponent, like this:
class Dis(ImplicitComponent):

    def setup(self):
        self.add_input('a')
        self.add_input('b')
        self.add_input('c')
        self.add_output('z')
        self.add_output('y1')
        self.add_output('y2')
        self.add_output('y3')
        self.declare_partials(of='*', wrt='*', method='fd')

    def apply_nonlinear(self, inputs, outputs, residuals):
        residuals['z'] = (1 - outputs['z']) / inputs['a'] ** 0.5 * inputs['b'] - inputs['c']

    def solve_nonlinear(self, inputs, outputs):
        outputs['y1'] = (1 - outputs['z']) / inputs['a'] ** 0.5 * inputs['b']
        outputs['y2'] = inputs['a'] * (0.01 * outputs['z'] + 1)
        outputs['y3'] = inputs['b'] * (outputs['y2'] / inputs['a'])
So, within the same discipline I need z to vary to satisfy the residual equation, and I also need to compute y1, y2 and y3 for the rest of my system.
However, OpenMDAO treated all of the outputs as implicit ones, and consequently the DirectSolver I used later in my system raised a "Singular matrix" error. To work around that, I created a group composed of two components: an explicit one for the variables that depend directly on the inputs, and an implicit one that uses the same inputs, like this:
class Dis1(ImplicitComponent):

    def setup(self):
        self.add_input('a')
        self.add_input('b')
        self.add_input('c')
        self.add_output('z')
        self.declare_partials(of='*', wrt='*', method='fd')

    def apply_nonlinear(self, inputs, outputs, residuals):
        # 'z' is an output of this component, so it is read from `outputs`
        residuals['z'] = (1 - outputs['z']) / inputs['a'] ** 0.5 * inputs['b'] - inputs['c']


class Dis2(ExplicitComponent):

    def setup(self):
        self.add_input('a')
        self.add_input('b')
        self.add_input('c')
        self.add_input('z')
        self.add_output('y1')
        self.add_output('y2')
        self.add_output('y3')
        self.declare_partials(of='*', wrt='*', method='fd')

    def compute(self, inputs, outputs):
        outputs['y1'] = (1 - inputs['z']) / inputs['a'] ** 0.5 * inputs['b']
        outputs['y2'] = inputs['a'] * (0.01 * inputs['z'] + 1)
        outputs['y3'] = inputs['b'] * (outputs['y2'] / inputs['a'])


class Top(Group):

    def setup(self):
        self.add_subsystem('compz', Dis1())
        self.add_subsystem('comp_exp', Dis2(), promotes_outputs=['y1', 'y2', 'y3'])
        self.connect('compz.z', 'comp_exp.z')
        self.set_order(['compz', 'comp_exp'])
Is there a way to put them in a single component? That is, a component that can compute residuals and provide explicit outputs as well.
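One pattern worth noting (my sketch, not part of the original question) is to keep everything in a single ImplicitComponent and treat the "explicit" outputs implicitly as well, writing each of their residuals as the explicit expression minus the current output value. A minimal sketch, reusing the equations above:

class DisCombined(ImplicitComponent):

    def setup(self):
        self.add_input('a')
        self.add_input('b')
        self.add_input('c')
        self.add_output('z')
        self.add_output('y1')
        self.add_output('y2')
        self.add_output('y3')
        self.declare_partials(of='*', wrt='*', method='fd')

    def apply_nonlinear(self, inputs, outputs, residuals):
        # true implicit state
        residuals['z'] = (1 - outputs['z']) / inputs['a'] ** 0.5 * inputs['b'] - inputs['c']
        # "explicit" outputs: residual = explicit expression - current value
        residuals['y1'] = (1 - outputs['z']) / inputs['a'] ** 0.5 * inputs['b'] - outputs['y1']
        residuals['y2'] = inputs['a'] * (0.01 * outputs['z'] + 1) - outputs['y2']
        residuals['y3'] = inputs['b'] * (outputs['y2'] / inputs['a']) - outputs['y3']

The cost is that y1, y2 and y3 become implicit states too, so the component (or a parent group) needs a nonlinear solver such as NewtonSolver to converge them; the two-component group you wrote is usually the cleaner structure.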

Related

Where can I find the source code for pygame.math.Vector2?

I wrote a game with Pygame on my Raspberry Pi, but the calculations it had to do every frame dropped the frame rate significantly (5 fps instead of the intended 60 fps). I have a stronger computer that I want to try running it on, but there isn't a version of pygame that includes pygame.math that I can install on its operating system (Mac OS X 10.7.1). All I need is pygame.math.Vector2, so is there anywhere I could find the source code for it?
Below is a class that I wrote. I attempted to make it behave exactly like pygame.math.Vector2.
import math

class Vec():

    def __init__(self, x, y=None):
        if y == None:
            x, y = x[0], x[1]
        self.x = float(x)
        self.y = float(y)

    def __iter__(self):
        vals = [self.x, self.y]
        return iter(vals)

    def __add__(self, other):
        x = self.x + other.x
        y = self.y + other.y
        return Vec(x, y)

    def __sub__(self, other):
        x = self.x - other.x
        y = self.y - other.y
        return Vec(x, y)

    def __mul__(self, other):
        if type(other) == type(self):
            x = self.x * other.x
            y = self.y * other.y
            return x + y
        else:
            x = self.x * other
            y = self.y * other
            return Vec(x, y)

    def rotate(self, angle):
        ox, oy = 0, 0
        px, py = self.x, self.y
        qx = ox + math.cos(angle) * (px - ox) - math.sin(angle) * (py - oy)
        qy = oy + math.sin(angle) * (px - ox) + math.cos(angle) * (py - oy)
        return Vec(qx, qy)

    def angle_to(self, other):
        return math.degrees(math.asin((self.x * other.y - self.y * other.x) / (self.length() * other.length())))

    def length(self):
        return math.sqrt(self.x**2 + self.y**2)
However, it did not work: it raised errors when the vector was used in mathematical operations and when a rectangle's position was assigned from the vector.
Pygame is based on SDL and is mainly written in the C programming language (see the About page on the pygame wiki). The pygame GitHub repository can be found at pygame/pygame.
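As a side note I am adding here (not part of the original answer), two behavioral differences from pygame.math.Vector2 stand out in the class above and would explain the errors in mathematical operations: Vector2.rotate() takes its angle in degrees while the rotate() above expects radians, and scalar-on-the-left products such as 2 * vec need __rmul__, which the class does not define. A minimal, self-contained sketch of those two fixes (other methods as in the class above):

import math

class Vec:

    def __init__(self, x, y):
        self.x = float(x)
        self.y = float(y)

    def __mul__(self, other):
        if isinstance(other, Vec):
            # dot product, as pygame.math.Vector2 does for vector * vector
            return self.x * other.x + self.y * other.y
        return Vec(self.x * other, self.y * other)

    def __rmul__(self, other):
        # without this, scalar * Vec raises a TypeError
        return self.__mul__(other)

    def rotate(self, angle):
        # pygame.math.Vector2.rotate() expects degrees, not radians
        rad = math.radians(angle)
        return Vec(math.cos(rad) * self.x - math.sin(rad) * self.y,
                   math.sin(rad) * self.x + math.cos(rad) * self.y)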

Semi-total approx of a Group in a Group does not use the same input

Here is a sample abbreviated N2 diagram. I have one group (gr1) with a linear and a nonlinear solver attached (DirectSolver and NonlinearBlockGS).
If I use the setup shown in version 1 with a gradient-based optimizer for the whole problem, the finite difference is applied to the variables D1 and D2 (which are floats).
If I wrap gr1 in gr2 with the same setup (except that approx_totals is now declared on gr2), the finite difference is applied to t and d, which are ndarrays, resulting in at least n*2 function evaluations.
Of course gr2 is not necessary in this setup, but my aim is to reach the version tagged as "future desired" in the image below.
I cannot replicate the same problem with the Sellar MDA, which is weird, as I tried to set it up identically. Nevertheless, I attach the Sellar MDA code that illustrates my problem. Setting the variable "ver1" to True or False changes the setup.
ver1=False --> the single-group case. In my setup the FD is applied to the global design variable, which is what I want.
ver1=True --> the group-in-group case. The FD is applied to the large arrays, which are the coupling parameters.
from openmdao.api import Problem, ScipyOptimizeDriver, ExecComp, IndepVarComp, \
    DirectSolver, ExplicitComponent, NonlinearBlockGS, Group
import numpy as np


class SellarDis1(ExplicitComponent):
    """
    Component containing Discipline 1 -- no derivatives version.
    """

    def setup(self):
        # Global Design Variable
        self.add_input('z', val=np.zeros(2))
        # Coupling parameter
        self.add_input('y2', val=1.0)
        # Coupling output
        self.add_output('y1', val=1.0)
        # Finite difference all partials.
        self.declare_partials('*', '*', method='fd')

    def compute(self, inputs, outputs):
        """
        Evaluates the equation
        y1 = z1**2 + z2 - 0.2*y2
        """
        z1 = inputs['z'][0]
        z2 = inputs['z'][1]
        y2 = inputs['y2']
        print(inputs['z'])
        outputs['y1'] = z1**2 + z2 - 0.2*y2


class SellarDis2(ExplicitComponent):
    """
    Component containing Discipline 2 -- no derivatives version.
    """

    def setup(self):
        # Global Design Variable
        self.add_input('z', val=np.zeros(2))
        # Coupling parameter
        self.add_input('y1', val=1.0)
        # Coupling output
        self.add_output('y2', val=1.0)
        # Finite difference all partials.
        self.declare_partials('*', '*', method='fd')

    def compute(self, inputs, outputs):
        """
        Evaluates the equation
        y2 = y1**(.5) + z1 + z2
        """
        z1 = inputs['z'][0]
        z2 = inputs['z'][1]
        y1 = inputs['y1']
        # Note: this may cause some issues. However, y1 is constrained to be
        # above 3.16, so lets just let it converge, and the optimizer will
        # throw it out
        if y1.real < 0.0:
            y1 *= -1
        outputs['y2'] = y1**.5 + z1 + z2


class SellarMDA(Group):
    """
    Group containing the Sellar MDA.
    """

    def setup(self):
        ver1 = False
        if ver1:
            cycle = self.add_subsystem('cycle', Group(), promotes=['*'])
            cycle.add_subsystem('d1', SellarDis1(), promotes_inputs=['z', 'y2'], promotes_outputs=['y1'])
            cycle.add_subsystem('d2', SellarDis2(), promotes_inputs=['z', 'y1'], promotes_outputs=['y2'])
            # Nonlinear Block Gauss-Seidel is a gradient-free solver
            cycle.nonlinear_solver = NonlinearBlockGS()
        else:
            self.add_subsystem('d1', SellarDis1(), promotes_inputs=['z', 'y2'], promotes_outputs=['y1'])
            self.add_subsystem('d2', SellarDis2(), promotes_inputs=['z', 'y1'], promotes_outputs=['y2'])
            self.nonlinear_solver = NonlinearBlockGS()
        self.approx_totals()


prob = Problem()

indeps = prob.model.add_subsystem('indeps', IndepVarComp(), promotes=['*'])
indeps.add_output('z', np.array([5.0, 2.0]))

SellarMDA11 = SellarMDA()
prob.model.add_subsystem('SellarMDA', SellarMDA11, promotes=['*'])
# SellarMDA11.approx_totals()

prob.model.add_subsystem('obj_cmp', ExecComp('obj = z[1] + y1 + exp(-y2)',
                                             z=np.array([0.0, 0.0])),
                         promotes=['z', 'y1', 'y2', 'obj'])
prob.model.add_subsystem('con_cmp1', ExecComp('con1 = 3.16 - y1'), promotes=['con1', 'y1'])
prob.model.add_subsystem('con_cmp2', ExecComp('con2 = y2 - 24.0'), promotes=['con2', 'y2'])

prob.driver = ScipyOptimizeDriver()
prob.driver.options['optimizer'] = 'SLSQP'
# prob.driver.options['maxiter'] = 100
prob.driver.options['tol'] = 1e-8

prob.model.add_design_var('z', lower=0, upper=10)
prob.model.add_objective('obj')
prob.model.add_constraint('con1', upper=0)
prob.model.add_constraint('con2', upper=0)

prob.setup()
prob.set_solver_print(level=0)

# Ask OpenMDAO to finite-difference across the model to compute the gradients for the optimizer
# prob.model.approx_totals()

prob.run_driver()

print('minimum found at')
print(prob['z'])
print('minimum objective')
print(prob['obj'][0])
We examined the code and the example you gave closely and were not able to replicate any of the problems you described. Without a test case, we can't make any more progress on this.
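A general note I am adding here (not part of the original answer): approx_totals is applied at the boundary of the system on which it is declared, so which variables get finite-differenced depends on where the call is made. A minimal sketch, assuming the script above:

# Declared on the inner MDA group: the FD sweep is over that group's
# boundary inputs (in a nested setup, the inner group's boundary may be
# the large coupling arrays).
SellarMDA11.approx_totals(method='fd')

# Declared on the top-level model: the FD sweep is over the design
# variables seen by the driver (here z).
prob.model.approx_totals(method='fd')

Declaring the approximation on the outermost group that contains the design variables keeps the FD over z rather than over inner array variables.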

Asymmetric Subspace Optimization (ASO)

I'm trying to implement ASO for the Sellar problem, but the "subproblem" is not working as I expected, and I have no idea how to solve this.
Although the Sellar problem is not a good example for understanding the ASO architecture (both disciplines have pretty much the same computational cost), it's a simple formulation and I'm using it as a benchmark before applying the approach to the problem I'm studying.
My code is (OpenMDAO 1.7):
import numpy as np
from openmdao.api import (Group, IndepVarComp, ExecComp, NLGaussSeidel,
                          ScipyGMRES, SubProblem)
# SellarDis1 and SellarDis2 are the standard Sellar disciplines (definitions not shown)


class low_order(Group):

    def __init__(self):
        super(low_order, self).__init__()
        self.add('plx', IndepVarComp('x', 1.0), promotes=['x'])
        self.add('plz', IndepVarComp('z', np.array([5.0, 2.0])), promotes=['z'])
        self.add('d1', SellarDis1(), promotes=['x', 'z', 'y1', 'y2'])
        # self.add('d2', SellarDis2(), promotes=['z', 'y1', 'y2'])
        self.add('obj_cmp', ExecComp('obj = x**2 + z[1] + y1 + exp(-y2)',
                                     z=np.array([0.0, 0.0])),
                 promotes=['obj', 'x', 'z', 'y1', 'y2'])
        self.add('con_cmp1', ExecComp('con1 = 3.16 - y1'), promotes=['con1', 'y1'])
        self.deriv_options['type'] = 'fd'
        self.deriv_options['form'] = 'central'


class SellarDerivatives(Group):

    def __init__(self):
        super(SellarDerivatives, self).__init__()
        self.add('ppx', IndepVarComp('x', 1.0), promotes=['x'])
        self.add('pz', IndepVarComp('z', np.array([5.0, 2.0])), promotes=['z'])
        self.add('d2', SellarDis2(), promotes=['z', 'y1', 'y2'])
        self.add('obj_cmp1', ExecComp('obj1 = x**2 + z[1] + y1 + exp(-y2)',
                                      z=np.array([0.0, 0.0])),
                 promotes=['obj1', 'x', 'z', 'y1', 'y2'])
        # self.add('con_cmp1', ExecComp('con1 = 3.16 - y1'), promotes=['con1', 'y1'])
        self.add('con_cmp2', ExecComp('con2 = y2 - 24.0'), promotes=['con2', 'y2'])
        self.nl_solver = NLGaussSeidel()
        self.nl_solver.options['atol'] = 1.0e-12
        self.ln_solver = ScipyGMRES()
        self.deriv_options['type'] = 'fd'
        self.deriv_options['form'] = 'central'


if __name__ == '__main__':
    from openmdao.api import Problem, ScipyOptimizer, SqliteRecorder

    sub = Problem()
    sub.root = low_order()
    sub.driver = ScipyOptimizer()  # pyOptSparseDriver()
    sub.driver.options['optimizer'] = 'SLSQP'
    sub.driver.options['disp'] = False
    sub.driver.add_desvar('x', lower=0.0, upper=10.0)
    sub.driver.add_objective('obj')
    sub.driver.add_constraint('con1', upper=0.0)
    # sub.driver.add_constraint('con2', upper=0.0)

    top = Problem()
    top.root = SellarDerivatives()
    top.driver = ScipyOptimizer()  # pyOptSparseDriver()
    top.driver.options['optimizer'] = 'SLSQP'
    top.driver.add_desvar('z', lower=np.array([-10.0, 0.0]),
                          upper=np.array([10.0, 10.0]))
    top.root.add('subprob1', SubProblem(sub, params=['z'], unknowns=['y1', 'x']))
    top.root.connect('z', 'subprob1.z')
    top.driver.add_objective('obj1')
    # top.driver.add_constraint('con1', upper=0.0)
    top.driver.add_constraint('con2', upper=0.0)

    top.setup()
    top.run()
When I look at the iterations, I see that only Z1 and Z2 vary, as I expected, but I have no idea what is happening with X.
The lines below are: X | Z1 | Z2
(8.881784197001252e-16, 5.000916977558285, 1.000912901563544)
(1.0000000008881784e-06, 5.000916977558285, 1.000912901563544)
(-9.999999991118215e-07, 5.000916977558285, 1.000912901563544)
(8.881784197001252e-16, 5.000912478379066, 1.0009120015635442)
(8.881784197001252e-16, 5.000912478379066, 1.0009120015635442)
(1.0000000008881784e-06, 5.000912478379066, 1.0009120015635442)
(-9.999999991118215e-07, 5.000912478379066, 1.0009120015635442)
And the final answer is (Z1, Z2, X):
Minimum found at (5.000912, 1.000912, 1.000000)
Coupling vars: 0.000000, 6.001824
('Minimum objective: ', 2.0033861370124493)
PS: Since the Sellar problem is not my main objective, I'm using FD across the whole model, just to make it easier for myself.
Thanks!
I got this to work with a few changes (it didn't work at all with the code you sent, but we may have tightened some error checking between 1.7 and 1.7.3).
You need to pass y2 into the subproblem to complete the cycle:
top.root.add('subprob1', SubProblem(sub, params=['z', 'y2'], unknowns=['y1', 'x']), promotes=['*'])
And you need to remove the extra IndepVarComp for variable 'x' in the top "SellarDerivatives" group because you are letting the subproblem be the ultimate source.
#self.add('ppx', IndepVarComp('x', 1.0), promotes=['x'])
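Putting the two changes together (my consolidation of the above, assuming the rest of the script is unchanged):

# In SellarDerivatives.__init__, remove the IndepVarComp for 'x', since
# the subproblem becomes its source:
# self.add('ppx', IndepVarComp('x', 1.0), promotes=['x'])

# In the run script, pass y2 into the subproblem to close the cycle, and
# promote everything so y1, x and y2 connect by name in the top group:
top.root.add('subprob1', SubProblem(sub, params=['z', 'y2'],
                                    unknowns=['y1', 'x']),
             promotes=['*'])

With the subproblem promoted, the explicit top.root.connect('z', 'subprob1.z') call becomes unnecessary.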
With those changes and using pyoptsparse, I get for the sub:
Objectives:
    Name    Value      Optimum
    obj     3.18339    0

Variables (c - continuous, i - integer, d - discrete):
    Name    Type    Value       Lower Bound    Upper Bound
    x_0     c       0.000004    0.00e+00       1.00e+01
and for the main problem:
Objectives:
    Name    Value      Optimum
    obj1    3.18339    0

Variables (c - continuous, i - integer, d - discrete):
    Name    Type    Value       Lower Bound    Upper Bound
    z_0     c       1.977638    -1.00e+01      1.00e+01
    z_1     c       0.000000    0.00e+00       1.00e+01

OpenMDAO1+: variable trees in parallel

I have a large set of model parameters controlling several different components, and the model is run in parallel. The model parameters are held constant during the run. The problem is that I have to add an IndepVarComp() for every model parameter when running in parallel, even though I would like to pass them all by object. I need to be able to edit the values in my run script before running the model (between setup and run). Is there a good way of doing this? I recognize that the data-passing issue comes from running under MPI without a "source" for the parameters.
It works if I add an IndepVarComp() for each model parameter, as long as I don't pass by object. This makes sense: if I tell OpenMDAO I want to be able to change a value and track how the model changes, then passing by object is contradictory. However, I need to be able to pass in the parameter values after setup, and I can't do that under MPI without making an IndepVarComp() for each model parameter.
I have attached an example of what I want to do, based on the Sellar problem from the OpenMDAO docs. By uncommenting line 28, commenting out line 27, and uncommenting line 139 in src.py, the example functions fine in parallel.
Run with: $ mpirun -np 4 python call.py
call.py
from __future__ import print_function
from openmdao.api import Problem, ScipyOptimizer
from src import SellarDerivativesSuperGroup
import numpy as np

if __name__ == "__main__":

    ######################### for MPI functionality #########################
    from openmdao.core.mpi_wrap import MPI

    if MPI:
        # if you called this script with 'mpirun', then use the petsc data passing
        from openmdao.core.petsc_impl import PetscImpl as impl
    else:
        # if you didn't use 'mpirun', then use the numpy data passing
        from openmdao.api import BasicImpl as impl

    def mpi_print(prob, *args):
        """ helper function to only print on rank 0 """
        if prob.root.comm.rank == 0:
            print(*args)

    ##################

    nProblems = 4
    datasize = 10

    top = Problem(impl=impl)
    top.root = SellarDerivativesSuperGroup(nProblems=nProblems, datasize=datasize)

    top.driver = ScipyOptimizer()
    top.driver.options['optimizer'] = 'SLSQP'
    top.driver.options['tol'] = 1.0e-8

    top.driver.add_desvar('z', lower=np.array([-10.0, 0.0]),
                          upper=np.array([10.0, 10.0]))
    top.driver.add_desvar('x', lower=0.0, upper=10.0)
    top.driver.add_objective('obj')
    top.driver.add_constraint('con1', upper=0.0)
    top.driver.add_constraint('con2', upper=0.0)

    top.setup(check=True)

    # Setting initial values for design variables
    top['x'] = 1.0
    top['z'] = np.array([5.0, 2.0])
    top['varTree:leaf1'] = np.ones(datasize)

    top.run()

    if top.root.comm.rank == 0:
        print("\n")
        print("Minimum found at (%f, %f, %f)" % (top['z'][0],
                                                 top['z'][1],
                                                 top['x']))
        print("Coupling vars: %f, %f" % (top['y1_0'], top['y2_0']))
        print("Minimum objective: ", top['obj'] / nProblems)
src.py
from __future__ import print_function
from openmdao.api import ExecComp, IndepVarComp, Group, NLGaussSeidel, \
    Component, ParallelGroup, ScipyGMRES
import numpy as np


class SellarDis1(Component):
    """Component containing Discipline 1."""

    def __init__(self, problem_id=0, datasize=0):
        super(SellarDis1, self).__init__()
        self.problem_id = problem_id
        # Global Design Variable
        self.add_param('z', val=np.zeros(2))
        # Local Design Variable
        self.add_param('x', val=0.)
        # Coupling parameter
        self.add_param('y2_%i' % problem_id, val=1.0)
        # Dummy variable tree element
        self.add_param('varTree:leaf1', val=np.zeros(datasize), pass_by_obj=True)
        # self.add_param('varTree:leaf1', val=np.zeros(datasize), pass_by_obj=False)
        # Coupling output
        self.add_output('y1_%i' % problem_id, val=1.0)

    def solve_nonlinear(self, params, unknowns, resids):
        """Evaluates the equation
        y1 = z1**2 + z2 + x1 - 0.2*y2"""
        problem_id = self.problem_id
        z1 = params['z'][0]
        z2 = params['z'][1]
        x1 = params['x']
        y2 = params['y2_%i' % problem_id]
        unknowns['y1_%i' % problem_id] = z1**2 + z2 + x1 - 0.2*y2

    def linearize(self, params, unknowns, resids):
        """ Jacobian for Sellar discipline 1."""
        problem_id = self.problem_id
        J = {}
        J['y1_%i' % problem_id, 'y2_%i' % problem_id] = -0.2
        J['y1_%i' % problem_id, 'z'] = np.array([[2*params['z'][0], 1.0]])
        J['y1_%i' % problem_id, 'x'] = 1.0
        return J


class SellarDis2(Component):
    """Component containing Discipline 2."""

    def __init__(self, problem_id=0):
        super(SellarDis2, self).__init__()
        self.problem_id = problem_id
        # Global Design Variable
        self.add_param('z', val=np.zeros(2))
        # Coupling parameter
        self.add_param('y1_%i' % problem_id, val=1.0)
        # Coupling output
        self.add_output('y2_%i' % problem_id, val=1.0)

    def solve_nonlinear(self, params, unknowns, resids):
        """Evaluates the equation
        y2 = y1**(.5) + z1 + z2"""
        problem_id = self.problem_id
        z1 = params['z'][0]
        z2 = params['z'][1]
        y1 = params['y1_%i' % problem_id]
        # Note: this may cause some issues. However, y1 is constrained to be
        # above 3.16, so lets just let it converge, and the optimizer will
        # throw it out
        y1 = abs(y1)
        unknowns['y2_%i' % problem_id] = y1**.5 + z1 + z2

    def linearize(self, params, unknowns, resids):
        """ Jacobian for Sellar discipline 2."""
        problem_id = self.problem_id
        J = {}
        J['y2_%i' % problem_id, 'y1_%i' % problem_id] = .5*params['y1_%i' % problem_id]**-.5
        J['y2_%i' % problem_id, 'z'] = np.array([[1.0, 1.0]])
        return J


class SellarDerivativesSubGroup(Group):

    def __init__(self, problem_id=0, datasize=0):
        super(SellarDerivativesSubGroup, self).__init__()
        self.add('d1', SellarDis1(problem_id=problem_id, datasize=datasize), promotes=['*'])
        self.add('d2', SellarDis2(problem_id=problem_id), promotes=['*'])
        self.nl_solver = NLGaussSeidel()
        self.nl_solver.options['atol'] = 1.0e-12
        self.ln_solver = ScipyGMRES()


class SellarDerivatives(Group):
    """ Group containing the Sellar MDA. This version uses the disciplines
    with derivatives."""

    def __init__(self, problem_id=0, datasize=0):
        super(SellarDerivatives, self).__init__()
        self.add('d', SellarDerivativesSubGroup(problem_id=problem_id, datasize=datasize), promotes=['*'])


class SellarDerivativesSuperGroup(Group):

    def __init__(self, nProblems=0, datasize=0):
        super(SellarDerivativesSuperGroup, self).__init__()
        self.add('px', IndepVarComp('x', 1.0), promotes=['*'])
        self.add('pz', IndepVarComp('z', np.array([5.0, 2.0])), promotes=['*'])
        # self.add('vt', IndepVarComp('varTree:leaf1', val=np.zeros(datasize)), promotes=['*'])
        pg = self.add('manySellars', ParallelGroup(), promotes=['*'])
        print(nProblems)
        for problem_id in np.arange(0, nProblems):
            pg.add('Sellar%i' % problem_id, SellarDerivatives(problem_id=problem_id, datasize=datasize), promotes=['*'])
        self.add('obj_cmp', ExecComp('obj = (x**2 + z[1] + y1_0 + exp(-y2_0)) + (x**2 + z[1] + y1_1 + exp(-y2_1)) + '
                                     '(x**2 + z[1] + y1_2 + exp(-y2_2)) + (x**2 + z[1] + y1_3 + exp(-y2_3))',
                                     z=np.array([0.0, 0.0]), x=0.0,
                                     y1_0=0.0, y2_0=0.0,
                                     y1_1=0.0, y2_1=0.0,
                                     y1_2=0.0, y2_2=0.0,
                                     y1_3=0.0, y2_3=0.0),
                 promotes=['*'])
        self.add('con_cmp1', ExecComp('con1 = 3.16 - y1_0'), promotes=['*'])
        self.add('con_cmp2', ExecComp('con2 = y2_0 - 24.0'), promotes=['*'])
If these parameters will never be used as optimization design variables, you don't have to declare them as OpenMDAO variables. You could just declare them as regular Python attributes in the __init__ methods, then write a small method that loops over the hierarchy and sets the attribute values to whatever you want.
That might be a little simpler than adding IndepVarComps with pass by object, though your proposed solution also works.
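A minimal sketch of that idea (my illustration, with hypothetical names; the subsystems(recurse=True) iterator is from OpenMDAO 1.x and the exact call may differ by version):

import numpy as np
from openmdao.api import Component

class ParamHolder(Component):

    def __init__(self, datasize=10):
        super(ParamHolder, self).__init__()
        # plain Python attribute: never declared as an OpenMDAO variable,
        # so it is invisible to MPI data passing and needs no IndepVarComp
        self.model_params = np.zeros(datasize)
        self.add_param('x', val=0.0)
        self.add_output('y', val=0.0)

    def solve_nonlinear(self, params, unknowns, resids):
        unknowns['y'] = params['x'] + self.model_params.sum()

def set_model_params(root, value):
    # walk the whole hierarchy and update the attribute wherever it exists
    for sub in root.subsystems(recurse=True):
        if hasattr(sub, 'model_params'):
            sub.model_params = value

# between setup and run:
# top.setup(check=True)
# set_model_params(top.root, np.ones(10))
# top.run()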
On further investigation, I found that I can specify pass_by_obj in an IndepVarComp(). That solves part of the problem. The other part I solved by creating a function that adds the params, rather than having a large list of parameters in my constructor, which would hurt readability.
My solution is below. If someone has a better one, I would definitely be interested.
src.py (only the parts that changed from the listing above are shown; SellarDis1, SellarDis2, SellarDerivativesSubGroup and SellarDerivatives are unchanged):
from __future__ import print_function
from openmdao.api import ExecComp, IndepVarComp, Group, NLGaussSeidel, \
    Component, ParallelGroup, ScipyGMRES
import numpy as np


class SellarDerivativesSuperGroup(Group):

    def __init__(self, nProblems=0, datasize=0):
        super(SellarDerivativesSuperGroup, self).__init__()
        self.add('px', IndepVarComp('x', 1.0), promotes=['*'])
        self.add('pz', IndepVarComp('z', np.array([5.0, 2.0])), promotes=['*'])
        # the variable-tree parameter is now added through a helper function
        # instead of being listed inline:
        # self.add('vt', IndepVarComp('varTree:leaf1', val=np.zeros(datasize), pass_by_obj=True), promotes=['*'])
        addVariableTree(self, datasize=datasize)
        pg = self.add('manySellars', ParallelGroup(), promotes=['*'])
        for problem_id in np.arange(0, nProblems):
            pg.add('Sellar%i' % problem_id, SellarDerivatives(problem_id=problem_id, datasize=datasize), promotes=['*'])
        self.add('obj_cmp', ExecComp('obj = (x**2 + z[1] + y1_0 + exp(-y2_0)) + (x**2 + z[1] + y1_1 + exp(-y2_1)) + '
                                     '(x**2 + z[1] + y1_2 + exp(-y2_2)) + (x**2 + z[1] + y1_3 + exp(-y2_3))',
                                     z=np.array([0.0, 0.0]), x=0.0,
                                     y1_0=0.0, y2_0=0.0,
                                     y1_1=0.0, y2_1=0.0,
                                     y1_2=0.0, y2_2=0.0,
                                     y1_3=0.0, y2_3=0.0),
                 promotes=['*'])
        self.add('con_cmp1', ExecComp('con1 = 3.16 - y1_0'), promotes=['*'])
        self.add('con_cmp2', ExecComp('con2 = y2_0 - 24.0'), promotes=['*'])


def addVariableTree(openmdao_class, datasize=0):
    openmdao_class.add('vt', IndepVarComp('varTree:leaf1', val=np.zeros(datasize), pass_by_obj=True), promotes=['*'])

OpenMDAO: Replacing ExecComps with normal Components changes output

I was running the code of the Sellar example from the tutorials. According to the docs on the tutorial page, an ExecComp is just shorthand for declaring a normal Component. So I tried redefining the ExecComps in the example as normal Components and using them in the same example.
The ExecComps in the example are defined as follows:
self.add('obj_cmp', ExecComp('obj = x**2 + z[1] + y1 + exp(-y2)',
                             z=np.array([0.0, 0.0]), x=0.0, y1=0.0, y2=0.0),
         promotes=['*'])
self.add('con_cmp1', ExecComp('con1 = 3.16 - y1'), promotes=['*'])
self.add('con_cmp2', ExecComp('con2 = y2 - 24.0'), promotes=['*'])
The normal Components I defined are as follows:
Objective component
class SellarObjective(Component):

    def __init__(self):
        super(SellarObjective, self).__init__()
        self.add_param('x', val=0.0)
        self.add_param('y2', val=0.0)
        self.add_param('y1', val=0.0)
        self.add_param('z', val=np.zeros(2))
        self.add_output('obj', val=0.0)

    def solve_nonlinear(self, params, unknowns, resids):
        unknowns['obj'] = params['x']**2 + params['z'][0] + params['y1'] + exp(-params['y2'])

    def linearize(self, params, unknowns, resids):
        J = {}
        J['obj', 'x'] = 2 * params['x']
        J['obj', 'y2'] = (-1) * exp(-params['y2'])
        J['obj', 'y1'] = 1.0
        J['obj', 'z[0]'] = 1.0
        return J
Constraint 1
class SellarConstraint1(Component):

    def __init__(self):
        super(SellarConstraint1, self).__init__()
        self.add_param('y1', val=0.0)
        self.add_output('con1', val=0.0)

    def solve_nonlinear(self, params, unknowns, resids):
        unknowns['con1'] = 3.16 - params['y1']

    def linearize(self, params, unknowns, resids):
        J = {}
        J['con1', 'y1'] = -1.0
        return J
Constraint 2
class SellarConstraint2(Component):

    def __init__(self):
        super(SellarConstraint2, self).__init__()
        self.add_param('y2', val=0.0)
        self.add_output('con2', val=0.0)

    def solve_nonlinear(self, params, unknowns, resids):
        unknowns['con2'] = params['y2'] - 24.0

    def linearize(self, params, unknowns, resids):
        J = {}
        J['con2', 'y2'] = 1.0
        return J
And I instantiate these newly declared Components in the rewritten implementation as:
self.add('obj_cmp', SellarObjective(), promotes=['*'])
self.add('con_cmp1', SellarConstraint1(), promotes=['*'])
self.add('con_cmp2', SellarConstraint2(), promotes=['*'])
Everything else in the code is the same as in the tutorial. But when I execute both and compare the results, they don't match.
Am I missing something obvious here? Thank you for your time.
There are two minor issues with your replacement objective class:
- the objective is a function of z[1], not z[0]
- the derivative of the objective with respect to z is an array, and you can't use 'z[1]' as the key; you must use 'z' instead
Correct your objective comp to the following, and it should work:
class SellarObjective(Component):

    def __init__(self):
        super(SellarObjective, self).__init__()
        self.add_param('x', val=0.0)
        self.add_param('y2', val=0.0)
        self.add_param('y1', val=0.0)
        self.add_param('z', val=np.zeros(2))
        self.add_output('obj', val=0.0)

    def solve_nonlinear(self, params, unknowns, resids):
        unknowns['obj'] = params['x']**2 + params['z'][1] + params['y1'] + np.exp(-params['y2'])

    def linearize(self, params, unknowns, resids):
        J = {}
        J['obj', 'x'] = 2 * params['x']
        J['obj', 'y2'] = (-1) * np.exp(-params['y2'])
        J['obj', 'y1'] = 1.0
        J['obj', 'z'] = np.array([[0, 1], ])
        return J
