Connecting an element of array to another Component's input - openmdao

I am trying to connect the last element of an output of one component to the input of another component. An example is shown below:
import numpy as np
from openmdao.api import Component, Problem, Group

class C1(Component):
    def __init__(self):
        super(C1, self).__init__()
        self.add_param('fin', val=1.0)
        self.add_output('arr', val=np.zeros(5))

    def solve_nonlinear(self, params, unknowns, resids):
        fin = params['fin']
        unknowns['arr'] = np.array([2*fin])

class C2(Component):
    def __init__(self):
        super(C2, self).__init__()
        self.add_param('flt', val=0.0)
        self.add_output('fout', val=0.0)

    def solve_nonlinear(self, params, unknowns, resids):
        flt = params['flt']
        unknowns['fout'] = 2*flt

class A(Group):
    def __init__(self):
        super(A, self).__init__()
        self.add('c1', C1())
        self.add('c2', C2())
        self.connect('c1.arr[-1]', 'c2.flt')

if __name__ == '__main__':
    a = Problem()
    a.root = A()
    a.setup()
    a.run()
    print a.root.c2.unknowns['fout']
I am given the error:
openmdao.core.checks.ConnectError: Source 'c1.arr[-1]' cannot be connected to target 'c2.flt': 'c1.arr[-1]' does not exist.
Is there a way to do this? I know it worked in the old version of OpenMDAO.

OpenMDAO supports connection to specific indices of a source by using the 'src_indices' arg. For example:
self.connect('c1.arr', 'c2.flt', src_indices=[4])
Negative indices are not currently supported.
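One workaround, as a minimal sketch, is to compute the positive index from the known source array length (here 'c1.arr' was declared with length 5; arr_len is a name introduced just for illustration):

arr_len = 5  # known length of 'c1.arr'
self.connect('c1.arr', 'c2.flt', src_indices=[arr_len - 1])  # last element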

There are a number of small issues here. First, the solve_nonlinear method of C1 computes an array of the wrong size. It happens to work out, but you should really set the array to the right size (length 5).
To connect part of an array (see the docs and the more advanced docs), you specify the src_indices argument to connect:
import numpy as np
from openmdao.api import Component, Problem, Group

class C1(Component):
    def __init__(self):
        super(C1, self).__init__()
        self.add_param('fin', val=1.0)
        self.add_output('arr', val=np.zeros(5))

    def solve_nonlinear(self, params, unknowns, resids):
        fin = params['fin']
        unknowns['arr'] = fin*np.arange(5)

class C2(Component):
    def __init__(self):
        super(C2, self).__init__()
        self.add_param('flt', val=0.0)
        self.add_output('fout', val=0.0)

    def solve_nonlinear(self, params, unknowns, resids):
        flt = params['flt']
        unknowns['fout'] = 2*flt

class A(Group):
    def __init__(self):
        super(A, self).__init__()
        self.add('c1', C1())
        self.add('c2', C2())
        self.connect('c1.arr', 'c2.flt', src_indices=[4,])

if __name__ == '__main__':
    a = Problem()
    a.root = A()
    a.setup()
    a.run()
    print a.root.c2.unknowns['fout']
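With fin left at its default of 1.0, arr evaluates to [ 0.  1.  2.  3.  4.], so flt receives 4.0 and the script prints 8.0.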

Related

External code incompatibility with metadata?

If I use the same setup with metadata for an external code component and an explicit component, it seems to fail with the ExternalCode comp. The code below has both of the components. As is, there is no error; if I comment out the explicit component part, I get an error:
comp = GFWrapper(DVLO=np.array(DVLIST))
TypeError: __init__() got an unexpected keyword argument 'DVLO'
import numpy as np
import json as js
import re, sys, subprocess, os
from openmdao.api import Problem, Group, IndepVarComp
from openmdao.api import ScipyOptimizeDriver
from openmdao.api import ExternalCode

class GFWrapper(ExternalCode):
    def initialize(self):
        self.metadata.declare('DVLO', types=np.ndarray)

    def setup(self):
        DVLO = self.metadata['DVLO']
        for dv in DVLO:
            self.add_input(dv)
        self.add_output('OBJECTIVE')
        self.input_file = 'GFWrapper_input.dat'
        self.output_file = 'GFWrapper_output.dat'
        self.options['external_input_files'] = [self.input_file,]
        self.options['external_output_files'] = [self.output_file,]
        self.options['command'] = [
            'python', 'run.py', self.input_file, self.output_file
        ]
        #self.declare_partials(of='*', wrt='*', method='fd')
        for dv in DVLO:
            self.declare_partials(of='OBJECTIVE', wrt=dv, method='fd')

    def compute(self, inputs, outputs):
        DVLO = self.metadata['DVLO']
        # generate the input file for the external code
        outF = open(self.input_file, "w")
        for dv in DVLO:
            outF.write(str(inputs[dv]))  # write() needs a string, not an array
            print(dv, inputs[dv])
            outF.write("\n")
        outF.close()
        # the parent compute function actually runs the external code
        super(GFWrapper, self).compute(inputs, outputs)
        # parse the output file from the external code
        file_contents = np.loadtxt(self.output_file)
        outputs['OBJECTIVE'] = file_contents[0]

from openmdao.api import ExplicitComponent
#
class GFWrapper(ExplicitComponent):
    def initialize(self):
        self.metadata.declare('DVLO', types=np.ndarray)

    def setup(self):
        DVLO = self.metadata['DVLO']
        for dv in DVLO:
            self.add_input(dv)
        self.add_output('OBJECTIVE')
        #self.declare_partials(of='*', wrt='*', method='fd')
        for dv in DVLO:
            self.declare_partials(of='OBJECTIVE', wrt=dv, method='fd')

    def compute(self, inputs, outputs):
        DVLO = self.metadata['DVLO']
        powetemp = 0
        for dv in DVLO:
            powetemp += inputs[dv]
        outputs['OBJECTIVE'] = powetemp
#
DVLIST = ['DV1', 'DV2']
DVMIN = [2, 11]
DVMAX = [3, 12]
InitDVVal = [3, 5]
nr_of_desvar = len(DVLIST)
top = Problem()
top.model = model = Group()
# Introduce independent variables that will later be design variables (optimization parameters)
inputs_comp = IndepVarComp()
for i in range(nr_of_desvar):
    inputs_comp.add_output(DVLIST[i], InitDVVal[i])
# Add components/subsystems to the Group (model)
model.add_subsystem('inputs_comp', inputs_comp)
comp = GFWrapper(DVLO=np.array(DVLIST))
model.add_subsystem('GFWrapper', comp)
This is a bug in OpenMDAO version 2.2. The error occurs because we didn't include **kwargs in the __init__ method for ExternalCode. We intend to fix this for V2.3, but in the meantime you can work around it for your GFWrapper component by adding an __init__ method like this:
class GFWrapper(ExternalCode):
    def __init__(self, **kwargs):
        super(GFWrapper, self).__init__()
        self.metadata.update(kwargs)
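With that override in place, the original instantiation from the script above works as intended:

comp = GFWrapper(DVLO=np.array(DVLIST))
model.add_subsystem('GFWrapper', comp)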

Splitting up connections between groups

I would like to know the best way to split up a connection command. I have two groups that I want to be modular: an inner group and an outer group. I want the inner group to be a kind of black box that I can switch out or change without changing all the connections for the outer group; the outer group should only have to know the inputs and outputs of the inner group. For example:
import numpy as np
from openmdao.api import Group, Problem, Component, IndepVarComp, ExecComp

class C(Component):
    def __init__(self, n):
        super(C, self).__init__()
        self.add_param('array_element', shape=1)
        self.add_output('new_element', shape=1)

    def solve_nonlinear(self, params, unknowns, resids):
        unknowns['new_element'] = params['array_element']*2.0

class MUX(Component):
    def __init__(self, n):
        super(MUX, self).__init__()
        for i in range(n):
            self.add_param('new_element' + str(i), shape=1)
        self.add_output('new_array', shape=n)
        self.n = n

    def solve_nonlinear(self, params, unknowns, resids):
        new_array = np.zeros(self.n)  # use self.n, not the module-level n
        for i in range(self.n):
            new_array[i] = params['new_element' + str(i)]
        unknowns['new_array'] = new_array

class GroupInner(Group):
    def __init__(self, n):
        super(GroupInner, self).__init__()
        for i in range(n):
            self.add('c'+str(i), C(n))
            self.connect('array', 'c'+str(i) + '.array_element', src_indices=[i])
            self.connect('c'+str(i)+'.new_element', 'new_element'+str(i))
        self.add('mux', MUX(n), promotes=['*'])

class GroupOuter(Group):
    def __init__(self, n):
        super(GroupOuter, self).__init__()
        self.add('array', IndepVarComp('array', np.zeros(n)), promotes=['*'])
        self.add('inner', GroupInner(n), promotes=['new_array'])
        for i in range(n):
            # self.connect('array', 'inner.c'+str(i) + '.array_element', src_indices=[i])
            self.connect('array', 'inner.array', src_indices=[i])

n = 3
p = Problem()
p.root = GroupOuter(n)
p.setup(check=False)
p['array'] = np.ones(n)
p.run()
print p['new_array']
When I run the code I get the error that:
NameError: Source 'array' cannot be connected to target 'c0.array_element': 'array' does not exist.
To try to solve this I made 'array' an IndepVarComp in the GroupInner group. However, when I do this I get the error:
NameError: Source 'array' cannot be connected to target 'inner.array': Target must be a parameter but 'inner.array' is an unknown.
I know that if I just make the full connection self.connect('array', 'inner.c'+str(i) + '.array_element', src_indices=[i]) then it will work. But, like I said, I want GroupInner to be a kind of black box whose internal groups and components I don't need to know about. I also can't just promote everything, because the array_element params are all different. Is it possible to do this, or do you have to make the entire connection in one command?
I have two answers to your question. First, I'll get the problem working as you specified it. Second, I'll suggest a modification that I think might be more efficient for some applications of this model structure.
First, the main issue with the problem as you specified it is the following line:
self.connect('array', 'c'+str(i) + '.array_element', src_indices=[i])
There simply isn't an output or state named 'array' anywhere inside the Inner group, so that connection isn't going to work. You did create a variable called 'array' in the Outer group, but you can't issue a connection to it from inside the Inner definition because it's not available in that scope. To make it work the way you've specified it, the simplest way is the following:
class GroupInner(Group):
    def __init__(self, n):
        super(GroupInner, self).__init__()
        for i in range(n):
            self.add('c'+str(i), C(n))
            self.connect('c%d.new_element' % i, 'new_element'+str(i))
        self.add('mux', MUX(n), promotes=['*'])

class GroupOuter(Group):
    def __init__(self, n):
        super(GroupOuter, self).__init__()
        self.add('array', IndepVarComp('array', np.zeros(n)), promotes=['*'])
        self.add('inner', GroupInner(n), promotes=['new_array'])
        for i in range(n):
            self.connect('array', 'inner.c%d.array_element' % i, src_indices=[i])
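The driver script from the question then runs unchanged; with p['array'] = np.ones(3), each element is doubled by its c component and gathered by the MUX, so it prints [ 2.  2.  2.].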
Here is an alternate approach that reduces the number of variables and components in your model, which helps reduce setup time as n grows large: use an actual distributed component and partition the array using the MPI comm. Besides the setup-cost savings, this has some nice properties: it lets you scale your calculations more flexibly and improves efficiency when you run in serial. This solution works well if your model would really have had multiple c instances all doing the same thing and the process can simply be vectorized via numpy:
import numpy as np
from openmdao.api import Group, Problem, Component, IndepVarComp
from openmdao.util.array_util import evenly_distrib_idxs
from openmdao.core.mpi_wrap import MPI

if MPI:
    # if you called this script with 'mpirun', then use the petsc data passing
    from openmdao.core.petsc_impl import PetscImpl as impl
else:
    # if you didn't use `mpirun`, then use the numpy data passing
    from openmdao.api import BasicImpl as impl

class C(Component):
    def __init__(self, n):
        super(C, self).__init__()
        self.add_param('in_array', shape=n)
        self.add_output('new_array', shape=n)
        self.n = n

    def get_req_procs(self):
        """
        min/max number of procs that this component can use
        """
        return (1, self.n)

    # NOTE: This needs to be setup_distrib_idx for <= version 1.5.0
    def setup_distrib(self):
        comm = self.comm
        rank = comm.rank
        # NOTE: evenly_distrib_idxs is a helper function to split the array
        #       up as evenly as possible
        sizes, offsets = evenly_distrib_idxs(comm.size, self.n)
        local_size, local_offset = sizes[rank], offsets[rank]
        self.local_size = int(local_size)
        start = local_offset
        end = local_offset + local_size
        self.set_var_indices('in_array', val=np.zeros(local_size, float),
                             src_indices=np.arange(start, end, dtype=int))
        self.set_var_indices('new_array', val=np.zeros(local_size, float),
                             src_indices=np.arange(start, end, dtype=int))

    def solve_nonlinear(self, params, unknowns, resids):
        unknowns['new_array'] = params['in_array']*2.0
        print "computing new_array: ", unknowns['new_array']

class GroupInner(Group):
    def __init__(self, n):
        super(GroupInner, self).__init__()
        self.add('c', C(n), promotes=['new_array', 'in_array'])

class GroupOuter(Group):
    def __init__(self, n):
        super(GroupOuter, self).__init__()
        self.add('array', IndepVarComp('array', np.zeros(n)), promotes=['*'])
        self.add('inner', GroupInner(n), promotes=['new_array',])
        self.connect('array', 'inner.in_array')

n = 3
p = Problem(impl=impl)
p.root = GroupOuter(n)
p.setup(check=False)
p['array'] = np.ones(n)
p.run()
print p['new_array']
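To actually exercise the distributed data passing, launch the script with mpirun as the comments in the imports suggest (the script name here is hypothetical):

mpirun -np 3 python distributed_example.py

Without mpirun it falls back to BasicImpl and runs the same model in serial.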

OpenMDAO: unit conversion with pass_by_obj

Is unit conversion with pass_by_obj supported in OpenMDAO 1.4? I have a small repro case:
from openmdao.api import Component, Problem, Group, IndepVarComp

pass_by_obj = True

class PassByObjParaboloid(Component):
    def __init__(self):
        super(PassByObjParaboloid, self).__init__()
        self.fd_options['force_fd'] = True
        self.add_param('x', val=1.0, pass_by_obj=pass_by_obj, units='mm')
        self.add_output('f_xy', val=0.0)

    def solve_nonlinear(self, params, unknowns, resids):
        print params['x']
        assert params['x'] == 1000.0
        unknowns['f_xy'] = params['x']

    def linearize(self, params, unknowns, resids):
        raise Exception()

top = Problem()
root = top.root = Group()
root.add('p1', IndepVarComp('x', 1.0, pass_by_obj=pass_by_obj, units='m'))
root.add('p', PassByObjParaboloid())
root.connect('p1.x', 'p.x')
top.setup()
top.run()
With pass_by_obj=True, the assert fails. top.setup() reports:
Unit Conversions
p1.x -> p.x : m -> mm
So I'd expect the unit conversion to be done.
OpenMDAO currently does not support automatic unit conversions for pass_by_obj variables. When designing OpenMDAO, we didn't intend for floating point data to be transferred using pass_by_obj. We only added pass_by_obj to handle other kinds of variables. We should fix the diagnostic output of setup so that it doesn't list unit conversions that don't actually happen. I'll put a story in for that.
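In the meantime, one possible workaround, as a minimal sketch (assuming the source value is always supplied in meters; with pass_by_obj none of this is done for you), is to declare the param without units and convert manually inside the component:

from openmdao.api import Component

class PassByObjParaboloid(Component):
    def __init__(self):
        super(PassByObjParaboloid, self).__init__()
        self.fd_options['force_fd'] = True
        # no units declared: pass_by_obj skips unit conversion,
        # so the value arrives as meters and we convert by hand
        self.add_param('x', val=1.0, pass_by_obj=True)
        self.add_output('f_xy', val=0.0)

    def solve_nonlinear(self, params, unknowns, resids):
        x_mm = params['x'] * 1000.0  # manual m -> mm conversion (assumed source unit)
        unknowns['f_xy'] = x_mm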

Passing gradients between components; pass_by_obj output

I have a situation where the gradient of one component is by necessity calculated in another component. What I have attempted to do is just have the gradient be an output from the first component and an input to the second component. I have set it to be pass_by_obj so that it doesn't affect other calculations. Any recommendations on whether or not this would be the best way to do it would be appreciated. Nevertheless, I am getting an error when using check_partial_derivatives(). It seems to be an error for any output that is specified as pass_by_obj. Here is a simple case:
import numpy as np
from openmdao.api import Group, Problem, Component, ScipyGMRES, ExecComp, IndepVarComp

class Comp1(Component):
    def __init__(self):
        super(Comp1, self).__init__()
        self.add_param('x', shape=1)
        self.add_output('y', shape=1)
        self.add_output('dz_dy', shape=1, pass_by_obj=True)

    def solve_nonlinear(self, params, unknowns, resids):
        x = params['x']
        unknowns['y'] = 4.0*x + 1.0
        unknowns['dz_dy'] = 2.0*x

    def linearize(self, params, unknowns, resids):
        J = {}
        J['y', 'x'] = 4.0
        return J

class Comp2(Component):
    def __init__(self):
        super(Comp2, self).__init__()
        self.add_param('y', shape=1)
        self.add_param('dz_dy', shape=1, pass_by_obj=True)
        self.add_output('z', shape=1)

    def solve_nonlinear(self, params, unknowns, resids):
        y = params['y']
        unknowns['z'] = y*2.0

    def linearize(self, params, unknowns, resids):
        J = {}
        J['z', 'y'] = params['dz_dy']
        return J

class TestGroup(Group):
    def __init__(self):
        super(TestGroup, self).__init__()
        self.add('x', IndepVarComp('x', 0.0), promotes=['*'])
        self.add('c1', Comp1(), promotes=['*'])
        self.add('c2', Comp2(), promotes=['*'])

p = Problem()
p.root = TestGroup()
p.setup(check=False)
p['x'] = 2.0
p.run()
print p['z']
print 'gradients'
test_grad = open('partial_gradients_test.txt', 'w')
partial = p.check_partial_derivatives(out_stream=test_grad)
I get the following error message:
partial = p.check_partial_derivatives(out_stream=test_grad)
  File "/usr/local/lib/python2.7/site-packages/openmdao/core/problem.py", line 1699, in check_partial_derivatives
    dresids._dat[u_name].val[idx] = 1.0
TypeError: '_ByObjWrapper' object does not support item assignment
I asked before about the params being checked for pass_by_obj in check_partial_derivatives() and it might be simply a matter of checking the unknowns for pass_by_obj as well.
The error you're getting is another bug related to the check_partial_derivatives function. It should be easy enough to fix, but in the meantime you can just remove the pass_by_obj setting. Since you're computing a value in one component and passing it to another, there isn't any need for pass_by_obj at all (and it will be more efficient if you don't use it).
You said that you did it so that it "doesn't affect other calculations", but I don't quite know what you mean by that. It won't affect anything unless you use it in the solve_nonlinear method.
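A minimal sketch of that change against the example above (the pass_by_obj flags are simply dropped, so dz_dy travels as an ordinary float):

# in Comp1.__init__
self.add_output('dz_dy', shape=1)
# in Comp2.__init__
self.add_param('dz_dy', shape=1)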

Check Partial Derivatives with pass_by_obj

I have a component that has an input that is an int so I am setting pass_by_obj = True. However, when I check derivatives with check_partial_derivatives(), it throws this error:
data = prob.check_partial_derivatives(out_stream=sys.stdout)
  File "/usr/local/lib/python2.7/site-packages/openmdao/core/problem.py", line 1711, in check_partial_derivatives
    jac_rev[(u_name, p_name)][idx, :] = dinputs._dat[p_name].val
TypeError: float() argument must be a string or a number
It appears to be trying to take the derivative even though it cannot. Here is a simple example:
import sys
from openmdao.api import IndepVarComp, Problem, Group, Component

class Comp(Component):
    def __init__(self):
        super(Comp, self).__init__()
        self.add_param('x', val=0.0)
        self.add_param('y', val=3, pass_by_obj=True)
        self.add_output('z', val=0.0)

    def solve_nonlinear(self, params, unknowns, resids):
        unknowns['z'] = params['y']*params['x']

    def linearize(self, params, unknowns, resids):
        J = {}
        J['z', 'x'] = params['y']
        return J

prob = Problem()
prob.root = Group()
prob.root.add('comp', Comp(), promotes=['*'])
prob.root.add('p1', IndepVarComp('x', 0.0), promotes=['x'])
prob.root.add('p2', IndepVarComp('y', 3, pass_by_obj=True), promotes=['y'])
prob.setup(check=False)
prob['x'] = 2.0
prob['y'] = 3
prob.run()
print prob['z']
data = prob.check_partial_derivatives(out_stream=sys.stdout)
Is it possible to use the check_partial_derivatives() method with components that have inputs specified as pass_by_obj? I don't care about the derivatives with respect to the pass_by_obj inputs, but I do care about the other inputs.
Thanks for the report and test. This was a bug where we weren't excluding the design variables that were declared pass_by_obj. I've got a pull request up on the OpenMDAO repo with a fix. It'll probably be merged to master within a day.
EDIT -- The fix is merged. https://github.com/OpenMDAO/OpenMDAO/commit/b123b284e46aac7e15fa9bce3751f9ad9bb63b95
