External code incompatibility with metadata? - openmdao

If I use the same setup with metadata for an ExternalCode component and an ExplicitComponent, it seems to fail with the ExternalCode comp. The code below contains both components. As-is there is no error; if I comment out the ExplicitComponent part, I get this error:
comp = GFWrapper(DVLO=np.array(DVLIST))
TypeError: __init__() got an unexpected keyword argument 'DVLO'
import numpy as np
import json as js
import re, sys, subprocess, os

from openmdao.api import Problem, Group, IndepVarComp
from openmdao.api import ScipyOptimizeDriver
from openmdao.api import ExternalCode

class GFWrapper(ExternalCode):

    def initialize(self):
        self.metadata.declare('DVLO', types=np.ndarray)

    def setup(self):
        DVLO = self.metadata['DVLO']
        for dv in DVLO:
            self.add_input(dv)
        self.add_output('OBJECTIVE')

        self.input_file = 'GFWrapper_input.dat'
        self.output_file = 'GFWrapper_output.dat'

        self.options['external_input_files'] = [self.input_file,]
        self.options['external_output_files'] = [self.output_file,]
        self.options['command'] = [
            'python', 'run.py', self.input_file, self.output_file
        ]

        #self.declare_partials(of='*', wrt='*', method='fd')
        for dv in DVLO:
            self.declare_partials(of='OBJECTIVE', wrt=dv, method='fd')

    def compute(self, inputs, outputs):
        DVLO = self.metadata['DVLO']

        # generate the input file for the external code
        outF = open(self.input_file, "w")
        for dv in DVLO:
            outF.write(inputs[dv])
            print(dv, inputs[dv])
            outF.write("\n")
        outF.close()

        # the parent compute function actually runs the external code
        super(GFWrapper, self).compute(inputs, outputs)

        # parse the output file from the external code
        file_contents = np.loadtxt(self.output_file)
        outputs['OBJECTIVE'] = file_contents[0]
from openmdao.api import ExplicitComponent
#
class GFWrapper(ExplicitComponent):

    def initialize(self):
        self.metadata.declare('DVLO', types=np.ndarray)

    def setup(self):
        DVLO = self.metadata['DVLO']
        for dv in DVLO:
            self.add_input(dv)
        self.add_output('OBJECTIVE')

        #self.declare_partials(of='*', wrt='*', method='fd')
        for dv in DVLO:
            self.declare_partials(of='OBJECTIVE', wrt=dv, method='fd')

    def compute(self, inputs, outputs):
        DVLO = self.metadata['DVLO']
        powetemp = 0
        for dv in DVLO:
            powetemp += inputs[dv]
        outputs['OBJECTIVE'] = powetemp
#
DVLIST = ['DV1', 'DV2']
DVMIN = [2, 11]
DVMAX = [3, 12]
InitDVVal = [3, 5]
nr_of_desvar = len(DVLIST)

top = Problem()
top.model = model = Group()

" Introduce independent variables; later these will be design variables (optimization parameters) "
inputs_comp = IndepVarComp()
for i in range(nr_of_desvar):
    inputs_comp.add_output(DVLIST[i], InitDVVal[i])

" Add components/subsystems to the Group (model) "
model.add_subsystem('inputs_comp', inputs_comp)

comp = GFWrapper(DVLO=np.array(DVLIST))
model.add_subsystem('GFWrapper', comp)

This is a bug in OpenMDAO version 2.2. The error occurs because we didn't include **kwargs in the __init__ method of ExternalCode. We intend to fix this for V2.3, but in the meantime you can work around it for your GFWrapper component by adding an __init__ method like this:
class GFWrapper(ExternalCode):

    def __init__(self, **kwargs):
        super(GFWrapper, self).__init__()
        self.metadata.update(kwargs)
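With that override in place, passing the metadata through the constructor works again. A minimal usage sketch, reusing DVLIST and model from the script above:

comp = GFWrapper(DVLO=np.array(DVLIST))  # no longer raises the TypeError
model.add_subsystem('GFWrapper', comp)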

Related

OpenMDAO: How to resolve 'numpy.ndarray' object has no attribute 'log' error

I am new to OpenMDAO and am trying to solve an optimization problem. When I run the code, I receive the following error: "'numpy.ndarray' object has no attribute 'log'". I cannot resolve the problem. Any suggestions?
I have reviewed the OpenMDAO documentation.
Error message: 'numpy.ndarray' object has no attribute 'log'
from __future__ import division, print_function
import openmdao.api as om
import numpy as np

class Objective(om.ExplicitComponent):

    def setup(self):
        self.add_input('mu1', val=3.84)
        self.add_input('mu2', val=3.84)
        self.add_output('f', val=0.00022)

        # Finite difference all partials.
        self.declare_partials('*', '*', method='fd')

    def compute(self, inputs, outputs):
        mu1 = inputs['mu1']
        mu2 = inputs['mu2']
        outputs['f'] = np.log((mu1*(0.86))/(1.0-(mu1*0.14))) + np.log((mu2*(0.86))/(1.0-(mu2*0.14)))

# build the model
prob = om.Problem()
indeps = prob.model.add_subsystem('indeps', om.IndepVarComp())
indeps.add_output('mu1', 3.84)
indeps.add_output('mu2', 3.84)

prob.model.add_subsystem('obj', Objective())
prob.model.add_subsystem('cnst', om.ExecComp('g = 7924.8 - 2943.0*(np.log(mu1)) - 2943.0*(np.log(mu2))'))

prob.model.connect('indeps.mu1', 'obj.mu1')
prob.model.connect('indeps.mu2', 'obj.mu2')
prob.model.connect('indeps.mu1', 'cnst.mu1')
prob.model.connect('indeps.mu2', 'cnst.mu2')

# setup the optimization
prob.driver = om.ScipyOptimizeDriver()
prob.driver.options['optimizer'] = 'COBYLA'

prob.model.add_design_var('indeps.mu1', lower=0.0, upper=5.0)
prob.model.add_design_var('indeps.mu2', lower=0.0, upper=5.0)
prob.model.add_objective('obj.f')
prob.model.add_constraint('cnst.g', lower=0.0, upper=0.0)

prob.setup()
prob.run_driver()
The problem is in the definition of your ExecComp. You have np.log, but because of the way ExecComp parses the expression string, you just want log.
Try this instead:
'g = 7924.8 - 2943.0*(log(mu1)) - 2943.0*(log(mu2))'
With that change I got:
Normal return from subroutine COBYLA
NFVALS = 46 F = 3.935882E+00 MAXCV = 2.892193E-10
X = 3.843492E+00 3.843491E+00
Optimization Complete
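Applying that change to the model from the question, only the constraint definition needs to be updated; a sketch of the corrected line (everything else stays the same):

# ExecComp expressions use bare math names such as log, sin, exp -- not the np. prefix
prob.model.add_subsystem('cnst', om.ExecComp('g = 7924.8 - 2943.0*(log(mu1)) - 2943.0*(log(mu2))'))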

Splitting up connections between groups

I would like to know the best way to split up the connection command. I have two groups that I want to be modular: an inner group and an outer group. I want the inner group to be a kind of black box, so I can switch out or change the inner group without changing all the connections for the outer group. The outer group should only have to know the inputs and outputs of the inner group. For example:
import numpy as np

from openmdao.api import Group, Problem, Component, IndepVarComp, ExecComp

class C(Component):
    def __init__(self, n):
        super(C, self).__init__()
        self.add_param('array_element', shape=1)
        self.add_output('new_element', shape=1)

    def solve_nonlinear(self, params, unknowns, resids):
        unknowns['new_element'] = params['array_element']*2.0

class MUX(Component):
    def __init__(self, n):
        super(MUX, self).__init__()
        for i in range(n):
            self.add_param('new_element' + str(i), shape=1)
        self.add_output('new_array', shape=n)
        self.n = n

    def solve_nonlinear(self, params, unknowns, resids):
        new_array = np.zeros(self.n)
        for i in range(self.n):
            new_array[i] = params['new_element' + str(i)]
        unknowns['new_array'] = new_array

class GroupInner(Group):
    def __init__(self, n):
        super(GroupInner, self).__init__()
        for i in range(n):
            self.add('c'+str(i), C(n))
            self.connect('array', 'c'+str(i) + '.array_element', src_indices=[i])
            self.connect('c'+str(i)+'.new_element', 'new_element'+str(i))
        self.add('mux', MUX(n), promotes=['*'])

class GroupOuter(Group):
    def __init__(self, n):
        super(GroupOuter, self).__init__()
        self.add('array', IndepVarComp('array', np.zeros(n)), promotes=['*'])
        self.add('inner', GroupInner(n), promotes=['new_array'])
        for i in range(n):
            # self.connect('array', 'inner.c'+str(i) + '.array_element', src_indices=[i])
            self.connect('array', 'inner.array', src_indices=[i])

n = 3
p = Problem()
p.root = GroupOuter(n)
p.setup(check=False)

p['array'] = np.ones(n)
p.run()

print p['new_array']
When I run the code I get the error that:
NameError: Source 'array' cannot be connected to target 'c0.array_element': 'array' does not exist.
To try to solve this I made 'array' an IndepVarComp in the GroupInner group. However, when I do this I get the error:
NameError: Source 'array' cannot be connected to target 'inner.array': Target must be a parameter but 'inner.array' is an unknown.
I know that if I just make the full connection, self.connect('array', 'inner.c'+str(i) + '.array_element', src_indices=[i]), then it will work. But like I said, I want GroupInner to be kind of a black box where I don't know what groups or components are in it. I also can't just promote everything, because the array_element variables are all different. Is it possible to do this, or do you have to make the entire connection in one command?
I have two answers to your question. First I'll get the problem working as you specified it. Second, I'll suggest a modification that I think might be more efficient for some applications of this model structure.
First, the main issue with the problem as you specified it was the following line
self.connect('array', 'c'+str(i) + '.array_element', src_indices=[i])
There simply isn't an output or state named array anywhere inside the Inner group, so that connection isn't going to work. You did create a variable called 'array' in the Outer group, but you can't issue a connection to it from inside the Inner definition because it's not available in that scope. To make it work the way you've specified, the simplest way would be to do the following:
class GroupInner(Group):
    def __init__(self, n):
        super(GroupInner, self).__init__()
        for i in range(n):
            self.add('c'+str(i), C(n))
            self.connect('c%d.new_element' % i, 'new_element'+str(i))
        self.add('mux', MUX(n), promotes=['*'])

class GroupOuter(Group):
    def __init__(self, n):
        super(GroupOuter, self).__init__()
        self.add('array', IndepVarComp('array', np.zeros(n)), promotes=['*'])
        self.add('inner', GroupInner(n), promotes=['new_array'])
        for i in range(n):
            self.connect('array', 'inner.c%d.array_element' % i, src_indices=[i])
Here is an alternate approach that reduces the number of variables and components in your model, which helps reduce setup times if n grows large: use an actual distributed component and partition the array using the MPI comm. Besides the setup cost savings, this has some nice properties: it lets you scale your calculations more flexibly and improves efficiency even when you run in serial. This solution works well if your model really would have multiple c instances all doing the same thing and the computation can simply be vectorized via numpy.
import numpy as np

from openmdao.api import Group, Problem, Component, IndepVarComp
from openmdao.util.array_util import evenly_distrib_idxs

from openmdao.core.mpi_wrap import MPI

if MPI:
    # if you called this script with 'mpirun', then use the petsc data passing
    from openmdao.core.petsc_impl import PetscImpl as impl
else:
    # if you didn't use 'mpirun', then use the numpy data passing
    from openmdao.api import BasicImpl as impl

class C(Component):
    def __init__(self, n):
        super(C, self).__init__()
        self.add_param('in_array', shape=n)
        self.add_output('new_array', shape=n)
        self.n = n

    def get_req_procs(self):
        """
        min/max number of procs that this component can use
        """
        return (1, self.n)

    # NOTE: This needs to be setup_distrib_idx for <= version 1.5.0
    def setup_distrib(self):
        comm = self.comm
        rank = comm.rank

        # NOTE: evenly_distrib_idxs is a helper function to split the array
        #       up as evenly as possible
        sizes, offsets = evenly_distrib_idxs(comm.size, self.n)
        local_size, local_offset = sizes[rank], offsets[rank]
        self.local_size = int(local_size)

        start = local_offset
        end = local_offset + local_size

        self.set_var_indices('in_array', val=np.zeros(local_size, float),
                             src_indices=np.arange(start, end, dtype=int))
        self.set_var_indices('new_array', val=np.zeros(local_size, float),
                             src_indices=np.arange(start, end, dtype=int))

    def solve_nonlinear(self, params, unknowns, resids):
        unknowns['new_array'] = params['in_array']*2.0
        print "computing new_array: ", unknowns['new_array']

class GroupInner(Group):
    def __init__(self, n):
        super(GroupInner, self).__init__()
        self.add('c', C(n), promotes=['new_array', 'in_array'])

class GroupOuter(Group):
    def __init__(self, n):
        super(GroupOuter, self).__init__()
        self.add('array', IndepVarComp('array', np.zeros(n)), promotes=['*'])
        self.add('inner', GroupInner(n), promotes=['new_array',])
        self.connect('array', 'inner.in_array')

n = 3
p = Problem(impl=impl)
p.root = GroupOuter(n)
p.setup(check=False)

p['array'] = np.ones(n)
p.run()

print p['new_array']

openmdao v1.4 optimization with metamodel

I wish to perform an optimization with openmdao 1.4 on a metamodel. Using the tutorials I have built up a problem that I do not manage to solve. I think the problem comes from a misuse of setup() and run(): I do not manage to train my metamodel and optimize on it at the same time (perhaps I should use two different "groups" to do this...).
Here is my code:
from __future__ import print_function
from openmdao.api import Component, Group, MetaModel, IndepVarComp, ExecComp, NLGaussSeidel, KrigingSurrogate, FloatKrigingSurrogate
import numpy as np

class KrigMM(Group):
    ''' FloatKriging gives responses as floats '''

    def __init__(self):
        super(KrigMM, self).__init__()

        # Create meta_model for f_x as the response
        pmm = self.add("pmm", MetaModel())
        pmm.add_param('x', val=0.)
        pmm.add_output('f_x:float', val=0., surrogate=FloatKrigingSurrogate())

        self.add('p1', IndepVarComp('x', 0.0))
        self.connect('p1.x', 'pmm.x')
        # mm.add_output('f_xy:norm_dist', val=(0.,0.), surrogate=KrigingSurrogate())

if __name__ == '__main__':
    # Setup and run the model.
    from openmdao.core.problem import Problem
    from openmdao.drivers.scipy_optimizer import ScipyOptimizer
    from openmdao.core.driver import Driver
    import numpy as np
    import doe_lhs

    #prob = Problem(root=ParaboloidProblem())
    ###########################################################
    prob = Problem(root=Group())
    prob.root.add('meta', KrigMM(), promotes=['*'])

    prob.driver = ScipyOptimizer()
    prob.driver.options['optimizer'] = 'SLSQP'
    prob.driver.add_desvar('p1.x', lower=0, upper=10)
    prob.driver.add_objective('pmm.f_x:float')

    prob.setup()
    prob['pmm.train:x'] = np.linspace(0, 10, 20)
    prob['pmm.train:f_x:float'] = np.sin(prob['pmm.train:x'])
    prob.run()

    print('\n')
    print('Minimum of %f found for meta at %f' % (prob['pmm.f_x:float'], prob['pmm.x']))  # predicted value
I believe your problem is actually working fine. It's just that the sinusoid you've picked has a local optimum at 0.0, which happens to be your initial condition.
If I change the initial condition as follows:
prob.setup()
prob['p1.x'] = 5
prob['pmm.train:x'] = np.linspace(0,10,20)
prob['pmm.train:f_x:float'] = np.sin(prob['pmm.train:x'])
prob.run()
I get:
Optimization terminated successfully. (Exit mode 0)
Current function value: [-1.00004544]
Iterations: 3
Function evaluations: 3
Gradient evaluations: 3
Optimization Complete
-----------------------------------
Minimum of -1.000045 found for meta at 4.710483

Check Partial Derivatives with pass_by_obj

I have a component that has an input that is an int so I am setting pass_by_obj = True. However, when I check derivatives with check_partial_derivatives(), it throws this error:
data = prob.check_partial_derivatives(out_stream=sys.stdout)
File "/usr/local/lib/python2.7/site-packages/openmdao/core/problem.py", line 1711, in check_partial_derivatives
jac_rev[(u_name, p_name)][idx, :] = dinputs._dat[p_name].val
TypeError: float() argument must be a string or a number
It appears to be trying to take the derivative even though it cannot. Here is a simple example:
import sys

from openmdao.api import IndepVarComp, Problem, Group, Component

class Comp(Component):
    def __init__(self):
        super(Comp, self).__init__()
        self.add_param('x', val=0.0)
        self.add_param('y', val=3, pass_by_obj=True)
        self.add_output('z', val=0.0)

    def solve_nonlinear(self, params, unknowns, resids):
        unknowns['z'] = params['y']*params['x']

    def linearize(self, params, unknowns, resids):
        J = {}
        J['z', 'x'] = params['y']
        return J

prob = Problem()
prob.root = Group()
prob.root.add('comp', Comp(), promotes=['*'])
prob.root.add('p1', IndepVarComp('x', 0.0), promotes=['x'])
prob.root.add('p2', IndepVarComp('y', 3, pass_by_obj=True), promotes=['y'])
prob.setup(check=False)

prob['x'] = 2.0
prob['y'] = 3

prob.run()
print prob['z']
data = prob.check_partial_derivatives(out_stream=sys.stdout)
Is it possible to use the check_partial_derivatives() method with components that have inputs specified as pass_by_obj? I don't care about the derivatives with respect to the pass_by_obj inputs, but I do care about the other inputs.
Thanks for the report and test. This was a bug where we weren't excluding the design variables that were declared pass_by_obj. I've got a pull request up on the OpenMDAO repo with a fix. It'll probably be merged to master within a day.
EDIT -- The fix is merged. https://github.com/OpenMDAO/OpenMDAO/commit/b123b284e46aac7e15fa9bce3751f9ad9bb63b95

Connecting an element of array to another Component's input

I am trying to connect the last element of an output of one component to the input of another component. An example is shown below:
import numpy as np

from openmdao.api import Component, Problem, Group

class C1(Component):
    def __init__(self):
        super(C1, self).__init__()
        self.add_param('fin', val=1.0)
        self.add_output('arr', val=np.zeros(5))

    def solve_nonlinear(self, params, unknowns, resids):
        fin = params['fin']
        unknowns['arr'] = np.array([2*fin])

class C2(Component):
    def __init__(self):
        super(C2, self).__init__()
        self.add_param('flt', val=0.0)
        self.add_output('fout', val=0.0)

    def solve_nonlinear(self, params, unknowns, resids):
        flt = params['flt']
        unknowns['fout'] = 2*flt

class A(Group):
    def __init__(self):
        super(A, self).__init__()
        self.add('c1', C1())
        self.add('c2', C2())

        self.connect('c1.arr[-1]', 'c2.flt')

if __name__ == '__main__':
    a = Problem()
    a.root = A()
    a.setup()
    a.run()

    print a.root.c2.unknowns['fout']
I am given the error:
openmdao.core.checks.ConnectError: Source 'c1.arr[-1]' cannot be connected to target 'c2.flt': 'c1.arr[-1]' does not exist.
Is there a way to do this? I know it worked in the old version of OpenMDAO.
OpenMDAO supports connection to specific indices of a source by using the 'src_indices' arg. For example:
self.connect('c1.arr', 'c2.flt', src_indices=[4])
Negative indices are not currently supported.
There are a number of small issues here. First, the solve_nonlinear method of C1 computes an array of the wrong size. It happens to work out, but you should really set the array to the right size (length 5).
To connect part of an array (see the docs and more advanced docs), you specify the src_indices argument to connect.
import numpy as np

from openmdao.api import Component, Problem, Group

class C1(Component):
    def __init__(self):
        super(C1, self).__init__()
        self.add_param('fin', val=1.0)
        self.add_output('arr', val=np.zeros(5))

    def solve_nonlinear(self, params, unknowns, resids):
        fin = params['fin']
        unknowns['arr'] = fin*np.arange(5)

class C2(Component):
    def __init__(self):
        super(C2, self).__init__()
        self.add_param('flt', val=0.0)
        self.add_output('fout', val=0.0)

    def solve_nonlinear(self, params, unknowns, resids):
        flt = params['flt']
        unknowns['fout'] = 2*flt

class A(Group):
    def __init__(self):
        super(A, self).__init__()
        self.add('c1', C1())
        self.add('c2', C2())

        self.connect('c1.arr', 'c2.flt', src_indices=[4,])

if __name__ == '__main__':
    a = Problem()
    a.root = A()
    a.setup()
    a.run()

    print a.root.c2.unknowns['fout']
