OpenMDAO doesn't want to move away from the defaults

I have some geometry that I've parameterised upstream in a mesh pre-processor, which produces the inputs for my external code. The workflow builds a geometry from its inputs, calls my external solver, and then ingests the outputs from a CSV file. I've gotten it to run, but the optimisation certainly hasn't minimised the value of the function. In fact, it has only tried values perturbed slightly away from the starting point.
I have 5 design variables which impact the geometry and are set to vary over various ranges; let's call them a, b, c, d and e. The nominal starting value of each of these variables is 0.1. It's clear from the history of the files I saved what it has tried, e.g.:
a        | b        | c        | d        | e
---------------------------------------------------
0.100001 | 0.1      | 0.1      | 0.1      | 0.1
0.1      | 0.100001 | 0.1      | 0.1      | 0.1
0.1      | 0.1      | 0.100001 | 0.1      | 0.1
0.1      | 0.1      | 0.1      | 0.100001 | 0.1
0.1      | 0.1      | 0.1      | 0.1      | 0.100001
0.1      | 0.1      | 0.1      | 0.1      | 0.1
It then somehow decides the original starting values were optimal, which is strange, since we can view the returned value (fout, in my case) for each trial in turn. I then realised that I needed to scale my return value to something much closer to 1; having done that, I get a few more comparisons, but now I get the following:
Inequality constraints incompatible (Exit mode 4)
Current function value: [0.90320516]
Iterations: 3
Function evaluations: 2
Gradient evaluations: 2
Optimization FAILED.
Inequality constraints incompatible
-----------------------------------
Optimal pfc_thickness: [0.1]
Optimal conductor radius: [0.10000001]
Optimal conductor thickness: [0.1]
Optimal cut height: [0.1]
Optimal cut width: [0.3745392]
Objective value: [0.90320516]
Any kind souls out there that have tried something similar and can provide feedback/tips?
My complete code is shown below.
from openmdao.api import Problem, Group, ExternalCodeComp, IndepVarComp, ScipyOptimizeDriver
import sys
import os
from make_geometry import *
file = open("data.txt", "w")
class ExternalCode(ExternalCodeComp):
def setup(self):
self.iteration = 0
self.add_input('pfc_thickness', val = 0.0)
self.add_input('conductor_radius', val = 0.0)
self.add_input('conductor_thickness', val = 0.0)
self.add_input('conductor_origin_x', val = 0.0)
self.add_input('conductor_origin_y', val = 0.0)
self.add_input('cut_height', val = 0.0)
self.add_input('cut_width', val = 0.0)
self.add_output('fout', val=0.0)
problem_name = "mdao"
self.geometry_instance = build_geometry(problem_name)
self.input_file = 'input-thermal.i'
self.output_file = 'input-thermal_csv.csv'
self.options['external_input_files'] = [self.input_file]
self.options['external_output_files'] = [self.output_file]
#os.environ['LD_LIBRARY_PATH'] = '/usr/lib64/mpich/lib:/usr/lib64'
self.options['command'] = ['/home/adavis/opt/moose/modules/combined/combined-opt','-i','input-thermal.i']
def setup_partials(self):
# this external code does not provide derivatives, use finite difference
self.declare_partials(of='*', wrt='*', method='fd')
def __read_csv(self,filename):
import csv
with open(filename) as csv_file:
csv_reader = csv.reader(csv_file, delimiter=',')
line_count = 0
for row in csv_reader:
data = row
return data[1] # this is the maxtemp
def compute(self, inputs, outputs):
self.iteration = self.iteration + 1
# set the variables
print(inputs)
pfc_thickness = inputs['pfc_thickness'][0]
conductor_radius = inputs['conductor_radius'][0]
conductor_thickness = inputs['conductor_thickness'][0]
#conductor_origin_x = inputs['conductor_origin_x']
#conductor_origin_y = inputs['conductor_origin_y']
cut_height = inputs['cut_height'][0]
cut_width = inputs['cut_width'][0]
# set the geometry instance
self.geometry_instance.pfc_thickness = pfc_thickness
self.geometry_instance.conductor_radius = conductor_radius
self.geometry_instance.conductor_thickness = conductor_thickness
self.geometry_instance.conductor_origin_x = 5.0
self.geometry_instance.conductor_origin_y = 5.0
self.geometry_instance.cut_height = cut_height
self.geometry_instance.cut_width = cut_width
if os.path.isfile('mdao.e'):
os.remove('mdao.e')
if os.path.isfile('mdao.e'):
os.remove('input-thermal_csv.csv')
# build the file
self.geometry_instance.build_all()
# test to make sure mesh exists
if not os.path.isfile('mdao.e'):
print('meshing failed')
sys.exit(1)
filename = '%s_%s_%s_%s_%s.cub' % (pfc_thickness,conductor_radius,conductor_thickness,cut_height,cut_width)
os.system('cp mdao.cub %s' % (filename))
# the parent compute function actually runs the external code
super().compute(inputs, outputs)
# get the output
f_xy = self.__read_csv(self.output_file)
file.write('%s %s\n' % (self.iteration, f_xy))
# set the output data in f_out
outputs['fout'] = f_xy
if __name__ == "__main__":
prob = Problem()
# Create and connect inputs
prob.model.add_subsystem('p', ExternalCode())
prob.driver = ScipyOptimizeDriver()
prob.driver.options['optimizer'] = 'SLSQP'
prob.model.add_design_var('p.pfc_thickness', lower=0.1, upper = 2)
prob.model.add_design_var('p.conductor_radius', lower=0.1, upper = 3.5)
prob.model.add_design_var('p.conductor_thickness', lower=0.1, upper = 1.0)
prob.model.add_design_var('p.cut_height', lower=0.1, upper = 1.5)
prob.model.add_design_var('p.cut_width', lower=0.1, upper = 3.0)
prob.model.add_objective('p.fout')
prob.driver.options['tol'] = 1e-9
prob.driver.options['disp'] = True
prob.model.approx_totals()
# run the ExternalCodeComp Component
prob.setup()
prob.set_solver_print(level=2)
# Set input values
prob.set_val('p.pfc_thickness', 0.1)
prob.set_val('p.conductor_radius', 0.1)
prob.set_val('p.conductor_thickness', 0.1)
prob.set_val('p.cut_height', 0.1)
prob.set_val('p.cut_width', 0.1)
prob.run_driver()
print('Optimal pfc_thickness: ', prob.get_val('p.pfc_thickness'))
print('Optimal conductor radius: ', prob.get_val('p.conductor_radius'))
print('Optimal conductor thickness: ', prob.get_val('p.conductor_thickness'))
print('Optimal cut height: ', prob.get_val('p.cut_height'))
print('Optimal cut width: ', prob.get_val('p.cut_width'))
print('Objective value: ', prob.get_val('p.fout'))
file.close()

Is your output file ('input-thermal_csv.csv') being properly regenerated on each execution? I ask because it looks like you intend to delete it in your compute, but the condition seems incorrect so it would never get deleted:
if os.path.isfile('mdao.e'):
os.remove('mdao.e')
if os.path.isfile('mdao.e'):
os.remove('input-thermal_csv.csv')
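Presumably the intent was something like the following (a small sketch of the corrected checks):
if os.path.isfile('mdao.e'):
    os.remove('mdao.e')
if os.path.isfile('input-thermal_csv.csv'):
    os.remove('input-thermal_csv.csv')
As written, the second test re-checks 'mdao.e', which was just removed, so the CSV is never deleted and a stale copy could be read back on the next evaluation.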

When you use finite difference, you need to be very careful about step sizes and methods. Consider switching to a relative rather than absolute step size, or using a different step size (the default in OpenMDAO V3.9 is 1e-6, absolute). If you are using file I/O, be sure you are writing the output with a sufficient number of digits as well. If you truncate the output to, say, 8 or 9 digits, then you might be getting effectively 0 change for very small steps (because the change is down in the digits you are not printing to the file).
You can have a look at the options in the declare_partials and check_partials methods to see how to adjust these settings.
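For reference, a minimal sketch of adjusting those settings in the component (the particular values here are only illustrative, not recommendations):
def setup_partials(self):
    # illustrative only: relative, central-difference FD instead of the default absolute 1e-6 forward step
    self.declare_partials(of='*', wrt='*', method='fd',
                          step=1e-4, step_calc='rel', form='central')
    # the same step / step_calc / form keywords can be passed to prob.model.approx_totals()
Running prob.check_partials(compact_print=True) afterwards is then a quick way to see how sensitive the resulting derivatives are to these choices.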

Related

Optimizing Distributed I/O with serial output

I am having trouble understanding how to optimize a distributed component with a serial output. This is my attempt with an example problem given in the OpenMDAO docs.
import numpy as np
import openmdao.api as om
from openmdao.utils.array_utils import evenly_distrib_idxs
from openmdao.utils.mpi import MPI
class MixedDistrib2(om.ExplicitComponent):
def setup(self):
# Distributed Input
self.add_input('in_dist', shape_by_conn=True, distributed=True)
# Serial Input
self.add_input('in_serial', val=1)
# Distributed Output
self.add_output('out_dist', copy_shape='in_dist', distributed=True)
# Serial Output
self.add_output('out_serial', copy_shape='in_serial')
#self.declare_partials('*','*', method='cs')
def compute(self, inputs, outputs):
x = inputs['in_dist']
y = inputs['in_serial']
# "Computationally Intensive" operation that we wish to parallelize.
f_x = x**2 - 2.0*x + 4.0
# These operations are repeated on all procs.
f_y = y ** 0.5
g_y = y**2 + 3.0*y - 5.0
# Compute square root of our portion of the distributed input.
g_x = x ** 0.5
# Distributed output
outputs['out_dist'] = f_x + f_y
# Serial output
if MPI and comm.size > 1:
# We need to gather the summed values to compute the total sum over all procs.
local_sum = np.array(np.sum(g_x))
total_sum = local_sum.copy()
self.comm.Allreduce(local_sum, total_sum, op=MPI.SUM)
outputs['out_serial'] = g_y * total_sum
else:
# Recommended to make sure your code can run in serial too, for testing.
outputs['out_serial'] = g_y * np.sum(g_x)
size = 7
if MPI:
comm = MPI.COMM_WORLD
rank = comm.rank
sizes, offsets = evenly_distrib_idxs(comm.size, size)
else:
# When running in serial, the entire variable is on rank 0.
rank = 0
sizes = {rank : size}
offsets = {rank : 0}
prob = om.Problem()
model = prob.model
# Create a distributed source for the distributed input.
ivc = om.IndepVarComp()
ivc.add_output('x_dist', np.zeros(sizes[rank]), distributed=True)
ivc.add_output('x_serial', val=1)
model.add_subsystem("indep", ivc)
model.add_subsystem("D1", MixedDistrib2())
model.add_subsystem('con_cmp1', om.ExecComp('con1 = y**2'), promotes=['con1', 'y'])
model.connect('indep.x_dist', 'D1.in_dist')
model.connect('indep.x_serial', ['D1.in_serial','y'])
prob.driver = om.ScipyOptimizeDriver()
prob.driver.options['optimizer'] = 'SLSQP'
model.add_design_var('indep.x_serial', lower=5, upper=10)
model.add_constraint('con1', upper=90)
model.add_objective('D1.out_serial')
prob.setup(force_alloc_complex=True)
#prob.setup()
# Set initial values of distributed variable.
x_dist_init = [1,1,1,1,1,1,1]
prob.set_val('indep.x_dist', x_dist_init)
# Set initial values of serial variable.
prob.set_val('indep.x_serial', 10)
#prob.run_model()
prob.run_driver()
print('x_dist', prob.get_val('indep.x_dist', get_remote=True))
print('x_serial', prob.get_val('indep.x_serial'))
print('Obj', prob.get_val('D1.out_serial'))
The problem is with defining partials with 'fd' or 'cs'. I cannot define partials of a serial output w.r.t. a distributed input, so I used prob.setup(force_alloc_complex=True) to use complex step. But this gives me the warning DerivativesWarning: Constraints or objectives [('D1.out_serial', inds=[0])] cannot be impacted by the design variables of the problem. I understand the warning is raised because the total derivative is 0, but I don't understand why. Clearly the total derivative should not be 0 here. I guess this is because I didn't explicitly declare_partials in the component. I tried removing the distributed parts and ran it again with declare_partials, and this works correctly (code below).
import numpy as np
import openmdao.api as om
class MixedDistrib2(om.ExplicitComponent):
def setup(self):
self.add_input('in_dist', np.zeros(7))
self.add_input('in_serial', val=1)
self.add_output('out_serial', val=0)
self.declare_partials('*','*', method='cs')
def compute(self, inputs, outputs):
x = inputs['in_dist']
y = inputs['in_serial']
g_y = y**2 + 3.0*y - 5.0
g_x = x ** 0.5
outputs['out_serial'] = g_y * np.sum(g_x)
prob = om.Problem()
model = prob.model
model.add_subsystem("D1", MixedDistrib2(), promotes_inputs=['in_dist', 'in_serial'], promotes_outputs=['out_serial'])
model.add_subsystem('con_cmp1', om.ExecComp('con1 = in_serial**2'), promotes=['con1', 'in_serial'])
prob.driver = om.ScipyOptimizeDriver()
prob.driver.options['optimizer'] = 'SLSQP'
model.add_design_var('in_serial', lower=5, upper=10)
model.add_constraint('con1', upper=90)
model.add_objective('out_serial')
prob.setup(force_alloc_complex=True)
prob.set_val('in_dist', [1,1,1,1,1,1,1])
prob.set_val('in_serial', 10)
prob.run_model()
prob.check_totals()
prob.run_driver()
print('x_dist', prob.get_val('in_dist', get_remote=True))
print('x_serial', prob.get_val('in_serial'))
print('Obj', prob.get_val('out_serial'))
What I am trying to understand is:
How do I use 'fd' or 'cs' in a distributed component with a serial output?
What is the meaning of prob.setup(force_alloc_complex=True)? Does it not force the use of cs in all the components in the problem? If so, why does the total derivative become 0?
When I run your code in OpenMDAO V 3.11.0 (after uncommenting the declare_partials call) I get the following error:
RuntimeError: 'D1' <class MixedDistrib2>: component has defined partial ('out_serial', 'in_dist') which is a serial output wrt a distributed input. This is only supported using the matrix free API.
As the error indicates, you can't use the matrix-based API for derivatives in this situation. The reasons why are a bit subtle and probably outside the scope of what needs to be dealt with to answer your question here. It boils down to OpenMDAO not knowing what kind of distributed operations are being done in the compute and having no way to manage those details when you propagate things in reverse.
So you need to use the matrix-free derivative APIs in this situation. When you use the matrix-free APIs you DO NOT declare any partials, because you don't want OpenMDAO to allocate any memory for you to store partials in (and you wouldn't use that memory even if it did).
I've coded them for your example here, but I need to note a few important details:
Your example has a distributed IVC, but as of OpenMDAO V3.11.0 you can't get total derivatives with respect to distributed design variables. I assume you just made it that way to make your simple test case, but in case your real problem was set up this way, you need to note this and not do it this way. Instead, make the IVC serial, and use src indices to distribute the correct parts to each proc.
In the example below, the derivatives are correct. However, there seems to be a bug in the check_partials output when running in parallel, so the reverse-mode partials look like they are off by a factor of the comm size... this will have to get fixed in later releases.
I only did the derivatives for out_serial. out_dist will work similarly and is left as an exercise for the reader :)
You'll notice that I duplicated some code in the compute and compute_jacvec_product methods. You can abstract this duplicated code out into its own method (or call compute from within compute_jacvec_product by providing your own output dictionary). You might be asking why the duplicate call is needed at all: why can't you store the values from the compute call? The answer is, in large part, that OpenMDAO does not guarantee that compute is always called before compute_jacvec_product. I'll also point out that this kind of code duplication is very AD-like; any AD code will have the same kind of duplication built in, even though you don't see it.
import numpy as np
import openmdao.api as om
from openmdao.utils.array_utils import evenly_distrib_idxs
from openmdao.utils.mpi import MPI
class MixedDistrib2(om.ExplicitComponent):
def setup(self):
# Distributed Input
self.add_input('in_dist', shape_by_conn=True, distributed=True)
# Serial Input
self.add_input('in_serial', val=1)
# Distributed Output
self.add_output('out_dist', copy_shape='in_dist', distributed=True)
# Serial Output
self.add_output('out_serial', copy_shape='in_serial')
# self.declare_partials('*','*', method='fd')
def compute(self, inputs, outputs):
x = inputs['in_dist']
y = inputs['in_serial']
# "Computationally Intensive" operation that we wish to parallelize.
f_x = x**2 - 2.0*x + 4.0
# These operations are repeated on all procs.
f_y = y ** 0.5
g_y = y**2 + 3.0*y - 5.0
# Compute square root of our portion of the distributed input.
g_x = x ** 0.5
# Distributed output
outputs['out_dist'] = f_x + f_y
# Serial output
if MPI and comm.size > 1:
# We need to gather the summed values to compute the total sum over all procs.
local_sum = np.array(np.sum(g_x))
total_sum = local_sum.copy()
self.comm.Allreduce(local_sum, total_sum, op=MPI.SUM)
outputs['out_serial'] = g_y * total_sum
else:
# Recommended to make sure your code can run in serial too, for testing.
outputs['out_serial'] = g_y * np.sum(g_x)
def compute_jacvec_product(self, inputs, d_inputs, d_outputs, mode):
x = inputs['in_dist']
y = inputs['in_serial']
g_y = y**2 + 3.0*y - 5.0
# "Computationally Intensive" operation that we wish to parallelize.
f_x = x**2 - 2.0*x + 4.0
# These operations are repeated on all procs.
f_y = y ** 0.5
g_y = y**2 + 3.0*y - 5.0
# Compute square root of our portion of the distributed input.
g_x = x ** 0.5
# Distributed output
out_dist = f_x + f_y
# Serial output
if MPI and comm.size > 1:
# We need to gather the summed values to compute the total sum over all procs.
local_sum = np.array(np.sum(g_x))
total_sum = local_sum.copy()
self.comm.Allreduce(local_sum, total_sum, op=MPI.SUM)
# total_sum
else:
# Recommended to make sure your code can run in serial too, for testing.
total_sum = np.sum(g_x)
num_x = len(x)
d_f_x__d_x = np.diag(2*x - 2.)
d_f_y__d_y = np.ones(num_x)*0.5*y**-0.5
d_g_y__d_y = 2*y + 3.
d_g_x__d_x = 0.5*x**-0.5
d_out_dist__d_x = d_f_x__d_x # square matrix
d_out_dist__d_y = d_f_y__d_y # num_x,1
d_out_serial__d_y = d_g_y__d_y # scalar
d_out_serial__d_x = g_y*d_g_x__d_x.reshape((1,num_x))
if mode == 'fwd':
if 'out_serial' in d_outputs:
if 'in_dist' in d_inputs:
d_outputs['out_serial'] += d_out_serial__d_x.dot(d_inputs['in_dist'])
if 'in_serial' in d_inputs:
d_outputs['out_serial'] += d_out_serial__d_y.dot(d_inputs['in_serial'])
elif mode == 'rev':
if 'out_serial' in d_outputs:
if 'in_dist' in d_inputs:
d_inputs['in_dist'] += d_out_serial__d_x.T.dot(d_outputs['out_serial'])
if 'in_serial' in d_inputs:
d_inputs['in_serial'] += total_sum*d_out_serial__d_y.T.dot(d_outputs['out_serial'])
size = 7
if MPI:
comm = MPI.COMM_WORLD
rank = comm.rank
sizes, offsets = evenly_distrib_idxs(comm.size, size)
else:
# When running in serial, the entire variable is on rank 0.
rank = 0
sizes = {rank : size}
offsets = {rank : 0}
prob = om.Problem()
model = prob.model
# Create a distributed source for the distributed input.
ivc = om.IndepVarComp()
ivc.add_output('x_dist', np.zeros(sizes[rank]), distributed=True)
ivc.add_output('x_serial', val=1)
model.add_subsystem("indep", ivc)
model.add_subsystem("D1", MixedDistrib2())
model.add_subsystem('con_cmp1', om.ExecComp('con1 = y**2'), promotes=['con1', 'y'])
model.connect('indep.x_dist', 'D1.in_dist')
model.connect('indep.x_serial', ['D1.in_serial','y'])
prob.driver = om.ScipyOptimizeDriver()
prob.driver.options['optimizer'] = 'SLSQP'
model.add_design_var('indep.x_serial', lower=5, upper=10)
model.add_constraint('con1', upper=90)
model.add_objective('D1.out_serial')
prob.setup(force_alloc_complex=True)
#prob.setup()
# Set initial values of distributed variable.
x_dist_init = np.ones(sizes[rank])
prob.set_val('indep.x_dist', x_dist_init)
# Set initial values of serial variable.
prob.set_val('indep.x_serial', 10)
prob.run_model()
prob.check_partials()
# prob.run_driver()
print('x_dist', prob.get_val('indep.x_dist', get_remote=True))
print('x_serial', prob.get_val('indep.x_serial'))
print('Obj', prob.get_val('D1.out_serial'))
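One practical note: to actually exercise the distributed branch of the compute, a script like this is normally launched under MPI, for example
mpirun -n 2 python mixed_distrib_example.py
(the file name here is just a placeholder); run without MPI it falls through to the serial branch, which is handy for testing.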

OpenMDAO: Optimization terminates successfully after 1 iteration, not at optimal point

I am trying to make a toy problem to learn a bit about the OpenMDAO software before applying the lessons to a larger problem. I have a problem set up so that the objective function should be minimized when both design variables are at their minimum. However, both values stay at their originally assigned values despite receiving an 'Optimization terminated successfully' message.
I started by writing the code based on the Sellar problem examples ( http://openmdao.org/twodocs/versions/latest/basic_guide/sellar.html ). Additionally, I came across a Stack Overflow question that seems to describe the same problem, but the solution there doesn't work ( OpenMDAO: Solver converging to non-optimal point ). When I add the declare_partials line to the IntermediateCycle or ScriptForTest I receive an error saying either that self is not defined, or that the object has no attribute declare_partials.
This is the script that runs everything
import openmdao.api as om
from IntermediateForTest import IntermediateCycle
prob = om.Problem()
prob.model = IntermediateCycle()
prob.driver = om.ScipyOptimizeDriver()
#prob.driver.options['optimizer'] = 'SLSQP'
#prob.driver.options['tol'] = 1e-9
prob.model.add_design_var('n_gear', lower=2, upper=6)
prob.model.add_design_var('stroke', lower=0.0254, upper=1)
prob.model.add_objective('objective')
prob.setup()
prob.model.approx_totals()
prob.run_driver()
print(prob['objective'])
print(prob['cycle.f1.total_weight'])
print(prob['cycle.f1.stroke'])
print(prob['cycle.f1.n_gear'])
It calls an intermediate group, as per the Sellar example
import openmdao.api as om
from FunctionsForTest import FunctionForTest1
from FunctionsForTest import FunctionForTest2
class IntermediateCycle(om.Group):
def setup(self):
indeps = self.add_subsystem('indeps', om.IndepVarComp(), promotes=['*'])
indeps.add_output('n_gear', 3.0)
indeps.add_output('stroke', 0.2)
indeps.add_output('total_weight', 26000.0)
cycle = self.add_subsystem('cycle', om.Group())
cycle.add_subsystem('f1', FunctionForTest1())
cycle.add_subsystem('f2', FunctionForTest2())
cycle.connect('f1.landing_gear_weight','f2.landing_gear_weight')
cycle.connect('f2.total_weight','f1.total_weight')
self.connect('n_gear','cycle.f1.n_gear')
self.connect('stroke','cycle.f1.stroke')
#cycle.nonlinear_solver = om.NonlinearBlockGS()
self.nonlinear_solver = om.NonlinearBlockGS()
self.add_subsystem('objective', om.ExecComp('objective = total_weight', objective=26000, total_weight=26000), promotes=['objective', 'total_weight'])
Finally there is a file with the two functions in it:
import openmdao.api as om
class FunctionForTest1(om.ExplicitComponent):
def setup(self):
self.add_input('stroke', val=0.2)
self.add_input('n_gear', val=3.0)
self.add_input('total_weight', val=26000)
self.add_output('landing_gear_weight')
self.declare_partials('*', '*', method='fd')
def compute(self, inputs, outputs):
stroke = inputs['stroke']
n_gear = inputs['n_gear']
total_weight = inputs['total_weight']
outputs['landing_gear_weight'] = total_weight * 0.1 + 100*stroke * n_gear ** 2
class FunctionForTest2(om.ExplicitComponent):
def setup(self):
self.add_input('landing_gear_weight')
self.add_output('total_weight')
self.declare_partials('*', '*', method='fd')
def compute(self, inputs, outputs):
landing_gear_weight = inputs['landing_gear_weight']
outputs['total_weight'] = 26000 + landing_gear_weight
It reports optimization terminated successfully,
Optimization terminated successfully. (Exit mode 0)
Current function value: 26000.0
Iterations: 1
Function evaluations: 1
Gradient evaluations: 1
Optimization Complete
-----------------------------------
[26000.]
[29088.88888889]
[0.2]
[3.]
However, the value of the function to optimize hasn't changed. It seems that it converges the loop that estimates the weight, but doesn't vary the design variables to find the optimum.
It arrives at 29088.9, which is correct for a value of n_gear=3 and stroke=0.2, but if both are decreased to the bounds of n_gear=2 and stroke=0.0254, it would arrive at a value of ~28900, ~188 less.
Any advice, links to tutorials, or solutions would be appreciated.
Let's take a look at the N2 of the model as you provided it:
I've highlighted the connection from indeps.total_weight to objective.total_weight. So this means that your computed total_weight value is not being passed to your objective output at all. Instead you have a constant value being set there.
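As an aside, the N2 diagram itself can be regenerated from the script; in recent OpenMDAO versions a minimal way is shown below (older versions used view_model instead of n2):
import openmdao.api as om
# ... after building prob and calling prob.setup():
om.n2(prob)  # writes an interactive n2.html and opens it in a browser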
Now, taking a small step back, let's look at the computation of the objective itself:
self.add_subsystem('objective', om.ExecComp('objective = total_weight', objective=26000, total_weight=26000), promotes=['objective', 'total_weight'])
So this is an odd use of the ExecComp, because it just sets the output to exactly the input. It does nothing, and isn't really needed at all.
I believe what you wanted was simply to make the objective be the output f2.total_weight. When I do that (and make a few additional small cleanups to your code, like removing the unnecessary ExecComp), I get the correct answer in 2 major iterations of the optimizer:
import openmdao.api as om
class FunctionForTest1(om.ExplicitComponent):
def setup(self):
self.add_input('stroke', val=0.2)
self.add_input('n_gear', val=3.0)
self.add_input('total_weight', val=26000)
self.add_output('landing_gear_weight')
self.declare_partials('*', '*', method='fd')
def compute(self, inputs, outputs):
stroke = inputs['stroke']
n_gear = inputs['n_gear']
total_weight = inputs['total_weight']
outputs['landing_gear_weight'] = total_weight * 0.1 + 100*stroke * n_gear ** 2
class FunctionForTest2(om.ExplicitComponent):
def setup(self):
self.add_input('landing_gear_weight')
self.add_output('total_weight')
self.declare_partials('*', '*', method='fd')
def compute(self, inputs, outputs):
landing_gear_weight = inputs['landing_gear_weight']
outputs['total_weight'] = 26000 + landing_gear_weight
class IntermediateCycle(om.Group):
def setup(self):
indeps = self.add_subsystem('indeps', om.IndepVarComp(), promotes=['*'])
indeps.add_output('n_gear', 3.0)
indeps.add_output('stroke', 0.2)
cycle = self.add_subsystem('cycle', om.Group())
cycle.add_subsystem('f1', FunctionForTest1())
cycle.add_subsystem('f2', FunctionForTest2())
cycle.connect('f1.landing_gear_weight','f2.landing_gear_weight')
cycle.connect('f2.total_weight','f1.total_weight')
self.connect('n_gear','cycle.f1.n_gear')
self.connect('stroke','cycle.f1.stroke')
#cycle.nonlinear_solver = om.NonlinearBlockGS()
self.nonlinear_solver = om.NonlinearBlockGS()
prob = om.Problem()
prob.model = IntermediateCycle()
prob.driver = om.ScipyOptimizeDriver()
#prob.driver.options['optimizer'] = 'SLSQP'
#prob.driver.options['tol'] = 1e-9
prob.model.add_design_var('n_gear', lower=2, upper=6)
prob.model.add_design_var('stroke', lower=0.0254, upper=1)
prob.model.add_objective('cycle.f2.total_weight')
prob.model.approx_totals()
prob.setup()
prob.model.nonlinear_solver.options['iprint'] = 2
prob.run_driver()
print(prob['cycle.f1.total_weight'])
print(prob['cycle.f2.total_weight'])
print(prob['cycle.f1.stroke'])
print(prob['cycle.f1.n_gear'])
gives:
Optimization terminated successfully. (Exit mode 0)
Current function value: 28900.177777779667
Iterations: 2
Function evaluations: 2
Gradient evaluations: 2
Optimization Complete
-----------------------------------
[28900.1777778]
[28900.17777778]
[0.0254]
[2.]

Pitch Calculation Error via Autocorrelation Method

Aim : Pitch Calculation
Issue : The calculated pitch does not match the expected one. For instance, the output is approx. 'D3', however the expected output is 'C5'.
Source Sound : https://freewavesamples.com/1980s-casio-celesta-c5
Source Code
library("tuneR")
library("seewave")
#0: Acquisition of sample sound
snd_smpl = readWave(paste("~/Music/sample/1980s-Casio-Celesta-C5.wav"),
from = 0, to = 1, units = "seconds")
dur_smpl = duration(snd_smpl)
len_smpl = length(snd_smpl)
#1 : Pre-Processing Stage
#1.1 : Application of Hanning Window
n = 1:len_smpl
han_win = 0.5-0.5*cos(2*pi*n/(len_smpl-1))
wind_sig = han_win*snd_smpl#left
#2.1 : Auto-Correlation Calculation
rev_wind_sig = rev(wind_sig) #Reversing the windowed signal
acorr_1 = convolve(wind_sig, rev_wind_sig, type = "open")
# Obtaining the 2nd half of the correlation, to simplify calculation
n = 2*len_smpl-1
acorr_2 = (1/len_smpl)*acorr_1[len_smpl:n]
#2.2 : Note Calculation
min_index = which.min(acorr_2)
print(min_index)
fs = 44100
fo = fs/min_index #To obtain fundamental frequency
print(fo)
print(notenames(noteFromFF(fo)))
Output
> print(min_index)
[1] 37
> fs = 44100
> fo = fs/min_index
> print(fo)
[1] 1191.892
> print(notenames(noteFromFF(fo)))
[1] "d'''"
The entire calculation is performed in the Time Domain.
I'm currently using autocorrelation as a basis to understand more about pitch detection and analysis. I've tried analysing the sample with Audacity and the result is 'C5', hence I'm wondering where the issue actually lies.
Can you all help me find it?
Also, I have a few small but important doubts:
How small should my analysis window actually be (20 ms, 1 s, ...)?
Will reinforcing the autocorrelation algorithm with AMDF and other similar algorithms make this pitch detection module more robust?
This whole analysis does not seem correct. You should not use windowing in time-domain analysis.
Attached is a short solution in Python; you can use it as pseudocode.
from soundfile import read
from glob import glob
from scipy.signal import correlate, find_peaks
from matplotlib.pyplot import plot, show, xlim, title, xlabel
import numpy as np
# %matplotlib inline  # IPython notebook magic; omit when running as a plain script
name = glob('*wav')[0]
samples, fs = read(name)
corr = correlate(samples, samples)
corr = corr[corr.size // 2:]  # integer division so the slice index is an int in Python 3
time = np.arange(corr.size) / float(fs)
ind = find_peaks(corr[time < 0.002])[0]
plot(time, corr)
plot(time[ind], corr[ind], '*')
xlim([0, 0.005])
title('Frequency = {} Hz'.format(1 / time[ind][0]))
xlabel('Time [Sec]')
show()
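If you also want the note name from the detected frequency in Python (the R script relied on tuneR's noteFromFF and notenames for this), one standard route is via the MIDI note number. A small sketch, using scientific pitch notation rather than tuneR's naming:
import numpy as np

def note_name(freq, a4=440.0):
    # A4 = 440 Hz is MIDI note 69; MIDI note 60 is C4
    names = ['C', 'C#', 'D', 'D#', 'E', 'F', 'F#', 'G', 'G#', 'A', 'A#', 'B']
    midi = int(round(69 + 12*np.log2(freq/a4)))
    return names[midi % 12] + str(midi // 12 - 1)

print(note_name(523.25))  # -> C5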

MetaModelUnstructured Computational Time

I am using sample 2D functions for optimization with MetaModelUnStructuredComp.
Below is a code snippet. The computational time spent on training increases considerably as I increase the number of sample points, and I am not sure whether this much of an increase is expected or whether I am doing something wrong.
The problem is 2D and predicts 1 output; below are some timings:
45 sec for 900 points*
14 sec for 625 points
3.7 sec for 400 points
*points represent the dimension of each training input
Will decreasing this be a focus of the OpenMDAO development team in the future? (Keep reading for the edited version.)
import numpy as np
from openmdao.api import Problem, IndepVarComp
from openmdao.api import ScipyOptimizeDriver
from openmdao.api import MetaModelUnStructuredComp, FloatKrigingSurrogate,MetaModelUnStructuredComp
from openmdao.api import CaseReader, SqliteRecorder
import time
t0 = time.time()
class trig(MetaModelUnStructuredComp):
def setup(self):
ii=3
nx, ny = (10*ii, 10*ii)
print(nx*ny)
xx = np.linspace(-3,3, nx)
yy = np.linspace(-2,2, ny)
x, y = np.meshgrid(xx, yy)
# z = np.sin(x)**10 + np.cos(10 + y) * np.cos(x)
# z=4+4.5*x-4*y+x**2+2*y**2-2*x*y+x**4-2*x**2*y
term1 = (4-2.1*x**2+(x**4)/3) * x**2;
term2 = x*y;
term3 = (-4+4*y**2) * y**2;
z = term1 + term2 + term3;
self.add_input('x', training_data=x.flatten())
self.add_input('y', training_data=y.flatten())
self.add_output('meta_out', surrogate=FloatKrigingSurrogate(),
training_data=z.flatten())
prob = Problem()
inputs_comp = IndepVarComp()
inputs_comp.add_output('x', 1.5)
inputs_comp.add_output('y', 1.5)
prob.model.add_subsystem('inputs_comp', inputs_comp)
#triginst=
prob.model.add_subsystem('trig', trig())
prob.model.connect('inputs_comp.x', 'trig.x')
prob.model.connect('inputs_comp.y', 'trig.y')
prob.driver = ScipyOptimizeDriver()
prob.driver.options['optimizer'] = 'SLSQP'
prob.driver.options['tol'] = 1e-8
prob.driver.options['disp'] = True
prob.model.add_design_var('inputs_comp.x', lower=-3, upper=3)
prob.model.add_design_var('inputs_comp.y', lower=-2, upper=2)
prob.model.add_objective('trig.meta_out')
prob.setup(check=True)
prob.run_model()
print(prob['inputs_comp.x'])
print(prob['inputs_comp.y'])
print(prob['trig.meta_out'])
t1 = time.time()
total = t1-t0
print(total)
Following the answers below, I am adding a code snippet of an explicit component that uses the SMT toolbox for the surrogate. I guess this is one way to use the toolbox's capabilities.
import numpy as np
from smt.surrogate_models import RBF
from openmdao.api import ExplicitComponent
from openmdao.api import Problem, ScipyOptimizeDriver
from openmdao.api import Group, IndepVarComp
import smt
# Sample problem with SMT Toolbox and OpenMDAO Explicit Comp
#Optimization of SIX-HUMP CAMEL FUNCTION with 2 global optima
class MetaCompSMT(ExplicitComponent):
def initialize(self):
self.options.declare('sm', types=smt.surrogate_models.rbf.RBF)
def setup(self):
self.add_input('x')
self.add_input('y')
self.add_output('z')
# self.declare_partials(of='z', wrt=['x','y'], method='fd')
self.declare_partials(of='*', wrt='*')
def compute(self, inputs, outputs):
# sm = self.options['sm'] # seems like this is not needed
sta=np.column_stack([inputs[i] for i in inputs])
outputs['z'] =sm.predict_values(sta).flatten()
def compute_partials(self, inputs, partials):
sta=np.column_stack([inputs[i] for i in inputs])
print(sta)
for i,invar in enumerate(inputs):
partials['z', invar] =sm.predict_derivatives(sta,i)
# SMT SURROGATE IS TRAINED IN ADVANCE AND PASSED TO THE COMPONENT AS GLOBAL INPUT
# Training Data
ii=3 # "incerases the domain size"
nx, ny = (10*ii, 5*ii)
x, y = np.meshgrid(np.linspace(-3,3, nx), np.linspace(-2,2, ny))
term1 = (4-2.1*x**2+(x**4)/3) * x**2;
term2 = x*y;
term3 = (-4+4*y**2) * y**2;
z = term1 + term2 + term3;
# Surrogate training
xt=np.column_stack([x.flatten(),y.flatten()])
yt=z.flatten()
#sm = KPLSK(theta0=[1e-2])
sm=RBF(d0=-1,poly_degree=-1,reg=1e-13,print_global=False)
sm.set_training_values(xt, yt)
sm.train()
prob = Problem() # Start the OpenMDAO optimization problem
prob.model = model = Group() # Assemble a group within the problem. In this case single group.
"Independent component ~ single Design variable "
inputs_comp = IndepVarComp() # OpenMDAO approach for the design variable as independent component output
inputs_comp.add_output('x', 2.5) # Vary initial value for finding the second global optimum
inputs_comp.add_output('y', 1.5) # Vary initial value for finding the second global optimum
model.add_subsystem('inputs_comp', inputs_comp)
"Component 1"
comp = MetaCompSMT(sm=sm)
model.add_subsystem('MetaCompSMT', comp)
"Connect design variable to the 2 components. Easier to follow than promote"
model.connect('inputs_comp.x', 'MetaCompSMT.x')
model.connect('inputs_comp.y', 'MetaCompSMT.y')
"Lower/Upper bound design variables"
model.add_design_var('inputs_comp.x', lower=-3, upper=3)
model.add_design_var('inputs_comp.y', lower=-2, upper=2)
model.add_objective('MetaCompSMT.z')
prob.driver = ScipyOptimizeDriver()
prob.driver.options['optimizer'] = 'SLSQP'
prob.driver.options['disp'] = True
prob.driver.options['tol'] = 1e-9
prob.setup(check=True, mode='fwd')
prob.run_driver()
print(prob['inputs_comp.x'],prob['inputs_comp.y'],prob['MetaCompSMT.z'])
If you are willing to compile some code yourself, you could write a very lightweight wrapper for the Surrogate Modeling Toolbox (SMT). You could write that wrapper to work with the standard MetaModelUnstructuredComp or just write your own component wrapper.
Either way, that library has some significantly faster unstructured surrogate models in it. The default OpenMDAO implementations are just basic implementations. We may improve them over time, but for larger data sets or design spaces SMT offers much better algorithms.
We haven't written a general SMT wrapper in OpenMDAO as of Version 2.4, but it's not hard to write your own.
I'm going to look into the performance of the MetaModelUnStructuredComp using your test case a bit more closely, though I do notice that this test case involves fitting a structured data set. If you were to use MetaModelStructuredComp (http://openmdao.org/twodocs/versions/2.2.0/features/building_blocks/components/metamodelstructured.html), the performance is considerably better:
class trig(MetaModelStructuredComp):
def setup(self):
ii=3
nx, ny = (10*ii, 10*ii)
xx = np.linspace(-3,3, nx)
yy = np.linspace(-2,2, ny)
x, y = np.meshgrid(xx, yy, indexing='ij')
term1 = (4-2.1*x**2+(x**4)/3) * x**2;
term2 = x*y;
term3 = (-4+4*y**2) * y**2;
z = term1 + term2 + term3;
self.add_input('x', 0.0, xx)
self.add_input('y', 0.0, yy)
self.add_output('meta_out', 0.0, z)
The 900 points case goes from 14 seconds on my machine using MetaModelUnStructuredComp to 0.081 seconds when using MetaModelStructuredComp.

How to call numerical results to integrate a ODE using Runge-Kutta-4 in Python 3?

I'm trying to solve (for m_0) numerically the following ordinary differential equation:
dm0/dx=(((1-x)*(x*(2-x))**(1.5))/(k+x)**2)*(((x*(2-x))/3.0)*(dw/dx)**2 + ((8*(k+1))/(3*(k+x)))*w**2)
The values of w and dw/dx have already been found numerically using 4th-order Runge-Kutta, and k is a fixed factor. I wrote code that reads the values of w and dw/dx from an external file, organizes them into arrays, uses the arrays in the function, and then runs the integration. My outcome is not what I expected :( and I don't know what is wrong. If anyone could give me a hand, it would be highly appreciated. Thank you!
from math import sqrt
from numpy import array,zeros,loadtxt
from printSoln import *
from run_kut4 import *
m = 1.15 # Just a constant.
k = 3.0*sqrt(1.0-(1.0/m))-1.0 # k in terms of m.
omegas = loadtxt("omega.txt",float) # Import values of w
domegas = loadtxt("domega.txt",float) # Import values of dw/dx
w = [] # Defines the array w to store the values w^2
s = 0.0
for s in omegas:
w.append(s**2) # Calculates the values w**2
omeg = array(w,float) # Array to store the value of w**2
dw = [] # Defines the array dw to store the values dw**2
t = 0.0
for t in domegas:
dw.append(t**2) # Calculates the values for dw**2
domeg = array(dw,float) # Array to store the values of dw**2
x = 1.0e-12 # Starting point of integration
xStop = (2.0 - k)/3.0 # Final point of integration
def F(x,y): # Define function to be integrated
F = zeros(1)
for i in domeg: # Loop to call w^2, (dw/dx)^2
for j in omeg:
F[0] = (((1.0-x)*(x*(2.0-x))**(1.5))/(k+x)**2)*((1.0/3.0)*x* (2.0-x)*domeg[i] + (8.0*(k+1.0)*omeg[j])/(3.0*(k+x)))
return F
y = array([((32.0*sqrt(2.0)*(k+1.0)*(x**2.5))/(15.0*(k**3)))]) # Initial condition for m_{0}
h = 1.0e-5 # Integration step
freq = 0 # Prints only initial and final values
X,Y = integrate(F,x,y,xStop,h) # Calls Runge-Kutta 4
printSoln(X,Y,freq) # Prints solution
Interpreting your verbal description, there is an ODE for omega, w'=F(x,w), and a coupled ODE for m0, m'=G(x,m,w,w'). The almost always optimal way to solve this is to treat it as a system of ODEs,
def ODEfunc(x, y):
w,m = y
dw = F(x,w)
dm = G(x,m,w,dw)
return np.array([dw, dm])
which you can then insert in the ODE solver of your choice, e.g., the fictitious
ODEintegrate(ODEfunc, xsamples, y0)
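For concreteness, here is a runnable sketch of that pattern using scipy.integrate.solve_ivp in place of the run_kut4/printSoln modules from the question. F below is only a toy stand-in, since the omega ODE itself is not given; G is the m0 right-hand side from the question:
import numpy as np
from scipy.integrate import solve_ivp

m = 1.15
k = 3.0*np.sqrt(1.0 - 1.0/m) - 1.0

def F(x, w):
    # toy placeholder for the real dw/dx = F(x, w); replace with the actual omega ODE
    return -w

def G(x, m0, w, dw):
    # dm0/dx from the question, evaluated with the current w and dw/dx
    pref = ((1.0 - x)*(x*(2.0 - x))**1.5)/(k + x)**2
    return pref*((x*(2.0 - x)/3.0)*dw**2 + (8.0*(k + 1.0)/(3.0*(k + x)))*w**2)

def ODEfunc(x, y):
    w, m0 = y
    dw = F(x, w)
    return [dw, G(x, m0, w, dw)]

x0, xStop = 1.0e-12, (2.0 - k)/3.0
y0 = [1.0, 32.0*np.sqrt(2.0)*(k + 1.0)*x0**2.5/(15.0*k**3)]  # [w(x0), m0(x0)]; w(x0)=1 is arbitrary
sol = solve_ivp(ODEfunc, [x0, xStop], y0)
print(sol.t[-1], sol.y[:, -1])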
