openmdao v1.4 optimization with metamodel

I wish to perform an optimization with OpenMDAO 1.4 on a metamodel. Using the tutorials, I have built up a problem that I do not manage to solve. I think the problem comes from a misuse of setup() and run(): I do not manage to train my metamodel and optimize on it at the same time (perhaps I should use two different "groups" to do this ...).
Here is my code:
from __future__ import print_function

from openmdao.api import Component, Group, MetaModel, IndepVarComp, ExecComp, NLGaussSeidel, KrigingSurrogate, FloatKrigingSurrogate
import numpy as np


class KrigMM(Group):
    ''' FloatKriging gives responses as floats '''

    def __init__(self):
        super(KrigMM, self).__init__()

        # Create meta_model for f_x as the response
        pmm = self.add("pmm", MetaModel())
        pmm.add_param('x', val=0.)
        pmm.add_output('f_x:float', val=0., surrogate=FloatKrigingSurrogate())

        self.add('p1', IndepVarComp('x', 0.0))
        self.connect('p1.x', 'pmm.x')
        # mm.add_output('f_xy:norm_dist', val=(0.,0.), surrogate=KrigingSurrogate())
if __name__ == '__main__':
    # Setup and run the model.
    from openmdao.core.problem import Problem
    from openmdao.drivers.scipy_optimizer import ScipyOptimizer
    from openmdao.core.driver import Driver
    import numpy as np
    import doe_lhs

    #prob = Problem(root=ParaboloidProblem())
    ###########################################################
    prob = Problem(root=Group())
    prob.root.add('meta', KrigMM(), promotes=['*'])

    prob.driver = ScipyOptimizer()
    prob.driver.options['optimizer'] = 'SLSQP'
    prob.driver.add_desvar('p1.x', lower=0, upper=10)
    prob.driver.add_objective('pmm.f_x:float')

    prob.setup()
    prob['pmm.train:x'] = np.linspace(0, 10, 20)
    prob['pmm.train:f_x:float'] = np.sin(prob['pmm.train:x'])
    prob.run()

    print('\n')
    print('Minimum of %f found for meta at %f' % (prob['pmm.f_x:float'], prob['pmm.x']))  # predicted value

I believe your problem is actually working fine. It's just that the sinusoid you've picked has a local optimum at 0.0, which happens to be your initial condition: the design variable's lower bound is 0 and the gradient of sin(x) is positive there, so SLSQP is already sitting at a bound-constrained local minimum and stops immediately.
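As a quick aside (not part of the original answer), a plain-numpy check of the training signal illustrates this; the true minimum of sin(x) on [0, 10] is at 3*pi/2 ≈ 4.712, which matches the optimum reported below:
import numpy as np
x = np.linspace(0, 10, 20)   # same training grid as in the question
i = np.sin(x).argmin()       # index of the smallest training sample
print(i, x[i])               # 9, ~4.737 -- the grid point closest to 3*pi/2
print(3 * np.pi / 2)         # ~4.712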
If I change the initial condition as follows:
prob.setup()
prob['p1.x'] = 5
prob['pmm.train:x'] = np.linspace(0,10,20)
prob['pmm.train:f_x:float'] = np.sin(prob['pmm.train:x'])
prob.run()
I get:
Optimization terminated successfully. (Exit mode 0)
Current function value: [-1.00004544]
Iterations: 3
Function evaluations: 3
Gradient evaluations: 3
Optimization Complete
-----------------------------------
Minimum of -1.000045 found for meta at 4.710483

Related

openMDAO: Optimization terminates successfully after 1 iteration, not at optimal point

I am trying to make a toy problem to learn a bit about the OpenMDAO software before applying the lessons to a larger problem. I have a problem set up so that the objective function should be minimized when both design variables are at a minimum. However both values stay at their originally assigned values despite receiving an 'Optimization terminated successfully' message.
I started by writing the code based on the Sellar problem examples ( http://openmdao.org/twodocs/versions/latest/basic_guide/sellar.html ). Additionally, I came across a Stack Overflow question that seems to be the same problem, but the solution there doesn't work ( OpenMDAO: Solver converging to non-optimal point ). (When I add the declare_partials line to the IntermediateCycle or ScriptForTest, I receive an error saying either that self is not defined, or that the object has no attribute declare_partials.)
This is the script that runs everything
import openmdao.api as om
from IntermediateForTest import IntermediateCycle
prob = om.Problem()
prob.model = IntermediateCycle()
prob.driver = om.ScipyOptimizeDriver()
#prob.driver.options['optimizer'] = 'SLSQP'
#prob.driver.options['tol'] = 1e-9
prob.model.add_design_var('n_gear', lower=2, upper=6)
prob.model.add_design_var('stroke', lower=0.0254, upper=1)
prob.model.add_objective('objective')
prob.setup()
prob.model.approx_totals()
prob.run_driver()
print(prob['objective'])
print(prob['cycle.f1.total_weight'])
print(prob['cycle.f1.stroke'])
print(prob['cycle.f1.n_gear'])
It calls an intermediate group, as per the Sellar example
import openmdao.api as om
from FunctionsForTest import FunctionForTest1
from FunctionsForTest import FunctionForTest2


class IntermediateCycle(om.Group):
    def setup(self):
        indeps = self.add_subsystem('indeps', om.IndepVarComp(), promotes=['*'])
        indeps.add_output('n_gear', 3.0)
        indeps.add_output('stroke', 0.2)
        indeps.add_output('total_weight', 26000.0)

        cycle = self.add_subsystem('cycle', om.Group())
        cycle.add_subsystem('f1', FunctionForTest1())
        cycle.add_subsystem('f2', FunctionForTest2())
        cycle.connect('f1.landing_gear_weight', 'f2.landing_gear_weight')
        cycle.connect('f2.total_weight', 'f1.total_weight')

        self.connect('n_gear', 'cycle.f1.n_gear')
        self.connect('stroke', 'cycle.f1.stroke')

        #cycle.nonlinear_solver = om.NonlinearBlockGS()
        self.nonlinear_solver = om.NonlinearBlockGS()

        self.add_subsystem('objective', om.ExecComp('objective = total_weight', objective=26000, total_weight=26000), promotes=['objective', 'total_weight'])
Finally there is a file with the two functions in it:
import openmdao.api as om


class FunctionForTest1(om.ExplicitComponent):
    def setup(self):
        self.add_input('stroke', val=0.2)
        self.add_input('n_gear', val=3.0)
        self.add_input('total_weight', val=26000)
        self.add_output('landing_gear_weight')
        self.declare_partials('*', '*', method='fd')

    def compute(self, inputs, outputs):
        stroke = inputs['stroke']
        n_gear = inputs['n_gear']
        total_weight = inputs['total_weight']
        outputs['landing_gear_weight'] = total_weight * 0.1 + 100*stroke * n_gear ** 2


class FunctionForTest2(om.ExplicitComponent):
    def setup(self):
        self.add_input('landing_gear_weight')
        self.add_output('total_weight')
        self.declare_partials('*', '*', method='fd')

    def compute(self, inputs, outputs):
        landing_gear_weight = inputs['landing_gear_weight']
        outputs['total_weight'] = 26000 + landing_gear_weight
It reports optimization terminated successfully,
Optimization terminated successfully. (Exit mode 0)
Current function value: 26000.0
Iterations: 1
Function evaluations: 1
Gradient evaluations: 1
Optimization Complete
-----------------------------------
[26000.]
[29088.88888889]
[0.2]
[3.]
however, the value of the function to optimize hasn't changed. It seems that it converges the loop that estimates the weight, but doesn't vary the design variables to find the optimum.
It arrives at 29088.9, which is correct for a value of n_gear=3 and stroke=0.2, but if both are decreased to the bounds of n_gear=2 and stroke=0.0254, it would arrive at a value of ~28900, ~188 less.
Any advice, links to tutorials, or solutions would be appreciated.
Let's take a look at the N2 diagram of the model, as you provided it:
I've highlighted the connection from indeps.total_weight to objective.total_weight. So this means that your computed total_weight value is not being passed to your objective output at all. Instead you have a constant value being set there.
Now, taking a small step back, let's look at the computation of the objective itself:
self.add_subsystem('objective', om.ExecComp('objective = total_weight', objective=26000, total_weight=26000), promotes=['objective', 'total_weight'])
So this is an odd use of the ExecComp, because it just sets the output to exactly the input. It does nothing, and isn't really needed at all.
I believe what you wanted was simply to make the objective be the output f2.total_weight. When I do that (and make a few additional small cleanups to your code, like removing the unnecessary ExecComp), I get the correct answer in 2 major iterations of the optimizer:
import openmdao.api as om


class FunctionForTest1(om.ExplicitComponent):
    def setup(self):
        self.add_input('stroke', val=0.2)
        self.add_input('n_gear', val=3.0)
        self.add_input('total_weight', val=26000)
        self.add_output('landing_gear_weight')
        self.declare_partials('*', '*', method='fd')

    def compute(self, inputs, outputs):
        stroke = inputs['stroke']
        n_gear = inputs['n_gear']
        total_weight = inputs['total_weight']
        outputs['landing_gear_weight'] = total_weight * 0.1 + 100*stroke * n_gear ** 2


class FunctionForTest2(om.ExplicitComponent):
    def setup(self):
        self.add_input('landing_gear_weight')
        self.add_output('total_weight')
        self.declare_partials('*', '*', method='fd')

    def compute(self, inputs, outputs):
        landing_gear_weight = inputs['landing_gear_weight']
        outputs['total_weight'] = 26000 + landing_gear_weight


class IntermediateCycle(om.Group):
    def setup(self):
        indeps = self.add_subsystem('indeps', om.IndepVarComp(), promotes=['*'])
        indeps.add_output('n_gear', 3.0)
        indeps.add_output('stroke', 0.2)

        cycle = self.add_subsystem('cycle', om.Group())
        cycle.add_subsystem('f1', FunctionForTest1())
        cycle.add_subsystem('f2', FunctionForTest2())
        cycle.connect('f1.landing_gear_weight', 'f2.landing_gear_weight')
        cycle.connect('f2.total_weight', 'f1.total_weight')

        self.connect('n_gear', 'cycle.f1.n_gear')
        self.connect('stroke', 'cycle.f1.stroke')

        #cycle.nonlinear_solver = om.NonlinearBlockGS()
        self.nonlinear_solver = om.NonlinearBlockGS()


prob = om.Problem()
prob.model = IntermediateCycle()

prob.driver = om.ScipyOptimizeDriver()
#prob.driver.options['optimizer'] = 'SLSQP'
#prob.driver.options['tol'] = 1e-9

prob.model.add_design_var('n_gear', lower=2, upper=6)
prob.model.add_design_var('stroke', lower=0.0254, upper=1)
prob.model.add_objective('cycle.f2.total_weight')

prob.model.approx_totals()
prob.setup()
prob.model.nl_solver.options['iprint'] = 2

prob.run_driver()

print(prob['cycle.f1.total_weight'])
print(prob['cycle.f2.total_weight'])
print(prob['cycle.f1.stroke'])
print(prob['cycle.f1.n_gear'])
gives:
Optimization terminated successfully. (Exit mode 0)
Current function value: 28900.177777779667
Iterations: 2
Function evaluations: 2
Gradient evaluations: 2
Optimization Complete
-----------------------------------
[28900.1777778]
[28900.17777778]
[0.0254]
[2.]

OpenMDAO: How to resolve 'numpy.ndarray' object has no attribute 'log' error

I am new to OpenMDAO and am trying to solve an optimization problem. When I run the code, I receive the following error: 'numpy.ndarray' object has no attribute 'log'. I cannot resolve the problem. Any suggestions?
I have reviewed the OpenMDAO documentation.
Error message: 'numpy.ndarray' object has no attribute 'log'
from __future__ import division, print_function
import openmdao.api as om
import numpy as np


class Objective(om.ExplicitComponent):
    def setup(self):
        self.add_input('mu1', val=3.84)
        self.add_input('mu2', val=3.84)
        self.add_output('f', val=0.00022)

        # Finite difference all partials.
        self.declare_partials('*', '*', method='fd')

    def compute(self, inputs, outputs):
        mu1 = inputs['mu1']
        mu2 = inputs['mu2']
        outputs['f'] = np.log((mu1*(0.86))/(1.0-(mu1*0.14)))+np.log((mu2*(0.86))/(1.0-(mu2*0.14)))


# build the model
prob = om.Problem()
indeps = prob.model.add_subsystem('indeps', om.IndepVarComp())
indeps.add_output('mu1', 3.84)
indeps.add_output('mu2', 3.84)

prob.model.add_subsystem('obj', Objective())
prob.model.add_subsystem('cnst', om.ExecComp('g = 7924.8 - 2943.0*(np.log(mu1)) - 2943.0*(np.log(mu2))'))

prob.model.connect('indeps.mu1', 'obj.mu1')
prob.model.connect('indeps.mu2', 'obj.mu2')
prob.model.connect('indeps.mu1', 'cnst.mu1')
prob.model.connect('indeps.mu2', 'cnst.mu2')

# setup the optimization
prob.driver = om.ScipyOptimizeDriver()
prob.driver.options['optimizer'] = 'COBYLA'

prob.model.add_design_var('indeps.mu1', lower=0.0, upper=5.0)
prob.model.add_design_var('indeps.mu2', lower=0.0, upper=5.0)
prob.model.add_objective('obj.f')
prob.model.add_constraint('cnst.g', lower=0.0, upper=0.0)

prob.setup()
prob.run_driver()
The problem is in the definition of your ExecComp. You have np.log, but because of the way ExecComp parses its expression string, you just want log: inside a regular component's compute() you call numpy directly, so np.log is fine there, but in an ExecComp expression the math functions such as log are provided by the expression parser itself.
Try this instead:
'g = 7924.8 - 2943.0*(log(mu1)) - 2943.0*(log(mu2))'
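In context, the constraint subsystem from the question would then read (a small sketch assembled from the code above):
prob.model.add_subsystem('cnst', om.ExecComp('g = 7924.8 - 2943.0*(log(mu1)) - 2943.0*(log(mu2))'))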
With that change I got:
Normal return from subroutine COBYLA
NFVALS = 46 F = 3.935882E+00 MAXCV = 2.892193E-10
X = 3.843492E+00 3.843491E+00
Optimization Complete

How to update connection sizes in a reconfigurable model in OpenMDAO 2.5.0?

With reconfigurable model execution it is possible to resize inputs and outputs of components. How are the connections updated, when reconfigured outputs and inputs are connected?
In the example below, the output c2.y and the input c3.y are resized at each model run. This input and output are supposed to be connected, as shown in the N2 chart. However, after the reconfiguration the connection size does not seem to be updated automatically, and it throws the following error:
ValueError: The source and target shapes do not match or are ambiguous for the connection 'c2.y' to 'c3.y'. Expected (1,) but got (2,).
I have included 3 tests below: one with a promoted connection, one with an absolute connection, and a last one with reconfiguration but without the connection (which works).
A last resort would be to declare the connection in the parent group of the components, which I have not tried yet.
The tests:
Promoted connection
Absolute connection
No connection
Reconfigurable component classes and tests:
from __future__ import division
import logging
import unittest

import numpy as np

from openmdao.api import Problem, Group, IndepVarComp, ExplicitComponent
from openmdao.utils.assert_utils import assert_rel_error


class ReconfComp(ExplicitComponent):

    def initialize(self):
        self.size = 1
        self.counter = 0

    def reconfigure(self):
        logging.info('reconf started {}'.format(self.pathname))
        self.counter += 1
        logging.info('reconf ended {}'.format(self.pathname))
        if self.counter % 2 == 0:
            self.size += 1
            return True
        else:
            return False

    def setup(self):
        logging.info('setup started {}'.format(self.pathname))
        self.add_input('x', val=1.0)
        self.add_output('y', val=np.zeros(self.size))
        # All derivatives are defined.
        self.declare_partials(of='*', wrt='*')
        logging.info('setup ended {}'.format(self.pathname))

    def compute(self, inputs, outputs):
        logging.info('compute started {}'.format(self.pathname))
        outputs['y'] = 2 * inputs['x']
        logging.info('compute ended {}'.format(self.pathname))

    def compute_partials(self, inputs, jacobian):
        jacobian['y', 'x'] = 2 * np.ones((self.size, 1))


class ReconfComp2(ReconfComp):
    """The size of the y input changes the same way as in ReconfComp."""

    def setup(self):
        logging.info('setup started {}'.format(self.pathname))
        self.add_input('y', val=np.zeros(self.size))
        self.add_output('f', val=np.zeros(self.size))
        # All derivatives are defined.
        self.declare_partials(of='*', wrt='*')
        logging.info('setup ended {}'.format(self.pathname))

    def compute(self, inputs, outputs):
        logging.info('compute started {}'.format(self.pathname))
        outputs['f'] = 2 * inputs['y']
        logging.info('compute ended {}'.format(self.pathname))

    def compute_partials(self, inputs, jacobian):
        jacobian['f', 'y'] = 2 * np.ones((self.size, 1))


class TestReconfConnections(unittest.TestCase):

    def test_reconf_comp_promoted_connections(self):
        p = Problem()
        p.model = Group()
        p.model.add_subsystem('c1', IndepVarComp('x', 1.0), promotes_outputs=['x'])
        p.model.add_subsystem('c2', ReconfComp(), promotes_inputs=['x'], promotes_outputs=['y'])
        p.model.add_subsystem('c3', ReconfComp2(), promotes_inputs=['y'], promotes_outputs=['f'])
        p.setup()

        p['x'] = 3.

        # First run the model once; counter = 1, size of y = 1
        p.run_model()
        totals = p.compute_totals(wrt=['x'], of=['y'])
        assert_rel_error(self, p['x'], 3.0)
        assert_rel_error(self, p['y'], 6.0)
        assert_rel_error(self, totals['y', 'x'], [[2.0]])
        print(p['x'], p['y'], totals['y', 'x'].flatten())

        # Run the model again, which will trigger reconfiguration; counter = 2, size of y = 2
        p.run_model()  # FIXME Fails with ValueError

    def test_reconf_comp_connections(self):
        p = Problem()
        p.model = Group()
        p.model.add_subsystem('c1', IndepVarComp('x', 1.0), promotes_outputs=['x'])
        p.model.add_subsystem('c2', ReconfComp(), promotes_inputs=['x'])
        p.model.add_subsystem('c3', ReconfComp2(), promotes_outputs=['f'])
        p.model.connect('c2.y', 'c3.y')
        p.setup()

        p['x'] = 3.

        # First run the model once; counter = 1, size of y = 1
        p.run_model()

        # Run the model again, which will trigger reconfiguration; counter = 2, size of y = 2
        p.run_model()  # FIXME Fails with ValueError

    def test_reconf_comp_not_connected(self):
        p = Problem()
        p.model = Group()
        p.model.add_subsystem('c1', IndepVarComp('x', 1.0), promotes_outputs=['x'])
        p.model.add_subsystem('c2', ReconfComp(), promotes_inputs=['x'])
        p.model.add_subsystem('c3', ReconfComp2(), promotes_outputs=['f'])
        # c2.y not connected to c3.y
        p.setup()

        p['x'] = 3.

        # First run the model once; counter = 1, size of y = 1
        p.run_model()

        # Run the model again, which will trigger reconfiguration; counter = 2, size of y = 2
        fail, _, _ = p.run_model()
        self.assertFalse(fail)


if __name__ == '__main__':
    unittest.main()
UPDATE:
It seems that in Group._var_abs2meta only the source size is updated, but not the target. The setup of the connections starts before the setup of the parent group or of the other component is called.
UPDATE 2:
This happens with the default NonlinearRunOnce solver; with a NewtonSolver or NonlinearBlockGS there is no error, but the variable sizes also don't change.
As of OpenMDAO V2.5, reconfigurable model variables are not an officially supported feature in the framework. The bare bones of the capability have been in the code since that research was done, but it wasn't high enough priority for us to finalize. A recent major refactor in V2.4 re-worked some underlying data structures and must have broken this functionality.
It is on our development priority list to get this working again, but it's not super high on that list. We focus development mainly on features that have direct in-house applications, and we don't have one of those yet.
If you could provide a decently complete set of tests for it, we could take a look at getting the functionality working.

Openmdao - compute finite difference in parallel when optimizing

I have made a simple example (see below) which does the finite differencing in serial. What is the best way to do this in parallel? I am using Python 3.6 and OpenMDAO 2.4.0.
import numpy as np
from openmdao.api import Problem, ScipyOptimizeDriver, ExecComp, IndepVarComp, ExplicitComponent, Group


class WorkFlow(ExplicitComponent):
    def setup(self):
        self.add_input('x', np.ones(5))
        self.add_output('y', 2.0)

        self.declare_partials('y', 'x', method='fd')

    def compute(self, inputs, outputs):
        print('comm:', self.comm.rank, inputs['x'])
        print()
        outputs['y'] = abs(np.sum(inputs['x']**2) - 9)


prob = Problem()
indeps = prob.model.add_subsystem('indeps', IndepVarComp(), promotes=['*'])
indeps.add_output('x', np.ones(5))
prob.model.add_subsystem('wf', WorkFlow(), promotes_inputs=['x'])

prob.driver = ScipyOptimizeDriver()
prob.driver.options['optimizer'] = 'SLSQP'
prob.driver.options['tol'] = 1e-9

prob.model.add_design_var('x', lower=-10.0, upper=10.0)
prob.model.add_objective('wf.y')

prob.setup()
prob.run_driver()

print(prob['x'])
print(prob['wf.y'])
As of OpenMDAO v2.4 you could not do parallel finite-difference. However, the feature has been recently added to the master branch of OpenMDAO and will be officially released in OpenMDAO V2.5 soon.
To use the feature right now, install the repository version of OpenMDAO (you cannot just pip install openmdao; instead, clone the repository from GitHub and then pip install -e <location of the cloned repo>).
Then you can follow the instructions on the documentation page for parallel FD.
Here is your actual code, though. The only change to the component is that when you instantiate it you provide the num_par_fd=5 argument. Then, when you call the file, you should run it under MPI like this:
mpiexec -n 5 python test.py
Here is what test.py should look like:
import numpy as np
from openmdao.api import Problem, ScipyOptimizeDriver, ExecComp, IndepVarComp, ExplicitComponent, Group


class WorkFlow(ExplicitComponent):
    def setup(self):
        self.add_input('x', np.ones(5))
        self.add_output('y', 2.0)

        self.declare_partials('y', 'x', method='fd')

    def compute(self, inputs, outputs):
        print('comm:', self.comm.rank, inputs['x'])
        print()
        outputs['y'] = abs(np.sum(inputs['x']**2) - 9)


prob = Problem()
indeps = prob.model.add_subsystem('indeps', IndepVarComp(), promotes=['*'])
indeps.add_output('x', np.ones(5))
prob.model.add_subsystem('wf', WorkFlow(num_par_fd=5), promotes_inputs=['x'])

prob.driver = ScipyOptimizeDriver()
prob.driver.options['optimizer'] = 'SLSQP'
prob.driver.options['tol'] = 1e-9

prob.model.add_design_var('x', lower=-10.0, upper=10.0)
prob.model.add_objective('wf.y')

prob.setup()
prob.run_driver()

print(prob['x'])
print(prob['wf.y'])

External code incompatibility with metadata?

If I use the same setup with metadata for an external code component and an explicit component, it seems to fail with the ExternalCode comp. The code below has both components. As-is there is no error; if I comment out the explicit component part, I get an error:
comp = GFWrapper(DVLO=np.array(DVLIST))
TypeError: __init__() got an unexpected keyword argument 'DVLO'
import numpy as np
import json as js
import re, sys, subprocess, os

from openmdao.api import Problem, Group, IndepVarComp
from openmdao.api import ScipyOptimizeDriver
from openmdao.api import ExternalCode


class GFWrapper(ExternalCode):
    def initialize(self):
        self.metadata.declare('DVLO', types=np.ndarray)

    def setup(self):
        DVLO = self.metadata['DVLO']

        for dv in DVLO:
            self.add_input(dv)
        self.add_output('OBJECTIVE')

        self.input_file = 'GFWrapper_input.dat'
        self.output_file = 'GFWrapper_output.dat'

        self.options['external_input_files'] = [self.input_file, ]
        self.options['external_output_files'] = [self.output_file, ]
        self.options['command'] = [
            'python', 'run.py', self.input_file, self.output_file
        ]

        #self.declare_partials(of='*', wrt='*', method='fd')
        for dv in DVLO:
            self.declare_partials(of='OBJECTIVE', wrt=dv, method='fd')

    def compute(self, inputs, outputs):
        DVLO = self.metadata['DVLO']

        # generate the input file for the external code
        outF = open(self.input_file, "w")
        for dv in DVLO:
            outF.write(inputs[dv])
            print(dv, inputs[dv])
            outF.write("\n")
        outF.close()

        # the parent compute function actually runs the external code
        super(GFWrapper, self).compute(inputs, outputs)

        # parse the output file from the external code
        file_contents = np.loadtxt(self.output_file)
        outputs['OBJECTIVE'] = file_contents[0]


from openmdao.api import ExplicitComponent
#
class GFWrapper(ExplicitComponent):
    def initialize(self):
        self.metadata.declare('DVLO', types=np.ndarray)

    def setup(self):
        DVLO = self.metadata['DVLO']

        for dv in DVLO:
            self.add_input(dv)
        self.add_output('OBJECTIVE')

        #self.declare_partials(of='*', wrt='*', method='fd')
        for dv in DVLO:
            self.declare_partials(of='OBJECTIVE', wrt=dv, method='fd')

    def compute(self, inputs, outputs):
        DVLO = self.metadata['DVLO']
        powetemp = 0
        for dv in DVLO:
            powetemp += inputs[dv]
        outputs['OBJECTIVE'] = powetemp
#


DVLIST = ['DV1', 'DV2']
DVMIN = [2, 11]
DVMAX = [3, 12]
InitDVVal = [3, 5]

nr_of_desvar = len(DVLIST)

top = Problem()
top.model = model = Group()

" Introduce independent variables later will be design variables (optimization parameters) "
inputs_comp = IndepVarComp()
for i in range(nr_of_desvar):
    inputs_comp.add_output(DVLIST[i], InitDVVal[i])

" Add components/subsystems to the Group-model- "
model.add_subsystem('inputs_comp', inputs_comp)

comp = GFWrapper(DVLO=np.array(DVLIST))
model.add_subsystem('GFWrapper', comp)
This is a bug in OpenMDAO version 2.2. The error occurs because we didn't include **kwargs in the __init__ method of ExternalCode. We intend to fix this for V2.3, but in the meantime, you can work around it for your GFWrapper component by adding an __init__ method like this:
class GFWrapper(ExternalCode):

    def __init__(self, **kwargs):
        super(GFWrapper, self).__init__()

        self.metadata.update(kwargs)
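With that workaround in place, the instantiation from the question should go through without the TypeError (a sketch based on the question's own script, not re-tested here):
comp = GFWrapper(DVLO=np.array(DVLIST))   # no longer raises TypeError: __init__() got an unexpected keyword argument 'DVLO'
model.add_subsystem('GFWrapper', comp)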
