Yolov5 Convert to ONNX - torch

I am trying to convert YOLOv5, which takes dynamic image shapes as input, to ONNX.
import torch
from app import onnx_tools

# This is an example of usage of the onnx converter.
yolo5_layout = '/home/eirini/Downloads/best.pt'
model = torch.hub.load("ultralytics/yolov5", 'custom', path=yolo5_layout, source='local')
model.eval()

# Example case
dummy_input = torch.rand((1, 3, 224, 224))

# Passing a dictionary where you define that the batch size, width and height dimensions are dynamic
dynamic_axes_dict = {"actual_input": {0: "bs",
                                      2: "img_x",
                                      3: "img_y"},
                     "output": {0: "bs"}}

# In this example, we told PyTorch to set the axes at indices 0, 2 and 3 of "actual_input" to be dynamic
# and to set the 0 index of "output" to be dynamic – where a dynamic shape is represented as an arbitrary
# string rather than a numerical value (e.g., `img_x` and `img_y` instead of 224 and 224).
torch.onnx.export(model=model,
                  args=dummy_input,
                  f="mytest.onnx",
                  export_params=True,
                  verbose=False,
                  input_names=["actual_input"],
                  output_names=["output"],
                  opset_version=14,
                  dynamic_axes=dynamic_axes_dict)
The above code produces an ONNX model. Then I try to run this model on a random example.
import numpy as np
import onnxruntime as ort

ort_session = ort.InferenceSession("mytest.onnx")
outputs = ort_session.run(
    None,
    {"actual_input": np.random.randn(10, 3, 960, 1200).astype(np.float32)},
)
print(outputs[0])
But I get the following error:
onnxruntime.capi.onnxruntime_pybind11_state.Fail: [ONNXRuntimeError] : 1 : FAIL : Non-zero status code returned while running Concat node. Name:'/model/model/model.12/Concat' Status Message: concat.cc:159 PrepareForCompute Non concat axis dimensions must match: Axis 3 has mismatched dimensions of 75 and 76
It seems like the model only accepts 224x224, but the purpose of the dynamic axes was to handle variable shapes.
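For what it's worth, my guess (not stated in the original post) is that the dynamic axes are fine, but YOLOv5 concatenates feature maps from different strides, so the input height and width still need to be multiples of the model's maximum stride (usually 32). 960 is divisible by 32 but 1200 is not, which would produce exactly the 75 vs 76 mismatch in the Concat node. A minimal sketch, assuming a stride of 32, that pads the input up to the next multiple before running the ONNX model:

import numpy as np
import onnxruntime as ort

def pad_to_stride(img, stride=32):
    # img has shape (batch, channels, height, width); pad H and W up to the next multiple of stride.
    _, _, h, w = img.shape
    pad_h = (stride - h % stride) % stride
    pad_w = (stride - w % stride) % stride
    return np.pad(img, ((0, 0), (0, 0), (0, pad_h), (0, pad_w)))

ort_session = ort.InferenceSession("mytest.onnx")
x = np.random.randn(10, 3, 960, 1200).astype(np.float32)
x = pad_to_stride(x)  # 960 stays 960, 1200 is padded to 1216
outputs = ort_session.run(None, {"actual_input": x})
print(outputs[0].shape)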

Related

detectron2 diffusioninst: oom-kill during training

I tried to run the code for DiffusionInst, which is based on Detectron2 (source code: https://github.com/chenhaoxing/DiffusionInst). During training, my Python process keeps getting killed (at 10000-20000 iterations, which is not enough for DiffusionInst training).
I only rewrote the dataloader code, in order to adapt it to my own dataset.
My new dataloader code:
class DiffusionInstDatasetMapper:
    """
    A callable which takes a dataset dict in Detectron2 Dataset format,
    and map it into a format used by DiffusionInst.
    The callable currently does the following:
    1. Read the image from "file_name"
    2. Applies geometric transforms to the image and annotation
    3. Find and applies suitable cropping to the image and annotation
    4. Prepare image and annotation to Tensors
    """

    def __init__(self, cfg, is_train=True):
        if cfg.INPUT.CROP.ENABLED and is_train:
            self.crop_gen = [
                # T.ResizeShortestEdge([400, 500, 600], sample_style="choice"),
                T.RandomCrop(cfg.INPUT.CROP.TYPE, cfg.INPUT.CROP.SIZE),
            ]
        else:
            self.crop_gen = None

        self.tfm_gens = build_transform_gen(cfg, is_train)
        logging.getLogger(__name__).info(
            "Full TransformGens used in training: {}, crop: {}".format(str(self.tfm_gens), str(self.crop_gen))
        )

        self.img_format = cfg.INPUT.FORMAT
        self.is_train = is_train

    def __call__(self, dataset_dict):
        """
        Args:
            dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format.
        Returns:
            dict: a format that builtin models in detectron2 accept
        """
        dataset_dict = copy.deepcopy(dataset_dict)  # it will be modified by code below
        # image = utils.read_image(dataset_dict["file_name"], format=self.img_format)

        ## crop roi
        '''lst = dataset_dict['file_name'].split('-')
        image = sitk.ReadImage('-'.join(lst[:-2]))
        image = sitk.GetArrayFromImage(image)
        above, below = int(lst[-2]), int(lst[-1])
        image = image[:, above:below, :]'''

        ## no crop roi
        image = sitk.ReadImage(dataset_dict["file_name"], sitk.sitkFloat32)
        image = sitk.GetArrayFromImage(image)
        # print('**********************', image.shape, '************************')
        image = (image - image.min()) / (image.max() - image.min()) * 255
        # print(image.dtype)
        image = image.transpose(1, 2, 0).astype(np.uint8)
        image = np.repeat(image, 3, axis=2)
        # print(image.dtype)
        utils.check_image_size(dataset_dict, image)
        # origshape = image.shape

        if self.crop_gen is None:
            image, transforms = T.apply_transform_gens(self.tfm_gens, image)
        else:
            image, transforms = T.apply_transform_gens(
                self.tfm_gens + self.crop_gen, image
            )
        # print('orig', origshape, '\t\tresized', image.shape)

        image_shape = image.shape[:2]  # h, w

        # Pytorch's dataloader is efficient on torch.Tensor due to shared-memory,
        # but not efficient on large generic data structures due to the use of pickle & mp.Queue.
        # Therefore it's important to use torch.Tensor.
        dataset_dict["image"] = torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1)))
        del image
        gc.collect()

        if not self.is_train:
            # USER: Modify this if you want to keep them for some reason.
            dataset_dict.pop("annotations", None)
            return dataset_dict

        if "annotations" in dataset_dict:
            # USER: Modify this if you want to keep them for some reason.
            # import pdb; pdb.set_trace()
            for anno in dataset_dict["annotations"]:
                # anno.pop("segmentation", None)
                anno.pop("keypoints", None)

            # USER: Implement additional transformations if you have other types of data
            annos = [
                utils.transform_instance_annotations(obj, transforms, image_shape)
                for obj in dataset_dict.pop("annotations")
                if obj.get("iscrowd", 0) == 0
            ]
            instances = utils.annotations_to_instances(annos, image_shape, mask_format="bitmask")
            dataset_dict["instances"] = utils.filter_empty_instances(instances)
            del instances
            gc.collect()

        return dataset_dict
And the information about the oom-killer:
[2599547.303018] python invoked oom-killer: gfp_mask=0x24000c0, order=0, oom_score_adj=995
[2599547.303084] [<ffffffff8119bfae>] oom_kill_process+0x1fe/0x3c0
[2599547.303133] Task in /kubepods/burstable/podd09a5032-8b07-11ed-bb60-ac1f6b9ec91e/8b4a8d5c2c1a082f93b1610173beb70bbc19fb1a1c2e28150d2d912ed9b95b10 killed as a result of limit of /kubepods/burstable/podd09a5032-8b07-11ed-bb60-ac1f6b9ec91e
[2599547.305957] Memory cgroup out of memory: Kill process 1041771 (python) score 1198 or sacrifice child
[2599547.307810] Killed process 1041771 (python) total-vm:36436532kB, anon-rss:10288264kB, file-rss:104888kB
[2599718.702250] python invoked oom-killer: gfp_mask=0x24000c0, order=0, oom_score_adj=995
[2599718.702299] [<ffffffff8119bfae>] oom_kill_process+0x1fe/0x3c0
[2599718.702333] Task in /kubepods/burstable/podd09a5032-8b07-11ed-bb60-ac1f6b9ec91e/8b4a8d5c2c1a082f93b1610173beb70bbc19fb1a1c2e28150d2d912ed9b95b10 killed as a result of limit of /kubepods/burstable/podd09a5032-8b07-11ed-bb60-ac1f6b9ec91e
I set IMS_PER_BATCH to 1 and used a dataset that contains only one image, but the OOM problem still occurred.
What should I do to prevent the OOM problem?
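Not part of the original post, but one way to narrow this down is to log the resident memory of the training process every few iterations; if it grows steadily even with IMS_PER_BATCH=1 and a single image, the leak is more likely in the dataloader/mapper path than in the model itself. A rough sketch using psutil (where exactly you call it in the training loop is up to you):

import os
import psutil

_process = psutil.Process(os.getpid())

def log_memory(iteration, every=100):
    # Print the resident set size of the current process every `every` iterations.
    if iteration % every == 0:
        rss_mb = _process.memory_info().rss / 1024 ** 2
        print("iter {}: rss = {:.0f} MB".format(iteration, rss_mb))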

Matrix Exponential Layer (Custom: Keras in R)

I'm trying to make a layer in Keras (R) which computes the matrix exponential of a layer of shape (d,d).
I.e.: the input to the layer is a d x d matrix and the output is a d x d matrix which is the (matrix) exponential of the input matrix.
What I've Implemented to Date:
Here's what I've done (it's a degree-4 approximation, because I'm also not sure how to get the TensorFlow matrix exponential command working in Keras):
# Matrix Exponential
Matrix_Exp <- R6::R6Class("KerasLayer",
  inherit = KerasLayer,
  public = list(

    call = function(x, mask = NULL) {
      # Initialize Tensor-like Object -> Tensor Objects
      ord0 = k_eye((k_shape(x)[1]))
      ord1 = x
      ord2 = (1/2)*k_dot(x, x)  # note x is square so this works
      ord3 = (1/6)*k_dot(x, ord2)
      ord4 = (1/24)*k_dot(x, ord3)
      ord0 + ord1 + ord2 + ord3 + ord4
    },

    compute_output_shape = function(input_shape) {
      c(d, d)
    }
  )
)

# Create layer wrapper function
layer_Matrix_Exp <- function(object) {
  create_layer(Matrix_Exp, object)
}
I'm plugging a model with this summary into the custom layer:
Model: "sequential_32"
_________________________________________________________________________________________________________________________________________________________________
Layer (type) Output Shape Param #
=================================================================================================================================================================
dense_63 (Dense) (None, 100) 400
_________________________________________________________________________________________________________________________________________________________________
dense_64 (Dense) (None, 4) 404
_________________________________________________________________________________________________________________________________________________________________
reshape_10 (Reshape) (None, 2, 2) 0
=================================================================================================================================================================
Total params: 804
Trainable params: 804
Non-trainable params: 0
_________________________________________________________________________________________________________________________________________________________________
Problem/Error:
But I run into this error when passing layers_NE %>% layer_Matrix_Exp
WARNING:tensorflow:Entity <function wrap_fn.<locals>.fn at 0x7fbdd0cf2b90> could not be transformed and will be executed as-is. Please report this to the AutoGraph team. When filing the bug, set the verbosity to 10 (on Linux, `export AUTOGRAPH_VERBOSITY=10`) and attach the full output. Cause: Evaluation error: object 'size' not found.
Error in py_call_impl(callable, dots$args, dots$keywords) :
RuntimeError: in converted code:
/scratch/users/BIM/R/x86_64-redhat-linux-gnu-library/3.6/keras/python/kerastools/layer.py:30 call *
return self.r_call(inputs, mask)
<string>:4 fn
/scratch/users/BIM/R/x86_64-redhat-linux-gnu-library/3.6/reticulate/python/rpytools/call.py:21 python_function
raise RuntimeError(res[kErrorKey])
RuntimeError: Evaluation error: object 'size' not found.
Note:
The problem is coming from the identity part but I don't know how to fix this.
Question:
How can I fix this error?
How can I replace the (manual) order-4 approximation to the matrix exponential with the Keras equivalent of the TensorFlow matrix exponential command?
Thanks in advance.
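Not from the original post, but regarding the second question: TensorFlow ships an exact matrix exponential as tf.linalg.expm, which works on batches of square matrices and can be wrapped in a layer instead of the manual order-4 series. A minimal sketch in Python Keras (the R interface exposes the same op as tf$linalg$expm; the layer sizes here are placeholders):

import numpy as np
import tensorflow as tf

d = 2
inputs = tf.keras.Input(shape=(d, d))
# Apply the exact matrix exponential to each (d, d) matrix in the batch.
outputs = tf.keras.layers.Lambda(tf.linalg.expm)(inputs)
model = tf.keras.Model(inputs, outputs)

# Sanity check: expm of the zero matrix is the identity.
print(model.predict(np.zeros((1, d, d))))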

Error when implementing RBF kernel bandwidth differentiation in Pytorch

I'm implementing an RBF network using some beginner examples from the PyTorch website. I have a problem when implementing the kernel bandwidth differentiation for the network. Also, I would like to know whether my attempt to implement the idea is fine. This is a code sample to reproduce the issue. Thanks.
# -*- coding: utf-8 -*-
import torch
from torch.autograd import Variable

def kernel_product(x, y, mode="gaussian", s=1.):
    x_i = x.unsqueeze(1)
    y_j = y.unsqueeze(0)
    xmy = ((x_i - y_j)**2).sum(2)
    if mode == "gaussian":
        K = torch.exp(-xmy / s**2)
    elif mode == "laplace":
        K = torch.exp(-torch.sqrt(xmy + (s**2)))
    elif mode == "energy":
        K = torch.pow(xmy + (s**2), -.25)
    return torch.t(K)

class MyReLU(torch.autograd.Function):
    """
    We can implement our own custom autograd Functions by subclassing
    torch.autograd.Function and implementing the forward and backward passes
    which operate on Tensors.
    """
    @staticmethod
    def forward(ctx, input):
        """
        In the forward pass we receive a Tensor containing the input and return
        a Tensor containing the output. ctx is a context object that can be used
        to stash information for backward computation. You can cache arbitrary
        objects for use in the backward pass using the ctx.save_for_backward method.
        """
        ctx.save_for_backward(input)
        return input.clamp(min=0)

    @staticmethod
    def backward(ctx, grad_output):
        """
        In the backward pass we receive a Tensor containing the gradient of the loss
        with respect to the output, and we need to compute the gradient of the loss
        with respect to the input.
        """
        input, = ctx.saved_tensors
        grad_input = grad_output.clone()
        grad_input[input < 0] = 0
        return grad_input

dtype = torch.cuda.FloatTensor
N, D_in, H, D_out = 64, 1000, 100, 10

# Create random Tensors to hold input and outputs, and wrap them in Variables.
x = Variable(torch.randn(N, D_in).type(dtype), requires_grad=False)
y = Variable(torch.randn(N, D_out).type(dtype), requires_grad=False)

# Create random Tensors for weights, and wrap them in Variables.
w1 = Variable(torch.randn(H, D_in).type(dtype), requires_grad=True)
w2 = Variable(torch.randn(H, D_out).type(dtype), requires_grad=True)

# I've created this scalar variable (the kernel bandwidth)
s = Variable(torch.randn(1).type(dtype), requires_grad=True)

learning_rate = 1e-6
for t in range(500):
    # To apply our Function, we use Function.apply method. We alias this as 'relu'.
    relu = MyReLU.apply

    # Forward pass: compute predicted y using operations on Variables; we compute
    # ReLU using our custom autograd operation.
    # y_pred = relu(x.mm(w1)).mm(w2)
    y_pred = relu(kernel_product(w1, x, s)).mm(w2)

    # Compute and print loss
    loss = (y_pred - y).pow(2).sum()
    print(t, loss.data[0])

    # Use autograd to compute the backward pass.
    loss.backward()

    # Update weights using gradient descent
    w1.data -= learning_rate * w1.grad.data
    w2.data -= learning_rate * w2.grad.data

    # Manually zero the gradients after updating weights
    w1.grad.data.zero_()
    w2.grad.data.zero_()
However, I get this error, which disappears when I simply use a fixed scalar in the default input parameter of kernel_product():
RuntimeError: eq() received an invalid combination of arguments - got (str), but expected one of:
 * (float other)
      didn't match because some of the arguments have invalid types: (str)
 * (Variable other)
      didn't match because some of the arguments have invalid types: (str)
Well, you are calling kernel_product(w1, x, s), where w1, x and s are torch Variables, while the definition of the function is kernel_product(x, y, mode="gaussian", s=1.). So s is being passed as the third positional argument, mode, which is expected to be a string specifying the kernel type.
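A minimal sketch of that fix, reusing the names from the question's code: pass the bandwidth by keyword so it does not land in the mode slot (updating s each step is my addition, assuming the goal is to learn the bandwidth):

# Keyword argument: `mode` keeps its default "gaussian" and `s` is used as the bandwidth.
y_pred = relu(kernel_product(w1, x, s=s)).mm(w2)
loss = (y_pred - y).pow(2).sum()
loss.backward()

# The bandwidth now receives gradients as well.
s.data -= learning_rate * s.grad.data
s.grad.data.zero_()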

Resetting default graph does not remove variables

I am looking for a way to quickly change a graph within an interactive session in Jupyter in order to test different structures. Initially I wanted to simply delete existing variables and recreate them with a different initializer. This does not seem to be possible [1].
I then found [2] and am now attempting to simply discard and recreate the default graph. But this does not seem to work. This is what I do:
a. Start a session
import tensorflow as tf
import math
sess = tf.InteractiveSession()
b. Create a variable in the default graph
IMAGE_PIXELS = 32 * 32
HIDDEN1 = 200
BATCH_SIZE = 100
NUM_POINTS = 30
images_placeholder = tf.placeholder(tf.float32, shape=(BATCH_SIZE, IMAGE_PIXELS))
points_placeholder = tf.placeholder(tf.float32, shape=(BATCH_SIZE, NUM_POINTS))
# Hidden 1
with tf.name_scope('hidden1'):
    weights_init = tf.truncated_normal([IMAGE_PIXELS, HIDDEN1], stddev=1.0 / math.sqrt(float(IMAGE_PIXELS)))
    weights = tf.Variable(weights_init, name='weights')
    biases_init = tf.zeros([HIDDEN1])
    biases = tf.Variable(biases_init, name='biases')
    hidden1 = tf.nn.relu(tf.matmul(images_placeholder, weights) + biases)
c. Use the variable
# Add the variable initializer Op.
init = tf.initialize_all_variables()
# Run the Op to initialize the variables.
sess.run(init)
d. Reset the graph
tf.reset_default_graph()
e. Recreate the variable
with tf.name_scope('hidden1'):
    weights = tf.get_variable(name='weights', shape=[IMAGE_PIXELS, HIDDEN1],
                              initializer=tf.contrib.layers.xavier_initializer())
    biases_init = tf.zeros([HIDDEN1])
    biases = tf.Variable(biases_init, name='biases')
    hidden1 = tf.nn.relu(tf.matmul(images_placeholder, weights) + biases)
However, I get an exception (see below). So my question is: is it possible to reset/remove the graph and recreate it as before? If so, how?
Appreciate any pointers.
TIA,
Refs
[1] Change initializer of Variable in Tensorflow
[2] Remove nodes from graph or reset entire default graph
Exception
ValueError Traceback (most recent call last)
<ipython-input-5-e98a82c45473> in <module>()
5 biases_init = tf.zeros([HIDDEN1])
6 biases = tf.Variable(biases_init, name='biases')
----> 7 hidden1 = tf.nn.relu(tf.matmul(images_placeholder, weights) + biases)
8
/home/hmf/my_py3/lib/python3.4/site-packages/tensorflow/python/ops/math_ops.py in matmul(a, b, transpose_a, transpose_b, a_is_sparse, b_is_sparse, name)
1323 A `Tensor` of the same type as `a`.
1324 """
-> 1325 with ops.op_scope([a, b], name, "MatMul") as name:
1326 a = ops.convert_to_tensor(a, name="a")
1327 b = ops.convert_to_tensor(b, name="b")
/usr/lib/python3.4/contextlib.py in __enter__(self)
57 def __enter__(self):
58 try:
---> 59 return next(self.gen)
60 except StopIteration:
61 raise RuntimeError("generator didn't yield") from None
/home/hmf/my_py3/lib/python3.4/site-packages/tensorflow/python/framework/ops.py in op_scope(values, name, default_name)
4014 ValueError: if neither `name` nor `default_name` is provided.
4015 """
-> 4016 g = _get_graph_from_inputs(values)
4017 n = default_name if name is None else name
4018 if n is None:
/home/hmf/my_py3/lib/python3.4/site-packages/tensorflow/python/framework/ops.py in _get_graph_from_inputs(op_input_list, graph)
3812 graph = graph_element.graph
3813 elif original_graph_element is not None:
-> 3814 _assert_same_graph(original_graph_element, graph_element)
3815 elif graph_element.graph is not graph:
3816 raise ValueError(
/home/hmf/my_py3/lib/python3.4/site-packages/tensorflow/python/framework/ops.py in _assert_same_graph(original_item, item)
3757 if original_item.graph is not item.graph:
3758 raise ValueError(
-> 3759 "%s must be from the same graph as %s." % (item, original_item))
3760
3761
ValueError: Tensor("weights:0", shape=(1024, 200), dtype=float32_ref) must be from the same graph as Tensor("Placeholder:0", shape=(100, 1024), dtype=float32).
When you reset the default graph, you do not remove the previously created Tensors. When calling tf.reset_default_graph(), a new graph is created and set as the default.
Here is an example to illustrate:
x = tf.constant(1)
print tf.get_default_graph() == x.graph # prints True
tf.reset_default_graph()
print tf.get_default_graph() == x.graph # prints False
The error you had indicates that two tensors must be from the same graph, which means you are still mixing tensors from the previous graph with tensors from the current default graph.
The easy fix is to recreate the two placeholders images_placeholder and points_placeholder after the reset.
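A minimal sketch of that fix, reusing the constants from the question: after the reset, rebuild the placeholders so they live in the new default graph, then recreate hidden1 against them:

tf.reset_default_graph()

# Recreate the placeholders in the new default graph.
images_placeholder = tf.placeholder(tf.float32, shape=(BATCH_SIZE, IMAGE_PIXELS))
points_placeholder = tf.placeholder(tf.float32, shape=(BATCH_SIZE, NUM_POINTS))

# Now the hidden1 block from step e. can be rebuilt without the cross-graph error.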

Tensorflow : how to insert custom input to existing graph?

I have downloaded a tensorflow GraphDef that implements a VGG16 ConvNet, which I use doing this :
Pl['images'] = tf.placeholder(tf.float32,
                              [None, 448, 448, 3],
                              name="images")  # batch x width x height x channels

with open("tensorflow-vgg16/vgg16.tfmodel", mode='rb') as f:
    fileContent = f.read()

graph_def = tf.GraphDef()
graph_def.ParseFromString(fileContent)
tf.import_graph_def(graph_def, input_map={"images": Pl['images']})
Besides, I have image features that are homogeneous to the output of "import/pool5/".
How can I tell my graph that I don't want to use its input "images", but the tensor "import/pool5/" as input?
Thanks!
EDIT
OK I realize I haven't been very clear. Here is the situation:
I am trying to use this implementation of ROI pooling, using a pre-trained VGG16, which I have in the GraphDef format. So here is what I do:
First of all, I load the model:
tf.reset_default_graph()
with open("tensorflow-vgg16/vgg16.tfmodel", mode='rb') as f:
    fileContent = f.read()
graph_def = tf.GraphDef()
graph_def.ParseFromString(fileContent)
graph = tf.get_default_graph()
Then, I create my placeholders
images = tf.placeholder(tf.float32,
                        [None, 448, 448, 3],
                        name="images")  # batch x width x height x channels
boxes = tf.placeholder(tf.float32,
                       [None, 5],  # 5 = [batch_id, x1, y1, x2, y2]
                       name="boxes")
And I define the output of the first part of the graph to be conv5_3/Relu
tf.import_graph_def(graph_def,
                    input_map={'images': images})
out_tensor = graph.get_tensor_by_name("import/conv5_3/Relu:0")
So, out_tensor is of shape [None,14,14,512]
Then, I do the ROI pooling:
[out_pool, argmax] = module.roi_pool(out_tensor,
                                     boxes,
                                     7, 7, 1.0 / 1)
With out_pool.shape = N_Boxes_in_batch x 7 x 7 x 512, which is homogeneous to pool5. I would then like to feed out_pool as the input to the op that comes just after pool5, so it would look like:
tf.import_graph_def(graph.as_graph_def(),
                    input_map={'import/pool5': out_pool})
But it doesn't work; I get this error:
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-89-527398d7344b> in <module>()
5
6 tf.import_graph_def(graph.as_graph_def(),
----> 7 input_map={'import/pool5':out_pool})
8
9 final_out = graph.get_tensor_by_name("import/Relu_1:0")
/usr/local/lib/python3.4/dist-packages/tensorflow/python/framework/importer.py in import_graph_def(graph_def, input_map, return_elements, name, op_dict)
333 # NOTE(mrry): If the graph contains a cycle, the full shape information
334 # may not be available for this op's inputs.
--> 335 ops.set_shapes_for_outputs(op)
336
337 # Apply device functions for this op.
/usr/local/lib/python3.4/dist-packages/tensorflow/python/framework/ops.py in set_shapes_for_outputs(op)
1610 raise RuntimeError("No shape function registered for standard op: %s"
1611 % op.type)
-> 1612 shapes = shape_func(op)
1613 if len(op.outputs) != len(shapes):
1614 raise RuntimeError(
/home/hbenyounes/vqa/roi_pooling_op_grad.py in _roi_pool_shape(op)
13 channels = dims_data[3]
14 print(op.inputs[1].name, op.inputs[1].get_shape())
---> 15 dims_rois = op.inputs[1].get_shape().as_list()
16 num_rois = dims_rois[0]
17
/usr/local/lib/python3.4/dist-packages/tensorflow/python/framework/tensor_shape.py in as_list(self)
745 A list of integers or None for each dimension.
746 """
--> 747 return [dim.value for dim in self._dims]
748
749 def as_proto(self):
TypeError: 'NoneType' object is not iterable
Any clue ?
It is usually very convenient to use tf.train.export_meta_graph to store the whole MetaGraph. Then, upon restoring, you can use tf.train.import_meta_graph, because it turns out that it passes all additional arguments to the underlying import_scoped_meta_graph, which has the input_map argument and uses it when it gets to its own invocation of import_graph_def.
It is not documented, and it took me way too much time to find it, but it works!
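A rough sketch of what that can look like (the file name and the remapped node name are placeholders, not taken from the original post):

# After building the original VGG16 graph, export the whole MetaGraph once.
tf.train.export_meta_graph(filename='vgg16.meta')

# Later, in a fresh graph, re-import it while remapping pool5 to another tensor.
tf.reset_default_graph()
out_pool = tf.placeholder(tf.float32, [None, 7, 7, 512], name='out_pool')  # stand-in for the ROI-pooled tensor
tf.train.import_meta_graph('vgg16.meta', input_map={'import/pool5:0': out_pool})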
What I would do is something along these lines:

- First retrieve the names of the tensors representing the weights and biases of the 3 fully connected layers coming after pool5 in VGG16. To do that, I would inspect [n.name for n in graph.as_graph_def().node]. (They probably look something like import/locali/weight:0, import/locali/bias:0, etc.)
- Put them in a Python list:

weights_names = ["import/local1/weight:0", "import/local2/weight:0", "import/local3/weight:0"]
biases_names = ["import/local1/bias:0", "import/local2/bias:0", "import/local3/bias:0"]

- Define a function that looks something like:
def pool5_tofcX(input_tensor, layer_number=3):
    flatten = tf.reshape(input_tensor, (-1, 7*7*512))
    tmp = flatten
    for i in xrange(layer_number):
        tmp = tf.matmul(tmp, graph.get_tensor_by_name(weights_names[i]))
        tmp = tf.nn.bias_add(tmp, graph.get_tensor_by_name(biases_names[i]))
        tmp = tf.nn.relu(tmp)
    return tmp
Then define the tensor using the function:
wanted_output = pool5_tofcX(out_pool)
Then you are done!
Jonan Georgiev provided an excellent answer here. The same approach was also described with little fanfare at the end of this git issue: https://github.com/tensorflow/tensorflow/issues/3389
Below is a copy/paste runnable example of using this approach to switch out a placeholder for a tf.data.Dataset get_next tensor.
import tensorflow as tf
my_placeholder = tf.placeholder(dtype=tf.float32, shape=1, name='my_placeholder')
my_op = tf.square(my_placeholder, name='my_op')
# Save the graph to memory
graph_def = tf.get_default_graph().as_graph_def()
print('----- my_op before any remapping -----')
print([n for n in graph_def.node if n.name == 'my_op'])
tf.reset_default_graph()
ds = tf.data.Dataset.from_tensors(1.0)
next_tensor = tf.data.make_one_shot_iterator(ds).get_next(name='my_next_tensor')
# Restore the graph with a custom input mapping
tf.graph_util.import_graph_def(graph_def, input_map={'my_placeholder': next_tensor}, name='')
print('----- my_op after remapping -----')
print([n for n in tf.get_default_graph().as_graph_def().node if n.name == 'my_op'])
Output, where we can clearly see that the input to the square operation has changed.
----- my_op before any remapping -----
[name: "my_op"
op: "Square"
input: "my_placeholder"
attr {
  key: "T"
  value {
    type: DT_FLOAT
  }
}
]
----- my_op after remapping -----
[name: "my_op"
op: "Square"
input: "my_next_tensor"
attr {
  key: "T"
  value {
    type: DT_FLOAT
  }
}
]
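As a small follow-up (not part of the original answer), the remapped graph can be run to confirm that my_op now reads from the dataset tensor rather than the placeholder:

with tf.Session() as sess:
    remapped_op = tf.get_default_graph().get_tensor_by_name('my_op:0')
    print(sess.run(remapped_op))  # expected to print 1.0, the square of the dataset element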
