This repository was archived by the owner on May 12, 2024. It is now read-only.

Commit edd516a

CONV_3D,CONV_3D_TRANSPOSE
1 parent b730a44 commit edd516a

File tree: 4 files changed, +194 / -66 lines

Dockerfile

Lines changed: 1 addition & 1 deletion
@@ -7,7 +7,7 @@ ARG CPVER=cp38
 ARG OPENVINOVER=2021.4.582
 ARG OPENVINOROOTDIR=/opt/intel/openvino_2021
 ARG TENSORRTVER=cuda11.3-trt8.0.1.6-ga-20210626
-ARG APPVER=v1.11.4
+ARG APPVER=v1.11.5
 ARG wkdir=/home/user

 # dash -> bash

README.md

Lines changed: 2 additions & 0 deletions
@@ -137,6 +137,8 @@ Generate saved_model, tfjs, tf-trt, EdgeTPU, CoreML, quantized tflite, ONNX, Ope
 |120|FlexAll|tf.math.reduce_all|Flex OP|
 |121|FlexErf|tf.math.erf|Flex OP|
 |122|FlexRoll|tf.roll|Flex OP|
+|123|CONV_3D|tf.keras.layers.Conv3D||
+|124|CONV_3D_TRANSPOSE|tf.nn.conv3d_transpose||

 ## 2. Environment
 - Python3.6+
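The two rows added above map the TFLite builtins onto tf.keras.layers.Conv3D and tf.nn.conv3d_transpose. A rough illustration of those TensorFlow calls follows; the shapes and random values are made up for the example, not taken from the commit.

import numpy as np
import tensorflow as tf

# CONV_3D -> tf.keras.layers.Conv3D
# Filter layout noted in the converter: [Z, Y, X, C_IN, C_OUT], e.g. [3, 3, 3, 1, 8].
x = tf.random.normal([1, 5, 32, 32, 1])                     # NDHWC input volume
weights = np.random.rand(3, 3, 3, 1, 8).astype(np.float32)  # hypothetical constant filter
y = tf.keras.layers.Conv3D(
    filters=8,
    kernel_size=[3, 3, 3],
    strides=[1, 1, 1],
    padding='same',
    dilation_rate=[1, 1, 1],
    use_bias=False,
    kernel_initializer=tf.keras.initializers.Constant(weights),
)(x)                                                        # -> [1, 5, 32, 32, 8]

# CONV_3D_TRANSPOSE -> tf.nn.conv3d_transpose
# The transposed filter layout is [Z, Y, X, C_OUT, C_IN]; output_shape is passed explicitly.
deconv_filter = tf.random.normal([3, 3, 3, 1, 8])
z = tf.nn.conv3d_transpose(
    y,
    filters=deconv_filter,
    output_shape=[1, 5, 32, 32, 1],
    strides=[1, 1, 1],
    padding='SAME',
)                                                           # -> [1, 5, 32, 32, 1]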

setup.py

Lines changed: 1 addition & 1 deletion
@@ -11,7 +11,7 @@
 setup(
     name="tflite2tensorflow",
     scripts=scripts,
-    version="1.11.4",
+    version="1.11.5",
     description="Generate saved_model, tfjs, tf-trt, EdgeTPU, CoreML, quantized tflite, ONNX, OpenVINO, Myriad Inference Engine blob and .pb from .tflite.",
     long_description=long_description,
     long_description_content_type="text/markdown",

tflite2tensorflow/tflite2tensorflow.py

Lines changed: 190 additions & 64 deletions
@@ -3930,74 +3930,200 @@ def complexabs_(x, tout):

         # tensors[output_detail['index']] = output_tensor

-        # elif op_type == 'CONV_3D':
-        #     input_tensor1 = None
-        #     try:
-        #         input_tensor1 = tensors[op['inputs'][0]]
-        #     except:
-        #         input_detail1 = interpreter._get_tensor_details(op['inputs'][0])
-        #         input_tensor1 = interpreter.get_tensor(input_detail1['index'])
-        #     input_tensor2 = None
-        #     try:
-        #         input_tensor2 = tensors[op['inputs'][1]].transpose(1,2,3,4,0)
-        #     except:
-        #         input_detail2 = interpreter._get_tensor_details(op['inputs'][1])
-        #         input_tensor2 = interpreter.get_tensor(input_detail2['index']).transpose(1,2,3,4,0)
-        #     output_detail = interpreter._get_tensor_details(op['outputs'][0])
+        elif op_type == 'CONV_3D':
+            # Conv3D
+            # https://www.tensorflow.org/api_docs/python/tf/keras/layers/Conv3D
+            """
+            tf : [Z, Y, X, C_IN, C_OUT] = [3,3,3,1,8] [3,3,3,8,16]
+            """
+            input_tensor1 = None
+            input_detail1 = interpreter._get_tensor_details(op['inputs'][0])
+            try:
+                input_tensor1 = tensors[op['inputs'][0]]
+            except:
+                input_tensor1 = interpreter.get_tensor(input_detail1['index'])
+                input_tensor1 = backward_quantization(input_detail1, input_tensor1)

-        #     kernel_size = [output_detail['shape'][0], output_detail['shape'][1], output_detail['shape'][2]]
+            input_tensor2 = None
+            input_detail2 = interpreter._get_tensor_details(op['inputs'][1])
+            try:
+                input_tensor2 = tensors[op['inputs'][1]]
+            except:
+                input_tensor2 = interpreter.get_tensor(input_detail2['index'])
+                input_tensor2 = backward_quantization(input_detail2, input_tensor2)

-        #     options = op['builtin_options']
-        #     dilation_rate = [options['dilation_d_factor'], options['dilation_h_factor'], options['dilation_w_factor']]
-        #     strides = [options['stride_d'], options['stride_h'], options['stride_w']]
-        #     padding = options['padding']
-        #     if padding == 0 or padding == 'VALID':
-        #         padding =='valid'
-        #     elif padding == 1 or padding == 'SAME':
-        #         padding =='same'
-        #     else:
-        #         raise ValueError(padding)
-
-        #     print('@@@@@@@@@@@@@@@@@@ output_detail[\'shape\']', output_detail['shape'])
-
-        #     activation = options['fused_activation_function']
-        #     if activation == 'NONE' or activation == 0:
-        #         output_tensor = tf.keras.layers.Conv3D(filters=output_detail['shape'][4],
-        #                                                kernel_size=kernel_size,
-        #                                                strides=strides,
-        #                                                padding=padding,
-        #                                                dilation_rate=dilation_rate,
-        #                                                # groups=1,
-        #                                                use_bias=False,
-        #                                                kernel_initializer=tf.keras.initializers.Constant(input_tensor2))(input_tensor1)
-        #         output_tensor = tf.identity(output_tensor, name=get_op_name(output_detail['name']))
-        #     elif activation == 'RELU':
-        #         output_tensor = tf.keras.layers.Conv3D(filters=output_detail['shape'][4],
-        #                                                kernel_size=kernel_size,
-        #                                                strides=strides,
-        #                                                padding=padding,
-        #                                                dilation_rate=dilation_rate,
-        #                                                # groups=1,
-        #                                                use_bias=False,
-        #                                                kernel_initializer=tf.keras.initializers.Constant(input_tensor2))(input_tensor1)
-        #         output_tensor = tf.nn.relu(output_tensor, name=get_op_name(output_detail['name']))
-        #     elif activation == 'RELU6':
-        #         output_tensor = tf.keras.layers.Conv3D(filters=output_detail['shape'][4],
-        #                                                kernel_size=kernel_size,
-        #                                                strides=strides,
-        #                                                padding=padding,
-        #                                                dilation_rate=dilation_rate,
-        #                                                # groups=1,
-        #                                                use_bias=False,
-        #                                                kernel_initializer=tf.keras.initializers.Constant(input_tensor2))(input_tensor1)
-        #         output_tensor = tf.nn.relu6(output_tensor, name=get_op_name(output_detail['name']))
-        #     else:
-        #         raise ValueError(activation)
+            output_detail = interpreter._get_tensor_details(op['outputs'][0])

-        #     tensors[output_detail['index']] = output_tensor
+            kernel_size = [input_detail2['shape'][0], input_detail2['shape'][1], input_detail2['shape'][2]]
+            options = op['builtin_options']
+            dilation_rate = [options['dilation_d_factor'], options['dilation_h_factor'], options['dilation_w_factor']]
+            strides = [options['stride_d'], options['stride_h'], options['stride_w']]
+            padding = options['padding']
+            if padding == 0 or padding == 'VALID':
+                padding = 'valid'
+            elif padding == 1 or padding == 'SAME':
+                padding = 'same'
+            else:
+                raise ValueError(padding)
+            activation = options['fused_activation_function']
+
+            if activation == 'NONE' or activation == 0:
+                output_tensor = tf.keras.layers.Conv3D(
+                    filters=output_detail['shape'][4],
+                    kernel_size=kernel_size,
+                    strides=strides,
+                    padding=padding,
+                    dilation_rate=dilation_rate,
+                    use_bias=False,
+                    kernel_initializer=tf.keras.initializers.Constant(input_tensor2)
+                )(input_tensor1)
+                output_tensor = tf.identity(output_tensor, name=get_op_name(output_detail['name']))
+                json_tensor_info = searh_json_tensor_detail(interpreter._get_tensor_details(op['outputs'][0])['name'])
+                if json_tensor_info:
+                    if 'quantization' in json_tensor_info:
+                        json_quant_info = json_tensor_info['quantization']
+                        activation_min = None
+                        activation_max = None
+                        if 'min' in json_quant_info:
+                            activation_min = json_quant_info['min']
+                        if 'max' in json_quant_info:
+                            activation_max = json_quant_info['max']
+                        if activation_min == [0.0] and (activation_max == [6.0] or activation_max == [5.999762]):
+                            output_tensor = tf.nn.relu6(
+                                output_tensor,
+                                name=get_op_name(output_detail['name'])
+                            )
+                        elif activation_min == [0.0]:
+                            output_tensor = tf.nn.relu(
+                                output_tensor,
+                                name=get_op_name(output_detail['name'])
+                            )
+
+            elif activation == 'RELU':
+                output_tensor = tf.keras.layers.Conv3D(
+                    filters=output_detail['shape'][4],
+                    kernel_size=kernel_size,
+                    strides=strides,
+                    padding=padding,
+                    dilation_rate=dilation_rate,
+                    use_bias=False,
+                    kernel_initializer=tf.keras.initializers.Constant(input_tensor2)
+                )(input_tensor1)
+                output_tensor = tf.nn.relu(output_tensor, name=get_op_name(output_detail['name']))
+
+            elif activation == 'RELU6':
+                output_tensor = tf.keras.layers.Conv3D(
+                    filters=output_detail['shape'][4],
+                    kernel_size=kernel_size,
+                    strides=strides,
+                    padding=padding,
+                    dilation_rate=dilation_rate,
+                    use_bias=False,
+                    kernel_initializer=tf.keras.initializers.Constant(input_tensor2)
+                )(input_tensor1)
+                output_tensor = tf.nn.relu6(output_tensor, name=get_op_name(output_detail['name']))
+            else:
+                raise ValueError(activation)
+
+            tensors[output_detail['index']] = output_tensor
+
+        elif op_type == 'CONV_3D_TRANSPOSE':
+            input_tensor1 = None
+            input_detail1 = interpreter._get_tensor_details(op['inputs'][0])
+            try:
+                input_tensor1 = tensors[op['inputs'][0]]
+            except:
+                input_tensor1 = interpreter.get_tensor(input_detail1['index'])
+                input_tensor1 = backward_quantization(input_detail1, input_tensor1)
+
+            input_tensor2 = None
+            input_detail2 = interpreter._get_tensor_details(op['inputs'][1])
+            try:
+                input_tensor2 = tensors[op['inputs'][1]]
+            except:
+                input_tensor2 = interpreter.get_tensor(input_detail2['index'])
+                input_tensor2 = backward_quantization(input_detail2, input_tensor2)
+
+            input_tensor3 = None
+            input_detail3 = interpreter._get_tensor_details(op['inputs'][2])
+            try:
+                input_tensor3 = tensors[op['inputs'][2]]
+            except:
+                input_tensor3 = interpreter.get_tensor(input_detail3['index'])
+                input_tensor3 = backward_quantization(input_detail3, input_tensor3)
+
+            output_detail = interpreter._get_tensor_details(op['outputs'][0])
+
+            options = op['builtin_options']
+            kernel_size = [input_detail2['shape'][0], input_detail2['shape'][1], input_detail2['shape'][2]]
+            dilation_rate = [options['dilation_d_factor'], options['dilation_h_factor'], options['dilation_w_factor']]
+            strides = [options['stride_d'], options['stride_h'], options['stride_w']]
+            padding = options['padding']
+            if padding == 0 or padding == 'valid' or padding == 'VALID':
+                padding = 'VALID'
+            elif padding == 1 or padding == 'same' or padding == 'SAME':
+                padding = 'SAME'
+            else:
+                raise ValueError(padding)
+            activation = options['fused_activation_function']
+
+            if activation == 'NONE' or activation == 0:
+                output_tensor = tf.nn.conv3d_transpose(
+                    value=input_tensor3,
+                    filters=input_tensor2,
+                    output_shape=input_tensor1,
+                    strides=strides,
+                    padding=padding
+                )
+                output_tensor = tf.identity(output_tensor, name=get_op_name(output_detail['name']))
+                json_tensor_info = searh_json_tensor_detail(interpreter._get_tensor_details(op['outputs'][0])['name'])
+                if json_tensor_info:
+                    if 'quantization' in json_tensor_info:
+                        json_quant_info = json_tensor_info['quantization']
+                        activation_min = None
+                        activation_max = None
+                        if 'min' in json_quant_info:
+                            activation_min = json_quant_info['min']
+                        if 'max' in json_quant_info:
+                            activation_max = json_quant_info['max']
+                        if activation_min == [0.0] and (activation_max == [6.0] or activation_max == [5.999762]):
+                            output_tensor = tf.nn.relu6(
+                                output_tensor,
+                                name=get_op_name(output_detail['name'])
+                            )
+                        elif activation_min == [0.0]:
+                            output_tensor = tf.nn.relu(
+                                output_tensor,
+                                name=get_op_name(output_detail['name'])
+                            )
+
+            elif activation == 'RELU':
+                output_tensor = tf.nn.conv3d_transpose(
+                    value=input_tensor3,
+                    filters=input_tensor2,
+                    output_shape=input_tensor1,
+                    strides=strides,
+                    padding=padding
+                )
+                output_tensor = tf.nn.relu(output_tensor, name=get_op_name(output_detail['name']))
+
+            elif activation == 'RELU6':
+                output_tensor = tf.nn.conv3d_transpose(
+                    value=input_tensor3,
+                    filters=input_tensor2,
+                    output_shape=input_tensor1,
+                    strides=strides,
+                    padding=padding
+                )
+                output_tensor = tf.nn.relu6(output_tensor, name=get_op_name(output_detail['name']))
+
+            else:
+                raise ValueError(activation)
+
+            tensors[output_detail['index']] = output_tensor

         elif op_type == 'CUSTOM':
-            '''
+            """
             Convolution2DTransposeBias
             +++++++++++++++++++++++++++++++++ op
             {'builtin_options_type': 'NONE',
@@ -4022,7 +4148,7 @@ def complexabs_(x, tout):
             'inputs': array([241, 353, 275], dtype=int32),
             'op_name': 'Convolution2DTransposeBias',
             'outputs': array([244], dtype=int32)}
-            '''
+            """
             custom_op_implementation_flg = False
             custom_op_type = None
             for ops_detail in ops_details:
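The two new handlers are essentially a translation of the TFLite Conv3D builtin_options into Keras/TF arguments, plus the fused-activation handling. The sketch below pulls that parameter mapping out on its own; conv3d_params_from_tflite and the literal options dictionary are hypothetical names and values invented for illustration (they are not part of tflite2tensorflow), while the option fields themselves (stride_d/h/w, dilation_*_factor, padding) are the ones read in the diff above.

import tensorflow as tf

def conv3d_params_from_tflite(options, filter_shape):
    """Map TFLite Conv3D builtin_options to tf.keras.layers.Conv3D arguments.

    Simplified, hypothetical extraction of the CONV_3D handler's logic above;
    the fused activation ('NONE' / 'RELU' / 'RELU6') is applied separately.
    """
    kernel_size = [filter_shape[0], filter_shape[1], filter_shape[2]]  # [Z, Y, X]
    strides = [options['stride_d'], options['stride_h'], options['stride_w']]
    dilation_rate = [options['dilation_d_factor'],
                     options['dilation_h_factor'],
                     options['dilation_w_factor']]
    padding = options['padding']
    if padding in (0, 'VALID'):
        padding = 'valid'    # Keras expects lowercase padding strings
    elif padding in (1, 'SAME'):
        padding = 'same'
    else:
        raise ValueError(padding)
    return dict(kernel_size=kernel_size, strides=strides,
                dilation_rate=dilation_rate, padding=padding)

# Example: a plain float32 CONV_3D op with unit strides and dilations.
params = conv3d_params_from_tflite(
    {'stride_d': 1, 'stride_h': 1, 'stride_w': 1,
     'dilation_d_factor': 1, 'dilation_h_factor': 1, 'dilation_w_factor': 1,
     'padding': 'SAME'},
    filter_shape=[3, 3, 3, 1, 8],   # [Z, Y, X, C_IN, C_OUT]
)
layer = tf.keras.layers.Conv3D(filters=8, use_bias=False, **params)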
