@@ -983,23 +983,31 @@ def aten_constant_pad_nd(mapper, graph, node):
983
983
mapper ._check_input (graph , inputs_node [0 ], inputs_name [0 ], current_outputs ,
984
984
scope_name )
985
985
layer_inputs ["input" ] = inputs_name [0 ]
986
+ # 处理输入1,即%4876
987
+ is_padding_tensor = False
988
+ if inputs_name [1 ] in mapper .attrs :
989
+ layer_attrs ["padding" ] = mapper .attrs [inputs_name [1 ]]
990
+ else :
991
+ mapper ._check_input (graph , inputs_node [1 ], inputs_name [1 ], current_outputs ,
992
+ scope_name )
993
+ layer_inputs ["pad" ] = inputs_name [1 ]
994
+ is_padding_tensor = True
986
995
# 获取当前节点输入的list
987
996
current_inputs = list (layer_inputs .values ())
988
- # 处理输入1,即%4876
989
- layer_attrs ["padding" ] = mapper .attrs [inputs_name [1 ]]
990
997
# 处理输入2,即%42
991
998
layer_attrs ["value" ] = mapper .attrs [inputs_name [2 ]]
992
999
993
- graph .add_layer (
994
- "prim.shape" ,
995
- inputs = {"input" : inputs_name [0 ]},
996
- outputs = [inputs_name [0 ] + "_shape" ],
997
- scope_name = scope_name )
998
- graph .add_layer (
999
- "prim.len" ,
1000
- inputs = {"input" : inputs_name [0 ] + "_shape" },
1001
- outputs = [inputs_name [0 ] + "_len" ],
1002
- scope_name = scope_name )
1000
+ if not is_padding_tensor :
1001
+ graph .add_layer (
1002
+ "prim.shape" ,
1003
+ inputs = {"input" : inputs_name [0 ]},
1004
+ outputs = [inputs_name [0 ] + "_shape" ],
1005
+ scope_name = scope_name )
1006
+ graph .add_layer (
1007
+ "prim.len" ,
1008
+ inputs = {"input" : inputs_name [0 ] + "_shape" },
1009
+ outputs = [inputs_name [0 ] + "_len" ],
1010
+ scope_name = scope_name )
1003
1011
1004
1012
def add_pad_layers (kernel , dim ):
1005
1013
graph .add_layer (
@@ -1020,6 +1028,7 @@ def add_pad_layers(kernel, dim):
1020
1028
inputs = {"y" : inputs_name [0 ] + "_len" },
1021
1029
outputs = [inputs_name [0 ] + "_len0" ],
1022
1030
scope_name = scope_name ,
1031
+ alpha = 1.0 ,
1023
1032
x = dim )
1024
1033
block .add_layer (
1025
1034
"prim.len2list" ,
@@ -1058,17 +1067,25 @@ def add_pad_layers(kernel, dim):
1058
1067
if_layer .inputs ["input-0" ] = inputs_name [0 ]
1059
1068
if_layer .inputs ["input-1" ] = inputs_name [0 ] + "_len"
1060
1069
1061
- if len (layer_attrs ["padding" ]) == 2 :
1062
- layer_outputs [0 ] = layer_outputs [0 ].raplace ("pad" , "pad1d" )
1063
- add_pad_layers ("paddle.nn.Pad1D" , 3 )
1064
- elif len (layer_attrs ["padding" ]) == 4 :
1065
- layer_outputs [0 ] = layer_outputs [0 ].raplace ("pad" , "pad2d" )
1066
- add_pad_layers ("paddle.nn.Pad2D" , 4 )
1067
- elif len (layer_attrs ["padding" ]) == 6 :
1068
- layer_outputs [0 ] = layer_outputs [0 ].raplace ("pad" , "pad3d" )
1069
- add_pad_layers ("paddle.nn.Pad3D" , 5 )
1070
+ if not is_padding_tensor :
1071
+ if len (layer_attrs ["padding" ]) == 2 :
1072
+ layer_outputs [0 ] = layer_outputs [0 ].replace ("pad" , "pad1d" )
1073
+ add_pad_layers ("paddle.nn.Pad1D" , 3 )
1074
+ elif len (layer_attrs ["padding" ]) == 4 :
1075
+ layer_outputs [0 ] = layer_outputs [0 ].replace ("pad" , "pad2d" )
1076
+ add_pad_layers ("paddle.nn.Pad2D" , 4 )
1077
+ elif len (layer_attrs ["padding" ]) == 6 :
1078
+ layer_outputs [0 ] = layer_outputs [0 ].replace ("pad" , "pad3d" )
1079
+ add_pad_layers ("paddle.nn.Pad3D" , 5 )
1080
+ else :
1081
+ raise Exception ("The length of padding list must be 2, 4 or 6!" )
1070
1082
else :
1071
- raise Exception ("The lenght of padding list must be 2, 4 or 6!" )
1083
+ graph .add_layer (
1084
+ "custom_layer:Pad" ,
1085
+ inputs = layer_inputs ,
1086
+ outputs = [output_name ],
1087
+ scope_name = scope_name ,
1088
+ ** layer_attrs )
1072
1089
return current_inputs , current_outputs
1073
1090
1074
1091
@@ -4191,10 +4208,45 @@ def aten_relu6(mapper, graph, node):
4191
4208
return current_inputs , current_outputs
4192
4209
4193
4210
4211
+ def aten_remainder (mapper , graph , node ):
4212
+ """ 构造取余数的PaddleLayer。
4213
+ TorchScript示例:
4214
+ %701 : Tensor = aten::remainder(%661, %139)
4215
+ 参数含义:
4216
+ %701 (Tensor): 输出,取余结果的Tensor。
4217
+ %661 (Tensor): 需要取余的Tensor。
4218
+ %139 (Tensor): 除数Tensor。
4219
+ """
4220
+ scope_name = mapper .normalize_scope_name (node )
4221
+ output_name = mapper ._get_outputs_name (node )[0 ]
4222
+ layer_outputs = [output_name ]
4223
+ layer_inputs = {}
4224
+ inputs_name , inputs_node = mapper ._get_inputs_name (node )
4225
+ # 获取当前节点输出的list
4226
+ current_outputs = [output_name ]
4227
+ # 处理输入0,即%661
4228
+ mapper ._check_input (graph , inputs_node [0 ], inputs_name [0 ], current_outputs ,
4229
+ scope_name )
4230
+ layer_inputs ["x" ] = inputs_name [0 ]
4231
+ # 处理输入1,即%139
4232
+ mapper ._check_input (graph , inputs_node [1 ], inputs_name [1 ], current_outputs ,
4233
+ scope_name )
4234
+ layer_inputs ["y" ] = inputs_name [1 ]
4235
+ # 获取当前节点输入、输出的list
4236
+ current_inputs = list (layer_inputs .values ())
4237
+
4238
+ graph .add_layer (
4239
+ "prim.remainder" ,
4240
+ inputs = layer_inputs ,
4241
+ outputs = layer_outputs ,
4242
+ scope_name = scope_name )
4243
+ return current_inputs , current_outputs
4244
+
4245
+
4194
4246
def aten_repeat (mapper , graph , node ):
4195
4247
""" 构造根据参数对输入各维度进行复制的PaddleLayer。
4196
4248
TorchScript示例:
4197
- 701 : Tensor = aten::repeat(%699, %700)
4249
+ % 701 : Tensor = aten::repeat(%699, %700)
4198
4250
参数含义:
4199
4251
%701 (Tensor): 输出,复制后的Tensor。
4200
4252
%699 (Tensor): 需要复制的Tensor。
0 commit comments