
Commit fee5405

Support YOLOX and Adapt to PyTorch 1.7.0 (#696)
* add SiLU op
* fixed bugs
* support yolox model
* fix code style
* Handling the case of underslashes
* add aten::format and remove to_tensor
* deal with comments
* fixed for CI
* update rm to_tensor
1 parent 06984e8 commit fee5405

File tree

7 files changed: +143 -345 lines changed

docs/inference_model_convertor/op_list.md

+2-2
@@ -1,5 +1,5 @@
 # X2Paddle Supported OP List
-> X2Paddle currently supports 90+ TensorFlow OPs, 30+ Caffe Layers, 80+ ONNX OPs, 110+ PyTorch Aten OPs, and 10+ PyTorch Prim OPs, covering most operations commonly used in CV classification models. The full list of OPs currently supported by X2Paddle is given below.
+> X2Paddle currently supports 90+ TensorFlow OPs, 30+ Caffe Layers, 80+ ONNX OPs, 120+ PyTorch Aten OPs, and 10+ PyTorch Prim OPs, covering most operations commonly used in CV classification models. The full list of OPs currently supported by X2Paddle is given below.
 
 **Note:** Some OPs are not yet supported. If you hit an unsupported OP during conversion, you can add it yourself or report it to us via an [ISSUE](https://github.com/PaddlePaddle/X2Paddle/issues/new) (model name, code implementation or a way to obtain the model), and we will follow up promptly :)
 
@@ -109,7 +109,7 @@ Aten:
 | 105 | aten::where | 106 | aten::zeros | 107 | aten::zeros\_like | 108 | aten::bmm |
 | 109 | aten::sub\_ | 110 | aten::erf | 111 | aten::lstm | 112 | aten::gather |
 | 113 | aten::upsample\_nearest2d | 114 | aten::split\_with\_sizes | 115 | aten::sum | 116 | aten::instance_norm |
-| 117 | aten::bitwise_not | 118 | aten::bitwise_xor | 119 | aten::bitwise_and | | |
+| 117 | aten::bitwise_not | 118 | aten::bitwise_xor | 119 | aten::bitwise_and | 120 | aten::silu |
 
 Prim:
 | No. | OP | No. | OP | No. | OP | No. | OP |
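The newly listed aten::silu corresponds to torch.nn.SiLU / torch.nn.functional.silu, introduced in PyTorch 1.7.0, and computes x * sigmoid(x); Paddle exposes the same activation as paddle.nn.functional.silu, which is presumably what the converter targets. A minimal sketch of the equivalence (illustrative only, random input, not converter code):

import numpy as np
import paddle
import torch

x = np.random.rand(2, 3).astype("float32")

# SiLU (a.k.a. swish): silu(x) = x * sigmoid(x), available since PyTorch 1.7.0.
y_torch = torch.nn.functional.silu(torch.from_numpy(x))

# Paddle's counterpart of the same activation.
y_paddle = paddle.nn.functional.silu(paddle.to_tensor(x))

# The two frameworks should agree to numerical precision.
print(np.allclose(y_torch.numpy(), y_paddle.numpy(), atol=1e-6))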

x2paddle/convert.py

+8-1
@@ -88,6 +88,11 @@ def arg_parser():
         type=_text_type,
         default=None,
         help="pretrain model file of pytorch model")
+    parser.add_argument(
+        "--enable_code_optim",
+        "-co",
+        default=True,
+        help="Turn on code optimization")
     parser.add_argument(
         "--to_lite", "-tl", default=False, help="convert to Paddle-Lite format")
     parser.add_argument(
@@ -222,6 +227,7 @@ def pytorch2paddle(module,
                    save_dir,
                    jit_type="trace",
                    input_examples=None,
+                   enable_code_optim=True,
                    convert_to_lite=False,
                    lite_valid_places="arm",
                    lite_model_type="naive_buffer"):
@@ -262,7 +268,8 @@ def pytorch2paddle(module,
     graph_opt = GraphOptimizer(source_frame="pytorch", jit_type=jit_type)
     graph_opt.optimize(mapper.paddle_graph)
     logging.info("Model optimized.")
-    mapper.paddle_graph.gen_model(save_dir, jit_type=jit_type)
+    mapper.paddle_graph.gen_model(
+        save_dir, jit_type=jit_type, enable_code_optim=enable_code_optim)
     if convert_to_lite:
         convert2lite(save_dir, lite_valid_places, lite_model_type)
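With the extra keyword argument, API users can switch the code optimizer off explicitly. A minimal usage sketch based on the signature above; the torchvision model, input shape, and save_dir are placeholders, not part of the commit:

import torch
import torchvision
from x2paddle.convert import pytorch2paddle

# Placeholder model and example input; any traceable nn.Module works.
model = torchvision.models.resnet18(pretrained=True)
model.eval()
example = torch.randn(1, 3, 224, 224)

# enable_code_optim=False makes gen_model skip the pytorch_code_optimizer
# passes and emit the plain generated code instead.
pytorch2paddle(
    model,
    save_dir="pd_model_resnet18",
    jit_type="trace",
    input_examples=[example],
    enable_code_optim=False)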

x2paddle/core/program.py

+3-3
@@ -237,11 +237,11 @@ def update(layers):
 
         return update(self.layers)
 
-    def gen_model(self, save_dir, jit_type=None):
+    def gen_model(self, save_dir, jit_type=None, enable_code_optim=True):
         if not osp.exists(save_dir):
             os.makedirs(save_dir)
         if jit_type == "trace":
-            if not self.has_unpack:
+            if not self.has_unpack and enable_code_optim:
                 from x2paddle.optimizer.pytorch_code_optimizer import HierarchicalTree
                 hierarchical_tree = HierarchicalTree(self)
                 for layer_id, layer in self.layers.items():
@@ -252,7 +252,7 @@ def gen_model(self, save_dir, jit_type=None):
                 self.gen_code(save_dir)
                 self.dump_parameter(save_dir)
         else:
-            if self.source_type == "pytorch":
+            if self.source_type == "pytorch" and enable_code_optim:
                 from x2paddle.optimizer.pytorch_code_optimizer import ModuleGraph
                 module_graph = ModuleGraph(self)
                 module_graph.save_source_files(save_dir)
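In both branches the new flag simply gates the optional pytorch_code_optimizer pass behind a boolean, so that module is only imported when it will actually run. A generic sketch of that gating pattern, with hypothetical names rather than X2Paddle's real classes:

def generate(graph, save_dir, enable_code_optim=True):
    # Hypothetical illustration of the gating used above: import and run the
    # optional code optimizer only when the flag is on, otherwise fall back
    # to the plain code/parameter dump.
    if enable_code_optim:
        from my_project.code_optimizer import CodeOptimizer  # hypothetical module
        CodeOptimizer(graph).save_source_files(save_dir)
    else:
        graph.gen_code(save_dir)
        graph.dump_parameter(save_dir)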
