diff --git a/framework/e2e/PaddleLT_new/binary_search.py b/framework/e2e/PaddleLT_new/binary_search.py
index 78bf7b6af0..f3a2902def 100644
--- a/framework/e2e/PaddleLT_new/binary_search.py
+++ b/framework/e2e/PaddleLT_new/binary_search.py
@@ -62,6 +62,8 @@ def __init__(self, commit_list, title, layerfile, testing, perf_decay=None, test
         self.test_obj = test_obj
         self.py_cmd = os.environ.get("python_ver")
         self.testing_mode = os.environ.get("TESTING_MODE")
+        self.device_place_id = 0
+        self.timeout = 300

     def _status_print(self, exit_code, status_str):
         """
@@ -82,8 +84,8 @@ def _install_paddle(self, commit_id):
         whl_link = (
             "https://paddle-qa.bj.bcebos.com/paddle-pipe"
-            "line/Develop-GpuAll-LinuxCentos-Gcc82-Cuda112-Trtoff-Py38-Compile/{}/paddle"
-            "paddle_gpu-0.0.0-cp38-cp38-linux_x86_64.whl".format(commit_id)
+            "line/Develop-GpuSome-LinuxCentos-Gcc82-Cuda118-Cudnn86-Trt85-Py310-CINN-Compile/{}/paddle"
+            "paddle_gpu-0.0.0-cp310-cp310-linux_x86_64.whl".format(commit_id)
         )
         exit_code = os.system(f"{self.py_cmd} -m pip install {whl_link}")
         self._status_print(exit_code=exit_code, status_str="install paddlepaddle-gpu")
@@ -105,7 +107,8 @@ def _precision_debug(self, commit_id):
         exit_code = os.system(
             f"cp -r PaddleLT.py {self.title}.py && "
             f"{self.py_cmd} -m pytest {self.title}.py --title={self.title} "
-            f"--layerfile={self.layerfile} --testing={self.testing}"
+            f"--layerfile={self.layerfile} --testing={self.testing} "
+            f"--device_place_id={self.device_place_id} --timeout={self.timeout}"
         )

         if exit_code > 0:
@@ -168,8 +171,8 @@ def _commit_locate(self, commits):
     if not os.path.exists("paddle"):
         os.system("git clone -b develop http://github.com/paddlepaddle/paddle.git")
     os.chdir(os.path.join(cur_path, "paddle"))
-    start_commit = "f651ac5cf387c56488b52fcc6456a63e9eb73086"  # passing commit
-    end_commit = "9d5f31687cce16a976256723a70df4550085d685"  # failing commit
+    start_commit = "4ac66e4319740d65eb3b2a9e80b4a9f989083099"  # passing commit
+    end_commit = "81dc24b20d9a016dd4e92e76d2849cc9e3d0f8c8"  # failing commit
     commits = get_commits(start=start_commit, end=end_commit)
     save_pickle(data=commits, filename="candidate_commits.pickle")
     print("the candidate commits is {}".format(commits))
@@ -178,8 +181,8 @@ def _commit_locate(self, commits):
     final_commit = BinarySearch(
         commit_list=commits,
         title="PrecisionBS",
-        layerfile="./layercase/sublayer1000/Clas_cases/CSWinTransformer_CSWinTransformer_base_384/SIR_5.py",
-        testing="yaml/dy^dy2stcinn_eval.yml",
+        layerfile="./layercase/sublayer1000/Det_cases/ppyolo_ppyolo_r50vd_dcn_1x_coco/SIR_58.py",
+        testing="yaml/dy^dy2stcinn_train_inputspec.yml",
         perf_decay=None,  # ["dy2st_eval_cinn_perf", 0.042814, -0.3]
         test_obj=LayerTest,
     )._commit_locate(commits)
diff --git a/framework/e2e/PaddleLT_new/engine/paddle_eval.py b/framework/e2e/PaddleLT_new/engine/paddle_eval.py
index 19df5debf4..0c9f3e564a 100644
--- a/framework/e2e/PaddleLT_new/engine/paddle_eval.py
+++ b/framework/e2e/PaddleLT_new/engine/paddle_eval.py
@@ -24,7 +24,7 @@ class LayerEval(object):
     """

     # def __init__(self, testing, layerfile, device_id):
-    def __init__(self, testing, layerfile, device_place_id):
+    def __init__(self, testing, layerfile, device_place_id, upstream_net):
         """
        initialize
        """
@@ -36,6 +36,8 @@ def __init__(self, testing, layerfile, device_place_id):
         # paddle.set_device("{}:{}".format(str(self.device), str(device_id)))

         self.testing = testing
+        self.upstream_net = upstream_net
+        self.return_net_instance = self.testing.get("return_net_instance", "False")
         self.model_dtype = self.testing.get("model_dtype")
         paddle.set_default_dtype(self.model_dtype)
@@ -53,7 +55,10 @@ def _net_input(self):

     def _net_instant(self):
         """get net"""
         reset(self.seed)
-        net = BuildLayer(layerfile=self.layerfile).get_layer()
+        if self.upstream_net:
+            net = self.upstream_net
+        else:
+            net = BuildLayer(layerfile=self.layerfile).get_layer()
         return net

     def _net_input_and_spec(self):
@@ -85,7 +90,10 @@ def dy_eval(self):
         net = self._net_instant()
         net.eval()
         logit = net(*self._net_input())
-        return {"logit": logit}
+        if self.return_net_instance == "True":
+            return {"res": {"logit": logit}, "net": net}
+        else:
+            return {"res": {"logit": logit}, "net": None}

     def dy2st_eval(self):
         """dy2st eval"""
@@ -94,7 +102,10 @@
         st_net = paddle.jit.to_static(net, full_graph=True)
         st_net.eval()
         logit = st_net(*data)
-        return {"logit": logit}
+        if self.return_net_instance == "True":
+            return {"res": {"logit": logit}, "net": st_net}
+        else:
+            return {"res": {"logit": logit}, "net": None}

     def dy2st_eval_inputspec(self):
         """dy2st eval"""
@@ -104,7 +115,10 @@
         st_net = paddle.jit.to_static(net, full_graph=True, input_spec=input_spec)
         st_net.eval()
         logit = st_net(*data)
-        return {"logit": logit}
+        if self.return_net_instance == "True":
+            return {"res": {"logit": logit}, "net": st_net}
+        else:
+            return {"res": {"logit": logit}, "net": None}

     def dy2st_eval_static_inputspec(self):
         """dy2st eval"""
@@ -114,7 +128,10 @@
         st_net = paddle.jit.to_static(net, full_graph=True, input_spec=input_spec)
         st_net.eval()
         logit = st_net(*data)
-        return {"logit": logit}
+        if self.return_net_instance == "True":
+            return {"res": {"logit": logit}, "net": st_net}
+        else:
+            return {"res": {"logit": logit}, "net": None}

     def dy2st_eval_cinn(self):
         """dy2st cinn eval"""
@@ -126,7 +143,10 @@
         cinn_net = paddle.jit.to_static(net, build_strategy=build_strategy, full_graph=True)
         cinn_net.eval()
         logit = cinn_net(*data)
-        return {"logit": logit}
+        if self.return_net_instance == "True":
+            return {"res": {"logit": logit}, "net": cinn_net}
+        else:
+            return {"res": {"logit": logit}, "net": None}

     def dy2st_eval_cinn_inputspec(self):
         """dy2st cinn eval with inputspec"""
@@ -139,7 +159,10 @@
         cinn_net = paddle.jit.to_static(net, build_strategy=build_strategy, full_graph=True, input_spec=input_spec)
         cinn_net.eval()
         logit = cinn_net(*data)
-        return {"logit": logit}
+        if self.return_net_instance == "True":
+            return {"res": {"logit": logit}, "net": cinn_net}
+        else:
+            return {"res": {"logit": logit}, "net": None}

     def dy2st_eval_cinn_static_inputspec(self):
         """dy2st cinn eval with inputspec"""
@@ -152,7 +175,10 @@
         cinn_net = paddle.jit.to_static(net, build_strategy=build_strategy, full_graph=True, input_spec=input_spec)
         cinn_net.eval()
         logit = cinn_net(*data)
-        return {"logit": logit}
+        if self.return_net_instance == "True":
+            return {"res": {"logit": logit}, "net": cinn_net}
+        else:
+            return {"res": {"logit": logit}, "net": None}

     # def dy2st_eval_cinn_inputspec_legacy_2(self):
     #     """dy2st cinn eval with inputspec"""
diff --git a/framework/e2e/PaddleLT_new/engine/paddle_eval_bm.py b/framework/e2e/PaddleLT_new/engine/paddle_eval_bm.py
index 3426333302..5bf70b6fe4 100644
--- a/framework/e2e/PaddleLT_new/engine/paddle_eval_bm.py
+++ b/framework/e2e/PaddleLT_new/engine/paddle_eval_bm.py
@@ -24,7 +24,7 @@ class LayerEvalBM(object):
     """

     # def __init__(self, testing, layerfile, device_id):
-    def __init__(self, testing, layerfile, device_place_id):
+    def __init__(self, testing, layerfile, device_place_id, upstream_net):
         """
         initialize
         """
@@ -43,6 +45,8 @@ def __init__(self, testing, layerfile, device_place_id):
         self.statis_round = 6

         self.testing = testing
+        self.upstream_net = upstream_net
+        # self.return_net_instance = self.testing.get("return_net_instance", "False")
         self.model_dtype = self.testing.get("model_dtype")
         paddle.set_default_dtype(self.model_dtype)
@@ -53,7 +55,10 @@

     def _net_instant(self):
         """get net and data"""
         reset(self.seed)
-        net = BuildLayer(layerfile=self.layerfile).get_layer()
+        if self.upstream_net:
+            net = self.upstream_net
+        else:
+            net = BuildLayer(layerfile=self.layerfile).get_layer()
         return net

     def _set_cinn_flags(self):
diff --git a/framework/e2e/PaddleLT_new/engine/paddle_export.py b/framework/e2e/PaddleLT_new/engine/paddle_export.py
index 8a54af176d..87ba4c566b 100644
--- a/framework/e2e/PaddleLT_new/engine/paddle_export.py
+++ b/framework/e2e/PaddleLT_new/engine/paddle_export.py
@@ -20,7 +20,7 @@ class LayerExport(object):
     """
    Generic class for exporting a Layer
     """

-    def __init__(self, testing, layerfile, device_place_id):
+    def __init__(self, testing, layerfile, device_place_id, upstream_net):
         """
         initialize
         """
@@ -30,6 +30,8 @@ def __init__(self, testing, layerfile, device_place_id):
         paddle.set_device(f"{self.device}:{device_place_id}")

         self.testing = testing
+        self.upstream_net = upstream_net
+        # self.return_net_instance = self.testing.get("return_net_instance", "False")
         self.model_dtype = self.testing.get("model_dtype")
         paddle.set_default_dtype(self.model_dtype)
@@ -48,7 +50,10 @@ def _net_input(self):

     def _net_instant(self):
         """get net"""
         reset(self.seed)
-        net = BuildLayer(layerfile=self.layerfile).get_layer()
+        if self.upstream_net:
+            net = self.upstream_net
+        else:
+            net = BuildLayer(layerfile=self.layerfile).get_layer()
         return net

     def _net_input_and_spec(self):
@@ -77,6 +82,7 @@ def jit_save(self):

         # paddle.jit.save(net, path=os.path.join(self.path, self.case))
         paddle.jit.save(st_net, path=os.path.join(self.path, self.layername, "jit_save"))
+        return {"res": None}

     def jit_save_inputspec(self):
         """jit.save(layer)"""
@@ -90,6 +96,7 @@

         # paddle.jit.save(net, path=os.path.join(self.path, self.case))
         paddle.jit.save(st_net, path=os.path.join(self.path, self.layername, "jit_save_inputspec"))
+        return {"res": None}

     def jit_save_static_inputspec(self):
         """jit.save(layer)"""
@@ -103,6 +110,7 @@

         # paddle.jit.save(net, path=os.path.join(self.path, self.case))
         paddle.jit.save(st_net, path=os.path.join(self.path, self.layername, "jit_save_static_inputspec"))
+        return {"res": None}

     def jit_save_cinn(self):
         """jit.save(layer)"""
@@ -117,6 +125,7 @@

         # paddle.jit.save(net, path=os.path.join(self.path, self.case))
         paddle.jit.save(cinn_net, path=os.path.join(self.path, self.layername, "jit_save_cinn"))
+        return {"res": None}

     def jit_save_cinn_inputspec(self):
         """jit.save(layer)"""
@@ -132,6 +141,7 @@

         # paddle.jit.save(net, path=os.path.join(self.path, self.case))
         paddle.jit.save(cinn_net, path=os.path.join(self.path, self.layername, "jit_save_cinn_inputspec"))
+        return {"res": None}

     def jit_save_cinn_static_inputspec(self):
         """jit.save(layer)"""
@@ -147,3 +157,4 @@

         # paddle.jit.save(net, path=os.path.join(self.path, self.case))
         paddle.jit.save(cinn_net, path=os.path.join(self.path, self.layername, "jit_save_cinn_static_inputspec"))
+        return {"res": None}
diff --git a/framework/e2e/PaddleLT_new/engine/paddle_infer.py b/framework/e2e/PaddleLT_new/engine/paddle_infer.py
index 8ac3b5be7e..4ab35e7f1a 100644
--- a/framework/e2e/PaddleLT_new/engine/paddle_infer.py
+++ b/framework/e2e/PaddleLT_new/engine/paddle_infer.py
@@ -14,13 +14,15 @@
 from generator.builder_layer import BuildLayer
 from generator.builder_data import BuildData

+from tools.logger import Logger
+

 class LayerInfer(object):
     """
     Generic class for running inference on a Layer
     """

-    def __init__(self, testing, layerfile, device_place_id):
+    def __init__(self, testing, layerfile, device_place_id, upstream_net):
         """
         initialize
         """
@@ -41,7 +43,8 @@ def paddle_infer_gpu(self):
         """infer load (layer)"""
         reset(self.seed)
         if not os.path.exists(self.path + ".pdiparams"):
-            return "pass"
+            Logger("paddle_infer_gpu").get_log().info("export produced no pdiparams for this subgraph, skipping the infer test")
+            return {"res": {"logit": None}}

         config = paddle_infer.Config(self.path + ".pdmodel", self.path + ".pdiparams")
         config.enable_use_gpu(1000, int(self.device_id))
@@ -63,13 +66,14 @@
         else:
             output_handle = predictor.get_output_handle(output_names[0])
             infer_res = output_handle.copy_to_cpu()
-        return {"logit": infer_res}
+        return {"res": {"logit": infer_res}}

     def paddle_infer_cpu(self):
         """infer load (layer)"""
         reset(self.seed)
         if not os.path.exists(self.path + ".pdiparams"):
-            return "pass"
+            Logger("paddle_infer_cpu").get_log().info("export produced no pdiparams for this subgraph, skipping the infer test")
+            return {"res": {"logit": None}}

         config = paddle_infer.Config(self.path + ".pdmodel", self.path + ".pdiparams")
@@ -92,13 +96,14 @@
         else:
             output_handle = predictor.get_output_handle(output_names[0])
             infer_res = output_handle.copy_to_cpu()
-        return {"logit": infer_res}
+        return {"res": {"logit": infer_res}}

     def paddle_infer_mkldnn(self):
         """infer load (layer)"""
         reset(self.seed)
         if not os.path.exists(self.path + ".pdiparams"):
-            return "pass"
+            Logger("paddle_infer_mkldnn").get_log().info("export produced no pdiparams for this subgraph, skipping the infer test")
+            return {"res": {"logit": None}}

         config = paddle_infer.Config(self.path + ".pdmodel", self.path + ".pdiparams")
@@ -123,13 +128,14 @@
         else:
             output_handle = predictor.get_output_handle(output_names[0])
             infer_res = output_handle.copy_to_cpu()
-        return {"logit": infer_res}
+        return {"res": {"logit": infer_res}}

     def paddle_infer_ort(self):
         """infer load (layer)"""
         reset(self.seed)
         if not os.path.exists(self.path + ".pdiparams"):
-            return "pass"
+            Logger("paddle_infer_ort").get_log().info("export produced no pdiparams for this subgraph, skipping the infer test")
+            return {"res": {"logit": None}}

         config = paddle_infer.Config(self.path + ".pdmodel", self.path + ".pdiparams")
@@ -153,13 +159,14 @@
         else:
             output_handle = predictor.get_output_handle(output_names[0])
             infer_res = output_handle.copy_to_cpu()
-        return {"logit": infer_res}
+        return {"res": {"logit": infer_res}}

     def paddle_infer_new_exc_pir(self):
         """infer load (layer)"""
         reset(self.seed)
         if not os.path.exists(self.path + ".pdiparams"):
-            return "pass"
+            Logger("paddle_infer_new_exc_pir").get_log().info("export produced no pdiparams for this subgraph, skipping the infer test")
+            return {"res": {"logit": None}}

         config = paddle_infer.Config(self.path + ".json", self.path + ".pdiparams")
         # config = paddle_infer.Config(self.path, 'inference')
@@ -186,4 +193,4 @@
         else:
             output_handle = predictor.get_output_handle(output_names[0])
             infer_res = output_handle.copy_to_cpu()
-        return {"logit": infer_res}
+        return {"res": {"logit": infer_res}}
diff --git a/framework/e2e/PaddleLT_new/engine/paddle_train.py b/framework/e2e/PaddleLT_new/engine/paddle_train.py
index 3480f6f686..52aec07e71 100644
--- a/framework/e2e/PaddleLT_new/engine/paddle_train.py
+++ b/framework/e2e/PaddleLT_new/engine/paddle_train.py
@@ -24,7 +24,7 @@ class LayerTrain(object):
     """

     # def __init__(self, testing, layerfile, device_id):
-    def __init__(self, testing, layerfile, device_place_id):
+    def __init__(self, testing, layerfile, device_place_id, upstream_net):
         """
         initialize
         """
@@ -36,6 +36,8 @@ def __init__(self, testing, layerfile, device_place_id):
         # paddle.set_device("{}:{}".format(str(self.device), str(device_id)))

         self.testing = testing
+        self.upstream_net = upstream_net
+        self.return_net_instance = self.testing.get("return_net_instance", "False")
         self.model_dtype = self.testing.get("model_dtype")
         paddle.set_default_dtype(self.model_dtype)
@@ -51,7 +53,10 @@ def _net_input(self):

     def _net_instant(self):
         """get net"""
         reset(self.seed)
-        net = BuildLayer(layerfile=self.layerfile).get_layer()
+        if self.upstream_net:
+            net = self.upstream_net
+        else:
+            net = BuildLayer(layerfile=self.layerfile).get_layer()
         return net

     def _net_optimizer(self):
@@ -135,8 +140,12 @@ def dy_train(self):
             opt.step()
             opt.clear_grad()

+        Logger("dy_train").get_log().info(f"completed {epoch} training rounds")
         data_grad = self._get_data_grad(data)
-        return {"logit": logit, "data_grad": data_grad}
+        if self.return_net_instance == "True":
+            return {"res": {"logit": logit, "data_grad": data_grad}, "net": net}
+        else:
+            return {"res": {"logit": logit, "data_grad": data_grad}, "net": None}

     def dy_dp_train(self):
         """dygraph data parallel train"""
@@ -166,8 +175,13 @@
             opt.step()
             opt.clear_grad()

+        Logger("dy_dp_train").get_log().info(f"completed {epoch} training rounds")
         data_grad = self._get_data_grad(data)
-        return {"logit": logit, "data_grad": data_grad}
+        # return {"logit": logit, "data_grad": data_grad}
+        if self.return_net_instance == "True":
+            return {"res": {"logit": logit, "data_grad": data_grad}, "net": net}
+        else:
+            return {"res": {"logit": logit, "data_grad": data_grad}, "net": None}

     # def dy_train_dl(self):
     #     """dygraph train with dataloader"""
@@ -217,8 +231,13 @@
             opt.step()
             opt.clear_grad()

+        Logger("dy2st_train").get_log().info(f"completed {epoch} training rounds")
         data_grad = self._get_data_grad(data)
-        return {"logit": logit, "data_grad": data_grad}
+        # return {"logit": logit, "data_grad": data_grad}
+        if self.return_net_instance == "True":
+            return {"res": {"logit": logit, "data_grad": data_grad}, "net": st_net}
+        else:
+            return {"res": {"logit": logit, "data_grad": data_grad}, "net": None}

     def dy2st_train_inputspec(self):
         """dy2st cinn train with inputspec"""
@@ -244,8 +263,13 @@
             opt.step()
             opt.clear_grad()

+        Logger("dy2st_train_inputspec").get_log().info(f"completed {epoch} training rounds")
         data_grad = self._get_data_grad(data)
-        return {"logit": logit, "data_grad": data_grad}
+        # return {"logit": logit, "data_grad": data_grad}
+        if self.return_net_instance == "True":
+            return {"res": {"logit": logit, "data_grad": data_grad}, "net": st_net}
+        else:
+            return {"res": {"logit": logit, "data_grad": data_grad}, "net": None}

     def dy2st_train_static_inputspec(self):
         """dy2st cinn train with inputspec"""
@@ -271,8 +295,13 @@
             opt.step()
             opt.clear_grad()

+        Logger("dy2st_train_static_inputspec").get_log().info(f"completed {epoch} training rounds")
         data_grad = self._get_data_grad(data)
-        return {"logit": logit, "data_grad": data_grad}
+        # return {"logit": logit, "data_grad": data_grad}
+        if self.return_net_instance == "True":
+            return {"res": {"logit": logit, "data_grad": data_grad}, "net": st_net}
+        else:
+            return {"res": {"logit": logit, "data_grad": data_grad}, "net": None}

     def dy2st_train_cinn(self):
         """dy2st cinn train"""
@@ -284,23 +313,28 @@
         net.train()
         build_strategy = paddle.static.BuildStrategy()
         build_strategy.build_cinn_pass = True
-        st_net = paddle.jit.to_static(net, build_strategy=build_strategy, full_graph=True)
+        cinn_net = paddle.jit.to_static(net, build_strategy=build_strategy, full_graph=True)

         # build the optimizer used for training
-        if st_net.parameters():
-            opt = optimizer.get_opt(net=st_net)
+        if cinn_net.parameters():
+            opt = optimizer.get_opt(net=cinn_net)

         for epoch in range(self.step):
-            logit = st_net(*data)
+            logit = cinn_net(*data)
             # build the loss used for training
             dy_loss = loss.get_loss(logit)
             dy_loss.backward()
-            if st_net.parameters():
+            if cinn_net.parameters():
                 opt.step()
                 opt.clear_grad()

+        Logger("dy2st_train_cinn").get_log().info(f"completed {epoch} training rounds")
         data_grad = self._get_data_grad(data)
-        return {"logit": logit, "data_grad": data_grad}
+        # return {"logit": logit, "data_grad": data_grad}
+        if self.return_net_instance == "True":
+            return {"res": {"logit": logit, "data_grad": data_grad}, "net": cinn_net}
+        else:
+            return {"res": {"logit": logit, "data_grad": data_grad}, "net": None}

     def dy2st_train_cinn_inputspec(self):
         """dy2st cinn train with inputspec"""
@@ -328,8 +362,13 @@
             opt.step()
             opt.clear_grad()

+        Logger("dy2st_train_cinn_inputspec").get_log().info(f"completed {epoch} training rounds")
         data_grad = self._get_data_grad(data)
-        return {"logit": logit, "data_grad": data_grad}
+        # return {"logit": logit, "data_grad": data_grad}
+        if self.return_net_instance == "True":
+            return {"res": {"logit": logit, "data_grad": data_grad}, "net": cinn_net}
+        else:
+            return {"res": {"logit": logit, "data_grad": data_grad}, "net": None}

     def dy2st_train_cinn_static_inputspec(self):
         """dy2st cinn train with inputspec"""
@@ -357,5 +396,10 @@
             opt.step()
             opt.clear_grad()

+        Logger("dy2st_train_cinn_static_inputspec").get_log().info(f"completed {epoch} training rounds")
         data_grad = self._get_data_grad(data)
-        return {"logit": logit, "data_grad": data_grad}
+        # return {"logit": logit, "data_grad": data_grad}
+        if self.return_net_instance == "True":
+            return {"res": {"logit": logit, "data_grad": data_grad}, "net": cinn_net}
+        else:
+            return {"res": {"logit": logit, "data_grad": data_grad}, "net": None}
diff --git a/framework/e2e/PaddleLT_new/engine/paddle_train_bm.py b/framework/e2e/PaddleLT_new/engine/paddle_train_bm.py
index 82384f554a..c17564a189 100644
--- a/framework/e2e/PaddleLT_new/engine/paddle_train_bm.py
+++ b/framework/e2e/PaddleLT_new/engine/paddle_train_bm.py
@@ -26,7 +26,7 @@ class LayerTrainBM(object):
     """

     # def __init__(self, testing, layerfile, device_id):
-    def __init__(self, testing, layerfile, device_place_id):
+    def __init__(self, testing, layerfile, device_place_id, upstream_net):
         """
         initialize
         """
@@ -45,6 +45,8 @@ def __init__(self, testing, layerfile, device_place_id):
         self.statis_round = 6

         self.testing = testing
+        self.upstream_net = upstream_net
+        # self.return_net_instance = self.testing.get("return_net_instance", "False")
         self.model_dtype = self.testing.get("model_dtype")
         paddle.set_default_dtype(self.model_dtype)
@@ -56,7 +58,10 @@ def __init__(self, testing, layerfile, device_place_id):

     def _net_instant(self):
         """get net and data"""
         reset(self.seed)
-        net = BuildLayer(layerfile=self.layerfile).get_layer()
+        if self.upstream_net:
+            net = self.upstream_net
+        else:
+            net = BuildLayer(layerfile=self.layerfile).get_layer()
         return net

     def _net_optimizer(self):
diff --git a/framework/e2e/PaddleLT_new/layerApicase/nn_sublayer/dropout2d_0_func.py b/framework/e2e/PaddleLT_new/layerApicase/nn_sublayer/dropout2d_0_func.py
index 80c307ce7e..bea163ff87 100644
--- a/framework/e2e/PaddleLT_new/layerApicase/nn_sublayer/dropout2d_0_func.py
+++ b/framework/e2e/PaddleLT_new/layerApicase/nn_sublayer/dropout2d_0_func.py
@@ -15,7 +15,7 @@ def forward(self, x, ):
         """
         forward
         """
-        out = paddle.nn.functional.dropout2d(x, p=paddle.to_tensor([0.5], dtype='float32', stop_gradient=False), training=True, )
+        out = paddle.nn.functional.dropout2d(x, p=paddle.to_tensor([0.5], dtype='float32', stop_gradient=False), training=self.training, )
         return out
diff --git a/framework/e2e/PaddleLT_new/layercase/perf245/Seg_cases/espnet_espnet_cityscapes_1024x512_120k/SIR_143.py b/framework/e2e/PaddleLT_new/layercase/perf245/Seg_cases/espnet_espnet_cityscapes_1024x512_120k/SIR_143.py
index 0050a41c7a..8458707449 100644
--- a/framework/e2e/PaddleLT_new/layercase/perf245/Seg_cases/espnet_espnet_cityscapes_1024x512_120k/SIR_143.py
+++ b/framework/e2e/PaddleLT_new/layercase/perf245/Seg_cases/espnet_espnet_cityscapes_1024x512_120k/SIR_143.py
@@ -19,7 +19,7 @@ def forward(
     ):
         var_3 = paddle.nn.functional.common.interpolate(var_0, scale_factor=2, mode='bilinear', align_corners=True)
         var_4 = paddle.tensor.manipulation.concat([var_1, var_3], axis=1)
-        var_5 = paddle.nn.functional.common.dropout2d(var_4, p=0.0, training=True, data_format='NCHW', name=None)
+        var_5 = paddle.nn.functional.common.dropout2d(var_4, p=0.0, training=self.training, data_format='NCHW', name=None)
         var_6 = paddle.nn.functional.conv._conv_nd(var_5, self.parameter_0, bias=None, stride=[1, 1], padding=[0, 0], padding_algorithm='EXPLICIT', dilation=[1, 1], groups=1, data_format='NCHW', channel_dim=1, op_type='conv2d', use_cudnn=True)
         var_7 = paddle.nn.functional.common.interpolate(var_6, scale_factor=2, mode='bilinear', align_corners=True)
         var_8 = paddle.nn.functional.common.interpolate(var_2, scale_factor=2, mode='bilinear', align_corners=True)
diff --git a/framework/e2e/PaddleLT_new/layercase/sublayer1000/Clas_cases/DeiT_DeiT_tiny_distilled_patch16_224/SIR_4.py b/framework/e2e/PaddleLT_new/layercase/sublayer1000/Clas_cases/DeiT_DeiT_tiny_distilled_patch16_224/SIR_4.py
index 53555f906a..1303d53647 100644
--- a/framework/e2e/PaddleLT_new/layercase/sublayer1000/Clas_cases/DeiT_DeiT_tiny_distilled_patch16_224/SIR_4.py
+++ b/framework/e2e/PaddleLT_new/layercase/sublayer1000/Clas_cases/DeiT_DeiT_tiny_distilled_patch16_224/SIR_4.py
@@ -47,12 +47,12 @@ def forward(
         var_9 = var_5.matmul(var_8)
         var_10 = var_9.__mul__(0.125)
         var_11 = paddle.nn.functional.activation.softmax(var_10, axis=-1)
-        var_12 = paddle.nn.functional.common.dropout(var_11, p=0.0, axis=None, training=True, mode='upscale_in_train', name=None)
+        var_12 = paddle.nn.functional.common.dropout(var_11, p=0.0, axis=None, training=self.training, mode='upscale_in_train', name=None)
         var_13 = var_12.matmul(var_7)
         var_14 = var_13.transpose((0, 2, 1, 3,))
         var_15 = var_14.reshape((-1, 198, 192,))
         var_16 = paddle.nn.functional.common.linear(x=var_15, weight=self.parameter_2, bias=self.parameter_4, name=None)
-        var_17 = paddle.nn.functional.common.dropout(var_16, p=0.0, axis=None, training=True, mode='upscale_in_train', name=None)
+        var_17 = paddle.nn.functional.common.dropout(var_16, p=0.0, axis=None, training=self.training, mode='upscale_in_train', name=None)
         return var_17
diff --git a/framework/e2e/PaddleLT_new/layercase/sublayer1000/Clas_cases/DeiT_DeiT_tiny_distilled_patch16_224/SIR_56.py b/framework/e2e/PaddleLT_new/layercase/sublayer1000/Clas_cases/DeiT_DeiT_tiny_distilled_patch16_224/SIR_56.py
index 1ca00a2704..8dcba83093 100644
--- a/framework/e2e/PaddleLT_new/layercase/sublayer1000/Clas_cases/DeiT_DeiT_tiny_distilled_patch16_224/SIR_56.py
+++ b/framework/e2e/PaddleLT_new/layercase/sublayer1000/Clas_cases/DeiT_DeiT_tiny_distilled_patch16_224/SIR_56.py
@@ -47,12 +47,12 @@ def forward(
         var_9 = var_5.matmul(var_8)
         var_10 = var_9.__mul__(0.125)
         var_11 = paddle.nn.functional.activation.softmax(var_10, axis=-1)
-        var_12 = paddle.nn.functional.common.dropout(var_11, p=0.0, axis=None, training=True, mode='upscale_in_train', name=None)
+        var_12 = paddle.nn.functional.common.dropout(var_11, p=0.0, axis=None, training=self.training, mode='upscale_in_train', name=None)
         var_13 = var_12.matmul(var_7)
         var_14 = var_13.transpose((0, 2, 1, 3,))
         var_15 = var_14.reshape((-1, 198, 192,))
         var_16 = paddle.nn.functional.common.linear(x=var_15, weight=self.parameter_1, bias=self.parameter_2, name=None)
-        var_17 = paddle.nn.functional.common.dropout(var_16, p=0.0, axis=None, training=True, mode='upscale_in_train', name=None)
+        var_17 = paddle.nn.functional.common.dropout(var_16, p=0.0, axis=None, training=self.training, mode='upscale_in_train', name=None)
         return var_17
diff --git a/framework/e2e/PaddleLT_new/layercase/sublayer1000/Clas_cases/DeiT_DeiT_tiny_patch16_224/SIR_4.py b/framework/e2e/PaddleLT_new/layercase/sublayer1000/Clas_cases/DeiT_DeiT_tiny_patch16_224/SIR_4.py
index a54e05ddde..4e42dae425 100644
--- a/framework/e2e/PaddleLT_new/layercase/sublayer1000/Clas_cases/DeiT_DeiT_tiny_patch16_224/SIR_4.py
+++ b/framework/e2e/PaddleLT_new/layercase/sublayer1000/Clas_cases/DeiT_DeiT_tiny_patch16_224/SIR_4.py
@@ -47,12 +47,12 @@ def forward(
         var_9 = var_5.matmul(var_8)
         var_10 = var_9.__mul__(0.125)
         var_11 = paddle.nn.functional.activation.softmax(var_10, axis=-1)
-        var_12 = paddle.nn.functional.common.dropout(var_11, p=0.0, axis=None, training=True, mode='upscale_in_train', name=None)
+        var_12 = paddle.nn.functional.common.dropout(var_11, p=0.0, axis=None, training=self.training, mode='upscale_in_train', name=None)
         var_13 = var_12.matmul(var_7)
         var_14 = var_13.transpose((0, 2, 1, 3,))
         var_15 = var_14.reshape((-1, 197, 192,))
         var_16 = paddle.nn.functional.common.linear(x=var_15, weight=self.parameter_3, bias=self.parameter_5, name=None)
-        var_17 = paddle.nn.functional.common.dropout(var_16, p=0.0, axis=None, training=True, mode='upscale_in_train', name=None)
+        var_17 = paddle.nn.functional.common.dropout(var_16, p=0.0, axis=None, training=self.training, mode='upscale_in_train', name=None)
         return var_17
diff --git a/framework/e2e/PaddleLT_new/layercase/sublayer1000/Clas_cases/DeiT_DeiT_tiny_patch16_224/SIR_56.py b/framework/e2e/PaddleLT_new/layercase/sublayer1000/Clas_cases/DeiT_DeiT_tiny_patch16_224/SIR_56.py
index a15e7b6858..fae2239e3d 100644
--- a/framework/e2e/PaddleLT_new/layercase/sublayer1000/Clas_cases/DeiT_DeiT_tiny_patch16_224/SIR_56.py
+++ b/framework/e2e/PaddleLT_new/layercase/sublayer1000/Clas_cases/DeiT_DeiT_tiny_patch16_224/SIR_56.py
@@ -47,12 +47,12 @@ def forward(
         var_9 = var_5.matmul(var_8)
         var_10 = var_9.__mul__(0.125)
         var_11 = paddle.nn.functional.activation.softmax(var_10, axis=-1)
-        var_12 = paddle.nn.functional.common.dropout(var_11, p=0.0, axis=None, training=True, mode='upscale_in_train', name=None)
+        var_12 = paddle.nn.functional.common.dropout(var_11, p=0.0, axis=None, training=self.training, mode='upscale_in_train', name=None)
         var_13 = var_12.matmul(var_7)
         var_14 = var_13.transpose((0, 2, 1, 3,))
         var_15 = var_14.reshape((-1, 197, 192,))
         var_16 = paddle.nn.functional.common.linear(x=var_15, weight=self.parameter_0, bias=self.parameter_5, name=None)
-        var_17 = paddle.nn.functional.common.dropout(var_16, p=0.0, axis=None, training=True, mode='upscale_in_train', name=None)
+        var_17 = paddle.nn.functional.common.dropout(var_16, p=0.0, axis=None, training=self.training, mode='upscale_in_train', name=None)
         return var_17
diff --git a/framework/e2e/PaddleLT_new/layercase/sublayer1000/Clas_cases/EfficientNet_EfficientNetB0/SIR_140.py b/framework/e2e/PaddleLT_new/layercase/sublayer1000/Clas_cases/EfficientNet_EfficientNetB0/SIR_140.py
index d359417040..a6464096e4 100644
--- a/framework/e2e/PaddleLT_new/layercase/sublayer1000/Clas_cases/EfficientNet_EfficientNetB0/SIR_140.py
+++ b/framework/e2e/PaddleLT_new/layercase/sublayer1000/Clas_cases/EfficientNet_EfficientNetB0/SIR_140.py
@@ -21,7 +21,7 @@ def forward(
     ):
         paddle.seed(33)
         var_1 = paddle.nn.functional.pooling.adaptive_avg_pool2d(var_0, output_size=1, data_format='NCHW', name=None)
-        var_2 = paddle.nn.functional.common.dropout(var_1, p=0.2, axis=None, training=True, mode='upscale_in_train', name=None)
+        var_2 = paddle.nn.functional.common.dropout(var_1, p=0.2, axis=None, training=self.training, mode='upscale_in_train', name=None)
         var_3 = paddle.tensor.manipulation.squeeze(var_2, axis=[2, 3])
         var_4 = paddle.nn.functional.common.linear(x=var_3, weight=self.parameter_1, bias=self.parameter_0, name=None)
         return var_4
diff --git a/framework/e2e/PaddleLT_new/layercase/sublayer1000/Clas_cases/EfficientNet_EfficientNetB0/SIR_96.py b/framework/e2e/PaddleLT_new/layercase/sublayer1000/Clas_cases/EfficientNet_EfficientNetB0/SIR_96.py
index 781956af76..486ab89d99 100644
--- a/framework/e2e/PaddleLT_new/layercase/sublayer1000/Clas_cases/EfficientNet_EfficientNetB0/SIR_96.py
+++ b/framework/e2e/PaddleLT_new/layercase/sublayer1000/Clas_cases/EfficientNet_EfficientNetB0/SIR_96.py
@@ -21,7 +21,7 @@ def forward(
     ):
         paddle.seed(33)
         var_1 = paddle.nn.functional.pooling.adaptive_avg_pool2d(var_0, output_size=1, data_format='NCHW', name=None)
-        var_2 = paddle.nn.functional.common.dropout(var_1, p=0.2, axis=None, training=True, mode='upscale_in_train', name=None)
+        var_2 = paddle.nn.functional.common.dropout(var_1, p=0.2, axis=None, training=self.training, mode='upscale_in_train', name=None)
         var_3 = paddle.tensor.manipulation.squeeze(var_2, axis=[2, 3])
         var_4 = paddle.nn.functional.common.linear(x=var_3, weight=self.parameter_1, bias=self.parameter_0, name=None)
         return var_4
diff --git a/framework/e2e/PaddleLT_new/layercase/sublayer1000/Clas_cases/Inception_InceptionV4/SIR_39.py b/framework/e2e/PaddleLT_new/layercase/sublayer1000/Clas_cases/Inception_InceptionV4/SIR_39.py
index 6e6a9b8a56..507808cac9 100644
--- a/framework/e2e/PaddleLT_new/layercase/sublayer1000/Clas_cases/Inception_InceptionV4/SIR_39.py
+++ b/framework/e2e/PaddleLT_new/layercase/sublayer1000/Clas_cases/Inception_InceptionV4/SIR_39.py
@@ -22,7 +22,7 @@ def forward(
         paddle.seed(33)
         var_1 = paddle.nn.functional.pooling.adaptive_avg_pool2d(var_0, output_size=1, data_format='NCHW', name=None)
         var_2 = paddle.tensor.manipulation.squeeze(var_1, axis=[2, 3])
-        var_3 = paddle.nn.functional.common.dropout(var_2, p=0.2, axis=None, training=True, mode='downscale_in_infer', name=None)
+        var_3 = paddle.nn.functional.common.dropout(var_2, p=0.2, axis=None, training=self.training, mode='downscale_in_infer', name=None)
         var_4 = paddle.nn.functional.common.linear(x=var_3, weight=self.parameter_0, bias=self.parameter_1, name=None)
         return var_4
diff --git a/framework/e2e/PaddleLT_new/layercase/sublayer1000/Clas_cases/Inception_InceptionV4/SIR_40.py b/framework/e2e/PaddleLT_new/layercase/sublayer1000/Clas_cases/Inception_InceptionV4/SIR_40.py
index 7262a08d42..7fa2530d30 100644
--- a/framework/e2e/PaddleLT_new/layercase/sublayer1000/Clas_cases/Inception_InceptionV4/SIR_40.py
+++ b/framework/e2e/PaddleLT_new/layercase/sublayer1000/Clas_cases/Inception_InceptionV4/SIR_40.py
@@ -22,7 +22,7 @@ def forward(
         paddle.seed(33)
         var_1 = paddle.nn.functional.pooling.adaptive_avg_pool2d(var_0, output_size=1, data_format='NCHW', name=None)
         var_2 = paddle.tensor.manipulation.squeeze(var_1, axis=[2, 3])
-        var_3 = paddle.nn.functional.common.dropout(var_2, p=0.2, axis=None, training=True, mode='downscale_in_infer', name=None)
+        var_3 = paddle.nn.functional.common.dropout(var_2, p=0.2, axis=None, training=self.training, mode='downscale_in_infer', name=None)
         var_4 = paddle.nn.functional.common.linear(x=var_3, weight=self.parameter_1, bias=self.parameter_0, name=None)
         return var_4
diff --git a/framework/e2e/PaddleLT_new/layercase/sublayer1000/Clas_cases/MobileViT_MobileViT_XXS/SIR_26.py b/framework/e2e/PaddleLT_new/layercase/sublayer1000/Clas_cases/MobileViT_MobileViT_XXS/SIR_26.py
index 8c16e59c62..ba72d48d57 100644
--- a/framework/e2e/PaddleLT_new/layercase/sublayer1000/Clas_cases/MobileViT_MobileViT_XXS/SIR_26.py
+++ b/framework/e2e/PaddleLT_new/layercase/sublayer1000/Clas_cases/MobileViT_MobileViT_XXS/SIR_26.py
@@ -22,7 +22,7 @@ def forward(
         paddle.seed(33)
         var_1 = paddle.nn.functional.pooling.adaptive_avg_pool2d(var_0, output_size=1, data_format='NCHW', name=None)
         var_2 = var_1.reshape([43, 320])
-        var_3 = paddle.nn.functional.common.dropout(var_2, p=0.1, axis=None, training=True, mode='upscale_in_train', name=None)
+        var_3 = paddle.nn.functional.common.dropout(var_2, p=0.1, axis=None, training=self.training, mode='upscale_in_train', name=None)
         var_4 = paddle.nn.functional.common.linear(x=var_3, weight=self.parameter_0, bias=self.parameter_1, name=None)
         return var_4
diff --git a/framework/e2e/PaddleLT_new/layercase/sublayer1000/Clas_cases/MobileViT_MobileViT_XXS/SIR_27.py b/framework/e2e/PaddleLT_new/layercase/sublayer1000/Clas_cases/MobileViT_MobileViT_XXS/SIR_27.py
index bb9fb3dc8a..5aafee130c 100644
--- a/framework/e2e/PaddleLT_new/layercase/sublayer1000/Clas_cases/MobileViT_MobileViT_XXS/SIR_27.py
+++ b/framework/e2e/PaddleLT_new/layercase/sublayer1000/Clas_cases/MobileViT_MobileViT_XXS/SIR_27.py
@@ -22,7 +22,7 @@ def forward(
         paddle.seed(33)
         var_1 = paddle.nn.functional.pooling.adaptive_avg_pool2d(var_0, output_size=1, data_format='NCHW', name=None)
         var_2 = var_1.reshape([11, 320])
-        var_3 = paddle.nn.functional.common.dropout(var_2, p=0.1, axis=None, training=True, mode='upscale_in_train', name=None)
+        var_3 = paddle.nn.functional.common.dropout(var_2, p=0.1, axis=None, training=self.training, mode='upscale_in_train', name=None)
         var_4 = paddle.nn.functional.common.linear(x=var_3, weight=self.parameter_0, bias=self.parameter_1, name=None)
         return var_4
diff --git a/framework/e2e/PaddleLT_new/layercase/sublayer1000/Clas_cases/PeleeNet_PeleeNet/SIR_41.py b/framework/e2e/PaddleLT_new/layercase/sublayer1000/Clas_cases/PeleeNet_PeleeNet/SIR_41.py
index 4e0c49bada..c4502c74ad 100644
--- a/framework/e2e/PaddleLT_new/layercase/sublayer1000/Clas_cases/PeleeNet_PeleeNet/SIR_41.py
+++ b/framework/e2e/PaddleLT_new/layercase/sublayer1000/Clas_cases/PeleeNet_PeleeNet/SIR_41.py
@@ -22,7 +22,7 @@ def forward(
         paddle.seed(33)
         var_1 = paddle.nn.functional.pooling.avg_pool2d(var_0, kernel_size=[7, 7])
         var_2 = var_1.flatten(1)
-        var_3 = paddle.nn.functional.common.dropout(var_2, p=0.05, training=True)
+        var_3 = paddle.nn.functional.common.dropout(var_2, p=0.05, training=self.training)
         var_4 = paddle.nn.functional.common.linear(x=var_3, weight=self.parameter_0, bias=self.parameter_1, name=None)
         return var_4
diff --git a/framework/e2e/PaddleLT_new/layercase/sublayer1000/Clas_cases/PeleeNet_PeleeNet/SIR_42.py b/framework/e2e/PaddleLT_new/layercase/sublayer1000/Clas_cases/PeleeNet_PeleeNet/SIR_42.py
index de558608ea..ab4a248bb6 100644
--- a/framework/e2e/PaddleLT_new/layercase/sublayer1000/Clas_cases/PeleeNet_PeleeNet/SIR_42.py
+++ b/framework/e2e/PaddleLT_new/layercase/sublayer1000/Clas_cases/PeleeNet_PeleeNet/SIR_42.py
@@ -22,7 +22,7 @@ def forward(
         paddle.seed(33)
         var_1 = paddle.nn.functional.pooling.avg_pool2d(var_0, kernel_size=[7, 7])
         var_2 = var_1.flatten(1)
-        var_3 = paddle.nn.functional.common.dropout(var_2, p=0.05, training=True)
+        var_3 = paddle.nn.functional.common.dropout(var_2, p=0.05, training=self.training)
         var_4 = paddle.nn.functional.common.linear(x=var_3, weight=self.parameter_1, bias=self.parameter_0, name=None)
         return var_4
diff --git a/framework/e2e/PaddleLT_new/layercase/sublayer1000/Clas_cases/ReXNet_ReXNet_3_0/SIR_60.py b/framework/e2e/PaddleLT_new/layercase/sublayer1000/Clas_cases/ReXNet_ReXNet_3_0/SIR_60.py
index a9851348c7..5ba1a6fd5f 100644
--- a/framework/e2e/PaddleLT_new/layercase/sublayer1000/Clas_cases/ReXNet_ReXNet_3_0/SIR_60.py
+++ b/framework/e2e/PaddleLT_new/layercase/sublayer1000/Clas_cases/ReXNet_ReXNet_3_0/SIR_60.py
@@ -20,7 +20,7 @@ def forward(
         var_0,    # (shape: [22, 3840, 1, 1], dtype: paddle.float32, stop_gradient: False)
     ):
         paddle.seed(33)
-        var_1 = paddle.nn.functional.common.dropout(var_0, p=0.2, axis=None, training=True, mode='upscale_in_train', name=None)
+        var_1 = paddle.nn.functional.common.dropout(var_0, p=0.2, axis=None, training=self.training, mode='upscale_in_train', name=None)
         var_2 = paddle.nn.functional.conv._conv_nd(var_1, self.parameter_1, bias=self.parameter_0, stride=[1, 1], padding=[0, 0], padding_algorithm='EXPLICIT', dilation=[1, 1], groups=1, data_format='NCHW', channel_dim=1, op_type='conv2d', use_cudnn=True)
         var_3 = var_2.squeeze(axis=-1)
         var_4 = var_3.squeeze(axis=-1)
diff --git a/framework/e2e/PaddleLT_new/layercase/sublayer1000/Clas_cases/ReXNet_ReXNet_3_0/SIR_61.py b/framework/e2e/PaddleLT_new/layercase/sublayer1000/Clas_cases/ReXNet_ReXNet_3_0/SIR_61.py
index 42ed5ee84d..5bc6700ecc 100644
--- a/framework/e2e/PaddleLT_new/layercase/sublayer1000/Clas_cases/ReXNet_ReXNet_3_0/SIR_61.py
+++ b/framework/e2e/PaddleLT_new/layercase/sublayer1000/Clas_cases/ReXNet_ReXNet_3_0/SIR_61.py
@@ -20,7 +20,7 @@ def forward(
         var_0,    # (shape: [10, 3840, 1, 1], dtype: paddle.float32, stop_gradient: False)
     ):
         paddle.seed(33)
-        var_1 = paddle.nn.functional.common.dropout(var_0, p=0.2, axis=None, training=True, mode='upscale_in_train', name=None)
+        var_1 = paddle.nn.functional.common.dropout(var_0, p=0.2, axis=None, training=self.training, mode='upscale_in_train', name=None)
         var_2 = paddle.nn.functional.conv._conv_nd(var_1, self.parameter_0, bias=self.parameter_1, stride=[1, 1], padding=[0, 0], padding_algorithm='EXPLICIT', dilation=[1, 1], groups=1, data_format='NCHW', channel_dim=1, op_type='conv2d', use_cudnn=True)
         var_3 = var_2.squeeze(axis=-1)
         var_4 = var_3.squeeze(axis=-1)
diff --git a/framework/e2e/PaddleLT_new/layercase/sublayer1000/Clas_cases/SqueezeNet_SqueezeNet1_0/SIR_0.py b/framework/e2e/PaddleLT_new/layercase/sublayer1000/Clas_cases/SqueezeNet_SqueezeNet1_0/SIR_0.py
index 16a7c5d6e9..0d1a93c7b3 100644
--- a/framework/e2e/PaddleLT_new/layercase/sublayer1000/Clas_cases/SqueezeNet_SqueezeNet1_0/SIR_0.py
+++ b/framework/e2e/PaddleLT_new/layercase/sublayer1000/Clas_cases/SqueezeNet_SqueezeNet1_0/SIR_0.py
@@ -281,7 +281,7 @@ def forward(
         var_59 = paddle.nn.functional.conv._conv_nd(var_56, self.parameter_35, bias=self.parameter_48, stride=[1, 1], padding=[1, 1], padding_algorithm='EXPLICIT', dilation=[1, 1], groups=1, data_format='NCHW', channel_dim=1, op_type='conv2d', use_cudnn=True)
         var_60 = paddle.nn.functional.activation.relu(var_59)
         var_61 = paddle.tensor.manipulation.concat([var_58, var_60], axis=1)
-        var_62 = paddle.nn.functional.common.dropout(var_61, p=0.5, axis=None, training=True, mode='downscale_in_infer', name=None)
+        var_62 = paddle.nn.functional.common.dropout(var_61, p=0.5, axis=None, training=self.training, mode='downscale_in_infer', name=None)
         var_63 = paddle.nn.functional.conv._conv_nd(var_62, self.parameter_44, bias=self.parameter_1, stride=[1, 1], padding=[0, 0], padding_algorithm='EXPLICIT', dilation=[1, 1], groups=1, data_format='NCHW', channel_dim=1, op_type='conv2d', use_cudnn=True)
         var_64 = paddle.nn.functional.activation.relu(var_63)
         var_65 = paddle.nn.functional.pooling.adaptive_avg_pool2d(var_64, output_size=1, data_format='NCHW', name=None)
diff --git a/framework/e2e/PaddleLT_new/layercase/sublayer1000/Clas_cases/SqueezeNet_SqueezeNet1_0/SIR_1.py b/framework/e2e/PaddleLT_new/layercase/sublayer1000/Clas_cases/SqueezeNet_SqueezeNet1_0/SIR_1.py
index 0ca56bfe59..850fafa090 100644
--- a/framework/e2e/PaddleLT_new/layercase/sublayer1000/Clas_cases/SqueezeNet_SqueezeNet1_0/SIR_1.py
+++ b/framework/e2e/PaddleLT_new/layercase/sublayer1000/Clas_cases/SqueezeNet_SqueezeNet1_0/SIR_1.py
@@ -281,7 +281,7 @@ def forward(
         var_59 = paddle.nn.functional.conv._conv_nd(var_56, self.parameter_39, bias=self.parameter_1, stride=[1, 1], padding=[1, 1], padding_algorithm='EXPLICIT', dilation=[1, 1], groups=1, data_format='NCHW', channel_dim=1, op_type='conv2d', use_cudnn=True)
         var_60 = paddle.nn.functional.activation.relu(var_59)
         var_61 = paddle.tensor.manipulation.concat([var_58, var_60], axis=1)
-        var_62 = paddle.nn.functional.common.dropout(var_61, p=0.5, axis=None, training=True, mode='downscale_in_infer', name=None)
+        var_62 = paddle.nn.functional.common.dropout(var_61, p=0.5, axis=None, training=self.training, mode='downscale_in_infer', name=None)
         var_63 = paddle.nn.functional.conv._conv_nd(var_62, self.parameter_15, bias=self.parameter_17, stride=[1, 1], padding=[0, 0], padding_algorithm='EXPLICIT', dilation=[1, 1], groups=1, data_format='NCHW', channel_dim=1, op_type='conv2d', use_cudnn=True)
         var_64 = paddle.nn.functional.activation.relu(var_63)
         var_65 = paddle.nn.functional.pooling.adaptive_avg_pool2d(var_64, output_size=1, data_format='NCHW', name=None)
diff --git a/framework/e2e/PaddleLT_new/layercase/sublayer1000/Clas_cases/Twins_alt_gvt_base/SIR_108.py b/framework/e2e/PaddleLT_new/layercase/sublayer1000/Clas_cases/Twins_alt_gvt_base/SIR_108.py
index 1e3eaa1721..c31613494b 100644
--- a/framework/e2e/PaddleLT_new/layercase/sublayer1000/Clas_cases/Twins_alt_gvt_base/SIR_108.py
+++ b/framework/e2e/PaddleLT_new/layercase/sublayer1000/Clas_cases/Twins_alt_gvt_base/SIR_108.py
@@ -32,7 +32,7 @@ def forward(
         var_2 = var_1.flatten(2)
         var_3 = var_2.transpose([0, 2, 1])
         var_4 = paddle.nn.functional.norm.layer_norm(var_3, normalized_shape=[384], weight=self.parameter_2, bias=self.parameter_1, epsilon=1e-05)
-        var_5 = paddle.nn.functional.common.dropout(var_4, p=0.0, axis=None, training=True, mode='upscale_in_train', name=None)
+        var_5 = paddle.nn.functional.common.dropout(var_4, p=0.0, axis=None, training=self.training, mode='upscale_in_train', name=None)
         return var_5
diff --git a/framework/e2e/PaddleLT_new/layercase/sublayer1000/Clas_cases/Twins_alt_gvt_base/SIR_136.py b/framework/e2e/PaddleLT_new/layercase/sublayer1000/Clas_cases/Twins_alt_gvt_base/SIR_136.py
index 9c4d75e67d..49eb091a1d 100644
--- a/framework/e2e/PaddleLT_new/layercase/sublayer1000/Clas_cases/Twins_alt_gvt_base/SIR_136.py
+++ b/framework/e2e/PaddleLT_new/layercase/sublayer1000/Clas_cases/Twins_alt_gvt_base/SIR_136.py
@@ -32,7 +32,7 @@ def forward(
         var_2 = var_1.flatten(2)
         var_3 = var_2.transpose([0, 2, 1])
         var_4 = paddle.nn.functional.norm.layer_norm(var_3, normalized_shape=[768], weight=self.parameter_0, bias=self.parameter_3, epsilon=1e-05)
-        var_5 = paddle.nn.functional.common.dropout(var_4, p=0.0, axis=None, training=True, mode='upscale_in_train', name=None)
+        var_5 = paddle.nn.functional.common.dropout(var_4, p=0.0, axis=None, training=self.training, mode='upscale_in_train', name=None)
         return var_5
diff --git a/framework/e2e/PaddleLT_new/layercase/sublayer1000/Clas_cases/Twins_alt_gvt_base/SIR_14.py b/framework/e2e/PaddleLT_new/layercase/sublayer1000/Clas_cases/Twins_alt_gvt_base/SIR_14.py
index f45422bef1..3f5fc8286c 100644
--- a/framework/e2e/PaddleLT_new/layercase/sublayer1000/Clas_cases/Twins_alt_gvt_base/SIR_14.py
+++ b/framework/e2e/PaddleLT_new/layercase/sublayer1000/Clas_cases/Twins_alt_gvt_base/SIR_14.py
@@ -32,7 +32,7 @@ def forward(
         var_2 = var_1.flatten(2)
         var_3 = var_2.transpose([0, 2, 1])
         var_4 = paddle.nn.functional.norm.layer_norm(var_3, normalized_shape=[192], weight=self.parameter_3, bias=self.parameter_2, epsilon=1e-05)
-        var_5 = paddle.nn.functional.common.dropout(var_4, p=0.0, axis=None, training=True, mode='upscale_in_train', name=None)
+        var_5 = paddle.nn.functional.common.dropout(var_4, p=0.0, axis=None, training=self.training, mode='upscale_in_train', name=None)
         return var_5
diff --git a/framework/e2e/PaddleLT_new/layercase/sublayer1000/Clas_cases/Twins_alt_gvt_base/SIR_2.py b/framework/e2e/PaddleLT_new/layercase/sublayer1000/Clas_cases/Twins_alt_gvt_base/SIR_2.py
index 4b6dad9154..41f6e276b4 100644
--- a/framework/e2e/PaddleLT_new/layercase/sublayer1000/Clas_cases/Twins_alt_gvt_base/SIR_2.py
+++ b/framework/e2e/PaddleLT_new/layercase/sublayer1000/Clas_cases/Twins_alt_gvt_base/SIR_2.py
@@ -32,7 +32,7 @@ def forward(
         var_2 = var_1.flatten(2)
         var_3 = var_2.transpose([0, 2, 1])
         var_4 = paddle.nn.functional.norm.layer_norm(var_3, normalized_shape=[96], weight=self.parameter_1, bias=self.parameter_0, epsilon=1e-05)
-        var_5 = paddle.nn.functional.common.dropout(var_4, p=0.0, axis=None, training=True, mode='upscale_in_train', name=None)
+        var_5 = paddle.nn.functional.common.dropout(var_4, p=0.0, axis=None, training=self.training, mode='upscale_in_train', name=None)
         return var_5
diff --git a/framework/e2e/PaddleLT_new/layercase/sublayer1000/Clas_cases/Twins_alt_gvt_base/SIR_26.py b/framework/e2e/PaddleLT_new/layercase/sublayer1000/Clas_cases/Twins_alt_gvt_base/SIR_26.py
index c171476c89..0a1dc90722 100644
--- a/framework/e2e/PaddleLT_new/layercase/sublayer1000/Clas_cases/Twins_alt_gvt_base/SIR_26.py
+++ b/framework/e2e/PaddleLT_new/layercase/sublayer1000/Clas_cases/Twins_alt_gvt_base/SIR_26.py
@@ -32,7 +32,7 @@ def forward(
         var_2 = var_1.flatten(2)
         var_3 = var_2.transpose([0, 2, 1])
         var_4 = paddle.nn.functional.norm.layer_norm(var_3, normalized_shape=[384], weight=self.parameter_0, bias=self.parameter_2, epsilon=1e-05)
-        var_5 = paddle.nn.functional.common.dropout(var_4, p=0.0, axis=None, training=True, mode='upscale_in_train', name=None)
+        var_5 = paddle.nn.functional.common.dropout(var_4, p=0.0, axis=None, training=self.training, mode='upscale_in_train', name=None)
         return var_5
diff --git a/framework/e2e/PaddleLT_new/layercase/sublayer1000/Clas_cases/Twins_alt_gvt_base/SIR_70.py b/framework/e2e/PaddleLT_new/layercase/sublayer1000/Clas_cases/Twins_alt_gvt_base/SIR_70.py
index 9a4e976ff5..cc772efff0 100644
--- a/framework/e2e/PaddleLT_new/layercase/sublayer1000/Clas_cases/Twins_alt_gvt_base/SIR_70.py
+++ b/framework/e2e/PaddleLT_new/layercase/sublayer1000/Clas_cases/Twins_alt_gvt_base/SIR_70.py
@@ -32,7 +32,7 @@ def forward(
         var_2 = var_1.flatten(2)
         var_3 = var_2.transpose([0, 2, 1])
         var_4 = paddle.nn.functional.norm.layer_norm(var_3, normalized_shape=[768], weight=self.parameter_3, bias=self.parameter_2, epsilon=1e-05)
-        var_5 = paddle.nn.functional.common.dropout(var_4, p=0.0, axis=None, training=True, mode='upscale_in_train', name=None)
+        var_5 = paddle.nn.functional.common.dropout(var_4, p=0.0, axis=None, training=self.training, mode='upscale_in_train', name=None)
         return var_5
diff --git a/framework/e2e/PaddleLT_new/layercase/sublayer1000/Clas_cases/Twins_alt_gvt_base/SIR_84.py b/framework/e2e/PaddleLT_new/layercase/sublayer1000/Clas_cases/Twins_alt_gvt_base/SIR_84.py
index 93e93954c5..49a42602e7 100644
--- a/framework/e2e/PaddleLT_new/layercase/sublayer1000/Clas_cases/Twins_alt_gvt_base/SIR_84.py
+++ b/framework/e2e/PaddleLT_new/layercase/sublayer1000/Clas_cases/Twins_alt_gvt_base/SIR_84.py
@@ -32,7 +32,7 @@ def forward(
         var_2 = var_1.flatten(2)
         var_3 = var_2.transpose([0, 2, 1])
         var_4 = paddle.nn.functional.norm.layer_norm(var_3, normalized_shape=[96], weight=self.parameter_2, bias=self.parameter_0, epsilon=1e-05)
-        var_5 = paddle.nn.functional.common.dropout(var_4, p=0.0, axis=None, training=True, mode='upscale_in_train', name=None)
+        var_5 = paddle.nn.functional.common.dropout(var_4, p=0.0, axis=None, training=self.training, mode='upscale_in_train', name=None)
         return var_5
diff --git a/framework/e2e/PaddleLT_new/layercase/sublayer1000/Clas_cases/Twins_alt_gvt_base/SIR_96.py b/framework/e2e/PaddleLT_new/layercase/sublayer1000/Clas_cases/Twins_alt_gvt_base/SIR_96.py
index 8a66e139e3..58f5087a6e 100644
--- a/framework/e2e/PaddleLT_new/layercase/sublayer1000/Clas_cases/Twins_alt_gvt_base/SIR_96.py
+++ b/framework/e2e/PaddleLT_new/layercase/sublayer1000/Clas_cases/Twins_alt_gvt_base/SIR_96.py
@@ -32,7 +32,7 @@ def forward(
         var_2 = var_1.flatten(2)
         var_3 = var_2.transpose([0, 2, 1])
         var_4 = paddle.nn.functional.norm.layer_norm(var_3, normalized_shape=[192], weight=self.parameter_1, bias=self.parameter_0, epsilon=1e-05)
-        var_5 = paddle.nn.functional.common.dropout(var_4, p=0.0, axis=None, training=True, mode='upscale_in_train', name=None)
+        var_5 = paddle.nn.functional.common.dropout(var_4, p=0.0, axis=None, training=self.training, mode='upscale_in_train', name=None)
         return var_5
diff --git a/framework/e2e/PaddleLT_new/layercase/sublayer1000/Clas_cases/Xception_Xception65_deeplab/SIR_35.py b/framework/e2e/PaddleLT_new/layercase/sublayer1000/Clas_cases/Xception_Xception65_deeplab/SIR_35.py
index 4bfec374f2..b6b66a1044 100644
--- a/framework/e2e/PaddleLT_new/layercase/sublayer1000/Clas_cases/Xception_Xception65_deeplab/SIR_35.py
+++ b/framework/e2e/PaddleLT_new/layercase/sublayer1000/Clas_cases/Xception_Xception65_deeplab/SIR_35.py
@@ -20,7 +20,7 @@ def forward(
         var_0,    # (shape: [22, 2048, 10, 10], dtype: paddle.float32, stop_gradient: False)
     ):
         paddle.seed(33)
-        var_1 = paddle.nn.functional.common.dropout(var_0, p=0.5, axis=None, training=True, mode='downscale_in_infer', name=None)
+        var_1 = paddle.nn.functional.common.dropout(var_0, p=0.5, axis=None, training=self.training, mode='downscale_in_infer', name=None)
         var_2 = paddle.nn.functional.pooling.adaptive_avg_pool2d(var_1, output_size=1, data_format='NCHW', name=None)
         var_3 = paddle.tensor.manipulation.squeeze(var_2, axis=[2, 3])
         var_4 = paddle.nn.functional.common.linear(x=var_3, weight=self.parameter_1, bias=self.parameter_0, name=None)
diff --git a/framework/e2e/PaddleLT_new/layercase/sublayer1000/Clas_cases/Xception_Xception65_deeplab/SIR_55.py b/framework/e2e/PaddleLT_new/layercase/sublayer1000/Clas_cases/Xception_Xception65_deeplab/SIR_55.py
index 994ccab514..8b1d9baf1e 100644
--- a/framework/e2e/PaddleLT_new/layercase/sublayer1000/Clas_cases/Xception_Xception65_deeplab/SIR_55.py
+++ b/framework/e2e/PaddleLT_new/layercase/sublayer1000/Clas_cases/Xception_Xception65_deeplab/SIR_55.py
@@ -20,7 +20,7 @@ def forward(
         var_0,    # (shape: [10, 2048, 10, 10], dtype: paddle.float32, stop_gradient: False)
     ):
         paddle.seed(33)
-        var_1 = paddle.nn.functional.common.dropout(var_0, p=0.5, axis=None, training=True, mode='downscale_in_infer', name=None)
+        var_1 = paddle.nn.functional.common.dropout(var_0, p=0.5, axis=None, training=self.training, mode='downscale_in_infer', name=None)
         var_2 = paddle.nn.functional.pooling.adaptive_avg_pool2d(var_1, output_size=1, data_format='NCHW', name=None)
         var_3 = paddle.tensor.manipulation.squeeze(var_2, axis=[2, 3])
         var_4 = paddle.nn.functional.common.linear(x=var_3, weight=self.parameter_1, bias=self.parameter_0, name=None)
diff --git a/framework/e2e/PaddleLT_new/layercase/sublayer1000/Det_cases/faster_rcnn_faster_rcnn_swin_tiny_fpn_1x_coco/SIR_3.py b/framework/e2e/PaddleLT_new/layercase/sublayer1000/Det_cases/faster_rcnn_faster_rcnn_swin_tiny_fpn_1x_coco/SIR_3.py
index 0ea90a31ef..c41101de69 100644
--- a/framework/e2e/PaddleLT_new/layercase/sublayer1000/Det_cases/faster_rcnn_faster_rcnn_swin_tiny_fpn_1x_coco/SIR_3.py
+++ b/framework/e2e/PaddleLT_new/layercase/sublayer1000/Det_cases/faster_rcnn_faster_rcnn_swin_tiny_fpn_1x_coco/SIR_3.py
@@ -36,7 +36,7 @@ def forward(
         var_6 = var_5.reshape([-1, 96, 200, 304])
         var_7 = var_6.flatten(2)
         var_8 = var_7.transpose([0, 2, 1])
-        var_9 = paddle.nn.functional.common.dropout(var_8, p=0.0, axis=None, training=True, mode='upscale_in_train', name=None)
+        var_9 = paddle.nn.functional.common.dropout(var_8, p=0.0, axis=None, training=self.training, mode='upscale_in_train', name=None)
         return var_9
diff --git a/framework/e2e/PaddleLT_new/layercase/sublayer1000/Det_cases/faster_rcnn_faster_rcnn_swin_tiny_fpn_2x_coco/SIR_3.py b/framework/e2e/PaddleLT_new/layercase/sublayer1000/Det_cases/faster_rcnn_faster_rcnn_swin_tiny_fpn_2x_coco/SIR_3.py
index 7f8aeb5de0..bb79c07a7d 100644
--- a/framework/e2e/PaddleLT_new/layercase/sublayer1000/Det_cases/faster_rcnn_faster_rcnn_swin_tiny_fpn_2x_coco/SIR_3.py
+++ b/framework/e2e/PaddleLT_new/layercase/sublayer1000/Det_cases/faster_rcnn_faster_rcnn_swin_tiny_fpn_2x_coco/SIR_3.py
@@ -36,7 +36,7 @@ def forward(
         var_6 = var_5.reshape([-1, 96, 136, 160])
         var_7 = var_6.flatten(2)
         var_8 = var_7.transpose([0, 2, 1])
-        var_9 = paddle.nn.functional.common.dropout(var_8, p=0.0, axis=None, training=True, mode='upscale_in_train', name=None)
+        var_9 = paddle.nn.functional.common.dropout(var_8, p=0.0, axis=None, training=self.training, mode='upscale_in_train', name=None)
         return var_9
diff --git a/framework/e2e/PaddleLT_new/layercase/sublayer1000/Det_cases/smalldet_ppyoloe_plus_sod_crn_l_80e_coco/SIR_127.py b/framework/e2e/PaddleLT_new/layercase/sublayer1000/Det_cases/smalldet_ppyoloe_plus_sod_crn_l_80e_coco/SIR_127.py
index 4f08d13720..2b6d15003f 100644
--- a/framework/e2e/PaddleLT_new/layercase/sublayer1000/Det_cases/smalldet_ppyoloe_plus_sod_crn_l_80e_coco/SIR_127.py
+++ b/framework/e2e/PaddleLT_new/layercase/sublayer1000/Det_cases/smalldet_ppyoloe_plus_sod_crn_l_80e_coco/SIR_127.py
@@ -45,14 +45,14 @@ def forward(
         var_1,    # (shape: [1, 169, 1024], dtype: paddle.float32, stop_gradient: False)
     ):
         paddle.seed(33)
-        var_2 = paddle.nn.functional.common.dropout(var_0, p=0.1, axis=None, training=True, mode='upscale_in_train', name=None)
+        var_2 = paddle.nn.functional.common.dropout(var_0, p=0.1, axis=None, training=self.training, mode='upscale_in_train', name=None)
         var_3 = var_1.__add__(var_2)
         var_4 = paddle.nn.functional.norm.layer_norm(var_3, normalized_shape=[1024], weight=self.parameter_5, bias=self.parameter_3, epsilon=1e-05)
         var_5 = paddle.nn.functional.common.linear(x=var_4, weight=self.parameter_7, bias=self.parameter_0, name=None)
         var_6 = paddle.nn.functional.activation.gelu(var_5)
-        var_7 = paddle.nn.functional.common.dropout(var_6, p=0.1, axis=None, training=True, mode='upscale_in_train', name=None)
+        var_7 = paddle.nn.functional.common.dropout(var_6, p=0.1, axis=None, training=self.training, mode='upscale_in_train', name=None)
         var_8 = paddle.nn.functional.common.linear(x=var_7, weight=self.parameter_6, bias=self.parameter_1, name=None)
-        var_9 = paddle.nn.functional.common.dropout(var_8, p=0.1, axis=None, training=True, mode='upscale_in_train', name=None)
+        var_9 = paddle.nn.functional.common.dropout(var_8, p=0.1, axis=None, training=self.training, mode='upscale_in_train', name=None)
         var_10 = var_4.__add__(var_9)
         var_11 = paddle.nn.functional.norm.layer_norm(var_10, normalized_shape=[1024], weight=self.parameter_2, bias=self.parameter_4, epsilon=1e-05)
         return var_11
diff --git a/framework/e2e/PaddleLT_new/layercase/sublayer1000/Det_cases/vitdet_ppyoloe_vit_base_csppan_cae_36e_coco/SIR_6.py b/framework/e2e/PaddleLT_new/layercase/sublayer1000/Det_cases/vitdet_ppyoloe_vit_base_csppan_cae_36e_coco/SIR_6.py
index 89f59aeadf..3636681783 100644
--- a/framework/e2e/PaddleLT_new/layercase/sublayer1000/Det_cases/vitdet_ppyoloe_vit_base_csppan_cae_36e_coco/SIR_6.py
+++ b/framework/e2e/PaddleLT_new/layercase/sublayer1000/Det_cases/vitdet_ppyoloe_vit_base_csppan_cae_36e_coco/SIR_6.py
@@ -61,12 +61,12 @@ def forward(
         var_15 = var_11.matmul(var_14)
         var_16 = var_15.__mul__(0.125)
         var_17 = paddle.nn.functional.activation.softmax(var_16, axis=-1)
-        var_18 = paddle.nn.functional.common.dropout(var_17, p=0.0, axis=None, training=True, mode='upscale_in_train', name=None)
+        var_18 = paddle.nn.functional.common.dropout(var_17, p=0.0, axis=None, training=self.training, mode='upscale_in_train', name=None)
         var_19 = var_18.matmul(var_13)
         var_20 = var_19.transpose((0, 2, 1, 3,))
         var_21 = var_20.reshape((-1, var_3, var_4,))
         var_22 = paddle.nn.functional.common.linear(x=var_21, weight=self.parameter_4, bias=self.parameter_0, name=None)
-        var_23 = paddle.nn.functional.common.dropout(var_22, p=0.0, axis=None, training=True, mode='upscale_in_train', name=None)
+        var_23 = paddle.nn.functional.common.dropout(var_22, p=0.0, axis=None, training=self.training, mode='upscale_in_train', name=None)
         var_24 = self.parameter_2.__mul__(var_23)
         return var_24
diff --git a/framework/e2e/PaddleLT_new/layercase/sublayer1000/Ocr_cases/rec_rec_svtrnet/SIR_30.py b/framework/e2e/PaddleLT_new/layercase/sublayer1000/Ocr_cases/rec_rec_svtrnet/SIR_30.py
index a1109994e7..8176e0a9a7 100644
--- a/framework/e2e/PaddleLT_new/layercase/sublayer1000/Ocr_cases/rec_rec_svtrnet/SIR_30.py
+++ b/framework/e2e/PaddleLT_new/layercase/sublayer1000/Ocr_cases/rec_rec_svtrnet/SIR_30.py
@@ -49,12 +49,12 @@ def forward(
         var_11 = var_7.matmul(var_10)
         var_12 = var_11.__add__(var_1)
         var_13 = paddle.nn.functional.activation.softmax(var_12, axis=-1)
-        var_14 = paddle.nn.functional.common.dropout(var_13, p=0.0, axis=None, training=True, mode='upscale_in_train', name=None)
+        var_14 = paddle.nn.functional.common.dropout(var_13, p=0.0, axis=None, training=self.training, mode='upscale_in_train', name=None)
         var_15 = var_14.matmul(var_9)
         var_16 = var_15.transpose((0, 2, 1, 3,))
         var_17 = var_16.reshape((0, -1, 64,))
         var_18 = paddle.nn.functional.common.linear(x=var_17, weight=self.parameter_3, bias=self.parameter_4, name=None)
-        var_19 = paddle.nn.functional.common.dropout(var_18, p=0.0, axis=None, training=True, mode='upscale_in_train', name=None)
+        var_19 = paddle.nn.functional.common.dropout(var_18, p=0.0, axis=None, training=self.training, mode='upscale_in_train', name=None)
         return var_19
diff --git a/framework/e2e/PaddleLT_new/layercase/sublayer1000/Ocr_cases/rec_rec_svtrnet/SIR_43.py b/framework/e2e/PaddleLT_new/layercase/sublayer1000/Ocr_cases/rec_rec_svtrnet/SIR_43.py
index 191fd0ef15..3b3055ecc9 100644
--- a/framework/e2e/PaddleLT_new/layercase/sublayer1000/Ocr_cases/rec_rec_svtrnet/SIR_43.py
+++ b/framework/e2e/PaddleLT_new/layercase/sublayer1000/Ocr_cases/rec_rec_svtrnet/SIR_43.py
@@ -49,12 +49,12 @@ def forward(
         var_11 = var_7.matmul(var_10)
         var_12 = var_11.__add__(var_1)
         var_13 = paddle.nn.functional.activation.softmax(var_12, axis=-1)
-        var_14 = paddle.nn.functional.common.dropout(var_13, p=0.0, axis=None, training=True, mode='upscale_in_train', name=None)
+        var_14 = paddle.nn.functional.common.dropout(var_13, p=0.0, axis=None, training=self.training, mode='upscale_in_train', name=None)
         var_15 = var_14.matmul(var_9)
         var_16 = var_15.transpose((0, 2, 1, 3,))
         var_17 = var_16.reshape((0, -1, 128,))
         var_18 = paddle.nn.functional.common.linear(x=var_17, weight=self.parameter_0, bias=self.parameter_3, name=None)
-        var_19 = paddle.nn.functional.common.dropout(var_18, p=0.0, axis=None, training=True, mode='upscale_in_train', name=None)
+        var_19 = paddle.nn.functional.common.dropout(var_18, p=0.0, axis=None, training=self.training, mode='upscale_in_train', name=None)
         return var_19
diff --git a/framework/e2e/PaddleLT_new/layercase/sublayer1000/Ocr_cases/rec_rec_svtrnet/SIR_57.py b/framework/e2e/PaddleLT_new/layercase/sublayer1000/Ocr_cases/rec_rec_svtrnet/SIR_57.py
index 427e9ed33d..7e158c4020 100644
--- a/framework/e2e/PaddleLT_new/layercase/sublayer1000/Ocr_cases/rec_rec_svtrnet/SIR_57.py
+++ b/framework/e2e/PaddleLT_new/layercase/sublayer1000/Ocr_cases/rec_rec_svtrnet/SIR_57.py
@@ -47,12 +47,12 @@ def forward(
         var_9 = var_7.transpose((0, 1, 3, 2,))
         var_10 = var_6.matmul(var_9)
         var_11 = paddle.nn.functional.activation.softmax(var_10, axis=-1)
-        var_12 = paddle.nn.functional.common.dropout(var_11, p=0.0, axis=None, training=True, mode='upscale_in_train', name=None)
+        var_12 = paddle.nn.functional.common.dropout(var_11, p=0.0, axis=None, training=self.training, mode='upscale_in_train', name=None)
         var_13 = var_12.matmul(var_8)
         var_14 = var_13.transpose((0, 2, 1, 3,))
         var_15 = var_14.reshape((0, -1, 128,))
         var_16 = paddle.nn.functional.common.linear(x=var_15, weight=self.parameter_2, bias=self.parameter_4, name=None)
-        var_17 = paddle.nn.functional.common.dropout(var_16, p=0.0, axis=None, training=True, mode='upscale_in_train', name=None)
+        var_17 = paddle.nn.functional.common.dropout(var_16, p=0.0, axis=None, training=self.training, mode='upscale_in_train', name=None)
         return var_17
diff --git a/framework/e2e/PaddleLT_new/layercase/sublayer1000/Ocr_cases/rec_rec_svtrnet/SIR_72.py b/framework/e2e/PaddleLT_new/layercase/sublayer1000/Ocr_cases/rec_rec_svtrnet/SIR_72.py
index bc00da2df7..dbaa521272 100644
--- a/framework/e2e/PaddleLT_new/layercase/sublayer1000/Ocr_cases/rec_rec_svtrnet/SIR_72.py
+++ b/framework/e2e/PaddleLT_new/layercase/sublayer1000/Ocr_cases/rec_rec_svtrnet/SIR_72.py
@@ -47,12 +47,12 @@ def forward(
         var_9 = var_7.transpose((0, 1, 3, 2,))
         var_10 = var_6.matmul(var_9)
         var_11 = paddle.nn.functional.activation.softmax(var_10, axis=-1)
-        var_12 = paddle.nn.functional.common.dropout(var_11, p=0.0, axis=None, training=True, mode='upscale_in_train', name=None)
+        var_12 = paddle.nn.functional.common.dropout(var_11, p=0.0, axis=None, training=self.training, mode='upscale_in_train', name=None)
         var_13 = var_12.matmul(var_8)
         var_14 = var_13.transpose((0, 2, 1, 3,))
         var_15 = var_14.reshape((0, -1, 256,))
         var_16 = paddle.nn.functional.common.linear(x=var_15, weight=self.parameter_2, bias=self.parameter_5, name=None)
-        var_17 = paddle.nn.functional.common.dropout(var_16, p=0.0, axis=None, training=True, mode='upscale_in_train', name=None)
+        var_17 = paddle.nn.functional.common.dropout(var_16, p=0.0, axis=None, training=self.training, mode='upscale_in_train', name=None)
         return var_17
diff --git a/framework/e2e/PaddleLT_new/layercase/sublayer1000/Ocr_cases/rec_rec_svtrnet_ch/SIR_13.py b/framework/e2e/PaddleLT_new/layercase/sublayer1000/Ocr_cases/rec_rec_svtrnet_ch/SIR_13.py
index 63b041378a..711d2dfb2f 100644
--- a/framework/e2e/PaddleLT_new/layercase/sublayer1000/Ocr_cases/rec_rec_svtrnet_ch/SIR_13.py
+++ b/framework/e2e/PaddleLT_new/layercase/sublayer1000/Ocr_cases/rec_rec_svtrnet_ch/SIR_13.py
@@ -49,12 +49,12 @@ def forward(
         var_11 = var_7.matmul(var_10)
         var_12 = var_11.__add__(var_1)
         var_13 = paddle.nn.functional.activation.softmax(var_12, axis=-1)
-        var_14 = paddle.nn.functional.common.dropout(var_13, p=0.0, axis=None, training=True, mode='upscale_in_train', name=None)
+        var_14 = paddle.nn.functional.common.dropout(var_13, p=0.0, axis=None, training=self.training, mode='upscale_in_train', name=None)
         var_15 = var_14.matmul(var_9)
         var_16 = var_15.transpose((0, 2, 1, 3,))
         var_17 = var_16.reshape((0, -1, 64,))
         var_18 = paddle.nn.functional.common.linear(x=var_17, weight=self.parameter_2, bias=self.parameter_1, name=None)
-        var_19 = paddle.nn.functional.common.dropout(var_18, p=0.0, axis=None, training=True, mode='upscale_in_train', name=None)
+        var_19 = paddle.nn.functional.common.dropout(var_18, p=0.0, axis=None, training=self.training, mode='upscale_in_train', name=None)
         return var_19
diff --git a/framework/e2e/PaddleLT_new/layercase/sublayer1000/Ocr_cases/rec_rec_svtrnet_ch/SIR_26.py b/framework/e2e/PaddleLT_new/layercase/sublayer1000/Ocr_cases/rec_rec_svtrnet_ch/SIR_26.py
index 11ba3955c9..8badc02f9d 100644
--- a/framework/e2e/PaddleLT_new/layercase/sublayer1000/Ocr_cases/rec_rec_svtrnet_ch/SIR_26.py
+++ b/framework/e2e/PaddleLT_new/layercase/sublayer1000/Ocr_cases/rec_rec_svtrnet_ch/SIR_26.py
@@ -49,12 +49,12 @@ def forward(
         var_11 = var_7.matmul(var_10)
         var_12 = var_11.__add__(var_1)
         var_13 = paddle.nn.functional.activation.softmax(var_12, axis=-1)
-        var_14 = paddle.nn.functional.common.dropout(var_13, p=0.0, axis=None, training=True, mode='upscale_in_train', name=None)
+        var_14 = paddle.nn.functional.common.dropout(var_13, p=0.0, axis=None, training=self.training, mode='upscale_in_train', name=None)
         var_15 = var_14.matmul(var_9)
         var_16 = var_15.transpose((0, 2, 1, 3,))
         var_17 = var_16.reshape((0, -1, 128,))
         var_18 = paddle.nn.functional.common.linear(x=var_17, weight=self.parameter_4, bias=self.parameter_5, name=None)
-        var_19 = paddle.nn.functional.common.dropout(var_18, p=0.0, axis=None, training=True, mode='upscale_in_train', name=None)
+        var_19 = paddle.nn.functional.common.dropout(var_18, p=0.0, axis=None, training=self.training, mode='upscale_in_train', name=None)
         return var_19
diff --git a/framework/e2e/PaddleLT_new/layercase/sublayer1000/Ocr_cases/rec_rec_svtrnet_ch/SIR_40.py b/framework/e2e/PaddleLT_new/layercase/sublayer1000/Ocr_cases/rec_rec_svtrnet_ch/SIR_40.py
index d6b8dc3a04..0cd4040b7c 100644
--- a/framework/e2e/PaddleLT_new/layercase/sublayer1000/Ocr_cases/rec_rec_svtrnet_ch/SIR_40.py
+++ b/framework/e2e/PaddleLT_new/layercase/sublayer1000/Ocr_cases/rec_rec_svtrnet_ch/SIR_40.py
@@ -47,12 +47,12 @@ def forward(
         var_9 = var_7.transpose((0, 1, 3, 2,))
         var_10 = var_6.matmul(var_9)
         var_11 = paddle.nn.functional.activation.softmax(var_10, axis=-1)
-        var_12 = paddle.nn.functional.common.dropout(var_11, p=0.0, axis=None, training=True, mode='upscale_in_train', name=None)
+        var_12 = paddle.nn.functional.common.dropout(var_11, p=0.0, axis=None, training=self.training, mode='upscale_in_train', name=None)
         var_13 = var_12.matmul(var_8)
         var_14 = var_13.transpose((0, 2, 1, 3,))
         var_15 = var_14.reshape((0, -1, 128,))
         var_16 = paddle.nn.functional.common.linear(x=var_15, weight=self.parameter_4, bias=self.parameter_5, name=None)
-        var_17 = paddle.nn.functional.common.dropout(var_16, p=0.0, axis=None, training=True, mode='upscale_in_train', name=None)
+        var_17 = paddle.nn.functional.common.dropout(var_16, p=0.0, axis=None, training=self.training, mode='upscale_in_train', name=None)
         return var_17
diff --git a/framework/e2e/PaddleLT_new/layercase/sublayer1000/Ocr_cases/rec_rec_svtrnet_ch/SIR_55.py b/framework/e2e/PaddleLT_new/layercase/sublayer1000/Ocr_cases/rec_rec_svtrnet_ch/SIR_55.py
index 09eab4e63e..02eec8561c 100644
--- a/framework/e2e/PaddleLT_new/layercase/sublayer1000/Ocr_cases/rec_rec_svtrnet_ch/SIR_55.py
+++ b/framework/e2e/PaddleLT_new/layercase/sublayer1000/Ocr_cases/rec_rec_svtrnet_ch/SIR_55.py
@@ -47,12 +47,12 @@ def forward(
         var_9 = var_7.transpose((0, 1, 3, 2,))
         var_10 = var_6.matmul(var_9)
         var_11 =
paddle.nn.functional.activation.softmax(var_10, axis=-1) - var_12 = paddle.nn.functional.common.dropout(var_11, p=0.0, axis=None, training=True, mode='upscale_in_train', name=None) + var_12 = paddle.nn.functional.common.dropout(var_11, p=0.0, axis=None, training=self.training, mode='upscale_in_train', name=None) var_13 = var_12.matmul(var_8) var_14 = var_13.transpose((0, 2, 1, 3,)) var_15 = var_14.reshape((0, -1, 256,)) var_16 = paddle.nn.functional.common.linear(x=var_15, weight=self.parameter_1, bias=self.parameter_3, name=None) - var_17 = paddle.nn.functional.common.dropout(var_16, p=0.0, axis=None, training=True, mode='upscale_in_train', name=None) + var_17 = paddle.nn.functional.common.dropout(var_16, p=0.0, axis=None, training=self.training, mode='upscale_in_train', name=None) return var_17 diff --git a/framework/e2e/PaddleLT_new/layercase/sublayer1000/Seg_cases/danet_danet_resnet50_os8_cityscapes_1024x512_80k/SIR_34.py b/framework/e2e/PaddleLT_new/layercase/sublayer1000/Seg_cases/danet_danet_resnet50_os8_cityscapes_1024x512_80k/SIR_34.py index 4bbe67c27e..ca4c96ba7b 100644 --- a/framework/e2e/PaddleLT_new/layercase/sublayer1000/Seg_cases/danet_danet_resnet50_os8_cityscapes_1024x512_80k/SIR_34.py +++ b/framework/e2e/PaddleLT_new/layercase/sublayer1000/Seg_cases/danet_danet_resnet50_os8_cityscapes_1024x512_80k/SIR_34.py @@ -39,13 +39,13 @@ def forward( ): paddle.seed(33) var_3 = var_0.__add__(var_1) - var_4 = paddle.nn.functional.common.dropout2d(var_3, p=0.1, training=True, data_format='NCHW', name=None) + var_4 = paddle.nn.functional.common.dropout2d(var_3, p=0.1, training=self.training, data_format='NCHW', name=None) var_5 = paddle.nn.functional.conv._conv_nd(var_4, self.parameter_2, bias=self.parameter_0, stride=[1, 1], padding=[0, 0], padding_algorithm='EXPLICIT', dilation=[1, 1], groups=1, data_format='NCHW', channel_dim=1, op_type='conv2d', use_cudnn=True) - var_6 = paddle.nn.functional.common.dropout2d(var_1, p=0.1, training=True, data_format='NCHW', name=None) + var_6 = paddle.nn.functional.common.dropout2d(var_1, p=0.1, training=self.training, data_format='NCHW', name=None) var_7 = paddle.nn.functional.conv._conv_nd(var_6, self.parameter_4, bias=self.parameter_3, stride=[1, 1], padding=[0, 0], padding_algorithm='EXPLICIT', dilation=[1, 1], groups=1, data_format='NCHW', channel_dim=1, op_type='conv2d', use_cudnn=True) - var_8 = paddle.nn.functional.common.dropout2d(var_0, p=0.1, training=True, data_format='NCHW', name=None) + var_8 = paddle.nn.functional.common.dropout2d(var_0, p=0.1, training=self.training, data_format='NCHW', name=None) var_9 = paddle.nn.functional.conv._conv_nd(var_8, self.parameter_4, bias=self.parameter_3, stride=[1, 1], padding=[0, 0], padding_algorithm='EXPLICIT', dilation=[1, 1], groups=1, data_format='NCHW', channel_dim=1, op_type='conv2d', use_cudnn=True) - var_10 = paddle.nn.functional.common.dropout2d(var_2, p=0.1, training=True, data_format='NCHW', name=None) + var_10 = paddle.nn.functional.common.dropout2d(var_2, p=0.1, training=self.training, data_format='NCHW', name=None) var_11 = paddle.nn.functional.conv._conv_nd(var_10, self.parameter_1, bias=self.parameter_5, stride=[1, 1], padding=[0, 0], padding_algorithm='EXPLICIT', dilation=[1, 1], groups=1, data_format='NCHW', channel_dim=1, op_type='conv2d', use_cudnn=True) return var_5, var_7, var_9, var_11 diff --git a/framework/e2e/PaddleLT_new/layercase/sublayer1000/Seg_cases/danet_danet_resnet50_os8_voc12aug_512x512_40k/SIR_34.py 
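
Note: the segmentation-head hunks in this region use `dropout2d`, which drops whole NCHW channels rather than single elements; the patch wires its `training` flag to `self.training` the same way. A small check under assumed toy shapes (the `Head` class is hypothetical):

```python
import paddle

class Head(paddle.nn.Layer):
    def forward(self, x):
        return paddle.nn.functional.dropout2d(
            x, p=0.1, training=self.training, data_format='NCHW', name=None
        )

head = Head()
feat = paddle.rand([1, 8, 4, 4])
head.eval()
out = head(feat)
print(bool((out == feat).all()))  # True: no channels are dropped in eval mode
```
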
b/framework/e2e/PaddleLT_new/layercase/sublayer1000/Seg_cases/danet_danet_resnet50_os8_voc12aug_512x512_40k/SIR_34.py index a441be2c45..1f236957a3 100644 --- a/framework/e2e/PaddleLT_new/layercase/sublayer1000/Seg_cases/danet_danet_resnet50_os8_voc12aug_512x512_40k/SIR_34.py +++ b/framework/e2e/PaddleLT_new/layercase/sublayer1000/Seg_cases/danet_danet_resnet50_os8_voc12aug_512x512_40k/SIR_34.py @@ -39,13 +39,13 @@ def forward( ): paddle.seed(33) var_3 = var_0.__add__(var_1) - var_4 = paddle.nn.functional.common.dropout2d(var_3, p=0.1, training=True, data_format='NCHW', name=None) + var_4 = paddle.nn.functional.common.dropout2d(var_3, p=0.1, training=self.training, data_format='NCHW', name=None) var_5 = paddle.nn.functional.conv._conv_nd(var_4, self.parameter_5, bias=self.parameter_0, stride=[1, 1], padding=[0, 0], padding_algorithm='EXPLICIT', dilation=[1, 1], groups=1, data_format='NCHW', channel_dim=1, op_type='conv2d', use_cudnn=True) - var_6 = paddle.nn.functional.common.dropout2d(var_1, p=0.1, training=True, data_format='NCHW', name=None) + var_6 = paddle.nn.functional.common.dropout2d(var_1, p=0.1, training=self.training, data_format='NCHW', name=None) var_7 = paddle.nn.functional.conv._conv_nd(var_6, self.parameter_4, bias=self.parameter_3, stride=[1, 1], padding=[0, 0], padding_algorithm='EXPLICIT', dilation=[1, 1], groups=1, data_format='NCHW', channel_dim=1, op_type='conv2d', use_cudnn=True) - var_8 = paddle.nn.functional.common.dropout2d(var_0, p=0.1, training=True, data_format='NCHW', name=None) + var_8 = paddle.nn.functional.common.dropout2d(var_0, p=0.1, training=self.training, data_format='NCHW', name=None) var_9 = paddle.nn.functional.conv._conv_nd(var_8, self.parameter_4, bias=self.parameter_3, stride=[1, 1], padding=[0, 0], padding_algorithm='EXPLICIT', dilation=[1, 1], groups=1, data_format='NCHW', channel_dim=1, op_type='conv2d', use_cudnn=True) - var_10 = paddle.nn.functional.common.dropout2d(var_2, p=0.1, training=True, data_format='NCHW', name=None) + var_10 = paddle.nn.functional.common.dropout2d(var_2, p=0.1, training=self.training, data_format='NCHW', name=None) var_11 = paddle.nn.functional.conv._conv_nd(var_10, self.parameter_1, bias=self.parameter_2, stride=[1, 1], padding=[0, 0], padding_algorithm='EXPLICIT', dilation=[1, 1], groups=1, data_format='NCHW', channel_dim=1, op_type='conv2d', use_cudnn=True) return var_5, var_7, var_9, var_11 diff --git a/framework/e2e/PaddleLT_new/layercase/sublayer1000/Seg_cases/dnlnet_dnlnet_resnet101_os8_cityscapes_1024x512_80k/SIR_34.py b/framework/e2e/PaddleLT_new/layercase/sublayer1000/Seg_cases/dnlnet_dnlnet_resnet101_os8_cityscapes_1024x512_80k/SIR_34.py index a85af7d67c..245b1869d9 100644 --- a/framework/e2e/PaddleLT_new/layercase/sublayer1000/Seg_cases/dnlnet_dnlnet_resnet101_os8_cityscapes_1024x512_80k/SIR_34.py +++ b/framework/e2e/PaddleLT_new/layercase/sublayer1000/Seg_cases/dnlnet_dnlnet_resnet101_os8_cityscapes_1024x512_80k/SIR_34.py @@ -20,7 +20,7 @@ def forward( var_0, # (shape: [1, 512, 64, 128], dtype: paddle.float32, stop_gradient: False) ): paddle.seed(33) - var_1 = paddle.nn.functional.common.dropout2d(var_0, p=0.1, training=True, data_format='NCHW', name=None) + var_1 = paddle.nn.functional.common.dropout2d(var_0, p=0.1, training=self.training, data_format='NCHW', name=None) var_2 = paddle.nn.functional.conv._conv_nd(var_1, self.parameter_1, bias=self.parameter_0, stride=[1, 1], padding=[0, 0], padding_algorithm='EXPLICIT', dilation=[1, 1], groups=1, data_format='NCHW', channel_dim=1, op_type='conv2d', 
use_cudnn=True) return var_2 diff --git a/framework/e2e/PaddleLT_new/layercase/sublayer1000/Seg_cases/emanet_emanet_resnet101_os8_cityscapes_1024x512_80k/SIR_35.py b/framework/e2e/PaddleLT_new/layercase/sublayer1000/Seg_cases/emanet_emanet_resnet101_os8_cityscapes_1024x512_80k/SIR_35.py index 630ea7fb36..50141e7228 100644 --- a/framework/e2e/PaddleLT_new/layercase/sublayer1000/Seg_cases/emanet_emanet_resnet101_os8_cityscapes_1024x512_80k/SIR_35.py +++ b/framework/e2e/PaddleLT_new/layercase/sublayer1000/Seg_cases/emanet_emanet_resnet101_os8_cityscapes_1024x512_80k/SIR_35.py @@ -20,7 +20,7 @@ def forward( var_0, # (shape: [1, 256, 64, 128], dtype: paddle.float32, stop_gradient: False) ): paddle.seed(33) - var_1 = paddle.nn.functional.common.dropout2d(var_0, p=0.1, training=True, data_format='NCHW', name=None) + var_1 = paddle.nn.functional.common.dropout2d(var_0, p=0.1, training=self.training, data_format='NCHW', name=None) var_2 = paddle.nn.functional.conv._conv_nd(var_1, self.parameter_1, bias=self.parameter_0, stride=[1, 1], padding=[0, 0], padding_algorithm='EXPLICIT', dilation=[1, 1], groups=1, data_format='NCHW', channel_dim=1, op_type='conv2d', use_cudnn=True) return var_2 diff --git a/framework/e2e/PaddleLT_new/layercase/sublayer1000/Seg_cases/emanet_emanet_resnet101_os8_voc12aug_512x512_40k/SIR_35.py b/framework/e2e/PaddleLT_new/layercase/sublayer1000/Seg_cases/emanet_emanet_resnet101_os8_voc12aug_512x512_40k/SIR_35.py index e76ab9ea62..2b7bc36405 100644 --- a/framework/e2e/PaddleLT_new/layercase/sublayer1000/Seg_cases/emanet_emanet_resnet101_os8_voc12aug_512x512_40k/SIR_35.py +++ b/framework/e2e/PaddleLT_new/layercase/sublayer1000/Seg_cases/emanet_emanet_resnet101_os8_voc12aug_512x512_40k/SIR_35.py @@ -20,7 +20,7 @@ def forward( var_0, # (shape: [1, 256, 64, 64], dtype: paddle.float32, stop_gradient: False) ): paddle.seed(33) - var_1 = paddle.nn.functional.common.dropout2d(var_0, p=0.1, training=True, data_format='NCHW', name=None) + var_1 = paddle.nn.functional.common.dropout2d(var_0, p=0.1, training=self.training, data_format='NCHW', name=None) var_2 = paddle.nn.functional.conv._conv_nd(var_1, self.parameter_1, bias=self.parameter_0, stride=[1, 1], padding=[0, 0], padding_algorithm='EXPLICIT', dilation=[1, 1], groups=1, data_format='NCHW', channel_dim=1, op_type='conv2d', use_cudnn=True) return var_2 diff --git a/framework/e2e/PaddleLT_new/layercase/sublayer1000/Seg_cases/enet_enet_cityscapes_1024x512_80k/SIR_10.py b/framework/e2e/PaddleLT_new/layercase/sublayer1000/Seg_cases/enet_enet_cityscapes_1024x512_80k/SIR_10.py index 9811b6d1a5..d776f3b9bd 100644 --- a/framework/e2e/PaddleLT_new/layercase/sublayer1000/Seg_cases/enet_enet_cityscapes_1024x512_80k/SIR_10.py +++ b/framework/e2e/PaddleLT_new/layercase/sublayer1000/Seg_cases/enet_enet_cityscapes_1024x512_80k/SIR_10.py @@ -12,7 +12,7 @@ def forward( var_0, # (shape: [1, 64, 128, 256], dtype: paddle.float32, stop_gradient: False) ): paddle.seed(33) - var_1 = paddle.nn.functional.common.dropout2d(var_0, p=0.01, training=True, data_format='NCHW', name=None) + var_1 = paddle.nn.functional.common.dropout2d(var_0, p=0.01, training=self.training, data_format='NCHW', name=None) return var_1 diff --git a/framework/e2e/PaddleLT_new/layercase/sublayer1000/Seg_cases/enet_enet_cityscapes_1024x512_80k/SIR_20.py b/framework/e2e/PaddleLT_new/layercase/sublayer1000/Seg_cases/enet_enet_cityscapes_1024x512_80k/SIR_20.py index 61dcb2448f..cd9750ebe7 100644 --- 
a/framework/e2e/PaddleLT_new/layercase/sublayer1000/Seg_cases/enet_enet_cityscapes_1024x512_80k/SIR_20.py +++ b/framework/e2e/PaddleLT_new/layercase/sublayer1000/Seg_cases/enet_enet_cityscapes_1024x512_80k/SIR_20.py @@ -13,7 +13,7 @@ def forward( var_1, # (shape: [1, 64, 128, 256], dtype: paddle.float32, stop_gradient: False) ): paddle.seed(33) - var_2 = paddle.nn.functional.common.dropout2d(var_0, p=0.01, training=True, data_format='NCHW', name=None) + var_2 = paddle.nn.functional.common.dropout2d(var_0, p=0.01, training=self.training, data_format='NCHW', name=None) var_3 = var_1.__add__(var_2) return var_3 diff --git a/framework/e2e/PaddleLT_new/layercase/sublayer1000/Seg_cases/enet_enet_cityscapes_1024x512_80k/SIR_40.py b/framework/e2e/PaddleLT_new/layercase/sublayer1000/Seg_cases/enet_enet_cityscapes_1024x512_80k/SIR_40.py index 759ca9ccb4..323d9c6c5b 100644 --- a/framework/e2e/PaddleLT_new/layercase/sublayer1000/Seg_cases/enet_enet_cityscapes_1024x512_80k/SIR_40.py +++ b/framework/e2e/PaddleLT_new/layercase/sublayer1000/Seg_cases/enet_enet_cityscapes_1024x512_80k/SIR_40.py @@ -12,7 +12,7 @@ def forward( var_0, # (shape: [1, 128, 64, 128], dtype: paddle.float32, stop_gradient: False) ): paddle.seed(33) - var_1 = paddle.nn.functional.common.dropout2d(var_0, p=0.1, training=True, data_format='NCHW', name=None) + var_1 = paddle.nn.functional.common.dropout2d(var_0, p=0.1, training=self.training, data_format='NCHW', name=None) return var_1 diff --git a/framework/e2e/PaddleLT_new/layercase/sublayer1000/Seg_cases/enet_enet_cityscapes_1024x512_80k/SIR_46.py b/framework/e2e/PaddleLT_new/layercase/sublayer1000/Seg_cases/enet_enet_cityscapes_1024x512_80k/SIR_46.py index 6dfa5ac413..85ffe6df02 100644 --- a/framework/e2e/PaddleLT_new/layercase/sublayer1000/Seg_cases/enet_enet_cityscapes_1024x512_80k/SIR_46.py +++ b/framework/e2e/PaddleLT_new/layercase/sublayer1000/Seg_cases/enet_enet_cityscapes_1024x512_80k/SIR_46.py @@ -13,7 +13,7 @@ def forward( var_1, # (shape: [1, 128, 64, 128], dtype: paddle.float32, stop_gradient: False) ): paddle.seed(33) - var_2 = paddle.nn.functional.common.dropout2d(var_0, p=0.1, training=True, data_format='NCHW', name=None) + var_2 = paddle.nn.functional.common.dropout2d(var_0, p=0.1, training=self.training, data_format='NCHW', name=None) var_3 = var_1.__add__(var_2) return var_3 diff --git a/framework/e2e/PaddleLT_new/layercase/sublayer1000/Seg_cases/espnet_espnet_cityscapes_1024x512_120k/SIR_143.py b/framework/e2e/PaddleLT_new/layercase/sublayer1000/Seg_cases/espnet_espnet_cityscapes_1024x512_120k/SIR_143.py index 17e1a3501f..b350ecda71 100644 --- a/framework/e2e/PaddleLT_new/layercase/sublayer1000/Seg_cases/espnet_espnet_cityscapes_1024x512_120k/SIR_143.py +++ b/framework/e2e/PaddleLT_new/layercase/sublayer1000/Seg_cases/espnet_espnet_cityscapes_1024x512_120k/SIR_143.py @@ -20,7 +20,7 @@ def forward( paddle.seed(33) var_3 = paddle.nn.functional.common.interpolate(var_0, scale_factor=2, mode='bilinear', align_corners=True) var_4 = paddle.tensor.manipulation.concat([var_1, var_3], axis=1) - var_5 = paddle.nn.functional.common.dropout2d(var_4, p=0.0, training=True, data_format='NCHW', name=None) + var_5 = paddle.nn.functional.common.dropout2d(var_4, p=0.0, training=self.training, data_format='NCHW', name=None) var_6 = paddle.nn.functional.conv._conv_nd(var_5, self.parameter_0, bias=None, stride=[1, 1], padding=[0, 0], padding_algorithm='EXPLICIT', dilation=[1, 1], groups=1, data_format='NCHW', channel_dim=1, op_type='conv2d', use_cudnn=True) var_7 = 
paddle.nn.functional.common.interpolate(var_6, scale_factor=2, mode='bilinear', align_corners=True) var_8 = paddle.nn.functional.common.interpolate(var_2, scale_factor=2, mode='bilinear', align_corners=True) diff --git a/framework/e2e/PaddleLT_new/layercase/sublayer1000/Seg_cases/isanet_isanet_resnet101_os8_voc12aug_512x512_40k/SIR_38.py b/framework/e2e/PaddleLT_new/layercase/sublayer1000/Seg_cases/isanet_isanet_resnet101_os8_voc12aug_512x512_40k/SIR_38.py index 55ea96421c..14c29c7138 100644 --- a/framework/e2e/PaddleLT_new/layercase/sublayer1000/Seg_cases/isanet_isanet_resnet101_os8_voc12aug_512x512_40k/SIR_38.py +++ b/framework/e2e/PaddleLT_new/layercase/sublayer1000/Seg_cases/isanet_isanet_resnet101_os8_voc12aug_512x512_40k/SIR_38.py @@ -20,7 +20,7 @@ def forward( var_0, # (shape: [1, 512, 64, 64], dtype: paddle.float32, stop_gradient: False) ): paddle.seed(33) - var_1 = paddle.nn.functional.common.dropout2d(var_0, p=0.1, training=True, data_format='NCHW', name=None) + var_1 = paddle.nn.functional.common.dropout2d(var_0, p=0.1, training=self.training, data_format='NCHW', name=None) var_2 = paddle.nn.functional.conv._conv_nd(var_1, self.parameter_0, bias=self.parameter_1, stride=[1, 1], padding=[0, 0], padding_algorithm='EXPLICIT', dilation=[1, 1], groups=1, data_format='NCHW', channel_dim=1, op_type='conv2d', use_cudnn=True) return var_2 diff --git a/framework/e2e/PaddleLT_new/layercase/sublayer1000/Seg_cases/isanet_isanet_resnet50_os8_cityscapes_769x769_80k/SIR_36.py b/framework/e2e/PaddleLT_new/layercase/sublayer1000/Seg_cases/isanet_isanet_resnet50_os8_cityscapes_769x769_80k/SIR_36.py index 5141193507..4df9ac85da 100644 --- a/framework/e2e/PaddleLT_new/layercase/sublayer1000/Seg_cases/isanet_isanet_resnet50_os8_cityscapes_769x769_80k/SIR_36.py +++ b/framework/e2e/PaddleLT_new/layercase/sublayer1000/Seg_cases/isanet_isanet_resnet50_os8_cityscapes_769x769_80k/SIR_36.py @@ -20,7 +20,7 @@ def forward( var_0, # (shape: [1, 512, 97, 97], dtype: paddle.float32, stop_gradient: False) ): paddle.seed(33) - var_1 = paddle.nn.functional.common.dropout2d(var_0, p=0.1, training=True, data_format='NCHW', name=None) + var_1 = paddle.nn.functional.common.dropout2d(var_0, p=0.1, training=self.training, data_format='NCHW', name=None) var_2 = paddle.nn.functional.conv._conv_nd(var_1, self.parameter_1, bias=self.parameter_0, stride=[1, 1], padding=[0, 0], padding_algorithm='EXPLICIT', dilation=[1, 1], groups=1, data_format='NCHW', channel_dim=1, op_type='conv2d', use_cudnn=True) return var_2 diff --git a/framework/e2e/PaddleLT_new/layercase/sublayer1000/Seg_cases/pfpn_pfpn_resnet101_os8_cityscapes_512x1024_40k/SIR_68.py b/framework/e2e/PaddleLT_new/layercase/sublayer1000/Seg_cases/pfpn_pfpn_resnet101_os8_cityscapes_512x1024_40k/SIR_68.py index 686495bd72..3cf1a7cbc9 100644 --- a/framework/e2e/PaddleLT_new/layercase/sublayer1000/Seg_cases/pfpn_pfpn_resnet101_os8_cityscapes_512x1024_40k/SIR_68.py +++ b/framework/e2e/PaddleLT_new/layercase/sublayer1000/Seg_cases/pfpn_pfpn_resnet101_os8_cityscapes_512x1024_40k/SIR_68.py @@ -20,7 +20,7 @@ def forward( var_0, # (shape: [1, 256, 256, 128], dtype: paddle.float32, stop_gradient: False) ): paddle.seed(33) - var_1 = paddle.nn.functional.common.dropout2d(var_0, p=0.1, training=True, data_format='NCHW', name=None) + var_1 = paddle.nn.functional.common.dropout2d(var_0, p=0.1, training=self.training, data_format='NCHW', name=None) var_2 = paddle.nn.functional.conv._conv_nd(var_1, self.parameter_1, bias=self.parameter_0, stride=[1, 1], padding=[0, 0], 
padding_algorithm='EXPLICIT', dilation=[1, 1], groups=1, data_format='NCHW', channel_dim=1, op_type='conv2d', use_cudnn=True) return var_2 diff --git a/framework/e2e/PaddleLT_new/layercase/sublayer1000/Seg_cases/segformer_segformer_b0_cityscapes_1024x1024_160k/SIR_14.py b/framework/e2e/PaddleLT_new/layercase/sublayer1000/Seg_cases/segformer_segformer_b0_cityscapes_1024x1024_160k/SIR_14.py index f70d30b572..20942b2da8 100644 --- a/framework/e2e/PaddleLT_new/layercase/sublayer1000/Seg_cases/segformer_segformer_b0_cityscapes_1024x1024_160k/SIR_14.py +++ b/framework/e2e/PaddleLT_new/layercase/sublayer1000/Seg_cases/segformer_segformer_b0_cityscapes_1024x1024_160k/SIR_14.py @@ -103,14 +103,14 @@ def forward( var_23 = var_22.__mul__(0.1767766952966369) var_24 = paddle.nn.functional.activation.softmax(var_23, axis=-1) var_25 = paddle.nn.functional.common.dropout( - var_24, p=0.0, axis=None, training=True, mode="upscale_in_train", name=None + var_24, p=0.0, axis=None, training=self.training, mode="upscale_in_train", name=None ) var_26 = var_25.__matmul__(var_20) var_27 = var_26.transpose([0, 2, 1, 3]) var_28 = var_27.reshape([var_5, var_6, 64]) var_29 = paddle.nn.functional.common.linear(x=var_28, weight=self.parameter_8, bias=self.parameter_0, name=None) var_30 = paddle.nn.functional.common.dropout( - var_29, p=0.0, axis=None, training=True, mode="upscale_in_train", name=None + var_29, p=0.0, axis=None, training=self.training, mode="upscale_in_train", name=None ) return var_30 diff --git a/framework/e2e/PaddleLT_new/layercase/sublayer1000/Seg_cases/segformer_segformer_b0_cityscapes_1024x1024_160k/SIR_26.py b/framework/e2e/PaddleLT_new/layercase/sublayer1000/Seg_cases/segformer_segformer_b0_cityscapes_1024x1024_160k/SIR_26.py index 871ad397eb..9bd2da6952 100644 --- a/framework/e2e/PaddleLT_new/layercase/sublayer1000/Seg_cases/segformer_segformer_b0_cityscapes_1024x1024_160k/SIR_26.py +++ b/framework/e2e/PaddleLT_new/layercase/sublayer1000/Seg_cases/segformer_segformer_b0_cityscapes_1024x1024_160k/SIR_26.py @@ -103,14 +103,14 @@ def forward( var_23 = var_22.__mul__(0.1767766952966369) var_24 = paddle.nn.functional.activation.softmax(var_23, axis=-1) var_25 = paddle.nn.functional.common.dropout( - var_24, p=0.0, axis=None, training=True, mode="upscale_in_train", name=None + var_24, p=0.0, axis=None, training=self.training, mode="upscale_in_train", name=None ) var_26 = var_25.__matmul__(var_20) var_27 = var_26.transpose([0, 2, 1, 3]) var_28 = var_27.reshape([var_5, var_6, 160]) var_29 = paddle.nn.functional.common.linear(x=var_28, weight=self.parameter_1, bias=self.parameter_7, name=None) var_30 = paddle.nn.functional.common.dropout( - var_29, p=0.0, axis=None, training=True, mode="upscale_in_train", name=None + var_29, p=0.0, axis=None, training=self.training, mode="upscale_in_train", name=None ) return var_30 diff --git a/framework/e2e/PaddleLT_new/layercase/sublayer1000/Seg_cases/segformer_segformer_b0_cityscapes_1024x1024_160k/SIR_38.py b/framework/e2e/PaddleLT_new/layercase/sublayer1000/Seg_cases/segformer_segformer_b0_cityscapes_1024x1024_160k/SIR_38.py index 17814f9da3..8badae91bb 100644 --- a/framework/e2e/PaddleLT_new/layercase/sublayer1000/Seg_cases/segformer_segformer_b0_cityscapes_1024x1024_160k/SIR_38.py +++ b/framework/e2e/PaddleLT_new/layercase/sublayer1000/Seg_cases/segformer_segformer_b0_cityscapes_1024x1024_160k/SIR_38.py @@ -60,12 +60,12 @@ def forward( var_14 = var_7.__matmul__(var_13) var_15 = var_14.__mul__(0.1767766952966369) var_16 = 
paddle.nn.functional.activation.softmax(var_15, axis=-1) - var_17 = paddle.nn.functional.common.dropout(var_16, p=0.0, axis=None, training=True, mode='upscale_in_train', name=None) + var_17 = paddle.nn.functional.common.dropout(var_16, p=0.0, axis=None, training=self.training, mode='upscale_in_train', name=None) var_18 = var_17.__matmul__(var_12) var_19 = var_18.transpose([0, 2, 1, 3]) var_20 = var_19.reshape([var_3, var_4, 256]) var_21 = paddle.nn.functional.common.linear(x=var_20, weight=self.parameter_1, bias=self.parameter_6, name=None) - var_22 = paddle.nn.functional.common.dropout(var_21, p=0.0, axis=None, training=True, mode='upscale_in_train', name=None) + var_22 = paddle.nn.functional.common.dropout(var_21, p=0.0, axis=None, training=self.training, mode='upscale_in_train', name=None) return var_22 diff --git a/framework/e2e/PaddleLT_new/layercase/sublayer1000/Seg_cases/segformer_segformer_b0_cityscapes_1024x1024_160k/SIR_5.py b/framework/e2e/PaddleLT_new/layercase/sublayer1000/Seg_cases/segformer_segformer_b0_cityscapes_1024x1024_160k/SIR_5.py index 70423da935..de7336ba19 100644 --- a/framework/e2e/PaddleLT_new/layercase/sublayer1000/Seg_cases/segformer_segformer_b0_cityscapes_1024x1024_160k/SIR_5.py +++ b/framework/e2e/PaddleLT_new/layercase/sublayer1000/Seg_cases/segformer_segformer_b0_cityscapes_1024x1024_160k/SIR_5.py @@ -103,14 +103,14 @@ def forward( var_23 = var_22.__mul__(0.1767766952966369) var_24 = paddle.nn.functional.activation.softmax(var_23, axis=-1) var_25 = paddle.nn.functional.common.dropout( - var_24, p=0.0, axis=None, training=True, mode="upscale_in_train", name=None + var_24, p=0.0, axis=None, training=self.training, mode="upscale_in_train", name=None ) var_26 = var_25.__matmul__(var_20) var_27 = var_26.transpose([0, 2, 1, 3]) var_28 = var_27.reshape([var_5, var_6, 32]) var_29 = paddle.nn.functional.common.linear(x=var_28, weight=self.parameter_2, bias=self.parameter_5, name=None) var_30 = paddle.nn.functional.common.dropout( - var_29, p=0.0, axis=None, training=True, mode="upscale_in_train", name=None + var_29, p=0.0, axis=None, training=self.training, mode="upscale_in_train", name=None ) return var_30 diff --git a/framework/e2e/PaddleLT_new/layercase/sublayer1000/Seg_cases/segformer_segformer_b0_cityscapes_1024x512_160k/SIR_14.py b/framework/e2e/PaddleLT_new/layercase/sublayer1000/Seg_cases/segformer_segformer_b0_cityscapes_1024x512_160k/SIR_14.py index a41547d248..aaebab5249 100644 --- a/framework/e2e/PaddleLT_new/layercase/sublayer1000/Seg_cases/segformer_segformer_b0_cityscapes_1024x512_160k/SIR_14.py +++ b/framework/e2e/PaddleLT_new/layercase/sublayer1000/Seg_cases/segformer_segformer_b0_cityscapes_1024x512_160k/SIR_14.py @@ -103,14 +103,14 @@ def forward( var_23 = var_22.__mul__(0.1767766952966369) var_24 = paddle.nn.functional.activation.softmax(var_23, axis=-1) var_25 = paddle.nn.functional.common.dropout( - var_24, p=0.0, axis=None, training=True, mode="upscale_in_train", name=None + var_24, p=0.0, axis=None, training=self.training, mode="upscale_in_train", name=None ) var_26 = var_25.__matmul__(var_20) var_27 = var_26.transpose([0, 2, 1, 3]) var_28 = var_27.reshape([var_5, var_6, 64]) var_29 = paddle.nn.functional.common.linear(x=var_28, weight=self.parameter_3, bias=self.parameter_4, name=None) var_30 = paddle.nn.functional.common.dropout( - var_29, p=0.0, axis=None, training=True, mode="upscale_in_train", name=None + var_29, p=0.0, axis=None, training=self.training, mode="upscale_in_train", name=None ) return var_30 diff --git 
a/framework/e2e/PaddleLT_new/layercase/sublayer1000/Seg_cases/segformer_segformer_b0_cityscapes_1024x512_160k/SIR_26.py b/framework/e2e/PaddleLT_new/layercase/sublayer1000/Seg_cases/segformer_segformer_b0_cityscapes_1024x512_160k/SIR_26.py index c65e0a0a0e..2f37860bfa 100644 --- a/framework/e2e/PaddleLT_new/layercase/sublayer1000/Seg_cases/segformer_segformer_b0_cityscapes_1024x512_160k/SIR_26.py +++ b/framework/e2e/PaddleLT_new/layercase/sublayer1000/Seg_cases/segformer_segformer_b0_cityscapes_1024x512_160k/SIR_26.py @@ -105,14 +105,14 @@ def forward( var_23 = var_22.__mul__(0.1767766952966369) var_24 = paddle.nn.functional.activation.softmax(var_23, axis=-1) var_25 = paddle.nn.functional.common.dropout( - var_24, p=0.0, axis=None, training=True, mode="upscale_in_train", name=None + var_24, p=0.0, axis=None, training=self.training, mode="upscale_in_train", name=None ) var_26 = var_25.__matmul__(var_20) var_27 = var_26.transpose([0, 2, 1, 3]) var_28 = var_27.reshape([var_5, var_6, 160]) var_29 = paddle.nn.functional.common.linear(x=var_28, weight=self.parameter_9, bias=self.parameter_4, name=None) var_30 = paddle.nn.functional.common.dropout( - var_29, p=0.0, axis=None, training=True, mode="upscale_in_train", name=None + var_29, p=0.0, axis=None, training=self.training, mode="upscale_in_train", name=None ) return var_30 diff --git a/framework/e2e/PaddleLT_new/layercase/sublayer1000/Seg_cases/segformer_segformer_b0_cityscapes_1024x512_160k/SIR_38.py b/framework/e2e/PaddleLT_new/layercase/sublayer1000/Seg_cases/segformer_segformer_b0_cityscapes_1024x512_160k/SIR_38.py index 4bb0c30654..fa74d1f9b1 100644 --- a/framework/e2e/PaddleLT_new/layercase/sublayer1000/Seg_cases/segformer_segformer_b0_cityscapes_1024x512_160k/SIR_38.py +++ b/framework/e2e/PaddleLT_new/layercase/sublayer1000/Seg_cases/segformer_segformer_b0_cityscapes_1024x512_160k/SIR_38.py @@ -60,12 +60,12 @@ def forward( var_14 = var_7.__matmul__(var_13) var_15 = var_14.__mul__(0.1767766952966369) var_16 = paddle.nn.functional.activation.softmax(var_15, axis=-1) - var_17 = paddle.nn.functional.common.dropout(var_16, p=0.0, axis=None, training=True, mode='upscale_in_train', name=None) + var_17 = paddle.nn.functional.common.dropout(var_16, p=0.0, axis=None, training=self.training, mode='upscale_in_train', name=None) var_18 = var_17.__matmul__(var_12) var_19 = var_18.transpose([0, 2, 1, 3]) var_20 = var_19.reshape([var_3, var_4, 256]) var_21 = paddle.nn.functional.common.linear(x=var_20, weight=self.parameter_1, bias=self.parameter_0, name=None) - var_22 = paddle.nn.functional.common.dropout(var_21, p=0.0, axis=None, training=True, mode='upscale_in_train', name=None) + var_22 = paddle.nn.functional.common.dropout(var_21, p=0.0, axis=None, training=self.training, mode='upscale_in_train', name=None) return var_22 diff --git a/framework/e2e/PaddleLT_new/layercase/sublayer1000/Seg_cases/segformer_segformer_b0_cityscapes_1024x512_160k/SIR_5.py b/framework/e2e/PaddleLT_new/layercase/sublayer1000/Seg_cases/segformer_segformer_b0_cityscapes_1024x512_160k/SIR_5.py index 60d342e046..d037863d7b 100644 --- a/framework/e2e/PaddleLT_new/layercase/sublayer1000/Seg_cases/segformer_segformer_b0_cityscapes_1024x512_160k/SIR_5.py +++ b/framework/e2e/PaddleLT_new/layercase/sublayer1000/Seg_cases/segformer_segformer_b0_cityscapes_1024x512_160k/SIR_5.py @@ -105,14 +105,14 @@ def forward( var_23 = var_22.__mul__(0.1767766952966369) var_24 = paddle.nn.functional.activation.softmax(var_23, axis=-1) var_25 = paddle.nn.functional.common.dropout( - var_24, p=0.0, 
axis=None, training=True, mode="upscale_in_train", name=None + var_24, p=0.0, axis=None, training=self.training, mode="upscale_in_train", name=None ) var_26 = var_25.__matmul__(var_20) var_27 = var_26.transpose([0, 2, 1, 3]) var_28 = var_27.reshape([var_5, var_6, 32]) var_29 = paddle.nn.functional.common.linear(x=var_28, weight=self.parameter_7, bias=self.parameter_1, name=None) var_30 = paddle.nn.functional.common.dropout( - var_29, p=0.0, axis=None, training=True, mode="upscale_in_train", name=None + var_29, p=0.0, axis=None, training=self.training, mode="upscale_in_train", name=None ) return var_30 diff --git a/framework/e2e/PaddleLT_new/layercase/sublayer1000/Seg_cases/segformer_segformer_b3_cityscapes_1024x1024_160k/SIR_106.py b/framework/e2e/PaddleLT_new/layercase/sublayer1000/Seg_cases/segformer_segformer_b3_cityscapes_1024x1024_160k/SIR_106.py index 902a7ee875..8d7ad24000 100644 --- a/framework/e2e/PaddleLT_new/layercase/sublayer1000/Seg_cases/segformer_segformer_b3_cityscapes_1024x1024_160k/SIR_106.py +++ b/framework/e2e/PaddleLT_new/layercase/sublayer1000/Seg_cases/segformer_segformer_b3_cityscapes_1024x1024_160k/SIR_106.py @@ -60,12 +60,12 @@ def forward( var_14 = var_7.__matmul__(var_13) var_15 = var_14.__mul__(0.125) var_16 = paddle.nn.functional.activation.softmax(var_15, axis=-1) - var_17 = paddle.nn.functional.common.dropout(var_16, p=0.0, axis=None, training=True, mode='upscale_in_train', name=None) + var_17 = paddle.nn.functional.common.dropout(var_16, p=0.0, axis=None, training=self.training, mode='upscale_in_train', name=None) var_18 = var_17.__matmul__(var_12) var_19 = var_18.transpose([0, 2, 1, 3]) var_20 = var_19.reshape([var_3, var_4, 512]) var_21 = paddle.nn.functional.common.linear(x=var_20, weight=self.parameter_5, bias=self.parameter_4, name=None) - var_22 = paddle.nn.functional.common.dropout(var_21, p=0.0, axis=None, training=True, mode='upscale_in_train', name=None) + var_22 = paddle.nn.functional.common.dropout(var_21, p=0.0, axis=None, training=self.training, mode='upscale_in_train', name=None) return var_22 diff --git a/framework/e2e/PaddleLT_new/layercase/sublayer1000/Seg_cases/segformer_segformer_b3_cityscapes_1024x1024_160k/SIR_18.py b/framework/e2e/PaddleLT_new/layercase/sublayer1000/Seg_cases/segformer_segformer_b3_cityscapes_1024x1024_160k/SIR_18.py index 5c3656936a..64f701eea8 100644 --- a/framework/e2e/PaddleLT_new/layercase/sublayer1000/Seg_cases/segformer_segformer_b3_cityscapes_1024x1024_160k/SIR_18.py +++ b/framework/e2e/PaddleLT_new/layercase/sublayer1000/Seg_cases/segformer_segformer_b3_cityscapes_1024x1024_160k/SIR_18.py @@ -105,7 +105,7 @@ def forward( var_23 = var_22.__mul__(0.125) var_24 = paddle.nn.functional.activation.softmax(var_23, axis=-1) var_25 = paddle.nn.functional.common.dropout( - var_24, p=0.0, axis=None, training=True, mode="upscale_in_train", name=None + var_24, p=0.0, axis=None, training=self.training, mode="upscale_in_train", name=None ) var_26 = var_25.__matmul__(var_20) var_27 = var_26.transpose([0, 2, 1, 3]) @@ -114,7 +114,7 @@ def forward( x=var_28, weight=self.parameter_1, bias=self.parameter_10, name=None ) var_30 = paddle.nn.functional.common.dropout( - var_29, p=0.0, axis=None, training=True, mode="upscale_in_train", name=None + var_29, p=0.0, axis=None, training=self.training, mode="upscale_in_train", name=None ) return var_30 diff --git a/framework/e2e/PaddleLT_new/layercase/sublayer1000/Seg_cases/segformer_segformer_b3_cityscapes_1024x1024_160k/SIR_38.py 
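
Note: most of the transformer hunks here pass `p=0.0`, where dropout is already an identity in both modes (the mask keeps every element and `upscale_in_train` rescales by `1 / (1 - p) = 1`), so for those call sites the change should leave the computed values untouched and only alter how the `training` argument is captured by dynamic-to-static tracing:

```python
import paddle

x = paddle.rand([2, 3])
y = paddle.nn.functional.dropout(x, p=0.0, training=True, mode='upscale_in_train')
print(bool((y == x).all()))  # True: p=0.0 masks nothing even with training=True
```
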
b/framework/e2e/PaddleLT_new/layercase/sublayer1000/Seg_cases/segformer_segformer_b3_cityscapes_1024x1024_160k/SIR_38.py index dbe50dfc27..462911ce55 100644 --- a/framework/e2e/PaddleLT_new/layercase/sublayer1000/Seg_cases/segformer_segformer_b3_cityscapes_1024x1024_160k/SIR_38.py +++ b/framework/e2e/PaddleLT_new/layercase/sublayer1000/Seg_cases/segformer_segformer_b3_cityscapes_1024x1024_160k/SIR_38.py @@ -103,14 +103,14 @@ def forward( var_23 = var_22.__mul__(0.125) var_24 = paddle.nn.functional.activation.softmax(var_23, axis=-1) var_25 = paddle.nn.functional.common.dropout( - var_24, p=0.0, axis=None, training=True, mode="upscale_in_train", name=None + var_24, p=0.0, axis=None, training=self.training, mode="upscale_in_train", name=None ) var_26 = var_25.__matmul__(var_20) var_27 = var_26.transpose([0, 2, 1, 3]) var_28 = var_27.reshape([var_5, var_6, 320]) var_29 = paddle.nn.functional.common.linear(x=var_28, weight=self.parameter_8, bias=self.parameter_7, name=None) var_30 = paddle.nn.functional.common.dropout( - var_29, p=0.0, axis=None, training=True, mode="upscale_in_train", name=None + var_29, p=0.0, axis=None, training=self.training, mode="upscale_in_train", name=None ) return var_30 diff --git a/framework/e2e/PaddleLT_new/layercase/sublayer1000/Seg_cases/segformer_segformer_b3_cityscapes_1024x1024_160k/SIR_5.py b/framework/e2e/PaddleLT_new/layercase/sublayer1000/Seg_cases/segformer_segformer_b3_cityscapes_1024x1024_160k/SIR_5.py index 93d7801e7f..87f2e515ab 100644 --- a/framework/e2e/PaddleLT_new/layercase/sublayer1000/Seg_cases/segformer_segformer_b3_cityscapes_1024x1024_160k/SIR_5.py +++ b/framework/e2e/PaddleLT_new/layercase/sublayer1000/Seg_cases/segformer_segformer_b3_cityscapes_1024x1024_160k/SIR_5.py @@ -105,14 +105,14 @@ def forward( var_23 = var_22.__mul__(0.125) var_24 = paddle.nn.functional.activation.softmax(var_23, axis=-1) var_25 = paddle.nn.functional.common.dropout( - var_24, p=0.0, axis=None, training=True, mode="upscale_in_train", name=None + var_24, p=0.0, axis=None, training=self.training, mode="upscale_in_train", name=None ) var_26 = var_25.__matmul__(var_20) var_27 = var_26.transpose([0, 2, 1, 3]) var_28 = var_27.reshape([var_5, var_6, 64]) var_29 = paddle.nn.functional.common.linear(x=var_28, weight=self.parameter_8, bias=self.parameter_0, name=None) var_30 = paddle.nn.functional.common.dropout( - var_29, p=0.0, axis=None, training=True, mode="upscale_in_train", name=None + var_29, p=0.0, axis=None, training=self.training, mode="upscale_in_train", name=None ) return var_30 diff --git a/framework/e2e/PaddleLT_new/layercase/sublayer1000/Seg_cases/segformer_segformer_b3_cityscapes_1024x512_160k/SIR_106.py b/framework/e2e/PaddleLT_new/layercase/sublayer1000/Seg_cases/segformer_segformer_b3_cityscapes_1024x512_160k/SIR_106.py index 319fc60348..4eab99dfce 100644 --- a/framework/e2e/PaddleLT_new/layercase/sublayer1000/Seg_cases/segformer_segformer_b3_cityscapes_1024x512_160k/SIR_106.py +++ b/framework/e2e/PaddleLT_new/layercase/sublayer1000/Seg_cases/segformer_segformer_b3_cityscapes_1024x512_160k/SIR_106.py @@ -60,12 +60,12 @@ def forward( var_14 = var_7.__matmul__(var_13) var_15 = var_14.__mul__(0.125) var_16 = paddle.nn.functional.activation.softmax(var_15, axis=-1) - var_17 = paddle.nn.functional.common.dropout(var_16, p=0.0, axis=None, training=True, mode='upscale_in_train', name=None) + var_17 = paddle.nn.functional.common.dropout(var_16, p=0.0, axis=None, training=self.training, mode='upscale_in_train', name=None) var_18 = var_17.__matmul__(var_12) var_19 = 
var_18.transpose([0, 2, 1, 3]) var_20 = var_19.reshape([var_3, var_4, 512]) var_21 = paddle.nn.functional.common.linear(x=var_20, weight=self.parameter_7, bias=self.parameter_6, name=None) - var_22 = paddle.nn.functional.common.dropout(var_21, p=0.0, axis=None, training=True, mode='upscale_in_train', name=None) + var_22 = paddle.nn.functional.common.dropout(var_21, p=0.0, axis=None, training=self.training, mode='upscale_in_train', name=None) return var_22 diff --git a/framework/e2e/PaddleLT_new/layercase/sublayer1000/Seg_cases/segformer_segformer_b3_cityscapes_1024x512_160k/SIR_18.py b/framework/e2e/PaddleLT_new/layercase/sublayer1000/Seg_cases/segformer_segformer_b3_cityscapes_1024x512_160k/SIR_18.py index 9727460f65..e1725e0163 100644 --- a/framework/e2e/PaddleLT_new/layercase/sublayer1000/Seg_cases/segformer_segformer_b3_cityscapes_1024x512_160k/SIR_18.py +++ b/framework/e2e/PaddleLT_new/layercase/sublayer1000/Seg_cases/segformer_segformer_b3_cityscapes_1024x512_160k/SIR_18.py @@ -103,14 +103,14 @@ def forward( var_23 = var_22.__mul__(0.125) var_24 = paddle.nn.functional.activation.softmax(var_23, axis=-1) var_25 = paddle.nn.functional.common.dropout( - var_24, p=0.0, axis=None, training=True, mode="upscale_in_train", name=None + var_24, p=0.0, axis=None, training=self.training, mode="upscale_in_train", name=None ) var_26 = var_25.__matmul__(var_20) var_27 = var_26.transpose([0, 2, 1, 3]) var_28 = var_27.reshape([var_5, var_6, 128]) var_29 = paddle.nn.functional.common.linear(x=var_28, weight=self.parameter_4, bias=self.parameter_1, name=None) var_30 = paddle.nn.functional.common.dropout( - var_29, p=0.0, axis=None, training=True, mode="upscale_in_train", name=None + var_29, p=0.0, axis=None, training=self.training, mode="upscale_in_train", name=None ) return var_30 diff --git a/framework/e2e/PaddleLT_new/layercase/sublayer1000/Seg_cases/segformer_segformer_b3_cityscapes_1024x512_160k/SIR_38.py b/framework/e2e/PaddleLT_new/layercase/sublayer1000/Seg_cases/segformer_segformer_b3_cityscapes_1024x512_160k/SIR_38.py index 08ea52b8a8..e2088a6c13 100644 --- a/framework/e2e/PaddleLT_new/layercase/sublayer1000/Seg_cases/segformer_segformer_b3_cityscapes_1024x512_160k/SIR_38.py +++ b/framework/e2e/PaddleLT_new/layercase/sublayer1000/Seg_cases/segformer_segformer_b3_cityscapes_1024x512_160k/SIR_38.py @@ -105,14 +105,14 @@ def forward( var_23 = var_22.__mul__(0.125) var_24 = paddle.nn.functional.activation.softmax(var_23, axis=-1) var_25 = paddle.nn.functional.common.dropout( - var_24, p=0.0, axis=None, training=True, mode="upscale_in_train", name=None + var_24, p=0.0, axis=None, training=self.training, mode="upscale_in_train", name=None ) var_26 = var_25.__matmul__(var_20) var_27 = var_26.transpose([0, 2, 1, 3]) var_28 = var_27.reshape([var_5, var_6, 320]) var_29 = paddle.nn.functional.common.linear(x=var_28, weight=self.parameter_8, bias=self.parameter_4, name=None) var_30 = paddle.nn.functional.common.dropout( - var_29, p=0.0, axis=None, training=True, mode="upscale_in_train", name=None + var_29, p=0.0, axis=None, training=self.training, mode="upscale_in_train", name=None ) return var_30 diff --git a/framework/e2e/PaddleLT_new/layercase/sublayer1000/Seg_cases/segformer_segformer_b3_cityscapes_1024x512_160k/SIR_5.py b/framework/e2e/PaddleLT_new/layercase/sublayer1000/Seg_cases/segformer_segformer_b3_cityscapes_1024x512_160k/SIR_5.py index 0982bd4f1e..cde24b3c99 100644 --- a/framework/e2e/PaddleLT_new/layercase/sublayer1000/Seg_cases/segformer_segformer_b3_cityscapes_1024x512_160k/SIR_5.py +++ 
b/framework/e2e/PaddleLT_new/layercase/sublayer1000/Seg_cases/segformer_segformer_b3_cityscapes_1024x512_160k/SIR_5.py @@ -105,7 +105,7 @@ def forward( var_23 = var_22.__mul__(0.125) var_24 = paddle.nn.functional.activation.softmax(var_23, axis=-1) var_25 = paddle.nn.functional.common.dropout( - var_24, p=0.0, axis=None, training=True, mode="upscale_in_train", name=None + var_24, p=0.0, axis=None, training=self.training, mode="upscale_in_train", name=None ) var_26 = var_25.__matmul__(var_20) var_27 = var_26.transpose([0, 2, 1, 3]) @@ -114,7 +114,7 @@ def forward( x=var_28, weight=self.parameter_6, bias=self.parameter_11, name=None ) var_30 = paddle.nn.functional.common.dropout( - var_29, p=0.0, axis=None, training=True, mode="upscale_in_train", name=None + var_29, p=0.0, axis=None, training=self.training, mode="upscale_in_train", name=None ) return var_30 diff --git a/framework/e2e/PaddleLT_new/layercase/sublayer1000/Seg_cases/segmenter_segmenter_vit_base_mask_ade20k_512x512_160k/SIR_5.py b/framework/e2e/PaddleLT_new/layercase/sublayer1000/Seg_cases/segmenter_segmenter_vit_base_mask_ade20k_512x512_160k/SIR_5.py index f616070009..45408ff145 100644 --- a/framework/e2e/PaddleLT_new/layercase/sublayer1000/Seg_cases/segmenter_segmenter_vit_base_mask_ade20k_512x512_160k/SIR_5.py +++ b/framework/e2e/PaddleLT_new/layercase/sublayer1000/Seg_cases/segmenter_segmenter_vit_base_mask_ade20k_512x512_160k/SIR_5.py @@ -51,12 +51,12 @@ def forward( var_13 = var_9.matmul(var_12) var_14 = var_13.__mul__(0.125) var_15 = paddle.nn.functional.activation.softmax(var_14, axis=-1) - var_16 = paddle.nn.functional.common.dropout(var_15, p=0.0, axis=None, training=True, mode='upscale_in_train', name=None) + var_16 = paddle.nn.functional.common.dropout(var_15, p=0.0, axis=None, training=self.training, mode='upscale_in_train', name=None) var_17 = var_16.matmul(var_11) var_18 = var_17.transpose((0, 2, 1, 3,)) var_19 = var_18.reshape((-1, var_3, var_4,)) var_20 = paddle.nn.functional.common.linear(x=var_19, weight=self.parameter_5, bias=self.parameter_2, name=None) - var_21 = paddle.nn.functional.common.dropout(var_20, p=0.0, axis=None, training=True, mode='upscale_in_train', name=None) + var_21 = paddle.nn.functional.common.dropout(var_20, p=0.0, axis=None, training=self.training, mode='upscale_in_train', name=None) return var_21 diff --git a/framework/e2e/PaddleLT_new/layercase/sublayer1000/Seg_cases/segmenter_segmenter_vit_base_mask_ade20k_512x512_160k/SIR_66.py b/framework/e2e/PaddleLT_new/layercase/sublayer1000/Seg_cases/segmenter_segmenter_vit_base_mask_ade20k_512x512_160k/SIR_66.py index c54c7799a9..e342636112 100644 --- a/framework/e2e/PaddleLT_new/layercase/sublayer1000/Seg_cases/segmenter_segmenter_vit_base_mask_ade20k_512x512_160k/SIR_66.py +++ b/framework/e2e/PaddleLT_new/layercase/sublayer1000/Seg_cases/segmenter_segmenter_vit_base_mask_ade20k_512x512_160k/SIR_66.py @@ -47,12 +47,12 @@ def forward( var_13 = var_9.matmul(var_12) var_14 = var_13.__mul__(0.125) var_15 = paddle.nn.functional.activation.softmax(var_14, axis=-1) - var_16 = paddle.nn.functional.common.dropout(var_15, p=0.0, axis=None, training=True, mode='upscale_in_train', name=None) + var_16 = paddle.nn.functional.common.dropout(var_15, p=0.0, axis=None, training=self.training, mode='upscale_in_train', name=None) var_17 = var_16.matmul(var_11) var_18 = var_17.transpose((0, 2, 1, 3,)) var_19 = var_18.reshape((-1, var_3, var_4,)) var_20 = paddle.nn.functional.common.linear(x=var_19, weight=self.parameter_1, bias=self.parameter_0, name=None) - 
var_21 = paddle.nn.functional.common.dropout(var_20, p=0.0, axis=None, training=True, mode='upscale_in_train', name=None) + var_21 = paddle.nn.functional.common.dropout(var_20, p=0.0, axis=None, training=self.training, mode='upscale_in_train', name=None) return var_21 diff --git a/framework/e2e/PaddleLT_new/layercase/sublayer1000/Seg_cases/segmenter_segmenter_vit_small_mask_ade20k_512x512_160k/SIR_5.py b/framework/e2e/PaddleLT_new/layercase/sublayer1000/Seg_cases/segmenter_segmenter_vit_small_mask_ade20k_512x512_160k/SIR_5.py index 43e06f15ea..30ca31cf7d 100644 --- a/framework/e2e/PaddleLT_new/layercase/sublayer1000/Seg_cases/segmenter_segmenter_vit_small_mask_ade20k_512x512_160k/SIR_5.py +++ b/framework/e2e/PaddleLT_new/layercase/sublayer1000/Seg_cases/segmenter_segmenter_vit_small_mask_ade20k_512x512_160k/SIR_5.py @@ -51,12 +51,12 @@ def forward( var_13 = var_9.matmul(var_12) var_14 = var_13.__mul__(0.125) var_15 = paddle.nn.functional.activation.softmax(var_14, axis=-1) - var_16 = paddle.nn.functional.common.dropout(var_15, p=0.0, axis=None, training=True, mode='upscale_in_train', name=None) + var_16 = paddle.nn.functional.common.dropout(var_15, p=0.0, axis=None, training=self.training, mode='upscale_in_train', name=None) var_17 = var_16.matmul(var_11) var_18 = var_17.transpose((0, 2, 1, 3,)) var_19 = var_18.reshape((-1, var_3, var_4,)) var_20 = paddle.nn.functional.common.linear(x=var_19, weight=self.parameter_5, bias=self.parameter_3, name=None) - var_21 = paddle.nn.functional.common.dropout(var_20, p=0.0, axis=None, training=True, mode='upscale_in_train', name=None) + var_21 = paddle.nn.functional.common.dropout(var_20, p=0.0, axis=None, training=self.training, mode='upscale_in_train', name=None) return var_21 diff --git a/framework/e2e/PaddleLT_new/layercase/sublayer1000/Seg_cases/segmenter_segmenter_vit_small_mask_ade20k_512x512_160k/SIR_66.py b/framework/e2e/PaddleLT_new/layercase/sublayer1000/Seg_cases/segmenter_segmenter_vit_small_mask_ade20k_512x512_160k/SIR_66.py index ff892627a2..7fd3cc2c4f 100644 --- a/framework/e2e/PaddleLT_new/layercase/sublayer1000/Seg_cases/segmenter_segmenter_vit_small_mask_ade20k_512x512_160k/SIR_66.py +++ b/framework/e2e/PaddleLT_new/layercase/sublayer1000/Seg_cases/segmenter_segmenter_vit_small_mask_ade20k_512x512_160k/SIR_66.py @@ -47,12 +47,12 @@ def forward( var_13 = var_9.matmul(var_12) var_14 = var_13.__mul__(0.125) var_15 = paddle.nn.functional.activation.softmax(var_14, axis=-1) - var_16 = paddle.nn.functional.common.dropout(var_15, p=0.0, axis=None, training=True, mode='upscale_in_train', name=None) + var_16 = paddle.nn.functional.common.dropout(var_15, p=0.0, axis=None, training=self.training, mode='upscale_in_train', name=None) var_17 = var_16.matmul(var_11) var_18 = var_17.transpose((0, 2, 1, 3,)) var_19 = var_18.reshape((-1, var_3, var_4,)) var_20 = paddle.nn.functional.common.linear(x=var_19, weight=self.parameter_4, bias=self.parameter_0, name=None) - var_21 = paddle.nn.functional.common.dropout(var_20, p=0.0, axis=None, training=True, mode='upscale_in_train', name=None) + var_21 = paddle.nn.functional.common.dropout(var_20, p=0.0, axis=None, training=self.training, mode='upscale_in_train', name=None) return var_21 diff --git a/framework/e2e/PaddleLT_new/layercase/sublayer1000/Seg_cases/topformer_topformer_small_ade20k_512x512_160k/SIR_128.py b/framework/e2e/PaddleLT_new/layercase/sublayer1000/Seg_cases/topformer_topformer_small_ade20k_512x512_160k/SIR_128.py index b74b065c5f..c0ab74fe32 100644 --- 
a/framework/e2e/PaddleLT_new/layercase/sublayer1000/Seg_cases/topformer_topformer_small_ade20k_512x512_160k/SIR_128.py +++ b/framework/e2e/PaddleLT_new/layercase/sublayer1000/Seg_cases/topformer_topformer_small_ade20k_512x512_160k/SIR_128.py @@ -20,7 +20,7 @@ def forward( var_0, # (shape: [1, 192, 64, 64], dtype: paddle.float32, stop_gradient: False) ): paddle.seed(33) - var_1 = paddle.nn.functional.common.dropout2d(var_0, p=0.1, training=True, data_format='NCHW', name=None) + var_1 = paddle.nn.functional.common.dropout2d(var_0, p=0.1, training=self.training, data_format='NCHW', name=None) var_2 = paddle.nn.functional.conv._conv_nd(var_1, self.parameter_1, bias=self.parameter_0, stride=[1, 1], padding=[0, 0], padding_algorithm='EXPLICIT', dilation=[1, 1], groups=1, data_format='NCHW', channel_dim=1, op_type='conv2d', use_cudnn=True) return var_2 diff --git a/framework/e2e/PaddleLT_new/layercase/sublayer1000/Seg_cases/topformer_topformer_tiny_ade20k_512x512_160k/SIR_126.py b/framework/e2e/PaddleLT_new/layercase/sublayer1000/Seg_cases/topformer_topformer_tiny_ade20k_512x512_160k/SIR_126.py index 833bdd4069..d99d4411bb 100644 --- a/framework/e2e/PaddleLT_new/layercase/sublayer1000/Seg_cases/topformer_topformer_tiny_ade20k_512x512_160k/SIR_126.py +++ b/framework/e2e/PaddleLT_new/layercase/sublayer1000/Seg_cases/topformer_topformer_tiny_ade20k_512x512_160k/SIR_126.py @@ -20,7 +20,7 @@ def forward( var_0, # (shape: [1, 128, 64, 64], dtype: paddle.float32, stop_gradient: False) ): paddle.seed(33) - var_1 = paddle.nn.functional.common.dropout2d(var_0, p=0.1, training=True, data_format='NCHW', name=None) + var_1 = paddle.nn.functional.common.dropout2d(var_0, p=0.1, training=self.training, data_format='NCHW', name=None) var_2 = paddle.nn.functional.conv._conv_nd(var_1, self.parameter_1, bias=self.parameter_0, stride=[1, 1], padding=[0, 0], padding_algorithm='EXPLICIT', dilation=[1, 1], groups=1, data_format='NCHW', channel_dim=1, op_type='conv2d', use_cudnn=True) return var_2 diff --git a/framework/e2e/PaddleLT_new/layercase/sublayer160/Clas_cases/DeiT_DeiT_tiny_distilled_patch16_224/SIR_4.py b/framework/e2e/PaddleLT_new/layercase/sublayer160/Clas_cases/DeiT_DeiT_tiny_distilled_patch16_224/SIR_4.py index d18ba2cc98..a33fd5f3bc 100644 --- a/framework/e2e/PaddleLT_new/layercase/sublayer160/Clas_cases/DeiT_DeiT_tiny_distilled_patch16_224/SIR_4.py +++ b/framework/e2e/PaddleLT_new/layercase/sublayer160/Clas_cases/DeiT_DeiT_tiny_distilled_patch16_224/SIR_4.py @@ -46,12 +46,12 @@ def forward( var_9 = var_5.matmul(var_8) var_10 = var_9.__mul__(0.125) var_11 = paddle.nn.functional.activation.softmax(var_10, axis=-1) - var_12 = paddle.nn.functional.common.dropout(var_11, p=0.0, axis=None, training=True, mode='upscale_in_train', name=None) + var_12 = paddle.nn.functional.common.dropout(var_11, p=0.0, axis=None, training=self.training, mode='upscale_in_train', name=None) var_13 = var_12.matmul(var_7) var_14 = var_13.transpose((0, 2, 1, 3,)) var_15 = var_14.reshape((-1, 198, 192,)) var_16 = paddle.nn.functional.common.linear(x=var_15, weight=self.parameter_1, bias=self.parameter_4, name=None) - var_17 = paddle.nn.functional.common.dropout(var_16, p=0.0, axis=None, training=True, mode='upscale_in_train', name=None) + var_17 = paddle.nn.functional.common.dropout(var_16, p=0.0, axis=None, training=self.training, mode='upscale_in_train', name=None) return var_17 diff --git a/framework/e2e/PaddleLT_new/layercase/sublayer160/Clas_cases/EfficientNet_EfficientNetB0/SIR_96.py 
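
Note: a few of the sublayer160 classification hunks below (Inception, SqueezeNet, Xception65) use `mode='downscale_in_infer'`; there, eval mode is not a pass-through but a fixed rescale by `(1 - p)`, so switching to `training=self.training` changes their eval outputs from random masking to deterministic scaling. A minimal check, independent of the patch:

```python
import paddle

x = paddle.ones([2, 3])
y = paddle.nn.functional.dropout(
    x, p=0.2, axis=None, training=False, mode='downscale_in_infer', name=None
)
print(y)  # every entry is 0.8: inference under 'downscale_in_infer' multiplies by (1 - p)
```
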
b/framework/e2e/PaddleLT_new/layercase/sublayer160/Clas_cases/EfficientNet_EfficientNetB0/SIR_96.py index aefcc6ecbd..baa16255b1 100644 --- a/framework/e2e/PaddleLT_new/layercase/sublayer160/Clas_cases/EfficientNet_EfficientNetB0/SIR_96.py +++ b/framework/e2e/PaddleLT_new/layercase/sublayer160/Clas_cases/EfficientNet_EfficientNetB0/SIR_96.py @@ -20,7 +20,7 @@ def forward( var_0, # (shape: [43, 1280, 7, 7], dtype: paddle.float32, stop_gradient: False) ): var_1 = paddle.nn.functional.pooling.adaptive_avg_pool2d(var_0, output_size=1, data_format='NCHW', name=None) - var_2 = paddle.nn.functional.common.dropout(var_1, p=0.2, axis=None, training=True, mode='upscale_in_train', name=None) + var_2 = paddle.nn.functional.common.dropout(var_1, p=0.2, axis=None, training=self.training, mode='upscale_in_train', name=None) var_3 = paddle.tensor.manipulation.squeeze(var_2, axis=[2, 3]) var_4 = paddle.nn.functional.common.linear(x=var_3, weight=self.parameter_1, bias=self.parameter_0, name=None) return var_4 diff --git a/framework/e2e/PaddleLT_new/layercase/sublayer160/Clas_cases/Inception_InceptionV4/SIR_40.py b/framework/e2e/PaddleLT_new/layercase/sublayer160/Clas_cases/Inception_InceptionV4/SIR_40.py index deb95c8e1b..f843454444 100644 --- a/framework/e2e/PaddleLT_new/layercase/sublayer160/Clas_cases/Inception_InceptionV4/SIR_40.py +++ b/framework/e2e/PaddleLT_new/layercase/sublayer160/Clas_cases/Inception_InceptionV4/SIR_40.py @@ -21,7 +21,7 @@ def forward( ): var_1 = paddle.nn.functional.pooling.adaptive_avg_pool2d(var_0, output_size=1, data_format='NCHW', name=None) var_2 = paddle.tensor.manipulation.squeeze(var_1, axis=[2, 3]) - var_3 = paddle.nn.functional.common.dropout(var_2, p=0.2, axis=None, training=True, mode='downscale_in_infer', name=None) + var_3 = paddle.nn.functional.common.dropout(var_2, p=0.2, axis=None, training=self.training, mode='downscale_in_infer', name=None) var_4 = paddle.nn.functional.common.linear(x=var_3, weight=self.parameter_0, bias=self.parameter_1, name=None) return var_4 diff --git a/framework/e2e/PaddleLT_new/layercase/sublayer160/Clas_cases/MobileViT_MobileViT_XXS/SIR_26.py b/framework/e2e/PaddleLT_new/layercase/sublayer160/Clas_cases/MobileViT_MobileViT_XXS/SIR_26.py index 1ea1c769af..9f800b9db2 100644 --- a/framework/e2e/PaddleLT_new/layercase/sublayer160/Clas_cases/MobileViT_MobileViT_XXS/SIR_26.py +++ b/framework/e2e/PaddleLT_new/layercase/sublayer160/Clas_cases/MobileViT_MobileViT_XXS/SIR_26.py @@ -21,7 +21,7 @@ def forward( ): var_1 = paddle.nn.functional.pooling.adaptive_avg_pool2d(var_0, output_size=1, data_format='NCHW', name=None) var_2 = var_1.reshape([43, 320]) - var_3 = paddle.nn.functional.common.dropout(var_2, p=0.1, axis=None, training=True, mode='upscale_in_train', name=None) + var_3 = paddle.nn.functional.common.dropout(var_2, p=0.1, axis=None, training=self.training, mode='upscale_in_train', name=None) var_4 = paddle.nn.functional.common.linear(x=var_3, weight=self.parameter_1, bias=self.parameter_0, name=None) return var_4 diff --git a/framework/e2e/PaddleLT_new/layercase/sublayer160/Clas_cases/PeleeNet_PeleeNet/SIR_41.py b/framework/e2e/PaddleLT_new/layercase/sublayer160/Clas_cases/PeleeNet_PeleeNet/SIR_41.py index c033db0df7..fd5e59b263 100644 --- a/framework/e2e/PaddleLT_new/layercase/sublayer160/Clas_cases/PeleeNet_PeleeNet/SIR_41.py +++ b/framework/e2e/PaddleLT_new/layercase/sublayer160/Clas_cases/PeleeNet_PeleeNet/SIR_41.py @@ -21,7 +21,7 @@ def forward( ): var_1 = paddle.nn.functional.pooling.avg_pool2d(var_0, kernel_size=[7, 7]) var_2 = 
var_1.flatten(1) - var_3 = paddle.nn.functional.common.dropout(var_2, p=0.05, training=True) + var_3 = paddle.nn.functional.common.dropout(var_2, p=0.05, training=self.training) var_4 = paddle.nn.functional.common.linear(x=var_3, weight=self.parameter_0, bias=self.parameter_1, name=None) return var_4 diff --git a/framework/e2e/PaddleLT_new/layercase/sublayer160/Clas_cases/ReXNet_ReXNet_3_0/SIR_60.py b/framework/e2e/PaddleLT_new/layercase/sublayer160/Clas_cases/ReXNet_ReXNet_3_0/SIR_60.py index b78dbbbc8d..9de20a32ef 100644 --- a/framework/e2e/PaddleLT_new/layercase/sublayer160/Clas_cases/ReXNet_ReXNet_3_0/SIR_60.py +++ b/framework/e2e/PaddleLT_new/layercase/sublayer160/Clas_cases/ReXNet_ReXNet_3_0/SIR_60.py @@ -19,7 +19,7 @@ def forward( self, var_0, # (shape: [22, 3840, 1, 1], dtype: paddle.float32, stop_gradient: False) ): - var_1 = paddle.nn.functional.common.dropout(var_0, p=0.2, axis=None, training=True, mode='upscale_in_train', name=None) + var_1 = paddle.nn.functional.common.dropout(var_0, p=0.2, axis=None, training=self.training, mode='upscale_in_train', name=None) var_2 = paddle.nn.functional.conv._conv_nd(var_1, self.parameter_1, bias=self.parameter_0, stride=[1, 1], padding=[0, 0], padding_algorithm='EXPLICIT', dilation=[1, 1], groups=1, data_format='NCHW', channel_dim=1, op_type='conv2d', use_cudnn=True) var_3 = var_2.squeeze(axis=-1) var_4 = var_3.squeeze(axis=-1) diff --git a/framework/e2e/PaddleLT_new/layercase/sublayer160/Clas_cases/SqueezeNet_SqueezeNet1_0/SIR_0.py b/framework/e2e/PaddleLT_new/layercase/sublayer160/Clas_cases/SqueezeNet_SqueezeNet1_0/SIR_0.py index b0cf1d2222..f56fa5a5d5 100644 --- a/framework/e2e/PaddleLT_new/layercase/sublayer160/Clas_cases/SqueezeNet_SqueezeNet1_0/SIR_0.py +++ b/framework/e2e/PaddleLT_new/layercase/sublayer160/Clas_cases/SqueezeNet_SqueezeNet1_0/SIR_0.py @@ -280,7 +280,7 @@ def forward( var_59 = paddle.nn.functional.conv._conv_nd(var_56, self.parameter_2, bias=self.parameter_30, stride=[1, 1], padding=[1, 1], padding_algorithm='EXPLICIT', dilation=[1, 1], groups=1, data_format='NCHW', channel_dim=1, op_type='conv2d', use_cudnn=True) var_60 = paddle.nn.functional.activation.relu(var_59) var_61 = paddle.tensor.manipulation.concat([var_58, var_60], axis=1) - var_62 = paddle.nn.functional.common.dropout(var_61, p=0.5, axis=None, training=True, mode='downscale_in_infer', name=None) + var_62 = paddle.nn.functional.common.dropout(var_61, p=0.5, axis=None, training=self.training, mode='downscale_in_infer', name=None) var_63 = paddle.nn.functional.conv._conv_nd(var_62, self.parameter_29, bias=self.parameter_44, stride=[1, 1], padding=[0, 0], padding_algorithm='EXPLICIT', dilation=[1, 1], groups=1, data_format='NCHW', channel_dim=1, op_type='conv2d', use_cudnn=True) var_64 = paddle.nn.functional.activation.relu(var_63) var_65 = paddle.nn.functional.pooling.adaptive_avg_pool2d(var_64, output_size=1, data_format='NCHW', name=None) diff --git a/framework/e2e/PaddleLT_new/layercase/sublayer160/Clas_cases/Twins_alt_gvt_base/SIR_84.py b/framework/e2e/PaddleLT_new/layercase/sublayer160/Clas_cases/Twins_alt_gvt_base/SIR_84.py index c7007a83f7..ae22189bd5 100644 --- a/framework/e2e/PaddleLT_new/layercase/sublayer160/Clas_cases/Twins_alt_gvt_base/SIR_84.py +++ b/framework/e2e/PaddleLT_new/layercase/sublayer160/Clas_cases/Twins_alt_gvt_base/SIR_84.py @@ -31,7 +31,7 @@ def forward( var_2 = var_1.flatten(2) var_3 = var_2.transpose([0, 2, 1]) var_4 = paddle.nn.functional.norm.layer_norm(var_3, normalized_shape=[96], weight=self.parameter_3, 
bias=self.parameter_2, epsilon=1e-05) - var_5 = paddle.nn.functional.common.dropout(var_4, p=0.0, axis=None, training=True, mode='upscale_in_train', name=None) + var_5 = paddle.nn.functional.common.dropout(var_4, p=0.0, axis=None, training=self.training, mode='upscale_in_train', name=None) return var_5 diff --git a/framework/e2e/PaddleLT_new/layercase/sublayer160/Clas_cases/Xception_Xception65_deeplab/SIR_35.py b/framework/e2e/PaddleLT_new/layercase/sublayer160/Clas_cases/Xception_Xception65_deeplab/SIR_35.py index bc58eb87f0..7a7c614ff3 100644 --- a/framework/e2e/PaddleLT_new/layercase/sublayer160/Clas_cases/Xception_Xception65_deeplab/SIR_35.py +++ b/framework/e2e/PaddleLT_new/layercase/sublayer160/Clas_cases/Xception_Xception65_deeplab/SIR_35.py @@ -19,7 +19,7 @@ def forward( self, var_0, # (shape: [22, 2048, 10, 10], dtype: paddle.float32, stop_gradient: False) ): - var_1 = paddle.nn.functional.common.dropout(var_0, p=0.5, axis=None, training=True, mode='downscale_in_infer', name=None) + var_1 = paddle.nn.functional.common.dropout(var_0, p=0.5, axis=None, training=self.training, mode='downscale_in_infer', name=None) var_2 = paddle.nn.functional.pooling.adaptive_avg_pool2d(var_1, output_size=1, data_format='NCHW', name=None) var_3 = paddle.tensor.manipulation.squeeze(var_2, axis=[2, 3]) var_4 = paddle.nn.functional.common.linear(x=var_3, weight=self.parameter_1, bias=self.parameter_0, name=None) diff --git a/framework/e2e/PaddleLT_new/layercase/sublayer160/Det_cases/faster_rcnn_faster_rcnn_swin_tiny_fpn_1x_coco/SIR_3.py b/framework/e2e/PaddleLT_new/layercase/sublayer160/Det_cases/faster_rcnn_faster_rcnn_swin_tiny_fpn_1x_coco/SIR_3.py index 3659ee8a94..dd50590b4c 100644 --- a/framework/e2e/PaddleLT_new/layercase/sublayer160/Det_cases/faster_rcnn_faster_rcnn_swin_tiny_fpn_1x_coco/SIR_3.py +++ b/framework/e2e/PaddleLT_new/layercase/sublayer160/Det_cases/faster_rcnn_faster_rcnn_swin_tiny_fpn_1x_coco/SIR_3.py @@ -35,7 +35,7 @@ def forward( var_6 = var_5.reshape([-1, 96, 160, 240]) var_7 = var_6.flatten(2) var_8 = var_7.transpose([0, 2, 1]) - var_9 = paddle.nn.functional.common.dropout(var_8, p=0.0, axis=None, training=True, mode='upscale_in_train', name=None) + var_9 = paddle.nn.functional.common.dropout(var_8, p=0.0, axis=None, training=self.training, mode='upscale_in_train', name=None) return var_9 diff --git a/framework/e2e/PaddleLT_new/layercase/sublayer160/Det_cases/smalldet_ppyoloe_plus_sod_crn_l_80e_coco/SIR_127.py b/framework/e2e/PaddleLT_new/layercase/sublayer160/Det_cases/smalldet_ppyoloe_plus_sod_crn_l_80e_coco/SIR_127.py index 87e5069c42..e1da098ce6 100644 --- a/framework/e2e/PaddleLT_new/layercase/sublayer160/Det_cases/smalldet_ppyoloe_plus_sod_crn_l_80e_coco/SIR_127.py +++ b/framework/e2e/PaddleLT_new/layercase/sublayer160/Det_cases/smalldet_ppyoloe_plus_sod_crn_l_80e_coco/SIR_127.py @@ -44,14 +44,14 @@ def forward( var_0, # (shape: [1, 361, 1024], dtype: paddle.float32, stop_gradient: False) var_1, # (shape: [1, 361, 1024], dtype: paddle.float32, stop_gradient: False) ): - var_2 = paddle.nn.functional.common.dropout(var_0, p=0.1, axis=None, training=True, mode='upscale_in_train', name=None) + var_2 = paddle.nn.functional.common.dropout(var_0, p=0.1, axis=None, training=self.training, mode='upscale_in_train', name=None) var_3 = var_1.__add__(var_2) var_4 = paddle.nn.functional.norm.layer_norm(var_3, normalized_shape=[1024], weight=self.parameter_6, bias=self.parameter_7, epsilon=1e-05) var_5 = paddle.nn.functional.common.linear(x=var_4, weight=self.parameter_4, 
bias=self.parameter_5, name=None) var_6 = paddle.nn.functional.activation.gelu(var_5) - var_7 = paddle.nn.functional.common.dropout(var_6, p=0.1, axis=None, training=True, mode='upscale_in_train', name=None) + var_7 = paddle.nn.functional.common.dropout(var_6, p=0.1, axis=None, training=self.training, mode='upscale_in_train', name=None) var_8 = paddle.nn.functional.common.linear(x=var_7, weight=self.parameter_1, bias=self.parameter_0, name=None) - var_9 = paddle.nn.functional.common.dropout(var_8, p=0.1, axis=None, training=True, mode='upscale_in_train', name=None) + var_9 = paddle.nn.functional.common.dropout(var_8, p=0.1, axis=None, training=self.training, mode='upscale_in_train', name=None) var_10 = var_4.__add__(var_9) var_11 = paddle.nn.functional.norm.layer_norm(var_10, normalized_shape=[1024], weight=self.parameter_2, bias=self.parameter_3, epsilon=1e-05) return var_11 diff --git a/framework/e2e/PaddleLT_new/layercase/sublayer160/Det_cases/vitdet_ppyoloe_vit_base_csppan_cae_36e_coco/SIR_6.py b/framework/e2e/PaddleLT_new/layercase/sublayer160/Det_cases/vitdet_ppyoloe_vit_base_csppan_cae_36e_coco/SIR_6.py index 610c4dec8b..057061838d 100644 --- a/framework/e2e/PaddleLT_new/layercase/sublayer160/Det_cases/vitdet_ppyoloe_vit_base_csppan_cae_36e_coco/SIR_6.py +++ b/framework/e2e/PaddleLT_new/layercase/sublayer160/Det_cases/vitdet_ppyoloe_vit_base_csppan_cae_36e_coco/SIR_6.py @@ -60,12 +60,12 @@ def forward( var_15 = var_11.matmul(var_14) var_16 = var_15.__mul__(0.125) var_17 = paddle.nn.functional.activation.softmax(var_16, axis=-1) - var_18 = paddle.nn.functional.common.dropout(var_17, p=0.0, axis=None, training=True, mode='upscale_in_train', name=None) + var_18 = paddle.nn.functional.common.dropout(var_17, p=0.0, axis=None, training=self.training, mode='upscale_in_train', name=None) var_19 = var_18.matmul(var_13) var_20 = var_19.transpose((0, 2, 1, 3,)) var_21 = var_20.reshape((-1, var_3, var_4,)) var_22 = paddle.nn.functional.common.linear(x=var_21, weight=self.parameter_5, bias=self.parameter_0, name=None) - var_23 = paddle.nn.functional.common.dropout(var_22, p=0.0, axis=None, training=True, mode='upscale_in_train', name=None) + var_23 = paddle.nn.functional.common.dropout(var_22, p=0.0, axis=None, training=self.training, mode='upscale_in_train', name=None) var_24 = self.parameter_3.__mul__(var_23) return var_24 diff --git a/framework/e2e/PaddleLT_new/layercase/sublayer160/Ocr_cases/rec_rec_svtrnet/SIR_43.py b/framework/e2e/PaddleLT_new/layercase/sublayer160/Ocr_cases/rec_rec_svtrnet/SIR_43.py index bac371baad..fa9c036d0b 100644 --- a/framework/e2e/PaddleLT_new/layercase/sublayer160/Ocr_cases/rec_rec_svtrnet/SIR_43.py +++ b/framework/e2e/PaddleLT_new/layercase/sublayer160/Ocr_cases/rec_rec_svtrnet/SIR_43.py @@ -48,12 +48,12 @@ def forward( var_11 = var_7.matmul(var_10) var_12 = var_11.__add__(var_1) var_13 = paddle.nn.functional.activation.softmax(var_12, axis=-1) - var_14 = paddle.nn.functional.common.dropout(var_13, p=0.0, axis=None, training=True, mode='upscale_in_train', name=None) + var_14 = paddle.nn.functional.common.dropout(var_13, p=0.0, axis=None, training=self.training, mode='upscale_in_train', name=None) var_15 = var_14.matmul(var_9) var_16 = var_15.transpose((0, 2, 1, 3,)) var_17 = var_16.reshape((0, -1, 128,)) var_18 = paddle.nn.functional.common.linear(x=var_17, weight=self.parameter_5, bias=self.parameter_2, name=None) - var_19 = paddle.nn.functional.common.dropout(var_18, p=0.0, axis=None, training=True, mode='upscale_in_train', name=None) + var_19 = 
paddle.nn.functional.common.dropout(var_18, p=0.0, axis=None, training=self.training, mode='upscale_in_train', name=None) return var_19 diff --git a/framework/e2e/PaddleLT_new/layercase/sublayer160/Ocr_cases/rec_rec_svtrnet/SIR_57.py b/framework/e2e/PaddleLT_new/layercase/sublayer160/Ocr_cases/rec_rec_svtrnet/SIR_57.py index 440c546337..dd2de38b2c 100644 --- a/framework/e2e/PaddleLT_new/layercase/sublayer160/Ocr_cases/rec_rec_svtrnet/SIR_57.py +++ b/framework/e2e/PaddleLT_new/layercase/sublayer160/Ocr_cases/rec_rec_svtrnet/SIR_57.py @@ -46,12 +46,12 @@ def forward( var_9 = var_7.transpose((0, 1, 3, 2,)) var_10 = var_6.matmul(var_9) var_11 = paddle.nn.functional.activation.softmax(var_10, axis=-1) - var_12 = paddle.nn.functional.common.dropout(var_11, p=0.0, axis=None, training=True, mode='upscale_in_train', name=None) + var_12 = paddle.nn.functional.common.dropout(var_11, p=0.0, axis=None, training=self.training, mode='upscale_in_train', name=None) var_13 = var_12.matmul(var_8) var_14 = var_13.transpose((0, 2, 1, 3,)) var_15 = var_14.reshape((0, -1, 128,)) var_16 = paddle.nn.functional.common.linear(x=var_15, weight=self.parameter_0, bias=self.parameter_5, name=None) - var_17 = paddle.nn.functional.common.dropout(var_16, p=0.0, axis=None, training=True, mode='upscale_in_train', name=None) + var_17 = paddle.nn.functional.common.dropout(var_16, p=0.0, axis=None, training=self.training, mode='upscale_in_train', name=None) return var_17 diff --git a/framework/e2e/PaddleLT_new/layercase/sublayer160/Seg_cases/danet_danet_resnet101_os8_cityscapes_1024x512_80k/SIR_34.py b/framework/e2e/PaddleLT_new/layercase/sublayer160/Seg_cases/danet_danet_resnet101_os8_cityscapes_1024x512_80k/SIR_34.py index c239162788..eda34ec0b0 100644 --- a/framework/e2e/PaddleLT_new/layercase/sublayer160/Seg_cases/danet_danet_resnet101_os8_cityscapes_1024x512_80k/SIR_34.py +++ b/framework/e2e/PaddleLT_new/layercase/sublayer160/Seg_cases/danet_danet_resnet101_os8_cityscapes_1024x512_80k/SIR_34.py @@ -38,13 +38,13 @@ def forward( var_2, # (shape: [1, 2048, 64, 128], dtype: paddle.float32, stop_gradient: False) ): var_3 = var_0.__add__(var_1) - var_4 = paddle.nn.functional.common.dropout2d(var_3, p=0.1, training=True, data_format='NCHW', name=None) + var_4 = paddle.nn.functional.common.dropout2d(var_3, p=0.1, training=self.training, data_format='NCHW', name=None) var_5 = paddle.nn.functional.conv._conv_nd(var_4, self.parameter_0, bias=self.parameter_1, stride=[1, 1], padding=[0, 0], padding_algorithm='EXPLICIT', dilation=[1, 1], groups=1, data_format='NCHW', channel_dim=1, op_type='conv2d', use_cudnn=True) - var_6 = paddle.nn.functional.common.dropout2d(var_1, p=0.1, training=True, data_format='NCHW', name=None) + var_6 = paddle.nn.functional.common.dropout2d(var_1, p=0.1, training=self.training, data_format='NCHW', name=None) var_7 = paddle.nn.functional.conv._conv_nd(var_6, self.parameter_4, bias=self.parameter_5, stride=[1, 1], padding=[0, 0], padding_algorithm='EXPLICIT', dilation=[1, 1], groups=1, data_format='NCHW', channel_dim=1, op_type='conv2d', use_cudnn=True) - var_8 = paddle.nn.functional.common.dropout2d(var_0, p=0.1, training=True, data_format='NCHW', name=None) + var_8 = paddle.nn.functional.common.dropout2d(var_0, p=0.1, training=self.training, data_format='NCHW', name=None) var_9 = paddle.nn.functional.conv._conv_nd(var_8, self.parameter_4, bias=self.parameter_5, stride=[1, 1], padding=[0, 0], padding_algorithm='EXPLICIT', dilation=[1, 1], groups=1, data_format='NCHW', channel_dim=1, op_type='conv2d', 
use_cudnn=True) - var_10 = paddle.nn.functional.common.dropout2d(var_2, p=0.1, training=True, data_format='NCHW', name=None) + var_10 = paddle.nn.functional.common.dropout2d(var_2, p=0.1, training=self.training, data_format='NCHW', name=None) var_11 = paddle.nn.functional.conv._conv_nd(var_10, self.parameter_3, bias=self.parameter_2, stride=[1, 1], padding=[0, 0], padding_algorithm='EXPLICIT', dilation=[1, 1], groups=1, data_format='NCHW', channel_dim=1, op_type='conv2d', use_cudnn=True) return var_5, var_7, var_9, var_11 diff --git a/framework/e2e/PaddleLT_new/layercase/sublayer160/Seg_cases/enet_enet_cityscapes_1024x512_80k/SIR_40.py b/framework/e2e/PaddleLT_new/layercase/sublayer160/Seg_cases/enet_enet_cityscapes_1024x512_80k/SIR_40.py index ae9e67a190..74540c8c3c 100644 --- a/framework/e2e/PaddleLT_new/layercase/sublayer160/Seg_cases/enet_enet_cityscapes_1024x512_80k/SIR_40.py +++ b/framework/e2e/PaddleLT_new/layercase/sublayer160/Seg_cases/enet_enet_cityscapes_1024x512_80k/SIR_40.py @@ -11,7 +11,7 @@ def forward( self, var_0, # (shape: [1, 128, 64, 128], dtype: paddle.float32, stop_gradient: False) ): - var_1 = paddle.nn.functional.common.dropout2d(var_0, p=0.1, training=True, data_format='NCHW', name=None) + var_1 = paddle.nn.functional.common.dropout2d(var_0, p=0.1, training=self.training, data_format='NCHW', name=None) return var_1 diff --git a/framework/e2e/PaddleLT_new/layercase/sublayer160/Seg_cases/enet_enet_cityscapes_1024x512_80k/SIR_46.py b/framework/e2e/PaddleLT_new/layercase/sublayer160/Seg_cases/enet_enet_cityscapes_1024x512_80k/SIR_46.py index 19f79d625b..0b331ed44e 100644 --- a/framework/e2e/PaddleLT_new/layercase/sublayer160/Seg_cases/enet_enet_cityscapes_1024x512_80k/SIR_46.py +++ b/framework/e2e/PaddleLT_new/layercase/sublayer160/Seg_cases/enet_enet_cityscapes_1024x512_80k/SIR_46.py @@ -12,7 +12,7 @@ def forward( var_0, # (shape: [1, 128, 64, 128], dtype: paddle.float32, stop_gradient: False) var_1, # (shape: [1, 128, 64, 128], dtype: paddle.float32, stop_gradient: False) ): - var_2 = paddle.nn.functional.common.dropout2d(var_0, p=0.1, training=True, data_format='NCHW', name=None) + var_2 = paddle.nn.functional.common.dropout2d(var_0, p=0.1, training=self.training, data_format='NCHW', name=None) var_3 = var_1.__add__(var_2) return var_3 diff --git a/framework/e2e/PaddleLT_new/layercase/sublayer160/Seg_cases/espnet_espnet_cityscapes_1024x512_120k/SIR_143.py b/framework/e2e/PaddleLT_new/layercase/sublayer160/Seg_cases/espnet_espnet_cityscapes_1024x512_120k/SIR_143.py index 0050a41c7a..8458707449 100644 --- a/framework/e2e/PaddleLT_new/layercase/sublayer160/Seg_cases/espnet_espnet_cityscapes_1024x512_120k/SIR_143.py +++ b/framework/e2e/PaddleLT_new/layercase/sublayer160/Seg_cases/espnet_espnet_cityscapes_1024x512_120k/SIR_143.py @@ -19,7 +19,7 @@ def forward( ): var_3 = paddle.nn.functional.common.interpolate(var_0, scale_factor=2, mode='bilinear', align_corners=True) var_4 = paddle.tensor.manipulation.concat([var_1, var_3], axis=1) - var_5 = paddle.nn.functional.common.dropout2d(var_4, p=0.0, training=True, data_format='NCHW', name=None) + var_5 = paddle.nn.functional.common.dropout2d(var_4, p=0.0, training=self.training, data_format='NCHW', name=None) var_6 = paddle.nn.functional.conv._conv_nd(var_5, self.parameter_0, bias=None, stride=[1, 1], padding=[0, 0], padding_algorithm='EXPLICIT', dilation=[1, 1], groups=1, data_format='NCHW', channel_dim=1, op_type='conv2d', use_cudnn=True) var_7 = paddle.nn.functional.common.interpolate(var_6, scale_factor=2, 
mode='bilinear', align_corners=True) var_8 = paddle.nn.functional.common.interpolate(var_2, scale_factor=2, mode='bilinear', align_corners=True) diff --git a/framework/e2e/PaddleLT_new/layercase/sublayer160/Seg_cases/segformer_segformer_b2_cityscapes_1024x1024_160k/SIR_18.py b/framework/e2e/PaddleLT_new/layercase/sublayer160/Seg_cases/segformer_segformer_b2_cityscapes_1024x1024_160k/SIR_18.py index 591daf7a0d..7f3eac8691 100644 --- a/framework/e2e/PaddleLT_new/layercase/sublayer160/Seg_cases/segformer_segformer_b2_cityscapes_1024x1024_160k/SIR_18.py +++ b/framework/e2e/PaddleLT_new/layercase/sublayer160/Seg_cases/segformer_segformer_b2_cityscapes_1024x1024_160k/SIR_18.py @@ -83,12 +83,12 @@ def forward( var_22 = var_9.__matmul__(var_21) var_23 = var_22.__mul__(0.125) var_24 = paddle.nn.functional.activation.softmax(var_23, axis=-1) - var_25 = paddle.nn.functional.common.dropout(var_24, p=0.0, axis=None, training=True, mode='upscale_in_train', name=None) + var_25 = paddle.nn.functional.common.dropout(var_24, p=0.0, axis=None, training=self.training, mode='upscale_in_train', name=None) var_26 = var_25.__matmul__(var_20) var_27 = var_26.transpose([0, 2, 1, 3]) var_28 = var_27.reshape([var_5, var_6, 128]) var_29 = paddle.nn.functional.common.linear(x=var_28, weight=self.parameter_8, bias=self.parameter_6, name=None) - var_30 = paddle.nn.functional.common.dropout(var_29, p=0.0, axis=None, training=True, mode='upscale_in_train', name=None) + var_30 = paddle.nn.functional.common.dropout(var_29, p=0.0, axis=None, training=self.training, mode='upscale_in_train', name=None) return var_30 diff --git a/framework/e2e/PaddleLT_new/layercase/sublayer160/Seg_cases/segformer_segformer_b2_cityscapes_1024x1024_160k/SIR_66.py b/framework/e2e/PaddleLT_new/layercase/sublayer160/Seg_cases/segformer_segformer_b2_cityscapes_1024x1024_160k/SIR_66.py index 68223854c3..63755e893c 100644 --- a/framework/e2e/PaddleLT_new/layercase/sublayer160/Seg_cases/segformer_segformer_b2_cityscapes_1024x1024_160k/SIR_66.py +++ b/framework/e2e/PaddleLT_new/layercase/sublayer160/Seg_cases/segformer_segformer_b2_cityscapes_1024x1024_160k/SIR_66.py @@ -59,12 +59,12 @@ def forward( var_14 = var_7.__matmul__(var_13) var_15 = var_14.__mul__(0.125) var_16 = paddle.nn.functional.activation.softmax(var_15, axis=-1) - var_17 = paddle.nn.functional.common.dropout(var_16, p=0.0, axis=None, training=True, mode='upscale_in_train', name=None) + var_17 = paddle.nn.functional.common.dropout(var_16, p=0.0, axis=None, training=self.training, mode='upscale_in_train', name=None) var_18 = var_17.__matmul__(var_12) var_19 = var_18.transpose([0, 2, 1, 3]) var_20 = var_19.reshape([var_3, var_4, 512]) var_21 = paddle.nn.functional.common.linear(x=var_20, weight=self.parameter_5, bias=self.parameter_2, name=None) - var_22 = paddle.nn.functional.common.dropout(var_21, p=0.0, axis=None, training=True, mode='upscale_in_train', name=None) + var_22 = paddle.nn.functional.common.dropout(var_21, p=0.0, axis=None, training=self.training, mode='upscale_in_train', name=None) return var_22 diff --git a/framework/e2e/PaddleLT_new/layercase/sublayer160/Seg_cases/segmenter_segmenter_vit_small_mask_ade20k_512x512_160k/SIR_66.py b/framework/e2e/PaddleLT_new/layercase/sublayer160/Seg_cases/segmenter_segmenter_vit_small_mask_ade20k_512x512_160k/SIR_66.py index 14c6818504..c243a92e10 100644 --- a/framework/e2e/PaddleLT_new/layercase/sublayer160/Seg_cases/segmenter_segmenter_vit_small_mask_ade20k_512x512_160k/SIR_66.py +++ 
b/framework/e2e/PaddleLT_new/layercase/sublayer160/Seg_cases/segmenter_segmenter_vit_small_mask_ade20k_512x512_160k/SIR_66.py @@ -46,12 +46,12 @@ def forward( var_13 = var_9.matmul(var_12) var_14 = var_13.__mul__(0.125) var_15 = paddle.nn.functional.activation.softmax(var_14, axis=-1) - var_16 = paddle.nn.functional.common.dropout(var_15, p=0.0, axis=None, training=True, mode='upscale_in_train', name=None) + var_16 = paddle.nn.functional.common.dropout(var_15, p=0.0, axis=None, training=self.training, mode='upscale_in_train', name=None) var_17 = var_16.matmul(var_11) var_18 = var_17.transpose((0, 2, 1, 3,)) var_19 = var_18.reshape((-1, var_3, var_4,)) var_20 = paddle.nn.functional.common.linear(x=var_19, weight=self.parameter_4, bias=self.parameter_2, name=None) - var_21 = paddle.nn.functional.common.dropout(var_20, p=0.0, axis=None, training=True, mode='upscale_in_train', name=None) + var_21 = paddle.nn.functional.common.dropout(var_20, p=0.0, axis=None, training=self.training, mode='upscale_in_train', name=None) return var_21 diff --git a/framework/e2e/PaddleLT_new/layercase/sublayer160/Seg_cases/topformer_topformer_tiny_ade20k_512x512_160k/SIR_126.py b/framework/e2e/PaddleLT_new/layercase/sublayer160/Seg_cases/topformer_topformer_tiny_ade20k_512x512_160k/SIR_126.py index 65c4e9bcdb..efe1faa64d 100644 --- a/framework/e2e/PaddleLT_new/layercase/sublayer160/Seg_cases/topformer_topformer_tiny_ade20k_512x512_160k/SIR_126.py +++ b/framework/e2e/PaddleLT_new/layercase/sublayer160/Seg_cases/topformer_topformer_tiny_ade20k_512x512_160k/SIR_126.py @@ -19,7 +19,7 @@ def forward( self, var_0, # (shape: [1, 128, 64, 64], dtype: paddle.float32, stop_gradient: False) ): - var_1 = paddle.nn.functional.common.dropout2d(var_0, p=0.1, training=True, data_format='NCHW', name=None) + var_1 = paddle.nn.functional.common.dropout2d(var_0, p=0.1, training=self.training, data_format='NCHW', name=None) var_2 = paddle.nn.functional.conv._conv_nd(var_1, self.parameter_0, bias=self.parameter_1, stride=[1, 1], padding=[0, 0], padding_algorithm='EXPLICIT', dilation=[1, 1], groups=1, data_format='NCHW', channel_dim=1, op_type='conv2d', use_cudnn=True) return var_2 diff --git a/framework/e2e/PaddleLT_new/layercase/sublayer_search90plus/Seg_cases/danet_danet_resnet50_os8_cityscapes_1024x512_80k/SIR_34.py b/framework/e2e/PaddleLT_new/layercase/sublayer_search90plus/Seg_cases/danet_danet_resnet50_os8_cityscapes_1024x512_80k/SIR_34.py index 27561ee309..eef564a138 100644 --- a/framework/e2e/PaddleLT_new/layercase/sublayer_search90plus/Seg_cases/danet_danet_resnet50_os8_cityscapes_1024x512_80k/SIR_34.py +++ b/framework/e2e/PaddleLT_new/layercase/sublayer_search90plus/Seg_cases/danet_danet_resnet50_os8_cityscapes_1024x512_80k/SIR_34.py @@ -38,13 +38,13 @@ def forward( var_2, # (shape: [1, 2048, 64, 128], dtype: paddle.float32, stop_gradient: False) ): var_3 = var_0.__add__(var_1) - var_4 = paddle.nn.functional.common.dropout2d(var_3, p=0.1, training=True, data_format='NCHW', name=None) + var_4 = paddle.nn.functional.common.dropout2d(var_3, p=0.1, training=self.training, data_format='NCHW', name=None) var_5 = paddle.nn.functional.conv._conv_nd(var_4, self.parameter_2, bias=self.parameter_0, stride=[1, 1], padding=[0, 0], padding_algorithm='EXPLICIT', dilation=[1, 1], groups=1, data_format='NCHW', channel_dim=1, op_type='conv2d', use_cudnn=True) - var_6 = paddle.nn.functional.common.dropout2d(var_1, p=0.1, training=True, data_format='NCHW', name=None) + var_6 = paddle.nn.functional.common.dropout2d(var_1, p=0.1, 
training=self.training, data_format='NCHW', name=None) var_7 = paddle.nn.functional.conv._conv_nd(var_6, self.parameter_4, bias=self.parameter_3, stride=[1, 1], padding=[0, 0], padding_algorithm='EXPLICIT', dilation=[1, 1], groups=1, data_format='NCHW', channel_dim=1, op_type='conv2d', use_cudnn=True) - var_8 = paddle.nn.functional.common.dropout2d(var_0, p=0.1, training=True, data_format='NCHW', name=None) + var_8 = paddle.nn.functional.common.dropout2d(var_0, p=0.1, training=self.training, data_format='NCHW', name=None) var_9 = paddle.nn.functional.conv._conv_nd(var_8, self.parameter_4, bias=self.parameter_3, stride=[1, 1], padding=[0, 0], padding_algorithm='EXPLICIT', dilation=[1, 1], groups=1, data_format='NCHW', channel_dim=1, op_type='conv2d', use_cudnn=True) - var_10 = paddle.nn.functional.common.dropout2d(var_2, p=0.1, training=True, data_format='NCHW', name=None) + var_10 = paddle.nn.functional.common.dropout2d(var_2, p=0.1, training=self.training, data_format='NCHW', name=None) var_11 = paddle.nn.functional.conv._conv_nd(var_10, self.parameter_1, bias=self.parameter_5, stride=[1, 1], padding=[0, 0], padding_algorithm='EXPLICIT', dilation=[1, 1], groups=1, data_format='NCHW', channel_dim=1, op_type='conv2d', use_cudnn=True) return var_5, var_7, var_9, var_11 diff --git a/framework/e2e/PaddleLT_new/layercase/sublayer_search90plus/Seg_cases/danet_danet_resnet50_os8_voc12aug_512x512_40k/SIR_34.py b/framework/e2e/PaddleLT_new/layercase/sublayer_search90plus/Seg_cases/danet_danet_resnet50_os8_voc12aug_512x512_40k/SIR_34.py index a9f920a83e..8b07aef637 100644 --- a/framework/e2e/PaddleLT_new/layercase/sublayer_search90plus/Seg_cases/danet_danet_resnet50_os8_voc12aug_512x512_40k/SIR_34.py +++ b/framework/e2e/PaddleLT_new/layercase/sublayer_search90plus/Seg_cases/danet_danet_resnet50_os8_voc12aug_512x512_40k/SIR_34.py @@ -38,13 +38,13 @@ def forward( var_2, # (shape: [1, 2048, 64, 64], dtype: paddle.float32, stop_gradient: False) ): var_3 = var_0.__add__(var_1) - var_4 = paddle.nn.functional.common.dropout2d(var_3, p=0.1, training=True, data_format='NCHW', name=None) + var_4 = paddle.nn.functional.common.dropout2d(var_3, p=0.1, training=self.training, data_format='NCHW', name=None) var_5 = paddle.nn.functional.conv._conv_nd(var_4, self.parameter_5, bias=self.parameter_0, stride=[1, 1], padding=[0, 0], padding_algorithm='EXPLICIT', dilation=[1, 1], groups=1, data_format='NCHW', channel_dim=1, op_type='conv2d', use_cudnn=True) - var_6 = paddle.nn.functional.common.dropout2d(var_1, p=0.1, training=True, data_format='NCHW', name=None) + var_6 = paddle.nn.functional.common.dropout2d(var_1, p=0.1, training=self.training, data_format='NCHW', name=None) var_7 = paddle.nn.functional.conv._conv_nd(var_6, self.parameter_4, bias=self.parameter_3, stride=[1, 1], padding=[0, 0], padding_algorithm='EXPLICIT', dilation=[1, 1], groups=1, data_format='NCHW', channel_dim=1, op_type='conv2d', use_cudnn=True) - var_8 = paddle.nn.functional.common.dropout2d(var_0, p=0.1, training=True, data_format='NCHW', name=None) + var_8 = paddle.nn.functional.common.dropout2d(var_0, p=0.1, training=self.training, data_format='NCHW', name=None) var_9 = paddle.nn.functional.conv._conv_nd(var_8, self.parameter_4, bias=self.parameter_3, stride=[1, 1], padding=[0, 0], padding_algorithm='EXPLICIT', dilation=[1, 1], groups=1, data_format='NCHW', channel_dim=1, op_type='conv2d', use_cudnn=True) - var_10 = paddle.nn.functional.common.dropout2d(var_2, p=0.1, training=True, data_format='NCHW', name=None) + var_10 = 
paddle.nn.functional.common.dropout2d(var_2, p=0.1, training=self.training, data_format='NCHW', name=None) var_11 = paddle.nn.functional.conv._conv_nd(var_10, self.parameter_1, bias=self.parameter_2, stride=[1, 1], padding=[0, 0], padding_algorithm='EXPLICIT', dilation=[1, 1], groups=1, data_format='NCHW', channel_dim=1, op_type='conv2d', use_cudnn=True) return var_5, var_7, var_9, var_11 diff --git a/framework/e2e/PaddleLT_new/layercase/sublayer_search90plus/Seg_cases/espnet_espnet_cityscapes_1024x512_120k/SIR_143.py b/framework/e2e/PaddleLT_new/layercase/sublayer_search90plus/Seg_cases/espnet_espnet_cityscapes_1024x512_120k/SIR_143.py index 0050a41c7a..8458707449 100644 --- a/framework/e2e/PaddleLT_new/layercase/sublayer_search90plus/Seg_cases/espnet_espnet_cityscapes_1024x512_120k/SIR_143.py +++ b/framework/e2e/PaddleLT_new/layercase/sublayer_search90plus/Seg_cases/espnet_espnet_cityscapes_1024x512_120k/SIR_143.py @@ -19,7 +19,7 @@ def forward( ): var_3 = paddle.nn.functional.common.interpolate(var_0, scale_factor=2, mode='bilinear', align_corners=True) var_4 = paddle.tensor.manipulation.concat([var_1, var_3], axis=1) - var_5 = paddle.nn.functional.common.dropout2d(var_4, p=0.0, training=True, data_format='NCHW', name=None) + var_5 = paddle.nn.functional.common.dropout2d(var_4, p=0.0, training=self.training, data_format='NCHW', name=None) var_6 = paddle.nn.functional.conv._conv_nd(var_5, self.parameter_0, bias=None, stride=[1, 1], padding=[0, 0], padding_algorithm='EXPLICIT', dilation=[1, 1], groups=1, data_format='NCHW', channel_dim=1, op_type='conv2d', use_cudnn=True) var_7 = paddle.nn.functional.common.interpolate(var_6, scale_factor=2, mode='bilinear', align_corners=True) var_8 = paddle.nn.functional.common.interpolate(var_2, scale_factor=2, mode='bilinear', align_corners=True) diff --git a/framework/e2e/PaddleLT_new/layercase/sublayer_search90plusplus/Seg_cases/danet_danet_resnet50_os8_cityscapes_1024x512_80k/SIR_34.py b/framework/e2e/PaddleLT_new/layercase/sublayer_search90plusplus/Seg_cases/danet_danet_resnet50_os8_cityscapes_1024x512_80k/SIR_34.py index 27561ee309..eef564a138 100644 --- a/framework/e2e/PaddleLT_new/layercase/sublayer_search90plusplus/Seg_cases/danet_danet_resnet50_os8_cityscapes_1024x512_80k/SIR_34.py +++ b/framework/e2e/PaddleLT_new/layercase/sublayer_search90plusplus/Seg_cases/danet_danet_resnet50_os8_cityscapes_1024x512_80k/SIR_34.py @@ -38,13 +38,13 @@ def forward( var_2, # (shape: [1, 2048, 64, 128], dtype: paddle.float32, stop_gradient: False) ): var_3 = var_0.__add__(var_1) - var_4 = paddle.nn.functional.common.dropout2d(var_3, p=0.1, training=True, data_format='NCHW', name=None) + var_4 = paddle.nn.functional.common.dropout2d(var_3, p=0.1, training=self.training, data_format='NCHW', name=None) var_5 = paddle.nn.functional.conv._conv_nd(var_4, self.parameter_2, bias=self.parameter_0, stride=[1, 1], padding=[0, 0], padding_algorithm='EXPLICIT', dilation=[1, 1], groups=1, data_format='NCHW', channel_dim=1, op_type='conv2d', use_cudnn=True) - var_6 = paddle.nn.functional.common.dropout2d(var_1, p=0.1, training=True, data_format='NCHW', name=None) + var_6 = paddle.nn.functional.common.dropout2d(var_1, p=0.1, training=self.training, data_format='NCHW', name=None) var_7 = paddle.nn.functional.conv._conv_nd(var_6, self.parameter_4, bias=self.parameter_3, stride=[1, 1], padding=[0, 0], padding_algorithm='EXPLICIT', dilation=[1, 1], groups=1, data_format='NCHW', channel_dim=1, op_type='conv2d', use_cudnn=True) - var_8 = paddle.nn.functional.common.dropout2d(var_0, 
p=0.1, training=True, data_format='NCHW', name=None) + var_8 = paddle.nn.functional.common.dropout2d(var_0, p=0.1, training=self.training, data_format='NCHW', name=None) var_9 = paddle.nn.functional.conv._conv_nd(var_8, self.parameter_4, bias=self.parameter_3, stride=[1, 1], padding=[0, 0], padding_algorithm='EXPLICIT', dilation=[1, 1], groups=1, data_format='NCHW', channel_dim=1, op_type='conv2d', use_cudnn=True) - var_10 = paddle.nn.functional.common.dropout2d(var_2, p=0.1, training=True, data_format='NCHW', name=None) + var_10 = paddle.nn.functional.common.dropout2d(var_2, p=0.1, training=self.training, data_format='NCHW', name=None) var_11 = paddle.nn.functional.conv._conv_nd(var_10, self.parameter_1, bias=self.parameter_5, stride=[1, 1], padding=[0, 0], padding_algorithm='EXPLICIT', dilation=[1, 1], groups=1, data_format='NCHW', channel_dim=1, op_type='conv2d', use_cudnn=True) return var_5, var_7, var_9, var_11 diff --git a/framework/e2e/PaddleLT_new/layercase/sublayer_search90plusplus/Seg_cases/danet_danet_resnet50_os8_voc12aug_512x512_40k/SIR_34.py b/framework/e2e/PaddleLT_new/layercase/sublayer_search90plusplus/Seg_cases/danet_danet_resnet50_os8_voc12aug_512x512_40k/SIR_34.py index a9f920a83e..8b07aef637 100644 --- a/framework/e2e/PaddleLT_new/layercase/sublayer_search90plusplus/Seg_cases/danet_danet_resnet50_os8_voc12aug_512x512_40k/SIR_34.py +++ b/framework/e2e/PaddleLT_new/layercase/sublayer_search90plusplus/Seg_cases/danet_danet_resnet50_os8_voc12aug_512x512_40k/SIR_34.py @@ -38,13 +38,13 @@ def forward( var_2, # (shape: [1, 2048, 64, 64], dtype: paddle.float32, stop_gradient: False) ): var_3 = var_0.__add__(var_1) - var_4 = paddle.nn.functional.common.dropout2d(var_3, p=0.1, training=True, data_format='NCHW', name=None) + var_4 = paddle.nn.functional.common.dropout2d(var_3, p=0.1, training=self.training, data_format='NCHW', name=None) var_5 = paddle.nn.functional.conv._conv_nd(var_4, self.parameter_5, bias=self.parameter_0, stride=[1, 1], padding=[0, 0], padding_algorithm='EXPLICIT', dilation=[1, 1], groups=1, data_format='NCHW', channel_dim=1, op_type='conv2d', use_cudnn=True) - var_6 = paddle.nn.functional.common.dropout2d(var_1, p=0.1, training=True, data_format='NCHW', name=None) + var_6 = paddle.nn.functional.common.dropout2d(var_1, p=0.1, training=self.training, data_format='NCHW', name=None) var_7 = paddle.nn.functional.conv._conv_nd(var_6, self.parameter_4, bias=self.parameter_3, stride=[1, 1], padding=[0, 0], padding_algorithm='EXPLICIT', dilation=[1, 1], groups=1, data_format='NCHW', channel_dim=1, op_type='conv2d', use_cudnn=True) - var_8 = paddle.nn.functional.common.dropout2d(var_0, p=0.1, training=True, data_format='NCHW', name=None) + var_8 = paddle.nn.functional.common.dropout2d(var_0, p=0.1, training=self.training, data_format='NCHW', name=None) var_9 = paddle.nn.functional.conv._conv_nd(var_8, self.parameter_4, bias=self.parameter_3, stride=[1, 1], padding=[0, 0], padding_algorithm='EXPLICIT', dilation=[1, 1], groups=1, data_format='NCHW', channel_dim=1, op_type='conv2d', use_cudnn=True) - var_10 = paddle.nn.functional.common.dropout2d(var_2, p=0.1, training=True, data_format='NCHW', name=None) + var_10 = paddle.nn.functional.common.dropout2d(var_2, p=0.1, training=self.training, data_format='NCHW', name=None) var_11 = paddle.nn.functional.conv._conv_nd(var_10, self.parameter_1, bias=self.parameter_2, stride=[1, 1], padding=[0, 0], padding_algorithm='EXPLICIT', dilation=[1, 1], groups=1, data_format='NCHW', channel_dim=1, op_type='conv2d', use_cudnn=True) 
return var_5, var_7, var_9, var_11 diff --git a/framework/e2e/PaddleLT_new/layercase/sublayer_search90plusplus/Seg_cases/espnet_espnet_cityscapes_1024x512_120k/SIR_143.py b/framework/e2e/PaddleLT_new/layercase/sublayer_search90plusplus/Seg_cases/espnet_espnet_cityscapes_1024x512_120k/SIR_143.py index 0050a41c7a..8458707449 100644 --- a/framework/e2e/PaddleLT_new/layercase/sublayer_search90plusplus/Seg_cases/espnet_espnet_cityscapes_1024x512_120k/SIR_143.py +++ b/framework/e2e/PaddleLT_new/layercase/sublayer_search90plusplus/Seg_cases/espnet_espnet_cityscapes_1024x512_120k/SIR_143.py @@ -19,7 +19,7 @@ def forward( ): var_3 = paddle.nn.functional.common.interpolate(var_0, scale_factor=2, mode='bilinear', align_corners=True) var_4 = paddle.tensor.manipulation.concat([var_1, var_3], axis=1) - var_5 = paddle.nn.functional.common.dropout2d(var_4, p=0.0, training=True, data_format='NCHW', name=None) + var_5 = paddle.nn.functional.common.dropout2d(var_4, p=0.0, training=self.training, data_format='NCHW', name=None) var_6 = paddle.nn.functional.conv._conv_nd(var_5, self.parameter_0, bias=None, stride=[1, 1], padding=[0, 0], padding_algorithm='EXPLICIT', dilation=[1, 1], groups=1, data_format='NCHW', channel_dim=1, op_type='conv2d', use_cudnn=True) var_7 = paddle.nn.functional.common.interpolate(var_6, scale_factor=2, mode='bilinear', align_corners=True) var_8 = paddle.nn.functional.common.interpolate(var_2, scale_factor=2, mode='bilinear', align_corners=True)
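
All of the layercase hunks above make the same one-line fix: the generated sublayers had training=True baked into paddle.nn.functional.dropout / dropout2d, so dropout kept firing even after net.eval(), which makes eval and inference outputs nondeterministic and breaks any cross-engine precision comparison. Binding the flag to self.training lets each sublayer follow the enclosing module's train/eval state, which is what the new eval/export/infer pipelines below rely on. A minimal sketch of the behavioral difference (illustrative layer, not one of the generated cases):

import paddle


class TinyLayer(paddle.nn.Layer):
    def forward(self, x):
        # follows the module's train/eval state: a no-op after .eval()
        return paddle.nn.functional.dropout(x, p=0.5, training=self.training)


net = TinyLayer()
net.eval()
x = paddle.ones([4, 4])
# with training=self.training both eval passes match exactly;
# with a hardcoded training=True they would differ run to run
assert bool((net(x) == net(x)).all())
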
diff --git a/framework/e2e/PaddleLT_new/layertest.py b/framework/e2e/PaddleLT_new/layertest.py
index 5a6ea908d1..1587b543bc 100644
--- a/framework/e2e/PaddleLT_new/layertest.py
+++ b/framework/e2e/PaddleLT_new/layertest.py
@@ -10,7 +10,7 @@
 import traceback
 
 # from engine.engine_map import engine_map
-from strategy.compare import base_compare
+from strategy.compare import base_compare, infer_compare
 from tools.yaml_loader import YamlLoader
 from tools.logger import Logger
 from tools.res_save import save_tensor, load_tensor, save_pickle
@@ -59,7 +59,7 @@ def del_core_dump(self):
                 # if deletion fails (e.g. the file does not exist or there is no permission), log the error
                 self.logger.get_log().warning(f"Error deleting {filepath}: {e.strerror}")
 
-    def _single_run(self, testing, layerfile, device_place_id=0):
+    def _single_run(self, testing, layerfile, device_place_id=0, upstream_net=None):
         """
         run a single testing engine
         :param testing: 'dy_train', 'dy_eval'...
@@ -78,7 +78,10 @@ def _single_run(self, testing, layerfile, device_place_id=0):
         self.logger.get_log().info(f"testing engine has been covered. Real engine is: {engine}")
         layer_test = engine_map[engine](
-            testing=self.testings.get(testing), layerfile=layerfile, device_place_id=device_place_id
+            testing=self.testings.get(testing),
+            layerfile=layerfile,
+            device_place_id=device_place_id,
+            upstream_net=upstream_net,
         )
         res = getattr(layer_test, engine)()
         return res
@@ -90,14 +93,20 @@ def _case_run(self):
         exc_func = 0
         exc = 0
         res_dict = {}
+        net = None
         compare_res_list = []
         self.logger.get_log().info("test case name: {}".format(self.title))
         fail_testing_list = []
         for testing in self.testings_list:
             try:
                 self.logger.get_log().info("testing engine: {}".format(testing))
-                res = self._single_run(testing=testing, layerfile=self.layerfile, device_place_id=self.device_place_id)
-                res_dict[testing] = res
+                if self.testings.get(testing).get("use_upstream_net_instance", "False") == "False":
+                    net = None
+                res = self._single_run(
+                    testing=testing, layerfile=self.layerfile, device_place_id=self.device_place_id, upstream_net=net
+                )
+                res_dict[testing] = res["res"]
+                net = res.get("net", None)
                 if os.environ.get("PLT_SAVE_GT") == "True":  # ground-truth saving enabled
                     gt_path = os.path.join("plt_gt", os.environ.get("PLT_SET_DEVICE"), testing)
                     if not os.path.exists(gt_path):
@@ -154,7 +163,11 @@ def _case_run(self):
                 compare_res_list.append(tmp)
             else:
                 precision = comparing.get("precision")
-                compare_res = base_compare(
+                if comparing.get("compare_method", "base_compare") == "infer_compare":
+                    compare_method = infer_compare
+                else:
+                    compare_method = base_compare
+                compare_res = compare_method(
                     result=result,
                     expect=expect,
                     res_name=latest,
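
_case_run now threads a live net instance between consecutive testings: an engine whose YAML entry sets return_net_instance: "True" hands its net downstream, and the next testing with use_upstream_net_instance: "True" consumes it instead of rebuilding from the layerfile, so e.g. a jit_save step exports exactly the weights the preceding training step produced. A condensed sketch of the hand-off contract (hypothetical driver function; the config keys are the ones used by the YAMLs below):

# sketch of _case_run's net hand-off between chained testings (simplified;
# run_engine stands in for the real engine_map dispatch)
def run_chain(testings_list, testings, run_engine):
    net = None
    res_dict = {}
    for testing in testings_list:
        if testings.get(testing).get("use_upstream_net_instance", "False") == "False":
            net = None  # this testing builds its own net from the layerfile
        res = run_engine(testing, upstream_net=net)
        res_dict[testing] = res["res"]
        # engines configured with return_net_instance: "True" hand their net back
        net = res.get("net", None)
    return res_dict
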
diff --git a/framework/e2e/PaddleLT_new/strategy/compare.py b/framework/e2e/PaddleLT_new/strategy/compare.py
index 467949ede9..7f33174387 100644
--- a/framework/e2e/PaddleLT_new/strategy/compare.py
+++ b/framework/e2e/PaddleLT_new/strategy/compare.py
@@ -8,10 +8,13 @@
 import os
 import json
-import logging
+
+# import logging
 import traceback
 import numpy as np
+
+from tools.logger import Logger
+
 framework = ""
 if os.environ.get("FRAMEWORK") == "paddle":
     import paddle
@@ -37,7 +40,13 @@ def base_compare(result, expect, res_name, exp_name, logger, delta=1e-10, rtol=1
     if isinstance(expect, str):
         raise Exception("expect is exception !!!")
 
-    if isinstance(expect, eval(f"{framework}.Tensor")) or isinstance(expect, np.ndarray):
+    if expect is None or result is None:
+        if expect is None:
+            Logger("PLT_compare").get_log().info(f"{exp_name} is None, so the {exp_name} vs {res_name} precision comparison is skipped")
+        if result is None:
+            Logger("PLT_compare").get_log().info(f"{res_name} is None, so the {exp_name} vs {res_name} precision comparison is skipped")
+        pass
+    elif isinstance(expect, eval(f"{framework}.Tensor")) or isinstance(expect, np.ndarray):
@@ -92,16 +101,19 @@ def base_compare(result, expect, res_name, exp_name, logger, delta=1e-10, rtol=1
             )
         else:
             for k, v in expect.items():
-                base_compare(
-                    result=result[k],
-                    expect=expect[k],
-                    res_name=res_name + "[{}]".format(str(k)),
-                    exp_name=exp_name + "[{}]".format(str(k)),
-                    logger=logger,
-                    delta=delta,
-                    rtol=rtol,
-                    exc_dict=exc_dict,
-                )
+                if k in result:
+                    base_compare(
+                        result=result[k],
+                        expect=expect[k],
+                        res_name=res_name + "[{}]".format(str(k)),
+                        exp_name=exp_name + "[{}]".format(str(k)),
+                        logger=logger,
+                        delta=delta,
+                        rtol=rtol,
+                        exc_dict=exc_dict,
+                    )
+                else:
+                    Logger("PLT_compare").get_log().info(f"{exp_name} has key {k} but {res_name} does not, so the precision comparison for {k} is skipped")
     elif isinstance(expect, list) or isinstance(expect, tuple):
         for i, element in enumerate(expect):
             if isinstance(result, (np.generic, np.ndarray)) or isinstance(result, eval(f"{framework}.Tensor")):
@@ -130,14 +142,49 @@ def base_compare(result, expect, res_name, exp_name, logger, delta=1e-10, rtol=1
             )
     elif isinstance(expect, (bool, int, float)):
         assert expect == result
-    elif expect is None:
-        pass
     else:
         raise Exception("expect is unknown data struction in compare_tool!!!")
 
     return exc_dict
 
 
+def infer_compare(result, expect, res_name, exp_name, logger, delta=1e-10, rtol=1e-10, exc_dict={}):
+    """
+    comparison function
+    :param result: value under test
+    :param expect: baseline value
+    :param delta: absolute tolerance
+    :param rtol: relative tolerance
+    :return:
+    """
+    # strip the backward (gradient) results
+    forward_handled_result = {"logit": []}
+    forward_handled_expect = {"logit": []}
+
+    # drop non-tensor values so they do not affect the comparison
+    if isinstance(expect["logit"], (tuple, list)):
+        for item in expect["logit"]:
+            if not isinstance(item, (int, bool, float)):
+                forward_handled_expect["logit"].append(item)
+
+    if isinstance(result["logit"], (tuple, list)):
+        for item in result["logit"]:
+            if not isinstance(item, (int, bool, float)):
+                forward_handled_result["logit"].append(item)
+
+    exc_dict = base_compare(
+        result=forward_handled_result,
+        expect=forward_handled_expect,
+        res_name=res_name,
+        exp_name=exp_name,
+        logger=logger,
+        delta=delta,
+        rtol=rtol,
+        exc_dict=exc_dict,
+    )
+    return exc_dict
+
+
 def perf_compare_legacy(baseline, latest):
     """
     comparison function
@@ -353,8 +400,6 @@
 if __name__ == "__main__":
-    from tools.logger import Logger
-
     result = {
         "logit": [paddle.to_tensor([1.0]), paddle.to_tensor([1.0])],
         "data_grad": [paddle.to_tensor([0.0]), paddle.to_tensor([0.0])],
diff --git a/framework/e2e/PaddleLT_new/yaml/dy^dy2st_train_static_inputspec^export_st_inputspec^ppinfer_new_exc_pir.yml b/framework/e2e/PaddleLT_new/yaml/dy^dy2st_train_static_inputspec^export_st_inputspec^ppinfer_new_exc_pir.yml
new file mode 100644
index 0000000000..5e71b204c3
--- /dev/null
+++ b/framework/e2e/PaddleLT_new/yaml/dy^dy2st_train_static_inputspec^export_st_inputspec^ppinfer_new_exc_pir.yml
@@ -0,0 +1,53 @@
+testings:
+  dy_train:
+    model_dtype: "float32"
+    Loss:
+      loss_name: "diy.loss.diy_loss.mean_loss"
+    optimizer:
+      optimizer_name: "diy.optimizer.diy_opt.naive_opt"
+      params:
+        opt_api: "paddle.optimizer.SGD"
+        learning_rate: 0.00001
+        step: 20
+
+  dy2st_train_static_inputspec:
+    model_dtype: "float32"
+    Loss:
+      loss_name: "diy.loss.diy_loss.mean_loss"
+    optimizer:
+      optimizer_name: "diy.optimizer.diy_opt.naive_opt"
+      params:
+        opt_api: "paddle.optimizer.SGD"
+        learning_rate: 0.00001
+        step: 20
+    return_net_instance: "True"  # the testing engine returns the net instance
+
+  jit_save_static_inputspec:
+    use_upstream_net_instance: "True"  # use the upstream net instance instead of rebuilding one
+    model_dtype: "float32"
+
+  paddle_infer_new_exc_pir:
+    jit_save_type: "jit_save_static_inputspec"
+
+compare:
+  -
+    baseline: 'dy_train'
+    latest: 'dy2st_train_static_inputspec'
+    compare_method: "base_compare"
+    precision:
+      delta: 0.00001
+      rtol: 0.000001
+  -
+    baseline: 'dy_train'
+    latest: 'paddle_infer_new_exc_pir'
+    compare_method: "infer_compare"
+    precision:
+      delta: 0.00001
+      rtol: 0.000001
+  -
+    baseline: 'dy2st_train_static_inputspec'
+    latest: 'paddle_infer_new_exc_pir'
+    compare_method: "infer_compare"
+    precision:
+      delta: 0.00001
+      rtol: 0.000001
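
infer_compare exists because an inference engine only returns forward logits, while the training baseline's "logit" entry can also carry plain Python scalars and sits next to gradient entries that have no counterpart on the inference side; the helper drops non-tensor items from both sides before delegating to base_compare, whose new guards likewise skip None results and missing keys instead of raising. An illustrative call (toy tensors stand in for real engine outputs):

import os

os.environ["FRAMEWORK"] = "paddle"  # strategy.compare picks its tensor type from this

import paddle
from strategy.compare import infer_compare
from tools.logger import Logger

# training-side baseline: tensors plus a stray Python scalar
expect = {"logit": [paddle.to_tensor([1.0]), 2, paddle.to_tensor([3.0])]}
# inference-side result: tensors only
result = {"logit": [paddle.to_tensor([1.0]), paddle.to_tensor([3.0])]}

# after filtering, both sides reduce to the same two tensors and compare cleanly
exc_dict = infer_compare(
    result=result,
    expect=expect,
    res_name="paddle_infer_new_exc_pir",
    exp_name="dy_train",
    logger=Logger("PLT_compare").get_log(),
)
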
diff --git a/framework/e2e/PaddleLT_new/yaml/dy^dy2stcinn_train^export_cinn_st_inputspec^ppinfer_new_exc_pir.yml b/framework/e2e/PaddleLT_new/yaml/dy^dy2stcinn_train^export_cinn_st_inputspec^ppinfer_new_exc_pir.yml
new file mode 100644
index 0000000000..a804cd2b37
--- /dev/null
+++ b/framework/e2e/PaddleLT_new/yaml/dy^dy2stcinn_train^export_cinn_st_inputspec^ppinfer_new_exc_pir.yml
@@ -0,0 +1,53 @@
+testings:
+  dy_train:
+    model_dtype: "float32"
+    Loss:
+      loss_name: "diy.loss.diy_loss.mean_loss"
+    optimizer:
+      optimizer_name: "diy.optimizer.diy_opt.naive_opt"
+      params:
+        opt_api: "paddle.optimizer.SGD"
+        learning_rate: 0.00001
+        step: 20
+
+  dy2st_train_cinn:
+    model_dtype: "float32"
+    Loss:
+      loss_name: "diy.loss.diy_loss.mean_loss"
+    optimizer:
+      optimizer_name: "diy.optimizer.diy_opt.naive_opt"
+      params:
+        opt_api: "paddle.optimizer.SGD"
+        learning_rate: 0.00001
+        step: 20
+    return_net_instance: "True"  # the testing engine returns the net instance
+
+  jit_save_cinn_static_inputspec:
+    use_upstream_net_instance: "True"  # use the upstream net instance instead of rebuilding one
+    model_dtype: "float32"
+
+  paddle_infer_new_exc_pir:
+    jit_save_type: "jit_save_cinn_static_inputspec"
+
+compare:
+  -
+    baseline: 'dy_train'
+    latest: 'dy2st_train_cinn'
+    compare_method: "base_compare"
+    precision:
+      delta: 0.00001
+      rtol: 0.000001
+  -
+    baseline: 'dy_train'
+    latest: 'paddle_infer_new_exc_pir'
+    compare_method: "infer_compare"
+    precision:
+      delta: 0.00001
+      rtol: 0.000001
+  -
+    baseline: 'dy2st_train_cinn'
+    latest: 'paddle_infer_new_exc_pir'
+    compare_method: "infer_compare"
+    precision:
+      delta: 0.00001
+      rtol: 0.000001
diff --git a/framework/e2e/PaddleLT_new/yaml/dy^dy2stcinn_train_inputspec_benchmark.yml b/framework/e2e/PaddleLT_new/yaml/dy^dy2stcinn_train_inputspec_benchmark.yml
new file mode 100644
index 0000000000..b849e9926a
--- /dev/null
+++ b/framework/e2e/PaddleLT_new/yaml/dy^dy2stcinn_train_inputspec_benchmark.yml
@@ -0,0 +1,28 @@
+# dy2st_train_cinn_inputspec_perf is not implemented yet
+testings:
+  dy_train_perf:
+    model_dtype: "float32"
+    Loss:
+      loss_name: "diy.loss.diy_loss.mean_loss"
+    optimizer:
+      optimizer_name: "diy.optimizer.diy_opt.naive_opt"
+      params:
+        opt_api: "paddle.optimizer.SGD"
+        learning_rate: 0.00001
+        step: 1
+
+  dy2st_train_cinn_inputspec_perf:
+    model_dtype: "float32"
+    Loss:
+      loss_name: "diy.loss.diy_loss.mean_loss"
+    optimizer:
+      optimizer_name: "diy.optimizer.diy_opt.naive_opt"
+      params:
+        opt_api: "paddle.optimizer.SGD"
+        learning_rate: 0.00001
+        step: 1
+
+compare:
+  -
+    baseline: 'dy_train_perf'
+    latest: 'dy2st_train_cinn_inputspec_perf'
diff --git a/framework/e2e/PaddleLT_new/yaml/dy^dy_train^export_st_inputspec^ppinfer_new_exc_pir.yml b/framework/e2e/PaddleLT_new/yaml/dy^dy_train^export_st_inputspec^ppinfer_new_exc_pir.yml
new file mode 100644
index 0000000000..5a089b8c73
--- /dev/null
+++ b/framework/e2e/PaddleLT_new/yaml/dy^dy_train^export_st_inputspec^ppinfer_new_exc_pir.yml
@@ -0,0 +1,33 @@
+testings:
+  dy_train:
+    model_dtype: "float32"
+    Loss:
+      loss_name: "diy.loss.diy_loss.mean_loss"
+    optimizer:
+      optimizer_name: "diy.optimizer.diy_opt.naive_opt"
+      params:
+        opt_api: "paddle.optimizer.SGD"
+        learning_rate: 0.00001
+        step: 20
+    return_net_instance: "True"  # the testing engine returns the net instance
+
+  dy_eval:
+    use_upstream_net_instance: "True"  # use the upstream net instance instead of rebuilding one
+    model_dtype: "float32"
+    return_net_instance: "True"  # the testing engine returns the net instance
+
+  jit_save_static_inputspec:
+    use_upstream_net_instance: "True"  # use the upstream net instance instead of rebuilding one
+    model_dtype: "float32"
+
+  paddle_infer_new_exc_pir:
+    jit_save_type: "jit_save_static_inputspec"
+
+compare:
+  -
+    baseline: 'dy_eval'
+    latest: 'paddle_infer_new_exc_pir'
+    # compare_method: "infer_compare"
+    precision:
+      delta: 0.00001
+      rtol: 0.000001
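
Each of these new YAMLs wires one end-to-end chain: a training testing that returns its net (return_net_instance), downstream testings that reuse it (use_upstream_net_instance), a jit_save_* export step, and a paddle_infer_new_exc_pir run that locates the export through jit_save_type; the compare list then pairs results and picks base_compare or infer_compare per entry. A sketch of how one compare entry turns into a call (hypothetical helper; the real dispatch lives in LayerTest._case_run above):

from strategy.compare import base_compare, infer_compare


def run_compares(compare_cfg, res_dict, logger):
    # compare_cfg is the parsed "compare" list from one of these YAMLs;
    # res_dict maps testing names to their {"logit": ...} results
    for comparing in compare_cfg:
        baseline, latest = comparing["baseline"], comparing["latest"]
        if comparing.get("compare_method", "base_compare") == "infer_compare":
            compare_method = infer_compare
        else:
            compare_method = base_compare
        precision = comparing.get("precision", {})
        compare_method(
            result=res_dict[latest],
            expect=res_dict[baseline],
            res_name=latest,
            exp_name=baseline,
            logger=logger,
            delta=precision.get("delta", 1e-10),
            rtol=precision.get("rtol", 1e-10),
        )
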