
Add whole-chain automated test scripts #1097


Merged: 10 commits, Aug 5, 2021
Empty file added deploy/auto_log.log
Empty file.
42 changes: 39 additions & 3 deletions deploy/python/predict_cls.py
@@ -27,6 +27,7 @@
from python.preprocess import create_operators
from python.postprocess import build_postprocess


class ClsPredictor(Predictor):
def __init__(self, config):
super().__init__(config["Global"])
@@ -40,6 +41,29 @@ def __init__(self, config):
if "PostProcess" in config:
self.postprocess = build_postprocess(config["PostProcess"])

# benchmark support for the whole-chain project that tests each Paddle repo
self.benchmark = config["Global"].get("benchmark", False)
if self.benchmark:
import auto_log
import os
Contributor
Let's move these imports up to the top of the file with the others.

Collaborator Author
auto_log is a purpose-built package introduced specifically for the whole-chain tests; when this script is used normally, auto_log isn't installed, so it seems better to keep the import here.

pid = os.getpid()
self.auto_logger = auto_log.AutoLogger(
model_name=config["Global"].get("model_name", "cls"),
model_precision='fp16'
if config["Global"]["use_fp16"] else 'fp32',
batch_size=config["Global"].get("batch_size", 1),
data_shape=[3, 224, 224],
save_path=config["Global"].get("save_log_path",
"./auto_log.log"),
inference_config=self.config,
pids=pid,
process_name=None,
gpu_ids=None,
time_keys=[
'preprocess_time', 'inference_time', 'postprocess_time'
],
warmup=2)
Contributor
Are all of these values fixed? Shouldn't they be read from the config file?

Collaborator Author
These are fixed by the whole-chain test setup.
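
The two threads above describe the same trade-off: `auto_log` is an optional, purpose-built dependency, so the import stays local and lazy, and most `AutoLogger` arguments are filled from `Global` while `data_shape` and `time_keys` stay hard-coded for the whole-chain run. Below is a minimal sketch of that pattern; `build_auto_logger` is a hypothetical helper, not part of this PR.

```python
import os


def build_auto_logger(global_cfg, inference_config):
    """Guarded, lazy construction of the whole-chain benchmark logger."""
    try:
        import auto_log  # optional dependency, only present for whole-chain tests
    except ImportError:
        return None  # benchmarking silently disabled when auto_log is missing

    return auto_log.AutoLogger(
        model_name=global_cfg.get("model_name", "cls"),
        model_precision="fp16" if global_cfg.get("use_fp16", False) else "fp32",
        batch_size=global_cfg.get("batch_size", 1),
        data_shape=[3, 224, 224],  # fixed by the whole-chain test protocol
        save_path=global_cfg.get("save_log_path", "./auto_log.log"),
        inference_config=inference_config,
        pids=os.getpid(),
        process_name=None,
        gpu_ids=None,
        time_keys=["preprocess_time", "inference_time", "postprocess_time"],
        warmup=2)
```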


def predict(self, images):
input_names = self.paddle_predictor.get_input_names()
input_tensor = self.paddle_predictor.get_input_handle(input_names[0])
@@ -48,18 +72,26 @@ def predict(self, images):
output_tensor = self.paddle_predictor.get_output_handle(output_names[
0])

if self.benchmark:
self.auto_logger.times.start()
if not isinstance(images, (list, )):
images = [images]
for idx in range(len(images)):
for ops in self.preprocess_ops:
images[idx] = ops(images[idx])
image = np.array(images)
if self.benchmark:
self.auto_logger.times.stamp()

input_tensor.copy_from_cpu(image)
self.paddle_predictor.run()
batch_output = output_tensor.copy_to_cpu()
if self.benchmark:
self.auto_logger.times.stamp()
if self.postprocess is not None:
batch_output = self.postprocess(batch_output)
if self.benchmark:
self.auto_logger.times.end(stamp=True)
return batch_output


@@ -83,10 +115,11 @@ def main(config):
batch_names.append(img_name)
cnt += 1

if cnt % config["Global"]["batch_size"] == 0 or (idx + 1) == len(image_list):
if len(batch_imgs) == 0:
if cnt % config["Global"]["batch_size"] == 0 or (idx + 1
) == len(image_list):
if len(batch_imgs) == 0:
continue

batch_results = cls_predictor.predict(batch_imgs)
for number, result_dict in enumerate(batch_results):
filename = batch_names[number]
@@ -98,8 +131,11 @@
format(filename, clas_ids, scores_str, label_names))
batch_imgs = []
batch_names = []
if cls_predictor.benchmark:
cls_predictor.auto_logger.report()
return


if __name__ == "__main__":
args = config.parse_args()
config = config.get_config(args.config, overrides=args.override, show=True)
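
A hedged usage sketch of the benchmark path added in this file, driven programmatically instead of from the command line. The `utils.config` import path and the `key=value` override syntax are assumptions inferred from the `__main__` block above, not something this PR defines.

```python
# Sketch only: run predict_cls with Global.benchmark=True so that the
# auto_log report is printed after the per-image predictions.
from utils import config as cfg_util      # assumed home of parse_args/get_config
from python.predict_cls import main       # matches the "from python...." imports above

cfg = cfg_util.get_config(
    "configs/inference_cls.yaml",
    overrides=["Global.benchmark=True",
               "Global.infer_imgs=../dataset/ILSVRC2012/val"],
    show=False)
main(cfg)  # ends with cls_predictor.auto_logger.report() when benchmark is on
```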
7 changes: 4 additions & 3 deletions deploy/utils/predictor.py
@@ -28,7 +28,7 @@ def __init__(self, args, inference_model_dir=None):
if args.use_fp16 is True:
assert args.use_tensorrt is True
self.args = args
self.paddle_predictor = self.create_paddle_predictor(
self.paddle_predictor, self.config = self.create_paddle_predictor(
args, inference_model_dir)

def predict(self, image):
@@ -59,11 +59,12 @@ def create_paddle_predictor(self, args, inference_model_dir=None):
config.enable_tensorrt_engine(
precision_mode=Config.Precision.Half
if args.use_fp16 else Config.Precision.Float32,
max_batch_size=args.batch_size)
max_batch_size=args.batch_size,
min_subgraph_size=30)

config.enable_memory_optim()
# use zero copy
config.switch_use_feed_fetch_ops(False)
predictor = create_predictor(config)

return predictor
return predictor, config
Contributor
What is the purpose of also returning config here?

Collaborator Author
config carries the inference-related information; returning it lets us pass it on to auto_log.
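
A small sketch of the new contract discussed above: `Predictor.__init__` now keeps both the predictor handle and the `paddle.inference.Config` that built it, so callers such as `ClsPredictor` can hand the `Config` to `auto_log`. The `utils.predictor` import path and the `describe_engine` helper are assumptions for illustration.

```python
from utils.predictor import Predictor  # assumed import path inside deploy/


def describe_engine(args):
    """Return the two handles a caller gets after this change."""
    p = Predictor(args)  # args: the Global section, as in ClsPredictor.__init__
    # p.paddle_predictor -> runs inference (copy_from_cpu / run / copy_to_cpu)
    # p.config           -> paddle.inference.Config; passed to AutoLogger as
    #                       inference_config on the benchmark path
    return p.paddle_predictor, p.config
```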

51 changes: 51 additions & 0 deletions tests/DarkNet53.txt
@@ -0,0 +1,51 @@
===========================train_params===========================
model_name:DarkNet53
python:python3.7
gpu_list:0|0,1
-o Global.device:gpu
-o Global.auto_cast:null
-o Global.epochs:lite_train_infer=2|whole_train_infer=120
-o Global.output_dir:./output/
-o DataLoader.Train.sampler.batch_size:8
-o Global.pretrained_model:null
train_model_name:latest
train_infer_img_dir:./dataset/ILSVRC2012/val
null:null
##
trainer:norm_train
norm_train:tools/train.py -c ppcls/configs/ImageNet/DarkNet/DarkNet53.yaml
pact_train:null
fpgm_train:null
distill_train:null
null:null
null:null
##
===========================eval_params===========================
eval:tools/eval.py -c ppcls/configs/ImageNet/DarkNet/DarkNet53.yaml
null:null
##
===========================infer_params==========================
-o Global.save_inference_dir:./inference
-o Global.pretrained_model:
norm_export:tools/export_model.py -c ppcls/configs/ImageNet/DarkNet/DarkNet53.yaml
quant_export:null
fpgm_export:null
distill_export:null
export1:null
export2:null
infer_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/whole_chain/DarkNet53_inference.tar
infer_model:../inference/
infer_export:null
infer_quant:False
inference:python/predict_cls.py -c configs/inference_cls.yaml
-o Global.use_gpu:True|False
-o Global.enable_mkldnn:True|False
-o Global.cpu_num_threads:1|6
-o Global.batch_size:1
-o Global.use_tensorrt:True|False
-o Global.use_fp16:True|False
-o Global.inference_model_dir:../inference
-o Global.infer_imgs:../dataset/ILSVRC2012/val
-o Global.save_log_path:null
-o Global.benchmark:True
null:null
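
Each of these `key:value` files parameterizes one model for the whole-chain driver; section banners group training, eval, and inference settings, and `a|b` values enumerate the variants the driver sweeps. Below is a minimal Python sketch of the format for readers who want to inspect a params file; `load_chain_params` is a hypothetical helper, and the real harness in `tests/` is not required to work this way.

```python
from collections import OrderedDict


def load_chain_params(path):
    """Parse a whole-chain params file (e.g. tests/DarkNet53.txt) into a dict."""
    params = OrderedDict()
    with open(path, "r") as f:
        for line in f:
            line = line.strip()
            # skip blanks, "====...====" section banners and "##" separators
            if not line or line.startswith("=") or line == "##":
                continue
            key, _, value = line.partition(":")
            if key == "null":  # placeholder slots carry no information
                continue
            # "True|False" style values enumerate the settings to sweep over;
            # duplicate keys simply overwrite earlier ones in this sketch
            params[key] = value.split("|") if "|" in value else value
    return params


if __name__ == "__main__":
    cfg = load_chain_params("tests/DarkNet53.txt")
    print(cfg["model_name"], cfg["-o Global.use_gpu"])
```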
51 changes: 51 additions & 0 deletions tests/HRNet_W18_C.txt
@@ -0,0 +1,51 @@
===========================train_params===========================
model_name:HRNet_W18_C
python:python3.7
gpu_list:0|0,1
-o Global.device:gpu
-o Global.auto_cast:null
-o Global.epochs:lite_train_infer=2|whole_train_infer=120
-o Global.output_dir:./output/
-o DataLoader.Train.sampler.batch_size:8
-o Global.pretrained_model:null
train_model_name:latest
train_infer_img_dir:./dataset/ILSVRC2012/val
null:null
##
trainer:norm_train
norm_train:tools/train.py -c ppcls/configs/ImageNet/HRNet/HRNet_W18_C.yaml
pact_train:null
fpgm_train:null
distill_train:null
null:null
null:null
##
===========================eval_params===========================
eval:tools/eval.py -c ppcls/configs/ImageNet/HRNet/HRNet_W18_C.yaml
null:null
##
===========================infer_params==========================
-o Global.save_inference_dir:./inference
-o Global.pretrained_model:
norm_export:tools/export_model.py -c ppcls/configs/ImageNet/HRNet/HRNet_W18_C.yaml
quant_export:null
fpgm_export:null
distill_export:null
export1:null
export2:null
inference_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/whole_chain/HRNet_W18_C_inference.tar
infer_model:../inference/
infer_export:null
infer_quant:False
inference:python/predict_cls.py -c configs/inference_cls.yaml
-o Global.use_gpu:True|False
-o Global.enable_mkldnn:True|False
-o Global.cpu_num_threads:1|6
-o Global.batch_size:1
-o Global.use_tensorrt:True|False
-o Global.use_fp16:True|False
-o Global.inference_model_dir:../inference
-o Global.infer_imgs:../dataset/ILSVRC2012/val
-o Global.save_log_path:null
-o Global.benchmark:True
null:null
51 changes: 51 additions & 0 deletions tests/LeViT_128S.txt
@@ -0,0 +1,51 @@
===========================train_params===========================
model_name:LeViT_128S
python:python3.7
gpu_list:0|0,1
-o Global.device:gpu
-o Global.auto_cast:null
-o Global.epochs:lite_train_infer=2|whole_train_infer=120
-o Global.output_dir:./output/
-o DataLoader.Train.sampler.batch_size:8
-o Global.pretrained_model:null
train_model_name:latest
train_infer_img_dir:./dataset/ILSVRC2012/val
null:null
##
trainer:norm_train
norm_train:tools/train.py -c ppcls/configs/ImageNet/LeViT/LeViT_128S.yaml
pact_train:null
fpgm_train:null
distill_train:null
null:null
null:null
##
===========================eval_params===========================
eval:tools/eval.py -c ppcls/configs/ImageNet/LeViT/LeViT_128S.yaml
null:null
##
===========================infer_params==========================
-o Global.save_inference_dir:./inference
-o Global.pretrained_model:
norm_export:tools/export_model.py -c ppcls/configs/ImageNet/LeViT/LeViT_128S.yaml
quant_export:null
fpgm_export:null
distill_export:null
export1:null
export2:null
infer_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/whole_chain/LeViT_128S_inference.tar
infer_model:../inference/
infer_export:null
infer_quant:False
inference:python/predict_cls.py -c configs/inference_cls.yaml
-o Global.use_gpu:True|False
-o Global.enable_mkldnn:True|False
-o Global.cpu_num_threads:1|6
-o Global.batch_size:1
-o Global.use_tensorrt:True|False
-o Global.use_fp16:True|False
-o Global.inference_model_dir:../inference
-o Global.infer_imgs:../dataset/ILSVRC2012/val
-o Global.save_log_path:null
-o Global.benchmark:True
null:null
51 changes: 51 additions & 0 deletions tests/MobileNetV1.txt
@@ -0,0 +1,51 @@
===========================train_params===========================
model_name:MobileNetV1
python:python3.7
gpu_list:0|0,1
-o Global.device:gpu
-o Global.auto_cast:null
-o Global.epochs:lite_train_infer=2|whole_train_infer=120
-o Global.output_dir:./output/
-o DataLoader.Train.sampler.batch_size:8
-o Global.pretrained_model:null
train_model_name:latest
train_infer_img_dir:./dataset/ILSVRC2012/val
null:null
##
trainer:norm_train
norm_train:tools/train.py -c ppcls/configs/ImageNet/MobileNetV1/MobileNetV1.yaml
pact_train:null
fpgm_train:null
distill_train:null
null:null
null:null
##
===========================eval_params===========================
eval:tools/eval.py -c ppcls/configs/ImageNet/MobileNetV1/MobileNetV1.yaml
null:null
##
===========================infer_params==========================
-o Global.save_inference_dir:./inference
-o Global.pretrained_model:
norm_export:tools/export_model.py -c ppcls/configs/ImageNet/MobileNetV1/MobileNetV1.yaml
quant_export:null
fpgm_export:null
distill_export:null
export1:null
export2:null
inference_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/whole_chain/MobileNetV1_inference.tar
infer_model:../inference/
infer_export:null
infer_quant:False
inference:python/predict_cls.py -c configs/inference_cls.yaml
-o Global.use_gpu:True|False
-o Global.enable_mkldnn:True|False
-o Global.cpu_num_threads:1|6
-o Global.batch_size:1
-o Global.use_tensorrt:True|False
-o Global.use_fp16:True|False
-o Global.inference_model_dir:../inference
-o Global.infer_imgs:../dataset/ILSVRC2012/val
-o Global.save_log_path:null
-o Global.benchmark:True
null:null
51 changes: 51 additions & 0 deletions tests/MobileNetV2.txt
@@ -0,0 +1,51 @@
===========================train_params===========================
model_name:MobileNetV2
python:python3.7
gpu_list:0|0,1
-o Global.device:gpu
-o Global.auto_cast:null
-o Global.epochs:lite_train_infer=2|whole_train_infer=120
-o Global.output_dir:./output/
-o DataLoader.Train.sampler.batch_size:8
-o Global.pretrained_model:null
train_model_name:latest
train_infer_img_dir:./dataset/ILSVRC2012/val
null:null
##
trainer:norm_train
norm_train:tools/train.py -c ppcls/configs/ImageNet/MobileNetV2/MobileNetV2.yaml
pact_train:null
fpgm_train:null
distill_train:null
null:null
null:null
##
===========================eval_params===========================
eval:tools/eval.py -c ppcls/configs/ImageNet/MobileNetV2/MobileNetV2.yaml
null:null
##
===========================infer_params==========================
-o Global.save_inference_dir:./inference
-o Global.pretrained_model:
norm_export:tools/export_model.py -c ppcls/configs/ImageNet/MobileNetV2/MobileNetV2.yaml
quant_export:null
fpgm_export:null
distill_export:null
export1:null
export2:null
infer_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/whole_chain/MobileNetV2_inference.tar
infer_model:../inference/
infer_export:null
infer_quant:False
inference:python/predict_cls.py -c configs/inference_cls.yaml
-o Global.use_gpu:True|False
-o Global.enable_mkldnn:True|False
-o Global.cpu_num_threads:1|6
-o Global.batch_size:1
-o Global.use_tensorrt:True|False
-o Global.use_fp16:True|False
-o Global.inference_model_dir:../inference
-o Global.infer_imgs:../dataset/ILSVRC2012/val
-o Global.save_log_path:null
-o Global.benchmark:True
null:null