Commit 2197820

Merge pull request #1097 from RainFrost1/benchmark
Add whole-chain automated test scripts
2 parents 1efc5f5 + 227f4fd commit 2197820

15 files changed: +976, -6 lines

deploy/auto_log.log

Whitespace-only changes.

deploy/python/predict_cls.py

Lines changed: 39 additions & 3 deletions
@@ -27,6 +27,7 @@
 from python.preprocess import create_operators
 from python.postprocess import build_postprocess
 
+
 class ClsPredictor(Predictor):
     def __init__(self, config):
         super().__init__(config["Global"])
@@ -40,6 +41,29 @@ def __init__(self, config):
         if "PostProcess" in config:
             self.postprocess = build_postprocess(config["PostProcess"])
 
+        # for whole_chain project to test each repo of paddle
+        self.benchmark = config["Global"].get("benchmark", False)
+        if self.benchmark:
+            import auto_log
+            import os
+            pid = os.getpid()
+            self.auto_logger = auto_log.AutoLogger(
+                model_name=config["Global"].get("model_name", "cls"),
+                model_precision='fp16'
+                if config["Global"]["use_fp16"] else 'fp32',
+                batch_size=config["Global"].get("batch_size", 1),
+                data_shape=[3, 224, 224],
+                save_path=config["Global"].get("save_log_path",
+                                               "./auto_log.log"),
+                inference_config=self.config,
+                pids=pid,
+                process_name=None,
+                gpu_ids=None,
+                time_keys=[
+                    'preprocess_time', 'inference_time', 'postprocess_time'
+                ],
+                warmup=2)
+
     def predict(self, images):
         input_names = self.paddle_predictor.get_input_names()
         input_tensor = self.paddle_predictor.get_input_handle(input_names[0])
@@ -48,18 +72,26 @@ def predict(self, images):
         output_tensor = self.paddle_predictor.get_output_handle(output_names[
             0])
 
+        if self.benchmark:
+            self.auto_logger.times.start()
         if not isinstance(images, (list, )):
             images = [images]
         for idx in range(len(images)):
             for ops in self.preprocess_ops:
                 images[idx] = ops(images[idx])
         image = np.array(images)
+        if self.benchmark:
+            self.auto_logger.times.stamp()
 
         input_tensor.copy_from_cpu(image)
         self.paddle_predictor.run()
         batch_output = output_tensor.copy_to_cpu()
+        if self.benchmark:
+            self.auto_logger.times.stamp()
         if self.postprocess is not None:
             batch_output = self.postprocess(batch_output)
+        if self.benchmark:
+            self.auto_logger.times.end(stamp=True)
         return batch_output
 
 
@@ -83,10 +115,11 @@ def main(config):
             batch_names.append(img_name)
             cnt += 1
 
-        if cnt % config["Global"]["batch_size"] == 0 or (idx + 1) == len(image_list):
-            if len(batch_imgs) == 0:
+        if cnt % config["Global"]["batch_size"] == 0 or (idx + 1
+                                                         ) == len(image_list):
+            if len(batch_imgs) == 0:
                 continue
-
+
             batch_results = cls_predictor.predict(batch_imgs)
             for number, result_dict in enumerate(batch_results):
                 filename = batch_names[number]
@@ -98,8 +131,11 @@ def main(config):
                       format(filename, clas_ids, scores_str, label_names))
             batch_imgs = []
             batch_names = []
+    if cls_predictor.benchmark:
+        cls_predictor.auto_logger.report()
     return
 
+
 if __name__ == "__main__":
     args = config.parse_args()
     config = config.get_config(args.config, overrides=args.override, show=True)
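
The benchmark branch wraps predict() in a fixed call sequence: times.start() before preprocessing, times.stamp() at each stage boundary, times.end(stamp=True) after postprocessing, and a final auto_logger.report() in main(). The following is a small stand-in timer, not the auto_log implementation, just the same call pattern with placeholder sleeps, to make the sequence concrete:

import time

# Stand-in for the timing pattern the benchmark branch uses above. The three
# recorded intervals map to preprocess_time, inference_time, postprocess_time.
class StageTimer:
    def __init__(self, keys):
        self.keys = keys
        self.records = {k: [] for k in keys}
        self._stamps = []

    def start(self):
        self._stamps = [time.time()]

    def stamp(self):
        self._stamps.append(time.time())

    def end(self, stamp=False):
        if stamp:
            self.stamp()
        for key, t0, t1 in zip(self.keys, self._stamps, self._stamps[1:]):
            self.records[key].append(t1 - t0)

    def report(self):
        for key, vals in self.records.items():
            print("{}: avg {:.4f}s over {} runs".format(
                key, sum(vals) / len(vals), len(vals)))

timer = StageTimer(
    ["preprocess_time", "inference_time", "postprocess_time"])
for _ in range(3):          # a few dummy predict() calls
    timer.start()           # before preprocessing
    time.sleep(0.01)        # placeholder: preprocess ops
    timer.stamp()           # preprocess -> inference boundary
    time.sleep(0.02)        # placeholder: paddle_predictor.run()
    timer.stamp()           # inference -> postprocess boundary
    time.sleep(0.005)       # placeholder: postprocess
    timer.end(stamp=True)   # close the last interval
timer.report()              # analogous to cls_predictor.auto_logger.report()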

deploy/utils/predictor.py

Lines changed: 4 additions & 3 deletions
@@ -28,7 +28,7 @@ def __init__(self, args, inference_model_dir=None):
         if args.use_fp16 is True:
             assert args.use_tensorrt is True
         self.args = args
-        self.paddle_predictor = self.create_paddle_predictor(
+        self.paddle_predictor, self.config = self.create_paddle_predictor(
             args, inference_model_dir)
 
     def predict(self, image):
@@ -59,11 +59,12 @@ def create_paddle_predictor(self, args, inference_model_dir=None):
             config.enable_tensorrt_engine(
                 precision_mode=Config.Precision.Half
                 if args.use_fp16 else Config.Precision.Float32,
-                max_batch_size=args.batch_size)
+                max_batch_size=args.batch_size,
+                min_subgraph_size=30)
 
         config.enable_memory_optim()
         # use zero copy
         config.switch_use_feed_fetch_ops(False)
         predictor = create_predictor(config)
 
-        return predictor
+        return predictor, config
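
Callers of create_paddle_predictor must now unpack two values, since the inference Config is returned alongside the predictor so it can later be handed to AutoLogger as inference_config. Below is a hedged sketch of that calling pattern with a hypothetical helper name and placeholder model paths; the TensorRT arguments mirror the diff, including the new min_subgraph_size=30:

# Hedged sketch, not the repository's Predictor class: a free function that
# builds a Paddle inference predictor the same way the patched
# create_paddle_predictor does, and returns (predictor, config).
from paddle.inference import Config, create_predictor

def build_predictor(model_dir, use_tensorrt=False, use_fp16=False, batch_size=1):
    config = Config(model_dir + "/inference.pdmodel",       # placeholder paths
                    model_dir + "/inference.pdiparams")
    config.enable_use_gpu(8000, 0)                           # 8000 MB pool, GPU 0
    if use_tensorrt:
        config.enable_tensorrt_engine(
            precision_mode=Config.Precision.Half
            if use_fp16 else Config.Precision.Float32,
            max_batch_size=batch_size,
            min_subgraph_size=30)   # new: skip TensorRT for very small subgraphs
    config.enable_memory_optim()
    config.switch_use_feed_fetch_ops(False)                  # zero-copy tensors
    predictor = create_predictor(config)
    return predictor, config        # both values, as in the patched return

# predictor, config = build_predictor("./inference")         # hypothetical path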

tests/DarkNet53.txt

Lines changed: 51 additions & 0 deletions
@@ -0,0 +1,51 @@
+===========================train_params===========================
+model_name:DarkNet53
+python:python3.7
+gpu_list:0|0,1
+-o Global.device:gpu
+-o Global.auto_cast:null
+-o Global.epochs:lite_train_infer=2|whole_train_infer=120
+-o Global.output_dir:./output/
+-o DataLoader.Train.sampler.batch_size:8
+-o Global.pretrained_model:null
+train_model_name:latest
+train_infer_img_dir:./dataset/ILSVRC2012/val
+null:null
+##
+trainer:norm_train
+norm_train:tools/train.py -c ppcls/configs/ImageNet/DarkNet/DarkNet53.yaml
+pact_train:null
+fpgm_train:null
+distill_train:null
+null:null
+null:null
+##
+===========================eval_params===========================
+eval:tools/eval.py -c ppcls/configs/ImageNet/DarkNet/DarkNet53.yaml
+null:null
+##
+===========================infer_params==========================
+-o Global.save_inference_dir:./inference
+-o Global.pretrained_model:
+norm_export:tools/export_model.py -c ppcls/configs/ImageNet/DarkNet/DarkNet53.yaml
+quant_export:null
+fpgm_export:null
+distill_export:null
+export1:null
+export2:null
+infer_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/whole_chain/DarkNet53_inference.tar
+infer_model:../inference/
+infer_export:null
+infer_quant:False
+inference:python/predict_cls.py -c configs/inference_cls.yaml
+-o Global.use_gpu:True|False
+-o Global.enable_mkldnn:True|False
+-o Global.cpu_num_threads:1|6
+-o Global.batch_size:1
+-o Global.use_tensorrt:True|False
+-o Global.use_fp16:True|False
+-o Global.inference_model_dir:../inference
+-o Global.infer_imgs:../dataset/ILSVRC2012/val
+-o Global.save_log_path:null
+-o Global.benchmark:True
+null:null
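
The tests/*.txt files all share this plain "key:value" layout: "====" banners mark sections, "##" separates groups, "|" lists the alternative values the whole-chain script sweeps over, and null marks unused slots. The following is a rough, hypothetical parser sketch of that layout for illustration only; it is not the actual test harness:

# Hypothetical reader for the whole-chain config layout shown above.
def parse_chain_config(path):
    params = {}
    with open(path, encoding="utf-8") as f:
        for line in f:
            line = line.strip()
            # section banners and '##' separators carry no key/value pair
            if not line or line.startswith("=") or line == "##":
                continue
            key, _, value = line.partition(":")
            # '|' separates the alternative values swept by the test script
            params.setdefault(key, []).append(value.split("|"))
    return params

# e.g. parse_chain_config("tests/DarkNet53.txt")["gpu_list"] -> [["0", "0,1"]]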

tests/HRNet_W18_C.txt

Lines changed: 51 additions & 0 deletions
@@ -0,0 +1,51 @@
+===========================train_params===========================
+model_name:HRNet_W18_C
+python:python3.7
+gpu_list:0|0,1
+-o Global.device:gpu
+-o Global.auto_cast:null
+-o Global.epochs:lite_train_infer=2|whole_train_infer=120
+-o Global.output_dir:./output/
+-o DataLoader.Train.sampler.batch_size:8
+-o Global.pretrained_model:null
+train_model_name:latest
+train_infer_img_dir:./dataset/ILSVRC2012/val
+null:null
+##
+trainer:norm_train
+norm_train:tools/train.py -c ppcls/configs/ImageNet/HRNet/HRNet_W18_C.yaml
+pact_train:null
+fpgm_train:null
+distill_train:null
+null:null
+null:null
+##
+===========================eval_params===========================
+eval:tools/eval.py -c ppcls/configs/ImageNet/HRNet/HRNet_W18_C.yaml
+null:null
+##
+===========================infer_params==========================
+-o Global.save_inference_dir:./inference
+-o Global.pretrained_model:
+norm_export:tools/export_model.py -c ppcls/configs/ImageNet/HRNet/HRNet_W18_C.yaml
+quant_export:null
+fpgm_export:null
+distill_export:null
+export1:null
+export2:null
+inference_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/whole_chain/HRNet_W18_C_inference.tar
+infer_model:../inference/
+infer_export:null
+infer_quant:False
+inference:python/predict_cls.py -c configs/inference_cls.yaml
+-o Global.use_gpu:True|False
+-o Global.enable_mkldnn:True|False
+-o Global.cpu_num_threads:1|6
+-o Global.batch_size:1
+-o Global.use_tensorrt:True|False
+-o Global.use_fp16:True|False
+-o Global.inference_model_dir:../inference
+-o Global.infer_imgs:../dataset/ILSVRC2012/val
+-o Global.save_log_path:null
+-o Global.benchmark:True
+null:null

tests/LeViT_128S.txt

Lines changed: 51 additions & 0 deletions
@@ -0,0 +1,51 @@
+===========================train_params===========================
+model_name:LeViT_128S
+python:python3.7
+gpu_list:0|0,1
+-o Global.device:gpu
+-o Global.auto_cast:null
+-o Global.epochs:lite_train_infer=2|whole_train_infer=120
+-o Global.output_dir:./output/
+-o DataLoader.Train.sampler.batch_size:8
+-o Global.pretrained_model:null
+train_model_name:latest
+train_infer_img_dir:./dataset/ILSVRC2012/val
+null:null
+##
+trainer:norm_train
+norm_train:tools/train.py -c ppcls/configs/ImageNet/LeViT/LeViT_128S.yaml
+pact_train:null
+fpgm_train:null
+distill_train:null
+null:null
+null:null
+##
+===========================eval_params===========================
+eval:tools/eval.py -c ppcls/configs/ImageNet/LeViT/LeViT_128S.yaml
+null:null
+##
+===========================infer_params==========================
+-o Global.save_inference_dir:./inference
+-o Global.pretrained_model:
+norm_export:tools/export_model.py -c ppcls/configs/ImageNet/LeViT/LeViT_128S.yaml
+quant_export:null
+fpgm_export:null
+distill_export:null
+export1:null
+export2:null
+infer_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/whole_chain/LeViT_128S_inference.tar
+infer_model:../inference/
+infer_export:null
+infer_quant:False
+inference:python/predict_cls.py -c configs/inference_cls.yaml
+-o Global.use_gpu:True|False
+-o Global.enable_mkldnn:True|False
+-o Global.cpu_num_threads:1|6
+-o Global.batch_size:1
+-o Global.use_tensorrt:True|False
+-o Global.use_fp16:True|False
+-o Global.inference_model_dir:../inference
+-o Global.infer_imgs:../dataset/ILSVRC2012/val
+-o Global.save_log_path:null
+-o Global.benchmark:True
+null:null

tests/MobileNetV1.txt

Lines changed: 51 additions & 0 deletions
@@ -0,0 +1,51 @@
+===========================train_params===========================
+model_name:MobileNetV1
+python:python3.7
+gpu_list:0|0,1
+-o Global.device:gpu
+-o Global.auto_cast:null
+-o Global.epochs:lite_train_infer=2|whole_train_infer=120
+-o Global.output_dir:./output/
+-o DataLoader.Train.sampler.batch_size:8
+-o Global.pretrained_model:null
+train_model_name:latest
+train_infer_img_dir:./dataset/ILSVRC2012/val
+null:null
+##
+trainer:norm_train
+norm_train:tools/train.py -c ppcls/configs/ImageNet/MobileNetV1/MobileNetV1.yaml
+pact_train:null
+fpgm_train:null
+distill_train:null
+null:null
+null:null
+##
+===========================eval_params===========================
+eval:tools/eval.py -c ppcls/configs/ImageNet/MobileNetV1/MobileNetV1.yaml
+null:null
+##
+===========================infer_params==========================
+-o Global.save_inference_dir:./inference
+-o Global.pretrained_model:
+norm_export:tools/export_model.py -c ppcls/configs/ImageNet/MobileNetV1/MobileNetV1.yaml
+quant_export:null
+fpgm_export:null
+distill_export:null
+export1:null
+export2:null
+inference_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/whole_chain/MobileNetV1_inference.tar
+infer_model:../inference/
+infer_export:null
+infer_quant:False
+inference:python/predict_cls.py -c configs/inference_cls.yaml
+-o Global.use_gpu:True|False
+-o Global.enable_mkldnn:True|False
+-o Global.cpu_num_threads:1|6
+-o Global.batch_size:1
+-o Global.use_tensorrt:True|False
+-o Global.use_fp16:True|False
+-o Global.inference_model_dir:../inference
+-o Global.infer_imgs:../dataset/ILSVRC2012/val
+-o Global.save_log_path:null
+-o Global.benchmark:True
+null:null

tests/MobileNetV2.txt

Lines changed: 51 additions & 0 deletions
@@ -0,0 +1,51 @@
+===========================train_params===========================
+model_name:MobileNetV2
+python:python3.7
+gpu_list:0|0,1
+-o Global.device:gpu
+-o Global.auto_cast:null
+-o Global.epochs:lite_train_infer=2|whole_train_infer=120
+-o Global.output_dir:./output/
+-o DataLoader.Train.sampler.batch_size:8
+-o Global.pretrained_model:null
+train_model_name:latest
+train_infer_img_dir:./dataset/ILSVRC2012/val
+null:null
+##
+trainer:norm_train
+norm_train:tools/train.py -c ppcls/configs/ImageNet/MobileNetV2/MobileNetV2.yaml
+pact_train:null
+fpgm_train:null
+distill_train:null
+null:null
+null:null
+##
+===========================eval_params===========================
+eval:tools/eval.py -c ppcls/configs/ImageNet/MobileNetV2/MobileNetV2.yaml
+null:null
+##
+===========================infer_params==========================
+-o Global.save_inference_dir:./inference
+-o Global.pretrained_model:
+norm_export:tools/export_model.py -c ppcls/configs/ImageNet/MobileNetV2/MobileNetV2.yaml
+quant_export:null
+fpgm_export:null
+distill_export:null
+export1:null
+export2:null
+infer_model_url:https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/whole_chain/MobileNetV2_inference.tar
+infer_model:../inference/
+infer_export:null
+infer_quant:False
+inference:python/predict_cls.py -c configs/inference_cls.yaml
+-o Global.use_gpu:True|False
+-o Global.enable_mkldnn:True|False
+-o Global.cpu_num_threads:1|6
+-o Global.batch_size:1
+-o Global.use_tensorrt:True|False
+-o Global.use_fp16:True|False
+-o Global.inference_model_dir:../inference
+-o Global.infer_imgs:../dataset/ILSVRC2012/val
+-o Global.save_log_path:null
+-o Global.benchmark:True
+null:null
