Commit 7dcc313

unify benchmark log to new version (#3054)
* update det infer benchmark log
1 parent 34efe84 commit 7dcc313

File tree

3 files changed: +335, -100 lines


deploy/python/benchmark_utils.py

Lines changed: 279 additions & 0 deletions
@@ -0,0 +1,279 @@
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import argparse
import os
import time
import logging

import paddle
import paddle.inference as paddle_infer

from pathlib import Path

CUR_DIR = os.path.dirname(os.path.abspath(__file__))
LOG_PATH_ROOT = f"{CUR_DIR}/../../output"

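
# PaddleInferBenchmark collects Paddle version info, predictor config,
# model/data info, timing, and resource usage, and writes a unified
# benchmark log to output/{model_name}.log as well as to stdout.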
class PaddleInferBenchmark(object):
    def __init__(self,
                 config,
                 model_info: dict={},
                 data_info: dict={},
                 perf_info: dict={},
                 resource_info: dict={},
                 **kwargs):
        """
        Construct the PaddleInferBenchmark class to format logs.
        args:
            config(paddle.inference.Config): paddle inference config
            model_info(dict): basic model info
                {'model_name': 'resnet50',
                 'precision': 'fp32'}
            data_info(dict): input data info
                {'batch_size': 1,
                 'shape': '3,224,224',
                 'data_num': 1000}
            perf_info(dict): performance result
                {'preprocess_time_s': 1.0,
                 'inference_time_s': 2.0,
                 'postprocess_time_s': 1.0,
                 'total_time_s': 4.0}
            resource_info(dict): cpu and gpu resources
                {'cpu_rss': 100,
                 'gpu_rss': 100,
                 'gpu_util': 60}
        """
        # PaddleInferBenchmark log version
        self.log_version = "1.0.3"

        # Paddle version
        self.paddle_version = paddle.__version__
        self.paddle_commit = paddle.__git_commit__
        paddle_infer_info = paddle_infer.get_version()
        self.paddle_branch = paddle_infer_info.strip().split(': ')[-1]

        # model info
        self.model_info = model_info

        # data info
        self.data_info = data_info

        # perf info
        self.perf_info = perf_info

        try:
            # required values
            self.model_name = model_info['model_name']
            self.precision = model_info['precision']

            self.batch_size = data_info['batch_size']
            self.shape = data_info['shape']
            self.data_num = data_info['data_num']

            self.inference_time_s = round(perf_info['inference_time_s'], 4)
        except KeyError:
            self.print_help()
            raise ValueError(
                "Wrong argument, please check the input arguments and their types")

        self.preprocess_time_s = perf_info.get('preprocess_time_s', 0)
        self.postprocess_time_s = perf_info.get('postprocess_time_s', 0)
        self.total_time_s = perf_info.get('total_time_s', 0)

        self.inference_time_s_90 = perf_info.get("inference_time_s_90", "")
        self.inference_time_s_99 = perf_info.get("inference_time_s_99", "")
        self.succ_rate = perf_info.get("succ_rate", "")
        self.qps = perf_info.get("qps", "")

        # conf info
        self.config_status = self.parse_config(config)

        # mem info
        if isinstance(resource_info, dict):
            self.cpu_rss_mb = int(resource_info.get('cpu_rss_mb', 0))
            self.cpu_vms_mb = int(resource_info.get('cpu_vms_mb', 0))
            self.cpu_shared_mb = int(resource_info.get('cpu_shared_mb', 0))
            self.cpu_dirty_mb = int(resource_info.get('cpu_dirty_mb', 0))
            self.cpu_util = round(resource_info.get('cpu_util', 0), 2)

            self.gpu_rss_mb = int(resource_info.get('gpu_rss_mb', 0))
            self.gpu_util = round(resource_info.get('gpu_util', 0), 2)
            self.gpu_mem_util = round(resource_info.get('gpu_mem_util', 0), 2)
        else:
            self.cpu_rss_mb = 0
            self.cpu_vms_mb = 0
            self.cpu_shared_mb = 0
            self.cpu_dirty_mb = 0
            self.cpu_util = 0

            self.gpu_rss_mb = 0
            self.gpu_util = 0
            self.gpu_mem_util = 0

        # init benchmark logger
        self.benchmark_logger()

    def benchmark_logger(self):
        """
        Initialize the benchmark logger.
        """
        # remove other logging handlers
        for handler in logging.root.handlers[:]:
            logging.root.removeHandler(handler)

        # init logger
        FORMAT = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
        log_output = f"{LOG_PATH_ROOT}/{self.model_name}.log"
        Path(f"{LOG_PATH_ROOT}").mkdir(parents=True, exist_ok=True)
        logging.basicConfig(
            level=logging.INFO,
            format=FORMAT,
            handlers=[
                logging.FileHandler(
                    filename=log_output, mode='w'),
                logging.StreamHandler(),
            ])
        self.logger = logging.getLogger(__name__)
        self.logger.info(
            f"Paddle Inference benchmark log will be saved to {log_output}")

    def parse_config(self, config) -> dict:
        """
        Parse the paddle predictor config.
        args:
            config(paddle.inference.Config): paddle inference config
        return:
            config_status(dict): dict style config info
        """
        # config_status must be initialized here so that both branches can fill it
        config_status = {}
        if isinstance(config, paddle_infer.Config):
            config_status['runtime_device'] = "gpu" if config.use_gpu(
            ) else "cpu"
            config_status['ir_optim'] = config.ir_optim()
            config_status['enable_tensorrt'] = config.tensorrt_engine_enabled()
            config_status['precision'] = self.precision
            config_status['enable_mkldnn'] = config.mkldnn_enabled()
            config_status[
                'cpu_math_library_num_threads'] = config.cpu_math_library_num_threads(
                )
        elif isinstance(config, dict):
            config_status['runtime_device'] = config.get('runtime_device', "")
            config_status['ir_optim'] = config.get('ir_optim', "")
            config_status['enable_tensorrt'] = config.get('enable_tensorrt', "")
            config_status['precision'] = config.get('precision', "")
            config_status['enable_mkldnn'] = config.get('enable_mkldnn', "")
            config_status['cpu_math_library_num_threads'] = config.get(
                'cpu_math_library_num_threads', "")
        else:
            self.print_help()
            raise ValueError(
                "Wrong config argument, please check the input argument and its type"
            )
        return config_status

    def report(self, identifier=None):
        """
        Print the log report.
        args:
            identifier(string): identify log
        """
        if identifier:
            identifier = f"[{identifier}]"
        else:
            identifier = ""

        self.logger.info("\n")
        self.logger.info(
            "---------------------- Paddle info ----------------------")
        self.logger.info(f"{identifier} paddle_version: {self.paddle_version}")
        self.logger.info(f"{identifier} paddle_commit: {self.paddle_commit}")
        self.logger.info(f"{identifier} paddle_branch: {self.paddle_branch}")
        self.logger.info(f"{identifier} log_api_version: {self.log_version}")
        self.logger.info(
            "----------------------- Conf info -----------------------")
        self.logger.info(
            f"{identifier} runtime_device: {self.config_status['runtime_device']}"
        )
        self.logger.info(
            f"{identifier} ir_optim: {self.config_status['ir_optim']}")
        self.logger.info(f"{identifier} enable_memory_optim: {True}")
        self.logger.info(
            f"{identifier} enable_tensorrt: {self.config_status['enable_tensorrt']}"
        )
        self.logger.info(
            f"{identifier} enable_mkldnn: {self.config_status['enable_mkldnn']}")
        self.logger.info(
            f"{identifier} cpu_math_library_num_threads: {self.config_status['cpu_math_library_num_threads']}"
        )
        self.logger.info(
            "----------------------- Model info ----------------------")
        self.logger.info(f"{identifier} model_name: {self.model_name}")
        self.logger.info(f"{identifier} precision: {self.precision}")
        self.logger.info(
            "----------------------- Data info -----------------------")
        self.logger.info(f"{identifier} batch_size: {self.batch_size}")
        self.logger.info(f"{identifier} input_shape: {self.shape}")
        self.logger.info(f"{identifier} data_num: {self.data_num}")
        self.logger.info(
            "----------------------- Perf info -----------------------")
        self.logger.info(
            f"{identifier} cpu_rss(MB): {self.cpu_rss_mb}, cpu_vms: {self.cpu_vms_mb}, cpu_shared_mb: {self.cpu_shared_mb}, cpu_dirty_mb: {self.cpu_dirty_mb}, cpu_util: {self.cpu_util}%"
        )
        self.logger.info(
            f"{identifier} gpu_rss(MB): {self.gpu_rss_mb}, gpu_util: {self.gpu_util}%, gpu_mem_util: {self.gpu_mem_util}%"
        )
        self.logger.info(
            f"{identifier} total time spent(s): {self.total_time_s}")
        self.logger.info(
            f"{identifier} preprocess_time(ms): {round(self.preprocess_time_s*1000, 1)}, inference_time(ms): {round(self.inference_time_s*1000, 1)}, postprocess_time(ms): {round(self.postprocess_time_s*1000, 1)}"
        )
        if self.inference_time_s_90:
            self.logger.info(
                f"{identifier} 90%_cost: {self.inference_time_s_90}, 99%_cost: {self.inference_time_s_99}, succ_rate: {self.succ_rate}"
            )
        if self.qps:
            self.logger.info(f"{identifier} QPS: {self.qps}")

    def print_help(self):
        """
        Print function usage help.
        """
        print("""Usage:
            ==== Print inference benchmark logs. ====
            config = paddle.inference.Config()
            model_info = {'model_name': 'resnet50',
                          'precision': 'fp32'}
            data_info = {'batch_size': 1,
                         'shape': '3,224,224',
                         'data_num': 1000}
            perf_info = {'preprocess_time_s': 1.0,
                         'inference_time_s': 2.0,
                         'postprocess_time_s': 1.0,
                         'total_time_s': 4.0}
            resource_info = {'cpu_rss_mb': 100,
                             'gpu_rss_mb': 100,
                             'gpu_util': 60}
            log = PaddleInferBenchmark(config, model_info, data_info, perf_info, resource_info)
            log('Test')
            """)

    def __call__(self, identifier=None):
        """
        Make the instance callable: forward to report().
        args:
            identifier(string): identify log
        """
        self.report(identifier)
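
For reference, a minimal usage sketch of the new log class (illustration only, not part of the commit; it reuses the placeholder values from the docstring and assumes the module is importable as benchmark_utils; the dict-style config exercises the parse_config fallback path):

    from benchmark_utils import PaddleInferBenchmark

    # A dict-style config; a real inference script would usually pass its
    # paddle.inference.Config instead.
    config = {
        'runtime_device': 'gpu',
        'ir_optim': True,
        'enable_tensorrt': False,
        'precision': 'fp32',
        'enable_mkldnn': False,
        'cpu_math_library_num_threads': 1,
    }
    model_info = {'model_name': 'resnet50', 'precision': 'fp32'}
    data_info = {'batch_size': 1, 'shape': '3,224,224', 'data_num': 1000}
    perf_info = {'preprocess_time_s': 1.0,   # placeholder timings (seconds)
                 'inference_time_s': 2.0,    # the only required perf key
                 'postprocess_time_s': 1.0,
                 'total_time_s': 4.0}
    resource_info = {'cpu_rss_mb': 100, 'gpu_rss_mb': 100, 'gpu_util': 60}

    log = PaddleInferBenchmark(config, model_info, data_info, perf_info,
                               resource_info)
    log('Test')  # __call__ forwards to report(); each line is tagged "[Test]"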
