commit f58ad9dd5b

@@ -1,232 +0,0 @@
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import argparse
import os
import time
import logging

import paddle
import paddle.inference as paddle_infer

from pathlib import Path

CUR_DIR = os.path.dirname(os.path.abspath(__file__))

class PaddleInferBenchmark(object):
    def __init__(self,
                 config,
                 model_info: dict={},
                 data_info: dict={},
                 perf_info: dict={},
                 resource_info: dict={},
                 save_log_path: str="",
                 **kwargs):
        """
        Construct PaddleInferBenchmark Class to format logs.
        args:
            config(paddle.inference.Config): paddle inference config
            model_info(dict): basic model info
                {'model_name': 'resnet50',
                 'precision': 'fp32'}
            data_info(dict): input data info
                {'batch_size': 1,
                 'shape': '3,224,224',
                 'data_num': 1000}
            perf_info(dict): performance result
                {'preprocess_time_s': 1.0,
                 'inference_time_s': 2.0,
                 'postprocess_time_s': 1.0,
                 'total_time_s': 4.0}
            resource_info(dict): cpu and gpu resources
                {'cpu_rss_mb': 100,
                 'gpu_rss_mb': 100,
                 'gpu_util': 60}
        """
        # PaddleInferBenchmark Log Version
        self.log_version = 1.0

        # Paddle Version
        self.paddle_version = paddle.__version__
        self.paddle_commit = paddle.__git_commit__
        paddle_infer_info = paddle_infer.get_version()
        self.paddle_branch = paddle_infer_info.strip().split(': ')[-1]

        # model info
        self.model_info = model_info

        # data info
        self.data_info = data_info

        # perf info
        self.perf_info = perf_info

        try:
            self.model_name = model_info['model_name']
            self.precision = model_info['precision']

            self.batch_size = data_info['batch_size']
            self.shape = data_info['shape']
            self.data_num = data_info['data_num']

            self.preprocess_time_s = round(perf_info['preprocess_time_s'], 4)
            self.inference_time_s = round(perf_info['inference_time_s'], 4)
            self.postprocess_time_s = round(perf_info['postprocess_time_s'], 4)
            self.total_time_s = round(perf_info['total_time_s'], 4)
        except:
            self.print_help()
            raise ValueError(
                "Set argument wrong, please check input argument and its type")

        # conf info
        self.config_status = self.parse_config(config)
        self.save_log_path = save_log_path
        # mem info
        if isinstance(resource_info, dict):
            self.cpu_rss_mb = int(resource_info.get('cpu_rss_mb', 0))
            self.gpu_rss_mb = int(resource_info.get('gpu_rss_mb', 0))
            self.gpu_util = round(resource_info.get('gpu_util', 0), 2)
        else:
            self.cpu_rss_mb = 0
            self.gpu_rss_mb = 0
            self.gpu_util = 0

        # init benchmark logger
        self.benchmark_logger()

    def benchmark_logger(self):
        """
        benchmark logger
        """
        # Init logger
        FORMAT = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
        log_output = f"{self.save_log_path}/{self.model_name}.log"
        Path(f"{self.save_log_path}").mkdir(parents=True, exist_ok=True)
        logging.basicConfig(
            level=logging.INFO,
            format=FORMAT,
            handlers=[
                logging.FileHandler(
                    filename=log_output, mode='w'),
                logging.StreamHandler(),
            ])
        self.logger = logging.getLogger(__name__)
        self.logger.info(
            f"Paddle Inference benchmark log will be saved to {log_output}")

    def parse_config(self, config) -> dict:
        """
        parse paddle predictor config
        args:
            config(paddle.inference.Config): paddle inference config
        return:
            config_status(dict): dict style config info
        """
        config_status = {}
        config_status['runtime_device'] = "gpu" if config.use_gpu() else "cpu"
        config_status['ir_optim'] = config.ir_optim()
        config_status['enable_tensorrt'] = config.tensorrt_engine_enabled()
        config_status['precision'] = self.precision
        config_status['enable_mkldnn'] = config.mkldnn_enabled()
        config_status[
            'cpu_math_library_num_threads'] = config.cpu_math_library_num_threads(
            )
        return config_status

    def report(self, identifier=None):
        """
        print log report
        args:
            identifier(string): identify log
        """
        if identifier:
            identifier = f"[{identifier}]"
        else:
            identifier = ""

        self.logger.info("\n")
        self.logger.info(
            "---------------------- Paddle info ----------------------")
        self.logger.info(f"{identifier} paddle_version: {self.paddle_version}")
        self.logger.info(f"{identifier} paddle_commit: {self.paddle_commit}")
        self.logger.info(f"{identifier} paddle_branch: {self.paddle_branch}")
        self.logger.info(f"{identifier} log_api_version: {self.log_version}")
        self.logger.info(
            "----------------------- Conf info -----------------------")
        self.logger.info(
            f"{identifier} runtime_device: {self.config_status['runtime_device']}"
        )
        self.logger.info(
            f"{identifier} ir_optim: {self.config_status['ir_optim']}")
        self.logger.info(f"{identifier} enable_memory_optim: {True}")
        self.logger.info(
            f"{identifier} enable_tensorrt: {self.config_status['enable_tensorrt']}"
        )
        self.logger.info(
            f"{identifier} enable_mkldnn: {self.config_status['enable_mkldnn']}")
        self.logger.info(
            f"{identifier} cpu_math_library_num_threads: {self.config_status['cpu_math_library_num_threads']}"
        )
        self.logger.info(
            "----------------------- Model info ----------------------")
        self.logger.info(f"{identifier} model_name: {self.model_name}")
        self.logger.info(f"{identifier} precision: {self.precision}")
        self.logger.info(
            "----------------------- Data info -----------------------")
        self.logger.info(f"{identifier} batch_size: {self.batch_size}")
        self.logger.info(f"{identifier} input_shape: {self.shape}")
        self.logger.info(f"{identifier} data_num: {self.data_num}")
        self.logger.info(
            "----------------------- Perf info -----------------------")
        self.logger.info(
            f"{identifier} cpu_rss(MB): {self.cpu_rss_mb}, gpu_rss(MB): {self.gpu_rss_mb}, gpu_util: {self.gpu_util}%"
        )
        self.logger.info(
            f"{identifier} total time spent(s): {self.total_time_s}")
        self.logger.info(
            f"{identifier} preprocess_time(ms): {round(self.preprocess_time_s*1000, 1)}, inference_time(ms): {round(self.inference_time_s*1000, 1)}, postprocess_time(ms): {round(self.postprocess_time_s*1000, 1)}"
        )

    def print_help(self):
        """
        print function help
        """
        print("""Usage:
            ==== Print inference benchmark logs. ====
            config = paddle.inference.Config()
            model_info = {'model_name': 'resnet50',
                          'precision': 'fp32'}
            data_info = {'batch_size': 1,
                         'shape': '3,224,224',
                         'data_num': 1000}
            perf_info = {'preprocess_time_s': 1.0,
                         'inference_time_s': 2.0,
                         'postprocess_time_s': 1.0,
                         'total_time_s': 4.0}
            resource_info = {'cpu_rss_mb': 100,
                             'gpu_rss_mb': 100,
                             'gpu_util': 60}
            log = PaddleInferBenchmark(config, model_info, data_info, perf_info, resource_info)
            log('Test')
            """)

    def __call__(self, identifier=None):
        """
        __call__
        args:
            identifier(string): identify log
        """
        self.report(identifier)

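For reference, print_help above documents the intended call pattern of the deleted class. A minimal sketch (placeholder model paths and made-up timing numbers; a real paddle.inference.Config built from actual model files is needed for parse_config to succeed):

    import paddle.inference as paddle_infer
    import tools.infer.benchmark_utils as benchmark_utils

    # Placeholder paths; any exported Paddle inference model works here.
    config = paddle_infer.Config("inference.pdmodel", "inference.pdiparams")

    model_info = {'model_name': 'resnet50', 'precision': 'fp32'}
    data_info = {'batch_size': 1, 'shape': '3,224,224', 'data_num': 1000}
    perf_info = {'preprocess_time_s': 1.0, 'inference_time_s': 2.0,
                 'postprocess_time_s': 1.0, 'total_time_s': 4.0}
    resource_info = {'cpu_rss_mb': 100, 'gpu_rss_mb': 100, 'gpu_util': 60}

    log = benchmark_utils.PaddleInferBenchmark(
        config, model_info, data_info, perf_info, resource_info,
        save_log_path="./output")
    log('Test')  # __call__ forwards the identifier to report()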
@@ -48,8 +48,6 @@ class TextClassifier(object):
        self.predictor, self.input_tensor, self.output_tensors, _ = \
            utility.create_predictor(args, 'cls', logger)

        self.cls_times = utility.Timer()

    def resize_norm_img(self, img):
        imgC, imgH, imgW = self.cls_image_shape
        h = img.shape[0]

@@ -85,35 +83,28 @@ class TextClassifier(object):
        cls_res = [['', 0.0]] * img_num
        batch_num = self.cls_batch_num
        elapse = 0
        self.cls_times.total_time.start()
        for beg_img_no in range(0, img_num, batch_num):

            end_img_no = min(img_num, beg_img_no + batch_num)
            norm_img_batch = []
            max_wh_ratio = 0
            starttime = time.time()
            for ino in range(beg_img_no, end_img_no):
                h, w = img_list[indices[ino]].shape[0:2]
                wh_ratio = w * 1.0 / h
                max_wh_ratio = max(max_wh_ratio, wh_ratio)
            self.cls_times.preprocess_time.start()
            for ino in range(beg_img_no, end_img_no):
                norm_img = self.resize_norm_img(img_list[indices[ino]])
                norm_img = norm_img[np.newaxis, :]
                norm_img_batch.append(norm_img)
            norm_img_batch = np.concatenate(norm_img_batch)
            norm_img_batch = norm_img_batch.copy()
            starttime = time.time()
            self.cls_times.preprocess_time.end()
            self.cls_times.inference_time.start()

            self.input_tensor.copy_from_cpu(norm_img_batch)
            self.predictor.run()
            prob_out = self.output_tensors[0].copy_to_cpu()
            self.cls_times.inference_time.end()
            self.cls_times.postprocess_time.start()
            self.predictor.try_shrink_memory()
            cls_result = self.postprocess_op(prob_out)
            self.cls_times.postprocess_time.end()
            elapse += time.time() - starttime
            for rno in range(len(cls_result)):
                label, score = cls_result[rno]

@@ -121,9 +112,7 @@ class TextClassifier(object):
                if '180' in label and score > self.cls_thresh:
                    img_list[indices[beg_img_no + rno]] = cv2.rotate(
                        img_list[indices[beg_img_no + rno]], 1)
        self.cls_times.total_time.end()
        self.cls_times.img_num += img_num
        elapse = self.cls_times.total_time.value()
        elapse = time.time() - starttime
        return img_list, cls_res, elapse

@@ -31,8 +31,6 @@ from ppocr.utils.utility import get_image_file_list, check_and_read_gif
from ppocr.data import create_operators, transform
from ppocr.postprocess import build_post_process

# import tools.infer.benchmark_utils as benchmark_utils

logger = get_logger()

@@ -28,7 +28,6 @@ import traceback
import paddle

import tools.infer.utility as utility
import tools.infer.benchmark_utils as benchmark_utils
from ppocr.postprocess import build_post_process
from ppocr.utils.logging import get_logger
from ppocr.utils.utility import get_image_file_list, check_and_read_gif

@@ -66,8 +65,6 @@ class TextRecognizer(object):
        self.predictor, self.input_tensor, self.output_tensors, self.config = \
            utility.create_predictor(args, 'rec', logger)

        self.rec_times = utility.Timer()

    def resize_norm_img(self, img, max_wh_ratio):
        imgC, imgH, imgW = self.rec_image_shape
        assert imgC == img.shape[2]

@@ -168,14 +165,13 @@ class TextRecognizer(object):
            width_list.append(img.shape[1] / float(img.shape[0]))
        # Sorting can speed up the recognition process
        indices = np.argsort(np.array(width_list))
        self.rec_times.total_time.start()
        rec_res = [['', 0.0]] * img_num
        batch_num = self.rec_batch_num
        st = time.time()
        for beg_img_no in range(0, img_num, batch_num):
            end_img_no = min(img_num, beg_img_no + batch_num)
            norm_img_batch = []
            max_wh_ratio = 0
            self.rec_times.preprocess_time.start()
            for ino in range(beg_img_no, end_img_no):
                h, w = img_list[indices[ino]].shape[0:2]
                wh_ratio = w * 1.0 / h

@@ -216,23 +212,18 @@ class TextRecognizer(object):
                    gsrm_slf_attn_bias1_list,
                    gsrm_slf_attn_bias2_list,
                ]
                self.rec_times.preprocess_time.end()
                self.rec_times.inference_time.start()
                input_names = self.predictor.get_input_names()
                for i in range(len(input_names)):
                    input_tensor = self.predictor.get_input_handle(input_names[
                        i])
                    input_tensor.copy_from_cpu(inputs[i])
                self.predictor.run()
                self.rec_times.inference_time.end()
                outputs = []
                for output_tensor in self.output_tensors:
                    output = output_tensor.copy_to_cpu()
                    outputs.append(output)
                preds = {"predict": outputs[2]}
            else:
                self.rec_times.preprocess_time.end()
                self.rec_times.inference_time.start()
                self.input_tensor.copy_from_cpu(norm_img_batch)
                self.predictor.run()

@@ -241,15 +232,11 @@ class TextRecognizer(object):
                    output = output_tensor.copy_to_cpu()
                    outputs.append(output)
                preds = outputs[0]
                self.rec_times.inference_time.end()
            self.rec_times.postprocess_time.start()
            rec_result = self.postprocess_op(preds)
            for rno in range(len(rec_result)):
                rec_res[indices[beg_img_no + rno]] = rec_result[rno]
            self.rec_times.postprocess_time.end()
            self.rec_times.img_num += int(norm_img_batch.shape[0])
        self.rec_times.total_time.end()
        return rec_res, self.rec_times.total_time.value()

        return rec_res, time.time() - st


def main(args):

@@ -278,12 +265,6 @@ def main(args):
        img_list.append(img)
    try:
        rec_res, _ = text_recognizer(img_list)
        if args.benchmark:
            cm, gm, gu = utility.get_current_memory_mb(0)
            cpu_mem += cm
            gpu_mem += gm
            gpu_util += gu
            count += 1

    except Exception as E:
        logger.info(traceback.format_exc())

@@ -292,39 +273,6 @@ def main(args):
    for ino in range(len(img_list)):
        logger.info("Predicts of {}:{}".format(valid_image_file_list[ino],
                                               rec_res[ino]))
    if args.benchmark:
        mems = {
            'cpu_rss_mb': cpu_mem / count,
            'gpu_rss_mb': gpu_mem / count,
            'gpu_util': gpu_util * 100 / count
        }
    else:
        mems = None
    logger.info("The predict time about recognizer module is as follows: ")
    rec_time_dict = text_recognizer.rec_times.report(average=True)
    rec_model_name = args.rec_model_dir

    if args.benchmark:
        # construct log information
        model_info = {
            'model_name': args.rec_model_dir.split('/')[-1],
            'precision': args.precision
        }
        data_info = {
            'batch_size': args.rec_batch_num,
            'shape': 'dynamic_shape',
            'data_num': rec_time_dict['img_num']
        }
        perf_info = {
            'preprocess_time_s': rec_time_dict['preprocess_time'],
            'inference_time_s': rec_time_dict['inference_time'],
            'postprocess_time_s': rec_time_dict['postprocess_time'],
            'total_time_s': rec_time_dict['total_time']
        }
        benchmark_log = benchmark_utils.PaddleInferBenchmark(
            text_recognizer.config, model_info, data_info, perf_info, mems,
            args.save_log_path)
        benchmark_log("Rec")


if __name__ == "__main__":

@@ -33,8 +33,7 @@ import tools.infer.predict_det as predict_det
import tools.infer.predict_cls as predict_cls
from ppocr.utils.utility import get_image_file_list, check_and_read_gif
from ppocr.utils.logging import get_logger
from tools.infer.utility import draw_ocr_box_txt, get_current_memory_mb
import tools.infer.benchmark_utils as benchmark_utils
from tools.infer.utility import draw_ocr_box_txt
logger = get_logger()

@@ -158,7 +157,7 @@ def main(args):
        img = np.random.uniform(0, 255, [640, 640, 3]).astype(np.uint8)
        for i in range(10):
            res = text_sys(img)

    total_time = 0
    cpu_mem, gpu_mem, gpu_util = 0, 0, 0
    _st = time.time()

@@ -175,12 +174,6 @@ def main(args):
        dt_boxes, rec_res = text_sys(img)
        elapse = time.time() - starttime
        total_time += elapse
        if args.benchmark and idx % 20 == 0:
            cm, gm, gu = get_current_memory_mb(0)
            cpu_mem += cm
            gpu_mem += gm
            gpu_util += gu
            count += 1

        logger.info(
            str(idx) + " Predict time of %s: %.3fs" % (image_file, elapse))

@@ -215,61 +208,6 @@ def main(args):
    logger.info("\nThe predict total time is {}".format(total_time))

    img_num = text_sys.text_detector.det_times.img_num
    if args.benchmark:
        mems = {
            'cpu_rss_mb': cpu_mem / count,
            'gpu_rss_mb': gpu_mem / count,
            'gpu_util': gpu_util * 100 / count
        }
    else:
        mems = None
    det_time_dict = text_sys.text_detector.det_times.report(average=True)
    rec_time_dict = text_sys.text_recognizer.rec_times.report(average=True)
    det_model_name = args.det_model_dir
    rec_model_name = args.rec_model_dir

    # construct det log information
    model_info = {
        'model_name': args.det_model_dir.split('/')[-1],
        'precision': args.precision
    }
    data_info = {
        'batch_size': 1,
        'shape': 'dynamic_shape',
        'data_num': det_time_dict['img_num']
    }
    perf_info = {
        'preprocess_time_s': det_time_dict['preprocess_time'],
        'inference_time_s': det_time_dict['inference_time'],
        'postprocess_time_s': det_time_dict['postprocess_time'],
        'total_time_s': det_time_dict['total_time']
    }

    benchmark_log = benchmark_utils.PaddleInferBenchmark(
        text_sys.text_detector.config, model_info, data_info, perf_info, mems,
        args.save_log_path)
    benchmark_log("Det")

    # construct rec log information
    model_info = {
        'model_name': args.rec_model_dir.split('/')[-1],
        'precision': args.precision
    }
    data_info = {
        'batch_size': args.rec_batch_num,
        'shape': 'dynamic_shape',
        'data_num': rec_time_dict['img_num']
    }
    perf_info = {
        'preprocess_time_s': rec_time_dict['preprocess_time'],
        'inference_time_s': rec_time_dict['inference_time'],
        'postprocess_time_s': rec_time_dict['postprocess_time'],
        'total_time_s': rec_time_dict['total_time']
    }
    benchmark_log = benchmark_utils.PaddleInferBenchmark(
        text_sys.text_recognizer.config, model_info, data_info, perf_info, mems,
        args.save_log_path)
    benchmark_log("Rec")


if __name__ == "__main__":

@@ -125,76 +125,6 @@ def parse_args():
    return parser.parse_args()


class Times(object):
    def __init__(self):
        self.time = 0.
        self.st = 0.
        self.et = 0.

    def start(self):
        self.st = time.time()

    def end(self, accumulative=True):
        self.et = time.time()
        if accumulative:
            self.time += self.et - self.st
        else:
            self.time = self.et - self.st

    def reset(self):
        self.time = 0.
        self.st = 0.
        self.et = 0.

    def value(self):
        return round(self.time, 4)


class Timer(Times):
    def __init__(self):
        super(Timer, self).__init__()
        self.total_time = Times()
        self.preprocess_time = Times()
        self.inference_time = Times()
        self.postprocess_time = Times()
        self.img_num = 0

    def info(self, average=False):
        logger.info("----------------------- Perf info -----------------------")
        logger.info("total_time: {}, img_num: {}".format(self.total_time.value(
        ), self.img_num))
        preprocess_time = round(self.preprocess_time.value() / self.img_num,
                                4) if average else self.preprocess_time.value()
        postprocess_time = round(
            self.postprocess_time.value() / self.img_num,
            4) if average else self.postprocess_time.value()
        inference_time = round(self.inference_time.value() / self.img_num,
                               4) if average else self.inference_time.value()

        average_latency = self.total_time.value() / self.img_num
        logger.info("average_latency(ms): {:.2f}, QPS: {:.2f}".format(
            average_latency * 1000, 1 / average_latency))
        logger.info(
            "preprocess_latency(ms): {:.2f}, inference_latency(ms): {:.2f}, postprocess_latency(ms): {:.2f}".
            format(preprocess_time * 1000, inference_time * 1000,
                   postprocess_time * 1000))

    def report(self, average=False):
        dic = {}
        dic['preprocess_time'] = round(
            self.preprocess_time.value() / self.img_num,
            4) if average else self.preprocess_time.value()
        dic['postprocess_time'] = round(
            self.postprocess_time.value() / self.img_num,
            4) if average else self.postprocess_time.value()
        dic['inference_time'] = round(
            self.inference_time.value() / self.img_num,
            4) if average else self.inference_time.value()
        dic['img_num'] = self.img_num
        dic['total_time'] = round(self.total_time.value(), 4)
        return dic


def create_predictor(args, mode, logger):
    if mode == "det":
        model_dir = args.det_model_dir

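For orientation, a small sketch of how the Times/Timer helpers above are driven (the same pattern the cls/rec predictors use; the sleeps stand in for real work, the import path is assumed, and img_num must be non-zero before calling report(average=True)):

    import time

    from tools.infer.utility import Timer  # assumed import path for the class above

    timer = Timer()
    timer.total_time.start()

    timer.preprocess_time.start()
    time.sleep(0.01)                # stand-in for resize/normalize work
    timer.preprocess_time.end()     # end() accumulates (et - st) into .time

    timer.inference_time.start()
    time.sleep(0.02)                # stand-in for predictor.run()
    timer.inference_time.end()

    timer.img_num += 1
    timer.total_time.end()

    print(timer.report(average=True))
    # e.g. {'preprocess_time': 0.0101, 'postprocess_time': 0.0,
    #       'inference_time': 0.0201, 'img_num': 1, 'total_time': 0.0303}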
@@ -213,11 +143,10 @@ def create_predictor(args, mode, logger):
    model_file_path = model_dir + "/inference.pdmodel"
    params_file_path = model_dir + "/inference.pdiparams"
    if not os.path.exists(model_file_path):
        logger.info("not find model file path {}".format(model_file_path))
        sys.exit(0)
        raise ValueError("not find model file path {}".format(model_file_path))
    if not os.path.exists(params_file_path):
        logger.info("not find params file path {}".format(params_file_path))
        sys.exit(0)
        raise ValueError("not find params file path {}".format(
            params_file_path))

    config = inference.Config(model_file_path, params_file_path)

@@ -312,7 +241,7 @@ def create_predictor(args, mode, logger):

        config.delete_pass("conv_transpose_eltwiseadd_bn_fuse_pass")
    if mode == 'table':
        config.delete_pass("fc_fuse_pass")  # not supported for table
        config.delete_pass("fc_fuse_pass")  # not supported for table
    config.switch_use_feed_fetch_ops(False)
    config.switch_ir_optim(True)

@@ -577,30 +506,5 @@ def draw_boxes(image, boxes, scores=None, drop_score=0.5):
    return image


def get_current_memory_mb(gpu_id=None):
    """
    Obtain the CPU and GPU memory usage of the current process while the
    program is running. Note that this function itself is time-consuming.
    """
    import pynvml
    import psutil
    import GPUtil
    pid = os.getpid()
    p = psutil.Process(pid)
    info = p.memory_full_info()
    cpu_mem = info.uss / 1024. / 1024.
    gpu_mem = 0
    gpu_percent = 0
    if gpu_id is not None:
        GPUs = GPUtil.getGPUs()
        gpu_load = GPUs[gpu_id].load
        gpu_percent = gpu_load
        pynvml.nvmlInit()
        handle = pynvml.nvmlDeviceGetHandleByIndex(0)
        meminfo = pynvml.nvmlDeviceGetMemoryInfo(handle)
        gpu_mem = meminfo.used / 1024. / 1024.
    return round(cpu_mem, 4), round(gpu_mem, 4), round(gpu_percent, 4)


if __name__ == '__main__':
    pass

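A sketch of the sampling pattern that predict_rec.py and predict_system.py build on top of this helper (requires psutil, GPUtil and pynvml to be installed; pass gpu_id=None to skip the GPU queries):

    cpu_mem, gpu_mem, gpu_util, count = 0, 0, 0, 0
    for _ in range(5):  # sample a few times while inference is running
        cm, gm, gu = get_current_memory_mb(0)
        cpu_mem += cm
        gpu_mem += gm
        gpu_util += gu
        count += 1

    mems = {
        'cpu_rss_mb': cpu_mem / count,
        'gpu_rss_mb': gpu_mem / count,
        'gpu_util': gpu_util * 100 / count,  # .load is reported in [0, 1]
    }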