diff --git a/test/ocr_det_params.txt b/test/ocr_det_params.txt index 9752ba43..f26bdc40 100644 --- a/test/ocr_det_params.txt +++ b/test/ocr_det_params.txt @@ -1,7 +1,7 @@ model_name:ocr_det python:python3.7 -gpu_list:-1|0|0,1 -Global.auto_cast:False|True +gpu_list:0 +Global.auto_cast:False Global.epoch_num:10 Global.save_model_dir:./output/ Global.save_inference_dir:./output/ @@ -9,7 +9,7 @@ Train.loader.batch_size_per_card: Global.use_gpu Global.pretrained_model -trainer:norm|pact|fpgm +trainer:norm|pact norm_train:tools/train.py -c configs/det/det_mv3_db.yml -o Global.pretrained_model=./pretrain_models/MobileNetV3_large_x0_5_pretrained quant_train:deploy/slim/quantization/quant.py -c configs/det/det_mv3_db.yml -o Global.pretrained_model=./pretrain_models/det_mv3_db_v2.0_train/best_accuracy fpgm_train:null diff --git a/test/test.sh b/test/test.sh index 52afbbb3..0e4b7bfe 100644 --- a/test/test.sh +++ b/test/test.sh @@ -110,7 +110,7 @@ function func_inference(){ for threads in ${cpu_threads_list[*]}; do for batch_size in ${batch_size_list[*]}; do _save_log_path="${_log_path}/infer_cpu_usemkldnn_${use_mkldnn}_threads_${threads}_batchsize_${batch_size}" - command="${_python} ${_script} ${use_gpu_key}=${use_gpu} ${use_mkldnn_key}=${use_mkldnn} ${cpu_threads_key}=${threads} ${model_dir_key}=${_model_dir} ${batch_size_key}=${batch_size} ${image_dir_key}=${_img_dir} ${save_log_key}=${_save_log_path} --benchmark=True" + command="${_python} ${_script} ${use_gpu_key}=${use_gpu} ${use_mkldnn_key}=${use_mkldnn} ${cpu_threads_key}=${threads} ${model_dir_key}=${_model_dir} ${batch_size_key}=${batch_size} ${image_dir_key}=${_img_dir} ${save_log_key}=${_save_log_path} --benchmark=True" eval $command status_check $? 
"${command}" "${status_log}" done @@ -124,7 +124,7 @@ function func_inference(){ fi for batch_size in ${batch_size_list[*]}; do _save_log_path="${_log_path}/infer_gpu_usetrt_${use_trt}_precision_${precision}_batchsize_${batch_size}" - command="${_python} ${_script} ${use_gpu_key}=${use_gpu} ${use_trt_key}=${use_trt} ${precision_key}=${precision} ${model_dir_key}=${_model_dir} ${batch_size_key}=${batch_size} ${image_dir_key}=${_img_dir} ${save_log_key}=${_save_log_path} --benchmark=True" + command="${_python} ${_script} ${use_gpu_key}=${use_gpu} ${use_trt_key}=${use_trt} ${precision_key}=${precision} ${model_dir_key}=${_model_dir} ${batch_size_key}=${batch_size} ${image_dir_key}=${_img_dir} ${save_log_key}=${_save_log_path} --benchmark=True" eval $command status_check $? "${command}" "${status_log}" done diff --git a/tools/infer/predict_det.py b/tools/infer/predict_det.py index 4253964e..4573b561 100755 --- a/tools/infer/predict_det.py +++ b/tools/infer/predict_det.py @@ -106,7 +106,7 @@ class TextDetector(object): model_precision=args.precision, batch_size=1, data_shape="dynamic", - save_path="./output/auto_log.lpg", + save_path=args.save_log_path, inference_config=self.config, pids=pid, process_name=None, @@ -174,7 +174,7 @@ class TextDetector(object): data = {'image': img} st = time.time() - + if args.benchmark: self.autolog.times.start() @@ -262,7 +262,6 @@ if __name__ == "__main__": "det_res_{}".format(img_name_pure)) cv2.imwrite(img_path, src_im) logger.info("The visualized image saved in {}".format(img_path)) - + if args.benchmark: text_detector.autolog.report() -