From eb827bb35b4a37dedfd3e0e7e62c8e37296b851a Mon Sep 17 00:00:00 2001
From: LDOUBLEV
Date: Tue, 8 Jun 2021 16:07:24 +0800
Subject: [PATCH 01/34] add test shell

---
 test/params.txt |  18 +++++++
 test/test.sh    | 141 ++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 159 insertions(+)
 create mode 100644 test/params.txt
 create mode 100644 test/test.sh

diff --git a/test/params.txt b/test/params.txt
new file mode 100644
index 00000000..8101714f
--- /dev/null
+++ b/test/params.txt
@@ -0,0 +1,18 @@
+train_model_list: det;benchmark/benchmark_det.yml
+gpu_list: -1|0|0,1
+auto_cast_list: False|True
+trainer_list: norm|quant|prune
+python: python3.7
+
+inference: python|C++
+devices: cpu|gpu
+use_mkldnn_list: True|False
+cpu_threads_list: 1|6
+rec_batch_size_list: 1|6
+gpu_trt_list: True|False
+gpu_precision_list: fp32|fp16|int8
+img_dir: /paddle/OCR/test_set/benchmark_eval
+
+epoch: 10
+checkpoints: None
+
diff --git a/test/test.sh b/test/test.sh
new file mode 100644
index 00000000..24605130
--- /dev/null
+++ b/test/test.sh
@@ -0,0 +1,141 @@
+#!/bin/bash
+FILENAME=$1
+
+# MODE must be one of ['lite_train_infer' 'whole_infer' 'whole_train_infer']
+MODE=$2
+# prepare pretrained weights and dataset
+wget -nc -P ./pretrain_models/ https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/MobileNetV3_large_x0_5_pretrained.pdparams
+if [ ${MODE} = "lite_train_infer" ];then
+    # prepare lite train data
+    rm -rf ./train_data/icdar2015
+    wget -nc -P ./train_data/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/icdar2015_lite.tar
+    cd ./train_data/ && tar xf icdar2015_lite.tar &&
+    ln -s ./icdar2015_lite ./icdar2015
+    cd ../
+elif [ ${MODE} = "whole_train_infer" ];then
+    rm -rf ./train_data/icdar2015
+    wget -nc -P ./train_data/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/icdar2015.tar
+    cd ./train_data/ && tar xf icdar2015.tar && cd ../
+else
+    echo "Do Nothing"
+fi
+
+
+dataline=$(cat ${FILENAME})
+# parser params
+IFS=$'\n'
+lines=(${dataline})
+function func_parser(){
+    strs=$1
+    IFS=":"
+    array=(${strs})
+    tmp=${array[1]}
+    echo ${tmp}
+}
+IFS=$'\n'
+# The training params
+train_model_list=$(func_parser "${lines[0]}")
+gpu_list=$(func_parser "${lines[1]}")
+auto_cast_list=$(func_parser "${lines[2]}")
+slim_trainer_list=$(func_parser "${lines[3]}")
+python=$(func_parser "${lines[4]}")
+# inference params
+inference=$(func_parser "${lines[5]}")
+devices=$(func_parser "${lines[6]}")
+use_mkldnn_list=$(func_parser "${lines[7]}")
+cpu_threads_list=$(func_parser "${lines[8]}")
+rec_batch_size_list=$(func_parser "${lines[9]}")
+gpu_trt_list=$(func_parser "${lines[10]}")
+gpu_precision_list=$(func_parser "${lines[11]}")
+img_dir=$(func_parser "${lines[12]}")
+
+# train hyperparameters
+epoch=$(func_parser "${lines[13]}")
+checkpoints=$(func_parser "${lines[14]}")
+
+
+for train_model in ${train_model_list[*]}; do
+    if [ ${train_model} = "det" ];then
+        model_name="det"
+        yml_file="configs/det/det_mv3_db.yml"
+    elif [ ${train_model} = "rec" ];then
+        model_name="rec"
+        yml_file="configs/rec/rec_mv3_none_bilstm_ctc.yml"
+    else
+        model_name="det"
+        yml_file="configs/det/det_mv3_db.yml"
+    fi
+    # array=(${train_model})
+    # for j in "${!array[@]}"; do
+    #     model_name=${array[0]}
+    #     yml_file=${array[1]}
+    # done
+    IFS="|"
+    for gpu in ${gpu_list[*]}; do
+        use_gpu=True
+        if [ ${gpu} = "-1" ];then
+            launch=""
+            use_gpu=False
+        elif [ ${#gpu} -le 1 ];then
+            launch=""
+        else
+            launch="-m paddle.distributed.launch --log_dir=./debug/ --gpus ${gpu}"
+        fi
+        # echo "model_name: ${model_name} yml_file: ${yml_file} launch: ${launch} gpu: ${gpu}"
+        for auto_cast in ${auto_cast_list[*]}; do
+            for slim_trainer in ${slim_trainer_list[*]}; do
+                if [ ${slim_trainer} = "norm" ]; then
+                    trainer="tools/train.py"
+                    export_model="tools/export_model.py"
+                elif [ ${slim_trainer} = "quant" ]; then
+                    trainer="deploy/slim/quantization/quant.py"
+                    export_model="deploy/slim/quantization/export_model.py"
+                elif [ ${slim_trainer} = "prune" ]; then
+                    trainer="deploy/slim/prune/sensitivity_anal.py"
+                    export_model="deploy/slim/prune/export_prune_model.py"
+                elif [ ${slim_trainer} = "distill" ]; then
+                    trainer="deploy/slim/distill/train_dml.py"
+                    export_model="deploy/slim/distill/export_distill_model.py"
+                else
+                    trainer="tools/train.py"
+                    export_model="tools/export_model.py"
+                fi
+                # dataset="Train.dataset.data_dir=${train_dir} Train.dataset.label_file_list=${train_label_file} Eval.dataset.data_dir=${eval_dir} Eval.dataset.label_file_list=${eval_label_file}"
+                save_log=${log_path}/${model_name}_${slim_trainer}_autocast_${auto_cast}_gpuid_${gpu}
+                echo ${python} ${launch} ${trainer} -c ${yml_file} -o Global.auto_cast=${auto_cast} Global.save_model_dir=${save_log} Global.use_gpu=${use_gpu} Global.epoch=${epoch}
+                echo ${python} ${export_model} -c ${yml_file} -o Global.pretrained_model=${save_log}/best_accuracy Global.save_inference_dir=${save_log}/export_inference/
+                if [ "${model_name}" = "det" ]; then
+                    export rec_batch_size_list=( "1" )
+                    inference="tools/infer/predict_det.py"
+                elif [ "${model_name}" = "rec" ]; then
+                    inference="tools/infer/predict_rec.py"
+                fi
+                # inference
+                for device in ${devices[*]}; do
+                    if [ ${device} = "cpu" ]; then
+                        for use_mkldnn in ${use_mkldnn_list[*]}; do
+                            for threads in ${cpu_threads_list[*]}; do
+                                for rec_batch_size in ${rec_batch_size_list[*]}; do
+                                    echo ${python} ${inference} --enable_mkldnn=${use_mkldnn} --use_gpu=False --cpu_threads=${threads} --benchmark=True --det_model_dir=${save_log}/export_inference/ --rec_batch_num=${rec_batch_size} --rec_model_dir=${rec_model_dir} --image_dir=${img_dir} --save_log_path=${log_path}/${model_name}_${slim_trainer}_cpu_usemkldnn_${use_mkldnn}_cputhreads_${threads}_recbatchnum_${rec_batch_size}_infer.log
+                                    # ${python} ${inference} --enable_mkldnn=${use_mkldnn} --use_gpu=False --cpu_threads=${threads} --benchmark=True --det_model_dir=${save_log}/export_inference/ --rec_batch_num=${rec_batch_size} --rec_model_dir=${rec_model_dir} --image_dir=${img_dir} 2>&1 | tee ${log_path}/${model_name}_${slim_trainer}_cpu_usemkldnn_${use_mkldnn}_cputhreads_${threads}_recbatchnum_${rec_batch_size}_infer.log
+                                done
+                            done
+                        done
+                    else
+                        for use_trt in ${gpu_trt_list[*]}; do
+                            for precision in ${gpu_precision_list[*]}; do
+                                if [ ${use_trt} = "False" ] && [ ${precision} != "fp32" ]; then
+                                    continue
+                                fi
+                                for rec_batch_size in ${rec_batch_size_list[*]}; do
+                                    # echo "${model_name} ${det_model_dir} ${rec_model_dir}, use_trt: ${use_trt} use_fp16: ${use_fp16}"
+                                    echo ${python} ${inference} --use_gpu=True --use_tensorrt=${use_trt} --precision=${precision} --benchmark=True --det_model_dir=${save_log}/export_inference/ --rec_batch_num=${rec_batch_size} --rec_model_dir=${rec_model_dir} --image_dir=${img_dir} --save_log_path=${log_path}/${model_name}_${slim_trainer}_gpu_usetensorrt_${use_trt}_usefp16_${precision}_recbatchnum_${rec_batch_size}_infer.log
+                                done
+                            done
+                        done
+                    fi
+                done
+            done
+        done
+    done
+done

From 27b543ab13c206491acaf48003f03e92114d5eb4 Mon Sep 17 00:00:00 2001
From: LDOUBLEV
Date: Tue, 8 Jun 2021 16:08:52 +0800
Subject: [PATCH 02/34] delete echo and note

---
 test/test.sh | 11 +++--------
 1 file changed, 3 insertions(+), 8 deletions(-)

diff --git a/test/test.sh b/test/test.sh
index 24605130..559dc4b0 100644
--- a/test/test.sh
+++ b/test/test.sh
@@ -65,11 +65,6 @@ for train_model in ${train_model_list[*]}; do
         model_name="det"
         yml_file="configs/det/det_mv3_db.yml"
     fi
-    # array=(${train_model})
-    # for j in "${!array[@]}"; do
-    #     model_name=${array[0]}
-    #     yml_file=${array[1]}
-    # done
     IFS="|"
     for gpu in ${gpu_list[*]}; do
         use_gpu=True
@@ -103,7 +98,7 @@ for train_model in ${train_model_list[*]}; do
                 # dataset="Train.dataset.data_dir=${train_dir} Train.dataset.label_file_list=${train_label_file} Eval.dataset.data_dir=${eval_dir} Eval.dataset.label_file_list=${eval_label_file}"
                 save_log=${log_path}/${model_name}_${slim_trainer}_autocast_${auto_cast}_gpuid_${gpu}
                 echo ${python} ${launch} ${trainer} -c ${yml_file} -o Global.auto_cast=${auto_cast} Global.save_model_dir=${save_log} Global.use_gpu=${use_gpu} Global.epoch=${epoch}
-                echo ${python} ${export_model} -c ${yml_file} -o Global.pretrained_model=${save_log}/best_accuracy Global.save_inference_dir=${save_log}/export_inference/
+                ${python} ${export_model} -c ${yml_file} -o Global.pretrained_model=${save_log}/best_accuracy Global.save_inference_dir=${save_log}/export_inference/
                 if [ "${model_name}" = "det" ]; then
                     export rec_batch_size_list=( "1" )
                     inference="tools/infer/predict_det.py"
@@ -117,7 +112,7 @@ for train_model in ${train_model_list[*]}; do
                             for threads in ${cpu_threads_list[*]}; do
                                 for rec_batch_size in ${rec_batch_size_list[*]}; do
                                     echo ${python} ${inference} --enable_mkldnn=${use_mkldnn} --use_gpu=False --cpu_threads=${threads} --benchmark=True --det_model_dir=${save_log}/export_inference/ --rec_batch_num=${rec_batch_size} --rec_model_dir=${rec_model_dir} --image_dir=${img_dir} --save_log_path=${log_path}/${model_name}_${slim_trainer}_cpu_usemkldnn_${use_mkldnn}_cputhreads_${threads}_recbatchnum_${rec_batch_size}_infer.log
-                                    # ${python} ${inference} --enable_mkldnn=${use_mkldnn} --use_gpu=False --cpu_threads=${threads} --benchmark=True --det_model_dir=${save_log}/export_inference/ --rec_batch_num=${rec_batch_size} --rec_model_dir=${rec_model_dir} --image_dir=${img_dir} 2>&1 | tee ${log_path}/${model_name}_${slim_trainer}_cpu_usemkldnn_${use_mkldnn}_cputhreads_${threads}_recbatchnum_${rec_batch_size}_infer.log
+                                    ${python} ${inference} --enable_mkldnn=${use_mkldnn} --use_gpu=False --cpu_threads=${threads} --benchmark=True --det_model_dir=${save_log}/export_inference/ --rec_batch_num=${rec_batch_size} --rec_model_dir=${rec_model_dir} --image_dir=${img_dir} 2>&1 | tee ${log_path}/${model_name}_${slim_trainer}_cpu_usemkldnn_${use_mkldnn}_cputhreads_${threads}_recbatchnum_${rec_batch_size}_infer.log
                                 done
                             done
                         done
@@ -129,7 +124,7 @@ for train_model in ${train_model_list[*]}; do
                                 fi
                                 for rec_batch_size in ${rec_batch_size_list[*]}; do
                                     # echo "${model_name} ${det_model_dir} ${rec_model_dir}, use_trt: ${use_trt} use_fp16: ${use_fp16}"
-                                    echo ${python} ${inference} --use_gpu=True --use_tensorrt=${use_trt} --precision=${precision} --benchmark=True --det_model_dir=${save_log}/export_inference/ --rec_batch_num=${rec_batch_size} --rec_model_dir=${rec_model_dir} --image_dir=${img_dir} --save_log_path=${log_path}/${model_name}_${slim_trainer}_gpu_usetensorrt_${use_trt}_usefp16_${precision}_recbatchnum_${rec_batch_size}_infer.log
+                                    ${python} ${inference} --use_gpu=True --use_tensorrt=${use_trt} --precision=${precision} --benchmark=True --det_model_dir=${save_log}/export_inference/ --rec_batch_num=${rec_batch_size} --rec_model_dir=${rec_model_dir} --image_dir=${img_dir} --save_log_path=${log_path}/${model_name}_${slim_trainer}_gpu_usetensorrt_${use_trt}_usefp16_${precision}_recbatchnum_${rec_batch_size}_infer.log
                                 done
                             done
                         done
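The `key: value` parsing that PATCH 01 introduces is the backbone of params.txt handling, and it has one subtlety worth seeing in isolation: with `IFS=":"` the parsed value keeps the leading space from `key: value`, which is why later patches in this series widen the separator to `IFS=": "`. A minimal standalone sketch (the sample line is illustrative, not quoted from params.txt):

# Sketch of func_parser from test.sh; run standalone to see the splitting.
function func_parser(){
    strs=$1
    IFS=":"                 # split the line at the colon only
    array=(${strs})
    tmp=${array[1]}
    echo ${tmp}
}
func_parser "gpu_list: -1|0|0,1"   # prints " -1|0|0,1" -- note the leading space
# With IFS=": " (colon or space), as adopted later in the series, the leading
# space is consumed as part of the delimiter and the value comes back clean.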
From 40f78f7531f0ff898be5b256401dc16d7faaf31d Mon Sep 17 00:00:00 2001
From: LDOUBLEV
Date: Tue, 8 Jun 2021 16:11:20 +0800
Subject: [PATCH 03/34] rename key

---
 test/params.txt | 2 +-
 test/test.sh    | 4 ++--
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/test/params.txt b/test/params.txt
index 8101714f..9a4b52e8 100644
--- a/test/params.txt
+++ b/test/params.txt
@@ -1,4 +1,4 @@
-train_model_list: det;benchmark/benchmark_det.yml
+train_model_list: ocr_det
 gpu_list: -1|0|0,1
 auto_cast_list: False|True
 trainer_list: norm|quant|prune
diff --git a/test/test.sh b/test/test.sh
index 559dc4b0..a17d3274 100644
--- a/test/test.sh
+++ b/test/test.sh
@@ -55,10 +55,10 @@ checkpoints=$(func_parser "${lines[14]}")
 
 
 for train_model in ${train_model_list[*]}; do
-    if [ ${train_model} = "det" ];then
+    if [ ${train_model} = "ocr_det" ];then
         model_name="det"
         yml_file="configs/det/det_mv3_db.yml"
-    elif [ ${train_model} = "rec" ];then
+    elif [ ${train_model} = "ocr_rec" ];then
         model_name="rec"
         yml_file="configs/rec/rec_mv3_none_bilstm_ctc.yml"
     else

From a1494515a804516a0036926c496f2c457b8ca204 Mon Sep 17 00:00:00 2001
From: LDOUBLEV
Date: Tue, 8 Jun 2021 16:13:28 +0800
Subject: [PATCH 04/34] add usage

---
 test/test.sh | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/test/test.sh b/test/test.sh
index a17d3274..6ba13c19 100644
--- a/test/test.sh
+++ b/test/test.sh
@@ -1,4 +1,7 @@
 #!/bin/bash
+# Usage:
+# bash test/test.sh ./test/params.txt 'lite_train_infer'
+
 FILENAME=$1
 
 # MODE must be one of ['lite_train_infer' 'whole_infer' 'whole_train_infer']
 MODE=$2
 # prepare pretrained weights and dataset

From 83149576de2ab6b664c8608ed0106bd5967b8cc2 Mon Sep 17 00:00:00 2001
From: LDOUBLEV
Date: Tue, 8 Jun 2021 16:18:08 +0800
Subject: [PATCH 05/34] remove img_dir from params.txt

---
 test/params.txt | 3 +--
 test/test.sh    | 7 +++----
 2 files changed, 4 insertions(+), 6 deletions(-)

diff --git a/test/params.txt b/test/params.txt
index 9a4b52e8..67b2f8f1 100644
--- a/test/params.txt
+++ b/test/params.txt
@@ -1,6 +1,6 @@
 train_model_list: ocr_det
 gpu_list: -1|0|0,1
-auto_cast_list: False|True
+auto_cast_list: False
 trainer_list: norm|quant|prune
 python: python3.7
 
@@ -11,7 +11,6 @@ cpu_threads_list: 1|6
 rec_batch_size_list: 1|6
 gpu_trt_list: True|False
 gpu_precision_list: fp32|fp16|int8
-img_dir: /paddle/OCR/test_set/benchmark_eval
 
 epoch: 10
 checkpoints: None
diff --git a/test/test.sh b/test/test.sh
index 6ba13c19..1bc5a8f6 100644
--- a/test/test.sh
+++ b/test/test.sh
@@ -50,11 +50,10 @@ cpu_threads_list=$(func_parser "${lines[8]}")
 rec_batch_size_list=$(func_parser "${lines[9]}")
 gpu_trt_list=$(func_parser "${lines[10]}")
 gpu_precision_list=$(func_parser "${lines[11]}")
-img_dir=$(func_parser "${lines[12]}")
-
+img_dir="./train_data/icdar2015/text_localization/ch4_test_images/"
 # train hyperparameters
-epoch=$(func_parser "${lines[13]}")
-checkpoints=$(func_parser "${lines[14]}")
+epoch=$(func_parser "${lines[12]}")
+checkpoints=$(func_parser "${lines[13]}")
 
 
 for train_model in ${train_model_list[*]}; do
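The pipe-separated lists that PATCH 03 and PATCH 05 trim (`train_model_list`, `auto_cast_list`, and friends) are expanded by the same IFS mechanism inside the loops of test.sh. A hedged sketch of that expansion, with illustrative values:

# Sketch: how test.sh iterates a params.txt list such as "-1|0|0,1".
gpu_list="-1|0|0,1"
IFS="|"
for gpu in ${gpu_list[*]}; do
    echo "would launch with gpu(s): ${gpu}"   # -1, then 0, then 0,1
done
IFS=$'\n'   # restore a sane IFS afterwards; the script itself juggles IFS at several points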
From 1a06bff08a0b387ddc1c81b500f4394fc5d937b7 Mon Sep 17 00:00:00 2001
From: LDOUBLEV
Date: Tue, 8 Jun 2021 16:29:22 +0800
Subject: [PATCH 06/34] delete epoch from params.txt

---
 test/params.txt |  2 --
 test/test.sh    | 10 +++++++---
 2 files changed, 7 insertions(+), 5 deletions(-)

diff --git a/test/params.txt b/test/params.txt
index 67b2f8f1..3fe857d3 100644
--- a/test/params.txt
+++ b/test/params.txt
@@ -12,6 +12,4 @@ rec_batch_size_list: 1|6
 gpu_trt_list: True|False
 gpu_precision_list: fp32|fp16|int8
 
-epoch: 10
-checkpoints: None
 
diff --git a/test/test.sh b/test/test.sh
index 1bc5a8f6..1a40eb25 100644
--- a/test/test.sh
+++ b/test/test.sh
@@ -15,10 +15,14 @@ if [ ${MODE} = "lite_train_infer" ];then
     cd ./train_data/ && tar xf icdar2015_lite.tar &&
     ln -s ./icdar2015_lite ./icdar2015
     cd ../
+    epoch=10
+    eval_batch_step=10
 elif [ ${MODE} = "whole_train_infer" ];then
     rm -rf ./train_data/icdar2015
     wget -nc -P ./train_data/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/icdar2015.tar
     cd ./train_data/ && tar xf icdar2015.tar && cd ../
+    epoch=300
+    eval_batch_step=200
 else
     echo "Do Nothing"
 fi
@@ -52,8 +56,8 @@ gpu_trt_list=$(func_parser "${lines[10]}")
 gpu_precision_list=$(func_parser "${lines[11]}")
 img_dir="./train_data/icdar2015/text_localization/ch4_test_images/"
 # train hyperparameters
-epoch=$(func_parser "${lines[12]}")
-checkpoints=$(func_parser "${lines[13]}")
+#epoch=$(func_parser "${lines[12]}")
+#checkpoints=$(func_parser "${lines[13]}")
 
 
 for train_model in ${train_model_list[*]}; do
@@ -103,7 +103,7 @@ for train_model in ${train_model_list[*]}; do
                 # dataset="Train.dataset.data_dir=${train_dir} Train.dataset.label_file_list=${train_label_file} Eval.dataset.data_dir=${eval_dir} Eval.dataset.label_file_list=${eval_label_file}"
                 save_log=${log_path}/${model_name}_${slim_trainer}_autocast_${auto_cast}_gpuid_${gpu}
-                echo ${python} ${launch} ${trainer} -c ${yml_file} -o Global.auto_cast=${auto_cast} Global.save_model_dir=${save_log} Global.use_gpu=${use_gpu} Global.epoch=${epoch}
+                ${python} ${launch} ${trainer} -c ${yml_file} -o Global.epoch_num=${epoch} Global.eval_batch_step=${eval_batch_step} Global.auto_cast=${auto_cast} Global.save_model_dir=${save_log} Global.use_gpu=${use_gpu}
                 ${python} ${export_model} -c ${yml_file} -o Global.pretrained_model=${save_log}/best_accuracy Global.save_inference_dir=${save_log}/export_inference/

From 553a6a29cac808792190e263e749e73ed68a3ab0 Mon Sep 17 00:00:00 2001
From: LDOUBLEV
Date: Tue, 8 Jun 2021 18:24:21 +0800
Subject: [PATCH 07/34] check the exit code

---
 test/test.sh | 17 ++++++++++++++++-
 1 file changed, 16 insertions(+), 1 deletion(-)

diff --git a/test/test.sh b/test/test.sh
index 1a40eb25..0ef5c1d9 100644
--- a/test/test.sh
+++ b/test/test.sh
@@ -103,8 +103,16 @@ for train_model in ${train_model_list[*]}; do
                 # dataset="Train.dataset.data_dir=${train_dir} Train.dataset.label_file_list=${train_label_file} Eval.dataset.data_dir=${eval_dir} Eval.dataset.label_file_list=${eval_label_file}"
                 save_log=${log_path}/${model_name}_${slim_trainer}_autocast_${auto_cast}_gpuid_${gpu}
+
                 ${python} ${launch} ${trainer} -c ${yml_file} -o Global.epoch_num=${epoch} Global.eval_batch_step=${eval_batch_step} Global.auto_cast=${auto_cast} Global.save_model_dir=${save_log} Global.use_gpu=${use_gpu}
-                ${python} ${export_model} -c ${yml_file} -o Global.pretrained_model=${save_log}/best_accuracy Global.save_inference_dir=${save_log}/export_inference/
+                ${python} ${export_model} -c ${yml_file} -o Global.pretrained_model=${save_log}/best_accuracy Global.save_inference_dir=${save_log}/export_inference/ Global.save_model_dir=${save_log}
+                if [ $? -eq 0 ]; then
+                    echo -e "\033[33m training of $model_name successfully!\033[0m" | tee -a ${save_log}/train.log
+                else
+                    cat ${save_log}/train.log
+                    echo -e "\033[33m training of $model_name failed!\033[0m" | tee -a ${save_log}/train.log
+                fi
+
                 if [ "${model_name}" = "det" ]; then
                     export rec_batch_size_list=( "1" )
                     inference="tools/infer/predict_det.py"
@@ -119,6 +127,13 @@ for train_model in ${train_model_list[*]}; do
                                 for rec_batch_size in ${rec_batch_size_list[*]}; do
                                     echo ${python} ${inference} --enable_mkldnn=${use_mkldnn} --use_gpu=False --cpu_threads=${threads} --benchmark=True --det_model_dir=${save_log}/export_inference/ --rec_batch_num=${rec_batch_size} --rec_model_dir=${rec_model_dir} --image_dir=${img_dir} --save_log_path=${log_path}/${model_name}_${slim_trainer}_cpu_usemkldnn_${use_mkldnn}_cputhreads_${threads}_recbatchnum_${rec_batch_size}_infer.log
                                     ${python} ${inference} --enable_mkldnn=${use_mkldnn} --use_gpu=False --cpu_threads=${threads} --benchmark=True --det_model_dir=${save_log}/export_inference/ --rec_batch_num=${rec_batch_size} --rec_model_dir=${rec_model_dir} --image_dir=${img_dir} 2>&1 | tee ${log_path}/${model_name}_${slim_trainer}_cpu_usemkldnn_${use_mkldnn}_cputhreads_${threads}_recbatchnum_${rec_batch_size}_infer.log
+                                    if [ $? -eq 0 ]; then
+                                        echo -e "\033[33m training of $model_name successfully!\033[0m" | tee -a ${log_path}${model_name}_${slim_trainer}_cpu_usemkldnn_${use_mkldnn}_cputhreads_${threads}_recbatchnum_${rec_batch_size}_infer.log
+                                    else
+                                        cat ${log_path}${model_name}_${slim_trainer}_cpu_usemkldnn_${use_mkldnn}_cputhreads_${threads}_recbatchnum_${rec_batch_size}_infer.log
+                                        echo -e "\033[33m training of $model_name failed!\033[0m" | tee -a ${log_path}${model_name}_${slim_trainer}_cpu_usemkldnn_${use_mkldnn}_cputhreads_${threads}_recbatchnum_${rec_batch_size}_infer.log
+                                    fi
+
                                 done
                             done
                         done

From 07ff561bf0072a2e7cd0825bcaeb09f105564e9a Mon Sep 17 00:00:00 2001
From: LDOUBLEV
Date: Tue, 8 Jun 2021 18:37:56 +0800
Subject: [PATCH 08/34] add save_log_path to inference

---
 test/test.sh | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/test/test.sh b/test/test.sh
index 0ef5c1d9..cd4d93d9 100644
--- a/test/test.sh
+++ b/test/test.sh
@@ -126,7 +126,7 @@ for train_model in ${train_model_list[*]}; do
                                 for rec_batch_size in ${rec_batch_size_list[*]}; do
                                     echo ${python} ${inference} --enable_mkldnn=${use_mkldnn} --use_gpu=False --cpu_threads=${threads} --benchmark=True --det_model_dir=${save_log}/export_inference/ --rec_batch_num=${rec_batch_size} --rec_model_dir=${rec_model_dir} --image_dir=${img_dir} --save_log_path=${log_path}/${model_name}_${slim_trainer}_cpu_usemkldnn_${use_mkldnn}_cputhreads_${threads}_recbatchnum_${rec_batch_size}_infer.log
-                                    ${python} ${inference} --enable_mkldnn=${use_mkldnn} --use_gpu=False --cpu_threads=${threads} --benchmark=True --det_model_dir=${save_log}/export_inference/ --rec_batch_num=${rec_batch_size} --rec_model_dir=${rec_model_dir} --image_dir=${img_dir} 2>&1 | tee ${log_path}/${model_name}_${slim_trainer}_cpu_usemkldnn_${use_mkldnn}_cputhreads_${threads}_recbatchnum_${rec_batch_size}_infer.log
+                                    ${python} ${inference} --enable_mkldnn=${use_mkldnn} --use_gpu=False --cpu_threads=${threads} --benchmark=True --det_model_dir=${save_log}/export_inference/ --rec_batch_num=${rec_batch_size} --rec_model_dir=${rec_model_dir} --image_dir=${img_dir} --save_log_path=${log_path}/${model_name}_${slim_trainer}_cpu_usemkldnn_${use_mkldnn}_cputhreads_${threads}_recbatchnum_${rec_batch_size}_infer.log
                                     if [ $? -eq 0 ]; then
                                         echo -e "\033[33m training of $model_name successfully!\033[0m" | tee -a ${log_path}${model_name}_${slim_trainer}_cpu_usemkldnn_${use_mkldnn}_cputhreads_${threads}_recbatchnum_${rec_batch_size}_infer.log
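A subtlety behind the `if [ $? -eq 0 ]` checks added in PATCH 07, and possibly one reason PATCH 08 switches the real run from `2>&1 | tee` to `--save_log_path`: `$?` holds the status of the most recent command only, and in a pipeline it is the status of the last stage (here `tee`), not of the training or inference run. A standalone illustration of both pitfalls:

# $? must be read immediately after the command of interest.
false                          # a failing command (exit status 1)
echo "logging something"       # overwrites $? with 0
if [ $? -eq 0 ]; then echo "the failure above was silently lost"; fi

# In a pipeline, $? is the last stage's status (tee), not the first's.
false | tee /tmp/run.log
echo "pipeline status: $?"     # prints 0, because tee succeeded

# Safer: capture the status right away.
false
status=$?
[ ${status} -ne 0 ] && echo "run failed with status ${status}"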
From 81f2ccb16da0a433d070be036f5534e6dbee84c2 Mon Sep 17 00:00:00 2001
From: LDOUBLEV
Date: Tue, 8 Jun 2021 20:38:30 +0800
Subject: [PATCH 09/34] add infer.sh

---
 test/infer.sh | 121 ++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 121 insertions(+)
 create mode 100644 test/infer.sh

diff --git a/test/infer.sh b/test/infer.sh
new file mode 100644
index 00000000..35c4db2d
--- /dev/null
+++ b/test/infer.sh
@@ -0,0 +1,121 @@
+#!/bin/bash
+
+dataline=$(cat ${FILENAME})
+# parser params
+IFS=$'\n'
+lines=(${dataline})
+function func_parser(){
+    strs=$1
+    IFS=":"
+    array=(${strs})
+    tmp=${array[1]}
+    echo ${tmp}
+}
+IFS=$'\n'
+# The training params
+train_model_list=$(func_parser "${lines[0]}")
+gpu_list=$(func_parser "${lines[1]}")
+auto_cast_list=$(func_parser "${lines[2]}")
+slim_trainer_list=$(func_parser "${lines[3]}")
+python=$(func_parser "${lines[4]}")
+# inference params
+inference=$(func_parser "${lines[5]}")
+devices=$(func_parser "${lines[6]}")
+use_mkldnn_list=$(func_parser "${lines[7]}")
+cpu_threads_list=$(func_parser "${lines[8]}")
+rec_batch_size_list=$(func_parser "${lines[9]}")
+gpu_trt_list=$(func_parser "${lines[10]}")
+gpu_precision_list=$(func_parser "${lines[11]}")
+
+
+for train_model in ${train_model_list[*]}; do
+    if [ ${train_model} = "det" ];then
+        model_name="det"
+        yml_file="configs/det/det_mv3_db.yml"
+        img_dir=""
+    elif [ ${train_model} = "rec" ];then
+        model_name="rec"
+        yml_file="configs/rec/rec_mv3_none_bilstm_ctc.yml"
+        img_dir=""
+    fi
+
+    # eval
+    for slim_trainer in ${slim_trainer_list[*]}; do
+        if [ ${slim_trainer} = "norm" ]; then
+            if [ ${model_name} = "model_name" ]; then
+                eval_model_name="ch_ppocr_mobile_v2.0_det_infer"
+                wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_det_train.tar
+            else
+                eval_model_name="ch_ppocr_mobile_v2.0_rec_infer"
+                wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_rec_train.tar
+            fi
+        elif [ ${slim_trainer} = "quant" ]; then
+            if [ ${model_name} = "model_name" ]; then
+                eval_model_name="ch_ppocr_mobile_v2.0_det_quant_infer"
+                wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/slim/ch_ppocr_mobile_v2.0_det_quant_train.tar
+            else
+                eval_model_name="ch_ppocr_mobile_v2.0_rec_quant_infer"
+                wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/slim/ch_ppocr_mobile_v2.0_rec_quant_train.tar
+            fi
+        elif [ ${slim_trainer} = "distill" ]; then
+            if [ ${model_name} = "model_name" ]; then
+                eval_model_name="ch_ppocr_mobile_v2.0_det_distill_infer"
+                wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/slim/ch_ppocr_mobile_v2.0_det_distill_train.tar
+            else
+                eval_model_name="ch_ppocr_mobile_v2.0_rec_distill_infer"
+                wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/slim/ch_ppocr_mobile_v2.0_rec_distill_train.tar
+            fi
+        elif [ ${slim_trainer} = "prune" ]; then
+            if [ ${model_name} = "model_name" ]; then
+                eval_model_name="ch_ppocr_mobile_v2.0_det_prune_train"
+                wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/slim/ch_ppocr_mobile_v2.0_det_prune_train.tar
+            else
+                eval_model_name="ch_ppocr_mobile_v2.0_rec_prune_train"
+                wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/slim/ch_ppocr_mobile_v2.0_rec_prune_train.tar
+            fi
+        fi
+
+        echo ${python} tools/eval.py -c ${yml_file} -o Global.pretrained_model=${eval_model_name} Global.save_model_dir=${log_path}/${model_name}
+        echo ${python} tools/export_model.py -c ${yml_file} -o Global.pretrained_model=${eval_model_name} Global.save_inference_dir=${log_path}/${eval_model_name}_infer
+        if [ $? -eq 0 ]; then
+            echo -e "\033[33m training of $model_name successfully!\033[0m" | tee -a ${save_log}/train.log
+        else
+            cat ${save_log}/train.log
+            echo -e "\033[33m training of $model_name failed!\033[0m" | tee -a ${save_log}/train.log
+        fi
+        if [ "${model_name}" = "det" ]; then
+            export rec_batch_size_list=( "1" )
+            inference="tools/infer/predict_det.py"
+            det_model_dir=${log_path}/${eval_model_name}_infer
+            rec_model_dir=""
+        elif [ "${model_name}" = "rec" ]; then
+            inference="tools/infer/predict_rec.py"
+            rec_model_dir=${log_path}/${eval_model_name}_infer
+            det_model_dir=""
+        fi
+        # inference
+        for device in ${devices[*]}; do
+            if [ ${device} = "cpu" ]; then
+                for use_mkldnn in ${use_mkldnn_list[*]}; do
+                    for threads in ${cpu_threads_list[*]}; do
+                        for rec_batch_size in ${rec_batch_size_list[*]}; do
+                            echo ${python} ${inference} --enable_mkldnn=${use_mkldnn} --use_gpu=False --cpu_threads=${threads} --benchmark=True --det_model_dir=${det_model_dir} --rec_batch_num=${rec_batch_size} --rec_model_dir=${rec_model_dir} --image_dir=${img_dir} --save_log_path=${log_path}/${model_name}_${slim_trainer}_cpu_usemkldnn_${use_mkldnn}_cputhreads_${threads}_recbatchnum_${rec_batch_size}_infer.log
+                            # ${python} ${inference} --enable_mkldnn=${use_mkldnn} --use_gpu=False --cpu_threads=${threads} --benchmark=True --det_model_dir=${save_log}/export_inference/ --rec_batch_num=${rec_batch_size} --rec_model_dir=${rec_model_dir} --image_dir=${img_dir} 2>&1 | tee ${log_path}/${model_name}_${slim_trainer}_cpu_usemkldnn_${use_mkldnn}_cputhreads_${threads}_recbatchnum_${rec_batch_size}_infer.log
+                        done
+                    done
+                done
+            else
+                for use_trt in ${gpu_trt_list[*]}; do
+                    for precision in ${gpu_precision_list[*]}; do
+                        if [ ${use_trt} = "False" ] && [ ${precision} != "fp32" ]; then
+                            continue
+                        fi
+                        for rec_batch_size in ${rec_batch_size_list[*]}; do
+                            # echo "${model_name} ${det_model_dir} ${rec_model_dir}, use_trt: ${use_trt} use_fp16: ${use_fp16}"
+                            echo ${python} ${inference} --use_gpu=True --use_tensorrt=${use_trt} --precision=${precision} --benchmark=True --det_model_dir=${log_path}/${eval_model_name}_infer --rec_batch_num=${rec_batch_size} --rec_model_dir=${rec_model_dir} --image_dir=${img_dir} --save_log_path=${log_path}/${model_name}_${slim_trainer}_gpu_usetensorrt_${use_trt}_usefp16_${precision}_recbatchnum_${rec_batch_size}_infer.log
+                        done
+                    done
+                done
+            fi
+        done
+done

From f7a554c2af9cff0a40ea70abb01ca77c0884e1c9 Mon Sep 17 00:00:00 2001
From: LDOUBLEV
Date: Tue, 8 Jun 2021 20:42:11 +0800
Subject: [PATCH 10/34] fix det_model_dir rec_model_dir Null

---
 test/test.sh | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/test/test.sh b/test/test.sh
index cd4d93d9..b43b1a61 100644
--- a/test/test.sh
+++ b/test/test.sh
@@ -116,8 +116,12 @@ for train_model in ${train_model_list[*]}; do
                 if [ "${model_name}" = "det" ]; then
                     export rec_batch_size_list=( "1" )
                     inference="tools/infer/predict_det.py"
+                    det_model_dir=${save_log}/export_inference/
+                    rec_model_dir=""
                 elif [ "${model_name}" = "rec" ]; then
                     inference="tools/infer/predict_rec.py"
+                    rec_model_dir=${save_log}/export_inference/
+                    det_model_dir=""
                 fi
                 # inference
                 for device in ${devices[*]}; do
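infer.sh (PATCH 09) repeats one download-and-name block per trainer type; all of them share the `wget -nc` idiom, which makes CI re-runs cheap because an already-downloaded tarball is skipped. A hedged sketch of that fetch-once pattern folded into a helper (the helper name is ours, not the script's; folding the norm/quant/distill/prune branches through something like it would also shrink the repetition):

# fetch_model is a hypothetical helper; infer.sh spells this out per branch.
fetch_model(){
    url=$1
    wget -nc -P ./inference "${url}"                       # -nc: no-clobber, skip if present
    tar xf "./inference/$(basename "${url}")" -C ./inference
}
fetch_model https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_det_train.tar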
inference="tools/infer/predict_rec.py" + rec_model_dir=${save_log}/export_inference/ + det_model_dir="" fi # inference for device in ${devices[*]}; do From 58437e644821d420871fae89334a848749181c34 Mon Sep 17 00:00:00 2001 From: LDOUBLEV Date: Tue, 8 Jun 2021 21:22:54 +0800 Subject: [PATCH 11/34] refine status check --- test/infer.sh | 43 +++++++++++++++++++++++++++++++++---------- test/test.sh | 49 ++++++++++++++++++++++++++----------------------- 2 files changed, 59 insertions(+), 33 deletions(-) diff --git a/test/infer.sh b/test/infer.sh index 35c4db2d..61d10b4d 100644 --- a/test/infer.sh +++ b/test/infer.sh @@ -28,6 +28,19 @@ gpu_trt_list=$(func_parser "${lines[10]}") gpu_precision_list=$(func_parser "${lines[11]}") +function status_check(){ + last_status=$1 # 上个阶段的退出码 + run_model=$2 + run_command=$3 + save_log=$4 + echo ${case3} + if [ $last_status -eq 0 ]; then + echo -e "\033[33m $run_model successfully with command - ${run_command}! \033[0m" | tee -a ${save_log} + else + echo -e "\033[33m $case failed with command - ${run_command}! \033[0m" | tee -a ${save_log} + fi +} + for train_model in ${train_model_list[*]}; do if [ ${train_model} = "det" ];then model_name="det" @@ -42,7 +55,7 @@ for train_model in ${train_model_list[*]}; do # eval for slim_trainer in ${slim_trainer_list[*]}; do if [ ${slim_trainer} = "norm" ]; then - if [ ${model_name} = "model_name" ]; then + if [ ${model_name} = "det" ]; then eval_model_name="ch_ppocr_mobile_v2.0_det_infer" wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_det_train.tar else @@ -50,7 +63,7 @@ for train_model in ${train_model_list[*]}; do wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_rec_train.tar fi elif [ ${slim_trainer} = "quant" ]; then - if [ ${model_name} = "model_name" ]; then + if [ ${model_name} = "det" ]; then eval_model_name="ch_ppocr_mobile_v2.0_det_quant_infer" wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/slim/ch_ppocr_mobile_v2.0_det_quant_train.tar else @@ -58,7 +71,7 @@ for train_model in ${train_model_list[*]}; do wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/slim/ch_ppocr_mobile_v2.0_rec_quant_train.tar fi elif [ ${slim_trainer} = "distill" ]; then - if [ ${model_name} = "model_name" ]; then + if [ ${model_name} = "det" ]; then eval_model_name="ch_ppocr_mobile_v2.0_det_distill_infer" wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/slim/ch_ppocr_mobile_v2.0_det_distill_train.tar else @@ -66,7 +79,7 @@ for train_model in ${train_model_list[*]}; do wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/slim/ch_ppocr_mobile_v2.0_rec_distill_train.tar fi elif [ ${slim_trainer} = "prune" ]; then - if [ ${model_name} = "model_name" ]; then + if [ ${model_name} = "det" ]; then eval_model_name="ch_ppocr_mobile_v2.0_det_prune_train" wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/slim/ch_ppocr_mobile_v2.0_det_prune_train.tar else @@ -74,9 +87,15 @@ for train_model in ${train_model_list[*]}; do wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/slim/ch_ppocr_mobile_v2.0_rec_prune_train.tar fi fi + save_log_path="${log_path}/${eval_model_name}" + command="${python} tools/eval.py -c ${yml_file} -o Global.pretrained_model=${eval_model_name} Global.save_model_dir=${save_log_path}" + ${python} tools/eval.py -c ${yml_file} -o Global.pretrained_model=${eval_model_name} Global.save_model_dir=${save_log_path} + status_check $? 
"${trainer}" "${command}" "${save_log_path}/train.log" + + command="${python} tools/export_model.py -c ${yml_file} -o Global.pretrained_model=${eval_model_name} Global.save_inference_dir=${log_path}/${eval_model_name}_infer Global.save_model_dir=${save_log_path}" + ${python} tools/export_model.py -c ${yml_file} -o Global.pretrained_model=${eval_model_name} Global.save_inference_dir=${log_path}/${eval_model_name}_infer Global.save_model_dir=${save_log_path} + status_check $? "${trainer}" "${command}" "${save_log_path}/train.log" - echo ${python} tools/eval.py -c ${yml_file} -o Global.pretrained_model=${eval_model_name} Global.save_model_dir=${log_path}/${model_name} - echo ${python} tools/export_model.py -c ${yml_file} -o Global.pretrained_model=${eval_model_name} Global.save_inference_dir=${log_path}/${eval_model_name}_infer if [ $? -eq 0 ]; then echo -e "\033[33m training of $model_name successfully!\033[0m" | tee -a ${save_log}/train.log else @@ -99,8 +118,10 @@ for train_model in ${train_model_list[*]}; do for use_mkldnn in ${use_mkldnn_list[*]}; do for threads in ${cpu_threads_list[*]}; do for rec_batch_size in ${rec_batch_size_list[*]}; do - echo ${python} ${inference} --enable_mkldnn=${use_mkldnn} --use_gpu=False --cpu_threads=${threads} --benchmark=True --det_model_dir=${det_model_dir} --rec_batch_num=${rec_batch_size} --rec_model_dir=${rec_model_dir} --image_dir=${img_dir} --save_log_path=${log_path}/${model_name}_${slim_trainer}_cpu_usemkldnn_${use_mkldnn}_cputhreads_${threads}_recbatchnum_${rec_batch_size}_infer.log - # ${python} ${inference} --enable_mkldnn=${use_mkldnn} --use_gpu=False --cpu_threads=${threads} --benchmark=True --det_model_dir=${save_log}/export_inference/ --rec_batch_num=${rec_batch_size} --rec_model_dir=${rec_model_dir} --image_dir=${img_dir} 2>&1 | tee ${log_path}/${model_name}_${slim_trainer}_cpu_usemkldnn_${use_mkldnn}_cputhreads_${threads}_recbatchnum_${rec_batch_size}_infer.log + save_log_path="${log_path}/${model_name}_${slim_trainer}_cpu_usemkldnn_${use_mkldnn}_cputhreads_${threads}_recbatchnum_${rec_batch_size}_infer.log" + command="${python} ${inference} --enable_mkldnn=${use_mkldnn} --use_gpu=False --cpu_threads=${threads} --benchmark=True --det_model_dir=${det_model_dir} --rec_batch_num=${rec_batch_size} --rec_model_dir=${rec_model_dir} --image_dir=${img_dir} --save_log_path=${save_log_path}" + ${python} ${inference} --enable_mkldnn=${use_mkldnn} --use_gpu=False --cpu_threads=${threads} --benchmark=True --det_model_dir=${det_model_dir} --rec_batch_num=${rec_batch_size} --rec_model_dir=${rec_model_dir} --image_dir=${img_dir} --save_log_path=${save_log_path} + status_check $? 
"${trainer}" "${command}" "${save_log_path}" done done done @@ -111,8 +132,10 @@ for train_model in ${train_model_list[*]}; do continue fi for rec_batch_size in ${rec_batch_size_list[*]}; do - # echo "${model_name} ${det_model_dir} ${rec_model_dir}, use_trt: ${use_trt} use_fp16: ${use_fp16}" - echo ${python} ${inference} --use_gpu=True --use_tensorrt=${use_trt} --precision=${precision} --benchmark=True --det_model_dir=${log_path}/${eval_model_name}_infer --rec_batch_num=${rec_batch_size} --rec_model_dir=${rec_model_dir} --image_dir=${img_dir} --save_log_path=${log_path}/${model_name}_${slim_trainer}_gpu_usetensorrt_${use_trt}_usefp16_${precision}_recbatchnum_${rec_batch_size}_infer.log + save_log_path="${log_path}/${model_name}_${slim_trainer}_gpu_usetensorrt_${use_trt}_usefp16_${precision}_recbatchnum_${rec_batch_size}_infer.log" + command="${python} ${inference} --use_gpu=True --use_tensorrt=${use_trt} --precision=${precision} --benchmark=True --det_model_dir=${log_path}/${eval_model_name}_infer --rec_batch_num=${rec_batch_size} --rec_model_dir=${rec_model_dir} --image_dir=${img_dir} --save_log_path=${save_log_path}" + ${python} ${inference} --use_gpu=True --use_tensorrt=${use_trt} --precision=${precision} --benchmark=True --det_model_dir=${log_path}/${eval_model_name}_infer --rec_batch_num=${rec_batch_size} --rec_model_dir=${rec_model_dir} --image_dir=${img_dir} --save_log_path=${save_log_path} + status_check $? "${trainer}" "${command}" "${save_log_path}" done done done diff --git a/test/test.sh b/test/test.sh index b43b1a61..cb894f26 100644 --- a/test/test.sh +++ b/test/test.sh @@ -21,7 +21,7 @@ elif [ ${MODE} = "whole_train_infer" ];then rm -rf ./train_data/icdar2015 wget -nc -P ./train_data/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/icdar2015.tar cd ./train_data/ && tar xf icdar2015.tar && cd ../ - epoch=300 + epoch=500 eval_batch_step=200 else echo "Do Nothing" @@ -55,9 +55,19 @@ rec_batch_size_list=$(func_parser "${lines[9]}") gpu_trt_list=$(func_parser "${lines[10]}") gpu_precision_list=$(func_parser "${lines[11]}") img_dir="./train_data/icdar2015/text_localization/ch4_test_images/" -# train superparameters -#epoch=$(func_parser "${lines[12]}") -#checkpoints=$(func_parser "${lines[13]}") + +function status_check(){ + last_status=$1 # 上个阶段的退出码 + run_model=$2 + run_command=$3 + save_log=$4 + echo ${case3} + if [ $last_status -eq 0 ]; then + echo -e "\033[33m $run_model successfully with command - ${run_command}! \033[0m" | tee -a ${save_log} + else + echo -e "\033[33m $case failed with command - ${run_command}! \033[0m" | tee -a ${save_log} + fi +} for train_model in ${train_model_list[*]}; do @@ -101,17 +111,14 @@ for train_model in ${train_model_list[*]}; do trainer="tools/train.py" export_model="tools/export_model.py" fi - # dataset="Train.dataset.data_dir=${train_dir} Train.dataset.label_file_list=${train_label_file} Eval.dataset.data_dir=${eval_dir} Eval.dataset.label_file_list=${eval_label_file}" save_log=${log_path}/${model_name}_${slim_trainer}_autocast_${auto_cast}_gpuid_${gpu} - + command="${python} ${launch} ${trainer} -c ${yml_file} -o Global.epoch_num=${epoch} Global.eval_batch_step=${eval_batch_step} Global.auto_cast=${auto_cast} Global.save_model_dir=${save_log} Global.use_gpu=${use_gpu}" ${python} ${launch} ${trainer} -c ${yml_file} -o Global.epoch_num=${epoch} Global.eval_batch_step=${eval_batch_step} Global.auto_cast=${auto_cast} Global.save_model_dir=${save_log} Global.use_gpu=${use_gpu} + status_check $? 
"${trainer}" "${command}" "${save_log}/train.log" + + command="${python} ${export_model} -c ${yml_file} -o Global.pretrained_model=${save_log}/best_accuracy Global.save_inference_dir=${save_log}/export_inference/ Global.save_model_dir=${save_log}" ${python} ${export_model} -c ${yml_file} -o Global.pretrained_model=${save_log}/best_accuracy Global.save_inference_dir=${save_log}/export_inference/ Global.save_model_dir=${save_log} - if [ $? -eq 0 ]; then - echo -e "\033[33m training of $model_name successfully!\033[0m" | tee -a ${save_log}/train.log - else - cat ${save_log}/train.log - echo -e "\033[33m training of $model_name failed!\033[0m" | tee -a ${save_log}/train.log - fi + status_check $? "${trainer}" "${command}" "${save_log}/train.log" if [ "${model_name}" = "det" ]; then export rec_batch_size_list=( "1" ) @@ -129,15 +136,10 @@ for train_model in ${train_model_list[*]}; do for use_mkldnn in ${use_mkldnn_list[*]}; do for threads in ${cpu_threads_list[*]}; do for rec_batch_size in ${rec_batch_size_list[*]}; do - echo ${python} ${inference} --enable_mkldnn=${use_mkldnn} --use_gpu=False --cpu_threads=${threads} --benchmark=True --det_model_dir=${save_log}/export_inference/ --rec_batch_num=${rec_batch_size} --rec_model_dir=${rec_model_dir} --image_dir=${img_dir} --save_log_path=${log_path}/${model_name}_${slim_trainer}_cpu_usemkldnn_${use_mkldnn}_cputhreads_${threads}_recbatchnum_${rec_batch_size}_infer.log - ${python} ${inference} --enable_mkldnn=${use_mkldnn} --use_gpu=False --cpu_threads=${threads} --benchmark=True --det_model_dir=${save_log}/export_inference/ --rec_batch_num=${rec_batch_size} --rec_model_dir=${rec_model_dir} --image_dir=${img_dir} --save_log_path=${log_path}/${model_name}_${slim_trainer}_cpu_usemkldnn_${use_mkldnn}_cputhreads_${threads}_recbatchnum_${rec_batch_size}_infer.log - if [ $? -eq 0 ]; then - echo -e "\033[33m training of $model_name successfully!\033[0m" | tee -a ${log_path}${model_name}_${slim_trainer}_cpu_usemkldnn_${use_mkldnn}_cputhreads_${threads}_recbatchnum_${rec_batch_size}_infer.log - else - cat ${log_path}${model_name}_${slim_trainer}_cpu_usemkldnn_${use_mkldnn}_cputhreads_${threads}_recbatchnum_${rec_batch_size}_infer.log - echo -e "\033[33m training of $model_name failed!\033[0m" | tee -a ${log_path}${model_name}_${slim_trainer}_cpu_usemkldnn_${use_mkldnn}_cputhreads_${threads}_recbatchnum_${rec_batch_size}_infer.log - fi - + save_log_path="${log_path}/${model_name}_${slim_trainer}_cpu_usemkldnn_${use_mkldnn}_cputhreads_${threads}_recbatchnum_${rec_batch_size}_infer.log" + command="${python} ${inference} --enable_mkldnn=${use_mkldnn} --use_gpu=False --cpu_threads=${threads} --benchmark=True --det_model_dir=${save_log}/export_inference/ --rec_batch_num=${rec_batch_size} --rec_model_dir=${rec_model_dir} --image_dir=${img_dir} --save_log_path=${save_log_path}" + ${python} ${inference} --enable_mkldnn=${use_mkldnn} --use_gpu=False --cpu_threads=${threads} --benchmark=True --det_model_dir=${save_log}/export_inference/ --rec_batch_num=${rec_batch_size} --rec_model_dir=${rec_model_dir} --image_dir=${img_dir} --save_log_path=${save_log_path} + status_check $? 
"${inference}" "${command}" "${save_log}" done done done @@ -148,8 +150,9 @@ for train_model in ${train_model_list[*]}; do continue fi for rec_batch_size in ${rec_batch_size_list[*]}; do - # echo "${model_name} ${det_model_dir} ${rec_model_dir}, use_trt: ${use_trt} use_fp16: ${use_fp16}" - ${python} ${inference} --use_gpu=True --use_tensorrt=${use_trt} --precision=${precision} --benchmark=True --det_model_dir=${save_log}/export_inference/ --rec_batch_num=${rec_batch_size} --rec_model_dir=${rec_model_dir} --image_dir=${img_dir} --save_log_path=${log_path}/${model_name}_${slim_trainer}_gpu_usetensorrt_${use_trt}_usefp16_${precision}_recbatchnum_${rec_batch_size}_infer.log + save_log_path="${log_path}/${model_name}_${slim_trainer}_gpu_usetensorrt_${use_trt}_usefp16_${precision}_recbatchnum_${rec_batch_size}_infer.log" + ${python} ${inference} --use_gpu=True --use_tensorrt=${use_trt} --precision=${precision} --benchmark=True --det_model_dir=${save_log}/export_inference/ --rec_batch_num=${rec_batch_size} --rec_model_dir=${rec_model_dir} --image_dir=${img_dir} --save_log_path=${save_log_path} + status_check $? "${inference}" "${command}" "${save_log}" done done done From 4a179f5fc0f8c375828b285d79dd5427f9afa263 Mon Sep 17 00:00:00 2001 From: LDOUBLEV Date: Tue, 8 Jun 2021 21:24:43 +0800 Subject: [PATCH 12/34] refine status check --- test/infer.sh | 2 +- test/test.sh | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/test/infer.sh b/test/infer.sh index 61d10b4d..4c49dde7 100644 --- a/test/infer.sh +++ b/test/infer.sh @@ -29,7 +29,7 @@ gpu_precision_list=$(func_parser "${lines[11]}") function status_check(){ - last_status=$1 # 上个阶段的退出码 + last_status=$1 # the exit code run_model=$2 run_command=$3 save_log=$4 diff --git a/test/test.sh b/test/test.sh index cb894f26..d24f2795 100644 --- a/test/test.sh +++ b/test/test.sh @@ -57,7 +57,7 @@ gpu_precision_list=$(func_parser "${lines[11]}") img_dir="./train_data/icdar2015/text_localization/ch4_test_images/" function status_check(){ - last_status=$1 # 上个阶段的退出码 + last_status=$1 # the exit code run_model=$2 run_command=$3 save_log=$4 @@ -92,7 +92,7 @@ for train_model in ${train_model_list[*]}; do else launch="-m paddle.distributed.launch --log_dir=./debug/ --gpus ${gpu}" fi - # echo "model_name: ${model_name} yml_file: ${yml_file} launch: ${launch} gpu: ${gpu}" + for auto_cast in ${auto_cast_list[*]}; do for slim_trainer in ${slim_trainer_list[*]}; do if [ ${slim_trainer} = "norm" ]; then From 04fb6148e15141387a881714143af58fc0305aea Mon Sep 17 00:00:00 2001 From: LDOUBLEV Date: Wed, 9 Jun 2021 10:12:32 +0800 Subject: [PATCH 13/34] add log_path to params.txt --- test/params.txt | 4 ++-- test/test.sh | 34 ++++++++++++++++++++++------------ 2 files changed, 24 insertions(+), 14 deletions(-) diff --git a/test/params.txt b/test/params.txt index 3fe857d3..e94b21e7 100644 --- a/test/params.txt +++ b/test/params.txt @@ -4,7 +4,7 @@ auto_cast_list: False trainer_list: norm|quant|prune python: python3.7 -inference: python|C++ +inference: python devices: cpu|gpu use_mkldnn_list: True|False cpu_threads_list: 1|6 @@ -12,4 +12,4 @@ rec_batch_size_list: 1|6 gpu_trt_list: True|False gpu_precision_list: fp32|fp16|int8 - +log_path: ./output diff --git a/test/test.sh b/test/test.sh index d24f2795..a7bb76f3 100644 --- a/test/test.sh +++ b/test/test.sh @@ -8,11 +8,12 @@ FILENAME=$1 MODE=$2 # prepare pretrained weights and dataset wget -nc -P ./pretrain_models/ https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/MobileNetV3_large_x0_5_pretrained.pdparams 
+ if [ ${MODE} = "lite_train_infer" ];then # pretrain lite train data rm -rf ./train_data/icdar2015 wget -nc -P ./train_data/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/icdar2015_lite.tar - cd ./train_data/ && tar xf icdar2015_lite.tar && + cd ./train_data/ && tar xf icdar2015_lite.tar ln -s ./icdar2015_lite ./icdar2015 cd ../ epoch=10 @@ -24,9 +25,17 @@ elif [ ${MODE} = "whole_train_infer" ];then epoch=500 eval_batch_step=200 else - echo "Do Nothing" + rm -rf ./train_data/icdar2015 + wget -nc -P ./train_data/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/icdar2015_infer.tar + cd ./train_data/ && tar xf icdar2015_infer.tar + ln -s ./icdar2015_infer ./icdar2015 + cd ../ + epoch=10 + eval_batch_step=10 fi +img_dir="./train_data/icdar2015/text_localization/ch4_test_images/" + dataline=$(cat ${FILENAME}) # parser params @@ -34,7 +43,7 @@ IFS=$'\n' lines=(${dataline}) function func_parser(){ strs=$1 - IFS=":" + IFS=": " array=(${strs}) tmp=${array[1]} echo ${tmp} @@ -54,7 +63,8 @@ cpu_threads_list=$(func_parser "${lines[8]}") rec_batch_size_list=$(func_parser "${lines[9]}") gpu_trt_list=$(func_parser "${lines[10]}") gpu_precision_list=$(func_parser "${lines[11]}") -img_dir="./train_data/icdar2015/text_localization/ch4_test_images/" + +log_path=$(func_parser "${lines[12]}") function status_check(){ last_status=$1 # the exit code @@ -113,12 +123,12 @@ for train_model in ${train_model_list[*]}; do fi save_log=${log_path}/${model_name}_${slim_trainer}_autocast_${auto_cast}_gpuid_${gpu} command="${python} ${launch} ${trainer} -c ${yml_file} -o Global.epoch_num=${epoch} Global.eval_batch_step=${eval_batch_step} Global.auto_cast=${auto_cast} Global.save_model_dir=${save_log} Global.use_gpu=${use_gpu}" - ${python} ${launch} ${trainer} -c ${yml_file} -o Global.epoch_num=${epoch} Global.eval_batch_step=${eval_batch_step} Global.auto_cast=${auto_cast} Global.save_model_dir=${save_log} Global.use_gpu=${use_gpu} - status_check $? "${trainer}" "${command}" "${save_log}/train.log" + echo ${python} ${launch} ${trainer} -c ${yml_file} -o Global.epoch_num=${epoch} Global.eval_batch_step=${eval_batch_step} Global.auto_cast=${auto_cast} Global.save_model_dir=${save_log} Global.use_gpu=${use_gpu} + # status_check $? "${trainer}" "${command}" "${save_log}/train.log" command="${python} ${export_model} -c ${yml_file} -o Global.pretrained_model=${save_log}/best_accuracy Global.save_inference_dir=${save_log}/export_inference/ Global.save_model_dir=${save_log}" - ${python} ${export_model} -c ${yml_file} -o Global.pretrained_model=${save_log}/best_accuracy Global.save_inference_dir=${save_log}/export_inference/ Global.save_model_dir=${save_log} - status_check $? "${trainer}" "${command}" "${save_log}/train.log" + echo ${python} ${export_model} -c ${yml_file} -o Global.pretrained_model=${save_log}/best_accuracy Global.save_inference_dir=${save_log}/export_inference/ Global.save_model_dir=${save_log} + # status_check $? 
"${trainer}" "${command}" "${save_log}/train.log" if [ "${model_name}" = "det" ]; then export rec_batch_size_list=( "1" ) @@ -138,8 +148,8 @@ for train_model in ${train_model_list[*]}; do for rec_batch_size in ${rec_batch_size_list[*]}; do save_log_path="${log_path}/${model_name}_${slim_trainer}_cpu_usemkldnn_${use_mkldnn}_cputhreads_${threads}_recbatchnum_${rec_batch_size}_infer.log" command="${python} ${inference} --enable_mkldnn=${use_mkldnn} --use_gpu=False --cpu_threads=${threads} --benchmark=True --det_model_dir=${save_log}/export_inference/ --rec_batch_num=${rec_batch_size} --rec_model_dir=${rec_model_dir} --image_dir=${img_dir} --save_log_path=${save_log_path}" - ${python} ${inference} --enable_mkldnn=${use_mkldnn} --use_gpu=False --cpu_threads=${threads} --benchmark=True --det_model_dir=${save_log}/export_inference/ --rec_batch_num=${rec_batch_size} --rec_model_dir=${rec_model_dir} --image_dir=${img_dir} --save_log_path=${save_log_path} - status_check $? "${inference}" "${command}" "${save_log}" + echo ${python} ${inference} --enable_mkldnn=${use_mkldnn} --use_gpu=False --cpu_threads=${threads} --benchmark=True --det_model_dir=${save_log}/export_inference/ --rec_batch_num=${rec_batch_size} --rec_model_dir=${rec_model_dir} --image_dir=${img_dir} --save_log_path=${save_log_path} + # status_check $? "${inference}" "${command}" "${save_log}" done done done @@ -151,8 +161,8 @@ for train_model in ${train_model_list[*]}; do fi for rec_batch_size in ${rec_batch_size_list[*]}; do save_log_path="${log_path}/${model_name}_${slim_trainer}_gpu_usetensorrt_${use_trt}_usefp16_${precision}_recbatchnum_${rec_batch_size}_infer.log" - ${python} ${inference} --use_gpu=True --use_tensorrt=${use_trt} --precision=${precision} --benchmark=True --det_model_dir=${save_log}/export_inference/ --rec_batch_num=${rec_batch_size} --rec_model_dir=${rec_model_dir} --image_dir=${img_dir} --save_log_path=${save_log_path} - status_check $? "${inference}" "${command}" "${save_log}" + echo ${python} ${inference} --use_gpu=True --use_tensorrt=${use_trt} --precision=${precision} --benchmark=True --det_model_dir=${save_log}/export_inference/ --rec_batch_num=${rec_batch_size} --rec_model_dir=${rec_model_dir} --image_dir=${img_dir} --save_log_path=${save_log_path} + # status_check $? 
"${inference}" "${command}" "${save_log}" done done done From 1203276fbd0cfdbcba8a40c00be8639915a96049 Mon Sep 17 00:00:00 2001 From: LDOUBLEV Date: Wed, 9 Jun 2021 11:22:29 +0800 Subject: [PATCH 14/34] add env, add infer det imgs, add infer_gpu_id to params.txt --- test/infer.sh | 17 ++++++++++------- test/params.txt | 2 +- test/test.sh | 30 ++++++++++++++++++------------ 3 files changed, 29 insertions(+), 20 deletions(-) diff --git a/test/infer.sh b/test/infer.sh index 4c49dde7..db8b788c 100644 --- a/test/infer.sh +++ b/test/infer.sh @@ -14,8 +14,6 @@ function func_parser(){ IFS=$'\n' # The training params train_model_list=$(func_parser "${lines[0]}") -gpu_list=$(func_parser "${lines[1]}") -auto_cast_list=$(func_parser "${lines[2]}") slim_trainer_list=$(func_parser "${lines[3]}") python=$(func_parser "${lines[4]}") # inference params @@ -27,13 +25,15 @@ rec_batch_size_list=$(func_parser "${lines[9]}") gpu_trt_list=$(func_parser "${lines[10]}") gpu_precision_list=$(func_parser "${lines[11]}") +infer_gpu_id=$(func_parser "${lines[12]}") +log_path=$(func_parser "${lines[13]}") + function status_check(){ last_status=$1 # the exit code run_model=$2 run_command=$3 save_log=$4 - echo ${case3} if [ $last_status -eq 0 ]; then echo -e "\033[33m $run_model successfully with command - ${run_command}! \033[0m" | tee -a ${save_log} else @@ -45,11 +45,13 @@ for train_model in ${train_model_list[*]}; do if [ ${train_model} = "det" ];then model_name="det" yml_file="configs/det/det_mv3_db.yml" - img_dir="" + wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/ch_det_data_50.tar && tar xf ./inference/ch_det_data_50.tar + img_dir="./inference/ch_det_data_50/" elif [ ${train_model} = "rec" ];then model_name="rec" yml_file="configs/rec/rec_mv3_none_bilstm_ctc.yml" - img_dir="" + wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/ch_rec_data_200.tar && tar xf ./inference/ch_rec_data_200.tar + img_dir="./inference/ch_rec_data_200/" fi # eval @@ -126,6 +128,7 @@ for train_model in ${train_model_list[*]}; do done done else + env="CUDA_VISIBLE_DEVICES=${infer_gpu_id}" for use_trt in ${gpu_trt_list[*]}; do for precision in ${gpu_precision_list[*]}; do if [ ${use_trt} = "False" ] && [ ${precision} != "fp32" ]; then @@ -133,8 +136,8 @@ for train_model in ${train_model_list[*]}; do fi for rec_batch_size in ${rec_batch_size_list[*]}; do save_log_path="${log_path}/${model_name}_${slim_trainer}_gpu_usetensorrt_${use_trt}_usefp16_${precision}_recbatchnum_${rec_batch_size}_infer.log" - command="${python} ${inference} --use_gpu=True --use_tensorrt=${use_trt} --precision=${precision} --benchmark=True --det_model_dir=${log_path}/${eval_model_name}_infer --rec_batch_num=${rec_batch_size} --rec_model_dir=${rec_model_dir} --image_dir=${img_dir} --save_log_path=${save_log_path}" - ${python} ${inference} --use_gpu=True --use_tensorrt=${use_trt} --precision=${precision} --benchmark=True --det_model_dir=${log_path}/${eval_model_name}_infer --rec_batch_num=${rec_batch_size} --rec_model_dir=${rec_model_dir} --image_dir=${img_dir} --save_log_path=${save_log_path} + command="${env} ${python} ${inference} --use_gpu=True --use_tensorrt=${use_trt} --precision=${precision} --benchmark=True --det_model_dir=${log_path}/${eval_model_name}_infer --rec_batch_num=${rec_batch_size} --rec_model_dir=${rec_model_dir} --image_dir=${img_dir} --save_log_path=${save_log_path}" + ${env} ${python} ${inference} --use_gpu=True --use_tensorrt=${use_trt} --precision=${precision} --benchmark=True 
--det_model_dir=${log_path}/${eval_model_name}_infer --rec_batch_num=${rec_batch_size} --rec_model_dir=${rec_model_dir} --image_dir=${img_dir} --save_log_path=${save_log_path} status_check $? "${trainer}" "${command}" "${save_log_path}" done done diff --git a/test/params.txt b/test/params.txt index e94b21e7..ff1cc008 100644 --- a/test/params.txt +++ b/test/params.txt @@ -11,5 +11,5 @@ cpu_threads_list: 1|6 rec_batch_size_list: 1|6 gpu_trt_list: True|False gpu_precision_list: fp32|fp16|int8 - +infer_gpu_id: 0 log_path: ./output diff --git a/test/test.sh b/test/test.sh index a7bb76f3..cc1aee92 100644 --- a/test/test.sh +++ b/test/test.sh @@ -64,14 +64,13 @@ rec_batch_size_list=$(func_parser "${lines[9]}") gpu_trt_list=$(func_parser "${lines[10]}") gpu_precision_list=$(func_parser "${lines[11]}") -log_path=$(func_parser "${lines[12]}") +log_path=$(func_parser "${lines[13]}") function status_check(){ last_status=$1 # the exit code run_model=$2 run_command=$3 save_log=$4 - echo ${case3} if [ $last_status -eq 0 ]; then echo -e "\033[33m $run_model successfully with command - ${run_command}! \033[0m" | tee -a ${save_log} else @@ -97,10 +96,16 @@ for train_model in ${train_model_list[*]}; do if [ ${gpu} = "-1" ];then lanuch="" use_gpu=False + env="" elif [ ${#gpu} -le 1 ];then launch="" + env="CUDA_VISIBLE_DEVICES=${gpu}" else launch="-m paddle.distributed.launch --log_dir=./debug/ --gpus ${gpu}" + IFS="," + array=(${gpu}) + env="CUDA_VISIBLE_DEVICES=${array[0]}" + IFS="|" fi for auto_cast in ${auto_cast_list[*]}; do @@ -122,13 +127,13 @@ for train_model in ${train_model_list[*]}; do export_model="tools/export_model.py" fi save_log=${log_path}/${model_name}_${slim_trainer}_autocast_${auto_cast}_gpuid_${gpu} - command="${python} ${launch} ${trainer} -c ${yml_file} -o Global.epoch_num=${epoch} Global.eval_batch_step=${eval_batch_step} Global.auto_cast=${auto_cast} Global.save_model_dir=${save_log} Global.use_gpu=${use_gpu}" - echo ${python} ${launch} ${trainer} -c ${yml_file} -o Global.epoch_num=${epoch} Global.eval_batch_step=${eval_batch_step} Global.auto_cast=${auto_cast} Global.save_model_dir=${save_log} Global.use_gpu=${use_gpu} - # status_check $? "${trainer}" "${command}" "${save_log}/train.log" + command="${env} ${python} ${launch} ${trainer} -c ${yml_file} -o Global.epoch_num=${epoch} Global.eval_batch_step=${eval_batch_step} Global.auto_cast=${auto_cast} Global.save_model_dir=${save_log} Global.use_gpu=${use_gpu}" + ${env} ${python} ${launch} ${trainer} -c ${yml_file} -o Global.epoch_num=${epoch} Global.eval_batch_step=${eval_batch_step} Global.auto_cast=${auto_cast} Global.save_model_dir=${save_log} Global.use_gpu=${use_gpu} + status_check $? "${trainer}" "${command}" "${save_log}/train.log" - command="${python} ${export_model} -c ${yml_file} -o Global.pretrained_model=${save_log}/best_accuracy Global.save_inference_dir=${save_log}/export_inference/ Global.save_model_dir=${save_log}" - echo ${python} ${export_model} -c ${yml_file} -o Global.pretrained_model=${save_log}/best_accuracy Global.save_inference_dir=${save_log}/export_inference/ Global.save_model_dir=${save_log} - # status_check $? 
"${trainer}" "${command}" "${save_log}/train.log" + command="${env} ${python} ${export_model} -c ${yml_file} -o Global.pretrained_model=${save_log}/best_accuracy Global.save_inference_dir=${save_log}/export_inference/ Global.save_model_dir=${save_log}" + ${env} ${python} ${export_model} -c ${yml_file} -o Global.pretrained_model=${save_log}/best_accuracy Global.save_inference_dir=${save_log}/export_inference/ Global.save_model_dir=${save_log} + status_check $? "${trainer}" "${command}" "${save_log}/train.log" if [ "${model_name}" = "det" ]; then export rec_batch_size_list=( "1" ) @@ -148,8 +153,8 @@ for train_model in ${train_model_list[*]}; do for rec_batch_size in ${rec_batch_size_list[*]}; do save_log_path="${log_path}/${model_name}_${slim_trainer}_cpu_usemkldnn_${use_mkldnn}_cputhreads_${threads}_recbatchnum_${rec_batch_size}_infer.log" command="${python} ${inference} --enable_mkldnn=${use_mkldnn} --use_gpu=False --cpu_threads=${threads} --benchmark=True --det_model_dir=${save_log}/export_inference/ --rec_batch_num=${rec_batch_size} --rec_model_dir=${rec_model_dir} --image_dir=${img_dir} --save_log_path=${save_log_path}" - echo ${python} ${inference} --enable_mkldnn=${use_mkldnn} --use_gpu=False --cpu_threads=${threads} --benchmark=True --det_model_dir=${save_log}/export_inference/ --rec_batch_num=${rec_batch_size} --rec_model_dir=${rec_model_dir} --image_dir=${img_dir} --save_log_path=${save_log_path} - # status_check $? "${inference}" "${command}" "${save_log}" + ${python} ${inference} --enable_mkldnn=${use_mkldnn} --use_gpu=False --cpu_threads=${threads} --benchmark=True --det_model_dir=${save_log}/export_inference/ --rec_batch_num=${rec_batch_size} --rec_model_dir=${rec_model_dir} --image_dir=${img_dir} --save_log_path=${save_log_path} + status_check $? "${inference}" "${command}" "${save_log}" done done done @@ -161,8 +166,9 @@ for train_model in ${train_model_list[*]}; do fi for rec_batch_size in ${rec_batch_size_list[*]}; do save_log_path="${log_path}/${model_name}_${slim_trainer}_gpu_usetensorrt_${use_trt}_usefp16_${precision}_recbatchnum_${rec_batch_size}_infer.log" - echo ${python} ${inference} --use_gpu=True --use_tensorrt=${use_trt} --precision=${precision} --benchmark=True --det_model_dir=${save_log}/export_inference/ --rec_batch_num=${rec_batch_size} --rec_model_dir=${rec_model_dir} --image_dir=${img_dir} --save_log_path=${save_log_path} - # status_check $? "${inference}" "${command}" "${save_log}" + command="${env} ${python} ${inference} --use_gpu=True --use_tensorrt=${use_trt} --precision=${precision} --benchmark=True --det_model_dir=${save_log}/export_inference/ --rec_batch_num=${rec_batch_size} --rec_model_dir=${rec_model_dir} --image_dir=${img_dir} --save_log_path=${save_log_path}" + ${env} ${python} ${inference} --use_gpu=True --use_tensorrt=${use_trt} --precision=${precision} --benchmark=True --det_model_dir=${save_log}/export_inference/ --rec_batch_num=${rec_batch_size} --rec_model_dir=${rec_model_dir} --image_dir=${img_dir} --save_log_path=${save_log_path} + status_check $? 
"${inference}" "${command}" "${save_log}" done done done From 0f84fc1e5a666273ddd1c85fcc72c46eccef4f85 Mon Sep 17 00:00:00 2001 From: LDOUBLEV Date: Wed, 9 Jun 2021 11:59:04 +0800 Subject: [PATCH 15/34] fix ci error --- test/infer.sh | 121 +++++++++++++++++++++++++----------------------- test/params.txt | 2 +- test/test.sh | 10 ++-- 3 files changed, 68 insertions(+), 65 deletions(-) diff --git a/test/infer.sh b/test/infer.sh index db8b788c..18aab1f1 100644 --- a/test/infer.sh +++ b/test/infer.sh @@ -1,12 +1,12 @@ #!/bin/bash - +FILENAME=$1 dataline=$(cat ${FILENAME}) # parser params IFS=$'\n' lines=(${dataline}) function func_parser(){ strs=$1 - IFS=":" + IFS=": " array=(${strs}) tmp=${array[1]} echo ${tmp} @@ -17,7 +17,7 @@ train_model_list=$(func_parser "${lines[0]}") slim_trainer_list=$(func_parser "${lines[3]}") python=$(func_parser "${lines[4]}") # inference params -inference=$(func_parser "${lines[5]}") +# inference=$(func_parser "${lines[5]}") devices=$(func_parser "${lines[6]}") use_mkldnn_list=$(func_parser "${lines[7]}") cpu_threads_list=$(func_parser "${lines[8]}") @@ -40,14 +40,15 @@ function status_check(){ echo -e "\033[33m $case failed with command - ${run_command}! \033[0m" | tee -a ${save_log} fi } - +IFS='|' for train_model in ${train_model_list[*]}; do - if [ ${train_model} = "det" ];then + if [ ${train_model} = "ocr_det" ];then model_name="det" yml_file="configs/det/det_mv3_db.yml" - wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/ch_det_data_50.tar && tar xf ./inference/ch_det_data_50.tar + # wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/ch_det_data_50.tar + tar xf ./inference/ch_det_data_50.tar img_dir="./inference/ch_det_data_50/" - elif [ ${train_model} = "rec" ];then + elif [ ${train_model} = "ocr_rec" ];then model_name="rec" yml_file="configs/rec/rec_mv3_none_bilstm_ctc.yml" wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/ch_rec_data_200.tar && tar xf ./inference/ch_rec_data_200.tar @@ -71,7 +72,7 @@ for train_model in ${train_model_list[*]}; do else eval_model_name="ch_ppocr_mobile_v2.0_rec_quant_infer" wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/slim/ch_ppocr_mobile_v2.0_rec_quant_train.tar - fi + fi elif [ ${slim_trainer} = "distill" ]; then if [ ${model_name} = "det" ]; then eval_model_name="ch_ppocr_mobile_v2.0_det_distill_infer" @@ -89,59 +90,61 @@ for train_model in ${train_model_list[*]}; do wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/slim/ch_ppocr_mobile_v2.0_rec_prune_train.tar fi fi - save_log_path="${log_path}/${eval_model_name}" - command="${python} tools/eval.py -c ${yml_file} -o Global.pretrained_model=${eval_model_name} Global.save_model_dir=${save_log_path}" - ${python} tools/eval.py -c ${yml_file} -o Global.pretrained_model=${eval_model_name} Global.save_model_dir=${save_log_path} - status_check $? "${trainer}" "${command}" "${save_log_path}/train.log" - command="${python} tools/export_model.py -c ${yml_file} -o Global.pretrained_model=${eval_model_name} Global.save_inference_dir=${log_path}/${eval_model_name}_infer Global.save_model_dir=${save_log_path}" - ${python} tools/export_model.py -c ${yml_file} -o Global.pretrained_model=${eval_model_name} Global.save_inference_dir=${log_path}/${eval_model_name}_infer Global.save_model_dir=${save_log_path} - status_check $? 
"${trainer}" "${command}" "${save_log_path}/train.log" + save_log_path="${log_path}/${eval_model_name}" + command="${python} tools/eval.py -c ${yml_file} -o Global.pretrained_model=${eval_model_name} Global.save_model_dir=${save_log_path}" + ${python} tools/eval.py -c ${yml_file} -o Global.pretrained_model=${eval_model_name} Global.save_model_dir=${save_log_path} + status_check $? "${trainer}" "${command}" "${save_log_path}/train.log" - if [ $? -eq 0 ]; then - echo -e "\033[33m training of $model_name successfully!\033[0m" | tee -a ${save_log}/train.log - else - cat ${save_log}/train.log - echo -e "\033[33m training of $model_name failed!\033[0m" | tee -a ${save_log}/train.log - fi - if [ "${model_name}" = "det" ]; then - export rec_batch_size_list=( "1" ) - inference="tools/infer/predict_det.py" - det_model_dir=${log_path}/${eval_model_name}_infer - rec_model_dir="" - elif [ "${model_name}" = "rec" ]; then - inference="tools/infer/predict_rec.py" - rec_model_dir=${log_path}/${eval_model_name}_infer - det_model_dir="" - fi - # inference - for device in ${devices[*]}; do - if [ ${device} = "cpu" ]; then - for use_mkldnn in ${use_mkldnn_list[*]}; do - for threads in ${cpu_threads_list[*]}; do - for rec_batch_size in ${rec_batch_size_list[*]}; do - save_log_path="${log_path}/${model_name}_${slim_trainer}_cpu_usemkldnn_${use_mkldnn}_cputhreads_${threads}_recbatchnum_${rec_batch_size}_infer.log" - command="${python} ${inference} --enable_mkldnn=${use_mkldnn} --use_gpu=False --cpu_threads=${threads} --benchmark=True --det_model_dir=${det_model_dir} --rec_batch_num=${rec_batch_size} --rec_model_dir=${rec_model_dir} --image_dir=${img_dir} --save_log_path=${save_log_path}" - ${python} ${inference} --enable_mkldnn=${use_mkldnn} --use_gpu=False --cpu_threads=${threads} --benchmark=True --det_model_dir=${det_model_dir} --rec_batch_num=${rec_batch_size} --rec_model_dir=${rec_model_dir} --image_dir=${img_dir} --save_log_path=${save_log_path} - status_check $? "${trainer}" "${command}" "${save_log_path}" - done - done - done - else - env="CUDA_VISIBLE_DEVICES=${infer_gpu_id}" - for use_trt in ${gpu_trt_list[*]}; do - for precision in ${gpu_precision_list[*]}; do - if [ ${use_trt} = "False" ] && [ ${precision} != "fp32" ]; then - continue - fi - for rec_batch_size in ${rec_batch_size_list[*]}; do - save_log_path="${log_path}/${model_name}_${slim_trainer}_gpu_usetensorrt_${use_trt}_usefp16_${precision}_recbatchnum_${rec_batch_size}_infer.log" - command="${env} ${python} ${inference} --use_gpu=True --use_tensorrt=${use_trt} --precision=${precision} --benchmark=True --det_model_dir=${log_path}/${eval_model_name}_infer --rec_batch_num=${rec_batch_size} --rec_model_dir=${rec_model_dir} --image_dir=${img_dir} --save_log_path=${save_log_path}" - ${env} ${python} ${inference} --use_gpu=True --use_tensorrt=${use_trt} --precision=${precision} --benchmark=True --det_model_dir=${log_path}/${eval_model_name}_infer --rec_batch_num=${rec_batch_size} --rec_model_dir=${rec_model_dir} --image_dir=${img_dir} --save_log_path=${save_log_path} - status_check $? 
"${trainer}" "${command}" "${save_log_path}" - done - done - done + command="${python} tools/export_model.py -c ${yml_file} -o Global.pretrained_model=${eval_model_name} Global.save_inference_dir=${log_path}/${eval_model_name}_infer Global.save_model_dir=${save_log_path}" + ${python} tools/export_model.py -c ${yml_file} -o Global.pretrained_model=${eval_model_name} Global.save_inference_dir="${log_path}/${eval_model_name}_infer" Global.save_model_dir=${save_log_path} + status_check $? "${trainer}" "${command}" "${save_log_path}/train.log" + + if [ $? -eq 0 ]; then + echo -e "\033[33m training of $model_name successfully!\033[0m" | tee -a ${save_log}/train.log + else + cat ${save_log}/train.log + echo -e "\033[33m training of $model_name failed!\033[0m" | tee -a ${save_log}/train.log fi + if [ "${model_name}" = "det" ]; then + export rec_batch_size_list=( "1" ) + inference="tools/infer/predict_det.py" + det_model_dir=${log_path}/${eval_model_name}_infer + rec_model_dir="" + elif [ "${model_name}" = "rec" ]; then + inference="tools/infer/predict_rec.py" + rec_model_dir=${log_path}/${eval_model_name}_infer + det_model_dir="" + fi + # inference + for device in ${devices[*]}; do + if [ ${device} = "cpu" ]; then + for use_mkldnn in ${use_mkldnn_list[*]}; do + for threads in ${cpu_threads_list[*]}; do + for rec_batch_size in ${rec_batch_size_list[*]}; do + save_log_path="${log_path}/${model_name}_${slim_trainer}_cpu_usemkldnn_${use_mkldnn}_cputhreads_${threads}_recbatchnum_${rec_batch_size}_infer.log" + command="${python} ${inference} --enable_mkldnn=${use_mkldnn} --use_gpu=False --cpu_threads=${threads} --benchmark=True --det_model_dir=${det_model_dir} --rec_batch_num=${rec_batch_size} --rec_model_dir=${rec_model_dir} --image_dir=${img_dir} --save_log_path=${save_log_path}" + ${python} ${inference} --enable_mkldnn=${use_mkldnn} --use_gpu=False --cpu_threads=${threads} --benchmark=True --det_model_dir=${det_model_dir} --rec_batch_num=${rec_batch_size} --rec_model_dir=${rec_model_dir} --image_dir=${img_dir} --save_log_path=${save_log_path} + status_check $? "${trainer}" "${command}" "${save_log_path}" + done + done + done + else + env="CUDA_VISIBLE_DEVICES=${infer_gpu_id}" + for use_trt in ${gpu_trt_list[*]}; do + for precision in ${gpu_precision_list[*]}; do + if [ ${use_trt} = "False" ] && [ ${precision} != "fp32" ]; then + continue + fi + for rec_batch_size in ${rec_batch_size_list[*]}; do + save_log_path="${log_path}/${model_name}_${slim_trainer}_gpu_usetensorrt_${use_trt}_usefp16_${precision}_recbatchnum_${rec_batch_size}_infer.log" + command="${env} ${python} ${inference} --use_gpu=True --use_tensorrt=${use_trt} --precision=${precision} --benchmark=True --det_model_dir=${log_path}/${eval_model_name}_infer --rec_batch_num=${rec_batch_size} --rec_model_dir=${rec_model_dir} --image_dir=${img_dir} --save_log_path=${save_log_path}" + ${env} ${python} ${inference} --use_gpu=True --use_tensorrt=${use_trt} --precision=${precision} --benchmark=True --det_model_dir=${log_path}/${eval_model_name}_infer --rec_batch_num=${rec_batch_size} --rec_model_dir=${rec_model_dir} --image_dir=${img_dir} --save_log_path=${save_log_path} + status_check $? 
"${trainer}" "${command}" "${save_log_path}" + done + done + done + fi + done done done diff --git a/test/params.txt b/test/params.txt index ff1cc008..93c5ccde 100644 --- a/test/params.txt +++ b/test/params.txt @@ -1,7 +1,7 @@ train_model_list: ocr_det gpu_list: -1|0|0,1 auto_cast_list: False -trainer_list: norm|quant|prune +trainer_list: norm|quant python: python3.7 inference: python diff --git a/test/test.sh b/test/test.sh index cc1aee92..24a68b26 100644 --- a/test/test.sh +++ b/test/test.sh @@ -78,7 +78,7 @@ function status_check(){ fi } - +IFS="|" for train_model in ${train_model_list[*]}; do if [ ${train_model} = "ocr_det" ];then model_name="det" @@ -107,7 +107,7 @@ for train_model in ${train_model_list[*]}; do env="CUDA_VISIBLE_DEVICES=${array[0]}" IFS="|" fi - + IFS="|" for auto_cast in ${auto_cast_list[*]}; do for slim_trainer in ${slim_trainer_list[*]}; do if [ ${slim_trainer} = "norm" ]; then @@ -126,13 +126,13 @@ for train_model in ${train_model_list[*]}; do trainer="tools/train.py" export_model="tools/export_model.py" fi - save_log=${log_path}/${model_name}_${slim_trainer}_autocast_${auto_cast}_gpuid_${gpu} + save_log="${log_path}/${model_name}_${slim_trainer}_autocast_${auto_cast}_gpuid_${gpu}" command="${env} ${python} ${launch} ${trainer} -c ${yml_file} -o Global.epoch_num=${epoch} Global.eval_batch_step=${eval_batch_step} Global.auto_cast=${auto_cast} Global.save_model_dir=${save_log} Global.use_gpu=${use_gpu}" ${env} ${python} ${launch} ${trainer} -c ${yml_file} -o Global.epoch_num=${epoch} Global.eval_batch_step=${eval_batch_step} Global.auto_cast=${auto_cast} Global.save_model_dir=${save_log} Global.use_gpu=${use_gpu} status_check $? "${trainer}" "${command}" "${save_log}/train.log" - command="${env} ${python} ${export_model} -c ${yml_file} -o Global.pretrained_model=${save_log}/best_accuracy Global.save_inference_dir=${save_log}/export_inference/ Global.save_model_dir=${save_log}" - ${env} ${python} ${export_model} -c ${yml_file} -o Global.pretrained_model=${save_log}/best_accuracy Global.save_inference_dir=${save_log}/export_inference/ Global.save_model_dir=${save_log} + command="${env} ${python} ${export_model} -c ${yml_file} -o Global.pretrained_model=${save_log}/latest Global.save_inference_dir=${save_log}/export_inference/ Global.save_model_dir=${save_log}" + ${env} ${python} ${export_model} -c ${yml_file} -o Global.pretrained_model=${save_log}/latest Global.save_inference_dir=${save_log}/export_inference/ Global.save_model_dir=${save_log} status_check $? 
"${trainer}" "${command}" "${save_log}/train.log" if [ "${model_name}" = "det" ]; then From 4b56069d84138096817bcf30460f7ee0ecc63d1c Mon Sep 17 00:00:00 2001 From: LDOUBLEV Date: Wed, 9 Jun 2021 12:53:22 +0800 Subject: [PATCH 16/34] fix bug --- test/infer.sh | 35 ++++++++++++++++++++++------------- test/test.sh | 4 ++-- 2 files changed, 24 insertions(+), 15 deletions(-) diff --git a/test/infer.sh b/test/infer.sh index 18aab1f1..30b91df7 100644 --- a/test/infer.sh +++ b/test/infer.sh @@ -45,13 +45,14 @@ for train_model in ${train_model_list[*]}; do if [ ${train_model} = "ocr_det" ];then model_name="det" yml_file="configs/det/det_mv3_db.yml" - # wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/ch_det_data_50.tar - tar xf ./inference/ch_det_data_50.tar + wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/ch_det_data_50.tar + cd ./inference && tar xf ch_det_data_50.tar && cd ../ img_dir="./inference/ch_det_data_50/" elif [ ${train_model} = "ocr_rec" ];then model_name="rec" yml_file="configs/rec/rec_mv3_none_bilstm_ctc.yml" - wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/ch_rec_data_200.tar && tar xf ./inference/ch_rec_data_200.tar + wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/ch_rec_data_200.tar + cd ./inference && tar xf ch_rec_data_200.tar && cd ../ img_dir="./inference/ch_rec_data_200/" fi @@ -59,45 +60,53 @@ for train_model in ${train_model_list[*]}; do for slim_trainer in ${slim_trainer_list[*]}; do if [ ${slim_trainer} = "norm" ]; then if [ ${model_name} = "det" ]; then - eval_model_name="ch_ppocr_mobile_v2.0_det_infer" + eval_model_name="ch_ppocr_mobile_v2.0_det_train" wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_det_train.tar + cd ./inference && tar xf ${eval_model_name}.tar && cd ../ else - eval_model_name="ch_ppocr_mobile_v2.0_rec_infer" + eval_model_name="ch_ppocr_mobile_v2.0_rec_train" wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_rec_train.tar + cd ./inference && tar xf ${eval_model_name}.tar && cd ../ fi elif [ ${slim_trainer} = "quant" ]; then if [ ${model_name} = "det" ]; then - eval_model_name="ch_ppocr_mobile_v2.0_det_quant_infer" + eval_model_name="ch_ppocr_mobile_v2.0_det_quant_train" wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/slim/ch_ppocr_mobile_v2.0_det_quant_train.tar + cd ./inference && tar xf ${eval_model_name}.tar && cd ../ else - eval_model_name="ch_ppocr_mobile_v2.0_rec_quant_infer" + eval_model_name="ch_ppocr_mobile_v2.0_rec_quant_train" wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/slim/ch_ppocr_mobile_v2.0_rec_quant_train.tar + cd ./inference && tar xf ${eval_model_name}.tar && cd ../ fi elif [ ${slim_trainer} = "distill" ]; then if [ ${model_name} = "det" ]; then - eval_model_name="ch_ppocr_mobile_v2.0_det_distill_infer" + eval_model_name="ch_ppocr_mobile_v2.0_det_distill_train" wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/slim/ch_ppocr_mobile_v2.0_det_distill_train.tar + cd ./inference && tar xf ${eval_model_name}.tar && cd ../ else - eval_model_name="ch_ppocr_mobile_v2.0_rec_distill_infer" + eval_model_name="ch_ppocr_mobile_v2.0_rec_distill_train" wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/slim/ch_ppocr_mobile_v2.0_rec_distill_train.tar + cd ./inference && tar xf ${eval_model_name}.tar && cd ../ fi elif [ ${slim_trainer} = "prune" ]; then if [ ${model_name} = "det" 
]; then eval_model_name="ch_ppocr_mobile_v2.0_det_prune_train" wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/slim/ch_ppocr_mobile_v2.0_det_prune_train.tar + cd ./inference && tar xf ${eval_model_name}.tar && cd ../ else eval_model_name="ch_ppocr_mobile_v2.0_rec_prune_train" wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/slim/ch_ppocr_mobile_v2.0_rec_prune_train.tar + cd ./inference && tar xf ${eval_model_name}.tar && cd ../ fi fi save_log_path="${log_path}/${eval_model_name}" - command="${python} tools/eval.py -c ${yml_file} -o Global.pretrained_model=${eval_model_name} Global.save_model_dir=${save_log_path}" - ${python} tools/eval.py -c ${yml_file} -o Global.pretrained_model=${eval_model_name} Global.save_model_dir=${save_log_path} + command="${python} tools/eval.py -c ${yml_file} -o Global.pretrained_model="${eval_model_name}/best_accuracy" Global.save_model_dir=${save_log_path}" + ${python} tools/eval.py -c ${yml_file} -o Global.pretrained_model="${eval_model_name}/best_accuracy" Global.save_model_dir=${save_log_path} status_check $? "${trainer}" "${command}" "${save_log_path}/train.log" - command="${python} tools/export_model.py -c ${yml_file} -o Global.pretrained_model=${eval_model_name} Global.save_inference_dir=${log_path}/${eval_model_name}_infer Global.save_model_dir=${save_log_path}" - ${python} tools/export_model.py -c ${yml_file} -o Global.pretrained_model=${eval_model_name} Global.save_inference_dir="${log_path}/${eval_model_name}_infer" Global.save_model_dir=${save_log_path} + command="${python} tools/export_model.py -c ${yml_file} -o Global.pretrained_model="${eval_model_name}/best_accuracy" Global.save_inference_dir=${log_path}/${eval_model_name}_infer Global.save_model_dir=${save_log_path}" + ${python} tools/export_model.py -c ${yml_file} -o Global.pretrained_model="${eval_model_name}/best_accuracy" Global.save_inference_dir="${log_path}/${eval_model_name}_infer" Global.save_model_dir=${save_log_path} status_check $? "${trainer}" "${command}" "${save_log_path}/train.log" if [ $? -eq 0 ]; then diff --git a/test/test.sh b/test/test.sh index 24a68b26..a1b711b7 100644 --- a/test/test.sh +++ b/test/test.sh @@ -127,8 +127,8 @@ for train_model in ${train_model_list[*]}; do export_model="tools/export_model.py" fi save_log="${log_path}/${model_name}_${slim_trainer}_autocast_${auto_cast}_gpuid_${gpu}" - command="${env} ${python} ${launch} ${trainer} -c ${yml_file} -o Global.epoch_num=${epoch} Global.eval_batch_step=${eval_batch_step} Global.auto_cast=${auto_cast} Global.save_model_dir=${save_log} Global.use_gpu=${use_gpu}" - ${env} ${python} ${launch} ${trainer} -c ${yml_file} -o Global.epoch_num=${epoch} Global.eval_batch_step=${eval_batch_step} Global.auto_cast=${auto_cast} Global.save_model_dir=${save_log} Global.use_gpu=${use_gpu} + command="${env} ${python} ${launch} ${trainer} -c ${yml_file} -o Global.epoch_num=${epoch} Global.eval_batch_step=${eval_batch_step} Global.auto_cast=${auto_cast} Global.save_model_dir=${save_log} Global.use_gpu=${use_gpu} Train.loader.batch_size_per_card=2" + ${env} ${python} ${launch} ${trainer} -c ${yml_file} -o Global.epoch_num=${epoch} Global.eval_batch_step=${eval_batch_step} Global.auto_cast=${auto_cast} Global.save_model_dir=${save_log} Global.use_gpu=${use_gpu} Train.loader.batch_size_per_card=2 status_check $? 
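The wget-then-untar sequence above is repeated once per pretrained model and per trainer branch; a small helper would keep the pattern in one place. A sketch under the assumption that every archive lands in ./inference as in the patch (the helper name fetch_and_untar is hypothetical, not part of the patch):

    function fetch_and_untar(){
        local url=$1
        local dst=${2:-./inference}   # default target directory used throughout the patch
        wget -nc -P ${dst} ${url}
        tar xf "${dst}/$(basename ${url})" -C ${dst}
    }
    # usage, with a URL taken from the hunk above:
    fetch_and_untar https://paddleocr.bj.bcebos.com/dygraph_v2.0/slim/ch_ppocr_mobile_v2.0_det_prune_train.tar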
"${trainer}" "${command}" "${save_log}/train.log" command="${env} ${python} ${export_model} -c ${yml_file} -o Global.pretrained_model=${save_log}/latest Global.save_inference_dir=${save_log}/export_inference/ Global.save_model_dir=${save_log}" From 3cdc9e53835de2fd06199e851efaea614fbef6b7 Mon Sep 17 00:00:00 2001 From: LDOUBLEV Date: Wed, 9 Jun 2021 13:02:34 +0800 Subject: [PATCH 17/34] add pretrain to Global --- test/test.sh | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/test/test.sh b/test/test.sh index a1b711b7..15a10d17 100644 --- a/test/test.sh +++ b/test/test.sh @@ -8,6 +8,8 @@ FILENAME=$1 MODE=$2 # prepare pretrained weights and dataset wget -nc -P ./pretrain_models/ https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/MobileNetV3_large_x0_5_pretrained.pdparams +wget -nc -P ./pretrain_models/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/det_mv3_db_v2.0_train.tar +cd pretrain_models && tar xf det_mv3_db_v2.0_train.tar && cd ../ if [ ${MODE} = "lite_train_infer" ];then # pretrain lite train data @@ -107,28 +109,32 @@ for train_model in ${train_model_list[*]}; do env="CUDA_VISIBLE_DEVICES=${array[0]}" IFS="|" fi - IFS="|" for auto_cast in ${auto_cast_list[*]}; do for slim_trainer in ${slim_trainer_list[*]}; do if [ ${slim_trainer} = "norm" ]; then trainer="tools/train.py" export_model="tools/export_model.py" + pretrain="./pretrain_models/MobileNetV3_large_x0_5_pretrained" elif [ ${slim_trainer} = "quant" ]; then trainer="deploy/slim/quantization/quant.py" export_model="deploy/slim/quantization/export_model.py" + pretrain="./pretrain_models/det_mv3_db_v2.0_train/best_accuracy" elif [ ${slim_trainer} = "prune" ]; then trainer="deploy/slim/prune/sensitivity_anal.py" export_model="deploy/slim/prune/export_prune_model.py" + pretrain="./pretrain_models/det_mv3_db_v2.0_train/best_accuracy" elif [ ${slim_trainer} = "distill" ]; then trainer="deploy/slim/distill/train_dml.py" export_model="deploy/slim/distill/export_distill_model.py" + pretrain="" else trainer="tools/train.py" export_model="tools/export_model.py" + pretrain="./pretrain_models/MobileNetV3_large_x0_5_pretrained" fi save_log="${log_path}/${model_name}_${slim_trainer}_autocast_${auto_cast}_gpuid_${gpu}" - command="${env} ${python} ${launch} ${trainer} -c ${yml_file} -o Global.epoch_num=${epoch} Global.eval_batch_step=${eval_batch_step} Global.auto_cast=${auto_cast} Global.save_model_dir=${save_log} Global.use_gpu=${use_gpu} Train.loader.batch_size_per_card=2" - ${env} ${python} ${launch} ${trainer} -c ${yml_file} -o Global.epoch_num=${epoch} Global.eval_batch_step=${eval_batch_step} Global.auto_cast=${auto_cast} Global.save_model_dir=${save_log} Global.use_gpu=${use_gpu} Train.loader.batch_size_per_card=2 + command="${env} ${python} ${launch} ${trainer} -c ${yml_file} -o Global.epoch_num=${epoch} Global.eval_batch_step=${eval_batch_step} Global.auto_cast=${auto_cast} Global.pretrained_model=${pretrain} Global.save_model_dir=${save_log} Global.use_gpu=${use_gpu} Train.loader.batch_size_per_card=2" + ${env} ${python} ${launch} ${trainer} -c ${yml_file} -o Global.epoch_num=${epoch} Global.eval_batch_step=${eval_batch_step} Global.auto_cast=${auto_cast} Global.pretrained_model=${pretrain} Global.save_model_dir=${save_log} Global.use_gpu=${use_gpu} Train.loader.batch_size_per_card=2 status_check $? 
"${trainer}" "${command}" "${save_log}/train.log" command="${env} ${python} ${export_model} -c ${yml_file} -o Global.pretrained_model=${save_log}/latest Global.save_inference_dir=${save_log}/export_inference/ Global.save_model_dir=${save_log}" From 90454c767eb480523404c1a22873ff4d966800f5 Mon Sep 17 00:00:00 2001 From: LDOUBLEV Date: Wed, 9 Jun 2021 13:14:00 +0800 Subject: [PATCH 18/34] rename params.txt --- test/{params.txt => paddleocr_ci_params.txt} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename test/{params.txt => paddleocr_ci_params.txt} (100%) diff --git a/test/params.txt b/test/paddleocr_ci_params.txt similarity index 100% rename from test/params.txt rename to test/paddleocr_ci_params.txt From 9372741adf2d3d3997025b2522287ea18436eb14 Mon Sep 17 00:00:00 2001 From: LDOUBLEV Date: Wed, 9 Jun 2021 14:48:56 +0800 Subject: [PATCH 19/34] return status to log_path/results.log --- test/infer.sh | 9 +++++---- test/test.sh | 11 ++++++----- 2 files changed, 11 insertions(+), 9 deletions(-) diff --git a/test/infer.sh b/test/infer.sh index 30b91df7..9db893d3 100644 --- a/test/infer.sh +++ b/test/infer.sh @@ -27,6 +27,7 @@ gpu_precision_list=$(func_parser "${lines[11]}") infer_gpu_id=$(func_parser "${lines[12]}") log_path=$(func_parser "${lines[13]}") +status_log="${log_path}/result.log" function status_check(){ @@ -103,11 +104,11 @@ for train_model in ${train_model_list[*]}; do save_log_path="${log_path}/${eval_model_name}" command="${python} tools/eval.py -c ${yml_file} -o Global.pretrained_model="${eval_model_name}/best_accuracy" Global.save_model_dir=${save_log_path}" ${python} tools/eval.py -c ${yml_file} -o Global.pretrained_model="${eval_model_name}/best_accuracy" Global.save_model_dir=${save_log_path} - status_check $? "${trainer}" "${command}" "${save_log_path}/train.log" + status_check $? "${trainer}" "${command}" "${status_log}" command="${python} tools/export_model.py -c ${yml_file} -o Global.pretrained_model="${eval_model_name}/best_accuracy" Global.save_inference_dir=${log_path}/${eval_model_name}_infer Global.save_model_dir=${save_log_path}" ${python} tools/export_model.py -c ${yml_file} -o Global.pretrained_model="${eval_model_name}/best_accuracy" Global.save_inference_dir="${log_path}/${eval_model_name}_infer" Global.save_model_dir=${save_log_path} - status_check $? "${trainer}" "${command}" "${save_log_path}/train.log" + status_check $? "${trainer}" "${command}" "${status_log}" if [ $? -eq 0 ]; then echo -e "\033[33m training of $model_name successfully!\033[0m" | tee -a ${save_log}/train.log @@ -134,7 +135,7 @@ for train_model in ${train_model_list[*]}; do save_log_path="${log_path}/${model_name}_${slim_trainer}_cpu_usemkldnn_${use_mkldnn}_cputhreads_${threads}_recbatchnum_${rec_batch_size}_infer.log" command="${python} ${inference} --enable_mkldnn=${use_mkldnn} --use_gpu=False --cpu_threads=${threads} --benchmark=True --det_model_dir=${det_model_dir} --rec_batch_num=${rec_batch_size} --rec_model_dir=${rec_model_dir} --image_dir=${img_dir} --save_log_path=${save_log_path}" ${python} ${inference} --enable_mkldnn=${use_mkldnn} --use_gpu=False --cpu_threads=${threads} --benchmark=True --det_model_dir=${det_model_dir} --rec_batch_num=${rec_batch_size} --rec_model_dir=${rec_model_dir} --image_dir=${img_dir} --save_log_path=${save_log_path} - status_check $? "${trainer}" "${command}" "${save_log_path}" + status_check $? 
"${trainer}" "${command}" "${status_log}" done done done @@ -149,7 +150,7 @@ for train_model in ${train_model_list[*]}; do save_log_path="${log_path}/${model_name}_${slim_trainer}_gpu_usetensorrt_${use_trt}_usefp16_${precision}_recbatchnum_${rec_batch_size}_infer.log" command="${env} ${python} ${inference} --use_gpu=True --use_tensorrt=${use_trt} --precision=${precision} --benchmark=True --det_model_dir=${log_path}/${eval_model_name}_infer --rec_batch_num=${rec_batch_size} --rec_model_dir=${rec_model_dir} --image_dir=${img_dir} --save_log_path=${save_log_path}" ${env} ${python} ${inference} --use_gpu=True --use_tensorrt=${use_trt} --precision=${precision} --benchmark=True --det_model_dir=${log_path}/${eval_model_name}_infer --rec_batch_num=${rec_batch_size} --rec_model_dir=${rec_model_dir} --image_dir=${img_dir} --save_log_path=${save_log_path} - status_check $? "${trainer}" "${command}" "${save_log_path}" + status_check $? "${trainer}" "${command}" "${status_log}" done done done diff --git a/test/test.sh b/test/test.sh index 15a10d17..fdc54095 100644 --- a/test/test.sh +++ b/test/test.sh @@ -1,6 +1,6 @@ #!/bin/bash # Usage: -# bash test/test.sh ./test/params.txt 'lite_train_infer' +# bash test/test.sh ./test/paddleocr_ci_params.txt 'lite_train_infer' FILENAME=$1 @@ -67,6 +67,7 @@ gpu_trt_list=$(func_parser "${lines[10]}") gpu_precision_list=$(func_parser "${lines[11]}") log_path=$(func_parser "${lines[13]}") +status_log="${log_path}/result.log" function status_check(){ last_status=$1 # the exit code @@ -135,11 +136,11 @@ for train_model in ${train_model_list[*]}; do save_log="${log_path}/${model_name}_${slim_trainer}_autocast_${auto_cast}_gpuid_${gpu}" command="${env} ${python} ${launch} ${trainer} -c ${yml_file} -o Global.epoch_num=${epoch} Global.eval_batch_step=${eval_batch_step} Global.auto_cast=${auto_cast} Global.pretrained_model=${pretrain} Global.save_model_dir=${save_log} Global.use_gpu=${use_gpu} Train.loader.batch_size_per_card=2" ${env} ${python} ${launch} ${trainer} -c ${yml_file} -o Global.epoch_num=${epoch} Global.eval_batch_step=${eval_batch_step} Global.auto_cast=${auto_cast} Global.pretrained_model=${pretrain} Global.save_model_dir=${save_log} Global.use_gpu=${use_gpu} Train.loader.batch_size_per_card=2 - status_check $? "${trainer}" "${command}" "${save_log}/train.log" + status_check $? "${trainer}" "${command}" "${status_log}" command="${env} ${python} ${export_model} -c ${yml_file} -o Global.pretrained_model=${save_log}/latest Global.save_inference_dir=${save_log}/export_inference/ Global.save_model_dir=${save_log}" ${env} ${python} ${export_model} -c ${yml_file} -o Global.pretrained_model=${save_log}/latest Global.save_inference_dir=${save_log}/export_inference/ Global.save_model_dir=${save_log} - status_check $? "${trainer}" "${command}" "${save_log}/train.log" + status_check $? 
"${trainer}" "${command}" "${status_log}" if [ "${model_name}" = "det" ]; then export rec_batch_size_list=( "1" ) @@ -160,7 +161,7 @@ for train_model in ${train_model_list[*]}; do save_log_path="${log_path}/${model_name}_${slim_trainer}_cpu_usemkldnn_${use_mkldnn}_cputhreads_${threads}_recbatchnum_${rec_batch_size}_infer.log" command="${python} ${inference} --enable_mkldnn=${use_mkldnn} --use_gpu=False --cpu_threads=${threads} --benchmark=True --det_model_dir=${save_log}/export_inference/ --rec_batch_num=${rec_batch_size} --rec_model_dir=${rec_model_dir} --image_dir=${img_dir} --save_log_path=${save_log_path}" ${python} ${inference} --enable_mkldnn=${use_mkldnn} --use_gpu=False --cpu_threads=${threads} --benchmark=True --det_model_dir=${save_log}/export_inference/ --rec_batch_num=${rec_batch_size} --rec_model_dir=${rec_model_dir} --image_dir=${img_dir} --save_log_path=${save_log_path} - status_check $? "${inference}" "${command}" "${save_log}" + status_check $? "${inference}" "${command}" "${status_log}" done done done @@ -174,7 +175,7 @@ for train_model in ${train_model_list[*]}; do save_log_path="${log_path}/${model_name}_${slim_trainer}_gpu_usetensorrt_${use_trt}_usefp16_${precision}_recbatchnum_${rec_batch_size}_infer.log" command="${env} ${python} ${inference} --use_gpu=True --use_tensorrt=${use_trt} --precision=${precision} --benchmark=True --det_model_dir=${save_log}/export_inference/ --rec_batch_num=${rec_batch_size} --rec_model_dir=${rec_model_dir} --image_dir=${img_dir} --save_log_path=${save_log_path}" ${env} ${python} ${inference} --use_gpu=True --use_tensorrt=${use_trt} --precision=${precision} --benchmark=True --det_model_dir=${save_log}/export_inference/ --rec_batch_num=${rec_batch_size} --rec_model_dir=${rec_model_dir} --image_dir=${img_dir} --save_log_path=${save_log_path} - status_check $? "${inference}" "${command}" "${save_log}" + status_check $? "${inference}" "${command}" "${status_log}" done done done From 95ae3c19d2af70405177c1b64b93b1d3196037db Mon Sep 17 00:00:00 2001 From: LDOUBLEV Date: Wed, 9 Jun 2021 15:38:54 +0800 Subject: [PATCH 20/34] rename save_log in func status_check --- test/test.sh | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/test/test.sh b/test/test.sh index fdc54095..30fb42a3 100644 --- a/test/test.sh +++ b/test/test.sh @@ -73,11 +73,11 @@ function status_check(){ last_status=$1 # the exit code run_model=$2 run_command=$3 - save_log=$4 + run_log=$4 if [ $last_status -eq 0 ]; then - echo -e "\033[33m $run_model successfully with command - ${run_command}! \033[0m" | tee -a ${save_log} + echo -e "\033[33m $run_model successfully with command - ${run_command}! \033[0m" | tee -a ${run_log} else - echo -e "\033[33m $case failed with command - ${run_command}! \033[0m" | tee -a ${save_log} + echo -e "\033[33m $case failed with command - ${run_command}! \033[0m" | tee -a ${run_log} fi } From ef0467884ee44e886444fecd3743fc9d6a0ec504 Mon Sep 17 00:00:00 2001 From: LDOUBLEV Date: Wed, 9 Jun 2021 15:47:46 +0800 Subject: [PATCH 21/34] rename save_log --- test/infer.sh | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/test/infer.sh b/test/infer.sh index 9db893d3..219ba547 100644 --- a/test/infer.sh +++ b/test/infer.sh @@ -34,11 +34,11 @@ function status_check(){ last_status=$1 # the exit code run_model=$2 run_command=$3 - save_log=$4 + run_log=$4 if [ $last_status -eq 0 ]; then - echo -e "\033[33m $run_model successfully with command - ${run_command}! 
\033[0m" | tee -a ${save_log} + echo -e "\033[33m $run_model successfully with command - ${run_command}! \033[0m" | tee -a ${run_log} else - echo -e "\033[33m $case failed with command - ${run_command}! \033[0m" | tee -a ${save_log} + echo -e "\033[33m $case failed with command - ${run_command}! \033[0m" | tee -a ${run_log} fi } IFS='|' From 58ca7639fc1d7ae0266acf39ab590c131f39414c Mon Sep 17 00:00:00 2001 From: LDOUBLEV Date: Wed, 9 Jun 2021 16:14:03 +0800 Subject: [PATCH 22/34] fix eval bug and inference path --- test/infer.sh | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/test/infer.sh b/test/infer.sh index 219ba547..49f0c4f7 100644 --- a/test/infer.sh +++ b/test/infer.sh @@ -48,7 +48,7 @@ for train_model in ${train_model_list[*]}; do yml_file="configs/det/det_mv3_db.yml" wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/ch_det_data_50.tar cd ./inference && tar xf ch_det_data_50.tar && cd ../ - img_dir="./inference/ch_det_data_50/" + img_dir="./inference/ch_det_data_50/all-sum-50" elif [ ${train_model} = "ocr_rec" ];then model_name="rec" yml_file="configs/rec/rec_mv3_none_bilstm_ctc.yml" @@ -102,12 +102,13 @@ for train_model in ${train_model_list[*]}; do fi save_log_path="${log_path}/${eval_model_name}" - command="${python} tools/eval.py -c ${yml_file} -o Global.pretrained_model="${eval_model_name}/best_accuracy" Global.save_model_dir=${save_log_path}" - ${python} tools/eval.py -c ${yml_file} -o Global.pretrained_model="${eval_model_name}/best_accuracy" Global.save_model_dir=${save_log_path} + eval_img="Eval.dataset.data_dir=./inference/ch_det_data_50/ Eval.dataset.label_file_list=./inference/ch_det_data_50/test_gt_50.txt" + command="${python} tools/eval.py -c ${yml_file} -o Global.pretrained_model='${eval_model_name}/best_accuracy' Global.save_model_dir=${save_log_path} ${eval_img}" + ${python} tools/eval.py -c ${yml_file} -o Global.pretrained_model='./inference/${eval_model_name}/best_accuracy' Global.save_model_dir=${save_log_path} ${eval_img} status_check $? "${trainer}" "${command}" "${status_log}" command="${python} tools/export_model.py -c ${yml_file} -o Global.pretrained_model="${eval_model_name}/best_accuracy" Global.save_inference_dir=${log_path}/${eval_model_name}_infer Global.save_model_dir=${save_log_path}" - ${python} tools/export_model.py -c ${yml_file} -o Global.pretrained_model="${eval_model_name}/best_accuracy" Global.save_inference_dir="${log_path}/${eval_model_name}_infer" Global.save_model_dir=${save_log_path} + ${python} tools/export_model.py -c ${yml_file} -o Global.pretrained_model="./inference/${eval_model_name}/best_accuracy" Global.save_inference_dir="${log_path}/${eval_model_name}_infer" Global.save_model_dir=${save_log_path} status_check $? "${trainer}" "${command}" "${status_log}" if [ $? 
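One slip survives both versions of status_check above: the failure branch echoes $case, which, as far as these patches show, is never assigned anywhere (the parameters are named last_status, run_model, run_command and run_log), so failed runs print an empty model name. A corrected, self-contained sketch follows; the mkdir guard is an extra suggestion, since tee -a can append to a missing file but cannot create a missing ./output directory:

    function status_check(){
        local last_status=$1          # exit code of the command under test
        local run_model=$2
        local run_command=$3
        local run_log=$4
        if [ ${last_status} -eq 0 ]; then
            echo -e "\033[33m ${run_model} successfully with command - ${run_command}! \033[0m" | tee -a ${run_log}
        else
            echo -e "\033[33m ${run_model} failed with command - ${run_command}! \033[0m" | tee -a ${run_log}
        fi
    }
    log_path=./output
    mkdir -p ${log_path}              # ensure the log directory exists before the first tee -a
    true; status_check $? "demo" "true" "${log_path}/result.log"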
-eq 0 ]; then @@ -119,11 +120,11 @@ for train_model in ${train_model_list[*]}; do if [ "${model_name}" = "det" ]; then export rec_batch_size_list=( "1" ) inference="tools/infer/predict_det.py" - det_model_dir=${log_path}/${eval_model_name}_infer + det_model_dir="./inference/${log_path}/${eval_model_name}_infer" rec_model_dir="" elif [ "${model_name}" = "rec" ]; then inference="tools/infer/predict_rec.py" - rec_model_dir=${log_path}/${eval_model_name}_infer + rec_model_dir="./inference/${log_path}/${eval_model_name}_infer" det_model_dir="" fi # inference From a4fe159bc07acfc4c3d277448f9103599a54391b Mon Sep 17 00:00:00 2001 From: LDOUBLEV Date: Wed, 9 Jun 2021 09:42:41 +0000 Subject: [PATCH 23/34] fix infer bug --- test/infer.sh | 23 ++++++++++++----------- 1 file changed, 12 insertions(+), 11 deletions(-) diff --git a/test/infer.sh b/test/infer.sh index 49f0c4f7..324c346b 100644 --- a/test/infer.sh +++ b/test/infer.sh @@ -45,10 +45,12 @@ IFS='|' for train_model in ${train_model_list[*]}; do if [ ${train_model} = "ocr_det" ];then model_name="det" - yml_file="configs/det/det_mv3_db.yml" + yml_file="configs/det/ch_ppocr_v2.0/ch_det_mv3_db_v2.0.yml" wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/ch_det_data_50.tar cd ./inference && tar xf ch_det_data_50.tar && cd ../ - img_dir="./inference/ch_det_data_50/all-sum-50" + img_dir="./inference/ch_det_data_50/all-sum-510" + data_dir=./inference/ch_det_data_50/ + data_label_file=[./inference/ch_det_data_50/test_gt_50.txt] elif [ ${train_model} = "ocr_rec" ];then model_name="rec" yml_file="configs/rec/rec_mv3_none_bilstm_ctc.yml" @@ -102,9 +104,8 @@ for train_model in ${train_model_list[*]}; do fi save_log_path="${log_path}/${eval_model_name}" - eval_img="Eval.dataset.data_dir=./inference/ch_det_data_50/ Eval.dataset.label_file_list=./inference/ch_det_data_50/test_gt_50.txt" - command="${python} tools/eval.py -c ${yml_file} -o Global.pretrained_model='${eval_model_name}/best_accuracy' Global.save_model_dir=${save_log_path} ${eval_img}" - ${python} tools/eval.py -c ${yml_file} -o Global.pretrained_model='./inference/${eval_model_name}/best_accuracy' Global.save_model_dir=${save_log_path} ${eval_img} + command="${python} tools/eval.py -c ${yml_file} -o Global.pretrained_model='./inference/${eval_model_name}/best_accuracy' Global.save_model_dir=${save_log_path} Eval.dataset.data_dir=${data_dir} Eval.dataset.label_file_list=${data_label_file}" + ${python} tools/eval.py -c ${yml_file} -o Global.pretrained_model=./inference/${eval_model_name}/best_accuracy Global.save_model_dir=${save_log_path} Eval.dataset.data_dir=${data_dir} Eval.dataset.label_file_list=${data_label_file} status_check $? 
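A subtlety in the sequence above: the if [ $? -eq 0 ] that follows status_check no longer tests the export command, because $? has already been replaced by status_check's own return value, and the function's last statement is an echo piped through tee, which almost always succeeds. Capturing the exit code once removes the ambiguity; a sketch of that step, reusing the variable names defined earlier in the script:

    ${python} tools/export_model.py -c ${yml_file} \
        -o Global.pretrained_model=./inference/${eval_model_name}/best_accuracy
    last_status=$?                    # capture before anything else can overwrite it
    status_check ${last_status} "${trainer}" "${command}" "${status_log}"
    if [ ${last_status} -eq 0 ]; then
        echo -e "\033[33m training of ${model_name} successfully!\033[0m"
    fi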
"${trainer}" "${command}" "${status_log}" command="${python} tools/export_model.py -c ${yml_file} -o Global.pretrained_model="${eval_model_name}/best_accuracy" Global.save_inference_dir=${log_path}/${eval_model_name}_infer Global.save_model_dir=${save_log_path}" @@ -120,11 +121,11 @@ for train_model in ${train_model_list[*]}; do if [ "${model_name}" = "det" ]; then export rec_batch_size_list=( "1" ) inference="tools/infer/predict_det.py" - det_model_dir="./inference/${log_path}/${eval_model_name}_infer" + det_model_dir="${log_path}/${eval_model_name}_infer" rec_model_dir="" elif [ "${model_name}" = "rec" ]; then inference="tools/infer/predict_rec.py" - rec_model_dir="./inference/${log_path}/${eval_model_name}_infer" + rec_model_dir="${log_path}/${eval_model_name}_infer" det_model_dir="" fi # inference @@ -140,8 +141,8 @@ for train_model in ${train_model_list[*]}; do done done done - else - env="CUDA_VISIBLE_DEVICES=${infer_gpu_id}" + else + # env="export CUDA_VISIBLE_DEVICES=${infer_gpu_id}" for use_trt in ${gpu_trt_list[*]}; do for precision in ${gpu_precision_list[*]}; do if [ ${use_trt} = "False" ] && [ ${precision} != "fp32" ]; then @@ -149,8 +150,8 @@ for train_model in ${train_model_list[*]}; do fi for rec_batch_size in ${rec_batch_size_list[*]}; do save_log_path="${log_path}/${model_name}_${slim_trainer}_gpu_usetensorrt_${use_trt}_usefp16_${precision}_recbatchnum_${rec_batch_size}_infer.log" - command="${env} ${python} ${inference} --use_gpu=True --use_tensorrt=${use_trt} --precision=${precision} --benchmark=True --det_model_dir=${log_path}/${eval_model_name}_infer --rec_batch_num=${rec_batch_size} --rec_model_dir=${rec_model_dir} --image_dir=${img_dir} --save_log_path=${save_log_path}" - ${env} ${python} ${inference} --use_gpu=True --use_tensorrt=${use_trt} --precision=${precision} --benchmark=True --det_model_dir=${log_path}/${eval_model_name}_infer --rec_batch_num=${rec_batch_size} --rec_model_dir=${rec_model_dir} --image_dir=${img_dir} --save_log_path=${save_log_path} + command="${python} ${inference} --use_gpu=True --use_tensorrt=${use_trt} --precision=${precision} --benchmark=True --det_model_dir=${log_path}/${eval_model_name}_infer --rec_batch_num=${rec_batch_size} --rec_model_dir=${rec_model_dir} --image_dir=${img_dir} --save_log_path=${save_log_path}" + ${python} ${inference} --use_gpu=True --use_tensorrt=${use_trt} --precision=${precision} --benchmark=True --det_model_dir=${log_path}/${eval_model_name}_infer --rec_batch_num=${rec_batch_size} --rec_model_dir=${rec_model_dir} --image_dir=${img_dir} --save_log_path=${save_log_path} status_check $? 
"${trainer}" "${command}" "${status_log}" done done From b5aa9bdea17c6b4498efe0ed7fde72f572942428 Mon Sep 17 00:00:00 2001 From: LDOUBLEV Date: Wed, 9 Jun 2021 09:48:45 +0000 Subject: [PATCH 24/34] delete env --- test/test.sh | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/test/test.sh b/test/test.sh index 30fb42a3..df8dabdb 100644 --- a/test/test.sh +++ b/test/test.sh @@ -134,12 +134,12 @@ for train_model in ${train_model_list[*]}; do pretrain="./pretrain_models/MobileNetV3_large_x0_5_pretrained" fi save_log="${log_path}/${model_name}_${slim_trainer}_autocast_${auto_cast}_gpuid_${gpu}" - command="${env} ${python} ${launch} ${trainer} -c ${yml_file} -o Global.epoch_num=${epoch} Global.eval_batch_step=${eval_batch_step} Global.auto_cast=${auto_cast} Global.pretrained_model=${pretrain} Global.save_model_dir=${save_log} Global.use_gpu=${use_gpu} Train.loader.batch_size_per_card=2" - ${env} ${python} ${launch} ${trainer} -c ${yml_file} -o Global.epoch_num=${epoch} Global.eval_batch_step=${eval_batch_step} Global.auto_cast=${auto_cast} Global.pretrained_model=${pretrain} Global.save_model_dir=${save_log} Global.use_gpu=${use_gpu} Train.loader.batch_size_per_card=2 + command="${python} ${launch} ${trainer} -c ${yml_file} -o Global.epoch_num=${epoch} Global.eval_batch_step=${eval_batch_step} Global.auto_cast=${auto_cast} Global.pretrained_model=${pretrain} Global.save_model_dir=${save_log} Global.use_gpu=${use_gpu} Train.loader.batch_size_per_card=2" + ${python} ${launch} ${trainer} -c ${yml_file} -o Global.epoch_num=${epoch} Global.eval_batch_step=${eval_batch_step} Global.auto_cast=${auto_cast} Global.pretrained_model=${pretrain} Global.save_model_dir=${save_log} Global.use_gpu=${use_gpu} Train.loader.batch_size_per_card=2 status_check $? "${trainer}" "${command}" "${status_log}" - command="${env} ${python} ${export_model} -c ${yml_file} -o Global.pretrained_model=${save_log}/latest Global.save_inference_dir=${save_log}/export_inference/ Global.save_model_dir=${save_log}" - ${env} ${python} ${export_model} -c ${yml_file} -o Global.pretrained_model=${save_log}/latest Global.save_inference_dir=${save_log}/export_inference/ Global.save_model_dir=${save_log} + command="${python} ${export_model} -c ${yml_file} -o Global.pretrained_model=${save_log}/latest Global.save_inference_dir=${save_log}/export_inference/ Global.save_model_dir=${save_log}" + ${python} ${export_model} -c ${yml_file} -o Global.pretrained_model=${save_log}/latest Global.save_inference_dir=${save_log}/export_inference/ Global.save_model_dir=${save_log} status_check $? 
"${trainer}" "${command}" "${status_log}" if [ "${model_name}" = "det" ]; then @@ -173,8 +173,8 @@ for train_model in ${train_model_list[*]}; do fi for rec_batch_size in ${rec_batch_size_list[*]}; do save_log_path="${log_path}/${model_name}_${slim_trainer}_gpu_usetensorrt_${use_trt}_usefp16_${precision}_recbatchnum_${rec_batch_size}_infer.log" - command="${env} ${python} ${inference} --use_gpu=True --use_tensorrt=${use_trt} --precision=${precision} --benchmark=True --det_model_dir=${save_log}/export_inference/ --rec_batch_num=${rec_batch_size} --rec_model_dir=${rec_model_dir} --image_dir=${img_dir} --save_log_path=${save_log_path}" - ${env} ${python} ${inference} --use_gpu=True --use_tensorrt=${use_trt} --precision=${precision} --benchmark=True --det_model_dir=${save_log}/export_inference/ --rec_batch_num=${rec_batch_size} --rec_model_dir=${rec_model_dir} --image_dir=${img_dir} --save_log_path=${save_log_path} + command="${python} ${inference} --use_gpu=True --use_tensorrt=${use_trt} --precision=${precision} --benchmark=True --det_model_dir=${save_log}/export_inference/ --rec_batch_num=${rec_batch_size} --rec_model_dir=${rec_model_dir} --image_dir=${img_dir} --save_log_path=${save_log_path}" + ${python} ${inference} --use_gpu=True --use_tensorrt=${use_trt} --precision=${precision} --benchmark=True --det_model_dir=${save_log}/export_inference/ --rec_batch_num=${rec_batch_size} --rec_model_dir=${rec_model_dir} --image_dir=${img_dir} --save_log_path=${save_log_path} status_check $? "${inference}" "${command}" "${status_log}" done done From f3efa9be6bae30fd836d7e6cc8c2482ba6708a1b Mon Sep 17 00:00:00 2001 From: LDOUBLEV Date: Wed, 9 Jun 2021 10:03:27 +0000 Subject: [PATCH 25/34] fix test.sh bug --- test/test.sh | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/test/test.sh b/test/test.sh index df8dabdb..cc87c772 100644 --- a/test/test.sh +++ b/test/test.sh @@ -138,18 +138,18 @@ for train_model in ${train_model_list[*]}; do ${python} ${launch} ${trainer} -c ${yml_file} -o Global.epoch_num=${epoch} Global.eval_batch_step=${eval_batch_step} Global.auto_cast=${auto_cast} Global.pretrained_model=${pretrain} Global.save_model_dir=${save_log} Global.use_gpu=${use_gpu} Train.loader.batch_size_per_card=2 status_check $? "${trainer}" "${command}" "${status_log}" - command="${python} ${export_model} -c ${yml_file} -o Global.pretrained_model=${save_log}/latest Global.save_inference_dir=${save_log}/export_inference/ Global.save_model_dir=${save_log}" - ${python} ${export_model} -c ${yml_file} -o Global.pretrained_model=${save_log}/latest Global.save_inference_dir=${save_log}/export_inference/ Global.save_model_dir=${save_log} + command="${python} ${export_model} -c ${yml_file} -o Global.pretrained_model=${save_log}/latest Global.save_inference_dir=${save_log}_infer/ Global.save_model_dir=${save_log}" + ${python} ${export_model} -c ${yml_file} -o Global.pretrained_model=${save_log}/latest Global.save_inference_dir=${save_log}_infer/ Global.save_model_dir=${save_log} status_check $? 
"${trainer}" "${command}" "${status_log}" if [ "${model_name}" = "det" ]; then export rec_batch_size_list=( "1" ) inference="tools/infer/predict_det.py" - det_model_dir=${save_log}/export_inference/ + det_model_dir=${save_log}_infer rec_model_dir="" elif [ "${model_name}" = "rec" ]; then inference="tools/infer/predict_rec.py" - rec_model_dir=${save_log}/export_inference/ + rec_model_dir=${save_log}_infer det_model_dir="" fi # inference @@ -159,8 +159,8 @@ for train_model in ${train_model_list[*]}; do for threads in ${cpu_threads_list[*]}; do for rec_batch_size in ${rec_batch_size_list[*]}; do save_log_path="${log_path}/${model_name}_${slim_trainer}_cpu_usemkldnn_${use_mkldnn}_cputhreads_${threads}_recbatchnum_${rec_batch_size}_infer.log" - command="${python} ${inference} --enable_mkldnn=${use_mkldnn} --use_gpu=False --cpu_threads=${threads} --benchmark=True --det_model_dir=${save_log}/export_inference/ --rec_batch_num=${rec_batch_size} --rec_model_dir=${rec_model_dir} --image_dir=${img_dir} --save_log_path=${save_log_path}" - ${python} ${inference} --enable_mkldnn=${use_mkldnn} --use_gpu=False --cpu_threads=${threads} --benchmark=True --det_model_dir=${save_log}/export_inference/ --rec_batch_num=${rec_batch_size} --rec_model_dir=${rec_model_dir} --image_dir=${img_dir} --save_log_path=${save_log_path} + command="${python} ${inference} --enable_mkldnn=${use_mkldnn} --use_gpu=False --cpu_threads=${threads} --benchmark=True --det_model_dir=${det_model_dir} --rec_batch_num=${rec_batch_size} --rec_model_dir=${rec_model_dir} --image_dir=${img_dir} --save_log_path=${save_log_path}" + ${python} ${inference} --enable_mkldnn=${use_mkldnn} --use_gpu=False --cpu_threads=${threads} --benchmark=True --det_model_dir=${det_model_dir} --rec_batch_num=${rec_batch_size} --rec_model_dir=${rec_model_dir} --image_dir=${img_dir} --save_log_path=${save_log_path} status_check $? "${inference}" "${command}" "${status_log}" done done @@ -173,8 +173,8 @@ for train_model in ${train_model_list[*]}; do fi for rec_batch_size in ${rec_batch_size_list[*]}; do save_log_path="${log_path}/${model_name}_${slim_trainer}_gpu_usetensorrt_${use_trt}_usefp16_${precision}_recbatchnum_${rec_batch_size}_infer.log" - command="${python} ${inference} --use_gpu=True --use_tensorrt=${use_trt} --precision=${precision} --benchmark=True --det_model_dir=${save_log}/export_inference/ --rec_batch_num=${rec_batch_size} --rec_model_dir=${rec_model_dir} --image_dir=${img_dir} --save_log_path=${save_log_path}" - ${python} ${inference} --use_gpu=True --use_tensorrt=${use_trt} --precision=${precision} --benchmark=True --det_model_dir=${save_log}/export_inference/ --rec_batch_num=${rec_batch_size} --rec_model_dir=${rec_model_dir} --image_dir=${img_dir} --save_log_path=${save_log_path} + command="${python} ${inference} --use_gpu=True --use_tensorrt=${use_trt} --precision=${precision} --benchmark=True --det_model_dir=${det_model_dir} --rec_batch_num=${rec_batch_size} --rec_model_dir=${rec_model_dir} --image_dir=${img_dir} --save_log_path=${save_log_path}" + ${python} ${inference} --use_gpu=True --use_tensorrt=${use_trt} --precision=${precision} --benchmark=True --det_model_dir=${det_model_dir} --rec_batch_num=${rec_batch_size} --rec_model_dir=${rec_model_dir} --image_dir=${img_dir} --save_log_path=${save_log_path} status_check $? 
"${inference}" "${command}" "${status_log}" done done From e0d1779faee5e79e5c60df75a00348176453cf20 Mon Sep 17 00:00:00 2001 From: LDOUBLEV Date: Wed, 9 Jun 2021 11:27:36 +0000 Subject: [PATCH 26/34] add requirments --- test/infer.sh | 5 +++++ test/test.sh | 5 +++++ 2 files changed, 10 insertions(+) diff --git a/test/infer.sh b/test/infer.sh index 324c346b..eb70cafb 100644 --- a/test/infer.sh +++ b/test/infer.sh @@ -29,6 +29,11 @@ infer_gpu_id=$(func_parser "${lines[12]}") log_path=$(func_parser "${lines[13]}") status_log="${log_path}/result.log" +# install requirments +${python} -m pip install pynvml; +${python} -m pip install psutil; +${python} -m pip install GPUtil; + function status_check(){ last_status=$1 # the exit code diff --git a/test/test.sh b/test/test.sh index cc87c772..927abd19 100644 --- a/test/test.sh +++ b/test/test.sh @@ -69,6 +69,11 @@ gpu_precision_list=$(func_parser "${lines[11]}") log_path=$(func_parser "${lines[13]}") status_log="${log_path}/result.log" +# install requirments +${python} -m pip install pynvml; +${python} -m pip install psutil; +${python} -m pip install GPUtil; + function status_check(){ last_status=$1 # the exit code run_model=$2 From c270500a56845469c01b48474ca54fc4feb3885b Mon Sep 17 00:00:00 2001 From: LDOUBLEV Date: Thu, 10 Jun 2021 14:19:52 +0800 Subject: [PATCH 27/34] delete launch --- test/test.sh | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/test/test.sh b/test/test.sh index 927abd19..6c584810 100644 --- a/test/test.sh +++ b/test/test.sh @@ -102,14 +102,11 @@ for train_model in ${train_model_list[*]}; do for gpu in ${gpu_list[*]}; do use_gpu=True if [ ${gpu} = "-1" ];then - lanuch="" use_gpu=False env="" elif [ ${#gpu} -le 1 ];then - launch="" env="CUDA_VISIBLE_DEVICES=${gpu}" else - launch="-m paddle.distributed.launch --log_dir=./debug/ --gpus ${gpu}" IFS="," array=(${gpu}) env="CUDA_VISIBLE_DEVICES=${array[0]}" @@ -139,8 +136,13 @@ for train_model in ${train_model_list[*]}; do pretrain="./pretrain_models/MobileNetV3_large_x0_5_pretrained" fi save_log="${log_path}/${model_name}_${slim_trainer}_autocast_${auto_cast}_gpuid_${gpu}" - command="${python} ${launch} ${trainer} -c ${yml_file} -o Global.epoch_num=${epoch} Global.eval_batch_step=${eval_batch_step} Global.auto_cast=${auto_cast} Global.pretrained_model=${pretrain} Global.save_model_dir=${save_log} Global.use_gpu=${use_gpu} Train.loader.batch_size_per_card=2" - ${python} ${launch} ${trainer} -c ${yml_file} -o Global.epoch_num=${epoch} Global.eval_batch_step=${eval_batch_step} Global.auto_cast=${auto_cast} Global.pretrained_model=${pretrain} Global.save_model_dir=${save_log} Global.use_gpu=${use_gpu} Train.loader.batch_size_per_card=2 + if [ ${#gpu} -le 2 ];then + command="${python} ${trainer} -c ${yml_file} -o Global.epoch_num=${epoch} Global.eval_batch_step=${eval_batch_step} Global.auto_cast=${auto_cast} Global.pretrained_model=${pretrain} Global.save_model_dir=${save_log} Global.use_gpu=${use_gpu} Train.loader.batch_size_per_card=2" + ${python} ${trainer} -c ${yml_file} -o Global.epoch_num=${epoch} Global.eval_batch_step=${eval_batch_step} Global.auto_cast=${auto_cast} Global.pretrained_model=${pretrain} Global.save_model_dir=${save_log} Global.use_gpu=${use_gpu} Train.loader.batch_size_per_card=2 + else + command="${python} -m paddle.distributed.launch --log_dir=./debug/ --gpus ${gpu} ${trainer} -c ${yml_file} -o Global.epoch_num=${epoch} Global.eval_batch_step=${eval_batch_step} Global.auto_cast=${auto_cast} Global.pretrained_model=${pretrain} 
Global.save_model_dir=${save_log} Global.use_gpu=${use_gpu} Train.loader.batch_size_per_card=2" + ${python} -m paddle.distributed.launch --log_dir=./debug/ --gpus ${gpu} ${trainer} -c ${yml_file} -o Global.epoch_num=${epoch} Global.eval_batch_step=${eval_batch_step} Global.auto_cast=${auto_cast} Global.pretrained_model=${pretrain} Global.save_model_dir=${save_log} Global.use_gpu=${use_gpu} Train.loader.batch_size_per_card=2 + fi status_check $? "${trainer}" "${command}" "${status_log}" command="${python} ${export_model} -c ${yml_file} -o Global.pretrained_model=${save_log}/latest Global.save_inference_dir=${save_log}_infer/ Global.save_model_dir=${save_log}" From fcd080851b87de0fb4c8735058e87b924ca02fb3 Mon Sep 17 00:00:00 2001 From: LDOUBLEV Date: Thu, 10 Jun 2021 15:49:33 +0800 Subject: [PATCH 28/34] add paddleslim --- test/test.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/test/test.sh b/test/test.sh index 6c584810..39a60033 100644 --- a/test/test.sh +++ b/test/test.sh @@ -73,6 +73,7 @@ status_log="${log_path}/result.log" ${python} -m pip install pynvml; ${python} -m pip install psutil; ${python} -m pip install GPUtil; +${python} -m pip install paddlesim==2.0.0 function status_check(){ last_status=$1 # the exit code From 868d3062ab012d980254327e5b0d45a7423d8883 Mon Sep 17 00:00:00 2001 From: LDOUBLEV Date: Thu, 10 Jun 2021 15:53:02 +0800 Subject: [PATCH 29/34] support download sen.pickle --- test/test.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/test/test.sh b/test/test.sh index 39a60033..92c61f43 100644 --- a/test/test.sh +++ b/test/test.sh @@ -127,6 +127,7 @@ for train_model in ${train_model_list[*]}; do trainer="deploy/slim/prune/sensitivity_anal.py" export_model="deploy/slim/prune/export_prune_model.py" pretrain="./pretrain_models/det_mv3_db_v2.0_train/best_accuracy" + wget -nc -P https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/sen.pickle elif [ ${slim_trainer} = "distill" ]; then trainer="deploy/slim/distill/train_dml.py" export_model="deploy/slim/distill/export_distill_model.py" From 777857ec8944b180788da9f888bdff81ebbe1f78 Mon Sep 17 00:00:00 2001 From: LDOUBLEV Date: Thu, 17 Jun 2021 12:02:05 +0800 Subject: [PATCH 30/34] rename quant and prune --- test/infer.sh | 4 ++-- test/paddleocr_ci_params.txt | 4 ++-- test/test.sh | 5 +++-- 3 files changed, 7 insertions(+), 6 deletions(-) diff --git a/test/infer.sh b/test/infer.sh index eb70cafb..78057705 100644 --- a/test/infer.sh +++ b/test/infer.sh @@ -76,7 +76,7 @@ for train_model in ${train_model_list[*]}; do wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_rec_train.tar cd ./inference && tar xf ${eval_model_name}.tar && cd ../ fi - elif [ ${slim_trainer} = "quant" ]; then + elif [ ${slim_trainer} = "pact" ]; then if [ ${model_name} = "det" ]; then eval_model_name="ch_ppocr_mobile_v2.0_det_quant_train" wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/slim/ch_ppocr_mobile_v2.0_det_quant_train.tar @@ -96,7 +96,7 @@ for train_model in ${train_model_list[*]}; do wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/slim/ch_ppocr_mobile_v2.0_rec_distill_train.tar cd ./inference && tar xf ${eval_model_name}.tar && cd ../ fi - elif [ ${slim_trainer} = "prune" ]; then + elif [ ${slim_trainer} = "fpgm" ]; then if [ ${model_name} = "det" ]; then eval_model_name="ch_ppocr_mobile_v2.0_det_prune_train" wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/slim/ch_ppocr_mobile_v2.0_det_prune_train.tar diff --git 
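Two likely slips in the hunks above. "paddlesim==2.0.0" is presumably the PaddleSlim package, since the commit subject itself says "add paddleslim"; and in "wget -nc -P <URL>" the URL is consumed as the argument of -P (a directory prefix), leaving wget with no URL to download at all. A corrected sketch; the ./pretrain_models target directory is an assumption, since the patch elides it, and the four pip packages are merged into one call so the install overhead is paid once:

    python=python3.7
    ${python} -m pip install pynvml psutil GPUtil paddleslim==2.0.0
    # -P takes a directory; the URL must remain a separate argument
    wget -nc -P ./pretrain_models https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/sen.pickle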
a/test/paddleocr_ci_params.txt b/test/paddleocr_ci_params.txt index 93c5ccde..4cd035ea 100644 --- a/test/paddleocr_ci_params.txt +++ b/test/paddleocr_ci_params.txt @@ -1,7 +1,7 @@ train_model_list: ocr_det gpu_list: -1|0|0,1 -auto_cast_list: False -trainer_list: norm|quant +auto_cast_list: False|True +trainer_list: norm|pact|fpgm python: python3.7 inference: python diff --git a/test/test.sh b/test/test.sh index 92c61f43..02379baa 100644 --- a/test/test.sh +++ b/test/test.sh @@ -75,6 +75,7 @@ ${python} -m pip install psutil; ${python} -m pip install GPUtil; ${python} -m pip install paddlesim==2.0.0 + function status_check(){ last_status=$1 # the exit code run_model=$2 @@ -119,11 +120,11 @@ for train_model in ${train_model_list[*]}; do trainer="tools/train.py" export_model="tools/export_model.py" pretrain="./pretrain_models/MobileNetV3_large_x0_5_pretrained" - elif [ ${slim_trainer} = "quant" ]; then + elif [ ${slim_trainer} = "pact" ]; then trainer="deploy/slim/quantization/quant.py" export_model="deploy/slim/quantization/export_model.py" pretrain="./pretrain_models/det_mv3_db_v2.0_train/best_accuracy" - elif [ ${slim_trainer} = "prune" ]; then + elif [ ${slim_trainer} = "fpgm" ]; then trainer="deploy/slim/prune/sensitivity_anal.py" export_model="deploy/slim/prune/export_prune_model.py" pretrain="./pretrain_models/det_mv3_db_v2.0_train/best_accuracy" From e4a51f40d4c3dd371786ac558fe9d7364060684d Mon Sep 17 00:00:00 2001 From: LDOUBLEV Date: Thu, 17 Jun 2021 12:11:57 +0800 Subject: [PATCH 31/34] get cpuinfo and ip info to status_log --- test/infer.sh | 6 ++++++ test/test.sh | 6 ++++++ 2 files changed, 12 insertions(+) diff --git a/test/infer.sh b/test/infer.sh index 78057705..ad32ffc5 100644 --- a/test/infer.sh +++ b/test/infer.sh @@ -34,6 +34,12 @@ ${python} -m pip install pynvml; ${python} -m pip install psutil; ${python} -m pip install GPUtil; +paddle_info="$(python3.7 -c "import paddle;print(f'paddle_version:{paddle.__version__}');print(f'paddle_commit:{paddle.__git_commit__}')")" +echo -e "\033[33m $paddle_info \033[0m" | tee -a ${status_log} +cpu_model=`cat /proc/cpuinfo | grep "model name" | awk -F ':' '{print $2}' | sort | uniq` +echo -e "\033[33m cpu_info:$cpu_model \033[0m" | tee -a ${status_log} +ip=`ifconfig| grep -A 1 'eth0'|grep 'inet'|awk -F ':' '{print $2}'|awk '{print $1}'` +echo -e "\033[33m ip_info:$ip \033[0m" | tee -a ${status_log} function status_check(){ last_status=$1 # the exit code diff --git a/test/test.sh b/test/test.sh index 02379baa..63686401 100644 --- a/test/test.sh +++ b/test/test.sh @@ -75,6 +75,12 @@ ${python} -m pip install psutil; ${python} -m pip install GPUtil; ${python} -m pip install paddlesim==2.0.0 +paddle_info="$(python3.7 -c "import paddle;print(f'paddle_version:{paddle.__version__}');print(f'paddle_commit:{paddle.__git_commit__}')")" +echo -e "\033[33m $paddle_info \033[0m" | tee -a ${status_log} +cpu_model=`cat /proc/cpuinfo | grep "model name" | awk -F ':' '{print $2}' | sort | uniq` +echo -e "\033[33m cpu_info:$cpu_model \033[0m" | tee -a ${status_log} +ip=`ifconfig| grep -A 1 'eth0'|grep 'inet'|awk -F ':' '{print $2}'|awk '{print $1}'` +echo -e "\033[33m ip_info:$ip \033[0m" | tee -a ${status_log} function status_check(){ last_status=$1 # the exit code From 3ba4d543a60319702eb1475f6edeb7053d9b832e Mon Sep 17 00:00:00 2001 From: LDOUBLEV Date: Thu, 17 Jun 2021 13:51:45 +0800 Subject: [PATCH 32/34] python3.7 to --- test/infer.sh | 2 +- test/test.sh | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/test/infer.sh 
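The ip= line added above parses ifconfig with awk -F ':', which only matches the old net-tools output format (inet addr:x.x.x.x) and additionally assumes an eth0 interface exists. Where a reasonably modern Linux can be assumed, hostname -I is simpler and format-stable; an alternative sketch, not part of the patch:

    status_log=./output/result.log
    ip=$(hostname -I | awk '{print $1}')   # first configured address
    echo -e "\033[33m ip_info:${ip} \033[0m" | tee -a ${status_log}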
b/test/infer.sh index ad32ffc5..5b2c7d30 100644 --- a/test/infer.sh +++ b/test/infer.sh @@ -34,7 +34,7 @@ ${python} -m pip install pynvml; ${python} -m pip install psutil; ${python} -m pip install GPUtil; -paddle_info="$(python3.7 -c "import paddle;print(f'paddle_version:{paddle.__version__}');print(f'paddle_commit:{paddle.__git_commit__}')")" +paddle_info="$(${python} -c "import paddle;print(f'paddle_version:{paddle.__version__}');print(f'paddle_commit:{paddle.__git_commit__}')")" echo -e "\033[33m $paddle_info \033[0m" | tee -a ${status_log} cpu_model=`cat /proc/cpuinfo | grep "model name" | awk -F ':' '{print $2}' | sort | uniq` echo -e "\033[33m cpu_info:$cpu_model \033[0m" | tee -a ${status_log} diff --git a/test/test.sh b/test/test.sh index 63686401..5bb48ac5 100644 --- a/test/test.sh +++ b/test/test.sh @@ -75,7 +75,7 @@ ${python} -m pip install psutil; ${python} -m pip install GPUtil; ${python} -m pip install paddleslim==2.0.0 -paddle_info="$(python3.7 -c "import paddle;print(f'paddle_version:{paddle.__version__}');print(f'paddle_commit:{paddle.__git_commit__}')")" +paddle_info="$(${python} -c "import paddle;print(f'paddle_version:{paddle.__version__}');print(f'paddle_commit:{paddle.__git_commit__}')")" echo -e "\033[33m $paddle_info \033[0m" | tee -a ${status_log} cpu_model=`cat /proc/cpuinfo | grep "model name" | awk -F ':' '{print $2}' | sort | uniq` echo -e "\033[33m cpu_info:$cpu_model \033[0m" | tee -a ${status_log} From 069d994c16e313fb2af22f7697cb7e271f18bc23 Mon Sep 17 00:00:00 2001 From: LDOUBLEV Date: Mon, 28 Jun 2021 20:31:21 +0800 Subject: [PATCH 33/34] test to test_v5: split prepare.sh out of test.sh and drive both from a params file --- test/infer.sh | 173 ---------------- test/ocr_det_params.txt | 35 ++++ test/paddleocr_ci_params.txt | 15 -- test/prepare.sh | 138 +++++++++++++ test/test.sh | 374 ++++++++++++++++++----------------- 5 files changed, 369 insertions(+), 366 deletions(-) delete mode 100644 test/infer.sh create mode 100644 test/ocr_det_params.txt delete mode 100644 test/paddleocr_ci_params.txt create mode 100644 test/prepare.sh diff --git a/test/infer.sh b/test/infer.sh deleted file mode 100644 index 5b2c7d30..00000000 --- a/test/infer.sh +++ /dev/null @@ -1,173 +0,0 @@ -#!/bin/bash -FILENAME=$1 -dataline=$(cat ${FILENAME}) -# parser params -IFS=$'\n' -lines=(${dataline}) -function func_parser(){ - strs=$1 - IFS=": " - array=(${strs}) - tmp=${array[1]} - echo ${tmp} -} -IFS=$'\n' -# The training params -train_model_list=$(func_parser "${lines[0]}") -slim_trainer_list=$(func_parser "${lines[3]}") -python=$(func_parser "${lines[4]}") -# inference params -# inference=$(func_parser "${lines[5]}") -devices=$(func_parser "${lines[6]}") -use_mkldnn_list=$(func_parser "${lines[7]}") -cpu_threads_list=$(func_parser "${lines[8]}") -rec_batch_size_list=$(func_parser "${lines[9]}") -gpu_trt_list=$(func_parser "${lines[10]}") -gpu_precision_list=$(func_parser "${lines[11]}") - -infer_gpu_id=$(func_parser "${lines[12]}") -log_path=$(func_parser "${lines[13]}") -status_log="${log_path}/result.log" - -# install requirments -${python} -m pip install pynvml; -${python} -m pip install psutil; -${python} -m pip install GPUtil; - -paddle_info="$(${python} -c "import paddle;print(f'paddle_version:{paddle.__version__}');print(f'paddle_commit:{paddle.__git_commit__}')")" -echo -e "\033[33m $paddle_info \033[0m" | tee -a ${status_log} -cpu_model=`cat /proc/cpuinfo | grep "model name" | awk -F ':' '{print $2}' | sort | uniq` -echo -e "\033[33m cpu_info:$cpu_model \033[0m" | tee -a ${status_log} -ip=`ifconfig| grep -A 1 'eth0'|grep 'inet'|awk -F ':'
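The point of PATCH 32: with the interpreter read from the params file rather than hardcoded, the whole harness can be retargeted by editing a single value. For example (the alternative interpreters are illustrative):

python=python3.7        # value from the params file; could equally be python3.8 or a venv binary
${python} -m pip install pynvml
${python} -c "import paddle;print(paddle.__version__)"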
'{print $2}'|awk '{print $1}'` -echo -e "\033[33m ip_info:$ip \033[0m" | tee -a ${status_log} - -function status_check(){ - last_status=$1 # the exit code - run_model=$2 - run_command=$3 - run_log=$4 - if [ $last_status -eq 0 ]; then - echo -e "\033[33m $run_model successfully with command - ${run_command}! \033[0m" | tee -a ${run_log} - else - echo -e "\033[33m $case failed with command - ${run_command}! \033[0m" | tee -a ${run_log} - fi -} -IFS='|' -for train_model in ${train_model_list[*]}; do - if [ ${train_model} = "ocr_det" ];then - model_name="det" - yml_file="configs/det/ch_ppocr_v2.0/ch_det_mv3_db_v2.0.yml" - wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/ch_det_data_50.tar - cd ./inference && tar xf ch_det_data_50.tar && cd ../ - img_dir="./inference/ch_det_data_50/all-sum-510" - data_dir=./inference/ch_det_data_50/ - data_label_file=[./inference/ch_det_data_50/test_gt_50.txt] - elif [ ${train_model} = "ocr_rec" ];then - model_name="rec" - yml_file="configs/rec/rec_mv3_none_bilstm_ctc.yml" - wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/ch_rec_data_200.tar - cd ./inference && tar xf ch_rec_data_200.tar && cd ../ - img_dir="./inference/ch_rec_data_200/" - fi - - # eval - for slim_trainer in ${slim_trainer_list[*]}; do - if [ ${slim_trainer} = "norm" ]; then - if [ ${model_name} = "det" ]; then - eval_model_name="ch_ppocr_mobile_v2.0_det_train" - wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_det_train.tar - cd ./inference && tar xf ${eval_model_name}.tar && cd ../ - else - eval_model_name="ch_ppocr_mobile_v2.0_rec_train" - wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_rec_train.tar - cd ./inference && tar xf ${eval_model_name}.tar && cd ../ - fi - elif [ ${slim_trainer} = "pact" ]; then - if [ ${model_name} = "det" ]; then - eval_model_name="ch_ppocr_mobile_v2.0_det_quant_train" - wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/slim/ch_ppocr_mobile_v2.0_det_quant_train.tar - cd ./inference && tar xf ${eval_model_name}.tar && cd ../ - else - eval_model_name="ch_ppocr_mobile_v2.0_rec_quant_train" - wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/slim/ch_ppocr_mobile_v2.0_rec_quant_train.tar - cd ./inference && tar xf ${eval_model_name}.tar && cd ../ - fi - elif [ ${slim_trainer} = "distill" ]; then - if [ ${model_name} = "det" ]; then - eval_model_name="ch_ppocr_mobile_v2.0_det_distill_train" - wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/slim/ch_ppocr_mobile_v2.0_det_distill_train.tar - cd ./inference && tar xf ${eval_model_name}.tar && cd ../ - else - eval_model_name="ch_ppocr_mobile_v2.0_rec_distill_train" - wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/slim/ch_ppocr_mobile_v2.0_rec_distill_train.tar - cd ./inference && tar xf ${eval_model_name}.tar && cd ../ - fi - elif [ ${slim_trainer} = "fpgm" ]; then - if [ ${model_name} = "det" ]; then - eval_model_name="ch_ppocr_mobile_v2.0_det_prune_train" - wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/slim/ch_ppocr_mobile_v2.0_det_prune_train.tar - cd ./inference && tar xf ${eval_model_name}.tar && cd ../ - else - eval_model_name="ch_ppocr_mobile_v2.0_rec_prune_train" - wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/slim/ch_ppocr_mobile_v2.0_rec_prune_train.tar - cd ./inference && tar xf ${eval_model_name}.tar && cd ../ - fi - fi - - 
save_log_path="${log_path}/${eval_model_name}" - command="${python} tools/eval.py -c ${yml_file} -o Global.pretrained_model='./inference/${eval_model_name}/best_accuracy' Global.save_model_dir=${save_log_path} Eval.dataset.data_dir=${data_dir} Eval.dataset.label_file_list=${data_label_file}" - ${python} tools/eval.py -c ${yml_file} -o Global.pretrained_model=./inference/${eval_model_name}/best_accuracy Global.save_model_dir=${save_log_path} Eval.dataset.data_dir=${data_dir} Eval.dataset.label_file_list=${data_label_file} - status_check $? "${trainer}" "${command}" "${status_log}" - - command="${python} tools/export_model.py -c ${yml_file} -o Global.pretrained_model="${eval_model_name}/best_accuracy" Global.save_inference_dir=${log_path}/${eval_model_name}_infer Global.save_model_dir=${save_log_path}" - ${python} tools/export_model.py -c ${yml_file} -o Global.pretrained_model="./inference/${eval_model_name}/best_accuracy" Global.save_inference_dir="${log_path}/${eval_model_name}_infer" Global.save_model_dir=${save_log_path} - status_check $? "${trainer}" "${command}" "${status_log}" - - if [ $? -eq 0 ]; then - echo -e "\033[33m training of $model_name successfully!\033[0m" | tee -a ${save_log}/train.log - else - cat ${save_log}/train.log - echo -e "\033[33m training of $model_name failed!\033[0m" | tee -a ${save_log}/train.log - fi - if [ "${model_name}" = "det" ]; then - export rec_batch_size_list=( "1" ) - inference="tools/infer/predict_det.py" - det_model_dir="${log_path}/${eval_model_name}_infer" - rec_model_dir="" - elif [ "${model_name}" = "rec" ]; then - inference="tools/infer/predict_rec.py" - rec_model_dir="${log_path}/${eval_model_name}_infer" - det_model_dir="" - fi - # inference - for device in ${devices[*]}; do - if [ ${device} = "cpu" ]; then - for use_mkldnn in ${use_mkldnn_list[*]}; do - for threads in ${cpu_threads_list[*]}; do - for rec_batch_size in ${rec_batch_size_list[*]}; do - save_log_path="${log_path}/${model_name}_${slim_trainer}_cpu_usemkldnn_${use_mkldnn}_cputhreads_${threads}_recbatchnum_${rec_batch_size}_infer.log" - command="${python} ${inference} --enable_mkldnn=${use_mkldnn} --use_gpu=False --cpu_threads=${threads} --benchmark=True --det_model_dir=${det_model_dir} --rec_batch_num=${rec_batch_size} --rec_model_dir=${rec_model_dir} --image_dir=${img_dir} --save_log_path=${save_log_path}" - ${python} ${inference} --enable_mkldnn=${use_mkldnn} --use_gpu=False --cpu_threads=${threads} --benchmark=True --det_model_dir=${det_model_dir} --rec_batch_num=${rec_batch_size} --rec_model_dir=${rec_model_dir} --image_dir=${img_dir} --save_log_path=${save_log_path} - status_check $? 
"${trainer}" "${command}" "${status_log}" - done - done - done - else - # env="export CUDA_VISIBLE_DEVICES=${infer_gpu_id}" - for use_trt in ${gpu_trt_list[*]}; do - for precision in ${gpu_precision_list[*]}; do - if [ ${use_trt} = "False" ] && [ ${precision} != "fp32" ]; then - continue - fi - for rec_batch_size in ${rec_batch_size_list[*]}; do - save_log_path="${log_path}/${model_name}_${slim_trainer}_gpu_usetensorrt_${use_trt}_usefp16_${precision}_recbatchnum_${rec_batch_size}_infer.log" - command="${python} ${inference} --use_gpu=True --use_tensorrt=${use_trt} --precision=${precision} --benchmark=True --det_model_dir=${log_path}/${eval_model_name}_infer --rec_batch_num=${rec_batch_size} --rec_model_dir=${rec_model_dir} --image_dir=${img_dir} --save_log_path=${save_log_path}" - ${python} ${inference} --use_gpu=True --use_tensorrt=${use_trt} --precision=${precision} --benchmark=True --det_model_dir=${log_path}/${eval_model_name}_infer --rec_batch_num=${rec_batch_size} --rec_model_dir=${rec_model_dir} --image_dir=${img_dir} --save_log_path=${save_log_path} - status_check $? "${trainer}" "${command}" "${status_log}" - done - done - done - fi - done - done -done diff --git a/test/ocr_det_params.txt b/test/ocr_det_params.txt new file mode 100644 index 00000000..9752ba43 --- /dev/null +++ b/test/ocr_det_params.txt @@ -0,0 +1,35 @@ +model_name:ocr_det +python:python3.7 +gpu_list:-1|0|0,1 +Global.auto_cast:False|True +Global.epoch_num:10 +Global.save_model_dir:./output/ +Global.save_inference_dir:./output/ +Train.loader.batch_size_per_card: +Global.use_gpu +Global.pretrained_model + +trainer:norm|pact|fpgm +norm_train:tools/train.py -c configs/det/det_mv3_db.yml -o Global.pretrained_model=./pretrain_models/MobileNetV3_large_x0_5_pretrained +quant_train:deploy/slim/quantization/quant.py -c configs/det/det_mv3_db.yml -o Global.pretrained_model=./pretrain_models/det_mv3_db_v2.0_train/best_accuracy +fpgm_train:null +distill_train:null + +eval:tools/eval.py -c configs/det/det_mv3_db.yml -o + +norm_export:tools/export_model.py -c configs/det/det_mv3_db.yml -o +quant_export:deploy/slim/quantization/export_model.py -c configs/det/det_mv3_db.yml -o +fpgm_export:deploy/slim/prune/export_prune_model.py +distill_export:null + +inference:tools/infer/predict_det.py +--use_gpu:True|False +--enable_mkldnn:True|False +--cpu_threads:1|6 +--rec_batch_num:1 +--use_tensorrt:True|False +--precision:fp32|fp16|int8 +--det_model_dir +--image_dir +--save_log_path + diff --git a/test/paddleocr_ci_params.txt b/test/paddleocr_ci_params.txt deleted file mode 100644 index 4cd035ea..00000000 --- a/test/paddleocr_ci_params.txt +++ /dev/null @@ -1,15 +0,0 @@ -train_model_list: ocr_det -gpu_list: -1|0|0,1 -auto_cast_list: False|True -trainer_list: norm|pact|fpgm -python: python3.7 - -inference: python -devices: cpu|gpu -use_mkldnn_list: True|False -cpu_threads_list: 1|6 -rec_batch_size_list: 1|6 -gpu_trt_list: True|False -gpu_precision_list: fp32|fp16|int8 -infer_gpu_id: 0 -log_path: ./output diff --git a/test/prepare.sh b/test/prepare.sh new file mode 100644 index 00000000..65ea28c1 --- /dev/null +++ b/test/prepare.sh @@ -0,0 +1,138 @@ +#!/bin/bash +FILENAME=$1 +# MODE be one of ['lite_train_infer' 'whole_infer' 'whole_train_infer', 'infer'] +MODE=$2 + +dataline=$(cat ${FILENAME}) + +# parser params +IFS=$'\n' +lines=(${dataline}) +function func_parser_key(){ + strs=$1 + IFS=":" + array=(${strs}) + tmp=${array[0]} + echo ${tmp} +} +function func_parser_value(){ + strs=$1 + IFS=":" + array=(${strs}) + tmp=${array[1]} + echo 
${tmp} +} +IFS=$'\n' +# The training params +model_name=$(func_parser_value "${lines[0]}") +train_model_list=$(func_parser_value "${lines[0]}") +slim_trainer_list=$(func_parser_value "${lines[12]}") + +# MODE be one of ['lite_train_infer' 'whole_infer' 'whole_train_infer'] +MODE=$2 +# prepare pretrained weights and dataset +wget -nc -P ./pretrain_models/ https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/MobileNetV3_large_x0_5_pretrained.pdparams +wget -nc -P ./pretrain_models/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/det_mv3_db_v2.0_train.tar +cd pretrain_models && tar xf det_mv3_db_v2.0_train.tar && cd ../ + +if [ ${MODE} = "lite_train_infer" ];then + # pretrain lite train data + rm -rf ./train_data/icdar2015 + wget -nc -P ./train_data/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/icdar2015_lite.tar + cd ./train_data/ && tar xf icdar2015_lite.tar + ln -s ./icdar2015_lite ./icdar2015 + cd ../ + epoch=10 + eval_batch_step=10 +elif [ ${MODE} = "whole_train_infer" ];then + rm -rf ./train_data/icdar2015 + wget -nc -P ./train_data/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/icdar2015.tar + cd ./train_data/ && tar xf icdar2015.tar && cd ../ + epoch=500 + eval_batch_step=200 +elif [ ${MODE} = "whole_infer" ];then + rm -rf ./train_data/icdar2015 + wget -nc -P ./train_data/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/icdar2015_infer.tar + cd ./train_data/ && tar xf icdar2015_infer.tar + ln -s ./icdar2015_infer ./icdar2015 + cd ../ + epoch=10 + eval_batch_step=10 +else + rm -rf ./train_data/icdar2015 + wget -nc -P ./train_data https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/ch_det_data_50.tar + if [ ${model_name} = "ocr_det" ]; then + eval_model_name="ch_ppocr_mobile_v2.0_det_train" + wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_det_train.tar + cd ./inference && tar xf ${eval_model_name}.tar && cd ../ + else + eval_model_name="ch_ppocr_mobile_v2.0_rec_train" + wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_rec_train.tar + cd ./inference && tar xf ${eval_model_name}.tar && cd ../ + fi +fi + + +IFS='|' +for train_model in ${train_model_list[*]}; do + if [ ${train_model} = "ocr_det" ];then + model_name="det" + yml_file="configs/det/ch_ppocr_v2.0/ch_det_mv3_db_v2.0.yml" + wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/ch_det_data_50.tar + cd ./inference && tar xf ch_det_data_50.tar && cd ../ + img_dir="./inference/ch_det_data_50/all-sum-510" + data_dir=./inference/ch_det_data_50/ + data_label_file=[./inference/ch_det_data_50/test_gt_50.txt] + elif [ ${train_model} = "ocr_rec" ];then + model_name="rec" + yml_file="configs/rec/rec_mv3_none_bilstm_ctc.yml" + wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/ch_rec_data_200.tar + cd ./inference && tar xf ch_rec_data_200.tar && cd ../ + img_dir="./inference/ch_rec_data_200/" + fi + + # eval + for slim_trainer in ${slim_trainer_list[*]}; do + if [ ${slim_trainer} = "norm" ]; then + if [ ${model_name} = "det" ]; then + eval_model_name="ch_ppocr_mobile_v2.0_det_train" + wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_det_train.tar + cd ./inference && tar xf ${eval_model_name}.tar && cd ../ + else + eval_model_name="ch_ppocr_mobile_v2.0_rec_train" + wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_rec_train.tar + cd ./inference && tar xf ${eval_model_name}.tar && cd ../ + fi + elif [ 
${slim_trainer} = "pact" ]; then + if [ ${model_name} = "det" ]; then + eval_model_name="ch_ppocr_mobile_v2.0_det_quant_train" + wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/slim/ch_ppocr_mobile_v2.0_det_quant_train.tar + cd ./inference && tar xf ${eval_model_name}.tar && cd ../ + else + eval_model_name="ch_ppocr_mobile_v2.0_rec_quant_train" + wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/slim/ch_ppocr_mobile_v2.0_rec_quant_train.tar + cd ./inference && tar xf ${eval_model_name}.tar && cd ../ + fi + elif [ ${slim_trainer} = "distill" ]; then + if [ ${model_name} = "det" ]; then + eval_model_name="ch_ppocr_mobile_v2.0_det_distill_train" + wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/slim/ch_ppocr_mobile_v2.0_det_distill_train.tar + cd ./inference && tar xf ${eval_model_name}.tar && cd ../ + else + eval_model_name="ch_ppocr_mobile_v2.0_rec_distill_train" + wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/slim/ch_ppocr_mobile_v2.0_rec_distill_train.tar + cd ./inference && tar xf ${eval_model_name}.tar && cd ../ + fi + elif [ ${slim_trainer} = "fpgm" ]; then + if [ ${model_name} = "det" ]; then + eval_model_name="ch_ppocr_mobile_v2.0_det_prune_train" + wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/slim/ch_ppocr_mobile_v2.0_det_prune_train.tar + cd ./inference && tar xf ${eval_model_name}.tar && cd ../ + else + eval_model_name="ch_ppocr_mobile_v2.0_rec_prune_train" + wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/slim/ch_ppocr_mobile_v2.0_rec_prune_train.tar + cd ./inference && tar xf ${eval_model_name}.tar && cd ../ + fi + fi + done +done diff --git a/test/test.sh b/test/test.sh index 5bb48ac5..b95b8ead 100644 --- a/test/test.sh +++ b/test/test.sh @@ -1,203 +1,221 @@ -#!/bin/bash -# Usage: -# bash test/test.sh ./test/paddleocr_ci_params.txt 'lite_train_infer' - +#!/bin/bash FILENAME=$1 - -# MODE be one of ['lite_train_infer' 'whole_infer' 'whole_train_infer'] +# MODE be one of ['lite_train_infer' 'whole_infer' 'whole_train_infer', 'infer'] MODE=$2 -# prepare pretrained weights and dataset -wget -nc -P ./pretrain_models/ https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/MobileNetV3_large_x0_5_pretrained.pdparams -wget -nc -P ./pretrain_models/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/en/det_mv3_db_v2.0_train.tar -cd pretrain_models && tar xf det_mv3_db_v2.0_train.tar && cd ../ - -if [ ${MODE} = "lite_train_infer" ];then - # pretrain lite train data - rm -rf ./train_data/icdar2015 - wget -nc -P ./train_data/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/icdar2015_lite.tar - cd ./train_data/ && tar xf icdar2015_lite.tar - ln -s ./icdar2015_lite ./icdar2015 - cd ../ - epoch=10 - eval_batch_step=10 -elif [ ${MODE} = "whole_train_infer" ];then - rm -rf ./train_data/icdar2015 - wget -nc -P ./train_data/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/icdar2015.tar - cd ./train_data/ && tar xf icdar2015.tar && cd ../ - epoch=500 - eval_batch_step=200 -else - rm -rf ./train_data/icdar2015 - wget -nc -P ./train_data/ https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/icdar2015_infer.tar - cd ./train_data/ && tar xf icdar2015_infer.tar - ln -s ./icdar2015_infer ./icdar2015 - cd ../ - epoch=10 - eval_batch_step=10 -fi - -img_dir="./train_data/icdar2015/text_localization/ch4_test_images/" - dataline=$(cat ${FILENAME}) + # parser params IFS=$'\n' lines=(${dataline}) -function func_parser(){ +function func_parser_key(){ strs=$1 - IFS=": " + IFS=":" + 
array=(${strs}) + tmp=${array[0]} + echo ${tmp} +} +function func_parser_value(){ + strs=$1 + IFS=":" array=(${strs}) tmp=${array[1]} echo ${tmp} } -IFS=$'\n' -# The training params -train_model_list=$(func_parser "${lines[0]}") -gpu_list=$(func_parser "${lines[1]}") -auto_cast_list=$(func_parser "${lines[2]}") -slim_trainer_list=$(func_parser "${lines[3]}") -python=$(func_parser "${lines[4]}") -# inference params -inference=$(func_parser "${lines[5]}") -devices=$(func_parser "${lines[6]}") -use_mkldnn_list=$(func_parser "${lines[7]}") -cpu_threads_list=$(func_parser "${lines[8]}") -rec_batch_size_list=$(func_parser "${lines[9]}") -gpu_trt_list=$(func_parser "${lines[10]}") -gpu_precision_list=$(func_parser "${lines[11]}") - -log_path=$(func_parser "${lines[13]}") -status_log="${log_path}/result.log" - -# install requirments -${python} -m pip install pynvml; -${python} -m pip install psutil; -${python} -m pip install GPUtil; -${python} -m pip install paddlesim==2.0.0 - -paddle_info="$(${python} -c "import paddle;print(f'paddle_version:{paddle.__version__}');print(f'paddle_commit:{paddle.__git_commit__}')")" -echo -e "\033[33m $paddle_info \033[0m" | tee -a ${status_log} -cpu_model=`cat /proc/cpuinfo | grep "model name" | awk -F ':' '{print $2}' | sort | uniq` -echo -e "\033[33m cpu_info:$cpu_model \033[0m" | tee -a ${status_log} -ip=`ifconfig| grep -A 1 'eth0'|grep 'inet'|awk -F ':' '{print $2}'|awk '{print $1}'` -echo -e "\033[33m ip_info:$ip \033[0m" | tee -a ${status_log} - function status_check(){ last_status=$1 # the exit code - run_model=$2 - run_command=$3 - run_log=$4 + run_command=$2 + run_log=$3 if [ $last_status -eq 0 ]; then - echo -e "\033[33m $run_model successfully with command - ${run_command}! \033[0m" | tee -a ${run_log} + echo -e "\033[33m Run successfully with command - ${run_command}! \033[0m" | tee -a ${run_log} else - echo -e "\033[33m $case failed with command - ${run_command}! \033[0m" | tee -a ${run_log} + echo -e "\033[33m Run failed with command - ${run_command}! 
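The slimmed-down status_check takes just the exit code, the command string, and the log file, which pairs naturally with eval'd commands. A typical call site, in sketch form (the training command is illustrative):

command="${python} tools/train.py -c configs/det/det_mv3_db.yml -o Global.epoch_num=10"
eval $command
status_check $? "${command}" "${status_log}"   # tees 'Run successfully/failed with command - ...' into the results log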
\033[0m" | tee -a ${run_log} fi } -IFS="|" -for train_model in ${train_model_list[*]}; do - if [ ${train_model} = "ocr_det" ];then - model_name="det" - yml_file="configs/det/det_mv3_db.yml" - elif [ ${train_model} = "ocr_rec" ];then - model_name="rec" - yml_file="configs/rec/rec_mv3_none_bilstm_ctc.yml" - else - model_name="det" - yml_file="configs/det/det_mv3_db.yml" - fi - IFS="|" - for gpu in ${gpu_list[*]}; do - use_gpu=True - if [ ${gpu} = "-1" ];then - use_gpu=False - env="" - elif [ ${#gpu} -le 1 ];then - env="CUDA_VISIBLE_DEVICES=${gpu}" - else - IFS="," - array=(${gpu}) - env="CUDA_VISIBLE_DEVICES=${array[0]}" - IFS="|" - fi - for auto_cast in ${auto_cast_list[*]}; do - for slim_trainer in ${slim_trainer_list[*]}; do - if [ ${slim_trainer} = "norm" ]; then - trainer="tools/train.py" - export_model="tools/export_model.py" - pretrain="./pretrain_models/MobileNetV3_large_x0_5_pretrained" - elif [ ${slim_trainer} = "pact" ]; then - trainer="deploy/slim/quantization/quant.py" - export_model="deploy/slim/quantization/export_model.py" - pretrain="./pretrain_models/det_mv3_db_v2.0_train/best_accuracy" - elif [ ${slim_trainer} = "fpgm" ]; then - trainer="deploy/slim/prune/sensitivity_anal.py" - export_model="deploy/slim/prune/export_prune_model.py" - pretrain="./pretrain_models/det_mv3_db_v2.0_train/best_accuracy" - wget -nc -P https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/sen.pickle - elif [ ${slim_trainer} = "distill" ]; then - trainer="deploy/slim/distill/train_dml.py" - export_model="deploy/slim/distill/export_distill_model.py" - pretrain="" - else - trainer="tools/train.py" - export_model="tools/export_model.py" - pretrain="./pretrain_models/MobileNetV3_large_x0_5_pretrained" - fi - save_log="${log_path}/${model_name}_${slim_trainer}_autocast_${auto_cast}_gpuid_${gpu}" - if [ ${#gpu} -le 2 ];then - command="${python} ${trainer} -c ${yml_file} -o Global.epoch_num=${epoch} Global.eval_batch_step=${eval_batch_step} Global.auto_cast=${auto_cast} Global.pretrained_model=${pretrain} Global.save_model_dir=${save_log} Global.use_gpu=${use_gpu} Train.loader.batch_size_per_card=2" - ${python} ${trainer} -c ${yml_file} -o Global.epoch_num=${epoch} Global.eval_batch_step=${eval_batch_step} Global.auto_cast=${auto_cast} Global.pretrained_model=${pretrain} Global.save_model_dir=${save_log} Global.use_gpu=${use_gpu} Train.loader.batch_size_per_card=2 - else - command="${python} -m paddle.distributed.launch --log_dir=./debug/ --gpus ${gpu} ${trainer} -c ${yml_file} -o Global.epoch_num=${epoch} Global.eval_batch_step=${eval_batch_step} Global.auto_cast=${auto_cast} Global.pretrained_model=${pretrain} Global.save_model_dir=${save_log} Global.use_gpu=${use_gpu} Train.loader.batch_size_per_card=2" - ${python} -m paddle.distributed.launch --log_dir=./debug/ --gpus ${gpu} ${trainer} -c ${yml_file} -o Global.epoch_num=${epoch} Global.eval_batch_step=${eval_batch_step} Global.auto_cast=${auto_cast} Global.pretrained_model=${pretrain} Global.save_model_dir=${save_log} Global.use_gpu=${use_gpu} Train.loader.batch_size_per_card=2 - fi - status_check $? 
"${trainer}" "${command}" "${status_log}" +IFS=$'\n' +# The training params +model_name=$(func_parser_value "${lines[0]}") +python=$(func_parser_value "${lines[1]}") +gpu_list=$(func_parser_value "${lines[2]}") +autocast_list=$(func_parser_value "${lines[3]}") +autocast_key=$(func_parser_key "${lines[3]}") +epoch_key=$(func_parser_key "${lines[4]}") +save_model_key=$(func_parser_key "${lines[5]}") +save_infer_key=$(func_parser_key "${lines[6]}") +train_batch_key=$(func_parser_key "${lines[7]}") +train_use_gpu_key=$(func_parser_key "${lines[8]}") +pretrain_model_key=$(func_parser_key "${lines[9]}") - command="${python} ${export_model} -c ${yml_file} -o Global.pretrained_model=${save_log}/latest Global.save_inference_dir=${save_log}_infer/ Global.save_model_dir=${save_log}" - ${python} ${export_model} -c ${yml_file} -o Global.pretrained_model=${save_log}/latest Global.save_inference_dir=${save_log}_infer/ Global.save_model_dir=${save_log} - status_check $? "${trainer}" "${command}" "${status_log}" - - if [ "${model_name}" = "det" ]; then - export rec_batch_size_list=( "1" ) - inference="tools/infer/predict_det.py" - det_model_dir=${save_log}_infer - rec_model_dir="" - elif [ "${model_name}" = "rec" ]; then - inference="tools/infer/predict_rec.py" - rec_model_dir=${save_log}_infer - det_model_dir="" - fi - # inference - for device in ${devices[*]}; do - if [ ${device} = "cpu" ]; then - for use_mkldnn in ${use_mkldnn_list[*]}; do - for threads in ${cpu_threads_list[*]}; do - for rec_batch_size in ${rec_batch_size_list[*]}; do - save_log_path="${log_path}/${model_name}_${slim_trainer}_cpu_usemkldnn_${use_mkldnn}_cputhreads_${threads}_recbatchnum_${rec_batch_size}_infer.log" - command="${python} ${inference} --enable_mkldnn=${use_mkldnn} --use_gpu=False --cpu_threads=${threads} --benchmark=True --det_model_dir=${det_model_dir} --rec_batch_num=${rec_batch_size} --rec_model_dir=${rec_model_dir} --image_dir=${img_dir} --save_log_path=${save_log_path}" - ${python} ${inference} --enable_mkldnn=${use_mkldnn} --use_gpu=False --cpu_threads=${threads} --benchmark=True --det_model_dir=${det_model_dir} --rec_batch_num=${rec_batch_size} --rec_model_dir=${rec_model_dir} --image_dir=${img_dir} --save_log_path=${save_log_path} - status_check $? "${inference}" "${command}" "${status_log}" - done - done - done - else - for use_trt in ${gpu_trt_list[*]}; do - for precision in ${gpu_precision_list[*]}; do - if [ ${use_trt} = "False" ] && [ ${precision} != "fp32" ]; then - continue - fi - for rec_batch_size in ${rec_batch_size_list[*]}; do - save_log_path="${log_path}/${model_name}_${slim_trainer}_gpu_usetensorrt_${use_trt}_usefp16_${precision}_recbatchnum_${rec_batch_size}_infer.log" - command="${python} ${inference} --use_gpu=True --use_tensorrt=${use_trt} --precision=${precision} --benchmark=True --det_model_dir=${det_model_dir} --rec_batch_num=${rec_batch_size} --rec_model_dir=${rec_model_dir} --image_dir=${img_dir} --save_log_path=${save_log_path}" - ${python} ${inference} --use_gpu=True --use_tensorrt=${use_trt} --precision=${precision} --benchmark=True --det_model_dir=${det_model_dir} --rec_batch_num=${rec_batch_size} --rec_model_dir=${rec_model_dir} --image_dir=${img_dir} --save_log_path=${save_log_path} - status_check $? 
"${inference}" "${command}" "${status_log}" - done - done - done - fi +trainer_list=$(func_parser_value "${lines[10]}") +norm_trainer=$(func_parser_value "${lines[11]}") +pact_trainer=$(func_parser_value "${lines[12]}") +fpgm_trainer=$(func_parser_value "${lines[13]}") +distill_trainer=$(func_parser_value "${lines[14]}") + +eval_py=$(func_parser_value "${lines[15]}") +norm_export=$(func_parser_value "${lines[16]}") +pact_export=$(func_parser_value "${lines[17]}") +fpgm_export=$(func_parser_value "${lines[18]}") +distill_export=$(func_parser_value "${lines[19]}") + +inference_py=$(func_parser_value "${lines[20]}") +use_gpu_key=$(func_parser_key "${lines[21]}") +use_gpu_list=$(func_parser_value "${lines[21]}") +use_mkldnn_key=$(func_parser_key "${lines[22]}") +use_mkldnn_list=$(func_parser_value "${lines[22]}") +cpu_threads_key=$(func_parser_key "${lines[23]}") +cpu_threads_list=$(func_parser_value "${lines[23]}") +batch_size_key=$(func_parser_key "${lines[24]}") +batch_size_list=$(func_parser_value "${lines[24]}") +use_trt_key=$(func_parser_key "${lines[25]}") +use_trt_list=$(func_parser_value "${lines[25]}") +precision_key=$(func_parser_key "${lines[26]}") +precision_list=$(func_parser_value "${lines[26]}") +model_dir_key=$(func_parser_key "${lines[27]}") +image_dir_key=$(func_parser_key "${lines[28]}") +save_log_key=$(func_parser_key "${lines[29]}") + +LOG_PATH="./test/output" +mkdir -p ${LOG_PATH} +status_log="${LOG_PATH}/results.log" + +if [ ${MODE} = "lite_train_infer" ]; then + export infer_img_dir="./train_data/icdar2015/text_localization/ch4_test_images/" + export epoch_num=10 +elif [ ${MODE} = "whole_infer" ]; then + export infer_img_dir="./train_data/icdar2015/text_localization/ch4_test_images/" + export epoch_num=10 +elif [ ${MODE} = "whole_train_infer" ]; then + export infer_img_dir="./train_data/icdar2015/text_localization/ch4_test_images/" + export epoch_num=300 +else + export infer_img_dir="./inference/ch_det_data_50/all-sum-510" + export infer_model_dir="./inference/ch_ppocr_mobile_v2.0_det_train/best_accuracy" +fi + + +function func_inference(){ + IFS='|' + _python=$1 + _script=$2 + _model_dir=$3 + _log_path=$4 + _img_dir=$5 + + # inference + for use_gpu in ${use_gpu_list[*]}; do + if [ ${use_gpu} = "False" ]; then + for use_mkldnn in ${use_mkldnn_list[*]}; do + for threads in ${cpu_threads_list[*]}; do + for batch_size in ${batch_size_list[*]}; do + _save_log_path="${_log_path}/infer_cpu_usemkldnn_${use_mkldnn}_threads_${threads}_batchsize_${batch_size}" + command="${_python} ${_script} ${use_gpu_key}=${use_gpu} ${use_mkldnn_key}=${use_mkldnn} ${cpu_threads_key}=${threads} ${model_dir_key}=${_model_dir} ${batch_size_key}=${batch_size} ${image_dir_key}=${_img_dir} ${save_log_key}=${_save_log_path}" + eval $command + status_check $? "${command}" "${status_log}" + done done done + else + for use_trt in ${use_trt_list[*]}; do + for precision in ${precision_list[*]}; do + if [ ${use_trt} = "False" ] && [ ${precision} != "fp32" ]; then + continue + fi + for batch_size in ${batch_size_list[*]}; do + _save_log_path="${_log_path}/infer_gpu_usetrt_${use_trt}_precision_${precision}_batchsize_${batch_size}" + command="${_python} ${_script} ${use_gpu_key}=${use_gpu} ${use_trt_key}=${use_trt} ${precision_key}=${precision} ${model_dir_key}=${_model_dir} ${batch_size_key}=${batch_size} ${image_dir_key}=${_img_dir} ${save_log_key}=${_save_log_path}" + eval $command + status_check $? 
"${command}" "${status_log}" + done + done + done + fi + done +} + +if [ ${MODE} != "infer" ]; then + +IFS="|" +for gpu in ${gpu_list[*]}; do + use_gpu=True + if [ ${gpu} = "-1" ];then + use_gpu=False + env="" + elif [ ${#gpu} -le 1 ];then + env="export CUDA_VISIBLE_DEVICES=${gpu}" + elif [ ${#gpu} -le 15 ];then + IFS="," + array=(${gpu}) + env="export CUDA_VISIBLE_DEVICES=${array[0]}" + IFS="|" + else + IFS=";" + array=(${gpu}) + ips=${array[0]} + gpu=${array[1]} + IFS="|" + fi + for autocast in ${autocast_list[*]}; do + for trainer in ${trainer_list[*]}; do + if [ ${trainer} = "pact" ]; then + run_train=${pact_trainer} + run_export=${pact_export} + elif [ ${trainer} = "fpgm" ]; then + run_train=${fpgm_trainer} + run_export=${fpgm_export} + elif [ ${trainer} = "distill" ]; then + run_train=${distill_trainer} + run_export=${distill_export} + else + run_train=${norm_trainer} + run_export=${norm_export} + fi + + if [ ${run_train} = "null" ]; then + continue + fi + if [ ${run_export} = "null" ]; then + continue + fi + + save_log="${LOG_PATH}/${trainer}_gpus_${gpu}_autocast_${autocast}" + if [ ${#gpu} -le 2 ];then # epoch_num #TODO + cmd="${python} ${run_train} ${train_use_gpu_key}=${use_gpu} ${autocast_key}=${autocast} ${epoch_key}=${epoch_num} ${save_model_key}=${save_log} " + elif [ ${#gpu} -le 15 ];then + cmd="${python} -m paddle.distributed.launch --gpus=${gpu} ${run_train} ${autocast_key}=${autocast} ${epoch_key}=${epoch_num} ${save_model_key}=${save_log}" + else + cmd="${python} -m paddle.distributed.launch --ips=${ips} --gpus=${gpu} ${run_train} ${autocast_key}=${autocast} ${epoch_key}=${epoch_num} ${save_model_key}=${save_log}" + fi + # run train + eval $cmd + status_check $? "${cmd}" "${status_log}" + + # run eval + eval_cmd="${python} ${eval_py} ${save_model_key}=${save_log} ${pretrain_model_key}=${save_log}/latest" + eval $eval_cmd + status_check $? "${eval_cmd}" "${status_log}" + + # run export model + save_infer_path="${save_log}" + export_cmd="${python} ${run_export} ${save_model_key}=${save_log} ${pretrain_model_key}=${save_log}/latest ${save_infer_key}=${save_infer_path}" + eval $export_cmd + status_check $? "${export_cmd}" "${status_log}" + + #run inference + save_infer_path="${save_log}" + func_inference "${python}" "${inference_py}" "${save_infer_path}" "${LOG_PATH}" "${infer_img_dir}" done done done + +else + save_infer_path="${LOG_PATH}/${MODE}" + run_export=${norm_export} + export_cmd="${python} ${run_export} ${save_model_key}=${save_infer_path} ${pretrain_model_key}=${infer_model_dir} ${save_infer_key}=${save_infer_path}" + eval $export_cmd + status_check $? 
"${export_cmd}" "${status_log}" + + #run inference + func_inference "${python}" "${inference_py}" "${save_infer_path}" "${LOG_PATH}" "${infer_img_dir}" +fi From 9cac660093e4046ed13cefa5b1a7a61b44a0b2a8 Mon Sep 17 00:00:00 2001 From: LDOUBLEV Date: Mon, 28 Jun 2021 20:49:36 +0800 Subject: [PATCH 34/34] update prepare.sh --- test/prepare.sh | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/test/prepare.sh b/test/prepare.sh index 65ea28c1..42f12b57 100644 --- a/test/prepare.sh +++ b/test/prepare.sh @@ -26,7 +26,7 @@ IFS=$'\n' # The training params model_name=$(func_parser_value "${lines[0]}") train_model_list=$(func_parser_value "${lines[0]}") -slim_trainer_list=$(func_parser_value "${lines[12]}") +trainer_list=$(func_parser_value "${lines[10]}") # MODE be one of ['lite_train_infer' 'whole_infer' 'whole_train_infer'] MODE=$2 @@ -76,7 +76,7 @@ fi IFS='|' for train_model in ${train_model_list[*]}; do if [ ${train_model} = "ocr_det" ];then - model_name="det" + model_name="ocr_det" yml_file="configs/det/ch_ppocr_v2.0/ch_det_mv3_db_v2.0.yml" wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/ch_det_data_50.tar cd ./inference && tar xf ch_det_data_50.tar && cd ../ @@ -84,7 +84,7 @@ for train_model in ${train_model_list[*]}; do data_dir=./inference/ch_det_data_50/ data_label_file=[./inference/ch_det_data_50/test_gt_50.txt] elif [ ${train_model} = "ocr_rec" ];then - model_name="rec" + model_name="ocr_rec" yml_file="configs/rec/rec_mv3_none_bilstm_ctc.yml" wget -nc -P ./inference https://paddleocr.bj.bcebos.com/dygraph_v2.0/test/ch_rec_data_200.tar cd ./inference && tar xf ch_rec_data_200.tar && cd ../ @@ -92,7 +92,7 @@ for train_model in ${train_model_list[*]}; do fi # eval - for slim_trainer in ${slim_trainer_list[*]}; do + for slim_trainer in ${trainer_list[*]}; do if [ ${slim_trainer} = "norm" ]; then if [ ${model_name} = "det" ]; then eval_model_name="ch_ppocr_mobile_v2.0_det_train"