fix kl quant
parent 08f25ca4fa
commit eb22ce42db

@@ -82,14 +82,14 @@ pipline:pipeline_http_client.py --image_dir=../../doc/imgs
 ===========================kl_quant_params===========================
 infer_model:./inference/ch_ppocr_mobile_v2.0_det_infer/
 infer_export:tools/export_model.py -c configs/det/ch_ppocr_v2.0/ch_det_mv3_db_v2.0.yml -o
-infer_quant:False
+infer_quant:True
 inference:tools/infer/predict_det.py
 --use_gpu:True|False
 --enable_mkldnn:True|False
 --cpu_threads:1|6
 --rec_batch_num:1
 --use_tensorrt:False|True
---precision:fp32|fp16|int8
+--precision:int8
 --det_model_dir:
 --image_dir:./inference/ch_det_data_50/all-sum-510/
 null:null
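
Note: every entry in the kl_quant_params block above is a plain key:value line; the commit flips infer_quant to True and pins --precision to int8 so the KL-quantized detector is benchmarked as a quantized model. A minimal sketch of how such a line can be split on the colon (the repository's own func_parser_value helper, used further down in test.sh, is assumed to behave roughly like this):

    # Sketch (assumption): split a "key:value" params line at the first colon.
    function parse_value(){
        local line=$1
        echo "${line#*:}"    # drop everything up to and including the first ":"
    }
    parse_value "infer_quant:True"     # -> True
    parse_value "--precision:int8"     # -> int8
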
@@ -57,7 +57,7 @@ function func_cpp_inference(){
             for threads in ${cpp_cpu_threads_list[*]}; do
                 for batch_size in ${cpp_batch_size_list[*]}; do
                     precision="fp32"
-                    if [ ${_flag_quant} = "True" ]; then
+                    if [ ${use_mkldnn} = "False" ] && [ ${_flag_quant} = "True" ]; then
                         precison="int8"
                     fi
                     _save_log_path="${_log_path}/cpp_infer_cpu_usemkldnn_${use_mkldnn}_threads_${threads}_precision_${precision}_batchsize_${batch_size}.log"
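
The change above makes the C++ CPU loop switch to int8 only when MKL-DNN is disabled and the model is quantized; with MKL-DNN on, a quantized model keeps the fp32 label. Note that the assignment inside the branch writes precison rather than precision, so the int8 value never reaches the log name; a sketch of the presumably intended logic (the spelling fix is an assumption, not part of this commit):

    precision="fp32"
    if [ ${use_mkldnn} = "False" ] && [ ${_flag_quant} = "True" ]; then
        precision="int8"    # assumed intent: the diff itself writes the misspelled "precison"
    fi
    _save_log_path="${_log_path}/cpp_infer_cpu_usemkldnn_${use_mkldnn}_threads_${threads}_precision_${precision}_batchsize_${batch_size}.log"
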
@@ -5,11 +5,7 @@ FILENAME=$1
 # MODE be one of ['lite_train_infer' 'whole_infer' 'whole_train_infer', 'infer', 'klquant_infer']
 MODE=$2
 
-if [ ${MODE} = "klquant_infer" ]; then
-    dataline=$(awk 'NR==82, NR==98{print}' $FILENAME)
-else
-    dataline=$(awk 'NR==1, NR==51{print}' $FILENAME)
-fi
+dataline=$(awk 'NR==1, NR==51{print}' $FILENAME)
 
 # parser params
 IFS=$'\n'
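
With this change the script always reads the common section (lines 1-51) of the params file here, and the klquant-specific block is re-read later inside the klquant_infer branch (next hunk). For reference, this is the parsing pattern the script relies on:

    IFS=$'\n'                                            # split only on newlines
    dataline=$(awk 'NR==1, NR==51{print}' $FILENAME)     # lines 1..51 of the params file
    lines=(${dataline})                                  # one array element per line
    echo "${lines[0]}"                                   # first line of the section
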
@@ -93,6 +89,8 @@ infer_value1=$(func_parser_value "${lines[50]}")
 
 # parser klquant_infer
 if [ ${MODE} = "klquant_infer" ]; then
+    dataline=$(awk 'NR==82, NR==98{print}' $FILENAME)
+    lines=(${dataline})
     # parser inference model
     infer_model_dir_list=$(func_parser_value "${lines[1]}")
     infer_export_list=$(func_parser_value "${lines[2]}")
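
Because lines is rebuilt here from lines 82-98 of the params file, the indices now refer to the kl_quant_params block from the first hunk rather than to the common section; presumably the mapping is (index interpretation is an assumption based on that block's layout):

    # lines[0] -> ===========================kl_quant_params===========================
    infer_model_dir_list=$(func_parser_value "${lines[1]}")   # infer_model:./inference/ch_ppocr_mobile_v2.0_det_infer/
    infer_export_list=$(func_parser_value "${lines[2]}")      # infer_export:tools/export_model.py -c ... -o
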
@@ -144,7 +142,7 @@ function func_inference(){
             for threads in ${cpu_threads_list[*]}; do
                 for batch_size in ${batch_size_list[*]}; do
                     precison="fp32"
-                    if [ ${_flag_quant} = "True" ]; then
+                    if [ ${use_mkldnn} = "False" ] && [ ${_flag_quant} = "True" ]; then
                         precision="int8"
                     fi
                     _save_log_path="${_log_path}/python_infer_cpu_usemkldnn_${use_mkldnn}_threads_${threads}_precision_${precision}_batchsize_${batch_size}.log"
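
The Python CPU loop gets the same condition change. Here the initial assignment is the misspelled one (precison), so outside the quantized branch the precision field in the log name may be empty or stale; for a quantized, non-MKL-DNN run the pattern expands to a name like:

    ${_log_path}/python_infer_cpu_usemkldnn_False_threads_6_precision_int8_batchsize_1.log
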
@@ -228,6 +226,9 @@ if [ ${MODE} = "infer" ] || [ ${MODE} = "klquant_infer" ]; then
         fi
         #run inference
         is_quant=${infer_quant_flag[Count]}
+        if [ ${MODE} = "klquant_infer" ]; then
+            is_quant="True"
+        fi
         func_inference "${python}" "${inference_py}" "${save_infer_dir}" "${LOG_PATH}" "${infer_img_dir}" ${is_quant}
         Count=$(($Count + 1))
     done
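
Forcing is_quant to True in klquant_infer mode means func_inference treats the exported model as quantized regardless of the per-model flag parsed from the params file. Under the settings in the kl_quant_params block above, the CPU command it ends up running looks roughly like this (the model directory is whatever infer_export wrote to save_infer_dir; the exact value is filled in at run time):

    ${python} tools/infer/predict_det.py \
        --use_gpu=False \
        --enable_mkldnn=False \
        --cpu_threads=6 \
        --rec_batch_num=1 \
        --precision=int8 \
        --det_model_dir=${save_infer_dir} \
        --image_dir=./inference/ch_det_data_50/all-sum-510/
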