Merge branch 'PaddlePaddle:dygraph' into dygraph
commit 1757d8347a
@@ -101,7 +101,7 @@ def main():
     quanter = QAT(config=quant_config)
     quanter.quantize(model)

-    init_model(config, model, logger)
+    init_model(config, model)
     model.eval()

     # build metric
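For context, `QAT` here is PaddleSlim's dygraph quantization-aware-training wrapper. A minimal sketch of the `quant_config` dict it consumes, with keys and values taken from PaddleSlim's documented defaults rather than from this diff:

```python
# Assumed configuration for paddleslim.dygraph.quant.QAT; the exact dict
# used by this script is not shown in the diff.
quant_config = {
    'weight_quantize_type': 'channel_wise_abs_max',        # per-channel weight quantization
    'activation_quantize_type': 'moving_average_abs_max',  # moving-average activation ranges
    'weight_bits': 8,                                      # int8 weights
    'activation_bits': 8,                                  # int8 activations
    'dtype': 'int8',
    'window_size': 10000,                                  # window for range statistics
    'moving_rate': 0.9,                                    # decay of the moving average
    'quantizable_layer_type': ['Conv2D', 'Linear'],        # layer types to quantize
}
```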
@@ -17,7 +17,7 @@ distill_train:null
 eval:tools/eval.py -c configs/det/det_mv3_db.yml -o

 Global.save_inference_dir:./output/
-Global.checkpoints:
+Global.pretrained_model:
 norm_export:tools/export_model.py -c configs/det/det_mv3_db.yml -o
 quant_export:deploy/slim/quantization/export_model.py -c configs/det/det_mv3_db.yml -o
 fpgm_export:deploy/slim/prune/export_prune_model.py
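Each line in this params file is a `key:value` pair consumed by the accompanying test script. A sketch of how such a line can be split (illustrative; the real parser may differ):

```shell
line="norm_export:tools/export_model.py -c configs/det/det_mv3_db.yml -o"
key=$(echo "${line}" | cut -d ":" -f 1)     # -> norm_export
value=$(echo "${line}" | cut -d ":" -f 2-)  # -> tools/export_model.py -c configs/det/det_mv3_db.yml -o
```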
@@ -101,7 +101,7 @@ function func_inference(){
         for use_mkldnn in ${use_mkldnn_list[*]}; do
             for threads in ${cpu_threads_list[*]}; do
                 for batch_size in ${batch_size_list[*]}; do
-                    _save_log_path="${_log_path}/infer_cpu_usemkldnn_${use_mkldnn}_threads_${threads}_batchsize_${batch_size}"
+                    _save_log_path="${_log_path}/infer_cpu_usemkldnn_${use_mkldnn}_threads_${threads}_batchsize_${batch_size}.log"
                     command="${_python} ${_script} ${use_gpu_key}=${use_gpu} ${use_mkldnn_key}=${use_mkldnn} ${cpu_threads_key}=${threads} ${infer_model_key}=${_model_dir} ${batch_size_key}=${batch_size} ${image_dir_key}=${_img_dir} ${save_log_key}=${_save_log_path} --benchmark=True"
                     eval $command
                     status_check $? "${command}" "${status_log}"
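The only change in this hunk is the `.log` suffix on `_save_log_path`, so benchmark output files carry a conventional extension. A hypothetical expansion with assumed values:

```shell
_log_path=./test/output          # assumed value for illustration
use_mkldnn=True; threads=1; batch_size=1
_save_log_path="${_log_path}/infer_cpu_usemkldnn_${use_mkldnn}_threads_${threads}_batchsize_${batch_size}.log"
echo "${_save_log_path}"
# -> ./test/output/infer_cpu_usemkldnn_True_threads_1_batchsize_1.log
```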
@@ -115,7 +115,7 @@ function func_inference(){
                 continue
             fi
             for batch_size in ${batch_size_list[*]}; do
-                _save_log_path="${_log_path}/infer_gpu_usetrt_${use_trt}_precision_${precision}_batchsize_${batch_size}"
+                _save_log_path="${_log_path}/infer_gpu_usetrt_${use_trt}_precision_${precision}_batchsize_${batch_size}.log"
                 command="${_python} ${_script} ${use_gpu_key}=${use_gpu} ${use_trt_key}=${use_trt} ${precision_key}=${precision} ${infer_model_key}=${_model_dir} ${batch_size_key}=${batch_size} ${image_dir_key}=${_img_dir} ${save_log_key}=${_save_log_path} --benchmark=True"
                 eval $command
                 status_check $? "${command}" "${status_log}"
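Both the CPU and GPU branches end by passing the command's exit code to `status_check`. A plausible sketch of that helper (the real definition lives elsewhere in this script and may differ):

```shell
function status_check(){
    last_status=$1    # exit code of the command that just ran
    run_command=$2
    run_log=$3
    if [ ${last_status} -eq 0 ]; then
        echo -e "\033[33m Run successfully with command - ${run_command}! \033[0m" | tee -a ${run_log}
    else
        echo -e "\033[33m Run failed with command - ${run_command}! \033[0m" | tee -a ${run_log}
    fi
}
```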
@@ -136,6 +136,7 @@ for gpu in ${gpu_list[*]}; do
         env=""
     elif [ ${#gpu} -le 1 ];then
         env="export CUDA_VISIBLE_DEVICES=${gpu}"
+        eval ${env}
     elif [ ${#gpu} -le 15 ];then
         IFS=","
         array=(${gpu})
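Why the added `eval ${env}` matters: assigning the export command to a string does not change the environment; the stored command has to be executed. A small sketch:

```shell
gpu=0
env="export CUDA_VISIBLE_DEVICES=${gpu}"
echo "${CUDA_VISIBLE_DEVICES:-unset}"   # still unset: the assignment alone does nothing
eval ${env}                             # run the stored command in the current shell
echo "${CUDA_VISIBLE_DEVICES}"          # now prints 0
```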
@@ -215,9 +216,10 @@ for gpu in ${gpu_list[*]}; do
            status_check $? "${export_cmd}" "${status_log}"
            #run inference
+           echo $env
            eval $env
            save_infer_path="${save_log}"
            func_inference "${python}" "${inference_py}" "${save_infer_path}" "${LOG_PATH}" "${infer_img_dir}"
            eval "unset CUDA_VISIBLE_DEVICES"
        done
    done
done
@@ -19,7 +19,29 @@

 ### 2.1 Training
-TBD
+#### Data preparation
+Training uses the public [PubTabNet](https://arxiv.org/abs/1911.10683) dataset, which can be downloaded from the [official site](https://github.com/ibm-aur-nlp/PubTabNet). PubTabNet contains roughly 500k table images together with HTML-format annotations for each image.
+
+#### Start training
+*If you installed the CPU version, set the `use_gpu` field in the configuration file to false.*
+```shell
+# single-machine single-GPU training
+python3 tools/train.py -c configs/table/table_mv3.yml
+# single-machine multi-GPU training; set the GPU IDs to use via the --gpus flag
+python3 -m paddle.distributed.launch --gpus '0,1,2,3' tools/train.py -c configs/table/table_mv3.yml
+```
+
+In the commands above, -c selects the configs/table/table_mv3.yml configuration file for training. For a detailed explanation of the configuration file, see [this page](./config.md).
+
+#### Resuming training
+
+If the training program is interrupted and you want to resume from the interrupted model, specify the path of the model to load via Global.checkpoints:
+```shell
+python3 tools/train.py -c configs/table/table_mv3.yml -o Global.checkpoints=./your/trained/model
+```
+
+**Note**: `Global.checkpoints` has higher priority than `Global.pretrain_weights`; when both are specified, the model given by `Global.checkpoints` is loaded first, and if that path is wrong, the model given by `Global.pretrain_weights` is loaded instead.
+

 ### 2.2 Evaluation
 First cd into the PaddleOCR/ppstructure directory
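The note above describes a fallback order for weights. A minimal sketch of that priority rule, using a hypothetical `resolve_weights` helper (illustrative only, not PaddleOCR's actual loader):

```python
import os

# Illustrative only: Global.checkpoints wins over Global.pretrain_weights,
# falling back when the checkpoint path is invalid.
def resolve_weights(checkpoints, pretrain_weights):
    if checkpoints and os.path.exists(checkpoints + ".pdparams"):
        return checkpoints        # resume the interrupted run
    return pretrain_weights       # fall back to the pretrained model
```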
@@ -164,7 +164,7 @@ def create_predictor(args, mode, logger):
         config.enable_use_gpu(args.gpu_mem, 0)
         if args.use_tensorrt:
             config.enable_tensorrt_engine(
-                precision_mode=inference.PrecisionType.Float32,
+                precision_mode=precision,
                 max_batch_size=args.max_batch_size,
                 min_subgraph_size=args.min_subgraph_size)
             # skip the minimum trt subgraph
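The new `precision_mode=precision` implies the precision is derived from the runtime arguments rather than hard-coded to FP32. A sketch of how such a mapping might look (the helper name is an assumption; the `PrecisionType` members are real Paddle Inference enums):

```python
from paddle import inference

def get_precision(name):
    # Map a string flag such as args.precision to a TensorRT precision mode.
    if name == "fp16":
        return inference.PrecisionType.Half
    if name == "int8":
        return inference.PrecisionType.Int8
    return inference.PrecisionType.Float32

precision = get_precision("fp16")   # e.g. args.precision == "fp16"
```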