From 5e555a80473fbca5f033171aff073db75d4b523c Mon Sep 17 00:00:00 2001 From: LDOUBLEV Date: Thu, 18 Feb 2021 18:53:28 +0800 Subject: [PATCH] cherry-pick fix doc and fix dilation --- deploy/cpp_infer/tools/config.txt | 2 +- doc/doc_ch/installation.md | 2 +- doc/doc_en/inference_en.md | 3 ++- doc/doc_en/installation_en.md | 2 +- tools/infer/predict_det.py | 2 +- tools/infer/utility.py | 4 ++++ 6 files changed, 10 insertions(+), 5 deletions(-) diff --git a/deploy/cpp_infer/tools/config.txt b/deploy/cpp_infer/tools/config.txt index e185377e..24e4ef0d 100644 --- a/deploy/cpp_infer/tools/config.txt +++ b/deploy/cpp_infer/tools/config.txt @@ -9,7 +9,7 @@ use_mkldnn 0 max_side_len 960 det_db_thresh 0.3 det_db_box_thresh 0.5 -det_db_unclip_ratio 2.0 +det_db_unclip_ratio 1.6 det_model_dir ./inference/ch_ppocr_mobile_v2.0_det_infer/ # cls config diff --git a/doc/doc_ch/installation.md b/doc/doc_ch/installation.md index fce151eb..7e7523b9 100644 --- a/doc/doc_ch/installation.md +++ b/doc/doc_ch/installation.md @@ -30,7 +30,7 @@ sudo nvidia-docker run --name ppocr -v $PWD:/paddle --shm-size=64G --network=hos sudo docker container exec -it ppocr /bin/bash ``` -**2. 安装PaddlePaddle Fluid v2.0** +**2. 安装PaddlePaddle 2.0** ``` pip3 install --upgrade pip diff --git a/doc/doc_en/inference_en.md b/doc/doc_en/inference_en.md index 6b745619..aa3e0536 100755 --- a/doc/doc_en/inference_en.md +++ b/doc/doc_en/inference_en.md @@ -5,7 +5,8 @@ The inference model (the model saved by `paddle.jit.save`) is generally a solidi The model saved during the training process is the checkpoints model, which saves the parameters of the model and is mostly used to resume training. -Compared with the checkpoints model, the inference model will additionally save the structural information of the model. It has superior performance in predicting in deployment and accelerating inferencing, is flexible and convenient, and is suitable for integration with actual systems. 
For more details, please refer to the document [Classification Framework](https://github.com/PaddlePaddle/PaddleClas/blob/master/docs/zh_CN/extension/paddle_inference.md). +Compared with the checkpoints model, the inference model will additionally save the structural information of the model. Therefore, it is easier to deploy because the model structure and model parameters are already solidified in the inference model file, and is suitable for integration with actual systems. +For more details, please refer to the document [Classification Framework](https://github.com/PaddlePaddle/PaddleClas/blob/release%2F2.0/docs/zh_CN/extension/paddle_mobile_inference.md). Next, we first introduce how to convert a trained model into an inference model, and then we will introduce text detection, text recognition, angle class, and the concatenation of them based on inference model. diff --git a/doc/doc_en/installation_en.md b/doc/doc_en/installation_en.md index 35c1881d..dec384b2 100644 --- a/doc/doc_en/installation_en.md +++ b/doc/doc_en/installation_en.md @@ -33,7 +33,7 @@ You can also visit [DockerHub](https://hub.docker.com/r/paddlepaddle/paddle/tags sudo docker container exec -it ppocr /bin/bash ``` -**2. Install PaddlePaddle Fluid v2.0** +**2. 
Install PaddlePaddle 2.0** ``` pip3 install --upgrade pip diff --git a/tools/infer/predict_det.py b/tools/infer/predict_det.py index 077692af..26febf1c 100755 --- a/tools/infer/predict_det.py +++ b/tools/infer/predict_det.py @@ -64,7 +64,7 @@ class TextDetector(object): postprocess_params["box_thresh"] = args.det_db_box_thresh postprocess_params["max_candidates"] = 1000 postprocess_params["unclip_ratio"] = args.det_db_unclip_ratio - postprocess_params["use_dilation"] = True + postprocess_params["use_dilation"] = args.use_dilation elif self.det_algorithm == "EAST": postprocess_params['name'] = 'EASTPostProcess' postprocess_params["score_thresh"] = args.det_east_score_thresh diff --git a/tools/infer/utility.py b/tools/infer/utility.py index 4171a29b..70e855c7 100755 --- a/tools/infer/utility.py +++ b/tools/infer/utility.py @@ -47,6 +47,8 @@ def parse_args(): parser.add_argument("--det_db_box_thresh", type=float, default=0.5) parser.add_argument("--det_db_unclip_ratio", type=float, default=1.6) parser.add_argument("--max_batch_size", type=int, default=10) + parser.add_argument("--use_dilation", type=bool, default=False) + # EAST params parser.add_argument("--det_east_score_thresh", type=float, default=0.8) parser.add_argument("--det_east_cover_thresh", type=float, default=0.1) @@ -123,6 +125,8 @@ def create_predictor(args, mode, logger): # cache 10 different shapes for mkldnn to avoid memory leak config.set_mkldnn_cache_capacity(10) config.enable_mkldnn() + # TODO LDOUBLEV: fix mkldnn bug when batch_size > 1 + #config.set_mkldnn_op({'conv2d', 'depthwise_conv2d', 'pool2d', 'batch_norm'}) args.rec_batch_num = 1 # config.enable_memory_optim()