Merge remote-tracking branch 'upstream/develop' into develop
commit ddb7a72d94
@@ -62,4 +62,4 @@ public:
       const std::vector<int> &rec_image_shape = {3, 48, 192});
 };
 
-} // namespace PaddleOCR
+} // namespace PaddleOCR

@@ -81,7 +81,7 @@ void ResizeImgType0::Run(const cv::Mat &img, cv::Mat &resize_img,
   else if (resize_h / 32 < 1 + 1e-5)
     resize_h = 32;
   else
-    resize_h = (resize_h / 32 - 1) * 32;
+    resize_h = resize_h / 32 * 32;
 
   if (resize_w % 32 == 0)
     resize_w = resize_w;

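Aside (not part of the diff): for heights that reach the final `else`, the old expression landed one full step of 32 below the floored multiple, while the new one simply floors to the nearest multiple of 32. A quick Python sketch of the two integer computations:

```python
# Illustrative sketch, not project code: C++ integer division behaves like
# Python's // for the positive values used here.
def old_round(h: int) -> int:
    return (h // 32 - 1) * 32   # previous expression: one step of 32 too low

def new_round(h: int) -> int:
    return h // 32 * 32         # new expression: floor to the nearest multiple of 32

for h in (100, 250, 700):
    print(h, old_round(h), new_round(h))   # e.g. 100 -> 64 vs 96
```
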
@@ -96,13 +96,17 @@ void ResizeImgType0::Run(const cv::Mat &img, cv::Mat &resize_img,
   ratio_w = float(resize_w) / float(w);
 }
 
-void CrnnResizeImg::Run(const cv::Mat &img, cv::Mat &resize_img, float wh_ratio,
+void CrnnResizeImg::Run(const cv::Mat &img, cv::Mat &pad_resize_img,
+                        float max_wh_ratio,
                         const std::vector<int> &rec_image_shape) {
   int imgC, imgH, imgW;
   imgC = rec_image_shape[0];
   imgH = rec_image_shape[1];
   imgW = rec_image_shape[2];
 
+  float wh_ratio = 1.0 * imgW / imgH;
+  wh_ratio = std::max(max_wh_ratio, wh_ratio);
+
   imgW = int(32 * wh_ratio);
 
   float ratio = float(img.cols) / float(img.rows);

@@ -112,8 +116,12 @@ void CrnnResizeImg::Run(const cv::Mat &img, cv::Mat &resize_img, float wh_ratio,
   else
     resize_w = int(ceilf(imgH * ratio));
 
+  cv::Mat resize_img;
   cv::resize(img, resize_img, cv::Size(resize_w, imgH), 0.f, 0.f,
              cv::INTER_LINEAR);
+  cv::copyMakeBorder(resize_img, pad_resize_img, 0, 0, 0,
+                     int(imgW - resize_img.cols), cv::BORDER_CONSTANT,
+                     {127, 127, 127});
 }
 
 void ClsResizeImg::Run(const cv::Mat &img, cv::Mat &resize_img,

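Aside: a small Python/OpenCV sketch (not the project's code; the `(3, 32, 320)` shape and the function name are assumptions) of what the reworked `CrnnResizeImg::Run` now does in one place: derive a target width from the largest width/height ratio in the batch, resize the crop to the recognizer height, then pad the right edge with gray so every crop shares that width.

```python
import math
import cv2
import numpy as np

def crnn_resize_pad(img: np.ndarray, max_wh_ratio: float,
                    rec_image_shape=(3, 32, 320)) -> np.ndarray:
    """Pad-resize a text crop to a fixed-height, ratio-dependent width (illustrative)."""
    _, img_h, img_w = rec_image_shape
    wh_ratio = max(max_wh_ratio, img_w / img_h)
    target_w = int(32 * wh_ratio)                       # width grows with the widest crop in the batch

    ratio = img.shape[1] / img.shape[0]                 # crop width / height
    resize_w = min(math.ceil(img_h * ratio), target_w)  # keep aspect ratio, cap at target width

    resized = cv2.resize(img, (resize_w, img_h), interpolation=cv2.INTER_LINEAR)
    # pad only the right edge with gray so every crop ends up target_w wide
    return cv2.copyMakeBorder(resized, 0, 0, 0, target_w - resize_w,
                              cv2.BORDER_CONSTANT, value=(127, 127, 127))
```
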
@@ -24,15 +24,7 @@ Paddle Lite is PaddlePaddle's lightweight inference engine, which provides efficient inference for mobile and IoT devices
 ### 1.2 Prepare the prediction library
 
 There are two ways to obtain the prediction library:
-- 1. Download it directly; the download links are as follows:
-
-|Platform|Prediction library download link|
-|-|-|
-|Android|[arm7](https://github.com/PaddlePaddle/Paddle-Lite/releases/download/v2.6.3/inference_lite_lib.android.armv7.gcc.c++_shared.with_extra.with_cv.tar.gz) / [arm8](https://github.com/PaddlePaddle/Paddle-Lite/releases/download/v2.6.3/inference_lite_lib.android.armv8.gcc.c++_shared.with_extra.with_cv.tar.gz)|
-|IOS|[arm7](https://github.com/PaddlePaddle/Paddle-Lite/releases/download/v2.6.3/inference_lite_lib.ios.armv7.with_cv.with_extra.with_log.tiny_publish.tar.gz) / [arm8](https://github.com/PaddlePaddle/Paddle-Lite/releases/download/v2.6.3/inference_lite_lib.ios.armv8.with_cv.with_extra.with_log.tiny_publish.tar.gz)|
-
-Note: 1. The prediction libraries above are built from the PaddleLite 2.6.3 branch; for details on PaddleLite 2.6.3, see this [link](https://github.com/PaddlePaddle/Paddle-Lite/releases/tag/v2.6.3).
-
-- 2. [Recommended] Build Paddle-Lite to obtain the prediction library. Paddle-Lite is built as follows:
+- 1. [Recommended] Build Paddle-Lite to obtain the prediction library. Paddle-Lite is built as follows:
 ```
 git clone https://github.com/PaddlePaddle/Paddle-Lite.git
 cd Paddle-Lite
 
@@ -45,6 +37,9 @@ git checkout release/v2.7
 For more build commands,
 please refer to this [link](https://paddle-lite.readthedocs.io/zh/latest/user_guides/Compile/Android.html#id2).
 
+- 2. Download the prediction library directly from this [link](https://github.com/PaddlePaddle/Paddle-Lite/releases/tag/v2.7.1).
+
 After downloading the prediction library directly and extracting it, you get the `inference_lite_lib.android.armv8/` folder; the prediction library obtained by building Paddle-Lite is located
 under the `Paddle-Lite/build.lite.android.armv8.gcc/inference_lite_lib.android.armv8/` folder.
 The directory structure of the prediction library is as follows:

@@ -88,11 +83,11 @@ Paddle-Lite provides a variety of strategies to automatically optimize the original model, including
 |-|-|-|-|-|-|-|
 |V1.1|Extra-lightweight Chinese OCR mobile model|8.1M|[Download](https://paddleocr.bj.bcebos.com/20-09-22/mobile/lite/ch_ppocr_mobile_v1.1_det_opt.nb)|[Download](https://paddleocr.bj.bcebos.com/20-09-22/mobile/lite/ch_ppocr_mobile_v1.1_cls_opt.nb)|[Download](https://paddleocr.bj.bcebos.com/20-09-22/mobile/lite/ch_ppocr_mobile_v1.1_rec_opt.nb)|v2.7|
 |[slim] V1.1|Extra-lightweight Chinese OCR mobile model|3.5M|[Download](https://paddleocr.bj.bcebos.com/20-09-22/mobile/lite/ch_ppocr_mobile_v1.1_det_prune_opt.nb)|[Download](https://paddleocr.bj.bcebos.com/20-09-22/mobile/lite/ch_ppocr_mobile_v1.1_cls_quant_opt.nb)|[Download](https://paddleocr.bj.bcebos.com/20-09-22/mobile/lite/ch_ppocr_mobile_v1.1_rec_quant_opt.nb)|v2.7|
 |V1.0|Lightweight Chinese OCR mobile model|8.6M|[Download](https://paddleocr.bj.bcebos.com/20-09-22/mobile/lite/ch_ppocr_mobile_v1.0_det_opt.nb)|---|[Download](https://paddleocr.bj.bcebos.com/20-09-22/mobile/lite/ch_ppocr_mobile_v1.0_rec_opt.nb)|v2.7|
 
 Note: the V1.1 3.0M lightweight model is optimized with PaddleSlim and must be used together with the latest Paddle-Lite prediction library.
 
-If you deploy the models in the table above directly, you can skip the steps below and read [Section 2.2](#2.2与手机联调) directly.
+If deploying the models in the table above directly works for you, you can skip the steps below and read [Section 2.2](#2.2与手机联调) directly.
 
 If the model to be deployed is not in the table above, follow the steps below to obtain an optimized model.

@@ -184,7 +179,7 @@ wget https://paddleocr.bj.bcebos.com/ch_models/ch_rec_mv3_crnn_infer.tar && tar
 ```
 git clone https://github.com/PaddlePaddle/PaddleOCR.git
 cd PaddleOCR/deploy/lite/
-# Run prepare.sh to prepare the prediction library files, the test image and the dictionary file, and place them in the demo/cxx/ocr folder of the prediction library
+# Run prepare.sh as follows to copy the prediction library files, the test image and the dictionary file into the demo/cxx/ocr folder of the prediction library
 sh prepare.sh /{lite prediction library path}/inference_lite_lib.android.armv8
 
 # Enter the working directory of the OCR demo

@@ -255,6 +250,7 @@ use_direction_classify 0 # whether to use the direction classifier; 0 means not used, 1
 adb shell
 cd /data/local/tmp/debug
 export LD_LIBRARY_PATH=${PWD}:$LD_LIBRARY_PATH
+# ./ocr_db_crnn detection-model-file direction-classifier-model-file recognition-model-file test-image-path dictionary-file-path
 ./ocr_db_crnn ch_ppocr_mobile_v1.1_det_prune_opt.nb ch_ppocr_mobile_v1.1_rec_quant_opt.nb ch_ppocr_mobile_cls_quant_opt.nb ./11.jpg ppocr_keys_v1.txt
 ```

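Aside: the on-device run above can also be driven from the host machine. A hedged Python sketch (paths and model names simply mirror the commands above; adjust them to your own setup):

```python
import subprocess

DEVICE_DIR = "/data/local/tmp/debug"   # same working directory as in the commands above

# Build the same command line as above and run it on the device through adb.
run_cmd = ("cd {d} && export LD_LIBRARY_PATH={d}:$LD_LIBRARY_PATH && "
           "./ocr_db_crnn ch_ppocr_mobile_v1.1_det_prune_opt.nb "
           "ch_ppocr_mobile_v1.1_rec_quant_opt.nb ch_ppocr_mobile_cls_quant_opt.nb "
           "./11.jpg ppocr_keys_v1.txt").format(d=DEVICE_DIR)

subprocess.run(["adb", "shell", run_cmd], check=True)
```
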
@@ -22,15 +22,7 @@ deployment solutions for end-side deployment issues.
 
 ## 3. Prepare prebuild library for android and ios
 
-### 3.1 Download prebuild library
-|Platform|Prebuild library Download Link|
-|-|-|
-|Android|[arm7](https://github.com/PaddlePaddle/Paddle-Lite/releases/download/v2.6.3/inference_lite_lib.android.armv7.gcc.c++_shared.with_extra.with_cv.tar.gz) / [arm8](https://github.com/PaddlePaddle/Paddle-Lite/releases/download/v2.6.3/inference_lite_lib.android.armv8.gcc.c++_shared.with_extra.with_cv.tar.gz)|
-|IOS|[arm7](https://github.com/PaddlePaddle/Paddle-Lite/releases/download/v2.6.3/inference_lite_lib.ios.armv7.with_cv.with_extra.with_log.tiny_publish.tar.gz) / [arm8](https://github.com/PaddlePaddle/Paddle-Lite/releases/download/v2.6.3/inference_lite_lib.ios.armv8.with_cv.with_extra.with_log.tiny_publish.tar.gz)|
-
-note: The above pre-build inference library is compiled from the PaddleLite `release/v2.7` branch. For more information about PaddleLite 2.6.3, please refer to [link](https://github.com/PaddlePaddle/Paddle-Lite/releases/tag/v2.6.3).
-
-### 3.2 Compile prebuild library (Recommended)
+### 3.1 Compile prebuild library (Recommended)
 ```
 git clone https://github.com/PaddlePaddle/Paddle-Lite.git
 cd Paddle-Lite
 
@@ -66,6 +58,11 @@ inference_lite_lib.android.armv8/
 |   `-- java
 ```
 
+### 3.2 Download prebuild library
+
+PaddleLite also provides a compiled [prediction library](https://github.com/PaddlePaddle/Paddle-Lite/releases/tag/v2.7.1); developers can try it on their own.
 
 ## 4. Inference Model Optimization

@@ -80,7 +77,6 @@ You can directly download the optimized model.
 | - | - | - | - | - | - | - |
 | V1.1 | extra-lightweight Chinese OCR optimized model | 8.1M | [Download](https://paddleocr.bj.bcebos.com/20-09-22/mobile/lite/ch_ppocr_mobile_v1.1_det_opt.nb) | [Download](https://paddleocr.bj.bcebos.com/20-09-22/mobile/lite/ch_ppocr_mobile_v1.1_cls_opt.nb) | [Download](https://paddleocr.bj.bcebos.com/20-09-22/mobile/lite/ch_ppocr_mobile_v1.1_rec_opt.nb) | develop |
 | [slim] V1.1 | extra-lightweight Chinese OCR optimized model | 3.5M | [Download](https://paddleocr.bj.bcebos.com/20-09-22/mobile/lite/ch_ppocr_mobile_v1.1_det_prune_opt.nb) | [Download](https://paddleocr.bj.bcebos.com/20-09-22/mobile/lite/ch_ppocr_mobile_v1.1_cls_quant_opt.nb) | [Download](https://paddleocr.bj.bcebos.com/20-09-22/mobile/lite/ch_ppocr_mobile_v1.1_rec_quant_opt.nb) | develop |
 | V1.0 | lightweight Chinese OCR optimized model | 8.6M | [Download](https://paddleocr.bj.bcebos.com/20-09-22/mobile/lite/ch_ppocr_mobile_v1.0_det_opt.nb) | - | [Download](https://paddleocr.bj.bcebos.com/20-09-22/mobile/lite/ch_ppocr_mobile_v1.0_rec_opt.nb) | develop |
 
 If the model to be deployed is not in the above table, you need to follow the steps below to obtain the optimized model.

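Aside: if one of the pre-optimized models in the table above fits your needs, fetching it is a single download. A minimal Python sketch (the chosen file is illustrative; any `.nb` link from the table works):

```python
import urllib.request

# Any of the .nb links in the table above can be substituted here.
url = "https://paddleocr.bj.bcebos.com/20-09-22/mobile/lite/ch_ppocr_mobile_v1.1_det_opt.nb"
urllib.request.urlretrieve(url, "ch_ppocr_mobile_v1.1_det_opt.nb")   # saved next to the script
```
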
@@ -33,7 +33,7 @@ On Total-Text dataset, the text detection result is as follows:
 
 **Note:** Additional data, like icdar2013, icdar2017, COCO-Text, ArT, was added to the model training of SAST. Download the English public datasets in the organized format used by PaddleOCR from [Baidu Drive](https://pan.baidu.com/s/12cPnZcVuV1zn5DOd4mqjVw) (download code: 2bpi).
 
-For the training guide and use of PaddleOCR text detection algorithms, please refer to the document [Text detection model training/evaluation/prediction](./doc/doc_en/detection_en.md)
+For the training guide and use of PaddleOCR text detection algorithms, please refer to the document [Text detection model training/evaluation/prediction](./detection_en.md)
 
 <a name="TEXTRECOGNITIONALGORITHM"></a>
 ### 2. Text Recognition Algorithm

@@ -63,4 +63,4 @@ Refer to [DTRB](https://arxiv.org/abs/1904.01906), the training and evaluation r
 
 The average accuracy of the two-stage training in the original paper is 89.74%, and that of one-stage training in PaddleOCR is 88.33%. Both pre-trained weights can be downloaded [here](https://paddleocr.bj.bcebos.com/SRN/rec_r50fpn_vd_none_srn.tar).
 
-Please refer to the document for training guide and use of PaddleOCR text recognition algorithms [Text recognition model training/evaluation/prediction](./doc/doc_en/recognition_en.md)
+Please refer to the document for training guide and use of PaddleOCR text recognition algorithms [Text recognition model training/evaluation/prediction](./recognition_en.md)

@@ -32,6 +32,7 @@ class MobileNetV3():
         """
         self.scale = params['scale']
         model_name = params['model_name']
+        self.model_name = model_name
         self.inplanes = 16
         if model_name == "large":
             self.cfg = [

@@ -80,7 +81,7 @@ class MobileNetV3():
             "supported scale are {} but input scale is {}".format(supported_scale, self.scale)
 
         self.disable_se = params.get('disable_se', False)
 
     def __call__(self, input):
         scale = self.scale
         inplanes = self.inplanes

@@ -102,7 +103,8 @@ class MobileNetV3():
         inplanes = self.make_divisible(inplanes * scale)
         outs = []
         for layer_cfg in cfg:
-            if layer_cfg[5] == 2 and i > 2:
+            start_idx = 2 if self.model_name == 'large' else 0
+            if layer_cfg[5] == 2 and i > start_idx:
                 outs.append(conv)
             conv = self.residual_unit(
                 input=conv,

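Aside: in the loop above, feature maps are recorded just before each downsampling (stride-2) unit, and the index threshold below which nothing is recorded now depends on the model size instead of being hard-coded to 2. A standalone Python sketch of that intent (the helper below is a stand-in, not the backbone's real code):

```python
def collect_downsample_features(cfg, model_name, conv, residual_unit):
    """Stand-in for the loop above; cfg rows are (k, exp, c, se, act, stride)."""
    start_idx = 2 if model_name == 'large' else 0
    outs = []
    for i, layer_cfg in enumerate(cfg):
        # record the feature map right before each stride-2 unit,
        # but only once we are past start_idx
        if layer_cfg[5] == 2 and i > start_idx:
            outs.append(conv)
        conv = residual_unit(conv, layer_cfg)
    outs.append(conv)   # the final feature map is always kept
    return outs
```
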
@@ -200,4 +200,4 @@ if __name__ == "__main__":
         logger.info("The visualized img saved in {}".format(
             os.path.join(draw_img_save, "det_res_%s" % img_name_pure)))
     if count > 1:
-        logger.info("Avg Time:", total_time / (count - 1))
+        logger.info("Avg Time: {}".format(total_time / (count - 1)))

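Aside: the reason for the last change, shown with the standard `logging` module (illustrative, not the project's logger wrapper): `logger.info` interpolates extra arguments with %-style placeholders, so a message without a placeholder never renders the value.

```python
import logging

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

avg = 0.123
# Broken: no %-placeholder in the message, so logging reports a formatting
# error instead of printing the value.
# logger.info("Avg Time:", avg)

logger.info("Avg Time: {}".format(avg))   # the fix applied in the diff
logger.info("Avg Time: %s", avg)          # equivalent lazy-formatting form
```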