add convert to serving model

wangjiawei04 2020-08-19 14:29:13 +08:00
parent d7cd666a3a
commit 0d8fe75833
4 changed files with 34 additions and 7 deletions

View File

@@ -23,7 +23,7 @@ from paddle_serving_app.reader import Div, Normalize, Transpose
 from paddle_serving_app.reader import DBPostProcess, FilterBoxes
 if sys.argv[1] == 'gpu':
     from paddle_serving_server_gpu.web_service import WebService
-elif sys.argv[1] == 'cpu'
+elif sys.argv[1] == 'cpu':
     from paddle_serving_server.web_service import WebService
 import time
 import re
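The hunk above makes the server script pick its `WebService` import from the first command-line argument. Below is a minimal standalone sketch of that cpu/gpu switch; the argument validation and usage message are illustrative additions, not part of the committed scripts.
```
# Sketch of the cpu/gpu selection pattern used in the diff above.
# The argument check is an added assumption, not in the original scripts.
import sys

if len(sys.argv) < 2 or sys.argv[1] not in ("cpu", "gpu"):
    sys.exit("usage: python <server_script>.py cpu|gpu")

if sys.argv[1] == "gpu":
    from paddle_serving_server_gpu.web_service import WebService
else:
    from paddle_serving_server.web_service import WebService

print("Using", WebService.__module__)
```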
@@ -67,11 +67,13 @@ class OCRService(WebService):
 ocr_service = OCRService(name="ocr")
 ocr_service.load_model_config("ocr_det_model")
+ocr_service.init_det()
 if sys.argv[1] == 'gpu':
     ocr_service.set_gpus("0")
     ocr_service.prepare_server(workdir="workdir", port=9292, device="gpu", gpuid=0)
+    ocr_service.run_debugger_service(gpu=True)
 elif sys.argv[1] == 'cpu':
     ocr_service.prepare_server(workdir="workdir", port=9292)
+    ocr_service.run_debugger_service()
 ocr_service.init_det()
-ocr_service.run_debugger_service()
 ocr_service.run_web_service()
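Once the service configured above is running, it can be exercised over HTTP. The sketch below is a minimal client, assuming the default WebService endpoint `http://127.0.0.1:9292/ocr/prediction` (derived from `name="ocr"` and `port=9292`), the `feed`/`fetch` JSON payload keys, and a local image `test.jpg`; none of these details are taken from this commit.
```
# Hedged example client; the endpoint pattern and payload keys are assumptions.
import base64
import json
import requests

with open("test.jpg", "rb") as f:  # hypothetical local test image
    image = base64.b64encode(f.read()).decode("utf-8")

payload = {"feed": [{"image": image}], "fetch": ["res"]}
resp = requests.post(
    "http://127.0.0.1:9292/ocr/prediction",
    headers={"Content-Type": "application/json"},
    data=json.dumps(payload))
print(resp.json())
```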

View File

@@ -104,10 +104,11 @@ class OCRService(WebService):
 ocr_service = OCRService(name="ocr")
 ocr_service.load_model_config("ocr_rec_model")
-ocr_service.prepare_server(workdir="workdir", port=9292)
 ocr_service.init_det_debugger(det_model_config="ocr_det_model")
 if sys.argv[1] == 'gpu':
+    ocr_service.prepare_server(workdir="workdir", port=9292, device="gpu", gpuid=0)
     ocr_service.run_debugger_service(gpu=True)
 elif sys.argv[1] == 'cpu':
+    ocr_service.prepare_server(workdir="workdir", port=9292, device="cpu")
     ocr_service.run_debugger_service()
 ocr_service.run_web_service()

View File

@@ -55,6 +55,23 @@ tar -xzvf ocr_det.tar.gz
 ```
 Running the above commands downloads the `db_crnn_mobile` model. If you want the larger `db_crnn_server` model, first download and extract its inference model, then refer to [how to convert a saved Paddle inference model into a deployable Paddle Serving model](https://github.com/PaddlePaddle/Serving/blob/develop/doc/INFERENCE_TO_SERVING_CN.md).
+Taking the `ch_rec_r34_vd_crnn` model as an example, it can be downloaded with:
+```
+wget --no-check-certificate https://paddleocr.bj.bcebos.com/ch_models/ch_rec_r34_vd_crnn_infer.tar
+tar xf ch_rec_r34_vd_crnn_infer.tar
+```
+Then, following the Serving model conversion tutorial, run the following Python script:
+```
+from paddle_serving_client.io import inference_model_to_serving
+inference_model_dir = "ch_rec_r34_vd_crnn"
+serving_client_dir = "serving_client_dir"
+serving_server_dir = "serving_server_dir"
+feed_var_names, fetch_var_names = inference_model_to_serving(
+    inference_model_dir, serving_client_dir, serving_server_dir, model_filename="model", params_filename="params")
+```
+This generates the client-side and server-side model configurations in `serving_client_dir` and `serving_server_dir` respectively.
 ### 3. Start the service
 Depending on your needs, you can start either the `standard version` or the `fast version` of the service; the two are compared in the table below:
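As a rough illustration of how the generated configuration might be consumed, the sketch below loads `serving_client_dir` with the Paddle Serving RPC client. The prototxt file name, the server address, and the commented-out predict call are assumptions for illustration, not part of this commit.
```
# Sketch only: assumes a Paddle Serving server was started with serving_server_dir.
from paddle_serving_client import Client

client = Client()
# serving_client_conf.prototxt is the conventional generated config name (assumption).
client.load_client_config("serving_client_dir/serving_client_conf.prototxt")
client.connect(["127.0.0.1:9292"])
# feed_var_names / fetch_var_names come from inference_model_to_serving() above;
# input preprocessing is omitted here.
# result = client.predict(feed={feed_var_names[0]: input_tensor}, fetch=fetch_var_names)
```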

View File

@@ -22,7 +22,10 @@ from paddle_serving_client import Client
 from paddle_serving_app.reader import Sequential, URL2Image, ResizeByFactor
 from paddle_serving_app.reader import Div, Normalize, Transpose
 from paddle_serving_app.reader import DBPostProcess, FilterBoxes, GetRotateCropImage, SortedBoxes
-from paddle_serving_server_gpu.web_service import WebService
+if sys.argv[1] == 'gpu':
+    from paddle_serving_server_gpu.web_service import WebService
+elif sys.argv[1] == 'cpu':
+    from paddle_serving_server.web_service import WebService
 import time
 import re
 import base64
@@ -65,8 +68,12 @@ class OCRService(WebService):
 ocr_service = OCRService(name="ocr")
 ocr_service.load_model_config("ocr_rec_model")
-ocr_service.set_gpus("0")
 ocr_service.init_rec()
-ocr_service.prepare_server(workdir="workdir", port=9292, device="gpu", gpuid=0)
-ocr_service.run_debugger_service()
+if sys.argv[1] == 'gpu':
+    ocr_service.set_gpus("0")
+    ocr_service.prepare_server(workdir="workdir", port=9292, device="gpu", gpuid=0)
+    ocr_service.run_debugger_service(gpu=True)
+elif sys.argv[1] == 'cpu':
+    ocr_service.prepare_server(workdir="workdir", port=9292, device="cpu")
+    ocr_service.run_debugger_service()
 ocr_service.run_web_service()