[Serving] Simple serving examples for YOLOv5 and PP-OCRv3, add uvicorn to fastdeploy tools (#986)
* PP-OCRv3 simple serving
* add uvicorn to fd tools
* update ppdet simple serving readme
* YOLOv5 simple serving
* do not import simple serving by default
* remove config from envs
* update comment
examples/vision/ocr/PP-OCRv3/python/serving/README.md (new symbolic link → README_CN.md)
examples/vision/ocr/PP-OCRv3/python/serving/README_CN.md (new file)
简体中文 | [English](README_EN.md)

# PP-OCRv3 Python Simple Serving Deployment Example

Before deployment, confirm the following two steps:

- 1. The software and hardware environment meets the requirements; refer to [FastDeploy environment requirements](../../../../../../docs/cn/build_and_install/download_prebuilt_libraries.md)
- 2. The FastDeploy Python whl package is installed; refer to [FastDeploy Python installation](../../../../../../docs/cn/build_and_install/download_prebuilt_libraries.md)

Server:

```bash
# Download the demo code
git clone https://github.com/PaddlePaddle/FastDeploy.git
cd FastDeploy/examples/vision/ocr/PP-OCRv3/python/serving

# Download the models and the dictionary file
wget https://paddleocr.bj.bcebos.com/PP-OCRv3/chinese/ch_PP-OCRv3_det_infer.tar
tar xvf ch_PP-OCRv3_det_infer.tar

wget https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_cls_infer.tar
tar -xvf ch_ppocr_mobile_v2.0_cls_infer.tar

wget https://paddleocr.bj.bcebos.com/PP-OCRv3/chinese/ch_PP-OCRv3_rec_infer.tar
tar xvf ch_PP-OCRv3_rec_infer.tar

wget https://gitee.com/paddlepaddle/PaddleOCR/raw/release/2.6/ppocr/utils/ppocr_keys_v1.txt

# Launch the server; edit the configuration in server.py to select the hardware, backend, etc.
# Use --host and --port to specify the IP address and port
fastdeploy simple_serving --app server:app
```
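To run on different hardware or with a different inference backend, edit the configuration section at the top of server.py before launching. As a minimal sketch (the variable names and allowed values come from server.py itself), switching to GPU with the TensorRT backend looks like:

```python
# server.py configuration sketch: run on GPU with the TensorRT backend.
# 'paddle' and 'trt' are the values server.py lists for `backend`.
device = 'gpu'
backend = 'trt'
```

With `backend = 'paddle'` (the default), server.py falls back to the Paddle Inference backend instead.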

Client:

```bash
# Download the demo code
git clone https://github.com/PaddlePaddle/FastDeploy.git
cd FastDeploy/examples/vision/ocr/PP-OCRv3/python/serving

# Download the test image
wget https://gitee.com/paddlepaddle/PaddleOCR/raw/release/2.6/doc/imgs/12.jpg

# Send a request and get the inference result (adapt the IP address and port in the script if necessary)
python client.py
```
examples/vision/ocr/PP-OCRv3/python/serving/README_EN.md (new file)
English | [简体中文](README_CN.md)

# PP-OCRv3 Python Simple Serving Demo

## Environment

- 1. Prepare the environment and install the FastDeploy Python whl package; refer to [download_prebuilt_libraries](../../../../../../docs/en/build_and_install/download_prebuilt_libraries.md)

Server:

```bash
# Download demo code
git clone https://github.com/PaddlePaddle/FastDeploy.git
cd FastDeploy/examples/vision/ocr/PP-OCRv3/python/serving

# Download models and labels
wget https://paddleocr.bj.bcebos.com/PP-OCRv3/chinese/ch_PP-OCRv3_det_infer.tar
tar xvf ch_PP-OCRv3_det_infer.tar

wget https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_cls_infer.tar
tar -xvf ch_ppocr_mobile_v2.0_cls_infer.tar

wget https://paddleocr.bj.bcebos.com/PP-OCRv3/chinese/ch_PP-OCRv3_rec_infer.tar
tar xvf ch_PP-OCRv3_rec_infer.tar

wget https://gitee.com/paddlepaddle/PaddleOCR/raw/release/2.6/ppocr/utils/ppocr_keys_v1.txt

# Launch the server; change the configuration in server.py to select hardware, backend, etc.,
# and use --host, --port to specify the IP address and port
fastdeploy simple_serving --app server:app
```

Client:

```bash
# Download demo code
git clone https://github.com/PaddlePaddle/FastDeploy.git
cd FastDeploy/examples/vision/ocr/PP-OCRv3/python/serving

# Download the test image
wget https://gitee.com/paddlepaddle/PaddleOCR/raw/release/2.6/doc/imgs/12.jpg

# Send a request and get the inference result (adapt the IP and port if necessary)
python client.py
```
examples/vision/ocr/PP-OCRv3/python/serving/client.py (new file)
import requests
import json
import cv2
import fastdeploy as fd
from fastdeploy.serving.utils import cv2_to_base64

if __name__ == '__main__':
    url = "http://127.0.0.1:8000/fd/ppocrv3"
    headers = {"Content-Type": "application/json"}

    im = cv2.imread("12.jpg")
    data = {"data": {"image": cv2_to_base64(im)}, "parameters": {}}

    resp = requests.post(url=url, headers=headers, data=json.dumps(data))
    if resp.status_code == 200:
        r_json = json.loads(resp.json()["result"])
        print(r_json)
        ocr_result = fd.vision.utils.json_to_ocr(r_json)
        vis_im = fd.vision.vis_ppocr(im, ocr_result)
        cv2.imwrite("visualized_result.jpg", vis_im)
        print("Visualized result save in ./visualized_result.jpg")
    else:
        print("Error code:", resp.status_code)
        print(resp.text)
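The client above only prints the decoded JSON and writes a visualization. As an optional follow-up, a hedged sketch of listing the recognized text lines with their confidence scores from the `ocr_result` object; the field names `boxes`, `text`, and `rec_scores` are assumptions based on FastDeploy's OCRResult layout, not something this commit defines.

```python
# Hedged sketch: summarize an OCRResult produced by fd.vision.utils.json_to_ocr.
# Assumes the OCRResult fields boxes / text / rec_scores (not defined in this commit).
def print_ocr_result(ocr_result):
    for box, text, score in zip(ocr_result.boxes, ocr_result.text,
                                ocr_result.rec_scores):
        # Each box is expected to hold 8 integers: the 4 corner points of the text region.
        print(f"{text} (score={score:.3f}) box={box}")
```

It could be called right after `json_to_ocr` in the success branch of client.py.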
examples/vision/ocr/PP-OCRv3/python/serving/server.py (new file)
import fastdeploy as fd
from fastdeploy.serving.server import SimpleServer
import os
import logging

logging.getLogger().setLevel(logging.INFO)

# Configurations
det_model_dir = 'ch_PP-OCRv3_det_infer'
cls_model_dir = 'ch_ppocr_mobile_v2.0_cls_infer'
rec_model_dir = 'ch_PP-OCRv3_rec_infer'
rec_label_file = 'ppocr_keys_v1.txt'
device = 'cpu'
# backend: ['paddle', 'trt'], you can also use other backends, but need to modify
# the runtime option below
backend = 'paddle'

# Prepare models
# Detection model
det_model_file = os.path.join(det_model_dir, "inference.pdmodel")
det_params_file = os.path.join(det_model_dir, "inference.pdiparams")
# Classification model
cls_model_file = os.path.join(cls_model_dir, "inference.pdmodel")
cls_params_file = os.path.join(cls_model_dir, "inference.pdiparams")
# Recognition model
rec_model_file = os.path.join(rec_model_dir, "inference.pdmodel")
rec_params_file = os.path.join(rec_model_dir, "inference.pdiparams")

# Setup runtime option to select hardware, backend, etc.
option = fd.RuntimeOption()
if device.lower() == 'gpu':
    option.use_gpu()
if backend == 'trt':
    option.use_trt_backend()
else:
    option.use_paddle_infer_backend()

det_option = option
det_option.set_trt_input_shape("x", [1, 3, 64, 64], [1, 3, 640, 640],
                               [1, 3, 960, 960])

# det_option.set_trt_cache_file("det_trt_cache.trt")
print(det_model_file, det_params_file)
det_model = fd.vision.ocr.DBDetector(
    det_model_file, det_params_file, runtime_option=det_option)

cls_batch_size = 1
rec_batch_size = 6

cls_option = option
cls_option.set_trt_input_shape("x", [1, 3, 48, 10],
                               [cls_batch_size, 3, 48, 320],
                               [cls_batch_size, 3, 48, 1024])

# cls_option.set_trt_cache_file("cls_trt_cache.trt")
cls_model = fd.vision.ocr.Classifier(
    cls_model_file, cls_params_file, runtime_option=cls_option)

rec_option = option
rec_option.set_trt_input_shape("x", [1, 3, 48, 10],
                               [rec_batch_size, 3, 48, 320],
                               [rec_batch_size, 3, 48, 2304])

# rec_option.set_trt_cache_file("rec_trt_cache.trt")
rec_model = fd.vision.ocr.Recognizer(
    rec_model_file, rec_params_file, rec_label_file, runtime_option=rec_option)

# Create PPOCRv3 pipeline
ppocr_v3 = fd.vision.ocr.PPOCRv3(
    det_model=det_model, cls_model=cls_model, rec_model=rec_model)

ppocr_v3.cls_batch_size = cls_batch_size
ppocr_v3.rec_batch_size = rec_batch_size

# Create server, setup REST API
app = SimpleServer()
app.register(
    task_name="fd/ppocrv3",
    model_handler=fd.serving.handler.VisionModelHandler,
    predictor=ppocr_v3)
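The `app` registered above is what `fastdeploy simple_serving --app server:app` serves. As a minimal sketch (an assumption based on this commit adding uvicorn to the fastdeploy tools, not part of server.py), the same app could also be launched with uvicorn directly, for example by appending the following to server.py:

```python
# Hedged sketch: launch the SimpleServer app with uvicorn directly, assuming it
# exposes an ASGI-compatible application (the fastdeploy CLI serves it via uvicorn).
import uvicorn

if __name__ == "__main__":
    # Roughly mirrors: fastdeploy simple_serving --app server:app --host 0.0.0.0 --port 8000
    uvicorn.run(app, host="0.0.0.0", port=8000)
```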