Add PaddleSeg simple serving example
@@ -0,0 +1,36 @@
English | [简体中文](README_CN.md)

# PaddleSegmentation Python Simple Serving Demo

## Environment

- 1. Prepare the environment and install the FastDeploy Python whl package, refer to [download_prebuilt_libraries](../../../../../../docs/en/build_and_install/download_prebuilt_libraries.md); typical install commands are sketched right after this list
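
For reference, the install commands usually look like the following; treat this as a sketch and confirm the exact package name and index URL against the linked document for your platform:

```bash
# CPU wheel (GPU builds use the fastdeploy-gpu-python package instead)
pip install fastdeploy-python -f https://www.paddlepaddle.org.cn/whl/fastdeploy.html
```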

Server:

```bash
# Download demo code
git clone https://github.com/PaddlePaddle/FastDeploy.git
cd FastDeploy/examples/vision/segmentation/paddleseg/python/serving

# Download PP_LiteSeg model
wget https://bj.bcebos.com/paddlehub/fastdeploy/PP_LiteSeg_B_STDC2_cityscapes_with_argmax_infer.tgz
tar -xvf PP_LiteSeg_B_STDC2_cityscapes_with_argmax_infer.tgz

# Launch server, change the configurations in server.py to select hardware, backend, etc.,
# and use --host, --port to specify IP and port
fastdeploy simple_serving --app server:app
```
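
If the server should listen on a different address or port, the `--host` and `--port` flags mentioned above can be appended to the same command. The values below are only an illustration:

```bash
# Example: listen on all interfaces on port 8000
fastdeploy simple_serving --app server:app --host 0.0.0.0 --port 8000
```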

Client:

```bash
# Download demo code
git clone https://github.com/PaddlePaddle/FastDeploy.git
cd FastDeploy/examples/vision/segmentation/paddleseg/python/serving

# Download test image
wget https://paddleseg.bj.bcebos.com/dygraph/demo/cityscapes_demo.png

# Send request and get inference result (adapt the IP and port in client.py if necessary)
python client.py
```
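
For clients written in other languages, the request that client.py sends is a plain JSON POST; a roughly equivalent call with curl would look like the sketch below, where `<BASE64_IMAGE>` is a placeholder for the base64-encoded image bytes:

```bash
curl -X POST http://127.0.0.1:8000/fd/ppliteseg \
  -H "Content-Type: application/json" \
  -d '{"data": {"image": "<BASE64_IMAGE>"}, "parameters": {}}'
```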
@@ -0,0 +1,36 @@
简体中文 | [English](README.md)

# PaddleSegmentation Python Simple Serving Deployment Example

Before deployment, confirm the following two steps:

- 1. The hardware and software environment meets the requirements, refer to [FastDeploy environment requirements](../../../../../../docs/cn/build_and_install/download_prebuilt_libraries.md)
- 2. The FastDeploy Python whl package is installed, refer to [FastDeploy Python installation](../../../../../../docs/cn/build_and_install/download_prebuilt_libraries.md)

Server:

```bash
# Download demo code
git clone https://github.com/PaddlePaddle/FastDeploy.git
cd FastDeploy/examples/vision/segmentation/paddleseg/python/serving

# Download PP_LiteSeg model files
wget https://bj.bcebos.com/paddlehub/fastdeploy/PP_LiteSeg_B_STDC2_cityscapes_with_argmax_infer.tgz
tar -xvf PP_LiteSeg_B_STDC2_cityscapes_with_argmax_infer.tgz

# Launch server; the configurations in server.py can be modified to select hardware, backend, etc.,
# and --host, --port can be used to specify IP and port
fastdeploy simple_serving --app server:app
```

Client:

```bash
# Download demo code
git clone https://github.com/PaddlePaddle/FastDeploy.git
cd FastDeploy/examples/vision/segmentation/paddleseg/python/serving

# Download test image
wget https://paddleseg.bj.bcebos.com/dygraph/demo/cityscapes_demo.png

# Send request and get inference result (adapt the IP and port in client.py if necessary)
python client.py
```
@@ -0,0 +1,23 @@
import requests
import json
import cv2
import fastdeploy as fd
from fastdeploy.serving.utils import cv2_to_base64

if __name__ == '__main__':
    # Endpoint of the simple server; the path matches task_name in server.py
    url = "http://127.0.0.1:8000/fd/ppliteseg"
    headers = {"Content-Type": "application/json"}

    # Encode the test image as base64 and build the request payload
    im = cv2.imread("cityscapes_demo.png")
    data = {"data": {"image": cv2_to_base64(im)}, "parameters": {}}

    resp = requests.post(url=url, headers=headers, data=json.dumps(data))
    if resp.status_code == 200:
        # Decode the JSON result and visualize the segmentation over the input image
        r_json = json.loads(resp.json()["result"])
        result = fd.vision.utils.json_to_segmentation(r_json)
        vis_im = fd.vision.vis_segmentation(im, result, weight=0.5)
        cv2.imwrite("visualized_result.jpg", vis_im)
        print("Visualized result saved in ./visualized_result.jpg")
    else:
        print("Error code:", resp.status_code)
        print(resp.text)
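
A minimal end-to-end run of this client, assuming the server from server.py is already running locally, follows the README steps:

```bash
# Fetch the test image referenced above, then run the client;
# on success it writes visualized_result.jpg to the current directory
wget https://paddleseg.bj.bcebos.com/dygraph/demo/cityscapes_demo.png
python client.py
```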
@@ -0,0 +1,38 @@
import fastdeploy as fd
from fastdeploy.serving.server import SimpleServer
import os
import logging

logging.getLogger().setLevel(logging.INFO)

# Configurations: set device to 'gpu' to run on GPU, and use_trt to True
# to enable the TensorRT backend (GPU only)
model_dir = 'PP_LiteSeg_B_STDC2_cityscapes_with_argmax_infer'
device = 'cpu'
use_trt = False

# Prepare model files
model_file = os.path.join(model_dir, "model.pdmodel")
params_file = os.path.join(model_dir, "model.pdiparams")
config_file = os.path.join(model_dir, "deploy.yaml")

# Setup runtime option to select hardware, backend, etc.
option = fd.RuntimeOption()
if device.lower() == 'gpu':
    option.use_gpu()
    if use_trt:
        option.use_trt_backend()
        option.set_trt_cache_file('pp_lite_seg.trt')

# Create model instance
model_instance = fd.vision.segmentation.PaddleSegModel(
    model_file=model_file,
    params_file=params_file,
    config_file=config_file,
    runtime_option=option)

# Create server and set up the REST API; task_name determines the URL path,
# e.g. http://127.0.0.1:8000/fd/ppliteseg
app = SimpleServer()
app.register(
    task_name="fd/ppliteseg",
    model_handler=fd.serving.handler.VisionModelHandler,
    predictor=model_instance)