mirror of
https://github.com/PaddlePaddle/FastDeploy.git
synced 2025-10-07 09:31:35 +08:00
[Serving] PaddleSeg add triton serving && simple serving example (#1171)
* Update keypointdetection result docs * Update im.copy() to im in examples * Update new Api, fastdeploy::vision::Visualize to fastdeploy::vision * Update SwapBackgroundSegmentation && SwapBackgroundMatting to SwapBackground * Update README_CN.md * Update README_CN.md * Update preprocessor.h * PaddleSeg supports triton serving * Add PaddleSeg simple serving example * Add PaddleSeg triton serving client code * Update triton serving runtime config.pbtxt * Update paddleseg grpc client * Add paddle serving README
This commit is contained in:
@@ -0,0 +1,38 @@
|
||||
"""FastDeploy simple-serving example for a PaddleSeg PP-LiteSeg model.

Loads an exported Paddle inference model, selects the runtime backend
(CPU by default; optionally GPU, with TensorRT on top), and registers
the model as a REST endpoint on a FastDeploy SimpleServer.
"""
import logging
import os

import fastdeploy as fd
from fastdeploy.serving.server import SimpleServer

logging.getLogger().setLevel(logging.INFO)

# --- Configuration ---------------------------------------------------
model_dir = 'PP_LiteSeg_B_STDC2_cityscapes_with_argmax_infer'
device = 'cpu'    # 'cpu' or 'gpu'
use_trt = False   # only consulted when device is 'gpu'

# --- Exported model files --------------------------------------------
model_file = os.path.join(model_dir, "model.pdmodel")
params_file = os.path.join(model_dir, "model.pdiparams")
config_file = os.path.join(model_dir, "deploy.yaml")

# --- Runtime option: hardware / backend selection --------------------
option = fd.RuntimeOption()
if device.lower() == 'gpu':
    option.use_gpu()
    if use_trt:
        # TensorRT backend; cache the built engine so later startups
        # skip the (slow) engine build.
        option.use_trt_backend()
        option.set_trt_cache_file('pp_lite_seg.trt')

# --- Model instance --------------------------------------------------
model_instance = fd.vision.segmentation.PaddleSegModel(
    model_file=model_file,
    params_file=params_file,
    config_file=config_file,
    runtime_option=option)

# --- Server: expose the model through a REST API ---------------------
# NOTE: the serving launcher looks up the module-level name `app`.
app = SimpleServer()
app.register(
    task_name="fd/ppliteseg",
    model_handler=fd.serving.handler.VisionModelHandler,
    predictor=model_instance)
|
Reference in New Issue
Block a user