[Backend] Add OCR, Seg, KeypointDetection, Matting, ernie-3.0 and adaface models for XPU Deploy (#960)

* [FlyCV] Bump up FlyCV -> official release 1.0.0

* add seg models for XPU

* add ocr model for XPU

* add matting

* add matting python

* fix infer.cc

* add keypointdetection support for XPU

* Add adaface support for XPU

* add ernie-3.0

* fix doc

Co-authored-by: DefTruth <qiustudent_r@163.com>
Co-authored-by: DefTruth <31974251+DefTruth@users.noreply.github.com>
Author: yeliang2258
Date: 2022-12-26 15:02:58 +08:00 (committed by GitHub)
parent 3b29e86add
commit 7b15f72516
39 changed files with 304 additions and 25 deletions
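Across the diffs below, XPU enablement follows one pattern: each demo builds a fastdeploy RuntimeOption, calls use_xpu() (plus use_paddle_lite_backend() on the ERNIE runtime path), and the model classes gain valid_xpu_backends = {Backend::LITE}. A minimal Python sketch of that option pattern, assuming a FastDeploy build with XPU (Kunlun) support:

```python
import fastdeploy as fd

# Sketch only: the option-building pattern this commit adds to the Python demos.
# Assumes a FastDeploy wheel built with XPU (Kunlun) support.
option = fd.RuntimeOption()
option.use_xpu()                  # run on the XPU device
option.use_paddle_lite_backend()  # XPU inference is served by the Paddle Lite backend

# The vision demos then pass this option to the model wrapper via runtime_option=option;
# the text (ERNIE) demo wraps it in fd.Runtime(option) directly.
```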

examples/text/ernie-3.0/cpp/README.md (Normal file → Executable file, 2 changes)

@@ -35,6 +35,8 @@ tar xvfz ernie-3.0-medium-zh-afqmc.tgz
# GPU Inference
./seq_cls_infer_demo --device gpu --model_dir ernie-3.0-medium-zh-afqmc
# XPU Inference
./seq_cls_infer_demo --device xpu --model_dir ernie-3.0-medium-zh-afqmc
```
The result returned after running is as follows
```bash

examples/text/ernie-3.0/cpp/seq_cls_infer.cc (Normal file → Executable file, 19 changes)

@@ -32,7 +32,7 @@ const char sep = '/';
DEFINE_string(model_dir, "", "Directory of the inference model.");
DEFINE_string(vocab_path, "", "Path of the vocab file.");
DEFINE_string(device, "cpu",
"Type of inference device, support 'cpu', 'xpu' or 'gpu'.");
DEFINE_string(backend, "onnx_runtime",
"The inference runtime backend, support: ['onnx_runtime', "
"'paddle', 'openvino', 'tensorrt', 'paddle_tensorrt']");
@@ -55,7 +55,16 @@ void PrintUsage() {
}
bool CreateRuntimeOption(fastdeploy::RuntimeOption* option) {
std::string model_path = FLAGS_model_dir + sep + "infer.pdmodel";
std::string param_path = FLAGS_model_dir + sep + "infer.pdiparams";
fastdeploy::FDINFO << "model_path = " << model_path
<< ", param_path = " << param_path << std::endl;
option->SetModelPath(model_path, param_path);
if (FLAGS_device == "xpu") {
option->UseXpu();
return true;
} else if (FLAGS_device == "gpu") {
option->UseGpu();
} else if (FLAGS_device == "cpu") {
option->UseCpu();
@@ -97,11 +106,7 @@ bool CreateRuntimeOption(fastdeploy::RuntimeOption* option) {
<< FLAGS_backend << "'" << std::endl;
return false;
}
std::string model_path = FLAGS_model_dir + sep + "infer.pdmodel";
std::string param_path = FLAGS_model_dir + sep + "infer.pdiparams";
fastdeploy::FDINFO << "model_path = " << model_path
<< ", param_path = " << param_path << std::endl;
option->SetModelPath(model_path, param_path);
return true;
}

examples/text/ernie-3.0/python/README.md (Normal file → Executable file, 5 changes)

@@ -22,7 +22,7 @@ pip install -r requirements.txt
### A Quick Start
The following example shows how to use the FastDeploy library to complete Python predictive deployment of the ERNIE 3.0 Medium model on the [AFQMC Dataset](https://bj.bcebos.com/paddlenlp/datasets/afqmc_public.zip) of the CLUE Benchmark for text classification tasks.
```bash
@@ -40,6 +40,9 @@ python seq_cls_infer.py --device cpu --model_dir ernie-3.0-medium-zh-afqmc
# GPU Inference
python seq_cls_infer.py --device gpu --model_dir ernie-3.0-medium-zh-afqmc
# XPU Inference
python seq_cls_infer.py --device xpu --model_dir ernie-3.0-medium-zh-afqmc
```
The result returned after running is as follows:

examples/text/ernie-3.0/python/seq_cls_infer.py (Normal file → Executable file, 8 changes)

@@ -35,8 +35,8 @@ def parse_arguments():
"--device", "--device",
type=str, type=str,
default='cpu', default='cpu',
choices=['gpu', 'cpu'], choices=['gpu', 'cpu', 'xpu'],
help="Type of inference device, support 'cpu' or 'gpu'.") help="Type of inference device, support 'cpu', 'xpu' or 'gpu'.")
parser.add_argument( parser.add_argument(
"--backend", "--backend",
type=str, type=str,
@@ -94,6 +94,10 @@ class ErnieForSequenceClassificationPredictor(object):
model_path = os.path.join(args.model_dir, "infer.pdmodel")
params_path = os.path.join(args.model_dir, "infer.pdiparams")
option.set_model_path(model_path, params_path)
if args.device == 'xpu':
option.use_xpu()
option.use_paddle_lite_backend()
return fd.Runtime(option)
if args.device == 'cpu':
option.use_cpu()
else:
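For reference, the XPU branch the hunk above adds can be read in isolation as the small helper below; it is a sketch that assumes the exported model uses the infer.pdmodel / infer.pdiparams naming shown in the diff.

```python
import os
import fastdeploy as fd

def create_xpu_runtime(model_dir):
    # Mirrors the XPU branch added above: select the XPU device and the Paddle Lite backend.
    option = fd.RuntimeOption()
    option.set_model_path(
        os.path.join(model_dir, "infer.pdmodel"),
        os.path.join(model_dir, "infer.pdiparams"))
    option.use_xpu()
    option.use_paddle_lite_backend()
    return fd.Runtime(option)
```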

examples/vision/faceid/adaface/cpp/README.md (Normal file → Executable file, 7 changes)

@@ -51,6 +51,13 @@ tar zxvf mobilefacenet_adaface.tgz -C ./
test_lite_focal_arcface_2.JPG \
2
# XPU Inference
./infer_demo mobilefacenet_adaface/mobilefacenet_adaface.pdmodel \
mobilefacenet_adaface/mobilefacenet_adaface.pdiparams \
test_lite_focal_arcface_0.JPG \
test_lite_focal_arcface_1.JPG \
test_lite_focal_arcface_2.JPG \
3
```
The visualized result after running is shown in the figure below

examples/vision/faceid/adaface/cpp/infer.cc (Normal file → Executable file, 41 changes)

@@ -47,6 +47,43 @@ void CpuInfer(const std::string &model_file, const std::string &params_file,
<< ", Cosine 02:" << cosine02 << std::endl; << ", Cosine 02:" << cosine02 << std::endl;
} }
void XpuInfer(const std::string &model_file, const std::string &params_file,
const std::vector<std::string> &image_file) {
auto option = fastdeploy::RuntimeOption();
option.UseXpu();
auto model = fastdeploy::vision::faceid::AdaFace(model_file, params_file, option);
if (!model.Initialized()) {
std::cerr << "Failed to initialize." << std::endl;
return;
}
cv::Mat face0 = cv::imread(image_file[0]);
cv::Mat face1 = cv::imread(image_file[1]);
cv::Mat face2 = cv::imread(image_file[2]);
fastdeploy::vision::FaceRecognitionResult res0;
fastdeploy::vision::FaceRecognitionResult res1;
fastdeploy::vision::FaceRecognitionResult res2;
if ((!model.Predict(&face0, &res0)) || (!model.Predict(&face1, &res1)) ||
(!model.Predict(&face2, &res2))) {
std::cerr << "Prediction Failed." << std::endl;
}
std::cout << "Prediction Done!" << std::endl;
std::cout << "--- [Face 0]:" << res0.Str();
std::cout << "--- [Face 1]:" << res1.Str();
std::cout << "--- [Face 2]:" << res2.Str();
float cosine01 = fastdeploy::vision::utils::CosineSimilarity(
res0.embedding, res1.embedding, model.l2_normalize);
float cosine02 = fastdeploy::vision::utils::CosineSimilarity(
res0.embedding, res2.embedding, model.l2_normalize);
std::cout << "Detect Done! Cosine 01: " << cosine01
<< ", Cosine 02:" << cosine02 << std::endl;
}
void GpuInfer(const std::string &model_file, const std::string &params_file,
const std::vector<std::string> &image_file) {
auto option = fastdeploy::RuntimeOption();
@@ -134,7 +171,7 @@ int main(int argc, char *argv[]) {
"test_lite_focal_AdaFace_2.JPG 0" "test_lite_focal_AdaFace_2.JPG 0"
<< std::endl; << std::endl;
std::cout << "The data type of run_option is int, 0: run with cpu; 1: run " std::cout << "The data type of run_option is int, 0: run with cpu; 1: run "
"with gpu; 2: run with gpu and use tensorrt backend." "with gpu; 2: run with gpu and use tensorrt backend; 3: run with xpu."
<< std::endl; << std::endl;
return -1; return -1;
} }
@@ -147,6 +184,8 @@ int main(int argc, char *argv[]) {
GpuInfer(argv[1], argv[2], image_files);
} else if (std::atoi(argv[6]) == 2) {
TrtInfer(argv[1], argv[2], image_files);
} else if (std::atoi(argv[6]) == 3) {
XpuInfer(argv[1], argv[2], image_files);
}
return 0;
}

examples/vision/faceid/adaface/python/README.md (Normal file → Executable file, 8 changes)

@@ -45,6 +45,14 @@ python infer.py --model mobilefacenet_adaface/mobilefacenet_adaface.pdmodel \
--face_negative test_lite_focal_arcface_2.JPG \
--device gpu \
--use_trt True
# XPU Inference
python infer.py --model mobilefacenet_adaface/mobilefacenet_adaface.pdmodel \
--params_file mobilefacenet_adaface/mobilefacenet_adaface.pdiparams \
--face test_lite_focal_arcface_0.JPG \
--face_positive test_lite_focal_arcface_1.JPG \
--face_negative test_lite_focal_arcface_2.JPG \
--device xpu
```
The visualized result after running is shown in the figure below

examples/vision/faceid/adaface/python/infer.py (Normal file → Executable file, 5 changes)

@@ -39,7 +39,7 @@ def parse_arguments():
"--device", "--device",
type=str, type=str,
default='cpu', default='cpu',
help="Type of inference device, support 'cpu' or 'gpu'.") help="Type of inference device, support 'cpu' , 'xpu' or 'gpu'.")
parser.add_argument( parser.add_argument(
"--use_trt", "--use_trt",
type=ast.literal_eval, type=ast.literal_eval,
@@ -54,6 +54,9 @@ def build_option(args):
if args.device.lower() == "gpu": if args.device.lower() == "gpu":
option.use_gpu() option.use_gpu()
if args.device.lower() == "xpu":
option.use_xpu()
if args.use_trt:
option.use_trt_backend()
option.set_trt_input_shape("data", [1, 3, 112, 112])


@@ -33,6 +33,8 @@ wget https://bj.bcebos.com/paddlehub/fastdeploy/000000018491.jpg
./infer_demo PP_PicoDet_V2_S_Pedestrian_320x320_infer PP_TinyPose_256x192_infer 000000018491.jpg 1
# TensorRT inference on GPU
./infer_demo PP_PicoDet_V2_S_Pedestrian_320x320_infer PP_TinyPose_256x192_infer 000000018491.jpg 2
# XPU Inference
./infer_demo PP_PicoDet_V2_S_Pedestrian_320x320_infer PP_TinyPose_256x192_infer 000000018491.jpg 3
```
The visualized result after running is shown in the figure below


@@ -66,6 +66,55 @@ void CpuInfer(const std::string& det_model_dir,
<< std::endl;
}
void XpuInfer(const std::string& det_model_dir,
const std::string& tinypose_model_dir,
const std::string& image_file) {
auto option = fastdeploy::RuntimeOption();
option.UseXpu();
auto det_model_file = det_model_dir + sep + "model.pdmodel";
auto det_params_file = det_model_dir + sep + "model.pdiparams";
auto det_config_file = det_model_dir + sep + "infer_cfg.yml";
auto det_model = fastdeploy::vision::detection::PicoDet(
det_model_file, det_params_file, det_config_file, option);
if (!det_model.Initialized()) {
std::cerr << "Detection Model Failed to initialize." << std::endl;
return;
}
auto tinypose_model_file = tinypose_model_dir + sep + "model.pdmodel";
auto tinypose_params_file = tinypose_model_dir + sep + "model.pdiparams";
auto tinypose_config_file = tinypose_model_dir + sep + "infer_cfg.yml";
auto tinypose_model = fastdeploy::vision::keypointdetection::PPTinyPose(
tinypose_model_file, tinypose_params_file, tinypose_config_file, option);
if (!tinypose_model.Initialized()) {
std::cerr << "TinyPose Model Failed to initialize." << std::endl;
return;
}
auto im = cv::imread(image_file);
fastdeploy::vision::KeyPointDetectionResult res;
auto pipeline =
fastdeploy::pipeline::PPTinyPose(
&det_model, &tinypose_model);
pipeline.detection_model_score_threshold = 0.5;
if (!pipeline.Predict(&im, &res)) {
std::cerr << "TinyPose Prediction Failed." << std::endl;
return;
} else {
std::cout << "TinyPose Prediction Done!" << std::endl;
}
// Print the prediction results
std::cout << res.Str() << std::endl;
// Visualize the prediction results
auto vis_im =
fastdeploy::vision::VisKeypointDetection(im, res, 0.2);
cv::imwrite("vis_result.jpg", vis_im);
std::cout << "TinyPose visualized result saved in ./vis_result.jpg"
<< std::endl;
}
void GpuInfer(const std::string& det_model_dir,
const std::string& tinypose_model_dir,
const std::string& image_file) {
@@ -180,7 +229,7 @@ int main(int argc, char* argv[]) {
"./test.jpeg 0" "./test.jpeg 0"
<< std::endl; << std::endl;
std::cout << "The data type of run_option is int, 0: run with cpu; 1: run " std::cout << "The data type of run_option is int, 0: run with cpu; 1: run "
"with gpu; 2: run with gpu and use tensorrt backend." "with gpu; 2: run with gpu and use tensorrt backend; 3: run with xpu."
<< std::endl; << std::endl;
return -1; return -1;
} }
@@ -191,6 +240,8 @@ int main(int argc, char* argv[]) {
GpuInfer(argv[1], argv[2], argv[3]);
} else if (std::atoi(argv[4]) == 2) {
TrtInfer(argv[1], argv[2], argv[3]);
} else if (std::atoi(argv[4]) == 3) {
XpuInfer(argv[1], argv[2], argv[3]);
}
return 0;
}


@@ -25,6 +25,8 @@ python det_keypoint_unite_infer.py --tinypose_model_dir PP_TinyPose_256x192_infe
python det_keypoint_unite_infer.py --tinypose_model_dir PP_TinyPose_256x192_infer --det_model_dir PP_PicoDet_V2_S_Pedestrian_320x320_infer --image 000000018491.jpg --device gpu
# TensorRT inference on GPU (note: the first TensorRT run serializes the model, which takes a while; please wait patiently)
python det_keypoint_unite_infer.py --tinypose_model_dir PP_TinyPose_256x192_infer --det_model_dir PP_PicoDet_V2_S_Pedestrian_320x320_infer --image 000000018491.jpg --device gpu --use_trt True
# XPU Inference
python det_keypoint_unite_infer.py --tinypose_model_dir PP_TinyPose_256x192_infer --det_model_dir PP_PicoDet_V2_S_Pedestrian_320x320_infer --image 000000018491.jpg --device xpu
```
The visualized result after running is shown in the figure below


@@ -19,7 +19,7 @@ def parse_arguments():
"--device", "--device",
type=str, type=str,
default='cpu', default='cpu',
help="type of inference device, support 'cpu' or 'gpu'.") help="type of inference device, support 'cpu', 'xpu' or 'gpu'.")
parser.add_argument( parser.add_argument(
"--use_trt", "--use_trt",
type=ast.literal_eval, type=ast.literal_eval,
@@ -47,6 +47,9 @@ def build_tinypose_option(args):
if args.device.lower() == "gpu": if args.device.lower() == "gpu":
option.use_gpu() option.use_gpu()
if args.device.lower() == "xpu":
option.use_xpu()
if args.use_trt:
option.use_trt_backend()
option.set_trt_input_shape("image", [1, 3, 256, 192])


@@ -32,6 +32,8 @@ wget https://bj.bcebos.com/paddlehub/fastdeploy/hrnet_demo.jpg
./infer_tinypose_demo PP_TinyPose_256x192_infer hrnet_demo.jpg 1
# TensorRT inference on GPU
./infer_tinypose_demo PP_TinyPose_256x192_infer hrnet_demo.jpg 2
# XPU Inference
./infer_tinypose_demo PP_TinyPose_256x192_infer hrnet_demo.jpg 3
```
The visualized result after running is shown in the figure below


@@ -53,6 +53,40 @@ void CpuInfer(const std::string& tinypose_model_dir,
<< std::endl;
}
void XpuInfer(const std::string& tinypose_model_dir,
const std::string& image_file) {
auto tinypose_model_file = tinypose_model_dir + sep + "model.pdmodel";
auto tinypose_params_file = tinypose_model_dir + sep + "model.pdiparams";
auto tinypose_config_file = tinypose_model_dir + sep + "infer_cfg.yml";
auto option = fastdeploy::RuntimeOption();
option.UseXpu();
auto tinypose_model = fastdeploy::vision::keypointdetection::PPTinyPose(
tinypose_model_file, tinypose_params_file, tinypose_config_file, option);
if (!tinypose_model.Initialized()) {
std::cerr << "TinyPose Model Failed to initialize." << std::endl;
return;
}
auto im = cv::imread(image_file);
fastdeploy::vision::KeyPointDetectionResult res;
if (!tinypose_model.Predict(&im, &res)) {
std::cerr << "TinyPose Prediction Failed." << std::endl;
return;
} else {
std::cout << "TinyPose Prediction Done!" << std::endl;
}
// Print the prediction results
std::cout << res.Str() << std::endl;
// Visualize the prediction results
auto tinypose_vis_im =
fastdeploy::vision::VisKeypointDetection(im, res, 0.5);
cv::imwrite("tinypose_vis_result.jpg", tinypose_vis_im);
std::cout << "TinyPose visualized result saved in ./tinypose_vis_result.jpg"
<< std::endl;
}
void GpuInfer(const std::string& tinypose_model_dir,
const std::string& image_file) {
auto option = fastdeploy::RuntimeOption();
@@ -129,7 +163,7 @@ int main(int argc, char* argv[]) {
"e.g ./infer_model ./pptinypose_model_dir ./test.jpeg 0" "e.g ./infer_model ./pptinypose_model_dir ./test.jpeg 0"
<< std::endl; << std::endl;
std::cout << "The data type of run_option is int, 0: run with cpu; 1: run " std::cout << "The data type of run_option is int, 0: run with cpu; 1: run "
"with gpu; 2: run with gpu and use tensorrt backend." "with gpu; 2: run with gpu and use tensorrt backend; 3: run with xpu."
<< std::endl; << std::endl;
return -1; return -1;
} }
@@ -140,6 +174,8 @@ int main(int argc, char* argv[]) {
GpuInfer(argv[1], argv[2]);
} else if (std::atoi(argv[3]) == 2) {
TrtInfer(argv[1], argv[2]);
} else if (std::atoi(argv[3]) == 3) {
XpuInfer(argv[1], argv[2]);
}
return 0;
}


@@ -25,6 +25,8 @@ python pptinypose_infer.py --tinypose_model_dir PP_TinyPose_256x192_infer --imag
python pptinypose_infer.py --tinypose_model_dir PP_TinyPose_256x192_infer --image hrnet_demo.jpg --device gpu
# TensorRT inference on GPU (note: the first TensorRT run serializes the model, which takes a while; please wait patiently)
python pptinypose_infer.py --tinypose_model_dir PP_TinyPose_256x192_infer --image hrnet_demo.jpg --device gpu --use_trt True
# XPU Inference
python pptinypose_infer.py --tinypose_model_dir PP_TinyPose_256x192_infer --image hrnet_demo.jpg --device xpu
```
The visualized result after running is shown in the figure below


@@ -17,7 +17,7 @@ def parse_arguments():
"--device", "--device",
type=str, type=str,
default='cpu', default='cpu',
help="type of inference device, support 'cpu' or 'gpu'.") help="type of inference device, support 'cpu', 'xpu' or 'gpu'.")
parser.add_argument( parser.add_argument(
"--use_trt", "--use_trt",
type=ast.literal_eval, type=ast.literal_eval,
@@ -32,6 +32,9 @@ def build_tinypose_option(args):
if args.device.lower() == "gpu": if args.device.lower() == "gpu":
option.use_gpu() option.use_gpu()
if args.device.lower() == "xpu":
option.use_xpu()
if args.use_trt:
option.use_trt_backend()
option.set_trt_input_shape("image", [1, 3, 256, 192])

examples/vision/matting/ppmatting/cpp/README.md (Normal file → Executable file, 2 changes)

@@ -31,6 +31,8 @@ wget https://bj.bcebos.com/paddlehub/fastdeploy/matting_bgr.jpg
./infer_demo PP-Matting-512 matting_input.jpg matting_bgr.jpg 1
# TensorRT inference on GPU
./infer_demo PP-Matting-512 matting_input.jpg matting_bgr.jpg 2
# XPU Inference
./infer_demo PP-Matting-512 matting_input.jpg matting_bgr.jpg 3
```
The visualized result after running is shown in the figure below

examples/vision/matting/ppmatting/cpp/infer.cc (Normal file → Executable file, 35 changes)

@@ -51,6 +51,37 @@ void CpuInfer(const std::string& model_dir, const std::string& image_file,
<< std::endl;
}
void XpuInfer(const std::string& model_dir, const std::string& image_file,
const std::string& background_file) {
auto model_file = model_dir + sep + "model.pdmodel";
auto params_file = model_dir + sep + "model.pdiparams";
auto config_file = model_dir + sep + "deploy.yaml";
auto option = fastdeploy::RuntimeOption();
option.UseXpu();
auto model = fastdeploy::vision::matting::PPMatting(model_file, params_file,
config_file, option);
if (!model.Initialized()) {
std::cerr << "Failed to initialize." << std::endl;
return;
}
auto im = cv::imread(image_file);
cv::Mat bg = cv::imread(background_file);
fastdeploy::vision::MattingResult res;
if (!model.Predict(&im, &res)) {
std::cerr << "Failed to predict." << std::endl;
return;
}
auto vis_im = fastdeploy::vision::VisMatting(im, res);
auto vis_im_with_bg =
fastdeploy::vision::SwapBackground(im, bg, res);
cv::imwrite("visualized_result.jpg", vis_im_with_bg);
cv::imwrite("visualized_result_fg.jpg", vis_im);
std::cout << "Visualized result save in ./visualized_result_replaced_bg.jpg "
"and ./visualized_result_fg.jpg"
<< std::endl;
}
void GpuInfer(const std::string& model_dir, const std::string& image_file,
const std::string& background_file) {
auto model_file = model_dir + sep + "model.pdmodel";
@@ -125,7 +156,7 @@ int main(int argc, char* argv[]) {
"e.g ./infer_model ./PP-Matting-512 ./test.jpg ./test_bg.jpg 0" "e.g ./infer_model ./PP-Matting-512 ./test.jpg ./test_bg.jpg 0"
<< std::endl; << std::endl;
std::cout << "The data type of run_option is int, 0: run with cpu; 1: run " std::cout << "The data type of run_option is int, 0: run with cpu; 1: run "
"with gpu; 2: run with gpu and use tensorrt backend." "with gpu; 2: run with gpu and use tensorrt backend, 3: run with xpu."
<< std::endl; << std::endl;
return -1; return -1;
} }
@@ -135,6 +166,8 @@ int main(int argc, char* argv[]) {
GpuInfer(argv[1], argv[2], argv[3]);
} else if (std::atoi(argv[4]) == 2) {
TrtInfer(argv[1], argv[2], argv[3]);
} else if (std::atoi(argv[4]) == 3) {
XpuInfer(argv[1], argv[2], argv[3]);
}
return 0;
}

examples/vision/matting/ppmatting/python/README.md (Normal file → Executable file, 2 changes)

@@ -23,6 +23,8 @@ python infer.py --model PP-Matting-512 --image matting_input.jpg --bg matting_bg
python infer.py --model PP-Matting-512 --image matting_input.jpg --bg matting_bgr.jpg --device gpu
# TensorRT inference on GPU (note: the first TensorRT run serializes the model, which takes a while; please wait patiently)
python infer.py --model PP-Matting-512 --image matting_input.jpg --bg matting_bgr.jpg --device gpu --use_trt True
# XPU Inference
python infer.py --model PP-Matting-512 --image matting_input.jpg --bg matting_bgr.jpg --device xpu
```
The visualized result after running is shown in the figure below

examples/vision/matting/ppmatting/python/infer.py (Normal file → Executable file, 4 changes)

@@ -21,7 +21,7 @@ def parse_arguments():
"--device", "--device",
type=str, type=str,
default='cpu', default='cpu',
help="Type of inference device, support 'cpu' or 'gpu'.") help="Type of inference device, support 'cpu', 'xpu' or 'gpu'.")
parser.add_argument( parser.add_argument(
"--use_trt", "--use_trt",
type=ast.literal_eval, type=ast.literal_eval,
@@ -40,6 +40,8 @@ def build_option(args):
option.use_trt_backend()
option.set_trt_input_shape("img", [1, 3, 512, 512])
if args.device.lower() == "xpu":
option.use_xpu()
return option
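Putting the Python pieces together, a minimal PP-Matting run on XPU might look like the sketch below; it reuses the model and image names from the README, and the vis_matting / swap_background visualization helpers are standard FastDeploy calls rather than code from this commit.

```python
import cv2
import fastdeploy as fd

option = fd.RuntimeOption()
option.use_xpu()

model = fd.vision.matting.PPMatting(
    "PP-Matting-512/model.pdmodel",
    "PP-Matting-512/model.pdiparams",
    "PP-Matting-512/deploy.yaml",
    runtime_option=option)

im = cv2.imread("matting_input.jpg")
bg = cv2.imread("matting_bgr.jpg")
result = model.predict(im)

# Foreground visualization and background replacement, as in the C++ demo above.
cv2.imwrite("visualized_result_fg.jpg", fd.vision.vis_matting(im, result))
cv2.imwrite("visualized_result.jpg", fd.vision.swap_background(im, bg, result))
```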

examples/vision/ocr/PP-OCRv2/cpp/README.md (Normal file → Executable file, 4 changes)

@@ -39,6 +39,10 @@ wget https://gitee.com/paddlepaddle/PaddleOCR/raw/release/2.6/ppocr/utils/ppocr_
./infer_demo ./ch_PP-OCRv2_det_infer ./ch_ppocr_mobile_v2.0_cls_infer ./ch_PP-OCRv2_rec_infer ./ppocr_keys_v1.txt ./12.jpg 1
# TensorRT inference on GPU
./infer_demo ./ch_PP-OCRv2_det_infer ./ch_ppocr_mobile_v2.0_cls_infer ./ch_PP-OCRv2_rec_infer ./ppocr_keys_v1.txt ./12.jpg 2
# Paddle-TRT inference on GPU
./infer_demo ./ch_PP-OCRv2_det_infer ./ch_ppocr_mobile_v2.0_cls_infer ./ch_PP-OCRv2_rec_infer ./ppocr_keys_v1.txt ./12.jpg 3
# XPU Inference
./infer_demo ./ch_PP-OCRv2_det_infer ./ch_ppocr_mobile_v2.0_cls_infer ./ch_PP-OCRv2_rec_infer ./ppocr_keys_v1.txt ./12.jpg 4
```
The commands above only work on Linux or macOS. For how to use the SDK on Windows, please refer to:

examples/vision/ocr/PP-OCRv2/cpp/infer.cc (Normal file → Executable file, 4 changes)

@@ -100,7 +100,7 @@ int main(int argc, char* argv[]) {
"./ppocr_keys_v1.txt ./12.jpg 0" "./ppocr_keys_v1.txt ./12.jpg 0"
<< std::endl; << std::endl;
std::cout << "The data type of run_option is int, 0: run with cpu; 1: run " std::cout << "The data type of run_option is int, 0: run with cpu; 1: run "
"with gpu; 2: run with gpu and use tensorrt backend." "with gpu; 2: run with gpu and use tensorrt backend; 3: run with gpu and use Paddle-TRT; 4: run with xpu."
<< std::endl; << std::endl;
return -1; return -1;
} }
@@ -120,6 +120,8 @@ int main(int argc, char* argv[]) {
option.UseTrtBackend();
option.EnablePaddleTrtCollectShape();
option.EnablePaddleToTrt();
} else if (flag == 4) {
option.UseXpu();
}
std::string det_model_dir = argv[1];

examples/vision/ocr/PP-OCRv2/python/README.md (Normal file → Executable file, 2 changes)

@@ -34,6 +34,8 @@ python infer.py --det_model ch_PP-OCRv2_det_infer --cls_model ch_ppocr_mobile_v2
python infer.py --det_model ch_PP-OCRv2_det_infer --cls_model ch_ppocr_mobile_v2.0_cls_infer --rec_model ch_PP-OCRv2_rec_infer --rec_label_file ppocr_keys_v1.txt --image 12.jpg --device gpu
# TensorRT inference on GPU
python infer.py --det_model ch_PP-OCRv2_det_infer --cls_model ch_ppocr_mobile_v2.0_cls_infer --rec_model ch_PP-OCRv2_rec_infer --rec_label_file ppocr_keys_v1.txt --image 12.jpg --device gpu --backend trt
# XPU Inference
python infer.py --det_model ch_PP-OCRv2_det_infer --cls_model ch_ppocr_mobile_v2.0_cls_infer --rec_model ch_PP-OCRv2_rec_infer --rec_label_file ppocr_keys_v1.txt --image 12.jpg --device xpu
```
The visualized result after running is shown in the figure below

examples/vision/ocr/PP-OCRv2/python/infer.py (Normal file → Executable file, 6 changes)

@@ -41,7 +41,7 @@ def parse_arguments():
"--device", "--device",
type=str, type=str,
default='cpu', default='cpu',
help="Type of inference device, support 'cpu' or 'gpu'.") help="Type of inference device, support 'cpu', 'xpu' or 'gpu'.")
parser.add_argument( parser.add_argument(
"--backend", "--backend",
type=str, type=str,
@@ -68,6 +68,10 @@ def build_option(args):
option.set_cpu_thread_num(args.cpu_thread_num)
if args.device.lower() == "xpu":
option.use_xpu()
return option
if args.backend.lower() == "trt": if args.backend.lower() == "trt":
assert args.device.lower( assert args.device.lower(
) == "gpu", "TensorRT backend require inference on device GPU." ) == "gpu", "TensorRT backend require inference on device GPU."

examples/vision/ocr/PP-OCRv3/cpp/README.md (Normal file → Executable file, 4 changes)

@@ -39,6 +39,10 @@ wget https://gitee.com/paddlepaddle/PaddleOCR/raw/release/2.6/ppocr/utils/ppocr_
./infer_demo ./ch_PP-OCRv3_det_infer ./ch_ppocr_mobile_v2.0_cls_infer ./ch_PP-OCRv3_rec_infer ./ppocr_keys_v1.txt ./12.jpg 1
# TensorRT inference on GPU
./infer_demo ./ch_PP-OCRv3_det_infer ./ch_ppocr_mobile_v2.0_cls_infer ./ch_PP-OCRv3_rec_infer ./ppocr_keys_v1.txt ./12.jpg 2
# Paddle-TRT inference on GPU
./infer_demo ./ch_PP-OCRv3_det_infer ./ch_ppocr_mobile_v2.0_cls_infer ./ch_PP-OCRv3_rec_infer ./ppocr_keys_v1.txt ./12.jpg 3
# XPU Inference
./infer_demo ./ch_PP-OCRv3_det_infer ./ch_ppocr_mobile_v2.0_cls_infer ./ch_PP-OCRv3_rec_infer ./ppocr_keys_v1.txt ./12.jpg 4
```
The commands above only work on Linux or macOS. For how to use the SDK on Windows, please refer to:

examples/vision/ocr/PP-OCRv3/cpp/infer.cc (Normal file → Executable file, 4 changes)

@@ -101,7 +101,7 @@ int main(int argc, char* argv[]) {
"./ppocr_keys_v1.txt ./12.jpg 0" "./ppocr_keys_v1.txt ./12.jpg 0"
<< std::endl; << std::endl;
std::cout << "The data type of run_option is int, 0: run with cpu; 1: run " std::cout << "The data type of run_option is int, 0: run with cpu; 1: run "
"with gpu; 2: run with gpu and use tensorrt backend." "with gpu; 2: run with gpu and use tensorrt backend; 3: run with gpu and use Paddle-TRT; 4: run with xpu."
<< std::endl; << std::endl;
return -1; return -1;
} }
@@ -121,6 +121,8 @@ int main(int argc, char* argv[]) {
option.UseTrtBackend();
option.EnablePaddleTrtCollectShape();
option.EnablePaddleToTrt();
} else if (flag == 4) {
option.UseXpu();
}
std::string det_model_dir = argv[1];

examples/vision/ocr/PP-OCRv3/python/README.md (Normal file → Executable file, 2 changes)

@@ -33,6 +33,8 @@ python infer.py --det_model ch_PP-OCRv3_det_infer --cls_model ch_ppocr_mobile_v2
python infer.py --det_model ch_PP-OCRv3_det_infer --cls_model ch_ppocr_mobile_v2.0_cls_infer --rec_model ch_PP-OCRv3_rec_infer --rec_label_file ppocr_keys_v1.txt --image 12.jpg --device gpu
# TensorRT inference on GPU
python infer.py --det_model ch_PP-OCRv3_det_infer --cls_model ch_ppocr_mobile_v2.0_cls_infer --rec_model ch_PP-OCRv3_rec_infer --rec_label_file ppocr_keys_v1.txt --image 12.jpg --device gpu --backend trt
# XPU Inference
python infer.py --det_model ch_PP-OCRv3_det_infer --cls_model ch_ppocr_mobile_v2.0_cls_infer --rec_model ch_PP-OCRv3_rec_infer --rec_label_file ppocr_keys_v1.txt --image 12.jpg --device xpu
```
The visualized result after running is shown in the figure below

examples/vision/ocr/PP-OCRv3/python/infer.py (Normal file → Executable file, 6 changes)

@@ -41,7 +41,7 @@ def parse_arguments():
"--device", "--device",
type=str, type=str,
default='cpu', default='cpu',
help="Type of inference device, support 'cpu' or 'gpu'.") help="Type of inference device, support 'cpu', 'xpu' or 'gpu'.")
parser.add_argument( parser.add_argument(
"--backend", "--backend",
type=str, type=str,
@@ -68,6 +68,10 @@ def build_option(args):
option.set_cpu_thread_num(args.cpu_thread_num)
if args.device.lower() == "xpu":
option.use_xpu()
return option
if args.backend.lower() == "trt":
assert args.device.lower(
) == "gpu", "TensorRT backend require inference on device GPU."

examples/vision/segmentation/paddleseg/cpp/README.md (Normal file → Executable file, 2 changes)

@@ -32,6 +32,8 @@ wget https://paddleseg.bj.bcebos.com/dygraph/demo/cityscapes_demo.png
./infer_demo Unet_cityscapes_without_argmax_infer cityscapes_demo.png 1
# TensorRT inference on GPU
./infer_demo Unet_cityscapes_without_argmax_infer cityscapes_demo.png 2
# XPU Inference
./infer_demo Unet_cityscapes_without_argmax_infer cityscapes_demo.png 3
```
The visualized result after running is shown in the figure below

examples/vision/segmentation/paddleseg/cpp/infer.cc (Normal file → Executable file, 32 changes)

@@ -48,6 +48,34 @@ void CpuInfer(const std::string& model_dir, const std::string& image_file) {
std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl; std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl;
} }
void XpuInfer(const std::string& model_dir, const std::string& image_file) {
auto model_file = model_dir + sep + "model.pdmodel";
auto params_file = model_dir + sep + "model.pdiparams";
auto config_file = model_dir + sep + "deploy.yaml";
auto option = fastdeploy::RuntimeOption();
option.UseXpu();
auto model = fastdeploy::vision::segmentation::PaddleSegModel(
model_file, params_file, config_file, option);
if (!model.Initialized()) {
std::cerr << "Failed to initialize." << std::endl;
return;
}
auto im = cv::imread(image_file);
fastdeploy::vision::SegmentationResult res;
if (!model.Predict(im, &res)) {
std::cerr << "Failed to predict." << std::endl;
return;
}
std::cout << res.Str() << std::endl;
auto vis_im = fastdeploy::vision::VisSegmentation(im, res, 0.5);
cv::imwrite("vis_result.jpg", vis_im);
std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl;
}
void GpuInfer(const std::string& model_dir, const std::string& image_file) {
auto model_file = model_dir + sep + "model.pdmodel";
auto params_file = model_dir + sep + "model.pdiparams";
@@ -114,7 +142,7 @@ int main(int argc, char* argv[]) {
"e.g ./infer_model ./ppseg_model_dir ./test.jpeg 0" "e.g ./infer_model ./ppseg_model_dir ./test.jpeg 0"
<< std::endl; << std::endl;
std::cout << "The data type of run_option is int, 0: run with cpu; 1: run " std::cout << "The data type of run_option is int, 0: run with cpu; 1: run "
"with gpu; 2: run with gpu and use tensorrt backend." "with gpu; 2: run with gpu and use tensorrt backend; 3: run with xpu."
<< std::endl; << std::endl;
return -1; return -1;
} }
@@ -125,6 +153,8 @@ int main(int argc, char* argv[]) {
GpuInfer(argv[1], argv[2]);
} else if (std::atoi(argv[3]) == 2) {
TrtInfer(argv[1], argv[2]);
} else if (std::atoi(argv[3]) == 3) {
XpuInfer(argv[1], argv[2]);
}
return 0;
}


@@ -25,6 +25,8 @@ python infer.py --model Unet_cityscapes_without_argmax_infer --image cityscapes_
python infer.py --model Unet_cityscapes_without_argmax_infer --image cityscapes_demo.png --device gpu
# TensorRT inference on GPU (note: the first TensorRT run serializes the model, which takes a while; please wait patiently)
python infer.py --model Unet_cityscapes_without_argmax_infer --image cityscapes_demo.png --device gpu --use_trt True
# XPU Inference
python infer.py --model Unet_cityscapes_without_argmax_infer --image cityscapes_demo.png --device xpu
```
The visualized result after running is shown in the figure below

examples/vision/segmentation/paddleseg/python/infer.py (Normal file → Executable file, 5 changes)

@@ -15,7 +15,7 @@ def parse_arguments():
"--device", "--device",
type=str, type=str,
default='cpu', default='cpu',
help="Type of inference device, support 'cpu' or 'gpu'.") help="Type of inference device, support 'xpu', 'cpu' or 'gpu'.")
parser.add_argument( parser.add_argument(
"--use_trt", "--use_trt",
type=ast.literal_eval, type=ast.literal_eval,
@@ -30,6 +30,9 @@ def build_option(args):
if args.device.lower() == "gpu":
option.use_gpu()
if args.device.lower() == "xpu":
option.use_xpu()
if args.use_trt:
option.use_trt_backend()
option.set_trt_input_shape("x", [1, 3, 256, 256], [1, 3, 1024, 1024],

fastdeploy/vision/faceid/contrib/insightface_rec.cc (Normal file → Executable file, 1 change)

@@ -32,6 +32,7 @@ InsightFaceRecognitionModel::InsightFaceRecognitionModel(
} else {
valid_cpu_backends = {Backend::PDINFER, Backend::ORT, Backend::LITE};
valid_gpu_backends = {Backend::PDINFER, Backend::ORT, Backend::TRT};
valid_xpu_backends = {Backend::LITE};
}
runtime_option = custom_option;
runtime_option.model_format = model_format;
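This one-line change, repeated for the other model classes below, registers Paddle Lite as the only backend the model accepts when the runtime option selects the XPU device; as a result the vision demos only need to call use_xpu() and the backend choice falls out automatically. A hedged Python illustration of that behaviour (PPMatting is just a representative model from this commit):

```python
import fastdeploy as fd

option = fd.RuntimeOption()
option.use_xpu()  # no explicit use_paddle_lite_backend() call here

# Because the model class lists Backend::LITE in valid_xpu_backends,
# constructing it with an XPU option is expected to route inference
# through Paddle Lite automatically (assumption based on the C++ changes).
model = fd.vision.matting.PPMatting(
    "PP-Matting-512/model.pdmodel",
    "PP-Matting-512/model.pdiparams",
    "PP-Matting-512/deploy.yaml",
    runtime_option=option)
```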

fastdeploy/vision/keypointdet/pptinypose/pptinypose.cc (Normal file → Executable file, 1 change)

@@ -18,6 +18,7 @@ PPTinyPose::PPTinyPose(const std::string& model_file,
config_file_ = config_file;
valid_cpu_backends = {Backend::PDINFER, Backend::ORT, Backend::OPENVINO, Backend::LITE};
valid_gpu_backends = {Backend::PDINFER, Backend::ORT, Backend::TRT};
valid_xpu_backends = {Backend::LITE};
runtime_option = custom_option;
runtime_option.model_format = model_format;
runtime_option.model_file = model_file;

fastdeploy/vision/matting/ppmatting/ppmatting.cc (Normal file → Executable file, 1 change)

@@ -28,6 +28,7 @@ PPMatting::PPMatting(const std::string& model_file,
config_file_ = config_file;
valid_cpu_backends = {Backend::ORT, Backend::PDINFER, Backend::LITE};
valid_gpu_backends = {Backend::PDINFER, Backend::TRT};
valid_xpu_backends = {Backend::LITE};
runtime_option = custom_option;
runtime_option.model_format = model_format;
runtime_option.model_file = model_file;


@@ -32,6 +32,7 @@ Classifier::Classifier(const std::string& model_file,
} else {
valid_cpu_backends = {Backend::PDINFER, Backend::ORT, Backend::OPENVINO, Backend::LITE};
valid_gpu_backends = {Backend::PDINFER, Backend::ORT, Backend::TRT};
valid_xpu_backends = {Backend::LITE};
valid_ascend_backends = {Backend::LITE};
}
runtime_option = custom_option;


@@ -32,6 +32,7 @@ DBDetector::DBDetector(const std::string& model_file,
} else {
valid_cpu_backends = {Backend::PDINFER, Backend::ORT, Backend::OPENVINO, Backend::LITE};
valid_gpu_backends = {Backend::PDINFER, Backend::ORT, Backend::TRT};
valid_xpu_backends = {Backend::LITE};
valid_ascend_backends = {Backend::LITE};
}


@@ -34,6 +34,7 @@ Recognizer::Recognizer(const std::string& model_file,
} else {
valid_cpu_backends = {Backend::PDINFER, Backend::ORT, Backend::OPENVINO, Backend::LITE};
valid_gpu_backends = {Backend::PDINFER, Backend::ORT, Backend::TRT};
valid_xpu_backends = {Backend::LITE};
valid_ascend_backends = {Backend::LITE};
}


@@ -29,6 +29,7 @@ PaddleSegModel::PaddleSegModel(const std::string& model_file,
valid_gpu_backends = {Backend::PDINFER, Backend::ORT, Backend::TRT};
valid_rknpu_backends = {Backend::RKNPU2};
valid_timvx_backends = {Backend::LITE};
valid_xpu_backends = {Backend::LITE};
valid_ascend_backends = {Backend::LITE};
runtime_option = custom_option;
runtime_option.model_format = model_format;
@@ -89,4 +90,4 @@ bool PaddleSegModel::BatchPredict(const std::vector<cv::Mat>& imgs,
}
} // namespace segmentation
} // namespace vision
} // namespace fastdeploy