From 7b15f72516a1f2bb2e8b6f347ca5a60d14d2bd0e Mon Sep 17 00:00:00 2001
From: yeliang2258 <30516196+yeliang2258@users.noreply.github.com>
Date: Mon, 26 Dec 2022 15:02:58 +0800
Subject: [PATCH] [Backend] Add OCR, Seg, KeypointDetection, Matting,
 ernie-3.0 and adaface models for XPU Deploy (#960)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

* [FlyCV] Bump up FlyCV -> official release 1.0.0

* add seg models for XPU

* add ocr model for XPU

* add matting

* add matting python

* fix infer.cc

* add keypointdetection support for XPU

* Add adaface support for XPU

* add ernie-3.0

* fix doc

Co-authored-by: DefTruth
Co-authored-by: DefTruth <31974251+DefTruth@users.noreply.github.com>
---
 examples/text/ernie-3.0/cpp/README.md           |  2 +
 examples/text/ernie-3.0/cpp/seq_cls_infer.cc    | 19 ++++---
 examples/text/ernie-3.0/python/README.md        |  5 +-
 examples/text/ernie-3.0/python/seq_cls_infer.py |  8 ++-
 examples/vision/faceid/adaface/cpp/README.md    |  7 +++
 examples/vision/faceid/adaface/cpp/infer.cc     | 41 +++++++++++++-
 examples/vision/faceid/adaface/python/README.md |  8 +++
 examples/vision/faceid/adaface/python/infer.py  |  5 +-
 .../det_keypoint_unite/cpp/README.md            |  2 +
 .../cpp/det_keypoint_unite_infer.cc             | 53 ++++++++++++++++++-
 .../det_keypoint_unite/python/README.md         |  2 +
 .../python/det_keypoint_unite_infer.py          |  5 +-
 .../keypointdetection/tiny_pose/cpp/README.md   |  2 +
 .../tiny_pose/cpp/pptinypose_infer.cc           | 38 ++++++++++++-
 .../tiny_pose/python/README.md                  |  2 +
 .../tiny_pose/python/pptinypose_infer.py        |  5 +-
 .../vision/matting/ppmatting/cpp/README.md      |  2 +
 .../vision/matting/ppmatting/cpp/infer.cc       | 35 +++++++++++-
 .../vision/matting/ppmatting/python/README.md   |  2 +
 .../vision/matting/ppmatting/python/infer.py    |  4 +-
 examples/vision/ocr/PP-OCRv2/cpp/README.md      |  4 ++
 examples/vision/ocr/PP-OCRv2/cpp/infer.cc       |  4 +-
 examples/vision/ocr/PP-OCRv2/python/README.md   |  2 +
 examples/vision/ocr/PP-OCRv2/python/infer.py    |  6 ++-
 examples/vision/ocr/PP-OCRv3/cpp/README.md      |  4 ++
 examples/vision/ocr/PP-OCRv3/cpp/infer.cc       |  4 +-
 examples/vision/ocr/PP-OCRv3/python/README.md   |  2 +
 examples/vision/ocr/PP-OCRv3/python/infer.py    |  6 ++-
 .../segmentation/paddleseg/cpp/README.md        |  2 +
 .../segmentation/paddleseg/cpp/infer.cc         | 32 ++++++++++-
 .../segmentation/paddleseg/python/README.md     |  2 +
 .../segmentation/paddleseg/python/infer.py      |  5 +-
 .../vision/faceid/contrib/insightface_rec.cc    |  1 +
 .../keypointdet/pptinypose/pptinypose.cc        |  1 +
 .../vision/matting/ppmatting/ppmatting.cc       |  1 +
 fastdeploy/vision/ocr/ppocr/classifier.cc       |  1 +
 fastdeploy/vision/ocr/ppocr/dbdetector.cc       |  1 +
 fastdeploy/vision/ocr/ppocr/recognizer.cc       |  1 +
 fastdeploy/vision/segmentation/ppseg/model.cc   |  3 +-
 39 files changed, 304 insertions(+), 25 deletions(-)
 mode change 100644 => 100755 examples/text/ernie-3.0/cpp/README.md
 mode change 100644 => 100755 examples/text/ernie-3.0/cpp/seq_cls_infer.cc
 mode change 100644 => 100755 examples/text/ernie-3.0/python/README.md
 mode change 100644 => 100755 examples/text/ernie-3.0/python/seq_cls_infer.py
 mode change 100644 => 100755 examples/vision/faceid/adaface/cpp/README.md
 mode change 100644 => 100755 examples/vision/faceid/adaface/cpp/infer.cc
 mode change 100644 => 100755 examples/vision/faceid/adaface/python/README.md
 mode change 100644 => 100755 examples/vision/faceid/adaface/python/infer.py
 mode change 100644 => 100755 examples/vision/keypointdetection/det_keypoint_unite/cpp/README.md
 mode change 100644 => 100755 examples/vision/keypointdetection/det_keypoint_unite/cpp/det_keypoint_unite_infer.cc
 mode change 100644 => 100755 examples/vision/keypointdetection/det_keypoint_unite/python/README.md
 mode change 100644 => 100755 examples/vision/keypointdetection/det_keypoint_unite/python/det_keypoint_unite_infer.py
 mode change 100644 => 100755 examples/vision/keypointdetection/tiny_pose/cpp/README.md
 mode change 100644 => 100755 examples/vision/keypointdetection/tiny_pose/cpp/pptinypose_infer.cc
 mode change 100644 => 100755 examples/vision/keypointdetection/tiny_pose/python/README.md
 mode change 100644 => 100755 examples/vision/keypointdetection/tiny_pose/python/pptinypose_infer.py
 mode change 100644 => 100755 examples/vision/matting/ppmatting/cpp/README.md
 mode change 100644 => 100755 examples/vision/matting/ppmatting/cpp/infer.cc
 mode change 100644 => 100755 examples/vision/matting/ppmatting/python/README.md
 mode change 100644 => 100755 examples/vision/matting/ppmatting/python/infer.py
 mode change 100644 => 100755 examples/vision/ocr/PP-OCRv2/cpp/README.md
 mode change 100644 => 100755 examples/vision/ocr/PP-OCRv2/cpp/infer.cc
 mode change 100644 => 100755 examples/vision/ocr/PP-OCRv2/python/README.md
 mode change 100644 => 100755 examples/vision/ocr/PP-OCRv2/python/infer.py
 mode change 100644 => 100755 examples/vision/ocr/PP-OCRv3/cpp/README.md
 mode change 100644 => 100755 examples/vision/ocr/PP-OCRv3/cpp/infer.cc
 mode change 100644 => 100755 examples/vision/ocr/PP-OCRv3/python/README.md
 mode change 100644 => 100755 examples/vision/ocr/PP-OCRv3/python/infer.py
 mode change 100644 => 100755 examples/vision/segmentation/paddleseg/cpp/README.md
 mode change 100644 => 100755 examples/vision/segmentation/paddleseg/cpp/infer.cc
 mode change 100644 => 100755 examples/vision/segmentation/paddleseg/python/README.md
 mode change 100644 => 100755 examples/vision/segmentation/paddleseg/python/infer.py
 mode change 100644 => 100755 fastdeploy/vision/faceid/contrib/insightface_rec.cc
 mode change 100644 => 100755 fastdeploy/vision/keypointdet/pptinypose/pptinypose.cc
 mode change 100644 => 100755 fastdeploy/vision/matting/ppmatting/ppmatting.cc

diff --git a/examples/text/ernie-3.0/cpp/README.md b/examples/text/ernie-3.0/cpp/README.md
old mode 100644
new mode 100755
index 51e99656f..053697799
--- a/examples/text/ernie-3.0/cpp/README.md
+++ b/examples/text/ernie-3.0/cpp/README.md
@@ -35,6 +35,8 @@ tar xvfz ernie-3.0-medium-zh-afqmc.tgz
 # GPU Inference
 ./seq_cls_infer_demo --device gpu --model_dir ernie-3.0-medium-zh-afqmc
+# XPU Inference
+./seq_cls_infer_demo --device xpu --model_dir ernie-3.0-medium-zh-afqmc
 ```
 The result returned after running is as follows:
 ```bash
diff --git a/examples/text/ernie-3.0/cpp/seq_cls_infer.cc b/examples/text/ernie-3.0/cpp/seq_cls_infer.cc
old mode 100644
new mode 100755
index e50cbfe75..71ded2a1e
--- a/examples/text/ernie-3.0/cpp/seq_cls_infer.cc
+++ b/examples/text/ernie-3.0/cpp/seq_cls_infer.cc
@@ -32,7 +32,7 @@ const char sep = '/';
 DEFINE_string(model_dir, "", "Directory of the inference model.");
 DEFINE_string(vocab_path, "", "Path of the vocab file.");
 DEFINE_string(device, "cpu",
-              "Type of inference device, support 'cpu' or 'gpu'.");
+              "Type of inference device, support 'cpu', 'xpu' or 'gpu'.");
 DEFINE_string(backend, "onnx_runtime",
               "The inference runtime backend, support: ['onnx_runtime', "
               "'paddle', 'openvino', 'tensorrt', 'paddle_tensorrt']");
@@ -55,7 +55,16 @@ void PrintUsage() {
 }
 
 bool CreateRuntimeOption(fastdeploy::RuntimeOption* option) {
-  if (FLAGS_device == "gpu") {
+  std::string model_path = FLAGS_model_dir + sep + "infer.pdmodel";
+  std::string param_path = FLAGS_model_dir + sep + "infer.pdiparams";
+  fastdeploy::FDINFO << "model_path = " << model_path
+                     << ", param_path = " << param_path << std::endl;
+  option->SetModelPath(model_path, param_path);
+
+  if (FLAGS_device == "xpu") {
+    option->UseXpu();
+    return true;
+  } else if (FLAGS_device == "gpu") {
     option->UseGpu();
   } else if (FLAGS_device == "cpu") {
     option->UseCpu();
@@ -97,11 +106,7 @@ bool CreateRuntimeOption(fastdeploy::RuntimeOption* option) {
                        << FLAGS_backend << "'" << std::endl;
     return false;
   }
-  std::string model_path = FLAGS_model_dir + sep + "infer.pdmodel";
-  std::string param_path = FLAGS_model_dir + sep + "infer.pdiparams";
-  fastdeploy::FDINFO << "model_path = " << model_path
-                     << ", param_path = " << param_path << std::endl;
-  option->SetModelPath(model_path, param_path);
+
   return true;
 }
diff --git a/examples/text/ernie-3.0/python/README.md b/examples/text/ernie-3.0/python/README.md
old mode 100644
new mode 100755
index fb07f26eb..f1424aff7
--- a/examples/text/ernie-3.0/python/README.md
+++ b/examples/text/ernie-3.0/python/README.md
@@ -22,7 +22,7 @@ pip install -r requirements.txt
 
 ### A Quick Start
 
-The following example shows how to employ FastDeploy library to complete Python predictive deployment of ERNIE 3.0 Medium model on [AFQMC Dataset](https://bj.bcebos.com/paddlenlp/datasets/afqmc_public.zip)of CLUE Benchmark for text classification tasks.
+The following example shows how to employ FastDeploy library to complete Python predictive deployment of ERNIE 3.0 Medium model on [AFQMC Dataset](https://bj.bcebos.com/paddlenlp/datasets/afqmc_public.zip) of CLUE Benchmark for text classification tasks.
 
 ```bash
@@ -40,6 +40,9 @@ python seq_cls_infer.py --device cpu --model_dir ernie-3.0-medium-zh-afqmc
 # GPU Inference
 python seq_cls_infer.py --device gpu --model_dir ernie-3.0-medium-zh-afqmc
+# XPU Inference
+python seq_cls_infer.py --device xpu --model_dir ernie-3.0-medium-zh-afqmc
+
 ```
 
 The result returned after running is as follows:
diff --git a/examples/text/ernie-3.0/python/seq_cls_infer.py b/examples/text/ernie-3.0/python/seq_cls_infer.py
old mode 100644
new mode 100755
index d9481451d..67cde26ae
--- a/examples/text/ernie-3.0/python/seq_cls_infer.py
+++ b/examples/text/ernie-3.0/python/seq_cls_infer.py
@@ -35,8 +35,8 @@ def parse_arguments():
         "--device",
         type=str,
         default='cpu',
-        choices=['gpu', 'cpu'],
-        help="Type of inference device, support 'cpu' or 'gpu'.")
+        choices=['gpu', 'cpu', 'xpu'],
+        help="Type of inference device, support 'cpu', 'xpu' or 'gpu'.")
     parser.add_argument(
         "--backend",
         type=str,
@@ -94,6 +94,10 @@ class ErnieForSequenceClassificationPredictor(object):
         model_path = os.path.join(args.model_dir, "infer.pdmodel")
         params_path = os.path.join(args.model_dir, "infer.pdiparams")
         option.set_model_path(model_path, params_path)
+        if args.device == 'xpu':
+            option.use_xpu()
+            option.use_paddle_lite_backend()
+            return fd.Runtime(option)
         if args.device == 'cpu':
             option.use_cpu()
         else:
diff --git a/examples/vision/faceid/adaface/cpp/README.md b/examples/vision/faceid/adaface/cpp/README.md
old mode 100644
new mode 100755
index 9c28d584d..b4a6a5991
--- a/examples/vision/faceid/adaface/cpp/README.md
+++ b/examples/vision/faceid/adaface/cpp/README.md
@@ -51,6 +51,13 @@ tar zxvf mobilefacenet_adaface.tgz -C ./
     test_lite_focal_arcface_2.JPG \
     2
 
+# XPU Inference
+./infer_demo mobilefacenet_adaface/mobilefacenet_adaface.pdmodel \
+    mobilefacenet_adaface/mobilefacenet_adaface.pdiparams \
+    test_lite_focal_arcface_0.JPG \
+    test_lite_focal_arcface_1.JPG \
+    test_lite_focal_arcface_2.JPG \
+    3
 ```
 
 The visualized result after running is as shown below
diff --git a/examples/vision/faceid/adaface/cpp/infer.cc b/examples/vision/faceid/adaface/cpp/infer.cc
old mode 100644
new mode 100755
index 8ed6f766c..8d0ca938c
--- a/examples/vision/faceid/adaface/cpp/infer.cc
+++ b/examples/vision/faceid/adaface/cpp/infer.cc
@@ -47,6 +47,43 @@ void CpuInfer(const std::string &model_file, const std::string &params_file,
             << ", Cosine 02:" << cosine02 << std::endl;
 }
 
+void XpuInfer(const std::string &model_file, const std::string &params_file,
+              const std::vector<std::string> &image_file) {
+  auto option = fastdeploy::RuntimeOption();
+  option.UseXpu();
+  auto model =
+      fastdeploy::vision::faceid::AdaFace(model_file, params_file, option);
+  if (!model.Initialized()) {
+    std::cerr << "Failed to initialize." << std::endl;
+    return;
+  }
+
+  cv::Mat face0 = cv::imread(image_file[0]);
+  cv::Mat face1 = cv::imread(image_file[1]);
+  cv::Mat face2 = cv::imread(image_file[2]);
+
+  fastdeploy::vision::FaceRecognitionResult res0;
+  fastdeploy::vision::FaceRecognitionResult res1;
+  fastdeploy::vision::FaceRecognitionResult res2;
+
+  if ((!model.Predict(&face0, &res0)) || (!model.Predict(&face1, &res1)) ||
+      (!model.Predict(&face2, &res2))) {
+    std::cerr << "Prediction Failed." << std::endl;
+  }
+
+  std::cout << "Prediction Done!" << std::endl;
+
+  std::cout << "--- [Face 0]:" << res0.Str();
+  std::cout << "--- [Face 1]:" << res1.Str();
+  std::cout << "--- [Face 2]:" << res2.Str();
+
+  float cosine01 = fastdeploy::vision::utils::CosineSimilarity(
+      res0.embedding, res1.embedding, model.l2_normalize);
+  float cosine02 = fastdeploy::vision::utils::CosineSimilarity(
+      res0.embedding, res2.embedding, model.l2_normalize);
+  std::cout << "Detect Done! Cosine 01: " << cosine01
+            << ", Cosine 02:" << cosine02 << std::endl;
+}
+
 void GpuInfer(const std::string &model_file, const std::string &params_file,
               const std::vector<std::string> &image_file) {
   auto option = fastdeploy::RuntimeOption();
@@ -134,7 +171,7 @@ int main(int argc, char *argv[]) {
                  "test_lite_focal_AdaFace_2.JPG 0"
               << std::endl;
     std::cout << "The data type of run_option is int, 0: run with cpu; 1: run "
-                 "with gpu; 2: run with gpu and use tensorrt backend."
+                 "with gpu; 2: run with gpu and use tensorrt backend; 3: run with xpu."
               << std::endl;
     return -1;
   }
@@ -147,6 +184,8 @@ int main(int argc, char *argv[]) {
     GpuInfer(argv[1], argv[2], image_files);
   } else if (std::atoi(argv[6]) == 2) {
     TrtInfer(argv[1], argv[2], image_files);
+  } else if (std::atoi(argv[6]) == 3) {
+    XpuInfer(argv[1], argv[2], image_files);
   }
   return 0;
 }
diff --git a/examples/vision/faceid/adaface/python/README.md b/examples/vision/faceid/adaface/python/README.md
old mode 100644
new mode 100755
index 9c3694012..699da9001
--- a/examples/vision/faceid/adaface/python/README.md
+++ b/examples/vision/faceid/adaface/python/README.md
@@ -45,6 +45,14 @@ python infer.py --model mobilefacenet_adaface/mobilefacenet_adaface.pdmodel \
                 --face_negative test_lite_focal_arcface_2.JPG \
                 --device gpu \
                 --use_trt True
+
+# XPU Inference
+python infer.py --model mobilefacenet_adaface/mobilefacenet_adaface.pdmodel \
+                --params_file mobilefacenet_adaface/mobilefacenet_adaface.pdiparams \
+                --face test_lite_focal_arcface_0.JPG \
+                --face_positive test_lite_focal_arcface_1.JPG \
+                --face_negative test_lite_focal_arcface_2.JPG \
+                --device xpu
 ```
 
 The visualized result after running is as shown below
diff --git a/examples/vision/faceid/adaface/python/infer.py b/examples/vision/faceid/adaface/python/infer.py
old mode 100644
new mode 100755
index b92ebadfb..50a30191f
--- a/examples/vision/faceid/adaface/python/infer.py
+++ b/examples/vision/faceid/adaface/python/infer.py
@@ -39,7 +39,7 @@ def parse_arguments():
         "--device",
         type=str,
         default='cpu',
-        help="Type of inference device, support 'cpu' or 'gpu'.")
+        help="Type of inference device, support 'cpu', 'xpu' or 'gpu'.")
     parser.add_argument(
         "--use_trt",
         type=ast.literal_eval,
@@ -54,6 +54,9 @@ def build_option(args):
     if args.device.lower() == "gpu":
         option.use_gpu()
 
+    if args.device.lower() == "xpu":
+        option.use_xpu()
+
     if args.use_trt:
         option.use_trt_backend()
         option.set_trt_input_shape("data", [1, 3, 112, 112])
diff --git a/examples/vision/keypointdetection/det_keypoint_unite/cpp/README.md b/examples/vision/keypointdetection/det_keypoint_unite/cpp/README.md
old mode 100644
new mode 100755
index 631258127..477d0872d
--- a/examples/vision/keypointdetection/det_keypoint_unite/cpp/README.md
+++ b/examples/vision/keypointdetection/det_keypoint_unite/cpp/README.md
@@ -33,6 +33,8 @@ wget https://bj.bcebos.com/paddlehub/fastdeploy/000000018491.jpg
 ./infer_demo PP_PicoDet_V2_S_Pedestrian_320x320_infer PP_TinyPose_256x192_infer 000000018491.jpg 1
 # TensorRT Inference on GPU
 ./infer_demo PP_PicoDet_V2_S_Pedestrian_320x320_infer PP_TinyPose_256x192_infer 000000018491.jpg 2
+# XPU Inference
+./infer_demo PP_PicoDet_V2_S_Pedestrian_320x320_infer PP_TinyPose_256x192_infer 000000018491.jpg 3
 ```
 
 The visualized result after running is as shown below
diff --git a/examples/vision/keypointdetection/det_keypoint_unite/cpp/det_keypoint_unite_infer.cc b/examples/vision/keypointdetection/det_keypoint_unite/cpp/det_keypoint_unite_infer.cc
old mode 100644
new mode 100755
index f2704104a..bd5c080a8
--- a/examples/vision/keypointdetection/det_keypoint_unite/cpp/det_keypoint_unite_infer.cc
+++ b/examples/vision/keypointdetection/det_keypoint_unite/cpp/det_keypoint_unite_infer.cc
@@ -66,6 +66,55 @@ void CpuInfer(const std::string& det_model_dir,
             << std::endl;
 }
 
+void XpuInfer(const std::string& det_model_dir,
+              const std::string& tinypose_model_dir,
+              const std::string& image_file) {
+  auto option = fastdeploy::RuntimeOption();
+  option.UseXpu();
+  auto det_model_file = det_model_dir + sep + "model.pdmodel";
+  auto det_params_file = det_model_dir + sep + "model.pdiparams";
+  auto det_config_file = det_model_dir + sep + "infer_cfg.yml";
+  auto det_model = fastdeploy::vision::detection::PicoDet(
+      det_model_file, det_params_file, det_config_file, option);
+  if (!det_model.Initialized()) {
+    std::cerr << "Detection Model Failed to initialize." << std::endl;
+    return;
+  }
+
+  auto tinypose_model_file = tinypose_model_dir + sep + "model.pdmodel";
+  auto tinypose_params_file = tinypose_model_dir + sep + "model.pdiparams";
+  auto tinypose_config_file = tinypose_model_dir + sep + "infer_cfg.yml";
+  auto tinypose_model = fastdeploy::vision::keypointdetection::PPTinyPose(
+      tinypose_model_file, tinypose_params_file, tinypose_config_file, option);
+  if (!tinypose_model.Initialized()) {
+    std::cerr << "TinyPose Model Failed to initialize." << std::endl;
+    return;
+  }
+
+  auto im = cv::imread(image_file);
+  fastdeploy::vision::KeyPointDetectionResult res;
+
+  auto pipeline =
+      fastdeploy::pipeline::PPTinyPose(
+          &det_model, &tinypose_model);
+  pipeline.detection_model_score_threshold = 0.5;
+  if (!pipeline.Predict(&im, &res)) {
+    std::cerr << "TinyPose Prediction Failed." << std::endl;
+    return;
+  } else {
+    std::cout << "TinyPose Prediction Done!" << std::endl;
+  }
+  // Print the prediction results
+  std::cout << res.Str() << std::endl;
+
+  // Visualize the prediction results
+  auto vis_im =
+      fastdeploy::vision::VisKeypointDetection(im, res, 0.2);
+  cv::imwrite("vis_result.jpg", vis_im);
+  std::cout << "TinyPose visualized result saved in ./vis_result.jpg"
+            << std::endl;
+}
+
 void GpuInfer(const std::string& det_model_dir,
               const std::string& tinypose_model_dir,
               const std::string& image_file) {
@@ -180,7 +229,7 @@ int main(int argc, char* argv[]) {
                  "./test.jpeg 0"
               << std::endl;
     std::cout << "The data type of run_option is int, 0: run with cpu; 1: run "
-                 "with gpu; 2: run with gpu and use tensorrt backend."
+                 "with gpu; 2: run with gpu and use tensorrt backend; 3: run with xpu."
               << std::endl;
     return -1;
   }
@@ -191,6 +240,8 @@ int main(int argc, char* argv[]) {
     GpuInfer(argv[1], argv[2], argv[3]);
   } else if (std::atoi(argv[4]) == 2) {
     TrtInfer(argv[1], argv[2], argv[3]);
+  } else if (std::atoi(argv[4]) == 3) {
+    XpuInfer(argv[1], argv[2], argv[3]);
   }
   return 0;
 }
diff --git a/examples/vision/keypointdetection/det_keypoint_unite/python/README.md b/examples/vision/keypointdetection/det_keypoint_unite/python/README.md
old mode 100644
new mode 100755
index 1b2fc0f18..9ea6a1b6d
--- a/examples/vision/keypointdetection/det_keypoint_unite/python/README.md
+++ b/examples/vision/keypointdetection/det_keypoint_unite/python/README.md
@@ -25,6 +25,8 @@ python det_keypoint_unite_infer.py --tinypose_model_dir PP_TinyPose_256x192_infe
 python det_keypoint_unite_infer.py --tinypose_model_dir PP_TinyPose_256x192_infer --det_model_dir PP_PicoDet_V2_S_Pedestrian_320x320_infer --image 000000018491.jpg --device gpu
 # TensorRT Inference on GPU (note: the first TensorRT run serializes the model, which takes a while; please be patient)
 python det_keypoint_unite_infer.py --tinypose_model_dir PP_TinyPose_256x192_infer --det_model_dir PP_PicoDet_V2_S_Pedestrian_320x320_infer --image 000000018491.jpg --device gpu --use_trt True
+# XPU Inference
+python det_keypoint_unite_infer.py --tinypose_model_dir PP_TinyPose_256x192_infer --det_model_dir PP_PicoDet_V2_S_Pedestrian_320x320_infer --image 000000018491.jpg --device xpu
 ```
 
 The visualized result after running is as shown below
diff --git a/examples/vision/keypointdetection/det_keypoint_unite/python/det_keypoint_unite_infer.py b/examples/vision/keypointdetection/det_keypoint_unite/python/det_keypoint_unite_infer.py
old mode 100644
new mode 100755
index b0952439f..d76c9944b
--- a/examples/vision/keypointdetection/det_keypoint_unite/python/det_keypoint_unite_infer.py
+++ b/examples/vision/keypointdetection/det_keypoint_unite/python/det_keypoint_unite_infer.py
@@ -19,7 +19,7 @@ def parse_arguments():
         "--device",
         type=str,
         default='cpu',
-        help="type of inference device, support 'cpu' or 'gpu'.")
+        help="type of inference device, support 'cpu', 'xpu' or 'gpu'.")
     parser.add_argument(
         "--use_trt",
         type=ast.literal_eval,
@@ -47,6 +47,9 @@ def build_tinypose_option(args):
     if args.device.lower() == "gpu":
         option.use_gpu()
 
+    if args.device.lower() == "xpu":
+        option.use_xpu()
+
     if args.use_trt:
         option.use_trt_backend()
         option.set_trt_input_shape("image", [1, 3, 256, 192])
diff --git a/examples/vision/keypointdetection/tiny_pose/cpp/README.md b/examples/vision/keypointdetection/tiny_pose/cpp/README.md
old mode 100644
new mode 100755
index eba550193..9b4983ee8
--- a/examples/vision/keypointdetection/tiny_pose/cpp/README.md
+++ b/examples/vision/keypointdetection/tiny_pose/cpp/README.md
@@ -32,6 +32,8 @@ wget https://bj.bcebos.com/paddlehub/fastdeploy/hrnet_demo.jpg
 ./infer_tinypose_demo PP_TinyPose_256x192_infer hrnet_demo.jpg 1
 # TensorRT Inference on GPU
 ./infer_tinypose_demo PP_TinyPose_256x192_infer hrnet_demo.jpg 2
+# XPU Inference
+./infer_tinypose_demo PP_TinyPose_256x192_infer hrnet_demo.jpg 3
 ```
 
 The visualized result after running is as shown below
diff --git a/examples/vision/keypointdetection/tiny_pose/cpp/pptinypose_infer.cc b/examples/vision/keypointdetection/tiny_pose/cpp/pptinypose_infer.cc
old mode 100644
new mode 100755
index 87792e6f5..b81761836
--- a/examples/vision/keypointdetection/tiny_pose/cpp/pptinypose_infer.cc
+++ b/examples/vision/keypointdetection/tiny_pose/cpp/pptinypose_infer.cc
@@ -53,6 +53,40 @@ void CpuInfer(const std::string& tinypose_model_dir,
             << std::endl;
 }
 
+void XpuInfer(const std::string& tinypose_model_dir,
+              const std::string& image_file) {
+  auto tinypose_model_file = tinypose_model_dir + sep + "model.pdmodel";
+  auto tinypose_params_file = tinypose_model_dir + sep + "model.pdiparams";
+  auto tinypose_config_file = tinypose_model_dir + sep + "infer_cfg.yml";
+  auto option = fastdeploy::RuntimeOption();
+  option.UseXpu();
+  auto tinypose_model = fastdeploy::vision::keypointdetection::PPTinyPose(
+      tinypose_model_file, tinypose_params_file, tinypose_config_file, option);
+  if (!tinypose_model.Initialized()) {
+    std::cerr << "TinyPose Model Failed to initialize." << std::endl;
+    return;
+  }
+
+  auto im = cv::imread(image_file);
+  fastdeploy::vision::KeyPointDetectionResult res;
+  if (!tinypose_model.Predict(&im, &res)) {
+    std::cerr << "TinyPose Prediction Failed." << std::endl;
+    return;
+  } else {
+    std::cout << "TinyPose Prediction Done!" << std::endl;
+  }
+  // Print the prediction results
+  std::cout << res.Str() << std::endl;
+
+  // Visualize the prediction results
+  auto tinypose_vis_im =
+      fastdeploy::vision::VisKeypointDetection(im, res, 0.5);
+  cv::imwrite("tinypose_vis_result.jpg", tinypose_vis_im);
+  std::cout << "TinyPose visualized result saved in ./tinypose_vis_result.jpg"
+            << std::endl;
+}
+
+
 void GpuInfer(const std::string& tinypose_model_dir,
               const std::string& image_file) {
   auto option = fastdeploy::RuntimeOption();
@@ -129,7 +163,7 @@ int main(int argc, char* argv[]) {
                  "e.g ./infer_model ./pptinypose_model_dir ./test.jpeg 0"
               << std::endl;
     std::cout << "The data type of run_option is int, 0: run with cpu; 1: run "
-                 "with gpu; 2: run with gpu and use tensorrt backend."
+                 "with gpu; 2: run with gpu and use tensorrt backend; 3: run with xpu."
               << std::endl;
     return -1;
   }
@@ -140,6 +174,8 @@ int main(int argc, char* argv[]) {
     GpuInfer(argv[1], argv[2]);
   } else if (std::atoi(argv[3]) == 2) {
     TrtInfer(argv[1], argv[2]);
+  } else if (std::atoi(argv[3]) == 3) {
+    XpuInfer(argv[1], argv[2]);
   }
   return 0;
 }
diff --git a/examples/vision/keypointdetection/tiny_pose/python/README.md b/examples/vision/keypointdetection/tiny_pose/python/README.md
old mode 100644
new mode 100755
index f8835e00e..6467de863
--- a/examples/vision/keypointdetection/tiny_pose/python/README.md
+++ b/examples/vision/keypointdetection/tiny_pose/python/README.md
@@ -25,6 +25,8 @@ python pptinypose_infer.py --tinypose_model_dir PP_TinyPose_256x192_infer --imag
 python pptinypose_infer.py --tinypose_model_dir PP_TinyPose_256x192_infer --image hrnet_demo.jpg --device gpu
 # TensorRT Inference on GPU (note: the first TensorRT run serializes the model, which takes a while; please be patient)
 python pptinypose_infer.py --tinypose_model_dir PP_TinyPose_256x192_infer --image hrnet_demo.jpg --device gpu --use_trt True
+# XPU Inference
+python pptinypose_infer.py --tinypose_model_dir PP_TinyPose_256x192_infer --image hrnet_demo.jpg --device xpu
 ```
 
 The visualized result after running is as shown below
diff --git a/examples/vision/keypointdetection/tiny_pose/python/pptinypose_infer.py b/examples/vision/keypointdetection/tiny_pose/python/pptinypose_infer.py
old mode 100644
new mode 100755
index 29c384b75..e103d75ef
--- a/examples/vision/keypointdetection/tiny_pose/python/pptinypose_infer.py
+++ b/examples/vision/keypointdetection/tiny_pose/python/pptinypose_infer.py
@@ -17,7 +17,7 @@ def parse_arguments():
         "--device",
         type=str,
         default='cpu',
-        help="type of inference device, support 'cpu' or 'gpu'.")
+        help="type of inference device, support 'cpu', 'xpu' or 'gpu'.")
     parser.add_argument(
         "--use_trt",
         type=ast.literal_eval,
@@ -32,6 +32,9 @@ def build_tinypose_option(args):
     if args.device.lower() == "gpu":
         option.use_gpu()
 
+    if args.device.lower() == "xpu":
+        option.use_xpu()
+
     if args.use_trt:
         option.use_trt_backend()
option.set_trt_input_shape("image", [1, 3, 256, 192]) diff --git a/examples/vision/matting/ppmatting/cpp/README.md b/examples/vision/matting/ppmatting/cpp/README.md old mode 100644 new mode 100755 index e8b919f92..219869fa5 --- a/examples/vision/matting/ppmatting/cpp/README.md +++ b/examples/vision/matting/ppmatting/cpp/README.md @@ -31,6 +31,8 @@ wget https://bj.bcebos.com/paddlehub/fastdeploy/matting_bgr.jpg ./infer_demo PP-Matting-512 matting_input.jpg matting_bgr.jpg 1 # GPU上TensorRT推理 ./infer_demo PP-Matting-512 matting_input.jpg matting_bgr.jpg 2 +# XPU推理 +./infer_demo PP-Matting-512 matting_input.jpg matting_bgr.jpg 3 ``` 运行完成可视化结果如下图所示 diff --git a/examples/vision/matting/ppmatting/cpp/infer.cc b/examples/vision/matting/ppmatting/cpp/infer.cc old mode 100644 new mode 100755 index 2acb2a8ca..00df1833c --- a/examples/vision/matting/ppmatting/cpp/infer.cc +++ b/examples/vision/matting/ppmatting/cpp/infer.cc @@ -51,6 +51,37 @@ void CpuInfer(const std::string& model_dir, const std::string& image_file, << std::endl; } +void XpuInfer(const std::string& model_dir, const std::string& image_file, + const std::string& background_file) { + auto model_file = model_dir + sep + "model.pdmodel"; + auto params_file = model_dir + sep + "model.pdiparams"; + auto config_file = model_dir + sep + "deploy.yaml"; + auto option = fastdeploy::RuntimeOption(); + option.UseXpu(); + auto model = fastdeploy::vision::matting::PPMatting(model_file, params_file, + config_file, option); + if (!model.Initialized()) { + std::cerr << "Failed to initialize." << std::endl; + return; + } + + auto im = cv::imread(image_file); + cv::Mat bg = cv::imread(background_file); + fastdeploy::vision::MattingResult res; + if (!model.Predict(&im, &res)) { + std::cerr << "Failed to predict." << std::endl; + return; + } + auto vis_im = fastdeploy::vision::VisMatting(im, res); + auto vis_im_with_bg = + fastdeploy::vision::SwapBackground(im, bg, res); + cv::imwrite("visualized_result.jpg", vis_im_with_bg); + cv::imwrite("visualized_result_fg.jpg", vis_im); + std::cout << "Visualized result save in ./visualized_result_replaced_bg.jpg " + "and ./visualized_result_fg.jpg" + << std::endl; +} + void GpuInfer(const std::string& model_dir, const std::string& image_file, const std::string& background_file) { auto model_file = model_dir + sep + "model.pdmodel"; @@ -125,7 +156,7 @@ int main(int argc, char* argv[]) { "e.g ./infer_model ./PP-Matting-512 ./test.jpg ./test_bg.jpg 0" << std::endl; std::cout << "The data type of run_option is int, 0: run with cpu; 1: run " - "with gpu; 2: run with gpu and use tensorrt backend." + "with gpu; 2: run with gpu and use tensorrt backend, 3: run with xpu." 
<< std::endl; return -1; } @@ -135,6 +166,8 @@ int main(int argc, char* argv[]) { GpuInfer(argv[1], argv[2], argv[3]); } else if (std::atoi(argv[4]) == 2) { TrtInfer(argv[1], argv[2], argv[3]); + } else if (std::atoi(argv[4]) == 3) { + XpuInfer(argv[1], argv[2], argv[3]); } return 0; } diff --git a/examples/vision/matting/ppmatting/python/README.md b/examples/vision/matting/ppmatting/python/README.md old mode 100644 new mode 100755 index a17b6af89..f9f15e5b4 --- a/examples/vision/matting/ppmatting/python/README.md +++ b/examples/vision/matting/ppmatting/python/README.md @@ -23,6 +23,8 @@ python infer.py --model PP-Matting-512 --image matting_input.jpg --bg matting_bg python infer.py --model PP-Matting-512 --image matting_input.jpg --bg matting_bgr.jpg --device gpu # GPU上使用TensorRT推理 (注意:TensorRT推理第一次运行,有序列化模型的操作,有一定耗时,需要耐心等待) python infer.py --model PP-Matting-512 --image matting_input.jpg --bg matting_bgr.jpg --device gpu --use_trt True +# XPU推理 +python infer.py --model PP-Matting-512 --image matting_input.jpg --bg matting_bgr.jpg --device xpu ``` 运行完成可视化结果如下图所示 diff --git a/examples/vision/matting/ppmatting/python/infer.py b/examples/vision/matting/ppmatting/python/infer.py old mode 100644 new mode 100755 index 89913cd11..12b4890f1 --- a/examples/vision/matting/ppmatting/python/infer.py +++ b/examples/vision/matting/ppmatting/python/infer.py @@ -21,7 +21,7 @@ def parse_arguments(): "--device", type=str, default='cpu', - help="Type of inference device, support 'cpu' or 'gpu'.") + help="Type of inference device, support 'cpu', 'xpu' or 'gpu'.") parser.add_argument( "--use_trt", type=ast.literal_eval, @@ -40,6 +40,8 @@ def build_option(args): option.use_trt_backend() option.set_trt_input_shape("img", [1, 3, 512, 512]) + if args.device.lower() == "xpu": + option.use_xpu() return option diff --git a/examples/vision/ocr/PP-OCRv2/cpp/README.md b/examples/vision/ocr/PP-OCRv2/cpp/README.md old mode 100644 new mode 100755 index 1bb794f5e..a93eb097f --- a/examples/vision/ocr/PP-OCRv2/cpp/README.md +++ b/examples/vision/ocr/PP-OCRv2/cpp/README.md @@ -39,6 +39,10 @@ wget https://gitee.com/paddlepaddle/PaddleOCR/raw/release/2.6/ppocr/utils/ppocr_ ./infer_demo ./ch_PP-OCRv2_det_infer ./ch_ppocr_mobile_v2.0_cls_infer ./ch_PP-OCRv2_rec_infer ./ppocr_keys_v1.txt ./12.jpg 1 # GPU上TensorRT推理 ./infer_demo ./ch_PP-OCRv2_det_infer ./ch_ppocr_mobile_v2.0_cls_infer ./ch_PP-OCRv2_rec_infer ./ppocr_keys_v1.txt ./12.jpg 2 +# GPU上Paddle-TRT推理 +./infer_demo ./ch_PP-OCRv2_det_infer ./ch_ppocr_mobile_v2.0_cls_infer ./ch_PP-OCRv2_rec_infer ./ppocr_keys_v1.txt ./12.jpg 3 +# XPU推理 +./infer_demo ./ch_PP-OCRv2_det_infer ./ch_ppocr_mobile_v2.0_cls_infer ./ch_PP-OCRv2_rec_infer ./ppocr_keys_v1.txt ./12.jpg 4 ``` 以上命令只适用于Linux或MacOS, Windows下SDK的使用方式请参考: diff --git a/examples/vision/ocr/PP-OCRv2/cpp/infer.cc b/examples/vision/ocr/PP-OCRv2/cpp/infer.cc old mode 100644 new mode 100755 index 6cde6390f..3435b7ad2 --- a/examples/vision/ocr/PP-OCRv2/cpp/infer.cc +++ b/examples/vision/ocr/PP-OCRv2/cpp/infer.cc @@ -100,7 +100,7 @@ int main(int argc, char* argv[]) { "./ppocr_keys_v1.txt ./12.jpg 0" << std::endl; std::cout << "The data type of run_option is int, 0: run with cpu; 1: run " - "with gpu; 2: run with gpu and use tensorrt backend." + "with gpu; 2: run with gpu and use tensorrt backend; 3: run with gpu and use Paddle-TRT; 4: run with xpu." 
<< std::endl; return -1; } @@ -120,6 +120,8 @@ int main(int argc, char* argv[]) { option.UseTrtBackend(); option.EnablePaddleTrtCollectShape(); option.EnablePaddleToTrt(); + } else if (flag == 4) { + option.UseXpu(); } std::string det_model_dir = argv[1]; diff --git a/examples/vision/ocr/PP-OCRv2/python/README.md b/examples/vision/ocr/PP-OCRv2/python/README.md old mode 100644 new mode 100755 index 89e5fc073..21e824d6e --- a/examples/vision/ocr/PP-OCRv2/python/README.md +++ b/examples/vision/ocr/PP-OCRv2/python/README.md @@ -34,6 +34,8 @@ python infer.py --det_model ch_PP-OCRv2_det_infer --cls_model ch_ppocr_mobile_v2 python infer.py --det_model ch_PP-OCRv2_det_infer --cls_model ch_ppocr_mobile_v2.0_cls_infer --rec_model ch_PP-OCRv2_rec_infer --rec_label_file ppocr_keys_v1.txt --image 12.jpg --device gpu # GPU上使用TensorRT推理 python infer.py --det_model ch_PP-OCRv2_det_infer --cls_model ch_ppocr_mobile_v2.0_cls_infer --rec_model ch_PP-OCRv2_rec_infer --rec_label_file ppocr_keys_v1.txt --image 12.jpg --device gpu --backend trt +# XPU推理 +python infer.py --det_model ch_PP-OCRv2_det_infer --cls_model ch_ppocr_mobile_v2.0_cls_infer --rec_model ch_PP-OCRv2_rec_infer --rec_label_file ppocr_keys_v1.txt --image 12.jpg --device xpu ``` 运行完成可视化结果如下图所示 diff --git a/examples/vision/ocr/PP-OCRv2/python/infer.py b/examples/vision/ocr/PP-OCRv2/python/infer.py old mode 100644 new mode 100755 index 1487d795f..02a443ee4 --- a/examples/vision/ocr/PP-OCRv2/python/infer.py +++ b/examples/vision/ocr/PP-OCRv2/python/infer.py @@ -41,7 +41,7 @@ def parse_arguments(): "--device", type=str, default='cpu', - help="Type of inference device, support 'cpu' or 'gpu'.") + help="Type of inference device, support 'cpu', 'xpu' or 'gpu'.") parser.add_argument( "--backend", type=str, @@ -68,6 +68,10 @@ def build_option(args): option.set_cpu_thread_num(args.cpu_thread_num) + if args.device.lower() == "xpu": + option.use_xpu() + return option + if args.backend.lower() == "trt": assert args.device.lower( ) == "gpu", "TensorRT backend require inference on device GPU." diff --git a/examples/vision/ocr/PP-OCRv3/cpp/README.md b/examples/vision/ocr/PP-OCRv3/cpp/README.md old mode 100644 new mode 100755 index bfbc0fc4a..301ccef3e --- a/examples/vision/ocr/PP-OCRv3/cpp/README.md +++ b/examples/vision/ocr/PP-OCRv3/cpp/README.md @@ -39,6 +39,10 @@ wget https://gitee.com/paddlepaddle/PaddleOCR/raw/release/2.6/ppocr/utils/ppocr_ ./infer_demo ./ch_PP-OCRv3_det_infer ./ch_ppocr_mobile_v2.0_cls_infer ./ch_PP-OCRv3_rec_infer ./ppocr_keys_v1.txt ./12.jpg 1 # GPU上TensorRT推理 ./infer_demo ./ch_PP-OCRv3_det_infer ./ch_ppocr_mobile_v2.0_cls_infer ./ch_PP-OCRv3_rec_infer ./ppocr_keys_v1.txt ./12.jpg 2 +# GPU上Paddle-TRT推理 +./infer_demo ./ch_PP-OCRv3_det_infer ./ch_ppocr_mobile_v2.0_cls_infer ./ch_PP-OCRv3_rec_infer ./ppocr_keys_v1.txt ./12.jpg 3 +# XPU推理 +./infer_demo ./ch_PP-OCRv3_det_infer ./ch_ppocr_mobile_v2.0_cls_infer ./ch_PP-OCRv3_rec_infer ./ppocr_keys_v1.txt ./12.jpg 4 ``` 以上命令只适用于Linux或MacOS, Windows下SDK的使用方式请参考: diff --git a/examples/vision/ocr/PP-OCRv3/cpp/infer.cc b/examples/vision/ocr/PP-OCRv3/cpp/infer.cc old mode 100644 new mode 100755 index 90b77679f..6146765fa --- a/examples/vision/ocr/PP-OCRv3/cpp/infer.cc +++ b/examples/vision/ocr/PP-OCRv3/cpp/infer.cc @@ -101,7 +101,7 @@ int main(int argc, char* argv[]) { "./ppocr_keys_v1.txt ./12.jpg 0" << std::endl; std::cout << "The data type of run_option is int, 0: run with cpu; 1: run " - "with gpu; 2: run with gpu and use tensorrt backend." 
+ "with gpu; 2: run with gpu and use tensorrt backend; 3: run with gpu and use Paddle-TRT; 4: run with xpu." << std::endl; return -1; } @@ -121,6 +121,8 @@ int main(int argc, char* argv[]) { option.UseTrtBackend(); option.EnablePaddleTrtCollectShape(); option.EnablePaddleToTrt(); + } else if (flag == 4) { + option.UseXpu(); } std::string det_model_dir = argv[1]; diff --git a/examples/vision/ocr/PP-OCRv3/python/README.md b/examples/vision/ocr/PP-OCRv3/python/README.md old mode 100644 new mode 100755 index 280ae7016..90bdf0e2c --- a/examples/vision/ocr/PP-OCRv3/python/README.md +++ b/examples/vision/ocr/PP-OCRv3/python/README.md @@ -33,6 +33,8 @@ python infer.py --det_model ch_PP-OCRv3_det_infer --cls_model ch_ppocr_mobile_v2 python infer.py --det_model ch_PP-OCRv3_det_infer --cls_model ch_ppocr_mobile_v2.0_cls_infer --rec_model ch_PP-OCRv3_rec_infer --rec_label_file ppocr_keys_v1.txt --image 12.jpg --device gpu # GPU上使用TensorRT推理 python infer.py --det_model ch_PP-OCRv3_det_infer --cls_model ch_ppocr_mobile_v2.0_cls_infer --rec_model ch_PP-OCRv3_rec_infer --rec_label_file ppocr_keys_v1.txt --image 12.jpg --device gpu --backend trt +# XPU推理 +python infer.py --det_model ch_PP-OCRv3_det_infer --cls_model ch_ppocr_mobile_v2.0_cls_infer --rec_model ch_PP-OCRv3_rec_infer --rec_label_file ppocr_keys_v1.txt --image 12.jpg --device xpu ``` 运行完成可视化结果如下图所示 diff --git a/examples/vision/ocr/PP-OCRv3/python/infer.py b/examples/vision/ocr/PP-OCRv3/python/infer.py old mode 100644 new mode 100755 index 1ec962cb5..0753c8594 --- a/examples/vision/ocr/PP-OCRv3/python/infer.py +++ b/examples/vision/ocr/PP-OCRv3/python/infer.py @@ -41,7 +41,7 @@ def parse_arguments(): "--device", type=str, default='cpu', - help="Type of inference device, support 'cpu' or 'gpu'.") + help="Type of inference device, support 'cpu', 'xpu' or 'gpu'.") parser.add_argument( "--backend", type=str, @@ -68,6 +68,10 @@ def build_option(args): option.set_cpu_thread_num(args.cpu_thread_num) + if args.device.lower() == "xpu": + option.use_xpu() + return option + if args.backend.lower() == "trt": assert args.device.lower( ) == "gpu", "TensorRT backend require inference on device GPU." 
diff --git a/examples/vision/segmentation/paddleseg/cpp/README.md b/examples/vision/segmentation/paddleseg/cpp/README.md
old mode 100644
new mode 100755
index e7b9dc5bf..620f32345
--- a/examples/vision/segmentation/paddleseg/cpp/README.md
+++ b/examples/vision/segmentation/paddleseg/cpp/README.md
@@ -32,6 +32,8 @@ wget https://paddleseg.bj.bcebos.com/dygraph/demo/cityscapes_demo.png
 ./infer_demo Unet_cityscapes_without_argmax_infer cityscapes_demo.png 1
 # TensorRT Inference on GPU
 ./infer_demo Unet_cityscapes_without_argmax_infer cityscapes_demo.png 2
+# XPU Inference
+./infer_demo Unet_cityscapes_without_argmax_infer cityscapes_demo.png 3
 ```
 
 The visualized result after running is as shown below
diff --git a/examples/vision/segmentation/paddleseg/cpp/infer.cc b/examples/vision/segmentation/paddleseg/cpp/infer.cc
old mode 100644
new mode 100755
index 28bb56009..d0b9af0ed
--- a/examples/vision/segmentation/paddleseg/cpp/infer.cc
+++ b/examples/vision/segmentation/paddleseg/cpp/infer.cc
@@ -48,6 +48,34 @@ void CpuInfer(const std::string& model_dir, const std::string& image_file) {
   std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl;
 }
 
+void XpuInfer(const std::string& model_dir, const std::string& image_file) {
+  auto model_file = model_dir + sep + "model.pdmodel";
+  auto params_file = model_dir + sep + "model.pdiparams";
+  auto config_file = model_dir + sep + "deploy.yaml";
+  auto option = fastdeploy::RuntimeOption();
+  option.UseXpu();
+  auto model = fastdeploy::vision::segmentation::PaddleSegModel(
+      model_file, params_file, config_file, option);
+
+  if (!model.Initialized()) {
+    std::cerr << "Failed to initialize." << std::endl;
+    return;
+  }
+
+  auto im = cv::imread(image_file);
+
+  fastdeploy::vision::SegmentationResult res;
+  if (!model.Predict(im, &res)) {
+    std::cerr << "Failed to predict." << std::endl;
+    return;
+  }
+
+  std::cout << res.Str() << std::endl;
+  auto vis_im = fastdeploy::vision::VisSegmentation(im, res, 0.5);
+  cv::imwrite("vis_result.jpg", vis_im);
+  std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl;
+}
+
 void GpuInfer(const std::string& model_dir, const std::string& image_file) {
   auto model_file = model_dir + sep + "model.pdmodel";
   auto params_file = model_dir + sep + "model.pdiparams";
@@ -114,7 +142,7 @@ int main(int argc, char* argv[]) {
                  "e.g ./infer_model ./ppseg_model_dir ./test.jpeg 0"
               << std::endl;
     std::cout << "The data type of run_option is int, 0: run with cpu; 1: run "
-                 "with gpu; 2: run with gpu and use tensorrt backend."
+                 "with gpu; 2: run with gpu and use tensorrt backend; 3: run with xpu."
               << std::endl;
     return -1;
   }
@@ -125,6 +153,8 @@ int main(int argc, char* argv[]) {
     GpuInfer(argv[1], argv[2]);
   } else if (std::atoi(argv[3]) == 2) {
     TrtInfer(argv[1], argv[2]);
+  } else if (std::atoi(argv[3]) == 3) {
+    XpuInfer(argv[1], argv[2]);
   }
   return 0;
 }
diff --git a/examples/vision/segmentation/paddleseg/python/README.md b/examples/vision/segmentation/paddleseg/python/README.md
old mode 100644
new mode 100755
index d5dd59e75..18c8092f5
--- a/examples/vision/segmentation/paddleseg/python/README.md
+++ b/examples/vision/segmentation/paddleseg/python/README.md
@@ -25,6 +25,8 @@ python infer.py --model Unet_cityscapes_without_argmax_infer --image cityscapes_
 python infer.py --model Unet_cityscapes_without_argmax_infer --image cityscapes_demo.png --device gpu
 # TensorRT Inference on GPU (note: the first TensorRT run serializes the model, which takes a while; please be patient)
 python infer.py --model Unet_cityscapes_without_argmax_infer --image cityscapes_demo.png --device gpu --use_trt True
+# XPU Inference
+python infer.py --model Unet_cityscapes_without_argmax_infer --image cityscapes_demo.png --device xpu
 ```
 
 The visualized result after running is as shown below
diff --git a/examples/vision/segmentation/paddleseg/python/infer.py b/examples/vision/segmentation/paddleseg/python/infer.py
old mode 100644
new mode 100755
index 9df7665a2..a1c31ebcd
--- a/examples/vision/segmentation/paddleseg/python/infer.py
+++ b/examples/vision/segmentation/paddleseg/python/infer.py
@@ -15,7 +15,7 @@ def parse_arguments():
         "--device",
         type=str,
         default='cpu',
-        help="Type of inference device, support 'cpu' or 'gpu'.")
+        help="Type of inference device, support 'cpu', 'xpu' or 'gpu'.")
     parser.add_argument(
         "--use_trt",
         type=ast.literal_eval,
@@ -30,6 +30,9 @@ def build_option(args):
     if args.device.lower() == "gpu":
         option.use_gpu()
 
+    if args.device.lower() == "xpu":
+        option.use_xpu()
+
     if args.use_trt:
         option.use_trt_backend()
         option.set_trt_input_shape("x", [1, 3, 256, 256], [1, 3, 1024, 1024],
diff --git a/fastdeploy/vision/faceid/contrib/insightface_rec.cc b/fastdeploy/vision/faceid/contrib/insightface_rec.cc
old mode 100644
new mode 100755
index cc4f1fcea..f8810e82a
--- a/fastdeploy/vision/faceid/contrib/insightface_rec.cc
+++ b/fastdeploy/vision/faceid/contrib/insightface_rec.cc
@@ -32,6 +32,7 @@ InsightFaceRecognitionModel::InsightFaceRecognitionModel(
   } else {
     valid_cpu_backends = {Backend::PDINFER, Backend::ORT, Backend::LITE};
     valid_gpu_backends = {Backend::PDINFER, Backend::ORT, Backend::TRT};
+    valid_xpu_backends = {Backend::LITE};
   }
   runtime_option = custom_option;
   runtime_option.model_format = model_format;
diff --git a/fastdeploy/vision/keypointdet/pptinypose/pptinypose.cc b/fastdeploy/vision/keypointdet/pptinypose/pptinypose.cc
old mode 100644
new mode 100755
index 94b7708f3..155ef218f
--- a/fastdeploy/vision/keypointdet/pptinypose/pptinypose.cc
+++ b/fastdeploy/vision/keypointdet/pptinypose/pptinypose.cc
@@ -18,6 +18,7 @@ PPTinyPose::PPTinyPose(const std::string& model_file,
   config_file_ = config_file;
   valid_cpu_backends = {Backend::PDINFER, Backend::ORT, Backend::OPENVINO,
                         Backend::LITE};
   valid_gpu_backends = {Backend::PDINFER, Backend::ORT, Backend::TRT};
+  valid_xpu_backends = {Backend::LITE};
   runtime_option = custom_option;
   runtime_option.model_format = model_format;
   runtime_option.model_file = model_file;
diff --git a/fastdeploy/vision/matting/ppmatting/ppmatting.cc b/fastdeploy/vision/matting/ppmatting/ppmatting.cc
old mode 100644
new mode 100755
index b15e5b209..1a7de4451
--- a/fastdeploy/vision/matting/ppmatting/ppmatting.cc
+++ b/fastdeploy/vision/matting/ppmatting/ppmatting.cc
@@ -28,6 +28,7 @@ PPMatting::PPMatting(const std::string& model_file,
   config_file_ = config_file;
   valid_cpu_backends = {Backend::ORT, Backend::PDINFER, Backend::LITE};
   valid_gpu_backends = {Backend::PDINFER, Backend::TRT};
+  valid_xpu_backends = {Backend::LITE};
   runtime_option = custom_option;
   runtime_option.model_format = model_format;
   runtime_option.model_file = model_file;
diff --git a/fastdeploy/vision/ocr/ppocr/classifier.cc b/fastdeploy/vision/ocr/ppocr/classifier.cc
index f7cde2eb2..7783ed828 100755
--- a/fastdeploy/vision/ocr/ppocr/classifier.cc
+++ b/fastdeploy/vision/ocr/ppocr/classifier.cc
@@ -32,6 +32,7 @@ Classifier::Classifier(const std::string& model_file,
   } else {
     valid_cpu_backends = {Backend::PDINFER, Backend::ORT, Backend::OPENVINO,
                           Backend::LITE};
     valid_gpu_backends = {Backend::PDINFER, Backend::ORT, Backend::TRT};
+    valid_xpu_backends = {Backend::LITE};
     valid_ascend_backends = {Backend::LITE};
   }
   runtime_option = custom_option;
diff --git a/fastdeploy/vision/ocr/ppocr/dbdetector.cc b/fastdeploy/vision/ocr/ppocr/dbdetector.cc
index 26b1ce378..3245d2dcc 100755
--- a/fastdeploy/vision/ocr/ppocr/dbdetector.cc
+++ b/fastdeploy/vision/ocr/ppocr/dbdetector.cc
@@ -32,6 +32,7 @@ DBDetector::DBDetector(const std::string& model_file,
   } else {
     valid_cpu_backends = {Backend::PDINFER, Backend::ORT, Backend::OPENVINO,
                           Backend::LITE};
     valid_gpu_backends = {Backend::PDINFER, Backend::ORT, Backend::TRT};
+    valid_xpu_backends = {Backend::LITE};
     valid_ascend_backends = {Backend::LITE};
   }
 
diff --git a/fastdeploy/vision/ocr/ppocr/recognizer.cc b/fastdeploy/vision/ocr/ppocr/recognizer.cc
index 87a6e5496..3cbe0c00c 100755
--- a/fastdeploy/vision/ocr/ppocr/recognizer.cc
+++ b/fastdeploy/vision/ocr/ppocr/recognizer.cc
@@ -34,6 +34,7 @@ Recognizer::Recognizer(const std::string& model_file,
   } else {
     valid_cpu_backends = {Backend::PDINFER, Backend::ORT, Backend::OPENVINO,
                           Backend::LITE};
     valid_gpu_backends = {Backend::PDINFER, Backend::ORT, Backend::TRT};
+    valid_xpu_backends = {Backend::LITE};
     valid_ascend_backends = {Backend::LITE};
   }
 
diff --git a/fastdeploy/vision/segmentation/ppseg/model.cc b/fastdeploy/vision/segmentation/ppseg/model.cc
index 4cc631978..9c158ef10 100755
--- a/fastdeploy/vision/segmentation/ppseg/model.cc
+++ b/fastdeploy/vision/segmentation/ppseg/model.cc
@@ -29,6 +29,7 @@ PaddleSegModel::PaddleSegModel(const std::string& model_file,
   valid_gpu_backends = {Backend::PDINFER, Backend::ORT, Backend::TRT};
   valid_rknpu_backends = {Backend::RKNPU2};
   valid_timvx_backends = {Backend::LITE};
+  valid_xpu_backends = {Backend::LITE};
   valid_ascend_backends = {Backend::LITE};
   runtime_option = custom_option;
   runtime_option.model_format = model_format;
@@ -89,4 +90,4 @@ bool PaddleSegModel::BatchPredict(const std::vector<cv::Mat>& imgs,
 }
 }  // namespace segmentation
 }  // namespace vision
-}  // namespace fastdeploy
\ No newline at end of file
+}  // namespace fastdeploy