mirror of
https://github.com/PaddlePaddle/FastDeploy.git
synced 2025-10-22 08:09:28 +08:00
[Model] Support PaddleYOLOv8 model (#1136)
@@ -42,6 +42,9 @@ target_link_libraries(infer_yolov6_demo ${FASTDEPLOY_LIBS})
 add_executable(infer_yolov7_demo ${PROJECT_SOURCE_DIR}/infer_yolov7.cc)
 target_link_libraries(infer_yolov7_demo ${FASTDEPLOY_LIBS})
 
+add_executable(infer_yolov8_demo ${PROJECT_SOURCE_DIR}/infer_yolov8.cc)
+target_link_libraries(infer_yolov8_demo ${FASTDEPLOY_LIBS})
+
 add_executable(infer_rtmdet_demo ${PROJECT_SOURCE_DIR}/infer_rtmdet.cc)
 target_link_libraries(infer_rtmdet_demo ${FASTDEPLOY_LIBS})
 
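For context, these demo targets are built against a pre-built FastDeploy C++ SDK. A minimal sketch of the usual configure-and-build step, assuming the SDK was extracted to a placeholder path (not part of this commit):

    # Hypothetical build of the cpp examples directory; adjust the SDK path
    # to wherever the FastDeploy C++ package was extracted.
    mkdir build && cd build
    cmake .. -DFASTDEPLOY_INSTALL_DIR=/path/to/fastdeploy-sdk
    make -j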
examples/vision/detection/paddledetection/cpp/infer_yolov8.cc (new executable file, 159 lines)
@@ -0,0 +1,159 @@
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "fastdeploy/vision.h"

#ifdef WIN32
const char sep = '\\';
#else
const char sep = '/';
#endif

void CpuInfer(const std::string& model_dir, const std::string& image_file) {
  auto model_file = model_dir + sep + "model.pdmodel";
  auto params_file = model_dir + sep + "model.pdiparams";
  auto config_file = model_dir + sep + "infer_cfg.yml";
  auto option = fastdeploy::RuntimeOption();
  option.UseCpu();
  auto model = fastdeploy::vision::detection::PaddleYOLOv8(model_file, params_file,
                                                           config_file, option);
  if (!model.Initialized()) {
    std::cerr << "Failed to initialize." << std::endl;
    return;
  }

  auto im = cv::imread(image_file);
  auto im_bak = im.clone();

  fastdeploy::vision::DetectionResult res;
  if (!model.Predict(&im, &res)) {
    std::cerr << "Failed to predict." << std::endl;
    return;
  }

  std::cout << res.Str() << std::endl;
  auto vis_im = fastdeploy::vision::VisDetection(im_bak, res, 0.5);
  cv::imwrite("vis_result.jpg", vis_im);
  std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl;
}

void KunlunXinInfer(const std::string& model_dir, const std::string& image_file) {
  auto model_file = model_dir + sep + "model.pdmodel";
  auto params_file = model_dir + sep + "model.pdiparams";
  auto config_file = model_dir + sep + "infer_cfg.yml";
  auto option = fastdeploy::RuntimeOption();
  option.UseKunlunXin();
  auto model = fastdeploy::vision::detection::PaddleYOLOv8(model_file, params_file,
                                                           config_file, option);
  if (!model.Initialized()) {
    std::cerr << "Failed to initialize." << std::endl;
    return;
  }

  auto im = cv::imread(image_file);
  auto im_bak = im.clone();

  fastdeploy::vision::DetectionResult res;
  if (!model.Predict(&im, &res)) {
    std::cerr << "Failed to predict." << std::endl;
    return;
  }

  std::cout << res.Str() << std::endl;
  auto vis_im = fastdeploy::vision::VisDetection(im_bak, res, 0.5);
  cv::imwrite("vis_result.jpg", vis_im);
  std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl;
}

void GpuInfer(const std::string& model_dir, const std::string& image_file) {
  auto model_file = model_dir + sep + "model.pdmodel";
  auto params_file = model_dir + sep + "model.pdiparams";
  auto config_file = model_dir + sep + "infer_cfg.yml";

  auto option = fastdeploy::RuntimeOption();
  option.UseGpu();
  auto model = fastdeploy::vision::detection::PaddleYOLOv8(model_file, params_file,
                                                           config_file, option);
  if (!model.Initialized()) {
    std::cerr << "Failed to initialize." << std::endl;
    return;
  }

  auto im = cv::imread(image_file);
  auto im_bak = im.clone();

  fastdeploy::vision::DetectionResult res;
  if (!model.Predict(&im, &res)) {
    std::cerr << "Failed to predict." << std::endl;
    return;
  }

  std::cout << res.Str() << std::endl;
  auto vis_im = fastdeploy::vision::VisDetection(im_bak, res, 0.5);
  cv::imwrite("vis_result.jpg", vis_im);
  std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl;
}

void TrtInfer(const std::string& model_dir, const std::string& image_file) {
  auto model_file = model_dir + sep + "model.pdmodel";
  auto params_file = model_dir + sep + "model.pdiparams";
  auto config_file = model_dir + sep + "infer_cfg.yml";

  auto option = fastdeploy::RuntimeOption();
  option.UseGpu();
  option.UseTrtBackend();
  auto model = fastdeploy::vision::detection::PaddleYOLOv8(model_file, params_file,
                                                           config_file, option);
  if (!model.Initialized()) {
    std::cerr << "Failed to initialize." << std::endl;
    return;
  }

  auto im = cv::imread(image_file);

  fastdeploy::vision::DetectionResult res;
  if (!model.Predict(&im, &res)) {
    std::cerr << "Failed to predict." << std::endl;
    return;
  }

  std::cout << res.Str() << std::endl;
  auto vis_im = fastdeploy::vision::VisDetection(im, res, 0.5);
  cv::imwrite("vis_result.jpg", vis_im);
  std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl;
}

int main(int argc, char* argv[]) {
  if (argc < 4) {
    std::cout
        << "Usage: infer_demo path/to/model_dir path/to/image run_option, "
           "e.g ./infer_model ./ppyolo_dirname ./test.jpeg 0"
        << std::endl;
    std::cout << "The data type of run_option is int, 0: run with cpu; 1: run "
                 "with gpu; 2: run with gpu and use tensorrt backend; 3: run "
                 "with kunlunxin."
              << std::endl;
    return -1;
  }

  if (std::atoi(argv[3]) == 0) {
    CpuInfer(argv[1], argv[2]);
  } else if (std::atoi(argv[3]) == 1) {
    GpuInfer(argv[1], argv[2]);
  } else if (std::atoi(argv[3]) == 2) {
    TrtInfer(argv[1], argv[2]);
  } else if (std::atoi(argv[3]) == 3) {
    KunlunXinInfer(argv[1], argv[2]);
  }
  return 0;
}
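Going by the usage message in main() and the infer_yolov8_demo target added to CMakeLists.txt above, the compiled demo would be invoked roughly as follows; the model directory and image paths are placeholders:

    # run_option: 0 = CPU, 1 = GPU, 2 = GPU + TensorRT backend, 3 = KunlunXin
    ./infer_yolov8_demo ./yolov8_model_dir ./test.jpeg 0
    ./infer_yolov8_demo ./yolov8_model_dir ./test.jpeg 2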
examples/vision/detection/paddledetection/python/infer_yolov8.py (new executable file, 62 lines)
@@ -0,0 +1,62 @@
import fastdeploy as fd
import cv2
import os


def parse_arguments():
    import argparse
    import ast
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_dir",
        required=True,
        help="Path of PaddleDetection model directory")
    parser.add_argument(
        "--image", required=True, help="Path of test image file.")
    parser.add_argument(
        "--device",
        type=str,
        default='cpu',
        help="Type of inference device, support 'kunlunxin', 'cpu' or 'gpu'.")
    parser.add_argument(
        "--use_trt",
        type=ast.literal_eval,
        default=False,
        help="Whether to use TensorRT.")
    return parser.parse_args()


def build_option(args):
    option = fd.RuntimeOption()

    if args.device.lower() == "kunlunxin":
        option.use_kunlunxin()

    if args.device.lower() == "gpu":
        option.use_gpu()

    if args.use_trt:
        option.use_trt_backend()
    return option


args = parse_arguments()

model_file = os.path.join(args.model_dir, "model.pdmodel")
params_file = os.path.join(args.model_dir, "model.pdiparams")
config_file = os.path.join(args.model_dir, "infer_cfg.yml")

# Configure the runtime and load the model
runtime_option = build_option(args)
model = fd.vision.detection.PaddleYOLOv8(
    model_file, params_file, config_file, runtime_option=runtime_option)

# Predict detection results for the image
im = cv2.imread(args.image)
result = model.predict(im.copy())
print(result)

# Visualize the prediction results
vis_im = fd.vision.vis_detection(im, result, score_threshold=0.5)
cv2.imwrite("visualized_result.jpg", vis_im)
print("Visualized result saved in ./visualized_result.jpg")
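Based on the script's argparse flags, example invocations would look like this; the model directory and image paths are placeholders:

    # CPU inference
    python infer_yolov8.py --model_dir ./yolov8_model_dir --image ./test.jpeg --device cpu
    # GPU inference with the TensorRT backend
    python infer_yolov8.py --model_dir ./yolov8_model_dir --image ./test.jpeg --device gpu --use_trt True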
@@ -245,6 +245,23 @@ class FASTDEPLOY_DECL PaddleYOLOv7 : public PPDetBase {
   virtual std::string ModelName() const { return "PaddleDetection/YOLOv7"; }
 };
 
+class FASTDEPLOY_DECL PaddleYOLOv8 : public PPDetBase {
+ public:
+  PaddleYOLOv8(const std::string& model_file, const std::string& params_file,
+               const std::string& config_file,
+               const RuntimeOption& custom_option = RuntimeOption(),
+               const ModelFormat& model_format = ModelFormat::PADDLE)
+      : PPDetBase(model_file, params_file, config_file, custom_option,
+                  model_format) {
+    valid_cpu_backends = {Backend::OPENVINO, Backend::ORT, Backend::PDINFER};
+    valid_gpu_backends = {Backend::ORT, Backend::PDINFER, Backend::TRT};
+    valid_kunlunxin_backends = {Backend::LITE};
+    initialized = Initialize();
+  }
+
+  virtual std::string ModelName() const { return "PaddleDetection/YOLOv8"; }
+};
+
 class FASTDEPLOY_DECL RTMDet : public PPDetBase {
  public:
   RTMDet(const std::string& model_file, const std::string& params_file,
@@ -490,6 +490,31 @@ class PaddleYOLOv7(PPYOLOE):
         assert self.initialized, "PaddleYOLOv7 model initialize failed."
 
 
+class PaddleYOLOv8(PPYOLOE):
+    def __init__(self,
+                 model_file,
+                 params_file,
+                 config_file,
+                 runtime_option=None,
+                 model_format=ModelFormat.PADDLE):
+        """Load a YOLOv8 model exported by PaddleDetection.
+
+        :param model_file: (str)Path of model file, e.g. yolov8/model.pdmodel
+        :param params_file: (str)Path of parameters file, e.g. yolov8/model.pdiparams; if the model_format is ModelFormat.ONNX, this parameter will be ignored and can be set to an empty string
+        :param config_file: (str)Path of configuration file for deployment, e.g. yolov8/infer_cfg.yml
+        :param runtime_option: (fastdeploy.RuntimeOption)RuntimeOption for inferring this model; if it is None, the default backend on CPU will be used
+        :param model_format: (fastdeploy.ModelFormat)Model format of the loaded model
+        """
+
+        super(PPYOLOE, self).__init__(runtime_option)
+
+        assert model_format == ModelFormat.PADDLE, "PaddleYOLOv8 model only supports model format of ModelFormat.PADDLE now."
+        self._model = C.vision.detection.PaddleYOLOv8(
+            model_file, params_file, config_file, self._runtime_option,
+            model_format)
+        assert self.initialized, "PaddleYOLOv8 model initialize failed."
+
+
 class RTMDet(PPYOLOE):
     def __init__(self,
                  model_file,