[Model] Support DINO & DETR and add PaddleDetectionModel class (#1837)

* add paddleclas model

* update README_CN

* update README_CN

* update README

* update get_model.sh

* update get_models.sh

* update paddleseg models

* update paddle_seg models

* update paddle_seg models

* modified test resources

* update benchmark_gpu_trt.sh

* add paddle detection

* add paddledetection to benchmark

* modified benchmark cmakelists

* update benchmark scripts

* modified benchmark function calling

* modified paddledetection documents

* add PaddleDetectionModel

* reset examples/paddledetection

* resolve conflict

* update pybind

* resolve conflict

* fix bug

* delete debug mode

* update checkarch log

* update trt inputs example

* Update README.md

---------

Co-authored-by: DefTruth <31974251+DefTruth@users.noreply.github.com>
linyangshi committed 2023-05-05 14:10:33 +08:00 (committed by GitHub)
parent 6d0261e9e4 · commit 9164796645
11 changed files with 378 additions and 3 deletions


@@ -1,6 +1,5 @@
PROJECT(infer_demo C CXX)
CMAKE_MINIMUM_REQUIRED (VERSION 3.10)
# specify the decompress directory of FastDeploy SDK
option(FASTDEPLOY_INSTALL_DIR "Path of downloaded fastdeploy sdk.")
include(${FASTDEPLOY_INSTALL_DIR}/utils/gflags.cmake)
@@ -39,6 +38,8 @@ add_executable(benchmark_retinanet ${PROJECT_SOURCE_DIR}/benchmark_retinanet.cc)
add_executable(benchmark_tood ${PROJECT_SOURCE_DIR}/benchmark_tood.cc)
add_executable(benchmark_ttfnet ${PROJECT_SOURCE_DIR}/benchmark_ttfnet.cc)
add_executable(benchmark ${PROJECT_SOURCE_DIR}/benchmark.cc)
add_executable(benchmark_ppdet ${PROJECT_SOURCE_DIR}/benchmark_ppdet.cc)
add_executable(benchmark_dino ${PROJECT_SOURCE_DIR}/benchmark_dino.cc)
if(UNIX AND (NOT APPLE) AND (NOT ANDROID))
target_link_libraries(benchmark_yolov5 ${FASTDEPLOY_LIBS} gflags pthread)
@@ -72,6 +73,8 @@ if(UNIX AND (NOT APPLE) AND (NOT ANDROID))
target_link_libraries(benchmark_tood ${FASTDEPLOY_LIBS} gflags pthread)
target_link_libraries(benchmark_ttfnet ${FASTDEPLOY_LIBS} gflags pthread)
target_link_libraries(benchmark ${FASTDEPLOY_LIBS} gflags pthread)
target_link_libraries(benchmark_ppdet ${FASTDEPLOY_LIBS} gflags pthread)
target_link_libraries(benchmark_dino ${FASTDEPLOY_LIBS} gflags pthread)
else()
target_link_libraries(benchmark_yolov5 ${FASTDEPLOY_LIBS} gflags)
target_link_libraries(benchmark_ppyolov5 ${FASTDEPLOY_LIBS} gflags)
@@ -104,6 +107,8 @@ else()
target_link_libraries(benchmark_tood ${FASTDEPLOY_LIBS} gflags)
target_link_libraries(benchmark_ttfnet ${FASTDEPLOY_LIBS} gflags)
target_link_libraries(benchmark ${FASTDEPLOY_LIBS} gflags)
target_link_libraries(benchmark_ppdet ${FASTDEPLOY_LIBS} gflags)
target_link_libraries(benchmark_dino ${FASTDEPLOY_LIBS} gflags)
endif()
# only for Android ADB test
if(ANDROID)


@@ -186,6 +186,10 @@ benchmark: ./benchmark -[info|diff|check|dump|mem] -model xxx -config_path xxx -
```bash
./benchmark --model ResNet50_vd_infer --config_path config/config.gpu.paddle_trt.fp16.txt --trt_shapes 1,3,224,224:1,3,224,224:1,3,224,224 --names inputs --dtypes FP32
```
- TensorRT/Paddle-TRT multi-input example
```bash
./benchmark --model rtdetr_r50vd_6x_coco --trt_shapes 1,2:1,2:1,2:1,3,640,640:1,3,640,640:1,3,640,640:1,2:1,2:1,2 --names im_shape:image:scale_factor --shapes 1,2:1,3,640,640:1,2 --config_path config/config.gpu.paddle_trt.fp32.txt --dtypes FP32:FP32:FP32
```
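- The same multi-input setup through the Python API — a minimal sketch, assuming an rtdetr-style export; the model directory path is illustrative:
```python
import fastdeploy as fd

option = fd.RuntimeOption()
option.use_gpu()
option.use_trt_backend()
# One call per input, mirroring --trt_shapes/--names above.
option.set_trt_input_shape("im_shape", [1, 2], [1, 2], [1, 2])
option.set_trt_input_shape("image", [1, 3, 640, 640], [1, 3, 640, 640],
                           [1, 3, 640, 640])
option.set_trt_input_shape("scale_factor", [1, 2], [1, 2], [1, 2])
```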
- Supports all FD backends and all model formats: --model_file, --params_file (optional), --model_format
```bash
# ONNX model example
@@ -206,4 +210,4 @@ benchmark: ./benchmark -[info|diff|check|dump|mem] -model xxx -config_path xxx -
- Show the model's input information: --info
```bash
./benchmark --info --model picodet_l_640_coco_lcnet --config_path config/config.arm.lite.fp32.txt
```


@@ -0,0 +1,118 @@
// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "flags.h"
#include "macros.h"
#include "option.h"
namespace vision = fastdeploy::vision;
namespace benchmark = fastdeploy::benchmark;
DEFINE_bool(no_nms, false, "Whether the model was exported without NMS; if true, apply NMS in the postprocessor.");
int main(int argc, char* argv[]) {
#if defined(ENABLE_BENCHMARK) && defined(ENABLE_VISION)
// Initialization
auto option = fastdeploy::RuntimeOption();
if (!CreateRuntimeOption(&option, argc, argv, true)) {
return -1;
}
auto im = cv::imread(FLAGS_image);
std::unordered_map<std::string, std::string> config_info;
benchmark::ResultManager::LoadBenchmarkConfig(FLAGS_config_path,
&config_info);
std::string model_name, params_name, config_name;
auto model_format = fastdeploy::ModelFormat::PADDLE;
if (!UpdateModelResourceName(&model_name, &params_name, &config_name,
&model_format, config_info)) {
return -1;
}
auto model_file = FLAGS_model + sep + model_name;
auto params_file = FLAGS_model + sep + params_name;
auto config_file = FLAGS_model + sep + config_name;
if (config_info["backend"] == "paddle_trt") {
option.paddle_infer_option.collect_trt_shape = true;
}
if (config_info["backend"] == "paddle_trt" ||
config_info["backend"] == "trt") {
option.trt_option.SetShape("im_shape",{1,2},{1,2},{1,2});
option.trt_option.SetShape("image", {1, 3, 320,320},{1, 3, 640, 640},
{1, 3, 1280, 1280});
option.trt_option.SetShape("scale_factor", {1, 2}, {1, 2},
{1, 2});
}
auto model_ppdet = vision::detection::PaddleDetectionModel(
model_file, params_file, config_file, option, model_format);
vision::DetectionResult res;
if (config_info["precision_compare"] == "true") {
// Run once at least
model_ppdet.Predict(im, &res);
// 1. Test result diff
std::cout << "=============== Test result diff =================\n";
// Save result to -> disk.
std::string det_result_path = "ppdet_result.txt";
benchmark::ResultManager::SaveDetectionResult(res, det_result_path);
// Load result from <- disk.
vision::DetectionResult res_loaded;
benchmark::ResultManager::LoadDetectionResult(&res_loaded, det_result_path);
// Calculate diff between two results.
auto det_diff =
benchmark::ResultManager::CalculateDiffStatis(res, res_loaded);
std::cout << "Boxes diff: mean=" << det_diff.boxes.mean
<< ", max=" << det_diff.boxes.max
<< ", min=" << det_diff.boxes.min << std::endl;
std::cout << "Label_ids diff: mean=" << det_diff.labels.mean
<< ", max=" << det_diff.labels.max
<< ", min=" << det_diff.labels.min << std::endl;
// 2. Test tensor diff
std::cout << "=============== Test tensor diff =================\n";
std::vector<vision::DetectionResult> batch_res;
std::vector<fastdeploy::FDTensor> input_tensors, output_tensors;
std::vector<cv::Mat> imgs;
imgs.push_back(im);
std::vector<vision::FDMat> fd_images = vision::WrapMat(imgs);
model_ppdet.GetPreprocessor().Run(&fd_images, &input_tensors);
input_tensors[0].name = "image";
input_tensors[1].name = "scale_factor";
input_tensors[2].name = "im_shape";
// Drop the trailing im_shape tensor; only image and scale_factor are fed
// to the runtime in this tensor-diff test.
input_tensors.pop_back();
model_ppdet.Infer(input_tensors, &output_tensors);
model_ppdet.GetPostprocessor().Run(output_tensors, &batch_res);
// Save tensor to -> disk.
auto& tensor_dump = output_tensors[0];
std::string det_tensor_path = "ppdet_tensor.txt";
benchmark::ResultManager::SaveFDTensor(tensor_dump, det_tensor_path);
// Load tensor from <- disk.
fastdeploy::FDTensor tensor_loaded;
benchmark::ResultManager::LoadFDTensor(&tensor_loaded, det_tensor_path);
// Calculate diff between two tensors.
auto det_tensor_diff = benchmark::ResultManager::CalculateDiffStatis(
tensor_dump, tensor_loaded);
std::cout << "Tensor diff: mean=" << det_tensor_diff.data.mean
<< ", max=" << det_tensor_diff.data.max
<< ", min=" << det_tensor_diff.data.min << std::endl;
}
// Run profiling
if (FLAGS_no_nms) {
model_ppdet.GetPostprocessor().ApplyNMS();
}
BENCHMARK_MODEL(model_ppdet, model_ppdet.Predict(im, &res))
auto vis_im = vision::VisDetection(im, res, 0.3);
cv::imwrite("vis_result.jpg", vis_im);
std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl;
#endif
return 0;
}
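For reference, the flow above maps onto a short Python sketch using the PaddleDetectionModel binding added in this commit; the model directory, image path, and shape choices are illustrative, not part of the commit:

```python
import cv2
import fastdeploy as fd

option = fd.RuntimeOption()
option.use_gpu()
option.use_trt_backend()
# Dynamic TRT shapes, matching the C++ SetShape calls above.
option.set_trt_input_shape("im_shape", [1, 2], [1, 2], [1, 2])
option.set_trt_input_shape("image", [1, 3, 320, 320], [1, 3, 640, 640],
                           [1, 3, 1280, 1280])
option.set_trt_input_shape("scale_factor", [1, 2], [1, 2], [1, 2])

model = fd.vision.detection.PaddleDetectionModel(
    "model_dir/model.pdmodel", "model_dir/model.pdiparams",
    "model_dir/infer_cfg.yml", runtime_option=option)
im = cv2.imread("test.jpg")
res = model.predict(im)
vis = fd.vision.vis_detection(im, res, score_threshold=0.3)
cv2.imwrite("vis_result.jpg", vis)
```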


@@ -0,0 +1,117 @@
// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "flags.h"
#include "macros.h"
#include "option.h"
namespace vision = fastdeploy::vision;
namespace benchmark = fastdeploy::benchmark;
DEFINE_bool(no_nms, false, "Whether the model was exported without NMS; if true, apply NMS in the postprocessor.");
int main(int argc, char* argv[]) {
#if defined(ENABLE_BENCHMARK) && defined(ENABLE_VISION)
// Initialization
auto option = fastdeploy::RuntimeOption();
if (!CreateRuntimeOption(&option, argc, argv, true)) {
return -1;
}
auto im = cv::imread(FLAGS_image);
std::unordered_map<std::string, std::string> config_info;
benchmark::ResultManager::LoadBenchmarkConfig(FLAGS_config_path,
&config_info);
std::string model_name, params_name, config_name;
auto model_format = fastdeploy::ModelFormat::PADDLE;
if (!UpdateModelResourceName(&model_name, &params_name, &config_name,
&model_format, config_info)) {
return -1;
}
auto model_file = FLAGS_model + sep + model_name;
auto params_file = FLAGS_model + sep + params_name;
auto config_file = FLAGS_model + sep + config_name;
if (config_info["backend"] == "paddle_trt") {
option.paddle_infer_option.collect_trt_shape = true;
}
if (config_info["backend"] == "paddle_trt" ||
config_info["backend"] == "trt") {
option.trt_option.SetShape("image", {1, 3, 640, 640}, {1, 3, 640, 640},
{1, 3, 640, 640});
option.trt_option.SetShape("scale_factor", {1, 2}, {1, 2},
{1, 2});
}
auto model_ppdet = vision::detection::PaddleDetectionModel(
model_file, params_file, config_file, option, model_format);
vision::DetectionResult res;
if (config_info["precision_compare"] == "true") {
// Run once at least
model_ppdet.Predict(im, &res);
// 1. Test result diff
std::cout << "=============== Test result diff =================\n";
// Save result to -> disk.
std::string det_result_path = "ppdet_result.txt";
benchmark::ResultManager::SaveDetectionResult(res, det_result_path);
// Load result from <- disk.
vision::DetectionResult res_loaded;
benchmark::ResultManager::LoadDetectionResult(&res_loaded, det_result_path);
// Calculate diff between two results.
auto det_diff =
benchmark::ResultManager::CalculateDiffStatis(res, res_loaded);
std::cout << "Boxes diff: mean=" << det_diff.boxes.mean
<< ", max=" << det_diff.boxes.max
<< ", min=" << det_diff.boxes.min << std::endl;
std::cout << "Label_ids diff: mean=" << det_diff.labels.mean
<< ", max=" << det_diff.labels.max
<< ", min=" << det_diff.labels.min << std::endl;
// 2. Test tensor diff
std::cout << "=============== Test tensor diff =================\n";
std::vector<vision::DetectionResult> batch_res;
std::vector<fastdeploy::FDTensor> input_tensors, output_tensors;
std::vector<cv::Mat> imgs;
imgs.push_back(im);
std::vector<vision::FDMat> fd_images = vision::WrapMat(imgs);
model_ppdet.GetPreprocessor().Run(&fd_images, &input_tensors);
input_tensors[0].name = "image";
input_tensors[1].name = "scale_factor";
input_tensors[2].name = "im_shape";
// Drop the trailing im_shape tensor; only image and scale_factor are fed
// to the runtime in this tensor-diff test.
input_tensors.pop_back();
model_ppdet.Infer(input_tensors, &output_tensors);
model_ppdet.GetPostprocessor().Run(output_tensors, &batch_res);
// Save tensor to -> disk.
auto& tensor_dump = output_tensors[0];
std::string det_tensor_path = "ppdet_tensor.txt";
benchmark::ResultManager::SaveFDTensor(tensor_dump, det_tensor_path);
// Load tensor from <- disk.
fastdeploy::FDTensor tensor_loaded;
benchmark::ResultManager::LoadFDTensor(&tensor_loaded, det_tensor_path);
// Calculate diff between two tensors.
auto det_tensor_diff = benchmark::ResultManager::CalculateDiffStatis(
tensor_dump, tensor_loaded);
std::cout << "Tensor diff: mean=" << det_tensor_diff.data.mean
<< ", max=" << det_tensor_diff.data.max
<< ", min=" << det_tensor_diff.data.min << std::endl;
}
// Run profiling
if (FLAGS_no_nms) {
model_ppdet.GetPostprocessor().ApplyNMS();
}
BENCHMARK_MODEL(model_ppdet, model_ppdet.Predict(im, &res))
auto vis_im = vision::VisDetection(im, res, 0.3);
cv::imwrite("vis_result.jpg", vis_im);
std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl;
#endif
return 0;
}

fastdeploy/runtime/option_pybind.cc (Normal file → Executable file)

@@ -43,6 +43,8 @@ void BindOption(pybind11::module& m) {
.def("use_sophgo", &RuntimeOption::UseSophgo) .def("use_sophgo", &RuntimeOption::UseSophgo)
.def("use_ascend", &RuntimeOption::UseAscend) .def("use_ascend", &RuntimeOption::UseAscend)
.def("use_kunlunxin", &RuntimeOption::UseKunlunXin) .def("use_kunlunxin", &RuntimeOption::UseKunlunXin)
.def("disable_valid_backend_check",&RuntimeOption::DisableValidBackendCheck)
.def("enable_valid_backend_check",&RuntimeOption::EnableValidBackendCheck)
.def_readwrite("paddle_lite_option", &RuntimeOption::paddle_lite_option) .def_readwrite("paddle_lite_option", &RuntimeOption::paddle_lite_option)
.def_readwrite("openvino_option", &RuntimeOption::openvino_option) .def_readwrite("openvino_option", &RuntimeOption::openvino_option)
.def_readwrite("ort_option", &RuntimeOption::ort_option) .def_readwrite("ort_option", &RuntimeOption::ort_option)

fastdeploy/vision/detection/ppdet/base.cc (Normal file → Executable file)

@@ -18,6 +18,7 @@ PPDetBase::PPDetBase(const std::string& model_file,
runtime_option.model_format = model_format;
runtime_option.model_file = model_file;
runtime_option.params_file = params_file;
}
std::unique_ptr<PPDetBase> PPDetBase::Clone() const {
@@ -82,6 +83,22 @@ bool PPDetBase::BatchPredict(const std::vector<cv::Mat>& imgs,
return true;
}
bool PPDetBase::CheckArch() {
std::vector<std::string> archs = {"SOLOv2", "YOLO", "SSD", "RetinaNet",
"RCNN", "Face", "GFL", "YOLOX", "YOLOv5", "YOLOv6", "YOLOv7", "RTMDet",
"FCOS", "TTFNet", "TOOD", "DETR"};
auto arch_ = preprocessor_.GetArch();
for (auto item : archs) {
if (arch_ == item) {
return true;
}
}
FDWARNING << "Please set a supported model arch; supported values: SOLOv2, "
<< "YOLO, SSD, RetinaNet, RCNN, Face, GFL, YOLOX, YOLOv5, YOLOv6, "
<< "YOLOv7, RTMDet, FCOS, TTFNet, TOOD, DETR." << std::endl;
return false;
}
} // namespace detection
} // namespace vision
} // namespace fastdeploy
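CheckArch() validates the `arch` field that PaddleDetection writes into the exported infer_cfg.yml. A hedged sketch for inspecting that field before loading a model (the model directory is illustrative):

```python
import yaml

# Peek at the arch value CheckArch() compares against its supported list.
with open("model_dir/infer_cfg.yml") as f:
    cfg = yaml.safe_load(f)
print(cfg.get("arch"))  # e.g. "DETR" for DINO/RT-DETR style exports
```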

fastdeploy/vision/detection/ppdet/base.h (Normal file → Executable file)

@@ -77,6 +77,7 @@ class FASTDEPLOY_DECL PPDetBase : public FastDeployModel {
virtual bool BatchPredict(const std::vector<cv::Mat>& imgs,
std::vector<DetectionResult>* results);
PaddleDetPreprocessor& GetPreprocessor() {
return preprocessor_;
}
@@ -84,6 +85,7 @@ class FASTDEPLOY_DECL PPDetBase : public FastDeployModel {
PaddleDetPostprocessor& GetPostprocessor() {
return postprocessor_;
}
virtual bool CheckArch();
protected:
virtual bool Initialize();


@@ -440,6 +440,29 @@ class FASTDEPLOY_DECL GFL : public PPDetBase {
virtual std::string ModelName() const { return "PaddleDetection/GFL"; }
};
class FASTDEPLOY_DECL PaddleDetectionModel : public PPDetBase {
public:
PaddleDetectionModel(const std::string& model_file, const std::string& params_file,
const std::string& config_file,
const RuntimeOption& custom_option = RuntimeOption(),
const ModelFormat& model_format = ModelFormat::PADDLE)
: PPDetBase(model_file, params_file, config_file, custom_option,
model_format) {
CheckArch();
valid_cpu_backends = {Backend::OPENVINO, Backend::ORT, Backend::PDINFER,
Backend::LITE};
valid_gpu_backends = {Backend::ORT, Backend::PDINFER, Backend::TRT};
valid_timvx_backends = {Backend::LITE};
valid_kunlunxin_backends = {Backend::LITE};
valid_rknpu_backends = {Backend::RKNPU2};
valid_ascend_backends = {Backend::LITE};
valid_sophgonpu_backends = {Backend::SOPHGOTPU};
initialized = Initialize();
}
virtual std::string ModelName() const { return "PaddleDetectionModel"; }
};
class FASTDEPLOY_DECL PPYOLOER : public PPDetBase {
public:
PPYOLOER(const std::string& model_file, const std::string& params_file,

fastdeploy/vision/detection/ppdet/ppdet_pybind.cc (Normal file → Executable file)

@@ -238,7 +238,12 @@ void BindPPDet(pybind11::module& m) {
m, "SOLOv2") m, "SOLOv2")
.def(pybind11::init<std::string, std::string, std::string, RuntimeOption, .def(pybind11::init<std::string, std::string, std::string, RuntimeOption,
ModelFormat>()); ModelFormat>());
pybind11::class_<vision::detection::PaddleDetectionModel, vision::detection::PPDetBase>(
m, "PaddleDetectionModel")
.def(pybind11::init<std::string, std::string, std::string, RuntimeOption,
ModelFormat>());
pybind11::class_<vision::detection::PPYOLOER, vision::detection::PPDetBase>(
m, "PPYOLOER")
.def(pybind11::init<std::string, std::string, std::string, RuntimeOption,


@@ -266,6 +266,16 @@ class RuntimeOption:
""" """
return self._option.use_ascend() return self._option.use_ascend()
def disable_valid_backend_check(self):
""" Disable checking validity of backend during inference
"""
return self._option.disable_valid_backend_check()
def enable_valid_backend_check(self):
"""Enable checking validity of backend during inference
"""
return self._option.enable_valid_backend_check()
def set_cpu_thread_num(self, thread_num=-1):
"""Set number of threads if inference with CPU


@@ -800,6 +800,78 @@ class GFL(PPYOLOE):
assert self.initialized, "GFL model initialize failed."
class PaddleDetectionModel(FastDeployModel):
def __init__(self,
model_file,
params_file,
config_file,
runtime_option=None,
model_format=ModelFormat.PADDLE):
"""Load a PaddleDetectionModel model exported by PaddleDetection.
:param model_file: (str)Path of model file, e.g ppyoloe/model.pdmodel
:param params_file: (str)Path of parameters file, e.g ppyoloe/model.pdiparams, if the model_fomat is ModelFormat.ONNX, this param will be ignored, can be set as empty string
:param config_file: (str)Path of configuration file for deployment, e.g ppyoloe/infer_cfg.yml
:param runtime_option: (fastdeploy.RuntimeOption)RuntimeOption for inference this model, if it's None, will use the default backend on CPU
:param model_format: (fastdeploy.ModelForamt)Model format of the loaded model
"""
super(PaddleDetectionModel, self).__init__(runtime_option)
self._model = C.vision.detection.PaddleDetectionModel(
model_file, params_file, config_file, self._runtime_option,
model_format)
assert self.initialized, "PaddleDetectionModel model initialize failed."
def predict(self, im):
"""Detect an input image
:param im: (numpy.ndarray)The input image data, 3-D array with layout HWC, BGR format
:return: DetectionResult
"""
assert im is not None, "The input image data is None."
return self._model.predict(im)
def batch_predict(self, images):
"""Detect a batch of input images
:param images: (list of numpy.ndarray) The input image list, each element is a 3-D array with layout HWC, BGR format
:return: list of DetectionResult
"""
return self._model.batch_predict(images)
def clone(self):
"""Clone PaddleDetectionModel object
:return: a new PaddleDetectionModel object
"""
class PaddleDetectionCloneModel(PaddleDetectionModel):
def __init__(self, model):
self._model = model
clone_model = PaddleDetectionCloneModel(self._model.clone())
return clone_model
@property
def preprocessor(self):
"""Get PaddleDetPreprocessor object of the loaded model
:return PaddleDetPreprocessor
"""
return self._model.preprocessor
@property
def postprocessor(self):
"""Get PaddleDetPostprocessor object of the loaded model
:return PaddleDetPostprocessor
"""
return self._model.postprocessor
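A short usage sketch for the new class (model directory and image path are illustrative):

```python
import cv2
import fastdeploy as fd

model = fd.vision.detection.PaddleDetectionModel(
    "model_dir/model.pdmodel", "model_dir/model.pdiparams",
    "model_dir/infer_cfg.yml")
im = cv2.imread("test.jpg")
print(model.predict(im))              # single image
print(model.batch_predict([im, im]))  # batch of images
clone = model.clone()                 # new handle over the loaded model
print(clone.predict(im))
```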
class PPYOLOER(PPYOLOE):
def __init__(self,
model_file,