Add some comments for ppyoloe (#324)

* Add some comments for ppyoloe

* Update runtime.h
Jason
2022-10-07 20:41:04 +08:00
committed by GitHub
parent 116265d239
commit 12e5a65fc3
7 changed files with 134 additions and 23 deletions


@@ -982,7 +982,7 @@ FILTER_SOURCE_PATTERNS =
# (index.html). This can be useful if you have a project on for instance GitHub
# and want to reuse the introduction page also for the doxygen output.
USE_MDFILE_AS_MAINPAGE =
USE_MDFILE_AS_MAINPAGE = fastdeploy/README.md
#---------------------------------------------------------------------------
# Configuration options related to source browsing
@@ -2059,7 +2059,7 @@ ENABLE_PREPROCESSING = YES
# The default value is: NO.
# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
MACRO_EXPANSION = NO
MACRO_EXPANSION = YES
# If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES then
# the macro expansion is limited to the macros specified with the PREDEFINED and
@@ -2067,7 +2067,7 @@ MACRO_EXPANSION = NO
# The default value is: NO.
# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
EXPAND_ONLY_PREDEF = NO
EXPAND_ONLY_PREDEF = YES
# If the SEARCH_INCLUDES tag is set to YES, the include files in the
# INCLUDE_PATH will be searched if a #include is found.
@@ -2099,7 +2099,7 @@ INCLUDE_FILE_PATTERNS =
# recursively expanded use the := operator instead of the = operator.
# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
PREDEFINED =
PREDEFINED = protected=private
# If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then this
# tag can be used to specify a list of macro names that should be expanded. The

fastdeploy/README.md (new file, 27 lines)

@@ -0,0 +1,27 @@
# FastDeploy C++ API Summary
## Runtime
FastDeploy Runtime can be used as an inference engine with the same code: Paddle/ONNX models can be deployed on different devices through different backends.
Currently, the backends supported by FastDeploy are listed below.
| Backend | Hardware | Support Model Format | Platform |
| :------ | :------- | :------------------- | :------- |
| Paddle Inference | CPU/Nvidia GPU | Paddle | Windows(x64)/Linux(x64) |
| ONNX Runtime | CPU/Nvidia GPU | Paddle/ONNX | Windows(x64)/Linux(x64/aarch64)/Mac(x86/arm64) |
| TensorRT | Nvidia GPU | Paddle/ONNX | Windows(x64)/Linux(x64)/Jetson |
| OpenVINO | CPU | Paddle/ONNX | Windows(x64)/Linux(x64)/Mac(x86) |
### Example code
- [Python examples](./)
- [C++ examples](./)
### Related APIs
- [RuntimeOption](./structfastdeploy_1_1RuntimeOption.html)
- [Runtime](./structfastdeploy_1_1Runtime.html)
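A minimal sketch of how `RuntimeOption` and `Runtime` fit together (the include path, file names and option methods such as `SetModelPath`/`UseCpu`/`UseOrtBackend` are assumptions here; see the API pages above and the linked examples for the exact interface):

```c++
#include <iostream>
#include "fastdeploy/runtime.h"

int main() {
  fastdeploy::RuntimeOption option;
  // Point the runtime at a Paddle model (placeholder paths)
  option.SetModelPath("model.pdmodel", "model.pdiparams");
  // Choose the device and backend, e.g. ONNX Runtime on CPU
  option.UseCpu();
  option.UseOrtBackend();

  fastdeploy::Runtime runtime;
  if (!runtime.Init(option)) {
    std::cerr << "Failed to init runtime." << std::endl;
    return -1;
  }
  std::cout << "Inputs: " << runtime.NumInputs()
            << ", Outputs: " << runtime.NumOutputs() << std::endl;
  return 0;
}
```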
## Vision Models
| Task | Model | API | Example |
| :---- | :---- | :---- | :----- |
| object detection | PaddleDetection/PPYOLOE | [fastdeploy::vision::detection::PPYOLOE](./classfastdeploy_1_1vision_1_1detection_1_1PPYOLOE.html) | [C++](./)/[Python](./) |
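A rough end-to-end sketch of using a detection model from the table (the paths and include path are placeholders; the linked C++/Python examples contain the complete, verified code):

```c++
#include <iostream>
#include "fastdeploy/vision.h"

int main() {
  auto model = fastdeploy::vision::detection::PPYOLOE(
      "ppyoloe/model.pdmodel", "ppyoloe/model.pdiparams", "ppyoloe/infer_cfg.yml");
  if (!model.Initialized()) {
    std::cerr << "Failed to initialize." << std::endl;
    return -1;
  }
  cv::Mat im = cv::imread("test.jpg");
  fastdeploy::vision::DetectionResult result;
  if (!model.Predict(&im, &result)) {
    std::cerr << "Failed to predict." << std::endl;
    return -1;
  }
  // boxes, scores and label_ids are parallel arrays, one entry per detected object
  for (size_t i = 0; i < result.boxes.size(); ++i) {
    std::cout << "label=" << result.label_ids[i]
              << " score=" << result.scores[i]
              << " box=[" << result.boxes[i][0] << ", " << result.boxes[i][1]
              << ", " << result.boxes[i][2] << ", " << result.boxes[i][3] << "]"
              << std::endl;
  }
  return 0;
}
```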


@@ -16,45 +16,82 @@
namespace fastdeploy {
/*! @brief Base model object for all the vision models
*/
class FASTDEPLOY_DECL FastDeployModel {
public:
/// Get model's name
virtual std::string ModelName() const { return "NameUndefined"; }
virtual bool InitRuntime();
virtual bool CreateCpuBackend();
virtual bool CreateGpuBackend();
/** \brief Run inference with the runtime. This interface is already called inside `Predict()`, so in most cases there is no need to call `Infer()` directly.
*/
virtual bool Infer(std::vector<FDTensor>& input_tensors,
std::vector<FDTensor>* output_tensors);
RuntimeOption runtime_option;
/** \brief Model's valid CPU backends. This member defines all the CPU backends that have been successfully tested for the model
*/
std::vector<Backend> valid_cpu_backends = {Backend::ORT};
/** Model's valid GPU backends. This member defines all the GPU backends that have been successfully tested for the model
*/
std::vector<Backend> valid_gpu_backends = {Backend::ORT};
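// As a sketch, a derived model typically narrows these lists in its constructor
// to the backends it has been verified with (the enumerators below are
// illustrative, not the actual PPYOLOE configuration):
//   valid_cpu_backends = {Backend::ORT, Backend::PDINFER};
//   valid_gpu_backends = {Backend::ORT, Backend::PDINFER, Backend::TRT};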
std::vector<Backend> valid_external_backends;
bool initialized = false;
/// Get number of inputs for this model
virtual int NumInputsOfRuntime() { return runtime_->NumInputs(); }
/// Get number of outputs for this model
virtual int NumOutputsOfRuntime() { return runtime_->NumOutputs(); }
/// Get input information for this model
virtual TensorInfo InputInfoOfRuntime(int index) {
return runtime_->GetInputInfo(index);
}
/// Get output information for this model
virtual TensorInfo OutputInfoOfRuntime(int index) {
return runtime_->GetOutputInfo(index);
}
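// Usage sketch for the getters above (assuming `model` is an initialized model object):
//   for (int i = 0; i < model.NumInputsOfRuntime(); ++i) {
//     TensorInfo info = model.InputInfoOfRuntime(i);  // describes the i-th runtime input
//   }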
/// Check if the model is initialized successfully
virtual bool Initialized() const {
return runtime_initialized_ && initialized;
}
/** \brief This is a debug interface, used to record the execution time of the backend runtime
*
* example code @code
* auto model = fastdeploy::vision::detection::PPYOLOE("model.pdmodel", "model.pdiparams", "infer_cfg.yml");
* if (!model.Initialized()) {
* std::cerr << "Failed to initialize." << std::endl;
* return -1;
* }
* model.EnableRecordTimeOfRuntime();
* cv::Mat im = cv::imread("test.jpg");
* for (auto i = 0; i < 1000; ++i) {
* fastdeploy::vision::DetectionResult result;
* model.Predict(&im, &result);
* }
* model.PrintStatisInfoOfRuntime();
* @endcode After `PrintStatisInfoOfRuntime()` is called, the statistical information of the runtime will be printed to the console
*/
virtual void EnableRecordTimeOfRuntime() {
time_of_runtime_.clear();
std::vector<double>().swap(time_of_runtime_);
enable_record_time_of_runtime_ = true;
}
/** \brief Disable recording the time of the backend runtime, see `EnableRecordTimeOfRuntime()` for more details
*/
virtual void DisableRecordTimeOfRuntime() {
enable_record_time_of_runtime_ = false;
}
/** \brief Print the statistical information of the runtime to the console, see `EnableRecordTimeOfRuntime()` for more details
*/
virtual std::map<std::string, float> PrintStatisInfoOfRuntime();
protected:
virtual bool InitRuntime();
virtual bool CreateCpuBackend();
virtual bool CreateGpuBackend();
bool initialized = false;
std::vector<Backend> valid_external_backends;
private:
std::unique_ptr<Runtime> runtime_;
bool runtime_initialized_ = false;


@@ -54,7 +54,7 @@ FASTDEPLOY_DECL std::ostream& operator<<(std::ostream& out,
const ModelFormat& format);
/*! Paddle Lite power mode for mobile devices. */
enum FASTDEPLOY_DECL LitePowerMode {
enum LitePowerMode {
LITE_POWER_HIGH = 0, ///< Use Lite Backend with high power mode
LITE_POWER_LOW = 1, ///< Use Lite Backend with low power mode
LITE_POWER_FULL = 2, ///< Use Lite Backend with full power mode


@@ -1,3 +0,0 @@
# How to add a new model
This document uses [yolov5](https://github.com/ultralytics/yolov5) as an example to explain how to add support for a new model.


@@ -33,51 +33,81 @@ struct FASTDEPLOY_DECL BaseResult {
ResultType type = ResultType::UNKNOWN_RESULT;
};
/*! @brief Classification result structure for all the image classification models
*/
struct FASTDEPLOY_DECL ClassifyResult : public BaseResult {
/// Classification label ids for an image
std::vector<int32_t> label_ids;
/// The confidence for each classification result
std::vector<float> scores;
ResultType type = ResultType::CLASSIFY;
/// Clear result
void Clear();
/// Debug function, convert the result to string to print
std::string Str();
};
/*! Mask structure, used in DetectionResult for instance segmentation models
*/
struct FASTDEPLOY_DECL Mask : public BaseResult {
/// Mask data buffer
std::vector<int32_t> data;
/// Shape of mask
std::vector<int64_t> shape; // (H,W) ...
ResultType type = ResultType::MASK;
/// Clear the mask
void Clear();
/// Return a mutable pointer of the mask data buffer
void* Data() { return data.data(); }
/// Return a pointer of the mask data buffer for read only
const void* Data() const { return data.data(); }
/// Reserve size for mask data buffer
void Reserve(int size);
/// Resize the mask data buffer
void Resize(int size);
/// Debug function, convert the result to string to print
std::string Str();
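// Usage sketch (assuming `mask` is a filled Mask): the buffer typically holds
// H*W int32 values in row-major order, so pixel (y, x) is
//   mask.data[y * mask.shape[1] + x]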
};
/*! @brief Detection result structure for all the object detection models and instance segmentation models
*/
struct FASTDEPLOY_DECL DetectionResult : public BaseResult {
// box: xmin, ymin, xmax, ymax
/** \brief All the detected object boxes for an input image; the size of `boxes` is the number of detected objects, and each element of `boxes` is an array of 4 float values representing [xmin, ymin, xmax, ymax]
*/
std::vector<std::array<float, 4>> boxes;
/** \brief The confidence for all the detected objects
*/
std::vector<float> scores;
/// The classification label ids for all the detected objects
std::vector<int32_t> label_ids;
/** \brief For instance segmentation models, `masks` holds the predicted masks for all the detected objects
*/
std::vector<Mask> masks;
/// Indicates whether the DetectionResult contains masks
bool contain_masks = false;
ResultType type = ResultType::DETECTION;
DetectionResult() {}
DetectionResult(const DetectionResult& res);
/// Clear detection result
void Clear();
void Reserve(int size);
void Resize(int size);
/// Debug function, convert the result to string to print
std::string Str();
};


@@ -21,30 +21,50 @@
namespace fastdeploy {
namespace vision {
/** \brief All object detection model APIs are defined inside this namespace
*
*/
namespace detection {
/*! @brief PPYOLOE model object, used to load a PPYOLOE model exported by PaddleDetection
*/
class FASTDEPLOY_DECL PPYOLOE : public FastDeployModel {
public:
/** \brief Set path of model file and configuration file, and the configuration of runtime
*
* \param[in] model_file Path of the model file, e.g. ppyoloe/model.pdmodel
* \param[in] params_file Path of the parameter file, e.g. ppyoloe/model.pdiparams; if the model format is ONNX, this parameter will be ignored
* \param[in] config_file Path of the configuration file for deployment, e.g. ppyoloe/infer_cfg.yml
* \param[in] custom_option RuntimeOption for inference; by default the model runs on CPU with a backend chosen from `valid_cpu_backends`
* \param[in] model_format Model format of the loaded model, default is Paddle format
*/
PPYOLOE(const std::string& model_file, const std::string& params_file,
const std::string& config_file,
const RuntimeOption& custom_option = RuntimeOption(),
const ModelFormat& model_format = ModelFormat::PADDLE);
/// Get model's name
virtual std::string ModelName() const { return "PaddleDetection/PPYOLOE"; }
virtual bool Initialize();
virtual bool BuildPreprocessPipelineFromConfig();
virtual bool Preprocess(Mat* mat, std::vector<FDTensor>* outputs);
virtual bool Postprocess(std::vector<FDTensor>& infer_result,
DetectionResult* result);
/** \brief Predict the detection result for an input image
*
* \param[in] im The input image data, which comes from cv::imread()
* \param[in] result The output detection result will be written to this structure
* \return true if the prediction succeeded, otherwise false
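*
* A compact usage sketch (the file names below are placeholders):
* @code
* auto model = fastdeploy::vision::detection::PPYOLOE(
*     "model.pdmodel", "model.pdiparams", "infer_cfg.yml");
* cv::Mat im = cv::imread("test.jpg");
* fastdeploy::vision::DetectionResult result;
* if (model.Predict(&im, &result)) {
*   std::cout << result.Str() << std::endl;
* }
* @endcode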
*/
virtual bool Predict(cv::Mat* im, DetectionResult* result);
protected:
PPYOLOE() {}
virtual bool Initialize();
/// Build the preprocess pipeline from the loaded model
virtual bool BuildPreprocessPipelineFromConfig();
/// Preprocess an input image, and set the preprocessed results to `outputs`
virtual bool Preprocess(Mat* mat, std::vector<FDTensor>* outputs);
/// Postprocess the inference results, and store the final detection result in `result`
virtual bool Postprocess(std::vector<FDTensor>& infer_result,
DetectionResult* result);
std::vector<std::shared_ptr<Processor>> processors_;
std::string config_file_;