From 12e5a65fc319a5451bc7c7295aa1fb1f56014ca6 Mon Sep 17 00:00:00 2001
From: Jason
Date: Fri, 7 Oct 2022 20:41:04 +0800
Subject: [PATCH] Add some comments for ppyoloe (#324)

* Add some comments for ppyoloe

* Update runtime.h
---
 Doxyfile                                    |  8 ++--
 fastdeploy/README.md                        | 27 ++++++++++++
 fastdeploy/fastdeploy_model.h               | 47 ++++++++++++++++++---
 fastdeploy/runtime.h                        |  2 +-
 fastdeploy/vision/AddModel.md               |  3 --
 fastdeploy/vision/common/result.h           | 32 +++++++++++++-
 fastdeploy/vision/detection/ppdet/ppyoloe.h | 38 +++++++++++++----
 7 files changed, 134 insertions(+), 23 deletions(-)
 create mode 100644 fastdeploy/README.md
 delete mode 100644 fastdeploy/vision/AddModel.md

diff --git a/Doxyfile b/Doxyfile
index 1b3f06810..46806d541 100644
--- a/Doxyfile
+++ b/Doxyfile
@@ -982,7 +982,7 @@ FILTER_SOURCE_PATTERNS =
 # (index.html). This can be useful if you have a project on for instance GitHub
 # and want to reuse the introduction page also for the doxygen output.
 
-USE_MDFILE_AS_MAINPAGE =
+USE_MDFILE_AS_MAINPAGE = fastdeploy/README.md
 
 #---------------------------------------------------------------------------
 # Configuration options related to source browsing
@@ -2059,7 +2059,7 @@ ENABLE_PREPROCESSING = YES
 # The default value is: NO.
 # This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
 
-MACRO_EXPANSION = NO
+MACRO_EXPANSION = YES
 
 # If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES then
 # the macro expansion is limited to the macros specified with the PREDEFINED and
@@ -2067,7 +2067,7 @@ ENABLE_PREPROCESSING = YES
 # The default value is: NO.
 # This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
 
-EXPAND_ONLY_PREDEF = NO
+EXPAND_ONLY_PREDEF = YES
 
 # If the SEARCH_INCLUDES tag is set to YES, the include files in the
 # INCLUDE_PATH will be searched if a #include is found.
@@ -2099,7 +2099,7 @@ INCLUDE_FILE_PATTERNS =
 # recursively expanded use the := operator instead of the = operator.
 # This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
 
-PREDEFINED =
+PREDEFINED = protected=private
 
 # If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then this
 # tag can be used to specify a list of macro names that should be expanded. The
diff --git a/fastdeploy/README.md b/fastdeploy/README.md
new file mode 100644
index 000000000..2266d476a
--- /dev/null
+++ b/fastdeploy/README.md
@@ -0,0 +1,27 @@
+# FastDeploy C++ API Summary
+
+## Runtime
+
+FastDeploy Runtime can be used as a standalone inference engine: with the same code, a Paddle/ONNX model can be deployed on different devices through different backends.
+Currently, the backends supported by FastDeploy are listed below.
+
+| Backend | Hardware | Supported Model Format | Platform |
+| :------ | :------- | :--------------------- | :------- |
+| Paddle Inference | CPU/Nvidia GPU | Paddle | Windows(x64)/Linux(x64) |
+| ONNX Runtime | CPU/Nvidia GPU | Paddle/ONNX | Windows(x64)/Linux(x64/aarch64)/Mac(x86/arm64) |
+| TensorRT | Nvidia GPU | Paddle/ONNX | Windows(x64)/Linux(x64)/Jetson |
+| OpenVINO | CPU | Paddle/ONNX | Windows(x64)/Linux(x64)/Mac(x86) |
+
+### Example code
+- [Python examples](./)
+- [C++ examples](./)
+
+### Related APIs
+- [RuntimeOption](./structfastdeploy_1_1RuntimeOption.html)
+- [Runtime](./structfastdeploy_1_1Runtime.html)
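+
+A minimal sketch of driving the Runtime API directly (the model paths and the backend choice are illustrative, and input tensor preparation is elided; see the example links above for complete code):
+
+```c++
+#include "fastdeploy/runtime.h"
+
+int main() {
+  auto option = fastdeploy::RuntimeOption();
+  // Load a Paddle model; an ONNX model can be loaded by also passing ModelFormat::ONNX
+  option.SetModelPath("model.pdmodel", "model.pdiparams");
+  option.UseCpu();
+  option.UseOrtBackend();  // or UsePaddleBackend()/UseTrtBackend()/UseOpenVINOBackend()
+
+  fastdeploy::Runtime runtime;
+  if (!runtime.Init(option)) {
+    return -1;
+  }
+  // Fill the input FDTensors (names/shapes from runtime.GetInputInfo(i)) before Infer
+  std::vector<fastdeploy::FDTensor> inputs(runtime.NumInputs());
+  std::vector<fastdeploy::FDTensor> outputs;
+  runtime.Infer(inputs, &outputs);
+  return 0;
+}
+```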
+
+## Vision Models
+
+| Task | Model | API | Example |
+| :---- | :---- | :---- | :----- |
+| object detection | PaddleDetection/PPYOLOE | [fastdeploy::vision::detection::PPYOLOE](./classfastdeploy_1_1vision_1_1detection_1_1PPYOLOE.html) | [C++](./)/[Python](./) |
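+
+A minimal end-to-end sketch for a vision model (paths such as `ppyoloe/model.pdmodel` and `test.jpg` are illustrative):
+
+```c++
+#include <iostream>
+#include "fastdeploy/vision.h"
+
+int main() {
+  auto model = fastdeploy::vision::detection::PPYOLOE(
+      "ppyoloe/model.pdmodel", "ppyoloe/model.pdiparams", "ppyoloe/infer_cfg.yml");
+  if (!model.Initialized()) {
+    std::cerr << "Failed to initialize." << std::endl;
+    return -1;
+  }
+  cv::Mat im = cv::imread("test.jpg");
+  fastdeploy::vision::DetectionResult result;
+  if (!model.Predict(&im, &result)) {
+    std::cerr << "Failed to predict." << std::endl;
+    return -1;
+  }
+  std::cout << result.Str() << std::endl;
+  return 0;
+}
+```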
diff --git a/fastdeploy/fastdeploy_model.h b/fastdeploy/fastdeploy_model.h
index cd373013a..030396c18 100644
--- a/fastdeploy/fastdeploy_model.h
+++ b/fastdeploy/fastdeploy_model.h
@@ -16,45 +16,82 @@ namespace fastdeploy {
 
+/*! @brief Base model object for all the vision models
+ */
 class FASTDEPLOY_DECL FastDeployModel {
  public:
+  /// Get model's name
   virtual std::string ModelName() const { return "NameUndefined"; }
 
-  virtual bool InitRuntime();
-  virtual bool CreateCpuBackend();
-  virtual bool CreateGpuBackend();
+  /** \brief Run inference with the runtime. This interface is called inside `Predict()`, so in most common situations there is no need to call `Infer()` directly
+   */
   virtual bool Infer(std::vector<FDTensor>& input_tensors,
                      std::vector<FDTensor>* output_tensors);
 
   RuntimeOption runtime_option;
+  /** \brief The valid CPU backends of this model. This member lists all the CPU backends that have been successfully tested for the model
+   */
   std::vector<Backend> valid_cpu_backends = {Backend::ORT};
+  /** The valid GPU backends of this model. This member lists all the GPU backends that have been successfully tested for the model
+   */
   std::vector<Backend> valid_gpu_backends = {Backend::ORT};
-  std::vector<Backend> valid_external_backends;
-  bool initialized = false;
 
+  /// Get the number of inputs of this model
   virtual int NumInputsOfRuntime() { return runtime_->NumInputs(); }
+  /// Get the number of outputs of this model
   virtual int NumOutputsOfRuntime() { return runtime_->NumOutputs(); }
+  /// Get the input information of this model
   virtual TensorInfo InputInfoOfRuntime(int index) {
     return runtime_->GetInputInfo(index);
   }
+  /// Get the output information of this model
   virtual TensorInfo OutputInfoOfRuntime(int index) {
     return runtime_->GetOutputInfo(index);
   }
+  /// Check if the model is initialized successfully
   virtual bool Initialized() const {
     return runtime_initialized_ && initialized;
   }
 
+  /** \brief A debug interface, used to record the time cost of the backend runtime
+   *
+   * example code @code
+   * auto model = fastdeploy::vision::detection::PPYOLOE("model.pdmodel", "model.pdiparams", "infer_cfg.yml");
+   * if (!model.Initialized()) {
+   *   std::cerr << "Failed to initialize." << std::endl;
+   *   return -1;
+   * }
+   * model.EnableRecordTimeOfRuntime();
+   * cv::Mat im = cv::imread("test.jpg");
+   * for (auto i = 0; i < 1000; ++i) {
+   *   fastdeploy::vision::DetectionResult result;
+   *   model.Predict(&im, &result);
+   * }
+   * model.PrintStatisInfoOfRuntime();
+   * @endcode After `PrintStatisInfoOfRuntime()` is called, the statistical information of the runtime will be printed to the console
+   */
   virtual void EnableRecordTimeOfRuntime() {
     time_of_runtime_.clear();
     std::vector<double>().swap(time_of_runtime_);
     enable_record_time_of_runtime_ = true;
   }
 
+  /** \brief Disable recording the time cost of the backend runtime, see `EnableRecordTimeOfRuntime()` for more detail
+   */
   virtual void DisableRecordTimeOfRuntime() {
     enable_record_time_of_runtime_ = false;
   }
 
+  /** \brief Print the statistical information of the runtime to the console, see `EnableRecordTimeOfRuntime()` for more detail
+   */
   virtual std::map<std::string, float> PrintStatisInfoOfRuntime();
 
+ protected:
+  virtual bool InitRuntime();
+  virtual bool CreateCpuBackend();
+  virtual bool CreateGpuBackend();
+  bool initialized = false;
+  std::vector<Backend> valid_external_backends;
+
  private:
   std::unique_ptr<Runtime> runtime_;
   bool runtime_initialized_ = false;
diff --git a/fastdeploy/runtime.h b/fastdeploy/runtime.h
index 3a17e8fba..7804c2ac4 100644
--- a/fastdeploy/runtime.h
+++ b/fastdeploy/runtime.h
@@ -54,7 +54,7 @@ FASTDEPLOY_DECL std::ostream& operator<<(std::ostream& out,
                                          const ModelFormat& format);
 
 /*! Paddle Lite power mode for mobile device. */
-enum FASTDEPLOY_DECL LitePowerMode {
+enum LitePowerMode {
   LITE_POWER_HIGH = 0,       ///< Use Lite Backend with high power mode
   LITE_POWER_LOW = 1,        ///< Use Lite Backend with low power mode
   LITE_POWER_FULL = 2,       ///< Use Lite Backend with full power mode
diff --git a/fastdeploy/vision/AddModel.md b/fastdeploy/vision/AddModel.md
deleted file mode 100644
index 30080bd5e..000000000
--- a/fastdeploy/vision/AddModel.md
+++ /dev/null
@@ -1,3 +0,0 @@
-# How to add a new model
-
-This document uses [yolov5](https://github.com/ultralytics/yolov5) as an example to show how to add support for a new model.
diff --git a/fastdeploy/vision/common/result.h b/fastdeploy/vision/common/result.h
index cd9422aee..a41bc561b 100644
--- a/fastdeploy/vision/common/result.h
+++ b/fastdeploy/vision/common/result.h
@@ -33,51 +33,81 @@ struct FASTDEPLOY_DECL BaseResult {
   ResultType type = ResultType::UNKNOWN_RESULT;
 };
 
+/*! @brief Classification result structure for all the image classification models
+ */
 struct FASTDEPLOY_DECL ClassifyResult : public BaseResult {
+  /// Classification results (label ids) for an image
   std::vector<int32_t> label_ids;
+  /// The confidence of each classification result
   std::vector<float> scores;
   ResultType type = ResultType::CLASSIFY;
 
+  /// Clear the result
   void Clear();
+
+  /// Debug function, convert the result to a string to print
   std::string Str();
 };
 
+/*! Mask structure, used in DetectionResult for instance segmentation models
+ */
 struct FASTDEPLOY_DECL Mask : public BaseResult {
+  /// Mask data buffer
   std::vector<uint8_t> data;
+  /// Shape of the mask
   std::vector<int64_t> shape;  // (H,W) ...
   ResultType type = ResultType::MASK;
 
+  /// Clear the mask
   void Clear();
+  /// Return a mutable pointer to the mask data buffer
   void* Data() { return data.data(); }
+  /// Return a read-only pointer to the mask data buffer
   const void* Data() const { return data.data(); }
+  /// Reserve size for the mask data buffer
   void Reserve(int size);
+  /// Resize the mask data buffer
   void Resize(int size);
+  /// Debug function, convert the result to a string to print
   std::string Str();
 };
 
+/*! @brief Detection result structure for all the object detection models and instance segmentation models
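+ *
+ * A short sketch of reading the fields after prediction (field meanings as documented below):
+ * @code
+ * fastdeploy::vision::DetectionResult result;
+ * // ... fill `result` via a detection model's Predict() ...
+ * for (size_t i = 0; i < result.boxes.size(); ++i) {
+ *   // each box is [xmin, ymin, xmax, ymax]
+ *   std::cout << "label=" << result.label_ids[i]
+ *             << " score=" << result.scores[i]
+ *             << " box=" << result.boxes[i][0] << "," << result.boxes[i][1]
+ *             << "," << result.boxes[i][2] << "," << result.boxes[i][3]
+ *             << std::endl;
+ * }
+ * @endcode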
+ */
 struct FASTDEPLOY_DECL DetectionResult : public BaseResult {
-  // box: xmin, ymin, xmax, ymax
+  /** \brief All the detected object boxes for an input image; the size of `boxes` is the number of detected objects, and each element of `boxes` is an array of 4 float values representing [xmin, ymin, xmax, ymax]
+   */
   std::vector<std::array<float, 4>> boxes;
+  /** \brief The confidence of each detected object
+   */
   std::vector<float> scores;
+  /// The classification label of each detected object
   std::vector<int32_t> label_ids;
+  /** \brief For instance segmentation models, `masks` holds the predicted mask of each detected object
+   */
   std::vector<Mask> masks;
+  /// Shows whether the DetectionResult has masks
   bool contain_masks = false;
 
   ResultType type = ResultType::DETECTION;
 
   DetectionResult() {}
   DetectionResult(const DetectionResult& res);
 
+  /// Clear the detection result
   void Clear();
 
   void Reserve(int size);
 
   void Resize(int size);
 
+  /// Debug function, convert the result to a string to print
   std::string Str();
 };
diff --git a/fastdeploy/vision/detection/ppdet/ppyoloe.h b/fastdeploy/vision/detection/ppdet/ppyoloe.h
index 0d2e97d86..f7f6da779 100644
--- a/fastdeploy/vision/detection/ppdet/ppyoloe.h
+++ b/fastdeploy/vision/detection/ppdet/ppyoloe.h
@@ -21,30 +21,50 @@ namespace fastdeploy {
 
 namespace vision {
+/** \brief All object detection model APIs are defined inside this namespace
+ *
+ */
 namespace detection {
 
+/*! @brief PPYOLOE model object, used to load a PPYOLOE model exported by PaddleDetection
+ */
 class FASTDEPLOY_DECL PPYOLOE : public FastDeployModel {
  public:
+  /** \brief Set the paths of the model file and the configuration file, and the runtime configuration
+   *
+   * \param[in] model_file Path of the model file, e.g. ppyoloe/model.pdmodel
+   * \param[in] params_file Path of the parameter file, e.g. ppyoloe/model.pdiparams; if the model format is ONNX, this parameter will be ignored
+   * \param[in] config_file Path of the configuration file for deployment, e.g. ppyoloe/infer_cfg.yml
+   * \param[in] custom_option RuntimeOption for inference; by default the model will run on CPU with the backend chosen from `valid_cpu_backends`
+   * \param[in] model_format Format of the loaded model; the default is the Paddle format
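+   *
+   * Example of constructing the model with a custom option (a sketch; paths are illustrative and `UseGpu()` assumes a GPU build):
+   * @code
+   * auto option = fastdeploy::RuntimeOption();
+   * option.UseGpu();
+   * auto model = fastdeploy::vision::detection::PPYOLOE(
+   *     "ppyoloe/model.pdmodel", "ppyoloe/model.pdiparams",
+   *     "ppyoloe/infer_cfg.yml", option);
+   * @endcode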
+   */
   PPYOLOE(const std::string& model_file, const std::string& params_file,
           const std::string& config_file,
           const RuntimeOption& custom_option = RuntimeOption(),
           const ModelFormat& model_format = ModelFormat::PADDLE);
 
+  /// Get model's name
   virtual std::string ModelName() const { return "PaddleDetection/PPYOLOE"; }
 
-  virtual bool Initialize();
-
-  virtual bool BuildPreprocessPipelineFromConfig();
-
-  virtual bool Preprocess(Mat* mat, std::vector<FDTensor>* outputs);
-
-  virtual bool Postprocess(std::vector<FDTensor>& infer_result,
-                           DetectionResult* result);
-
+  /** \brief Predict the detection result for an input image
+   *
+   * \param[in] im The input image data, which comes from cv::imread()
+   * \param[in] result The output detection result will be written to this structure
+   * \return true if the prediction succeeded, otherwise false
+   */
   virtual bool Predict(cv::Mat* im, DetectionResult* result);
 
  protected:
   PPYOLOE() {}
+  virtual bool Initialize();
+  /// Build the preprocess pipeline from the loaded model
+  virtual bool BuildPreprocessPipelineFromConfig();
+  /// Preprocess an input image, and set the preprocessed results to `outputs`
+  virtual bool Preprocess(Mat* mat, std::vector<FDTensor>* outputs);
+
+  /// Postprocess the inference results, and set the final result to `result`
+  virtual bool Postprocess(std::vector<FDTensor>& infer_result,
+                           DetectionResult* result);
 
   std::vector<std::shared_ptr<Processor>> processors_;
   std::string config_file_;