[Doc] Add comments for PPSeg && PPClas (#396)
* Add comment for PPSeg && PPClas
* Update main_page.md
@@ -26,3 +26,5 @@ Currently, FastDeploy supported backends listed as below,

| Task | Model | API | Example |
| :---- | :---- | :---- | :----- |
| object detection | PaddleDetection/PPYOLOE | [fastdeploy::vision::detection::PPYOLOE](./classfastdeploy_1_1vision_1_1detection_1_1PPYOLOE.html) | [C++](./)/[Python](./) |
| image classification | PaddleClas series | [fastdeploy::vision::classification::PaddleClasModel](./classfastdeploy_1_1vision_1_1classification_1_1PaddleClasModel.html) | [C++](./)/[Python](./) |
| semantic segmentation | PaddleSeg series | [fastdeploy::vision::segmentation::PaddleSegModel](./classfastdeploy_1_1vision_1_1segmentation_1_1PaddleSegModel.html) | [C++](./)/[Python](./) |
@@ -1,3 +1,9 @@

# Semantic Segmentation API

coming soon...

## fastdeploy.vision.segmentation.PaddleSegModel

```{eval-rst}
.. autoclass:: fastdeploy.vision.segmentation.PaddleSegModel
    :members:
    :inherited-members:
```
@@ -19,17 +19,37 @@

namespace fastdeploy {
namespace vision {
/** \brief All classification model APIs are defined inside this namespace
 *
 */
namespace classification {

/*! @brief PaddleClas series model object, used to load a classification model exported by the PaddleClas repository
 */
class FASTDEPLOY_DECL PaddleClasModel : public FastDeployModel {
 public:
  /** \brief Set path of model file and configuration file, and the configuration of runtime
   *
   * \param[in] model_file Path of model file, e.g. resnet/model.pdmodel
   * \param[in] params_file Path of parameter file, e.g. resnet/model.pdiparams; if the model format is ONNX, this parameter will be ignored
   * \param[in] config_file Path of configuration file for deployment, e.g. resnet/infer_cfg.yml
   * \param[in] custom_option RuntimeOption for inference; the default will use CPU and choose the backend defined in `valid_cpu_backends`
   * \param[in] model_format Model format of the loaded model, default is Paddle format
   */
  PaddleClasModel(const std::string& model_file, const std::string& params_file,
                  const std::string& config_file,
                  const RuntimeOption& custom_option = RuntimeOption(),
                  const ModelFormat& model_format = ModelFormat::PADDLE);

  /// Get model's name
  virtual std::string ModelName() const { return "PaddleClas/Model"; }

  /** \brief Predict the classification result for an input image
   *
   * \param[in] im The input image data, comes from cv::imread()
   * \param[in] result The output classification result will be written to this structure
   * \param[in] topk (int) Return the top-k results ranked by classification confidence score, default 1
   * \return true if the prediction succeeded, otherwise false
   */
  // TODO(jiangjiajun) Batch is on the way
  virtual bool Predict(cv::Mat* im, ClassifyResult* result, int topk = 1);
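For quick reference, a minimal Python sketch of the classification flow documented above. It assumes the Python binding `fastdeploy.vision.classification.PaddleClasModel` mirrors the C++ constructor and `Predict` signature, and that the example `resnet/` files and a test image exist locally:

```python
import cv2
import fastdeploy as fd

# Paths follow the examples in the doc comments above (assumed to exist locally).
# Default runtime option: CPU, with the backend chosen from valid_cpu_backends.
model = fd.vision.classification.PaddleClasModel(
    "resnet/model.pdmodel",
    "resnet/model.pdiparams",
    "resnet/infer_cfg.yml")

im = cv2.imread("test.jpg")             # HWC, BGR image, as produced by cv::imread
result = model.predict(im, topk=5)      # top-5 results ranked by confidence score
print(result.label_ids, result.scores)  # fields assumed from ClassifyResult
```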
@@ -1,3 +1,17 @@

// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "fastdeploy/vision/segmentation/ppseg/model.h"

#include "fastdeploy/vision.h"
#include "fastdeploy/vision/utils/utils.h"
@@ -1,3 +1,17 @@

// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once
#include "fastdeploy/fastdeploy_model.h"
#include "fastdeploy/vision/common/processors/transform.h"
@@ -5,21 +19,45 @@

namespace fastdeploy {
namespace vision {
/** \brief All segmentation model APIs are defined inside this namespace
 *
 */
namespace segmentation {

/*! @brief PaddleSeg series model object, used to load a segmentation model exported by the PaddleSeg repository
 */
class FASTDEPLOY_DECL PaddleSegModel : public FastDeployModel {
 public:
  /** \brief Set path of model file and configuration file, and the configuration of runtime
   *
   * \param[in] model_file Path of model file, e.g. unet/model.pdmodel
   * \param[in] params_file Path of parameter file, e.g. unet/model.pdiparams; if the model format is ONNX, this parameter will be ignored
   * \param[in] config_file Path of configuration file for deployment, e.g. unet/deploy.yml
   * \param[in] custom_option RuntimeOption for inference; the default will use CPU and choose the backend defined in `valid_cpu_backends`
   * \param[in] model_format Model format of the loaded model, default is Paddle format
   */
  PaddleSegModel(const std::string& model_file, const std::string& params_file,
                 const std::string& config_file,
                 const RuntimeOption& custom_option = RuntimeOption(),
                 const ModelFormat& model_format = ModelFormat::PADDLE);

  /// Get model's name
  std::string ModelName() const { return "PaddleSeg"; }

  /** \brief Predict the segmentation result for an input image
   *
   * \param[in] im The input image data, comes from cv::imread()
   * \param[in] result The output segmentation result will be written to this structure
   * \return true if the segmentation prediction succeeded, otherwise false
   */
  virtual bool Predict(cv::Mat* im, SegmentationResult* result);

  /** \brief Whether to apply the softmax operator in postprocessing, default value is false
   */
  bool apply_softmax = false;

  /** \brief For PP-HumanSeg models, set to true if the input image is a vertical image (height > width), default value is false
   */
  bool is_vertical_screen = false;

 private:
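The same constructor and `Predict` flow from the segmentation header above, sketched through the Python binding documented below. The `unet/` paths are the ones used in the doc comments, and the GPU option is only an illustration of overriding the default CPU `RuntimeOption` (assuming the standard `use_gpu` helper):

```python
import cv2
import fastdeploy as fd

# Override the default CPU runtime with a GPU option (illustrative; adjust to your device).
option = fd.RuntimeOption()
option.use_gpu(0)

model = fd.vision.segmentation.PaddleSegModel(
    "unet/model.pdmodel",
    "unet/model.pdiparams",
    "unet/deploy.yml",
    runtime_option=option)

im = cv2.imread("street.jpg")   # 3-D ndarray, HWC layout, BGR format
result = model.predict(im)      # returns a SegmentationResult
```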
@@ -25,6 +25,14 @@ class PaddleSegModel(FastDeployModel):
                 config_file,
                 runtime_option=None,
                 model_format=ModelFormat.PADDLE):
        """Load an image segmentation model exported by PaddleSeg.

        :param model_file: (str) Path of model file, e.g. unet/model.pdmodel
        :param params_file: (str) Path of parameters file, e.g. unet/model.pdiparams; if the model format is ModelFormat.ONNX, this parameter will be ignored and can be set as an empty string
        :param config_file: (str) Path of configuration file for deployment, e.g. unet/deploy.yml
        :param runtime_option: (fastdeploy.RuntimeOption) RuntimeOption for inference with this model; if it's None, the default backend on CPU will be used
        :param model_format: (fastdeploy.ModelFormat) Model format of the loaded model
        """
        super(PaddleSegModel, self).__init__(runtime_option)

        assert model_format == ModelFormat.PADDLE, "PaddleSeg only supports model format of ModelFormat.PADDLE now."
@@ -34,14 +42,27 @@ class PaddleSegModel(FastDeployModel):
        assert self.initialized, "PaddleSeg model initialize failed."

    def predict(self, input_image):
        """Predict the segmentation result for an input image

        :param input_image: (numpy.ndarray) The input image data, 3-D array with layout HWC, BGR format
        :return: SegmentationResult
        """
        return self._model.predict(input_image)

    @property
    def apply_softmax(self):
        """Attribute of PaddleSeg model. States whether the softmax operator is applied in postprocessing, default value is False

        :return: value of apply_softmax (bool)
        """
        return self._model.apply_softmax

    @apply_softmax.setter
    def apply_softmax(self, value):
        """Set attribute apply_softmax of PaddleSeg model.

        :param value: (bool) The value to set apply_softmax
        """
        assert isinstance(
            value,
            bool), "The value to set `apply_softmax` must be type of bool."
@@ -49,10 +70,18 @@ class PaddleSegModel(FastDeployModel):

    @property
    def is_vertical_screen(self):
        """Attribute of PP-HumanSeg model. States whether the input image is a vertical image (height > width), default value is False

        :return: value of is_vertical_screen (bool)
        """
        return self._model.is_vertical_screen

    @is_vertical_screen.setter
    def is_vertical_screen(self, value):
        """Set attribute is_vertical_screen of PP-HumanSeg model.

        :param value: (bool) The value to set is_vertical_screen
        """
        assert isinstance(
            value,
            bool), "The value to set `is_vertical_screen` must be type of bool."
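A short sketch of the two post-processing attributes documented above, e.g. for a PP-HumanSeg model fed a vertical (portrait) image; the model paths are placeholders:

```python
import cv2
import fastdeploy as fd

model = fd.vision.segmentation.PaddleSegModel(
    "pp_humanseg/model.pdmodel",
    "pp_humanseg/model.pdiparams",
    "pp_humanseg/deploy.yml")

model.apply_softmax = True        # apply softmax in postprocessing (default False)
model.is_vertical_screen = True   # the input is a vertical image (height > width)

result = model.predict(cv2.imread("portrait.jpg"))
```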