FastDeploy/fastdeploy/runtime/backends/paddle/paddle_backend.h
WJJ1995 aa6931bee9 [Model] Add YOLOv5-seg (#988)
* add onnx_ort_runtime demo

* rm in requirements

* support batch eval

* fixed MattingResults bug

* move assignment for DetectionResult

* integrated x2paddle

* add model convert readme

* update readme

* re-lint

* add processor api

* Add MattingResult Free

* change valid_cpu_backends order

* add ppocr benchmark

* mv bs from 64 to 32

* fixed quantize.md

* fixed quantize bugs

* Add Monitor for benchmark

* update mem monitor

* Set trt_max_batch_size default 1

* fixed ocr benchmark bug

* support yolov5 in serving

* Fixed yolov5 serving

* Fixed postprocess

* update yolov5 to 7.0

* add poros runtime demos

* update readme

* Support poros abi=1

* rm useless note

* deal with comments

* support pp_trt for ppseg

* fixed symlink problem

* Add is_mini_pad and stride for yolov5

* Add yolo series for paddle format

* fixed bugs

* fixed bug

* support yolov5seg

* fixed bug

* refactor yolov5seg

* fixed bug

* mv Mask int32 to uint8

* add yolov5seg example

* rm log info

* fixed code style

* add yolov5seg example in python

* fixed dtype bug

* update note

* deal with comments

* get sorted index

* add yolov5seg test case

* Add GPL-3.0 License

* add round func

* deal with comments

* deal with comments

Co-authored-by: Jason <jiangjiajun@baidu.com>
2023-01-11 15:36:32 +08:00


// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <iostream>
#include <map>
#include <memory>
#include <string>
#include <vector>
#include "fastdeploy/runtime/backends/backend.h"
#include "fastdeploy/runtime/backends/paddle/option.h"
#ifdef ENABLE_PADDLE2ONNX
#include "paddle2onnx/converter.h"
#endif
#include "fastdeploy/utils/unique_ptr.h"
#include "paddle_inference_api.h" // NOLINT
namespace fastdeploy {
// Convert a fastdeploy Device to the corresponding paddle_infer::PlaceType
paddle_infer::PlaceType ConvertFDDeviceToPlace(Device device);

// Share the memory buffer of a fastdeploy::FDTensor with a
// paddle_infer::Tensor
void ShareTensorFromFDTensor(paddle_infer::Tensor* tensor, FDTensor& fd_tensor);

// Convert a paddle_infer::Tensor to a fastdeploy::FDTensor;
// if copy_to_fd is true, copy the memory data into the FDTensor,
// else share the memory with the FDTensor
void PaddleTensorToFDTensor(std::unique_ptr<paddle_infer::Tensor>& tensor,
                            FDTensor* fd_tensor, bool copy_to_fd);

// Convert data type from paddle inference to fastdeploy
FDDataType PaddleDataTypeToFD(const paddle_infer::DataType& dtype);

// Convert data type from paddle2onnx::PaddleReader to fastdeploy
FDDataType ReaderDataTypeToFD(int32_t dtype);
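
// A minimal sketch of how the conversion helpers above fit together
// (illustrative only; `handle` and `fd` are hypothetical local names,
// not part of this header):
//
//   std::unique_ptr<paddle_infer::Tensor> handle = /* predictor output */;
//   FDTensor fd;
//   PaddleTensorToFDTensor(handle, &fd, true);   // copy: fd owns its buffer
//   PaddleTensorToFDTensor(handle, &fd, false);  // share: fd borrows memory
//   FDDataType dtype = PaddleDataTypeToFD(handle->type());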

class PaddleBackend : public BaseBackend {
 public:
  PaddleBackend() {}
  virtual ~PaddleBackend() = default;
  void BuildOption(const PaddleBackendOption& option);

  bool
  InitFromPaddle(const std::string& model_file, const std::string& params_file,
                 const PaddleBackendOption& option = PaddleBackendOption());
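
  // Run inference; `copy_to_fd` selects whether output data is copied into
  // the FDTensors or shared with them (cf. PaddleTensorToFDTensor above).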
  bool Infer(std::vector<FDTensor>& inputs, std::vector<FDTensor>* outputs,
             bool copy_to_fd = true) override;

  int NumInputs() const override { return inputs_desc_.size(); }
  int NumOutputs() const override { return outputs_desc_.size(); }
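
  // Clone this backend, optionally onto another stream / device id; the
  // exact resource-sharing behavior is defined by the implementation.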
  std::unique_ptr<BaseBackend> Clone(void* stream = nullptr,
                                     int device_id = -1) override;

  TensorInfo GetInputInfo(int index) override;
  TensorInfo GetOutputInfo(int index) override;
  std::vector<TensorInfo> GetInputInfos() override;
  std::vector<TensorInfo> GetOutputInfos() override;

 private:
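  // Run the predictor once with dummy inputs of the given shapes so that
  // TensorRT dynamic-shape information can be collected (summary inferred
  // from the name; see the implementation for details).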
  void
  CollectShapeRun(paddle_infer::Predictor* predictor,
                  const std::map<std::string, std::vector<int>>& shape) const;
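
  // Read the user-provided min/opt/max input shapes out of `option`.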
  void GetDynamicShapeFromOption(
      const PaddleBackendOption& option,
      std::map<std::string, std::vector<int>>* max_shape,
      std::map<std::string, std::vector<int>>* min_shape,
      std::map<std::string, std::vector<int>>* opt_shape) const;
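
  // Forward the TensorRT dynamic-shape settings from `option` into config_.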
  void SetTRTDynamicShapeToConfig(const PaddleBackendOption& option);
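
  // Cached state: the option used to build the predictor, the resulting
  // Paddle Inference config and predictor, and the model's I/O descriptions.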
  PaddleBackendOption option_;
  paddle_infer::Config config_;
  std::shared_ptr<paddle_infer::Predictor> predictor_;
  std::vector<TensorInfo> inputs_desc_;
  std::vector<TensorInfo> outputs_desc_;
};
} // namespace fastdeploy
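
A minimal usage sketch for this backend, assuming an exported Paddle inference model at the hypothetical paths model.pdmodel / model.pdiparams and default (CPU) options; illustrative only, not part of the header:

#include "fastdeploy/runtime/backends/paddle/paddle_backend.h"

int main() {
  fastdeploy::PaddleBackend backend;
  fastdeploy::PaddleBackendOption option;  // default option values

  // Hypothetical paths to an exported Paddle inference model.
  if (!backend.InitFromPaddle("model.pdmodel", "model.pdiparams", option)) {
    return -1;
  }

  // Inspect the model's first input and prepare matching tensors.
  fastdeploy::TensorInfo info = backend.GetInputInfo(0);
  std::vector<fastdeploy::FDTensor> inputs(1);
  std::vector<fastdeploy::FDTensor> outputs;
  // ... fill inputs[0] according to `info` (name, shape, dtype) ...

  // copy_to_fd = true: the outputs own their buffers after Infer returns.
  backend.Infer(inputs, &outputs, true);
  return 0;
}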