Fix bug of get input/output information from PaddleBackend (#339)
* Fix bug of get input/output information from PaddleBackend
* Support Paddle Inference with TensorRT (#340)
* Fix bug
@@ -20,9 +20,15 @@
 #include <vector>
 
 #include "fastdeploy/backends/backend.h"
+#ifdef ENABLE_PADDLE_FRONTEND
+#include "paddle2onnx/converter.h"
+#endif
 #include "paddle_inference_api.h"  // NOLINT
 
+#ifdef ENABLE_TRT_BACKEND
+#include "fastdeploy/backends/tensorrt/trt_backend.h"
+#endif
+
 namespace fastdeploy {
 
 struct PaddleBackendOption {
@@ -35,6 +41,11 @@ struct PaddleBackendOption {
   bool enable_log_info = false;
 
+  bool enable_trt = false;
+#ifdef ENABLE_TRT_BACKEND
+  TrtBackendOption trt_option;
+#endif
+
   int mkldnn_cache_size = 1;
   int cpu_thread_num = 8;
   // initialize memory size(MB) for GPU
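The new enable_trt flag and the conditionally compiled trt_option member let callers opt into the Paddle Inference TensorRT path added in #340. A minimal sketch of how the option struct might be filled in; the field names come from the hunk above, while the header path and the chosen values are assumptions for illustration:

    #include "fastdeploy/backends/paddle/paddle_backend.h"  // assumed header path

    int main() {
      fastdeploy::PaddleBackendOption option;
      option.enable_log_info = false;  // keep Paddle Inference logging quiet
      option.cpu_thread_num = 8;       // CPU math threads
      option.mkldnn_cache_size = 1;    // MKL-DNN shape-cache capacity

      option.enable_trt = true;        // request the TensorRT subgraph engine
    #ifdef ENABLE_TRT_BACKEND
      // trt_option (a TrtBackendOption) would be configured here; its fields
      // are declared in trt_backend.h and are not part of this diff.
    #endif
      return 0;
    }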
@@ -58,18 +69,21 @@ void CopyTensorToCpu(std::unique_ptr<paddle_infer::Tensor>& tensor,
 // Convert data type from paddle inference to fastdeploy
 FDDataType PaddleDataTypeToFD(const paddle_infer::DataType& dtype);
 
+// Convert data type from paddle2onnx::PaddleReader to fastdeploy
+FDDataType ReaderDataTypeToFD(int32_t dtype);
+
 class PaddleBackend : public BaseBackend {
  public:
   PaddleBackend() {}
   virtual ~PaddleBackend() = default;
-  void BuildOption(const PaddleBackendOption& option,
-                   const std::string& model_file);
+  void BuildOption(const PaddleBackendOption& option);
 
   bool InitFromPaddle(
       const std::string& model_file, const std::string& params_file,
       const PaddleBackendOption& option = PaddleBackendOption());
 
-  bool Infer(std::vector<FDTensor>& inputs, std::vector<FDTensor>* outputs) override;
+  bool Infer(std::vector<FDTensor>& inputs,
+             std::vector<FDTensor>* outputs) override;
 
   int NumInputs() const override { return inputs_desc_.size(); }
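The declaration of ReaderDataTypeToFD above only fixes the interface; the diff does not show its body. A self-contained sketch of the kind of mapping it performs follows. The integer codes and the stand-in enum below are placeholders, not values taken from this commit or from paddle2onnx:

    #include <cstdint>
    #include <stdexcept>

    // Stand-in for FastDeploy's FDDataType enum; the real definition lives
    // in the FastDeploy core headers.
    enum class FDDataType { FP32, FP64, INT32, INT64, UINT8, UNKNOWN };

    // Hypothetical mapping from a reader's integer dtype code to FDDataType.
    // The case values (0, 1, ...) are assumptions for illustration only.
    FDDataType ReaderDataTypeToFD(int32_t dtype) {
      switch (dtype) {
        case 0: return FDDataType::FP32;
        case 1: return FDDataType::FP64;
        case 2: return FDDataType::INT32;
        case 3: return FDDataType::INT64;
        case 4: return FDDataType::UINT8;
        default: throw std::runtime_error("unsupported reader dtype code");
      }
    }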
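Putting the pieces together, the class as declared after this change would be driven roughly as below. Only InitFromPaddle, NumInputs, and Infer from the hunk above are relied on; the header path, file names, and the elided tensor-filling step are assumptions:

    #include <string>
    #include <vector>

    #include "fastdeploy/backends/paddle/paddle_backend.h"  // assumed header path

    int main() {
      fastdeploy::PaddleBackendOption option;
      option.enable_log_info = true;

      fastdeploy::PaddleBackend backend;
      // InitFromPaddle loads the model/params files and, per this fix, collects
      // the input/output descriptions that back NumInputs() and friends.
      if (!backend.InitFromPaddle("model.pdmodel", "model.pdiparams", option)) {
        return -1;
      }

      std::vector<fastdeploy::FDTensor> inputs(backend.NumInputs());
      // ... fill each input tensor with shape and data here ...
      std::vector<fastdeploy::FDTensor> outputs;
      if (!backend.Infer(inputs, &outputs)) {
        return -1;
      }
      return 0;
    }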