// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once

#include <iostream>
#include <map>
#include <string>
#include <vector>

#include "fastdeploy/backends/backend.h"
#include "fastdeploy/backends/tensorrt/common/argsParser.h"
#include "fastdeploy/backends/tensorrt/common/buffers.h"
#include "fastdeploy/backends/tensorrt/common/common.h"
#include "fastdeploy/backends/tensorrt/common/logger.h"
#include "fastdeploy/backends/tensorrt/common/parserOnnxConfig.h"
#include "fastdeploy/backends/tensorrt/common/sampleUtils.h"

#include <cuda_runtime_api.h>
#include "NvInfer.h"

namespace fastdeploy {

using namespace samplesCommon;

// Describes one TensorRT input/output tensor: its name, shape and data type.
struct TrtValueInfo {
  std::string name;
  std::vector<int> shape;
  nvinfer1::DataType dtype;
};

// Options to configure the TensorRT backend.
struct TrtBackendOption {
  int gpu_id = 0;
  bool enable_fp16 = false;
  bool enable_int8 = false;
  size_t max_batch_size = 32;
  size_t max_workspace_size = 1 << 30;
  // Dynamic-shape ranges, keyed by input tensor name.
  std::map<std::string, std::vector<int32_t>> max_shape;
  std::map<std::string, std::vector<int32_t>> min_shape;
  std::map<std::string, std::vector<int32_t>> opt_shape;
  // Path to save/load the serialized TensorRT engine; empty means no caching.
  std::string serialize_file = "";
};

std::vector<int> toVec(const nvinfer1::Dims& dim);
size_t TrtDataTypeSize(const nvinfer1::DataType& dtype);
FDDataType GetFDDataType(const nvinfer1::DataType& dtype);

class TrtBackend : public BaseBackend {
 public:
  TrtBackend() : engine_(nullptr), context_(nullptr) {}
  void BuildOption(const TrtBackendOption& option);

  bool InitFromPaddle(const std::string& model_file,
                      const std::string& params_file,
                      const TrtBackendOption& option = TrtBackendOption(),
                      bool verbose = false);
  bool InitFromOnnx(const std::string& model_file,
                    const TrtBackendOption& option = TrtBackendOption(),
                    bool from_memory_buffer = false);
  bool InitFromTrt(const std::string& trt_engine_file,
                   const TrtBackendOption& option = TrtBackendOption());

  bool Infer(std::vector<FDTensor>& inputs, std::vector<FDTensor>* outputs);

  int NumInputs() const { return inputs_desc_.size(); }
  int NumOutputs() const { return outputs_desc_.size(); }
  TensorInfo GetInputInfo(int index);
  TensorInfo GetOutputInfo(int index);

 private:
  std::shared_ptr<nvinfer1::ICudaEngine> engine_;
  std::shared_ptr<nvinfer1::IExecutionContext> context_;
  cudaStream_t stream_{};
  std::vector<void*> bindings_;
  std::vector<TrtValueInfo> inputs_desc_;
  std::vector<TrtValueInfo> outputs_desc_;
  std::map<std::string, DeviceBuffer> inputs_buffer_;
  std::map<std::string, DeviceBuffer> outputs_buffer_;

  // Sometimes, when the number of outputs is greater than 1,
  // the output order of TensorRT may not match the order in the
  // original ONNX model. This member records the original output
  // order so that the right order can be recovered.
  std::map<std::string, int> outputs_order_;

  void GetInputOutputInfo();
  void AllocateBufferInDynamicShape(const std::vector<FDTensor>& inputs,
                                    std::vector<FDTensor>* outputs);
  bool CreateTrtEngine(const std::string& onnx_model,
                       const TrtBackendOption& option);
};

}  // namespace fastdeploy
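
// A minimal usage sketch of this backend (illustrative only, not part of the
// API): the model path "model.onnx", the input name "x" and the shape values
// below are hypothetical, and error handling / tensor filling are omitted.
//
//   fastdeploy::TrtBackendOption option;
//   option.gpu_id = 0;
//   option.enable_fp16 = true;
//   // Dynamic-shape range for the (hypothetical) input tensor "x".
//   option.min_shape["x"] = {1, 3, 224, 224};
//   option.opt_shape["x"] = {4, 3, 224, 224};
//   option.max_shape["x"] = {8, 3, 224, 224};
//
//   fastdeploy::TrtBackend backend;
//   if (backend.InitFromOnnx("model.onnx", option)) {
//     std::vector<fastdeploy::FDTensor> inputs, outputs;
//     // ... fill `inputs` with data matching the model's inputs ...
//     backend.Infer(inputs, &outputs);
//   }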