FastDeploy/fastdeploy/runtime/backends/tensorrt/option.h
WJJ1995 409c5dc056 [Backend]Add default value for TRT SetShape() (#1712)
* add GPL license

* add GPL-3.0 license

* add GPL-3.0 license

* add GPL-3.0 license

* support yolov8

* add pybind for yolov8

* add yolov8 readme

* add cpp benchmark

* add cpu and gpu mem

* public part split

* add runtime mode

* fixed bugs

* add cpu_thread_nums

* deal with comments

* deal with comments

* deal with comments

* rm useless code

* add FASTDEPLOY_DECL

* add FASTDEPLOY_DECL

* fixed for windows

* mv rss to pss

* mv rss to pss

* Update utils.cc

* use thread to collect mem

* Add ResourceUsageMonitor

* rm useless code

* fixed bug

* fixed typo

* update ResourceUsageMonitor

* fixed bug

* fixed bug

* add note for ResourceUsageMonitor

* deal with comments

* add macros

* deal with comments

* deal with comments

* deal with comments

* re-lint

* rm pmap and use mem api

* rm pmap and use mem api

* add mem api

* Add PrintBenchmarkInfo func

* Add PrintBenchmarkInfo func

* Add PrintBenchmarkInfo func

* deal with comments

* fixed enable_paddle_to_trt

* add log for paddle_trt

* support ppcls benchmark

* use new trt option api

* update benchmark info

* simplify benchmark.cc

* simplify benchmark.cc

* deal with comments

* Add ppseg && ppocr benchmark

* add OCR rec img

* add ocr benchmark

* fixed trt shape

* add trt shape

* resolve conflict

* add ENABLE_BENCHMARK define

* Add ClassifyDiff

* Add Resize for ClassifyResult

* deal with comments

* add convert info script

* resolve conflict

* Add SaveBenchmarkResult func

* fixed bug

* fixed bug

* fixed bug

* add config.txt for option

* fixed bug

* fixed bug

* fixed bug

* add benchmark.sh

* mv thread_nums from 8 to 1

* deal with comments

* deal with comments

* fixed readme

* deal with comments

* add all platform shell

* Update config.arm.txt

* Update config.gpu.txt

* Update config.x86.txt

* fixed printinfo bug

* rm proxy

* add more model support

* all backend config.txt

* deal with comments

* Add MattingDiff compare

* fixed predict bug

* adjust warmup/repeat times

* add e2e/mem configs

* fixed typo

* open collect_mem

* fixed typo

* add trt cache option

* fixed bug

* fixed repeat times

* test for benchmark

* test for det benchmark

* for benchmark

* fixed for x86

* add h2d and d2h config

* rename txt file

* add dynamic shape for pp_trt

* fixed typo

* Update option.h

* add collect shape

* add default value for SetShape()

---------

Co-authored-by: DefTruth <31974251+DefTruth@users.noreply.github.com>
2023-03-27 14:03:47 +08:00

85 lines · 3.4 KiB · C++ · Executable File

// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include "fastdeploy/core/fd_type.h"
#include <iostream>
#include <map>
#include <string>
#include <vector>
namespace fastdeploy {
/*! @brief Option object to configure the TensorRT backend
 */
struct TrtBackendOption {
  /// Maximum batch size; deprecated since TensorRT 8.x
  size_t max_batch_size = 32;
  /// Maximum workspace size (in bytes) for the TensorRT builder
  size_t max_workspace_size = 1 << 30;
  /// Enable logging while converting the ONNX model to TensorRT
  bool enable_log_info = false;
  /// Enable half-precision inference; on devices without half-precision support it falls back to float32
  bool enable_fp16 = false;
  /** \brief Set the shape range of an input tensor for a model with dynamic input shapes while using the TensorRT backend
   *
   * \param[in] tensor_name The name of the dynamic-shape input tensor
   * \param[in] min The minimum shape for the input tensor
   * \param[in] opt The optimal shape for the input tensor; set it to the most common shape. If left at the default value, it is kept the same as min
   * \param[in] max The maximum shape for the input tensor. If left at the default value, it is kept the same as min
   */
  void SetShape(const std::string& tensor_name,
                const std::vector<int32_t>& min,
                const std::vector<int32_t>& opt = std::vector<int32_t>(),
                const std::vector<int32_t>& max = std::vector<int32_t>()) {
    min_shape[tensor_name].assign(min.begin(), min.end());
    // An empty opt or max falls back to the minimum shape.
    if (opt.size() == 0) {
      opt_shape[tensor_name].assign(min.begin(), min.end());
    } else {
      opt_shape[tensor_name].assign(opt.begin(), opt.end());
    }
    if (max.size() == 0) {
      max_shape[tensor_name].assign(min.begin(), min.end());
    } else {
      max_shape[tensor_name].assign(max.begin(), max.end());
    }
  }
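  // Illustration (a sketch with a hypothetical tensor name, not part of the
  // original header): after
  //   option.SetShape("image", {1, 3, 224, 224});
  // min_shape, opt_shape and max_shape all hold {1, 3, 224, 224}, whereas
  //   option.SetShape("image", {1, 3, 224, 224}, {4, 3, 224, 224}, {8, 3, 224, 224});
  // records a batch-size range of 1..8 with 4 as the builder's optimum.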
  /// Cache file path for the TensorRT backend. Loading a Paddle/ONNX model and building a TensorRT engine takes a long time; when this option is set, the built engine is serialized to `serialize_file` and loaded directly on subsequent runs
  std::string serialize_file = "";
  // The parameters below may be removed in a future version; please do not
  // access or use them directly.
  std::map<std::string, std::vector<int32_t>> max_shape;
  std::map<std::string, std::vector<int32_t>> min_shape;
  std::map<std::string, std::vector<int32_t>> opt_shape;
  bool enable_pinned_memory = false;
  void* external_stream_ = nullptr;
  int gpu_id = 0;
  std::string model_file = "";   // Path of the model file
  std::string params_file = "";  // Path of the parameters file; can be empty
  // Format of the input model
  ModelFormat model_format = ModelFormat::AUTOREC;
};
} // namespace fastdeploy
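
For context, here is a minimal usage sketch for this option struct, assuming the FastDeploy headers are on the include path; the tensor names, shapes, and file path are illustrative placeholders, not values from the repository:

#include "fastdeploy/runtime/backends/tensorrt/option.h"

int main() {
  fastdeploy::TrtBackendOption trt_option;
  trt_option.enable_fp16 = true;            // falls back to FP32 on unsupported devices
  trt_option.max_workspace_size = 1 << 30;  // 1 GiB builder workspace
  trt_option.serialize_file = "model.trt";  // cache the built engine across runs

  // Dynamic-shape range for a hypothetical input tensor named "image":
  trt_option.SetShape("image", {1, 3, 224, 224},  // min
                      {4, 3, 224, 224},           // opt (most common shape)
                      {8, 3, 224, 224});          // max
  // Omitting opt and max pins them to min, i.e. a static shape:
  trt_option.SetShape("scale_factor", {1, 2});
  return 0;
}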