Mirror of https://github.com/PaddlePaddle/FastDeploy.git
[Backend] refactor paddle custom ops -> fastdeploy::paddle_custom_ops (#2101)
* [cmake] upgrade windows paddle inference -> 2.5.0
* fix paddle custom ops bug on windows
* [Backend] refactor paddle custom ops
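The rename in the title is a namespace-scoping change: helpers backing FastDeploy's Paddle custom operators move under fastdeploy::paddle_custom_ops instead of sitting in the global namespace. Below is a minimal sketch of that pattern only; RunSomeCustomOp is a hypothetical placeholder, not an actual FastDeploy symbol.

// Illustrative only: the namespace-scoping pattern described by the commit title.
// "RunSomeCustomOp" is a hypothetical helper, not a real FastDeploy function.

// Before: a free function in the global namespace, prone to symbol clashes
// when FastDeploy is statically linked together with other libraries.
// void RunSomeCustomOp();

// After: the same helper scoped under the project namespace.
namespace fastdeploy {
namespace paddle_custom_ops {

void RunSomeCustomOp() {
  // ... custom-op implementation ...
}

}  // namespace paddle_custom_ops
}  // namespace fastdeploy

// Call sites are updated to use the qualified name:
// fastdeploy::paddle_custom_ops::RunSomeCustomOp();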
@@ -88,7 +88,8 @@ void PaddleBackend::BuildOption(const PaddleBackendOption& option) {
       config_.SetOptimCacheDir(opt_cache_dir);
     }
     config_.EnableTensorRtEngine(option.trt_option.max_workspace_size,
-                                 option.trt_option.max_batch_size, 3,
+                                 option.trt_option.max_batch_size,
+                                 option.trt_min_subgraph_size,
                                  precision, use_static);
     SetTRTDynamicShapeToConfig(option);
     if (option_.enable_fixed_size_opt) {
@@ -225,7 +226,8 @@ bool PaddleBackend::InitFromPaddle(const std::string& model,
       use_static = true;
     }
     config_.EnableTensorRtEngine(option.trt_option.max_workspace_size,
-                                 option.trt_option.max_batch_size, 3,
+                                 option.trt_option.max_batch_size,
+                                 option.trt_min_subgraph_size,
                                  paddle_infer::PrecisionType::kInt8,
                                  use_static, false);
     SetTRTDynamicShapeToConfig(option);
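In both hunks the previously hard-coded TensorRT minimum subgraph size of 3 is replaced by the configurable option.trt_min_subgraph_size. The sketch below is a minimal, hypothetical harness: DemoTrtOption and ConfigureTrt are illustrative names, and only the paddle_infer::Config::EnableTensorRtEngine call mirrors the diff.

// Minimal sketch, not FastDeploy code: forwards a configurable minimum
// subgraph size to Paddle Inference instead of the hard-coded literal 3.
#include "paddle_inference_api.h"  // Paddle Inference C++ API header

struct DemoTrtOption {               // stand-in for the backend's TRT option
  int max_workspace_size = 1 << 30;  // bytes reserved for the TensorRT workspace
  int max_batch_size = 32;
};

void ConfigureTrt(paddle_infer::Config* config,
                  const DemoTrtOption& trt_option,
                  int trt_min_subgraph_size,  // previously the literal 3
                  bool use_static) {
  // Same Paddle Inference call as in the diff; the third argument is the
  // minimum number of ops a subgraph must contain before it is offloaded
  // to TensorRT rather than staying on the native Paddle executor.
  config->EnableTensorRtEngine(trt_option.max_workspace_size,
                               trt_option.max_batch_size,
                               trt_min_subgraph_size,
                               paddle_infer::PrecisionType::kFloat32,
                               use_static,
                               /*use_calib_mode=*/false);
}

Making the threshold configurable lets callers trade engine-build overhead against coverage: a larger minimum keeps small subgraphs on the native executor, while a smaller one offloads to TensorRT more aggressively.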