Mirror of https://github.com/PaddlePaddle/FastDeploy.git
Fix some compile problems in Linux (#41)
* Add custom operator for onnxruntime and fix paddle backend
* Polish cmake files and runtime apis
* Remove copy libraries
* Fix some issues
* Fix bugs
* Support removing multiclass_nms so PaddleDetection models can run with the TensorRT backend
* Add common operator multiclass_nms
* Fix compile problems
* Fix some compile problems in Linux
* Remove debug log

Co-authored-by: root <root@bjyz-sys-gpu-kongming3.bjyz.baidu.com>
@@ -87,16 +87,14 @@ struct FASTDEPLOY_DECL RuntimeOption {
  // disable half precision, change to full precision(float32)
  void DisableTrtFP16();

  void SetTrtCacheFile(const std::string& cache_file_path);

  Backend backend = Backend::UNKNOWN;
  // for cpu inference and preprocess
  int cpu_thread_num = 8;
  int device_id = 0;

#ifdef WITH_GPU
  Device device = Device::GPU;
#else
  Device device = Device::CPU;
#endif

  // ======Only for ORT Backend========
  // -1 means use default value by ort