mirror of
https://github.com/PaddlePaddle/FastDeploy.git
synced 2025-10-06 00:57:33 +08:00
[Benchmark]Fixed PrintBenchmarkInfo bug (#1570)
* add GPL license * add GPL-3.0 license * add GPL-3.0 license * add GPL-3.0 license * support yolov8 * add pybind for yolov8 * add yolov8 readme * add cpp benchmark * add cpu and gpu mem * public part split * add runtime mode * fixed bugs * add cpu_thread_nums * deal with comments * deal with comments * deal with comments * rm useless code * add FASTDEPLOY_DECL * add FASTDEPLOY_DECL * fixed for windows * mv rss to pss * mv rss to pss * Update utils.cc * use thread to collect mem * Add ResourceUsageMonitor * rm useless code * fixed bug * fixed typo * update ResourceUsageMonitor * fixed bug * fixed bug * add note for ResourceUsageMonitor * deal with comments * add macros * deal with comments * deal with comments * deal with comments * re-lint * rm pmap and use mem api * rm pmap and use mem api * add mem api * Add PrintBenchmarkInfo func * Add PrintBenchmarkInfo func * Add PrintBenchmarkInfo func * deal with comments * fixed enable_paddle_to_trt * add log for paddle_trt * support ppcls benchmark * use new trt option api * update benchmark info * simplify benchmark.cc * simplify benchmark.cc * deal with comments * Add ppseg && ppocr benchmark * add OCR rec img * add ocr benchmark * fixed trt shape * add trt shape * resolve conflict * add ENABLE_BENCHMARK define * Add ClassifyDiff * Add Resize for ClassifyResult * deal with comments * add convert info script * resolve conflict * Add SaveBenchmarkResult func * fixed bug * fixed bug * fixed bug * add config.txt for option * fixed bug * fixed bug * fixed bug * add benchmark.sh * mv thread_nums from 8 to 1 * deal with comments * deal with comments * fixed readme * deal with comments * add all platform shell * Update config.arm.txt * Update config.gpu.txt * Update config.x86.txt * fixed printinfo bug --------- Co-authored-by: DefTruth <31974251+DefTruth@users.noreply.github.com>
This commit is contained in:
@@ -45,6 +45,7 @@ static bool CreateRuntimeOption(fastdeploy::RuntimeOption* option,
       option->trt_option.enable_fp16 = true;
     }
   } else if (config_info["backend"] == "default") {
+    PrintBenchmarkInfo(config_info);
     return true;
   } else {
     std::cout << "While inference with GPU, only support "
@@ -67,6 +68,7 @@ static bool CreateRuntimeOption(fastdeploy::RuntimeOption* option,
       option->paddle_lite_option.enable_fp16 = true;
     }
   } else if (config_info["backend"] == "default") {
+    PrintBenchmarkInfo(config_info);
     return true;
   } else {
     std::cout << "While inference with CPU, only support "
@@ -88,6 +90,7 @@ static bool CreateRuntimeOption(fastdeploy::RuntimeOption* option,
       option->paddle_lite_option.enable_fp16 = true;
     }
   } else if (config_info["backend"] == "default") {
+    PrintBenchmarkInfo(config_info);
     return true;
   } else {
     std::cout << "While inference with XPU, only support "
Reference in New Issue
Block a user