diff --git a/benchmark/cpp/config.arm.txt b/benchmark/cpp/config.arm.txt
index 2e3526c1f..69187a060 100755
--- a/benchmark/cpp/config.arm.txt
+++ b/benchmark/cpp/config.arm.txt
@@ -11,4 +11,4 @@ collect_memory_info: false
 sampling_interval: 1
 precision_compare: false
 xpu_l3_cache: 0
-result_path: benchmark_arm.txt
+result_path: benchmark_arm.txt
\ No newline at end of file
diff --git a/benchmark/cpp/config.gpu.txt b/benchmark/cpp/config.gpu.txt
index 476745622..c5437718e 100755
--- a/benchmark/cpp/config.gpu.txt
+++ b/benchmark/cpp/config.gpu.txt
@@ -11,4 +11,4 @@ collect_memory_info: false
 sampling_interval: 1
 precision_compare: false
 xpu_l3_cache: 0
-result_path: benchmark_gpu.txt
+result_path: benchmark_gpu.txt
\ No newline at end of file
diff --git a/benchmark/cpp/config.x86.txt b/benchmark/cpp/config.x86.txt
index d98a45ad5..153c3e76a 100755
--- a/benchmark/cpp/config.x86.txt
+++ b/benchmark/cpp/config.x86.txt
@@ -11,4 +11,4 @@ collect_memory_info: false
 sampling_interval: 1
 precision_compare: false
 xpu_l3_cache: 0
-result_path: benchmark_x86.txt
+result_path: benchmark_x86.txt
\ No newline at end of file
diff --git a/benchmark/cpp/config.xpu.txt b/benchmark/cpp/config.xpu.txt
index a99ebe05a..2dded3221 100755
--- a/benchmark/cpp/config.xpu.txt
+++ b/benchmark/cpp/config.xpu.txt
@@ -11,4 +11,4 @@ collect_memory_info: false
 sampling_interval: 1
 precision_compare: false
 xpu_l3_cache: 0
-result_path: benchmark_xpu.txt
+result_path: benchmark_xpu.txt
\ No newline at end of file
diff --git a/benchmark/cpp/option.h b/benchmark/cpp/option.h
index b04dcafda..1c0032e10 100755
--- a/benchmark/cpp/option.h
+++ b/benchmark/cpp/option.h
@@ -45,6 +45,7 @@ static bool CreateRuntimeOption(fastdeploy::RuntimeOption* option,
       option->trt_option.enable_fp16 = true;
     }
   } else if (config_info["backend"] == "default") {
+    PrintBenchmarkInfo(config_info);
     return true;
   } else {
     std::cout << "While inference with GPU, only support "
@@ -67,6 +68,7 @@ static bool CreateRuntimeOption(fastdeploy::RuntimeOption* option,
       option->paddle_lite_option.enable_fp16 = true;
     }
   } else if (config_info["backend"] == "default") {
+    PrintBenchmarkInfo(config_info);
     return true;
   } else {
     std::cout << "While inference with CPU, only support "
@@ -88,6 +90,7 @@ static bool CreateRuntimeOption(fastdeploy::RuntimeOption* option,
      option->paddle_lite_option.enable_fp16 = true;
     }
   } else if (config_info["backend"] == "default") {
+    PrintBenchmarkInfo(config_info);
     return true;
   } else {
     std::cout << "While inference with XPU, only support "
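
Note: the option.h hunks insert a PrintBenchmarkInfo(config_info) call before each early "return true" in the "default" backend branches, so the benchmark configuration is still reported when no explicit backend is selected. The sketch below is a minimal, hypothetical illustration of what such a helper could print, assuming config_info is a std::unordered_map<std::string, std::string> parsed from the config.*.txt files shown above; it is not the actual FastDeploy implementation, and the function and key names used in main() are taken from the diff for illustration only.

// Hypothetical sketch (not FastDeploy's PrintBenchmarkInfo): dump the parsed
// benchmark config as key/value pairs. The map type and output format are assumptions.
#include <iostream>
#include <string>
#include <unordered_map>

static void PrintBenchmarkInfoSketch(
    const std::unordered_map<std::string, std::string>& config_info) {
  std::cout << "======= Benchmark Config =======" << std::endl;
  for (const auto& kv : config_info) {
    std::cout << kv.first << ": " << kv.second << std::endl;
  }
  std::cout << "================================" << std::endl;
}

int main() {
  // Keys mirror the entries in benchmark/cpp/config.*.txt from the diff above.
  std::unordered_map<std::string, std::string> config_info = {
      {"backend", "default"},
      {"collect_memory_info", "false"},
      {"sampling_interval", "1"},
      {"precision_compare", "false"},
      {"xpu_l3_cache", "0"},
      {"result_path", "benchmark_gpu.txt"}};
  PrintBenchmarkInfoSketch(config_info);
  return 0;
}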