diff --git a/benchmark/cpp/benchmark_x86.sh b/benchmark/cpp/benchmark_x86.sh
index 1657a32f9..dcdc41123 100755
--- a/benchmark/cpp/benchmark_x86.sh
+++ b/benchmark/cpp/benchmark_x86.sh
@@ -62,9 +62,9 @@ fi
 ./benchmark_ppseg --model PP_LiteSeg_B_STDC2_cityscapes_with_argmax_infer --image cityscapes_demo.png --config_path $CONFIG_PATH
 ./benchmark_ppseg --model FCN_HRNet_W18_cityscapes_with_argmax_infer --image cityscapes_demo.png --config_path $CONFIG_PATH
 ./benchmark_ppseg --model SegFormer_B0-cityscapes-with-argmax --image cityscapes_demo.png --config_path $CONFIG_PATH
-./benchmark_ppseg --model Deeplabv3_ResNet101_OS8_cityscapes_with_argmax_infer --image cityscapes_demo.png --config_path $CONFIG_PATH
+./benchmark_ppseg --model Deeplabv3_ResNet101_OS8_cityscapes_with_argmax_infer --image cityscapes_demo.png --warmup 10 --repeat 50 --config_path $CONFIG_PATH
 ./benchmark_ppseg --model Unet_cityscapes_with_argmax_infer --image cityscapes_demo.png --config_path $CONFIG_PATH
-./benchmark_ppseg --model PP_HumanSegV1_Server_with_argmax_infer --image portrait_heng.jpg --config_path $CONFIG_PATH
-./benchmark_ppmatting --model PP-Matting-512 --image matting_input.jpg --config_path $CONFIG_PATH
-./benchmark_ppmatting --model PPHumanMatting --image matting_input.jpg --config_path $CONFIG_PATH
+./benchmark_ppseg --model PP_HumanSegV1_Server_with_argmax_infer --image portrait_heng.jpg --warmup 10 --repeat 50 --config_path $CONFIG_PATH
+./benchmark_ppmatting --model PP-Matting-512 --image matting_input.jpg --warmup 10 --repeat 50 --config_path $CONFIG_PATH
+./benchmark_ppmatting --model PPHumanMatting --image matting_input.jpg --warmup 10 --repeat 50 --config_path $CONFIG_PATH
 ./benchmark_ppmatting --model PPModnet_MobileNetV2 --image matting_input.jpg --config_path $CONFIG_PATH
diff --git a/benchmark/cpp/macros.h b/benchmark/cpp/macros.h
index 018a7c881..2c7284526 100755
--- a/benchmark/cpp/macros.h
+++ b/benchmark/cpp/macros.h
@@ -36,6 +36,14 @@
   if (__config_info__["profile_mode"] == "runtime") { \
     if (!BENCHMARK_FUNC) { \
       std::cerr << "Failed to predict." << std::endl; \
+      __ss__ << "Runtime(ms): Failed" << std::endl; \
+      if (__config_info__["collect_memory_info"] == "true") { \
+        __ss__ << "cpu_rss_mb: Failed" << std::endl; \
+        __ss__ << "gpu_rss_mb: Failed" << std::endl; \
+        __ss__ << "gpu_util: Failed" << std::endl; \
+      } \
+      fastdeploy::benchmark::ResultManager::SaveBenchmarkResult( \
+          __ss__.str(), __config_info__["result_path"]); \
       return 0; \
     } \
     double __profile_time__ = MODEL_NAME.GetProfileTime() * 1000; \
@@ -49,6 +57,14 @@
   for (int __i__ = 0; __i__ < __warmup__; __i__++) { \
     if (!BENCHMARK_FUNC) { \
       std::cerr << "Failed to predict." << std::endl; \
+      __ss__ << "End2End(ms): Failed" << std::endl; \
+      if (__config_info__["collect_memory_info"] == "true") { \
+        __ss__ << "cpu_rss_mb: Failed" << std::endl; \
+        __ss__ << "gpu_rss_mb: Failed" << std::endl; \
+        __ss__ << "gpu_util: Failed" << std::endl; \
+      } \
+      fastdeploy::benchmark::ResultManager::SaveBenchmarkResult( \
+          __ss__.str(), __config_info__["result_path"]); \
       return 0; \
     } \
   } \
@@ -62,6 +78,14 @@
   for (int __i__ = 0; __i__ < __repeat__; __i__++) { \
     if (!BENCHMARK_FUNC) { \
       std::cerr << "Failed to predict." << std::endl; \
+      __ss__ << "End2End(ms): Failed" << std::endl; \
+      if (__config_info__["collect_memory_info"] == "true") { \
+        __ss__ << "cpu_rss_mb: Failed" << std::endl; \
+        __ss__ << "gpu_rss_mb: Failed" << std::endl; \
+        __ss__ << "gpu_util: Failed" << std::endl; \
+      } \
+      fastdeploy::benchmark::ResultManager::SaveBenchmarkResult( \
+          __ss__.str(), __config_info__["result_path"]); \
       return 0; \
     } \
   } \
@@ -79,7 +103,7 @@
     std::cout << "gpu_rss_mb: " << __gpu_mem__ << "MB." << std::endl; \
     __ss__ << "gpu_rss_mb: " << __gpu_mem__ << "MB." << std::endl; \
     std::cout << "gpu_util: " << __gpu_util__ << std::endl; \
-    __ss__ << "gpu_util: " << __gpu_util__ << "MB." << std::endl; \
+    __ss__ << "gpu_util: " << __gpu_util__ << std::endl; \
     __resource_moniter__.Stop(); \
   } \
   fastdeploy::benchmark::ResultManager::SaveBenchmarkResult(__ss__.str(), \
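Note: the macros.h hunks above all apply the same pattern. When BENCHMARK_FUNC fails during the runtime-profiling, warmup, or repeat phase, the macro now writes "Failed" placeholders for the expected metrics into the result stream and saves it to result_path before returning, so a result file is still produced and parseable for failed runs. The following is a minimal, self-contained C++ sketch of that failure path, for illustration only: RunOnce and SaveResultText are hypothetical stand-ins (SaveResultText approximates fastdeploy::benchmark::ResultManager::SaveBenchmarkResult), and the config map and result file name merely mimic the __config_info__ entries the diff reads.

// Sketch of the failure path added in macros.h: on a failed predict call,
// emit "Failed" placeholders for every metric and persist them before bailing out.
#include <fstream>
#include <iostream>
#include <map>
#include <sstream>
#include <string>

// Hypothetical helper: append the collected benchmark text to the result file
// (stand-in for fastdeploy::benchmark::ResultManager::SaveBenchmarkResult).
static void SaveResultText(const std::string& text, const std::string& path) {
  std::ofstream ofs(path, std::ios::app);
  ofs << text;
}

// Returns 0 on failure (mirroring the macro's `return 0;`), 1 on success.
static int RunOnce(bool predict_ok,
                   const std::map<std::string, std::string>& config) {
  std::stringstream ss;
  if (!predict_ok) {
    std::cerr << "Failed to predict." << std::endl;
    ss << "End2End(ms): Failed" << std::endl;
    if (config.at("collect_memory_info") == "true") {
      ss << "cpu_rss_mb: Failed" << std::endl;
      ss << "gpu_rss_mb: Failed" << std::endl;
      ss << "gpu_util: Failed" << std::endl;
    }
    SaveResultText(ss.str(), config.at("result_path"));
    return 0;
  }
  // ... on success, the real macro goes on to time the repeats and record
  // End2End(ms), cpu_rss_mb, gpu_rss_mb and gpu_util here ...
  return 1;
}

int main() {
  std::map<std::string, std::string> config = {
      {"collect_memory_info", "true"},
      {"result_path", "benchmark_result.txt"}};  // illustrative file name
  // Simulate a failed prediction; the process exits non-zero but the
  // result file still contains the "Failed" entries.
  return RunOnce(/*predict_ok=*/false, config) ? 0 : 1;
}

Writing "Failed" values rather than skipping the keys keeps the result file schema stable, which appears to be the point of the change: whatever parses result_path can rely on the same keys being present whether or not the run succeeded.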