Mirror of https://github.com/PaddlePaddle/FastDeploy.git (synced 2025-10-05 16:48:03 +08:00)
[Benchmark] Add PaddleYOLOv8 cpp benchmark example & lite flags option (#1270)

* [Android] Add PaddleYOLOv8 cpp benchmark example & lite flags option
* [Benchmark] add linux x86_64 gpu benchmark build script
This commit is contained in:

Changed file: benchmark/cpp/benchmark_yolov5.cc (6 changes, mode changed: Executable file → Normal file)
@@ -65,8 +65,10 @@ bool RunModel(std::string model_file, std::string image_file, size_t warmup,
   for (int i = 0; i < repeats; i++) {
     if (FLAGS_collect_memory_info && i % dump_period == 0) {
       fastdeploy::benchmark::DumpCurrentCpuMemoryUsage(cpu_mem_file_name);
+#if defined(WITH_GPU)
       fastdeploy::benchmark::DumpCurrentGpuMemoryUsage(gpu_mem_file_name,
                                                        FLAGS_device_id);
+#endif
     }
     tc.Start();
     if (!model.Predict(im, &res)) {
@@ -102,9 +104,11 @@ int main(int argc, char* argv[]) {
   }
   if (FLAGS_collect_memory_info) {
     float cpu_mem = fastdeploy::benchmark::GetCpuMemoryUsage(cpu_mem_file_name);
-    float gpu_mem = fastdeploy::benchmark::GetGpuMemoryUsage(gpu_mem_file_name);
     std::cout << "cpu_pss_mb: " << cpu_mem << "MB." << std::endl;
+#if defined(WITH_GPU)
+    float gpu_mem = fastdeploy::benchmark::GetGpuMemoryUsage(gpu_mem_file_name);
     std::cout << "gpu_pss_mb: " << gpu_mem << "MB." << std::endl;
+#endif
   }
   return 0;
 }
Reference in New Issue
Block a user