[Benchmark] Add macros for benchmark (#1301)
* add GPL license
* add GPL-3.0 license
* add GPL-3.0 license
* add GPL-3.0 license
* support yolov8
* add pybind for yolov8
* add yolov8 readme
* add cpp benchmark
* add cpu and gpu mem
* public part split
* add runtime mode
* fixed bugs
* add cpu_thread_nums
* deal with comments
* deal with comments
* deal with comments
* rm useless code
* add FASTDEPLOY_DECL
* add FASTDEPLOY_DECL
* fixed for windows
* mv rss to pss
* mv rss to pss
* Update utils.cc
* use thread to collect mem
* Add ResourceUsageMonitor
* rm useless code
* fixed bug
* fixed typo
* update ResourceUsageMonitor
* fixed bug
* fixed bug
* add note for ResourceUsageMonitor
* deal with comments
* add macros
* deal with comments
* deal with comments
* deal with comments
* re-lint

---------

Co-authored-by: DefTruth <31974251+DefTruth@users.noreply.github.com>
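The ResourceUsageMonitor referenced in the history samples process memory from a background thread, and the "mv rss to pss" bullets note the metric was switched from RSS to PSS. The class below is a hypothetical minimal sketch of that idea, not FastDeploy's actual implementation: everything except the class name ResourceUsageMonitor (the method names, the sampling interval, the Linux-only /proc/self/smaps_rollup probe) is an assumption for illustration.

#include <atomic>
#include <chrono>
#include <fstream>
#include <sstream>
#include <string>
#include <thread>

// Hypothetical sketch only -- not the FastDeploy implementation.
// A background thread periodically samples the process's PSS and keeps the peak.
class ResourceUsageMonitor {
 public:
  explicit ResourceUsageMonitor(int interval_ms) : interval_ms_(interval_ms) {}

  void Start() {
    stop_ = false;
    thread_ = std::thread([this] {
      while (!stop_) {
        long kb = CurrentPssKb();
        // Single-writer update: only this thread stores peak_kb_.
        if (kb > peak_kb_.load()) peak_kb_.store(kb);
        std::this_thread::sleep_for(std::chrono::milliseconds(interval_ms_));
      }
    });
  }

  void Stop() {
    stop_ = true;
    if (thread_.joinable()) thread_.join();
  }

  long PeakPssKb() const { return peak_kb_.load(); }

 private:
  // Linux-only probe (assumed): read the "Pss:" line of /proc/self/smaps_rollup, in kB.
  static long CurrentPssKb() {
    std::ifstream f("/proc/self/smaps_rollup");
    std::string line;
    while (std::getline(f, line)) {
      if (line.rfind("Pss:", 0) == 0) {
        std::istringstream iss(line.substr(4));
        long kb = 0;
        iss >> kb;
        return kb;
      }
    }
    return 0;
  }

  int interval_ms_;
  std::atomic<bool> stop_{true};
  std::atomic<long> peak_kb_{0};
  std::thread thread_;
};

A benchmark would Start() the monitor before its timed loop, Stop() it afterwards, and report PeakPssKb() alongside latency.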
@@ -15,7 +15,6 @@
#pragma once

#include "gflags/gflags.h"
#include "fastdeploy/utils/perf.h"

DEFINE_string(model, "", "Directory of the inference model.");
DEFINE_string(image, "", "Path of the image file.");
@@ -49,75 +48,3 @@ void PrintUsage() {
  std::cout << "Default value of backend: default" << std::endl;
  std::cout << "Default value of use_fp16: false" << std::endl;
}

bool CreateRuntimeOption(fastdeploy::RuntimeOption* option) {
  if (FLAGS_device == "gpu") {
    option->UseGpu(FLAGS_device_id);
    if (FLAGS_backend == "ort") {
      option->UseOrtBackend();
    } else if (FLAGS_backend == "paddle") {
      option->UsePaddleInferBackend();
    } else if (FLAGS_backend == "trt" || FLAGS_backend == "paddle_trt") {
      option->UseTrtBackend();
      if (FLAGS_backend == "paddle_trt") {
        option->EnablePaddleToTrt();
      }
      if (FLAGS_use_fp16) {
        option->EnableTrtFP16();
      }
    } else if (FLAGS_backend == "default") {
      return true;
    } else {
      std::cout << "While inference with GPU, only support "
                   "default/ort/paddle/trt/paddle_trt now, "
                << FLAGS_backend << " is not supported." << std::endl;
      return false;
    }
  } else if (FLAGS_device == "cpu") {
    option->SetCpuThreadNum(FLAGS_cpu_thread_nums);
    if (FLAGS_backend == "ort") {
      option->UseOrtBackend();
    } else if (FLAGS_backend == "ov") {
      option->UseOpenVINOBackend();
    } else if (FLAGS_backend == "paddle") {
      option->UsePaddleInferBackend();
    } else if (FLAGS_backend == "lite") {
      option->UsePaddleLiteBackend();
      if (FLAGS_use_fp16) {
        option->EnableLiteFP16();
      }
    } else if (FLAGS_backend == "default") {
      return true;
    } else {
      std::cout << "While inference with CPU, only support "
                   "default/ort/ov/paddle/lite now, "
                << FLAGS_backend << " is not supported." << std::endl;
      return false;
    }
  } else if (FLAGS_device == "xpu") {
    option->UseKunlunXin(FLAGS_device_id);
    if (FLAGS_backend == "ort") {
      option->UseOrtBackend();
    } else if (FLAGS_backend == "paddle") {
      option->UsePaddleInferBackend();
    } else if (FLAGS_backend == "lite") {
      option->UsePaddleLiteBackend();
      if (FLAGS_use_fp16) {
        option->EnableLiteFP16();
      }
    } else if (FLAGS_backend == "default") {
      return true;
    } else {
      std::cout << "While inference with XPU, only support "
                   "default/ort/paddle/lite now, "
                << FLAGS_backend << " is not supported." << std::endl;
      return false;
    }
  } else {
    std::cerr << "Only support device CPU/GPU/XPU now, " << FLAGS_device
              << " is not supported." << std::endl;
    return false;
  }

  return true;
}
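For orientation, here is a minimal sketch of how a benchmark entry point could drive these flags and CreateRuntimeOption. This harness is an assumption, not code from the commit: gflags parsing via google::ParseCommandLineFlags is standard, but the "fastdeploy/runtime.h" include and the exact wiring are guesses.

#include <iostream>

#include "gflags/gflags.h"
#include "fastdeploy/runtime.h"  // assumed header for fastdeploy::RuntimeOption

// Assumes the DEFINE_* flags, PrintUsage() and CreateRuntimeOption() from the
// hunks above are visible here (e.g. via the benchmark header being edited).
int main(int argc, char* argv[]) {
  google::ParseCommandLineFlags(&argc, &argv, true);

  fastdeploy::RuntimeOption option;
  if (!CreateRuntimeOption(&option)) {
    PrintUsage();
    return -1;
  }

  std::cout << "model: " << FLAGS_model << ", image: " << FLAGS_image
            << std::endl;
  // ... construct the model with `option`, run warmup + timed inference,
  // and collect memory via ResourceUsageMonitor here ...
  return 0;
}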