[benchmark] support lite light api & optimize benchmark flags (#1950)

* [benchmark] support lite light api & optimize benchmark flags

* [backend] remove unused option member
Author: DefTruth
Committed: 2023-05-17 18:51:32 +08:00 (via GitHub)
Parent: 09ec386e8d
Commit: 1f66d5d9d3
6 changed files with 198 additions and 27 deletions

@@ -33,16 +33,11 @@
 DEFINE_string(tensors, "tensor_a.txt:tensor_b.txt",
               "The paths to dumped tensors.");
 DEFINE_bool(mem, false, "Whether to force to collect memory info.");
 DEFINE_int32(interval, -1, "Sampling interval for collect memory info.");
-DEFINE_string(model_file, "UNKNOWN",
-              "Optional, set specific model file,"
-              "eg, model.pdmodel, model.onnx");
-DEFINE_string(params_file, "",
-              "Optional, set specific params file,"
-              "eg, model.pdiparams.");
-DEFINE_string(model_format, "PADDLE",
-              "Optional, set specific model format,"
-              "eg, PADDLE/ONNX/RKNN/TORCHSCRIPT/SOPHGO");
+DEFINE_string(model_file, "UNKNOWN", "Optional, set specific model file, eg, model.pdmodel, model.onnx");
+DEFINE_string(params_file, "", "Optional, set specific params file, eg, model.pdiparams.");
+DEFINE_string(model_format, "PADDLE", "Optional, set specific model format, eg, PADDLE/ONNX/RKNN/TORCHSCRIPT/SOPHGO");
 DEFINE_bool(disable_mkldnn, false, "disable mkldnn for paddle backend");
+DEFINE_string(optimized_model_dir, "", "Set optimized model dir for lite backend.");
 #if defined(ENABLE_BENCHMARK)
 static std::vector<int64_t> GetInt64Shape(const std::vector<int>& shape) {
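
The compacted definitions above are ordinary gflags declarations: each DEFINE_string(name, default, help) produces a global FLAGS_name that becomes available after flag parsing. A minimal, self-contained sketch of the mechanism (the binary name, the --model help text, and the invocation below are illustrative, not from this repo):

```cpp
// Minimal gflags sketch; assumes only that gflags is linked.
// A hypothetical invocation:
//   ./benchmark_bin --model mobilenetv1 --model_file inference.pdmodel \
//       --params_file inference.pdiparams --model_format PADDLE
#include <iostream>
#include <string>
#include "gflags/gflags.h"

DEFINE_string(model, "", "Directory of the model (placeholder help text).");
DEFINE_string(model_format, "PADDLE",
              "Optional, set specific model format, eg, PADDLE/ONNX/RKNN/TORCHSCRIPT/SOPHGO");

int main(int argc, char* argv[]) {
  gflags::ParseCommandLineFlags(&argc, &argv, true);
  // Each DEFINE_string(name, ...) yields a global FLAGS_name variable.
  std::cout << "model: " << FLAGS_model
            << ", format: " << FLAGS_model_format << std::endl;
  return 0;
}
```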
@@ -117,15 +112,25 @@ static void RuntimeProfiling(int argc, char* argv[]) {
   auto model_format = fastdeploy::ModelFormat::PADDLE;
   if (FLAGS_model_file != "UNKNOWN") {
     // Set model file/param/format via command line
-    model_file = FLAGS_model + sep + FLAGS_model_file;
-    params_file = FLAGS_model + sep + FLAGS_params_file;
+    if (FLAGS_model != "") {
+      model_file = FLAGS_model + sep + FLAGS_model_file;
+      params_file = FLAGS_model + sep + FLAGS_params_file;
+    } else {
+      model_file = FLAGS_model_file;
+      params_file = FLAGS_params_file;
+    }
     model_format = GetModelFormat(FLAGS_model_format);
     if (model_format == fastdeploy::ModelFormat::PADDLE &&
         FLAGS_params_file == "") {
-      std::cout << "[ERROR] params_file can not be empty for PADDLE"
+      if (config_info["backend"] != "lite") {
+        std::cout << "[ERROR] params_file can not be empty for PADDLE"
                  << " format, Please, set your custom params_file manually."
                  << std::endl;
-      return;
+        return;
+      } else {
+        std::cout << "[INFO] Will using the lite light api for: "
+                  << model_file << std::endl;
+      }
     }
   } else {
     // Set model file/param/format via model dir (only support
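
Two behavioral changes land in this hunk: the model and params paths are joined with --model only when a model directory is actually supplied, and an empty params_file is no longer fatal when the backend is lite, since Paddle Lite's light API loads a single optimized model with no separate params file. A standalone sketch of the path resolution (the helper name ResolvePath is illustrative, not a benchmark symbol):

```cpp
#include <string>

// Illustrative helper mirroring the resolution logic above;
// `sep` plays the role of the platform path separator.
std::string ResolvePath(const std::string& model_dir,
                        const std::string& file_name,
                        const std::string& sep = "/") {
  if (!model_dir.empty()) {
    // A model directory was given: resolve the file relative to it.
    return model_dir + sep + file_name;
  }
  // Otherwise treat the flag value as a usable path on its own.
  return file_name;
}
```

Joining unconditionally, as the old code did, produced broken paths like "/model.pdmodel" whenever --model was empty; the new guard avoids that.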
@@ -140,6 +145,16 @@ static void RuntimeProfiling(int argc, char* argv[]) {
   option.SetModelPath(model_file, params_file, model_format);
+  // Set opt model dir
+  if (config_info["backend"] == "lite") {
+    if (FLAGS_optimized_model_dir != "") {
+      option.paddle_lite_option.optimized_model_dir =
+          FLAGS_optimized_model_dir;
+    } else {
+      option.paddle_lite_option.optimized_model_dir = FLAGS_model;
+    }
+  }
   // Get input shapes/names/dtypes
   std::vector<std::vector<int32_t>> input_shapes =
       benchmark::ResultManager::GetInputShapes(FLAGS_shapes);
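
With the flag wired in, a lite run prefers --optimized_model_dir and falls back to --model for the optimized-model location. A hedged sketch of how a caller might set the same fields directly (SetModelPath, paddle_lite_option.optimized_model_dir, and ModelFormat::PADDLE appear in this diff; UseLiteBackend() is FastDeploy public API; the helper itself is hypothetical):

```cpp
#include <string>
#include "fastdeploy/runtime.h"

// Hypothetical helper: builds a RuntimeOption for the Paddle Lite backend.
fastdeploy::RuntimeOption MakeLiteOption(const std::string& model_file,
                                         const std::string& opt_dir) {
  fastdeploy::RuntimeOption option;
  option.UseLiteBackend();
  // Empty params_file is accepted on the lite path above: the light api
  // loads a single optimized model instead of model + params.
  option.SetModelPath(model_file, "", fastdeploy::ModelFormat::PADDLE);
  // Where Paddle Lite stores/loads its optimized (.nb) model.
  option.paddle_lite_option.optimized_model_dir = opt_dir;
  return option;
}
```

Defaulting to FLAGS_model presumably keeps the optimized artifact in the model directory when no separate directory is given.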