Files
FastDeploy/benchmark/cpp/option.h
WJJ1995 098b06754f [Benchmark] Fixed PrintBenchmarkInfo bug (#1570)
* add GPL license

* add GPL-3.0 license

* support yolov8

* add pybind for yolov8

* add yolov8 readme

* add cpp benchmark

* add cpu and gpu mem

* public part split

* add runtime mode

* fixed bugs

* add cpu_thread_nums

* deal with comments

* rm useless code

* add FASTDEPLOY_DECL

* fixed for windows

* mv rss to pss

* Update utils.cc

* use thread to collect mem

* Add ResourceUsageMonitor

* rm useless code

* fixed bug

* fixed typo

* update ResourceUsageMonitor

* fixed bug

* add note for ResourceUsageMonitor

* deal with comments

* add macros

* deal with comments

* re-lint

* rm pmap and use mem api

* add mem api

* Add PrintBenchmarkInfo func

* deal with comments

* fixed enable_paddle_to_trt

* add log for paddle_trt

* support ppcls benchmark

* use new trt option api

* update benchmark info

* simplify benchmark.cc

* deal with comments

* Add ppseg && ppocr benchmark

* add OCR rec img

* add ocr benchmark

* fixed trt shape

* add trt shape

* resolve conflict

* add ENABLE_BENCHMARK define

* Add ClassifyDiff

* Add Resize for ClassifyResult

* deal with comments

* add convert info script

* resolve conflict

* Add SaveBenchmarkResult func

* fixed bug

* add config.txt for option

* fixed bug

* add benchmark.sh

* mv thread_nums from 8 to 1

* deal with comments

* fixed readme

* deal with comments

* add all platform shell

* Update config.arm.txt

* Update config.gpu.txt

* Update config.x86.txt

* fixed printinfo bug

---------

Co-authored-by: DefTruth <31974251+DefTruth@users.noreply.github.com>
2023-03-09 22:12:58 +08:00

// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once

#include <iostream>
#include <string>
#include <unordered_map>

#include "gflags/gflags.h"

#include "fastdeploy/benchmark/utils.h"  // fastdeploy::benchmark::ResultManager
#include "fastdeploy/vision.h"
#include "flags.h"  // sibling benchmark header: FLAGS_config_path, PrintUsage(), PrintBenchmarkInfo()

// Parses command-line flags, loads the benchmark config file pointed to by
// FLAGS_config_path, and fills `option` accordingly. Returns false (after
// printing usage) on an unsupported device/backend combination.
static bool CreateRuntimeOption(fastdeploy::RuntimeOption* option,
                                int argc, char* argv[], bool remove_flags) {
  google::ParseCommandLineFlags(&argc, &argv, remove_flags);
  option->DisableValidBackendCheck();
  std::unordered_map<std::string, std::string> config_info;
  fastdeploy::benchmark::ResultManager::LoadBenchmarkConfig(
      FLAGS_config_path, &config_info);
  if (config_info["profile_mode"] == "runtime") {
    option->EnableProfiling(config_info["include_h2d_d2h"] == "true",
                            std::stoi(config_info["repeat"]),
                            std::stoi(config_info["warmup"]));
  }
  if (config_info["device"] == "gpu") {
    option->UseGpu(std::stoi(config_info["device_id"]));
    if (config_info["backend"] == "ort") {
      option->UseOrtBackend();
    } else if (config_info["backend"] == "paddle") {
      option->UsePaddleInferBackend();
    } else if (config_info["backend"] == "trt" ||
               config_info["backend"] == "paddle_trt") {
      option->UseTrtBackend();
      if (config_info["backend"] == "paddle_trt") {
        // Paddle-TRT: run TensorRT through the Paddle Inference backend.
        option->UsePaddleInferBackend();
        option->paddle_infer_option.enable_trt = true;
      }
      if (config_info["use_fp16"] == "true") {
        option->trt_option.enable_fp16 = true;
      }
    } else if (config_info["backend"] == "default") {
      PrintBenchmarkInfo(config_info);
      return true;
    } else {
      std::cerr << "When running inference on GPU, only default/ort/paddle/"
                   "trt/paddle_trt backends are supported now; "
                << config_info["backend"] << " is not supported." << std::endl;
      PrintUsage();
      return false;
    }
  } else if (config_info["device"] == "cpu") {
    option->SetCpuThreadNum(std::stoi(config_info["cpu_thread_nums"]));
    if (config_info["backend"] == "ort") {
      option->UseOrtBackend();
    } else if (config_info["backend"] == "ov") {
      option->UseOpenVINOBackend();
    } else if (config_info["backend"] == "paddle") {
      option->UsePaddleInferBackend();
    } else if (config_info["backend"] == "lite") {
      option->UsePaddleLiteBackend();
      if (config_info["use_fp16"] == "true") {
        option->paddle_lite_option.enable_fp16 = true;
      }
    } else if (config_info["backend"] == "default") {
      PrintBenchmarkInfo(config_info);
      return true;
    } else {
      std::cerr << "When running inference on CPU, only default/ort/ov/"
                   "paddle/lite backends are supported now; "
                << config_info["backend"] << " is not supported." << std::endl;
      PrintUsage();
      return false;
    }
  } else if (config_info["device"] == "xpu") {
    option->UseKunlunXin(std::stoi(config_info["device_id"]),
                         std::stoi(config_info["xpu_l3_cache"]));
    if (config_info["backend"] == "ort") {
      option->UseOrtBackend();
    } else if (config_info["backend"] == "paddle") {
      option->UsePaddleInferBackend();
    } else if (config_info["backend"] == "lite") {
      option->UsePaddleLiteBackend();
      if (config_info["use_fp16"] == "true") {
        option->paddle_lite_option.enable_fp16 = true;
      }
    } else if (config_info["backend"] == "default") {
      PrintBenchmarkInfo(config_info);
      return true;
    } else {
      std::cerr << "When running inference on XPU, only default/ort/paddle/"
                   "lite backends are supported now; "
                << config_info["backend"] << " is not supported." << std::endl;
      PrintUsage();
      return false;
    }
  } else {
    std::cerr << "Only CPU/GPU/XPU devices are supported now; "
              << config_info["device"] << " is not supported." << std::endl;
    PrintUsage();
    return false;
  }
  PrintBenchmarkInfo(config_info);
  return true;
}
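
For context, a minimal sketch of how this header might be driven. The config keys below simply mirror the config_info lookups in CreateRuntimeOption above; the exact file layout accepted by ResultManager::LoadBenchmarkConfig, the --config_path flag name, and the sample values are illustrative assumptions, not the repository's exact benchmark setup.

# config.gpu.txt (illustrative; keys taken from the lookups above)
device: gpu
device_id: 0
cpu_thread_nums: 1
warmup: 200
repeat: 1000
backend: paddle_trt
profile_mode: runtime
include_h2d_d2h: false
use_fp16: true

A hypothetical driver, assuming the header is used the way the benchmark binaries in this directory would use it (the model and file names are placeholders):

// benchmark_demo.cc -- hypothetical usage sketch for option.h
#include <iostream>

#include "fastdeploy/vision.h"
#include "option.h"

int main(int argc, char* argv[]) {
  // Fill `option` from the file passed via --config_path; bail out on an
  // unsupported device/backend combination.
  fastdeploy::RuntimeOption option;
  if (!CreateRuntimeOption(&option, argc, argv, true)) {
    return -1;
  }
  // Hand the configured option to any FastDeploy model, e.g. a classifier.
  auto model = fastdeploy::vision::classification::PaddleClasModel(
      "inference.pdmodel", "inference.pdiparams", "inference_cls.yaml",
      option);
  if (!model.Initialized()) {
    std::cerr << "Failed to initialize model." << std::endl;
    return -1;
  }
  return 0;
}

Invoked as, for example: ./benchmark_demo --config_path config.gpu.txt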