Mirror of https://github.com/PaddlePaddle/FastDeploy.git, synced 2025-10-13 04:13:58 +08:00
[Benchmark] Add macros for benchmark (#1301)
* add GPL license
* add GPL-3.0 license
* add GPL-3.0 license
* add GPL-3.0 license
* support yolov8
* add pybind for yolov8
* add yolov8 readme
* add cpp benchmark
* add cpu and gpu mem
* public part split
* add runtime mode
* fixed bugs
* add cpu_thread_nums
* deal with comments
* deal with comments
* deal with comments
* rm useless code
* add FASTDEPLOY_DECL
* add FASTDEPLOY_DECL
* fixed for windows
* mv rss to pss
* mv rss to pss
* Update utils.cc
* use thread to collect mem
* Add ResourceUsageMonitor
* rm useless code
* fixed bug
* fixed typo
* update ResourceUsageMonitor
* fixed bug
* fixed bug
* add note for ResourceUsageMonitor
* deal with comments
* add macros
* deal with comments
* deal with comments
* deal with comments
* re-lint

---------

Co-authored-by: DefTruth <31974251+DefTruth@users.noreply.github.com>
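The BENCHMARK_MODEL macro used in the new benchmark_yolov5.cc below is defined in benchmark/cpp/macros.h, which this diff does not show. As a rough sketch only, assuming the macro simply folds the warmup/repeat/timing loop that the old RunModel() ran inline into a reusable block (and reusing the FLAGS_warmup, FLAGS_repeat, and fastdeploy::TimeCounter names that already appear in this file's history), it might expand along these lines; the real definition may also cover the "runtime" profile mode and memory collection:

// Illustrative sketch only; the real macro lives in benchmark/cpp/macros.h.
#define BENCHMARK_MODEL(MODEL_NAME, BENCHMARK_FUNC)                        \
  {                                                                        \
    if (!MODEL_NAME.Initialized()) {                                       \
      std::cerr << "Failed to initialize." << std::endl;                   \
      return -1;                                                           \
    }                                                                      \
    std::cout << "Warmup " << FLAGS_warmup << " times..." << std::endl;    \
    for (int i = 0; i < FLAGS_warmup; i++) {                               \
      if (!(BENCHMARK_FUNC)) {                                             \
        std::cerr << "Failed to predict." << std::endl;                    \
        return -1;                                                         \
      }                                                                    \
    }                                                                      \
    std::cout << "Repeat " << FLAGS_repeat << " times..." << std::endl;    \
    fastdeploy::TimeCounter tc;                                            \
    tc.Start();                                                            \
    for (int i = 0; i < FLAGS_repeat; i++) {                               \
      if (!(BENCHMARK_FUNC)) {                                             \
        std::cerr << "Failed to predict." << std::endl;                    \
        return -1;                                                         \
      }                                                                    \
    }                                                                      \
    tc.End();                                                              \
    double end2end = tc.Duration() / FLAGS_repeat * 1000;                  \
    std::cout << "End2End(ms): " << end2end << "ms." << std::endl;         \
  }

With the loop hidden behind a macro like this, each per-model benchmark (benchmark_yolov5.cc, the yolov8 one mentioned above, and so on) shrinks to flag parsing, model construction, one BENCHMARK_MODEL call, and visualization.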
97  benchmark/cpp/benchmark_yolov5.cc  (Executable file → Normal file)
@@ -12,96 +12,25 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-#include "fastdeploy/benchmark/utils.h"
-#include "fastdeploy/vision.h"
 #include "flags.h"
+#include "macros.h"
+#include "option.h"
 
-bool RunModel(std::string model_file, std::string image_file, size_t warmup,
-              size_t repeats, size_t sampling_interval) {
+int main(int argc, char* argv[]) {
+  google::ParseCommandLineFlags(&argc, &argv, true);
+  auto im = cv::imread(FLAGS_image);
   // Initialization
   auto option = fastdeploy::RuntimeOption();
   if (!CreateRuntimeOption(&option)) {
     PrintUsage();
     return false;
   }
-  if (FLAGS_profile_mode == "runtime") {
-    option.EnableProfiling(FLAGS_include_h2d_d2h, repeats, warmup);
-  }
-  auto model = fastdeploy::vision::detection::YOLOv5(model_file, "", option);
-  if (!model.Initialized()) {
-    std::cerr << "Failed to initialize." << std::endl;
-    return false;
-  }
-  auto im = cv::imread(image_file);
-  // For collect memory info
-  fastdeploy::benchmark::ResourceUsageMonitor resource_moniter(
-      sampling_interval, FLAGS_device_id);
-  if (FLAGS_collect_memory_info) {
-    resource_moniter.Start();
-  }
-  // For Runtime
-  if (FLAGS_profile_mode == "runtime") {
-    fastdeploy::vision::DetectionResult res;
-    if (!model.Predict(im, &res)) {
-      std::cerr << "Failed to predict." << std::endl;
-      return false;
-    }
-    double profile_time = model.GetProfileTime() * 1000;
-    std::cout << "Runtime(ms): " << profile_time << "ms." << std::endl;
-    auto vis_im = fastdeploy::vision::VisDetection(im, res);
-    cv::imwrite("vis_result.jpg", vis_im);
-    std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl;
-  } else {
-    // For End2End
-    // Step1: warm up for warmup times
-    std::cout << "Warmup " << warmup << " times..." << std::endl;
-    for (int i = 0; i < warmup; i++) {
-      fastdeploy::vision::DetectionResult res;
-      if (!model.Predict(im, &res)) {
-        std::cerr << "Failed to predict." << std::endl;
-        return false;
-      }
-    }
-    // Step2: repeat for repeats times
-    std::cout << "Counting time..." << std::endl;
-    std::cout << "Repeat " << repeats << " times..." << std::endl;
-    fastdeploy::vision::DetectionResult res;
-    fastdeploy::TimeCounter tc;
-    tc.Start();
-    for (int i = 0; i < repeats; i++) {
-      if (!model.Predict(im, &res)) {
-        std::cerr << "Failed to predict." << std::endl;
-        return false;
-      }
-    }
-    tc.End();
-    double end2end = tc.Duration() / repeats * 1000;
-    std::cout << "End2End(ms): " << end2end << "ms." << std::endl;
-    auto vis_im = fastdeploy::vision::VisDetection(im, res);
-    cv::imwrite("vis_result.jpg", vis_im);
-    std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl;
-  }
-  if (FLAGS_collect_memory_info) {
-    float cpu_mem = resource_moniter.GetMaxCpuMem();
-    float gpu_mem = resource_moniter.GetMaxGpuMem();
-    float gpu_util = resource_moniter.GetMaxGpuUtil();
-    std::cout << "cpu_pss_mb: " << cpu_mem << "MB." << std::endl;
-    std::cout << "gpu_pss_mb: " << gpu_mem << "MB." << std::endl;
-    std::cout << "gpu_util: " << gpu_util << std::endl;
-    resource_moniter.Stop();
-  }
-
-  return true;
-}
-
-int main(int argc, char* argv[]) {
-  google::ParseCommandLineFlags(&argc, &argv, true);
-  int repeats = FLAGS_repeat;
-  int warmup = FLAGS_warmup;
-  int sampling_interval = FLAGS_sampling_interval;
-  // Run model
-  if (!RunModel(FLAGS_model, FLAGS_image, warmup, repeats, sampling_interval)) {
-    exit(1);
-  }
+  auto model_yolov5 =
+      fastdeploy::vision::detection::YOLOv5(FLAGS_model, "", option);
+  fastdeploy::vision::DetectionResult res;
+  BENCHMARK_MODEL(model_yolov5, model_yolov5.Predict(im, &res))
+  auto vis_im = fastdeploy::vision::VisDetection(im, res);
+  cv::imwrite("vis_result.jpg", vis_im);
+  std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl;
   return 0;
 }
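The cpu_pss_mb / gpu_pss_mb / gpu_util figures printed by the removed code (and, per the commit message, still collected behind the new macros) come from fastdeploy::benchmark::ResourceUsageMonitor, which samples usage on a background thread. Below is a minimal sketch of that sampling pattern, mirroring only the interface visible above (constructor taking a sampling interval and device id, Start(), Stop(), GetMaxCpuMem()); the class name, the interval unit, and the stubbed-out reader are placeholders, not the repo's actual implementation in fastdeploy/benchmark/utils.cc:

// Placeholder sketch of a thread-based peak-usage sampler; the real
// ResourceUsageMonitor reads CPU PSS and GPU memory/utilization instead
// of the stubbed reader below.
#include <atomic>
#include <chrono>
#include <thread>

class PeakUsageSampler {
 public:
  PeakUsageSampler(int interval_ms, int gpu_device_id)
      : interval_ms_(interval_ms), gpu_device_id_(gpu_device_id) {}

  void Start() {
    running_ = true;
    worker_ = std::thread([this] {
      while (running_) {
        // A real monitor would read /proc/self/smaps (PSS) and query the
        // GPU identified by gpu_device_id_ here.
        float current_mb = ReadCurrentCpuMemMB();
        if (current_mb > max_cpu_mem_mb_) max_cpu_mem_mb_ = current_mb;
        std::this_thread::sleep_for(std::chrono::milliseconds(interval_ms_));
      }
    });
  }

  void Stop() {
    running_ = false;
    if (worker_.joinable()) worker_.join();
  }

  float GetMaxCpuMem() const { return max_cpu_mem_mb_; }

 private:
  static float ReadCurrentCpuMemMB() { return 0.0f; }  // stub
  int interval_ms_;
  int gpu_device_id_;
  std::atomic<bool> running_{false};
  std::thread worker_;
  float max_cpu_mem_mb_ = 0.0f;
};

Sampling on a separate thread is what lets the benchmark report peak memory while Predict() keeps the main thread busy, rather than a single reading taken after the run finishes.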