Add Benchmark test (#200)

* add ppcls benchmark

* fix txt path

* resolve conflicts

* address review comments

* add enable_trt_fp16 option

* add OpenVINO backend for seg and det

* fix valid backends in ppdet

* fix valid backends in yolo

* add copyright headers and remove Chinese notes

* add ppdet, ppseg, and yolo benchmarks

* add cpu/gpu memory info

Co-authored-by: Jason <jiangjiajun@baidu.com>
Authored-by: WJJ1995
Date: 2022-09-14 20:15:01 +08:00
Committed-by: GitHub
Parent: 695034fdad
Commit: 06908b8beb
19 changed files with 1014 additions and 190 deletions
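In FastDeploy's Python API, the new --enable_trt_fp16 switch reduces to the RuntimeOption calls below (a minimal sketch; the build_option helpers in the added scripts wire the flag through):

    import fastdeploy as fd

    option = fd.RuntimeOption()
    option.use_gpu(0)          # run on GPU 0
    option.use_trt_backend()   # select the TensorRT backend
    option.enable_trt_fp16()   # the FP16 switch added in this PR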

benchmark/benchmark_ppcls.py Normal file

@@ -0,0 +1,170 @@
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import fastdeploy as fd
import cv2
import os
import numpy as np
import pynvml
import psutil
import GPUtil
import time


def parse_arguments():
import argparse
import ast
parser = argparse.ArgumentParser()
parser.add_argument(
"--model", required=True, help="Path of PaddleClas model.")
parser.add_argument(
"--image", type=str, required=False, help="Path of test image file.")
parser.add_argument(
"--cpu_num_thread",
type=int,
default=8,
help="default number of cpu thread.")
parser.add_argument(
"--device_id", type=int, default=0, help="device(gpu) id")
parser.add_argument(
"--iter_num",
required=True,
type=int,
default=300,
help="number of iterations for computing performace.")
parser.add_argument(
"--device",
default="cpu",
help="Type of inference device, support 'cpu' or 'gpu'.")
parser.add_argument(
"--backend",
type=str,
default="ort",
help="inference backend, ort, ov, trt, paddle.")
    parser.add_argument(
        "--enable_trt_fp16",
        type=ast.literal_eval,  # argparse's type=bool treats any non-empty string as True
        default=False,
        help="whether to enable FP16 in the TensorRT backend.")
    args = parser.parse_args()
    return args


def build_option(args):
option = fd.RuntimeOption()
device = args.device
backend = args.backend
option.set_cpu_thread_num(args.cpu_num_thread)
if device == "gpu":
option.use_gpu(args.device_id)
if backend == "trt":
assert device == "gpu", "the trt backend need device==gpu"
option.use_trt_backend()
if args.enable_trt_fp16:
option.enable_trt_fp16()
elif backend == "ov":
assert device == "cpu", "the openvino backend need device==cpu"
option.use_openvino_backend()
elif backend == "paddle":
option.use_paddle_backend()
elif backend == "ort":
option.use_ort_backend()
else:
print("%s is an unsupported backend" % backend)
    return option


def get_current_memory_mb(gpu_id=None):
pid = os.getpid()
p = psutil.Process(pid)
info = p.memory_full_info()
cpu_mem = info.uss / 1024. / 1024.
gpu_mem = 0
if gpu_id is not None:
pynvml.nvmlInit()
        handle = pynvml.nvmlDeviceGetHandleByIndex(gpu_id)  # use the requested GPU rather than always index 0
meminfo = pynvml.nvmlDeviceGetMemoryInfo(handle)
gpu_mem = meminfo.used / 1024. / 1024.
    return cpu_mem, gpu_mem


def get_current_gputil(gpu_id):
GPUs = GPUtil.getGPUs()
gpu_load = GPUs[gpu_id].load
    return gpu_load


if __name__ == '__main__':
args = parse_arguments()
option = build_option(args)
model_file = os.path.join(args.model, "inference.pdmodel")
params_file = os.path.join(args.model, "inference.pdiparams")
config_file = os.path.join(args.model, "inference_cls.yaml")
gpu_id = args.device_id
end2end_statis = list()
cpu_mem, gpu_mem, gpu_util = 0, 0, 0
if args.device == "cpu":
file_path = args.model + "_model_" + args.backend + "_" + \
args.device + "_" + str(args.cpu_num_thread) + ".txt"
else:
if args.enable_trt_fp16:
file_path = args.model + "_model_" + \
args.backend + "_fp16_" + args.device + ".txt"
else:
file_path = args.model + "_model_" + args.backend + "_" + args.device + ".txt"
f = open(file_path, "w")
f.writelines("===={}====: \n".format(file_path.split("/")[1][:-4]))
try:
model = fd.vision.classification.PaddleClasModel(
model_file, params_file, config_file, runtime_option=option)
model.enable_record_time_of_runtime()
for i in range(args.iter_num):
im = cv2.imread(args.image)
start = time.time()
result = model.predict(im)
end2end_statis.append(time.time() - start)
gpu_util += get_current_gputil(gpu_id)
cm, gm = get_current_memory_mb(gpu_id)
cpu_mem += cm
gpu_mem += gm
runtime_statis = model.print_statis_info_of_runtime()
warmup_iter = args.iter_num // 5
repeat_iter = args.iter_num - warmup_iter
end2end_statis = end2end_statis[warmup_iter:]
dump_result = dict()
dump_result["runtime"] = runtime_statis["avg_time"] * 1000
dump_result["end2end"] = np.mean(end2end_statis) * 1000
dump_result["cpu_rss_mb"] = cpu_mem / repeat_iter
dump_result["gpu_rss_mb"] = gpu_mem / repeat_iter
dump_result["gpu_util"] = gpu_util / repeat_iter
f.writelines("Runtime(ms): {} \n".format(str(dump_result["runtime"])))
f.writelines("End2End(ms): {} \n".format(str(dump_result["end2end"])))
f.writelines("cpu_rss_mb: {} \n".format(
str(dump_result["cpu_rss_mb"])))
f.writelines("gpu_rss_mb: {} \n".format(
str(dump_result["gpu_rss_mb"])))
f.writelines("gpu_util: {} \n".format(str(dump_result["gpu_util"])))
    except Exception:
f.writelines("!!!!!Infer Failed\n")
f.close()
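A typical invocation, mirroring the runner script added later in this PR (the model directory name is a placeholder):

    python benchmark_ppcls.py --model ppcls_model/<model_dir> --image ILSVRC2012_val_00000010.jpeg --device gpu --iter_num 2000 --backend trt --enable_trt_fp16 True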

benchmark/benchmark_ppdet.py Normal file

@@ -0,0 +1,190 @@
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import fastdeploy as fd
import cv2
import os
import numpy as np
import pynvml
import psutil
import GPUtil
import time


def parse_arguments():
import argparse
import ast
parser = argparse.ArgumentParser()
parser.add_argument(
"--model", required=True, help="Path of PaddleDetection model.")
parser.add_argument(
"--image", type=str, required=False, help="Path of test image file.")
parser.add_argument(
"--cpu_num_thread",
type=int,
default=8,
help="default number of cpu thread.")
parser.add_argument(
"--device_id", type=int, default=0, help="device(gpu) id")
parser.add_argument(
"--iter_num",
required=True,
type=int,
default=300,
help="number of iterations for computing performace.")
parser.add_argument(
"--device",
default="cpu",
help="Type of inference device, support 'cpu' or 'gpu'.")
parser.add_argument(
"--backend",
type=str,
default="ort",
help="inference backend, ort, ov, trt, paddle.")
    parser.add_argument(
        "--enable_trt_fp16",
        type=ast.literal_eval,  # argparse's type=bool treats any non-empty string as True
        default=False,
        help="whether to enable FP16 in the TensorRT backend.")
    args = parser.parse_args()
    return args


def build_option(args):
option = fd.RuntimeOption()
device = args.device
backend = args.backend
option.set_cpu_thread_num(args.cpu_num_thread)
if device == "gpu":
option.use_gpu(args.device_id)
if backend == "trt":
assert device == "gpu", "the trt backend need device==gpu"
option.use_trt_backend()
if args.enable_trt_fp16:
option.enable_trt_fp16()
elif backend == "ov":
assert device == "cpu", "the openvino backend need device==cpu"
option.use_openvino_backend()
elif backend == "paddle":
option.use_paddle_backend()
elif backend == "ort":
option.use_ort_backend()
else:
print("%s is an unsupported backend" % backend)
    return option


def get_current_memory_mb(gpu_id=None):
pid = os.getpid()
p = psutil.Process(pid)
info = p.memory_full_info()
cpu_mem = info.uss / 1024. / 1024.
gpu_mem = 0
if gpu_id is not None:
pynvml.nvmlInit()
        handle = pynvml.nvmlDeviceGetHandleByIndex(gpu_id)  # use the requested GPU rather than always index 0
meminfo = pynvml.nvmlDeviceGetMemoryInfo(handle)
gpu_mem = meminfo.used / 1024. / 1024.
    return cpu_mem, gpu_mem


def get_current_gputil(gpu_id):
GPUs = GPUtil.getGPUs()
gpu_load = GPUs[gpu_id].load
    return gpu_load


if __name__ == '__main__':
args = parse_arguments()
option = build_option(args)
model_file = os.path.join(args.model, "model.pdmodel")
params_file = os.path.join(args.model, "model.pdiparams")
config_file = os.path.join(args.model, "infer_cfg.yml")
gpu_id = args.device_id
end2end_statis = list()
cpu_mem, gpu_mem, gpu_util = 0, 0, 0
if args.device == "cpu":
file_path = args.model + "_model_" + args.backend + "_" + \
args.device + "_" + str(args.cpu_num_thread) + ".txt"
else:
if args.enable_trt_fp16:
file_path = args.model + "_model_" + args.backend + "_fp16_" + args.device + ".txt"
else:
file_path = args.model + "_model_" + args.backend + "_" + args.device + ".txt"
f = open(file_path, "w")
f.writelines("===={}====: \n".format(file_path.split("/")[1][:-4]))
try:
if "ppyoloe" in args.model:
model = fd.vision.detection.PPYOLOE(
model_file, params_file, config_file, runtime_option=option)
elif "picodet" in args.model:
model = fd.vision.detection.PicoDet(
model_file, params_file, config_file, runtime_option=option)
elif "yolox" in args.model:
model = fd.vision.detection.PaddleYOLOX(
model_file, params_file, config_file, runtime_option=option)
elif "yolov3" in args.model:
model = fd.vision.detection.YOLOv3(
model_file, params_file, config_file, runtime_option=option)
elif "ppyolo_r50vd_dcn_1x_coco" in args.model or "ppyolov2_r101vd_dcn_365e_coco" in args.model:
model = fd.vision.detection.PPYOLO(
model_file, params_file, config_file, runtime_option=option)
elif "faster_rcnn" in args.model:
model = fd.vision.detection.FasterRCNN(
model_file, params_file, config_file, runtime_option=option)
else:
raise Exception("model {} not support now in ppdet series".format(
args.model))
model.enable_record_time_of_runtime()
for i in range(args.iter_num):
im = cv2.imread(args.image)
start = time.time()
result = model.predict(im)
end2end_statis.append(time.time() - start)
gpu_util += get_current_gputil(gpu_id)
cm, gm = get_current_memory_mb(gpu_id)
cpu_mem += cm
gpu_mem += gm
runtime_statis = model.print_statis_info_of_runtime()
warmup_iter = args.iter_num // 5
repeat_iter = args.iter_num - warmup_iter
end2end_statis = end2end_statis[warmup_iter:]
dump_result = dict()
dump_result["runtime"] = runtime_statis["avg_time"] * 1000
dump_result["end2end"] = np.mean(end2end_statis) * 1000
dump_result["cpu_rss_mb"] = cpu_mem / repeat_iter
dump_result["gpu_rss_mb"] = gpu_mem / repeat_iter
dump_result["gpu_util"] = gpu_util / repeat_iter
f.writelines("Runtime(ms): {} \n".format(str(dump_result["runtime"])))
f.writelines("End2End(ms): {} \n".format(str(dump_result["end2end"])))
f.writelines("cpu_rss_mb: {} \n".format(
str(dump_result["cpu_rss_mb"])))
f.writelines("gpu_rss_mb: {} \n".format(
str(dump_result["gpu_rss_mb"])))
f.writelines("gpu_util: {} \n".format(str(dump_result["gpu_util"])))
    except Exception:
f.writelines("!!!!!Infer Failed\n")
f.close()
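Note that the script dispatches on substrings of --model, so the model directory name must contain one of the recognized tokens (ppyoloe, picodet, yolox, yolov3, ppyolo_r50vd_dcn_1x_coco / ppyolov2_r101vd_dcn_365e_coco, faster_rcnn); a hypothetical example:

    python benchmark_ppdet.py --model ppdet_model/ppyoloe_crn_l_300e_coco --image 000000014439.jpg --device gpu --iter_num 2000 --backend trt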

benchmark/benchmark_ppseg.py Normal file

@@ -0,0 +1,169 @@
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import fastdeploy as fd
import cv2
import os
import numpy as np
import pynvml
import psutil
import GPUtil
import time


def parse_arguments():
import argparse
import ast
parser = argparse.ArgumentParser()
parser.add_argument(
"--model", required=True, help="Path of PaddleSeg model.")
parser.add_argument(
"--image", type=str, required=False, help="Path of test image file.")
parser.add_argument(
"--cpu_num_thread",
type=int,
default=8,
help="default number of cpu thread.")
parser.add_argument(
"--device_id", type=int, default=0, help="device(gpu) id")
parser.add_argument(
"--iter_num",
required=True,
type=int,
default=300,
help="number of iterations for computing performace.")
parser.add_argument(
"--device",
default="cpu",
help="Type of inference device, support 'cpu' or 'gpu'.")
parser.add_argument(
"--backend",
type=str,
default="ort",
help="inference backend, ort, ov, trt, paddle.")
    parser.add_argument(
        "--enable_trt_fp16",
        type=ast.literal_eval,  # argparse's type=bool treats any non-empty string as True
        default=False,
        help="whether to enable FP16 in the TensorRT backend.")
    args = parser.parse_args()
    return args


def build_option(args):
option = fd.RuntimeOption()
device = args.device
backend = args.backend
option.set_cpu_thread_num(args.cpu_num_thread)
if device == "gpu":
option.use_gpu(args.device_id)
if backend == "trt":
        assert device == "gpu", "the TRT backend requires --device gpu"
option.use_trt_backend()
if args.enable_trt_fp16:
option.enable_trt_fp16()
elif backend == "ov":
        assert device == "cpu", "the OpenVINO backend requires --device cpu"
option.use_openvino_backend()
elif backend == "paddle":
option.use_paddle_backend()
elif backend == "ort":
option.use_ort_backend()
else:
print("%s is an unsupported backend" % backend)
    return option


def get_current_memory_mb(gpu_id=None):
pid = os.getpid()
p = psutil.Process(pid)
info = p.memory_full_info()
cpu_mem = info.uss / 1024. / 1024.
gpu_mem = 0
if gpu_id is not None:
pynvml.nvmlInit()
        handle = pynvml.nvmlDeviceGetHandleByIndex(gpu_id)  # use the requested GPU rather than always index 0
meminfo = pynvml.nvmlDeviceGetMemoryInfo(handle)
gpu_mem = meminfo.used / 1024. / 1024.
    return cpu_mem, gpu_mem


def get_current_gputil(gpu_id):
GPUs = GPUtil.getGPUs()
gpu_load = GPUs[gpu_id].load
    return gpu_load


if __name__ == '__main__':
args = parse_arguments()
option = build_option(args)
model_file = os.path.join(args.model, "model.pdmodel")
params_file = os.path.join(args.model, "model.pdiparams")
config_file = os.path.join(args.model, "deploy.yaml")
gpu_id = args.device_id
end2end_statis = list()
cpu_mem, gpu_mem, gpu_util = 0, 0, 0
if args.device == "cpu":
file_path = args.model + "_model_" + args.backend + "_" + \
args.device + "_" + str(args.cpu_num_thread) + ".txt"
else:
if args.enable_trt_fp16:
file_path = args.model + "_model_" + args.backend + "_fp16_" + args.device + ".txt"
else:
file_path = args.model + "_model_" + args.backend + "_" + args.device + ".txt"
f = open(file_path, "w")
f.writelines("===={}====: \n".format(file_path.split("/")[1][:-4]))
try:
model = fd.vision.segmentation.PaddleSegModel(
model_file, params_file, config_file, runtime_option=option)
model.enable_record_time_of_runtime()
for i in range(args.iter_num):
im = cv2.imread(args.image)
start = time.time()
result = model.predict(im)
end2end_statis.append(time.time() - start)
gpu_util += get_current_gputil(gpu_id)
cm, gm = get_current_memory_mb(gpu_id)
cpu_mem += cm
gpu_mem += gm
runtime_statis = model.print_statis_info_of_runtime()
warmup_iter = args.iter_num // 5
repeat_iter = args.iter_num - warmup_iter
end2end_statis = end2end_statis[warmup_iter:]
dump_result = dict()
dump_result["runtime"] = runtime_statis["avg_time"] * 1000
dump_result["end2end"] = np.mean(end2end_statis) * 1000
dump_result["cpu_rss_mb"] = cpu_mem / repeat_iter
dump_result["gpu_rss_mb"] = gpu_mem / repeat_iter
dump_result["gpu_util"] = gpu_util / repeat_iter
f.writelines("Runtime(ms): {} \n".format(str(dump_result["runtime"])))
f.writelines("End2End(ms): {} \n".format(str(dump_result["end2end"])))
f.writelines("cpu_rss_mb: {} \n".format(
str(dump_result["cpu_rss_mb"])))
f.writelines("gpu_rss_mb: {} \n".format(
str(dump_result["gpu_rss_mb"])))
f.writelines("gpu_util: {} \n".format(str(dump_result["gpu_util"])))
    except Exception:
f.writelines("!!!!!Infer Failed\n")
f.close()

benchmark/benchmark_yolo.py Normal file

@@ -0,0 +1,183 @@
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import fastdeploy as fd
import cv2
import os
import numpy as np
import pynvml
import psutil
import GPUtil
import time


def parse_arguments():
import argparse
import ast
parser = argparse.ArgumentParser()
parser.add_argument(
"--model", required=True, help="Path of Yolo onnx model.")
parser.add_argument(
"--image", type=str, required=False, help="Path of test image file.")
parser.add_argument(
"--cpu_num_thread",
type=int,
default=8,
help="default number of cpu thread.")
parser.add_argument(
"--device_id", type=int, default=0, help="device(gpu) id")
parser.add_argument(
"--iter_num",
required=True,
type=int,
default=300,
help="number of iterations for computing performace.")
parser.add_argument(
"--device",
default="cpu",
help="Type of inference device, support 'cpu' or 'gpu'.")
parser.add_argument(
"--backend",
type=str,
default="ort",
help="inference backend, ort, ov, trt, paddle.")
    parser.add_argument(
        "--enable_trt_fp16",
        type=ast.literal_eval,  # argparse's type=bool treats any non-empty string as True
        default=False,
        help="whether to enable FP16 in the TensorRT backend.")
    args = parser.parse_args()
    return args


def build_option(args):
option = fd.RuntimeOption()
device = args.device
backend = args.backend
option.set_cpu_thread_num(args.cpu_num_thread)
if device == "gpu":
option.use_gpu(args.device_id)
if backend == "trt":
        assert device == "gpu", "the TRT backend requires --device gpu"
option.use_trt_backend()
if args.enable_trt_fp16:
option.enable_trt_fp16()
elif backend == "ov":
        assert device == "cpu", "the OpenVINO backend requires --device cpu"
option.use_openvino_backend()
elif backend == "paddle":
option.use_paddle_backend()
elif backend == "ort":
option.use_ort_backend()
else:
print("%s is an unsupported backend" % backend)
    return option


def get_current_memory_mb(gpu_id=None):
pid = os.getpid()
p = psutil.Process(pid)
info = p.memory_full_info()
cpu_mem = info.uss / 1024. / 1024.
gpu_mem = 0
if gpu_id is not None:
pynvml.nvmlInit()
        handle = pynvml.nvmlDeviceGetHandleByIndex(gpu_id)  # use the requested GPU rather than always index 0
meminfo = pynvml.nvmlDeviceGetMemoryInfo(handle)
gpu_mem = meminfo.used / 1024. / 1024.
    return cpu_mem, gpu_mem


def get_current_gputil(gpu_id):
GPUs = GPUtil.getGPUs()
gpu_load = GPUs[gpu_id].load
    return gpu_load


if __name__ == '__main__':
args = parse_arguments()
option = build_option(args)
model_file = args.model
gpu_id = args.device_id
end2end_statis = list()
cpu_mem, gpu_mem, gpu_util = 0, 0, 0
if args.device == "cpu":
file_path = args.model + "_model_" + args.backend + "_" + \
args.device + "_" + str(args.cpu_num_thread) + ".txt"
else:
if args.enable_trt_fp16:
file_path = args.model + "_model_" + args.backend + "_fp16_" + args.device + ".txt"
else:
file_path = args.model + "_model_" + args.backend + "_" + args.device + ".txt"
f = open(file_path, "w")
f.writelines("===={}====: \n".format(file_path.split("/")[1][:-4]))
try:
if "yolox" in model_file:
model = fd.vision.detection.YOLOX(
model_file, runtime_option=option)
elif "yolov5" in model_file:
model = fd.vision.detection.YOLOv5(
model_file, runtime_option=option)
elif "yolov6" in model_file:
model = fd.vision.detection.YOLOv6(
model_file, runtime_option=option)
elif "yolov7" in model_file:
model = fd.vision.detection.YOLOv7(
model_file, runtime_option=option)
else:
raise Exception("model {} not support now in yolo series".format(
args.model))
model.enable_record_time_of_runtime()
for i in range(args.iter_num):
im = cv2.imread(args.image)
start = time.time()
result = model.predict(im)
end2end_statis.append(time.time() - start)
gpu_util += get_current_gputil(gpu_id)
cm, gm = get_current_memory_mb(gpu_id)
cpu_mem += cm
gpu_mem += gm
runtime_statis = model.print_statis_info_of_runtime()
warmup_iter = args.iter_num // 5
repeat_iter = args.iter_num - warmup_iter
end2end_statis = end2end_statis[warmup_iter:]
dump_result = dict()
dump_result["runtime"] = runtime_statis["avg_time"] * 1000
dump_result["end2end"] = np.mean(end2end_statis) * 1000
dump_result["cpu_rss_mb"] = cpu_mem / repeat_iter
dump_result["gpu_rss_mb"] = gpu_mem / repeat_iter
dump_result["gpu_util"] = gpu_util / repeat_iter
f.writelines("Runtime(ms): {} \n".format(str(dump_result["runtime"])))
f.writelines("End2End(ms): {} \n".format(str(dump_result["end2end"])))
f.writelines("cpu_rss_mb: {} \n".format(
str(dump_result["cpu_rss_mb"])))
f.writelines("gpu_rss_mb: {} \n".format(
str(dump_result["gpu_rss_mb"])))
f.writelines("gpu_util: {} \n".format(str(dump_result["gpu_util"])))
    except Exception:
f.writelines("!!!!!Infer Failed\n")
f.close()
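Unlike the Paddle-suite scripts above, --model here is the path to an ONNX file rather than a directory, and the YOLO variant is again chosen by filename substring; a hypothetical example:

    python benchmark_yolo.py --model yolo_model/yolov5s.onnx --image 000000014439.jpg --device gpu --iter_num 2000 --backend trt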

benchmark/convert_info.py Normal file

@@ -0,0 +1,155 @@
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse

parser = argparse.ArgumentParser(
    description="Convert raw benchmark txt results into structured tables.")
parser.add_argument('--txt_path', type=str, default='result.txt')
parser.add_argument('--domain', type=str, default='ppcls')
args = parser.parse_args()
txt_path = args.txt_path
domain = args.domain
f1 = open(txt_path, "r")
lines = f1.readlines()
line_nums = len(lines)
ort_cpu_thread1 = dict()
ort_cpu_thread8 = dict()
ort_gpu = dict()
ov_cpu_thread1 = dict()
ov_cpu_thread8 = dict()
paddle_cpu_thread1 = dict()
paddle_cpu_thread8 = dict()
paddle_gpu = dict()
trt_gpu = dict()
trt_gpu_fp16 = dict()
model_name_set = set()
for i in range(line_nums):
if "====" in lines[i]:
model_name = lines[i].strip().split("_model")[0][4:]
model_name_set.add(model_name)
runtime = "-"
end2end = "-"
if "Runtime(ms)" in lines[i + 1]:
runtime_ori = lines[i + 1].split(": ")[1]
# two decimal places
runtime_list = runtime_ori.split(".")
runtime = runtime_list[0] + "." + runtime_list[1][:2]
if "End2End(ms)" in lines[i + 2]:
end2end_ori = lines[i + 2].split(": ")[1]
# two decimal places
end2end_list = end2end_ori.split(".")
end2end = end2end_list[0] + "." + end2end_list[1][:2]
if "cpu_rss_mb" in lines[i + 3]:
cpu_rss_mb_ori = lines[i + 3].split(": ")[1]
# two decimal places
cpu_rss_mb_list = cpu_rss_mb_ori.split(".")
cpu_rss_mb = cpu_rss_mb_list[0] + "." + cpu_rss_mb_list[1][:2]
if "gpu_rss_mb" in lines[i + 4]:
gpu_rss_mb_ori = lines[i + 4].split(": ")[1]
# two decimal places
gpu_rss_mb_list = gpu_rss_mb_ori.split(".")
gpu_rss_mb = gpu_rss_mb_list[0] + "." + gpu_rss_mb_list[1][:2]
if "ort_cpu_1" in lines[i]:
ort_cpu_thread1[
model_name] = runtime + "\t" + end2end + "\t" + cpu_rss_mb
elif "ort_cpu_8" in lines[i]:
ort_cpu_thread8[
model_name] = runtime + "\t" + end2end + "\t" + cpu_rss_mb
elif "ort_gpu" in lines[i]:
ort_gpu[model_name] = runtime + "\t" + end2end + "\t" + gpu_rss_mb
elif "ov_cpu_1" in lines[i]:
ov_cpu_thread1[
model_name] = runtime + "\t" + end2end + "\t" + cpu_rss_mb
elif "ov_cpu_8" in lines[i]:
ov_cpu_thread8[
model_name] = runtime + "\t" + end2end + "\t" + cpu_rss_mb
elif "paddle_cpu_1" in lines[i]:
paddle_cpu_thread1[
model_name] = runtime + "\t" + end2end + "\t" + cpu_rss_mb
elif "paddle_cpu_8" in lines[i]:
paddle_cpu_thread8[
model_name] = runtime + "\t" + end2end + "\t" + cpu_rss_mb
elif "paddle_gpu" in lines[i]:
paddle_gpu[
model_name] = runtime + "\t" + end2end + "\t" + gpu_rss_mb
elif "trt_gpu" in lines[i]:
trt_gpu[model_name] = runtime + "\t" + end2end + "\t" + gpu_rss_mb
elif "trt_fp16_gpu" in lines[i]:
trt_gpu_fp16[
model_name] = runtime + "\t" + end2end + "\t" + gpu_rss_mb
f2 = open("struct_cpu_" + domain + ".txt", "w")
f2.writelines(
"model_name\tthread_nums\tort_run\tort_end2end\tcpu_rss_mb\tov_run\tov_end2end\tcpu_rss_mb\tpaddle_run\tpaddle_end2end\tcpu_rss_mb\n"
)
for model_name in model_name_set:
lines1 = model_name + '\t1\t'
lines2 = model_name + '\t8\t'
if model_name in ort_cpu_thread1 and ort_cpu_thread1[model_name] != "":
lines1 += ort_cpu_thread1[model_name] + '\t'
else:
lines1 += "-\t-\t-\t"
if model_name in ov_cpu_thread1 and ov_cpu_thread1[model_name] != "":
lines1 += ov_cpu_thread1[model_name] + '\t'
else:
lines1 += "-\t-\t-\t"
if model_name in paddle_cpu_thread1 and paddle_cpu_thread1[
model_name] != "":
lines1 += paddle_cpu_thread1[model_name] + '\n'
else:
lines1 += "-\t-\t-\n"
f2.writelines(lines1)
if model_name in ort_cpu_thread8 and ort_cpu_thread8[model_name] != "":
lines2 += ort_cpu_thread8[model_name] + '\t'
else:
lines2 += "-\t-\t-\t"
if model_name in ov_cpu_thread8 and ov_cpu_thread8[model_name] != "":
lines2 += ov_cpu_thread8[model_name] + '\t'
else:
lines2 += "-\t-\t-\t"
if model_name in paddle_cpu_thread8 and paddle_cpu_thread8[
model_name] != "":
lines2 += paddle_cpu_thread8[model_name] + '\n'
else:
lines2 += "-\t-\t-\n"
f2.writelines(lines2)
f2.close()
f3 = open("struct_gpu_" + domain + ".txt", "w")
f3.writelines(
"model_name\tort_run\tort_end2end\tgpu_rss_mb\tpaddle_run\tpaddle_end2end\tgpu_rss_mb\ttrt_run\ttrt_end2end\tgpu_rss_mb\ttrt_fp16_run\ttrt_fp16_end2end\tgpu_rss_mb\n"
)
for model_name in model_name_set:
lines1 = model_name + '\t'
if model_name in ort_gpu and ort_gpu[model_name] != "":
lines1 += ort_gpu[model_name] + '\t'
else:
lines1 += "-\t-\t-\t"
if model_name in paddle_gpu and paddle_gpu[model_name] != "":
lines1 += paddle_gpu[model_name] + '\t'
else:
lines1 += "-\t-\t-\t"
if model_name in trt_gpu and trt_gpu[model_name] != "":
lines1 += trt_gpu[model_name] + '\t'
else:
lines1 += "-\t-\t-\t"
if model_name in trt_gpu_fp16 and trt_gpu_fp16[model_name] != "":
lines1 += trt_gpu_fp16[model_name] + '\n'
else:
lines1 += "-\t-\t-\n"
f3.writelines(lines1)
f3.close()
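For reference, each benchmark script above writes blocks of the shape below, which this parser consumes; the header line encodes model, backend, device, and thread count (values here are made up):

    ====resnet50_model_ort_cpu_8====:
    Runtime(ms): 12.34
    End2End(ms): 15.67
    cpu_rss_mb: 1024.56
    gpu_rss_mb: 0.0
    gpu_util: 0.0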


@@ -0,0 +1,6 @@
numpy
pynvml
psutil
GPUtil


@@ -0,0 +1,33 @@
echo "[FastDeploy] Running PPcls benchmark..."
num_of_models=$(ls -d ppcls_model/* | wc -l)
counter=1
for model in $(ls -d ppcls_model/* )
do
echo "[Benchmark-PPcls] ${counter}/${num_of_models} $model ..."
python benchmark_ppcls.py --model $model --image ILSVRC2012_val_00000010.jpeg --cpu_num_thread 1 --iter_num 2000 --backend ort
python benchmark_ppcls.py --model $model --image ILSVRC2012_val_00000010.jpeg --cpu_num_thread 8 --iter_num 2000 --backend ort
python benchmark_ppcls.py --model $model --image ILSVRC2012_val_00000010.jpeg --cpu_num_thread 1 --iter_num 2000 --backend paddle
python benchmark_ppcls.py --model $model --image ILSVRC2012_val_00000010.jpeg --cpu_num_thread 8 --iter_num 2000 --backend paddle
python benchmark_ppcls.py --model $model --image ILSVRC2012_val_00000010.jpeg --cpu_num_thread 1 --iter_num 2000 --backend ov
python benchmark_ppcls.py --model $model --image ILSVRC2012_val_00000010.jpeg --cpu_num_thread 8 --iter_num 2000 --backend ov
python benchmark_ppcls.py --model $model --image ILSVRC2012_val_00000010.jpeg --device gpu --iter_num 2000 --backend ort
python benchmark_ppcls.py --model $model --image ILSVRC2012_val_00000010.jpeg --device gpu --iter_num 2000 --backend paddle
python benchmark_ppcls.py --model $model --image ILSVRC2012_val_00000010.jpeg --device gpu --iter_num 2000 --backend trt
python benchmark_ppcls.py --model $model --image ILSVRC2012_val_00000010.jpeg --device gpu --iter_num 2000 --backend trt --enable_trt_fp16 True
counter=$(($counter+1))
step=$(( $counter % 1 ))
if [ $step = 0 ]
then
wait
fi
done
wait
rm -rf result_ppcls.txt
touch result_ppcls.txt
cat ppcls_model/*.txt >> ./result_ppcls.txt
python convert_info.py --txt_path result_ppcls.txt --domain ppcls


@@ -0,0 +1,33 @@
echo "[FastDeploy] Running PPdet benchmark..."
num_of_models=$(ls -d ppdet_model/* | wc -l)
counter=1
for model in $(ls -d ppdet_model/* )
do
echo "[Benchmark-PPdet] ${counter}/${num_of_models} $model ..."
python benchmark_ppdet.py --model $model --image 000000014439.jpg --cpu_num_thread 1 --iter_num 2000 --backend ort
python benchmark_ppdet.py --model $model --image 000000014439.jpg --cpu_num_thread 8 --iter_num 2000 --backend ort
python benchmark_ppdet.py --model $model --image 000000014439.jpg --cpu_num_thread 1 --iter_num 2000 --backend paddle
python benchmark_ppdet.py --model $model --image 000000014439.jpg --cpu_num_thread 8 --iter_num 2000 --backend paddle
python benchmark_ppdet.py --model $model --image 000000014439.jpg --cpu_num_thread 1 --iter_num 2000 --backend ov
python benchmark_ppdet.py --model $model --image 000000014439.jpg --cpu_num_thread 8 --iter_num 2000 --backend ov
python benchmark_ppdet.py --model $model --image 000000014439.jpg --device gpu --iter_num 2000 --backend ort
python benchmark_ppdet.py --model $model --image 000000014439.jpg --device gpu --iter_num 2000 --backend paddle
python benchmark_ppdet.py --model $model --image 000000014439.jpg --device gpu --iter_num 2000 --backend trt
python benchmark_ppdet.py --model $model --image 000000014439.jpg --device gpu --iter_num 2000 --backend trt --enable_trt_fp16 True
counter=$(($counter+1))
step=$(( $counter % 1 ))
if [ $step = 0 ]
then
wait
fi
done
wait
rm -rf result_ppdet.txt
touch result_ppdet.txt
cat ppdet_model/*.txt >> ./result_ppdet.txt
python convert_info.py --txt_path result_ppdet.txt --domain ppdet


@@ -0,0 +1,33 @@
echo "[FastDeploy] Running PPseg benchmark..."
num_of_models=$(ls -d ppseg_model/* | wc -l)
counter=1
for model in $(ls -d ppseg_model/* )
do
echo "[Benchmark-PPseg] ${counter}/${num_of_models} $model ..."
python benchmark_ppseg.py --model $model --image ILSVRC2012_val_00000010.jpeg --cpu_num_thread 1 --iter_num 2000 --backend ort
python benchmark_ppseg.py --model $model --image ILSVRC2012_val_00000010.jpeg --cpu_num_thread 8 --iter_num 2000 --backend ort
python benchmark_ppseg.py --model $model --image ILSVRC2012_val_00000010.jpeg --cpu_num_thread 1 --iter_num 2000 --backend paddle
python benchmark_ppseg.py --model $model --image ILSVRC2012_val_00000010.jpeg --cpu_num_thread 8 --iter_num 2000 --backend paddle
python benchmark_ppseg.py --model $model --image ILSVRC2012_val_00000010.jpeg --cpu_num_thread 1 --iter_num 2000 --backend ov
python benchmark_ppseg.py --model $model --image ILSVRC2012_val_00000010.jpeg --cpu_num_thread 8 --iter_num 2000 --backend ov
python benchmark_ppseg.py --model $model --image ILSVRC2012_val_00000010.jpeg --device gpu --iter_num 2000 --backend ort
python benchmark_ppseg.py --model $model --image ILSVRC2012_val_00000010.jpeg --device gpu --iter_num 2000 --backend paddle
python benchmark_ppseg.py --model $model --image ILSVRC2012_val_00000010.jpeg --device gpu --iter_num 2000 --backend trt
python benchmark_ppseg.py --model $model --image ILSVRC2012_val_00000010.jpeg --device gpu --iter_num 2000 --backend trt --enable_trt_fp16 True
counter=$(($counter+1))
step=$(( $counter % 1 ))
if [ $step = 0 ]
then
wait
fi
done
wait
rm -rf result_ppseg.txt
touch result_ppseg.txt
cat ppseg_model/*.txt >> ./result_ppseg.txt
python convert_info.py --txt_path result_ppseg.txt --domain ppseg


@@ -0,0 +1,30 @@
echo "[FastDeploy] Running Yolo benchmark..."
num_of_models=$(ls -d yolo_model/* | wc -l)
counter=1
for model in $(ls -d yolo_model/* )
do
echo "[Benchmark-Yolo] ${counter}/${num_of_models} $model ..."
python benchmark_yolo.py --model $model --image 000000014439.jpg --cpu_num_thread 1 --iter_num 2000 --backend ort
python benchmark_yolo.py --model $model --image 000000014439.jpg --cpu_num_thread 8 --iter_num 2000 --backend ort
python benchmark_yolo.py --model $model --image 000000014439.jpg --cpu_num_thread 1 --iter_num 2000 --backend ov
python benchmark_yolo.py --model $model --image 000000014439.jpg --cpu_num_thread 8 --iter_num 2000 --backend ov
python benchmark_yolo.py --model $model --image 000000014439.jpg --device gpu --iter_num 2000 --backend ort
python benchmark_yolo.py --model $model --image 000000014439.jpg --device gpu --iter_num 2000 --backend trt
python benchmark_yolo.py --model $model --image 000000014439.jpg --device gpu --iter_num 2000 --backend trt --enable_trt_fp16 True
counter=$(($counter+1))
step=$(( $counter % 1 ))
if [ $step = 0 ]
then
wait
fi
done
wait
rm -rf result_yolo.txt
touch result_yolo.txt
cat yolo_model/*.txt >> ./result_yolo.txt
python convert_info.py --txt_path result_yolo.txt --domain yolo


@@ -1,178 +0,0 @@
import fastdeploy as fd
import cv2
import os
from tqdm import trange
import numpy as np
import datetime
import json


def parse_arguments():
import argparse
import ast
parser = argparse.ArgumentParser()
parser.add_argument(
"--model", required=True, help="Path of PaddleClas model.")
parser.add_argument(
"--image", type=str, required=False, help="Path of test image file.")
parser.add_argument(
"--input_name",
type=str,
required=False,
default="inputs",
help="input name of inference file.")
parser.add_argument(
"--topk", type=int, default=1, help="Return topk results.")
parser.add_argument(
"--cpu_num_thread",
type=int,
default=12,
help="default number of cpu thread.")
parser.add_argument(
"--size",
nargs='+',
type=int,
default=[1, 3, 224, 224],
help="size of inference array.")
parser.add_argument(
"--iter_num",
required=True,
type=int,
default=30,
help="number of iterations for computing performace.")
parser.add_argument(
"--device",
nargs='+',
type=str,
default=['cpu', 'cpu', 'cpu', 'gpu', 'gpu', 'gpu'],
help="Type of inference device, support 'cpu' or 'gpu'.")
parser.add_argument(
"--backend",
nargs='+',
type=str,
default=['ort', 'paddle', 'ov', 'ort', 'trt', 'paddle'],
help="inference backend.")
args = parser.parse_args()
backend_list = ['ov', 'trt', 'ort', 'paddle']
device_list = ['cpu', 'gpu']
    assert len(args.device) == len(
        args.backend), "--device and --backend must have the same number of values"
    assert args.iter_num > 10, "--iter_num must be greater than 10"
assert len(args.size
) == 4, "size should include 4 values, e.g., --size 1 3 300 300"
for b in args.backend:
assert b in backend_list, "%s backend is not supported" % b
for d in args.device:
assert d in device_list, "%s device is not supported" % d
    return args


def build_option(index, args):
option = fd.RuntimeOption()
device = args.device[index]
backend = args.backend[index]
option.set_cpu_thread_num(args.cpu_num_thread)
if device == "gpu":
option.use_gpu()
if backend == "trt":
assert device == "gpu", "the trt backend need device==gpu"
option.use_trt_backend()
option.set_trt_input_shape(args.input_name, args.size)
elif backend == "ov":
assert device == "cpu", "the openvino backend need device==cpu"
option.use_openvino_backend()
elif backend == "paddle":
option.use_paddle_backend()
elif backend == "ort":
option.use_ort_backend()
else:
print("%s is an unsupported backend" % backend)
print("============= inference using %s backend on %s device ============="
% (args.backend[index], args.device[index]))
    return option


args = parse_arguments()
save_dict = dict()
for index, device_name in enumerate(args.device):
if device_name not in save_dict:
save_dict[device_name] = dict()
    # configure the runtime and load the model
runtime_option = build_option(index, args)
model_file = os.path.join(args.model, "inference.pdmodel")
params_file = os.path.join(args.model, "inference.pdiparams")
config_file = os.path.join(args.model, "inference_cls.yaml")
model = fd.vision.classification.PaddleClasModel(
model_file, params_file, config_file, runtime_option=runtime_option)
    # build the input array
channel = args.size[1]
height = args.size[2]
width = args.size[3]
input_array = np.random.randint(
0, high=255, size=(height, width, channel), dtype=np.uint8)
    # if a test image is supplied, run inference on it instead
if args.image:
input_array = cv2.imread(args.image)
model_name = args.model.split('/')
model_name = model_name[-1] if model_name[-1] else model_name[-2]
print(" Model: ", model_name, " Input shape: ", input_array.shape)
start_time = datetime.datetime.now()
model.enable_record_time_of_runtime()
warmup_iter = args.iter_num // 5
warmup_end2end_time = 0
if "iter_num" not in save_dict:
save_dict["iter_num"] = args.iter_num
if "warmup_iter" not in save_dict:
save_dict["warmup_iter"] = warmup_iter
if "cpu_num_thread" not in save_dict:
save_dict["cpu_num_thread"] = args.cpu_num_thread
for i in trange(args.iter_num, desc="Inference Progress"):
if i == warmup_iter:
            # total end-to-end time of the warmup phase (ms)
warmup_time = datetime.datetime.now()
warmup_end2end_time = warmup_time - start_time
warmup_end2end_time = (
warmup_end2end_time.days * 24 * 60 * 60 +
warmup_end2end_time.seconds
) * 1000 + warmup_end2end_time.microseconds / 1000
result = model.predict(input_array, args.topk)
end_time = datetime.datetime.now()
    # total end-to-end time (preprocessing, inference, postprocessing)
statis_info_of_runtime_dict = model.print_statis_info_of_runtime()
end2end_time = end_time - start_time
end2end_time = (end2end_time.days * 24 * 60 * 60 + end2end_time.seconds
) * 1000 + end2end_time.microseconds / 1000
remain_end2end_time = end2end_time - warmup_end2end_time
pre_post_process = end2end_time - statis_info_of_runtime_dict[
"total_time"] * 1000
end2end = remain_end2end_time / (args.iter_num - warmup_iter)
runtime = statis_info_of_runtime_dict["avg_time"] * 1000
print("Total time of end2end: %s ms" % str(end2end_time))
print("Average time of end2end exclude warmup step: %s ms" % str(end2end))
print("Total time of preprocess and postprocess in warmup step: %s ms" %
str(warmup_end2end_time - statis_info_of_runtime_dict["warmup_time"]
* 1000))
print(
"Average time of preprocess and postprocess exclude warmup step: %s ms"
% str((remain_end2end_time - statis_info_of_runtime_dict["remain_time"]
* 1000) / (args.iter_num - warmup_iter)))
    # structured output
backend_name = args.backend[index]
save_dict[device_name][backend_name] = {
"end2end": end2end,
"runtime": runtime
}
json_str = json.dumps(save_dict)
with open("%s.json" % model_name, 'w', encoding='utf-8') as fw:
json.dump(json_str, fw, indent=4, ensure_ascii=False)


@@ -59,8 +59,8 @@ YOLOv5::YOLOv5(const std::string& model_file, const std::string& params_file,
                 const RuntimeOption& custom_option,
                 const Frontend& model_format) {
   if (model_format == Frontend::ONNX) {
-    valid_cpu_backends = {Backend::ORT};  // specify the valid CPU backends
-    valid_gpu_backends = {Backend::ORT, Backend::TRT};  // specify the valid GPU backends
+    valid_cpu_backends = {Backend::OPENVINO, Backend::ORT};
+    valid_gpu_backends = {Backend::ORT, Backend::TRT};
   } else {
     valid_cpu_backends = {Backend::PDINFER, Backend::ORT};
     valid_gpu_backends = {Backend::PDINFER, Backend::ORT, Backend::TRT};


@@ -63,8 +63,8 @@ YOLOv6::YOLOv6(const std::string& model_file, const std::string& params_file,
                 const RuntimeOption& custom_option,
                 const Frontend& model_format) {
   if (model_format == Frontend::ONNX) {
-    valid_cpu_backends = {Backend::ORT};  // specify the valid CPU backends
-    valid_gpu_backends = {Backend::ORT, Backend::TRT};  // specify the valid GPU backends
+    valid_cpu_backends = {Backend::OPENVINO, Backend::ORT};
+    valid_gpu_backends = {Backend::ORT, Backend::TRT};
   } else {
     valid_cpu_backends = {Backend::PDINFER, Backend::ORT};
     valid_gpu_backends = {Backend::PDINFER, Backend::ORT, Backend::TRT};


@@ -61,8 +61,8 @@ YOLOv7::YOLOv7(const std::string& model_file, const std::string& params_file,
                 const RuntimeOption& custom_option,
                 const Frontend& model_format) {
   if (model_format == Frontend::ONNX) {
-    valid_cpu_backends = {Backend::ORT};  // specify the valid CPU backends
-    valid_gpu_backends = {Backend::ORT, Backend::TRT};  // specify the valid GPU backends
+    valid_cpu_backends = {Backend::OPENVINO, Backend::ORT};
+    valid_gpu_backends = {Backend::ORT, Backend::TRT};
   } else {
     valid_cpu_backends = {Backend::PDINFER};
     valid_gpu_backends = {Backend::PDINFER};


@@ -75,8 +75,8 @@ void LetterBoxWithRightBottomPad(Mat* mat, std::vector<int> size,
 YOLOX::YOLOX(const std::string& model_file, const std::string& params_file,
              const RuntimeOption& custom_option, const Frontend& model_format) {
   if (model_format == Frontend::ONNX) {
-    valid_cpu_backends = {Backend::ORT};  // specify the valid CPU backends
-    valid_gpu_backends = {Backend::ORT, Backend::TRT};  // specify the valid GPU backends
+    valid_cpu_backends = {Backend::OPENVINO, Backend::ORT};
+    valid_gpu_backends = {Backend::ORT, Backend::TRT};
   } else {
     valid_cpu_backends = {Backend::PDINFER, Backend::ORT};
     valid_gpu_backends = {Backend::PDINFER, Backend::ORT, Backend::TRT};


@@ -24,7 +24,7 @@ PicoDet::PicoDet(const std::string& model_file, const std::string& params_file,
                  const RuntimeOption& custom_option,
                  const Frontend& model_format) {
   config_file_ = config_file;
-  valid_cpu_backends = {Backend::ORT, Backend::PDINFER};
+  valid_cpu_backends = {Backend::PDINFER, Backend::ORT};
   valid_gpu_backends = {Backend::ORT, Backend::PDINFER, Backend::TRT};
   runtime_option = custom_option;
   runtime_option.model_format = model_format;


@@ -23,7 +23,7 @@ PPYOLO::PPYOLO(const std::string& model_file, const std::string& params_file,
                const RuntimeOption& custom_option,
                const Frontend& model_format) {
   config_file_ = config_file;
-  valid_cpu_backends = {Backend::PDINFER};
+  valid_cpu_backends = {Backend::OPENVINO, Backend::PDINFER};
   valid_gpu_backends = {Backend::PDINFER};
   has_nms_ = true;
   runtime_option = custom_option;


@@ -23,7 +23,7 @@ YOLOv3::YOLOv3(const std::string& model_file, const std::string& params_file,
                const RuntimeOption& custom_option,
                const Frontend& model_format) {
   config_file_ = config_file;
-  valid_cpu_backends = {Backend::ORT, Backend::PDINFER};
+  valid_cpu_backends = {Backend::OPENVINO, Backend::ORT, Backend::PDINFER};
   valid_gpu_backends = {Backend::ORT, Backend::PDINFER, Backend::TRT};
   runtime_option = custom_option;
   runtime_option.model_format = model_format;


@@ -13,7 +13,7 @@ PaddleSegModel::PaddleSegModel(const std::string& model_file,
                                const RuntimeOption& custom_option,
                                const Frontend& model_format) {
   config_file_ = config_file;
-  valid_cpu_backends = {Backend::PDINFER};
+  valid_cpu_backends = {Backend::OPENVINO, Backend::PDINFER};
   valid_gpu_backends = {Backend::PDINFER, Backend::TRT};
   runtime_option = custom_option;
   runtime_option.model_format = model_format;