[Bug Fix] Fix bugs when deploying quantized YOLOv5/v6/v7 model. (#729)

* Improve the usage of fastdeploy tools

* Fix quantized YOLOv5, v6 and v7 model deployment
This commit is contained in:
yunyaoXYY
2022-11-28 19:19:37 +08:00
committed by GitHub
parent 941057888a
commit dc2dad62a4
14 changed files with 12 additions and 32 deletions

View File

@@ -21,8 +21,8 @@ wget https://bj.bcebos.com/paddlehub/fastdeploy/yolov6s_qat_model.tar
tar -xvf yolov6s_qat_model.tar
wget https://gitee.com/paddlepaddle/PaddleDetection/raw/release/2.4/demo/000000014439.jpg
# 在CPU上使用Paddle-Inference推理量化模型
python infer.py --model yolov6s_qat_model --image 000000014439.jpg --device cpu --backend paddle
# 在CPU上使用ONNX Runtime推理量化模型
python infer.py --model yolov6s_qat_model --image 000000014439.jpg --device cpu --backend ort
# 在GPU上使用TensorRT推理量化模型
python infer.py --model yolov6s_qat_model --image 000000014439.jpg --device gpu --backend trt
# 在GPU上使用Paddle-TensorRT推理量化模型

View File

@@ -54,12 +54,6 @@ def build_option(args):
option.enable_paddle_to_trt()
elif args.backend.lower() == "ort":
option.use_ort_backend()
elif args.backend.lower() == "paddle":
option.use_paddle_backend()
elif args.backend.lower() == "openvino":
assert args.device.lower(
) == "cpu", "OpenVINO backend require inference on device CPU."
option.use_openvino_backend()
return option