[Other] Add accuracy evaluation scripts (#1034)

* add accuracy scripts

* add accuracy scripts

* Add FlyCV doc

* fix conflict

* fix conflict

* fix conflict
This commit is contained in:
yunyaoXYY
2023-01-04 15:54:03 +08:00
committed by GitHub
parent 34bea7649d
commit 07ad7216f6
24 changed files with 1361 additions and 0 deletions

tests/acc_eval/README.md Normal file

@@ -0,0 +1,29 @@
# Model Accuracy Batch Validation Scripts
The Python scripts in this directory run batch accuracy validation for the high-priority models on CPU/GPU/Kunlunxin/Ascend, as well as on any hardware added later.
The accuracy-test code for each model is adapted from its Python deployment demo. When new hardware or new models are added, users can extend the validation in the same way (add a new option and model) with new Python code.
## Usage
### 1. Prepare the datasets
- Classification models need the ImageNet validation set and labels
- Detection models need the COCO2017 validation set and labels
- Segmentation models need the Cityscapes validation set and labels
- The PP-OCRv2/v3 datasets are downloaded automatically by the prepare script
Extract the prepared datasets into the dataset directory before use.
### 2. Run the accuracy validation
The classification, detection, segmentation, and OCR scenarios are all launched the same way.
Classification, detection, and segmentation models report prediction accuracy; OCR models report the difference against GPU prediction results.
```bash
# Enter the classification model directory
cd classification
# Run prepare.sh to download the models and extract them into the models folder
bash prepare.sh
# First set TARGET_DEVICE in run.sh to the hardware to test, then run run.sh
bash run.sh
# The outputs and accuracy numbers of the validation are saved into the log folder
```
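For reference, the extension point mentioned above is the `build_option` helper that each script below shares. A minimal sketch of registering one more device, assuming the existing `RuntimeOption` API; `use_new_hw` is a hypothetical placeholder for whatever method a new backend would expose:

```python
import fastdeploy as fd

def build_option(args):
    option = fd.RuntimeOption()
    if args.device.lower() == "gpu":
        option.use_gpu()
    elif args.device.lower() == "kunlunxin":
        option.use_kunlunxin()
    elif args.device.lower() == "ascend":
        option.use_ascend()
    elif args.device.lower() == "new_hw":
        # Hypothetical: call the RuntimeOption method for the new backend here.
        option.use_new_hw()
    return option
```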

tests/acc_eval/classification/infer.py Normal file

@@ -0,0 +1,66 @@
import fastdeploy as fd
import cv2
import os
def parse_arguments():
import argparse
import ast
parser = argparse.ArgumentParser()
parser.add_argument(
"--model", required=True, help="Path of PaddleClas model.")
parser.add_argument(
"--image", type=str, required=True, help="Path of test image file.")
parser.add_argument(
"--topk", type=int, default=1, help="Return topk results.")
    parser.add_argument(
        "--device",
        type=str,
        default='cpu',
        help="Type of inference device, support 'cpu', 'gpu', 'ipu', 'kunlunxin' or 'ascend'.")
    parser.add_argument(
        "--use_trt",
        type=ast.literal_eval,
        default=False,
        help="Whether to use TensorRT.")
return parser.parse_args()
def build_option(args):
option = fd.RuntimeOption()
if args.device.lower() == "gpu":
option.use_gpu()
if args.device.lower() == "ipu":
option.use_ipu()
if args.device.lower() == "kunlunxin":
option.use_kunlunxin()
if args.device.lower() == "ascend":
option.use_ascend()
if args.use_trt:
option.use_trt_backend()
return option
args = parse_arguments()
# Configure the runtime and load the model
runtime_option = build_option(args)
model_file = os.path.join(args.model, "inference.pdmodel")
params_file = os.path.join(args.model, "inference.pdiparams")
config_file = os.path.join(args.model, "inference_cls.yaml")
model = fd.vision.classification.PaddleClasModel(
model_file, params_file, config_file, runtime_option=runtime_option)
res = fd.vision.evaluation.eval_classify(
model=model,
image_file_path="../dataset/imagenet/",
label_file_path="../dataset/imagenet/val_list.txt",
topk=1)
print(res)
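Before launching the full ImageNet sweep, it can help to sanity-check a single image with the same model. A minimal sketch, assuming the model layout produced by prepare.sh and following the Python demo this script was adapted from (the image path is a placeholder):

```python
import cv2
import fastdeploy as fd

option = fd.RuntimeOption()
option.use_cpu()
model = fd.vision.classification.PaddleClasModel(
    "models/ResNet50_vd_infer/inference.pdmodel",
    "models/ResNet50_vd_infer/inference.pdiparams",
    "models/ResNet50_vd_infer/inference_cls.yaml",
    runtime_option=option)
im = cv2.imread("test.jpg")  # placeholder test image
print(model.predict(im, 5))  # top-5 labels and scores
```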

tests/acc_eval/classification/prepare.sh Normal file

@@ -0,0 +1,28 @@
mkdir models
cd models
wget https://bj.bcebos.com/paddlehub/fastdeploy/PPLCNet_x1_0_infer.tgz
wget https://bj.bcebos.com/paddlehub/fastdeploy/PPLCNetV2_base_infer.tgz
wget https://bj.bcebos.com/paddlehub/fastdeploy/EfficientNetB7_infer.tgz
wget https://bj.bcebos.com/paddlehub/fastdeploy/EfficientNetB0_small_infer.tgz
wget https://bj.bcebos.com/paddlehub/fastdeploy/GhostNet_x1_3_ssld_infer.tgz
wget https://bj.bcebos.com/paddlehub/fastdeploy/GhostNet_x0_5_infer.tgz
wget https://bj.bcebos.com/paddlehub/fastdeploy/MobileNetV1_x0_25_infer.tgz
wget https://bj.bcebos.com/paddlehub/fastdeploy/MobileNetV1_ssld_infer.tgz
wget https://bj.bcebos.com/paddlehub/fastdeploy/MobileNetV2_x0_25_infer.tgz
wget https://bj.bcebos.com/paddlehub/fastdeploy/MobileNetV2_ssld_infer.tgz
wget https://bj.bcebos.com/paddlehub/fastdeploy/MobileNetV3_small_x0_35_ssld_infer.tgz
wget https://bj.bcebos.com/paddlehub/fastdeploy/MobileNetV3_large_x1_0_ssld_infer.tgz
wget https://bj.bcebos.com/paddlehub/fastdeploy/ShuffleNetV2_x0_25_infer.tgz
wget https://bj.bcebos.com/paddlehub/fastdeploy/ShuffleNetV2_x2_0_infer.tgz
wget https://bj.bcebos.com/paddlehub/fastdeploy/SqueezeNet1_1_infer.tgz
wget https://bj.bcebos.com/paddlehub/fastdeploy/InceptionV3_infer.tgz
wget https://bj.bcebos.com/paddlehub/fastdeploy/PPHGNet_tiny_ssld_infer.tgz
wget https://bj.bcebos.com/paddlehub/fastdeploy/PPHGNet_base_ssld_infer.tgz
wget https://bj.bcebos.com/paddlehub/fastdeploy/ResNet50_vd_infer.tgz
ls *.tgz | xargs -n1 tar xzvf
rm -rf *.tgz
cd ..

tests/acc_eval/classification/run.sh Normal file

@@ -0,0 +1,8 @@
TARGET_DEVICE=ascend
mkdir -p log
model_dir=`ls ./models/`
for MODEL_NAME in $model_dir
do
python infer.py --model ./models/$MODEL_NAME --image None --device $TARGET_DEVICE 2>&1 | tee ./log/${MODEL_NAME}_acc.log
done

tests/acc_eval/detection/eval_faster_rcnn.py Normal file

@@ -0,0 +1,69 @@
import fastdeploy as fd
import cv2
import os
def parse_arguments():
import argparse
import ast
parser = argparse.ArgumentParser()
parser.add_argument(
"--model_dir",
default=None,
help="Path of PaddleDetection model directory")
parser.add_argument(
"--image", default=None, help="Path of test image file.")
    parser.add_argument(
        "--device",
        type=str,
        default='cpu',
        help="Type of inference device, support 'cpu', 'gpu', 'kunlunxin' or 'ascend'.")
    parser.add_argument(
        "--use_trt",
        type=ast.literal_eval,
        default=False,
        help="Whether to use TensorRT.")
return parser.parse_args()
def build_option(args):
option = fd.RuntimeOption()
if args.device.lower() == "gpu":
option.use_gpu()
if args.device.lower() == "kunlunxin":
option.use_kunlunxin()
if args.device.lower() == "ascend":
option.use_ascend()
if args.use_trt:
option.use_trt_backend()
option.set_trt_input_shape("image", [1, 3, 640, 640])
option.set_trt_input_shape("scale_factor", [1, 2])
return option
args = parse_arguments()
if args.model_dir is None:
model_dir = fd.download_model(name='faster_rcnn_r50_vd_fpn_2x_coco')
else:
model_dir = args.model_dir
model_file = os.path.join(model_dir, "model.pdmodel")
params_file = os.path.join(model_dir, "model.pdiparams")
config_file = os.path.join(model_dir, "infer_cfg.yml")
# Configure the runtime and load the model
runtime_option = build_option(args)
model = fd.vision.detection.FasterRCNN(
model_file, params_file, config_file, runtime_option=runtime_option)
image_file_path = "../dataset/coco/val2017"
annotation_file_path = "../dataset/coco/annotations/instances_val2017.json"
res = fd.vision.evaluation.eval_detection(model, image_file_path,
annotation_file_path)
print(res)

tests/acc_eval/detection/eval_mask_rcnn.py Normal file

@@ -0,0 +1,76 @@
import fastdeploy as fd
import cv2
import os
def parse_arguments():
import argparse
import ast
parser = argparse.ArgumentParser()
parser.add_argument(
"--model_dir",
default=None,
help="Path of PaddleDetection model directory")
parser.add_argument(
"--image", default=None, help="Path of test image file.")
    parser.add_argument(
        "--device",
        type=str,
        default='cpu',
        help="Type of inference device, support 'cpu', 'gpu', 'kunlunxin' or 'ascend'.")
    parser.add_argument(
        "--use_trt",
        type=ast.literal_eval,
        default=False,
        help="Whether to use TensorRT.")
return parser.parse_args()
def build_option(args):
option = fd.RuntimeOption()
    if args.device.lower() == "gpu":
        # option.use_gpu()
        print("GPU inference with Backend::Paddle in Python is not supported "
              "yet.\nWill ignore this option.")
    if args.use_trt:
        # TODO(qiuyanjun): may remove TRT option
        # Backend::TRT is not supported yet.
        print("Backend::TRT is not supported yet, will ignore this option.\n"
              "PaddleDetection/MaskRCNN only supports Backend::Paddle for now.")
if args.device.lower() == "kunlunxin":
option.use_kunlunxin()
if args.device.lower() == "ascend":
option.use_ascend()
return option
args = parse_arguments()
if args.model_dir is None:
model_dir = fd.download_model(name='mask_rcnn_r50_1x_coco')
else:
model_dir = args.model_dir
model_file = os.path.join(model_dir, "model.pdmodel")
params_file = os.path.join(model_dir, "model.pdiparams")
config_file = os.path.join(model_dir, "infer_cfg.yml")
# Configure the runtime and load the model
runtime_option = build_option(args)
model = fd.vision.detection.MaskRCNN(
model_file, params_file, config_file, runtime_option=runtime_option)
image_file_path = "../dataset/coco/val2017"
annotation_file_path = "../dataset/coco/annotations/instances_val2017.json"
res = fd.vision.evaluation.eval_detection(model, image_file_path,
annotation_file_path)
print(res)

tests/acc_eval/detection/eval_picodet.py Normal file

@@ -0,0 +1,67 @@
import fastdeploy as fd
import cv2
import os
def parse_arguments():
import argparse
import ast
parser = argparse.ArgumentParser()
parser.add_argument(
"--model_dir",
default=None,
help="Path of PaddleDetection model directory")
parser.add_argument(
"--image", default=None, help="Path of test image file.")
    parser.add_argument(
        "--device",
        type=str,
        default='cpu',
        help="Type of inference device, support 'cpu', 'gpu', 'kunlunxin' or 'ascend'.")
    parser.add_argument(
        "--use_trt",
        type=ast.literal_eval,
        default=False,
        help="Whether to use TensorRT.")
return parser.parse_args()
def build_option(args):
option = fd.RuntimeOption()
if args.device.lower() == "gpu":
option.use_gpu()
if args.device.lower() == "kunlunxin":
option.use_kunlunxin()
if args.device.lower() == "ascend":
option.use_ascend()
if args.use_trt:
option.use_trt_backend()
return option
args = parse_arguments()
if args.model_dir is None:
model_dir = fd.download_model(name='picodet_l_320_coco_lcnet')
else:
model_dir = args.model_dir
model_file = os.path.join(model_dir, "model.pdmodel")
params_file = os.path.join(model_dir, "model.pdiparams")
config_file = os.path.join(model_dir, "infer_cfg.yml")
# Configure the runtime and load the model
runtime_option = build_option(args)
model = fd.vision.detection.PicoDet(
model_file, params_file, config_file, runtime_option=runtime_option)
image_file_path = "../dataset/coco/val2017"
annotation_file_path = "../dataset/coco/annotations/instances_val2017.json"
res = fd.vision.evaluation.eval_detection(model, image_file_path,
annotation_file_path)
print(res)

tests/acc_eval/detection/eval_ppyolo.py Normal file

@@ -0,0 +1,69 @@
import fastdeploy as fd
import cv2
import os
def parse_arguments():
import argparse
import ast
parser = argparse.ArgumentParser()
parser.add_argument(
"--model_dir",
default=None,
help="Path of PaddleDetection model directory")
parser.add_argument(
"--image", default=None, help="Path of test image file.")
    parser.add_argument(
        "--device",
        type=str,
        default='cpu',
        help="Type of inference device, support 'cpu', 'gpu', 'kunlunxin' or 'ascend'.")
    parser.add_argument(
        "--use_trt",
        type=ast.literal_eval,
        default=False,
        help="Whether to use TensorRT.")
return parser.parse_args()
def build_option(args):
option = fd.RuntimeOption()
if args.device.lower() == "gpu":
option.use_gpu()
if args.device.lower() == "kunlunxin":
option.use_kunlunxin()
if args.device.lower() == "ascend":
option.use_ascend()
if args.use_trt:
option.use_trt_backend()
option.set_trt_input_shape("image", [1, 3, 640, 640])
option.set_trt_input_shape("scale_factor", [1, 2])
return option
args = parse_arguments()
if args.model_dir is None:
model_dir = fd.download_model(name='ppyolo_r50vd_dcn_1x_coco')
else:
model_dir = args.model_dir
model_file = os.path.join(model_dir, "model.pdmodel")
params_file = os.path.join(model_dir, "model.pdiparams")
config_file = os.path.join(model_dir, "infer_cfg.yml")
# Configure the runtime and load the model
runtime_option = build_option(args)
model = fd.vision.detection.PPYOLO(
model_file, params_file, config_file, runtime_option=runtime_option)
image_file_path = "../dataset/coco/val2017"
annotation_file_path = "../dataset/coco/annotations/instances_val2017.json"
res = fd.vision.evaluation.eval_detection(model, image_file_path,
annotation_file_path)
print(res)

tests/acc_eval/detection/eval_ppyoloe.py Normal file

@@ -0,0 +1,68 @@
import cv2
import os
import fastdeploy as fd
def parse_arguments():
import argparse
import ast
parser = argparse.ArgumentParser()
parser.add_argument(
"--model_dir",
default=None,
help="Path of PaddleDetection model directory")
parser.add_argument(
"--image", default=None, help="Path of test image file.")
    parser.add_argument(
        "--device",
        type=str,
        default='cpu',
        help="Type of inference device, support 'cpu', 'gpu', 'kunlunxin' or 'ascend'.")
    parser.add_argument(
        "--use_trt",
        type=ast.literal_eval,
        default=False,
        help="Whether to use TensorRT.")
return parser.parse_args()
def build_option(args):
option = fd.RuntimeOption()
if args.device.lower() == "gpu":
option.use_gpu()
if args.device.lower() == "kunlunxin":
option.use_kunlunxin()
if args.device.lower() == "ascend":
option.use_ascend()
if args.use_trt:
option.use_trt_backend()
return option
args = parse_arguments()
if args.model_dir is None:
model_dir = fd.download_model(name='ppyoloe_crn_l_300e_coco')
else:
model_dir = args.model_dir
model_file = os.path.join(model_dir, "model.pdmodel")
params_file = os.path.join(model_dir, "model.pdiparams")
config_file = os.path.join(model_dir, "infer_cfg.yml")
# Configure the runtime and load the model
runtime_option = build_option(args)
model = fd.vision.detection.PPYOLOE(
model_file, params_file, config_file, runtime_option=runtime_option)
image_file_path = "../dataset/coco/val2017"
annotation_file_path = "../dataset/coco/annotations/instances_val2017.json"
res = fd.vision.evaluation.eval_detection(model, image_file_path,
annotation_file_path)
print(res)

tests/acc_eval/detection/eval_ssd.py Normal file

@@ -0,0 +1,56 @@
import fastdeploy as fd
import cv2
import os
def parse_arguments():
import argparse
import ast
parser = argparse.ArgumentParser()
parser.add_argument(
"--model_dir",
required=True,
help="Path of PaddleDetection model directory")
parser.add_argument(
"--image", required=True, help="Path of test image file.")
    parser.add_argument(
        "--device",
        type=str,
        default='cpu',
        help="Type of inference device, support 'cpu', 'gpu', 'kunlunxin' or 'ascend'.")
return parser.parse_args()
def build_option(args):
option = fd.RuntimeOption()
if args.device.lower() == "gpu":
option.use_gpu()
if args.device.lower() == "kunlunxin":
option.use_kunlunxin()
if args.device.lower() == "ascend":
option.use_ascend()
return option
args = parse_arguments()
model_file = os.path.join(args.model_dir, "model.pdmodel")
params_file = os.path.join(args.model_dir, "model.pdiparams")
config_file = os.path.join(args.model_dir, "infer_cfg.yml")
# Configure the runtime and load the model
runtime_option = build_option(args)
model = fd.vision.detection.SSD(model_file,
params_file,
config_file,
runtime_option=runtime_option)
image_file_path = "../dataset/coco/val2017"
annotation_file_path = "../dataset/coco/annotations/instances_val2017.json"
res = fd.vision.evaluation.eval_detection(model, image_file_path,
annotation_file_path)
print(res)

tests/acc_eval/detection/eval_yolov3.py Normal file

@@ -0,0 +1,67 @@
import fastdeploy as fd
import cv2
import os
def parse_arguments():
import argparse
import ast
parser = argparse.ArgumentParser()
parser.add_argument(
"--model_dir",
default=None,
help="Path of PaddleDetection model directory")
parser.add_argument(
"--image", default=None, help="Path of test image file.")
    parser.add_argument(
        "--device",
        type=str,
        default='cpu',
        help="Type of inference device, support 'cpu', 'gpu', 'kunlunxin' or 'ascend'.")
    parser.add_argument(
        "--use_trt",
        type=ast.literal_eval,
        default=False,
        help="Whether to use TensorRT.")
return parser.parse_args()
def build_option(args):
option = fd.RuntimeOption()
if args.device.lower() == "gpu":
option.use_gpu()
if args.device.lower() == "kunlunxin":
option.use_kunlunxin()
if args.device.lower() == "ascend":
option.use_ascend()
if args.use_trt:
option.use_trt_backend()
return option
args = parse_arguments()
if args.model_dir is None:
model_dir = fd.download_model(name='yolov3_darknet53_270e_coco')
else:
model_dir = args.model_dir
model_file = os.path.join(model_dir, "model.pdmodel")
params_file = os.path.join(model_dir, "model.pdiparams")
config_file = os.path.join(model_dir, "infer_cfg.yml")
# Configure the runtime and load the model
runtime_option = build_option(args)
model = fd.vision.detection.YOLOv3(
model_file, params_file, config_file, runtime_option=runtime_option)
image_file_path = "../dataset/coco/val2017"
annotation_file_path = "../dataset/coco/annotations/instances_val2017.json"
res = fd.vision.evaluation.eval_detection(model, image_file_path,
annotation_file_path)
print(res)

tests/acc_eval/detection/eval_yolov5.py Normal file

@@ -0,0 +1,60 @@
import fastdeploy as fd
import cv2
import os
def parse_arguments():
import argparse
import ast
parser = argparse.ArgumentParser()
parser.add_argument("--model", default=None, help="Path of yolov5 model.")
parser.add_argument(
"--image", default=None, help="Path of test image file.")
    parser.add_argument(
        "--device",
        type=str,
        default='cpu',
        help="Type of inference device, support 'cpu', 'gpu', 'kunlunxin' or 'ascend'.")
    parser.add_argument(
        "--use_trt",
        type=ast.literal_eval,
        default=False,
        help="Whether to use TensorRT.")
return parser.parse_args()
def build_option(args):
option = fd.RuntimeOption()
if args.device.lower() == "kunlunxin":
option.use_kunlunxin()
if args.device.lower() == "ascend":
option.use_ascend()
if args.device.lower() == "gpu":
option.use_gpu()
if args.use_trt:
option.use_trt_backend()
option.set_trt_input_shape("images", [1, 3, 640, 640])
return option
args = parse_arguments()
# Configure the runtime and load the model
runtime_option = build_option(args)
model_file = os.path.join(args.model, "model.pdmodel")
params_file = os.path.join(args.model, "model.pdiparams")
model = fd.vision.detection.YOLOv5(
model_file,
params_file,
runtime_option=runtime_option,
model_format=fd.ModelFormat.PADDLE)
image_file_path = "/xieyunyao/Project/coco/val2017"
annotation_file_path = "/xieyunyao/Project/coco/annotations/instances_val2017.json"
res = fd.vision.evaluation.eval_detection(model, image_file_path,
annotation_file_path, 0.001, 0.65)
print(res)

tests/acc_eval/detection/eval_yolov6.py Normal file

@@ -0,0 +1,60 @@
import fastdeploy as fd
import cv2
import os
def parse_arguments():
import argparse
import ast
parser = argparse.ArgumentParser()
parser.add_argument("--model", default=None, help="Path of yolov5 model.")
parser.add_argument(
"--image", default=None, help="Path of test image file.")
    parser.add_argument(
        "--device",
        type=str,
        default='cpu',
        help="Type of inference device, support 'cpu', 'gpu', 'kunlunxin' or 'ascend'.")
    parser.add_argument(
        "--use_trt",
        type=ast.literal_eval,
        default=False,
        help="Whether to use TensorRT.")
return parser.parse_args()
def build_option(args):
option = fd.RuntimeOption()
if args.device.lower() == "kunlunxin":
option.use_kunlunxin()
if args.device.lower() == "ascend":
option.use_ascend()
if args.device.lower() == "gpu":
option.use_gpu()
if args.use_trt:
option.use_trt_backend()
option.set_trt_input_shape("images", [1, 3, 640, 640])
return option
args = parse_arguments()
# Configure the runtime and load the model
runtime_option = build_option(args)
model_file = os.path.join(args.model, "model.pdmodel")
params_file = os.path.join(args.model, "model.pdiparams")
model = fd.vision.detection.YOLOv6(
model_file,
params_file,
runtime_option=runtime_option,
model_format=fd.ModelFormat.PADDLE)
image_file_path = "/xieyunyao/Project/coco/val2017"
annotation_file_path = "/xieyunyao/Project/coco/annotations/instances_val2017.json"
res = fd.vision.evaluation.eval_detection(model, image_file_path,
annotation_file_path, 0.001, 0.65)
print(res)

tests/acc_eval/detection/eval_yolov7.py Normal file

@@ -0,0 +1,60 @@
import fastdeploy as fd
import cv2
import os
def parse_arguments():
import argparse
import ast
parser = argparse.ArgumentParser()
parser.add_argument("--model", default=None, help="Path of yolov5 model.")
parser.add_argument(
"--image", default=None, help="Path of test image file.")
    parser.add_argument(
        "--device",
        type=str,
        default='cpu',
        help="Type of inference device, support 'cpu', 'gpu', 'kunlunxin' or 'ascend'.")
    parser.add_argument(
        "--use_trt",
        type=ast.literal_eval,
        default=False,
        help="Whether to use TensorRT.")
return parser.parse_args()
def build_option(args):
option = fd.RuntimeOption()
if args.device.lower() == "kunlunxin":
option.use_kunlunxin()
if args.device.lower() == "ascend":
option.use_ascend()
if args.device.lower() == "gpu":
option.use_gpu()
if args.use_trt:
option.use_trt_backend()
option.set_trt_input_shape("images", [1, 3, 640, 640])
return option
args = parse_arguments()
# Configure the runtime and load the model
runtime_option = build_option(args)
model_file = os.path.join(args.model, "model.pdmodel")
params_file = os.path.join(args.model, "model.pdiparams")
# This is the YOLOv7 evaluation script, so use the YOLOv7 model class
model = fd.vision.detection.YOLOv7(
model_file,
params_file,
runtime_option=runtime_option,
model_format=fd.ModelFormat.PADDLE)
image_file_path = "/xieyunyao/Project/coco/val2017"
annotation_file_path = "/xieyunyao/Project/coco/annotations/instances_val2017.json"
res = fd.vision.evaluation.eval_detection(model, image_file_path,
annotation_file_path, 0.001, 0.65)
print(res)

tests/acc_eval/detection/eval_yolox.py Normal file

@@ -0,0 +1,67 @@
import fastdeploy as fd
import cv2
import os
def parse_arguments():
import argparse
import ast
parser = argparse.ArgumentParser()
parser.add_argument(
"--model_dir",
default=None,
help="Path of PaddleDetection model directory")
parser.add_argument(
"--image", default=None, help="Path of test image file.")
    parser.add_argument(
        "--device",
        type=str,
        default='cpu',
        help="Type of inference device, support 'cpu', 'gpu', 'kunlunxin' or 'ascend'.")
    parser.add_argument(
        "--use_trt",
        type=ast.literal_eval,
        default=False,
        help="Whether to use TensorRT.")
return parser.parse_args()
def build_option(args):
option = fd.RuntimeOption()
if args.device.lower() == "gpu":
option.use_gpu()
if args.device.lower() == "kunlunxin":
option.use_kunlunxin()
if args.device.lower() == "ascend":
option.use_ascend()
if args.use_trt:
option.use_trt_backend()
return option
args = parse_arguments()
if args.model_dir is None:
model_dir = fd.download_model(name='yolox_s_300e_coco')
else:
model_dir = args.model_dir
model_file = os.path.join(model_dir, "model.pdmodel")
params_file = os.path.join(model_dir, "model.pdiparams")
config_file = os.path.join(model_dir, "infer_cfg.yml")
# Configure the runtime and load the model
runtime_option = build_option(args)
model = fd.vision.detection.PaddleYOLOX(
model_file, params_file, config_file, runtime_option=runtime_option)
image_file_path = "../dataset/coco/val2017"
annotation_file_path = "../dataset/coco/annotations/instances_val2017.json"
res = fd.vision.evaluation.eval_detection(model, image_file_path,
annotation_file_path)
print(res)

tests/acc_eval/detection/prepare.sh Normal file

@@ -0,0 +1,25 @@
mkdir models
cd models
wget https://bj.bcebos.com/paddlehub/fastdeploy/picodet_l_320_coco_lcnet.tgz
wget https://bj.bcebos.com/paddlehub/fastdeploy/ppyoloe_crn_l_300e_coco.tgz
wget https://bj.bcebos.com/fastdeploy/models/ppyoloe_plus_crn_m_80e_coco.tgz
wget https://bj.bcebos.com/paddlehub/fastdeploy/ppyolo_r50vd_dcn_1x_coco.tgz
wget https://bj.bcebos.com/paddlehub/fastdeploy/ppyolov2_r101vd_dcn_365e_coco.tgz
wget https://bj.bcebos.com/paddlehub/fastdeploy/yolov3_darknet53_270e_coco.tgz
wget https://bj.bcebos.com/paddlehub/fastdeploy/yolox_s_300e_coco.tgz
wget https://bj.bcebos.com/paddlehub/fastdeploy/ssd_mobilenet_v1_300_120e_voc.tgz
wget https://bj.bcebos.com/paddlehub/fastdeploy/ssd_vgg16_300_240e_voc.tgz
wget https://bj.bcebos.com/paddlehub/fastdeploy/ssdlite_mobilenet_v1_300_coco.tgz
wget https://bj.bcebos.com/paddlehub/fastdeploy/faster_rcnn_r50_vd_fpn_2x_coco.tgz
wget https://bj.bcebos.com/paddlehub/fastdeploy/mask_rcnn_r50_1x_coco.tgz
wget https://bj.bcebos.com/paddlehub/fastdeploy/yolov5s_infer.tar
wget https://bj.bcebos.com/paddlehub/fastdeploy/yolov6s_infer.tar
wget https://bj.bcebos.com/paddlehub/fastdeploy/yolov7_infer.tar
ls *.tgz | xargs -n1 tar xzvf
ls *.tar | xargs -n1 tar xvf
rm -rf *.tgz
rm -rf *.tar
cd ..

tests/acc_eval/detection/run.sh Normal file

@@ -0,0 +1,17 @@
TARGET_DEVICE=ascend
mkdir -p log
python eval_picodet.py --model_dir ./models/picodet_l_320_coco_lcnet --image None --device $TARGET_DEVICE 2>&1 | tee ./log/picodet_l_320_coco_lcnet.log
python eval_ppyolo.py --model_dir ./models/ppyolov2_r101vd_dcn_365e_coco --image None --device $TARGET_DEVICE 2>&1 | tee ./log/ppyolov2_r101vd_dcn_365e_coco.log
python eval_ppyolo.py --model_dir ./models/ppyolo_r50vd_dcn_1x_coco --image None --device $TARGET_DEVICE 2>&1 | tee ./log/ppyolo_r50vd_dcn_1x_coco.log
python eval_ppyoloe.py --model_dir ./models/ppyoloe_crn_l_300e_coco --image None --device $TARGET_DEVICE 2>&1 | tee ./log/ppyoloe_crn_l_300e_coco.log
python eval_ppyoloe.py --model_dir ./models/ppyoloe_plus_crn_m_80e_coco --image None --device $TARGET_DEVICE 2>&1 | tee ./log/ppyoloe_plus_crn_m_80e_coco.log
python eval_ssd.py --model_dir ./models/ssd_vgg16_300_240e_voc --image None --device $TARGET_DEVICE 2>&1 | tee ./log/ssd_vgg16_300_240e_voc.log
python eval_ssd.py --model_dir ./models/ssdlite_mobilenet_v1_300_coco --image None --device $TARGET_DEVICE 2>&1 | tee ./log/ssdlite_mobilenet_v1_300_coco.log
python eval_ssd.py --model_dir ./models/ssd_mobilenet_v1_300_120e_voc --image None --device $TARGET_DEVICE 2>&1 | tee ./log/ssd_mobilenet_v1_300_120e_voc.log
python eval_yolov3.py --model_dir ./models/yolov3_darknet53_270e_coco --image None --device $TARGET_DEVICE 2>&1 | tee ./log/yolov3_darknet53_270e_coco.log
python eval_yolox.py --model_dir ./models/yolox_s_300e_coco --image None --device $TARGET_DEVICE 2>&1 | tee ./log/yolox_s_300e_coco.log
python eval_faster_rcnn.py --model_dir ./models/faster_rcnn_r50_vd_fpn_2x_coco --image None --device $TARGET_DEVICE 2>&1 | tee ./log/faster_rcnn_r50_vd_fpn_2x_coco.log
python eval_mask_rcnn.py --model_dir ./models/mask_rcnn_r50_1x_coco --image None --device $TARGET_DEVICE 2>&1 | tee ./log/mask_rcnn_r50_1x_coco.log
python eval_yolov5.py --model ./models/yolov5s_infer --image None --device $TARGET_DEVICE 2>&1 | tee ./log/yolov5s_infer.log
python eval_yolov6.py --model ./models/yolov6s_infer --image None --device $TARGET_DEVICE 2>&1 | tee ./log/yolov6s_infer.log
python eval_yolov7.py --model ./models/yolov7_infer --image None --device $TARGET_DEVICE 2>&1 | tee ./log/yolov7_infer.log

tests/acc_eval/ppocr/eval_ppocrv2.py Normal file

@@ -0,0 +1,170 @@
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import fastdeploy as fd
import cv2
import os
def parse_arguments():
import argparse
import ast
parser = argparse.ArgumentParser()
parser.add_argument(
"--det_model", required=True, help="Path of Detection model of PPOCR.")
parser.add_argument(
"--cls_model",
required=True,
help="Path of Classification model of PPOCR.")
    parser.add_argument(
        "--rec_model",
        required=True,
        help="Path of Recognition model of PPOCR.")
    parser.add_argument(
        "--rec_label_file",
        required=True,
        help="Path of Recognition label file of PPOCR.")
parser.add_argument(
"--image", type=str, required=True, help="Path of test image file.")
    parser.add_argument(
        "--device",
        type=str,
        default='cpu',
        help="Type of inference device, support 'cpu', 'gpu', 'kunlunxin' or 'ascend'.")
parser.add_argument(
"--backend",
type=str,
default="default",
help="Type of inference backend, support ort/trt/paddle/openvino, default 'openvino' for cpu, 'tensorrt' for gpu"
)
parser.add_argument(
"--device_id",
type=int,
default=0,
help="Define which GPU card used to run model.")
parser.add_argument(
"--cpu_thread_num",
type=int,
default=9,
help="Number of threads while inference on CPU.")
return parser.parse_args()
def build_option(args):
option = fd.RuntimeOption()
if args.device.lower() == "kunlunxin":
option.use_kunlunxin()
if args.device.lower() == "ascend":
option.use_ascend()
if args.device.lower() == "gpu":
option.use_gpu()
return option
args = parse_arguments()
# Detection model: detects the text boxes
det_model_file = os.path.join(args.det_model, "inference.pdmodel")
det_params_file = os.path.join(args.det_model, "inference.pdiparams")
# Classification model: text direction classification (optional)
cls_model_file = os.path.join(args.cls_model, "inference.pdmodel")
cls_params_file = os.path.join(args.cls_model, "inference.pdiparams")
# Recognition model: recognizes the text
rec_model_file = os.path.join(args.rec_model, "inference.pdmodel")
rec_params_file = os.path.join(args.rec_model, "inference.pdiparams")
rec_label_file = args.rec_label_file
# All three models share the same runtime configuration here
# They can also be configured individually if needed
runtime_option = build_option(args)
det_model = fd.vision.ocr.DBDetector(
det_model_file, det_params_file, runtime_option=runtime_option)
cls_model = fd.vision.ocr.Classifier(
cls_model_file, cls_params_file, runtime_option=runtime_option)
rec_model = fd.vision.ocr.Recognizer(
rec_model_file,
rec_params_file,
rec_label_file,
runtime_option=runtime_option)
# Enable static-shape inference for the PPOCR Rec model; comment this out if the target hardware does not need it.
rec_model.preprocessor.static_shape = True
# Create PP-OCR by chaining the 3 models; cls_model is optional and can be set to None if not needed
ppocr_v2 = fd.vision.ocr.PPOCRv2(
det_model=det_model, cls_model=cls_model, rec_model=rec_model)
# Prepare the input images
img_dir = args.image
imgs_file_lists = []
if os.path.isdir(img_dir):
for single_file in os.listdir(img_dir):
if 'jpg' in single_file:
file_path = os.path.join(img_dir, single_file)
if os.path.isfile(file_path):
imgs_file_lists.append(file_path)
imgs_file_lists.sort()
fd_result = []
for idx, image in enumerate(imgs_file_lists):
img = cv2.imread(image)
result = ppocr_v2.predict(img)
for i in range(len(result.boxes)):
one_res = result.boxes[i] + [
result.rec_scores[i]
] + [result.cls_labels[i]] + [result.cls_scores[i]]
fd_result.append(one_res)
local_result = []
with open('PPOCRv2_ICDAR10_BS116_1221.txt', 'r') as f:
for line in f:
local_result.append(list(map(float, line.split(','))))
print("==== Begin to check OCR diff ====")
for list_local, list_fd in zip(local_result, fd_result):
for i in range(len(list_local)):
if (i < 8):
#Det
diff = list_local[i] - list_fd[i]
assert (
abs(diff) < 1
), "Diff exist in Det box result, where is {} - {} .".format(
list_local, list_fd)
elif (i == 8):
#rec
diff = round(list_local[i], 6) - round(list_fd[i], 6)
assert (
abs(diff) < 0.001
), "Diff exist in rec scores result, where is {} - {} .".format(
list_local, list_fd)
elif (i == 9):
diff = list_local[i] - list_fd[i]
assert (
abs(diff) != 1
), "Diff exist in cls label result, where is {} - {} .".format(
list_local, list_fd)
else:
diff = round(list_local[i], 6) - round(list_fd[i], 6)
assert (
abs(diff) < 0.001
), "Diff exist in cls score result, where is {} - {} .".format(
list_local, list_fd)

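If the GPU baseline ever needs refreshing, the reference file can be regenerated by running this script on GPU and dumping `fd_result` in the same comma-separated layout it parses (8 box coordinates, rec score, cls label, cls score per line). A minimal sketch, reusing the filename above:

```python
# Write one comma-separated line per detected box, mirroring the parse above.
with open('PPOCRv2_ICDAR10_BS116_1221.txt', 'w') as f:
    for row in fd_result:
        f.write(','.join(str(v) for v in row) + '\n')
```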
tests/acc_eval/ppocr/eval_ppocrv3.py Normal file

@@ -0,0 +1,174 @@
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import fastdeploy as fd
import cv2
import os
def parse_arguments():
import argparse
import ast
parser = argparse.ArgumentParser()
parser.add_argument(
"--det_model", required=True, help="Path of Detection model of PPOCR.")
parser.add_argument(
"--cls_model",
required=True,
help="Path of Classification model of PPOCR.")
    parser.add_argument(
        "--rec_model",
        required=True,
        help="Path of Recognition model of PPOCR.")
    parser.add_argument(
        "--rec_label_file",
        required=True,
        help="Path of Recognition label file of PPOCR.")
parser.add_argument(
"--image", type=str, required=True, help="Path of test image file.")
    parser.add_argument(
        "--device",
        type=str,
        default='cpu',
        help="Type of inference device, support 'cpu', 'gpu', 'kunlunxin' or 'ascend'.")
parser.add_argument(
"--backend",
type=str,
default="default",
help="Type of inference backend, support ort/trt/paddle/openvino, default 'openvino' for cpu, 'tensorrt' for gpu"
)
parser.add_argument(
"--device_id",
type=int,
default=0,
help="Define which GPU card used to run model.")
parser.add_argument(
"--cpu_thread_num",
type=int,
default=9,
help="Number of threads while inference on CPU.")
return parser.parse_args()
def build_option(args):
option = fd.RuntimeOption()
if args.device.lower() == "kunlunxin":
option.use_kunlunxin()
if args.device.lower() == "ascend":
option.use_ascend()
if args.device.lower() == "gpu":
option.use_gpu()
return option
args = parse_arguments()
# Detection model: detects the text boxes
det_model_file = os.path.join(args.det_model, "inference.pdmodel")
det_params_file = os.path.join(args.det_model, "inference.pdiparams")
# Classification model: text direction classification (optional)
cls_model_file = os.path.join(args.cls_model, "inference.pdmodel")
cls_params_file = os.path.join(args.cls_model, "inference.pdiparams")
# Recognition model: recognizes the text
rec_model_file = os.path.join(args.rec_model, "inference.pdmodel")
rec_params_file = os.path.join(args.rec_model, "inference.pdiparams")
rec_label_file = args.rec_label_file
# All three models share the same runtime configuration here
# They can also be configured individually if needed
runtime_option = build_option(args)
det_model = fd.vision.ocr.DBDetector(
det_model_file, det_params_file, runtime_option=runtime_option)
cls_model = fd.vision.ocr.Classifier(
cls_model_file, cls_params_file, runtime_option=runtime_option)
rec_model = fd.vision.ocr.Recognizer(
rec_model_file,
rec_params_file,
rec_label_file,
runtime_option=runtime_option)
# Enable static-shape inference for the PPOCR Rec model; comment this out if the target hardware does not need it.
rec_model.preprocessor.static_shape = True
# Create PP-OCR by chaining the 3 models; cls_model is optional and can be set to None if not needed
ppocr_v3 = fd.vision.ocr.PPOCRv3(
det_model=det_model, cls_model=cls_model, rec_model=rec_model)
# Prepare the input images
img_dir = args.image
imgs_file_lists = []
if os.path.isdir(img_dir):
for single_file in os.listdir(img_dir):
if 'jpg' in single_file:
file_path = os.path.join(img_dir, single_file)
if os.path.isfile(file_path):
imgs_file_lists.append(file_path)
imgs_file_lists.sort()
fd_result = []
for idx, image in enumerate(imgs_file_lists):
img = cv2.imread(image)
result = ppocr_v3.predict(img)
for i in range(len(result.boxes)):
one_res = result.boxes[i] + [
result.rec_scores[i]
] + [result.cls_labels[i]] + [result.cls_scores[i]]
fd_result.append(one_res)
local_result = []
with open('PPOCRv3_ICDAR10_BS116_1221.txt', 'r') as f:
for line in f:
local_result.append(list(map(float, line.split(','))))
# Begin to diff compare
print("==== Begin to check OCR diff ====")
for list_local, list_fd in zip(local_result, fd_result):
for i in range(len(list_local)):
if (i < 8):
#Det
diff = list_local[i] - list_fd[i]
assert (
abs(diff) < 1
), "Diff exist in Det box result, where is {} - {} .".format(
list_local, list_fd)
elif (i == 8):
#rec
diff = round(list_local[i], 6) - round(list_fd[i], 6)
assert (
abs(diff) < 0.001
), "Diff exist in rec scores result, where is {} - {} .".format(
list_local, list_fd)
elif (i == 9):
diff = list_local[i] - list_fd[i]
assert (
abs(diff) != 1
), "Diff exist in cls label result, where is {} - {} .".format(
list_local, list_fd)
else:
diff = round(list_local[i], 6) - round(list_fd[i], 6)
assert (
abs(diff) < 0.001
), "Diff exist in cls score result, where is {} - {} .".format(
list_local, list_fd)

tests/acc_eval/ppocr/prepare.sh Normal file

@@ -0,0 +1,22 @@
mkdir models
cd models
# Download the models
wget https://paddleocr.bj.bcebos.com/PP-OCRv3/chinese/ch_PP-OCRv3_det_infer.tar
wget https://paddleocr.bj.bcebos.com/dygraph_v2.0/ch/ch_ppocr_mobile_v2.0_cls_infer.tar
wget https://paddleocr.bj.bcebos.com/PP-OCRv3/chinese/ch_PP-OCRv3_rec_infer.tar
wget https://paddleocr.bj.bcebos.com/PP-OCRv2/chinese/ch_PP-OCRv2_det_infer.tar
wget https://paddleocr.bj.bcebos.com/PP-OCRv2/chinese/ch_PP-OCRv2_rec_infer.tar
wget https://gitee.com/paddlepaddle/PaddleOCR/raw/release/2.6/ppocr/utils/ppocr_keys_v1.txt
# Download the GPU prediction results
wget https://bj.bcebos.com/paddlehub/fastdeploy/PPOCRv3_ICDAR10_BS116_1221.txt
wget https://bj.bcebos.com/paddlehub/fastdeploy/PPOCRv2_ICDAR10_BS116_1221.txt
# Download the first 10 images of the ICDAR2017 dataset
wget https://bj.bcebos.com/paddlehub/fastdeploy/ICDAR2017_10.tar
ls *.tar | xargs -n1 tar xvf
rm -rf *.tar
cd ..

tests/acc_eval/ppocr/run.sh Normal file

@@ -0,0 +1,7 @@
TARGET_DEVICE=ascend
mkdir -p log
python eval_ppocrv3.py --det_model ch_PP-OCRv3_det_infer --cls_model ch_ppocr_mobile_v2.0_cls_infer --rec_model ch_PP-OCRv3_rec_infer --rec_label_file ppocr_keys_v1.txt \
--image ../ICDAR2017_10 --device $TARGET_DEVICE 2>&1 | tee ./log/ppocrv3_diff.log
python eval_ppocrv2.py --det_model ch_PP-OCRv2_det_infer --cls_model ch_ppocr_mobile_v2.0_cls_infer --rec_model ch_PP-OCRv2_rec_infer --rec_label_file ppocr_keys_v1.txt \
--image ../ICDAR2017_10 --device $TARGET_DEVICE 2>&1 | tee ./log/ppocrv2_diff.log

tests/acc_eval/segmentation/eval.py Normal file

@@ -0,0 +1,58 @@
import fastdeploy as fd
import cv2
import os
def parse_arguments():
import argparse
import ast
parser = argparse.ArgumentParser()
parser.add_argument(
"--model", required=True, help="Path of PaddleSeg model.")
parser.add_argument(
"--image", type=str, required=True, help="Path of test image file.")
    parser.add_argument(
        "--device",
        type=str,
        default='cpu',
        help="Type of inference device, support 'cpu', 'gpu', 'kunlunxin' or 'ascend'.")
    parser.add_argument(
        "--use_trt",
        type=ast.literal_eval,
        default=False,
        help="Whether to use TensorRT.")
return parser.parse_args()
def build_option(args):
option = fd.RuntimeOption()
if args.device.lower() == "gpu":
option.use_gpu()
if args.device.lower() == "kunlunxin":
option.use_kunlunxin()
if args.device.lower() == "ascend":
option.use_ascend()
    if args.use_trt:
        option.use_trt_backend()
        # Dynamic input shape for TensorRT: minimum, optimal, and maximum shapes
        option.set_trt_input_shape("x", [1, 3, 256, 256], [1, 3, 1024, 1024],
                                   [1, 3, 2048, 2048])
return option
args = parse_arguments()
# Configure the runtime and load the model
runtime_option = build_option(args)
model_file = os.path.join(args.model, "model.pdmodel")
params_file = os.path.join(args.model, "model.pdiparams")
config_file = os.path.join(args.model, "deploy.yaml")
model = fd.vision.segmentation.PaddleSegModel(
model_file, params_file, config_file, runtime_option=runtime_option)
res = fd.vision.evaluation.eval_segmentation(
model=model, data_dir="../dataset/FD_dataset/data/cityscapes")
print(res)

tests/acc_eval/segmentation/prepare.sh Normal file

@@ -0,0 +1,30 @@
mkdir models
cd models
wget https://bj.bcebos.com/paddlehub/fastdeploy/Unet_cityscapes_with_argmax_infer.tgz
wget https://bj.bcebos.com/paddlehub/fastdeploy/Unet_cityscapes_without_argmax_infer.tgz
wget https://bj.bcebos.com/paddlehub/fastdeploy/PP_LiteSeg_B_STDC2_cityscapes_with_argmax_infer.tgz
wget https://bj.bcebos.com/paddlehub/fastdeploy/PP_LiteSeg_B_STDC2_cityscapes_without_argmax_infer.tgz
wget https://bj.bcebos.com/paddlehub/fastdeploy/Portrait_PP_HumanSegV1_Lite_with_argmax_infer.tgz
wget https://bj.bcebos.com/paddlehub/fastdeploy/PP_HumanSegV1_Lite_infer.tgz
wget https://bj.bcebos.com/paddlehub/fastdeploy/PP_HumanSegV2_Lite_192x192_infer.tgz
wget https://bj.bcebos.com/paddlehub/fastdeploy/PP_HumanSegV2_Mobile_192x192_infer.tgz
wget https://bj.bcebos.com/paddlehub/fastdeploy/PP_HumanSegV1_Server_with_argmax_infer.tgz
wget https://bj.bcebos.com/paddlehub/fastdeploy/PP_HumanSegV1_Server_infer.tgz
wget https://bj.bcebos.com/paddlehub/fastdeploy/Portrait_PP_HumanSegV2_Lite_256x144_with_argmax_infer.tgz
wget https://bj.bcebos.com/paddlehub/fastdeploy/Portrait_PP_HumanSegV2_Lite_256x144_infer.tgz
wget https://bj.bcebos.com/paddlehub/fastdeploy/FCN_HRNet_W18_cityscapes_with_argmax_infer.tgz
wget https://bj.bcebos.com/paddlehub/fastdeploy/FCN_HRNet_W18_cityscapes_without_argmax_infer.tgz
wget https://bj.bcebos.com/paddlehub/fastdeploy/Deeplabv3_ResNet101_OS8_cityscapes_with_argmax_infer.tgz
wget https://bj.bcebos.com/paddlehub/fastdeploy/Deeplabv3_ResNet101_OS8_cityscapes_without_argmax_infer.tgz
ls *.tgz | xargs -n1 tar xzvf
rm -rf *.tgz
cd ..

tests/acc_eval/segmentation/run.sh Normal file

@@ -0,0 +1,8 @@
TARGET_DEVICE=ascend
mkdir -p log
model_dir=`ls ./models/`
for MODEL_NAME in $model_dir
do
python eval.py --model ./models/$MODEL_NAME --image None --device $TARGET_DEVICE 2>&1 | tee ./log/${MODEL_NAME}_acc.log
done