tests/acc_eval/classification/run.sh (Normal file → Executable file)
@@ -4,5 +4,5 @@ model_dir=`ls ./models/`
 for MODEL_NAME in $model_dir
 do
-python infer.py --model ./models/$MODEL_NAME --image None --device $TARGET_DEVICE 2>&1 | tee ./log/${MODEL_NAME}_acc.log
+python eval.py --model ./models/$MODEL_NAME --image None --device $TARGET_DEVICE 2>&1 | tee ./log/${MODEL_NAME}_acc.log
 done
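For reference, the loop above assumes each per-model script accepts the same --model/--image/--device flags that run.sh passes on the command line. A minimal sketch of such a CLI (everything beyond those three flags is a hypothetical illustration, not the repo's actual code):

import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--model", required=True, help="path to one exported model directory under ./models/")
parser.add_argument("--image", default=None, help="single test image; run.sh passes the literal string 'None' for full-dataset accuracy runs")
parser.add_argument("--device", default="cpu", help="target device, forwarded from $TARGET_DEVICE")
args = parser.parse_args()
# args.model / args.image / args.device are then forwarded to the FastDeploy model and evaluator.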
@@ -52,8 +52,8 @@ model = fd.vision.detection.YOLOv5(
     runtime_option=runtime_option,
     model_format=fd.ModelFormat.PADDLE)

-image_file_path = "/xieyunyao/Project/coco/val2017"
-annotation_file_path = "/xieyunyao/Project/coco/annotations/instances_val2017.json"
+image_file_path = "../dataset/coco/val2017"
+annotation_file_path = "../dataset/coco/annotations/instances_val2017.json"

 res = fd.vision.evaluation.eval_detection(model, image_file_path,
                                           annotation_file_path, 0.001, 0.65)
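The hunk above is the tail of an evaluation script that builds a runtime option, constructs the detector, and runs COCO evaluation. A minimal sketch of that flow, assuming a standard Paddle export layout (model.pdmodel/model.pdiparams) and a hypothetical model directory name; the 0.001/0.65 values are copied verbatim from the diff and appear to be the confidence and NMS IoU thresholds:

import os
import fastdeploy as fd

model_dir = "./models/yolov5s_infer"   # hypothetical path, mirroring detection/run.sh
runtime_option = fd.RuntimeOption()    # configure backend/device here as needed

# Paddle-format YOLOv5 detector, matching the diff context above.
model = fd.vision.detection.YOLOv5(
    os.path.join(model_dir, "model.pdmodel"),
    os.path.join(model_dir, "model.pdiparams"),
    runtime_option=runtime_option,
    model_format=fd.ModelFormat.PADDLE)

image_file_path = "../dataset/coco/val2017"
annotation_file_path = "../dataset/coco/annotations/instances_val2017.json"

# Evaluate mAP against the COCO val2017 annotations.
res = fd.vision.evaluation.eval_detection(model, image_file_path,
                                          annotation_file_path, 0.001, 0.65)
print(res)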
@@ -52,8 +52,8 @@ model = fd.vision.detection.YOLOv6(
     runtime_option=runtime_option,
     model_format=fd.ModelFormat.PADDLE)

-image_file_path = "/xieyunyao/Project/coco/val2017"
-annotation_file_path = "/xieyunyao/Project/coco/annotations/instances_val2017.json"
+image_file_path = "../dataset/coco/val2017"
+annotation_file_path = "../dataset/coco/annotations/instances_val2017.json"

 res = fd.vision.evaluation.eval_detection(model, image_file_path,
                                           annotation_file_path, 0.001, 0.65)
@@ -52,8 +52,8 @@ model = fd.vision.detection.YOLOv6(
     runtime_option=runtime_option,
     model_format=fd.ModelFormat.PADDLE)

-image_file_path = "/xieyunyao/Project/coco/val2017"
-annotation_file_path = "/xieyunyao/Project/coco/annotations/instances_val2017.json"
+image_file_path = "../dataset/coco/val2017"
+annotation_file_path = "../dataset/coco/annotations/instances_val2017.json"

 res = fd.vision.evaluation.eval_detection(model, image_file_path,
                                           annotation_file_path, 0.001, 0.65)
tests/acc_eval/detection/run.sh (Normal file → Executable file)
@@ -12,6 +12,6 @@ python eval_yolov3.py --model_dir ./models/yolov3_darknet53_270e_coco --image
 python eval_yolox.py --model_dir ./models/yolox_s_300e_coco --image None --device $TARGET_DEVICE 2>&1 | tee ./log/yolox_s_300e_coco.log
 python eval_faster_rcnn.py --model_dir ./models/faster_rcnn_r50_vd_fpn_2x_coco --image None --device $TARGET_DEVICE 2>&1 | tee ./log/faster_rcnn_r50_vd_fpn_2x_coco.log
 python eval_mask_rcnn.py --model_dir ./models/mask_rcnn_r50_1x_coco --image None --device $TARGET_DEVICE 2>&1 | tee ./log/mask_rcnn_r50_1x_coco.log
-python eval_yolov5.py --model_dir ./models/yolov5s_infer --image None --device $TARGET_DEVICE 2>&1 | tee ./log/yolov5s_infer.log
-python eval_yolov6.py --model_dir ./models/yolov6s_infer --image None --device $TARGET_DEVICE 2>&1 | tee ./log/yolov6s_infer.log
-python eval_yolov5.py --model_dir ./models/yolov7_infer --image None --device $TARGET_DEVICE 2>&1 | tee ./log/yolov7_infer.log
+python eval_yolov5.py --model ./models/yolov5s_infer --image None --device $TARGET_DEVICE 2>&1 | tee ./log/yolov5s_infer.log
+python eval_yolov6.py --model ./models/yolov6s_infer --image None --device $TARGET_DEVICE 2>&1 | tee ./log/yolov6s_infer.log
+python eval_yolov7.py --model ./models/yolov7_infer --image None --device $TARGET_DEVICE 2>&1 | tee ./log/yolov7_infer.log
tests/acc_eval/ppocr/eval_ppocrv2.py (Normal file → Executable file)
@@ -103,7 +103,7 @@ rec_model = fd.vision.ocr.Recognizer(
     runtime_option=runtime_option)

 # Enable static-shape inference for the PP-OCR Rec model; comment this out if the target hardware does not need it.
-rec_model.preprocessor.static_shape = True
+rec_model.preprocessor.static_shape_infer = True

 # Create the PP-OCR pipeline by chaining the 3 models; cls_model is optional and can be set to None if not needed.
 ppocr_v2 = fd.vision.ocr.PPOCRv2(
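Since the hunk only shows the recognizer and the start of the pipeline constructor, here is a hedged sketch of how the three OCR models are typically assembled around it; the model directories, file names, and label file are illustrative assumptions, while the static_shape_infer line matches the change in the diff:

import fastdeploy as fd

runtime_option = fd.RuntimeOption()

# Detection, (optional) classification, and recognition models; paths are hypothetical.
det_model = fd.vision.ocr.DBDetector(
    "./models/ch_PP-OCRv2_det_infer/inference.pdmodel",
    "./models/ch_PP-OCRv2_det_infer/inference.pdiparams",
    runtime_option=runtime_option)
cls_model = fd.vision.ocr.Classifier(
    "./models/ch_ppocr_mobile_v2.0_cls_infer/inference.pdmodel",
    "./models/ch_ppocr_mobile_v2.0_cls_infer/inference.pdiparams",
    runtime_option=runtime_option)
rec_model = fd.vision.ocr.Recognizer(
    "./models/ch_PP-OCRv2_rec_infer/inference.pdmodel",
    "./models/ch_PP-OCRv2_rec_infer/inference.pdiparams",
    "./ppocr_keys_v1.txt",
    runtime_option=runtime_option)

# Enable static-shape inference on the recognizer, as in the diff above.
rec_model.preprocessor.static_shape_infer = True

# cls_model may be passed as None if angle classification is not required.
ppocr_v2 = fd.vision.ocr.PPOCRv2(det_model=det_model, cls_model=cls_model, rec_model=rec_model)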
tests/acc_eval/ppocr/eval_ppocrv3.py (Normal file → Executable file)
@@ -103,7 +103,7 @@ rec_model = fd.vision.ocr.Recognizer(
     runtime_option=runtime_option)

 # Enable static-shape inference for the PP-OCR Rec model; comment this out if the target hardware does not need it.
-rec_model.preprocessor.static_shape = True
+rec_model.preprocessor.static_shape_infer = True

 # Create the PP-OCR pipeline by chaining the 3 models; cls_model is optional and can be set to None if not needed.
 ppocr_v3 = fd.vision.ocr.PPOCRv3(
tests/acc_eval/segmentation/eval.py (Normal file → Executable file)
@@ -54,5 +54,5 @@ model = fd.vision.segmentation.PaddleSegModel(
     model_file, params_file, config_file, runtime_option=runtime_option)

 res = fd.vision.evaluation.eval_segmentation(
-    model=model, data_dir="../dataset/FD_dataset/data/cityscapes")
+    model=model, data_dir="../dataset/cityscapes")
 print(res)
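As with the detection scripts, the hunk above is the tail of a script that loads a PaddleSeg model and evaluates it on Cityscapes. A minimal sketch of the whole flow, assuming standard PaddleSeg export file names (model.pdmodel, model.pdiparams, deploy.yaml) and a hypothetical model directory, with the updated relative dataset path from the diff:

import os
import fastdeploy as fd

model_dir = "./models/pp_liteseg_infer"  # hypothetical model directory
model_file = os.path.join(model_dir, "model.pdmodel")
params_file = os.path.join(model_dir, "model.pdiparams")
config_file = os.path.join(model_dir, "deploy.yaml")

runtime_option = fd.RuntimeOption()  # select backend/device here as needed

model = fd.vision.segmentation.PaddleSegModel(
    model_file, params_file, config_file, runtime_option=runtime_option)

# Evaluate segmentation accuracy on the Cityscapes data laid out under ../dataset/cityscapes.
res = fd.vision.evaluation.eval_segmentation(
    model=model, data_dir="../dataset/cityscapes")
print(res)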