From f91a6126eee74f5cf918b6fe57455160529926ea Mon Sep 17 00:00:00 2001 From: felixhjh <852142024@qq.com> Date: Tue, 29 Nov 2022 08:26:27 +0000 Subject: [PATCH 01/18] Update keypointdetection result docs --- docs/api/vision_results/keypointdetection_result.md | 8 ++------ docs/api_docs/python/vision_results_cn.md | 3 +-- docs/api_docs/python/vision_results_en.md | 3 +-- tests/models/test_ppyoloe.py | 10 +++++++--- 4 files changed, 11 insertions(+), 13 deletions(-) diff --git a/docs/api/vision_results/keypointdetection_result.md b/docs/api/vision_results/keypointdetection_result.md index a47057ad8..645880d23 100644 --- a/docs/api/vision_results/keypointdetection_result.md +++ b/docs/api/vision_results/keypointdetection_result.md @@ -16,16 +16,13 @@ struct KeyPointDetectionResult { }; ``` -- **keypoints**: 成员变量,表示识别到的目标行为的关键点坐标。`keypoints.size()= N * J * 2`, +- **keypoints**: 成员变量,表示识别到的目标行为的关键点坐标。`keypoints.size()= N * J`, - `N`:图片中的目标数量 - `J`:num_joints(一个目标的关键点数量) - - `3`:坐标信息[x, y] - **scores**: 成员变量,表示识别到的目标行为的关键点坐标的置信度。`scores.size()= N * J` - `N`:图片中的目标数量 - `J`:num_joints(一个目标的关键点数量) - **num_joints**: 成员变量,一个目标的关键点数量 - -- **num_joints**: 成员变量,一个目标的关键点数量 - **Clear()**: 成员函数,用于清除结构体中存储的结果 - **Str()**: 成员函数,将结构体中的信息以字符串形式输出(用于Debug) @@ -34,10 +31,9 @@ struct KeyPointDetectionResult { `fastdeploy.vision.KeyPointDetectionResult` - **keypoints**(list of list(float)): 成员变量,表示识别到的目标行为的关键点坐标。 - `keypoints.size()= N * J * 2` + `keypoints.size()= N * J` `N`:图片中的目标数量 `J`:num_joints(关键点数量) - `3`:坐标信息[x, y, conf] - **scores**(list of float): 成员变量,表示识别到的目标行为的关键点坐标的置信度。 `scores.size()= N * J` `N`:图片中的目标数量 diff --git a/docs/api_docs/python/vision_results_cn.md b/docs/api_docs/python/vision_results_cn.md index 19b2a6662..e9dc00621 100644 --- a/docs/api_docs/python/vision_results_cn.md +++ b/docs/api_docs/python/vision_results_cn.md @@ -46,10 +46,9 @@ API:`fastdeploy.vision.FaceDetectionResult` , 该结果返回: KeyPointDetectionResult 代码定义在`fastdeploy/vision/common/result.h`中,用于表明图像中目标行为的各个关键点坐标和置信度。 API:`fastdeploy.vision.KeyPointDetectionResult` , 该结果返回: -- **keypoints**(list of list(float)): 成员变量,表示识别到的目标行为的关键点坐标。`keypoints.size()= N * J * 2`, +- **keypoints**(list of list(float)): 成员变量,表示识别到的目标行为的关键点坐标。`keypoints.size()= N * J`, - `N`:图片中的目标数量 - `J`:num_joints(一个目标的关键点数量) - - `3`:坐标信息[x, y] - **scores**(list of float): 成员变量,表示识别到的目标行为的关键点坐标的置信度。`scores.size()= N * J` - `N`:图片中的目标数量 - `J`:num_joints(一个目标的关键点数量) diff --git a/docs/api_docs/python/vision_results_en.md b/docs/api_docs/python/vision_results_en.md index dc02d0b4f..f297331d1 100644 --- a/docs/api_docs/python/vision_results_en.md +++ b/docs/api_docs/python/vision_results_en.md @@ -49,10 +49,9 @@ API: `fastdeploy.vision.FaceDetectionResult`, The FaceDetectionResult will retur The KeyPointDetectionResult code is defined in `fastdeploy/vision/common/result.h` and is used to indicate the coordinates and confidence of each keypoint of the target behavior in the image. API:`fastdeploy.vision.KeyPointDetectionResult`, The KeyPointDetectionResult will return: -- **keypoints**(list of list(float)): Member variable, representing the key point coordinates of the identified target behavior. `keypoints.size()= N * J * 2`, +- **keypoints**(list of list(float)): Member variable, representing the key point coordinates of the identified target behavior. 
`keypoints.size()= N * J`, - `N`: number of objects in the picture - `J`: num_joints(number of keypoints for a target) - - `3`: 坐标信息[x, y] - **scores**(list of float): Member variable, representing the confidence of the keypoint coordinates of the recognized target behavior. `scores.size()= N * J` - `N`: number of objects in the picture - `J`: num_joints(number of keypoints for a target) diff --git a/tests/models/test_ppyoloe.py b/tests/models/test_ppyoloe.py index 08b19bf91..a479f90e1 100755 --- a/tests/models/test_ppyoloe.py +++ b/tests/models/test_ppyoloe.py @@ -60,6 +60,7 @@ def test_detection_ppyoloe(): assert diff_label_ids[scores > score_threshold].max( ) < 1e-04, "There's diff in label_ids." + def test_detection_ppyoloe1(): model_url = "https://bj.bcebos.com/paddlehub/fastdeploy/ppyoloe_crn_l_300e_coco.tgz" input_url1 = "https://gitee.com/paddlepaddle/PaddleDetection/raw/release/2.4/demo/000000014439.jpg" @@ -75,15 +76,18 @@ def test_detection_ppyoloe1(): preprocessor = fd.vision.detection.PaddleDetPreprocessor(config_file) postprocessor = fd.vision.detection.PaddleDetPostprocessor() - + rc.test_option.set_model_path(model_file, params_file) - runtime = fd.Runtime(rc.test_option); + runtime = fd.Runtime(rc.test_option) # compare diff im1 = cv2.imread("./resources/000000014439.jpg") for i in range(2): input_tensors = preprocessor.run([im1]) - output_tensors = runtime.infer({"image": input_tensors[0], "scale_factor": input_tensors[1]}) + output_tensors = runtime.infer({ + "image": input_tensors[0], + "scale_factor": input_tensors[1] + }) results = postprocessor.run(output_tensors) result = results[0] with open("resources/ppyoloe_baseline.pkl", "rb") as f: From 2f6f6977d76fff2176e46c796ad3551c5b911c7b Mon Sep 17 00:00:00 2001 From: felixhjh <852142024@qq.com> Date: Sun, 11 Dec 2022 14:10:16 +0000 Subject: [PATCH 02/18] Update im.copy() to im in examples --- examples/vision/classification/resnet/python/infer.py | 2 +- examples/vision/classification/yolov5cls/python/infer.py | 2 +- examples/vision/detection/nanodet_plus/python/infer.py | 2 +- examples/vision/detection/scaledyolov4/python/infer.py | 2 +- examples/vision/detection/yolor/python/infer.py | 2 +- examples/vision/detection/yolov5/python/infer.py | 2 +- examples/vision/detection/yolov5/quantize/python/infer.py | 2 +- examples/vision/detection/yolov5lite/python/infer.py | 2 +- examples/vision/detection/yolov6/python/infer.py | 2 +- examples/vision/detection/yolov6/quantize/python/infer.py | 2 +- examples/vision/detection/yolov7/python/infer.py | 2 +- examples/vision/detection/yolov7/quantize/python/infer.py | 2 +- examples/vision/detection/yolov7end2end_ort/python/infer.py | 2 +- examples/vision/detection/yolov7end2end_trt/python/infer.py | 2 +- examples/vision/detection/yolox/python/infer.py | 2 +- examples/vision/facealign/face_landmark_1000/python/infer.py | 2 +- examples/vision/facealign/pfld/python/infer.py | 2 +- examples/vision/facealign/pipnet/python/infer.py | 2 +- examples/vision/facedet/retinaface/python/infer.py | 2 +- examples/vision/facedet/scrfd/python/infer.py | 2 +- examples/vision/facedet/scrfd/rknpu2/python/infer.py | 2 +- examples/vision/facedet/ultraface/python/infer.py | 2 +- examples/vision/facedet/yolov5face/python/infer.py | 2 +- examples/vision/headpose/fsanet/python/infer.py | 2 +- examples/vision/matting/modnet/python/infer.py | 2 +- examples/vision/matting/ppmatting/python/infer.py | 2 +- examples/vision/matting/rvm/python/infer.py | 4 ++-- examples/vision/segmentation/paddleseg/python/infer.py | 2 
+- .../vision/segmentation/paddleseg/quantize/python/infer.py | 2 +- examples/vision/segmentation/paddleseg/rknpu2/python/infer.py | 2 +- 30 files changed, 31 insertions(+), 31 deletions(-) diff --git a/examples/vision/classification/resnet/python/infer.py b/examples/vision/classification/resnet/python/infer.py index b8b268f3a..ba22304fc 100644 --- a/examples/vision/classification/resnet/python/infer.py +++ b/examples/vision/classification/resnet/python/infer.py @@ -46,5 +46,5 @@ model = fd.vision.classification.ResNet( args.model, runtime_option=runtime_option) # 预测图片分类结果 im = cv2.imread(args.image) -result = model.predict(im.copy(), args.topk) +result = model.predict(im, args.topk) print(result) diff --git a/examples/vision/classification/yolov5cls/python/infer.py b/examples/vision/classification/yolov5cls/python/infer.py index 576db32f2..55974a764 100644 --- a/examples/vision/classification/yolov5cls/python/infer.py +++ b/examples/vision/classification/yolov5cls/python/infer.py @@ -47,5 +47,5 @@ model = fd.vision.classification.YOLOv5Cls( # 预测图片分类结果 im = cv2.imread(args.image) -result = model.predict(im.copy(), args.topk) +result = model.predict(im, args.topk) print(result) diff --git a/examples/vision/detection/nanodet_plus/python/infer.py b/examples/vision/detection/nanodet_plus/python/infer.py index 9e7bf184f..a60f3a3b4 100644 --- a/examples/vision/detection/nanodet_plus/python/infer.py +++ b/examples/vision/detection/nanodet_plus/python/infer.py @@ -52,7 +52,7 @@ if args.image is None: else: image = args.image im = cv2.imread(image) -result = model.predict(im.copy()) +result = model.predict(im) print(result) # 预测结果可视化 diff --git a/examples/vision/detection/scaledyolov4/python/infer.py b/examples/vision/detection/scaledyolov4/python/infer.py index 9d990d77b..cd23e84e3 100644 --- a/examples/vision/detection/scaledyolov4/python/infer.py +++ b/examples/vision/detection/scaledyolov4/python/infer.py @@ -52,7 +52,7 @@ if args.image is None: else: image = args.image im = cv2.imread(image) -result = model.predict(im.copy()) +result = model.predict(im) print(result) # 预测结果可视化 diff --git a/examples/vision/detection/yolor/python/infer.py b/examples/vision/detection/yolor/python/infer.py index d5ab3dd7f..4111fa0ae 100644 --- a/examples/vision/detection/yolor/python/infer.py +++ b/examples/vision/detection/yolor/python/infer.py @@ -54,7 +54,7 @@ else: image = args.image im = cv2.imread(image) -result = model.predict(im.copy()) +result = model.predict(im) print(result) # 预测结果可视化 diff --git a/examples/vision/detection/yolov5/python/infer.py b/examples/vision/detection/yolov5/python/infer.py index fdded06c9..462740e9c 100644 --- a/examples/vision/detection/yolov5/python/infer.py +++ b/examples/vision/detection/yolov5/python/infer.py @@ -52,7 +52,7 @@ if args.image is None: else: image = args.image im = cv2.imread(image) -result = model.predict(im.copy()) +result = model.predict(im) print(result) # 预测结果可视化 diff --git a/examples/vision/detection/yolov5/quantize/python/infer.py b/examples/vision/detection/yolov5/quantize/python/infer.py index da502fe93..996bc5419 100644 --- a/examples/vision/detection/yolov5/quantize/python/infer.py +++ b/examples/vision/detection/yolov5/quantize/python/infer.py @@ -71,7 +71,7 @@ model = fd.vision.detection.YOLOv5( # 预测图片检测结果 im = cv2.imread(args.image) -result = model.predict(im.copy()) +result = model.predict(im) print(result) # 预测结果可视化 diff --git a/examples/vision/detection/yolov5lite/python/infer.py b/examples/vision/detection/yolov5lite/python/infer.py index 
b1ec69046..2242a33a5 100644 --- a/examples/vision/detection/yolov5lite/python/infer.py +++ b/examples/vision/detection/yolov5lite/python/infer.py @@ -52,7 +52,7 @@ if args.image is None: else: image = args.image im = cv2.imread(image) -result = model.predict(im.copy()) +result = model.predict(im) print(result) # 预测结果可视化 diff --git a/examples/vision/detection/yolov6/python/infer.py b/examples/vision/detection/yolov6/python/infer.py index 0a0a163fb..47bf3e689 100644 --- a/examples/vision/detection/yolov6/python/infer.py +++ b/examples/vision/detection/yolov6/python/infer.py @@ -52,7 +52,7 @@ if args.image is None: else: image = args.image im = cv2.imread(image) -result = model.predict(im.copy()) +result = model.predict(im) print(result) # 预测结果可视化 diff --git a/examples/vision/detection/yolov6/quantize/python/infer.py b/examples/vision/detection/yolov6/quantize/python/infer.py index da9fa3d97..77f46d4c2 100644 --- a/examples/vision/detection/yolov6/quantize/python/infer.py +++ b/examples/vision/detection/yolov6/quantize/python/infer.py @@ -71,7 +71,7 @@ model = fd.vision.detection.YOLOv6( # 预测图片检测结果 im = cv2.imread(args.image) -result = model.predict(im.copy()) +result = model.predict(im) print(result) # 预测结果可视化 diff --git a/examples/vision/detection/yolov7/python/infer.py b/examples/vision/detection/yolov7/python/infer.py index b0ece7e47..468b47dc5 100644 --- a/examples/vision/detection/yolov7/python/infer.py +++ b/examples/vision/detection/yolov7/python/infer.py @@ -52,7 +52,7 @@ if args.image is None: else: image = args.image im = cv2.imread(image) -result = model.predict(im.copy()) +result = model.predict(im) # 预测结果可视化 vis_im = fd.vision.vis_detection(im, result) diff --git a/examples/vision/detection/yolov7/quantize/python/infer.py b/examples/vision/detection/yolov7/quantize/python/infer.py index de84e4061..d07834c7c 100644 --- a/examples/vision/detection/yolov7/quantize/python/infer.py +++ b/examples/vision/detection/yolov7/quantize/python/infer.py @@ -71,7 +71,7 @@ model = fd.vision.detection.YOLOv7( # 预测图片检测结果 im = cv2.imread(args.image) -result = model.predict(im.copy()) +result = model.predict(im) print(result) # 预测结果可视化 diff --git a/examples/vision/detection/yolov7end2end_ort/python/infer.py b/examples/vision/detection/yolov7end2end_ort/python/infer.py index 2b812b71a..914facb59 100644 --- a/examples/vision/detection/yolov7end2end_ort/python/infer.py +++ b/examples/vision/detection/yolov7end2end_ort/python/infer.py @@ -44,7 +44,7 @@ model = fd.vision.detection.YOLOv7End2EndORT( # 预测图片检测结果 im = cv2.imread(args.image) -result = model.predict(im.copy()) +result = model.predict(im) print(result) # 预测结果可视化 diff --git a/examples/vision/detection/yolov7end2end_trt/python/infer.py b/examples/vision/detection/yolov7end2end_trt/python/infer.py index d179de06f..b1ad8ea0c 100644 --- a/examples/vision/detection/yolov7end2end_trt/python/infer.py +++ b/examples/vision/detection/yolov7end2end_trt/python/infer.py @@ -44,7 +44,7 @@ model = fd.vision.detection.YOLOv7End2EndTRT( # 预测图片检测结果 im = cv2.imread(args.image) -result = model.predict(im.copy()) +result = model.predict(im) print(result) # 预测结果可视化 diff --git a/examples/vision/detection/yolox/python/infer.py b/examples/vision/detection/yolox/python/infer.py index 69203010b..c5e71754a 100644 --- a/examples/vision/detection/yolox/python/infer.py +++ b/examples/vision/detection/yolox/python/infer.py @@ -52,7 +52,7 @@ if args.image is None: else: image = args.image im = cv2.imread(image) -result = model.predict(im.copy()) +result = model.predict(im) 
print(result) # 预测结果可视化 vis_im = fd.vision.vis_detection(im, result) diff --git a/examples/vision/facealign/face_landmark_1000/python/infer.py b/examples/vision/facealign/face_landmark_1000/python/infer.py index bbb3a19c8..9eaedfb63 100644 --- a/examples/vision/facealign/face_landmark_1000/python/infer.py +++ b/examples/vision/facealign/face_landmark_1000/python/infer.py @@ -82,7 +82,7 @@ model = fd.vision.facealign.FaceLandmark1000( # for image im = cv2.imread(args.image) -result = model.predict(im.copy()) +result = model.predict(im) print(result) # 可视化结果 vis_im = fd.vision.vis_face_alignment(im, result) diff --git a/examples/vision/facealign/pfld/python/infer.py b/examples/vision/facealign/pfld/python/infer.py index 622fbf822..0a695e8c6 100755 --- a/examples/vision/facealign/pfld/python/infer.py +++ b/examples/vision/facealign/pfld/python/infer.py @@ -80,7 +80,7 @@ model = fd.vision.facealign.PFLD(args.model, runtime_option=runtime_option) # for image im = cv2.imread(args.image) -result = model.predict(im.copy()) +result = model.predict(im) print(result) # 可视化结果 vis_im = fd.vision.vis_face_alignment(im, result) diff --git a/examples/vision/facealign/pipnet/python/infer.py b/examples/vision/facealign/pipnet/python/infer.py index 628b86f79..46f9c2d90 100644 --- a/examples/vision/facealign/pipnet/python/infer.py +++ b/examples/vision/facealign/pipnet/python/infer.py @@ -85,7 +85,7 @@ model = fd.vision.facealign.PIPNet(args.model, runtime_option=runtime_option) model.num_landmarks = args.num_landmarks # for image im = cv2.imread(args.image) -result = model.predict(im.copy()) +result = model.predict(im) print(result) # 可视化结果 vis_im = fd.vision.vis_face_alignment(im, result) diff --git a/examples/vision/facedet/retinaface/python/infer.py b/examples/vision/facedet/retinaface/python/infer.py index 3b0152b1c..6b7f63e5e 100644 --- a/examples/vision/facedet/retinaface/python/infer.py +++ b/examples/vision/facedet/retinaface/python/infer.py @@ -43,7 +43,7 @@ model = fd.vision.facedet.RetinaFace(args.model, runtime_option=runtime_option) # 预测图片检测结果 im = cv2.imread(args.image) -result = model.predict(im.copy()) +result = model.predict(im) print(result) # 预测结果可视化 vis_im = fd.vision.vis_face_detection(im, result) diff --git a/examples/vision/facedet/scrfd/python/infer.py b/examples/vision/facedet/scrfd/python/infer.py index a99e66385..af141e011 100644 --- a/examples/vision/facedet/scrfd/python/infer.py +++ b/examples/vision/facedet/scrfd/python/infer.py @@ -43,7 +43,7 @@ model = fd.vision.facedet.SCRFD(args.model, runtime_option=runtime_option) # 预测图片检测结果 im = cv2.imread(args.image) -result = model.predict(im.copy()) +result = model.predict(im) print(result) # 预测结果可视化 vis_im = fd.vision.vis_face_detection(im, result) diff --git a/examples/vision/facedet/scrfd/rknpu2/python/infer.py b/examples/vision/facedet/scrfd/rknpu2/python/infer.py index 3b3fc9d83..77e57a0f1 100644 --- a/examples/vision/facedet/scrfd/rknpu2/python/infer.py +++ b/examples/vision/facedet/scrfd/rknpu2/python/infer.py @@ -49,7 +49,7 @@ model.disable_normalize_and_permute() # 预测图片分割结果 im = cv2.imread(args.image) -result = model.predict(im.copy()) +result = model.predict(im) print(result) # 可视化结果 diff --git a/examples/vision/facedet/ultraface/python/infer.py b/examples/vision/facedet/ultraface/python/infer.py index 5399110b9..2087ce5bf 100644 --- a/examples/vision/facedet/ultraface/python/infer.py +++ b/examples/vision/facedet/ultraface/python/infer.py @@ -43,7 +43,7 @@ model = fd.vision.facedet.UltraFace(args.model, 
runtime_option=runtime_option) # 预测图片检测结果 im = cv2.imread(args.image) -result = model.predict(im.copy()) +result = model.predict(im) print(result) # 预测结果可视化 vis_im = fd.vision.vis_face_detection(im, result) diff --git a/examples/vision/facedet/yolov5face/python/infer.py b/examples/vision/facedet/yolov5face/python/infer.py index a9f044682..ca907afb4 100644 --- a/examples/vision/facedet/yolov5face/python/infer.py +++ b/examples/vision/facedet/yolov5face/python/infer.py @@ -43,7 +43,7 @@ model = fd.vision.facedet.YOLOv5Face(args.model, runtime_option=runtime_option) # 预测图片检测结果 im = cv2.imread(args.image) -result = model.predict(im.copy()) +result = model.predict(im) print(result) # 预测结果可视化 vis_im = fd.vision.vis_face_detection(im, result) diff --git a/examples/vision/headpose/fsanet/python/infer.py b/examples/vision/headpose/fsanet/python/infer.py index 866ce6d5c..488e35153 100644 --- a/examples/vision/headpose/fsanet/python/infer.py +++ b/examples/vision/headpose/fsanet/python/infer.py @@ -80,7 +80,7 @@ model = fd.vision.headpose.FSANet(args.model, runtime_option=runtime_option) # for image im = cv2.imread(args.image) -result = model.predict(im.copy()) +result = model.predict(im) print(result) # 可视化结果 vis_im = fd.vision.vis_headpose(im, result) diff --git a/examples/vision/matting/modnet/python/infer.py b/examples/vision/matting/modnet/python/infer.py index 408ba2340..abb9be037 100644 --- a/examples/vision/matting/modnet/python/infer.py +++ b/examples/vision/matting/modnet/python/infer.py @@ -52,7 +52,7 @@ model.size = (256, 256) # 预测图片抠图结果 im = cv2.imread(args.image) bg = cv2.imread(args.bg) -result = model.predict(im.copy()) +result = model.predict(im) print(result) # 可视化结果 vis_im = fd.vision.vis_matting_alpha(im, result) diff --git a/examples/vision/matting/ppmatting/python/infer.py b/examples/vision/matting/ppmatting/python/infer.py index 61031e1b6..32a81eca7 100644 --- a/examples/vision/matting/ppmatting/python/infer.py +++ b/examples/vision/matting/ppmatting/python/infer.py @@ -56,7 +56,7 @@ model = fd.vision.matting.PPMatting( # 预测图片抠图结果 im = cv2.imread(args.image) bg = cv2.imread(args.bg) -result = model.predict(im.copy()) +result = model.predict(im) print(result) # 可视化结果 vis_im = fd.vision.vis_matting(im, result) diff --git a/examples/vision/matting/rvm/python/infer.py b/examples/vision/matting/rvm/python/infer.py index 11951b00f..fcde64fb2 100755 --- a/examples/vision/matting/rvm/python/infer.py +++ b/examples/vision/matting/rvm/python/infer.py @@ -73,7 +73,7 @@ if args.video is not None: # for video cap = cv2.VideoCapture(args.video) # Define the codec and create VideoWriter object - fourcc = cv2.VideoWriter_fourcc(*'mp4v') + fourcc = cv2.VideoWriter_fourcc('m', 'p', '4', 'v') composition = cv2.VideoWriter(output_composition, fourcc, 20.0, (1080, 1920)) alpha = cv2.VideoWriter(output_alpha, fourcc, 20.0, (1080, 1920)) @@ -100,7 +100,7 @@ if args.video is not None: if args.image is not None: # for image im = cv2.imread(args.image) - result = model.predict(im.copy()) + result = model.predict(im) print(result) # 可视化结果 vis_im = fd.vision.vis_matting(im, result) diff --git a/examples/vision/segmentation/paddleseg/python/infer.py b/examples/vision/segmentation/paddleseg/python/infer.py index 866e32bfb..9df7665a2 100644 --- a/examples/vision/segmentation/paddleseg/python/infer.py +++ b/examples/vision/segmentation/paddleseg/python/infer.py @@ -49,7 +49,7 @@ model = fd.vision.segmentation.PaddleSegModel( # 预测图片分割结果 im = cv2.imread(args.image) -result = model.predict(im.copy()) +result = 
model.predict(im) print(result) # 可视化结果 diff --git a/examples/vision/segmentation/paddleseg/quantize/python/infer.py b/examples/vision/segmentation/paddleseg/quantize/python/infer.py index f95f04d17..85a875c1e 100644 --- a/examples/vision/segmentation/paddleseg/quantize/python/infer.py +++ b/examples/vision/segmentation/paddleseg/quantize/python/infer.py @@ -72,5 +72,5 @@ model = fd.vision.segmentation.PaddleSegModel( # 预测图片检测结果 im = cv2.imread(args.image) -result = model.predict(im.copy()) +result = model.predict(im) print(result) diff --git a/examples/vision/segmentation/paddleseg/rknpu2/python/infer.py b/examples/vision/segmentation/paddleseg/rknpu2/python/infer.py index 8841132a9..d7239eb42 100644 --- a/examples/vision/segmentation/paddleseg/rknpu2/python/infer.py +++ b/examples/vision/segmentation/paddleseg/rknpu2/python/infer.py @@ -53,7 +53,7 @@ model.disable_normalize_and_permute() # 预测图片分割结果 im = cv2.imread(args.image) -result = model.predict(im.copy()) +result = model.predict(im) print(result) # 可视化结果 From bfc9ac9ee1f0810fdbeb50761a96aa6a21b60285 Mon Sep 17 00:00:00 2001 From: felixhjh <852142024@qq.com> Date: Mon, 12 Dec 2022 04:13:40 +0000 Subject: [PATCH 03/18] Update new Api, fastdeploy::vision::Visualize to fastdeploy::vision --- examples/vision/detection/nanodet_plus/cpp/infer.cc | 6 +++--- examples/vision/detection/scaledyolov4/cpp/infer.cc | 6 +++--- examples/vision/detection/yolor/cpp/infer.cc | 6 +++--- examples/vision/detection/yolov5/cpp/infer.cc | 6 +++--- examples/vision/detection/yolov5/quantize/cpp/infer.cc | 2 +- examples/vision/detection/yolov5lite/cpp/infer.cc | 6 +++--- examples/vision/detection/yolov6/cpp/infer.cc | 6 +++--- examples/vision/detection/yolov6/quantize/cpp/infer.cc | 2 +- examples/vision/detection/yolov7/cpp/infer.cc | 6 +++--- examples/vision/detection/yolov7/quantize/cpp/infer.cc | 2 +- examples/vision/detection/yolov7end2end_ort/cpp/infer.cc | 6 +++--- examples/vision/detection/yolov7end2end_trt/cpp/infer.cc | 6 +++--- examples/vision/detection/yolox/cpp/infer.cc | 6 +++--- examples/vision/facedet/retinaface/cpp/infer.cc | 6 +++--- examples/vision/facedet/scrfd/cpp/infer.cc | 6 +++--- examples/vision/facedet/scrfd/rknpu2/cpp/infer.cc | 2 +- examples/vision/facedet/ultraface/cpp/infer.cc | 6 +++--- examples/vision/facedet/yolov5face/cpp/infer.cc | 6 +++--- examples/vision/matting/modnet/cpp/infer.cc | 6 +++--- examples/vision/matting/ppmatting/cpp/infer.cc | 6 +++--- 20 files changed, 52 insertions(+), 52 deletions(-) diff --git a/examples/vision/detection/nanodet_plus/cpp/infer.cc b/examples/vision/detection/nanodet_plus/cpp/infer.cc index 8443639cc..8c9f3338f 100644 --- a/examples/vision/detection/nanodet_plus/cpp/infer.cc +++ b/examples/vision/detection/nanodet_plus/cpp/infer.cc @@ -30,7 +30,7 @@ void CpuInfer(const std::string& model_file, const std::string& image_file) { return; } std::cout << res.Str() << std::endl; - auto vis_im = fastdeploy::vision::Visualize::VisDetection(im_bak, res); + auto vis_im = fastdeploy::vision::VisDetection(im_bak, res); cv::imwrite("vis_result.jpg", vis_im); std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl; } @@ -55,7 +55,7 @@ void GpuInfer(const std::string& model_file, const std::string& image_file) { } std::cout << res.Str() << std::endl; - auto vis_im = fastdeploy::vision::Visualize::VisDetection(im_bak, res); + auto vis_im = fastdeploy::vision::VisDetection(im_bak, res); cv::imwrite("vis_result.jpg", vis_im); std::cout << "Visualized result saved in ./vis_result.jpg" << 
std::endl; } @@ -82,7 +82,7 @@ void TrtInfer(const std::string& model_file, const std::string& image_file) { } std::cout << res.Str() << std::endl; - auto vis_im = fastdeploy::vision::Visualize::VisDetection(im_bak, res); + auto vis_im = fastdeploy::vision::VisDetection(im_bak, res); cv::imwrite("vis_result.jpg", vis_im); std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl; } diff --git a/examples/vision/detection/scaledyolov4/cpp/infer.cc b/examples/vision/detection/scaledyolov4/cpp/infer.cc index 7d912b223..040823a1d 100644 --- a/examples/vision/detection/scaledyolov4/cpp/infer.cc +++ b/examples/vision/detection/scaledyolov4/cpp/infer.cc @@ -31,7 +31,7 @@ void CpuInfer(const std::string& model_file, const std::string& image_file) { } std::cout << res.Str() << std::endl; - auto vis_im = fastdeploy::vision::Visualize::VisDetection(im_bak, res); + auto vis_im = fastdeploy::vision::VisDetection(im_bak, res); cv::imwrite("vis_result.jpg", vis_im); std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl; } @@ -56,7 +56,7 @@ void GpuInfer(const std::string& model_file, const std::string& image_file) { } std::cout << res.Str() << std::endl; - auto vis_im = fastdeploy::vision::Visualize::VisDetection(im_bak, res); + auto vis_im = fastdeploy::vision::VisDetection(im_bak, res); cv::imwrite("vis_result.jpg", vis_im); std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl; } @@ -83,7 +83,7 @@ void TrtInfer(const std::string& model_file, const std::string& image_file) { } std::cout << res.Str() << std::endl; - auto vis_im = fastdeploy::vision::Visualize::VisDetection(im_bak, res); + auto vis_im = fastdeploy::vision::VisDetection(im_bak, res); cv::imwrite("vis_result.jpg", vis_im); std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl; } diff --git a/examples/vision/detection/yolor/cpp/infer.cc b/examples/vision/detection/yolor/cpp/infer.cc index 0fe8913d4..105855a02 100644 --- a/examples/vision/detection/yolor/cpp/infer.cc +++ b/examples/vision/detection/yolor/cpp/infer.cc @@ -31,7 +31,7 @@ void CpuInfer(const std::string& model_file, const std::string& image_file) { } std::cout << res.Str() << std::endl; - auto vis_im = fastdeploy::vision::Visualize::VisDetection(im_bak, res); + auto vis_im = fastdeploy::vision::VisDetection(im_bak, res); cv::imwrite("vis_result.jpg", vis_im); std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl; } @@ -55,7 +55,7 @@ void GpuInfer(const std::string& model_file, const std::string& image_file) { } std::cout << res.Str() << std::endl; - auto vis_im = fastdeploy::vision::Visualize::VisDetection(im_bak, res); + auto vis_im = fastdeploy::vision::VisDetection(im_bak, res); cv::imwrite("vis_result.jpg", vis_im); std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl; } @@ -81,7 +81,7 @@ void TrtInfer(const std::string& model_file, const std::string& image_file) { } std::cout << res.Str() << std::endl; - auto vis_im = fastdeploy::vision::Visualize::VisDetection(im_bak, res); + auto vis_im = fastdeploy::vision::VisDetection(im_bak, res); cv::imwrite("vis_result.jpg", vis_im); std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl; } diff --git a/examples/vision/detection/yolov5/cpp/infer.cc b/examples/vision/detection/yolov5/cpp/infer.cc index a7ac1fe81..dea29450b 100644 --- a/examples/vision/detection/yolov5/cpp/infer.cc +++ b/examples/vision/detection/yolov5/cpp/infer.cc @@ -31,7 +31,7 @@ void CpuInfer(const std::string& model_file, const std::string& 
image_file) { } std::cout << res.Str() << std::endl; - auto vis_im = fastdeploy::vision::Visualize::VisDetection(im_bak, res); + auto vis_im = fastdeploy::vision::VisDetection(im_bak, res); cv::imwrite("vis_result.jpg", vis_im); std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl; } @@ -55,7 +55,7 @@ void GpuInfer(const std::string& model_file, const std::string& image_file) { } std::cout << res.Str() << std::endl; - auto vis_im = fastdeploy::vision::Visualize::VisDetection(im_bak, res); + auto vis_im = fastdeploy::vision::VisDetection(im_bak, res); cv::imwrite("vis_result.jpg", vis_im); std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl; } @@ -81,7 +81,7 @@ void TrtInfer(const std::string& model_file, const std::string& image_file) { } std::cout << res.Str() << std::endl; - auto vis_im = fastdeploy::vision::Visualize::VisDetection(im_bak, res); + auto vis_im = fastdeploy::vision::VisDetection(im_bak, res); cv::imwrite("vis_result.jpg", vis_im); std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl; } diff --git a/examples/vision/detection/yolov5/quantize/cpp/infer.cc b/examples/vision/detection/yolov5/quantize/cpp/infer.cc index e429b2aad..690c19ddf 100644 --- a/examples/vision/detection/yolov5/quantize/cpp/infer.cc +++ b/examples/vision/detection/yolov5/quantize/cpp/infer.cc @@ -39,7 +39,7 @@ void InitAndInfer(const std::string& model_dir, const std::string& image_file, std::cout << res.Str() << std::endl; - auto vis_im = fastdeploy::vision::Visualize::VisDetection(im_bak, res); + auto vis_im = fastdeploy::vision::VisDetection(im_bak, res); cv::imwrite("vis_result.jpg", vis_im); std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl; } diff --git a/examples/vision/detection/yolov5lite/cpp/infer.cc b/examples/vision/detection/yolov5lite/cpp/infer.cc index ac32bca93..0d9c58201 100644 --- a/examples/vision/detection/yolov5lite/cpp/infer.cc +++ b/examples/vision/detection/yolov5lite/cpp/infer.cc @@ -31,7 +31,7 @@ void CpuInfer(const std::string& model_file, const std::string& image_file) { } std::cout << res.Str() << std::endl; - auto vis_im = fastdeploy::vision::Visualize::VisDetection(im_bak, res); + auto vis_im = fastdeploy::vision::VisDetection(im_bak, res); cv::imwrite("vis_result.jpg", vis_im); std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl; } @@ -56,7 +56,7 @@ void GpuInfer(const std::string& model_file, const std::string& image_file) { } std::cout << res.Str() << std::endl; - auto vis_im = fastdeploy::vision::Visualize::VisDetection(im_bak, res); + auto vis_im = fastdeploy::vision::VisDetection(im_bak, res); cv::imwrite("vis_result.jpg", vis_im); std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl; } @@ -83,7 +83,7 @@ void TrtInfer(const std::string& model_file, const std::string& image_file) { } std::cout << res.Str() << std::endl; - auto vis_im = fastdeploy::vision::Visualize::VisDetection(im_bak, res); + auto vis_im = fastdeploy::vision::VisDetection(im_bak, res); cv::imwrite("vis_result.jpg", vis_im); std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl; } diff --git a/examples/vision/detection/yolov6/cpp/infer.cc b/examples/vision/detection/yolov6/cpp/infer.cc index 72b2e7bed..fd00796f1 100644 --- a/examples/vision/detection/yolov6/cpp/infer.cc +++ b/examples/vision/detection/yolov6/cpp/infer.cc @@ -31,7 +31,7 @@ void CpuInfer(const std::string& model_file, const std::string& image_file) { } std::cout << res.Str() << std::endl; - auto vis_im 
= fastdeploy::vision::Visualize::VisDetection(im_bak, res); + auto vis_im = fastdeploy::vision::VisDetection(im_bak, res); cv::imwrite("vis_result.jpg", vis_im); std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl; } @@ -55,7 +55,7 @@ void GpuInfer(const std::string& model_file, const std::string& image_file) { } std::cout << res.Str() << std::endl; - auto vis_im = fastdeploy::vision::Visualize::VisDetection(im_bak, res); + auto vis_im = fastdeploy::vision::VisDetection(im_bak, res); cv::imwrite("vis_result.jpg", vis_im); std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl; } @@ -81,7 +81,7 @@ void TrtInfer(const std::string& model_file, const std::string& image_file) { } std::cout << res.Str() << std::endl; - auto vis_im = fastdeploy::vision::Visualize::VisDetection(im_bak, res); + auto vis_im = fastdeploy::vision::VisDetection(im_bak, res); cv::imwrite("vis_result.jpg", vis_im); std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl; } diff --git a/examples/vision/detection/yolov6/quantize/cpp/infer.cc b/examples/vision/detection/yolov6/quantize/cpp/infer.cc index b40200962..57754ca30 100644 --- a/examples/vision/detection/yolov6/quantize/cpp/infer.cc +++ b/examples/vision/detection/yolov6/quantize/cpp/infer.cc @@ -39,7 +39,7 @@ void InitAndInfer(const std::string& model_dir, const std::string& image_file, std::cout << res.Str() << std::endl; - auto vis_im = fastdeploy::vision::Visualize::VisDetection(im_bak, res); + auto vis_im = fastdeploy::vision::VisDetection(im_bak, res); cv::imwrite("vis_result.jpg", vis_im); std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl; } diff --git a/examples/vision/detection/yolov7/cpp/infer.cc b/examples/vision/detection/yolov7/cpp/infer.cc index cf79a16ad..803a7227a 100644 --- a/examples/vision/detection/yolov7/cpp/infer.cc +++ b/examples/vision/detection/yolov7/cpp/infer.cc @@ -31,7 +31,7 @@ void CpuInfer(const std::string& model_file, const std::string& image_file) { } std::cout << res.Str() << std::endl; - auto vis_im = fastdeploy::vision::Visualize::VisDetection(im_bak, res); + auto vis_im = fastdeploy::vision::VisDetection(im_bak, res); cv::imwrite("vis_result.jpg", vis_im); std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl; } @@ -55,7 +55,7 @@ void GpuInfer(const std::string& model_file, const std::string& image_file) { } std::cout << res.Str() << std::endl; - auto vis_im = fastdeploy::vision::Visualize::VisDetection(im_bak, res); + auto vis_im = fastdeploy::vision::VisDetection(im_bak, res); cv::imwrite("vis_result.jpg", vis_im); std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl; } @@ -81,7 +81,7 @@ void TrtInfer(const std::string& model_file, const std::string& image_file) { } std::cout << res.Str() << std::endl; - auto vis_im = fastdeploy::vision::Visualize::VisDetection(im_bak, res); + auto vis_im = fastdeploy::vision::VisDetection(im_bak, res); cv::imwrite("vis_result.jpg", vis_im); std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl; } diff --git a/examples/vision/detection/yolov7/quantize/cpp/infer.cc b/examples/vision/detection/yolov7/quantize/cpp/infer.cc index 0eda80b6c..6033e3454 100644 --- a/examples/vision/detection/yolov7/quantize/cpp/infer.cc +++ b/examples/vision/detection/yolov7/quantize/cpp/infer.cc @@ -39,7 +39,7 @@ void InitAndInfer(const std::string& model_dir, const std::string& image_file, std::cout << res.Str() << std::endl; - auto vis_im = 
fastdeploy::vision::Visualize::VisDetection(im_bak, res); + auto vis_im = fastdeploy::vision::VisDetection(im_bak, res); cv::imwrite("vis_result.jpg", vis_im); std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl; } diff --git a/examples/vision/detection/yolov7end2end_ort/cpp/infer.cc b/examples/vision/detection/yolov7end2end_ort/cpp/infer.cc index a0e70544a..72ffe1a91 100644 --- a/examples/vision/detection/yolov7end2end_ort/cpp/infer.cc +++ b/examples/vision/detection/yolov7end2end_ort/cpp/infer.cc @@ -31,7 +31,7 @@ void CpuInfer(const std::string& model_file, const std::string& image_file) { } std::cout << res.Str() << std::endl; - auto vis_im = fastdeploy::vision::Visualize::VisDetection(im_bak, res); + auto vis_im = fastdeploy::vision::VisDetection(im_bak, res); cv::imwrite("vis_result.jpg", vis_im); std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl; } @@ -56,7 +56,7 @@ void GpuInfer(const std::string& model_file, const std::string& image_file) { } std::cout << res.Str() << std::endl; - auto vis_im = fastdeploy::vision::Visualize::VisDetection(im_bak, res); + auto vis_im = fastdeploy::vision::VisDetection(im_bak, res); cv::imwrite("vis_result.jpg", vis_im); std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl; } @@ -83,7 +83,7 @@ void TrtInfer(const std::string& model_file, const std::string& image_file) { } std::cout << res.Str() << std::endl; - auto vis_im = fastdeploy::vision::Visualize::VisDetection(im_bak, res); + auto vis_im = fastdeploy::vision::VisDetection(im_bak, res); cv::imwrite("vis_result.jpg", vis_im); std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl; } diff --git a/examples/vision/detection/yolov7end2end_trt/cpp/infer.cc b/examples/vision/detection/yolov7end2end_trt/cpp/infer.cc index 1c7a17d37..20d963edf 100644 --- a/examples/vision/detection/yolov7end2end_trt/cpp/infer.cc +++ b/examples/vision/detection/yolov7end2end_trt/cpp/infer.cc @@ -31,7 +31,7 @@ void CpuInfer(const std::string& model_file, const std::string& image_file) { } std::cout << res.Str() << std::endl; - auto vis_im = fastdeploy::vision::Visualize::VisDetection(im_bak, res); + auto vis_im = fastdeploy::vision::VisDetection(im_bak, res); cv::imwrite("vis_result.jpg", vis_im); std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl; } @@ -56,7 +56,7 @@ void GpuInfer(const std::string& model_file, const std::string& image_file) { } std::cout << res.Str() << std::endl; - auto vis_im = fastdeploy::vision::Visualize::VisDetection(im_bak, res); + auto vis_im = fastdeploy::vision::VisDetection(im_bak, res); cv::imwrite("vis_result.jpg", vis_im); std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl; } @@ -83,7 +83,7 @@ void TrtInfer(const std::string& model_file, const std::string& image_file) { } std::cout << res.Str() << std::endl; - auto vis_im = fastdeploy::vision::Visualize::VisDetection(im_bak, res); + auto vis_im = fastdeploy::vision::VisDetection(im_bak, res); cv::imwrite("vis_result.jpg", vis_im); std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl; } diff --git a/examples/vision/detection/yolox/cpp/infer.cc b/examples/vision/detection/yolox/cpp/infer.cc index 2eeaccbf8..bed65f982 100644 --- a/examples/vision/detection/yolox/cpp/infer.cc +++ b/examples/vision/detection/yolox/cpp/infer.cc @@ -31,7 +31,7 @@ void CpuInfer(const std::string& model_file, const std::string& image_file) { } std::cout << res.Str() << std::endl; - auto vis_im = 
fastdeploy::vision::Visualize::VisDetection(im_bak, res); + auto vis_im = fastdeploy::vision::VisDetection(im_bak, res); cv::imwrite("vis_result.jpg", vis_im); std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl; } @@ -55,7 +55,7 @@ void GpuInfer(const std::string& model_file, const std::string& image_file) { } std::cout << res.Str() << std::endl; - auto vis_im = fastdeploy::vision::Visualize::VisDetection(im_bak, res); + auto vis_im = fastdeploy::vision::VisDetection(im_bak, res); cv::imwrite("vis_result.jpg", vis_im); std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl; } @@ -81,7 +81,7 @@ void TrtInfer(const std::string& model_file, const std::string& image_file) { } std::cout << res.Str() << std::endl; - auto vis_im = fastdeploy::vision::Visualize::VisDetection(im_bak, res); + auto vis_im = fastdeploy::vision::VisDetection(im_bak, res); cv::imwrite("vis_result.jpg", vis_im); std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl; } diff --git a/examples/vision/facedet/retinaface/cpp/infer.cc b/examples/vision/facedet/retinaface/cpp/infer.cc index a1fd27b6e..ceacce746 100644 --- a/examples/vision/facedet/retinaface/cpp/infer.cc +++ b/examples/vision/facedet/retinaface/cpp/infer.cc @@ -31,7 +31,7 @@ void CpuInfer(const std::string& model_file, const std::string& image_file) { } std::cout << res.Str() << std::endl; - auto vis_im = fastdeploy::vision::Visualize::VisFaceDetection(im_bak, res); + auto vis_im = fastdeploy::vision::VisFaceDetection(im_bak, res); cv::imwrite("vis_result.jpg", vis_im); std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl; } @@ -55,7 +55,7 @@ void GpuInfer(const std::string& model_file, const std::string& image_file) { } std::cout << res.Str() << std::endl; - auto vis_im = fastdeploy::vision::Visualize::VisFaceDetection(im_bak, res); + auto vis_im = fastdeploy::vision::VisFaceDetection(im_bak, res); cv::imwrite("vis_result.jpg", vis_im); std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl; } @@ -81,7 +81,7 @@ void TrtInfer(const std::string& model_file, const std::string& image_file) { } std::cout << res.Str() << std::endl; - auto vis_im = fastdeploy::vision::Visualize::VisFaceDetection(im_bak, res); + auto vis_im = fastdeploy::vision::VisFaceDetection(im_bak, res); cv::imwrite("vis_result.jpg", vis_im); std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl; } diff --git a/examples/vision/facedet/scrfd/cpp/infer.cc b/examples/vision/facedet/scrfd/cpp/infer.cc index c804218ee..28823242a 100644 --- a/examples/vision/facedet/scrfd/cpp/infer.cc +++ b/examples/vision/facedet/scrfd/cpp/infer.cc @@ -31,7 +31,7 @@ void CpuInfer(const std::string& model_file, const std::string& image_file) { } std::cout << res.Str() << std::endl; - auto vis_im = fastdeploy::vision::Visualize::VisFaceDetection(im_bak, res); + auto vis_im = fastdeploy::vision::VisFaceDetection(im_bak, res); cv::imwrite("vis_result.jpg", vis_im); std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl; } @@ -55,7 +55,7 @@ void GpuInfer(const std::string& model_file, const std::string& image_file) { } std::cout << res.Str() << std::endl; - auto vis_im = fastdeploy::vision::Visualize::VisFaceDetection(im_bak, res); + auto vis_im = fastdeploy::vision::VisFaceDetection(im_bak, res); cv::imwrite("vis_result.jpg", vis_im); std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl; } @@ -81,7 +81,7 @@ void TrtInfer(const std::string& model_file, const std::string& 
image_file) { } std::cout << res.Str() << std::endl; - auto vis_im = fastdeploy::vision::Visualize::VisFaceDetection(im_bak, res); + auto vis_im = fastdeploy::vision::VisFaceDetection(im_bak, res); cv::imwrite("vis_result.jpg", vis_im); std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl; } diff --git a/examples/vision/facedet/scrfd/rknpu2/cpp/infer.cc b/examples/vision/facedet/scrfd/rknpu2/cpp/infer.cc index a01f1b184..cd06004fe 100644 --- a/examples/vision/facedet/scrfd/rknpu2/cpp/infer.cc +++ b/examples/vision/facedet/scrfd/rknpu2/cpp/infer.cc @@ -73,7 +73,7 @@ void InferScrfd(const std::string& device) { (dur / CLOCKS_PER_SEC)); std::cout << res.Str() << std::endl; - auto vis_im = fastdeploy::vision::Visualize::VisFaceDetection(im, res); + auto vis_im = fastdeploy::vision::VisFaceDetection(im, res); cv::imwrite("vis_result.jpg", vis_im); std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl; } \ No newline at end of file diff --git a/examples/vision/facedet/ultraface/cpp/infer.cc b/examples/vision/facedet/ultraface/cpp/infer.cc index b45bb3b0b..6327c63d6 100644 --- a/examples/vision/facedet/ultraface/cpp/infer.cc +++ b/examples/vision/facedet/ultraface/cpp/infer.cc @@ -31,7 +31,7 @@ void CpuInfer(const std::string& model_file, const std::string& image_file) { } std::cout << res.Str() << std::endl; - auto vis_im = fastdeploy::vision::Visualize::VisFaceDetection(im_bak, res); + auto vis_im = fastdeploy::vision::VisFaceDetection(im_bak, res); cv::imwrite("vis_result.jpg", vis_im); std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl; } @@ -55,7 +55,7 @@ void GpuInfer(const std::string& model_file, const std::string& image_file) { } std::cout << res.Str() << std::endl; - auto vis_im = fastdeploy::vision::Visualize::VisFaceDetection(im_bak, res); + auto vis_im = fastdeploy::vision::VisFaceDetection(im_bak, res); cv::imwrite("vis_result.jpg", vis_im); std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl; } @@ -81,7 +81,7 @@ void TrtInfer(const std::string& model_file, const std::string& image_file) { } std::cout << res.Str() << std::endl; - auto vis_im = fastdeploy::vision::Visualize::VisFaceDetection(im_bak, res); + auto vis_im = fastdeploy::vision::VisFaceDetection(im_bak, res); cv::imwrite("vis_result.jpg", vis_im); std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl; } diff --git a/examples/vision/facedet/yolov5face/cpp/infer.cc b/examples/vision/facedet/yolov5face/cpp/infer.cc index 418834e1e..58e15c8ef 100644 --- a/examples/vision/facedet/yolov5face/cpp/infer.cc +++ b/examples/vision/facedet/yolov5face/cpp/infer.cc @@ -31,7 +31,7 @@ void CpuInfer(const std::string& model_file, const std::string& image_file) { } std::cout << res.Str() << std::endl; - auto vis_im = fastdeploy::vision::Visualize::VisFaceDetection(im_bak, res); + auto vis_im = fastdeploy::vision::VisFaceDetection(im_bak, res); cv::imwrite("vis_result.jpg", vis_im); std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl; } @@ -55,7 +55,7 @@ void GpuInfer(const std::string& model_file, const std::string& image_file) { } std::cout << res.Str() << std::endl; - auto vis_im = fastdeploy::vision::Visualize::VisFaceDetection(im_bak, res); + auto vis_im = fastdeploy::vision::VisFaceDetection(im_bak, res); cv::imwrite("vis_result.jpg", vis_im); std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl; } @@ -81,7 +81,7 @@ void TrtInfer(const std::string& model_file, const std::string& image_file) { } 
std::cout << res.Str() << std::endl; - auto vis_im = fastdeploy::vision::Visualize::VisFaceDetection(im_bak, res); + auto vis_im = fastdeploy::vision::VisFaceDetection(im_bak, res); cv::imwrite("vis_result.jpg", vis_im); std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl; } diff --git a/examples/vision/matting/modnet/cpp/infer.cc b/examples/vision/matting/modnet/cpp/infer.cc index fe1ebc910..d11c9717f 100644 --- a/examples/vision/matting/modnet/cpp/infer.cc +++ b/examples/vision/matting/modnet/cpp/infer.cc @@ -34,7 +34,7 @@ void CpuInfer(const std::string& model_file, const std::string& image_file, auto vis_im = fastdeploy::vision::VisMatting(im_bak, res); auto vis_im_with_bg = - fastdeploy::vision::Visualize::SwapBackgroundMatting(im_bak, bg, res); + fastdeploy::vision::SwapBackgroundMatting(im_bak, bg, res); cv::imwrite("visualized_result.jpg", vis_im_with_bg); cv::imwrite("visualized_result_fg.jpg", vis_im); std::cout << "Visualized result save in ./visualized_result_replaced_bg.jpg " @@ -65,7 +65,7 @@ void GpuInfer(const std::string& model_file, const std::string& image_file, auto vis_im = fastdeploy::vision::VisMatting(im_bak, res); auto vis_im_with_bg = - fastdeploy::vision::Visualize::SwapBackgroundMatting(im_bak, bg, res); + fastdeploy::vision::SwapBackgroundMatting(im_bak, bg, res); cv::imwrite("visualized_result.jpg", vis_im_with_bg); cv::imwrite("visualized_result_fg.jpg", vis_im); std::cout << "Visualized result save in ./visualized_result_replaced_bg.jpg " @@ -97,7 +97,7 @@ void TrtInfer(const std::string& model_file, const std::string& image_file, auto vis_im = fastdeploy::vision::VisMatting(im_bak, res); auto vis_im_with_bg = - fastdeploy::vision::Visualize::SwapBackgroundMatting(im_bak, bg, res); + fastdeploy::vision::SwapBackgroundMatting(im_bak, bg, res); cv::imwrite("visualized_result.jpg", vis_im_with_bg); cv::imwrite("visualized_result_fg.jpg", vis_im); std::cout << "Visualized result save in ./visualized_result_replaced_bg.jpg " diff --git a/examples/vision/matting/ppmatting/cpp/infer.cc b/examples/vision/matting/ppmatting/cpp/infer.cc index 304e4239a..9c917a77f 100644 --- a/examples/vision/matting/ppmatting/cpp/infer.cc +++ b/examples/vision/matting/ppmatting/cpp/infer.cc @@ -44,7 +44,7 @@ void CpuInfer(const std::string& model_dir, const std::string& image_file, } auto vis_im = fastdeploy::vision::VisMatting(im_bak, res); auto vis_im_with_bg = - fastdeploy::vision::Visualize::SwapBackgroundMatting(im_bak, bg, res); + fastdeploy::vision::SwapBackgroundMatting(im_bak, bg, res); cv::imwrite("visualized_result.jpg", vis_im_with_bg); cv::imwrite("visualized_result_fg.jpg", vis_im); std::cout << "Visualized result save in ./visualized_result_replaced_bg.jpg " @@ -78,7 +78,7 @@ void GpuInfer(const std::string& model_dir, const std::string& image_file, } auto vis_im = fastdeploy::vision::VisMatting(im_bak, res); auto vis_im_with_bg = - fastdeploy::vision::Visualize::SwapBackgroundMatting(im_bak, bg, res); + fastdeploy::vision::SwapBackgroundMatting(im_bak, bg, res); cv::imwrite("visualized_result.jpg", vis_im_with_bg); cv::imwrite("visualized_result_fg.jpg", vis_im); std::cout << "Visualized result save in ./visualized_result_replaced_bg.jpg " @@ -113,7 +113,7 @@ void TrtInfer(const std::string& model_dir, const std::string& image_file, } auto vis_im = fastdeploy::vision::VisMatting(im_bak, res); auto vis_im_with_bg = - fastdeploy::vision::Visualize::SwapBackgroundMatting(im_bak, bg, res); + fastdeploy::vision::SwapBackgroundMatting(im_bak, bg, res); 
cv::imwrite("visualized_result.jpg", vis_im_with_bg); cv::imwrite("visualized_result_fg.jpg", vis_im); std::cout << "Visualized result save in ./visualized_result_replaced_bg.jpg " From 37359c65f3af287f97462ee8853786304fb52496 Mon Sep 17 00:00:00 2001 From: felixhjh <852142024@qq.com> Date: Mon, 12 Dec 2022 10:59:15 +0000 Subject: [PATCH 04/18] Update SwapBackgroundSegmentation && SwapBackgroundMatting to SwapBackground --- .../vision/detection/nanodet_plus/cpp/infer.cc | 9 +++------ .../vision/detection/scaledyolov4/cpp/infer.cc | 9 +++------ examples/vision/detection/yolor/cpp/infer.cc | 9 +++------ examples/vision/detection/yolov5/cpp/infer.cc | 9 +++------ .../vision/detection/yolov5/quantize/cpp/infer.cc | 3 +-- examples/vision/detection/yolov5lite/cpp/infer.cc | 9 +++------ examples/vision/detection/yolov6/cpp/infer.cc | 9 +++------ .../vision/detection/yolov6/quantize/cpp/infer.cc | 3 +-- examples/vision/detection/yolov7/cpp/infer.cc | 9 +++------ .../vision/detection/yolov7/quantize/cpp/infer.cc | 3 +-- .../detection/yolov7end2end_ort/cpp/infer.cc | 9 +++------ .../detection/yolov7end2end_trt/cpp/infer.cc | 11 ++++------- examples/vision/detection/yolox/cpp/infer.cc | 9 +++------ examples/vision/facedet/retinaface/cpp/infer.cc | 9 +++------ examples/vision/facedet/scrfd/cpp/infer.cc | 9 +++------ examples/vision/facedet/ultraface/cpp/infer.cc | 9 +++------ examples/vision/facedet/yolov5face/cpp/infer.cc | 9 +++------ examples/vision/matting/modnet/cpp/infer.cc | 15 ++++++--------- examples/vision/matting/modnet/python/infer.py | 2 +- examples/vision/matting/ppmatting/cpp/infer.cc | 15 ++++++--------- examples/vision/matting/ppmatting/python/infer.py | 2 +- examples/vision/matting/rvm/python/infer.py | 4 ++-- 22 files changed, 62 insertions(+), 113 deletions(-) diff --git a/examples/vision/detection/nanodet_plus/cpp/infer.cc b/examples/vision/detection/nanodet_plus/cpp/infer.cc index 8c9f3338f..ec0bbee98 100644 --- a/examples/vision/detection/nanodet_plus/cpp/infer.cc +++ b/examples/vision/detection/nanodet_plus/cpp/infer.cc @@ -22,7 +22,6 @@ void CpuInfer(const std::string& model_file, const std::string& image_file) { } auto im = cv::imread(image_file); - auto im_bak = im.clone(); fastdeploy::vision::DetectionResult res; if (!model.Predict(&im, &res)) { @@ -30,7 +29,7 @@ void CpuInfer(const std::string& model_file, const std::string& image_file) { return; } std::cout << res.Str() << std::endl; - auto vis_im = fastdeploy::vision::VisDetection(im_bak, res); + auto vis_im = fastdeploy::vision::VisDetection(im, res); cv::imwrite("vis_result.jpg", vis_im); std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl; } @@ -46,7 +45,6 @@ void GpuInfer(const std::string& model_file, const std::string& image_file) { } auto im = cv::imread(image_file); - auto im_bak = im.clone(); fastdeploy::vision::DetectionResult res; if (!model.Predict(&im, &res)) { @@ -55,7 +53,7 @@ void GpuInfer(const std::string& model_file, const std::string& image_file) { } std::cout << res.Str() << std::endl; - auto vis_im = fastdeploy::vision::VisDetection(im_bak, res); + auto vis_im = fastdeploy::vision::VisDetection(im, res); cv::imwrite("vis_result.jpg", vis_im); std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl; } @@ -73,7 +71,6 @@ void TrtInfer(const std::string& model_file, const std::string& image_file) { } auto im = cv::imread(image_file); - auto im_bak = im.clone(); fastdeploy::vision::DetectionResult res; if (!model.Predict(&im, &res)) { @@ -82,7 +79,7 @@ void 
TrtInfer(const std::string& model_file, const std::string& image_file) { } std::cout << res.Str() << std::endl; - auto vis_im = fastdeploy::vision::VisDetection(im_bak, res); + auto vis_im = fastdeploy::vision::VisDetection(im, res); cv::imwrite("vis_result.jpg", vis_im); std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl; } diff --git a/examples/vision/detection/scaledyolov4/cpp/infer.cc b/examples/vision/detection/scaledyolov4/cpp/infer.cc index 040823a1d..683156cc9 100644 --- a/examples/vision/detection/scaledyolov4/cpp/infer.cc +++ b/examples/vision/detection/scaledyolov4/cpp/infer.cc @@ -22,7 +22,6 @@ void CpuInfer(const std::string& model_file, const std::string& image_file) { } auto im = cv::imread(image_file); - auto im_bak = im.clone(); fastdeploy::vision::DetectionResult res; if (!model.Predict(&im, &res)) { @@ -31,7 +30,7 @@ void CpuInfer(const std::string& model_file, const std::string& image_file) { } std::cout << res.Str() << std::endl; - auto vis_im = fastdeploy::vision::VisDetection(im_bak, res); + auto vis_im = fastdeploy::vision::VisDetection(im, res); cv::imwrite("vis_result.jpg", vis_im); std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl; } @@ -47,7 +46,6 @@ void GpuInfer(const std::string& model_file, const std::string& image_file) { } auto im = cv::imread(image_file); - auto im_bak = im.clone(); fastdeploy::vision::DetectionResult res; if (!model.Predict(&im, &res)) { @@ -56,7 +54,7 @@ void GpuInfer(const std::string& model_file, const std::string& image_file) { } std::cout << res.Str() << std::endl; - auto vis_im = fastdeploy::vision::VisDetection(im_bak, res); + auto vis_im = fastdeploy::vision::VisDetection(im, res); cv::imwrite("vis_result.jpg", vis_im); std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl; } @@ -74,7 +72,6 @@ void TrtInfer(const std::string& model_file, const std::string& image_file) { } auto im = cv::imread(image_file); - auto im_bak = im.clone(); fastdeploy::vision::DetectionResult res; if (!model.Predict(&im, &res)) { @@ -83,7 +80,7 @@ void TrtInfer(const std::string& model_file, const std::string& image_file) { } std::cout << res.Str() << std::endl; - auto vis_im = fastdeploy::vision::VisDetection(im_bak, res); + auto vis_im = fastdeploy::vision::VisDetection(im, res); cv::imwrite("vis_result.jpg", vis_im); std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl; } diff --git a/examples/vision/detection/yolor/cpp/infer.cc b/examples/vision/detection/yolor/cpp/infer.cc index 105855a02..bacc18b40 100644 --- a/examples/vision/detection/yolor/cpp/infer.cc +++ b/examples/vision/detection/yolor/cpp/infer.cc @@ -22,7 +22,6 @@ void CpuInfer(const std::string& model_file, const std::string& image_file) { } auto im = cv::imread(image_file); - auto im_bak = im.clone(); fastdeploy::vision::DetectionResult res; if (!model.Predict(&im, &res)) { @@ -31,7 +30,7 @@ void CpuInfer(const std::string& model_file, const std::string& image_file) { } std::cout << res.Str() << std::endl; - auto vis_im = fastdeploy::vision::VisDetection(im_bak, res); + auto vis_im = fastdeploy::vision::VisDetection(im, res); cv::imwrite("vis_result.jpg", vis_im); std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl; } @@ -46,7 +45,6 @@ void GpuInfer(const std::string& model_file, const std::string& image_file) { } auto im = cv::imread(image_file); - auto im_bak = im.clone(); fastdeploy::vision::DetectionResult res; if (!model.Predict(&im, &res)) { @@ -55,7 +53,7 @@ void GpuInfer(const 
std::string& model_file, const std::string& image_file) { } std::cout << res.Str() << std::endl; - auto vis_im = fastdeploy::vision::VisDetection(im_bak, res); + auto vis_im = fastdeploy::vision::VisDetection(im, res); cv::imwrite("vis_result.jpg", vis_im); std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl; } @@ -72,7 +70,6 @@ void TrtInfer(const std::string& model_file, const std::string& image_file) { } auto im = cv::imread(image_file); - auto im_bak = im.clone(); fastdeploy::vision::DetectionResult res; if (!model.Predict(&im, &res)) { @@ -81,7 +78,7 @@ void TrtInfer(const std::string& model_file, const std::string& image_file) { } std::cout << res.Str() << std::endl; - auto vis_im = fastdeploy::vision::VisDetection(im_bak, res); + auto vis_im = fastdeploy::vision::VisDetection(im, res); cv::imwrite("vis_result.jpg", vis_im); std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl; } diff --git a/examples/vision/detection/yolov5/cpp/infer.cc b/examples/vision/detection/yolov5/cpp/infer.cc index dea29450b..1c3907918 100644 --- a/examples/vision/detection/yolov5/cpp/infer.cc +++ b/examples/vision/detection/yolov5/cpp/infer.cc @@ -22,7 +22,6 @@ void CpuInfer(const std::string& model_file, const std::string& image_file) { } auto im = cv::imread(image_file); - auto im_bak = im.clone(); fastdeploy::vision::DetectionResult res; if (!model.Predict(&im, &res)) { @@ -31,7 +30,7 @@ void CpuInfer(const std::string& model_file, const std::string& image_file) { } std::cout << res.Str() << std::endl; - auto vis_im = fastdeploy::vision::VisDetection(im_bak, res); + auto vis_im = fastdeploy::vision::VisDetection(im, res); cv::imwrite("vis_result.jpg", vis_im); std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl; } @@ -46,7 +45,6 @@ void GpuInfer(const std::string& model_file, const std::string& image_file) { } auto im = cv::imread(image_file); - auto im_bak = im.clone(); fastdeploy::vision::DetectionResult res; if (!model.Predict(&im, &res)) { @@ -55,7 +53,7 @@ void GpuInfer(const std::string& model_file, const std::string& image_file) { } std::cout << res.Str() << std::endl; - auto vis_im = fastdeploy::vision::VisDetection(im_bak, res); + auto vis_im = fastdeploy::vision::VisDetection(im, res); cv::imwrite("vis_result.jpg", vis_im); std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl; } @@ -72,7 +70,6 @@ void TrtInfer(const std::string& model_file, const std::string& image_file) { } auto im = cv::imread(image_file); - auto im_bak = im.clone(); fastdeploy::vision::DetectionResult res; if (!model.Predict(&im, &res)) { @@ -81,7 +78,7 @@ void TrtInfer(const std::string& model_file, const std::string& image_file) { } std::cout << res.Str() << std::endl; - auto vis_im = fastdeploy::vision::VisDetection(im_bak, res); + auto vis_im = fastdeploy::vision::VisDetection(im, res); cv::imwrite("vis_result.jpg", vis_im); std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl; } diff --git a/examples/vision/detection/yolov5/quantize/cpp/infer.cc b/examples/vision/detection/yolov5/quantize/cpp/infer.cc index 690c19ddf..1addf1507 100644 --- a/examples/vision/detection/yolov5/quantize/cpp/infer.cc +++ b/examples/vision/detection/yolov5/quantize/cpp/infer.cc @@ -29,7 +29,6 @@ void InitAndInfer(const std::string& model_dir, const std::string& image_file, assert(model.Initialized()); auto im = cv::imread(image_file); - auto im_bak = im.clone(); fastdeploy::vision::DetectionResult res; if (!model.Predict(&im, &res)) { @@ -39,7 
+38,7 @@ void InitAndInfer(const std::string& model_dir, const std::string& image_file, std::cout << res.Str() << std::endl; - auto vis_im = fastdeploy::vision::VisDetection(im_bak, res); + auto vis_im = fastdeploy::vision::VisDetection(im, res); cv::imwrite("vis_result.jpg", vis_im); std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl; } diff --git a/examples/vision/detection/yolov5lite/cpp/infer.cc b/examples/vision/detection/yolov5lite/cpp/infer.cc index 0d9c58201..0a3f7b81b 100644 --- a/examples/vision/detection/yolov5lite/cpp/infer.cc +++ b/examples/vision/detection/yolov5lite/cpp/infer.cc @@ -22,7 +22,6 @@ void CpuInfer(const std::string& model_file, const std::string& image_file) { } auto im = cv::imread(image_file); - auto im_bak = im.clone(); fastdeploy::vision::DetectionResult res; if (!model.Predict(&im, &res)) { @@ -31,7 +30,7 @@ void CpuInfer(const std::string& model_file, const std::string& image_file) { } std::cout << res.Str() << std::endl; - auto vis_im = fastdeploy::vision::VisDetection(im_bak, res); + auto vis_im = fastdeploy::vision::VisDetection(im, res); cv::imwrite("vis_result.jpg", vis_im); std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl; } @@ -47,7 +46,6 @@ void GpuInfer(const std::string& model_file, const std::string& image_file) { } auto im = cv::imread(image_file); - auto im_bak = im.clone(); fastdeploy::vision::DetectionResult res; if (!model.Predict(&im, &res)) { @@ -56,7 +54,7 @@ void GpuInfer(const std::string& model_file, const std::string& image_file) { } std::cout << res.Str() << std::endl; - auto vis_im = fastdeploy::vision::VisDetection(im_bak, res); + auto vis_im = fastdeploy::vision::VisDetection(im, res); cv::imwrite("vis_result.jpg", vis_im); std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl; } @@ -74,7 +72,6 @@ void TrtInfer(const std::string& model_file, const std::string& image_file) { } auto im = cv::imread(image_file); - auto im_bak = im.clone(); fastdeploy::vision::DetectionResult res; if (!model.Predict(&im, &res)) { @@ -83,7 +80,7 @@ void TrtInfer(const std::string& model_file, const std::string& image_file) { } std::cout << res.Str() << std::endl; - auto vis_im = fastdeploy::vision::VisDetection(im_bak, res); + auto vis_im = fastdeploy::vision::VisDetection(im, res); cv::imwrite("vis_result.jpg", vis_im); std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl; } diff --git a/examples/vision/detection/yolov6/cpp/infer.cc b/examples/vision/detection/yolov6/cpp/infer.cc index fd00796f1..dbca64f40 100644 --- a/examples/vision/detection/yolov6/cpp/infer.cc +++ b/examples/vision/detection/yolov6/cpp/infer.cc @@ -22,7 +22,6 @@ void CpuInfer(const std::string& model_file, const std::string& image_file) { } auto im = cv::imread(image_file); - auto im_bak = im.clone(); fastdeploy::vision::DetectionResult res; if (!model.Predict(&im, &res)) { @@ -31,7 +30,7 @@ void CpuInfer(const std::string& model_file, const std::string& image_file) { } std::cout << res.Str() << std::endl; - auto vis_im = fastdeploy::vision::VisDetection(im_bak, res); + auto vis_im = fastdeploy::vision::VisDetection(im, res); cv::imwrite("vis_result.jpg", vis_im); std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl; } @@ -46,7 +45,6 @@ void GpuInfer(const std::string& model_file, const std::string& image_file) { } auto im = cv::imread(image_file); - auto im_bak = im.clone(); fastdeploy::vision::DetectionResult res; if (!model.Predict(&im, &res)) { @@ -55,7 +53,7 @@ void 
GpuInfer(const std::string& model_file, const std::string& image_file) { } std::cout << res.Str() << std::endl; - auto vis_im = fastdeploy::vision::VisDetection(im_bak, res); + auto vis_im = fastdeploy::vision::VisDetection(im, res); cv::imwrite("vis_result.jpg", vis_im); std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl; } @@ -72,7 +70,6 @@ void TrtInfer(const std::string& model_file, const std::string& image_file) { } auto im = cv::imread(image_file); - auto im_bak = im.clone(); fastdeploy::vision::DetectionResult res; if (!model.Predict(&im, &res)) { @@ -81,7 +78,7 @@ void TrtInfer(const std::string& model_file, const std::string& image_file) { } std::cout << res.Str() << std::endl; - auto vis_im = fastdeploy::vision::VisDetection(im_bak, res); + auto vis_im = fastdeploy::vision::VisDetection(im, res); cv::imwrite("vis_result.jpg", vis_im); std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl; } diff --git a/examples/vision/detection/yolov6/quantize/cpp/infer.cc b/examples/vision/detection/yolov6/quantize/cpp/infer.cc index 57754ca30..adda977f3 100644 --- a/examples/vision/detection/yolov6/quantize/cpp/infer.cc +++ b/examples/vision/detection/yolov6/quantize/cpp/infer.cc @@ -29,7 +29,6 @@ void InitAndInfer(const std::string& model_dir, const std::string& image_file, assert(model.Initialized()); auto im = cv::imread(image_file); - auto im_bak = im.clone(); fastdeploy::vision::DetectionResult res; if (!model.Predict(&im, &res)) { @@ -39,7 +38,7 @@ void InitAndInfer(const std::string& model_dir, const std::string& image_file, std::cout << res.Str() << std::endl; - auto vis_im = fastdeploy::vision::VisDetection(im_bak, res); + auto vis_im = fastdeploy::vision::VisDetection(im, res); cv::imwrite("vis_result.jpg", vis_im); std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl; } diff --git a/examples/vision/detection/yolov7/cpp/infer.cc b/examples/vision/detection/yolov7/cpp/infer.cc index 803a7227a..5fd848b8e 100644 --- a/examples/vision/detection/yolov7/cpp/infer.cc +++ b/examples/vision/detection/yolov7/cpp/infer.cc @@ -22,7 +22,6 @@ void CpuInfer(const std::string& model_file, const std::string& image_file) { } auto im = cv::imread(image_file); - auto im_bak = im.clone(); fastdeploy::vision::DetectionResult res; if (!model.Predict(&im, &res)) { @@ -31,7 +30,7 @@ void CpuInfer(const std::string& model_file, const std::string& image_file) { } std::cout << res.Str() << std::endl; - auto vis_im = fastdeploy::vision::VisDetection(im_bak, res); + auto vis_im = fastdeploy::vision::VisDetection(im, res); cv::imwrite("vis_result.jpg", vis_im); std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl; } @@ -46,7 +45,6 @@ void GpuInfer(const std::string& model_file, const std::string& image_file) { } auto im = cv::imread(image_file); - auto im_bak = im.clone(); fastdeploy::vision::DetectionResult res; if (!model.Predict(&im, &res)) { @@ -55,7 +53,7 @@ void GpuInfer(const std::string& model_file, const std::string& image_file) { } std::cout << res.Str() << std::endl; - auto vis_im = fastdeploy::vision::VisDetection(im_bak, res); + auto vis_im = fastdeploy::vision::VisDetection(im, res); cv::imwrite("vis_result.jpg", vis_im); std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl; } @@ -72,7 +70,6 @@ void TrtInfer(const std::string& model_file, const std::string& image_file) { } auto im = cv::imread(image_file); - auto im_bak = im.clone(); fastdeploy::vision::DetectionResult res; if (!model.Predict(&im, &res)) { 
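(Every hunk in this stretch of diffs makes the same change: the `im_bak = im.clone()` backup is deleted and visualization reads `im` directly, which presumes the updated `Predict` implementations no longer overwrite their input image during preprocessing. A minimal Python sketch of the same before/after pattern, assuming a hypothetical local `yolov5s.onnx` model file and `test.jpg` image:)

```python
import cv2
import fastdeploy as fd

model = fd.vision.detection.YOLOv5("yolov5s.onnx")  # hypothetical local model file
im = cv2.imread("test.jpg")                         # hypothetical test image

# Old pattern: keep a pristine copy, because predict() used to modify
# the input image in place during preprocessing:
#   im_bak = im.copy()
#   result = model.predict(im)
#   vis_im = fd.vision.vis_detection(im_bak, result)

# New pattern: predict() leaves `im` untouched, so the backup copy is unnecessary.
result = model.predict(im)
vis_im = fd.vision.vis_detection(im, result)
cv2.imwrite("vis_result.jpg", vis_im)
```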
@@ -81,7 +78,7 @@ void TrtInfer(const std::string& model_file, const std::string& image_file) { } std::cout << res.Str() << std::endl; - auto vis_im = fastdeploy::vision::VisDetection(im_bak, res); + auto vis_im = fastdeploy::vision::VisDetection(im, res); cv::imwrite("vis_result.jpg", vis_im); std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl; } diff --git a/examples/vision/detection/yolov7/quantize/cpp/infer.cc b/examples/vision/detection/yolov7/quantize/cpp/infer.cc index 6033e3454..9ae42b582 100644 --- a/examples/vision/detection/yolov7/quantize/cpp/infer.cc +++ b/examples/vision/detection/yolov7/quantize/cpp/infer.cc @@ -29,7 +29,6 @@ void InitAndInfer(const std::string& model_dir, const std::string& image_file, assert(model.Initialized()); auto im = cv::imread(image_file); - auto im_bak = im.clone(); fastdeploy::vision::DetectionResult res; if (!model.Predict(&im, &res)) { @@ -39,7 +38,7 @@ void InitAndInfer(const std::string& model_dir, const std::string& image_file, std::cout << res.Str() << std::endl; - auto vis_im = fastdeploy::vision::VisDetection(im_bak, res); + auto vis_im = fastdeploy::vision::VisDetection(im, res); cv::imwrite("vis_result.jpg", vis_im); std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl; } diff --git a/examples/vision/detection/yolov7end2end_ort/cpp/infer.cc b/examples/vision/detection/yolov7end2end_ort/cpp/infer.cc index 72ffe1a91..5c9df2816 100644 --- a/examples/vision/detection/yolov7end2end_ort/cpp/infer.cc +++ b/examples/vision/detection/yolov7end2end_ort/cpp/infer.cc @@ -22,7 +22,6 @@ void CpuInfer(const std::string& model_file, const std::string& image_file) { } auto im = cv::imread(image_file); - auto im_bak = im.clone(); fastdeploy::vision::DetectionResult res; if (!model.Predict(&im, &res)) { @@ -31,7 +30,7 @@ void CpuInfer(const std::string& model_file, const std::string& image_file) { } std::cout << res.Str() << std::endl; - auto vis_im = fastdeploy::vision::VisDetection(im_bak, res); + auto vis_im = fastdeploy::vision::VisDetection(im, res); cv::imwrite("vis_result.jpg", vis_im); std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl; } @@ -47,7 +46,6 @@ void GpuInfer(const std::string& model_file, const std::string& image_file) { } auto im = cv::imread(image_file); - auto im_bak = im.clone(); fastdeploy::vision::DetectionResult res; if (!model.Predict(&im, &res)) { @@ -56,7 +54,7 @@ void GpuInfer(const std::string& model_file, const std::string& image_file) { } std::cout << res.Str() << std::endl; - auto vis_im = fastdeploy::vision::VisDetection(im_bak, res); + auto vis_im = fastdeploy::vision::VisDetection(im, res); cv::imwrite("vis_result.jpg", vis_im); std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl; } @@ -74,7 +72,6 @@ void TrtInfer(const std::string& model_file, const std::string& image_file) { } auto im = cv::imread(image_file); - auto im_bak = im.clone(); fastdeploy::vision::DetectionResult res; if (!model.Predict(&im, &res)) { @@ -83,7 +80,7 @@ void TrtInfer(const std::string& model_file, const std::string& image_file) { } std::cout << res.Str() << std::endl; - auto vis_im = fastdeploy::vision::VisDetection(im_bak, res); + auto vis_im = fastdeploy::vision::VisDetection(im, res); cv::imwrite("vis_result.jpg", vis_im); std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl; } diff --git a/examples/vision/detection/yolov7end2end_trt/cpp/infer.cc b/examples/vision/detection/yolov7end2end_trt/cpp/infer.cc index 20d963edf..4f7a2f791 100644 --- 
a/examples/vision/detection/yolov7end2end_trt/cpp/infer.cc +++ b/examples/vision/detection/yolov7end2end_trt/cpp/infer.cc @@ -22,7 +22,6 @@ void CpuInfer(const std::string& model_file, const std::string& image_file) { } auto im = cv::imread(image_file); - auto im_bak = im.clone(); fastdeploy::vision::DetectionResult res; if (!model.Predict(&im, &res)) { @@ -31,7 +30,7 @@ void CpuInfer(const std::string& model_file, const std::string& image_file) { } std::cout << res.Str() << std::endl; - auto vis_im = fastdeploy::vision::VisDetection(im_bak, res); + auto vis_im = fastdeploy::vision::VisDetection(im, res); cv::imwrite("vis_result.jpg", vis_im); std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl; } @@ -47,7 +46,6 @@ void GpuInfer(const std::string& model_file, const std::string& image_file) { } auto im = cv::imread(image_file); - auto im_bak = im.clone(); fastdeploy::vision::DetectionResult res; if (!model.Predict(&im, &res)) { @@ -56,7 +54,7 @@ void GpuInfer(const std::string& model_file, const std::string& image_file) { } std::cout << res.Str() << std::endl; - auto vis_im = fastdeploy::vision::VisDetection(im_bak, res); + auto vis_im = fastdeploy::vision::VisDetection(im, res); cv::imwrite("vis_result.jpg", vis_im); std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl; } @@ -74,7 +72,6 @@ void TrtInfer(const std::string& model_file, const std::string& image_file) { } auto im = cv::imread(image_file); - auto im_bak = im.clone(); fastdeploy::vision::DetectionResult res; if (!model.Predict(&im, &res)) { @@ -83,7 +80,7 @@ void TrtInfer(const std::string& model_file, const std::string& image_file) { } std::cout << res.Str() << std::endl; - auto vis_im = fastdeploy::vision::VisDetection(im_bak, res); + auto vis_im = fastdeploy::vision::VisDetection(im, res); cv::imwrite("vis_result.jpg", vis_im); std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl; } @@ -107,4 +104,4 @@ int main(int argc, char* argv[]) { TrtInfer(argv[1], argv[2]); } return 0; -} \ No newline at end of file +} diff --git a/examples/vision/detection/yolox/cpp/infer.cc b/examples/vision/detection/yolox/cpp/infer.cc index bed65f982..836ab7e63 100644 --- a/examples/vision/detection/yolox/cpp/infer.cc +++ b/examples/vision/detection/yolox/cpp/infer.cc @@ -22,7 +22,6 @@ void CpuInfer(const std::string& model_file, const std::string& image_file) { } auto im = cv::imread(image_file); - auto im_bak = im.clone(); fastdeploy::vision::DetectionResult res; if (!model.Predict(&im, &res)) { @@ -31,7 +30,7 @@ void CpuInfer(const std::string& model_file, const std::string& image_file) { } std::cout << res.Str() << std::endl; - auto vis_im = fastdeploy::vision::VisDetection(im_bak, res); + auto vis_im = fastdeploy::vision::VisDetection(im, res); cv::imwrite("vis_result.jpg", vis_im); std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl; } @@ -46,7 +45,6 @@ void GpuInfer(const std::string& model_file, const std::string& image_file) { } auto im = cv::imread(image_file); - auto im_bak = im.clone(); fastdeploy::vision::DetectionResult res; if (!model.Predict(&im, &res)) { @@ -55,7 +53,7 @@ void GpuInfer(const std::string& model_file, const std::string& image_file) { } std::cout << res.Str() << std::endl; - auto vis_im = fastdeploy::vision::VisDetection(im_bak, res); + auto vis_im = fastdeploy::vision::VisDetection(im, res); cv::imwrite("vis_result.jpg", vis_im); std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl; } @@ -72,7 +70,6 @@ void TrtInfer(const 
std::string& model_file, const std::string& image_file) { } auto im = cv::imread(image_file); - auto im_bak = im.clone(); fastdeploy::vision::DetectionResult res; if (!model.Predict(&im, &res)) { @@ -81,7 +78,7 @@ void TrtInfer(const std::string& model_file, const std::string& image_file) { } std::cout << res.Str() << std::endl; - auto vis_im = fastdeploy::vision::VisDetection(im_bak, res); + auto vis_im = fastdeploy::vision::VisDetection(im, res); cv::imwrite("vis_result.jpg", vis_im); std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl; } diff --git a/examples/vision/facedet/retinaface/cpp/infer.cc b/examples/vision/facedet/retinaface/cpp/infer.cc index ceacce746..f125e4ba7 100644 --- a/examples/vision/facedet/retinaface/cpp/infer.cc +++ b/examples/vision/facedet/retinaface/cpp/infer.cc @@ -22,7 +22,6 @@ void CpuInfer(const std::string& model_file, const std::string& image_file) { } auto im = cv::imread(image_file); - auto im_bak = im.clone(); fastdeploy::vision::FaceDetectionResult res; if (!model.Predict(&im, &res)) { @@ -31,7 +30,7 @@ void CpuInfer(const std::string& model_file, const std::string& image_file) { } std::cout << res.Str() << std::endl; - auto vis_im = fastdeploy::vision::VisFaceDetection(im_bak, res); + auto vis_im = fastdeploy::vision::VisFaceDetection(im, res); cv::imwrite("vis_result.jpg", vis_im); std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl; } @@ -46,7 +45,6 @@ void GpuInfer(const std::string& model_file, const std::string& image_file) { } auto im = cv::imread(image_file); - auto im_bak = im.clone(); fastdeploy::vision::FaceDetectionResult res; if (!model.Predict(&im, &res)) { @@ -55,7 +53,7 @@ void GpuInfer(const std::string& model_file, const std::string& image_file) { } std::cout << res.Str() << std::endl; - auto vis_im = fastdeploy::vision::VisFaceDetection(im_bak, res); + auto vis_im = fastdeploy::vision::VisFaceDetection(im, res); cv::imwrite("vis_result.jpg", vis_im); std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl; } @@ -72,7 +70,6 @@ void TrtInfer(const std::string& model_file, const std::string& image_file) { } auto im = cv::imread(image_file); - auto im_bak = im.clone(); fastdeploy::vision::FaceDetectionResult res; if (!model.Predict(&im, &res)) { @@ -81,7 +78,7 @@ void TrtInfer(const std::string& model_file, const std::string& image_file) { } std::cout << res.Str() << std::endl; - auto vis_im = fastdeploy::vision::VisFaceDetection(im_bak, res); + auto vis_im = fastdeploy::vision::VisFaceDetection(im, res); cv::imwrite("vis_result.jpg", vis_im); std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl; } diff --git a/examples/vision/facedet/scrfd/cpp/infer.cc b/examples/vision/facedet/scrfd/cpp/infer.cc index 28823242a..c09403741 100644 --- a/examples/vision/facedet/scrfd/cpp/infer.cc +++ b/examples/vision/facedet/scrfd/cpp/infer.cc @@ -22,7 +22,6 @@ void CpuInfer(const std::string& model_file, const std::string& image_file) { } auto im = cv::imread(image_file); - auto im_bak = im.clone(); fastdeploy::vision::FaceDetectionResult res; if (!model.Predict(&im, &res)) { @@ -31,7 +30,7 @@ void CpuInfer(const std::string& model_file, const std::string& image_file) { } std::cout << res.Str() << std::endl; - auto vis_im = fastdeploy::vision::VisFaceDetection(im_bak, res); + auto vis_im = fastdeploy::vision::VisFaceDetection(im, res); cv::imwrite("vis_result.jpg", vis_im); std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl; } @@ -46,7 +45,6 @@ void 
GpuInfer(const std::string& model_file, const std::string& image_file) { } auto im = cv::imread(image_file); - auto im_bak = im.clone(); fastdeploy::vision::FaceDetectionResult res; if (!model.Predict(&im, &res)) { @@ -55,7 +53,7 @@ void GpuInfer(const std::string& model_file, const std::string& image_file) { } std::cout << res.Str() << std::endl; - auto vis_im = fastdeploy::vision::VisFaceDetection(im_bak, res); + auto vis_im = fastdeploy::vision::VisFaceDetection(im, res); cv::imwrite("vis_result.jpg", vis_im); std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl; } @@ -72,7 +70,6 @@ void TrtInfer(const std::string& model_file, const std::string& image_file) { } auto im = cv::imread(image_file); - auto im_bak = im.clone(); fastdeploy::vision::FaceDetectionResult res; if (!model.Predict(&im, &res)) { @@ -81,7 +78,7 @@ void TrtInfer(const std::string& model_file, const std::string& image_file) { } std::cout << res.Str() << std::endl; - auto vis_im = fastdeploy::vision::VisFaceDetection(im_bak, res); + auto vis_im = fastdeploy::vision::VisFaceDetection(im, res); cv::imwrite("vis_result.jpg", vis_im); std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl; } diff --git a/examples/vision/facedet/ultraface/cpp/infer.cc b/examples/vision/facedet/ultraface/cpp/infer.cc index 6327c63d6..b50674e1d 100644 --- a/examples/vision/facedet/ultraface/cpp/infer.cc +++ b/examples/vision/facedet/ultraface/cpp/infer.cc @@ -22,7 +22,6 @@ void CpuInfer(const std::string& model_file, const std::string& image_file) { } auto im = cv::imread(image_file); - auto im_bak = im.clone(); fastdeploy::vision::FaceDetectionResult res; if (!model.Predict(&im, &res)) { @@ -31,7 +30,7 @@ void CpuInfer(const std::string& model_file, const std::string& image_file) { } std::cout << res.Str() << std::endl; - auto vis_im = fastdeploy::vision::VisFaceDetection(im_bak, res); + auto vis_im = fastdeploy::vision::VisFaceDetection(im, res); cv::imwrite("vis_result.jpg", vis_im); std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl; } @@ -46,7 +45,6 @@ void GpuInfer(const std::string& model_file, const std::string& image_file) { } auto im = cv::imread(image_file); - auto im_bak = im.clone(); fastdeploy::vision::FaceDetectionResult res; if (!model.Predict(&im, &res)) { @@ -55,7 +53,7 @@ void GpuInfer(const std::string& model_file, const std::string& image_file) { } std::cout << res.Str() << std::endl; - auto vis_im = fastdeploy::vision::VisFaceDetection(im_bak, res); + auto vis_im = fastdeploy::vision::VisFaceDetection(im, res); cv::imwrite("vis_result.jpg", vis_im); std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl; } @@ -72,7 +70,6 @@ void TrtInfer(const std::string& model_file, const std::string& image_file) { } auto im = cv::imread(image_file); - auto im_bak = im.clone(); fastdeploy::vision::FaceDetectionResult res; if (!model.Predict(&im, &res)) { @@ -81,7 +78,7 @@ void TrtInfer(const std::string& model_file, const std::string& image_file) { } std::cout << res.Str() << std::endl; - auto vis_im = fastdeploy::vision::VisFaceDetection(im_bak, res); + auto vis_im = fastdeploy::vision::VisFaceDetection(im, res); cv::imwrite("vis_result.jpg", vis_im); std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl; } diff --git a/examples/vision/facedet/yolov5face/cpp/infer.cc b/examples/vision/facedet/yolov5face/cpp/infer.cc index 58e15c8ef..ecdcfcd0c 100644 --- a/examples/vision/facedet/yolov5face/cpp/infer.cc +++ 
b/examples/vision/facedet/yolov5face/cpp/infer.cc @@ -22,7 +22,6 @@ void CpuInfer(const std::string& model_file, const std::string& image_file) { } auto im = cv::imread(image_file); - auto im_bak = im.clone(); fastdeploy::vision::FaceDetectionResult res; if (!model.Predict(&im, &res)) { @@ -31,7 +30,7 @@ void CpuInfer(const std::string& model_file, const std::string& image_file) { } std::cout << res.Str() << std::endl; - auto vis_im = fastdeploy::vision::VisFaceDetection(im_bak, res); + auto vis_im = fastdeploy::vision::VisFaceDetection(im, res); cv::imwrite("vis_result.jpg", vis_im); std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl; } @@ -46,7 +45,6 @@ void GpuInfer(const std::string& model_file, const std::string& image_file) { } auto im = cv::imread(image_file); - auto im_bak = im.clone(); fastdeploy::vision::FaceDetectionResult res; if (!model.Predict(&im, &res)) { @@ -55,7 +53,7 @@ void GpuInfer(const std::string& model_file, const std::string& image_file) { } std::cout << res.Str() << std::endl; - auto vis_im = fastdeploy::vision::VisFaceDetection(im_bak, res); + auto vis_im = fastdeploy::vision::VisFaceDetection(im, res); cv::imwrite("vis_result.jpg", vis_im); std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl; } @@ -72,7 +70,6 @@ void TrtInfer(const std::string& model_file, const std::string& image_file) { } auto im = cv::imread(image_file); - auto im_bak = im.clone(); fastdeploy::vision::FaceDetectionResult res; if (!model.Predict(&im, &res)) { @@ -81,7 +78,7 @@ void TrtInfer(const std::string& model_file, const std::string& image_file) { } std::cout << res.Str() << std::endl; - auto vis_im = fastdeploy::vision::VisFaceDetection(im_bak, res); + auto vis_im = fastdeploy::vision::VisFaceDetection(im, res); cv::imwrite("vis_result.jpg", vis_im); std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl; } diff --git a/examples/vision/matting/modnet/cpp/infer.cc b/examples/vision/matting/modnet/cpp/infer.cc index d11c9717f..aa280eddd 100644 --- a/examples/vision/matting/modnet/cpp/infer.cc +++ b/examples/vision/matting/modnet/cpp/infer.cc @@ -23,7 +23,6 @@ void CpuInfer(const std::string& model_file, const std::string& image_file, } model.size = {256, 256}; auto im = cv::imread(image_file); - auto im_bak = im.clone(); cv::Mat bg = cv::imread(background_file); fastdeploy::vision::MattingResult res; @@ -32,9 +31,9 @@ void CpuInfer(const std::string& model_file, const std::string& image_file, return; } - auto vis_im = fastdeploy::vision::VisMatting(im_bak, res); + auto vis_im = fastdeploy::vision::VisMatting(im, res); auto vis_im_with_bg = - fastdeploy::vision::SwapBackgroundMatting(im_bak, bg, res); + fastdeploy::vision::SwapBackground(im, bg, res); cv::imwrite("visualized_result.jpg", vis_im_with_bg); cv::imwrite("visualized_result_fg.jpg", vis_im); std::cout << "Visualized result save in ./visualized_result_replaced_bg.jpg " @@ -54,7 +53,6 @@ void GpuInfer(const std::string& model_file, const std::string& image_file, model.size = {256, 256}; auto im = cv::imread(image_file); - auto im_bak = im.clone(); cv::Mat bg = cv::imread(background_file); fastdeploy::vision::MattingResult res; @@ -63,9 +61,9 @@ void GpuInfer(const std::string& model_file, const std::string& image_file, return; } - auto vis_im = fastdeploy::vision::VisMatting(im_bak, res); + auto vis_im = fastdeploy::vision::VisMatting(im, res); auto vis_im_with_bg = - fastdeploy::vision::SwapBackgroundMatting(im_bak, bg, res); + fastdeploy::vision::SwapBackground(im, bg, 
res); cv::imwrite("visualized_result.jpg", vis_im_with_bg); cv::imwrite("visualized_result_fg.jpg", vis_im); std::cout << "Visualized result save in ./visualized_result_replaced_bg.jpg " @@ -86,7 +84,6 @@ void TrtInfer(const std::string& model_file, const std::string& image_file, } model.size = {256, 256}; auto im = cv::imread(image_file); - auto im_bak = im.clone(); cv::Mat bg = cv::imread(background_file); fastdeploy::vision::MattingResult res; @@ -95,9 +92,9 @@ void TrtInfer(const std::string& model_file, const std::string& image_file, return; } - auto vis_im = fastdeploy::vision::VisMatting(im_bak, res); + auto vis_im = fastdeploy::vision::VisMatting(im, res); auto vis_im_with_bg = - fastdeploy::vision::SwapBackgroundMatting(im_bak, bg, res); + fastdeploy::vision::SwapBackground(im, bg, res); cv::imwrite("visualized_result.jpg", vis_im_with_bg); cv::imwrite("visualized_result_fg.jpg", vis_im); std::cout << "Visualized result save in ./visualized_result_replaced_bg.jpg " diff --git a/examples/vision/matting/modnet/python/infer.py b/examples/vision/matting/modnet/python/infer.py index abb9be037..37c749010 100644 --- a/examples/vision/matting/modnet/python/infer.py +++ b/examples/vision/matting/modnet/python/infer.py @@ -56,7 +56,7 @@ result = model.predict(im) print(result) # 可视化结果 vis_im = fd.vision.vis_matting_alpha(im, result) -vis_im_with_bg = fd.vision.swap_background_matting(im, bg, result) +vis_im_with_bg = fd.vision.swap_background(im, bg, result) cv2.imwrite("visualized_result_fg.jpg", vis_im) cv2.imwrite("visualized_result_replaced_bg.jpg", vis_im_with_bg) print( diff --git a/examples/vision/matting/ppmatting/cpp/infer.cc b/examples/vision/matting/ppmatting/cpp/infer.cc index 9c917a77f..2acb2a8ca 100644 --- a/examples/vision/matting/ppmatting/cpp/infer.cc +++ b/examples/vision/matting/ppmatting/cpp/infer.cc @@ -35,16 +35,15 @@ void CpuInfer(const std::string& model_dir, const std::string& image_file, } auto im = cv::imread(image_file); - auto im_bak = im.clone(); cv::Mat bg = cv::imread(background_file); fastdeploy::vision::MattingResult res; if (!model.Predict(&im, &res)) { std::cerr << "Failed to predict." << std::endl; return; } - auto vis_im = fastdeploy::vision::VisMatting(im_bak, res); + auto vis_im = fastdeploy::vision::VisMatting(im, res); auto vis_im_with_bg = - fastdeploy::vision::SwapBackgroundMatting(im_bak, bg, res); + fastdeploy::vision::SwapBackground(im, bg, res); cv::imwrite("visualized_result.jpg", vis_im_with_bg); cv::imwrite("visualized_result_fg.jpg", vis_im); std::cout << "Visualized result save in ./visualized_result_replaced_bg.jpg " @@ -69,16 +68,15 @@ void GpuInfer(const std::string& model_dir, const std::string& image_file, } auto im = cv::imread(image_file); - auto im_bak = im.clone(); cv::Mat bg = cv::imread(background_file); fastdeploy::vision::MattingResult res; if (!model.Predict(&im, &res)) { std::cerr << "Failed to predict." 
<< std::endl; return; } - auto vis_im = fastdeploy::vision::VisMatting(im_bak, res); + auto vis_im = fastdeploy::vision::VisMatting(im, res); auto vis_im_with_bg = - fastdeploy::vision::SwapBackgroundMatting(im_bak, bg, res); + fastdeploy::vision::SwapBackground(im, bg, res); cv::imwrite("visualized_result.jpg", vis_im_with_bg); cv::imwrite("visualized_result_fg.jpg", vis_im); std::cout << "Visualized result save in ./visualized_result_replaced_bg.jpg " @@ -104,16 +102,15 @@ void TrtInfer(const std::string& model_dir, const std::string& image_file, } auto im = cv::imread(image_file); - auto im_bak = im.clone(); cv::Mat bg = cv::imread(background_file); fastdeploy::vision::MattingResult res; if (!model.Predict(&im, &res)) { std::cerr << "Failed to predict." << std::endl; return; } - auto vis_im = fastdeploy::vision::VisMatting(im_bak, res); + auto vis_im = fastdeploy::vision::VisMatting(im, res); auto vis_im_with_bg = - fastdeploy::vision::SwapBackgroundMatting(im_bak, bg, res); + fastdeploy::vision::SwapBackground(im, bg, res); cv::imwrite("visualized_result.jpg", vis_im_with_bg); cv::imwrite("visualized_result_fg.jpg", vis_im); std::cout << "Visualized result save in ./visualized_result_replaced_bg.jpg " diff --git a/examples/vision/matting/ppmatting/python/infer.py b/examples/vision/matting/ppmatting/python/infer.py index 32a81eca7..89913cd11 100644 --- a/examples/vision/matting/ppmatting/python/infer.py +++ b/examples/vision/matting/ppmatting/python/infer.py @@ -60,7 +60,7 @@ result = model.predict(im) print(result) # 可视化结果 vis_im = fd.vision.vis_matting(im, result) -vis_im_with_bg = fd.vision.swap_background_matting(im, bg, result) +vis_im_with_bg = fd.vision.swap_background(im, bg, result) cv2.imwrite("visualized_result_fg.jpg", vis_im) cv2.imwrite("visualized_result_replaced_bg.jpg", vis_im_with_bg) print( diff --git a/examples/vision/matting/rvm/python/infer.py b/examples/vision/matting/rvm/python/infer.py index fcde64fb2..0e9eb6b21 100755 --- a/examples/vision/matting/rvm/python/infer.py +++ b/examples/vision/matting/rvm/python/infer.py @@ -86,7 +86,7 @@ if args.video is not None: break result = model.predict(frame) vis_im = fd.vision.vis_matting(frame, result) - vis_im_with_bg = fd.vision.swap_background_matting(frame, bg, result) + vis_im_with_bg = fd.vision.swap_background(frame, bg, result) alpha.write(vis_im) composition.write(vis_im_with_bg) cv2.waitKey(30) @@ -104,7 +104,7 @@ if args.image is not None: print(result) # 可视化结果 vis_im = fd.vision.vis_matting(im, result) - vis_im_with_bg = fd.vision.swap_background_matting(im, bg, result) + vis_im_with_bg = fd.vision.swap_background(im, bg, result) cv2.imwrite("visualized_result_fg.jpg", vis_im) cv2.imwrite("visualized_result_replaced_bg.jpg", vis_im_with_bg) print( From 37590ad2dc00abed74f043ce848a3dd179cb451a Mon Sep 17 00:00:00 2001 From: huangjianhui <852142024@qq.com> Date: Mon, 12 Dec 2022 21:29:57 +0800 Subject: [PATCH 05/18] Update README_CN.md --- README_CN.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README_CN.md b/README_CN.md index 8814215fe..d296e37b3 100755 --- a/README_CN.md +++ b/README_CN.md @@ -118,7 +118,7 @@ model = vision.detection.PPYOLOE("ppyoloe_crn_l_300e_coco/model.pdmodel", "ppyoloe_crn_l_300e_coco/model.pdiparams", "ppyoloe_crn_l_300e_coco/infer_cfg.yml") im = cv2.imread("000000014439.jpg") -result = model.predict(im.copy()) +result = model.predict(im) print(result) vis_im = vision.vis_detection(im, result, score_threshold=0.5) From 8d038e430856110fa666736fd456a6fda0c40464 Mon 
Sep 17 00:00:00 2001 From: huangjianhui <852142024@qq.com> Date: Mon, 12 Dec 2022 21:36:15 +0800 Subject: [PATCH 06/18] Update README_CN.md --- README_CN.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README_CN.md b/README_CN.md index d296e37b3..30dc326f8 100755 --- a/README_CN.md +++ b/README_CN.md @@ -165,7 +165,7 @@ int main(int argc, char* argv[]) { vision::DetectionResult res; model.Predict(&im, &res); - auto vis_im = vision::Visualize::VisDetection(im, res, 0.5); + auto vis_im = vision::VisDetection(im, res, 0.5); cv::imwrite("vis_image.jpg", vis_im); return 0; } From b016b762cf3f9bcd6451c54cdfe16f569feed867 Mon Sep 17 00:00:00 2001 From: huangjianhui <852142024@qq.com> Date: Wed, 18 Jan 2023 16:40:07 +0800 Subject: [PATCH 07/18] Update preprocessor.h --- fastdeploy/vision/segmentation/ppseg/preprocessor.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/fastdeploy/vision/segmentation/ppseg/preprocessor.h b/fastdeploy/vision/segmentation/ppseg/preprocessor.h index 6452e8e0e..1b27863e4 100644 --- a/fastdeploy/vision/segmentation/ppseg/preprocessor.h +++ b/fastdeploy/vision/segmentation/ppseg/preprocessor.h @@ -31,7 +31,8 @@ class FASTDEPLOY_DECL PaddleSegPreprocessor { /** \brief Process the input image and prepare input tensors for runtime * * \param[in] images The input image data list, all the elements are returned by cv::imread() - * \param[in] outputs The output tensors which will feed in runtime, include image + * \param[in] outputs The output tensors which will feed in runtime + * \param[in] imgs_info The original input images shape info map, key is "shape_info", value is vector<array<int, 2>> a{{height, width}} * \return true if the preprocess successed, otherwise false */ virtual bool Run( From 03cdc4cde36056f48bcd1b9bbd41e4ba3e6b07fa Mon Sep 17 00:00:00 2001 From: felixhjh <852142024@qq.com> Date: Wed, 18 Jan 2023 12:20:05 +0000 Subject: [PATCH 08/18] PaddleSeg supports triton serving --- .../serving/models/paddleseg/1/README.md | 3 + .../serving/models/paddleseg/config.pbtxt | 67 ++++++++++ .../serving/models/postprocess/1/model.py | 115 +++++++++++++++++ .../serving/models/postprocess/config.pbtxt | 30 +++++ .../serving/models/preprocess/1/deploy.yaml | 12 ++ .../serving/models/preprocess/1/model.py | 117 ++++++++++++++++++ .../serving/models/preprocess/config.pbtxt | 34 +++++ .../serving/models/runtime/1/README.md | 5 + .../serving/models/runtime/config.pbtxt | 60 +++++++++ 9 files changed, 443 insertions(+) create mode 100644 examples/vision/segmentation/paddleseg/serving/models/paddleseg/1/README.md create mode 100644 examples/vision/segmentation/paddleseg/serving/models/paddleseg/config.pbtxt create mode 100755 examples/vision/segmentation/paddleseg/serving/models/postprocess/1/model.py create mode 100644 examples/vision/segmentation/paddleseg/serving/models/postprocess/config.pbtxt create mode 100644 examples/vision/segmentation/paddleseg/serving/models/preprocess/1/deploy.yaml create mode 100644 examples/vision/segmentation/paddleseg/serving/models/preprocess/1/model.py create mode 100644 examples/vision/segmentation/paddleseg/serving/models/preprocess/config.pbtxt create mode 100644 examples/vision/segmentation/paddleseg/serving/models/runtime/1/README.md create mode 100644 examples/vision/segmentation/paddleseg/serving/models/runtime/config.pbtxt diff --git a/examples/vision/segmentation/paddleseg/serving/models/paddleseg/1/README.md b/examples/vision/segmentation/paddleseg/serving/models/paddleseg/1/README.md new file mode 100644 index 
000000000..42ae7e483 --- /dev/null +++ b/examples/vision/segmentation/paddleseg/serving/models/paddleseg/1/README.md @@ -0,0 +1,3 @@ +# PaddleSeg Pipeline + +The pipeline directory does not have model files, but a version number directory needs to be maintained. diff --git a/examples/vision/segmentation/paddleseg/serving/models/paddleseg/config.pbtxt b/examples/vision/segmentation/paddleseg/serving/models/paddleseg/config.pbtxt new file mode 100644 index 000000000..9571a5b91 --- /dev/null +++ b/examples/vision/segmentation/paddleseg/serving/models/paddleseg/config.pbtxt @@ -0,0 +1,67 @@ +platform: "ensemble" + +input [ + { + name: "INPUT" + data_type: TYPE_UINT8 + dims: [-1, -1, -1, 3 ] + } +] + +output [ + { + name: "SEG_RESULT" + data_type: TYPE_STRING + dims: [ -1 ] + } +] + +ensemble_scheduling { + step [ + { + model_name: "preprocess" + model_version: 1 + input_map { + key: "preprocess_input" + value: "INPUT" + } + output_map { + key: "preprocess_output_1" + value: "RUNTIME_INPUT_1" + } + output_map { + key: "preprocess_output_2" + value: "POSTPROCESS_INPUT_2" + } + }, + { + model_name: "runtime" + model_version: 1 + input_map { + key: "x" + value: "RUNTIME_INPUT_1" + } + output_map { + key: "argmax_0.tmp_0" + value: "RUNTIME_OUTPUT" + } + }, + { + model_name: "postprocess" + model_version: 1 + input_map { + key: "post_input_1" + value: "RUNTIME_OUTPUT" + } + input_map { + key: "post_input_2" + value: "POSTPROCESS_INPUT_2" + } + output_map { + key: "post_output" + value: "SEG_RESULT" + } + } + ] +} + diff --git a/examples/vision/segmentation/paddleseg/serving/models/postprocess/1/model.py b/examples/vision/segmentation/paddleseg/serving/models/postprocess/1/model.py new file mode 100755 index 000000000..510aad6ea --- /dev/null +++ b/examples/vision/segmentation/paddleseg/serving/models/postprocess/1/model.py @@ -0,0 +1,115 @@ +# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import json +import numpy as np +import time +import os +import fastdeploy as fd + +# triton_python_backend_utils is available in every Triton Python model. You +# need to use this module to create inference requests and responses. It also +# contains some utility functions for extracting information from model_config +# and converting Triton input/output types to numpy types. +import triton_python_backend_utils as pb_utils + + +class TritonPythonModel: + """Your Python model must use the same class name. Every Python model + that is created must have "TritonPythonModel" as the class name. + """ + + def initialize(self, args): + """`initialize` is called only once when the model is being loaded. + Implementing `initialize` function is optional. This function allows + the model to initialize any state associated with this model. + Parameters + ---------- + args : dict + Both keys and values are strings. 
The dictionary keys and values are: + * model_config: A JSON string containing the model configuration + * model_instance_kind: A string containing model instance kind + * model_instance_device_id: A string containing model instance device ID + * model_repository: Model repository path + * model_version: Model version + * model_name: Model name + """ + # You must parse model_config. JSON string is not parsed here + self.model_config = json.loads(args['model_config']) + print("model_config:", self.model_config) + + self.input_names = [] + for input_config in self.model_config["input"]: + self.input_names.append(input_config["name"]) + print("postprocess input names:", self.input_names) + + self.output_names = [] + self.output_dtype = [] + for output_config in self.model_config["output"]: + self.output_names.append(output_config["name"]) + dtype = pb_utils.triton_string_to_numpy(output_config["data_type"]) + self.output_dtype.append(dtype) + print("postprocess output names:", self.output_names) + + yaml_path = os.path.abspath(os.path.dirname(__file__)) + "/deploy.yaml" + self.postprocess_ = fd.vision.segmentation.PaddleSegPostprocessor( + yaml_path) + + def execute(self, requests): + """`execute` must be implemented in every Python model. `execute` + function receives a list of pb_utils.InferenceRequest as the only + argument. This function is called when an inference is requested + for this model. Depending on the batching configuration (e.g. Dynamic + Batching) used, `requests` may contain multiple requests. Every + Python model, must create one pb_utils.InferenceResponse for every + pb_utils.InferenceRequest in `requests`. If there is an error, you can + set the error argument when creating a pb_utils.InferenceResponse. + Parameters + ---------- + requests : list + A list of pb_utils.InferenceRequest + Returns + ------- + list + A list of pb_utils.InferenceResponse. The length of this list must + be the same as `requests` + """ + responses = [] + for request in requests: + infer_outputs = pb_utils.get_input_tensor_by_name( + request, self.input_names[0]) + im_info = pb_utils.get_input_tensor_by_name(request, + self.input_names[1]) + infer_outputs = infer_outputs.as_numpy() + im_info = im_info.as_numpy() + for i in range(im_info.shape[0]): + im_info[i] = json.loads(im_info[i].decode('utf-8').replace( + "'", '"')) + + results = self.postprocess_.run([infer_outputs], im_info[0]) + r_str = fd.vision.utils.fd_result_to_json(results) + + r_np = np.array(r_str, dtype=np.object_) + out_tensor = pb_utils.Tensor(self.output_names[0], r_np) + inference_response = pb_utils.InferenceResponse( + output_tensors=[out_tensor, ]) + responses.append(inference_response) + return responses + + def finalize(self): + """`finalize` is called only once when the model is being unloaded. + Implementing `finalize` function is optional. This function allows + the model to perform any necessary clean ups before exit. 
+ """ + print('Cleaning up...') diff --git a/examples/vision/segmentation/paddleseg/serving/models/postprocess/config.pbtxt b/examples/vision/segmentation/paddleseg/serving/models/postprocess/config.pbtxt new file mode 100644 index 000000000..81f31ba08 --- /dev/null +++ b/examples/vision/segmentation/paddleseg/serving/models/postprocess/config.pbtxt @@ -0,0 +1,30 @@ +name: "postprocess" +backend: "python" + +input [ + { + name: "post_input_1" + data_type: TYPE_INT32 + dims: [-1, -1, -1] + }, + { + name: "post_input_2" + data_type: TYPE_STRING + dims: [ -1 ] + } +] + +output [ + { + name: "post_output" + data_type: TYPE_STRING + dims: [ -1 ] + } +] + +instance_group [ + { + count: 1 + kind: KIND_CPU + } +] diff --git a/examples/vision/segmentation/paddleseg/serving/models/preprocess/1/deploy.yaml b/examples/vision/segmentation/paddleseg/serving/models/preprocess/1/deploy.yaml new file mode 100644 index 000000000..6d33e5009 --- /dev/null +++ b/examples/vision/segmentation/paddleseg/serving/models/preprocess/1/deploy.yaml @@ -0,0 +1,12 @@ +Deploy: + input_shape: + - -1 + - 3 + - -1 + - -1 + model: model.pdmodel + output_dtype: int32 + output_op: argmax + params: model.pdiparams + transforms: + - type: Normalize diff --git a/examples/vision/segmentation/paddleseg/serving/models/preprocess/1/model.py b/examples/vision/segmentation/paddleseg/serving/models/preprocess/1/model.py new file mode 100644 index 000000000..48a72d6fa --- /dev/null +++ b/examples/vision/segmentation/paddleseg/serving/models/preprocess/1/model.py @@ -0,0 +1,117 @@ +# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import json +import numpy as np +import os + +import fastdeploy as fd + +# triton_python_backend_utils is available in every Triton Python model. You +# need to use this module to create inference requests and responses. It also +# contains some utility functions for extracting information from model_config +# and converting Triton input/output types to numpy types. +import triton_python_backend_utils as pb_utils + + +class TritonPythonModel: + """Your Python model must use the same class name. Every Python model + that is created must have "TritonPythonModel" as the class name. + """ + + def initialize(self, args): + """`initialize` is called only once when the model is being loaded. + Implementing `initialize` function is optional. This function allows + the model to intialize any state associated with this model. + Parameters + ---------- + args : dict + Both keys and values are strings. The dictionary keys and values are: + * model_config: A JSON string containing the model configuration + * model_instance_kind: A string containing model instance kind + * model_instance_device_id: A string containing model instance device ID + * model_repository: Model repository path + * model_version: Model version + * model_name: Model name + """ + # You must parse model_config. 
JSON string is not parsed here + self.model_config = json.loads(args['model_config']) + print("model_config:", self.model_config) + + self.input_names = [] + for input_config in self.model_config["input"]: + self.input_names.append(input_config["name"]) + print("preprocess input names:", self.input_names) + + self.output_names = [] + self.output_dtype = [] + for output_config in self.model_config["output"]: + self.output_names.append(output_config["name"]) + # dtype = pb_utils.triton_string_to_numpy(output_config["data_type"]) + # self.output_dtype.append(dtype) + self.output_dtype.append(output_config["data_type"]) + print("preprocess output names:", self.output_names) + + # init PaddleSegPreprocess class + yaml_path = os.path.abspath(os.path.dirname(__file__)) + "/deploy.yaml" + self.preprocess_ = fd.vision.segmentation.PaddleSegPreprocessor( + yaml_path) + #if args['model_instance_kind'] == 'GPU': + # device_id = int(args['model_instance_device_id']) + # self.preprocess_.use_gpu(device_id) + + def execute(self, requests): + """`execute` must be implemented in every Python model. `execute` + function receives a list of pb_utils.InferenceRequest as the only + argument. This function is called when an inference is requested + for this model. Depending on the batching configuration (e.g. Dynamic + Batching) used, `requests` may contain multiple requests. Every + Python model, must create one pb_utils.InferenceResponse for every + pb_utils.InferenceRequest in `requests`. If there is an error, you can + set the error argument when creating a pb_utils.InferenceResponse. + Parameters + ---------- + requests : list + A list of pb_utils.InferenceRequest + Returns + ------- + list + A list of pb_utils.InferenceResponse. The length of this list must + be the same as `requests` + """ + responses = [] + for request in requests: + data = pb_utils.get_input_tensor_by_name(request, + self.input_names[0]) + data = data.as_numpy() + outputs, im_info = self.preprocess_.run(data) + + # PaddleSeg preprocess has two outputs + dlpack_tensor = outputs[0].to_dlpack() + output_tensor_0 = pb_utils.Tensor.from_dlpack(self.output_names[0], + dlpack_tensor) + output_tensor_1 = pb_utils.Tensor( + self.output_names[1], np.array( + [im_info], dtype=np.object_)) + inference_response = pb_utils.InferenceResponse( + output_tensors=[output_tensor_0, output_tensor_1]) + responses.append(inference_response) + return responses + + def finalize(self): + """`finalize` is called only once when the model is being unloaded. + Implementing `finalize` function is optional. This function allows + the model to perform any necessary clean ups before exit. 
+ """ + print('Cleaning up...') diff --git a/examples/vision/segmentation/paddleseg/serving/models/preprocess/config.pbtxt b/examples/vision/segmentation/paddleseg/serving/models/preprocess/config.pbtxt new file mode 100644 index 000000000..01cb94869 --- /dev/null +++ b/examples/vision/segmentation/paddleseg/serving/models/preprocess/config.pbtxt @@ -0,0 +1,34 @@ +name: "preprocess" +backend: "python" + +input [ + { + name: "preprocess_input" + data_type: TYPE_UINT8 + dims: [-1, -1, -1, 3 ] + } +] + +output [ + { + name: "preprocess_output_1" + data_type: TYPE_FP32 + dims: [-1, 3, -1, -1 ] + }, + { + name: "preprocess_output_2" + data_type: TYPE_STRING + dims: [ -1] + } +] + +instance_group [ + { + # The number of instances is 1 + count: 1 + # Use CPU, GPU inference option is:KIND_GPU + kind: KIND_CPU + # The instance is deployed on the 0th GPU card + # gpus: [0] + } +] diff --git a/examples/vision/segmentation/paddleseg/serving/models/runtime/1/README.md b/examples/vision/segmentation/paddleseg/serving/models/runtime/1/README.md new file mode 100644 index 000000000..1e5d914b4 --- /dev/null +++ b/examples/vision/segmentation/paddleseg/serving/models/runtime/1/README.md @@ -0,0 +1,5 @@ +# Runtime Directory + +This directory holds the model files. +Paddle models must be model.pdmodel and model.pdiparams files. +ONNX models must be model.onnx files. diff --git a/examples/vision/segmentation/paddleseg/serving/models/runtime/config.pbtxt b/examples/vision/segmentation/paddleseg/serving/models/runtime/config.pbtxt new file mode 100644 index 000000000..bd145c590 --- /dev/null +++ b/examples/vision/segmentation/paddleseg/serving/models/runtime/config.pbtxt @@ -0,0 +1,60 @@ +# optional, If name is specified it must match the name of the model repository directory containing the model. 
+name: "runtime" +backend: "fastdeploy" + +# Input configuration of the model +input [ + { + # input name + name: "x" + # input type such as TYPE_FP32、TYPE_UINT8、TYPE_INT8、TYPE_INT16、TYPE_INT32、TYPE_INT64、TYPE_FP16、TYPE_STRING + data_type: TYPE_FP32 + # input shape + dims: [-1, 3, -1, -1 ] + } +] + +# The output of the model is configured in the same format as the input +output [ + { + name: "argmax_0.tmp_0" + data_type: TYPE_INT32 + dims: [ -1, -1, -1 ] + } +] + +# Number of instances of the model +instance_group [ + { + # The number of instances is 1 + count: 1 + # Use GPU, CPU inference option is:KIND_CPU + kind: KIND_GPU + # The instance is deployed on the 0th GPU card + gpus: [0] + } +] + +optimization { + execution_accelerators { + gpu_execution_accelerator : [ { + # use TRT engine + name: "paddle", + #name: "tensorrt", + # use fp16 on TRT engine + parameters { key: "precision" value: "trt_fp32" } + }, + { + name: "min_shape" + parameters { key: "x" value: "1 3 256 256" } + }, + { + name: "opt_shape" + parameters { key: "x" value: "1 3 1024 1024" } + }, + { + name: "max_shape" + parameters { key: "x" value: "16 3 2048 2048" } + } + ] +}} From 1a3d0f86217feebd6d006b6091e44638ad7df9c0 Mon Sep 17 00:00:00 2001 From: felixhjh <852142024@qq.com> Date: Wed, 18 Jan 2023 13:23:50 +0000 Subject: [PATCH 09/18] Add PaddleSeg simple serving example --- .../paddleseg/python/serving/README.md | 36 ++++++++++++++++++ .../paddleseg/python/serving/README_CN.md | 36 ++++++++++++++++++ .../paddleseg/python/serving/client.py | 23 +++++++++++ .../paddleseg/python/serving/server.py | 38 +++++++++++++++++++ 4 files changed, 133 insertions(+) create mode 100644 examples/vision/segmentation/paddleseg/python/serving/README.md create mode 100644 examples/vision/segmentation/paddleseg/python/serving/README_CN.md create mode 100644 examples/vision/segmentation/paddleseg/python/serving/client.py create mode 100644 examples/vision/segmentation/paddleseg/python/serving/server.py diff --git a/examples/vision/segmentation/paddleseg/python/serving/README.md b/examples/vision/segmentation/paddleseg/python/serving/README.md new file mode 100644 index 000000000..da41a3a00 --- /dev/null +++ b/examples/vision/segmentation/paddleseg/python/serving/README.md @@ -0,0 +1,36 @@ +English | [简体中文](README_CN.md) + +# PaddleSegmentation Python Simple Serving Demo + + +## Environment + +- 1. Prepare environment and install FastDeploy Python whl, refer to [download_prebuilt_libraries](../../../../../../docs/en/build_and_install/download_prebuilt_libraries.md) + +Server: +```bash +# Download demo code +git clone https://github.com/PaddlePaddle/FastDeploy.git +cd FastDeploy/examples/vision/segmentation/paddleseg/python/serving + +# Download PP_LiteSeg model +wget https://bj.bcebos.com/paddlehub/fastdeploy/PP_LiteSeg_B_STDC2_cityscapes_with_argmax_infer.tgz +tar -xvf PP_LiteSeg_B_STDC2_cityscapes_with_argmax_infer.tgz + +# Launch server, change the configurations in server.py to select hardware, backend, etc. 
+# and use --host, --port to specify IP and port +fastdeploy simple_serving --app server:app +``` + +Client: +```bash +# Download demo code +git clone https://github.com/PaddlePaddle/FastDeploy.git +cd FastDeploy/examples/vision/segmentation/paddleseg/python/serving + +# Download test image +wget https://paddleseg.bj.bcebos.com/dygraph/demo/cityscapes_demo.png + +# Send request and get inference result (Please adapt the IP and port if necessary) +python client.py +``` diff --git a/examples/vision/segmentation/paddleseg/python/serving/README_CN.md b/examples/vision/segmentation/paddleseg/python/serving/README_CN.md new file mode 100644 index 000000000..3f382c904 --- /dev/null +++ b/examples/vision/segmentation/paddleseg/python/serving/README_CN.md @@ -0,0 +1,36 @@ +简体中文 | [English](README.md) + +# PaddleSegmentation Python轻量服务化部署示例 + +在部署前,需确认以下两个步骤 + +- 1. 软硬件环境满足要求,参考[FastDeploy环境要求](../../../../../../docs/cn/build_and_install/download_prebuilt_libraries.md) +- 2. FastDeploy Python whl包安装,参考[FastDeploy Python安装](../../../../../../docs/cn/build_and_install/download_prebuilt_libraries.md) + +服务端: +```bash +# 下载部署示例代码 +git clone https://github.com/PaddlePaddle/FastDeploy.git +cd FastDeploy/examples/vision/segmentation/paddleseg/python/serving + +# 下载PP_LiteSeg模型文件 +wget https://bj.bcebos.com/paddlehub/fastdeploy/PP_LiteSeg_B_STDC2_cityscapes_with_argmax_infer.tgz +tar -xvf PP_LiteSeg_B_STDC2_cityscapes_with_argmax_infer.tgz + +# 启动服务,可修改server.py中的配置项来指定硬件、后端等 +# 可通过--host、--port指定IP和端口号 +fastdeploy simple_serving --app server:app +``` + +客户端: +```bash +# 下载部署示例代码 +git clone https://github.com/PaddlePaddle/FastDeploy.git +cd FastDeploy/examples/vision/segmentation/paddleseg/python/serving + +# 下载测试图片 +wget https://paddleseg.bj.bcebos.com/dygraph/demo/cityscapes_demo.png + +# 请求服务,获取推理结果(如有必要,请修改脚本中的IP和端口号) +python client.py +``` diff --git a/examples/vision/segmentation/paddleseg/python/serving/client.py b/examples/vision/segmentation/paddleseg/python/serving/client.py new file mode 100644 index 000000000..e652c4462 --- /dev/null +++ b/examples/vision/segmentation/paddleseg/python/serving/client.py @@ -0,0 +1,23 @@ +import requests +import json +import cv2 +import fastdeploy as fd +from fastdeploy.serving.utils import cv2_to_base64 + +if __name__ == '__main__': + url = "http://127.0.0.1:8000/fd/ppliteseg" + headers = {"Content-Type": "application/json"} + + im = cv2.imread("cityscapes_demo.png") + data = {"data": {"image": cv2_to_base64(im)}, "parameters": {}} + + resp = requests.post(url=url, headers=headers, data=json.dumps(data)) + if resp.status_code == 200: + r_json = json.loads(resp.json()["result"]) + result = fd.vision.utils.json_to_segmentation(r_json) + vis_im = fd.vision.vis_segmentation(im, result, weight=0.5) + cv2.imwrite("visualized_result.jpg", vis_im) + print("Visualized result saved in ./visualized_result.jpg") + else: + print("Error code:", resp.status_code) + print(resp.text) diff --git a/examples/vision/segmentation/paddleseg/python/serving/server.py b/examples/vision/segmentation/paddleseg/python/serving/server.py new file mode 100644 index 000000000..2ae2df09c --- /dev/null +++ b/examples/vision/segmentation/paddleseg/python/serving/server.py @@ -0,0 +1,38 @@ +import fastdeploy as fd +from fastdeploy.serving.server import SimpleServer +import os +import logging + +logging.getLogger().setLevel(logging.INFO) + +# Configurations +model_dir = 'PP_LiteSeg_B_STDC2_cityscapes_with_argmax_infer' +device = 'cpu' +use_trt = False + +# Prepare model 
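+# The three paths below are the standard artifacts of a PaddleSeg export: the serialized graph (model.pdmodel), the weights (model.pdiparams), and the pre/postprocessing config (deploy.yaml) consumed by PaddleSegModel.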
+model_file = os.path.join(model_dir, "model.pdmodel") +params_file = os.path.join(model_dir, "model.pdiparams") +config_file = os.path.join(model_dir, "deploy.yaml") + +# Setup runtime option to select hardware, backend, etc. +option = fd.RuntimeOption() +if device.lower() == 'gpu': + option.use_gpu() +if use_trt: + option.use_trt_backend() + option.set_trt_cache_file('pp_lite_seg.trt') + +# Create model instance +model_instance = fd.vision.segmentation.PaddleSegModel( + model_file=model_file, + params_file=params_file, + config_file=config_file, + runtime_option=option) + +# Create server, setup REST API +app = SimpleServer() +app.register( + task_name="fd/ppliteseg", + model_handler=fd.serving.handler.VisionModelHandler, + predictor=model_instance) From 03fd1aa5cff4cb8adaae1c8a29aed404a38f2283 Mon Sep 17 00:00:00 2001 From: felixhjh <852142024@qq.com> Date: Wed, 18 Jan 2023 13:25:00 +0000 Subject: [PATCH 10/18] Add PaddleSeg triton serving client code --- .../serving/paddleseg_grpc_client.py | 116 ++++++++++++++++++ 1 file changed, 116 insertions(+) create mode 100644 examples/vision/segmentation/paddleseg/serving/paddleseg_grpc_client.py diff --git a/examples/vision/segmentation/paddleseg/serving/paddleseg_grpc_client.py b/examples/vision/segmentation/paddleseg/serving/paddleseg_grpc_client.py new file mode 100644 index 000000000..1fe3828db --- /dev/null +++ b/examples/vision/segmentation/paddleseg/serving/paddleseg_grpc_client.py @@ -0,0 +1,116 @@ +import logging +import numpy as np +import time +from typing import Optional +import cv2 +import json + +from tritonclient import utils as client_utils +from tritonclient.grpc import InferenceServerClient, InferInput, InferRequestedOutput, service_pb2_grpc, service_pb2 + +LOGGER = logging.getLogger("run_inference_on_triton") + + +class SyncGRPCTritonRunner: + DEFAULT_MAX_RESP_WAIT_S = 120 + + def __init__( + self, + server_url: str, + model_name: str, + model_version: str, + *, + verbose=False, + resp_wait_s: Optional[float]=None, ): + self._server_url = server_url + self._model_name = model_name + self._model_version = model_version + self._verbose = verbose + self._response_wait_t = self.DEFAULT_MAX_RESP_WAIT_S if resp_wait_s is None else resp_wait_s + + self._client = InferenceServerClient( + self._server_url, verbose=self._verbose) + error = self._verify_triton_state(self._client) + if error: + raise RuntimeError( + f"Could not communicate to Triton Server: {error}") + + LOGGER.debug( + f"Triton server {self._server_url} and model {self._model_name}:{self._model_version} " + f"are up and ready!") + + model_config = self._client.get_model_config(self._model_name, + self._model_version) + model_metadata = self._client.get_model_metadata(self._model_name, + self._model_version) + LOGGER.info(f"Model config {model_config}") + LOGGER.info(f"Model metadata {model_metadata}") + + for tm in model_metadata.inputs: + print("tm:", tm) + self._inputs = {tm.name: tm for tm in model_metadata.inputs} + self._input_names = list(self._inputs) + self._outputs = {tm.name: tm for tm in model_metadata.outputs} + self._output_names = list(self._outputs) + self._outputs_req = [ + InferRequestedOutput(name) for name in self._outputs + ] + + def Run(self, inputs): + """ + Args: + inputs: list, Each value corresponds to an input name of self._input_names + Returns: + results: dict, {name : numpy.array} + """ + infer_inputs = [] + for idx, data in enumerate(inputs): + infer_input = InferInput(self._input_names[idx], data.shape, + "UINT8") + 
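# NOTE: the dtype is pinned to "UINT8" because the ensemble's INPUT is declared TYPE_UINT8 in models/paddleseg/config.pbtxt; a more generic client could instead read the dtype from self._inputs[name].datatype in the model metadata fetched above.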
infer_input.set_data_from_numpy(data) + infer_inputs.append(infer_input) + + results = self._client.infer( + model_name=self._model_name, + model_version=self._model_version, + inputs=infer_inputs, + outputs=self._outputs_req, + client_timeout=self._response_wait_t, ) + results = {name: results.as_numpy(name) for name in self._output_names} + return results + + def _verify_triton_state(self, triton_client): + if not triton_client.is_server_live(): + return f"Triton server {self._server_url} is not live" + elif not triton_client.is_server_ready(): + return f"Triton server {self._server_url} is not ready" + elif not triton_client.is_model_ready(self._model_name, + self._model_version): + return f"Model {self._model_name}:{self._model_version} is not ready" + return None + + +if __name__ == "__main__": + model_name = "paddleseg" + model_version = "1" + url = "localhost:8001" + runner = SyncGRPCTritonRunner(url, model_name, model_version) + im = cv2.imread("cityscapes_demo.png") + im = np.array([im, ]) + # batch input + # im = np.array([im, im, im]) + for i in range(1): + result = runner.Run([im, ]) + for name, values in result.items(): + print("output_name:", name) + # values is batch + for value in values: + value = json.loads(value) + #print(value) + import fastdeploy as fd + result = fd.C.vision.SegmentationResult() + result.label_map = value["label_map"] + result.shape = value["shape"] + im = cv2.imread("cityscapes_demo.png") + vis_im = fd.vision.vis_segmentation(im, result, 0.5) + cv2.imwrite("seg.png", vis_im) From 9d30be35a49628c36bf601c0e09ca31edbac5d69 Mon Sep 17 00:00:00 2001 From: felixhjh <852142024@qq.com> Date: Thu, 19 Jan 2023 03:37:14 +0000 Subject: [PATCH 11/18] Update triton serving runtime config.pbtxt --- .../paddleseg/serving/models/runtime/config.pbtxt | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/examples/vision/segmentation/paddleseg/serving/models/runtime/config.pbtxt b/examples/vision/segmentation/paddleseg/serving/models/runtime/config.pbtxt index bd145c590..875086e2b 100644 --- a/examples/vision/segmentation/paddleseg/serving/models/runtime/config.pbtxt +++ b/examples/vision/segmentation/paddleseg/serving/models/runtime/config.pbtxt @@ -1,6 +1,7 @@ # optional, If name is specified it must match the name of the model repository directory containing the model. 
name: "runtime"
backend: "fastdeploy"
+max_batch_size: 1
 
 # Input configuration of the model
 input [
@@ -10,7 +11,7 @@ input [
     # input type such as TYPE_FP32、TYPE_UINT8、TYPE_INT8、TYPE_INT16、TYPE_INT32、TYPE_INT64、TYPE_FP16、TYPE_STRING
     data_type: TYPE_FP32
     # input shape
-    dims: [-1, 3, -1, -1 ]
+    dims: [3, -1, -1 ]
   }
 ]
 
@@ -19,7 +20,7 @@ output [
   {
     name: "argmax_0.tmp_0"
     data_type: TYPE_INT32
-    dims: [ -1, -1, -1 ]
+    dims: [ -1, -1 ]
   }
 ]
 
@@ -39,9 +40,8 @@ optimization {
   execution_accelerators {
     gpu_execution_accelerator : [
     {
       # use TRT engine
-      name: "paddle",
-      #name: "tensorrt",
-      # use fp16 on TRT engine
+      name: "tensorrt",
+      # use fp32 on TRT engine
       parameters { key: "precision" value: "trt_fp32" }
     },
     {
@@ -54,7 +54,7 @@ optimization {
     },
     {
       name: "max_shape"
-      parameters { key: "x" value: "16 3 2048 2048" }
+      parameters { key: "x" value: "1 3 2048 2048" }
     }
   ]
 }}
From 7d145b5be386b98a4c5b709b6b57d89ca8a37bd5 Mon Sep 17 00:00:00 2001
From: felixhjh <852142024@qq.com>
Date: Thu, 19 Jan 2023 06:26:04 +0000
Subject: [PATCH 12/18] Update paddleseg grpc client

---
 .../paddleseg/serving/paddleseg_grpc_client.py       | 12 ++++--------
 1 file changed, 4 insertions(+), 8 deletions(-)

diff --git a/examples/vision/segmentation/paddleseg/serving/paddleseg_grpc_client.py b/examples/vision/segmentation/paddleseg/serving/paddleseg_grpc_client.py
index 1fe3828db..f200dec25 100644
--- a/examples/vision/segmentation/paddleseg/serving/paddleseg_grpc_client.py
+++ b/examples/vision/segmentation/paddleseg/serving/paddleseg_grpc_client.py
@@ -106,11 +106,7 @@ if __name__ == "__main__":
             # values is batch
             for value in values:
                 value = json.loads(value)
-                #print(value)
-                import fastdeploy as fd
-                result = fd.C.vision.SegmentationResult()
-                result.label_map = value["label_map"]
-                result.shape = value["shape"]
-                im = cv2.imread("cityscapes_demo.png")
-                vis_im = fd.vision.vis_segmentation(im, result, 0.5)
-                cv2.imwrite("seg.png", vis_im)
+                print(
+                    "Only print the first 20 labels in label_map of SEG_RESULT")
+                value["label_map"] = value["label_map"][:20]
+                print(value)
From 175c6b0f39b4eeb4e63082bff7c6ed7bce2e0624 Mon Sep 17 00:00:00 2001
From: felixhjh <852142024@qq.com>
Date: Thu, 19 Jan 2023 06:27:07 +0000
Subject: [PATCH 13/18] Add paddle serving README

---
 .../segmentation/paddleseg/serving/README.md  | 62 +++++++++++++++++
 .../paddleseg/serving/README_CN.md            | 68 +++++++++++++++++++
 2 files changed, 130 insertions(+)
 create mode 100644 examples/vision/segmentation/paddleseg/serving/README.md
 create mode 100644 examples/vision/segmentation/paddleseg/serving/README_CN.md

diff --git a/examples/vision/segmentation/paddleseg/serving/README.md b/examples/vision/segmentation/paddleseg/serving/README.md
new file mode 100644
index 000000000..a451e8730
--- /dev/null
+++ b/examples/vision/segmentation/paddleseg/serving/README.md
@@ -0,0 +1,62 @@
+English | [简体中文](README_CN.md)
+# PaddleSegmentation Serving Deployment Demo
+
+## Launch Serving
+
+```bash
+# Download demo code
+git clone https://github.com/PaddlePaddle/FastDeploy.git
+cd FastDeploy/examples/vision/segmentation/paddleseg/serving
+
+# Download the PP_LiteSeg model files
+wget https://bj.bcebos.com/paddlehub/fastdeploy/PP_LiteSeg_B_STDC2_cityscapes_with_argmax_infer.tgz
+tar -xvf PP_LiteSeg_B_STDC2_cityscapes_with_argmax_infer.tgz
+
+# Move the model files to models/runtime/1
+mv PP_LiteSeg_B_STDC2_cityscapes_with_argmax_infer/model.pdmodel models/runtime/1/
+mv PP_LiteSeg_B_STDC2_cityscapes_with_argmax_infer/model.pdiparams models/runtime/1/
+
+# Pull fastdeploy image, x.y.z is FastDeploy version, example 1.0.2.
+docker pull paddlepaddle/fastdeploy:x.y.z-gpu-cuda11.4-trt8.4-21.10
+
+# Run the docker. The docker name is fd_serving, and the current directory is mounted as the docker's /serving directory
+nvidia-docker run -it --net=host --name fd_serving -v `pwd`/:/serving paddlepaddle/fastdeploy:x.y.z-gpu-cuda11.4-trt8.4-21.10 bash
+
+# Start the service (without setting the CUDA_VISIBLE_DEVICES environment variable, it will have scheduling privileges for all GPU cards)
+CUDA_VISIBLE_DEVICES=0 fastdeployserver --model-repository=/serving/models --backend-config=python,shm-default-byte-size=10485760
+```
+
+If the service launches successfully, you will see output like the following:
+
+```
+......
+I0928 04:51:15.784517 206 grpc_server.cc:4117] Started GRPCInferenceService at 0.0.0.0:8001
+I0928 04:51:15.785177 206 http_server.cc:2815] Started HTTPService at 0.0.0.0:8000
+I0928 04:51:15.826578 206 http_server.cc:167] Started Metrics Service at 0.0.0.0:8002
+```
+
+## Client Requests
+
+Execute the following commands on the physical machine to send a gRPC request and print the result
+
+```
+# Download the test image
+wget https://paddleseg.bj.bcebos.com/dygraph/demo/cityscapes_demo.png
+
+# Install client-side dependencies
+python3 -m pip install tritonclient\[all\]
+
+# Send requests
+python3 paddleseg_grpc_client.py
+```
+
+When the request is sent successfully, the results are returned in JSON format and printed out:
+
+```
+
+```
+
+## Modify Configs
+
+The default configuration runs ONNXRuntime on CPU. If you need to run inference on GPU or with other engines, please see the [Configs File](../../../../../serving/docs/EN/model_configuration-en.md) to modify the configs in `models/runtime/config.pbtxt`.
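To consume the gRPC result programmatically instead of only printing it, the sketch below reuses the `SyncGRPCTritonRunner` helper from `paddleseg_grpc_client.py`. This is a minimal sketch under assumptions: the server returns one JSON string per image whose `label_map` and `shape` fields follow the client code in this patch series, and `shape` is taken to be `[height, width]`.

```python
import json

import cv2
import numpy as np

from paddleseg_grpc_client import SyncGRPCTritonRunner

runner = SyncGRPCTritonRunner("localhost:8001", "paddleseg", "1")
im = cv2.imread("cityscapes_demo.png")

# Run() takes one batched array per model input and returns {output_name: batch}.
results = runner.Run([np.array([im])])
for name, values in results.items():
    seg = json.loads(values[0])  # one JSON string per image in the batch
    h, w = seg["shape"]  # assumed to be [height, width]
    label_map = np.array(seg["label_map"], dtype=np.int32).reshape(h, w)
    print(name, label_map.shape, "unique labels:", np.unique(label_map))
```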
diff --git a/examples/vision/segmentation/paddleseg/serving/README_CN.md b/examples/vision/segmentation/paddleseg/serving/README_CN.md
new file mode 100644
index 000000000..676272f2e
--- /dev/null
+++ b/examples/vision/segmentation/paddleseg/serving/README_CN.md
@@ -0,0 +1,68 @@
+[English](README.md) | Simplified Chinese
+# PaddleSegmentation Serving Deployment Example
+
+Before serving deployment, please confirm
+
+- 1. Refer to [FastDeploy serving deployment](../../../../../serving/README_CN.md) for the hardware/software requirements of the serving image and the image pull command
+
+
+## Launch the Service
+
+```bash
+# Download the demo code
+git clone https://github.com/PaddlePaddle/FastDeploy.git
+cd FastDeploy/examples/vision/segmentation/paddleseg/serving
+
+# Download the PP_LiteSeg model files
+wget https://bj.bcebos.com/paddlehub/fastdeploy/PP_LiteSeg_B_STDC2_cityscapes_with_argmax_infer.tgz
+tar -xvf PP_LiteSeg_B_STDC2_cityscapes_with_argmax_infer.tgz
+
+# Put the model files into the models/runtime/1 directory
+mv PP_LiteSeg_B_STDC2_cityscapes_with_argmax_infer/model.pdmodel models/runtime/1/
+mv PP_LiteSeg_B_STDC2_cityscapes_with_argmax_infer/model.pdiparams models/runtime/1/
+
+# Pull the fastdeploy image (x.y.z is the image version number; replace it with a number according to the serving docs)
+# GPU image
+docker pull registry.baidubce.com/paddlepaddle/fastdeploy:x.y.z-gpu-cuda11.4-trt8.4-21.10
+# CPU image
+docker pull registry.baidubce.com/paddlepaddle/fastdeploy:x.y.z-cpu-only-21.10
+
+# Run the container. The container is named fd_serving, and the current directory is mounted as the container's /serving directory
+nvidia-docker run -it --net=host --name fd_serving -v `pwd`/:/serving registry.baidubce.com/paddlepaddle/fastdeploy:x.y.z-gpu-cuda11.4-trt8.4-21.10 bash
+
+# Start the service (without setting the CUDA_VISIBLE_DEVICES environment variable, it will have scheduling privileges for all GPU cards)
+CUDA_VISIBLE_DEVICES=0 fastdeployserver --model-repository=/serving/models --backend-config=python,shm-default-byte-size=10485760
+```
+>> **Note**: If "Address already in use" appears, use `--grpc-port` to launch the service on a different port, and change the request port in paddleseg_grpc_client.py accordingly
+
+After the service starts successfully, there will be the following output:
+```
+......
+I0928 04:51:15.784517 206 grpc_server.cc:4117] Started GRPCInferenceService at 0.0.0.0:8001
+I0928 04:51:15.785177 206 http_server.cc:2815] Started HTTPService at 0.0.0.0:8000
+I0928 04:51:15.826578 206 http_server.cc:167] Started Metrics Service at 0.0.0.0:8002
+```
+
+
+## Client Request
+
+Execute the following commands on the physical machine to send a gRPC request and print the result
+```
+# Download the test image
+wget https://paddleseg.bj.bcebos.com/dygraph/demo/cityscapes_demo.png
+
+# Install client dependencies
+python3 -m pip install tritonclient[all]
+
+# Send the request
+python3 paddleseg_grpc_client.py
+```
+
+After the request succeeds, the segmentation result is returned in JSON format and printed out:
+```
+
+```
+
+## Modify Configs
+
+The current default configuration runs the ONNXRuntime engine on CPU. If you need to run on GPU or with other inference engines, modify the configuration in `models/runtime/config.pbtxt`; see the [configuration document](../../../../../serving/docs/zh_CN/model_configuration.md) for details
From 942cee83d7d0c94ed0598bb040e9fe7ec3e7f873 Mon Sep 17 00:00:00 2001
From: felixhjh <852142024@qq.com>
Date: Thu, 9 Feb 2023 14:04:10 +0000
Subject: [PATCH 14/18] Add decrypt function to load encrypted model

---
 fastdeploy/runtime/option_pybind.cc  |  1 +
 fastdeploy/runtime/runtime.cc        | 26 +++++++++++++++++++++++++-
 fastdeploy/runtime/runtime.h         |  3 +++
 fastdeploy/runtime/runtime_option.cc |  9 +++++++++
 python/fastdeploy/runtime.py         |  7 +++++++
 5 files changed, 45 insertions(+), 1 deletion(-)

diff --git a/fastdeploy/runtime/option_pybind.cc b/fastdeploy/runtime/option_pybind.cc
index 1c786459b..7af90d831 100644
--- a/fastdeploy/runtime/option_pybind.cc
+++ b/fastdeploy/runtime/option_pybind.cc
@@ -35,6 +35,7 @@ void BindOption(pybind11::module& m) {
       .def(pybind11::init())
       .def("set_model_path", &RuntimeOption::SetModelPath)
       .def("set_model_buffer", &RuntimeOption::SetModelBuffer)
+      .def("set_encryption_key", &RuntimeOption::SetEncryptionKey)
       .def("use_gpu", &RuntimeOption::UseGpu)
       .def("use_cpu", &RuntimeOption::UseCpu)
       .def("use_rknpu2", &RuntimeOption::UseRKNPU2)
diff --git a/fastdeploy/runtime/runtime.cc b/fastdeploy/runtime/runtime.cc
index 70714e4f0..2a00dfda4 100644
--- a/fastdeploy/runtime/runtime.cc
+++ b/fastdeploy/runtime/runtime.cc
@@ -104,7 +104,31 @@ bool AutoSelectBackend(RuntimeOption& option) {
 
 bool Runtime::Init(const RuntimeOption& _option) {
   option = _option;
-
+  if ("" != option.encryption_key_) {
+  #ifdef ENABLE_ENCRYPTION
+    if (option.model_from_memory_) {
+      option.model_file = Decrypt(option.model_file, option.encryption_key_);
+      if (!(option.params_file.empty())) {
+        option.params_file = Decrypt(option.params_file, option.encryption_key_);
+      }
+    } else {
+      std::string model_buffer = "";
+      FDASSERT(ReadBinaryFromFile(option.model_file, &model_buffer),
+               "Fail to read binary from model file");
+      option.model_file = Decrypt(model_buffer, option.encryption_key_);
+      if (!(option.params_file.empty())) {
+        std::string params_buffer = "";
+        FDASSERT(ReadBinaryFromFile(option.params_file, &params_buffer),
+                 "Fail to read binary from parameter file");
+        option.params_file = Decrypt(params_buffer, option.encryption_key_);
+      }
+      option.model_from_memory_ = true;
+    }
+  #else
+    FDERROR << "The FastDeploy didn't compile with encryption function."
+            << std::endl;
+  #endif
+  }
   // Choose default backend by model format and device if backend is not
   // specified
diff --git a/fastdeploy/runtime/runtime.h b/fastdeploy/runtime/runtime.h
index 6e7dc9629..772773007 100755
--- a/fastdeploy/runtime/runtime.h
+++ b/fastdeploy/runtime/runtime.h
@@ -23,6 +23,9 @@
 #include "fastdeploy/core/fd_tensor.h"
 #include "fastdeploy/runtime/runtime_option.h"
 #include "fastdeploy/utils/perf.h"
+#ifdef ENABLE_ENCRYPTION
+  #include "fastdeploy/encryption/include/decrypt.h"
+#endif
 
 /** \brief All C++ FastDeploy APIs are defined inside this namespace
 *
diff --git a/fastdeploy/runtime/runtime_option.cc b/fastdeploy/runtime/runtime_option.cc
index c09352d58..8568b3b7f 100644
--- a/fastdeploy/runtime/runtime_option.cc
+++ b/fastdeploy/runtime/runtime_option.cc
@@ -36,6 +36,15 @@ void RuntimeOption::SetModelBuffer(const std::string& model_buffer,
   model_from_memory_ = true;
 }
 
+void RuntimeOption::SetEncryptionKey(const std::string& encryption_key) {
+  #ifdef ENABLE_ENCRYPTION
+    encryption_key_ = encryption_key;
+  #else
+    FDERROR << "The FastDeploy didn't compile with encryption function."
+            << std::endl;
+  #endif
+}
+
 void RuntimeOption::UseGpu(int gpu_id) {
 #ifdef WITH_GPU
   device = Device::GPU;
diff --git a/python/fastdeploy/runtime.py b/python/fastdeploy/runtime.py
index cd7b6641b..1d2fc1c1d 100644
--- a/python/fastdeploy/runtime.py
+++ b/python/fastdeploy/runtime.py
@@ -187,6 +187,13 @@ class RuntimeOption:
         return self._option.set_model_buffer(model_buffer, params_buffer,
                                              model_format)
 
+    def set_encryption_key(self,
+                           encryption_key):
+        """When loading an encrypted model, encryption_key is required to decrypt the model
+        :param encryption_key: (str)The key for decrypting the model
+        """
+        return self._option.set_encryption_key(encryption_key)
+
     def use_gpu(self, device_id=0):
         """Inference with Nvidia GPU
 
From 5160771a1c95035fedc608fb4ab4325f82459f05 Mon Sep 17 00:00:00 2001
From: felixhjh <852142024@qq.com>
Date: Fri, 10 Feb 2023 03:24:45 +0000
Subject: [PATCH 15/18] Update runtime_option.h

---
 fastdeploy/runtime/runtime_option.h | 8 ++++++++
 1 file changed, 8 insertions(+)

diff --git a/fastdeploy/runtime/runtime_option.h b/fastdeploy/runtime/runtime_option.h
index 0aa6bbec8..b263f4ad3 100644
--- a/fastdeploy/runtime/runtime_option.h
+++ b/fastdeploy/runtime/runtime_option.h
@@ -59,6 +59,12 @@ struct FASTDEPLOY_DECL RuntimeOption {
                       const std::string& params_buffer = "",
                       const ModelFormat& format = ModelFormat::PADDLE);
 
+  /** \brief When loading an encrypted model, encryption_key is required to decrypt the model
+   *
+   * \param[in] encryption_key The key for decrypting the model
+   */
+  void SetEncryptionKey(const std::string& encryption_key);
+
   /// Use cpu to inference, the runtime will inference on CPU by default
   void UseCpu();
   /// Use Nvidia GPU to inference
@@ -178,6 +184,8 @@ struct FASTDEPLOY_DECL RuntimeOption {
   /// format of input model
   ModelFormat model_format = ModelFormat::PADDLE;
 
+  std::string encryption_key_ = "";
+
   // for cpu inference
   // default will let the backend choose their own default value
   int cpu_thread_num = -1;
From 9d8b08415570d7cbb14f08c1025a52d0276d974f Mon Sep 17 00:00:00 2001
From: felixhjh <852142024@qq.com>
Date: Fri, 10 Feb 2023 05:06:20 +0000
Subject: [PATCH 16/18] Update tutorials for encryption model

---
 tutorials/encrypt_model/README.md    | 46 ++++++++++++++++++++++++++
 tutorials/encrypt_model/README_CN.md | 48 ++++++++++++++++++++++++++++
 tutorials/encrypt_model/encrypt.py   | 33 +++++++++++++++++++
 3 files changed, 127 insertions(+)
 create mode 100644 tutorials/encrypt_model/README.md
 create mode 100644 tutorials/encrypt_model/README_CN.md
 create mode 100644 tutorials/encrypt_model/encrypt.py

diff --git a/tutorials/encrypt_model/README.md b/tutorials/encrypt_model/README.md
new file mode 100644
index 000000000..8a49c107c
--- /dev/null
+++ b/tutorials/encrypt_model/README.md
@@ -0,0 +1,46 @@
+English | [中文](README_CN.md)
+
+# Generating an encrypted model with FastDeploy
+
+This directory provides `encrypt.py` to quickly complete the encryption of the model and parameter files of ResNet50_vd
+
+## Encryption
+```bash
+# Download deployment example code
+git clone https://github.com/PaddlePaddle/FastDeploy.git
+cd FastDeploy/tutorials/encrypt_model
+
+# Download the ResNet50_vd model file
+wget https://bj.bcebos.com/paddlehub/fastdeploy/ResNet50_vd_infer.tgz
+tar -xvf ResNet50_vd_infer.tgz
+
+python encrypt.py --model ResNet50_vd_infer
+```
+>> **Note** After the encryption is completed, the ResNet50_vd_infer_encrypt folder will be generated, containing three files: `__model__.encrypted`, `__params__.encrypted` and `encryption_key.txt`, where `encryption_key.txt` holds the encryption key. You also need to copy the `inference_cls.yaml` configuration file from the original folder to the ResNet50_vd_infer_encrypt folder for subsequent deployment
+
+### Python encryption interface
+
+Use the encryption interface via the following settings
+```python
+import fastdeploy as fd
+import os
+# when key is not given, a key will be generated automatically;
+# otherwise, the file will be encrypted with the specified key
+encrypted_model, key = fd.encryption.encrypt(model_file.read())
+encrypted_params, key = fd.encryption.encrypt(params_file.read(), key)
+```
+
+### Deploying the encrypted model with FastDeploy (decryption)
+
+With the following settings, FastDeploy can deploy the encrypted model
+```python
+import fastdeploy as fd
+option = fd.RuntimeOption()
+option.set_encryption_key(key)
+```
+
+```C++
+fastdeploy::RuntimeOption option;
+option.SetEncryptionKey(key);
+```
+>> **Note** For more details about RuntimeOption, please refer to [RuntimeOption Python Documentation](https://www.paddlepaddle.org.cn/fastdeploy-api-doc/python/html/runtime_option.html), [RuntimeOption C++ Documentation](https://www.paddlepaddle.org.cn/fastdeploy-api-doc/cpp/html/structfastdeploy_1_1RuntimeOption.html)
\ No newline at end of file
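For a concrete end-to-end picture, here is a minimal sketch that loads the encrypted ResNet50_vd model produced by `encrypt.py` and runs one prediction. It is only a sketch under assumptions: the standard `fd.vision.classification.PaddleClasModel` API, the file names written by `encrypt.py`, the `inference_cls.yaml` copied as described in the note above, and a hypothetical input image `test.jpg`.

```python
import cv2
import fastdeploy as fd

model_dir = "ResNet50_vd_infer_encrypt"

# The key written by encrypt.py; keep it secret in real deployments.
with open(model_dir + "/encryption_key.txt") as f:
    key = f.read().strip()

option = fd.RuntimeOption()
# The runtime decrypts the model and parameter buffers during initialization.
option.set_encryption_key(key)

model = fd.vision.classification.PaddleClasModel(
    model_dir + "/__model__.encrypted",
    model_dir + "/__params__.encrypted",
    model_dir + "/inference_cls.yaml",
    runtime_option=option)

im = cv2.imread("test.jpg")  # hypothetical input image
print(model.predict(im))
```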
diff --git a/tutorials/encrypt_model/README_CN.md b/tutorials/encrypt_model/README_CN.md
new file mode 100644
index 000000000..8230f68e6
--- /dev/null
+++ b/tutorials/encrypt_model/README_CN.md
@@ -0,0 +1,48 @@
+[English](README.md) | Chinese
+
+# Generating an encrypted model with FastDeploy
+
+This directory provides `encrypt.py` to quickly complete the encryption of the ResNet50_vd model and parameter files
+
+FastDeploy supports a symmetric encryption scheme: the model is encrypted by calling the symmetric encryption algorithm (AES) in OpenSSL, and a key is produced
+
+## Encryption
+```bash
+# Download the encryption example code
+git clone https://github.com/PaddlePaddle/FastDeploy.git
+cd FastDeploy/tutorials/encrypt_model
+
+# Download the ResNet50_vd model file
+wget https://bj.bcebos.com/paddlehub/fastdeploy/ResNet50_vd_infer.tgz
+tar -xvf ResNet50_vd_infer.tgz
+
+python encrypt.py --model ResNet50_vd_infer
+```
+>> **Note** After the encryption is completed, the ResNet50_vd_infer_encrypt folder will be generated, containing three files: `__model__.encrypted`, `__params__.encrypted` and `encryption_key.txt`, where `encryption_key.txt` holds the encryption key. You also need to copy the `inference_cls.yaml` configuration file from the original folder to the ResNet50_vd_infer_encrypt folder for subsequent deployment
+
+### Python encryption interface
+
+Use the encryption interface via the following settings
+```python
+import fastdeploy as fd
+import os
+# when key is not given, a key will be generated automatically;
+# otherwise, the file will be encrypted with the specified key
+encrypted_model, key = fd.encryption.encrypt(model_file.read())
+encrypted_params, key = fd.encryption.encrypt(params_file.read(), key)
+```
+
+### Deploying the encrypted model with FastDeploy (decryption)
+
+With the following settings, FastDeploy can run inference with the encrypted model
+```python
+import fastdeploy as fd
+option = fd.RuntimeOption()
+option.set_encryption_key(key)
+```
+
+```C++
+fastdeploy::RuntimeOption option;
+option.SetEncryptionKey(key);
+```
+>> **Note** For more details about RuntimeOption, please refer to [RuntimeOption Python Documentation](https://www.paddlepaddle.org.cn/fastdeploy-api-doc/python/html/runtime_option.html), [RuntimeOption C++ Documentation](https://www.paddlepaddle.org.cn/fastdeploy-api-doc/cpp/html/structfastdeploy_1_1RuntimeOption.html)
\ No newline at end of file
diff --git a/tutorials/encrypt_model/encrypt.py b/tutorials/encrypt_model/encrypt.py
new file mode 100644
index 000000000..380509042
--- /dev/null
+++ b/tutorials/encrypt_model/encrypt.py
@@ -0,0 +1,33 @@
+import fastdeploy as fd
+import os
+
+def parse_arguments():
+    import argparse
+    import ast
+    parser = argparse.ArgumentParser()
+    parser.add_argument(
+        "--model", required=True, help="Path of model directory.")
+    return parser.parse_args()
+
+if __name__ == "__main__":
+    args = parse_arguments()
+    model_file = os.path.join(args.model, "inference.pdmodel")
+    params_file = os.path.join(args.model, "inference.pdiparams")
+    config_file = os.path.join(args.model, "inference_cls.yaml")
+    model_buffer = open(model_file, 'rb')
+    params_buffer = open(params_file, 'rb')
+    encrypted_model, key = fd.encryption.encrypt(model_buffer.read())
+    encrypted_params, key= fd.encryption.encrypt(params_buffer.read(), key)
+    encrypted_model_dir = args.model + "_encrypt"
+    model_buffer.close()
+    params_buffer.close()
+    os.mkdir(encrypted_model_dir)
+    with open(os.path.join(encrypted_model_dir, "__model__.encrypted"), "w") as f:
+        f.write(encrypted_model)
+
+    with open(os.path.join(encrypted_model_dir, "__params__.encrypted"), "w") as f:
+        f.write(encrypted_params)
+
+    with open(os.path.join(encrypted_model_dir, "encryption_key.txt"), "w") as f:
+        f.write(key)
+    print("encryption success")
\ No newline at end of file
From bf61f84d22177b8fce47036d0fa89723d0666189 Mon Sep 17 00:00:00 2001
From: felixhjh <852142024@qq.com>
Date: Mon, 13 Feb 2023 02:35:55 +0000
Subject: [PATCH 17/18] Add some args in encrypt.py

---
 tutorials/encrypt_model/README.md    |  6 ++---
 tutorials/encrypt_model/README_CN.md |  4 +--
 tutorials/encrypt_model/encrypt.py   | 38 +++++++++++++++++++---------
 3 files changed, 31 insertions(+), 17 deletions(-)

diff --git a/tutorials/encrypt_model/README.md b/tutorials/encrypt_model/README.md
index 8a49c107c..755671686 100644
--- a/tutorials/encrypt_model/README.md
+++ b/tutorials/encrypt_model/README.md
@@ -6,7 +6,7 @@ This directory provides `encrypt.py` to quickly complete the encryption of the m
 
 ## Encryption
 ```bash
-# Download deployment example code 
+# Download deployment example code
 git clone https://github.com/PaddlePaddle/FastDeploy.git
 cd FastDeploy/tutorials/encrypt_model
 
@@ -14,7 +14,7 @@ cd FastDeploy/tutorials/encrypt_model
 wget https://bj.bcebos.com/paddlehub/fastdeploy/ResNet50_vd_infer.tgz
 tar -xvf ResNet50_vd_infer.tgz
 
-python encrypt.py --model ResNet50_vd_infer
+python encrypt.py --model_file ResNet50_vd_infer/inference.pdmodel --params_file ResNet50_vd_infer/inference.pdiparams --encrypted_model_dir ResNet50_vd_infer_encrypt
 ```
 >> **Note** After the encryption is completed, the ResNet50_vd_infer_encrypt folder will be generated, containing three files: `__model__.encrypted`, `__params__.encrypted` and `encryption_key.txt`, where `encryption_key.txt` holds the encryption key. You also need to copy the `inference_cls.yaml` configuration file from the original folder to the ResNet50_vd_infer_encrypt folder for subsequent deployment
@@ -43,4 +43,4 @@ option.set_encryption_key(key)
 fastdeploy::RuntimeOption option;
 option.SetEncryptionKey(key);
 ```
->> **Note** For more details about RuntimeOption, please refer to [RuntimeOption Python Documentation](https://www.paddlepaddle.org.cn/fastdeploy-api-doc/python/html/runtime_option.html), [RuntimeOption C++ Documentation](https://www.paddlepaddle.org.cn/fastdeploy-api-doc/cpp/html/structfastdeploy_1_1RuntimeOption.html)
\ No newline at end of file
+>> **Note** For more details about RuntimeOption, please refer to [RuntimeOption Python Documentation](https://www.paddlepaddle.org.cn/fastdeploy-api-doc/python/html/runtime_option.html), [RuntimeOption C++ Documentation](https://www.paddlepaddle.org.cn/fastdeploy-api-doc/cpp/html/structfastdeploy_1_1RuntimeOption.html)
diff --git a/tutorials/encrypt_model/README_CN.md b/tutorials/encrypt_model/README_CN.md
index 8230f68e6..c2f80ffd4 100644
--- a/tutorials/encrypt_model/README_CN.md
+++ b/tutorials/encrypt_model/README_CN.md
@@ -16,7 +16,7 @@ cd FastDeploy/tutorials/encrypt_model
 wget https://bj.bcebos.com/paddlehub/fastdeploy/ResNet50_vd_infer.tgz
 tar -xvf ResNet50_vd_infer.tgz
 
-python encrypt.py --model ResNet50_vd_infer
+python encrypt.py --model_file ResNet50_vd_infer/inference.pdmodel --params_file ResNet50_vd_infer/inference.pdiparams --encrypted_model_dir ResNet50_vd_infer_encrypt
 ```
 >> **Note** After the encryption is completed, the ResNet50_vd_infer_encrypt folder will be generated, containing three files: `__model__.encrypted`, `__params__.encrypted` and `encryption_key.txt`, where `encryption_key.txt` holds the encryption key. You also need to copy the `inference_cls.yaml` configuration file from the original folder to the ResNet50_vd_infer_encrypt folder for subsequent deployment
@@ -45,4 +45,4 @@ option.set_encryption_key(key)
 fastdeploy::RuntimeOption option;
 option.SetEncryptionKey(key);
 ```
->> **Note** For more details about RuntimeOption, please refer to [RuntimeOption Python Documentation](https://www.paddlepaddle.org.cn/fastdeploy-api-doc/python/html/runtime_option.html), [RuntimeOption C++ Documentation](https://www.paddlepaddle.org.cn/fastdeploy-api-doc/cpp/html/structfastdeploy_1_1RuntimeOption.html)
\ No newline at end of file
+>> **Note** For more details about RuntimeOption, please refer to [RuntimeOption Python Documentation](https://www.paddlepaddle.org.cn/fastdeploy-api-doc/python/html/runtime_option.html), [RuntimeOption C++ Documentation](https://www.paddlepaddle.org.cn/fastdeploy-api-doc/cpp/html/structfastdeploy_1_1RuntimeOption.html)
diff --git a/tutorials/encrypt_model/encrypt.py b/tutorials/encrypt_model/encrypt.py
index 380509042..f4d80ed2f 100644
--- a/tutorials/encrypt_model/encrypt.py
+++ b/tutorials/encrypt_model/encrypt.py
@@ -1,33 +1,47 @@
 import fastdeploy as fd
 import os
 
+
 def parse_arguments():
     import argparse
     import ast
     parser = argparse.ArgumentParser()
     parser.add_argument(
-        "--model", required=True, help="Path of model directory.")
+        "--encrypted_model_dir",
+        required=False,
+        help="Path of the output directory for the encrypted model.")
+    parser.add_argument(
+        "--model_file", required=True, help="Path of the model file.")
+    parser.add_argument(
+        "--params_file",
+        required=True,
+        help="Path of the parameters file.")
     return parser.parse_args()
 
+
 if __name__ == "__main__":
     args = parse_arguments()
-    model_file = os.path.join(args.model, "inference.pdmodel")
-    params_file = os.path.join(args.model, "inference.pdiparams")
-    config_file = os.path.join(args.model, "inference_cls.yaml")
-    model_buffer = 
open(model_file, 'rb') - params_buffer = open(params_file, 'rb') + model_buffer = open(args.model_file, 'rb') + params_buffer = open(args.params_file, 'rb') encrypted_model, key = fd.encryption.encrypt(model_buffer.read()) - encrypted_params, key= fd.encryption.encrypt(params_buffer.read(), key) - encrypted_model_dir = args.model + "_encrypt" + # use the same key to encrypt parameter file + encrypted_params, key = fd.encryption.encrypt(params_buffer.read(), key) + encrypted_model_dir = "encrypt_model_dir" + if args.encrypted_model_dir: + encrypted_model_dir = args.encrypted_model_dir model_buffer.close() params_buffer.close() os.mkdir(encrypted_model_dir) - with open(os.path.join(encrypted_model_dir, "__model__.encrypted"), "w") as f: + with open(os.path.join(encrypted_model_dir, "__model__.encrypted"), + "w") as f: f.write(encrypted_model) - with open(os.path.join(encrypted_model_dir, "__params__.encrypted"), "w") as f: + with open(os.path.join(encrypted_model_dir, "__params__.encrypted"), + "w") as f: f.write(encrypted_params) - with open(os.path.join(encrypted_model_dir, "encryption_key.txt"), "w") as f: + with open(os.path.join(encrypted_model_dir, "encryption_key.txt"), + "w") as f: f.write(key) - print("encryption success") \ No newline at end of file + print("encryption key: ", key) + print("encryption success") From 45b070e4959c82e0960b83697ce9260a89951832 Mon Sep 17 00:00:00 2001 From: felixhjh <852142024@qq.com> Date: Mon, 13 Feb 2023 06:31:04 +0000 Subject: [PATCH 18/18] Format code --- fastdeploy/runtime/runtime.cc | 46 +++++++++++++++------------- fastdeploy/runtime/runtime.h | 4 +-- fastdeploy/runtime/runtime_option.cc | 12 ++++---- fastdeploy/runtime/runtime_option.h | 2 +- python/fastdeploy/runtime.py | 16 +++++----- 5 files changed, 42 insertions(+), 38 deletions(-) diff --git a/fastdeploy/runtime/runtime.cc b/fastdeploy/runtime/runtime.cc index 2a00dfda4..67774a306 100644 --- a/fastdeploy/runtime/runtime.cc +++ b/fastdeploy/runtime/runtime.cc @@ -104,30 +104,32 @@ bool AutoSelectBackend(RuntimeOption& option) { bool Runtime::Init(const RuntimeOption& _option) { option = _option; + // decrypt encrypted model if ("" != option.encryption_key_) { - #ifdef ENABLE_ENCRYPTION - if (option.model_from_memory_) { - option.model_file = Decrypt(option.model_file, option.encryption_key_); - if (!(option.params_file.empty())) { - option.params_file = Decrypt(option.params_file, option.encryption_key_); - } - } else { - std::string model_buffer = ""; - FDASSERT(ReadBinaryFromFile(option.model_file, &model_buffer), - "Fail to read binary from model file"); - option.model_file = Decrypt(model_buffer, option.encryption_key_); - if (!(option.params_file.empty())) { - std::string params_buffer = ""; - FDASSERT(ReadBinaryFromFile(option.params_file, ¶ms_buffer), - "Fail to read binary from parameter file"); - option.params_file = Decrypt(params_buffer, option.encryption_key_); - } - option.model_from_memory_ = true; +#ifdef ENABLE_ENCRYPTION + if (option.model_from_memory_) { + option.model_file = Decrypt(option.model_file, option.encryption_key_); + if (!(option.params_file.empty())) { + option.params_file = + Decrypt(option.params_file, option.encryption_key_); } - #else - FDERROR << "The FastDeploy didn't compile with encryption function." 
- << std::endl; - #endif + } else { + std::string model_buffer = ""; + FDASSERT(ReadBinaryFromFile(option.model_file, &model_buffer), + "Fail to read binary from model file"); + option.model_file = Decrypt(model_buffer, option.encryption_key_); + if (!(option.params_file.empty())) { + std::string params_buffer = ""; + FDASSERT(ReadBinaryFromFile(option.params_file, ¶ms_buffer), + "Fail to read binary from parameter file"); + option.params_file = Decrypt(params_buffer, option.encryption_key_); + } + option.model_from_memory_ = true; + } +#else + FDERROR << "The FastDeploy didn't compile with encryption function." + << std::endl; +#endif } // Choose default backend by model format and device if backend is not // specified diff --git a/fastdeploy/runtime/runtime.h b/fastdeploy/runtime/runtime.h index 772773007..fa8b8f198 100755 --- a/fastdeploy/runtime/runtime.h +++ b/fastdeploy/runtime/runtime.h @@ -24,7 +24,7 @@ #include "fastdeploy/runtime/runtime_option.h" #include "fastdeploy/utils/perf.h" #ifdef ENABLE_ENCRYPTION - #include "fastdeploy/encryption/include/decrypt.h" +#include "fastdeploy/encryption/include/decrypt.h" #endif /** \brief All C++ FastDeploy APIs are defined inside this namespace @@ -102,7 +102,7 @@ struct FASTDEPLOY_DECL Runtime { */ double GetProfileTime() { return backend_->benchmark_result_.time_of_runtime; - } + } private: void CreateOrtBackend(); diff --git a/fastdeploy/runtime/runtime_option.cc b/fastdeploy/runtime/runtime_option.cc index 8568b3b7f..d074a9603 100644 --- a/fastdeploy/runtime/runtime_option.cc +++ b/fastdeploy/runtime/runtime_option.cc @@ -37,12 +37,12 @@ void RuntimeOption::SetModelBuffer(const std::string& model_buffer, } void RuntimeOption::SetEncryptionKey(const std::string& encryption_key) { - #ifdef ENABLE_ENCRYPTION - encryption_key_ = encryption_key; - #else - FDERROR << "The FastDeploy didn't compile with encryption function." - << std::endl; - #endif +#ifdef ENABLE_ENCRYPTION + encryption_key_ = encryption_key; +#else + FDERROR << "The FastDeploy didn't compile with encryption function." 
+          << std::endl;
+#endif
 }
 
 void RuntimeOption::UseGpu(int gpu_id) {
diff --git a/fastdeploy/runtime/runtime_option.h b/fastdeploy/runtime/runtime_option.h
index b263f4ad3..a10ed9845 100644
--- a/fastdeploy/runtime/runtime_option.h
+++ b/fastdeploy/runtime/runtime_option.h
@@ -207,7 +207,7 @@ struct FASTDEPLOY_DECL RuntimeOption {
 
   // *** The APIs below are deprecated, will be removed in v1.2.0
   // *** Do not use them anymore
-  void SetPaddleMKLDNN(bool pd_mkldnn = true);
+  void SetPaddleMKLDNN(bool pd_mkldnn = true);
   void EnablePaddleToTrt();
   void DeletePaddleBackendPass(const std::string& delete_pass_name);
   void EnablePaddleLogInfo();
diff --git a/python/fastdeploy/runtime.py b/python/fastdeploy/runtime.py
index 1d2fc1c1d..a9004a15a 100644
--- a/python/fastdeploy/runtime.py
+++ b/python/fastdeploy/runtime.py
@@ -187,8 +187,7 @@ class RuntimeOption:
         return self._option.set_model_buffer(model_buffer, params_buffer,
                                              model_format)
 
-    def set_encryption_key(self,
-                           encryption_key):
+    def set_encryption_key(self, encryption_key):
         """When loading an encrypted model, encryption_key is required to decrypt the model
         :param encryption_key: (str)The key for decrypting the model
         """
@@ -590,10 +589,12 @@ class RuntimeOption:
                        replica_num=1,
                        available_memory_proportion=1.0,
                        enable_half_partial=False):
-        logging.warning("`RuntimeOption.set_ipu_config` will be deprecated in v1.2.0, please use `RuntimeOption.paddle_infer_option.set_ipu_config()` instead.")
-        self._option.paddle_infer_option.set_ipu_config(enable_fp16, replica_num,
-                                             available_memory_proportion,
-                                             enable_half_partial)
+        logging.warning(
+            "`RuntimeOption.set_ipu_config` will be deprecated in v1.2.0, please use `RuntimeOption.paddle_infer_option.set_ipu_config()` instead."
+        )
+        self._option.paddle_infer_option.set_ipu_config(
+            enable_fp16, replica_num, available_memory_proportion,
+            enable_half_partial)
 
     @property
     def poros_option(self):
@@ -664,7 +665,8 @@ class RuntimeOption:
                 continue
             if hasattr(getattr(self._option, attr), "__call__"):
                 continue
-            message += "  {} : {}\t\n".format(attr, getattr(self._option, attr))
+            message += "  {} : {}\t\n".format(attr,
+                                              getattr(self._option, attr))
         message.strip("\n")
         message += ")"
         return message
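As a closing illustration of the in-memory branch that `Runtime::Init` gained in PATCH 14 (`model_from_memory_ == true`), the following minimal sketch hands the still-encrypted buffers to the runtime via `set_model_buffer` and lets `set_encryption_key` trigger decryption during initialization. It assumes the string artifacts written by `tutorials/encrypt_model/encrypt.py` and a FastDeploy build with `ENABLE_ENCRYPTION`.

```python
import fastdeploy as fd

model_dir = "ResNet50_vd_infer_encrypt"

# encrypt.py writes the encrypted artifacts as text, so read them back as str.
with open(model_dir + "/__model__.encrypted") as f:
    model_buffer = f.read()
with open(model_dir + "/__params__.encrypted") as f:
    params_buffer = f.read()
with open(model_dir + "/encryption_key.txt") as f:
    key = f.read().strip()

option = fd.RuntimeOption()
# Hand the still-encrypted buffers to the runtime ...
option.set_model_buffer(model_buffer, params_buffer, fd.ModelFormat.PADDLE)
# ... and let Runtime::Init decrypt them with the key.
option.set_encryption_key(key)

runtime = fd.Runtime(option)
# runtime.infer({...}) can now be called exactly as with an unencrypted model.
```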