diff --git a/tests/models/runtime_config.py b/tests/models/runtime_config.py new file mode 100644 index 000000000..12383a7c2 --- /dev/null +++ b/tests/models/runtime_config.py @@ -0,0 +1,4 @@ +import fastdeploy as fd + + +test_option = fd.RuntimeOption() diff --git a/tests/models/test_pfld.py b/tests/models/test_pfld.py new file mode 100644 index 000000000..ef1ba448e --- /dev/null +++ b/tests/models/test_pfld.py @@ -0,0 +1,40 @@ +# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import fastdeploy as fd +import cv2 +import os +import numpy as np +import runtime_config as rc + +def test_facealignment_pfld(): + model_url = "https://bj.bcebos.com/paddlehub/fastdeploy/pfld-106-lite.onnx" + input_url = "https://bj.bcebos.com/paddlehub/fastdeploy/facealign_input.png" + output_url = "https://bj.bcebos.com/paddlehub/fastdeploy/result_landmarks.npy" + fd.download(model_url, ".") + fd.download(input_url, ".") + fd.download(output_url, ".") + model_path = "pfld-106-lite.onnx" + # use ORT + model = fd.vision.facealign.PFLD(model_path, runtime_option=rc.test_option) + + # compare diff + im = cv2.imread("./facealign_input.png") + result = model.predict(im.copy()) + expect = np.load("./result_landmarks.npy") + + diff = np.fabs(np.array(result.landmarks) - expect) + thres = 1e-04 + assert diff.max() < thres, "The diff is %f, which is bigger than %f" % ( + diff.max(), thres) diff --git a/tests/models/test_ppmatting.py b/tests/models/test_ppmatting.py new file mode 100644 index 000000000..78a085a5f --- /dev/null +++ b/tests/models/test_ppmatting.py @@ -0,0 +1,105 @@ +# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+
+import fastdeploy as fd
+import cv2
+import os
+import pickle
+import numpy as np
+import runtime_config as rc
+
+def test_matting_ppmatting():
+    model_url = "https://bj.bcebos.com/paddlehub/fastdeploy/PP-Matting-512.tgz"
+    input_url = "https://bj.bcebos.com/paddlehub/fastdeploy/matting_input.jpg"
+    fd.download_and_decompress(model_url, ".")
+    fd.download(input_url, ".")
+    model_path = "./PP-Matting-512"
+    model_file = os.path.join(model_path, "model.pdmodel")
+    params_file = os.path.join(model_path, "model.pdiparams")
+    config_file = os.path.join(model_path, "deploy.yaml")
+    model = fd.vision.matting.PPMatting(
+        model_file, params_file, config_file, runtime_option=rc.test_option)
+
+    # predict the matting result for the image
+    im = cv2.imread("./matting_input.jpg")
+    result = model.predict(im.copy())
+    pkl_url = "https://bj.bcebos.com/fastdeploy/tests/ppmatting_result.pkl"
+    if pkl_url:
+        fd.download(pkl_url, ".")
+    with open("./ppmatting_result.pkl", "rb") as f:
+        baseline = pickle.load(f)
+
+    diff = np.fabs(np.array(result.alpha) - np.array(baseline))
+    thres = 1e-05
+    assert diff.max() < thres, "The diff is %f, which is bigger than %f" % (
+        diff.max(), thres)
+
+
+def test_matting_ppmodnet():
+    model_url = "https://bj.bcebos.com/paddlehub/fastdeploy/PPModnet_MobileNetV2.tgz"
+    input_url = "https://bj.bcebos.com/paddlehub/fastdeploy/matting_input.jpg"
+    fd.download_and_decompress(model_url, ".")
+    fd.download(input_url, ".")
+    model_path = "./PPModnet_MobileNetV2"
+    model_file = os.path.join(model_path, "model.pdmodel")
+    params_file = os.path.join(model_path, "model.pdiparams")
+    config_file = os.path.join(model_path, "deploy.yaml")
+    model = fd.vision.matting.PPMatting(
+        model_file, params_file, config_file, runtime_option=rc.test_option)
+
+    # predict the matting result for the image
+    im = cv2.imread("./matting_input.jpg")
+    result = model.predict(im.copy())
+
+    pkl_url = "https://bj.bcebos.com/fastdeploy/tests/ppmodnet_result.pkl"
+    if pkl_url:
+        fd.download(pkl_url, ".")
+    with open("./ppmodnet_result.pkl", "rb") as f:
+        baseline = pickle.load(f)
+
+    diff = np.fabs(np.array(result.alpha) - np.array(baseline))
+    thres = 1e-05
+    assert diff.max() < thres, "The diff is %f, which is bigger than %f" % (
+        diff.max(), thres)
+
+
+def test_matting_pphumanmatting():
+    model_url = "https://bj.bcebos.com/paddlehub/fastdeploy/PPHumanMatting.tgz"
+    input_url = "https://bj.bcebos.com/paddlehub/fastdeploy/matting_input.jpg"
+    fd.download_and_decompress(model_url, ".")
+    fd.download(input_url, ".")
+    model_path = "./PPHumanMatting"
+    # configure the runtime and load the model
+    runtime_option = fd.RuntimeOption()
+    model_file = os.path.join(model_path, "model.pdmodel")
+    params_file = os.path.join(model_path, "model.pdiparams")
+    config_file = os.path.join(model_path, "deploy.yaml")
+    model = fd.vision.matting.PPMatting(
+        model_file, params_file, config_file, runtime_option=rc.test_option)
+
+    # predict the matting result for the image
+    im = cv2.imread("./matting_input.jpg")
+    result = model.predict(im.copy())
+
+    pkl_url = "https://bj.bcebos.com/fastdeploy/tests/pphumanmatting_result.pkl"
+    if pkl_url:
+        fd.download(pkl_url, ".")
+
+    with open("./pphumanmatting_result.pkl", "rb") as f:
+        baseline = pickle.load(f)
+
+    diff = np.fabs(np.array(result.alpha) - np.array(baseline))
+    thres = 1e-05
+    assert diff.max() < thres, "The diff is %f, which is bigger than %f" % (
+        diff.max(), thres)
diff --git a/tests/models/test_pptinypose.py b/tests/models/test_pptinypose.py
new file mode 100644
index 000000000..95cacdd5e
--- /dev/null
+++ b/tests/models/test_pptinypose.py
@@ -0,0 +1,98 @@
+# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import fastdeploy as fd
+import cv2
+import os
+import numpy as np
+import runtime_config as rc
+
+def test_keypointdetection_pptinypose():
+    pp_tinypose_model_url = "https://bj.bcebos.com/fastdeploy/tests/PP_TinyPose_256x192_test.tgz"
+    fd.download_and_decompress(pp_tinypose_model_url, ".")
+    model_path = "./PP_TinyPose_256x192_test"
+    # load the model with the shared runtime option
+    model_file = os.path.join(model_path, "model.pdmodel")
+    params_file = os.path.join(model_path, "model.pdiparams")
+    config_file = os.path.join(model_path, "infer_cfg.yml")
+    image_file = os.path.join(model_path, "hrnet_demo.jpg")
+    baseline_file = os.path.join(model_path, "baseline.npy")
+    model = fd.vision.keypointdetection.PPTinyPose(
+        model_file, params_file, config_file, runtime_option=rc.test_option)
+
+    # predict keypoints on the image
+    im = cv2.imread(image_file)
+    result = model.predict(im)
+    result = np.concatenate(
+        (np.array(result.keypoints), np.array(result.scores)[:, np.newaxis]),
+        axis=1)
+    baseline = np.load(baseline_file)
+    diff = np.fabs(result - np.array(baseline))
+    thres = 1e-05
+    assert diff.max() < thres, "The diff is %f, which is bigger than %f" % (
+        diff.max(), thres)
+    print("No diff")
+
+
+def test_keypointdetection_det_keypoint_unite():
+    det_keypoint_unite_model_url = "https://bj.bcebos.com/fastdeploy/tests/PicoDet_320x320_TinyPose_256x192_test.tgz"
+    fd.download_and_decompress(det_keypoint_unite_model_url, ".")
+    model_path = "./PicoDet_320x320_TinyPose_256x192_test"
+    # configure the runtime and load the models
+    runtime_option = fd.RuntimeOption()
+    tinypose_model_file = os.path.join(
+        model_path, "PP_TinyPose_256x192_infer/model.pdmodel")
+    tinypose_params_file = os.path.join(
+        model_path, "PP_TinyPose_256x192_infer/model.pdiparams")
+    tinypose_config_file = os.path.join(
+        model_path, "PP_TinyPose_256x192_infer/infer_cfg.yml")
+    picodet_model_file = os.path.join(
+        model_path, "PP_PicoDet_V2_S_Pedestrian_320x320_infer/model.pdmodel")
+    picodet_params_file = os.path.join(
+        model_path, "PP_PicoDet_V2_S_Pedestrian_320x320_infer/model.pdiparams")
+    picodet_config_file = os.path.join(
+        model_path, "PP_PicoDet_V2_S_Pedestrian_320x320_infer/infer_cfg.yml")
+    image_file = os.path.join(model_path, "000000018491.jpg")
+    # image_file = os.path.join(model_path, "hrnet_demo.jpg")
+
+    baseline_file = os.path.join(model_path, "baseline.npy")
+
+    tinypose_model = fd.vision.keypointdetection.PPTinyPose(
+        tinypose_model_file,
+        tinypose_params_file,
+        tinypose_config_file,
+        runtime_option=runtime_option)
+
+    det_model = fd.vision.detection.PicoDet(
+        picodet_model_file,
+        picodet_params_file,
+        picodet_config_file,
+        runtime_option=rc.test_option)
+
+    # predict keypoints on the image
+    im = cv2.imread(image_file)
+    pipeline = fd.pipeline.PPTinyPose(det_model, tinypose_model)
+    pipeline.detection_model_score_threshold = 0.5
+    result = pipeline.predict(im)
+    print(result)
+    result = np.concatenate(
+        (np.array(result.keypoints), np.array(result.scores)[:, np.newaxis]),
+        axis=1)
+    print(result)
+    baseline = np.load(baseline_file)
+    diff = np.fabs(result - np.array(baseline))
+    thres = 1e-05
+    assert diff.max() < thres, "The diff is %f, which is bigger than %f" % (
+        diff.max(), thres)
+    print("No diff")
diff --git a/tests/models/test_pptracking.py b/tests/models/test_pptracking.py
new file mode 100644
index 000000000..42010705c
--- /dev/null
+++ b/tests/models/test_pptracking.py
@@ -0,0 +1,53 @@
+# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import fastdeploy as fd
+import cv2
+import os
+import numpy as np
+import pickle
+import runtime_config as rc
+
+
+def test_pptracking():
+    model_url = "https://bj.bcebos.com/fastdeploy/tests/pptracking.tgz"
+    input_url = "https://bj.bcebos.com/paddlehub/fastdeploy/person.mp4"
+    fd.download_and_decompress(model_url, ".")
+    fd.download(input_url, ".")
+    model_path = "pptracking/fairmot_hrnetv2_w18_dlafpn_30e_576x320"
+    # use default backend
+    model_file = os.path.join(model_path, "model.pdmodel")
+    params_file = os.path.join(model_path, "model.pdiparams")
+    config_file = os.path.join(model_path, "infer_cfg.yml")
+    model = fd.vision.tracking.PPTracking(model_file, params_file, config_file, runtime_option=rc.test_option)
+    cap = cv2.VideoCapture("./person.mp4")
+    frame_id = 0
+    while True:
+        _, frame = cap.read()
+        if frame is None:
+            break
+        result = model.predict(frame)
+        # compare diff
+        expect = pickle.load(open("pptracking/frame" + str(frame_id) + ".pkl", "rb"))
+        diff_boxes = np.fabs(np.array(expect["boxes"]) - np.array(result.boxes))
+        diff_scores = np.fabs(np.array(expect["scores"]) - np.array(result.scores))
+        diff = max(diff_boxes.max(), diff_scores.max())
+        thres = 1e-05
+        assert diff < thres, "The label diff is %f, which is bigger than %f" % (diff, thres)
+        frame_id = frame_id + 1
+        cv2.waitKey(30)
+        if frame_id >= 10:
+            cap.release()
+            cv2.destroyAllWindows()
+            break
diff --git a/tests/models/test_quantize_diff.py b/tests/models/test_quantize_diff.py
new file mode 100755
index 000000000..8bc7b396a
--- /dev/null
+++ b/tests/models/test_quantize_diff.py
@@ -0,0 +1,96 @@
+# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+ +import fastdeploy as fd +import cv2 +import os +import pickle +import numpy as np + +model_url = "https://bj.bcebos.com/fastdeploy/tests/yolov6_quant.tgz" +fd.download_and_decompress(model_url, ".") + + +def test_quant_mkldnn(): + model_path = "./yolov6_quant" + model_file = os.path.join(model_path, "model.pdmodel") + params_file = os.path.join(model_path, "model.pdiparams") + + input_file = os.path.join(model_path, "input.npy") + output_file = os.path.join(model_path, "mkldnn_output.npy") + + option = fd.RuntimeOption() + option.use_paddle_backend() + option.use_cpu() + + option.set_model_path(model_file, params_file) + runtime = fd.Runtime(option) + input_name = runtime.get_input_info(0).name + data = np.load(input_file) + outs = runtime.infer({input_name: data}) + expected = np.load(output_file) + diff = np.fabs(outs[0] - expected) + thres = 1e-05 + assert diff.max() < thres, "The diff is %f, which is bigger than %f" % ( + diff.max(), thres) + + +def test_quant_ort(): + model_path = "./yolov6_quant" + model_file = os.path.join(model_path, "model.pdmodel") + params_file = os.path.join(model_path, "model.pdiparams") + + input_file = os.path.join(model_path, "input.npy") + output_file = os.path.join(model_path, "ort_output.npy") + + option = fd.RuntimeOption() + option.use_ort_backend() + option.use_cpu() + + option.set_ort_graph_opt_level(1) + + option.set_model_path(model_file, params_file) + runtime = fd.Runtime(option) + input_name = runtime.get_input_info(0).name + data = np.load(input_file) + outs = runtime.infer({input_name: data}) + expected = np.load(output_file) + diff = np.fabs(outs[0] - expected) + thres = 1e-05 + assert diff.max() < thres, "The diff is %f, which is bigger than %f" % ( + diff.max(), thres) + + +def test_quant_trt(): + model_path = "./yolov6_quant" + model_file = os.path.join(model_path, "model.pdmodel") + params_file = os.path.join(model_path, "model.pdiparams") + + input_file = os.path.join(model_path, "input.npy") + output_file = os.path.join(model_path, "trt_output.npy") + + option = fd.RuntimeOption() + option.use_trt_backend() + option.use_gpu() + + option.set_model_path(model_file, params_file) + runtime = fd.Runtime(option) + input_name = runtime.get_input_info(0).name + data = np.load(input_file) + outs = runtime.infer({input_name: data}) + expected = np.load(output_file) + diff = np.fabs(outs[0] - expected) + thres = 1e-05 + assert diff.max() < thres, "The diff is %f, which is bigger than %f" % ( + diff.max(), thres) diff --git a/tests/models/test_rvm.py b/tests/models/test_rvm.py new file mode 100644 index 000000000..23fd544c6 --- /dev/null +++ b/tests/models/test_rvm.py @@ -0,0 +1,54 @@ +# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+
+import fastdeploy as fd
+import cv2
+import os
+import pickle
+import numpy as np
+import runtime_config as rc
+
+def test_matting_rvm_cpu():
+    model_url = "https://bj.bcebos.com/paddlehub/fastdeploy/rvm.tgz"
+    input_url = "https://bj.bcebos.com/paddlehub/fastdeploy/video.mp4"
+    fd.download_and_decompress(model_url, ".")
+    fd.download(input_url, ".")
+    model_path = "rvm/rvm_mobilenetv3_fp32.onnx"
+    # use the shared test runtime option
+    runtime_option = rc.test_option
+    model = fd.vision.matting.RobustVideoMatting(
+        model_path, runtime_option=runtime_option)
+
+    cap = cv2.VideoCapture("./video.mp4")
+
+    frame_id = 0
+    while True:
+        _, frame = cap.read()
+        if frame is None:
+            break
+        result = model.predict(frame)
+        # compare diff
+        expect_alpha = np.load("rvm/result_alpha_" + str(frame_id) + ".npy")
+        result_alpha = np.array(result.alpha).reshape(1920, 1080)
+        diff = np.fabs(expect_alpha - result_alpha)
+        thres = 1e-05
+        assert diff.max(
+        ) < thres, "The label diff is %f, which is bigger than %f" % (
+            diff.max(), thres)
+        frame_id = frame_id + 1
+        cv2.waitKey(30)
+        if frame_id >= 10:
+            cap.release()
+            cv2.destroyAllWindows()
+            break
diff --git a/tests/models/test_yolov5cls.py b/tests/models/test_yolov5cls.py
new file mode 100755
index 000000000..aeafad519
--- /dev/null
+++ b/tests/models/test_yolov5cls.py
@@ -0,0 +1,47 @@
+# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import fastdeploy as fd
+import cv2
+import os
+import pickle
+import numpy as np
+import runtime_config as rc
+
+def test_classification_yolov5cls():
+    model_url = "https://bj.bcebos.com/paddlehub/fastdeploy/yolov5n-cls.tgz"
+    input_url = "https://gitee.com/paddlepaddle/PaddleClas/raw/release/2.4/deploy/images/ImageNet/ILSVRC2012_val_00000010.jpeg"
+    fd.download_and_decompress(model_url, ".")
+    fd.download(input_url, ".")
+    model_path = "yolov5n-cls/yolov5n-cls.onnx"
+    # use the shared test runtime option
+    model = fd.vision.classification.YOLOv5Cls(
+        model_path, runtime_option=rc.test_option)
+
+    # compare diff
+    im = cv2.imread("./ILSVRC2012_val_00000010.jpeg")
+    result = model.predict(im.copy(), topk=5)
+    with open("yolov5n-cls/result.pkl", "rb") as f:
+        expect = pickle.load(f)
+
+    diff_label = np.fabs(
+        np.array(result.label_ids) - np.array(expect["labels"]))
+    diff_score = np.fabs(np.array(result.scores) - np.array(expect["scores"]))
+    thres = 1e-05
+    assert diff_label.max(
+    ) < thres, "The label diff is %f, which is bigger than %f" % (
+        diff_label.max(), thres)
+    assert diff_score.max(
+    ) < thres, "The score diff is %f, which is bigger than %f" % (
+        diff_score.max(), thres)
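
All of the tests above route their models through the shared `test_option` defined in tests/models/runtime_config.py, which is left at FastDeploy's defaults. Below is a minimal sketch of how that shared option might be pointed at a specific backend or device before the suite runs; the particular backend and device calls chosen here are illustrative assumptions, not something the tests require.

    import fastdeploy as fd

    # shared runtime option imported by the tests as `runtime_config.test_option`
    test_option = fd.RuntimeOption()
    # assumed example configuration: pick one backend and one device
    test_option.use_ort_backend()  # alternatively use_paddle_backend() or use_trt_backend()
    test_option.use_cpu()          # or use_gpu() when a CUDA device is available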
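
Each test also repeats the same compare-against-baseline pattern: take the absolute difference with np.fabs, then assert its maximum stays under a threshold. A small helper along these lines could factor that out; the function name and its placement are assumptions made only for illustration.

    import numpy as np

    def assert_close_to_baseline(result, baseline, thres=1e-05, name="diff"):
        # elementwise absolute difference between the prediction and the stored baseline
        diff = np.fabs(np.array(result) - np.array(baseline))
        assert diff.max() < thres, "The %s is %f, which is bigger than %f" % (
            name, diff.max(), thres)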