[Tutorials] Add tutorials for intel gpu (#860)

* Add tutorials for intel gpu

* fix gflags dependency

* Update README_CN.md

* Update README.md

* Update README.md
This commit is contained in:
Jason
2022-12-13 10:21:56 +08:00
committed by GitHub
parent 5fc6cf30df
commit 534d5b8c8b
20 changed files with 650 additions and 39 deletions

View File

@@ -0,0 +1,38 @@
English | [中文](README_CN.md)
# PaddleClas Python Example
Before deployment, confirm the following two steps
- 1. The software and hardware environment meet the requirements. Refer to [FastDeploy Environment Requirements](../../../docs/en/build_and_install/download_prebuilt_libraries.md)
- 2. Install FastDeploy Python wheel package. Refer to [Install FastDeploy](../../../docs/en/build_and_install/download_prebuilt_libraries.md)
```bash
# Get FastDeploy codes
git clone https://github.com/PaddlePaddle/FastDeploy.git
cd FastDeploy/tutorials/intel_gpu/python
# Download PaddleClas model and test image
wget https://bj.bcebos.com/paddlehub/fastdeploy/ResNet50_vd_infer.tgz
wget https://gitee.com/paddlepaddle/PaddleClas/raw/release/2.4/deploy/images/ImageNet/ILSVRC2012_val_00000010.jpeg
tar -xvf ResNet50_vd_infer.tgz
# Inference with CPU
python infer_resnet50.py --model ResNet50_vd_infer --image ILSVRC2012_val_00000010.jpeg --device cpu --topk 1
# Inference with Intel GPU
python infer_resnet50.py --model ResNet50_vd_infer --image ILSVRC2012_val_00000010.jpeg --device intel_gpu --topk 1
# Download PaddleDetection/PP-YOLOE model and test image
wget https://bj.bcebos.com/paddlehub/fastdeploy/ppyoloe_crn_l_300e_coco.tgz
wget https://gitee.com/paddlepaddle/PaddleDetection/raw/release/2.4/demo/000000014439.jpg
tar xvf ppyoloe_crn_l_300e_coco.tgz
# Inference with CPU
python infer_ppyoloe.py --model ppyoloe_crn_l_300e_coco --image 000000014439.jpg --device cpu
# Inference with Intel GPU
python infer_ppyoloe.py --model ppyoloe_crn_l_300e_coco --image 000000014439.jpg --device intel_gpu
```

View File

@@ -0,0 +1,37 @@
[English](README.md) | 中文
# PaddleClas Python Example
在部署前,需确认以下两个步骤
- 1. 软硬件环境满足要求,参考[FastDeploy环境要求](../../../docs/cn/build_and_install/download_prebuilt_libraries.md)
- 2. FastDeploy Python whl包安装参考[FastDeploy Python安装](../../../docs/cn/build_and_install/download_prebuilt_libraries.md)
```bash
# Get FastDeploy codes
git clone https://github.com/PaddlePaddle/FastDeploy.git
cd FastDeploy/tutorials/intel_gpu/python
# Download model and test image
wget https://bj.bcebos.com/paddlehub/fastdeploy/ResNet50_vd_infer.tgz
tar -xvf ResNet50_vd_infer.tgz
wget https://gitee.com/paddlepaddle/PaddleClas/raw/release/2.4/deploy/images/ImageNet/ILSVRC2012_val_00000010.jpeg
# Inference with CPU
python infer.py --model ResNet50_vd_infer --image ILSVRC2012_val_00000010.jpeg --device cpu --topk 1
# Inference with Intel GPU
python infer.py --model ResNet50_vd_infer --image ILSVRC2012_val_00000010.jpeg --device intel_gpu --topk 1
# Download PaddleDetection/PP-YOLOE model and test image
wget https://bj.bcebos.com/paddlehub/fastdeploy/ppyoloe_crn_l_300e_coco.tgz
wget https://gitee.com/paddlepaddle/PaddleDetection/raw/release/2.4/demo/000000014439.jpg
tar xvf ppyoloe_crn_l_300e_coco.tgz
# Inference with CPU
python infer_ppyoloe.py --model ppyoloe_crn_l_300e_coco --image 000000014439.jpg --device cpu
# Inference with Intel GPU
python infer_ppyoloe.py --model ppyoloe_crn_l_300e_coco --image 000000014439.jpg --device intel_gpu
```

View File

@@ -0,0 +1,65 @@
import fastdeploy as fd
import cv2
import os
import time
def parse_arguments():
    """Parse command line arguments for the PP-YOLOE demo.

    Returns:
        argparse.Namespace with:
            model (str): path of the exported PP-YOLOE model directory.
            image (str): path of the test image file.
            device (str): inference device, 'cpu' (default) or 'intel_gpu'.
    """
    # Local import keeps the dependency scoped to argument parsing,
    # matching the original script's style.
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model", required=True, help="Path of PP-YOLOE model.")
    parser.add_argument(
        "--image", type=str, required=True, help="Path of test image file.")
    parser.add_argument(
        "--device",
        type=str,
        default='cpu',
        help="Type of inference device, support 'cpu' or 'intel_gpu'.")
    return parser.parse_args()
def build_option(args):
    """Build a FastDeploy RuntimeOption for PP-YOLOE on the requested device.

    Args:
        args: Parsed CLI arguments; ``args.device`` must be 'cpu' or
            'intel_gpu' (case-insensitive).

    Returns:
        fd.RuntimeOption using the OpenVINO backend. For 'intel_gpu' the
        device is set to HETERO:GPU,CPU with fixed input shapes, and the
        MulticlassNms operator is pinned to CPU.

    Raises:
        ValueError: If ``args.device`` is not 'cpu' or 'intel_gpu'.
    """
    device = args.device.lower()
    # Validate input with an explicit raise: `assert` is stripped when
    # Python runs with -O, so it must not guard user input.
    if device not in ("cpu", "intel_gpu"):
        raise ValueError("--device only support ['cpu', 'intel_gpu']")
    option = fd.RuntimeOption()
    option.use_openvino_backend()
    if device == "intel_gpu":
        # HETERO lets OpenVINO fall back to CPU for ops the GPU plugin
        # cannot run, while keeping the rest on the Intel GPU.
        option.set_openvino_device("HETERO:GPU,CPU")
        # Fixed shapes for the model inputs — presumably required by the
        # OpenVINO GPU plugin for this model; confirm against FastDeploy docs.
        option.set_openvino_shape_info({
            "image": [1, 3, 640, 640],
            "scale_factor": [1, 2]
        })
        # Keep NMS on CPU; it is routed there explicitly for the HETERO setup.
        option.set_openvino_cpu_operators(["MulticlassNms"])
    return option
# ---- Script entry: benchmark PP-YOLOE inference and save a visualization ----
args = parse_arguments()
runtime_option = build_option(args)

# The exported Paddle model directory contains these three files.
model_path = os.path.join(args.model, "model.pdmodel")
params_path = os.path.join(args.model, "model.pdiparams")
cfg_path = os.path.join(args.model, "infer_cfg.yml")

model = fd.vision.detection.PPYOLOE(
    model_path, params_path, cfg_path, runtime_option=runtime_option)

image = cv2.imread(args.image)

# Warm up so one-off initialization cost is excluded from the timing below.
print("Warmup 20 times...")
for _ in range(20):
    result = model.predict(image)

print("Counting time...")
begin = time.time()
for _ in range(50):
    result = model.predict(image)
elapsed_ms = (time.time() - begin) * 1000
print(f"Elapsed time: {elapsed_ms}ms")

# Draw detections above the confidence threshold and save them to disk.
annotated = fd.vision.vis_detection(image, result, score_threshold=0.5)
cv2.imwrite("visualized_result.jpg", annotated)
print("Visualized result save in ./visualized_result.jpg")

View File

@@ -0,0 +1,61 @@
import fastdeploy as fd
import cv2
import os
import time
def parse_arguments():
    """Parse command line arguments for the PaddleClas demo.

    Returns:
        argparse.Namespace with:
            model (str): path of the exported PaddleClas model directory.
            image (str): path of the test image file.
            topk (int): number of top classification results to return.
            device (str): inference device, 'cpu' (default) or 'intel_gpu'.
    """
    # Local import keeps the dependency scoped to argument parsing,
    # matching the original script's style.
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model", required=True, help="Path of PaddleClas model.")
    parser.add_argument(
        "--image", type=str, required=True, help="Path of test image file.")
    parser.add_argument(
        "--topk", type=int, default=1, help="Return topk results.")
    parser.add_argument(
        "--device",
        type=str,
        default='cpu',
        help="Type of inference device, support 'cpu' or 'intel_gpu'.")
    return parser.parse_args()
def build_option(args):
    """Build a FastDeploy RuntimeOption for PaddleClas on the requested device.

    Args:
        args: Parsed CLI arguments; ``args.device`` must be 'cpu' or
            'intel_gpu' (case-insensitive).

    Returns:
        fd.RuntimeOption using the OpenVINO backend; for 'intel_gpu' the
        OpenVINO device is set to "GPU" with a fixed input shape.

    Raises:
        ValueError: If ``args.device`` is not 'cpu' or 'intel_gpu'.
    """
    device = args.device.lower()
    # Validate input with an explicit raise: `assert` is stripped when
    # Python runs with -O, so it must not guard user input.
    if device not in ("cpu", "intel_gpu"):
        raise ValueError("--device only support ['cpu', 'intel_gpu']")
    option = fd.RuntimeOption()
    option.use_openvino_backend()
    if device == "intel_gpu":
        option.set_openvino_device("GPU")
        # Fixed NCHW input shape — presumably required by the OpenVINO GPU
        # plugin for this model; confirm against FastDeploy docs.
        option.set_openvino_shape_info({"inputs": [1, 3, 224, 224]})
    return option
# ---- Script entry: benchmark ResNet50_vd classification inference ----
args = parse_arguments()
runtime_option = build_option(args)

# The exported Paddle model directory contains these three files.
model_path = os.path.join(args.model, "inference.pdmodel")
params_path = os.path.join(args.model, "inference.pdiparams")
cfg_path = os.path.join(args.model, "inference_cls.yaml")

model = fd.vision.classification.PaddleClasModel(
    model_path, params_path, cfg_path, runtime_option=runtime_option)

image = cv2.imread(args.image)

# Warm up so one-off initialization cost is excluded from the timing below.
print("Warmup 20 times...")
for _ in range(20):
    result = model.predict(image, args.topk)

print("Counting time...")
begin = time.time()
for _ in range(50):
    result = model.predict(image, args.topk)
elapsed_ms = (time.time() - begin) * 1000
print(f"Elapsed time: {elapsed_ms}ms")

print(result)