[Backend] support ipu in paddle inference backend. (#437)

* feat(ipu): add ipu support for paddle_infer backend.

* fix(): remove unused env.

* fix(ipu): simplify user API for IPU.

* fix(cmake): fix merge conflict error in CMakeLists.txt.

Co-authored-by: Jason <jiangjiajun@baidu.com>
Author: czr-gc
Date: 2022-10-30 18:59:59 +08:00
Committed by: GitHub
Parent: ee2c6136fc
Commit: ede59af857
23 changed files with 457 additions and 115 deletions


@@ -23,6 +23,8 @@ python infer.py --model ResNet50_vd_infer --image ILSVRC2012_val_00000010.jpeg -
python infer.py --model ResNet50_vd_infer --image ILSVRC2012_val_00000010.jpeg --device gpu --topk 1
# Use TensorRT for inference on GPU (note: the first TensorRT run serializes the model, which takes some time; please be patient)
python infer.py --model ResNet50_vd_infer --image ILSVRC2012_val_00000010.jpeg --device gpu --use_trt True --topk 1
# IPU inference (note: the first IPU run serializes the model, which takes some time; please be patient)
python infer.py --model ResNet50_vd_infer --image ILSVRC2012_val_00000010.jpeg --device ipu --topk 1
```
After the run completes, the returned result is as follows:


@@ -17,7 +17,7 @@ def parse_arguments():
"--device",
type=str,
default='cpu',
help="Type of inference device, support 'cpu' or 'gpu'.")
help="Type of inference device, support 'cpu' or 'gpu' or 'ipu'.")
parser.add_argument(
"--use_trt",
type=ast.literal_eval,
@@ -32,6 +32,9 @@ def build_option(args):
if args.device.lower() == "gpu":
option.use_gpu()
if args.device.lower() == "ipu":
option.use_ipu()
if args.use_trt:
option.use_trt_backend()
return option
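
For context, here is a minimal, self-contained sketch of the option-building path in infer.py after this change. It assumes FastDeploy's Python API (`fastdeploy.RuntimeOption` with `use_gpu()`, `use_ipu()`, and `use_trt_backend()`, all of which appear in the diffs above); everything else is illustrative rather than a verbatim copy of the file.

```python
# Sketch of infer.py's argument parsing and runtime-option construction
# after IPU support was added. Assumes the fastdeploy package; the API
# calls used here are the ones shown in the diffs above.
import argparse
import ast

import fastdeploy as fd


def parse_arguments():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--device",
        type=str,
        default="cpu",
        help="Type of inference device, support 'cpu' or 'gpu' or 'ipu'.")
    parser.add_argument(
        "--use_trt",
        type=ast.literal_eval,
        default=False,
        help="Whether to use TensorRT inference on GPU.")
    return parser.parse_args()


def build_option(args):
    option = fd.RuntimeOption()
    if args.device.lower() == "gpu":
        option.use_gpu()
    if args.device.lower() == "ipu":
        # Select the IPU device. As the README note above says, the first
        # IPU run serializes the model, so expect extra startup time.
        option.use_ipu()
    if args.use_trt:
        option.use_trt_backend()
    return option
```

The resulting option would then be passed to a model constructor (e.g. via a `runtime_option=` keyword, as is the FastDeploy convention), which is how the `--device ipu` command shown earlier takes effect.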