Mirror of https://github.com/PaddlePaddle/FastDeploy.git, synced 2025-10-26 10:00:33 +08:00
[Backend] support ipu in paddle inference backend. (#437)
* feat(ipu): add ipu support for paddle_infer backend.
* fix(): remove unused env.
* fix(ipu): simplify user API for IPU.
* fix(cmake): fix merge conflict error in CMakeList.

Co-authored-by: Jason <jiangjiajun@baidu.com>
@@ -23,6 +23,8 @@ python infer.py --model ResNet50_vd_infer --image ILSVRC2012_val_00000010.jpeg -
 python infer.py --model ResNet50_vd_infer --image ILSVRC2012_val_00000010.jpeg --device gpu --topk 1
 # Use TensorRT inference on GPU (Note: the first TensorRT run serializes the model, which takes some time; please wait patiently)
 python infer.py --model ResNet50_vd_infer --image ILSVRC2012_val_00000010.jpeg --device gpu --use_trt True --topk 1
+# IPU inference (Note: the first IPU run serializes the model, which takes some time; please wait patiently)
+python infer.py --model ResNet50_vd_infer --image ILSVRC2012_val_00000010.jpeg --device ipu --topk 1
 ```
 
 After the run completes, the returned result is as follows
@@ -17,7 +17,7 @@ def parse_arguments():
         "--device",
         type=str,
         default='cpu',
-        help="Type of inference device, support 'cpu' or 'gpu'.")
+        help="Type of inference device, support 'cpu' or 'gpu' or 'ipu'.")
     parser.add_argument(
         "--use_trt",
         type=ast.literal_eval,
@@ -32,6 +32,9 @@ def build_option(args):
     if args.device.lower() == "gpu":
         option.use_gpu()
 
+    if args.device.lower() == "ipu":
+        option.use_ipu()
+
     if args.use_trt:
         option.use_trt_backend()
     return option
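Taken together, the diff lets the example script target the IPU end to end: `--device ipu` is accepted by the argument parser and mapped to `option.use_ipu()` in `build_option`. Below is a minimal sketch of that flow without the argparse plumbing, assuming the FastDeploy Python API already used by the ResNet50_vd example (`fd.RuntimeOption`, `fd.vision.classification.PaddleClasModel`); only `use_ipu()` is introduced by this commit, the rest follows the unmodified example and the file paths are those of the downloadable model archive.

```python
import cv2
import fastdeploy as fd

# Build a runtime option that selects the IPU device; this mirrors what
# build_option() does for `--device ipu` in the patched infer.py.
option = fd.RuntimeOption()
option.use_ipu()

# Load the ResNet50_vd_infer model from the classification example.
model = fd.vision.classification.PaddleClasModel(
    "ResNet50_vd_infer/inference.pdmodel",
    "ResNet50_vd_infer/inference.pdiparams",
    "ResNet50_vd_infer/inference_cls.yaml",
    runtime_option=option)

im = cv2.imread("ILSVRC2012_val_00000010.jpeg")
result = model.predict(im, 1)  # topk=1, as in the README commands
print(result)
```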