[XPU] Support XPU via Paddle Inference backend (#1987)

* [backend] Support XPU via Paddle Inference backend

* [backend] Support XPU via Paddle Inference backend

* [backend] Support XPU via Paddle Inference backend

* [XPU] support XPU benchmark via paddle inference

* [XPU] support XPU benchmark via paddle inference

* [benchmark] add xpu paddle h2d config files
This commit is contained in:
DefTruth
2023-05-25 14:13:40 +08:00
committed by GitHub
parent 24f32d10a7
commit 49c033a828
16 changed files with 262 additions and 57 deletions

View File

@@ -0,0 +1,14 @@
device: xpu
device_id: 0
cpu_thread_nums: 1
warmup: 200
repeat: 1000
backend: paddle
profile_mode: runtime
include_h2d_d2h: true
use_fp16: false
collect_memory_info: false
sampling_interval: 1
precision_compare: false
xpu_l3_cache: 62914560
result_path: benchmark_xpu_paddle_fp32_l3.txt # NOTE(review): same result_path as the h2d-disabled l3 config — runs may overwrite each other's results; confirm this is intended

View File

@@ -0,0 +1,14 @@
device: xpu
device_id: 0
cpu_thread_nums: 1
warmup: 200
repeat: 1000
backend: paddle
profile_mode: runtime
include_h2d_d2h: true
use_fp16: false
collect_memory_info: false
sampling_interval: 1
precision_compare: false
xpu_l3_cache: 0
result_path: benchmark_xpu_paddle_fp32.txt # NOTE(review): same result_path as the h2d-disabled config — runs may overwrite each other's results; confirm this is intended

View File

@@ -0,0 +1,14 @@
device: xpu
device_id: 0
cpu_thread_nums: 1
warmup: 200
repeat: 1000
backend: paddle
profile_mode: runtime
include_h2d_d2h: false
use_fp16: false
collect_memory_info: false
sampling_interval: 1
precision_compare: false
xpu_l3_cache: 62914560
result_path: benchmark_xpu_paddle_fp32_l3.txt

View File

@@ -0,0 +1,14 @@
device: xpu
device_id: 0
cpu_thread_nums: 1
warmup: 200
repeat: 1000
backend: paddle
profile_mode: runtime
include_h2d_d2h: false
use_fp16: false
collect_memory_info: false
sampling_interval: 1
precision_compare: false
xpu_l3_cache: 0
result_path: benchmark_xpu_paddle_fp32.txt