mirror of
https://github.com/PaddlePaddle/FastDeploy.git
synced 2025-10-06 09:07:10 +08:00
[XPU] Add gm_default_size -> Backend::LITE (#1934)
* add gm_default_size * add gm_default_size --------- Co-authored-by: DefTruth <31974251+DefTruth@users.noreply.github.com>
This commit is contained in:
@@ -226,7 +226,8 @@ class RuntimeOption:
                        autotune_file="",
                        precision="int16",
                        adaptive_seqlen=False,
-                       enable_multi_stream=False):
+                       enable_multi_stream=False,
+                       gm_default_size=0):
         """Inference with KunlunXin XPU

         :param device_id: (int)The index of KunlunXin XPU will be used for inference, default 0
@@ -244,7 +245,8 @@ class RuntimeOption:
         """
         return self._option.use_kunlunxin(device_id, l3_workspace_size, locked,
                                           autotune, autotune_file, precision,
-                                          adaptive_seqlen, enable_multi_stream)
+                                          adaptive_seqlen, enable_multi_stream,
+                                          gm_default_size)

     def use_cpu(self):
         """Inference with CPU
Reference in New Issue
Block a user