[XPU] Add docs for gm_default_size and fix type (#1962)
Add description of gm_default_size and change the type of kunlunxin_gm_default_size to int64_t.
@@ -108,7 +108,7 @@ void FD_C_RuntimeOptionWrapperUseKunlunXin(
       std::string(precision),
       bool(adaptive_seqlen),
       bool(enable_multi_stream),
-      gm_default_size);
+      int64_t(gm_default_size));
 }
 
 void FD_C_RuntimeOptionWrapperUseSophgo(

@@ -126,6 +126,7 @@ FASTDEPLOY_CAPI_EXPORT extern void FD_C_RuntimeOptionWrapperUseAscend(
 /// \param[in] adaptive_seqlen Is the input of multi_encoder variable length
 /// \param[in] enable_multi_stream Whether to enable the multi stream of
 /// KunlunXin XPU.
+/// \param[in] gm_default_size The default size of context global memory of KunlunXin XPU.
 ///
 FASTDEPLOY_CAPI_EXPORT extern void FD_C_RuntimeOptionWrapperUseKunlunXin(
     __fd_keep FD_C_RuntimeOptionWrapper* fd_c_runtime_option_wrapper,

@@ -103,6 +103,7 @@ public class RuntimeOption {
 /// \param adaptive_seqlen Is the input of multi_encoder variable length
 /// \param enable_multi_stream Whether to enable the multi stream of
 /// KunlunXin XPU.
+/// \param gm_default_size The default size of context global memory of KunlunXin XPU.
 ///
 public void
 UseKunlunXin(int kunlunxin_id = 0, int l3_workspace_size = 0xfffc00,

@@ -75,12 +75,12 @@ struct LiteBackendOption {
   std::string kunlunxin_autotune_file = "";
   /// kunlunxin_precision
   std::string kunlunxin_precision = "int16";
-  /// kunlunxin_gm_default_size
-  int kunlunxin_gm_default_size = 0;
   /// kunlunxin_adaptive_seqlen
   bool kunlunxin_adaptive_seqlen = false;
   /// kunlunxin_enable_multi_stream
   bool kunlunxin_enable_multi_stream = false;
+  /// kunlunxin_gm_default_size
+  int64_t kunlunxin_gm_default_size = 0;
 
   /// Optimized model dir for CxxConfig
   std::string optimized_model_dir = "";

@@ -106,6 +106,7 @@ struct FASTDEPLOY_DECL RuntimeOption {
 /// \param adaptive_seqlen Is the input of multi_encoder variable length
 /// \param enable_multi_stream Whether to enable the multi stream of
 /// KunlunXin XPU.
+/// \param gm_default_size The default size of global memory of KunlunXin XPU.
 ///
 void UseKunlunXin(int kunlunxin_id = 0, int l3_workspace_size = 0xfffc00,
                   bool locked = false, bool autotune = true,

@@ -242,6 +242,7 @@ class RuntimeOption:
         :param precision: (str)Calculation accuracy of multi_encoder
         :param adaptive_seqlen: (bool)adaptive_seqlen Is the input of multi_encoder variable length
         :param enable_multi_stream: (bool)Whether to enable the multi stream of KunlunXin XPU.
+        :param gm_default_size The default size of context global memory of KunlunXin XPU.
         """
         return self._option.use_kunlunxin(device_id, l3_workspace_size, locked,
                                           autotune, autotune_file, precision,
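For readers applying this change, a minimal usage sketch of the newly documented parameter through the Python RuntimeOption wrapper shown in the last hunk. It assumes the public method wrapping self._option.use_kunlunxin is itself named use_kunlunxin and accepts keyword arguments matching the parameter names in the docstring; the 32 MB value is only illustrative and is not part of this commit.

import fastdeploy as fd

option = fd.RuntimeOption()
# gm_default_size is the default size of the KunlunXin XPU context global
# memory; the wrapper forwards it to the Lite backend, where this commit
# stores it as int64_t (LiteBackendOption::kunlunxin_gm_default_size).
option.use_kunlunxin(device_id=0,
                     l3_workspace_size=0xfffc00,
                     gm_default_size=32 * 1024 * 1024)  # illustrative size

The same value reaches the C API through FD_C_RuntimeOptionWrapperUseKunlunXin, which now passes it as int64_t(gm_default_size), as shown in the first hunk.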