mirror of
https://github.com/PaddlePaddle/FastDeploy.git
synced 2025-12-24 13:28:13 +08:00
[Feature] block sparse attention (#3209)
Some checks failed
CE Compile Job / ce_job_pre_check (push) Has been cancelled
CE Compile Job / print_ce_job_pre_check_outputs (push) Has been cancelled
CE Compile Job / FD-Clone-Linux (push) Has been cancelled
CE Compile Job / Show Code Archive Output (push) Has been cancelled
CE Compile Job / BUILD_SM8090 (push) Has been cancelled
CE Compile Job / BUILD_SM8689 (push) Has been cancelled
CE Compile Job / CE_UPLOAD (push) Has been cancelled
Deploy GitHub Pages / deploy (push) Has been cancelled
Some checks failed
CE Compile Job / ce_job_pre_check (push) Has been cancelled
CE Compile Job / print_ce_job_pre_check_outputs (push) Has been cancelled
CE Compile Job / FD-Clone-Linux (push) Has been cancelled
CE Compile Job / Show Code Archive Output (push) Has been cancelled
CE Compile Job / BUILD_SM8090 (push) Has been cancelled
CE Compile Job / BUILD_SM8689 (push) Has been cancelled
CE Compile Job / CE_UPLOAD (push) Has been cancelled
Deploy GitHub Pages / deploy (push) Has been cancelled
* 支持稀疏attn * fix bug * code style * fix moba attn get kv shape * 修复a100编译 * codestyle * code style * code style * code style * fix conflict * 增加单侧 * code style * 增加eblite 加载时间 * fix bug * for ci * for ci * for ci * for ci * 支持mlp block size 128 * 增加小算子单测 * fix 单测 mlp * 将环境变量加入到config里面 * fix rollout config
This commit is contained in:
@@ -26,6 +26,7 @@ from fastdeploy.config import (
|
||||
FDConfig,
|
||||
GraphOptimizationConfig,
|
||||
LoadConfig,
|
||||
MobaAttentionConfig,
|
||||
ModelConfig,
|
||||
ParallelConfig,
|
||||
SpeculativeConfig,
|
||||
@@ -336,6 +337,10 @@ class EngineArgs:
|
||||
"""
|
||||
Configuration for graph optimization backend execution.
|
||||
"""
|
||||
moba_attention_config: Optional[Dict[str, Any]] = None
|
||||
"""
|
||||
Configuration for moba attention.
|
||||
"""
|
||||
|
||||
enable_logprob: bool = False
|
||||
"""
|
||||
@@ -529,6 +534,12 @@ class EngineArgs:
|
||||
default=EngineArgs.graph_optimization_config,
|
||||
help="",
|
||||
)
|
||||
model_group.add_argument(
|
||||
"--moba-attention-config",
|
||||
type=json.loads,
|
||||
default=EngineArgs.moba_attention_config,
|
||||
help="",
|
||||
)
|
||||
model_group.add_argument(
|
||||
"--guided-decoding-backend",
|
||||
type=str,
|
||||
@@ -918,6 +929,18 @@ class EngineArgs:
|
||||
graph_optimization_args[k] = v
|
||||
return GraphOptimizationConfig(graph_optimization_args)
|
||||
|
||||
def create_moba_attention_config(self) -> MobaAttentionConfig:
    """
    Create and return a MobaAttentionConfig object based on the current settings.

    When the user supplied ``--moba-attention-config`` (a JSON dict), the full
    set of engine arguments is taken as a base via ``asdict(self)`` and the
    user-provided entries are overlaid on top of it.

    Returns:
        MobaAttentionConfig: built from the merged argument dict, or an
        empty (``None``-initialized) config when no moba attention
        configuration was provided.
    """
    # Guard clause first: skip the (relatively costly) asdict(self) dump
    # entirely when moba attention is not configured.
    if self.moba_attention_config is None:
        return MobaAttentionConfig(None)
    attention_args = asdict(self)
    # User-supplied values take precedence over the engine-arg defaults.
    attention_args.update(self.moba_attention_config)
    return MobaAttentionConfig(attention_args)
|
||||
|
||||
def create_early_stop_config(self) -> EarlyStopConfig:
|
||||
"""
|
||||
Create and return an EarlyStopConfig object based on the current settings.
|
||||
@@ -955,6 +978,7 @@ class EngineArgs:
|
||||
speculative_cfg = self.create_speculative_config()
|
||||
graph_opt_cfg = self.create_graph_optimization_config()
|
||||
graph_opt_cfg.update_use_cudagraph(self.use_cudagraph)
|
||||
moba_attention_config = self.create_moba_attention_config()
|
||||
|
||||
early_stop_cfg = self.create_early_stop_config()
|
||||
early_stop_cfg.update_enable_early_stop(self.enable_early_stop)
|
||||
@@ -992,6 +1016,7 @@ class EngineArgs:
|
||||
max_long_partial_prefills=self.max_long_partial_prefills,
|
||||
long_prefill_token_threshold=self.long_prefill_token_threshold,
|
||||
graph_opt_config=graph_opt_cfg,
|
||||
moba_attention_config=moba_attention_config,
|
||||
guided_decoding_backend=self.guided_decoding_backend,
|
||||
disable_any_whitespace=self.guided_decoding_disable_any_whitespace,
|
||||
early_stop_config=early_stop_cfg,
|
||||
|
||||
Reference in New Issue
Block a user