[Feature] block sparse attention (#3209)

* Support sparse attention

* fix bug

* code style

* fix moba attn get kv shape

* Fix A100 compilation

* codestyle

* code style

* code style

* code style

* fix conflict

* Add unit tests

* code style

* Increase eblite loading time

* fix bug

* for ci

* for ci

* for ci

* for ci

* Support MLP block size 128

* Add unit tests for small operators

* Fix MLP unit test

* Move environment variables into the config

* fix rollout config
This commit is contained in:
yangjianfengo1
2025-08-26 22:16:04 +08:00
committed by GitHub
parent f0a362af18
commit 646a0c2fd8
31 changed files with 6507 additions and 10 deletions


@@ -681,6 +681,67 @@ class GraphOptimizationConfig:
argument = self.use_cudagraph
class MobaAttentionConfig:
    def __init__(
        self,
        args,
    ):
        self.moba_encoder_top_k_left: int = None
        self.moba_encoder_top_k_right: int = None
        "The sparse top-k of encoder attention lies in [moba_encoder_top_k_left, moba_encoder_top_k_right]"
        self.moba_decoder_top_k_left: int = None
        self.moba_decoder_top_k_right: int = None
        "The sparse top-k of decoder attention lies in [moba_decoder_top_k_left, moba_decoder_top_k_right]"
        self.moba_use_encoder_seq_limit: int = None
        "When the number of encoder tokens is less than moba_use_encoder_seq_limit, attention is not sparse"
        self.moba_use_decoder_seq_limit: int = None
        "When the number of decoder tokens is less than moba_use_decoder_seq_limit, attention is not sparse"
        self.moba_block_size: int = 128
        self.mlp_weight_name: str = "moba_mlp_weight.safetensors"
        self.moba_max_seq_length: int = 128 * 1024
        if args is not None:
            for key, value in args.items():
                if hasattr(self, key):
                    setattr(self, key, value)
        if self.moba_use_encoder_seq_limit is None and self.moba_encoder_top_k_left is not None:
            self.moba_use_encoder_seq_limit = self.moba_encoder_top_k_left * self.moba_block_size
        if self.moba_use_decoder_seq_limit is None and self.moba_decoder_top_k_left is not None:
            self.moba_use_decoder_seq_limit = self.moba_decoder_top_k_left * self.moba_block_size
        self.check_legality_parameters()

    def check_legality_parameters(
        self,
    ) -> None:
        if self.moba_encoder_top_k_left is not None:
            assert self.moba_encoder_top_k_left > 0, "moba_encoder_top_k_left must be larger than 0"
        if self.moba_encoder_top_k_right is not None:
            assert self.moba_encoder_top_k_right > 0, "moba_encoder_top_k_right must be larger than 0"
            assert (
                self.moba_encoder_top_k_right >= self.moba_encoder_top_k_left
            ), "moba_encoder_top_k_right must be no less than moba_encoder_top_k_left"
        if self.moba_decoder_top_k_left is not None:
            assert self.moba_decoder_top_k_left > 0, "moba_decoder_top_k_left must be larger than 0"
        if self.moba_decoder_top_k_right is not None:
            assert self.moba_decoder_top_k_right > 0, "moba_decoder_top_k_right must be larger than 0"
            assert (
                self.moba_decoder_top_k_right >= self.moba_decoder_top_k_left
            ), "moba_decoder_top_k_right must be no less than moba_decoder_top_k_left"
        if self.moba_use_encoder_seq_limit is not None and self.moba_encoder_top_k_left is not None:
            assert self.moba_use_encoder_seq_limit >= self.moba_encoder_top_k_left * self.moba_block_size
        if self.moba_use_decoder_seq_limit is not None and self.moba_decoder_top_k_left is not None:
            assert self.moba_use_decoder_seq_limit >= self.moba_decoder_top_k_left * self.moba_block_size

    def to_json_string(self):
        """
        Convert moba_attention_config to a JSON string.
        """
        return json.dumps({key: value for key, value in self.__dict__.items() if value is not None})


class EarlyStopConfig:
    def __init__(
        self,
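
For reference, a minimal usage sketch of the new config (illustrative values, not part of this diff): when a seq limit is left unset it is derived as top_k_left * moba_block_size, and check_legality_parameters enforces the ordering constraints.

# Illustrative sketch only; the values below are hypothetical.
sparse_attn_args = {
    "moba_encoder_top_k_left": 2,   # encoder keeps at least 2 blocks per query
    "moba_encoder_top_k_right": 4,  # and at most 4 blocks
    "moba_decoder_top_k_left": 2,
    "moba_decoder_top_k_right": 4,
}
cfg = MobaAttentionConfig(sparse_attn_args)
# moba_use_encoder_seq_limit defaults to top_k_left * moba_block_size = 2 * 128.
assert cfg.moba_use_encoder_seq_limit == 256
print(cfg.to_json_string())  # serializes only the attributes that are not None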
@@ -1035,6 +1096,7 @@ class FDConfig:
        decoding_config: DecodingConfig = None,
        quant_config: QuantConfigBase = None,
        graph_opt_config: GraphOptimizationConfig = None,
        moba_attention_config: MobaAttentionConfig = None,
        speculative_config: SpeculativeConfig = None,
        tokenizer: str = None,
        max_model_len: int = 8192,
@@ -1069,7 +1131,7 @@ class FDConfig:
        self.early_stop_config: Optional[EarlyStopConfig] = early_stop_config
        self.decoding_config: DecodingConfig = decoding_config  # type: ignore
        self.cache_config: CacheConfig = cache_config  # type: ignore
        self.moba_attention_config: Optional[MobaAttentionConfig] = moba_attention_config
        # Initialize cuda graph capture list
        if self.graph_opt_config.cudagraph_capture_sizes is None:
            self.graph_opt_config._set_cudagraph_sizes(max_num_seqs=self.parallel_config.max_num_seqs)
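
A hedged wiring sketch for the FDConfig change above (hypothetical, not from this PR; assumes the other FDConfig arguments can be left at the defaults shown in the signature):

# Only the new argument is shown; everything else falls back to its default.
moba_cfg = MobaAttentionConfig({"moba_decoder_top_k_left": 2, "moba_decoder_top_k_right": 4})
fd_config = FDConfig(moba_attention_config=moba_cfg)
# The sparse-attention settings are then available to attention backends
# as fd_config.moba_attention_config.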