Revert "[Bug fix] fix attention rank init (#2743)" (#2761)

This reverts commit e8bbe7244b.
Author: RichardWooSJTU
Date: 2025-07-09 10:38:12 +08:00
Committed by: GitHub
Parent: f72c4de539
Commit: 6610aa29d0

4 changed files with 13 additions and 17 deletions


@@ -91,7 +91,7 @@ class AppendAttentionBackend(AttentionBackend):
         self.use_speculate: bool = self.speculative_method is not None
         self.speculate_max_draft_token_num: int = fd_config.speculative_config.num_speculative_tokens
         self.keep_pd_step_flag: bool = fd_config.speculative_config.model_type == "mtp"
-        self.rank: int = fd_config.parallel_config.expert_parallel_rank * fd_config.parallel_config.tensor_parallel_degree + fd_config.parallel_config.tensor_parallel_rank
+        self.rank: int = fd_config.parallel_config.tensor_parallel_rank
         self.kv_num_heads: int = kv_num_heads
         self.num_heads: int = num_heads
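For context, the two `self.rank` computations differ in scope: the line removed here (from #2743) computed a global rank across expert-parallel and tensor-parallel groups, while the restored line keeps the rank local to the tensor-parallel group. A minimal sketch with hypothetical values, not tied to any real deployment:

```python
# Hypothetical layout: 2 expert-parallel groups x 2 tensor-parallel ranks.
tensor_parallel_degree = 2
tensor_parallel_rank = 1   # this worker's rank within its TP group
expert_parallel_rank = 1   # this worker's EP group index

# Reverted (#2743) form: global rank across EP x TP.
global_rank = expert_parallel_rank * tensor_parallel_degree + tensor_parallel_rank
assert global_rank == 3

# Restored form: rank local to the TP group.
local_rank = tensor_parallel_rank
assert local_rank == 1
```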
@@ -108,12 +108,12 @@ class AppendAttentionBackend(AttentionBackend):
+        if fd_config.parallel_config.expert_parallel_rank is None:
+            fd_config.parallel_config.expert_parallel_rank = 0
+        device_id = self.rank + fd_config.parallel_config.tensor_parallel_degree * \
+            fd_config.parallel_config.expert_parallel_rank
         if self.device_id is None:
-            self.device_id = self.rank
+            self.device_id = device_id
         else:
-            device_ids = self.device_id.split(",")
-            rank_index = self.rank % len(device_ids)
-            self.device_id = self.device_id[rank_index]
+            self.device_id = self.device_id.split(",")[device_id]

     def init_attention_metadata(self, forward_meta: ForwardMeta):
         """Initialize attention metadata so that all layers in the forward pass can reuse it."""