fix attention bug in spec decoding (#5480)

commit 6715196924 (parent c5973c2087)
Author: freeliuzc
Date: 2025-12-10 12:56:13 +08:00
Committed by: GitHub
2 changed files with 3 additions and 3 deletions


@@ -483,9 +483,6 @@ class LLMEngine:
         if self.cfg.scheduler_config.splitwise_role == "prefill":
             variables["FLAGS_fmt_write_cache_completed_signal"] = 1
-        if self.cfg.model_config.enable_mm:
-            variables["FLAGS_max_partition_size"] = 1024
         command_prefix = ""
         for k, v in variables.items():
             command_prefix += f"{k}={v} "
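For context, a minimal, hypothetical sketch of what this part of LLMEngine does with the collected flags: each entry in `variables` is prepended to the worker launch command as a KEY=VALUE pair. The removed multimodal branch only forced FLAGS_max_partition_size to 1024, which duplicates the default the attention backend falls back to when the flag is unset (second hunk below).

    # Sketch only; `variables` stands in for the engine's env-flag dict.
    variables = {"FLAGS_fmt_write_cache_completed_signal": 1}

    command_prefix = ""
    for k, v in variables.items():
        command_prefix += f"{k}={v} "

    print(command_prefix)  # "FLAGS_fmt_write_cache_completed_signal=1 "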


@@ -148,6 +148,9 @@ class AppendAttentionBackend(AttentionBackend):
         self.head_dim: int = fd_config.model_config.head_dim
         self.num_layers: int = fd_config.model_config.num_hidden_layers
         self.max_partition_size: int = int(os.getenv("FLAGS_max_partition_size", 1024))
+        # split-kv attention still has a bug in speculative decoding; use a single partition
+        if self.speculative_method is not None:
+            self.max_partition_size = self.max_seq_len
         self.encoder_block_shape_q: int = encoder_block_shape_q
         self.decoder_block_shape_q: int = decoder_block_shape_q
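The added lines are the actual fix: the partition size for the append-attention backend still defaults to FLAGS_max_partition_size (1024), but when a speculative decoding method is configured the whole sequence is kept in one partition, which sidesteps the buggy split-KV path. A minimal sketch of that resolution logic, with assumed names and example values, for illustration only:

    import os

    def resolve_max_partition_size(max_seq_len, speculative_method):
        # Default comes from the FLAGS_max_partition_size env var (1024 if unset).
        max_partition_size = int(os.getenv("FLAGS_max_partition_size", 1024))
        # Split-KV attention is still buggy with speculative decoding, so fall
        # back to a single partition spanning the full sequence in that case.
        if speculative_method is not None:
            max_partition_size = max_seq_len
        return max_partition_size

    print(resolve_max_partition_size(8192, "mtp"))  # 8192 (speculative decoding on; "mtp" is an example method name)
    print(resolve_max_partition_size(8192, None))   # 1024 (default partition size)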