Fix attention bug in speculative decoding (#5460)

This commit is contained in:
freeliuzc
2025-12-10 10:56:37 +08:00
committed by GitHub
parent 419b416376
commit 53460935ec
2 changed files with 3 additions and 3 deletions

View File

@@ -485,9 +485,6 @@ class LLMEngine:
if self.cfg.scheduler_config.splitwise_role == "prefill":
variables["FLAGS_fmt_write_cache_completed_signal"] = 1
if self.cfg.model_config.enable_mm:
variables["FLAGS_max_partition_size"] = 1024
command_prefix = ""
for k, v in variables.items():
command_prefix += f"{k}={v} "