diff --git a/fastdeploy/engine/engine.py b/fastdeploy/engine/engine.py
index 01f28819f..d550a3701 100644
--- a/fastdeploy/engine/engine.py
+++ b/fastdeploy/engine/engine.py
@@ -483,9 +483,6 @@ class LLMEngine:
             if self.cfg.scheduler_config.splitwise_role == "prefill":
                 variables["FLAGS_fmt_write_cache_completed_signal"] = 1
 
-            if self.cfg.model_config.enable_mm:
-                variables["FLAGS_max_partition_size"] = 1024
-
             command_prefix = ""
             for k, v in variables.items():
                 command_prefix += f"{k}={v} "
diff --git a/fastdeploy/model_executor/layers/attention/append_attn_backend.py b/fastdeploy/model_executor/layers/attention/append_attn_backend.py
index 346251a30..14562c3f7 100644
--- a/fastdeploy/model_executor/layers/attention/append_attn_backend.py
+++ b/fastdeploy/model_executor/layers/attention/append_attn_backend.py
@@ -148,6 +148,9 @@ class AppendAttentionBackend(AttentionBackend):
         self.head_dim: int = fd_config.model_config.head_dim
         self.num_layers: int = fd_config.model_config.num_hidden_layers
         self.max_partition_size: int = int(os.getenv("FLAGS_max_partition_size", 1024))
+        # split kv still has bug in speculative decoding
+        if self.speculative_method is not None:
+            self.max_partition_size = self.max_seq_len
         self.encoder_block_shape_q: int = encoder_block_shape_q
         self.decoder_block_shape_q: int = decoder_block_shape_q