diff --git a/fastdeploy/engine/engine.py b/fastdeploy/engine/engine.py
index 1d6103176..c508f4ee5 100644
--- a/fastdeploy/engine/engine.py
+++ b/fastdeploy/engine/engine.py
@@ -377,7 +377,7 @@ class LLMEngine:
             "PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION": "python",
             "FLAGS_use_append_attn": 1,
             "NCCL_ALGO": "Ring",
-            "FLAGS_max_partition_size": int(os.getenv("FLAGS_max_partition_size", 32768)),
+            "FLAGS_max_partition_size": int(os.getenv("FLAGS_max_partition_size", 1024)),
             "FLAGS_hardamard_moe_block_size": int(os.getenv("FLAGS_hardamard_moe_block_size", 128)),
             "FLAGS_hardamard_use_diagonal_block_matrix": int(
                 os.getenv("FLAGS_hardamard_use_diagonal_block_matrix", 0)
diff --git a/fastdeploy/model_executor/layers/attention/append_attn_backend.py b/fastdeploy/model_executor/layers/attention/append_attn_backend.py
index 551e19e59..029764c63 100644
--- a/fastdeploy/model_executor/layers/attention/append_attn_backend.py
+++ b/fastdeploy/model_executor/layers/attention/append_attn_backend.py
@@ -113,7 +113,7 @@ class AppendAttentionBackend(AttentionBackend):
         self.group_size: int = self.num_heads // self.kv_num_heads
         self.head_dim: int = fd_config.model_config.head_dim
         self.num_layers: int = fd_config.model_config.num_hidden_layers
-        self.max_partition_size: int = int(os.getenv("FLAGS_max_partition_size", 32768))
+        self.max_partition_size: int = int(os.getenv("FLAGS_max_partition_size", 1024))
         self.encoder_block_shape_q: int = encoder_block_shape_q
         self.decoder_block_shape_q: int = decoder_block_shape_q