check (#3720)
@@ -377,7 +377,7 @@ class LLMEngine:
             "PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION": "python",
             "FLAGS_use_append_attn": 1,
             "NCCL_ALGO": "Ring",
-            "FLAGS_max_partition_size": int(os.getenv("FLAGS_max_partition_size", 32768)),
+            "FLAGS_max_partition_size": int(os.getenv("FLAGS_max_partition_size", 1024)),
             "FLAGS_hardamard_moe_block_size": int(os.getenv("FLAGS_hardamard_moe_block_size", 128)),
             "FLAGS_hardamard_use_diagonal_block_matrix": int(
                 os.getenv("FLAGS_hardamard_use_diagonal_block_matrix", 0)
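The engine resolves FLAGS_max_partition_size through os.getenv, so the lowered 1024 default only applies when the variable is unset; an explicit export still takes precedence. A minimal sketch of that precedence (restoring the previous 32768 value below is a hypothetical usage, not part of this commit):

import os

# Hypothetical override: export the flag before the engine builds its env dict.
os.environ["FLAGS_max_partition_size"] = "32768"

# Same resolution pattern as the hunk above: env var first, then the 1024 fallback.
max_partition_size = int(os.getenv("FLAGS_max_partition_size", 1024))
print(max_partition_size)  # 32768 here; 1024 when the variable is unset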
@@ -113,7 +113,7 @@ class AppendAttentionBackend(AttentionBackend):
         self.group_size: int = self.num_heads // self.kv_num_heads
         self.head_dim: int = fd_config.model_config.head_dim
         self.num_layers: int = fd_config.model_config.num_hidden_layers
-        self.max_partition_size: int = int(os.getenv("FLAGS_max_partition_size", 32768))
+        self.max_partition_size: int = int(os.getenv("FLAGS_max_partition_size", 1024))
         self.encoder_block_shape_q: int = encoder_block_shape_q
         self.decoder_block_shape_q: int = decoder_block_shape_q
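AppendAttentionBackend reads the same flag once in __init__, so the value is fixed at construction time and later changes to the environment have no effect on an existing instance. A rough sketch of that behaviour, assuming nothing about the rest of the class (StubBackend is a stand-in, not the real AppendAttentionBackend):

import os

class StubBackend:
    """Stand-in illustrating construction-time resolution of the flag."""

    def __init__(self) -> None:
        # Mirrors the hunk above: default is now 1024 unless the env var says otherwise.
        self.max_partition_size: int = int(os.getenv("FLAGS_max_partition_size", 1024))

backend = StubBackend()
print(backend.max_partition_size)  # 1024 with no override set

# Changing the variable afterwards does not affect the already-built instance.
os.environ["FLAGS_max_partition_size"] = "32768"
print(backend.max_partition_size)  # still 1024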