[FDConfig]Remove max_num_batched_tokens/max_num_seqs in parallel config (#4116)

* remove max_num_batched_tokens in parallel config

* remove max_num_seqs

* update test case

* fix test

* fix

---------

Co-authored-by: Jiang-Jia-Jun <163579578+Jiang-Jia-Jun@users.noreply.github.com>
This commit is contained in:
YuanRisheng
2025-09-17 10:43:35 +08:00
committed by GitHub
parent c01a756912
commit 2e9e53ff7e
30 changed files with 169 additions and 131 deletions

View File

@@ -86,7 +86,7 @@ class GCUFlashAttnBackend(AttentionBackend):
self.attention_metadata: GCUFlashAttnMetadata = None
self.block_size = fd_config.cache_config.block_size
self.max_seq_len = fd_config.parallel_config.max_model_len
-        self.max_num_seqs = fd_config.parallel_config.max_num_seqs
+        self.max_num_seqs = fd_config.scheduler_config.max_num_seqs
self.causal = getattr(fd_config.model_config, "causal", True)

View File

@@ -84,7 +84,7 @@ class GCUMemEfficientAttnBackend(AttentionBackend):
self.attention_metadata: GCUMemEfficientAttnMetadata = None
self.block_size = fd_config.cache_config.block_size
self.max_seq_len = fd_config.parallel_config.max_model_len
-        self.max_num_seqs = fd_config.parallel_config.max_num_seqs
+        self.max_num_seqs = fd_config.scheduler_config.max_num_seqs
self.causal = getattr(fd_config.model_config, "causal", True)