[FDConfig]Remove max_num_batched_tokens/max_num_seqs in parallel config (#4116)

* remove max_num_batched_tokens in parallel config

* remove max_num_seqs

* update test case

* fix test

* fix

---------

Co-authored-by: Jiang-Jia-Jun <163579578+Jiang-Jia-Jun@users.noreply.github.com>
YuanRisheng
2025-09-17 10:43:35 +08:00
committed by GitHub
parent c01a756912
commit 2e9e53ff7e
30 changed files with 169 additions and 131 deletions


@@ -7,6 +7,7 @@ from fastdeploy.config import (
     FDConfig,
     GraphOptimizationConfig,
     ParallelConfig,
+    SchedulerConfig,
 )
 from fastdeploy.model_executor.forward_meta import ForwardMeta
 from fastdeploy.model_executor.graph_optimization.decorator import (
@@ -90,11 +91,15 @@ class TestCUDAGrpahRecapture(unittest.TestCase):
         # Set FastDeploy config
         graph_opt_config = GraphOptimizationConfig(args={})
         graph_opt_config.use_cudagraph = True
-        parallel_config = ParallelConfig(args={})
+        scheduler_config = SchedulerConfig(args={})
         cache_config = CacheConfig(args={})
-        parallel_config.max_num_seqs = 1
+        scheduler_config.max_num_seqs = 1
+        parallel_config = ParallelConfig(args={})
         fd_config = FDConfig(
-            graph_opt_config=graph_opt_config, parallel_config=parallel_config, cache_config=cache_config
+            graph_opt_config=graph_opt_config,
+            scheduler_config=scheduler_config,
+            cache_config=cache_config,
+            parallel_config=parallel_config,
         )
         # Run Test Case1
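
For downstream code that still sets max_num_seqs on ParallelConfig, the migration mirrors the updated test above. The sketch below is illustrative, not part of this commit: it assumes CacheConfig is importable from fastdeploy.config alongside the other classes (only its constructor call appears in this hunk), and this file does not show where max_num_batched_tokens now lives.

# Minimal sketch of the post-#4116 config setup, based on the diff above.
# Assumption: CacheConfig lives in fastdeploy.config next to the other classes.
from fastdeploy.config import (
    CacheConfig,
    FDConfig,
    GraphOptimizationConfig,
    ParallelConfig,
    SchedulerConfig,
)

graph_opt_config = GraphOptimizationConfig(args={})
graph_opt_config.use_cudagraph = True

# max_num_seqs is now a SchedulerConfig field rather than a ParallelConfig one.
scheduler_config = SchedulerConfig(args={})
scheduler_config.max_num_seqs = 1

parallel_config = ParallelConfig(args={})
cache_config = CacheConfig(args={})

fd_config = FDConfig(
    graph_opt_config=graph_opt_config,
    scheduler_config=scheduler_config,
    cache_config=cache_config,
    parallel_config=parallel_config,
)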