Mirror of https://github.com/PaddlePaddle/FastDeploy.git (synced 2025-10-05 16:48:03 +08:00)
[FDConfig]Remove max_num_batched_tokens/max_num_seqs in parallel config (#4116)

* remove max_num_batched_tokens in parallel config
* remove max_num_seqs
* update test case
* fix test
* fix

Co-authored-by: Jiang-Jia-Jun <163579578+Jiang-Jia-Jun@users.noreply.github.com>
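In practical terms, the batching limit moves from ParallelConfig to SchedulerConfig. A minimal sketch of the before/after, assuming only the constructor forms and attribute names that appear in the test diff below:

    from fastdeploy.config import ParallelConfig, SchedulerConfig

    # Before this change: ParallelConfig carried the batching limit.
    #   parallel_config = ParallelConfig({"max_num_seqs": 1})
    #   limit = parallel_config.max_num_seqs

    # After this change: SchedulerConfig owns max_num_seqs, and
    # ParallelConfig is constructed without batching arguments.
    scheduler_config = SchedulerConfig({"max_num_seqs": 1})
    parallel_config = ParallelConfig(args={})
    limit = scheduler_config.max_num_seqs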
@@ -29,6 +29,7 @@ from fastdeploy.config import (
     FDConfig,
     GraphOptimizationConfig,
     ParallelConfig,
+    SchedulerConfig,
 )
 from fastdeploy.model_executor.forward_meta import ForwardMeta
 from fastdeploy.model_executor.graph_optimization.decorator import (
@@ -88,15 +89,17 @@ class TestStaticGraphCUDAGraphSplit(unittest.TestCase):
         """Run test case"""
         # Set FastDeploy config
         graph_opt_config = GraphOptimizationConfig({"use_cudagraph": True, "graph_opt_level": 1})
-        parallel_config = ParallelConfig({"max_num_seqs": 1})
-        graph_opt_config._set_cudagraph_sizes(max_num_seqs=parallel_config.max_num_seqs)
-        graph_opt_config.init_with_cudagrpah_size(max_capture_size=parallel_config.max_num_seqs)
+        scheduler_config = SchedulerConfig({"max_num_seqs": 1})
+        graph_opt_config._set_cudagraph_sizes(max_num_seqs=scheduler_config.max_num_seqs)
+        graph_opt_config.init_with_cudagrpah_size(max_capture_size=scheduler_config.max_num_seqs)
         cache_config = CacheConfig({})
+        parallel_config = ParallelConfig(args={})

         fd_config = FDConfig(
             graph_opt_config=graph_opt_config,
-            parallel_config=parallel_config,
+            scheduler_config=scheduler_config,
             cache_config=cache_config,
+            parallel_config=parallel_config,
             test_mode=True,
         )

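For downstream readers of these configs, a hedged verification sketch: the hasattr check encodes an assumption taken from the commit title (max_num_seqs is removed from ParallelConfig outright), which this diff alone does not prove.

    from fastdeploy.config import ParallelConfig, SchedulerConfig

    scheduler_config = SchedulerConfig({"max_num_seqs": 1})
    parallel_config = ParallelConfig(args={})

    # Assumption from the commit title: max_num_seqs no longer exists on
    # ParallelConfig at all, so this attribute lookup should now fail.
    assert not hasattr(parallel_config, "max_num_seqs")

    # CUDA graph capture sizing is now driven by the scheduler config,
    # mirroring the updated test case above.
    assert scheduler_config.max_num_seqs == 1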