mirror of
https://github.com/PaddlePaddle/FastDeploy.git
synced 2025-10-05 16:48:03 +08:00
[FDConfig]Remove max_num_batched_tokens/max_num_seqs in parallel config (#4116)
* remove max_num_batched_tokens in parallel config
* remove max_num_seqs
* update test case
* fix test
* fix

Co-authored-by: Jiang-Jia-Jun <163579578+Jiang-Jia-Jun@users.noreply.github.com>
This commit is contained in:
@@ -21,6 +21,7 @@ from fastdeploy.config import (
|
||||
FDConfig,
|
||||
GraphOptimizationConfig,
|
||||
ParallelConfig,
|
||||
SchedulerConfig,
|
||||
)
|
||||
|
||||
|
||||
@@ -50,12 +51,17 @@ class FakeModelConfig:
|
||||
|
||||
def get_default_test_fd_config():
    """Build a minimal ``FDConfig`` for unit tests.

    Creates default graph-optimization, scheduler, parallel and cache
    configs, sets ``max_num_seqs`` on the scheduler config (per the commit,
    it no longer lives on ``ParallelConfig``), and attaches a
    ``FakeModelConfig`` so tests do not need real model files.

    Returns:
        FDConfig: a test-mode config with fake model metadata attached.
    """
    graph_opt_config = GraphOptimizationConfig(args={})
    scheduler_config = SchedulerConfig(args={})
    # max_num_seqs moved from ParallelConfig to SchedulerConfig (#4116).
    scheduler_config.max_num_seqs = 1
    parallel_config = ParallelConfig(args={})
    parallel_config.data_parallel_rank = 1
    cache_config = CacheConfig({})
    fd_config = FDConfig(
        graph_opt_config=graph_opt_config,
        parallel_config=parallel_config,
        cache_config=cache_config,
        scheduler_config=scheduler_config,
        test_mode=True,  # presumably skips heavyweight init — TODO confirm in FDConfig
    )
    # Replace the real model config with a lightweight fake for tests.
    fd_config.model_config = FakeModelConfig()
    return fd_config
Reference in New Issue
Block a user