Mirror of https://github.com/PaddlePaddle/FastDeploy.git, synced 2025-10-05 08:37:06 +08:00
[FDConfig]Remove max_num_batched_tokens/max_num_seqs in parallel config (#4116)
* remove max_num_batched_tokens in parallel config
* remove max_num_seqs
* update test case
* fix test
* fix

Co-authored-by: Jiang-Jia-Jun <163579578+Jiang-Jia-Jun@users.noreply.github.com>
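For context on the rename below, here is a minimal sketch of the resulting config split. The class shapes and default values are illustrative assumptions, not FastDeploy's actual definitions; only the two attribute names and their new home on the scheduler config come from this commit.

# Illustrative sketch only -- not FastDeploy's real config classes.
from dataclasses import dataclass

@dataclass
class SchedulerConfig:
    # Batching limits now live here (attribute names from this commit).
    max_num_seqs: int = 256             # assumed default
    max_num_batched_tokens: int = 8192  # assumed default

@dataclass
class ParallelConfig:
    # No longer carries max_num_seqs / max_num_batched_tokens.
    tensor_parallel_size: int = 1       # assumed field, for illustration

scheduler_config = SchedulerConfig()
# Call sites now read the limits from the scheduler config, e.g.:
batch_size = min(scheduler_config.max_num_seqs, 3)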
@@ -145,9 +145,9 @@ class GPUModelRunner(ModelRunnerBase):
         self.cudagraph_only_prefill = self.graph_opt_config.cudagraph_only_prefill

         # Initialize share inputs
-        self._init_share_inputs(self.parallel_config.max_num_seqs)
+        self._init_share_inputs(self.scheduler_config.max_num_seqs)
         self.infer_seed_increment = paddle.full(
-            shape=[self.parallel_config.max_num_seqs, 1],
+            shape=[self.scheduler_config.max_num_seqs, 1],
             fill_value=4,
             dtype="int64",
         ).cpu()
@@ -1208,13 +1208,13 @@ class GPUModelRunner(ModelRunnerBase):
         # decode_max_tile_size must take into account the maximum case, where *1024 can cover 128K.
         decode_max_tile_size = (
             1024
-            * self.parallel_config.max_num_seqs
+            * self.scheduler_config.max_num_seqs
             * np.ceil((decoder_step_token_num * group_size) / decoder_block_shape_q)
         )
-        encode_max_tile_size = self.parallel_config.max_num_seqs * np.ceil(
+        encode_max_tile_size = self.scheduler_config.max_num_seqs * np.ceil(
             (self.model_config.max_model_len * group_size) / encoder_block_shape_q
         )
-        kv_max_tile_size = self.parallel_config.max_num_seqs * np.ceil(
+        kv_max_tile_size = self.scheduler_config.max_num_seqs * np.ceil(
             self.model_config.max_model_len / self.fd_config.cache_config.block_size
         )
         self.share_inputs["decoder_batch_ids"] = paddle.full([int(decode_max_tile_size)], 0, dtype="int32")
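The hunk above only swaps the config object, but the tile-size math is worth unpacking. Below is a runnable sketch of the same formulas; every parameter value is an assumption picked for illustration, and the 1024 factor is the headroom the in-diff comment says covers 128K contexts.

# Minimal sketch of the tile-size math from the hunk above; all
# parameter values are assumptions, not FastDeploy defaults.
import numpy as np

max_num_seqs = 256            # scheduler_config.max_num_seqs (assumed)
max_model_len = 131072        # 128K context (assumed)
block_size = 64               # cache_config.block_size (assumed)
decoder_step_token_num = 1    # tokens emitted per decode step (assumed)
group_size = 8                # query heads per KV head (assumed)
decoder_block_shape_q = 16    # decode tile shape along q (assumed)
encoder_block_shape_q = 64    # prefill tile shape along q (assumed)

# The 1024 multiplier is the headroom the comment refers to: it lets
# the decode-side buffer cover contexts up to 128K.
decode_max_tile_size = 1024 * max_num_seqs * np.ceil(
    (decoder_step_token_num * group_size) / decoder_block_shape_q
)
encode_max_tile_size = max_num_seqs * np.ceil(
    (max_model_len * group_size) / encoder_block_shape_q
)
kv_max_tile_size = max_num_seqs * np.ceil(max_model_len / block_size)

print(int(decode_max_tile_size))  # 262144 under these assumptions
print(int(encode_max_tile_size))  # 4194304
print(int(kv_max_tile_size))      # 524288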
@@ -1508,7 +1508,7 @@ class GPUModelRunner(ModelRunnerBase):
             for num_tokens in sorted(capture_sizes, reverse=True):
                 self._dummy_run(
                     num_tokens=num_tokens,
-                    batch_size=self.parallel_config.max_num_seqs,
+                    batch_size=self.scheduler_config.max_num_seqs,
                     in_capturing=True,
                     expected_decode_len=expected_decode_len,
                     capture_prefill=True,
@@ -1519,7 +1519,7 @@ class GPUModelRunner(ModelRunnerBase):
         else:
             for batch_size in sorted(capture_sizes, reverse=True):
                 self._dummy_run(
-                    num_tokens=self.parallel_config.max_num_batched_tokens,
+                    num_tokens=self.scheduler_config.max_num_batched_tokens,
                     batch_size=batch_size,
                     in_capturing=True,
                     expected_decode_len=expected_decode_len,
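A note on `sorted(capture_sizes, reverse=True)` in the two hunks above: capturing the largest size first is a common CUDA Graph idiom, since smaller captures can then fit in the memory pool the first capture already grew. A minimal sketch of the loop shape, with a stand-in for `_dummy_run` (names and sizes here are assumptions):

# Sketch of the capture-ordering pattern; illustrative only.
capture_sizes = [1, 2, 4, 8, 16]
max_num_batched_tokens = 8192  # scheduler_config.max_num_batched_tokens (assumed)

def dummy_run(num_tokens: int, batch_size: int, in_capturing: bool) -> None:
    # Stand-in for GPUModelRunner._dummy_run: one padded forward pass
    # executed while a CUDA graph for this batch size is captured.
    print(f"capture batch_size={batch_size}, num_tokens={num_tokens}")

# Largest batch first, so later (smaller) captures reuse the pool.
for batch_size in sorted(capture_sizes, reverse=True):
    dummy_run(
        num_tokens=max_num_batched_tokens,
        batch_size=batch_size,
        in_capturing=True,
    )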
@@ -1536,7 +1536,7 @@ class GPUModelRunner(ModelRunnerBase):
         start_time = time.perf_counter()
         for batch_size in self.sot_warmup_sizes:
             self._dummy_run(
-                num_tokens=self.parallel_config.max_num_batched_tokens,
+                num_tokens=self.scheduler_config.max_num_batched_tokens,
                 batch_size=batch_size,
             )
             logger.info(f"SOT warmup the model with the batch size:{batch_size}")
@@ -1815,8 +1815,8 @@ class GPUModelRunner(ModelRunnerBase):

         # 2. Dummy run
         self._dummy_run(
-            num_tokens=self.parallel_config.max_num_batched_tokens,
-            batch_size=min(self.parallel_config.max_num_seqs, 3),
+            num_tokens=self.scheduler_config.max_num_batched_tokens,
+            batch_size=min(self.scheduler_config.max_num_seqs, 3),
         )

         # 3. gc