Mirror of https://github.com/PaddlePaddle/FastDeploy.git, synced 2025-10-06 17:17:14 +08:00
polish code for prefill restrictions (#2992)
@@ -140,13 +140,6 @@ class GPUModelRunner(ModelRunnerBase):
         """
         Check whether prefill stage finished
         """
-        if self.enable_mm:
-            # VL only support 1 batch to prefill
-            prefill_statue = (self.share_inputs["seq_lens_this_time"] != 0) & (
-                self.share_inputs["seq_lens_this_time"] != 1
-            )
-            return not paddle.any(prefill_statue).numpy()
-        else:
-            if int(paddle.max(self.share_inputs["seq_lens_encoder"])) != 0:
-                return 1
-            else:
-                return 0
+        prefill_statue = (self.share_inputs["seq_lens_this_time"] != 0) & (
+            self.share_inputs["seq_lens_this_time"] != 1)
+        return not paddle.any(prefill_statue).numpy()
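The unified check above treats any batch slot whose seq_lens_this_time is greater than 1 as still prefilling, for both text-only and multimodal models. A minimal sketch of that logic, with made-up batch values (the tensor contents are illustrative, not from the commit):

    import paddle

    # Hypothetical per-slot lengths: 0 = idle slot, 1 = decode step,
    # >1 = request still prefilling its prompt this step.
    seq_lens_this_time = paddle.to_tensor([0, 1, 128, 1])

    # Mirrors the diff's condition (keeping its spelling of "statue"):
    # a slot is mid-prefill iff its length is neither 0 nor 1.
    prefill_statue = (seq_lens_this_time != 0) & (seq_lens_this_time != 1)

    # prefill_finished() is True only when no slot is mid-prefill.
    print(not paddle.any(prefill_statue).numpy())  # -> False: slot 2 is prefilling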
@@ -278,7 +278,7 @@ class PaddleDisWorkerProc():
             if self.local_rank % mp_num_per_node == 0:
                 if self.task_queue.num_tasks() > 0:
                     # VL only support 1 batch to prefill
-                    if not self.fd_config.model_config.enable_mm or self.worker.prefill_finished():
+                    if not self.fd_config.model_config.enable_mm or not self.worker.prefill_finished():
                         if self.nnode > 1:
                             self.task_queue.read_finish_flag.set(1)
                         else:
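In this scheduler path only the first rank on each node polls the task queue. The changed line can be restated as a pure function (the function name and boolean arguments are mine, for illustration; the real code reads them from fd_config and the worker):

    def should_dispatch(enable_mm: bool, prefill_finished: bool) -> bool:
        # Text-only models dispatch whenever tasks are queued; for
        # multimodal (VL) models, which only support one prefill batch
        # at a time, the gate also consults the worker's prefill state.
        return (not enable_mm) or (not prefill_finished)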
@@ -332,7 +332,6 @@ class PaddleDisWorkerProc():
             # Execute model to generate token. The generated token will be written to the buffer.
             # These generated tokens can be obtained through get_output op.
             self.worker.execute_model(req_dicts)
-            if not self.fd_config.model_config.enable_mm:
-                self.exist_prefill_task_signal.value[0] = self.worker.prefill_finished()
+            self.exist_prefill_task_signal.value[0] = self.worker.prefill_finished()

     def determine_num_available_blocks(self) -> None:
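After this hunk the worker publishes its prefill state after every execute_model step, instead of only for text-only models. A toy stand-in for the shared signal (the real object is an inter-process signal whose .value indexes like a numpy array; this mock class is hypothetical):

    import numpy as np

    class ToySignal:
        # Mimics the .value[0] interface of the worker's shared signal.
        def __init__(self):
            self.value = np.zeros([1], dtype=np.int32)

    exist_prefill_task_signal = ToySignal()

    # What the diff's new line does each step, with a stubbed worker result:
    prefill_finished = True  # stand-in for self.worker.prefill_finished()
    exist_prefill_task_signal.value[0] = prefill_finished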