polish code for prefill restrictions (#2991)

Author: Zero Rains
Date: 2025-07-23 20:10:14 +08:00
Committed by: GitHub
Parent: 172e69fe17
Commit: ca0f71bd39
2 changed files with 5 additions and 13 deletions


@@ -150,13 +150,6 @@ class GPUModelRunner(ModelRunnerBase):
""" """
Check whether prefill stage finished Check whether prefill stage finished
""" """
if self.enable_mm:
# VL only support 1 batch to prefill
prefill_statue = (self.share_inputs["seq_lens_this_time"] != 0) & (
self.share_inputs["seq_lens_this_time"] != 1
)
return not paddle.any(prefill_statue).numpy()
else:
if int(paddle.max(self.share_inputs["seq_lens_encoder"])) != 0: if int(paddle.max(self.share_inputs["seq_lens_encoder"])) != 0:
return 1 return 1
else: else:

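For reference, a minimal sketch (not the actual FastDeploy source) of what the check reduces to after this hunk. The helper name and the return value of the else branch elided by the hunk are assumptions; seq_lens_encoder is assumed to hold the remaining prompt length per slot and to be reset to 0 once a request leaves the prefill stage, so the check is truthy while prefill work is still pending, which is why the call site below negates it.

import paddle

# Hypothetical standalone helper mirroring the simplified check above;
# the real code lives on GPUModelRunner and reads self.share_inputs.
def prefill_task_exists(seq_lens_encoder: paddle.Tensor) -> int:
    # Nonzero encoder lengths mean at least one request is still prefilling.
    if int(paddle.max(seq_lens_encoder)) != 0:
        return 1
    return 0  # assumption: the else branch not shown in the hunk returns 0

print(prefill_task_exists(paddle.to_tensor([0, 128, 0], dtype="int32")))  # 1
print(prefill_task_exists(paddle.to_tensor([0, 0, 0], dtype="int32")))    # 0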

@@ -286,7 +286,7 @@ class PaddleDisWorkerProc:
             if self.local_rank % mp_num_per_node == 0:
                 if self.task_queue.num_tasks() > 0:
                     # VL only support 1 batch to prefill
-                    if not self.fd_config.model_config.enable_mm or self.worker.prefill_finished():
+                    if not self.fd_config.model_config.enable_mm or not self.worker.prefill_finished():
                         if self.nnode > 1:
                             self.task_queue.read_finish_flag.set(1)
                         else:
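The negation added here follows from the unified semantics above: prefill_finished() now reads as "a prefill task still exists". A hedged sketch of the gating decision, using a hypothetical helper name that is not part of FastDeploy:

# Hypothetical helper; mirrors the condition in the hunk above.
def should_read_new_tasks(enable_mm: bool, prefill_pending: bool) -> bool:
    # Text-only models may always pick up queued tasks; multimodal (VL) models
    # only support one prefill batch at a time, so they wait until no prefill
    # work is pending before reading the next task.
    return (not enable_mm) or (not prefill_pending)

assert should_read_new_tasks(enable_mm=False, prefill_pending=True)
assert not should_read_new_tasks(enable_mm=True, prefill_pending=True)
assert should_read_new_tasks(enable_mm=True, prefill_pending=False)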
@@ -346,7 +346,6 @@ class PaddleDisWorkerProc:
             # Execute model to generate token. The generated token will be written to the buffer.
             # These generated tokens can be obtained through get_output op.
             self.worker.execute_model(req_dicts)
-            if not self.fd_config.model_config.enable_mm:
-                self.exist_prefill_task_signal.value[0] = self.worker.prefill_finished()
+            self.exist_prefill_task_signal.value[0] = self.worker.prefill_finished()

     def initialize_kv_cache(self) -> None:
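With the enable_mm guard removed, the prefill-exists flag is published after every step for multimodal and text-only models alike. A minimal sketch of that post-step bookkeeping, assuming exist_prefill_task_signal wraps a shared one-element array readable by other processes:

# Hedged sketch; names follow the hunk above, the wrapper type is an assumption.
def run_one_step(worker, exist_prefill_task_signal, req_dicts):
    # Execute the model, then unconditionally publish whether any request is
    # still in the prefill stage so other components can gate on it.
    worker.execute_model(req_dicts)
    exist_prefill_task_signal.value[0] = worker.prefill_finished()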