[BugFix] Add prefill restrictions for chunked_prefill+VL (#2983)

Zero Rains
2025-07-23 16:45:57 +08:00
committed by GitHub
parent a39a67334c
commit 850c9d98d4
2 changed files with 18 additions and 9 deletions
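A minimal, hypothetical sketch of the gating this commit introduces (see the diff below). The class WorkerLoopSketch, the _prefill_in_flight flag, and should_dispatch_new_tasks are illustrative names only; enable_mm and prefill_finished mirror the attributes that appear in the diff. This is not the FastDeploy implementation itself.

# sketch.py — illustrates "VL models only keep one prefill batch in flight"
class WorkerLoopSketch:
    def __init__(self, enable_mm: bool):
        # True when serving a VL (multimodal) model.
        self.enable_mm = enable_mm
        # In the real worker this state comes from the model runner; here it is a plain flag.
        self._prefill_in_flight = False

    def prefill_finished(self) -> bool:
        # Stand-in for self.worker.prefill_finished() in the diff below.
        return not self._prefill_in_flight

    def should_dispatch_new_tasks(self) -> bool:
        # Under chunked prefill, VL models are restricted to one in-flight prefill batch:
        # new tasks are only signalled once the previous prefill has finished.
        # Text-only models keep the old behaviour and dispatch unconditionally.
        return (not self.enable_mm) or self.prefill_finished()


if __name__ == "__main__":
    worker = WorkerLoopSketch(enable_mm=True)
    worker._prefill_in_flight = True
    print(worker.should_dispatch_new_tasks())  # False: wait for the current prefill
    worker._prefill_in_flight = False
    print(worker.should_dispatch_new_tasks())  # True: safe to take the next batch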


@@ -285,10 +285,12 @@ class PaddleDisWorkerProc:
             # The first worker detects whether there are tasks in the task queue
             if self.local_rank % mp_num_per_node == 0:
                 if self.task_queue.num_tasks() > 0:
-                    if self.nnode > 1:
-                        self.task_queue.read_finish_flag.set(1)
-                    else:
-                        self.exist_task_signal.value[self.fd_config.parallel_config.expert_parallel_rank] = 1
+                    # VL only support 1 batch to prefill
+                    if not self.fd_config.model_config.enable_mm or self.worker.prefill_finished():
+                        if self.nnode > 1:
+                            self.task_queue.read_finish_flag.set(1)
+                        else:
+                            self.exist_task_signal.value[self.fd_config.parallel_config.expert_parallel_rank] = 1
 
             if self.parallel_config.tensor_parallel_size > 1:
                 # Synchronize the signal for other workers
@@ -344,8 +346,8 @@ class PaddleDisWorkerProc:
                 # Execute model to generate token. The generated token will be written to the buffer.
                 # These generated tokens can be obtained through get_output op.
                 self.worker.execute_model(req_dicts)
-                self.exist_prefill_task_signal.value[0] = self.worker.prefill_finished()
+                if not self.fd_config.model_config.enable_mm:
+                    self.exist_prefill_task_signal.value[0] = self.worker.prefill_finished()
 
     def initialize_kv_cache(self) -> None:
         """Profiles the peak memory usage of the model to determine how many