Mirror of https://github.com/PaddlePaddle/FastDeploy.git (synced 2025-10-06 00:57:33 +08:00)

The diff below renames prefill_finished() to exist_prefill() across the device model runners and workers (GCU, GPU, Iluvatar, XPU), in WorkerBase, and in PaddleDisWorkerProc, updating the docstrings to match.
@@ -105,9 +105,9 @@ class GCUModelRunner(ModelRunnerBase):
             self.local_rank + int(self.parallel_config.engine_worker_queue_port)
         )
 
-    def prefill_finished(self):
+    def exist_prefill(self):
         """
-        check whether prefill stage finished
+        check whether prefill stage exist
         """
         if int(paddle.max(self.share_inputs["seq_lens_encoder"])) != 0:
             return 1

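Taken on its own, the renamed check is simple: a nonzero entry in seq_lens_encoder means some batch slot still has prompt tokens to encode. A minimal standalone sketch of that logic follows; the sample tensor values are hypothetical, and the falsey fall-through is an assumption (the diff shows only the return 1 branch):

import paddle

# Hypothetical batch: slot 1 still has 128 prompt tokens left to prefill.
share_inputs = {"seq_lens_encoder": paddle.to_tensor([0, 128, 0])}

def exist_prefill(share_inputs):
    # Any nonzero encoder length means at least one request is in prefill.
    if int(paddle.max(share_inputs["seq_lens_encoder"])) != 0:
        return 1
    return 0  # assumed fall-through; the diff shows only the truthy branch

print(exist_prefill(share_inputs))  # -> 1
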
@@ -69,11 +69,11 @@ class GcuWorker(WorkerBase):
             local_rank=self.local_rank,
         )
 
-    def prefill_finished(self):
+    def exist_prefill(self):
         """
-        check whether prefill stage finished
+        check whether prefill stage exist
         """
-        return self.model_runner.prefill_finished()
+        return self.model_runner.exist_prefill()
 
     def determine_available_memory(self) -> int:
         """

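The worker-side half of the rename is pure delegation: each device worker forwards exist_prefill() to its model runner. A sketch of that pattern with stand-in classes (illustrative names, not the real FastDeploy types):

class RunnerStub:
    """Stand-in for a device model runner."""

    def __init__(self, prefilling: bool):
        self._prefilling = prefilling

    def exist_prefill(self) -> int:
        return 1 if self._prefilling else 0

class WorkerStub:
    """Stand-in for GcuWorker/GpuWorker/XpuWorker: one-line delegation."""

    def __init__(self, runner: RunnerStub):
        self.model_runner = runner

    def exist_prefill(self) -> int:
        # Same shape as the renamed method in the diff above.
        return self.model_runner.exist_prefill()

print(WorkerStub(RunnerStub(True)).exist_prefill())  # -> 1
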
@@ -148,9 +148,9 @@ class GPUModelRunner(ModelRunnerBase):
             self.local_rank + int(self.parallel_config.engine_worker_queue_port)
         )
 
-    def prefill_finished(self):
+    def exist_prefill(self):
         """
-        Check whether prefill stage finished
+        check whether prefill stage exist
         """
         if int(paddle.max(self.share_inputs["seq_lens_encoder"])) != 0:
             return 1

@@ -78,11 +78,11 @@ class GpuWorker(WorkerBase):
             local_rank=self.local_rank,
         )
 
-    def prefill_finished(self):
+    def exist_prefill(self):
         """
-        Check whether prefill stage finished
+        check whether prefill stage exist
         """
-        return self.model_runner.prefill_finished()
+        return self.model_runner.exist_prefill()
 
     def determine_available_memory(self) -> int:
         """

@@ -105,9 +105,9 @@ class IluvatarModelRunner(ModelRunnerBase):
             self.local_rank + int(self.parallel_config.engine_worker_queue_port)
         )
 
-    def prefill_finished(self):
+    def exist_prefill(self):
         """
-        check whether prefill stage finished
+        check whether prefill stage exist
         """
         if int(paddle.max(self.share_inputs["seq_lens_encoder"])) != 0:
             return 1

@@ -69,11 +69,11 @@ class IluvatarWorker(WorkerBase):
             local_rank=self.local_rank,
         )
 
-    def prefill_finished(self):
+    def exist_prefill(self):
         """
-        check whether prefill stage finished
+        check whether prefill stage exist
         """
-        return self.model_runner.prefill_finished()
+        return self.model_runner.exist_prefill()
 
     def determine_available_memory(self) -> int:
         """

@@ -96,6 +96,6 @@ class WorkerBase(ABC):
         """Basic health check (override for device-specific checks)."""
         return NotImplementedError
 
-    def prefill_finished(self):
-        """check whether prefill stage finished."""
+    def exist_prefill(self):
+        """check whether prefill stage exist."""
         return True

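The base class keeps a conservative default: unless a device-specific subclass overrides it, exist_prefill() answers True. A minimal sketch of that contract, with an illustrative class name:

from abc import ABC

class WorkerBaseSketch(ABC):
    """Illustrative stand-in for WorkerBase, not the real class."""

    def exist_prefill(self):
        """check whether prefill stage exist."""
        # Conservative default: assume prefill work may exist until a
        # device-specific subclass supplies a real check.
        return True
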
@@ -286,7 +286,7 @@ class PaddleDisWorkerProc:
             if self.local_rank % mp_num_per_node == 0:
                 if self.task_queue.num_tasks() > 0:
                     # VL only support 1 batch to prefill
-                    if not self.fd_config.model_config.enable_mm or not self.worker.prefill_finished():
+                    if not self.fd_config.model_config.enable_mm or not self.worker.exist_prefill():
                         if self.nnode > 1:
                             self.task_queue.read_finish_flag.set(1)
                         else:

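This renamed call gates task fetching: multimodal (enable_mm) models prefill only one batch at a time, so new tasks are pulled from the queue only when no prefill is in flight. A hedged sketch of the predicate, with hypothetical parameter names:

def should_fetch_new_tasks(enable_mm: bool, worker_exist_prefill: bool) -> bool:
    # Text-only models may always fetch; VL models must wait until the
    # current prefill batch has drained.
    return (not enable_mm) or (not worker_exist_prefill)

assert should_fetch_new_tasks(False, True)      # text-only: fetch anyway
assert not should_fetch_new_tasks(True, True)   # VL mid-prefill: wait
assert should_fetch_new_tasks(True, False)      # VL idle: fetch
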
@@ -346,7 +346,7 @@ class PaddleDisWorkerProc:
             # Execute model to generate token. The generated token will be written to the buffer.
             # These generated tokens can be obtained through get_output op.
             self.worker.execute_model(req_dicts)
-            self.exist_prefill_task_signal.value[0] = self.worker.prefill_finished()
+            self.exist_prefill_task_signal.value[0] = self.worker.exist_prefill()
 
     def initialize_kv_cache(self) -> None:
         """Profiles the peak memory usage of the model to determine how many

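After each step the worker publishes its prefill status into a shared signal slot that the scheduling side polls. The sketch below uses a plain list as a stand-in for the real cross-process signal buffer (an assumption, not the actual IPC type):

exist_prefill_task_signal = [0]  # stand-in for the shared signal buffer

def run_step(worker_exist_prefill: bool) -> None:
    # Publish 1 while any request is still prefilling, 0 otherwise.
    exist_prefill_task_signal[0] = int(worker_exist_prefill)

run_step(True)
print(exist_prefill_task_signal[0])  # -> 1
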
@@ -584,9 +584,9 @@ class XPUModelRunner(ModelRunnerBase):
             logger.warn("XPU not support cuda graph currently")
             pass
 
-    def prefill_finished(self):
+    def exist_prefill(self):
         """
-        check whether prefill stage finished
+        check whether prefill stage exist
         """
         if int(paddle.max(self.share_inputs["seq_lens_encoder"])) != 0:
             return 1

@@ -143,11 +143,11 @@ class XpuWorker(WorkerBase):
         output = self.model_runner.execute_model(model_forward_batch)
         return output
 
-    def prefill_finished(self):
+    def exist_prefill(self):
         """
-        check whether prefill stage finished
+        check whether prefill stage exist
         """
-        return self.model_runner.prefill_finished()
+        return self.model_runner.exist_prefill()
 
     def preprocess_new_task(self, req_dicts: List[Request]) -> None:
         """Process new requests and then start the decode loop