Mirror of https://github.com/PaddlePaddle/FastDeploy.git (synced 2025-10-06 00:57:33 +08:00)
[Bug Fix] fix vl V1 schedule bug (#3284)
Co-authored-by: Jiang-Jia-Jun <163579578+Jiang-Jia-Jun@users.noreply.github.com>
Co-authored-by: YUNSHEN XIE <1084314248@qq.com>
@@ -195,7 +195,6 @@ class ResourceManagerV1(ResourceManager):
                     )
                     request.num_image_end = img_num_per_boundary[new_boundary_idx]
-                    request.num_image_end = img_num_per_boundary[new_boundary_idx]
 
                 request.image_type_ids_start = np.sum(grid_thw[: request.num_image_start, 0])
                 request.image_type_ids_end = np.sum(grid_thw[: request.num_image_end, 0])
                 request.image_start = np.sum(np.prod(grid_thw[: request.num_image_start], axis=1))
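The hunk above drops a duplicated `request.num_image_end` assignment and leaves the offset arithmetic intact. A minimal sketch of what those prefix sums compute, using a made-up `grid_thw` (each row is one image's temporal/height/width patch grid) and hypothetical start/end indices; `image_end` is included here only as the analogous end offset and is not visible in the captured hunk:

import numpy as np

# Hypothetical values for illustration; each grid_thw row is an image's
# (temporal, height, width) patch grid, as in the scheduler code above.
grid_thw = np.array([
    [1, 4, 4],  # image 0 -> 1 * 4 * 4 = 16 patch tokens
    [2, 4, 4],  # image 1 -> 2 * 4 * 4 = 32 patch tokens
    [1, 2, 2],  # image 2 -> 1 * 2 * 2 = 4 patch tokens
])
num_image_start, num_image_end = 1, 3  # assume images 1..2 are scheduled

# Type-id offsets are prefix sums over the temporal column, matching
# np.sum(grid_thw[:k, 0]) in the hunk.
image_type_ids_start = np.sum(grid_thw[:num_image_start, 0])  # -> 1
image_type_ids_end = np.sum(grid_thw[:num_image_end, 0])      # -> 4

# Patch-token offsets multiply out each grid before summing, matching
# np.sum(np.prod(grid_thw[:k], axis=1)) in the hunk.
image_start = np.sum(np.prod(grid_thw[:num_image_start], axis=1))  # -> 16
image_end = np.sum(np.prod(grid_thw[:num_image_end], axis=1))      # -> 52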
@@ -24,6 +24,7 @@ import paddle
 import paddle.distributed as dist
 from paddle.distributed import fleet
 
+from fastdeploy import envs
 from fastdeploy.config import (
     CacheConfig,
     DecodingConfig,
@@ -289,8 +290,9 @@ class PaddleDisWorkerProc:
         if self.local_rank % mp_num_per_node == 0:
             if self.task_queue.num_tasks() > 0:
                 # VL only support 1 batch to prefill
-                if not self.fd_config.model_config.enable_mm or not self.worker.exist_prefill():
+                if envs.ENABLE_V1_KVCACHE_SCHEDULER or not (
+                    self.fd_config.model_config.enable_mm and self.worker.exist_prefill()
+                ):
                     if self.nnode > 1 and self.parallel_config.tensor_parallel_size > self.max_chips_per_node:
                         self.task_queue.read_finish_flag.set(1)
                     else:
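Note that `not A or not B` and `not (A and B)` are equivalent by De Morgan's law, so the rewritten multimodal condition is purely stylistic; the behavioral change is the added `envs.ENABLE_V1_KVCACHE_SCHEDULER` short-circuit. A minimal sketch of the guard before and after, using stand-in booleans rather than the real `fd_config`/`envs` objects:

# Stand-in predicates for illustration; the real code reads these from
# envs, self.fd_config, and self.worker.
def should_read_tasks_old(enable_mm: bool, exist_prefill: bool) -> bool:
    # old guard: skip reading new tasks only while a multimodal prefill runs
    return not enable_mm or not exist_prefill

def should_read_tasks_new(enable_v1: bool, enable_mm: bool, exist_prefill: bool) -> bool:
    # new guard: under the V1 KV-cache scheduler, keep reading tasks; the
    # "one prefill batch at a time" restriction applies only to the old path
    return enable_v1 or not (enable_mm and exist_prefill)

# With the V1 scheduler enabled, an in-flight multimodal prefill no longer
# blocks task-queue reads:
assert should_read_tasks_old(True, True) is False
assert should_read_tasks_new(True, True, True) is True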