Mirror of https://github.com/PaddlePaddle/FastDeploy.git
[Feature] support ep in mixed mode (#3001)
* [LLM] support ep
* Update worker_process.py
* Update expert_service.py
* Update worker_process.py
* format files
@@ -225,6 +225,9 @@ class Config:
         else:
             self.is_master = False
 
+            if self.tensor_parallel_size <= self.worker_num_per_node:
+                self.is_master = True
+
         import paddle
 
         self.paddle_commit_id = paddle.version.commit
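To see what the added check does, here is a small self-contained sketch; `MiniConfig`, the pod-IP handling around it, and the example values are illustrative assumptions, and only the `tensor_parallel_size <= worker_num_per_node` rule is taken from the hunk above.

```python
# Standalone sketch of the master-election logic above; the field names
# (pod_ips, tensor_parallel_size, worker_num_per_node) mirror the diff,
# but the class itself is illustrative, not FastDeploy's Config.
class MiniConfig:
    def __init__(self, pod_ips, tensor_parallel_size, worker_num_per_node):
        self.tensor_parallel_size = tensor_parallel_size
        self.worker_num_per_node = worker_num_per_node

        if pod_ips is None or len(pod_ips) <= 1:
            self.is_master = True
        else:
            # Multi-node deployment: previously this branch always meant "not master".
            self.is_master = False
            # New rule from the diff: if one node holds a full tensor-parallel
            # group, the node can act as master for its own engine instance.
            if self.tensor_parallel_size <= self.worker_num_per_node:
                self.is_master = True


# Two-node EP deployment, TP=8 fits on one 8-GPU node -> master locally.
cfg = MiniConfig(pod_ips=["10.0.0.1", "10.0.0.2"], tensor_parallel_size=8, worker_num_per_node=8)
print(cfg.is_master)  # True

# TP=16 spans both nodes -> this node is not the master.
cfg = MiniConfig(pod_ips=["10.0.0.1", "10.0.0.2"], tensor_parallel_size=16, worker_num_per_node=8)
print(cfg.is_master)  # False
```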
@@ -243,38 +243,38 @@ class LLMEngine:
             self.splitwise_receive_thread.daemon = True
             self.splitwise_receive_thread.start()
 
         self.cfg.init_cache_info()
 
         role = self.cfg.splitwise_role
         host_ip = self.cfg.host_ip
         disaggregate = self.cfg.disaggregate_info
         if self.cfg.scheduler_config.name == "splitwise":
             self.scheduler.start(role, host_ip, disaggregate)
 
         time.sleep(1)
 
         if self.cfg.parallel_config.enable_expert_parallel and self.cfg.parallel_config.data_parallel_size > 1:
             self.dp_processed = []
             for i in range(
                 1,
                 self.cfg.parallel_config.data_parallel_size // self.cfg.nnode,
             ):
                 time.sleep(1)
                 self.dp_processed.append(
                     multiprocessing.Process(
                         target=start_expert_service,
                         args=(
                             self.cfg,
                             i + self.cfg.node_rank * self.cfg.worker_num_per_node,
                             self.ipc_signal_suffix,
                         ),
                     )
                 )
                 llm_logger.info(
                     f"Engine is initialized successfully with {self.cfg.tensor_parallel_size}"
                     + f" data parallel id {i}"
                 )
                 self.dp_processed[-1].start()
 
         console_logger.info(f"Worker processes are launched with {time.time() - start_time} seconds.")
         return True
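In the extracted view both sides of this hunk carry the same statements, so the substance of the engine's data-parallel expert-service launch appears unchanged here. For reference, a runnable sketch of what that loop does follows; `start_expert_service` and the dictionary-style config are stand-ins, not FastDeploy's APIs.

```python
# Minimal sketch of the data-parallel expert-service launch loop shown above.
import multiprocessing
import time


def start_expert_service(cfg, local_dp_rank, ipc_signal_suffix):
    # Stand-in for FastDeploy's expert_service entry point.
    print(f"expert service for DP rank {local_dp_rank}, suffix={ipc_signal_suffix}")


def launch_local_expert_services(cfg):
    """Spawn one process per local data-parallel rank other than rank 0,
    which is served by the main engine process itself."""
    dp_processed = []
    dp_per_node = cfg["data_parallel_size"] // cfg["nnode"]
    for i in range(1, dp_per_node):
        time.sleep(1)  # stagger startup, as in the engine code
        p = multiprocessing.Process(
            target=start_expert_service,
            args=(cfg, i + cfg["node_rank"] * cfg["worker_num_per_node"], cfg["ipc_signal_suffix"]),
        )
        dp_processed.append(p)
        p.start()
    return dp_processed


if __name__ == "__main__":
    cfg = {"data_parallel_size": 4, "nnode": 2, "node_rank": 0, "worker_num_per_node": 8, "ipc_signal_suffix": 8000}
    for p in launch_local_expert_services(cfg):
        p.join()
```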
@@ -50,9 +50,10 @@ class ExpertService:
             cfg (Config): Config object containing all the configuration parameters.
         """
         self.cfg = cfg
-        start_pos = (local_data_parallel_id * self.cfg.tensor_parallel_size) % self.cfg.worker_num_per_node
-        end_pos = ((local_data_parallel_id + 1) * self.cfg.tensor_parallel_size) % self.cfg.worker_num_per_node
-        self.cfg.cache_config.rdma_comm_ports = self.cfg.cache_config.rdma_comm_ports[start_pos:end_pos]
+        start_pos = (local_data_parallel_id * self.cfg.tensor_parallel_size) % cfg.worker_num_per_node
+        end_pos = start_pos + self.cfg.tensor_parallel_size
+        if cfg.splitwise_role != "mixed":
+            self.cfg.cache_config.rdma_comm_ports = self.cfg.cache_config.rdma_comm_ports[start_pos:end_pos]
         self.cfg.local_device_ids = self.cfg.device_ids.split(",")[start_pos:end_pos]
         self.cfg.parallel_config.local_data_parallel_id = local_data_parallel_id
         self.cfg.disaggregate_info = None
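A short worked example of the `start_pos`/`end_pos` change, using made-up numbers: with the old modulo formula the last data-parallel rank on a node wrapped `end_pos` back to 0 and got an empty device slice, while the new plain offset keeps the slice intact.

```python
# Worked example of the start_pos / end_pos change above, outside FastDeploy.
worker_num_per_node = 8
tensor_parallel_size = 4
device_ids = ["0", "1", "2", "3", "4", "5", "6", "7"]

for local_dp_rank in range(2):
    start_pos = (local_dp_rank * tensor_parallel_size) % worker_num_per_node
    old_end_pos = ((local_dp_rank + 1) * tensor_parallel_size) % worker_num_per_node
    new_end_pos = start_pos + tensor_parallel_size
    print(local_dp_rank, device_ids[start_pos:old_end_pos], device_ids[start_pos:new_end_pos])

# local_dp_rank=0: old slice ['0','1','2','3'], new slice ['0','1','2','3']
# local_dp_rank=1: old end_pos wraps to 0 -> [] ; new slice ['4','5','6','7']
```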
@@ -78,11 +79,13 @@ class ExpertService:
             cfg.splitwise_role,
             local_data_parallel_id,
         )
-        if len(self.cfg.cache_config.pd_comm_port) == 1:
-            self.cfg.cache_config.pd_comm_port[0] = int(self.cfg.cache_config.pd_comm_port[0]) + local_data_parallel_id
-        else:
-            self.cfg.cache_config.pd_comm_port = [self.cfg.cache_config.pd_comm_port[local_data_parallel_id]]
+        if cfg.splitwise_role != "mixed":
+            if len(self.cfg.cache_config.pd_comm_port) == 1:
+                self.cfg.cache_config.pd_comm_port[0] = (
+                    int(self.cfg.cache_config.pd_comm_port[0]) + local_data_parallel_id
+                )
+            else:
+                self.cfg.cache_config.pd_comm_port = [self.cfg.cache_config.pd_comm_port[local_data_parallel_id]]
 
         self.split_connector = SplitwiseConnector(
             self.cfg,
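A minimal sketch of the `pd_comm_port` selection above, written as a free function for illustration (it is not part of the ExpertService API): a single configured port is offset by the local data-parallel id, a list of ports is indexed by it, and mixed mode now skips the step entirely.

```python
# Sketch of the pd_comm_port selection logic from the hunk above.
def select_pd_comm_port(pd_comm_port, local_data_parallel_id, splitwise_role):
    if splitwise_role == "mixed":
        return pd_comm_port  # no prefill/decode connector port needed in mixed mode
    if len(pd_comm_port) == 1:
        return [int(pd_comm_port[0]) + local_data_parallel_id]
    return [pd_comm_port[local_data_parallel_id]]


print(select_pd_comm_port(["8201"], 3, "prefill"))           # [8204]
print(select_pd_comm_port([8201, 8301, 8401], 1, "decode"))  # [8301]
print(select_pd_comm_port(["8201"], 3, "mixed"))             # ["8201"]
```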
@@ -119,15 +122,16 @@ class ExpertService:
         start_time = time.time()
 
         llm_logger.info(f"start expert service {local_data_parallel_id}")
-        self.cache_manager_processes = self.resource_manager.cache_manager.launch_cache_manager(
-            cache_config=self.cfg.cache_config,
-            tensor_parallel_size=self.cfg.tensor_parallel_size,
-            device_ids=self.cfg.local_device_ids,
-            pod_ip=self.cfg.master_ip,
-            engine_worker_queue_port=self.cfg.engine_worker_queue_port,
-            pid_suffix=f"{local_data_parallel_id}_{ipc_signal_suffix}",
-        )
+        if self.cfg.splitwise_role != "mixed":
+            self.cache_manager_processes = self.resource_manager.cache_manager.launch_cache_manager(
+                cache_config=self.cfg.cache_config,
+                tensor_parallel_size=self.cfg.tensor_parallel_size,
+                device_ids=self.cfg.local_device_ids,
+                pod_ip=self.cfg.pod_ips[0],
+                engine_worker_queue_port=self.cfg.engine_worker_queue_port,
+                pid_suffix=f"{local_data_parallel_id}_{ipc_signal_suffix}",
+            )
+            self.split_mode_get_tasks()
 
         self.insert_task_to_worker_thread = threading.Thread(target=self._insert_task_to_worker, args=())
         self.insert_task_to_worker_thread.daemon = True
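A reduced sketch of the mixed-mode gating above, with stand-in callables: the prefill/decode cache manager and the splitwise task loop are only started for disaggregated roles, while a mixed-mode expert service proceeds straight to task insertion.

```python
# Illustrative reduction of the start-up gating above (stand-in callables,
# not the ExpertService API).
def start_expert_service_components(splitwise_role, launch_cache_manager, split_mode_get_tasks):
    if splitwise_role != "mixed":
        launch_cache_manager()   # KV-cache transfer between prefill and decode instances
        split_mode_get_tasks()   # pull tasks routed by the splitwise scheduler
    # mixed mode: prefill and decode run in the same instance, nothing extra to start


start_expert_service_components(
    "mixed",
    launch_cache_manager=lambda: print("launch cache manager"),
    split_mode_get_tasks=lambda: print("start splitwise task loop"),
)
# Prints nothing: a mixed-mode expert service skips both components.
```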
@@ -138,8 +142,6 @@ class ExpertService:
 
         self.token_processor.run()
 
-        self.split_mode_get_tasks()
-
         self.cfg.init_cache_info()
 
         role = self.cfg.splitwise_role
@@ -321,13 +323,13 @@ class ExpertService:
                 else:
                     is_prefill = True
                     self.token_processor.number_of_input_tokens += tasks[i].prompt_token_ids_len
 
-            self.split_connector.send_cache_infos(tasks, current_id)
+            if is_decode or is_prefill:
+                self.split_connector.send_cache_infos(tasks, current_id)
             for task in tasks:
                 task.infer_start_time = time.time()
             if not is_decode:
                 llm_logger.info(f"Tasks are sent to engine, req_ids={req_ids}")
-                if not is_prefill:
+                if not is_prefill and self.cfg.cache_config.enable_chunked_prefill:
                     if not self.cfg.enable_mm:
                         self.update_requests_chunk_size(tasks)
                     else:
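An illustrative reduction of the dispatch logic above (the callables are stand-ins and the `enable_mm` branch is omitted): cache info is forwarded to the splitwise connector only when the batch actually contains prefill or decode work, and chunk sizes are recomputed only when chunked prefill is enabled.

```python
# Reduced sketch of the task-dispatch gating from the hunk above.
def dispatch(tasks, is_decode, is_prefill, enable_chunked_prefill, send_cache_infos, update_chunk_size):
    if is_decode or is_prefill:
        send_cache_infos(tasks)
    if not is_decode:
        if not is_prefill and enable_chunked_prefill:
            update_chunk_size(tasks)


dispatch(
    tasks=["req-0"],
    is_decode=False,
    is_prefill=False,
    enable_chunked_prefill=True,
    send_cache_infos=lambda t: print("send cache infos", t),
    update_chunk_size=lambda t: print("update chunk size", t),
)
# Only "update chunk size ['req-0']" is printed: a plain mixed-mode batch
# never reaches the splitwise connector.
```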
@@ -283,14 +283,15 @@ class PaddleDisWorkerProc:
                 paddle.distributed.barrier()
 
             self.insert_step = False
-            self.worker_healthy_live_signal.value[self.local_rank] = int(time.time())
+            self.worker_healthy_live_signal.value[self.local_rank % self.max_chips_per_node] = int(time.time())
 
             # The first worker detects whether there are tasks in the task queue
             if self.local_rank % mp_num_per_node == 0:
                 if self.task_queue.num_tasks() > 0:
                     # VL only support 1 batch to prefill
 
                     if not self.fd_config.model_config.enable_mm or not self.worker.exist_prefill():
-                        if self.nnode > 1:
+                        if self.nnode > 1 and self.parallel_config.tensor_parallel_size > self.max_chips_per_node:
                             self.task_queue.read_finish_flag.set(1)
                         else:
                             self.exist_task_signal.value[self.fd_config.parallel_config.expert_parallel_rank] = 1
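A worked example of the heartbeat indexing change, assuming an 8-chip node and a plain Python list in place of the shared IPC array: with expert parallelism spanning nodes the global rank can exceed the per-node signal array, so each worker now writes into its node-local slot. The widened `nnode > 1 and tensor_parallel_size > max_chips_per_node` condition looks like the same idea applied to the task-queue flag, falling back to the cross-node read flag only when a tensor-parallel group actually spans machines.

```python
# Worked example of the heartbeat indexing change above: the shared
# worker_healthy_live_signal array has one slot per local chip, so a
# worker's global rank must be folded back into the node-local range.
import time

max_chips_per_node = 8
worker_healthy_live_signal = [0] * max_chips_per_node  # stand-in for the IPC shared array

for local_rank in (0, 7, 8, 15):  # ranks on a 2-node, 8-GPU-per-node job
    slot = local_rank % max_chips_per_node
    worker_healthy_live_signal[slot] = int(time.time())
    print(f"rank {local_rank} -> slot {slot}")
# Ranks 8 and 15 land in slots 0 and 7 of their own node's array instead of
# indexing past the end, which the old value[self.local_rank] would do.
```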