[Feature]CP support data clear (#4214)

* Update serving_chat.py

* Update serving_completion.py

* Update serving_completion.py

* mv connection_manager init

* [BugFix] fix kv cache

* fix format

* [Feature] support clear data

---------

Co-authored-by: Yuanle Liu <yuanlehome@163.com>
Co-authored-by: RAM <gstian5555@outlook.com>
Author: ltd0924 (committed by GitHub)
Date: 2025-09-23 16:53:39 +08:00
Parent: f38b174a75
Commit: de4feff147
10 changed files with 65 additions and 0 deletions
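At a glance, the commit wires a clear-data path from the reset handler down to the engine worker queue and token processor, and guards the streaming serving loops against an in-progress weight clear. Below is a condensed, self-contained sketch of that call chain; the stub classes are stand-ins (not FastDeploy's real classes) and only mirror the methods shown in the hunks that follow.

# Condensed sketch of the clear-data flow added in this commit (stand-in stubs only).
class EngineWorkerQueueStub:
    def __init__(self, num_client: int = 1):
        self.num_client = num_client
        self.tasks = ["pending-task"]
        self.client_read_flag = [0] * num_client

    def clear_data(self):
        # Mirrors EngineWorkerQueue.clear_data: drop queued tasks, mark clients as "read".
        self.tasks[:] = []
        self.client_read_flag[:] = [1] * self.num_client


class TokenProcessorStub:
    def clear_data(self):
        pass  # Recycles any in-flight requests (see TokenProcessor.clear_data below).


class EngineServiceStub:
    def __init__(self):
        self.token_processor = TokenProcessorStub()
        self.engine_worker_queue = EngineWorkerQueueStub()
        self.req_dict = {"req-1": object()}  # Stands in for zmq_server.req_dict.

    def clear_data(self) -> bool:
        # Mirrors EngineSevice.clear_data: drain processor, queue, and request dict.
        try:
            self.token_processor.clear_data()
            self.engine_worker_queue.clear_data()
            self.req_dict.clear()
            return True
        except Exception:
            return False


assert EngineServiceStub().clear_data() is True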

View File

@@ -801,6 +801,18 @@ class EngineSevice:
     def check_and_free_block_tables(self):
         self.resource_manager.check_and_free_block_tables()

+    def clear_data(self):
+        try:
+            llm_logger.info("Clear Data: Start")
+            self.token_processor.clear_data()
+            self.engine_worker_queue.clear_data()
+            self.zmq_server.req_dict.clear()
+            llm_logger.info("Clear Data: Successfully")
+            return True
+        except Exception as e:
+            llm_logger.error(f"Clear data error: {e}")
+            return False
+
     def _exit_sub_services(self):
         """
         exit sub services

View File

@@ -512,6 +512,10 @@ class ResourceManagerV1(ResourceManager):
     def finish_requests_async(self, request_ids: Union[str, Iterable[str]]):
         return self.finish_execution_pool.submit(self.finish_requests, request_ids)

+    def clear_data(self):
+        self.waiting: deque[Request] = deque()
+        self.to_be_rescheduled_request_id_set = set()
+
     def finish_requests(self, request_ids: Union[str, Iterable[str]]):
         llm_logger.info(f"recycle resources for requests: {request_ids}")
         try:

View File

@@ -141,6 +141,9 @@ class EngineClient:
         self.zmq_client = ZmqIpcClient(model, mode)
         self.zmq_client.connect()

+    def check_model_weight_status(self):
+        return self.model_weights_status_signal.value[0] < 0
+
     async def format_and_add_data(self, prompts: dict):
         """
         Format the request data and send the request to the server.

View File

@@ -480,6 +480,7 @@ def reset_scheduler():
     if llm_engine is None:
         return Response("Engine not loaded", status_code=500)
+    llm_engine.engine.clear_data()
     llm_engine.engine.scheduler.reset()
     return Response("Scheduler Reset Successfully", status_code=200)

View File

@@ -210,6 +210,8 @@ class OpenAIServingChat:
             decoder_base_url=self.tokenizer_base_url,
         )
         while num_choices > 0:
+            if self.engine_client.check_model_weight_status():
+                raise ValueError("Engine is clearing model weight")
             try:
                 response = await asyncio.wait_for(response_queue.get(), timeout=10)
                 current_waiting_time = 0
@@ -425,6 +427,8 @@ class OpenAIServingChat:
             decoder_base_url=self.tokenizer_base_url,
         )
         while True:
+            if self.engine_client.check_model_weight_status():
+                raise ValueError("Engine is clearing model weight")
             try:
                 response = await asyncio.wait_for(response_queue.get(), timeout=10)
                 current_waiting_time = 0

View File

@@ -216,6 +216,8 @@ class OpenAIServingCompletion:
         completion_batched_token_ids = [[] for _ in range(num_choices)]
         current_waiting_time = 0
         while num_choices > 0:
+            if self.engine_client.check_model_weight_status():
+                raise ValueError("Engine is clearing model weight")
             try:
                 response = await asyncio.wait_for(response_queue.get(), timeout=10)
                 current_waiting_time = 0
@@ -333,6 +335,8 @@ class OpenAIServingCompletion:
         )
         current_waiting_time = 0
         while num_choices > 0:
+            if self.engine_client.check_model_weight_status():
+                raise ValueError("Engine is clearing model weight")
             try:
                 response = await asyncio.wait_for(response_queue.get(), timeout=10)
                 current_waiting_time = 0

View File

@@ -392,6 +392,13 @@ class EngineWorkerQueue:
         llm_logger.debug("get tasks from queue success")
         return item

+    def clear_data(self):
+        self.lock.acquire()
+        self.tasks[:] = list()
+        self.client_read_flag[:] = [1] * self.num_client
+        self.lock.release()
+        llm_logger.info("clear data for engine worker queue")
+
     def cleanup(self):
         """
         Exit the worker queue gracefully.
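One design note on the hunk above: with explicit acquire()/release(), an exception between the two calls would leave the lock held. A minimal stand-in showing the equivalent context-manager form, assuming the lock supports the with-statement protocol as threading and multiprocessing locks do (the real queue uses shared manager objects; plain lists are used here only for the sketch):

import threading

class EngineWorkerQueueSketch:
    """Stand-in illustrating a context-manager variant of clear_data."""

    def __init__(self, num_client: int = 2):
        self.lock = threading.Lock()
        self.num_client = num_client
        self.tasks = ["task-a", "task-b"]
        self.client_read_flag = [0] * num_client

    def clear_data(self):
        # "with" releases the lock even if resetting the shared lists raises,
        # unlike bare acquire()/release() without try/finally.
        with self.lock:
            self.tasks[:] = []
            self.client_read_flag[:] = [1] * self.num_client


q = EngineWorkerQueueSketch()
q.clear_data()
assert q.tasks == [] and q.client_read_flag == [1, 1]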

View File

@@ -464,6 +464,31 @@ class TokenProcessor:
             main_process_metrics.request_inference_time.observe(current_time - task.inference_start_time)
             main_process_metrics.request_generation_tokens.observe(self.tokens_counter[task.request_id])

+    def clear_data(self):
+        if envs.ENABLE_V1_KVCACHE_SCHEDULER:
+            self.resource_manager.clear_data()
+        for i in range(self.cfg.max_num_seqs):
+            if self.resource_manager.stop_flags[i]:
+                continue
+            task = self.resource_manager.tasks_list[i]
+            result = RequestOutput(
+                request_id=task.request_id,
+                outputs=CompletionOutput(
+                    index=i,
+                    send_idx=self.tokens_counter[task.request_id],
+                    token_ids=task.eos_token_ids,
+                    draft_token_ids=[],
+                ),
+                finished=True,
+                metrics=RequestMetrics(
+                    arrival_time=time.time(),
+                    request_start_time=task.arrival_time,
+                ),
+            )
+            is_prefill = task.disaggregate_info is not None and task.disaggregate_info["role"] == "prefill"
+            self._recycle_resources(task.request_id, i, task, result, is_prefill)
+            llm_logger.warning(f"clear data for task {task.request_id}")
+
     def _record_speculative_decoding_mertics(self, accept_num):
         """Record metrics of speculative decoding"""
         if not hasattr(main_process_metrics, "spec_decode_draft_acceptance_rate"):

View File

@@ -228,6 +228,7 @@ class DynamicWeightManager:
             logger.info("finished loading new checkpoint")
         elif model_weights_status.value[0] == ModelWeightsStatus.CLEARING:
             logger.info("infer engine stopped! start to clear checkpoint...")
+            model_runner.clear_requests()
            model_runner.clear_parameters(pid)
             while model_weights_status.value[0] != ModelWeightsStatus.CLEARED:
                 time.sleep(0.01)

View File

@@ -1704,6 +1704,10 @@ class GPUModelRunner(ModelRunnerBase):
             self.forward_meta.clear_caches()
         paddle.device.cuda.empty_cache()

+    def clear_requests(self):
+        """Dynamic model loader use to clear requests use for RL"""
+        self.share_inputs["stop_flags"][:] = True
+
     def clear_parameters(self, pid):
         """Dynamic model loader use to clear parameters use for RL"""
         # Clear CUDAGraph