mirror of
https://github.com/PaddlePaddle/FastDeploy.git
synced 2025-10-18 22:44:39 +08:00
[Feature] support clear data (#4185)
Some checks failed
CE Compile Job / ce_job_pre_check (push) Has been cancelled
CE Compile Job / print_ce_job_pre_check_outputs (push) Has been cancelled
CE Compile Job / FD-Clone-Linux (push) Has been cancelled
CE Compile Job / Show Code Archive Output (push) Has been cancelled
CE Compile Job / BUILD_SM8090 (push) Has been cancelled
CE Compile Job / BUILD_SM8689 (push) Has been cancelled
CE Compile Job / CE_UPLOAD (push) Has been cancelled
Some checks failed
CE Compile Job / ce_job_pre_check (push) Has been cancelled
CE Compile Job / print_ce_job_pre_check_outputs (push) Has been cancelled
CE Compile Job / FD-Clone-Linux (push) Has been cancelled
CE Compile Job / Show Code Archive Output (push) Has been cancelled
CE Compile Job / BUILD_SM8090 (push) Has been cancelled
CE Compile Job / BUILD_SM8689 (push) Has been cancelled
CE Compile Job / CE_UPLOAD (push) Has been cancelled
* fix
* fix
* fix
* [Feature] support clear data
* update
* fix
* fix
* fix
* fix
This commit is contained in:
@@ -478,6 +478,7 @@ def reset_scheduler():
    if llm_engine is None:
        return Response("Engine not loaded", status_code=500)
    llm_engine.engine.clear_data()
    llm_engine.engine.scheduler.reset()
    return Response("Scheduler Reset Successfully", status_code=200)
@@ -210,6 +210,8 @@ class OpenAIServingChat:
        decoder_base_url=self.tokenizer_base_url,
    )
    while num_choices > 0:
        if self.engine_client.check_model_weight_status():
            raise ValueError("Engine is clearing model weight")
        try:
            response = await asyncio.wait_for(response_queue.get(), timeout=10)
            current_waiting_time = 0
@@ -425,6 +427,8 @@ class OpenAIServingChat:
        decoder_base_url=self.tokenizer_base_url,
    )
    while True:
        if self.engine_client.check_model_weight_status():
            return ErrorResponse(code=400, message="Model weight cleared")
        try:
            response = await asyncio.wait_for(response_queue.get(), timeout=10)
            current_waiting_time = 0
@@ -513,6 +517,7 @@ class OpenAIServingChat:

    if final_res.get("error_msg") is not None and "Recover" in final_res["error_msg"]:
        choice.finish_reason = "recover_stop"

    choices.append(choice)

    num_prompt_tokens = len(prompt_token_ids)
@@ -216,6 +216,8 @@ class OpenAIServingCompletion:
    completion_batched_token_ids = [[] for _ in range(num_choices)]
    current_waiting_time = 0
    while num_choices > 0:
        if self.engine_client.check_model_weight_status():
            return ErrorResponse(message="Model weight cleared", code=400)
        try:
            response = await asyncio.wait_for(response_queue.get(), timeout=10)
            current_waiting_time = 0
@@ -270,7 +272,6 @@ class OpenAIServingCompletion:
            return res
    except Exception as e:
        api_server_logger.error(f"Error in completion_full_generator: {e}", exc_info=True)
        raise
    finally:
        self.engine_client.semaphore.release()
        if dealer is not None:
@@ -333,6 +334,8 @@ class OpenAIServingCompletion:
    )
    current_waiting_time = 0
    while num_choices > 0:
        if self.engine_client.check_model_weight_status():
            raise ValueError("Engine is clearing model weight")
        try:
            response = await asyncio.wait_for(response_queue.get(), timeout=10)
            current_waiting_time = 0
Reference in New Issue
Block a user