[BugFix] Api server bugs (#3530)

* Update serving_chat.py

* Update serving_completion.py

* Update serving_completion.py
Author: ltd0924
Date: 2025-08-22 14:01:14 +08:00
Committed by: GitHub
Parent: c18975366e
Commit: 4d6fb96cd6
2 changed files with 9 additions and 8 deletions

serving_chat.py

@@ -99,6 +99,7 @@ class OpenAIServingChat:
             if isinstance(prompt_token_ids, np.ndarray):
                 prompt_token_ids = prompt_token_ids.tolist()
         except Exception as e:
+            self.engine_client.semaphore.release()
             return ErrorResponse(code=400, message=str(e))
         del current_req_dict
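
The chat path now gives the concurrency slot back when prompt preprocessing fails; before this change the early ErrorResponse return left the semaphore permanently acquired. The sketch below is a minimal, self-contained illustration of that release-on-error pattern. DummyEngineClient and handle_request are hypothetical stand-ins, not FastDeploy APIs; only the semaphore handling mirrors the patch.

    import threading

    class DummyEngineClient:
        """Stand-in for the real engine client; only the semaphore matters here."""

        def __init__(self, max_concurrency: int = 2):
            self.semaphore = threading.Semaphore(max_concurrency)

        def format_and_add_data(self, req: dict):
            # Simulate preprocessing that can fail on bad input.
            if not req.get("prompt"):
                raise ValueError("empty prompt")
            return [1, 2, 3]

    def handle_request(engine_client: DummyEngineClient, req: dict):
        engine_client.semaphore.acquire()
        try:
            prompt_token_ids = engine_client.format_and_add_data(req)
        except Exception as e:
            # The fix: give the slot back on failure instead of leaking it.
            engine_client.semaphore.release()
            return {"code": 400, "message": str(e)}
        # On success the slot stays held while the request is served.
        return {"prompt_token_ids": prompt_token_ids}

    if __name__ == "__main__":
        client = DummyEngineClient()
        print(handle_request(client, {"prompt": "hello"}))
        print(handle_request(client, {"prompt": ""}))  # fails, but the slot is released

Without the release in the except branch, a handful of malformed requests would exhaust the semaphore and every later request would block on acquire.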

serving_completion.py

@@ -109,20 +109,20 @@ class OpenAIServingCompletion:
         except Exception:
             return ErrorResponse(code=408, message=f"Request queued time exceed {self.max_waiting_time}")

         try:
-            for idx, prompt in enumerate(request_prompts):
-                request_id_idx = f"{request_id}-{idx}"
-                current_req_dict = request.to_dict_for_infer(request_id_idx, prompt)
-                try:
+            try:
+                for idx, prompt in enumerate(request_prompts):
+                    request_id_idx = f"{request_id}-{idx}"
+                    current_req_dict = request.to_dict_for_infer(request_id_idx, prompt)
                     current_req_dict["arrival_time"] = time.time()
                     prompt_token_ids = self.engine_client.format_and_add_data(current_req_dict)
                     if isinstance(prompt_token_ids, np.ndarray):
                         prompt_token_ids = prompt_token_ids.tolist()
                     text_after_process_list.append(current_req_dict.get("text_after_process"))
                     prompt_batched_token_ids.append(prompt_token_ids)
-                except Exception as e:
-                    return ErrorResponse(message=str(e), code=400)
-                del current_req_dict
+                    del current_req_dict
+            except Exception as e:
+                self.engine_client.semaphore.release()
+                return ErrorResponse(message=str(e), code=400)

             if request.stream:
                 return self.completion_stream_generator(
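
In the completion path the per-prompt try/except inside the loop is replaced by a single try/except wrapping the whole batching loop, so any preprocessing failure both aborts the batch and releases the semaphore; previously the early return leaked the slot. The following is a rough, simplified restatement of the post-fix structure, not the actual FastDeploy method: engine_client and to_dict_for_infer are treated as opaque stand-ins.

    import time
    import numpy as np

    def preprocess_batch(engine_client, request_prompts, request_id, to_dict_for_infer):
        """Sketch of the post-fix shape: one try/except around the whole loop,
        releasing the concurrency slot if any prompt fails to preprocess."""
        prompt_batched_token_ids = []
        text_after_process_list = []
        try:
            for idx, prompt in enumerate(request_prompts):
                current_req_dict = to_dict_for_infer(f"{request_id}-{idx}", prompt)
                current_req_dict["arrival_time"] = time.time()
                prompt_token_ids = engine_client.format_and_add_data(current_req_dict)
                if isinstance(prompt_token_ids, np.ndarray):
                    prompt_token_ids = prompt_token_ids.tolist()
                text_after_process_list.append(current_req_dict.get("text_after_process"))
                prompt_batched_token_ids.append(prompt_token_ids)
                del current_req_dict
        except Exception as e:
            # Any failure aborts the whole batch and frees the concurrency slot.
            engine_client.semaphore.release()
            return {"code": 400, "message": str(e)}
        return prompt_batched_token_ids, text_after_process_list

On success there is deliberately no finally-style release here: the slot presumably stays held until the streaming or full-response generator finishes, which is why the semaphore is released only in the error branch.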