[Sync Code] Update vs branch (#3403)

* Pre-CE modified (#3335) (#3360)

* Pre-CE modified (#3335)

* update

* update

* fix

* fix

* update

* update

* update

* fix

* update

* update

* update

* add unit test fix (#3367)

* [Bug Fix] Fix V1 video bug (#3387)

* fix stopseq error info (#3342)

Co-authored-by: YuBaoku <49938469+EmmonsCurse@users.noreply.github.com>

* [BugFix] Fix default log level of paddleformers (#3377)

Co-authored-by: YuBaoku <49938469+EmmonsCurse@users.noreply.github.com>

* [Polish Code] Remove useless notes

* feat(log): add request and response log (#3392)

* Optimize CI execution workflow. (#3371) (#3384)

* fix

* [BugFix] fix control signal release failed (#3374)

* [BugFix]

* [BugFix]

* [BugFix]

* [BugFix]

* fix

* fix

---------

Co-authored-by: YuBaoku <49938469+EmmonsCurse@users.noreply.github.com>
Co-authored-by: Jiang-Jia-Jun <163579578+Jiang-Jia-Jun@users.noreply.github.com>

---------

Co-authored-by: YUNSHEN XIE <1084314248@qq.com>
Co-authored-by: ming1753 <61511741+ming1753@users.noreply.github.com>
Co-authored-by: JYChen <zoooo0820@qq.com>
Co-authored-by: YuBaoku <49938469+EmmonsCurse@users.noreply.github.com>
Co-authored-by: Jiang-Jia-Jun <jiangjiajun@baidu.com>
Co-authored-by: xiaolei373 <zley373@gmail.com>
Co-authored-by: ltd0924 <32387785+ltd0924@users.noreply.github.com>
Author: Jiang-Jia-Jun
Date: 2025-08-14 17:14:45 +08:00
Committed by: GitHub
Parent: 81092c0fe3
Commit: e11331927f
16 changed files with 329 additions and 155 deletions


@@ -78,45 +78,45 @@ class OpenAIServingChat:
             api_server_logger.error(err_msg)
             return ErrorResponse(message=err_msg, code=400)
-        if request.user is not None:
-            request_id = f"chatcmpl-{request.user}-{uuid.uuid4()}"
-        else:
-            request_id = f"chatcmpl-{uuid.uuid4()}"
-        api_server_logger.info(f"create chat completion request: {request_id}")
-        text_after_process = None
-        try:
-            current_req_dict = request.to_dict_for_infer(request_id)
-            current_req_dict["arrival_time"] = time.time()
-            prompt_token_ids = self.engine_client.format_and_add_data(current_req_dict)
-            text_after_process = current_req_dict.get("text_after_process")
-            if isinstance(prompt_token_ids, np.ndarray):
-                prompt_token_ids = prompt_token_ids.tolist()
-        except Exception as e:
-            return ErrorResponse(code=400, message=str(e))
-        del current_req_dict
         try:
             api_server_logger.debug(f"{self.engine_client.semaphore.status()}")
             if self.max_waiting_time < 0:
                 await self.engine_client.semaphore.acquire()
             else:
                 await asyncio.wait_for(self.engine_client.semaphore.acquire(), timeout=self.max_waiting_time)
-        except Exception:
-            return ErrorResponse(code=408, message=f"Request queued time exceed {self.max_waiting_time}")
-        api_server_logger.debug(f"current waiting request {self.engine_client.semaphore.status()}")
-        if request.stream:
-            return self.chat_completion_stream_generator(
-                request, request_id, request.model, prompt_token_ids, text_after_process
-            )
-        else:
+            if request.user is not None:
+                request_id = f"chatcmpl-{request.user}-{uuid.uuid4()}"
+            else:
+                request_id = f"chatcmpl-{uuid.uuid4()}"
+            api_server_logger.info(f"create chat completion request: {request_id}")
+            text_after_process = None
             try:
-                return await self.chat_completion_full_generator(
-                    request, request_id, request.model, prompt_token_ids, text_after_process
-                )
+                current_req_dict = request.to_dict_for_infer(request_id)
+                current_req_dict["arrival_time"] = time.time()
+                prompt_token_ids = self.engine_client.format_and_add_data(current_req_dict)
+                text_after_process = current_req_dict.get("text_after_process")
+                if isinstance(prompt_token_ids, np.ndarray):
+                    prompt_token_ids = prompt_token_ids.tolist()
             except Exception as e:
                 return ErrorResponse(code=400, message=str(e))
+            del current_req_dict
+            if request.stream:
+                return self.chat_completion_stream_generator(
+                    request, request_id, request.model, prompt_token_ids, text_after_process
+                )
+            else:
+                try:
+                    return await self.chat_completion_full_generator(
+                        request, request_id, request.model, prompt_token_ids, text_after_process
+                    )
+                except Exception as e:
+                    return ErrorResponse(code=400, message=str(e))
+        except Exception:
+            return ErrorResponse(code=408, message=f"Request queued time exceed {self.max_waiting_time}")

     def _create_streaming_error_response(self, message: str) -> str:
         error_response = ErrorResponse(
             code=400,
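The behavioral change in this hunk is ordering: the concurrency semaphore is now acquired before the request is formatted, and the formatting, dispatch, and error returns all sit inside the guarded region, so a failure after admission can no longer leave the permit unreleased (the "control signal release failed" bug from #3374); a queue timeout surfaces as a 408. Below is a minimal, self-contained sketch of the same acquire-with-timeout admission pattern. The names (MAX_CONCURRENCY, MAX_WAITING_TIME, handle_request, work) are hypothetical, and the finally-based release is this sketch's simplification; the diff does not show where the real permit is released.

import asyncio

MAX_CONCURRENCY = 8        # hypothetical permit count
MAX_WAITING_TIME = 5.0     # hypothetical queue timeout in seconds; < 0 means wait forever

semaphore = asyncio.Semaphore(MAX_CONCURRENCY)

async def handle_request(work):
    # Admit the request first, so no per-request work happens for requests we reject.
    try:
        if MAX_WAITING_TIME < 0:
            await semaphore.acquire()
        else:
            # asyncio.wait_for raises asyncio.TimeoutError if no permit frees up in time.
            await asyncio.wait_for(semaphore.acquire(), timeout=MAX_WAITING_TIME)
    except asyncio.TimeoutError:
        return "408: request queued too long"
    try:
        # Format and run the request only after admission.
        return await work()
    finally:
        # Release exactly once on every exit path so permits cannot leak.
        semaphore.release()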
@@ -240,6 +240,7 @@ class OpenAIServingChat:
                         prompt_tokens_details=PromptTokenUsageInfo(cached_tokens=num_cached_tokens),
                     )
                     yield f"data: {chunk.model_dump_json(exclude_unset=True)} \n\n"
+                    api_server_logger.info(f"Chat Streaming response send_idx 0: {chunk.model_dump_json()}")
                     first_iteration = False

                 output = res["outputs"]
@@ -274,6 +275,7 @@ class OpenAIServingChat:
                         logprobs=logprobs_res,
                         arrival_time=arrival_time,
                     )
+
                     if res["finished"]:
                         num_choices -= 1
                         work_process_metrics.e2e_request_latency.observe(
@@ -305,6 +307,9 @@ class OpenAIServingChat:
                 if len(choices) == max_streaming_response_tokens or res["finished"]:
                     chunk.choices = choices
                     yield f"data: {chunk.model_dump_json(exclude_unset=True)}\n\n"
+                    # Log the final packet
+                    if res["finished"]:
+                        api_server_logger.info(f"Chat Streaming response last send: {chunk.model_dump_json()}")
                     choices = []

             if choices:
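The streaming hunks above add logs at the stream's first frame (send_idx 0) and its final frame; each frame is a Server-Sent-Events message of the form "data: <json>" followed by a blank line. A small illustrative sketch of that framing and logging, where json and logging stand in for the project's serializer and api_server_logger:

import json
import logging

logger = logging.getLogger("api_server")

def sse_frames(chunks):
    """Yield SSE 'data:' frames; log the first and final frames, as the diff does."""
    last = len(chunks) - 1
    for i, chunk in enumerate(chunks):
        payload = json.dumps(chunk, separators=(",", ":"))
        yield f"data: {payload}\n\n"
        if i == 0:
            logger.info("Streaming response send_idx 0: %s", payload)
        if i == last:
            logger.info("Streaming response last send: %s", payload)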
@@ -458,13 +463,15 @@ class OpenAIServingChat:
             prompt_tokens_details=PromptTokenUsageInfo(cached_tokens=final_res.get("num_cached_tokens", 0)),
         )
         work_process_metrics.e2e_request_latency.observe(time.time() - final_res["metrics"]["request_start_time"])
-        return ChatCompletionResponse(
+        res = ChatCompletionResponse(
             id=request_id,
             created=created_time,
             model=model_name,
             choices=choices,
             usage=usage,
         )
+        api_server_logger.info(f"Chat response: {res.model_dump_json()}")
+        return res

     def _create_chat_logprobs(
         self,
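The non-streaming path now logs the complete response before returning it, so a request line ("create chat completion request: chatcmpl-...") can be matched with its response line ("Chat response: ...") in the server log via the shared id. A minimal sketch of the same pattern, assuming a pydantic-v2-style model; ChatResponse and finish_request here are hypothetical names, not the project's API:

import logging

from pydantic import BaseModel

logger = logging.getLogger("api_server")

class ChatResponse(BaseModel):
    id: str
    model: str
    content: str

def finish_request(resp: ChatResponse) -> ChatResponse:
    # Same shape as the diff: build the object, log its JSON dump, then return it.
    logger.info("Chat response: %s", resp.model_dump_json())
    return resp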