[fix] Rename continued-generation parameter and modify the thinking-length verification method (#4086)

* 续推参数  generated_token_ids 修改成 completion_token_ids;修改思考长度校验方式

* 续推参数  generated_token_ids 修改成 completion_token_ids;修改思考长度校验方式

* 续推参数  generated_token_ids 修改成 completion_token_ids;修改思考长度校验方式

* 续推参数  generated_token_ids 修改成 completion_token_ids;修改思考长度校验方式

* add completion_token_ids

* add logger

* fix reasoning_max_tokens ParameterError

* add unittest

* add unittest

* add unittest

* add unittest

* add unittest

* add unit test
This commit is contained in:
luukunn
2025-09-19 14:26:01 +08:00
committed by GitHub
parent 66a98b44ed
commit ee9d8a840a
6 changed files with 75 additions and 24 deletions

View File

@@ -593,6 +593,7 @@ class ChatCompletionRequest(BaseModel):
prompt_token_ids: Optional[List[int]] = None
max_streaming_response_tokens: Optional[int] = None
disable_chat_template: Optional[bool] = False
completion_token_ids: Optional[List[int]] = None
# doc: end-chat-completion-extra-params
def to_dict_for_infer(self, request_id=None):
@@ -618,6 +619,9 @@ class ChatCompletionRequest(BaseModel):
), "The parameter `raw_request` is not supported now, please use completion api instead."
for key, value in self.metadata.items():
req_dict[key] = value
from fastdeploy.utils import api_server_logger
api_server_logger.warning("The parameter metadata is obsolete.")
for key, value in self.dict().items():
if value is not None:
req_dict[key] = value