mirror of
https://github.com/PaddlePaddle/FastDeploy.git
synced 2025-10-05 16:48:03 +08:00
[fix] Modify follow-up push parameters and the verification method for thinking length (#4086)
* Renamed the continuation-inference ("续推") parameter generated_token_ids to completion_token_ids; changed the thinking-length validation method * Renamed continuation parameter generated_token_ids to completion_token_ids; changed the thinking-length validation method * Renamed continuation parameter generated_token_ids to completion_token_ids; changed the thinking-length validation method * Renamed continuation parameter generated_token_ids to completion_token_ids; changed the thinking-length validation method * add completion_token_ids * add logger * fix reasoning_max_tokens ParameterError * add unittest * add unittest * add unittest * add unittest * add unittest * add unit test
This commit is contained in:
@@ -236,8 +236,13 @@ class EngineClient:
|
||||
raise ParameterError("max_tokens", f"max_tokens can be defined [1, {self.max_model_len}).")
|
||||
|
||||
if data.get("reasoning_max_tokens") is not None:
|
||||
if data["reasoning_max_tokens"] > data["max_tokens"] or data["reasoning_max_tokens"] < 1:
|
||||
raise ParameterError("reasoning_max_tokens", "reasoning_max_tokens must be between max_tokens and 1")
|
||||
if data["reasoning_max_tokens"] < 1:
|
||||
raise ParameterError("reasoning_max_tokens", "reasoning_max_tokens must be greater than 1")
|
||||
if data["reasoning_max_tokens"] > data["max_tokens"]:
|
||||
data["reasoning_max_tokens"] = data["max_tokens"]
|
||||
api_server_logger.warning(
|
||||
f"req_id: {data['request_id']}, reasoning_max_tokens exceeds max_tokens, the value of reasoning_max_tokens will be adjusted to match that of max_tokens"
|
||||
)
|
||||
|
||||
# logprobs
|
||||
logprobs = data.get("logprobs")
|
||||
|
Reference in New Issue
Block a user