Support limiting thinking lengths (#4244)

Co-authored-by: K11OntheBoat <ruianmaidanglao@163.com>

Author: K11OntheBoat
Date: 2025-09-24 17:30:53 +08:00
Committed by: GitHub
parent 12043fc476
commit 05b7800d80
8 changed files with 184 additions and 26 deletions
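
The hunk below shows only a guard added in post-processing; the limiting machinery itself lives in the other changed files, which are not visible here. As a rough, hypothetical sketch of how a thinking-length budget can be enforced at sampling time (of these names, only `think_end_id` appears in the visible hunk; the rest are illustrative, not FastDeploy's API):

```python
import paddle

def enforce_think_limit(sampled_token_ids, step_idx, max_think_len, think_end_id):
    # Hypothetical sketch: once a request has spent its reasoning budget,
    # overwrite whatever token was sampled with the think-end token, forcing
    # the model out of its thinking phase. `step_idx` and `max_think_len`
    # are assumed to be per-request tensors of shape [batch, 1].
    over_budget = step_idx >= max_think_len
    return paddle.where(
        over_budget,
        paddle.full_like(sampled_token_ids, think_end_id),
        sampled_token_ids,
    )
```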


@@ -193,7 +193,7 @@ def post_process_normal(
 ) -> ModelRunnerOutput:
     """Post-processing steps after completing a single token generation."""
     # handle vl:
-    if model_output.enable_thinking:
+    if model_output.enable_thinking and model_output.think_end_id is not None:
         exists_think_end = sampler_output.sampled_token_ids == model_output.think_end_id
         paddle.assign(
             paddle.where(
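
Before this change, the branch ran whenever `enable_thinking` was set, even for models that define no think-end token; `sampled_token_ids == model_output.think_end_id` with a `None` id then yields a meaningless comparison (or an error, depending on the paddle version). Short-circuiting on the Python side skips the kernel launches entirely. A minimal, self-contained sketch of the guarded update, assuming a per-request `need_think_end` flag tensor as the destination of the `paddle.assign` (the hunk is truncated before the destination, so that name is hypothetical):

```python
import paddle

def clear_think_flag(sampled_token_ids, think_end_id, enable_thinking, need_think_end):
    # Skip entirely when the model has no think-end token: this is the
    # `think_end_id is not None` guard the commit adds.
    if enable_thinking and think_end_id is not None:
        exists_think_end = sampled_token_ids == think_end_id
        # In-place update: zero the flag where the end token was sampled,
        # keep it unchanged elsewhere.
        paddle.assign(
            paddle.where(
                exists_think_end,
                paddle.zeros_like(need_think_end),
                need_think_end,
            ),
            need_think_end,
        )

# Illustrative usage (token id 100 standing in for the real think-end id):
ids = paddle.to_tensor([[100], [42]])       # request 0 sampled the end token
flags = paddle.ones([2, 1], dtype="int64")  # both requests still thinking
clear_think_flag(ids, 100, True, flags)     # flags -> [[0], [1]]
clear_think_flag(ids, None, True, flags)    # no-op: guard short-circuits
```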