From e0e7d6843532a6a40aff96e12dc7a64ecab2aa5a Mon Sep 17 00:00:00 2001
From: ltd0924 <32387785+ltd0924@users.noreply.github.com>
Date: Thu, 4 Sep 2025 20:31:48 +0800
Subject: [PATCH] Update qwen_vl_processor.py (#3808)

---
 fastdeploy/input/qwen_vl_processor/qwen_vl_processor.py | 9 +++++++++
 1 file changed, 9 insertions(+)

diff --git a/fastdeploy/input/qwen_vl_processor/qwen_vl_processor.py b/fastdeploy/input/qwen_vl_processor/qwen_vl_processor.py
index ab249b1f0..dc85b78c0 100644
--- a/fastdeploy/input/qwen_vl_processor/qwen_vl_processor.py
+++ b/fastdeploy/input/qwen_vl_processor/qwen_vl_processor.py
@@ -231,6 +231,15 @@ class QwenVLProcessor(TextProcessor):
         elif request.get("messages"):
             messages = request["messages"]
             self._check_mm_limits(messages)
+            chat_template_kwargs = request.get("chat_template_kwargs")
+            if chat_template_kwargs:
+                if isinstance(chat_template_kwargs, dict):
+                    for k, v in chat_template_kwargs.items():
+                        if k not in request:
+                            request[k] = v
+                else:
+                    raise ValueError("Invalid input: chat_template_kwargs must be a dict")
+            request.setdefault("enable_thinking", True)
             outputs = self.processor.request2ids(request)
         else:
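
Note (not part of the patch): the added lines fold chat_template_kwargs entries into the
request dict without overriding keys the request already carries, then default
enable_thinking to True. A minimal standalone sketch of that behavior follows; the helper
name apply_chat_template_kwargs is hypothetical and only illustrates the merge order.

    # Illustrative sketch, mirroring the logic added in the hunk above.
    def apply_chat_template_kwargs(request: dict) -> dict:
        chat_template_kwargs = request.get("chat_template_kwargs")
        if chat_template_kwargs:
            if isinstance(chat_template_kwargs, dict):
                for k, v in chat_template_kwargs.items():
                    # Fields already present on the request take precedence.
                    if k not in request:
                        request[k] = v
            else:
                raise ValueError("Invalid input: chat_template_kwargs must be a dict")
        # Thinking mode defaults to enabled unless something above set it.
        request.setdefault("enable_thinking", True)
        return request

    # Example: enable_thinking is taken from chat_template_kwargs because the
    # request itself does not define the key.
    req = {
        "messages": [{"role": "user", "content": "hi"}],
        "chat_template_kwargs": {"enable_thinking": False},
    }
    apply_chat_template_kwargs(req)
    assert req["enable_thinking"] is False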