mirror of https://github.com/PaddlePaddle/FastDeploy.git
synced 2025-10-06 00:57:33 +08:00
Support limit thinking lengths (#4244)
Co-authored-by: K11OntheBoat <ruianmaidanglao@163.com>
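This change makes the thinking-length limit model-agnostic: per-request setup now enables thinking only when the request supplies both enable_thinking and reasoning_max_tokens, the enable_thinking / need_think_end / reasoning_index buffers are allocated for every model instead of only multimodal ones, and the sampler is always handed those buffers together with model_config.think_end_id.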
@@ -322,15 +322,21 @@ class GPUModelRunner(ModelRunnerBase):
                 else:
                     position_ids = None
 
-                enable_thinking = request.get("enable_thinking", True)
-                enable_thinking = enable_thinking if enable_thinking is not None else True
-                self.share_inputs["enable_thinking"][:] = enable_thinking
-                self.share_inputs["need_think_end"][idx : idx + 1, :] = 1 if enable_thinking else 0
-                self.share_inputs["reasoning_index"][idx : idx + 1, :] = request.get("reasoning_max_tokens", 2048)
                 self.share_inputs["rope_emb"][idx : idx + 1, :] = self.prepare_rope3d(
                     position_ids, request.get("max_tokens", 2048)
                 )
 
+            if request.get("enable_thinking", False) and request.get("reasoning_max_tokens") is not None:
+                # Enable thinking
+                self.share_inputs["enable_thinking"][:] = True
+                self.share_inputs["need_think_end"][idx : idx + 1, :] = 1
+                self.share_inputs["reasoning_index"][idx : idx + 1, :] = request.get("reasoning_max_tokens")
+            else:
+                # Disable thinking
+                self.share_inputs["enable_thinking"][:] = False
+                self.share_inputs["need_think_end"][idx : idx + 1, :] = 0
+                self.share_inputs["reasoning_index"][idx : idx + 1, :] = 0
+
             if isinstance(request.prompt_token_ids, np.ndarray):
                 prompt_token_ids = request.prompt_token_ids.tolist()
             else:
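The gate above is self-contained enough to restate in isolation. A minimal sketch, assuming a plain dict in place of FastDeploy's Request object and Python scalars/lists in place of the runner's paddle tensors:

# Minimal sketch of the per-request gate; `request` stands in for the real
# Request object and `share_inputs` for the runner's shared tensors.
def apply_thinking_gate(request: dict, share_inputs: dict, idx: int) -> None:
    if request.get("enable_thinking", False) and request.get("reasoning_max_tokens") is not None:
        # Enable thinking: budget the reasoning phase with reasoning_max_tokens.
        share_inputs["enable_thinking"] = True
        share_inputs["need_think_end"][idx] = 1
        share_inputs["reasoning_index"][idx] = request["reasoning_max_tokens"]
    else:
        # Disable thinking: zero budget, nothing to terminate.
        share_inputs["enable_thinking"] = False
        share_inputs["need_think_end"][idx] = 0
        share_inputs["reasoning_index"][idx] = 0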
@@ -549,16 +555,22 @@ class GPUModelRunner(ModelRunnerBase):
             self.share_inputs["prompt_lens"][idx : idx + 1] = length
 
             if self.enable_mm:
-                enable_thinking = request.get("enable_thinking", True)
-                enable_thinking = enable_thinking if enable_thinking is not None else True
-                self.share_inputs["enable_thinking"][:] = enable_thinking
-                self.share_inputs["need_think_end"][idx : idx + 1, :] = 1 if enable_thinking else 0
-                self.share_inputs["reasoning_index"][idx : idx + 1, :] = request.get("reasoning_max_tokens", 2048)
                 self.share_inputs["rope_emb"][idx : idx + 1, :] = self.prepare_rope3d(
                     position_ids, request.get("max_tokens", 2048)
                 )
                 self.share_inputs["seq_lens_decoder"][idx : idx + 1] = 0
 
+            if request.get("enable_thinking", False) and request.get("reasoning_max_tokens") is not None:
+                # Enable thinking
+                self.share_inputs["enable_thinking"][:] = True
+                self.share_inputs["need_think_end"][idx : idx + 1, :] = 1
+                self.share_inputs["reasoning_index"][idx : idx + 1, :] = request.get("reasoning_max_tokens")
+            else:
+                # Disable thinking
+                self.share_inputs["enable_thinking"][:] = False
+                self.share_inputs["need_think_end"][idx : idx + 1, :] = 0
+                self.share_inputs["reasoning_index"][idx : idx + 1, :] = 0
+
         def get_attr_from_request(request, attr, default_value=None):
             res = request.get(attr, default_value)
             if res is not None:
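The multimodal path now applies the identical gate, so the default changes in the same way on both paths: under the old code a request that omitted reasoning_max_tokens still got a 2048-token budget, while the new gate disables thinking entirely. Hypothetical payloads, using the dict-style get() semantics shown above:

# Hypothetical request payloads exercising both branches of the gate:
request_a = {"enable_thinking": True, "reasoning_max_tokens": 512}  # first branch: 512-token thinking budget
request_b = {"enable_thinking": True}                               # no budget given -> thinking disabled
request_c = {}                                                      # defaults -> thinking disabled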
@@ -853,6 +865,11 @@ class GPUModelRunner(ModelRunnerBase):
         # Initialize rotary position embedding
         tmp_position_ids = paddle.arange(self.parallel_config.max_model_len).reshape((1, -1))
 
+        # Initialize thinking related buffers
+        self.share_inputs["need_think_end"] = paddle.full(shape=[max_num_seqs, 1], fill_value=0, dtype="int32")
+        self.share_inputs["enable_thinking"] = paddle.full(shape=[1], fill_value=False, dtype="bool")
+        self.share_inputs["reasoning_index"] = paddle.full(shape=[max_num_seqs, 1], fill_value=0, dtype="int32")
+
         # TODO(gongshaotian): move to models
         if not self.enable_mm:
             self.share_inputs["rope_emb"] = get_rope(
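These are per-batch bookkeeping tensors: need_think_end and reasoning_index carry one int32 per sequence slot, while enable_thinking is a single global bool. A standalone sketch of equivalent allocations, assuming max_num_seqs is the configured batch capacity:

import paddle

max_num_seqs = 8  # assumed batch capacity, for illustration only
need_think_end = paddle.full(shape=[max_num_seqs, 1], fill_value=0, dtype="int32")   # 1 while a slot still owes a think-end token
enable_thinking = paddle.full(shape=[1], fill_value=False, dtype="bool")             # global switch read by the sampler
reasoning_index = paddle.full(shape=[max_num_seqs, 1], fill_value=0, dtype="int32")  # remaining thinking-token budget per slot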
@@ -952,11 +969,6 @@ class GPUModelRunner(ModelRunnerBase):
                 dtype="float32",
             )
             self.share_inputs["image_features"] = None
-            self.share_inputs["need_think_end"] = paddle.full(shape=[max_num_seqs, 1], fill_value=0, dtype="int32")
-            self.share_inputs["enable_thinking"] = paddle.full(
-                shape=[1], fill_value=("ernie" in self.model_config.model_type), dtype="bool"
-            )
-            self.share_inputs["reasoning_index"] = paddle.full(shape=[max_num_seqs, 1], fill_value=0, dtype="int32")
 
     def _prepare_inputs(self) -> None:
         """Prepare the model inputs"""
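With the buffers now created unconditionally in the hunk above, these multimodal-only allocations are redundant and drop out; the old ernie-specific default for enable_thinking is replaced by plain False until a request turns it on.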
@@ -1398,10 +1410,10 @@ class GPUModelRunner(ModelRunnerBase):
             ),
             accept_tokens=(self.share_inputs["accept_tokens"] if self.speculative_decoding else None),
             accept_num=(self.share_inputs["accept_num"] if self.speculative_decoding else None),
-            enable_thinking=(self.share_inputs["enable_thinking"] if self.enable_mm else None),
-            think_end_id=(getattr(self.model_config, "think_end_id", -1) if self.enable_mm else -1),
-            need_think_end=(self.share_inputs["need_think_end"] if self.enable_mm else None),
-            reasoning_index=(self.share_inputs["reasoning_index"] if self.enable_mm else None),
+            enable_thinking=self.share_inputs["enable_thinking"],
+            think_end_id=self.model_config.think_end_id,
+            need_think_end=self.share_inputs["need_think_end"],
+            reasoning_index=self.share_inputs["reasoning_index"],
             stop_token_ids=self.share_inputs["stop_seqs"],
             stop_seqs_len=self.share_inputs["stop_seqs_len"],
         )
@@ -1714,10 +1726,10 @@ class GPUModelRunner(ModelRunnerBase):
             ),
             accept_tokens=(self.share_inputs["accept_tokens"] if self.speculative_decoding else None),
             accept_num=(self.share_inputs["accept_num"] if self.speculative_decoding else None),
-            enable_thinking=(self.share_inputs["enable_thinking"] if self.enable_mm else None),
-            think_end_id=(getattr(self.model_config, "think_end_id", -1) if self.enable_mm else -1),
-            need_think_end=(self.share_inputs["need_think_end"][:num_running_requests] if self.enable_mm else None),
-            reasoning_index=(self.share_inputs["reasoning_index"][:num_running_requests] if self.enable_mm else None),
+            enable_thinking=self.share_inputs["enable_thinking"],
+            think_end_id=self.model_config.think_end_id,
+            need_think_end=self.share_inputs["need_think_end"][:num_running_requests],
+            reasoning_index=self.share_inputs["reasoning_index"][:num_running_requests],
             stop_token_ids=self.share_inputs["stop_seqs"],
             stop_seqs_len=self.share_inputs["stop_seqs_len"],
         )