Mirror of https://github.com/PaddlePaddle/FastDeploy.git (synced 2025-10-04 00:06:38 +08:00)
Fix bug for offline inference in scheduler v1 (#3117)
@@ -500,6 +500,7 @@ class LLMEngine:
         enable_thinking = kwargs.get("enable_thinking", None)
         request = self.data_processor.process_request(request, self.cfg.max_model_len, enable_thinking=enable_thinking)
         request.prompt_token_ids_len = len(request.prompt_token_ids)
+        request.need_prefill_tokens = request.prompt_token_ids_len
         input_ids_len = request.prompt_token_ids_len
         request.set(
             "max_tokens",
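The single functional change is the added `request.need_prefill_tokens = request.prompt_token_ids_len` line in the offline request path of `LLMEngine`. As a hedged illustration of why that initialization matters, the sketch below is a simplified model of my own, not FastDeploy's actual scheduler: `Request`, `prepare_offline_request`, and `schedule_prefill_chunk` are hypothetical names. It shows how a v1-style chunked-prefill scheduler would consume `need_prefill_tokens`; if the field were left at zero for offline requests, the scheduler would see no tokens to prefill.

```python
# Minimal sketch (NOT FastDeploy's real scheduler): everything except the
# `prompt_token_ids_len` / `need_prefill_tokens` field names is an assumption.
from dataclasses import dataclass
from typing import List


@dataclass
class Request:
    prompt_token_ids: List[int]
    prompt_token_ids_len: int = 0
    need_prefill_tokens: int = 0  # prompt tokens still waiting to be prefilled


def prepare_offline_request(prompt_token_ids: List[int]) -> Request:
    """Mirror of the fixed code path: derive lengths from the processed prompt."""
    req = Request(prompt_token_ids=prompt_token_ids)
    req.prompt_token_ids_len = len(req.prompt_token_ids)
    # The fix: without this line a v1-style scheduler would see 0 tokens to
    # prefill and could skip or mis-size the prefill step for offline requests.
    req.need_prefill_tokens = req.prompt_token_ids_len
    return req


def schedule_prefill_chunk(req: Request, budget: int) -> int:
    """Hypothetical scheduler tick: prefill at most `budget` tokens at a time."""
    chunk = min(req.need_prefill_tokens, budget)
    req.need_prefill_tokens -= chunk
    return chunk


if __name__ == "__main__":
    req = prepare_offline_request(list(range(10)))
    while req.need_prefill_tokens > 0:
        print("prefilling", schedule_prefill_chunk(req, budget=4), "tokens")
```

In this toy model the loop terminates only because `need_prefill_tokens` starts at the prompt length; the one-line fix in the diff gives the real scheduler the same starting point for offline requests.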