Mirror of https://github.com/PaddlePaddle/FastDeploy.git, synced 2025-12-24 13:28:13 +08:00
[Feature] support fd return decode response (#4407)
* [Feature] support fd return decode response
* Resolving conflicts
* fix
* fix
* fix
* fix
* fix

---------

Co-authored-by: YuBaoku <49938469+EmmonsCurse@users.noreply.github.com>
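Taken together, the hunks below move ownership of the data processor from LLMEngine into the wrapped EngineService: the engine now builds the processor itself via engine.create_data_processor(), LLMEngine keeps only an alias, and every call site switches from self.data_processor to self.engine.data_processor. A minimal sketch of the new wiring, using hypothetical stand-in classes (only the method and attribute names come from the diff):

# Sketch of the ownership change; these stand-in classes are illustrative,
# not FastDeploy's real implementations.
class DataProcessor:
    def process_request(self, request, max_model_len, **kwargs):
        # The real processor tokenizes the prompt and applies chat templates.
        return request

    def process_response(self, result):
        # The real processor detokenizes generated ids; it may return None
        # for a streaming chunk that decodes to nothing yet.
        return result


class EngineService:
    def create_data_processor(self):
        # After this commit, the service builds and owns the processor.
        self.data_processor = DataProcessor()


class LLMEngine:
    def __init__(self):
        self.engine = EngineService()

    def start(self):
        self.engine.create_data_processor()
        # LLMEngine keeps only an alias; call sites go through the engine.
        self.data_processor = self.engine.data_processor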
@@ -38,7 +38,6 @@ from fastdeploy.engine.args_utils import EngineArgs
 from fastdeploy.engine.common_engine import EngineService
 from fastdeploy.engine.expert_service import start_data_parallel_service
 from fastdeploy.engine.request import Request
-from fastdeploy.input.preprocess import InputPreprocessor
 from fastdeploy.inter_communicator import EngineWorkerQueue, IPCSignal
 from fastdeploy.metrics.metrics import main_process_metrics
 from fastdeploy.utils import EngineError, console_logger, envs, llm_logger
@@ -87,13 +86,6 @@ class LLMEngine:
         self.running = True
         self.is_started = False

-        self.input_processor = InputPreprocessor(
-            cfg.model_config,
-            cfg.structured_outputs_config.reasoning_parser,
-            cfg.limit_mm_per_prompt,
-            cfg.mm_processor_kwargs,
-            cfg.tool_parser,
-        )
         self.engine = EngineService(cfg)

         if self.cfg.cache_config.num_gpu_blocks_override is None:
@@ -117,12 +109,12 @@ class LLMEngine:
         self.ipc_signal_suffix = self.cfg.parallel_config.engine_worker_queue_port[0]
         self._init_worker_signals()

-        self.data_processor = self.input_processor.create_processor()
-        self.engine.data_processor = self.data_processor
         # Launch components: scheduler, cache_manager, expert_service et.al.
         self.launch_components()

         self.engine.start()
+        self.engine.create_data_processor()
+        self.data_processor = self.engine.data_processor

         # If block numer is specified and model is deployed in mixed mode, start cache manager first
         if not self.do_profile and self.cfg.scheduler_config.splitwise_role != "mixed":
@@ -246,7 +238,7 @@ class LLMEngine:
         chat_template_kwargs = kwargs.get("chat_template_kwargs") or {}
         chat_template_kwargs["chat_template"] = kwargs.get("chat_template")
         kwargs["chat_template_kwargs"] = chat_template_kwargs
-        request = self.data_processor.process_request(request, self.cfg.model_config.max_model_len, **kwargs)
+        request = self.engine.data_processor.process_request(request, self.cfg.model_config.max_model_len, **kwargs)
         request.prompt_token_ids_len = len(request.prompt_token_ids)
         request.need_prefill_tokens = request.prompt_token_ids_len
         input_ids_len = request.prompt_token_ids_len
@@ -482,9 +474,9 @@ class LLMEngine:
         py_script = os.path.join(current_dir_path, worker_path)

         ori_vocab_size = (
-            len(self.data_processor.tokenizer.sp_model)
-            if hasattr(self.data_processor.tokenizer, "sp_model")
-            else len(self.data_processor.tokenizer.vocab)
+            len(self.engine.data_processor.tokenizer.sp_model)
+            if hasattr(self.engine.data_processor.tokenizer, "sp_model")
+            else len(self.engine.data_processor.tokenizer.vocab)
         )

         think_end_id = self.data_processor.tokenizer.get_vocab().get("</think>", -1)
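The ori_vocab_size expression above picks its source by tokenizer type: SentencePiece-backed tokenizers expose an sp_model attribute, while others expose a plain vocab mapping. A small illustration of that fallback (DummyTokenizer is hypothetical):

# hasattr() chooses between the SentencePiece model and a plain vocab dict.
class DummyTokenizer:
    vocab = {"<pad>": 0, "hello": 1}

tok = DummyTokenizer()
size = len(tok.sp_model) if hasattr(tok, "sp_model") else len(tok.vocab)
print(size)  # 2 -- no sp_model attribute, so len(vocab) is used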
@@ -511,8 +503,8 @@ class LLMEngine:
             f" --total_block_num {self.cfg.cache_config.total_block_num}"
             f" --block_size {self.cfg.cache_config.block_size}"
             f" --enc_dec_block_num {self.cfg.cache_config.enc_dec_block_num}"
-            f" --eos_tokens_lens {self.data_processor.eos_token_id_len}"
-            f" --pad_token_id {self.data_processor.pad_token_id}"
+            f" --eos_tokens_lens {self.engine.data_processor.eos_token_id_len}"
+            f" --pad_token_id {self.engine.data_processor.pad_token_id}"
             f" --engine_pid {self.cfg.parallel_config.engine_worker_queue_port[0]}"
             f" --max_num_batched_tokens {self.cfg.scheduler_config.max_num_batched_tokens}"
             f" --splitwise_role {self.cfg.scheduler_config.splitwise_role}"
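The flag lines in the hunk above are adjacent f-string literals, which Python concatenates into a single command-line string at parse time; for example (values made up):

# Adjacent string literals merge into one string (illustrative values).
block_size = 64
enc_dec_block_num = 2
arguments = (
    f" --block_size {block_size}"
    f" --enc_dec_block_num {enc_dec_block_num}"
)
print(arguments)  # " --block_size 64 --enc_dec_block_num 2"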
@@ -611,7 +603,7 @@ class LLMEngine:
         for result in self._get_generated_tokens(req_id):
             is_end = result.finished
             if stream and not is_end:
-                processed = self.data_processor.process_response(result)
+                processed = self.engine.data_processor.process_response(result)
                 if processed is None:
                     continue
                 output = processed.to_dict()
@@ -619,7 +611,7 @@ class LLMEngine:

             # Exit loop if termination condition is met
             if is_end:
-                processed = self.data_processor.process_response(result)
+                processed = self.engine.data_processor.process_response(result)
                 output = processed.to_dict()
                 llm_logger.debug(f"Generate result: {output}")
                 if not stream:
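The last two hunks route both the streaming and the final-response paths through the engine-owned processor. A hypothetical driver loop mirroring that control flow (the function and its arguments are mine, not the project's API):

def drain_generated_tokens(engine, results, stream):
    # Mirrors hunks @@ -611 and @@ -619: intermediate chunks are emitted
    # only when streaming; the final chunk is always emitted.
    outputs = []
    for result in results:
        is_end = result.finished
        if stream and not is_end:
            processed = engine.data_processor.process_response(result)
            if processed is None:
                continue  # nothing decodable yet for this chunk
            outputs.append(processed.to_dict())
        if is_end:
            processed = engine.data_processor.process_response(result)
            outputs.append(processed.to_dict())
    return outputs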