diff --git a/fastdeploy/entrypoints/openai/protocol.py b/fastdeploy/entrypoints/openai/protocol.py
index 6aa6b8bd0..678ae8dd0 100644
--- a/fastdeploy/entrypoints/openai/protocol.py
+++ b/fastdeploy/entrypoints/openai/protocol.py
@@ -126,6 +126,8 @@ class ChatMessage(BaseModel):
     tool_calls: Optional[List[DeltaToolCall | ToolCall]] = None
     prompt_token_ids: Optional[List[int]] = None
     completion_token_ids: Optional[List[int]] = None
+    text_after_process: Optional[str] = None
+    raw_prediction: Optional[str] = None
 
 
 class ChatCompletionResponseChoice(BaseModel):
@@ -183,6 +185,8 @@ class DeltaMessage(BaseModel):
     completion_token_ids: Optional[List[int]] = None
     reasoning_content: Optional[str] = None
     tool_calls: Optional[List[DeltaToolCall | ToolCall]] = None
+    text_after_process: Optional[str] = None
+    raw_prediction: Optional[str] = None
 
 
 class ChatCompletionResponseStreamChoice(BaseModel):
@@ -219,6 +223,8 @@ class CompletionResponseChoice(BaseModel):
     text: str
     prompt_token_ids: Optional[List[int]] = None
    completion_token_ids: Optional[List[int]] = None
+    text_after_process: Optional[str] = None
+    raw_prediction: Optional[str] = None
     arrival_time: Optional[float] = None
     logprobs: Optional[CompletionLogprobs] = None
     reasoning_content: Optional[str] = None
@@ -261,6 +267,8 @@ class CompletionResponseStreamChoice(BaseModel):
     logprobs: Optional[CompletionLogprobs] = None
     prompt_token_ids: Optional[List[int]] = None
     completion_token_ids: Optional[List[int]] = None
+    text_after_process: Optional[str] = None
+    raw_prediction: Optional[str] = None
     reasoning_content: Optional[str] = None
     finish_reason: Optional[Literal["stop", "length", "tool_calls"]] = None
     tool_calls: Optional[List[DeltaToolCall | ToolCall]] = None
diff --git a/fastdeploy/entrypoints/openai/serving_chat.py b/fastdeploy/entrypoints/openai/serving_chat.py
index 8d8d4b98d..6b5492b05 100644
--- a/fastdeploy/entrypoints/openai/serving_chat.py
+++ b/fastdeploy/entrypoints/openai/serving_chat.py
@@ -83,11 +83,12 @@ class OpenAIServingChat:
         else:
             request_id = f"chatcmpl-{uuid.uuid4()}"
         api_server_logger.info(f"create chat completion request: {request_id}")
-
+        text_after_process = None
         try:
             current_req_dict = request.to_dict_for_infer(request_id)
             current_req_dict["arrival_time"] = time.time()
             prompt_token_ids = self.engine_client.format_and_add_data(current_req_dict)
+            text_after_process = current_req_dict.get("text_after_process")
             if isinstance(prompt_token_ids, np.ndarray):
                 prompt_token_ids = prompt_token_ids.tolist()
         except Exception as e:
@@ -104,10 +105,14 @@ class OpenAIServingChat:
                 return ErrorResponse(code=408, message=f"Request queued time exceed {self.max_waiting_time}")
 
         if request.stream:
-            return self.chat_completion_stream_generator(request, request_id, request.model, prompt_token_ids)
+            return self.chat_completion_stream_generator(
+                request, request_id, request.model, prompt_token_ids, text_after_process
+            )
         else:
             try:
-                return await self.chat_completion_full_generator(request, request_id, request.model, prompt_token_ids)
+                return await self.chat_completion_full_generator(
+                    request, request_id, request.model, prompt_token_ids, text_after_process
+                )
             except Exception as e:
                 return ErrorResponse(code=400, message=str(e))
 
@@ -124,6 +129,7 @@ class OpenAIServingChat:
         request_id: str,
         model_name: str,
         prompt_token_ids: list(),
+        text_after_process: str,
     ):
         """
         Streaming chat completion generator.
@@ -216,6 +222,7 @@ class OpenAIServingChat:
                     )
                     if request.return_token_ids:
                         choice.delta.prompt_token_ids = list(prompt_token_ids)
+                        choice.delta.text_after_process = text_after_process
                     chunk = ChatCompletionStreamResponse(
                         id=request_id,
                         object=chunk_object_type,
@@ -279,6 +286,7 @@ class OpenAIServingChat:
 
                 if request.return_token_ids:
                     choice.delta.completion_token_ids = list(output["token_ids"])
+                    choice.delta.raw_prediction = output.get("raw_prediction")
                 if include_continuous_usage:
                     chunk.usage = UsageInfo(
                         prompt_tokens=num_prompt_tokens,
@@ -329,6 +337,7 @@ class OpenAIServingChat:
         request_id: str,
         model_name: str,
         prompt_token_ids: list(),
+        text_after_process: str,
     ):
         """
         Full chat completion generator.
@@ -406,6 +415,8 @@ class OpenAIServingChat:
                 tool_calls=output.get("tool_call_content"),
                 prompt_token_ids=prompt_token_ids if request.return_token_ids else None,
                 completion_token_ids=completion_token_ids if request.return_token_ids else None,
+                text_after_process=text_after_process if request.return_token_ids else None,
+                raw_prediction=output.get("raw_prediction") if request.return_token_ids else None,
             )
             logprobs_full_res = None
             if logprob_contents:
diff --git a/fastdeploy/entrypoints/openai/serving_completion.py b/fastdeploy/entrypoints/openai/serving_completion.py
index e2a1158d3..345c393c6 100644
--- a/fastdeploy/entrypoints/openai/serving_completion.py
+++ b/fastdeploy/entrypoints/openai/serving_completion.py
@@ -100,6 +100,7 @@ class OpenAIServingCompletion:
 
         api_server_logger.info(f"start inference for request {num_choices}")
         prompt_batched_token_ids = []
+        text_after_process_list = []
         try:
             for idx, prompt in enumerate(request_prompts):
                 request_id_idx = f"{request_id}-{idx}"
@@ -109,6 +110,7 @@ class OpenAIServingCompletion:
                 prompt_token_ids = self.engine_client.format_and_add_data(current_req_dict)
                 if isinstance(prompt_token_ids, np.ndarray):
                     prompt_token_ids = prompt_token_ids.tolist()
+                text_after_process_list.append(current_req_dict.get("text_after_process"))
                 prompt_batched_token_ids.append(prompt_token_ids)
         except Exception as e:
             return ErrorResponse(message=str(e), code=400)
@@ -131,6 +133,7 @@ class OpenAIServingCompletion:
                     created_time=created_time,
                     model_name=request.model,
                     prompt_batched_token_ids=prompt_batched_token_ids,
+                    text_after_process_list=text_after_process_list,
                 )
             else:
                 try:
@@ -141,6 +144,7 @@ class OpenAIServingCompletion:
                         created_time=created_time,
                         model_name=request.model,
                         prompt_batched_token_ids=prompt_batched_token_ids,
+                        text_after_process_list=text_after_process_list,
                     )
                 except Exception as e:
                     return ErrorResponse(code=400, message=str(e))
@@ -156,6 +160,7 @@ class OpenAIServingCompletion:
         created_time: int,
         model_name: str,
         prompt_batched_token_ids: list(),
+        text_after_process_list: list(),
     ):
         """
         Process the full completion request with multiple choices.
@@ -225,6 +230,7 @@ class OpenAIServingCompletion:
                 model_name=model_name,
                 prompt_batched_token_ids=prompt_batched_token_ids,
                 completion_batched_token_ids=completion_batched_token_ids,
+                text_after_process_list=text_after_process_list,
             )
         except Exception as e:
             api_server_logger.error(f"Error in completion_full_generator: {e}", exc_info=True)
@@ -251,6 +257,7 @@ class OpenAIServingCompletion:
         created_time: int,
         model_name: str,
         prompt_batched_token_ids: list(),
+        text_after_process_list: list(),
     ):
         """
         Process the stream completion request.
@@ -309,6 +316,7 @@ class OpenAIServingCompletion:
                             index=idx,
                             text="",
                             prompt_token_ids=list(prompt_batched_token_ids[idx]),
+                            text_after_process=text_after_process_list[idx],
                             completion_token_ids=None,
                         )
                     ],
@@ -337,6 +345,7 @@ class OpenAIServingCompletion:
                         text=output["text"],
                         prompt_token_ids=None,
                         completion_token_ids=output.get("token_ids") if request.return_token_ids else None,
+                        raw_prediction=output.get("raw_prediction") if request.return_token_ids else None,
                         tool_calls=output.get("tool_call_content"),
                         reasoning_content=output.get("reasoning_content"),
                         arrival_time=arrival_time,
@@ -398,6 +407,7 @@ class OpenAIServingCompletion:
         model_name: str,
         prompt_batched_token_ids: list(),
         completion_batched_token_ids: list(),
+        text_after_process_list: list(),
     ) -> CompletionResponse:
         choices: List[CompletionResponseChoice] = []
         num_prompt_tokens = 0
@@ -444,6 +454,8 @@ class OpenAIServingCompletion:
                 text=output_text,
                 prompt_token_ids=prompt_token_ids if request.return_token_ids else None,
                 completion_token_ids=completion_token_ids if request.return_token_ids else None,
+                raw_prediction=output.get("raw_prediction") if request.return_token_ids else None,
+                text_after_process=text_after_process_list[idx] if request.return_token_ids else None,
                 reasoning_content=output.get("reasoning_content"),
                 tool_calls=output.get("tool_call_content"),
                 logprobs=aggregated_logprobs,
diff --git a/fastdeploy/input/ernie_processor.py b/fastdeploy/input/ernie_processor.py
index 28d91bdbf..8921a78e0 100644
--- a/fastdeploy/input/ernie_processor.py
+++ b/fastdeploy/input/ernie_processor.py
@@ -153,7 +153,7 @@ class ErnieProcessor(BaseDataProcessor):
         if request.get("prompt"):
             prompt = request.get("prompt")
             prompt = prompt[0] if isinstance(prompt, list) else prompt
-
+            request["text_after_process"] = prompt
             tokens = self.tokenizer.tokenize(prompt)
             token_ids = self.tokenizer.convert_tokens_to_ids(tokens)
             request["prompt_token_ids"] = token_ids
@@ -247,6 +247,7 @@ class ErnieProcessor(BaseDataProcessor):
             response_dict["outputs"]["reasoning_content"] = reasoning_content
         else:
             response_dict["outputs"]["text"] = full_text
+        response_dict["outputs"]["raw_prediction"] = full_text
         data_processor_logger.info(f"req_id:{req_id}, decode_status: {self.decode_status[req_id]}")
         del self.decode_status[req_id]
         return response_dict
@@ -283,6 +284,7 @@ class ErnieProcessor(BaseDataProcessor):
             response_dict["outputs"]["reasoning_content"] = reasoning_content
         else:
             response_dict["outputs"]["text"] = delta_text
+        response_dict["outputs"]["raw_prediction"] = delta_text
         if is_end:
             data_processor_logger.info(f"req_id:{req_id}, decode_status: {self.decode_status[req_id]}")
             del self.decode_status[req_id]
@@ -307,7 +309,7 @@ class ErnieProcessor(BaseDataProcessor):
             split_special_tokens=False,
             add_special_tokens=False,
         )
-
+        request_or_messages["text_after_process"] = spliced_message
         req_id = None
         if isinstance(request_or_messages, dict):
             req_id = request_or_messages.get("request_id", None)
diff --git a/fastdeploy/input/ernie_vl_processor.py b/fastdeploy/input/ernie_vl_processor.py
index 63ae5bc31..4d32909e0 100644
--- a/fastdeploy/input/ernie_vl_processor.py
+++ b/fastdeploy/input/ernie_vl_processor.py
@@ -209,6 +209,7 @@ class ErnieMoEVLProcessor(ErnieProcessor):
             self._check_mm_limits(multimodal_data)
             images = multimodal_data.get("image", None)
             videos = multimodal_data.get("video", None)
+            request["text_after_process"] = request.get("prompt")
             outputs = self.ernie_processor.text2ids(request["prompt"], images, videos)
         elif request.get("messages"):
             messages = request["messages"]
diff --git a/fastdeploy/input/mm_processor/process.py b/fastdeploy/input/mm_processor/process.py
index ea2559a0f..65fad4dbd 100644
--- a/fastdeploy/input/mm_processor/process.py
+++ b/fastdeploy/input/mm_processor/process.py
@@ -494,16 +494,15 @@ class DataProcessor:
         """
         if self.tokenizer.chat_template is None:
             raise ValueError("This model does not support chat_template.")
-
-        prompt_token_str = (
-            self.tokenizer.apply_chat_template(
-                request,
-                tokenize=False,
-                add_generation_prompt=request.get("add_generation_prompt", True),
-            )
-            .replace("<|image@placeholder|>", "")
-            .replace("<|video@placeholder|>", "")
+        prompt_token_template = self.tokenizer.apply_chat_template(
+            request,
+            tokenize=False,
+            add_generation_prompt=request.get("add_generation_prompt", True),
         )
+        prompt_token_str = prompt_token_template.replace("<|image@placeholder|>", "").replace(
+            "<|video@placeholder|>", ""
+        )
+        request["text_after_process"] = prompt_token_template
         tokens = self.tokenizer.tokenize(prompt_token_str)
         token_ids = self.tokenizer.convert_tokens_to_ids(tokens)
         data_processor_logger.info(
diff --git a/fastdeploy/input/text_processor.py b/fastdeploy/input/text_processor.py
index cbaca990c..df94cefec 100644
--- a/fastdeploy/input/text_processor.py
+++ b/fastdeploy/input/text_processor.py
@@ -264,6 +264,7 @@ class DataProcessor(BaseDataProcessor):
         # processing prompt_token_ids
         if not request.get("prompt_token_ids"):
             if "prompt" in request:
+                request["text_after_process"] = request["prompt"]
                 request["prompt_token_ids"] = self.text2ids(request["prompt"], max_model_len).tolist()
             elif "messages" in request:
                 if self.tokenizer.chat_template is None:
@@ -335,6 +336,7 @@ class DataProcessor(BaseDataProcessor):
         delta_text, _, previous_texts = self.ids2tokens(token_ids, req_id)
         if is_end:
             full_text = previous_texts + delta_text
+            response_dict["outputs"]["raw_prediction"] = full_text
             if enable_thinking and self.reasoning_parser:
                 reasoning_content, text = self.reasoning_parser.extract_reasoning_content(full_text, response_dict)
                 response_dict["outputs"]["text"] = text
@@ -364,7 +366,7 @@ class DataProcessor(BaseDataProcessor):
         if token_ids[-1] == self.tokenizer.eos_token_id:
             token_ids = token_ids[:-1]
         delta_text, previous_token_ids, previous_texts = self.ids2tokens(token_ids, req_id)
-
+        response_dict["outputs"]["raw_prediction"] = delta_text
         if enable_thinking and self.reasoning_parser:
             reasoning_content, text = self.reasoning_parser.extract_reasoning_content_streaming(
                 previous_texts,
@@ -455,6 +457,7 @@ class DataProcessor(BaseDataProcessor):
             add_special_tokens=False,
             return_tensors="pd",
         )
+        request["text_after_process"] = spliced_message
         req_id = None
         tokens = self.tokenizer.tokenize(spliced_message)
         if isinstance(request, dict):
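
A minimal client-side sketch of the new fields added by this diff (not part of the change itself; the host, port, and model name are illustrative assumptions, and it assumes a FastDeploy OpenAI-compatible server is already running). Per the diff, text_after_process and raw_prediction are only populated when return_token_ids is set on the request; in the non-streaming chat path they appear on choices[0].message.

# Sketch only: URL and model name below are assumptions, not defined in this diff.
import requests

resp = requests.post(
    "http://localhost:8000/v1/chat/completions",
    json={
        "model": "default",  # hypothetical model name
        "messages": [{"role": "user", "content": "Hello"}],
        "stream": False,
        "return_token_ids": True,  # gates the new fields, per the diff
    },
)
message = resp.json()["choices"][0]["message"]
print(message["text_after_process"])  # prompt text after preprocessing / chat-template rendering
print(message["raw_prediction"])      # decoded model output before reasoning/tool-call parsing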