[Feature] add prompt_tokens and completion_tokens (#3504)

Authored by memoryCoderC on 2025-08-21 10:23:27 +08:00, committed by GitHub
parent 30b3f2dc07
commit 31f639f10b
3 changed files with 16 additions and 0 deletions

View File

@@ -139,6 +139,8 @@ class ChatMessage(BaseModel):
     completion_token_ids: Optional[List[int]] = None
     text_after_process: Optional[str] = None
     raw_prediction: Optional[str] = None
+    prompt_tokens: Optional[str] = None
+    completion_tokens: Optional[str] = None


 class ChatCompletionResponseChoice(BaseModel):
@@ -198,6 +200,8 @@ class DeltaMessage(BaseModel):
     tool_calls: Optional[List[DeltaToolCall | ToolCall]] = None
     text_after_process: Optional[str] = None
     raw_prediction: Optional[str] = None
+    prompt_tokens: Optional[str] = None
+    completion_tokens: Optional[str] = None


 class ChatCompletionResponseStreamChoice(BaseModel):
@@ -236,6 +240,8 @@ class CompletionResponseChoice(BaseModel):
     completion_token_ids: Optional[List[int]] = None
     text_after_process: Optional[str] = None
     raw_prediction: Optional[str] = None
+    prompt_tokens: Optional[str] = None
+    completion_tokens: Optional[str] = None
     arrival_time: Optional[float] = None
     logprobs: Optional[CompletionLogprobs] = None
     reasoning_content: Optional[str] = None
@@ -280,6 +286,8 @@ class CompletionResponseStreamChoice(BaseModel):
     completion_token_ids: Optional[List[int]] = None
     text_after_process: Optional[str] = None
     raw_prediction: Optional[str] = None
+    prompt_tokens: Optional[str] = None
+    completion_tokens: Optional[str] = None
     reasoning_content: Optional[str] = None
     finish_reason: Optional[Literal["stop", "length", "tool_calls"]] = None
     tool_calls: Optional[List[DeltaToolCall | ToolCall]] = None

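Across all four response models the addition is identical: two optional string fields that default to None, so clients that never set return_token_ids see no schema change. A minimal standalone sketch of the pattern (assuming Pydantic v2; not the full protocol module):

from typing import Optional

from pydantic import BaseModel


class DeltaMessage(BaseModel):
    # Existing debug fields, populated only when return_token_ids is set.
    text_after_process: Optional[str] = None
    raw_prediction: Optional[str] = None
    # Added by this commit: string views of the prompt and the completion.
    prompt_tokens: Optional[str] = None
    completion_tokens: Optional[str] = None


# Defaults keep the wire format backward compatible: None-valued fields
# disappear entirely once exclude_none filtering is applied.
print(DeltaMessage(prompt_tokens="<|user|>Hi").model_dump(exclude_none=True))
# {'prompt_tokens': '<|user|>Hi'}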
View File

@@ -237,6 +237,7 @@ class OpenAIServingChat:
             if request.return_token_ids:
                 choice.delta.prompt_token_ids = list(prompt_token_ids)
                 choice.delta.text_after_process = text_after_process
+                choice.delta.prompt_tokens = text_after_process
             chunk = ChatCompletionStreamResponse(
                 id=request_id,
                 object=chunk_object_type,
@@ -308,6 +309,7 @@ class OpenAIServingChat:
             if request.return_token_ids:
                 choice.delta.completion_token_ids = list(output["token_ids"])
                 choice.delta.raw_prediction = output.get("raw_prediction")
+                choice.delta.completion_tokens = output.get("raw_prediction")
             if include_continuous_usage:
                 chunk.usage = UsageInfo(
                     prompt_tokens=num_prompt_tokens,
@@ -442,7 +444,9 @@ class OpenAIServingChat:
             prompt_token_ids=prompt_token_ids if request.return_token_ids else None,
             completion_token_ids=completion_token_ids if request.return_token_ids else None,
             text_after_process=text_after_process if request.return_token_ids else None,
+            prompt_tokens=text_after_process if request.return_token_ids else None,
             raw_prediction=output.get("raw_prediction") if request.return_token_ids else None,
+            completion_tokens=output.get("raw_prediction") if request.return_token_ids else None,
         )
         logprobs_full_res = None
         if logprob_contents:

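Both new fields are populated as aliases of values the handler already computes: prompt_tokens mirrors text_after_process (the prompt string after chat-template processing) and completion_tokens mirrors raw_prediction (the unprocessed model output). An illustrative sketch of the streamed deltas a client might see with return_token_ids set; all values here are invented:

# Hypothetical payloads for illustration only. In streaming mode the
# prompt-side fields arrive in the first chunk (first hunk above) and the
# completion-side fields arrive with the generated tokens (second hunk).
first_delta = {
    "prompt_token_ids": [1, 15043],
    "text_after_process": "<|user|>Hello<|assistant|>",
    "prompt_tokens": "<|user|>Hello<|assistant|>",  # same value as text_after_process
}
later_delta = {
    "completion_token_ids": [6324, 29991],
    "raw_prediction": "Hi!",
    "completion_tokens": "Hi!",  # same value as raw_prediction
}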
View File

@@ -343,6 +343,7 @@ class OpenAIServingCompletion:
                         text="",
                         prompt_token_ids=list(prompt_batched_token_ids[idx]),
                         text_after_process=text_after_process_list[idx],
+                        prompt_tokens=text_after_process_list[idx],
                         completion_token_ids=None,
                     )
                 ],
@@ -393,6 +394,7 @@ class OpenAIServingCompletion:
                     completion_token_ids=output.get("token_ids") if request.return_token_ids else None,
                     tool_calls=None,
                     raw_prediction=output.get("raw_prediction") if request.return_token_ids else None,
+                    completion_tokens=output.get("raw_prediction") if request.return_token_ids else None,
                     reasoning_content=output.get("reasoning_content"),
                     arrival_time=arrival_time,
                     logprobs=logprobs_res,
@@ -511,7 +513,9 @@ class OpenAIServingCompletion:
             prompt_token_ids=prompt_token_ids if request.return_token_ids else None,
             completion_token_ids=completion_token_ids if request.return_token_ids else None,
             raw_prediction=output.get("raw_prediction") if request.return_token_ids else None,
+            completion_tokens=output.get("raw_prediction") if request.return_token_ids else None,
             text_after_process=text_after_process_list[idx] if request.return_token_ids else None,
+            prompt_tokens=text_after_process_list[idx] if request.return_token_ids else None,
             reasoning_content=output.get("reasoning_content"),
             tool_calls=output.get("tool_call"),
             logprobs=aggregated_logprobs,
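
From the client side, the new fields ride along with the existing return_token_ids extension. A hedged usage sketch with the OpenAI Python SDK; the base URL, model name, and API key are placeholders for a running FastDeploy-style server:

from openai import OpenAI

# Placeholder endpoint and credentials; adjust for your deployment.
client = OpenAI(base_url="http://localhost:8000/v1", api_key="EMPTY")

resp = client.chat.completions.create(
    model="default",
    messages=[{"role": "user", "content": "Hello"}],
    extra_body={"return_token_ids": True},  # server-specific flag checked above
)

message = resp.choices[0].message
# prompt_tokens / completion_tokens are not part of the standard OpenAI
# schema, so read them from the SDK object's extra fields.
extras = message.model_extra or {}
print(extras.get("prompt_tokens"))      # templated prompt string
print(extras.get("completion_tokens"))  # raw model prediction string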