[Feature] support mtp logprob (#4457)

* support logprob in mtp

* remove debug code

* fix

* feat: add draft_logprobs for Speculative Decode MTP

* Revert "feat: add draft_logprobs for Speculative Decode MTP"

This reverts commit d5a3c5c933.

* fix

* feat: add draft_logprobs for Speculative Decode MTP

* feat: add draft_logprobs for Speculative Decode MTP

* fix some bugs

* fix codestyle

* fix bugs

* fix bugs

* fix bugs

* fix bugs

* fix bugs

* fix unit tests

---------

Co-authored-by: sunlei1024 <sunlei5788@gmail.com>
Co-authored-by: sunlei18 <sunlei18@sunlei18deMacBook-Pro.local>
Author: GoldPancake
Date: 2025-10-20 10:18:00 +08:00
Committed by: GitHub
Parent commit: beaec373c0
Commit: 9c7187998c
18 changed files with 1393 additions and 80 deletions
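The change threads logprobs from the MTP (multi-token prediction) draft model through the completion serving path, alongside the target model's logprobs. A hypothetical illustration of the output payload shape, inferred from how the code below indexes positions 0/1/2 of each triple (the exact semantics of the three sub-lists live in _create_completion_logprobs, which is not part of this diff):

# Illustrative only: field names come from the diff, values are made up.
output = {
    "token_ids": [11, 42, 7],
    # target-model logprobs: three parallel columns per choice
    "top_logprobs": [[11, 42, 7], [-0.10, -0.52, -0.31], [0, 1, 0]],
    # NEW in this PR: the MTP draft model's logprobs in the same layout
    "draft_top_logprobs": [[11, 40, 7], [-0.21, -0.93, -0.44], [0, 2, 0]],
}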


@@ -212,6 +212,7 @@ class OpenAIServingCompletion:
         valid_results = [dict()] * num_choices
         output_tokens = [0] * num_choices
         aggregated_top_logprobs = [[[], [], []] for _ in range(num_choices)]
+        aggregated_draft_top_logprobs = [[[], [], []] for _ in range(num_choices)]
         aggregated_token_ids = [[] for _ in range(num_choices)]
         completion_batched_token_ids = [[] for _ in range(num_choices)]
         current_waiting_time = 0
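A side note on these buffers: the comprehension is what keeps the per-choice accumulators independent; `[dict()] * num_choices` is only safe because each slot is later reassigned wholesale. A minimal sketch of the aliasing pitfall the comprehension avoids:

# Why the comprehension, not multiplication: `*` copies the reference,
# so every choice would share one buffer.
num_choices = 2
shared = [[[], [], []]] * num_choices        # aliased inner lists
shared[0][0].append("x")
assert shared[1][0] == ["x"]                 # leaks across choices

independent = [[[], [], []] for _ in range(num_choices)]
independent[0][0].append("x")
assert independent[1][0] == []               # isolated per choice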
@@ -238,12 +239,19 @@
                     raise ValueError("{}".format(data["error_msg"]))

                 output = data["outputs"]
-                output_top_logprobs = output["top_logprobs"]
+                output_top_logprobs = output.get("top_logprobs") or None
+                output_draft_top_logprobs = output.get("draft_top_logprobs") or None
                 if output_top_logprobs is not None:
                     aggregated_top_logprobs[rid][0].extend(output_top_logprobs[0])
                     aggregated_top_logprobs[rid][1].extend(output_top_logprobs[1])
                     aggregated_top_logprobs[rid][2].extend(output_top_logprobs[2])

+                # draft logprobs
+                if request.include_draft_logprobs and output_draft_top_logprobs is not None:
+                    aggregated_draft_top_logprobs[rid][0].extend(output_draft_top_logprobs[0])
+                    aggregated_draft_top_logprobs[rid][1].extend(output_draft_top_logprobs[1])
+                    aggregated_draft_top_logprobs[rid][2].extend(output_draft_top_logprobs[2])
+
                 aggregated_token_ids[rid].extend(data["outputs"]["token_ids"])

                 self.engine_client.data_processor.process_response_dict(
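The target and draft branches aggregate identically, column by column. A hypothetical helper (not in the PR) expressing the pattern once:

from typing import List

def extend_triple(agg: List[list], chunk: List[list]) -> None:
    """Append one chunk's [ids, logprobs, ranks] columns onto the
    per-choice accumulator, position by position."""
    for col in range(3):
        agg[col].extend(chunk[col])

# equivalent to the repeated .extend calls above:
# extend_triple(aggregated_top_logprobs[rid], output_top_logprobs)
# extend_triple(aggregated_draft_top_logprobs[rid], output_draft_top_logprobs)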
@@ -254,6 +262,7 @@
                 if data.get("finished", False):
                     data["output_token_ids"] = output_tokens[rid]
                     data["outputs"]["top_logprobs"] = aggregated_top_logprobs[rid]
+                    data["outputs"]["draft_top_logprobs"] = aggregated_draft_top_logprobs[rid]
                     data["outputs"]["token_ids"] = aggregated_token_ids[rid]
                     valid_results[rid] = data
                     num_choices -= 1
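When a choice finishes, the accumulated columns are folded back into its result dict, so downstream formatting sees one payload per choice. Roughly (field names from the diff, values illustrative):

finished = {
    "finished": True,
    "output_token_ids": 3,
    "outputs": {
        "token_ids": [11, 42, 7],
        "top_logprobs": [[...], [...], [...]],        # target model
        "draft_top_logprobs": [[...], [...], [...]],  # MTP draft model
    },
}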
@@ -390,10 +399,17 @@
                 await self._echo_back_prompt(request, res, idx)
                 output = res["outputs"]
                 output_top_logprobs = output["top_logprobs"]
+                output_draft_top_logprobs = output["draft_top_logprobs"]
                 logprobs_res: Optional[CompletionLogprobs] = None
+                draft_logprobs_res: Optional[CompletionLogprobs] = None
                 if request.logprobs and output_top_logprobs is not None:
                     logprobs_res = self._create_completion_logprobs(output_top_logprobs, request.logprobs, 0)

+                # draft logprobs
+                if request.include_draft_logprobs and output_draft_top_logprobs is not None:
+                    draft_logprobs_res = self._create_completion_logprobs(
+                        output_draft_top_logprobs, request.logprobs, 0
+                    )
                 output_tokens[idx] += 1
                 delta_message = CompletionResponseStreamChoice(
                     index=idx,
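`_create_completion_logprobs` itself is outside this diff; a rough stand-in, assuming the triple is [token_ids, logprobs, ranks] and ignoring top-k alternatives and detokenization:

from dataclasses import dataclass, field
from typing import List

@dataclass
class CompletionLogprobsSketch:
    # hypothetical simplified stand-in for CompletionLogprobs
    tokens: List[str] = field(default_factory=list)
    token_logprobs: List[float] = field(default_factory=list)

def to_logprobs_sketch(triple: List[list]) -> CompletionLogprobsSketch:
    token_ids, logprobs, _ranks = triple
    return CompletionLogprobsSketch(
        tokens=[f"<id:{tid}>" for tid in token_ids],  # real code detokenizes
        token_logprobs=list(logprobs),
    )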
@@ -406,6 +422,7 @@
                     reasoning_content="",
                     arrival_time=arrival_time,
                     logprobs=logprobs_res,
+                    draft_logprobs=draft_logprobs_res,
                 )
                 if not res["finished"] and "delta_message" in output:
                     delta_message_output = output["delta_message"]
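On the wire, each stream chunk for a choice then carries the draft stream next to the regular one. An illustrative (not verbatim) chunk:

chunk = {
    "index": 0,
    "text": " world",
    "arrival_time": 0.034,
    "logprobs": {"tokens": [" world"], "token_logprobs": [-0.12]},
    "draft_logprobs": {"tokens": [" world"], "token_logprobs": [-0.30]},
}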
@@ -493,12 +510,19 @@
             completion_token_ids = completion_batched_token_ids[idx]

             output = final_res["outputs"]
-            output_top_logprobs = output["top_logprobs"]
+            output_top_logprobs = output.get("top_logprobs") or None
+            output_draft_top_logprobs = output.get("draft_top_logprobs") or None

             aggregated_logprobs: Optional[CompletionLogprobs] = None
             if output_top_logprobs is not None:
                 aggregated_logprobs = self._create_completion_logprobs(output_top_logprobs, request.logprobs, 0)

+            aggregated_draft_logprobs: Optional[CompletionLogprobs] = None
+            if output_draft_top_logprobs is not None:
+                aggregated_draft_logprobs = self._create_completion_logprobs(
+                    output_draft_top_logprobs, request.logprobs, 0
+                )
+
             if request.echo:
                 assert prompt_text is not None
                 token_ids = [*prompt_token_ids, *output["token_ids"]]
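Exposing both streams makes it possible to measure, client-side, how closely the MTP draft head tracks the target model. A minimal sketch, assuming the two logprob sequences align position by position:

import math

def draft_agreement(target_logprobs, draft_logprobs):
    """Mean absolute probability gap between target and draft heads."""
    gaps = [
        abs(math.exp(t) - math.exp(d))
        for t, d in zip(target_logprobs, draft_logprobs)
    ]
    return sum(gaps) / len(gaps) if gaps else 0.0

# e.g. draft_agreement([-0.1, -0.5], [-0.2, -0.9]) -> small gap = good draft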
@@ -524,6 +548,7 @@
                 reasoning_content=output.get("reasoning_content"),
                 tool_calls=output.get("tool_call"),
                 logprobs=aggregated_logprobs,
+                draft_logprobs=aggregated_draft_logprobs,
                 finish_reason=finish_reason,
             )
             choices.append(choice_data)
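End to end, a client opts in with the existing `logprobs` parameter plus the new `include_draft_logprobs` flag from this PR. A sketch using `requests`, assuming a locally served OpenAI-compatible /v1/completions endpoint and a placeholder model name:

import requests

resp = requests.post(
    "http://localhost:8000/v1/completions",  # assumed local deployment
    json={
        "model": "default",                  # placeholder model name
        "prompt": "Hello",
        "max_tokens": 8,
        "logprobs": 5,
        "include_draft_logprobs": True,      # new flag from this PR
    },
)
choice = resp.json()["choices"][0]
print(choice["logprobs"])        # target-model logprobs
print(choice["draft_logprobs"])  # MTP draft-model logprobs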