Mirror of https://github.com/PaddlePaddle/FastDeploy.git (synced 2025-12-24 13:28:13 +08:00)
[Speculative Decoding] Add draft_logprobs Support for Speculative Decode MTP (#4467)
* feat: add draft_logprobs for Speculative Decode MTP
* feat: add draft_logprobs for Speculative Decode MTP
* feat: add draft_logprobs for Speculative Decode MTP
* fix: postprocess for speculative decode
* test: test_speculative_decoding_use_logprobs
* fix: test_completion_echo
* fix test_max_streaming_tokens

Co-authored-by: Jiang-Jia-Jun <163579578+Jiang-Jia-Jun@users.noreply.github.com>
@@ -234,6 +234,7 @@ class OpenAIServingCompletion:
 valid_results = [dict()] * num_choices
 output_tokens = [0] * num_choices
 aggregated_top_logprobs = [[[], [], []] for _ in range(num_choices)]
+aggregated_draft_top_logprobs = [[[], [], []] for _ in range(num_choices)]
 aggregated_token_ids = [[] for _ in range(num_choices)]
 completion_batched_token_ids = [[] for _ in range(num_choices)]
 current_waiting_time = 0
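For context, the [[], [], []] triples above keep one aggregation slot per choice, mirroring the existing top_logprobs buffer for the new draft_top_logprobs. A minimal sketch of how such a buffer is extended chunk by chunk, assuming each chunk is three parallel lists (token ids, logprobs, per-position candidates); the exact element semantics are an assumption, not spelled out in this diff:

# Sketch only: assumed shape of one top_logprobs chunk is three parallel lists.
num_choices = 2
aggregated_top_logprobs = [[[], [], []] for _ in range(num_choices)]

chunk = [[101, 102], [-0.05, -0.31], [[(101, -0.05)], [(102, -0.31)]]]  # hypothetical chunk
rid = 0
for slot, part in zip(aggregated_top_logprobs[rid], chunk):
    slot.extend(part)

assert aggregated_top_logprobs[rid][0] == [101, 102]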
@@ -266,12 +267,19 @@ class OpenAIServingCompletion:
     raise ValueError("{}".format(data["error_msg"]))

 output = data["outputs"]
-output_top_logprobs = output["top_logprobs"]
+output_top_logprobs = output.get("top_logprobs") or None
+output_draft_top_logprobs = output.get("draft_top_logprobs") or None
 if output_top_logprobs is not None:
     aggregated_top_logprobs[rid][0].extend(output_top_logprobs[0])
     aggregated_top_logprobs[rid][1].extend(output_top_logprobs[1])
     aggregated_top_logprobs[rid][2].extend(output_top_logprobs[2])

+# draft logprobs
+if request.include_draft_logprobs and output_draft_top_logprobs is not None:
+    aggregated_draft_top_logprobs[rid][0].extend(output_draft_top_logprobs[0])
+    aggregated_draft_top_logprobs[rid][1].extend(output_draft_top_logprobs[1])
+    aggregated_draft_top_logprobs[rid][2].extend(output_draft_top_logprobs[2])
+
 aggregated_token_ids[rid].extend(data["outputs"]["token_ids"])

 self.engine_client.data_processor.process_response_dict(
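The switch from output["top_logprobs"] to output.get("top_logprobs") or None means a missing key, an explicit None, and an empty payload all collapse to None, so the is not None checks above skip them uniformly. A small sketch of that normalization idiom, outside the PR:

def _normalize(output: dict, key: str):
    # Missing key, None, and empty list all normalize to None (the `or None` idiom).
    return output.get(key) or None

assert _normalize({}, "top_logprobs") is None
assert _normalize({"top_logprobs": []}, "top_logprobs") is None
assert _normalize({"top_logprobs": [[1], [-0.1], [[]]]}, "top_logprobs") == [[1], [-0.1], [[]]]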
@@ -282,6 +290,7 @@ class OpenAIServingCompletion:
 if data.get("finished", False):
     data["output_token_ids"] = output_tokens[rid]
     data["outputs"]["top_logprobs"] = aggregated_top_logprobs[rid]
+    data["outputs"]["draft_top_logprobs"] = aggregated_draft_top_logprobs[rid]
     data["outputs"]["token_ids"] = aggregated_token_ids[rid]
     valid_results[rid] = data
     num_choices -= 1
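Once a choice finishes, both aggregates sit side by side on the output record, so downstream code can line the target and draft streams up position by position. A hypothetical post-processing snippet (not part of the PR) that averages the gap between target and draft log-probabilities over the accepted tokens, assuming index 1 of each triple holds the per-token logprobs:

def mean_logprob_gap(top_logprobs, draft_top_logprobs):
    # Average (target - draft) logprob over accepted tokens; smaller gaps suggest
    # the draft model tracks the target closely. Index 1 assumed to hold logprobs.
    target_lp, draft_lp = top_logprobs[1], draft_top_logprobs[1]
    pairs = list(zip(target_lp, draft_lp))
    return sum(t - d for t, d in pairs) / len(pairs) if pairs else 0.0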
@@ -437,10 +446,17 @@ class OpenAIServingCompletion:
 await self._process_echo_logic(request, idx, res["outputs"])
 output = res["outputs"]
 output_top_logprobs = output["top_logprobs"]
+output_draft_top_logprobs = output["draft_top_logprobs"]
 logprobs_res: Optional[CompletionLogprobs] = None
+draft_logprobs_res: Optional[CompletionLogprobs] = None
 if request.logprobs and output_top_logprobs is not None:
     logprobs_res = self._create_completion_logprobs(output_top_logprobs, request.logprobs, 0)

+# draft logprobs
+if request.include_draft_logprobs and output_draft_top_logprobs is not None:
+    draft_logprobs_res = self._create_completion_logprobs(
+        output_draft_top_logprobs, request.logprobs, 0
+    )
 output_tokens[idx] += 1
 delta_message = CompletionResponseStreamChoice(
     index=idx,
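Both the regular and the draft path reuse self._create_completion_logprobs with the same request.logprobs top-k. That helper is not shown in this diff, so the sketch below is only a guess at the kind of conversion it performs (three parallel lists in, an OpenAI-style logprobs mapping out); the field handling here is made up for illustration:

from typing import List, Optional

def make_logprobs_sketch(top_logprobs: List[list], num_top: Optional[int]):
    # Hypothetical converter, not FastDeploy's implementation: token ids, logprobs,
    # and per-position candidate pairs -> a dict shaped like a completions logprobs object.
    token_ids, logprobs, candidates = top_logprobs
    return {
        "tokens": [str(t) for t in token_ids],
        "token_logprobs": list(logprobs),
        "top_logprobs": [dict(c[: num_top or 0]) for c in candidates],
    }

make_logprobs_sketch([[101], [-0.1], [[(101, -0.1), (102, -1.3)]]], 2)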
@@ -452,6 +468,7 @@ class OpenAIServingCompletion:
     reasoning_content="",
     arrival_time=arrival_time,
     logprobs=logprobs_res,
+    draft_logprobs=draft_logprobs_res,
 )
 if not res["finished"] and "delta_message" in output:
     delta_message_output = output["delta_message"]
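On the streaming side, each choice chunk now carries draft_logprobs next to logprobs. A hedged example of how a consumer of the parsed SSE payload might pick both up; the field names follow the diff, the chunk literal is illustrative:

# Illustrative parsed stream chunk; only the field names are taken from this diff.
chunk = {
    "choices": [
        {
            "index": 0,
            "text": " world",
            "logprobs": {"tokens": [" world"]},
            "draft_logprobs": {"tokens": [" world"]},
        }
    ]
}
for choice in chunk["choices"]:
    target = choice.get("logprobs")
    draft = choice.get("draft_logprobs")  # None unless include_draft_logprobs was requested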
@@ -541,15 +558,23 @@ class OpenAIServingCompletion:
 final_res = final_res_batch[idx]
 prompt_token_ids = prompt_batched_token_ids[idx // (1 if request.n is None else request.n)]
 assert prompt_token_ids is not None
 prompt_text = request.prompt
 completion_token_ids = completion_batched_token_ids[idx]

 output = final_res["outputs"]
-output_top_logprobs = output["top_logprobs"]
+output_top_logprobs = output.get("top_logprobs") or None
+output_draft_top_logprobs = output.get("draft_top_logprobs") or None
+
 aggregated_logprobs: Optional[CompletionLogprobs] = None
 if output_top_logprobs is not None:
     aggregated_logprobs = self._create_completion_logprobs(output_top_logprobs, request.logprobs, 0)

+aggregated_draft_logprobs: Optional[CompletionLogprobs] = None
+if output_draft_top_logprobs is not None:
+    aggregated_draft_logprobs = self._create_completion_logprobs(
+        output_draft_top_logprobs, request.logprobs, 0
+    )
+
 if request.echo:
     prompt_text = self._echo_back_prompt(request, idx // (1 if request.n is None else request.n))
     token_ids = [*prompt_token_ids, *output["token_ids"]]
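The commit message mentions test_speculative_decoding_use_logprobs; the actual test is not shown here, but a check in that spirit would assert that, when draft logprobs are requested, the draft object covers the same completion tokens as the regular one. A sketch under that assumption, not the real test:

def check_draft_alignment(choice: dict) -> None:
    # Sketch: when both are present, draft and regular logprobs should describe
    # the same generated tokens.
    logprobs = choice.get("logprobs")
    draft = choice.get("draft_logprobs")
    if logprobs and draft:
        assert len(draft["tokens"]) == len(logprobs["tokens"])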
@@ -574,6 +599,7 @@ class OpenAIServingCompletion:
     reasoning_content=output.get("reasoning_content"),
     tool_calls=output.get("tool_call"),
     logprobs=aggregated_logprobs,
+    draft_logprobs=aggregated_draft_logprobs,
     finish_reason=finish_reason,
 )
 choices.append(choice_data)
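End to end, the feature is opt-in through the request: logprobs enables regular logprobs and include_draft_logprobs (the field name read by this diff) additionally returns the draft model's logprobs. A hedged usage sketch against a FastDeploy OpenAI-compatible completions endpoint; the base_url, api_key, and model name are placeholders:

import openai

client = openai.OpenAI(base_url="http://127.0.0.1:8000/v1", api_key="EMPTY")  # placeholder endpoint
resp = client.completions.create(
    model="my-mtp-model",  # placeholder model name
    prompt="Hello",
    max_tokens=16,
    logprobs=5,
    extra_body={"include_draft_logprobs": True},  # request field name taken from this diff
)

With that flag set, each returned choice should carry a draft_logprobs object next to the usual logprobs one, per the changes above.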