Mirror of https://github.com/PaddlePaddle/FastDeploy.git (synced 2025-12-24 13:28:13 +08:00)
[Cherry-Pick][BugFix] fix hang when n>1 and --enable-logprob (#5492) (#5499) (#5498)
Some checks failed
CE Compile Job / ce_job_pre_check (push) Has been cancelled
CE Compile Job / print_ce_job_pre_check_outputs (push) Has been cancelled
CE Compile Job / FD-Clone-Linux (push) Has been cancelled
CE Compile Job / Show Code Archive Output (push) Has been cancelled
CE Compile Job / BUILD_SM8090 (push) Has been cancelled
CE Compile Job / BUILD_SM8689 (push) Has been cancelled
CE Compile Job / CE_UPLOAD (push) Has been cancelled
* [BugFix] fix hang when n>1 and --enable-logprob (#5492)
* check
* check
* check
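For context: the hang showed up when a single request sampled several sequences (n>1) while the server ran with --enable-logprob. A minimal reproduction sketch against an OpenAI-compatible FastDeploy endpoint might look like the following; the address, port, and model name are assumptions, not taken from this commit:

# Hypothetical reproduction sketch; endpoint URL and model name are assumed.
import requests

resp = requests.post(
    "http://localhost:8188/v1/chat/completions",  # assumed server address
    json={
        "model": "default",  # assumed model name
        "messages": [{"role": "user", "content": "Hello"}],
        "n": 2,              # more than one sampled sequence per request
        "logprobs": True,    # needs the server launched with --enable-logprob
    },
    timeout=60,
)
print(resp.json())

Per the commit title, a request shaped like this could previously leave the server hung; with the fix it completes normally.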
@@ -1366,8 +1366,8 @@ class GPUModelRunner(ModelRunnerBase):
                self.top_p_normalized_logprobs = any(
                    req.sampling_params.top_p_normalized_logprobs for req in logprobs_reqs
                )
            else:
                self.max_logprobs = None
        elif self.enable_logprob:
            self.max_logprobs = None if not self.speculative_decoding else 0

        # Remove padding
        (
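Reading the first hunk: with logprob support enabled, max_logprobs is left as None on the normal path but pinned to 0 when speculative decoding is active. A standalone sketch of that selection, using a hypothetical helper name (the real code reads these flags off the runner) and assuming None means "no cap applied here" while 0 disables per-token logprobs under speculative decoding:

from typing import Optional

def resolve_max_logprobs(enable_logprob: bool, speculative_decoding: bool) -> Optional[int]:
    # Hypothetical distillation of the branch above, not FastDeploy API.
    if enable_logprob:
        # Speculative decoding pins the limit to 0; otherwise leave it open.
        return None if not speculative_decoding else 0
    return None

assert resolve_max_logprobs(True, False) is None
assert resolve_max_logprobs(True, True) == 0
assert resolve_max_logprobs(False, False) is None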
@@ -2355,6 +2355,19 @@ class GPUModelRunner(ModelRunnerBase):
             self.sampling_metadata,
             p_done_idxs,
         )
+
+        if (
+            self.enable_logprob
+            and not envs.FD_USE_GET_SAVE_OUTPUT_V1
+            and sampler_output.logprobs_tensors is None
+        ):
+            sampler_output.logprobs_tensors = LogprobsTensors(
+                logprob_token_ids=sampler_output.sampled_token_ids,
+                logprobs=paddle.empty_like(sampler_output.sampled_token_ids, device="cpu", dtype="float32"),
+                selected_token_ranks=paddle.empty(
+                    [sampler_output.sampled_token_ids.shape[0]], device="cpu", dtype="int64"
+                ),
+            )
         if self.parallel_config.tensor_parallel_size > 1:
             paddle.distributed.broadcast(
                 sampler_output.sampled_token_ids,
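The second hunk is the substance of the fix: a step can finish with sampler_output.logprobs_tensors still None even though logprobs are enabled, yet the tensor-parallel broadcast that follows needs every rank to take part with matching tensors. Fabricating CPU placeholders keeps all ranks inside the collective instead of leaving some waiting forever. A minimal sketch of that guard pattern, with a hypothetical container and helper (only the field names are taken from the diff):

import paddle
from dataclasses import dataclass

@dataclass
class LogprobsPlaceholder:
    # Hypothetical stand-in for FastDeploy's LogprobsTensors container.
    logprob_token_ids: paddle.Tensor
    logprobs: paddle.Tensor
    selected_token_ranks: paddle.Tensor

def ensure_logprobs(sampled_token_ids, logprobs_tensors):
    # If this rank skipped logprob computation, build dummy tensors whose
    # shapes follow the sampled ids, so downstream collectives line up.
    if logprobs_tensors is not None:
        return logprobs_tensors
    return LogprobsPlaceholder(
        logprob_token_ids=sampled_token_ids,
        logprobs=paddle.zeros_like(sampled_token_ids, dtype="float32"),
        selected_token_ranks=paddle.zeros([sampled_token_ids.shape[0]], dtype="int64"),
    )

ids = paddle.to_tensor([[3], [7]], dtype="int64")
print(ensure_logprobs(ids, None))

The diff itself allocates uninitialized CPU tensors via paddle.empty and paddle.empty_like; the sketch uses zeros only so its printed output is deterministic.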