replace paddle.max by numpy to avoid useless error log (#4893)

Co-authored-by: K11OntheBoat <ruianmaidanglao@163.com>
K11OntheBoat
2025-11-11 16:28:05 +08:00
committed by GitHub
parent 3098aee05f
commit 76be598129
2 changed files with 4 additions and 4 deletions
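For context, a minimal sketch of the pattern this commit applies in both files: a device-side paddle.max reduction is replaced by a host-side numpy check on a copied buffer. The tensor below is a hypothetical stand-in for the seq_lens_encoder buffers seen in the diff, not code from the repository.

import numpy as np
import paddle

# Hypothetical stand-in for model_inputs["seq_lens_encoder"] / share_inputs["seq_lens_encoder"].
seq_lens_encoder = paddle.to_tensor([0, 0, 5, 0], dtype="int32")

# Old style: reduce on device with paddle.max, then cast the 0-D result to int.
exist_prefill_old = int(paddle.max(seq_lens_encoder)) != 0

# New style in this commit: copy to host once and test with numpy.
exist_prefill_new = np.any(seq_lens_encoder.numpy() > 0)

assert bool(exist_prefill_old) == bool(exist_prefill_new)

The two checks agree as long as the sequence lengths are non-negative, so "max != 0" and "any element > 0" describe the same condition.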


@@ -659,7 +659,7 @@ class MTPProposer(Proposer):
         """
         check whether prefill stage exist
         """
-        if int(paddle.max(self.model_inputs["seq_lens_encoder"])) != 0:
+        if np.any(self.model_inputs["seq_lens_encoder"].numpy() > 0):
             return 1
         else:
             return 0


@@ -222,13 +222,13 @@ class GPUModelRunner(ModelRunnerBase):
         """
         check whether prefill stage exist
         """
-        return int(paddle.max(self.share_inputs["seq_lens_encoder"])) > 0
+        return np.any(self.share_inputs["seq_lens_encoder"].numpy() > 0)

     def exist_decode(self):
         """
         check whether decode stage exist
         """
-        return int(paddle.max(self.share_inputs["seq_lens_decoder"])) > 0
+        return np.any(self.share_inputs["seq_lens_decoder"].numpy() > 0)

     def only_prefill(self):
         """
@@ -1272,7 +1272,7 @@ class GPUModelRunner(ModelRunnerBase):
         self.share_inputs["output_padding_offset"].copy_(output_padding_offset, False)

         # Update bad tokens len
-        max_bad_tokens_len = paddle.max(self.share_inputs["bad_tokens_len"])
+        max_bad_tokens_len = np.max(self.share_inputs["bad_tokens_len"].numpy())

         # Initialize forward meta data
         self.initialize_forward_meta()
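The final hunk applies the same idea to a scalar reduction instead of a boolean check. A hedged sketch, again with a made-up tensor standing in for share_inputs["bad_tokens_len"]:

import numpy as np
import paddle

bad_tokens_len = paddle.to_tensor([3, 1, 7], dtype="int64")  # hypothetical contents

old_max = paddle.max(bad_tokens_len)        # 0-D Paddle tensor, reduced on device
new_max = np.max(bad_tokens_len.numpy())    # plain numpy scalar, reduced on host

assert int(old_max) == int(new_max)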