Mirror of https://github.com/PaddlePaddle/FastDeploy.git (synced 2025-10-05 16:48:03 +08:00)
[Inference, rename] remove padding_offsets from atten use batch_id_per_token (#2880)
* remove padding_offsets from atten
@@ -85,8 +85,8 @@ class ForwardMeta():
     # Accumulated offset
     cum_offsets: Optional[paddle.Tensor] = None
-    # Offset tensor, used to restore the positions in ids_remove_padding back to the original input_ids after padding removal
-    padding_offset: Optional[paddle.Tensor] = None
+    # batch_id_per_token tensor, used to indicate which batch each token of the original input_ids belongs to after padding removal
+    batch_id_per_token: Optional[paddle.Tensor] = None
     # Accumulated sequence length of query
     cu_seqlens_q: Optional[paddle.Tensor] = None
     # Accumulated sequence length of key
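For readers skimming the diff, a minimal sketch (plain Python, not FastDeploy code) of what the renamed field actually holds: after padding removal, `batch_id_per_token` maps each surviving token to the index of the sequence it came from, which is what the old name `padding_offset` obscured.

```python
# Semantics sketch (assumption: toy values, not FastDeploy's kernel).
# Two sequences with real lengths 3 and 2, padded to a common max length.
seq_lens = [3, 2]

# batch_id_per_token has one entry per surviving token and names the
# batch (sequence) index that token came from.
batch_id_per_token = [b for b, n in enumerate(seq_lens) for _ in range(n)]
assert batch_id_per_token == [0, 0, 0, 1, 1]

# cu_seqlens_q / cu_seqlens_k are cumulative lengths with a leading zero,
# so batch b owns the flattened token range [cu_seqlens[b], cu_seqlens[b + 1]).
cu_seqlens = [0]
for n in seq_lens:
    cu_seqlens.append(cu_seqlens[-1] + n)
assert cu_seqlens == [0, 3, 5]
```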
@@ -216,7 +216,7 @@ class AppendAttentionBackend(AttentionBackend):
             forward_meta.seq_lens_encoder,
             forward_meta.seq_lens_decoder,
             forward_meta.seq_lens_this_time,
-            forward_meta.padding_offset,
+            forward_meta.batch_id_per_token,
             forward_meta.cu_seqlens_q,
             metadata.block_tables,
             metadata.encoder_batch_ids,
@@ -32,7 +32,7 @@ def append_attention(
     seq_lens_encoder: paddle.Tensor,
     seq_lens_decoder: paddle.Tensor,
     seq_lens_this_time: paddle.Tensor,
-    padding_offsets: paddle.Tensor,
+    batch_id_per_token: paddle.Tensor,
     cu_seqlens_q: paddle.Tensor,
     block_tables: paddle.Tensor,
     encoder_batch_ids: paddle.Tensor,
@@ -86,7 +86,7 @@ def append_attention(
         seq_lens_encoder,
         seq_lens_decoder,
         seq_lens_this_time,
-        padding_offsets,
+        batch_id_per_token,
         cu_seqlens_q,
         block_tables,
         encoder_batch_ids,
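The rename pays off at kernel call sites like the two hunks above: the tensor is consumed as a batch-index lookup, not as a positional offset. A sketch of that lookup (the helper `token_context` is hypothetical, not a FastDeploy function):

```python
# Illustrative only: given a flattened (padding-free) token index, recover
# which sequence the token belongs to and its position inside that sequence.
def token_context(token_idx, batch_id_per_token, cu_seqlens_q):
    b = batch_id_per_token[token_idx]         # owning batch of this token
    pos_in_seq = token_idx - cu_seqlens_q[b]  # offset within that sequence
    return b, pos_in_seq

batch_id_per_token = [0, 0, 0, 1, 1]
cu_seqlens_q = [0, 3, 5]
# Token 4 is the second token (position 1) of sequence 1.
assert token_context(4, batch_id_per_token, cu_seqlens_q) == (1, 1)
```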
@@ -72,7 +72,7 @@ def pre_process(
     Return:
         ids_remove_padding:
         cum_offsets:
-        padding_offset:
+        batch_id_per_token:
         cu_seqlens_q:
         cu_seqlens_k:
     """
@@ -85,7 +85,7 @@ def pre_process(
         (
             ids_remove_padding,
             cum_offsets,
-            padding_offset,
+            batch_id_per_token,
             cu_seqlens_q,
             cu_seqlens_k,
         ) = speculate_get_padding_offset(
@@ -115,12 +115,12 @@ def pre_process(
         (
             ids_remove_padding,
             cum_offsets,
-            padding_offset,
+            batch_id_per_token,
             cu_seqlens_q,
             cu_seqlens_k,
         ) = get_padding_offset(input_ids, cum_offsets_now, token_num,
                                seq_lens_this_time)
-    return (ids_remove_padding, cum_offsets, padding_offset, cu_seqlens_q,
+    return (ids_remove_padding, cum_offsets, batch_id_per_token, cu_seqlens_q,
             cu_seqlens_k, output_cum_offsets, output_padding_offset)
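To make the updated `pre_process` return contract concrete, here is a toy numpy stand-in for `get_padding_offset` (assumption: the real op is a fused kernel, and `cum_offsets` plus the speculative-decoding outputs are omitted here for brevity):

```python
import numpy as np

# Toy stand-in mirroring the documented outputs after the rename.
def get_padding_offset_py(input_ids, seq_lens):
    # Drop the padding tail of each row and flatten into one token stream.
    ids_remove_padding = np.concatenate(
        [row[:n] for row, n in zip(input_ids, seq_lens)])
    # One batch index per surviving token.
    batch_id_per_token = np.repeat(np.arange(len(seq_lens)), seq_lens)
    # Cumulative sequence lengths with a leading zero (same for q and k here).
    cu_seqlens = np.concatenate([[0], np.cumsum(seq_lens)])
    return ids_remove_padding, batch_id_per_token, cu_seqlens, cu_seqlens

input_ids = np.array([[11, 12, 13, 0], [21, 22, 0, 0]])  # 0 = padding
ids, bid, cu_q, cu_k = get_padding_offset_py(input_ids, [3, 2])
assert ids.tolist() == [11, 12, 13, 21, 22]
assert bid.tolist() == [0, 0, 0, 1, 1]
assert cu_q.tolist() == [0, 3, 5]
```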
@@ -272,8 +272,8 @@ class MTPProposer(Proposer):
             self.main_model_inputs["ids_remove_padding"])
         self.model_inputs["cum_offsets"] = paddle.clone(
             self.main_model_inputs["cum_offsets"])
-        self.model_inputs["padding_offset"] = paddle.clone(
-            self.main_model_inputs["padding_offset"])
+        self.model_inputs["batch_id_per_token"] = paddle.clone(
+            self.main_model_inputs["batch_id_per_token"])
         self.model_inputs["cu_seqlens_q"] = paddle.clone(
             self.main_model_inputs["cu_seqlens_q"])
         self.model_inputs["cu_seqlens_k"] = paddle.clone(
@@ -447,7 +447,7 @@ class MTPProposer(Proposer):
             seq_lens_decoder=self.model_inputs["seq_lens_decoder"],
             seq_lens_this_time=self.model_inputs["seq_lens_this_time"],
             cum_offsets=self.model_inputs["cum_offsets"],
-            padding_offset=self.model_inputs["padding_offset"],
+            batch_id_per_token=self.model_inputs["batch_id_per_token"],
             cu_seqlens_q=self.model_inputs["cu_seqlens_q"],
             cu_seqlens_k=self.model_inputs["cu_seqlens_k"],
             block_tables=self.model_inputs["block_tables"],
@@ -542,7 +542,7 @@ class MTPProposer(Proposer):
         (
             ids_remove_padding,
             cum_offsets,
-            padding_offset,
+            batch_id_per_token,
             cu_seqlens_q,
             cu_seqlens_k,
             output_cum_offsets,
@@ -560,8 +560,8 @@ class MTPProposer(Proposer):
         self.model_inputs["ids_remove_padding"].copy_(
             ids_remove_padding, False)
         self.model_inputs["cum_offsets"].copy_(cum_offsets, False)
-        self.model_inputs["padding_offset"].copy_(
-            padding_offset, False)
+        self.model_inputs["batch_id_per_token"].copy_(
+            batch_id_per_token, False)
         self.model_inputs["cu_seqlens_q"].copy_(cu_seqlens_q, False)
         self.model_inputs["cu_seqlens_k"].copy_(cu_seqlens_k, False)
         # for speculative decoding
@@ -559,7 +559,7 @@ class GPUModelRunner(ModelRunnerBase):
         self.share_inputs["cum_offsets"] = paddle.full([max_num_seqs, 1],
                                                        0,
                                                        dtype='int32')
-        self.share_inputs["padding_offset"] = paddle.full([max_num_seqs, 1],
+        self.share_inputs["batch_id_per_token"] = paddle.full([max_num_seqs, 1],
                                                           0,
                                                           dtype='int32')
         self.share_inputs["cu_seqlens_q"] = paddle.full([max_num_seqs, 1],
@@ -670,7 +670,7 @@ class GPUModelRunner(ModelRunnerBase):
         (
             ids_remove_padding,
             cum_offsets,
-            padding_offset,
+            batch_id_per_token,
             cu_seqlens_q,
             cu_seqlens_k,
             output_cum_offsets,
@@ -685,7 +685,7 @@ class GPUModelRunner(ModelRunnerBase):
         self.share_inputs["ids_remove_padding"].copy_(ids_remove_padding,
                                                       False)
         self.share_inputs["cum_offsets"].copy_(cum_offsets, False)
-        self.share_inputs["padding_offset"].copy_(padding_offset, False)
+        self.share_inputs["batch_id_per_token"].copy_(batch_id_per_token, False)
         self.share_inputs["cu_seqlens_q"].copy_(cu_seqlens_q, False)
         self.share_inputs["cu_seqlens_k"].copy_(cu_seqlens_k, False)
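The runner hunks keep the existing pattern of pre-allocating `share_inputs` buffers once and refreshing them in place with `copy_`. A sketch of that pattern with toy shapes; the second argument to `copy_` is taken to be the blocking flag, so `False` requests a non-blocking copy (an assumption based on these call sites):

```python
import paddle

# Buffers are created once with paddle.full and updated in place with
# copy_, so any object holding a reference to the buffer keeps seeing
# fresh data each step. Shapes here are toy values.
share_inputs = {
    "batch_id_per_token": paddle.full([5, 1], 0, dtype='int32'),
}

new_ids = paddle.to_tensor([[0], [0], [0], [1], [1]], dtype='int32')
# Mirrors the diff's copy_(src, False); False presumably makes the copy
# non-blocking (assumption, inferred from the call sites above).
share_inputs["batch_id_per_token"].copy_(new_ids, False)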
@@ -762,7 +762,7 @@ class GPUModelRunner(ModelRunnerBase):
             seq_lens_decoder=self.share_inputs["seq_lens_decoder"],
             seq_lens_this_time=self.share_inputs["seq_lens_this_time"],
             cum_offsets=self.share_inputs["cum_offsets"],
-            padding_offset=self.share_inputs["padding_offset"],
+            batch_id_per_token=self.share_inputs["batch_id_per_token"],
             cu_seqlens_q=self.share_inputs["cu_seqlens_q"],
             cu_seqlens_k=self.share_inputs["cu_seqlens_k"],
             block_tables=self.share_inputs["block_tables"],
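For downstream callers the change is purely mechanical: any code constructing `ForwardMeta` with the keyword `padding_offset` must switch to `batch_id_per_token`. A minimal stand-in (assumption: the real dataclass carries many more fields than shown):

```python
from dataclasses import dataclass
from typing import Optional

import paddle

# Hypothetical reduced ForwardMeta; only the renamed field and one
# neighbour are shown to illustrate the keyword change callers see.
@dataclass
class ForwardMetaSketch:
    cum_offsets: Optional[paddle.Tensor] = None
    batch_id_per_token: Optional[paddle.Tensor] = None  # formerly padding_offset

meta = ForwardMetaSketch(
    batch_id_per_token=paddle.to_tensor([0, 0, 0, 1, 1], dtype='int32'),
)
```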