Mirror of https://github.com/PaddlePaddle/FastDeploy.git (synced 2025-12-24 13:28:13 +08:00)
remove input_ids from ForwardMeta (#4793)
Some checks failed
CE Compile Job / ce_job_pre_check (push) Has been cancelled
CE Compile Job / print_ce_job_pre_check_outputs (push) Has been cancelled
CE Compile Job / FD-Clone-Linux (push) Has been cancelled
CE Compile Job / Show Code Archive Output (push) Has been cancelled
CE Compile Job / BUILD_SM8090 (push) Has been cancelled
CE Compile Job / BUILD_SM8689 (push) Has been cancelled
CE Compile Job / CE_UPLOAD (push) Has been cancelled
Deploy GitHub Pages / deploy (push) Has been cancelled
@@ -65,8 +65,6 @@ class ForwardMeta:
     ForwardMeta is used to store the global meta information of the model forward.
     """

-    # Input tokens IDs
-    input_ids: paddle.Tensor
     # Input tokens IDs of removed padding
     ids_remove_padding: paddle.Tensor
     # Rotation position embedding

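Note (not part of the diff): after this hunk, ids_remove_padding is the only token-ID field left on ForwardMeta. A minimal sketch of the resulting dataclass, with every other field of the real class elided; the field names and the step_use_cudagraph flag are taken from the hunks in this commit, the defaults are assumptions for the sketch:

    # Minimal sketch, assuming the layout shown in the hunk above.
    from dataclasses import dataclass
    import paddle

    @dataclass
    class ForwardMeta:
        """ForwardMeta is used to store the global meta information of the model forward."""
        # Input tokens IDs of removed padding (the only token-ID field left here)
        ids_remove_padding: paddle.Tensor = None
        # Whether this step runs under CUDA graph capture (also seen in this diff)
        step_use_cudagraph: bool = False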
@@ -265,7 +263,7 @@ class HPUForwardMeta(ForwardMeta):
     """

     #
-    input_ids: paddle.Tensor
+    input_ids: paddle.Tensor = None

     # attention meta
     forward_mode: ForwardMode = ForwardMode.MIXED

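Note (a reading of this hunk, not an upstream statement): HPUForwardMeta still declares input_ids, now defaulting to None, so HPU-specific code that reads forward_meta.input_ids keeps working while generic callers stop passing it. Roughly, building on the sketch above:

    # Sketch under the same assumptions as the ForwardMeta sketch above.
    @dataclass
    class HPUForwardMeta(ForwardMeta):
        # Kept for HPU-specific paths; optional so generic callers need not supply it.
        input_ids: paddle.Tensor = None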
@@ -623,7 +623,6 @@ class MTPProposer(Proposer):
         """
         # Initialize forward meta
         self.forward_meta = ForwardMeta(
-            input_ids=self.model_inputs["input_ids"],
             ids_remove_padding=self.model_inputs["ids_remove_padding"],
             rotary_embs=self.model_inputs["rope_emb"],
             attn_backend=self.attn_backends[0],

@@ -587,7 +587,6 @@ class GCUModelRunner(ModelRunnerBase):
         """
         # Initialize forward meta
         self.forward_meta = ForwardMeta(
-            input_ids=self.share_inputs["input_ids"],
             ids_remove_padding=self.share_inputs["ids_remove_padding"],
             rotary_embs=self.share_inputs["rope_emb"],
             attn_backend=self.attn_backends[0],

@@ -1330,7 +1330,6 @@ class GPUModelRunner(ModelRunnerBase):
         """
         # Initialize forward meta
         self.forward_meta = ForwardMeta(
-            input_ids=self.share_inputs["input_ids"],
             ids_remove_padding=self.share_inputs["ids_remove_padding"],
             rotary_embs=self.share_inputs["rope_emb"],
             attn_backend=self.attn_backends[0],

@@ -1128,7 +1128,6 @@ class MetaxModelRunner(ModelRunnerBase):
         """
         # Initialize forward meta
         self.forward_meta = ForwardMeta(
-            input_ids=self.share_inputs["input_ids"],
             ids_remove_padding=self.share_inputs["ids_remove_padding"],
             rotary_embs=self.share_inputs["rope_emb"],
             attn_backend=self.attn_backends[0],

@@ -89,7 +89,6 @@ def xpu_pre_process(
     share_inputs["cu_seqlens_k"] = cu_seqlens_k

     xpu_forward_meta = XPUForwardMeta(
-        input_ids=share_inputs["input_ids"],
         ids_remove_padding=share_inputs["ids_remove_padding"],
         rotary_embs=share_inputs["rope_emb"],
         attn_backend=None,

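Note (not part of the diff): the five call-site hunks above (MTPProposer, GCUModelRunner, GPUModelRunner, MetaxModelRunner, xpu_pre_process) are the same mechanical change: drop the input_ids= keyword when building the meta object. A hedged sketch of the pattern, reusing the sketched ForwardMeta from above; share_inputs stands in for the runner's dict of preallocated buffers seen in the hunks:

    import paddle

    # Stand-in buffers; real runners keep many more entries in share_inputs.
    share_inputs = {"ids_remove_padding": paddle.zeros([16], dtype="int64")}

    # After this commit only the padding-stripped IDs ride on the meta object;
    # code that still needs the raw padded input_ids reads share_inputs directly.
    forward_meta = ForwardMeta(ids_remove_padding=share_inputs["ids_remove_padding"])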
@@ -42,12 +42,12 @@ class TestCase1SubLayer1(paddle.nn.Layer):
     def forward(self, ids_remove_padding, forward_meta: ForwardMeta):
         """Sub layer1 forward pass"""

-        output = paddle.add(forward_meta.input_ids, forward_meta.input_ids)
+        output = paddle.add(forward_meta.ids_remove_padding, forward_meta.ids_remove_padding)
         return output

     def forward_correct(self, ids_remove_padding, forward_meta: ForwardMeta):
         """Sub layer1 Correct forward pass"""
-        output = paddle.add(forward_meta.input_ids, forward_meta.input_ids)
+        output = paddle.add(forward_meta.ids_remove_padding, forward_meta.ids_remove_padding)
         return output


@@ -59,15 +59,15 @@ class TestCase1SubLayer2(paddle.nn.Layer):

     def forward(self, ids_remove_padding, forward_meta: ForwardMeta):
         """Sub layer2 forward pass"""
-        x = forward_meta.input_ids
-        y = forward_meta.input_ids
+        x = forward_meta.ids_remove_padding
+        y = forward_meta.ids_remove_padding
         output = x + y
         return output

     def forward_correct(self, ids_remove_padding, forward_meta: ForwardMeta):
         """Sub layer2 Correct forward pass"""
-        x = forward_meta.input_ids
-        y = forward_meta.input_ids
+        x = forward_meta.ids_remove_padding
+        y = forward_meta.ids_remove_padding
         output = x + y
         return output

@@ -81,12 +81,12 @@ class TestCase1SubLayer3(paddle.nn.Layer):

     def forward(self, ids_remove_padding, forward_meta: ForwardMeta):
         """Sub layer3 forward pass"""
-        output = paddle.matmul(forward_meta.input_ids, forward_meta.input_ids)
+        output = paddle.matmul(forward_meta.ids_remove_padding, forward_meta.ids_remove_padding)
         return output

     def forward_correct(self, ids_remove_padding, forward_meta: ForwardMeta):
         """Sub layer3 Correct forward pass"""
-        output = paddle.matmul(forward_meta.input_ids, forward_meta.input_ids)
+        output = paddle.matmul(forward_meta.ids_remove_padding, forward_meta.ids_remove_padding)
         return output


@@ -110,13 +110,12 @@ class TestModel1(paddle.nn.Layer):
         sublayer1_output = self.sublayer1(ids_remove_padding=ids_remove_padding, forward_meta=sub_meta1)

         # sublayer2 not use cuda graph
-        sub_meta2 = ForwardMeta(input_ids=sublayer1_output, ids_remove_padding=sublayer1_output)
+        sub_meta2 = ForwardMeta(ids_remove_padding=sublayer1_output)
         sublayer2_output = self.sublayer2(ids_remove_padding=sublayer1_output, forward_meta=sub_meta2)
         self.sublayer2_output_buffer.copy_(sublayer2_output, False)

         # sublayer3 use cuda graph
         sub_meta3 = ForwardMeta(
-            input_ids=self.sublayer2_output_buffer,
             ids_remove_padding=self.sublayer2_output_buffer,
             step_use_cudagraph=True,
         )

@@ -134,11 +133,11 @@ class TestModel1(paddle.nn.Layer):
         )

         # sublayer2 not use cuda graph
-        sub_meta2 = ForwardMeta(input_ids=sublayer1_output, ids_remove_padding=sublayer1_output)
+        sub_meta2 = ForwardMeta(ids_remove_padding=sublayer1_output)
         sublayer2_output = self.sublayer2.forward_correct(ids_remove_padding=sublayer1_output, forward_meta=sub_meta2)

         # sublayer3 not use cuda graph
-        sub_meta3 = ForwardMeta(input_ids=sublayer2_output, ids_remove_padding=sublayer2_output)
+        sub_meta3 = ForwardMeta(ids_remove_padding=sublayer2_output)
         sublayer3_output = self.sublayer3.forward_correct(ids_remove_padding=sublayer2_output, forward_meta=sub_meta3)

         return sublayer3_output

@@ -175,7 +174,7 @@ class TestCUDAGrpahSubgraph(unittest.TestCase):
         # Run Test Case1
         test_model1 = TestModel1(fd_config=fd_config)
         input_tensor1 = paddle.ones([8])
-        forward_meta1 = ForwardMeta(input_ids=input_tensor1, ids_remove_padding=input_tensor1, step_use_cudagraph=True)
+        forward_meta1 = ForwardMeta(ids_remove_padding=input_tensor1, step_use_cudagraph=True)

         # Trigger Capture
         _ = test_model1(ids_remove_padding=input_tensor1, forward_meta=forward_meta1)

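Note (not part of the diff): across the test hunks, every read of forward_meta.input_ids becomes a read of forward_meta.ids_remove_padding, and ForwardMeta is built with the single keyword. A hedged, self-contained sketch of the updated test pattern, using the sketched ForwardMeta from above rather than the real class:

    import paddle

    x = paddle.ones([8])
    meta = ForwardMeta(ids_remove_padding=x, step_use_cudagraph=True)
    # Layers now read the padding-stripped IDs off the meta object.
    out = paddle.add(meta.ids_remove_padding, meta.ids_remove_padding)
    assert out.shape == [8]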
@@ -27,13 +27,13 @@ class TestCase1SubLayer1(paddle.nn.Layer):
     def forward(self, ids_remove_padding, forward_meta: ForwardMeta):
         """Sub layer1 forward pass"""

-        output = paddle.add(forward_meta.input_ids, forward_meta.input_ids)
+        output = paddle.add(forward_meta.ids_remove_padding, forward_meta.ids_remove_padding)
         return output

     def forward_correct(self, ids_remove_padding, forward_meta: ForwardMeta):
         """Sub layer1 Correct forward pass"""

-        output = paddle.add(forward_meta.input_ids, forward_meta.input_ids)
+        output = paddle.add(forward_meta.ids_remove_padding, forward_meta.ids_remove_padding)
         return output


@@ -55,9 +55,7 @@ class TestModel1(paddle.nn.Layer):
         sublayer1_output = self.sublayer1(ids_remove_padding=ids_remove_padding, forward_meta=sub_meta1)

         # sublayer2 use cuda graph
-        sub_meta2 = ForwardMeta(
-            input_ids=sublayer1_output, ids_remove_padding=sublayer1_output, step_use_cudagraph=True
-        )
+        sub_meta2 = ForwardMeta(ids_remove_padding=sublayer1_output, step_use_cudagraph=True)
         sublayer2_output = self.sublayer2(ids_remove_padding=sublayer1_output, forward_meta=sub_meta2)

         return sublayer2_output

@@ -71,7 +69,7 @@ class TestModel1(paddle.nn.Layer):
         )

         # sublayer2 not use cuda graph
-        sub_meta2 = ForwardMeta(input_ids=sublayer1_output, ids_remove_padding=sublayer1_output)
+        sub_meta2 = ForwardMeta(ids_remove_padding=sublayer1_output)
         sublayer2_output = self.sublayer2.forward_correct(ids_remove_padding=sublayer1_output, forward_meta=sub_meta2)

         return sublayer2_output

@@ -109,7 +107,7 @@ class TestCUDAGrpahRecapture(unittest.TestCase):
         # Run Test Case1
         self.test_model1 = TestModel1(fd_config=fd_config)
         input_tensor1 = paddle.ones([1, 32768])
-        forward_meta1 = ForwardMeta(input_ids=input_tensor1, ids_remove_padding=input_tensor1, step_use_cudagraph=True)
+        forward_meta1 = ForwardMeta(ids_remove_padding=input_tensor1, step_use_cudagraph=True)

         # Correct output
         self.output_correct = self.test_model1.forward_correct(

@@ -42,13 +42,13 @@ class TestCase1SubLayer1(paddle.nn.Layer):
     def forward(self, ids_remove_padding, forward_meta: ForwardMeta):
         """Sub layer1 forward pass"""

-        output = paddle.add(forward_meta.input_ids, forward_meta.input_ids)
+        output = paddle.add(forward_meta.ids_remove_padding, forward_meta.ids_remove_padding)
         return output

     def forward_correct(self, ids_remove_padding, forward_meta: ForwardMeta):
         """Sub layer1 Correct forward pass"""

-        output = paddle.add(forward_meta.input_ids, forward_meta.input_ids)
+        output = paddle.add(forward_meta.ids_remove_padding, forward_meta.ids_remove_padding)
         return output


@@ -70,9 +70,7 @@ class TestModel1(paddle.nn.Layer):
         sublayer1_output = self.sublayer1(ids_remove_padding=ids_remove_padding, forward_meta=sub_meta1)

         # sublayer2 use cuda graph
-        sub_meta2 = ForwardMeta(
-            input_ids=sublayer1_output, ids_remove_padding=sublayer1_output, step_use_cudagraph=True
-        )
+        sub_meta2 = ForwardMeta(ids_remove_padding=sublayer1_output, step_use_cudagraph=True)
         sublayer2_output = self.sublayer2(ids_remove_padding=sublayer1_output, forward_meta=sub_meta2)

         return sublayer2_output

@@ -86,7 +84,7 @@ class TestModel1(paddle.nn.Layer):
         )

         # sublayer2 not use cuda graph
-        sub_meta2 = ForwardMeta(input_ids=sublayer1_output, ids_remove_padding=sublayer1_output)
+        sub_meta2 = ForwardMeta(ids_remove_padding=sublayer1_output)
         sublayer2_output = self.sublayer2.forward_correct(ids_remove_padding=sublayer1_output, forward_meta=sub_meta2)

         return sublayer2_output

@@ -122,7 +120,7 @@ class TestCUDAGrpahSpecDecode(unittest.TestCase):
         # Run Test Case1
         test_model1 = TestModel1(fd_config=fd_config)
         input_tensor1 = paddle.ones([1, 32768])
-        forward_meta1 = ForwardMeta(input_ids=input_tensor1, ids_remove_padding=input_tensor1, step_use_cudagraph=True)
+        forward_meta1 = ForwardMeta(ids_remove_padding=input_tensor1, step_use_cudagraph=True)

         # Trigger Capture
         _ = test_model1(ids_remove_padding=input_tensor1, forward_meta=forward_meta1)

@@ -108,9 +108,7 @@ class TestGraphOptBackend(unittest.TestCase):

         # Create input data
         self.input_tensor = paddle.randint(32, shape=self.input_shape, dtype=self.dtype)
-        self.forward_meta = ForwardMeta(
-            input_ids=self.input_tensor, ids_remove_padding=self.input_tensor, step_use_cudagraph=True
-        )
+        self.forward_meta = ForwardMeta(ids_remove_padding=self.input_tensor, step_use_cudagraph=True)

         # Compute baseline result once
         baseline_model = Attention(fd_config=self.baseline_fd_config, **self.model_config)

@@ -108,7 +108,7 @@ class TestStaticGraphCUDAGraphSplit(unittest.TestCase):

         test_model1 = TestModel(fd_config=fd_config)
         x = paddle.randint(32, shape=[1, 8])
-        forward_meta1 = ForwardMeta(input_ids=x, ids_remove_padding=x, step_use_cudagraph=True)
+        forward_meta1 = ForwardMeta(ids_remove_padding=x, step_use_cudagraph=True)

         # Trigger Capture
         with sot_warmup_guard(True):

@@ -320,7 +320,6 @@ class TestAttentionPerformance(unittest.TestCase):
         ) = pre_process(input_ids, seq_lens_this_time, False, None, seq_lens_encoder, seq_lens_decoder)

         meta = ForwardMeta(
-            input_ids=input_ids,
             ids_remove_padding=ids_remove_padding,
             seq_lens_encoder=seq_lens_encoder,
             seq_lens_decoder=seq_lens_decoder,

@@ -30,6 +30,7 @@ class TOYGPUModelRunner:
         self.pre_max_block_num = 16
+        # Not the tensor in real sense, just for make ForwardMeta
         self.share_inputs = {}

         self.share_inputs["input_ids"] = paddle.full(
             [self.max_num_seqs, self.max_model_len],
             0,

@@ -63,7 +64,6 @@ class TOYGPUModelRunner:
         """
         # Ignore the attentionbackbend for simplify
         self.forward_meta = ForwardMeta(
-            input_ids=self.share_inputs["input_ids"],
             ids_remove_padding=self.share_inputs["ids_remove_padding"],
             # rotary_embs=self.share_inputs["rope_emb"],# Ignore the rope_emb for simplify
            # attn_backend=self.attn_backends[0],# Ignore the attn_backbend for simplify

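Note (not part of the diff): if ForwardMeta is a plain dataclass, as in the sketch near the top, this commit is API-breaking for external callers: passing input_ids now fails fast. A hedged sketch of that behavior under the same assumption:

    import paddle

    try:
        ForwardMeta(input_ids=paddle.ones([8]), ids_remove_padding=paddle.ones([8]))
    except TypeError:
        pass  # "unexpected keyword argument 'input_ids'" under the dataclass assumption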