Mirror of https://github.com/PaddlePaddle/FastDeploy.git (synced 2025-12-24 13:28:13 +08:00)
[TSP] Support qwen3 moe tsp + cudagraph (#4871)
* support qwen3_moe tsp mode
* fix
* fix
* update
* update
* update
* fix
* support external_rmsnorm
* update
* fix
@@ -312,6 +312,7 @@ class Ernie4_5_DecoderLayer(nn.Layer):
             hidden_size=fd_config.model_config.hidden_size,
             eps=fd_config.model_config.rms_norm_eps,
             prefix=f"{prefix}.post_attention_layernorm",
+            layer_id=layer_id,
         )

     def load_state_dict(self, state_dict):
@@ -329,18 +330,9 @@ class Ernie4_5_DecoderLayer(nn.Layer):
         hidden_states: paddle.Tensor,
         residual: paddle.Tensor = None,
     ):
-        if residual is None:
-            residual = hidden_states
-            hidden_states = self.input_layernorm(
-                hidden_states,
-                forward_meta=forward_meta,
-            )
-        else:
-            hidden_states, residual = self.input_layernorm(
-                hidden_states,
-                residual,
-                forward_meta=forward_meta,
-            )
+        hidden_states, residual = self.input_layernorm(
+            hidden_states, residual_input=residual, forward_meta=forward_meta
+        )

         hidden_states = self.self_attn(
             hidden_states=hidden_states,
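The hunk above collapses the residual if/else into one fused add-and-norm call. As a rough illustration of that pattern, here is a minimal sketch of what a fused residual-add RMSNorm forward could look like in Paddle; the class name, signature, and internals are assumptions made to mirror the diff's call sites, not FastDeploy's actual RMSNorm implementation:

import paddle
import paddle.nn as nn


class FusedRMSNorm(nn.Layer):
    """Hypothetical RMSNorm that folds the residual add into forward()."""

    def __init__(self, hidden_size: int, eps: float = 1e-6):
        super().__init__()
        self.eps = eps
        self.weight = self.create_parameter(
            shape=[hidden_size],
            default_initializer=nn.initializer.Constant(1.0),
        )

    def forward(self, x, residual_input=None, forward_meta=None):
        # forward_meta only mirrors the diff's call sites; unused here.
        # Fold the residual add into the norm and return the pre-norm sum
        # as the new residual, so callers need no None-check branch.
        if residual_input is not None:
            x = x + residual_input
        variance = x.astype("float32").pow(2).mean(axis=-1, keepdim=True)
        normed = x * paddle.rsqrt(variance + self.eps).astype(x.dtype)
        return normed * self.weight, x

Because every call site now unpacks the same (normed, residual) tuple, the layer forward is branch-free, which is plausibly also friendlier to the CUDA graph capture this PR targets.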
@@ -350,7 +342,6 @@ class Ernie4_5_DecoderLayer(nn.Layer):
         hidden_states, residual = self.post_attention_layernorm(
             hidden_states,
-            residual,
             forward_meta=forward_meta,
         )

         hidden_states = self.mlp(hidden_states)
@@ -455,9 +446,7 @@ class Ernie4_5_Model(nn.Layer):
         for i in range(self.num_layers):
             hidden_states, residual = self.layers[i](forward_meta, hidden_states, residual)

-        hidden_states = hidden_states + residual
-
-        out = self.norm(hidden_states, forward_meta=forward_meta)
+        out = self.norm(hidden_states, residual, forward_meta=forward_meta)[0]

         if current_platform.is_iluvatar() and forward_meta.attn_backend.mixed:
             out = forward_meta.attn_backend.reverse_transpose(out)
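After the layer loop, the final norm folds in the last residual the same way, and the trailing [0] keeps only the normalized output. Continuing the hypothetical FusedRMSNorm sketch above (again an assumption, not FastDeploy's real API):

# Hypothetical usage mirroring the new call in Ernie4_5_Model.forward.
norm = FusedRMSNorm(hidden_size=64)
hidden_states = paddle.randn([2, 8, 64])
residual = paddle.randn([2, 8, 64])

# The norm returns (normalized output, updated residual); indexing with
# [0] discards the residual, matching `self.norm(...)[0]` in the diff.
out = norm(hidden_states, residual)[0]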