Mirror of https://github.com/PaddlePaddle/FastDeploy.git
fix mask_offset in append_attn (#3745)
Some checks failed
CE Compile Job / ce_job_pre_check (push) Has been cancelled
CE Compile Job / print_ce_job_pre_check_outputs (push) Has been cancelled
CE Compile Job / FD-Clone-Linux (push) Has been cancelled
CE Compile Job / Show Code Archive Output (push) Has been cancelled
CE Compile Job / BUILD_SM8090 (push) Has been cancelled
CE Compile Job / BUILD_SM8689 (push) Has been cancelled
CE Compile Job / CE_UPLOAD (push) Has been cancelled
Deploy GitHub Pages / deploy (push) Has been cancelled
* fix mask_offset in append_attn
* fix test
@@ -163,14 +163,14 @@ class Attention(nn.Layer):
     def init_weight(self):
         self.q_norm_weight = self.create_parameter(
             shape=[self.qk_head_dim],
-            dtype=self._dtype,
+            dtype="float32",
             is_bias=False,
             default_initializer=paddle.nn.initializer.Constant(0),
         )
 
         self.k_norm_weight = self.create_parameter(
             shape=[self.qk_head_dim],
-            dtype=self._dtype,
+            dtype="float32",
             is_bias=False,
             default_initializer=paddle.nn.initializer.Constant(0),
         )
@@ -184,8 +184,8 @@ class Attention(nn.Layer):
         if self.use_qk_norm:
             q_norm_weight_tensor = paddle.to_tensor(get_tensor(state_dict.pop(self.q_norm_key + ".weight")))
             k_norm_weight_tensor = paddle.to_tensor(get_tensor(state_dict.pop(self.k_norm_key + ".weight")))
-            self.q_norm_weight.set_value(q_norm_weight_tensor)
-            self.k_norm_weight.set_value(k_norm_weight_tensor)
+            self.q_norm_weight.set_value(q_norm_weight_tensor.astype("float32"))
+            self.k_norm_weight.set_value(k_norm_weight_tensor.astype("float32"))
 
     def forward(
         self,
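For context: the second hunk casts the checkpoint tensors to float32 before assignment because the q/k norm weights are now created as float32 parameters. Below is a minimal, hypothetical sketch of that pattern (not part of the commit; qk_head_dim and the checkpoint tensor are placeholders), assuming Paddle 2.x behavior where set_value expects a tensor matching the parameter's dtype:

# Sketch of the dtype-cast pattern applied in the diff (illustrative only).
import paddle

qk_head_dim = 128  # hypothetical head dim, for illustration

layer = paddle.nn.Layer()
# Norm weight is created directly in float32, mirroring the first hunk.
q_norm_weight = layer.create_parameter(
    shape=[qk_head_dim],
    dtype="float32",
    is_bias=False,
    default_initializer=paddle.nn.initializer.Constant(0),
)

# Simulate a half-precision tensor loaded from a checkpoint state dict;
# casting to float32 keeps set_value consistent with the parameter's dtype.
ckpt_tensor = paddle.ones([qk_head_dim], dtype="float16")
q_norm_weight.set_value(ckpt_tensor.astype("float32"))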