Revert "【FIX】Change the name of sparse attn from moba to plas (#3845)" (#4001)
Some checks failed
CE Compile Job / ce_job_pre_check (push) Has been cancelled
CE Compile Job / print_ce_job_pre_check_outputs (push) Has been cancelled
CE Compile Job / FD-Clone-Linux (push) Has been cancelled
CE Compile Job / Show Code Archive Output (push) Has been cancelled
CE Compile Job / BUILD_SM8090 (push) Has been cancelled
CE Compile Job / BUILD_SM8689 (push) Has been cancelled
CE Compile Job / CE_UPLOAD (push) Has been cancelled
Deploy GitHub Pages / deploy (push) Has been cancelled
This reverts commit e31c8f7336.
@@ -20,7 +20,7 @@ from .block_multihead_attn_backend import BlockAttentionBackend
 from .flash_attn_backend import FlashAttentionBackend
 from .iluvatar_attn_backend import IluvatarAttnBackend
 from .mla_attention_backend import MLAAttentionBackend
-from .moba_attention_backend import PlasAttentionBackend
+from .moba_attention_backend import MobaAttentionBackend
 from .native_paddle_backend import PaddleNativeAttnBackend
 from .xpu_attn_backend import XPUAttentionBackend
 
@@ -35,5 +35,5 @@ __all__ = [
     "IluvatarAttnBackend",
     "BlockAttentionBackend",
     "Attention",
-    "PlasAttentionBackend",
+    "MobaAttentionBackend",
 ]
@@ -119,19 +119,19 @@ class Attention(nn.Layer):
         self.init_weight()
 
         if (
-            fd_config.plas_attention_config is not None
-            and fd_config.plas_attention_config.plas_encoder_top_k_left is not None
-            and fd_config.plas_attention_config.plas_encoder_top_k_right is not None
-            and fd_config.plas_attention_config.plas_decoder_top_k_left is not None
-            and fd_config.plas_attention_config.plas_decoder_top_k_right is not None
+            fd_config.moba_attention_config is not None
+            and fd_config.moba_attention_config.moba_encoder_top_k_left is not None
+            and fd_config.moba_attention_config.moba_encoder_top_k_right is not None
+            and fd_config.moba_attention_config.moba_decoder_top_k_left is not None
+            and fd_config.moba_attention_config.moba_decoder_top_k_right is not None
         ):
             mlp_weight_path = os.path.join(
-                fd_config.model_config.model, fd_config.plas_attention_config.mlp_weight_name
+                fd_config.model_config.model, fd_config.moba_attention_config.mlp_weight_name
             )
-            self.plas_use_mlp = mlp_weight_path is not None and os.path.exists(mlp_weight_path)
-            plas_block_size = fd_config.plas_attention_config.plas_block_size
-            plas_max_seq_length = fd_config.plas_attention_config.plas_max_seq_length
-            if self.plas_use_mlp:
+            self.moba_use_mlp = mlp_weight_path is not None and os.path.exists(mlp_weight_path)
+            moba_block_size = fd_config.moba_attention_config.moba_block_size
+            moba_max_seq_length = fd_config.moba_attention_config.moba_max_seq_length
+            if self.moba_use_mlp:
                 mlp_weight = {}
                 with safe_open(mlp_weight_path, framework="np", device="cpu") as f:
                     for key_name in f.keys():
@@ -148,12 +148,12 @@ class Attention(nn.Layer):
                             * self.kv_num_heads : (fd_config.parallel_config.tensor_parallel_rank + 1)
                             * self.kv_num_heads
                         ]
-                assert self.attn_gate_weight.shape[1] % plas_block_size == 0
+                assert self.attn_gate_weight.shape[1] % moba_block_size == 0
 
             self.cache_k_block_means = paddle.zeros(
                 [
                     fd_config.parallel_config.max_num_seqs,
-                    plas_max_seq_length // plas_block_size,
+                    moba_max_seq_length // moba_block_size,
                     self.kv_num_heads,
                     self.head_dim,
                 ],
@@ -39,7 +39,7 @@ from fastdeploy.model_executor.layers.attention.base_attention_backend import (
 
 
 @dataclass
-class PlasAttentionMetadata(AttentionMetadata):
+class MobaAttentionMetadata(AttentionMetadata):
     """
     AppendAttentionMetadata
     """
@@ -54,7 +54,7 @@ class PlasAttentionMetadata(AttentionMetadata):
     max_dec_len_this_time: int = 0
 
 
-class PlasAttentionBackend(AttentionBackend):
+class MobaAttentionBackend(AttentionBackend):
     """
     The backend class that uses paddle native attention implementation.
     Which is used only for testing purpose.
@@ -70,11 +70,11 @@ class PlasAttentionBackend(AttentionBackend):
         decoder_block_shape_q: int = -1,
     ) -> None:
         """
-        PlasAttentionBackend __init__
+        MobaAttentionBackend __init__
         """
         super().__init__()
-        self.attention_metadata: PlasAttentionMetadata = None
-        assert fd_config.plas_attention_config is not None, "plas_attention_config is None"
+        self.attention_metadata: MobaAttentionMetadata = None
+        assert fd_config.moba_attention_config is not None, "moba_attention_config is None"
         self.block_size = fd_config.parallel_config.block_size
         self.max_seq_len = fd_config.parallel_config.max_model_len
         self.max_num_seqs = fd_config.parallel_config.max_num_seqs
@@ -83,18 +83,18 @@ class PlasAttentionBackend(AttentionBackend):
         self.head_dim = fd_config.model_config.head_dim
         self.num_layers: int = fd_config.model_config.num_hidden_layers
         self.attn_block_m = 128
-        self.plas_block_size = fd_config.plas_attention_config.plas_block_size
-        self.plas_encoder_top_k_left = int(fd_config.plas_attention_config.plas_encoder_top_k_left)
-        self.plas_encoder_top_k_right = int(fd_config.plas_attention_config.plas_encoder_top_k_right)
-        self.plas_use_encoder_seq_limit = int(fd_config.plas_attention_config.plas_use_encoder_seq_limit)
-        self.plas_decoder_top_k_left = int(fd_config.plas_attention_config.plas_decoder_top_k_left)
-        self.plas_decoder_top_k_right = int(fd_config.plas_attention_config.plas_decoder_top_k_right)
-        self.plas_use_decoder_seq_limit = int(fd_config.plas_attention_config.plas_use_decoder_seq_limit)
-        self.plas_max_seq_length = fd_config.plas_attention_config.plas_max_seq_length
+        self.moba_block_size = fd_config.moba_attention_config.moba_block_size
+        self.moba_encoder_top_k_left = int(fd_config.moba_attention_config.moba_encoder_top_k_left)
+        self.moba_encoder_top_k_right = int(fd_config.moba_attention_config.moba_encoder_top_k_right)
+        self.moba_use_encoder_seq_limit = int(fd_config.moba_attention_config.moba_use_encoder_seq_limit)
+        self.moba_decoder_top_k_left = int(fd_config.moba_attention_config.moba_decoder_top_k_left)
+        self.moba_decoder_top_k_right = int(fd_config.moba_attention_config.moba_decoder_top_k_right)
+        self.moba_use_decoder_seq_limit = int(fd_config.moba_attention_config.moba_use_decoder_seq_limit)
+        self.moba_max_seq_length = fd_config.moba_attention_config.moba_max_seq_length
 
     def init_attention_metadata(self, forward_meta: ForwardMeta):
         """Init the metadata for a forward pass."""
-        metadata = PlasAttentionMetadata()
+        metadata = MobaAttentionMetadata()
         metadata._dtype = paddle.get_default_dtype()
         metadata.cu_seq_q_pack, metadata.cu_seqlens_k, metadata.q_pack_tokens = get_cur_cu_seq_len_k(
             forward_meta.seq_lens_encoder,
@@ -116,7 +116,7 @@ class PlasAttentionBackend(AttentionBackend):
             [k_token_num + self.attn_block_m, self.kv_num_heads * self.head_dim], dtype=metadata._dtype
         )
         self.attention_metadata = metadata
-        assert self.max_seq_len <= self.plas_max_seq_length
+        assert self.max_seq_len <= self.moba_max_seq_length
 
     def get_kv_cache_shape(
         self,
@@ -186,13 +186,13 @@ class PlasAttentionBackend(AttentionBackend):
             self.max_seq_len,
             attention_metadata.max_enc_len_this_time,
             attention_metadata.max_dec_len_this_time,
-            self.plas_encoder_top_k_left,
-            self.plas_encoder_top_k_right,
-            self.plas_use_encoder_seq_limit,
-            self.plas_decoder_top_k_left,
-            self.plas_decoder_top_k_right,
-            self.plas_use_decoder_seq_limit,
-            layer.plas_use_mlp,
+            self.moba_encoder_top_k_left,
+            self.moba_encoder_top_k_right,
+            self.moba_use_encoder_seq_limit,
+            self.moba_decoder_top_k_left,
+            self.moba_decoder_top_k_right,
+            self.moba_use_decoder_seq_limit,
+            layer.moba_use_mlp,
             getattr(layer, "cache_quant_type_str", "none"),
         )[0]
         return out
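
Note: the revert restores the moba_* names for the sparse-attention config fields and the gate that decides whether the MLP gate weight is loaded in Attention.__init__. The sketch below is only an illustration of that restored check, not FastDeploy code: the MobaAttentionConfig stand-in class and the should_load_mlp_gate helper are hypothetical, with field names taken from the diff above.

```python
import os
from dataclasses import dataclass
from typing import Optional


@dataclass
class MobaAttentionConfig:
    # Hypothetical stand-in for fd_config.moba_attention_config; only the fields
    # referenced by the restored gate are included here.
    moba_encoder_top_k_left: Optional[int] = None
    moba_encoder_top_k_right: Optional[int] = None
    moba_decoder_top_k_left: Optional[int] = None
    moba_decoder_top_k_right: Optional[int] = None
    mlp_weight_name: str = "mlp_weight.safetensors"  # illustrative file name


def should_load_mlp_gate(cfg: Optional[MobaAttentionConfig], model_dir: str) -> bool:
    """Mirror of the restored condition: the MLP gate weight is used only when the
    config exists, all four top-k bounds are set, and the weight file is present
    under the model directory."""
    if cfg is None:
        return False
    if None in (
        cfg.moba_encoder_top_k_left,
        cfg.moba_encoder_top_k_right,
        cfg.moba_decoder_top_k_left,
        cfg.moba_decoder_top_k_right,
    ):
        return False
    return os.path.exists(os.path.join(model_dir, cfg.mlp_weight_name))
```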