polish code with new pre-commit rule (#2923)

Author:    Zero Rains
Committed: 2025-07-19 23:19:27 +08:00 (committed by GitHub)
Parent:    b8676d71a8
Commit:    25698d56d1

424 changed files with 14307 additions and 13518 deletions


@@ -25,14 +25,19 @@ import paddle
 from paddle.nn.functional.flash_attention import flash_attn_unpadded

 from fastdeploy.model_executor.layers.attention.ops import (
-    get_block_shape_and_split_kv_block, init_signal_layerwise,
-    open_shm_and_get_meta_signal, init_kv_signal_per_query)
+    get_block_shape_and_split_kv_block,
+    init_kv_signal_per_query,
+    init_signal_layerwise,
+    open_shm_and_get_meta_signal,
+)
 from fastdeploy.platforms import current_platform

 if current_platform.is_cuda() and not current_platform.is_dcu():
-    from fastdeploy.model_executor.ops.gpu import (decode_mla_write_cache,
-                                                   multi_head_latent_attention,
-                                                   prefill_mla_write_cache)
+    from fastdeploy.model_executor.ops.gpu import (
+        decode_mla_write_cache,
+        multi_head_latent_attention,
+        prefill_mla_write_cache,
+    )

 if TYPE_CHECKING:
     from fastdeploy.model_executor.forward_meta import ForwardMeta
@@ -40,13 +45,14 @@ if TYPE_CHECKING:
 from fastdeploy.config import FDConfig
 from fastdeploy.model_executor.layers.attention.attention import Attention
 from fastdeploy.model_executor.layers.attention.base_attention_backend import (
-    AttentionBackend, AttentionMetadata)
+    AttentionBackend,
+    AttentionMetadata,
+)
 from fastdeploy.model_executor.layers.attention.utils import init_rank_and_device_id


 def yarn_get_mscale(scale=1, mscale=1):
-    """
-    """
+    """ """
     if scale <= 1:
         return 1.0
     return 0.1 * mscale * math.log(scale) + 1.0
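
Note: yarn_get_mscale implements the YaRN attention-entropy correction, mscale(s, m) = 0.1 · m · ln(s) + 1 for s > 1 (and 1.0 otherwise). A quick arithmetic check in plain Python; the values 40 and 1.0 mirror the inline comments further down in this file and are illustrative only:

    import math

    def yarn_get_mscale(scale=1, mscale=1):
        # Same formula as the function in the diff above.
        if scale <= 1:
            return 1.0
        return 0.1 * mscale * math.log(scale) + 1.0

    print(yarn_get_mscale(40, 1.0))  # 0.1 * 1.0 * ln(40) + 1 ≈ 1.3689
    print(yarn_get_mscale(0.5))      # scale <= 1 -> exactly 1.0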
@@ -57,6 +63,7 @@ class MLAAttentionMetadata(AttentionMetadata):
"""
MLAAttentionMetadata for Multi-Layer Attention
"""
max_len_kv: paddle.Tensor = None
set_max_lengths: int = -1
encoder_batch_ids: paddle.Tensor = None
@@ -89,8 +96,13 @@ class MLAAttentionBackend(AttentionBackend):
     MLA Attention Backend implementation.
     """

-    def __init__(self, fd_config: FDConfig, kv_num_heads: int, num_heads: int,
-                 head_dim: int) -> None:
+    def __init__(
+        self,
+        fd_config: FDConfig,
+        kv_num_heads: int,
+        num_heads: int,
+        head_dim: int,
+    ) -> None:
         """
         MLAAttentionBackend __init__
         """
@@ -100,9 +112,9 @@ class MLAAttentionBackend(AttentionBackend):
         # Basic configuration
         self.block_size: int = fd_config.parallel_config.block_size
         self.max_seq_len: int = fd_config.parallel_config.max_model_len
-        self.rope_theta: float = (10000.0
-                                  if fd_config.model_config.rope_theta is None
-                                  else fd_config.model_config.rope_theta)
+        self.rope_theta: float = (
+            10000.0 if fd_config.model_config.rope_theta is None else fd_config.model_config.rope_theta
+        )
         self.rope_3d: bool = getattr(fd_config.model_config, "rope_3d", False)
         self.causal: bool = getattr(fd_config.model_config, "causal", True)
         self.speculative_method: str = fd_config.speculative_config.method
@@ -119,14 +131,11 @@ class MLAAttentionBackend(AttentionBackend):
         # For Multi Head Latent Attention
         self.kv_lora_rank: int = fd_config.model_config.kv_lora_rank
         self.qk_rope_head_dim: int = fd_config.model_config.qk_rope_head_dim
-        self.qk_head_dim: int = fd_config.model_config.qk_nope_head_dim \
-            + fd_config.model_config.qk_rope_head_dim
+        self.qk_head_dim: int = fd_config.model_config.qk_nope_head_dim + fd_config.model_config.qk_rope_head_dim
         self.attn_softmax_scale: float = self.qk_head_dim**-0.5
         if fd_config.model_config.rope_scaling:
-            mscale_all_dim = fd_config.model_config.rope_scaling.get(
-                "mscale_all_dim", False)  # 1.0
-            scaling_factor = fd_config.model_config.rope_scaling[
-                "factor"]  # 40
+            mscale_all_dim = fd_config.model_config.rope_scaling.get("mscale_all_dim", False)  # 1.0
+            scaling_factor = fd_config.model_config.rope_scaling["factor"]  # 40
             mscale = yarn_get_mscale(scaling_factor, float(mscale_all_dim))
             self.attn_softmax_scale = self.attn_softmax_scale * mscale * mscale
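
Note: this hunk is where the YaRN correction reaches the softmax scale: MLA uses (qk_nope_head_dim + qk_rope_head_dim)^-0.5, multiplied by mscale twice (once each for the query and key sides). A numeric sketch; the head dims 128/64 are assumed DeepSeek-style values for illustration, not read from FDConfig:

    import math

    # Assumed example values (not taken from this repo's configs):
    qk_nope_head_dim = 128
    qk_rope_head_dim = 64
    scaling_factor = 40    # rope_scaling["factor"], per the inline comment
    mscale_all_dim = 1.0   # rope_scaling["mscale_all_dim"], per the inline comment

    qk_head_dim = qk_nope_head_dim + qk_rope_head_dim  # 192
    attn_softmax_scale = qk_head_dim**-0.5             # ≈ 0.0722

    mscale = 0.1 * mscale_all_dim * math.log(scaling_factor) + 1.0  # ≈ 1.3689
    attn_softmax_scale *= mscale * mscale              # ≈ 0.1352
    print(attn_softmax_scale)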
@@ -134,7 +143,7 @@ class MLAAttentionBackend(AttentionBackend):
         self.start_layer_index: int = fd_config.model_config.start_layer_index
         self.device_id: int = os.getenv("CUDA_VISIBLE_DEVICES", None)

         self.rank, self.device_id = init_rank_and_device_id(fd_config)

     def init_attention_metadata(self, forward_meta: ForwardMeta):
@@ -199,7 +208,8 @@ class MLAAttentionBackend(AttentionBackend):
             )
         elif self.pd_disaggregation_mode == "per_query":
             metadata.kv_signal_metadata = open_shm_and_get_meta_signal(
-                self.rank, int(self.device_id), self.keep_pd_step_flag)
+                self.rank, int(self.device_id), self.keep_pd_step_flag
+            )

         self.attention_metadata: AttentionMetadata = metadata
@@ -207,13 +217,16 @@ class MLAAttentionBackend(AttentionBackend):
"""get_attntion_meta"""
return self.attention_metadata
def get_kv_cache_shape(self,
max_num_blocks: int) -> Tuple[int, int, int, int]:
def get_kv_cache_shape(self, max_num_blocks: int) -> Tuple[int, int, int, int]:
"""
Calculate kv cache shape for MLA
"""
return (max_num_blocks, 1, self.block_size,
self.kv_lora_rank + self.qk_rope_head_dim)
return (
max_num_blocks,
1,
self.block_size,
self.kv_lora_rank + self.qk_rope_head_dim,
)
def forward_extend(
self,
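
Note: get_kv_cache_shape is where MLA's memory advantage shows up: each cached token stores one compressed latent of width kv_lora_rank plus a decoupled RoPE key of width qk_rope_head_dim, in a single pseudo-head, rather than per-head K and V. A sizing sketch; all numbers are assumptions for illustration:

    # Assumed example values, not read from FDConfig:
    max_num_blocks = 1024
    block_size = 64        # tokens per cache block
    kv_lora_rank = 512     # compressed KV latent width
    qk_rope_head_dim = 64  # decoupled RoPE key width

    # Mirrors the tuple returned by get_kv_cache_shape above.
    shape = (max_num_blocks, 1, block_size, kv_lora_rank + qk_rope_head_dim)

    # 1024 * 1 * 64 * 576 ≈ 37.7M elements per layer, independent of the
    # number of attention heads.
    print(shape)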
@@ -232,13 +245,12 @@ class MLAAttentionBackend(AttentionBackend):
         metadata = self.attention_metadata

         if self.pd_disaggregation_mode == "per_query":
-            metadata.kv_signal_data_list[
-                layer.layer_id] = init_signal_layerwise(
-                    metadata.kv_signal_metadata,
-                    layer.layer_id + self.start_layer_index)
+            metadata.kv_signal_data_list[layer.layer_id] = init_signal_layerwise(
+                metadata.kv_signal_metadata,
+                layer.layer_id + self.start_layer_index,
+            )

-        latent_cache = forward_meta.caches[layer.layer_id] if hasattr(
-            forward_meta, 'caches') else None
+        latent_cache = forward_meta.caches[layer.layer_id] if hasattr(forward_meta, "caches") else None

         # Write to cache
         prefill_mla_write_cache(
@@ -251,7 +263,7 @@ class MLAAttentionBackend(AttentionBackend):
             forward_meta.cum_offsets,
             metadata.block_tables,
             "none",
-            getattr(forward_meta, 'max_input_length', -1),
+            getattr(forward_meta, "max_input_length", -1),
         )

         # Flash attention computation
@@ -287,13 +299,12 @@ class MLAAttentionBackend(AttentionBackend):
         metadata = self.attention_metadata

         if self.use_pd_disaggregation:
-            metadata.kv_signal_data_list[
-                layer.layer_id] = init_signal_layerwise(
-                    metadata.kv_signal_metadata,
-                    layer.layer_id + self.start_layer_index)
+            metadata.kv_signal_data_list[layer.layer_id] = init_signal_layerwise(
+                metadata.kv_signal_metadata,
+                layer.layer_id + self.start_layer_index,
+            )

-        latent_cache = forward_meta.caches[layer.layer_id] if hasattr(
-            forward_meta, 'caches') else None
+        latent_cache = forward_meta.caches[layer.layer_id] if hasattr(forward_meta, "caches") else None

         # Get speculative decoding parameters
         speculate_decoder = self.speculative_method is not None
@@ -335,8 +346,7 @@ class MLAAttentionBackend(AttentionBackend):
             metadata.decoder_batch_ids,
             metadata.decoder_tile_ids_per_batch,
             metadata.decoder_num_blocks,
-            metadata.
-            decoder_num_blocks,  # PaddleNLP passes decoder_num_blocks_cpu here
+            metadata.decoder_num_blocks,  # PaddleNLP passes decoder_num_blocks_cpu here
             metadata.max_enc_len_this_time,
             metadata.max_dec_len_this_time,
             metadata.max_len_kv,
@@ -385,13 +395,12 @@ class MLAAttentionBackend(AttentionBackend):
         speculate_max_tokens = self.speculate_max_draft_token_num

         if self.use_pd_disaggregation:
-            metadata.kv_signal_data_list[
-                layer.layer_id] = init_signal_layerwise(
-                    metadata.kv_signal_metadata,
-                    layer.layer_id + self.start_layer_index)
+            metadata.kv_signal_data_list[layer.layer_id] = init_signal_layerwise(
+                metadata.kv_signal_metadata,
+                layer.layer_id + self.start_layer_index,
+            )

-        latent_cache = forward_meta.caches[layer.layer_id] if hasattr(
-            forward_meta, 'caches') else None
+        latent_cache = forward_meta.caches[layer.layer_id] if hasattr(forward_meta, "caches") else None

         if k is not None:
             prefill_mla_write_cache(
@@ -460,8 +469,7 @@ class MLAAttentionBackend(AttentionBackend):
             metadata.decoder_batch_ids,
             metadata.decoder_tile_ids_per_batch,
             metadata.decoder_num_blocks,
-            metadata.
-            decoder_num_blocks,  # PaddleNLP passes decoder_num_blocks_cpu here
+            metadata.decoder_num_blocks,  # PaddleNLP passes decoder_num_blocks_cpu here
             metadata.max_enc_len_this_time,
             metadata.max_dec_len_this_time,
             metadata.max_len_kv,