Mirror of https://github.com/PaddlePaddle/FastDeploy.git, synced 2025-10-05 16:48:03 +08:00
[V1 Loader] Ernie kv cache quant support v1 loader (#3899)
Some checks failed
CE Compile Job / ce_job_pre_check (push) Has been cancelled
CE Compile Job / print_ce_job_pre_check_outputs (push) Has been cancelled
CE Compile Job / FD-Clone-Linux (push) Has been cancelled
CE Compile Job / Show Code Archive Output (push) Has been cancelled
CE Compile Job / BUILD_SM8090 (push) Has been cancelled
CE Compile Job / BUILD_SM8689 (push) Has been cancelled
CE Compile Job / CE_UPLOAD (push) Has been cancelled
Deploy GitHub Pages / deploy (push) Has been cancelled
Publish Job / publish_pre_check (push) Has been cancelled
Publish Job / print_publish_pre_check_outputs (push) Has been cancelled
Publish Job / FD-Clone-Linux (push) Has been cancelled
Publish Job / Show Code Archive Output (push) Has been cancelled
Publish Job / BUILD_SM8090 (push) Has been cancelled
Publish Job / BUILD_SM8689 (push) Has been cancelled
Publish Job / PADDLE_PYPI_UPLOAD_8090 (push) Has been cancelled
Publish Job / PADDLE_PYPI_UPLOAD_8689 (push) Has been cancelled
Publish Job / Run FastDeploy Unit Tests and Coverage (push) Has been cancelled
Publish Job / Run FastDeploy LogProb Tests (push) Has been cancelled
Publish Job / Extracted partial CE model tasks to run in CI. (push) Has been cancelled
Publish Job / Run Base Tests (push) Has been cancelled
Publish Job / Run Accuracy Tests (push) Has been cancelled
Publish Job / Run Stable Tests (push) Has been cancelled
CI Images Build / FD-Clone-Linux (push) Has been cancelled
CI Images Build / Show Code Archive Output (push) Has been cancelled
CI Images Build / CI Images Build (push) Has been cancelled
CI Images Build / BUILD_SM8090 (push) Has been cancelled
CI Images Build / Run FastDeploy Unit Tests and Coverage (push) Has been cancelled
CI Images Build / Run FastDeploy LogProb Tests (push) Has been cancelled
CI Images Build / Extracted partial CE model tasks to run in CI. (push) Has been cancelled
CI Images Build / Run Base Tests (push) Has been cancelled
CI Images Build / Run Accuracy Tests (push) Has been cancelled
CI Images Build / Run Stable Tests (push) Has been cancelled
CI Images Build / Publish Docker Images Pre Check (push) Has been cancelled
* support c8 for ernie
* add unittest
* support vl
* fix c8
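For context, "v1 loader" support means the layer no longer waits for a full state dict: parameters are created up front in init_weight with a per-parameter weight_loader callback attached (see create_weights in the diff below), and the loader streams checkpoint tensors to those callbacks one at a time. A minimal sketch of that contract, assuming a simple name-keyed dispatch; load_v1 and named_params are illustrative names, not FastDeploy APIs:

import paddle

# Illustrative sketch of a v1-style loader driver, not the actual
# FastDeploy implementation: each parameter carries a weight_loader
# callback, and the driver dispatches checkpoint tensors by name.
def load_v1(named_params, state_dict):
    """named_params maps checkpoint key -> parameter with .weight_loader."""
    for key, tensor in state_dict.items():
        param = named_params.get(key)
        if param is None:
            continue  # key handled by another layer or fused elsewhere
        loader = getattr(param, "weight_loader", None)
        if loader is not None:
            loader(param, tensor)  # e.g. Attention.weight_loader below
        else:
            param.copy_(paddle.to_tensor(tensor), False)  # plain copy

This is why the commit moves the kv cache scale creation out of load_state_dict and into init_weight: the scale parameters must already exist, with a loader attached, before streaming begins.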
@@ -34,6 +34,7 @@ import os
 from safetensors import safe_open
 
 from fastdeploy.model_executor.layers.utils import get_tensor
+from fastdeploy.model_executor.utils import default_weight_loader
 
 
 class Attention(nn.Layer):
@@ -77,6 +78,7 @@ class Attention(nn.Layer):
             ValueError: If the `v_head_dim` is less than 0.
         """
         super().__init__()
+        self.fd_config = fd_config
         self.num_heads: int = (
             fd_config.model_config.num_attention_heads // fd_config.parallel_config.tensor_parallel_size
         )
@@ -101,23 +103,21 @@ class Attention(nn.Layer):
         self.use_neox_rotary_style: bool = use_neox_rotary_style
 
         if fd_config.quant_config and hasattr(fd_config.quant_config, "kv_cache_quant_type"):
-            self.kvcache_quant_method: QuantMethodBase = fd_config.quant_config.get_quant_method(self)
+            self.quant_method: QuantMethodBase = fd_config.quant_config.get_quant_method(self)
         else:
-            self.kvcache_quant_method = None
+            self.quant_method = None
 
-        if self.kvcache_quant_method is None:
+        if self.quant_method is None:
             logger.info(f"Attention is running in cache kv {self._dtype} mode")
         else:
-            logger.info(
-                f"Attention is running in cache kv {self.kvcache_quant_method.cache_quant_config.quant_type} mode"
-            )
+            logger.info(f"Attention is running in cache kv {self.quant_method.cache_quant_config.quant_type} mode")
         self.use_qk_norm = use_qk_norm
         self.rms_norm_eps = rms_norm_eps
         if self.use_qk_norm:
             self.q_norm_key = f"{self.prefix}.q_norm"
             self.k_norm_key = f"{self.prefix}.k_norm"
-            self.init_weight()
 
+        self.init_weight()
         if (
             fd_config.moba_attention_config is not None
             and fd_config.moba_attention_config.moba_encoder_top_k_left is not None
@@ -161,32 +161,50 @@ class Attention(nn.Layer):
         )
 
     def init_weight(self):
-        self.q_norm_weight = self.create_parameter(
-            shape=[self.qk_head_dim],
-            dtype="float32",
-            is_bias=False,
-            default_initializer=paddle.nn.initializer.Constant(0),
-        )
+        if self.quant_method is not None:
+            self.quant_method.create_weights(
+                self,
+                weight_loader=(
+                    self.weight_loader if hasattr(self, "weight_loader") else default_weight_loader(self.fd_config)
+                ),
+            )
 
-        self.k_norm_weight = self.create_parameter(
-            shape=[self.qk_head_dim],
-            dtype="float32",
-            is_bias=False,
-            default_initializer=paddle.nn.initializer.Constant(0),
-        )
+        if self.use_qk_norm:
+            self.q_norm_weight = self.create_parameter(
+                shape=[self.qk_head_dim],
+                dtype="float32",
+                is_bias=False,
+                default_initializer=paddle.nn.initializer.Constant(0),
+            )
+
+            self.k_norm_weight = self.create_parameter(
+                shape=[self.qk_head_dim],
+                dtype="float32",
+                is_bias=False,
+                default_initializer=paddle.nn.initializer.Constant(0),
+            )
 
     def load_state_dict(self, state_dict: Dict[str, paddle.Tensor | np.ndarray]):
         """
         Attention only have quant related scales not other parameters.
         """
-        if self.kvcache_quant_method is not None:
-            self.kvcache_quant_method.create_weights(self, state_dict)
+        if self.quant_method is not None:
+            self.quant_method.process_loaded_weights(self, state_dict)
         if self.use_qk_norm:
             q_norm_weight_tensor = paddle.to_tensor(get_tensor(state_dict.pop(self.q_norm_key + ".weight")))
             k_norm_weight_tensor = paddle.to_tensor(get_tensor(state_dict.pop(self.k_norm_key + ".weight")))
             self.q_norm_weight.set_value(q_norm_weight_tensor.astype("float32"))
             self.k_norm_weight.set_value(k_norm_weight_tensor.astype("float32"))
 
+    def weight_loader(self, param, loaded_weight, loaded_shard_id: Optional[str] = None):
+        loaded_weight = get_tensor(loaded_weight).cast(paddle.get_default_dtype())
+        if self.quant_method.cache_quant_config.has_zero_point:  # cache_int4_zp
+            loaded_weight = 1.0 / loaded_weight
+        else:
+            loaded_weight = self.quant_method.cache_quant_config.max_bound / loaded_weight
+
+        param.copy_(loaded_weight, False)
+
     def forward(
         self,
         q: paddle.Tensor = None,
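The arithmetic in the new weight_loader converts the scales stored in the checkpoint into the form the cache kernels consume: with a zero point (cache_int4_zp) it stores the reciprocal of the loaded value, otherwise it folds max_bound into it. A toy walkthrough, under my reading that the checkpoint stores dequantization scales s with x ~= q * s; the concrete values and max_bound = 127 are assumptions for illustration:

import paddle

# Toy values for the two branches of Attention.weight_loader above.
# Assumption: checkpoint stores a dequant scale s such that x ~= q * s.
checkpoint_scale = paddle.to_tensor([0.02, 0.05], dtype="float32")

# cache_int4_zp branch: the parameter becomes 1 / s.
int4_zp_value = 1.0 / checkpoint_scale            # [50.0, 20.0]

# default branch (e.g. the int8 "c8" cache): fold max_bound into the
# scale so quantizing is a single multiply, q = round(x * value).
max_bound = 127.0                                 # assumed int8 bound
c8_value = max_bound / checkpoint_scale           # [6350.0, 2540.0]

x = paddle.to_tensor([0.01, -0.03])
q = paddle.round(x * c8_value[0]).clip(-max_bound, max_bound)

Storing the folded multiplier once at load time keeps the per-token quantization path to a single multiply instead of a divide.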