Mirror of https://github.com/PaddlePaddle/FastDeploy.git (synced 2025-10-05 08:37:06 +08:00)
supports dynamic Cfp8 (#3767)
* supports dynamic Cfp8

* add unittest
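In short: the change adds a block_wise_fp8 KV-cache quantization type. The attention backend now pulls quantized K/V tensors and their per-block scale tensors out of forward_meta.caches (four slots per layer instead of two), the quant config maps the new type to the FP8 max bound of 448.0, and checkpoint scale/zero-point loading is skipped for it, since block-wise scales are computed dynamically at runtime. The diff hunks follow.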
@@ -231,6 +231,17 @@ class AppendAttentionBackend(AttentionBackend):
                 metadata.kv_signal_metadata,
                 layer.layer_id + self.start_layer_index,
             )
+        cache_quant_type_str = getattr(layer, "cache_quant_type_str", "none")
+        if cache_quant_type_str == "block_wise_fp8":
+            cache_k = forward_meta.caches[4 * layer.layer_id]
+            cache_v = forward_meta.caches[4 * layer.layer_id + 1]
+            cache_k_scales = forward_meta.caches[4 * layer.layer_id + 2]
+            cache_v_scales = forward_meta.caches[4 * layer.layer_id + 3]
+        else:
+            cache_k = forward_meta.caches[2 * layer.layer_id]
+            cache_v = forward_meta.caches[2 * layer.layer_id + 1]
+            cache_k_scales = getattr(layer, "cache_k_scale", None)
+            cache_v_scales = getattr(layer, "cache_v_scale", None)
 
         if self.use_output:
             quant_max_bound = getattr(layer, "quant_max_bound", 0.0)
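The stride change from 2 to 4 in the indexing above follows from the cache layout: under block_wise_fp8, forward_meta.caches interleaves four tensors per layer (quantized K, quantized V, K scales, V scales) instead of the usual two, with per-tensor scales otherwise living on the layer object. A minimal sketch of the slot arithmetic (the helper name and dict layout are illustrative, not FastDeploy API):

def layer_cache_slots(layer_id: int, block_wise_fp8: bool) -> dict:
    """Indices of one layer's entries in the flat forward_meta.caches list."""
    if block_wise_fp8:
        base = 4 * layer_id  # K, V, K-scales, V-scales per layer
        return {"k": base, "v": base + 1, "k_scales": base + 2, "v_scales": base + 3}
    base = 2 * layer_id  # only K and V per layer
    return {"k": base, "v": base + 1}

assert layer_cache_slots(3, True) == {"k": 12, "v": 13, "k_scales": 14, "v_scales": 15}
assert layer_cache_slots(3, False) == {"k": 6, "v": 7}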
@@ -269,8 +280,8 @@ class AppendAttentionBackend(AttentionBackend):
 
             append_attention_with_output(
                 qkv,
-                forward_meta.caches[2 * layer.layer_id],
-                forward_meta.caches[2 * layer.layer_id + 1],
+                cache_k,
+                cache_v,
                 forward_meta.seq_lens_encoder,
                 forward_meta.seq_lens_decoder,
                 forward_meta.seq_lens_this_time,
@@ -293,8 +304,8 @@ class AppendAttentionBackend(AttentionBackend):
                 metadata.attn_mask,
                 layer.qkv_bias,
                 layer.qkv_scale,
-                getattr(layer, "cache_k_scale", None),
-                getattr(layer, "cache_v_scale", None),
+                cache_k_scales,
+                cache_v_scales,
                 getattr(layer, "cache_k_out_scale", None),
                 getattr(layer, "cache_v_out_scale", None),
                 getattr(layer, "cache_k_zp", None),
@@ -325,8 +336,8 @@ class AppendAttentionBackend(AttentionBackend):
         else:
             res = append_attention(
                 qkv,
-                forward_meta.caches[2 * layer.layer_id],
-                forward_meta.caches[2 * layer.layer_id + 1],
+                cache_k,
+                cache_v,
                 forward_meta.seq_lens_encoder,
                 forward_meta.seq_lens_decoder,
                 forward_meta.seq_lens_this_time,
@@ -348,8 +359,8 @@ class AppendAttentionBackend(AttentionBackend):
                 metadata.attn_mask,
                 layer.qkv_bias,
                 layer.qkv_scale,
-                getattr(layer, "cache_k_scale", None),
-                getattr(layer, "cache_v_scale", None),
+                cache_k_scales,
+                cache_v_scales,
                 getattr(layer, "cache_k_out_scale", None),
                 getattr(layer, "cache_v_out_scale", None),
                 getattr(layer, "cache_k_zp", None),
@@ -33,6 +33,7 @@ class KvCacheQuantzationTypes(str, Enum):
 
     INT8 = "int8"
     FP8 = "float8_e4m3fn"
+    BLOCK_WISE_FP8 = "block_wise_fp8"
     INT8_ZP = "int8_zp"
     INT4_ZP = "int4_zp"
     FP8_ZP = "float8_e4m3fn_zp"
@@ -62,7 +63,11 @@ class KvCacheQuantConfig(QuantConfigBase):
 
         if self.quant_type == KvCacheQuantzationTypes.INT8 or self.quant_type == KvCacheQuantzationTypes.INT8_ZP:
             self.max_bound = 127.0
-        elif self.quant_type == KvCacheQuantzationTypes.FP8 or self.quant_type == KvCacheQuantzationTypes.FP8_ZP:
+        elif (
+            self.quant_type == KvCacheQuantzationTypes.FP8
+            or self.quant_type == KvCacheQuantzationTypes.FP8_ZP
+            or self.quant_type == KvCacheQuantzationTypes.BLOCK_WISE_FP8
+        ):
             self.max_bound = 448.0
         elif self.quant_type == KvCacheQuantzationTypes.INT4_ZP:
             self.max_bound = 7.0
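The shared max bound of 448.0 is the largest finite value representable in float8_e4m3fn (4 exponent bits with bias 7, 3 mantissa bits, and the all-ones exponent/mantissa pattern reserved for NaN), which both per-tensor and block-wise FP8 clamp against:

# Largest finite float8_e4m3fn value: significand 1.75 (mantissa bits 110,
# since 111 encodes NaN) times 2**8 (max biased exponent 15, bias 7).
max_e4m3fn = (2 - 2**-2) * 2**8
assert max_e4m3fn == 448.0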
@@ -178,12 +183,17 @@ class KVCacheMethodBase(QuantMethodBase):
             layer.cache_quant_type_str = "cache_int4_zp"
             layer.quant_max_bound = 7.0
             layer.quant_min_bound = -7.0
+        elif self.cache_quant_config.quant_type == KvCacheQuantzationTypes.BLOCK_WISE_FP8:
+            layer.cache_quant_type_str = "block_wise_fp8"
+            layer.quant_max_bound = 448.0
+            layer.quant_min_bound = -448.0
         else:
             raise NotImplementedError(f"{self.cache_quant_config.quant_type} is not implemented")
 
-        self.load_scale(layer, state_dict)
-        if self.cache_quant_config.has_zero_point:
-            self.load_zp(layer, state_dict)
+        if "block_wise" not in layer.cache_quant_type_str:
+            self.load_scale(layer, state_dict)
+            if self.cache_quant_config.has_zero_point:
+                self.load_zp(layer, state_dict)
 
     def apply(self, layer):
         """
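Skipping load_scale/load_zp for the block-wise type is what makes this "dynamic" Cfp8: scales are not static checkpoint tensors but are recomputed per block at runtime and stored next to the cache (the extra slots seen in the backend hunk above). A rough NumPy sketch of the idea, purely illustrative and not the FastDeploy kernel:

import numpy as np

def quantize_block_wise_fp8(x: np.ndarray, block_size: int = 64, max_bound: float = 448.0):
    """Scale each block of rows into [-max_bound, max_bound]; one scale per block.

    A real kernel would cast the scaled values to float8_e4m3fn in the cache;
    here we just return float32 values plus the per-block scales.
    """
    num_blocks = -(-x.shape[0] // block_size)  # ceiling division
    scales = np.ones(num_blocks, dtype=np.float32)
    out = np.empty_like(x, dtype=np.float32)
    for b in range(num_blocks):
        blk = x[b * block_size : (b + 1) * block_size]
        amax = float(np.abs(blk).max())
        if amax > 0.0:
            scales[b] = amax / max_bound
        out[b * block_size : (b + 1) * block_size] = blk / scales[b]
    return out, scales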