[Feature] Add an unquantized option for MoE and Dense quant type (#4813)

Author: Sunny-bot1
Date: 2025-11-19 16:24:03 +08:00
Committed by: GitHub
Parent: 9ff418db73
Commit: 43f0c7557e
3 changed files with 48 additions and 34 deletions
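
With this change, a "mix_quant" quantization config may omit the dense or MoE
quant type (or set it to null), leaving that module family unquantized while
the rest of the checkpoint stays quantized. A hypothetical config dict
illustrating the shape, with key names taken from this diff and type strings
that are examples only:

    # Hypothetical input to MixQuantConfig.from_config; the key names come
    # from this diff, the concrete quant type string is illustrative.
    quantization_config = {
        "moe_quant_type": "wint4",   # example type name: experts stay quantized
        "dense_quant_type": None,    # new: may be None/absent -> dense layers unquantized
        "kv_cache_quant_type": None,
    }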


@@ -143,11 +143,13 @@ class LinearBase(nn.Layer):
         self.with_bias = with_bias
         self.add_bias = add_bias
         self.prefix = prefix
-        self.is_quantized = fd_config.model_config.is_quantized
+        self.is_quantized = fd_config.model_config.is_quantized and not (
+            fd_config.quant_config.name() == "mix_quant" and fd_config.quant_config.dense_quant_type is None
+        )
         # key
         if weight_key:
             self.weight_key = f"{prefix}.{weight_key}"
-        elif fd_config.model_config.is_quantized and not skip_quant:
+        elif self.is_quantized and not skip_quant:
             self.weight_key = f"{prefix}.quant_weight"
             self.weight_scale_key = f"{prefix}.weight_scale"
             self.act_scale_key = f"{prefix}.activation_scale"
@@ -170,7 +172,7 @@ class LinearBase(nn.Layer):
             self.output_size,
         ]
-        if fd_config.quant_config and not skip_quant:
+        if fd_config.quant_config and not skip_quant and fd_config.quant_config.get_quant_method(self):
             self.quant_method = fd_config.quant_config.get_quant_method(self)
         else:
             self.quant_method: Optional[QuantMethodBase] = UnquantizedLinearMethod()
@@ -232,7 +234,7 @@ class LinearBase(nn.Layer):
         # weight
         self.state_dict = state_dict
         assert self.weight_key is not None, "weight_key should not be None."
-        if self.fd_config.model_config.is_quantized:
+        if self.is_quantized:
             self.load_prequant_weight(state_dict)
         else:
             self.load_weight(state_dict)
@@ -784,7 +786,7 @@ class QKVParallelLinear(ColumnParallelLinear):
         assert self.weight_key is not None, "weight_key should not be None."
         # qkv fused in disk
-        if self.fd_config.model_config.is_quantized:
+        if self.is_quantized:
             self.load_prequant_weight(state_dict)
         else:
             self.load_weight(state_dict)
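
Taken together, the LinearBase changes give dense layers a per-layer fallback:
for a mix_quant checkpoint with no dense_quant_type, is_quantized evaluates to
False, the plain weight key is used instead of .quant_weight, and a config
that returns no quant method leaves the layer on UnquantizedLinearMethod. A
minimal standalone sketch of that resolution rule (stub types, not the
FastDeploy classes):

    from typing import Optional

    class UnquantizedLinearMethod:
        """Stand-in for the unquantized fallback method."""

    def resolve_quant_method(quant_config, layer, skip_quant: bool = False):
        # Mirrors the updated LinearBase logic: only adopt a quant method
        # when the config actually provides one for this layer.
        if quant_config and not skip_quant and quant_config.get_quant_method(layer):
            return quant_config.get_quant_method(layer)
        return UnquantizedLinearMethod()

    class NoDenseMixConfig:
        """Stub mix_quant config whose dense_quant_type is unset."""
        def get_quant_method(self, layer) -> Optional[object]:
            return None  # as in the new MixQuantConfig dense branch

    assert isinstance(resolve_quant_method(NoDenseMixConfig(), layer=None), UnquantizedLinearMethod)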


@@ -182,10 +182,13 @@ class FusedMoE(nn.Layer):
         self._dtype = self._helper.get_default_dtype()
         self.weight_dtype = self._dtype
+        self.is_quantized = fd_config.model_config.is_quantized and not (
+            fd_config.quant_config.name() == "mix_quant" and fd_config.quant_config.moe_quant_type is None
+        )
         moe_quant_config = fd_config.quant_config
         self.moe_quant_config = moe_quant_config
         self.moe_quant_type = None
-        if moe_quant_config:
+        if moe_quant_config and moe_quant_config.get_quant_method(self):
             self.quant_method = moe_quant_config.get_quant_method(self)
             self.moe_quant_type = moe_quant_config.name()
         else:
@@ -561,7 +564,7 @@ class FusedMoE(nn.Layer):
         """
         load_state_dict function.
         """
-        if self.fd_config.model_config.is_quantized:
+        if self.is_quantized:
             if getattr(self.fd_config.quant_config, "is_permuted", True):
                 self.quant_method.process_prequanted_weights(self, state_dict, is_rearrange)
             else:
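
The same pattern applies to FusedMoE: a mix_quant checkpoint with no
moe_quant_type is treated as not pre-quantized for the expert weights, so
load_state_dict takes the plain-weight path. A standalone sketch of the new
predicate (a plain function, not the FastDeploy API):

    def moe_is_quantized(model_is_quantized, quant_name, moe_quant_type):
        # Mirrors the new FusedMoE.is_quantized expression.
        return model_is_quantized and not (quant_name == "mix_quant" and moe_quant_type is None)

    assert moe_is_quantized(True, "mix_quant", "wint4") is True  # "wint4" is an example type name
    assert moe_is_quantized(True, "mix_quant", None) is False    # experts take the unquantized load path
    assert moe_is_quantized(True, "wint8", None) is True         # non-mix configs are unaffected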


@@ -66,8 +66,8 @@ class MixQuantConfig(QuantConfigBase):
     @classmethod
     def from_config(cls, config: dict) -> "MixQuantConfig":
         return cls(
-            config["dense_quant_type"],
-            config["moe_quant_type"],
+            config.get("dense_quant_type", None),
+            config.get("moe_quant_type", None),
             config.get("kv_cache_quant_type", None),
             config.get("image_moe_quant_type", None),
             config.get("is_channel_wise", False),
@@ -81,29 +81,35 @@ class MixQuantConfig(QuantConfigBase):
     def get_quant_method(self, layer) -> Optional[QuantMethodBase]:
         if isinstance(layer, FusedMoE):
             if layer.moe_tag == "Image":
-                return (
-                    get_quantization_config(self.image_moe_quant_type)
-                    .from_config(
-                        {
-                            "is_permuted": self.is_permuted,
-                            "is_quantized": not self.is_checkpoint_bf16,
-                            "hadamard_block_size": self.hadamard_block_size,
-                        }
-                    )
-                    .get_quant_method(layer)
-                )
+                if self.image_moe_quant_type is not None:
+                    return (
+                        get_quantization_config(self.image_moe_quant_type)
+                        .from_config(
+                            {
+                                "is_permuted": self.is_permuted,
+                                "is_quantized": not self.is_checkpoint_bf16,
+                                "hadamard_block_size": self.hadamard_block_size,
+                            }
+                        )
+                        .get_quant_method(layer)
+                    )
+                else:
+                    return None
             else:
-                return (
-                    get_quantization_config(self.moe_quant_type)
-                    .from_config(
-                        {
-                            "is_permuted": self.is_permuted,
-                            "is_quantized": not self.is_checkpoint_bf16,
-                            "hadamard_block_size": self.hadamard_block_size,
-                        }
-                    )
-                    .get_quant_method(layer)
-                )
+                if self.moe_quant_type is not None:
+                    return (
+                        get_quantization_config(self.moe_quant_type)
+                        .from_config(
+                            {
+                                "is_permuted": self.is_permuted,
+                                "is_quantized": not self.is_checkpoint_bf16,
+                                "hadamard_block_size": self.hadamard_block_size,
+                            }
+                        )
+                        .get_quant_method(layer)
+                    )
+                else:
+                    return None
         elif isinstance(layer, Attention):
             if self.kv_cache_quant_type is not None:
                 return (
@@ -114,8 +120,11 @@ class MixQuantConfig(QuantConfigBase):
             else:
                 return None
         else:
-            return (
-                get_quantization_config(self.dense_quant_type)
-                .from_config({"is_quantized": not self.is_checkpoint_bf16})
-                .get_quant_method(layer)
-            )
+            if self.dense_quant_type is not None:
+                return (
+                    get_quantization_config(self.dense_quant_type)
+                    .from_config({"is_quantized": not self.is_checkpoint_bf16})
+                    .get_quant_method(layer)
+                )
+            else:
+                return None
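
In sum, a None quant type now flows through consistently: MixQuantConfig
returns no quant method for that branch, the layer constructors above fall
back to the unquantized path, and KV-cache quantization remains governed by
kv_cache_quant_type as before. A compact sketch of the shared branch shape
(simplified; the real code builds a method via get_quantization_config rather
than returning strings):

    from typing import Optional

    def get_quant_method_for(quant_type: Optional[str]) -> Optional[str]:
        # Each rewritten branch now has this shape: build a method only when
        # a type is configured; None means "no method -> caller goes unquantized".
        return f"method-for-{quant_type}" if quant_type is not None else None

    assert get_quant_method_for("wint4") == "method-for-wint4"  # example type name
    assert get_quant_method_for(None) is None                   # layer falls back to unquantized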