[CP2.2] Machete support group scale & wint8 & v1 loader (#4166)

* support v1 loader for machete (#3999)

* [Optimize] Support WINT8 and group scale for Machete (#3905)

* [Optimize] Machete using group scale default (#4121)
Sunny-bot1 committed 2025-09-19 11:13:12 +08:00 (committed by GitHub)
parent 74d7b9151d
commit 4f460db556
5 changed files with 166 additions and 82 deletions
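
For context, a minimal illustrative sketch of group-wise weight-only quantization with group size 128 (the scheme this commit enables for Machete, for both wint4 and wint8). The function and sizes below are hypothetical, not FastDeploy code:

import numpy as np

def group_quantize(w, bits=4, group_size=128):
    """w: float weight of shape [K, N] -> (int values [K, N], scales [ceil(K / 128), N])."""
    K, N = w.shape
    n_groups = (K + group_size - 1) // group_size   # same ceil-div as (K + 127) // 128 in the patch
    qmax = 2 ** (bits - 1) - 1                      # 7 for wint4, 127 for wint8
    q = np.empty((K, N), dtype=np.int32)
    scales = np.empty((n_groups, N), dtype=w.dtype)
    for g in range(n_groups):
        rows = slice(g * group_size, min((g + 1) * group_size, K))
        s = np.maximum(np.abs(w[rows]).max(axis=0) / qmax, 1e-8)   # one scale per column per group
        scales[g] = s
        q[rows] = np.clip(np.rint(w[rows] / s), -qmax - 1, qmax).astype(np.int32)
    return q, scales

w = np.random.randn(4096, 1024).astype(np.float32)
q, scales = group_quantize(w, bits=8)               # scales.shape == (32, 1024)

The packed weight then stores eight 4-bit or four 8-bit values per 32-bit word, which is why the diff below divides the weight's first dimension by 8 (wint4) or 4 (wint8) and creates the parameter with dtype "int32".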

@@ -141,8 +141,7 @@ class WeightOnlyConfig(QuantConfigBase):
             )
             if (
-                self.name() == "wint4"
-                and _ENABLE_MACHETE
+                _ENABLE_MACHETE
                 and envs.FD_USE_MACHETE == "1"
                 and layer.weight_shape[1]
                 and layer.weight_shape[1] % 128 == 0
@@ -219,12 +218,22 @@ class WeightOnlyLinearMethod(QuantMethodBase):
                 quant_attrs,
             )
         else:
-            # The scale shape should be equal to the output dim of weight using Per-Channel Quantization.
-            weight_scale_shape = [layer.weight_shape[1]]
-            layer.weight_shape.reverse()
-            if self.quant_config.name() == "wint4":
-                layer.weight_shape[0] //= 2
-            layer.weight_dtype = "int8"
+            if isinstance(self, MacheteWeightOnlyLinearMethod):
+                # Using group scale for machete, group size is 128
+                weight_scale_shape = [(layer.weight_shape[0] + 127) // 128, layer.weight_shape[1]]
+                if self.quant_config.name() == "wint4":
+                    layer.weight_shape[0] //= 8
+                else:
+                    layer.weight_shape[0] //= 4
+                layer.weight_dtype = "int32"
+            else:
+                # The scale shape should be equal to the output dim of weight using Per-Channel Quantization.
+                weight_scale_shape = [layer.weight_shape[1]]
+                layer.weight_shape.reverse()
+                if self.quant_config.name() == "wint4":
+                    layer.weight_shape[0] //= 2
+                layer.weight_dtype = "int8"
         layer.weight = layer.create_parameter(
             shape=layer.weight_shape,
             dtype=layer.weight_dtype,
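
To make the Machete-branch shape arithmetic above concrete, a worked example with assumed (illustrative) dimensions:

# Hypothetical sizes; layer.weight_shape == [K, N] before quantization.
K, N = 4096, 8192
weight_scale_shape = [(K + 127) // 128, N]   # [32, 8192]: one scale row per 128-row group
wint4_weight_shape = [K // 8, N]             # [512, 8192]: eight 4-bit values per int32
wint8_weight_shape = [K // 4, N]             # [1024, 8192]: four 8-bit values per int32
per_channel_scale_shape = [N]                # [8192]: the non-Machete per-channel branch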
@@ -260,17 +269,30 @@ class WeightOnlyLinearMethod(QuantMethodBase):
     def process_weights_after_loading(self, layer) -> None:
         if not layer.fd_config.load_config.load_choices == "default_v1":
             return
-        quanted_weight_tensor, weight_scale_tensor = weight_quantize(
-            layer.weight,
-            algo=self.quant_config.algo,
-            arch=self.quant_config.weight_only_linear_arch,
-        )
+        if isinstance(self, MacheteWeightOnlyLinearMethod):
+            from fastdeploy.model_executor.layers.quantization.ops import (
+                machete_quantize_and_pack,
+            )
+
+            # Using group scale for machete, group size is 128
+            quanted_weight_tensor, weight_scale_tensor = machete_quantize_and_pack(
+                w=layer.weight,
+                atype=layer._dtype,
+                quant_type="uint4b8" if self.quant_config.name() == "wint4" else "uint8b128",
+                group_size=128,
+            )
+        else:
+            quanted_weight_tensor, weight_scale_tensor = weight_quantize(
+                layer.weight,
+                algo=self.quant_config.algo,
+                arch=self.quant_config.weight_only_linear_arch,
+            )
         free_tensor(layer.weight)
         layer.weight = layer.create_parameter(
             shape=quanted_weight_tensor.shape,
-            dtype="int8",
+            dtype="int8" if not isinstance(self, MacheteWeightOnlyLinearMethod) else "int32",
             is_bias=False,
             default_initializer=paddle.nn.initializer.Constant(0),
         )
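
The dtype switch to "int32" above reflects that the Machete path stores quantized values pre-packed into 32-bit words. A hypothetical packing sketch for the wint8 case (the actual Machete prepack layout may differ):

import numpy as np

def pack_int8_to_int32(q):
    """q: int8 array of shape [K, N] with K divisible by 4 -> int32 array of shape [K // 4, N]."""
    K, N = q.shape
    b = q.reshape(K // 4, 4, N).astype(np.uint8).astype(np.uint32)   # 4 consecutive rows per word
    return (b[:, 0] | (b[:, 1] << 8) | (b[:, 2] << 16) | (b[:, 3] << 24)).view(np.int32)

q = np.random.randint(-128, 128, size=(256, 64), dtype=np.int8)
packed = pack_int8_to_int32(q)   # shape (64, 64), dtype int32: first dim shrank by 4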
@@ -361,32 +383,6 @@ class MacheteWeightOnlyLinearMethod(WeightOnlyLinearMethod):
     ) -> None:
         super().__init__(quant_config)
 
-    def create_weights(self, layer, **extra_weight_attrs):
-        assert layer.bias is None, "Machete weight only linear method does not support bias."
-        assert self.quant_config.name() == "wint4", "Machete weight only linear method only supports wint4."
-        # The scale shape should be equal to the output dim of weight using Per-Channel Quantization.
-        weight_scale_shape = [1, layer.weight_shape[1]]
-        # layer.weight_shape.reverse()
-        if self.quant_config.name() == "wint4":
-            layer.weight_shape[0] //= 8
-        layer.weight_dtype = "int32"
-        layer.weight = layer.create_parameter(
-            shape=layer.weight_shape,
-            dtype=layer.weight_dtype,
-            is_bias=False,
-            default_initializer=paddle.nn.initializer.Constant(0),
-        )
-        layer.weight_scale = layer.create_parameter(
-            shape=weight_scale_shape,
-            dtype=layer._dtype,
-            is_bias=False,
-        )
 
     def process_prequanted_weights(self, layer, state_dict) -> None:
         pass
@@ -395,24 +391,27 @@ class MacheteWeightOnlyLinearMethod(WeightOnlyLinearMethod):
             machete_quantize_and_pack,
         )
 
+        # Using group scale for machete, group size is 128
         quanted_weight_tensor, weight_scale_tensor = machete_quantize_and_pack(
             w=weight,
             atype=layer._dtype,
-            quant_type="uint4b8",
+            quant_type="uint4b8" if self.quant_config.name() == "wint4" else "uint8b128",
             group_size=128,
         )
         layer.weight.set_value(quanted_weight_tensor)
         layer.weight_scale.set_value(weight_scale_tensor.astype(paddle.get_default_dtype()))
 
     def apply(self, layer, x):
-        assert layer.bias is None, "Machete weight only linear method does not support bias."
-        assert self.quant_config.name() == "wint4", "Machete weight only linear method only supports wint4."
         from fastdeploy.model_executor.layers.quantization.ops import machete_wint_mm
 
+        # Using group scale for machete, group size is 128
         linear_out = machete_wint_mm(
             x,
             w_prepack=layer.weight,
             w_g_s=layer.weight_scale,
-            weight_dtype="uint4b8",
+            weight_dtype="uint4b8" if self.quant_config.name() == "wint4" else "uint8b128",
             group_size=128,
         )
         if layer.with_bias:
             linear_out = paddle.add(linear_out, layer.bias)
         return linear_out
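
For reference, the group-scale matmul performed by machete_wint_mm is semantically a dequantize-then-GEMM; a plain numpy sketch with illustrative shapes (the real kernel fuses this on the GPU):

import numpy as np

M, K, N, group_size = 8, 512, 256, 128
x = np.random.randn(M, K).astype(np.float32)                       # activations
q = np.random.randint(-128, 128, size=(K, N)).astype(np.float32)   # stand-in for wint8 values
scales = np.random.rand(K // group_size, N).astype(np.float32)     # one scale per 128-row group

w_dequant = q * np.repeat(scales, group_size, axis=0)   # broadcast each group's scales over its rows
linear_out = x @ w_dequant                              # shape (8, 256), plus bias when layer.with_bias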