Mirror of https://github.com/PaddlePaddle/FastDeploy.git
[CP2.2] Machete support group scale & wint8 & v1 loader (#4166)
* Support v1 loader for Machete (#3999)
* [Optimize] Support WINT8 and group scale for Machete (#3905)
* [Optimize] Machete using group scale default (#4121)
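Taken together, the changes route both wint4 and wint8 weight-only layers through Machete with a fixed quantization group size of 128 along the input (K) dimension, and let the default_v1 loader quantize weights after they are loaded. A minimal sketch of the type/group-size mapping used throughout the diff (plain Python with a placeholder function name, not a FastDeploy API):

def machete_quant_args(quant_name):
    # wint4 maps to the 4-bit "uint4b8" type, wint8 to the 8-bit "uint8b128" type;
    # both use a quantization group size of 128 along the K (input) dimension.
    return {
        "quant_type": "uint4b8" if quant_name == "wint4" else "uint8b128",
        "group_size": 128,
    }

print(machete_quant_args("wint4"))  # {'quant_type': 'uint4b8', 'group_size': 128}
print(machete_quant_args("wint8"))  # {'quant_type': 'uint8b128', 'group_size': 128}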
@@ -85,7 +85,7 @@ def quantize_weights(
         w_s: Scales (None if `group_size` is None).
     """
     assert paddle.is_floating_point(w), "w must be float type"
-    assert quant_type in ["uint4", "uint4b8"], "only support quant_type = uint4, uint4b8"
+    assert quant_type in ["uint4b8", "uint8b128"], "only support quant_type = uint4b8, uint8b128"

     orig_device = w.place
     size_k, size_n = w.shape
@@ -103,8 +103,12 @@ def quantize_weights(
     max_val = paddle.max(w, axis=0, keepdim=True)
     min_val = paddle.min(w, axis=0, keepdim=True)

-    max_q_val = float(7.0)
-    min_q_val = float(-8.0)
+    if quant_type == "uint4b8":
+        max_q_val = float(7.0)
+        min_q_val = float(-8.0)
+    else:
+        max_q_val = float(127.0)
+        min_q_val = float(-128.0)

     w_s = paddle.ones([1], dtype=paddle.float32)  # unscaled case
@@ -124,6 +128,8 @@ def quantize_weights(
     # w_q += quant_type.bias
     if quant_type == "uint4b8":
         w_q += 8
+    else:
+        w_q += 128

     # Restore original shapes
     if group_size is not None and group_size < size_k:
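The two hunks above extend quantize_weights from 4-bit only to 4-bit and 8-bit: the clipping range becomes [-8, 7] or [-128, 127] depending on quant_type, and the zero-point offset becomes +8 or +128. A rough NumPy illustration of that range/offset logic (a sketch of the idea, not the library function):

import numpy as np

def toy_quantize(w, quant_type):
    # Range selection mirrors the hunks above: 4-bit uses [-8, 7], 8-bit uses [-128, 127].
    if quant_type == "uint4b8":
        max_q_val, min_q_val, bias = 7.0, -8.0, 8
    else:  # "uint8b128"
        max_q_val, min_q_val, bias = 127.0, -128.0, 128

    max_val = w.max(axis=0, keepdims=True)
    min_val = w.min(axis=0, keepdims=True)
    # Symmetric per-column scale covering both tails (per-group in the real code).
    w_s = np.maximum(np.abs(max_val / max_q_val), np.abs(min_val / min_q_val))
    w_q = np.clip(np.round(w / w_s), min_q_val, max_q_val)
    # Shift into unsigned storage; the "b8"/"b128" suffix is this zero-point offset.
    return (w_q + bias).astype(np.uint8), w_s

w = np.random.randn(128, 4).astype(np.float32)
w_q, w_s = toy_quantize(w, "uint8b128")
assert w_q.min() >= 0 and w_q.max() <= 255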
@@ -131,11 +137,11 @@ def quantize_weights(
         def reshape_w(w_tensor):
             w_tensor = w_tensor.reshape([group_size, -1, size_n])
             w_tensor = w_tensor.transpose([1, 0, 2])
-            w_tensor = w_tensor.reshape([size_k, size_n])
+            w_tensor = w_tensor.reshape([size_k, size_n]).contiguous()
             return w_tensor

         w_q = reshape_w(w_q)
-        w_s = w_s.reshape([-1, size_n])
+        w_s = w_s.reshape([-1, size_n]).contiguous()

     # Move tensors back to original device
     w_q = w_q.to(orig_device)
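After the group-wise pass, the scales are reshaped to one row per group of group_size input channels; the added .contiguous() calls only ensure dense layout before packing. For a [size_k, size_n] weight the resulting scale shape works out as follows (illustrative arithmetic only):

K, N, g = 1024, 4096, 128
num_groups = K // g            # one scale row per group of 128 input channels
w_s_shape = (num_groups, N)    # matches w_s.reshape([-1, size_n]) above
print(w_s_shape)               # (8, 4096)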
@@ -153,7 +159,8 @@ def machete_quantize_and_pack(
     group_size: int = -1,
 ):
     w_q, w_s = quantize_weights(w, group_size, quant_type=quant_type)
-    w_q = pack_rows(w_q, 4, *w_q.shape)
+    num_bits = 4 if quant_type == "uint4b8" else 8
+    w_q = pack_rows(w_q, num_bits, *w_q.shape)
     w_q_col = w_q.transpose([1, 0]).contiguous()  # convert to col major
     w_q_prepack = machete_prepack_B(
         w_q_col,
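pack_rows now receives num_bits, so wint8 weights pack 4 values per int32 instead of 8. A toy stand-in showing only the shape effect (the real pack_rows bit layout may differ):

import numpy as np

def toy_pack_rows(q, num_bits):
    # Pack 32 // num_bits consecutive values along K into one int32 per output element.
    pack_factor = 32 // num_bits          # 8 values for 4-bit, 4 values for 8-bit
    size_k, size_n = q.shape
    assert size_k % pack_factor == 0
    q = q.astype(np.uint32).reshape(size_k // pack_factor, pack_factor, size_n)
    packed = np.zeros((size_k // pack_factor, size_n), dtype=np.uint32)
    for i in range(pack_factor):
        packed |= q[:, i, :] << np.uint32(num_bits * i)
    return packed

q4 = np.random.randint(0, 16, size=(64, 8))
q8 = np.random.randint(0, 256, size=(64, 8))
print(toy_pack_rows(q4, 4).shape)  # (8, 8)   -> K dim shrinks by 8 for wint4
print(toy_pack_rows(q8, 8).shape)  # (16, 8)  -> K dim shrinks by 4 for wint8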
@@ -141,8 +141,7 @@ class WeightOnlyConfig(QuantConfigBase):
                )

            if (
-               self.name() == "wint4"
-               and _ENABLE_MACHETE
+               _ENABLE_MACHETE
                and envs.FD_USE_MACHETE == "1"
                and layer.weight_shape[1]
                and layer.weight_shape[1] % 128 == 0
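With the self.name() == "wint4" check dropped, the Machete path can now be selected for wint8 layers as well, still gated on the FD_USE_MACHETE flag and an output dimension divisible by 128. A small sketch of the eligibility check, assuming the flag is read from the process environment:

import os

os.environ["FD_USE_MACHETE"] = "1"  # opt-in flag checked via envs.FD_USE_MACHETE

def machete_eligible(weight_shape):
    # Mirrors the shape condition above (platform and _ENABLE_MACHETE checks omitted).
    return bool(weight_shape[1]) and weight_shape[1] % 128 == 0

print(machete_eligible([4096, 12288]))  # True
print(machete_eligible([4096, 1000]))   # False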
@@ -219,12 +218,22 @@ class WeightOnlyLinearMethod(QuantMethodBase):
                quant_attrs,
            )
        else:
-           # The scale shape should be equal to the output dim of weight using Per-Channel Quantization.
-           weight_scale_shape = [layer.weight_shape[1]]
-           layer.weight_shape.reverse()
-           if self.quant_config.name() == "wint4":
-               layer.weight_shape[0] //= 2
-           layer.weight_dtype = "int8"
+           if isinstance(self, MacheteWeightOnlyLinearMethod):
+               # Using group scale for machete, group size is 128
+               weight_scale_shape = [(layer.weight_shape[0] + 127) // 128, layer.weight_shape[1]]
+               if self.quant_config.name() == "wint4":
+                   layer.weight_shape[0] //= 8
+               else:
+                   layer.weight_shape[0] //= 4
+               layer.weight_dtype = "int32"
+           else:
+               # The scale shape should be equal to the output dim of weight using Per-Channel Quantization.
+               weight_scale_shape = [layer.weight_shape[1]]
+               layer.weight_shape.reverse()
+               if self.quant_config.name() == "wint4":
+                   layer.weight_shape[0] //= 2
+               layer.weight_dtype = "int8"

        layer.weight = layer.create_parameter(
            shape=layer.weight_shape,
            dtype=layer.weight_dtype,
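Concrete shape bookkeeping for a [K, N] = [4096, 12288] layer under the two branches above (illustrative arithmetic only):

K, N = 4096, 12288

# Machete branch: group scales, one scale row per 128 input channels; int32 storage.
machete_scale_shape = [(K + 127) // 128, N]   # [32, 12288]
machete_wint4_weight = [K // 8, N]            # 8 x 4-bit values per int32
machete_wint8_weight = [K // 4, N]            # 4 x 8-bit values per int32

# Default branch: per-channel scales, shape reversed, first dim halved for wint4 (int8 storage).
per_channel_scale_shape = [N]                 # [12288]
per_channel_wint4_weight = [N // 2, K]        # mirrors reverse() followed by //= 2 above

print(machete_scale_shape, machete_wint4_weight, per_channel_wint4_weight)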
@@ -260,17 +269,30 @@ class WeightOnlyLinearMethod(QuantMethodBase):
    def process_weights_after_loading(self, layer) -> None:
        if not layer.fd_config.load_config.load_choices == "default_v1":
            return
-       quanted_weight_tensor, weight_scale_tensor = weight_quantize(
-           layer.weight,
-           algo=self.quant_config.algo,
-           arch=self.quant_config.weight_only_linear_arch,
-       )
+       if isinstance(self, MacheteWeightOnlyLinearMethod):
+           from fastdeploy.model_executor.layers.quantization.ops import (
+               machete_quantize_and_pack,
+           )
+
+           # Using group scale for machete, group size is 128
+           quanted_weight_tensor, weight_scale_tensor = machete_quantize_and_pack(
+               w=layer.weight,
+               atype=layer._dtype,
+               quant_type="uint4b8" if self.quant_config.name() == "wint4" else "uint8b128",
+               group_size=128,
+           )
+       else:
+           quanted_weight_tensor, weight_scale_tensor = weight_quantize(
+               layer.weight,
+               algo=self.quant_config.algo,
+               arch=self.quant_config.weight_only_linear_arch,
+           )

        free_tensor(layer.weight)

        layer.weight = layer.create_parameter(
            shape=quanted_weight_tensor.shape,
-           dtype="int8",
+           dtype="int8" if not isinstance(self, MacheteWeightOnlyLinearMethod) else "int32",
            is_bias=False,
            default_initializer=paddle.nn.initializer.Constant(0),
        )
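process_weights_after_loading only runs for the default_v1 load path: at that point the layer still holds its original-precision weight, so the quantized and packed tensor is produced in place. A hedged outline of the branch added above, using placeholder arguments rather than the real layer object:

def quantized_storage(is_machete, quant_name):
    # Machete packs weights into int32 and uses group scales with group size 128;
    # the default weight-only path keeps int8 storage and per-channel scales.
    if is_machete:
        return "int32", ("uint4b8" if quant_name == "wint4" else "uint8b128"), 128
    return "int8", None, None

print(quantized_storage(True, "wint8"))   # ('int32', 'uint8b128', 128)
print(quantized_storage(False, "wint4"))  # ('int8', None, None)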
@@ -361,32 +383,6 @@ class MacheteWeightOnlyLinearMethod(WeightOnlyLinearMethod):
    ) -> None:
        super().__init__(quant_config)

-   def create_weights(self, layer, **extra_weight_attrs):
-
-       assert layer.bias is None, "Machete weight only linear method does not support bias."
-       assert self.quant_config.name() == "wint4", "Machete weight only linear method only supports wint4."
-
-       # The scale shape should be equal to the output dim of weight using Per-Channel Quantization.
-       weight_scale_shape = [1, layer.weight_shape[1]]
-
-       # layer.weight_shape.reverse()
-       if self.quant_config.name() == "wint4":
-           layer.weight_shape[0] //= 8
-       layer.weight_dtype = "int32"
-
-       layer.weight = layer.create_parameter(
-           shape=layer.weight_shape,
-           dtype=layer.weight_dtype,
-           is_bias=False,
-           default_initializer=paddle.nn.initializer.Constant(0),
-       )
-
-       layer.weight_scale = layer.create_parameter(
-           shape=weight_scale_shape,
-           dtype=layer._dtype,
-           is_bias=False,
-       )
-
    def process_prequanted_weights(self, layer, state_dict) -> None:
        pass
@@ -395,24 +391,27 @@ class MacheteWeightOnlyLinearMethod(WeightOnlyLinearMethod):
            machete_quantize_and_pack,
        )

+       # Using group scale for machete, group size is 128
        quanted_weight_tensor, weight_scale_tensor = machete_quantize_and_pack(
            w=weight,
            atype=layer._dtype,
-           quant_type="uint4b8",
+           quant_type="uint4b8" if self.quant_config.name() == "wint4" else "uint8b128",
            group_size=128,
        )
        layer.weight.set_value(quanted_weight_tensor)
        layer.weight_scale.set_value(weight_scale_tensor.astype(paddle.get_default_dtype()))

    def apply(self, layer, x):
-       assert layer.bias is None, "Machete weight only linear method does not support bias."
-       assert self.quant_config.name() == "wint4", "Machete weight only linear method only supports wint4."
        from fastdeploy.model_executor.layers.quantization.ops import machete_wint_mm

+       # Using group scale for machete, group size is 128
        linear_out = machete_wint_mm(
            x,
            w_prepack=layer.weight,
            w_g_s=layer.weight_scale,
-           weight_dtype="uint4b8",
+           weight_dtype="uint4b8" if self.quant_config.name() == "wint4" else "uint8b128",
            group_size=128,
        )

+       if layer.with_bias:
+           linear_out = paddle.add(linear_out, layer.bias)
        return linear_out
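For reference, the math machete_wint_mm is expected to perform with group scales: dequantize each group of 128 input channels with its own scale, remove the uint4b8/uint8b128 offset, then matmul and add the bias if present. A NumPy reference of that computation (an illustration of the math, not the machete kernel):

import numpy as np

def grouped_wint_mm_reference(x, w_q, w_s, bias=None, group_size=128, zero_point=8):
    # zero_point is 8 for uint4b8 and 128 for uint8b128.
    K, N = w_q.shape
    w = w_q.astype(np.float32) - zero_point              # undo the unsigned offset
    scales = np.repeat(w_s, group_size, axis=0)[:K]      # expand [K // 128, N] -> [K, N]
    out = x @ (w * scales)
    return out + bias if bias is not None else out

x = np.random.randn(2, 256).astype(np.float32)
w_q = np.random.randint(0, 16, size=(256, 64))
w_s = np.random.rand(256 // 128, 64).astype(np.float32)
print(grouped_wint_mm_reference(x, w_q, w_s).shape)      # (2, 64)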