Mirror of https://github.com/PaddlePaddle/FastDeploy.git (synced 2025-10-04 16:22:57 +08:00)
support v1 loader for machete (#3999)
@@ -226,12 +226,19 @@ class WeightOnlyLinearMethod(QuantMethodBase):
                 quant_attrs,
             )
         else:
-            # The scale shape should be equal to the output dim of weight using Per-Channel Quantization.
-            weight_scale_shape = [layer.weight_shape[1]]
-            layer.weight_shape.reverse()
-            if self.quant_config.name() == "wint4":
-                layer.weight_shape[0] //= 2
-            layer.weight_dtype = "int8"
+            if isinstance(self, MacheteWeightOnlyLinearMethod):
+                weight_scale_shape = [1, layer.weight_shape[1]]
+                if self.quant_config.name() == "wint4":
+                    layer.weight_shape[0] //= 8
+                layer.weight_dtype = "int32"
+            else:
+                # The scale shape should be equal to the output dim of weight using Per-Channel Quantization.
+                weight_scale_shape = [layer.weight_shape[1]]
+                layer.weight_shape.reverse()
+                if self.quant_config.name() == "wint4":
+                    layer.weight_shape[0] //= 2
+                layer.weight_dtype = "int8"
+
             layer.weight = layer.create_parameter(
                 shape=layer.weight_shape,
                 dtype=layer.weight_dtype,
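Note on the packing arithmetic above: on the default GEMM path, wint4 stores two 4-bit values per int8 element, while Machete packs eight per int32 element, so the packed dimension shrinks 2x in one branch and 8x in the other. A minimal sketch of the resulting shapes (the [K, N] layout and the helper name are illustrative, not from this commit):

    def packed_weight_shape(weight_shape, use_machete):
        """Hypothetical helper mirroring the branch above; weight_shape = [K, N]."""
        k, n = weight_shape
        if use_machete:
            # Machete keeps [K, N] order and packs 8 x int4 into each int32.
            return [k // 8, n], "int32"
        # The default path transposes to [N, K] and packs 2 x int4 into each int8.
        return [n // 2, k], "int8"

    assert packed_weight_shape([4096, 8192], use_machete=True) == ([512, 8192], "int32")
    assert packed_weight_shape([4096, 8192], use_machete=False) == ([4096, 4096], "int8")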
@@ -267,17 +274,28 @@ class WeightOnlyLinearMethod(QuantMethodBase):
     def process_weights_after_loading(self, layer) -> None:
         if not self.quant_config.is_checkpoint_bf16:
             return
-        quanted_weight_tensor, weight_scale_tensor = weight_quantize(
-            layer.weight,
-            algo=self.quant_config.algo,
-            arch=self.quant_config.weight_only_linear_arch,
-        )
+        if isinstance(self, MacheteWeightOnlyLinearMethod):
+            from fastdeploy.model_executor.layers.quantization.ops import (
+                machete_quantize_and_pack,
+            )
+
+            quanted_weight_tensor, weight_scale_tensor = machete_quantize_and_pack(
+                w=layer.weight,
+                atype=layer._dtype,
+                quant_type="uint4b8",
+            )
+        else:
+            quanted_weight_tensor, weight_scale_tensor = weight_quantize(
+                layer.weight,
+                algo=self.quant_config.algo,
+                arch=self.quant_config.weight_only_linear_arch,
+            )
 
         free_tensor(layer.weight)
 
         layer.weight = layer.create_parameter(
             shape=quanted_weight_tensor.shape,
-            dtype="int8",
+            dtype="int8" if not isinstance(self, MacheteWeightOnlyLinearMethod) else "int32",
             is_bias=False,
             default_initializer=paddle.nn.initializer.Constant(0),
         )
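The quant_type string "uint4b8" follows the Machete/CUTLASS naming convention for signed 4-bit values stored as unsigned 4-bit with a fixed bias of 8, so -8..7 maps to 0..15. A small self-contained sketch of that encoding, offered as an assumption about the name rather than code from this repository:

    def to_uint4b8(v: int) -> int:
        # Map a signed int4 value in [-8, 7] to its biased unsigned form in [0, 15].
        assert -8 <= v <= 7
        return v + 8

    def from_uint4b8(u: int) -> int:
        # Invert the fixed bias to recover the signed value.
        assert 0 <= u <= 15
        return u - 8

    assert to_uint4b8(-8) == 0 and to_uint4b8(7) == 15
    assert all(from_uint4b8(to_uint4b8(v)) == v for v in range(-8, 8))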
@@ -368,32 +386,6 @@ class MacheteWeightOnlyLinearMethod(WeightOnlyLinearMethod):
     ) -> None:
         super().__init__(quant_config)
 
-    def create_weights(self, layer, **extra_weight_attrs):
-
-        assert layer.bias is None, "Machete weight only linear method does not support bias."
-        assert self.quant_config.name() == "wint4", "Machete weight only linear method only supports wint4."
-
-        # The scale shape should be equal to the output dim of weight using Per-Channel Quantization.
-        weight_scale_shape = [1, layer.weight_shape[1]]
-
-        # layer.weight_shape.reverse()
-        if self.quant_config.name() == "wint4":
-            layer.weight_shape[0] //= 8
-        layer.weight_dtype = "int32"
-
-        layer.weight = layer.create_parameter(
-            shape=layer.weight_shape,
-            dtype=layer.weight_dtype,
-            is_bias=False,
-            default_initializer=paddle.nn.initializer.Constant(0),
-        )
-
-        layer.weight_scale = layer.create_parameter(
-            shape=weight_scale_shape,
-            dtype=layer._dtype,
-            is_bias=False,
-        )
-
     def process_prequanted_weights(self, layer, state_dict) -> None:
         pass
 
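With this override removed, both quant methods share the base-class create_weights and differ only through the isinstance branch added in the first hunk. A condensed, runnable sketch of the resulting dispatch (class bodies trimmed to the shape logic; the _Layer stub is illustrative):

    class WeightOnlyLinearMethod:
        def create_weights(self, layer):
            # Shared entry point: branch on the concrete method class.
            if isinstance(self, MacheteWeightOnlyLinearMethod):
                layer.weight_shape[0] //= 8  # 8 x int4 packed per int32
                layer.weight_dtype = "int32"
            else:
                layer.weight_shape.reverse()  # [K, N] -> [N, K]
                layer.weight_shape[0] //= 2  # 2 x int4 packed per int8
                layer.weight_dtype = "int8"

    class MacheteWeightOnlyLinearMethod(WeightOnlyLinearMethod):
        pass  # no create_weights override; only quantization and apply differ

    class _Layer:
        def __init__(self):
            self.weight_shape = [4096, 8192]
            self.weight_dtype = "bfloat16"

    layer = _Layer()
    MacheteWeightOnlyLinearMethod().create_weights(layer)
    assert layer.weight_shape == [512, 8192] and layer.weight_dtype == "int32"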
@@ -412,7 +404,6 @@ class MacheteWeightOnlyLinearMethod(WeightOnlyLinearMethod):
 
     def apply(self, layer, x):
         assert layer.bias is None, "Machete weight only linear method does not support bias."
-        assert self.quant_config.name() == "wint4", "Machete weight only linear method only supports wint4."
         from fastdeploy.model_executor.layers.quantization.ops import machete_wint_mm
 
         linear_out = machete_wint_mm(
|
@@ -72,6 +72,7 @@ model_param_map = {
                 "FD_ATTENTION_BACKEND": "MLA_ATTN",
                 "FLAGS_mla_use_tensorcore": "1",
                 "FLAGS_flash_attn_version": "3",
+                "FD_USE_MACHETE": "1",
             },
         },
     ],
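FD_USE_MACHETE sits alongside the FLAGS_* entries above, which suggests it is an environment toggle consumed at startup. A hedged usage sketch, assuming the test runner exports each entry of the param map into the process environment:

    import os

    # Opt in to the Machete wint4 kernels for a local run.
    os.environ["FD_USE_MACHETE"] = "1"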
|