From 3b1da6e4dd53a38da4aa17532c160e343c50eb6f Mon Sep 17 00:00:00 2001
From: Sunny-bot1 <68891411+Sunny-bot1@users.noreply.github.com>
Date: Wed, 10 Sep 2025 10:21:33 +0800
Subject: [PATCH] support v1 loader for machete (#3999)

---
 .../layers/quantization/weight_only.py   | 69 ++++++++-----------
 tests/model_loader/test_common_model.py  |  1 +
 2 files changed, 31 insertions(+), 39 deletions(-)

diff --git a/fastdeploy/model_executor/layers/quantization/weight_only.py b/fastdeploy/model_executor/layers/quantization/weight_only.py
index ac77f15f3..b8e929b4c 100644
--- a/fastdeploy/model_executor/layers/quantization/weight_only.py
+++ b/fastdeploy/model_executor/layers/quantization/weight_only.py
@@ -226,12 +226,19 @@ class WeightOnlyLinearMethod(QuantMethodBase):
                 quant_attrs,
             )
         else:
-            # The scale shape should be equal to the output dim of weight using Per-Channel Quantization.
-            weight_scale_shape = [layer.weight_shape[1]]
-            layer.weight_shape.reverse()
-            if self.quant_config.name() == "wint4":
-                layer.weight_shape[0] //= 2
-            layer.weight_dtype = "int8"
+            if isinstance(self, MacheteWeightOnlyLinearMethod):
+                weight_scale_shape = [1, layer.weight_shape[1]]
+                if self.quant_config.name() == "wint4":
+                    layer.weight_shape[0] //= 8
+                layer.weight_dtype = "int32"
+            else:
+                # The scale shape should be equal to the output dim of weight using Per-Channel Quantization.
+                weight_scale_shape = [layer.weight_shape[1]]
+                layer.weight_shape.reverse()
+                if self.quant_config.name() == "wint4":
+                    layer.weight_shape[0] //= 2
+                layer.weight_dtype = "int8"
+
         layer.weight = layer.create_parameter(
             shape=layer.weight_shape,
             dtype=layer.weight_dtype,
@@ -267,17 +274,28 @@ class WeightOnlyLinearMethod(QuantMethodBase):
     def process_weights_after_loading(self, layer) -> None:
         if not self.quant_config.is_checkpoint_bf16:
             return
-        quanted_weight_tensor, weight_scale_tensor = weight_quantize(
-            layer.weight,
-            algo=self.quant_config.algo,
-            arch=self.quant_config.weight_only_linear_arch,
-        )
+        if isinstance(self, MacheteWeightOnlyLinearMethod):
+            from fastdeploy.model_executor.layers.quantization.ops import (
+                machete_quantize_and_pack,
+            )
+
+            quanted_weight_tensor, weight_scale_tensor = machete_quantize_and_pack(
+                w=layer.weight,
+                atype=layer._dtype,
+                quant_type="uint4b8",
+            )
+        else:
+            quanted_weight_tensor, weight_scale_tensor = weight_quantize(
+                layer.weight,
+                algo=self.quant_config.algo,
+                arch=self.quant_config.weight_only_linear_arch,
+            )
 
         free_tensor(layer.weight)
 
         layer.weight = layer.create_parameter(
             shape=quanted_weight_tensor.shape,
-            dtype="int8",
+            dtype="int8" if not isinstance(self, MacheteWeightOnlyLinearMethod) else "int32",
             is_bias=False,
             default_initializer=paddle.nn.initializer.Constant(0),
         )
@@ -368,32 +386,6 @@ class MacheteWeightOnlyLinearMethod(WeightOnlyLinearMethod):
     ) -> None:
         super().__init__(quant_config)
 
-    def create_weights(self, layer, **extra_weight_attrs):
-
-        assert layer.bias is None, "Machete weight only linear method does not support bias."
-        assert self.quant_config.name() == "wint4", "Machete weight only linear method only supports wint4."
-
-        # The scale shape should be equal to the output dim of weight using Per-Channel Quantization.
-        weight_scale_shape = [1, layer.weight_shape[1]]
-
-        # layer.weight_shape.reverse()
-        if self.quant_config.name() == "wint4":
-            layer.weight_shape[0] //= 8
-        layer.weight_dtype = "int32"
-
-        layer.weight = layer.create_parameter(
-            shape=layer.weight_shape,
-            dtype=layer.weight_dtype,
-            is_bias=False,
-            default_initializer=paddle.nn.initializer.Constant(0),
-        )
-
-        layer.weight_scale = layer.create_parameter(
-            shape=weight_scale_shape,
-            dtype=layer._dtype,
-            is_bias=False,
-        )
-
     def process_prequanted_weights(self, layer, state_dict) -> None:
         pass
 
@@ -412,7 +404,6 @@ class MacheteWeightOnlyLinearMethod(WeightOnlyLinearMethod):
 
     def apply(self, layer, x):
         assert layer.bias is None, "Machete weight only linear method does not support bias."
-        assert self.quant_config.name() == "wint4", "Machete weight only linear method only supports wint4."
        from fastdeploy.model_executor.layers.quantization.ops import machete_wint_mm
 
         linear_out = machete_wint_mm(
diff --git a/tests/model_loader/test_common_model.py b/tests/model_loader/test_common_model.py
index 335792f40..38333b8f8 100644
--- a/tests/model_loader/test_common_model.py
+++ b/tests/model_loader/test_common_model.py
@@ -72,6 +72,7 @@ model_param_map = {
                 "FD_ATTENTION_BACKEND": "MLA_ATTN",
                 "FLAGS_mla_use_tensorcore": "1",
                 "FLAGS_flash_attn_version": "3",
+                "FD_USE_MACHETE": "1",
             },
         },
     ],
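
Illustration (not part of the patch): a minimal sketch of the shape arithmetic
the new create_weights branch relies on, assuming a bf16 checkpoint weight of
shape [in_features, out_features]. The helper name and example sizes below are
hypothetical.

    def packed_weight_shape(weight_shape, use_machete):
        """Mirror the wint4 shape arithmetic from create_weights above."""
        shape = list(weight_shape)
        if use_machete:
            # Machete keeps the [in, out] layout and packs 8 x 4-bit
            # values into each int32, with a 2-D per-channel scale.
            shape[0] //= 8
            return shape, "int32", [1, weight_shape[1]]
        # The default path transposes to [out, in] and packs 2 x 4-bit
        # values into each int8, with a 1-D per-channel scale.
        shape.reverse()
        shape[0] //= 2
        return shape, "int8", [weight_shape[1]]

    print(packed_weight_shape([4096, 12288], use_machete=True))
    # -> ([512, 12288], 'int32', [1, 12288])
    print(packed_weight_shape([4096, 12288], use_machete=False))
    # -> ([6144, 4096], 'int8', [12288])

Either way the element count is conserved: one int32 holds eight 4-bit codes,
one int8 holds two, which is exactly why the two branches divide the packed
dimension by 8 and 2 respectively.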
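
Illustration (not part of the patch): the quant_type="uint4b8" passed to
machete_quantize_and_pack names a biased 4-bit encoding in which a signed
value v in [-8, 7] is stored as the unsigned nibble v + 8. The sketch below
shows that encoding and the eight-codes-per-int32 density; the helper names
and the low-nibble-first ordering are assumptions for illustration, as the
real in-word layout is kernel-specific.

    def encode_uint4b8(v):
        # Signed 4-bit value in [-8, 7] stored as an unsigned nibble in [0, 15].
        assert -8 <= v <= 7
        return v + 8

    def pack_nibbles(nibbles):
        # Pack 8 nibbles into one 32-bit word, low nibble first
        # (the actual Machete kernel may interleave differently).
        assert len(nibbles) == 8
        word = 0
        for i, n in enumerate(nibbles):
            word |= (n & 0xF) << (4 * i)
        return word

    codes = [encode_uint4b8(v) for v in [-8, -3, 0, 1, 2, 3, 6, 7]]
    print(hex(pack_nibbles(codes)))  # 0xfeba9850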