bukejiyu
2025-08-06 14:45:27 +08:00
committed by GitHub
parent 91dc87f1c5
commit 20839abccf
30 changed files with 1361 additions and 1087 deletions


@@ -81,8 +81,16 @@ class BlockWiseFP8LinearMethod(QuantMethodBase):
         super().__init__()
         self.quant_config = quant_config

-    def create_weights(self, layer):
+    def create_weights(self, layer, **extra_weight_attrs):
         layer.weight_shape.reverse()
+        layer.weight = layer.create_parameter(
+            shape=layer.weight_shape,
+            dtype=layer.weight_dtype,
+            is_bias=False,
+            default_initializer=paddle.nn.initializer.Constant(0),
+        )
         layer.weight_scale = layer.create_parameter(
             shape=[
                 (layer.output_size + self.quant_config.weight_block_size[0] - 1)

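The scale-shape expression above is cut off, but the visible part is an integer ceil division: each weight dimension is divided by its quantization block size, rounding up, so every block of weights gets one scale entry. A minimal sketch of that arithmetic (the helper name, the second dimension, and the example sizes are assumptions for illustration, not taken from the diff):

def blockwise_scale_shape(output_size: int, input_size: int, weight_block_size) -> list:
    """Hypothetical helper: ceil-divide each weight dim by its quantization block size."""
    block_out, block_in = weight_block_size
    return [
        (output_size + block_out - 1) // block_out,  # ceil(output_size / block_out)
        (input_size + block_in - 1) // block_in,     # ceil(input_size / block_in)
    ]

# e.g. a 4096 x 11008 weight with 128 x 128 blocks needs a [32, 86] scale tensor
print(blockwise_scale_shape(4096, 11008, [128, 128]))  # [32, 86]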

@@ -16,6 +16,8 @@
 from typing import Optional
 import paddle
+from fastdeploy.model_executor.layers.moe import FusedMoE
 from ..utils import get_tensor
@@ -79,11 +81,14 @@ class TensorWiseFP8LinearMethod(QuantMethodBase):
         self.quant_round_type = 1
         self.weight_dtype = "float8_e4m3fn"

-    def create_weights(self, layer):
-        """
-        Nothing to do!
-        """
-        pass
+    def create_weights(self, layer, **extra_weight_attrs):
+        layer.weight = layer.create_parameter(
+            shape=layer.weight_shape,
+            dtype=layer.weight_dtype,
+            is_bias=False,
+            default_initializer=paddle.nn.initializer.Constant(0),
+        )

     def process_prequanted_weights(self, layer, state_dict) -> None:
         """

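This hunk shows the pattern the commit applies across all the quantization methods: create_weights no longer defers everything ("Nothing to do!") but allocates a zero-initialized placeholder parameter up front, and the real values are filled in later (e.g. by process_loaded_weights or process_prequanted_weights). A self-contained sketch of that placeholder-then-fill flow; the ToyLinear layer, the bfloat16 dtype (a stand-in for "float8_e4m3fn", which needs a recent Paddle build), and the use of set_value are assumptions for illustration:

import paddle


class ToyLinear(paddle.nn.Layer):
    def __init__(self, input_size, output_size):
        super().__init__()
        self.weight_shape = [output_size, input_size]  # already reversed, as in the diffs above
        self.weight_dtype = "bfloat16"                 # the real method uses "float8_e4m3fn"
        # Zero-initialized placeholder; shape and dtype are fixed before any weights are read.
        self.weight = self.create_parameter(
            shape=self.weight_shape,
            dtype=self.weight_dtype,
            is_bias=False,
            default_initializer=paddle.nn.initializer.Constant(0),
        )


layer = ToyLinear(4, 8)
loaded = paddle.randn([8, 4]).cast(layer.weight_dtype)  # stand-in for a checkpoint tensor
layer.weight.set_value(loaded)                          # fill the placeholder in place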

@@ -63,11 +63,17 @@ class W4AFP8LinearMethod(QuantMethodBase):
         super().__init__()
         self.quant_config = quant_config

-    def create_weights(self, layer):
+    def create_weights(self, layer, **extra_weight_attrs):
         layer.weight_shape.reverse()
         layer.weight_shape[0] //= 2
         layer.weight_dtype = "int8"
-        pass
+        layer.weight = layer.create_parameter(
+            shape=layer.weight_shape,
+            dtype=layer.weight_dtype,
+            is_bias=False,
+            default_initializer=paddle.nn.initializer.Constant(0),
+        )

     def process_loaded_weights(self, layer, weights) -> None:
         (

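The weight_shape[0] //= 2 together with the "int8" dtype reflects 4-bit packing: two int4 values share one int8 byte, so the stored tensor has half as many rows. A small numpy sketch of that idea (the nibble order and the helper are assumptions; the real kernel layout may differ):

import numpy as np


def pack_int4_pairs(q):
    """Hypothetical packing: even rows go in the low nibble, odd rows in the high nibble."""
    assert q.shape[0] % 2 == 0 and q.min() >= -8 and q.max() <= 7
    lo = (q[0::2] & 0x0F).astype(np.uint8)
    hi = (q[1::2] & 0x0F).astype(np.uint8)
    return (lo | (hi << 4)).astype(np.int8)  # first dim halves, dtype becomes int8


q = np.random.randint(-8, 8, size=(8, 4), dtype=np.int8)  # values already in int4 range
packed = pack_int4_pairs(q)
print(q.shape, "->", packed.shape)  # (8, 4) -> (4, 4)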

@@ -74,7 +74,7 @@ class W8A8LinearMethod(QuantMethodBase):
         self.quant_config = quant_config
         self.smooth_quant_method = SmoothQuantLinearMethod(quant_config)

-    def create_weights(self, layer):
+    def create_weights(self, layer, **extra_weight_attrs):
         layer.weight_shape.reverse()
         layer.weight_dtype = "int8"
         if self.quant_config.use_smooth_quant:
@@ -85,7 +85,12 @@ class W8A8LinearMethod(QuantMethodBase):
         if weight_scale is None or in_scale is None:
             self.skip_quant = True
             return
+        layer.weight = layer.create_parameter(
+            shape=layer.weight_shape,
+            dtype=layer.weight_dtype,
+            is_bias=False,
+            default_initializer=paddle.nn.initializer.Constant(0),
+        )
         max_range = 127.0
         linear_out_scale = paddle.to_tensor(weight_scale / (max_range * max_range * in_scale)).astype("float32")
         layer.linear_out_scale = layer.create_parameter(
@@ -136,7 +141,7 @@ class SmoothQuantLinearMethod(QuantMethodBase):
         super().__init__()
         self.quant_config = quant_config

-    def create_weights(self, layer):
+    def create_weights(self, layer, **extra_weight_attrs):
         linear_shift_shape = [layer.output_size]
         linear_smooth_shape = [layer.output_size]
         layer.linear_shift = self.create_parameter(

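max_range = 127.0 is the usual symmetric-int8 bound, and linear_out_scale folds both quantization scales into a single dequantization factor applied to the int32 matmul output. A numpy round-trip illustrating why that factor carries a 127 * 127 in the divisor; the scale convention here (scale = abs-max) is an assumption, and the exact meaning of in_scale / weight_scale in W8A8LinearMethod is not visible in this hunk:

import numpy as np

x = np.random.randn(4, 8).astype(np.float32)
w = np.random.randn(8, 3).astype(np.float32)

x_scale = np.abs(x).max()
w_scale = np.abs(w).max()
qx = np.clip(np.round(x * 127.0 / x_scale), -127, 127).astype(np.int8)
qw = np.clip(np.round(w * 127.0 / w_scale), -127, 127).astype(np.int8)

out_i32 = qx.astype(np.int32) @ qw.astype(np.int32)    # int8 matmul, int32 accumulation
out = out_i32 * (x_scale * w_scale / (127.0 * 127.0))  # one combined dequant factor

print(np.abs(out - x @ w).max())  # small quantization error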

@@ -168,7 +168,7 @@ class WeightOnlyLinearMethod(QuantMethodBase):
         super().__init__()
         self.quant_config = quant_config

-    def create_weights(self, layer):
+    def create_weights(self, layer, **extra_weight_attrs):
         # The scale shape should equal the weight's output dim when using per-channel quantization.
         weight_scale_shape = [layer.weight_shape[1]]
@@ -177,6 +177,14 @@ class WeightOnlyLinearMethod(QuantMethodBase):
         if self.quant_config.name() == "wint4":
             layer.weight_shape[0] //= 2
         layer.weight_dtype = "int8"
+        layer.weight = layer.create_parameter(
+            shape=layer.weight_shape,
+            dtype=layer.weight_dtype,
+            is_bias=False,
+            default_initializer=paddle.nn.initializer.Constant(0),
+        )
         layer.weight_scale = layer.create_parameter(
             shape=weight_scale_shape,
             dtype=layer._dtype,

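The comment in the first hunk explains the weight_scale_shape = [layer.weight_shape[1]] line: with per-channel quantization there is exactly one scale per output channel, so the scale vector's length equals the weight's output dim (and for wint4 the packed weight's first dim is halved, as in the W4AFP8 sketch above). A small per-channel sketch; the layout and scale convention are assumptions for illustration:

import numpy as np

in_dim, out_dim = 8, 4
w = np.random.randn(in_dim, out_dim).astype(np.float32)  # [input_dim, output_dim]

scales = np.abs(w).max(axis=0) / 127.0                   # shape (out_dim,): one scale per output channel
qw = np.clip(np.round(w / scales), -127, 127).astype(np.int8)

w_restored = qw.astype(np.float32) * scales              # dequantize when the weight is used
print(scales.shape, np.abs(w - w_restored).max())        # (4,) and a small error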

@@ -69,12 +69,18 @@ class WFP8AFP8LinearMethod(QuantMethodBase):
         super().__init__()
         self.quant_config = quant_config

-    def create_weights(self, layer):
+    def create_weights(self, layer, **extra_weight_attrs):
         """ """
         layer.weight_shape.reverse()
         layer.weight_dtype = "float8_e4m3fn"
         # TODO(YuanRisheng): the weight-setting logic should be moved to the process_loaded_weights func
         self.skip_quant = False
+        layer.weight = layer.create_parameter(
+            shape=layer.weight_shape,
+            dtype=layer.weight_dtype,
+            is_bias=False,
+            default_initializer=paddle.nn.initializer.Constant(0),
+        )
         layer.weight_scale = layer.create_parameter(
             shape=[1],
             dtype="float32",