Mirror of https://github.com/PaddlePaddle/FastDeploy.git (synced 2025-10-05 16:48:03 +08:00)
[v1 loader] Qwen offline FP8 (#4036)
* support offline fp8
* update unit tests
* fix
* update
@@ -65,7 +65,7 @@ class WeightOnlyConfig(QuantConfigBase):
     @classmethod
     def from_config(cls, config: dict) -> "WeightOnlyConfig":
         algo = config["algo"]
-        is_checkpoint_bf16 = config.get("is_checkpoint_bf16", False)
+        is_checkpoint_bf16 = not config.get("is_quantized", False)
         return cls(algo, is_checkpoint_bf16)
 
     def get_quant_method(self, layer) -> Optional[QuantMethodBase]:
@@ -162,7 +162,7 @@ class WINT8Config(WeightOnlyConfig):
 
     @classmethod
     def from_config(cls, config: dict) -> "WINT8Config":
-        is_checkpoint_bf16 = config.get("is_checkpoint_bf16", False)
+        is_checkpoint_bf16 = not config.get("is_quantized", False)
         return cls(is_checkpoint_bf16)
 
     def name(self) -> str:
@@ -182,7 +182,7 @@ class WINT4Config(WeightOnlyConfig):
 
     @classmethod
     def from_config(cls, config: dict) -> "WINT4Config":
-        is_checkpoint_bf16 = config.get("is_checkpoint_bf16", False)
+        is_checkpoint_bf16 = not config.get("is_quantized", False)
         return cls(is_checkpoint_bf16)
 
     def name(self) -> str:
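The same substitution appears in WeightOnlyConfig, WINT8Config, and WINT4Config: from_config no longer reads an explicit is_checkpoint_bf16 key but derives it from is_quantized, so a checkpoint that already carries offline-quantized tensors (e.g. offline FP8) is no longer treated as a bf16 source. A minimal standalone sketch of the new rule, using made-up config dicts (only the algo and is_quantized keys come from the diff):

    # Illustrative sketch, not the FastDeploy implementation.
    def infer_is_checkpoint_bf16(config: dict) -> bool:
        # New rule: the checkpoint is bf16 unless explicitly marked as already quantized.
        return not config.get("is_quantized", False)

    bf16_ckpt = {"algo": "weight_only_int8"}                            # no flag -> bf16 weights
    offline_ckpt = {"algo": "weight_only_int8", "is_quantized": True}   # offline-quantized weights

    assert infer_is_checkpoint_bf16(bf16_ckpt) is True      # quantize online at load time
    assert infer_is_checkpoint_bf16(offline_ckpt) is False  # load quantized weights as-is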
@@ -202,13 +202,15 @@ class WeightOnlyLinearMethod(QuantMethodBase):
         self.quant_config = quant_config
 
     def create_weights(self, layer, **extra_weight_attrs):
-        if self.quant_config.is_checkpoint_bf16:
+        # TODO(bukejiyu): remove v1 loader check when v0 loader is removed
+        if self.quant_config.is_checkpoint_bf16 and layer.fd_config.load_config.load_choices == "default_v1":
             layer.weight = layer.create_parameter(
                 shape=layer.weight_shape,
                 dtype=layer.weight_dtype,
                 is_bias=False,
                 default_initializer=paddle.nn.initializer.Constant(0),
             )
+            extra_weight_attrs["weight_need_transpose"] = extra_weight_attrs.get("model_format") == "torch"
             quant_attrs = extra_weight_attrs
             if (
                 isinstance(layer, MergedColumnParallelLinear)
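With this change the bf16 branch of create_weights is taken only under the v1 loader ("default_v1"), and it records in the weight attributes whether the source checkpoint uses the torch layout so the loader knows to transpose the 2-D linear weight. A rough, hypothetical sketch of what that flag implies (the helper and shapes below are illustrative assumptions, not FastDeploy code):

    import numpy as np

    # Illustrative: a loader that honors weight_need_transpose.
    def load_linear_weight(loaded: np.ndarray, weight_need_transpose: bool) -> np.ndarray:
        # torch-format checkpoints store nn.Linear weights as [out_features, in_features];
        # transposing gives the [in_features, out_features] layout expected here.
        return loaded.T if weight_need_transpose else loaded

    model_format = "torch"
    weight_need_transpose = model_format == "torch"       # same expression as in the diff
    torch_style = np.zeros((1024, 4096))                   # [out, in] from a torch checkpoint
    assert load_linear_weight(torch_style, weight_need_transpose).shape == (4096, 1024)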
@@ -256,6 +258,7 @@ class WeightOnlyLinearMethod(QuantMethodBase):
                 {
                     "weight_loader": weight_loader,
                     "output_dim": output_dim,
+                    "weight_need_transpose": not extra_weight_attrs.get("model_format") == "torch",
                 },
             )
 
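In this second hunk the flag is added to the loader attributes of the already-quantized weight, with the condition inverted relative to the bf16 branch: the transpose is requested whenever the checkpoint is not torch-format. A small sketch of building such an attribute dict (the keys mirror the diff; the helper function and placeholder values are hypothetical):

    # Hypothetical helper; only the dict keys come from the diff.
    def build_weight_attrs(model_format, weight_loader, output_dim):
        return {
            "weight_loader": weight_loader,
            "output_dim": output_dim,
            # Inverted w.r.t. the bf16 branch: transpose unless the source is torch-format.
            "weight_need_transpose": not model_format == "torch",
        }

    assert build_weight_attrs("paddle", None, 0)["weight_need_transpose"] is True
    assert build_weight_attrs("torch", None, 0)["weight_need_transpose"] is False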