[BugFix] Fix v1 loader MoE bf16, and support dynamic_load_weight creating quant params (#4229)

* Fix v1 loader MoE bf16, and support dynamic_load_weight creating quant params

* Do not return the EOS text when include_stop_str_in_output=False
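
The EOS fix itself is in the third changed file, whose diff is not shown below. A minimal sketch of the intended semantics (trim_stop_str is a hypothetical helper for illustration, not the actual FastDeploy code):

def trim_stop_str(text: str, eos_text: str, include_stop_str_in_output: bool) -> str:
    # Strip a trailing EOS/stop string unless the caller asked to keep it.
    if not include_stop_str_in_output and eos_text and text.endswith(eos_text):
        return text[: -len(eos_text)]
    return text

assert trim_stop_str("Hello</s>", "</s>", include_stop_str_in_output=False) == "Hello"
assert trim_stop_str("Hello</s>", "</s>", include_stop_str_in_output=True) == "Hello</s>"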
Author: chen
Date: 2025-09-24 14:12:05 +08:00 (committed by GitHub)
Commit: 3161014e49 (parent: 44010cee13)
3 changed files with 9 additions and 2 deletions


@@ -199,6 +199,7 @@ class UnquantizedFusedMoEMethod(MoEMethodBase):
             layer.up_gate_proj_weight,
             {
                 "weight_loader": extra_weight_attrs.get("weight_loader", default_weight_loader(layer.fd_config)),
+                "weight_need_transpose": extra_weight_attrs.get("model_format") == "torch",
                 "model_format": extra_weight_attrs.get("model_format", ""),
             },
         )
@@ -206,6 +207,7 @@ class UnquantizedFusedMoEMethod(MoEMethodBase):
             layer.down_proj_weight,
             {
                 "weight_loader": extra_weight_attrs.get("weight_loader", default_weight_loader(layer.fd_config)),
+                "weight_need_transpose": extra_weight_attrs.get("model_format") == "torch",
                 "model_format": extra_weight_attrs.get("model_format", ""),
             },
         )
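
The added "weight_need_transpose" attribute is read back by the weight loader. A minimal sketch of how a loader might honor it, assuming the usual torch-vs-Paddle linear-weight layouts (a simplified stand-in, not FastDeploy's actual default_weight_loader):

import paddle

def load_weight(param: paddle.Tensor, loaded_weight: paddle.Tensor) -> None:
    # torch checkpoints store linear weights as [out_features, in_features];
    # Paddle expects [in_features, out_features], hence the transpose.
    if getattr(param, "weight_need_transpose", False):
        loaded_weight = paddle.transpose(loaded_weight, perm=[1, 0])
    assert list(param.shape) == list(loaded_weight.shape), "shape mismatch after optional transpose"
    param.set_value(loaded_weight)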


@@ -85,6 +85,8 @@ def parse_quant_config(args, model_config, is_ernie, is_v1_loader):
     else:
         if not quantization_config.get("is_quantized"):
             quantization_config["is_quantized"] = model_config.is_quantized
+    if args.dynamic_load_weight and quantization_config is not None:
+        quantization_config["is_quantized"] = True
     quant_cls = get_quantization_config(quant_config_name)
     quant_config = quant_cls.from_config(quantization_config)
     return quant_config
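
With dynamic weight loading, the weights arrive after the model has been built, so the quantized path must be selected up front. A hedged sketch of the new branch's effect (plain dict and stub args used for illustration; the real config comes from the checkpoint and CLI):

from types import SimpleNamespace

args = SimpleNamespace(dynamic_load_weight=True)
quantization_config = {"quantization": "wint8", "is_quantized": False}

# Mirrors the added branch above: force is_quantized so quant parameters
# (e.g. weight scales) are created at init and can be filled in later.
if args.dynamic_load_weight and quantization_config is not None:
    quantization_config["is_quantized"] = True

assert quantization_config["is_quantized"]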