refactor rl get_name_mappings_to_training (#2847)

* refactor rl get_name_mappings_to_training

* fix tp>1

* rename variables (ffn1 -> up_gate_proj, ffn2 -> down_proj)

* rename variables (linear_weight -> weight, linear_bias -> bias)

* add RL name mappings for VL models

* fix ERNIE 0.3B error

* fix develop code

* fix
Author: Yuanle Liu
Date: 2025-07-15 22:31:42 +08:00 (committed by GitHub)
Parent: e7bcbbab52
Commit: 61b3997b85
47 changed files with 1591 additions and 1629 deletions
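
For context, a minimal sketch of the kind of mapping this refactor targets: translating inference-side parameter names (after the ffn1/ffn2 renames) into their training-side counterparts so RL training can push updated weights into the runtime. The function body and every name below are illustrative assumptions, not the actual FastDeploy implementation.

# Hypothetical sketch only: map inference parameter names to training names.
# The layer count and the name templates are assumptions for illustration.
def get_name_mappings_to_training(num_layers: int) -> dict:
    mappings = {}
    for i in range(num_layers):
        prefix = f"model.layers.{i}.mlp"
        # The renamed inference attributes (up_gate_proj/down_proj) map back
        # to the training checkpoint's original ffn1/ffn2 names.
        mappings[f"{prefix}.up_gate_proj.weight"] = f"{prefix}.ffn1.weight"
        mappings[f"{prefix}.down_proj.weight"] = f"{prefix}.ffn2.weight"
    return mappings
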


@@ -70,11 +70,11 @@ class WFP8AFP8LinearMethod(QuantMethodBase):
     def create_weights(self, layer):
         """
         """
-        layer.linear_weight_shape.reverse()
+        layer.weight_shape.reverse()
         layer.weight_dtype = "float8_e4m3fn"
         # TODO(YuanRisheng): set weight logic should be moved to process_loaded_weights func
         self.skip_quant = False
-        layer.linear_weight_scale = layer.create_parameter(
+        layer.weight_scale = layer.create_parameter(
             shape=[1],
             dtype="float32",
             is_bias=False,
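
This hunk only renames attributes: create_weights still reverses the weight shape for the transposed matmul, marks the weight dtype as float8_e4m3fn, and allocates a single float32 per-tensor scale. A hedged sketch of the same setup in a standalone Paddle layer (the class and names here are illustrative, not the library's API):

import paddle
from paddle import nn

class TinyFP8Linear(nn.Layer):
    """Illustrative only: mirrors the renamed attributes (weight_shape,
    weight_dtype, weight_scale) that create_weights prepares."""
    def __init__(self, in_features: int, out_features: int):
        super().__init__()
        self.weight_shape = [in_features, out_features]
        # Stored transposed, to be used with matmul(..., transpose_y=True).
        self.weight_shape.reverse()
        self.weight_dtype = "float8_e4m3fn"
        # One float32 scale for the whole weight tensor (per-tensor scheme).
        self.weight_scale = self.create_parameter(
            shape=[1], dtype="float32", is_bias=False)
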
@@ -86,7 +86,7 @@ class WFP8AFP8LinearMethod(QuantMethodBase):
"""
if self.skip_quant:
weight_tensor = weights.cast(layer._dtype)
layer.linear_weight.set_value(weight_tensor)
layer.weight.set_value(weight_tensor)
return
if weights.dtype != paddle.float8_e4m3fn:
self.use_per_token_if_dynamic = True
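
process_loaded_weights either casts the checkpoint weight straight to the layer dtype (skip_quant) or quantizes it to FP8. A rough sketch of what per-tensor FP8 quantization computes (assumed semantics of scaled_fp8_quant with use_per_token_if_dynamic=False, not the actual kernel):

import paddle

E4M3_MAX = 448.0  # largest finite magnitude representable in float8_e4m3fn

def quantize_per_tensor_fp8(w: paddle.Tensor):
    """Assumed reference semantics: pick one scale so the whole tensor
    fits e4m3's dynamic range, then clip and cast."""
    scale = (paddle.max(paddle.abs(w)) / E4M3_MAX).cast("float32")
    q = paddle.clip(w.cast("float32") / scale, -E4M3_MAX, E4M3_MAX)
    return q.cast(paddle.float8_e4m3fn), scale
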
@@ -95,22 +95,22 @@ class WFP8AFP8LinearMethod(QuantMethodBase):
             weight_tensor,
             use_per_token_if_dynamic=False,
         )
-        layer.linear_weight.copy_(qweight, False)
-        layer.linear_weight_scale.set_value(weight_scale)
+        layer.weight.copy_(qweight, False)
+        layer.weight_scale.set_value(weight_scale)
 
     def apply(self, layer, x):
         """
         """
         if self.skip_quant:
-            linear_out = paddle.matmul(x, layer.linear_weight, False, True)
+            linear_out = paddle.matmul(x, layer.weight, False, True)
             return linear_out
         if self.use_per_token_if_dynamic:
             out_type = x.dtype
             a_q, a_scales = scaled_fp8_quant(
                 x, use_per_token_if_dynamic=self.use_per_token_if_dynamic)
-            linear_out = cutlass_scaled_mm(a_q, layer.linear_weight, a_scales,
-                                           layer.linear_weight_scale, out_type,
-                                           layer.linear_bias)
+            linear_out = cutlass_scaled_mm(a_q, layer.weight, a_scales,
+                                           layer.weight_scale, out_type,
+                                           layer.bias)
         else:
             raise NotImplementedError
         return linear_out
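
In apply, the dynamic path quantizes activations per token and hands everything to the fused cutlass_scaled_mm. A dequantized reference of what that fused call computes (assumed semantics for illustration; the real kernel never materializes the float32 operands):

import paddle

def scaled_mm_reference(a_q, weight_q, a_scales, weight_scale,
                        out_type, bias=None):
    """Assumed reference semantics of cutlass_scaled_mm: dequantize both
    operands with their scales, do a transposed matmul, add the bias."""
    a = a_q.cast("float32") * a_scales            # per-token activation scales
    w = weight_q.cast("float32") * weight_scale   # per-tensor weight scale
    out = paddle.matmul(a, w, transpose_y=True)
    if bias is not None:
        out = out + bias.cast("float32")
    return out.cast(out_type)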