[BugFix] fix v1 loader lm head fp32 (#5270)

Author: chen
Date: 2025-11-27 20:12:56 +08:00
Committed by: GitHub
Parent: b52ec268f7
Commit: 35f85baf09
8 changed files with 24 additions and 9 deletions
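Why the change: with lm_head_fp32 enabled, the lm_head parameter is kept in float32 while the v1 loader casts every checkpoint tensor to paddle.get_default_dtype() (the serving dtype, typically bfloat16), so set_value was handed a tensor whose dtype did not match the destination parameter. The fix casts to the destination parameter's dtype instead. A minimal sketch of that pattern in plain Paddle follows; the names and shapes are illustrative, not the FastDeploy loader code, and the bfloat16 serving dtype is an assumption.

import paddle

# Minimal sketch of the dtype handling this commit fixes; not the FastDeploy code.
# Assumption: the model is served in bfloat16 while the lm_head weight is kept in
# float32 (the lm_head_fp32 option exercised by the test at the end of this diff).
paddle.set_default_dtype("bfloat16")

# Destination parameter deliberately created in float32, like an fp32 lm_head.
fp32_weight = paddle.create_parameter(shape=[8, 16], dtype="float32")

# The v1 loader casts checkpoint tensors to the default dtype (bfloat16 here),
# so the loaded tensor no longer matches the fp32 parameter.
loaded = paddle.ones([8, 16])  # picks up the default dtype: bfloat16

# The fix: cast to the destination parameter's dtype before set_value instead
# of assuming the parameter uses the global default dtype.
fp32_weight.set_value(loaded.astype(fp32_weight.dtype))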


@@ -120,10 +120,10 @@ class ParallelEHProjection(nn.Layer):
             weight_tensor = get_tensor(state_dict.pop(self.weight_key)).astype(paddle.get_default_dtype())
             if self.linear.weight.shape != weight_tensor.shape:
                 weight_tensor = weight_tensor.transpose([1, 0])
-            self.linear.weight.set_value(weight_tensor)
+            self.linear.weight.set_value(weight_tensor.astype(self.linear.weight.dtype))
             if self.bias_key is not None:
-                bias = get_tensor(state_dict.pop(self.bias_key)).astype(paddle.get_default_dtype())
+                bias = get_tensor(state_dict.pop(self.bias_key)).astype(self.linear.bias.dtype)
                 self.linear.bias.set_value(bias)

     def forward(self, input):


@@ -68,7 +68,9 @@ def load_weights_from_cache(model, weights_iterator):
             )
             param.copy_(loaded_weight, False)
         if "embeddings" in loaded_weight_name and getattr(model, "tie_word_embeddings", False):
-            model.lm_head.linear.weight.set_value(loaded_weight.transpose([1, 0]))
+            model.lm_head.linear.weight.set_value(
+                loaded_weight.transpose([1, 0]).astype(model.lm_head.linear.weight.dtype)
+            )
     for _, model_sublayer in model.named_sublayers():
         if isinstance(model_sublayer, KVBatchLinear):
             model_sublayer.process_weights_after_loading()


@@ -600,7 +600,9 @@ class Ernie4_5_MoeForCausalLM(ModelForCasualLM):
             process_weights_after_loading_fn(model_sublayer_name, param)
         if self.tie_word_embeddings:
-            self.lm_head.linear.weight.set_value(self.ernie.embed_tokens.embeddings.weight.transpose([1, 0]))
+            self.lm_head.linear.weight.set_value(
+                self.ernie.embed_tokens.embeddings.weight.transpose([1, 0]).astype(self.lm_head.linear.weight.dtype)
+            )

     def compute_logits(self, hidden_states: paddle.Tensor):
         logits = self.lm_head(hidden_states)
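The same cast is applied on every tie_word_embeddings path: the lm_head weight is rebuilt from the transposed embedding table, which lives in the serving dtype and therefore has to be cast before set_value when the lm_head is kept in float32. A small sketch of that branch, with made-up shapes and stand-in tensors rather than the real model:

import paddle

# Sketch of the tied-embeddings branch above; shapes and names are illustrative,
# not the FastDeploy model code. Assumes a bfloat16 serving dtype.
paddle.set_default_dtype("bfloat16")

vocab_size, hidden_size = 32, 8
# Embedding table lives in the serving dtype (bfloat16 here).
embed_weight = paddle.ones([vocab_size, hidden_size])
# lm_head parameter kept in float32 (lm_head_fp32).
lm_head_weight = paddle.create_parameter(shape=[hidden_size, vocab_size], dtype="float32")

# Tie the weights: transpose [vocab, hidden] -> [hidden, vocab] and cast to the
# lm_head parameter's dtype so set_value does not hit a bfloat16/float32 mismatch.
lm_head_weight.set_value(embed_weight.transpose([1, 0]).astype(lm_head_weight.dtype))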


@@ -720,7 +720,9 @@ class Ernie4_5_VLMoeForConditionalGeneration(ModelForCasualLM):
             )
             process_weights_after_loading_fn(model_sublayer_name, param)
         if self.tie_word_embeddings:
-            self.lm_head.linear.weight.set_value(self.ernie.embed_tokens.embeddings.weight.transpose([1, 0]))
+            self.lm_head.linear.weight.set_value(
+                self.ernie.embed_tokens.embeddings.weight.transpose([1, 0]).astype(self.lm_head.linear.weight.dtype)
+            )

     @paddle.no_grad()
     def set_state_dict(self, state_dict: Dict[str, Union[np.ndarray, paddle.Tensor]]):


@@ -376,7 +376,9 @@ class Qwen2ForCausalLM(ModelForCasualLM):
             model_sublayer_name = re.sub(r"\.(weight)$", "", model_param_name)
             process_weights_after_loading_fn(model_sublayer_name, param)
         if self.tie_word_embeddings:
-            self.lm_head.linear.weight.set_value(self.qwen2.embed_tokens.embeddings.weight.transpose([1, 0]))
+            self.lm_head.linear.weight.set_value(
+                self.qwen2.embed_tokens.embeddings.weight.transpose([1, 0]).astype(self.lm_head.linear.weight.dtype)
+            )

     @classmethod
     def name(self):


@@ -232,7 +232,9 @@ class Qwen2_5_VLForConditionalGeneration(ModelForCasualLM):
             process_weights_after_loading_fn(model_sublayer_name, param)
         if self.tie_word_embeddings:
-            self.lm_head.linear.weight.set_value(self.model.embed_tokens.embeddings.weight.transpose([1, 0]))
+            self.lm_head.linear.weight.set_value(
+                self.model.embed_tokens.embeddings.weight.transpose([1, 0]).astype(self.lm_head.linear.weight.dtype)
+            )

     @paddle.no_grad()
     def set_state_dict(self, state_dict: Dict[str, Union[np.ndarray, paddle.Tensor]]):
@@ -247,7 +249,9 @@ class Qwen2_5_VLForConditionalGeneration(ModelForCasualLM):
         self.model.load_state_dict(state_dict)
         self.visual.load_state_dict(state_dict)
         if self.tie_word_embeddings:
-            self.lm_head.linear.weight.set_value(self.model.embed_tokens.embeddings.weight.transpose([1, 0]))
+            self.lm_head.linear.weight.set_value(
+                self.model.embed_tokens.embeddings.weight.transpose([1, 0]).astype(self.lm_head.linear.weight.dtype)
+            )
         else:
             self.lm_head.load_state_dict(state_dict)


@@ -319,7 +319,9 @@ class Qwen3ForCausalLM(ModelForCasualLM):
             process_weights_after_loading_fn(model_sublayer_name, param)
         if self.tie_word_embeddings and not is_pooling_model:
-            self.lm_head.linear.weight.set_value(self.model.embed_tokens.embeddings.weight.transpose([1, 0]))
+            self.lm_head.linear.weight.set_value(
+                self.model.embed_tokens.embeddings.weight.transpose([1, 0]).astype(self.lm_head.linear.weight.dtype)
+            )

     @paddle.no_grad()
     def set_state_dict(self, state_dict):


@@ -50,6 +50,7 @@ class TestGeneration(unittest.TestCase):
             model=MODEL_NAME,
             max_num_batched_tokens=4096,
             tensor_parallel_size=1,
+            lm_head_fp32=True,
             engine_worker_queue_port=int(os.getenv("FD_ENGINE_QUEUE_PORT")),
             cache_queue_port=int(os.getenv("FD_CACHE_QUEUE_PORT")),
         )
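The test now exercises the configuration the casts above protect: lm_head_fp32=True keeps the lm_head weights in float32 while the rest of the model runs in the serving dtype. A hedged usage sketch, assuming the test builds FastDeploy's offline LLM entry point; verify the import path and keyword arguments against your installed FastDeploy version, and note that MODEL_NAME and the queue-port environment variables come from the test environment:

import os

# Assumption: the test constructs FastDeploy's offline LLM entry point; the
# import path and keyword arguments should be checked against your version.
from fastdeploy import LLM

llm = LLM(
    model="<MODEL_NAME>",  # placeholder for the model path used by the test
    max_num_batched_tokens=4096,
    tensor_parallel_size=1,
    lm_head_fp32=True,  # keep the lm_head weights in float32
    engine_worker_queue_port=int(os.getenv("FD_ENGINE_QUEUE_PORT")),  # set in the test environment
    cache_queue_port=int(os.getenv("FD_CACHE_QUEUE_PORT")),  # set in the test environment
)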