"""
|
|
# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved.
|
|
#
|
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
# you may not use this file except in compliance with the License.
|
|
# You may obtain a copy of the License at
|
|
#
|
|
# http://www.apache.org/licenses/LICENSE-2.0
|
|
#
|
|
# Unless required by applicable law or agreed to in writing, software
|
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
# See the License for the specific language governing permissions and
|
|
# limitations under the License.
|
|
"""
|
|
|
|
from __future__ import annotations

import re
from functools import partial

import paddle
from paddle import nn
from paddleformers.transformers import PretrainedModel
from paddleformers.utils.log import logger

from fastdeploy.config import FDConfig, ModelConfig
from fastdeploy.model_executor.forward_meta import ForwardMeta
from fastdeploy.model_executor.graph_optimization.decorator import (
    support_graph_optimization,
)
from fastdeploy.model_executor.layers.activation import SiluAndMul
from fastdeploy.model_executor.layers.attention.attention import Attention
from fastdeploy.model_executor.layers.embeddings import VocabParallelEmbedding
from fastdeploy.model_executor.layers.linear import (
    MergedColumnParallelLinear,
    QKVParallelLinear,
    RowParallelLinear,
)
from fastdeploy.model_executor.layers.lm_head import ParallelLMHead
from fastdeploy.model_executor.layers.normalization import RMSNorm
from fastdeploy.model_executor.models.model_base import (
    ModelCategory,
    ModelForCasualLM,
    ModelRegistry,
)


class Qwen2MLP(nn.Layer):
    """Qwen2 feed-forward block: fused gate/up projection, SiLU-and-multiply
    activation, and down projection."""

    def __init__(
        self,
        fd_config: FDConfig,
        prefix: str = "",
    ) -> None:
        super().__init__()
        self.nranks = fd_config.parallel_config.tensor_parallel_size
        self.up_gate_proj = MergedColumnParallelLinear(
            fd_config=fd_config,
            prefix=f"{prefix}.up_gate_proj",
            input_size=fd_config.model_config.hidden_size,
            output_size=fd_config.model_config.intermediate_size * 2,
            with_bias=False,
            activation=fd_config.model_config.hidden_act,
        )

        self.down_proj = RowParallelLinear(
            fd_config=fd_config,
            prefix=f"{prefix}.down_proj",
            input_size=fd_config.model_config.intermediate_size,
            output_size=fd_config.model_config.hidden_size,
            with_bias=False,
        )

        self.act_fn = SiluAndMul(
            fd_config=fd_config,
            bias=getattr(self.up_gate_proj, "bias", None),
            act_method=fd_config.model_config.hidden_act,
        )

    def load_state_dict(self, state_dict):
        """Load the MLP projection weights from ``state_dict``."""
        self.up_gate_proj.load_state_dict(state_dict)
        self.down_proj.load_state_dict(state_dict)

    def forward(self, x):
        """Apply the SwiGLU feed-forward transformation to ``x``."""
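        # up_gate_proj produces the gate and up projections concatenated
        # along the last axis; SiluAndMul splits them and computes
        # silu(gate) * up (SwiGLU).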
        gate_up_out = self.up_gate_proj(x)
        act_out = self.act_fn(gate_up_out)
        down_out = self.down_proj(act_out)
        return down_out


class Qwen2Attention(nn.Layer):
    """Qwen2 multi-head attention with a fused QKV projection and an output
    projection."""

    def __init__(self, fd_config: FDConfig, layer_id: int, prefix: str = "") -> None:
        super().__init__()

        self.qkv_proj = QKVParallelLinear(fd_config=fd_config, prefix=f"{prefix}.qkv_proj", with_bias=True)

        self.o_proj = RowParallelLinear(
            fd_config=fd_config,
            prefix=f"{prefix}.o_proj",
            input_size=fd_config.model_config.hidden_size,
            output_size=fd_config.model_config.hidden_size,
        )

        self.attn = Attention(
            fd_config=fd_config,
            layer_id=layer_id,
            prefix=prefix,
            use_neox_rotary_style=True,
        )

    def load_state_dict(self, state_dict):
        """Load the attention projection weights from ``state_dict``."""
        self.qkv_proj.load_state_dict(state_dict)
        self.o_proj.load_state_dict(state_dict)
        self.attn.load_state_dict(state_dict)

    def forward(
        self,
        forward_meta: ForwardMeta,
        hidden_states: paddle.Tensor,
    ):
        """Run attention over ``hidden_states`` using the metadata in
        ``forward_meta``."""
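        # One fused matmul yields Q, K and V concatenated; the attention
        # backend splits them, applies rotary position embeddings (NeoX
        # style, per use_neox_rotary_style=True above), and attends using
        # the state carried in forward_meta.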
        qkv_out = self.qkv_proj(hidden_states)

        atten_out = self.attn(
            qkv=qkv_out,
            forward_meta=forward_meta,
        )
        output = self.o_proj(atten_out)
        return output


class Qwen2DecoderLayer(nn.Layer):
    """A single Qwen2 transformer decoder layer: self-attention and MLP, each
    preceded by an RMSNorm."""

    def __init__(
        self,
        fd_config: FDConfig,
        prefix: str = "",
    ) -> None:
        super().__init__()
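        # The layer index is the trailing component of the weight prefix,
        # e.g. "qwen2.layers.3" -> 3.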
        layer_id = int(prefix.split(sep=".")[-1])

        self.self_attn = Qwen2Attention(
            fd_config=fd_config,
            layer_id=layer_id,
            prefix=f"{prefix}.self_attn",
        )

        self.mlp = Qwen2MLP(
            fd_config=fd_config,
            prefix=f"{prefix}.mlp",
        )

        self.input_layernorm = RMSNorm(
            fd_config,
            hidden_size=fd_config.model_config.hidden_size,
            eps=fd_config.model_config.rms_norm_eps,
            prefix=f"{prefix}.input_layernorm",
        )

        self.post_attention_layernorm = RMSNorm(
            fd_config,
            hidden_size=fd_config.model_config.hidden_size,
            eps=fd_config.model_config.rms_norm_eps,
            prefix=f"{prefix}.post_attention_layernorm",
        )

    def load_state_dict(self, state_dict):
        """Load the weights of every submodule in this layer from
        ``state_dict``."""
        self.self_attn.load_state_dict(state_dict)
        self.mlp.load_state_dict(state_dict)
        self.input_layernorm.load_state_dict(state_dict)
        self.post_attention_layernorm.load_state_dict(state_dict)

    def forward(
        self,
        forward_meta: ForwardMeta,
        hidden_states: paddle.Tensor,
        residual: paddle.Tensor = None,
    ):
        """Run one decoder layer and return ``(hidden_states, residual)``."""
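        # RMSNorm fuses the residual add with normalization: called with one
        # tensor it only normalizes; called with two it returns
        # (normalized, new_residual), so the skip connection is threaded
        # through the layer and the final add is deferred to the caller.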
        # Self Attention
        if residual is None:
            residual = hidden_states
            hidden_states = self.input_layernorm(hidden_states)
        else:
            hidden_states, residual = self.input_layernorm(hidden_states, residual)

        hidden_states = self.self_attn(
            hidden_states=hidden_states,
            forward_meta=forward_meta,
        )

        # Fully Connected
        hidden_states, residual = self.post_attention_layernorm(hidden_states, residual)

        hidden_states = self.mlp(hidden_states)

        return hidden_states, residual


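# ``support_graph_optimization`` wraps the model's forward pass so it can be
# captured and replayed by the graph optimization backend (see
# ``clear_grpah_opt_backend`` below, which releases the captured CUDA graph).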
@support_graph_optimization
class Qwen2Model(nn.Layer):
    """The Qwen2 transformer backbone: token embedding, stacked decoder
    layers, and a final RMSNorm."""

    def __init__(
        self,
        fd_config: FDConfig = None,
    ):
        """
        Initializer for the Qwen2Model class.

        Args:
            fd_config (FDConfig): Configurations for the LLM model.
        """
        super().__init__()

        self.num_layers = fd_config.model_config.num_hidden_layers
        fd_config.model_config.pretrained_config.prefix_name = "qwen2"

        self.embed_tokens = VocabParallelEmbedding(
            fd_config=fd_config,
            num_embeddings=fd_config.model_config.vocab_size,
            embedding_dim=fd_config.model_config.hidden_size,
            params_dtype=paddle.get_default_dtype(),
            prefix=(f"{fd_config.model_config.pretrained_config.prefix_name}.embed_tokens"),
        )

        self.layers = nn.LayerList(
            [
                Qwen2DecoderLayer(
                    fd_config=fd_config,
                    prefix=f"{fd_config.model_config.pretrained_config.prefix_name}.layers.{i}",
                )
                for i in range(self.num_layers)
            ]
        )

        self.norm = RMSNorm(
            fd_config,
            hidden_size=fd_config.model_config.hidden_size,
            eps=fd_config.model_config.rms_norm_eps,
            prefix=f"{fd_config.model_config.pretrained_config.prefix_name}.norm",
        )

    def load_state_dict(self, state_dict):
        """
        Load model parameters from a given state dictionary.

        Args:
            state_dict (dict[str, np.ndarray | paddle.Tensor]):
                A dictionary containing model parameters, where keys are parameter names
                and values are NumPy arrays or PaddlePaddle tensors.
        """
        self.embed_tokens.load_state_dict(state_dict)
        self.norm.load_state_dict(state_dict)
        for i in range(self.num_layers):
            logger.info(f"Start loading layer {i}")
            self.layers[i].load_state_dict(state_dict)

    def forward(
        self,
        ids_remove_padding: paddle.Tensor,
        forward_meta: ForwardMeta,
    ):
        """Embed the input ids, run all decoder layers, and return the
        normalized final hidden states."""
        hidden_states = self.embed_tokens(ids_remove_padding=ids_remove_padding)

        residual = None

        for i in range(self.num_layers):
            hidden_states, residual = self.layers[i](forward_meta, hidden_states, residual)

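        # Add the last deferred residual before the final norm; inside the
        # loop each residual add is fused into the next layer's RMSNorm.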
        hidden_states = hidden_states + residual

        out = self.norm(hidden_states)

        return out


@ModelRegistry.register_model_class(
    architecture="Qwen2ForCausalLM",
    module_name="qwen2",
    category=[ModelCategory.TEXT_GENERATION, ModelCategory.EMBEDDING],
    primary_use=ModelCategory.TEXT_GENERATION,
)
class Qwen2ForCausalLM(ModelForCasualLM):
    """
    Qwen2ForCausalLM: the Qwen2Model backbone with a parallel LM head for
    causal language modeling.
    """

    def __init__(self, fd_config: FDConfig):
        """
        Args:
            fd_config (FDConfig): Configurations for the LLM model.
        """
        super().__init__(fd_config)

        self.fd_config = fd_config
        self.qwen2 = Qwen2Model(fd_config=fd_config)

        self.ori_vocab_size = fd_config.model_config.ori_vocab_size

        self.lm_head = ParallelLMHead(
            fd_config=fd_config,
            embedding_dim=fd_config.model_config.hidden_size,
            num_embeddings=fd_config.model_config.vocab_size,
            prefix="lm_head",
        )

    @paddle.no_grad()
    def load_weights(self, weights_iterator) -> None:
        """
        Load model parameters from a given weights_iterator object.

        Args:
            weights_iterator (Iterator): An iterator yielding (name, weight) pairs.
        """
        from fastdeploy.model_executor.utils import (
            default_weight_loader,
            process_weights_after_loading,
        )

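        # Checkpoints store q/k/v and gate/up as separate tensors; each entry
        # maps a source weight name onto the fused runtime parameter, with
        # the shard id telling the weight loader where to place the slice.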
        stacked_params_mapping = [
            # (param_name, shard_name, shard_id)
            ("qkv_proj", "q_proj", "q"),
            ("qkv_proj", "k_proj", "k"),
            ("qkv_proj", "v_proj", "v"),
            ("up_gate_proj", "gate_proj", "gate"),
            ("up_gate_proj", "up_proj", "up"),
            ("embed_tokens.embeddings", "embed_tokens", None),
            ("lm_head.linear", "lm_head", None),
        ]

        params_dict = dict(self.named_parameters())
        process_weights_after_loading_fn = process_weights_after_loading(dict(self.named_sublayers()))
        for loaded_weight_name, loaded_weight in weights_iterator:
            model_format = self.fd_config.model_config.model_format
            # Paddle checkpoints use the prefix "qwen2"; Hugging Face
            # checkpoints use "model".
            if model_format == "torch":
                loaded_weight_name = loaded_weight_name.replace("model", "qwen2")
            for param_name, weight_name, shard_id in stacked_params_mapping:
                if weight_name not in loaded_weight_name:
                    continue
                model_param_name = loaded_weight_name.replace(weight_name, param_name)
                if model_param_name not in params_dict:
                    continue
                param = params_dict[model_param_name]
                weight_loader = getattr(param, "weight_loader", default_weight_loader(self.fd_config))
                weight_loader(param, loaded_weight, shard_id)
                break
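            # for/else: runs only when no stacked mapping matched, i.e. the
            # checkpoint weight maps one-to-one onto a runtime parameter.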
            else:
                model_param_name = loaded_weight_name
                if model_param_name not in params_dict:
                    continue
                param = params_dict[model_param_name]
                weight_loader = getattr(param, "weight_loader", default_weight_loader(self.fd_config))
                weight_loader(param, loaded_weight)
            model_sublayer_name = re.sub(r"\.(weight)$", "", model_param_name)
            process_weights_after_loading_fn(model_sublayer_name, param)

    @classmethod
    def name(cls):
        """Return the architecture name of this model."""
        return "Qwen2ForCausalLM"

    @paddle.no_grad()
    def set_state_dict(self, state_dict):
        """
        Load model parameters from a given state dictionary.

        Args:
            state_dict (dict[str, np.ndarray | paddle.Tensor]):
                A dictionary containing model parameters, where keys are parameter names
                and values are NumPy arrays or PaddlePaddle tensors.
        """
        self.qwen2.load_state_dict(state_dict)
        self.lm_head.load_state_dict(state_dict)

    def compute_logits(self, hidden_states: paddle.Tensor):
        """Project hidden states onto the vocabulary and return float32 logits."""
        logits = self.lm_head(hidden_states)
        logits = logits.astype(paddle.float32)
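        # Mask the padded vocabulary tail (token ids >= ori_vocab_size) so
        # those slots can never be sampled.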
        logits[:, self.ori_vocab_size :] = -float("inf")

        return logits

    def forward(
        self,
        ids_remove_padding: paddle.Tensor,
        forward_meta: ForwardMeta,
    ):
        """Run the Qwen2 backbone and return the final hidden states."""
        hidden_states = self.qwen2(ids_remove_padding=ids_remove_padding, forward_meta=forward_meta)

        return hidden_states

    def clear_grpah_opt_backend(self):
        """Clear the graph optimization backend; the captured CUDA graph is released."""
        self.qwen2.clear_grpah_opt_backend(fd_config=self.fd_config)


class Qwen2PretrainedModel(PretrainedModel):
    """
    Qwen2PretrainedModel: PaddleFormers integration shim that supplies the
    tensor parallel weight mappings for Qwen2 checkpoints.
    """

    config_class = FDConfig

    def _init_weight(self, layer):
        """Weight initialization is a no-op; parameters are loaded from a checkpoint."""
        return None

    @classmethod
    def arch_name(cls):
        """Return the architecture name of this model."""
        return "Qwen2ForCausalLM"

    @classmethod
    def _get_tensor_parallel_mappings(cls, config: ModelConfig, is_split=True):
        from paddleformers.transformers.conversion_utils import split_or_merge_func

        fn = split_or_merge_func(
            is_split=is_split,
            tensor_parallel_degree=config.tensor_parallel_degree,
            tensor_parallel_rank=config.tensor_parallel_rank,
            num_attention_heads=config.num_attention_heads,
        )

        def get_tensor_parallel_split_mappings(num_layers):
            final_actions = {}

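            # Column-parallel weights are split along the output dimension,
            # row-parallel weights along the input dimension; each entry maps
            # a parameter name to the matching split/merge function.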
            base_actions = {
                "lm_head.weight": partial(fn, is_column=True),
                # Row Linear
                "embed_tokens.weight": partial(fn, is_column=False),
                "layers.0.self_attn.o_proj.weight": partial(fn, is_column=False),
                "layers.0.mlp.down_proj.weight": partial(fn, is_column=False),
            }

            # Column Linear
            if config.fuse_attention_qkv:
                base_actions["layers.0.self_attn.qkv_proj.weight"] = partial(fn, is_column=True)
            else:
                base_actions["layers.0.self_attn.q_proj.weight"] = partial(fn, is_column=True)
                base_actions["layers.0.self_attn.q_proj.bias"] = partial(fn, is_column=True)
                # Split k/v only when the key/value heads divide evenly
                # across the tensor parallel ranks.
                if config.num_key_value_heads % config.tensor_parallel_degree == 0:
                    base_actions["layers.0.self_attn.k_proj.weight"] = partial(fn, is_column=True)
                    base_actions["layers.0.self_attn.v_proj.weight"] = partial(fn, is_column=True)
                    base_actions["layers.0.self_attn.k_proj.bias"] = partial(fn, is_column=True)
                    base_actions["layers.0.self_attn.v_proj.bias"] = partial(fn, is_column=True)

            base_actions["layers.0.mlp.gate_proj.weight"] = partial(fn, is_column=True)
            base_actions["layers.0.mlp.up_proj.weight"] = partial(fn, is_column=True)

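            # Replicate each per-layer rule across all decoder layers by
            # rewriting the "layers.0." prefix.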
            for key, action in base_actions.items():
                if "layers.0." in key:
                    for i in range(num_layers):
                        final_actions[key.replace("layers.0.", f"layers.{i}.")] = action
                final_actions[key] = action

            return final_actions

        mappings = get_tensor_parallel_split_mappings(config.num_hidden_layers)

        return mappings