diff --git a/fastdeploy/config.py b/fastdeploy/config.py
index 446e59298..bc275bf28 100644
--- a/fastdeploy/config.py
+++ b/fastdeploy/config.py
@@ -84,6 +84,7 @@ class ModelConfig(PretrainedConfig):
         head_dim: Optional[int] = None,
         tie_word_embeddings: bool = False,
         is_quantized: bool = False,
+        rms_norm_eps: float = 1e-5,
         **kwargs,
     ):
         super().__init__(**kwargs)
@@ -123,6 +124,7 @@ class ModelConfig(PretrainedConfig):
         self.dtype = dtype
         self.tie_word_embeddings = tie_word_embeddings
         self.is_quantized = is_quantized
+        self.rms_norm_eps = rms_norm_eps


 @dataclass
diff --git a/fastdeploy/model_executor/models/ernie4_5_moe.py b/fastdeploy/model_executor/models/ernie4_5_moe.py
index f6b73622a..a6d064043 100644
--- a/fastdeploy/model_executor/models/ernie4_5_moe.py
+++ b/fastdeploy/model_executor/models/ernie4_5_moe.py
@@ -288,14 +288,14 @@ class Ernie4_5_DecoderLayer(nn.Layer):
         self.input_layernorm = RMSNorm(
             fd_config,
             hidden_size=fd_config.model_config.hidden_size,
-            eps=1e-5,
+            eps=fd_config.model_config.rms_norm_eps,
             prefix=f"{prefix}.input_layernorm",
         )

         self.post_attention_layernorm = RMSNorm(
             fd_config,
             hidden_size=fd_config.model_config.hidden_size,
-            eps=1e-5,
+            eps=fd_config.model_config.rms_norm_eps,
             prefix=f"{prefix}.post_attention_layernorm",
         )

@@ -366,7 +366,7 @@ class Ernie4_5_Model(nn.Layer):
         self.norm = RMSNorm(
             fd_config,
             hidden_size=fd_config.model_config.hidden_size,
-            eps=1e-5,
+            eps=fd_config.model_config.rms_norm_eps,
             prefix=f"{fd_config.model_config.prefix_name}.norm",
         )

diff --git a/fastdeploy/model_executor/models/ernie4_5_mtp.py b/fastdeploy/model_executor/models/ernie4_5_mtp.py
index 029becc1e..7920155ec 100644
--- a/fastdeploy/model_executor/models/ernie4_5_mtp.py
+++ b/fastdeploy/model_executor/models/ernie4_5_mtp.py
@@ -275,14 +275,14 @@ class Ernie4_5_MTPModel(nn.Layer):
         self.enorm = RMSNorm(
             fd_config,
             hidden_size=fd_config.model_config.hidden_size,
-            eps=1e-5,
+            eps=fd_config.model_config.rms_norm_eps,
             prefix="ernie.mtp_emb_norm.0",
         )

         self.hnorm = RMSNorm(
             fd_config,
             hidden_size=fd_config.model_config.hidden_size,
-            eps=1e-5,
+            eps=fd_config.model_config.rms_norm_eps,
             prefix="ernie.mtp_hidden_norm.0",
         )

diff --git a/fastdeploy/model_executor/models/ernie4_5_vl/ernie4_5_vl_moe.py b/fastdeploy/model_executor/models/ernie4_5_vl/ernie4_5_vl_moe.py
index a08433a57..c908b56ab 100644
--- a/fastdeploy/model_executor/models/ernie4_5_vl/ernie4_5_vl_moe.py
+++ b/fastdeploy/model_executor/models/ernie4_5_vl/ernie4_5_vl_moe.py
@@ -271,14 +271,14 @@ class Ernie4_5_VLDecoderLayer(nn.Layer):
         self.input_layernorm = RMSNorm(
             fd_config,
             hidden_size=fd_config.model_config.hidden_size,
-            eps=1e-5,
+            eps=fd_config.model_config.rms_norm_eps,
             prefix=f"{prefix}.input_layernorm",
         )

         self.post_attention_layernorm = RMSNorm(
             fd_config,
             hidden_size=fd_config.model_config.hidden_size,
-            eps=1e-5,
+            eps=fd_config.model_config.rms_norm_eps,
             prefix=f"{prefix}.post_attention_layernorm",
         )

@@ -355,7 +355,7 @@ class Ernie4_5_VLModel(nn.Layer):
         self.norm = RMSNorm(
             fd_config,
             hidden_size=fd_config.model_config.hidden_size,
-            eps=1e-5,
+            eps=fd_config.model_config.rms_norm_eps,
             prefix=f"{fd_config.model_config.prefix_name}.norm",
         )

diff --git a/fastdeploy/model_executor/models/qwen2.py b/fastdeploy/model_executor/models/qwen2.py
index 242d6f9da..4fab1e30b 100644
--- a/fastdeploy/model_executor/models/qwen2.py
+++ b/fastdeploy/model_executor/models/qwen2.py
@@ -161,14 +161,14 @@ class Qwen2DecoderLayer(nn.Layer):
         self.input_layernorm = RMSNorm(
             fd_config,
             hidden_size=fd_config.model_config.hidden_size,
-            eps=1e-6,
+            eps=fd_config.model_config.rms_norm_eps,
             prefix=f"{prefix}.input_layernorm",
         )

         self.post_attention_layernorm = RMSNorm(
             fd_config,
             hidden_size=fd_config.model_config.hidden_size,
-            eps=1e-6,
+            eps=fd_config.model_config.rms_norm_eps,
             prefix=f"{prefix}.post_attention_layernorm",
         )

@@ -248,7 +248,7 @@ class Qwen2Model(nn.Layer):
         self.norm = RMSNorm(
             fd_config,
             hidden_size=fd_config.model_config.hidden_size,
-            eps=1e-5,
+            eps=fd_config.model_config.rms_norm_eps,
             prefix=f"{fd_config.model_config.prefix_name}.norm",
         )

diff --git a/fastdeploy/model_executor/models/qwen3.py b/fastdeploy/model_executor/models/qwen3.py
index c1654f414..2db0814d2 100644
--- a/fastdeploy/model_executor/models/qwen3.py
+++ b/fastdeploy/model_executor/models/qwen3.py
@@ -79,12 +79,12 @@ class Qwen3Attention(nn.Layer):
         self.q_norm = RMSNorm(fd_config=fd_config,
                               hidden_size=fd_config.model_config.head_dim,
-                              eps=1e-6,
+                              eps=fd_config.model_config.rms_norm_eps,
                               prefix=f"{prefix}.q_norm",
                               begin_norm_axis=2)

         self.k_norm = RMSNorm(fd_config=fd_config,
                               hidden_size=fd_config.model_config.head_dim,
-                              eps=1e-6,
+                              eps=fd_config.model_config.rms_norm_eps,
                               prefix=f"{prefix}.k_norm",
                               begin_norm_axis=2)

@@ -184,7 +184,7 @@ class Qwen3Model(nn.Layer):
         self.norm = RMSNorm(
             fd_config,
             hidden_size=fd_config.model_config.hidden_size,
-            eps=1e-6,
+            eps=fd_config.model_config.rms_norm_eps,
             prefix=f"{fd_config.model_config.prefix_name}.norm",
         )

diff --git a/fastdeploy/model_executor/models/qwen3moe.py b/fastdeploy/model_executor/models/qwen3moe.py
index c4d01ef6e..9962fa1ee 100644
--- a/fastdeploy/model_executor/models/qwen3moe.py
+++ b/fastdeploy/model_executor/models/qwen3moe.py
@@ -121,12 +121,12 @@ class Qwen3Attention(nn.Layer):
         self.q_norm = RMSNorm(fd_config,
                               hidden_size=self.head_dim,
-                              eps=1e-6,
+                              eps=fd_config.model_config.rms_norm_eps,
                               prefix=f"{prefix}.q_norm",
                               begin_norm_axis=2)

         self.k_norm = RMSNorm(fd_config,
                               hidden_size=self.head_dim,
-                              eps=1e-6,
+                              eps=fd_config.model_config.rms_norm_eps,
                               prefix=f"{prefix}.k_norm",
                               begin_norm_axis=2)
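Note: the eps being plumbed through here feeds the standard RMSNorm computation, x / sqrt(mean(x^2) + eps) * weight, where it keeps the denominator away from zero. Below is a minimal numpy sketch of that computation for reference; it is illustrative only, not FastDeploy's fused RMSNorm kernel, and the function name and shapes are made up for the example:

    import numpy as np

    def rms_norm(x: np.ndarray, weight: np.ndarray, eps: float) -> np.ndarray:
        # Scale x by the reciprocal root-mean-square over its last axis;
        # eps guards the sqrt/division when activations are near zero.
        variance = np.mean(np.square(x), axis=-1, keepdims=True)
        return x / np.sqrt(variance + eps) * weight

    hidden = np.random.randn(2, 8).astype(np.float32)
    weight = np.ones(8, dtype=np.float32)

    # After this patch, eps is read from ModelConfig.rms_norm_eps (default
    # 1e-5) rather than per-call-site literals; e.g. Qwen checkpoints
    # typically ship rms_norm_eps=1e-6 in config.json, while the ERNIE 4.5
    # call sites above used 1e-5.
    out = rms_norm(hidden, weight, eps=1e-6)

Reading the value from ModelConfig means each model runs with the eps its checkpoint specifies, instead of relying on hardcoded literals that can drift (note the pre-patch Qwen2 files mixed 1e-6 in the decoder layers with 1e-5 in the final norm).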