[NewFeatures] support eplb (#3547)

* [NewFeatures] support eplb

* fix eplb
Author: xiaoxiaohehe001
Date: 2025-08-26 16:19:30 +08:00, committed by GitHub
Parent: 56e2d7e668
Commit: 9afa236e39
17 changed files with 174 additions and 67 deletions


@@ -80,6 +80,7 @@ class FusedMoE(nn.Layer):
         layer_idx: int = -1,
         moe_tag: str = "",
         gate_correction_bias=None,
+        redundant_table_manger: RedundantExpertManger = None,
         weight_key_map: dict = {},
     ):
         """
@@ -147,15 +148,8 @@ class FusedMoE(nn.Layer):
             self.moe_quant_type = moe_quant_config.name()
         else:
             self.quant_method = get_moe_method()
-        self.redundant_table_manger = None
+        self.redundant_table_manger = redundant_table_manger
         if self.ep_size > 1:
-            if fd_config.model_config.enable_redundant_experts is True:
-                self.redundant_table_manger = RedundantExpertManger(
-                    n_routed_experts=fd_config.model_config.moe_num_experts,
-                    num_hidden_layers=fd_config.model_config.num_hidden_layers,
-                    redundant_experts_num=fd_config.model_config.redundant_experts_num,
-                    ep_size=self.ep_size,
-                )
             self.quant_method.init_ep(self)
         if fd_config.load_config.dynamic_load_weight:
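
The two hunks above change who owns the redundant-expert table: rather than each FusedMoE layer constructing its own RedundantExpertManger when enable_redundant_experts is set, the caller now builds a single manager and injects it through the new redundant_table_manger constructor argument, so every layer shares one placement table. A minimal sketch of the new wiring, reusing the constructor arguments visible in the removed lines (the surrounding builder code and variable names are hypothetical):

    # Build one manager up front when expert parallelism and redundant
    # experts are enabled, then hand the same instance to every MoE layer.
    redundant_table_manger = None
    if ep_size > 1 and fd_config.model_config.enable_redundant_experts:
        redundant_table_manger = RedundantExpertManger(
            n_routed_experts=fd_config.model_config.moe_num_experts,
            num_hidden_layers=fd_config.model_config.num_hidden_layers,
            redundant_experts_num=fd_config.model_config.redundant_experts_num,
            ep_size=ep_size,
        )

    moe_layer = FusedMoE(
        fd_config=fd_config,
        layer_idx=0,
        redundant_table_manger=redundant_table_manger,  # new in this commit
    )

Sharing one manager keeps the logical-to-physical expert mapping consistent across layers, which matters once EPLB starts moving experts between ranks.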
@@ -423,6 +417,7 @@ class FusedMoE(nn.Layer):
         state_dict: dict,
         up_gate_proj_expert_weight_key: str,
         down_proj_expert_weight_key: str,
+        is_rearrange: bool = False,
     ):
         """
         Load experts weight from state_dict.
@@ -451,7 +446,12 @@ class FusedMoE(nn.Layer):
         ]
         up_gate_proj_weights = []
         down_proj_weights = []
-        is_ffn_merged = up_gate_proj_expert_weight_key.format(self.expert_id_offset) in state_dict
+        if isinstance(state_dict, list):
+            state_dict = dict(state_dict)
+        is_ffn_merged = (
+            up_gate_proj_expert_weight_key.format(logical_expert_ids[0] if is_rearrange else self.expert_id_offset)
+            in state_dict
+        )
         if is_ffn_merged:
             for expert_idx in logical_expert_ids:
                 down_proj_expert_weight_key_name = down_proj_expert_weight_key.format(expert_idx)
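
The probe that detects a merged-FFN checkpoint must now survive EPLB rearrangement: the new path converts a list-form state_dict to a dict and, when is_rearrange is set, formats the probe key with the rank's first logical expert id instead of the contiguous expert_id_offset, because a rearranged rank no longer owns the slice [offset, offset + num_local_experts). A toy illustration with made-up key names and ids:

    # Stand-in key template and ids; real keys come from weight_key_map.
    up_gate_proj_expert_weight_key = "layers.0.mlp.experts.{}.up_gate_proj.weight"
    state_dict = {"layers.0.mlp.experts.7.up_gate_proj.weight": "tensor"}

    logical_expert_ids = [7, 3, 12]  # experts this rank owns under the EPLB table
    expert_id_offset = 0             # first local expert without rearrangement
    is_rearrange = True

    probe_id = logical_expert_ids[0] if is_rearrange else expert_id_offset
    is_ffn_merged = up_gate_proj_expert_weight_key.format(probe_id) in state_dict
    assert is_ffn_merged  # the key for expert 7 exists, so merged layout is detected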
@@ -533,10 +533,12 @@ class FusedMoE(nn.Layer):
         assert up_gate_proj_expert_weight_key is not None, "up_gate_proj_expert_weight_key should not be none."
         assert down_proj_expert_weight_key is not None, "down_proj_expert_weight_key should not be none."
-        up_gate_proj_weights, down_proj_weights, logical_expert_ids, _ = self.load_experts_weight(
-            state_dict,
-            up_gate_proj_expert_weight_key,
-            down_proj_expert_weight_key,
-        )
+        up_gate_proj_weights, down_proj_weights, logical_expert_ids, ep_rank_to_expert_id_list = (
+            self.load_experts_weight(
+                state_dict,
+                up_gate_proj_expert_weight_key,
+                down_proj_expert_weight_key,
+            )
+        )
         assert (
             len(up_gate_proj_weights) == self.num_local_experts
         ), "up_gate_proj_weights length should be equal to num_local_experts."
@@ -545,7 +547,7 @@ class FusedMoE(nn.Layer):
             len(down_proj_weights) == self.num_local_experts
         ), "down_proj_weights length should be equal to num_local_experts."
-        return up_gate_proj_weights, down_proj_weights
+        return up_gate_proj_weights, down_proj_weights, logical_expert_ids, ep_rank_to_expert_id_list

     def extract_gate_correction_bias(self, gate_correction_bias_key, state_dict):
         """
@@ -561,7 +563,7 @@ class FusedMoE(nn.Layer):
         if is_supported_moe_backend is not None and is_supported_moe_backend(self.quant_method):
             if self.fd_config.model_config.is_quantized:
                 if getattr(self.fd_config.quant_config, "is_permuted", True):
-                    self.quant_method.process_prequanted_weights(self, state_dict)
+                    self.quant_method.process_prequanted_weights(self, state_dict, is_rearrange)
                 else:
                     self.quant_method.process_loaded_weights(self, state_dict)
         else:
@@ -569,7 +571,7 @@ class FusedMoE(nn.Layer):
         else:
             if self.fd_config.model_config.is_quantized:
                 if getattr(self.fd_config.quant_config, "is_permuted", True):
-                    self.quant_method.process_prequanted_weights(self, state_dict)
+                    self.quant_method.process_prequanted_weights(self, state_dict, is_rearrange)
                 else:
                     self.quant_method.create_weights(self, state_dict)
             else:
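
The final two hunks thread is_rearrange into the quantization backends: process_prequanted_weights now takes a third argument on both the supported-backend and fallback paths. Any out-of-tree quant method therefore has to accept the extra flag; a minimal sketch of the adjusted interface (class name and body are illustrative only, not the repo's actual backend):

    class MyMoEQuantMethod:
        def process_prequanted_weights(self, layer, state_dict, is_rearrange=False):
            # When the checkpoint was loaded under an EPLB-rearranged layout,
            # expert tensors arrive indexed by logical id; a backend can use
            # layer.redundant_table_manger to reorder them before binding.
            if is_rearrange:
                pass  # reorder expert weights/scales here (backend-specific)
            # bind tensors from state_dict to layer parameters here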