diff --git a/fastdeploy/model_executor/layers/moe/fused_moe_backend_base.py b/fastdeploy/model_executor/layers/moe/fused_moe_backend_base.py
index efe0ec1be..8c35fa83a 100644
--- a/fastdeploy/model_executor/layers/moe/fused_moe_backend_base.py
+++ b/fastdeploy/model_executor/layers/moe/fused_moe_backend_base.py
@@ -175,12 +175,13 @@ class MoEMethodBase(QuantMethodBase):
         Paddle Cutlass compute Fused MoE.
         """
         if layer.ep_size > 1:
-            if layer.fd_config.model_config.moe_phase.phase == "prefill" and layer.layer_idx == 0:
-                if layer.fd_config.scheduler_config.splitwise_role == "mixed":
+            is_moe_start_layer = layer.layer_idx == layer.fd_config.model_config.moe_layer_start_index
+            if layer.fd_config.model_config.moe_phase.phase == "prefill":
+                if layer.fd_config.scheduler_config.splitwise_role == "mixed" and is_moe_start_layer:
                     self.ep_prefill_runner.clean_low_latency_buffer()
                 return self.apply_ep_prefill(layer, x, gate)
             else:
-                if layer.fd_config.scheduler_config.splitwise_role == "mixed" and layer.layer_idx == 0:
+                if layer.fd_config.scheduler_config.splitwise_role == "mixed" and is_moe_start_layer:
                     self.ep_decoder_runner.clean_low_latency_buffer()
                 return self.apply_ep_decode(layer, x, gate)
         else:
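
Note (not part of the patch): a minimal, self-contained sketch of why the cleanup gate moves from `layer_idx == 0` to the first MoE layer. The `MOE_LAYER_START_INDEX` value and the stub gate functions below are hypothetical and only illustrate the condition change, under the assumption that only MoE layers reach `MoEMethodBase.apply()` and that a model may place dense layers before `moe_layer_start_index`, in which case no MoE layer has `layer_idx == 0` and the old gate would never trigger the buffer cleanup.

```python
# Hypothetical illustration (not FastDeploy code) of the gating change in this diff.
# Assumption: only MoE layers reach MoEMethodBase.apply(), and the model places
# dense layers before the first MoE layer (moe_layer_start_index > 0).

MOE_LAYER_START_INDEX = 3  # hypothetical model: layers 0-2 are dense
NUM_LAYERS = 6


def old_gate(layer_idx: int) -> bool:
    """Old condition: clean the low-latency buffer only at layer 0."""
    return layer_idx == 0


def new_gate(layer_idx: int) -> bool:
    """New condition: clean the buffer at the first MoE layer."""
    return layer_idx == MOE_LAYER_START_INDEX


if __name__ == "__main__":
    moe_layers = range(MOE_LAYER_START_INDEX, NUM_LAYERS)  # layers that actually hit apply()
    print("old gate fires on:", [i for i in moe_layers if old_gate(i)])  # [] -> buffer never cleaned
    print("new gate fires on:", [i for i in moe_layers if new_gate(i)])  # [3] -> cleaned exactly once
```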