// Mirror of https://github.com/PaddlePaddle/FastDeploy.git
// Copyright (c) 2025 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once

#include "cutlass_kernels/moe_gemm/fused_moe_gemm_kernels.h"
#include "fused_moe_op.h"

using namespace phi;
// Masks gating logits according to per-token type ids.
//
// Assumes a two-expert layout: `gating_output` holds 2 logits per row
// (stride 2), and `moe_token_type_ids_out[row]` is 0 or 1 selecting which
// of the two logits survives.  A large negative value (-1e10) is added to
// the deselected logit so it loses in a subsequent top-k/softmax.
//   - type id == 1: expert 0's logit is suppressed.
//   - type id == 0: expert 1's logit is suppressed.
//
// Launch: 1D grid, one thread per row; threads past `num_rows` exit early,
// so any sufficiently large grid is valid.
//
// NOTE(review): `num_experts`, `k`, and `VecSize` are accepted but unused;
// the expert count is hardcoded to 2 via the stride-2 indexing — presumably
// intentional for a dual-expert model, but confirm against callers.
template <typename T, int VecSize>
__global__ void moe_token_type_ids_kernel(T *gating_output,
                                          const int *moe_token_type_ids_out,
                                          const int num_rows,
                                          const int num_experts,
                                          const int k) {
  const int moe_token_index = blockIdx.x * blockDim.x + threadIdx.x;

  // Grid tail guard: extra threads do nothing.
  if (moe_token_index >= num_rows) {
    return;
  }

  // Compute the additive mask in T rather than via a bare double literal
  // (`-1e10`), which would force double-precision arithmetic inside a
  // templated kernel.
  const T neg_large = static_cast<T>(-1e10f);
  const int type_id = moe_token_type_ids_out[moe_token_index];

  gating_output[moe_token_index * 2] =
      gating_output[moe_token_index * 2] +
      static_cast<T>(type_id) * neg_large;
  gating_output[moe_token_index * 2 + 1] =
      gating_output[moe_token_index * 2 + 1] +
      static_cast<T>(1 - type_id) * neg_large;
}
// Host-side launcher for moe_token_type_ids_kernel.
//
// Launches one thread per row of `gating_output` on `stream` (asynchronous;
// the caller owns synchronization).  `num_experts` and `k` are forwarded to
// the kernel unchanged (currently unused there).
//
// Fixes vs. the original:
//   - The original declared `threads = 512` but launched with a literal 512;
//     the variable is now actually used.
//   - The original sized the grid as `num_rows * k / 512 + 1`, over-launching
//     by a factor of k even though the kernel is indexed per row; the grid is
//     now the standard ceil-division over `num_rows`.  The kernel's bounds
//     guard makes both configurations produce identical results.
template <typename T>
void moe_token_type_ids_kernelLauncher(T *gating_output,
                                       const int *moe_token_type_ids_out,
                                       const int num_rows,
                                       const int num_experts,
                                       const int k,
                                       cudaStream_t stream) {
  const int threads = 512;
  // Ceil-division so the grid covers every row even when num_rows is not a
  // multiple of the block size.
  const int blocks = (num_rows + threads - 1) / threads;
  moe_token_type_ids_kernel<T, 1><<<blocks, threads, 0, stream>>>(
      gating_output, moe_token_type_ids_out, num_rows, num_experts, k);
}