[Feature] add ue8m0 for per_token_quant_fp8 (#5563)

* ue8m0

* add default arg

---------

Co-authored-by: YuBaoku <49938469+EmmonsCurse@users.noreply.github.com>
Author: fxyfxy777
Date: 2025-12-16 18:40:12 +08:00
Committed by: GitHub
parent eeb99d2af5
commit 73e1d6aa90
4 changed files with 193 additions and 56 deletions


@@ -284,13 +284,16 @@ std::vector<paddle::Tensor> EPMoeExpertDispatchFP8(
     const int token_nums_this_rank_padded);
 
 std::vector<paddle::Tensor> PerTokenQuant(paddle::Tensor& input,
-                                          const int block_size);
+                                          const int block_size,
+                                          const bool use_ue8m0);
 std::vector<paddle::Tensor> PerTokenQuantPadding(paddle::Tensor& input,
-                                                 const int block_size);
+                                                 const int block_size,
+                                                 const bool use_ue8m0);
 std::vector<paddle::Tensor> MaskedPerTokenQuant(
     paddle::Tensor& input,
     paddle::Tensor& recv_expert_count,
-    const int block_size);
+    const int block_size,
+    const bool use_ue8m0);
 
 std::vector<paddle::Tensor> EPMoeExpertCombine(
     const paddle::Tensor& ffn_out,
@@ -1234,12 +1237,14 @@ PYBIND11_MODULE(fastdeploy_ops, m) {
         &PerTokenQuant,
         py::arg("input"),
         py::arg("block_size"),
+        py::arg("use_ue8m0") = false,
         "per token per block quant");
 
   m.def("per_token_quant_padding",
         &PerTokenQuantPadding,
         py::arg("input"),
         py::arg("block_size"),
+        py::arg("use_ue8m0") = false,
         "per token per block quant and padding transpose scale");
 
   m.def("masked_per_token_quant",
@@ -1247,6 +1252,7 @@ PYBIND11_MODULE(fastdeploy_ops, m) {
         py::arg("input"),
         py::arg("recv_expert_count"),
         py::arg("block_size"),
+        py::arg("use_ue8m0") = false,
         "per token per block quant");
 
 #ifdef ENABLE_MACHETE
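
Since use_ue8m0 is registered with a false default in all three bindings, existing call sites keep their behaviour. A hypothetical Python call site (a minimal sketch; the import path of the compiled ops module is an assumption):

    import paddle
    from fastdeploy_ops import per_token_quant  # assumed import path

    x = paddle.randn([4, 256], dtype="float32").astype("bfloat16")
    out, scale = per_token_quant(x, 128)                  # legacy behaviour
    out, scale = per_token_quant(x, 128, use_ue8m0=True)  # power-of-two scales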


@@ -16,6 +16,16 @@
 
 constexpr float epsilon = 1e-10;
 
+__device__ __forceinline__ float ceil_to_ue8m0(float s) {
+  int exp;
+  frexpf(s, &exp);
+  float pow2 = ldexpf(1.0f, exp - 1);
+  if (pow2 < s) {
+    pow2 = ldexpf(1.0f, exp);
+  }
+  return pow2;
+}
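
UE8M0 denotes a scale stored as an unsigned 8-bit exponent with no mantissa bits, so the only representable values are powers of two; ceil_to_ue8m0 accordingly rounds a scale up to the nearest power of two. Rounding up rather than to nearest guarantees the rounded scale is never smaller than the exact one, so quantized magnitudes stay inside the FP8 range. A host-side sketch of the same logic in Python, runnable without CUDA:

    import math

    def ceil_to_ue8m0(s: float) -> float:
        _, exp = math.frexp(s)           # s = m * 2**exp, with m in [0.5, 1)
        pow2 = math.ldexp(1.0, exp - 1)  # 2**(exp-1) <= s < 2**exp
        return pow2 if pow2 >= s else math.ldexp(1.0, exp)

    for s in (0.3, 0.5, 1.5, 100.0):
        print(s, "->", ceil_to_ue8m0(s))  # -> 0.5, 0.5, 2.0, 128.0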
+
 template <typename T>
 __global__ void quant_per_token_per_block(
     const T *input,
@@ -24,7 +34,8 @@ __global__ void quant_per_token_per_block(
     const int token_num,
     const int hidden_size,
     const int hidden_size_scale,
-    const bool use_finegrained_range) {
+    const bool use_finegrained_range,
+    const bool use_ue8m0) {
   const int bid = blockIdx.x;
   const int tid = threadIdx.x;
   const int warp_id = tid / 32;
@@ -83,11 +94,14 @@ __global__ void quant_per_token_per_block(
   }
 
   float scale_to_store = max_value_thread / MAX_VALUE;
+  if (use_ue8m0) {
+    scale_to_store = ceil_to_ue8m0(scale_to_store);
+  }
   // quant
 #pragma unroll
   for (int vid = 0; vid < NUM_PER_THREADS; vid++) {
     res_vec[vid] = static_cast<phi::dtype::float8_e4m3fn>(
-        load_vec_float[vid] * MAX_VALUE / max_value_thread);
+        load_vec_float[vid] / scale_to_store);
   }
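
The quantization step now divides by the very scale that is stored, so the quantized values and the (possibly UE8M0-rounded) scale cannot drift apart, and dequantizing with the stored scale exactly inverts the division. A minimal NumPy sketch of the per-token, per-block path, assuming MAX_VALUE is 448.0 (the float8_e4m3fn maximum) and emulating the FP8 cast with float32:

    import numpy as np

    FP8_E4M3_MAX = 448.0  # assumed value of MAX_VALUE

    def quant_per_token_per_block(x, block_size=128, use_ue8m0=False):
        t, h = x.shape
        blocks = x.reshape(t, h // block_size, block_size)
        scale = np.abs(blocks).max(axis=-1) / FP8_E4M3_MAX  # one scale per (token, block)
        if use_ue8m0:
            scale = np.exp2(np.ceil(np.log2(scale)))  # vectorized ceil_to_ue8m0
        q = blocks / scale[..., None]  # divide by the *stored* scale
        return q.reshape(t, h), scale

    x = np.random.randn(4, 256).astype(np.float32)
    q, s = quant_per_token_per_block(x, use_ue8m0=True)
    recon = (q.reshape(4, 2, 128) * s[..., None]).reshape(4, 256)
    assert np.allclose(recon, x)  # exact up to the emulated FP8 rounding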
   // store
   if (is_valid_data)
@@ -102,7 +116,8 @@ __global__ void quant_per_token_per_block(
 }
 
 std::vector<paddle::Tensor> PerTokenQuant(paddle::Tensor &input,
-                                          const int block_size) {
+                                          const int block_size,
+                                          const bool use_ue8m0) {
   auto input_dim = input.dims();
   const int token_num = input_dim[0];
   const int hidden_size = input_dim[1];
@@ -132,7 +147,8 @@ std::vector<paddle::Tensor> PerTokenQuant(paddle::Tensor &input,
           token_num,
           hidden_size,
           hidden_size_scale,
-          use_finegrained_range);
+          use_finegrained_range,
+          use_ue8m0);
       break;
     case paddle::DataType::FLOAT16:
       quant_per_token_per_block<<<gridx, blockx, 0, input.stream()>>>(
@@ -142,7 +158,8 @@ std::vector<paddle::Tensor> PerTokenQuant(paddle::Tensor &input,
           token_num,
           hidden_size,
           hidden_size_scale,
-          use_finegrained_range);
+          use_finegrained_range,
+          use_ue8m0);
       break;
     default:
       PD_THROW("Unsupported data type for PerTokenQuant");
@@ -159,7 +176,8 @@ __global__ void quant_per_token_per_block_padding(
     const int padded_token_num,
     const int hidden_size,
     const int hidden_size_scale,
-    const bool use_finegrained_range) {
+    const bool use_finegrained_range,
+    const bool use_ue8m0) {
   const int bid = blockIdx.x;
   const int tid = threadIdx.x;
   const int warp_id = tid / 32;
@@ -209,11 +227,14 @@ __global__ void quant_per_token_per_block_padding(
   }
 
   float scale_to_store = max_value_thread / MAX_VALUE;
+  if (use_ue8m0) {
+    scale_to_store = ceil_to_ue8m0(scale_to_store);
+  }
   // quant
 #pragma unroll
   for (int vid = 0; vid < NUM_PER_THREADS; vid++) {
     res_vec[vid] = static_cast<phi::dtype::float8_e4m3fn>(
-        load_vec_float[vid] * MAX_VALUE / max_value_thread);
+        load_vec_float[vid] / scale_to_store);
   }
   // store
   Store<phi::dtype::float8_e4m3fn, NUM_PER_THREADS>(
@@ -226,7 +247,8 @@ __global__ void quant_per_token_per_block_padding(
 }
 
 std::vector<paddle::Tensor> PerTokenQuantPadding(paddle::Tensor &input,
-                                                 const int block_size) {
+                                                 const int block_size,
+                                                 const bool use_ue8m0) {
   using ScaleDtype = float;
 
   auto input_dim = input.dims();
@@ -269,7 +291,8 @@ std::vector<paddle::Tensor> PerTokenQuantPadding(paddle::Tensor &input,
           padded_token_num,
           hidden_size,
           hidden_size_scale,
-          use_finegrained_range);
+          use_finegrained_range,
+          use_ue8m0);
       break;
     case paddle::DataType::FLOAT16:
       quant_per_token_per_block_padding<<<gridx, blockx, 0, input.stream()>>>(
@@ -280,7 +303,8 @@ std::vector<paddle::Tensor> PerTokenQuantPadding(paddle::Tensor &input,
           padded_token_num,
           hidden_size,
           hidden_size_scale,
-          use_finegrained_range);
+          use_finegrained_range,
+          use_ue8m0);
       break;
     default:
       PD_THROW("Unsupported data type for PerTokenQuant");
@@ -320,7 +344,8 @@ __global__ void masked_quant_per_token_per_block(
     const int hidden_size,
     const int hidden_size_scale,
     const int num_max_tokens_per_expert,
-    const bool use_finegrained_range) {
+    const bool use_finegrained_range,
+    const bool use_ue8m0) {
   const int bid = blockIdx.x;
   const int tid = threadIdx.x;
   const int warp_id = tid / 32;
@@ -382,11 +407,14 @@ __global__ void masked_quant_per_token_per_block(
   }
 
   float scale_to_store = max_value_thread / MAX_VALUE;
+  if (use_ue8m0) {
+    scale_to_store = ceil_to_ue8m0(scale_to_store);
+  }
   // quant
 #pragma unroll
   for (int vid = 0; vid < NUM_PER_THREADS; vid++) {
     res_vec[vid] = static_cast<phi::dtype::float8_e4m3fn>(
-        load_vec_float[vid] * MAX_VALUE / max_value_thread);
+        load_vec_float[vid] / scale_to_store);
   }
   // store
   Store<phi::dtype::float8_e4m3fn, NUM_PER_THREADS>(
@@ -401,7 +429,8 @@ __global__ void masked_quant_per_token_per_block(
 std::vector<paddle::Tensor> MaskedPerTokenQuant(
     paddle::Tensor &input,
     paddle::Tensor &recv_expert_count,
-    const int block_size) {
+    const int block_size,
+    const bool use_ue8m0) {
   auto input_dim = input.dims();
   const int num_local_expert = input_dim[0];
   const int num_max_tokens_per_expert = input_dim[1];
@@ -439,7 +468,8 @@ std::vector<paddle::Tensor> MaskedPerTokenQuant(
           hidden_size,
           hidden_size_scale,
           num_max_tokens_per_expert,
-          use_finegrained_range);
+          use_finegrained_range,
+          use_ue8m0);
       break;
     case paddle::DataType::FLOAT16:
      masked_quant_per_token_per_block<<<gridx, blockx, 0, input.stream()>>>(
@@ -451,7 +481,8 @@ std::vector<paddle::Tensor> MaskedPerTokenQuant(
           hidden_size,
           hidden_size_scale,
           num_max_tokens_per_expert,
-          use_finegrained_range);
+          use_finegrained_range,
+          use_ue8m0);
       break;
     default:
       PD_THROW("Unsupported data type for PerTokenQuant");
@@ -462,13 +493,13 @@ std::vector<paddle::Tensor> MaskedPerTokenQuant(
 PD_BUILD_STATIC_OP(per_token_quant)
     .Inputs({"input"})
     .Outputs({"output", "output_scale"})
-    .Attrs({"block_size: int"})
+    .Attrs({"block_size: int", "use_ue8m0: bool"})
     .SetKernelFn(PD_KERNEL(PerTokenQuant));
 
 PD_BUILD_STATIC_OP(per_token_quant_padding)
     .Inputs({"input"})
     .Outputs({"output", "output_scale"})
-    .Attrs({"block_size: int"})
+    .Attrs({"block_size: int", "use_ue8m0: bool"})
     .SetKernelFn(PD_KERNEL(PerTokenQuantPadding))
     .SetInferShapeFn(PD_INFER_SHAPE(PerTokenQuantPaddingInferShape))
     .SetInferDtypeFn(PD_INFER_DTYPE(PerTokenQuantPaddingInferDtype));
@@ -476,5 +507,5 @@ PD_BUILD_STATIC_OP(per_token_quant_padding)
 PD_BUILD_STATIC_OP(masked_per_token_quant)
     .Inputs({"input", "recv_expert_count"})
     .Outputs({"output", "output_scale"})
-    .Attrs({"block_size: int"})
+    .Attrs({"block_size: int", "use_ue8m0: bool"})
     .SetKernelFn(PD_KERNEL(MaskedPerTokenQuant));
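
Each static op mirrors its pybind binding with a use_ue8m0: bool attribute next to block_size: int, so graph-mode and eager call sites stay in sync. As for what the rounding costs: ceiling can inflate the scale by up to 2x, which costs at most one bit of effective mantissa. A hedged sketch comparing round-trip error with and without UE8M0 scales (ml_dtypes is an assumed helper for a real float8_e4m3fn cast; any FP8 emulation works):

    import numpy as np
    import ml_dtypes  # assumed dependency for an e4m3 cast

    def roundtrip_err(x, scale):
        q = (x / scale).astype(ml_dtypes.float8_e4m3fn).astype(np.float32)
        return float(np.abs(q * scale - x).max())

    x = np.random.randn(4096).astype(np.float32)
    exact = float(np.abs(x).max()) / 448.0
    ue8m0 = float(np.exp2(np.ceil(np.log2(exact))))
    print(roundtrip_err(x, exact), roundtrip_err(x, ue8m0))  # ue8m0 error at most ~2x larger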