Mirror of https://github.com/PaddlePaddle/FastDeploy.git (synced 2025-12-24 13:28:13 +08:00)
This reverts commit 73e1d6aa90.
@@ -23,20 +23,7 @@ import paddle
 from fastdeploy.model_executor.ops.gpu import masked_per_token_quant
 
 
-def ceil_to_ue8m0_paddle(x: paddle.Tensor):
-    """
-    x > 0
-    return 2 ^ ceil(log2(x))
-    """
-    # log2(x)
-    log2_x = paddle.log(x) / paddle.log(paddle.to_tensor(2.0, dtype=x.dtype))
-    # ceil
-    ceil_log2_x = paddle.ceil(log2_x)
-    # 2^k
-    return paddle.pow(paddle.to_tensor(2.0, dtype=x.dtype), ceil_log2_x)
-
-
-def masked_per_token_quant_ref(input_tensor, recv_expert_count, block_size, use_ue8m0):
+def masked_per_token_quant_ref(input_tensor, recv_expert_count, block_size):
     """
     Paddle API implementation of masked_per_token_quant
 
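For context, the helper deleted above rounds a positive scale up to the next power of two so it can be stored in the UE8M0 format, which (as the name suggests) keeps only an 8-bit exponent, with no sign or mantissa bits. A minimal sketch of the same computation; it uses paddle.log2 directly, whereas the deleted helper builds log2 from paddle.log:

    import paddle

    # Round each positive scale up to the next power of two, as UE8M0 requires.
    # Rounding up (not down) keeps |x| / scale inside the quantization range.
    x = paddle.to_tensor([0.3, 1.0, 5.0], dtype="float32")
    ue8m0 = paddle.pow(paddle.to_tensor(2.0), paddle.ceil(paddle.log2(x)))
    print(ue8m0.numpy())  # [0.5, 1.0, 8.0]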
@@ -97,9 +84,6 @@ def masked_per_token_quant_ref(input_tensor, recv_expert_count, block_size, use_
     # Calculate scale
     scale = max_abs_val / MAX_VALUE
 
-    if use_ue8m0:
-        scale = ceil_to_ue8m0_paddle(scale)
-
     # Quantize
     quanted_value = reshaped_input / scale
 
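The MAX_VALUE this scale rule divides by is 448.0, the largest finite FP8 E4M3 value, so dividing each block by max_abs / 448 maps it exactly into the representable range. A self-contained sketch of the per-block arithmetic (variable names are ours, not from the test):

    import paddle

    # One scale per block: after dividing, the block's largest magnitude is
    # exactly 448.0, the FP8 E4M3 maximum.
    block = paddle.to_tensor([[-0.02, 0.9, -3.5, 1.7]], dtype="float32")
    scale = paddle.max(paddle.abs(block), axis=-1, keepdim=True) / 448.0
    quanted = block / scale
    print(float(paddle.abs(quanted).max()))  # 448.0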
@@ -136,11 +120,10 @@ class TestMaskedPerTokenQuant(unittest.TestCase):
             [self.num_local_expert, self.num_max_tokens_per_expert, self.hidden_size], dtype=self.dtype
         )
         self.recv_expert_count = paddle.to_tensor([3, 2], dtype="int32")
-        self.use_ue8m0 = True
 
         # Get reference results from paddle implementation
         self.quanted_x_ref, self.quanted_scale_ref = masked_per_token_quant_ref(
-            self.input_tensor, self.recv_expert_count, self.block_size, self.use_ue8m0
+            self.input_tensor, self.recv_expert_count, self.block_size
         )
 
     def _mask_invalid_tokens(self, quanted_x, quanted_scale, recv_expert_count):
@@ -166,7 +149,7 @@ class TestMaskedPerTokenQuant(unittest.TestCase):
|
||||
def test_masked_per_token_quant_basic(self):
|
||||
"""Test basic functionality against CUDA kernel"""
|
||||
quanted_x_cuda, quanted_scale_cuda = masked_per_token_quant(
|
||||
self.input_tensor, self.recv_expert_count, self.block_size, self.use_ue8m0
|
||||
self.input_tensor, self.recv_expert_count, self.block_size
|
||||
)
|
||||
|
||||
quanted_x_cuda_masked, quanted_scale_cuda_masked = self._mask_invalid_tokens(
|
||||
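The body of _mask_invalid_tokens is not part of this diff. A hedged sketch of what such a helper plausibly does, so the comparison makes sense: it zeroes out token slots at or beyond recv_expert_count[e], because the CUDA kernel may leave garbage in unused slots of the fixed-size [num_expert, max_tokens, hidden] buffers:

    import paddle

    # quanted_x: [E, T, H]; quanted_scale: [E, T, H // block_size].
    # Slots with index >= recv_expert_count[e] are invalid and get zeroed.
    def mask_invalid_tokens(quanted_x, quanted_scale, recv_expert_count):
        max_tokens = quanted_x.shape[1]
        token_idx = paddle.arange(max_tokens, dtype="int32").unsqueeze(0)       # [1, T]
        valid = (token_idx < recv_expert_count.unsqueeze(1)).astype("float32")  # [E, T]
        return (
            quanted_x.astype("float32") * valid.unsqueeze(-1),
            quanted_scale * valid.unsqueeze(-1),
        )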
@@ -194,28 +177,6 @@ class TestMaskedPerTokenQuant(unittest.TestCase):
         self.assertLess(diff_val, 0.01, msg="Quantized values should be close")
 
 
-class TestMaskedPerTokenQuantWithUe8m0Case1(TestMaskedPerTokenQuant):
-    """Test with float16 input"""
-
-    def setUp(self) -> None:
-        paddle.seed(2024)
-        self.num_local_expert = 3
-        self.num_max_tokens_per_expert = 6
-        self.hidden_size = 512
-        self.block_size = 128
-        self.dtype = paddle.float16
-        self.use_ue8m0 = True
-
-        self.input_tensor = paddle.randn(
-            [self.num_local_expert, self.num_max_tokens_per_expert, self.hidden_size], dtype=self.dtype
-        )
-        self.recv_expert_count = paddle.to_tensor([4, 2, 5], dtype="int32")
-
-        self.quanted_x_ref, self.quanted_scale_ref = masked_per_token_quant_ref(
-            self.input_tensor, self.recv_expert_count, self.block_size, self.use_ue8m0
-        )
-
-
 class TestMaskedPerTokenQuantCase1(TestMaskedPerTokenQuant):
     """Test with float16 input"""
 
@@ -226,7 +187,6 @@ class TestMaskedPerTokenQuantCase1(TestMaskedPerTokenQuant):
         self.hidden_size = 512
         self.block_size = 128
         self.dtype = paddle.float16
-        self.use_ue8m0 = False
 
         self.input_tensor = paddle.randn(
             [self.num_local_expert, self.num_max_tokens_per_expert, self.hidden_size], dtype=self.dtype
@@ -234,29 +194,7 @@ class TestMaskedPerTokenQuantCase1(TestMaskedPerTokenQuant):
         self.recv_expert_count = paddle.to_tensor([4, 2, 5], dtype="int32")
 
         self.quanted_x_ref, self.quanted_scale_ref = masked_per_token_quant_ref(
-            self.input_tensor, self.recv_expert_count, self.block_size, self.use_ue8m0
-        )
-
-
-class TestMaskedPerTokenQuantWithUe8m0Case2(TestMaskedPerTokenQuant):
-    """Test with different hidden size"""
-
-    def setUp(self) -> None:
-        paddle.seed(2024)
-        self.num_local_expert = 4
-        self.num_max_tokens_per_expert = 8
-        self.hidden_size = 384  # 3 * 128
-        self.block_size = 128
-        self.dtype = paddle.bfloat16
-        self.use_ue8m0 = True
-
-        self.input_tensor = paddle.randn(
-            [self.num_local_expert, self.num_max_tokens_per_expert, self.hidden_size], dtype=self.dtype
-        )
-        self.recv_expert_count = paddle.to_tensor([6, 3, 7, 1], dtype="int32")
-
-        self.quanted_x_ref, self.quanted_scale_ref = masked_per_token_quant_ref(
-            self.input_tensor, self.recv_expert_count, self.block_size, self.use_ue8m0
+            self.input_tensor, self.recv_expert_count, self.block_size
         )
 
 
@@ -270,7 +208,6 @@ class TestMaskedPerTokenQuantCase2(TestMaskedPerTokenQuant):
         self.hidden_size = 384  # 3 * 128
         self.block_size = 128
         self.dtype = paddle.bfloat16
-        self.use_ue8m0 = False
 
         self.input_tensor = paddle.randn(
             [self.num_local_expert, self.num_max_tokens_per_expert, self.hidden_size], dtype=self.dtype
@@ -278,29 +215,7 @@ class TestMaskedPerTokenQuantCase2(TestMaskedPerTokenQuant):
         self.recv_expert_count = paddle.to_tensor([6, 3, 7, 1], dtype="int32")
 
         self.quanted_x_ref, self.quanted_scale_ref = masked_per_token_quant_ref(
-            self.input_tensor, self.recv_expert_count, self.block_size, self.use_ue8m0
-        )
-
-
-class TestMaskedPerTokenQuantWithUe8m0Case3(TestMaskedPerTokenQuant):
-    """Test with all experts having max tokens"""
-
-    def setUp(self) -> None:
-        paddle.seed(2024)
-        self.num_local_expert = 2
-        self.num_max_tokens_per_expert = 4
-        self.hidden_size = 256
-        self.block_size = 128
-        self.dtype = paddle.bfloat16
-        self.use_ue8m0 = True
-        self.input_tensor = paddle.randn(
-            [self.num_local_expert, self.num_max_tokens_per_expert, self.hidden_size], dtype=self.dtype
-        )
-        # All experts use all tokens
-        self.recv_expert_count = paddle.to_tensor([4, 4], dtype="int32")
-
-        self.quanted_x_ref, self.quanted_scale_ref = masked_per_token_quant_ref(
-            self.input_tensor, self.recv_expert_count, self.block_size, self.use_ue8m0
+            self.input_tensor, self.recv_expert_count, self.block_size
         )
 
 
@@ -314,7 +229,7 @@ class TestMaskedPerTokenQuantCase3(TestMaskedPerTokenQuant):
         self.hidden_size = 256
         self.block_size = 128
         self.dtype = paddle.bfloat16
-        self.use_ue8m0 = True
+
         self.input_tensor = paddle.randn(
             [self.num_local_expert, self.num_max_tokens_per_expert, self.hidden_size], dtype=self.dtype
         )
@@ -322,7 +237,7 @@ class TestMaskedPerTokenQuantCase3(TestMaskedPerTokenQuant):
         self.recv_expert_count = paddle.to_tensor([4, 4], dtype="int32")
 
         self.quanted_x_ref, self.quanted_scale_ref = masked_per_token_quant_ref(
-            self.input_tensor, self.recv_expert_count, self.block_size, self.use_ue8m0
+            self.input_tensor, self.recv_expert_count, self.block_size
        )
 
 
@@ -335,7 +250,7 @@ class TestMaskedPerTokenQuantEdgeCases(unittest.TestCase):
         input_tensor = paddle.randn([2, 4, 256], dtype="bfloat16")
         recv_expert_count = paddle.to_tensor([0, 2], dtype="int32")  # First expert has no tokens
 
-        quanted_x_ref, quanted_scale_ref = masked_per_token_quant_ref(input_tensor, recv_expert_count, 128, False)
+        quanted_x_ref, quanted_scale_ref = masked_per_token_quant_ref(input_tensor, recv_expert_count, 128)
 
         # First expert should be all zeros - convert to float32 for comparison
         expert_0_quanted = quanted_x_ref[0].astype("float32")
@@ -25,20 +25,7 @@ from fastdeploy.model_executor.ops.gpu import per_token_quant, per_token_quant_p
 paddle.seed(2024)
 
 
-def ceil_to_ue8m0_paddle(x: paddle.Tensor):
-    """
-    x > 0
-    return 2 ^ ceil(log2(x))
-    """
-    # log2(x)
-    log2_x = paddle.log(x) / paddle.log(paddle.to_tensor(2.0, dtype=x.dtype))
-    # ceil
-    ceil_log2_x = paddle.ceil(log2_x)
-    # 2^k
-    return paddle.pow(paddle.to_tensor(2.0, dtype=x.dtype), ceil_log2_x)
-
-
-def per_token_quant_paddle(input_tensor, block_size, use_ue8m0: bool = False):
+def per_token_quant_paddle(input_tensor, block_size):
     MAX_VALUE = 448.0
     epsilon = 1e-10
 
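One detail worth noting in the context lines: epsilon = 1e-10 guards the all-zero block, where max_abs would otherwise be 0 and quantization would compute 0 / 0. A standalone illustration (tensor shape is ours):

    import paddle

    # Without the clip, an all-zero block yields scale = 0 and a NaN-producing
    # 0/0; clipping max_abs keeps the division finite and the output all zeros.
    zero_block = paddle.zeros([1, 4], dtype="float32")
    max_abs = paddle.clip(paddle.max(paddle.abs(zero_block), axis=-1, keepdim=True), min=1e-10)
    print((zero_block / (max_abs / 448.0)).numpy())  # [[0. 0. 0. 0.]]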
@@ -46,6 +33,7 @@ def per_token_quant_paddle(input_tensor, block_size, use_ue8m0: bool = False):
     token_num = input_shape[0]
     hidden_size = input_shape[1]
 
+    # According to https://github.com/PaddlePaddle/FastDeploy/pull/3659
     padding_size = (block_size - hidden_size % block_size) % block_size
 
     padded_input = input_tensor
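The padding expression in the context above rounds hidden_size up to the next multiple of block_size, adding nothing when it already divides evenly; worked through as a sketch:

    # (block_size - hidden_size % block_size) % block_size, worked through.
    def padding_size(hidden_size: int, block_size: int) -> int:
        return (block_size - hidden_size % block_size) % block_size

    assert padding_size(384, 128) == 0   # already a multiple; the outer % avoids padding by 128
    assert padding_size(500, 128) == 12  # 500 is padded up to 512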
@@ -60,8 +48,6 @@ def per_token_quant_paddle(input_tensor, block_size, use_ue8m0: bool = False):
     max_abs_val = paddle.max(paddle.abs(reshaped_input), axis=-1, keepdim=True)
     max_abs_val = paddle.clip(max_abs_val, min=epsilon)
     scale = max_abs_val / MAX_VALUE
-    if use_ue8m0:
-        scale = ceil_to_ue8m0_paddle(scale)
 
     quanted_value = reshaped_input / scale
 
@@ -75,8 +61,8 @@ def per_token_quant_paddle(input_tensor, block_size, use_ue8m0: bool = False):
     return quanted_x, quanted_scale
 
 
-def per_token_quant_padding_paddle(input_tensor, block_size, dtype, use_ue8m0):
-    quanted_x, intermediate_scale = per_token_quant_paddle(input_tensor, block_size, use_ue8m0)
+def per_token_quant_padding_paddle(input_tensor, block_size, dtype):
+    quanted_x, intermediate_scale = per_token_quant_paddle(input_tensor, block_size)
     token_num = input_tensor.shape[0]
 
     tma_alignment_elements = 4
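tma_alignment_elements = 4 suggests the scale tensor's token dimension is padded up to a multiple of four so the kernel can issue aligned (TMA) transfers; a hedged sketch of that rounding, with a helper name of our own:

    import math

    # Round token_num up to a multiple of tma_alignment_elements. This assumes
    # the padding exists for aligned TMA loads; the exact layout is not shown
    # in this diff.
    def padded_token_num(token_num: int, tma_alignment_elements: int = 4) -> int:
        return math.ceil(token_num / tma_alignment_elements) * tma_alignment_elements

    assert padded_token_num(7) == 8
    assert padded_token_num(8) == 8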
@@ -102,16 +88,16 @@ class TestPerTokenQuant(unittest.TestCase):
         self.input_tensor = self.get_input(shape=[self.token_num, self.hidden_size], dtype=self.dtype)
 
     def test_per_token_quant(self):
-        for use_ue8m0 in [False, True]:
-            paddle_output, paddle_output_scale = per_token_quant_paddle(self.input_tensor, self.block_size, use_ue8m0)
-            output, output_scale = per_token_quant(self.input_tensor, self.block_size, use_ue8m0)
+        paddle_output, paddle_output_scale = per_token_quant_paddle(self.input_tensor, self.block_size)
+        output, output_scale = per_token_quant(self.input_tensor, self.block_size)
 
-            np.testing.assert_allclose(paddle_output_scale.numpy(), output_scale.numpy(), rtol=1e-6)
+        np.testing.assert_allclose(paddle_output_scale.numpy(), output_scale.numpy(), rtol=1e-6)
 
-            output_rel_diff = paddle.mean(
-                paddle.abs(output.to(paddle.float32) - paddle_output.to(paddle.float32))
-            ) / paddle.mean(paddle.abs(paddle_output.to(paddle.float32)))
-            assert output_rel_diff < 0.001
+        output_rel_diff = paddle.mean(
+            paddle.abs(output.to(paddle.float32) - paddle_output.to(paddle.float32))
+        ) / paddle.mean(paddle.abs(paddle_output.to(paddle.float32)))
+
+        assert output_rel_diff < 0.001
 
 
 class TestPerTokenQuantCase1(TestPerTokenQuant):
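The acceptance metric used in both test files is a mean relative difference below 0.1%; factored out as a sketch (the function name is ours):

    import paddle

    # Mean absolute error, normalized by the mean magnitude of the reference.
    def rel_diff(out: paddle.Tensor, ref: paddle.Tensor) -> paddle.Tensor:
        out32, ref32 = out.astype("float32"), ref.astype("float32")
        return paddle.mean(paddle.abs(out32 - ref32)) / paddle.mean(paddle.abs(ref32))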
@@ -150,25 +136,24 @@ class TestPerTokenQuantPadding(TestPerTokenQuant):
         self.input_tensor = self.get_input(shape=[self.token_num, self.hidden_size], dtype=self.dtype)
 
     def test_per_token_quant_padding(self):
-        for use_ue8m0 in [False, True]:
-            paddle_output, paddle_output_scale = per_token_quant_padding_paddle(
-                self.input_tensor, self.block_size, self.dtype, use_ue8m0
-            )
-            output, output_scale = per_token_quant_padding(self.input_tensor, self.block_size, use_ue8m0)
+        paddle_output, paddle_output_scale = per_token_quant_padding_paddle(
+            self.input_tensor, self.block_size, self.dtype
+        )
+        output, output_scale = per_token_quant_padding(self.input_tensor, self.block_size)
 
-            self.assertEqual(paddle_output_scale.shape, output_scale.shape)
-            np.testing.assert_allclose(
-                paddle_output_scale[0 : self.token_num].numpy(),
-                output_scale[0 : self.token_num].numpy(),
-                rtol=1e-5,
-                atol=1e-5,
-            )
+        self.assertEqual(paddle_output_scale.shape, output_scale.shape)
+        np.testing.assert_allclose(
+            paddle_output_scale[0 : self.token_num].numpy(),
+            output_scale[0 : self.token_num].numpy(),
+            rtol=1e-5,
+            atol=1e-5,
+        )
 
-            output_rel_diff = paddle.mean(
-                paddle.abs(output.to(paddle.float32) - paddle_output.to(paddle.float32))
-            ) / paddle.mean(paddle.abs(paddle_output.to(paddle.float32)) + 1e-9)
+        output_rel_diff = paddle.mean(
+            paddle.abs(output.to(paddle.float32) - paddle_output.to(paddle.float32))
+        ) / paddle.mean(paddle.abs(paddle_output.to(paddle.float32)) + 1e-9)
 
-            assert output_rel_diff < 0.001
+        assert output_rel_diff < 0.001
 
 
 class TestPerTokenQuantPaddingCase1(TestPerTokenQuantPadding):