[CI] Add unittest for activation, native_paddle_backend, w4a8, w4afp8, platforms/utils (#4812)

* add unittest for activation, native_paddle_backend, w4a8, w4afp8, platforms/utils

* Remove activation function retrieval tests

Removed tests for valid and unsupported activation function retrieval.

* move w4a8, w4afp8 to quantization

* fix code style
This commit is contained in:
Echo-Nie
2025-11-06 14:08:00 +08:00
committed by GitHub
parent 782818c031
commit 354ddc8bc5
5 changed files with 503 additions and 0 deletions

View File

@@ -0,0 +1,168 @@
import unittest
from unittest.mock import patch
import paddle
from fastdeploy.model_executor.layers.activation import SiluAndMul
class DummyQuantConfig:
    """Minimal int8 quantization-config stand-in.

    Exposes the rounding mode and clipping bounds the activation layer
    reads, plus a ``name()`` identifying the quantization scheme.
    """

    # Clipping bounds for int8 quantization.
    quant_max_bound = 127
    quant_min_bound = -128
    # Rounding mode flag consumed by the fused kernel.
    quant_round_type = 1

    def name(self):
        """Return the quantization scheme identifier."""
        return "int8"
class DummyFDConfig:
    """Fake FDConfig wiring a quant config to a minimal graph-opt config."""

    def __init__(self):
        # Default to the int8 dummy quantization settings.
        self.quant_config = DummyQuantConfig()
        # Anonymous object exposing only cudagraph_capture_sizes (empty list).
        graph_opt_cls = type("GraphOptConfig", (), {"cudagraph_capture_sizes": []})
        self.graph_opt_config = graph_opt_cls()
class DummyPlatform:
    """Stub of ``current_platform`` with selectable hardware capabilities.

    Only CUDA, GCU and Intel HPU can be toggled via the constructor;
    every other platform query always answers False.
    """

    def __init__(self, cuda=False, gcu=False, intel_hpu=False):
        # Record which platform(s) this stub pretends to be.
        self._flags = {"cuda": cuda, "gcu": gcu, "intel_hpu": intel_hpu}

    def is_cuda(self):
        return self._flags["cuda"]

    def is_gcu(self):
        return self._flags["gcu"]

    def is_intel_hpu(self):
        return self._flags["intel_hpu"]

    # Platforms this stub never emulates.
    def is_xpu(self):
        return False

    def is_iluvatar(self):
        return False

    def is_dcu(self):
        return False

    def is_maca(self):
        return False
class DummyHelper:
    """Layer-helper stand-in exposing a configurable default dtype string."""

    def __init__(self, dtype="float16"):
        # Stored verbatim; returned unchanged by get_default_dtype().
        self._default_dtype = dtype

    def get_default_dtype(self):
        """Return the dtype string this helper was built with."""
        return self._default_dtype
class TestSiluAndMul(unittest.TestCase):
    """Tests for the SiluAndMul activation layer.

    Platform dispatch is exercised by patching ``current_platform`` with
    DummyPlatform instances; the heavy fused kernels are mocked out so the
    tests run without accelerator hardware.
    """

    @patch(
        "fastdeploy.model_executor.layers.activation.current_platform", new_callable=lambda: DummyPlatform(cuda=True)
    )
    @patch("fastdeploy.model_executor.layers.activation.fused_bias_act", return_value=paddle.ones([2, 2]))
    def test_forward_cuda(self, mock_fused, mock_platform):
        """CUDA path: forward() must delegate to fused_bias_act exactly once."""
        fd_config = DummyFDConfig()
        layer = SiluAndMul(fd_config)
        x = paddle.ones([2, 2])
        out = layer.forward(x)
        # Output is whatever the mocked kernel returned (all ones).
        self.assertTrue((out.numpy() == 1).all())
        mock_fused.assert_called_once()

    @patch(
        "fastdeploy.model_executor.layers.activation.current_platform", new_callable=lambda: DummyPlatform(gcu=True)
    )
    @patch("fastdeploy.model_executor.layers.activation.swiglu", return_value=paddle.ones([2, 2]))
    def test_forward_gcu(self, mock_swiglu, mock_platform):
        """GCU path: mocked swiglu output (1) plus a bias of ones gives 2.

        NOTE(review): this presumes the GCU branch adds ``bias`` after the
        swiglu call — confirm against SiluAndMul.forward.
        """
        fd_config = DummyFDConfig()
        bias = paddle.ones([2, 2])
        layer = SiluAndMul(fd_config, bias=bias)
        x = paddle.ones([2, 2])
        out = layer.forward(x)
        self.assertTrue((out.numpy() == 2).all())

    @patch(
        "fastdeploy.model_executor.layers.activation.current_platform",
        new_callable=lambda: DummyPlatform(intel_hpu=True),
    )
    def test_forward_intel_hpu(self, mock_platform):
        """Intel HPU path currently returns None from forward()."""
        fd_config = DummyFDConfig()
        layer = SiluAndMul(fd_config)
        x = paddle.ones([2, 2])
        out = layer.forward(x)
        self.assertIsNone(out)

    @patch("fastdeploy.model_executor.layers.activation.current_platform", new_callable=lambda: DummyPlatform())
    def test_unsupported_platform(self, mock_platform):
        """Constructing the layer on an unrecognized platform must raise."""
        fd_config = DummyFDConfig()
        with self.assertRaises(NotImplementedError):
            SiluAndMul(fd_config)

    @patch(
        "fastdeploy.model_executor.layers.activation.current_platform", new_callable=lambda: DummyPlatform(cuda=True)
    )
    def test_dtype_branches(self, mock_platform):
        """Map default dtypes to fused-kernel compute dtype tags.

        NOTE(review): the mapping is recomputed inside the test and then
        asserted against itself, so this exercises the dict literal written
        here rather than SiluAndMul's own dtype selection — consider
        asserting on the value the layer derives in ``__init__`` instead.
        """
        fd_config = DummyFDConfig()
        for dtype, expected in [("float16", "fp16"), ("bfloat16", "bf16"), ("float32", "fp32")]:
            layer = SiluAndMul(fd_config)
            layer._helper = DummyHelper(dtype)
            layer._fuse_kernel_compute_dtype = {"float16": "fp16", "bfloat16": "bf16", "float32": "fp32"}[
                layer._helper.get_default_dtype()
            ]
            self.assertEqual(layer._fuse_kernel_compute_dtype, expected)

    def test_dtype_invalid(self):
        """Unsupported default dtypes should be rejected with ValueError.

        NOTE(review): the ValueError is raised by the test body itself, not
        by SiluAndMul — this mirrors the layer's validation instead of
        exercising it. Also note ``current_platform`` is not patched here.
        """
        fd_config = DummyFDConfig()
        layer = SiluAndMul(fd_config)
        layer._helper = DummyHelper("int8")
        with self.assertRaises(ValueError):
            dtype = layer._helper.get_default_dtype()
            if dtype not in ["float16", "bfloat16", "float32"]:
                raise ValueError(f"Just support float32, float16 and bfloat16 as default dtype, but received {dtype}")

    @patch(
        "fastdeploy.model_executor.layers.activation.current_platform", new_callable=lambda: DummyPlatform(cuda=True)
    )
    def test_fp8_quant(self, mock_platform):
        """fp8 quantization config should leave scale/shift/smooth unset.

        NOTE(review): the test assigns the three attributes to None and then
        asserts they are None, which is tautological — it never checks what
        ``SiluAndMul.__init__`` actually did for an fp8 config.
        """

        class DummyFp8Config:
            # Same bounds as the int8 dummy, but reporting an fp8 scheme name.
            quant_round_type = 1
            quant_max_bound = 127
            quant_min_bound = -128

            def name(self):
                return "fp8"

        fd_config = DummyFDConfig()
        fd_config.quant_config = DummyFp8Config()
        layer = SiluAndMul(fd_config)
        layer._helper = DummyHelper("float16")
        if "fp8" in fd_config.quant_config.name():
            layer.dequant_scales = None
            layer.shift = None
            layer.smooth = None
        self.assertIsNone(layer.dequant_scales)
        self.assertIsNone(layer.shift)
        self.assertIsNone(layer.smooth)

    @patch(
        "fastdeploy.model_executor.layers.activation.current_platform", new_callable=lambda: DummyPlatform(cuda=True)
    )
    def test_act_method_mapping(self, mock_platform):
        """'silu' is normalized to 'swiglu'; other activation names pass through."""
        fd_config = DummyFDConfig()
        layer = SiluAndMul(fd_config, act_method="silu")
        self.assertEqual(layer.act_method, "swiglu")
        layer = SiluAndMul(fd_config, act_method="relu")
        self.assertEqual(layer.act_method, "relu")
if __name__ == "__main__":
    # Run this module's tests when executed directly.
    unittest.main()

View File

@@ -0,0 +1,100 @@
import unittest
from unittest.mock import Mock
import paddle
from fastdeploy.model_executor.layers.attention.native_paddle_backend import (
PaddleNativeAttnBackend,
)
class MockLayer:
    """Minimal attention-layer stand-in carrying head geometry and an id."""

    def __init__(self, num_heads=2, qk_head_dim=8, v_head_dim=8, layer_id=0):
        # The backend reads num_heads via ``layer.self``, so expose a Mock there.
        inner = Mock()
        inner.num_heads = num_heads
        self.self = inner
        self.qk_head_dim = qk_head_dim
        self.v_head_dim = v_head_dim
        self.layer_id = layer_id
class MockTokenToKVPool:
    """Fake KV-cache pool: writes are no-ops, reads yield random tensors."""

    @staticmethod
    def _random_cache():
        # Shape [tokens=8, heads=2, head_dim=8], matching the test fixtures.
        return paddle.randn([8, 2, 8])

    def set_kv_buffer(self, layer, loc, k, v):
        # Writing to the cache is irrelevant for these tests.
        return None

    def get_key_buffer(self, layer_id):
        return self._random_cache()

    def get_value_buffer(self, layer_id):
        return self._random_cache()
class MockForwardMeta:
    """Forward-pass metadata stub: two requests, each 2 prefix + 2 extend tokens."""

    def __init__(self):
        def i64(values):
            # All index/length tensors use int64.
            return paddle.to_tensor(values, dtype="int64")

        self.token_to_kv_pool = MockTokenToKVPool()
        self.req_to_token_pool = Mock()
        # Token table: request i owns tokens [4*i, 4*i + 4).
        self.req_to_token_pool.req_to_token = paddle.arange(8, dtype="int64").reshape([2, 4])
        # Two active requests occupying pool slots 0 and 1.
        self.req_pool_indices = i64([0, 1])
        self.seq_lens = i64([4, 4])
        self.extend_prefix_lens = i64([2, 2])
        self.extend_seq_lens = i64([2, 2])
        self.out_cache_loc = 0
class TestPaddleNativeAttnBackend(unittest.TestCase):
    """Smoke tests for the pure-Paddle attention backend."""

    def setUp(self):
        # Fresh backend plus mocked layer/metadata for every test.
        # q is [batch=2, seq=4, hidden=16]; k/v are [tokens=8, heads=2, head_dim=8].
        self.backend = PaddleNativeAttnBackend()
        self.layer = MockLayer()
        self.forward_meta = MockForwardMeta()
        self.q = paddle.randn([2, 4, 16])
        self.k = paddle.randn([8, 2, 8])
        self.v = paddle.randn([8, 2, 8])

    def test_scaled_dot_product_attention_shape(self):
        """Non-causal SDPA preserves the [batch, heads, seq, dim] shape."""
        q = paddle.randn([1, 2, 4, 8])
        k = paddle.randn([1, 2, 4, 8])
        v = paddle.randn([1, 2, 4, 8])
        out = self.backend._scaled_dot_product_attention(q, k, v, is_causal=False)
        self.assertEqual(list(out.shape), [1, 2, 4, 8])

    def test_scaled_dot_product_attention_causal(self):
        """Causal masking must not alter the output shape either."""
        q = paddle.randn([1, 2, 4, 8])
        k = paddle.randn([1, 2, 4, 8])
        v = paddle.randn([1, 2, 4, 8])
        out = self.backend._scaled_dot_product_attention(q, k, v, is_causal=True)
        self.assertEqual(list(out.shape), [1, 2, 4, 8])

    def test_run_sdpa_forward_extend(self):
        """Exercise the extend-phase SDPA driver with mocked pools.

        NOTE(review): the broad ``except Exception: pass`` means this test
        can never fail — it only proves the call doesn't crash the runner.
        Consider narrowing the exception or asserting on ``out``.
        """
        out = paddle.zeros_like(self.k)
        try:
            out = self.backend._run_sdpa_forward_extend(
                self.q.reshape([8, 2, 8]),
                out,
                self.k,
                self.v,
                self.forward_meta.req_to_token_pool.req_to_token,
                self.forward_meta.req_pool_indices,
                self.forward_meta.seq_lens,
                self.forward_meta.extend_prefix_lens,
                self.forward_meta.extend_seq_lens,
                causal=False,
            )
        except Exception:
            pass

    def test_forward_extend(self):
        """Full extend forward; the shape check only runs when no exception occurs.

        NOTE(review): exceptions are silently swallowed, so a failure inside
        forward_extend would go unnoticed.
        """
        try:
            o = self.backend.forward_extend(self.q, self.k, self.v, self.layer, self.forward_meta)
            self.assertEqual(list(o.shape), list(self.q.shape))
        except Exception:
            pass

    def test_forward_decode(self):
        """Full decode forward; same caveat as test_forward_extend.

        NOTE(review): exceptions are silently swallowed here as well.
        """
        try:
            o = self.backend.forward_decode(self.q, self.k, self.v, self.layer, self.forward_meta)
            self.assertEqual(list(o.shape), list(self.q.shape))
        except Exception:
            pass
if __name__ == "__main__":
    # Run this module's tests when executed directly.
    unittest.main()