【FIX】Change the name of sparse attn from moba to plas (#4006) (#4076)

* 【FIX】Change the name of sparse attn from moba to plas (#4006)

* Update docs

* 【docs】 update readme (#4000)

* Update docs

* update readme

* update docs

* 【FIX】Change the name of sparse attn from moba to plas (#3845)

* Update docs

* Update docs

* Update docs

* Update docs

* Change moba to plas

* code style

* update ci

* code style

* update ci

* code style

---------

Co-authored-by: Jiang-Jia-Jun <163579578+Jiang-Jia-Jun@users.noreply.github.com>

* fix max_num_seqs

* fix test load attn

---------

Co-authored-by: Jiang-Jia-Jun <163579578+Jiang-Jia-Jun@users.noreply.github.com>
Author: yangjianfengo1
Date: 2025-09-23 10:26:40 +08:00
Committed by: GitHub
Parent: 2c34a557f4
Commit: 4325b737e7

14 changed files with 152 additions and 152 deletions

@@ -65,7 +65,7 @@ class TestAttentionInitWeight(unittest.TestCase):
         self.fd_config.parallel_config = self.parallel_config
         self.fd_config.cache_config = self.cache_config
         self.fd_config.quant_config = None
-        self.fd_config.moba_attention_config = None
+        self.fd_config.plas_attention_config = None

     def test_init_weight_without_quantization(self):
         """Test init_weight without quantization."""
@@ -141,7 +141,7 @@ class TestAttentionWeightLoader(unittest.TestCase):
         self.fd_config.model_config = self.model_config
         self.fd_config.parallel_config = self.parallel_config
         self.fd_config.cache_config = self.cache_config
-        self.fd_config.moba_attention_config = None
+        self.fd_config.plas_attention_config = None

         # Create mock quant method
         self.mock_quant_method = MockQuantMethod()
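
The hunks above show the mechanical part of the rename: every read and write of the old attribute is switched to the new name. For illustration only, here is a minimal sketch (not part of this commit, and not FastDeploy's actual code) of how such a rename could instead keep the old attribute name working as a deprecated alias; the FDConfig class and the property pattern are assumptions made up for this example:

import warnings

class FDConfig:
    """Hypothetical stand-in for the config object touched in the diff."""

    def __init__(self):
        # New canonical attribute after the moba -> plas rename.
        self.plas_attention_config = None

    @property
    def moba_attention_config(self):
        # The old name is still readable, but callers are warned to migrate.
        warnings.warn(
            "moba_attention_config is deprecated; use plas_attention_config",
            DeprecationWarning,
            stacklevel=2,
        )
        return self.plas_attention_config

    @moba_attention_config.setter
    def moba_attention_config(self, value):
        # Writes through the old name land on the new attribute.
        warnings.warn(
            "moba_attention_config is deprecated; use plas_attention_config",
            DeprecationWarning,
            stacklevel=2,
        )
        self.plas_attention_config = value

The commit itself performs a hard rename across all 14 changed files in one pass, so no alias is needed; the sketch only shows the alternative for cases where callers cannot be updated atomically.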