【Inference Optimize】MLA Tensor-Core is enabled by default (#4335)
Some checks failed
CE Compile Job / ce_job_pre_check (push) Has been cancelled
CE Compile Job / print_ce_job_pre_check_outputs (push) Has been cancelled
CE Compile Job / FD-Clone-Linux (push) Has been cancelled
CE Compile Job / Show Code Archive Output (push) Has been cancelled
CE Compile Job / BUILD_SM8090 (push) Has been cancelled
CE Compile Job / BUILD_SM8689 (push) Has been cancelled
CE Compile Job / CE_UPLOAD (push) Has been cancelled
Deploy GitHub Pages / deploy (push) Has been cancelled

This commit is contained in:
AIbin
2025-10-10 10:54:56 +08:00
committed by GitHub
parent 5f80862578
commit c4ebaf8a07
2 changed files with 2 additions and 2 deletions

View File

@@ -331,7 +331,7 @@ void GetBlockShapeAndSplitKVBlock(
// decoder
if (max_dec_len_this_time > 0) {
-    const bool mla_use_tensorcore = GetMlaUseTensorcore();
+    const bool mla_use_tensorcore = true; //GetMlaUseTensorcore();
if (mla_use_tensorcore && group_size <= 64) {
const int set_chunk_size = get_mla_dec_chunk_size(bsz);

View File

@@ -66,7 +66,7 @@ std::vector<paddle::Tensor> MultiHeadLatentAttentionKernel(
// int chunk_size = decoder_chunk_size_cpu.data<int>()[0];
//
-  const bool mla_use_tensorcore = get_mla_use_tensorcore();
+  const bool mla_use_tensorcore = true; //get_mla_use_tensorcore();
auto sm_version = GetSMVersion();
if ((speculate_decoder || mla_use_tensorcore) && sm_version < 90) {
PD_THROW("Please use speculate_decoder=0 and FLAGS_mla_use_tensorcore=0 when sm < 90.");