[Bug Fix] fix outdated doc and disable mm model prefix caching (#4425)
Some checks failed
CE Compile Job / ce_job_pre_check (push) Has been cancelled
CE Compile Job / print_ce_job_pre_check_outputs (push) Has been cancelled
CE Compile Job / FD-Clone-Linux (push) Has been cancelled
CE Compile Job / Show Code Archive Output (push) Has been cancelled
CE Compile Job / BUILD_SM8090 (push) Has been cancelled
CE Compile Job / BUILD_SM8689 (push) Has been cancelled
CE Compile Job / CE_UPLOAD (push) Has been cancelled

* fix outdated doc and disable mm model prefix caching

* fix outdated doc and disable mm model prefix caching

---------

Co-authored-by: Jiang-Jia-Jun <163579578+Jiang-Jia-Jun@users.noreply.github.com>
This commit is contained in:
ApplEOFDiscord
2025-10-16 11:10:33 +08:00
committed by GitHub
parent adeee84dd6
commit 4178c110d2
3 changed files with 3 additions and 3 deletions

View File

@@ -92,7 +92,7 @@ from PIL import Image
 from fastdeploy.entrypoints.llm import LLM
 from fastdeploy.engine.sampling_params import SamplingParams
-from fastdeploy.input.ernie_tokenizer import Ernie4_5Tokenizer
+from fastdeploy.input.ernie4_5_tokenizer import Ernie4_5Tokenizer

 PATH = "baidu/ERNIE-4.5-VL-28B-A3B-Paddle"
 tokenizer = Ernie4_5Tokenizer.from_pretrained(PATH)

View File

@@ -1257,6 +1257,8 @@ class FDConfig:
         self.cache_config.postprocess(self.max_num_batched_tokens, self.max_num_seqs)
         self.cache_config.max_block_num_per_seq = int(self.max_model_len // self.cache_config.block_size)
+        if self.model_config is not None and self.model_config.enable_mm:
+            self.cache_config.enable_prefix_caching = False
         if self.guided_decoding_backend == "auto":
             if self.model_config.enable_mm:

View File

@@ -394,8 +394,6 @@ class EngineArgs:
             self.enable_prefix_caching = False
         if self.speculative_config is not None:
             self.enable_prefix_caching = False
-        if self.enable_mm:
-            self.enable_prefix_caching = False
         if not current_platform.is_cuda():
             self.enable_prefix_caching = False
# if self.dynamic_load_weight: # if self.dynamic_load_weight: