Mirror of https://github.com/PaddlePaddle/FastDeploy.git (synced 2025-10-05 00:33:03 +08:00)
[BugFix] Fix chunked prefill (#3759)
* add error traceback info
* update error msg
* update code
* default enable chunked prefill
* update code
* update code
* add envs
* update code
* update enable chunked_prefill
* update code
* update code
* update code
* update code
* update code

---------

Co-authored-by: Jiang-Jia-Jun <163579578+Jiang-Jia-Jun@users.noreply.github.com>
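A minimal standalone sketch of the gating this commit describes: chunked prefill stays enabled by default, MTP speculative decoding turns it off, and the new FD_FORCE_CHUNKED_PREFILL env var keeps it on. The function, its arguments, and the "0" env default are hypothetical; the real logic lives on FDConfig in the hunk below and reads the flag via the envs module.

    import os

    def resolve_chunked_prefill(enable_chunked_prefill: bool, spec_method=None) -> bool:
        # Hypothetical mirror of the FDConfig gating in the hunk below.
        force = int(os.getenv("FD_FORCE_CHUNKED_PREFILL", "0"))
        if enable_chunked_prefill and spec_method in ["mtp"] and not force:
            return False
        return enable_chunked_prefill

    # MTP disables chunked prefill by default...
    assert resolve_chunked_prefill(True, "mtp") is False
    # ...unless the override env var is set.
    os.environ["FD_FORCE_CHUNKED_PREFILL"] = "1"
    assert resolve_chunked_prefill(True, "mtp") is True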
@@ -1233,23 +1233,14 @@ class FDConfig:
        self.paddle_commit_id = paddle.version.commit

        if self.cache_config.enable_chunked_prefill:
            self.force_chunked_prefill = int(envs.FD_FORCE_CHUNKED_PREFILL)
            if (
                self.speculative_config is not None
                and self.speculative_config.method in ["mtp"]
                and not self.force_chunked_prefill
            ):
                self.cache_config.enable_chunked_prefill = False

        if self.max_num_batched_tokens is None:
            if self.cache_config.enable_chunked_prefill:
                self.max_num_batched_tokens = 2048
            else:
                if not int(os.getenv("ENABLE_V1_KVCACHE_SCHEDULER", "0")):
                    self.max_num_batched_tokens = self.max_model_len
                else:
                    self.max_num_batched_tokens = 8192  # if set to max_model_len, it's easy to be OOM

        if self.long_prefill_token_threshold == 0:
            self.long_prefill_token_threshold = int(self.max_model_len * 0.04)
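A standalone sketch of the max_num_batched_tokens defaulting in the hunk above; the function name and the sample max_model_len are hypothetical, and the real code reads the scheduler flag from the envs module rather than os.getenv.

    import os

    def default_max_num_batched_tokens(enable_chunked_prefill: bool, max_model_len: int) -> int:
        # Hypothetical mirror of the defaulting logic in the hunk above.
        if enable_chunked_prefill:
            # Prefill is processed in fixed-size chunks, so a small batch budget suffices.
            return 2048
        if not int(os.getenv("ENABLE_V1_KVCACHE_SCHEDULER", "0")):
            return max_model_len
        # Under the V1 KV-cache scheduler, max_model_len easily OOMs, so cap the budget.
        return 8192

    print(default_max_num_batched_tokens(True, 131072))   # -> 2048
    print(default_max_num_batched_tokens(False, 131072))  # -> 131072 with the legacy scheduler

The final branch of the hunk sets long_prefill_token_threshold to 4% of max_model_len when unset; for example, with max_model_len = 32768 it becomes int(32768 * 0.04) == 1310.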