Mirror of https://github.com/PaddlePaddle/FastDeploy.git, synced 2025-12-24 13:28:13 +08:00
[BugFix] fix mm cudagraph (#5266)
Some checks failed
CE Compile Job / ce_job_pre_check (push) Has been cancelled
CE Compile Job / print_ce_job_pre_check_outputs (push) Has been cancelled
CE Compile Job / FD-Clone-Linux (push) Has been cancelled
CE Compile Job / Show Code Archive Output (push) Has been cancelled
CE Compile Job / BUILD_SM8090 (push) Has been cancelled
CE Compile Job / BUILD_SM8689 (push) Has been cancelled
CE Compile Job / CE_UPLOAD (push) Has been cancelled
Deploy GitHub Pages / deploy (push) Has been cancelled
* fix mm cudagraph
* fix test_prompt_ids bug
* update code
* update ci code
* update ci code
* update ci code
.github/workflows/_base_test.yml (vendored, 2 changed lines)
@@ -209,7 +209,7 @@ jobs:
           export TEMPLATE=TOKEN_NORMAL
           curl -X POST http://0.0.0.0:${FLASK_PORT}/switch \
            -H "Content-Type: application/json" \
-           -d "{\"--model\": \"/MODELDATA/ERNIE-4.5-VL-28B-A3B-Thinking\", \"--reasoning-parser\": \"ernie-45-vl-thinking\", \"--tool-call-parser\": \"ernie-45-vl-thinking\", \"--tensor-parallel-size\": 1, \"--quantization\": \"wint4\", \"--max-model-len\": 131072, \"--max-num-seqs\": 32}"
+           -d "{\"--model\": \"/MODELDATA/ERNIE-4.5-VL-28B-A3B-Thinking\", \"--reasoning-parser\": \"ernie-45-vl-thinking\", \"--tool-call-parser\": \"ernie-45-vl-thinking\", \"--tensor-parallel-size\": 1, \"--quantization\": \"wint4\", \"--max-model-len\": 131072, \"--max-num-seqs\": 32, \"--no-enable-prefix-caching\": true}"
           check_service 90
           python -m pytest -sv test_prompt_ids.py || TEST_EXIT_CODE=1
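
For reference, a minimal Python sketch of the same model-switch request the workflow sends with curl. This is an illustration only: it assumes the CI's Flask harness listens on FLASK_PORT and exposes /switch exactly as shown in the hunk above, and that the requests package is available.

    import os
    import requests

    # Payload mirrors the workflow's curl -d body; "--no-enable-prefix-caching"
    # is the flag this PR adds so the multimodal CUDAGraph run starts with
    # prefix caching disabled.
    payload = {
        "--model": "/MODELDATA/ERNIE-4.5-VL-28B-A3B-Thinking",
        "--reasoning-parser": "ernie-45-vl-thinking",
        "--tool-call-parser": "ernie-45-vl-thinking",
        "--tensor-parallel-size": 1,
        "--quantization": "wint4",
        "--max-model-len": 131072,
        "--max-num-seqs": 32,
        "--no-enable-prefix-caching": True,
    }

    flask_port = os.environ.get("FLASK_PORT", "8080")
    resp = requests.post(f"http://0.0.0.0:{flask_port}/switch", json=payload)
    resp.raise_for_status()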
@@ -1747,9 +1747,6 @@ class FDConfig:
         else:
             # It will hang when real batch_size < tp_size
             self.graph_opt_config.filter_capture_size(tp_size=self.parallel_config.tensor_parallel_size)
-        if self.model_config.enable_mm and self.graph_opt_config.use_cudagraph:
-            self.cache_config.enable_prefix_caching = False
-            logger.info("Multi-modal models do not support prefix caching when using CUDAGraph!")

         if self.scheduler_config.splitwise_role == "mixed":
             self._disable_sequence_parallel_moe_if_needed("Mixed")
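
The removed block auto-disabled prefix caching whenever a multimodal model ran with CUDAGraph; with this PR the choice is left to the caller, hence the explicit --no-enable-prefix-caching in the CI hunk above. A hedged sketch of an equivalent advisory check in user code: the config field names come from the removed lines, while the helper itself and its warning text are hypothetical.

    def warn_on_mm_cudagraph_prefix_caching(fd_config):
        # Fields referenced here (model_config.enable_mm,
        # graph_opt_config.use_cudagraph, cache_config.enable_prefix_caching)
        # are taken from the removed FDConfig lines; this helper only warns
        # instead of silently mutating the config as the old code did.
        if (
            fd_config.model_config.enable_mm
            and fd_config.graph_opt_config.use_cudagraph
            and fd_config.cache_config.enable_prefix_caching
        ):
            print(
                "Prefix caching enabled for a multimodal model with CUDAGraph; "
                "pass --no-enable-prefix-caching to disable it explicitly."
            )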
@@ -89,7 +89,7 @@ def build_command(config):

     # Add configuration parameters
     for key, value in config.items():
-        if "--enable" in key:
+        if "--enable" in key or "--no-enable" in key:
             value = bool(value if isinstance(value, bool) else eval(value))
             if value:
                 cmd.append(key)
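
To see why the extra branch matters: "--enable" is not a substring of "--no-enable-prefix-caching" (the dashes differ), so before this change the no- flags presumably fell through to the generic key/value path and were emitted with a value instead of as a bare switch. A self-contained sketch of the flag handling; the cmd prefix and the non-boolean else branch are assumptions, while the boolean branch is taken from the hunk.

    def build_command(config):
        # Hypothetical launch prefix; the real one lives in the CI script.
        cmd = ["python", "-m", "fastdeploy.entrypoints.openai.api_server"]

        # Add configuration parameters.
        for key, value in config.items():
            # Boolean switches: --enable-* and --no-enable-* flags are
            # appended bare (no value) when truthy, which is what lets the
            # workflow's "--no-enable-prefix-caching": true take effect.
            if "--enable" in key or "--no-enable" in key:
                value = bool(value if isinstance(value, bool) else eval(value))
                if value:
                    cmd.append(key)
            else:
                cmd.extend([key, str(value)])
        return cmd

    # Example with part of the CI payload from the workflow hunk above:
    print(build_command({"--max-num-seqs": 32, "--no-enable-prefix-caching": True}))
    # [..., "--max-num-seqs", "32", "--no-enable-prefix-caching"]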