Merge vl execution path into normal execution path (#2829)

* merge vl model into gpu_model runner

Change-Id: I9f4691a3d5f135e8d72b1d58abcd15ef3aa3f2a6

* fix Chinese text

Change-Id: Ic7405109b984c21e076fb3b01ff6feb571d0119a

* fix the parameter parsing

Change-Id: I4cd62ee87c06220af580d91e347145d4394917fe

* fix the bug in online_inference

Change-Id: Idb111bb2114e83017c4050b2a68cf039c6d3c559

* polish code

Change-Id: I7d4194102c2f1b0743b74fbd5fc284eb8ef4d17c
This commit is contained in:
Zero Rains
2025-07-15 22:20:03 +08:00
committed by GitHub
parent 5fc659b900
commit e7bcbbab52
9 changed files with 441 additions and 1732 deletions

View File

@@ -990,8 +990,6 @@ class LLMEngine(object):
pd_cmd = pd_cmd + f" --log_dir {log_dir}"
worker_path = "../worker/worker_process.py"
if self.cfg.enable_mm:
worker_path = "../worker/vl_worker_process.py"
py_script = os.path.join(current_dir_path, worker_path)
ori_vocab_size = (
@@ -1030,7 +1028,9 @@ class LLMEngine(object):
f" --speculative_benchmark_mode {self.cfg.speculative_config.benchmark_mode}"
f" --max_capture_batch_size {self.cfg.max_capture_batch_size}"
f" --guided_decoding_backend {self.cfg.guided_decoding_backend}"
f" --load_strategy {self.cfg.model_config.load_strategy}")
f" --load_strategy {self.cfg.model_config.load_strategy}"
f" --enable_mm {self.cfg.enable_mm}")
worker_append_flag = {
"enable_expert_parallel":