Merge vl execution path into normal execution path (#2829)

* merge the VL model into gpu_model_runner

Change-Id: I9f4691a3d5f135e8d72b1d58abcd15ef3aa3f2a6

* fix Chinese

Change-Id: Ic7405109b984c21e076fb3b01ff6feb571d0119a

* fix parameter parsing

Change-Id: I4cd62ee87c06220af580d91e347145d4394917fe

* fix the bug in online_inference

Change-Id: Idb111bb2114e83017c4050b2a68cf039c6d3c559

* polish code

Change-Id: I7d4194102c2f1b0743b74fbd5fc284eb8ef4d17c
Author: Zero Rains
Date: 2025-07-15 22:20:03 +08:00
Committed by: GitHub
Parent: 5fc659b900
Commit: e7bcbbab52
9 changed files with 441 additions and 1732 deletions


@@ -18,7 +18,7 @@ from __future__ import annotations
 
 from dataclasses import dataclass, field
 from enum import Enum
-from typing import Literal, Optional, Union
+from typing import Literal, Optional
 
 from paddleformers.transformers.configuration_utils import PretrainedConfig
 from paddleformers.trl import llm_utils
@@ -89,6 +89,7 @@ class ModelConfig:
         self.max_model_len = 0
         self.dtype = ""
         self.enable_logprob = False
+        self.enable_mm = False
 
         for key, value in args.items():
             if hasattr(self, key):
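
For reference, below is a minimal sketch of how the constructor loop shown in the hunk above would pick up the new enable_mm default. The class name, call site, and the setattr in the loop body (which is truncated in the hunk) are assumptions for illustration, not the actual FastDeploy ModelConfig: any key in args that matches an existing attribute overrides its default, so passing enable_mm=True at construction time is what switches the merged GPU model runner onto the multimodal path.

class ModelConfigSketch:
    """Hypothetical stand-in mirroring the defaults and args loop from the diff."""

    def __init__(self, args: dict):
        # Defaults, as in the context lines of the hunk above.
        self.max_model_len = 0
        self.dtype = ""
        self.enable_logprob = False
        self.enable_mm = False  # new flag added by this commit

        # Copy only keys that already exist as attributes; the setattr
        # body is assumed, since the hunk cuts off after the hasattr check.
        for key, value in args.items():
            if hasattr(self, key):
                setattr(self, key, value)


cfg = ModelConfigSketch({"enable_mm": True, "dtype": "bfloat16", "unknown_key": 1})
assert cfg.enable_mm is True
assert not hasattr(cfg, "unknown_key")  # unrecognized keys are silently ignored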