[Metax] support default_v1 loader & thinking model (#4956)

Co-authored-by: plusNew001 <95567040+plusNew001@users.noreply.github.com>
This commit is contained in:
MingkunZhang
2025-11-12 16:32:26 +08:00
committed by GitHub
parent bde6e2f931
commit 9d9f5df8d0
4 changed files with 23 additions and 5 deletions

View File

@@ -363,8 +363,13 @@ def v1_loader_support(fd_config):
def _err_msg(msg: str) -> None:
    """Log *msg* plus a note that model loading falls back to the v0 loader.

    Purely a side-effect helper (writes via the module-level ``logger``);
    it returns nothing — the original ``-> str`` annotation was wrong,
    since the body has no ``return`` statement.
    """
    logger.info(msg + "; fallback to the v0 loader for model loading.")
if not (current_platform.is_cuda() or current_platform.is_xpu() or current_platform.is_iluvatar()):
_err_msg("v1loader currently only support backends gpu, xpu and iluvatar")
if not (
current_platform.is_cuda()
or current_platform.is_xpu()
or current_platform.is_iluvatar()
or current_platform.is_maca()
):
_err_msg("v1loader currently only support backends gpu, xpu, iluvatar and maca")
return False
if is_pre_sliced_weight(fd_config.model_config.model):