mirror of https://github.com/PaddlePaddle/FastDeploy.git
Sync v2.0 version of code to github repo
@@ -19,14 +19,31 @@ from abc import ABC, abstractmethod
 
 import paddle
 from paddle import nn
 
-from fastdeploy.config import LLMConfig, LoadConfig, ModelConfig
+from fastdeploy.config import FDConfig, LoadConfig, ModelConfig
+from fastdeploy.model_executor.models.ernie4_5_moe import \
+    Ernie4_5_PretrainedModel
+from fastdeploy.model_executor.models.ernie4_5_mtp import \
+    Ernie4_5_MTPPretrainedModel
+from fastdeploy.model_executor.models.model_base import ModelRegistry
+from fastdeploy.model_executor.models.qwen2 import Qwen2PretrainedModel
+from fastdeploy.model_executor.models.qwen3 import Qwen3PretrainedModel
+from fastdeploy.model_executor.models.qwen3moe import Qwen3MoePretrainedModel
+from fastdeploy.model_executor.models.utils import load_checkpoint
+
+MODEL_CLASSES = {
+    "Ernie4_5_MoeForCausalLM": Ernie4_5_PretrainedModel,
+    "Ernie4_5_MTPForCausalLM": Ernie4_5_MTPPretrainedModel,
+    "Qwen2ForCausalLM": Qwen2PretrainedModel,
+    "Qwen3ForCausalLM": Qwen3PretrainedModel,
+    "Qwen3MoeForCausalLM": Qwen3MoePretrainedModel,
+    "Ernie4_5_ForCausalLM": Ernie4_5_PretrainedModel
+}
 
 
-# TODO(gongshaotian): implement real interface to replace this
-def get_model(llm_config: LLMConfig) -> nn.Layer:
+def get_model_from_loader(fd_config: FDConfig) -> nn.Layer:
     """ load or download model """
-    model_path = llm_config.load_config.model_path
-    model = paddle.load(model_path, return_numpy=True)
+    model_loader = DefaultModelLoader(fd_config.load_config)
+    model = model_loader.load_model(fd_config)
     return model
 
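The hunk above adds a module-level MODEL_CLASSES table and replaces the stub get_model with get_model_from_loader, so model resolution is driven entirely by the architecture string stored in the model config. The sketch below is not part of the commit; it only shows the two lookups in isolation, and it assumes it runs in the same module as the code above (so MODEL_CLASSES is in scope) and that the model imports have already populated ModelRegistry.

from fastdeploy.model_executor.models.model_base import ModelRegistry

# Illustrative only: resolve both classes for one architecture string.
# "Qwen2ForCausalLM" is one of the keys defined in MODEL_CLASSES above;
# in the real flow the string comes from fd_config.model_config.architectures[0].
arch = "Qwen2ForCausalLM"
pretrained_cls = MODEL_CLASSES[arch]          # checkpoint-side class handed to load_checkpoint()
runnable_cls = ModelRegistry.get_class(arch)  # nn.Layer subclass instantiated with fd_config
print(pretrained_cls.__name__, runnable_cls.__name__)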
@@ -42,7 +59,7 @@ class BaseModelLoader(ABC):
         raise NotImplementedError
 
     @abstractmethod
-    def load_model(self, llm_config: LLMConfig) -> nn.Layer:
+    def load_model(self, fd_config: FDConfig) -> nn.Layer:
         """ Load a model with the given configurations."""
         raise NotImplementedError
 
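After this change every loader, abstract or concrete, takes an FDConfig rather than the old LLMConfig. As a purely illustrative example of the contract (not part of this commit), a hypothetical loader that restores weights from a single paddle-saved state dict could look like the sketch below; BaseModelLoader and the FastDeploy imports are the ones appearing in this file.

import paddle
from paddle import nn

from fastdeploy.config import FDConfig, ModelConfig
from fastdeploy.model_executor.models.model_base import ModelRegistry


class NaiveStateDictLoader(BaseModelLoader):  # BaseModelLoader is defined in this file
    """Illustrative loader: fills a registered model from one paddle.load() dict."""

    def download_model(self, model_config: ModelConfig) -> None:
        # Assume the weights are already available on local disk.
        pass

    def load_model(self, fd_config: FDConfig) -> nn.Layer:
        arch = fd_config.model_config.architectures[0]
        model = ModelRegistry.get_class(arch)(fd_config)
        # model_name_or_path is assumed here to point at a paddle-saved state dict.
        state_dict = paddle.load(fd_config.parallel_config.model_name_or_path,
                                 return_numpy=True)
        model.set_state_dict(state_dict)
        model.eval()
        return model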
@@ -56,5 +73,23 @@ class DefaultModelLoader(BaseModelLoader):
     def download_model(self, model_config: ModelConfig) -> None:
         pass
 
-    def load_model(self, llm_config: LLMConfig) -> nn.Layer:
-        pass
+    def load_model(self, fd_config: FDConfig) -> nn.Layer:
+        context = paddle.LazyGuard()
+        architectures = fd_config.model_config.architectures[0]
+
+        # TODO(gongshaotian): Now, only support safetensor
+
+        model_class = MODEL_CLASSES[architectures]
+        state_dict = load_checkpoint(
+            fd_config.parallel_config.model_name_or_path,
+            model_class,
+            fd_config.model_config,
+            return_numpy=True)
+        with context:
+            model_cls = ModelRegistry.get_class(architectures)
+            model = model_cls(fd_config)
+
+        model.eval()
+        model.set_state_dict(state_dict)
+
+        return model
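DefaultModelLoader.load_model now follows a two-phase pattern: the network is constructed under paddle.LazyGuard so parameter initialization is deferred, and the real weights are filled in afterwards with set_state_dict from the dict returned by load_checkpoint. The toy snippet below is not FastDeploy code; it illustrates that pattern on a single nn.Linear, assuming recent Paddle behavior for LazyGuard, and the shapes and hand-built numpy dict are invented stand-ins for a checkpoint.

import paddle
from paddle import nn

# Build the layer lazily: no eager parameter initialization happens here,
# which keeps construction cheap for very large models.
with paddle.LazyGuard():
    net = nn.Linear(1024, 1024)

# Stand-in for the numpy state dict that a checkpoint loader would return.
state_dict = {
    "weight": paddle.randn([1024, 1024]).numpy(),
    "bias": paddle.zeros([1024]).numpy(),
}

net.eval()                      # mirror the order used in load_model above
net.set_state_dict(state_dict)  # materializes the parameters from the dict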