FastDeploy/tests/model_loader/test_w4a8_model.py
"""
# Copyright (c) 2025 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
import os
import weakref

import pytest

from fastdeploy.engine.sampling_params import SamplingParams
from fastdeploy.entrypoints.llm import LLM

bash_path = os.getenv("MODEL_PATH")

# One pair of engine worker queue ports per model under test, so consecutive
# fixture instances never reuse a port.
FD_ENGINE_QUEUE_PORTS = [
    [9961, 9962],
    [9971, 9972],
    [9981, 9982],
    [9991, 9992],
]
FD_CACHE_QUEUE_PORT = int(os.getenv("FD_CACHE_QUEUE_PORT", 8333))

models = [
    "ernie-4_5-fake-w4a8-unpermuted",
    "ernie-4_5-fake-w4a8-permuted",
    "ernie-4_5-fake-w4afp8-unpermuted",
    "ernie-4_5-fake-w4afp8-permuted",
]

# Chinese prompt: "Explain '温故而知新'" (reviewing the old to learn the new).
prompts = ["解释下“温故而知新"]

@pytest.fixture(scope="module", params=models)
def llm(request):
    """LLM test fixture: one engine instance per model in `models`."""
    model_path = os.path.join(bash_path, request.param)
    try:
        port_index = models.index(request.param) % len(FD_ENGINE_QUEUE_PORTS)
        llm_instance = LLM(
            model=model_path,
            tensor_parallel_size=1,
            data_parallel_size=2,
            max_model_len=8192,
            num_gpu_blocks_override=1024,
            engine_worker_queue_port=FD_ENGINE_QUEUE_PORTS[port_index],
            cache_queue_port=FD_CACHE_QUEUE_PORT,
            load_choices="default",
            enable_expert_parallel=True,
        )
        # Hand out a weak proxy so the test cannot keep the engine alive
        # past fixture teardown.
        yield weakref.proxy(llm_instance)
    except Exception as e:
        pytest.fail(f"LLM initialization failed: {e}")

@pytest.mark.timeout(60)
def test_generation(llm):
    """Short-generation smoke test for each w4a8/w4afp8 model."""
    print(f"testing generation with model: {llm}")
    topp_params = SamplingParams(temperature=0.1, top_p=0, max_tokens=20)
    output = llm.generate(prompts=prompts, sampling_params=topp_params)
    assert output is not None
    print(output)
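
# Minimal local invocation sketch (the checkpoint path below is illustrative,
# not part of the test): point MODEL_PATH at the directory holding the fake
# w4a8/w4afp8 checkpoints listed in `models`, then run this module with
# pytest, e.g.
#
#   MODEL_PATH=/path/to/fake_models FD_CACHE_QUEUE_PORT=8333 \
#       python -m pytest -sv tests/model_loader/test_w4a8_model.py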