FastDeploy/tests/xpu_ci/test_w4a8.py


# Copyright (c) 2025 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
W4A8模式测试 - ERNIE-4.5-300B W4A8C8量化模型
测试配置:
- 模型: ERNIE-4.5-300B-A47B-W4A8C8-TP4-Paddle
- 量化: W4A8
- Tensor Parallel: 4
"""
import openai
import pytest
from conftest import get_model_path, get_port_num, print_logs_on_failure, start_server
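
# Judging by how they are used below, the conftest helpers behave as follows:
# get_port_num()/get_model_path() read the CI configuration, start_server()
# launches the FastDeploy service and returns True on success, and
# print_logs_on_failure() dumps the server logs when a case fails.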


def test_w4a8(xpu_env):
    """W4A8 quantization mode test."""
    print("\n============================ Starting W4A8 test! ============================")

    # Fetch test configuration
    port_num = get_port_num()
    model_path = get_model_path()

    # Build the server launch arguments
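    # All service ports are derived as offsets from one base port, presumably so
    # that CI cases running in parallel do not collide (the large cache-queue
    # offset is an existing quirk of this port layout).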
    server_args = [
        "--model",
        f"{model_path}/ERNIE-4.5-300B-A47B-W4A8C8-TP4-Paddle",
        "--port",
        str(port_num),
        "--engine-worker-queue-port",
        str(port_num + 1),
        "--metrics-port",
        str(port_num + 2),
        "--cache-queue-port",
        str(port_num + 47873),
        "--tensor-parallel-size",
        "4",
        "--num-gpu-blocks-override",
        "16384",
        "--max-model-len",
        "32768",
        "--max-num-seqs",
        "64",
        "--quantization",
        "W4A8",
    ]
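
    # For reference only: a rough sketch of the equivalent manual launch,
    # assuming FastDeploy's OpenAI-compatible serving entry point (the module
    # path below is an assumption, not taken from this test):
    #   python -m fastdeploy.entrypoints.openai.api_server \
    #       --model <model_path>/ERNIE-4.5-300B-A47B-W4A8C8-TP4-Paddle \
    #       --tensor-parallel-size 4 --quantization W4A8 ...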

    # Launch the server
    if not start_server(server_args):
        pytest.fail("Failed to start the server in W4A8 mode")

    # Run the test
    try:
        ip = "0.0.0.0"
        client = openai.Client(base_url=f"http://{ip}:{port_num}/v1", api_key="EMPTY_API_KEY")

        # Non-streaming chat completion
        response = client.chat.completions.create(
            model="default",
            messages=[
                {"role": "user", "content": "你好,你是谁?"},  # "Hello, who are you?"
            ],
            temperature=1,
            top_p=0,
            max_tokens=64,
            stream=False,
        )
        print(f"\nModel reply: {response.choices[0].message.content}")

        # Validate the response: the reply should identify the assistant. The
        # keywords stay in Chinese because the prompt (and hence the reply) is
        # Chinese: AI, ERNIE Bot, Xiaodu, Baidu, intelligent assistant.
        assert any(
            keyword in response.choices[0].message.content
            for keyword in ["人工智能", "文心一言", "小度", "百度", "智能助手"]
        ), f"Unexpected response content: {response.choices[0].message.content}"
        print("\nW4A8 test passed!")
    except Exception as e:
        print(f"\nW4A8 test failed: {str(e)}")
        print_logs_on_failure()
        pytest.fail(f"W4A8 test failed: {str(e)}")


if __name__ == "__main__":
    pytest.main([__file__, "-v", "-s"])