Update CI (#3474)
* update CI cases
* Merge upstream/develop and resolve directory rename conflict
* update deploy
.github/workflows/_accuracy_test.yml (1 change)
@@ -132,6 +132,7 @@ jobs:
  -v "${CACHE_DIR}/ConfigDir:/root/.config" \
  -e TZ="Asia/Shanghai" \
  --gpus '"device='"${DEVICES}"'"' ${docker_image} /bin/bash -xc '
+ #python -m pip install --pre paddlepaddle-gpu -i https://www.paddlepaddle.org.cn/packages/nightly/cu126/
  python -m pip install paddlepaddle-gpu==3.0.0.dev20250818 -i https://www.paddlepaddle.org.cn/packages/nightly/cu126/

  pip config set global.index-url https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple
.github/workflows/_base_test.yml (25 changes)
@@ -132,6 +132,7 @@ jobs:
  -v "${CACHE_DIR}/ConfigDir:/root/.config" \
  -e TZ="Asia/Shanghai" \
  --gpus '"device='"${DEVICES}"'"' ${docker_image} /bin/bash -xc '
+ #python -m pip install --pre paddlepaddle-gpu -i https://www.paddlepaddle.org.cn/packages/nightly/cu126/
  python -m pip install paddlepaddle-gpu==3.0.0.dev20250818 -i https://www.paddlepaddle.org.cn/packages/nightly/cu126/

  pip config set global.index-url https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple
@@ -155,7 +156,19 @@ jobs:
  -H "Content-Type: application/json" \
  -d "{\"--model\": \"/MODELDATA/ERNIE-4.5-0.3B-Paddle\"}"

- curl -X POST http://localhost:${FLASK_PORT}/wait_for_infer?timeout=90
+ check_service() {
+     local timeout=${1:-90}
+     local url="http://localhost:${FLASK_PORT}/wait_for_infer?timeout=${timeout}"
+     local resp
+
+     resp=$(curl -s -X POST "$url")
+
+     if echo "$resp" | grep -q "服务启动超时"; then
+         exit 8
+     fi
+ }
+
+ check_service 90
  popd

  pushd tests/ce/server
@@ -166,8 +179,16 @@ jobs:
  curl -X POST http://0.0.0.0:${FLASK_PORT}/switch \
      -H "Content-Type: application/json" \
      -d "{\"--model\": \"/MODELDATA/ERNIE-4.5-0.3B-Paddle\", \"--early-stop-config\": \"{\\\"enable_early_stop\\\":true, \\\"window_size\\\":6, \\\"threshold\\\":0.93}\"}"
- curl -X POST http://localhost:${FLASK_PORT}/wait_for_infer?timeout=90
+ check_service 90
  python -m pytest -sv test_repetition_early_stop.py || TEST_EXIT_CODE=1

+ curl -X POST http://0.0.0.0:${FLASK_PORT}/switch \
+     -H "Content-Type: application/json" \
+     -d "{\"--model\": \"/MODELDATA/ernie-4_5-21b-a3b-bf16-paddle\", \"--config\": \"21b_mtp.yaml\", \"--enable-logprob\": \"False\"}"
+ check_service 180
+ export TEMPLATE=TOKEN_NORMAL
+ python -m pytest -sv test_seed_usage.py -k "not test_seed_stream" || TEST_EXIT_CODE=1
+
  popd
  echo "TEST_EXIT_CODE=${TEST_EXIT_CODE}" >> /workspace/FastDeploy/exit_code.env
  '
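The two hunks above drive the case-deployment Flask service from tests/ce/deploy: POST /switch relaunches the inference server with new arguments, and check_service blocks on /wait_for_infer, exiting with code 8 when the response contains "服务启动超时" (service startup timed out). A minimal Python sketch of the same flow, only for readability; the port value and the plain-text response handling are assumptions, the endpoints and arguments come from the diff itself:

import os

import requests

FLASK_PORT = int(os.getenv("FLASK_PORT", "8080"))  # assumption: the workflow exports FLASK_PORT


def switch_model(args: dict) -> None:
    # Ask the deploy service to relaunch the server with new CLI arguments.
    requests.post(f"http://localhost:{FLASK_PORT}/switch", json=args).raise_for_status()


def check_service(timeout: int = 90) -> None:
    # Block until inference is ready or the service reports a startup timeout.
    resp = requests.post(f"http://localhost:{FLASK_PORT}/wait_for_infer", params={"timeout": timeout})
    if "服务启动超时" in resp.text:
        raise SystemExit(8)  # mirrors the shell helper's exit code


switch_model({"--model": "/MODELDATA/ernie-4_5-21b-a3b-bf16-paddle", "--config": "21b_mtp.yaml", "--enable-logprob": "False"})
check_service(180)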
.github/workflows/_build_linux.yml (1 change)
@@ -148,6 +148,7 @@ jobs:
  elif [[ "${PADDLEVERSION}" != "" ]];then
      python -m pip install paddlepaddle-gpu==${PADDLEVERSION} -i https://www.paddlepaddle.org.cn/packages/stable/cu126/
  else
+     #python -m pip install --pre paddlepaddle-gpu -i https://www.paddlepaddle.org.cn/packages/nightly/cu126/
      python -m pip install paddlepaddle-gpu==3.0.0.dev20250818 -i https://www.paddlepaddle.org.cn/packages/nightly/cu126/
  fi
.github/workflows/_logprob_test_linux.yml (1 change)
@@ -122,6 +122,7 @@ jobs:
  -v "${CACHE_DIR}/ConfigDir:/root/.config" \
  -e TZ="Asia/Shanghai" \
  --gpus '"device='"${DEVICES}"'"' ${docker_image} /bin/bash -xc '
+ #python -m pip install --pre paddlepaddle-gpu -i https://www.paddlepaddle.org.cn/packages/nightly/cu126/
  python -m pip install paddlepaddle-gpu==3.0.0.dev20250818 -i https://www.paddlepaddle.org.cn/packages/nightly/cu126/

  pip config set global.index-url https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple
.github/workflows/_pre_ce_test.yml (1 change)
@@ -129,6 +129,7 @@ jobs:
  --gpus "\"device=${DEVICES}\"" ${docker_image} /bin/bash -c '
  git config --global --add safe.directory /workspace/FastDeploy
  cd FastDeploy
+ #python -m pip install --pre paddlepaddle-gpu -i https://www.paddlepaddle.org.cn/packages/nightly/cu126/
  python -m pip install paddlepaddle-gpu==3.0.0.dev20250818 -i https://www.paddlepaddle.org.cn/packages/nightly/cu126/
  python -m pip install ${fd_wheel_url}
  bash scripts/run_pre_ce.sh
.github/workflows/_stable_test.yml (1 change)
@@ -138,6 +138,7 @@ jobs:
  -v "${CACHE_DIR}/ConfigDir:/root/.config" \
  -e TZ="Asia/Shanghai" \
  --gpus '"device='"${DEVICES}"'"' ${docker_image} /bin/bash -xc '
+ #python -m pip install --pre paddlepaddle-gpu -i https://www.paddlepaddle.org.cn/packages/nightly/cu126/
  python -m pip install paddlepaddle-gpu==3.0.0.dev20250818 -i https://www.paddlepaddle.org.cn/packages/nightly/cu126/

  pip config set global.index-url https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple
.github/workflows/_unit_test_coverage.yml (1 change)
@@ -144,6 +144,7 @@ jobs:
  git config --global --add safe.directory /workspace/FastDeploy
  cd FastDeploy
+ #python -m pip install --pre paddlepaddle-gpu -i https://www.paddlepaddle.org.cn/packages/nightly/cu126/
  python -m pip install paddlepaddle-gpu==3.0.0.dev20250818 -i https://www.paddlepaddle.org.cn/packages/nightly/cu126/

  pip config set global.extra-index-url https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple
@@ -17,6 +17,7 @@ pwd
  git config --global --add safe.directory /workspace1/FastDeploy

+ #python -m pip install --force-reinstall --pre paddlepaddle-gpu -i https://www.paddlepaddle.org.cn/packages/nightly/cu126/
  python -m pip install --force-reinstall paddlepaddle-gpu==3.0.0.dev20250818 -i https://www.paddlepaddle.org.cn/packages/nightly/cu126/
  python -m pip install --upgrade --force-reinstall -r requirements/unittest/requirements.txt
  bash tools/build_wheel.sh
tests/ce/deploy/21b_mtp.yaml (new file, 8 additions)
@@ -0,0 +1,8 @@
+ max_model_len: 32768
+ max_num_seqs: 128
+ tensor_parallel_size: 1
+ quantization: wint4
+ speculative_config:
+     method: mtp
+     num_speculative_tokens: 1
+     model: /MODELDATA/ernie-4_5-21b-a3b-bf16-paddle/mtp/
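For reference, this YAML maps directly onto api_server launch flags; the session fixture deleted further below builds the same command by hand. A rough sketch of that mapping, assuming PyYAML is available and that the deploy script forwards each key as a flag (the actual translation lives in the deploy script's build_command):

import json

import yaml  # assumption: PyYAML is installed in the CI image

with open("tests/ce/deploy/21b_mtp.yaml") as f:
    cfg = yaml.safe_load(f)

cmd = [
    "python", "-m", "fastdeploy.entrypoints.openai.api_server",
    "--model", "/MODELDATA/ernie-4_5-21b-a3b-bf16-paddle",
    "--max-model-len", str(cfg["max_model_len"]),
    "--max-num-seqs", str(cfg["max_num_seqs"]),
    "--tensor-parallel-size", str(cfg["tensor_parallel_size"]),
    "--quantization", cfg["quantization"],
    # speculative decoding settings are passed as a single JSON string
    "--speculative-config", json.dumps(cfg["speculative_config"]),
]
print(" ".join(cmd))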
@@ -79,6 +79,7 @@ def build_command(config):
      # Add configuration parameters
      for key, value in config.items():
          if "--enable" in key:
+             value = bool(value if isinstance(value, bool) else eval(value))
              if value:
                  cmd.append(key)
              else:
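The added coercion handles "--enable-*" flags that arrive as strings through the /switch JSON payload (for example "--enable-logprob": "False" in the workflow above): real booleans pass through, strings are evaluated so that "False" disables the flag instead of being truthy. A standalone sketch with a hypothetical helper name:

def coerce_enable_flag(value):
    # Same expression as the added line: keep booleans, eval() strings like "True"/"False".
    return bool(value if isinstance(value, bool) else eval(value))


assert coerce_enable_flag(True) is True
assert coerce_enable_flag("True") is True    # flag gets appended to the command
assert coerce_enable_flag("False") is False  # flag is skipped rather than treated as a truthy string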
@@ -189,6 +190,8 @@ def stop_server(signum=None, frame=None):
      # If the log directory exists, rename it to log_<timestamp>
      if os.path.isdir("./log"):
          os.rename("./log", "./log_{}".format(time.strftime("%Y%m%d%H%M%S")))
+     if os.path.exists("gemm_profiles.json"):
+         os.remove("gemm_profiles.json")
+
      if signum:
          sys.exit(0)
@@ -18,8 +18,18 @@ TOKEN_LOGPROB = {
      "max_tokens": 10000,
  }

+ TOKEN_NORMAL = {
+     "model": "default",
+     "temperature": 0,
+     "top_p": 0,
+     "seed": 33,
+     "stream": True,
+     "max_tokens": 10000,
+ }
+
  TEMPLATES = {
      "TOKEN_LOGPROB": TOKEN_LOGPROB,
+     "TOKEN_NORMAL": TOKEN_NORMAL,
      # "ANOTHER_TEMPLATE": ANOTHER_TEMPLATE
  }
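The tests below select a template via the TEMPLATE environment switch (export TEMPLATE=TOKEN_NORMAL in the workflow) and call core.build_request_payload, whose implementation is not part of this diff. A plausible sketch of the behaviour the tests assume, labelled as an assumption: template defaults merged with the per-test fields, the test data taking precedence.

def build_request_payload(template: dict, data: dict) -> dict:
    # Assumed behaviour of core.build_request_payload (not shown in this diff):
    # start from the selected template and override it with per-test fields.
    payload = dict(template)  # e.g. TOKEN_NORMAL: model, temperature, seed, stream, ...
    payload.update(data)      # e.g. messages/prompt, max_tokens, metadata
    return payload


payload = build_request_payload(
    TOKEN_NORMAL,
    {"messages": [{"role": "user", "content": "hi"}], "max_tokens": 50},
)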
@@ -5,6 +5,7 @@
  import json

+ import pytest
  from core import TEMPLATE, URL, build_request_payload, get_stream_chunks, send_request
@@ -44,7 +45,7 @@ def test_seed_stream():
      assert l1 == l2, f"top_p=0, 固定seed, logprobs[{idx}]不一致"


- def test_usage_stream():
+ def test_chat_usage_stream():
      """Test the payload max_tokens parameter"""
      data = {
          "messages": [
@@ -65,9 +66,103 @@ def test_usage_stream():
      usage = chunks[-1]["usage"]
      total_tokens = usage["completion_tokens"] + usage["prompt_tokens"]
-     assert data["max_tokens"] >= usage["completion_tokens"], "completion_tokens大于max_tokens"
-     assert data["metadata"]["min_tokens"] <= usage["completion_tokens"], "completion_tokens小于min_tokens"
-     assert usage["total_tokens"] == total_tokens, "total_tokens不等于prompt_tokens + completion_tokens"
+     assert data["max_tokens"] >= usage["completion_tokens"], f"completion_tokens大于max_tokens, usage: {usage}"
+     assert (
+         data["metadata"]["min_tokens"] <= usage["completion_tokens"]
+     ), f"completion_tokens小于min_tokens, usage: {usage}"
+     assert (
+         usage["total_tokens"] == total_tokens
+     ), f"total_tokens不等于prompt_tokens + completion_tokens, usage: {usage}"
+
+
+ def test_chat_usage_non_stream():
+     """Test non-streaming usage"""
+     data = {
+         "messages": [
+             {"role": "system", "content": "You are a helpful assistant."},
+             {"role": "user", "content": "牛顿的三大运动定律是什么?"},
+         ],
+         "max_tokens": 50,
+         "stream": False,
+         "metadata": {"min_tokens": 10},
+     }
+
+     payload = build_request_payload(TEMPLATE, data)
+
+     response = send_request(url=URL, payload=payload).json()
+     # print(response)
+     # chunks = get_stream_chunks(response)
+     # for idx, chunk in enumerate(chunks):
+     #     print(f"\nchunk[{idx}]:\n{json.dumps(chunk, indent=2, ensure_ascii=False)}")
+
+     usage = response["usage"]
+     total_tokens = usage["completion_tokens"] + usage["prompt_tokens"]
+     assert data["max_tokens"] >= usage["completion_tokens"], f"completion_tokens大于max_tokens, usage: {usage}"
+     assert (
+         data["metadata"]["min_tokens"] <= usage["completion_tokens"]
+     ), f"completion_tokens小于min_tokens, usage: {usage}"
+     assert (
+         usage["total_tokens"] == total_tokens
+     ), f"total_tokens不等于prompt_tokens + completion_tokens, usage: {usage}"
+
+
+ @pytest.mark.skip(reason="修复后打开")
+ def test_non_chat_usage_stream():
+     """Test completions streaming usage"""
+     data = {
+         "prompt": "牛顿的三大运动定律是什么?",
+         "max_tokens": 50,
+         "stream": True,
+         "stream_options": {"include_usage": True, "continuous_usage_stats": True},
+         "metadata": {"min_tokens": 10},
+     }
+     completion_url = URL.replace("chat/completions", "completions")
+
+     payload = build_request_payload(TEMPLATE, data)
+
+     response = send_request(url=completion_url, payload=payload, stream=True)
+     chunks = get_stream_chunks(response)
+     # for idx, chunk in enumerate(chunks):
+     #     print(f"\nchunk[{idx}]:\n{json.dumps(chunk, indent=2, ensure_ascii=False)}")
+
+     usage = chunks[-1]["usage"]
+     total_tokens = usage["completion_tokens"] + usage["prompt_tokens"]
+     assert data["max_tokens"] >= usage["completion_tokens"], f"completion_tokens大于max_tokens, usage: {usage}"
+     assert (
+         data["metadata"]["min_tokens"] <= usage["completion_tokens"]
+     ), f"completion_tokens小于min_tokens, usage: {usage}"
+     assert (
+         usage["total_tokens"] == total_tokens
+     ), f"total_tokens不等于prompt_tokens + completion_tokens, usage: {usage}"
+
+
+ def test_non_chat_usage_non_stream():
+     """Test completions non-streaming usage"""
+     data = {
+         "prompt": "牛顿的三大运动定律是什么?",
+         "max_tokens": 50,
+         "stream": False,
+         "metadata": {"min_tokens": 10},
+     }
+     completion_url = URL.replace("chat/completions", "completions")
+
+     payload = build_request_payload(TEMPLATE, data)
+
+     response = send_request(url=completion_url, payload=payload).json()
+     # print(response)
+     # chunks = get_stream_chunks(response)
+     # for idx, chunk in enumerate(chunks):
+     #     print(f"\nchunk[{idx}]:\n{json.dumps(chunk, indent=2, ensure_ascii=False)}")
+
+     usage = response["usage"]
+     total_tokens = usage["completion_tokens"] + usage["prompt_tokens"]
+     assert data["max_tokens"] >= usage["completion_tokens"], f"completion_tokens大于max_tokens, usage: {usage}"
+     assert (
+         data["metadata"]["min_tokens"] <= usage["completion_tokens"]
+     ), f"completion_tokens小于min_tokens, usage: {usage}"
+     assert (
+         usage["total_tokens"] == total_tokens
+     ), f"total_tokens不等于prompt_tokens + completion_tokens, usage: {usage}"
+
+
  if __name__ == "__main__":
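These tests also rely on core.get_stream_chunks and core.send_request, which are outside this diff. A sketch of the chunk parsing the assertions assume, under the assumption of a standard SSE stream of "data: {...}" lines terminated by "data: [DONE]":

import json


def get_stream_chunks(response):
    # Assumed shape of core.get_stream_chunks: parse "data: ..." SSE lines from a
    # streaming requests.Response into dicts and drop the [DONE] sentinel; the
    # usage tests above only read chunks[-1]["usage"] from the result.
    chunks = []
    for line in response.iter_lines(decode_unicode=True):
        if not line or not line.startswith("data: "):
            continue
        body = line[len("data: "):]
        if body.strip() == "[DONE]":
            break
        chunks.append(json.loads(body))
    return chunks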
@@ -1,386 +0,0 @@
- # Copyright (c) 2025 PaddlePaddle Authors. All Rights Reserved.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- #     http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
-
- import json
- import os
- import re
- import signal
- import socket
- import subprocess
- import sys
- import time
-
- import openai
- import pytest
- import requests
-
- # Read ports from environment variables; use default values if not set
- FD_API_PORT = int(os.getenv("FD_API_PORT", 8188))
- FD_ENGINE_QUEUE_PORT = int(os.getenv("FD_ENGINE_QUEUE_PORT", 8133))
- FD_METRICS_PORT = int(os.getenv("FD_METRICS_PORT", 8233))
-
- # List of ports to clean before and after tests
- PORTS_TO_CLEAN = [FD_API_PORT, FD_ENGINE_QUEUE_PORT, FD_METRICS_PORT]
-
-
- def is_port_open(host: str, port: int, timeout=1.0):
-     """
-     Check if a TCP port is open on the given host.
-     Returns True if connection succeeds, False otherwise.
-     """
-     try:
-         with socket.create_connection((host, port), timeout):
-             return True
-     except Exception:
-         return False
-
-
- def kill_process_on_port(port: int):
-     """
-     Kill processes that are listening on the given port.
-     Uses `lsof` to find process ids and sends SIGKILL.
-     """
-     try:
-         output = subprocess.check_output(f"lsof -i:{port} -t", shell=True).decode().strip()
-         for pid in output.splitlines():
-             os.kill(int(pid), signal.SIGKILL)
-             print(f"Killed process on port {port}, pid={pid}")
-     except subprocess.CalledProcessError:
-         pass
-
-
- def clean_ports():
-     """
-     Kill all processes occupying the ports listed in PORTS_TO_CLEAN.
-     """
-     for port in PORTS_TO_CLEAN:
-         kill_process_on_port(port)
-
-
- @pytest.fixture(scope="session", autouse=True)
- def setup_and_run_server():
-     """
-     Pytest fixture that runs once per test session:
-     - Cleans ports before tests
-     - Starts the API server as a subprocess
-     - Waits for server port to open (up to 30 seconds)
-     - Tears down server after all tests finish
-     """
-     print("Pre-test port cleanup...")
-     clean_ports()
-
-     base_path = os.getenv("MODEL_PATH")
-     if base_path:
-         model_path = os.path.join(base_path, "ernie-4_5-21b-a3b-bf16-paddle")
-     else:
-         model_path = "./ernie-4_5-21b-a3b-bf16-paddle"
-
-     mtp_model_path = os.path.join(model_path, "mtp")
-     mtp_mode_str = json.dumps({"method": "mtp", "num_speculative_tokens": 1, "model": mtp_model_path})
-
-     log_path = "server.log"
-     cmd = [
-         sys.executable,
-         "-m",
-         "fastdeploy.entrypoints.openai.api_server",
-         "--model",
-         model_path,
-         "--port",
-         str(FD_API_PORT),
-         "--tensor-parallel-size",
-         "1",
-         "--engine-worker-queue-port",
-         str(FD_ENGINE_QUEUE_PORT),
-         "--metrics-port",
-         str(FD_METRICS_PORT),
-         "--max-model-len",
-         "32768",
-         "--max-num-seqs",
-         "128",
-         "--quantization",
-         "wint4",
-         "--speculative-config",
-         mtp_mode_str,
-     ]
-
-     # Start subprocess in new process group
-     with open(log_path, "w") as logfile:
-         process = subprocess.Popen(
-             cmd,
-             stdout=logfile,
-             stderr=subprocess.STDOUT,
-             start_new_session=True,  # Enables killing full group via os.killpg
-         )
-
-     # Wait up to 300 seconds for API server to be ready
-     for _ in range(300):
-         if is_port_open("127.0.0.1", FD_API_PORT):
-             print(f"API server is up on port {FD_API_PORT}")
-             break
-         time.sleep(1)
-     else:
-         print("[TIMEOUT] API server failed to start in 5 minutes. Cleaning up...")
-         try:
-             os.killpg(process.pid, signal.SIGTERM)
-         except Exception as e:
-             print(f"Failed to kill process group: {e}")
-         raise RuntimeError(f"API server did not start on port {FD_API_PORT}")
-
-     yield  # Run tests
-
-     print("\n===== Post-test server cleanup... =====")
-     try:
-         os.killpg(process.pid, signal.SIGTERM)
-         print(f"API server (pid={process.pid}) terminated")
-     except Exception as e:
-         print(f"Failed to terminate API server: {e}")
-
-
- @pytest.fixture(scope="session")
- def api_url(request):
-     """
-     Returns the API endpoint URL for chat completions.
-     """
-     return f"http://0.0.0.0:{FD_API_PORT}/v1/chat/completions"
-
-
- @pytest.fixture(scope="session")
- def metrics_url(request):
-     """
-     Returns the metrics endpoint URL.
-     """
-     return f"http://0.0.0.0:{FD_METRICS_PORT}/metrics"
-
-
- @pytest.fixture
- def headers():
-     """
-     Returns common HTTP request headers.
-     """
-     return {"Content-Type": "application/json"}
-
-
- @pytest.fixture
- def consistent_payload():
-     """
-     Returns a fixed payload for consistency testing,
-     including a fixed random seed and temperature.
-     """
-     return {
-         "messages": [{"role": "user", "content": "用一句话介绍 PaddlePaddle"}],
-         "temperature": 0.9,
-         "top_p": 0,  # fix top_p to reduce randomness
-         "seed": 13,  # fixed random seed
-     }
-
-
- # ==========================
- # Helper function to calculate difference rate between two texts
- # ==========================
- def calculate_diff_rate(text1, text2):
-     """
-     Calculate the difference rate between two strings
-     based on the normalized Levenshtein edit distance.
-     Returns a float in [0,1], where 0 means identical.
-     """
-     if text1 == text2:
-         return 0.0
-
-     len1, len2 = len(text1), len(text2)
-     dp = [[0] * (len2 + 1) for _ in range(len1 + 1)]
-
-     for i in range(len1 + 1):
-         for j in range(len2 + 1):
-             if i == 0 or j == 0:
-                 dp[i][j] = i + j
-             elif text1[i - 1] == text2[j - 1]:
-                 dp[i][j] = dp[i - 1][j - 1]
-             else:
-                 dp[i][j] = 1 + min(dp[i - 1][j], dp[i][j - 1], dp[i - 1][j - 1])
-
-     edit_distance = dp[len1][len2]
-     max_len = max(len1, len2)
-     return edit_distance / max_len if max_len > 0 else 0.0
-
-
- # ==========================
- # Consistency test for repeated runs with fixed payload
- # ==========================
- def test_consistency_between_runs(api_url, headers, consistent_payload):
-     """
-     Test that two runs with the same fixed input produce similar outputs.
-     """
-     # First request
-     resp1 = requests.post(api_url, headers=headers, json=consistent_payload)
-     assert resp1.status_code == 200
-     result1 = resp1.json()
-     content1 = result1["choices"][0]["message"]["content"]
-
-     # Second request
-     resp2 = requests.post(api_url, headers=headers, json=consistent_payload)
-     assert resp2.status_code == 200
-     result2 = resp2.json()
-     content2 = result2["choices"][0]["message"]["content"]
-
-     # Calculate difference rate
-     diff_rate = calculate_diff_rate(content1, content2)
-
-     # Verify that the difference rate is below the threshold
-     assert diff_rate < 0.05, f"Output difference too large ({diff_rate:.4%})"
-
-
- # ==========================
- # OpenAI Client chat.completions Test
- # ==========================
-
-
- @pytest.fixture
- def openai_client():
-     ip = "0.0.0.0"
-     service_http_port = str(FD_API_PORT)
-     client = openai.Client(
-         base_url=f"http://{ip}:{service_http_port}/v1",
-         api_key="EMPTY_API_KEY",
-     )
-     return client
-
-
- # Non-streaming test
- def test_non_streaming_chat(openai_client):
-     """
-     Test non-streaming chat functionality with the local service
-     """
-     response = openai_client.chat.completions.create(
-         model="default",
-         messages=[
-             {"role": "system", "content": "You are a helpful AI assistant."},
-             {"role": "user", "content": "List 3 countries and their capitals."},
-         ],
-         temperature=1,
-         max_tokens=1024,
-         stream=False,
-     )
-
-     assert hasattr(response, "choices")
-     assert len(response.choices) > 0
-     assert hasattr(response.choices[0], "message")
-     assert hasattr(response.choices[0].message, "content")
-
-
- # Streaming test
- def test_streaming_chat(openai_client, capsys):
-     """
-     Test streaming chat functionality with the local service
-     """
-     response = openai_client.chat.completions.create(
-         model="default",
-         messages=[
-             {"role": "system", "content": "You are a helpful AI assistant."},
-             {"role": "user", "content": "List 3 countries and their capitals."},
-             {
-                 "role": "assistant",
-                 "content": "China(Beijing), France(Paris), Australia(Canberra).",
-             },
-             {"role": "user", "content": "OK, tell more."},
-         ],
-         temperature=1,
-         max_tokens=1024,
-         stream=True,
-     )
-
-     output = []
-     for chunk in response:
-         if hasattr(chunk.choices[0], "delta") and hasattr(chunk.choices[0].delta, "content"):
-             output.append(chunk.choices[0].delta.content)
-     assert len(output) > 2
-
-
- # ==========================
- # OpenAI Client completions Test
- # ==========================
-
-
- def test_non_streaming(openai_client):
-     """
-     Test non-streaming chat functionality with the local service
-     """
-     response = openai_client.completions.create(
-         model="default",
-         prompt="Hello, how are you?",
-         temperature=1,
-         max_tokens=1024,
-         stream=False,
-     )
-
-     # Assertions to check the response structure
-     assert hasattr(response, "choices")
-     assert len(response.choices) > 0
-
-
- def test_streaming(openai_client, capsys):
-     """
-     Test streaming functionality with the local service
-     """
-     response = openai_client.completions.create(
-         model="default",
-         prompt="Hello, how are you?",
-         temperature=1,
-         max_tokens=1024,
-         stream=True,
-     )
-
-     # Collect streaming output
-     output = []
-     for chunk in response:
-         output.append(chunk.choices[0].text)
-     assert len(output) > 0
-
-
- def test_profile_reset_block_num():
-     """Test the profile reset_block_num feature; the diff from baseline must stay within 5%"""
-     log_file = "./log/config.log"
-     baseline = 30065
-
-     if not os.path.exists(log_file):
-         pytest.fail(f"Log file not found: {log_file}")
-
-     with open(log_file, "r") as f:
-         log_lines = f.readlines()
-
-     target_line = None
-     for line in log_lines:
-         if "Reset block num" in line:
-             target_line = line.strip()
-             break
-
-     if target_line is None:
-         pytest.fail("日志中没有Reset block num信息")
-
-     match = re.search(r"total_block_num:(\d+)", target_line)
-     if not match:
-         pytest.fail(f"Failed to extract total_block_num from line: {target_line}")
-
-     try:
-         actual_value = int(match.group(1))
-     except ValueError:
-         pytest.fail(f"Invalid number format: {match.group(1)}")
-
-     lower_bound = baseline * (1 - 0.05)
-     upper_bound = baseline * (1 + 0.05)
-     print(f"Reset total_block_num: {actual_value}. baseline: {baseline}")
-
-     assert lower_bound <= actual_value <= upper_bound, (
-         f"Reset total_block_num {actual_value} 与 baseline {baseline} diff需要在5%以内"
-         f"Allowed range: [{lower_bound:.1f}, {upper_bound:.1f}]"
-     )