Add more base chat cases (#3203)

* add test base class

* fix codestyle

* fix codestyle

* add base chat
Divano
2025-08-05 10:24:12 +08:00
committed by GitHub
parent fe540f6caa
commit 88596c0c63
3 changed files with 61 additions and 53 deletions


@@ -9,7 +9,7 @@ some basic check for fd web api
 import json
 
-from core import TEMPLATE, URL, build_request_payload, send_request
+from core import TEMPLATE, URL, build_request_payload, get_token_list, send_request
 
 
 def test_stream_response():
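Note: the assertions added below rely on the get_token_list helper newly imported from core; the core changes belong to another file in this commit and are not shown here. A rough, hypothetical sketch of such a helper, assuming an OpenAI-style chat completion response with per-token logprobs (the real implementation in core may read different fields):

def get_token_list(resp: dict) -> list:
    """Hypothetical sketch of the helper imported from core: collect the
    generated token strings from a chat completion response, assuming an
    OpenAI-compatible layout with logprobs enabled."""
    logprobs = resp["choices"][0].get("logprobs") or {}
    return [item.get("token", "") for item in logprobs.get("content", [])]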
@@ -76,9 +76,12 @@ def test_logprobs_enabled():
 def test_stop_sequence():
     data = {
         "stream": False,
-        "stop": ["果冻"],
+        "stop": ["。"],
         "messages": [
-            {"role": "user", "content": "你要严格按照我接下来的话输出,输出冒号后面的内容,请输出:这是第一段。果冻这是第二段啦啦啦啦啦。"},
+            {
+                "role": "user",
+                "content": "你要严格按照我接下来的话输出,输出冒号后面的内容,请输出:这是第一段。这是第二段啦啦啦啦啦。",
+            },
         ],
         "max_tokens": 20,
         "top_p": 0,
@@ -86,8 +89,11 @@ def test_stop_sequence():
     payload = build_request_payload(TEMPLATE, data)
     resp = send_request(URL, payload).json()
     content = resp["choices"][0]["message"]["content"]
+    token_list = get_token_list(resp)
     print("截断输出:", content)
-    assert "第二段" not in content
+    assert "第二段" not in token_list
+    assert "。" in token_list, "没有找到。符号"
 
 
 def test_sampling_parameters():
@@ -125,7 +131,7 @@ def test_multi_turn_conversation():
 def test_bad_words_filtering():
-    banned_tokens = ["", ""]
+    banned_tokens = [""]
 
     data = {
         "stream": False,
@@ -140,36 +146,14 @@ def test_bad_words_filtering():
     payload = build_request_payload(TEMPLATE, data)
     response = send_request(URL, payload).json()
     content = response["choices"][0]["message"]["content"]
     print("生成内容:", content)
+    token_list = get_token_list(response)
 
     for word in banned_tokens:
-        assert word not in content, f"bad_word '{word}' 不应出现在生成结果中"
+        assert word not in token_list, f"bad_word '{word}' 不应出现在生成结果中"
 
     print("test_bad_words_filtering 通过:生成结果未包含被禁词")
 
-    data = {
-        "stream": False,
-        "messages": [
-            {"role": "system", "content": "你是一个助手,回答简洁清楚"},
-            {"role": "user", "content": "请输出冒号后面的字,一模一样: 我爱吃果冻,苹果,香蕉,和荔枝呀呀呀"},
-        ],
-        "top_p": 0,
-        "max_tokens": 69,
-        # "bad_words": banned_tokens,
-    }
-    payload = build_request_payload(TEMPLATE, data)
-    response = send_request(URL, payload).json()
-    content = response["choices"][0]["message"]["content"]
-    print("生成内容:", content)
-    for word in banned_tokens:
-        assert word not in content, f"bad_word '{word}' 不应出现在生成结果中"
-    print("test_bad_words_filtering 通过:生成结果未包含被禁词")
-    print("test_bad_words_filtering 正例验证通过")
 
 
 def test_bad_words_filtering1():
@@ -195,8 +179,10 @@ def test_bad_words_filtering1():
     for word in banned_tokens:
         assert word not in content, f"bad_word '{word}' 不应出现在生成结果中"
-    print("test_bad_words_filtering 通过:生成结果未包含被禁词")
-    word = "呀呀"
+    print("test_bad_words_filtering1 通过:生成结果未包含被禁词")
+
+    # 正例验证
+    word = ""
     data = {
         "stream": False,
         "messages": [
@@ -212,7 +198,7 @@ def test_bad_words_filtering1():
     content = response["choices"][0]["message"]["content"]
     print("生成内容:", content)
     token_list = get_token_list(response)
-    assert word in token_list, f"'{word}' 应出现在生成结果中"
+    assert word in content, f" '{word}' 应出现在生成结果中"
 
-    print("test_bad_words_filtering 通过:生成结果未包含被禁词")
+    print("test_bad_words_filtering1 正例验证通过")