Mirror of https://github.com/PaddlePaddle/FastDeploy.git, synced 2025-12-24 13:28:13 +08:00
[CI] add output for last_token in test_streaming_with_stop_str (#5170)
Some checks failed
CE Compile Job / ce_job_pre_check (push) Has been cancelled
CE Compile Job / print_ce_job_pre_check_outputs (push) Has been cancelled
CE Compile Job / FD-Clone-Linux (push) Has been cancelled
CE Compile Job / Show Code Archive Output (push) Has been cancelled
CE Compile Job / BUILD_SM8090 (push) Has been cancelled
CE Compile Job / BUILD_SM8689 (push) Has been cancelled
CE Compile Job / CE_UPLOAD (push) Has been cancelled
Deploy GitHub Pages / deploy (push) Has been cancelled
@@ -385,14 +385,14 @@ def test_streaming_with_stop_str(openai_client):
         messages=[{"role": "user", "content": "Hello, how are you?"}],
         temperature=1,
         max_tokens=5,
-        extra_body={"include_stop_str_in_output": True},
+        extra_body={"min_tokens": 1, "include_stop_str_in_output": True},
         stream=True,
     )
     # Assertions to check the response structure
     last_token = ""
     for chunk in response:
         last_token = chunk.choices[0].delta.content
-    assert last_token.endswith("</s>")
+    assert last_token.endswith("</s>"), f"last_token did not end with '</s>': {last_token!r}"

     response = openai_client.chat.completions.create(
         model="default",
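Read straight through, the updated snippet looks like the following — a minimal consolidated sketch, assuming the `openai_client` fixture from the surrounding test file and a model whose stop string is `</s>`:

    response = openai_client.chat.completions.create(
        model="default",
        messages=[{"role": "user", "content": "Hello, how are you?"}],
        temperature=1,
        max_tokens=5,
        # min_tokens=1 forces at least one generated token, so the stream
        # always carries content for last_token to capture.
        extra_body={"min_tokens": 1, "include_stop_str_in_output": True},
        stream=True,
    )
    # Keep only the most recent delta; with include_stop_str_in_output=True
    # the final delta should end with the stop string.
    last_token = ""
    for chunk in response:
        last_token = chunk.choices[0].delta.content
    # The added f-string message is the point of this commit: on failure,
    # pytest now prints the offending token instead of a bare AssertionError.
    assert last_token.endswith("</s>"), f"last_token did not end with '</s>': {last_token!r}"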
@@ -539,14 +539,14 @@ def test_streaming_with_stop_str(openai_client):
         messages=[{"role": "user", "content": "Hello, how are you?"}],
         temperature=1,
         max_tokens=5,
-        extra_body={"include_stop_str_in_output": True},
+        extra_body={"min_tokens": 1, "include_stop_str_in_output": True},
         stream=True,
     )
     # Assertions to check the response structure
     last_token = ""
     for chunk in response:
         last_token = chunk.choices[0].delta.content
-    assert last_token.endswith("</s>")
+    assert last_token.endswith("</s>"), f"last_token did not end with '</s>': {last_token!r}"

     response = openai_client.chat.completions.create(
         model="default",
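For context on the `extra_body` change in both hunks: the openai-python SDK merges `extra_body` keys verbatim into the request payload, which is how server-side knobs such as `min_tokens` and `include_stop_str_in_output` reach FastDeploy's OpenAI-compatible endpoint even though they are not part of the standard API. A hedged sketch outside the test fixture — the base URL and API key are assumptions for a local deployment:

    from openai import OpenAI

    # Assumed local FastDeploy endpoint; adjust host/port to your deployment.
    client = OpenAI(base_url="http://localhost:8000/v1", api_key="EMPTY")

    stream = client.chat.completions.create(
        model="default",
        messages=[{"role": "user", "content": "Hello, how are you?"}],
        max_tokens=5,
        # Non-standard fields go through extra_body; the SDK forwards them
        # to the server as top-level JSON fields, unchanged.
        extra_body={"min_tokens": 1, "include_stop_str_in_output": True},
        stream=True,
    )
    for chunk in stream:
        print(repr(chunk.choices[0].delta.content))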