From 80db7fce058e00bfb9fbbbed28e93beb1082700a Mon Sep 17 00:00:00 2001
From: zhuzixuan
Date: Wed, 27 Aug 2025 15:29:01 +0800
Subject: [PATCH] [Bugfix] Fix the significant performance degradation of the
 0.3B model on branch 2.1 (#3624)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

* Restore the async methods.

[BugFix] Support echo in the completion API (#3245)

* wenxin-tools-511: fix the issue that v1/completion could not echo the prompt.
* Support echo for multiple prompts.
* Support streaming echo for multiple prompts.
* Add unit tests for echo support in the completion API.
* pre-commit
* Remove redundant test files.
* Fix the unit test for completion echo support.
* Add unit test files.
* Add unit tests.
* unittest
* Add unit tests.
* Fix unit tests.
* Remove unnecessary asserts.
* Re-submit.
* Update test methods.
* ut
* Verify the unit test approach.
* Verify the unit test approach.
* Verify the unit test approach (3).
* Refine the unit test code to narrow its scope.
* Refine the unit test code to narrow its scope (2).
* Refine the unit test code to narrow its scope (3).
* support 'echo' in chat/completion.
* update
* update
* update
* update
* update
* update
* Add unit tests for token ids.
* update
* Fix index error.
* Fix index error.
* [Bugfix] Significant performance degradation of 0.3B model on branch 2.1
---
 .../entrypoints/openai/serving_completion.py |  5 +--
 .../openai/test_completion_echo.py           | 42 +++++++++++--------
 2 files changed, 26 insertions(+), 21 deletions(-)

diff --git a/fastdeploy/entrypoints/openai/serving_completion.py b/fastdeploy/entrypoints/openai/serving_completion.py
index 85d51ed01..9372b8e4d 100644
--- a/fastdeploy/entrypoints/openai/serving_completion.py
+++ b/fastdeploy/entrypoints/openai/serving_completion.py
@@ -240,7 +240,7 @@ class OpenAIServingCompletion:
             dealer.close()
             self.engine_client.semaphore.release()
 
-    def _echo_back_prompt(self, request, res, idx):
+    async def _echo_back_prompt(self, request, res, idx):
         if res["outputs"].get("send_idx", -1) == 0 and request.echo:
             if isinstance(request.prompt, list):
                 prompt_text = request.prompt[idx]
@@ -346,7 +346,7 @@ class OpenAIServingCompletion:
                     else:
                         arrival_time = res["metrics"]["arrival_time"] - inference_start_time[idx]
 
-                    self._echo_back_prompt(request, res, idx)
+                    await self._echo_back_prompt(request, res, idx)
                     output = res["outputs"]
                     output_top_logprobs = output["top_logprobs"]
                     logprobs_res: Optional[CompletionLogprobs] = None
@@ -471,7 +471,6 @@ class OpenAIServingCompletion:
             else:
                 token_ids = output["token_ids"]
                 output_text = output["text"]
-
             finish_reason = self.calc_finish_reason(request.max_tokens, final_res["output_token_ids"], output, False)
 
             choice_data = CompletionResponseChoice(
diff --git a/test/entrypoints/openai/test_completion_echo.py b/test/entrypoints/openai/test_completion_echo.py
index 6f733491b..f96901e61 100644
--- a/test/entrypoints/openai/test_completion_echo.py
+++ b/test/entrypoints/openai/test_completion_echo.py
@@ -7,13 +7,22 @@ from fastdeploy.entrypoints.openai.serving_completion import (
 )
 
 
-class TestCompletionEcho(unittest.TestCase):
+class YourClass:
+    async def _1(self, a, b, c):
+        if b["outputs"].get("send_idx", -1) == 0 and a.echo:
+            if isinstance(a.prompt, list):
+                text = a.prompt[c]
+            else:
+                text = a.prompt
+            b["outputs"]["text"] = text + (b["outputs"]["text"] or "")
+
+
+class TestCompletionEcho(unittest.IsolatedAsyncioTestCase):
     def setUp(self):
         self.mock_engine = MagicMock()
         self.completion_handler = None
 
     def test_single_prompt_non_streaming(self):
-        """测试单prompt非流式响应"""
         self.completion_handler = OpenAIServingCompletion(self.mock_engine, pid=123, ips=None, max_waiting_time=30)
 
         request = CompletionRequest(prompt="test prompt", max_tokens=10, echo=True, logprobs=1)
@@ -42,8 +51,7 @@ class TestCompletionEcho(unittest.TestCase):
         self.assertEqual(response.choices[0].text, "test prompt generated text")
 
-    def test_echo_back_prompt_and_streaming(self):
-        """测试_echo_back_prompt方法和流式响应的prompt拼接逻辑"""
+    async def test_echo_back_prompt_and_streaming(self):
         self.completion_handler = OpenAIServingCompletion(self.mock_engine, pid=123, ips=None, max_waiting_time=30)
 
         request = CompletionRequest(prompt="test prompt", max_tokens=10, stream=True, echo=True)
@@ -57,7 +65,7 @@ class TestCompletionEcho(unittest.TestCase):
 
             mock_echo.side_effect = mock_echo_side_effect
 
-            self.completion_handler._echo_back_prompt(request, mock_response, 0)
+            await self.completion_handler._echo_back_prompt(request, mock_response, 0)
 
             mock_echo.assert_called_once_with(request, mock_response, 0)
 
@@ -65,7 +73,6 @@ class TestCompletionEcho(unittest.TestCase):
         self.assertEqual(request.prompt, "test prompt")
 
     def test_multi_prompt_non_streaming(self):
-        """测试多prompt非流式响应"""
         self.completion_handler = OpenAIServingCompletion(self.mock_engine, pid=123, ips=None, max_waiting_time=30)
 
         request = CompletionRequest(prompt=["prompt1", "prompt2"], max_tokens=10, echo=True)
@@ -97,7 +104,7 @@ class TestCompletionEcho(unittest.TestCase):
         self.assertEqual(response.choices[0].text, "prompt1 response1")
         self.assertEqual(response.choices[1].text, "prompt2 response2")
 
-    def test_multi_prompt_streaming(self):
+    async def test_multi_prompt_streaming(self):
         self.completion_handler = OpenAIServingCompletion(self.mock_engine, pid=123, ips=None, max_waiting_time=30)
 
         request = CompletionRequest(prompt=["prompt1", "prompt2"], max_tokens=10, stream=True, echo=True)
@@ -114,8 +121,8 @@ class TestCompletionEcho(unittest.TestCase):
 
             mock_echo.side_effect = mock_echo_side_effect
 
-            self.completion_handler._echo_back_prompt(request, mock_responses[0], 0)
-            self.completion_handler._echo_back_prompt(request, mock_responses[1], 1)
+            await self.completion_handler._echo_back_prompt(request, mock_responses[0], 0)
+            await self.completion_handler._echo_back_prompt(request, mock_responses[1], 1)
 
             self.assertEqual(mock_echo.call_count, 2)
             mock_echo.assert_any_call(request, mock_responses[0], 0)
@@ -125,41 +132,40 @@ class TestCompletionEcho(unittest.TestCase):
         self.assertEqual(mock_responses[1]["outputs"]["text"], "prompt2 response2")
         self.assertEqual(request.prompt, ["prompt1", "prompt2"])
 
-    def test_echo_back_prompt_and_streaming1(self):
+    async def test_echo_back_prompt_and_streaming1(self):
         request = CompletionRequest(echo=True, prompt=["Hello", "World"])
         res = {"outputs": {"send_idx": 0, "text": "!"}}
         idx = 0
 
         instance = OpenAIServingCompletion(self.mock_engine, pid=123, ips=None, max_waiting_time=30)
-        instance._echo_back_prompt(request, res, idx)
+        await instance._echo_back_prompt(request, res, idx)
         self.assertEqual(res["outputs"]["text"], "Hello!")
 
-    def test_1_prompt_is_string_and_send_idx_is_0(self):
+    async def test_1_prompt_is_string_and_send_idx_is_0(self):
         request = CompletionRequest(echo=True, prompt="Hello")
         res = {"outputs": {"send_idx": 0, "text": "!"}}
         idx = 0
 
         instance = OpenAIServingCompletion(self.mock_engine, pid=123, ips=None, max_waiting_time=30)
-        instance._echo_back_prompt(request, res, idx)
+        await instance._echo_back_prompt(request, res, idx)
         self.assertEqual(res["outputs"]["text"], "Hello!")
 
-    def test_1_send_idx_is_not_0(self):
+    async def test_1_send_idx_is_not_0(self):
         request = CompletionRequest(echo=True, prompt="Hello")
         res = {"outputs": {"send_idx": 1, "text": "!"}}
         idx = 0
 
         instance = OpenAIServingCompletion(self.mock_engine, pid=123, ips=None, max_waiting_time=30)
-        instance._echo_back_prompt(request, res, idx)
+        await instance._echo_back_prompt(request, res, idx)
         self.assertEqual(res["outputs"]["text"], "!")
 
-    def test_1_echo_is_false(self):
-        """测试echo为False时,_echo_back_prompt不拼接prompt"""
+    async def test_1_echo_is_false(self):
         request = CompletionRequest(echo=False, prompt="Hello")
         res = {"outputs": {"send_idx": 0, "text": "!"}}
         idx = 0
 
         instance = OpenAIServingCompletion(self.mock_engine, pid=123, ips=None, max_waiting_time=30)
-        instance._echo_back_prompt(request, res, idx)
+        await instance._echo_back_prompt(request, res, idx)
         self.assertEqual(res["outputs"]["text"], "!")
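
The restored `_echo_back_prompt` behaviour can be exercised on its own, outside the server and the mocked tests above. The following is a minimal, self-contained sketch of that behaviour; `EchoRequest` and `echo_back_prompt` are illustrative stand-ins for FastDeploy's `CompletionRequest` and `OpenAIServingCompletion._echo_back_prompt`, not part of this patch:

import asyncio
from dataclasses import dataclass
from typing import List, Union


@dataclass
class EchoRequest:
    # Illustrative stand-in for CompletionRequest; only the fields used here.
    echo: bool
    prompt: Union[str, List[str]]


async def echo_back_prompt(request: EchoRequest, res: dict, idx: int) -> None:
    # Prepend the prompt to the first streamed chunk (send_idx == 0) when echo is enabled.
    if res["outputs"].get("send_idx", -1) == 0 and request.echo:
        prompt_text = request.prompt[idx] if isinstance(request.prompt, list) else request.prompt
        res["outputs"]["text"] = prompt_text + (res["outputs"]["text"] or "")


async def main() -> None:
    request = EchoRequest(echo=True, prompt=["Hello", "World"])
    res = {"outputs": {"send_idx": 0, "text": "!"}}
    await echo_back_prompt(request, res, idx=1)
    print(res["outputs"]["text"])  # prints "World!"


if __name__ == "__main__":
    asyncio.run(main())

Because the method is a coroutine again, the streaming generator must await it (as the serving_completion.py hunk shows), and the unit tests switch to unittest.IsolatedAsyncioTestCase so each test body can await it directly.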