Mirror of https://github.com/PaddlePaddle/FastDeploy.git, synced 2025-10-07 01:22:59 +08:00
【Bugfix】Fix the significant performance drop of the 0.3B model on branch 2.1 (#3624)
* Restore the async methods. 【BugFix】Echo support for the completion endpoint (#3245)
* wenxin-tools-511: fix v1/completion not echoing the prompt.
* Support echo for multiple prompts.
* Support streaming echo for multiple prompts.
* Add unit tests for echo support in the completion endpoint.
* pre-commit
* Remove redundant test files.
* Fix the unit-test method for completion echo support.
* Add unit-test files.
* Add unit tests.
* unittest
* Add unit tests.
* Fix unit tests.
* Remove unnecessary asserts.
* Resubmit.
* Update test methods.
* ut
* Verify whether the unit-test approach is correct.
* Verify whether the unit-test approach is correct.
* Verify whether the unit-test approach is correct (3).
* Refine the unit-test code to narrow the test scope.
* Refine the unit-test code to narrow the test scope (2).
* Refine the unit-test code to narrow the test scope (3).
* support 'echo' in chat/completion.
* update
* update
* update
* update
* update
* update
* Add unit tests for token IDs.
* update
* Fix index errors.
* Fix index errors.
* [Bugfix] Significant performance degradation of 0.3B model on branch 2.1
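The change below reverts _echo_back_prompt in fastdeploy/entrypoints/openai/serving_completion.py to a coroutine and awaits it at its call sites. For orientation, here is a minimal sketch of the restored helper, reconstructed from the hunks in this commit; the else branch and the concatenation line are inferred from the equivalent helper added to the test file further down, not shown in the source hunk itself.

# Sketch only; the real class is much larger and its imports are omitted.
class OpenAIServingCompletion:
    async def _echo_back_prompt(self, request, res, idx):
        # On the first streamed chunk (send_idx == 0), prepend the prompt
        # text when the client asked for echo.
        if res["outputs"].get("send_idx", -1) == 0 and request.echo:
            if isinstance(request.prompt, list):
                prompt_text = request.prompt[idx]
            else:
                prompt_text = request.prompt
            res["outputs"]["text"] = prompt_text + (res["outputs"]["text"] or "")

Because the helper is a coroutine again, each call site in the generators becomes await self._echo_back_prompt(request, res, idx), as the hunks below show.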
@@ -240,7 +240,7 @@ class OpenAIServingCompletion:
             dealer.close()
             self.engine_client.semaphore.release()

-    def _echo_back_prompt(self, request, res, idx):
+    async def _echo_back_prompt(self, request, res, idx):
         if res["outputs"].get("send_idx", -1) == 0 and request.echo:
             if isinstance(request.prompt, list):
                 prompt_text = request.prompt[idx]
@@ -346,7 +346,7 @@ class OpenAIServingCompletion:
                 else:
                     arrival_time = res["metrics"]["arrival_time"] - inference_start_time[idx]

-                self._echo_back_prompt(request, res, idx)
+                await self._echo_back_prompt(request, res, idx)
                 output = res["outputs"]
                 output_top_logprobs = output["top_logprobs"]
                 logprobs_res: Optional[CompletionLogprobs] = None
@@ -471,7 +471,6 @@ class OpenAIServingCompletion:
             else:
                 token_ids = output["token_ids"]
                 output_text = output["text"]

             finish_reason = self.calc_finish_reason(request.max_tokens, final_res["output_token_ids"], output, False)

             choice_data = CompletionResponseChoice(
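One consequence of the revert that the hunks above make visible: once _echo_back_prompt is declared async def, calling it without await only creates a coroutine object and never executes its body, so the prompt would silently stop being echoed. A small, self-contained illustration of that semantics; the names here are illustrative, not FastDeploy APIs.

import asyncio


async def echo_back(res: dict, prompt: str) -> None:
    # Prepend the prompt to the first chunk, mirroring the pattern above.
    res["text"] = prompt + res["text"]


async def main() -> None:
    res = {"text": "!"}
    echo_back(res, "Hello")        # bug: coroutine created but never run
    await echo_back(res, "Hello")  # correct: the body actually executes
    print(res["text"])             # -> "Hello!"


asyncio.run(main())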
@@ -7,13 +7,22 @@ from fastdeploy.entrypoints.openai.serving_completion import (
 )


-class TestCompletionEcho(unittest.TestCase):
+class YourClass:
+    async def _1(self, a, b, c):
+        if b["outputs"].get("send_idx", -1) == 0 and a.echo:
+            if isinstance(a.prompt, list):
+                text = a.prompt[c]
+            else:
+                text = a.prompt
+            b["outputs"]["text"] = text + (b["outputs"]["text"] or "")
+
+
+class TestCompletionEcho(unittest.IsolatedAsyncioTestCase):
     def setUp(self):
         self.mock_engine = MagicMock()
         self.completion_handler = None

     def test_single_prompt_non_streaming(self):
-        """Test a single-prompt, non-streaming response."""
         self.completion_handler = OpenAIServingCompletion(self.mock_engine, pid=123, ips=None, max_waiting_time=30)

         request = CompletionRequest(prompt="test prompt", max_tokens=10, echo=True, logprobs=1)
@@ -42,8 +51,7 @@ class TestCompletionEcho(unittest.TestCase):

         self.assertEqual(response.choices[0].text, "test prompt generated text")

-    def test_echo_back_prompt_and_streaming(self):
-        """Test _echo_back_prompt and the prompt-concatenation logic of the streaming response."""
+    async def test_echo_back_prompt_and_streaming(self):
         self.completion_handler = OpenAIServingCompletion(self.mock_engine, pid=123, ips=None, max_waiting_time=30)

         request = CompletionRequest(prompt="test prompt", max_tokens=10, stream=True, echo=True)
@@ -57,7 +65,7 @@ class TestCompletionEcho(unittest.TestCase):

         mock_echo.side_effect = mock_echo_side_effect

-        self.completion_handler._echo_back_prompt(request, mock_response, 0)
+        await self.completion_handler._echo_back_prompt(request, mock_response, 0)

         mock_echo.assert_called_once_with(request, mock_response, 0)

@@ -65,7 +73,6 @@ class TestCompletionEcho(unittest.TestCase):
         self.assertEqual(request.prompt, "test prompt")

     def test_multi_prompt_non_streaming(self):
-        """Test a multi-prompt, non-streaming response."""
         self.completion_handler = OpenAIServingCompletion(self.mock_engine, pid=123, ips=None, max_waiting_time=30)

         request = CompletionRequest(prompt=["prompt1", "prompt2"], max_tokens=10, echo=True)
@@ -97,7 +104,7 @@ class TestCompletionEcho(unittest.TestCase):
         self.assertEqual(response.choices[0].text, "prompt1 response1")
         self.assertEqual(response.choices[1].text, "prompt2 response2")

-    def test_multi_prompt_streaming(self):
+    async def test_multi_prompt_streaming(self):
         self.completion_handler = OpenAIServingCompletion(self.mock_engine, pid=123, ips=None, max_waiting_time=30)

         request = CompletionRequest(prompt=["prompt1", "prompt2"], max_tokens=10, stream=True, echo=True)
@@ -114,8 +121,8 @@ class TestCompletionEcho(unittest.TestCase):

         mock_echo.side_effect = mock_echo_side_effect

-        self.completion_handler._echo_back_prompt(request, mock_responses[0], 0)
-        self.completion_handler._echo_back_prompt(request, mock_responses[1], 1)
+        await self.completion_handler._echo_back_prompt(request, mock_responses[0], 0)
+        await self.completion_handler._echo_back_prompt(request, mock_responses[1], 1)

         self.assertEqual(mock_echo.call_count, 2)
         mock_echo.assert_any_call(request, mock_responses[0], 0)
@@ -125,41 +132,40 @@ class TestCompletionEcho(unittest.TestCase):
         self.assertEqual(mock_responses[1]["outputs"]["text"], "prompt2 response2")
         self.assertEqual(request.prompt, ["prompt1", "prompt2"])

-    def test_echo_back_prompt_and_streaming1(self):
+    async def test_echo_back_prompt_and_streaming1(self):
         request = CompletionRequest(echo=True, prompt=["Hello", "World"])
         res = {"outputs": {"send_idx": 0, "text": "!"}}
         idx = 0

         instance = OpenAIServingCompletion(self.mock_engine, pid=123, ips=None, max_waiting_time=30)
-        instance._echo_back_prompt(request, res, idx)
+        await instance._echo_back_prompt(request, res, idx)
         self.assertEqual(res["outputs"]["text"], "Hello!")

-    def test_1_prompt_is_string_and_send_idx_is_0(self):
+    async def test_1_prompt_is_string_and_send_idx_is_0(self):
         request = CompletionRequest(echo=True, prompt="Hello")
         res = {"outputs": {"send_idx": 0, "text": "!"}}
         idx = 0

         instance = OpenAIServingCompletion(self.mock_engine, pid=123, ips=None, max_waiting_time=30)
-        instance._echo_back_prompt(request, res, idx)
+        await instance._echo_back_prompt(request, res, idx)
         self.assertEqual(res["outputs"]["text"], "Hello!")

-    def test_1_send_idx_is_not_0(self):
+    async def test_1_send_idx_is_not_0(self):
         request = CompletionRequest(echo=True, prompt="Hello")
         res = {"outputs": {"send_idx": 1, "text": "!"}}
         idx = 0

         instance = OpenAIServingCompletion(self.mock_engine, pid=123, ips=None, max_waiting_time=30)
-        instance._echo_back_prompt(request, res, idx)
+        await instance._echo_back_prompt(request, res, idx)
         self.assertEqual(res["outputs"]["text"], "!")

-    def test_1_echo_is_false(self):
-        """When echo is False, _echo_back_prompt must not prepend the prompt."""
+    async def test_1_echo_is_false(self):
         request = CompletionRequest(echo=False, prompt="Hello")
         res = {"outputs": {"send_idx": 0, "text": "!"}}
         idx = 0

         instance = OpenAIServingCompletion(self.mock_engine, pid=123, ips=None, max_waiting_time=30)
-        instance._echo_back_prompt(request, res, idx)
+        await instance._echo_back_prompt(request, res, idx)
         self.assertEqual(res["outputs"]["text"], "!")


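Two standard-library details make the test changes above work: unittest.IsolatedAsyncioTestCase runs async def test_* methods on its own event loop, and when the now-async _echo_back_prompt is patched, the replacement has to return an awaitable (for example an unittest.mock.AsyncMock, or a MagicMock whose side_effect is itself a coroutine function). A minimal sketch of that pattern, independent of the FastDeploy classes:

import unittest
from unittest.mock import AsyncMock, patch


class Handler:
    async def _echo_back_prompt(self, request, res, idx):
        # Toy stand-in for the production helper shown in the diff.
        res["outputs"]["text"] = request["prompt"] + res["outputs"]["text"]


class EchoTests(unittest.IsolatedAsyncioTestCase):
    async def test_echo_is_prepended(self):
        res = {"outputs": {"text": "!"}}
        await Handler()._echo_back_prompt({"prompt": "Hello"}, res, 0)
        self.assertEqual(res["outputs"]["text"], "Hello!")

    async def test_patched_call_is_awaitable(self):
        # AsyncMock returns an awaitable, so the awaited call site still works.
        with patch.object(Handler, "_echo_back_prompt", new_callable=AsyncMock) as mock_echo:
            await Handler()._echo_back_prompt({"prompt": "Hello"}, {"outputs": {"text": "!"}}, 0)
            mock_echo.assert_awaited_once()


if __name__ == "__main__":
    unittest.main()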