diff --git a/fastdeploy/entrypoints/openai/serving_chat.py b/fastdeploy/entrypoints/openai/serving_chat.py
index 778061b85..c9dbf2705 100644
--- a/fastdeploy/entrypoints/openai/serving_chat.py
+++ b/fastdeploy/entrypoints/openai/serving_chat.py
@@ -20,7 +20,7 @@ import time
 import traceback
 import uuid
 from typing import List, Optional
-
+import numpy as np
 import msgpack
 import aiozmq
 from aiozmq import zmq
@@ -75,6 +75,8 @@ class OpenAIServingChat:
             current_req_dict = request.to_dict_for_infer(request_id)
             current_req_dict["arrival_time"] = time.time()
             prompt_token_ids = self.engine_client.format_and_add_data(current_req_dict)
+            if isinstance(prompt_token_ids, np.ndarray):
+                prompt_token_ids = prompt_token_ids.tolist()
         except Exception as e:
             return ErrorResponse(code=400, message=str(e))
 
diff --git a/fastdeploy/entrypoints/openai/serving_completion.py b/fastdeploy/entrypoints/openai/serving_completion.py
index acefc3d17..682d4231b 100644
--- a/fastdeploy/entrypoints/openai/serving_completion.py
+++ b/fastdeploy/entrypoints/openai/serving_completion.py
@@ -18,6 +18,7 @@ import asyncio
 import aiozmq
 import json
 import msgpack
+import numpy as np
 from aiozmq import zmq
 from asyncio import FIRST_COMPLETED, AbstractEventLoop, Task
 import time
@@ -105,9 +106,10 @@ class OpenAIServingCompletion:
             current_req_dict = request.to_dict_for_infer(request_id_idx, prompt)
             try:
                 current_req_dict["arrival_time"] = time.time()
-                prompt_batched_token_ids.append(
-                    self.engine_client.format_and_add_data(current_req_dict)
-                )
+                prompt_token_ids = self.engine_client.format_and_add_data(current_req_dict)
+                if isinstance(prompt_token_ids, np.ndarray):
+                    prompt_token_ids = prompt_token_ids.tolist()
+                prompt_batched_token_ids.append(prompt_token_ids)
             except Exception as e:
                 return ErrorResponse(message=str(e), code=400)
 