Mirror of https://github.com/PaddlePaddle/FastDeploy.git (synced 2025-10-05 00:33:03 +08:00)
[NewFeature] Support DP multi api server && fix some bugs in mixed EP && merge develop (#3598)
* [Feature] update ep
* fix ci
* fix ci
* fix ci
* fix ci
* fix ci
* fix ci
* fix ci
* fix queue ports idx
* fix ci
* fix ci
* fix ci
* fix ci
* fix ci
* fix ci
* fix ci
* fix ci
* Update engine.py
* fix ci
* fix some bug in mixed ep
* add server fix and op fix
* rm some log
* fix code style
* ltd fix
* fix
* fix
* fix some bug
* fix bug
* fix bug
* fix style
* Update config.py
* Update splitwise_connector.py
* Update cache_messager.py
* Update __init__.py
* merge and fix
* Update engine.py
* Update common_engine.py
* Update run_ci_xpu.sh
* Update ernie_processor.py
* Update ernie_processor.py

---------

Co-authored-by: ltd0924 <ltd0924@sina.com>
Co-authored-by: ltd0924 <32387785+ltd0924@users.noreply.github.com>
@@ -37,7 +37,7 @@ from fastdeploy.entrypoints.openai.protocol import (
     UsageInfo,
 )
 from fastdeploy.metrics.work_metrics import work_process_metrics
-from fastdeploy.utils import api_server_logger, get_host_ip
+from fastdeploy.utils import api_server_logger
 from fastdeploy.worker.output import LogprobsLists

@@ -50,15 +50,16 @@ class OpenAIServingChat:
         self.engine_client = engine_client
         self.models = models
         self.pid = pid
-        self.master_ip = ips
         self.max_waiting_time = max_waiting_time
-        self.host_ip = get_host_ip()
         self.chat_template = chat_template
-        if self.master_ip is not None:
-            if isinstance(self.master_ip, list):
-                self.master_ip = self.master_ip[0]
+        if ips is not None:
+            if isinstance(ips, list):
+                self.master_ip = ips[0]
             else:
-                self.master_ip = self.master_ip.split(",")[0]
+                self.master_ip = ips.split(",")[0]
+        else:
+            self.master_ip = "0.0.0.0"
+        api_server_logger.info(f"master ip: {self.master_ip}")

     async def _ensure_connection_manager(self):
         """ensure connection manager initialized"""
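
Note: to make the new constructor logic easier to follow, here is the master-IP resolution from the hunk above, extracted into a standalone function. The `ips` handling and the "0.0.0.0" fallback are taken verbatim from the diff; the function name `resolve_master_ip` and the assertions are illustrative additions, not part of the commit.

from typing import List, Optional, Union


def resolve_master_ip(ips: Optional[Union[str, List[str]]]) -> str:
    """Mirror of the constructor logic added above (hypothetical helper).

    Accepts a list of node IPs, a comma-separated string, or None;
    the first entry is treated as the master node.
    """
    if ips is not None:
        if isinstance(ips, list):
            return ips[0]
        # Comma-separated string such as "10.0.0.1,10.0.0.2"
        return ips.split(",")[0]
    # No ips configured (single-node deployment): bind-all placeholder
    return "0.0.0.0"


assert resolve_master_ip(["10.0.0.1", "10.0.0.2"]) == "10.0.0.1"
assert resolve_master_ip("10.0.0.1,10.0.0.2") == "10.0.0.1"
assert resolve_master_ip(None) == "0.0.0.0"
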
@@ -67,19 +68,16 @@ class OpenAIServingChat:
             self.engine_client.connection_initialized = True

     def _check_master(self):
-        if self.master_ip is None:
-            return True
-        if self.host_ip == self.master_ip:
-            return True
-        return False
+        return self.engine_client.is_master

     async def create_chat_completion(self, request: ChatCompletionRequest):
         """
         Create a new chat completion using the specified parameters.
         """
-
         if not self._check_master():
-            err_msg = f"Only master node can accept completion request, please send request to master node: {self.pod_ips[0]}"
+            err_msg = (
+                f"Only master node can accept completion request, please send request to master node: {self.master_ip}"
+            )
             api_server_logger.error(err_msg)
             return ErrorResponse(message=err_msg, code=400)
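
Note: the hunk above replaces the local host-vs-master IP comparison with a single source of truth on the engine client. Below is a minimal sketch of the resulting gating pattern; the stub classes only illustrate the control flow, since the real EngineClient and ErrorResponse are fastdeploy types not shown in this diff.

from dataclasses import dataclass


@dataclass
class StubEngineClient:
    is_master: bool  # stands in for engine_client.is_master from the diff


@dataclass
class StubErrorResponse:
    message: str
    code: int


def gate_request(engine_client: StubEngineClient, master_ip: str):
    # Mirrors: if not self._check_master(): return ErrorResponse(...)
    if not engine_client.is_master:
        err_msg = (
            "Only master node can accept completion request, "
            f"please send request to master node: {master_ip}"
        )
        return StubErrorResponse(message=err_msg, code=400)
    return None  # master node: proceed with the completion


assert gate_request(StubEngineClient(is_master=True), "10.0.0.1") is None
assert gate_request(StubEngineClient(is_master=False), "10.0.0.1").code == 400
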
@@ -117,7 +115,6 @@ class OpenAIServingChat:
             api_server_logger.error(error_msg)
             self.engine_client.semaphore.release()
             return ErrorResponse(code=400, message=error_msg)

-        del current_req_dict

         if request.stream:
@@ -193,6 +190,7 @@ class OpenAIServingChat:
             choices=[],
             model=model_name,
         )
+        api_server_logger.info(f"create chat completion request: {request_id}")

         try:
             await self._ensure_connection_manager()
@@ -388,7 +386,6 @@ class OpenAIServingChat:
         enable_thinking = request.metadata.get("enable_thinking") if request.metadata else None

         include_stop_str_in_output = request.include_stop_str_in_output

         try:
-            await self._ensure_connection_manager()
             dealer, response_queue = await self.engine_client.connection_manager.get_connection(request_id)
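
Note: this last hunk drops a now-redundant `await self._ensure_connection_manager()`; as the context at old line 67 suggests, initialization is guarded by `engine_client.connection_initialized`, so one earlier call suffices. Below is a self-contained sketch of that ensure-once pattern, with stand-in classes (nothing here is the actual fastdeploy API).

import asyncio


class ConnectionManager:
    async def initialize(self) -> None:
        await asyncio.sleep(0)  # placeholder for real connection setup

    async def get_connection(self, request_id: str):
        return f"dealer-for-{request_id}", asyncio.Queue()


class EngineClientSketch:
    def __init__(self) -> None:
        self.connection_manager = ConnectionManager()
        self.connection_initialized = False


async def ensure_connection_manager(client: EngineClientSketch) -> None:
    # Idempotent: later call sites may rely on an earlier one having run.
    if not client.connection_initialized:
        await client.connection_manager.initialize()
        client.connection_initialized = True


async def main() -> None:
    client = EngineClientSketch()
    await ensure_connection_manager(client)  # first call initializes
    await ensure_connection_manager(client)  # second call is a no-op
    dealer, response_queue = await client.connection_manager.get_connection("req-1")
    print(dealer)


asyncio.run(main())
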