Mirror of https://github.com/PaddlePaddle/FastDeploy.git (synced 2025-10-07 01:22:59 +08:00)
[Bug fix] Fix perf in mixed deployment with yiyan adapter (#3703)
Co-authored-by: YuBaoku <49938469+EmmonsCurse@users.noreply.github.com>
@@ -360,6 +360,7 @@ class TokenProcessor:
                 metrics = RequestMetrics(
                     arrival_time=task.arrival_time,
                     inference_start_time=task.inference_start_time,
+                    model_execute_time=time.time() - task.inference_start_time,
                     first_token_time=time.time() - task.inference_start_time,
                     time_in_queue=task.schedule_start_time - task.preprocess_end_time,
                     preprocess_cost_time=task.preprocess_end_time - task.preprocess_start_time,
@@ -503,6 +504,7 @@ class TokenProcessor:
                 metrics = RequestMetrics(
                     arrival_time=task.arrival_time,
                     inference_start_time=task.inference_start_time,
+                    model_execute_time=time.time() - task.inference_start_time,
                     first_token_time=time.time() - task.inference_start_time,
                     time_in_queue=task.schedule_start_time - task.preprocess_end_time,
                     preprocess_cost_time=task.preprocess_end_time - task.preprocess_start_time,
@@ -514,6 +516,7 @@ class TokenProcessor:
             else:
                 metrics = RequestMetrics(
                     arrival_time=time.time(),
+                    model_execute_time=time.time() - task.inference_start_time,
                     request_start_time=task.arrival_time,
                 )
             self.number_of_output_tokens += len(token_ids)
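
The three hunks above add the same model_execute_time field to the metrics built for each finished request. The following is a minimal, hypothetical sketch of how those timestamp-derived fields relate to one another; the RequestMetrics dataclass and the build_metrics helper below are illustrative stand-ins, not FastDeploy's actual definitions, and assume the task object carries the timestamps referenced in the diff.

import time
from dataclasses import dataclass
from typing import Optional


@dataclass
class RequestMetrics:
    # Stand-in for FastDeploy's RequestMetrics; field names follow the diff above.
    arrival_time: float = 0.0
    inference_start_time: Optional[float] = None
    model_execute_time: Optional[float] = None   # added by this commit
    first_token_time: Optional[float] = None
    time_in_queue: Optional[float] = None
    preprocess_cost_time: Optional[float] = None


def build_metrics(task) -> RequestMetrics:
    """Derive per-request metrics from timestamps recorded on the task.

    Assumes task carries arrival_time, preprocess_start_time,
    preprocess_end_time, schedule_start_time and inference_start_time,
    as implied by the diff.
    """
    now = time.time()
    return RequestMetrics(
        arrival_time=task.arrival_time,
        inference_start_time=task.inference_start_time,
        # Added field: wall-clock time elapsed since model execution started.
        model_execute_time=now - task.inference_start_time,
        first_token_time=now - task.inference_start_time,
        time_in_queue=task.schedule_start_time - task.preprocess_end_time,
        preprocess_cost_time=task.preprocess_end_time - task.preprocess_start_time,
    )
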
@@ -209,7 +209,7 @@ class LocalScheduler:
         return (token_num + block_size - 1) // block_size

     def get_unhandled_request_num(self):
-        return len(self.requests)
+        return len(self.ids) - self.ids_read_cursor

     def get_requests(
         self,
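
The LocalScheduler hunk changes how the backlog is counted: instead of len(self.requests), it uses the scheduled-id list together with a read cursor, presumably because self.requests also retains entries that have already been handed out and therefore over-counts unhandled work. Below is a minimal, hypothetical sketch of that cursor-based bookkeeping under those assumptions; the class is illustrative, not FastDeploy's LocalScheduler.

class MiniScheduler:
    """Illustrative cursor-based bookkeeping, not FastDeploy's LocalScheduler."""

    def __init__(self):
        self.requests = {}        # request_id -> request, kept after dispatch for bookkeeping
        self.ids = []             # append-only list of scheduled request ids
        self.ids_read_cursor = 0  # how many ids have already been handed to get_requests()

    def put_request(self, request_id, request):
        self.requests[request_id] = request
        self.ids.append(request_id)

    def get_requests(self, max_num):
        # Hand out the next batch of ids and advance the cursor.
        batch = self.ids[self.ids_read_cursor : self.ids_read_cursor + max_num]
        self.ids_read_cursor += len(batch)
        return [self.requests[rid] for rid in batch]

    def get_unhandled_request_num(self):
        # Only ids not yet handed out count as unhandled;
        # len(self.requests) would also count already-dispatched requests.
        return len(self.ids) - self.ids_read_cursor

With this bookkeeping, a scheduler that has received 10 requests and already dispatched 8 of them reports 2 unhandled requests rather than 10.
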