From ec499a010475090dcf9147d25c4aa58aeadf49e9 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E6=9D=8E=E6=B3=B3=E6=A1=A6?= <39643373+liyonghua0910@users.noreply.github.com>
Date: Tue, 21 Oct 2025 10:43:33 +0800
Subject: [PATCH] [Cherry-pick] fix requests & block metrics (#4500)

* [fix] fix requests & block metrics

* [chore] rename variables
---
 .../cache_manager/prefix_cache_manager.py     |  7 +++++++
 fastdeploy/engine/resource_manager.py         |  4 ++--
 .../engine/sched/resource_manager_v1.py       | 20 +++++++++----------
 fastdeploy/metrics/metrics.py                 |  2 +-
 fastdeploy/output/token_processor.py          |  7 +++++--
 5 files changed, 25 insertions(+), 15 deletions(-)

diff --git a/fastdeploy/cache_manager/prefix_cache_manager.py b/fastdeploy/cache_manager/prefix_cache_manager.py
index a0b110bde..a2dc1ce34 100644
--- a/fastdeploy/cache_manager/prefix_cache_manager.py
+++ b/fastdeploy/cache_manager/prefix_cache_manager.py
@@ -111,6 +111,11 @@ class PrefixCacheManager:
             + f"{self.num_cpu_blocks}, bytes_per_layer_per_block {self.cache_config.bytes_per_layer_per_block}"
         )
 
+        main_process_metrics.max_gpu_block_num.set(self.num_gpu_blocks)
+        main_process_metrics.available_gpu_block_num.set(self.num_gpu_blocks)
+        main_process_metrics.free_gpu_block_num.set(self.num_gpu_blocks)
+        main_process_metrics.available_gpu_resource.set(1.0)
+
     @property
     def available_gpu_resource(self):
         return len(self.gpu_free_block_list) / self.num_gpu_blocks if self.num_gpu_blocks > 0 else 0.0
@@ -235,6 +240,8 @@ class PrefixCacheManager:
         self.node_id_pool = list(range(self.num_gpu_blocks + self.num_cpu_blocks))
 
         main_process_metrics.max_gpu_block_num.set(self.num_gpu_blocks)
+        main_process_metrics.available_gpu_block_num.set(self.num_gpu_blocks)
+        main_process_metrics.free_gpu_block_num.set(self.num_gpu_blocks)
         main_process_metrics.available_gpu_resource.set(1.0)
 
     def _enable_cpu_cache(self):
diff --git a/fastdeploy/engine/resource_manager.py b/fastdeploy/engine/resource_manager.py
index ef6190fc1..dc0f5a501 100644
--- a/fastdeploy/engine/resource_manager.py
+++ b/fastdeploy/engine/resource_manager.py
@@ -311,8 +311,8 @@ class ResourceManager:
                     break
 
         # record batch size here
-        task_used_block_num = sum([len(task.block_tables) if task else 0 for task in self.tasks_list])
-        main_process_metrics.available_gpu_block_num.set(self.total_block_number() - task_used_block_num)
+        num_blocks_used_by_tasks = sum([len(task.block_tables) if task else 0 for task in self.tasks_list])
+        main_process_metrics.available_gpu_block_num.set(self.total_block_number() - num_blocks_used_by_tasks)
         main_process_metrics.batch_size.set(self.max_num_seqs - self.available_batch())
         main_process_metrics.gpu_cache_usage_perc.set(self.get_gpu_cache_usage_perc())
 
diff --git a/fastdeploy/engine/sched/resource_manager_v1.py b/fastdeploy/engine/sched/resource_manager_v1.py
index 57bb61d90..f5bdcceb0 100644
--- a/fastdeploy/engine/sched/resource_manager_v1.py
+++ b/fastdeploy/engine/sched/resource_manager_v1.py
@@ -126,8 +126,6 @@ class ResourceManagerV1(ResourceManager):
                     self.to_be_rescheduled_request_id_set.add(preempted_req.request_id)
                 preempted_reqs.append(preempted_req)
                 scheduled_reqs.append(self._prepare_preempt_task(preempted_req))
-                main_process_metrics.num_requests_waiting.inc(1)
-                main_process_metrics.num_requests_running.dec(1)
                 if preempted_req == request:
                     # No more request to preempt.
                     can_schedule = False
@@ -384,8 +382,6 @@ class ResourceManagerV1(ResourceManager):
                         request, self.config.cache_config.block_size, request.num_computed_tokens
                     )
                     request.status = RequestStatus.RUNNING
-                    main_process_metrics.num_requests_waiting.dec(1)
-                    main_process_metrics.num_requests_running.inc(1)
                     allocated_position = self.get_available_position()
                     request.idx = allocated_position
                     self.tasks_list[allocated_position] = request
@@ -429,8 +425,6 @@ class ResourceManagerV1(ResourceManager):
                         request, self.config.cache_config.block_size, request.num_computed_tokens
                     )
                     request.status = RequestStatus.RUNNING
-                    main_process_metrics.num_requests_waiting.dec(1)
-                    main_process_metrics.num_requests_running.inc(1)
                 else:
                     if self.config.cache_config.enable_prefix_caching:
                         self._free_blocks(request)
@@ -438,11 +432,17 @@
             else:
                 llm_logger.error("Unknown request status type")
         if scheduled_reqs:
-            task_used_block_num = sum([len(task.block_tables) if task else 0 for task in self.tasks_list])
-            main_process_metrics.available_gpu_block_num.set(self.total_block_number() - task_used_block_num)
-            main_process_metrics.batch_size.set(self.max_num_seqs - self.available_batch())
-            main_process_metrics.gpu_cache_usage_perc.set(self.get_gpu_cache_usage_perc())
             llm_logger.debug(f"schedued_reqs: {scheduled_reqs}")
+
+        # Update metrics
+        num_tasks = sum([1 if task else 0 for task in self.tasks_list])
+        num_blocks_used_by_tasks = sum([len(task.block_tables) if task else 0 for task in self.tasks_list])
+        main_process_metrics.available_gpu_block_num.set(self.total_block_number() - num_blocks_used_by_tasks)
+        main_process_metrics.batch_size.set(self.max_num_seqs - self.available_batch())
+        main_process_metrics.gpu_cache_usage_perc.set(self.get_gpu_cache_usage_perc())
+        main_process_metrics.num_requests_running.set(len(self.running))
+        main_process_metrics.num_requests_waiting.set(num_tasks - len(self.running))
+
         return scheduled_reqs
 
     def get_available_position(self) -> int:
diff --git a/fastdeploy/metrics/metrics.py b/fastdeploy/metrics/metrics.py
index ca8b6b391..c1982e0a8 100644
--- a/fastdeploy/metrics/metrics.py
+++ b/fastdeploy/metrics/metrics.py
@@ -311,7 +311,7 @@ class MetricsManager:
         "available_gpu_block_num": {
             "type": Gauge,
             "name": "fastdeploy:available_gpu_block_num",
-            "description": "Number of available gpu blocks in cache, including prefix caching blocks that are not officially released",
+            "description": "Number of available gpu blocks in cache, including blocks in LRU list",
             "kwargs": {},
         },
         "free_gpu_block_num": {
diff --git a/fastdeploy/output/token_processor.py b/fastdeploy/output/token_processor.py
index 6fb7da882..9383ab3e0 100644
--- a/fastdeploy/output/token_processor.py
+++ b/fastdeploy/output/token_processor.py
@@ -297,9 +297,12 @@ class TokenProcessor:
                 self.resource_manager.tasks_list[index] = None
                 self.resource_manager._recycle_block_tables(task)
 
-        task_used_block_num = sum([len(task.block_tables) if task else 0 for task in self.resource_manager.tasks_list])
+        # Update block metrics
+        num_blocks_used_by_tasks = sum(
+            [len(task.block_tables) if task else 0 for task in self.resource_manager.tasks_list]
+        )
         main_process_metrics.available_gpu_block_num.set(
-            self.resource_manager.total_block_number() - task_used_block_num
+            self.resource_manager.total_block_number() - num_blocks_used_by_tasks
         )
         main_process_metrics.batch_size.set(
             self.resource_manager.max_num_seqs - self.resource_manager.available_batch()