diff --git a/fastdeploy/cache_manager/prefix_cache_manager.py b/fastdeploy/cache_manager/prefix_cache_manager.py
index 76ee92bb0..50801d83e 100644
--- a/fastdeploy/cache_manager/prefix_cache_manager.py
+++ b/fastdeploy/cache_manager/prefix_cache_manager.py
@@ -115,6 +115,7 @@ class PrefixCacheManager:
 
         main_process_metrics.max_gpu_block_num.set(self.num_gpu_blocks)
         main_process_metrics.available_gpu_block_num.set(self.num_gpu_blocks)
+        main_process_metrics.free_gpu_block_num.set(self.num_gpu_blocks)
         main_process_metrics.available_gpu_resource.set(1.0)
 
     @property
@@ -358,6 +359,7 @@ class PrefixCacheManager:
 
         main_process_metrics.max_gpu_block_num.set(self.num_gpu_blocks)
         main_process_metrics.available_gpu_block_num.set(self.num_gpu_blocks)
+        main_process_metrics.free_gpu_block_num.set(self.num_gpu_blocks)
         main_process_metrics.available_gpu_resource.set(1.0)
 
     def can_allocate_gpu_blocks(self, num_blocks: int):
diff --git a/fastdeploy/engine/resource_manager.py b/fastdeploy/engine/resource_manager.py
index 39f6a80e4..7c72d9d88 100644
--- a/fastdeploy/engine/resource_manager.py
+++ b/fastdeploy/engine/resource_manager.py
@@ -311,8 +311,8 @@ class ResourceManager:
                     break
 
         # record batch size here
-        task_used_block_num = sum([len(task.block_tables) if task else 0 for task in self.tasks_list])
-        main_process_metrics.available_gpu_block_num.set(self.total_block_number() - task_used_block_num)
+        num_blocks_used_by_tasks = sum([len(task.block_tables) if task else 0 for task in self.tasks_list])
+        main_process_metrics.available_gpu_block_num.set(self.total_block_number() - num_blocks_used_by_tasks)
         main_process_metrics.batch_size.set(self.max_num_seqs - self.available_batch())
         main_process_metrics.gpu_cache_usage_perc.set(self.get_gpu_cache_usage_perc())
         llm_logger.info(
diff --git a/fastdeploy/engine/sched/resource_manager_v1.py b/fastdeploy/engine/sched/resource_manager_v1.py
index 20e908817..66993a064 100644
--- a/fastdeploy/engine/sched/resource_manager_v1.py
+++ b/fastdeploy/engine/sched/resource_manager_v1.py
@@ -630,8 +630,6 @@ class ResourceManagerV1(ResourceManager):
                             request, self.config.cache_config.block_size, request.num_computed_tokens
                         )
                         request.status = RequestStatus.RUNNING
-                        main_process_metrics.num_requests_waiting.dec(1)
-                        main_process_metrics.num_requests_running.inc(1)
                     else:
                         if self.config.cache_config.enable_prefix_caching:
                             self._free_blocks(request)
@@ -640,11 +638,17 @@
                 llm_logger.error("Unknown request status type")
 
         if scheduled_reqs:
-            task_used_block_num = sum([len(task.block_tables) if task else 0 for task in self.tasks_list])
-            main_process_metrics.available_gpu_block_num.set(self.total_block_number() - task_used_block_num)
-            main_process_metrics.batch_size.set(self.max_num_seqs - self.available_batch())
-            main_process_metrics.gpu_cache_usage_perc.set(self.get_gpu_cache_usage_perc())
             llm_logger.debug(f"schedued_reqs: {scheduled_reqs}")
+
+        # Update metrics
+        num_tasks = sum([1 if task else 0 for task in self.tasks_list])
+        num_blocks_used_by_tasks = sum([len(task.block_tables) if task else 0 for task in self.tasks_list])
+        main_process_metrics.available_gpu_block_num.set(self.total_block_number() - num_blocks_used_by_tasks)
+        main_process_metrics.batch_size.set(self.max_num_seqs - self.available_batch())
+        main_process_metrics.gpu_cache_usage_perc.set(self.get_gpu_cache_usage_perc())
+        main_process_metrics.num_requests_running.set(len(self.running))
+        main_process_metrics.num_requests_waiting.set(num_tasks - len(self.running))
+
         return scheduled_reqs
 
     def get_available_position(self) -> int:
diff --git a/fastdeploy/metrics/metrics.py b/fastdeploy/metrics/metrics.py
index c6a6bbba7..a499c3237 100644
--- a/fastdeploy/metrics/metrics.py
+++ b/fastdeploy/metrics/metrics.py
@@ -316,7 +316,7 @@ class MetricsManager:
         "available_gpu_block_num": {
             "type": Gauge,
             "name": "fastdeploy:available_gpu_block_num",
-            "description": "Number of available gpu blocks in cache, including prefix caching blocks that are not officially released",
+            "description": "Number of available gpu blocks in cache, including blocks in LRU list",
             "kwargs": {},
         },
         "free_gpu_block_num": {
diff --git a/fastdeploy/output/token_processor.py b/fastdeploy/output/token_processor.py
index 4e16e3d0a..7d87033e8 100644
--- a/fastdeploy/output/token_processor.py
+++ b/fastdeploy/output/token_processor.py
@@ -421,9 +421,12 @@ class TokenProcessor:
             if task_id in self.resource_manager.req_dict:
                 del self.resource_manager.req_dict[task_id]
 
-        task_used_block_num = sum([len(task.block_tables) if task else 0 for task in self.resource_manager.tasks_list])
+        # Update block metrics
+        num_blocks_used_by_tasks = sum(
+            [len(task.block_tables) if task else 0 for task in self.resource_manager.tasks_list]
+        )
         main_process_metrics.available_gpu_block_num.set(
-            self.resource_manager.total_block_number() - task_used_block_num
+            self.resource_manager.total_block_number() - num_blocks_used_by_tasks
         )
         main_process_metrics.batch_size.set(
             self.resource_manager.max_num_seqs - self.resource_manager.available_batch()
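
Note: the resource_manager_v1.py hunk replaces incremental gauge updates (inc(1)/dec(1) at each request state transition, which drift permanently if any transition is missed) with absolute values recomputed from scheduler state and written with set() on every scheduling step. A minimal sketch of that pattern, assuming prometheus_client; the gauge names and the update_request_gauges helper here are hypothetical, not FastDeploy's MetricsManager:

from prometheus_client import Gauge

# Hypothetical gauges; FastDeploy registers its own through MetricsManager.
num_requests_running = Gauge("num_requests_running", "Requests currently running")
num_requests_waiting = Gauge("num_requests_waiting", "Requests currently waiting")

def update_request_gauges(tasks_list, running):
    # Recompute absolute values from the source of truth each step,
    # instead of nudging the gauges with inc()/dec() per state change.
    num_tasks = sum(1 for task in tasks_list if task)
    num_requests_running.set(len(running))
    num_requests_waiting.set(num_tasks - len(running))

Because set() is idempotent, a missed or duplicated scheduling event corrects itself on the next update, which is why the diff also moves the metrics block outside the if scheduled_reqs: guard.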