Mirror of https://github.com/PaddlePaddle/FastDeploy.git, synced 2025-10-06 17:17:14 +08:00
[LLM] fix multinode bugs (#2945)
* [LLM] fix multinode bugs
* [LLM] fix multinode bugs
* [LLM] fix multinode bugs
* [LLM] fix ci bugs
* fix ci bugs
* fix ci bugs
@@ -100,16 +100,17 @@ class GpuWorker(WorkerBase):
         # 1. Record memory state before profile run
         start_time = time.perf_counter()
         Gb = 1024**3
-        paddle.device.cuda.reset_max_memory_reserved(self.local_rank)
-        paddle.device.cuda.reset_max_memory_allocated(self.local_rank)
+        local_rank = self.local_rank % self.max_chips_per_node
+        paddle.device.cuda.reset_max_memory_reserved(local_rank)
+        paddle.device.cuda.reset_max_memory_allocated(local_rank)
         paddle_reserved_mem_before_run = paddle.device.cuda.max_memory_reserved(
-            self.local_rank)
+            local_rank)
         paddle_allocated_mem_before_run = paddle.device.cuda.max_memory_allocated(
-            self.local_rank)  # not reserved
+            local_rank)  # not reserved

         pynvml.nvmlInit()
         handle = pynvml.nvmlDeviceGetHandleByIndex(
-            int(self.device_ids[self.local_rank]))
+            int(self.device_ids[local_rank]))
         before_run_meminfo = pynvml.nvmlDeviceGetMemoryInfo(handle)

         logger.info((
@@ -126,9 +127,9 @@ class GpuWorker(WorkerBase):

         # 3. Statistical memory information
         paddle_reserved_mem_after_run = paddle.device.cuda.max_memory_reserved(
-            self.local_rank)
+            local_rank)
         paddle_allocated_mem_after_run = paddle.device.cuda.max_memory_allocated(
-            self.local_rank)
+            local_rank)

         model_block_memory_used = self.cal_theortical_kvcache()
         paddle_peak_increase = paddle_reserved_mem_after_run - paddle_allocated_mem_before_run
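For context, a minimal sketch of the rank-to-device mapping this commit introduces: on a multinode job, `self.local_rank` is a rank across all nodes, so using it directly to index `paddle.device.cuda.*` memory counters or the `device_ids` list fails on every node but the first. The diff folds it back into a per-node device index via `self.local_rank % self.max_chips_per_node`. The node size and rank values below are illustrative assumptions, not values from the repo.

# Sketch only (not FastDeploy code); names mirror the diff, values are assumed.

def per_node_device_index(global_rank: int, max_chips_per_node: int) -> int:
    """Map a global worker rank to a device index on its own node.

    On an assumed 2-node x 8-GPU job, ranks 8..15 live on the second node,
    which only exposes CUDA devices 0..7, so the global rank must be
    reduced modulo the per-node device count before indexing.
    """
    return global_rank % max_chips_per_node


if __name__ == "__main__":
    max_chips_per_node = 8  # assumed GPUs per node
    device_ids = [str(i) for i in range(max_chips_per_node)]  # per-node visible GPUs

    for global_rank in (0, 7, 8, 15):
        local_rank = per_node_device_index(global_rank, max_chips_per_node)
        # The diff applies this same index to reset_max_memory_reserved/allocated,
        # max_memory_reserved/allocated, and pynvml's device-handle lookup.
        print(f"global rank {global_rank} -> device_ids[{local_rank}] = {device_ids[local_rank]}")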