[CUDAGraph]CUDA Graph support unique memory pool (#4230)

* cuda graph use unique memory pool

* fix custom device import bug

* refine code

* refine code

* refine code
This commit is contained in:
RAM
2025-09-24 19:45:22 +08:00
committed by GitHub
parent 5ff10c8ced
commit 870364b547
2 changed files with 13 additions and 1 deletion

View File

@@ -96,6 +96,13 @@ class CudaGraphPiecewiseBackend:
self.cudagraph_capture_sizes = fd_config.graph_opt_config.cudagraph_capture_sizes
self.warm_up_size = fd_config.graph_opt_config.cudagraph_num_of_warmups
self.real_shape_to_captured_size = fd_config.graph_opt_config.real_shape_to_captured_size
self.unique_memory_pool_id = None
if self.fd_config.graph_opt_config.use_unique_memory_pool:
# TODO(gongshaotian): Optimize code
if paddle.is_compiled_with_cuda():
from paddle.base.core import CUDAGraph
self.unique_memory_pool_id = CUDAGraph.gen_new_memory_pool_id()
self._create_entry_dict()
@@ -169,7 +176,7 @@ class CudaGraphPiecewiseBackend:
input_addresses = [x.data_ptr() for (_, x) in kwargs.items() if isinstance(x, paddle.Tensor)]
entry.input_addresses = input_addresses
new_grpah = graphs.CUDAGraph()
new_grpah = graphs.CUDAGraph(pool_id=self.unique_memory_pool_id)
paddle.device.synchronize()
# Capture