[CUDAGraph] Switch the scope so that the output buffer of CUDAGraph can be released automatically (#3612)

* fix typo

* fix typo

* add print dot files

* fix bug

* Switch the scope so that the output buffer of CUDAGraph can be released automatically

* Revert "add print dot files"

This reverts commit dc21809eb5.
Authored by RAM on 2025-08-26 21:28:19 +08:00; committed by GitHub.
Parent 82e64b13e1, commit f0a362af18
3 changed files with 34 additions and 17 deletions
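
The change, in one sentence: a replayed CUDA graph writes into a fixed output buffer, and that buffer can only be returned to the allocator once the last reference to it dies, so holding the reference in a narrower scope lets it be dropped automatically instead of pinning memory across iterations. A minimal, framework-free sketch of that scoping behavior (plain Python with weakref; FakeBuffer and both function names are illustrative, not FastDeploy APIs):

    import weakref

    class FakeBuffer:
        """Stands in for a CUDA graph's static output tensor."""

    def replay_pinning():
        buf = FakeBuffer()   # graph replay writes its output here
        return buf           # returning the buffer itself keeps it alive

    def replay_scoped():
        buf = FakeBuffer()   # the buffer is referenced only in this scope
        result = "copy"      # stands in for a copy taken from the buffer
        return result        # buf's last reference dies on return

    pinned = replay_pinning()
    probe = weakref.ref(pinned)
    del pinned               # the caller must remember to release it
    assert probe() is None   # only now can the buffer be freed

    out = replay_scoped()    # the buffer was already released on return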

File 1 of 3 (path not shown): the TestCUDAGrpahSubgraph test.

@@ -153,7 +153,7 @@ class TestCUDAGrpahSubgraph(unittest.TestCase):
         graph_opt_config = GraphOptimizationConfig(args={})
         graph_opt_config.use_cudagraph = True
         parallel_config = ParallelConfig(args={})
-        parallel_config.max_num_seqs = 1
+        parallel_config.max_num_seqs = 8
         cache_config = CacheConfig({})
         # Initialize cuda graph capture list
         graph_opt_config._set_cudagraph_sizes(max_num_seqs=parallel_config.max_num_seqs)
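
Judging by its name and argument, _set_cudagraph_sizes derives the list of batch sizes to capture from max_num_seqs, so raising max_num_seqs from 1 to 8 (together with the matching input-size change in the next hunk) presumably lets this test exercise more than a single capture size.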
@@ -167,7 +167,7 @@ class TestCUDAGrpahSubgraph(unittest.TestCase):
         # Run Test Case1
         test_model1 = TestModel1(fd_config=fd_config)
-        input_tensor1 = paddle.ones([32768])
+        input_tensor1 = paddle.ones([8])
         forward_meta1 = ForwardMeta(input_ids=input_tensor1, ids_remove_padding=input_tensor1, step_use_cudagraph=True)

         # Triger Capture
@@ -180,7 +180,7 @@ class TestCUDAGrpahSubgraph(unittest.TestCase):
         # Corrent output
         output1_correct = test_model1.forward_correct(ids_remove_padding=input_tensor1, forward_meta=forward_meta1)
-        assert sum(output1 - output1_correct) == 0
+        assert (output1 == output1_correct).all()

 if __name__ == "__main__":
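
The assertion rewrite here (and in the spec-decode test below) is not cosmetic: summing a difference can be zero even when the tensors disagree everywhere, because positive and negative errors cancel. A two-element illustration, runnable on CPU with paddle:

    import paddle

    a = paddle.to_tensor([1.0, -1.0])
    b = paddle.to_tensor([-1.0, 1.0])
    # Old-style check: passes even though a and b differ at every element.
    assert float((a - b).sum()) == 0.0
    # New elementwise check: correctly detects the mismatch.
    assert not bool((a == b).all())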

File 2 of 3 (path not shown): the TestCUDAGrpahRecapture test.

@@ -98,33 +98,50 @@ class TestCUDAGrpahRecapture(unittest.TestCase):
         )

         # Run Test Case1
-        test_model1 = TestModel1(fd_config=fd_config)
-        input_tensor1 = paddle.ones([32768])
+        self.test_model1 = TestModel1(fd_config=fd_config)
+        input_tensor1 = paddle.ones([1, 32768])
         forward_meta1 = ForwardMeta(input_ids=input_tensor1, ids_remove_padding=input_tensor1, step_use_cudagraph=True)

+        # Corrent output
+        self.output_correct = self.test_model1.forward_correct(
+            ids_remove_padding=input_tensor1, forward_meta=forward_meta1
+        )
+
+        # Capture and Destory
+        self.capture_and_replay(input_tensor1, forward_meta1)
+        self.recapture_and_replay(input_tensor1, forward_meta1)
+
+    def capture_and_replay(self, input_tensor1, forward_meta1):
+        """ """
         # Triger Capture
         print_gpu_memory_use(0, "before capture")
-        _ = test_model1(ids_remove_padding=input_tensor1, forward_meta=forward_meta1)
+        output1 = self.test_model1(ids_remove_padding=input_tensor1, forward_meta=forward_meta1)
         print_gpu_memory_use(0, "after capture")

         # Reaplay
-        output1 = test_model1(ids_remove_padding=input_tensor1, forward_meta=forward_meta1)
+        output1 = self.test_model1(ids_remove_padding=input_tensor1, forward_meta=forward_meta1)
+        assert (output1 == self.output_correct).all()

         # Destory
         print_gpu_memory_use(0, "before destory")
-        test_model1.clear_grpah_opt_backend()
+        self.test_model1.clear_grpah_opt_backend()
         print_gpu_memory_use(0, "after destory")

+    def recapture_and_replay(self, input_tensor1, forward_meta1):
+        """ """
         # Triger Capture
         print_gpu_memory_use(0, "before recapture")
-        _ = test_model1(ids_remove_padding=input_tensor1, forward_meta=forward_meta1)
+        output2 = self.test_model1(ids_remove_padding=input_tensor1, forward_meta=forward_meta1)
         print_gpu_memory_use(0, "after recapture")

         # Reaplay
-        output2 = test_model1(ids_remove_padding=input_tensor1, forward_meta=forward_meta1)
+        output2 = self.test_model1(ids_remove_padding=input_tensor1, forward_meta=forward_meta1)
+        assert (output2 == self.output_correct).all()

-        # Corrent output
-        output1_correct = test_model1.forward_correct(ids_remove_padding=input_tensor1, forward_meta=forward_meta1)
-
-        assert sum(output1 - output2) == 0
-        assert sum(output1_correct - output1) == 0
+        # Destory
+        print_gpu_memory_use(0, "before destory")
+        self.test_model1.clear_grpah_opt_backend()
+        print_gpu_memory_use(0, "after destory")

 if __name__ == "__main__":
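
The recapture test is reshaped from one long method into two helpers, each running a full capture, replay, destroy cycle against a reference output computed before any capture. Condensed, the shared pattern looks like this (TestModel1's call signature and clear_grpah_opt_backend come from the diff above; the helper itself is a sketch, not code from the repo):

    def run_cycle(model, x, meta, reference):
        # First call triggers CUDA graph capture; second call replays it.
        _ = model(ids_remove_padding=x, forward_meta=meta)
        out = model(ids_remove_padding=x, forward_meta=meta)
        # Comparing against a reference computed before any capture shows
        # the (re)captured graph writes correct results into its buffer.
        assert (out == reference).all()
        # Destroying the backend releases the graph's memory pool, so the
        # next capture starts from a clean slate.
        model.clear_grpah_opt_backend()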

File 3 of 3 (path not shown): the TestCUDAGrpahSpecDecode test.

@@ -114,7 +114,7 @@ class TestCUDAGrpahSpecDecode(unittest.TestCase):
         # Run Test Case1
         test_model1 = TestModel1(fd_config=fd_config)
-        input_tensor1 = paddle.ones([32768])
+        input_tensor1 = paddle.ones([1, 32768])
         forward_meta1 = ForwardMeta(input_ids=input_tensor1, ids_remove_padding=input_tensor1, step_use_cudagraph=True)

         # Triger Capture
@@ -127,7 +127,7 @@ class TestCUDAGrpahSpecDecode(unittest.TestCase):
         # Corrent output
         output1_correct = test_model1.forward_correct(ids_remove_padding=input_tensor1, forward_meta=forward_meta1)
-        assert sum(output1 - output1_correct) == 0
+        assert (output1 == output1_correct).all()

 if __name__ == "__main__":