skip DtoH capture (#4988)

Sunny-bot1
2025-11-13 10:57:44 +08:00
committed by GitHub
parent 8329338d37
commit 5b24013d46

@@ -17,6 +17,7 @@
 #ifndef PADDLE_WITH_CUSTOM_DEVICE_METAX_GPU
 #include "paddle/phi/core/memory/memcpy.h"
 #endif
+#include "paddle/phi/backends/gpu/cuda/cuda_graph_with_memory_pool.h"
 #include "utils.cuh"

 template <int THREADBLOCK_SIZE>
@@ -287,9 +288,12 @@ void GetBlockShapeAndSplitKVBlock(
       seq_lens_encoder.data<int>(),
       max_len_tensor_gpu.data<int>(),
       bsz);
-  max_len_tensor_cpu.copy_(
-      max_len_tensor_gpu, max_len_tensor_cpu.place(), false);
+  // Note (sunxin): Skip capturing the DtoH copy (it's time-consuming); CPU data
+  // is only for branching in attention.
+  if (!phi::backends::gpu::IsCUDAGraphCapturing()) {
+    max_len_tensor_cpu.copy_(
+        max_len_tensor_gpu, max_len_tensor_cpu.place(), false);
+  }
   auto max_len_cpu_ptr = max_len_tensor_cpu.data<int>();
   int max_len_this_time = max_len_cpu_ptr[0];
@@ -398,9 +402,12 @@ void GetBlockShapeAndSplitKVBlock(
       bsz,
       decoder_block_shape_q,
       group_size);
-  decoder_num_blocks_cpu.copy_(
-      decoder_num_blocks_device, decoder_num_blocks_cpu.place(), false);
+  // Note (sunxin): Skip capturing the DtoH copy (it's time-consuming); CPU
+  // data is only for branching in attention.
+  if (!phi::backends::gpu::IsCUDAGraphCapturing()) {
+    decoder_num_blocks_cpu.copy_(
+        decoder_num_blocks_device, decoder_num_blocks_cpu.place(), false);
+  }
   PADDLE_ENFORCE_GPU_SUCCESS(cudaMemsetAsync(
       decoder_chunk_size_device.data<int>(), 64, sizeof(int32_t), stream));
 }
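
Aside: the guard in both hunks is the standard "don't record a host sync into a CUDA graph" pattern. The sketch below is a minimal stand-alone illustration, not the commit's code: the buffer names and the SyncMaxLenToHost helper are hypothetical, and it uses the plain CUDA runtime query cudaStreamIsCapturing() where the Paddle code above calls its own phi::backends::gpu::IsCUDAGraphCapturing() helper.

#include <cuda_runtime.h>

// True if `stream` is currently recording a CUDA graph.
static bool IsCapturing(cudaStream_t stream) {
  cudaStreamCaptureStatus status = cudaStreamCaptureStatusNone;
  cudaStreamIsCapturing(stream, &status);
  return status != cudaStreamCaptureStatusNone;
}

// Hypothetical helper: mirror a device-side int (e.g. a max sequence length)
// into a host buffer so the CPU can branch on it.
void SyncMaxLenToHost(const int* d_max_len, int* h_max_len,
                      cudaStream_t stream) {
  // During capture the copy must be skipped: cudaStreamSynchronize on a
  // capturing stream is disallowed and invalidates the capture, and a
  // captured copy would be replayed on every graph launch anyway. The host
  // keeps the value from the last real sync, which is acceptable when it is
  // only used to select a branch, as the commit note says.
  if (!IsCapturing(stream)) {
    cudaMemcpyAsync(h_max_len, d_max_len, sizeof(int),
                    cudaMemcpyDeviceToHost, stream);
    cudaStreamSynchronize(stream);  // host reads h_max_len immediately after
  }
}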