mirror of
https://github.com/PaddlePaddle/FastDeploy.git
synced 2025-12-24 13:28:13 +08:00
[XPU] bind some OPs for VL model with pybind (#4522)
This commit is contained in:
@@ -15,93 +15,97 @@
|
||||
#include "helper.h"
|
||||
|
||||
// Post-step state update for continuous batching, run by a SINGLE thread
// block (the host launcher uses <<<1, THREADBLOCK_SIZE>>>). Each thread owns
// one batch slot:
//   - slots [0, bsz): live requests — refresh seq_lens_* and write the
//     sampled token into position 0 of the request's input_ids row;
//   - slots [bsz, max_bsz): padding — always counted as "stopped";
//   - slots >= max_bsz: contribute 0 to the stop count.
// A CUB block reduction sums the per-slot stop indicators and thread 0
// publishes not_need_stop[0] = (stopped_count < stop_nums[0]).
// NOTE(review): assumes max_bsz <= THREADBLOCK_SIZE, since slots beyond the
// block size are never inspected — confirm against callers.
template <int THREADBLOCK_SIZE>
__global__ void update_inputs_kernel(bool* not_need_stop,
                                     int* seq_lens_this_time,
                                     int* seq_lens_encoder,
                                     int* seq_lens_decoder,
                                     int64_t* input_ids,
                                     const int64_t* stop_nums,
                                     const bool* stop_flags,
                                     const bool* is_block_step,
                                     const int64_t* next_tokens,
                                     const int bsz,
                                     const int max_bsz,
                                     const int input_ids_stride) {
  int thread_idx = threadIdx.x;
  typedef cub::BlockReduce<int64_t, THREADBLOCK_SIZE> BlockReduce;
  __shared__ typename BlockReduce::TempStorage temp_storage;

  bool stop_flag_now = false;
  int64_t stop_flag_now_int = 0;
  if (thread_idx < max_bsz) {
    if (thread_idx < bsz) {
      stop_flag_now = stop_flags[thread_idx];
      // Requests parked by block-step scheduling do not count as stopped.
      stop_flag_now_int =
          is_block_step[thread_idx] ? 0 : static_cast<int64_t>(stop_flag_now);
    } else {
      // Padding slot between bsz and max_bsz: always counts as stopped.
      stop_flag_now_int = 1;
    }
  }

  if (thread_idx < bsz) {
    const int seq_len_encoder = seq_lens_encoder[thread_idx];
    const int seq_len_decoder = seq_lens_decoder[thread_idx];

    // Stopped requests reset to 0. A request finishing prefill
    // (seq_len_encoder > 0) folds the encoder length into the decoder
    // length; otherwise decoding advances by exactly one token.
    seq_lens_decoder[thread_idx] =
        stop_flag_now
            ? 0
            : (seq_len_encoder > 0 ? (seq_len_encoder + seq_len_decoder)
                                   : seq_len_decoder + 1);

    // After this update every live request processes one token per step.
    seq_lens_this_time[thread_idx] = stop_flag_now ? 0 : 1;
    seq_lens_encoder[thread_idx] = 0;

    // Stage the sampled token as the next step's single input id.
    int64_t* input_ids_now = input_ids + thread_idx * input_ids_stride;
    input_ids_now[0] = next_tokens[thread_idx];
  }

  // Barrier is outside all divergent branches, so every thread reaches it.
  __syncthreads();
  int64_t stop_sum = BlockReduce(temp_storage).Sum(stop_flag_now_int);
  if (thread_idx == 0) {
    // Keep generating while fewer than stop_nums[0] slots have stopped.
    not_need_stop[0] = stop_sum < stop_nums[0];
  }
}
|
||||
|
||||
// Host wrapper for update_inputs_kernel: refreshes per-request
// sequence-length bookkeeping and next-step input ids on the device, then
// mirrors the block-computed "keep generating" flag back into the
// CPU-resident not_need_stop tensor in place.
// (Renamed from the misspelled `UpdateInputes`; the op registration's
// SetKernelFn must reference `UpdateInputs` accordingly.)
void UpdateInputs(const paddle::Tensor& stop_flags,
                  const paddle::Tensor& not_need_stop,  // only on cpu
                  const paddle::Tensor& seq_lens_this_time,
                  const paddle::Tensor& seq_lens_encoder,
                  const paddle::Tensor& seq_lens_decoder,
                  const paddle::Tensor& input_ids,
                  const paddle::Tensor& stop_nums,
                  const paddle::Tensor& next_tokens,
                  const paddle::Tensor& is_block_step) {
#ifdef PADDLE_WITH_CUSTOM_DEVICE
  // Custom devices (e.g. XPU) expose their stream via the device context.
  auto dev_ctx = static_cast<const phi::CustomContext*>(
      paddle::experimental::DeviceContextPool::Instance().Get(
          input_ids.place()));
  auto cu_stream = dev_ctx->stream();
#else
  auto cu_stream = input_ids.stream();
#endif
  const int max_bsz = stop_flags.shape()[0];
  const int now_bsz = seq_lens_this_time.shape()[0];
  const int input_ids_stride = input_ids.shape()[1];

  // not_need_stop lives on the CPU; stage a device-side copy for the kernel.
  auto not_need_stop_gpu = not_need_stop.copy_to(stop_flags.place(), false);

  // Single block; the template argument must equal the launch block size so
  // the CUB BlockReduce covers every participating thread.
  update_inputs_kernel<1024><<<1, 1024, 0, cu_stream>>>(
      const_cast<bool*>(not_need_stop_gpu.data<bool>()),
      const_cast<int*>(seq_lens_this_time.data<int>()),
      const_cast<int*>(seq_lens_encoder.data<int>()),
      const_cast<int*>(seq_lens_decoder.data<int>()),
      const_cast<int64_t*>(input_ids.data<int64_t>()),
      stop_nums.data<int64_t>(),
      stop_flags.data<bool>(),
      is_block_step.data<bool>(),
      next_tokens.data<int64_t>(),
      now_bsz,
      max_bsz,
      input_ids_stride);

  // Copy the kernel's verdict back and write through to the CPU tensor.
  auto not_need_stop_cpu =
      not_need_stop_gpu.copy_to(not_need_stop.place(), false);
  bool* not_need_stop_data = const_cast<bool*>(not_need_stop.data<bool>());
  not_need_stop_data[0] = not_need_stop_cpu.data<bool>()[0];
}
|
||||
|
||||
PD_BUILD_STATIC_OP(update_inputs)
|
||||
@@ -124,4 +128,4 @@ PD_BUILD_STATIC_OP(update_inputs)
|
||||
{"seq_lens_encoder", "seq_lens_encoder_out"},
|
||||
{"seq_lens_decoder", "seq_lens_decoder_out"},
|
||||
{"input_ids", "input_ids_out"}})
|
||||
.SetKernelFn(PD_KERNEL(UpdateInputes));
|
||||
.SetKernelFn(PD_KERNEL(UpdateInputs));
|
||||
|
||||
Reference in New Issue
Block a user