// Copyright (c) 2025 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "helper.h"

// Updates per-request generation state after one decode step and computes,
// via a block-wide sum of per-slot stop indicators, whether the whole batch
// may stop (`not_need_stop[0] = (#stopped < stop_nums[0])`).
//
// Launch contract: a SINGLE block of THREADBLOCK_SIZE threads; one thread per
// request slot. Requires THREADBLOCK_SIZE >= max_bsz, otherwise slots beyond
// blockDim.x are never visited and the stop reduction undercounts.
//
// Per active slot (thread_idx < bsz):
//   - already stopped          -> zero this_time/decoder/encoder lengths
//   - prompt fully consumed    -> advance to decoding: fold this_time into
//                                 seq_lens_decoder, set this_time = 1, write
//                                 the sampled next token into input_ids[0],
//                                 and if the block table has no block for the
//                                 new decoder position (-1), park the request
//                                 (is_block_step) for the server scheduler
//   - prompt NOT yet consumed  -> chunked-prefill slot with nothing left this
//                                 round: mark stopped and invalidate topk_ids
template <int THREADBLOCK_SIZE>
__global__ void update_inputs_kernel_v1(bool *not_need_stop,
                                        int *seq_lens_this_time,
                                        int *seq_lens_encoder,
                                        int *seq_lens_decoder,
                                        int *step_seq_lens_decoder,
                                        int64_t *prompt_lens,
                                        int64_t *topk_ids,
                                        int64_t *input_ids,
                                        int *block_tables,
                                        const int64_t *stop_nums,
                                        bool *stop_flags,
                                        bool *is_block_step,
                                        const int64_t *next_tokens,
                                        const int bsz,
                                        const int max_bsz,
                                        const int input_ids_stride,
                                        const int block_num_per_seq,
                                        const int block_size) {
  int thread_idx = threadIdx.x;
  typedef cub::BlockReduce<int64_t, THREADBLOCK_SIZE> BlockReduce;
  __shared__ typename BlockReduce::TempStorage temp_storage;

  bool stop_flag_now = false;
  int64_t stop_flag_now_int = 0;
  if (thread_idx < max_bsz) {
    if (thread_idx < bsz) {
      stop_flag_now = stop_flags[thread_idx];
      stop_flag_now_int = static_cast<int64_t>(stop_flag_now);
    } else {
      // Padding slots beyond the current batch count as "stopped" so the
      // reduction compares against stop_nums on a full max_bsz basis.
      stop_flag_now_int = 1;
    }
  }

  if (thread_idx < bsz) {
    if (stop_flag_now) {
      seq_lens_this_time[thread_idx] = 0;  // stop at next step
      seq_lens_decoder[thread_idx] = 0;
      seq_lens_encoder[thread_idx] = 0;
    } else {
      if (seq_lens_this_time[thread_idx] + seq_lens_decoder[thread_idx] >=
          prompt_lens[thread_idx]) {
        // Decoding phase: accumulate the tokens processed this step, then
        // emit exactly one new token next step.
        seq_lens_decoder[thread_idx] += seq_lens_this_time[thread_idx];
        seq_lens_this_time[thread_idx] = 1;
        seq_lens_encoder[thread_idx] = 0;
        int64_t *input_ids_now = input_ids + thread_idx * input_ids_stride;
        input_ids_now[0] = next_tokens[thread_idx];

        // Judge whether a KV-cache block is available for the new position.
        int *block_table_now = block_tables + thread_idx * block_num_per_seq;
        // NOTE(review): seq_lens_this_time was just set to 1 above, so the
        // `!= 0` guard is always true here — kept for behavior parity.
        if (seq_lens_this_time[thread_idx] != 0 &&
            block_table_now[seq_lens_decoder[thread_idx] / block_size] == -1) {
          // Out of blocks: should be (re)scheduled by the server. Stash the
          // decoder length so the request can resume later.
          is_block_step[thread_idx] = true;
          seq_lens_this_time[thread_idx] = 0;
          stop_flags[thread_idx] = true;
          step_seq_lens_decoder[thread_idx] = seq_lens_decoder[thread_idx];
          seq_lens_decoder[thread_idx] = 0;
          stop_flag_now_int = 1;
        }
      } else {
        // Prompt not fully consumed and nothing scheduled: stop this slot and
        // invalidate its sampled token.
        stop_flags[thread_idx] = true;
        seq_lens_this_time[thread_idx] = 0;
        seq_lens_decoder[thread_idx] = 0;
        seq_lens_encoder[thread_idx] = 0;
        topk_ids[thread_idx] = -1;
        stop_flag_now_int = 1;
      }
    }
  }
  __syncthreads();
  // Block-wide count of stopped slots; only thread 0 publishes the verdict.
  int64_t stop_sum = BlockReduce(temp_storage).Sum(stop_flag_now_int);
  if (thread_idx == 0) {
    not_need_stop[0] = stop_sum < stop_nums[0];
  }
}

// Host wrapper: launches update_inputs_kernel_v1 over the current batch and
// mirrors the resulting not_need_stop flag back to its CPU-resident tensor.
// All tensor arguments are updated in place (see SetInplaceMap below).
// NOTE: "UpdateInputesV1" spelling is the established public name — keep it.
void UpdateInputesV1(const paddle::Tensor &stop_flags,
                     const paddle::Tensor &not_need_stop,  // only on cpu
                     const paddle::Tensor &seq_lens_this_time,
                     const paddle::Tensor &seq_lens_encoder,
                     const paddle::Tensor &seq_lens_decoder,
                     const paddle::Tensor &step_seq_lens_decoder,
                     const paddle::Tensor &prompt_lens,
                     const paddle::Tensor &topk_ids,
                     const paddle::Tensor &input_ids,
                     const paddle::Tensor &block_tables,
                     const paddle::Tensor &stop_nums,
                     const paddle::Tensor &next_tokens,
                     const paddle::Tensor &is_block_step,
                     const int block_size) {
#ifdef PADDLE_WITH_CUSTOM_DEVICE
  auto dev_ctx = static_cast<const phi::CustomContext *>(
      paddle::experimental::DeviceContextPool::Instance().Get(
          input_ids.place()));
  auto cu_stream = dev_ctx->stream();
#else
  auto cu_stream = input_ids.stream();
#endif
  const int max_bsz = stop_flags.shape()[0];
  const int now_bsz = seq_lens_this_time.shape()[0];
  const int input_ids_stride = input_ids.shape()[1];
  const int block_num_per_seq = block_tables.shape()[1];
  // not_need_stop lives on CPU; stage a device copy the kernel can write.
  auto not_need_stop_gpu = not_need_stop.copy_to(stop_flags.place(), false);
  // Single block of 1024 threads — assumes max_bsz <= 1024 (one thread/slot).
  update_inputs_kernel_v1<1024><<<1, 1024, 0, cu_stream>>>(
      const_cast<bool *>(not_need_stop_gpu.data<bool>()),
      const_cast<int *>(seq_lens_this_time.data<int>()),
      const_cast<int *>(seq_lens_encoder.data<int>()),
      const_cast<int *>(seq_lens_decoder.data<int>()),
      const_cast<int *>(step_seq_lens_decoder.data<int>()),
      const_cast<int64_t *>(prompt_lens.data<int64_t>()),
      const_cast<int64_t *>(topk_ids.data<int64_t>()),
      const_cast<int64_t *>(input_ids.data<int64_t>()),
      const_cast<int *>(block_tables.data<int>()),
      stop_nums.data<int64_t>(),
      const_cast<bool *>(stop_flags.data<bool>()),
      const_cast<bool *>(is_block_step.data<bool>()),
      next_tokens.data<int64_t>(),
      now_bsz,
      max_bsz,
      input_ids_stride,
      block_num_per_seq,
      block_size);
  // NOTE(review): copy_to(..., /*blocking=*/false) followed by an immediate
  // host read relies on the copy synchronizing with cu_stream — presumably
  // paddle orders D2H copies on the same stream; confirm before changing.
  auto not_need_stop_cpu =
      not_need_stop_gpu.copy_to(not_need_stop.place(), false);
  bool *not_need_stop_data = const_cast<bool *>(not_need_stop.data<bool>());
  not_need_stop_data[0] = not_need_stop_cpu.data<bool>()[0];
}

PD_BUILD_STATIC_OP(update_inputs_v1)
    .Inputs({"stop_flags",
             "not_need_stop",
             "seq_lens_this_time",
             "seq_lens_encoder",
             "seq_lens_decoder",
             "step_seq_lens_decoder",
             "prompt_lens",
             "topk_ids",
             "input_ids",
             "block_tables",
             "stop_nums",
             "next_tokens",
             "is_block_step"})
    .Attrs({"block_size: int"})
    .Outputs({"not_need_stop_out",
              "seq_lens_this_time_out",
              "seq_lens_encoder_out",
              "seq_lens_decoder_out",
              "step_seq_lens_decoder_out",
              "topk_ids_out",
              "input_ids_out",
              "stop_flags_out",
              "is_block_step_out"})
    .SetInplaceMap({{"not_need_stop", "not_need_stop_out"},
                    {"seq_lens_this_time", "seq_lens_this_time_out"},
                    {"seq_lens_encoder", "seq_lens_encoder_out"},
                    {"seq_lens_decoder", "seq_lens_decoder_out"},
                    {"topk_ids", "topk_ids_out"},
                    {"input_ids", "input_ids_out"},
                    {"stop_flags", "stop_flags_out"},
                    {"step_seq_lens_decoder", "step_seq_lens_decoder_out"},
                    {"is_block_step", "is_block_step_out"}})
    .SetKernelFn(PD_KERNEL(UpdateInputesV1));