[XPU] add speculate_step_system_cache (#5397)

* [XPU] add speculate_step_system_cache

* [XPU] add speculate_step_system_cache

---------

Co-authored-by: cmcamdy <1027740945@qq.com>
This commit is contained in:
RuohengMa
2025-12-09 14:40:11 +08:00
committed by GitHub
parent e1c4a12e34
commit 8178e3fc6a
8 changed files with 684 additions and 82 deletions

View File

@@ -0,0 +1,118 @@
// Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "speculate_step_helper.h"

#include <memory>
// Shared implementation behind the speculate_step_paddle and
// speculate_step_system_cache custom ops.
//
// Phase 1 (speculate_free_and_dispatch_block): frees KV-cache blocks of
// stopped sequences back to free_list and dispatches new blocks to sequences
// that ran out, updating block_tables / step & recover bookkeeping in place.
// Phase 2 (speculate_recover_block): only when recover_lens[0] > 0, restores
// blocks for sequences that were previously stepped out.
//
// All tensors are mutated in place via the XPU plugin kernels; the op
// registrations declare the corresponding inplace mappings.
//
// ori_seq_lens_decoder is optional: the system-cache variant supplies it so
// the recover kernel can restore seq_lens_decoder from it; the plain variant
// passes an empty optional and the kernel receives nullptr.
// block_tables is [bsz, block_num_per_seq].
// encoder_decoder_block_num is currently unused in this helper; it is kept so
// both ops share one signature (both declare it as an attribute).
void SpeculateStepPaddleBase(
    const paddle::Tensor &stop_flags,
    const paddle::Tensor &seq_lens_this_time,
    const paddle::Tensor &ori_seq_lens_encoder,
    const paddle::optional<paddle::Tensor> &ori_seq_lens_decoder,
    const paddle::Tensor &seq_lens_encoder,
    const paddle::Tensor &seq_lens_decoder,
    const paddle::Tensor &block_tables, // [bsz, block_num_per_seq]
    const paddle::Tensor &encoder_block_lens,
    const paddle::Tensor &is_block_step,
    const paddle::Tensor &step_block_list,
    const paddle::Tensor &step_lens,
    const paddle::Tensor &recover_block_list,
    const paddle::Tensor &recover_lens,
    const paddle::Tensor &need_block_list,
    const paddle::Tensor &need_block_len,
    const paddle::Tensor &used_list_len,
    const paddle::Tensor &free_list,
    const paddle::Tensor &free_list_len,
    const paddle::Tensor &input_ids,
    const paddle::Tensor &pre_ids,
    const paddle::Tensor &step_idx,
    const paddle::Tensor &next_tokens,
    const paddle::Tensor &first_token_ids,
    const paddle::Tensor &accept_num,
    const int block_size,
    const int encoder_decoder_block_num,
    const int max_draft_tokens) {
  namespace api = baidu::xpu::api;
  phi::XPUPlace place(phi::backends::xpu::GetXPUCurrentDeviceId());
  auto dev_ctx = paddle::experimental::DeviceContextPool::Instance().Get(place);
  auto xpu_ctx = static_cast<const phi::XPUContext *>(dev_ctx);
  api::Context *ctx = xpu_ctx->x_context();
  // CPU-resident inputs run through the plugin's CPU wrapper. Own the
  // temporary CPU context so it is released on every exit path (a raw `new`
  // here would leak it once per call).
  std::unique_ptr<api::Context> cpu_ctx_guard;
  if (seq_lens_this_time.is_cpu()) {
    cpu_ctx_guard = std::make_unique<api::Context>(api::kCPU);
    ctx = cpu_ctx_guard.get();
  }
  const int bsz = seq_lens_this_time.shape()[0];
  // The plugin kernel only supports up to 640 sequences per batch.
  PADDLE_ENFORCE_LE(
      bsz,
      640,
      phi::errors::InvalidArgument(
          "Only support bsz <= 640, but received bsz is %d", bsz));
  const int block_num_per_seq = block_tables.shape()[1];
  const int length = input_ids.shape()[1];
  const int pre_id_length = pre_ids.shape()[1];
  const int max_decoder_block_num = pre_id_length / block_size;
  // Phase 1: free blocks of finished sequences and hand out new blocks to
  // sequences that need them. The const_casts are required because the op
  // inputs arrive as const Tensors even though the kernel updates them
  // in place (declared via SetInplaceMap in the op registrations).
  int r = baidu::xpu::api::plugin::speculate_free_and_dispatch_block(
      ctx,
      const_cast<bool *>(stop_flags.data<bool>()),
      const_cast<int *>(seq_lens_this_time.data<int>()),
      const_cast<int *>(seq_lens_decoder.data<int>()),
      const_cast<int *>(block_tables.data<int>()),
      const_cast<int *>(encoder_block_lens.data<int>()),
      const_cast<bool *>(is_block_step.data<bool>()),
      const_cast<int *>(step_block_list.data<int>()),
      const_cast<int *>(step_lens.data<int>()),
      const_cast<int *>(recover_block_list.data<int>()),
      const_cast<int *>(recover_lens.data<int>()),
      const_cast<int *>(need_block_list.data<int>()),
      const_cast<int *>(need_block_len.data<int>()),
      const_cast<int *>(used_list_len.data<int>()),
      const_cast<int *>(free_list.data<int>()),
      const_cast<int *>(free_list_len.data<int>()),
      const_cast<int64_t *>(first_token_ids.data<int64_t>()),
      const_cast<int *>(accept_num.data<int>()),
      bsz,
      block_size,
      block_num_per_seq,
      max_decoder_block_num,
      max_draft_tokens);
  PD_CHECK(r == 0, "speculate_free_and_dispatch_block failed.");
  // Phase 1 may have queued sequences for recovery; read the count back on
  // the host to decide whether the recover kernel needs to run at all.
  auto recover_lens_cpu = recover_lens.copy_to(paddle::CPUPlace(), false);
  int recover_lens_cpu_data = recover_lens_cpu.data<int>()[0];
  if (recover_lens_cpu_data > 0) {
    // Phase 2: re-attach blocks for recovered sequences. A missing
    // ori_seq_lens_decoder becomes nullptr, in which case the kernel skips
    // restoring seq_lens_decoder (plain speculate_step behavior).
    r = baidu::xpu::api::plugin::speculate_recover_block(
        ctx,
        const_cast<int *>(recover_block_list.data<int>()),
        const_cast<int *>(recover_lens.data<int>()),
        const_cast<bool *>(stop_flags.data<bool>()),
        const_cast<int *>(seq_lens_this_time.data<int>()),
        ori_seq_lens_encoder.data<int>(),
        ori_seq_lens_decoder ? ori_seq_lens_decoder.get_ptr()->data<int>()
                             : nullptr,
        const_cast<int *>(seq_lens_encoder.data<int>()),
        const_cast<int *>(seq_lens_decoder.data<int>()),
        const_cast<int *>(block_tables.data<int>()),
        const_cast<int *>(free_list.data<int>()),
        const_cast<int *>(free_list_len.data<int>()),
        const_cast<int64_t *>(input_ids.data<int64_t>()),
        pre_ids.data<int64_t>(),
        step_idx.data<int64_t>(),
        encoder_block_lens.data<int>(),
        used_list_len.data<int>(),
        next_tokens.data<int64_t>(),
        first_token_ids.data<int64_t>(),
        bsz,
        block_num_per_seq,
        length,
        pre_id_length);
    PD_CHECK(r == 0, "speculate_recover_block failed.");
  }
}

View File

@@ -0,0 +1,49 @@
// Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <paddle/phi/backends/xpu/xpu_context.h>
#include "paddle/extension.h"
#include "paddle/phi/core/enforce.h"
#include "xpu/plugin.h"
// Shared helper for the speculate_step_paddle and speculate_step_system_cache
// custom ops: frees KV-cache blocks of stopped sequences, dispatches new
// blocks to sequences that need them, and recovers previously stepped-out
// sequences when any are pending. All tensor arguments are updated in place
// by the underlying XPU plugin kernels (the op registrations declare the
// inplace mappings).
//
// ori_seq_lens_decoder is optional: the system-cache op supplies it so the
// recover kernel can restore seq_lens_decoder from it; the plain op passes
// an empty optional.
void SpeculateStepPaddleBase(
    const paddle::Tensor &stop_flags,
    const paddle::Tensor &seq_lens_this_time,
    const paddle::Tensor &ori_seq_lens_encoder,
    const paddle::optional<paddle::Tensor> &ori_seq_lens_decoder,
    const paddle::Tensor &seq_lens_encoder,
    const paddle::Tensor &seq_lens_decoder,
    const paddle::Tensor &block_tables, // [bsz, block_num_per_seq]
    const paddle::Tensor &encoder_block_lens,
    const paddle::Tensor &is_block_step,
    const paddle::Tensor &step_block_list,
    const paddle::Tensor &step_lens,
    const paddle::Tensor &recover_block_list,
    const paddle::Tensor &recover_lens,
    const paddle::Tensor &need_block_list,
    const paddle::Tensor &need_block_len,
    const paddle::Tensor &used_list_len,
    const paddle::Tensor &free_list,
    const paddle::Tensor &free_list_len,
    const paddle::Tensor &input_ids,
    const paddle::Tensor &pre_ids,
    const paddle::Tensor &step_idx,
    const paddle::Tensor &next_tokens,
    const paddle::Tensor &first_token_ids,
    const paddle::Tensor &accept_num,
    const int block_size,
    const int encoder_decoder_block_num,
    const int max_draft_tokens);

View File

@@ -12,10 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
#include <paddle/phi/backends/xpu/xpu_context.h>
#include "paddle/extension.h"
#include "paddle/phi/core/enforce.h"
#include "xpu/plugin.h"
#include "speculate_step_helper.h"
#ifndef PD_BUILD_STATIC_OP
#define PD_BUILD_STATIC_OP(name) PD_BUILD_OP(static_op_##name)
@@ -48,77 +45,33 @@ void SpeculateStepPaddle(
const int block_size,
const int encoder_decoder_block_num,
const int max_draft_tokens) {
namespace api = baidu::xpu::api;
phi::XPUPlace place(phi::backends::xpu::GetXPUCurrentDeviceId());
auto dev_ctx = paddle::experimental::DeviceContextPool::Instance().Get(place);
auto xpu_ctx = static_cast<const phi::XPUContext *>(dev_ctx);
api::Context *ctx = xpu_ctx->x_context();
if (seq_lens_this_time.is_cpu()) {
ctx = new api::Context(api::kCPU);
}
const int bsz = seq_lens_this_time.shape()[0];
PADDLE_ENFORCE_LE(
bsz,
640,
phi::errors::InvalidArgument(
"Only support bsz <= 640, but received bsz is %d", bsz));
const int block_num_per_seq = block_tables.shape()[1];
const int length = input_ids.shape()[1];
const int pre_id_length = pre_ids.shape()[1];
const int max_decoder_block_num = pre_id_length / block_size;
int r = baidu::xpu::api::plugin::speculate_free_and_dispatch_block(
ctx,
const_cast<bool *>(stop_flags.data<bool>()),
const_cast<int *>(seq_lens_this_time.data<int>()),
const_cast<int *>(seq_lens_decoder.data<int>()),
const_cast<int *>(block_tables.data<int>()),
const_cast<int *>(encoder_block_lens.data<int>()),
const_cast<bool *>(is_block_step.data<bool>()),
const_cast<int *>(step_block_list.data<int>()),
const_cast<int *>(step_lens.data<int>()),
const_cast<int *>(recover_block_list.data<int>()),
const_cast<int *>(recover_lens.data<int>()),
const_cast<int *>(need_block_list.data<int>()),
const_cast<int *>(need_block_len.data<int>()),
const_cast<int *>(used_list_len.data<int>()),
const_cast<int *>(free_list.data<int>()),
const_cast<int *>(free_list_len.data<int>()),
const_cast<int64_t *>(first_token_ids.data<int64_t>()),
const_cast<int *>(accept_num.data<int>()),
bsz,
block_size,
block_num_per_seq,
max_decoder_block_num,
max_draft_tokens);
PD_CHECK(r == 0, "speculate_free_and_dispatch_block failed.");
auto recover_lens_cpu = recover_lens.copy_to(paddle::CPUPlace(), false);
int recover_lens_cpu_data = recover_lens_cpu.data<int>()[0];
if (recover_lens_cpu_data > 0) {
r = baidu::xpu::api::plugin::speculate_recover_block(
ctx,
const_cast<int *>(recover_block_list.data<int>()),
const_cast<int *>(recover_lens.data<int>()),
const_cast<bool *>(stop_flags.data<bool>()),
const_cast<int *>(seq_lens_this_time.data<int>()),
ori_seq_lens_encoder.data<int>(),
const_cast<int *>(seq_lens_encoder.data<int>()),
seq_lens_decoder.data<int>(),
const_cast<int *>(block_tables.data<int>()),
const_cast<int *>(free_list.data<int>()),
const_cast<int *>(free_list_len.data<int>()),
const_cast<int64_t *>(input_ids.data<int64_t>()),
pre_ids.data<int64_t>(),
step_idx.data<int64_t>(),
encoder_block_lens.data<int>(),
used_list_len.data<int>(),
next_tokens.data<int64_t>(),
first_token_ids.data<int64_t>(),
bsz,
block_num_per_seq,
length,
pre_id_length);
PD_CHECK(r == 0, "speculate_recover_block failed.");
}
SpeculateStepPaddleBase(stop_flags,
seq_lens_this_time,
ori_seq_lens_encoder,
paddle::optional<paddle::Tensor>(),
seq_lens_encoder,
seq_lens_decoder,
block_tables,
encoder_block_lens,
is_block_step,
step_block_list,
step_lens,
recover_block_list,
recover_lens,
need_block_list,
need_block_len,
used_list_len,
free_list,
free_list_len,
input_ids,
pre_ids,
step_idx,
next_tokens,
first_token_ids,
accept_num,
block_size,
encoder_decoder_block_num,
max_draft_tokens);
}
PD_BUILD_STATIC_OP(speculate_step_paddle)

View File

@@ -0,0 +1,143 @@
// Copyright (c) 2025 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "speculate_step_helper.h"
#ifndef PD_BUILD_STATIC_OP
#define PD_BUILD_STATIC_OP(name) PD_BUILD_OP(static_op_##name)
#endif
// speculate_step_system_cache: identical to speculate_step_paddle except that
// it additionally carries ori_seq_lens_decoder, which the shared helper
// forwards to the recover-block kernel so seq_lens_decoder can be restored
// for recovered sequences.
void SpeculateStepSystemCachePaddle(
    const paddle::Tensor &stop_flags,
    const paddle::Tensor &seq_lens_this_time,
    const paddle::Tensor &ori_seq_lens_encoder,
    const paddle::Tensor &ori_seq_lens_decoder,
    const paddle::Tensor &seq_lens_encoder,
    const paddle::Tensor &seq_lens_decoder,
    const paddle::Tensor &block_tables, // [bsz, block_num_per_seq]
    const paddle::Tensor &encoder_block_lens,
    const paddle::Tensor &is_block_step,
    const paddle::Tensor &step_block_list,
    const paddle::Tensor &step_lens,
    const paddle::Tensor &recover_block_list,
    const paddle::Tensor &recover_lens,
    const paddle::Tensor &need_block_list,
    const paddle::Tensor &need_block_len,
    const paddle::Tensor &used_list_len,
    const paddle::Tensor &free_list,
    const paddle::Tensor &free_list_len,
    const paddle::Tensor &input_ids,
    const paddle::Tensor &pre_ids,
    const paddle::Tensor &step_idx,
    const paddle::Tensor &next_tokens,
    const paddle::Tensor &first_token_ids,
    const paddle::Tensor &accept_num,
    const int block_size,
    const int encoder_decoder_block_num,
    const int max_draft_tokens) {
  // Wrap the extra tensor in an optional; an empty optional in the base
  // helper means "do not restore decoder lengths" (the plain op's path).
  const paddle::optional<paddle::Tensor> ori_seq_lens_decoder_opt(
      ori_seq_lens_decoder);
  SpeculateStepPaddleBase(stop_flags,
                          seq_lens_this_time,
                          ori_seq_lens_encoder,
                          ori_seq_lens_decoder_opt,
                          seq_lens_encoder,
                          seq_lens_decoder,
                          block_tables,
                          encoder_block_lens,
                          is_block_step,
                          step_block_list,
                          step_lens,
                          recover_block_list,
                          recover_lens,
                          need_block_list,
                          need_block_len,
                          used_list_len,
                          free_list,
                          free_list_len,
                          input_ids,
                          pre_ids,
                          step_idx,
                          next_tokens,
                          first_token_ids,
                          accept_num,
                          block_size,
                          encoder_decoder_block_num,
                          max_draft_tokens);
}
// Op registration. Input names must stay in the exact order of the kernel's
// parameters; SetInplaceMap declares which inputs the kernel mutates in
// place (each such input is aliased to a "*_out" output, so the framework
// does not treat the op as pure).
PD_BUILD_STATIC_OP(speculate_step_system_cache)
    .Inputs({"stop_flags",
             "seq_lens_this_time",
             "ori_seq_lens_encoder",
             "ori_seq_lens_decoder",
             "seq_lens_encoder",
             "seq_lens_decoder",
             "block_tables",
             "encoder_block_lens",
             "is_block_step",
             "step_block_list",
             "step_lens",
             "recover_block_list",
             "recover_lens",
             "need_block_list",
             "need_block_len",
             "used_list_len",
             "free_list",
             "free_list_len",
             "input_ids",
             "pre_ids",
             "step_idx",
             "next_tokens",
             "first_token_ids",
             "accept_num"})
    .Attrs({"block_size: int",
            "encoder_decoder_block_num: int",
            "max_draft_tokens: int"})
    // Note: read-only inputs (e.g. ori_seq_lens_encoder/decoder, pre_ids,
    // step_idx, next_tokens, accept_num) intentionally have no outputs.
    .Outputs({"stop_flags_out",
              "seq_lens_this_time_out",
              "seq_lens_encoder_out",
              "seq_lens_decoder_out",
              "block_tables_out",
              "encoder_block_lens_out",
              "is_block_step_out",
              "step_block_list_out",
              "step_lens_out",
              "recover_block_list_out",
              "recover_lens_out",
              "need_block_list_out",
              "need_block_len_out",
              "used_list_len_out",
              "free_list_out",
              "free_list_len_out",
              "input_ids_out",
              "first_token_ids_out"})
    .SetInplaceMap({{"stop_flags", "stop_flags_out"},
                    {"seq_lens_this_time", "seq_lens_this_time_out"},
                    {"seq_lens_encoder", "seq_lens_encoder_out"},
                    {"seq_lens_decoder", "seq_lens_decoder_out"},
                    {"block_tables", "block_tables_out"},
                    {"encoder_block_lens", "encoder_block_lens_out"},
                    {"is_block_step", "is_block_step_out"},
                    {"step_block_list", "step_block_list_out"},
                    {"step_lens", "step_lens_out"},
                    {"recover_block_list", "recover_block_list_out"},
                    {"recover_lens", "recover_lens_out"},
                    {"need_block_list", "need_block_list_out"},
                    {"need_block_len", "need_block_len_out"},
                    {"used_list_len", "used_list_len_out"},
                    {"free_list", "free_list_out"},
                    {"free_list_len", "free_list_len_out"},
                    {"input_ids", "input_ids_out"},
                    {"first_token_ids", "first_token_ids_out"}})
    .SetKernelFn(PD_KERNEL(SpeculateStepSystemCachePaddle));

View File

@@ -207,8 +207,9 @@ DLL_EXPORT int speculate_recover_block(Context* ctx,
bool* stop_flags,
int* seq_lens_this_time,
const int* ori_seq_lens_encoder,
const int* ori_seq_lens_decoder,
int* seq_lens_encoder,
const int* seq_lens_decoder,
int* seq_lens_decoder,
int* block_tables,
int* free_list,
int* free_list_len,

View File

@@ -33,8 +33,9 @@ __global__ void speculate_recover_block(int* recover_block_list, // [bsz]
bool* stop_flags,
int* seq_lens_this_time,
const int* ori_seq_lens_encoder,
const int* ori_seq_lens_decoder,
int* seq_lens_encoder,
const int* seq_lens_decoder,
int* seq_lens_decoder,
int* block_tables,
int* free_list,
int* free_list_len,
@@ -82,6 +83,7 @@ __global__ void speculate_recover_block(int* recover_block_list, // [bsz]
for (int bid = cid; bid < recover_len_lm; bid += ncores) {
int recover_id;
int ori_seq_len_encoder;
int ori_seq_len_decoder;
int step_idx_now;
int encoder_block_len;
int decoder_used_len;
@@ -89,12 +91,20 @@ __global__ void speculate_recover_block(int* recover_block_list, // [bsz]
GM2LM(recover_block_list + bid, &recover_id, sizeof(int));
GM2LM_ASYNC(
ori_seq_lens_encoder + recover_id, &ori_seq_len_encoder, sizeof(int));
if (ori_seq_lens_decoder != nullptr) {
GM2LM_ASYNC(
ori_seq_lens_decoder + recover_id, &ori_seq_len_decoder, sizeof(int));
}
GM2LM_ASYNC(step_idx + recover_id, &step_idx_now, sizeof(int));
GM2LM_ASYNC(
encoder_block_lens + recover_id, &encoder_block_len, sizeof(int));
GM2LM_ASYNC(used_list_len + recover_id, &decoder_used_len, sizeof(int));
GM2LM_ASYNC(next_tokens + recover_id, &next_token, sizeof(int64_t));
mfence();
if (ori_seq_lens_decoder != nullptr) {
LM2GM_ASYNC(
&ori_seq_len_decoder, seq_lens_decoder + recover_id, sizeof(int));
}
int seq_len = ori_seq_len_encoder + step_idx_now;
mfence();

View File

@@ -26,8 +26,9 @@ __attribute__((global)) void speculate_recover_block(
bool *stop_flags,
int *seq_lens_this_time,
const int *ori_seq_lens_encoder,
const int *ori_seq_lens_decoder,
int *seq_lens_encoder,
const int *seq_lens_decoder,
int *seq_lens_decoder,
int *block_tables,
int *free_list,
int *free_list_len,
@@ -57,8 +58,9 @@ static int cpu_wrapper(Context *ctx,
bool *stop_flags,
int *seq_lens_this_time,
const int *ori_seq_lens_encoder,
const int *ori_seq_lens_decoder,
int *seq_lens_encoder,
const int *seq_lens_decoder,
int *seq_lens_decoder,
int *block_tables,
int *free_list,
int *free_list_len,
@@ -76,6 +78,9 @@ static int cpu_wrapper(Context *ctx,
for (int bid = 0; bid < recover_len[0]; bid++) {
const int recover_id = recover_block_list[bid];
const int ori_seq_len_encoder = ori_seq_lens_encoder[recover_id];
if (ori_seq_lens_decoder != nullptr) {
seq_lens_decoder[recover_id] = ori_seq_lens_decoder[recover_id];
}
const int step_idx_now = step_idx[recover_id];
const int seq_len = ori_seq_len_encoder + step_idx_now;
const int encoder_block_len = encoder_block_lens[recover_id];
@@ -112,8 +117,9 @@ static int xpu3_wrapper(Context *ctx,
bool *stop_flags,
int *seq_lens_this_time,
const int *ori_seq_lens_encoder,
const int *ori_seq_lens_decoder,
int *seq_lens_encoder,
const int *seq_lens_decoder,
int *seq_lens_decoder,
int *block_tables,
int *free_list,
int *free_list_len,
@@ -136,6 +142,7 @@ static int xpu3_wrapper(Context *ctx,
stop_flags,
seq_lens_this_time,
ori_seq_lens_encoder,
ori_seq_lens_decoder,
seq_lens_encoder,
seq_lens_decoder,
block_tables,
@@ -161,8 +168,9 @@ int speculate_recover_block(Context *ctx,
bool *stop_flags,
int *seq_lens_this_time,
const int *ori_seq_lens_encoder,
const int *ori_seq_lens_decoder,
int *seq_lens_encoder,
const int *seq_lens_decoder,
int *seq_lens_decoder,
int *block_tables,
int *free_list,
int *free_list_len,
@@ -185,7 +193,8 @@ int speculate_recover_block(Context *ctx,
stop_flags,
seq_lens_this_time,
ori_seq_lens_encoder,
seq_lens_encoder);
ori_seq_lens_decoder);
WRAPPER_DUMP_PARAM1(ctx, seq_lens_encoder);
WRAPPER_DUMP_PARAM6(ctx,
seq_lens_decoder,
block_tables,
@@ -208,6 +217,7 @@ int speculate_recover_block(Context *ctx,
stop_flags,
seq_lens_this_time,
ori_seq_lens_encoder,
ori_seq_lens_decoder,
seq_lens_encoder,
seq_lens_decoder,
block_tables,
@@ -232,6 +242,7 @@ int speculate_recover_block(Context *ctx,
stop_flags,
seq_lens_this_time,
ori_seq_lens_encoder,
ori_seq_lens_decoder,
seq_lens_encoder,
seq_lens_decoder,
block_tables,