[BugFix] [PD Disaggregation] fix v1 scheduler prefill node profile run & ipc transfer protocol (#5132)
Some CI checks failed (all jobs below were cancelled)
CE Compile Job / ce_job_pre_check (push) Has been cancelled
CE Compile Job / print_ce_job_pre_check_outputs (push) Has been cancelled
CE Compile Job / FD-Clone-Linux (push) Has been cancelled
CE Compile Job / Show Code Archive Output (push) Has been cancelled
CE Compile Job / BUILD_SM8090 (push) Has been cancelled
CE Compile Job / BUILD_SM8689 (push) Has been cancelled
CE Compile Job / CE_UPLOAD (push) Has been cancelled
Deploy GitHub Pages / deploy (push) Has been cancelled

* [fix] fix v1 scheduler profile run for append attention in prefill node

* [fix] skip send_signal if kv signal not inited for gpu and xpu

* [fix] extend fix to flash_attn & mla_attn

* [fix] fix v1 pd run in ipc transfer protocol

* [ci] add test for v1 pd profile run using ipc transfer protocol

* [style] fix code style check

* [style] fix code style again

* [fix] fix profile run

* [update] remove --num-gpu-blocks-override in example script

* [chore] rename forward_meta is_profiling to is_dummy_or_profile_run
This commit was authored by:
Yonghua Li
2025-11-20 21:39:22 +08:00
committed by GitHub
parent 01c30f6b87
commit 43097a512a
12 changed files with 512 additions and 94 deletions

View File

@@ -18,88 +18,94 @@
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ipc.h>
#include <sys/mman.h>
#include <sys/msg.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <sys/ipc.h>
#include <sys/msg.h>
#include <unistd.h>
#include "driver_types.h"
#include "msg_utils.h"
#include "paddle/extension.h"
#include "paddle/phi/core/allocator.h"
#include "paddle/phi/core/dense_tensor.h"
#include "msg_utils.h"
struct RemoteCacheKvIpc {
struct save_cache_kv_complete_signal_layerwise_meta_data{
int32_t layer_id=-1;
void * shm_ptr=nullptr;
int shm_fd=-1;
save_cache_kv_complete_signal_layerwise_meta_data(){}
save_cache_kv_complete_signal_layerwise_meta_data(int32_t layer_id_,
void* shm_ptr_,
int shm_fd_)
:layer_id(layer_id_), shm_ptr(shm_ptr_), shm_fd(shm_fd_){
struct save_cache_kv_complete_signal_layerwise_meta_data {
int32_t layer_id = -1;
void* shm_ptr = nullptr;
int shm_fd = -1;
save_cache_kv_complete_signal_layerwise_meta_data() {}
save_cache_kv_complete_signal_layerwise_meta_data(int32_t layer_id_,
void* shm_ptr_,
int shm_fd_)
: layer_id(layer_id_), shm_ptr(shm_ptr_), shm_fd(shm_fd_) {}
};
struct save_cache_kv_complete_signal_layerwise_meta_data_per_query {
int layer_id_;
int num_layers_;
bool inited = false;
struct msgdatakv msg_sed;
int msgid;
save_cache_kv_complete_signal_layerwise_meta_data_per_query() {}
void init(const int* seq_lens_encoder,
const int* seq_lens_decoder,
const int rank,
const int num_layers,
const int real_bsz) {
layer_id_ = 0;
num_layers_ = num_layers;
msg_sed.mtype = 1;
int encoder_count = 0;
for (int i = 0; i < real_bsz; i++) {
if (seq_lens_encoder[i] > 0) {
msg_sed.mtext[3 * encoder_count + 2] = i;
msg_sed.mtext[3 * encoder_count + 3] = seq_lens_decoder[i];
msg_sed.mtext[3 * encoder_count + 4] = seq_lens_encoder[i];
encoder_count++;
}
};
}
msg_sed.mtext[0] = encoder_count;
struct save_cache_kv_complete_signal_layerwise_meta_data_per_query{
int layer_id_;
int num_layers_;
bool inited = false;
struct msgdatakv msg_sed;
int msgid;
if (!inited) {
// just init once
const int msg_id = 1024 + rank;
key_t key = ftok("/opt/", msg_id);
msgid = msgget(key, IPC_CREAT | 0666);
inited = true;
}
}
save_cache_kv_complete_signal_layerwise_meta_data_per_query(){}
void init(const int *seq_lens_encoder,
const int *seq_lens_decoder,
const int rank,
const int num_layers,
const int real_bsz) {
layer_id_ = 0;
num_layers_ = num_layers;
msg_sed.mtype = 1;
int encoder_count = 0;
for (int i = 0; i < real_bsz; i++) {
if (seq_lens_encoder[i] > 0) {
msg_sed.mtext[3 * encoder_count + 2] = i;
msg_sed.mtext[3 * encoder_count + 3] = seq_lens_decoder[i];
msg_sed.mtext[3 * encoder_count + 4] = seq_lens_encoder[i];
encoder_count++;
}
}
msg_sed.mtext[0] = encoder_count;
if (!inited) {
// just init once
const int msg_id = 1024 + rank;
key_t key = ftok("/opt/", msg_id);
msgid = msgget(key, IPC_CREAT | 0666);
inited = true;
}
void CUDART_CB send_signal() {
if (inited) {
msg_sed.mtext[1] = layer_id_;
if ((msgsnd(msgid, &msg_sed, (MAX_BSZ * 3 + 2) * 4, 0)) == -1) {
printf("kv signal full msg buffer\n");
}
layer_id_ = (layer_id_ + 1);
assert(layer_id_ <= num_layers_);
}
}
};
void CUDART_CB send_signal() {
msg_sed.mtext[1] = layer_id_;
if ((msgsnd(msgid, &msg_sed, (MAX_BSZ * 3 + 2) * 4, 0)) == -1) {
printf("kv signal full msg buffer\n");
}
layer_id_ = (layer_id_ + 1);
assert(layer_id_ <= num_layers_);
}
};
static RemoteCacheKvIpc::save_cache_kv_complete_signal_layerwise_meta_data
kv_complete_signal_meta_data;
static RemoteCacheKvIpc::
save_cache_kv_complete_signal_layerwise_meta_data_per_query
kv_complete_signal_meta_data_per_query;
static void* kv_complete_signal_identity_ptr;
static bool kv_complete_signal_shmem_opened;
static RemoteCacheKvIpc::save_cache_kv_complete_signal_layerwise_meta_data kv_complete_signal_meta_data;
static RemoteCacheKvIpc::save_cache_kv_complete_signal_layerwise_meta_data_per_query kv_complete_signal_meta_data_per_query;
static void* kv_complete_signal_identity_ptr;
static bool kv_complete_signal_shmem_opened;
static RemoteCacheKvIpc::save_cache_kv_complete_signal_layerwise_meta_data open_shm_and_get_complete_signal_meta_data(
const int rank_id,
const int device_id,
const bool keep_pd_step_flag);
static void CUDART_CB save_cache_kv_complete_signal_layerwise(void* meta_data);
static void CUDART_CB save_cache_kv_complete_signal_layerwise_per_query(void* meta_data);
static RemoteCacheKvIpc::save_cache_kv_complete_signal_layerwise_meta_data
open_shm_and_get_complete_signal_meta_data(const int rank_id,
const int device_id,
const bool keep_pd_step_flag);
static void CUDART_CB
save_cache_kv_complete_signal_layerwise(void* meta_data);
static void CUDART_CB
save_cache_kv_complete_signal_layerwise_per_query(void* meta_data);
};