Merge branch 'feature/experimental_feature_20250908' into yuanlehome-patch-2

Yuanle Liu
2025-09-28 13:42:19 +08:00
committed by GitHub
7 changed files with 33 additions and 5 deletions

View File

@@ -38,14 +38,20 @@ __device__ int64_t topp_sampling_kernel(const int64_t *candidate_ids,
   const int tid = threadIdx.x;
   float sum_scores = 0.0f;
-  float rand_top_p = curand_uniform(dev_curand_states + tid) * topp;
+  for (int i = 0; i < candidate_len; i++) {
+    sum_scores += candidate_scores[i];
+  }
+  float tgt_topp = sum_scores < topp ? sum_scores : topp;
+  sum_scores = 0.0f;
+  float rand_top_p = curand_uniform(dev_curand_states + tid) * tgt_topp;
   for (int i = 0; i < candidate_len; i++) {
     sum_scores += candidate_scores[i];
     if (rand_top_p <= sum_scores) {
       return candidate_ids[i];
     }
   }
   return candidate_ids[0];
 }
 
 __global__ void setup_kernel(curandState_t *state, const uint64_t seed,
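
The removed line drew rand_top_p against topp directly, so whenever the candidate scores summed to less than topp the cumulative scan never reached the threshold and the kernel always fell back to candidate_ids[0]. The new code first measures the probability mass actually present in the candidate list and clamps the threshold to it. A minimal Python sketch of the new selection logic (illustrative names, not the project's API):

import random

def topp_sample(candidate_ids, candidate_scores, top_p):
    # Clamp the threshold to the mass actually present in the candidate
    # list, mirroring tgt_topp in the kernel above.
    tgt_topp = min(sum(candidate_scores), top_p)
    threshold = random.random() * tgt_topp
    cumulative = 0.0
    for cid, score in zip(candidate_ids, candidate_scores):
        cumulative += score
        if threshold <= cumulative:
            return cid
    return candidate_ids[0]  # safety fallback, as in the kernel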

View File

@@ -467,6 +467,9 @@ __global__ void KeMatrixTopPBeamTopKFt(
         break;
       }
     }
+    if (top_p_value == 1.0 && actual_candidates_lens[token_id] == 0){
+      actual_candidates_lens[token_id] = max_cadidate_len;
+    }
   }
 }
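
The added branch appears to guard an edge case: with top_p_value of exactly 1.0, floating-point accumulation (or a truncated candidate list) can leave the running probability sum just short of the threshold, so no candidate length gets recorded; the fallback then admits the full candidate set. Roughly, in Python (hypothetical names, a sketch rather than the actual kernel):

def top_p_candidate_len(sorted_probs, top_p, max_candidate_len):
    cumulative = 0.0
    length = 0
    for i, p in enumerate(sorted_probs[:max_candidate_len]):
        cumulative += p
        if cumulative >= top_p:
            length = i + 1
            break
    # Fallback mirroring the change above: with top_p == 1.0, rounding can
    # keep `cumulative` below the threshold, leaving length == 0.
    if top_p == 1.0 and length == 0:
        length = max_candidate_len
    return length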

View File

@@ -497,6 +497,7 @@ def control_scheduler(request: ControlSchedulerRequest):
         return JSONResponse(content=content.model_dump(), status_code=500)
 
     if request.reset:
+        llm_engine.engine.clear_data()
         llm_engine.engine.scheduler.reset()
 
     if request.load_shards_num or request.reallocate_shard:

View File

@@ -95,6 +95,13 @@ environment_variables: dict[str, Callable[[], Any]] = {
"FD_FOR_TORCH_MODEL_FORMAT": lambda: bool(int(os.getenv("FD_FOR_TORCH_MODEL_FORMAT", "0"))), "FD_FOR_TORCH_MODEL_FORMAT": lambda: bool(int(os.getenv("FD_FOR_TORCH_MODEL_FORMAT", "0"))),
# force disable default chunked prefill # force disable default chunked prefill
"FD_DISABLE_CHUNKED_PREFILL": lambda: bool(int(os.getenv("FD_DISABLE_CHUNKED_PREFILL", "0"))), "FD_DISABLE_CHUNKED_PREFILL": lambda: bool(int(os.getenv("FD_DISABLE_CHUNKED_PREFILL", "0"))),
# For separate setting of sampling parameters for speculative decoding
"FD_SPECULATE_SAMPLING_TOP_P": lambda: (
None if "FD_SPECULATE_SAMPLING_TOP_P" not in os.environ else float(os.environ["FD_SPECULATE_SAMPLING_TOP_P"])
),
"FD_SPECULATE_SAMPLING_TOP_K": lambda: (
None if "FD_SPECULATE_SAMPLING_TOP_K" not in os.environ else float(os.environ["FD_SPECULATE_SAMPLING_TOP_K"])
),
"FD_ENABLE_INTERNAL_ADAPTER": lambda: int(os.getenv("FD_ENABLE_INTERNAL_ADAPTER", "0")), "FD_ENABLE_INTERNAL_ADAPTER": lambda: int(os.getenv("FD_ENABLE_INTERNAL_ADAPTER", "0")),
# LLMEngine recieve requests port, used when FD_ENABLE_INTERNAL_ADAPTER=1 # LLMEngine recieve requests port, used when FD_ENABLE_INTERNAL_ADAPTER=1
"FD_ZMQ_RECV_REQUEST_SERVER_PORT": lambda: os.getenv("FD_ZMQ_RECV_REQUEST_SERVER_PORT", "8200"), "FD_ZMQ_RECV_REQUEST_SERVER_PORT": lambda: os.getenv("FD_ZMQ_RECV_REQUEST_SERVER_PORT", "8200"),

View File

@@ -257,6 +257,7 @@ class DynamicWeightManager:
         while model_weights_status.value[0] != 0:
             if model_weights_status.value[0] == 1:
                 logger.info("infer engine stopped! start to load new checkpoint...")
+                model_runner.clear_requests()
                 model_runner.update_parameters(pid)
             elif model_weights_status.value[0] == -1:
                 logger.info("infer engine stopped! start to clear checkpoint...")

View File

@@ -303,8 +303,16 @@ class MTPProposer(Proposer):
         )
         # self.model_inputs["caches"] = self.cache_kvs
         # Inherit generation hyperparameters from the main model for consistency
-        self.model_inputs["top_p"] = self.target_model_inputs["top_p"]
-        self.model_inputs["top_k"] = self.target_model_inputs["top_k"]
+        self.model_inputs["top_p"] = (
+            self.target_model_inputs["top_p"]
+            if envs.FD_SPECULATE_SAMPLING_TOP_P is None
+            else paddle.full_like(self.target_model_inputs["top_p"], envs.FD_SPECULATE_SAMPLING_TOP_P)
+        )
+        self.model_inputs["top_k"] = (
+            self.target_model_inputs["top_k"]
+            if envs.FD_SPECULATE_SAMPLING_TOP_K is None
+            else paddle.full_like(self.target_model_inputs["top_k"], envs.FD_SPECULATE_SAMPLING_TOP_K)
+        )
         self.model_inputs["temperature"] = self.target_model_inputs["temperature"]
         self.model_inputs["eos_token_id"] = self.target_model_inputs["eos_token_id"]
         self.model_inputs["penalty_score"] = self.target_model_inputs["penalty_score"]

View File

@@ -315,6 +315,8 @@ class PaddleDisWorkerProc:
                     self.worker.model_runner,
                     self.parallel_config.engine_worker_queue_port,
                 )
+                logger.info(f"current task queue data: {self.task_queue.num_tasks()}")
+                self.task_queue.clear_data()
                 self.model_weights_signal[0] = 0
                 logger.info(f"Rank: {self.local_rank} has updated or cleared parameters.")