Mirror of https://github.com/PaddlePaddle/FastDeploy.git (synced 2025-10-05 00:33:03 +08:00)
fix top_p_candidates and support separate setting of sampling params for mtp (#4189)
Some checks failed
CE Compile Job / ce_job_pre_check (push) Has been cancelled
CE Compile Job / print_ce_job_pre_check_outputs (push) Has been cancelled
CE Compile Job / FD-Clone-Linux (push) Has been cancelled
CE Compile Job / Show Code Archive Output (push) Has been cancelled
CE Compile Job / BUILD_SM8090 (push) Has been cancelled
CE Compile Job / BUILD_SM8689 (push) Has been cancelled
CE Compile Job / CE_UPLOAD (push) Has been cancelled
* fix top_p_candidates
* For separate setting params for mtp
* delete print
* fix
@@ -38,14 +38,20 @@ __device__ int64_t topp_sampling_kernel(const int64_t *candidate_ids,
   const int tid = threadIdx.x;
 
   float sum_scores = 0.0f;
-  float rand_top_p = curand_uniform(dev_curand_states + tid) * topp;
+  for (int i = 0; i < candidate_len; i++) {
+    sum_scores += candidate_scores[i];
+  }
+  float tgt_topp = sum_scores < topp ? sum_scores : topp;
+
+  sum_scores = 0.0f;
+  float rand_top_p = curand_uniform(dev_curand_states + tid) * tgt_topp;
   for (int i = 0; i < candidate_len; i++) {
     sum_scores += candidate_scores[i];
     if (rand_top_p <= sum_scores) {
       return candidate_ids[i];
     }
   }
   return candidate_ids[0];
 }
 
 __global__ void setup_kernel(curandState_t *state, const uint64_t seed,
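The fix above caps the sampling threshold at the total candidate mass: previously rand_top_p was drawn against topp directly, so when the candidate scores summed to less than topp the draw could land beyond the last cumulative score, the loop would fall through, and candidate_ids[0] was over-selected. A minimal Python sketch of the corrected logic, mirroring the CUDA kernel above (the function name and NumPy setup are illustrative, not part of the patch):

import numpy as np

def topp_sample(candidate_ids, candidate_scores, topp, rng):
    # Cap the threshold at the total candidate mass, as the hunk does
    # with tgt_topp, so the draw can never exceed the cumulative sum.
    tgt_topp = min(float(np.sum(candidate_scores)), topp)
    rand_top_p = rng.uniform() * tgt_topp
    cumulative = 0.0
    for cid, score in zip(candidate_ids, candidate_scores):
        cumulative += score
        if rand_top_p <= cumulative:
            return cid
    return candidate_ids[0]  # fallback, now only reachable via float rounding

rng = np.random.default_rng(0)
print(topp_sample([7, 3, 9], np.array([0.5, 0.2, 0.1]), 0.95, rng))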
@@ -467,6 +467,9 @@ __global__ void KeMatrixTopPBeamTopKFt(
         break;
       }
     }
+    if (top_p_value == 1.0 && actual_candidates_lens[token_id] == 0){
+      actual_candidates_lens[token_id] = max_cadidate_len;
+    }
   }
 }
 
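This guard handles an edge case: with top_p_value == 1.0, the cumulative probability over the candidates may never strictly exceed the threshold, so the break never fires and actual_candidates_lens[token_id] stays 0. A hedged Python sketch of that failure mode (the loop structure is inferred from the hunk, not copied from the kernel; the identifier max_cadidate_len is spelled as in the source):

def candidate_length(probs, top_p_value, max_cadidate_len):
    cumulative, length = 0.0, 0
    for i, p in enumerate(probs[:max_cadidate_len]):
        cumulative += p
        if cumulative > top_p_value:
            length = i + 1
            break
    # Edge case fixed by the hunk: at top_p_value == 1.0 the cumulative
    # sum never strictly exceeds the threshold, so length would stay 0;
    # fall back to keeping the full candidate list.
    if top_p_value == 1.0 and length == 0:
        length = max_cadidate_len
    return length

print(candidate_length([0.6, 0.3, 0.1], 1.0, 3))  # 3, not 0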
@@ -95,6 +95,13 @@ environment_variables: dict[str, Callable[[], Any]] = {
     "FD_FOR_TORCH_MODEL_FORMAT": lambda: bool(int(os.getenv("FD_FOR_TORCH_MODEL_FORMAT", "0"))),
     # force disable default chunked prefill
     "FD_DISABLE_CHUNKED_PREFILL": lambda: bool(int(os.getenv("FD_DISABLE_CHUNKED_PREFILL", "0"))),
+    # For separate setting of sampling parameters for speculative decoding
+    "FD_SPECULATE_SAMPLING_TOP_P": lambda: (
+        None if "FD_SPECULATE_SAMPLING_TOP_P" not in os.environ else float(os.environ["FD_SPECULATE_SAMPLING_TOP_P"])
+    ),
+    "FD_SPECULATE_SAMPLING_TOP_K": lambda: (
+        None if "FD_SPECULATE_SAMPLING_TOP_K" not in os.environ else float(os.environ["FD_SPECULATE_SAMPLING_TOP_K"])
+    ),
     "FD_ENABLE_INTERNAL_ADAPTER": lambda: int(os.getenv("FD_ENABLE_INTERNAL_ADAPTER", "0")),
     # LLMEngine recieve requests port, used when FD_ENABLE_INTERNAL_ADAPTER=1
     "FD_ZMQ_RECV_REQUEST_SERVER_PORT": lambda: os.getenv("FD_ZMQ_RECV_REQUEST_SERVER_PORT", "8200"),
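Both variables resolve lazily and default to None when unset, so the override only takes effect when explicitly exported; note that the hunk parses both with float(), including TOP_K. A usage sketch with a minimal stand-in for the lazy lookup (the real module resolves attribute access through the environment_variables table):

import os

# Export the overrides before FastDeploy reads its environment table.
os.environ["FD_SPECULATE_SAMPLING_TOP_P"] = "0.95"
os.environ["FD_SPECULATE_SAMPLING_TOP_K"] = "0"

# Stand-in for the lambda registered above.
FD_SPECULATE_SAMPLING_TOP_P = (
    None if "FD_SPECULATE_SAMPLING_TOP_P" not in os.environ
    else float(os.environ["FD_SPECULATE_SAMPLING_TOP_P"])
)
print(FD_SPECULATE_SAMPLING_TOP_P)  # 0.95; None when the variable is unset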
@@ -303,8 +303,16 @@ class MTPProposer(Proposer):
         )
         # self.model_inputs["caches"] = self.cache_kvs
         # Inherit generation hyperparameters from the main model for consistency
-        self.model_inputs["top_p"] = self.target_model_inputs["top_p"]
-        self.model_inputs["top_k"] = self.target_model_inputs["top_k"]
+        self.model_inputs["top_p"] = (
+            self.target_model_inputs["top_p"]
+            if envs.FD_SPECULATE_SAMPLING_TOP_P is None
+            else paddle.full_like(self.target_model_inputs["top_p"], envs.FD_SPECULATE_SAMPLING_TOP_P)
+        )
+        self.model_inputs["top_k"] = (
+            self.target_model_inputs["top_k"]
+            if envs.FD_SPECULATE_SAMPLING_TOP_K is None
+            else paddle.full_like(self.target_model_inputs["top_k"], envs.FD_SPECULATE_SAMPLING_TOP_K)
+        )
         self.model_inputs["temperature"] = self.target_model_inputs["temperature"]
         self.model_inputs["eos_token_id"] = self.target_model_inputs["eos_token_id"]
         self.model_inputs["penalty_score"] = self.target_model_inputs["penalty_score"]
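The override broadcasts a single scalar across the batch with paddle.full_like, so every draft-model request slot shares the env-configured value, while the per-request tensors inherited from the target model are used otherwise. A small standalone sketch of the same conditional (tensor shape and values are illustrative):

import paddle

# Per-request top_p from the target model, e.g. shape [batch, 1].
target_top_p = paddle.to_tensor([[0.7], [0.8]], dtype="float32")
override = 0.95  # stand-in for envs.FD_SPECULATE_SAMPLING_TOP_P

# Inherit when no override is set; otherwise broadcast the scalar
# to every slot with paddle.full_like, as the hunk does.
draft_top_p = (
    target_top_p
    if override is None
    else paddle.full_like(target_top_p, override)
)
print(draft_top_p.numpy())  # [[0.95], [0.95]]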