Mirror of https://github.com/PaddlePaddle/FastDeploy.git
[Feature] support prompt repetition_penalty (#2806)
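This commit threads each request's prompt tokens through the GPU model runner so that sampling penalties can account for tokens that appeared in the prompt, not only previously generated ones. It adds two shared-input buffers, prompt_ids (a per-slot copy of the prompt token ids) and prompt_lens (the number of prompt tokens per slot), fills them along every prefill path, and passes both into the sampler next to the existing frequency, presence, and repetition penalties.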
@@ -227,12 +227,15 @@ class GPUModelRunner(ModelRunnerBase):
                                                1] = request.prompt_token_ids[-1]
                 self.share_inputs["input_ids"][idx:idx + 1,
                                                0] = request.prompt_token_ids[0]
+                self.share_inputs["prompt_ids"][idx:idx + 1,
+                                                0] = request.prompt_token_ids[0]
                 self.share_inputs['seq_lens_encoder'][idx:idx + 1] = 0
                 self.share_inputs['seq_lens_decoder'][idx:idx + 1] = length
                 self.share_inputs['seq_lens_this_time'][idx:idx + 1] = 1
                 self.share_inputs['step_seq_lens_encoder'][idx:idx + 1] = 0
                 self.share_inputs['step_seq_lens_decoder'][idx:idx +
                                                            1] = length
+                self.share_inputs["prompt_lens"][idx:idx + 1] = length
                 self.share_inputs['step_idx'][idx:idx + 1] = 1
 
                 if self.speculative_decoding:
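In this path, which enters decoding directly (seq_lens_encoder is 0 and step_idx starts at 1), only the boundary tokens are rewritten, so prompt_ids mirrors exactly what input_ids receives, while prompt_lens records the full prompt length for later use by the penalty computation.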
@@ -247,6 +250,9 @@ class GPUModelRunner(ModelRunnerBase):
             self.share_inputs["input_ids"][idx:idx +
                                            1, :length] = np.array(
                                                request.prompt_token_ids)
+            self.share_inputs["prompt_ids"][idx:idx +
+                                            1, :length] = np.array(
+                                                request.prompt_token_ids)
 
         # Use chunked prefill
         if self.parallel_config.enable_chunked_prefill:
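For a regular prefill the whole prompt is copied, so prompt_ids ends up identical to input_ids over the first length positions. A standalone sketch of the same copy, with made-up values:

    import numpy as np

    length = 5
    prompt_token_ids = [11, 27, 27, 89, 3]        # hypothetical request prompt
    buf = np.zeros((1, 16), dtype=np.int64)       # one row of the prompt_ids buffer
    buf[0, :length] = np.array(prompt_token_ids)  # same copy the hunk performs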
@@ -286,6 +292,7 @@ class GPUModelRunner(ModelRunnerBase):
                     idx:idx + 1] = token_chunk_size
                 self.share_inputs['seq_lens_encoder'][idx:idx +
                                                       1] = token_chunk_size
+                self.share_inputs["prompt_lens"][idx:idx + 1] = token_chunk_size
             else:
                 if self.enable_mm:
                     inputs = self._preprocess_mm_task(request.multimodal_inputs)
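Under chunked prefill, prompt_lens initially covers only the first chunk (token_chunk_size tokens); the hunk at line 1072 below appears to grow it as subsequent chunks are scheduled.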
@@ -310,6 +317,7 @@ class GPUModelRunner(ModelRunnerBase):
                 self.share_inputs['step_seq_lens_encoder'][idx:idx +
                                                            1] = length
                 self.share_inputs['seq_lens_encoder'][idx:idx + 1] = length
+                self.share_inputs["prompt_lens"][idx:idx + 1] = length
 
         if self.enable_mm:
             enable_thinking = request.get("enable_thinking", True)
@@ -408,6 +416,8 @@ class GPUModelRunner(ModelRunnerBase):
             self.share_inputs["input_ids"][idx:idx +
                                            1, :input_length] = np.array(
                                                [5] * input_length)
+            self.share_inputs["prompt_ids"][idx:idx + 1, :input_length] = np.array(
+                [5] * input_length)
             self.share_inputs["eos_token_id"][:] = np.array(
                 [2], dtype="int64").reshape(-1, 1)
             self.share_inputs["seq_lens_this_time"][idx:idx + 1] = input_length
@@ -415,6 +425,7 @@ class GPUModelRunner(ModelRunnerBase):
                                                 1] = input_length
             self.share_inputs["seq_lens_encoder"][idx:idx + 1] = input_length
             self.share_inputs["seq_lens_decoder"][idx:idx + 1] = 0
+            self.share_inputs["prompt_lens"][idx:idx + 1] = 0
             self.share_inputs["step_idx"][idx:idx + 1] = 0
             self.share_inputs["max_dec_len"][idx:idx + 1] = max_dec_len
             self.share_inputs["min_dec_len"][idx:idx + 1] = max_dec_len
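The two dummy-input hunks above serve the warmup/profile run: prompt_ids is filled with the placeholder token 5 just like input_ids, while prompt_lens is pinned to 0, which presumably keeps the prompt-repetition penalty inert during profiling.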
@@ -446,6 +457,10 @@ class GPUModelRunner(ModelRunnerBase):
             [max_num_seqs, self.parallel_config.max_model_len],
             self.parallel_config.pad_token_id,
             dtype='int64')
+        self.share_inputs["prompt_ids"] = paddle.full(
+            [max_num_seqs, self.parallel_config.max_model_len],
+            self.parallel_config.pad_token_id,
+            dtype='int64')
         self.share_inputs["eos_token_id"] = paddle.full(
             [self.parallel_config.eos_tokens_lens, 1], 0, dtype='int64')
         self.share_inputs["top_p"] = paddle.full([max_num_seqs, 1],
@@ -490,6 +505,9 @@ class GPUModelRunner(ModelRunnerBase):
             [max_num_seqs, 1], 0, dtype='int32')
         self.share_inputs["step_seq_lens_decoder"] = paddle.full(
             [max_num_seqs, 1], 0, dtype='int32')
+        self.share_inputs["prompt_lens"] = paddle.full([max_num_seqs, 1],
+                                                       0,
+                                                       dtype='int64')
         self.share_inputs["step_idx"] = paddle.full([max_num_seqs, 1],
                                                     0,
                                                     dtype='int64')
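Like the runner's other shared inputs, both new buffers are preallocated once at their maximum shapes. A minimal sketch of the same pattern with hypothetical sizes (not the project's actual configuration):

    import paddle

    max_num_seqs, max_model_len, pad_token_id = 4, 32, 0  # made-up values
    prompt_ids = paddle.full([max_num_seqs, max_model_len],
                             pad_token_id, dtype='int64')  # one row per slot
    prompt_lens = paddle.full([max_num_seqs, 1], 0, dtype='int64')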
@@ -699,6 +717,8 @@ class GPUModelRunner(ModelRunnerBase):
                 top_k=self.share_inputs["top_k"],
                 step_idx=self.share_inputs["step_idx"],
                 pre_token_ids=self.share_inputs["pre_ids"],
+                prompt_ids=self.share_inputs["prompt_ids"],
+                prompt_lens=self.share_inputs["prompt_lens"],
                 frequency_penalties=self.share_inputs["frequency_score"],
                 presence_penalties=self.share_inputs["presence_score"],
                 repetition_penalties=self.share_inputs["penalty_score"],
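With prompt_ids and prompt_lens now handed to the sampler, the repetition penalty can range over prompt tokens as well as the previously generated pre_token_ids. FastDeploy applies the penalties inside a fused sampling op; the snippet below is only a minimal NumPy sketch of the conventional repetition-penalty formula over the combined token set (names are illustrative, not the project's API):

    import numpy as np

    def repetition_penalize(logits, prompt_ids, prompt_len, generated_ids, penalty):
        # Collect every token id seen in the prompt or in prior output.
        seen = np.unique(np.concatenate([prompt_ids[:prompt_len], generated_ids]))
        vals = logits[seen]
        # Conventional rule: divide positive logits by the penalty and
        # multiply negative ones, so penalty > 1 discourages repeats.
        logits[seen] = np.where(vals > 0, vals / penalty, vals * penalty)
        return logits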
@@ -1036,6 +1056,10 @@ class GPUModelRunner(ModelRunnerBase):
                 self.share_inputs["image_features"] = None
                 token_chunk_size = inputs["input_ids"].shape[1]
                 self.share_inputs["input_ids"][idx:idx + 1, :token_chunk_size] = inputs["input_ids"]
+                self.share_inputs["prompt_ids"][
+                    idx:idx + 1,
+                    self.share_inputs["prompt_lens"][idx:idx + 1]:self.share_inputs["prompt_lens"][idx:idx + 1] + token_chunk_size
+                ] = inputs["input_ids"]
                 self.share_inputs["seq_lens_decoder"][idx:idx + 1] = task.start_idx
                 task.start_idx += token_chunk_size
             else:
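In the chunked multimodal path each chunk is written into prompt_ids at the slot's current prompt_lens offset, and the next hunk advances prompt_lens by token_chunk_size, so successive chunks land back to back. The bookkeeping in an illustrative standalone form (made-up shapes and values):

    import numpy as np

    prompt_ids = np.zeros((1, 16), dtype=np.int64)  # one slot, hypothetical capacity
    prompt_lens = np.zeros((1, 1), dtype=np.int64)
    for chunk in (np.array([7, 8, 9]), np.array([10, 11])):  # made-up chunks
        start = int(prompt_lens[0, 0])
        prompt_ids[0, start:start + chunk.size] = chunk  # append at offset
        prompt_lens[0, 0] += chunk.size                  # advance for next chunk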
@@ -1048,6 +1072,7 @@ class GPUModelRunner(ModelRunnerBase):
                     1] = token_chunk_size
                 self.share_inputs['seq_lens_encoder'][idx:idx +
                                                       1] = token_chunk_size
+                self.share_inputs["prompt_lens"][idx:idx + 1] += token_chunk_size
                 self.share_inputs["step_idx"][idx:idx + 1] = 0
 
             if self.speculative_decoding and self.proposer.is_chunk_prefill_enabled(