Mirror of https://github.com/PaddlePaddle/FastDeploy.git (synced 2025-10-05 08:37:06 +08:00)
[FIX]fix bad_words when sending requests consecutively (#3197)
Some checks failed: Deploy GitHub Pages / deploy (push) has been cancelled
* fix bad_words
* fix log
* fix log
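Taken together, the hunks below make two changes. In `SamplingParams`, the skip warnings for unusable bad words are now emitted only when `add_prefix_space` is falsy (presumably so a word tried both with and without a leading space is reported once) and the skipped word is delimited as `<{prompt}>`. In each model runner (`GCUModelRunner`, `GPUModelRunner`, `IluvatarModelRunner`, `XPUModelRunner`), a request that arrives without `bad_words_token_ids` now resets the per-slot `bad_tokens` buffer to a `-1` sentinel instead of leaving the previous request's banned tokens behind, which is what broke consecutive requests.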
```diff
@@ -218,20 +218,22 @@ class SamplingParams:
             prompt_token_ids = tokenizer.encode(text=prompt, add_special_tokens=False)["input_ids"]
 
             if len(prompt_token_ids) != 1:
-                logger.warning(
-                    f"Skip bad_words: {prompt}."
-                    f"Bad words should be a single token."
-                    f"Got tokens: {prompt_token_ids}."
-                )
+                if not add_prefix_space:
+                    logger.warning(
+                        f"Skip bad_words: <{prompt}>."
+                        f"Bad words should be a single token."
+                        f"Got tokens: {prompt_token_ids}."
+                    )
                 continue
 
             if prompt_token_ids[0] > tokenizer.vocab_size:
-                logger.warning(
-                    f"Skip bad_words: {prompt}."
-                    f"All token id values should be satisfying:"
-                    f" 0 <= token_id < {tokenizer.vocab_size}."
-                    f"Got token: {prompt_token_ids}."
-                )
+                if not add_prefix_space:
+                    logger.warning(
+                        f"Skip bad_words: <{prompt}>."
+                        f"All token id values should be satisfying:"
+                        f" 0 <= token_id < {tokenizer.vocab_size}."
+                        f"Got token: {prompt_token_ids}."
+                    )
                 continue
 
             if prompt_token_ids not in self._bad_words_token_ids:
```
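On the `SamplingParams` side the change is purely about logging. A minimal sketch of the surrounding validation loop, with assumed names (`bad_words`, a PaddleNLP-style `tokenizer` whose `encode` returns a dict) since the loop itself sits outside this hunk:

```python
from typing import List


def collect_bad_words_ids(tokenizer, bad_words: List[str], add_prefix_space: bool) -> List[List[int]]:
    """Keep only bad words that encode to exactly one in-vocabulary token."""
    bad_words_token_ids: List[List[int]] = []
    for word in bad_words:
        prompt = " " + word if add_prefix_space else word  # assumed retry variant
        # Assumed PaddleNLP-style tokenizer: encode() returns a dict.
        prompt_token_ids = tokenizer.encode(text=prompt, add_special_tokens=False)["input_ids"]
        if len(prompt_token_ids) != 1:
            continue  # multi-token words are skipped; warned only on the non-prefixed pass
        if prompt_token_ids[0] > tokenizer.vocab_size:
            continue  # ids beyond vocab_size (e.g. added tokens) are skipped
        if prompt_token_ids not in bad_words_token_ids:
            bad_words_token_ids.append(prompt_token_ids)
    return bad_words_token_ids
```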
```diff
@@ -270,13 +270,15 @@ class GCUModelRunner(ModelRunnerBase):
                 request.block_tables, dtype="int32"
             )
 
-            if request.get("bad_words_token_ids") is not None:
-                bad_words_len = len(request.get("bad_words_token_ids"))
-                if bad_words_len > 0:
-                    self.share_inputs["bad_tokens_len"][idx : idx + 1] = bad_words_len
-                    self.share_inputs["bad_tokens"][idx : idx + 1, :bad_words_len] = np.array(
-                        request.get("bad_words_token_ids"), dtype="int64"
-                    )
+            if request.get("bad_words_token_ids") is not None and len(request.get("bad_words_token_ids")) > 0:
+                bad_words_len = len(request.get("bad_words_token_ids"))
+                self.share_inputs["bad_tokens_len"][idx : idx + 1] = bad_words_len
+                self.share_inputs["bad_tokens"][idx : idx + 1, :bad_words_len] = np.array(
+                    request.get("bad_words_token_ids"), dtype="int64"
+                )
+            else:
+                self.share_inputs["bad_tokens_len"][idx : idx + 1] = 1
+                self.share_inputs["bad_tokens"][idx : idx + 1, :] = np.array([-1], dtype="int64")
 
             if request.get("stop_token_ids") is not None and request.get("stop_seqs_len") is not None:
                 stop_seqs_num = len(request.get("stop_seqs_len"))
```
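This is the heart of the fix. The `share_inputs` buffers persist across requests, so when slot `idx` was reused by a request without `bad_words_token_ids`, the old code silently kept the previous request's banned tokens. The new `else` branch overwrites the slot with a single `-1` sentinel. A self-contained sketch of the pattern, with assumed shapes (4 slots, capacity 8) since the real buffer sizes live elsewhere in the runner:

```python
import numpy as np

NUM_SLOTS, MAX_BAD_TOKENS = 4, 8  # assumed sizes for illustration
share_inputs = {
    "bad_tokens": np.full((NUM_SLOTS, MAX_BAD_TOKENS), -1, dtype="int64"),
    "bad_tokens_len": np.ones((NUM_SLOTS, 1), dtype="int64"),
}


def insert_request(idx: int, bad_words_token_ids):
    """Mirror of the two-branch logic the diff introduces (simplified)."""
    if bad_words_token_ids is not None and len(bad_words_token_ids) > 0:
        bad_words_len = len(bad_words_token_ids)
        share_inputs["bad_tokens_len"][idx : idx + 1] = bad_words_len
        share_inputs["bad_tokens"][idx : idx + 1, :bad_words_len] = np.array(
            bad_words_token_ids, dtype="int64"
        )
    else:
        # Without this branch, a reused slot keeps the previous request's
        # tokens -- the bug this commit fixes.
        share_inputs["bad_tokens_len"][idx : idx + 1] = 1
        share_inputs["bad_tokens"][idx : idx + 1, :] = np.array([-1], dtype="int64")
```

The same two-branch pattern is applied verbatim in the `GPUModelRunner`, `IluvatarModelRunner`, and `XPUModelRunner` hunks that follow.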
```diff
@@ -489,13 +489,15 @@ class GPUModelRunner(ModelRunnerBase):
                 request.block_tables, dtype="int32"
             )
 
-            if request.get("bad_words_token_ids") is not None:
-                bad_words_len = len(request.get("bad_words_token_ids"))
-                if bad_words_len > 0:
-                    self.share_inputs["bad_tokens_len"][idx : idx + 1] = bad_words_len
-                    self.share_inputs["bad_tokens"][idx : idx + 1, :bad_words_len] = np.array(
-                        request.get("bad_words_token_ids"), dtype="int64"
-                    )
+            if request.get("bad_words_token_ids") is not None and len(request.get("bad_words_token_ids")) > 0:
+                bad_words_len = len(request.get("bad_words_token_ids"))
+                self.share_inputs["bad_tokens_len"][idx : idx + 1] = bad_words_len
+                self.share_inputs["bad_tokens"][idx : idx + 1, :bad_words_len] = np.array(
+                    request.get("bad_words_token_ids"), dtype="int64"
+                )
+            else:
+                self.share_inputs["bad_tokens_len"][idx : idx + 1] = 1
+                self.share_inputs["bad_tokens"][idx : idx + 1, :] = np.array([-1], dtype="int64")
 
             if request.get("stop_token_ids") is not None and request.get("stop_seqs_len") is not None:
                 stop_seqs_num = len(request.get("stop_seqs_len"))
```
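For context on why the `-1` sentinel is safe: downstream, a sampler only needs to mask the first `bad_tokens_len` ids, and negative ids can be filtered out. The names and masking strategy below are assumptions for illustration, not FastDeploy's actual sampling kernel:

```python
import numpy as np


def mask_bad_tokens(logits: np.ndarray, bad_tokens_row: np.ndarray, bad_tokens_len: int) -> np.ndarray:
    """Set banned token ids to -inf so they can never be sampled."""
    ids = bad_tokens_row[:bad_tokens_len]
    ids = ids[ids >= 0]  # the -1 sentinel written by the else-branch is a no-op here
    logits[ids] = -np.inf
    return logits
```

After the reset, `bad_tokens_row` is all `-1` and `bad_tokens_len` is 1, so `ids` is empty and the logits pass through unchanged.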
```diff
@@ -242,13 +242,15 @@ class IluvatarModelRunner(ModelRunnerBase):
                 request.block_tables, dtype="int32"
             )
 
-            if request.get("bad_words_token_ids") is not None:
-                bad_words_len = len(request.get("bad_words_token_ids"))
-                if bad_words_len > 0:
-                    self.share_inputs["bad_tokens_len"][idx : idx + 1] = bad_words_len
-                    self.share_inputs["bad_tokens"][idx : idx + 1, :bad_words_len] = np.array(
-                        request.get("bad_words_token_ids"), dtype="int64"
-                    )
+            if request.get("bad_words_token_ids") is not None and len(request.get("bad_words_token_ids")) > 0:
+                bad_words_len = len(request.get("bad_words_token_ids"))
+                self.share_inputs["bad_tokens_len"][idx : idx + 1] = bad_words_len
+                self.share_inputs["bad_tokens"][idx : idx + 1, :bad_words_len] = np.array(
+                    request.get("bad_words_token_ids"), dtype="int64"
+                )
+            else:
+                self.share_inputs["bad_tokens_len"][idx : idx + 1] = 1
+                self.share_inputs["bad_tokens"][idx : idx + 1, :] = np.array([-1], dtype="int64")
 
             if request.get("stop_token_ids") is not None and request.get("stop_seqs_len") is not None:
                 stop_seqs_num = len(request.get("stop_seqs_len"))
```
```diff
@@ -506,13 +506,15 @@ class XPUModelRunner(ModelRunnerBase):
                 request.block_tables, dtype="int32"
             )
 
-            if request.get("bad_words_token_ids") is not None:
-                bad_words_len = len(request.get("bad_words_token_ids"))
-                if bad_words_len > 0:
-                    self.share_inputs["bad_tokens_len"][idx : idx + 1] = bad_words_len
-                    self.share_inputs["bad_tokens"][idx : idx + 1, :bad_words_len] = np.array(
-                        request.get("bad_words_token_ids"), dtype="int64"
-                    )
+            if request.get("bad_words_token_ids") is not None and len(request.get("bad_words_token_ids")) > 0:
+                bad_words_len = len(request.get("bad_words_token_ids"))
+                self.share_inputs["bad_tokens_len"][idx : idx + 1] = bad_words_len
+                self.share_inputs["bad_tokens"][idx : idx + 1, :bad_words_len] = np.array(
+                    request.get("bad_words_token_ids"), dtype="int64"
+                )
+            else:
+                self.share_inputs["bad_tokens_len"][idx : idx + 1] = 1
+                self.share_inputs["bad_tokens"][idx : idx + 1, :] = np.array([-1], dtype="int64")
 
             if request.get("stop_token_ids") is not None and request.get("stop_seqs_len") is not None:
                 stop_seqs_num = len(request.get("stop_seqs_len"))
```
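A hypothetical back-to-back sequence against the `insert_request` sketch above shows the behaviour the commit restores when the same slot is reused:

```python
insert_request(0, [128, 300])  # request 1 bans token ids 128 and 300
insert_request(0, None)        # request 2 arrives with no bad words
assert share_inputs["bad_tokens_len"][0, 0] == 1
assert (share_inputs["bad_tokens"][0] == -1).all()  # stale ids cleared, not inherited
```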