polish code with new pre-commit rule (#2923)

Zero Rains authored on 2025-07-19 23:19:27 +08:00, committed by GitHub
parent b8676d71a8
commit 25698d56d1
424 changed files with 14307 additions and 13518 deletions


@@ -69,12 +69,12 @@ class ErnieProcessor(BaseDataProcessor):
         # Generation config
         try:
-            self.generation_config = GenerationConfig.from_pretrained(
-                self.model_name_or_path)
+            self.generation_config = GenerationConfig.from_pretrained(self.model_name_or_path)
         except Exception as e:
             data_processor_logger.warning(
                 f"Can't find generation config, so it will not use "
-                f"generation_config field in the model config, details={e}")
+                f"generation_config field in the model config, details={e}"
+            )
             self.generation_config = None
 
     def process_request(self, request, max_model_len=None, **kwargs):
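The hunk above keeps the load-or-fallback policy intact: if no generation config can be read, the processor logs a warning and carries on with `self.generation_config = None`. A minimal standalone sketch of that shape, where `load_config` is a hypothetical stand-in for `GenerationConfig.from_pretrained`, not the real API:

```python
import logging

logger = logging.getLogger("data_processor")

def load_config(path: str) -> dict:
    # Hypothetical loader; raises when no generation config exists under `path`.
    raise FileNotFoundError(f"no generation_config.json under {path}")

def try_load_generation_config(path: str):
    try:
        return load_config(path)
    except Exception as e:
        # Same policy as the hunk: warn and continue without a generation config.
        logger.warning("Can't find generation config, details=%s", e)
        return None

print(try_load_generation_config("./model"))  # -> None
```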
@@ -89,8 +89,7 @@ class ErnieProcessor(BaseDataProcessor):
             str: error message
         """
         request = self._apply_default_parameters(request)
-        if request.get("eos_token_ids") is None or len(
-                request.eos_token_ids) == 0:
+        if request.get("eos_token_ids") is None or len(request.eos_token_ids) == 0:
             request.eos_token_ids = self.eos_token_ids
         stop_sequences = request.get("stop", [])
         if stop_sequences is not None and len(stop_sequences) != 0:
@@ -98,12 +97,9 @@ class ErnieProcessor(BaseDataProcessor):
request.set("stop_token_ids", stop_seqs)
request.set("stop_seqs_len", stop_seqs_len)
if request.prompt_token_ids is None or len(
request.prompt_token_ids) == 0:
system = request.get("system")
if request.prompt_token_ids is None or len(request.prompt_token_ids) == 0:
if request.prompt is None and request.messages is None:
raise ValueError(
f"The request should have `input_ids`, `text` or `messages`: {request}.")
raise ValueError(f"The request should have `input_ids`, `text` or `messages`: {request}.")
if request.prompt is not None or not request.raw_request:
prompt = request.prompt if request.prompt is not None else request.messages[0]
prompt = prompt[0] if isinstance(prompt, list) else prompt
@@ -114,14 +110,13 @@ class ErnieProcessor(BaseDataProcessor):
             else:
                 request.prompt_token_ids = self.messages2ids(request.to_dict())
 
-        if max_model_len is not None and len(
-                request.prompt_token_ids) > max_model_len:
-            request.prompt_token_ids = request.prompt_token_ids[:
-                                                                max_model_len -
-                                                                1]
+        if max_model_len is not None and len(request.prompt_token_ids) > max_model_len:
+            request.prompt_token_ids = request.prompt_token_ids[: max_model_len - 1]
         if request.get("max_tokens") is None:
-            request.set("max_tokens",
-                        max(1, max_model_len - len(request.prompt_token_ids)))
+            request.set(
+                "max_tokens",
+                max(1, max_model_len - len(request.prompt_token_ids)),
+            )
         if request.get("temperature") < _SAMPLING_EPS:
             # zero temperature is equivalent to greedy sampling
             request.set("temperature", 1)
@@ -140,45 +135,36 @@ class ErnieProcessor(BaseDataProcessor):
             str: error message
         """
         request = self._apply_default_parameters(request)
-        if not request.get('eos_token_ids'):
-            request['eos_token_ids'] = self.eos_token_ids
+        if not request.get("eos_token_ids"):
+            request["eos_token_ids"] = self.eos_token_ids
 
         # Handle stop_sequences
-        stop_sequences = request.get('stop', [])
+        stop_sequences = request.get("stop", [])
         if stop_sequences:
             stop_seqs, stop_seqs_len = self.update_stop_seq(stop_sequences)
-            request['stop_token_ids'] = stop_seqs
-            request['stop_seqs_len'] = stop_seqs_len
+            request["stop_token_ids"] = stop_seqs
+            request["stop_seqs_len"] = stop_seqs_len
 
         system = request.get("system")
         # Handle prompt_token_ids
-        if not request.get('prompt_token_ids'):
-            if request.get('prompt') is None and request.get(
-                    'messages') is None:
-                raise ValueError(
-                    f"Request must contain 'prompt_token_ids', 'prompt', or 'messages': {request}"
-                )
-            if request.get('prompt'):
-                prompt = request.get('prompt')
+        if not request.get("prompt_token_ids"):
+            if request.get("prompt") is None and request.get("messages") is None:
+                raise ValueError(f"Request must contain 'prompt_token_ids', 'prompt', or 'messages': {request}")
+            if request.get("prompt"):
+                prompt = request.get("prompt")
                 prompt = prompt[0] if isinstance(prompt, list) else prompt
                 tokens = self.tokenizer.tokenize(prompt)
                 token_ids = self.tokenizer.convert_tokens_to_ids(tokens)
-                request['prompt_token_ids'] = token_ids
+                request["prompt_token_ids"] = token_ids
                 req_id = request.get("request_id", None)
-                data_processor_logger.info(
-                    f"req_id:{req_id}, tokens:{tokens}, token_ids: {token_ids}"
-                )
+                data_processor_logger.info(f"req_id:{req_id}, tokens:{tokens}, token_ids: {token_ids}")
             else:
-                request['prompt_token_ids'] = self.messages2ids(request)
+                request["prompt_token_ids"] = self.messages2ids(request)
 
         # Truncate prompts that exceed the length limit
-        if max_model_len is not None and len(
-                request['prompt_token_ids']) > max_model_len:
-            request['prompt_token_ids'] = request[
-                'prompt_token_ids'][:max_model_len - 1]
+        if max_model_len is not None and len(request["prompt_token_ids"]) > max_model_len:
+            request["prompt_token_ids"] = request["prompt_token_ids"][: max_model_len - 1]
         if request.get("max_tokens") is None:
-            request["max_tokens"] = max(
-                1, max_model_len - len(request['prompt_token_ids']))
+            request["max_tokens"] = max(1, max_model_len - len(request["prompt_token_ids"]))
         if request.get("temperature") < _SAMPLING_EPS:
             # zero temperature is equivalent to greedy sampling
             request["temperature"] = 1
@@ -200,22 +186,18 @@ class ErnieProcessor(BaseDataProcessor):
         req_id = response_dict.request_id
         token_ids = response_dict.outputs.token_ids
-        response_dict.usage = {
-            "completion_tokens": response_dict.outputs.index + 1
-        }
+        response_dict.usage = {"completion_tokens": response_dict.outputs.index + 1}
         if token_ids[-1] == self.tokenizer.eos_token_id:
             token_ids = token_ids[:-1]
         full_text = self.tokenizer.decode(token_ids)
 
         if self.reasoning_parser:
-            reasoning_content, text = self.reasoning_parser.extract_reasoning_content(
-                full_text, response_dict)
+            reasoning_content, text = self.reasoning_parser.extract_reasoning_content(full_text, response_dict)
             response_dict.outputs.text = text
             response_dict.outputs.reasoning_content = reasoning_content
         else:
             response_dict.outputs.text = full_text
         data_processor_logger.info(f"req_id:{req_id}, token_ids: {token_ids}")
 
-        if response_dict.outputs.text == "" and \
-                response_dict.outputs.reasoning_content == "":
+        if response_dict.outputs.text == "" and response_dict.outputs.reasoning_content == "":
             return None
         return response_dict
@@ -230,8 +212,7 @@ class ErnieProcessor(BaseDataProcessor):
             Dict: response containing text fields
         """
         if stream:
-            return self.process_response_dict_streaming(
-                response_dict, **kwargs)
+            return self.process_response_dict_streaming(response_dict, **kwargs)
         else:
             return self.process_response_dict_normal(response_dict, **kwargs)
@@ -255,16 +236,12 @@ class ErnieProcessor(BaseDataProcessor):
         if is_end:
             full_text = previous_texts + delta_text
             if self.reasoning_parser:
-                reasoning_content, text = self.reasoning_parser.extract_reasoning_content(
-                    full_text, response_dict)
+                reasoning_content, text = self.reasoning_parser.extract_reasoning_content(full_text, response_dict)
                 response_dict["outputs"]["text"] = text
-                response_dict["outputs"][
-                    "reasoning_content"] = reasoning_content
+                response_dict["outputs"]["reasoning_content"] = reasoning_content
             else:
                 response_dict["outputs"]["text"] = full_text
 
-            data_processor_logger.info(
-                f"req_id:{req_id}, decode_status: {self.decode_status[req_id]}"
-            )
+            data_processor_logger.info(f"req_id:{req_id}, decode_status: {self.decode_status[req_id]}")
             del self.decode_status[req_id]
         return response_dict
@@ -286,20 +263,22 @@ class ErnieProcessor(BaseDataProcessor):
         if is_end and len(token_ids) > 0 and not kwargs.get("include_stop_str_in_output"):
             if token_ids[-1] == self.tokenizer.eos_token_id:
                 token_ids = token_ids[:-1]
 
-        delta_text, previous_token_ids, previous_texts = self.ids2tokens(
-            token_ids, req_id)
+        delta_text, previous_token_ids, previous_texts = self.ids2tokens(token_ids, req_id)
         if enable_thinking and self.reasoning_parser:
             reasoning_content, text = self.reasoning_parser.extract_reasoning_content_streaming(
-                previous_texts, previous_texts + delta_text, delta_text,
-                previous_token_ids, previous_token_ids + token_ids, token_ids)
+                previous_texts,
+                previous_texts + delta_text,
+                delta_text,
+                previous_token_ids,
+                previous_token_ids + token_ids,
+                token_ids,
+            )
             response_dict["outputs"]["text"] = text
             response_dict["outputs"]["reasoning_content"] = reasoning_content
         else:
             response_dict["outputs"]["text"] = delta_text
         if is_end:
-            data_processor_logger.info(
-                f"req_id:{req_id}, decode_status: {self.decode_status[req_id]}"
-            )
+            data_processor_logger.info(f"req_id:{req_id}, decode_status: {self.decode_status[req_id]}")
             del self.decode_status[req_id]
         return response_dict
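The rewrapped call makes the parser's contract easier to see: it receives the previous text, the updated text, and the delta, plus the matching token-id views. A toy sketch of how a streaming reasoning parser might use those text views to split a `</think>`-terminated reasoning section from the answer (simplified logic, not the repository's parser; the id arguments are accepted but unused here):

```python
def extract_reasoning_streaming(previous_text, current_text, delta_text,
                                previous_ids, current_ids, delta_ids):
    # Toy rule: text before "</think>" is reasoning, text after is the answer.
    marker = "</think>"
    if marker in current_text:
        reasoning, _, answer = current_text.partition(marker)
        return reasoning, answer
    return current_text, ""  # still inside the reasoning section

prev = "<think>adding 2+2"
delta = " equals 4</think>The answer is 4."
reasoning, text = extract_reasoning_streaming(prev, prev + delta, delta, [], [], [])
print(reasoning)  # <think>adding 2+2 equals 4
print(text)       # The answer is 4.
```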
@@ -320,15 +299,15 @@ class ErnieProcessor(BaseDataProcessor):
             request_or_messages,
             tokenize=False,
             split_special_tokens=False,
-            add_special_tokens=False)
+            add_special_tokens=False,
+        )
         req_id = None
         if isinstance(request_or_messages, dict):
             req_id = request_or_messages.get("request_id", None)
         tokens = self.tokenizer.tokenize(spliced_message)
         token_ids = self.tokenizer.convert_tokens_to_ids(tokens)
-        data_processor_logger.info(
-            f"req_id:{req_id}, tokens:{tokens}, token_ids: {token_ids}")
+        data_processor_logger.info(f"req_id:{req_id}, tokens:{tokens}, token_ids: {token_ids}")
         return token_ids
 
     def ids2tokens(self, token_id, task_id):
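`messages2ids` is a two-step pipeline: render the messages through the chat template with `tokenize=False`, then tokenize the spliced string and map tokens to ids. A toy end-to-end version, with a trivial template and a whitespace tokenizer standing in for the real ones:

```python
def render_template(messages):
    # Toy chat template ("role: content" per line), stand-in for apply_chat_template.
    return "\n".join(f"{m['role']}: {m['content']}" for m in messages)

VOCAB = {}

def tokenize_to_ids(text):
    # Whitespace tokenizer with an auto-growing vocab, stand-in for the real tokenizer.
    return [VOCAB.setdefault(tok, len(VOCAB)) for tok in text.split()]

messages = [{"role": "user", "content": "hello there"}]
spliced_message = render_template(messages)   # step 1: splice into a single string
token_ids = tokenize_to_ids(spliced_message)  # step 2: tokenize and convert to ids
print(token_ids)  # [0, 1, 2]
```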
@@ -352,7 +331,8 @@ class ErnieProcessor(BaseDataProcessor):
         previous_token_ids = self.decode_status[task_id][2]
         previous_texts = self.decode_status[task_id][3]
         decode_str, prefix_offset, read_offset = self.tokenizer.decode_token(
-            previous_token_ids + token_id, prefix_offset, read_offset)
+            previous_token_ids + token_id, prefix_offset, read_offset
+        )
         self.decode_status[task_id][0] = prefix_offset
         self.decode_status[task_id][1] = read_offset
         self.decode_status[task_id][2] += token_id
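`decode_token` here is incremental detokenization: `prefix_offset` and `read_offset` remember how much of the accumulated id sequence has already been rendered, so each call emits only the newly finalized text. A simplified self-contained version of the offset dance (one character per id; the real tokenizer method's behaviour is assumed from this call site):

```python
def toy_decode(ids):
    # Stand-in for tokenizer.decode: each id maps to one letter.
    return "".join(chr(ord("a") + i) for i in ids)

def decode_token(all_ids, prefix_offset, read_offset):
    # Render the previously emitted window and the extended window; the
    # difference between them is the newly finalized text.
    prefix_text = toy_decode(all_ids[prefix_offset:read_offset])
    new_text = toy_decode(all_ids[prefix_offset:])
    if len(new_text) > len(prefix_text):
        return new_text[len(prefix_text):], read_offset, len(all_ids)
    return "", prefix_offset, read_offset

ids, prefix_offset, read_offset = [], 0, 0
for tok in [0, 1, 2]:
    ids.append(tok)
    delta, prefix_offset, read_offset = decode_token(ids, prefix_offset, read_offset)
    print(delta, end="")  # prints "abc" one letter per step
```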
@@ -368,17 +348,15 @@ class ErnieProcessor(BaseDataProcessor):
             tokenizer (AutoTokenizer)
         """
         vocab_file_names = [
-            "tokenizer.model", "spm.model", "ernie_token_100k.model"
+            "tokenizer.model",
+            "spm.model",
+            "ernie_token_100k.model",
         ]
         for i in range(len(vocab_file_names)):
-            if os.path.exists(
-                    os.path.join(self.model_name_or_path,
-                                 vocab_file_names[i])):
-                ErnieBotTokenizer.resource_files_names[
-                    "vocab_file"] = vocab_file_names[i]
+            if os.path.exists(os.path.join(self.model_name_or_path, vocab_file_names[i])):
+                ErnieBotTokenizer.resource_files_names["vocab_file"] = vocab_file_names[i]
                 break
-        self.tokenizer = ErnieBotTokenizer.from_pretrained(
-            self.model_name_or_path)
+        self.tokenizer = ErnieBotTokenizer.from_pretrained(self.model_name_or_path)
 
     def get_pad_id(self):
         """
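The loop above probes for the first vocab file present in the model directory before instantiating the tokenizer. The same probe as a small stdlib-only helper (candidate names copied from the hunk):

```python
import os

VOCAB_CANDIDATES = ("tokenizer.model", "spm.model", "ernie_token_100k.model")

def find_vocab_file(model_dir, candidates=VOCAB_CANDIDATES):
    # Return the first candidate that exists under model_dir, else None.
    for name in candidates:
        if os.path.exists(os.path.join(model_dir, name)):
            return name
    return None

print(find_vocab_file("/tmp"))  # None unless one of the files is present
```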
@@ -391,16 +369,17 @@ class ErnieProcessor(BaseDataProcessor):
         # return self.tokenizer.eos_token
         return self.tokenizer.pad_token_id
 
-    def pad_batch_data(self,
-                       insts,
-                       pad_id=0,
-                       return_seq_len=False,
-                       return_array=True,
-                       pad_style="right"):
+    def pad_batch_data(
+        self,
+        insts,
+        pad_id=0,
+        return_seq_len=False,
+        return_array=True,
+        pad_style="right",
+    ):
         """Pad the instances to the max sequence length in batch."""
         if len(insts) == 0:
-            padded_insts = np.array([[]],
-                                    dtype=np.int64) if return_array else [[]]
+            padded_insts = np.array([[]], dtype=np.int64) if return_array else [[]]
             if return_seq_len:
                 seq_len = np.array([], dtype=np.int64) if return_array else []
             return padded_insts, seq_len
@@ -408,15 +387,11 @@ class ErnieProcessor(BaseDataProcessor):
         max_len = max(map(len, insts))
         if pad_style == "left":
-            padded_insts = [[pad_id] * (max_len - len(inst)) + list(inst)
-                            for inst in insts]
+            padded_insts = [[pad_id] * (max_len - len(inst)) + list(inst) for inst in insts]
         else:
-            padded_insts = [
-                list(inst) + [pad_id] * (max_len - len(inst)) for inst in insts
-            ]
+            padded_insts = [list(inst) + [pad_id] * (max_len - len(inst)) for inst in insts]
         if return_array:
-            padded_insts = np.array(padded_insts,
-                                    dtype=np.int64).reshape([-1, max_len])
+            padded_insts = np.array(padded_insts, dtype=np.int64).reshape([-1, max_len])
         if return_seq_len:
             seq_len = [len(inst) for inst in insts]
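With the reformat, `pad_batch_data` reads in one pass: left- or right-pad every instance to the batch max length, optionally returning an int64 array and the original lengths. A quick usage sketch of the same padding behaviour as a standalone function:

```python
import numpy as np

def pad_batch(insts, pad_id=0, pad_style="right"):
    max_len = max(map(len, insts))
    if pad_style == "left":
        rows = [[pad_id] * (max_len - len(inst)) + list(inst) for inst in insts]
    else:
        rows = [list(inst) + [pad_id] * (max_len - len(inst)) for inst in insts]
    return np.array(rows, dtype=np.int64).reshape([-1, max_len])

print(pad_batch([[1, 2, 3], [4]], pad_id=-1))
# [[ 1  2  3]
#  [ 4 -1 -1]]
```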
@@ -432,15 +407,9 @@ class ErnieProcessor(BaseDataProcessor):
         stop_seqs = []
         for seq in stop_sequences:
             if seq != self.tokenizer.eos_token_id:
-                stop_seqs.append(
-                    self.tokenizer.convert_tokens_to_ids(
-                        self.tokenizer.tokenize(seq)))
-        stop_seqs, stop_seqs_len = self.pad_batch_data(stop_seqs,
-                                                       pad_id=-1,
-                                                       return_seq_len=True,
-                                                       return_array=False)
-        data_processor_logger.debug(
-            f"processed stop_seqs: {stop_seqs}, {stop_seqs_len}")
+                stop_seqs.append(self.tokenizer.convert_tokens_to_ids(self.tokenizer.tokenize(seq)))
+        stop_seqs, stop_seqs_len = self.pad_batch_data(stop_seqs, pad_id=-1, return_seq_len=True, return_array=False)
+        data_processor_logger.debug(f"processed stop_seqs: {stop_seqs}, {stop_seqs_len}")
         return stop_seqs, stop_seqs_len
 
     def process_logprob_response(self, token_ids, **kwargs):
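`update_stop_seq`, now three lines, tokenizes each stop string, pads the id lists into a rectangle with `pad_id=-1`, and returns the per-sequence lengths alongside. An end-to-end sketch with a toy encoder in place of the tokenizer:

```python
def toy_encode(text):
    # Stand-in for tokenize + convert_tokens_to_ids.
    return [ord(c) for c in text]

def update_stop_seq(stop_sequences, pad_id=-1):
    stop_seqs = [toy_encode(seq) for seq in stop_sequences]
    stop_seqs_len = [len(s) for s in stop_seqs]
    max_len = max(stop_seqs_len)
    stop_seqs = [s + [pad_id] * (max_len - len(s)) for s in stop_seqs]
    return stop_seqs, stop_seqs_len

print(update_stop_seq(["##", "STOP"]))
# ([[35, 35, -1, -1], [83, 84, 79, 80]], [2, 4])
```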