* fix typos

* ci

---------

Co-authored-by: YuBaoku <49938469+EmmonsCurse@users.noreply.github.com>
co63oc authored 2025-09-12 11:04:38 +08:00, committed by GitHub
parent 82dab8a91a
commit 8466219ec8
14 changed files with 22 additions and 22 deletions

View File

@@ -190,7 +190,7 @@ __device__ void speculate_update_repeat_times_optimized(
         buffer_ptr_pre_ids.toggle();
       }
     }
-    // each core loads all the needed pre_ids into lm without mfence inbetween
+    // each core loads all the needed pre_ids into lm without mfence in between
     // according to the index recorded by previous iteration
     else {
       int cnt = -1;

View File

@@ -514,7 +514,7 @@ class EngineService:
             main_process_metrics.num_requests_waiting.dec(len(tasks))
             main_process_metrics.num_requests_running.inc(len(tasks))
         except Exception as e:
-            err_msg = f"Error happend while insert task to engine: {e}, {traceback.format_exc()!s}."
+            err_msg = f"Error happened while insert task to engine: {e}, {traceback.format_exc()!s}."
             llm_logger.error(err_msg)

     def _scheduler_task_to_worker_v1(self):
@@ -569,7 +569,7 @@ class EngineService:
                 time.sleep(0.005)
         except Exception as e:
-            err_msg = "Error happend while insert task to engine: {}, {}.".format(e, str(traceback.format_exc()))
+            err_msg = "Error happened while insert task to engine: {}, {}.".format(e, str(traceback.format_exc()))
             llm_logger.error(err_msg)

     def start_zmq_service(self, api_server_pid=None):
@@ -651,7 +651,7 @@ class EngineService:
                 self.zmq_server.send_multipart(request_id, [error_result])
             except Exception as e:
                 llm_logger.error(
-                    f"Error happend while receiving new request from zmq, details={e}, "
+                    f"Error happened while receiving new request from zmq, details={e}, "
                     f"traceback={traceback.format_exc()}"
                 )
@@ -669,7 +669,7 @@ class EngineService:
                 self.zmq_server.send_multipart(request_id, contents)
             except Exception as e:
-                llm_logger.error(f"Unexcepted error happend: {e}, {traceback.format_exc()!s}")
+                llm_logger.error(f"Unexcepted error happened: {e}, {traceback.format_exc()!s}")

     def split_mode_get_tasks(self):
         """

View File

@@ -563,7 +563,7 @@ class LLMEngine:
         try:
             req_id = self._format_and_add_data(prompts)
         except Exception as e:
-            llm_logger.error(f"Error happend while adding request, details={e}, {str(traceback.format_exc())}")
+            llm_logger.error(f"Error happened while adding request, details={e}, {str(traceback.format_exc())}")
             raise EngineError(str(e), error_code=400)
         # Get the result of the current request

View File

@@ -204,8 +204,8 @@ class EngineClient:
f"preprocess time cost {preprocess_cost_time}" f"preprocess time cost {preprocess_cost_time}"
) )
self.vaild_parameters(task) self.valid_parameters(task)
api_server_logger.debug(f"Recieve task: {task}") api_server_logger.debug(f"Receive task: {task}")
try: try:
if not self.enable_mm: if not self.enable_mm:
self.zmq_client.send_json(task) self.zmq_client.send_json(task)
@@ -215,7 +215,7 @@ class EngineClient:
api_server_logger.error(f"zmq_client send task error: {e}, {str(traceback.format_exc())}") api_server_logger.error(f"zmq_client send task error: {e}, {str(traceback.format_exc())}")
raise EngineError(str(e), error_code=400) raise EngineError(str(e), error_code=400)
def vaild_parameters(self, data): def valid_parameters(self, data):
""" """
Validate stream options Validate stream options
""" """

View File

@@ -125,7 +125,7 @@ class LLM:
                     continue
                 self.req_output[request_id].add(result)
             except Exception as e:
-                llm_logger.error(f"Unexcepted error happend: {e}, {traceback.format_exc()!s}")
+                llm_logger.error(f"Unexcepted error happened: {e}, {traceback.format_exc()!s}")

     def generate(
         self,

View File

@@ -124,7 +124,7 @@ class MobaAttentionBackend(AttentionBackend):
         kv_cache_quant_type: str = None,
     ):
         """
-        Caculate kv cache shape
+        Calculate kv cache shape
         """
         if kv_cache_quant_type is not None and kv_cache_quant_type == "int4_zp":
             return (
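For context, the int4-with-zero-point branch above returns a smaller cache shape than the default path. A minimal sketch of the idea (the dimension names and exact layout are assumptions; the real shape comes from the backend's block configuration):

    # Illustrative only: packing two int4 values per byte roughly halves the last
    # dimension of the KV cache compared with an 8-bit layout. Names are assumed.
    def kv_cache_shape(max_blocks, kv_num_heads, block_size, head_dim, quant=None):
        if quant == "int4_zp":
            return (max_blocks, kv_num_heads, block_size, head_dim // 2)
        return (max_blocks, kv_num_heads, block_size, head_dim)

    print(kv_cache_shape(1024, 8, 64, 128))             # (1024, 8, 64, 128)
    print(kv_cache_shape(1024, 8, 64, 128, "int4_zp"))  # (1024, 8, 64, 64)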

View File

@@ -56,7 +56,7 @@ class ParallelLMHead(nn.Layer):
             embedding_dim (int): size of hidden state.
             prefix (str): The name of current layer. Defaults to "".
             with_bias (bool): whether to have bias. Default: False.
-            dtype (str): The dtype of weight. Defalut: None.
+            dtype (str): The dtype of weight. Default: None.
         """
         super(ParallelLMHead, self).__init__()
         self.weight_key: str = prefix + ".weight"

View File

@@ -364,7 +364,7 @@ class Sampler(nn.Layer):
             )
             if sampling_metadata.enable_early_stop:
                 # will set the stop batch in stop_flags
-                assert sampling_metadata.stop_flags is not None, "need stop_flags for eary stop"
+                assert sampling_metadata.stop_flags is not None, "need stop_flags for early stop"
                 self.early_stopper.process(probs, next_tokens, sampling_metadata.stop_flags)
             sampler_output = SamplerOutput(
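The early stopper consumes the sampled probabilities and the freshly sampled tokens and flips per-request stop flags in place. A hedged sketch of one possible policy (the threshold-and-streak rule below is an assumption for illustration, not FastDeploy's actual EarlyStopper):

    import paddle

    # Hedged sketch: flag a request as stopped once its sampled token's probability
    # stays above a threshold for `patience` consecutive steps (often a sign of
    # degenerate repetition). This is an assumed policy, not the real EarlyStopper.
    class ThresholdEarlyStopper:
        def __init__(self, threshold: float = 0.95, patience: int = 3):
            self.threshold = threshold
            self.patience = patience
            self.streak = None

        def process(self, probs, next_tokens, stop_flags):
            # probs: [batch, vocab], next_tokens: [batch, 1], stop_flags: [batch, 1] bool
            if self.streak is None:
                self.streak = paddle.zeros(stop_flags.shape, dtype="int64")
            token_prob = paddle.take_along_axis(probs, next_tokens, axis=1)
            hit = (token_prob > self.threshold).astype("int64")
            self.streak = (self.streak + hit) * hit  # a miss resets the streak
            new_flags = paddle.logical_or(stop_flags, self.streak >= self.patience)
            paddle.assign(new_flags, stop_flags)     # update the flags in place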

View File

@@ -683,7 +683,7 @@ class KernelInterface:
op_dict = {"op_name": op_name, "reset_zero_when_tune": ""} op_dict = {"op_name": op_name, "reset_zero_when_tune": ""}
op_dict["triton_kernel_args"] = ",".join(modified_arg_exclude_constexpr) op_dict["triton_kernel_args"] = ",".join(modified_arg_exclude_constexpr)
op_dict["key"] = ",".join(self.key_args) op_dict["key"] = ",".join(self.key_args)
# when tunning, we need to reset the out to zero. # when tuning, we need to reset the out to zero.
if "reset_zero_when_tune" in other_config.keys(): if "reset_zero_when_tune" in other_config.keys():
op_dict["reset_zero_when_tune"] = other_config["reset_zero_when_tune"] op_dict["reset_zero_when_tune"] = other_config["reset_zero_when_tune"]

View File

@@ -178,7 +178,7 @@ class TokenProcessor:
                     )
                 except Exception as e:
-                    print(f"Recieve message error: {e}")
+                    print(f"Receive message error: {e}")
                     continue
             else:
                 is_blocking = True

View File

@@ -105,7 +105,7 @@ class DynamicWeightManager:
     def clear_parameters(self, pid: int = 0) -> None:
         """Clear all model parameters and free memory."""
-        logger.info("start clear paramaters")
+        logger.info("start clear parameters")
         paddle.device.cuda.empty_cache()
         for param in self.model.state_dict().values():
             param._clear_data()

View File

@@ -146,7 +146,7 @@ class Ernie4_5_MoeForCausalLMRL(Ernie4_5_MoeForCausalLM, BaseRLModel):
return "Ernie4_5_MoeForCausalLMRL" return "Ernie4_5_MoeForCausalLMRL"
def get_name_mappings_to_training(self, trainer_degree=None) -> Dict[str, str]: def get_name_mappings_to_training(self, trainer_degree=None) -> Dict[str, str]:
"""Generate mapping between inference and training parameter for RL(donot delete!).""" """Generate mapping between inference and training parameter for RL(do not delete!)."""
if self._mappings_built: if self._mappings_built:
return self.infer_to_train_mapping return self.infer_to_train_mapping
@@ -225,7 +225,7 @@ class Ernie4_5_VLMoeForConditionalGenerationRL(Ernie4_5_VLMoeForConditionalGener
return "Ernie4_5_VLMoeForConditionalGenerationRL" return "Ernie4_5_VLMoeForConditionalGenerationRL"
def get_name_mappings_to_training(self, trainer_degree=None) -> Dict[str, str]: def get_name_mappings_to_training(self, trainer_degree=None) -> Dict[str, str]:
"""Generate mapping between inference and training parameter for RL(donot delete!).""" """Generate mapping between inference and training parameter for RL(do not delete!)."""
if self._mappings_built: if self._mappings_built:
return self.infer_to_train_mapping return self.infer_to_train_mapping
@@ -331,7 +331,7 @@ class Qwen2ForCausalLMRL(Qwen2ForCausalLM, BaseRLModel):
return "Qwen2ForCausalLMRL" return "Qwen2ForCausalLMRL"
def get_name_mappings_to_training(self, trainer_degree=None) -> Dict[str, str]: def get_name_mappings_to_training(self, trainer_degree=None) -> Dict[str, str]:
"""Generate mapping between inference and training parameter for RL(donot delete!).""" """Generate mapping between inference and training parameter for RL(do not delete!)."""
if self._mappings_built: if self._mappings_built:
return self.infer_to_train_mapping return self.infer_to_train_mapping
@@ -380,7 +380,7 @@ class Qwen3MoeForCausalLMRL(Qwen3MoeForCausalLM, BaseRLModel):
return "Qwen3MoeForCausalLMRL" return "Qwen3MoeForCausalLMRL"
def get_name_mappings_to_training(self, trainer_degree=None) -> Dict[str, str]: def get_name_mappings_to_training(self, trainer_degree=None) -> Dict[str, str]:
"""Generate mapping between inference and training parameter for RL(donot delete!).""" """Generate mapping between inference and training parameter for RL(do not delete!)."""
if self._mappings_built: if self._mappings_built:
return self.infer_to_train_mapping return self.infer_to_train_mapping
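Each of these methods returns a plain inference-name to training-name dictionary that the RL side uses to copy weights back into the serving model; the _mappings_built check simply memoizes it. A hedged illustration of the shape of such a mapping (the key patterns and prefix difference below are invented for clarity, not the real Ernie or Qwen parameter names):

    from typing import Dict

    # Illustrative only: real mappings are model specific and built over every layer.
    def get_name_mappings_to_training(num_layers: int = 2) -> Dict[str, str]:
        mapping = {"lm_head.weight": "lm_head.weight"}
        for i in range(num_layers):
            # the serving engine may use a different module path than training does
            mapping[f"model.layers.{i}.self_attn.qkv_proj.weight"] = (
                f"model.model.layers.{i}.self_attn.qkv_proj.weight"
            )
        return mapping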

View File

@@ -648,7 +648,7 @@ class GlobalScheduler:
                     stolen_responses[response_queue_name].append(response.serialize())
                     continue
-                scheduler_logger.error(f"Scheduler has recieved a non-existent response from engine: {[response]}")
+                scheduler_logger.error(f"Scheduler has received a non-existent response from engine: {[response]}")
         with self.mutex:
             for request_id, responses in local_responses.items():

View File

@@ -49,7 +49,7 @@ class DcuWorker(GpuWorker):
""" """
self.max_chips_per_node = 8 self.max_chips_per_node = 8
if self.device_config.device_type == "cuda" and paddle.device.is_compiled_with_cuda(): if self.device_config.device_type == "cuda" and paddle.device.is_compiled_with_cuda():
# Set evironment variable # Set environment variable
self.device_ids = self.parallel_config.device_ids.split(",") self.device_ids = self.parallel_config.device_ids.split(",")
self.device = f"gpu:{self.local_rank % self.max_chips_per_node}" self.device = f"gpu:{self.local_rank % self.max_chips_per_node}"
paddle.device.set_device(self.device) paddle.device.set_device(self.device)
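The worker derives its device string from the local rank, wrapping at the per-node chip count before binding it with paddle.device.set_device. A small sketch of that mapping (the rank values are just examples):

    # Illustrative only: local ranks wrap around the 8-chip-per-node limit, so
    # rank 11 on an 8-chip node lands on gpu:3 before set_device is called.
    max_chips_per_node = 8
    for local_rank in (0, 7, 8, 11):
        print(local_rank, "->", f"gpu:{local_rank % max_chips_per_node}")
    # 0 -> gpu:0, 7 -> gpu:7, 8 -> gpu:0, 11 -> gpu:3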