* fix typos

* ci

---------

Co-authored-by: YuBaoku <49938469+EmmonsCurse@users.noreply.github.com>
Author: co63oc
Date: 2025-09-12 11:04:38 +08:00
Committed by: GitHub
Parent: 82dab8a91a
Commit: 8466219ec8
14 changed files with 22 additions and 22 deletions


@@ -514,7 +514,7 @@ class EngineService:
main_process_metrics.num_requests_waiting.dec(len(tasks))
main_process_metrics.num_requests_running.inc(len(tasks))
except Exception as e:
err_msg = f"Error happend while insert task to engine: {e}, {traceback.format_exc()!s}."
err_msg = f"Error happened while insert task to engine: {e}, {traceback.format_exc()!s}."
llm_logger.error(err_msg)
def _scheduler_task_to_worker_v1(self):
@@ -569,7 +569,7 @@ class EngineService:
time.sleep(0.005)
except Exception as e:
err_msg = "Error happend while insert task to engine: {}, {}.".format(e, str(traceback.format_exc()))
err_msg = "Error happened while insert task to engine: {}, {}.".format(e, str(traceback.format_exc()))
llm_logger.error(err_msg)
def start_zmq_service(self, api_server_pid=None):
@@ -651,7 +651,7 @@ class EngineService:
self.zmq_server.send_multipart(request_id, [error_result])
except Exception as e:
llm_logger.error(
f"Error happend while receiving new request from zmq, details={e}, "
f"Error happened while receiving new request from zmq, details={e}, "
f"traceback={traceback.format_exc()}"
)
@@ -669,7 +669,7 @@ class EngineService:
self.zmq_server.send_multipart(request_id, contents)
except Exception as e:
llm_logger.error(f"Unexcepted error happend: {e}, {traceback.format_exc()!s}")
llm_logger.error(f"Unexcepted error happened: {e}, {traceback.format_exc()!s}")
def split_mode_get_tasks(self):
"""


@@ -563,7 +563,7 @@ class LLMEngine:
try:
req_id = self._format_and_add_data(prompts)
except Exception as e:
llm_logger.error(f"Error happend while adding request, details={e}, {str(traceback.format_exc())}")
llm_logger.error(f"Error happened while adding request, details={e}, {str(traceback.format_exc())}")
raise EngineError(str(e), error_code=400)
# Get the result of the current request


@@ -204,8 +204,8 @@ class EngineClient:
f"preprocess time cost {preprocess_cost_time}"
)
-self.vaild_parameters(task)
-api_server_logger.debug(f"Recieve task: {task}")
+self.valid_parameters(task)
+api_server_logger.debug(f"Receive task: {task}")
try:
if not self.enable_mm:
self.zmq_client.send_json(task)
@@ -215,7 +215,7 @@ class EngineClient:
api_server_logger.error(f"zmq_client send task error: {e}, {str(traceback.format_exc())}")
raise EngineError(str(e), error_code=400)
-def vaild_parameters(self, data):
+def valid_parameters(self, data):
"""
Validate stream options
"""


@@ -125,7 +125,7 @@ class LLM:
continue
self.req_output[request_id].add(result)
except Exception as e:
llm_logger.error(f"Unexcepted error happend: {e}, {traceback.format_exc()!s}")
llm_logger.error(f"Unexcepted error happened: {e}, {traceback.format_exc()!s}")
def generate(
self,


@@ -124,7 +124,7 @@ class MobaAttentionBackend(AttentionBackend):
kv_cache_quant_type: str = None,
):
"""
-Caculate kv cache shape
+Calculate kv cache shape
"""
if kv_cache_quant_type is not None and kv_cache_quant_type == "int4_zp":
return (


@@ -56,7 +56,7 @@ class ParallelLMHead(nn.Layer):
embedding_dim (int): size of hidden state.
prefix (str): The name of current layer. Defaults to "".
with_bias (bool): whether to have bias. Default: False.
-dtype (str): The dtype of weight. Defalut: None.
+dtype (str): The dtype of weight. Default: None.
"""
super(ParallelLMHead, self).__init__()
self.weight_key: str = prefix + ".weight"


@@ -364,7 +364,7 @@ class Sampler(nn.Layer):
)
if sampling_metadata.enable_early_stop:
# will set the stop batch in stop_flags
-assert sampling_metadata.stop_flags is not None, "need stop_flags for eary stop"
+assert sampling_metadata.stop_flags is not None, "need stop_flags for early stop"
self.early_stopper.process(probs, next_tokens, sampling_metadata.stop_flags)
sampler_output = SamplerOutput(


@@ -683,7 +683,7 @@ class KernelInterface:
op_dict = {"op_name": op_name, "reset_zero_when_tune": ""}
op_dict["triton_kernel_args"] = ",".join(modified_arg_exclude_constexpr)
op_dict["key"] = ",".join(self.key_args)
-# when tunning, we need to reset the out to zero.
+# when tuning, we need to reset the out to zero.
if "reset_zero_when_tune" in other_config.keys():
op_dict["reset_zero_when_tune"] = other_config["reset_zero_when_tune"]


@@ -178,7 +178,7 @@ class TokenProcessor:
)
except Exception as e:
print(f"Recieve message error: {e}")
print(f"Receive message error: {e}")
continue
else:
is_blocking = True


@@ -105,7 +105,7 @@ class DynamicWeightManager:
def clear_parameters(self, pid: int = 0) -> None:
"""Clear all model parameters and free memory."""
logger.info("start clear paramaters")
logger.info("start clear parameters")
paddle.device.cuda.empty_cache()
for param in self.model.state_dict().values():
param._clear_data()


@@ -648,7 +648,7 @@ class GlobalScheduler:
stolen_responses[response_queue_name].append(response.serialize())
continue
scheduler_logger.error(f"Scheduler has recieved a non-existent response from engine: {[response]}")
scheduler_logger.error(f"Scheduler has received a non-existent response from engine: {[response]}")
with self.mutex:
for request_id, responses in local_responses.items():


@@ -49,7 +49,7 @@ class DcuWorker(GpuWorker):
"""
self.max_chips_per_node = 8
if self.device_config.device_type == "cuda" and paddle.device.is_compiled_with_cuda():
-# Set evironment variable
+# Set environment variable
self.device_ids = self.parallel_config.device_ids.split(",")
self.device = f"gpu:{self.local_rank % self.max_chips_per_node}"
paddle.device.set_device(self.device)