Fix more grammar and spelling issues in error log messages

Co-authored-by: Jiang-Jia-Jun <163579578+Jiang-Jia-Jun@users.noreply.github.com>
copilot-swe-agent[bot]
2025-12-23 12:10:17 +00:00
parent cee3a7a356
commit a79dfc108c
6 changed files with 8 additions and 8 deletions


@@ -603,7 +603,7 @@ class CacheMessagerV1:
                 else:
                     time.sleep(0.001)
             except Exception as e:
-                logger.info(f"add cache task occured error: {e}, {traceback.format_exc()!s}.")
+                logger.info(f"add cache task occurred error: {e}, {traceback.format_exc()!s}.")
 
     def prefill_layerwise_send_cache_thread(self):
         """


@@ -1641,7 +1641,7 @@ class EngineService:
         if think_end_id > 0:
             self.llm_logger.info(f"Get think_end_id {think_end_id} from vocab.")
         else:
-            self.llm_logger.info("No </think> token found in vocabulary, the model can not do reasoning.")
+            self.llm_logger.info("No </think> token found in vocabulary, the model cannot do reasoning.")
         image_patch_id = self.data_processor.tokenizer.get_vocab().get("<|IMAGE_PLACEHOLDER|>", -1)
         line_break_id = self.data_processor.tokenizer.get_vocab().get("\n", -1)


@@ -522,7 +522,7 @@ class LLMEngine:
         if think_end_id > 0:
             llm_logger.info(f"Get think_end_id {think_end_id} from vocab.")
         else:
-            llm_logger.info("No </think> token found in vocabulary, the model can not do reasoning.")
+            llm_logger.info("No </think> token found in vocabulary, the model cannot do reasoning.")
         image_patch_id = self.data_processor.tokenizer.get_vocab().get("<|IMAGE_PLACEHOLDER|>", -1)
         line_break_id = self.data_processor.tokenizer.get_vocab().get("\n", -1)
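
The two hunks above apply the same fix to identical probes in EngineService and LLMEngine. As a hedged, self-contained restatement of what the probe does (the vocab dict and token ids below are hypothetical stand-ins for a real tokenizer vocabulary):

vocab = {"<|IMAGE_PLACEHOLDER|>": 100295, "\n": 198}  # hypothetical ids

# Look up the reasoning-end marker; a missing entry maps to -1.
think_end_id = vocab.get("</think>", -1)
if think_end_id > 0:
    print(f"Get think_end_id {think_end_id} from vocab.")
else:
    print("No </think> token found in vocabulary, the model cannot do reasoning.")

image_patch_id = vocab.get("<|IMAGE_PLACEHOLDER|>", -1)
line_break_id = vocab.get("\n", -1)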


@@ -213,7 +213,7 @@ class SamplingParams:
             if self.logprobs is not None and (self.logprobs < 0 or self.logprobs > 20):
                 raise ValueError("Invalid value for 'top_logprobs': must be between 0 and 20.")
             if self.prompt_logprobs is not None:
-                raise ValueError("prompt_logprobs is not support when FD_USE_GET_SAVE_OUTPUT_V1 is disabled.")
+                raise ValueError("prompt_logprobs is not supported when FD_USE_GET_SAVE_OUTPUT_V1 is disabled.")
         else:  # True (1)
             if self.logprobs is not None and self.logprobs < -1:
                 raise ValueError(f"logprobs must be a non-negative value or -1, got {self.logprobs}.")


@@ -416,7 +416,7 @@ class ResultReader:
                 result = RequestOutput.from_dict(data)
                 self.data.appendleft(result)
             except Exception as e:
-                logger.error(f"Parse Result Error:{e}, {str(traceback.format_exc())}, {result}")
+                logger.error(f"Parse Result Error: {e}, {str(traceback.format_exc())}, {result}")
         return total
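
The corrected message reports a parse failure inside a reader loop that, judging by the visible context, logs and continues rather than raising, so one malformed record cannot stall result consumption. A sketch under that assumption, with drain_results and from_dict as stand-ins for the real reader and RequestOutput.from_dict:

import logging
import traceback
from collections import deque

logger = logging.getLogger(__name__)

def drain_results(raw_items, data: deque, from_dict):
    # Parse each payload; keep what parses, log what does not.
    total = 0
    for raw in raw_items:
        result = None
        try:
            result = from_dict(raw)
            data.appendleft(result)
            total += 1
        except Exception as e:
            logger.error(f"Parse Result Error: {e}, {str(traceback.format_exc())}, {result}")
    return total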


@@ -119,9 +119,9 @@ class GCUModelRunner(ModelRunnerBase):
         Init speculative proposer
         """
         if self.speculative_method == "ngram":
-            raise NotImplementedError("NgramProposer is not support by GCUModelRunner.")
+            raise NotImplementedError("NgramProposer is not supported by GCUModelRunner.")
         elif self.speculative_method == "mtp":
-            raise NotImplementedError("MTPProposer is not support by GCUModelRunner.")
+            raise NotImplementedError("MTPProposer is not supported by GCUModelRunner.")
         else:
             self.proposer = None
@@ -644,7 +644,7 @@ class GCUModelRunner(ModelRunnerBase):
             if not profile and (
                 self.cache_config.enable_prefix_caching or self.scheduler_config.splitwise_role != "mixed"
             ):
-                raise NotImplementedError("prefix_caching is not support by GCUModelRunner.")
+                raise NotImplementedError("prefix_caching is not supported by GCUModelRunner.")
             else:
                 for i in range(self.model_config.num_hidden_layers):
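
Taken together, the three corrected messages in this file record that GCUModelRunner currently supports neither speculative decoding (ngram or mtp proposers) nor prefix caching. A compact sketch of the proposer dispatch, with an illustrative function name:

def init_proposer(speculative_method):
    # Hypothetical free-function version of the dispatch shown above.
    if speculative_method == "ngram":
        raise NotImplementedError("NgramProposer is not supported by GCUModelRunner.")
    elif speculative_method == "mtp":
        raise NotImplementedError("MTPProposer is not supported by GCUModelRunner.")
    return None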