【Feature】Add fd commit/branch info when starting the server (#2752)

* add_commit_config

* fix

---------

Co-authored-by: Jiang-Jia-Jun <163579578+Jiang-Jia-Jun@users.noreply.github.com>
Author: gaoziyuan
Date: 2025-07-09 11:52:22 +08:00
Committed by: GitHub
Parent: f4f1d8de44
Commit: 6b10c19482


@@ -17,6 +17,7 @@
 import json
 import os
 from datetime import datetime
+from dataclasses import dataclass
 from typing import Any, Dict, List, Literal, Optional

 from fastdeploy import envs
@@ -467,7 +468,63 @@ class ParallelConfig:
         llm_logger.info("Parallel Configuration Information :")
         for k, v in self.__dict__.items():
             llm_logger.info("{:<20}:{:<6}{}".format(k, "", v))
-        llm_logger.info("==================")
+        llm_logger.info(
+            "=============================================================")
+
+
+@dataclass
+class CommitConfig:
+    """
+    Configuration for tracking version information from version.txt
+
+    Attributes:
+        fastdeploy_commit: Full FastDeploy git commit hash
+        paddle_version: PaddlePaddle version string
+        paddle_commit: PaddlePaddle git commit hash
+        cuda_version: CUDA version string
+        compiler_version: CXX compiler version string
+    """
+    fastdeploy_commit: str = ""
+    paddle_version: str = ""
+    paddle_commit: str = ""
+    cuda_version: str = ""
+    compiler_version: str = ""
+
+    def __post_init__(self):
+        """Automatically load version info when initialized"""
+        self._load_from_version_file()
+
+    def _load_from_version_file(self,
+                                file_path: str = "fastdeploy/version.txt"):
+        """Internal method to load version info from file"""
+        try:
+            with open(file_path, 'r') as f:
+                for line in f:
+                    line = line.strip()
+                    if line.startswith("fastdeploy GIT COMMIT ID:"):
+                        self.fastdeploy_commit = line.split(":")[1].strip()
+                    elif line.startswith("Paddle version:"):
+                        self.paddle_version = line.split(":")[1].strip()
+                    elif line.startswith("Paddle GIT COMMIT ID:"):
+                        self.paddle_commit = line.split(":")[1].strip()
+                    elif line.startswith("CUDA version:"):
+                        self.cuda_version = line.split(":")[1].strip()
+                    elif line.startswith("CXX compiler version:"):
+                        self.compiler_version = line.split(":")[1].strip()
+        except FileNotFoundError:
+            llm_logger.info(f"Warning: Version file not found at {file_path}")
+        except Exception as e:
+            llm_logger.info(f"Warning: Could not read version file - {str(e)}")
+
+    def print(self):
+        """
+        Print all commit/version fields.
+        """
+        llm_logger.info("FastDeploy Commit Information :")
+        for k, v in self.__dict__.items():
+            llm_logger.info("{:<20}:{:<6}{}".format(k, "", v))
+        llm_logger.info(
+            "=============================================================")
+
+
 class Config:
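
The new CommitConfig dataclass simply scans fastdeploy/version.txt line by line and copies the value after each known prefix into the matching field. Below is a minimal, self-contained sketch of that parsing approach; the VersionInfo name, the sample file contents, and the use of split(":", 1) are illustrative choices here, not part of the FastDeploy change.

# Standalone sketch: parse a version.txt-style file by line prefix.
# Sample contents and the VersionInfo name are hypothetical; the real
# fastdeploy/version.txt is generated at build time.
import os
import tempfile
from dataclasses import dataclass


@dataclass
class VersionInfo:
    fastdeploy_commit: str = ""
    paddle_version: str = ""

    def load(self, file_path: str):
        # Map each known line prefix to the attribute it should fill.
        prefixes = {
            "fastdeploy GIT COMMIT ID:": "fastdeploy_commit",
            "Paddle version:": "paddle_version",
        }
        with open(file_path, "r") as f:
            for line in f:
                line = line.strip()
                for prefix, attr in prefixes.items():
                    if line.startswith(prefix):
                        # split(":", 1) keeps values that themselves contain ":"
                        setattr(self, attr, line.split(":", 1)[1].strip())


if __name__ == "__main__":
    with tempfile.TemporaryDirectory() as tmp:
        path = os.path.join(tmp, "version.txt")
        with open(path, "w") as f:
            f.write("fastdeploy GIT COMMIT ID: 6b10c19482\n"
                    "Paddle version: 3.0.0\n")
        info = VersionInfo()
        info.load(path)
        print(info)
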
@@ -502,6 +559,7 @@ class Config:
                  cache_config: CacheConfig,
                  scheduler_config: SchedulerConfig,
                  parallel_config: ParallelConfig,
+                 commit_config: CommitConfig = CommitConfig(),
                  model_name_or_path: str = None,
                  tokenizer: str = None,
                  tensor_parallel_size: int = 8,
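
A side note on the new commit_config parameter, not part of the diff itself: a default written as CommitConfig() is evaluated once, when the def __init__ statement runs at import time, so every Config built without an explicit commit_config shares that single instance, and version.txt is therefore read once at import. A tiny sketch of the behaviour, with hypothetical names:

from dataclasses import dataclass


@dataclass
class Version:                 # hypothetical stand-in for CommitConfig
    commit: str = "deadbeef"


def make_config(version: Version = Version()):  # default built at def time
    return version


a = make_config()
b = make_config()
print(a is b)    # True: both calls reuse the one default instance
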
@@ -559,6 +617,7 @@ class Config:
         self.cache_config = cache_config
         self.scheduler_config = scheduler_config
         self.parallel_config = parallel_config
+        self.commit_config = commit_config
         self.model_name_or_path = model_name_or_path
         self.tokenizer = tokenizer
         self.max_num_batched_tokens = max_num_batched_tokens
@@ -756,7 +815,11 @@ class Config:
             if k == "generation_config" and v is not None:
                 for gck, gcv in v.to_dict().items():
                     llm_logger.info("{:<20}:{:<6}{}".format(gck, "", gcv))
-            elif k == "cache_config" or k == "model_config" or k == "scheduler_config" or k == "parallel_config":
+            elif (k == "cache_config" or
+                  k == "model_config" or
+                  k == "scheduler_config" or
+                  k == "parallel_config" or
+                  k == "commit_config"):
                 v.print()
             else:
                 llm_logger.info("{:<20}:{:<6}{}".format(k, "", v))
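
The dispatch in Config.print() above relies on each sub-config object exposing its own print() method; the parent just routes the known attribute names to it, so the commit information shows up in the startup log alongside the other config blocks. A condensed, self-contained sketch of that pattern follows; the class names and fields are illustrative, not the FastDeploy API, and the endswith("_config") check stands in for the explicit name comparisons used in the real code.

# Sketch of the sub-config print() dispatch pattern.
import logging

logging.basicConfig(level=logging.INFO, format="%(message)s")
logger = logging.getLogger("demo")


class SubConfig:
    def __init__(self, **fields):
        self.__dict__.update(fields)

    def print(self):
        for k, v in self.__dict__.items():
            logger.info("{:<20}:{:<6}{}".format(k, "", v))


class DemoConfig:
    def __init__(self):
        self.commit_config = SubConfig(fastdeploy_commit="6b10c19482")
        self.max_num_batched_tokens = 8192

    def print(self):
        for k, v in self.__dict__.items():
            if k.endswith("_config"):
                v.print()          # delegate to the sub-config's own print()
            else:
                logger.info("{:<20}:{:<6}{}".format(k, "", v))


DemoConfig().print()
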