From 6b10c19482e5c88e363d026b173d9f15db5c6c85 Mon Sep 17 00:00:00 2001
From: gaoziyuan <88373061+gzy19990617@users.noreply.github.com>
Date: Wed, 9 Jul 2025 11:52:22 +0800
Subject: [PATCH] =?UTF-8?q?=E3=80=90Feature=E3=80=91add=20fd=20commit/bran?=
 =?UTF-8?q?ch=20info=20when=20start=20server=20(#2752)?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

* add_commit_config

* fix

---------

Co-authored-by: Jiang-Jia-Jun <163579578+Jiang-Jia-Jun@users.noreply.github.com>
---
 fastdeploy/engine/config.py | 67 +++++++++++++++++++++++++++++++++++--
 1 file changed, 65 insertions(+), 2 deletions(-)

diff --git a/fastdeploy/engine/config.py b/fastdeploy/engine/config.py
index 65f67254f..eb95f2bf1 100644
--- a/fastdeploy/engine/config.py
+++ b/fastdeploy/engine/config.py
@@ -17,6 +17,7 @@
 import json
 import os
 from datetime import datetime
+from dataclasses import dataclass
 from typing import Any, Dict, List, Literal, Optional
 
 from fastdeploy import envs
@@ -467,7 +468,63 @@ class ParallelConfig:
         llm_logger.info("Parallel Configuration Information :")
         for k, v in self.__dict__.items():
             llm_logger.info("{:<20}:{:<6}{}".format(k, "", v))
-        llm_logger.info("==================")
+        llm_logger.info(
+            "=============================================================")
+
+
+@dataclass
+class CommitConfig:
+    """
+    Configuration for tracking version information from version.txt
+
+    Attributes:
+        fastdeploy_commit: Full FastDeploy git commit hash
+        paddle_version: PaddlePaddle version string
+        paddle_commit: PaddlePaddle git commit hash
+        cuda_version: CUDA version string
+        compiler_version: CXX compiler version string
+    """
+    fastdeploy_commit: str = ""
+    paddle_version: str = ""
+    paddle_commit: str = ""
+    cuda_version: str = ""
+    compiler_version: str = ""
+
+    def __post_init__(self):
+        """Automatically load version info when initialized"""
+        self._load_from_version_file()
+
+    def _load_from_version_file(self, file_path: str = "fastdeploy/version.txt"):
+        """Internal method to load version info from file"""
+        try:
+            with open(file_path, 'r') as f:
+                for line in f:
+                    line = line.strip()
+                    if line.startswith("fastdeploy GIT COMMIT ID:"):
+                        self.fastdeploy_commit = line.split(":")[1].strip()
+                    elif line.startswith("Paddle version:"):
+                        self.paddle_version = line.split(":")[1].strip()
+                    elif line.startswith("Paddle GIT COMMIT ID:"):
+                        self.paddle_commit = line.split(":")[1].strip()
+                    elif line.startswith("CUDA version:"):
+                        self.cuda_version = line.split(":")[1].strip()
+                    elif line.startswith("CXX compiler version:"):
+                        self.compiler_version = line.split(":")[1].strip()
+        except FileNotFoundError:
+            llm_logger.info(f"Warning: Version file not found at {file_path}")
+        except Exception as e:
+            llm_logger.info(f"Warning: Could not read version file - {str(e)}")
+
+    def print(self):
+        """
+        Log every commit/version field of this config via llm_logger.
+
+        """
+        llm_logger.info("Fastdeploy Commit Information :")
+        for k, v in self.__dict__.items():
+            llm_logger.info("{:<20}:{:<6}{}".format(k, "", v))
+        llm_logger.info(
+            "=============================================================")
 
 
 class Config:
@@ -502,6 +559,7 @@ class Config:
                  cache_config: CacheConfig,
                  scheduler_config: SchedulerConfig,
                  parallel_config: ParallelConfig,
+                 commit_config: Optional[CommitConfig] = None,
                  model_name_or_path: str = None,
                  tokenizer: str = None,
                  tensor_parallel_size: int = 8,
@@ -559,6 +617,7 @@ class Config:
         self.cache_config = cache_config
         self.scheduler_config = scheduler_config
         self.parallel_config = parallel_config
+        self.commit_config = commit_config if commit_config is not None else CommitConfig()
         self.model_name_or_path = model_name_or_path
         self.tokenizer = tokenizer
         self.max_num_batched_tokens = max_num_batched_tokens
@@ -756,7 +815,11 @@ class Config:
             if k == "generation_config" and v is not None:
                 for gck, gcv in v.to_dict().items():
                     llm_logger.info("{:<20}:{:<6}{}".format(gck, "", gcv))
-            elif k == "cache_config" or k == "model_config" or k == "scheduler_config" or k == "parallel_config":
+            elif (k == "cache_config" or
+                  k == "model_config" or
+                  k == "scheduler_config" or
+                  k == "parallel_config" or
+                  k == "commit_config"):
                 v.print()
             else:
                 llm_logger.info("{:<20}:{:<6}{}".format(k, "", v))