From 7634ffb7094e6c34f487e2d3fa6f5cf78e733950 Mon Sep 17 00:00:00 2001
From: EnflameGCU <118410644+EnflameGCU@users.noreply.github.com>
Date: Fri, 25 Jul 2025 10:59:29 +0800
Subject: [PATCH] [GCU] Add CI (#3006)

---
 .github/workflows/ci_gcu.yml | 89 ++++++++++++++++++++++++++++++++++++
 scripts/run_ci_gcu.sh        | 86 ++++++++++++++++++++++++++++++++++
 test/ci_use/GCU/run_ernie.py | 32 +++++++++++++
 3 files changed, 207 insertions(+)
 create mode 100644 .github/workflows/ci_gcu.yml
 create mode 100644 scripts/run_ci_gcu.sh
 create mode 100644 test/ci_use/GCU/run_ernie.py

diff --git a/.github/workflows/ci_gcu.yml b/.github/workflows/ci_gcu.yml
new file mode 100644
index 000000000..1e918cbdf
--- /dev/null
+++ b/.github/workflows/ci_gcu.yml
@@ -0,0 +1,89 @@
# CI pipeline for Enflame GCU: checks out the PR, installs the GCU driver on
# the self-hosted runner, then runs the unit test suite inside a paddle-gcu
# container.
name: CI_GCU

on:
  pull_request:
    branches:
      - develop
      - 'release/*'
  workflow_dispatch:

concurrency:
  # Fall back to github.ref for workflow_dispatch runs, where
  # github.event.pull_request.number is empty and would otherwise collapse
  # every manual run into the same "-gcu-ci" group.
  group: ${{ github.event.pull_request.number || github.ref }}-gcu-ci
  cancel-in-progress: true

jobs:
  CI_GCU:
    runs-on: [self-hosted, GCU-S60-8Card]
    steps:
      - name: Print current runner name
        run: |
          echo "Current runner name: ${{ runner.name }}"

      - name: Code Checkout
        env:
          docker_image: ccr-2vdh3abv-pub.cnc.bj.baidubce.com/device/paddle-gcu:topsrider3.5.102-ubuntu20-x86_64-gcc84
        run: |
          REPO="https://github.com/${{ github.repository }}.git"
          FULL_REPO="${{ github.repository }}"
          REPO_NAME="${FULL_REPO##*/}"
          BASE_BRANCH="${{ github.base_ref }}"
          # Clean the repository directory before starting; done inside the
          # container so root-owned files from previous runs can be removed.
          docker run --rm --net=host -v $(pwd):/workspace -w /workspace \
            -e "REPO_NAME=${REPO_NAME}" \
            -e "BASE_BRANCH=${BASE_BRANCH}" \
            ${docker_image} /bin/bash -c '
            if [ -d ${REPO_NAME} ]; then
              echo "Directory ${REPO_NAME} exists, removing it..."
              rm -rf ${REPO_NAME}
            fi
            '
          git config --global user.name "FastDeployCI"
          git config --global user.email "fastdeploy_ci@example.com"
          git clone ${REPO} ${REPO_NAME} -b ${BASE_BRANCH}
          # Use the derived repo name instead of hard-coding "FastDeploy" so
          # the step stays correct for forks/renames.
          cd ${REPO_NAME}
          if [ "${{ github.event_name }}" = "pull_request" ]; then
            git fetch origin pull/${{ github.event.pull_request.number }}/head:pr/${{ github.event.pull_request.number }}
            git merge pr/${{ github.event.pull_request.number }}
            git log -n 3 --oneline
          else
            git checkout ${{ github.sha }}
            git log -n 3 --oneline
          fi

      - name: Run CI unittest
        env:
          docker_image: ccr-2vdh3abv-pub.cnc.bj.baidubce.com/device/paddle-gcu:topsrider3.5.102-ubuntu20-x86_64-gcc84
        run: |
          # Runner names end in a digit; map runners 0-3 to distinct port
          # blocks so concurrent jobs on the same host don't collide.
          runner_name="${{ runner.name }}"
          last_char="${runner_name: -1}"

          if [[ "$last_char" =~ [0-3] ]]; then
            gcu_id="$last_char"
          else
            gcu_id="0"
          fi
          FD_API_PORT=$((9180 + gcu_id * 100))
          FD_ENGINE_QUEUE_PORT=$((9150 + gcu_id * 100))
          FD_METRICS_PORT=$((9170 + gcu_id * 100))

          PARENT_DIR=$(dirname "$WORKSPACE")
          echo "PARENT_DIR:$PARENT_DIR"
          echo "Install drivers..."
          cd /work/deps
          bash TopsRider_i3x_*_deb_amd64.run --driver --no-auto-load -y
          cd -
          # NOTE: no "-it" here — GitHub Actions run steps have no TTY, and
          # "docker run -it" fails with "the input device is not a TTY".
          docker run --rm --network=host --ipc=host --privileged \
            -v $(pwd):/workspace -w /workspace \
            -v "/home:/home" \
            -v "/work:/work" \
            -e "MODEL_PATH=/work/models" \
            -e "http_proxy=$(git config --global --get http.proxy)" \
            -e "https_proxy=$(git config --global --get https.proxy)" \
            -e "FD_API_PORT=${FD_API_PORT}" \
            -e "FD_ENGINE_QUEUE_PORT=${FD_ENGINE_QUEUE_PORT}" \
            -e "FD_METRICS_PORT=${FD_METRICS_PORT}" \
            ${docker_image} /bin/bash -c "
            git config --global --add safe.directory /workspace/FastDeploy
            cd FastDeploy
            bash scripts/run_ci_gcu.sh
            "
diff --git a/scripts/run_ci_gcu.sh b/scripts/run_ci_gcu.sh
new file mode 100644
index 000000000..76d4d1767
--- /dev/null
+++ b/scripts/run_ci_gcu.sh
@@ -0,0 +1,86 @@
#!/bin/bash
DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
echo "$DIR"

# Kill any leftover server processes from a previous run first
ps -efww | grep -E 'api_server' | grep -v grep | awk '{print $2}' | xargs kill -9 || true
ps -efww | grep -E '8188' | grep -v grep | awk '{print $2}' | xargs kill -9 || true
lsof -t -i :8188 | xargs kill -9 || true

export model_path=${MODEL_PATH}/paddle/ERNIE-4.5-21B-A3B-Paddle

echo "pip install requirements"
python -m pip install -r requirements.txt -i https://pypi.tuna.tsinghua.edu.cn/simple
echo "uninstall org"
python -m pip uninstall paddlepaddle -y
python -m pip uninstall paddle-custom-gcu -y
python -m pip install paddlepaddle==3.1.0a0 -i https://www.paddlepaddle.org.cn/packages/stable/cpu/
echo "build whl"
bash build.sh 1 || exit 1

unset http_proxy
unset https_proxy
unset no_proxy

# Start the serving process
rm -rf log/*
rm -f core*
# pkill -9 python  # not executed in the pipeline
# Clear stale SysV message queues left by previous runs
ipcrm --all=msg
python -m fastdeploy.entrypoints.openai.api_server \
    --model ${model_path} \
    --port 8188 \
    --metrics-port 8200 \
    --tensor-parallel-size 4 \
    --num-gpu-blocks-override 4096 \
    --max-model-len 32768 \
    --max-num-seqs 8 \
    --quantization wint4 > server.log \
2>&1 &

sleep 60
# Health probe: poll the /health endpoint until the server answers or the
# timeout is exceeded.
TIMEOUT=$((5 * 60))
INTERVAL=10  # polling interval (seconds)
ENDPOINT="http://0.0.0.0:8188/health"
START_TIME=$(date +%s)  # record the start timestamp
echo "开始服务健康检查,最长等待时间:${TIMEOUT}秒"
while true; do
    # Compute elapsed time
    CURRENT_TIME=$(date +%s)
    ELAPSED=$((CURRENT_TIME - START_TIME))

    # Timeout check: dump server and worker logs, then fail the job
    if [ $ELAPSED -ge $TIMEOUT ]; then
        echo -e "\n服务启动超时:经过 $((TIMEOUT/60)) 分钟服务仍未启动!"
        cat server.log
        cat log/workerlog.0
        exit 1
    fi

    # "|| true" keeps the probe loop alive while the server is still down
    HTTP_CODE=$(curl -s -o /dev/null -w "%{http_code}" -m 2 "$ENDPOINT" || true)

    if [ "$HTTP_CODE" = "200" ]; then
        echo -e "\n服务启动成功!耗时 ${ELAPSED} 秒"
        break
    else
        sleep $INTERVAL
    fi
done

cat server.log

# Run the serving inference test against the live server
python test/ci_use/GCU/run_ernie.py
exit_code=$?
echo exit_code is ${exit_code}

# Tear down the server regardless of the test outcome
ps -efww | grep -E 'api_server' | grep -v grep | awk '{print $2}' | xargs kill -9 || true
ps -efww | grep -E '8188' | grep -v grep | awk '{print $2}' | xargs kill -9 || true
lsof -t -i :8188 | xargs kill -9 || true

# Propagate the test's failure after showing the worker log
if [ ${exit_code} -ne 0 ]; then
    echo "log/workerlog.0"
    cat log/workerlog.0
    exit 1
fi
diff --git a/test/ci_use/GCU/run_ernie.py b/test/ci_use/GCU/run_ernie.py
new file mode 100644
index 000000000..f4e8a9ef9
--- /dev/null
+++ b/test/ci_use/GCU/run_ernie.py
@@ -0,0 +1,32 @@
# Copyright (c) 2025 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Smoke test: one non-streaming chat completion against the local FastDeploy server."""

import openai

# Endpoint of the api_server launched by scripts/run_ci_gcu.sh
ip = "0.0.0.0"
service_http_port = "8188"  # port configured for the service

client = openai.Client(
    base_url=f"http://{ip}:{service_http_port}/v1",
    api_key="EMPTY_API_KEY",
)

# Non-streaming chat request; top_p=0 makes decoding effectively greedy
response = client.chat.completions.create(
    model="default",
    messages=[{"role": "user", "content": "The largest ocean is"}],
    temperature=1,
    top_p=0,
    max_tokens=64,
    stream=False,
)
print(response)