[GCU] Enable gcu CI (#3190)

* [GCU] Update to the latest version

* [GCU] Enable CI
EnflameGCU
2025-08-13 11:48:24 +08:00
committed by GitHub
parent ce9180241e
commit d1a92e3e17
6 changed files with 87 additions and 45 deletions


@@ -29,7 +29,9 @@ jobs:
REPO_NAME="${FULL_REPO##*/}"
BASE_BRANCH="${{ github.base_ref }}"
# Clean the repository directory before starting
docker run --rm --net=host -v $(pwd):/workspace -w /workspace \
docker run --rm --net=host -v $(pwd):/workspace \
-v ${{ github.workspace }}/../../..:${{ github.workspace }}/../../.. \
-w /workspace \
-e "REPO_NAME=${REPO_NAME}" \
-e "BASE_BRANCH=${BASE_BRANCH}" \
${docker_image} /bin/bash -c '
@@ -40,6 +42,7 @@ jobs:
'
git config --global user.name "FastDeployCI"
git config --global user.email "fastdeploy_ci@example.com"
source ${{ github.workspace }}/../../../proxy
git clone ${REPO} ${REPO_NAME} -b ${BASE_BRANCH}
cd FastDeploy
if [ "${{ github.event_name }}" = "pull_request" ]; then
@@ -50,6 +53,9 @@ jobs:
git checkout ${{ github.sha }}
git log -n 3 --oneline
fi
echo "Copy models..."
sudo mkdir -p ci_models && sudo cp -r /work/deps/ERNIE-4.5-21B-A3B-Paddle ci_models
echo "Copy models done."
- name: Run CI unittest
env:
@@ -71,13 +77,15 @@ jobs:
echo "PARENT_DIR:$PARENT_DIR"
echo "Install drivers..."
cd /work/deps
bash TopsRider_i3x_*_deb_amd64.run --driver --no-auto-load -y
sudo bash TopsRider_i3x_*_deb_amd64.run --driver --no-auto-load -y
cd -
docker run --rm --network=host --ipc=host -it --privileged \
-v $(pwd):/workspace -w /workspace \
-v "/home:/home" \
-v "/work:/work" \
-e "MODEL_PATH=/work/models" \
echo "Create docker..."
docker run --rm --network=host --ipc=host --privileged \
-v $(pwd):/workspace \
-v /home:/home \
-v /work:/work \
-w /workspace \
-e "MODEL_PATH=./ci_models" \
-e "http_proxy=$(git config --global --get http.proxy)" \
-e "https_proxy=$(git config --global --get https.proxy)" \
-e "FD_API_PORT=${FD_API_PORT}" \


@@ -76,6 +76,8 @@ class GCUFlashAttnBackend(AttentionBackend):
kv_num_heads: int,
num_heads: int,
head_dim: int,
encoder_block_shape_q: int = -1,
decoder_block_shape_q: int = -1,
):
"""
GCUFlashAttnBackend __init__
@@ -94,7 +96,7 @@ class GCUFlashAttnBackend(AttentionBackend):
self.head_dim = head_dim
self.scaling = 1.0 / (self.head_dim**0.5)
self.num_layers = fd_config.model_config.num_hidden_layers
self.position_ids_base = paddle.arange(self.max_seq_len)
self.position_ids_base = np.arange(self.max_seq_len)
# TODO(zhengjun): Need to adapt the allocation logic and
# temporarily allocate according to fixed size


@@ -74,6 +74,8 @@ class GCUMemEfficientAttnBackend(AttentionBackend):
kv_num_heads: int,
num_heads: int,
head_dim: int,
encoder_block_shape_q: int = -1,
decoder_block_shape_q: int = -1,
):
"""
GCUMemEfficientAttnBackend __init__
@@ -92,7 +94,7 @@ class GCUMemEfficientAttnBackend(AttentionBackend):
self.head_dim = head_dim
self.scaling = 1.0 / (self.head_dim**0.5)
self.num_layers = fd_config.model_config.num_hidden_layers
self.position_ids_base = paddle.arange(self.max_seq_len)
self.position_ids_base = np.arange(self.max_seq_len)
# TODO(zhengjun): Need to adapt the allocation logic and
# temporarily allocate according to fixed size
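For context (not part of the diff): in both GCU attention backends, switching position_ids_base from paddle.arange to np.arange keeps the base index range as a host-side NumPy array instead of allocating a device tensor up front; a slice can still be materialized as a Paddle tensor when it is actually needed. A minimal sketch with illustrative values:

import numpy as np
import paddle

max_seq_len = 8192  # illustrative; the backend derives it from fd_config
position_ids_base = np.arange(max_seq_len)  # host-side NumPy array, no device allocation
position_ids = paddle.to_tensor(position_ids_base[:128], dtype="int64")  # materialize a slice on demand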


@@ -295,7 +295,7 @@ class GCUModelRunner(ModelRunnerBase):
if self.speculative_method in ["mtp"]:
self.proposer.insert_prefill_inputs(req_dicts)
self.share_inputs["seq_lens_this_time"] = self.seq_lens_this_time_buffer[:num_running_requests]
self.share_inputs["seq_lens_this_time"] = self.seq_lens_this_time_buffer
def _dummy_prefill_inputs(self, num_tokens: int, batch_size: int, expected_decode_len: int):
"""Set dummy prefill inputs to share_inputs"""
@@ -675,7 +675,7 @@ class GCUModelRunner(ModelRunnerBase):
)
self.share_inputs["decoder_batch_ids"] = paddle.full([int(decode_max_tile_size)], 0, dtype="int32")
self.share_inputs["decoder_tile_ids_per_batch"] = paddle.full([int(decode_max_tile_size)], 0, dtype="int32")
self.share_inputs["decoder_num_blocks_cpu"] = paddle.full([1], 0, dtype="int32").pin_memory()
self.share_inputs["decoder_num_blocks_cpu"] = paddle.full([1], 0, dtype="int32").cpu()
self.share_inputs["max_len_tensor_cpu"] = paddle.full([8], 0, dtype="int32").cpu()
# Get the attention backend
@@ -1062,9 +1062,7 @@ class GCUModelRunner(ModelRunnerBase):
self._update_chunked_prefill(model_forward_batch)
self._add_cache(model_forward_batch)
self.seq_lens_this_time_buffer[:num_running_requests].copy_(
self.share_inputs["seq_lens_this_time"][:num_running_requests], False
)
self.seq_lens_this_time_buffer.copy_(self.share_inputs["seq_lens_this_time"], False)
return None
def _add_cache(self, model_forward_batch) -> None:


@@ -1,13 +1,18 @@
#!/bin/bash
#!/usr/bin/env bash
DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
echo "$DIR"
echo "Current directory: ${DIR}"
# Kill any leftover processes first
ps -efww | grep -E 'api_server' | grep -v grep | awk '{print $2}' | xargs kill -9 || true
ps -efww | grep -E '8188' | grep -v grep | awk '{print $2}' | xargs kill -9 || true
lsof -t -i :8188 | xargs kill -9 || true
function stop_processes() {
ps -efww | grep -E 'api_server' | grep -v grep | awk '{print $2}' | xargs kill -9 || true
ps -efww | grep -E '8188' | grep -v grep | awk '{print $2}' | xargs kill -9 || true
lsof -t -i :8188 | xargs kill -9 || true
}
export model_path=${MODEL_PATH}/paddle/ERNIE-4.5-21B-A3B-Paddle
echo "Clean up processes..."
stop_processes
echo "Clean up completed."
export model_path=${MODEL_PATH}/ERNIE-4.5-21B-A3B-Paddle
echo "pip install requirements"
python -m pip install -r requirements.txt -i https://pypi.tuna.tsinghua.edu.cn/simple
@@ -15,6 +20,7 @@ echo "uninstall org"
python -m pip uninstall paddlepaddle -y
python -m pip uninstall paddle-custom-gcu -y
python -m pip install paddlepaddle==3.1.0a0 -i https://www.paddlepaddle.org.cn/packages/stable/cpu/
python -m pip install --pre paddle-custom-gcu==3.0.0.dev20250801 -i https://www.paddlepaddle.org.cn/packages/nightly/gcu/
echo "build whl"
bash build.sh 1 || exit 1
@@ -22,12 +28,12 @@ unset http_proxy
unset https_proxy
unset no_proxy
# Start the service
rm -rf log/*
rm -f core*
# pkill -9 python  # the pipeline does not run this
# Clear the message queue
# Empty the message queue
ipcrm --all=msg
echo "Start server..."
python -m fastdeploy.entrypoints.openai.api_server \
--model ${model_path} \
--port 8188 \
@@ -38,21 +44,40 @@ python -m fastdeploy.entrypoints.openai.api_server \
--max-num-seqs 8 \
--quantization wint4 > server.log 2>&1 &
sleep 60
# Health probe
TIMEOUT=$((5 * 60))
INTERVAL=10 # Check interval (seconds)
echo "Waiting 90 seconds..."
sleep 90
if grep -q "Failed to launch worker processes" server.log; then
echo "Failed to launch worker processes..."
stop_processes
cat server.log
cat log/workerlog.0
exit 1
fi
if grep -q "Traceback (most recent call last):" server.log; then
echo "Some errors occurred..."
stop_processes
cat server.log
cat log/workerlog.0
exit 1
fi
# Health check
TIMEOUT=$((11 * 60))
INTERVAL=30 # Check interval (seconds)
ENDPOINT="http://0.0.0.0:8188/health"
START_TIME=$(date +%s) # Record the start timestamp
echo "Starting server health check, maximum wait time: ${TIMEOUT}"
START_TIME=$(date +%s) # Record the start timestamp
echo "Start the server health check, maximum waiting time: ${TIMEOUT} seconds..."
while true; do
# Compute the elapsed time
# Used to calculate the time cost
CURRENT_TIME=$(date +%s)
ELAPSED=$((CURRENT_TIME - START_TIME))
# Timeout check
# Timeout
if [ $ELAPSED -ge $TIMEOUT ]; then
echo -e "\n服务启动超时:经过 $((TIMEOUT/60)) 分钟服务仍未启动!"
echo -e "\nServer start timeout: After $((TIMEOUT/60)) minutes, the service still doesn't start!"
stop_processes
cat server.log
cat log/workerlog.0
exit 1
@@ -61,7 +86,7 @@ while true; do
HTTP_CODE=$(curl -s -o /dev/null -w "%{http_code}" -m 2 "$ENDPOINT" || true)
if [ "$HTTP_CODE" = "200" ]; then
echo -e "\n服务启动成功!耗时 ${ELAPSED}"
echo -e "\nThe server was successfully launched! Totally takes $((ELAPSED+90)) seconds."
break
else
sleep $INTERVAL
@@ -69,18 +94,19 @@ while true; do
done
cat server.log
echo -e "\n"
# Run serving inference
echo "Start inference..."
python test/ci_use/GCU/run_ernie.py
exit_code=$?
echo exit_code is ${exit_code}
echo -e "exit_code is ${exit_code}.\n"
ps -efww | grep -E 'api_server' | grep -v grep | awk '{print $2}' | xargs kill -9 || true
ps -efww | grep -E '8188' | grep -v grep | awk '{print $2}' | xargs kill -9 || true
lsof -t -i :8188 | xargs kill -9 || true
echo "Stop server..."
stop_processes
echo "Stop server done."
if [ ${exit_code} -ne 0 ]; then
echo "log/workerlog.0"
echo "Exit with error, please refer to log/workerlog.0"
cat log/workerlog.0
exit 1
fi
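The script above replaces the default Paddle wheels with the CPU paddlepaddle build plus the paddle-custom-gcu plugin before building FastDeploy. A quick sanity check, shown below as a sketch (not part of this commit; helper availability depends on the Paddle build), can confirm the GCU custom device is registered before the server is launched:

# Hypothetical sanity check, not part of this commit.
import paddle

print(paddle.__version__)
print(paddle.device.get_all_custom_device_type())  # expected to include "gcu"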


@@ -15,10 +15,9 @@
import openai
ip = "0.0.0.0"
service_http_port = "8188"  # port configured for the service
service_http_port = "8188"
client = openai.Client(base_url=f"http://{ip}:{service_http_port}/v1", api_key="EMPTY_API_KEY")
# Non-streaming chat completion
response = client.chat.completions.create(
model="default",
messages=[
@@ -26,7 +25,14 @@ response = client.chat.completions.create(
],
temperature=1,
top_p=0,
max_tokens=64,
max_tokens=256,
stream=False,
)
print(response)
print(f"response is: {response}", flush=True)
generate_context = response.choices[0].message.content
print(f"\ngenerate_context is: {generate_context}", flush=True)
assert "pacific ocean" in generate_context.lower(), "The answer was incorrect!"
print("Test successfully!", flush=True)