Mirror of https://github.com/PaddlePaddle/FastDeploy.git, synced 2025-10-04 16:22:57 +08:00
[GCU] Enable gcu CI (#3190)
* [GCU] Update to the latest version
* [GCU] Enable CI
Changed files: .github/workflows/ci_gcu.yml (vendored, 22 changed lines), plus the GCU attention backends, the GCU model runner, the GCU CI script, and the GCU smoke test shown in the hunks below.
@@ -29,7 +29,9 @@ jobs:
          REPO_NAME="${FULL_REPO##*/}"
          BASE_BRANCH="${{ github.base_ref }}"
          # Clean the repository directory before starting
-         docker run --rm --net=host -v $(pwd):/workspace -w /workspace \
+         docker run --rm --net=host -v $(pwd):/workspace \
+           -v ${{ github.workspace }}/../../..:${{ github.workspace }}/../../.. \
+           -w /workspace \
            -e "REPO_NAME=${REPO_NAME}" \
            -e "BASE_BRANCH=${BASE_BRANCH}" \
            ${docker_image} /bin/bash -c '
@@ -40,6 +42,7 @@ jobs:
          '
          git config --global user.name "FastDeployCI"
          git config --global user.email "fastdeploy_ci@example.com"
+         source ${{ github.workspace }}/../../../proxy
          git clone ${REPO} ${REPO_NAME} -b ${BASE_BRANCH}
          cd FastDeploy
          if [ "${{ github.event_name }}" = "pull_request" ]; then
@@ -50,6 +53,9 @@ jobs:
            git checkout ${{ github.sha }}
            git log -n 3 --oneline
          fi
+         echo "Copy models..."
+         sudo mkdir -p ci_models && sudo cp -r /work/deps/ERNIE-4.5-21B-A3B-Paddle ci_models
+         echo "Copy models done."

      - name: Run CI unittest
        env:
@@ -71,13 +77,15 @@ jobs:
          echo "PARENT_DIR:$PARENT_DIR"
          echo "Install drivers..."
          cd /work/deps
-         bash TopsRider_i3x_*_deb_amd64.run --driver --no-auto-load -y
+         sudo bash TopsRider_i3x_*_deb_amd64.run --driver --no-auto-load -y
          cd -
-         docker run --rm --network=host --ipc=host -it --privileged \
-           -v $(pwd):/workspace -w /workspace \
-           -v "/home:/home" \
-           -v "/work:/work" \
-           -e "MODEL_PATH=/work/models" \
+         echo "Create docker..."
+         docker run --rm --network=host --ipc=host --privileged \
+           -v $(pwd):/workspace \
+           -v /home:/home \
+           -v /work:/work \
+           -w /workspace \
+           -e "MODEL_PATH=./ci_models" \
            -e "http_proxy=$(git config --global --get http.proxy)" \
            -e "https_proxy=$(git config --global --get https.proxy)" \
            -e "FD_API_PORT=${FD_API_PORT}" \
@@ -76,6 +76,8 @@ class GCUFlashAttnBackend(AttentionBackend):
        kv_num_heads: int,
        num_heads: int,
        head_dim: int,
+       encoder_block_shape_q: int = -1,
+       decoder_block_shape_q: int = -1,
    ):
        """
        GCUFlashAttnBackend __init__
@@ -94,7 +96,7 @@ class GCUFlashAttnBackend(AttentionBackend):
        self.head_dim = head_dim
        self.scaling = 1.0 / (self.head_dim**0.5)
        self.num_layers = fd_config.model_config.num_hidden_layers
-       self.position_ids_base = paddle.arange(self.max_seq_len)
+       self.position_ids_base = np.arange(self.max_seq_len)

        # TODO(zhengjun): Need to adapt the allocation logic and
        # temporarily allocate according to fixed size
@@ -74,6 +74,8 @@ class GCUMemEfficientAttnBackend(AttentionBackend):
        kv_num_heads: int,
        num_heads: int,
        head_dim: int,
+       encoder_block_shape_q: int = -1,
+       decoder_block_shape_q: int = -1,
    ):
        """
        GCUMemEfficientAttnBackend __init__
@@ -92,7 +94,7 @@ class GCUMemEfficientAttnBackend(AttentionBackend):
        self.head_dim = head_dim
        self.scaling = 1.0 / (self.head_dim**0.5)
        self.num_layers = fd_config.model_config.num_hidden_layers
-       self.position_ids_base = paddle.arange(self.max_seq_len)
+       self.position_ids_base = np.arange(self.max_seq_len)

        # TODO(zhengjun): Need to adapt the allocation logic and
        # temporarily allocate according to fixed size
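Both GCU attention backends receive the same pair of changes: two optional block-shape parameters are appended to __init__ (with defaults, so existing call sites keep working), and position_ids_base is built with NumPy instead of paddle.arange. A minimal sketch of the resulting constructor shape; the class name is a hypothetical stand-in, and max_seq_len is passed in directly here rather than derived from fd_config as in the real code:

import numpy as np

class AttnBackendSketch:  # hypothetical stand-in for the real GCU attention backends
    def __init__(
        self,
        max_seq_len: int,                 # the real backends derive this from fd_config
        kv_num_heads: int,
        num_heads: int,
        head_dim: int,
        encoder_block_shape_q: int = -1,  # new optional argument; existing callers are unaffected
        decoder_block_shape_q: int = -1,  # new optional argument
    ):
        self.head_dim = head_dim
        self.scaling = 1.0 / (self.head_dim**0.5)
        # position_ids_base is now plain NumPy instead of a paddle tensor
        self.position_ids_base = np.arange(max_seq_len)

backend = AttnBackendSketch(max_seq_len=8192, kv_num_heads=4, num_heads=32, head_dim=128)
print(backend.scaling, backend.position_ids_base.shape)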
@@ -295,7 +295,7 @@ class GCUModelRunner(ModelRunnerBase):

        if self.speculative_method in ["mtp"]:
            self.proposer.insert_prefill_inputs(req_dicts)
-       self.share_inputs["seq_lens_this_time"] = self.seq_lens_this_time_buffer[:num_running_requests]
+       self.share_inputs["seq_lens_this_time"] = self.seq_lens_this_time_buffer

    def _dummy_prefill_inputs(self, num_tokens: int, batch_size: int, expected_decode_len: int):
        """Set dummy prefill inputs to share_inputs"""
@@ -675,7 +675,7 @@ class GCUModelRunner(ModelRunnerBase):
        )
        self.share_inputs["decoder_batch_ids"] = paddle.full([int(decode_max_tile_size)], 0, dtype="int32")
        self.share_inputs["decoder_tile_ids_per_batch"] = paddle.full([int(decode_max_tile_size)], 0, dtype="int32")
-       self.share_inputs["decoder_num_blocks_cpu"] = paddle.full([1], 0, dtype="int32").pin_memory()
+       self.share_inputs["decoder_num_blocks_cpu"] = paddle.full([1], 0, dtype="int32").cpu()
        self.share_inputs["max_len_tensor_cpu"] = paddle.full([8], 0, dtype="int32").cpu()

        # Get the attention backend
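The functional change in the hunk above is the decoder_num_blocks_cpu allocation: pinned (page-locked) host memory is replaced by an ordinary CPU tensor, presumably because pinned allocations mainly serve asynchronous CUDA copies and are not needed on the GCU custom-device path. A small illustration of the two placements, using only the Paddle calls already present in this diff:

import paddle

# What the GCU path allocates now: a plain, pageable host (CPU) tensor.
decoder_num_blocks_cpu = paddle.full([1], 0, dtype="int32").cpu()
print(decoder_num_blocks_cpu.place)  # Place(cpu)

# What it allocated before: pinned (page-locked) host memory, which mainly
# benefits asynchronous CUDA host<->device transfers. On a CUDA build:
#   decoder_num_blocks_pinned = paddle.full([1], 0, dtype="int32").pin_memory()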
@@ -1062,9 +1062,7 @@ class GCUModelRunner(ModelRunnerBase):

        self._update_chunked_prefill(model_forward_batch)
        self._add_cache(model_forward_batch)
-       self.seq_lens_this_time_buffer[:num_running_requests].copy_(
-           self.share_inputs["seq_lens_this_time"][:num_running_requests], False
-       )
+       self.seq_lens_this_time_buffer.copy_(self.share_inputs["seq_lens_this_time"], False)
        return None

    def _add_cache(self, model_forward_batch) -> None:
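Together with the @@ -295 hunk above, this removes the [:num_running_requests] slicing on both sides, so the whole seq_lens_this_time buffer is written back in one non-blocking copy_. A rough sketch of that call pattern with made-up shapes and values, mirroring the call as written in the new code; treat it as illustrative only:

import paddle

# Persistent buffer and the per-step tensor it mirrors (hypothetical size of 8 slots).
seq_lens_this_time_buffer = paddle.zeros([8], dtype="int32")
seq_lens_this_time = paddle.to_tensor([3, 1, 1, 0, 0, 0, 0, 0], dtype="int32")

# Whole-buffer, non-blocking write-back (the second argument is the blocking flag).
seq_lens_this_time_buffer.copy_(seq_lens_this_time, False)
print(seq_lens_this_time_buffer.numpy())  # [3 1 1 0 0 0 0 0]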
@@ -1,13 +1,18 @@
-#!/bin/bash
+#!/usr/bin/env bash
 DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
-echo "$DIR"
+echo "Current directory: ${DIR}"

-#先kill一遍
+function stop_processes() {
 ps -efww | grep -E 'api_server' | grep -v grep | awk '{print $2}' | xargs kill -9 || true
 ps -efww | grep -E '8188' | grep -v grep | awk '{print $2}' | xargs kill -9 || true
 lsof -t -i :8188 | xargs kill -9 || true
+}

-export model_path=${MODEL_PATH}/paddle/ERNIE-4.5-21B-A3B-Paddle
+echo "Clean up processes..."
+stop_processes
+echo "Clean up completed."
+
+export model_path=${MODEL_PATH}/ERNIE-4.5-21B-A3B-Paddle

 echo "pip install requirements"
 python -m pip install -r requirements.txt -i https://pypi.tuna.tsinghua.edu.cn/simple
@@ -15,6 +20,7 @@ echo "uninstall org"
 python -m pip uninstall paddlepaddle -y
 python -m pip uninstall paddle-custom-gcu -y
 python -m pip install paddlepaddle==3.1.0a0 -i https://www.paddlepaddle.org.cn/packages/stable/cpu/
+python -m pip install --pre paddle-custom-gcu==3.0.0.dev20250801 -i https://www.paddlepaddle.org.cn/packages/nightly/gcu/
 echo "build whl"
 bash build.sh 1 || exit 1

@@ -22,12 +28,12 @@ unset http_proxy
 unset https_proxy
 unset no_proxy

-# 起服务
 rm -rf log/*
 rm -f core*
-# pkill -9 python #流水线不执行这个
-#清空消息队列
+
+# Empty the message queue
 ipcrm --all=msg
+echo "Start server..."
 python -m fastdeploy.entrypoints.openai.api_server \
     --model ${model_path} \
     --port 8188 \
@@ -38,21 +44,40 @@ python -m fastdeploy.entrypoints.openai.api_server \
     --max-num-seqs 8 \
     --quantization wint4 > server.log 2>&1 &

-sleep 60
-# 探活
-TIMEOUT=$((5 * 60))
-INTERVAL=10 # 检查间隔(秒)
+echo "Waiting 90 seconds..."
+sleep 90
+
+if grep -q "Failed to launch worker processes" server.log; then
+    echo "Failed to launch worker processes..."
+    stop_processes
+    cat server.log
+    cat log/workerlog.0
+    exit 1
+fi
+
+if grep -q "Traceback (most recent call last):" server.log; then
+    echo "Some errors occurred..."
+    stop_processes
+    cat server.log
+    cat log/workerlog.0
+    exit 1
+fi
+
+# Health check
+TIMEOUT=$((11 * 60))
+INTERVAL=30 # Check interval (seconds)
 ENDPOINT="http://0.0.0.0:8188/health"
-START_TIME=$(date +%s) # 记录开始时间戳
-echo "开始服务健康检查,最长等待时间:${TIMEOUT}秒"
+START_TIME=$(date +%s) # Record the start timestamp
+echo "Start the server health check, maximum waiting time: ${TIMEOUT} seconds..."
 while true; do
-    # 计算已耗时
+    # Used to calculate the time cost
     CURRENT_TIME=$(date +%s)
    ELAPSED=$((CURRENT_TIME - START_TIME))

-    # 超时判断
+    # Timeout
    if [ $ELAPSED -ge $TIMEOUT ]; then
-        echo -e "\n服务启动超时:经过 $((TIMEOUT/60)) 分钟服务仍未启动!"
+        echo -e "\nServer start timeout: After $((TIMEOUT/60)) minutes, the service still doesn't start!"
+        stop_processes
        cat server.log
        cat log/workerlog.0
        exit 1
@@ -61,7 +86,7 @@ while true; do
    HTTP_CODE=$(curl -s -o /dev/null -w "%{http_code}" -m 2 "$ENDPOINT" || true)

    if [ "$HTTP_CODE" = "200" ]; then
-        echo -e "\n服务启动成功!耗时 ${ELAPSED} 秒"
+        echo -e "\nThe server was successfully launched! Totally takes $((ELAPSED+90)) seconds."
        break
    else
        sleep $INTERVAL
@@ -69,18 +94,19 @@ while true; do
 done

 cat server.log
+echo -e "\n"

-# 执行服务化推理
+echo "Start inference..."
 python test/ci_use/GCU/run_ernie.py
 exit_code=$?
-echo exit_code is ${exit_code}
+echo -e "exit_code is ${exit_code}.\n"

-ps -efww | grep -E 'api_server' | grep -v grep | awk '{print $2}' | xargs kill -9 || true
-ps -efww | grep -E '8188' | grep -v grep | awk '{print $2}' | xargs kill -9 || true
-lsof -t -i :8188 | xargs kill -9 || true
+echo "Stop server..."
+stop_processes
+echo "Stop server done."

 if [ ${exit_code} -ne 0 ]; then
-    echo "log/workerlog.0"
+    echo "Exit with error, please refer to log/workerlog.0"
    cat log/workerlog.0
    exit 1
 fi
@@ -15,10 +15,9 @@
 import openai

 ip = "0.0.0.0"
-service_http_port = "8188" # 服务配置的
+service_http_port = "8188"
 client = openai.Client(base_url=f"http://{ip}:{service_http_port}/v1", api_key="EMPTY_API_KEY")

-# 非流式对话
 response = client.chat.completions.create(
    model="default",
    messages=[
@@ -26,7 +25,14 @@ response = client.chat.completions.create(
    ],
    temperature=1,
    top_p=0,
-    max_tokens=64,
+    max_tokens=256,
    stream=False,
 )
-print(response)
+print(f"response is: {response}", flush=True)
+
+generate_context = response.choices[0].message.content
+print(f"\ngenerate_context is: {generate_context}", flush=True)
+
+assert "pacific ocean" in generate_context.lower(), "The answer was incorrect!"
+
+print("Test successfully!", flush=True)