FastDeploy/examples/splitwise/start_v0_tp2.sh

#!/bin/bash
set -e
# Test splitwise deployment
# There are two methods for splitwise deployment:
# v0: using splitwise_scheduler or dp_scheduler
# v1: using local_scheduler + router
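# This script exercises the v0 method on a single node: a prefill instance and a
# decode instance each register with the splitwise scheduler configured below
# (--scheduler-name "splitwise") and hand off KV cache over RDMA/IPC
# (--cache-transfer-protocol "rdma,ipc").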
wait_for_health() {
    local server_port=$1
    while true; do
        status_code=$(curl -s -o /dev/null -w "%{http_code}" "http://0.0.0.0:${server_port}/health" || echo "000")
        if [ "$status_code" -eq 200 ]; then
            break
        else
            echo "Service not ready. Retrying in 2s..."
            sleep 2
        fi
    done
}
# prepare environment
MODEL_NAME="PaddlePaddle/ERNIE-4.5-0.3B-Paddle"
export FD_DEBUG=1
export ENABLE_V1_KVCACHE_SCHEDULER=0
export KVCACHE_GDRCOPY_FLUSH_ENABLE=1
SCRIPT_PATH=$(readlink -f "$0")
SCRIPT_DIR=$(dirname "$SCRIPT_PATH")
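# get_rdma_nics.sh prints a KVCACHE_RDMA_NICS=<nic list> assignment on stdout;
# exporting its output makes the NIC list visible to the servers launched below.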
export $(bash ${SCRIPT_DIR}/../../scripts/get_rdma_nics.sh gpu)
echo "KVCACHE_RDMA_NICS:${KVCACHE_RDMA_NICS}"
if [ -z "${KVCACHE_RDMA_NICS}" ]; then
    echo "KVCACHE_RDMA_NICS is empty, please check the output of get_rdma_nics.sh"
    exit 1
fi
unset http_proxy && unset https_proxy
rm -rf log_*
# start redis
if ! redis-cli ping &>/dev/null; then
    echo "Redis is not running. Starting redis-server..."
    redis-server --daemonize yes
    sleep 1
else
    echo "Redis is already running."
fi
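# The redis-server started here listens on the default port 6379, which is the
# --scheduler-port that both instances below point at via --scheduler-host 127.0.0.1.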
sleep 1
# start prefill
export CUDA_VISIBLE_DEVICES=0,1
export FD_LOG_DIR="log_prefill"
mkdir -p ${FD_LOG_DIR}
nohup python -m fastdeploy.entrypoints.openai.api_server \
    --model ${MODEL_NAME} \
    --port 8100 \
    --metrics-port 8101 \
    --engine-worker-queue-port 8102 \
    --cache-queue-port 8103 \
    --max-model-len 32768 \
    --tensor-parallel-size 2 \
    --splitwise-role "prefill" \
    --cache-transfer-protocol "rdma,ipc" \
    --pd-comm-port 8104 \
    --rdma-comm-ports 8105,8106 \
    --scheduler-name "splitwise" \
    --scheduler-host "127.0.0.1" \
    --scheduler-port 6379 \
    --scheduler-ttl 9000 \
    2>&1 >${FD_LOG_DIR}/nohup &
# wait_for_health 8100
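# The decode instance below mirrors the prefill one but runs on GPUs 2,3 with its
# own port range (API 9000, metrics 9001, engine queue 9002, cache queue 9003,
# pd-comm 9004, RDMA 9005/9006), so both instances can coexist on this node.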
# start decode
export CUDA_VISIBLE_DEVICES=2,3
export FD_LOG_DIR="log_decode"
mkdir -p ${FD_LOG_DIR}
nohup python -m fastdeploy.entrypoints.openai.api_server \
    --model ${MODEL_NAME} \
    --port 9000 \
    --metrics-port 9001 \
    --engine-worker-queue-port 9002 \
    --cache-queue-port 9003 \
    --max-model-len 32768 \
    --tensor-parallel-size 2 \
    --splitwise-role "decode" \
    --cache-transfer-protocol "rdma,ipc" \
    --pd-comm-port 9004 \
    --rdma-comm-ports 9005,9006 \
    --scheduler-name "splitwise" \
    --scheduler-host "127.0.0.1" \
    --scheduler-port 6379 \
    --scheduler-ttl 9000 \
    2>&1 >${FD_LOG_DIR}/nohup &
wait_for_health 9000
# send request
sleep 10 # make sure both instances are registered with the scheduler
port=9000
curl -X POST "http://0.0.0.0:${port}/v1/chat/completions" \
    -H "Content-Type: application/json" \
    -d '{
        "messages": [
            {"role": "user", "content": "hello"}
        ],
        "max_tokens": 20,
        "stream": true
    }'
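
# For a quick pass/fail check, a non-streaming variant of the same request can be
# used (illustrative sketch, not part of the original test flow; it returns a
# single JSON body instead of an SSE stream):
# curl -X POST "http://0.0.0.0:${port}/v1/chat/completions" \
#     -H "Content-Type: application/json" \
#     -d '{"messages": [{"role": "user", "content": "hello"}], "max_tokens": 20, "stream": false}'
#
# To tear the test down afterwards, something like the following could be used,
# assuming no unrelated api_server processes are running on this machine:
# pkill -f fastdeploy.entrypoints.openai.api_server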