Mirror of https://github.com/PaddlePaddle/FastDeploy.git, synced 2025-10-05 08:37:06 +08:00

commit eb77b1be6d (parent 5328daa333), committed by Zhang Yulong

update ci cases

.github/workflows/ci.yml (10 changed lines)
@@ -60,10 +60,12 @@ jobs:
         runner_name="${{ runner.name }}"
         last_char="${runner_name: -1}"

-        if [[ "$last_char" =~ [0-3] ]]; then
-          gpu_id="$last_char"
+        if [ "${last_char}" = "1" ]; then
+          gpu_id=2
+          DEVICES="2,3"
         else
-          gpu_id="0"
+          gpu_id=0
+          DEVICES="0,1"
         fi
         FD_API_PORT=$((9180 + gpu_id * 100))
         FD_ENGINE_QUEUE_PORT=$((9150 + gpu_id * 100))
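Taken on its own, the updated selection logic reads as below. This is only a sketch outside the workflow, with a made-up runner name standing in for ${{ runner.name }}: runners whose name ends in "1" get GPUs 2,3 and the higher port block, everything else gets GPUs 0,1.

# Sketch only: mirrors the updated ci.yml logic outside the workflow.
runner_name="gpu-runner-1"                     # assumed example; the real value is ${{ runner.name }}
last_char="${runner_name: -1}"
if [ "${last_char}" = "1" ]; then
  gpu_id=2
  DEVICES="2,3"
else
  gpu_id=0
  DEVICES="0,1"
fi
FD_API_PORT=$((9180 + gpu_id * 100))           # 9380 when gpu_id=2, 9180 when gpu_id=0
FD_ENGINE_QUEUE_PORT=$((9150 + gpu_id * 100))  # 9350 when gpu_id=2, 9150 when gpu_id=0
echo "DEVICES=${DEVICES} FD_API_PORT=${FD_API_PORT} FD_ENGINE_QUEUE_PORT=${FD_ENGINE_QUEUE_PORT}"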
@@ -80,7 +82,7 @@ jobs:
           -e "FD_API_PORT=${FD_API_PORT}" \
           -e "FD_ENGINE_QUEUE_PORT=${FD_ENGINE_QUEUE_PORT}" \
           -e "FD_METRICS_PORT=${FD_METRICS_PORT}" \
-          --gpus device=${gpu_id} ${docker_image} /bin/bash -c "
+          --gpus '"device=${DEVICES}"' ${docker_image} /bin/bash -c "
           git config --global --add safe.directory /workspace/FastDeploy
           cd FastDeploy
           bash scripts/run_ci.sh
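The --gpus value now carries a comma-separated device list, and the workflow wraps it in literal double quotes so the comma is not parsed as a field separator inside the option value. A minimal sketch with a hard-coded list, using ${docker_image} as a stand-in for the CI image:

# Sketch: expose a multi-GPU device list to a container.
# The single quotes make the shell pass the inner double quotes through to docker,
# so docker receives "device=2,3" as one value instead of splitting on the comma.
docker run --rm --gpus '"device=2,3"' ${docker_image} nvidia-smi -L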
@@ -5,7 +5,7 @@ echo "$DIR"
 python -m pip config set global.index-url https://mirrors.tuna.tsinghua.edu.cn/pypi/web/simple
 python -m pip install --pre paddlepaddle-gpu -i https://www.paddlepaddle.org.cn/packages/nightly/cu126/
 python -m pip install -r requirements.txt
-python -m pip install jsonschema aistudio_sdk==0.2.6
+python -m pip install jsonschema aistudio_sdk==0.3.5
 bash build.sh || exit 1

 failed_files=()
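A quick sanity check that the bumped pin actually lands in the CI environment (a sketch, not part of the commit):

python -m pip show aistudio_sdk | grep -i "^Version"   # expected output: Version: 0.3.5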
test/ci_use/EB_VL_Lite/baseline.txt (new file, 1801 lines)
File diff suppressed because it is too large
test/ci_use/EB_VL_Lite/rollout_model.py (new file, 69 lines)
# Copyright (c) 2025 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import difflib
import os  # needed below for os.getenv and os.path.join

from paddleformers.trl.llm_utils import init_dist_env

from fastdeploy.rl.rollout_config import RolloutModelConfig
from fastdeploy.rl.rollout_model import RolloutModel

_, ranks = init_dist_env()


# base result
base_path = os.getenv("MODEL_PATH")
model_path = os.path.join(base_path, "ernie-4_5-vl-28b-a3b-bf16-paddle")

# Usage example:
init_kwargs = {
    "model_name_or_path": model_path,
    "max_model_len": 32768,
    "tensor_parallel_size": ranks,
    "dynamic_load_weight": True,
    "load_strategy": "ipc_snapshot",
    "enable_mm": True,
    "quantization": "wint8",
}

rollout_config = RolloutModelConfig(**init_kwargs)
actor_eval_model = RolloutModel(rollout_config)

# Collect parameter names and training name mappings for comparison against the baseline.
content = ""
for k, v in actor_eval_model.state_dict().items():
    content += f"{k}\n"
for k, v in actor_eval_model.get_name_mappings_to_training().items():
    content += f"{k}:{v}\n"

# Uncomment to regenerate the committed baseline:
# with open("baseline.txt", "w", encoding="utf-8") as f:
#     f.write(content)


def compare_strings(a: str, b: str) -> bool:
    if a == b:
        print("✅ The two strings are identical")
        return True

    print("❌ The strings differ; the differences are shown below (context diff):")
    diff = difflib.ndiff(a.splitlines(), b.splitlines())
    for line in diff:
        if line.startswith("- ") or line.startswith("+ "):
            print(line)

    return False


with open("baseline.txt", "r", encoding="utf-8") as f:
    baseline = f.read()

assert compare_strings(baseline, content), "In the unittest of RL scenario, your modification " \
    "caused inconsistency in the content before and after. Please fix it. " \
    "Can request assistance from yuanlehome or gzy19990617 (github id)."
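rollout_model.py compares the dumped parameter names and training-name mappings against the committed baseline.txt. When a change to the mappings is intentional, the baseline needs to be refreshed; a rough sketch of doing that locally, assuming the commented-out write block above is temporarily re-enabled, two GPUs are free, and MODEL_PATH points at the directory holding ernie-4_5-vl-28b-a3b-bf16-paddle:

# Sketch: refresh the baseline after an intentional change to the name mappings.
export MODEL_PATH=/path/to/models   # placeholder path
python -m paddle.distributed.launch --gpus "0,1" test/ci_use/EB_VL_Lite/rollout_model.py
# baseline.txt is written to the current working directory; review it,
# move it over test/ci_use/EB_VL_Lite/baseline.txt, and commit.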
@@ -88,7 +88,7 @@ def setup_and_run_server():
        sys.executable, "-m", "fastdeploy.entrypoints.openai.api_server",
        "--model", model_path,
        "--port", str(FD_API_PORT),
-       "--tensor-parallel-size", "1",
+       "--tensor-parallel-size", "2",
        "--engine-worker-queue-port", str(FD_ENGINE_QUEUE_PORT),
        "--metrics-port", str(FD_METRICS_PORT),
        "--enable-mm",
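For context, the fixture above assembles roughly the following standalone command, now running the server with tensor parallelism across two GPUs. The model path and port variables below are placeholders, not values from the commit:

# Sketch: the equivalent server launch with tensor parallelism of 2 (values are placeholders).
python -m fastdeploy.entrypoints.openai.api_server \
    --model "${MODEL_PATH}/ernie-4_5-vl-28b-a3b-bf16-paddle" \
    --port "${FD_API_PORT}" \
    --tensor-parallel-size 2 \
    --engine-worker-queue-port "${FD_ENGINE_QUEUE_PORT}" \
    --metrics-port "${FD_METRICS_PORT}" \
    --enable-mm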
@@ -197,9 +197,9 @@ def test_consistency_between_runs(api_url, headers, consistent_payload):
     # base result
     base_path = os.getenv("MODEL_PATH")
     if base_path:
-        base_file = os.path.join(base_path, "ernie-4_5-vl-base")
+        base_file = os.path.join(base_path, "ernie-4_5-vl-base-tp2")
     else:
-        base_file = "ernie-4_5-vl-base"
+        base_file = "ernie-4_5-vl-base-tp2"
     with open(base_file, "r") as f:
         content2 = f.read()
test/ci_use/EB_VL_Lite/test_rollout_model.py (new file, 59 lines)
# Copyright (c) 2025 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import subprocess
import sys
import os
import time
import pytest


def test_rollout_model_with_distributed_launch():
    """
    test_rollout_model
    """
    current_dir = os.path.dirname(os.path.abspath(__file__))

    rollout_script = os.path.join(current_dir, "rollout_model.py")

    command = [
        sys.executable,
        "-m", "paddle.distributed.launch",
        "--gpus", "0,1",
        rollout_script
    ]

    print(f"Executing command: {' '.join(command)}")

    process = subprocess.Popen(
        command,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        text=True
    )

    try:
        stdout, stderr = process.communicate(timeout=300)
        return_code = process.returncode
    except subprocess.TimeoutExpired:
        process.kill()
        stdout, stderr = process.communicate()
        return_code = -1

    print("\n" + "=" * 50 + " STDOUT " + "=" * 50)
    print(stdout)
    print("\n" + "=" * 50 + " STDERR " + "=" * 50)
    print(stderr)

    assert return_code == 0, f"Process exited with code {return_code}"
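To exercise this test outside the CI container, something along these lines should work, assuming GPUs 0 and 1 are free and MODEL_PATH is set for the rollout script it launches (the path below is a placeholder):

# Sketch: run the new rollout test standalone.
export MODEL_PATH=/path/to/models   # placeholder path
python -m pytest -sv test/ci_use/EB_VL_Lite/test_rollout_model.py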