From 007ee71208edc1a3b7738be1cc436e5511090b45 Mon Sep 17 00:00:00 2001 From: Lucas Date: Tue, 4 Nov 2025 15:06:19 +0800 Subject: [PATCH] [XPU] add deploy doc for PaddleOCR-VL in XPU (#4784) --- docs/usage/kunlunxin_xpu_deployment.md | 76 +++++++++++++++++++++- docs/zh/usage/kunlunxin_xpu_deployment.md | 78 +++++++++++++++++++++-- 2 files changed, 147 insertions(+), 7 deletions(-) diff --git a/docs/usage/kunlunxin_xpu_deployment.md b/docs/usage/kunlunxin_xpu_deployment.md index 9c937022e..ce784de8b 100644 --- a/docs/usage/kunlunxin_xpu_deployment.md +++ b/docs/usage/kunlunxin_xpu_deployment.md @@ -20,6 +20,7 @@ |ERNIE-4.5-300B-A47B-W4A8C8-TP4|32K|W4A8|4|export XPU_VISIBLE_DEVICES="0,1,2,3" or "4,5,6,7"
python -m fastdeploy.entrypoints.openai.api_server \
--model PaddlePaddle/ERNIE-4.5-300B-A47B-W4A8C8-TP4-Paddle \
--port 8188 \
--tensor-parallel-size 4 \
--max-model-len 32768 \
--max-num-seqs 64 \
--quantization "W4A8" \
--gpu-memory-utilization 0.9 \
--load-choices "default"|2.3.0| |ERNIE-4.5-VL-28B-A3B|32K|WINT8|1|export XPU_VISIBLE_DEVICES="0" # Specify any card
python -m fastdeploy.entrypoints.openai.api_server \
--model PaddlePaddle/ERNIE-4.5-VL-28B-A3B-Paddle \
--port 8188 \
--tensor-parallel-size 1 \
--quantization "wint8" \
--max-model-len 32768 \
--max-num-seqs 10 \
--enable-mm \
--mm-processor-kwargs '{"video_max_frames": 30}' \
--limit-mm-per-prompt '{"image": 10, "video": 3}' \
--reasoning-parser ernie-45-vl \
--load-choices "default"|2.3.0| |ERNIE-4.5-VL-424B-A47B|32K|WINT8|8|export XPU_VISIBLE_DEVICES="0,1,2,3,4,5,6,7"
python -m fastdeploy.entrypoints.openai.api_server \
--model PaddlePaddle/ERNIE-4.5-VL-424B-A47B-Paddle \
--port 8188 \
--tensor-parallel-size 8 \
--quantization "wint8" \
--max-model-len 32768 \
--max-num-seqs 10 \
--enable-mm \
--mm-processor-kwargs '{"video_max_frames": 30}' \
--limit-mm-per-prompt '{"image": 10, "video": 3}' \
--reasoning-parser ernie-45-vl \
--load-choices "default"|2.3.0| +|PaddleOCR-VL-0.9B|16K|BF16|1|export FD_ENABLE_MAX_PREFILL=1<br>
export XPU_VISIBLE_DEVICES="0" # Specify any card
python -m fastdeploy.entrypoints.openai.api_server \
--model PaddlePaddle/PaddleOCR-VL \
--port 8188 \
--metrics-port 8181 \
--engine-worker-queue-port 8182 \
--max-model-len 16384 \
--max-num-batched-tokens 16384 \
--gpu-memory-utilization 0.8 \
--max-num-seqs 256|2.3.0| ## Quick start @@ -162,8 +163,77 @@ for chunk in response: if chunk.choices[0].delta is not None and chunk.choices[0].delta.role != 'assistant': reasoning_content = get_str(chunk.choices[0].delta.reasoning_content) content = get_str(chunk.choices[0].delta.content) - is_reason = "[answer]" if reasoning_content == '' else "[think]" - is_reason = "" - print(reasoning_content+content+is_reason, end='', flush=True) + print(reasoning_content + content, end='', flush=True) +print('\n') +``` + +### Deploy online serving based on PaddleOCR-VL-0.9B + +#### Start service + +Deploy the PaddleOCR-VL-0.9B model with BF16 precision and 16K context length on 1 XPU + +```bash +export FD_ENABLE_MAX_PREFILL=1 +export XPU_VISIBLE_DEVICES="0" # Specify any card +python -m fastdeploy.entrypoints.openai.api_server \ + --model PaddlePaddle/PaddleOCR-VL \ + --port 8188 \ + --metrics-port 8181 \ + --engine-worker-queue-port 8182 \ + --max-model-len 16384 \ + --max-num-batched-tokens 16384 \ + --gpu-memory-utilization 0.8 \ + --max-num-seqs 256 +``` + +#### Send requests + +```bash +curl -X POST "http://0.0.0.0:8188/v1/chat/completions" \ +-H "Content-Type: application/json" \ +-d '{ + "messages": [ + {"role": "user", "content": [ + {"type": "image_url", "image_url": {"url": "https://paddle-model-ecology.bj.bcebos.com/PPOCRVL/dataset/ocr_v5_eval/handwrite_ch_rec_val/中文手写古籍_000054_crop_32.jpg"}}, + {"type": "text", "text": "OCR:"} + ]} + ], + "metadata": {"enable_thinking": false} +}' +``` + +```python +import openai + +ip = "0.0.0.0" +service_http_port = "8188" +client = openai.Client(base_url=f"http://{ip}:{service_http_port}/v1", api_key="EMPTY_API_KEY") + +response = client.chat.completions.create( + model="default", + messages=[ + {"role": "user", "content": [ + {"type": "image_url", "image_url": {"url": "https://paddle-model-ecology.bj.bcebos.com/PPOCRVL/dataset/ocr_v5_eval/handwrite_ch_rec_val/中文手写古籍_000054_crop_32.jpg"}}, + {"type": "text", 
"text": "OCR:"} + ] + }, + ], + temperature=0.0001, + max_tokens=4096, + stream=True, + top_p=0, + metadata={"enable_thinking": False}, +) + +def get_str(content_raw): + content_str = str(content_raw) if content_raw is not None else '' + return content_str + +for chunk in response: + if chunk.choices[0].delta is not None and chunk.choices[0].delta.role != 'assistant': + reasoning_content = get_str(chunk.choices[0].delta.reasoning_content) + content = get_str(chunk.choices[0].delta.content) + print(reasoning_content + content, end='', flush=True) print('\n') ``` diff --git a/docs/zh/usage/kunlunxin_xpu_deployment.md b/docs/zh/usage/kunlunxin_xpu_deployment.md index feeb96fba..af35b1364 100644 --- a/docs/zh/usage/kunlunxin_xpu_deployment.md +++ b/docs/zh/usage/kunlunxin_xpu_deployment.md @@ -20,6 +20,7 @@ |ERNIE-4.5-300B-A47B-W4A8C8-TP4|32K|W4A8|4|export XPU_VISIBLE_DEVICES="0,1,2,3" or "4,5,6,7"
python -m fastdeploy.entrypoints.openai.api_server \
--model PaddlePaddle/ERNIE-4.5-300B-A47B-W4A8C8-TP4-Paddle \
--port 8188 \
--tensor-parallel-size 4 \
--max-model-len 32768 \
--max-num-seqs 64 \
--quantization "W4A8" \
--gpu-memory-utilization 0.9 \
--load-choices "default"|2.3.0| |ERNIE-4.5-VL-28B-A3B|32K|WINT8|1|export XPU_VISIBLE_DEVICES="0"# 指定任意一张卡
python -m fastdeploy.entrypoints.openai.api_server \
--model PaddlePaddle/ERNIE-4.5-VL-28B-A3B-Paddle \
--port 8188 \
--tensor-parallel-size 1 \
--quantization "wint8" \
--max-model-len 32768 \
--max-num-seqs 10 \
--enable-mm \
--mm-processor-kwargs '{"video_max_frames": 30}' \
--limit-mm-per-prompt '{"image": 10, "video": 3}' \
--reasoning-parser ernie-45-vl \
--load-choices "default"|2.3.0| |ERNIE-4.5-VL-424B-A47B|32K|WINT8|8|export XPU_VISIBLE_DEVICES="0,1,2,3,4,5,6,7"
python -m fastdeploy.entrypoints.openai.api_server \
--model PaddlePaddle/ERNIE-4.5-VL-424B-A47B-Paddle \
--port 8188 \
--tensor-parallel-size 8 \
--quantization "wint8" \
--max-model-len 32768 \
--max-num-seqs 10 \
--enable-mm \
--mm-processor-kwargs '{"video_max_frames": 30}' \
--limit-mm-per-prompt '{"image": 10, "video": 3}' \
--reasoning-parser ernie-45-vl \
--load-choices "default"|2.3.0| +|PaddleOCR-VL-0.9B|16K|BF16|1|export FD_ENABLE_MAX_PREFILL=1<br>
export XPU_VISIBLE_DEVICES="0" # 指定任意一张卡
python -m fastdeploy.entrypoints.openai.api_server \
--model PaddlePaddle/PaddleOCR-VL \
--port 8188 \
--metrics-port 8181 \
--engine-worker-queue-port 8182 \
--max-model-len 16384 \
--max-num-batched-tokens 16384 \
--gpu-memory-utilization 0.8 \
--max-num-seqs 256|2.3.0| ## 快速开始 @@ -117,7 +118,7 @@ python -m fastdeploy.entrypoints.openai.api_server \ --load-choices "default" ``` -#### Send requests +#### 请求服务 ```bash curl -X POST "http://0.0.0.0:8188/v1/chat/completions" \ @@ -164,8 +165,77 @@ for chunk in response: if chunk.choices[0].delta is not None and chunk.choices[0].delta.role != 'assistant': reasoning_content = get_str(chunk.choices[0].delta.reasoning_content) content = get_str(chunk.choices[0].delta.content) - is_reason = "[answer]" if reasoning_content == '' else "[think]" - is_reason = "" - print(reasoning_content+content+is_reason, end='', flush=True) + print(reasoning_content + content, end='', flush=True) +print('\n') +``` + +### 基于PaddleOCR-VL-0.9B模型部署在线服务 + +#### 启动服务 + +基于 BF16 精度和 16K 上下文部署 PaddleOCR-VL-0.9B 模型到 单卡 P800 服务器 + +```bash +export FD_ENABLE_MAX_PREFILL=1 +export XPU_VISIBLE_DEVICES="0" # 指定任意一张卡 +python -m fastdeploy.entrypoints.openai.api_server \ + --model PaddlePaddle/PaddleOCR-VL \ + --port 8188 \ + --metrics-port 8181 \ + --engine-worker-queue-port 8182 \ + --max-model-len 16384 \ + --max-num-batched-tokens 16384 \ + --gpu-memory-utilization 0.8 \ + --max-num-seqs 256 +``` + +#### 请求服务 + +```bash +curl -X POST "http://0.0.0.0:8188/v1/chat/completions" \ +-H "Content-Type: application/json" \ +-d '{ + "messages": [ + {"role": "user", "content": [ + {"type": "image_url", "image_url": {"url": "https://paddle-model-ecology.bj.bcebos.com/PPOCRVL/dataset/ocr_v5_eval/handwrite_ch_rec_val/中文手写古籍_000054_crop_32.jpg"}}, + {"type": "text", "text": "OCR:"} + ]} + ], + "metadata": {"enable_thinking": false} +}' +``` + +```python +import openai + +ip = "0.0.0.0" +service_http_port = "8188" +client = openai.Client(base_url=f"http://{ip}:{service_http_port}/v1", api_key="EMPTY_API_KEY") + +response = client.chat.completions.create( + model="default", + messages=[ + {"role": "user", "content": [ + {"type": "image_url", "image_url": {"url": 
"https://paddle-model-ecology.bj.bcebos.com/PPOCRVL/dataset/ocr_v5_eval/handwrite_ch_rec_val/中文手写古籍_000054_crop_32.jpg"}}, + {"type": "text", "text": "OCR:"} + ] + }, + ], + temperature=0.0001, + max_tokens=4096, + stream=True, + top_p=0, + metadata={"enable_thinking": False}, +) + +def get_str(content_raw): + content_str = str(content_raw) if content_raw is not None else '' + return content_str + +for chunk in response: + if chunk.choices[0].delta is not None and chunk.choices[0].delta.role != 'assistant': + reasoning_content = get_str(chunk.choices[0].delta.reasoning_content) + content = get_str(chunk.choices[0].delta.content) + print(reasoning_content + content, end='', flush=True) print('\n') ```