[XPU] add deploy doc for PaddleOCR-VL in XPU (#4784)

Lucas
2025-11-04 15:06:19 +08:00
committed by GitHub
parent bffa08b74b
commit 007ee71208
2 changed files with 147 additions and 7 deletions

View File

@@ -20,6 +20,7 @@
|ERNIE-4.5-300B-A47B-W4A8C8-TP4|32K|W4A8|4|export XPU_VISIBLE_DEVICES="0,1,2,3" or "4,5,6,7"<br>python -m fastdeploy.entrypoints.openai.api_server \ <br> --model PaddlePaddle/ERNIE-4.5-300B-A47B-W4A8C8-TP4-Paddle \ <br> --port 8188 \ <br> --tensor-parallel-size 4 \ <br> --max-model-len 32768 \ <br> --max-num-seqs 64 \ <br> --quantization "W4A8" \ <br> --gpu-memory-utilization 0.9 \ <br> --load-choices "default"|2.3.0|
|ERNIE-4.5-VL-28B-A3B|32K|WINT8|1|export XPU_VISIBLE_DEVICES="0" # Specify any card<br>python -m fastdeploy.entrypoints.openai.api_server \ <br> --model PaddlePaddle/ERNIE-4.5-VL-28B-A3B-Paddle \ <br> --port 8188 \ <br> --tensor-parallel-size 1 \ <br> --quantization "wint8" \ <br> --max-model-len 32768 \ <br> --max-num-seqs 10 \ <br> --enable-mm \ <br> --mm-processor-kwargs '{"video_max_frames": 30}' \ <br> --limit-mm-per-prompt '{"image": 10, "video": 3}' \ <br> --reasoning-parser ernie-45-vl \ <br> --load-choices "default"|2.3.0|
|ERNIE-4.5-VL-424B-A47B|32K|WINT8|8|export XPU_VISIBLE_DEVICES="0,1,2,3,4,5,6,7" <br>python -m fastdeploy.entrypoints.openai.api_server \ <br> --model PaddlePaddle/ERNIE-4.5-VL-424B-A47B-Paddle \ <br> --port 8188 \ <br> --tensor-parallel-size 8 \ <br> --quantization "wint8" \ <br> --max-model-len 32768 \ <br> --max-num-seqs 10 \ <br> --enable-mm \ <br> --mm-processor-kwargs '{"video_max_frames": 30}' \ <br> --limit-mm-per-prompt '{"image": 10, "video": 3}' \ <br> --reasoning-parser ernie-45-vl \ <br> --load-choices "default"|2.3.0|
|PaddleOCR-VL-0.9B|32K|BF16|1|export FD_ENABLE_MAX_PREFILL=1 <br>export XPU_VISIBLE_DEVICES="0" # Specify any card <br>python -m fastdeploy.entrypoints.openai.api_server \ <br> --model PaddlePaddle/PaddleOCR-VL \ <br> --port 8188 \ <br> --metrics-port 8181 \ <br> --engine-worker-queue-port 8182 \ <br> --max-model-len 16384 \ <br> --max-num-batched-tokens 16384 \ <br> --gpu-memory-utilization 0.8 \ <br> --max-num-seqs 256|2.3.0|
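Once any of the servers above is running, a quick smoke test is to list the models it serves through the OpenAI-compatible client. This is a minimal sketch, assuming the api_server exposes the standard `/v1/models` route and was launched with `--port 8188` on the local host:

```python
# Minimal smoke test for a freshly launched api_server (assumed reachable on
# port 8188, matching --port above, and exposing the standard /v1/models route).
import openai

client = openai.Client(base_url="http://0.0.0.0:8188/v1", api_key="EMPTY_API_KEY")
for model in client.models.list().data:
    print(model.id)
```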
## Quick start
@@ -162,8 +163,77 @@ for chunk in response:
if chunk.choices[0].delta is not None and chunk.choices[0].delta.role != 'assistant':
reasoning_content = get_str(chunk.choices[0].delta.reasoning_content)
content = get_str(chunk.choices[0].delta.content)
is_reason = "[answer]" if reasoning_content == '' else "[think]"
is_reason = ""
print(reasoning_content+content+is_reason, end='', flush=True)
print(reasoning_content + content + is_reason, end='', flush=True)
print('\n')
```
### Deploy online serving based on PaddleOCR-VL-0.9B
#### Start service
Deploy the PaddleOCR-VL-0.9B model with BF16 precision and a 16K context length on a single XPU
```bash
export FD_ENABLE_MAX_PREFILL=1
export XPU_VISIBLE_DEVICES="0" # Specify any card
python -m fastdeploy.entrypoints.openai.api_server \
--model PaddlePaddle/PaddleOCR-VL \
--port 8188 \
--metrics-port 8181 \
--engine-worker-queue-port 8182 \
--max-model-len 16384 \
--max-num-batched-tokens 16384 \
--gpu-memory-utilization 0.8 \
--max-num-seqs 256
```
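Weight loading can take a while, so the port will not accept connections immediately. A rough readiness check, sketched with only the standard library and assuming the `--port 8188` value from the launch command above, is to poll the port before sending any OCR requests:

```python
# Poll the api_server port until it accepts TCP connections.
# Host/port are assumptions matching the launch command above (--port 8188);
# 127.0.0.1 is used because the curl examples' 0.0.0.0 means "this host" anyway.
import socket
import time


def wait_for_server(host: str = "127.0.0.1", port: int = 8188, timeout_s: int = 600) -> bool:
    deadline = time.time() + timeout_s
    while time.time() < deadline:
        try:
            with socket.create_connection((host, port), timeout=2):
                return True  # port is open, the server is accepting connections
        except OSError:
            time.sleep(5)  # still loading weights; retry shortly
    return False


if __name__ == "__main__":
    print("server ready" if wait_for_server() else "timed out waiting for server")
```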
#### Send requests
```bash
curl -X POST "http://0.0.0.0:8188/v1/chat/completions" \
-H "Content-Type: application/json" \
-d '{
"messages": [
{"role": "user", "content": [
{"type": "image_url", "image_url": {"url": "https://paddle-model-ecology.bj.bcebos.com/PPOCRVL/dataset/ocr_v5_eval/handwrite_ch_rec_val/中文手写古籍_000054_crop_32.jpg"}},
{"type": "text", "text": "OCR:"}
]}
],
"metadata": {"enable_thinking": false}
}'
```
```python
import openai
ip = "0.0.0.0"
service_http_port = "8188"
client = openai.Client(base_url=f"http://{ip}:{service_http_port}/v1", api_key="EMPTY_API_KEY")
response = client.chat.completions.create(
model="default",
messages=[
{"role": "user", "content": [
{"type": "image_url", "image_url": {"url": "https://paddle-model-ecology.bj.bcebos.com/PPOCRVL/dataset/ocr_v5_eval/handwrite_ch_rec_val/中文手写古籍_000054_crop_32.jpg"}},
{"type": "text", "text": "OCR:"}
]
},
],
temperature=0.0001,
max_tokens=4096,
stream=True,
top_p=0,
metadata={"enable_thinking": False},
)
def get_str(content_raw):
content_str = str(content_raw) if content_raw is not None else ''
return content_str
for chunk in response:
if chunk.choices[0].delta is not None and chunk.choices[0].delta.role != 'assistant':
reasoning_content = get_str(chunk.choices[0].delta.reasoning_content)
content = get_str(chunk.choices[0].delta.content)
print(reasoning_content + content, end='', flush=True)
print('\n')
```
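For images stored locally there is no need to host them behind a URL. The sketch below sends a non-streaming request with the image inlined as a base64 `data:` URL; this assumes the server accepts `data:` URLs in `image_url` (common for OpenAI-compatible multimodal servers, but an assumption here), and `sample_page.jpg` is a placeholder path.

```python
# Hedged sketch: OCR a local image by inlining it as a base64 data: URL.
# Assumes the server accepts data: URLs in image_url; "sample_page.jpg" is a
# placeholder path, not a file shipped with FastDeploy or PaddleOCR-VL.
import base64

import openai

client = openai.Client(base_url="http://0.0.0.0:8188/v1", api_key="EMPTY_API_KEY")

with open("sample_page.jpg", "rb") as f:
    image_b64 = base64.b64encode(f.read()).decode("utf-8")

response = client.chat.completions.create(
    model="default",
    messages=[
        {"role": "user", "content": [
            {"type": "image_url", "image_url": {"url": f"data:image/jpeg;base64,{image_b64}"}},
            {"type": "text", "text": "OCR:"}
        ]},
    ],
    temperature=0.0001,
    max_tokens=4096,
    metadata={"enable_thinking": False},
)
print(response.choices[0].message.content)
```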

View File

@@ -20,6 +20,7 @@
|ERNIE-4.5-300B-A47B-W4A8C8-TP4|32K|W4A8|4|export XPU_VISIBLE_DEVICES="0,1,2,3" or "4,5,6,7"<br>python -m fastdeploy.entrypoints.openai.api_server \ <br> --model PaddlePaddle/ERNIE-4.5-300B-A47B-W4A8C8-TP4-Paddle \ <br> --port 8188 \ <br> --tensor-parallel-size 4 \ <br> --max-model-len 32768 \ <br> --max-num-seqs 64 \ <br> --quantization "W4A8" \ <br> --gpu-memory-utilization 0.9 \ <br> --load-choices "default"|2.3.0|
|ERNIE-4.5-VL-28B-A3B|32K|WINT8|1|export XPU_VISIBLE_DEVICES="0" # Specify any card<br>python -m fastdeploy.entrypoints.openai.api_server \ <br> --model PaddlePaddle/ERNIE-4.5-VL-28B-A3B-Paddle \ <br> --port 8188 \ <br> --tensor-parallel-size 1 \ <br> --quantization "wint8" \ <br> --max-model-len 32768 \ <br> --max-num-seqs 10 \ <br> --enable-mm \ <br> --mm-processor-kwargs '{"video_max_frames": 30}' \ <br> --limit-mm-per-prompt '{"image": 10, "video": 3}' \ <br> --reasoning-parser ernie-45-vl \ <br> --load-choices "default"|2.3.0|
|ERNIE-4.5-VL-424B-A47B|32K|WINT8|8|export XPU_VISIBLE_DEVICES="0,1,2,3,4,5,6,7" <br>python -m fastdeploy.entrypoints.openai.api_server \ <br> --model PaddlePaddle/ERNIE-4.5-VL-424B-A47B-Paddle \ <br> --port 8188 \ <br> --tensor-parallel-size 8 \ <br> --quantization "wint8" \ <br> --max-model-len 32768 \ <br> --max-num-seqs 10 \ <br> --enable-mm \ <br> --mm-processor-kwargs '{"video_max_frames": 30}' \ <br> --limit-mm-per-prompt '{"image": 10, "video": 3}' \ <br> --reasoning-parser ernie-45-vl \ <br> --load-choices "default"|2.3.0|
|PaddleOCR-VL-0.9B|32K|BF16|1|export FD_ENABLE_MAX_PREFILL=1 <br>export XPU_VISIBLE_DEVICES="0" # Specify any card <br>python -m fastdeploy.entrypoints.openai.api_server \ <br> --model PaddlePaddle/PaddleOCR-VL \ <br> --port 8188 \ <br> --metrics-port 8181 \ <br> --engine-worker-queue-port 8182 \ <br> --max-model-len 16384 \ <br> --max-num-batched-tokens 16384 \ <br> --gpu-memory-utilization 0.8 \ <br> --max-num-seqs 256|2.3.0|
## Quick start
@@ -117,7 +118,7 @@ python -m fastdeploy.entrypoints.openai.api_server \
--load-choices "default"
```
#### Send requests
```bash
curl -X POST "http://0.0.0.0:8188/v1/chat/completions" \
@@ -164,8 +165,77 @@ for chunk in response:
if chunk.choices[0].delta is not None and chunk.choices[0].delta.role != 'assistant':
reasoning_content = get_str(chunk.choices[0].delta.reasoning_content)
content = get_str(chunk.choices[0].delta.content)
is_reason = "[answer]" if reasoning_content == '' else "[think]"
is_reason = ""
print(reasoning_content+content+is_reason, end='', flush=True)
print(reasoning_content + content, end='', flush=True)
print('\n')
```
### Deploy online serving based on PaddleOCR-VL-0.9B
#### Start service
Deploy the PaddleOCR-VL-0.9B model with BF16 precision and a 16K context length on a single-XPU P800 server
```bash
export FD_ENABLE_MAX_PREFILL=1
export XPU_VISIBLE_DEVICES="0" # Specify any card
python -m fastdeploy.entrypoints.openai.api_server \
--model PaddlePaddle/PaddleOCR-VL \
--port 8188 \
--metrics-port 8181 \
--engine-worker-queue-port 8182 \
--max-model-len 16384 \
--max-num-batched-tokens 16384 \
--gpu-memory-utilization 0.8 \
--max-num-seqs 256
```
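The launch command above also opens a metrics port (`--metrics-port 8181`). A rough sketch for checking it, assuming the metrics are exported in the usual Prometheus text format under `/metrics` (the exact path is an assumption, not confirmed here):

```python
# Hedged sketch: fetch the server's metrics and print the first few lines.
# The /metrics path and Prometheus text format are assumptions; the port
# matches --metrics-port 8181 in the launch command above.
from urllib.request import urlopen

with urlopen("http://0.0.0.0:8181/metrics", timeout=5) as resp:
    text = resp.read().decode("utf-8")

for line in text.splitlines()[:20]:  # a short prefix is enough for a sanity check
    print(line)
```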
#### Send requests
```bash
curl -X POST "http://0.0.0.0:8188/v1/chat/completions" \
-H "Content-Type: application/json" \
-d '{
"messages": [
{"role": "user", "content": [
{"type": "image_url", "image_url": {"url": "https://paddle-model-ecology.bj.bcebos.com/PPOCRVL/dataset/ocr_v5_eval/handwrite_ch_rec_val/中文手写古籍_000054_crop_32.jpg"}},
{"type": "text", "text": "OCR:"}
]}
],
"metadata": {"enable_thinking": false}
}'
```
```python
import openai
ip = "0.0.0.0"
service_http_port = "8188"
client = openai.Client(base_url=f"http://{ip}:{service_http_port}/v1", api_key="EMPTY_API_KEY")
response = client.chat.completions.create(
model="default",
messages=[
{"role": "user", "content": [
{"type": "image_url", "image_url": {"url": "https://paddle-model-ecology.bj.bcebos.com/PPOCRVL/dataset/ocr_v5_eval/handwrite_ch_rec_val/中文手写古籍_000054_crop_32.jpg"}},
{"type": "text", "text": "OCR:"}
]
},
],
temperature=0.0001,
max_tokens=4096,
stream=True,
top_p=0,
metadata={"enable_thinking": False},
)
def get_str(content_raw):
content_str = str(content_raw) if content_raw is not None else ''
return content_str
for chunk in response:
if chunk.choices[0].delta is not None and chunk.choices[0].delta.role != 'assistant':
reasoning_content = get_str(chunk.choices[0].delta.reasoning_content)
content = get_str(chunk.choices[0].delta.content)
print(reasoning_content + content, end='', flush=True)
print('\n')
```
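The streaming loop above prints fragments as they arrive; for OCR it is often the concatenated text that matters. The sketch below issues the same request and collects the streamed pieces into one string before writing it out (`ocr_result.txt` is just an example filename, not something the server produces):

```python
# Hedged sketch: same streaming OCR request as above, but the streamed text is
# collected into one string and written to a file. "ocr_result.txt" is just an
# example filename; nothing in FastDeploy requires it.
import openai

client = openai.Client(base_url="http://0.0.0.0:8188/v1", api_key="EMPTY_API_KEY")
response = client.chat.completions.create(
    model="default",
    messages=[
        {"role": "user", "content": [
            {"type": "image_url", "image_url": {"url": "https://paddle-model-ecology.bj.bcebos.com/PPOCRVL/dataset/ocr_v5_eval/handwrite_ch_rec_val/中文手写古籍_000054_crop_32.jpg"}},
            {"type": "text", "text": "OCR:"}
        ]},
    ],
    temperature=0.0001,
    max_tokens=4096,
    stream=True,
    metadata={"enable_thinking": False},
)

pieces = []
for chunk in response:
    delta = chunk.choices[0].delta
    if delta is not None and delta.role != 'assistant':
        # reasoning_content is a server-specific extra field; fall back to ''
        pieces.append(getattr(delta, "reasoning_content", None) or '')
        pieces.append(delta.content or '')

full_text = "".join(pieces)
with open("ocr_result.txt", "w", encoding="utf-8") as f:
    f.write(full_text)
print(full_text)
```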