[Doc] modify offline_inference docs (#2787)

* modify reasoning_output docs

* modify offline inference docs

* modify offline inference docs

* modify offline_inference docs

* modify offline_inference docs
LiqinruiG
2025-07-10 01:06:14 +08:00
committed by GitHub
parent a4fdb3970b
commit 54affdc44b
2 changed files with 8 additions and 8 deletions


@@ -51,7 +51,7 @@ outputs = llm.chat(
 for output in outputs:
     prompt = output.prompt
     generated_text = output.outputs.text
-    reasoning_text = output.outputs.resoning_content
+    reasoning_text = output.outputs.reasoning_content
 ```
 ### Text Completion Interface (LLM.generate)
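The hunk above corrects a typo in the output attribute name. For reference, a minimal sketch of the corrected chat-interface usage; the exact `llm.chat` keyword names and the message format are assumptions here, while the model path, parser name, and attribute names come from the docs being edited:

```python
from fastdeploy.entrypoints.llm import LLM
from fastdeploy.engine.sampling_params import SamplingParams

# Model path and reasoning parser follow the surrounding docs.
llm = LLM(model="baidu/ERNIE-4.5-VL-28B-A3B-Paddle", reasoning_parser="ernie-45-vl")
outputs = llm.chat(
    messages=[{"role": "user", "content": "Hello"}],  # assumed OpenAI-style messages
    sampling_params=SamplingParams(temperature=0.1, max_tokens=6400),
)
for output in outputs:
    generated_text = output.outputs.text
    # Corrected attribute: `reasoning_content` (the old docs had `resoning_content`).
    reasoning_text = output.outputs.reasoning_content
```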
@@ -89,7 +89,7 @@ from PIL import Image
 from fastdeploy.entrypoints.llm import LLM
 from fastdeploy.engine.sampling_params import SamplingParams
-from fastdeploy.input.ernie_tokenizer_v2 import ErnieBotTokenizer
+from fastdeploy.input.ernie_tokenizer import ErnieBotTokenizer
 PATH = "baidu/ERNIE-4.5-VL-28B-A3B-Paddle"
 tokenizer = ErnieBotTokenizer.from_pretrained(os.path.dirname(PATH))
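This hunk only renames the import path (`ernie_tokenizer_v2` → `ernie_tokenizer`). Code that has to run against both older and newer FastDeploy releases could guard the import; a hypothetical compatibility sketch, assuming older installs still ship the `_v2` module:

```python
try:
    # Import path used by the updated docs.
    from fastdeploy.input.ernie_tokenizer import ErnieBotTokenizer
except ImportError:
    # Hypothetical fallback for older releases that still ship the old module name.
    from fastdeploy.input.ernie_tokenizer_v2 import ErnieBotTokenizer
```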
@@ -125,7 +125,7 @@ for message in messages:
     })
 sampling_params = SamplingParams(temperature=0.1, max_tokens=6400)
-llm = LLM(model=PATH, tensor_parallel_size=8, max_model_len=32768, enable_mm=True, limit_mm_per_prompt={"image": 100}, reasoning_parser="ernie-45-vl")
+llm = LLM(model=PATH, tensor_parallel_size=1, max_model_len=32768, enable_mm=True, limit_mm_per_prompt={"image": 100}, reasoning_parser="ernie-45-vl")
 outputs = llm.generate(prompts={
     "prompt": prompt,
     "multimodal_data": {
@@ -138,7 +138,7 @@ outputs = llm.generate(prompts={
 for output in outputs:
     prompt = output.prompt
     generated_text = output.outputs.text
-    reasoning_text = output.outputs.resoning_content
+    reasoning_text = output.outputs.reasoning_content
 ```
 > Note: The `generate` interface does not currently support parameters to toggle the thinking function on or off; it always uses the model's default thinking behavior.
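Putting the corrected parameters together, a minimal multimodal sketch. The image filename, the exact shape of `multimodal_data`, and the keyword for passing `sampling_params` to `generate` are assumptions; the chat-template prompt construction from the docs is elided:

```python
from PIL import Image
from fastdeploy.entrypoints.llm import LLM
from fastdeploy.engine.sampling_params import SamplingParams

PATH = "baidu/ERNIE-4.5-VL-28B-A3B-Paddle"
llm = LLM(
    model=PATH,
    tensor_parallel_size=1,  # corrected from 8 in this commit
    max_model_len=32768,
    enable_mm=True,
    limit_mm_per_prompt={"image": 100},
    reasoning_parser="ernie-45-vl",
)
prompt = "..."  # in the docs this is built from the messages via the chat template
outputs = llm.generate(
    prompts={
        "prompt": prompt,
        "multimodal_data": {"image": [Image.open("demo.jpg")]},  # structure assumed
    },
    sampling_params=SamplingParams(temperature=0.1, max_tokens=6400),
)
```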


@@ -52,7 +52,7 @@ outputs = llm.chat(
 for output in outputs:
     prompt = output.prompt
     generated_text = output.outputs.text
-    reasoning_text = output.outputs.resoning_content
+    reasoning_text = output.outputs.reasoning_content
 ```
 ### Text Completion Interface (LLM.generate)
@@ -90,7 +90,7 @@ from PIL import Image
 from fastdeploy.entrypoints.llm import LLM
 from fastdeploy.engine.sampling_params import SamplingParams
-from fastdeploy.input.ernie_tokenizer_v2 import ErnieBotTokenizer
+from fastdeploy.input.ernie_tokenizer import ErnieBotTokenizer
 PATH = "baidu/ERNIE-4.5-VL-28B-A3B-Paddle"
 tokenizer = ErnieBotTokenizer.from_pretrained(os.path.dirname(PATH))
@@ -126,7 +126,7 @@ for message in messages:
     })
 sampling_params = SamplingParams(temperature=0.1, max_tokens=6400)
-llm = LLM(model=PATH, tensor_parallel_size=8, max_model_len=32768, enable_mm=True, limit_mm_per_prompt={"image": 100}, reasoning_parser="ernie-45-vl")
+llm = LLM(model=PATH, tensor_parallel_size=1, max_model_len=32768, enable_mm=True, limit_mm_per_prompt={"image": 100}, reasoning_parser="ernie-45-vl")
 outputs = llm.generate(prompts={
     "prompt": prompt,
     "multimodal_data": {
@@ -139,7 +139,7 @@ outputs = llm.generate(prompts={
 for output in outputs:
     prompt = output.prompt
     generated_text = output.outputs.text
-    reasoning_text = output.outputs.resoning_content
+    reasoning_text = output.outputs.reasoning_content
 ```
 > Note: The `generate` interface does not currently support parameters to toggle thinking on or off; it always uses the model's default thinking capability.