<h1 id="openai-protocol-compatible-api-server">OpenAI Protocol-Compatible API Server</h1>
|
||
<p>FastDeploy provides a service-oriented deployment solution that is compatible with the OpenAI protocol. Users can quickly deploy it using the following command:</p>
|
||
<pre><code class="language-bash">python -m fastdeploy.entrypoints.openai.api_server \
|
||
--model baidu/ERNIE-4.5-0.3B-Paddle \
|
||
--port 8188 --tensor-parallel-size 8 \
|
||
--max-model-len 32768
|
||
</code></pre>
|
||
<p>To enable log probability output, simply deploy with the following command:</p>
|
||
<pre><code class="language-bash">python -m fastdeploy.entrypoints.openai.api_server \
|
||
--model baidu/ERNIE-4.5-0.3B-Paddle \
|
||
--port 8188 --tensor-parallel-size 8 \
|
||
--max-model-len 32768 \
|
||
--enable-logprob
|
||
</code></pre>
|
||
<p>For more usage methods of the command line during service deployment, refer to <a href="../parameters/">Parameter Descriptions</a>.</p>
|
||
<h2 id="chat-completion-api">Chat Completion API</h2>
|
||
<p>FastDeploy provides a Chat Completion API that is compatible with the OpenAI protocol, allowing user requests to be sent directly using OpenAI's request method.</p>
|
||
<h3 id="sending-user-requests">Sending User Requests</h3>
|
||
<p>Here is an example of sending a user request using the curl command:</p>
|
||
<pre><code class="language-bash">curl -X POST "http://0.0.0.0:8188/v1/chat/completions" \
|
||
-H "Content-Type: application/json" \
|
||
-d '{
|
||
"messages": [
|
||
{"role": "user", "content": "Hello!"}
|
||
]
|
||
}'
|
||
</code></pre>
|
||
<p>Here's an example curl command demonstrating how to include the logprobs parameters in a user request:</p>
<pre><code class="language-bash">curl -X POST "http://0.0.0.0:8188/v1/chat/completions" \
  -H "Content-Type: application/json" \
  -d '{
    "messages": [
      {"role": "user", "content": "Hello!"}
    ],
    "logprobs": true, "top_logprobs": 0
  }'
</code></pre>
<p>Here is an example of sending a user request using a Python script:</p>
<pre><code class="language-python">import openai
host = "0.0.0.0"
port = "8188"
client = openai.Client(base_url=f"http://{host}:{port}/v1", api_key="null")

response = client.chat.completions.create(
    model="null",
    messages=[
        {"role": "system", "content": "I'm a helpful AI assistant."},
        {"role": "user", "content": "Rewrite Li Bai's 'Quiet Night Thought' as a modern poem"},
    ],
    stream=True,
)
for chunk in response:
    if chunk.choices[0].delta:
        print(chunk.choices[0].delta.content, end='')
print('\n')
</code></pre>
<p>For a description of the OpenAI protocol, refer to the <a href="https://platform.openai.com/docs/api-reference/chat/create">OpenAI Chat Completion API</a> documentation.</p>
<h3 id="compatible-openai-parameters">Compatible OpenAI Parameters</h3>
|
||
<pre><code class="language-python">messages: Union[List[Any], List[int]]
|
||
# List of input messages, which can be text messages (`List[Any]`, typically `List[dict]`) or token ID lists (`List[int]`).
|
||
|
||
tools: Optional[List[ChatCompletionToolsParam]] = None
|
||
# List of tool call configurations, used for enabling function calling (Function Calling) or tool usage (e.g., ReAct framework).
|
||
|
||
model: Optional[str] = "default"
|
||
# Specifies the model name or version to use, defaulting to `"default"` (which may point to the base model).
|
||
|
||
frequency_penalty: Optional[float] = None
|
||
# Frequency penalty coefficient, reducing the probability of generating the same token repeatedly (`>1.0` suppresses repetition, `<1.0` encourages repetition, default `None` disables).
|
||
|
||
logprobs: Optional[bool] = False
|
||
# Whether to return the log probabilities of each generated token, used for debugging or analysis.
|
||
|
||
top_logprobs: Optional[int] = 0
|
||
# Returns the top `top_logprobs` tokens and their log probabilities for each generated position (default `0` means no return).
|
||
|
||
max_tokens: Optional[int] = Field(
|
||
default=None,
|
||
deprecated="max_tokens is deprecated in favor of the max_completion_tokens field",
|
||
)
|
||
# Deprecated: Maximum number of tokens to generate (recommended to use `max_completion_tokens` instead).
|
||
|
||
max_completion_tokens: Optional[int] = None
|
||
# Maximum number of tokens to generate (recommended alternative to `max_tokens`), no default limit (restricted by the model's context window).
|
||
|
||
presence_penalty: Optional[float] = None
|
||
# Presence penalty coefficient, reducing the probability of generating new topics (unseen topics) (`>1.0` suppresses new topics, `<1.0` encourages new topics, default `None` disables).
|
||
|
||
stream: Optional[bool] = False
|
||
# Whether to enable streaming output (return results token by token), default `False` (returns complete results at once).
|
||
|
||
stream_options: Optional[StreamOptions] = None
|
||
# Additional configurations for streaming output (such as chunk size, timeout, etc.), refer to the specific definition of `StreamOptions`.
|
||
|
||
temperature: Optional[float] = None
|
||
# Temperature coefficient, controlling generation randomness (`0.0` for deterministic generation, `>1.0` for more randomness, default `None` uses model default).
|
||
|
||
top_p: Optional[float] = None
|
||
# Nucleus sampling threshold, only retaining tokens whose cumulative probability exceeds `top_p` (default `None` disables).
|
||
|
||
response_format: Optional[AnyResponseFormat] = None
|
||
# Specifies the output format (such as JSON, XML, etc.), requires passing a predefined format configuration object.
|
||
|
||
user: Optional[str] = None
|
||
# User identifier, used for tracking or distinguishing requests from different users (default `None` does not pass).
|
||
|
||
metadata: Optional[dict] = None
|
||
# Additional metadata, used for passing custom information (such as request ID, debug markers, etc.).
|
||
|
||
</code></pre>
|
||
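<p>As an illustration of the parameters above, here is a minimal sketch of a non-streaming request that combines <code>temperature</code>, <code>top_p</code>, <code>max_completion_tokens</code> and log-probability output. It assumes the server deployed earlier is reachable on port 8188, that it was started with <code>--enable-logprob</code>, and that a recent openai Python SDK (which accepts <code>max_completion_tokens</code>) is installed; the parameter values are illustrative only:</p>
<pre><code class="language-python">import openai

# Assumes the api_server started above is reachable on this host/port.
client = openai.Client(base_url="http://0.0.0.0:8188/v1", api_key="null")

response = client.chat.completions.create(
    model="null",
    messages=[{"role": "user", "content": "Give me a two-sentence summary of nucleus sampling."}],
    temperature=0.7,            # moderate randomness
    top_p=0.9,                  # nucleus sampling threshold
    max_completion_tokens=128,  # cap on generated tokens
    logprobs=True,              # requires the service to be launched with --enable-logprob
    top_logprobs=5,             # top-5 candidates per position
    stream=False,
)
print(response.choices[0].message.content)
print(response.choices[0].logprobs)
</code></pre>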
<h3 id="additional-parameters-added-by-fastdeploy">Additional Parameters Added by FastDeploy</h3>
|
||
<blockquote>
|
||
<p>Note:
|
||
When sending requests using curl, the following parameters can be used directly;
|
||
When sending requests using openai.Client, these parameters need to be placed in the <code>extra_body</code> parameter, e.g. <code>extra_body={"chat_template_kwargs": {"enable_thinking":True}, "include_stop_str_in_output": True}</code>.</p>
|
||
</blockquote>
|
||
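<p>For example, a minimal sketch of forwarding FastDeploy-specific parameters through <code>extra_body</code> with the OpenAI Python client; the chosen parameter values are illustrative only and assume the server from the deployment command above:</p>
<pre><code class="language-python">import openai

client = openai.Client(base_url="http://0.0.0.0:8188/v1", api_key="null")

response = client.chat.completions.create(
    model="null",
    messages=[{"role": "user", "content": "Hello!"}],
    stream=False,
    # FastDeploy-specific parameters are not part of the OpenAI SDK signature,
    # so they are forwarded verbatim in the request body via extra_body.
    extra_body={
        "chat_template_kwargs": {"enable_thinking": True},
        "include_stop_str_in_output": True,
        "repetition_penalty": 1.05,
    },
)
print(response.choices[0].message.content)
</code></pre>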
<p>The following sampling parameters are supported:</p>
<pre><code class="language-python">top_k: Optional[int] = None
|
||
# Limits the consideration to the top K tokens with the highest probability at each generation step, used to control randomness (default None means no limit).
|
||
|
||
min_p: Optional[float] = None
|
||
# Nucleus sampling threshold, only retaining tokens whose cumulative probability exceeds min_p (default None means disabled).
|
||
|
||
min_tokens: Optional[int] = None
|
||
# Forces a minimum number of tokens to be generated, avoiding premature truncation (default None means no limit).
|
||
|
||
include_stop_str_in_output: Optional[bool] = False
|
||
# Whether to include the stop string content in the output (default False, meaning output is truncated when a stop string is encountered).
|
||
|
||
bad_words: Optional[List[str]] = None
|
||
# List of forbidden words (e.g., sensitive words) that the model should avoid generating (default None means no restriction).
|
||
|
||
bad_words_token_ids: Optional[List[int]] = None
|
||
# List of forbidden token ids that the model should avoid generating (default None means no restriction).
|
||
|
||
repetition_penalty: Optional[float] = None
|
||
# Repetition penalty coefficient, reducing the probability of repeating already generated tokens (`>1.0` suppresses repetition, `<1.0` encourages repetition, default None means disabled).
|
||
</code></pre>
|
||
<p>The following extra parameters are supported:</p>
<pre><code class="language-python">chat_template_kwargs: Optional[dict] = None
# Additional parameters passed to the chat template, used for customizing dialogue formats (default None).

chat_template: Optional[str] = None
# A custom chat template that overrides the model's default chat template (default None).

reasoning_max_tokens: Optional[int] = None
# Maximum number of tokens to generate during reasoning (e.g., chain of thought) (default None means the global max_tokens is used).

structural_tag: Optional[str] = None
# Structural tag, used to mark specific structures in the generated content (such as JSON, XML, etc.; default None).

guided_json: Optional[Union[str, dict, BaseModel]] = None
# Guides the generation of content conforming to a JSON structure; can be a JSON schema string, dictionary, or Pydantic model (default None).

guided_regex: Optional[str] = None
# Guides the generation of content conforming to a regular expression (default None means no restriction).

guided_choice: Optional[List[str]] = None
# Guides the generation of content selected from a specified candidate list (default None means no restriction).

guided_grammar: Optional[str] = None
# Guides the generation of content conforming to grammar rules (such as BNF) (default None means no restriction).

return_token_ids: Optional[bool] = None
# Whether to return the token IDs of the generation results instead of text (default None means text is returned).

prompt_token_ids: Optional[List[int]] = None
# Directly passes the token ID list of the prompt, skipping the text encoding step (default None means text input is used).

disable_chat_template: Optional[bool] = False
# Whether to disable chat template rendering and use the raw input directly (default False means the template is applied).

temp_scaled_logprobs: Optional[bool] = False
# Whether to divide the logits by the temperature coefficient when computing logprobs (default False means they are not divided).

top_p_normalized_logprobs: Optional[bool] = False
# Whether to apply top-p normalization when computing logprobs (default False means no top-p normalization).
</code></pre>
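<p>As a concrete illustration, the sketch below requests JSON-constrained output through <code>guided_json</code> passed via <code>extra_body</code>. The schema is only an example, and whether constrained decoding is available depends on how the service was deployed (see the Structured Outputs feature documentation):</p>
<pre><code class="language-python">import json
import openai

client = openai.Client(base_url="http://0.0.0.0:8188/v1", api_key="null")

# Illustrative JSON schema the generated answer should conform to.
book_schema = {
    "type": "object",
    "properties": {
        "title": {"type": "string"},
        "author": {"type": "string"},
        "year": {"type": "integer"},
    },
    "required": ["title", "author", "year"],
}

response = client.chat.completions.create(
    model="null",
    messages=[{"role": "user", "content": "Recommend one classic science-fiction novel."}],
    stream=False,
    extra_body={"guided_json": book_schema},
)
print(json.loads(response.choices[0].message.content))
</code></pre>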
<h3 id="differences-in-return-fields">Differences in Return Fields</h3>
|
||
<p>Additional return fields added by FastDeploy:</p>
|
||
<ul>
|
||
<li><code>arrival_time</code>: Cumulative time consumed for all tokens</li>
|
||
<li><code>reasoning_content</code>: Return results of the chain of thought</li>
|
||
<li><code>prompt_token_ids</code>: List of token IDs for the input sequence</li>
|
||
<li><code>completion_token_ids</code>: List of token IDs for the output sequence</li>
|
||
</ul>
|
||
<p>Overview of return parameters:</p>
|
||
<pre><code class="language-python">
|
||
ChatCompletionResponse:
|
||
id: str
|
||
object: str = "chat.completion"
|
||
created: int = Field(default_factory=lambda: int(time.time()))
|
||
model: str
|
||
choices: List[ChatCompletionResponseChoice]
|
||
usage: UsageInfo
|
||
ChatCompletionResponseChoice:
|
||
index: int
|
||
message: ChatMessage
|
||
logprobs: Optional[LogProbs] = None
|
||
finish_reason: Optional[Literal["stop", "length", "tool_calls", "recover_stop"]]
|
||
ChatMessage:
|
||
role: str
|
||
content: str
|
||
reasoning_content: Optional[str] = None
|
||
prompt_token_ids: Optional[List[int]] = None
|
||
completion_token_ids: Optional[List[int]] = None
|
||
|
||
# Fields returned for streaming responses
|
||
ChatCompletionStreamResponse:
|
||
id: str
|
||
object: str = "chat.completion.chunk"
|
||
created: int = Field(default_factory=lambda: int(time.time()))
|
||
model: str
|
||
choices: List[ChatCompletionResponseStreamChoice]
|
||
usage: Optional[UsageInfo] = None
|
||
ChatCompletionResponseStreamChoice:
|
||
index: int
|
||
delta: DeltaMessage
|
||
logprobs: Optional[LogProbs] = None
|
||
finish_reason: Optional[Literal["stop", "length", "tool_calls"]] = None
|
||
arrival_time: Optional[float] = None
|
||
DeltaMessage:
|
||
role: Optional[str] = None
|
||
content: Optional[str] = None
|
||
prompt_token_ids: Optional[List[int]] = None
|
||
completion_token_ids: Optional[List[int]] = None
|
||
reasoning_content: Optional[str] = None
|
||
</code></pre>
|
||
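<p>A minimal sketch of reading these FastDeploy-specific fields on the client side. Note the assumptions: <code>reasoning_content</code> is only populated for reasoning-capable models, the token-ID fields are requested here explicitly via <code>return_token_ids</code>, and the fields are read defensively since they are not part of the standard OpenAI response schema:</p>
<pre><code class="language-python">import openai

client = openai.Client(base_url="http://0.0.0.0:8188/v1", api_key="null")

response = client.chat.completions.create(
    model="null",
    messages=[{"role": "user", "content": "Hello!"}],
    stream=False,
    extra_body={"return_token_ids": True},
)

message = response.choices[0].message
print("content:", message.content)
# Extra fields are not declared in the OpenAI SDK models, so read them with getattr.
print("reasoning_content:", getattr(message, "reasoning_content", None))
print("prompt_token_ids:", getattr(message, "prompt_token_ids", None))
print("completion_token_ids:", getattr(message, "completion_token_ids", None))
</code></pre>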
<h2 id="completion-api">Completion API</h2>
|
||
<p>The Completion API interface is mainly used for continuation scenarios, suitable for users who have customized context input and expect the model to only output continuation content; the inference process does not add other <code>prompt</code> concatenations.</p>
|
||
<h3 id="sending-user-requests_1">Sending User Requests</h3>
|
||
<p>Here is an example of sending a user request using the curl command:</p>
|
||
<pre><code class="language-bash">curl -X POST "http://0.0.0.0:8188/v1/completions" \
|
||
-H "Content-Type: application/json" \
|
||
-d '{
|
||
"prompt": "以下是一篇关于深圳文心公园的500字游记和赏析:"
|
||
}'
|
||
</code></pre>
|
||
<p>Here is an example of sending a user request using a Python script:</p>
|
||
<pre><code class="language-python">import openai
|
||
host = "0.0.0.0"
|
||
port = "8170"
|
||
client = openai.Client(base_url=f"http://{host}:{port}/v1", api_key="null")
|
||
|
||
response = client.completions.create(
|
||
model="default",
|
||
prompt="以下是一篇关于深圳文心公园的500字游记和赏析:",
|
||
stream=False,
|
||
)
|
||
print(response.choices[0].text)
|
||
</code></pre>
|
||
<p>For an explanation of the OpenAI protocol, refer to the <a href="https://platform.openai.com/docs/api-reference/completions/create">OpenAI Completion API</a>。</p>
|
||
<h3 id="compatible-openai-parameters_1">Compatible OpenAI Parameters</h3>
|
||
<pre><code class="language-python">model: Optional[str] = "default"
|
||
# Specifies the model name or version to use, defaulting to `"default"` (which may point to the base model).
|
||
|
||
prompt: Union[List[int], List[List[int]], str, List[str]]
|
||
# Input prompt, supporting multiple formats:
|
||
# - `str`: Plain text prompt (e.g., `"Hello, how are you?"`).
|
||
# - `List[str]`: Multiple text segments (e.g., `["User:", "Hello!", "Assistant:", "Hi!"]`).
|
||
# - `List[int]`: Directly passes a list of token IDs (e.g., `[123, 456]`).
|
||
# - `List[List[int]]`: List of multiple token ID lists (e.g., `[[123], [456, 789]]`).
|
||
|
||
best_of: Optional[int] = None
|
||
# Generates `best_of` candidate results and returns the highest-scoring one (requires `n=1`).
|
||
|
||
frequency_penalty: Optional[float] = None
|
||
# Frequency penalty coefficient, reducing the probability of generating the same token repeatedly (`>1.0` suppresses repetition, `<1.0` encourages repetition).
|
||
|
||
logprobs: Optional[int] = None
|
||
# Returns the log probabilities of each generated token, can specify the number of candidates to return.
|
||
|
||
max_tokens: Optional[int] = None
|
||
# Maximum number of tokens to generate (including input and output), no default limit (restricted by the model's context window).
|
||
|
||
presence_penalty: Optional[float] = None
|
||
# Presence penalty coefficient, reducing the probability of generating new topics (unseen topics) (`>1.0` suppresses new topics, `<1.0` encourages new topics).
|
||
</code></pre>
|
||
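<p>A short sketch combining several of the parameters above on the completions endpoint. The values are illustrative only, and the log-probability output assumes the service was launched with <code>--enable-logprob</code>:</p>
<pre><code class="language-python">import openai

client = openai.Client(base_url="http://0.0.0.0:8188/v1", api_key="null")

response = client.completions.create(
    model="default",
    prompt="FastDeploy is",
    max_tokens=64,           # cap on generated tokens
    frequency_penalty=0.5,   # discourage verbatim repetition
    logprobs=5,              # log probabilities for the top-5 candidates per position
    stream=False,
)
choice = response.choices[0]
print(choice.text)
print(choice.logprobs)
</code></pre>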
<h3 id="additional-parameters-added-by-fastdeploy_1">Additional Parameters Added by FastDeploy</h3>
|
||
<blockquote>
|
||
<p>Note:
|
||
When sending requests using curl, the following parameters can be used directly;
|
||
When sending requests using openai.Client, these parameters need to be placed in the <code>extra_body</code> parameter, e.g. <code>extra_body={"chat_template_kwargs": {"enable_thinking":True}, "include_stop_str_in_output": True}</code>.</p>
|
||
</blockquote>
|
||
<p>The following sampling parameters are supported.</p>
|
||
<pre><code class="language-python">top_k: Optional[int] = None
|
||
# Limits the consideration to the top K tokens with the highest probability at each generation step, used to control randomness (default None means no limit).
|
||
|
||
min_p: Optional[float] = None
|
||
# Nucleus sampling threshold, only retaining tokens whose cumulative probability exceeds min_p (default None means disabled).
|
||
|
||
min_tokens: Optional[int] = None
|
||
# Forces a minimum number of tokens to be generated, avoiding premature truncation (default None means no limit).
|
||
|
||
include_stop_str_in_output: Optional[bool] = False
|
||
# Whether to include the stop string content in the output (default False, meaning output is truncated when a stop string is encountered).
|
||
|
||
bad_words: Optional[List[str]] = None
|
||
# List of forbidden words (e.g., sensitive words) that the model should avoid generating (default None means no restriction).
|
||
|
||
bad_words_token_ids: Optional[List[int]] = None
|
||
# List of forbidden token ids that the model should avoid generating (default None means no restriction).
|
||
|
||
repetition_penalty: Optional[float] = None
|
||
# Repetition penalty coefficient, reducing the probability of repeating already generated tokens (`>1.0` suppresses repetition, `<1.0` encourages repetition, default None means disabled).
|
||
</code></pre>
|
||
<p>The following extra parameters are supported:</p>
<pre><code class="language-python">guided_json: Optional[Union[str, dict, BaseModel]] = None
# Guides the generation of content conforming to a JSON structure; can be a JSON schema string, dictionary, or Pydantic model (default None).

guided_regex: Optional[str] = None
# Guides the generation of content conforming to a regular expression (default None means no restriction).

guided_choice: Optional[List[str]] = None
# Guides the generation of content selected from a specified candidate list (default None means no restriction).

guided_grammar: Optional[str] = None
# Guides the generation of content conforming to grammar rules (such as BNF) (default None means no restriction).

return_token_ids: Optional[bool] = None
# Whether to return the token IDs of the generation results instead of text (default None means text is returned).

prompt_token_ids: Optional[List[int]] = None
# Directly passes the token ID list of the prompt, skipping the text encoding step (default None means text input is used).
</code></pre>
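<p>For example, a sketch of restricting the output to a fixed set of answers with <code>guided_choice</code>, passed through <code>extra_body</code>. The candidate list is illustrative, and constrained decoding must be supported by the deployed service:</p>
<pre><code class="language-python">import openai

client = openai.Client(base_url="http://0.0.0.0:8188/v1", api_key="null")

response = client.completions.create(
    model="default",
    prompt="The sentiment of the review 'The battery life is terrible.' is",
    stream=False,
    # Constrain the continuation to one of the listed candidates.
    extra_body={"guided_choice": ["positive", "negative", "neutral"]},
)
print(response.choices[0].text)
</code></pre>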
<h3 id="overview-of-return-parameters">Overview of Return Parameters</h3>
|
||
<pre><code class="language-python">
|
||
CompletionResponse:
|
||
id: str
|
||
object: str = "text_completion"
|
||
created: int = Field(default_factory=lambda: int(time.time()))
|
||
model: str
|
||
choices: List[CompletionResponseChoice]
|
||
usage: UsageInfo
|
||
CompletionResponseChoice:
|
||
index: int
|
||
text: str
|
||
prompt_token_ids: Optional[List[int]] = None
|
||
completion_token_ids: Optional[List[int]] = None
|
||
arrival_time: Optional[float] = None
|
||
logprobs: Optional[int] = None
|
||
reasoning_content: Optional[str] = None
|
||
finish_reason: Optional[Literal["stop", "length", "tool_calls"]]
|
||
|
||
# Fields returned for streaming responses
|
||
CompletionStreamResponse:
|
||
id: str
|
||
object: str = "text_completion"
|
||
created: int = Field(default_factory=lambda: int(time.time()))
|
||
model: str
|
||
choices: List[CompletionResponseStreamChoice]
|
||
usage: Optional[UsageInfo] = None
|
||
CompletionResponseStreamChoice:
|
||
index: int
|
||
text: str
|
||
arrival_time: float = None
|
||
prompt_token_ids: Optional[List[int]] = None
|
||
completion_token_ids: Optional[List[int]] = None
|
||
logprobs: Optional[float] = None
|
||
reasoning_content: Optional[str] = None
|
||
finish_reason: Optional[Literal["stop", "length", "tool_calls"]] = None
|
||
|
||
</code></pre>