diff --git a/docs/features/sampling.md b/docs/features/sampling.md
index 01edb5dd8..3a0d22869 100644
--- a/docs/features/sampling.md
+++ b/docs/features/sampling.md
@@ -98,7 +98,7 @@ curl -X POST "http://0.0.0.0:9222/v1/chat/completions" \
         {"role": "user", "content": "How old are you"}
     ],
     "top_p": 0.8,
-    "top_k": 50
+    "top_k": 20
 }'
 ```
 
@@ -117,7 +117,7 @@ response = client.chat.completions.create(
     ],
     stream=True,
     top_p=0.8,
-    top_k=50
+    extra_body={"top_k": 20, "min_p": 0.1}
 )
 for chunk in response:
     if chunk.choices[0].delta:
@@ -159,8 +159,7 @@ response = client.chat.completions.create(
     ],
     stream=True,
     top_p=0.8,
-    top_k=20,
-    min_p=0.1
+    extra_body={"top_k": 20, "min_p": 0.1}
 )
 for chunk in response:
     if chunk.choices[0].delta:
diff --git a/docs/offline_inference.md b/docs/offline_inference.md
index 45a77615a..3bb52a191 100644
--- a/docs/offline_inference.md
+++ b/docs/offline_inference.md
@@ -183,6 +183,7 @@ For ```LLM``` configuration, refer to [Parameter Documentation](parameters.md).
 * min_p(float): Minimum probability relative to the maximum probability for a token to be considered (>0 filters low-probability tokens to improve quality)
 * max_tokens(int): Maximum generated tokens (input + output)
 * min_tokens(int): Minimum forced generation length
+* bad_words(list[str]): List of words the model is prohibited from generating
 
 ### 2.5 fastdeploy.engine.request.RequestOutput
diff --git a/docs/zh/features/sampling.md b/docs/zh/features/sampling.md
index 829006d31..24cc003b5 100644
--- a/docs/zh/features/sampling.md
+++ b/docs/zh/features/sampling.md
@@ -98,7 +98,7 @@ curl -X POST "http://0.0.0.0:9222/v1/chat/completions" \
         {"role": "user", "content": "How old are you"}
     ],
     "top_p": 0.8,
-    "top_k": 50
+    "top_k": 20
 }'
 ```
 
@@ -118,7 +118,7 @@ response = client.chat.completions.create(
     ],
     stream=True,
     top_p=0.8,
-    extra_body={"top_k": 50}
+    extra_body={"top_k": 20}
 )
 for chunk in response:
     if chunk.choices[0].delta:
@@ -161,8 +161,7 @@ response = client.chat.completions.create(
     ],
     stream=True,
     top_p=0.8,
-    extra_body={"top_k": 20},
-    min_p=0.1
+    extra_body={"top_k": 20, "min_p": 0.1}
 )
 for chunk in response:
     if chunk.choices[0].delta:
diff --git a/docs/zh/offline_inference.md b/docs/zh/offline_inference.md
index 015fc7b72..7dc8e195e 100644
--- a/docs/zh/offline_inference.md
+++ b/docs/zh/offline_inference.md
@@ -183,6 +183,7 @@ for output in outputs:
 * min_p(float): token入选的最小概率阈值(相对于最高概率token的比值,设为>0可通过过滤低概率token来提升文本生成质量)
 * max_tokens(int): 限制模型生成的最大token数量(包括输入和输出)
 * min_tokens(int): 强制模型生成的最少token数量,避免过早结束
+* bad_words(list[str]): 禁止生成的词列表,防止模型生成不希望出现的词
 
 ### 2.5 fastdeploy.engine.request.RequestOutput
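For reference, a minimal offline-inference sketch that exercises the parameters this patch documents (a sketch only, assuming `fastdeploy` exposes `LLM` and `SamplingParams` as described in docs/offline_inference.md; the model path, prompt, and bad-word list below are placeholders):

```python
from fastdeploy import LLM, SamplingParams

# Sampling settings matching the values the docs now recommend.
sampling_params = SamplingParams(
    top_p=0.8,              # nucleus sampling threshold
    top_k=20,               # sample only from the 20 most likely tokens
    min_p=0.1,              # drop tokens below 10% of the top token's probability
    max_tokens=512,         # cap on generated tokens (input + output)
    bad_words=["badword"],  # newly documented: words the model must not generate
)

llm = LLM(model="path/to/model")  # placeholder model path
outputs = llm.generate(["How old are you?"], sampling_params)

for output in outputs:
    print(output.outputs.text)  # RequestOutput, see docs section 2.5
```

With the served API, the same knobs appear in the hunks above: `top_p` is a native OpenAI-client parameter, while `top_k` and `min_p` are FastDeploy extensions and must be passed through `extra_body`.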