mirror of
https://github.com/PaddlePaddle/FastDeploy.git
synced 2025-12-24 13:28:13 +08:00
LLM.chat add "tools" param (#4415)
* llm add tools param initial commit
* llm add tools param bugfix
* offline add tools add unittests
* fix preprocessor
* move tools parameter into tasks
* change variable name
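In short, LLM.chat now accepts a tools argument alongside chat_template. A minimal usage sketch follows; the model path and the tool definition are illustrative, not taken from this commit:

# Illustrative sketch of the new parameter; model path and tool schema are made up.
from fastdeploy.entrypoints.llm import LLM

llm = LLM(model="/path/to/ERNIE-4.5-0.3B-Paddle")  # hypothetical local model path
tools = [
    {
        "type": "function",
        "function": {
            "name": "get_weather",
            "description": "Get real-time weather of a city",
            "parameters": {
                "type": "object",
                "properties": {"city": {"type": "string"}},
                "required": ["city"],
            },
        },
    }
]
# `tools` may be a dict, a list of dicts, a ChatCompletionToolsParam, or a list of
# ChatCompletionToolsParam; invalid definitions make chat() raise RuntimeError.
outputs = llm.chat(
    messages=[{"role": "user", "content": "What's the weather in Beijing today?"}],
    tools=tools,
)
print(outputs[0].outputs.text)

Note that the tool definitions only reach the prompt if the active chat template renders tools; the new unit test below passes a tool-aware chat_template explicitly.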
@@ -23,12 +23,14 @@ import traceback
import uuid
from typing import Any, Optional, Union

from pydantic import ValidationError
from tqdm import tqdm

from fastdeploy.engine.args_utils import EngineArgs
from fastdeploy.engine.engine import LLMEngine
from fastdeploy.engine.sampling_params import SamplingParams
from fastdeploy.entrypoints.chat_utils import load_chat_template
from fastdeploy.entrypoints.openai.protocol import ChatCompletionToolsParam
from fastdeploy.entrypoints.openai.tool_parsers import ToolParserManager
from fastdeploy.utils import (
    deprecated_kwargs_warning,
@@ -204,6 +206,7 @@ class LLM:
        use_tqdm: bool = True,
        chat_template_kwargs: Optional[dict[str, Any]] = None,
        chat_template: Optional[str] = None,
        tools: Optional[Union[ChatCompletionToolsParam, list[ChatCompletionToolsParam]]] = None,
        stream: bool = False,
    ):
        """
@@ -243,6 +246,12 @@ class LLM:
        if chat_template is None:
            chat_template = self.chat_template

        validated_tools = None
        if tools is not None:
            try:
                validated_tools = self._validate_tools(tools)
            except ValueError as e:
                raise RuntimeError(f"Failed to validate 'tools' parameter in chat method: {e}") from e
        messages_len = len(messages)
        for i in range(messages_len):
            messages[i] = {"messages": messages[i]}
@@ -251,6 +260,7 @@ class LLM:
            sampling_params=sampling_params,
            chat_template_kwargs=chat_template_kwargs,
            chat_template=chat_template,
            tools=validated_tools,
        )

        topk_logprobs = sampling_params[0].logprobs if sampling_params_len > 1 else sampling_params.logprobs
@@ -310,6 +320,8 @@ class LLM:
            if current_sampling_params.guided_decoding is not None:
                guided_decoding_dict = current_sampling_params.guided_decoding.to_dict()
                tasks.update(guided_decoding_dict)
            if kwargs.get("tools") is not None:
                tasks["tools"] = kwargs.get("tools")
            self.llm_engine.add_requests(tasks, current_sampling_params, **kwargs)
        return req_ids
@@ -558,6 +570,60 @@ class LLM:

        return incremental_result

    def _validate_tools(self, raw_tools: Any) -> Optional[list[dict]]:
        """
        Validate the format of the `tools` parameter for chat requests.
        Valid inputs are accepted and standardized, while invalid inputs raise ValueError.
        Empty dict/list will be returned as None.

        Args:
            raw_tools: Raw `tools` parameter obtained from kwargs (can be any type)

        Returns:
            Optional[List[Dict[str, Any]]]: Standardized list of valid tool dictionaries if validation passes;
                None if `raw_tools` is None or empty (empty dict/list).

        Raises:
            ValueError: Raised when input type is invalid or format does not meet standards.
        """
        if raw_tools is None:
            return None
        if isinstance(raw_tools, ChatCompletionToolsParam):
            return [raw_tools]
        if isinstance(raw_tools, list) and all(isinstance(t, ChatCompletionToolsParam) for t in raw_tools):
            if not raw_tools:
                return None
            else:
                return raw_tools

        if not isinstance(raw_tools, dict) and not isinstance(raw_tools, list):
            raise ValueError(
                f"Invalid tools top-level type! Expected None, dict (single tool) or list (multiple tools), "
                f"but got type '{type(raw_tools).__name__}' (value: {raw_tools})."
            )
        tools_list: list[dict[str, Any]] = [raw_tools] if isinstance(raw_tools, dict) else raw_tools

        if not tools_list:
            return None

        validated_tools = []
        for idx, tool in enumerate(tools_list):
            if not isinstance(tool, dict):
                raise ValueError(
                    f"Invalid element type in tools list! At index {idx}, "
                    f"expected dict (tool definition), but got type '{type(tool).__name__}' (value: {tool})."
                )

            try:
                validated_tool_obj = ChatCompletionToolsParam.model_validate(tool)
                validated_tools.append(validated_tool_obj.model_dump())
            except ValidationError as e:
                raise ValueError(
                    f"Invalid tool format at index {idx} in tools list! " f"Tool content: {tool}\nError details: {e}"
                ) from e

        return validated_tools


if __name__ == "__main__":
    # llm = LLM(model="llama_model")
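For reference, a minimal sketch of the normalization contract described in the _validate_tools docstring above; the llm instance and the tool value here are illustrative, not part of the diff:

# Illustrative only: assumes `llm` is an already-constructed LLM instance.
tool = {
    "type": "function",
    "function": {
        "name": "get_weather",
        "description": "Get real-time weather of a city",
        "parameters": {"type": "object", "properties": {"city": {"type": "string"}}},
    },
}
llm._validate_tools(None)          # None passes through as None
llm._validate_tools([])            # empty input is treated as "no tools" -> None
llm._validate_tools(tool)          # single dict -> [validated tool dict]
llm._validate_tools([tool, tool])  # list of dicts -> list of validated tool dicts
llm._validate_tools("oops")        # invalid top-level type -> raises ValueError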
@@ -19,6 +19,7 @@ import unittest
import weakref

from fastdeploy.entrypoints.llm import LLM
from fastdeploy.entrypoints.openai.protocol import ChatCompletionToolsParam

MODEL_NAME = os.getenv("MODEL_PATH") + "/ERNIE-4.5-0.3B-Paddle"
@@ -58,6 +59,133 @@ class TestChat(unittest.TestCase):
        outputs = self.llm.chat(messages=self.PROMPTS, sampling_params=None)
        self.assertEqual(len(self.PROMPTS), len(outputs))

    def test_chat_with_tools(self):
        """Test chat with tools:
        1. spliced_message (after chat_template) contains tool-related content
        2. Model output contains tool_call
        """
        prompts = [{"role": "user", "content": "北京海淀区今天天气怎么样?用摄氏度表示温度。"}]
        tools = [
            {
                "type": "function",
                "function": {
                    "name": "get_weather",
                    "description": "Determine weather in my location",
                    "parameters": {
                        "type": "object",
                        "properties": {
                            "location": {"type": "string", "description": "The city and state e.g. San Francisco, CA"},
                            "unit": {"type": "string", "enum": ["c", "f"]},
                        },
                        "additionalProperties": False,
                        "required": ["location", "unit"],
                    },
                    "strict": True,
                },
            }
        ]
        chat_template = "{%- if tools %}\n {{- '<|im_start|>system\\n' }}\n {%- if messages[0].role == 'system' %}\n {{- messages[0].content + '\\n\\n' }}\n {%- endif %}\n {{- \"# Tools\\n\\nYou may call one or more functions to assist with the user query.\\n\\nYou are provided with function signatures within <tools></tools> XML tags:\\n<tools>\" }}\n {%- for tool in tools %}\n {{- \"\\n\" }}\n {{- tool | tojson }}\n {%- endfor %}\n {{- \"\\n</tools>\\n\\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\\n<tool_call>\\n{\\\"name\\\": <function-name>, \\\"arguments\\\": <args-json-object>}\\n</tool_call><|im_end|>\\n\" }}\n{%- else %}\n {%- if messages[0].role == 'system' %}\n {{- '<|im_start|>system\\n' + messages[0].content + '<|im_end|>\\n' }}\n {%- endif %}\n{%- endif %}\n{%- set ns = namespace(multi_step_tool=true, last_query_index=messages|length - 1) %}\n{%- for message in messages[::-1] %}\n {%- set index = (messages|length - 1) - loop.index0 %}\n {%- if ns.multi_step_tool and message.role == \"user\" and message.content is string and not(message.content.startswith('<tool_response>') and message.content.endswith('</tool_response>')) %}\n {%- set ns.multi_step_tool = false %}\n {%- set ns.last_query_index = index %}\n {%- endif %}\n{%- endfor %}\n{%- for message in messages %}\n {%- if message.content is string %}\n {%- set content = message.content %}\n {%- else %}\n {%- set content = '' %}\n {%- endif %}\n {%- if (message.role == \"user\") or (message.role == \"system\" and not loop.first) %}\n {{- '<|im_start|>' + message.role + '\\n' + content + '<|im_end|>' + '\\n' }}\n {%- elif message.role == \"assistant\" %}\n {%- set reasoning_content = '' %}\n {%- if message.reasoning_content is string %}\n {%- set reasoning_content = message.reasoning_content %}\n {%- else %}\n {%- if '</think>' in content %}\n {%- set reasoning_content = content.split('</think>')[0].rstrip('\\n').split('<think>')[-1].lstrip('\\n') %}\n {%- set content = content.split('</think>')[-1].lstrip('\\n') %}\n {%- endif %}\n {%- endif %}\n {%- if loop.index0 > ns.last_query_index %}\n {%- if loop.last or (not loop.last and reasoning_content) %}\n {{- '<|im_start|>' + message.role + '\\n<think>\\n' + reasoning_content.strip('\\n') + '\\n</think>\\n\\n' + content.lstrip('\\n') }}\n {%- else %}\n {{- '<|im_start|>' + message.role + '\\n' + content }}\n {%- endif %}\n {%- else %}\n {{- '<|im_start|>' + message.role + '\\n' + content }}\n {%- endif %}\n {%- if message.tool_calls %}\n {%- for tool_call in message.tool_calls %}\n {%- if (loop.first and content) or (not loop.first) %}\n {{- '\\n' }}\n {%- endif %}\n {%- if tool_call.function %}\n {%- set tool_call = tool_call.function %}\n {%- endif %}\n {{- '<tool_call>\\n{\"name\": \"' }}\n {{- tool_call.name }}\n {{- '\", \"arguments\": ' }}\n {%- if tool_call.arguments is string %}\n {{- tool_call.arguments }}\n {%- else %}\n {{- tool_call.arguments | tojson }}\n {%- endif %}\n {{- '}\\n</tool_call>' }}\n {%- endfor %}\n {%- endif %}\n {{- '<|im_end|>\\n' }}\n {%- elif message.role == \"tool\" %}\n {%- if loop.first or (messages[loop.index0 - 1].role != \"tool\") %}\n {{- '<|im_start|>user' }}\n {%- endif %}\n {{- '\\n<tool_response>\\n' }}\n {{- content }}\n {{- '\\n</tool_response>' }}\n {%- if loop.last or (messages[loop.index0 + 1].role != \"tool\") %}\n {{- '<|im_end|>\\n' }}\n {%- endif %}\n {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n {{- '<|im_start|>assistant\\n' }}\n {%- if enable_thinking is defined and enable_thinking is false %}\n {{- '<think>\\n\\n</think>\\n\\n' }}\n {%- endif %}\n{%- endif %}"

        data_processor = self.llm.llm_engine.data_processor
        captured_spliced_message = None

        def capture_spliced_message(request_or_messages, **kwargs):
            """Wrap original messages2ids to capture spliced_message"""
            token_ids = data_processor.original_messages2ids(request_or_messages, **kwargs)
            nonlocal captured_spliced_message
            captured_spliced_message = request_or_messages.get("prompt_tokens")
            return token_ids

        data_processor.original_messages2ids = data_processor.messages2ids
        data_processor.messages2ids = capture_spliced_message

        try:
            outputs = self.llm.chat(
                messages=prompts,
                tools=tools,
                chat_template=chat_template,
                chat_template_kwargs={"enable_thinking": False},
                stream=False,
            )

            self.assertIsNotNone(captured_spliced_message, "Failed to capture spliced_message from messages2ids")
            self.assertIn(
                "<tools>",
                captured_spliced_message,
                f"spliced_message '{captured_spliced_message}' missing <tools> tag (chat_template not applied)",
            )

            output = outputs[0]
            self.assertEqual(len(prompts), len(outputs))
            self.assertTrue(hasattr(output, "outputs"))
            self.assertTrue(hasattr(output.outputs, "text"))
        finally:
            data_processor.messages2ids = data_processor.original_messages2ids

    def test_validate_tools(self):
        """Test both valid and invalid scenarios for _validate_tools method"""
        # Prepare valid test data
        valid_tool_dict = {
            "type": "function",
            "function": {
                "name": "get_weather",
                "description": "Get real-time weather of a city",
                "parameters": {"type": "object", "properties": {"city": {"type": "string"}}, "required": ["city"]},
            },
        }
        valid_tool_model = ChatCompletionToolsParam(**valid_tool_dict)
        valid_model_list = [valid_tool_model, valid_tool_model]
        valid_dict_list = [valid_tool_dict, valid_tool_dict]

        # Test valid scenarios
        # 1. Input is None
        self.assertIsNone(self.llm._validate_tools(None))

        # 2. Input is single ChatCompletionToolsParam instance
        result = self.llm._validate_tools(valid_tool_model)
        self.assertEqual(len(result), 1)
        self.assertIsInstance(result[0], ChatCompletionToolsParam)

        # 3. Input is list of ChatCompletionToolsParam instances
        self.assertEqual(self.llm._validate_tools(valid_model_list), valid_model_list)

        # 4. Input is single valid dict
        result = self.llm._validate_tools(valid_tool_dict)
        self.assertEqual(len(result), 1)
        self.assertIsInstance(result[0], dict)
        self.assertEqual(result[0]["type"], "function")

        # 5. Input is list of valid dicts
        result = self.llm._validate_tools(valid_dict_list)
        self.assertEqual(len(result), 2)
        self.assertIsInstance(result[1], dict)

        # 6. Input is empty list
        self.assertIsNone(self.llm._validate_tools([]))

        # Test invalid scenarios (should raise ValueError)
        # 1. Input is string (invalid top-level type)
        with self.assertRaises(ValueError):
            self.llm._validate_tools("invalid_string")

        # 2. Input list contains non-dict element
        with self.assertRaises(ValueError):
            self.llm._validate_tools([valid_tool_dict, 123])

        # 3. Tool dict missing required field (function.name)
        invalid_tool_missing_name = {"type": "function", "function": {"description": "Missing 'name' field"}}
        with self.assertRaises(ValueError):
            self.llm._validate_tools(invalid_tool_missing_name)

        # 4. Tool dict with wrong 'type' value
        invalid_tool_wrong_type = {"type": "invalid_type", "function": {"name": "test", "description": "Wrong type"}}
        with self.assertRaises(ValueError):
            self.llm._validate_tools(invalid_tool_wrong_type)

        # 5. Input is boolean
        with self.assertRaises(ValueError):
            self.llm._validate_tools(True)


if __name__ == "__main__":
    unittest.main()