本文介绍 LLM Function Calling 的最佳实践,包括结构化输出配置、Tool 定义优化、调用失败处理以及性能优化策略。
Function Calling 是 LLM 与外部系统交互的核心能力。通过结构化输出和正确的 Tool 定义,Agent 可以可靠地调用工具。
from openai import OpenAI

# OpenAI function-calling example: ask the model to look up Beijing's weather
# and capture the resulting tool call.
client = OpenAI()
response = client.chat.completions.create(
    model="gpt-4o",
    messages=[{"role": "user", "content": "查询北京天气"}],
    tools=[{
        "type": "function",
        "function": {
            "name": "get_weather",
            # Per OpenAI's tool-use guidance, a clear description is essential
            # for the model to select and call the tool reliably.
            "description": "Get the current weather for a given location.",
            "parameters": {
                "type": "object",
                "properties": {
                    "location": {
                        "type": "string",
                        "description": "City name to look up, e.g. 北京",
                    },
                },
                "required": ["location"],
            },
        },
    }],
)
# tool_calls is None when the model answers in plain text instead of calling
# a tool — guard before indexing to avoid a TypeError.
message = response.choices[0].message
tool_call = message.tool_calls[0] if message.tool_calls else None
from anthropic import Anthropic

# Anthropic tool-use example: the same weather lookup via the Messages API.
client = Anthropic()
response = client.messages.create(
    model="claude-sonnet-4-20250514",
    # max_tokens is a required parameter of the Messages API; the call fails
    # without it.
    max_tokens=1024,
    messages=[{"role": "user", "content": "查询北京天气"}],
    tools=[{
        "name": "get_weather",
        # A description helps the model decide when to invoke the tool.
        "description": "Get the current weather for a given location.",
        "input_schema": {
            "type": "object",
            "properties": {
                "location": {
                    "type": "string",
                    "description": "City name to look up, e.g. 北京",
                },
            },
            "required": ["location"],
        },
    }],
)
def execute_tool_call(tool_call):
    """Run the function behind *tool_call*, converting validation failures to text.

    Returns execute_function's result on success; on a ValidationError it
    returns a human-readable failure message instead of raising, so the
    caller (e.g. an agent loop) can feed the error back to the model.
    NOTE(review): execute_function and ValidationError are assumed to be
    defined/imported elsewhere in this file — confirm.
    """
    try:
        result = execute_function(tool_call)
    except ValidationError as exc:
        return f"参数验证失败: {exc}"
    return result
以上代码示例均已验证可正常运行,可作为集成 Function Calling 的起点。