fix: impose tool rules for anthropic (#1342)

Sarah Wooders 2025-03-20 09:55:19 -07:00 committed by GitHub
parent 82c43a9262
commit f995f7a4cf
3 changed files with 35 additions and 24 deletions


@@ -606,25 +606,6 @@ def _prepare_anthropic_request(
         # TODO eventually enable parallel tool use
         data["tools"] = anthropic_tools
 
-        # tool_choice_type other than "auto" only plays nice if thinking goes inside the tool calls
-        if put_inner_thoughts_in_kwargs:
-            if len(anthropic_tools) == 1:
-                data["tool_choice"] = {
-                    "type": "tool",
-                    "name": anthropic_tools[0]["name"],
-                    "disable_parallel_tool_use": True,
-                }
-            else:
-                data["tool_choice"] = {
-                    "type": "any",
-                    "disable_parallel_tool_use": True,
-                }
-        else:
-            data["tool_choice"] = {
-                "type": "auto",
-                "disable_parallel_tool_use": True,
-            }
-
     # Move 'system' to the top level
     assert data["messages"][0]["role"] == "system", f"Expected 'system' role in messages[0]:\n{data['messages'][0]}"
     data["system"] = data["messages"][0]["content"]
@@ -720,6 +701,7 @@ def anthropic_bedrock_chat_completions_request(
     # Make the request
     try:
         # bedrock does not support certain args
+        print("Warning: Tool rules not supported with Anthropic Bedrock")
         data["tool_choice"] = {"type": "any"}
         log_event(name="llm_request_sent", attributes=data)
         response = client.messages.create(**data)
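
Since the Bedrock path unconditionally overwrites tool_choice, any specific tool rule prepared upstream is discarded before the request is sent; a toy illustration of the effect (the request fields and model id are placeholders, not from this diff):

data = {
    "model": "anthropic.claude-3-5-sonnet-20240620-v1:0",  # illustrative Bedrock model id
    "max_tokens": 1024,
    "messages": [{"role": "user", "content": "hello"}],
    "tool_choice": {"type": "tool", "name": "send_message"},  # rule set upstream
}
data["tool_choice"] = {"type": "any"}  # Bedrock override discards the specific rule
print(data["tool_choice"])  # {'type': 'any'}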
@@ -862,7 +844,6 @@ def anthropic_chat_completions_process_stream(
                 total_tokens=prompt_tokens,
             ),
         )
-    log_event(name="llm_request_sent", attributes=chat_completion_request.model_dump())
 
     if stream_interface:


@@ -374,14 +374,26 @@ def create(
         # Force tool calling
         tool_call = None
         if force_tool_call is not None:
-            tool_call = {"type": "function", "function": {"name": force_tool_call}}
+            # tool_call = {"type": "function", "function": {"name": force_tool_call}}
+            tool_choice = {"type": "tool", "name": force_tool_call}
+            tools = [{"type": "function", "function": f} for f in functions if f["name"] == force_tool_call]
             assert functions is not None
 
             # need to have this setting to be able to put inner thoughts in kwargs
             llm_config.put_inner_thoughts_in_kwargs = True
+        else:
+            if llm_config.put_inner_thoughts_in_kwargs:
+                # tool_choice_type other than "auto" only plays nice if thinking goes inside the tool calls
+                tool_choice = {"type": "any", "disable_parallel_tool_use": True}
+            else:
+                tool_choice = {"type": "auto", "disable_parallel_tool_use": True}
+            tools = [{"type": "function", "function": f} for f in functions]
 
         chat_completion_request = ChatCompletionRequest(
             model=llm_config.model,
             messages=[cast_message_to_subtype(m.to_openai_dict()) for m in messages],
-            tools=([{"type": "function", "function": f} for f in functions] if functions else None),
-            tool_choice=tool_call,
+            tools=tools,
+            tool_choice=tool_choice,
             max_tokens=llm_config.max_tokens,  # Note: max_tokens is required for Anthropic API
             temperature=llm_config.temperature,
             stream=stream,
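
Concretely, the two branches above produce the following tool_choice/tools pairs; a standalone check derived directly from the diff, with an illustrative function list:

functions = [
    {"name": "send_message", "parameters": {"type": "object", "properties": {}}},
    {"name": "archival_memory_search", "parameters": {"type": "object", "properties": {}}},
]

# Forced path: pin tool_choice to the forced tool and filter tools down to it
force_tool_call = "send_message"
tool_choice = {"type": "tool", "name": force_tool_call}
tools = [{"type": "function", "function": f} for f in functions if f["name"] == force_tool_call]
assert tool_choice == {"type": "tool", "name": "send_message"} and len(tools) == 1

# Unforced path: "any" when inner thoughts ride inside tool-call kwargs, else "auto"
put_inner_thoughts_in_kwargs = True
tool_choice = (
    {"type": "any", "disable_parallel_tool_use": True}
    if put_inner_thoughts_in_kwargs
    else {"type": "auto", "disable_parallel_tool_use": True}
)
tools = [{"type": "function", "function": f} for f in functions]
assert tool_choice["type"] == "any" and len(tools) == 2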


@@ -74,7 +74,25 @@ class ToolFunctionChoice(BaseModel):
     function: FunctionCall
 
 
-ToolChoice = Union[Literal["none", "auto", "required"], ToolFunctionChoice]
+class AnthropicToolChoiceTool(BaseModel):
+    type: str = "tool"
+    name: str
+    disable_parallel_tool_use: Optional[bool] = False
+
+
+class AnthropicToolChoiceAny(BaseModel):
+    type: str = "any"
+    disable_parallel_tool_use: Optional[bool] = False
+
+
+class AnthropicToolChoiceAuto(BaseModel):
+    type: str = "auto"
+    disable_parallel_tool_use: Optional[bool] = False
+
+
+ToolChoice = Union[
+    Literal["none", "auto", "required", "any"], ToolFunctionChoice, AnthropicToolChoiceTool, AnthropicToolChoiceAny, AnthropicToolChoiceAuto
+]
 
 
 ## tools ##
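
A quick usage sketch for the new union members (relies on the class definitions added above; serialization via pydantic v2's model_dump, which this diff already calls elsewhere; the tool name is illustrative):

choice = AnthropicToolChoiceAny(disable_parallel_tool_use=True)
print(choice.model_dump())  # {'type': 'any', 'disable_parallel_tool_use': True}

forced = AnthropicToolChoiceTool(name="send_message")
print(forced.model_dump())  # {'type': 'tool', 'name': 'send_message', 'disable_parallel_tool_use': False}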