fix: anthropic streaming (#1768)

Co-authored-by: Kevin Lin <klin5061@gmail.com>
This commit is contained in:
Authored by cthomas on 2025-04-17 16:22:14 -07:00; committed via GitHub.
parent 4d38ea11fa
commit 2ed0c93d07
2 changed files with 7 additions and 2 deletions

View File

@ -619,7 +619,7 @@ class Message(BaseMessage):
             text_content = self.content[0].text
         # Otherwise, check if we have TextContent and multiple other parts
         elif self.content and len(self.content) > 1:
-            text = [content for content in self.content if isinstance(self.content[0], TextContent)]
+            text = [content for content in self.content if isinstance(content, TextContent)]
             if len(text) > 1:
                 assert len(text) == 1, f"multiple text content parts found in a single message: {self.content}"
             text_content = text[0].text

View File

@ -6,6 +6,8 @@ from collections import deque
 from datetime import datetime
 from typing import AsyncGenerator, Literal, Optional, Union
+import demjson3 as demjson
 from letta.constants import DEFAULT_MESSAGE_TOOL, DEFAULT_MESSAGE_TOOL_KWARG
 from letta.helpers.datetime_helpers import is_utc_datetime
 from letta.interface import AgentInterface
@ -530,7 +532,6 @@ class StreamingServerInterface(AgentChunkStreamingInterface):
             try:
                 # NOTE: this is hardcoded for our DeepSeek API integration
                 json_reasoning_content = parse_json(self.expect_reasoning_content_buffer)
-                print(f"json_reasoning_content: {json_reasoning_content}")
                 processed_chunk = ToolCallMessage(
                     id=message_id,
@ -547,6 +548,10 @@ class StreamingServerInterface(AgentChunkStreamingInterface):
             except json.JSONDecodeError as e:
                 print(f"Failed to interpret reasoning content ({self.expect_reasoning_content_buffer}) as JSON: {e}")
+                return None
+            except demjson.JSONDecodeError as e:
+                print(f"Failed to interpret reasoning content ({self.expect_reasoning_content_buffer}) as JSON: {e}")
                 return None
         # Else,
         # return None