Mirror of https://github.com/cpacker/MemGPT.git (synced 2025-06-03 04:30:22 +00:00)
fix: revert conditional tool return message creation (#1930)
Co-authored-by: Matt Zhou <mattzh1314@gmail.com>
parent cc315e8d03
commit 66659953c9
@@ -210,20 +210,20 @@ def create_letta_messages_from_llm_response(
     # TODO: Use ToolReturnContent instead of TextContent
     # TODO: This helps preserve ordering
-    if function_response:
-        tool_message = Message(
-            role=MessageRole.tool,
-            content=[TextContent(text=package_function_response(function_call_success, function_response))],
-            organization_id=actor.organization_id,
-            agent_id=agent_id,
-            model=model,
-            tool_calls=[],
-            tool_call_id=tool_call_id,
-            created_at=get_utc_time(),
-        )
-        if pre_computed_tool_message_id:
-            tool_message.id = pre_computed_tool_message_id
-        messages.append(tool_message)
+    tool_message = Message(
+        role=MessageRole.tool,
+        content=[TextContent(text=package_function_response(function_call_success, function_response))],
+        organization_id=actor.organization_id,
+        agent_id=agent_id,
+        model=model,
+        tool_calls=[],
+        tool_call_id=tool_call_id,
+        created_at=get_utc_time(),
+        name=function_name,
+    )
+    if pre_computed_tool_message_id:
+        tool_message.id = pre_computed_tool_message_id
+    messages.append(tool_message)
 
     if add_heartbeat_request_system_message:
         heartbeat_system_message = create_heartbeat_system_message(
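For context on the revert: the conditional version skipped creating the tool return Message whenever function_response was empty, which can leave an assistant tool_call without a matching role="tool" reply under OpenAI-style message pairing. Below is a minimal sketch of that invariant; it is not Letta's actual code, and package_function_response is stubbed here purely for illustration:

# Minimal sketch (not Letta's implementation): why the tool return message
# is created unconditionally. OpenAI-style chat APIs expect every assistant
# tool_call to be answered by a role="tool" message carrying the matching
# tool_call_id, even when the function produced no output.
import json


def package_function_response(success: bool, response: str) -> str:
    # Stub of the helper referenced in the diff: wraps the raw function
    # output with a status flag so the model can see whether the call worked.
    return json.dumps({"status": "OK" if success else "Failed", "message": response})


def build_tool_return(tool_call_id: str, function_response: str, success: bool) -> dict:
    # Always emit the tool message; skipping it when function_response is
    # empty (the behavior this commit reverts) leaves the tool_call unpaired.
    return {
        "role": "tool",
        "tool_call_id": tool_call_id,
        "content": package_function_response(success, function_response or ""),
    }


print(build_tool_return("call_123", "", success=True))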
@@ -196,13 +196,15 @@ async def test_multiple_messages(disable_e2b_api_key, client, agent, endpoint):
    stream = await async_client.chat.completions.create(**request.model_dump(exclude_none=True))
    async with stream:
        async for chunk in stream:
            print(chunk)
            if chunk.choices and chunk.choices[0].delta.content:
                print(chunk.choices[0].delta.content)
    print("============================================")
    request = _get_chat_request("What are you up to?")
    stream = await async_client.chat.completions.create(**request.model_dump(exclude_none=True))
    async with stream:
        async for chunk in stream:
            print(chunk)
            if chunk.choices and chunk.choices[0].delta.content:
                print(chunk.choices[0].delta.content)


@pytest.mark.asyncio
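The test exercises the OpenAI Python SDK's async streaming interface against a Letta endpoint. A standalone sketch of that pattern follows, assuming an OpenAI-compatible server; the base_url, api_key, and model values are placeholders rather than values from the test, and _get_chat_request is replaced by an inline messages list:

# Minimal sketch of the streaming pattern exercised by the test. The
# AsyncStream returned by the SDK supports both `async with` (to release
# the connection promptly) and `async for` (to iterate over chunks).
import asyncio

from openai import AsyncOpenAI


async def main() -> None:
    # Placeholder endpoint and credentials, not taken from the test suite.
    client = AsyncOpenAI(base_url="http://localhost:8283/v1", api_key="sk-placeholder")
    stream = await client.chat.completions.create(
        model="gpt-4o-mini",  # placeholder model name
        messages=[{"role": "user", "content": "What are you up to?"}],
        stream=True,
    )
    async with stream:
        async for chunk in stream:
            # Each chunk carries a delta; content may be None for role-only
            # or tool-call deltas, so guard before printing, as the test does.
            if chunk.choices and chunk.choices[0].delta.content:
                print(chunk.choices[0].delta.content, end="", flush=True)
    print()


asyncio.run(main())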