# Mirror of https://github.com/cpacker/MemGPT.git
# Synced 2025-06-03 04:30:22 +00:00
from typing import List, Optional

from pydantic import BaseModel, Field, HttpUrl

from letta.constants import DEFAULT_MESSAGE_TOOL, DEFAULT_MESSAGE_TOOL_KWARG
from letta.schemas.message import MessageCreate


class LettaRequest(BaseModel):
    """Payload for sending messages to an agent.

    Besides the messages themselves, carries options controlling how calls to
    the designated "message tool" are surfaced back to the client (as raw tool
    calls or parsed into `AssistantMessage` objects).
    """

    # The batch of messages to deliver to the agent in this request.
    messages: List[MessageCreate] = Field(..., description="The messages to be sent to the agent.")

    # When True, the server unwraps calls to the designated message tool into
    # AssistantMessage objects rather than returning them as raw tool calls.
    use_assistant_message: bool = Field(
        default=True,
        description="Whether the server should parse specific tool call arguments (default `send_message`) as `AssistantMessage` objects.",
    )

    # Which tool is treated as the "message tool" for the parsing above.
    assistant_message_tool_name: str = Field(
        default=DEFAULT_MESSAGE_TOOL,
        description="The name of the designated message tool.",
    )

    # Which keyword argument of that tool holds the message text.
    assistant_message_tool_kwarg: str = Field(
        default=DEFAULT_MESSAGE_TOOL_KWARG,
        description="The name of the message argument in the designated message tool.",
    )
|
|
|
|
|
|
class LettaStreamingRequest(LettaRequest):
    """`LettaRequest` variant for streaming endpoints.

    Adds a switch for per-token streaming on top of the base request fields.
    """

    # Opt-in to token-level streaming (step streaming must also be enabled).
    stream_tokens: bool = Field(
        default=False,
        description="Flag to determine if individual tokens should be streamed. Set to True for token streaming (requires stream_steps = True).",
    )
|
|
|
|
|
|
class LettaBatchRequest(LettaRequest):
    """A `LettaRequest` addressed to one specific agent, for use inside a batch."""

    # Target agent for this entry of the batch.
    agent_id: str = Field(..., description="The ID of the agent to send this batch request for")
|
|
|
|
|
|
class CreateBatch(BaseModel):
    """Request body for submitting a batch of per-agent requests.

    Optionally registers a callback URL that is POSTed to when the batch
    finishes processing.
    """

    # The individual per-agent requests making up this batch.
    requests: List[LettaBatchRequest] = Field(..., description="List of requests to be processed in batch.")

    # Completion webhook; validated as an HTTP(S) URL by pydantic when set.
    callback_url: Optional[HttpUrl] = Field(None, description="Optional URL to call via POST when the batch completes.")
|