Mirror of https://github.com/cpacker/MemGPT.git, synced 2025-06-03 04:30:22 +00:00

revert: Revert "feat: fix streaming put_inner_thoughts_in_kwargs" (#1912)

parent 9e83dbc27f
commit eb97f157ff

@@ -1,7 +1,6 @@
 import copy
 import json
 import warnings
-from collections import OrderedDict
 from typing import Any, List, Union
 
 import requests
@@ -11,30 +10,6 @@ from letta.schemas.openai.chat_completion_response import ChatCompletionResponse
 from letta.utils import json_dumps, printd
 
 
-def convert_to_structured_output(openai_function: dict) -> dict:
-    """Convert function call objects to structured output objects
-
-    See: https://platform.openai.com/docs/guides/structured-outputs/supported-schemas
-    """
-    structured_output = {
-        "name": openai_function["name"],
-        "description": openai_function["description"],
-        "strict": True,
-        "parameters": {"type": "object", "properties": {}, "additionalProperties": False, "required": []},
-    }
-
-    for param, details in openai_function["parameters"]["properties"].items():
-        structured_output["parameters"]["properties"][param] = {"type": details["type"], "description": details["description"]}
-
-        if "enum" in details:
-            structured_output["parameters"]["properties"][param]["enum"] = details["enum"]
-
-    # Add all properties to required list
-    structured_output["parameters"]["required"] = list(structured_output["parameters"]["properties"].keys())
-
-    return structured_output
-
-
 def make_post_request(url: str, headers: dict[str, str], data: dict[str, Any]) -> dict[str, Any]:
     printd(f"Sending request to {url}")
     try:
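Note: for context on the helper deleted above, it rewrites an OpenAI function-call schema into the strict structured-output shape. A minimal sketch of its effect, using a hypothetical tool schema (names and descriptions illustrative, not from this commit):

    fn = {
        "name": "send_message",
        "description": "Send a message to the user.",
        "parameters": {
            "type": "object",
            "properties": {"message": {"type": "string", "description": "Message contents"}},
            "required": [],
        },
    }
    out = convert_to_structured_output(fn)
    # out["strict"] is True, "additionalProperties" is False, and every
    # property is forced into the required list:
    assert out["parameters"]["required"] == ["message"]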
@@ -103,34 +78,33 @@ def add_inner_thoughts_to_functions(
     inner_thoughts_key: str,
     inner_thoughts_description: str,
     inner_thoughts_required: bool = True,
     # inner_thoughts_to_front: bool = True, TODO support sorting somewhere, probably in the to_dict?
 ) -> List[dict]:
-    """Add an inner_thoughts kwarg to every function in the provided list, ensuring it's the first parameter"""
+    """Add an inner_thoughts kwarg to every function in the provided list"""
+    # return copies
     new_functions = []
 
+    # functions is a list of dicts in the OpenAI schema (https://platform.openai.com/docs/api-reference/chat/create)
     for function_object in functions:
+        function_params = function_object["parameters"]["properties"]
+        required_params = list(function_object["parameters"]["required"])
+
+        # if the inner thoughts arg doesn't exist, add it
+        if inner_thoughts_key not in function_params:
+            function_params[inner_thoughts_key] = {
+                "type": "string",
+                "description": inner_thoughts_description,
+            }
+
+        # make sure it's tagged as required
         new_function_object = copy.deepcopy(function_object)
-
-        # Create a new OrderedDict with inner_thoughts as the first item
-        new_properties = OrderedDict()
-        new_properties[inner_thoughts_key] = {
-            "type": "string",
-            "description": inner_thoughts_description,
-        }
-
-        # Add the rest of the properties
-        new_properties.update(function_object["parameters"]["properties"])
-
-        # Cast OrderedDict back to a regular dict
-        new_function_object["parameters"]["properties"] = dict(new_properties)
-
-        # Update required parameters if necessary
-        if inner_thoughts_required:
-            required_params = new_function_object["parameters"].get("required", [])
-            if inner_thoughts_key not in required_params:
-                required_params.insert(0, inner_thoughts_key)
-                new_function_object["parameters"]["required"] = required_params
+        if inner_thoughts_required and inner_thoughts_key not in required_params:
+            required_params.append(inner_thoughts_key)
+            new_function_object["parameters"]["required"] = required_params
 
         new_functions.append(new_function_object)
 
+    # return a list of copies
     return new_functions
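Note: to make the two behaviors above concrete, a small sketch (schema and descriptions illustrative). The reverted version forced the inner-thoughts key to the front via OrderedDict; the restored version adds it alongside existing properties and appends it to the required list:

    fn = {
        "name": "archival_memory_search",
        "description": "Search archival memory.",
        "parameters": {
            "type": "object",
            "properties": {"query": {"type": "string", "description": "Search terms"}},
            "required": ["query"],
        },
    }
    out = add_inner_thoughts_to_functions(
        functions=[fn],
        inner_thoughts_key="inner_thoughts",
        inner_thoughts_description="Private reasoning, not shown to the user",
    )[0]
    # restored behavior: the key is appended, not moved to the front
    assert out["parameters"]["required"] == ["query", "inner_thoughts"]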
@@ -140,7 +140,6 @@ def create(
         raise ValueError(f"OpenAI key is missing from letta config file")
 
     data = build_openai_chat_completions_request(llm_config, messages, user_id, functions, function_call, use_tool_naming, max_tokens)
-    print(f"Data.tools: {data.tools}")
 
     if stream:  # Client requested token streaming
         data.stream = True
@@ -9,11 +9,7 @@ from httpx_sse._exceptions import SSEError
 
 from letta.constants import OPENAI_CONTEXT_WINDOW_ERROR_SUBSTRING
 from letta.errors import LLMError
-from letta.llm_api.helpers import (
-    add_inner_thoughts_to_functions,
-    convert_to_structured_output,
-    make_post_request,
-)
+from letta.llm_api.helpers import add_inner_thoughts_to_functions, make_post_request
 from letta.local_llm.constants import (
     INNER_THOUGHTS_KWARG,
     INNER_THOUGHTS_KWARG_DESCRIPTION,
@@ -116,7 +112,7 @@ def build_openai_chat_completions_request(
     use_tool_naming: bool,
     max_tokens: Optional[int],
 ) -> ChatCompletionRequest:
-    if functions and llm_config.put_inner_thoughts_in_kwargs:
+    if llm_config.put_inner_thoughts_in_kwargs:
         functions = add_inner_thoughts_to_functions(
             functions=functions,
             inner_thoughts_key=INNER_THOUGHTS_KWARG,
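Note: the dropped `functions and` guard matters when a request carries no tools. add_inner_thoughts_to_functions iterates its functions argument, so under the restored condition a call where functions is None would raise (hypothetical repro; key and description illustrative):

    add_inner_thoughts_to_functions(
        functions=None,  # caller supplied no tools
        inner_thoughts_key="inner_thoughts",
        inner_thoughts_description="Private reasoning",
    )
    # -> TypeError: 'NoneType' object is not iterable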
@@ -158,8 +154,8 @@ def build_openai_chat_completions_request(
     )
     # https://platform.openai.com/docs/guides/text-generation/json-mode
     # only supported by gpt-4o, gpt-4-turbo, or gpt-3.5-turbo
-    # if "gpt-4o" in llm_config.model or "gpt-4-turbo" in llm_config.model or "gpt-3.5-turbo" in llm_config.model:
-    #     data.response_format = {"type": "json_object"}
+    if "gpt-4o" in llm_config.model or "gpt-4-turbo" in llm_config.model or "gpt-3.5-turbo" in llm_config.model:
+        data.response_format = {"type": "json_object"}
 
     if "inference.memgpt.ai" in llm_config.model_endpoint:
         # override user id for inference.memgpt.ai
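Note: a minimal raw chat-completions payload using the JSON mode re-enabled above (values illustrative). One OpenAI-side caveat worth knowing: with response_format {"type": "json_object"}, the word "JSON" must appear somewhere in the messages, or the API rejects the request:

    payload = {
        "model": "gpt-4o",
        "messages": [{"role": "user", "content": "Reply in JSON with keys name and age."}],
        "response_format": {"type": "json_object"},
    }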
@@ -465,13 +461,6 @@ def openai_chat_completions_request_stream(
         data.pop("tools")
         data.pop("tool_choice", None)  # extra safe, should exist always (default="auto")
 
-    if "tools" in data:
-        for tool in data["tools"]:
-            # tool["strict"] = True
-            tool["function"] = convert_to_structured_output(tool["function"])
-
-    print(f"\n\n\n\nData[tools]: {json.dumps(data['tools'], indent=2)}")
-
     printd(f"Sending request to {url}")
     try:
         return _sse_post(url=url, data=data, headers=headers)