Mirror of https://github.com/cpacker/MemGPT.git (synced 2025-06-03 04:30:22 +00:00)
fix: misc fixes (bad link to old docs, composio print statement, context window selection) (#1992)

parent b990e8b3af
commit b529588907
@@ -10,7 +10,7 @@ import letta.utils as utils
 from letta import create_client
 from letta.agent import Agent, save_agent
 from letta.config import LettaConfig
-from letta.constants import CLI_WARNING_PREFIX, LETTA_DIR
+from letta.constants import CLI_WARNING_PREFIX, LETTA_DIR, MIN_CONTEXT_WINDOW
 from letta.local_llm.constants import ASSISTANT_MESSAGE_CLI_SYMBOL
 from letta.log import get_logger
 from letta.metadata import MetadataStore
@@ -244,6 +244,19 @@ def run(
     llm_model_name = questionary.select("Select LLM model:", choices=llm_choices).ask().model
     llm_config = [llm_config for llm_config in llm_configs if llm_config.model == llm_model_name][0]
 
+    # option to override context window
+    if llm_config.context_window is not None:
+        context_window_validator = lambda x: x.isdigit() and int(x) > MIN_CONTEXT_WINDOW and int(x) <= llm_config.context_window
+        context_window_input = questionary.text(
+            "Select LLM context window limit (hit enter for default):",
+            default=str(llm_config.context_window),
+            validate=context_window_validator,
+        ).ask()
+        if context_window_input is not None:
+            llm_config.context_window = int(context_window_input)
+        else:
+            sys.exit(1)
+
     # choose from list of embedding configs
     embedding_configs = client.list_embedding_configs()
     embedding_options = [embedding_config.embedding_model for embedding_config in embedding_configs]
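For context on the hunk above: questionary's validate callback receives the raw string the user has typed so far and must return a truthy value for the input to be accepted. A minimal standalone sketch of the same bounds check, with the model maximum stubbed as a plain value (an assumption for illustration, not part of the diff):

# sketch: validate a user-supplied context window against a lower and an upper bound
MIN_CONTEXT_WINDOW = 4000  # mirrors the new constant in letta.constants
MODEL_MAX = 8192           # stand-in for llm_config.context_window

def valid_context_window(raw: str) -> bool:
    # non-numeric input fails isdigit(); the bounds are MIN < value <= MODEL_MAX
    return raw.isdigit() and MIN_CONTEXT_WINDOW < int(raw) <= MODEL_MAX

assert valid_context_window("8000")
assert not valid_context_window("4000")  # must be strictly greater than the minimum
assert not valid_context_window("9000")  # cannot exceed the model maximum
assert not valid_context_window("4k")    # digits only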
@@ -18,6 +18,9 @@ IN_CONTEXT_MEMORY_KEYWORD = "CORE_MEMORY"
 # OpenAI error message: Invalid 'messages[1].tool_calls[0].id': string too long. Expected a string with maximum length 29, but got a string with length 36 instead.
 TOOL_CALL_ID_MAX_LEN = 29
 
+# minimum context window size
+MIN_CONTEXT_WINDOW = 4000
+
 # embeddings
 MAX_EMBEDDING_DIM = 4096  # maximum supported embedding size - do NOT change or else DBs will need to be reset
 
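The CLI above exits when the override is rejected, but a floor constant like this can also be applied by clamping; a hypothetical helper for illustration (clamp_context_window is invented, the diff only defines the constant):

MIN_CONTEXT_WINDOW = 4000

def clamp_context_window(requested: int, model_max: int) -> int:
    # hypothetical helper: keep the window within [MIN_CONTEXT_WINDOW, model_max]
    return max(MIN_CONTEXT_WINDOW, min(requested, model_max))

print(clamp_context_window(2048, 8192))   # 4000
print(clamp_context_window(16384, 8192))  # 8192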
@@ -26,6 +26,9 @@ from letta.streaming_interface import AgentRefreshStreamingInterface
 
 # interface = interface()
 
+# disable composio print on exit
+os.environ["COMPOSIO_DISABLE_VERSION_CHECK"] = "true"
+
 app = typer.Typer(pretty_exceptions_enable=False)
 app.command(name="run")(run)
 app.command(name="version")(version)
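Environment flags like this only take effect if they are set before the code that reads them runs; a minimal sketch of the ordering, assuming composio checks the flag at import or interpreter-exit time:

import os

# set the flag before any composio import so the version-check printout is suppressed
os.environ["COMPOSIO_DISABLE_VERSION_CHECK"] = "true"

import composio  # noqa: E402  (deliberately imported after the env var is set)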
main.py (2 changed lines)
@@ -1,6 +1,6 @@
 import typer
 
 typer.secho(
-    "Command `python main.py` no longer supported. Please run `letta run`. See https://letta.readme.io/docs/quickstart.",
+    "Command `python main.py` no longer supported. Please run `letta run`. See https://docs.letta.com for more info.",
     fg=typer.colors.YELLOW,
 )
@@ -49,6 +49,13 @@ def test_letta_run_create_new_agent(swap_letta_config):
     except (pexpect.TIMEOUT, pexpect.EOF):
         print("[WARNING] LLM model selection step was skipped.")
 
+    # Optional: Context window selection
+    try:
+        child.expect("Select LLM context window limit", timeout=20)
+        child.sendline("")
+    except (pexpect.TIMEOUT, pexpect.EOF):
+        print("[WARNING] Context window selection step was skipped.")
+
     # Optional: Embedding model selection
     try:
         child.expect("Select embedding model:", timeout=20)
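The try/except-per-prompt pattern now appears several times in this test; a sketch of a helper that would fold it up (expect_optional is an invented name, not part of the diff):

import pexpect

def expect_optional(child: pexpect.spawn, pattern: str, response: str = "", timeout: int = 20) -> bool:
    # answer an optional prompt if it appears; report (rather than fail) when it does not
    try:
        child.expect(pattern, timeout=timeout)
        child.sendline(response)
        return True
    except (pexpect.TIMEOUT, pexpect.EOF):
        print(f"[WARNING] Prompt {pattern!r} was skipped.")
        return False

# usage, mirroring the test above:
# expect_optional(child, "Select LLM context window limit")
# expect_optional(child, "Select embedding model:")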
@@ -63,6 +70,7 @@ def test_letta_run_create_new_agent(swap_letta_config):
     child.expect("Enter your message:", timeout=60)
     # Capture the output up to this point
     full_output = child.before
+    assert full_output is not None, "No output was captured."
     # Count occurrences of inner thoughts
     cloud_emoji_count = full_output.count(INNER_THOUGHTS_CLI_SYMBOL)
     assert cloud_emoji_count == 1, f"It appears that there are multiple instances of inner thought outputted."
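The new assert guards against child.before being None, which is its value before any successful match; pexpect also returns bytes from child.before unless the spawn was given an encoding. A short sketch of both points (the command and symbol below are placeholders, not from the diff):

import pexpect

# passing encoding= makes child.before a str rather than bytes
child = pexpect.spawn("letta run", encoding="utf-8", timeout=60)
child.expect("Enter your message:")

full_output = child.before  # None until a match has happened, hence the assert above
assert full_output is not None, "No output was captured."

INNER_THOUGHTS_CLI_SYMBOL = "💭"  # placeholder; the real constant lives in letta's constants module
print(full_output.count(INNER_THOUGHTS_CLI_SYMBOL))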