from typing import TYPE_CHECKING, List

from fastapi import APIRouter, Depends

from letta.schemas.embedding_config import EmbeddingConfig
from letta.schemas.llm_config import LLMConfig
from letta.server.rest_api.utils import get_letta_server

if TYPE_CHECKING:
    from letta.server.server import SyncServer

router = APIRouter(prefix="/models", tags=["models", "llms"])


@router.get("/", response_model=List[LLMConfig], operation_id="list_models")
def list_llm_backends(
    server: "SyncServer" = Depends(get_letta_server),
):
    """List the LLM backends (model configurations) available on the server."""
    models = server.list_llm_models()
    print(models)  # debug: echo the returned configs to stdout
    return models


@router.get("/embedding", response_model=List[EmbeddingConfig], operation_id="list_embedding_models")
def list_embedding_backends(
    server: "SyncServer" = Depends(get_letta_server),
):
    """List the embedding model configurations available on the server."""
    models = server.list_embedding_models()
    print(models)  # debug: echo the returned configs to stdout
    return models
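

# --- Usage sketch (illustrative only, not part of the original module) ---
# A minimal example of calling these two endpoints over HTTP, assuming a Letta
# server is already running and this router is mounted at its API prefix.
# The base URL, port, and "/v1" prefix below are assumptions for illustration,
# not confirmed values from this file.
if __name__ == "__main__":
    import httpx

    base_url = "http://localhost:8283/v1"  # hypothetical host, port, and mount prefix

    # GET /models/ returns the available LLM backends as a JSON list of LLMConfig objects.
    resp = httpx.get(f"{base_url}/models/")
    resp.raise_for_status()
    llm_models = resp.json()
    print(llm_models)

    # GET /models/embedding returns the available embedding backends as EmbeddingConfig objects.
    resp = httpx.get(f"{base_url}/models/embedding")
    resp.raise_for_status()
    embedding_models = resp.json()
    print(embedding_models)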