letta-nightly 0.7.2.dev20250423222439__tar.gz → 0.7.3.dev20250424054013__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/PKG-INFO +1 -1
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/__init__.py +1 -1
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/agent.py +2 -1
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/agents/letta_agent.py +2 -1
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/agents/letta_agent_batch.py +8 -3
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/agents/voice_agent.py +2 -2
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/client/client.py +3 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/functions/functions.py +2 -1
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/functions/schema_generator.py +5 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/llm_api/anthropic_client.py +5 -4
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/llm_api/google_ai_client.py +9 -43
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/llm_api/google_vertex_client.py +6 -5
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/llm_api/llm_client.py +8 -14
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/llm_api/llm_client_base.py +17 -16
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/llm_api/openai_client.py +14 -13
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/memory.py +2 -1
- letta_nightly-0.7.3.dev20250424054013/letta/personas/examples/sleeptime_memory_persona.txt +5 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/schemas/enums.py +3 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/server/server.py +1 -6
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/services/agent_manager.py +1 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/pyproject.toml +1 -1
- letta_nightly-0.7.2.dev20250423222439/letta/personas/examples/offline_memory_persona.txt +0 -4
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/LICENSE +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/README.md +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/__main__.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/agents/__init__.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/agents/base_agent.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/agents/ephemeral_agent.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/agents/ephemeral_memory_agent.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/agents/helpers.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/benchmark/benchmark.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/benchmark/constants.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/cli/cli.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/cli/cli_config.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/cli/cli_load.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/client/__init__.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/client/streaming.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/client/utils.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/config.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/constants.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/data_sources/connectors.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/data_sources/connectors_helper.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/embeddings.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/errors.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/functions/__init__.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/functions/ast_parsers.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/functions/function_sets/base.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/functions/function_sets/extras.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/functions/function_sets/multi_agent.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/functions/helpers.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/functions/interface.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/functions/mcp_client/__init__.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/functions/mcp_client/base_client.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/functions/mcp_client/exceptions.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/functions/mcp_client/sse_client.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/functions/mcp_client/stdio_client.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/functions/mcp_client/types.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/groups/dynamic_multi_agent.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/groups/helpers.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/groups/round_robin_multi_agent.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/groups/sleeptime_multi_agent.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/groups/supervisor_multi_agent.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/helpers/__init__.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/helpers/composio_helpers.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/helpers/converters.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/helpers/datetime_helpers.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/helpers/json_helpers.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/helpers/message_helper.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/helpers/tool_execution_helper.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/helpers/tool_rule_solver.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/humans/__init__.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/humans/examples/basic.txt +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/humans/examples/cs_phd.txt +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/interface.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/interfaces/__init__.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/interfaces/anthropic_streaming_interface.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/interfaces/openai_chat_completions_streaming_interface.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/interfaces/utils.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/jobs/__init__.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/jobs/helpers.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/jobs/llm_batch_job_polling.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/jobs/scheduler.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/jobs/types.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/llm_api/__init__.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/llm_api/anthropic.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/llm_api/aws_bedrock.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/llm_api/azure_openai.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/llm_api/azure_openai_constants.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/llm_api/cohere.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/llm_api/deepseek.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/llm_api/google_constants.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/llm_api/helpers.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/llm_api/llm_api_tools.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/llm_api/mistral.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/llm_api/openai.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/local_llm/README.md +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/local_llm/__init__.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/local_llm/chat_completion_proxy.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/local_llm/constants.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/local_llm/function_parser.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/local_llm/grammars/__init__.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/local_llm/grammars/gbnf_grammar_generator.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/local_llm/grammars/json.gbnf +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/local_llm/grammars/json_func_calls_with_inner_thoughts.gbnf +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/local_llm/json_parser.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/local_llm/koboldcpp/api.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/local_llm/koboldcpp/settings.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/local_llm/llamacpp/api.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/local_llm/llamacpp/settings.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/local_llm/llm_chat_completion_wrappers/__init__.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/local_llm/llm_chat_completion_wrappers/airoboros.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/local_llm/llm_chat_completion_wrappers/chatml.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/local_llm/llm_chat_completion_wrappers/configurable_wrapper.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/local_llm/llm_chat_completion_wrappers/dolphin.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/local_llm/llm_chat_completion_wrappers/llama3.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/local_llm/llm_chat_completion_wrappers/simple_summary_wrapper.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/local_llm/llm_chat_completion_wrappers/wrapper_base.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/local_llm/llm_chat_completion_wrappers/zephyr.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/local_llm/lmstudio/api.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/local_llm/lmstudio/settings.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/local_llm/ollama/api.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/local_llm/ollama/settings.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/local_llm/settings/__init__.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/local_llm/settings/deterministic_mirostat.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/local_llm/settings/settings.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/local_llm/settings/simple.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/local_llm/utils.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/local_llm/vllm/api.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/local_llm/webui/api.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/local_llm/webui/legacy_api.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/local_llm/webui/legacy_settings.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/local_llm/webui/settings.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/log.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/main.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/openai_backcompat/__init__.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/openai_backcompat/openai_object.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/orm/__all__.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/orm/__init__.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/orm/agent.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/orm/agents_tags.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/orm/base.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/orm/block.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/orm/block_history.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/orm/blocks_agents.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/orm/custom_columns.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/orm/enums.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/orm/errors.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/orm/file.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/orm/group.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/orm/groups_agents.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/orm/groups_blocks.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/orm/identities_agents.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/orm/identities_blocks.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/orm/identity.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/orm/job.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/orm/job_messages.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/orm/llm_batch_items.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/orm/llm_batch_job.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/orm/message.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/orm/mixins.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/orm/organization.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/orm/passage.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/orm/provider.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/orm/sandbox_config.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/orm/source.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/orm/sources_agents.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/orm/sqlalchemy_base.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/orm/sqlite_functions.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/orm/step.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/orm/tool.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/orm/tools_agents.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/orm/user.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/personas/__init__.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/personas/examples/anna_pa.txt +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/personas/examples/google_search_persona.txt +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/personas/examples/memgpt_doc.txt +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/personas/examples/memgpt_starter.txt +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/personas/examples/o1_persona.txt +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/personas/examples/sam.txt +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/personas/examples/sam_pov.txt +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/personas/examples/sam_simple_pov_gpt35.txt +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/personas/examples/sleeptime_doc_persona.txt +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/personas/examples/sqldb/test.db +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/prompts/__init__.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/prompts/gpt_summarize.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/prompts/gpt_system.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/prompts/system/memgpt_base.txt +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/prompts/system/memgpt_chat.txt +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/prompts/system/memgpt_chat_compressed.txt +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/prompts/system/memgpt_chat_fstring.txt +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/prompts/system/memgpt_convo_only.txt +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/prompts/system/memgpt_doc.txt +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/prompts/system/memgpt_gpt35_extralong.txt +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/prompts/system/memgpt_intuitive_knowledge.txt +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/prompts/system/memgpt_memory_only.txt +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/prompts/system/memgpt_modified_chat.txt +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/prompts/system/memgpt_modified_o1.txt +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/prompts/system/memgpt_offline_memory.txt +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/prompts/system/memgpt_offline_memory_chat.txt +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/prompts/system/memgpt_sleeptime_chat.txt +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/prompts/system/sleeptime.txt +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/prompts/system/sleeptime_doc_ingest.txt +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/pytest.ini +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/schemas/agent.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/schemas/block.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/schemas/embedding_config.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/schemas/embedding_config_overrides.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/schemas/environment_variables.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/schemas/file.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/schemas/group.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/schemas/health.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/schemas/identity.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/schemas/job.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/schemas/letta_base.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/schemas/letta_message.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/schemas/letta_message_content.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/schemas/letta_request.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/schemas/letta_response.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/schemas/llm_batch_job.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/schemas/llm_config.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/schemas/llm_config_overrides.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/schemas/memory.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/schemas/message.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/schemas/openai/chat_completion_request.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/schemas/openai/chat_completion_response.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/schemas/openai/chat_completions.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/schemas/openai/embedding_response.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/schemas/openai/openai.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/schemas/organization.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/schemas/passage.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/schemas/providers.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/schemas/response_format.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/schemas/run.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/schemas/sandbox_config.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/schemas/source.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/schemas/step.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/schemas/tool.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/schemas/tool_execution_result.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/schemas/tool_rule.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/schemas/usage.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/schemas/user.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/serialize_schemas/__init__.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/serialize_schemas/marshmallow_agent.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/serialize_schemas/marshmallow_agent_environment_variable.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/serialize_schemas/marshmallow_base.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/serialize_schemas/marshmallow_block.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/serialize_schemas/marshmallow_custom_fields.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/serialize_schemas/marshmallow_message.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/serialize_schemas/marshmallow_tag.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/serialize_schemas/marshmallow_tool.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/serialize_schemas/pydantic_agent_schema.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/server/__init__.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/server/constants.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/server/db.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/server/generate_openapi_schema.sh +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/server/rest_api/__init__.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/server/rest_api/app.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/server/rest_api/auth/__init__.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/server/rest_api/auth/index.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/server/rest_api/auth_token.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/server/rest_api/chat_completions_interface.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/server/rest_api/interface.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/server/rest_api/optimistic_json_parser.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/server/rest_api/routers/__init__.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/server/rest_api/routers/openai/chat_completions/__init__.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/server/rest_api/routers/openai/chat_completions/chat_completions.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/server/rest_api/routers/v1/__init__.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/server/rest_api/routers/v1/agents.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/server/rest_api/routers/v1/blocks.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/server/rest_api/routers/v1/groups.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/server/rest_api/routers/v1/health.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/server/rest_api/routers/v1/identities.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/server/rest_api/routers/v1/jobs.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/server/rest_api/routers/v1/llms.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/server/rest_api/routers/v1/messages.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/server/rest_api/routers/v1/organizations.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/server/rest_api/routers/v1/providers.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/server/rest_api/routers/v1/runs.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/server/rest_api/routers/v1/sandbox_configs.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/server/rest_api/routers/v1/sources.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/server/rest_api/routers/v1/steps.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/server/rest_api/routers/v1/tags.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/server/rest_api/routers/v1/tools.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/server/rest_api/routers/v1/users.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/server/rest_api/routers/v1/voice.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/server/rest_api/static_files.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/server/rest_api/utils.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/server/startup.sh +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/server/static_files/assets/index-048c9598.js +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/server/static_files/assets/index-0e31b727.css +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/server/static_files/favicon.ico +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/server/static_files/index.html +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/server/static_files/memgpt_logo_transparent.png +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/server/utils.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/server/ws_api/__init__.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/server/ws_api/example_client.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/server/ws_api/interface.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/server/ws_api/protocol.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/server/ws_api/server.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/services/__init__.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/services/block_manager.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/services/group_manager.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/services/helpers/agent_manager_helper.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/services/helpers/tool_execution_helper.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/services/identity_manager.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/services/job_manager.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/services/llm_batch_manager.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/services/message_manager.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/services/organization_manager.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/services/passage_manager.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/services/per_agent_lock_manager.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/services/provider_manager.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/services/sandbox_config_manager.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/services/source_manager.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/services/step_manager.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/services/summarizer/__init__.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/services/summarizer/enums.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/services/summarizer/summarizer.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/services/tool_executor/__init__.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/services/tool_executor/tool_execution_manager.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/services/tool_executor/tool_execution_sandbox.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/services/tool_executor/tool_executor.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/services/tool_manager.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/services/tool_sandbox/__init__.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/services/tool_sandbox/base.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/services/tool_sandbox/e2b_sandbox.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/services/tool_sandbox/local_sandbox.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/services/user_manager.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/settings.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/streaming_interface.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/streaming_utils.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/system.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/tracing.py +0 -0
- {letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/utils.py +0 -0
{letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/agent.py
RENAMED
@@ -332,13 +332,14 @@ class Agent(BaseAgent):
         log_telemetry(self.logger, "_get_ai_reply create start")
         # New LLM client flow
         llm_client = LLMClient.create(
-            llm_config=self.agent_state.llm_config,
+            provider=self.agent_state.llm_config.model_endpoint_type,
             put_inner_thoughts_first=put_inner_thoughts_first,
         )

         if llm_client and not stream:
             response = llm_client.send_llm_request(
                 messages=message_sequence,
+                llm_config=self.agent_state.llm_config,
                 tools=allowed_functions,
                 stream=stream,
                 force_tool_call=force_tool_call,
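
The pattern above repeats at every call site in this release: the client is constructed from the provider type alone, and the LLMConfig travels with each request instead of being bound to the client. A minimal sketch of the new call shape (the wrapper function and its arguments are stand-ins, not code from the package):

from letta.llm_api.llm_client import LLMClient

def get_ai_reply(agent_state, message_sequence, allowed_functions):
    # Stateless client, keyed only on the provider
    llm_client = LLMClient.create(
        provider=agent_state.llm_config.model_endpoint_type,
        put_inner_thoughts_first=True,
    )
    # Per-request config replaces the old client-bound config
    return llm_client.send_llm_request(
        messages=message_sequence,
        llm_config=agent_state.llm_config,
        tools=allowed_functions,
        stream=False,
    )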
{letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/agents/letta_agent.py
RENAMED
@@ -66,7 +66,7 @@ class LettaAgent(BaseAgent):
         )
         tool_rules_solver = ToolRulesSolver(agent_state.tool_rules)
         llm_client = LLMClient.create(
-            llm_config=agent_state.llm_config,
+            provider=agent_state.llm_config.model_endpoint_type,
             put_inner_thoughts_first=True,
         )
         for step in range(max_steps):
@@ -182,6 +182,7 @@ class LettaAgent(BaseAgent):

             response = await llm_client.send_llm_request_async(
                 messages=in_context_messages,
+                llm_config=agent_state.llm_config,
                 tools=allowed_tools,
                 force_tool_call=force_tool_call,
                 stream=stream,
{letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/agents/letta_agent_batch.py
RENAMED
@@ -156,7 +156,7 @@ class LettaAgentBatch:

         log_event(name="init_llm_client")
         llm_client = LLMClient.create(
-            llm_config=agent_states[0].llm_config,
+            provider=agent_states[0].llm_config.model_endpoint_type,
             put_inner_thoughts_first=True,
         )
         agent_llm_config_mapping = {s.id: s.llm_config for s in agent_states}
@@ -272,9 +272,14 @@ class LettaAgentBatch:
             request_status_updates.append(RequestStatusUpdateInfo(llm_batch_id=llm_batch_id, agent_id=aid, request_status=status))

             # translate provider‑specific response → OpenAI‑style tool call (unchanged)
-            llm_client = LLMClient.create(llm_config=item.llm_config, put_inner_thoughts_first=True)
+            llm_client = LLMClient.create(
+                provider=item.llm_config.model_endpoint_type,
+                put_inner_thoughts_first=True,
+            )
             tool_call = (
-                llm_client.convert_response_to_chat_completion(response_data=pr.message.model_dump(), input_messages=[])
+                llm_client.convert_response_to_chat_completion(
+                    response_data=pr.message.model_dump(), input_messages=[], llm_config=item.llm_config
+                )
                 .choices[0]
                 .message.tool_calls[0]
             )
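
Because the client no longer stores a config, one instance can serve a batch of agents whose configs differ; only the conversion call needs the per-item config. A sketch of that shape, where `results` is a hypothetical list of (batch item, provider response) pairs:

llm_client = LLMClient.create(
    provider=agent_states[0].llm_config.model_endpoint_type,
    put_inner_thoughts_first=True,
)
for item, pr in results:
    completion = llm_client.convert_response_to_chat_completion(
        response_data=pr.message.model_dump(),
        input_messages=[],
        llm_config=item.llm_config,  # per-agent config, supplied at call time
    )
    tool_call = completion.choices[0].message.tool_calls[0]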
{letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/agents/voice_agent.py
RENAMED
@@ -90,7 +90,7 @@ class VoiceAgent(BaseAgent):
         # )
         self.message_buffer_limit = message_buffer_limit
         # self.message_buffer_min = message_buffer_min
-        self.offline_memory_agent = EphemeralMemoryAgent(
+        self.sleeptime_memory_agent = EphemeralMemoryAgent(
             agent_id=agent_id, openai_client=openai_client, message_manager=message_manager, agent_manager=agent_manager, actor=actor
         )

@@ -372,7 +372,7 @@ class VoiceAgent(BaseAgent):
             return f"Failed to call tool. Error: {e}", False

    async def _recall_memory(self, query, agent_state: AgentState) -> None:
-        results = await self.offline_memory_agent.step([MessageCreate(role="user", content=[TextContent(text=query)])])
+        results = await self.sleeptime_memory_agent.step([MessageCreate(role="user", content=[TextContent(text=query)])])
         target_block = next(b for b in agent_state.memory.blocks if b.label == self.summary_block_label)
         self.block_manager.update_block(
             block_id=target_block.id, block_update=BlockUpdate(value=results[0].content[0].text), actor=self.actor
{letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/client/client.py
RENAMED
@@ -85,6 +85,7 @@ class AbstractClient(object):
        description: Optional[str] = None,
        tags: Optional[List[str]] = None,
        message_buffer_autoclear: bool = False,
+        response_format: Optional[ResponseFormatUnion] = None,
    ) -> AgentState:
        raise NotImplementedError

@@ -2352,6 +2353,7 @@ class LocalClient(AbstractClient):
        initial_message_sequence: Optional[List[Message]] = None,
        tags: Optional[List[str]] = None,
        message_buffer_autoclear: bool = False,
+        response_format: Optional[ResponseFormatUnion] = None,
    ) -> AgentState:
        """Create an agent

@@ -2405,6 +2407,7 @@ class LocalClient(AbstractClient):
            "initial_message_sequence": initial_message_sequence,
            "tags": tags,
            "message_buffer_autoclear": message_buffer_autoclear,
+            "response_format": response_format,
        }

        # Only add name if it's not None
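
The new `response_format` argument threads through `create_agent` straight into the agent-creation payload. A sketch of a caller, assuming an already-constructed LocalClient (the concrete ResponseFormatUnion members live in letta/schemas/response_format.py and are not shown in this diff):

def create_structured_agent(client, response_format=None):
    # response_format=None preserves the pre-0.7.3 behavior
    return client.create_agent(
        name="structured-output-agent",
        response_format=response_format,
    )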
{letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/functions/functions.py
RENAMED
@@ -2,7 +2,7 @@ import importlib
 import inspect
 from textwrap import dedent  # remove indentation
 from types import ModuleType
-from typing import Dict, List, Optional
+from typing import Dict, List, Literal, Optional

 from letta.errors import LettaToolCreateError
 from letta.functions.schema_generator import generate_schema
@@ -20,6 +20,7 @@ def derive_openai_json_schema(source_code: str, name: Optional[str] = None) -> dict:
         "Optional": Optional,
         "List": List,
         "Dict": Dict,
+        "Literal": Literal,
         # To support Pydantic models
         # "BaseModel": BaseModel,
         # "Field": Field,
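
With `Literal` added to the evaluation namespace, tool source code that uses it can now get through schema derivation instead of failing when the annotation is evaluated. An illustrative (hypothetical) tool:

source_code = '''
from typing import Literal

def set_mode(mode: Literal["fast", "accurate"]) -> str:
    """Switch the processing mode.

    Args:
        mode: Which mode to use.

    Returns:
        str: Confirmation message.
    """
    return f"mode set to {mode}"
'''

schema = derive_openai_json_schema(source_code=source_code)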
{letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/functions/schema_generator.py
RENAMED
@@ -5,6 +5,7 @@ from typing import Any, Dict, List, Optional, Type, Union, get_args, get_origin
 from composio.client.collections import ActionParametersModel
 from docstring_parser import parse
 from pydantic import BaseModel
+from typing_extensions import Literal

 from letta.functions.mcp_client.types import MCPTool

@@ -70,6 +71,10 @@ def type_to_json_schema_type(py_type) -> dict:
            "items": type_to_json_schema_type(args[0]),
        }

+    # Handle literals
+    if get_origin(py_type) is Literal:
+        return {"type": "string", "enum": get_args(py_type)}
+
     # Handle object types
     if py_type == dict or origin in (dict, Dict):
         args = get_args(py_type)
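
The new branch maps a Literal annotation to a JSON-schema string enum. An illustration of the mapping as written (note that get_args returns a tuple, and that the branch assumes string members: a Literal of ints would also be emitted as "type": "string"):

from typing import Literal, get_args, get_origin

py_type = Literal["red", "green", "blue"]
assert get_origin(py_type) is Literal
# type_to_json_schema_type(py_type) now returns:
#   {"type": "string", "enum": ("red", "green", "blue")}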
{letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/llm_api/anthropic_client.py
RENAMED
@@ -43,18 +43,18 @@ logger = get_logger(__name__)

 class AnthropicClient(LLMClientBase):

-    def request(self, request_data: dict) -> dict:
+    def request(self, request_data: dict, llm_config: LLMConfig) -> dict:
         client = self._get_anthropic_client(async_client=False)
         response = client.beta.messages.create(**request_data, betas=["tools-2024-04-04"])
         return response.model_dump()

-    async def request_async(self, request_data: dict) -> dict:
+    async def request_async(self, request_data: dict, llm_config: LLMConfig) -> dict:
         client = self._get_anthropic_client(async_client=True)
         response = await client.beta.messages.create(**request_data, betas=["tools-2024-04-04"])
         return response.model_dump()

     @trace_method
-    async def stream_async(self, request_data: dict) -> AsyncStream[BetaRawMessageStreamEvent]:
+    async def stream_async(self, request_data: dict, llm_config: LLMConfig) -> AsyncStream[BetaRawMessageStreamEvent]:
         client = self._get_anthropic_client(async_client=True)
         request_data["stream"] = True
         return await client.beta.messages.create(**request_data, betas=["tools-2024-04-04"])
@@ -310,6 +310,7 @@ class AnthropicClient(LLMClientBase):
         self,
         response_data: dict,
         input_messages: List[PydanticMessage],
+        llm_config: LLMConfig,
     ) -> ChatCompletionResponse:
         """
         Example response from Claude 3:
@@ -411,7 +412,7 @@ class AnthropicClient(LLMClientBase):
                 total_tokens=prompt_tokens + completion_tokens,
             ),
         )
-        if self.llm_config.put_inner_thoughts_in_kwargs:
+        if llm_config.put_inner_thoughts_in_kwargs:
             chat_completion_response = unpack_all_inner_thoughts_from_kwargs(
                 response=chat_completion_response, inner_thoughts_key=INNER_THOUGHTS_KWARG
             )
{letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/llm_api/google_ai_client.py
RENAMED
@@ -25,15 +25,15 @@ logger = get_logger(__name__)

 class GoogleAIClient(LLMClientBase):

-    def request(self, request_data: dict) -> dict:
+    def request(self, request_data: dict, llm_config: LLMConfig) -> dict:
         """
         Performs underlying request to llm and returns raw response.
         """
         # print("[google_ai request]", json.dumps(request_data, indent=2))

         url, headers = get_gemini_endpoint_and_headers(
-            base_url=str(self.llm_config.model_endpoint),
-            model=self.llm_config.model,
+            base_url=str(llm_config.model_endpoint),
+            model=llm_config.model,
             api_key=str(model_settings.gemini_api_key),
             key_in_header=True,
             generate_content=True,
@@ -55,7 +55,7 @@ class GoogleAIClient(LLMClientBase):
             tool_objs = [Tool(**t) for t in tools]
             tool_names = [t.function.name for t in tool_objs]
             # Convert to the exact payload style Google expects
-            tools = self.convert_tools_to_google_ai_format(tool_objs)
+            tools = self.convert_tools_to_google_ai_format(tool_objs, llm_config)
         else:
             tool_names = []

@@ -88,6 +88,7 @@ class GoogleAIClient(LLMClientBase):
         self,
         response_data: dict,
         input_messages: List[PydanticMessage],
+        llm_config: LLMConfig,
     ) -> ChatCompletionResponse:
         """
         Converts custom response format from llm client into an OpenAI
@@ -150,7 +151,7 @@ class GoogleAIClient(LLMClientBase):
                     assert isinstance(function_args, dict), function_args

                     # NOTE: this also involves stripping the inner monologue out of the function
-                    if self.llm_config.put_inner_thoughts_in_kwargs:
+                    if llm_config.put_inner_thoughts_in_kwargs:
                         from letta.local_llm.constants import INNER_THOUGHTS_KWARG

                         assert INNER_THOUGHTS_KWARG in function_args, f"Couldn't find inner thoughts in function args:\n{function_call}"
@@ -259,49 +260,14 @@ class GoogleAIClient(LLMClientBase):
             return ChatCompletionResponse(
                 id=response_id,
                 choices=choices,
-                model=self.llm_config.model,  # NOTE: Google API doesn't pass back model in the response
+                model=llm_config.model,  # NOTE: Google API doesn't pass back model in the response
                 created=get_utc_time_int(),
                 usage=usage,
             )
         except KeyError as e:
             raise e

-    def _clean_google_ai_schema_properties(self, schema_part: dict):
-        """Recursively clean schema parts to remove unsupported Google AI keywords."""
-        if not isinstance(schema_part, dict):
-            return
-
-        # Per https://ai.google.dev/gemini-api/docs/function-calling?example=meeting#notes_and_limitations
-        # * Only a subset of the OpenAPI schema is supported.
-        # * Supported parameter types in Python are limited.
-        unsupported_keys = ["default", "exclusiveMaximum", "exclusiveMinimum"]
-        keys_to_remove_at_this_level = [key for key in unsupported_keys if key in schema_part]
-        for key_to_remove in keys_to_remove_at_this_level:
-            logger.warning(f"Removing unsupported keyword '{key_to_remove}' from schema part.")
-            del schema_part[key_to_remove]
-
-        if schema_part.get("type") == "string" and "format" in schema_part:
-            allowed_formats = ["enum", "date-time"]
-            if schema_part["format"] not in allowed_formats:
-                logger.warning(f"Removing unsupported format '{schema_part['format']}' for string type. Allowed: {allowed_formats}")
-                del schema_part["format"]
-
-        # Check properties within the current level
-        if "properties" in schema_part and isinstance(schema_part["properties"], dict):
-            for prop_name, prop_schema in schema_part["properties"].items():
-                self._clean_google_ai_schema_properties(prop_schema)
-
-        # Check items within arrays
-        if "items" in schema_part and isinstance(schema_part["items"], dict):
-            self._clean_google_ai_schema_properties(schema_part["items"])
-
-        # Check within anyOf, allOf, oneOf lists
-        for key in ["anyOf", "allOf", "oneOf"]:
-            if key in schema_part and isinstance(schema_part[key], list):
-                for item_schema in schema_part[key]:
-                    self._clean_google_ai_schema_properties(item_schema)
-
-    def convert_tools_to_google_ai_format(self, tools: List[Tool]) -> List[dict]:
+    def convert_tools_to_google_ai_format(self, tools: List[Tool], llm_config: LLMConfig) -> List[dict]:
         """
         OpenAI style:
         "tools": [{
@@ -365,7 +331,7 @@ class GoogleAIClient(LLMClientBase):
             self._clean_google_ai_schema_properties(func["parameters"])

             # Add inner thoughts
-            if self.llm_config.put_inner_thoughts_in_kwargs:
+            if llm_config.put_inner_thoughts_in_kwargs:
                 from letta.local_llm.constants import INNER_THOUGHTS_KWARG, INNER_THOUGHTS_KWARG_DESCRIPTION

                 func["parameters"]["properties"][INNER_THOUGHTS_KWARG] = {
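
For reference, the schema-cleaning pass deleted above strips keywords outside Gemini's supported OpenAPI subset; the new side still calls it at line 331, so only its location changed in this release. Its effect on a small schema, per the removed code:

schema = {"type": "string", "format": "uuid", "default": "abc"}
# _clean_google_ai_schema_properties(schema) mutates it in place to:
#   {"type": "string"}
# "default" is in the unsupported-keys list, and "uuid" is not among the
# allowed string formats ["enum", "date-time"]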
{letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/llm_api/google_vertex_client.py
RENAMED
@@ -18,7 +18,7 @@ from letta.utils import get_tool_call_id

 class GoogleVertexClient(GoogleAIClient):

-    def request(self, request_data: dict) -> dict:
+    def request(self, request_data: dict, llm_config: LLMConfig) -> dict:
         """
         Performs underlying request to llm and returns raw response.
         """
@@ -29,7 +29,7 @@ class GoogleVertexClient(GoogleAIClient):
             http_options={"api_version": "v1"},
         )
         response = client.models.generate_content(
-            model=self.llm_config.model,
+            model=llm_config.model,
             contents=request_data["contents"],
             config=request_data["config"],
         )
@@ -45,7 +45,7 @@ class GoogleVertexClient(GoogleAIClient):
         """
         Constructs a request object in the expected data format for this client.
         """
-        request_data = super().build_request_data(messages, tools, force_tool_call)
+        request_data = super().build_request_data(messages, llm_config, tools, force_tool_call)
         request_data["config"] = request_data.pop("generation_config")
         request_data["config"]["tools"] = request_data.pop("tools")

@@ -75,6 +75,7 @@ class GoogleVertexClient(GoogleAIClient):
         self,
         response_data: dict,
         input_messages: List[PydanticMessage],
+        llm_config: LLMConfig,
     ) -> ChatCompletionResponse:
         """
         Converts custom response format from llm client into an OpenAI
@@ -136,7 +137,7 @@ class GoogleVertexClient(GoogleAIClient):
                     assert isinstance(function_args, dict), function_args

                     # NOTE: this also involves stripping the inner monologue out of the function
-                    if self.llm_config.put_inner_thoughts_in_kwargs:
+                    if llm_config.put_inner_thoughts_in_kwargs:
                         from letta.local_llm.constants import INNER_THOUGHTS_KWARG

                         assert INNER_THOUGHTS_KWARG in function_args, f"Couldn't find inner thoughts in function args:\n{function_call}"
@@ -233,7 +234,7 @@ class GoogleVertexClient(GoogleAIClient):
         return ChatCompletionResponse(
             id=response_id,
             choices=choices,
-            model=self.llm_config.model,  # NOTE: Google API doesn't pass back model in the response
+            model=llm_config.model,  # NOTE: Google API doesn't pass back model in the response
             created=get_utc_time_int(),
             usage=usage,
         )
{letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/llm_api/llm_client.py
RENAMED
@@ -1,7 +1,7 @@
 from typing import Optional

 from letta.llm_api.llm_client_base import LLMClientBase
-from letta.schemas.llm_config import LLMConfig
+from letta.schemas.enums import ProviderType


 class LLMClient:
@@ -9,17 +9,15 @@ class LLMClient:

     @staticmethod
     def create(
-        llm_config: LLMConfig,
+        provider: ProviderType,
         put_inner_thoughts_first: bool = True,
     ) -> Optional[LLMClientBase]:
         """
         Create an LLM client based on the model endpoint type.

         Args:
-            llm_config: The LLM configuration object
+            provider: The model endpoint type
             put_inner_thoughts_first: Whether to put inner thoughts first in the response
-            use_structured_output: Whether to use structured output
-            use_tool_naming: Whether to use tool naming

         Returns:
             An instance of LLMClientBase subclass
@@ -27,33 +25,29 @@ class LLMClient:
         Raises:
             ValueError: If the model endpoint type is not supported
         """
-        match llm_config.model_endpoint_type:
-            case "google_ai":
+        match provider:
+            case ProviderType.google_ai:
                 from letta.llm_api.google_ai_client import GoogleAIClient

                 return GoogleAIClient(
-                    llm_config=llm_config,
                     put_inner_thoughts_first=put_inner_thoughts_first,
                 )
-            case "google_vertex":
+            case ProviderType.google_vertex:
                 from letta.llm_api.google_vertex_client import GoogleVertexClient

                 return GoogleVertexClient(
-                    llm_config=llm_config,
                     put_inner_thoughts_first=put_inner_thoughts_first,
                 )
-            case "anthropic":
+            case ProviderType.anthropic:
                 from letta.llm_api.anthropic_client import AnthropicClient

                 return AnthropicClient(
-                    llm_config=llm_config,
                     put_inner_thoughts_first=put_inner_thoughts_first,
                 )
-            case "openai":
+            case ProviderType.openai:
                 from letta.llm_api.openai_client import OpenAIClient

                 return OpenAIClient(
-                    llm_config=llm_config,
                     put_inner_thoughts_first=put_inner_thoughts_first,
                 )
             case _:
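LLMClient.create now selects the client class from a ProviderType enum instead of receiving a full LLMConfig, so the factory carries no per-model state. A minimal usage sketch that mirrors the updated call site in letta/memory.py further down (the wrapper function itself is hypothetical):

from letta.llm_api.llm_client import LLMClient

def send_once(messages, llm_config):
    # The provider picks the client class; the config travels with the call.
    client = LLMClient.create(
        provider=llm_config.model_endpoint_type,
        put_inner_thoughts_first=False,
    )
    if client is None:
        return None  # unsupported provider; callers fall back to the old flow
    return client.send_llm_request(
        messages=messages,
        llm_config=llm_config,
        stream=False,
    )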
{letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/llm_api/llm_client_base.py
RENAMED
@@ -20,17 +20,16 @@ class LLMClientBase:

     def __init__(
         self,
-        llm_config: LLMConfig,
         put_inner_thoughts_first: Optional[bool] = True,
         use_tool_naming: bool = True,
     ):
-        self.llm_config = llm_config
         self.put_inner_thoughts_first = put_inner_thoughts_first
         self.use_tool_naming = use_tool_naming

     def send_llm_request(
         self,
         messages: List[Message],
+        llm_config: LLMConfig,
         tools: Optional[List[dict]] = None, # TODO: change to Tool object
         stream: bool = False,
         force_tool_call: Optional[str] = None,
@@ -40,23 +39,24 @@ class LLMClientBase:
         If stream=True, returns a Stream[ChatCompletionChunk] that can be iterated over.
         Otherwise returns a ChatCompletionResponse.
         """
-        request_data = self.build_request_data(messages, self.llm_config, tools, force_tool_call)
+        request_data = self.build_request_data(messages, llm_config, tools, force_tool_call)

         try:
             log_event(name="llm_request_sent", attributes=request_data)
             if stream:
-                return self.stream(request_data)
+                return self.stream(request_data, llm_config)
             else:
-                response_data = self.request(request_data)
+                response_data = self.request(request_data, llm_config)
                 log_event(name="llm_response_received", attributes=response_data)
         except Exception as e:
             raise self.handle_llm_error(e)

-        return self.convert_response_to_chat_completion(response_data, messages)
+        return self.convert_response_to_chat_completion(response_data, messages, llm_config)

     async def send_llm_request_async(
         self,
         messages: List[Message],
+        llm_config: LLMConfig,
         tools: Optional[List[dict]] = None, # TODO: change to Tool object
         stream: bool = False,
         force_tool_call: Optional[str] = None,
@@ -66,19 +66,19 @@ class LLMClientBase:
         If stream=True, returns an AsyncStream[ChatCompletionChunk] that can be async iterated over.
         Otherwise returns a ChatCompletionResponse.
         """
-        request_data = self.build_request_data(messages, self.llm_config, tools, force_tool_call)
+        request_data = self.build_request_data(messages, llm_config, tools, force_tool_call)

         try:
             log_event(name="llm_request_sent", attributes=request_data)
             if stream:
-                return await self.stream_async(request_data)
+                return await self.stream_async(request_data, llm_config)
             else:
-                response_data = await self.request_async(request_data)
+                response_data = await self.request_async(request_data, llm_config)
                 log_event(name="llm_response_received", attributes=response_data)
         except Exception as e:
             raise self.handle_llm_error(e)

-        return self.convert_response_to_chat_completion(response_data, messages)
+        return self.convert_response_to_chat_completion(response_data, messages, llm_config)

     async def send_llm_batch_request_async(
         self,
@@ -102,14 +102,14 @@ class LLMClientBase:
         raise NotImplementedError

     @abstractmethod
-    def request(self, request_data: dict) -> dict:
+    def request(self, request_data: dict, llm_config: LLMConfig) -> dict:
         """
         Performs underlying request to llm and returns raw response.
         """
         raise NotImplementedError

     @abstractmethod
-    async def request_async(self, request_data: dict) -> dict:
+    async def request_async(self, request_data: dict, llm_config: LLMConfig) -> dict:
         """
         Performs underlying request to llm and returns raw response.
         """
@@ -120,6 +120,7 @@ class LLMClientBase:
         self,
         response_data: dict,
         input_messages: List[Message],
+        llm_config: LLMConfig,
     ) -> ChatCompletionResponse:
         """
         Converts custom response format from llm client into an OpenAI
@@ -128,18 +129,18 @@ class LLMClientBase:
         raise NotImplementedError

     @abstractmethod
-    def stream(self, request_data: dict) -> Stream[ChatCompletionChunk]:
+    def stream(self, request_data: dict, llm_config: LLMConfig) -> Stream[ChatCompletionChunk]:
         """
         Performs underlying streaming request to llm and returns raw response.
         """
-        raise NotImplementedError(f"Streaming is not supported for {self.llm_config.model_endpoint_type}")
+        raise NotImplementedError(f"Streaming is not supported for {llm_config.model_endpoint_type}")

     @abstractmethod
-    async def stream_async(self, request_data: dict) -> AsyncStream[ChatCompletionChunk]:
+    async def stream_async(self, request_data: dict, llm_config: LLMConfig) -> AsyncStream[ChatCompletionChunk]:
         """
         Performs underlying streaming request to llm and returns raw response.
         """
-        raise NotImplementedError(f"Streaming is not supported for {self.llm_config.model_endpoint_type}")
+        raise NotImplementedError(f"Streaming is not supported for {llm_config.model_endpoint_type}")

     @abstractmethod
     def handle_llm_error(self, e: Exception) -> Exception:
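The net effect of the LLMClientBase changes is that a client instance is now stateless with respect to model choice: llm_config is threaded through build_request_data, request/request_async, stream/stream_async, and convert_response_to_chat_completion. A sketch of the new subclass contract using a hypothetical EchoClient (illustration only; the remaining abstract hooks are omitted and a real subclass must implement them):

from letta.llm_api.llm_client_base import LLMClientBase
from letta.schemas.llm_config import LLMConfig

class EchoClient(LLMClientBase):
    def build_request_data(self, messages, llm_config: LLMConfig, tools=None, force_tool_call=None) -> dict:
        # Model selection comes from the per-call config, not from self.
        return {"model": llm_config.model, "messages": messages}

    def request(self, request_data: dict, llm_config: LLMConfig) -> dict:
        # A real client would call the provider API here.
        return {"model": llm_config.model, "echoed": request_data["messages"]}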
{letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/llm_api/openai_client.py
RENAMED
@@ -62,11 +62,11 @@ def supports_parallel_tool_calling(model: str) -> bool:


 class OpenAIClient(LLMClientBase):
-    def _prepare_client_kwargs(self) -> dict:
+    def _prepare_client_kwargs(self, llm_config: LLMConfig) -> dict:
         api_key = model_settings.openai_api_key or os.environ.get("OPENAI_API_KEY")
         # supposedly the openai python client requires a dummy API key
         api_key = api_key or "DUMMY_API_KEY"
-        kwargs = {"api_key": api_key, "base_url": self.llm_config.model_endpoint}
+        kwargs = {"api_key": api_key, "base_url": llm_config.model_endpoint}

         return kwargs

@@ -115,7 +115,7 @@ class OpenAIClient(LLMClientBase):
         # TODO(matt) move into LLMConfig
         # TODO: This vllm checking is very brittle and is a patch at most
         tool_choice = None
-        if llm_config.model_endpoint == "https://inference.memgpt.ai" or (llm_config.handle and "vllm" in self.llm_config.handle):
+        if llm_config.model_endpoint == "https://inference.memgpt.ai" or (llm_config.handle and "vllm" in llm_config.handle):
             tool_choice = "auto" # TODO change to "required" once proxy supports it
         elif tools:
             # only set if tools is non-Null
@@ -152,20 +152,20 @@ class OpenAIClient(LLMClientBase):

         return data.model_dump(exclude_unset=True)

-    def request(self, request_data: dict) -> dict:
+    def request(self, request_data: dict, llm_config: LLMConfig) -> dict:
         """
         Performs underlying synchronous request to OpenAI API and returns raw response dict.
         """
-        client = OpenAI(**self._prepare_client_kwargs())
+        client = OpenAI(**self._prepare_client_kwargs(llm_config))

         response: ChatCompletion = client.chat.completions.create(**request_data)
         return response.model_dump()

-    async def request_async(self, request_data: dict) -> dict:
+    async def request_async(self, request_data: dict, llm_config: LLMConfig) -> dict:
         """
         Performs underlying asynchronous request to OpenAI API and returns raw response dict.
         """
-        client = AsyncOpenAI(**self._prepare_client_kwargs())
+        client = AsyncOpenAI(**self._prepare_client_kwargs(llm_config))
         response: ChatCompletion = await client.chat.completions.create(**request_data)
         return response.model_dump()

@@ -173,6 +173,7 @@ class OpenAIClient(LLMClientBase):
         self,
         response_data: dict,
         input_messages: List[PydanticMessage], # Included for consistency, maybe used later
+        llm_config: LLMConfig,
     ) -> ChatCompletionResponse:
         """
         Converts raw OpenAI response dict into the ChatCompletionResponse Pydantic model.
@@ -183,30 +184,30 @@ class OpenAIClient(LLMClientBase):
         chat_completion_response = ChatCompletionResponse(**response_data)

         # Unpack inner thoughts if they were embedded in function arguments
-        if self.llm_config.put_inner_thoughts_in_kwargs:
+        if llm_config.put_inner_thoughts_in_kwargs:
             chat_completion_response = unpack_all_inner_thoughts_from_kwargs(
                 response=chat_completion_response, inner_thoughts_key=INNER_THOUGHTS_KWARG
             )

         # If we used a reasoning model, create a content part for the ommitted reasoning
-        if is_openai_reasoning_model(self.llm_config.model):
+        if is_openai_reasoning_model(llm_config.model):
             chat_completion_response.choices[0].message.ommitted_reasoning_content = True

         return chat_completion_response

-    def stream(self, request_data: dict) -> Stream[ChatCompletionChunk]:
+    def stream(self, request_data: dict, llm_config: LLMConfig) -> Stream[ChatCompletionChunk]:
         """
         Performs underlying streaming request to OpenAI and returns the stream iterator.
         """
-        client = OpenAI(**self._prepare_client_kwargs())
+        client = OpenAI(**self._prepare_client_kwargs(llm_config))
         response_stream: Stream[ChatCompletionChunk] = client.chat.completions.create(**request_data, stream=True)
         return response_stream

-    async def stream_async(self, request_data: dict) -> AsyncStream[ChatCompletionChunk]:
+    async def stream_async(self, request_data: dict, llm_config: LLMConfig) -> AsyncStream[ChatCompletionChunk]:
         """
         Performs underlying asynchronous streaming request to OpenAI and returns the async stream iterator.
         """
-        client = AsyncOpenAI(**self._prepare_client_kwargs())
+        client = AsyncOpenAI(**self._prepare_client_kwargs(llm_config))
         response_stream: AsyncStream[ChatCompletionChunk] = await client.chat.completions.create(**request_data, stream=True)
         return response_stream

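Because OpenAIClient now builds the OpenAI SDK client from the per-call llm_config (API key plus model_endpoint as base_url), a single client instance can serve requests against different endpoints. A hedged illustration (both configs and the request_data variable are hypothetical):

from letta.llm_api.openai_client import OpenAIClient

client = OpenAIClient(put_inner_thoughts_first=True)
# request_data = client.build_request_data(messages, llm_config_a, tools)
# client.request(request_data, llm_config_a)  # base_url taken from llm_config_a.model_endpoint
# client.request(request_data, llm_config_b)  # same instance, different endpoint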
{letta_nightly-0.7.2.dev20250423222439 → letta_nightly-0.7.3.dev20250424054013}/letta/memory.py
RENAMED
@@ -79,7 +79,7 @@ def summarize_messages(
     llm_config_no_inner_thoughts.put_inner_thoughts_in_kwargs = False

     llm_client = LLMClient.create(
-        llm_config=llm_config_no_inner_thoughts,
+        provider=llm_config_no_inner_thoughts.model_endpoint_type,
         put_inner_thoughts_first=False,
     )
     # try to use new client, otherwise fallback to old flow
@@ -87,6 +87,7 @@ def summarize_messages(
     if llm_client:
         response = llm_client.send_llm_request(
             messages=message_sequence,
+            llm_config=llm_config_no_inner_thoughts,
             stream=False,
         )
     else:
letta_nightly-0.7.3.dev20250424054013/letta/personas/examples/sleeptime_memory_persona.txt
ADDED
@@ -0,0 +1,5 @@
+I am an expert conversation memory agent that can do the following:
+- Consolidate memories into more concise blocks
+- Identify patterns in user behavior
+- Make inferences based on the memory
+I manage the memory blocks such that they contain everything that is important about the conversation.