letta-nightly 0.6.34.dev20250303104329__tar.gz → 0.6.35.dev20250304104154__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of letta-nightly might be problematic. See the package's page on its public registry for more details.
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/PKG-INFO +6 -5
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/__init__.py +1 -1
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/agent.py +40 -15
- letta_nightly-0.6.35.dev20250304104154/letta/agents/base_agent.py +51 -0
- letta_nightly-0.6.35.dev20250304104154/letta/agents/ephemeral_agent.py +72 -0
- letta_nightly-0.6.35.dev20250304104154/letta/agents/low_latency_agent.py +315 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/constants.py +3 -1
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/functions/ast_parsers.py +50 -1
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/functions/helpers.py +79 -2
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/functions/schema_generator.py +3 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/helpers/converters.py +3 -3
- letta_nightly-0.6.35.dev20250304104154/letta/interfaces/openai_chat_completions_streaming_interface.py +109 -0
- letta_nightly-0.6.35.dev20250304104154/letta/interfaces/utils.py +11 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/llm_api/anthropic.py +9 -1
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/llm_api/azure_openai.py +3 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/llm_api/google_ai.py +3 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/llm_api/google_vertex.py +4 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/llm_api/llm_api_tools.py +1 -1
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/llm_api/openai.py +6 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/local_llm/chat_completion_proxy.py +6 -1
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/log.py +2 -2
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/orm/step.py +1 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/orm/tool.py +1 -1
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/prompts/system/memgpt_convo_only.txt +3 -5
- letta_nightly-0.6.35.dev20250304104154/letta/prompts/system/memgpt_memory_only.txt +29 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/schemas/agent.py +0 -1
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/schemas/step.py +1 -1
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/schemas/tool.py +16 -2
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/server/rest_api/app.py +5 -1
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/server/rest_api/routers/v1/agents.py +32 -21
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/server/rest_api/routers/v1/identities.py +9 -1
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/server/rest_api/routers/v1/runs.py +49 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/server/rest_api/routers/v1/tools.py +1 -0
- letta_nightly-0.6.35.dev20250304104154/letta/server/rest_api/routers/v1/voice.py +79 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/server/rest_api/utils.py +3 -2
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/server/server.py +15 -7
- letta_nightly-0.6.35.dev20250304104154/letta/server/ws_api/__init__.py +0 -0
- letta_nightly-0.6.35.dev20250304104154/letta/services/__init__.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/services/agent_manager.py +10 -6
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/services/helpers/agent_manager_helper.py +0 -2
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/services/helpers/tool_execution_helper.py +18 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/services/job_manager.py +98 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/services/step_manager.py +2 -0
- letta_nightly-0.6.35.dev20250304104154/letta/services/summarizer/__init__.py +0 -0
- letta_nightly-0.6.35.dev20250304104154/letta/services/summarizer/enums.py +9 -0
- letta_nightly-0.6.35.dev20250304104154/letta/services/summarizer/summarizer.py +102 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/services/tool_execution_sandbox.py +20 -3
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/services/tool_manager.py +1 -1
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/settings.py +2 -0
- letta_nightly-0.6.35.dev20250304104154/letta/tracing.py +225 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/pyproject.toml +6 -5
- letta_nightly-0.6.34.dev20250303104329/letta/chat_only_agent.py +0 -101
- letta_nightly-0.6.34.dev20250303104329/letta/server/rest_api/routers/v1/voice.py +0 -315
- letta_nightly-0.6.34.dev20250303104329/letta/tracing.py +0 -205
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/LICENSE +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/README.md +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/__main__.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329/letta/client → letta_nightly-0.6.35.dev20250304104154/letta/agents}/__init__.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/benchmark/benchmark.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/benchmark/constants.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/cli/cli.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/cli/cli_config.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/cli/cli_load.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329/letta/functions → letta_nightly-0.6.35.dev20250304104154/letta/client}/__init__.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/client/client.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/client/streaming.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/client/utils.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/config.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/data_sources/connectors.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/data_sources/connectors_helper.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/embeddings.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/errors.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329/letta/humans → letta_nightly-0.6.35.dev20250304104154/letta/functions}/__init__.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/functions/function_sets/base.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/functions/function_sets/extras.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/functions/function_sets/multi_agent.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/functions/functions.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/functions/interface.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/helpers/__init__.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/helpers/composio_helpers.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/helpers/datetime_helpers.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/helpers/json_helpers.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/helpers/tool_execution_helper.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/helpers/tool_rule_solver.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329/letta/llm_api → letta_nightly-0.6.35.dev20250304104154/letta/humans}/__init__.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/humans/examples/basic.txt +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/humans/examples/cs_phd.txt +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/interface.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329/letta/local_llm → letta_nightly-0.6.35.dev20250304104154/letta/interfaces}/__init__.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329/letta/local_llm/grammars → letta_nightly-0.6.35.dev20250304104154/letta/llm_api}/__init__.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/llm_api/aws_bedrock.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/llm_api/azure_openai_constants.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/llm_api/cohere.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/llm_api/deepseek.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/llm_api/google_constants.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/llm_api/helpers.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/llm_api/mistral.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/local_llm/README.md +0 -0
- {letta_nightly-0.6.34.dev20250303104329/letta/local_llm/llm_chat_completion_wrappers → letta_nightly-0.6.35.dev20250304104154/letta/local_llm}/__init__.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/local_llm/constants.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/local_llm/function_parser.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329/letta/local_llm/settings → letta_nightly-0.6.35.dev20250304104154/letta/local_llm/grammars}/__init__.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/local_llm/grammars/gbnf_grammar_generator.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/local_llm/grammars/json.gbnf +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/local_llm/grammars/json_func_calls_with_inner_thoughts.gbnf +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/local_llm/json_parser.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/local_llm/koboldcpp/api.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/local_llm/koboldcpp/settings.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/local_llm/llamacpp/api.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/local_llm/llamacpp/settings.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329/letta/openai_backcompat → letta_nightly-0.6.35.dev20250304104154/letta/local_llm/llm_chat_completion_wrappers}/__init__.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/local_llm/llm_chat_completion_wrappers/airoboros.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/local_llm/llm_chat_completion_wrappers/chatml.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/local_llm/llm_chat_completion_wrappers/configurable_wrapper.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/local_llm/llm_chat_completion_wrappers/dolphin.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/local_llm/llm_chat_completion_wrappers/llama3.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/local_llm/llm_chat_completion_wrappers/simple_summary_wrapper.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/local_llm/llm_chat_completion_wrappers/wrapper_base.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/local_llm/llm_chat_completion_wrappers/zephyr.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/local_llm/lmstudio/api.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/local_llm/lmstudio/settings.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/local_llm/ollama/api.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/local_llm/ollama/settings.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329/letta/personas → letta_nightly-0.6.35.dev20250304104154/letta/local_llm/settings}/__init__.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/local_llm/settings/deterministic_mirostat.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/local_llm/settings/settings.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/local_llm/settings/simple.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/local_llm/utils.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/local_llm/vllm/api.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/local_llm/webui/api.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/local_llm/webui/legacy_api.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/local_llm/webui/legacy_settings.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/local_llm/webui/settings.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/main.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/memory.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/offline_memory_agent.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329/letta/prompts → letta_nightly-0.6.35.dev20250304104154/letta/openai_backcompat}/__init__.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/openai_backcompat/openai_object.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/orm/__all__.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/orm/__init__.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/orm/agent.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/orm/agents_tags.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/orm/base.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/orm/block.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/orm/blocks_agents.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/orm/custom_columns.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/orm/enums.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/orm/errors.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/orm/file.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/orm/identities_agents.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/orm/identity.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/orm/job.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/orm/job_messages.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/orm/message.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/orm/mixins.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/orm/organization.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/orm/passage.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/orm/provider.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/orm/sandbox_config.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/orm/source.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/orm/sources_agents.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/orm/sqlalchemy_base.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/orm/sqlite_functions.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/orm/tools_agents.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/orm/user.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329/letta/server → letta_nightly-0.6.35.dev20250304104154/letta/personas}/__init__.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/personas/examples/anna_pa.txt +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/personas/examples/google_search_persona.txt +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/personas/examples/memgpt_doc.txt +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/personas/examples/memgpt_starter.txt +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/personas/examples/o1_persona.txt +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/personas/examples/offline_memory_persona.txt +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/personas/examples/sam.txt +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/personas/examples/sam_pov.txt +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/personas/examples/sam_simple_pov_gpt35.txt +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/personas/examples/sqldb/test.db +0 -0
- {letta_nightly-0.6.34.dev20250303104329/letta/server/rest_api → letta_nightly-0.6.35.dev20250304104154/letta/prompts}/__init__.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/prompts/gpt_summarize.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/prompts/gpt_system.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/prompts/system/memgpt_base.txt +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/prompts/system/memgpt_chat.txt +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/prompts/system/memgpt_chat_compressed.txt +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/prompts/system/memgpt_chat_fstring.txt +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/prompts/system/memgpt_doc.txt +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/prompts/system/memgpt_gpt35_extralong.txt +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/prompts/system/memgpt_intuitive_knowledge.txt +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/prompts/system/memgpt_modified_chat.txt +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/prompts/system/memgpt_modified_o1.txt +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/prompts/system/memgpt_offline_memory.txt +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/prompts/system/memgpt_offline_memory_chat.txt +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/pytest.ini +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/schemas/block.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/schemas/embedding_config.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/schemas/embedding_config_overrides.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/schemas/enums.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/schemas/environment_variables.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/schemas/file.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/schemas/health.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/schemas/identity.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/schemas/job.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/schemas/letta_base.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/schemas/letta_message.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/schemas/letta_request.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/schemas/letta_response.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/schemas/llm_config.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/schemas/llm_config_overrides.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/schemas/memory.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/schemas/message.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/schemas/openai/chat_completion_request.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/schemas/openai/chat_completion_response.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/schemas/openai/chat_completions.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/schemas/openai/embedding_response.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/schemas/openai/openai.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/schemas/organization.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/schemas/passage.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/schemas/providers.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/schemas/run.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/schemas/sandbox_config.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/schemas/source.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/schemas/tool_rule.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/schemas/usage.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/schemas/user.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/serialize_schemas/__init__.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/serialize_schemas/agent.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/serialize_schemas/base.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/serialize_schemas/custom_fields.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/serialize_schemas/message.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329/letta/server/rest_api/auth → letta_nightly-0.6.35.dev20250304104154/letta/server}/__init__.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/server/constants.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/server/db.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/server/generate_openapi_schema.sh +0 -0
- {letta_nightly-0.6.34.dev20250303104329/letta/server/rest_api/routers → letta_nightly-0.6.35.dev20250304104154/letta/server/rest_api}/__init__.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329/letta/server/rest_api/routers/openai/chat_completions → letta_nightly-0.6.35.dev20250304104154/letta/server/rest_api/auth}/__init__.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/server/rest_api/auth/index.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/server/rest_api/auth_token.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/server/rest_api/chat_completions_interface.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/server/rest_api/interface.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/server/rest_api/optimistic_json_parser.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329/letta/server/ws_api → letta_nightly-0.6.35.dev20250304104154/letta/server/rest_api/routers}/__init__.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329/letta/services → letta_nightly-0.6.35.dev20250304104154/letta/server/rest_api/routers/openai/chat_completions}/__init__.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/server/rest_api/routers/openai/chat_completions/chat_completions.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/server/rest_api/routers/v1/__init__.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/server/rest_api/routers/v1/blocks.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/server/rest_api/routers/v1/health.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/server/rest_api/routers/v1/jobs.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/server/rest_api/routers/v1/llms.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/server/rest_api/routers/v1/organizations.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/server/rest_api/routers/v1/providers.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/server/rest_api/routers/v1/sandbox_configs.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/server/rest_api/routers/v1/sources.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/server/rest_api/routers/v1/steps.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/server/rest_api/routers/v1/tags.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/server/rest_api/routers/v1/users.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/server/rest_api/static_files.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/server/startup.sh +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/server/static_files/assets/index-048c9598.js +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/server/static_files/assets/index-0e31b727.css +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/server/static_files/favicon.ico +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/server/static_files/index.html +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/server/static_files/memgpt_logo_transparent.png +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/server/utils.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/server/ws_api/example_client.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/server/ws_api/interface.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/server/ws_api/protocol.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/server/ws_api/server.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/services/block_manager.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/services/identity_manager.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/services/message_manager.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/services/organization_manager.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/services/passage_manager.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/services/per_agent_lock_manager.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/services/provider_manager.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/services/sandbox_config_manager.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/services/source_manager.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/services/user_manager.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/streaming_interface.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/streaming_utils.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/system.py +0 -0
- {letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/utils.py +0 -0
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
Metadata-Version: 2.1
|
|
2
2
|
Name: letta-nightly
|
|
3
|
-
Version: 0.6.
|
|
3
|
+
Version: 0.6.35.dev20250304104154
|
|
4
4
|
Summary: Create LLM agents with long-term memory and custom tools
|
|
5
5
|
License: Apache License
|
|
6
6
|
Author: Letta Team
|
|
@@ -30,6 +30,7 @@ Requires-Dist: brotli (>=1.1.0,<2.0.0)
|
|
|
30
30
|
Requires-Dist: colorama (>=0.4.6,<0.5.0)
|
|
31
31
|
Requires-Dist: composio-core (>=0.7.2,<0.8.0)
|
|
32
32
|
Requires-Dist: composio-langchain (>=0.7.2,<0.8.0)
|
|
33
|
+
Requires-Dist: datamodel-code-generator[http] (>=0.25.0,<0.26.0)
|
|
33
34
|
Requires-Dist: datasets (>=2.14.6,<3.0.0) ; extra == "dev" or extra == "all"
|
|
34
35
|
Requires-Dist: demjson3 (>=3.0.6,<4.0.0)
|
|
35
36
|
Requires-Dist: docker (>=7.1.0,<8.0.0) ; extra == "external-tools" or extra == "all"
|
|
@@ -56,10 +57,10 @@ Requires-Dist: marshmallow-sqlalchemy (>=1.4.1,<2.0.0)
|
|
|
56
57
|
Requires-Dist: nltk (>=3.8.1,<4.0.0)
|
|
57
58
|
Requires-Dist: numpy (>=1.26.2,<2.0.0)
|
|
58
59
|
Requires-Dist: openai (>=1.60.0,<2.0.0)
|
|
59
|
-
Requires-Dist: opentelemetry-api (
|
|
60
|
-
Requires-Dist: opentelemetry-exporter-otlp (
|
|
61
|
-
Requires-Dist: opentelemetry-instrumentation-requests (
|
|
62
|
-
Requires-Dist: opentelemetry-sdk (
|
|
60
|
+
Requires-Dist: opentelemetry-api (==1.30.0)
|
|
61
|
+
Requires-Dist: opentelemetry-exporter-otlp (==1.30.0)
|
|
62
|
+
Requires-Dist: opentelemetry-instrumentation-requests (==0.51b0)
|
|
63
|
+
Requires-Dist: opentelemetry-sdk (==1.30.0)
|
|
63
64
|
Requires-Dist: pathvalidate (>=3.2.1,<4.0.0)
|
|
64
65
|
Requires-Dist: pexpect (>=4.9.0,<5.0.0) ; extra == "dev" or extra == "all"
|
|
65
66
|
Requires-Dist: pg8000 (>=1.30.3,<2.0.0) ; extra == "postgres" or extra == "all"
|
{letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/agent.py
RENAMED
|
@@ -60,16 +60,8 @@ from letta.services.tool_manager import ToolManager
|
|
|
60
60
|
from letta.settings import summarizer_settings
|
|
61
61
|
from letta.streaming_interface import StreamingRefreshCLIInterface
|
|
62
62
|
from letta.system import get_heartbeat, get_token_limit_warning, package_function_response, package_summarize_message, package_user_message
|
|
63
|
-
from letta.tracing import trace_method
|
|
64
|
-
from letta.utils import
|
|
65
|
-
count_tokens,
|
|
66
|
-
get_friendly_error_msg,
|
|
67
|
-
get_tool_call_id,
|
|
68
|
-
log_telemetry,
|
|
69
|
-
parse_json,
|
|
70
|
-
printd,
|
|
71
|
-
validate_function_response,
|
|
72
|
-
)
|
|
63
|
+
from letta.tracing import log_event, trace_method
|
|
64
|
+
from letta.utils import count_tokens, get_friendly_error_msg, get_tool_call_id, log_telemetry, parse_json, validate_function_response
|
|
73
65
|
|
|
74
66
|
logger = get_logger(__name__)
|
|
75
67
|
|
|
@@ -315,7 +307,7 @@ class Agent(BaseAgent):
|
|
|
315
307
|
# Return updated messages
|
|
316
308
|
return messages
|
|
317
309
|
|
|
318
|
-
@trace_method
|
|
310
|
+
@trace_method
|
|
319
311
|
def _get_ai_reply(
|
|
320
312
|
self,
|
|
321
313
|
message_sequence: List[Message],
|
|
@@ -408,7 +400,7 @@ class Agent(BaseAgent):
|
|
|
408
400
|
log_telemetry(self.logger, "_handle_ai_response finish catch-all exception")
|
|
409
401
|
raise Exception("Retries exhausted and no valid response received.")
|
|
410
402
|
|
|
411
|
-
@trace_method
|
|
403
|
+
@trace_method
|
|
412
404
|
def _handle_ai_response(
|
|
413
405
|
self,
|
|
414
406
|
response_message: ChatCompletionMessage, # TODO should we eventually move the Message creation outside of this function?
|
|
@@ -546,7 +538,24 @@ class Agent(BaseAgent):
|
|
|
546
538
|
log_telemetry(
|
|
547
539
|
self.logger, "_handle_ai_response execute tool start", function_name=function_name, function_args=function_args
|
|
548
540
|
)
|
|
541
|
+
log_event(
|
|
542
|
+
"tool_call_initiated",
|
|
543
|
+
attributes={
|
|
544
|
+
"function_name": function_name,
|
|
545
|
+
"target_letta_tool": target_letta_tool.model_dump(),
|
|
546
|
+
**{f"function_args.{k}": v for k, v in function_args.items()},
|
|
547
|
+
},
|
|
548
|
+
)
|
|
549
|
+
|
|
549
550
|
function_response, sandbox_run_result = self.execute_tool_and_persist_state(function_name, function_args, target_letta_tool)
|
|
551
|
+
|
|
552
|
+
log_event(
|
|
553
|
+
"tool_call_ended",
|
|
554
|
+
attributes={
|
|
555
|
+
"function_response": function_response,
|
|
556
|
+
"sandbox_run_result": sandbox_run_result.model_dump() if sandbox_run_result else None,
|
|
557
|
+
},
|
|
558
|
+
)
|
|
550
559
|
log_telemetry(
|
|
551
560
|
self.logger, "_handle_ai_response execute tool finish", function_name=function_name, function_args=function_args
|
|
552
561
|
)
|
|
@@ -648,7 +657,7 @@ class Agent(BaseAgent):
|
|
|
648
657
|
log_telemetry(self.logger, "_handle_ai_response finish")
|
|
649
658
|
return messages, heartbeat_request, function_failed
|
|
650
659
|
|
|
651
|
-
@trace_method
|
|
660
|
+
@trace_method
|
|
652
661
|
def step(
|
|
653
662
|
self,
|
|
654
663
|
messages: Union[Message, List[Message]],
|
|
@@ -836,6 +845,13 @@ class Agent(BaseAgent):
|
|
|
836
845
|
f"{CLI_WARNING_PREFIX}last response total_tokens ({current_total_tokens}) > {summarizer_settings.memory_warning_threshold * int(self.agent_state.llm_config.context_window)}"
|
|
837
846
|
)
|
|
838
847
|
|
|
848
|
+
log_event(
|
|
849
|
+
name="memory_pressure_warning",
|
|
850
|
+
attributes={
|
|
851
|
+
"current_total_tokens": current_total_tokens,
|
|
852
|
+
"context_window_limit": self.agent_state.llm_config.context_window,
|
|
853
|
+
},
|
|
854
|
+
)
|
|
839
855
|
# Only deliver the alert if we haven't already (this period)
|
|
840
856
|
if not self.agent_alerted_about_memory_pressure:
|
|
841
857
|
active_memory_warning = True
|
|
@@ -1037,9 +1053,18 @@ class Agent(BaseAgent):
|
|
|
1037
1053
|
self.agent_alerted_about_memory_pressure = False
|
|
1038
1054
|
curr_in_context_messages = self.agent_manager.get_in_context_messages(agent_id=self.agent_state.id, actor=self.user)
|
|
1039
1055
|
|
|
1056
|
+
current_token_count = sum(get_token_counts_for_messages(curr_in_context_messages))
|
|
1040
1057
|
logger.info(f"Ran summarizer, messages length {prior_len} -> {len(curr_in_context_messages)}")
|
|
1041
|
-
logger.info(
|
|
1042
|
-
|
|
1058
|
+
logger.info(f"Summarizer brought down total token count from {sum(token_counts)} -> {current_token_count}")
|
|
1059
|
+
log_event(
|
|
1060
|
+
name="summarization",
|
|
1061
|
+
attributes={
|
|
1062
|
+
"prior_length": prior_len,
|
|
1063
|
+
"current_length": len(curr_in_context_messages),
|
|
1064
|
+
"prior_token_count": sum(token_counts),
|
|
1065
|
+
"current_token_count": current_token_count,
|
|
1066
|
+
"context_window_limit": self.agent_state.llm_config.context_window,
|
|
1067
|
+
},
|
|
1043
1068
|
)
|
|
1044
1069
|
|
|
1045
1070
|
def add_function(self, function_name: str) -> str:
|
|
@@ -0,0 +1,51 @@
|
|
|
1
|
+
from abc import ABC, abstractmethod
|
|
2
|
+
from typing import Any, AsyncGenerator, List
|
|
3
|
+
|
|
4
|
+
import openai
|
|
5
|
+
|
|
6
|
+
from letta.schemas.letta_message import UserMessage
|
|
7
|
+
from letta.schemas.message import Message
|
|
8
|
+
from letta.schemas.user import User
|
|
9
|
+
from letta.services.agent_manager import AgentManager
|
|
10
|
+
from letta.services.message_manager import MessageManager
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
class BaseAgent(ABC):
|
|
14
|
+
"""
|
|
15
|
+
Abstract base class for AI agents, handling message management, tool execution,
|
|
16
|
+
and context tracking.
|
|
17
|
+
"""
|
|
18
|
+
|
|
19
|
+
def __init__(
|
|
20
|
+
self,
|
|
21
|
+
agent_id: str,
|
|
22
|
+
openai_client: openai.AsyncClient,
|
|
23
|
+
message_manager: MessageManager,
|
|
24
|
+
agent_manager: AgentManager,
|
|
25
|
+
actor: User,
|
|
26
|
+
):
|
|
27
|
+
self.agent_id = agent_id
|
|
28
|
+
self.openai_client = openai_client
|
|
29
|
+
self.message_manager = message_manager
|
|
30
|
+
self.agent_manager = agent_manager
|
|
31
|
+
self.actor = actor
|
|
32
|
+
|
|
33
|
+
@abstractmethod
|
|
34
|
+
async def step(self, input_message: UserMessage) -> List[Message]:
|
|
35
|
+
"""
|
|
36
|
+
Main execution loop for the agent.
|
|
37
|
+
"""
|
|
38
|
+
raise NotImplementedError
|
|
39
|
+
|
|
40
|
+
@abstractmethod
|
|
41
|
+
async def step_stream(self, input_message: UserMessage) -> AsyncGenerator[str, None]:
|
|
42
|
+
"""
|
|
43
|
+
Main async execution loop for the agent. Implementations must yield messages as SSE events.
|
|
44
|
+
"""
|
|
45
|
+
raise NotImplementedError
|
|
46
|
+
|
|
47
|
+
def pre_process_input_message(self, input_message: UserMessage) -> Any:
|
|
48
|
+
"""
|
|
49
|
+
Pre-process function to run on the input_message.
|
|
50
|
+
"""
|
|
51
|
+
return input_message.model_dump()
|
|
@@ -0,0 +1,72 @@
|
|
|
1
|
+
from typing import AsyncGenerator, Dict, List
|
|
2
|
+
|
|
3
|
+
import openai
|
|
4
|
+
|
|
5
|
+
from letta.agents.base_agent import BaseAgent
|
|
6
|
+
from letta.schemas.agent import AgentState
|
|
7
|
+
from letta.schemas.enums import MessageRole
|
|
8
|
+
from letta.schemas.letta_message import TextContent, UserMessage
|
|
9
|
+
from letta.schemas.message import Message
|
|
10
|
+
from letta.schemas.openai.chat_completion_request import ChatCompletionRequest
|
|
11
|
+
from letta.schemas.user import User
|
|
12
|
+
from letta.services.agent_manager import AgentManager
|
|
13
|
+
from letta.services.message_manager import MessageManager
|
|
14
|
+
|
|
15
|
+
|
|
16
|
+
class EphemeralAgent(BaseAgent):
|
|
17
|
+
"""
|
|
18
|
+
A stateless agent (thin wrapper around OpenAI)
|
|
19
|
+
|
|
20
|
+
# TODO: Extend to more clients
|
|
21
|
+
"""
|
|
22
|
+
|
|
23
|
+
def __init__(
|
|
24
|
+
self,
|
|
25
|
+
agent_id: str,
|
|
26
|
+
openai_client: openai.AsyncClient,
|
|
27
|
+
message_manager: MessageManager,
|
|
28
|
+
agent_manager: AgentManager,
|
|
29
|
+
actor: User,
|
|
30
|
+
):
|
|
31
|
+
super().__init__(
|
|
32
|
+
agent_id=agent_id,
|
|
33
|
+
openai_client=openai_client,
|
|
34
|
+
message_manager=message_manager,
|
|
35
|
+
agent_manager=agent_manager,
|
|
36
|
+
actor=actor,
|
|
37
|
+
)
|
|
38
|
+
|
|
39
|
+
async def step(self, input_message: UserMessage) -> List[Message]:
|
|
40
|
+
"""
|
|
41
|
+
Synchronous method that takes a user's input text and returns a summary from OpenAI.
|
|
42
|
+
Returns a list of ephemeral Message objects containing both the user text and the assistant summary.
|
|
43
|
+
"""
|
|
44
|
+
agent_state = self.agent_manager.get_agent_by_id(agent_id=self.agent_id, actor=self.actor)
|
|
45
|
+
|
|
46
|
+
input_message = self.pre_process_input_message(input_message=input_message)
|
|
47
|
+
request = self._build_openai_request([input_message], agent_state)
|
|
48
|
+
|
|
49
|
+
chat_completion = await self.openai_client.chat.completions.create(**request.model_dump(exclude_unset=True))
|
|
50
|
+
|
|
51
|
+
return [
|
|
52
|
+
Message(
|
|
53
|
+
role=MessageRole.assistant,
|
|
54
|
+
content=[TextContent(text=chat_completion.choices[0].message.content.strip())],
|
|
55
|
+
)
|
|
56
|
+
]
|
|
57
|
+
|
|
58
|
+
def _build_openai_request(self, openai_messages: List[Dict], agent_state: AgentState) -> ChatCompletionRequest:
|
|
59
|
+
openai_request = ChatCompletionRequest(
|
|
60
|
+
model=agent_state.llm_config.model,
|
|
61
|
+
messages=openai_messages,
|
|
62
|
+
user=self.actor.id,
|
|
63
|
+
max_completion_tokens=agent_state.llm_config.max_tokens,
|
|
64
|
+
temperature=agent_state.llm_config.temperature,
|
|
65
|
+
)
|
|
66
|
+
return openai_request
|
|
67
|
+
|
|
68
|
+
async def step_stream(self, input_message: UserMessage) -> AsyncGenerator[str, None]:
|
|
69
|
+
"""
|
|
70
|
+
This agent is synchronous-only. If called in an async context, raise an error.
|
|
71
|
+
"""
|
|
72
|
+
raise NotImplementedError("EphemeralAgent does not support async step.")
|
|
@@ -0,0 +1,315 @@
|
|
|
1
|
+
import json
|
|
2
|
+
import uuid
|
|
3
|
+
from typing import Any, AsyncGenerator, Dict, List, Tuple
|
|
4
|
+
|
|
5
|
+
import openai
|
|
6
|
+
|
|
7
|
+
from letta.agents.base_agent import BaseAgent
|
|
8
|
+
from letta.agents.ephemeral_agent import EphemeralAgent
|
|
9
|
+
from letta.constants import NON_USER_MSG_PREFIX
|
|
10
|
+
from letta.helpers.datetime_helpers import get_utc_time
|
|
11
|
+
from letta.helpers.tool_execution_helper import (
|
|
12
|
+
add_pre_execution_message,
|
|
13
|
+
enable_strict_mode,
|
|
14
|
+
execute_external_tool,
|
|
15
|
+
remove_request_heartbeat,
|
|
16
|
+
)
|
|
17
|
+
from letta.interfaces.openai_chat_completions_streaming_interface import OpenAIChatCompletionsStreamingInterface
|
|
18
|
+
from letta.log import get_logger
|
|
19
|
+
from letta.orm.enums import ToolType
|
|
20
|
+
from letta.schemas.agent import AgentState
|
|
21
|
+
from letta.schemas.block import BlockUpdate
|
|
22
|
+
from letta.schemas.message import Message, MessageUpdate
|
|
23
|
+
from letta.schemas.openai.chat_completion_request import (
|
|
24
|
+
AssistantMessage,
|
|
25
|
+
ChatCompletionRequest,
|
|
26
|
+
Tool,
|
|
27
|
+
ToolCall,
|
|
28
|
+
ToolCallFunction,
|
|
29
|
+
ToolMessage,
|
|
30
|
+
UserMessage,
|
|
31
|
+
)
|
|
32
|
+
from letta.schemas.user import User
|
|
33
|
+
from letta.server.rest_api.utils import (
|
|
34
|
+
convert_letta_messages_to_openai,
|
|
35
|
+
create_assistant_messages_from_openai_response,
|
|
36
|
+
create_tool_call_messages_from_openai_response,
|
|
37
|
+
create_user_message,
|
|
38
|
+
)
|
|
39
|
+
from letta.services.agent_manager import AgentManager
|
|
40
|
+
from letta.services.block_manager import BlockManager
|
|
41
|
+
from letta.services.helpers.agent_manager_helper import compile_system_message
|
|
42
|
+
from letta.services.message_manager import MessageManager
|
|
43
|
+
from letta.services.summarizer.enums import SummarizationMode
|
|
44
|
+
from letta.services.summarizer.summarizer import Summarizer
|
|
45
|
+
from letta.utils import united_diff
|
|
46
|
+
|
|
47
|
+
logger = get_logger(__name__)
|
|
48
|
+
|
|
49
|
+
|
|
50
|
+
class LowLatencyAgent(BaseAgent):
|
|
51
|
+
"""
|
|
52
|
+
A function-calling loop for streaming OpenAI responses with tool execution.
|
|
53
|
+
This agent:
|
|
54
|
+
- Streams partial tokens in real-time for low-latency output.
|
|
55
|
+
- Detects tool calls and invokes external tools.
|
|
56
|
+
- Gracefully handles OpenAI API failures (429, etc.) and streams errors.
|
|
57
|
+
"""
|
|
58
|
+
|
|
59
|
+
def __init__(
|
|
60
|
+
self,
|
|
61
|
+
agent_id: str,
|
|
62
|
+
openai_client: openai.AsyncClient,
|
|
63
|
+
message_manager: MessageManager,
|
|
64
|
+
agent_manager: AgentManager,
|
|
65
|
+
block_manager: BlockManager,
|
|
66
|
+
actor: User,
|
|
67
|
+
summarization_mode: SummarizationMode = SummarizationMode.STATIC_MESSAGE_BUFFER,
|
|
68
|
+
message_buffer_limit: int = 10,
|
|
69
|
+
message_buffer_min: int = 4,
|
|
70
|
+
):
|
|
71
|
+
super().__init__(
|
|
72
|
+
agent_id=agent_id, openai_client=openai_client, message_manager=message_manager, agent_manager=agent_manager, actor=actor
|
|
73
|
+
)
|
|
74
|
+
|
|
75
|
+
# TODO: Make this more general, factorable
|
|
76
|
+
# Summarizer settings
|
|
77
|
+
self.block_manager = block_manager
|
|
78
|
+
# TODO: This is not guaranteed to exist!
|
|
79
|
+
self.summary_block_label = "human"
|
|
80
|
+
self.summarizer = Summarizer(
|
|
81
|
+
mode=summarization_mode,
|
|
82
|
+
summarizer_agent=EphemeralAgent(
|
|
83
|
+
agent_id=agent_id, openai_client=openai_client, message_manager=message_manager, agent_manager=agent_manager, actor=actor
|
|
84
|
+
),
|
|
85
|
+
message_buffer_limit=message_buffer_limit,
|
|
86
|
+
message_buffer_min=message_buffer_min,
|
|
87
|
+
)
|
|
88
|
+
self.message_buffer_limit = message_buffer_limit
|
|
89
|
+
self.message_buffer_min = message_buffer_min
|
|
90
|
+
|
|
91
|
+
async def step(self, input_message: UserMessage) -> List[Message]:
|
|
92
|
+
raise NotImplementedError("LowLatencyAgent does not have a synchronous step implemented currently.")
|
|
93
|
+
|
|
94
|
+
async def step_stream(self, input_message: UserMessage) -> AsyncGenerator[str, None]:
|
|
95
|
+
"""
|
|
96
|
+
Async generator that yields partial tokens as SSE events, handles tool calls,
|
|
97
|
+
and streams error messages if OpenAI API failures occur.
|
|
98
|
+
"""
|
|
99
|
+
input_message = self.pre_process_input_message(input_message=input_message)
|
|
100
|
+
agent_state = self.agent_manager.get_agent_by_id(agent_id=self.agent_id, actor=self.actor)
|
|
101
|
+
in_context_messages = self.message_manager.get_messages_by_ids(message_ids=agent_state.message_ids, actor=self.actor)
|
|
102
|
+
letta_message_db_queue = [create_user_message(input_message=input_message, agent_id=agent_state.id, actor=self.actor)]
|
|
103
|
+
in_memory_message_history = [input_message]
|
|
104
|
+
|
|
105
|
+
while True:
|
|
106
|
+
# Constantly pull down and integrate memory blocks
|
|
107
|
+
in_context_messages = self._rebuild_memory(in_context_messages=in_context_messages, agent_state=agent_state)
|
|
108
|
+
|
|
109
|
+
# Convert Letta messages to OpenAI messages
|
|
110
|
+
openai_messages = convert_letta_messages_to_openai(in_context_messages)
|
|
111
|
+
openai_messages.extend(in_memory_message_history)
|
|
112
|
+
request = self._build_openai_request(openai_messages, agent_state)
|
|
113
|
+
|
|
114
|
+
# Execute the request
|
|
115
|
+
stream = await self.openai_client.chat.completions.create(**request.model_dump(exclude_unset=True))
|
|
116
|
+
streaming_interface = OpenAIChatCompletionsStreamingInterface(stream_pre_execution_message=True)
|
|
117
|
+
|
|
118
|
+
async for sse in streaming_interface.process(stream):
|
|
119
|
+
yield sse
|
|
120
|
+
|
|
121
|
+
# Process the AI response (buffered messages, tool execution, etc.)
|
|
122
|
+
continue_execution = await self._handle_ai_response(
|
|
123
|
+
streaming_interface, agent_state, in_memory_message_history, letta_message_db_queue
|
|
124
|
+
)
|
|
125
|
+
|
|
126
|
+
if not continue_execution:
|
|
127
|
+
break
|
|
128
|
+
|
|
129
|
+
# Rebuild context window
|
|
130
|
+
await self._rebuild_context_window(in_context_messages, letta_message_db_queue, agent_state)
|
|
131
|
+
|
|
132
|
+
yield "data: [DONE]\n\n"
|
|
133
|
+
|
|
134
|
+
async def _handle_ai_response(
|
|
135
|
+
self,
|
|
136
|
+
streaming_interface: OpenAIChatCompletionsStreamingInterface,
|
|
137
|
+
agent_state: AgentState,
|
|
138
|
+
in_memory_message_history: List[Dict[str, Any]],
|
|
139
|
+
letta_message_db_queue: List[Any],
|
|
140
|
+
) -> bool:
|
|
141
|
+
"""
|
|
142
|
+
Handles AI response processing, including buffering messages, detecting tool calls,
|
|
143
|
+
executing tools, and deciding whether to continue execution.
|
|
144
|
+
|
|
145
|
+
Returns:
|
|
146
|
+
bool: True if execution should continue, False if the step loop should terminate.
|
|
147
|
+
"""
|
|
148
|
+
# Handle assistant message buffering
|
|
149
|
+
if streaming_interface.content_buffer:
|
|
150
|
+
content = "".join(streaming_interface.content_buffer)
|
|
151
|
+
in_memory_message_history.append({"role": "assistant", "content": content})
|
|
152
|
+
|
|
153
|
+
assistant_msgs = create_assistant_messages_from_openai_response(
|
|
154
|
+
response_text=content,
|
|
155
|
+
agent_id=agent_state.id,
|
|
156
|
+
model=agent_state.llm_config.model,
|
|
157
|
+
actor=self.actor,
|
|
158
|
+
)
|
|
159
|
+
letta_message_db_queue.extend(assistant_msgs)
|
|
160
|
+
|
|
161
|
+
# Handle tool execution if a tool call occurred
|
|
162
|
+
if streaming_interface.tool_call_happened:
|
|
163
|
+
try:
|
|
164
|
+
tool_args = json.loads(streaming_interface.tool_call_args_str)
|
|
165
|
+
except json.JSONDecodeError:
|
|
166
|
+
tool_args = {}
|
|
167
|
+
|
|
168
|
+
tool_call_id = streaming_interface.tool_call_id or f"call_{uuid.uuid4().hex[:8]}"
|
|
169
|
+
|
|
170
|
+
assistant_tool_call_msg = AssistantMessage(
|
|
171
|
+
content=None,
|
|
172
|
+
tool_calls=[
|
|
173
|
+
ToolCall(
|
|
174
|
+
id=tool_call_id,
|
|
175
|
+
function=ToolCallFunction(
|
|
176
|
+
name=streaming_interface.tool_call_name,
|
|
177
|
+
arguments=streaming_interface.tool_call_args_str,
|
|
178
|
+
),
|
|
179
|
+
)
|
|
180
|
+
],
|
|
181
|
+
)
|
|
182
|
+
in_memory_message_history.append(assistant_tool_call_msg.model_dump())
|
|
183
|
+
|
|
184
|
+
tool_result, function_call_success = await self._execute_tool(
|
|
185
|
+
tool_name=streaming_interface.tool_call_name,
|
|
186
|
+
tool_args=tool_args,
|
|
187
|
+
agent_state=agent_state,
|
|
188
|
+
)
|
|
189
|
+
|
|
190
|
+
tool_message = ToolMessage(content=json.dumps({"result": tool_result}), tool_call_id=tool_call_id)
|
|
191
|
+
in_memory_message_history.append(tool_message.model_dump())
|
|
192
|
+
|
|
193
|
+
heartbeat_user_message = UserMessage(
|
|
194
|
+
content=f"{NON_USER_MSG_PREFIX} Tool finished executing. Summarize the result for the user."
|
|
195
|
+
)
|
|
196
|
+
in_memory_message_history.append(heartbeat_user_message.model_dump())
|
|
197
|
+
|
|
198
|
+
tool_call_messages = create_tool_call_messages_from_openai_response(
|
|
199
|
+
agent_id=agent_state.id,
|
|
200
|
+
model=agent_state.llm_config.model,
|
|
201
|
+
function_name=streaming_interface.tool_call_name,
|
|
202
|
+
function_arguments=tool_args,
|
|
203
|
+
tool_call_id=tool_call_id,
|
|
204
|
+
function_call_success=function_call_success,
|
|
205
|
+
function_response=tool_result,
|
|
206
|
+
actor=self.actor,
|
|
207
|
+
add_heartbeat_request_system_message=True,
|
|
208
|
+
)
|
|
209
|
+
letta_message_db_queue.extend(tool_call_messages)
|
|
210
|
+
|
|
211
|
+
# Continue execution by restarting the loop with updated context
|
|
212
|
+
return True
|
|
213
|
+
|
|
214
|
+
# Exit the loop if finish_reason_stop or no tool call occurred
|
|
215
|
+
return not streaming_interface.finish_reason_stop
|
|
216
|
+
|
|
217
|
+
async def _rebuild_context_window(
|
|
218
|
+
self, in_context_messages: List[Message], letta_message_db_queue: List[Message], agent_state: AgentState
|
|
219
|
+
) -> None:
|
|
220
|
+
new_letta_messages = self.message_manager.create_many_messages(letta_message_db_queue, actor=self.actor)
|
|
221
|
+
|
|
222
|
+
# TODO: Make this more general and configurable, less brittle
|
|
223
|
+
target_block = next(b for b in agent_state.memory.blocks if b.label == self.summary_block_label)
|
|
224
|
+
previous_summary = self.block_manager.get_block_by_id(block_id=target_block.id, actor=self.actor).value
|
|
225
|
+
new_in_context_messages, summary_str, updated = await self.summarizer.summarize(
|
|
226
|
+
in_context_messages=in_context_messages, new_letta_messages=new_letta_messages, previous_summary=previous_summary
|
|
227
|
+
)
|
|
228
|
+
|
|
229
|
+
if updated:
|
|
230
|
+
self.block_manager.update_block(block_id=target_block.id, block_update=BlockUpdate(value=summary_str), actor=self.actor)
|
|
231
|
+
|
|
232
|
+
self.agent_manager.set_in_context_messages(
|
|
233
|
+
agent_id=self.agent_id, message_ids=[m.id for m in new_in_context_messages], actor=self.actor
|
|
234
|
+
)
|
|
235
|
+
|
|
236
|
+
def _rebuild_memory(self, in_context_messages: List[Message], agent_state: AgentState) -> List[Message]:
|
|
237
|
+
# TODO: This is a pretty brittle pattern established all over our code, need to get rid of this
|
|
238
|
+
curr_system_message = in_context_messages[0]
|
|
239
|
+
curr_memory_str = agent_state.memory.compile()
|
|
240
|
+
if curr_memory_str in curr_system_message.text:
|
|
241
|
+
# NOTE: could this cause issues if a block is removed? (substring match would still work)
|
|
242
|
+
logger.debug(
|
|
243
|
+
f"Memory hasn't changed for agent id={agent_state.id} and actor=({self.actor.id}, {self.actor.name}), skipping system prompt rebuild"
|
|
244
|
+
)
|
|
245
|
+
return in_context_messages
|
|
246
|
+
|
|
247
|
+
memory_edit_timestamp = get_utc_time()
|
|
248
|
+
new_system_message_str = compile_system_message(
|
|
249
|
+
system_prompt=agent_state.system,
|
|
250
|
+
in_context_memory=agent_state.memory,
|
|
251
|
+
in_context_memory_last_edit=memory_edit_timestamp,
|
|
252
|
+
)
|
|
253
|
+
|
|
254
|
+
diff = united_diff(curr_system_message.text, new_system_message_str)
|
|
255
|
+
if len(diff) > 0:
|
|
256
|
+
logger.info(f"Rebuilding system with new memory...\nDiff:\n{diff}")
|
|
257
|
+
|
|
258
|
+
new_system_message = self.message_manager.update_message_by_id(
|
|
259
|
+
curr_system_message.id, message_update=MessageUpdate(content=new_system_message_str), actor=self.actor
|
|
260
|
+
)
|
|
261
|
+
|
|
262
|
+
# Skip pulling down the agent's memory again to save on a db call
|
|
263
|
+
return [new_system_message] + in_context_messages[1:]
|
|
264
|
+
|
|
265
|
+
else:
|
|
266
|
+
return in_context_messages
|
|
267
|
+
|
|
268
|
+
def _build_openai_request(self, openai_messages: List[Dict], agent_state: AgentState) -> ChatCompletionRequest:
|
|
269
|
+
tool_schemas = self._build_tool_schemas(agent_state)
|
|
270
|
+
tool_choice = "auto" if tool_schemas else None
|
|
271
|
+
|
|
272
|
+
openai_request = ChatCompletionRequest(
|
|
273
|
+
model=agent_state.llm_config.model,
|
|
274
|
+
messages=openai_messages,
|
|
275
|
+
tools=self._build_tool_schemas(agent_state),
|
|
276
|
+
tool_choice=tool_choice,
|
|
277
|
+
user=self.actor.id,
|
|
278
|
+
max_completion_tokens=agent_state.llm_config.max_tokens,
|
|
279
|
+
temperature=agent_state.llm_config.temperature,
|
|
280
|
+
stream=True,
|
|
281
|
+
)
|
|
282
|
+
return openai_request
|
|
283
|
+
|
|
284
|
+
def _build_tool_schemas(self, agent_state: AgentState, external_tools_only=True) -> List[Tool]:
|
|
285
|
+
if external_tools_only:
|
|
286
|
+
tools = [t for t in agent_state.tools if t.tool_type in {ToolType.EXTERNAL_COMPOSIO, ToolType.CUSTOM}]
|
|
287
|
+
else:
|
|
288
|
+
tools = agent_state.tools
|
|
289
|
+
|
|
290
|
+
# TODO: Customize whether or not to have heartbeats, pre_exec_message, etc.
|
|
291
|
+
return [
|
|
292
|
+
Tool(type="function", function=enable_strict_mode(add_pre_execution_message(remove_request_heartbeat(t.json_schema))))
|
|
293
|
+
for t in tools
|
|
294
|
+
]
|
|
295
|
+
|
|
296
|
+
async def _execute_tool(self, tool_name: str, tool_args: dict, agent_state: AgentState) -> Tuple[str, bool]:
|
|
297
|
+
"""
|
|
298
|
+
Executes a tool and returns (result, success_flag).
|
|
299
|
+
"""
|
|
300
|
+
target_tool = next((x for x in agent_state.tools if x.name == tool_name), None)
|
|
301
|
+
if not target_tool:
|
|
302
|
+
return f"Tool not found: {tool_name}", False
|
|
303
|
+
|
|
304
|
+
try:
|
|
305
|
+
tool_result, _ = execute_external_tool(
|
|
306
|
+
agent_state=agent_state,
|
|
307
|
+
function_name=tool_name,
|
|
308
|
+
function_args=tool_args,
|
|
309
|
+
target_letta_tool=target_tool,
|
|
310
|
+
actor=self.actor,
|
|
311
|
+
allow_agent_state_modifications=False,
|
|
312
|
+
)
|
|
313
|
+
return tool_result, True
|
|
314
|
+
except Exception as e:
|
|
315
|
+
return f"Failed to call tool. Error: {e}", False
|
{letta_nightly-0.6.34.dev20250303104329 → letta_nightly-0.6.35.dev20250304104154}/letta/constants.py
RENAMED
|
@@ -27,7 +27,6 @@ TOOL_CALL_ID_MAX_LEN = 29
|
|
|
27
27
|
|
|
28
28
|
# minimum context window size
|
|
29
29
|
MIN_CONTEXT_WINDOW = 4096
|
|
30
|
-
DEFAULT_CONTEXT_WINDOW_SIZE = 32000
|
|
31
30
|
|
|
32
31
|
# embeddings
|
|
33
32
|
MAX_EMBEDDING_DIM = 4096 # maximum supported embeding size - do NOT change or else DBs will need to be reset
|
|
@@ -96,6 +95,9 @@ LLM_MAX_TOKENS = {
|
|
|
96
95
|
"deepseek-chat": 64000,
|
|
97
96
|
"deepseek-reasoner": 64000,
|
|
98
97
|
## OpenAI models: https://platform.openai.com/docs/models/overview
|
|
98
|
+
# gpt-4.5-preview
|
|
99
|
+
"gpt-4.5-preview": 128000,
|
|
100
|
+
"gpt-4.5-preview-2025-02-27": 128000,
|
|
99
101
|
# "o1-preview
|
|
100
102
|
"chatgpt-4o-latest": 128000,
|
|
101
103
|
# "o1-preview-2024-09-12
|
|
@@ -1,6 +1,8 @@
|
|
|
1
1
|
import ast
|
|
2
2
|
import json
|
|
3
|
-
from typing import Dict
|
|
3
|
+
from typing import Dict, Optional, Tuple
|
|
4
|
+
|
|
5
|
+
from letta.errors import LettaToolCreateError
|
|
4
6
|
|
|
5
7
|
# Registry of known types for annotation resolution
|
|
6
8
|
BUILTIN_TYPES = {
|
|
@@ -116,3 +118,50 @@ def coerce_dict_args_by_annotations(function_args: dict, annotations: Dict[str,
|
|
|
116
118
|
except (TypeError, ValueError, json.JSONDecodeError, SyntaxError) as e:
|
|
117
119
|
raise ValueError(f"Failed to coerce argument '{arg_name}' to {annotation_str}: {e}")
|
|
118
120
|
return coerced_args
|
|
121
|
+
|
|
122
|
+
|
|
123
|
+
def get_function_name_and_description(source_code: str, name: Optional[str] = None) -> Tuple[str, str]:
|
|
124
|
+
"""Gets the name and description for a given function source code by parsing the AST.
|
|
125
|
+
|
|
126
|
+
Args:
|
|
127
|
+
source_code: The source code to parse
|
|
128
|
+
name: Optional override for the function name
|
|
129
|
+
|
|
130
|
+
Returns:
|
|
131
|
+
Tuple of (function_name, docstring)
|
|
132
|
+
"""
|
|
133
|
+
try:
|
|
134
|
+
# Parse the source code into an AST
|
|
135
|
+
tree = ast.parse(source_code)
|
|
136
|
+
|
|
137
|
+
# Find the last function definition
|
|
138
|
+
function_def = None
|
|
139
|
+
for node in ast.walk(tree):
|
|
140
|
+
if isinstance(node, ast.FunctionDef):
|
|
141
|
+
function_def = node
|
|
142
|
+
|
|
143
|
+
if not function_def:
|
|
144
|
+
raise LettaToolCreateError("No function definition found in source code")
|
|
145
|
+
|
|
146
|
+
# Get the function name
|
|
147
|
+
function_name = name if name is not None else function_def.name
|
|
148
|
+
|
|
149
|
+
# Get the docstring if it exists
|
|
150
|
+
docstring = ast.get_docstring(function_def)
|
|
151
|
+
|
|
152
|
+
if not function_name:
|
|
153
|
+
raise LettaToolCreateError("Could not determine function name")
|
|
154
|
+
|
|
155
|
+
if not docstring:
|
|
156
|
+
raise LettaToolCreateError("Docstring is missing")
|
|
157
|
+
|
|
158
|
+
return function_name, docstring
|
|
159
|
+
|
|
160
|
+
except Exception as e:
|
|
161
|
+
raise LettaToolCreateError(f"Failed to parse function name and docstring: {str(e)}")
|
|
162
|
+
|
|
163
|
+
except Exception as e:
|
|
164
|
+
import traceback
|
|
165
|
+
|
|
166
|
+
traceback.print_exc()
|
|
167
|
+
raise LettaToolCreateError(f"Name and docstring generation failed: {str(e)}")
|