letta-nightly 0.6.48.dev20250407104216__py3-none-any.whl → 0.6.49.dev20250408030511__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of letta-nightly might be problematic.
- letta/__init__.py +1 -1
- letta/agent.py +47 -12
- letta/agents/base_agent.py +7 -4
- letta/agents/helpers.py +52 -0
- letta/agents/letta_agent.py +105 -42
- letta/agents/voice_agent.py +2 -2
- letta/constants.py +13 -1
- letta/errors.py +10 -3
- letta/functions/function_sets/base.py +65 -0
- letta/functions/interface.py +2 -2
- letta/functions/mcp_client/base_client.py +18 -1
- letta/{dynamic_multi_agent.py → groups/dynamic_multi_agent.py} +3 -0
- letta/groups/helpers.py +113 -0
- letta/{round_robin_multi_agent.py → groups/round_robin_multi_agent.py} +2 -0
- letta/groups/sleeptime_multi_agent.py +259 -0
- letta/{supervisor_multi_agent.py → groups/supervisor_multi_agent.py} +1 -0
- letta/helpers/converters.py +109 -7
- letta/helpers/message_helper.py +1 -0
- letta/helpers/tool_rule_solver.py +40 -23
- letta/interface.py +12 -5
- letta/interfaces/anthropic_streaming_interface.py +329 -0
- letta/llm_api/anthropic.py +12 -1
- letta/llm_api/anthropic_client.py +65 -14
- letta/llm_api/azure_openai.py +2 -2
- letta/llm_api/google_ai_client.py +13 -2
- letta/llm_api/google_constants.py +3 -0
- letta/llm_api/google_vertex_client.py +2 -2
- letta/llm_api/llm_api_tools.py +1 -1
- letta/llm_api/llm_client.py +7 -0
- letta/llm_api/llm_client_base.py +2 -7
- letta/llm_api/openai.py +7 -1
- letta/llm_api/openai_client.py +250 -0
- letta/orm/__init__.py +4 -0
- letta/orm/agent.py +6 -0
- letta/orm/block.py +32 -2
- letta/orm/block_history.py +46 -0
- letta/orm/custom_columns.py +60 -0
- letta/orm/enums.py +7 -0
- letta/orm/group.py +6 -0
- letta/orm/groups_blocks.py +13 -0
- letta/orm/llm_batch_items.py +55 -0
- letta/orm/llm_batch_job.py +48 -0
- letta/orm/message.py +7 -1
- letta/orm/organization.py +2 -0
- letta/orm/sqlalchemy_base.py +18 -15
- letta/prompts/system/memgpt_sleeptime_chat.txt +52 -0
- letta/prompts/system/sleeptime.txt +26 -0
- letta/schemas/agent.py +13 -1
- letta/schemas/enums.py +17 -2
- letta/schemas/group.py +14 -1
- letta/schemas/letta_message.py +5 -3
- letta/schemas/llm_batch_job.py +53 -0
- letta/schemas/llm_config.py +14 -4
- letta/schemas/message.py +44 -0
- letta/schemas/tool.py +3 -0
- letta/schemas/usage.py +1 -0
- letta/server/db.py +2 -0
- letta/server/rest_api/app.py +1 -1
- letta/server/rest_api/chat_completions_interface.py +8 -3
- letta/server/rest_api/interface.py +36 -7
- letta/server/rest_api/routers/v1/agents.py +53 -39
- letta/server/rest_api/routers/v1/runs.py +14 -2
- letta/server/rest_api/utils.py +15 -4
- letta/server/server.py +120 -71
- letta/services/agent_manager.py +70 -6
- letta/services/block_manager.py +190 -2
- letta/services/group_manager.py +68 -0
- letta/services/helpers/agent_manager_helper.py +6 -4
- letta/services/llm_batch_manager.py +139 -0
- letta/services/message_manager.py +17 -31
- letta/services/tool_executor/tool_execution_sandbox.py +1 -3
- letta/services/tool_executor/tool_executor.py +9 -20
- letta/services/tool_manager.py +14 -3
- letta/services/tool_sandbox/__init__.py +0 -0
- letta/services/tool_sandbox/base.py +188 -0
- letta/services/tool_sandbox/e2b_sandbox.py +116 -0
- letta/services/tool_sandbox/local_sandbox.py +221 -0
- letta/sleeptime_agent.py +61 -0
- letta/streaming_interface.py +20 -10
- letta/utils.py +4 -0
- {letta_nightly-0.6.48.dev20250407104216.dist-info → letta_nightly-0.6.49.dev20250408030511.dist-info}/METADATA +2 -2
- {letta_nightly-0.6.48.dev20250407104216.dist-info → letta_nightly-0.6.49.dev20250408030511.dist-info}/RECORD +85 -69
- letta/offline_memory_agent.py +0 -173
- letta/services/tool_executor/async_tool_execution_sandbox.py +0 -397
- {letta_nightly-0.6.48.dev20250407104216.dist-info → letta_nightly-0.6.49.dev20250408030511.dist-info}/LICENSE +0 -0
- {letta_nightly-0.6.48.dev20250407104216.dist-info → letta_nightly-0.6.49.dev20250408030511.dist-info}/WHEEL +0 -0
- {letta_nightly-0.6.48.dev20250407104216.dist-info → letta_nightly-0.6.49.dev20250408030511.dist-info}/entry_points.txt +0 -0
letta/services/tool_manager.py
CHANGED

@@ -2,7 +2,15 @@ import importlib
 import warnings
 from typing import List, Optional

-from letta.constants import
+from letta.constants import (
+    BASE_FUNCTION_RETURN_CHAR_LIMIT,
+    BASE_MEMORY_TOOLS,
+    BASE_SLEEPTIME_TOOLS,
+    BASE_TOOLS,
+    LETTA_TOOL_SET,
+    MCP_TOOL_TAG_NAME_PREFIX,
+    MULTI_AGENT_TOOLS,
+)
 from letta.functions.functions import derive_openai_json_schema, load_function_set
 from letta.log import get_logger
 from letta.orm.enums import ToolType

@@ -194,7 +202,7 @@ class ToolManager:
         # create tool in db
         tools = []
         for name, schema in functions_to_schema.items():
-            if name in
+            if name in LETTA_TOOL_SET:
                 if name in BASE_TOOLS:
                     tool_type = ToolType.LETTA_CORE
                     tags = [tool_type.value]

@@ -204,9 +212,12 @@ class ToolManager:
                 elif name in MULTI_AGENT_TOOLS:
                     tool_type = ToolType.LETTA_MULTI_AGENT_CORE
                     tags = [tool_type.value]
+                elif name in BASE_SLEEPTIME_TOOLS:
+                    tool_type = ToolType.LETTA_SLEEPTIME_CORE
+                    tags = [tool_type.value]
                 else:
                     raise ValueError(
-                        f"Tool name {name} is not in the list of base tool names: {BASE_TOOLS + BASE_MEMORY_TOOLS + MULTI_AGENT_TOOLS}"
+                        f"Tool name {name} is not in the list of base tool names: {BASE_TOOLS + BASE_MEMORY_TOOLS + MULTI_AGENT_TOOLS + BASE_SLEEPTIME_TOOLS}"
                     )

             # create the tool
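For context, the hunk above adds one branch to the builtin-tool classification during upsert. A minimal standalone sketch of that classification follows; the set contents and tag strings are illustrative placeholders, not Letta's actual tool lists or enum values.

# Hypothetical mirror of the classification in the diff above.
BASE_TOOLS = {"send_message"}
BASE_MEMORY_TOOLS = {"core_memory_append"}
MULTI_AGENT_TOOLS = {"send_message_to_agent"}
BASE_SLEEPTIME_TOOLS = {"finish_rethinking_memory"}
LETTA_TOOL_SET = BASE_TOOLS | BASE_MEMORY_TOOLS | MULTI_AGENT_TOOLS | BASE_SLEEPTIME_TOOLS

def classify(name: str) -> str:
    """Map a builtin tool name to a tool-type tag, as the upsert loop does."""
    if name not in LETTA_TOOL_SET:
        raise ValueError(f"Tool name {name} is not a base tool")
    if name in BASE_TOOLS:
        return "letta_core"
    if name in BASE_MEMORY_TOOLS:
        return "letta_memory_core"
    if name in MULTI_AGENT_TOOLS:
        return "letta_multi_agent_core"
    return "letta_sleeptime_core"  # the branch added in this release

print(classify("finish_rethinking_memory"))  # letta_sleeptime_core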
letta/services/tool_sandbox/__init__.py
ADDED
File without changes
letta/services/tool_sandbox/base.py
ADDED

@@ -0,0 +1,188 @@
+import ast
+import base64
+import pickle
+import uuid
+from abc import ABC, abstractmethod
+from typing import Any, Dict, Optional, Tuple
+
+from letta.functions.helpers import generate_model_from_args_json_schema
+from letta.schemas.agent import AgentState
+from letta.schemas.sandbox_config import SandboxRunResult
+from letta.services.helpers.tool_execution_helper import add_imports_and_pydantic_schemas_for_args
+from letta.services.organization_manager import OrganizationManager
+from letta.services.sandbox_config_manager import SandboxConfigManager
+from letta.services.tool_manager import ToolManager
+
+
+class AsyncToolSandboxBase(ABC):
+    NAMESPACE = uuid.NAMESPACE_DNS
+    LOCAL_SANDBOX_RESULT_START_MARKER = str(uuid.uuid5(NAMESPACE, "local-sandbox-result-start-marker"))
+    LOCAL_SANDBOX_RESULT_END_MARKER = str(uuid.uuid5(NAMESPACE, "local-sandbox-result-end-marker"))
+    LOCAL_SANDBOX_RESULT_VAR_NAME = "result_ZQqiequkcFwRwwGQMqkt"
+
+    def __init__(self, tool_name: str, args: dict, user, tool_object=None):
+        self.tool_name = tool_name
+        self.args = args
+        self.user = user
+        self.organization = OrganizationManager().get_organization_by_id(self.user.organization_id)
+        self.privileged_tools = self.organization.privileged_tools
+
+        self.tool = tool_object or ToolManager().get_tool_by_name(tool_name=tool_name, actor=self.user)
+        if self.tool is None:
+            raise ValueError(
+                f"Agent attempted to invoke tool {self.tool_name} that does not exist for organization {self.user.organization_id}"
+            )
+
+        self.sandbox_config_manager = SandboxConfigManager()
+
+        # See if we should inject agent_state or not based on the presence of the "agent_state" arg
+        if "agent_state" in self.parse_function_arguments(self.tool.source_code, self.tool.name):
+            self.inject_agent_state = True
+        else:
+            self.inject_agent_state = False
+
+    @abstractmethod
+    async def run(
+        self,
+        agent_state: Optional[AgentState] = None,
+        additional_env_vars: Optional[Dict] = None,
+    ) -> SandboxRunResult:
+        """
+        Run the tool in a sandbox environment asynchronously.
+        Must be implemented by subclasses.
+        """
+        raise NotImplementedError
+
+    def generate_execution_script(self, agent_state: Optional[AgentState], wrap_print_with_markers: bool = False) -> str:
+        """
+        Generate code to run inside of execution sandbox.
+        Serialize the agent state and arguments, call the tool,
+        then base64-encode/pickle the result.
+        """
+        code = "from typing import *\n"
+        code += "import pickle\n"
+        code += "import sys\n"
+        code += "import base64\n"
+
+        # Additional imports to support agent state
+        if self.inject_agent_state:
+            code += "import letta\n"
+            code += "from letta import * \n"
+
+        # Add schema code if available
+        if self.tool.args_json_schema:
+            schema_code = add_imports_and_pydantic_schemas_for_args(self.tool.args_json_schema)
+            if "from __future__ import annotations" in schema_code:
+                schema_code = schema_code.replace("from __future__ import annotations", "").lstrip()
+                code = "from __future__ import annotations\n\n" + code
+            code += schema_code + "\n"
+
+        # Load the agent state
+        if self.inject_agent_state:
+            agent_state_pickle = pickle.dumps(agent_state)
+            code += f"agent_state = pickle.loads({agent_state_pickle})\n"
+        else:
+            code += "agent_state = None\n"
+
+        # Initialize arguments
+        if self.tool.args_json_schema:
+            args_schema = generate_model_from_args_json_schema(self.tool.args_json_schema)
+            code += f"args_object = {args_schema.__name__}(**{self.args})\n"
+            for param in self.args:
+                code += f"{param} = args_object.{param}\n"
+        else:
+            for param in self.args:
+                code += self.initialize_param(param, self.args[param])
+
+        # Insert the tool's source code
+        code += "\n" + self.tool.source_code + "\n"
+
+        # Invoke the function and store the result in a global variable
+        code += (
+            f"{self.LOCAL_SANDBOX_RESULT_VAR_NAME}" + ' = {"results": ' + self.invoke_function_call() + ', "agent_state": agent_state}\n'
+        )
+        code += (
+            f"{self.LOCAL_SANDBOX_RESULT_VAR_NAME} = base64.b64encode("
+            f"pickle.dumps({self.LOCAL_SANDBOX_RESULT_VAR_NAME})"
+            ").decode('utf-8')\n"
+        )
+
+        if wrap_print_with_markers:
+            code += f"sys.stdout.write('{self.LOCAL_SANDBOX_RESULT_START_MARKER}')\n"
+            code += f"sys.stdout.write(str({self.LOCAL_SANDBOX_RESULT_VAR_NAME}))\n"
+            code += f"sys.stdout.write('{self.LOCAL_SANDBOX_RESULT_END_MARKER}')\n"
+        else:
+            code += f"{self.LOCAL_SANDBOX_RESULT_VAR_NAME}\n"
+
+        return code
+
+    def _convert_param_to_value(self, param_type: str, raw_value: str) -> str:
+        """
+        Convert parameter to Python code representation based on JSON schema type.
+        """
+        if param_type == "string":
+            # Safely inject a Python string via pickle
+            value = "pickle.loads(" + str(pickle.dumps(raw_value)) + ")"
+        elif param_type in ["integer", "boolean", "number", "array", "object"]:
+            # This is simplistic. In real usage, ensure correct type-casting or sanitization.
+            value = raw_value
+        else:
+            raise TypeError(f"Unsupported type: {param_type}, raw_value={raw_value}")
+
+        return str(value)
+
+    def initialize_param(self, name: str, raw_value: str) -> str:
+        """
+        Produce code for initializing a single parameter in the generated script.
+        """
+        params = self.tool.json_schema["parameters"]["properties"]
+        spec = params.get(name)
+        if spec is None:
+            # Possibly an extra param like 'self' that we ignore
+            return ""
+
+        param_type = spec.get("type")
+        if param_type is None and spec.get("parameters"):
+            param_type = spec["parameters"].get("type")
+
+        value = self._convert_param_to_value(param_type, raw_value)
+        return f"{name} = {value}\n"
+
+    def invoke_function_call(self) -> str:
+        """
+        Generate the function call code string with the appropriate arguments.
+        """
+        kwargs = []
+        for name in self.args:
+            if name in self.tool.json_schema["parameters"]["properties"]:
+                kwargs.append(name)
+
+        param_list = [f"{arg}={arg}" for arg in kwargs]
+        if self.inject_agent_state:
+            param_list.append("agent_state=agent_state")
+
+        params = ", ".join(param_list)
+        func_call_str = self.tool.name + "(" + params + ")"
+        return func_call_str
+
+    def parse_best_effort(self, text: str) -> Tuple[Any, Optional[AgentState]]:
+        """
+        Decode and unpickle the result from the function execution if possible.
+        Returns (function_return_value, agent_state).
+        """
+        if not text:
+            return None, None
+
+        result = pickle.loads(base64.b64decode(text))
+        agent_state = result["agent_state"]
+        return result["results"], agent_state
+
+    def parse_function_arguments(self, source_code: str, tool_name: str):
+        """Get arguments of a function from its source code"""
+        tree = ast.parse(source_code)
+        args = []
+        for node in ast.walk(tree):
+            if isinstance(node, ast.FunctionDef) and node.name == tool_name:
+                for arg in node.args.args:
+                    args.append(arg.arg)
+        return args
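The result-passing protocol in the generated script is plain base64-over-pickle. A self-contained sketch of the round trip performed by generate_execution_script on the way out and parse_best_effort on the way back (the payload values are made up):

import base64
import pickle

# What the generated script does at the end of execution:
payload = {"results": 42, "agent_state": None}  # illustrative values
encoded = base64.b64encode(pickle.dumps(payload)).decode("utf-8")

# What parse_best_effort does when the sandbox output comes back:
decoded = pickle.loads(base64.b64decode(encoded))
func_return, agent_state = decoded["results"], decoded["agent_state"]
assert func_return == 42 and agent_state is None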
letta/services/tool_sandbox/e2b_sandbox.py
ADDED

@@ -0,0 +1,116 @@
+from typing import Dict, Optional
+
+from letta.log import get_logger
+from letta.schemas.agent import AgentState
+from letta.schemas.sandbox_config import SandboxConfig, SandboxRunResult, SandboxType
+from letta.services.tool_sandbox.base import AsyncToolSandboxBase
+from letta.utils import get_friendly_error_msg
+
+logger = get_logger(__name__)
+
+
+class AsyncToolSandboxE2B(AsyncToolSandboxBase):
+    METADATA_CONFIG_STATE_KEY = "config_state"
+
+    def __init__(self, tool_name: str, args: dict, user, force_recreate=True, tool_object=None):
+        super().__init__(tool_name, args, user, tool_object)
+        self.force_recreate = force_recreate
+
+    async def run(
+        self,
+        agent_state: Optional[AgentState] = None,
+        additional_env_vars: Optional[Dict] = None,
+    ) -> SandboxRunResult:
+        """
+        Run the tool in a sandbox environment asynchronously,
+        *always* using a subprocess for execution.
+        """
+        result = await self.run_e2b_sandbox(agent_state=agent_state, additional_env_vars=additional_env_vars)
+
+        # Simple console logging for demonstration
+        for log_line in (result.stdout or []) + (result.stderr or []):
+            print(f"Tool execution log: {log_line}")
+
+        return result
+
+    async def run_e2b_sandbox(
+        self, agent_state: Optional[AgentState] = None, additional_env_vars: Optional[Dict] = None
+    ) -> SandboxRunResult:
+        sbx_config = self.sandbox_config_manager.get_or_create_default_sandbox_config(sandbox_type=SandboxType.E2B, actor=self.user)
+        # TODO: So this defaults to force recreating always
+        # TODO: Eventually, provision one sandbox PER agent, and that agent re-uses that one specifically
+        e2b_sandbox = await self.create_e2b_sandbox_with_metadata_hash(sandbox_config=sbx_config)
+
+        logger.info(f"E2B Sandbox configurations: {sbx_config}")
+        logger.info(f"E2B Sandbox ID: {e2b_sandbox.sandbox_id}")
+
+        # TODO: This only makes sense if we re-use sandboxes
+        # # Since this sandbox was used, we extend its lifecycle by the timeout
+        # await sbx.set_timeout(sbx_config.get_e2b_config().timeout)
+
+        # Get environment variables for the sandbox
+        # TODO: We set limit to 100 here, but maybe we want it uncapped? Realistically this should be fine.
+        env_vars = self.sandbox_config_manager.get_sandbox_env_vars_as_dict(sandbox_config_id=sbx_config.id, actor=self.user, limit=100)
+        # Get environment variables for this agent specifically
+        if agent_state:
+            env_vars.update(agent_state.get_agent_env_vars_as_dict())
+
+        # Finally, get any that are passed explicitly into the `run` function call
+        if additional_env_vars:
+            env_vars.update(additional_env_vars)
+        code = self.generate_execution_script(agent_state=agent_state)
+
+        execution = await e2b_sandbox.run_code(code, envs=env_vars)
+
+        if execution.results:
+            func_return, agent_state = self.parse_best_effort(execution.results[0].text)
+        elif execution.error:
+            logger.error(f"Executing tool {self.tool_name} raised a {execution.error.name} with message: \n{execution.error.value}")
+            logger.error(f"Traceback from e2b sandbox: \n{execution.error.traceback}")
+            func_return = get_friendly_error_msg(
+                function_name=self.tool_name, exception_name=execution.error.name, exception_message=execution.error.value
+            )
+            execution.logs.stderr.append(execution.error.traceback)
+        else:
+            raise ValueError(f"Tool {self.tool_name} returned execution with None")
+
+        return SandboxRunResult(
+            func_return=func_return,
+            agent_state=agent_state,
+            stdout=execution.logs.stdout,
+            stderr=execution.logs.stderr,
+            status="error" if execution.error else "success",
+            sandbox_config_fingerprint=sbx_config.fingerprint(),
+        )
+
+    def parse_exception_from_e2b_execution(self, e2b_execution: "Execution") -> Exception:
+        builtins_dict = __builtins__ if isinstance(__builtins__, dict) else vars(__builtins__)
+        # Dynamically fetch the exception class from builtins, defaulting to Exception if not found
+        exception_class = builtins_dict.get(e2b_execution.error.name, Exception)
+        return exception_class(e2b_execution.error.value)
+
+    async def create_e2b_sandbox_with_metadata_hash(self, sandbox_config: SandboxConfig) -> "Sandbox":
+        from e2b_code_interpreter import AsyncSandbox
+
+        state_hash = sandbox_config.fingerprint()
+        e2b_config = sandbox_config.get_e2b_config()
+
+        if e2b_config.template:
+            sbx = await AsyncSandbox.create(sandbox_config.get_e2b_config().template, metadata={self.METADATA_CONFIG_STATE_KEY: state_hash})
+        else:
+            # no template
+            sbx = await AsyncSandbox.create(
+                metadata={self.METADATA_CONFIG_STATE_KEY: state_hash}, **e2b_config.model_dump(exclude={"pip_requirements"})
+            )
+
+        # install pip requirements
+        if e2b_config.pip_requirements:
+            for package in e2b_config.pip_requirements:
+                await sbx.commands.run(f"pip install {package}")
+        return sbx
+
+    async def list_running_e2b_sandboxes(self):
+        from e2b_code_interpreter import AsyncSandbox
+
+        # List running sandboxes and access metadata.
+        return await AsyncSandbox.list()
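A hedged usage sketch for the class above, assuming you already hold a Letta user object and a registered tool; the tool name "my_tool", its args, and the `user` variable are illustrative assumptions, not values from this diff:

import asyncio

from letta.services.tool_sandbox.e2b_sandbox import AsyncToolSandboxE2B

async def demo(user):
    # `user` and "my_tool" are hypothetical; any registered tool/actor pair works
    sandbox = AsyncToolSandboxE2B(tool_name="my_tool", args={"x": 1}, user=user)
    result = await sandbox.run()
    print(result.status, result.func_return)

# asyncio.run(demo(user))  # requires E2B credentials and a configured sandbox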
letta/services/tool_sandbox/local_sandbox.py
ADDED

@@ -0,0 +1,221 @@
+import asyncio
+import os
+import sys
+import tempfile
+from typing import Dict, Optional, Tuple
+
+from letta.schemas.agent import AgentState
+from letta.schemas.sandbox_config import SandboxRunResult, SandboxType
+from letta.services.helpers.tool_execution_helper import (
+    create_venv_for_local_sandbox,
+    find_python_executable,
+    install_pip_requirements_for_sandbox,
+)
+from letta.services.tool_sandbox.base import AsyncToolSandboxBase
+from letta.tracing import log_event, trace_method
+from letta.utils import get_friendly_error_msg
+
+
+class AsyncToolSandboxLocal(AsyncToolSandboxBase):
+    METADATA_CONFIG_STATE_KEY = "config_state"
+    REQUIREMENT_TXT_NAME = "requirements.txt"
+
+    def __init__(self, tool_name: str, args: dict, user, force_recreate_venv=False, tool_object=None):
+        super().__init__(tool_name, args, user, tool_object)
+        self.force_recreate_venv = force_recreate_venv
+
+    async def run(
+        self,
+        agent_state: Optional[AgentState] = None,
+        additional_env_vars: Optional[Dict] = None,
+    ) -> SandboxRunResult:
+        """
+        Run the tool in a sandbox environment asynchronously,
+        *always* using a subprocess for execution.
+        """
+        result = await self.run_local_dir_sandbox(agent_state=agent_state, additional_env_vars=additional_env_vars)
+
+        # Simple console logging for demonstration
+        for log_line in (result.stdout or []) + (result.stderr or []):
+            print(f"Tool execution log: {log_line}")
+
+        return result
+
+    @trace_method
+    async def run_local_dir_sandbox(self, agent_state: Optional[AgentState], additional_env_vars: Optional[Dict]) -> SandboxRunResult:
+        """
+        Unified asynchronous method to run the tool in a local sandbox environment,
+        always via subprocess for multi-core parallelism.
+        """
+        # Get sandbox configuration
+        sbx_config = self.sandbox_config_manager.get_or_create_default_sandbox_config(sandbox_type=SandboxType.LOCAL, actor=self.user)
+        local_configs = sbx_config.get_local_config()
+        use_venv = local_configs.use_venv
+
+        # Prepare environment variables
+        env = os.environ.copy()
+        env_vars = self.sandbox_config_manager.get_sandbox_env_vars_as_dict(sandbox_config_id=sbx_config.id, actor=self.user, limit=100)
+        env.update(env_vars)
+
+        if agent_state:
+            env.update(agent_state.get_agent_env_vars_as_dict())
+
+        if additional_env_vars:
+            env.update(additional_env_vars)
+
+        # Make sure sandbox directory exists
+        sandbox_dir = os.path.expanduser(local_configs.sandbox_dir)
+        if not os.path.exists(sandbox_dir) or not os.path.isdir(sandbox_dir):
+            os.makedirs(sandbox_dir)
+
+        # If using a virtual environment, ensure it's prepared in parallel
+        venv_preparation_task = None
+        if use_venv:
+            venv_path = str(os.path.join(sandbox_dir, local_configs.venv_name))
+            venv_preparation_task = asyncio.create_task(self._prepare_venv(local_configs, venv_path, env))
+
+        # Generate and write execution script (always with markers, since we rely on stdout)
+        with tempfile.NamedTemporaryFile(mode="w", dir=sandbox_dir, suffix=".py", delete=False) as temp_file:
+            code = self.generate_execution_script(agent_state=agent_state, wrap_print_with_markers=True)
+            temp_file.write(code)
+            temp_file.flush()
+            temp_file_path = temp_file.name
+
+        try:
+            # If we started a venv preparation task, wait for it to complete
+            if venv_preparation_task:
+                await venv_preparation_task
+
+            # Determine the python executable and environment for the subprocess
+            exec_env = env.copy()
+            if use_venv:
+                venv_path = str(os.path.join(sandbox_dir, local_configs.venv_name))
+                python_executable = find_python_executable(local_configs)
+                exec_env["VIRTUAL_ENV"] = venv_path
+                exec_env["PATH"] = os.path.join(venv_path, "bin") + ":" + exec_env["PATH"]
+            else:
+                # If not using venv, use whatever Python we are running on
+                python_executable = sys.executable
+
+            exec_env["PYTHONWARNINGS"] = "ignore"
+
+            # Execute in subprocess
+            return await self._execute_tool_subprocess(
+                sbx_config=sbx_config,
+                python_executable=python_executable,
+                temp_file_path=temp_file_path,
+                env=exec_env,
+                cwd=sandbox_dir,
+            )
+
+        except Exception as e:
+            print(f"Executing tool {self.tool_name} has an unexpected error: {e}")
+            print(f"Auto-generated code for debugging:\n\n{code}")
+            raise e
+        finally:
+            # Clean up the temp file
+            os.remove(temp_file_path)
+
+    async def _prepare_venv(self, local_configs, venv_path: str, env: Dict[str, str]):
+        """
+        Prepare virtual environment asynchronously (in a background thread).
+        """
+        if self.force_recreate_venv or not os.path.isdir(venv_path):
+            sandbox_dir = os.path.expanduser(local_configs.sandbox_dir)
+            log_event(name="start create_venv_for_local_sandbox", attributes={"venv_path": venv_path})
+            await asyncio.to_thread(
+                create_venv_for_local_sandbox,
+                sandbox_dir_path=sandbox_dir,
+                venv_path=venv_path,
+                env=env,
+                force_recreate=self.force_recreate_venv,
+            )
+            log_event(name="finish create_venv_for_local_sandbox")
+
+        log_event(name="start install_pip_requirements_for_sandbox", attributes={"local_configs": local_configs.model_dump_json()})
+        await asyncio.to_thread(install_pip_requirements_for_sandbox, local_configs, upgrade=True, user_install_if_no_venv=False, env=env)
+        log_event(name="finish install_pip_requirements_for_sandbox", attributes={"local_configs": local_configs.model_dump_json()})
+
+    @trace_method
+    async def _execute_tool_subprocess(
+        self, sbx_config, python_executable: str, temp_file_path: str, env: Dict[str, str], cwd: str
+    ) -> SandboxRunResult:
+        """
+        Execute user code in a subprocess, always capturing stdout and stderr.
+        We parse special markers to extract the pickled result string.
+        """
+        try:
+            log_event(name="start subprocess")
+
+            process = await asyncio.create_subprocess_exec(
+                python_executable, temp_file_path, env=env, cwd=cwd, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE
+            )
+
+            try:
+                stdout_bytes, stderr_bytes = await asyncio.wait_for(process.communicate(), timeout=60)
+            except asyncio.TimeoutError:
+                # Terminate the process on timeout
+                if process.returncode is None:
+                    process.terminate()
+                    try:
+                        await asyncio.wait_for(process.wait(), timeout=5)
+                    except asyncio.TimeoutError:
+                        process.kill()
+
+                raise TimeoutError(f"Executing tool {self.tool_name} timed out after 60 seconds.")
+
+            stdout = stdout_bytes.decode("utf-8") if stdout_bytes else ""
+            stderr = stderr_bytes.decode("utf-8") if stderr_bytes else ""
+            log_event(name="finish subprocess")
+
+            # Parse markers to isolate the function result
+            func_result, stdout_text = self.parse_out_function_results_markers(stdout)
+            func_return, agent_state = self.parse_best_effort(func_result)
+
+            return SandboxRunResult(
+                func_return=func_return,
+                agent_state=agent_state,
+                stdout=[stdout_text] if stdout_text else [],
+                stderr=[stderr] if stderr else [],
+                status="success" if process.returncode == 0 else "error",
+                sandbox_config_fingerprint=sbx_config.fingerprint(),
+            )
+
+        except (TimeoutError, Exception) as e:
+            # Distinguish between timeouts and other exceptions for clarity
+            if isinstance(e, TimeoutError):
+                raise e
+
+            print(f"Subprocess execution for tool {self.tool_name} encountered an error: {e}")
+            func_return = get_friendly_error_msg(
+                function_name=self.tool_name,
+                exception_name=type(e).__name__,
+                exception_message=str(e),
+            )
+            return SandboxRunResult(
+                func_return=func_return,
+                agent_state=None,
+                stdout=[],
+                stderr=[str(e)],
+                status="error",
+                sandbox_config_fingerprint=sbx_config.fingerprint(),
+            )
+
+    def parse_out_function_results_markers(self, text: str) -> Tuple[str, str]:
+        """
+        Parse the function results out of the stdout using special markers.
+        Returns (function_result_str, stripped_stdout).
+        """
+        if self.LOCAL_SANDBOX_RESULT_START_MARKER not in text:
+            # No markers found, so nothing to parse
+            return "", text
+
+        marker_len = len(self.LOCAL_SANDBOX_RESULT_START_MARKER)
+        start_index = text.index(self.LOCAL_SANDBOX_RESULT_START_MARKER) + marker_len
+        end_index = text.index(self.LOCAL_SANDBOX_RESULT_END_MARKER)
+
+        # The actual pickled base64 is between start_index and end_index
+        results_str = text[start_index:end_index]
+        # The rest of stdout (minus the markers)
+        remainder = text[: start_index - marker_len] + text[end_index + marker_len :]
+        return results_str, remainder
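The stdout protocol is easy to see in isolation. Below is a minimal standalone sketch of the marker extraction done by parse_out_function_results_markers, with made-up fixed markers in place of the uuid5-derived ones above (both markers are the same length here, as the uuid5 markers are, since the code skips the end marker using the start marker's length):

START = "<<RESULT_START>>"  # illustrative stand-ins, 16 chars each
END = "<<RESULT_END__>>"

def extract(text: str):
    """Return (payload_between_markers, stdout_with_markers_removed)."""
    if START not in text:
        return "", text
    marker_len = len(START)
    start = text.index(START) + marker_len
    end = text.index(END)
    # The base64 payload sits between the markers; the rest is ordinary stdout
    return text[start:end], text[: start - marker_len] + text[end + marker_len:]

payload, logs = extract("hello<<RESULT_START>>BASE64<<RESULT_END__>>world")
assert payload == "BASE64" and logs == "helloworld"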
letta/sleeptime_agent.py
ADDED

@@ -0,0 +1,61 @@
+from typing import List, Optional, Union
+
+from letta.agent import Agent, AgentState, save_agent
+from letta.interface import AgentInterface
+from letta.orm import User
+from letta.schemas.message import Message
+from letta.schemas.openai.chat_completion_response import UsageStatistics
+from letta.schemas.usage import LettaUsageStatistics
+
+
+class SleeptimeAgent(Agent):
+    def __init__(
+        self,
+        interface: AgentInterface,
+        agent_state: AgentState,
+        user: User = None,
+        # extras
+        first_message_verify_mono: bool = False,
+        max_memory_rethinks: int = 10,
+    ):
+        super().__init__(interface, agent_state, user)
+        self.first_message_verify_mono = first_message_verify_mono
+        self.max_memory_rethinks = max_memory_rethinks
+
+    def step(
+        self,
+        messages: Union[Message, List[Message]],
+        chaining: bool = True,
+        max_chaining_steps: Optional[int] = None,
+        **kwargs,
+    ) -> LettaUsageStatistics:
+        """Go through what is currently in core memory and integrate information."""
+        next_input_message = messages if isinstance(messages, list) else [messages]
+        counter = 0
+        total_usage = UsageStatistics()
+        step_count = 0
+
+        while counter < self.max_memory_rethinks:
+            if counter > 0:
+                next_input_message = []
+                kwargs["first_message"] = False
+            step_response = self.inner_step(
+                messages=next_input_message,
+                **kwargs,
+            )
+            for message in step_response.messages:
+                if message.tool_calls:
+                    for tool_call in message.tool_calls:
+                        # check if the function name is "finish_rethinking_memory"
+                        if tool_call.function.name == "finish_rethinking_memory":
+                            counter = self.max_memory_rethinks
+                            break
+            usage = step_response.usage
+            step_count += 1
+            total_usage += usage
+            counter += 1
+            self.interface.step_complete()
+
+        save_agent(self)
+
+        return LettaUsageStatistics(**total_usage.model_dump(), step_count=step_count)
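The control flow of step reduces to a bounded rethink loop with an early exit when the agent emits a finish_rethinking_memory tool call. A minimal standalone sketch of that pattern, with an invented stand-in for the model's responses:

MAX_RETHINKS = 10

def fake_tool_calls(step):
    # Invented stand-in for step_response.messages: two rethinks, then finish
    return ["rethink_memory"] if step < 2 else ["finish_rethinking_memory"]

counter = 0
steps = 0
while counter < MAX_RETHINKS:
    for name in fake_tool_calls(steps):
        if name == "finish_rethinking_memory":
            counter = MAX_RETHINKS  # force the loop to end, as SleeptimeAgent.step does
            break
    steps += 1
    counter += 1

print(steps)  # 3: two rethink steps, then the finish call terminates the loop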