letta-nightly 0.6.45.dev20250329104117__py3-none-any.whl → 0.6.46.dev20250330050944__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of letta-nightly might be problematic.
- letta/__init__.py +1 -1
- letta/agent.py +25 -8
- letta/agents/base_agent.py +6 -5
- letta/agents/letta_agent.py +323 -0
- letta/agents/voice_agent.py +4 -3
- letta/client/client.py +2 -0
- letta/dynamic_multi_agent.py +5 -5
- letta/errors.py +20 -0
- letta/helpers/tool_execution_helper.py +1 -1
- letta/helpers/tool_rule_solver.py +1 -1
- letta/llm_api/anthropic.py +2 -0
- letta/llm_api/anthropic_client.py +153 -167
- letta/llm_api/google_ai_client.py +112 -29
- letta/llm_api/llm_api_tools.py +5 -0
- letta/llm_api/llm_client.py +6 -7
- letta/llm_api/llm_client_base.py +38 -17
- letta/llm_api/openai.py +2 -0
- letta/orm/group.py +2 -5
- letta/round_robin_multi_agent.py +18 -7
- letta/schemas/group.py +6 -0
- letta/schemas/message.py +23 -14
- letta/schemas/openai/chat_completion_request.py +6 -1
- letta/schemas/providers.py +3 -3
- letta/serialize_schemas/marshmallow_agent.py +34 -10
- letta/serialize_schemas/pydantic_agent_schema.py +23 -3
- letta/server/rest_api/app.py +9 -0
- letta/server/rest_api/interface.py +25 -2
- letta/server/rest_api/optimistic_json_parser.py +1 -1
- letta/server/rest_api/routers/v1/agents.py +57 -23
- letta/server/rest_api/routers/v1/groups.py +72 -49
- letta/server/rest_api/routers/v1/sources.py +1 -0
- letta/server/rest_api/utils.py +0 -1
- letta/server/server.py +73 -80
- letta/server/startup.sh +1 -1
- letta/services/agent_manager.py +7 -0
- letta/services/group_manager.py +87 -29
- letta/services/message_manager.py +5 -0
- letta/services/tool_executor/async_tool_execution_sandbox.py +397 -0
- letta/services/tool_executor/tool_execution_manager.py +27 -0
- letta/services/{tool_execution_sandbox.py → tool_executor/tool_execution_sandbox.py} +40 -12
- letta/services/tool_executor/tool_executor.py +23 -6
- letta/settings.py +17 -1
- letta/supervisor_multi_agent.py +3 -1
- {letta_nightly-0.6.45.dev20250329104117.dist-info → letta_nightly-0.6.46.dev20250330050944.dist-info}/METADATA +1 -1
- {letta_nightly-0.6.45.dev20250329104117.dist-info → letta_nightly-0.6.46.dev20250330050944.dist-info}/RECORD +48 -46
- {letta_nightly-0.6.45.dev20250329104117.dist-info → letta_nightly-0.6.46.dev20250330050944.dist-info}/LICENSE +0 -0
- {letta_nightly-0.6.45.dev20250329104117.dist-info → letta_nightly-0.6.46.dev20250330050944.dist-info}/WHEEL +0 -0
- {letta_nightly-0.6.45.dev20250329104117.dist-info → letta_nightly-0.6.46.dev20250330050944.dist-info}/entry_points.txt +0 -0
letta/services/tool_executor/async_tool_execution_sandbox.py
ADDED
@@ -0,0 +1,397 @@
+import ast
+import asyncio
+import base64
+import os
+import pickle
+import sys
+import tempfile
+import uuid
+from typing import Any, Dict, Optional, Tuple
+
+from letta.functions.helpers import generate_model_from_args_json_schema
+from letta.schemas.agent import AgentState
+from letta.schemas.sandbox_config import SandboxRunResult, SandboxType
+from letta.services.helpers.tool_execution_helper import (
+    add_imports_and_pydantic_schemas_for_args,
+    create_venv_for_local_sandbox,
+    find_python_executable,
+    install_pip_requirements_for_sandbox,
+)
+from letta.services.organization_manager import OrganizationManager
+from letta.services.sandbox_config_manager import SandboxConfigManager
+from letta.services.tool_manager import ToolManager
+from letta.tracing import log_event, trace_method
+from letta.utils import get_friendly_error_msg
+
+
+class AsyncToolExecutionSandbox:
+    METADATA_CONFIG_STATE_KEY = "config_state"
+    REQUIREMENT_TXT_NAME = "requirements.txt"
+
+    # For generating long, random marker hashes
+    NAMESPACE = uuid.NAMESPACE_DNS
+    LOCAL_SANDBOX_RESULT_START_MARKER = str(uuid.uuid5(NAMESPACE, "local-sandbox-result-start-marker"))
+    LOCAL_SANDBOX_RESULT_END_MARKER = str(uuid.uuid5(NAMESPACE, "local-sandbox-result-end-marker"))
+
+    # This is the variable name in the auto-generated code that contains the function results
+    # We make this a long random string to avoid collisions with any variables in the user's code
+    LOCAL_SANDBOX_RESULT_VAR_NAME = "result_ZQqiequkcFwRwwGQMqkt"
+
+    def __init__(self, tool_name: str, args: dict, user, force_recreate=True, force_recreate_venv=False, tool_object=None):
+        self.tool_name = tool_name
+        self.args = args
+        self.user = user
+        # get organization
+        self.organization = OrganizationManager().get_organization_by_id(self.user.organization_id)
+        self.privileged_tools = self.organization.privileged_tools
+
+        # If a tool object is provided, we use it directly, otherwise pull via name
+        if tool_object is not None:
+            self.tool = tool_object
+        else:
+            # Get the tool via name
+            self.tool = ToolManager().get_tool_by_name(tool_name=tool_name, actor=self.user)
+            if not self.tool:
+                raise ValueError(
+                    f"Agent attempted to invoke tool {self.tool_name} that does not exist for organization {self.user.organization_id}"
+                )
+
+        self.sandbox_config_manager = SandboxConfigManager()
+        self.force_recreate = force_recreate
+        self.force_recreate_venv = force_recreate_venv
+
+    async def run(
+        self, agent_state: Optional[AgentState] = None, additional_env_vars: Optional[Dict] = None, inject_agent_state: bool = False
+    ) -> SandboxRunResult:
+        """
+        Run the tool in a sandbox environment asynchronously,
+        *always* using a subprocess for execution.
+        """
+        result = await self.run_local_dir_sandbox(
+            agent_state=agent_state, additional_env_vars=additional_env_vars, inject_agent_state=inject_agent_state
+        )
+
+        # Simple console logging for demonstration
+        for log_line in (result.stdout or []) + (result.stderr or []):
+            print(f"Tool execution log: {log_line}")
+
+        return result
+
+    @trace_method
+    async def run_local_dir_sandbox(
+        self, agent_state: Optional[AgentState], additional_env_vars: Optional[Dict], inject_agent_state: bool
+    ) -> SandboxRunResult:
+        """
+        Unified asynchronous method to run the tool in a local sandbox environment,
+        always via subprocess for multi-core parallelism.
+        """
+        # Get sandbox configuration
+        sbx_config = self.sandbox_config_manager.get_or_create_default_sandbox_config(sandbox_type=SandboxType.LOCAL, actor=self.user)
+        local_configs = sbx_config.get_local_config()
+        use_venv = local_configs.use_venv
+
+        # Prepare environment variables
+        env = os.environ.copy()
+        env_vars = self.sandbox_config_manager.get_sandbox_env_vars_as_dict(sandbox_config_id=sbx_config.id, actor=self.user, limit=100)
+        env.update(env_vars)
+
+        if agent_state:
+            env.update(agent_state.get_agent_env_vars_as_dict())
+
+        if additional_env_vars:
+            env.update(additional_env_vars)
+
+        # Make sure sandbox directory exists
+        sandbox_dir = os.path.expanduser(local_configs.sandbox_dir)
+        if not os.path.exists(sandbox_dir) or not os.path.isdir(sandbox_dir):
+            os.makedirs(sandbox_dir)
+
+        # If using a virtual environment, ensure it's prepared in parallel
+        venv_preparation_task = None
+        if use_venv:
+            venv_path = str(os.path.join(sandbox_dir, local_configs.venv_name))
+            if self.force_recreate_venv or not os.path.isdir(venv_path):
+                venv_preparation_task = asyncio.create_task(self._prepare_venv(local_configs, venv_path, env))
+
+        # Generate and write execution script (always with markers, since we rely on stdout)
+        with tempfile.NamedTemporaryFile(mode="w", dir=sandbox_dir, suffix=".py", delete=False) as temp_file:
+            code = self.generate_execution_script(agent_state=agent_state, inject_agent_state=inject_agent_state)
+            temp_file.write(code)
+            temp_file.flush()
+            temp_file_path = temp_file.name
+
+        try:
+            # If we started a venv preparation task, wait for it to complete
+            if venv_preparation_task:
+                await venv_preparation_task
+
+            # Determine the python executable and environment for the subprocess
+            exec_env = env.copy()
+            if use_venv:
+                venv_path = str(os.path.join(sandbox_dir, local_configs.venv_name))
+                python_executable = find_python_executable(local_configs)
+                exec_env["VIRTUAL_ENV"] = venv_path
+                exec_env["PATH"] = os.path.join(venv_path, "bin") + ":" + exec_env["PATH"]
+            else:
+                # If not using venv, use whatever Python we are running on
+                python_executable = sys.executable
+
+            exec_env["PYTHONWARNINGS"] = "ignore"
+
+            # Execute in subprocess
+            return await self._execute_tool_subprocess(
+                sbx_config=sbx_config,
+                python_executable=python_executable,
+                temp_file_path=temp_file_path,
+                env=exec_env,
+                cwd=sandbox_dir,
+            )
+
+        except Exception as e:
+            print(f"Executing tool {self.tool_name} has an unexpected error: {e}")
+            print(f"Auto-generated code for debugging:\n\n{code}")
+            raise e
+        finally:
+            # Clean up the temp file
+            os.remove(temp_file_path)
+
+    async def _prepare_venv(self, local_configs, venv_path: str, env: Dict[str, str]):
+        """
+        Prepare virtual environment asynchronously (in a background thread).
+        """
+        sandbox_dir = os.path.expanduser(local_configs.sandbox_dir)
+        log_event(name="start create_venv_for_local_sandbox", attributes={"venv_path": venv_path})
+
+        await asyncio.to_thread(
+            create_venv_for_local_sandbox,
+            sandbox_dir_path=sandbox_dir,
+            venv_path=venv_path,
+            env=env,
+            force_recreate=self.force_recreate_venv,
+        )
+        log_event(name="finish create_venv_for_local_sandbox")
+
+        log_event(name="start install_pip_requirements_for_sandbox", attributes={"local_configs": local_configs.model_dump_json()})
+        await asyncio.to_thread(install_pip_requirements_for_sandbox, local_configs, upgrade=True, user_install_if_no_venv=False, env=env)
+        log_event(name="finish install_pip_requirements_for_sandbox", attributes={"local_configs": local_configs.model_dump_json()})
+
+    @trace_method
+    async def _execute_tool_subprocess(
+        self, sbx_config, python_executable: str, temp_file_path: str, env: Dict[str, str], cwd: str
+    ) -> SandboxRunResult:
+        """
+        Execute user code in a subprocess, always capturing stdout and stderr.
+        We parse special markers to extract the pickled result string.
+        """
+        try:
+            log_event(name="start subprocess")
+
+            process = await asyncio.create_subprocess_exec(
+                python_executable, temp_file_path, env=env, cwd=cwd, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE
+            )
+
+            try:
+                stdout_bytes, stderr_bytes = await asyncio.wait_for(process.communicate(), timeout=60)
+            except asyncio.TimeoutError:
+                # Terminate the process on timeout
+                if process.returncode is None:
+                    process.terminate()
+                    try:
+                        await asyncio.wait_for(process.wait(), timeout=5)
+                    except asyncio.TimeoutError:
+                        process.kill()
+
+                raise TimeoutError(f"Executing tool {self.tool_name} timed out after 60 seconds.")
+
+            stdout = stdout_bytes.decode("utf-8") if stdout_bytes else ""
+            stderr = stderr_bytes.decode("utf-8") if stderr_bytes else ""
+            log_event(name="finish subprocess")
+
+            # Parse markers to isolate the function result
+            func_result, stdout_text = self.parse_out_function_results_markers(stdout)
+            func_return, agent_state = self.parse_best_effort(func_result)
+
+            return SandboxRunResult(
+                func_return=func_return,
+                agent_state=agent_state,
+                stdout=[stdout_text] if stdout_text else [],
+                stderr=[stderr] if stderr else [],
+                status="success" if process.returncode == 0 else "error",
+                sandbox_config_fingerprint=sbx_config.fingerprint(),
+            )
+
+        except (TimeoutError, Exception) as e:
+            # Distinguish between timeouts and other exceptions for clarity
+            if isinstance(e, TimeoutError):
+                raise e
+
+            print(f"Subprocess execution for tool {self.tool_name} encountered an error: {e}")
+            func_return = get_friendly_error_msg(
+                function_name=self.tool_name,
+                exception_name=type(e).__name__,
+                exception_message=str(e),
+            )
+            return SandboxRunResult(
+                func_return=func_return,
+                agent_state=None,
+                stdout=[],
+                stderr=[str(e)],
+                status="error",
+                sandbox_config_fingerprint=sbx_config.fingerprint(),
+            )
+
+    def parse_out_function_results_markers(self, text: str) -> Tuple[str, str]:
+        """
+        Parse the function results out of the stdout using special markers.
+        Returns (function_result_str, stripped_stdout).
+        """
+        if self.LOCAL_SANDBOX_RESULT_START_MARKER not in text:
+            # No markers found, so nothing to parse
+            return "", text
+
+        marker_len = len(self.LOCAL_SANDBOX_RESULT_START_MARKER)
+        start_index = text.index(self.LOCAL_SANDBOX_RESULT_START_MARKER) + marker_len
+        end_index = text.index(self.LOCAL_SANDBOX_RESULT_END_MARKER)
+
+        # The actual pickled base64 is between start_index and end_index
+        results_str = text[start_index:end_index]
+        # The rest of stdout (minus the markers)
+        remainder = text[: start_index - marker_len] + text[end_index + marker_len :]
+        return results_str, remainder
+
+    def parse_best_effort(self, text: str) -> Tuple[Any, Optional[AgentState]]:
+        """
+        Decode and unpickle the result from the function execution if possible.
+        Returns (function_return_value, agent_state).
+        """
+        if not text:
+            return None, None
+
+        result = pickle.loads(base64.b64decode(text))
+        agent_state = result["agent_state"] if result["agent_state"] is not None else None
+        return result["results"], agent_state
+
+    def parse_function_arguments(self, source_code: str, tool_name: str) -> list:
+        """
+        Get arguments of the given function from its source code via AST.
+        """
+        tree = ast.parse(source_code)
+        args = []
+        for node in ast.walk(tree):
+            if isinstance(node, ast.FunctionDef) and node.name == tool_name:
+                for arg in node.args.args:
+                    args.append(arg.arg)
+        return args
+
+    def generate_execution_script(self, agent_state: Optional[AgentState], inject_agent_state: bool) -> str:
+        """
+        Generate code to run inside of execution sandbox.
+        Serialize the agent state and arguments, call the tool,
+        then base64-encode/pickle the result.
+        """
+        code = "from typing import *\n"
+        code += "import pickle\n"
+        code += "import sys\n"
+        code += "import base64\n"
+
+        # Additional imports to support agent state
+        if inject_agent_state:
+            code += "import letta\n"
+            code += "from letta import * \n"
+
+        # Add schema code if available
+        if self.tool.args_json_schema:
+            schema_code = add_imports_and_pydantic_schemas_for_args(self.tool.args_json_schema)
+            if "from __future__ import annotations" in schema_code:
+                schema_code = schema_code.replace("from __future__ import annotations", "").lstrip()
+                code = "from __future__ import annotations\n\n" + code
+            code += schema_code + "\n"
+
+        # Load the agent state
+        if inject_agent_state:
+            agent_state_pickle = pickle.dumps(agent_state)
+            code += f"agent_state = pickle.loads({agent_state_pickle})\n"
+        else:
+            code += "agent_state = None\n"
+
+        # Initialize arguments
+        if self.tool.args_json_schema:
+            args_schema = generate_model_from_args_json_schema(self.tool.args_json_schema)
+            code += f"args_object = {args_schema.__name__}(**{self.args})\n"
+            for param in self.args:
+                code += f"{param} = args_object.{param}\n"
+        else:
+            for param in self.args:
+                code += self.initialize_param(param, self.args[param])
+
+        # Insert the tool's source code
+        code += "\n" + self.tool.source_code + "\n"
+
+        # Invoke the function and store the result in a global variable
+        code += (
+            f"{self.LOCAL_SANDBOX_RESULT_VAR_NAME}"
+            + ' = {"results": '
+            + self.invoke_function_call(inject_agent_state=inject_agent_state)
+            + ', "agent_state": agent_state}\n'
+        )
+        code += (
+            f"{self.LOCAL_SANDBOX_RESULT_VAR_NAME} = base64.b64encode("
+            f"pickle.dumps({self.LOCAL_SANDBOX_RESULT_VAR_NAME})"
+            ").decode('utf-8')\n"
+        )
+
+        # If we're always in a subprocess, we must rely on markers to parse out the result
+        code += f"sys.stdout.write('{self.LOCAL_SANDBOX_RESULT_START_MARKER}')\n"
+        code += f"sys.stdout.write(str({self.LOCAL_SANDBOX_RESULT_VAR_NAME}))\n"
+        code += f"sys.stdout.write('{self.LOCAL_SANDBOX_RESULT_END_MARKER}')\n"
+
+        return code
+
+    def _convert_param_to_value(self, param_type: str, raw_value: str) -> str:
+        """
+        Convert parameter to Python code representation based on JSON schema type.
+        """
+        if param_type == "string":
+            # Safely inject a Python string via pickle
+            value = "pickle.loads(" + str(pickle.dumps(raw_value)) + ")"
+        elif param_type in ["integer", "boolean", "number", "array", "object"]:
+            # This is simplistic. In real usage, ensure correct type-casting or sanitization.
+            value = raw_value
+        else:
+            raise TypeError(f"Unsupported type: {param_type}, raw_value={raw_value}")
+
+        return str(value)
+
+    def initialize_param(self, name: str, raw_value: str) -> str:
+        """
+        Produce code for initializing a single parameter in the generated script.
+        """
+        params = self.tool.json_schema["parameters"]["properties"]
+        spec = params.get(name)
+        if spec is None:
+            # Possibly an extra param like 'self' that we ignore
+            return ""
+
+        param_type = spec.get("type")
+        if param_type is None and spec.get("parameters"):
+            param_type = spec["parameters"].get("type")
+
+        value = self._convert_param_to_value(param_type, raw_value)
+        return f"{name} = {value}\n"
+
+    def invoke_function_call(self, inject_agent_state: bool) -> str:
+        """
+        Generate the function call code string with the appropriate arguments.
+        """
+        kwargs = []
+        for name in self.args:
+            if name in self.tool.json_schema["parameters"]["properties"]:
+                kwargs.append(name)
+
+        param_list = [f"{arg}={arg}" for arg in kwargs]
+        if inject_agent_state:
+            param_list.append("agent_state=agent_state")
+
+        params = ", ".join(param_list)
+        func_call_str = self.tool.name + "(" + params + ")"
+        return func_call_str
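The class above always runs the generated script in a subprocess and hands back a SandboxRunResult. A minimal sketch (not part of the diff) of driving it directly; the tool name, arguments, and actor below are placeholders rather than real letta fixtures:

# Hedged sketch: calling the new async sandbox directly.
# "roll_dice", its args, and `actor` are placeholders; real call sites build them
# from the agent's state (see SandboxToolExecutor further down in this diff).
import asyncio

from letta.services.tool_executor.async_tool_execution_sandbox import AsyncToolExecutionSandbox


async def run_example_tool(actor):
    sandbox = AsyncToolExecutionSandbox(tool_name="roll_dice", args={"num_sides": 6}, user=actor)
    result = await sandbox.run(agent_state=None, inject_agent_state=False)
    # SandboxRunResult carries the unpickled return value plus captured output
    print(result.status, result.func_return)
    for line in (result.stdout or []) + (result.stderr or []):
        print("tool log:", line)


# asyncio.run(run_example_tool(actor))  # requires a real User object with an organization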
letta/services/tool_executor/tool_execution_manager.py
CHANGED
@@ -15,6 +15,7 @@ from letta.services.tool_executor.tool_executor import (
     SandboxToolExecutor,
     ToolExecutor,
 )
+from letta.tracing import trace_method
 from letta.utils import get_friendly_error_msg
 
 
@@ -72,3 +73,29 @@ class ToolExecutionManager:
             self.logger.error(f"Error executing tool {function_name}: {str(e)}")
             error_message = get_friendly_error_msg(function_name=function_name, exception_name=type(e).__name__, exception_message=str(e))
             return error_message, SandboxRunResult(status="error")
+
+    @trace_method
+    async def execute_tool_async(self, function_name: str, function_args: dict, tool: Tool) -> Tuple[Any, Optional[SandboxRunResult]]:
+        """
+        Execute a tool asynchronously and persist any state changes.
+        """
+        try:
+            # Get the appropriate executor for this tool type
+            # TODO: Extend this async model to composio
+
+            if tool.tool_type == ToolType.CUSTOM:
+                executor = SandboxToolExecutor()
+                result_tuple = await executor.execute(function_name, function_args, self.agent_state, tool, self.actor)
+            else:
+                executor = ToolExecutorFactory.get_executor(tool.tool_type)
+                result_tuple = executor.execute(function_name, function_args, self.agent_state, tool, self.actor)
+            return result_tuple
+
+        except Exception as e:
+            self.logger.error(f"Error executing tool {function_name}: {str(e)}")
+            error_message = get_friendly_error_msg(
+                function_name=function_name,
+                exception_name=type(e).__name__,
+                exception_message=str(e),
+            )
+            return error_message, SandboxRunResult(status="error")
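A minimal sketch (not part of the diff) of how the new async path might be awaited from an agent loop; it assumes a ToolExecutionManager instance already bound to an agent_state and actor, plus the Tool being invoked:

# Hedged sketch: awaiting execute_tool_async from caller code.
# `manager` and `tool` are assumed to exist at the call site; the args are placeholders.
async def call_tool(manager, tool):
    function_response, sandbox_result = await manager.execute_tool_async(
        function_name=tool.name,
        function_args={"message": "hello"},  # placeholder arguments
        tool=tool,
    )
    if sandbox_result is not None and sandbox_result.status == "error":
        print(f"Tool {tool.name} failed: {function_response}")
    return function_response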
letta/services/{tool_execution_sandbox.py → tool_executor/tool_execution_sandbox.py}
CHANGED
@@ -3,7 +3,6 @@ import base64
 import io
 import os
 import pickle
-import runpy
 import subprocess
 import sys
 import tempfile
@@ -27,6 +26,7 @@ from letta.services.organization_manager import OrganizationManager
 from letta.services.sandbox_config_manager import SandboxConfigManager
 from letta.services.tool_manager import ToolManager
 from letta.settings import tool_settings
+from letta.tracing import log_event, trace_method
 from letta.utils import get_friendly_error_msg
 
 logger = get_logger(__name__)
@@ -112,6 +112,7 @@ class ToolExecutionSandbox:
         os.environ.clear()
         os.environ.update(original_env)  # Restore original environment variables
 
+    @trace_method
     def run_local_dir_sandbox(
         self, agent_state: Optional[AgentState] = None, additional_env_vars: Optional[Dict] = None
     ) -> SandboxRunResult:
@@ -152,7 +153,7 @@ class ToolExecutionSandbox:
             if local_configs.use_venv:
                 return self.run_local_dir_sandbox_venv(sbx_config, env, temp_file_path)
             else:
-                return self.
+                return self.run_local_dir_sandbox_directly(sbx_config, env, temp_file_path)
         except Exception as e:
             logger.error(f"Executing tool {self.tool_name} has an unexpected error: {e}")
             logger.error(f"Logging out tool {self.tool_name} auto-generated code for debugging: \n\n{code}")
@@ -161,6 +162,7 @@ class ToolExecutionSandbox:
             # Clean up the temp file
             os.remove(temp_file_path)
 
+    @trace_method
     def run_local_dir_sandbox_venv(self, sbx_config: SandboxConfig, env: Dict[str, str], temp_file_path: str) -> SandboxRunResult:
         local_configs = sbx_config.get_local_config()
         sandbox_dir = os.path.expanduser(local_configs.sandbox_dir)  # Expand tilde
@@ -169,11 +171,15 @@ class ToolExecutionSandbox:
         # Recreate venv if required
         if self.force_recreate_venv or not os.path.isdir(venv_path):
             logger.warning(f"Virtual environment directory does not exist at: {venv_path}, creating one now...")
+            log_event(name="start create_venv_for_local_sandbox", attributes={"venv_path": venv_path})
             create_venv_for_local_sandbox(
                 sandbox_dir_path=sandbox_dir, venv_path=venv_path, env=env, force_recreate=self.force_recreate_venv
             )
+            log_event(name="finish create_venv_for_local_sandbox")
 
+        log_event(name="start install_pip_requirements_for_sandbox", attributes={"local_configs": local_configs.model_dump_json()})
         install_pip_requirements_for_sandbox(local_configs, env=env)
+        log_event(name="finish install_pip_requirements_for_sandbox", attributes={"local_configs": local_configs.model_dump_json()})
 
         # Ensure Python executable exists
         python_executable = find_python_executable(local_configs)
@@ -187,6 +193,7 @@ class ToolExecutionSandbox:
 
         # Execute the code
         try:
+            log_event(name="start subprocess")
             result = subprocess.run(
                 [python_executable, temp_file_path],
                 env=env,
@@ -195,6 +202,7 @@ class ToolExecutionSandbox:
                 capture_output=True,
                 text=True,
             )
+            log_event(name="finish subprocess")
             func_result, stdout = self.parse_out_function_results_markers(result.stdout)
             func_return, agent_state = self.parse_best_effort(func_result)
 
@@ -230,34 +238,54 @@ class ToolExecutionSandbox:
             logger.error(f"Executing tool {self.tool_name} has an unexpected error: {e}")
             raise e
 
-
+    @trace_method
+    def run_local_dir_sandbox_directly(self, sbx_config: SandboxConfig, env: Dict[str, str], temp_file_path: str) -> SandboxRunResult:
         status = "success"
-        agent_state, stderr = None, None
+        func_return, agent_state, stderr = None, None, None
 
-        # Redirect stdout and stderr to capture script output
         old_stdout = sys.stdout
         old_stderr = sys.stderr
         captured_stdout, captured_stderr = io.StringIO(), io.StringIO()
+
         sys.stdout = captured_stdout
         sys.stderr = captured_stderr
 
         try:
-            # Execute the temp file
             with self.temporary_env_vars(env):
-                result = runpy.run_path(temp_file_path, init_globals=env)
 
-
-
-
+                # Read and compile the Python script
+                with open(temp_file_path, "r", encoding="utf-8") as f:
+                    source = f.read()
+                code_obj = compile(source, temp_file_path, "exec")
+
+                # Provide a dict for globals
+                globals_dict = dict(env)  # or {}
+                # If you need to mimic `__main__` behavior:
+                globals_dict["__name__"] = "__main__"
+                globals_dict["__file__"] = temp_file_path
+
+                # Execute the compiled code
+                log_event(name="start exec", attributes={"temp_file_path": temp_file_path})
+                exec(code_obj, globals_dict)
+                log_event(name="finish exec", attributes={"temp_file_path": temp_file_path})
+
+                # Get result from the global dict
+                func_result = globals_dict.get(self.LOCAL_SANDBOX_RESULT_VAR_NAME)
+                func_return, agent_state = self.parse_best_effort(func_result)
 
         except Exception as e:
-            func_return = get_friendly_error_msg(
+            func_return = get_friendly_error_msg(
+                function_name=self.tool_name,
+                exception_name=type(e).__name__,
+                exception_message=str(e),
+            )
             traceback.print_exc(file=sys.stderr)
             status = "error"
 
-        # Restore stdout
+        # Restore stdout/stderr
         sys.stdout = old_stdout
         sys.stderr = old_stderr
+
         stdout_output = [captured_stdout.getvalue()] if captured_stdout.getvalue() else []
         stderr_output = [captured_stderr.getvalue()] if captured_stderr.getvalue() else []
 
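The direct (non-venv) path now compiles the generated script and exec()s it against a caller-supplied globals dict, then reads the marker variable back out of that dict instead of going through runpy.run_path. A standalone sketch (not part of the diff) of the same pattern; the file path and variable name are placeholders:

# Hedged sketch of the compile/exec pattern used above.
RESULT_VAR = "result_ZQqiequkcFwRwwGQMqkt"  # same spirit as LOCAL_SANDBOX_RESULT_VAR_NAME

def exec_script_and_read_result(path: str):
    with open(path, "r", encoding="utf-8") as f:
        source = f.read()
    code_obj = compile(source, path, "exec")
    # Mimic `python path.py` semantics by naming the module __main__
    globals_dict = {"__name__": "__main__", "__file__": path}
    exec(code_obj, globals_dict)
    # Whatever the script bound at top level is now visible in globals_dict
    return globals_dict.get(RESULT_VAR)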
letta/services/tool_executor/tool_executor.py
CHANGED
@@ -1,3 +1,4 @@
+import ast
 import math
 from abc import ABC, abstractmethod
 from typing import Any, Optional, Tuple
@@ -14,7 +15,7 @@ from letta.schemas.user import User
 from letta.services.agent_manager import AgentManager
 from letta.services.message_manager import MessageManager
 from letta.services.passage_manager import PassageManager
-from letta.services.
+from letta.services.tool_executor.async_tool_execution_sandbox import AsyncToolExecutionSandbox
 from letta.utils import get_friendly_error_msg
 
 
@@ -59,7 +60,7 @@ class LettaCoreToolExecutor(ToolExecutor):
         Returns:
             Optional[str]: None is always returned as this function does not produce a response.
         """
-        return
+        return "Sent message successfully."
 
     def conversation_search(self, agent_state: AgentState, actor: User, query: str, page: Optional[int] = 0) -> Optional[str]:
         """
@@ -320,9 +321,10 @@ class ExternalMCPToolExecutor(ToolExecutor):
 class SandboxToolExecutor(ToolExecutor):
     """Executor for sandboxed tools."""
 
-    def execute(
+    async def execute(
         self, function_name: str, function_args: dict, agent_state: AgentState, tool: Tool, actor: User
     ) -> Tuple[Any, Optional[SandboxRunResult]]:
+
         # Store original memory state
         orig_memory_str = agent_state.memory.compile()
 
@@ -330,12 +332,17 @@ class SandboxToolExecutor(ToolExecutor):
         # Prepare function arguments
         function_args = self._prepare_function_args(function_args, tool, function_name)
 
-        # Create agent state copy for sandbox
         agent_state_copy = self._create_agent_state_copy(agent_state)
 
+        # TODO: This is brittle, think about better way to do this?
+        if "agent_state" in self.parse_function_arguments(tool.source_code, tool.name):
+            inject_agent_state = True
+        else:
+            inject_agent_state = False
+
         # Execute in sandbox
-        sandbox_run_result =
-            agent_state=agent_state_copy
+        sandbox_run_result = await AsyncToolExecutionSandbox(function_name, function_args, actor, tool_object=tool).run(
+            agent_state=agent_state_copy, inject_agent_state=inject_agent_state
         )
 
         function_response, updated_agent_state = sandbox_run_result.func_return, sandbox_run_result.agent_state
@@ -364,6 +371,16 @@ class SandboxToolExecutor(ToolExecutor):
         # This is defensive programming - we try to coerce but fall back if it fails
         return function_args
 
+    def parse_function_arguments(self, source_code: str, tool_name: str):
+        """Get arguments of a function from its source code"""
+        tree = ast.parse(source_code)
+        args = []
+        for node in ast.walk(tree):
+            if isinstance(node, ast.FunctionDef) and node.name == tool_name:
+                for arg in node.args.args:
+                    args.append(arg.arg)
+        return args
+
     def _create_agent_state_copy(self, agent_state: AgentState):
         """Create a copy of agent state for sandbox execution."""
         agent_state_copy = agent_state.__deepcopy__()
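The inject_agent_state decision in SandboxToolExecutor hinges on this AST helper: a tool gets agent_state injected only if its function signature names that parameter. A small standalone illustration (not part of the diff, with an invented tool source):

import ast

def function_parameter_names(source_code: str, tool_name: str) -> list:
    # Collect the positional parameter names of `tool_name` from its source
    args = []
    for node in ast.walk(ast.parse(source_code)):
        if isinstance(node, ast.FunctionDef) and node.name == tool_name:
            args.extend(a.arg for a in node.args.args)
    return args

example_src = "def greet(name: str, agent_state=None):\n    return f'hi {name}'\n"
inject_agent_state = "agent_state" in function_parameter_names(example_src, "greet")
# inject_agent_state is True here, so the sandbox would pass agent_state through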
letta/settings.py
CHANGED
@@ -175,7 +175,7 @@ class Settings(BaseSettings):
     # multi agent settings
     multi_agent_send_message_max_retries: int = 3
     multi_agent_send_message_timeout: int = 20 * 60
-    multi_agent_concurrent_sends: int =
+    multi_agent_concurrent_sends: int = 50
 
     # telemetry logging
     verbose_telemetry_logging: bool = False
@@ -187,6 +187,22 @@ class Settings(BaseSettings):
     uvicorn_reload: bool = False
     uvicorn_timeout_keep_alive: int = 5
 
+    # event loop parallelism
+    event_loop_threadpool_max_workers: int = 43
+
+    # experimental toggle
+    use_experimental: bool = False
+
+    # LLM provider client settings
+    httpx_max_retries: int = 5
+    httpx_timeout_connect: float = 10.0
+    httpx_timeout_read: float = 60.0
+    httpx_timeout_write: float = 30.0
+    httpx_timeout_pool: float = 10.0
+    httpx_max_connections: int = 500
+    httpx_max_keepalive_connections: int = 500
+    httpx_keepalive_expiry: float = 120.0
+
     @property
     def letta_pg_uri(self) -> str:
         if self.pg_uri: