letta-nightly 0.7.30.dev20250603104343__py3-none-any.whl → 0.8.0.dev20250604201135__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- letta/__init__.py +7 -1
- letta/agent.py +14 -7
- letta/agents/base_agent.py +1 -0
- letta/agents/ephemeral_summary_agent.py +104 -0
- letta/agents/helpers.py +35 -3
- letta/agents/letta_agent.py +492 -176
- letta/agents/letta_agent_batch.py +22 -16
- letta/agents/prompts/summary_system_prompt.txt +62 -0
- letta/agents/voice_agent.py +22 -7
- letta/agents/voice_sleeptime_agent.py +13 -8
- letta/constants.py +33 -1
- letta/data_sources/connectors.py +52 -36
- letta/errors.py +4 -0
- letta/functions/ast_parsers.py +13 -30
- letta/functions/function_sets/base.py +3 -1
- letta/functions/functions.py +2 -0
- letta/functions/mcp_client/base_client.py +151 -97
- letta/functions/mcp_client/sse_client.py +49 -31
- letta/functions/mcp_client/stdio_client.py +107 -106
- letta/functions/schema_generator.py +22 -22
- letta/groups/helpers.py +3 -4
- letta/groups/sleeptime_multi_agent.py +4 -4
- letta/groups/sleeptime_multi_agent_v2.py +22 -0
- letta/helpers/composio_helpers.py +16 -0
- letta/helpers/converters.py +20 -0
- letta/helpers/datetime_helpers.py +1 -6
- letta/helpers/tool_rule_solver.py +2 -1
- letta/interfaces/anthropic_streaming_interface.py +17 -2
- letta/interfaces/openai_chat_completions_streaming_interface.py +1 -0
- letta/interfaces/openai_streaming_interface.py +18 -2
- letta/llm_api/anthropic_client.py +24 -3
- letta/llm_api/google_ai_client.py +0 -15
- letta/llm_api/google_vertex_client.py +6 -5
- letta/llm_api/llm_client_base.py +15 -0
- letta/llm_api/openai.py +2 -2
- letta/llm_api/openai_client.py +60 -8
- letta/orm/__init__.py +2 -0
- letta/orm/agent.py +45 -43
- letta/orm/base.py +0 -2
- letta/orm/block.py +1 -0
- letta/orm/custom_columns.py +13 -0
- letta/orm/enums.py +5 -0
- letta/orm/file.py +3 -1
- letta/orm/files_agents.py +68 -0
- letta/orm/mcp_server.py +48 -0
- letta/orm/message.py +1 -0
- letta/orm/organization.py +11 -2
- letta/orm/passage.py +25 -10
- letta/orm/sandbox_config.py +5 -2
- letta/orm/sqlalchemy_base.py +171 -110
- letta/prompts/system/memgpt_base.txt +6 -1
- letta/prompts/system/memgpt_v2_chat.txt +57 -0
- letta/prompts/system/sleeptime.txt +2 -0
- letta/prompts/system/sleeptime_v2.txt +28 -0
- letta/schemas/agent.py +87 -20
- letta/schemas/block.py +7 -1
- letta/schemas/file.py +57 -0
- letta/schemas/mcp.py +74 -0
- letta/schemas/memory.py +5 -2
- letta/schemas/message.py +9 -0
- letta/schemas/openai/openai.py +0 -6
- letta/schemas/providers.py +33 -4
- letta/schemas/tool.py +26 -21
- letta/schemas/tool_execution_result.py +5 -0
- letta/server/db.py +23 -8
- letta/server/rest_api/app.py +73 -56
- letta/server/rest_api/interface.py +4 -4
- letta/server/rest_api/routers/v1/agents.py +132 -47
- letta/server/rest_api/routers/v1/blocks.py +3 -2
- letta/server/rest_api/routers/v1/embeddings.py +3 -3
- letta/server/rest_api/routers/v1/groups.py +3 -3
- letta/server/rest_api/routers/v1/jobs.py +14 -17
- letta/server/rest_api/routers/v1/organizations.py +10 -10
- letta/server/rest_api/routers/v1/providers.py +12 -10
- letta/server/rest_api/routers/v1/runs.py +3 -3
- letta/server/rest_api/routers/v1/sandbox_configs.py +12 -12
- letta/server/rest_api/routers/v1/sources.py +108 -43
- letta/server/rest_api/routers/v1/steps.py +8 -6
- letta/server/rest_api/routers/v1/tools.py +134 -95
- letta/server/rest_api/utils.py +12 -1
- letta/server/server.py +272 -73
- letta/services/agent_manager.py +246 -313
- letta/services/block_manager.py +30 -9
- letta/services/context_window_calculator/__init__.py +0 -0
- letta/services/context_window_calculator/context_window_calculator.py +150 -0
- letta/services/context_window_calculator/token_counter.py +82 -0
- letta/services/file_processor/__init__.py +0 -0
- letta/services/file_processor/chunker/__init__.py +0 -0
- letta/services/file_processor/chunker/llama_index_chunker.py +29 -0
- letta/services/file_processor/embedder/__init__.py +0 -0
- letta/services/file_processor/embedder/openai_embedder.py +84 -0
- letta/services/file_processor/file_processor.py +123 -0
- letta/services/file_processor/parser/__init__.py +0 -0
- letta/services/file_processor/parser/base_parser.py +9 -0
- letta/services/file_processor/parser/mistral_parser.py +54 -0
- letta/services/file_processor/types.py +0 -0
- letta/services/files_agents_manager.py +184 -0
- letta/services/group_manager.py +118 -0
- letta/services/helpers/agent_manager_helper.py +76 -21
- letta/services/helpers/tool_execution_helper.py +3 -0
- letta/services/helpers/tool_parser_helper.py +100 -0
- letta/services/identity_manager.py +44 -42
- letta/services/job_manager.py +21 -10
- letta/services/mcp/base_client.py +5 -2
- letta/services/mcp/sse_client.py +3 -5
- letta/services/mcp/stdio_client.py +3 -5
- letta/services/mcp_manager.py +281 -0
- letta/services/message_manager.py +40 -26
- letta/services/organization_manager.py +55 -19
- letta/services/passage_manager.py +211 -13
- letta/services/provider_manager.py +48 -2
- letta/services/sandbox_config_manager.py +105 -0
- letta/services/source_manager.py +4 -5
- letta/services/step_manager.py +9 -6
- letta/services/summarizer/summarizer.py +50 -23
- letta/services/telemetry_manager.py +7 -0
- letta/services/tool_executor/tool_execution_manager.py +11 -52
- letta/services/tool_executor/tool_execution_sandbox.py +4 -34
- letta/services/tool_executor/tool_executor.py +107 -105
- letta/services/tool_manager.py +56 -17
- letta/services/tool_sandbox/base.py +39 -92
- letta/services/tool_sandbox/e2b_sandbox.py +16 -11
- letta/services/tool_sandbox/local_sandbox.py +51 -23
- letta/services/user_manager.py +36 -3
- letta/settings.py +10 -3
- letta/templates/__init__.py +0 -0
- letta/templates/sandbox_code_file.py.j2 +47 -0
- letta/templates/template_helper.py +16 -0
- letta/tracing.py +30 -1
- letta/types/__init__.py +7 -0
- letta/utils.py +25 -1
- {letta_nightly-0.7.30.dev20250603104343.dist-info → letta_nightly-0.8.0.dev20250604201135.dist-info}/METADATA +7 -2
- {letta_nightly-0.7.30.dev20250603104343.dist-info → letta_nightly-0.8.0.dev20250604201135.dist-info}/RECORD +136 -110
- {letta_nightly-0.7.30.dev20250603104343.dist-info → letta_nightly-0.8.0.dev20250604201135.dist-info}/LICENSE +0 -0
- {letta_nightly-0.7.30.dev20250603104343.dist-info → letta_nightly-0.8.0.dev20250604201135.dist-info}/WHEEL +0 -0
- {letta_nightly-0.7.30.dev20250603104343.dist-info → letta_nightly-0.8.0.dev20250604201135.dist-info}/entry_points.txt +0 -0
letta/services/tool_manager.py
CHANGED
@@ -1,7 +1,7 @@
 import asyncio
 import importlib
 import warnings
-from typing import List, Optional
+from typing import List, Optional, Union

 from letta.constants import (
     BASE_FUNCTION_RETURN_CHAR_LIMIT,
@@ -26,6 +26,7 @@ from letta.schemas.tool import Tool as PydanticTool
 from letta.schemas.tool import ToolCreate, ToolUpdate
 from letta.schemas.user import User as PydanticUser
 from letta.server.db import db_registry
+from letta.services.mcp.types import SSEServerConfig, StdioServerConfig
 from letta.tracing import trace_method
 from letta.utils import enforce_types, printd

@@ -90,6 +91,12 @@ class ToolManager:

         return tool

+    @enforce_types
+    async def create_mcp_server(
+        self, server_config: Union[StdioServerConfig, SSEServerConfig], actor: PydanticUser
+    ) -> List[Union[StdioServerConfig, SSEServerConfig]]:
+        pass
+
     @enforce_types
     @trace_method
     def create_or_update_mcp_tool(self, tool_create: ToolCreate, mcp_server_name: str, actor: PydanticUser) -> PydanticTool:
@@ -101,6 +108,16 @@ class ToolManager:
             actor,
         )

+    @enforce_types
+    async def create_mcp_tool_async(self, tool_create: ToolCreate, mcp_server_name: str, actor: PydanticUser) -> PydanticTool:
+        metadata = {MCP_TOOL_TAG_NAME_PREFIX: {"server_name": mcp_server_name}}
+        return await self.create_or_update_tool_async(
+            PydanticTool(
+                tool_type=ToolType.EXTERNAL_MCP, name=tool_create.json_schema["name"], metadata_=metadata, **tool_create.model_dump()
+            ),
+            actor,
+        )
+
     @enforce_types
     @trace_method
     def create_or_update_composio_tool(self, tool_create: ToolCreate, actor: PydanticUser) -> PydanticTool:
@@ -108,6 +125,13 @@ class ToolManager:
             PydanticTool(tool_type=ToolType.EXTERNAL_COMPOSIO, name=tool_create.json_schema["name"], **tool_create.model_dump()), actor
         )

+    @enforce_types
+    @trace_method
+    async def create_or_update_composio_tool_async(self, tool_create: ToolCreate, actor: PydanticUser) -> PydanticTool:
+        return await self.create_or_update_tool_async(
+            PydanticTool(tool_type=ToolType.EXTERNAL_COMPOSIO, name=tool_create.json_schema["name"], **tool_create.model_dump()), actor
+        )
+
     @enforce_types
     @trace_method
     def create_or_update_langchain_tool(self, tool_create: ToolCreate, actor: PydanticUser) -> PydanticTool:
@@ -145,7 +169,7 @@ class ToolManager:

             tool = ToolModel(**tool_data)
             await tool.create_async(session, actor=actor)  # Re-raise other database-related errors
-
+            return tool.to_pydantic()

     @enforce_types
     @trace_method
@@ -215,6 +239,7 @@ class ToolManager:
     @trace_method
     async def list_tools_async(self, actor: PydanticUser, after: Optional[str] = None, limit: Optional[int] = 50) -> List[PydanticTool]:
         """List all tools with optional pagination."""
+        tools_to_delete = []
         async with db_registry.async_session() as session:
             tools = await ToolModel.list_async(
                 db_session=session,
@@ -223,23 +248,26 @@ class ToolManager:
                 organization_id=actor.organization_id,
             )

-
-
-
-
-
-
-
-
-
-
-
+            # Remove any malformed tools
+            results = []
+            for tool in tools:
+                try:
+                    pydantic_tool = tool.to_pydantic()
+                    results.append(pydantic_tool)
+                except (ValueError, ModuleNotFoundError, AttributeError) as e:
+                    tools_to_delete.append(tool)
+                    logger.warning(f"Deleting malformed tool with id={tool.id} and name={tool.name}, error was:\n{e}")
+                    logger.warning("Deleted tool: ")
+                    logger.warning(tool.pretty_print_columns())
+
+        for tool in tools_to_delete:
+            await self.delete_tool_by_id_async(tool.id, actor=actor)

         return results

     @enforce_types
     @trace_method
-    def
+    async def size_async(
         self,
         actor: PydanticUser,
         include_base_tools: bool,
@@ -249,10 +277,10 @@ class ToolManager:

         If include_builtin is True, it will also count the built-in tools.
         """
-        with db_registry.
+        async with db_registry.async_session() as session:
             if include_base_tools:
-                return ToolModel.
-            return ToolModel.
+                return await ToolModel.size_async(db_session=session, actor=actor)
+            return await ToolModel.size_async(db_session=session, actor=actor, name=LETTA_TOOL_SET)

     @enforce_types
     @trace_method
@@ -324,6 +352,17 @@ class ToolManager:
         except NoResultFound:
             raise ValueError(f"Tool with id {tool_id} not found.")

+    @enforce_types
+    @trace_method
+    async def delete_tool_by_id_async(self, tool_id: str, actor: PydanticUser) -> None:
+        """Delete a tool by its ID."""
+        async with db_registry.async_session() as session:
+            try:
+                tool = await ToolModel.read_async(db_session=session, identifier=tool_id, actor=actor)
+                await tool.hard_delete_async(db_session=session, actor=actor)
+            except NoResultFound:
+                raise ValueError(f"Tool with id {tool_id} not found.")
+
     @enforce_types
     @trace_method
     def upsert_base_tools(self, actor: PydanticUser) -> List[PydanticTool]:
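Note: the new async methods above (list_tools_async with malformed-tool cleanup, size_async, delete_tool_by_id_async) form the async counterpart of the existing sync API. A rough usage sketch, assuming a configured Letta database, a no-argument ToolManager constructor, and an existing PydanticUser actor obtained elsewhere (none of which are shown in this diff):

    import asyncio

    from letta.services.tool_manager import ToolManager


    async def audit_tools(actor):  # `actor` is assumed to be a PydanticUser
        manager = ToolManager()
        tools = await manager.list_tools_async(actor=actor, limit=50)  # now also prunes malformed tools
        total = await manager.size_async(actor=actor, include_base_tools=True)
        print(f"listed {len(tools)} tools, {total} total in the organization")


    # asyncio.run(audit_tools(actor)) once an actor is available
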
letta/services/tool_sandbox/base.py
CHANGED
@@ -1,9 +1,7 @@
-import ast
-import base64
 import pickle
 import uuid
 from abc import ABC, abstractmethod
-from typing import Any, Dict, Optional
+from typing import Any, Dict, Optional

 from letta.functions.helpers import generate_model_from_args_json_schema
 from letta.schemas.agent import AgentState
@@ -11,20 +9,21 @@ from letta.schemas.sandbox_config import SandboxConfig
 from letta.schemas.tool import Tool
 from letta.schemas.tool_execution_result import ToolExecutionResult
 from letta.services.helpers.tool_execution_helper import add_imports_and_pydantic_schemas_for_args
+from letta.services.helpers.tool_parser_helper import convert_param_to_str_value, parse_function_arguments
 from letta.services.sandbox_config_manager import SandboxConfigManager
 from letta.services.tool_manager import ToolManager
+from letta.types import JsonDict, JsonValue


 class AsyncToolSandboxBase(ABC):
     NAMESPACE = uuid.NAMESPACE_DNS
-    LOCAL_SANDBOX_RESULT_START_MARKER =
-    LOCAL_SANDBOX_RESULT_END_MARKER = str(uuid.uuid5(NAMESPACE, "local-sandbox-result-end-marker"))
+    LOCAL_SANDBOX_RESULT_START_MARKER = uuid.uuid5(NAMESPACE, "local-sandbox-result-start-marker").bytes
     LOCAL_SANDBOX_RESULT_VAR_NAME = "result_ZQqiequkcFwRwwGQMqkt"

     def __init__(
         self,
         tool_name: str,
-        args:
+        args: JsonDict,
         user,
         tool_object: Optional[Tool] = None,
         sandbox_config: Optional[SandboxConfig] = None,
@@ -48,7 +47,7 @@ class AsyncToolSandboxBase(ABC):
         self._sandbox_config_manager = None

         # See if we should inject agent_state or not based on the presence of the "agent_state" arg
-        if "agent_state" in
+        if "agent_state" in parse_function_arguments(self.tool.source_code, self.tool.name):
             self.inject_agent_state = True
         else:
             self.inject_agent_state = False
@@ -74,83 +73,50 @@ class AsyncToolSandboxBase(ABC):

     def generate_execution_script(self, agent_state: Optional[AgentState], wrap_print_with_markers: bool = False) -> str:
         """
-        Generate code to run inside of execution sandbox.
-
-        then base64-encode/pickle the result.
+        Generate code to run inside of execution sandbox. Serialize the agent state and arguments, call the tool,
+        then base64-encode/pickle the result. Runs a jinja2 template constructing the python file.
         """
-
-        code += "import pickle\n"
-        code += "import sys\n"
-        code += "import base64\n"
+        from letta.templates.template_helper import render_template

-
-
-
-
+        TEMPLATE_NAME = "sandbox_code_file.py.j2"
+
+        future_import = False
+        schema_code = None

-        # Add schema code if available
         if self.tool.args_json_schema:
+            # Add schema code if available
             schema_code = add_imports_and_pydantic_schemas_for_args(self.tool.args_json_schema)
             if "from __future__ import annotations" in schema_code:
                 schema_code = schema_code.replace("from __future__ import annotations", "").lstrip()
-
-            code += schema_code + "\n"
-
-        # Load the agent state
-        if self.inject_agent_state:
-            agent_state_pickle = pickle.dumps(agent_state)
-            code += f"agent_state = pickle.loads({agent_state_pickle})\n"
-        else:
-            code += "agent_state = None\n"
+                future_import = True

-
-        if self.tool.args_json_schema:
+            # Initialize arguments
             args_schema = generate_model_from_args_json_schema(self.tool.args_json_schema)
-
+            tool_args = f"args_object = {args_schema.__name__}(**{self.args})\n"
             for param in self.args:
-
+                tool_args += f"{param} = args_object.{param}\n"
         else:
+            tool_args = ""
             for param in self.args:
-
-
-
-
-
-
-
-
-
-
-
-
-
+                tool_args += self.initialize_param(param, self.args[param])
+
+        agent_state_pickle = pickle.dumps(agent_state) if self.inject_agent_state else None
+
+        return render_template(
+            TEMPLATE_NAME,
+            future_import=future_import,
+            inject_agent_state=self.inject_agent_state,
+            schema_imports=schema_code,
+            agent_state_pickle=agent_state_pickle,
+            tool_args=tool_args,
+            tool_source_code=self.tool.source_code,
+            local_sandbox_result_var_name=self.LOCAL_SANDBOX_RESULT_VAR_NAME,
+            invoke_function_call=self.invoke_function_call(),
+            wrap_print_with_markers=wrap_print_with_markers,
+            start_marker=self.LOCAL_SANDBOX_RESULT_START_MARKER,
         )

-
-            code += f"sys.stdout.write('{self.LOCAL_SANDBOX_RESULT_START_MARKER}')\n"
-            code += f"sys.stdout.write(str({self.LOCAL_SANDBOX_RESULT_VAR_NAME}))\n"
-            code += f"sys.stdout.write('{self.LOCAL_SANDBOX_RESULT_END_MARKER}')\n"
-        else:
-            code += f"{self.LOCAL_SANDBOX_RESULT_VAR_NAME}\n"
-
-        return code
-
-    def _convert_param_to_value(self, param_type: str, raw_value: str) -> str:
-        """
-        Convert parameter to Python code representation based on JSON schema type.
-        """
-        if param_type == "string":
-            # Safely inject a Python string via pickle
-            value = "pickle.loads(" + str(pickle.dumps(raw_value)) + ")"
-        elif param_type in ["integer", "boolean", "number", "array", "object"]:
-            # This is simplistic. In real usage, ensure correct type-casting or sanitization.
-            value = raw_value
-        else:
-            raise TypeError(f"Unsupported type: {param_type}, raw_value={raw_value}")
-
-        return str(value)
-
-    def initialize_param(self, name: str, raw_value: str) -> str:
+    def initialize_param(self, name: str, raw_value: JsonValue) -> str:
         """
         Produce code for initializing a single parameter in the generated script.
         """
@@ -164,7 +130,7 @@ class AsyncToolSandboxBase(ABC):
         if param_type is None and spec.get("parameters"):
             param_type = spec["parameters"].get("type")

-        value =
+        value = convert_param_to_str_value(param_type, raw_value)
         return f"{name} = {value}\n"

     def invoke_function_call(self) -> str:
@@ -184,24 +150,5 @@ class AsyncToolSandboxBase(ABC):
         func_call_str = self.tool.name + "(" + params + ")"
         return func_call_str

-    def
-
-        Decode and unpickle the result from the function execution if possible.
-        Returns (function_return_value, agent_state).
-        """
-        if not text:
-            return None, None
-
-        result = pickle.loads(base64.b64decode(text))
-        agent_state = result["agent_state"]
-        return result["results"], agent_state
-
-    def parse_function_arguments(self, source_code: str, tool_name: str):
-        """Get arguments of a function from its source code"""
-        tree = ast.parse(source_code)
-        args = []
-        for node in ast.walk(tree):
-            if isinstance(node, ast.FunctionDef) and node.name == tool_name:
-                for arg in node.args.args:
-                    args.append(arg.arg)
-        return args
+    def _update_env_vars(self):
+        pass  # TODO
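Note: the old string-concatenation code generation in generate_execution_script is replaced by rendering the new sandbox_code_file.py.j2 template through letta.templates.template_helper.render_template. That helper's body (+16 lines) is not included in this diff; below is a minimal sketch of how such a Jinja2 helper could look, where the loader setup and StrictUndefined choice are assumptions rather than the project's actual implementation:

    import os

    from jinja2 import Environment, FileSystemLoader, StrictUndefined

    # Assumption: templates (e.g. sandbox_code_file.py.j2) live alongside this module.
    TEMPLATE_DIR = os.path.dirname(os.path.abspath(__file__))

    _env = Environment(loader=FileSystemLoader(TEMPLATE_DIR), undefined=StrictUndefined)


    def render_template(template_name: str, **kwargs) -> str:
        """Render a Jinja2 template file to a string with the given variables."""
        return _env.get_template(template_name).render(**kwargs)
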
letta/services/tool_sandbox/e2b_sandbox.py
CHANGED
@@ -1,16 +1,23 @@
-from typing import Any, Dict, Optional
+from typing import TYPE_CHECKING, Any, Dict, Optional
+
+from e2b_code_interpreter import AsyncSandbox

 from letta.log import get_logger
 from letta.schemas.agent import AgentState
 from letta.schemas.sandbox_config import SandboxConfig, SandboxType
 from letta.schemas.tool import Tool
 from letta.schemas.tool_execution_result import ToolExecutionResult
+from letta.services.helpers.tool_parser_helper import parse_stdout_best_effort
 from letta.services.tool_sandbox.base import AsyncToolSandboxBase
 from letta.tracing import log_event, trace_method
+from letta.types import JsonDict
 from letta.utils import get_friendly_error_msg

 logger = get_logger(__name__)

+if TYPE_CHECKING:
+    from e2b_code_interpreter import Execution
+

 class AsyncToolSandboxE2B(AsyncToolSandboxBase):
     METADATA_CONFIG_STATE_KEY = "config_state"
@@ -18,9 +25,9 @@ class AsyncToolSandboxE2B(AsyncToolSandboxBase):
     def __init__(
         self,
         tool_name: str,
-        args:
+        args: JsonDict,
         user,
-        force_recreate=True,
+        force_recreate: bool = True,
         tool_object: Optional[Tool] = None,
         sandbox_config: Optional[SandboxConfig] = None,
         sandbox_env_vars: Optional[Dict[str, Any]] = None,
@@ -92,7 +99,7 @@ class AsyncToolSandboxE2B(AsyncToolSandboxBase):
             )
             execution = await e2b_sandbox.run_code(code, envs=env_vars)
             if execution.results:
-                func_return, agent_state =
+                func_return, agent_state = parse_stdout_best_effort(execution.results[0].text)
                 log_event(
                     "e2b_execution_succeeded",
                     {
@@ -138,16 +145,15 @@ class AsyncToolSandboxE2B(AsyncToolSandboxBase):
                 sandbox_config_fingerprint=sbx_config.fingerprint(),
             )

-
+    @staticmethod
+    def parse_exception_from_e2b_execution(e2b_execution: "Execution") -> Exception:
         builtins_dict = __builtins__ if isinstance(__builtins__, dict) else vars(__builtins__)
         # Dynamically fetch the exception class from builtins, defaulting to Exception if not found
         exception_class = builtins_dict.get(e2b_execution.error.name, Exception)
         return exception_class(e2b_execution.error.value)

     @trace_method
-    async def create_e2b_sandbox_with_metadata_hash(self, sandbox_config: SandboxConfig) -> "
-        from e2b_code_interpreter import AsyncSandbox
-
+    async def create_e2b_sandbox_with_metadata_hash(self, sandbox_config: SandboxConfig) -> "AsyncSandbox":
         state_hash = sandbox_config.fingerprint()
         e2b_config = sandbox_config.get_e2b_config()

@@ -194,8 +200,7 @@ class AsyncToolSandboxE2B(AsyncToolSandboxBase):

         return sbx

-
-
-
+    @staticmethod
+    async def list_running_e2b_sandboxes():
         # List running sandboxes and access metadata.
         return await AsyncSandbox.list()
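Note: both sandboxes now delegate result decoding to parse_stdout_best_effort from letta/services/helpers/tool_parser_helper.py (+100 lines, not shown in this diff). Judging from the decode-and-unpickle helper removed from AsyncToolSandboxBase above, it likely does something along these lines; treat this as a hedged sketch, not the actual implementation:

    import base64
    import pickle
    from typing import Any, Optional, Tuple, Union


    def parse_stdout_best_effort(text: Union[str, bytes]) -> Tuple[Any, Optional[Any]]:
        """Decode and unpickle a sandbox result payload, returning (func_return, agent_state)."""
        if not text:
            return None, None
        result = pickle.loads(base64.b64decode(text))
        return result["results"], result["agent_state"]
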
letta/services/tool_sandbox/local_sandbox.py
CHANGED
@@ -1,8 +1,12 @@
 import asyncio
+import hashlib
 import os
+import struct
 import sys
 import tempfile
-from typing import Any, Dict, Optional
+from typing import Any, Dict, Optional
+
+from pydantic.config import JsonDict

 from letta.schemas.agent import AgentState
 from letta.schemas.sandbox_config import SandboxConfig, SandboxType
@@ -13,10 +17,11 @@ from letta.services.helpers.tool_execution_helper import (
     find_python_executable,
     install_pip_requirements_for_sandbox,
 )
+from letta.services.helpers.tool_parser_helper import parse_stdout_best_effort
 from letta.services.tool_sandbox.base import AsyncToolSandboxBase
 from letta.settings import tool_settings
 from letta.tracing import log_event, trace_method
-from letta.utils import get_friendly_error_msg
+from letta.utils import get_friendly_error_msg, parse_stderr_error_msg


 class AsyncToolSandboxLocal(AsyncToolSandboxBase):
@@ -26,7 +31,7 @@ class AsyncToolSandboxLocal(AsyncToolSandboxBase):
     def __init__(
         self,
         tool_name: str,
-        args:
+        args: JsonDict,
         user,
         force_recreate_venv=False,
         tool_object: Optional[Tool] = None,
@@ -123,7 +128,15 @@ class AsyncToolSandboxLocal(AsyncToolSandboxBase):
             # If not using venv, use whatever Python we are running on
             python_executable = sys.executable

-
+        # handle unwanted terminal behavior
+        exec_env.update(
+            {
+                "PYTHONWARNINGS": "ignore",
+                "NO_COLOR": "1",
+                "TERM": "dumb",
+                "PYTHONUNBUFFERED": "1",
+            }
+        )

         # Execute in subprocess
         return await self._execute_tool_subprocess(
@@ -170,6 +183,7 @@ class AsyncToolSandboxLocal(AsyncToolSandboxBase):
         Execute user code in a subprocess, always capturing stdout and stderr.
         We parse special markers to extract the pickled result string.
         """
+        stdout_text = ""
         try:
             log_event(name="start subprocess")

@@ -190,13 +204,20 @@ class AsyncToolSandboxLocal(AsyncToolSandboxBase):

                 raise TimeoutError(f"Executing tool {self.tool_name} timed out after 60 seconds.")

-            stdout = stdout_bytes.decode("utf-8") if stdout_bytes else ""
             stderr = stderr_bytes.decode("utf-8") if stderr_bytes else ""
             log_event(name="finish subprocess")

             # Parse markers to isolate the function result
-
-            func_return, agent_state =
+            func_result_bytes, stdout_text = self.parse_out_function_results_markers(stdout_bytes)
+            func_return, agent_state = parse_stdout_best_effort(func_result_bytes)
+
+            if process.returncode != 0 and func_return is None:
+                exception_name, msg = parse_stderr_error_msg(stderr)
+                func_return = get_friendly_error_msg(
+                    function_name=self.tool_name,
+                    exception_name=exception_name,
+                    exception_message=msg,
+                )

             return ToolExecutionResult(
                 func_return=func_return,
@@ -213,6 +234,8 @@ class AsyncToolSandboxLocal(AsyncToolSandboxBase):
                 raise e

             print(f"Subprocess execution for tool {self.tool_name} encountered an error: {e}")
+            print(e.__class__.__name__)
+            print(e.__traceback__)
             func_return = get_friendly_error_msg(
                 function_name=self.tool_name,
                 exception_name=type(e).__name__,
@@ -221,27 +244,32 @@ class AsyncToolSandboxLocal(AsyncToolSandboxBase):
             return ToolExecutionResult(
                 func_return=func_return,
                 agent_state=None,
-                stdout=[],
+                stdout=[stdout_text],
                 stderr=[str(e)],
                 status="error",
                 sandbox_config_fingerprint=sbx_config.fingerprint(),
             )

-    def parse_out_function_results_markers(self,
+    def parse_out_function_results_markers(self, data: bytes) -> tuple[bytes, str]:
         """
         Parse the function results out of the stdout using special markers.
-        Returns (
+        Returns (function_results_bytes, stripped_stdout_bytes).
         """
-
-
-        return "",
-
-
-
-
-
-
-
-
-
-
+        pos = data.find(self.LOCAL_SANDBOX_RESULT_START_MARKER)
+        if pos < 0:
+            return b"", data.decode("utf-8") if data else ""
+
+        DATA_LENGTH_INDICATOR = 4
+        CHECKSUM_LENGTH = 32
+        pos_start = pos + len(self.LOCAL_SANDBOX_RESULT_START_MARKER)
+        checksum_start = pos_start + DATA_LENGTH_INDICATOR
+        message_start = checksum_start + CHECKSUM_LENGTH
+
+        message_len = struct.unpack(">I", data[pos_start:checksum_start])[0]
+        checksum = data[checksum_start:message_start]
+        message_data = data[message_start : message_start + message_len]
+        actual_checksum = hashlib.md5(message_data).hexdigest().encode("ascii")
+        if actual_checksum == checksum:
+            remainder = data[:pos] + data[message_start + message_len :]
+            return message_data, (remainder.decode("utf-8") if remainder else "")
+        raise Exception("Function ran, but output is corrupted.")
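Note: parse_out_function_results_markers now expects a length-prefixed, checksummed binary frame on stdout: the start marker bytes, a 4-byte big-endian payload length, a 32-byte ASCII MD5 hex digest, then the payload. The writer side lives in the new sandbox_code_file.py.j2 template and is not shown in this diff; the sketch below only illustrates a frame the parser above would accept (the function name and its use here are illustrative assumptions):

    import hashlib
    import struct
    import sys
    import uuid

    # Mirrors AsyncToolSandboxBase.LOCAL_SANDBOX_RESULT_START_MARKER from this diff.
    START_MARKER = uuid.uuid5(uuid.NAMESPACE_DNS, "local-sandbox-result-start-marker").bytes


    def write_framed_result(payload: bytes) -> None:
        """Emit marker + 4-byte big-endian length + 32-byte MD5 hex checksum + payload on stdout."""
        checksum = hashlib.md5(payload).hexdigest().encode("ascii")
        sys.stdout.buffer.write(START_MARKER + struct.pack(">I", len(payload)) + checksum + payload)
        sys.stdout.buffer.flush()
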
letta/services/user_manager.py
CHANGED
@@ -1,5 +1,7 @@
 from typing import List, Optional

+from sqlalchemy import select, text
+
 from letta.orm.errors import NoResultFound
 from letta.orm.organization import Organization as OrganizationModel
 from letta.orm.user import User as UserModel
@@ -38,6 +40,27 @@ class UserManager:

         return user.to_pydantic()

+    @enforce_types
+    @trace_method
+    async def create_default_actor_async(self, org_id: str = OrganizationManager.DEFAULT_ORG_ID) -> PydanticUser:
+        """Create the default user."""
+        async with db_registry.async_session() as session:
+            # Make sure the org id exists
+            try:
+                await OrganizationModel.read_async(db_session=session, identifier=org_id)
+            except NoResultFound:
+                raise ValueError(f"No organization with {org_id} exists in the organization table.")
+
+            # Try to retrieve the user
+            try:
+                actor = await UserModel.read_async(db_session=session, identifier=self.DEFAULT_USER_ID)
+            except NoResultFound:
+                # If it doesn't exist, make it
+                actor = UserModel(id=self.DEFAULT_USER_ID, name=self.DEFAULT_USER_NAME, organization_id=org_id)
+                await actor.create_async(session)
+
+            return actor.to_pydantic()
+
     @enforce_types
     @trace_method
     def create_user(self, pydantic_user: PydanticUser) -> PydanticUser:
@@ -123,7 +146,18 @@ class UserManager:
     async def get_actor_by_id_async(self, actor_id: str) -> PydanticUser:
         """Fetch a user by ID asynchronously."""
         async with db_registry.async_session() as session:
-
+            # Turn off seqscan to force use pk index
+            await session.execute(text("SET LOCAL enable_seqscan = OFF"))
+            try:
+                stmt = select(UserModel).where(UserModel.id == actor_id)
+                result = await session.execute(stmt)
+                user = result.scalar_one_or_none()
+            finally:
+                await session.execute(text("SET LOCAL enable_seqscan = ON"))
+
+            if not user:
+                raise NoResultFound(f"User not found with id={actor_id}")
+
             return user.to_pydantic()

     @enforce_types
@@ -154,8 +188,7 @@ class UserManager:
         try:
             return await self.get_actor_by_id_async(self.DEFAULT_USER_ID)
         except NoResultFound:
-
-            return self.create_default_user(org_id=self.DEFAULT_ORG_ID)
+            return await self.create_default_actor_async(org_id=self.DEFAULT_ORG_ID)

     @enforce_types
     @trace_method
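Note: in Postgres, SET LOCAL only lasts until the end of the current transaction, so the explicit reset in the finally block is defensive. A rough usage sketch of the new async helpers, assuming a configured database, a no-argument UserManager constructor, and that the default organization already exists (none of which are shown in this diff):

    import asyncio

    from letta.services.user_manager import UserManager


    async def main():
        manager = UserManager()
        actor = await manager.create_default_actor_async()       # creates the default user if missing
        fetched = await manager.get_actor_by_id_async(actor.id)  # pk lookup with seqscan disabled
        print(fetched.name)


    # asyncio.run(main()) against a configured Letta database
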
letta/settings.py
CHANGED
@@ -172,6 +172,10 @@ class Settings(BaseSettings):
     debug: Optional[bool] = False
     cors_origins: Optional[list] = cors_origins

+    # default handles
+    default_llm_handle: Optional[str] = None
+    default_embedding_handle: Optional[str] = None
+
     # database configuration
     pg_db: Optional[str] = None
     pg_user: Optional[str] = None
@@ -204,15 +208,15 @@ class Settings(BaseSettings):
     uvicorn_reload: bool = False
     uvicorn_timeout_keep_alive: int = 5

+    use_uvloop: bool = False
+    use_granian: bool = False
+
     # event loop parallelism
     event_loop_threadpool_max_workers: int = 43

     # experimental toggle
     use_experimental: bool = False
     use_vertex_structured_outputs_experimental: bool = False
-    use_vertex_async_loop_experimental: bool = False
-    experimental_enable_async_db_engine: bool = False
-    experimental_skip_rebuild_memory: bool = False

     # LLM provider client settings
     httpx_max_retries: int = 5
@@ -231,6 +235,9 @@ class Settings(BaseSettings):
     batch_job_polling_lookback_weeks: int = 2
     batch_job_polling_batch_size: Optional[int] = None

+    # for OCR
+    mistral_api_key: Optional[str] = None
+
     @property
     def letta_pg_uri(self) -> str:
         if self.pg_uri:
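Note: the new fields are ordinary pydantic-settings fields, so they can be supplied through the environment. A hedged sketch; the LETTA_ prefix and the module-level settings instance are assumptions about this project's configuration, so check letta/settings.py in your checkout if it differs:

    import os

    # Set before importing letta.settings, since the Settings instance is built at import time.
    os.environ["LETTA_USE_UVLOOP"] = "true"
    os.environ["LETTA_MISTRAL_API_KEY"] = "sk-..."  # hypothetical key for the new file OCR parser

    from letta.settings import settings  # assumed module-level Settings() instance

    print(settings.use_uvloop, settings.mistral_api_key is not None)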