letta-nightly 0.11.7.dev20251007104119__py3-none-any.whl → 0.11.7.dev20251008104128__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- letta/adapters/letta_llm_adapter.py +1 -0
- letta/adapters/letta_llm_request_adapter.py +0 -1
- letta/adapters/letta_llm_stream_adapter.py +7 -2
- letta/adapters/simple_llm_request_adapter.py +88 -0
- letta/adapters/simple_llm_stream_adapter.py +192 -0
- letta/agents/agent_loop.py +6 -0
- letta/agents/ephemeral_summary_agent.py +2 -1
- letta/agents/helpers.py +142 -6
- letta/agents/letta_agent.py +13 -33
- letta/agents/letta_agent_batch.py +2 -4
- letta/agents/letta_agent_v2.py +87 -77
- letta/agents/letta_agent_v3.py +899 -0
- letta/agents/voice_agent.py +2 -6
- letta/constants.py +8 -4
- letta/errors.py +40 -0
- letta/functions/function_sets/base.py +84 -4
- letta/functions/function_sets/multi_agent.py +0 -3
- letta/functions/schema_generator.py +113 -71
- letta/groups/dynamic_multi_agent.py +3 -2
- letta/groups/helpers.py +1 -2
- letta/groups/round_robin_multi_agent.py +3 -2
- letta/groups/sleeptime_multi_agent.py +3 -2
- letta/groups/sleeptime_multi_agent_v2.py +1 -1
- letta/groups/sleeptime_multi_agent_v3.py +17 -17
- letta/groups/supervisor_multi_agent.py +84 -80
- letta/helpers/converters.py +3 -0
- letta/helpers/message_helper.py +4 -0
- letta/helpers/tool_rule_solver.py +92 -5
- letta/interfaces/anthropic_streaming_interface.py +409 -0
- letta/interfaces/gemini_streaming_interface.py +296 -0
- letta/interfaces/openai_streaming_interface.py +752 -1
- letta/llm_api/anthropic_client.py +126 -16
- letta/llm_api/bedrock_client.py +4 -2
- letta/llm_api/deepseek_client.py +4 -1
- letta/llm_api/google_vertex_client.py +123 -42
- letta/llm_api/groq_client.py +4 -1
- letta/llm_api/llm_api_tools.py +11 -4
- letta/llm_api/llm_client_base.py +6 -2
- letta/llm_api/openai.py +32 -2
- letta/llm_api/openai_client.py +423 -18
- letta/llm_api/xai_client.py +4 -1
- letta/main.py +9 -5
- letta/memory.py +1 -0
- letta/orm/__init__.py +1 -1
- letta/orm/agent.py +10 -0
- letta/orm/block.py +7 -16
- letta/orm/blocks_agents.py +8 -2
- letta/orm/files_agents.py +2 -0
- letta/orm/job.py +7 -5
- letta/orm/mcp_oauth.py +1 -0
- letta/orm/message.py +21 -6
- letta/orm/organization.py +2 -0
- letta/orm/provider.py +6 -2
- letta/orm/run.py +71 -0
- letta/orm/sandbox_config.py +7 -1
- letta/orm/sqlalchemy_base.py +0 -306
- letta/orm/step.py +6 -5
- letta/orm/step_metrics.py +5 -5
- letta/otel/tracing.py +28 -3
- letta/plugins/defaults.py +4 -4
- letta/prompts/system_prompts/__init__.py +2 -0
- letta/prompts/system_prompts/letta_v1.py +25 -0
- letta/schemas/agent.py +3 -2
- letta/schemas/agent_file.py +9 -3
- letta/schemas/block.py +23 -10
- letta/schemas/enums.py +21 -2
- letta/schemas/job.py +17 -4
- letta/schemas/letta_message_content.py +71 -2
- letta/schemas/letta_stop_reason.py +5 -5
- letta/schemas/llm_config.py +53 -3
- letta/schemas/memory.py +1 -1
- letta/schemas/message.py +504 -117
- letta/schemas/openai/responses_request.py +64 -0
- letta/schemas/providers/__init__.py +2 -0
- letta/schemas/providers/anthropic.py +16 -0
- letta/schemas/providers/ollama.py +115 -33
- letta/schemas/providers/openrouter.py +52 -0
- letta/schemas/providers/vllm.py +2 -1
- letta/schemas/run.py +48 -42
- letta/schemas/step.py +2 -2
- letta/schemas/step_metrics.py +1 -1
- letta/schemas/tool.py +15 -107
- letta/schemas/tool_rule.py +88 -5
- letta/serialize_schemas/marshmallow_agent.py +1 -0
- letta/server/db.py +86 -408
- letta/server/rest_api/app.py +61 -10
- letta/server/rest_api/dependencies.py +14 -0
- letta/server/rest_api/redis_stream_manager.py +19 -8
- letta/server/rest_api/routers/v1/agents.py +364 -292
- letta/server/rest_api/routers/v1/blocks.py +14 -20
- letta/server/rest_api/routers/v1/identities.py +45 -110
- letta/server/rest_api/routers/v1/internal_templates.py +21 -0
- letta/server/rest_api/routers/v1/jobs.py +23 -6
- letta/server/rest_api/routers/v1/messages.py +1 -1
- letta/server/rest_api/routers/v1/runs.py +126 -85
- letta/server/rest_api/routers/v1/sandbox_configs.py +10 -19
- letta/server/rest_api/routers/v1/tools.py +281 -594
- letta/server/rest_api/routers/v1/voice.py +1 -1
- letta/server/rest_api/streaming_response.py +29 -29
- letta/server/rest_api/utils.py +122 -64
- letta/server/server.py +160 -887
- letta/services/agent_manager.py +236 -919
- letta/services/agent_serialization_manager.py +16 -0
- letta/services/archive_manager.py +0 -100
- letta/services/block_manager.py +211 -168
- letta/services/file_manager.py +1 -1
- letta/services/files_agents_manager.py +24 -33
- letta/services/group_manager.py +0 -142
- letta/services/helpers/agent_manager_helper.py +7 -2
- letta/services/helpers/run_manager_helper.py +85 -0
- letta/services/job_manager.py +96 -411
- letta/services/lettuce/__init__.py +6 -0
- letta/services/lettuce/lettuce_client_base.py +86 -0
- letta/services/mcp_manager.py +38 -6
- letta/services/message_manager.py +165 -362
- letta/services/organization_manager.py +0 -36
- letta/services/passage_manager.py +0 -345
- letta/services/provider_manager.py +0 -80
- letta/services/run_manager.py +301 -0
- letta/services/sandbox_config_manager.py +0 -234
- letta/services/step_manager.py +62 -39
- letta/services/summarizer/summarizer.py +9 -7
- letta/services/telemetry_manager.py +0 -16
- letta/services/tool_executor/builtin_tool_executor.py +35 -0
- letta/services/tool_executor/core_tool_executor.py +397 -2
- letta/services/tool_executor/files_tool_executor.py +3 -3
- letta/services/tool_executor/multi_agent_tool_executor.py +30 -15
- letta/services/tool_executor/tool_execution_manager.py +6 -8
- letta/services/tool_executor/tool_executor_base.py +3 -3
- letta/services/tool_manager.py +85 -339
- letta/services/tool_sandbox/base.py +24 -13
- letta/services/tool_sandbox/e2b_sandbox.py +16 -1
- letta/services/tool_schema_generator.py +123 -0
- letta/services/user_manager.py +0 -99
- letta/settings.py +20 -4
- {letta_nightly-0.11.7.dev20251007104119.dist-info → letta_nightly-0.11.7.dev20251008104128.dist-info}/METADATA +3 -5
- {letta_nightly-0.11.7.dev20251007104119.dist-info → letta_nightly-0.11.7.dev20251008104128.dist-info}/RECORD +140 -132
- letta/agents/temporal/activities/__init__.py +0 -4
- letta/agents/temporal/activities/example_activity.py +0 -7
- letta/agents/temporal/activities/prepare_messages.py +0 -10
- letta/agents/temporal/temporal_agent_workflow.py +0 -56
- letta/agents/temporal/types.py +0 -25
- {letta_nightly-0.11.7.dev20251007104119.dist-info → letta_nightly-0.11.7.dev20251008104128.dist-info}/WHEEL +0 -0
- {letta_nightly-0.11.7.dev20251007104119.dist-info → letta_nightly-0.11.7.dev20251008104128.dist-info}/entry_points.txt +0 -0
- {letta_nightly-0.11.7.dev20251007104119.dist-info → letta_nightly-0.11.7.dev20251008104128.dist-info}/licenses/LICENSE +0 -0
letta/services/tool_manager.py
CHANGED
@@ -32,6 +32,7 @@ from letta.schemas.user import User as PydanticUser
 from letta.server.db import db_registry
 from letta.services.helpers.agent_manager_helper import calculate_multi_agent_tools
 from letta.services.mcp.types import SSEServerConfig, StdioServerConfig
+from letta.services.tool_schema_generator import generate_schema_for_tool_creation, generate_schema_for_tool_update
 from letta.settings import settings
 from letta.utils import enforce_types, printd
 
@@ -41,44 +42,35 @@ logger = get_logger(__name__)
 class ToolManager:
     """Manager class to handle business logic related to Tools."""
 
-    # TODO: Refactor this across the codebase to use CreateTool instead of passing in a Tool object
-    @enforce_types
-    @trace_method
-    def create_or_update_tool(self, pydantic_tool: PydanticTool, actor: PydanticUser, bypass_name_check: bool = False) -> PydanticTool:
-        """Create a new tool based on the ToolCreate schema."""
-        tool_id = self.get_tool_id_by_name(tool_name=pydantic_tool.name, actor=actor)
-        if tool_id:
-            # Put to dict and remove fields that should not be reset
-            update_data = pydantic_tool.model_dump(exclude_unset=True, exclude_none=True)
-
-            # If there's anything to update
-            if update_data:
-                # In case we want to update the tool type
-                # Useful if we are shuffling around base tools
-                updated_tool_type = None
-                if "tool_type" in update_data:
-                    updated_tool_type = update_data.get("tool_type")
-                tool = self.update_tool_by_id(
-                    tool_id, ToolUpdate(**update_data), actor, updated_tool_type=updated_tool_type, bypass_name_check=bypass_name_check
-                )
-            else:
-                printd(
-                    f"`create_or_update_tool` was called with user_id={actor.id}, organization_id={actor.organization_id}, name={pydantic_tool.name}, but found existing tool with nothing to update."
-                )
-                tool = self.get_tool_by_id(tool_id, actor=actor)
-        else:
-            tool = self.create_tool(pydantic_tool, actor=actor)
-
-        return tool
-
     @enforce_types
     @trace_method
     async def create_or_update_tool_async(
         self, pydantic_tool: PydanticTool, actor: PydanticUser, bypass_name_check: bool = False
     ) -> PydanticTool:
         """Create a new tool based on the ToolCreate schema."""
-
-
+        if pydantic_tool.tool_type == ToolType.CUSTOM and not pydantic_tool.json_schema:
+            generated_schema = generate_schema_for_tool_creation(pydantic_tool)
+            if generated_schema:
+                pydantic_tool.json_schema = generated_schema
+            else:
+                raise ValueError("Failed to generate schema for tool", pydantic_tool.source_code)
+
+        print("SCHEMA", pydantic_tool.json_schema)
+
+        # make sure the name matches the json_schema
+        if not pydantic_tool.name:
+            pydantic_tool.name = pydantic_tool.json_schema.get("name")
+        else:
+            if pydantic_tool.name != pydantic_tool.json_schema.get("name"):
+                raise LettaToolNameSchemaMismatchError(
+                    tool_name=pydantic_tool.name,
+                    json_schema_name=pydantic_tool.json_schema.get("name"),
+                    source_code=pydantic_tool.source_code,
+                )
+
+        # check if the tool name already exists
+        current_tool = await self.get_tool_by_name_async(tool_name=pydantic_tool.name, actor=actor)
+        if current_tool:
             # Put to dict and remove fields that should not be reset
             update_data = pydantic_tool.model_dump(exclude_unset=True, exclude_none=True)
             update_data["organization_id"] = actor.organization_id
@@ -91,17 +83,16 @@ class ToolManager:
                 if "tool_type" in update_data:
                     updated_tool_type = update_data.get("tool_type")
                 tool = await self.update_tool_by_id_async(
-
+                    current_tool.id, ToolUpdate(**update_data), actor, updated_tool_type=updated_tool_type
                 )
             else:
                 printd(
                     f"`create_or_update_tool` was called with user_id={actor.id}, organization_id={actor.organization_id}, name={pydantic_tool.name}, but found existing tool with nothing to update."
                 )
-                tool = await self.get_tool_by_id_async(
-
-                tool = await self.create_tool_async(pydantic_tool, actor=actor)
+                tool = await self.get_tool_by_id_async(current_tool.id, actor=actor)
+            return tool
 
-        return
+        return await self.create_tool_async(pydantic_tool, actor=actor)
 
     @enforce_types
     async def create_mcp_server(
@@ -110,12 +101,11 @@ class ToolManager:
         pass
 
     @enforce_types
-
-    def create_or_update_mcp_tool(
+    async def create_mcp_tool_async(
         self, tool_create: ToolCreate, mcp_server_name: str, mcp_server_id: str, actor: PydanticUser
     ) -> PydanticTool:
         metadata = {MCP_TOOL_TAG_NAME_PREFIX: {"server_name": mcp_server_name, "server_id": mcp_server_id}}
-        return self.
+        return await self.create_or_update_tool_async(
             PydanticTool(
                 tool_type=ToolType.EXTERNAL_MCP, name=tool_create.json_schema["name"], metadata_=metadata, **tool_create.model_dump()
             ),
@@ -123,7 +113,8 @@ class ToolManager:
         )
 
     @enforce_types
-
+    @trace_method
+    async def create_or_update_mcp_tool_async(
         self, tool_create: ToolCreate, mcp_server_name: str, mcp_server_id: str, actor: PydanticUser
     ) -> PydanticTool:
         metadata = {MCP_TOOL_TAG_NAME_PREFIX: {"server_name": mcp_server_name, "server_id": mcp_server_id}}
@@ -134,43 +125,15 @@ class ToolManager:
             actor,
         )
 
-    @enforce_types
-    @trace_method
-    def create_or_update_composio_tool(self, tool_create: ToolCreate, actor: PydanticUser) -> PydanticTool:
-        return self.create_or_update_tool(
-            PydanticTool(tool_type=ToolType.EXTERNAL_COMPOSIO, name=tool_create.json_schema["name"], **tool_create.model_dump()), actor
-        )
-
-    @enforce_types
-    @trace_method
-    async def create_or_update_composio_tool_async(self, tool_create: ToolCreate, actor: PydanticUser) -> PydanticTool:
-        return await self.create_or_update_tool_async(
-            PydanticTool(tool_type=ToolType.EXTERNAL_COMPOSIO, name=tool_create.json_schema["name"], **tool_create.model_dump()), actor
-        )
-
-    @enforce_types
-    @trace_method
-    def create_tool(self, pydantic_tool: PydanticTool, actor: PydanticUser) -> PydanticTool:
-        """Create a new tool based on the ToolCreate schema."""
-        with db_registry.session() as session:
-            # Auto-generate description if not provided
-            if pydantic_tool.description is None:
-                pydantic_tool.description = pydantic_tool.json_schema.get("description", None)
-            tool_data = pydantic_tool.model_dump(to_orm=True)
-            # Set the organization id at the ORM layer
-            tool_data["organization_id"] = actor.organization_id
-
-            tool = ToolModel(**tool_data)
-            tool.create(session, actor=actor)  # Re-raise other database-related errors
-            return tool.to_pydantic()
-
     @enforce_types
     @trace_method
     async def create_tool_async(self, pydantic_tool: PydanticTool, actor: PydanticUser) -> PydanticTool:
         """Create a new tool based on the ToolCreate schema."""
+        # Generate schema only if not provided (only for custom tools)
+
         async with db_registry.async_session() as session:
             # Auto-generate description if not provided
-            if pydantic_tool.description is None:
+            if pydantic_tool.description is None and pydantic_tool.json_schema:
                 pydantic_tool.description = pydantic_tool.json_schema.get("description", None)
             tool_data = pydantic_tool.model_dump(to_orm=True)
             # Set the organization id at the ORM layer
@@ -219,6 +182,11 @@ class ToolManager:
         if not pydantic_tools:
             return []
 
+        # get schemas if not provided
+        for tool in pydantic_tools:
+            if tool.json_schema is None:
+                tool.json_schema = generate_schema_for_tool_creation(tool)
+
         # auto-generate descriptions if not provided
         for tool in pydantic_tools:
             if tool.description is None:
@@ -232,16 +200,6 @@ class ToolManager:
         # fallback to individual upserts for sqlite
         return await self._upsert_tools_individually(pydantic_tools, actor, override_existing_tools)
 
-    @enforce_types
-    @trace_method
-    def get_tool_by_id(self, tool_id: str, actor: PydanticUser) -> PydanticTool:
-        """Fetch a tool by its ID."""
-        with db_registry.session() as session:
-            # Retrieve tool by id using the Tool model's read method
-            tool = ToolModel.read(db_session=session, identifier=tool_id, actor=actor)
-            # Convert the SQLAlchemy Tool object to PydanticTool
-            return tool.to_pydantic()
-
     @enforce_types
     @trace_method
     async def get_tool_by_id_async(self, tool_id: str, actor: PydanticUser) -> PydanticTool:
@@ -252,17 +210,6 @@ class ToolManager:
             # Convert the SQLAlchemy Tool object to PydanticTool
             return tool.to_pydantic()
 
-    @enforce_types
-    @trace_method
-    def get_tool_by_name(self, tool_name: str, actor: PydanticUser) -> Optional[PydanticTool]:
-        """Retrieve a tool by its name and a user. We derive the organization from the user, and retrieve that tool."""
-        try:
-            with db_registry.session() as session:
-                tool = ToolModel.read(db_session=session, name=tool_name, actor=actor)
-                return tool.to_pydantic()
-        except NoResultFound:
-            return None
-
     @enforce_types
     @trace_method
     async def get_tool_by_name_async(self, tool_name: str, actor: PydanticUser) -> Optional[PydanticTool]:
@@ -274,17 +221,6 @@ class ToolManager:
         except NoResultFound:
             return None
 
-    @enforce_types
-    @trace_method
-    def get_tool_id_by_name(self, tool_name: str, actor: PydanticUser) -> Optional[str]:
-        """Retrieve a tool by its name and a user. We derive the organization from the user, and retrieve that tool."""
-        try:
-            with db_registry.session() as session:
-                tool = ToolModel.read(db_session=session, name=tool_name, actor=actor)
-                return tool.id
-        except NoResultFound:
-            return None
-
     @enforce_types
     @trace_method
     async def get_tool_id_by_name_async(self, tool_name: str, actor: PydanticUser) -> Optional[str]:
@@ -568,114 +504,6 @@ class ToolManager:
             return await ToolModel.size_async(db_session=session, actor=actor)
             return await ToolModel.size_async(db_session=session, actor=actor, name=LETTA_TOOL_SET)
 
-    @enforce_types
-    @trace_method
-    def update_tool_by_id(
-        self,
-        tool_id: str,
-        tool_update: ToolUpdate,
-        actor: PydanticUser,
-        updated_tool_type: Optional[ToolType] = None,
-        bypass_name_check: bool = False,
-    ) -> PydanticTool:
-        # TODO: remove this (legacy non-async)
-        """
-        Update a tool with complex validation and schema derivation logic.
-
-        This method handles updates differently based on tool type:
-        - MCP tools: JSON schema is trusted, no Python source derivation
-        - Python/TypeScript tools: Schema derived from source code if provided
-        - Name conflicts are checked unless bypassed
-
-        Args:
-            tool_id: The UUID of the tool to update
-            tool_update: Partial update data (only changed fields)
-            actor: User performing the update (for permissions)
-            updated_tool_type: Optional new tool type (e.g., converting custom to builtin)
-            bypass_name_check: Skip name conflict validation (use with caution)
-
-        Returns:
-            Updated tool as Pydantic model
-
-        Raises:
-            LettaToolNameConflictError: If new name conflicts with existing tool
-            NoResultFound: If tool doesn't exist or user lacks access
-
-        Side Effects:
-            - Updates tool in database
-            - May change tool name if source code is modified
-            - Recomputes JSON schema from source for non-MCP tools
-
-        Important:
-            When source_code is provided for Python/TypeScript tools, the name
-            MUST match the function name in the code, overriding any name in json_schema
-        """
-        # First, check if source code update would cause a name conflict
-        update_data = tool_update.model_dump(to_orm=True, exclude_none=True)
-        new_name = None
-        new_schema = None
-
-        # Fetch current tool to allow conditional logic based on tool type
-        current_tool = self.get_tool_by_id(tool_id=tool_id, actor=actor)
-
-        # For MCP tools, do NOT derive schema from Python source. Trust provided JSON schema.
-        if current_tool.tool_type == ToolType.EXTERNAL_MCP:
-            if "json_schema" in update_data:
-                new_schema = update_data["json_schema"].copy()
-                new_name = new_schema.get("name", current_tool.name)
-            else:
-                new_schema = current_tool.json_schema
-                new_name = current_tool.name
-            update_data.pop("source_code", None)
-            if new_name != current_tool.name:
-                existing_tool = self.get_tool_by_name(tool_name=new_name, actor=actor)
-                if existing_tool:
-                    raise LettaToolNameConflictError(tool_name=new_name)
-        else:
-            # For non-MCP tools, preserve existing behavior
-            if "source_code" in update_data.keys() and not bypass_name_check:
-                # Check source type to use appropriate parser
-                source_type = update_data.get("source_type", current_tool.source_type)
-                if source_type == "typescript":
-                    from letta.functions.typescript_parser import derive_typescript_json_schema
-
-                    derived_schema = derive_typescript_json_schema(source_code=update_data["source_code"])
-                else:
-                    # Default to Python for backwards compatibility
-                    derived_schema = derive_openai_json_schema(source_code=update_data["source_code"])
-
-                new_name = derived_schema["name"]
-                if "json_schema" not in update_data.keys():
-                    new_schema = derived_schema
-                else:
-                    new_schema = update_data["json_schema"].copy()
-                    new_schema["name"] = new_name
-                    update_data["json_schema"] = new_schema
-                if new_name != current_tool.name:
-                    existing_tool = self.get_tool_by_name(tool_name=new_name, actor=actor)
-                    if existing_tool:
-                        raise LettaToolNameConflictError(tool_name=new_name)
-
-        # Now perform the update within the session
-        with db_registry.session() as session:
-            # Fetch the tool by ID
-            tool = ToolModel.read(db_session=session, identifier=tool_id, actor=actor)
-
-            # Update tool attributes with only the fields that were explicitly set
-            for key, value in update_data.items():
-                setattr(tool, key, value)
-
-            # If we already computed the new schema, apply it
-            if new_schema is not None:
-                tool.json_schema = new_schema
-                tool.name = new_name
-
-            if updated_tool_type:
-                tool.tool_type = updated_tool_type
-
-            # Save the updated tool to the database
-            return tool.update(db_session=session, actor=actor).to_pydantic()
-
     @enforce_types
     @trace_method
     async def update_tool_by_id_async(
@@ -687,32 +515,56 @@ class ToolManager:
         bypass_name_check: bool = False,
     ) -> PydanticTool:
         """Update a tool by its ID with the given ToolUpdate object."""
-        # First, check if source code update would cause a name conflict
-        update_data = tool_update.model_dump(to_orm=True, exclude_none=True)
-        new_name = None
-        new_schema = None
-
         # Fetch current tool early to allow conditional logic based on tool type
         current_tool = await self.get_tool_by_id_async(tool_id=tool_id, actor=actor)
 
-        #
-
-        if
-
+        # Handle schema updates for custom tools
+        new_schema = None
+        if current_tool.tool_type == ToolType.CUSTOM:
+            if tool_update.json_schema is not None:
+                new_schema = tool_update.json_schema
+            elif tool_update.args_json_schema is not None:
+                # Generate full schema from args_json_schema
+                generated_schema = generate_schema_for_tool_update(
+                    current_tool=current_tool,
+                    json_schema=None,
+                    args_json_schema=tool_update.args_json_schema,
+                    source_code=tool_update.source_code,
+                    source_type=tool_update.source_type,
+                )
+                if generated_schema:
+                    tool_update.json_schema = generated_schema
+                    new_schema = generated_schema
+
+        # Now model_dump with the potentially updated schema
+        update_data = tool_update.model_dump(to_orm=True, exclude_none=True)
+
+        # Determine the final schema and name
+        if new_schema:
+            new_name = new_schema.get("name", current_tool.name)
+        elif "json_schema" in update_data:
+            new_schema = update_data["json_schema"]
             new_name = new_schema.get("name", current_tool.name)
         else:
+            # Keep existing schema
             new_schema = current_tool.json_schema
             new_name = current_tool.name
 
-        #
-
-
-
-
-
-        new_schema =
-
-
+        # Handle explicit name updates
+        if "name" in update_data and update_data["name"] != current_tool.name:
+            # Name is being explicitly changed
+            new_name = update_data["name"]
+            # Update the json_schema name to match if there's a schema
+            if new_schema:
+                new_schema = new_schema.copy()
+                new_schema["name"] = new_name
+                update_data["json_schema"] = new_schema
+        elif new_schema and new_name != current_tool.name:
+            # Schema provides a different name but name wasn't explicitly changed
+            update_data["name"] = new_name
+            # raise ValueError(
+            #     f"JSON schema name '{new_name}' conflicts with current tool name '{current_tool.name}'. Update the name field explicitly if you want to rename the tool."
+            # )
 
         # If name changes, enforce uniqueness
         if new_name != current_tool.name:
@@ -723,7 +575,9 @@ class ToolManager:
         # NOTE: EXTREMELEY HACKY, we need to stop making assumptions about the source_code
         if "source_code" in update_data and f"def {new_name}" not in update_data.get("source_code", ""):
             raise LettaToolNameSchemaMismatchError(
-                tool_name=new_name,
+                tool_name=new_name,
+                json_schema_name=new_schema.get("name") if new_schema else None,
+                source_code=update_data.get("source_code"),
             )
 
         # Now perform the update within the session
@@ -747,17 +601,6 @@ class ToolManager:
             tool = await tool.update_async(db_session=session, actor=actor)
             return tool.to_pydantic()
 
-    @enforce_types
-    @trace_method
-    def delete_tool_by_id(self, tool_id: str, actor: PydanticUser) -> None:
-        """Delete a tool by its ID."""
-        with db_registry.session() as session:
-            try:
-                tool = ToolModel.read(db_session=session, identifier=tool_id, actor=actor)
-                tool.hard_delete(db_session=session, actor=actor)
-            except NoResultFound:
-                raise ValueError(f"Tool with id {tool_id} not found.")
-
     @enforce_types
     @trace_method
     async def delete_tool_by_id_async(self, tool_id: str, actor: PydanticUser) -> None:
@@ -769,103 +612,6 @@ class ToolManager:
             except NoResultFound:
                 raise ValueError(f"Tool with id {tool_id} not found.")
 
-    @enforce_types
-    @trace_method
-    def upsert_base_tools(self, actor: PydanticUser) -> List[PydanticTool]:
-        """
-        Initialize or update all built-in Letta tools for a user.
-
-        This method scans predefined modules to discover and register all base tools
-        that ship with Letta. Tools are categorized by type (core, memory, multi-agent, etc.)
-        and tagged appropriately for filtering.
-
-        Args:
-            actor: The user to create/update tools for
-
-        Returns:
-            List of all base tools that were created or updated
-
-        Tool Categories Created:
-            - LETTA_CORE: Basic conversation tools (send_message)
-            - LETTA_MEMORY_CORE: Memory management (core_memory_append/replace)
-            - LETTA_MULTI_AGENT_CORE: Multi-agent communication tools
-            - LETTA_SLEEPTIME_CORE: Sleeptime agent tools
-            - LETTA_VOICE_SLEEPTIME_CORE: Voice agent specific tools
-            - LETTA_BUILTIN: Additional built-in utilities
-            - LETTA_FILES_CORE: File handling tools
-
-        Side Effects:
-            - Creates or updates tools in database
-            - Tools are marked with appropriate type and tags
-            - Existing custom tools with same names are NOT overwritten
-
-        Note:
-            This is typically called during user initialization or system upgrade
-            to ensure all base tools are available. Custom tools take precedence
-            over base tools with the same name.
-        """
-        functions_to_schema = {}
-
-        for module_name in LETTA_TOOL_MODULE_NAMES:
-            try:
-                module = importlib.import_module(module_name)
-            except Exception as e:
-                # Handle other general exceptions
-                raise e
-
-            try:
-                # Load the function set
-                functions_to_schema.update(load_function_set(module))
-            except ValueError as e:
-                err = f"Error loading function set '{module_name}': {e}"
-                warnings.warn(err)
-
-        # create tool in db
-        tools = []
-        for name, schema in functions_to_schema.items():
-            if name in LETTA_TOOL_SET:
-                if name in BASE_TOOLS:
-                    tool_type = ToolType.LETTA_CORE
-                    tags = [tool_type.value]
-                elif name in BASE_MEMORY_TOOLS:
-                    tool_type = ToolType.LETTA_MEMORY_CORE
-                    tags = [tool_type.value]
-                elif name in calculate_multi_agent_tools():
-                    tool_type = ToolType.LETTA_MULTI_AGENT_CORE
-                    tags = [tool_type.value]
-                elif name in BASE_SLEEPTIME_TOOLS:
-                    tool_type = ToolType.LETTA_SLEEPTIME_CORE
-                    tags = [tool_type.value]
-                elif name in BASE_VOICE_SLEEPTIME_TOOLS or name in BASE_VOICE_SLEEPTIME_CHAT_TOOLS:
-                    tool_type = ToolType.LETTA_VOICE_SLEEPTIME_CORE
-                    tags = [tool_type.value]
-                elif name in BUILTIN_TOOLS:
-                    tool_type = ToolType.LETTA_BUILTIN
-                    tags = [tool_type.value]
-                elif name in FILES_TOOLS:
-                    tool_type = ToolType.LETTA_FILES_CORE
-                    tags = [tool_type.value]
-                else:
-                    logger.warning(f"Tool name {name} is not in any known base tool set, skipping")
-                    continue
-
-                # create to tool
-                tools.append(
-                    self.create_or_update_tool(
-                        PydanticTool(
-                            name=name,
-                            tags=tags,
-                            source_type="python",
-                            tool_type=tool_type,
-                            return_char_limit=BASE_FUNCTION_RETURN_CHAR_LIMIT,
-                        ),
-                        actor=actor,
-                    )
-                )
-
-        # TODO: Delete any base tools that are stale
-        return tools
-
     @enforce_types
     @trace_method
     async def upsert_base_tools_async(
letta/services/tool_sandbox/base.py
CHANGED
@@ -5,6 +5,7 @@ from abc import ABC, abstractmethod
 from typing import Any, Dict, Optional
 
 from letta.functions.helpers import generate_model_from_args_json_schema
+from letta.otel.tracing import trace_method
 from letta.schemas.agent import AgentState
 from letta.schemas.sandbox_config import SandboxConfig
 from letta.schemas.tool import Tool
@@ -33,12 +34,7 @@ class AsyncToolSandboxBase(ABC):
         self.tool_name = tool_name
         self.args = args
         self.user = user
-
-        self.tool = tool_object or ToolManager().get_tool_by_name(tool_name=tool_name, actor=self.user)
-        if self.tool is None:
-            raise ValueError(
-                f"Agent attempted to invoke tool {self.tool_name} that does not exist for organization {self.user.organization_id}"
-            )
+        self.tool = tool_object
 
         # Store provided values or create manager to fetch them later
         self.provided_sandbox_config = sandbox_config
@@ -47,14 +43,27 @@ class AsyncToolSandboxBase(ABC):
         # Only create the manager if we need to (lazy initialization)
         self._sandbox_config_manager = None
 
-
-        if "agent_state" in parse_function_arguments(self.tool.source_code, self.tool.name):
-            self.inject_agent_state = True
-        else:
-            self.inject_agent_state = False
+        self._initialized = False
 
-
-
+    async def _init_async(self):
+        """Must be called inside the run method before the sandbox can be used"""
+        if not self._initialized:
+            if not self.tool:
+                self.tool = await ToolManager().get_tool_by_name_async(tool_name=self.tool_name, actor=self.user)
+
+            # missing tool
+            if self.tool is None:
+                raise ValueError(
+                    f"Agent attempted to invoke tool {self.tool_name} that does not exist for organization {self.user.organization_id}"
+                )
+
+            # TODO: deprecate this
+            if "agent_state" in parse_function_arguments(self.tool.source_code, self.tool.name):
+                self.inject_agent_state = True
+            else:
+                self.inject_agent_state = False
+            self.is_async_function = self._detect_async_function()
+            self._initialized = True
 
     # Lazily initialize the manager only when needed
     @property
@@ -75,11 +84,13 @@ class AsyncToolSandboxBase(ABC):
         """
         raise NotImplementedError
 
+    @trace_method
     async def generate_execution_script(self, agent_state: Optional[AgentState], wrap_print_with_markers: bool = False) -> str:
         """
         Generate code to run inside of execution sandbox. Serialize the agent state and arguments, call the tool,
         then base64-encode/pickle the result. Constructs the python file.
         """
+        await self._init_async()
         future_import = False
         schema_code = None
 
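The tool_sandbox/base.py hunks move tool resolution out of the constructor and into a lazy _init_async() step that entry points must await before self.tool is used, as generate_execution_script now does. A rough sketch of the same pattern in isolation is below; the SandboxBase/LocalSandbox classes and the run() method are hypothetical stand-ins, and only the _init_async()/_initialized contract mirrors the diff.

```python
# Sketch of the lazy-initialization pattern from AsyncToolSandboxBase above.
import asyncio


class SandboxBase:
    def __init__(self, tool_name, args, user, tool_object=None):
        self.tool_name = tool_name
        self.args = args
        self.user = user
        self.tool = tool_object       # may stay None until _init_async() runs
        self._initialized = False

    async def _init_async(self):
        """Resolve the tool lazily, the first time the sandbox is actually used."""
        if not self._initialized:
            if self.tool is None:
                # In the real class this is ToolManager().get_tool_by_name_async(...)
                self.tool = await self._lookup_tool()
            if self.tool is None:
                raise ValueError(f"Tool {self.tool_name} does not exist")
            self._initialized = True

    async def _lookup_tool(self):
        raise NotImplementedError


class LocalSandbox(SandboxBase):
    async def _lookup_tool(self):
        return {"name": self.tool_name}  # stand-in for a fetched Tool object

    async def run(self):
        await self._init_async()  # every entry point awaits init first
        return f"executing {self.tool['name']} with args {self.args}"


print(asyncio.run(LocalSandbox("roll_d20", {"sides": 20}, user=None).run()))
```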