letta-nightly 0.11.2.dev20250811104433__py3-none-any.whl → 0.11.3.dev20250812002120__py3-none-any.whl
This diff shows the changes between two publicly released versions of the package, as published to a supported registry. It is provided for informational purposes only.
- letta/__init__.py +1 -1
- letta/agents/letta_agent.py +22 -10
- letta/constants.py +7 -0
- letta/functions/function_sets/base.py +1 -1
- letta/helpers/converters.py +19 -0
- letta/helpers/json_helpers.py +1 -1
- letta/helpers/tool_rule_solver.py +48 -96
- letta/interfaces/openai_streaming_interface.py +9 -0
- letta/llm_api/anthropic_client.py +9 -2
- letta/llm_api/google_vertex_client.py +17 -4
- letta/llm_api/llm_client_base.py +4 -0
- letta/llm_api/openai_client.py +4 -1
- letta/log.py +3 -1
- letta/schemas/enums.py +4 -3
- letta/schemas/llm_config.py +35 -25
- letta/schemas/response_format.py +5 -6
- letta/schemas/tool_rule.py +8 -1
- letta/services/agent_manager.py +2 -3
- letta/services/mcp/base_client.py +6 -2
- letta/services/mcp_manager.py +11 -5
- letta/services/tool_executor/tool_execution_sandbox.py +8 -4
- letta/services/tool_manager.py +66 -42
- letta/services/tool_sandbox/e2b_sandbox.py +4 -2
- letta/services/tool_sandbox/modal_sandbox.py +4 -4
- letta/settings.py +2 -1
- {letta_nightly-0.11.2.dev20250811104433.dist-info → letta_nightly-0.11.3.dev20250812002120.dist-info}/METADATA +1 -1
- {letta_nightly-0.11.2.dev20250811104433.dist-info → letta_nightly-0.11.3.dev20250812002120.dist-info}/RECORD +30 -30
- {letta_nightly-0.11.2.dev20250811104433.dist-info → letta_nightly-0.11.3.dev20250812002120.dist-info}/LICENSE +0 -0
- {letta_nightly-0.11.2.dev20250811104433.dist-info → letta_nightly-0.11.3.dev20250812002120.dist-info}/WHEEL +0 -0
- {letta_nightly-0.11.2.dev20250811104433.dist-info → letta_nightly-0.11.3.dev20250812002120.dist-info}/entry_points.txt +0 -0
letta/schemas/llm_config.py
CHANGED
@@ -94,6 +94,9 @@ class LLMConfig(BaseModel):
         """
         model = values.get("model")
 
+        if model is None:
+            return values
+
         # Define models where we want put_inner_thoughts_in_kwargs to be False
         avoid_put_inner_thoughts_in_kwargs = ["gpt-4"]
 
@@ -107,25 +110,13 @@ class LLMConfig(BaseModel):
         if is_openai_reasoning_model(model):
             values["put_inner_thoughts_in_kwargs"] = False
 
-        if values.get("
+        if values.get("model_endpoint_type") == "anthropic" and (
+            model.startswith("claude-3-7-sonnet") or model.startswith("claude-sonnet-4") or model.startswith("claude-opus-4")
+        ):
             values["put_inner_thoughts_in_kwargs"] = False
 
         return values
 
-    @model_validator(mode="after")
-    def issue_warning_for_reasoning_constraints(self) -> "LLMConfig":
-        if self.enable_reasoner:
-            if self.max_reasoning_tokens is None:
-                logger.warning("max_reasoning_tokens must be set when enable_reasoner is True")
-            if self.max_tokens is not None and self.max_reasoning_tokens >= self.max_tokens:
-                logger.warning("max_tokens must be greater than max_reasoning_tokens (thinking budget)")
-            if self.put_inner_thoughts_in_kwargs:
-                logger.debug("Extended thinking is not compatible with put_inner_thoughts_in_kwargs")
-        elif self.max_reasoning_tokens and not self.enable_reasoner:
-            logger.warning("model will not use reasoning unless enable_reasoner is set to True")
-
-        return self
-
     @classmethod
     def default_config(cls, model_name: str):
         """
@@ -185,31 +176,50 @@ class LLMConfig(BaseModel):
             + (f" [ip={self.model_endpoint}]" if self.model_endpoint else "")
         )
 
+    @classmethod
+    def is_openai_reasoning_model(cls, config: "LLMConfig") -> bool:
+        return config.model_endpoint_type == "openai" and (
+            config.model.startswith("o1") or config.model.startswith("o3") or config.model.startswith("o4")
+        )
+
+    @classmethod
+    def is_anthropic_reasoning_model(cls, config: "LLMConfig") -> bool:
+        return config.model_endpoint_type == "anthropic" and (
+            config.model.startswith("claude-opus-4")
+            or config.model.startswith("claude-sonnet-4")
+            or config.model.startswith("claude-3-7-sonnet")
+        )
+
+    @classmethod
+    def is_google_vertex_reasoning_model(cls, config: "LLMConfig") -> bool:
+        return config.model_endpoint_type == "google_vertex" and (
+            config.model.startswith("gemini-2.5-flash") or config.model.startswith("gemini-2.5-pro")
+        )
+
     @classmethod
     def apply_reasoning_setting_to_config(cls, config: "LLMConfig", reasoning: bool):
         if not reasoning:
+            if cls.is_openai_reasoning_model(config) or config.model.startswith("gemini-2.5-pro"):
+                raise ValueError("Reasoning cannot be disabled for OpenAI o1/o3 models")
             config.put_inner_thoughts_in_kwargs = False
             config.enable_reasoner = False
 
         else:
             config.enable_reasoner = True
-            if (
-                config.model_endpoint_type == "anthropic"
-                and ("claude-opus-4" in config.model or "claude-sonnet-4" in config.model or "claude-3-7-sonnet" in config.model)
-            ) or (
-                config.model_endpoint_type == "google_vertex" and ("gemini-2.5-flash" in config.model or "gemini-2.0-pro" in config.model)
-            ):
+            if cls.is_anthropic_reasoning_model(config):
                 config.put_inner_thoughts_in_kwargs = False
                 if config.max_reasoning_tokens == 0:
                     config.max_reasoning_tokens = 1024
-            elif
-
-
+            elif cls.is_google_vertex_reasoning_model(config):
+                # Handle as non-reasoner until we support summary
+                config.put_inner_thoughts_in_kwargs = True
+                if config.max_reasoning_tokens == 0:
+                    config.max_reasoning_tokens = 1024
+            elif cls.is_openai_reasoning_model(config):
                 config.put_inner_thoughts_in_kwargs = False
                 if config.reasoning_effort is None:
                     config.reasoning_effort = "medium"
             else:
                 config.put_inner_thoughts_in_kwargs = True
-                config.enable_reasoner = False
 
         return config
letta/schemas/response_format.py
CHANGED
@@ -1,7 +1,7 @@
 from enum import Enum
 from typing import Annotated, Any, Dict, Literal, Union
 
-from pydantic import BaseModel, Field,
+from pydantic import BaseModel, Field, field_validator
 
 
 class ResponseFormatType(str, Enum):
@@ -52,13 +52,12 @@ class JsonSchemaResponseFormat(ResponseFormat):
         description="The JSON schema of the response.",
     )
 
-    @
-
+    @field_validator("json_schema")
+    @classmethod
+    def validate_json_schema(cls, v: dict[str, Any]) -> Dict[str, Any]:
         """Validate that the provided schema is a valid JSON schema."""
-        if not isinstance(v, dict):
-            raise ValueError("JSON schema must be a dictionary")
         if "schema" not in v:
-            raise ValueError("JSON schema should include a
+            raise ValueError("JSON schema should include a schema property")
         return v
 
 
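Note: the rewritten validator leans on pydantic's field typing (the field is already declared as a dict, so the isinstance check was redundant) and only enforces the presence of a top-level "schema" key. A minimal sketch of the resulting behavior, assuming the type discriminator has a default and the model can be constructed directly:

from letta.schemas.response_format import JsonSchemaResponseFormat

# Valid: the wrapper dict carries a "schema" property.
ok = JsonSchemaResponseFormat(json_schema={"name": "reply", "schema": {"type": "object"}})

# Invalid: missing "schema" now raises the clearer message.
# (pydantic's ValidationError subclasses ValueError, so this catch works.)
try:
    JsonSchemaResponseFormat(json_schema={"type": "object"})
except ValueError as e:
    print(e)  # ... JSON schema should include a schema property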
letta/schemas/tool_rule.py
CHANGED
@@ -3,7 +3,7 @@ import logging
 from typing import Annotated, Any, Dict, List, Literal, Optional, Set, Union
 
 from jinja2 import Template
-from pydantic import Field
+from pydantic import Field, field_validator
 
 from letta.schemas.enums import ToolRuleType
 from letta.schemas.letta_base import LettaBase
@@ -117,6 +117,13 @@ class ConditionalToolRule(BaseToolRule):
 
         return {self.default_child} if self.default_child else available_tools
 
+    @field_validator("child_output_mapping")
+    @classmethod
+    def validate_child_output_mapping(cls, v):
+        if len(v) == 0:
+            raise ValueError("Conditional tool rule must have at least one child tool.")
+        return v
+
     @staticmethod
     def _matches_key(function_output: str, key: Any) -> bool:
         """Helper function to determine if function output matches a mapping key."""
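Note: the new validator rejects an empty mapping at construction time instead of failing later during rule traversal. A minimal sketch; the constructor fields shown are an assumption about the ConditionalToolRule schema beyond child_output_mapping itself:

from letta.schemas.tool_rule import ConditionalToolRule

# Valid: at least one output-to-child mapping.
rule = ConditionalToolRule(
    tool_name="router",
    child_output_mapping={"yes": "search_tool"},
)

# Invalid: an empty mapping now fails fast with a clear message.
try:
    ConditionalToolRule(tool_name="router", child_output_mapping={})
except ValueError as e:
    print(e)  # ... Conditional tool rule must have at least one child tool.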
letta/services/agent_manager.py
CHANGED
@@ -1706,6 +1706,7 @@ class AgentManager:
         else:
             return agent_state
 
+    # Do not remove comment. (cliandy)
     # TODO: This is probably one of the worst pieces of code I've ever written please rip up as you see wish
     @enforce_types
     @trace_method
@@ -1715,7 +1716,6 @@ class AgentManager:
         actor: PydanticUser,
         force=False,
         update_timestamp=True,
-        tool_rules_solver: Optional[ToolRulesSolver] = None,
         dry_run: bool = False,
     ) -> Tuple[PydanticAgentState, Optional[PydanticMessage], int, int]:
         """Rebuilds the system message with the latest memory object and any shared memory block updates
@@ -1728,8 +1728,7 @@ class AgentManager:
         num_archival_memories = await self.passage_manager.agent_passage_size_async(actor=actor, agent_id=agent_id)
         agent_state = await self.get_agent_by_id_async(agent_id=agent_id, include_relationships=["memory", "sources", "tools"], actor=actor)
 
-
-            tool_rules_solver = ToolRulesSolver(agent_state.tool_rules)
+        tool_rules_solver = ToolRulesSolver(agent_state.tool_rules)
 
         curr_system_message = await self.message_manager.get_message_by_id_async(message_id=agent_state.message_ids[0], actor=actor)
 
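Note: callers can no longer inject a ToolRulesSolver into the rebuild path; one is always constructed from the agent's persisted rules. A tiny sketch of the construction the diff shows, with InitToolRule used as an assumed example of a rule type:

from letta.helpers.tool_rule_solver import ToolRulesSolver
from letta.schemas.tool_rule import InitToolRule

# Stand-in for agent_state.tool_rules fetched via get_agent_by_id_async.
rules = [InitToolRule(tool_name="send_message")]
solver = ToolRulesSolver(rules)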
letta/services/mcp/base_client.py
CHANGED
@@ -27,10 +27,14 @@ class AsyncBaseMCPClient:
             await self.session.initialize()
             self.initialized = True
         except ConnectionError as e:
-
+            # MCP connection failures are often due to user misconfiguration, not system errors
+            # Log at debug level to avoid triggering Sentry alerts for expected configuration issues
+            logger.debug(f"MCP connection failed: {str(e)}")
             raise e
         except Exception as e:
-
+            # MCP connection failures are often due to user misconfiguration, not system errors
+            # Log at info level to help with debugging without triggering Sentry alerts
+            logger.info(
                 f"Connecting to MCP server failed. Please review your server config: {self.server_config.model_dump_json(indent=4)}. Error: {str(e)}"
             )
             if hasattr(self.server_config, "server_url") and self.server_config.server_url:
letta/services/mcp_manager.py
CHANGED
@@ -60,7 +60,9 @@ class MCPManager:
             tools = await mcp_client.list_tools()
             return tools
         except Exception as e:
-
+            # MCP tool listing errors are often due to connection/configuration issues, not system errors
+            # Log at info level to avoid triggering Sentry alerts for expected failures
+            logger.info(f"Error listing tools for MCP server {mcp_server_name}: {e}")
             return []
         finally:
             await mcp_client.cleanup()
@@ -302,7 +304,8 @@ class MCPManager:
             try:
                 mcp_config = json.load(f)
             except Exception as e:
-
+                # Config parsing errors are user configuration issues, not system errors
+                logger.warning(f"Failed to parse MCP config file ({mcp_config_path}) as json: {e}")
                 return mcp_server_list
 
         # Proper formatting is "mcpServers" key at the top level,
@@ -313,7 +316,8 @@ class MCPManager:
 
                 # No support for duplicate server names
                 if server_name in mcp_server_list:
-
+                    # Duplicate server names are configuration issues, not system errors
+                    logger.warning(f"Duplicate MCP server name found (skipping): {server_name}")
                     continue
 
                 if "url" in server_params_raw:
@@ -328,7 +332,8 @@ class MCPManager:
                         )
                         mcp_server_list[server_name] = server_params
                     except Exception as e:
-
+                        # Config parsing errors are user configuration issues, not system errors
+                        logger.warning(f"Failed to parse server params for MCP server {server_name} (skipping): {e}")
                         continue
                 else:
                     # Attempt to parse the server params as a StdioServerParameters
@@ -341,7 +346,8 @@ class MCPManager:
                         )
                         mcp_server_list[server_name] = server_params
                     except Exception as e:
-
+                        # Config parsing errors are user configuration issues, not system errors
+                        logger.warning(f"Failed to parse server params for MCP server {server_name} (skipping): {e}")
                        continue
         return mcp_server_list
 
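Note: per the context lines above, the parser expects an "mcpServers" object at the top level, treats entries with a "url" key as URL-based servers, and otherwise attempts to parse stdio parameters. A hedged sketch of the accepted file shape; the per-entry field names other than "url" are assumptions based on common MCP configs:

import json

example = json.loads("""
{
  "mcpServers": {
    "weather": {"url": "https://example.com/sse"},
    "files": {"command": "npx", "args": ["-y", "some-mcp-server"]}
  }
}
""")
assert "mcpServers" in example  # the top-level key the parser requires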
letta/services/tool_executor/tool_execution_sandbox.py
CHANGED
@@ -224,8 +224,10 @@ class ToolExecutionSandbox:
             with open(temp_file_path, "r") as f:
                 code = f.read()
 
-
-
+            # Tool errors are expected behavior - tools can raise exceptions as part of their normal operation
+            # Only log at debug level to avoid triggering Sentry alerts for expected errors
+            logger.debug(f"Tool {self.tool_name} process error: {e}")
+            logger.debug(f"Tool {self.tool_name} auto-generated code for debugging: \n\n{code}")
             func_return = get_friendly_error_msg(
                 function_name=self.tool_name,
                 exception_name=type(e).__name__,
@@ -371,8 +373,10 @@ class ToolExecutionSandbox:
                 },
             )
         elif execution.error:
-
-
+            # Tool errors are expected behavior - tools can raise exceptions as part of their normal operation
+            # Only log at debug level to avoid triggering Sentry alerts for expected errors
+            logger.debug(f"Tool {self.tool_name} raised a {execution.error.name}: {execution.error.value}")
+            logger.debug(f"Traceback from e2b sandbox: \n{execution.error.traceback}")
             func_return = get_friendly_error_msg(
                 function_name=self.tool_name, exception_name=execution.error.name, exception_message=execution.error.value
             )
letta/services/tool_manager.py
CHANGED
@@ -410,32 +410,37 @@ class ToolManager:
         new_name = None
         new_schema = None
 
-        #
-
-        # if source code is provided, always derive the name from it
-        if "source_code" in update_data.keys() and not bypass_name_check:
-            # derive the schema from source code to get the function name
-            derived_schema = derive_openai_json_schema(source_code=update_data["source_code"])
-            new_name = derived_schema["name"]
-
-            # if json_schema wasn't provided, use the derived schema
-            if "json_schema" not in update_data.keys():
-                new_schema = derived_schema
-            else:
-                # if json_schema was provided, update only its name to match the source code
-                new_schema = update_data["json_schema"].copy()
-                new_schema["name"] = new_name
-                # update the json_schema in update_data so it gets applied in the loop
-                update_data["json_schema"] = new_schema
+        # Fetch current tool to allow conditional logic based on tool type
+        current_tool = self.get_tool_by_id(tool_id=tool_id, actor=actor)
 
-
-
-
+        # For MCP tools, do NOT derive schema from Python source. Trust provided JSON schema.
+        if current_tool.tool_type == ToolType.EXTERNAL_MCP:
+            if "json_schema" in update_data:
+                new_schema = update_data["json_schema"].copy()
+                new_name = new_schema.get("name", current_tool.name)
+            else:
+                new_schema = current_tool.json_schema
+                new_name = current_tool.name
+            update_data.pop("source_code", None)
             if new_name != current_tool.name:
-                # check if a tool with the new name already exists
                 existing_tool = self.get_tool_by_name(tool_name=new_name, actor=actor)
                 if existing_tool:
                     raise LettaToolNameConflictError(tool_name=new_name)
+        else:
+            # For non-MCP tools, preserve existing behavior
+            if "source_code" in update_data.keys() and not bypass_name_check:
+                derived_schema = derive_openai_json_schema(source_code=update_data["source_code"])
+                new_name = derived_schema["name"]
+                if "json_schema" not in update_data.keys():
+                    new_schema = derived_schema
+                else:
+                    new_schema = update_data["json_schema"].copy()
+                    new_schema["name"] = new_name
+                    update_data["json_schema"] = new_schema
+            if new_name != current_tool.name:
+                existing_tool = self.get_tool_by_name(tool_name=new_name, actor=actor)
+                if existing_tool:
+                    raise LettaToolNameConflictError(tool_name=new_name)
 
         # Now perform the update within the session
         with db_registry.session() as session:
@@ -473,32 +478,51 @@ class ToolManager:
         new_name = None
         new_schema = None
 
-        #
-
-        # if source code is provided, always derive the name from it
-        if "source_code" in update_data.keys() and not bypass_name_check:
-            # derive the schema from source code to get the function name
-            derived_schema = derive_openai_json_schema(source_code=update_data["source_code"])
-            new_name = derived_schema["name"]
-
-            # if json_schema wasn't provided, use the derived schema
-            if "json_schema" not in update_data.keys():
-                new_schema = derived_schema
-            else:
-                # if json_schema was provided, update only its name to match the source code
-                new_schema = update_data["json_schema"].copy()
-                new_schema["name"] = new_name
-                # update the json_schema in update_data so it gets applied in the loop
-                update_data["json_schema"] = new_schema
+        # Fetch current tool early to allow conditional logic based on tool type
+        current_tool = await self.get_tool_by_id_async(tool_id=tool_id, actor=actor)
 
-
-
-        #
+        # For MCP tools, do NOT derive schema from Python source. Trust provided JSON schema.
+        if current_tool.tool_type == ToolType.EXTERNAL_MCP:
+            # Prefer provided json_schema; fall back to current
+            if "json_schema" in update_data:
+                new_schema = update_data["json_schema"].copy()
+                new_name = new_schema.get("name", current_tool.name)
+            else:
+                new_schema = current_tool.json_schema
+                new_name = current_tool.name
+            # Ensure we don't trigger derive
+            update_data.pop("source_code", None)
+            # If name changes, enforce uniqueness
             if new_name != current_tool.name:
-                # check if a tool with the new name already exists
                 name_exists = await self.tool_name_exists_async(tool_name=new_name, actor=actor)
                 if name_exists:
                     raise LettaToolNameConflictError(tool_name=new_name)
+        else:
+            # For non-MCP tools, preserve existing behavior
+            # TODO: Consider this behavior...is this what we want?
+            # TODO: I feel like it's bad if json_schema strays from source code so
+            # if source code is provided, always derive the name from it
+            if "source_code" in update_data.keys() and not bypass_name_check:
+                # derive the schema from source code to get the function name
+                derived_schema = derive_openai_json_schema(source_code=update_data["source_code"])
+                new_name = derived_schema["name"]
+
+                # if json_schema wasn't provided, use the derived schema
+                if "json_schema" not in update_data.keys():
+                    new_schema = derived_schema
+                else:
+                    # if json_schema was provided, update only its name to match the source code
+                    new_schema = update_data["json_schema"].copy()
+                    new_schema["name"] = new_name
+                    # update the json_schema in update_data so it gets applied in the loop
+                    update_data["json_schema"] = new_schema
+
+            # check if the name is changing and if so, verify it doesn't conflict
+            if new_name != current_tool.name:
+                # check if a tool with the new name already exists
+                name_exists = await self.tool_name_exists_async(tool_name=new_name, actor=actor)
+                if name_exists:
+                    raise LettaToolNameConflictError(tool_name=new_name)
 
         # Now perform the update within the session
         async with db_registry.async_session() as session:
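Note: both update paths now fork on tool type: EXTERNAL_MCP tools take the provided JSON schema verbatim (or keep the current one) and any source_code in the payload is discarded, while native tools keep the old derive-from-source behavior. A condensed, standalone restatement of the branching; names and the derive stub are hypothetical, not letta's API:

def plan_update(tool: dict, update_data: dict, bypass_name_check: bool = False):
    """Return the (name, schema) an update would apply; letta helpers stubbed out."""
    if tool["tool_type"] == "external_mcp":
        schema = dict(update_data.get("json_schema", tool["json_schema"]))
        name = schema.get("name", tool["name"])
        update_data.pop("source_code", None)  # never derive MCP schemas from Python source
    else:
        name, schema = tool["name"], tool["json_schema"]
        if "source_code" in update_data and not bypass_name_check:
            schema = {"name": "derived_fn"}  # stand-in for derive_openai_json_schema(...)
            name = schema["name"]
    return name, schema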
letta/services/tool_sandbox/e2b_sandbox.py
CHANGED
@@ -82,8 +82,10 @@ class AsyncToolSandboxE2B(AsyncToolSandboxBase):
                 },
             )
         elif execution.error:
-
-
+            # Tool errors are expected behavior - tools can raise exceptions as part of their normal operation
+            # Only log at debug level to avoid triggering Sentry alerts for expected errors
+            logger.debug(f"Tool {self.tool_name} raised a {execution.error.name}: {execution.error.value}")
+            logger.debug(f"Traceback from e2b sandbox: \n{execution.error.traceback}")
             func_return = get_friendly_error_msg(
                 function_name=self.tool_name, exception_name=execution.error.name, exception_message=execution.error.value
             )
letta/services/tool_sandbox/modal_sandbox.py
CHANGED
@@ -100,10 +100,10 @@ class AsyncToolSandboxModal(AsyncToolSandboxBase):
 
         # Process the result
         if result["error"]:
-
-
-            )
-            logger.
+            # Tool errors are expected behavior - tools can raise exceptions as part of their normal operation
+            # Only log at debug level to avoid triggering Sentry alerts for expected errors
+            logger.debug(f"Tool {self.tool_name} raised a {result['error']['name']}: {result['error']['value']}")
+            logger.debug(f"Traceback from Modal sandbox: \n{result['error']['traceback']}")
             func_return = get_friendly_error_msg(
                 function_name=self.tool_name, exception_name=result["error"]["name"], exception_message=result["error"]["value"]
             )
letta/settings.py
CHANGED
@@ -144,6 +144,7 @@ class ModelSettings(BaseSettings):
     # google ai
     gemini_api_key: Optional[str] = None
     gemini_base_url: str = "https://generativelanguage.googleapis.com/"
+    gemini_force_minimum_thinking_budget: bool = False
 
     # google vertex
     google_cloud_project: Optional[str] = None
@@ -200,7 +201,7 @@ class DatabaseChoice(str, Enum):
 class Settings(BaseSettings):
     model_config = SettingsConfigDict(env_prefix="letta_", extra="ignore")
 
-    letta_dir: Optional[Path] = Field(Path.home() / ".letta",
+    letta_dir: Optional[Path] = Field(Path.home() / ".letta", alias="LETTA_DIR")
     debug: Optional[bool] = False
     cors_origins: Optional[list] = cors_origins
 
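Note: in pydantic-settings, a field alias takes precedence over the class's env_prefix, so the alias added above keeps LETTA_DIR working as the environment variable name even though env_prefix="letta_" would otherwise look for LETTA_LETTA_DIR. A minimal standalone sketch of that behavior (assuming pydantic-settings v2):

import os
from pathlib import Path
from pydantic import Field
from pydantic_settings import BaseSettings, SettingsConfigDict

class Settings(BaseSettings):
    model_config = SettingsConfigDict(env_prefix="letta_", extra="ignore")
    # The alias bypasses env_prefix: the variable is LETTA_DIR, not LETTA_LETTA_DIR.
    letta_dir: Path = Field(Path.home() / ".letta", alias="LETTA_DIR")

os.environ["LETTA_DIR"] = "/tmp/letta-home"
print(Settings().letta_dir)  # /tmp/letta-home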