letta-nightly 0.6.41.dev20250317104157__py3-none-any.whl → 0.6.43.dev20250318012126__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of letta-nightly might be problematic.

Files changed (31)
  1. letta/__init__.py +1 -1
  2. letta/agent.py +11 -3
  3. letta/embeddings.py +18 -8
  4. letta/functions/mcp_client/base_client.py +37 -11
  5. letta/functions/mcp_client/sse_client.py +21 -9
  6. letta/functions/mcp_client/stdio_client.py +8 -3
  7. letta/helpers/converters.py +17 -5
  8. letta/helpers/tool_rule_solver.py +52 -72
  9. letta/llm_api/anthropic.py +1 -1
  10. letta/memory.py +6 -1
  11. letta/orm/sqlalchemy_base.py +7 -4
  12. letta/orm/tool.py +1 -0
  13. letta/schemas/enums.py +1 -5
  14. letta/schemas/message.py +6 -0
  15. letta/schemas/source.py +6 -1
  16. letta/schemas/tool.py +1 -4
  17. letta/schemas/tool_rule.py +73 -2
  18. letta/serialize_schemas/pydantic_agent_schema.py +3 -2
  19. letta/server/rest_api/app.py +6 -6
  20. letta/server/rest_api/routers/v1/sources.py +15 -2
  21. letta/server/rest_api/routers/v1/tools.py +1 -1
  22. letta/server/server.py +33 -11
  23. letta/services/tool_manager.py +7 -3
  24. letta/settings.py +7 -0
  25. letta/supervisor_multi_agent.py +11 -2
  26. letta/tracing.py +12 -2
  27. {letta_nightly-0.6.41.dev20250317104157.dist-info → letta_nightly-0.6.43.dev20250318012126.dist-info}/METADATA +2 -2
  28. {letta_nightly-0.6.41.dev20250317104157.dist-info → letta_nightly-0.6.43.dev20250318012126.dist-info}/RECORD +31 -31
  29. {letta_nightly-0.6.41.dev20250317104157.dist-info → letta_nightly-0.6.43.dev20250318012126.dist-info}/LICENSE +0 -0
  30. {letta_nightly-0.6.41.dev20250317104157.dist-info → letta_nightly-0.6.43.dev20250318012126.dist-info}/WHEEL +0 -0
  31. {letta_nightly-0.6.41.dev20250317104157.dist-info → letta_nightly-0.6.43.dev20250318012126.dist-info}/entry_points.txt +0 -0
letta/__init__.py CHANGED
@@ -1,4 +1,4 @@
- __version__ = "0.6.41"
+ __version__ = "0.6.43"

  # import clients
  from letta.client.client import LocalClient, RESTClient, create_client
letta/agent.py CHANGED
@@ -367,7 +367,10 @@ class Agent(BaseAgent):
  ) -> ChatCompletionResponse:
  """Get response from LLM API with robust retry mechanism."""
  log_telemetry(self.logger, "_get_ai_reply start")
- allowed_tool_names = self.tool_rules_solver.get_allowed_tool_names(last_function_response=self.last_function_response)
+ available_tools = set([t.name for t in self.agent_state.tools])
+ allowed_tool_names = self.tool_rules_solver.get_allowed_tool_names(
+ available_tools=available_tools, last_function_response=self.last_function_response
+ )
  agent_state_tool_jsons = [t.json_schema for t in self.agent_state.tools]

  allowed_functions = (
@@ -377,8 +380,8 @@ class Agent(BaseAgent):
  )

  # Don't allow a tool to be called if it failed last time
- if last_function_failed and self.tool_rules_solver.last_tool_name:
- allowed_functions = [f for f in allowed_functions if f["name"] != self.tool_rules_solver.last_tool_name]
+ if last_function_failed and self.tool_rules_solver.tool_call_history:
+ allowed_functions = [f for f in allowed_functions if f["name"] != self.tool_rules_solver.tool_call_history[-1]]
  if not allowed_functions:
  return None

@@ -773,6 +776,11 @@ class Agent(BaseAgent):
  **kwargs,
  ) -> LettaUsageStatistics:
  """Run Agent.step in a loop, handling chaining via heartbeat requests and function failures"""
+ # Defensively clear the tool rules solver history
+ # Usually this would be extraneous as Agent loop is re-loaded on every message send
+ # But just to be safe
+ self.tool_rules_solver.clear_tool_history()
+
  next_input_message = messages if isinstance(messages, list) else [messages]
  counter = 0
  total_usage = UsageStatistics()
letta/embeddings.py CHANGED
@@ -3,6 +3,7 @@ from typing import Any, List, Optional

  import numpy as np
  import tiktoken
+ from openai import OpenAI

  from letta.constants import EMBEDDING_TO_TOKENIZER_DEFAULT, EMBEDDING_TO_TOKENIZER_MAP, MAX_EMBEDDING_DIM
  from letta.schemas.embedding_config import EmbeddingConfig
@@ -201,6 +202,21 @@ class GoogleVertexEmbeddings:
  return response.embeddings[0].embedding


+ class OpenAIEmbeddings:
+
+ def __init__(self, api_key: str, model: str, base_url: str):
+ if base_url:
+ self.client = OpenAI(api_key=api_key, base_url=base_url)
+ else:
+ self.client = OpenAI(api_key=api_key)
+ self.model = model
+
+ def get_text_embedding(self, text: str):
+ response = self.client.embeddings.create(input=text, model=self.model)
+
+ return response.data[0].embedding
+
+
  def query_embedding(embedding_model, query_text: str):
  """Generate padded embedding for querying database"""
  query_vec = embedding_model.get_text_embedding(query_text)
@@ -218,15 +234,9 @@ def embedding_model(config: EmbeddingConfig, user_id: Optional[uuid.UUID] = None
  from letta.settings import model_settings

  if endpoint_type == "openai":
- from llama_index.embeddings.openai import OpenAIEmbedding
-
- additional_kwargs = {"user_id": user_id} if user_id else {}
- model = OpenAIEmbedding(
- api_base=config.embedding_endpoint,
- api_key=model_settings.openai_api_key,
- additional_kwargs=additional_kwargs,
+ return OpenAIEmbeddings(
+ api_key=model_settings.openai_api_key, model=config.embedding_model, base_url=model_settings.openai_api_base
  )
- return model

  elif endpoint_type == "azure":
  assert all(
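Note: the "openai" endpoint type now returns the in-house OpenAIEmbeddings wrapper instead of llama_index's OpenAIEmbedding. A minimal usage sketch, assuming placeholder credentials and an OpenAI-compatible endpoint (values below are illustrative, not taken from the package's docs):

from letta.embeddings import OpenAIEmbeddings

# Placeholder values for illustration only.
embedder = OpenAIEmbeddings(
    api_key="sk-...",                      # hypothetical key
    model="text-embedding-3-small",        # any OpenAI-compatible embedding model
    base_url="https://api.openai.com/v1",  # or a compatible proxy; a falsy base_url falls back to the default client
)
vector = embedder.get_text_embedding("hello world")
print(len(vector))  # dimensionality reported by the endpoint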
letta/functions/mcp_client/base_client.py CHANGED
@@ -5,12 +5,14 @@ from mcp import ClientSession, Tool

  from letta.functions.mcp_client.types import BaseServerConfig
  from letta.log import get_logger
+ from letta.settings import tool_settings

  logger = get_logger(__name__)


  class BaseMCPClient:
- def __init__(self):
+ def __init__(self, server_config: BaseServerConfig):
+ self.server_config = server_config
  self.session: Optional[ClientSession] = None
  self.stdio = None
  self.write = None
@@ -18,30 +20,54 @@ class BaseMCPClient:
  self.loop = asyncio.new_event_loop()
  self.cleanup_funcs = []

- def connect_to_server(self, server_config: BaseServerConfig):
+ def connect_to_server(self):
  asyncio.set_event_loop(self.loop)
- success = self._initialize_connection(server_config)
+ success = self._initialize_connection(self.server_config, timeout=tool_settings.mcp_connect_to_server_timeout)

  if success:
- self.loop.run_until_complete(self.session.initialize())
- self.initialized = True
+ try:
+ self.loop.run_until_complete(
+ asyncio.wait_for(self.session.initialize(), timeout=tool_settings.mcp_connect_to_server_timeout)
+ )
+ self.initialized = True
+ except asyncio.TimeoutError:
+ raise RuntimeError(
+ f"Timed out while initializing session for MCP server {self.server_config.server_name} (timeout={tool_settings.mcp_connect_to_server_timeout}s)."
+ )
  else:
  raise RuntimeError(
- f"Connecting to MCP server failed. Please review your server config: {server_config.model_dump_json(indent=4)}"
+ f"Connecting to MCP server failed. Please review your server config: {self.server_config.model_dump_json(indent=4)}"
  )

- def _initialize_connection(self, server_config: BaseServerConfig) -> bool:
+ def _initialize_connection(self, server_config: BaseServerConfig, timeout: float) -> bool:
  raise NotImplementedError("Subclasses must implement _initialize_connection")

  def list_tools(self) -> List[Tool]:
  self._check_initialized()
- response = self.loop.run_until_complete(self.session.list_tools())
- return response.tools
+ try:
+ response = self.loop.run_until_complete(
+ asyncio.wait_for(self.session.list_tools(), timeout=tool_settings.mcp_list_tools_timeout)
+ )
+ return response.tools
+ except asyncio.TimeoutError:
+ # Could log, throw a custom exception, etc.
+ logger.error(
+ f"Timed out while listing tools for MCP server {self.server_config.server_name} (timeout={tool_settings.mcp_list_tools_timeout}s)."
+ )
+ return []

  def execute_tool(self, tool_name: str, tool_args: dict) -> Tuple[str, bool]:
  self._check_initialized()
- result = self.loop.run_until_complete(self.session.call_tool(tool_name, tool_args))
- return str(result.content), result.isError
+ try:
+ result = self.loop.run_until_complete(
+ asyncio.wait_for(self.session.call_tool(tool_name, tool_args), timeout=tool_settings.mcp_execute_tool_timeout)
+ )
+ return str(result.content), result.isError
+ except asyncio.TimeoutError:
+ logger.error(
+ f"Timed out while executing tool '{tool_name}' for MCP server {self.server_config.server_name} (timeout={tool_settings.mcp_execute_tool_timeout}s)."
+ )
+ return "", True

  def _check_initialized(self):
  if not self.initialized:
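All session calls above are now bounded with asyncio.wait_for on the client's dedicated event loop. A standalone sketch of that pattern, using a hypothetical coroutine rather than the real MCP session API:

import asyncio

async def slow_operation() -> str:
    # Hypothetical stand-in for session.initialize() / list_tools() / call_tool()
    await asyncio.sleep(5)
    return "done"

loop = asyncio.new_event_loop()
try:
    # Same shape as the client code: a hung server surfaces as
    # asyncio.TimeoutError instead of blocking run_until_complete forever.
    result = loop.run_until_complete(asyncio.wait_for(slow_operation(), timeout=2.0))
except asyncio.TimeoutError:
    result = None  # log and fall back, as list_tools()/execute_tool() now do
finally:
    loop.close()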
letta/functions/mcp_client/sse_client.py CHANGED
@@ -1,21 +1,33 @@
+ import asyncio
+
  from mcp import ClientSession
  from mcp.client.sse import sse_client

  from letta.functions.mcp_client.base_client import BaseMCPClient
  from letta.functions.mcp_client.types import SSEServerConfig
+ from letta.log import get_logger

  # see: https://modelcontextprotocol.io/quickstart/user
  MCP_CONFIG_TOPLEVEL_KEY = "mcpServers"

+ logger = get_logger(__name__)
+

  class SSEMCPClient(BaseMCPClient):
- def _initialize_connection(self, server_config: SSEServerConfig) -> bool:
- sse_cm = sse_client(url=server_config.server_url)
- sse_transport = self.loop.run_until_complete(sse_cm.__aenter__())
- self.stdio, self.write = sse_transport
- self.cleanup_funcs.append(lambda: self.loop.run_until_complete(sse_cm.__aexit__(None, None, None)))
+ def _initialize_connection(self, server_config: SSEServerConfig, timeout: float) -> bool:
+ try:
+ sse_cm = sse_client(url=server_config.server_url)
+ sse_transport = self.loop.run_until_complete(asyncio.wait_for(sse_cm.__aenter__(), timeout=timeout))
+ self.stdio, self.write = sse_transport
+ self.cleanup_funcs.append(lambda: self.loop.run_until_complete(sse_cm.__aexit__(None, None, None)))

- session_cm = ClientSession(self.stdio, self.write)
- self.session = self.loop.run_until_complete(session_cm.__aenter__())
- self.cleanup_funcs.append(lambda: self.loop.run_until_complete(session_cm.__aexit__(None, None, None)))
- return True
+ session_cm = ClientSession(self.stdio, self.write)
+ self.session = self.loop.run_until_complete(asyncio.wait_for(session_cm.__aenter__(), timeout=timeout))
+ self.cleanup_funcs.append(lambda: self.loop.run_until_complete(session_cm.__aexit__(None, None, None)))
+ return True
+ except asyncio.TimeoutError:
+ logger.error(f"Timed out while establishing SSE connection (timeout={timeout}s).")
+ return False
+ except Exception:
+ logger.exception("Exception occurred while initializing SSE client session.")
+ return False
letta/functions/mcp_client/stdio_client.py CHANGED
@@ -1,3 +1,4 @@
+ import asyncio
  import sys
  from contextlib import asynccontextmanager

@@ -16,19 +17,23 @@ logger = get_logger(__name__)


  class StdioMCPClient(BaseMCPClient):
- def _initialize_connection(self, server_config: StdioServerConfig) -> bool:
+ def _initialize_connection(self, server_config: StdioServerConfig, timeout: float) -> bool:
  try:
  server_params = StdioServerParameters(command=server_config.command, args=server_config.args)
  stdio_cm = forked_stdio_client(server_params)
- stdio_transport = self.loop.run_until_complete(stdio_cm.__aenter__())
+ stdio_transport = self.loop.run_until_complete(asyncio.wait_for(stdio_cm.__aenter__(), timeout=timeout))
  self.stdio, self.write = stdio_transport
  self.cleanup_funcs.append(lambda: self.loop.run_until_complete(stdio_cm.__aexit__(None, None, None)))

  session_cm = ClientSession(self.stdio, self.write)
- self.session = self.loop.run_until_complete(session_cm.__aenter__())
+ self.session = self.loop.run_until_complete(asyncio.wait_for(session_cm.__aenter__(), timeout=timeout))
  self.cleanup_funcs.append(lambda: self.loop.run_until_complete(session_cm.__aexit__(None, None, None)))
  return True
+ except asyncio.TimeoutError:
+ logger.error(f"Timed out while establishing stdio connection (timeout={timeout}s).")
+ return False
  except Exception:
+ logger.exception("Exception occurred while initializing stdio client session.")
  return False

letta/helpers/converters.py CHANGED
@@ -20,7 +20,15 @@ from letta.schemas.letta_message_content import (
  )
  from letta.schemas.llm_config import LLMConfig
  from letta.schemas.message import ToolReturn
- from letta.schemas.tool_rule import ChildToolRule, ConditionalToolRule, ContinueToolRule, InitToolRule, TerminalToolRule, ToolRule
+ from letta.schemas.tool_rule import (
+ ChildToolRule,
+ ConditionalToolRule,
+ ContinueToolRule,
+ InitToolRule,
+ MaxCountPerStepToolRule,
+ TerminalToolRule,
+ ToolRule,
+ )

  # --------------------------
  # LLMConfig Serialization
@@ -85,23 +93,27 @@ def deserialize_tool_rules(data: Optional[List[Dict]]) -> List[Union[ChildToolRu
  return [deserialize_tool_rule(rule_data) for rule_data in data]


- def deserialize_tool_rule(data: Dict) -> Union[ChildToolRule, InitToolRule, TerminalToolRule, ConditionalToolRule, ContinueToolRule]:
+ def deserialize_tool_rule(
+ data: Dict,
+ ) -> Union[ChildToolRule, InitToolRule, TerminalToolRule, ConditionalToolRule, ContinueToolRule, MaxCountPerStepToolRule]:
  """Deserialize a dictionary to the appropriate ToolRule subclass based on 'type'."""
  rule_type = ToolRuleType(data.get("type"))

- if rule_type == ToolRuleType.run_first or rule_type == ToolRuleType.InitToolRule:
+ if rule_type == ToolRuleType.run_first:
  data["type"] = ToolRuleType.run_first
  return InitToolRule(**data)
- elif rule_type == ToolRuleType.exit_loop or rule_type == ToolRuleType.TerminalToolRule:
+ elif rule_type == ToolRuleType.exit_loop:
  data["type"] = ToolRuleType.exit_loop
  return TerminalToolRule(**data)
- elif rule_type == ToolRuleType.constrain_child_tools or rule_type == ToolRuleType.ToolRule:
+ elif rule_type == ToolRuleType.constrain_child_tools:
  data["type"] = ToolRuleType.constrain_child_tools
  return ChildToolRule(**data)
  elif rule_type == ToolRuleType.conditional:
  return ConditionalToolRule(**data)
  elif rule_type == ToolRuleType.continue_loop:
  return ContinueToolRule(**data)
+ elif rule_type == ToolRuleType.max_count_per_step:
+ return MaxCountPerStepToolRule(**data)
  raise ValueError(f"Unknown ToolRule type: {rule_type}")


letta/helpers/tool_rule_solver.py CHANGED
@@ -1,10 +1,17 @@
- import json
- from typing import List, Optional, Union
+ from typing import List, Optional, Set, Union

  from pydantic import BaseModel, Field

  from letta.schemas.enums import ToolRuleType
- from letta.schemas.tool_rule import BaseToolRule, ChildToolRule, ConditionalToolRule, ContinueToolRule, InitToolRule, TerminalToolRule
+ from letta.schemas.tool_rule import (
+ BaseToolRule,
+ ChildToolRule,
+ ConditionalToolRule,
+ ContinueToolRule,
+ InitToolRule,
+ MaxCountPerStepToolRule,
+ TerminalToolRule,
+ )


  class ToolRuleValidationError(Exception):
@@ -21,13 +28,15 @@ class ToolRulesSolver(BaseModel):
  continue_tool_rules: List[ContinueToolRule] = Field(
  default_factory=list, description="Continue tool rules to be used to continue tool execution."
  )
- tool_rules: List[Union[ChildToolRule, ConditionalToolRule]] = Field(
+ # TODO: This should be renamed?
+ # TODO: These are tools that control the set of allowed functions in the next turn
+ child_based_tool_rules: List[Union[ChildToolRule, ConditionalToolRule, MaxCountPerStepToolRule]] = Field(
  default_factory=list, description="Standard tool rules for controlling execution sequence and allowed transitions."
  )
  terminal_tool_rules: List[TerminalToolRule] = Field(
  default_factory=list, description="Terminal tool rules that end the agent loop if called."
  )
- last_tool_name: Optional[str] = Field(None, description="The most recent tool used, updated with each tool call.")
+ tool_call_history: List[str] = Field(default_factory=list, description="History of tool calls, updated with each tool call.")

  def __init__(self, tool_rules: List[BaseToolRule], **kwargs):
  super().__init__(**kwargs)
@@ -38,45 +47,60 @@ class ToolRulesSolver(BaseModel):
  self.init_tool_rules.append(rule)
  elif rule.type == ToolRuleType.constrain_child_tools:
  assert isinstance(rule, ChildToolRule)
- self.tool_rules.append(rule)
+ self.child_based_tool_rules.append(rule)
  elif rule.type == ToolRuleType.conditional:
  assert isinstance(rule, ConditionalToolRule)
  self.validate_conditional_tool(rule)
- self.tool_rules.append(rule)
+ self.child_based_tool_rules.append(rule)
  elif rule.type == ToolRuleType.exit_loop:
  assert isinstance(rule, TerminalToolRule)
  self.terminal_tool_rules.append(rule)
  elif rule.type == ToolRuleType.continue_loop:
  assert isinstance(rule, ContinueToolRule)
  self.continue_tool_rules.append(rule)
+ elif rule.type == ToolRuleType.max_count_per_step:
+ assert isinstance(rule, MaxCountPerStepToolRule)
+ self.child_based_tool_rules.append(rule)

  def update_tool_usage(self, tool_name: str):
- """Update the internal state to track the last tool called."""
- self.last_tool_name = tool_name
+ """Update the internal state to track tool call history."""
+ self.tool_call_history.append(tool_name)

- def get_allowed_tool_names(self, error_on_empty: bool = False, last_function_response: Optional[str] = None) -> List[str]:
+ def clear_tool_history(self):
+ """Clear the history of tool calls."""
+ self.tool_call_history.clear()
+
+ def get_allowed_tool_names(
+ self, available_tools: Set[str], error_on_empty: bool = False, last_function_response: Optional[str] = None
+ ) -> List[str]:
  """Get a list of tool names allowed based on the last tool called."""
- if self.last_tool_name is None:
- # Use initial tool rules if no tool has been called yet
- return [rule.tool_name for rule in self.init_tool_rules]
+ # TODO: This piece of code here is quite ugly and deserves a refactor
+ # TODO: There's some weird logic encoded here:
+ # TODO: -> This only takes into consideration Init, and a set of Child/Conditional/MaxSteps tool rules
+ # TODO: -> Init tool rules outputs are treated additively, Child/Conditional/MaxSteps are intersection based
+ # TODO: -> Tool rules should probably be refactored to take in a set of tool names?
+ # If no tool has been called yet, return InitToolRules additively
+ if not self.tool_call_history:
+ if self.init_tool_rules:
+ # If there are init tool rules, only return those defined in the init tool rules
+ return [rule.tool_name for rule in self.init_tool_rules]
+ else:
+ # Otherwise, return all the available tools
+ return list(available_tools)
  else:
- # Find a matching ToolRule for the last tool used
- current_rule = next((rule for rule in self.tool_rules if rule.tool_name == self.last_tool_name), None)
+ # Collect valid tools from all child-based rules
+ valid_tool_sets = [
+ rule.get_valid_tools(self.tool_call_history, available_tools, last_function_response)
+ for rule in self.child_based_tool_rules
+ ]

- if current_rule is None:
- if error_on_empty:
- raise ValueError(f"No tool rule found for {self.last_tool_name}")
- return []
+ # Compute intersection of all valid tool sets
+ final_allowed_tools = set.intersection(*valid_tool_sets) if valid_tool_sets else available_tools

- # If the current rule is a conditional tool rule, use the LLM response to
- # determine which child tool to use
- if isinstance(current_rule, ConditionalToolRule):
- if not last_function_response:
- raise ValueError("Conditional tool rule requires an LLM response to determine which child tool to use")
- next_tool = self.evaluate_conditional_tool(current_rule, last_function_response)
- return [next_tool] if next_tool else []
+ if error_on_empty and not final_allowed_tools:
+ raise ValueError("No valid tools found based on tool rules.")

- return current_rule.children if current_rule.children else []
+ return list(final_allowed_tools)

  def is_terminal_tool(self, tool_name: str) -> bool:
  """Check if the tool is defined as a terminal tool in the terminal tool rules."""
@@ -84,7 +108,7 @@ class ToolRulesSolver(BaseModel):

  def has_children_tools(self, tool_name):
  """Check if the tool has children tools"""
- return any(rule.tool_name == tool_name for rule in self.tool_rules)
+ return any(rule.tool_name == tool_name for rule in self.child_based_tool_rules)

  def is_continue_tool(self, tool_name):
  """Check if the tool is defined as a continue tool in the tool rules."""
@@ -103,47 +127,3 @@ class ToolRulesSolver(BaseModel):
  if len(rule.child_output_mapping) == 0:
  raise ToolRuleValidationError("Conditional tool rule must have at least one child tool.")
  return True
-
- def evaluate_conditional_tool(self, tool: ConditionalToolRule, last_function_response: str) -> str:
- """
- Parse function response to determine which child tool to use based on the mapping
-
- Args:
- tool (ConditionalToolRule): The conditional tool rule
- last_function_response (str): The function response in JSON format
-
- Returns:
- str: The name of the child tool to use next
- """
- json_response = json.loads(last_function_response)
- function_output = json_response["message"]
-
- # Try to match the function output with a mapping key
- for key in tool.child_output_mapping:
-
- # Convert function output to match key type for comparison
- if isinstance(key, bool):
- typed_output = function_output.lower() == "true"
- elif isinstance(key, int):
- try:
- typed_output = int(function_output)
- except (ValueError, TypeError):
- continue
- elif isinstance(key, float):
- try:
- typed_output = float(function_output)
- except (ValueError, TypeError):
- continue
- else: # string
- if function_output == "True" or function_output == "False":
- typed_output = function_output.lower()
- elif function_output == "None":
- typed_output = None
- else:
- typed_output = function_output
-
- if typed_output == key:
- return tool.child_output_mapping[key]
-
- # If no match found, use default
- return tool.default_child
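After this refactor the solver starts from the caller-supplied available_tools set and applies child-based rules by intersection, with per-type matching moved into each rule's get_valid_tools. A minimal sketch of the new calling convention, using hypothetical tool names (and assuming InitToolRule/ChildToolRule keep their default type values):

from letta.helpers.tool_rule_solver import ToolRulesSolver
from letta.schemas.tool_rule import ChildToolRule, InitToolRule

solver = ToolRulesSolver(
    tool_rules=[
        InitToolRule(tool_name="plan"),
        ChildToolRule(tool_name="plan", children=["search", "send_message"]),
    ]
)
available = {"plan", "search", "send_message"}

print(solver.get_allowed_tool_names(available_tools=available))  # ["plan"] -- init rules apply before any call
solver.update_tool_usage("plan")
print(solver.get_allowed_tool_names(available_tools=available))  # intersection of rule outputs: search, send_message
solver.clear_tool_history()  # reset between steps, as agent.py now does defensively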
letta/llm_api/anthropic.py CHANGED
@@ -840,7 +840,7 @@ def anthropic_chat_completions_process_stream(
  # Create a dummy message for ID/datetime if needed
  dummy_message = _Message(
  role=_MessageRole.assistant,
- text="",
+ content=[],
  agent_id="",
  model="",
  name=None,
letta/memory.py CHANGED
@@ -37,7 +37,12 @@ def get_memory_functions(cls: Memory) -> Dict[str, Callable]:

  def _format_summary_history(message_history: List[Message]):
  # TODO use existing prompt formatters for this (eg ChatML)
- return "\n".join([f"{m.role}: {m.content[0].text}" for m in message_history])
+ def get_message_text(content):
+ if content and len(content) == 1 and isinstance(content[0], TextContent):
+ return content[0].text
+ return ""
+
+ return "\n".join([f"{m.role}: {get_message_text(m.content)}" for m in message_history])


  def summarize_messages(
letta/orm/sqlalchemy_base.py CHANGED
@@ -508,10 +508,13 @@ class SqlalchemyBase(CommonSqlalchemyMetaMixins, Base):
  raise NotImplementedError("Sqlalchemy models must declare a __pydantic_model__ property to be convertable.")

  def to_pydantic(self) -> "BaseModel":
- """converts to the basic pydantic model counterpart"""
- model = self.__pydantic_model__.model_validate(self)
- if hasattr(self, "metadata_"):
- model.metadata = self.metadata_
+ """Converts the SQLAlchemy model to its corresponding Pydantic model."""
+ model = self.__pydantic_model__.model_validate(self, from_attributes=True)
+
+ # Explicitly map metadata_ to metadata in Pydantic model
+ if hasattr(self, "metadata_") and hasattr(model, "metadata_"):
+ setattr(model, "metadata_", self.metadata_)  # Ensures correct assignment
+
  return model

  def pretty_print_columns(self) -> str:
letta/orm/tool.py CHANGED
@@ -44,5 +44,6 @@ class Tool(SqlalchemyBase, OrganizationMixin):
  source_code: Mapped[Optional[str]] = mapped_column(String, doc="The source code of the function.")
  json_schema: Mapped[Optional[dict]] = mapped_column(JSON, default=lambda: {}, doc="The OAI compatable JSON schema of the function.")
  args_json_schema: Mapped[Optional[dict]] = mapped_column(JSON, default=lambda: {}, doc="The JSON schema of the function arguments.")
+ metadata_: Mapped[Optional[dict]] = mapped_column(JSON, default=lambda: {}, doc="A dictionary of additional metadata for the tool.")
  # relationships
  organization: Mapped["Organization"] = relationship("Organization", back_populates="tools", lazy="selectin")
letta/schemas/enums.py CHANGED
@@ -47,8 +47,4 @@ class ToolRuleType(str, Enum):
  continue_loop = "continue_loop"
  conditional = "conditional"
  constrain_child_tools = "constrain_child_tools"
- require_parent_tools = "require_parent_tools"
- # Deprecated
- InitToolRule = "InitToolRule"
- TerminalToolRule = "TerminalToolRule"
- ToolRule = "ToolRule"
+ max_count_per_step = "max_count_per_step"
letta/schemas/message.py CHANGED
@@ -748,6 +748,12 @@ class Message(BaseMessage):
  else:
  raise ValueError(self.role)

+ # Validate that parts is never empty before returning
+ if "parts" not in google_ai_message or not google_ai_message["parts"]:
+ # If parts is empty, add a default text part
+ google_ai_message["parts"] = [{"text": "empty message"}]
+ warnings.warn(f"Empty 'parts' detected in message with role '{self.role}'. Added default empty text part.")
+
  return google_ai_message

  def to_cohere_dict(
letta/schemas/source.py CHANGED
@@ -50,7 +50,12 @@ class SourceCreate(BaseSource):
  # required
  name: str = Field(..., description="The name of the source.")
  # TODO: @matt, make this required after shub makes the FE changes
- embedding_config: Optional[EmbeddingConfig] = Field(None, description="The embedding configuration used by the source.")
+
+ embedding: Optional[str] = Field(None, description="The hande for the embedding config used by the source.")
+ embedding_chunk_size: Optional[int] = Field(None, description="The chunk size of the embedding.")
+
+ # TODO: remove (legacy config)
+ embedding_config: Optional[EmbeddingConfig] = Field(None, description="(Legacy) The embedding configuration used by the source.")

  # optional
  description: Optional[str] = Field(None, description="The description of the source.")
letta/schemas/tool.py CHANGED
@@ -66,6 +66,7 @@ class Tool(BaseTool):
  # metadata fields
  created_by_id: Optional[str] = Field(None, description="The id of the user that made this Tool.")
  last_updated_by_id: Optional[str] = Field(None, description="The id of the user that made this Tool.")
+ metadata_: Optional[Dict[str, Any]] = Field(default_factory=dict, description="A dictionary of additional metadata for the tool.")

  @model_validator(mode="after")
  def refresh_source_code_and_json_schema(self):
@@ -137,10 +138,6 @@ class ToolCreate(LettaBase):

  @classmethod
  def from_mcp(cls, mcp_server_name: str, mcp_tool: MCPTool) -> "ToolCreate":
-
- # Get the MCP tool from the MCP server
- # NVM
-
  # Pass the MCP tool to the schema generator
  json_schema = generate_tool_schema_for_mcp(mcp_tool=mcp_tool)

letta/schemas/tool_rule.py CHANGED
@@ -1,4 +1,5 @@
- from typing import Annotated, Any, Dict, List, Literal, Optional, Union
+ import json
+ from typing import Annotated, Any, Dict, List, Literal, Optional, Set, Union

  from pydantic import Field

@@ -11,6 +12,9 @@ class BaseToolRule(LettaBase):
  tool_name: str = Field(..., description="The name of the tool. Must exist in the database for the user's organization.")
  type: ToolRuleType = Field(..., description="The type of the message.")

+ def get_valid_tools(self, tool_call_history: List[str], available_tools: Set[str], last_function_response: Optional[str]) -> set[str]:
+ raise NotImplementedError
+

  class ChildToolRule(BaseToolRule):
  """
@@ -20,6 +24,10 @@ class ChildToolRule(BaseToolRule):
  type: Literal[ToolRuleType.constrain_child_tools] = ToolRuleType.constrain_child_tools
  children: List[str] = Field(..., description="The children tools that can be invoked.")

+ def get_valid_tools(self, tool_call_history: List[str], available_tools: Set[str], last_function_response: Optional[str]) -> Set[str]:
+ last_tool = tool_call_history[-1] if tool_call_history else None
+ return set(self.children) if last_tool == self.tool_name else available_tools
+

  class ConditionalToolRule(BaseToolRule):
  """
@@ -31,6 +39,50 @@ class ConditionalToolRule(BaseToolRule):
  child_output_mapping: Dict[Any, str] = Field(..., description="The output case to check for mapping")
  require_output_mapping: bool = Field(default=False, description="Whether to throw an error when output doesn't match any case")

+ def get_valid_tools(self, tool_call_history: List[str], available_tools: Set[str], last_function_response: Optional[str]) -> Set[str]:
+ """Determine valid tools based on function output mapping."""
+ if not tool_call_history or tool_call_history[-1] != self.tool_name:
+ return available_tools  # No constraints if this rule doesn't apply
+
+ if not last_function_response:
+ raise ValueError("Conditional tool rule requires an LLM response to determine which child tool to use")
+
+ try:
+ json_response = json.loads(last_function_response)
+ function_output = json_response.get("message", "")
+ except json.JSONDecodeError:
+ if self.require_output_mapping:
+ return set()  # Strict mode: Invalid response means no allowed tools
+ return {self.default_child} if self.default_child else available_tools
+
+ # Match function output to a mapped child tool
+ for key, tool in self.child_output_mapping.items():
+ if self._matches_key(function_output, key):
+ return {tool}
+
+ # If no match found, use default or allow all tools if no default is set
+ if self.require_output_mapping:
+ return set()  # Strict mode: No match means no valid tools
+
+ return {self.default_child} if self.default_child else available_tools
+
+ def _matches_key(self, function_output: str, key: Any) -> bool:
+ """Helper function to determine if function output matches a mapping key."""
+ if isinstance(key, bool):
+ return function_output.lower() == "true" if key else function_output.lower() == "false"
+ elif isinstance(key, int):
+ try:
+ return int(function_output) == key
+ except ValueError:
+ return False
+ elif isinstance(key, float):
+ try:
+ return float(function_output) == key
+ except ValueError:
+ return False
+ else:  # Assume string
+ return str(function_output) == str(key)
+

  class InitToolRule(BaseToolRule):
  """
@@ -56,7 +108,26 @@ class ContinueToolRule(BaseToolRule):
  type: Literal[ToolRuleType.continue_loop] = ToolRuleType.continue_loop


+ class MaxCountPerStepToolRule(BaseToolRule):
+ """
+ Represents a tool rule configuration which constrains the total number of times this tool can be invoked in a single step.
+ """
+
+ type: Literal[ToolRuleType.max_count_per_step] = ToolRuleType.max_count_per_step
+ max_count_limit: int = Field(..., description="The max limit for the total number of times this tool can be invoked in a single step.")
+
+ def get_valid_tools(self, tool_call_history: List[str], available_tools: Set[str], last_function_response: Optional[str]) -> Set[str]:
+ """Restricts the tool if it has been called max_count_limit times in the current step."""
+ count = tool_call_history.count(self.tool_name)
+
+ # If the tool has been used max_count_limit times, it is no longer allowed
+ if count >= self.max_count_limit:
+ return available_tools - {self.tool_name}
+
+ return available_tools
+
+
  ToolRule = Annotated[
- Union[ChildToolRule, InitToolRule, TerminalToolRule, ConditionalToolRule, ContinueToolRule],
+ Union[ChildToolRule, InitToolRule, TerminalToolRule, ConditionalToolRule, ContinueToolRule, MaxCountPerStepToolRule],
  Field(discriminator="type"),
  ]
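A short sketch of the new MaxCountPerStepToolRule, with hypothetical tool names, showing how the per-step cap removes the tool from the allowed set once the call history reaches the limit:

from letta.schemas.tool_rule import MaxCountPerStepToolRule

# Hypothetical names: cap "web_search" at two invocations per agent step.
rule = MaxCountPerStepToolRule(tool_name="web_search", max_count_limit=2)

available = {"web_search", "send_message"}
history = ["web_search", "send_message", "web_search"]

# The tool already appears twice in this step's history, so it is filtered out;
# every other tool remains allowed.
print(rule.get_valid_tools(history, available, last_function_response=None))
# -> {"send_message"}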
letta/serialize_schemas/pydantic_agent_schema.py CHANGED
@@ -15,7 +15,7 @@ class CoreMemoryBlockSchema(BaseModel):
  is_template: bool
  label: str
  limit: int
- metadata_: Dict[str, Any] = Field(default_factory=dict)
+ metadata_: Optional[Dict] = None
  template_name: Optional[str]
  updated_at: str
  value: str
@@ -85,6 +85,7 @@ class ToolSchema(BaseModel):
  tags: List[str]
  tool_type: str
  updated_at: str
+ metadata_: Optional[Dict] = None


  class AgentSchema(BaseModel):
@@ -99,7 +100,7 @@ class AgentSchema(BaseModel):
  llm_config: LLMConfig
  message_buffer_autoclear: bool
  messages: List[MessageSchema]
- metadata_: Dict
+ metadata_: Optional[Dict] = None
  multi_agent_group: Optional[Any]
  name: str
  system: str
letta/server/rest_api/app.py CHANGED
@@ -256,15 +256,15 @@ def create_application() -> "FastAPI":
  )

  # Set up OpenTelemetry tracing
- endpoint = os.getenv("OTEL_EXPORTER_OTLP_ENDPOINT")
- if endpoint:
- print(f"▶ Using OTLP tracing with endpoint: {endpoint}")
+ otlp_endpoint = settings.otel_exporter_otlp_endpoint
+ if otlp_endpoint and not settings.disable_tracing:
+ print(f"▶ Using OTLP tracing with endpoint: {otlp_endpoint}")
  env_name_suffix = os.getenv("ENV_NAME")
  service_name = f"letta-server-{env_name_suffix.lower()}" if env_name_suffix else "letta-server"
  from letta.tracing import setup_tracing

  setup_tracing(
- endpoint=endpoint,
+ endpoint=otlp_endpoint,
  app=app,
  service_name=service_name,
  )
@@ -326,7 +326,7 @@ def start_server(
  print(f"▶ Server running at: https://{host or 'localhost'}:{port or REST_DEFAULT_PORT}")
  print(f"▶ View using ADE at: https://app.letta.com/development-servers/local/dashboard\n")
  uvicorn.run(
- app,
+ "letta.server.rest_api.app:app",
  host=host or "localhost",
  port=port or REST_DEFAULT_PORT,
  workers=settings.uvicorn_workers,
@@ -345,7 +345,7 @@ def start_server(
  print(f"▶ View using ADE at: https://app.letta.com/development-servers/local/dashboard\n")

  uvicorn.run(
- app,
+ "letta.server.rest_api.app:app",
  host=host or "localhost",
  port=port or REST_DEFAULT_PORT,
  workers=settings.uvicorn_workers,
letta/server/rest_api/routers/v1/sources.py CHANGED
@@ -76,8 +76,20 @@ def create_source(
  Create a new data source.
  """
  actor = server.user_manager.get_user_or_default(user_id=actor_id)
- source = Source(**source_create.model_dump())
-
+ if not source_create.embedding_config:
+ if not source_create.embedding:
+ # TODO: modify error type
+ raise ValueError("Must specify either embedding or embedding_config in request")
+ source_create.embedding_config = server.get_embedding_config_from_handle(
+ handle=source_create.embedding,
+ embedding_chunk_size=source_create.embedding_chunk_size or constants.DEFAULT_EMBEDDING_CHUNK_SIZE,
+ )
+ source = Source(
+ name=source_create.name,
+ embedding_config=source_create.embedding_config,
+ description=source_create.description,
+ metadata=source_create.metadata,
+ )
  return server.source_manager.create_source(source=source, actor=actor)


@@ -91,6 +103,7 @@ def modify_source(
  """
  Update the name or documentation of an existing data source.
  """
+ # TODO: allow updating the handle/embedding config
  actor = server.user_manager.get_user_or_default(user_id=actor_id)
  if not server.source_manager.get_source_by_id(source_id=source_id, actor=actor):
  raise HTTPException(status_code=404, detail=f"Source with id={source_id} does not exist.")
letta/server/rest_api/routers/v1/tools.py CHANGED
@@ -398,7 +398,7 @@ def add_mcp_tool(
  )

  tool_create = ToolCreate.from_mcp(mcp_server_name=mcp_server_name, mcp_tool=mcp_tool)
- return server.tool_manager.create_or_update_mcp_tool(tool_create=tool_create, actor=actor)
+ return server.tool_manager.create_or_update_mcp_tool(tool_create=tool_create, mcp_server_name=mcp_server_name, actor=actor)


  @router.put("/mcp/servers", response_model=List[Union[StdioServerConfig, SSEServerConfig]], operation_id="add_mcp_server")
letta/server/server.py CHANGED
@@ -332,14 +332,14 @@ class SyncServer(Server):

  for server_name, server_config in mcp_server_configs.items():
  if server_config.type == MCPServerType.SSE:
- self.mcp_clients[server_name] = SSEMCPClient()
+ self.mcp_clients[server_name] = SSEMCPClient(server_config)
  elif server_config.type == MCPServerType.STDIO:
- self.mcp_clients[server_name] = StdioMCPClient()
+ self.mcp_clients[server_name] = StdioMCPClient(server_config)
  else:
  raise ValueError(f"Invalid MCP server config: {server_config}")

  try:
- self.mcp_clients[server_name].connect_to_server(server_config)
+ self.mcp_clients[server_name].connect_to_server()
  except Exception as e:
  logger.error(e)
  self.mcp_clients.pop(server_name)
@@ -723,10 +723,17 @@ class SyncServer(Server):
  assert isinstance(message, MessageCreate)

  # If wrapping is enabled, wrap with metadata before placing content inside the Message object
+ if isinstance(message.content, str):
+ message_content = message.content
+ elif message.content and len(message.content) > 0 and isinstance(message.content[0], TextContent):
+ message_content = message.content[0].text
+ else:
+ assert message_content is not None, "Message content is empty"
+
  if message.role == MessageRole.user and wrap_user_message:
- message.content = system.package_user_message(user_message=message.content)
+ message_content = system.package_user_message(user_message=message_content)
  elif message.role == MessageRole.system and wrap_system_message:
- message.content = system.package_system_message(system_message=message.content)
+ message_content = system.package_system_message(system_message=message_content)
  else:
  raise ValueError(f"Invalid message role: {message.role}")

@@ -735,7 +742,7 @@ class SyncServer(Server):
  Message(
  agent_id=agent_id,
  role=message.role,
- content=[TextContent(text=message.content)] if message.content else [],
+ content=[TextContent(text=message_content)] if message_content else [],
  name=message.name,
  # assigned later?
  model=None,
@@ -1266,6 +1273,11 @@ class SyncServer(Server):
  # TODO support both command + SSE servers (via config)
  def get_mcp_servers(self) -> dict[str, Union[SSEServerConfig, StdioServerConfig]]:
  """List the MCP servers in the config (doesn't test that they are actually working)"""
+
+ # TODO implement non-flatfile mechanism
+ if not tool_settings.mcp_read_from_config:
+ raise RuntimeError("MCP config file disabled. Enable it in settings.")
+
  mcp_server_list = {}

  # Attempt to read from ~/.letta/mcp_config.json
@@ -1326,13 +1338,18 @@ class SyncServer(Server):

  def add_mcp_server_to_config(
  self, server_config: Union[SSEServerConfig, StdioServerConfig], allow_upsert: bool = True
- ) -> dict[str, Union[SSEServerConfig, StdioServerConfig]]:
+ ) -> List[Union[SSEServerConfig, StdioServerConfig]]:
  """Add a new server config to the MCP config file"""

+ # TODO implement non-flatfile mechanism
+ if not tool_settings.mcp_read_from_config:
+ raise RuntimeError("MCP config file disabled. Enable it in settings.")
+
  # If the config file doesn't exist, throw an error.
  mcp_config_path = os.path.join(constants.LETTA_DIR, constants.MCP_CONFIG_NAME)
  if not os.path.exists(mcp_config_path):
- raise FileNotFoundError(f"MCP config file not found: {mcp_config_path}")
+ # Create the file if it doesn't exist
+ logger.debug(f"MCP config file not found, creating new file at: {mcp_config_path}")

  # If the file does exist, attempt to parse it get calling get_mcp_servers
  try:
@@ -1348,13 +1365,13 @@ class SyncServer(Server):

  # Attempt to initialize the connection to the server
  if server_config.type == MCPServerType.SSE:
- new_mcp_client = SSEMCPClient()
+ new_mcp_client = SSEMCPClient(server_config)
  elif server_config.type == MCPServerType.STDIO:
- new_mcp_client = StdioMCPClient()
+ new_mcp_client = StdioMCPClient(server_config)
  else:
  raise ValueError(f"Invalid MCP server config: {server_config}")
  try:
- new_mcp_client.connect_to_server(server_config)
+ new_mcp_client.connect_to_server()
  except:
  logger.exception(f"Failed to connect to MCP server: {server_config.server_name}")
  raise RuntimeError(f"Failed to connect to MCP server: {server_config.server_name}")
@@ -1384,9 +1401,14 @@ class SyncServer(Server):
  def delete_mcp_server_from_config(self, server_name: str) -> dict[str, Union[SSEServerConfig, StdioServerConfig]]:
  """Delete a server config from the MCP config file"""

+ # TODO implement non-flatfile mechanism
+ if not tool_settings.mcp_read_from_config:
+ raise RuntimeError("MCP config file disabled. Enable it in settings.")
+
  # If the config file doesn't exist, throw an error.
  mcp_config_path = os.path.join(constants.LETTA_DIR, constants.MCP_CONFIG_NAME)
  if not os.path.exists(mcp_config_path):
+ # If the file doesn't exist, raise an error
  raise FileNotFoundError(f"MCP config file not found: {mcp_config_path}")

  # If the file does exist, attempt to parse it get calling get_mcp_servers
letta/services/tool_manager.py CHANGED
@@ -2,7 +2,7 @@ import importlib
  import warnings
  from typing import List, Optional

- from letta.constants import BASE_FUNCTION_RETURN_CHAR_LIMIT, BASE_MEMORY_TOOLS, BASE_TOOLS, MULTI_AGENT_TOOLS
+ from letta.constants import BASE_FUNCTION_RETURN_CHAR_LIMIT, BASE_MEMORY_TOOLS, BASE_TOOLS, MCP_TOOL_TAG_NAME_PREFIX, MULTI_AGENT_TOOLS
  from letta.functions.functions import derive_openai_json_schema, load_function_set
  from letta.log import get_logger
  from letta.orm.enums import ToolType
@@ -57,9 +57,13 @@ class ToolManager:
  return tool

  @enforce_types
- def create_or_update_mcp_tool(self, tool_create: ToolCreate, actor: PydanticUser) -> PydanticTool:
+ def create_or_update_mcp_tool(self, tool_create: ToolCreate, mcp_server_name: str, actor: PydanticUser) -> PydanticTool:
+ metadata = {MCP_TOOL_TAG_NAME_PREFIX: {"server_name": mcp_server_name}}
  return self.create_or_update_tool(
- PydanticTool(tool_type=ToolType.EXTERNAL_MCP, name=tool_create.json_schema["name"], **tool_create.model_dump()), actor
+ PydanticTool(
+ tool_type=ToolType.EXTERNAL_MCP, name=tool_create.json_schema["name"], metadata_=metadata, **tool_create.model_dump()
+ ),
+ actor,
  )

  @enforce_types
letta/settings.py CHANGED
@@ -18,6 +18,12 @@ class ToolSettings(BaseSettings):
  # Local Sandbox configurations
  local_sandbox_dir: Optional[str] = None

+ # MCP settings
+ mcp_connect_to_server_timeout: float = 15.0
+ mcp_list_tools_timeout: float = 10.0
+ mcp_execute_tool_timeout: float = 60.0
+ mcp_read_from_config: bool = True  # if False, will throw if attempting to read/write from file
+

  class SummarizerSettings(BaseSettings):
  model_config = SettingsConfigDict(env_prefix="letta_summarizer_", extra="ignore")
@@ -174,6 +180,7 @@ class Settings(BaseSettings):
  # telemetry logging
  verbose_telemetry_logging: bool = False
  otel_exporter_otlp_endpoint: str = "http://localhost:4317"
+ disable_tracing: bool = False

  # uvicorn settings
  uvicorn_workers: int = 1
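These new ToolSettings fields are what the MCP clients and SyncServer consult above. A sketch of inspecting or overriding them in-process (programmatic override only; the environment-variable prefix for ToolSettings is not visible in this diff, so it is not assumed here):

from letta.settings import tool_settings

print(tool_settings.mcp_connect_to_server_timeout)  # 15.0 by default (seconds)
print(tool_settings.mcp_list_tools_timeout)         # 10.0
print(tool_settings.mcp_execute_tool_timeout)       # 60.0

# Loosen the connect timeout for slow stdio servers in this process.
tool_settings.mcp_connect_to_server_timeout = 30.0

# Disabling the flat-file config makes get/add/delete MCP-server calls raise RuntimeError.
tool_settings.mcp_read_from_config = False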
letta/supervisor_multi_agent.py CHANGED
@@ -3,16 +3,18 @@ from typing import List, Optional
  from letta.agent import Agent, AgentState
  from letta.constants import DEFAULT_MESSAGE_TOOL
  from letta.functions.function_sets.multi_agent import send_message_to_all_agents_in_group
+ from letta.functions.functions import parse_source_code
+ from letta.functions.schema_generator import generate_schema
  from letta.interface import AgentInterface
  from letta.orm import User
  from letta.orm.enums import ToolType
  from letta.schemas.letta_message_content import TextContent
  from letta.schemas.message import Message, MessageCreate
+ from letta.schemas.tool import Tool
  from letta.schemas.tool_rule import ChildToolRule, InitToolRule, TerminalToolRule
  from letta.schemas.usage import LettaUsageStatistics
  from letta.services.agent_manager import AgentManager
  from letta.services.tool_manager import ToolManager
- from tests.helpers.utils import create_tool_from_func


  class SupervisorMultiAgent(Agent):
@@ -47,7 +49,14 @@ class SupervisorMultiAgent(Agent):

  # add multi agent tool
  if self.tool_manager.get_tool_by_name(tool_name="send_message_to_all_agents_in_group", actor=self.user) is None:
- multi_agent_tool = create_tool_from_func(send_message_to_all_agents_in_group)
+ multi_agent_tool = Tool(
+ name=send_message_to_all_agents_in_group.__name__,
+ description="",
+ source_type="python",
+ tags=[],
+ source_code=parse_source_code(send_message_to_all_agents_in_group),
+ json_schema=generate_schema(send_message_to_all_agents_in_group, None),
+ )
  multi_agent_tool.tool_type = ToolType.LETTA_MULTI_AGENT_CORE
  multi_agent_tool = self.tool_manager.create_or_update_tool(
  pydantic_tool=multi_agent_tool,
letta/tracing.py CHANGED
@@ -90,8 +90,8 @@ async def trace_error_handler(_request: Request, exc: Exception) -> JSONResponse
  # Add error details to current span
  span = trace.get_current_span()
  if span:
- span.add_event(
- name="exception",
+ span.record_exception(
+ exc,
  attributes={
  "exception.message": error_msg,
  "exception.type": type(exc).__name__,
@@ -112,6 +112,16 @@ def setup_tracing(
  global _is_tracing_initialized

  provider = TracerProvider(resource=Resource.create({"service.name": service_name}))
+ import uuid
+
+ provider = TracerProvider(
+ resource=Resource.create(
+ {
+ "service.name": service_name,
+ "device.id": uuid.getnode(),  # MAC address as unique device identifier
+ }
+ )
+ )
  if endpoint:
  provider.add_span_processor(BatchSpanProcessor(OTLPSpanExporter(endpoint=endpoint)))
  _is_tracing_initialized = True
{letta_nightly-0.6.41.dev20250317104157.dist-info → letta_nightly-0.6.43.dev20250318012126.dist-info}/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: letta-nightly
- Version: 0.6.41.dev20250317104157
+ Version: 0.6.43.dev20250318012126
  Summary: Create LLM agents with long-term memory and custom tools
  License: Apache License
  Author: Letta Team
@@ -31,7 +31,7 @@ Requires-Dist: brotli (>=1.1.0,<2.0.0)
  Requires-Dist: colorama (>=0.4.6,<0.5.0)
  Requires-Dist: composio-core (>=0.7.7,<0.8.0)
  Requires-Dist: composio-langchain (>=0.7.7,<0.8.0)
- Requires-Dist: datamodel-code-generator[http] (>=0.25.0,<0.26.0) ; extra == "desktop" or extra == "all"
+ Requires-Dist: datamodel-code-generator[http] (>=0.25.0,<0.26.0)
  Requires-Dist: demjson3 (>=3.0.6,<4.0.0)
  Requires-Dist: docker (>=7.1.0,<8.0.0) ; extra == "external-tools" or extra == "desktop" or extra == "all"
  Requires-Dist: docstring-parser (>=0.16,<0.17)
{letta_nightly-0.6.41.dev20250317104157.dist-info → letta_nightly-0.6.43.dev20250318012126.dist-info}/RECORD CHANGED
@@ -1,6 +1,6 @@
- letta/__init__.py,sha256=JZZg-67nC15yHnBqHm2_pXiqP-XF7i9RHClzkczUwtI,918
+ letta/__init__.py,sha256=d3F-DwijWs3SOdYADCZaqCu4IbjBND6Ba0ZTRL-G3ro,918
  letta/__main__.py,sha256=6Hs2PV7EYc5Tid4g4OtcLXhqVHiNYTGzSBdoOnW2HXA,29
- letta/agent.py,sha256=xBXIgaeiGXDBpgNKX0-2w10iRBBvcLgBK0W85jTiSX8,68141
+ letta/agent.py,sha256=hPFLDf68QqSVIP22dr40h21Jewif0Aq8IbAjrRHM0MI,68511
  letta/agents/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  letta/agents/base_agent.py,sha256=8IMB7UK4ft-Wi-ZYjX7akqQUhk_cSswRgepqeZyvCMs,1550
  letta/agents/ephemeral_agent.py,sha256=DBMXT50UQoqjkvl_Piwle3Fy7iXopy15oMWwnWzbpvo,2751
@@ -19,7 +19,7 @@ letta/constants.py,sha256=_QI06HD6yLmGFrTqexLkUnUWSsKUiBlM1QtbiLGEq4k,7773
  letta/data_sources/connectors.py,sha256=R2AssXpqS7wN6VI8AfxvqaZs5S1ZACc4E_FewmR9iZI,7022
  letta/data_sources/connectors_helper.py,sha256=oQpVlc-BjSz9sTZ7sp4PsJSXJbBKpZPi3Dam03CURTQ,3376
  letta/dynamic_multi_agent.py,sha256=syJ-uODfZR_OHd4Acx19yMvjSJ2jwjyX0SOPaeLsZ38,12056
- letta/embeddings.py,sha256=zqlfbN3aCgSOlNd9M2NW9zrwx4WwQzketb8oa5BzzE8,10831
+ letta/embeddings.py,sha256=0XHDkca0cXvqSEU06fWfpSfOODU_3HcqDaSh6kuP6EM,11115
  letta/errors.py,sha256=6fQXg2unP-2fo3R7db0ayKKWlD2XMusOPNi9TgJplCg,5558
  letta/functions/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  letta/functions/ast_parsers.py,sha256=CQI0rUoIXcLKAev_GYrXcldRIGN5ZQtk5u4FLoHe5sE,5728
@@ -30,18 +30,18 @@ letta/functions/functions.py,sha256=NyWLh7a-f4mXti5vM1oWDwXzfA58VmVVqL03O9vosKY,
  letta/functions/helpers.py,sha256=0I-ezZeM3slhAifpdlR5k2Rn6GxExD6xACCKuoYmE8M,29119
  letta/functions/interface.py,sha256=s_PPp5WDvGH_y9KUpMlORkdC141ITczFk3wsevrrUD8,2866
  letta/functions/mcp_client/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- letta/functions/mcp_client/base_client.py,sha256=uHp4XuXFy3avCkc3XYr1IxEPTHZWADqrDk-kR-oFyLY,2144
- letta/functions/mcp_client/sse_client.py,sha256=VElaM87sEMTvam4t3IJgWGhyqs5BL4YVErqVPv2hQug,961
- letta/functions/mcp_client/stdio_client.py,sha256=qy9vHB99oI4pCAQ4KmQUKY0bTboaQI4pnPm9DPQfPvY,4210
+ letta/functions/mcp_client/base_client.py,sha256=n2j1wEDopF8BkivkqrZDWrq3I2gChVfkQ0MgH4Iyg1I,3507
+ letta/functions/mcp_client/sse_client.py,sha256=XOLnWIcrjBEkZ-IksgnHKSdVds3pC2E8OPkIhCNxloo,1470
+ letta/functions/mcp_client/stdio_client.py,sha256=2oouLGphu4S15OrYj97n9_ZYZo-GMRWNLWwaw-U4TNs,4562
  letta/functions/mcp_client/types.py,sha256=nmcnQn2EpxXzXg5_pWPsHZobfxO6OucaUgz1bVvam7o,1411
  letta/functions/schema_generator.py,sha256=4hiDQpHemyfKWME-5X6xJuSiv7g9V_BgAPFegohHBIM,22327
  letta/helpers/__init__.py,sha256=p0luQ1Oe3Skc6sH4O58aHHA3Qbkyjifpuq0DZ1GAY0U,59
  letta/helpers/composio_helpers.py,sha256=6CWV483vE3N-keQlblexxBiQHxorMAgQuvbok4adGO4,949
- letta/helpers/converters.py,sha256=3qHoPDPa7ycPeeE6eLYZ8mad1mA6oGPgY95lDyhb3_A,8971
+ letta/helpers/converters.py,sha256=ndAm9p7cfA1YuoLdZOV7rTFADz2iyeUt_OZtaI5GFO0,9037
  letta/helpers/datetime_helpers.py,sha256=7U5ZJkE0cLki4sG8ukIHZSAoFfQpLWQu2kFekETy6Zg,2633
  letta/helpers/json_helpers.py,sha256=PWZ5HhSqGXO4e563dM_8M72q7ScirjXQ4Rv1ckohaV8,396
  letta/helpers/tool_execution_helper.py,sha256=bskCscuz2nqoUboFcYA7sQGeikdEyqiYnNpO4gLQTdc,7469
- letta/helpers/tool_rule_solver.py,sha256=z-2Zq_qWykgWanFZYxtxUee4FkMnxqvntXe2tomoH68,6774
+ letta/helpers/tool_rule_solver.py,sha256=Vv_wXLXc5CcSAn5DJDTZmnhC58HgcCA2RusYNYU5quA,6035
  letta/humans/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  letta/humans/examples/basic.txt,sha256=Lcp8YESTWvOJgO4Yf_yyQmgo5bKakeB1nIVrwEGG6PA,17
  letta/humans/examples/cs_phd.txt,sha256=9C9ZAV_VuG7GB31ksy3-_NAyk8rjE6YtVOkhp08k1xw,297
@@ -50,7 +50,7 @@ letta/interfaces/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,
  letta/interfaces/openai_chat_completions_streaming_interface.py,sha256=SfqVp7V7AbBqv8D_IwyqrcztNiI0nKhjAvqtZQE_jfM,4729
  letta/interfaces/utils.py,sha256=c6jvO0dBYHh8DQnlN-B0qeNC64d3CSunhfqlFA4pJTY,278
  letta/llm_api/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- letta/llm_api/anthropic.py,sha256=pDj9W_Myz4fe05eoMiApO45G8jqzVoO9QyfFpCs-uV0,38360
+ letta/llm_api/anthropic.py,sha256=XHmy2MJvV6y-Rc-GOv2M8xdaB5jmEcp7SMKI2Wc4izw,38363
  letta/llm_api/aws_bedrock.py,sha256=J_oCM810-m2T-tgo3iRwSM0BuykBN5AK3SbkyiOaGbc,3835
  letta/llm_api/azure_openai.py,sha256=GP50e3WyoU2O_vb_b06GYTA1S157I0G21lF9-qv9nsA,6459
  letta/llm_api/azure_openai_constants.py,sha256=ZaR2IasJThijG0uhLKJThrixdAxLPD2IojfeaJ-KQMQ,294
@@ -106,7 +106,7 @@ letta/local_llm/webui/legacy_settings.py,sha256=BLmd3TSx5StnY3ibjwaxYATPt_Lvq-o1
  letta/local_llm/webui/settings.py,sha256=gmLHfiOl1u4JmlAZU2d2O8YKF9lafdakyjwR_ftVPh8,552
  letta/log.py,sha256=FbFwU9KEX7k0FBYhPl7fJ6uQ3NO3-ZbsnM2OpcTFXjo,2217
  letta/main.py,sha256=_agyaYPJq70OL0goNwO34zENL2KupnTgqlg-HVcNaTY,15379
- letta/memory.py,sha256=JjKripPdzsCxXCPYBmFP_yRVN_PSqEQW5s8EKK-Mt9M,3411
+ letta/memory.py,sha256=dGzMdYYG1Mhan9POLcmyI6fVL7Xs8TEozWfAo4v1QQo,3592
  letta/offline_memory_agent.py,sha256=P_rm6GmKAH6lg7-njuv7dK29f7v5-tAQy-rMOwcPfwk,7499
  letta/openai_backcompat/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  letta/openai_backcompat/openai_object.py,sha256=GSzeCTwLpLD2fH4X8wVqzwdmoTjKK2I4PnriBY453lc,13505
@@ -136,10 +136,10 @@ letta/orm/provider.py,sha256=-qA9tvKTZgaM4D7CoDZZiA7zTgjaaWDV4jZvifQv_MM,805
  letta/orm/sandbox_config.py,sha256=DyOy_1_zCMlp13elCqPcuuA6OwUove6mrjhcpROTg50,4150
  letta/orm/source.py,sha256=z89VZUHV9K8Ew9JCYoZqUeRb1WEUKmrn0MMFkppaphE,2117
  letta/orm/sources_agents.py,sha256=Ik_PokCBrXRd9wXWomeNeb8EtLUwjb9VMZ8LWXqpK5A,473
- letta/orm/sqlalchemy_base.py,sha256=IaiVDy7q5mZsP2x2ME0IFyI6KYMwMiKb5oHODqfiDNs,22491
+ letta/orm/sqlalchemy_base.py,sha256=3jb795D-hK7o4paLjniHkPP9EnUtLG0bfHrc2OpD5QI,22672
  letta/orm/sqlite_functions.py,sha256=JCScKiRlYCKxy9hChQ8wsk4GMKknZE24MunnG3fM1Gw,4255
  letta/orm/step.py,sha256=fjm7fLtYLCtFM6Mj6e2boP6P7dHSFG24Nem85VfVqHg,3216
- letta/orm/tool.py,sha256=x58lmVG5IhXTVt82CnoN-ExuObnQCbeSMx_yOhUMmA4,2515
+ letta/orm/tool.py,sha256=ft3BDA7Pt-zsXLyPvS_Z_Ibis6H6vY20F7Li7p6nPu8,2652
  letta/orm/tools_agents.py,sha256=r6t-V21w2_mG8n38zuUb5jOi_3hRxsjgezsLA4sg0m4,626
  letta/orm/user.py,sha256=rK5N5ViDxmesZMqVVHB7FcQNpcSoM-hB42MyI6q3MnI,1004
  letta/personas/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -175,7 +175,7 @@ letta/schemas/agent.py,sha256=YjjzBIA4txW5LzQjVoDfXHgNZQh8HCYZ7tOyYVlbU0s,14276
  letta/schemas/block.py,sha256=rMWflyj982qW86dQK-9saXBHKaLCu3aUG2gQTckG3Ik,4984
  letta/schemas/embedding_config.py,sha256=ufboqW9ctSBJdhwzJRbrGtTzOTwSKfT0LY0mowpr6fs,3398
  letta/schemas/embedding_config_overrides.py,sha256=lkTa4y-EQ2RnaEKtKDM0sEAk7EwNa67REw8DGNNtGQY,84
- letta/schemas/enums.py,sha256=zBNS0iIDtAS9mrDafin4mY33LLPUxq_SoPx6z5BiM_g,1156
+ letta/schemas/enums.py,sha256=0tOunJGgrTlORWkl3HrFkFjBT-4KgA2Y9QpLceoY2yY,1033
  letta/schemas/environment_variables.py,sha256=VRtzOjdeQdHcSHXisk7oJUQlheruxhSWNS0xqlfGzbs,2429
  letta/schemas/file.py,sha256=ChN2CWzLI2TT9WLItcfElEH0E8b7kzPylF2OQBr6Beg,1550
  letta/schemas/group.py,sha256=fniDxIkgrYxo8yR1br-ZqhjjhOIaFqG-JMYHWo2J81Y,2141
@@ -190,7 +190,7 @@ letta/schemas/letta_response.py,sha256=pq-SxXQy5yZo1-DiAwV2mMURlUvz1Uu7HHR_tB1hM
  letta/schemas/llm_config.py,sha256=bqq4LGE9layPcnnkzd_8d2SB8o1x8XdDzfd2ZkYQwcY,5611
  letta/schemas/llm_config_overrides.py,sha256=-oRglCTcajF6UAK3RAa0FLWVuKODPI1v403fDIWMAtA,1815
  letta/schemas/memory.py,sha256=GOYDfPKzbWftUWO9Hv4KW7xAi1EIQmC8zpP7qvEkVHw,10245
- letta/schemas/message.py,sha256=1V9_1OZxVkkKeBx9Y9WcrnHF0ggc2zeV3K-Wix8mYBA,39339
+ letta/schemas/message.py,sha256=2bpl6VAqIaz6Rzqcq_NcPjbmlq7zvZx7sYFht1hs1_c,39727
  letta/schemas/openai/chat_completion_request.py,sha256=3tALmbBV2pv1CcqzNLBxxIPOQ8Z85woucT7FN0fuDic,3402
  letta/schemas/openai/chat_completion_response.py,sha256=koEb_NTiz5YsAAX81Z0cSqSFX4a6MdD2lhoXtxF_rw4,4100
  letta/schemas/openai/chat_completions.py,sha256=l0e9sT9boTD5VBU5YtJ0s7qUtCfFGB2K-gQLeEZ2LHU,3599
@@ -201,10 +201,10 @@ letta/schemas/passage.py,sha256=RG0vkaewEu4a_NAZM-FVyMammHjqpPP0RDYAdu27g6A,3723
  letta/schemas/providers.py,sha256=2Ijzjj1gPETiFyl8yb4ZbwaTljw5WSCdGayAgCsBeYE,43665
  letta/schemas/run.py,sha256=SRqPRziINIiPunjOhE_NlbnQYgxTvqmbauni_yfBQRA,2085
  letta/schemas/sandbox_config.py,sha256=SZCo3FSMz-DIBMKGu0atT4tsVFXGsqMFPaJnjrxpkX4,5993
- letta/schemas/source.py,sha256=-BQVolcXA2ziCu2ztR6cbTdGUc8G7vGJy7rvpdf1hpg,2880
+ letta/schemas/source.py,sha256=IuenIFs7B8uOuYJIHXqR1E28wVSa-pUX6NkLZH7cukg,3141
  letta/schemas/step.py,sha256=WkcVnruUUOWLKwiWPn2Gfal4EQZPNLqlsd9859xhgsw,2224
- letta/schemas/tool.py,sha256=PXWxEqzM-kADijlsJzu0ZYtWLnjpq4ZUNX4NzesIWNQ,12291
- letta/schemas/tool_rule.py,sha256=2YQZba4fXS3u4j8pIk7BDujfq8rnxSVMwJSyaVgApH4,2149
+ letta/schemas/tool.py,sha256=3hm9SJhHG0d6-ychZXBDsKcI83_6So-qKxTLaFRQqko,12363
+ letta/schemas/tool_rule.py,sha256=tZ-BoyFJcFLMOd8KIng8pw3yCtdV8KGh4Vz730ZA-WQ,5674
  letta/schemas/usage.py,sha256=8oYRH-JX0PfjIu2zkT5Uu3UWQ7Unnz_uHiO8hRGI4m0,912
  letta/schemas/user.py,sha256=V32Tgl6oqB3KznkxUz12y7agkQicjzW7VocSpj78i6Q,1526
  letta/serialize_schemas/__init__.py,sha256=cosMjvWz7cubC1azbUofzYrcDBTuSgjJImUdsrSs3p0,77
@@ -216,13 +216,13 @@ letta/serialize_schemas/marshmallow_custom_fields.py,sha256=_rXV4eGY4wKqzZQPyk3o
  letta/serialize_schemas/marshmallow_message.py,sha256=vkBlIFONzsiREXo507sWl1mjVjCLV97RtB7jyE0N_fw,1336
  letta/serialize_schemas/marshmallow_tag.py,sha256=ssNGdD-z9UafhoTTOcvWRXju__NSx1bPijae_vljMr4,682
  letta/serialize_schemas/marshmallow_tool.py,sha256=98dzHzKcH_HE5n_gCkU683iHvg1M9IGmRZ2BWnh__j4,404
- letta/serialize_schemas/pydantic_agent_schema.py,sha256=4FfF405nyH3lpTpU6r1vkZmP5916FhuEcgaEqpiAMko,2633
+ letta/serialize_schemas/pydantic_agent_schema.py,sha256=hsf0l1RiAbFrGUYNsRg21L4S7QUD6z_-Wft5QigPiOM,2664
  letta/server/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  letta/server/constants.py,sha256=yAdGbLkzlOU_dLTx0lKDmAnj0ZgRXCEaIcPJWO69eaE,92
  letta/server/db.py,sha256=cA1MHpMCTTC1MX7VWppJ-cKq1XW93Vws_vTV0-bKmTE,3642
  letta/server/generate_openapi_schema.sh,sha256=0OtBhkC1g6CobVmNEd_m2B6sTdppjbJLXaM95icejvE,371
  letta/server/rest_api/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- letta/server/rest_api/app.py,sha256=gP558sE9dY4914GYVPuypPwYiGHaVtfeWSsOUZBq67c,13618
+ letta/server/rest_api/app.py,sha256=Y4QZZWh0mI4d7o7ZZh8G4A9ib_CBBDz9DtaLrH-9fXU,13723
  letta/server/rest_api/auth/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  letta/server/rest_api/auth/index.py,sha256=fQBGyVylGSRfEMLQ17cZzrHd5Y1xiVylvPqH5Rl-lXQ,1378
  letta/server/rest_api/auth_token.py,sha256=725EFEIiNj4dh70hrSd94UysmFD8vcJLrTRfNHkzxDo,774
@@ -244,15 +244,15 @@ letta/server/rest_api/routers/v1/organizations.py,sha256=8n-kA9LHtKImdY2xL-v7m6n
  letta/server/rest_api/routers/v1/providers.py,sha256=qyZsNTXgLVsoLZoCVY4qaqiG34zCEVmRUP2Cn6maK_4,2949
  letta/server/rest_api/routers/v1/runs.py,sha256=-2_YA2nuxcLqiPjG9CPHeZbyrtlNQZnAsaNohGn5fMg,8278
  letta/server/rest_api/routers/v1/sandbox_configs.py,sha256=9hqnnMwJ3wCwO-Bezu3Xl8i3TDSIuInw3gSeHaKUXfE,8526
- letta/server/rest_api/routers/v1/sources.py,sha256=rpQhaRHyzGUK43LX623L8BBLqL85HJ6fUYPMvI4k3kA,8434
+ letta/server/rest_api/routers/v1/sources.py,sha256=-3JB2ohr4hqkB7Zw0frTG1PN2KplU1SRr2mLX6KXJ44,9104
  letta/server/rest_api/routers/v1/steps.py,sha256=DVVwaxLNbNAgWpr2oQkrNjdS-wi0bP8kVJZUO-hiaf8,3275
  letta/server/rest_api/routers/v1/tags.py,sha256=coydgvL6-9cuG2Hy5Ea7QY3inhTHlsf69w0tcZenBus,880
- letta/server/rest_api/routers/v1/tools.py,sha256=pWIlYUksq9QNFSmknB_DdSB5zs77zaHoj-Ha-WxYkO8,17211
+ letta/server/rest_api/routers/v1/tools.py,sha256=wDj8L_-qd4FGiLa2zJMKIsRPxpWjWP0x2clkU0SBvTQ,17244
  letta/server/rest_api/routers/v1/users.py,sha256=G5DBHSkPfBgVHN2Wkm-rVYiLQAudwQczIq2Z3YLdbVo,2277
  letta/server/rest_api/routers/v1/voice.py,sha256=7J0L-Nkz65m0PXcpQI2ATMIZzumDDSUzgtIus7d-tv8,2461
  letta/server/rest_api/static_files.py,sha256=NG8sN4Z5EJ8JVQdj19tkFa9iQ1kBPTab9f_CUxd_u4Q,3143
  letta/server/rest_api/utils.py,sha256=aF0u__Q33-aPWAiHi9JA0jKAjqnwbVKzJdD5NgFpnOU,13828
- letta/server/server.py,sha256=Ml4-MklB28x3iAONfp3m2oSbrhVctAvvxUWApeWIWrk,75884
+ letta/server/server.py,sha256=YT0Bxl6rkoXilCkVnFFGkT9bvK1cAG_RVDho3UeJ8eg,76962
  letta/server/startup.sh,sha256=2S_MuvYYY5YZQOYBL-7mq2CC-A7Hhwyd9be2QqmNqzA,2514
  letta/server/static_files/assets/index-048c9598.js,sha256=mR16XppvselwKCcNgONs4L7kZEVa4OEERm4lNZYtLSk,146819
  letta/server/static_files/assets/index-0e31b727.css,sha256=SBbja96uiQVLDhDOroHgM6NSl7tS4lpJRCREgSS_hA8,7672
@@ -285,17 +285,17 @@ letta/services/summarizer/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJW
  letta/services/summarizer/enums.py,sha256=szzPX2OBRRJEZsBTGYQThrNz02ELFqhuLwvOR7ozi7A,208
  letta/services/summarizer/summarizer.py,sha256=qPcR7VsHsgUsUtxmKx_73l3XdDhFvDzZ8VeIs4w3NBc,4757
  letta/services/tool_execution_sandbox.py,sha256=6AB3rFS34PzoyE9dxtUmuaUUWvKrwdE083NuBRa1eC0,22969
- letta/services/tool_manager.py,sha256=rXOdB2quTWE9EGjkg9Q4EY6-hAwX2R9gGBCXFs8hR4w,9862
+ letta/services/tool_manager.py,sha256=CAXkbw8cv6LZ8Do9lloWw0biKH0lBf-qtsdq0yedhbQ,10053
  letta/services/user_manager.py,sha256=ScHbdJK9kNF8QXjsd3ZWGEL87n_Uyp3YwfKetOJmpHs,4304
- letta/settings.py,sha256=W3gFFywjk6CKbWTSlj0zXGyzl8CJUrQJgMLvq2sKEWA,7074
+ letta/settings.py,sha256=dRXIVQwgq5V1fH7H5KZN1guuBlCNgvDapZ88r1IMC7s,7360
  letta/streaming_interface.py,sha256=1vuAckIxo1p1UsXtDzE8LTUve5RoTZRdXUe-WBIYDWU,15818
  letta/streaming_utils.py,sha256=jLqFTVhUL76FeOuYk8TaRQHmPTf3HSRc2EoJwxJNK6U,11946
- letta/supervisor_multi_agent.py,sha256=dw6XPAxZcyjAXEVYkMIxJFhZdR2m2_rq-fYev5hZE50,3936
+ letta/supervisor_multi_agent.py,sha256=jMy0J-a1_u5ZCulweXwJ98SgF6Hnvwxh1L3_wavnTi4,4330
  letta/system.py,sha256=dnOrS2FlRMwijQnOvfrky0Lg8wEw-FUq2zzfAJOUSKA,8477
- letta/tracing.py,sha256=e1dNfdMI__38E-_xMxPrxyM5mSGil8Od1tSbf_UNWaY,8004
+ letta/tracing.py,sha256=RstWXpfWVF77nmb_ISORVWd9IQw2Ky3de40k_S70yKI,8258
  letta/utils.py,sha256=AdHrQ2OQ3V4XhJ1LtYwbLUO71j2IJY37cIUxXPgaaRY,32125
- letta_nightly-0.6.41.dev20250317104157.dist-info/LICENSE,sha256=mExtuZ_GYJgDEI38GWdiEYZizZS4KkVt2SF1g_GPNhI,10759
- letta_nightly-0.6.41.dev20250317104157.dist-info/METADATA,sha256=Tg27LnMLvS_yubSnumqbGU_5pXTCdM0V-BzpEMywOKs,22886
- letta_nightly-0.6.41.dev20250317104157.dist-info/WHEEL,sha256=FMvqSimYX_P7y0a7UY-_Mc83r5zkBZsCYPm7Lr0Bsq4,88
- letta_nightly-0.6.41.dev20250317104157.dist-info/entry_points.txt,sha256=2zdiyGNEZGV5oYBuS-y2nAAgjDgcC9yM_mHJBFSRt5U,40
- letta_nightly-0.6.41.dev20250317104157.dist-info/RECORD,,
+ letta_nightly-0.6.43.dev20250318012126.dist-info/LICENSE,sha256=mExtuZ_GYJgDEI38GWdiEYZizZS4KkVt2SF1g_GPNhI,10759
+ letta_nightly-0.6.43.dev20250318012126.dist-info/METADATA,sha256=vVaOSTkQsHF743XpzwI9bL1K4ulPjiMU1Z5cFUUtzPQ,22847
+ letta_nightly-0.6.43.dev20250318012126.dist-info/WHEEL,sha256=FMvqSimYX_P7y0a7UY-_Mc83r5zkBZsCYPm7Lr0Bsq4,88
+ letta_nightly-0.6.43.dev20250318012126.dist-info/entry_points.txt,sha256=2zdiyGNEZGV5oYBuS-y2nAAgjDgcC9yM_mHJBFSRt5U,40
+ letta_nightly-0.6.43.dev20250318012126.dist-info/RECORD,,