letta-nightly 0.8.6.dev20250627220317__py3-none-any.whl → 0.8.7.dev20250627220731__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
letta/__init__.py CHANGED
@@ -1,6 +1,7 @@
  import os
 
- __version__ = "0.8.5"
+ __version__ = "0.8.7"
+
 
  if os.environ.get("LETTA_VERSION"):
      __version__ = os.environ["LETTA_VERSION"]
letta/constants.py CHANGED
@@ -83,6 +83,7 @@ SEND_MESSAGE_TOOL_NAME = "send_message"
  # Base tools that cannot be edited, as they access agent state directly
  # Note that we don't include "conversation_search_date" for now
  BASE_TOOLS = [SEND_MESSAGE_TOOL_NAME, "conversation_search", "archival_memory_insert", "archival_memory_search"]
+ DEPRECATED_BASE_TOOLS = ["archival_memory_insert", "archival_memory_search"]
  # Base memory tools CAN be edited, and are added by default by the server
  BASE_MEMORY_TOOLS = ["core_memory_append", "core_memory_replace"]
  # New v2 collection of the base memory tools (effecitvely same as sleeptime set), to pair with memgpt_v2 prompt
@@ -129,7 +130,7 @@ MEMORY_TOOLS_LINE_NUMBER_PREFIX_REGEX = re.compile(
  BUILTIN_TOOLS = ["run_code", "web_search"]
 
  # Built in tools
- FILES_TOOLS = ["open_file", "close_file", "grep", "search_files"]
+ FILES_TOOLS = ["open_files", "grep_files", "search_files"]
 
  FILE_MEMORY_EXISTS_MESSAGE = "The following files are currently accessible in memory:"
  FILE_MEMORY_EMPTY_MESSAGE = (
letta/functions/function_sets/files.py CHANGED
@@ -1,30 +1,32 @@
- from typing import TYPE_CHECKING, List, Optional, Tuple
+ from typing import TYPE_CHECKING, List, Optional
+
+ from letta.functions.types import FileOpenRequest
 
  if TYPE_CHECKING:
      from letta.schemas.agent import AgentState
      from letta.schemas.file import FileMetadata
 
 
- async def open_file(agent_state: "AgentState", file_name: str, view_range: Optional[Tuple[int, int]]) -> str:
-     """
-     Open the file with name `file_name` and load the contents into files section in core memory.
+ async def open_files(agent_state: "AgentState", file_requests: List[FileOpenRequest], close_all_others: bool = False) -> str:
+     """Open one or more files and load their contents into files section in core memory. Maximum of 5 files can be opened simultaneously.
 
-     Args:
-         file_name (str): Name of the file to view. Required.
-         view_range (Optional[Tuple[int, int]]): Optional tuple indicating range to view.
+     Examples:
+         Open single file (entire content):
+             file_requests = [FileOpenRequest(file_name="config.py")]
 
-     Returns:
-         str: A status message
-     """
-     raise NotImplementedError("Tool not implemented. Please contact the Letta team.")
+         Open multiple files with different view ranges:
+             file_requests = [
+                 FileOpenRequest(file_name="config.py", offset=1, length=50),  # Lines 1-50
+                 FileOpenRequest(file_name="main.py", offset=100, length=100),  # Lines 100-199
+                 FileOpenRequest(file_name="utils.py")  # Entire file
+             ]
 
-
- async def close_file(agent_state: "AgentState", file_name: str) -> str:
-     """
-     Close file with name `file_name` in files section in core memory.
+         Close all other files and open new ones:
+             open_files(agent_state, file_requests, close_all_others=True)
 
      Args:
-         file_name (str): Name of the file to close.
+         file_requests (List[FileOpenRequest]): List of file open requests, each specifying file name and optional view range.
+         close_all_others (bool): If True, closes all other currently open files first. Defaults to False.
 
      Returns:
          str: A status message
@@ -32,7 +34,7 @@ async def close_file(agent_state: "AgentState", file_name: str) -> str:
      raise NotImplementedError("Tool not implemented. Please contact the Letta team.")
 
 
- async def grep(
+ async def grep_files(
      agent_state: "AgentState",
      pattern: str,
      include: Optional[str] = None,
@@ -45,7 +47,7 @@
          pattern (str): Keyword or regex pattern to search within file contents.
          include (Optional[str]): Optional keyword or regex pattern to filter filenames to include in the search.
          context_lines (Optional[int]): Number of lines of context to show before and after each match.
-             Equivalent to `-C` in grep. Defaults to 3.
+             Equivalent to `-C` in grep_files. Defaults to 3.
 
      Returns:
          str: Matching lines with optional surrounding context or a summary output.
letta/functions/schema_generator.py CHANGED
@@ -9,6 +9,56 @@ from typing_extensions import Literal
 
  from letta.constants import REQUEST_HEARTBEAT_DESCRIPTION, REQUEST_HEARTBEAT_PARAM
  from letta.functions.mcp_client.types import MCPTool
+ from letta.log import get_logger
+
+ logger = get_logger(__name__)
+
+
+ def validate_google_style_docstring(function):
+     """Validate that a function's docstring follows Google Python style format.
+
+     Args:
+         function: The function to validate
+
+     Raises:
+         ValueError: If the docstring is not in Google Python style format
+     """
+     if not function.__doc__:
+         raise ValueError(
+             f"Function '{function.__name__}' has no docstring. Expected Google Python style docstring with Args and Returns sections."
+         )
+
+     docstring = function.__doc__.strip()
+
+     # Basic Google style requirements:
+     # 1. Should have Args: section if function has parameters (excluding self, agent_state)
+     # 2. Should have Returns: section if function returns something other than None
+     # 3. Args and Returns sections should be properly formatted
+
+     sig = inspect.signature(function)
+     has_params = any(param.name not in ["self", "agent_state"] for param in sig.parameters.values())
+
+     # Check for Args section if function has parameters
+     if has_params and "Args:" not in docstring:
+         raise ValueError(f"Function '{function.__name__}' with parameters must have 'Args:' section in Google Python style docstring")
+
+     # NOTE: No check for Returns section - this is irrelevant to the LLM
+     # In proper Google Python format, the Returns: is required
+
+     # Validate Args section format if present
+     if "Args:" in docstring:
+         args_start = docstring.find("Args:")
+         args_end = docstring.find("Returns:", args_start) if "Returns:" in docstring[args_start:] else len(docstring)
+         args_section = docstring[args_start:args_end].strip()
+
+         # Check that each parameter is documented
+         for param in sig.parameters.values():
+             if param.name in ["self", "agent_state"]:
+                 continue
+             if f"{param.name} (" not in args_section and f"{param.name}:" not in args_section:
+                 raise ValueError(
+                     f"Function '{function.__name__}' parameter '{param.name}' not documented in Args section of Google Python style docstring"
+                 )
 
 
  def is_optional(annotation):
@@ -277,6 +327,20 @@ def pydantic_model_to_json_schema(model: Type[BaseModel]) -> dict:
              "description": prop["description"],
          }
 
+     # Handle the case where the property uses anyOf (e.g., Optional types)
+     if "anyOf" in prop:
+         # For Optional types, extract the non-null type
+         non_null_types = [t for t in prop["anyOf"] if t.get("type") != "null"]
+         if len(non_null_types) == 1:
+             # Simple Optional[T] case - use the non-null type
+             return {
+                 "type": non_null_types[0]["type"],
+                 "description": prop["description"],
+             }
+         else:
+             # Complex anyOf case - not supported yet
+             raise ValueError(f"Complex anyOf patterns are not supported: {prop}")
+
      # If it's a regular property with a direct type (e.g., string, number)
      return {
          "type": "string" if prop["type"] == "string" else prop["type"],
@@ -344,7 +408,18 @@ def pydantic_model_to_json_schema(model: Type[BaseModel]) -> dict:
      return clean_schema(schema_part=schema, full_schema=schema)
 
 
- def generate_schema(function, name: Optional[str] = None, description: Optional[str] = None) -> dict:
+ def generate_schema(function, name: Optional[str] = None, description: Optional[str] = None, tool_id: Optional[str] = None) -> dict:
+     # Validate that the function has a Google Python style docstring
+     try:
+         validate_google_style_docstring(function)
+     except ValueError:
+         logger.warning(
+             f"Function `{function.__name__}` in module `{function.__module__}` "
+             f"{'(tool_id=' + tool_id + ') ' if tool_id else ''}"
+             f"is not in Google style docstring format. "
+             f"Docstring received:\n{repr(function.__doc__[:200]) if function.__doc__ else 'None'}"
+         )
+
      # Get the signature of the function
      sig = inspect.signature(function)
 
@@ -353,10 +428,19 @@ def generate_schema(function, name: Optional[str] = None, description: Optional[
 
      if not description:
          # Support multiline docstrings for complex functions, TODO (cliandy): consider having this as a setting
-         if docstring.long_description:
+         # Always prefer combining short + long description when both exist
+         if docstring.short_description and docstring.long_description:
+             description = f"{docstring.short_description}\n\n{docstring.long_description}"
+         elif docstring.short_description:
+             description = docstring.short_description
+         elif docstring.long_description:
              description = docstring.long_description
          else:
-             description = docstring.short_description
+             description = "No description available"
+
+     examples_section = extract_examples_section(function.__doc__)
+     if examples_section and "Examples:" not in description:
+         description = f"{description}\n\n{examples_section}"
 
      # Prepare the schema dictionary
      schema = {
@@ -443,6 +527,38 @@ def generate_schema(function, name: Optional[str] = None, description: Optional[
      return schema
 
 
+ def extract_examples_section(docstring: Optional[str]) -> Optional[str]:
+     """Extracts the 'Examples:' section from a Google-style docstring.
+
+     Args:
+         docstring (Optional[str]): The full docstring of a function.
+
+     Returns:
+         Optional[str]: The extracted examples section, or None if not found.
+     """
+     if not docstring or "Examples:" not in docstring:
+         return None
+
+     lines = docstring.strip().splitlines()
+     in_examples = False
+     examples_lines = []
+
+     for line in lines:
+         stripped = line.strip()
+
+         if not in_examples and stripped.startswith("Examples:"):
+             in_examples = True
+             examples_lines.append(line)
+             continue
+
+         if in_examples:
+             if stripped and not line.startswith(" ") and stripped.endswith(":"):
+                 break
+             examples_lines.append(line)
+
+     return "\n".join(examples_lines).strip() if examples_lines else None
+
+
  def generate_schema_from_args_schema_v2(
      args_schema: Type[BaseModel], name: Optional[str] = None, description: Optional[str] = None, append_heartbeat: bool = True
  ) -> Dict[str, Any]:
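
The docstring validation added above is lenient at schema-generation time (it only logs a warning), but the shape it expects is worth illustrating. A minimal sketch with a hypothetical tool function, not taken from the package:

    def add_numbers(a: int, b: int) -> int:
        """Add two integers.

        Args:
            a (int): First operand.
            b (int): Second operand.

        Returns:
            int: The sum of a and b.
        """
        return a + b

    # validate_google_style_docstring(add_numbers) returns without raising: the docstring is
    # non-empty and both parameters appear under "Args:". A missing "Args:" section (when the
    # function takes parameters) or an undocumented parameter raises ValueError, which
    # generate_schema now catches and downgrades to a logger.warning(...).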
letta/functions/types.py CHANGED
@@ -1,6 +1,18 @@
+ from typing import Optional
+
  from pydantic import BaseModel, Field
 
 
  class SearchTask(BaseModel):
      query: str = Field(description="Search query for web search")
      question: str = Field(description="Question to answer from search results, considering full conversation context")
+
+
+ class FileOpenRequest(BaseModel):
+     file_name: str = Field(description="Name of the file to open")
+     offset: Optional[int] = Field(
+         default=None, description="Optional starting line number (1-indexed). If not specified, starts from beginning of file."
+     )
+     length: Optional[int] = Field(
+         default=None, description="Optional number of lines to view from offset. If not specified, views to end of file."
+     )
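
A quick sketch of how the new request model is used; the files tool executor further down also accepts plain dicts with the same keys, so either form works in a tool call:

    from letta.functions.types import FileOpenRequest

    # Lines 100-199 of main.py: offset is 1-indexed, length counts lines from the offset
    req = FileOpenRequest(file_name="main.py", offset=100, length=100)

    # Equivalent dict form, as an LLM tool call would typically supply it;
    # LettaFileToolExecutor.open_files converts it with FileOpenRequest(**req_as_dict)
    req_as_dict = {"file_name": "main.py", "offset": 100, "length": 100}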
letta/groups/sleeptime_multi_agent_v2.py CHANGED
@@ -141,7 +141,11 @@ class SleeptimeMultiAgentV2(BaseAgent):
          include_return_message_types: Optional[List[MessageType]] = None,
      ):
          response = await self.step(
-             input_messages, max_steps, use_assistant_message, request_start_timestamp_ns, include_return_message_types
+             input_messages=input_messages,
+             max_steps=max_steps,
+             use_assistant_message=use_assistant_message,
+             request_start_timestamp_ns=request_start_timestamp_ns,
+             include_return_message_types=include_return_message_types,
          )
 
          for message in response.messages:
letta/orm/sqlalchemy_base.py CHANGED
@@ -183,6 +183,7 @@ class SqlalchemyBase(CommonSqlalchemyMetaMixins, Base):
          identifier_keys: Optional[List[str]] = None,
          identity_id: Optional[str] = None,
          query_options: Sequence[ORMOption] | None = None,  # ← new
+         has_feedback: Optional[bool] = None,
          **kwargs,
      ) -> List["SqlalchemyBase"]:
          """
@@ -281,6 +282,7 @@ class SqlalchemyBase(CommonSqlalchemyMetaMixins, Base):
          identifier_keys: Optional[List[str]] = None,
          identity_id: Optional[str] = None,
          check_is_deleted: bool = False,
+         has_feedback: Optional[bool] = None,
          **kwargs,
      ):
          """
@@ -337,6 +339,13 @@ class SqlalchemyBase(CommonSqlalchemyMetaMixins, Base):
          if end_date:
              query = query.filter(cls.created_at < end_date)
 
+         # Feedback filtering
+         if has_feedback is not None and hasattr(cls, "feedback"):
+             if has_feedback:
+                 query = query.filter(cls.feedback.isnot(None))
+             else:
+                 query = query.filter(cls.feedback.is_(None))
+
          # Handle pagination based on before/after
          if before_obj or after_obj:
              conditions = []
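
In SQL terms, the new has_feedback flag reduces to a NULL check on the model's feedback column, and it only applies to models that define one:

    # has_feedback=True   ->  ... WHERE feedback IS NOT NULL
    # has_feedback=False  ->  ... WHERE feedback IS NULL
    # has_feedback=None   ->  no extra predicate; models without a "feedback"
    #                         attribute are never filtered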
letta/schemas/agent.py CHANGED
@@ -152,9 +152,7 @@ class CreateAgent(BaseModel, validate_assignment=True):  #
      initial_message_sequence: Optional[List[MessageCreate]] = Field(
          None, description="The initial set of messages to put in the agent's in-context memory."
      )
-     include_base_tools: bool = Field(
-         True, description="If true, attaches the Letta core tools (e.g. archival_memory and core_memory related functions)."
-     )
+     include_base_tools: bool = Field(True, description="If true, attaches the Letta core tools (e.g. core_memory related functions).")
      include_multi_agent_tools: bool = Field(
          False, description="If true, attaches the Letta multi-agent tools (e.g. sending a message to another agent)."
      )
letta/server/rest_api/routers/v1/steps.py CHANGED
@@ -7,6 +7,7 @@ from letta.orm.errors import NoResultFound
  from letta.schemas.step import Step
  from letta.server.rest_api.utils import get_letta_server
  from letta.server.server import SyncServer
+ from letta.services.step_manager import FeedbackType
 
  router = APIRouter(prefix="/steps", tags=["steps"])
 
@@ -23,6 +24,7 @@ async def list_steps(
      agent_id: Optional[str] = Query(None, description="Filter by the ID of the agent that performed the step"),
      trace_ids: Optional[list[str]] = Query(None, description="Filter by trace ids returned by the server"),
      feedback: Optional[Literal["positive", "negative"]] = Query(None, description="Filter by feedback"),
+     has_feedback: Optional[bool] = Query(None, description="Filter by whether steps have feedback (true) or not (false)"),
      tags: Optional[list[str]] = Query(None, description="Filter by tags"),
      server: SyncServer = Depends(get_letta_server),
      actor_id: Optional[str] = Header(None, alias="user_id"),
@@ -49,6 +51,7 @@ async def list_steps(
          agent_id=agent_id,
          trace_ids=trace_ids,
          feedback=feedback,
+         has_feedback=has_feedback,
          tags=tags,
      )
 
@@ -72,7 +75,7 @@ async def retrieve_step(
  @router.patch("/{step_id}/feedback", response_model=Step, operation_id="add_feedback")
  async def add_feedback(
      step_id: str,
-     feedback: Optional[Literal["positive", "negative"]],
+     feedback: Optional[FeedbackType],
      actor_id: Optional[str] = Header(None, alias="user_id"),
      server: SyncServer = Depends(get_letta_server),
  ):
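
End to end, the new query parameter can be exercised like this; the base URL and port are assumptions for a local deployment, not part of this diff:

    import requests

    # List only steps that have feedback attached (positive or negative)
    resp = requests.get(
        "http://localhost:8283/v1/steps",   # assumed local Letta server URL
        params={"has_feedback": True},
        headers={"user_id": "user-123"},    # actor header, per the route definition
    )
    steps_with_feedback = resp.json()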
letta/server/server.py CHANGED
@@ -1496,7 +1496,7 @@ class SyncServer(Server):
              await self.agent_manager.attach_block_async(agent_id=main_agent.id, block_id=block.id, actor=actor)
 
          if clear_history and block.value != "":
-             block = await self.block_manager.update_block_async(block_id=block.id, block=BlockUpdate(value=""))
+             block = await self.block_manager.update_block_async(block_id=block.id, block_update=BlockUpdate(value=""), actor=actor)
 
          request = CreateAgent(
              name=main_agent.name + "-doc-sleeptime",
letta/services/agent_manager.py CHANGED
@@ -16,6 +16,7 @@ from letta.constants import (
      BASE_VOICE_SLEEPTIME_CHAT_TOOLS,
      BASE_VOICE_SLEEPTIME_TOOLS,
      DEFAULT_TIMEZONE,
+     DEPRECATED_BASE_TOOLS,
      FILES_TOOLS,
      MULTI_AGENT_TOOLS,
  )
@@ -77,6 +78,7 @@ from letta.services.helpers.agent_manager_helper import (
      build_agent_passage_query,
      build_passage_query,
      build_source_passage_query,
+     calculate_base_tools,
      check_supports_structured_output,
      compile_system_message,
      derive_system_message,
@@ -261,13 +263,13 @@ class AgentManager:
          elif agent_create.enable_sleeptime:
              tool_names |= set(BASE_SLEEPTIME_CHAT_TOOLS)
          elif agent_create.agent_type == AgentType.memgpt_v2_agent:
-             tool_names |= set(BASE_TOOLS + BASE_MEMORY_TOOLS_V2)
+             tool_names |= calculate_base_tools(is_v2=True)
          elif agent_create.agent_type == AgentType.react_agent:
              pass  # no default tools
          elif agent_create.agent_type == AgentType.workflow_agent:
              pass  # no default tools
          else:
-             tool_names |= set(BASE_TOOLS + BASE_MEMORY_TOOLS)
+             tool_names |= calculate_base_tools(is_v2=False)
          if agent_create.include_multi_agent_tools:
              tool_names |= set(MULTI_AGENT_TOOLS)
 
@@ -428,16 +430,19 @@ class AgentManager:
          elif agent_create.enable_sleeptime:
              tool_names |= set(BASE_SLEEPTIME_CHAT_TOOLS)
          elif agent_create.agent_type == AgentType.memgpt_v2_agent:
-             tool_names |= set(BASE_TOOLS + BASE_MEMORY_TOOLS_V2)
+             tool_names |= calculate_base_tools(is_v2=True)
          elif agent_create.agent_type == AgentType.react_agent:
              pass  # no default tools
          elif agent_create.agent_type == AgentType.workflow_agent:
              pass  # no default tools
          else:
-             tool_names |= set(BASE_TOOLS + BASE_MEMORY_TOOLS)
+             tool_names |= calculate_base_tools(is_v2=False)
          if agent_create.include_multi_agent_tools:
              tool_names |= set(MULTI_AGENT_TOOLS)
 
+         # take out the deprecated tool names
+         tool_names.difference_update(set(DEPRECATED_BASE_TOOLS))
+
          supplied_ids = set(agent_create.tool_ids or [])
 
          source_ids = agent_create.source_ids or []
letta/services/file_processor/chunker/line_chunker.py CHANGED
@@ -124,8 +124,8 @@ class LineChunker:
          else:
              line_offset = 0
 
-         # Add line numbers for all strategies
-         content_lines = [f"{i + line_offset}: {line}" for i, line in enumerate(content_lines)]
+         # Add line numbers for all strategies (1-indexed for user display)
+         content_lines = [f"{i + line_offset + 1}: {line}" for i, line in enumerate(content_lines)]
 
          # Add metadata about total chunks
          if add_metadata:
@@ -133,7 +133,10 @@ class LineChunker:
                  "sentences" if strategy == ChunkingStrategy.DOCUMENTATION else "chunks" if strategy == ChunkingStrategy.PROSE else "lines"
              )
              if start is not None and end is not None:
-                 content_lines.insert(0, f"[Viewing {chunk_type} {start} to {end-1} (out of {total_chunks} {chunk_type})]")
+                 # Display 1-indexed ranges for users
+                 start_display = start + 1
+                 end_display = end
+                 content_lines.insert(0, f"[Viewing {chunk_type} {start_display} to {end_display} (out of {total_chunks} {chunk_type})]")
              else:
                  content_lines.insert(0, f"[Viewing file start (out of {total_chunks} {chunk_type})]")
 
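The net effect of the LineChunker change is that both the per-line prefixes and the viewing header are now 1-indexed for display:

    # Per-line prefix (no explicit range, line_offset = 0):
    #   before: f"{i + line_offset}: {line}"      -> first line shown as "0: ..."
    #   after:  f"{i + line_offset + 1}: {line}"  -> first line shown as "1: ..."
    #
    # Viewing header for start=0, end=50 (0-indexed, end-exclusive):
    #   before: "[Viewing lines 0 to 49 (out of N lines)]"
    #   after:  "[Viewing lines 1 to 50 (out of N lines)]"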
letta/services/files_agents_manager.py CHANGED
@@ -283,6 +283,40 @@ class FileAgentManager:
              await session.execute(stmt)
              await session.commit()
 
+     @enforce_types
+     @trace_method
+     async def close_all_other_files(self, *, agent_id: str, keep_file_names: List[str], actor: PydanticUser) -> List[str]:
+         """Close every open file for this agent except those in keep_file_names.
+
+         Args:
+             agent_id: ID of the agent
+             keep_file_names: List of file names to keep open
+             actor: User performing the action
+
+         Returns:
+             List of file names that were closed
+         """
+         async with db_registry.async_session() as session:
+             stmt = (
+                 update(FileAgentModel)
+                 .where(
+                     and_(
+                         FileAgentModel.agent_id == agent_id,
+                         FileAgentModel.organization_id == actor.organization_id,
+                         FileAgentModel.is_open.is_(True),
+                         # Only add the NOT IN filter when there are names to keep
+                         ~FileAgentModel.file_name.in_(keep_file_names) if keep_file_names else True,
+                     )
+                 )
+                 .values(is_open=False, visible_content=None)
+                 .returning(FileAgentModel.file_name)  # Gets the names we closed
+                 .execution_options(synchronize_session=False)  # No need to sync ORM state
+             )
+
+             closed_file_names = [row.file_name for row in (await session.execute(stmt))]
+             await session.commit()
+             return closed_file_names
+
      @enforce_types
      @trace_method
      async def enforce_max_open_files_and_open(
letta/services/helpers/agent_manager_helper.py CHANGED
@@ -1,12 +1,20 @@
  from datetime import datetime
- from typing import List, Literal, Optional
+ from typing import List, Literal, Optional, Set
 
  import numpy as np
  from sqlalchemy import Select, and_, asc, desc, func, literal, nulls_last, or_, select, union_all
  from sqlalchemy.sql.expression import exists
 
  from letta import system
- from letta.constants import IN_CONTEXT_MEMORY_KEYWORD, MAX_EMBEDDING_DIM, STRUCTURED_OUTPUT_MODELS
+ from letta.constants import (
+     BASE_MEMORY_TOOLS,
+     BASE_MEMORY_TOOLS_V2,
+     BASE_TOOLS,
+     DEPRECATED_BASE_TOOLS,
+     IN_CONTEXT_MEMORY_KEYWORD,
+     MAX_EMBEDDING_DIM,
+     STRUCTURED_OUTPUT_MODELS,
+ )
  from letta.embeddings import embedding_model
  from letta.helpers import ToolRulesSolver
  from letta.helpers.datetime_helpers import format_datetime, get_local_time, get_local_time_fast
@@ -1038,3 +1046,10 @@ def build_agent_passage_query(
      query = query.order_by(AgentPassage.created_at.desc(), AgentPassage.id.asc())
 
      return query
+
+
+ def calculate_base_tools(is_v2: bool) -> Set[str]:
+     if is_v2:
+         return (set(BASE_TOOLS) - set(DEPRECATED_BASE_TOOLS)) | set(BASE_MEMORY_TOOLS_V2)
+     else:
+         return (set(BASE_TOOLS) - set(DEPRECATED_BASE_TOOLS)) | set(BASE_MEMORY_TOOLS)
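
With the constants shown earlier in this diff, the non-v2 branch works out as follows; the v2 branch is identical except that it unions BASE_MEMORY_TOOLS_V2, whose members are not listed here:

    # calculate_base_tools(is_v2=False)
    #   set(BASE_TOOLS)              = {"send_message", "conversation_search",
    #                                   "archival_memory_insert", "archival_memory_search"}
    #   - set(DEPRECATED_BASE_TOOLS)   removes the two archival_memory_* tools
    #   | set(BASE_MEMORY_TOOLS)       adds {"core_memory_append", "core_memory_replace"}
    #   => {"send_message", "conversation_search", "core_memory_append", "core_memory_replace"}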
letta/services/step_manager.py CHANGED
@@ -1,4 +1,5 @@
  from datetime import datetime
+ from enum import Enum
  from typing import List, Literal, Optional
 
  from sqlalchemy import select
@@ -18,6 +19,11 @@ from letta.server.db import db_registry
  from letta.utils import enforce_types
 
 
+ class FeedbackType(str, Enum):
+     POSITIVE = "positive"
+     NEGATIVE = "negative"
+
+
  class StepManager:
 
      @enforce_types
@@ -35,6 +41,7 @@ class StepManager:
          agent_id: Optional[str] = None,
          trace_ids: Optional[list[str]] = None,
          feedback: Optional[Literal["positive", "negative"]] = None,
+         has_feedback: Optional[bool] = None,
      ) -> List[PydanticStep]:
          """List all jobs with optional pagination and status filter."""
          async with db_registry.async_session() as session:
@@ -55,6 +62,7 @@ class StepManager:
                  end_date=end_date,
                  limit=limit,
                  ascending=True if order == "asc" else False,
+                 has_feedback=has_feedback,
                  **filter_kwargs,
              )
              return [step.to_pydantic() for step in steps]
@@ -154,9 +162,7 @@ class StepManager:
 
      @enforce_types
      @trace_method
-     async def add_feedback_async(
-         self, step_id: str, feedback: Optional[Literal["positive", "negative"]], actor: PydanticUser
-     ) -> PydanticStep:
+     async def add_feedback_async(self, step_id: str, feedback: Optional[FeedbackType], actor: PydanticUser) -> PydanticStep:
          async with db_registry.async_session() as session:
              step = await StepModel.read_async(db_session=session, identifier=step_id, actor=actor)
              if not step:
@@ -250,6 +256,7 @@ class StepManager:
          return job
 
 
+ # noinspection PyTypeChecker
  @singleton
  class NoopStepManager(StepManager):
      """
@@ -292,29 +299,4 @@ class NoopStepManager(StepManager):
          job_id: Optional[str] = None,
          step_id: Optional[str] = None,
      ) -> PydanticStep:
-         step_data = {
-             "origin": None,
-             "organization_id": actor.organization_id,
-             "agent_id": agent_id,
-             "provider_id": provider_id,
-             "provider_name": provider_name,
-             "provider_category": provider_category,
-             "model": model,
-             "model_endpoint": model_endpoint,
-             "context_window_limit": context_window_limit,
-             "completion_tokens": usage.completion_tokens,
-             "prompt_tokens": usage.prompt_tokens,
-             "total_tokens": usage.total_tokens,
-             "job_id": job_id,
-             "tags": [],
-             "tid": None,
-             "trace_id": get_trace_id(),  # Get the current trace ID
-         }
-         if step_id:
-             step_data["id"] = step_id
-         async with db_registry.async_session() as session:
-             if job_id:
-                 await self._verify_job_access_async(session, job_id, actor, access=["write"])
-             new_step = StepModel(**step_data)
-             await new_step.create_async(session)
-         return new_step.to_pydantic()
+         return
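
Because FeedbackType subclasses str, callers that previously passed the literal strings keep working; a small sketch:

    from letta.services.step_manager import FeedbackType

    FeedbackType.POSITIVE == "positive"                  # True: a str-backed enum compares equal to its value
    FeedbackType("negative") is FeedbackType.NEGATIVE    # True: raw strings parse back to the enum member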
letta/services/tool_executor/files_tool_executor.py CHANGED
@@ -1,7 +1,9 @@
  import asyncio
  import re
- from typing import Any, Dict, List, Optional, Tuple
+ from typing import Any, Dict, List, Optional
 
+ from letta.constants import MAX_FILES_OPEN
+ from letta.functions.types import FileOpenRequest
  from letta.log import get_logger
  from letta.otel.tracing import trace_method
  from letta.schemas.agent import AgentState
@@ -31,7 +33,7 @@ class LettaFileToolExecutor(ToolExecutor):
      MAX_REGEX_COMPLEXITY = 1000  # Prevent catastrophic backtracking
      MAX_MATCHES_PER_FILE = 20  # Limit matches per file
      MAX_TOTAL_MATCHES = 50  # Global match limit
-     GREP_TIMEOUT_SECONDS = 30  # Max time for grep operation
+     GREP_TIMEOUT_SECONDS = 30  # Max time for grep_files operation
      MAX_CONTEXT_LINES = 1  # Lines of context around matches
 
      def __init__(
@@ -72,9 +74,8 @@ class LettaFileToolExecutor(ToolExecutor):
              raise ValueError("Agent state is required for file tools")
 
          function_map = {
-             "open_file": self.open_file,
-             "close_file": self.close_file,
-             "grep": self.grep,
+             "open_files": self.open_files,
+             "grep_files": self.grep_files,
              "search_files": self.search_files,
          }
 
@@ -98,56 +99,135 @@ class LettaFileToolExecutor(ToolExecutor):
          )
 
      @trace_method
-     async def open_file(self, agent_state: AgentState, file_name: str, view_range: Optional[Tuple[int, int]] = None) -> str:
-         """Stub for open_file tool."""
-         start, end = None, None
-         if view_range:
-             start, end = view_range
-             if start >= end:
-                 raise ValueError(f"Provided view range {view_range} is invalid, starting range must be less than ending range.")
-
-         # TODO: This is inefficient. We can skip the initial DB lookup by preserving on the block metadata what the file_id is
-         file_agent = await self.files_agents_manager.get_file_agent_by_file_name(
-             agent_id=agent_state.id, file_name=file_name, actor=self.actor
-         )
+     async def open_files(self, agent_state: AgentState, file_requests: List[FileOpenRequest], close_all_others: bool = False) -> str:
+         """Open one or more files and load their contents into memory blocks."""
+         # Parse raw dictionaries into FileOpenRequest objects if needed
+         parsed_requests = []
+         for req in file_requests:
+             if isinstance(req, dict):
+                 # LLM returned a dictionary, parse it into FileOpenRequest
+                 parsed_requests.append(FileOpenRequest(**req))
+             elif isinstance(req, FileOpenRequest):
+                 # Already a FileOpenRequest object
+                 parsed_requests.append(req)
+             else:
+                 raise ValueError(f"Invalid file request type: {type(req)}. Expected dict or FileOpenRequest.")
+
+         file_requests = parsed_requests
+
+         # Validate file count first
+         if len(file_requests) > MAX_FILES_OPEN:
+             raise ValueError(f"Cannot open {len(file_requests)} files: exceeds maximum limit of {MAX_FILES_OPEN} files")
+
+         if not file_requests:
+             raise ValueError("No file requests provided")
 
-         if not file_agent:
-             file_blocks = agent_state.memory.file_blocks
-             file_names = [fb.label for fb in file_blocks]
-             raise ValueError(
-                 f"{file_name} not attached - did you get the filename correct? Currently you have the following files attached: {file_names}"
+         # Extract file names for various operations
+         file_names = [req.file_name for req in file_requests]
+
+         # Get all currently attached files for error reporting
+         file_blocks = agent_state.memory.file_blocks
+         attached_file_names = [fb.label for fb in file_blocks]
+
+         # Close all other files if requested
+         closed_by_close_all_others = []
+         if close_all_others:
+             closed_by_close_all_others = await self.files_agents_manager.close_all_other_files(
+                 agent_id=agent_state.id, keep_file_names=file_names, actor=self.actor
              )
 
-         file_id = file_agent.file_id
-         file = await self.file_manager.get_file_by_id(file_id=file_id, actor=self.actor, include_content=True)
+         # Process each file
+         opened_files = []
+         all_closed_files = []
+
+         for file_request in file_requests:
+             file_name = file_request.file_name
+             offset = file_request.offset
+             length = file_request.length
+
+             # Convert 1-indexed offset/length to 0-indexed start/end for LineChunker
+             start, end = None, None
+             if offset is not None or length is not None:
+                 if offset is not None and offset < 1:
+                     raise ValueError(f"Offset for file {file_name} must be >= 1 (1-indexed), got {offset}")
+                 if length is not None and length < 1:
+                     raise ValueError(f"Length for file {file_name} must be >= 1, got {length}")
+
+                 # Convert to 0-indexed for LineChunker
+                 start = (offset - 1) if offset is not None else None
+                 if start is not None and length is not None:
+                     end = start + length
+                 else:
+                     end = None
+
+             # Validate file exists and is attached to agent
+             file_agent = await self.files_agents_manager.get_file_agent_by_file_name(
+                 agent_id=agent_state.id, file_name=file_name, actor=self.actor
+             )
 
-         # TODO: Inefficient, maybe we can pre-compute this
-         # TODO: This is also not the best way to split things - would be cool to have "content aware" splitting
-         # TODO: Split code differently from large text blurbs
-         content_lines = LineChunker().chunk_text(file_metadata=file, start=start, end=end)
-         visible_content = "\n".join(content_lines)
+             if not file_agent:
+                 raise ValueError(
+                     f"{file_name} not attached - did you get the filename correct? Currently you have the following files attached: {attached_file_names}"
+                 )
 
-         # Efficiently handle LRU eviction and file opening in a single transaction
-         closed_files, was_already_open = await self.files_agents_manager.enforce_max_open_files_and_open(
-             agent_id=agent_state.id, file_id=file_id, file_name=file_name, actor=self.actor, visible_content=visible_content
-         )
+             file_id = file_agent.file_id
+             file = await self.file_manager.get_file_by_id(file_id=file_id, actor=self.actor, include_content=True)
+
+             # Process file content
+             content_lines = LineChunker().chunk_text(file_metadata=file, start=start, end=end)
+             visible_content = "\n".join(content_lines)
 
-         success_msg = f"Successfully opened file {file_name}, lines {start} to {end} are now visible in memory block <{file_name}>"
-         if closed_files:
+             # Handle LRU eviction and file opening
+             closed_files, was_already_open = await self.files_agents_manager.enforce_max_open_files_and_open(
+                 agent_id=agent_state.id, file_id=file_id, file_name=file_name, actor=self.actor, visible_content=visible_content
+             )
+
+             opened_files.append(file_name)
+             all_closed_files.extend(closed_files)
+
+         # Update access timestamps for all opened files efficiently
+         await self.files_agents_manager.mark_access_bulk(agent_id=agent_state.id, file_names=file_names, actor=self.actor)
+
+         # Build success message
+         if len(file_requests) == 1:
+             # Single file - maintain existing format
+             file_request = file_requests[0]
+             file_name = file_request.file_name
+             offset = file_request.offset
+             length = file_request.length
+             if offset is not None and length is not None:
+                 end_line = offset + length - 1
+                 success_msg = (
+                     f"Successfully opened file {file_name}, lines {offset} to {end_line} are now visible in memory block <{file_name}>"
+                 )
+             elif offset is not None:
+                 success_msg = f"Successfully opened file {file_name}, lines {offset} to end are now visible in memory block <{file_name}>"
+             else:
+                 success_msg = f"Successfully opened file {file_name}, entire file is now visible in memory block <{file_name}>"
+         else:
+             # Multiple files - show individual ranges if specified
+             file_summaries = []
+             for req in file_requests:
+                 if req.offset is not None and req.length is not None:
+                     end_line = req.offset + req.length - 1
+                     file_summaries.append(f"{req.file_name} (lines {req.offset}-{end_line})")
+                 elif req.offset is not None:
+                     file_summaries.append(f"{req.file_name} (lines {req.offset}-end)")
+                 else:
+                     file_summaries.append(req.file_name)
+             success_msg = f"Successfully opened {len(file_requests)} files: {', '.join(file_summaries)}"
+
+         # Add information about closed files
+         if closed_by_close_all_others:
+             success_msg += f"\nNote: Closed {len(closed_by_close_all_others)} file(s) due to close_all_others=True: {', '.join(closed_by_close_all_others)}"
+
+         if all_closed_files:
              success_msg += (
-                 f"\nNote: Closed {len(closed_files)} least recently used file(s) due to open file limit: {', '.join(closed_files)}"
+                 f"\nNote: Closed {len(all_closed_files)} least recently used file(s) due to open file limit: {', '.join(all_closed_files)}"
              )
 
          return success_msg
 
-     @trace_method
-     async def close_file(self, agent_state: AgentState, file_name: str) -> str:
-         """Stub for close_file tool."""
-         await self.files_agents_manager.update_file_agent_by_name(
-             agent_id=agent_state.id, file_name=file_name, actor=self.actor, is_open=False
-         )
-         return f"Successfully closed file {file_name}, use function calls to re-open file"
-
      def _validate_regex_pattern(self, pattern: str) -> None:
          """Validate regex pattern to prevent catastrophic backtracking."""
          if len(pattern) > self.MAX_REGEX_COMPLEXITY:
@@ -204,7 +284,9 @@ class LettaFileToolExecutor(ToolExecutor):
          return context_lines_with_indicator
 
      @trace_method
-     async def grep(self, agent_state: AgentState, pattern: str, include: Optional[str] = None, context_lines: Optional[int] = 3) -> str:
+     async def grep_files(
+         self, agent_state: AgentState, pattern: str, include: Optional[str] = None, context_lines: Optional[int] = 3
+     ) -> str:
          """
          Search for pattern in all attached files and return matches with context.
 
@@ -213,7 +295,7 @@ class LettaFileToolExecutor(ToolExecutor):
              pattern: Regular expression pattern to search for
              include: Optional pattern to filter filenames to include in the search
              context_lines (Optional[int]): Number of lines of context to show before and after each match.
-                 Equivalent to `-C` in grep. Defaults to 3.
+                 Equivalent to `-C` in grep_files. Defaults to 3.
 
          Returns:
              Formatted string with search results, file names, line numbers, and context
@@ -310,20 +392,7 @@ class LettaFileToolExecutor(ToolExecutor):
              if formatted_lines and formatted_lines[0].startswith("[Viewing"):
                  formatted_lines = formatted_lines[1:]
 
-             # Convert 0-based line numbers to 1-based for grep compatibility
-             corrected_lines = []
-             for line in formatted_lines:
-                 if line and ":" in line:
-                     try:
-                         line_parts = line.split(":", 1)
-                         line_num = int(line_parts[0].strip())
-                         line_content = line_parts[1] if len(line_parts) > 1 else ""
-                         corrected_lines.append(f"{line_num + 1}:{line_content}")
-                     except (ValueError, IndexError):
-                         corrected_lines.append(line)
-                 else:
-                     corrected_lines.append(line)
-             formatted_lines = corrected_lines
+             # LineChunker now returns 1-indexed line numbers, so no conversion needed
 
              # Search for matches in formatted lines
              for formatted_line in formatted_lines:
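
The 1-indexed offset/length pair from FileOpenRequest maps onto LineChunker's 0-indexed, end-exclusive start/end as follows:

    # FileOpenRequest(file_name="main.py", offset=100, length=100)
    #   start = offset - 1     = 99
    #   end   = start + length = 199   (end-exclusive)
    # -> LineChunker displays lines 100 through 199, and the success message
    #    reports "lines 100 to 199" (end_line = offset + length - 1).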
letta/utils.py CHANGED
@@ -546,7 +546,7 @@ def enforce_types(func):
          for arg_name, arg_value in kwargs.items():
              hint = hints.get(arg_name)
              if hint and not matches_type(arg_value, hint):
-                 raise ValueError(f"Argument {arg_name} does not match type {hint}; is {arg_value}")
+                 raise ValueError(f"Argument {arg_name} does not match type {hint}; is {arg_value} of type {type(arg_value)}")
 
          return func(*args, **kwargs)
 
letta_nightly-0.8.6.dev20250627220317.dist-info/METADATA → letta_nightly-0.8.7.dev20250627220731.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: letta-nightly
- Version: 0.8.6.dev20250627220317
+ Version: 0.8.7.dev20250627220731
  Summary: Create LLM agents with long-term memory and custom tools
  License: Apache License
  Author: Letta Team
letta_nightly-0.8.6.dev20250627220317.dist-info/RECORD → letta_nightly-0.8.7.dev20250627220731.dist-info/RECORD CHANGED
@@ -1,4 +1,4 @@
- letta/__init__.py,sha256=I71uxdVlQqtf-7maevWKQg8Lzv0Hn81WQqmVkYEDtC8,1042
+ letta/__init__.py,sha256=lOUklJSTcBdSfR5NdRUgUPfzmqrkcgpB7cHXQlchEdU,1043
  letta/agent.py,sha256=PLTHwDvgl4Nffi_NBm3Jn40H39t7Z6_kZWJ8sswNlpk,89097
  letta/agents/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  letta/agents/base_agent.py,sha256=ymn0Uq9VcImoHmmnP0DyXQ-D3trmpcYVCdi7S9p0fKs,6571
@@ -18,7 +18,7 @@ letta/client/client.py,sha256=l_yKUUzl1-qfxFkDHsOMHxSwyzOBbx-2mi0GfI3WlJE,84906
  letta/client/streaming.py,sha256=UsDS_tDTsA3HgYryIDvGGmx_dWfnfQwtmEwLi4Z89Ik,4701
  letta/client/utils.py,sha256=VCGV-op5ZSmurd4yw7Vhf93XDQ0BkyBT8qsuV7EqfiU,2859
  letta/config.py,sha256=JFGY4TWW0Wm5fTbZamOwWqk5G8Nn-TXyhgByGoAqy2c,12375
- letta/constants.py,sha256=84Y0ju1_MLQBo4jkgVci-8ctZMUXxxm2WNYv8oefO7A,14242
+ letta/constants.py,sha256=pOLogWzDCJhSr2rp-hPhOFRt0INSLpFx-7M2uJw7NEs,14312
  letta/data_sources/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  letta/data_sources/connectors.py,sha256=V8mUgE3V6CX-CcOyvkPSQ_ZWP2VtuqgTEXkCN1j0p68,7920
  letta/data_sources/connectors_helper.py,sha256=oQpVlc-BjSz9sTZ7sp4PsJSXJbBKpZPi3Dam03CURTQ,3376
@@ -32,7 +32,7 @@ letta/functions/composio_helpers.py,sha256=mpybCYcB93HWoKrmQIqcuRQG9IH2lHWhsPQx2
  letta/functions/function_sets/base.py,sha256=FS-LRbvzO-duSUy0yLP_fBk2WSs4NAaaTAUuhl2ZS-I,16154
  letta/functions/function_sets/builtin.py,sha256=tm9KWrWZKhXMoXgd7PSsp-WfS8chhFe7gIdQhT1N5E4,2027
  letta/functions/function_sets/extras.py,sha256=mG7jCd2RUsf1w9G8mVcv26twJWpiDhbWI6VvnLZoEOk,4899
- letta/functions/function_sets/files.py,sha256=baXefT4pyUqaF7jb_731V4m4q-gpx62wvZ7DNCBo6Eg,2252
+ letta/functions/function_sets/files.py,sha256=X8BOQiQ8T1f7E8juZ0V-UoKNIkAZluWApeo1nHDQ6TI,2724
  letta/functions/function_sets/multi_agent.py,sha256=Dy0lW2MvgbAtTkdmVoMelSDqf9uuiPkZWIpfDBYqap8,5435
  letta/functions/function_sets/voice.py,sha256=_gmFEj3fSFb-4eMM-ddSOm-Vk1ShIVjpchZI7MQKwSA,3191
  letta/functions/functions.py,sha256=JVweekyBSlgquNt2Sa4CID7WLE_wIIB7Dl2SHK2gBtM,5940
@@ -45,13 +45,13 @@ letta/functions/mcp_client/sse_client.py,sha256=QDC29Re2mdGji77YrUk7JXfTVJeS6039
  letta/functions/mcp_client/stdio_client.py,sha256=-SymmkO_KNFZUP5e5mhcidpyyXBv6Xh72yUiMnv_bFw,4709
  letta/functions/mcp_client/types.py,sha256=7WoTIAA5NhgmpVwdOxcE3FBn8T_TUVOhE6hLPeSubE0,5703
  letta/functions/prompts.py,sha256=jNl83xjNpoSs8KzGtuc6jzN8jp_T4BC_5f4FMJ88_K0,1145
- letta/functions/schema_generator.py,sha256=YDAyqtG_moh9bxrzBAyLfEFTQCMGZHjFlpoVvuKi7XA,23356
- letta/functions/types.py,sha256=t7JAL3cjtrGGNI_U5il4rQYme4iGkLSg5Y1nDWjOtfU,254
+ letta/functions/schema_generator.py,sha256=kq5m4utb-uGYmHcu3io3X5MZ_czlOPng9USPUL83VA4,28177
+ letta/functions/types.py,sha256=mrKmfWaS1mx1m6e9E6ZahhKW7-IedPCf0AjvUZlKhhE,718
  letta/groups/dynamic_multi_agent.py,sha256=OLCxhICFLYyx8wjKGPr1INc6pniEuk4YGZyZhq2vkiY,12230
  letta/groups/helpers.py,sha256=VABgj684cInv5TXUZ31baTX4nXLghNe0DCnPiEBdjHw,4435
  letta/groups/round_robin_multi_agent.py,sha256=uUJff0bO68udOREiKFWeS7eEQlk3bF7hcfLSFXMScqI,6999
  letta/groups/sleeptime_multi_agent.py,sha256=Wo9uc-ZD99SlW3bcP91EwXc3kIX5t8sCcY1JMJjuiRY,10462
- letta/groups/sleeptime_multi_agent_v2.py,sha256=heMOb-vo-cNRLwSciDoIYdJFD6My5y4r3gNFeMX3l_4,13379
+ letta/groups/sleeptime_multi_agent_v2.py,sha256=Vgn_Mhz5pGHX4ICLv-vS_j5Fd1Kb7W-1U-YayGAJlVE,13531
  letta/groups/supervisor_multi_agent.py,sha256=ml8Gi9gyVjPuVZjAJAkpGZDjnM7GOS50NkKf5SIutvQ,4455
  letta/helpers/__init__.py,sha256=p0luQ1Oe3Skc6sH4O58aHHA3Qbkyjifpuq0DZ1GAY0U,59
  letta/helpers/composio_helpers.py,sha256=MwfmLt7tgjvxAXLHpx9pa5QolxcqoCbofb-30-DVpsI,1714
@@ -171,7 +171,7 @@ letta/orm/provider_trace.py,sha256=CJMGz-rLqagJ-yXh9SJRbiGr5nAYdxY524hmiTgDFx4,1
  letta/orm/sandbox_config.py,sha256=zOCvORexDBt16mc6A3U65EI6_2Xe3Roh7k2asLeFMps,4242
  letta/orm/source.py,sha256=rtehzez80rRrJigXeRBgTlfTZEUy6cVqDizWEN2tvuY,2224
  letta/orm/sources_agents.py,sha256=Ik_PokCBrXRd9wXWomeNeb8EtLUwjb9VMZ8LWXqpK5A,473
- letta/orm/sqlalchemy_base.py,sha256=W9JkvSCjWEMscAlfPKC1GHKM6PoVFTDHxjrGBq6MVgQ,44042
+ letta/orm/sqlalchemy_base.py,sha256=L6dX5_InnYWB9HRhYAY2j_yA9Oye5uxmmSIq9PCuEDo,44399
  letta/orm/sqlite_functions.py,sha256=JCScKiRlYCKxy9hChQ8wsk4GMKknZE24MunnG3fM1Gw,4255
  letta/orm/step.py,sha256=SLsLY1g4nuUeI47q9rlXPBCSVUNX3lxYAYAIqxy-YK4,3517
  letta/orm/tool.py,sha256=oTDbvSNNW_jHjYbJqqsLLuXf9uFRTZTZh33TXAcZ898,2839
@@ -227,7 +227,7 @@ letta/prompts/system/voice_chat.txt,sha256=Q_vd2Q08z6qTIVeMML0z9706NG8aAq-scxvi-
  letta/prompts/system/voice_sleeptime.txt,sha256=LPh-XjAthvsdEkXoZ4NTzTUuMbMsMkoDl9ofCUJC7Us,3696
  letta/prompts/system/workflow.txt,sha256=pLOaUDsNFAzLs4xb9JgGtd1w-lrq0Q1E7SpFBttXYCI,834
  letta/pytest.ini,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- letta/schemas/agent.py,sha256=6BNV57XhMMDDQ8YZrFEurNw1L_srx0Cqen7xJyqfNKU,23908
+ letta/schemas/agent.py,sha256=AMbKzxCVvGH488jOLy4X8XoUDrUIcWACu8054-djH6A,23874
  letta/schemas/block.py,sha256=awxCQKxmv4I4k9Au5h-a2RCeSVF54EfWyBQPtHRwuNQ,5585
  letta/schemas/embedding_config.py,sha256=By79UpBnjh6lg9q6c12th6EfzLSSUVgRbwofnhoI4hM,3762
  letta/schemas/embedding_config_overrides.py,sha256=lkTa4y-EQ2RnaEKtKDM0sEAk7EwNa67REw8DGNNtGQY,84
@@ -310,7 +310,7 @@ letta/server/rest_api/routers/v1/providers.py,sha256=8SJ_RsSk7L4nh1f_uFE31JOxefm
  letta/server/rest_api/routers/v1/runs.py,sha256=vieUp7uTvRTdAte0Nw1bqX2APMATZhKTr2R1HVNJT74,8879
  letta/server/rest_api/routers/v1/sandbox_configs.py,sha256=pKuy88GD3atrBkKa7VVfKTjg8Y07e1vVtdw4TtxkQBk,8910
  letta/server/rest_api/routers/v1/sources.py,sha256=-_jWL2jDqOt2r0IUUxjHjxf4TZhAJDdt7_J5f_kGF_U,17094
- letta/server/rest_api/routers/v1/steps.py,sha256=uNEQVEeRU9RwYuD2Dz1PzZy04nyvMSTY0Tx0WrMEgVs,4362
+ letta/server/rest_api/routers/v1/steps.py,sha256=7WptAAnx0FmTQlkRbdGqt01bH0KrwSbCBajOI_RTfOk,4554
  letta/server/rest_api/routers/v1/tags.py,sha256=ef94QitUSJ3NQVffWF1ZqANUZ2b2jRyGHp_I3UUjhno,912
  letta/server/rest_api/routers/v1/telemetry.py,sha256=z53BW3Pefi3eWy47FPJyGhFWbZicX9jPJUi5LC5c3sk,790
  letta/server/rest_api/routers/v1/tools.py,sha256=JYHy522vclWJXouzLpLQJGEXLgTtoWk4w6LQgiUdtMQ,27672
@@ -319,7 +319,7 @@ letta/server/rest_api/routers/v1/voice.py,sha256=ghMBp5Uovbf0-3nN6d9P5kpl1hHACLR
  letta/server/rest_api/static_files.py,sha256=NG8sN4Z5EJ8JVQdj19tkFa9iQ1kBPTab9f_CUxd_u4Q,3143
  letta/server/rest_api/streaming_response.py,sha256=yYTuZHfuZ-DYYbA1Ta6axkBn5MvC6OHuVRHSiBqRNUk,3939
  letta/server/rest_api/utils.py,sha256=6Ar4r3eohMRr5_p4e07x54ILZC5X4A9XZnupzQRasQA,17808
- letta/server/server.py,sha256=JtbXHs4FOcqewxPn3Ol-1XnH4bzcnIdnJAlGQPudClQ,113661
+ letta/server/server.py,sha256=kakYFBV22esqMWRgEHJ69wOLYFRz7kpHPvw8qkwMGiQ,113681
  letta/server/startup.sh,sha256=MRXh1RKbS5lyA7XAsk7O6Q4LEKOqnv5B-dwe0SnTHeQ,2514
  letta/server/static_files/assets/index-048c9598.js,sha256=mR16XppvselwKCcNgONs4L7kZEVa4OEERm4lNZYtLSk,146819
  letta/server/static_files/assets/index-0e31b727.css,sha256=SBbja96uiQVLDhDOroHgM6NSl7tS4lpJRCREgSS_hA8,7672
@@ -333,7 +333,7 @@ letta/server/ws_api/interface.py,sha256=TWl9vkcMCnLsUtgsuENZ-ku2oMDA-OUTzLh_yNRo
  letta/server/ws_api/protocol.py,sha256=5mDgpfNZn_kNwHnpt5Dsuw8gdNH298sgxTGed3etzYg,1836
  letta/server/ws_api/server.py,sha256=cBSzf-V4zT1bL_0i54OTI3cMXhTIIxqjSRF8pYjk7fg,5835
  letta/services/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- letta/services/agent_manager.py,sha256=w636-jxEp1YglX3z9fSWotmlrvFJcHBBOV2gzNMB1c8,120893
+ letta/services/agent_manager.py,sha256=BAj7wu74BXp6U7keQ2YPmIgavCVN9MCYcf_9J0Fscl8,121041
  letta/services/block_manager.py,sha256=YwDGdy6f6MNXVXVOxIMOOP6IEWT8h-k5uQlveof0pyE,22744
  letta/services/context_window_calculator/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  letta/services/context_window_calculator/context_window_calculator.py,sha256=H0-Ello1DHV28MnzMseWrg--jarDc6YwCcgwPlWjtZk,6527
@@ -341,7 +341,7 @@ letta/services/context_window_calculator/token_counter.py,sha256=Ai9-aPkNvhhMTj9
  letta/services/file_manager.py,sha256=qSdkVvouQbpZrCjh90XikUm4jlndm7Fxz4w2sMxt17E,8554
  letta/services/file_processor/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  letta/services/file_processor/chunker/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- letta/services/file_processor/chunker/line_chunker.py,sha256=Lm5iZR7tFyOlew8XrZpowx-0wbIlluXQXZ6R-VcDY8U,5576
+ letta/services/file_processor/chunker/line_chunker.py,sha256=6VGwthka1Xg9z2r84YbaSG6G_UmrQnbe7aELgkmp2d4,5752
  letta/services/file_processor/chunker/llama_index_chunker.py,sha256=dEBf33TifD_BcxjNAULim9NDF8VPG8EQkjeR9saK4t4,3982
  letta/services/file_processor/embedder/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  letta/services/file_processor/embedder/openai_embedder.py,sha256=BjKsNqh_nfNIDVWkCR2noFX7E6Mr68FQtj79F2xeCpM,3545
@@ -351,9 +351,9 @@ letta/services/file_processor/parser/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeu
  letta/services/file_processor/parser/base_parser.py,sha256=WfnXP6fL-xQz4eIHEWa6-ZNEAARbF_alowqH4BAUzJo,238
  letta/services/file_processor/parser/mistral_parser.py,sha256=Hzsrm36HbKQ7CWljTZT1RgbvxE4gvSBq76Ucj80jjeQ,2322
  letta/services/file_processor/types.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- letta/services/files_agents_manager.py,sha256=xBzXPrpbUF-HMMcK0js70QOgLv28AeJ21U0wC2Y9CBs,16592
+ letta/services/files_agents_manager.py,sha256=OnwWz772QjCdlvNf-Di3KBeUEV7XrvSK0grGMvglv1o,18073
  letta/services/group_manager.py,sha256=X2gKKUGKTXGRMC8YjwmE6EOB1cVM4lo31eCnmog7dPQ,23368
- letta/services/helpers/agent_manager_helper.py,sha256=2zPnxs1QFo_uGv1fG-pqwTQ-oFfgcqNjk9deLVNgby4,43134
+ letta/services/helpers/agent_manager_helper.py,sha256=drZiCO_GekinY7F5lYpa_Ykckb63np703InONJIgnys,43502
  letta/services/helpers/tool_execution_helper.py,sha256=45L7woJ98jK5MQAnhE_4NZdCeyOOzC4328FTQPM7iTA,9159
  letta/services/helpers/tool_parser_helper.py,sha256=EI5tcre-D5U3mEzIMhfkAGlUwYckW1JlCJ-iqwoTTrc,4336
  letta/services/identity_manager.py,sha256=L8EYGYXA9sveLwPCTYZIdYZwOMnHex47TBiMYcco_y4,10575
@@ -373,7 +373,7 @@ letta/services/per_agent_lock_manager.py,sha256=cMaW8r-qhucQbiK27jVqz8wzhlr2yuRN
  letta/services/provider_manager.py,sha256=mEtiBF7kJgSzDwwyqSmWLT6kgvWPk-FERZ9Zw8QKpHw,9557
  letta/services/sandbox_config_manager.py,sha256=fcJkXCaA6vmrnTusHhns-c_aRXcPlFLICPGdWDaY8XQ,26138
  letta/services/source_manager.py,sha256=bfkfubjvlvnpkd-W6FcoQ0qDYxWkajut0ZY1cI56bbE,6197
- letta/services/step_manager.py,sha256=BqCz9guHYJqx9bu0wn2YCqcIHv80upT0eddyxiP0yvo,11452
+ letta/services/step_manager.py,sha256=7eSmV6czRhxZc_Fd2tIXetffRsQbh0Qf5x3tLSe9pRM,10568
  letta/services/summarizer/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  letta/services/summarizer/enums.py,sha256=szzPX2OBRRJEZsBTGYQThrNz02ELFqhuLwvOR7ozi7A,208
  letta/services/summarizer/summarizer.py,sha256=9UHxZ1YPIIUWMK5_kz4_Kf5Qj9eHOo2zeEcmbzfUi_o,9275
@@ -382,7 +382,7 @@ letta/services/tool_executor/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NM
  letta/services/tool_executor/builtin_tool_executor.py,sha256=bOrw0qzBf1c9e-NkeR1QAPFV8BK-FrBqIRqpPL7jJeM,17460
  letta/services/tool_executor/composio_tool_executor.py,sha256=ia2AA_WDOseR8Ylam-HEayR7OiyfNSb1sSUrjwqlmFM,2308
  letta/services/tool_executor/core_tool_executor.py,sha256=YfUTxo78FNFPFNc6714RR_ztrf9I1GATs4cZhSbiEag,20503
- letta/services/tool_executor/files_tool_executor.py,sha256=lBjZwjJrDu2JTzRih4tywLwxyxMRc-zipr2I47c_KHk,21133
+ letta/services/tool_executor/files_tool_executor.py,sha256=xrl71vmIFKC2pFDKUTjWPSpPSBm8WUcqH52HnPu09zA,24154
  letta/services/tool_executor/mcp_tool_executor.py,sha256=x8V8J4Xi1ZVbwfaR_IwnUGRrD9w5wgV4G54sjraVBw4,1676
  letta/services/tool_executor/multi_agent_tool_executor.py,sha256=uBlUqDtS5JYb-bPDiucZBzwtSuoV3NIhhsmzniG5fDA,4464
  letta/services/tool_executor/tool_execution_manager.py,sha256=flCbTmtxZvYNcTGNC2MrYWkdPIatqFPTWnI8oJUWTIY,6399
@@ -404,9 +404,9 @@ letta/templates/sandbox_code_file.py.j2,sha256=zgzaboDZVtM15XkxILnhiKisF7DSUoI2Y
  letta/templates/sandbox_code_file_async.py.j2,sha256=hL6UWt4L16o79OPOBq1_Cw7gR5-gpaR_esbmU8bSp8w,1805
  letta/templates/template_helper.py,sha256=uHWO1PukgMoIIvgqQdPyHq3o3CQ6mcjUjTGvx9VLGkk,409
  letta/types/__init__.py,sha256=hokKjCVFGEfR7SLMrtZsRsBfsC7yTIbgKPLdGg4K1eY,147
- letta/utils.py,sha256=ZfyAcRBITlYs2XM5fHj_Lp08fPFMBMaQSPbfYlETbDs,33198
- letta_nightly-0.8.6.dev20250627220317.dist-info/LICENSE,sha256=mExtuZ_GYJgDEI38GWdiEYZizZS4KkVt2SF1g_GPNhI,10759
- letta_nightly-0.8.6.dev20250627220317.dist-info/METADATA,sha256=Ufu1w-A46HxtEzzu_gAqGO1oF9AZFANSvsKf_dkWOOA,22841
- letta_nightly-0.8.6.dev20250627220317.dist-info/WHEEL,sha256=FMvqSimYX_P7y0a7UY-_Mc83r5zkBZsCYPm7Lr0Bsq4,88
- letta_nightly-0.8.6.dev20250627220317.dist-info/entry_points.txt,sha256=2zdiyGNEZGV5oYBuS-y2nAAgjDgcC9yM_mHJBFSRt5U,40
- letta_nightly-0.8.6.dev20250627220317.dist-info/RECORD,,
+ letta/utils.py,sha256=WkPJD9cs00CKgu5ezcTz5vSP76npyuBOpK7paQDQtxk,33224
+ letta_nightly-0.8.7.dev20250627220731.dist-info/LICENSE,sha256=mExtuZ_GYJgDEI38GWdiEYZizZS4KkVt2SF1g_GPNhI,10759
+ letta_nightly-0.8.7.dev20250627220731.dist-info/METADATA,sha256=oFq4yuyZJbmGhDMzJ3VZj8IXdRgr0GtmLt7Tt4tdkOw,22841
+ letta_nightly-0.8.7.dev20250627220731.dist-info/WHEEL,sha256=FMvqSimYX_P7y0a7UY-_Mc83r5zkBZsCYPm7Lr0Bsq4,88
+ letta_nightly-0.8.7.dev20250627220731.dist-info/entry_points.txt,sha256=2zdiyGNEZGV5oYBuS-y2nAAgjDgcC9yM_mHJBFSRt5U,40
+ letta_nightly-0.8.7.dev20250627220731.dist-info/RECORD,,