letta-nightly 0.7.5.dev20250426104040__py3-none-any.whl → 0.7.5.dev20250428110034__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
letta/agents/letta_agent.py CHANGED
@@ -57,6 +57,7 @@ class LettaAgent(BaseAgent):
         self.block_manager = block_manager
         self.passage_manager = passage_manager
         self.use_assistant_message = use_assistant_message
+        self.response_messages: List[Message] = []
 
     @trace_method
     async def step(self, input_messages: List[MessageCreate], max_steps: int = 10) -> LettaResponse:
@@ -81,6 +82,7 @@ class LettaAgent(BaseAgent):
 
             tool_call = response.choices[0].message.tool_calls[0]
             persisted_messages, should_continue = await self._handle_ai_response(tool_call, agent_state, tool_rules_solver)
+            self.response_messages.extend(persisted_messages)
             new_in_context_messages.extend(persisted_messages)
 
             if not should_continue:
@@ -139,6 +141,7 @@ class LettaAgent(BaseAgent):
                 pre_computed_assistant_message_id=interface.letta_assistant_message_id,
                 pre_computed_tool_message_id=interface.letta_tool_message_id,
             )
+            self.response_messages.extend(persisted_messages)
            new_in_context_messages.extend(persisted_messages)
 
             if not should_continue:
@@ -167,7 +170,14 @@ class LettaAgent(BaseAgent):
         tools = [
             t
             for t in agent_state.tools
-            if t.tool_type in {ToolType.CUSTOM, ToolType.LETTA_CORE, ToolType.LETTA_MEMORY_CORE}
+            if t.tool_type
+            in {
+                ToolType.CUSTOM,
+                ToolType.LETTA_CORE,
+                ToolType.LETTA_MEMORY_CORE,
+                ToolType.LETTA_MULTI_AGENT_CORE,
+                ToolType.LETTA_SLEEPTIME_CORE,
+            }
             or (t.tool_type == ToolType.LETTA_MULTI_AGENT_CORE and t.name == "send_message_to_agents_matching_tags")
         ]
 
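Note: the new `response_messages` attribute collects every persisted message across the run, so a wrapper can inspect what the inner agent produced after `step()` returns (this is how the sleeptime group code below consumes it). A minimal sketch of that pattern, assuming an already-constructed `LettaAgent` and a prepared `MessageCreate` list (neither is defined here):

    import asyncio

    async def run_once(agent, messages):
        # `agent` is an existing LettaAgent, `messages` a List[MessageCreate]
        response = await agent.step(input_messages=messages, max_steps=10)
        # Messages persisted during the run are also collected on the agent itself
        for msg in agent.response_messages:
            print(msg.id, msg.role)
        return response

    # asyncio.run(run_once(agent, messages))  # with real objects supplied
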
letta/groups/helpers.py CHANGED
@@ -88,11 +88,14 @@ def load_multi_agent(
 def stringify_message(message: Message, use_assistant_name: bool = False) -> str | None:
     assistant_name = message.name or "assistant" if use_assistant_name else "assistant"
     if message.role == "user":
-        content = json.loads(message.content[0].text)
-        if content["type"] == "user_message":
-            return f"{message.name or 'user'}: {content['message']}"
-        else:
-            return None
+        try:
+            content = json.loads(message.content[0].text)
+            if content["type"] == "user_message":
+                return f"{message.name or 'user'}: {content['message']}"
+            else:
+                return None
+        except:
+            return f"{message.name or 'user'}: {message.content[0].text}"
     elif message.role == "assistant":
         messages = []
         if message.tool_calls:
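Note: the try/except is there because user message content is not always the JSON `user_message` envelope; `json.loads` raises on plain text, and the fallback now returns the raw text instead of crashing. A standalone sketch of the same logic, using a bare string in place of the Message schema:

    import json

    def stringify_user_text(name: str, raw_text: str) -> str | None:
        # Structured user_message payloads are unpacked; anything that fails
        # to parse (or lacks the expected keys) is returned verbatim.
        try:
            content = json.loads(raw_text)
            if content["type"] == "user_message":
                return f"{name}: {content['message']}"
            return None
        except (json.JSONDecodeError, TypeError, KeyError):
            return f"{name}: {raw_text}"

    print(stringify_user_text("user", '{"type": "user_message", "message": "hi"}'))  # user: hi
    print(stringify_user_text("user", "just plain text"))  # user: just plain text
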
letta/groups/sleeptime_multi_agent.py CHANGED
@@ -107,6 +107,9 @@ class SleeptimeMultiAgent(Agent):
         run_id: str,
     ) -> LettaUsageStatistics:
         try:
+            job_update = JobUpdate(status=JobStatus.running)
+            self.job_manager.update_job_by_id(job_id=run_id, job_update=job_update, actor=self.user)
+
             participant_agent_state = self.agent_manager.get_agent_by_id(participant_agent_id, actor=self.user)
             participant_agent = Agent(
                 agent_state=participant_agent_state,
letta/groups/sleeptime_multi_agent_v2.py ADDED
@@ -0,0 +1,249 @@
+import asyncio
+from datetime import datetime, timezone
+from typing import AsyncGenerator, List, Optional
+
+from letta.agents.base_agent import BaseAgent
+from letta.agents.letta_agent import LettaAgent
+from letta.groups.helpers import stringify_message
+from letta.schemas.enums import JobStatus
+from letta.schemas.group import Group, ManagerType
+from letta.schemas.job import JobUpdate
+from letta.schemas.letta_message_content import TextContent
+from letta.schemas.letta_response import LettaResponse
+from letta.schemas.message import Message, MessageCreate
+from letta.schemas.run import Run
+from letta.schemas.user import User
+from letta.services.agent_manager import AgentManager
+from letta.services.block_manager import BlockManager
+from letta.services.group_manager import GroupManager
+from letta.services.job_manager import JobManager
+from letta.services.message_manager import MessageManager
+from letta.services.passage_manager import PassageManager
+
+
+class SleeptimeMultiAgentV2(BaseAgent):
+    def __init__(
+        self,
+        agent_id: str,
+        message_manager: MessageManager,
+        agent_manager: AgentManager,
+        block_manager: BlockManager,
+        passage_manager: PassageManager,
+        group_manager: GroupManager,
+        job_manager: JobManager,
+        actor: User,
+        group: Optional[Group] = None,
+    ):
+        super().__init__(
+            agent_id=agent_id,
+            openai_client=None,
+            message_manager=message_manager,
+            agent_manager=agent_manager,
+            actor=actor,
+        )
+        self.block_manager = block_manager
+        self.passage_manager = passage_manager
+        self.group_manager = group_manager
+        self.job_manager = job_manager
+        # Group settings
+        assert group.manager_type == ManagerType.sleeptime, f"Expected group manager type to be 'sleeptime', got {group.manager_type}"
+        self.group = group
+
+    async def step(self, input_messages: List[MessageCreate], max_steps: int = 10) -> LettaResponse:
+        run_ids = []
+
+        # Prepare new messages
+        new_messages = []
+        for message in input_messages:
+            if isinstance(message.content, str):
+                message.content = [TextContent(text=message.content)]
+            message.group_id = self.group.id
+            new_messages.append(message)
+
+        # Load foreground agent
+        foreground_agent = LettaAgent(
+            agent_id=self.agent_id,
+            message_manager=self.message_manager,
+            agent_manager=self.agent_manager,
+            block_manager=self.block_manager,
+            passage_manager=self.passage_manager,
+            actor=self.actor,
+        )
+        # Perform foreground agent step
+        response = await foreground_agent.step(input_messages=new_messages, max_steps=max_steps)
+
+        # Get last response messages
+        last_response_messages = foreground_agent.response_messages
+
+        # Update turns counter
+        if self.group.sleeptime_agent_frequency is not None and self.group.sleeptime_agent_frequency > 0:
+            turns_counter = self.group_manager.bump_turns_counter(group_id=self.group.id, actor=self.actor)
+
+        # Perform participant steps
+        if self.group.sleeptime_agent_frequency is None or (
+            turns_counter is not None and turns_counter % self.group.sleeptime_agent_frequency == 0
+        ):
+            last_processed_message_id = self.group_manager.get_last_processed_message_id_and_update(
+                group_id=self.group.id, last_processed_message_id=last_response_messages[-1].id, actor=self.actor
+            )
+            for participant_agent_id in self.group.agent_ids:
+                try:
+                    run_id = await self._issue_background_task(
+                        participant_agent_id,
+                        last_response_messages,
+                        last_processed_message_id,
+                    )
+                    run_ids.append(run_id)
+
+                except Exception as e:
+                    # Individual task failures
+                    print(f"Agent processing failed: {str(e)}")
+                    raise e
+
+        response.usage.run_ids = run_ids
+        return response
+
+    async def step_stream(self, input_messages: List[MessageCreate], max_steps: int = 10) -> AsyncGenerator[str, None]:
+        # Prepare new messages
+        new_messages = []
+        for message in input_messages:
+            if isinstance(message.content, str):
+                message.content = [TextContent(text=message.content)]
+            message.group_id = self.group.id
+            new_messages.append(message)
+
+        # Load foreground agent
+        foreground_agent = LettaAgent(
+            agent_id=self.agent_id,
+            message_manager=self.message_manager,
+            agent_manager=self.agent_manager,
+            block_manager=self.block_manager,
+            passage_manager=self.passage_manager,
+            actor=self.actor,
+        )
+        # Perform foreground agent step
+        async for chunk in foreground_agent.step_stream(input_messages=new_messages, max_steps=max_steps):
+            yield chunk
+
+        # Get response messages
+        last_response_messages = foreground_agent.response_messages
+
+        # Update turns counter
+        if self.group.sleeptime_agent_frequency is not None and self.group.sleeptime_agent_frequency > 0:
+            turns_counter = self.group_manager.bump_turns_counter(group_id=self.group.id, actor=self.actor)
+
+        # Perform participant steps
+        if self.group.sleeptime_agent_frequency is None or (
+            turns_counter is not None and turns_counter % self.group.sleeptime_agent_frequency == 0
+        ):
+            last_processed_message_id = self.group_manager.get_last_processed_message_id_and_update(
+                group_id=self.group.id, last_processed_message_id=last_response_messages[-1].id, actor=self.actor
+            )
+            for sleeptime_agent_id in self.group.agent_ids:
+                self._issue_background_task(
+                    sleeptime_agent_id,
+                    last_response_messages,
+                    last_processed_message_id,
+                )
+
+    async def _issue_background_task(
+        self,
+        sleeptime_agent_id: str,
+        response_messages: List[Message],
+        last_processed_message_id: str,
+    ) -> str:
+        run = Run(
+            user_id=self.actor.id,
+            status=JobStatus.created,
+            metadata={
+                "job_type": "sleeptime_agent_send_message_async", # is this right?
+                "agent_id": sleeptime_agent_id,
+            },
+        )
+        run = self.job_manager.create_job(pydantic_job=run, actor=self.actor)
+
+        asyncio.create_task(
+            self._participant_agent_step(
+                foreground_agent_id=self.agent_id,
+                sleeptime_agent_id=sleeptime_agent_id,
+                response_messages=response_messages,
+                last_processed_message_id=last_processed_message_id,
+                run_id=run.id,
+            )
+        )
+        return run.id
+
+    async def _participant_agent_step(
+        self,
+        foreground_agent_id: str,
+        sleeptime_agent_id: str,
+        response_messages: List[Message],
+        last_processed_message_id: str,
+        run_id: str,
+    ) -> str:
+        try:
+            # Update job status
+            job_update = JobUpdate(status=JobStatus.running)
+            self.job_manager.update_job_by_id(job_id=run_id, job_update=job_update, actor=self.actor)
+
+            # Create conversation transcript
+            prior_messages = []
+            if self.group.sleeptime_agent_frequency:
+                try:
+                    prior_messages = self.message_manager.list_messages_for_agent(
+                        agent_id=foreground_agent_id,
+                        actor=self.actor,
+                        after=last_processed_message_id,
+                        before=response_messages[0].id,
+                    )
+                except Exception:
+                    pass # continue with just latest messages
+
+            transcript_summary = [stringify_message(message) for message in prior_messages + response_messages]
+            transcript_summary = [summary for summary in transcript_summary if summary is not None]
+            message_text = "\n".join(transcript_summary)
+
+            sleeptime_agent_messages = [
+                MessageCreate(
+                    role="user",
+                    content=[TextContent(text=message_text)],
+                    id=Message.generate_id(),
+                    agent_id=sleeptime_agent_id,
+                    group_id=self.group.id,
+                )
+            ]
+
+            # Load sleeptime agent
+            sleeptime_agent = LettaAgent(
+                agent_id=sleeptime_agent_id,
+                message_manager=self.message_manager,
+                agent_manager=self.agent_manager,
+                block_manager=self.block_manager,
+                passage_manager=self.passage_manager,
+                actor=self.actor,
+            )
+
+            # Perform sleeptime agent step
+            result = await sleeptime_agent.step(
+                input_messages=sleeptime_agent_messages,
+            )
+
+            # Update job status
+            job_update = JobUpdate(
+                status=JobStatus.completed,
+                completed_at=datetime.now(timezone.utc),
+                metadata={
+                    "result": result.model_dump(mode="json"),
+                    "agent_id": sleeptime_agent_id,
+                },
+            )
+            self.job_manager.update_job_by_id(job_id=run_id, job_update=job_update, actor=self.actor)
+            return result
+        except Exception as e:
+            job_update = JobUpdate(
+                status=JobStatus.failed,
+                completed_at=datetime.now(timezone.utc),
+                metadata={"error": str(e)},
+            )
+            self.job_manager.update_job_by_id(job_id=run_id, job_update=job_update, actor=self.actor)
+            raise
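Note: unlike the v1 `SleeptimeMultiAgent`, the v2 class runs the foreground step inline and fires each sleeptime participant as an `asyncio` background task, reporting the created run ids on the response usage. A rough usage sketch, assuming a fully wired `SleeptimeMultiAgentV2` instance (managers, actor, and a sleeptime `Group`) is supplied elsewhere:

    from letta.schemas.message import MessageCreate

    async def run_group_turn(multi_agent, text: str):
        # `multi_agent` is an already-constructed SleeptimeMultiAgentV2 (hypothetical wiring)
        response = await multi_agent.step(
            input_messages=[MessageCreate(role="user", content=text)],
        )
        # Background sleeptime runs are issued asynchronously; their run ids are
        # attached to the usage stats so callers can poll the jobs API later.
        print(response.usage.run_ids)
        return response
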
letta/server/rest_api/routers/v1/__init__.py CHANGED
@@ -1,5 +1,6 @@
 from letta.server.rest_api.routers.v1.agents import router as agents_router
 from letta.server.rest_api.routers.v1.blocks import router as blocks_router
+from letta.server.rest_api.routers.v1.embeddings import router as embeddings_router
 from letta.server.rest_api.routers.v1.groups import router as groups_router
 from letta.server.rest_api.routers.v1.health import router as health_router
 from letta.server.rest_api.routers.v1.identities import router as identities_router
@@ -32,4 +33,5 @@ ROUTERS = [
     tags_router,
     messages_router,
     voice_router,
+    embeddings_router,
 ]
letta/server/rest_api/routers/v1/embeddings.py ADDED
@@ -0,0 +1,20 @@
+from typing import Optional
+
+from fastapi import APIRouter, Depends, Header
+
+from letta.server.rest_api.utils import get_letta_server
+from letta.server.server import SyncServer
+
+router = APIRouter(prefix="/embeddings", tags=["embeddings"])
+
+
+@router.get("/total_storage_size", response_model=float, operation_id="get_total_storage_size")
+def get_embeddings_storage_size(
+    server: SyncServer = Depends(get_letta_server),
+    actor_id: Optional[str] = Header(None, alias="user_id"), # Extract user_id from header, default to None if not present
+):
+    """
+    Get the total size of all embeddings in the database for a user in GB.
+    """
+    actor = server.user_manager.get_user_or_default(user_id=actor_id)
+    return server.passage_manager.estimate_embeddings_size_GB(actor=actor)
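Note: a quick smoke test for the new route once a server is running; the base URL, port, and user id below are placeholders, and the `/v1` prefix is assumed from how the v1 routers are mounted:

    import requests

    BASE_URL = "http://localhost:8283/v1"  # placeholder deployment URL

    resp = requests.get(
        f"{BASE_URL}/embeddings/total_storage_size",
        headers={"user_id": "user-123"},  # hypothetical actor id; omit to fall back to the default user
    )
    resp.raise_for_status()
    print(f"Estimated embedding storage: {resp.json():.6f} GB")
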
letta/server/rest_api/routers/v1/llms.py CHANGED
@@ -13,7 +13,7 @@ router = APIRouter(prefix="/models", tags=["models", "llms"])
 
 
 @router.get("/", response_model=List[LLMConfig], operation_id="list_models")
-def list_llm_backends(
+def list_llm_models(
     server: "SyncServer" = Depends(get_letta_server),
 ):
 
@@ -23,7 +23,7 @@ def list_llm_backends(
 
 
 @router.get("/embedding", response_model=List[EmbeddingConfig], operation_id="list_embedding_models")
-def list_embedding_backends(
+def list_embedding_models(
     server: "SyncServer" = Depends(get_letta_server),
 ):
 
letta/server/rest_api/routers/v1/sources.py CHANGED
@@ -20,6 +20,17 @@ from letta.utils import sanitize_filename
 router = APIRouter(prefix="/sources", tags=["sources"])
 
 
+@router.get("/count", response_model=int, operation_id="count_sources")
+def count_sources(
+    server: "SyncServer" = Depends(get_letta_server),
+    actor_id: Optional[str] = Header(None, alias="user_id"), # Extract user_id from header, default to None if not present
+):
+    """
+    Count all data sources created by a user.
+    """
+    return server.source_manager.size(actor=server.user_manager.get_user_or_default(user_id=actor_id))
+
+
 @router.get("/{source_id}", response_model=Source, operation_id="retrieve_source")
 def retrieve_source(
     source_id: str,
letta/server/rest_api/routers/v1/tools.py CHANGED
@@ -9,7 +9,7 @@ from composio.exceptions import (
     EnumMetadataNotFound,
     EnumStringNotFound,
 )
-from fastapi import APIRouter, Body, Depends, Header, HTTPException
+from fastapi import APIRouter, Body, Depends, Header, HTTPException, Query
 
 from letta.errors import LettaToolCreateError
 from letta.functions.mcp_client.exceptions import MCPTimeoutError
@@ -40,6 +40,24 @@ def delete_tool(
     server.tool_manager.delete_tool_by_id(tool_id=tool_id, actor=actor)
 
 
+@router.get("/count", response_model=int, operation_id="count_tools")
+def count_tools(
+    server: SyncServer = Depends(get_letta_server),
+    actor_id: Optional[str] = Header(None, alias="user_id"),
+    include_base_tools: Optional[bool] = Query(False, description="Include built-in Letta tools in the count"),
+):
+    """
+    Get a count of all tools available to agents belonging to the org of the user.
+    """
+    try:
+        return server.tool_manager.size(
+            actor=server.user_manager.get_user_or_default(user_id=actor_id), include_base_tools=include_base_tools
+        )
+    except Exception as e:
+        print(f"Error occurred: {e}")
+        raise HTTPException(status_code=500, detail=str(e))
+
+
 @router.get("/{tool_id}", response_model=Tool, operation_id="retrieve_tool")
 def retrieve_tool(
     tool_id: str,
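Note: the tools count endpoint takes the actor id header plus an optional `include_base_tools` query flag; the sources counterpart added above is header-only. A hedged sketch of calling both (placeholder URL and user id, `/v1` prefix assumed):

    import requests

    BASE_URL = "http://localhost:8283/v1"  # placeholder deployment URL
    HEADERS = {"user_id": "user-123"}      # hypothetical actor id

    # Default excludes built-in Letta tools; the flag adds them to the count.
    custom_only = requests.get(f"{BASE_URL}/tools/count", headers=HEADERS).json()
    with_base = requests.get(
        f"{BASE_URL}/tools/count",
        headers=HEADERS,
        params={"include_base_tools": "true"},
    ).json()
    print(custom_only, with_base)

    # The new sources count works the same way, header only:
    print(requests.get(f"{BASE_URL}/sources/count", headers=HEADERS).json())
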
letta/services/passage_manager.py CHANGED
@@ -3,6 +3,7 @@ from typing import List, Optional
 
 from openai import OpenAI
 
+from letta.constants import MAX_EMBEDDING_DIM
 from letta.embeddings import embedding_model, parse_and_chunk_text
 from letta.orm.errors import NoResultFound
 from letta.orm.passage import AgentPassage, SourcePassage
@@ -218,3 +219,16 @@ class PassageManager:
         """
         with self.session_maker() as session:
             return AgentPassage.size(db_session=session, actor=actor, agent_id=agent_id)
+
+    def estimate_embeddings_size_GB(
+        self,
+        actor: PydanticUser,
+        agent_id: Optional[str] = None,
+    ) -> float:
+        """
+        Estimate the size of the embeddings in GB.
+        """
+        BYTES_PER_EMBEDDING_DIM = 4
+        BYTES_PER_GB = 1024 * 1024 * 1024
+        GB_PER_EMBEDDING = BYTES_PER_EMBEDDING_DIM / BYTES_PER_GB * MAX_EMBEDDING_DIM
+        return self.size(actor=actor, agent_id=agent_id) * GB_PER_EMBEDDING
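Note: the estimate is pure constant arithmetic on the passage count. A back-of-the-envelope check, assuming `MAX_EMBEDDING_DIM` is 4096 (the actual value lives in `letta/constants.py` and may differ):

    # Assumed value; see letta/constants.py for the real MAX_EMBEDDING_DIM.
    MAX_EMBEDDING_DIM = 4096
    BYTES_PER_EMBEDDING_DIM = 4            # float32 components
    BYTES_PER_GB = 1024 * 1024 * 1024

    gb_per_embedding = BYTES_PER_EMBEDDING_DIM / BYTES_PER_GB * MAX_EMBEDDING_DIM
    print(gb_per_embedding)                # ~1.53e-05 GB per stored passage
    print(100_000 * gb_per_embedding)      # ~1.53 GB for 100k passages
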
letta/services/tool_executor/tool_executor.py CHANGED
@@ -192,7 +192,7 @@ class LettaCoreToolExecutor(ToolExecutor):
         AgentManager().rebuild_system_prompt(agent_id=agent_state.id, actor=actor, force=True)
         return None
 
-    def core_memory_append(self, agent_state: "AgentState", actor: User, label: str, content: str) -> Optional[str]:
+    def core_memory_append(self, agent_state: AgentState, actor: User, label: str, content: str) -> Optional[str]:
         """
         Append to the contents of core memory.
 
@@ -211,7 +211,7 @@ class LettaCoreToolExecutor(ToolExecutor):
 
     def core_memory_replace(
         self,
-        agent_state: "AgentState",
+        agent_state: AgentState,
         actor: User,
         label: str,
         old_content: str,
@@ -237,7 +237,8 @@ class LettaCoreToolExecutor(ToolExecutor):
         return None
 
     def memory_replace(
-        agent_state: "AgentState",
+        self,
+        agent_state: AgentState,
         actor: User,
         label: str,
         old_str: str,
@@ -326,7 +327,8 @@ class LettaCoreToolExecutor(ToolExecutor):
         return success_msg
 
     def memory_insert(
-        agent_state: "AgentState",
+        self,
+        agent_state: AgentState,
         actor: User,
         label: str,
         new_str: str,
@@ -407,7 +409,7 @@ class LettaCoreToolExecutor(ToolExecutor):
 
         return success_msg
 
-    def memory_rethink(agent_state: "AgentState", actor: User, label: str, new_memory: str) -> str:
+    def memory_rethink(self, agent_state: AgentState, actor: User, label: str, new_memory: str) -> str:
         """
         The memory_rethink command allows you to completely rewrite the contents of a
         memory block. Use this tool to make large sweeping changes (e.g. when you want
@@ -458,7 +460,7 @@ class LettaCoreToolExecutor(ToolExecutor):
         # return None
         return success_msg
 
-    def memory_finish_edits(agent_state: "AgentState") -> None:
+    def memory_finish_edits(self, agent_state: AgentState, actor: User) -> None:
         """
         Call the memory_finish_edits command when you are finished making edits
         (integrating all new information) into the memory blocks. This function
letta/services/tool_manager.py CHANGED
@@ -29,14 +29,6 @@ logger = get_logger(__name__)
 class ToolManager:
     """Manager class to handle business logic related to Tools."""
 
-    BASE_TOOL_NAMES = [
-        "send_message",
-        "conversation_search",
-        "archival_memory_insert",
-        "archival_memory_search",
-    ]
-    BASE_MEMORY_TOOL_NAMES = ["core_memory_append", "core_memory_replace"]
-
     def __init__(self):
         # Fetching the db_context similarly as in OrganizationManager
         from letta.server.db import db_context
@@ -149,12 +141,17 @@ class ToolManager:
     def size(
         self,
         actor: PydanticUser,
+        include_base_tools: bool,
     ) -> int:
         """
         Get the total count of tools for the given user.
+
+        If include_builtin is True, it will also count the built-in tools.
         """
         with self.session_maker() as session:
-            return ToolModel.size(db_session=session, actor=actor)
+            if include_base_tools:
+                return ToolModel.size(db_session=session, actor=actor)
+            return ToolModel.size(db_session=session, actor=actor, name=LETTA_TOOL_SET)
 
     @enforce_types
     def update_tool_by_id(self, tool_id: str, tool_update: ToolUpdate, actor: PydanticUser) -> PydanticTool:
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: letta-nightly
-Version: 0.7.5.dev20250426104040
+Version: 0.7.5.dev20250428110034
 Summary: Create LLM agents with long-term memory and custom tools
 License: Apache License
 Author: Letta Team
@@ -6,7 +6,7 @@ letta/agents/base_agent.py,sha256=yjB1Yz6L-9hTFinqkvyf6c-8dDX9O4u8VCrKrvA4G3s,23
 letta/agents/ephemeral_agent.py,sha256=el-SUF_16vv_7OouIR-6z0pAE9Yc0PLibygvfCKwqfo,2736
 letta/agents/ephemeral_memory_agent.py,sha256=tVRsL18EvruNyrhzPkzLqiptfWa9jAC2GmSjUf5zKQo,4821
 letta/agents/helpers.py,sha256=sYfjJLCEQg0aaKDRIJ2Xm_fG1yivCB9wU6bSskZMPKg,2568
-letta/agents/letta_agent.py,sha256=9kOJW8hU_UhRwaVujYBWxVxsOGbh367xfdymI81RdNI,17816
+letta/agents/letta_agent.py,sha256=Y_hqJjFcFnVU2-kdbMmtKYp4l9_yCXeHCjvo8qqI1gU,18162
 letta/agents/letta_agent_batch.py,sha256=YsuYKwanRAmSPwmT5NVG3oIyOjlOz08hrQgpgRNhXt0,22899
 letta/agents/voice_agent.py,sha256=qzDw7MWQYwxsqOT4nIJ9OSl72n7b2Xy3ZUqc2_AEP5M,17334
 letta/benchmark/benchmark.py,sha256=ebvnwfp3yezaXOQyGXkYCDYpsmre-b9hvNtnyx4xkG0,3701
@@ -40,9 +40,10 @@ letta/functions/mcp_client/stdio_client.py,sha256=2oouLGphu4S15OrYj97n9_ZYZo-GMR
 letta/functions/mcp_client/types.py,sha256=nmcnQn2EpxXzXg5_pWPsHZobfxO6OucaUgz1bVvam7o,1411
 letta/functions/schema_generator.py,sha256=OoXDHd5oZMNjljZJplEIMOMmKvxO_rfDFtAi-E9EHMA,22488
 letta/groups/dynamic_multi_agent.py,sha256=OLCxhICFLYyx8wjKGPr1INc6pniEuk4YGZyZhq2vkiY,12230
-letta/groups/helpers.py,sha256=Cof7vfZMOT2gApYpROpiAyKSKrofP1UPJxZQWFcZaqA,4336
+letta/groups/helpers.py,sha256=pfvwIia27wKWBgWjhN12DLkHRfzNw6R9nSBZrrPEmx4,4459
 letta/groups/round_robin_multi_agent.py,sha256=uUJff0bO68udOREiKFWeS7eEQlk3bF7hcfLSFXMScqI,6999
-letta/groups/sleeptime_multi_agent.py,sha256=aGtL7YiCcy1pjEXHNf85to8Wa2AT4m47Su4qXSmP3Pg,10054
+letta/groups/sleeptime_multi_agent.py,sha256=yfi78hnCZ24Alux95dHE6s8JvlLR9LjHqKpRf7Rvpic,10217
+letta/groups/sleeptime_multi_agent_v2.py,sha256=MPOjB8LMAW1R2SAEZC9TIOk12nWqEPzMp_80t8cF1Wg,10064
 letta/groups/supervisor_multi_agent.py,sha256=ml8Gi9gyVjPuVZjAJAkpGZDjnM7GOS50NkKf5SIutvQ,4455
 letta/helpers/__init__.py,sha256=p0luQ1Oe3Skc6sH4O58aHHA3Qbkyjifpuq0DZ1GAY0U,59
 letta/helpers/composio_helpers.py,sha256=5SznD1Y0Y1rV4_wu-uCaZdDU2tNedk-RIX0M9-0r6yo,947
@@ -257,23 +258,24 @@ letta/server/rest_api/optimistic_json_parser.py,sha256=SS60lTp1oH2MXbHOChEnNm46x
 letta/server/rest_api/routers/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 letta/server/rest_api/routers/openai/chat_completions/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 letta/server/rest_api/routers/openai/chat_completions/chat_completions.py,sha256=Ksh_F6hJDOrafY2Gxz7NQdQZXBNeX-3-w4mVEQgMhrQ,5327
-letta/server/rest_api/routers/v1/__init__.py,sha256=M-L-Ls4rQaVyQvMjFNEu3NhcRLCvsbOKI-l7F4JxifQ,1557
+letta/server/rest_api/routers/v1/__init__.py,sha256=_skmAcDOK9ovHKfywRaBgigo3IvPmnUSQSR2hGVCOhY,1664
 letta/server/rest_api/routers/v1/agents.py,sha256=3mbPzEKBCzJmpUBUmJrs1WbmkIGTjEJ32HNUb1Lo5_M,34676
 letta/server/rest_api/routers/v1/blocks.py,sha256=Sefvon0jLvlNh0oAzntUcDZptnutuJOf-2Wcad_45Dg,4169
+letta/server/rest_api/routers/v1/embeddings.py,sha256=jtfZkffCDnIfPqu9-fVLu7j3rpTOl_IuTIwJ0HQCkMA,786
 letta/server/rest_api/routers/v1/groups.py,sha256=sLXkw8kgf9fhaQwb-n0SVbyzH6-e1kdzNuqGbdvPPgo,10890
 letta/server/rest_api/routers/v1/health.py,sha256=MoOjkydhGcJXTiuJrKIB0etVXiRMdTa51S8RQ8-50DQ,399
 letta/server/rest_api/routers/v1/identities.py,sha256=fvp-0cwvb4iX1fUGPkL--9nq8YD3tIE47kYRxUgOlp4,7462
 letta/server/rest_api/routers/v1/jobs.py,sha256=4oeJfI2odNGubU_g7WSORJhn_usFsbRaD-qm86rve1E,2746
-letta/server/rest_api/routers/v1/llms.py,sha256=lYp5URXtZk1yu_Pe-p1Wq1uQ0qeb6aWtx78rXSB7N_E,881
+letta/server/rest_api/routers/v1/llms.py,sha256=-3VBkh4vUJvrldLy2ptRnexb33psa1zPHaLiQ812xyE,877
 letta/server/rest_api/routers/v1/messages.py,sha256=3hfaiiCBiWr-wub_uzr3Vyh4IBTZYwGqeDG-2h6Xlus,5753
 letta/server/rest_api/routers/v1/organizations.py,sha256=r7rj-cA3shgAgM0b2JCMqjYsDIFv3ruZjU7SYbPGGqg,2831
 letta/server/rest_api/routers/v1/providers.py,sha256=MVfAUvXj_2jx8XFwSigM-8CuCfEATW60h8J5UEmAhp0,3146
 letta/server/rest_api/routers/v1/runs.py,sha256=9nuJRjBtRgZPq3CiCEUA_3S2xPHFP5DsJxIenH5OO34,8847
 letta/server/rest_api/routers/v1/sandbox_configs.py,sha256=9hqnnMwJ3wCwO-Bezu3Xl8i3TDSIuInw3gSeHaKUXfE,8526
-letta/server/rest_api/routers/v1/sources.py,sha256=U9cf7DlqKAvXgNnmyOvr2XLBHiJbimFCQw__gV__HV8,10709
+letta/server/rest_api/routers/v1/sources.py,sha256=7cCcgZm9AtUFaWlMcyovC-7f_eNhJa4EcDkZuQbNJo8,11148
 letta/server/rest_api/routers/v1/steps.py,sha256=DVVwaxLNbNAgWpr2oQkrNjdS-wi0bP8kVJZUO-hiaf8,3275
 letta/server/rest_api/routers/v1/tags.py,sha256=coydgvL6-9cuG2Hy5Ea7QY3inhTHlsf69w0tcZenBus,880
-letta/server/rest_api/routers/v1/tools.py,sha256=vDDhG25vQOz3y12XN_dayh4OweW1rjaLHxZJuJgZkQA,18821
+letta/server/rest_api/routers/v1/tools.py,sha256=FXFx8J4Zs-pZ1H8andFzI5Pyv-PJkY8YMlWkZlObGdQ,19544
 letta/server/rest_api/routers/v1/users.py,sha256=G5DBHSkPfBgVHN2Wkm-rVYiLQAudwQczIq2Z3YLdbVo,2277
 letta/server/rest_api/routers/v1/voice.py,sha256=0lerWjrKLkt4gXLhZl1cIcgstOz9Q2HZwc67L58BCXE,2451
 letta/server/rest_api/static_files.py,sha256=NG8sN4Z5EJ8JVQdj19tkFa9iQ1kBPTab9f_CUxd_u4Q,3143
@@ -307,7 +309,7 @@ letta/services/mcp/stdio_client.py,sha256=wdPzTqSRkibjt9pXhwi0Nul_z_cTAPim-OHjLc
 letta/services/mcp/types.py,sha256=nmcnQn2EpxXzXg5_pWPsHZobfxO6OucaUgz1bVvam7o,1411
 letta/services/message_manager.py,sha256=iRFFu7WP9GBtGKrQp5Igiqp_wonSfRKZ_Ran5X6SZZA,17946
 letta/services/organization_manager.py,sha256=Ax0KmPSc_YYsYaxeld9gc7ST-J6DemHQ542DD7l7AWA,3989
-letta/services/passage_manager.py,sha256=KY18gHTbx8ROBsOeR7ZAefTMGZwzbxYqOjbadqVFiyQ,9121
+letta/services/passage_manager.py,sha256=4ALhe4lDdLiGQptBXdvonQ9Sv4SpNSI5TlmQ_1TRfeU,9619
 letta/services/per_agent_lock_manager.py,sha256=porM0cKKANQ1FvcGXOO_qM7ARk5Fgi1HVEAhXsAg9-4,546
 letta/services/provider_manager.py,sha256=_gEBW0tYIf2vJEGGYxk-nvogrFI9sjFl_97MSL5WC2s,3759
 letta/services/sandbox_config_manager.py,sha256=ATgZNWNpkdIQDUPy4ABsguHQba2PZf51-c4Ji60MzLE,13361
@@ -319,8 +321,8 @@ letta/services/summarizer/summarizer.py,sha256=4rbbzcB_lY4-3ybT8HMxM8OskLC38YCs9
 letta/services/tool_executor/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 letta/services/tool_executor/tool_execution_manager.py,sha256=RcmnwPougb8AxIwKdC4N9ZxTvOQqJyjI6CaVHF2HBi4,4505
 letta/services/tool_executor/tool_execution_sandbox.py,sha256=hu-SVqfRalJGcXRKTpbYkqgX-DZH3Uky_eD_vh0kx6s,24704
-letta/services/tool_executor/tool_executor.py,sha256=keWIzQuwqSzcC6kWcbTY_SfclhKtOT5CZNE-r3OBWNk,27372
-letta/services/tool_manager.py,sha256=lFnQkgBYXAXl9q_8_TqJX_UZtLTATXmPliCdm8_7K8M,10561
+letta/services/tool_executor/tool_executor.py,sha256=k2yEzLpU18SFFA6NWrf5NYbCh505ixMpnf6pnBhmV8s,27413
+letta/services/tool_manager.py,sha256=ZtDRk9IODlMSJgwAzI1RQVee0fqgFzNu8iFZ92qwm-c,10569
 letta/services/tool_sandbox/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 letta/services/tool_sandbox/base.py,sha256=pUnPFkEg9I5ktMuT4AOOxbTnTmZTGcTA2phLe1H1EdY,8306
 letta/services/tool_sandbox/e2b_sandbox.py,sha256=umsXfolzM_j67izswECDdVfnlcm03wLpMoZtS6SZ0sc,6147
@@ -332,8 +334,8 @@ letta/streaming_utils.py,sha256=jLqFTVhUL76FeOuYk8TaRQHmPTf3HSRc2EoJwxJNK6U,1194
 letta/system.py,sha256=dnOrS2FlRMwijQnOvfrky0Lg8wEw-FUq2zzfAJOUSKA,8477
 letta/tracing.py,sha256=RstWXpfWVF77nmb_ISORVWd9IQw2Ky3de40k_S70yKI,8258
 letta/utils.py,sha256=IZFvtj9WYcrxUbkoUUYGDxMYQYdn5SgfqsvnARGsAzc,32245
-letta_nightly-0.7.5.dev20250426104040.dist-info/LICENSE,sha256=mExtuZ_GYJgDEI38GWdiEYZizZS4KkVt2SF1g_GPNhI,10759
-letta_nightly-0.7.5.dev20250426104040.dist-info/METADATA,sha256=LFjhA8EEQOWmWwIneP1zo--Cw9iML8p8MSUAxXUYVjw,22282
-letta_nightly-0.7.5.dev20250426104040.dist-info/WHEEL,sha256=FMvqSimYX_P7y0a7UY-_Mc83r5zkBZsCYPm7Lr0Bsq4,88
-letta_nightly-0.7.5.dev20250426104040.dist-info/entry_points.txt,sha256=2zdiyGNEZGV5oYBuS-y2nAAgjDgcC9yM_mHJBFSRt5U,40
-letta_nightly-0.7.5.dev20250426104040.dist-info/RECORD,,
+letta_nightly-0.7.5.dev20250428110034.dist-info/LICENSE,sha256=mExtuZ_GYJgDEI38GWdiEYZizZS4KkVt2SF1g_GPNhI,10759
+letta_nightly-0.7.5.dev20250428110034.dist-info/METADATA,sha256=hhRpH0hsiMPeGsUc0483f3XtXyn3aMImulVCkpOdv6A,22282
+letta_nightly-0.7.5.dev20250428110034.dist-info/WHEEL,sha256=FMvqSimYX_P7y0a7UY-_Mc83r5zkBZsCYPm7Lr0Bsq4,88
+letta_nightly-0.7.5.dev20250428110034.dist-info/entry_points.txt,sha256=2zdiyGNEZGV5oYBuS-y2nAAgjDgcC9yM_mHJBFSRt5U,40
+letta_nightly-0.7.5.dev20250428110034.dist-info/RECORD,,