letta-nightly 0.6.53.dev20250418104238__py3-none-any.whl → 0.6.54.dev20250419104029__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (68)
  1. letta/__init__.py +1 -1
  2. letta/agent.py +6 -31
  3. letta/agents/letta_agent.py +1 -0
  4. letta/agents/letta_agent_batch.py +369 -18
  5. letta/constants.py +15 -4
  6. letta/functions/function_sets/base.py +168 -21
  7. letta/groups/sleeptime_multi_agent.py +3 -3
  8. letta/helpers/converters.py +1 -1
  9. letta/helpers/message_helper.py +1 -0
  10. letta/jobs/llm_batch_job_polling.py +39 -10
  11. letta/jobs/scheduler.py +54 -13
  12. letta/jobs/types.py +26 -6
  13. letta/llm_api/anthropic_client.py +3 -1
  14. letta/llm_api/llm_api_tools.py +7 -1
  15. letta/llm_api/openai.py +2 -0
  16. letta/orm/agent.py +5 -29
  17. letta/orm/base.py +2 -2
  18. letta/orm/enums.py +1 -0
  19. letta/orm/job.py +5 -0
  20. letta/orm/llm_batch_items.py +2 -2
  21. letta/orm/llm_batch_job.py +5 -2
  22. letta/orm/message.py +12 -4
  23. letta/orm/passage.py +0 -6
  24. letta/orm/sqlalchemy_base.py +0 -3
  25. letta/personas/examples/sleeptime_doc_persona.txt +2 -0
  26. letta/prompts/system/sleeptime.txt +20 -11
  27. letta/prompts/system/sleeptime_doc_ingest.txt +35 -0
  28. letta/schemas/agent.py +24 -1
  29. letta/schemas/enums.py +3 -1
  30. letta/schemas/job.py +39 -0
  31. letta/schemas/letta_message.py +24 -7
  32. letta/schemas/letta_request.py +7 -2
  33. letta/schemas/letta_response.py +3 -1
  34. letta/schemas/llm_batch_job.py +4 -3
  35. letta/schemas/llm_config.py +6 -2
  36. letta/schemas/message.py +11 -1
  37. letta/schemas/providers.py +10 -58
  38. letta/serialize_schemas/marshmallow_agent.py +25 -22
  39. letta/serialize_schemas/marshmallow_message.py +1 -1
  40. letta/server/db.py +75 -49
  41. letta/server/rest_api/app.py +1 -0
  42. letta/server/rest_api/interface.py +7 -2
  43. letta/server/rest_api/routers/v1/__init__.py +2 -0
  44. letta/server/rest_api/routers/v1/agents.py +33 -6
  45. letta/server/rest_api/routers/v1/messages.py +132 -0
  46. letta/server/rest_api/routers/v1/sources.py +21 -2
  47. letta/server/rest_api/utils.py +23 -10
  48. letta/server/server.py +67 -21
  49. letta/services/agent_manager.py +44 -21
  50. letta/services/group_manager.py +2 -2
  51. letta/services/helpers/agent_manager_helper.py +5 -3
  52. letta/services/job_manager.py +34 -5
  53. letta/services/llm_batch_manager.py +200 -57
  54. letta/services/message_manager.py +23 -1
  55. letta/services/passage_manager.py +2 -2
  56. letta/services/tool_executor/tool_execution_manager.py +13 -3
  57. letta/services/tool_executor/tool_execution_sandbox.py +0 -1
  58. letta/services/tool_executor/tool_executor.py +48 -9
  59. letta/services/tool_sandbox/base.py +24 -6
  60. letta/services/tool_sandbox/e2b_sandbox.py +25 -5
  61. letta/services/tool_sandbox/local_sandbox.py +23 -7
  62. letta/settings.py +2 -2
  63. {letta_nightly-0.6.53.dev20250418104238.dist-info → letta_nightly-0.6.54.dev20250419104029.dist-info}/METADATA +2 -1
  64. {letta_nightly-0.6.53.dev20250418104238.dist-info → letta_nightly-0.6.54.dev20250419104029.dist-info}/RECORD +67 -65
  65. letta/sleeptime_agent.py +0 -61
  66. {letta_nightly-0.6.53.dev20250418104238.dist-info → letta_nightly-0.6.54.dev20250419104029.dist-info}/LICENSE +0 -0
  67. {letta_nightly-0.6.53.dev20250418104238.dist-info → letta_nightly-0.6.54.dev20250419104029.dist-info}/WHEEL +0 -0
  68. {letta_nightly-0.6.53.dev20250418104238.dist-info → letta_nightly-0.6.54.dev20250419104029.dist-info}/entry_points.txt +0 -0
@@ -1,10 +1,10 @@
1
1
  import datetime
2
- from typing import List, Optional
2
+ from typing import Any, Dict, List, Optional, Tuple
3
3
 
4
4
  from anthropic.types.beta.messages import BetaMessageBatch, BetaMessageBatchIndividualResponse
5
- from sqlalchemy import tuple_
5
+ from sqlalchemy import func, tuple_
6
6
 
7
- from letta.jobs.types import BatchPollingResult, ItemUpdateInfo
7
+ from letta.jobs.types import BatchPollingResult, ItemUpdateInfo, RequestStatusUpdateInfo, StepStatusUpdateInfo
8
8
  from letta.log import get_logger
9
9
  from letta.orm.llm_batch_items import LLMBatchItem
10
10
  from letta.orm.llm_batch_job import LLMBatchJob
@@ -28,11 +28,12 @@ class LLMBatchManager:
28
28
  self.session_maker = db_context
29
29
 
30
30
  @enforce_types
31
- def create_batch_job(
31
+ def create_llm_batch_job(
32
32
  self,
33
33
  llm_provider: ProviderType,
34
34
  create_batch_response: BetaMessageBatch,
35
35
  actor: PydanticUser,
36
+ letta_batch_job_id: str,
36
37
  status: JobStatus = JobStatus.created,
37
38
  ) -> PydanticLLMBatchJob:
38
39
  """Create a new LLM batch job."""
@@ -42,51 +43,52 @@ class LLMBatchManager:
42
43
  llm_provider=llm_provider,
43
44
  create_batch_response=create_batch_response,
44
45
  organization_id=actor.organization_id,
46
+ letta_batch_job_id=letta_batch_job_id,
45
47
  )
46
48
  batch.create(session, actor=actor)
47
49
  return batch.to_pydantic()
48
50
 
49
51
  @enforce_types
50
- def get_batch_job_by_id(self, batch_id: str, actor: Optional[PydanticUser] = None) -> PydanticLLMBatchJob:
52
+ def get_llm_batch_job_by_id(self, llm_batch_id: str, actor: Optional[PydanticUser] = None) -> PydanticLLMBatchJob:
51
53
  """Retrieve a single batch job by ID."""
52
54
  with self.session_maker() as session:
53
- batch = LLMBatchJob.read(db_session=session, identifier=batch_id, actor=actor)
55
+ batch = LLMBatchJob.read(db_session=session, identifier=llm_batch_id, actor=actor)
54
56
  return batch.to_pydantic()
55
57
 
56
58
  @enforce_types
57
- def update_batch_status(
59
+ def update_llm_batch_status(
58
60
  self,
59
- batch_id: str,
61
+ llm_batch_id: str,
60
62
  status: JobStatus,
61
63
  actor: Optional[PydanticUser] = None,
62
64
  latest_polling_response: Optional[BetaMessageBatch] = None,
63
65
  ) -> PydanticLLMBatchJob:
64
66
  """Update a batch job’s status and optionally its polling response."""
65
67
  with self.session_maker() as session:
66
- batch = LLMBatchJob.read(db_session=session, identifier=batch_id, actor=actor)
68
+ batch = LLMBatchJob.read(db_session=session, identifier=llm_batch_id, actor=actor)
67
69
  batch.status = status
68
70
  batch.latest_polling_response = latest_polling_response
69
71
  batch.last_polled_at = datetime.datetime.now(datetime.timezone.utc)
70
72
  batch = batch.update(db_session=session, actor=actor)
71
73
  return batch.to_pydantic()
72
74
 
73
- def bulk_update_batch_statuses(
75
+ def bulk_update_llm_batch_statuses(
74
76
  self,
75
77
  updates: List[BatchPollingResult],
76
78
  ) -> None:
77
79
  """
78
80
  Efficiently update many LLMBatchJob rows. This is used by the cron jobs.
79
81
 
80
- `updates` = [(batch_id, new_status, polling_response_or_None), …]
82
+ `updates` = [(llm_batch_id, new_status, polling_response_or_None), …]
81
83
  """
82
84
  now = datetime.datetime.now(datetime.timezone.utc)
83
85
 
84
86
  with self.session_maker() as session:
85
87
  mappings = []
86
- for batch_id, status, response in updates:
88
+ for llm_batch_id, status, response in updates:
87
89
  mappings.append(
88
90
  {
89
- "id": batch_id,
91
+ "id": llm_batch_id,
90
92
  "status": status,
91
93
  "latest_polling_response": response,
92
94
  "last_polled_at": now,
@@ -97,14 +99,51 @@ class LLMBatchManager:
97
99
  session.commit()
98
100
 
99
101
  @enforce_types
100
- def delete_batch_request(self, batch_id: str, actor: PydanticUser) -> None:
102
+ def list_llm_batch_jobs(
103
+ self,
104
+ letta_batch_id: str,
105
+ limit: Optional[int] = None,
106
+ actor: Optional[PydanticUser] = None,
107
+ after: Optional[str] = None,
108
+ ) -> List[PydanticLLMBatchItem]:
109
+ """
110
+ List all LLM batch jobs for a given letta_batch_id, optionally filtered by additional criteria and limited in count.
111
+
112
+ Optional filters:
113
+ - after: A cursor string. Only jobs with an `id` greater than this value are returned.
114
+ - actor: If provided, restrict the result set to the actor's organization.
115
+ - limit: If provided, cap the number of jobs returned.
116
+
117
+
118
+ The results are ordered by their id in ascending order.
119
+ """
120
+ with self.session_maker() as session:
121
+ query = session.query(LLMBatchJob).filter(LLMBatchJob.letta_batch_job_id == letta_batch_id)
122
+
123
+ if actor is not None:
124
+ query = query.filter(LLMBatchJob.organization_id == actor.organization_id)
125
+
126
+ # Additional optional filters
127
+ if after is not None:
128
+ query = query.filter(LLMBatchJob.id > after)
129
+
130
+ query = query.order_by(LLMBatchJob.id.asc())
131
+
132
+ if limit is not None:
133
+ query = query.limit(limit)
134
+
135
+ results = query.all()
136
+ return [item.to_pydantic() for item in results]
137
+
138
+ @enforce_types
139
+ def delete_llm_batch_request(self, llm_batch_id: str, actor: PydanticUser) -> None:
101
140
  """Hard delete a batch job by ID."""
102
141
  with self.session_maker() as session:
103
- batch = LLMBatchJob.read(db_session=session, identifier=batch_id, actor=actor)
142
+ batch = LLMBatchJob.read(db_session=session, identifier=llm_batch_id, actor=actor)
104
143
  batch.hard_delete(db_session=session, actor=actor)
105
144
 
106
145
  @enforce_types
107
- def list_running_batches(self, actor: Optional[PydanticUser] = None) -> List[PydanticLLMBatchJob]:
146
+ def list_running_llm_batches(self, actor: Optional[PydanticUser] = None) -> List[PydanticLLMBatchJob]:
108
147
  """Return all running LLM batch jobs, optionally filtered by actor's organization."""
109
148
  with self.session_maker() as session:
110
149
  query = session.query(LLMBatchJob).filter(LLMBatchJob.status == JobStatus.running)
@@ -116,9 +155,9 @@ class LLMBatchManager:
116
155
  return [batch.to_pydantic() for batch in results]
117
156
 
118
157
  @enforce_types
119
- def create_batch_item(
158
+ def create_llm_batch_item(
120
159
  self,
121
- batch_id: str,
160
+ llm_batch_id: str,
122
161
  agent_id: str,
123
162
  llm_config: LLMConfig,
124
163
  actor: PydanticUser,
@@ -129,7 +168,7 @@ class LLMBatchManager:
129
168
  """Create a new batch item."""
130
169
  with self.session_maker() as session:
131
170
  item = LLMBatchItem(
132
- batch_id=batch_id,
171
+ llm_batch_id=llm_batch_id,
133
172
  agent_id=agent_id,
134
173
  llm_config=llm_config,
135
174
  request_status=request_status,
@@ -141,14 +180,47 @@ class LLMBatchManager:
141
180
  return item.to_pydantic()
142
181
 
143
182
  @enforce_types
144
- def get_batch_item_by_id(self, item_id: str, actor: PydanticUser) -> PydanticLLMBatchItem:
183
+ def create_llm_batch_items_bulk(self, llm_batch_items: List[PydanticLLMBatchItem], actor: PydanticUser) -> List[PydanticLLMBatchItem]:
184
+ """
185
+ Create multiple batch items in bulk for better performance.
186
+
187
+ Args:
188
+ llm_batch_items: List of batch items to create
189
+ actor: User performing the action
190
+
191
+ Returns:
192
+ List of created batch items as Pydantic models
193
+ """
194
+ with self.session_maker() as session:
195
+ # Convert Pydantic models to ORM objects
196
+ orm_items = []
197
+ for item in llm_batch_items:
198
+ orm_item = LLMBatchItem(
199
+ llm_batch_id=item.llm_batch_id,
200
+ agent_id=item.agent_id,
201
+ llm_config=item.llm_config,
202
+ request_status=item.request_status,
203
+ step_status=item.step_status,
204
+ step_state=item.step_state,
205
+ organization_id=actor.organization_id,
206
+ )
207
+ orm_items.append(orm_item)
208
+
209
+ # Use the batch_create method to create all items at once
210
+ created_items = LLMBatchItem.batch_create(orm_items, session, actor=actor)
211
+
212
+ # Convert back to Pydantic models
213
+ return [item.to_pydantic() for item in created_items]
214
+
215
+ @enforce_types
216
+ def get_llm_batch_item_by_id(self, item_id: str, actor: PydanticUser) -> PydanticLLMBatchItem:
145
217
  """Retrieve a single batch item by ID."""
146
218
  with self.session_maker() as session:
147
219
  item = LLMBatchItem.read(db_session=session, identifier=item_id, actor=actor)
148
220
  return item.to_pydantic()
149
221
 
150
222
  @enforce_types
151
- def update_batch_item(
223
+ def update_llm_batch_item(
152
224
  self,
153
225
  item_id: str,
154
226
  actor: PydanticUser,
@@ -173,78 +245,149 @@ class LLMBatchManager:
173
245
  return item.update(db_session=session, actor=actor).to_pydantic()
174
246
 
175
247
  @enforce_types
176
- def list_batch_items(
248
+ def list_llm_batch_items(
177
249
  self,
178
- batch_id: str,
250
+ llm_batch_id: str,
179
251
  limit: Optional[int] = None,
180
252
  actor: Optional[PydanticUser] = None,
253
+ after: Optional[str] = None,
254
+ agent_id: Optional[str] = None,
255
+ request_status: Optional[JobStatus] = None,
256
+ step_status: Optional[AgentStepStatus] = None,
181
257
  ) -> List[PydanticLLMBatchItem]:
182
- """List all batch items for a given batch_id, optionally filtered by organization and limited in count."""
258
+ """
259
+ List all batch items for a given llm_batch_id, optionally filtered by additional criteria and limited in count.
260
+
261
+ Optional filters:
262
+ - after: A cursor string. Only items with an `id` greater than this value are returned.
263
+ - agent_id: Restrict the result set to a specific agent.
264
+ - request_status: Filter items based on their request status (e.g., created, completed, expired).
265
+ - step_status: Filter items based on their step execution status.
266
+
267
+ The results are ordered by their id in ascending order.
268
+ """
183
269
  with self.session_maker() as session:
184
- query = session.query(LLMBatchItem).filter(LLMBatchItem.batch_id == batch_id)
270
+ query = session.query(LLMBatchItem).filter(LLMBatchItem.llm_batch_id == llm_batch_id)
185
271
 
186
272
  if actor is not None:
187
273
  query = query.filter(LLMBatchItem.organization_id == actor.organization_id)
188
274
 
189
- if limit:
275
+ # Additional optional filters
276
+ if agent_id is not None:
277
+ query = query.filter(LLMBatchItem.agent_id == agent_id)
278
+ if request_status is not None:
279
+ query = query.filter(LLMBatchItem.request_status == request_status)
280
+ if step_status is not None:
281
+ query = query.filter(LLMBatchItem.step_status == step_status)
282
+ if after is not None:
283
+ query = query.filter(LLMBatchItem.id > after)
284
+
285
+ query = query.order_by(LLMBatchItem.id.asc())
286
+
287
+ if limit is not None:
190
288
  query = query.limit(limit)
191
289
 
192
290
  results = query.all()
193
291
  return [item.to_pydantic() for item in results]
194
292
 
195
- def bulk_update_batch_items_by_agent(
293
+ def bulk_update_llm_batch_items(
196
294
  self,
197
- updates: List[ItemUpdateInfo],
295
+ llm_batch_id_agent_id_pairs: List[Tuple[str, str]],
296
+ field_updates: List[Dict[str, Any]],
198
297
  ) -> None:
199
298
  """
200
- Efficiently update LLMBatchItem rows by (batch_id, agent_id).
299
+ Efficiently update multiple LLMBatchItem rows by (llm_batch_id, agent_id) pairs.
201
300
 
202
301
  Args:
203
- updates: List of tuples:
204
- (batch_id, agent_id, new_request_status, batch_request_result)
302
+ llm_batch_id_agent_id_pairs: List of (llm_batch_id, agent_id) tuples identifying items to update
303
+ field_updates: List of dictionaries containing the fields to update for each item
205
304
  """
206
- with self.session_maker() as session:
207
- # For bulk_update_mappings, we need the primary key of each row
208
- # So we must map (batch_id, agent_id) → actual PK (id)
209
- # We'll do it in one DB query using the (batch_id, agent_id) sets
305
+ if not llm_batch_id_agent_id_pairs or not field_updates:
306
+ return
210
307
 
211
- # 1. Gather the pairs
212
- key_pairs = [(b_id, a_id) for (b_id, a_id, *_rest) in updates]
308
+ if len(llm_batch_id_agent_id_pairs) != len(field_updates):
309
+ raise ValueError("batch_id_agent_id_pairs and field_updates must have the same length")
213
310
 
214
- # 2. Query items in a single step
311
+ with self.session_maker() as session:
312
+ # Lookup primary keys
215
313
  items = (
216
- session.query(LLMBatchItem.id, LLMBatchItem.batch_id, LLMBatchItem.agent_id)
217
- .filter(tuple_(LLMBatchItem.batch_id, LLMBatchItem.agent_id).in_(key_pairs))
314
+ session.query(LLMBatchItem.id, LLMBatchItem.llm_batch_id, LLMBatchItem.agent_id)
315
+ .filter(tuple_(LLMBatchItem.llm_batch_id, LLMBatchItem.agent_id).in_(llm_batch_id_agent_id_pairs))
218
316
  .all()
219
317
  )
318
+ pair_to_pk = {(b, a): id for id, b, a in items}
220
319
 
221
- # Build a map from (batch_id, agent_id) → PK id
222
- pair_to_pk = {}
223
- for row_id, row_batch_id, row_agent_id in items:
224
- pair_to_pk[(row_batch_id, row_agent_id)] = row_id
225
-
226
- # 3. Construct mappings for the PK-based bulk update
227
320
  mappings = []
228
- for batch_id, agent_id, new_status, new_result in updates:
229
- pk_id = pair_to_pk.get((batch_id, agent_id))
321
+ for (llm_batch_id, agent_id), fields in zip(llm_batch_id_agent_id_pairs, field_updates):
322
+ pk_id = pair_to_pk.get((llm_batch_id, agent_id))
230
323
  if not pk_id:
231
- # Nonexistent or mismatch → skip
232
324
  continue
233
- mappings.append(
234
- {
235
- "id": pk_id,
236
- "request_status": new_status,
237
- "batch_request_result": new_result,
238
- }
239
- )
325
+
326
+ update_fields = fields.copy()
327
+ update_fields["id"] = pk_id
328
+ mappings.append(update_fields)
240
329
 
241
330
  if mappings:
242
331
  session.bulk_update_mappings(LLMBatchItem, mappings)
243
332
  session.commit()
244
333
 
245
334
  @enforce_types
246
- def delete_batch_item(self, item_id: str, actor: PydanticUser) -> None:
335
+ def bulk_update_batch_llm_items_results_by_agent(
336
+ self,
337
+ updates: List[ItemUpdateInfo],
338
+ ) -> None:
339
+ """Update request status and batch results for multiple batch items."""
340
+ batch_id_agent_id_pairs = [(update.llm_batch_id, update.agent_id) for update in updates]
341
+ field_updates = [
342
+ {
343
+ "request_status": update.request_status,
344
+ "batch_request_result": update.batch_request_result,
345
+ }
346
+ for update in updates
347
+ ]
348
+
349
+ self.bulk_update_llm_batch_items(batch_id_agent_id_pairs, field_updates)
350
+
351
+ @enforce_types
352
+ def bulk_update_llm_batch_items_step_status_by_agent(
353
+ self,
354
+ updates: List[StepStatusUpdateInfo],
355
+ ) -> None:
356
+ """Update step status for multiple batch items."""
357
+ batch_id_agent_id_pairs = [(update.llm_batch_id, update.agent_id) for update in updates]
358
+ field_updates = [{"step_status": update.step_status} for update in updates]
359
+
360
+ self.bulk_update_llm_batch_items(batch_id_agent_id_pairs, field_updates)
361
+
362
+ @enforce_types
363
+ def bulk_update_llm_batch_items_request_status_by_agent(
364
+ self,
365
+ updates: List[RequestStatusUpdateInfo],
366
+ ) -> None:
367
+ """Update request status for multiple batch items."""
368
+ batch_id_agent_id_pairs = [(update.llm_batch_id, update.agent_id) for update in updates]
369
+ field_updates = [{"request_status": update.request_status} for update in updates]
370
+
371
+ self.bulk_update_llm_batch_items(batch_id_agent_id_pairs, field_updates)
372
+
373
+ @enforce_types
374
+ def delete_llm_batch_item(self, item_id: str, actor: PydanticUser) -> None:
247
375
  """Hard delete a batch item by ID."""
248
376
  with self.session_maker() as session:
249
377
  item = LLMBatchItem.read(db_session=session, identifier=item_id, actor=actor)
250
378
  item.hard_delete(db_session=session, actor=actor)
379
+
380
+ @enforce_types
381
+ def count_llm_batch_items(self, llm_batch_id: str) -> int:
382
+ """
383
+ Efficiently count the number of batch items for a given llm_batch_id.
384
+
385
+ Args:
386
+ llm_batch_id (str): The batch identifier to count items for.
387
+
388
+ Returns:
389
+ int: The total number of batch items associated with the given llm_batch_id.
390
+ """
391
+ with self.session_maker() as session:
392
+ count = session.query(func.count(LLMBatchItem.id)).filter(LLMBatchItem.llm_batch_id == llm_batch_id).scalar()
393
+ return count or 0
@@ -1,7 +1,7 @@
1
1
  import json
2
2
  from typing import List, Optional, Sequence
3
3
 
4
- from sqlalchemy import exists, func, select, text
4
+ from sqlalchemy import delete, exists, func, select, text
5
5
 
6
6
  from letta.log import get_logger
7
7
  from letta.orm.agent import Agent as AgentModel
@@ -348,3 +348,25 @@ class MessageManager:
348
348
  # Execute and convert each Message to its Pydantic representation.
349
349
  results = query.all()
350
350
  return [msg.to_pydantic() for msg in results]
351
+
352
+ @enforce_types
353
+ def delete_all_messages_for_agent(self, agent_id: str, actor: PydanticUser) -> int:
354
+ """
355
+ Efficiently deletes all messages associated with a given agent_id,
356
+ while enforcing permission checks and avoiding any ORM-level loads.
357
+ """
358
+ with self.session_maker() as session:
359
+ # 1) verify the agent exists and the actor has access
360
+ AgentModel.read(db_session=session, identifier=agent_id, actor=actor)
361
+
362
+ # 2) issue a SQLAlchemy Core DELETE against the mapped class
363
+ stmt = (
364
+ delete(MessageModel).where(MessageModel.agent_id == agent_id).where(MessageModel.organization_id == actor.organization_id)
365
+ )
366
+ result = session.execute(stmt)
367
+
368
+ # 3) commit once
369
+ session.commit()
370
+
371
+ # 4) return the number of rows deleted
372
+ return result.rowcount
@@ -1,4 +1,4 @@
1
- from datetime import datetime
1
+ from datetime import datetime, timezone
2
2
  from typing import List, Optional
3
3
 
4
4
  from openai import OpenAI
@@ -49,7 +49,7 @@ class PassageManager:
49
49
  "organization_id": data["organization_id"],
50
50
  "metadata_": data.get("metadata", {}),
51
51
  "is_deleted": data.get("is_deleted", False),
52
- "created_at": data.get("created_at", datetime.utcnow()),
52
+ "created_at": data.get("created_at", datetime.now(timezone.utc)),
53
53
  }
54
54
 
55
55
  if "agent_id" in data and data["agent_id"]:
@@ -3,7 +3,7 @@ from typing import Any, Dict, Optional, Tuple, Type
3
3
  from letta.log import get_logger
4
4
  from letta.orm.enums import ToolType
5
5
  from letta.schemas.agent import AgentState
6
- from letta.schemas.sandbox_config import SandboxRunResult
6
+ from letta.schemas.sandbox_config import SandboxConfig, SandboxRunResult
7
7
  from letta.schemas.tool import Tool
8
8
  from letta.schemas.user import User
9
9
  from letta.services.tool_executor.tool_executor import (
@@ -45,10 +45,18 @@ class ToolExecutorFactory:
45
45
  class ToolExecutionManager:
46
46
  """Manager class for tool execution operations."""
47
47
 
48
- def __init__(self, agent_state: AgentState, actor: User):
48
+ def __init__(
49
+ self,
50
+ agent_state: AgentState,
51
+ actor: User,
52
+ sandbox_config: Optional[SandboxConfig] = None,
53
+ sandbox_env_vars: Optional[Dict[str, Any]] = None,
54
+ ):
49
55
  self.agent_state = agent_state
50
56
  self.logger = get_logger(__name__)
51
57
  self.actor = actor
58
+ self.sandbox_config = sandbox_config
59
+ self.sandbox_env_vars = sandbox_env_vars
52
60
 
53
61
  def execute_tool(self, function_name: str, function_args: dict, tool: Tool) -> Tuple[Any, Optional[SandboxRunResult]]:
54
62
  """
@@ -67,7 +75,9 @@ class ToolExecutionManager:
67
75
  executor = ToolExecutorFactory.get_executor(tool.tool_type)
68
76
 
69
77
  # Execute the tool
70
- return executor.execute(function_name, function_args, self.agent_state, tool, self.actor)
78
+ return executor.execute(
79
+ function_name, function_args, self.agent_state, tool, self.actor, self.sandbox_config, self.sandbox_env_vars
80
+ )
71
81
 
72
82
  except Exception as e:
73
83
  self.logger.error(f"Error executing tool {function_name}: {str(e)}")
@@ -148,7 +148,6 @@ class ToolExecutionSandbox:
148
148
  temp_file.write(code)
149
149
  temp_file.flush()
150
150
  temp_file_path = temp_file.name
151
-
152
151
  try:
153
152
  if local_configs.use_venv:
154
153
  return self.run_local_dir_sandbox_venv(sbx_config, env, temp_file_path)
@@ -1,6 +1,6 @@
1
1
  import math
2
2
  from abc import ABC, abstractmethod
3
- from typing import Any, Optional, Tuple
3
+ from typing import Any, Dict, Optional, Tuple
4
4
 
5
5
  from letta.constants import COMPOSIO_ENTITY_ENV_VAR_KEY, RETRIEVAL_QUERY_DEFAULT_PAGE_SIZE
6
6
  from letta.functions.ast_parsers import coerce_dict_args_by_annotations, get_function_annotations_from_source
@@ -8,7 +8,7 @@ from letta.functions.helpers import execute_composio_action, generate_composio_a
8
8
  from letta.helpers.composio_helpers import get_composio_api_key
9
9
  from letta.helpers.json_helpers import json_dumps
10
10
  from letta.schemas.agent import AgentState
11
- from letta.schemas.sandbox_config import SandboxRunResult
11
+ from letta.schemas.sandbox_config import SandboxConfig, SandboxRunResult
12
12
  from letta.schemas.tool import Tool
13
13
  from letta.schemas.user import User
14
14
  from letta.services.agent_manager import AgentManager
@@ -25,7 +25,14 @@ class ToolExecutor(ABC):
25
25
 
26
26
  @abstractmethod
27
27
  def execute(
28
- self, function_name: str, function_args: dict, agent_state: AgentState, tool: Tool, actor: User
28
+ self,
29
+ function_name: str,
30
+ function_args: dict,
31
+ agent_state: AgentState,
32
+ tool: Tool,
33
+ actor: User,
34
+ sandbox_config: Optional[SandboxConfig] = None,
35
+ sandbox_env_vars: Optional[Dict[str, Any]] = None,
29
36
  ) -> Tuple[Any, Optional[SandboxRunResult]]:
30
37
  """Execute the tool and return the result."""
31
38
 
@@ -34,7 +41,14 @@ class LettaCoreToolExecutor(ToolExecutor):
34
41
  """Executor for LETTA core tools with direct implementation of functions."""
35
42
 
36
43
  def execute(
37
- self, function_name: str, function_args: dict, agent_state: AgentState, tool: Tool, actor: User
44
+ self,
45
+ function_name: str,
46
+ function_args: dict,
47
+ agent_state: AgentState,
48
+ tool: Tool,
49
+ actor: User,
50
+ sandbox_config: Optional[SandboxConfig] = None,
51
+ sandbox_env_vars: Optional[Dict[str, Any]] = None,
38
52
  ) -> Tuple[Any, Optional[SandboxRunResult]]:
39
53
  # Map function names to method calls
40
54
  function_map = {
@@ -184,7 +198,14 @@ class LettaMemoryToolExecutor(ToolExecutor):
184
198
  """Executor for LETTA memory core tools with direct implementation."""
185
199
 
186
200
  def execute(
187
- self, function_name: str, function_args: dict, agent_state: AgentState, tool: Tool, actor: User
201
+ self,
202
+ function_name: str,
203
+ function_args: dict,
204
+ agent_state: AgentState,
205
+ tool: Tool,
206
+ actor: User,
207
+ sandbox_config: Optional[SandboxConfig] = None,
208
+ sandbox_env_vars: Optional[Dict[str, Any]] = None,
188
209
  ) -> Tuple[Any, Optional[SandboxRunResult]]:
189
210
  # Map function names to method calls
190
211
  function_map = {
@@ -244,7 +265,14 @@ class ExternalComposioToolExecutor(ToolExecutor):
244
265
  """Executor for external Composio tools."""
245
266
 
246
267
  def execute(
247
- self, function_name: str, function_args: dict, agent_state: AgentState, tool: Tool, actor: User
268
+ self,
269
+ function_name: str,
270
+ function_args: dict,
271
+ agent_state: AgentState,
272
+ tool: Tool,
273
+ actor: User,
274
+ sandbox_config: Optional[SandboxConfig] = None,
275
+ sandbox_env_vars: Optional[Dict[str, Any]] = None,
248
276
  ) -> Tuple[Any, Optional[SandboxRunResult]]:
249
277
  action_name = generate_composio_action_from_func_name(tool.name)
250
278
 
@@ -324,7 +352,14 @@ class SandboxToolExecutor(ToolExecutor):
324
352
  """Executor for sandboxed tools."""
325
353
 
326
354
  async def execute(
327
- self, function_name: str, function_args: dict, agent_state: AgentState, tool: Tool, actor: User
355
+ self,
356
+ function_name: str,
357
+ function_args: dict,
358
+ agent_state: AgentState,
359
+ tool: Tool,
360
+ actor: User,
361
+ sandbox_config: Optional[SandboxConfig] = None,
362
+ sandbox_env_vars: Optional[Dict[str, Any]] = None,
328
363
  ) -> Tuple[Any, Optional[SandboxRunResult]]:
329
364
 
330
365
  # Store original memory state
@@ -338,9 +373,13 @@ class SandboxToolExecutor(ToolExecutor):
338
373
 
339
374
  # Execute in sandbox depending on API key
340
375
  if tool_settings.e2b_api_key:
341
- sandbox = AsyncToolSandboxE2B(function_name, function_args, actor, tool_object=tool)
376
+ sandbox = AsyncToolSandboxE2B(
377
+ function_name, function_args, actor, tool_object=tool, sandbox_config=sandbox_config, sandbox_env_vars=sandbox_env_vars
378
+ )
342
379
  else:
343
- sandbox = AsyncToolSandboxLocal(function_name, function_args, actor, tool_object=tool)
380
+ sandbox = AsyncToolSandboxLocal(
381
+ function_name, function_args, actor, tool_object=tool, sandbox_config=sandbox_config, sandbox_env_vars=sandbox_env_vars
382
+ )
344
383
 
345
384
  sandbox_run_result = await sandbox.run(agent_state=agent_state_copy)
346
385
 
@@ -7,9 +7,9 @@ from typing import Any, Dict, Optional, Tuple
7
7
 
8
8
  from letta.functions.helpers import generate_model_from_args_json_schema
9
9
  from letta.schemas.agent import AgentState
10
- from letta.schemas.sandbox_config import SandboxRunResult
10
+ from letta.schemas.sandbox_config import SandboxConfig, SandboxRunResult
11
+ from letta.schemas.tool import Tool
11
12
  from letta.services.helpers.tool_execution_helper import add_imports_and_pydantic_schemas_for_args
12
- from letta.services.organization_manager import OrganizationManager
13
13
  from letta.services.sandbox_config_manager import SandboxConfigManager
14
14
  from letta.services.tool_manager import ToolManager
15
15
 
@@ -20,12 +20,18 @@ class AsyncToolSandboxBase(ABC):
20
20
  LOCAL_SANDBOX_RESULT_END_MARKER = str(uuid.uuid5(NAMESPACE, "local-sandbox-result-end-marker"))
21
21
  LOCAL_SANDBOX_RESULT_VAR_NAME = "result_ZQqiequkcFwRwwGQMqkt"
22
22
 
23
- def __init__(self, tool_name: str, args: dict, user, tool_object=None):
23
+ def __init__(
24
+ self,
25
+ tool_name: str,
26
+ args: dict,
27
+ user,
28
+ tool_object: Optional[Tool] = None,
29
+ sandbox_config: Optional[SandboxConfig] = None,
30
+ sandbox_env_vars: Optional[Dict[str, Any]] = None,
31
+ ):
24
32
  self.tool_name = tool_name
25
33
  self.args = args
26
34
  self.user = user
27
- self.organization = OrganizationManager().get_organization_by_id(self.user.organization_id)
28
- self.privileged_tools = self.organization.privileged_tools
29
35
 
30
36
  self.tool = tool_object or ToolManager().get_tool_by_name(tool_name=tool_name, actor=self.user)
31
37
  if self.tool is None:
@@ -33,7 +39,12 @@ class AsyncToolSandboxBase(ABC):
33
39
  f"Agent attempted to invoke tool {self.tool_name} that does not exist for organization {self.user.organization_id}"
34
40
  )
35
41
 
36
- self.sandbox_config_manager = SandboxConfigManager()
42
+ # Store provided values or create manager to fetch them later
43
+ self.provided_sandbox_config = sandbox_config
44
+ self.provided_sandbox_env_vars = sandbox_env_vars
45
+
46
+ # Only create the manager if we need to (lazy initialization)
47
+ self._sandbox_config_manager = None
37
48
 
38
49
  # See if we should inject agent_state or not based on the presence of the "agent_state" arg
39
50
  if "agent_state" in self.parse_function_arguments(self.tool.source_code, self.tool.name):
@@ -41,6 +52,13 @@ class AsyncToolSandboxBase(ABC):
41
52
  else:
42
53
  self.inject_agent_state = False
43
54
 
55
+ # Lazily initialize the manager only when needed
56
+ @property
57
+ def sandbox_config_manager(self):
58
+ if self._sandbox_config_manager is None:
59
+ self._sandbox_config_manager = SandboxConfigManager()
60
+ return self._sandbox_config_manager
61
+
44
62
  @abstractmethod
45
63
  async def run(
46
64
  self,