letta-nightly 0.5.5.dev20241122170833__py3-none-any.whl → 0.6.0.dev20241204051808__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of letta-nightly might be problematic.

Files changed (70)
  1. letta/__init__.py +2 -2
  2. letta/agent.py +155 -166
  3. letta/agent_store/chroma.py +2 -0
  4. letta/agent_store/db.py +1 -1
  5. letta/cli/cli.py +12 -8
  6. letta/cli/cli_config.py +1 -1
  7. letta/client/client.py +765 -137
  8. letta/config.py +2 -2
  9. letta/constants.py +10 -14
  10. letta/errors.py +12 -0
  11. letta/functions/function_sets/base.py +38 -1
  12. letta/functions/functions.py +40 -57
  13. letta/functions/helpers.py +0 -4
  14. letta/functions/schema_generator.py +279 -18
  15. letta/helpers/tool_rule_solver.py +6 -5
  16. letta/llm_api/helpers.py +99 -5
  17. letta/llm_api/openai.py +8 -2
  18. letta/local_llm/utils.py +13 -6
  19. letta/log.py +7 -9
  20. letta/main.py +1 -1
  21. letta/metadata.py +53 -38
  22. letta/o1_agent.py +1 -4
  23. letta/orm/__init__.py +2 -0
  24. letta/orm/block.py +7 -3
  25. letta/orm/blocks_agents.py +32 -0
  26. letta/orm/errors.py +8 -0
  27. letta/orm/mixins.py +8 -0
  28. letta/orm/organization.py +8 -1
  29. letta/orm/sandbox_config.py +56 -0
  30. letta/orm/sqlalchemy_base.py +68 -10
  31. letta/persistence_manager.py +1 -0
  32. letta/schemas/agent.py +57 -52
  33. letta/schemas/block.py +85 -26
  34. letta/schemas/blocks_agents.py +32 -0
  35. letta/schemas/enums.py +14 -0
  36. letta/schemas/letta_base.py +10 -1
  37. letta/schemas/letta_request.py +11 -23
  38. letta/schemas/letta_response.py +1 -2
  39. letta/schemas/memory.py +41 -76
  40. letta/schemas/message.py +3 -3
  41. letta/schemas/sandbox_config.py +114 -0
  42. letta/schemas/tool.py +37 -1
  43. letta/schemas/tool_rule.py +13 -5
  44. letta/server/rest_api/app.py +5 -4
  45. letta/server/rest_api/interface.py +12 -19
  46. letta/server/rest_api/routers/openai/assistants/threads.py +2 -3
  47. letta/server/rest_api/routers/openai/chat_completions/chat_completions.py +0 -2
  48. letta/server/rest_api/routers/v1/__init__.py +4 -9
  49. letta/server/rest_api/routers/v1/agents.py +145 -61
  50. letta/server/rest_api/routers/v1/blocks.py +50 -5
  51. letta/server/rest_api/routers/v1/sandbox_configs.py +127 -0
  52. letta/server/rest_api/routers/v1/sources.py +8 -1
  53. letta/server/rest_api/routers/v1/tools.py +139 -13
  54. letta/server/rest_api/utils.py +6 -0
  55. letta/server/server.py +397 -340
  56. letta/server/static_files/assets/index-9fa459a2.js +1 -1
  57. letta/services/block_manager.py +23 -2
  58. letta/services/blocks_agents_manager.py +106 -0
  59. letta/services/per_agent_lock_manager.py +18 -0
  60. letta/services/sandbox_config_manager.py +256 -0
  61. letta/services/tool_execution_sandbox.py +352 -0
  62. letta/services/tool_manager.py +16 -22
  63. letta/services/tool_sandbox_env/.gitkeep +0 -0
  64. letta/settings.py +4 -0
  65. letta/utils.py +0 -7
  66. {letta_nightly-0.5.5.dev20241122170833.dist-info → letta_nightly-0.6.0.dev20241204051808.dist-info}/METADATA +8 -6
  67. {letta_nightly-0.5.5.dev20241122170833.dist-info → letta_nightly-0.6.0.dev20241204051808.dist-info}/RECORD +70 -60
  68. {letta_nightly-0.5.5.dev20241122170833.dist-info → letta_nightly-0.6.0.dev20241204051808.dist-info}/LICENSE +0 -0
  69. {letta_nightly-0.5.5.dev20241122170833.dist-info → letta_nightly-0.6.0.dev20241204051808.dist-info}/WHEEL +0 -0
  70. {letta_nightly-0.5.5.dev20241122170833.dist-info → letta_nightly-0.6.0.dev20241204051808.dist-info}/entry_points.txt +0 -0
letta/server/rest_api/routers/v1/agents.py

@@ -1,23 +1,28 @@
 import asyncio
+import warnings
 from datetime import datetime
-from typing import Dict, List, Optional, Union
+from typing import List, Optional, Union

 from fastapi import APIRouter, Body, Depends, Header, HTTPException, Query, status
 from fastapi.responses import JSONResponse, StreamingResponse

 from letta.constants import DEFAULT_MESSAGE_TOOL, DEFAULT_MESSAGE_TOOL_KWARG
 from letta.schemas.agent import AgentState, CreateAgent, UpdateAgentState
+from letta.schemas.block import (  # , BlockLabelUpdate, BlockLimitUpdate
+    Block,
+    BlockUpdate,
+    CreateBlock,
+)
 from letta.schemas.enums import MessageStreamStatus
 from letta.schemas.letta_message import (
     LegacyLettaMessage,
     LettaMessage,
     LettaMessageUnion,
 )
-from letta.schemas.letta_request import LettaRequest
+from letta.schemas.letta_request import LettaRequest, LettaStreamingRequest
 from letta.schemas.letta_response import LettaResponse
 from letta.schemas.memory import (
     ArchivalMemorySummary,
-    BasicBlockMemory,
     ContextWindowOverview,
     CreateArchivalMemory,
     Memory,
@@ -30,7 +35,6 @@ from letta.schemas.tool import Tool
 from letta.server.rest_api.interface import StreamingServerInterface
 from letta.server.rest_api.utils import get_letta_server, sse_async_generator
 from letta.server.server import SyncServer
-from letta.utils import deduplicate

 # These can be forward refs, but because Fastapi needs them at runtime the must be imported normally

@@ -82,13 +86,6 @@ def create_agent(
     Create a new agent with the specified configuration.
     """
     actor = server.get_user_or_default(user_id=user_id)
-    agent.user_id = actor.id
-    # TODO: sarah make general
-    # TODO: eventually remove this
-    assert agent.memory is not None  # TODO: dont force this, can be None (use default human/person)
-    blocks = agent.memory.get_blocks()
-    agent.memory = BasicBlockMemory(blocks=blocks)
-
     return server.create_agent(agent, actor=actor)


@@ -195,6 +192,7 @@ def get_agent_in_context_messages(
     return server.get_in_context_messages(agent_id=agent_id)


+# TODO: remove? can also get with agent blocks
 @router.get("/{agent_id}/memory", response_model=Memory, operation_id="get_agent_memory")
 def get_agent_memory(
     agent_id: str,
@@ -208,21 +206,96 @@ def get_agent_memory(
     return server.get_agent_memory(agent_id=agent_id)


-@router.patch("/{agent_id}/memory", response_model=Memory, operation_id="update_agent_memory")
-def update_agent_memory(
+@router.get("/{agent_id}/memory/block/{block_label}", response_model=Block, operation_id="get_agent_memory_block")
+def get_agent_memory_block(
+    agent_id: str,
+    block_label: str,
+    server: "SyncServer" = Depends(get_letta_server),
+    user_id: Optional[str] = Header(None, alias="user_id"),  # Extract user_id from header, default to None if not present
+):
+    """
+    Retrieve a memory block from an agent.
+    """
+    actor = server.get_user_or_default(user_id=user_id)
+
+    block_id = server.blocks_agents_manager.get_block_id_for_label(agent_id=agent_id, block_label=block_label)
+    return server.block_manager.get_block_by_id(block_id, actor=actor)
+
+
+@router.get("/{agent_id}/memory/block", response_model=List[Block], operation_id="get_agent_memory_blocks")
+def get_agent_memory_blocks(
+    agent_id: str,
+    server: "SyncServer" = Depends(get_letta_server),
+    user_id: Optional[str] = Header(None, alias="user_id"),  # Extract user_id from header, default to None if not present
+):
+    """
+    Retrieve the memory blocks of a specific agent.
+    """
+    actor = server.get_user_or_default(user_id=user_id)
+    block_ids = server.blocks_agents_manager.list_block_ids_for_agent(agent_id=agent_id)
+    return [server.block_manager.get_block_by_id(block_id, actor=actor) for block_id in block_ids]
+
+
+@router.post("/{agent_id}/memory/block", response_model=Memory, operation_id="add_agent_memory_block")
+def add_agent_memory_block(
     agent_id: str,
-    request: Dict = Body(...),
+    create_block: CreateBlock = Body(...),
     server: "SyncServer" = Depends(get_letta_server),
     user_id: Optional[str] = Header(None, alias="user_id"),  # Extract user_id from header, default to None if not present
 ):
     """
-    Update the core memory of a specific agent.
-    This endpoint accepts new memory contents (human and persona) and updates the core memory of the agent identified by the user ID and agent ID.
+    Creates a memory block and links it to the agent.
     """
     actor = server.get_user_or_default(user_id=user_id)

-    memory = server.update_agent_core_memory(user_id=actor.id, agent_id=agent_id, new_memory_contents=request)
-    return memory
+    # Copied from POST /blocks
+    block_req = Block(**create_block.model_dump())
+    block = server.block_manager.create_or_update_block(actor=actor, block=block_req)
+
+    # Link the block to the agent
+    updated_memory = server.link_block_to_agent_memory(user_id=actor.id, agent_id=agent_id, block_id=block.id)
+
+    return updated_memory
+
+
+@router.delete("/{agent_id}/memory/block/{block_label}", response_model=Memory, operation_id="remove_agent_memory_block_by_label")
+def remove_agent_memory_block(
+    agent_id: str,
+    # TODO should this be block_id, or the label?
+    # I think label is OK since it's user-friendly + guaranteed to be unique within a Memory object
+    block_label: str,
+    server: "SyncServer" = Depends(get_letta_server),
+    user_id: Optional[str] = Header(None, alias="user_id"),  # Extract user_id from header, default to None if not present
+):
+    """
+    Removes a memory block from an agent by unlinking it. If the block is not linked to any other agent, it is deleted.
+    """
+    actor = server.get_user_or_default(user_id=user_id)
+
+    # Unlink the block from the agent
+    updated_memory = server.unlink_block_from_agent_memory(user_id=actor.id, agent_id=agent_id, block_label=block_label)
+
+    return updated_memory
+
+
+@router.patch("/{agent_id}/memory/block/{block_label}", response_model=Block, operation_id="update_agent_memory_block_by_label")
+def update_agent_memory_block(
+    agent_id: str,
+    block_label: str,
+    update_block: BlockUpdate = Body(...),
+    server: "SyncServer" = Depends(get_letta_server),
+    user_id: Optional[str] = Header(None, alias="user_id"),  # Extract user_id from header, default to None if not present
+):
+    """
+    Updates a memory block of an agent, identified by its label.
+    """
+    actor = server.get_user_or_default(user_id=user_id)
+
+    # get the block_id from the label
+    block_id = server.blocks_agents_manager.get_block_id_for_label(agent_id=agent_id, block_label=block_label)
+
+    # update the block
+    return server.block_manager.update_block(block_id=block_id, block_update=update_block, actor=actor)


 @router.get("/{agent_id}/memory/recall", response_model=RecallMemorySummary, operation_id="get_agent_recall_memory_summary")
@@ -318,17 +391,13 @@ def get_agent_messages(
     limit: int = Query(10, description="Maximum number of messages to retrieve."),
     msg_object: bool = Query(False, description="If true, returns Message objects. If false, return LettaMessage objects."),
     # Flags to support the use of AssistantMessage message types
-    use_assistant_message: bool = Query(
-        False,
-        description="[Only applicable if msg_object is False] If true, returns AssistantMessage objects when the agent calls a designated message tool. If false, return FunctionCallMessage objects for all tool calls.",
-    ),
-    assistant_message_function_name: str = Query(
+    assistant_message_tool_name: str = Query(
         DEFAULT_MESSAGE_TOOL,
-        description="[Only applicable if use_assistant_message is True] The name of the designated message tool.",
+        description="The name of the designated message tool.",
     ),
-    assistant_message_function_kwarg: str = Query(
+    assistant_message_tool_kwarg: str = Query(
         DEFAULT_MESSAGE_TOOL_KWARG,
-        description="[Only applicable if use_assistant_message is True] The name of the message argument in the designated message tool.",
+        description="The name of the message argument in the designated message tool.",
     ),
     user_id: Optional[str] = Header(None, alias="user_id"),  # Extract user_id from header, default to None if not present
 ):
@@ -344,9 +413,8 @@ def get_agent_messages(
         limit=limit,
         reverse=True,
         return_message_object=msg_object,
-        use_assistant_message=use_assistant_message,
-        assistant_message_function_name=assistant_message_function_name,
-        assistant_message_function_kwarg=assistant_message_function_kwarg,
+        assistant_message_tool_name=assistant_message_tool_name,
+        assistant_message_tool_kwarg=assistant_message_tool_kwarg,
     )


@@ -366,43 +434,69 @@ def update_message(

 @router.post(
     "/{agent_id}/messages",
+    response_model=LettaResponse,
+    operation_id="create_agent_message",
+)
+async def send_message(
+    agent_id: str,
+    server: SyncServer = Depends(get_letta_server),
+    request: LettaRequest = Body(...),
+    user_id: Optional[str] = Header(None, alias="user_id"),  # Extract user_id from header, default to None if not present
+):
+    """
+    Process a user message and return the agent's response.
+    This endpoint accepts a message from a user and processes it through the agent.
+    """
+    actor = server.get_user_or_default(user_id=user_id)
+    result = await send_message_to_agent(
+        server=server,
+        agent_id=agent_id,
+        user_id=actor.id,
+        messages=request.messages,
+        stream_steps=False,
+        stream_tokens=False,
+        # Support for AssistantMessage
+        assistant_message_tool_name=request.assistant_message_tool_name,
+        assistant_message_tool_kwarg=request.assistant_message_tool_kwarg,
+    )
+    return result
+
+
+@router.post(
+    "/{agent_id}/messages/stream",
     response_model=None,
     operation_id="create_agent_message",
     responses={
         200: {
             "description": "Successful response",
             "content": {
-                "application/json": {"$ref": "#/components/schemas/LettaResponse"},  # Use model_json_schema() instead of model directly
                 "text/event-stream": {"description": "Server-Sent Events stream"},
             },
         }
     },
 )
-async def send_message(
+async def send_message_streaming(
     agent_id: str,
     server: SyncServer = Depends(get_letta_server),
-    request: LettaRequest = Body(...),
+    request: LettaStreamingRequest = Body(...),
     user_id: Optional[str] = Header(None, alias="user_id"),  # Extract user_id from header, default to None if not present
 ):
     """
     Process a user message and return the agent's response.
     This endpoint accepts a message from a user and processes it through the agent.
-    It can optionally stream the response if 'stream_steps' or 'stream_tokens' is set to True.
+    It will stream the steps of the response always, and stream the tokens if 'stream_tokens' is set to True.
     """
     actor = server.get_user_or_default(user_id=user_id)
-
     result = await send_message_to_agent(
         server=server,
         agent_id=agent_id,
         user_id=actor.id,
         messages=request.messages,
-        stream_steps=request.stream_steps,
+        stream_steps=True,
         stream_tokens=request.stream_tokens,
-        return_message_object=request.return_message_object,
         # Support for AssistantMessage
-        use_assistant_message=request.use_assistant_message,
-        assistant_message_function_name=request.assistant_message_function_name,
-        assistant_message_function_kwarg=request.assistant_message_function_kwarg,
+        assistant_message_tool_name=request.assistant_message_tool_name,
+        assistant_message_tool_kwarg=request.assistant_message_tool_kwarg,
     )
     return result

@@ -417,13 +511,11 @@ async def send_message_to_agent(
     stream_steps: bool,
     stream_tokens: bool,
     # related to whether or not we return `LettaMessage`s or `Message`s
-    return_message_object: bool,  # Should be True for Python Client, False for REST API
     chat_completion_mode: bool = False,
     timestamp: Optional[datetime] = None,
     # Support for AssistantMessage
-    use_assistant_message: bool = False,
-    assistant_message_function_name: str = DEFAULT_MESSAGE_TOOL,
-    assistant_message_function_kwarg: str = DEFAULT_MESSAGE_TOOL_KWARG,
+    assistant_message_tool_name: str = DEFAULT_MESSAGE_TOOL,
+    assistant_message_tool_kwarg: str = DEFAULT_MESSAGE_TOOL_KWARG,
 ) -> Union[StreamingResponse, LettaResponse]:
     """Split off into a separate function so that it can be imported in the /chat/completion proxy."""

@@ -440,13 +532,16 @@ async def send_message_to_agent(

     # Get the generator object off of the agent's streaming interface
     # This will be attached to the POST SSE request used under-the-hood
-    letta_agent = server._get_or_load_agent(agent_id=agent_id)
+    # letta_agent = server.load_agent(agent_id=agent_id)
+    letta_agent = server.load_agent(agent_id=agent_id)

     # Disable token streaming if not OpenAI
     # TODO: cleanup this logic
     llm_config = letta_agent.agent_state.llm_config
-    if llm_config.model_endpoint_type != "openai" or "inference.memgpt.ai" in llm_config.model_endpoint:
-        print("Warning: token streaming is only supported for OpenAI models. Setting to False.")
+    if stream_tokens and (llm_config.model_endpoint_type != "openai" or "inference.memgpt.ai" in llm_config.model_endpoint):
+        warnings.warn(
+            "Token streaming is only supported for models with type 'openai' or `inference.memgpt.ai` in the model_endpoint: agent has endpoint type {llm_config.model_endpoint_type} and {llm_config.model_endpoint}. Setting stream_tokens to False."
+        )
         stream_tokens = False

     # Create a new interface per request
@@ -464,9 +559,8 @@ async def send_message_to_agent(
     # streaming_interface.function_call_legacy_mode = stream

     # Allow AssistantMessage is desired by client
-    streaming_interface.use_assistant_message = use_assistant_message
-    streaming_interface.assistant_message_function_name = assistant_message_function_name
-    streaming_interface.assistant_message_function_kwarg = assistant_message_function_kwarg
+    streaming_interface.assistant_message_tool_name = assistant_message_tool_name
+    streaming_interface.assistant_message_tool_kwarg = assistant_message_tool_kwarg

     # Related to JSON buffer reader
     streaming_interface.inner_thoughts_in_kwargs = (
@@ -481,14 +575,11 @@ async def send_message_to_agent(
                 user_id=user_id,
                 agent_id=agent_id,
                 messages=messages,
+                interface=streaming_interface,
             )
         )

         if stream_steps:
-            if return_message_object:
-                # TODO implement returning `Message`s in a stream, not just `LettaMessage` format
-                raise NotImplementedError
-
             # return a stream
             return StreamingResponse(
                 sse_async_generator(
@@ -518,14 +609,7 @@ async def send_message_to_agent(
             # If we want to convert these to Message, we can use the attached IDs
            # NOTE: we will need to de-duplicate the Messsage IDs though (since Assistant->Inner+Func_Call)
            # TODO: eventually update the interface to use `Message` and `MessageChunk` (new) inside the deque instead
-            if return_message_object:
-                message_ids = [m.id for m in filtered_stream]
-                message_ids = deduplicate(message_ids)
-                message_objs = [server.get_agent_message(agent_id=agent_id, message_id=m_id) for m_id in message_ids]
-                message_objs = [m for m in message_objs if m is not None]
-                return LettaResponse(messages=message_objs, usage=usage)
-            else:
-                return LettaResponse(messages=filtered_stream, usage=usage)
+            return LettaResponse(messages=filtered_stream, usage=usage)

     except HTTPException:
         raise
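The hunks above split message sending into a blocking POST /{agent_id}/messages endpoint and a streaming POST /{agent_id}/messages/stream endpoint, and drop the return_message_object and use_assistant_message flags. As an illustrative sketch only (not part of this diff), a client might use the two routes roughly like this; the base URL, header, and message payload shape are assumptions inferred from the handlers above:

import requests

BASE = "http://localhost:8283/v1/agents"          # assumed server address and route prefix
HEADERS = {"user_id": "user-00000000"}            # handlers read user_id from this header
payload = {"messages": [{"role": "user", "text": "Hello!"}]}  # assumed LettaRequest shape

# Blocking call: returns a LettaResponse JSON body in one shot
resp = requests.post(f"{BASE}/agent-123/messages", json=payload, headers=HEADERS)
print(resp.json())

# Streaming call: steps are always streamed as Server-Sent Events;
# stream_tokens=True additionally streams tokens (OpenAI endpoints only, per the warning above)
with requests.post(
    f"{BASE}/agent-123/messages/stream",
    json={**payload, "stream_tokens": True},
    headers=HEADERS,
    stream=True,
) as r:
    for line in r.iter_lines():
        if line:
            print(line.decode())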
letta/server/rest_api/routers/v1/blocks.py

@@ -3,7 +3,8 @@ from typing import TYPE_CHECKING, List, Optional
 from fastapi import APIRouter, Body, Depends, Header, HTTPException, Query

 from letta.orm.errors import NoResultFound
-from letta.schemas.block import Block, BlockCreate, BlockUpdate
+from letta.schemas.block import Block, BlockUpdate, CreateBlock
+from letta.schemas.memory import Memory
 from letta.server.rest_api.utils import get_letta_server
 from letta.server.server import SyncServer

@@ -28,7 +29,7 @@ def list_blocks(

 @router.post("/", response_model=Block, operation_id="create_memory_block")
 def create_block(
-    create_block: BlockCreate = Body(...),
+    create_block: CreateBlock = Body(...),
     server: SyncServer = Depends(get_letta_server),
     user_id: Optional[str] = Header(None, alias="user_id"),  # Extract user_id from header, default to None if not present
 ):
@@ -40,12 +41,12 @@ def create_block(
 @router.patch("/{block_id}", response_model=Block, operation_id="update_memory_block")
 def update_block(
     block_id: str,
-    updated_block: BlockUpdate = Body(...),
+    update_block: BlockUpdate = Body(...),
     server: SyncServer = Depends(get_letta_server),
     user_id: Optional[str] = Header(None, alias="user_id"),
 ):
     actor = server.get_user_or_default(user_id=user_id)
-    return server.block_manager.update_block(block_id=block_id, block_update=updated_block, actor=actor)
+    return server.block_manager.update_block(block_id=block_id, block_update=update_block, actor=actor)


 @router.delete("/{block_id}", response_model=Block, operation_id="delete_memory_block")
@@ -64,8 +65,52 @@ def get_block(
     server: SyncServer = Depends(get_letta_server),
     user_id: Optional[str] = Header(None, alias="user_id"),
 ):
+    print("call get block", block_id)
     actor = server.get_user_or_default(user_id=user_id)
     try:
-        return server.block_manager.get_block_by_id(block_id=block_id, actor=actor)
+        block = server.block_manager.get_block_by_id(block_id=block_id, actor=actor)
+        if block is None:
+            raise HTTPException(status_code=404, detail="Block not found")
+        return block
     except NoResultFound:
         raise HTTPException(status_code=404, detail="Block not found")
+
+
+@router.patch("/{block_id}/attach", response_model=Block, operation_id="update_agent_memory_block")
+def link_agent_memory_block(
+    block_id: str,
+    agent_id: str = Query(..., description="The unique identifier of the agent to attach the source to."),
+    server: "SyncServer" = Depends(get_letta_server),
+    user_id: Optional[str] = Header(None, alias="user_id"),  # Extract user_id from header, default to None if not present
+):
+    """
+    Link a memory block to an agent.
+    """
+    actor = server.get_user_or_default(user_id=user_id)
+
+    block = server.block_manager.get_block_by_id(block_id=block_id, actor=actor)
+    if block is None:
+        raise HTTPException(status_code=404, detail="Block not found")
+
+    server.blocks_agents_manager.add_block_to_agent(agent_id=agent_id, block_id=block_id, block_label=block.label)
+    return block
+
+
+@router.patch("/{block_id}/detach", response_model=Memory, operation_id="update_agent_memory_block")
+def unlink_agent_memory_block(
+    block_id: str,
+    agent_id: str = Query(..., description="The unique identifier of the agent to attach the source to."),
+    server: "SyncServer" = Depends(get_letta_server),
+    user_id: Optional[str] = Header(None, alias="user_id"),  # Extract user_id from header, default to None if not present
+):
+    """
+    Unlink a memory block from an agent
+    """
+    actor = server.get_user_or_default(user_id=user_id)
+
+    block = server.block_manager.get_block_by_id(block_id=block_id, actor=actor)
+    if block is None:
+        raise HTTPException(status_code=404, detail="Block not found")
+    # Unlink the block from the agent
+    server.blocks_agents_manager.remove_block_with_id_from_agent(agent_id=agent_id, block_id=block_id)
+    return block
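As a rough illustration (not part of this diff), the new attach/detach routes might be exercised like this; the URL, port, and header are assumptions consistent with the sketches above:

import requests

BASE = "http://localhost:8283/v1/blocks"           # assumed prefix for this router
HEADERS = {"user_id": "user-00000000"}

block_id, agent_id = "block-456", "agent-123"      # hypothetical IDs

# PATCH /{block_id}/attach?agent_id=...: link an existing block to an agent
requests.patch(f"{BASE}/{block_id}/attach", params={"agent_id": agent_id}, headers=HEADERS)

# PATCH /{block_id}/detach?agent_id=...: unlink it again
requests.patch(f"{BASE}/{block_id}/detach", params={"agent_id": agent_id}, headers=HEADERS)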
letta/server/rest_api/routers/v1/sandbox_configs.py (new file)

@@ -0,0 +1,127 @@
+from typing import List, Optional
+
+from fastapi import APIRouter, Depends, Query
+
+from letta.schemas.sandbox_config import SandboxConfig as PydanticSandboxConfig
+from letta.schemas.sandbox_config import SandboxConfigCreate, SandboxConfigUpdate
+from letta.schemas.sandbox_config import SandboxEnvironmentVariable as PydanticEnvVar
+from letta.schemas.sandbox_config import (
+    SandboxEnvironmentVariableCreate,
+    SandboxEnvironmentVariableUpdate,
+    SandboxType,
+)
+from letta.server.rest_api.utils import get_letta_server, get_user_id
+from letta.server.server import SyncServer
+
+router = APIRouter(prefix="/sandbox-config", tags=["sandbox-config"])
+
+
+### Sandbox Config Routes
+
+
+@router.post("/", response_model=PydanticSandboxConfig)
+def create_sandbox_config(
+    config_create: SandboxConfigCreate,
+    server: SyncServer = Depends(get_letta_server),
+    user_id: str = Depends(get_user_id),
+):
+    actor = server.get_user_or_default(user_id=user_id)
+
+    return server.sandbox_config_manager.create_or_update_sandbox_config(config_create, actor)
+
+
+@router.post("/e2b/default", response_model=PydanticSandboxConfig)
+def create_default_e2b_sandbox_config(
+    server: SyncServer = Depends(get_letta_server),
+    user_id: str = Depends(get_user_id),
+):
+    actor = server.get_user_or_default(user_id=user_id)
+    return server.sandbox_config_manager.get_or_create_default_sandbox_config(sandbox_type=SandboxType.E2B, actor=actor)
+
+
+@router.post("/local/default", response_model=PydanticSandboxConfig)
+def create_default_local_sandbox_config(
+    server: SyncServer = Depends(get_letta_server),
+    user_id: str = Depends(get_user_id),
+):
+    actor = server.get_user_or_default(user_id=user_id)
+    return server.sandbox_config_manager.get_or_create_default_sandbox_config(sandbox_type=SandboxType.LOCAL, actor=actor)
+
+
+@router.patch("/{sandbox_config_id}", response_model=PydanticSandboxConfig)
+def update_sandbox_config(
+    sandbox_config_id: str,
+    config_update: SandboxConfigUpdate,
+    server: SyncServer = Depends(get_letta_server),
+    user_id: str = Depends(get_user_id),
+):
+    actor = server.get_user_or_default(user_id=user_id)
+    return server.sandbox_config_manager.update_sandbox_config(sandbox_config_id, config_update, actor)
+
+
+@router.delete("/{sandbox_config_id}", status_code=204)
+def delete_sandbox_config(
+    sandbox_config_id: str,
+    server: SyncServer = Depends(get_letta_server),
+    user_id: str = Depends(get_user_id),
+):
+    actor = server.get_user_or_default(user_id=user_id)
+    server.sandbox_config_manager.delete_sandbox_config(sandbox_config_id, actor)
+
+
+@router.get("/", response_model=List[PydanticSandboxConfig])
+def list_sandbox_configs(
+    limit: int = Query(1000, description="Number of results to return"),
+    cursor: Optional[str] = Query(None, description="Pagination cursor to fetch the next set of results"),
+    server: SyncServer = Depends(get_letta_server),
+    user_id: str = Depends(get_user_id),
+):
+    actor = server.get_user_or_default(user_id=user_id)
+    return server.sandbox_config_manager.list_sandbox_configs(actor, limit=limit, cursor=cursor)
+
+
+### Sandbox Environment Variable Routes
+
+
+@router.post("/{sandbox_config_id}/environment-variable", response_model=PydanticEnvVar)
+def create_sandbox_env_var(
+    sandbox_config_id: str,
+    env_var_create: SandboxEnvironmentVariableCreate,
+    server: SyncServer = Depends(get_letta_server),
+    user_id: str = Depends(get_user_id),
+):
+    actor = server.get_user_or_default(user_id=user_id)
+    return server.sandbox_config_manager.create_sandbox_env_var(env_var_create, sandbox_config_id, actor)
+
+
+@router.patch("/environment-variable/{env_var_id}", response_model=PydanticEnvVar)
+def update_sandbox_env_var(
+    env_var_id: str,
+    env_var_update: SandboxEnvironmentVariableUpdate,
+    server: SyncServer = Depends(get_letta_server),
+    user_id: str = Depends(get_user_id),
+):
+    actor = server.get_user_or_default(user_id=user_id)
+    return server.sandbox_config_manager.update_sandbox_env_var(env_var_id, env_var_update, actor)
+
+
+@router.delete("/environment-variable/{env_var_id}", status_code=204)
+def delete_sandbox_env_var(
+    env_var_id: str,
+    server: SyncServer = Depends(get_letta_server),
+    user_id: str = Depends(get_user_id),
+):
+    actor = server.get_user_or_default(user_id=user_id)
+    server.sandbox_config_manager.delete_sandbox_env_var(env_var_id, actor)
+
+
+@router.get("/{sandbox_config_id}/environment-variable", response_model=List[PydanticEnvVar])
+def list_sandbox_env_vars(
+    sandbox_config_id: str,
+    limit: int = Query(1000, description="Number of results to return"),
+    cursor: Optional[str] = Query(None, description="Pagination cursor to fetch the next set of results"),
+    server: SyncServer = Depends(get_letta_server),
+    user_id: str = Depends(get_user_id),
+):
+    actor = server.get_user_or_default(user_id=user_id)
+    return server.sandbox_config_manager.list_sandbox_env_vars(sandbox_config_id, actor, limit=limit, cursor=cursor)
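For orientation, here is a hedged sketch (not part of this diff) of calling the new sandbox-config routes; the port and header are assumptions, and the environment-variable fields (key, value) mirror the create handler above rather than the schema file, which is not shown here:

import requests

BASE = "http://localhost:8283/v1/sandbox-config"   # matches the router prefix above; port assumed
HEADERS = {"user_id": "user-00000000"}

# Create (or fetch) the default local sandbox config
config = requests.post(f"{BASE}/local/default", headers=HEADERS).json()

# Attach an environment variable to that config (field names are assumed)
requests.post(f"{BASE}/{config['id']}/environment-variable",
              json={"key": "MY_API_KEY", "value": "secret"},
              headers=HEADERS)

# List sandbox configs with pagination
configs = requests.get(BASE, params={"limit": 50}, headers=HEADERS).json()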
letta/server/rest_api/routers/v1/sources.py

@@ -37,7 +37,10 @@ def get_source(
     """
     actor = server.get_user_or_default(user_id=user_id)

-    return server.source_manager.get_source_by_id(source_id=source_id, actor=actor)
+    source = server.source_manager.get_source_by_id(source_id=source_id, actor=actor)
+    if not source:
+        raise HTTPException(status_code=404, detail=f"Source with id={source_id} not found.")
+    return source


 @router.get("/name/{source_name}", response_model=str, operation_id="get_source_id_by_name")
@@ -52,6 +55,8 @@ def get_source_id_by_name(
     actor = server.get_user_or_default(user_id=user_id)

     source = server.source_manager.get_source_by_name(source_name=source_name, actor=actor)
+    if not source:
+        raise HTTPException(status_code=404, detail=f"Source with name={source_name} not found.")
     return source.id


@@ -94,6 +99,8 @@ def update_source(
     Update the name or documentation of an existing data source.
     """
     actor = server.get_user_or_default(user_id=user_id)
+    if not server.source_manager.get_source_by_id(source_id=source_id, actor=actor):
+        raise HTTPException(status_code=404, detail=f"Source with id={source_id} does not exist.")
     return server.source_manager.update_source(source_id=source_id, source_update=source, actor=actor)
