letta-nightly 0.5.4.dev20241127104220__py3-none-any.whl → 0.5.4.dev20241128000451__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of letta-nightly might be problematic.

Files changed (38)
  1. letta/__init__.py +1 -1
  2. letta/agent.py +102 -140
  3. letta/agent_store/chroma.py +2 -0
  4. letta/cli/cli.py +3 -5
  5. letta/client/client.py +360 -117
  6. letta/config.py +2 -2
  7. letta/constants.py +5 -0
  8. letta/functions/function_sets/base.py +38 -1
  9. letta/helpers/tool_rule_solver.py +6 -5
  10. letta/main.py +1 -1
  11. letta/metadata.py +39 -41
  12. letta/o1_agent.py +1 -4
  13. letta/persistence_manager.py +1 -0
  14. letta/schemas/agent.py +57 -52
  15. letta/schemas/block.py +69 -25
  16. letta/schemas/enums.py +14 -0
  17. letta/schemas/letta_base.py +1 -1
  18. letta/schemas/letta_request.py +11 -23
  19. letta/schemas/letta_response.py +1 -2
  20. letta/schemas/memory.py +31 -100
  21. letta/schemas/message.py +3 -3
  22. letta/schemas/tool_rule.py +13 -5
  23. letta/server/rest_api/interface.py +12 -19
  24. letta/server/rest_api/routers/openai/assistants/threads.py +2 -3
  25. letta/server/rest_api/routers/openai/chat_completions/chat_completions.py +0 -2
  26. letta/server/rest_api/routers/v1/agents.py +90 -86
  27. letta/server/rest_api/routers/v1/blocks.py +50 -5
  28. letta/server/server.py +237 -459
  29. letta/server/static_files/assets/index-9fa459a2.js +1 -1
  30. letta/services/block_manager.py +6 -3
  31. letta/services/blocks_agents_manager.py +15 -0
  32. letta/services/tool_execution_sandbox.py +1 -1
  33. letta/services/tool_manager.py +2 -1
  34. {letta_nightly-0.5.4.dev20241127104220.dist-info → letta_nightly-0.5.4.dev20241128000451.dist-info}/METADATA +1 -1
  35. {letta_nightly-0.5.4.dev20241127104220.dist-info → letta_nightly-0.5.4.dev20241128000451.dist-info}/RECORD +38 -38
  36. {letta_nightly-0.5.4.dev20241127104220.dist-info → letta_nightly-0.5.4.dev20241128000451.dist-info}/LICENSE +0 -0
  37. {letta_nightly-0.5.4.dev20241127104220.dist-info → letta_nightly-0.5.4.dev20241128000451.dist-info}/WHEEL +0 -0
  38. {letta_nightly-0.5.4.dev20241127104220.dist-info → letta_nightly-0.5.4.dev20241128000451.dist-info}/entry_points.txt +0 -0
letta/server/rest_api/routers/v1/agents.py

@@ -1,24 +1,28 @@
 import asyncio
+import warnings
 from datetime import datetime
-from typing import Dict, List, Optional, Union
+from typing import List, Optional, Union
 
 from fastapi import APIRouter, Body, Depends, Header, HTTPException, Query, status
 from fastapi.responses import JSONResponse, StreamingResponse
 
 from letta.constants import DEFAULT_MESSAGE_TOOL, DEFAULT_MESSAGE_TOOL_KWARG
 from letta.schemas.agent import AgentState, CreateAgent, UpdateAgentState
-from letta.schemas.block import Block, BlockCreate, BlockLabelUpdate, BlockLimitUpdate
+from letta.schemas.block import (  # , BlockLabelUpdate, BlockLimitUpdate
+    Block,
+    BlockUpdate,
+    CreateBlock,
+)
 from letta.schemas.enums import MessageStreamStatus
 from letta.schemas.letta_message import (
     LegacyLettaMessage,
     LettaMessage,
     LettaMessageUnion,
 )
-from letta.schemas.letta_request import LettaRequest
+from letta.schemas.letta_request import LettaRequest, LettaStreamingRequest
 from letta.schemas.letta_response import LettaResponse
 from letta.schemas.memory import (
     ArchivalMemorySummary,
-    BasicBlockMemory,
     ContextWindowOverview,
     CreateArchivalMemory,
     Memory,
@@ -31,7 +35,6 @@ from letta.schemas.tool import Tool
 from letta.server.rest_api.interface import StreamingServerInterface
 from letta.server.rest_api.utils import get_letta_server, sse_async_generator
 from letta.server.server import SyncServer
-from letta.utils import deduplicate
 
 # These can be forward refs, but because Fastapi needs them at runtime the must be imported normally
 
@@ -83,13 +86,6 @@ def create_agent(
     Create a new agent with the specified configuration.
     """
     actor = server.get_user_or_default(user_id=user_id)
-    agent.user_id = actor.id
-    # TODO: sarah make general
-    # TODO: eventually remove this
-    assert agent.memory is not None  # TODO: dont force this, can be None (use default human/person)
-    blocks = agent.memory.get_blocks()
-    agent.memory = BasicBlockMemory(blocks=blocks)
-
     return server.create_agent(agent, actor=actor)
 
 
@@ -196,6 +192,7 @@ def get_agent_in_context_messages(
     return server.get_in_context_messages(agent_id=agent_id)
 
 
+# TODO: remove? can also get with agent blocks
 @router.get("/{agent_id}/memory", response_model=Memory, operation_id="get_agent_memory")
 def get_agent_memory(
     agent_id: str,
@@ -209,47 +206,40 @@ def get_agent_memory(
     return server.get_agent_memory(agent_id=agent_id)
 
 
-@router.patch("/{agent_id}/memory", response_model=Memory, operation_id="update_agent_memory")
-def update_agent_memory(
+@router.get("/{agent_id}/memory/block/{block_label}", response_model=Block, operation_id="get_agent_memory_block")
+def get_agent_memory_block(
     agent_id: str,
-    request: Dict = Body(...),
+    block_label: str,
     server: "SyncServer" = Depends(get_letta_server),
     user_id: Optional[str] = Header(None, alias="user_id"),  # Extract user_id from header, default to None if not present
 ):
     """
-    Update the core memory of a specific agent.
-    This endpoint accepts new memory contents (labels as keys, and values as values) and updates the core memory of the agent identified by the user ID and agent ID.
-    This endpoint accepts new memory contents to update the core memory of the agent.
-    This endpoint only supports modifying existing blocks; it does not support deleting/unlinking or creating/linking blocks.
+    Retrieve a memory block from an agent.
     """
     actor = server.get_user_or_default(user_id=user_id)
 
-    memory = server.update_agent_core_memory(user_id=actor.id, agent_id=agent_id, new_memory_contents=request)
-    return memory
+    block_id = server.blocks_agents_manager.get_block_id_for_label(agent_id=agent_id, block_label=block_label)
+    return server.block_manager.get_block_by_id(block_id, actor=actor)
 
 
-@router.patch("/{agent_id}/memory/label", response_model=Memory, operation_id="update_agent_memory_label")
-def update_agent_memory_label(
+@router.get("/{agent_id}/memory/block", response_model=List[Block], operation_id="get_agent_memory_blocks")
+def get_agent_memory_blocks(
     agent_id: str,
-    update_label: BlockLabelUpdate = Body(...),
     server: "SyncServer" = Depends(get_letta_server),
     user_id: Optional[str] = Header(None, alias="user_id"),  # Extract user_id from header, default to None if not present
 ):
     """
-    Update the label of a block in an agent's memory.
+    Retrieve the memory blocks of a specific agent.
     """
     actor = server.get_user_or_default(user_id=user_id)
-
-    memory = server.update_agent_memory_label(
-        user_id=actor.id, agent_id=agent_id, current_block_label=update_label.current_label, new_block_label=update_label.new_label
-    )
-    return memory
+    block_ids = server.blocks_agents_manager.list_block_ids_for_agent(agent_id=agent_id)
+    return [server.block_manager.get_block_by_id(block_id, actor=actor) for block_id in block_ids]
 
 
 @router.post("/{agent_id}/memory/block", response_model=Memory, operation_id="add_agent_memory_block")
 def add_agent_memory_block(
     agent_id: str,
-    create_block: BlockCreate = Body(...),
+    create_block: CreateBlock = Body(...),
     server: "SyncServer" = Depends(get_letta_server),
     user_id: Optional[str] = Header(None, alias="user_id"),  # Extract user_id from header, default to None if not present
 ):
@@ -268,7 +258,7 @@ def add_agent_memory_block(
     return updated_memory
 
 
-@router.delete("/{agent_id}/memory/block/{block_label}", response_model=Memory, operation_id="remove_agent_memory_block")
+@router.delete("/{agent_id}/memory/block/{block_label}", response_model=Memory, operation_id="remove_agent_memory_block_by_label")
 def remove_agent_memory_block(
     agent_id: str,
     # TODO should this be block_id, or the label?
@@ -288,25 +278,24 @@ def remove_agent_memory_block(
     return updated_memory
 
 
-@router.patch("/{agent_id}/memory/limit", response_model=Memory, operation_id="update_agent_memory_limit")
-def update_agent_memory_limit(
+@router.patch("/{agent_id}/memory/block/{block_label}", response_model=Block, operation_id="update_agent_memory_block_by_label")
+def update_agent_memory_block(
     agent_id: str,
-    update_label: BlockLimitUpdate = Body(...),
+    block_label: str,
+    update_block: BlockUpdate = Body(...),
     server: "SyncServer" = Depends(get_letta_server),
     user_id: Optional[str] = Header(None, alias="user_id"),  # Extract user_id from header, default to None if not present
 ):
     """
-    Update the limit of a block in an agent's memory.
+    Removes a memory block from an agent by unlnking it. If the block is not linked to any other agent, it is deleted.
     """
     actor = server.get_user_or_default(user_id=user_id)
 
-    memory = server.update_agent_memory_limit(
-        user_id=actor.id,
-        agent_id=agent_id,
-        block_label=update_label.label,
-        limit=update_label.limit,
-    )
-    return memory
+    # get the block_id from the label
+    block_id = server.blocks_agents_manager.get_block_id_for_label(agent_id=agent_id, block_label=block_label)
+
+    # update the block
+    return server.block_manager.update_block(block_id=block_id, block_update=update_block, actor=actor)
 
 
 @router.get("/{agent_id}/memory/recall", response_model=RecallMemorySummary, operation_id="get_agent_recall_memory_summary")
@@ -402,17 +391,13 @@ def get_agent_messages(
     limit: int = Query(10, description="Maximum number of messages to retrieve."),
     msg_object: bool = Query(False, description="If true, returns Message objects. If false, return LettaMessage objects."),
     # Flags to support the use of AssistantMessage message types
-    use_assistant_message: bool = Query(
-        False,
-        description="[Only applicable if msg_object is False] If true, returns AssistantMessage objects when the agent calls a designated message tool. If false, return FunctionCallMessage objects for all tool calls.",
-    ),
-    assistant_message_function_name: str = Query(
+    assistant_message_tool_name: str = Query(
         DEFAULT_MESSAGE_TOOL,
-        description="[Only applicable if use_assistant_message is True] The name of the designated message tool.",
+        description="The name of the designated message tool.",
     ),
-    assistant_message_function_kwarg: str = Query(
+    assistant_message_tool_kwarg: str = Query(
         DEFAULT_MESSAGE_TOOL_KWARG,
-        description="[Only applicable if use_assistant_message is True] The name of the message argument in the designated message tool.",
+        description="The name of the message argument in the designated message tool.",
     ),
     user_id: Optional[str] = Header(None, alias="user_id"),  # Extract user_id from header, default to None if not present
 ):
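With use_assistant_message removed and the remaining two parameters renamed, a query against the new signature looks roughly like this (URL prefix, IDs, and the default tool/kwarg names shown are assumptions):

    import requests

    resp = requests.get(
        "http://localhost:8283/v1/agents/agent-456/messages",  # assumed prefix
        headers={"user_id": "user-123"},
        params={
            "limit": 10,
            "msg_object": False,
            # renamed from assistant_message_function_name / _kwarg in this release
            "assistant_message_tool_name": "send_message",  # assumed default tool name
            "assistant_message_tool_kwarg": "message",      # assumed default kwarg name
        },
    )
    for msg in resp.json():
        print(msg)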
@@ -428,9 +413,8 @@
         limit=limit,
         reverse=True,
         return_message_object=msg_object,
-        use_assistant_message=use_assistant_message,
-        assistant_message_function_name=assistant_message_function_name,
-        assistant_message_function_kwarg=assistant_message_function_kwarg,
+        assistant_message_tool_name=assistant_message_tool_name,
+        assistant_message_tool_kwarg=assistant_message_tool_kwarg,
     )
 
 
@@ -450,28 +434,60 @@ def update_message(
 
 @router.post(
     "/{agent_id}/messages",
+    response_model=LettaResponse,
+    operation_id="create_agent_message",
+)
+async def send_message(
+    agent_id: str,
+    server: SyncServer = Depends(get_letta_server),
+    request: LettaRequest = Body(...),
+    user_id: Optional[str] = Header(None, alias="user_id"),  # Extract user_id from header, default to None if not present
+):
+    """
+    Process a user message and return the agent's response.
+    This endpoint accepts a message from a user and processes it through the agent.
+    """
+    actor = server.get_user_or_default(user_id=user_id)
+
+    agent_lock = server.per_agent_lock_manager.get_lock(agent_id)
+    async with agent_lock:
+        result = await send_message_to_agent(
+            server=server,
+            agent_id=agent_id,
+            user_id=actor.id,
+            messages=request.messages,
+            stream_steps=False,
+            stream_tokens=False,
+            # Support for AssistantMessage
+            assistant_message_tool_name=request.assistant_message_tool_name,
+            assistant_message_tool_kwarg=request.assistant_message_tool_kwarg,
+        )
+        return result
+
+
+@router.post(
+    "/{agent_id}/messages/stream",
     response_model=None,
     operation_id="create_agent_message",
     responses={
         200: {
             "description": "Successful response",
             "content": {
-                "application/json": {"$ref": "#/components/schemas/LettaResponse"},  # Use model_json_schema() instead of model directly
                 "text/event-stream": {"description": "Server-Sent Events stream"},
             },
         }
     },
 )
-async def send_message(
+async def send_message_streaming(
     agent_id: str,
     server: SyncServer = Depends(get_letta_server),
-    request: LettaRequest = Body(...),
+    request: LettaStreamingRequest = Body(...),
     user_id: Optional[str] = Header(None, alias="user_id"),  # Extract user_id from header, default to None if not present
 ):
     """
     Process a user message and return the agent's response.
     This endpoint accepts a message from a user and processes it through the agent.
-    It can optionally stream the response if 'stream_steps' or 'stream_tokens' is set to True.
+    It will stream the steps of the response always, and stream the tokens if 'stream_tokens' is set to True.
     """
     actor = server.get_user_or_default(user_id=user_id)
 
@@ -482,15 +498,13 @@ async def send_message(
             agent_id=agent_id,
             user_id=actor.id,
             messages=request.messages,
-            stream_steps=request.stream_steps,
+            stream_steps=True,
             stream_tokens=request.stream_tokens,
-            return_message_object=request.return_message_object,
             # Support for AssistantMessage
-            use_assistant_message=request.use_assistant_message,
-            assistant_message_function_name=request.assistant_message_function_name,
-            assistant_message_function_kwarg=request.assistant_message_function_kwarg,
+            assistant_message_tool_name=request.assistant_message_tool_name,
+            assistant_message_tool_kwarg=request.assistant_message_tool_kwarg,
         )
-    return result
+        return result
 
 
 # TODO: move this into server.py?
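After this split, POST /{agent_id}/messages always returns a complete LettaResponse, while POST /{agent_id}/messages/stream always streams steps over SSE and optionally streams tokens. A rough client sketch, assuming the same local server, prefix, and IDs as above and a simple role/text message shape for LettaRequest (all assumptions, not taken from this diff):

    import json
    import requests

    BASE = "http://localhost:8283/v1/agents/agent-456"   # assumed prefix and agent id
    HEADERS = {"user_id": "user-123"}
    PAYLOAD = {"messages": [{"role": "user", "text": "hello"}]}  # assumed message shape

    # Blocking call: one LettaResponse JSON body
    resp = requests.post(f"{BASE}/messages", headers=HEADERS, json=PAYLOAD)
    print(resp.json().get("usage"))

    # Streaming call: steps are always streamed; stream_tokens opts into token-level SSE
    with requests.post(
        f"{BASE}/messages/stream",
        headers=HEADERS,
        json={**PAYLOAD, "stream_tokens": True},
        stream=True,
    ) as stream:
        for line in stream.iter_lines():
            if not line.startswith(b"data: "):
                continue
            chunk = line[len(b"data: "):]
            if chunk.startswith(b"["):   # skip status markers such as [DONE] (assumed format)
                continue
            print(json.loads(chunk))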
@@ -503,13 +517,11 @@ async def send_message_to_agent(
     stream_steps: bool,
     stream_tokens: bool,
     # related to whether or not we return `LettaMessage`s or `Message`s
-    return_message_object: bool,  # Should be True for Python Client, False for REST API
     chat_completion_mode: bool = False,
     timestamp: Optional[datetime] = None,
     # Support for AssistantMessage
-    use_assistant_message: bool = False,
-    assistant_message_function_name: str = DEFAULT_MESSAGE_TOOL,
-    assistant_message_function_kwarg: str = DEFAULT_MESSAGE_TOOL_KWARG,
+    assistant_message_tool_name: str = DEFAULT_MESSAGE_TOOL,
+    assistant_message_tool_kwarg: str = DEFAULT_MESSAGE_TOOL_KWARG,
 ) -> Union[StreamingResponse, LettaResponse]:
     """Split off into a separate function so that it can be imported in the /chat/completion proxy."""
 
@@ -526,13 +538,16 @@ async def send_message_to_agent(
 
         # Get the generator object off of the agent's streaming interface
         # This will be attached to the POST SSE request used under-the-hood
-        letta_agent = server._get_or_load_agent(agent_id=agent_id)
+        # letta_agent = server.load_agent(agent_id=agent_id)
+        letta_agent = server.load_agent(agent_id=agent_id)
 
         # Disable token streaming if not OpenAI
         # TODO: cleanup this logic
         llm_config = letta_agent.agent_state.llm_config
-        if llm_config.model_endpoint_type != "openai" or "inference.memgpt.ai" in llm_config.model_endpoint:
-            print("Warning: token streaming is only supported for OpenAI models. Setting to False.")
+        if stream_tokens and (llm_config.model_endpoint_type != "openai" or "inference.memgpt.ai" in llm_config.model_endpoint):
+            warnings.warn(
+                "Token streaming is only supported for models with type 'openai' or `inference.memgpt.ai` in the model_endpoint: agent has endpoint type {llm_config.model_endpoint_type} and {llm_config.model_endpoint}. Setting stream_tokens to False."
+            )
             stream_tokens = False
 
         # Create a new interface per request
@@ -550,9 +565,8 @@
         # streaming_interface.function_call_legacy_mode = stream
 
         # Allow AssistantMessage is desired by client
-        streaming_interface.use_assistant_message = use_assistant_message
-        streaming_interface.assistant_message_function_name = assistant_message_function_name
-        streaming_interface.assistant_message_function_kwarg = assistant_message_function_kwarg
+        streaming_interface.assistant_message_tool_name = assistant_message_tool_name
+        streaming_interface.assistant_message_tool_kwarg = assistant_message_tool_kwarg
 
         # Related to JSON buffer reader
         streaming_interface.inner_thoughts_in_kwargs = (
@@ -567,14 +581,11 @@
                 user_id=user_id,
                 agent_id=agent_id,
                 messages=messages,
+                interface=streaming_interface,
             )
         )
 
         if stream_steps:
-            if return_message_object:
-                # TODO implement returning `Message`s in a stream, not just `LettaMessage` format
-                raise NotImplementedError
-
             # return a stream
             return StreamingResponse(
                 sse_async_generator(
@@ -604,14 +615,7 @@
             # If we want to convert these to Message, we can use the attached IDs
             # NOTE: we will need to de-duplicate the Messsage IDs though (since Assistant->Inner+Func_Call)
             # TODO: eventually update the interface to use `Message` and `MessageChunk` (new) inside the deque instead
-            if return_message_object:
-                message_ids = [m.id for m in filtered_stream]
-                message_ids = deduplicate(message_ids)
-                message_objs = [server.get_agent_message(agent_id=agent_id, message_id=m_id) for m_id in message_ids]
-                message_objs = [m for m in message_objs if m is not None]
-                return LettaResponse(messages=message_objs, usage=usage)
-            else:
-                return LettaResponse(messages=filtered_stream, usage=usage)
+            return LettaResponse(messages=filtered_stream, usage=usage)
 
     except HTTPException:
         raise
letta/server/rest_api/routers/v1/blocks.py

@@ -3,7 +3,8 @@ from typing import TYPE_CHECKING, List, Optional
 from fastapi import APIRouter, Body, Depends, Header, HTTPException, Query
 
 from letta.orm.errors import NoResultFound
-from letta.schemas.block import Block, BlockCreate, BlockUpdate
+from letta.schemas.block import Block, BlockUpdate, CreateBlock
+from letta.schemas.memory import Memory
 from letta.server.rest_api.utils import get_letta_server
 from letta.server.server import SyncServer
 
@@ -28,7 +29,7 @@ def list_blocks(
 
 @router.post("/", response_model=Block, operation_id="create_memory_block")
 def create_block(
-    create_block: BlockCreate = Body(...),
+    create_block: CreateBlock = Body(...),
     server: SyncServer = Depends(get_letta_server),
     user_id: Optional[str] = Header(None, alias="user_id"),  # Extract user_id from header, default to None if not present
 ):
@@ -40,12 +41,12 @@ def create_block(
 @router.patch("/{block_id}", response_model=Block, operation_id="update_memory_block")
 def update_block(
     block_id: str,
-    updated_block: BlockUpdate = Body(...),
+    update_block: BlockUpdate = Body(...),
     server: SyncServer = Depends(get_letta_server),
     user_id: Optional[str] = Header(None, alias="user_id"),
 ):
     actor = server.get_user_or_default(user_id=user_id)
-    return server.block_manager.update_block(block_id=block_id, block_update=updated_block, actor=actor)
+    return server.block_manager.update_block(block_id=block_id, block_update=update_block, actor=actor)
 
 
 @router.delete("/{block_id}", response_model=Block, operation_id="delete_memory_block")
@@ -64,8 +65,52 @@ def get_block(
     server: SyncServer = Depends(get_letta_server),
     user_id: Optional[str] = Header(None, alias="user_id"),
 ):
+    print("call get block", block_id)
     actor = server.get_user_or_default(user_id=user_id)
     try:
-        return server.block_manager.get_block_by_id(block_id=block_id, actor=actor)
+        block = server.block_manager.get_block_by_id(block_id=block_id, actor=actor)
+        if block is None:
+            raise HTTPException(status_code=404, detail="Block not found")
+        return block
     except NoResultFound:
         raise HTTPException(status_code=404, detail="Block not found")
+
+
+@router.patch("/{block_id}/attach", response_model=Block, operation_id="update_agent_memory_block")
+def link_agent_memory_block(
+    block_id: str,
+    agent_id: str = Query(..., description="The unique identifier of the agent to attach the source to."),
+    server: "SyncServer" = Depends(get_letta_server),
+    user_id: Optional[str] = Header(None, alias="user_id"),  # Extract user_id from header, default to None if not present
+):
+    """
+    Link a memory block to an agent.
+    """
+    actor = server.get_user_or_default(user_id=user_id)
+
+    block = server.block_manager.get_block_by_id(block_id=block_id, actor=actor)
+    if block is None:
+        raise HTTPException(status_code=404, detail="Block not found")
+
+    server.blocks_agents_manager.add_block_to_agent(agent_id=agent_id, block_id=block_id, block_label=block.label)
+    return block
+
+
+@router.patch("/{block_id}/detach", response_model=Memory, operation_id="update_agent_memory_block")
+def unlink_agent_memory_block(
+    block_id: str,
+    agent_id: str = Query(..., description="The unique identifier of the agent to attach the source to."),
+    server: "SyncServer" = Depends(get_letta_server),
+    user_id: Optional[str] = Header(None, alias="user_id"),  # Extract user_id from header, default to None if not present
+):
+    """
+    Unlink a memory block from an agent
+    """
+    actor = server.get_user_or_default(user_id=user_id)
+
+    block = server.block_manager.get_block_by_id(block_id=block_id, actor=actor)
+    if block is None:
+        raise HTTPException(status_code=404, detail="Block not found")
+    # Link the block to the agent
+    server.blocks_agents_manager.remove_block_with_id_from_agent(agent_id=agent_id, block_id=block_id)
+    return block
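The two new attach/detach routes take the target agent as a query parameter; a minimal sketch of linking and then unlinking a block, assuming the router is mounted under /v1/blocks on a local server and that the block and agent IDs exist (assumptions, not part of this diff):

    import requests

    BASE = "http://localhost:8283/v1/blocks"   # assumed mount prefix and port
    HEADERS = {"user_id": "user-123"}          # hypothetical user

    # Link an existing block to an agent, then unlink it again
    requests.patch(f"{BASE}/block-789/attach", headers=HEADERS, params={"agent_id": "agent-456"})
    requests.patch(f"{BASE}/block-789/detach", headers=HEADERS, params={"agent_id": "agent-456"})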