letta-client 0.1.16__py3-none-any.whl → 0.1.19__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

This version of letta-client has been flagged as potentially problematic.

Files changed (104)
  1. letta_client/__init__.py +34 -120
  2. letta_client/agents/__init__.py +18 -54
  3. letta_client/agents/archival_memory/client.py +25 -343
  4. letta_client/agents/client.py +1640 -347
  5. letta_client/agents/context/client.py +6 -4
  6. letta_client/agents/core_memory/client.py +95 -624
  7. letta_client/agents/memory_variables/__init__.py +2 -2
  8. letta_client/agents/memory_variables/client.py +15 -15
  9. letta_client/agents/memory_variables/types/__init__.py +2 -2
  10. letta_client/agents/memory_variables/types/{memory_variables_get_response.py → memory_variables_list_response.py} +1 -1
  11. letta_client/agents/messages/__init__.py +2 -22
  12. letta_client/agents/messages/client.py +32 -38
  13. letta_client/agents/messages/types/__init__.py +2 -21
  14. letta_client/agents/messages/types/letta_streaming_response.py +16 -139
  15. letta_client/agents/messages/types/messages_list_response.py +2 -2
  16. letta_client/agents/sources/client.py +266 -5
  17. letta_client/agents/tools/client.py +25 -27
  18. letta_client/agents/types/__init__.py +15 -25
  19. letta_client/agents/types/agents_search_request_search_item.py +10 -78
  20. letta_client/agents/types/{agents_search_request_search_item_order_by.py → agents_search_request_search_item_direction.py} +7 -6
  21. letta_client/agents/types/agents_search_request_search_item_direction_direction.py +5 -0
  22. letta_client/agents/types/agents_search_request_search_item_direction_value.py +5 -0
  23. letta_client/agents/types/{agents_search_request_search_item_name.py → agents_search_request_search_item_one.py} +5 -4
  24. letta_client/agents/types/agents_search_request_search_item_one_operator.py +5 -0
  25. letta_client/agents/types/{agents_search_request_search_item_tags.py → agents_search_request_search_item_two.py} +2 -1
  26. letta_client/agents/types/{agents_search_request_search_item_version.py → agents_search_request_search_item_zero.py} +3 -2
  27. letta_client/blocks/client.py +12 -260
  28. letta_client/client.py +3 -3
  29. letta_client/core/client_wrapper.py +1 -1
  30. letta_client/jobs/client.py +4 -4
  31. letta_client/providers/client.py +74 -74
  32. letta_client/runs/client.py +14 -12
  33. letta_client/sources/client.py +12 -288
  34. letta_client/tools/client.py +63 -189
  35. letta_client/types/__init__.py +21 -103
  36. letta_client/types/agent_state.py +3 -7
  37. letta_client/types/{assistant_message_output.py → assistant_message.py} +3 -2
  38. letta_client/types/block.py +2 -6
  39. letta_client/types/block_update.py +1 -5
  40. letta_client/types/{archival_memory_summary.py → chat_completion_message_tool_call.py} +7 -7
  41. letta_client/types/context_window_overview.py +4 -6
  42. letta_client/types/create_block.py +1 -5
  43. letta_client/types/embedding_config_embedding_endpoint_type.py +1 -0
  44. letta_client/types/{function_call_output.py → function.py} +1 -1
  45. letta_client/types/{function_schema.py → function_definition.py} +2 -1
  46. letta_client/types/{create_assistant_file_request.py → function_tool.py} +6 -7
  47. letta_client/types/job.py +1 -5
  48. letta_client/types/letta_message_union.py +9 -121
  49. letta_client/types/letta_usage_statistics.py +1 -0
  50. letta_client/types/llm_config_model_endpoint_type.py +1 -0
  51. letta_client/types/{letta_schemas_message_message.py → message.py} +9 -6
  52. letta_client/types/passage.py +1 -5
  53. letta_client/types/reasoning_message.py +2 -1
  54. letta_client/types/run.py +1 -5
  55. letta_client/types/source.py +2 -6
  56. letta_client/types/{system_message_output.py → system_message.py} +3 -2
  57. letta_client/types/{letta_schemas_tool_tool.py → tool.py} +1 -1
  58. letta_client/types/{letta_schemas_letta_message_tool_call.py → tool_call.py} +1 -1
  59. letta_client/types/tool_call_message.py +2 -1
  60. letta_client/types/tool_call_message_tool_call.py +2 -2
  61. letta_client/types/tool_return_message.py +2 -1
  62. letta_client/types/tool_type.py +2 -1
  63. letta_client/types/{user_message_output.py → user_message.py} +3 -2
  64. {letta_client-0.1.16.dist-info → letta_client-0.1.19.dist-info}/METADATA +2 -2
  65. {letta_client-0.1.16.dist-info → letta_client-0.1.19.dist-info}/RECORD +66 -101
  66. letta_client/agents/recall_memory/__init__.py +0 -2
  67. letta_client/agents/recall_memory/client.py +0 -147
  68. letta_client/agents/types/agents_search_request_search_item_name_operator.py +0 -5
  69. letta_client/agents/types/agents_search_request_search_item_order_by_direction.py +0 -5
  70. letta_client/agents/types/agents_search_request_search_item_order_by_value.py +0 -5
  71. letta_client/types/assistant_file.py +0 -33
  72. letta_client/types/assistant_message_input.py +0 -23
  73. letta_client/types/chat_completion_request.py +0 -49
  74. letta_client/types/chat_completion_request_function_call.py +0 -6
  75. letta_client/types/chat_completion_request_messages_item.py +0 -11
  76. letta_client/types/chat_completion_request_stop.py +0 -5
  77. letta_client/types/chat_completion_request_tool_choice.py +0 -8
  78. letta_client/types/chat_completion_response.py +0 -32
  79. letta_client/types/choice.py +0 -25
  80. letta_client/types/create_assistant_request.py +0 -57
  81. letta_client/types/delete_assistant_file_response.py +0 -28
  82. letta_client/types/delete_assistant_response.py +0 -28
  83. letta_client/types/function_call_input.py +0 -19
  84. letta_client/types/letta_schemas_openai_chat_completion_request_tool.py +0 -21
  85. letta_client/types/letta_schemas_openai_chat_completion_request_tool_call.py +0 -24
  86. letta_client/types/letta_schemas_openai_chat_completion_request_tool_call_function.py +0 -20
  87. letta_client/types/letta_schemas_openai_chat_completion_response_message.py +0 -24
  88. letta_client/types/letta_schemas_openai_chat_completion_response_tool_call.py +0 -22
  89. letta_client/types/letta_schemas_openai_chat_completions_tool_call_function.py +0 -27
  90. letta_client/types/letta_schemas_openai_chat_completions_tool_call_input.py +0 -29
  91. letta_client/types/letta_schemas_openai_chat_completions_tool_call_output.py +0 -29
  92. letta_client/types/log_prob_token.py +0 -21
  93. letta_client/types/message_content_log_prob.py +0 -23
  94. letta_client/types/open_ai_assistant.py +0 -67
  95. letta_client/types/recall_memory_summary.py +0 -22
  96. letta_client/types/response_format.py +0 -19
  97. letta_client/types/system_message_input.py +0 -21
  98. letta_client/types/tool_call_function_output.py +0 -27
  99. letta_client/types/tool_function_choice.py +0 -21
  100. letta_client/types/tool_input.py +0 -21
  101. letta_client/types/tool_message.py +0 -21
  102. letta_client/types/user_message_input.py +0 -22
  103. letta_client/types/user_message_input_content.py +0 -5
  104. {letta_client-0.1.16.dist-info → letta_client-0.1.19.dist-info}/WHEEL +0 -0
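
Read together, the renames in this file list describe a small surface migration for SDK users: agents.get and agents.update become agents.retrieve and agents.modify, the llm and variables keyword arguments on agent creation become model and memory_variables, and the recall_memory sub-client is removed while archival- and core-memory operations move onto the agents client itself. A rough before/after sketch of the call surface, with placeholder token and agent id (the exact signatures appear in the diff below):

    from letta_client import Letta

    client = Letta(token="YOUR_TOKEN")

    # letta-client 0.1.16
    # agent = client.agents.get(agent_id="agent_id")
    # client.agents.update(agent_id="agent_id", name="renamed-agent")

    # letta-client 0.1.19
    agent = client.agents.retrieve(agent_id="agent_id")
    client.agents.modify(agent_id="agent_id", name="renamed-agent")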
@@ -5,12 +5,11 @@ from ..core.client_wrapper import SyncClientWrapper
  from .context.client import ContextClient
  from .tools.client import ToolsClient
  from .sources.client import SourcesClient
- from .core_memory.client import CoreMemoryClient
- from .recall_memory.client import RecallMemoryClient
- from .archival_memory.client import ArchivalMemoryClient
  from .messages.client import MessagesClient
  from .templates.client import TemplatesClient
  from .memory_variables.client import MemoryVariablesClient
+ from .core_memory.client import CoreMemoryClient
+ from .archival_memory.client import ArchivalMemoryClient
  from ..core.request_options import RequestOptions
  from ..types.agent_state import AgentState
  from ..core.unchecked_base_model import construct_type
@@ -27,18 +26,20 @@ from ..types.message_create import MessageCreate
  from ..core.serialization import convert_and_respect_annotation_metadata
  from ..core.jsonable_encoder import jsonable_encoder
  from .types.update_agent_tool_rules_item import UpdateAgentToolRulesItem
+ from ..types.memory import Memory
+ from ..types.block import Block
+ from ..types.passage import Passage
  from .types.agents_search_request_search_item import AgentsSearchRequestSearchItem
  from .types.agents_search_request_combinator import AgentsSearchRequestCombinator
  from ..core.client_wrapper import AsyncClientWrapper
  from .context.client import AsyncContextClient
  from .tools.client import AsyncToolsClient
  from .sources.client import AsyncSourcesClient
- from .core_memory.client import AsyncCoreMemoryClient
- from .recall_memory.client import AsyncRecallMemoryClient
- from .archival_memory.client import AsyncArchivalMemoryClient
  from .messages.client import AsyncMessagesClient
  from .templates.client import AsyncTemplatesClient
  from .memory_variables.client import AsyncMemoryVariablesClient
+ from .core_memory.client import AsyncCoreMemoryClient
+ from .archival_memory.client import AsyncArchivalMemoryClient

  # this is used as the default value for optional parameters
  OMIT = typing.cast(typing.Any, ...)
@@ -50,12 +51,11 @@ class AgentsClient:
  self.context = ContextClient(client_wrapper=self._client_wrapper)
  self.tools = ToolsClient(client_wrapper=self._client_wrapper)
  self.sources = SourcesClient(client_wrapper=self._client_wrapper)
- self.core_memory = CoreMemoryClient(client_wrapper=self._client_wrapper)
- self.recall_memory = RecallMemoryClient(client_wrapper=self._client_wrapper)
- self.archival_memory = ArchivalMemoryClient(client_wrapper=self._client_wrapper)
  self.messages = MessagesClient(client_wrapper=self._client_wrapper)
  self.templates = TemplatesClient(client_wrapper=self._client_wrapper)
  self.memory_variables = MemoryVariablesClient(client_wrapper=self._client_wrapper)
+ self.core_memory = CoreMemoryClient(client_wrapper=self._client_wrapper)
+ self.archival_memory = ArchivalMemoryClient(client_wrapper=self._client_wrapper)

  def list(
  self,
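
A practical consequence of the constructor change above: AgentsClient no longer attaches a recall_memory sub-client in 0.1.19 (the module itself is deleted, see files 66-67 in the list), while core_memory and archival_memory remain available. A minimal illustration, assuming a configured token:

    from letta_client import Letta

    client = Letta(token="YOUR_TOKEN")

    client.agents.core_memory      # still present in 0.1.19
    client.agents.archival_memory  # still present in 0.1.19
    # client.agents.recall_memory  # removed in 0.1.19; attribute access now fails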
@@ -166,7 +166,7 @@ class AgentsClient:
  include_multi_agent_tools: typing.Optional[bool] = OMIT,
  description: typing.Optional[str] = OMIT,
  metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
- llm: typing.Optional[str] = OMIT,
+ model: typing.Optional[str] = OMIT,
  embedding: typing.Optional[str] = OMIT,
  context_window_limit: typing.Optional[int] = OMIT,
  embedding_chunk_size: typing.Optional[int] = OMIT,
@@ -174,7 +174,7 @@ class AgentsClient:
  template: typing.Optional[bool] = OMIT,
  project: typing.Optional[str] = OMIT,
  tool_exec_environment_variables: typing.Optional[typing.Dict[str, typing.Optional[str]]] = OMIT,
- variables: typing.Optional[typing.Dict[str, typing.Optional[str]]] = OMIT,
+ memory_variables: typing.Optional[typing.Dict[str, typing.Optional[str]]] = OMIT,
  request_options: typing.Optional[RequestOptions] = None,
  ) -> AgentState:
  """
@@ -233,7 +233,7 @@ class AgentsClient:
  metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
  The metadata of the agent.

- llm : typing.Optional[str]
+ model : typing.Optional[str]
  The LLM configuration handle used by the agent, specified in the format provider/model-name, as an alternative to specifying llm_config.

  embedding : typing.Optional[str]
@@ -257,7 +257,7 @@ class AgentsClient:
  tool_exec_environment_variables : typing.Optional[typing.Dict[str, typing.Optional[str]]]
  The environment variables for tool execution specific to this agent.

- variables : typing.Optional[typing.Dict[str, typing.Optional[str]]]
+ memory_variables : typing.Optional[typing.Dict[str, typing.Optional[str]]]
  The variables that should be set for the agent.

  request_options : typing.Optional[RequestOptions]
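
The renamed create() fields above (llm becomes model, variables becomes memory_variables) change how an agent is configured at creation time. A minimal sketch against 0.1.19; the model and embedding handles follow the provider/model-name format described in the docstring, but the concrete values here are illustrative:

    from letta_client import Letta

    client = Letta(token="YOUR_TOKEN")

    agent = client.agents.create(
        name="support-agent",
        model="openai/gpt-4o-mini",                 # was llm= in 0.1.16
        embedding="openai/text-embedding-3-small",
        memory_variables={"user_name": "Ada"},      # was variables= in 0.1.16
    )
    print(agent)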
@@ -307,8 +307,8 @@ class AgentsClient:
  "include_base_tools": include_base_tools,
  "include_multi_agent_tools": include_multi_agent_tools,
  "description": description,
- "metadata_": metadata,
- "llm": llm,
+ "metadata": metadata,
+ "model": model,
  "embedding": embedding,
  "context_window_limit": context_window_limit,
  "embedding_chunk_size": embedding_chunk_size,
@@ -316,7 +316,7 @@ class AgentsClient:
  "template": template,
  "project": project,
  "tool_exec_environment_variables": tool_exec_environment_variables,
- "variables": variables,
+ "memory_variables": memory_variables,
  },
  headers={
  "content-type": "application/json",
@@ -348,7 +348,7 @@ class AgentsClient:
  raise ApiError(status_code=_response.status_code, body=_response.text)
  raise ApiError(status_code=_response.status_code, body=_response_json)

- def get(self, agent_id: str, *, request_options: typing.Optional[RequestOptions] = None) -> AgentState:
+ def retrieve(self, agent_id: str, *, request_options: typing.Optional[RequestOptions] = None) -> AgentState:
  """
  Get the state of the agent.

@@ -371,7 +371,7 @@ class AgentsClient:
  client = Letta(
  token="YOUR_TOKEN",
  )
- client.agents.get(
+ client.agents.retrieve(
  agent_id="agent_id",
  )
  """
@@ -462,7 +462,7 @@ class AgentsClient:
  raise ApiError(status_code=_response.status_code, body=_response.text)
  raise ApiError(status_code=_response.status_code, body=_response_json)

- def update(
+ def modify(
  self,
  agent_id: str,
  *,
@@ -482,7 +482,7 @@ class AgentsClient:
  request_options: typing.Optional[RequestOptions] = None,
  ) -> AgentState:
  """
- Update an exsiting agent
+ Update an existing agent

  Parameters
  ----------
@@ -542,7 +542,7 @@ class AgentsClient:
  client = Letta(
  token="YOUR_TOKEN",
  )
- client.agents.update(
+ client.agents.modify(
  agent_id="agent_id",
  )
  """
@@ -567,7 +567,7 @@ class AgentsClient:
  ),
  "message_ids": message_ids,
  "description": description,
- "metadata_": metadata,
+ "metadata": metadata,
  "tool_exec_environment_variables": tool_exec_environment_variables,
  },
  headers={
@@ -600,29 +600,23 @@ class AgentsClient:
  raise ApiError(status_code=_response.status_code, body=_response.text)
  raise ApiError(status_code=_response.status_code, body=_response_json)

- def reset_messages(
- self,
- agent_id: str,
- *,
- add_default_initial_messages: typing.Optional[bool] = None,
- request_options: typing.Optional[RequestOptions] = None,
- ) -> AgentState:
+ def retrieve_agent_memory(
+ self, agent_id: str, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> Memory:
  """
- Resets the messages for an agent
+ Retrieve the memory state of a specific agent.
+ This endpoint fetches the current memory state of the agent identified by the user ID and agent ID.

  Parameters
  ----------
  agent_id : str

- add_default_initial_messages : typing.Optional[bool]
- If true, adds the default initial messages after resetting.
-
  request_options : typing.Optional[RequestOptions]
  Request-specific configuration.

  Returns
  -------
- AgentState
+ Memory
  Successful Response

  Examples
@@ -632,24 +626,21 @@ class AgentsClient:
  client = Letta(
  token="YOUR_TOKEN",
  )
- client.agents.reset_messages(
+ client.agents.retrieve_agent_memory(
  agent_id="agent_id",
  )
  """
  _response = self._client_wrapper.httpx_client.request(
- f"v1/agents/{jsonable_encoder(agent_id)}/reset-messages",
- method="PATCH",
- params={
- "add_default_initial_messages": add_default_initial_messages,
- },
+ f"v1/agents/{jsonable_encoder(agent_id)}/core-memory",
+ method="GET",
  request_options=request_options,
  )
  try:
  if 200 <= _response.status_code < 300:
  return typing.cast(
- AgentState,
+ Memory,
  construct_type(
- type_=AgentState, # type: ignore
+ type_=Memory, # type: ignore
  object_=_response.json(),
  ),
  )
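
With this change, the old reset_messages body is replaced here by retrieve_agent_memory, a plain GET against the agent's /core-memory route (reset_messages itself is re-added later in this diff). A short usage sketch; iterating the returned Memory assumes it exposes a blocks collection with label and value fields, which this excerpt does not show:

    from letta_client import Letta

    client = Letta(token="YOUR_TOKEN")

    memory = client.agents.retrieve_agent_memory(agent_id="agent_id")
    # Assumption: the Memory model exposes its core-memory blocks; field names are illustrative.
    for block in getattr(memory, "blocks", []):
        print(block.label, block.value)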
@@ -668,39 +659,25 @@ class AgentsClient:
  raise ApiError(status_code=_response.status_code, body=_response.text)
  raise ApiError(status_code=_response.status_code, body=_response_json)

- def search(
- self,
- *,
- search: typing.Optional[typing.Sequence[AgentsSearchRequestSearchItem]] = OMIT,
- project_id: typing.Optional[str] = OMIT,
- combinator: typing.Optional[AgentsSearchRequestCombinator] = OMIT,
- limit: typing.Optional[float] = OMIT,
- offset: typing.Optional[float] = OMIT,
- request_options: typing.Optional[RequestOptions] = None,
- ) -> None:
+ def retrieve_core_memory_block(
+ self, agent_id: str, block_label: str, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> Block:
  """
- <Note>This endpoint is only available on Letta Cloud.</Note>
-
- Search deployed agents.
+ Retrieve a memory block from an agent.

  Parameters
  ----------
- search : typing.Optional[typing.Sequence[AgentsSearchRequestSearchItem]]
-
- project_id : typing.Optional[str]
-
- combinator : typing.Optional[AgentsSearchRequestCombinator]
-
- limit : typing.Optional[float]
+ agent_id : str

- offset : typing.Optional[float]
+ block_label : str

  request_options : typing.Optional[RequestOptions]
  Request-specific configuration.

  Returns
  -------
- None
+ Block
+ Successful Response

  Examples
  --------
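
retrieve_core_memory_block above, together with modify_core_memory_block and list_core_memory_blocks in the following hunks, gives direct block-level access on the agents client. A sketch of reading and then appending to an agent's 'persona' block; Block's value attribute is inferred from the request fields shown further down:

    from letta_client import Letta

    client = Letta(token="YOUR_TOKEN")

    persona = client.agents.retrieve_core_memory_block(
        agent_id="agent_id",
        block_label="persona",
    )
    print(persona.value)

    client.agents.modify_core_memory_block(
        agent_id="agent_id",
        block_label="persona",
        value=persona.value + "\nPrefers concise answers.",
    )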
@@ -709,127 +686,183 @@ class AgentsClient:
709
686
  client = Letta(
710
687
  token="YOUR_TOKEN",
711
688
  )
712
- client.agents.search()
689
+ client.agents.retrieve_core_memory_block(
690
+ agent_id="agent_id",
691
+ block_label="block_label",
692
+ )
713
693
  """
714
694
  _response = self._client_wrapper.httpx_client.request(
715
- "v1/agents/search",
716
- method="POST",
717
- json={
718
- "search": convert_and_respect_annotation_metadata(
719
- object_=search, annotation=typing.Sequence[AgentsSearchRequestSearchItem], direction="write"
720
- ),
721
- "project_id": project_id,
722
- "combinator": combinator,
723
- "limit": limit,
724
- "offset": offset,
725
- },
726
- headers={
727
- "content-type": "application/json",
728
- },
695
+ f"v1/agents/{jsonable_encoder(agent_id)}/core-memory/blocks/{jsonable_encoder(block_label)}",
696
+ method="GET",
729
697
  request_options=request_options,
730
- omit=OMIT,
731
698
  )
732
699
  try:
733
700
  if 200 <= _response.status_code < 300:
734
- return
701
+ return typing.cast(
702
+ Block,
703
+ construct_type(
704
+ type_=Block, # type: ignore
705
+ object_=_response.json(),
706
+ ),
707
+ )
708
+ if _response.status_code == 422:
709
+ raise UnprocessableEntityError(
710
+ typing.cast(
711
+ HttpValidationError,
712
+ construct_type(
713
+ type_=HttpValidationError, # type: ignore
714
+ object_=_response.json(),
715
+ ),
716
+ )
717
+ )
735
718
  _response_json = _response.json()
736
719
  except JSONDecodeError:
737
720
  raise ApiError(status_code=_response.status_code, body=_response.text)
738
721
  raise ApiError(status_code=_response.status_code, body=_response_json)
739
722
 
740
-
741
- class AsyncAgentsClient:
742
- def __init__(self, *, client_wrapper: AsyncClientWrapper):
743
- self._client_wrapper = client_wrapper
744
- self.context = AsyncContextClient(client_wrapper=self._client_wrapper)
745
- self.tools = AsyncToolsClient(client_wrapper=self._client_wrapper)
746
- self.sources = AsyncSourcesClient(client_wrapper=self._client_wrapper)
747
- self.core_memory = AsyncCoreMemoryClient(client_wrapper=self._client_wrapper)
748
- self.recall_memory = AsyncRecallMemoryClient(client_wrapper=self._client_wrapper)
749
- self.archival_memory = AsyncArchivalMemoryClient(client_wrapper=self._client_wrapper)
750
- self.messages = AsyncMessagesClient(client_wrapper=self._client_wrapper)
751
- self.templates = AsyncTemplatesClient(client_wrapper=self._client_wrapper)
752
- self.memory_variables = AsyncMemoryVariablesClient(client_wrapper=self._client_wrapper)
753
-
754
- async def list(
723
+ def modify_core_memory_block(
755
724
  self,
725
+ agent_id: str,
726
+ block_label: str,
756
727
  *,
757
- name: typing.Optional[str] = None,
758
- tags: typing.Optional[typing.Union[str, typing.Sequence[str]]] = None,
759
- match_all_tags: typing.Optional[bool] = None,
760
- cursor: typing.Optional[str] = None,
761
- limit: typing.Optional[int] = None,
762
- query_text: typing.Optional[str] = None,
728
+ value: typing.Optional[str] = OMIT,
729
+ limit: typing.Optional[int] = OMIT,
730
+ name: typing.Optional[str] = OMIT,
731
+ is_template: typing.Optional[bool] = OMIT,
732
+ label: typing.Optional[str] = OMIT,
733
+ description: typing.Optional[str] = OMIT,
734
+ metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
763
735
  request_options: typing.Optional[RequestOptions] = None,
764
- ) -> typing.List[AgentState]:
736
+ ) -> Block:
765
737
  """
766
- List all agents associated with a given user.
767
- This endpoint retrieves a list of all agents and their configurations associated with the specified user ID.
738
+ Updates a memory block of an agent.
768
739
 
769
740
  Parameters
770
741
  ----------
771
- name : typing.Optional[str]
772
- Name of the agent
773
-
774
- tags : typing.Optional[typing.Union[str, typing.Sequence[str]]]
775
- List of tags to filter agents by
742
+ agent_id : str
776
743
 
777
- match_all_tags : typing.Optional[bool]
778
- If True, only returns agents that match ALL given tags. Otherwise, return agents that have ANY of the passed in tags.
744
+ block_label : str
779
745
 
780
- cursor : typing.Optional[str]
781
- Cursor for pagination
746
+ value : typing.Optional[str]
747
+ Value of the block.
782
748
 
783
749
  limit : typing.Optional[int]
784
- Limit for pagination
750
+ Character limit of the block.
785
751
 
786
- query_text : typing.Optional[str]
787
- Search agents by name
752
+ name : typing.Optional[str]
753
+ Name of the block if it is a template.
754
+
755
+ is_template : typing.Optional[bool]
756
+ Whether the block is a template (e.g. saved human/persona options).
757
+
758
+ label : typing.Optional[str]
759
+ Label of the block (e.g. 'human', 'persona') in the context window.
760
+
761
+ description : typing.Optional[str]
762
+ Description of the block.
763
+
764
+ metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
765
+ Metadata of the block.
788
766
 
789
767
  request_options : typing.Optional[RequestOptions]
790
768
  Request-specific configuration.
791
769
 
792
770
  Returns
793
771
  -------
794
- typing.List[AgentState]
772
+ Block
795
773
  Successful Response
796
774
 
797
775
  Examples
798
776
  --------
799
- import asyncio
800
-
801
- from letta_client import AsyncLetta
777
+ from letta_client import Letta
802
778
 
803
- client = AsyncLetta(
779
+ client = Letta(
804
780
  token="YOUR_TOKEN",
805
781
  )
782
+ client.agents.modify_core_memory_block(
783
+ agent_id="agent_id",
784
+ block_label="block_label",
785
+ )
786
+ """
787
+ _response = self._client_wrapper.httpx_client.request(
788
+ f"v1/agents/{jsonable_encoder(agent_id)}/core-memory/blocks/{jsonable_encoder(block_label)}",
789
+ method="PATCH",
790
+ json={
791
+ "value": value,
792
+ "limit": limit,
793
+ "name": name,
794
+ "is_template": is_template,
795
+ "label": label,
796
+ "description": description,
797
+ "metadata": metadata,
798
+ },
799
+ request_options=request_options,
800
+ omit=OMIT,
801
+ )
802
+ try:
803
+ if 200 <= _response.status_code < 300:
804
+ return typing.cast(
805
+ Block,
806
+ construct_type(
807
+ type_=Block, # type: ignore
808
+ object_=_response.json(),
809
+ ),
810
+ )
811
+ if _response.status_code == 422:
812
+ raise UnprocessableEntityError(
813
+ typing.cast(
814
+ HttpValidationError,
815
+ construct_type(
816
+ type_=HttpValidationError, # type: ignore
817
+ object_=_response.json(),
818
+ ),
819
+ )
820
+ )
821
+ _response_json = _response.json()
822
+ except JSONDecodeError:
823
+ raise ApiError(status_code=_response.status_code, body=_response.text)
824
+ raise ApiError(status_code=_response.status_code, body=_response_json)
806
825
 
826
+ def list_core_memory_blocks(
827
+ self, agent_id: str, *, request_options: typing.Optional[RequestOptions] = None
828
+ ) -> typing.List[Block]:
829
+ """
830
+ Retrieve the memory blocks of a specific agent.
807
831
 
808
- async def main() -> None:
809
- await client.agents.list()
832
+ Parameters
833
+ ----------
834
+ agent_id : str
810
835
 
836
+ request_options : typing.Optional[RequestOptions]
837
+ Request-specific configuration.
811
838
 
812
- asyncio.run(main())
839
+ Returns
840
+ -------
841
+ typing.List[Block]
842
+ Successful Response
843
+
844
+ Examples
845
+ --------
846
+ from letta_client import Letta
847
+
848
+ client = Letta(
849
+ token="YOUR_TOKEN",
850
+ )
851
+ client.agents.list_core_memory_blocks(
852
+ agent_id="agent_id",
853
+ )
813
854
  """
814
- _response = await self._client_wrapper.httpx_client.request(
815
- "v1/agents/",
855
+ _response = self._client_wrapper.httpx_client.request(
856
+ f"v1/agents/{jsonable_encoder(agent_id)}/core-memory/blocks",
816
857
  method="GET",
817
- params={
818
- "name": name,
819
- "tags": tags,
820
- "match_all_tags": match_all_tags,
821
- "cursor": cursor,
822
- "limit": limit,
823
- "query_text": query_text,
824
- },
825
858
  request_options=request_options,
826
859
  )
827
860
  try:
828
861
  if 200 <= _response.status_code < 300:
829
862
  return typing.cast(
830
- typing.List[AgentState],
863
+ typing.List[Block],
831
864
  construct_type(
832
- type_=typing.List[AgentState], # type: ignore
865
+ type_=typing.List[Block], # type: ignore
833
866
  object_=_response.json(),
834
867
  ),
835
868
  )
@@ -848,126 +881,1332 @@ class AsyncAgentsClient:
848
881
  raise ApiError(status_code=_response.status_code, body=_response.text)
849
882
  raise ApiError(status_code=_response.status_code, body=_response_json)
850
883
 
851
- async def create(
852
- self,
853
- *,
854
- name: typing.Optional[str] = OMIT,
855
- memory_blocks: typing.Optional[typing.Sequence[CreateBlock]] = OMIT,
856
- tools: typing.Optional[typing.Sequence[str]] = OMIT,
857
- tool_ids: typing.Optional[typing.Sequence[str]] = OMIT,
858
- source_ids: typing.Optional[typing.Sequence[str]] = OMIT,
859
- block_ids: typing.Optional[typing.Sequence[str]] = OMIT,
860
- tool_rules: typing.Optional[typing.Sequence[CreateAgentRequestToolRulesItem]] = OMIT,
861
- tags: typing.Optional[typing.Sequence[str]] = OMIT,
862
- system: typing.Optional[str] = OMIT,
863
- agent_type: typing.Optional[AgentType] = OMIT,
864
- llm_config: typing.Optional[LlmConfig] = OMIT,
865
- embedding_config: typing.Optional[EmbeddingConfig] = OMIT,
866
- initial_message_sequence: typing.Optional[typing.Sequence[MessageCreate]] = OMIT,
867
- include_base_tools: typing.Optional[bool] = OMIT,
868
- include_multi_agent_tools: typing.Optional[bool] = OMIT,
869
- description: typing.Optional[str] = OMIT,
870
- metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
871
- llm: typing.Optional[str] = OMIT,
872
- embedding: typing.Optional[str] = OMIT,
873
- context_window_limit: typing.Optional[int] = OMIT,
874
- embedding_chunk_size: typing.Optional[int] = OMIT,
875
- from_template: typing.Optional[str] = OMIT,
876
- template: typing.Optional[bool] = OMIT,
877
- project: typing.Optional[str] = OMIT,
878
- tool_exec_environment_variables: typing.Optional[typing.Dict[str, typing.Optional[str]]] = OMIT,
879
- variables: typing.Optional[typing.Dict[str, typing.Optional[str]]] = OMIT,
880
- request_options: typing.Optional[RequestOptions] = None,
884
+ def attach_core_memory_block(
885
+ self, agent_id: str, block_id: str, *, request_options: typing.Optional[RequestOptions] = None
881
886
  ) -> AgentState:
882
887
  """
883
- Create a new agent with the specified configuration.
888
+ Attach a block to an agent.
884
889
 
885
890
  Parameters
886
891
  ----------
887
- name : typing.Optional[str]
888
- The name of the agent.
892
+ agent_id : str
889
893
 
890
- memory_blocks : typing.Optional[typing.Sequence[CreateBlock]]
891
- The blocks to create in the agent's in-context memory.
894
+ block_id : str
892
895
 
893
- tools : typing.Optional[typing.Sequence[str]]
894
- The tools used by the agent.
896
+ request_options : typing.Optional[RequestOptions]
897
+ Request-specific configuration.
895
898
 
896
- tool_ids : typing.Optional[typing.Sequence[str]]
899
+ Returns
900
+ -------
901
+ AgentState
902
+ Successful Response
903
+
904
+ Examples
905
+ --------
906
+ from letta_client import Letta
907
+
908
+ client = Letta(
909
+ token="YOUR_TOKEN",
910
+ )
911
+ client.agents.attach_core_memory_block(
912
+ agent_id="agent_id",
913
+ block_id="block_id",
914
+ )
915
+ """
916
+ _response = self._client_wrapper.httpx_client.request(
917
+ f"v1/agents/{jsonable_encoder(agent_id)}/core-memory/blocks/attach/{jsonable_encoder(block_id)}",
918
+ method="PATCH",
919
+ request_options=request_options,
920
+ )
921
+ try:
922
+ if 200 <= _response.status_code < 300:
923
+ return typing.cast(
924
+ AgentState,
925
+ construct_type(
926
+ type_=AgentState, # type: ignore
927
+ object_=_response.json(),
928
+ ),
929
+ )
930
+ if _response.status_code == 422:
931
+ raise UnprocessableEntityError(
932
+ typing.cast(
933
+ HttpValidationError,
934
+ construct_type(
935
+ type_=HttpValidationError, # type: ignore
936
+ object_=_response.json(),
937
+ ),
938
+ )
939
+ )
940
+ _response_json = _response.json()
941
+ except JSONDecodeError:
942
+ raise ApiError(status_code=_response.status_code, body=_response.text)
943
+ raise ApiError(status_code=_response.status_code, body=_response_json)
944
+
945
+ def detach_core_memory_block(
946
+ self, agent_id: str, block_id: str, *, request_options: typing.Optional[RequestOptions] = None
947
+ ) -> AgentState:
948
+ """
949
+ Detach a block from an agent.
950
+
951
+ Parameters
952
+ ----------
953
+ agent_id : str
954
+
955
+ block_id : str
956
+
957
+ request_options : typing.Optional[RequestOptions]
958
+ Request-specific configuration.
959
+
960
+ Returns
961
+ -------
962
+ AgentState
963
+ Successful Response
964
+
965
+ Examples
966
+ --------
967
+ from letta_client import Letta
968
+
969
+ client = Letta(
970
+ token="YOUR_TOKEN",
971
+ )
972
+ client.agents.detach_core_memory_block(
973
+ agent_id="agent_id",
974
+ block_id="block_id",
975
+ )
976
+ """
977
+ _response = self._client_wrapper.httpx_client.request(
978
+ f"v1/agents/{jsonable_encoder(agent_id)}/core-memory/blocks/detach/{jsonable_encoder(block_id)}",
979
+ method="PATCH",
980
+ request_options=request_options,
981
+ )
982
+ try:
983
+ if 200 <= _response.status_code < 300:
984
+ return typing.cast(
985
+ AgentState,
986
+ construct_type(
987
+ type_=AgentState, # type: ignore
988
+ object_=_response.json(),
989
+ ),
990
+ )
991
+ if _response.status_code == 422:
992
+ raise UnprocessableEntityError(
993
+ typing.cast(
994
+ HttpValidationError,
995
+ construct_type(
996
+ type_=HttpValidationError, # type: ignore
997
+ object_=_response.json(),
998
+ ),
999
+ )
1000
+ )
1001
+ _response_json = _response.json()
1002
+ except JSONDecodeError:
1003
+ raise ApiError(status_code=_response.status_code, body=_response.text)
1004
+ raise ApiError(status_code=_response.status_code, body=_response_json)
1005
+
1006
+ def list_archival_memory(
1007
+ self,
1008
+ agent_id: str,
1009
+ *,
1010
+ after: typing.Optional[int] = None,
1011
+ before: typing.Optional[int] = None,
1012
+ limit: typing.Optional[int] = None,
1013
+ request_options: typing.Optional[RequestOptions] = None,
1014
+ ) -> typing.List[Passage]:
1015
+ """
1016
+ Retrieve the memories in an agent's archival memory store (paginated query).
1017
+
1018
+ Parameters
1019
+ ----------
1020
+ agent_id : str
1021
+
1022
+ after : typing.Optional[int]
1023
+ Unique ID of the memory to start the query range at.
1024
+
1025
+ before : typing.Optional[int]
1026
+ Unique ID of the memory to end the query range at.
1027
+
1028
+ limit : typing.Optional[int]
1029
+ How many results to include in the response.
1030
+
1031
+ request_options : typing.Optional[RequestOptions]
1032
+ Request-specific configuration.
1033
+
1034
+ Returns
1035
+ -------
1036
+ typing.List[Passage]
1037
+ Successful Response
1038
+
1039
+ Examples
1040
+ --------
1041
+ from letta_client import Letta
1042
+
1043
+ client = Letta(
1044
+ token="YOUR_TOKEN",
1045
+ )
1046
+ client.agents.list_archival_memory(
1047
+ agent_id="agent_id",
1048
+ )
1049
+ """
1050
+ _response = self._client_wrapper.httpx_client.request(
1051
+ f"v1/agents/{jsonable_encoder(agent_id)}/archival-memory",
1052
+ method="GET",
1053
+ params={
1054
+ "after": after,
1055
+ "before": before,
1056
+ "limit": limit,
1057
+ },
1058
+ request_options=request_options,
1059
+ )
1060
+ try:
1061
+ if 200 <= _response.status_code < 300:
1062
+ return typing.cast(
1063
+ typing.List[Passage],
1064
+ construct_type(
1065
+ type_=typing.List[Passage], # type: ignore
1066
+ object_=_response.json(),
1067
+ ),
1068
+ )
1069
+ if _response.status_code == 422:
1070
+ raise UnprocessableEntityError(
1071
+ typing.cast(
1072
+ HttpValidationError,
1073
+ construct_type(
1074
+ type_=HttpValidationError, # type: ignore
1075
+ object_=_response.json(),
1076
+ ),
1077
+ )
1078
+ )
1079
+ _response_json = _response.json()
1080
+ except JSONDecodeError:
1081
+ raise ApiError(status_code=_response.status_code, body=_response.text)
1082
+ raise ApiError(status_code=_response.status_code, body=_response_json)
1083
+
1084
+ def create_archival_memory(
1085
+ self, agent_id: str, *, text: str, request_options: typing.Optional[RequestOptions] = None
1086
+ ) -> typing.List[Passage]:
1087
+ """
1088
+ Insert a memory into an agent's archival memory store.
1089
+
1090
+ Parameters
1091
+ ----------
1092
+ agent_id : str
1093
+
1094
+ text : str
1095
+ Text to write to archival memory.
1096
+
1097
+ request_options : typing.Optional[RequestOptions]
1098
+ Request-specific configuration.
1099
+
1100
+ Returns
1101
+ -------
1102
+ typing.List[Passage]
1103
+ Successful Response
1104
+
1105
+ Examples
1106
+ --------
1107
+ from letta_client import Letta
1108
+
1109
+ client = Letta(
1110
+ token="YOUR_TOKEN",
1111
+ )
1112
+ client.agents.create_archival_memory(
1113
+ agent_id="agent_id",
1114
+ text="text",
1115
+ )
1116
+ """
1117
+ _response = self._client_wrapper.httpx_client.request(
1118
+ f"v1/agents/{jsonable_encoder(agent_id)}/archival-memory",
1119
+ method="POST",
1120
+ json={
1121
+ "text": text,
1122
+ },
1123
+ headers={
1124
+ "content-type": "application/json",
1125
+ },
1126
+ request_options=request_options,
1127
+ omit=OMIT,
1128
+ )
1129
+ try:
1130
+ if 200 <= _response.status_code < 300:
1131
+ return typing.cast(
1132
+ typing.List[Passage],
1133
+ construct_type(
1134
+ type_=typing.List[Passage], # type: ignore
1135
+ object_=_response.json(),
1136
+ ),
1137
+ )
1138
+ if _response.status_code == 422:
1139
+ raise UnprocessableEntityError(
1140
+ typing.cast(
1141
+ HttpValidationError,
1142
+ construct_type(
1143
+ type_=HttpValidationError, # type: ignore
1144
+ object_=_response.json(),
1145
+ ),
1146
+ )
1147
+ )
1148
+ _response_json = _response.json()
1149
+ except JSONDecodeError:
1150
+ raise ApiError(status_code=_response.status_code, body=_response.text)
1151
+ raise ApiError(status_code=_response.status_code, body=_response_json)
1152
+
1153
+ def delete_archival_memory(
1154
+ self, agent_id: str, memory_id: str, *, request_options: typing.Optional[RequestOptions] = None
1155
+ ) -> typing.Optional[typing.Any]:
1156
+ """
1157
+ Delete a memory from an agent's archival memory store.
1158
+
1159
+ Parameters
1160
+ ----------
1161
+ agent_id : str
1162
+
1163
+ memory_id : str
1164
+
1165
+ request_options : typing.Optional[RequestOptions]
1166
+ Request-specific configuration.
1167
+
1168
+ Returns
1169
+ -------
1170
+ typing.Optional[typing.Any]
1171
+ Successful Response
1172
+
1173
+ Examples
1174
+ --------
1175
+ from letta_client import Letta
1176
+
1177
+ client = Letta(
1178
+ token="YOUR_TOKEN",
1179
+ )
1180
+ client.agents.delete_archival_memory(
1181
+ agent_id="agent_id",
1182
+ memory_id="memory_id",
1183
+ )
1184
+ """
1185
+ _response = self._client_wrapper.httpx_client.request(
1186
+ f"v1/agents/{jsonable_encoder(agent_id)}/archival-memory/{jsonable_encoder(memory_id)}",
1187
+ method="DELETE",
1188
+ request_options=request_options,
1189
+ )
1190
+ try:
1191
+ if 200 <= _response.status_code < 300:
1192
+ return typing.cast(
1193
+ typing.Optional[typing.Any],
1194
+ construct_type(
1195
+ type_=typing.Optional[typing.Any], # type: ignore
1196
+ object_=_response.json(),
1197
+ ),
1198
+ )
1199
+ if _response.status_code == 422:
1200
+ raise UnprocessableEntityError(
1201
+ typing.cast(
1202
+ HttpValidationError,
1203
+ construct_type(
1204
+ type_=HttpValidationError, # type: ignore
1205
+ object_=_response.json(),
1206
+ ),
1207
+ )
1208
+ )
1209
+ _response_json = _response.json()
1210
+ except JSONDecodeError:
1211
+ raise ApiError(status_code=_response.status_code, body=_response.text)
1212
+ raise ApiError(status_code=_response.status_code, body=_response_json)
1213
+
1214
+ def reset_messages(
1215
+ self,
1216
+ agent_id: str,
1217
+ *,
1218
+ add_default_initial_messages: typing.Optional[bool] = None,
1219
+ request_options: typing.Optional[RequestOptions] = None,
1220
+ ) -> AgentState:
1221
+ """
1222
+ Resets the messages for an agent
1223
+
1224
+ Parameters
1225
+ ----------
1226
+ agent_id : str
1227
+
1228
+ add_default_initial_messages : typing.Optional[bool]
1229
+ If true, adds the default initial messages after resetting.
1230
+
1231
+ request_options : typing.Optional[RequestOptions]
1232
+ Request-specific configuration.
1233
+
1234
+ Returns
1235
+ -------
1236
+ AgentState
1237
+ Successful Response
1238
+
1239
+ Examples
1240
+ --------
1241
+ from letta_client import Letta
1242
+
1243
+ client = Letta(
1244
+ token="YOUR_TOKEN",
1245
+ )
1246
+ client.agents.reset_messages(
1247
+ agent_id="agent_id",
1248
+ )
1249
+ """
1250
+ _response = self._client_wrapper.httpx_client.request(
1251
+ f"v1/agents/{jsonable_encoder(agent_id)}/reset-messages",
1252
+ method="PATCH",
1253
+ params={
1254
+ "add_default_initial_messages": add_default_initial_messages,
1255
+ },
1256
+ request_options=request_options,
1257
+ )
1258
+ try:
1259
+ if 200 <= _response.status_code < 300:
1260
+ return typing.cast(
1261
+ AgentState,
1262
+ construct_type(
1263
+ type_=AgentState, # type: ignore
1264
+ object_=_response.json(),
1265
+ ),
1266
+ )
1267
+ if _response.status_code == 422:
1268
+ raise UnprocessableEntityError(
1269
+ typing.cast(
1270
+ HttpValidationError,
1271
+ construct_type(
1272
+ type_=HttpValidationError, # type: ignore
1273
+ object_=_response.json(),
1274
+ ),
1275
+ )
1276
+ )
1277
+ _response_json = _response.json()
1278
+ except JSONDecodeError:
1279
+ raise ApiError(status_code=_response.status_code, body=_response.text)
1280
+ raise ApiError(status_code=_response.status_code, body=_response_json)
1281
+
1282
+ def search(
1283
+ self,
1284
+ *,
1285
+ search: typing.Optional[typing.Sequence[AgentsSearchRequestSearchItem]] = OMIT,
1286
+ project_id: typing.Optional[str] = OMIT,
1287
+ combinator: typing.Optional[AgentsSearchRequestCombinator] = OMIT,
1288
+ limit: typing.Optional[float] = OMIT,
1289
+ offset: typing.Optional[float] = OMIT,
1290
+ request_options: typing.Optional[RequestOptions] = None,
1291
+ ) -> None:
1292
+ """
1293
+ <Note>This endpoint is only available on Letta Cloud.</Note>
1294
+
1295
+ Search deployed agents.
1296
+
1297
+ Parameters
1298
+ ----------
1299
+ search : typing.Optional[typing.Sequence[AgentsSearchRequestSearchItem]]
1300
+
1301
+ project_id : typing.Optional[str]
1302
+
1303
+ combinator : typing.Optional[AgentsSearchRequestCombinator]
1304
+
1305
+ limit : typing.Optional[float]
1306
+
1307
+ offset : typing.Optional[float]
1308
+
1309
+ request_options : typing.Optional[RequestOptions]
1310
+ Request-specific configuration.
1311
+
1312
+ Returns
1313
+ -------
1314
+ None
1315
+
1316
+ Examples
1317
+ --------
1318
+ from letta_client import Letta
1319
+
1320
+ client = Letta(
1321
+ token="YOUR_TOKEN",
1322
+ )
1323
+ client.agents.search()
1324
+ """
1325
+ _response = self._client_wrapper.httpx_client.request(
1326
+ "v1/agents/search",
1327
+ method="POST",
1328
+ json={
1329
+ "search": convert_and_respect_annotation_metadata(
1330
+ object_=search, annotation=typing.Sequence[AgentsSearchRequestSearchItem], direction="write"
1331
+ ),
1332
+ "project_id": project_id,
1333
+ "combinator": combinator,
1334
+ "limit": limit,
1335
+ "offset": offset,
1336
+ },
1337
+ headers={
1338
+ "content-type": "application/json",
1339
+ },
1340
+ request_options=request_options,
1341
+ omit=OMIT,
1342
+ )
1343
+ try:
1344
+ if 200 <= _response.status_code < 300:
1345
+ return
1346
+ _response_json = _response.json()
1347
+ except JSONDecodeError:
1348
+ raise ApiError(status_code=_response.status_code, body=_response.text)
1349
+ raise ApiError(status_code=_response.status_code, body=_response_json)
1350
+
1351
+
1352
+ class AsyncAgentsClient:
1353
+ def __init__(self, *, client_wrapper: AsyncClientWrapper):
1354
+ self._client_wrapper = client_wrapper
1355
+ self.context = AsyncContextClient(client_wrapper=self._client_wrapper)
1356
+ self.tools = AsyncToolsClient(client_wrapper=self._client_wrapper)
1357
+ self.sources = AsyncSourcesClient(client_wrapper=self._client_wrapper)
1358
+ self.messages = AsyncMessagesClient(client_wrapper=self._client_wrapper)
1359
+ self.templates = AsyncTemplatesClient(client_wrapper=self._client_wrapper)
1360
+ self.memory_variables = AsyncMemoryVariablesClient(client_wrapper=self._client_wrapper)
1361
+ self.core_memory = AsyncCoreMemoryClient(client_wrapper=self._client_wrapper)
1362
+ self.archival_memory = AsyncArchivalMemoryClient(client_wrapper=self._client_wrapper)
1363
+
1364
+ async def list(
1365
+ self,
1366
+ *,
1367
+ name: typing.Optional[str] = None,
1368
+ tags: typing.Optional[typing.Union[str, typing.Sequence[str]]] = None,
1369
+ match_all_tags: typing.Optional[bool] = None,
1370
+ cursor: typing.Optional[str] = None,
1371
+ limit: typing.Optional[int] = None,
1372
+ query_text: typing.Optional[str] = None,
1373
+ request_options: typing.Optional[RequestOptions] = None,
1374
+ ) -> typing.List[AgentState]:
1375
+ """
1376
+ List all agents associated with a given user.
1377
+ This endpoint retrieves a list of all agents and their configurations associated with the specified user ID.
1378
+
1379
+ Parameters
1380
+ ----------
1381
+ name : typing.Optional[str]
1382
+ Name of the agent
1383
+
1384
+ tags : typing.Optional[typing.Union[str, typing.Sequence[str]]]
1385
+ List of tags to filter agents by
1386
+
1387
+ match_all_tags : typing.Optional[bool]
1388
+ If True, only returns agents that match ALL given tags. Otherwise, return agents that have ANY of the passed in tags.
1389
+
1390
+ cursor : typing.Optional[str]
1391
+ Cursor for pagination
1392
+
1393
+ limit : typing.Optional[int]
1394
+ Limit for pagination
1395
+
1396
+ query_text : typing.Optional[str]
1397
+ Search agents by name
1398
+
1399
+ request_options : typing.Optional[RequestOptions]
1400
+ Request-specific configuration.
1401
+
1402
+ Returns
1403
+ -------
1404
+ typing.List[AgentState]
1405
+ Successful Response
1406
+
1407
+ Examples
1408
+ --------
1409
+ import asyncio
1410
+
1411
+ from letta_client import AsyncLetta
1412
+
1413
+ client = AsyncLetta(
1414
+ token="YOUR_TOKEN",
1415
+ )
1416
+
1417
+
1418
+ async def main() -> None:
1419
+ await client.agents.list()
1420
+
1421
+
1422
+ asyncio.run(main())
1423
+ """
1424
+ _response = await self._client_wrapper.httpx_client.request(
1425
+ "v1/agents/",
1426
+ method="GET",
1427
+ params={
1428
+ "name": name,
1429
+ "tags": tags,
1430
+ "match_all_tags": match_all_tags,
1431
+ "cursor": cursor,
1432
+ "limit": limit,
1433
+ "query_text": query_text,
1434
+ },
1435
+ request_options=request_options,
1436
+ )
1437
+ try:
1438
+ if 200 <= _response.status_code < 300:
1439
+ return typing.cast(
1440
+ typing.List[AgentState],
1441
+ construct_type(
1442
+ type_=typing.List[AgentState], # type: ignore
1443
+ object_=_response.json(),
1444
+ ),
1445
+ )
1446
+ if _response.status_code == 422:
1447
+ raise UnprocessableEntityError(
1448
+ typing.cast(
1449
+ HttpValidationError,
1450
+ construct_type(
1451
+ type_=HttpValidationError, # type: ignore
1452
+ object_=_response.json(),
1453
+ ),
1454
+ )
1455
+ )
1456
+ _response_json = _response.json()
1457
+ except JSONDecodeError:
1458
+ raise ApiError(status_code=_response.status_code, body=_response.text)
1459
+ raise ApiError(status_code=_response.status_code, body=_response_json)
1460
+
1461
+ async def create(
1462
+ self,
1463
+ *,
1464
+ name: typing.Optional[str] = OMIT,
1465
+ memory_blocks: typing.Optional[typing.Sequence[CreateBlock]] = OMIT,
1466
+ tools: typing.Optional[typing.Sequence[str]] = OMIT,
1467
+ tool_ids: typing.Optional[typing.Sequence[str]] = OMIT,
1468
+ source_ids: typing.Optional[typing.Sequence[str]] = OMIT,
1469
+ block_ids: typing.Optional[typing.Sequence[str]] = OMIT,
1470
+ tool_rules: typing.Optional[typing.Sequence[CreateAgentRequestToolRulesItem]] = OMIT,
1471
+ tags: typing.Optional[typing.Sequence[str]] = OMIT,
1472
+ system: typing.Optional[str] = OMIT,
1473
+ agent_type: typing.Optional[AgentType] = OMIT,
1474
+ llm_config: typing.Optional[LlmConfig] = OMIT,
1475
+ embedding_config: typing.Optional[EmbeddingConfig] = OMIT,
1476
+ initial_message_sequence: typing.Optional[typing.Sequence[MessageCreate]] = OMIT,
1477
+ include_base_tools: typing.Optional[bool] = OMIT,
1478
+ include_multi_agent_tools: typing.Optional[bool] = OMIT,
1479
+ description: typing.Optional[str] = OMIT,
1480
+ metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
1481
+ model: typing.Optional[str] = OMIT,
1482
+ embedding: typing.Optional[str] = OMIT,
1483
+ context_window_limit: typing.Optional[int] = OMIT,
1484
+ embedding_chunk_size: typing.Optional[int] = OMIT,
1485
+ from_template: typing.Optional[str] = OMIT,
1486
+ template: typing.Optional[bool] = OMIT,
1487
+ project: typing.Optional[str] = OMIT,
1488
+ tool_exec_environment_variables: typing.Optional[typing.Dict[str, typing.Optional[str]]] = OMIT,
1489
+ memory_variables: typing.Optional[typing.Dict[str, typing.Optional[str]]] = OMIT,
1490
+ request_options: typing.Optional[RequestOptions] = None,
1491
+ ) -> AgentState:
1492
+ """
1493
+ Create a new agent with the specified configuration.
1494
+
1495
+ Parameters
1496
+ ----------
1497
+ name : typing.Optional[str]
1498
+ The name of the agent.
1499
+
1500
+ memory_blocks : typing.Optional[typing.Sequence[CreateBlock]]
1501
+ The blocks to create in the agent's in-context memory.
1502
+
1503
+ tools : typing.Optional[typing.Sequence[str]]
1504
+ The tools used by the agent.
1505
+
1506
+ tool_ids : typing.Optional[typing.Sequence[str]]
1507
+ The ids of the tools used by the agent.
1508
+
1509
+ source_ids : typing.Optional[typing.Sequence[str]]
1510
+ The ids of the sources used by the agent.
1511
+
1512
+ block_ids : typing.Optional[typing.Sequence[str]]
1513
+ The ids of the blocks used by the agent.
1514
+
1515
+ tool_rules : typing.Optional[typing.Sequence[CreateAgentRequestToolRulesItem]]
1516
+ The tool rules governing the agent.
1517
+
1518
+ tags : typing.Optional[typing.Sequence[str]]
1519
+ The tags associated with the agent.
1520
+
1521
+ system : typing.Optional[str]
1522
+ The system prompt used by the agent.
1523
+
1524
+ agent_type : typing.Optional[AgentType]
1525
+ The type of agent.
1526
+
1527
+ llm_config : typing.Optional[LlmConfig]
1528
+ The LLM configuration used by the agent.
1529
+
1530
+ embedding_config : typing.Optional[EmbeddingConfig]
1531
+ The embedding configuration used by the agent.
1532
+
1533
+ initial_message_sequence : typing.Optional[typing.Sequence[MessageCreate]]
1534
+ The initial set of messages to put in the agent's in-context memory.
1535
+
1536
+ include_base_tools : typing.Optional[bool]
1537
+ If true, attaches the Letta core tools (e.g. archival_memory and core_memory related functions).
1538
+
1539
+ include_multi_agent_tools : typing.Optional[bool]
1540
+ If true, attaches the Letta multi-agent tools (e.g. sending a message to another agent).
1541
+
1542
+ description : typing.Optional[str]
1543
+ The description of the agent.
1544
+
1545
+ metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
1546
+ The metadata of the agent.
1547
+
1548
+ model : typing.Optional[str]
1549
+ The LLM configuration handle used by the agent, specified in the format provider/model-name, as an alternative to specifying llm_config.
1550
+
1551
+ embedding : typing.Optional[str]
1552
+ The embedding configuration handle used by the agent, specified in the format provider/model-name.
1553
+
1554
+ context_window_limit : typing.Optional[int]
1555
+ The context window limit used by the agent.
1556
+
1557
+ embedding_chunk_size : typing.Optional[int]
1558
+ The embedding chunk size used by the agent.
1559
+
1560
+ from_template : typing.Optional[str]
1561
+ The template id used to configure the agent
1562
+
1563
+ template : typing.Optional[bool]
1564
+ Whether the agent is a template
1565
+
1566
+ project : typing.Optional[str]
1567
+ The project slug that the agent will be associated with.
1568
+
1569
+ tool_exec_environment_variables : typing.Optional[typing.Dict[str, typing.Optional[str]]]
1570
+ The environment variables for tool execution specific to this agent.
1571
+
1572
+ memory_variables : typing.Optional[typing.Dict[str, typing.Optional[str]]]
1573
+ The variables that should be set for the agent.
1574
+
1575
+ request_options : typing.Optional[RequestOptions]
1576
+ Request-specific configuration.
1577
+
1578
+ Returns
1579
+ -------
1580
+ AgentState
1581
+ Successful Response
1582
+
1583
+ Examples
1584
+ --------
1585
+ import asyncio
1586
+
1587
+ from letta_client import AsyncLetta
1588
+
1589
+ client = AsyncLetta(
1590
+ token="YOUR_TOKEN",
1591
+ )
1592
+
1593
+
1594
+ async def main() -> None:
1595
+ await client.agents.create()
1596
+
1597
+
1598
+ asyncio.run(main())
1599
+ """
1600
+ _response = await self._client_wrapper.httpx_client.request(
1601
+ "v1/agents/",
1602
+ method="POST",
1603
+ json={
1604
+ "name": name,
1605
+ "memory_blocks": convert_and_respect_annotation_metadata(
1606
+ object_=memory_blocks, annotation=typing.Sequence[CreateBlock], direction="write"
1607
+ ),
1608
+ "tools": tools,
1609
+ "tool_ids": tool_ids,
1610
+ "source_ids": source_ids,
1611
+ "block_ids": block_ids,
1612
+ "tool_rules": convert_and_respect_annotation_metadata(
1613
+ object_=tool_rules, annotation=typing.Sequence[CreateAgentRequestToolRulesItem], direction="write"
1614
+ ),
1615
+ "tags": tags,
1616
+ "system": system,
1617
+ "agent_type": agent_type,
1618
+ "llm_config": convert_and_respect_annotation_metadata(
1619
+ object_=llm_config, annotation=LlmConfig, direction="write"
1620
+ ),
1621
+ "embedding_config": convert_and_respect_annotation_metadata(
1622
+ object_=embedding_config, annotation=EmbeddingConfig, direction="write"
1623
+ ),
1624
+ "initial_message_sequence": convert_and_respect_annotation_metadata(
1625
+ object_=initial_message_sequence, annotation=typing.Sequence[MessageCreate], direction="write"
1626
+ ),
1627
+ "include_base_tools": include_base_tools,
1628
+ "include_multi_agent_tools": include_multi_agent_tools,
1629
+ "description": description,
1630
+ "metadata": metadata,
1631
+ "model": model,
1632
+ "embedding": embedding,
1633
+ "context_window_limit": context_window_limit,
1634
+ "embedding_chunk_size": embedding_chunk_size,
1635
+ "from_template": from_template,
1636
+ "template": template,
1637
+ "project": project,
1638
+ "tool_exec_environment_variables": tool_exec_environment_variables,
1639
+ "memory_variables": memory_variables,
1640
+ },
1641
+ headers={
1642
+ "content-type": "application/json",
1643
+ },
1644
+ request_options=request_options,
1645
+ omit=OMIT,
1646
+ )
1647
+ try:
1648
+ if 200 <= _response.status_code < 300:
1649
+ return typing.cast(
1650
+ AgentState,
1651
+ construct_type(
1652
+ type_=AgentState, # type: ignore
1653
+ object_=_response.json(),
1654
+ ),
1655
+ )
1656
+ if _response.status_code == 422:
1657
+ raise UnprocessableEntityError(
1658
+ typing.cast(
1659
+ HttpValidationError,
1660
+ construct_type(
1661
+ type_=HttpValidationError, # type: ignore
1662
+ object_=_response.json(),
1663
+ ),
1664
+ )
1665
+ )
1666
+ _response_json = _response.json()
1667
+ except JSONDecodeError:
1668
+ raise ApiError(status_code=_response.status_code, body=_response.text)
1669
+ raise ApiError(status_code=_response.status_code, body=_response_json)
1670
+
1671
+ async def retrieve(self, agent_id: str, *, request_options: typing.Optional[RequestOptions] = None) -> AgentState:
1672
+ """
1673
+ Get the state of the agent.
1674
+
1675
+ Parameters
1676
+ ----------
1677
+ agent_id : str
1678
+
1679
+ request_options : typing.Optional[RequestOptions]
1680
+ Request-specific configuration.
1681
+
1682
+ Returns
1683
+ -------
1684
+ AgentState
1685
+ Successful Response
1686
+
1687
+ Examples
1688
+ --------
1689
+ import asyncio
1690
+
1691
+ from letta_client import AsyncLetta
1692
+
1693
+ client = AsyncLetta(
1694
+ token="YOUR_TOKEN",
1695
+ )
1696
+
1697
+
1698
+ async def main() -> None:
1699
+ await client.agents.retrieve(
1700
+ agent_id="agent_id",
1701
+ )
1702
+
1703
+
1704
+ asyncio.run(main())
1705
+ """
1706
+ _response = await self._client_wrapper.httpx_client.request(
1707
+ f"v1/agents/{jsonable_encoder(agent_id)}",
1708
+ method="GET",
1709
+ request_options=request_options,
1710
+ )
1711
+ try:
1712
+ if 200 <= _response.status_code < 300:
1713
+ return typing.cast(
1714
+ AgentState,
1715
+ construct_type(
1716
+ type_=AgentState, # type: ignore
1717
+ object_=_response.json(),
1718
+ ),
1719
+ )
1720
+ if _response.status_code == 422:
1721
+ raise UnprocessableEntityError(
1722
+ typing.cast(
1723
+ HttpValidationError,
1724
+ construct_type(
1725
+ type_=HttpValidationError, # type: ignore
1726
+ object_=_response.json(),
1727
+ ),
1728
+ )
1729
+ )
1730
+ _response_json = _response.json()
1731
+ except JSONDecodeError:
1732
+ raise ApiError(status_code=_response.status_code, body=_response.text)
1733
+ raise ApiError(status_code=_response.status_code, body=_response_json)
1734
+
1735
+ async def delete(
1736
+ self, agent_id: str, *, request_options: typing.Optional[RequestOptions] = None
1737
+ ) -> typing.Optional[typing.Any]:
1738
+ """
1739
+ Delete an agent.
1740
+
1741
+ Parameters
1742
+ ----------
1743
+ agent_id : str
1744
+
1745
+ request_options : typing.Optional[RequestOptions]
1746
+ Request-specific configuration.
1747
+
1748
+ Returns
1749
+ -------
1750
+ typing.Optional[typing.Any]
1751
+ Successful Response
1752
+
1753
+ Examples
1754
+ --------
1755
+ import asyncio
1756
+
1757
+ from letta_client import AsyncLetta
1758
+
1759
+ client = AsyncLetta(
1760
+ token="YOUR_TOKEN",
1761
+ )
1762
+
1763
+
1764
+ async def main() -> None:
1765
+ await client.agents.delete(
1766
+ agent_id="agent_id",
1767
+ )
1768
+
1769
+
1770
+ asyncio.run(main())
1771
+ """
1772
+ _response = await self._client_wrapper.httpx_client.request(
1773
+ f"v1/agents/{jsonable_encoder(agent_id)}",
1774
+ method="DELETE",
1775
+ request_options=request_options,
1776
+ )
1777
+ try:
1778
+ if 200 <= _response.status_code < 300:
1779
+ return typing.cast(
1780
+ typing.Optional[typing.Any],
1781
+ construct_type(
1782
+ type_=typing.Optional[typing.Any], # type: ignore
1783
+ object_=_response.json(),
1784
+ ),
1785
+ )
1786
+ if _response.status_code == 422:
1787
+ raise UnprocessableEntityError(
1788
+ typing.cast(
1789
+ HttpValidationError,
1790
+ construct_type(
1791
+ type_=HttpValidationError, # type: ignore
1792
+ object_=_response.json(),
1793
+ ),
1794
+ )
1795
+ )
1796
+ _response_json = _response.json()
1797
+ except JSONDecodeError:
1798
+ raise ApiError(status_code=_response.status_code, body=_response.text)
1799
+ raise ApiError(status_code=_response.status_code, body=_response_json)
1800
+
1801
+ async def modify(
1802
+ self,
1803
+ agent_id: str,
1804
+ *,
1805
+ name: typing.Optional[str] = OMIT,
1806
+ tool_ids: typing.Optional[typing.Sequence[str]] = OMIT,
1807
+ source_ids: typing.Optional[typing.Sequence[str]] = OMIT,
1808
+ block_ids: typing.Optional[typing.Sequence[str]] = OMIT,
1809
+ tags: typing.Optional[typing.Sequence[str]] = OMIT,
1810
+ system: typing.Optional[str] = OMIT,
1811
+ tool_rules: typing.Optional[typing.Sequence[UpdateAgentToolRulesItem]] = OMIT,
1812
+ llm_config: typing.Optional[LlmConfig] = OMIT,
1813
+ embedding_config: typing.Optional[EmbeddingConfig] = OMIT,
1814
+ message_ids: typing.Optional[typing.Sequence[str]] = OMIT,
1815
+ description: typing.Optional[str] = OMIT,
1816
+ metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
1817
+ tool_exec_environment_variables: typing.Optional[typing.Dict[str, typing.Optional[str]]] = OMIT,
1818
+ request_options: typing.Optional[RequestOptions] = None,
1819
+ ) -> AgentState:
1820
+ """
1821
+ Update an existing agent
1822
+
1823
+ Parameters
1824
+ ----------
1825
+ agent_id : str
1826
+
1827
+ name : typing.Optional[str]
1828
+ The name of the agent.
1829
+
1830
+ tool_ids : typing.Optional[typing.Sequence[str]]
 The ids of the tools used by the agent.

- source_ids : typing.Optional[typing.Sequence[str]]
- The ids of the sources used by the agent.
+ source_ids : typing.Optional[typing.Sequence[str]]
+ The ids of the sources used by the agent.
+
+ block_ids : typing.Optional[typing.Sequence[str]]
+ The ids of the blocks used by the agent.
+
+ tags : typing.Optional[typing.Sequence[str]]
+ The tags associated with the agent.
+
+ system : typing.Optional[str]
+ The system prompt used by the agent.
+
+ tool_rules : typing.Optional[typing.Sequence[UpdateAgentToolRulesItem]]
+ The tool rules governing the agent.
+
+ llm_config : typing.Optional[LlmConfig]
+ The LLM configuration used by the agent.
+
+ embedding_config : typing.Optional[EmbeddingConfig]
+ The embedding configuration used by the agent.
+
+ message_ids : typing.Optional[typing.Sequence[str]]
+ The ids of the messages in the agent's in-context memory.
+
+ description : typing.Optional[str]
+ The description of the agent.
+
+ metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
+ The metadata of the agent.
+
+ tool_exec_environment_variables : typing.Optional[typing.Dict[str, typing.Optional[str]]]
+ The environment variables for tool execution specific to this agent.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AgentState
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from letta_client import AsyncLetta
+
+ client = AsyncLetta(
+ token="YOUR_TOKEN",
+ )
+
+
+ async def main() -> None:
+ await client.agents.modify(
+ agent_id="agent_id",
+ )
+
+
+ asyncio.run(main())
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ f"v1/agents/{jsonable_encoder(agent_id)}",
+ method="PATCH",
+ json={
+ "name": name,
+ "tool_ids": tool_ids,
+ "source_ids": source_ids,
+ "block_ids": block_ids,
+ "tags": tags,
+ "system": system,
+ "tool_rules": convert_and_respect_annotation_metadata(
+ object_=tool_rules, annotation=typing.Sequence[UpdateAgentToolRulesItem], direction="write"
+ ),
+ "llm_config": convert_and_respect_annotation_metadata(
+ object_=llm_config, annotation=LlmConfig, direction="write"
+ ),
+ "embedding_config": convert_and_respect_annotation_metadata(
+ object_=embedding_config, annotation=EmbeddingConfig, direction="write"
+ ),
+ "message_ids": message_ids,
+ "description": description,
+ "metadata": metadata,
+ "tool_exec_environment_variables": tool_exec_environment_variables,
+ },
+ headers={
+ "content-type": "application/json",
+ },
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(
+ AgentState,
+ construct_type(
+ type_=AgentState, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def retrieve_agent_memory(
+ self, agent_id: str, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> Memory:
+ """
+ Retrieve the memory state of a specific agent.
+ This endpoint fetches the current memory state of the agent identified by the user ID and agent ID.

- block_ids : typing.Optional[typing.Sequence[str]]
- The ids of the blocks used by the agent.
+ Parameters
+ ----------
+ agent_id : str

- tool_rules : typing.Optional[typing.Sequence[CreateAgentRequestToolRulesItem]]
- The tool rules governing the agent.
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.

- tags : typing.Optional[typing.Sequence[str]]
- The tags associated with the agent.
+ Returns
+ -------
+ Memory
+ Successful Response

- system : typing.Optional[str]
- The system prompt used by the agent.
+ Examples
+ --------
+ import asyncio

- agent_type : typing.Optional[AgentType]
- The type of agent.
+ from letta_client import AsyncLetta

- llm_config : typing.Optional[LlmConfig]
- The LLM configuration used by the agent.
+ client = AsyncLetta(
+ token="YOUR_TOKEN",
+ )

- embedding_config : typing.Optional[EmbeddingConfig]
- The embedding configuration used by the agent.

- initial_message_sequence : typing.Optional[typing.Sequence[MessageCreate]]
- The initial set of messages to put in the agent's in-context memory.
+ async def main() -> None:
+ await client.agents.retrieve_agent_memory(
+ agent_id="agent_id",
+ )

- include_base_tools : typing.Optional[bool]
- If true, attaches the Letta core tools (e.g. archival_memory and core_memory related functions).

- include_multi_agent_tools : typing.Optional[bool]
- If true, attaches the Letta multi-agent tools (e.g. sending a message to another agent).
+ asyncio.run(main())
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ f"v1/agents/{jsonable_encoder(agent_id)}/core-memory",
+ method="GET",
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(
+ Memory,
+ construct_type(
+ type_=Memory, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def retrieve_core_memory_block(
+ self, agent_id: str, block_label: str, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> Block:
+ """
+ Retrieve a memory block from an agent.
+
+ Parameters
+ ----------
+ agent_id : str
+
+ block_label : str
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ Block
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from letta_client import AsyncLetta
+
+ client = AsyncLetta(
+ token="YOUR_TOKEN",
+ )
+
+
+ async def main() -> None:
+ await client.agents.retrieve_core_memory_block(
+ agent_id="agent_id",
+ block_label="block_label",
+ )
+
+
+ asyncio.run(main())
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ f"v1/agents/{jsonable_encoder(agent_id)}/core-memory/blocks/{jsonable_encoder(block_label)}",
+ method="GET",
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(
+ Block,
+ construct_type(
+ type_=Block, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def modify_core_memory_block(
+ self,
+ agent_id: str,
+ block_label: str,
+ *,
+ value: typing.Optional[str] = OMIT,
+ limit: typing.Optional[int] = OMIT,
+ name: typing.Optional[str] = OMIT,
+ is_template: typing.Optional[bool] = OMIT,
+ label: typing.Optional[str] = OMIT,
+ description: typing.Optional[str] = OMIT,
+ metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> Block:
+ """
+ Updates a memory block of an agent.
+
+ Parameters
+ ----------
+ agent_id : str
+
+ block_label : str
+
+ value : typing.Optional[str]
+ Value of the block.
+
+ limit : typing.Optional[int]
+ Character limit of the block.
+
+ name : typing.Optional[str]
+ Name of the block if it is a template.
+
+ is_template : typing.Optional[bool]
+ Whether the block is a template (e.g. saved human/persona options).
+
+ label : typing.Optional[str]
+ Label of the block (e.g. 'human', 'persona') in the context window.

 description : typing.Optional[str]
- The description of the agent.
+ Description of the block.

 metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
- The metadata of the agent.
+ Metadata of the block.

- llm : typing.Optional[str]
- The LLM configuration handle used by the agent, specified in the format provider/model-name, as an alternative to specifying llm_config.
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.

- embedding : typing.Optional[str]
- The embedding configuration handle used by the agent, specified in the format provider/model-name.
+ Returns
+ -------
+ Block
+ Successful Response

- context_window_limit : typing.Optional[int]
- The context window limit used by the agent.
+ Examples
+ --------
+ import asyncio

- embedding_chunk_size : typing.Optional[int]
- The embedding chunk size used by the agent.
+ from letta_client import AsyncLetta

- from_template : typing.Optional[str]
- The template id used to configure the agent
+ client = AsyncLetta(
+ token="YOUR_TOKEN",
+ )

- template : typing.Optional[bool]
- Whether the agent is a template

- project : typing.Optional[str]
- The project slug that the agent will be associated with.
+ async def main() -> None:
+ await client.agents.modify_core_memory_block(
+ agent_id="agent_id",
+ block_label="block_label",
+ )

- tool_exec_environment_variables : typing.Optional[typing.Dict[str, typing.Optional[str]]]
- The environment variables for tool execution specific to this agent.

- variables : typing.Optional[typing.Dict[str, typing.Optional[str]]]
- The variables that should be set for the agent.
+ asyncio.run(main())
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ f"v1/agents/{jsonable_encoder(agent_id)}/core-memory/blocks/{jsonable_encoder(block_label)}",
+ method="PATCH",
+ json={
+ "value": value,
+ "limit": limit,
+ "name": name,
+ "is_template": is_template,
+ "label": label,
+ "description": description,
+ "metadata": metadata,
+ },
+ request_options=request_options,
+ omit=OMIT,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(
+ Block,
+ construct_type(
+ type_=Block, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def list_core_memory_blocks(
+ self, agent_id: str, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> typing.List[Block]:
+ """
+ Retrieve the memory blocks of a specific agent.
+
+ Parameters
+ ----------
+ agent_id : str

 request_options : typing.Optional[RequestOptions]
 Request-specific configuration.

 Returns
 -------
- AgentState
+ typing.List[Block]
 Successful Response

 Examples
@@ -982,64 +2221,24 @@ class AsyncAgentsClient:


 async def main() -> None:
- await client.agents.create()
+ await client.agents.list_core_memory_blocks(
+ agent_id="agent_id",
+ )


 asyncio.run(main())
 """
 _response = await self._client_wrapper.httpx_client.request(
- "v1/agents/",
- method="POST",
- json={
- "name": name,
- "memory_blocks": convert_and_respect_annotation_metadata(
- object_=memory_blocks, annotation=typing.Sequence[CreateBlock], direction="write"
- ),
- "tools": tools,
- "tool_ids": tool_ids,
- "source_ids": source_ids,
- "block_ids": block_ids,
- "tool_rules": convert_and_respect_annotation_metadata(
- object_=tool_rules, annotation=typing.Sequence[CreateAgentRequestToolRulesItem], direction="write"
- ),
- "tags": tags,
- "system": system,
- "agent_type": agent_type,
- "llm_config": convert_and_respect_annotation_metadata(
- object_=llm_config, annotation=LlmConfig, direction="write"
- ),
- "embedding_config": convert_and_respect_annotation_metadata(
- object_=embedding_config, annotation=EmbeddingConfig, direction="write"
- ),
- "initial_message_sequence": convert_and_respect_annotation_metadata(
- object_=initial_message_sequence, annotation=typing.Sequence[MessageCreate], direction="write"
- ),
- "include_base_tools": include_base_tools,
- "include_multi_agent_tools": include_multi_agent_tools,
- "description": description,
- "metadata_": metadata,
- "llm": llm,
- "embedding": embedding,
- "context_window_limit": context_window_limit,
- "embedding_chunk_size": embedding_chunk_size,
- "from_template": from_template,
- "template": template,
- "project": project,
- "tool_exec_environment_variables": tool_exec_environment_variables,
- "variables": variables,
- },
- headers={
- "content-type": "application/json",
- },
+ f"v1/agents/{jsonable_encoder(agent_id)}/core-memory/blocks",
+ method="GET",
 request_options=request_options,
- omit=OMIT,
 )
 try:
 if 200 <= _response.status_code < 300:
 return typing.cast(
- AgentState,
+ typing.List[Block],
 construct_type(
- type_=AgentState, # type: ignore
+ type_=typing.List[Block], # type: ignore
 object_=_response.json(),
 ),
 )
@@ -1058,14 +2257,18 @@ class AsyncAgentsClient:
 raise ApiError(status_code=_response.status_code, body=_response.text)
 raise ApiError(status_code=_response.status_code, body=_response_json)

- async def get(self, agent_id: str, *, request_options: typing.Optional[RequestOptions] = None) -> AgentState:
+ async def attach_core_memory_block(
+ self, agent_id: str, block_id: str, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> AgentState:
 """
- Get the state of the agent.
+ Attach a block to an agent.

 Parameters
 ----------
 agent_id : str

+ block_id : str
+
 request_options : typing.Optional[RequestOptions]
 Request-specific configuration.

@@ -1086,16 +2289,17 @@ class AsyncAgentsClient:


 async def main() -> None:
- await client.agents.get(
+ await client.agents.attach_core_memory_block(
 agent_id="agent_id",
+ block_id="block_id",
 )


 asyncio.run(main())
 """
 _response = await self._client_wrapper.httpx_client.request(
- f"v1/agents/{jsonable_encoder(agent_id)}",
- method="GET",
+ f"v1/agents/{jsonable_encoder(agent_id)}/core-memory/blocks/attach/{jsonable_encoder(block_id)}",
+ method="PATCH",
 request_options=request_options,
 )
 try:
@@ -1122,22 +2326,24 @@ class AsyncAgentsClient:
 raise ApiError(status_code=_response.status_code, body=_response.text)
 raise ApiError(status_code=_response.status_code, body=_response_json)

- async def delete(
- self, agent_id: str, *, request_options: typing.Optional[RequestOptions] = None
- ) -> typing.Optional[typing.Any]:
+ async def detach_core_memory_block(
+ self, agent_id: str, block_id: str, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> AgentState:
 """
- Delete an agent.
+ Detach a block from an agent.

 Parameters
 ----------
 agent_id : str

+ block_id : str
+
 request_options : typing.Optional[RequestOptions]
 Request-specific configuration.

 Returns
 -------
- typing.Optional[typing.Any]
+ AgentState
 Successful Response

 Examples
@@ -1152,24 +2358,25 @@ class AsyncAgentsClient:


 async def main() -> None:
- await client.agents.delete(
+ await client.agents.detach_core_memory_block(
 agent_id="agent_id",
+ block_id="block_id",
 )


 asyncio.run(main())
 """
 _response = await self._client_wrapper.httpx_client.request(
- f"v1/agents/{jsonable_encoder(agent_id)}",
- method="DELETE",
+ f"v1/agents/{jsonable_encoder(agent_id)}/core-memory/blocks/detach/{jsonable_encoder(block_id)}",
+ method="PATCH",
 request_options=request_options,
 )
 try:
 if 200 <= _response.status_code < 300:
 return typing.cast(
- typing.Optional[typing.Any],
+ AgentState,
 construct_type(
- type_=typing.Optional[typing.Any], # type: ignore
+ type_=AgentState, # type: ignore
 object_=_response.json(),
 ),
 )
@@ -1188,77 +2395,111 @@ class AsyncAgentsClient:
 raise ApiError(status_code=_response.status_code, body=_response.text)
 raise ApiError(status_code=_response.status_code, body=_response_json)

- async def update(
+ async def list_archival_memory(
 self,
 agent_id: str,
 *,
- name: typing.Optional[str] = OMIT,
- tool_ids: typing.Optional[typing.Sequence[str]] = OMIT,
- source_ids: typing.Optional[typing.Sequence[str]] = OMIT,
- block_ids: typing.Optional[typing.Sequence[str]] = OMIT,
- tags: typing.Optional[typing.Sequence[str]] = OMIT,
- system: typing.Optional[str] = OMIT,
- tool_rules: typing.Optional[typing.Sequence[UpdateAgentToolRulesItem]] = OMIT,
- llm_config: typing.Optional[LlmConfig] = OMIT,
- embedding_config: typing.Optional[EmbeddingConfig] = OMIT,
- message_ids: typing.Optional[typing.Sequence[str]] = OMIT,
- description: typing.Optional[str] = OMIT,
- metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
- tool_exec_environment_variables: typing.Optional[typing.Dict[str, typing.Optional[str]]] = OMIT,
+ after: typing.Optional[int] = None,
+ before: typing.Optional[int] = None,
+ limit: typing.Optional[int] = None,
 request_options: typing.Optional[RequestOptions] = None,
- ) -> AgentState:
+ ) -> typing.List[Passage]:
 """
- Update an exsiting agent
+ Retrieve the memories in an agent's archival memory store (paginated query).

 Parameters
 ----------
 agent_id : str

- name : typing.Optional[str]
- The name of the agent.
+ after : typing.Optional[int]
+ Unique ID of the memory to start the query range at.

- tool_ids : typing.Optional[typing.Sequence[str]]
- The ids of the tools used by the agent.
+ before : typing.Optional[int]
+ Unique ID of the memory to end the query range at.

- source_ids : typing.Optional[typing.Sequence[str]]
- The ids of the sources used by the agent.
+ limit : typing.Optional[int]
+ How many results to include in the response.

- block_ids : typing.Optional[typing.Sequence[str]]
- The ids of the blocks used by the agent.
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.

- tags : typing.Optional[typing.Sequence[str]]
- The tags associated with the agent.
+ Returns
+ -------
+ typing.List[Passage]
+ Successful Response

- system : typing.Optional[str]
- The system prompt used by the agent.
+ Examples
+ --------
+ import asyncio

- tool_rules : typing.Optional[typing.Sequence[UpdateAgentToolRulesItem]]
- The tool rules governing the agent.
+ from letta_client import AsyncLetta

- llm_config : typing.Optional[LlmConfig]
- The LLM configuration used by the agent.
+ client = AsyncLetta(
+ token="YOUR_TOKEN",
+ )

- embedding_config : typing.Optional[EmbeddingConfig]
- The embedding configuration used by the agent.

- message_ids : typing.Optional[typing.Sequence[str]]
- The ids of the messages in the agent's in-context memory.
+ async def main() -> None:
+ await client.agents.list_archival_memory(
+ agent_id="agent_id",
+ )

- description : typing.Optional[str]
- The description of the agent.

- metadata : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
- The metadata of the agent.
+ asyncio.run(main())
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ f"v1/agents/{jsonable_encoder(agent_id)}/archival-memory",
+ method="GET",
+ params={
+ "after": after,
+ "before": before,
+ "limit": limit,
+ },
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(
+ typing.List[Passage],
+ construct_type(
+ type_=typing.List[Passage], # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)

- tool_exec_environment_variables : typing.Optional[typing.Dict[str, typing.Optional[str]]]
- The environment variables for tool execution specific to this agent.
+ async def create_archival_memory(
+ self, agent_id: str, *, text: str, request_options: typing.Optional[RequestOptions] = None
+ ) -> typing.List[Passage]:
+ """
+ Insert a memory into an agent's archival memory store.
+
+ Parameters
+ ----------
+ agent_id : str
+
+ text : str
+ Text to write to archival memory.

 request_options : typing.Optional[RequestOptions]
 Request-specific configuration.

 Returns
 -------
- AgentState
+ typing.List[Passage]
 Successful Response

 Examples
@@ -1273,36 +2514,19 @@ class AsyncAgentsClient:


 async def main() -> None:
- await client.agents.update(
+ await client.agents.create_archival_memory(
 agent_id="agent_id",
+ text="text",
 )


 asyncio.run(main())
 """
 _response = await self._client_wrapper.httpx_client.request(
- f"v1/agents/{jsonable_encoder(agent_id)}",
- method="PATCH",
+ f"v1/agents/{jsonable_encoder(agent_id)}/archival-memory",
+ method="POST",
 json={
- "name": name,
- "tool_ids": tool_ids,
- "source_ids": source_ids,
- "block_ids": block_ids,
- "tags": tags,
- "system": system,
- "tool_rules": convert_and_respect_annotation_metadata(
- object_=tool_rules, annotation=typing.Sequence[UpdateAgentToolRulesItem], direction="write"
- ),
- "llm_config": convert_and_respect_annotation_metadata(
- object_=llm_config, annotation=LlmConfig, direction="write"
- ),
- "embedding_config": convert_and_respect_annotation_metadata(
- object_=embedding_config, annotation=EmbeddingConfig, direction="write"
- ),
- "message_ids": message_ids,
- "description": description,
- "metadata_": metadata,
- "tool_exec_environment_variables": tool_exec_environment_variables,
+ "text": text,
 },
 headers={
 "content-type": "application/json",
@@ -1313,9 +2537,78 @@ class AsyncAgentsClient:
 try:
 if 200 <= _response.status_code < 300:
 return typing.cast(
- AgentState,
+ typing.List[Passage],
 construct_type(
- type_=AgentState, # type: ignore
+ type_=typing.List[Passage], # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
+ async def delete_archival_memory(
+ self, agent_id: str, memory_id: str, *, request_options: typing.Optional[RequestOptions] = None
+ ) -> typing.Optional[typing.Any]:
+ """
+ Delete a memory from an agent's archival memory store.
+
+ Parameters
+ ----------
+ agent_id : str
+
+ memory_id : str
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ typing.Optional[typing.Any]
+ Successful Response
+
+ Examples
+ --------
+ import asyncio
+
+ from letta_client import AsyncLetta
+
+ client = AsyncLetta(
+ token="YOUR_TOKEN",
+ )
+
+
+ async def main() -> None:
+ await client.agents.delete_archival_memory(
+ agent_id="agent_id",
+ memory_id="memory_id",
+ )
+
+
+ asyncio.run(main())
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ f"v1/agents/{jsonable_encoder(agent_id)}/archival-memory/{jsonable_encoder(memory_id)}",
+ method="DELETE",
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ return typing.cast(
+ typing.Optional[typing.Any],
+ construct_type(
+ type_=typing.Optional[typing.Any], # type: ignore
 object_=_response.json(),
 ),
 )