letta-client 0.1.14__py3-none-any.whl → 0.1.16__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.


letta_client/__init__.py CHANGED
@@ -50,15 +50,16 @@ from .types import (
  Job,
  JobStatus,
  JobType,
+ LettaMessageUnion,
+ LettaMessageUnion_AssistantMessage,
+ LettaMessageUnion_ReasoningMessage,
+ LettaMessageUnion_SystemMessage,
+ LettaMessageUnion_ToolCallMessage,
+ LettaMessageUnion_ToolReturnMessage,
+ LettaMessageUnion_UserMessage,
  LettaRequest,
+ LettaRequestConfig,
  LettaResponse,
- LettaResponseMessagesItem,
- LettaResponseMessagesItem_AssistantMessage,
- LettaResponseMessagesItem_ReasoningMessage,
- LettaResponseMessagesItem_SystemMessage,
- LettaResponseMessagesItem_ToolCallMessage,
- LettaResponseMessagesItem_ToolReturnMessage,
- LettaResponseMessagesItem_UserMessage,
  LettaSchemasLettaMessageToolCall,
  LettaSchemasMessageMessage,
  LettaSchemasOpenaiChatCompletionRequestTool,
@@ -218,15 +219,16 @@ __all__ = [
  "JobType",
  "Letta",
  "LettaEnvironment",
+ "LettaMessageUnion",
+ "LettaMessageUnion_AssistantMessage",
+ "LettaMessageUnion_ReasoningMessage",
+ "LettaMessageUnion_SystemMessage",
+ "LettaMessageUnion_ToolCallMessage",
+ "LettaMessageUnion_ToolReturnMessage",
+ "LettaMessageUnion_UserMessage",
  "LettaRequest",
+ "LettaRequestConfig",
  "LettaResponse",
- "LettaResponseMessagesItem",
- "LettaResponseMessagesItem_AssistantMessage",
- "LettaResponseMessagesItem_ReasoningMessage",
- "LettaResponseMessagesItem_SystemMessage",
- "LettaResponseMessagesItem_ToolCallMessage",
- "LettaResponseMessagesItem_ToolReturnMessage",
- "LettaResponseMessagesItem_UserMessage",
  "LettaSchemasLettaMessageToolCall",
  "LettaSchemasMessageMessage",
  "LettaSchemasOpenaiChatCompletionRequestTool",
@@ -39,13 +39,6 @@ from .messages import (
  LettaStreamingResponse_UsageStatistics,
  LettaStreamingResponse_UserMessage,
  MessagesListResponse,
- MessagesListResponseItem,
- MessagesListResponseItem_AssistantMessage,
- MessagesListResponseItem_ReasoningMessage,
- MessagesListResponseItem_SystemMessage,
- MessagesListResponseItem_ToolCallMessage,
- MessagesListResponseItem_ToolReturnMessage,
- MessagesListResponseItem_UserMessage,
  )
  from .templates import TemplatesMigrateResponse

@@ -74,13 +67,6 @@ __all__ = [
  "LettaStreamingResponse_UserMessage",
  "MemoryVariablesGetResponse",
  "MessagesListResponse",
- "MessagesListResponseItem",
- "MessagesListResponseItem_AssistantMessage",
- "MessagesListResponseItem_ReasoningMessage",
- "MessagesListResponseItem_SystemMessage",
- "MessagesListResponseItem_ToolCallMessage",
- "MessagesListResponseItem_ToolReturnMessage",
- "MessagesListResponseItem_UserMessage",
  "TemplatesMigrateResponse",
  "UpdateAgentToolRulesItem",
  "archival_memory",
@@ -63,8 +63,9 @@ class AgentsClient:
  name: typing.Optional[str] = None,
  tags: typing.Optional[typing.Union[str, typing.Sequence[str]]] = None,
  match_all_tags: typing.Optional[bool] = None,
- cursor: typing.Optional[int] = None,
+ cursor: typing.Optional[str] = None,
  limit: typing.Optional[int] = None,
+ query_text: typing.Optional[str] = None,
  request_options: typing.Optional[RequestOptions] = None,
  ) -> typing.List[AgentState]:
  """
@@ -82,12 +83,15 @@ class AgentsClient:
  match_all_tags : typing.Optional[bool]
  If True, only returns agents that match ALL given tags. Otherwise, return agents that have ANY of the passed in tags.

- cursor : typing.Optional[int]
+ cursor : typing.Optional[str]
  Cursor for pagination

  limit : typing.Optional[int]
  Limit for pagination

+ query_text : typing.Optional[str]
+ Search agents by name
+
  request_options : typing.Optional[RequestOptions]
  Request-specific configuration.

@@ -114,6 +118,7 @@ class AgentsClient:
  "match_all_tags": match_all_tags,
  "cursor": cursor,
  "limit": limit,
+ "query_text": query_text,
  },
  request_options=request_options,
  )
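Taken together, `agents.list` now paginates with a string cursor and supports name search. A usage sketch against 0.1.16; the idea that the cursor is the id of the last agent on the previous page is an assumption:

```python
from letta_client import Letta

client = Letta(token="YOUR_TOKEN")

# query_text filters agents by name; cursor is now a string rather than an int.
first_page = client.agents.list(query_text="support", limit=10)

if first_page:
    # Assumption: pass the id of the last agent from the previous page
    # as the cursor to fetch the next page.
    next_page = client.agents.list(
        query_text="support",
        limit=10,
        cursor=first_page[-1].id,
    )
```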
@@ -158,6 +163,7 @@ class AgentsClient:
  embedding_config: typing.Optional[EmbeddingConfig] = OMIT,
  initial_message_sequence: typing.Optional[typing.Sequence[MessageCreate]] = OMIT,
  include_base_tools: typing.Optional[bool] = OMIT,
+ include_multi_agent_tools: typing.Optional[bool] = OMIT,
  description: typing.Optional[str] = OMIT,
  metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
  llm: typing.Optional[str] = OMIT,
@@ -216,7 +222,10 @@ class AgentsClient:
  The initial set of messages to put in the agent's in-context memory.

  include_base_tools : typing.Optional[bool]
- The LLM configuration used by the agent.
+ If true, attaches the Letta core tools (e.g. archival_memory and core_memory related functions).
+
+ include_multi_agent_tools : typing.Optional[bool]
+ If true, attaches the Letta multi-agent tools (e.g. sending a message to another agent).

  description : typing.Optional[str]
  The description of the agent.
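`agents.create` gains a separate switch for the multi-agent toolset, and the `include_base_tools` docstring now describes the flag correctly instead of repeating the LLM-configuration text. A minimal sketch; other creation parameters (memory, model configuration) are omitted and may be required depending on server defaults:

```python
from letta_client import Letta

client = Letta(token="YOUR_TOKEN")

agent = client.agents.create(
    name="triage-agent",
    include_base_tools=True,          # attach the Letta core memory/archival tools
    include_multi_agent_tools=True,   # also attach tools for messaging other agents
)
print(agent.id)
```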
@@ -296,6 +305,7 @@ class AgentsClient:
  object_=initial_message_sequence, annotation=typing.Sequence[MessageCreate], direction="write"
  ),
  "include_base_tools": include_base_tools,
+ "include_multi_agent_tools": include_multi_agent_tools,
  "description": description,
  "metadata_": metadata,
  "llm": llm,
@@ -747,8 +757,9 @@ class AsyncAgentsClient:
  name: typing.Optional[str] = None,
  tags: typing.Optional[typing.Union[str, typing.Sequence[str]]] = None,
  match_all_tags: typing.Optional[bool] = None,
- cursor: typing.Optional[int] = None,
+ cursor: typing.Optional[str] = None,
  limit: typing.Optional[int] = None,
+ query_text: typing.Optional[str] = None,
  request_options: typing.Optional[RequestOptions] = None,
  ) -> typing.List[AgentState]:
  """
@@ -766,12 +777,15 @@ class AsyncAgentsClient:
  match_all_tags : typing.Optional[bool]
  If True, only returns agents that match ALL given tags. Otherwise, return agents that have ANY of the passed in tags.

- cursor : typing.Optional[int]
+ cursor : typing.Optional[str]
  Cursor for pagination

  limit : typing.Optional[int]
  Limit for pagination

+ query_text : typing.Optional[str]
+ Search agents by name
+
  request_options : typing.Optional[RequestOptions]
  Request-specific configuration.

@@ -806,6 +820,7 @@ class AsyncAgentsClient:
  "match_all_tags": match_all_tags,
  "cursor": cursor,
  "limit": limit,
+ "query_text": query_text,
  },
  request_options=request_options,
  )
@@ -850,6 +865,7 @@ class AsyncAgentsClient:
  embedding_config: typing.Optional[EmbeddingConfig] = OMIT,
  initial_message_sequence: typing.Optional[typing.Sequence[MessageCreate]] = OMIT,
  include_base_tools: typing.Optional[bool] = OMIT,
+ include_multi_agent_tools: typing.Optional[bool] = OMIT,
  description: typing.Optional[str] = OMIT,
  metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
  llm: typing.Optional[str] = OMIT,
@@ -908,7 +924,10 @@ class AsyncAgentsClient:
  The initial set of messages to put in the agent's in-context memory.

  include_base_tools : typing.Optional[bool]
- The LLM configuration used by the agent.
+ If true, attaches the Letta core tools (e.g. archival_memory and core_memory related functions).
+
+ include_multi_agent_tools : typing.Optional[bool]
+ If true, attaches the Letta multi-agent tools (e.g. sending a message to another agent).

  description : typing.Optional[str]
  The description of the agent.
@@ -996,6 +1015,7 @@ class AsyncAgentsClient:
  object_=initial_message_sequence, annotation=typing.Sequence[MessageCreate], direction="write"
  ),
  "include_base_tools": include_base_tools,
+ "include_multi_agent_tools": include_multi_agent_tools,
  "description": description,
  "metadata_": metadata,
  "llm": llm,
@@ -10,13 +10,6 @@ from .types import (
  LettaStreamingResponse_UsageStatistics,
  LettaStreamingResponse_UserMessage,
  MessagesListResponse,
- MessagesListResponseItem,
- MessagesListResponseItem_AssistantMessage,
- MessagesListResponseItem_ReasoningMessage,
- MessagesListResponseItem_SystemMessage,
- MessagesListResponseItem_ToolCallMessage,
- MessagesListResponseItem_ToolReturnMessage,
- MessagesListResponseItem_UserMessage,
  )

  __all__ = [
@@ -29,11 +22,4 @@ __all__ = [
  "LettaStreamingResponse_UsageStatistics",
  "LettaStreamingResponse_UserMessage",
  "MessagesListResponse",
- "MessagesListResponseItem",
- "MessagesListResponseItem_AssistantMessage",
- "MessagesListResponseItem_ReasoningMessage",
- "MessagesListResponseItem_SystemMessage",
- "MessagesListResponseItem_ToolCallMessage",
- "MessagesListResponseItem_ToolReturnMessage",
- "MessagesListResponseItem_UserMessage",
  ]
@@ -11,6 +11,7 @@ from ...types.http_validation_error import HttpValidationError
  from json.decoder import JSONDecodeError
  from ...core.api_error import ApiError
  from ...types.message_create import MessageCreate
+ from ...types.letta_request_config import LettaRequestConfig
  from ...types.letta_response import LettaResponse
  from ...core.serialization import convert_and_respect_annotation_metadata
  from ...types.message_role import MessageRole
@@ -120,14 +121,12 @@ class MessagesClient:
  raise ApiError(status_code=_response.status_code, body=_response.text)
  raise ApiError(status_code=_response.status_code, body=_response_json)

- def create(
+ def send(
  self,
  agent_id: str,
  *,
  messages: typing.Sequence[MessageCreate],
- use_assistant_message: typing.Optional[bool] = OMIT,
- assistant_message_tool_name: typing.Optional[str] = OMIT,
- assistant_message_tool_kwarg: typing.Optional[str] = OMIT,
+ config: typing.Optional[LettaRequestConfig] = OMIT,
  request_options: typing.Optional[RequestOptions] = None,
  ) -> LettaResponse:
  """
@@ -141,14 +140,8 @@ class MessagesClient:
  messages : typing.Sequence[MessageCreate]
  The messages to be sent to the agent.

- use_assistant_message : typing.Optional[bool]
- Whether the server should parse specific tool call arguments (default `send_message`) as `AssistantMessage` objects.
-
- assistant_message_tool_name : typing.Optional[str]
- The name of the designated message tool.
-
- assistant_message_tool_kwarg : typing.Optional[str]
- The name of the message argument in the designated message tool.
+ config : typing.Optional[LettaRequestConfig]
+ Configuration options for the LettaRequest.

  request_options : typing.Optional[RequestOptions]
  Request-specific configuration.
@@ -165,7 +158,7 @@ class MessagesClient:
  client = Letta(
  token="YOUR_TOKEN",
  )
- client.agents.messages.create(
+ client.agents.messages.send(
  agent_id="agent_id",
  messages=[
  MessageCreate(
@@ -182,9 +175,9 @@ class MessagesClient:
  "messages": convert_and_respect_annotation_metadata(
  object_=messages, annotation=typing.Sequence[MessageCreate], direction="write"
  ),
- "use_assistant_message": use_assistant_message,
- "assistant_message_tool_name": assistant_message_tool_name,
- "assistant_message_tool_kwarg": assistant_message_tool_kwarg,
+ "config": convert_and_respect_annotation_metadata(
+ object_=config, annotation=LettaRequestConfig, direction="write"
+ ),
  },
  request_options=request_options,
  omit=OMIT,
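For callers, `messages.create` becomes `messages.send`, and the three assistant-message keyword arguments collapse into a single `config` object. A migration sketch, assuming `LettaRequestConfig` exposes fields mirroring the removed keyword arguments and that `MessageCreate` takes `role` and `text`:

```python
from letta_client import Letta, LettaRequestConfig, MessageCreate

client = Letta(token="YOUR_TOKEN")

# 0.1.14:
# client.agents.messages.create(
#     agent_id="agent_id",
#     messages=[MessageCreate(role="user", text="hello")],
#     use_assistant_message=True,
# )

# 0.1.16: the per-request flags move onto LettaRequestConfig
# (field names here are assumed to mirror the old keyword arguments).
response = client.agents.messages.send(
    agent_id="agent_id",
    messages=[MessageCreate(role="user", text="hello")],
    config=LettaRequestConfig(use_assistant_message=True),
)
for message in response.messages:
    print(message)
```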
@@ -318,9 +311,7 @@ class MessagesClient:
  agent_id: str,
  *,
  messages: typing.Sequence[MessageCreate],
- use_assistant_message: typing.Optional[bool] = OMIT,
- assistant_message_tool_name: typing.Optional[str] = OMIT,
- assistant_message_tool_kwarg: typing.Optional[str] = OMIT,
+ config: typing.Optional[LettaRequestConfig] = OMIT,
  stream_tokens: typing.Optional[bool] = OMIT,
  request_options: typing.Optional[RequestOptions] = None,
  ) -> typing.Iterator[LettaStreamingResponse]:
@@ -336,14 +327,8 @@ class MessagesClient:
  messages : typing.Sequence[MessageCreate]
  The messages to be sent to the agent.

- use_assistant_message : typing.Optional[bool]
- Whether the server should parse specific tool call arguments (default `send_message`) as `AssistantMessage` objects.
-
- assistant_message_tool_name : typing.Optional[str]
- The name of the designated message tool.
-
- assistant_message_tool_kwarg : typing.Optional[str]
- The name of the message argument in the designated message tool.
+ config : typing.Optional[LettaRequestConfig]
+ Configuration options for the LettaRequest.

  stream_tokens : typing.Optional[bool]
  Flag to determine if individual tokens should be streamed. Set to True for token streaming (requires stream_steps = True).
@@ -382,9 +367,9 @@ class MessagesClient:
  "messages": convert_and_respect_annotation_metadata(
  object_=messages, annotation=typing.Sequence[MessageCreate], direction="write"
  ),
- "use_assistant_message": use_assistant_message,
- "assistant_message_tool_name": assistant_message_tool_name,
- "assistant_message_tool_kwarg": assistant_message_tool_kwarg,
+ "config": convert_and_respect_annotation_metadata(
+ object_=config, annotation=LettaRequestConfig, direction="write"
+ ),
  "stream_tokens": stream_tokens,
  },
  headers={
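The streaming variant takes the same `config` object alongside `stream_tokens`. A sketch of a streaming call; the method name `create_stream` is an assumption, since only the parameters and the `typing.Iterator[LettaStreamingResponse]` return type appear in this hunk:

```python
from letta_client import Letta, LettaRequestConfig, MessageCreate

client = Letta(token="YOUR_TOKEN")

# Hypothetical method name; only the signature is visible in the diff above.
for chunk in client.agents.messages.create_stream(
    agent_id="agent_id",
    messages=[MessageCreate(role="user", text="hello")],
    config=LettaRequestConfig(use_assistant_message=True),
    stream_tokens=True,  # stream individual tokens (requires step streaming)
):
    print(chunk)
```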
@@ -424,19 +409,17 @@ class MessagesClient:
  raise ApiError(status_code=_response.status_code, body=_response.text)
  raise ApiError(status_code=_response.status_code, body=_response_json)

- def create_async(
+ def send_async(
  self,
  agent_id: str,
  *,
  messages: typing.Sequence[MessageCreate],
- use_assistant_message: typing.Optional[bool] = OMIT,
- assistant_message_tool_name: typing.Optional[str] = OMIT,
- assistant_message_tool_kwarg: typing.Optional[str] = OMIT,
+ config: typing.Optional[LettaRequestConfig] = OMIT,
  request_options: typing.Optional[RequestOptions] = None,
  ) -> Run:
  """
- Asynchronously process a user message and return a job ID.
- The actual processing happens in the background, and the status can be checked using the job ID.
+ Asynchronously process a user message and return a run object.
+ The actual processing happens in the background, and the status can be checked using the run ID.

  Parameters
  ----------
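`create_async` is renamed to `send_async` and is now documented as returning a `Run` object rather than a job id. A sketch of submitting a background run; the `Run.id` attribute and the polling workflow are assumptions, since no run-polling endpoint appears in this diff:

```python
from letta_client import Letta, MessageCreate

client = Letta(token="YOUR_TOKEN")

# The message is processed in the background; the returned Run can be
# polled for status by its id (polling API not shown in this diff).
run = client.agents.messages.send_async(
    agent_id="agent_id",
    messages=[MessageCreate(role="user", text="summarize our last conversation")],
)
print(run.id)  # assumption: Run exposes an `id` field
```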
@@ -445,14 +428,8 @@ class MessagesClient:
  messages : typing.Sequence[MessageCreate]
  The messages to be sent to the agent.

- use_assistant_message : typing.Optional[bool]
- Whether the server should parse specific tool call arguments (default `send_message`) as `AssistantMessage` objects.
-
- assistant_message_tool_name : typing.Optional[str]
- The name of the designated message tool.
-
- assistant_message_tool_kwarg : typing.Optional[str]
- The name of the message argument in the designated message tool.
+ config : typing.Optional[LettaRequestConfig]
+ Configuration options for the LettaRequest.

  request_options : typing.Optional[RequestOptions]
  Request-specific configuration.
@@ -469,7 +446,7 @@ class MessagesClient:
  client = Letta(
  token="YOUR_TOKEN",
  )
- client.agents.messages.create_async(
+ client.agents.messages.send_async(
  agent_id="agent_id",
  messages=[
  MessageCreate(
@@ -486,9 +463,9 @@ class MessagesClient:
  "messages": convert_and_respect_annotation_metadata(
  object_=messages, annotation=typing.Sequence[MessageCreate], direction="write"
  ),
- "use_assistant_message": use_assistant_message,
- "assistant_message_tool_name": assistant_message_tool_name,
- "assistant_message_tool_kwarg": assistant_message_tool_kwarg,
+ "config": convert_and_respect_annotation_metadata(
+ object_=config, annotation=LettaRequestConfig, direction="write"
+ ),
  },
  request_options=request_options,
  omit=OMIT,
@@ -618,14 +595,12 @@ class AsyncMessagesClient:
  raise ApiError(status_code=_response.status_code, body=_response.text)
  raise ApiError(status_code=_response.status_code, body=_response_json)

- async def create(
+ async def send(
  self,
  agent_id: str,
  *,
  messages: typing.Sequence[MessageCreate],
- use_assistant_message: typing.Optional[bool] = OMIT,
- assistant_message_tool_name: typing.Optional[str] = OMIT,
- assistant_message_tool_kwarg: typing.Optional[str] = OMIT,
+ config: typing.Optional[LettaRequestConfig] = OMIT,
  request_options: typing.Optional[RequestOptions] = None,
  ) -> LettaResponse:
  """
@@ -639,14 +614,8 @@ class AsyncMessagesClient:
  messages : typing.Sequence[MessageCreate]
  The messages to be sent to the agent.

- use_assistant_message : typing.Optional[bool]
- Whether the server should parse specific tool call arguments (default `send_message`) as `AssistantMessage` objects.
-
- assistant_message_tool_name : typing.Optional[str]
- The name of the designated message tool.
-
- assistant_message_tool_kwarg : typing.Optional[str]
- The name of the message argument in the designated message tool.
+ config : typing.Optional[LettaRequestConfig]
+ Configuration options for the LettaRequest.

  request_options : typing.Optional[RequestOptions]
  Request-specific configuration.
@@ -668,7 +637,7 @@ class AsyncMessagesClient:


  async def main() -> None:
- await client.agents.messages.create(
+ await client.agents.messages.send(
  agent_id="agent_id",
  messages=[
  MessageCreate(
@@ -688,9 +657,9 @@ class AsyncMessagesClient:
  "messages": convert_and_respect_annotation_metadata(
  object_=messages, annotation=typing.Sequence[MessageCreate], direction="write"
  ),
- "use_assistant_message": use_assistant_message,
- "assistant_message_tool_name": assistant_message_tool_name,
- "assistant_message_tool_kwarg": assistant_message_tool_kwarg,
+ "config": convert_and_respect_annotation_metadata(
+ object_=config, annotation=LettaRequestConfig, direction="write"
+ ),
  },
  request_options=request_options,
  omit=OMIT,
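The async client mirrors the same rename. A sketch of the awaitable call; the `AsyncLetta` entry point is an assumption, since only `AsyncLettaBase` and `AsyncMessagesClient` appear in this diff:

```python
import asyncio

from letta_client import LettaRequestConfig, MessageCreate
from letta_client import AsyncLetta  # assumed name of the async client


async def main() -> None:
    client = AsyncLetta(token="YOUR_TOKEN")
    response = await client.agents.messages.send(
        agent_id="agent_id",
        messages=[MessageCreate(role="user", text="hello")],
        config=LettaRequestConfig(),
    )
    print(response)


asyncio.run(main())
```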
@@ -832,9 +801,7 @@ class AsyncMessagesClient:
  agent_id: str,
  *,
  messages: typing.Sequence[MessageCreate],
- use_assistant_message: typing.Optional[bool] = OMIT,
- assistant_message_tool_name: typing.Optional[str] = OMIT,
- assistant_message_tool_kwarg: typing.Optional[str] = OMIT,
+ config: typing.Optional[LettaRequestConfig] = OMIT,
  stream_tokens: typing.Optional[bool] = OMIT,
  request_options: typing.Optional[RequestOptions] = None,
  ) -> typing.AsyncIterator[LettaStreamingResponse]:
@@ -850,14 +817,8 @@ class AsyncMessagesClient:
  messages : typing.Sequence[MessageCreate]
  The messages to be sent to the agent.

- use_assistant_message : typing.Optional[bool]
- Whether the server should parse specific tool call arguments (default `send_message`) as `AssistantMessage` objects.
-
- assistant_message_tool_name : typing.Optional[str]
- The name of the designated message tool.
-
- assistant_message_tool_kwarg : typing.Optional[str]
- The name of the message argument in the designated message tool.
+ config : typing.Optional[LettaRequestConfig]
+ Configuration options for the LettaRequest.

  stream_tokens : typing.Optional[bool]
  Flag to determine if individual tokens should be streamed. Set to True for token streaming (requires stream_steps = True).
@@ -904,9 +865,9 @@ class AsyncMessagesClient:
  "messages": convert_and_respect_annotation_metadata(
  object_=messages, annotation=typing.Sequence[MessageCreate], direction="write"
  ),
- "use_assistant_message": use_assistant_message,
- "assistant_message_tool_name": assistant_message_tool_name,
- "assistant_message_tool_kwarg": assistant_message_tool_kwarg,
+ "config": convert_and_respect_annotation_metadata(
+ object_=config, annotation=LettaRequestConfig, direction="write"
+ ),
  "stream_tokens": stream_tokens,
  },
  headers={
@@ -946,19 +907,17 @@ class AsyncMessagesClient:
  raise ApiError(status_code=_response.status_code, body=_response.text)
  raise ApiError(status_code=_response.status_code, body=_response_json)

- async def create_async(
+ async def send_async(
  self,
  agent_id: str,
  *,
  messages: typing.Sequence[MessageCreate],
- use_assistant_message: typing.Optional[bool] = OMIT,
- assistant_message_tool_name: typing.Optional[str] = OMIT,
- assistant_message_tool_kwarg: typing.Optional[str] = OMIT,
+ config: typing.Optional[LettaRequestConfig] = OMIT,
  request_options: typing.Optional[RequestOptions] = None,
  ) -> Run:
  """
- Asynchronously process a user message and return a job ID.
- The actual processing happens in the background, and the status can be checked using the job ID.
+ Asynchronously process a user message and return a run object.
+ The actual processing happens in the background, and the status can be checked using the run ID.

  Parameters
  ----------
@@ -967,14 +926,8 @@ class AsyncMessagesClient:
  messages : typing.Sequence[MessageCreate]
  The messages to be sent to the agent.

- use_assistant_message : typing.Optional[bool]
- Whether the server should parse specific tool call arguments (default `send_message`) as `AssistantMessage` objects.
-
- assistant_message_tool_name : typing.Optional[str]
- The name of the designated message tool.
-
- assistant_message_tool_kwarg : typing.Optional[str]
- The name of the message argument in the designated message tool.
+ config : typing.Optional[LettaRequestConfig]
+ Configuration options for the LettaRequest.

  request_options : typing.Optional[RequestOptions]
  Request-specific configuration.
@@ -996,7 +949,7 @@ class AsyncMessagesClient:


  async def main() -> None:
- await client.agents.messages.create_async(
+ await client.agents.messages.send_async(
  agent_id="agent_id",
  messages=[
  MessageCreate(
@@ -1016,9 +969,9 @@ class AsyncMessagesClient:
  "messages": convert_and_respect_annotation_metadata(
  object_=messages, annotation=typing.Sequence[MessageCreate], direction="write"
  ),
- "use_assistant_message": use_assistant_message,
- "assistant_message_tool_name": assistant_message_tool_name,
- "assistant_message_tool_kwarg": assistant_message_tool_kwarg,
+ "config": convert_and_respect_annotation_metadata(
+ object_=config, annotation=LettaRequestConfig, direction="write"
+ ),
  },
  request_options=request_options,
  omit=OMIT,
@@ -11,15 +11,6 @@ from .letta_streaming_response import (
  LettaStreamingResponse_UserMessage,
  )
  from .messages_list_response import MessagesListResponse
- from .messages_list_response_item import (
- MessagesListResponseItem,
- MessagesListResponseItem_AssistantMessage,
- MessagesListResponseItem_ReasoningMessage,
- MessagesListResponseItem_SystemMessage,
- MessagesListResponseItem_ToolCallMessage,
- MessagesListResponseItem_ToolReturnMessage,
- MessagesListResponseItem_UserMessage,
- )

  __all__ = [
  "LettaStreamingResponse",
@@ -31,11 +22,4 @@ __all__ = [
  "LettaStreamingResponse_UsageStatistics",
  "LettaStreamingResponse_UserMessage",
  "MessagesListResponse",
- "MessagesListResponseItem",
- "MessagesListResponseItem_AssistantMessage",
- "MessagesListResponseItem_ReasoningMessage",
- "MessagesListResponseItem_SystemMessage",
- "MessagesListResponseItem_ToolCallMessage",
- "MessagesListResponseItem_ToolReturnMessage",
- "MessagesListResponseItem_UserMessage",
  ]
@@ -2,6 +2,6 @@

  import typing
  from ....types.letta_schemas_message_message import LettaSchemasMessageMessage
- from .messages_list_response_item import MessagesListResponseItem
+ from ....types.letta_message_union import LettaMessageUnion

- MessagesListResponse = typing.Union[typing.List[LettaSchemasMessageMessage], typing.List[MessagesListResponseItem]]
+ MessagesListResponse = typing.Union[typing.List[LettaSchemasMessageMessage], typing.List[LettaMessageUnion]]
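With this alias change, listed messages are typed as the shared `LettaMessageUnion` instead of a route-specific item union. A sketch of consuming the list result; the `messages.list` method name and its parameters are assumptions based on the `MessagesListResponse` alias above:

```python
from letta_client import Letta, LettaMessageUnion_AssistantMessage

client = Letta(token="YOUR_TOKEN")

# Assumption: agents.messages.list(...) returns MessagesListResponse, i.e. a
# list of raw Message objects or of LettaMessageUnion variants.
messages = client.agents.messages.list(agent_id="agent_id")
for message in messages:
    if isinstance(message, LettaMessageUnion_AssistantMessage):
        print(message)
```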
letta_client/client.py CHANGED
@@ -1,13 +1,12 @@
- from textwrap import dedent
  import inspect
  import typing
+ from textwrap import dedent

- from .base_client import LettaBase, AsyncLettaBase
+ from .base_client import AsyncLettaBase, LettaBase
  from .core.request_options import RequestOptions
  from .tools.client import ToolsClient as ToolsClientBase
  from .types.letta_schemas_tool_tool import LettaSchemasToolTool

-
  # this is used as the default value for optional parameters
  OMIT = typing.cast(typing.Any, ...)

@@ -31,7 +30,7 @@ class ToolsClient(ToolsClientBase):
  def create_from_function(
  self,
  *,
- function: typing.Callable,
+ func: typing.Callable,
  name: typing.Optional[str] = OMIT,
  description: typing.Optional[str] = OMIT,
  tags: typing.Optional[typing.Sequence[str]] = OMIT,
@@ -42,7 +41,7 @@ class ToolsClient(ToolsClientBase):
  return_char_limit: typing.Optional[int] = OMIT,
  request_options: typing.Optional[RequestOptions] = None,
  ) -> LettaSchemasToolTool:
- source_code = dedent(inspect.getsource(function))
+ source_code = dedent(inspect.getsource(func))
  return self.create(
  source_code=source_code,
  name=name,
@@ -53,3 +52,29 @@ class ToolsClient(ToolsClientBase):
  return_char_limit=return_char_limit,
  request_options=request_options,
  )
+
+ def upsert_from_function(
+ self,
+ *,
+ func: typing.Callable,
+ name: typing.Optional[str] = OMIT,
+ description: typing.Optional[str] = OMIT,
+ tags: typing.Optional[typing.Sequence[str]] = OMIT,
+ source_type: typing.Optional[str] = OMIT,
+ json_schema: typing.Optional[
+ typing.Dict[str, typing.Optional[typing.Any]]
+ ] = OMIT,
+ return_char_limit: typing.Optional[int] = OMIT,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> LettaSchemasToolTool:
+ source_code = dedent(inspect.getsource(func))
+ return self.upsert(
+ source_code=source_code,
+ name=name,
+ description=description,
+ tags=tags,
+ source_type=source_type,
+ json_schema=json_schema,
+ return_char_limit=return_char_limit,
+ request_options=request_options,
+ )
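On the hand-written client wrapper, `create_from_function` renames its keyword from `function` to `func`, and the new `upsert_from_function` reads the function source with `inspect.getsource` and delegates to `tools.upsert`, so re-registering the same function updates the existing tool rather than failing. A usage sketch (schema derivation from the signature and docstring is an assumption about the underlying upsert endpoint):

```python
from letta_client import Letta

client = Letta(token="YOUR_TOKEN")


def get_weather(city: str) -> str:
    """Return a short, canned weather report for the given city."""
    return f"The weather in {city} is sunny."


# 0.1.14: client.tools.create_from_function(function=get_weather)
# 0.1.16: the keyword is `func`, and upsert_from_function creates the tool
# or updates it in place if one for this function already exists.
tool = client.tools.upsert_from_function(func=get_weather)
print(tool.name)
```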
@@ -16,7 +16,7 @@ class BaseClientWrapper:
  headers: typing.Dict[str, str] = {
  "X-Fern-Language": "Python",
  "X-Fern-SDK-Name": "letta-client",
- "X-Fern-SDK-Version": "0.1.14",
+ "X-Fern-SDK-Version": "0.1.16",
  }
  if self.token is not None:
  headers["Authorization"] = f"Bearer {self.token}"