letta-client 0.1.83__py3-none-any.whl → 0.1.85__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of letta-client has been flagged as potentially problematic; consult the registry's advisory page for details.

Files changed (34):
  1. letta_client/__init__.py +16 -2
  2. letta_client/agents/__init__.py +4 -0
  3. letta_client/agents/client.py +30 -0
  4. letta_client/agents/messages/types/messages_modify_response.py +8 -1
  5. letta_client/agents/types/__init__.py +8 -0
  6. letta_client/agents/types/agents_search_response_agents_item_llm_config.py +8 -0
  7. letta_client/agents/types/agents_search_response_agents_item_llm_config_enable_reasoner.py +5 -0
  8. letta_client/agents/types/agents_search_response_agents_item_llm_config_max_reasoning_tokens.py +5 -0
  9. letta_client/core/client_wrapper.py +1 -1
  10. letta_client/templates/__init__.py +4 -0
  11. letta_client/templates/types/__init__.py +8 -0
  12. letta_client/templates/types/templates_create_agents_response_agents_item_llm_config.py +8 -0
  13. letta_client/templates/types/templates_create_agents_response_agents_item_llm_config_enable_reasoner.py +5 -0
  14. letta_client/templates/types/templates_create_agents_response_agents_item_llm_config_max_reasoning_tokens.py +5 -0
  15. letta_client/tools/client.py +26 -4
  16. letta_client/types/__init__.py +6 -0
  17. letta_client/types/completion_create_params_non_streaming_model.py +4 -5
  18. letta_client/types/completion_create_params_streaming_model.py +4 -5
  19. letta_client/types/file_file.py +1 -1
  20. letta_client/types/hidden_reasoning_message.py +39 -0
  21. letta_client/types/hidden_reasoning_message_state.py +5 -0
  22. letta_client/types/letta_message_union.py +8 -1
  23. letta_client/types/llm_config.py +10 -0
  24. letta_client/types/organization.py +5 -0
  25. letta_client/types/organization_create.py +5 -0
  26. letta_client/types/organization_update.py +27 -0
  27. letta_client/types/reasoning_message.py +2 -0
  28. letta_client/voice/__init__.py +2 -2
  29. letta_client/voice/client.py +19 -7
  30. letta_client/voice/types/__init__.py +2 -2
  31. letta_client/voice/types/{create_voice_chat_completions_request.py → create_voice_chat_completions_request_body.py} +3 -1
  32. {letta_client-0.1.83.dist-info → letta_client-0.1.85.dist-info}/METADATA +1 -1
  33. {letta_client-0.1.83.dist-info → letta_client-0.1.85.dist-info}/RECORD +34 -27
  34. {letta_client-0.1.83.dist-info → letta_client-0.1.85.dist-info}/WHEEL +0 -0
letta_client/__init__.py CHANGED
@@ -92,6 +92,8 @@ from .types import (
92
92
  GroupCreate,
93
93
  GroupCreateManagerConfig,
94
94
  Health,
95
+ HiddenReasoningMessage,
96
+ HiddenReasoningMessageState,
95
97
  HttpValidationError,
96
98
  Identity,
97
99
  IdentityCreate,
@@ -139,6 +141,7 @@ from .types import (
139
141
  OpenaiTypesChatCompletionCreateParamsFunction,
140
142
  Organization,
141
143
  OrganizationCreate,
144
+ OrganizationUpdate,
142
145
  ParameterProperties,
143
146
  ParametersSchema,
144
147
  Passage,
@@ -260,8 +263,10 @@ from .agents import (
260
263
  AgentsSearchResponseAgentsItemLastUpdatedById,
261
264
  AgentsSearchResponseAgentsItemLastUpdatedByIdItem,
262
265
  AgentsSearchResponseAgentsItemLlmConfig,
266
+ AgentsSearchResponseAgentsItemLlmConfigEnableReasoner,
263
267
  AgentsSearchResponseAgentsItemLlmConfigHandle,
264
268
  AgentsSearchResponseAgentsItemLlmConfigHandleItem,
269
+ AgentsSearchResponseAgentsItemLlmConfigMaxReasoningTokens,
265
270
  AgentsSearchResponseAgentsItemLlmConfigMaxTokens,
266
271
  AgentsSearchResponseAgentsItemLlmConfigMaxTokensItem,
267
272
  AgentsSearchResponseAgentsItemLlmConfigModelEndpoint,
@@ -458,8 +463,10 @@ from .templates import (
458
463
  TemplatesCreateAgentsResponseAgentsItemLastUpdatedById,
459
464
  TemplatesCreateAgentsResponseAgentsItemLastUpdatedByIdItem,
460
465
  TemplatesCreateAgentsResponseAgentsItemLlmConfig,
466
+ TemplatesCreateAgentsResponseAgentsItemLlmConfigEnableReasoner,
461
467
  TemplatesCreateAgentsResponseAgentsItemLlmConfigHandle,
462
468
  TemplatesCreateAgentsResponseAgentsItemLlmConfigHandleItem,
469
+ TemplatesCreateAgentsResponseAgentsItemLlmConfigMaxReasoningTokens,
463
470
  TemplatesCreateAgentsResponseAgentsItemLlmConfigMaxTokens,
464
471
  TemplatesCreateAgentsResponseAgentsItemLlmConfigMaxTokensItem,
465
472
  TemplatesCreateAgentsResponseAgentsItemLlmConfigModelEndpoint,
@@ -629,7 +636,7 @@ from .tools import (
629
636
  ListMcpServersResponseValue,
630
637
  )
631
638
  from .version import __version__
632
- from .voice import CreateVoiceChatCompletionsRequest
639
+ from .voice import CreateVoiceChatCompletionsRequestBody
633
640
 
634
641
  __all__ = [
635
642
  "ActionModel",
@@ -676,8 +683,10 @@ __all__ = [
676
683
  "AgentsSearchResponseAgentsItemLastUpdatedById",
677
684
  "AgentsSearchResponseAgentsItemLastUpdatedByIdItem",
678
685
  "AgentsSearchResponseAgentsItemLlmConfig",
686
+ "AgentsSearchResponseAgentsItemLlmConfigEnableReasoner",
679
687
  "AgentsSearchResponseAgentsItemLlmConfigHandle",
680
688
  "AgentsSearchResponseAgentsItemLlmConfigHandleItem",
689
+ "AgentsSearchResponseAgentsItemLlmConfigMaxReasoningTokens",
681
690
  "AgentsSearchResponseAgentsItemLlmConfigMaxTokens",
682
691
  "AgentsSearchResponseAgentsItemLlmConfigMaxTokensItem",
683
692
  "AgentsSearchResponseAgentsItemLlmConfigModelEndpoint",
@@ -909,7 +918,7 @@ __all__ = [
909
918
  "CoreMemoryBlockSchema",
910
919
  "CreateAgentRequestToolRulesItem",
911
920
  "CreateBlock",
912
- "CreateVoiceChatCompletionsRequest",
921
+ "CreateVoiceChatCompletionsRequestBody",
913
922
  "DeleteMcpServerResponseItem",
914
923
  "DynamicManager",
915
924
  "E2BSandboxConfig",
@@ -927,6 +936,8 @@ __all__ = [
927
936
  "GroupCreate",
928
937
  "GroupCreateManagerConfig",
929
938
  "Health",
939
+ "HiddenReasoningMessage",
940
+ "HiddenReasoningMessageState",
930
941
  "HttpValidationError",
931
942
  "Identity",
932
943
  "IdentityCreate",
@@ -979,6 +990,7 @@ __all__ = [
979
990
  "OpenaiTypesChatCompletionCreateParamsFunction",
980
991
  "Organization",
981
992
  "OrganizationCreate",
993
+ "OrganizationUpdate",
982
994
  "ParameterProperties",
983
995
  "ParametersSchema",
984
996
  "Passage",
@@ -1038,8 +1050,10 @@ __all__ = [
1038
1050
  "TemplatesCreateAgentsResponseAgentsItemLastUpdatedById",
1039
1051
  "TemplatesCreateAgentsResponseAgentsItemLastUpdatedByIdItem",
1040
1052
  "TemplatesCreateAgentsResponseAgentsItemLlmConfig",
1053
+ "TemplatesCreateAgentsResponseAgentsItemLlmConfigEnableReasoner",
1041
1054
  "TemplatesCreateAgentsResponseAgentsItemLlmConfigHandle",
1042
1055
  "TemplatesCreateAgentsResponseAgentsItemLlmConfigHandleItem",
1056
+ "TemplatesCreateAgentsResponseAgentsItemLlmConfigMaxReasoningTokens",
1043
1057
  "TemplatesCreateAgentsResponseAgentsItemLlmConfigMaxTokens",
1044
1058
  "TemplatesCreateAgentsResponseAgentsItemLlmConfigMaxTokensItem",
1045
1059
  "TemplatesCreateAgentsResponseAgentsItemLlmConfigModelEndpoint",
@@ -35,8 +35,10 @@ from .types import (
35
35
  AgentsSearchResponseAgentsItemLastUpdatedById,
36
36
  AgentsSearchResponseAgentsItemLastUpdatedByIdItem,
37
37
  AgentsSearchResponseAgentsItemLlmConfig,
38
+ AgentsSearchResponseAgentsItemLlmConfigEnableReasoner,
38
39
  AgentsSearchResponseAgentsItemLlmConfigHandle,
39
40
  AgentsSearchResponseAgentsItemLlmConfigHandleItem,
41
+ AgentsSearchResponseAgentsItemLlmConfigMaxReasoningTokens,
40
42
  AgentsSearchResponseAgentsItemLlmConfigMaxTokens,
41
43
  AgentsSearchResponseAgentsItemLlmConfigMaxTokensItem,
42
44
  AgentsSearchResponseAgentsItemLlmConfigModelEndpoint,
@@ -241,8 +243,10 @@ __all__ = [
241
243
  "AgentsSearchResponseAgentsItemLastUpdatedById",
242
244
  "AgentsSearchResponseAgentsItemLastUpdatedByIdItem",
243
245
  "AgentsSearchResponseAgentsItemLlmConfig",
246
+ "AgentsSearchResponseAgentsItemLlmConfigEnableReasoner",
244
247
  "AgentsSearchResponseAgentsItemLlmConfigHandle",
245
248
  "AgentsSearchResponseAgentsItemLlmConfigHandleItem",
249
+ "AgentsSearchResponseAgentsItemLlmConfigMaxReasoningTokens",
246
250
  "AgentsSearchResponseAgentsItemLlmConfigMaxTokens",
247
251
  "AgentsSearchResponseAgentsItemLlmConfigMaxTokensItem",
248
252
  "AgentsSearchResponseAgentsItemLlmConfigModelEndpoint",
@@ -218,6 +218,9 @@ class AgentsClient:
218
218
  embedding: typing.Optional[str] = OMIT,
219
219
  context_window_limit: typing.Optional[int] = OMIT,
220
220
  embedding_chunk_size: typing.Optional[int] = OMIT,
221
+ max_tokens: typing.Optional[int] = OMIT,
222
+ max_reasoning_tokens: typing.Optional[int] = OMIT,
223
+ enable_reasoner: typing.Optional[bool] = OMIT,
221
224
  from_template: typing.Optional[str] = OMIT,
222
225
  template: typing.Optional[bool] = OMIT,
223
226
  create_agent_request_project: typing.Optional[str] = OMIT,
@@ -303,6 +306,15 @@ class AgentsClient:
303
306
  embedding_chunk_size : typing.Optional[int]
304
307
  The embedding chunk size used by the agent.
305
308
 
309
+ max_tokens : typing.Optional[int]
310
+ The maximum number of tokens to generate, including reasoning step. If not set, the model will use its default value.
311
+
312
+ max_reasoning_tokens : typing.Optional[int]
313
+ The maximum number of tokens to generate for reasoning step. If not set, the model will use its default value.
314
+
315
+ enable_reasoner : typing.Optional[bool]
316
+ Whether to enable internal extended thinking step for a reasoner model.
317
+
306
318
  from_template : typing.Optional[str]
307
319
  The template id used to configure the agent
308
320
 
@@ -386,6 +398,9 @@ class AgentsClient:
386
398
  "embedding": embedding,
387
399
  "context_window_limit": context_window_limit,
388
400
  "embedding_chunk_size": embedding_chunk_size,
401
+ "max_tokens": max_tokens,
402
+ "max_reasoning_tokens": max_reasoning_tokens,
403
+ "enable_reasoner": enable_reasoner,
389
404
  "from_template": from_template,
390
405
  "template": template,
391
406
  "project": create_agent_request_project,
@@ -1307,6 +1322,9 @@ class AsyncAgentsClient:
1307
1322
  embedding: typing.Optional[str] = OMIT,
1308
1323
  context_window_limit: typing.Optional[int] = OMIT,
1309
1324
  embedding_chunk_size: typing.Optional[int] = OMIT,
1325
+ max_tokens: typing.Optional[int] = OMIT,
1326
+ max_reasoning_tokens: typing.Optional[int] = OMIT,
1327
+ enable_reasoner: typing.Optional[bool] = OMIT,
1310
1328
  from_template: typing.Optional[str] = OMIT,
1311
1329
  template: typing.Optional[bool] = OMIT,
1312
1330
  create_agent_request_project: typing.Optional[str] = OMIT,
@@ -1392,6 +1410,15 @@ class AsyncAgentsClient:
1392
1410
  embedding_chunk_size : typing.Optional[int]
1393
1411
  The embedding chunk size used by the agent.
1394
1412
 
1413
+ max_tokens : typing.Optional[int]
1414
+ The maximum number of tokens to generate, including reasoning step. If not set, the model will use its default value.
1415
+
1416
+ max_reasoning_tokens : typing.Optional[int]
1417
+ The maximum number of tokens to generate for reasoning step. If not set, the model will use its default value.
1418
+
1419
+ enable_reasoner : typing.Optional[bool]
1420
+ Whether to enable internal extended thinking step for a reasoner model.
1421
+
1395
1422
  from_template : typing.Optional[str]
1396
1423
  The template id used to configure the agent
1397
1424
 
@@ -1483,6 +1510,9 @@ class AsyncAgentsClient:
1483
1510
  "embedding": embedding,
1484
1511
  "context_window_limit": context_window_limit,
1485
1512
  "embedding_chunk_size": embedding_chunk_size,
1513
+ "max_tokens": max_tokens,
1514
+ "max_reasoning_tokens": max_reasoning_tokens,
1515
+ "enable_reasoner": enable_reasoner,
1486
1516
  "from_template": from_template,
1487
1517
  "template": template,
1488
1518
  "project": create_agent_request_project,
@@ -4,10 +4,17 @@ import typing
4
4
  from ....types.system_message import SystemMessage
5
5
  from ....types.user_message import UserMessage
6
6
  from ....types.reasoning_message import ReasoningMessage
7
+ from ....types.hidden_reasoning_message import HiddenReasoningMessage
7
8
  from ....types.tool_call_message import ToolCallMessage
8
9
  from ....types.tool_return_message import ToolReturnMessage
9
10
  from ....types.assistant_message import AssistantMessage
10
11
 
11
12
  MessagesModifyResponse = typing.Union[
12
- SystemMessage, UserMessage, ReasoningMessage, ToolCallMessage, ToolReturnMessage, AssistantMessage
13
+ SystemMessage,
14
+ UserMessage,
15
+ ReasoningMessage,
16
+ HiddenReasoningMessage,
17
+ ToolCallMessage,
18
+ ToolReturnMessage,
19
+ AssistantMessage,
13
20
  ]
@@ -62,8 +62,14 @@ from .agents_search_response_agents_item_last_updated_by_id_item import (
62
62
  AgentsSearchResponseAgentsItemLastUpdatedByIdItem,
63
63
  )
64
64
  from .agents_search_response_agents_item_llm_config import AgentsSearchResponseAgentsItemLlmConfig
65
+ from .agents_search_response_agents_item_llm_config_enable_reasoner import (
66
+ AgentsSearchResponseAgentsItemLlmConfigEnableReasoner,
67
+ )
65
68
  from .agents_search_response_agents_item_llm_config_handle import AgentsSearchResponseAgentsItemLlmConfigHandle
66
69
  from .agents_search_response_agents_item_llm_config_handle_item import AgentsSearchResponseAgentsItemLlmConfigHandleItem
70
+ from .agents_search_response_agents_item_llm_config_max_reasoning_tokens import (
71
+ AgentsSearchResponseAgentsItemLlmConfigMaxReasoningTokens,
72
+ )
67
73
  from .agents_search_response_agents_item_llm_config_max_tokens import AgentsSearchResponseAgentsItemLlmConfigMaxTokens
68
74
  from .agents_search_response_agents_item_llm_config_max_tokens_item import (
69
75
  AgentsSearchResponseAgentsItemLlmConfigMaxTokensItem,
@@ -515,8 +521,10 @@ __all__ = [
515
521
  "AgentsSearchResponseAgentsItemLastUpdatedById",
516
522
  "AgentsSearchResponseAgentsItemLastUpdatedByIdItem",
517
523
  "AgentsSearchResponseAgentsItemLlmConfig",
524
+ "AgentsSearchResponseAgentsItemLlmConfigEnableReasoner",
518
525
  "AgentsSearchResponseAgentsItemLlmConfigHandle",
519
526
  "AgentsSearchResponseAgentsItemLlmConfigHandleItem",
527
+ "AgentsSearchResponseAgentsItemLlmConfigMaxReasoningTokens",
520
528
  "AgentsSearchResponseAgentsItemLlmConfigMaxTokens",
521
529
  "AgentsSearchResponseAgentsItemLlmConfigMaxTokensItem",
522
530
  "AgentsSearchResponseAgentsItemLlmConfigModelEndpoint",
@@ -19,6 +19,12 @@ from .agents_search_response_agents_item_llm_config_temperature import (
19
19
  AgentsSearchResponseAgentsItemLlmConfigTemperature,
20
20
  )
21
21
  from .agents_search_response_agents_item_llm_config_max_tokens import AgentsSearchResponseAgentsItemLlmConfigMaxTokens
22
+ from .agents_search_response_agents_item_llm_config_enable_reasoner import (
23
+ AgentsSearchResponseAgentsItemLlmConfigEnableReasoner,
24
+ )
25
+ from .agents_search_response_agents_item_llm_config_max_reasoning_tokens import (
26
+ AgentsSearchResponseAgentsItemLlmConfigMaxReasoningTokens,
27
+ )
22
28
  from ...core.pydantic_utilities import IS_PYDANTIC_V2
23
29
  import pydantic
24
30
 
@@ -35,6 +41,8 @@ class AgentsSearchResponseAgentsItemLlmConfig(UncheckedBaseModel):
35
41
  handle: typing.Optional[AgentsSearchResponseAgentsItemLlmConfigHandle] = None
36
42
  temperature: typing.Optional[AgentsSearchResponseAgentsItemLlmConfigTemperature] = None
37
43
  max_tokens: typing.Optional[AgentsSearchResponseAgentsItemLlmConfigMaxTokens] = None
44
+ enable_reasoner: typing.Optional[AgentsSearchResponseAgentsItemLlmConfigEnableReasoner] = None
45
+ max_reasoning_tokens: typing.Optional[AgentsSearchResponseAgentsItemLlmConfigMaxReasoningTokens] = None
38
46
 
39
47
  if IS_PYDANTIC_V2:
40
48
  model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
@@ -0,0 +1,5 @@
1
+ # This file was auto-generated by Fern from our API Definition.
2
+
3
+ import typing
4
+
5
+ AgentsSearchResponseAgentsItemLlmConfigEnableReasoner = typing.Union[bool, typing.Optional[typing.Any]]
@@ -0,0 +1,5 @@
1
+ # This file was auto-generated by Fern from our API Definition.
2
+
3
+ import typing
4
+
5
+ AgentsSearchResponseAgentsItemLlmConfigMaxReasoningTokens = typing.Union[float, typing.Optional[typing.Any]]
@@ -16,7 +16,7 @@ class BaseClientWrapper:
16
16
  headers: typing.Dict[str, str] = {
17
17
  "X-Fern-Language": "Python",
18
18
  "X-Fern-SDK-Name": "letta-client",
19
- "X-Fern-SDK-Version": "0.1.83",
19
+ "X-Fern-SDK-Version": "0.1.85",
20
20
  }
21
21
  if self.token is not None:
22
22
  headers["Authorization"] = f"Bearer {self.token}"
@@ -30,8 +30,10 @@ from .types import (
30
30
  TemplatesCreateAgentsResponseAgentsItemLastUpdatedById,
31
31
  TemplatesCreateAgentsResponseAgentsItemLastUpdatedByIdItem,
32
32
  TemplatesCreateAgentsResponseAgentsItemLlmConfig,
33
+ TemplatesCreateAgentsResponseAgentsItemLlmConfigEnableReasoner,
33
34
  TemplatesCreateAgentsResponseAgentsItemLlmConfigHandle,
34
35
  TemplatesCreateAgentsResponseAgentsItemLlmConfigHandleItem,
36
+ TemplatesCreateAgentsResponseAgentsItemLlmConfigMaxReasoningTokens,
35
37
  TemplatesCreateAgentsResponseAgentsItemLlmConfigMaxTokens,
36
38
  TemplatesCreateAgentsResponseAgentsItemLlmConfigMaxTokensItem,
37
39
  TemplatesCreateAgentsResponseAgentsItemLlmConfigModelEndpoint,
@@ -225,8 +227,10 @@ __all__ = [
225
227
  "TemplatesCreateAgentsResponseAgentsItemLastUpdatedById",
226
228
  "TemplatesCreateAgentsResponseAgentsItemLastUpdatedByIdItem",
227
229
  "TemplatesCreateAgentsResponseAgentsItemLlmConfig",
230
+ "TemplatesCreateAgentsResponseAgentsItemLlmConfigEnableReasoner",
228
231
  "TemplatesCreateAgentsResponseAgentsItemLlmConfigHandle",
229
232
  "TemplatesCreateAgentsResponseAgentsItemLlmConfigHandleItem",
233
+ "TemplatesCreateAgentsResponseAgentsItemLlmConfigMaxReasoningTokens",
230
234
  "TemplatesCreateAgentsResponseAgentsItemLlmConfigMaxTokens",
231
235
  "TemplatesCreateAgentsResponseAgentsItemLlmConfigMaxTokensItem",
232
236
  "TemplatesCreateAgentsResponseAgentsItemLlmConfigModelEndpoint",
@@ -75,12 +75,18 @@ from .templates_create_agents_response_agents_item_last_updated_by_id_item impor
75
75
  TemplatesCreateAgentsResponseAgentsItemLastUpdatedByIdItem,
76
76
  )
77
77
  from .templates_create_agents_response_agents_item_llm_config import TemplatesCreateAgentsResponseAgentsItemLlmConfig
78
+ from .templates_create_agents_response_agents_item_llm_config_enable_reasoner import (
79
+ TemplatesCreateAgentsResponseAgentsItemLlmConfigEnableReasoner,
80
+ )
78
81
  from .templates_create_agents_response_agents_item_llm_config_handle import (
79
82
  TemplatesCreateAgentsResponseAgentsItemLlmConfigHandle,
80
83
  )
81
84
  from .templates_create_agents_response_agents_item_llm_config_handle_item import (
82
85
  TemplatesCreateAgentsResponseAgentsItemLlmConfigHandleItem,
83
86
  )
87
+ from .templates_create_agents_response_agents_item_llm_config_max_reasoning_tokens import (
88
+ TemplatesCreateAgentsResponseAgentsItemLlmConfigMaxReasoningTokens,
89
+ )
84
90
  from .templates_create_agents_response_agents_item_llm_config_max_tokens import (
85
91
  TemplatesCreateAgentsResponseAgentsItemLlmConfigMaxTokens,
86
92
  )
@@ -579,8 +585,10 @@ __all__ = [
579
585
  "TemplatesCreateAgentsResponseAgentsItemLastUpdatedById",
580
586
  "TemplatesCreateAgentsResponseAgentsItemLastUpdatedByIdItem",
581
587
  "TemplatesCreateAgentsResponseAgentsItemLlmConfig",
588
+ "TemplatesCreateAgentsResponseAgentsItemLlmConfigEnableReasoner",
582
589
  "TemplatesCreateAgentsResponseAgentsItemLlmConfigHandle",
583
590
  "TemplatesCreateAgentsResponseAgentsItemLlmConfigHandleItem",
591
+ "TemplatesCreateAgentsResponseAgentsItemLlmConfigMaxReasoningTokens",
584
592
  "TemplatesCreateAgentsResponseAgentsItemLlmConfigMaxTokens",
585
593
  "TemplatesCreateAgentsResponseAgentsItemLlmConfigMaxTokensItem",
586
594
  "TemplatesCreateAgentsResponseAgentsItemLlmConfigModelEndpoint",
@@ -23,6 +23,12 @@ from .templates_create_agents_response_agents_item_llm_config_temperature import
23
23
  from .templates_create_agents_response_agents_item_llm_config_max_tokens import (
24
24
  TemplatesCreateAgentsResponseAgentsItemLlmConfigMaxTokens,
25
25
  )
26
+ from .templates_create_agents_response_agents_item_llm_config_enable_reasoner import (
27
+ TemplatesCreateAgentsResponseAgentsItemLlmConfigEnableReasoner,
28
+ )
29
+ from .templates_create_agents_response_agents_item_llm_config_max_reasoning_tokens import (
30
+ TemplatesCreateAgentsResponseAgentsItemLlmConfigMaxReasoningTokens,
31
+ )
26
32
  from ...core.pydantic_utilities import IS_PYDANTIC_V2
27
33
  import pydantic
28
34
 
@@ -39,6 +45,8 @@ class TemplatesCreateAgentsResponseAgentsItemLlmConfig(UncheckedBaseModel):
39
45
  handle: typing.Optional[TemplatesCreateAgentsResponseAgentsItemLlmConfigHandle] = None
40
46
  temperature: typing.Optional[TemplatesCreateAgentsResponseAgentsItemLlmConfigTemperature] = None
41
47
  max_tokens: typing.Optional[TemplatesCreateAgentsResponseAgentsItemLlmConfigMaxTokens] = None
48
+ enable_reasoner: typing.Optional[TemplatesCreateAgentsResponseAgentsItemLlmConfigEnableReasoner] = None
49
+ max_reasoning_tokens: typing.Optional[TemplatesCreateAgentsResponseAgentsItemLlmConfigMaxReasoningTokens] = None
42
50
 
43
51
  if IS_PYDANTIC_V2:
44
52
  model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
@@ -0,0 +1,5 @@
1
+ # This file was auto-generated by Fern from our API Definition.
2
+
3
+ import typing
4
+
5
+ TemplatesCreateAgentsResponseAgentsItemLlmConfigEnableReasoner = typing.Union[bool, typing.Optional[typing.Any]]
@@ -0,0 +1,5 @@
1
+ # This file was auto-generated by Fern from our API Definition.
2
+
3
+ import typing
4
+
5
+ TemplatesCreateAgentsResponseAgentsItemLlmConfigMaxReasoningTokens = typing.Union[float, typing.Optional[typing.Any]]
@@ -659,12 +659,16 @@ class ToolsClient:
659
659
  raise ApiError(status_code=_response.status_code, body=_response.text)
660
660
  raise ApiError(status_code=_response.status_code, body=_response_json)
661
661
 
662
- def list_composio_apps(self, *, request_options: typing.Optional[RequestOptions] = None) -> typing.List[AppModel]:
662
+ def list_composio_apps(
663
+ self, *, user_id: typing.Optional[str] = None, request_options: typing.Optional[RequestOptions] = None
664
+ ) -> typing.List[AppModel]:
663
665
  """
664
666
  Get a list of all Composio apps
665
667
 
666
668
  Parameters
667
669
  ----------
670
+ user_id : typing.Optional[str]
671
+
668
672
  request_options : typing.Optional[RequestOptions]
669
673
  Request-specific configuration.
670
674
 
@@ -685,6 +689,9 @@ class ToolsClient:
685
689
  _response = self._client_wrapper.httpx_client.request(
686
690
  "v1/tools/composio/apps",
687
691
  method="GET",
692
+ headers={
693
+ "user-id": str(user_id) if user_id is not None else None,
694
+ },
688
695
  request_options=request_options,
689
696
  )
690
697
  try:
@@ -828,13 +835,15 @@ class ToolsClient:
828
835
  raise ApiError(status_code=_response.status_code, body=_response_json)
829
836
 
830
837
  def list_mcp_servers(
831
- self, *, request_options: typing.Optional[RequestOptions] = None
838
+ self, *, user_id: typing.Optional[str] = None, request_options: typing.Optional[RequestOptions] = None
832
839
  ) -> typing.Dict[str, ListMcpServersResponseValue]:
833
840
  """
834
841
  Get a list of all configured MCP servers
835
842
 
836
843
  Parameters
837
844
  ----------
845
+ user_id : typing.Optional[str]
846
+
838
847
  request_options : typing.Optional[RequestOptions]
839
848
  Request-specific configuration.
840
849
 
@@ -855,6 +864,9 @@ class ToolsClient:
855
864
  _response = self._client_wrapper.httpx_client.request(
856
865
  "v1/tools/mcp/servers",
857
866
  method="GET",
867
+ headers={
868
+ "user-id": str(user_id) if user_id is not None else None,
869
+ },
858
870
  request_options=request_options,
859
871
  )
860
872
  try:
@@ -1824,13 +1836,15 @@ class AsyncToolsClient:
1824
1836
  raise ApiError(status_code=_response.status_code, body=_response_json)
1825
1837
 
1826
1838
  async def list_composio_apps(
1827
- self, *, request_options: typing.Optional[RequestOptions] = None
1839
+ self, *, user_id: typing.Optional[str] = None, request_options: typing.Optional[RequestOptions] = None
1828
1840
  ) -> typing.List[AppModel]:
1829
1841
  """
1830
1842
  Get a list of all Composio apps
1831
1843
 
1832
1844
  Parameters
1833
1845
  ----------
1846
+ user_id : typing.Optional[str]
1847
+
1834
1848
  request_options : typing.Optional[RequestOptions]
1835
1849
  Request-specific configuration.
1836
1850
 
@@ -1859,6 +1873,9 @@ class AsyncToolsClient:
1859
1873
  _response = await self._client_wrapper.httpx_client.request(
1860
1874
  "v1/tools/composio/apps",
1861
1875
  method="GET",
1876
+ headers={
1877
+ "user-id": str(user_id) if user_id is not None else None,
1878
+ },
1862
1879
  request_options=request_options,
1863
1880
  )
1864
1881
  try:
@@ -2018,13 +2035,15 @@ class AsyncToolsClient:
2018
2035
  raise ApiError(status_code=_response.status_code, body=_response_json)
2019
2036
 
2020
2037
  async def list_mcp_servers(
2021
- self, *, request_options: typing.Optional[RequestOptions] = None
2038
+ self, *, user_id: typing.Optional[str] = None, request_options: typing.Optional[RequestOptions] = None
2022
2039
  ) -> typing.Dict[str, ListMcpServersResponseValue]:
2023
2040
  """
2024
2041
  Get a list of all configured MCP servers
2025
2042
 
2026
2043
  Parameters
2027
2044
  ----------
2045
+ user_id : typing.Optional[str]
2046
+
2028
2047
  request_options : typing.Optional[RequestOptions]
2029
2048
  Request-specific configuration.
2030
2049
 
@@ -2053,6 +2072,9 @@ class AsyncToolsClient:
2053
2072
  _response = await self._client_wrapper.httpx_client.request(
2054
2073
  "v1/tools/mcp/servers",
2055
2074
  method="GET",
2075
+ headers={
2076
+ "user-id": str(user_id) if user_id is not None else None,
2077
+ },
2056
2078
  request_options=request_options,
2057
2079
  )
2058
2080
  try:
@@ -91,6 +91,8 @@ from .group import Group
91
91
  from .group_create import GroupCreate
92
92
  from .group_create_manager_config import GroupCreateManagerConfig
93
93
  from .health import Health
94
+ from .hidden_reasoning_message import HiddenReasoningMessage
95
+ from .hidden_reasoning_message_state import HiddenReasoningMessageState
94
96
  from .http_validation_error import HttpValidationError
95
97
  from .identity import Identity
96
98
  from .identity_create import IdentityCreate
@@ -142,6 +144,7 @@ from .openai_types_chat_chat_completion_named_tool_choice_param_function import
142
144
  from .openai_types_chat_completion_create_params_function import OpenaiTypesChatCompletionCreateParamsFunction
143
145
  from .organization import Organization
144
146
  from .organization_create import OrganizationCreate
147
+ from .organization_update import OrganizationUpdate
145
148
  from .parameter_properties import ParameterProperties
146
149
  from .parameters_schema import ParametersSchema
147
150
  from .passage import Passage
@@ -302,6 +305,8 @@ __all__ = [
302
305
  "GroupCreate",
303
306
  "GroupCreateManagerConfig",
304
307
  "Health",
308
+ "HiddenReasoningMessage",
309
+ "HiddenReasoningMessageState",
305
310
  "HttpValidationError",
306
311
  "Identity",
307
312
  "IdentityCreate",
@@ -349,6 +354,7 @@ __all__ = [
349
354
  "OpenaiTypesChatCompletionCreateParamsFunction",
350
355
  "Organization",
351
356
  "OrganizationCreate",
357
+ "OrganizationUpdate",
352
358
  "ParameterProperties",
353
359
  "ParametersSchema",
354
360
  "Passage",
@@ -12,11 +12,6 @@ CompletionCreateParamsNonStreamingModel = typing.Union[
12
12
  typing.Literal["o1-preview-2024-09-12"],
13
13
  typing.Literal["o1-mini"],
14
14
  typing.Literal["o1-mini-2024-09-12"],
15
- typing.Literal["computer-use-preview"],
16
- typing.Literal["computer-use-preview-2025-02-04"],
17
- typing.Literal["computer-use-preview-2025-03-11"],
18
- typing.Literal["gpt-4.5-preview"],
19
- typing.Literal["gpt-4.5-preview-2025-02-27"],
20
15
  typing.Literal["gpt-4o"],
21
16
  typing.Literal["gpt-4o-2024-11-20"],
22
17
  typing.Literal["gpt-4o-2024-08-06"],
@@ -26,6 +21,10 @@ CompletionCreateParamsNonStreamingModel = typing.Union[
26
21
  typing.Literal["gpt-4o-audio-preview-2024-12-17"],
27
22
  typing.Literal["gpt-4o-mini-audio-preview"],
28
23
  typing.Literal["gpt-4o-mini-audio-preview-2024-12-17"],
24
+ typing.Literal["gpt-4o-search-preview"],
25
+ typing.Literal["gpt-4o-mini-search-preview"],
26
+ typing.Literal["gpt-4o-search-preview-2025-03-11"],
27
+ typing.Literal["gpt-4o-mini-search-preview-2025-03-11"],
29
28
  typing.Literal["chatgpt-4o-latest"],
30
29
  typing.Literal["gpt-4o-mini"],
31
30
  typing.Literal["gpt-4o-mini-2024-07-18"],
@@ -12,11 +12,6 @@ CompletionCreateParamsStreamingModel = typing.Union[
12
12
  typing.Literal["o1-preview-2024-09-12"],
13
13
  typing.Literal["o1-mini"],
14
14
  typing.Literal["o1-mini-2024-09-12"],
15
- typing.Literal["computer-use-preview"],
16
- typing.Literal["computer-use-preview-2025-02-04"],
17
- typing.Literal["computer-use-preview-2025-03-11"],
18
- typing.Literal["gpt-4.5-preview"],
19
- typing.Literal["gpt-4.5-preview-2025-02-27"],
20
15
  typing.Literal["gpt-4o"],
21
16
  typing.Literal["gpt-4o-2024-11-20"],
22
17
  typing.Literal["gpt-4o-2024-08-06"],
@@ -26,6 +21,10 @@ CompletionCreateParamsStreamingModel = typing.Union[
26
21
  typing.Literal["gpt-4o-audio-preview-2024-12-17"],
27
22
  typing.Literal["gpt-4o-mini-audio-preview"],
28
23
  typing.Literal["gpt-4o-mini-audio-preview-2024-12-17"],
24
+ typing.Literal["gpt-4o-search-preview"],
25
+ typing.Literal["gpt-4o-mini-search-preview"],
26
+ typing.Literal["gpt-4o-search-preview-2025-03-11"],
27
+ typing.Literal["gpt-4o-mini-search-preview-2025-03-11"],
29
28
  typing.Literal["chatgpt-4o-latest"],
30
29
  typing.Literal["gpt-4o-mini"],
31
30
  typing.Literal["gpt-4o-mini-2024-07-18"],
@@ -9,7 +9,7 @@ import pydantic
9
9
  class FileFile(UncheckedBaseModel):
10
10
  file_data: typing.Optional[str] = None
11
11
  file_id: typing.Optional[str] = None
12
- file_name: typing.Optional[str] = None
12
+ filename: typing.Optional[str] = None
13
13
 
14
14
  if IS_PYDANTIC_V2:
15
15
  model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
@@ -0,0 +1,39 @@
1
+ # This file was auto-generated by Fern from our API Definition.
2
+
3
+ from ..core.unchecked_base_model import UncheckedBaseModel
4
+ import datetime as dt
5
+ import typing
6
+ from .hidden_reasoning_message_state import HiddenReasoningMessageState
7
+ from ..core.pydantic_utilities import IS_PYDANTIC_V2
8
+ import pydantic
9
+
10
+
11
+ class HiddenReasoningMessage(UncheckedBaseModel):
12
+ """
13
+ Representation of an agent's internal reasoning where reasoning content
14
+ has been hidden from the response.
15
+
16
+ Args:
17
+ id (str): The ID of the message
18
+ date (datetime): The date the message was created in ISO format
19
+ name (Optional[str]): The name of the sender of the message
20
+ state (Literal["redacted", "omitted"]): Whether the reasoning
21
+ content was redacted by the provider or simply omitted by the API
22
+ hidden_reasoning (Optional[str]): The internal reasoning of the agent
23
+ """
24
+
25
+ id: str
26
+ date: dt.datetime
27
+ name: typing.Optional[str] = None
28
+ message_type: typing.Literal["hidden_reasoning_message"] = "hidden_reasoning_message"
29
+ state: HiddenReasoningMessageState
30
+ hidden_reasoning: typing.Optional[str] = None
31
+
32
+ if IS_PYDANTIC_V2:
33
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
34
+ else:
35
+
36
+ class Config:
37
+ frozen = True
38
+ smart_union = True
39
+ extra = pydantic.Extra.allow
@@ -0,0 +1,5 @@
1
+ # This file was auto-generated by Fern from our API Definition.
2
+
3
+ import typing
4
+
5
+ HiddenReasoningMessageState = typing.Union[typing.Literal["redacted", "omitted"], typing.Any]
@@ -4,10 +4,17 @@ import typing
4
4
  from .system_message import SystemMessage
5
5
  from .user_message import UserMessage
6
6
  from .reasoning_message import ReasoningMessage
7
+ from .hidden_reasoning_message import HiddenReasoningMessage
7
8
  from .tool_call_message import ToolCallMessage
8
9
  from .tool_return_message import ToolReturnMessage
9
10
  from .assistant_message import AssistantMessage
10
11
 
11
12
  LettaMessageUnion = typing.Union[
12
- SystemMessage, UserMessage, ReasoningMessage, ToolCallMessage, ToolReturnMessage, AssistantMessage
13
+ SystemMessage,
14
+ UserMessage,
15
+ ReasoningMessage,
16
+ HiddenReasoningMessage,
17
+ ToolCallMessage,
18
+ ToolReturnMessage,
19
+ AssistantMessage,
13
20
  ]
@@ -67,6 +67,16 @@ class LlmConfig(UncheckedBaseModel):
67
67
  The maximum number of tokens to generate. If not set, the model will use its default value.
68
68
  """
69
69
 
70
+ enable_reasoner: typing.Optional[bool] = pydantic.Field(default=None)
71
+ """
72
+ Whether or not the model should use extended thinking if it is a 'reasoning' style model
73
+ """
74
+
75
+ max_reasoning_tokens: typing.Optional[int] = pydantic.Field(default=None)
76
+ """
77
+ Configurable thinking budget for extended thinking, only used if enable_reasoner is True. Minimum value is 1024.
78
+ """
79
+
70
80
  if IS_PYDANTIC_V2:
71
81
  model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
72
82
  else:
@@ -23,6 +23,11 @@ class Organization(UncheckedBaseModel):
23
23
  The creation date of the organization.
24
24
  """
25
25
 
26
+ privileged_tools: typing.Optional[bool] = pydantic.Field(default=None)
27
+ """
28
+ Whether the organization has access to privileged tools.
29
+ """
30
+
26
31
  if IS_PYDANTIC_V2:
27
32
  model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
28
33
  else:
@@ -12,6 +12,11 @@ class OrganizationCreate(UncheckedBaseModel):
12
12
  The name of the organization.
13
13
  """
14
14
 
15
+ privileged_tools: typing.Optional[bool] = pydantic.Field(default=None)
16
+ """
17
+ Whether the organization has access to privileged tools.
18
+ """
19
+
15
20
  if IS_PYDANTIC_V2:
16
21
  model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
17
22
  else:
@@ -0,0 +1,27 @@
1
+ # This file was auto-generated by Fern from our API Definition.
2
+
3
+ from ..core.unchecked_base_model import UncheckedBaseModel
4
+ import typing
5
+ import pydantic
6
+ from ..core.pydantic_utilities import IS_PYDANTIC_V2
7
+
8
+
9
+ class OrganizationUpdate(UncheckedBaseModel):
10
+ name: typing.Optional[str] = pydantic.Field(default=None)
11
+ """
12
+ The name of the organization.
13
+ """
14
+
15
+ privileged_tools: typing.Optional[bool] = pydantic.Field(default=None)
16
+ """
17
+ Whether the organization has access to privileged tools.
18
+ """
19
+
20
+ if IS_PYDANTIC_V2:
21
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
22
+ else:
23
+
24
+ class Config:
25
+ frozen = True
26
+ smart_union = True
27
+ extra = pydantic.Extra.allow
@@ -19,6 +19,7 @@ class ReasoningMessage(UncheckedBaseModel):
19
19
  source (Literal["reasoner_model", "non_reasoner_model"]): Whether the reasoning
20
20
  content was generated natively by a reasoner model or derived via prompting
21
21
  reasoning (str): The internal reasoning of the agent
22
+ signature (Optional[str]): The model-generated signature of the reasoning step
22
23
  """
23
24
 
24
25
  id: str
@@ -27,6 +28,7 @@ class ReasoningMessage(UncheckedBaseModel):
27
28
  message_type: typing.Literal["reasoning_message"] = "reasoning_message"
28
29
  source: typing.Optional[ReasoningMessageSource] = None
29
30
  reasoning: str
31
+ signature: typing.Optional[str] = None
30
32
 
31
33
  if IS_PYDANTIC_V2:
32
34
  model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
@@ -1,5 +1,5 @@
1
1
  # This file was auto-generated by Fern from our API Definition.
2
2
 
3
- from .types import CreateVoiceChatCompletionsRequest
3
+ from .types import CreateVoiceChatCompletionsRequestBody
4
4
 
5
- __all__ = ["CreateVoiceChatCompletionsRequest"]
5
+ __all__ = ["CreateVoiceChatCompletionsRequestBody"]
@@ -2,7 +2,7 @@
2
2
 
3
3
  import typing
4
4
  from ..core.client_wrapper import SyncClientWrapper
5
- from .types.create_voice_chat_completions_request import CreateVoiceChatCompletionsRequest
5
+ from .types.create_voice_chat_completions_request_body import CreateVoiceChatCompletionsRequestBody
6
6
  from ..core.request_options import RequestOptions
7
7
  from ..core.jsonable_encoder import jsonable_encoder
8
8
  from ..core.serialization import convert_and_respect_annotation_metadata
@@ -25,7 +25,8 @@ class VoiceClient:
25
25
  self,
26
26
  agent_id: str,
27
27
  *,
28
- request: CreateVoiceChatCompletionsRequest,
28
+ request: CreateVoiceChatCompletionsRequestBody,
29
+ user_id: typing.Optional[str] = None,
29
30
  request_options: typing.Optional[RequestOptions] = None,
30
31
  ) -> typing.Optional[typing.Any]:
31
32
  """
@@ -33,7 +34,9 @@ class VoiceClient:
33
34
  ----------
34
35
  agent_id : str
35
36
 
36
- request : CreateVoiceChatCompletionsRequest
37
+ request : CreateVoiceChatCompletionsRequestBody
38
+
39
+ user_id : typing.Optional[str]
37
40
 
38
41
  request_options : typing.Optional[RequestOptions]
39
42
  Request-specific configuration.
@@ -70,8 +73,11 @@ class VoiceClient:
70
73
  f"v1/voice-beta/{jsonable_encoder(agent_id)}/chat/completions",
71
74
  method="POST",
72
75
  json=convert_and_respect_annotation_metadata(
73
- object_=request, annotation=CreateVoiceChatCompletionsRequest, direction="write"
76
+ object_=request, annotation=CreateVoiceChatCompletionsRequestBody, direction="write"
74
77
  ),
78
+ headers={
79
+ "user-id": str(user_id) if user_id is not None else None,
80
+ },
75
81
  request_options=request_options,
76
82
  omit=OMIT,
77
83
  )
@@ -108,7 +114,8 @@ class AsyncVoiceClient:
108
114
  self,
109
115
  agent_id: str,
110
116
  *,
111
- request: CreateVoiceChatCompletionsRequest,
117
+ request: CreateVoiceChatCompletionsRequestBody,
118
+ user_id: typing.Optional[str] = None,
112
119
  request_options: typing.Optional[RequestOptions] = None,
113
120
  ) -> typing.Optional[typing.Any]:
114
121
  """
@@ -116,7 +123,9 @@ class AsyncVoiceClient:
116
123
  ----------
117
124
  agent_id : str
118
125
 
119
- request : CreateVoiceChatCompletionsRequest
126
+ request : CreateVoiceChatCompletionsRequestBody
127
+
128
+ user_id : typing.Optional[str]
120
129
 
121
130
  request_options : typing.Optional[RequestOptions]
122
131
  Request-specific configuration.
@@ -161,8 +170,11 @@ class AsyncVoiceClient:
161
170
  f"v1/voice-beta/{jsonable_encoder(agent_id)}/chat/completions",
162
171
  method="POST",
163
172
  json=convert_and_respect_annotation_metadata(
164
- object_=request, annotation=CreateVoiceChatCompletionsRequest, direction="write"
173
+ object_=request, annotation=CreateVoiceChatCompletionsRequestBody, direction="write"
165
174
  ),
175
+ headers={
176
+ "user-id": str(user_id) if user_id is not None else None,
177
+ },
166
178
  request_options=request_options,
167
179
  omit=OMIT,
168
180
  )
@@ -1,5 +1,5 @@
1
1
  # This file was auto-generated by Fern from our API Definition.
2
2
 
3
- from .create_voice_chat_completions_request import CreateVoiceChatCompletionsRequest
3
+ from .create_voice_chat_completions_request_body import CreateVoiceChatCompletionsRequestBody
4
4
 
5
- __all__ = ["CreateVoiceChatCompletionsRequest"]
5
+ __all__ = ["CreateVoiceChatCompletionsRequestBody"]
@@ -4,4 +4,6 @@ import typing
4
4
  from ...types.completion_create_params_non_streaming import CompletionCreateParamsNonStreaming
5
5
  from ...types.completion_create_params_streaming import CompletionCreateParamsStreaming
6
6
 
7
- CreateVoiceChatCompletionsRequest = typing.Union[CompletionCreateParamsNonStreaming, CompletionCreateParamsStreaming]
7
+ CreateVoiceChatCompletionsRequestBody = typing.Union[
8
+ CompletionCreateParamsNonStreaming, CompletionCreateParamsStreaming
9
+ ]
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: letta-client
3
- Version: 0.1.83
3
+ Version: 0.1.85
4
4
  Summary:
5
5
  Requires-Python: >=3.8,<4.0
6
6
  Classifier: Intended Audience :: Developers
@@ -1,8 +1,8 @@
1
- letta_client/__init__.py,sha256=6C8DmUhpqCPjTd_KamRBrptGiL8NnkPGO-e70sfZpiA,65901
2
- letta_client/agents/__init__.py,sha256=5zJALonfv-KgVXgPFZjAlTSo-Fm7Fe3S7i3F8vccAvg,25764
1
+ letta_client/__init__.py,sha256=riWzZSKXFaVAqdWeMV2wHbuOnJjMJHgSLGoZKP0g7mc,66617
2
+ letta_client/agents/__init__.py,sha256=EZeH7kHAWnifaPd0MwY_sD3BCchrB29ZprJzqwKeTMM,26012
3
3
  letta_client/agents/blocks/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-_v1roeGPAKoa_pw,65
4
4
  letta_client/agents/blocks/client.py,sha256=u5zvutxoH_DqfSLWhRtNSRBC9_ezQDx682cxkxDz3JA,23822
5
- letta_client/agents/client.py,sha256=ZUwxURBPy4jW4YgeZR_VijXQB2iUPadzQaSoG2-ZcX0,85152
5
+ letta_client/agents/client.py,sha256=noGlyMJyKLp7HvMogDK3QtxXl0KWic_X6pNsMy06apY,86754
6
6
  letta_client/agents/context/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-_v1roeGPAKoa_pw,65
7
7
  letta_client/agents/context/client.py,sha256=GKKvoG4N_K8Biz9yDjeIHpFG0C8Cwc7tHmEX3pTL_9U,4815
8
8
  letta_client/agents/core_memory/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-_v1roeGPAKoa_pw,65
@@ -16,7 +16,7 @@ letta_client/agents/messages/client.py,sha256=eFYRQUTubKq6N1CTebdLL_oT7LIkOyVEXi
16
16
  letta_client/agents/messages/types/__init__.py,sha256=Oc2j0oGOs96IEFf9xsJIkjBjoq3OMtse64YwWv3F9Io,335
17
17
  letta_client/agents/messages/types/letta_streaming_response.py,sha256=MdE2PxQ1x1AviakHXsWVcFv97a3RchzzzIiD77w4EC8,665
18
18
  letta_client/agents/messages/types/messages_modify_request.py,sha256=7C2X3BKye-YDSXOkdEmxxt34seI4jkLK0-govtc4nhg,475
19
- letta_client/agents/messages/types/messages_modify_response.py,sha256=f2eITUx-zQ4qzcYd1JPS_mFSqJw7xVsxX7GR7d2RYRI,552
19
+ letta_client/agents/messages/types/messages_modify_response.py,sha256=THyiUMxZyzVSp0kk1s0XOLW1LUass7mXcfFER1PTLyw,671
20
20
  letta_client/agents/passages/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-_v1roeGPAKoa_pw,65
21
21
  letta_client/agents/passages/client.py,sha256=hWC-WHKU-0kwkn5ncPhxALL_wGLCu1JmLlmfDaAOVww,15586
22
22
  letta_client/agents/sources/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-_v1roeGPAKoa_pw,65
@@ -28,7 +28,7 @@ letta_client/agents/templates/types/templates_create_response.py,sha256=kKjkyjv3
28
28
  letta_client/agents/templates/types/templates_migrate_response.py,sha256=7N4JtAaiao-LrNdi72K7XB01uXJVkczaKYIJIMf0QYs,577
29
29
  letta_client/agents/tools/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-_v1roeGPAKoa_pw,65
30
30
  letta_client/agents/tools/client.py,sha256=xZMRZhG8mI_h8_QqgI4lXh3FieRCLeoPwdtB56GB-XU,12685
31
- letta_client/agents/types/__init__.py,sha256=vWZEiEPxWIefq0fkaRJUcdzu6hJOoGR_K_stM0v-iig,41009
31
+ letta_client/agents/types/__init__.py,sha256=hVfHOKotPmiacVAvJDd6zq1qjpXG6VPaI5OnBHYYljE,41420
32
32
  letta_client/agents/types/agents_search_request_search_item.py,sha256=9wZPvTP5ESFOhdF9YqdYwv4h_fEFF9TWbGtDO9xkrzA,494
33
33
  letta_client/agents/types/agents_search_request_search_item_field.py,sha256=06cbjgIRD2GL7Ck7ZYxLVNbrKP9HLHNOCi9DSPspATQ,692
34
34
  letta_client/agents/types/agents_search_request_search_item_one.py,sha256=ECWv-hDZen6AomM01zmRsOz0PlXVEwIwLHjid9yko9o,779
@@ -62,9 +62,11 @@ letta_client/agents/types/agents_search_response_agents_item_embedding_config_ha
62
62
  letta_client/agents/types/agents_search_response_agents_item_identity_ids.py,sha256=Me2QPiXqsJiSWcpmnAu-n6runXGk0OCTv2z-FSHx-iA,184
63
63
  letta_client/agents/types/agents_search_response_agents_item_last_updated_by_id.py,sha256=WpPW5-0-xegQTh_-7igpbuFr5wFeFM359kuBqbQDYRE,428
64
64
  letta_client/agents/types/agents_search_response_agents_item_last_updated_by_id_item.py,sha256=ewpvZ8ScpPBI1Vi7cWjTPQ1eeYBqU8BcsdmFwXR3fsM,172
65
- letta_client/agents/types/agents_search_response_agents_item_llm_config.py,sha256=QLL21-fKxgjgHcpAZR1VwyRj8G1i1j7mTx_U6qZ25d8,2226
65
+ letta_client/agents/types/agents_search_response_agents_item_llm_config.py,sha256=JOJ8U4d66KpbgFI68U0x2ylHv53hFFxyu4DitxBDtuE,2718
66
+ letta_client/agents/types/agents_search_response_agents_item_llm_config_enable_reasoner.py,sha256=6q8s7q-UnvHFiGvZp7TdXdw2x2Wgo5dBMKBG502J1dA,184
66
67
  letta_client/agents/types/agents_search_response_agents_item_llm_config_handle.py,sha256=OkFtdvvJ0GCcYZr8p7nHNbC9JTgUky1W8nYmo30WRpo,418
67
68
  letta_client/agents/types/agents_search_response_agents_item_llm_config_handle_item.py,sha256=Ykt36D4KDmb1X-t3R7EaECAHbDq8xQqTBU--lSo2g7g,172
69
+ letta_client/agents/types/agents_search_response_agents_item_llm_config_max_reasoning_tokens.py,sha256=VtcctcNE5cvTe08u9cZYOP6emQUKWDPcJKUhD0IRbQk,189
68
70
  letta_client/agents/types/agents_search_response_agents_item_llm_config_max_tokens.py,sha256=oS9ztkcyf_1lABiND482ROlh7ghOLhhNBLCe69tRkwc,442
69
71
  letta_client/agents/types/agents_search_response_agents_item_llm_config_max_tokens_item.py,sha256=mfB0NNoNIQJPbAv1VLjSKKc2F4UaeOuNkUMzkqay7xY,177
70
72
  letta_client/agents/types/agents_search_response_agents_item_llm_config_model_endpoint.py,sha256=ejEayxna8G74hNJxeVXNRfDnDaQsl81Fi_KoqNsfeCk,456
@@ -234,7 +236,7 @@ letta_client/blocks/client.py,sha256=LE9dsHaBxFLC3G035f0VpNDG7XKWRK8y9OXpeFCMvUw
234
236
  letta_client/client.py,sha256=k2mZqqEWciVmEQHgipjCK4kQILk74hpSqzcdNwdql9A,21212
235
237
  letta_client/core/__init__.py,sha256=OKbX2aCZXgHCDUsCouqv-OiX32xA6eFFCKIUH9M5Vzk,1591
236
238
  letta_client/core/api_error.py,sha256=RE8LELok2QCjABadECTvtDp7qejA1VmINCh6TbqPwSE,426
237
- letta_client/core/client_wrapper.py,sha256=ahtzMKKVpae6lXJSin1QYu6XciJ0foI-HnQyFlG8c6Y,1997
239
+ letta_client/core/client_wrapper.py,sha256=FOxgN5Wq-Cq4fA_W3Bp9oXiC_gLMjhJgW2FIWukqxq0,1997
238
240
  letta_client/core/datetime_utils.py,sha256=nBys2IsYrhPdszxGKCNRPSOCwa-5DWOHG95FB8G9PKo,1047
239
241
  letta_client/core/file.py,sha256=d4NNbX8XvXP32z8KpK2Xovv33nFfruIrpz0QWxlgpZk,2663
240
242
  letta_client/core/http_client.py,sha256=Z77OIxIbL4OAB2IDqjRq_sYa5yNYAWfmdhdCSSvh6Y4,19552
@@ -276,9 +278,9 @@ letta_client/steps/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-_v1roeGPAKoa_p
276
278
  letta_client/steps/client.py,sha256=g4XUUtdKzkSiRkxJW6ACrYe8ySvJ_tUMGK4ag6QRZT4,11284
277
279
  letta_client/tag/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-_v1roeGPAKoa_pw,65
278
280
  letta_client/tag/client.py,sha256=TBAotdb0e2_x2pANF4dOE1qmWY3GIgb7nOhvN7iZ3_4,5183
279
- letta_client/templates/__init__.py,sha256=mKuy259yyTkJiU4Vlrhcivwf5yGTdkeNBKj8W4doJZ8,28009
281
+ letta_client/templates/__init__.py,sha256=Bq3bRs6E-kRxhQK-mcuW_9Hg_WIH-Xus0N4bSv6WLec,28293
280
282
  letta_client/templates/client.py,sha256=k1eya9jpfKw8CwKP8U7qIhKVmpk1IBqPG-oeK8C6fOM,7188
281
- letta_client/templates/types/__init__.py,sha256=l5Q0UIzuIgvWxV5-f-BNE5JefHD8NrZt910a54qEERk,45863
283
+ letta_client/templates/types/__init__.py,sha256=y4mGN4JSA-aDZVXfCQRmnakS0LtrE67OVj8TRkODc8I,46330
282
284
  letta_client/templates/types/templates_create_agents_response.py,sha256=UNMZSUckqoug1sq-gqC7luO392eItxxy0NNdUL0CRfQ,725
283
285
  letta_client/templates/types/templates_create_agents_response_agents_item.py,sha256=yp0fKlo9CTbpETu9x5ahDEBDVV8eCSRGW47eFFZRfYc,5445
284
286
  letta_client/templates/types/templates_create_agents_response_agents_item_agent_type.py,sha256=667uXDfYpS48UZIiSpjdgY4xDcApSpW5viyc3T4eKjo,240
@@ -307,9 +309,11 @@ letta_client/templates/types/templates_create_agents_response_agents_item_embedd
307
309
  letta_client/templates/types/templates_create_agents_response_agents_item_identity_ids.py,sha256=-ss5Ifi4HdGrP0g5X8yPPv-V3EGltzmPz2oVec4HGCc,193
308
310
  letta_client/templates/types/templates_create_agents_response_agents_item_last_updated_by_id.py,sha256=h0ZU1lx4hDem3EkXpQfcZe0T6llCxudbWL4D0wzs8Gw,465
309
311
  letta_client/templates/types/templates_create_agents_response_agents_item_last_updated_by_id_item.py,sha256=Gm8rlP1HXMx0kf6yVTiHvn3ak1YfUQ-MrRs4njYg6ds,181
310
- letta_client/templates/types/templates_create_agents_response_agents_item_llm_config.py,sha256=yN1LYm0YhsSNEZvy8IaxMlTDQifhWCHELfyVipg2pUw,2447
312
+ letta_client/templates/types/templates_create_agents_response_agents_item_llm_config.py,sha256=T2-zkGdLW3Ae74F-VlE6rTp6oDCqYY6qFoBqnOCPvo4,2995
313
+ letta_client/templates/types/templates_create_agents_response_agents_item_llm_config_enable_reasoner.py,sha256=VUCyPaiqPp9Qlw4GkgfuA8gLLqjkEWmLsJ2UL4WYHVA,193
311
314
  letta_client/templates/types/templates_create_agents_response_agents_item_llm_config_handle.py,sha256=6iViMHSzqSIYklNasY3z5KxZUunTmdLGDBRHhdveq98,464
312
315
  letta_client/templates/types/templates_create_agents_response_agents_item_llm_config_handle_item.py,sha256=SX6bP8RWlynXRdOjoqg3NLNZNOnpVIn4ZTtEQZmFFD0,181
316
+ letta_client/templates/types/templates_create_agents_response_agents_item_llm_config_max_reasoning_tokens.py,sha256=9zXTM6Mh0P5UrqHKQ-GP30ahClk9GD_vGEiTeTyXf_U,198
313
317
  letta_client/templates/types/templates_create_agents_response_agents_item_llm_config_max_tokens.py,sha256=mk3l02cY3_xWUtUYr0y9i82QwwsODgidF4mNCv0tW6I,479
314
318
  letta_client/templates/types/templates_create_agents_response_agents_item_llm_config_max_tokens_item.py,sha256=7FWlQX8vYiYuiZlFz74EQTPGUKr9m0IWbu1eKPE95Vw,186
315
319
  letta_client/templates/types/templates_create_agents_response_agents_item_llm_config_model_endpoint.py,sha256=ksCAK3zWAtSUDcuk6QZeb4cX824hnvaS5koJzOFliQU,493
@@ -472,13 +476,13 @@ letta_client/templates/types/templates_create_agents_response_agents_item_tools_
472
476
  letta_client/templates/types/templates_create_agents_response_agents_item_updated_at.py,sha256=Md7WfCTT1_AGvyd24EeWzUibPvnrun9rhyxqCLeAURg,439
473
477
  letta_client/templates/types/templates_create_agents_response_agents_item_updated_at_item.py,sha256=T3rYnv5m_cBAEPBnEjUkkHJLYtFZfXNMbb7a9FrIwKY,175
474
478
  letta_client/tools/__init__.py,sha256=XsuAkxHDA-Z98gLNNW_fiEwFP3fP4XQipflrK2bHl8k,353
475
- letta_client/tools/client.py,sha256=y5WTB5Wzh5gv1IWaSraU6pHYck7RBZlCnQW157OFjuw,77574
479
+ letta_client/tools/client.py,sha256=aJqW1sNecrsjBAs6eFubMo2Up0u3lJxpafo1mkj2fnQ,78344
476
480
  letta_client/tools/types/__init__.py,sha256=R11LYBi6lxkud_DRyaHFUHtlnbfnEI93-SEo7FL4tzs,478
477
481
  letta_client/tools/types/add_mcp_server_request.py,sha256=EieZjfOT95sjkpxXdqy7glpxF4J4J3fm6tlaHFnYk84,265
478
482
  letta_client/tools/types/add_mcp_server_response_item.py,sha256=TWdsKqGb1INhYtpGnAckz0Pw4nZShumSp4pfocRfxCA,270
479
483
  letta_client/tools/types/delete_mcp_server_response_item.py,sha256=MeZObU-7tMSCd-S5yuUjNDse6A1hUz1LLjbko0pXaro,273
480
484
  letta_client/tools/types/list_mcp_servers_response_value.py,sha256=AIoXu4bO8QNSU7zjL1jj0Rg4313wVtPaTt13W0aevLQ,273
481
- letta_client/types/__init__.py,sha256=p5lV9jyVA6byEwICd0TH7OqYcJOvzFa8xyv_22qr9-I,18551
485
+ letta_client/types/__init__.py,sha256=7gQk1HcKIdNShFr6J2RTVb0zzuBYFAHc_T0yTdgSZ74,18827
482
486
  letta_client/types/action_model.py,sha256=y1e2XMv3skFaNJIBdYoBKgiORzGh05aOVvu-qVR9uHg,1240
483
487
  letta_client/types/action_parameters_model.py,sha256=LgKf5aPZG3-OHGxFdXiSokIDgce8c02xPYIAY05VgW8,828
484
488
  letta_client/types/action_response_model.py,sha256=yq2Fd9UU8j7vvtE3VqXUoRRvDzWcfJPj_95ynGdeHCs,824
@@ -531,7 +535,7 @@ letta_client/types/completion_create_params_non_streaming.py,sha256=hjEJ-wJWuFuT
531
535
  letta_client/types/completion_create_params_non_streaming_function_call.py,sha256=6iCjgXwsXnflllhfDDKtHRyxzKqtLcX6-HVr7AXlyUM,329
532
536
  letta_client/types/completion_create_params_non_streaming_messages_item.py,sha256=pKMxLh1XFgMl7LqcjKJmdeKYTCwlr3FLFPTuvaLf3D0,883
533
537
  letta_client/types/completion_create_params_non_streaming_modalities_item.py,sha256=BuyCf2nTCWVhishXFk3CsQphnPwNXj-kBdPMjkb8X10,189
534
- letta_client/types/completion_create_params_non_streaming_model.py,sha256=bWeGbuTRstRZEW3AiHwZIeHFnsLWGRI5wfrpWxv91ko,1966
538
+ letta_client/types/completion_create_params_non_streaming_model.py,sha256=RhKFYjt4pgTgHfoihS1VdfIDjIlR1KCvPVMDIBY6UbY,1935
535
539
  letta_client/types/completion_create_params_non_streaming_reasoning_effort.py,sha256=f1hBX3qksGoGC6O2W5qHblCQXtoZiEhiN8LUy1Rv9Ig,198
536
540
  letta_client/types/completion_create_params_non_streaming_response_format.py,sha256=c16kBch59yhxAgMeFTxGNrEBNl4Vu3fPmZ2RqqS6bkU,407
537
541
  letta_client/types/completion_create_params_non_streaming_service_tier.py,sha256=Tfw62WLF3WSHWZy8VOVXal1INDQNtZhoB8DSA0btJ0g,188
@@ -541,7 +545,7 @@ letta_client/types/completion_create_params_streaming.py,sha256=sgazDkBKpQTk2Ntr
541
545
  letta_client/types/completion_create_params_streaming_function_call.py,sha256=cxsVe0wAIKPAsndL5vB_BCTy6oSxFph7qB1c1LWmeDw,326
542
546
  letta_client/types/completion_create_params_streaming_messages_item.py,sha256=S4E0fe3LgVyetb2PEqhGNxqMj5kgQx4q6Qk2bvvu2Ok,880
543
547
  letta_client/types/completion_create_params_streaming_modalities_item.py,sha256=o9ZU7r22WrE6z-BSJ72LJXHtVRIpK499WArVgY-ODgI,186
544
- letta_client/types/completion_create_params_streaming_model.py,sha256=ESDrTKo0396c3uH7-bxu9Xu7--e0RnBLmuVUN_0R3ho,1963
548
+ letta_client/types/completion_create_params_streaming_model.py,sha256=f80smBsCDdtc7oGKFz4sx8h_wnj_Ls4tyvjZeHdrkwc,1932
545
549
  letta_client/types/completion_create_params_streaming_reasoning_effort.py,sha256=4-JFyaD92zia-kN7bPyCWwf_AMDnG2xUXWx8GQU1EFE,195
546
550
  letta_client/types/completion_create_params_streaming_response_format.py,sha256=31sy6fKZ4r50zvjVTnoOpwNX81Bx7kFM75Mn7-obbYI,404
547
551
  letta_client/types/completion_create_params_streaming_service_tier.py,sha256=chHakgbKOYCMtxdtGmP85rcjGkyOqt2S_JJ9SabSd-o,185
@@ -559,7 +563,7 @@ letta_client/types/e_2_b_sandbox_config.py,sha256=w3R4QpPjeie5aKw8sb_eKhl78J0k5v
559
563
  letta_client/types/embedding_config.py,sha256=ubGDLn8_H1qOoZUUj6de0MVrQnM2umVR2vdnOolPyr4,2539
560
564
  letta_client/types/embedding_config_embedding_endpoint_type.py,sha256=Ho1HSODi21PkzsZR58g7FlIMReFU2yf0hAS5OyUsW6Q,559
561
565
  letta_client/types/file.py,sha256=ZLCEYJqIJ1pzAJn4Pke6gVdKivKU9FrIg98P4GmFY8M,628
562
- letta_client/types/file_file.py,sha256=5hunDKL7BFz4jvXp9X2oF_YH50Veg1G19fBOQBVcuCQ,666
566
+ letta_client/types/file_file.py,sha256=jbWcPKn-fSUlq9kl8n2us9fPU6x-Z20IKScHD_pJruw,665
563
567
  letta_client/types/file_metadata.py,sha256=vORZH5WZO8AwAuKq0h0W9TTuydjmDlkZC6YyZMy2jbc,1973
564
568
  letta_client/types/function_call.py,sha256=eE6VYWK3A-2xRrIV-QKqrofvaVFcPNqSzl6lrWnopZA,576
565
569
  letta_client/types/function_definition_input.py,sha256=UpoD7ftRpHquJ5zhy28TjXPBVzxj7rOHKv3gX84Nfj8,740
@@ -570,6 +574,8 @@ letta_client/types/group.py,sha256=Y8iaGI08uSt99AE0GkiGV95YB5ywiAZOWd0jXojgwqU,1
570
574
  letta_client/types/group_create.py,sha256=TPYqXPVtriwhTVwHm_MRDNrKyl2mlice7Q22vVbOHwg,857
571
575
  letta_client/types/group_create_manager_config.py,sha256=4NhJEsVCuLBfHD1lbT7xKAgfKWycrwXV7W_u7ifjh1E,319
572
576
  letta_client/types/health.py,sha256=nQwx5ysn_cJMKUoqsfaPcGNSRSjfwX5S272UiSQJ03w,618
577
+ letta_client/types/hidden_reasoning_message.py,sha256=2ExD6XKtWsMQQQCiZcyAGr-Tzgk-i3L663lT3p778pc,1447
578
+ letta_client/types/hidden_reasoning_message_state.py,sha256=qotAgF_P4T7OEHzbhGDVFaLZYOs1ULMPVHmiFvoRIfM,174
573
579
  letta_client/types/http_validation_error.py,sha256=yHa4_NHIMB-VKNZpk7agjLTwWIg7mv7ml3d7I-Bqiog,661
574
580
  letta_client/types/identity.py,sha256=ODegiJaCyiFFfYd177v-hRdJBnIwbCQImB9U_fk4s4E,1591
575
581
  letta_client/types/identity_create.py,sha256=QuYCfc7dL5aHQqRIt6SlOV00bWyeMouxpKiY3Wx10o0,1538
@@ -588,13 +594,13 @@ letta_client/types/job_status.py,sha256=0Gu5Tku79SDVzCxnjVXQyDPNCizGWUP1ppohAck6
588
594
  letta_client/types/job_type.py,sha256=Roa04Ry0I-8YMYcDHiHSQwqBavZyPonzkZtjf098e-Q,145
589
595
  letta_client/types/json_schema.py,sha256=EHcLKBSGRsSzCKTpujKFHylcLJG6ODQIBrjQkU4lWDQ,870
590
596
  letta_client/types/letta_message_content_union.py,sha256=YxzyXKxUMeqbqWOlDs9LC8HUiqEhgkNCV9a76GS3spg,486
591
- letta_client/types/letta_message_union.py,sha256=FM4Zippr5fJ05AZ2aZRFlqp348xNgLbzVOcrnyNfytI,493
597
+ letta_client/types/letta_message_union.py,sha256=TTQwlur2CZNdZ466Nb_2TFcSFXrgoMliaNzD33t7Ktw,603
592
598
  letta_client/types/letta_request.py,sha256=bCPDRJhSJSo5eILJp0mTw_k26O3dZL1vChfAcaZ0rE8,1240
593
599
  letta_client/types/letta_request_config.py,sha256=b6K4QtDdHjcZKfBb1fugUuoPrT2N4d5TTB0PIRNI2SU,1085
594
600
  letta_client/types/letta_response.py,sha256=i5gAUTgWzIst_RP8I_zSh0GSnLIS3z--1BmK6EF1mkQ,1315
595
601
  letta_client/types/letta_streaming_request.py,sha256=jm0HLzfzWzIRs8uwtX33V5f5Ljw_hFOKOhPjdIZX9cA,1465
596
602
  letta_client/types/letta_usage_statistics.py,sha256=pdlEk_GYVTiDUgW0ZePOdyrJZ6zoSCGEgm_gM3B1wr8,1721
597
- letta_client/types/llm_config.py,sha256=B-LJpzPB5RNSPG-cag65yTIWc0mbD7iKg77N6ejPL64,3045
603
+ letta_client/types/llm_config.py,sha256=cycdnu-lgQsLsFmFQrc9S_O20snEdxRLcvwWwLMFnik,3441
598
604
  letta_client/types/llm_config_model_endpoint_type.py,sha256=HOSM5kIZDCNAVCWmASvAk52K819plqGlD66yKQ1xFkI,620
599
605
  letta_client/types/local_sandbox_config.py,sha256=jfe7akG_YrJJ8csLaLdev04Zg1x-PTN0XCAL4KifaZI,1387
600
606
  letta_client/types/manager_type.py,sha256=hV271989JpEhJQH02MzLpJ34EsbGnyMlckbz2TXBc-E,184
@@ -615,15 +621,16 @@ letta_client/types/omitted_reasoning_content.py,sha256=TL6zor7HxJ_oIYzvdAAdrgR_P
615
621
  letta_client/types/openai_types_chat_chat_completion_message_tool_call_param_function.py,sha256=glG5tG6g2uxP4R5jwsChkf3F0sb208uEbR-25dnrTiM,621
616
622
  letta_client/types/openai_types_chat_chat_completion_named_tool_choice_param_function.py,sha256=20aPdyj3_-cD_p33yZ0ca3IbU9Apq1UrnxCSaU6OgYg,602
617
623
  letta_client/types/openai_types_chat_completion_create_params_function.py,sha256=oTjYqRv8z6SMSdFgTl4W9oI-QUQxz8Unf4yn90sByss,721
618
- letta_client/types/organization.py,sha256=vSXwqYTpxGZgpMTv8rw5jzklZnUYjS6yBTkEFNPNSrU,927
619
- letta_client/types/organization_create.py,sha256=xlF1FgDRa7zpv49kVGWYchcSEUjPEsjF5_m2xHWb9VM,661
624
+ letta_client/types/organization.py,sha256=w8D3x4fEWwXBpDKZkR7C6CjXx2WBSM-ltrslscw8HzM,1080
625
+ letta_client/types/organization_create.py,sha256=qi37VLCejxTYpJe9gC1slhENIdz6B4DchD3ZdVJciT4,814
626
+ letta_client/types/organization_update.py,sha256=uCQAcWm8az3VbMtCEidPBZLh6Qyo4Z0FQco1Hdrk4LY,814
620
627
  letta_client/types/parameter_properties.py,sha256=KVQGp_csoiNzyf9XsL083fwlX_a2Tc8GsCKyWB323C8,609
621
628
  letta_client/types/parameters_schema.py,sha256=ptXcwjuaCwqRhfizeiWAsu3pqT87Jcj_P3YaEkL4asM,748
622
629
  letta_client/types/passage.py,sha256=1OM19TyVCQEL1P3BC58hmzWfawZM4vejiKr0P11dOUk,3034
623
630
  letta_client/types/pip_requirement.py,sha256=Hmh7VpJhdSfFkafh6QwAehCp0MQUBXv1YAoYP-2wV2M,773
624
631
  letta_client/types/provider.py,sha256=RvdE9dzGFJ4hcmyvk2xeO7RNpxQvXhB_S9DNy8t_z-E,1053
625
632
  letta_client/types/reasoning_content.py,sha256=aId-87QjQ4sm_fuCmzIdZZghr-9DFeVV-Lv9x5iVw3I,995
626
- letta_client/types/reasoning_message.py,sha256=HbSYz0TbnGsFb1MELz0oCDMVC2dg5mY9jdmn3KCeFm0,1354
633
+ letta_client/types/reasoning_message.py,sha256=hlD4UCaCIJjSmhgJTUpHzO_WAkK9B6ilFaN1Xbhh-ok,1484
627
634
  letta_client/types/reasoning_message_source.py,sha256=GYOWGm2mje1yYbR8E2kbAeQS--VDrGlpsobEBQHE2cU,186
628
635
  letta_client/types/redacted_reasoning_content.py,sha256=ROAcdqOjM-kaw23HrVJrh0a49TRYuijanHDaCqcMErM,735
629
636
  letta_client/types/response_format_json_object.py,sha256=ZSWmwdN8itFr5q77mxuBhEWRBh2CubAonJUCi88UjbA,611
@@ -685,10 +692,10 @@ letta_client/types/web_search_options_search_context_size.py,sha256=RgJGV4rkuaCT
685
692
  letta_client/types/web_search_options_user_location.py,sha256=4aXfFcwUBu7YNA5XBjfhmD6tgRb0e8LTFexmn-rkDfw,770
686
693
  letta_client/types/web_search_options_user_location_approximate.py,sha256=Ywk01J9H67L6_498E5E6ceJ2VbJUfcLiIJWD_s92_M0,731
687
694
  letta_client/version.py,sha256=bttKLbIhO3UonCYQlqs600zzbQgfhCCMjeXR9WRzid4,79
688
- letta_client/voice/__init__.py,sha256=ZrZEuXIukVGhsfM-i0dIFfqjeSOBMPeEgDva7VvnipE,167
689
- letta_client/voice/client.py,sha256=2KKJiteGk5HQM79ne1jOPl_ZyUTfZM_gXNdZZ_ndPU8,6485
690
- letta_client/voice/types/__init__.py,sha256=hBLJcrom99DkDxxsVRU2ni8kPx6SsCy8gtAJvNOz26w,199
691
- letta_client/voice/types/create_voice_chat_completions_request.py,sha256=K4__83rXRCshfdobyAmH-5fUDJQ_PeSQetTUeC4Abk0,381
692
- letta_client-0.1.83.dist-info/METADATA,sha256=6xzEtVHpF91M2AWDWjYp1TcD2kbdvh1K_TguJpxb9_8,5041
693
- letta_client-0.1.83.dist-info/WHEEL,sha256=Zb28QaM1gQi8f4VCBhsUklF61CTlNYfs9YAZn-TOGFk,88
694
- letta_client-0.1.83.dist-info/RECORD,,
695
+ letta_client/voice/__init__.py,sha256=7hX85553PiRMtIMM12a0DSoFzsglNiUziYR2ekS84Qw,175
696
+ letta_client/voice/client.py,sha256=STjswa5oOLoP59QwTJvQwi73kgn0UzKOaXc2CsTRI4k,6912
697
+ letta_client/voice/types/__init__.py,sha256=FRc3iKRTONE4N8Lf1IqvnqWZ2kXdrFFvkL7PxVcR8Ew,212
698
+ letta_client/voice/types/create_voice_chat_completions_request_body.py,sha256=ZLfKgNK1T6IAwLEvaBVFfy7jEAoPUXP28n-nfmHkklc,391
699
+ letta_client-0.1.85.dist-info/METADATA,sha256=VJ54060_zlBDtPw1l4RNyNV4dyj62UmknQ154kPup9U,5041
700
+ letta_client-0.1.85.dist-info/WHEEL,sha256=Zb28QaM1gQi8f4VCBhsUklF61CTlNYfs9YAZn-TOGFk,88
701
+ letta_client-0.1.85.dist-info/RECORD,,