letta-client 0.1.84__py3-none-any.whl → 0.1.85__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of letta-client might be problematic. Click here for more details.

Files changed (23)
  1. letta_client/__init__.py +12 -0
  2. letta_client/agents/__init__.py +4 -0
  3. letta_client/agents/client.py +30 -0
  4. letta_client/agents/messages/types/messages_modify_response.py +8 -1
  5. letta_client/agents/types/__init__.py +8 -0
  6. letta_client/agents/types/agents_search_response_agents_item_llm_config.py +8 -0
  7. letta_client/agents/types/agents_search_response_agents_item_llm_config_enable_reasoner.py +5 -0
  8. letta_client/agents/types/agents_search_response_agents_item_llm_config_max_reasoning_tokens.py +5 -0
  9. letta_client/core/client_wrapper.py +1 -1
  10. letta_client/templates/__init__.py +4 -0
  11. letta_client/templates/types/__init__.py +8 -0
  12. letta_client/templates/types/templates_create_agents_response_agents_item_llm_config.py +8 -0
  13. letta_client/templates/types/templates_create_agents_response_agents_item_llm_config_enable_reasoner.py +5 -0
  14. letta_client/templates/types/templates_create_agents_response_agents_item_llm_config_max_reasoning_tokens.py +5 -0
  15. letta_client/types/__init__.py +4 -0
  16. letta_client/types/hidden_reasoning_message.py +39 -0
  17. letta_client/types/hidden_reasoning_message_state.py +5 -0
  18. letta_client/types/letta_message_union.py +8 -1
  19. letta_client/types/llm_config.py +10 -0
  20. letta_client/types/reasoning_message.py +2 -0
  21. {letta_client-0.1.84.dist-info → letta_client-0.1.85.dist-info}/METADATA +1 -1
  22. {letta_client-0.1.84.dist-info → letta_client-0.1.85.dist-info}/RECORD +23 -17
  23. {letta_client-0.1.84.dist-info → letta_client-0.1.85.dist-info}/WHEEL +0 -0
letta_client/__init__.py CHANGED
@@ -92,6 +92,8 @@ from .types import (
92
92
  GroupCreate,
93
93
  GroupCreateManagerConfig,
94
94
  Health,
95
+ HiddenReasoningMessage,
96
+ HiddenReasoningMessageState,
95
97
  HttpValidationError,
96
98
  Identity,
97
99
  IdentityCreate,
@@ -261,8 +263,10 @@ from .agents import (
261
263
  AgentsSearchResponseAgentsItemLastUpdatedById,
262
264
  AgentsSearchResponseAgentsItemLastUpdatedByIdItem,
263
265
  AgentsSearchResponseAgentsItemLlmConfig,
266
+ AgentsSearchResponseAgentsItemLlmConfigEnableReasoner,
264
267
  AgentsSearchResponseAgentsItemLlmConfigHandle,
265
268
  AgentsSearchResponseAgentsItemLlmConfigHandleItem,
269
+ AgentsSearchResponseAgentsItemLlmConfigMaxReasoningTokens,
266
270
  AgentsSearchResponseAgentsItemLlmConfigMaxTokens,
267
271
  AgentsSearchResponseAgentsItemLlmConfigMaxTokensItem,
268
272
  AgentsSearchResponseAgentsItemLlmConfigModelEndpoint,
@@ -459,8 +463,10 @@ from .templates import (
459
463
  TemplatesCreateAgentsResponseAgentsItemLastUpdatedById,
460
464
  TemplatesCreateAgentsResponseAgentsItemLastUpdatedByIdItem,
461
465
  TemplatesCreateAgentsResponseAgentsItemLlmConfig,
466
+ TemplatesCreateAgentsResponseAgentsItemLlmConfigEnableReasoner,
462
467
  TemplatesCreateAgentsResponseAgentsItemLlmConfigHandle,
463
468
  TemplatesCreateAgentsResponseAgentsItemLlmConfigHandleItem,
469
+ TemplatesCreateAgentsResponseAgentsItemLlmConfigMaxReasoningTokens,
464
470
  TemplatesCreateAgentsResponseAgentsItemLlmConfigMaxTokens,
465
471
  TemplatesCreateAgentsResponseAgentsItemLlmConfigMaxTokensItem,
466
472
  TemplatesCreateAgentsResponseAgentsItemLlmConfigModelEndpoint,
@@ -677,8 +683,10 @@ __all__ = [
677
683
  "AgentsSearchResponseAgentsItemLastUpdatedById",
678
684
  "AgentsSearchResponseAgentsItemLastUpdatedByIdItem",
679
685
  "AgentsSearchResponseAgentsItemLlmConfig",
686
+ "AgentsSearchResponseAgentsItemLlmConfigEnableReasoner",
680
687
  "AgentsSearchResponseAgentsItemLlmConfigHandle",
681
688
  "AgentsSearchResponseAgentsItemLlmConfigHandleItem",
689
+ "AgentsSearchResponseAgentsItemLlmConfigMaxReasoningTokens",
682
690
  "AgentsSearchResponseAgentsItemLlmConfigMaxTokens",
683
691
  "AgentsSearchResponseAgentsItemLlmConfigMaxTokensItem",
684
692
  "AgentsSearchResponseAgentsItemLlmConfigModelEndpoint",
@@ -928,6 +936,8 @@ __all__ = [
928
936
  "GroupCreate",
929
937
  "GroupCreateManagerConfig",
930
938
  "Health",
939
+ "HiddenReasoningMessage",
940
+ "HiddenReasoningMessageState",
931
941
  "HttpValidationError",
932
942
  "Identity",
933
943
  "IdentityCreate",
@@ -1040,8 +1050,10 @@ __all__ = [
1040
1050
  "TemplatesCreateAgentsResponseAgentsItemLastUpdatedById",
1041
1051
  "TemplatesCreateAgentsResponseAgentsItemLastUpdatedByIdItem",
1042
1052
  "TemplatesCreateAgentsResponseAgentsItemLlmConfig",
1053
+ "TemplatesCreateAgentsResponseAgentsItemLlmConfigEnableReasoner",
1043
1054
  "TemplatesCreateAgentsResponseAgentsItemLlmConfigHandle",
1044
1055
  "TemplatesCreateAgentsResponseAgentsItemLlmConfigHandleItem",
1056
+ "TemplatesCreateAgentsResponseAgentsItemLlmConfigMaxReasoningTokens",
1045
1057
  "TemplatesCreateAgentsResponseAgentsItemLlmConfigMaxTokens",
1046
1058
  "TemplatesCreateAgentsResponseAgentsItemLlmConfigMaxTokensItem",
1047
1059
  "TemplatesCreateAgentsResponseAgentsItemLlmConfigModelEndpoint",
@@ -35,8 +35,10 @@ from .types import (
35
35
  AgentsSearchResponseAgentsItemLastUpdatedById,
36
36
  AgentsSearchResponseAgentsItemLastUpdatedByIdItem,
37
37
  AgentsSearchResponseAgentsItemLlmConfig,
38
+ AgentsSearchResponseAgentsItemLlmConfigEnableReasoner,
38
39
  AgentsSearchResponseAgentsItemLlmConfigHandle,
39
40
  AgentsSearchResponseAgentsItemLlmConfigHandleItem,
41
+ AgentsSearchResponseAgentsItemLlmConfigMaxReasoningTokens,
40
42
  AgentsSearchResponseAgentsItemLlmConfigMaxTokens,
41
43
  AgentsSearchResponseAgentsItemLlmConfigMaxTokensItem,
42
44
  AgentsSearchResponseAgentsItemLlmConfigModelEndpoint,
@@ -241,8 +243,10 @@ __all__ = [
241
243
  "AgentsSearchResponseAgentsItemLastUpdatedById",
242
244
  "AgentsSearchResponseAgentsItemLastUpdatedByIdItem",
243
245
  "AgentsSearchResponseAgentsItemLlmConfig",
246
+ "AgentsSearchResponseAgentsItemLlmConfigEnableReasoner",
244
247
  "AgentsSearchResponseAgentsItemLlmConfigHandle",
245
248
  "AgentsSearchResponseAgentsItemLlmConfigHandleItem",
249
+ "AgentsSearchResponseAgentsItemLlmConfigMaxReasoningTokens",
246
250
  "AgentsSearchResponseAgentsItemLlmConfigMaxTokens",
247
251
  "AgentsSearchResponseAgentsItemLlmConfigMaxTokensItem",
248
252
  "AgentsSearchResponseAgentsItemLlmConfigModelEndpoint",
@@ -218,6 +218,9 @@ class AgentsClient:
218
218
  embedding: typing.Optional[str] = OMIT,
219
219
  context_window_limit: typing.Optional[int] = OMIT,
220
220
  embedding_chunk_size: typing.Optional[int] = OMIT,
221
+ max_tokens: typing.Optional[int] = OMIT,
222
+ max_reasoning_tokens: typing.Optional[int] = OMIT,
223
+ enable_reasoner: typing.Optional[bool] = OMIT,
221
224
  from_template: typing.Optional[str] = OMIT,
222
225
  template: typing.Optional[bool] = OMIT,
223
226
  create_agent_request_project: typing.Optional[str] = OMIT,
@@ -303,6 +306,15 @@ class AgentsClient:
303
306
  embedding_chunk_size : typing.Optional[int]
304
307
  The embedding chunk size used by the agent.
305
308
 
309
+ max_tokens : typing.Optional[int]
310
+ The maximum number of tokens to generate, including reasoning step. If not set, the model will use its default value.
311
+
312
+ max_reasoning_tokens : typing.Optional[int]
313
+ The maximum number of tokens to generate for reasoning step. If not set, the model will use its default value.
314
+
315
+ enable_reasoner : typing.Optional[bool]
316
+ Whether to enable internal extended thinking step for a reasoner model.
317
+
306
318
  from_template : typing.Optional[str]
307
319
  The template id used to configure the agent
308
320
 
@@ -386,6 +398,9 @@ class AgentsClient:
386
398
  "embedding": embedding,
387
399
  "context_window_limit": context_window_limit,
388
400
  "embedding_chunk_size": embedding_chunk_size,
401
+ "max_tokens": max_tokens,
402
+ "max_reasoning_tokens": max_reasoning_tokens,
403
+ "enable_reasoner": enable_reasoner,
389
404
  "from_template": from_template,
390
405
  "template": template,
391
406
  "project": create_agent_request_project,
@@ -1307,6 +1322,9 @@ class AsyncAgentsClient:
1307
1322
  embedding: typing.Optional[str] = OMIT,
1308
1323
  context_window_limit: typing.Optional[int] = OMIT,
1309
1324
  embedding_chunk_size: typing.Optional[int] = OMIT,
1325
+ max_tokens: typing.Optional[int] = OMIT,
1326
+ max_reasoning_tokens: typing.Optional[int] = OMIT,
1327
+ enable_reasoner: typing.Optional[bool] = OMIT,
1310
1328
  from_template: typing.Optional[str] = OMIT,
1311
1329
  template: typing.Optional[bool] = OMIT,
1312
1330
  create_agent_request_project: typing.Optional[str] = OMIT,
@@ -1392,6 +1410,15 @@ class AsyncAgentsClient:
1392
1410
  embedding_chunk_size : typing.Optional[int]
1393
1411
  The embedding chunk size used by the agent.
1394
1412
 
1413
+ max_tokens : typing.Optional[int]
1414
+ The maximum number of tokens to generate, including reasoning step. If not set, the model will use its default value.
1415
+
1416
+ max_reasoning_tokens : typing.Optional[int]
1417
+ The maximum number of tokens to generate for reasoning step. If not set, the model will use its default value.
1418
+
1419
+ enable_reasoner : typing.Optional[bool]
1420
+ Whether to enable internal extended thinking step for a reasoner model.
1421
+
1395
1422
  from_template : typing.Optional[str]
1396
1423
  The template id used to configure the agent
1397
1424
 
@@ -1483,6 +1510,9 @@ class AsyncAgentsClient:
1483
1510
  "embedding": embedding,
1484
1511
  "context_window_limit": context_window_limit,
1485
1512
  "embedding_chunk_size": embedding_chunk_size,
1513
+ "max_tokens": max_tokens,
1514
+ "max_reasoning_tokens": max_reasoning_tokens,
1515
+ "enable_reasoner": enable_reasoner,
1486
1516
  "from_template": from_template,
1487
1517
  "template": template,
1488
1518
  "project": create_agent_request_project,
@@ -4,10 +4,17 @@ import typing
4
4
  from ....types.system_message import SystemMessage
5
5
  from ....types.user_message import UserMessage
6
6
  from ....types.reasoning_message import ReasoningMessage
7
+ from ....types.hidden_reasoning_message import HiddenReasoningMessage
7
8
  from ....types.tool_call_message import ToolCallMessage
8
9
  from ....types.tool_return_message import ToolReturnMessage
9
10
  from ....types.assistant_message import AssistantMessage
10
11
 
11
12
  MessagesModifyResponse = typing.Union[
12
- SystemMessage, UserMessage, ReasoningMessage, ToolCallMessage, ToolReturnMessage, AssistantMessage
13
+ SystemMessage,
14
+ UserMessage,
15
+ ReasoningMessage,
16
+ HiddenReasoningMessage,
17
+ ToolCallMessage,
18
+ ToolReturnMessage,
19
+ AssistantMessage,
13
20
  ]
@@ -62,8 +62,14 @@ from .agents_search_response_agents_item_last_updated_by_id_item import (
62
62
  AgentsSearchResponseAgentsItemLastUpdatedByIdItem,
63
63
  )
64
64
  from .agents_search_response_agents_item_llm_config import AgentsSearchResponseAgentsItemLlmConfig
65
+ from .agents_search_response_agents_item_llm_config_enable_reasoner import (
66
+ AgentsSearchResponseAgentsItemLlmConfigEnableReasoner,
67
+ )
65
68
  from .agents_search_response_agents_item_llm_config_handle import AgentsSearchResponseAgentsItemLlmConfigHandle
66
69
  from .agents_search_response_agents_item_llm_config_handle_item import AgentsSearchResponseAgentsItemLlmConfigHandleItem
70
+ from .agents_search_response_agents_item_llm_config_max_reasoning_tokens import (
71
+ AgentsSearchResponseAgentsItemLlmConfigMaxReasoningTokens,
72
+ )
67
73
  from .agents_search_response_agents_item_llm_config_max_tokens import AgentsSearchResponseAgentsItemLlmConfigMaxTokens
68
74
  from .agents_search_response_agents_item_llm_config_max_tokens_item import (
69
75
  AgentsSearchResponseAgentsItemLlmConfigMaxTokensItem,
@@ -515,8 +521,10 @@ __all__ = [
515
521
  "AgentsSearchResponseAgentsItemLastUpdatedById",
516
522
  "AgentsSearchResponseAgentsItemLastUpdatedByIdItem",
517
523
  "AgentsSearchResponseAgentsItemLlmConfig",
524
+ "AgentsSearchResponseAgentsItemLlmConfigEnableReasoner",
518
525
  "AgentsSearchResponseAgentsItemLlmConfigHandle",
519
526
  "AgentsSearchResponseAgentsItemLlmConfigHandleItem",
527
+ "AgentsSearchResponseAgentsItemLlmConfigMaxReasoningTokens",
520
528
  "AgentsSearchResponseAgentsItemLlmConfigMaxTokens",
521
529
  "AgentsSearchResponseAgentsItemLlmConfigMaxTokensItem",
522
530
  "AgentsSearchResponseAgentsItemLlmConfigModelEndpoint",
@@ -19,6 +19,12 @@ from .agents_search_response_agents_item_llm_config_temperature import (
19
19
  AgentsSearchResponseAgentsItemLlmConfigTemperature,
20
20
  )
21
21
  from .agents_search_response_agents_item_llm_config_max_tokens import AgentsSearchResponseAgentsItemLlmConfigMaxTokens
22
+ from .agents_search_response_agents_item_llm_config_enable_reasoner import (
23
+ AgentsSearchResponseAgentsItemLlmConfigEnableReasoner,
24
+ )
25
+ from .agents_search_response_agents_item_llm_config_max_reasoning_tokens import (
26
+ AgentsSearchResponseAgentsItemLlmConfigMaxReasoningTokens,
27
+ )
22
28
  from ...core.pydantic_utilities import IS_PYDANTIC_V2
23
29
  import pydantic
24
30
 
@@ -35,6 +41,8 @@ class AgentsSearchResponseAgentsItemLlmConfig(UncheckedBaseModel):
35
41
  handle: typing.Optional[AgentsSearchResponseAgentsItemLlmConfigHandle] = None
36
42
  temperature: typing.Optional[AgentsSearchResponseAgentsItemLlmConfigTemperature] = None
37
43
  max_tokens: typing.Optional[AgentsSearchResponseAgentsItemLlmConfigMaxTokens] = None
44
+ enable_reasoner: typing.Optional[AgentsSearchResponseAgentsItemLlmConfigEnableReasoner] = None
45
+ max_reasoning_tokens: typing.Optional[AgentsSearchResponseAgentsItemLlmConfigMaxReasoningTokens] = None
38
46
 
39
47
  if IS_PYDANTIC_V2:
40
48
  model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
@@ -0,0 +1,5 @@
1
+ # This file was auto-generated by Fern from our API Definition.
2
+
3
+ import typing
4
+
5
+ AgentsSearchResponseAgentsItemLlmConfigEnableReasoner = typing.Union[bool, typing.Optional[typing.Any]]
@@ -0,0 +1,5 @@
1
+ # This file was auto-generated by Fern from our API Definition.
2
+
3
+ import typing
4
+
5
+ AgentsSearchResponseAgentsItemLlmConfigMaxReasoningTokens = typing.Union[float, typing.Optional[typing.Any]]
@@ -16,7 +16,7 @@ class BaseClientWrapper:
16
16
  headers: typing.Dict[str, str] = {
17
17
  "X-Fern-Language": "Python",
18
18
  "X-Fern-SDK-Name": "letta-client",
19
- "X-Fern-SDK-Version": "0.1.84",
19
+ "X-Fern-SDK-Version": "0.1.85",
20
20
  }
21
21
  if self.token is not None:
22
22
  headers["Authorization"] = f"Bearer {self.token}"
@@ -30,8 +30,10 @@ from .types import (
30
30
  TemplatesCreateAgentsResponseAgentsItemLastUpdatedById,
31
31
  TemplatesCreateAgentsResponseAgentsItemLastUpdatedByIdItem,
32
32
  TemplatesCreateAgentsResponseAgentsItemLlmConfig,
33
+ TemplatesCreateAgentsResponseAgentsItemLlmConfigEnableReasoner,
33
34
  TemplatesCreateAgentsResponseAgentsItemLlmConfigHandle,
34
35
  TemplatesCreateAgentsResponseAgentsItemLlmConfigHandleItem,
36
+ TemplatesCreateAgentsResponseAgentsItemLlmConfigMaxReasoningTokens,
35
37
  TemplatesCreateAgentsResponseAgentsItemLlmConfigMaxTokens,
36
38
  TemplatesCreateAgentsResponseAgentsItemLlmConfigMaxTokensItem,
37
39
  TemplatesCreateAgentsResponseAgentsItemLlmConfigModelEndpoint,
@@ -225,8 +227,10 @@ __all__ = [
225
227
  "TemplatesCreateAgentsResponseAgentsItemLastUpdatedById",
226
228
  "TemplatesCreateAgentsResponseAgentsItemLastUpdatedByIdItem",
227
229
  "TemplatesCreateAgentsResponseAgentsItemLlmConfig",
230
+ "TemplatesCreateAgentsResponseAgentsItemLlmConfigEnableReasoner",
228
231
  "TemplatesCreateAgentsResponseAgentsItemLlmConfigHandle",
229
232
  "TemplatesCreateAgentsResponseAgentsItemLlmConfigHandleItem",
233
+ "TemplatesCreateAgentsResponseAgentsItemLlmConfigMaxReasoningTokens",
230
234
  "TemplatesCreateAgentsResponseAgentsItemLlmConfigMaxTokens",
231
235
  "TemplatesCreateAgentsResponseAgentsItemLlmConfigMaxTokensItem",
232
236
  "TemplatesCreateAgentsResponseAgentsItemLlmConfigModelEndpoint",
@@ -75,12 +75,18 @@ from .templates_create_agents_response_agents_item_last_updated_by_id_item impor
75
75
  TemplatesCreateAgentsResponseAgentsItemLastUpdatedByIdItem,
76
76
  )
77
77
  from .templates_create_agents_response_agents_item_llm_config import TemplatesCreateAgentsResponseAgentsItemLlmConfig
78
+ from .templates_create_agents_response_agents_item_llm_config_enable_reasoner import (
79
+ TemplatesCreateAgentsResponseAgentsItemLlmConfigEnableReasoner,
80
+ )
78
81
  from .templates_create_agents_response_agents_item_llm_config_handle import (
79
82
  TemplatesCreateAgentsResponseAgentsItemLlmConfigHandle,
80
83
  )
81
84
  from .templates_create_agents_response_agents_item_llm_config_handle_item import (
82
85
  TemplatesCreateAgentsResponseAgentsItemLlmConfigHandleItem,
83
86
  )
87
+ from .templates_create_agents_response_agents_item_llm_config_max_reasoning_tokens import (
88
+ TemplatesCreateAgentsResponseAgentsItemLlmConfigMaxReasoningTokens,
89
+ )
84
90
  from .templates_create_agents_response_agents_item_llm_config_max_tokens import (
85
91
  TemplatesCreateAgentsResponseAgentsItemLlmConfigMaxTokens,
86
92
  )
@@ -579,8 +585,10 @@ __all__ = [
579
585
  "TemplatesCreateAgentsResponseAgentsItemLastUpdatedById",
580
586
  "TemplatesCreateAgentsResponseAgentsItemLastUpdatedByIdItem",
581
587
  "TemplatesCreateAgentsResponseAgentsItemLlmConfig",
588
+ "TemplatesCreateAgentsResponseAgentsItemLlmConfigEnableReasoner",
582
589
  "TemplatesCreateAgentsResponseAgentsItemLlmConfigHandle",
583
590
  "TemplatesCreateAgentsResponseAgentsItemLlmConfigHandleItem",
591
+ "TemplatesCreateAgentsResponseAgentsItemLlmConfigMaxReasoningTokens",
584
592
  "TemplatesCreateAgentsResponseAgentsItemLlmConfigMaxTokens",
585
593
  "TemplatesCreateAgentsResponseAgentsItemLlmConfigMaxTokensItem",
586
594
  "TemplatesCreateAgentsResponseAgentsItemLlmConfigModelEndpoint",
@@ -23,6 +23,12 @@ from .templates_create_agents_response_agents_item_llm_config_temperature import
23
23
  from .templates_create_agents_response_agents_item_llm_config_max_tokens import (
24
24
  TemplatesCreateAgentsResponseAgentsItemLlmConfigMaxTokens,
25
25
  )
26
+ from .templates_create_agents_response_agents_item_llm_config_enable_reasoner import (
27
+ TemplatesCreateAgentsResponseAgentsItemLlmConfigEnableReasoner,
28
+ )
29
+ from .templates_create_agents_response_agents_item_llm_config_max_reasoning_tokens import (
30
+ TemplatesCreateAgentsResponseAgentsItemLlmConfigMaxReasoningTokens,
31
+ )
26
32
  from ...core.pydantic_utilities import IS_PYDANTIC_V2
27
33
  import pydantic
28
34
 
@@ -39,6 +45,8 @@ class TemplatesCreateAgentsResponseAgentsItemLlmConfig(UncheckedBaseModel):
39
45
  handle: typing.Optional[TemplatesCreateAgentsResponseAgentsItemLlmConfigHandle] = None
40
46
  temperature: typing.Optional[TemplatesCreateAgentsResponseAgentsItemLlmConfigTemperature] = None
41
47
  max_tokens: typing.Optional[TemplatesCreateAgentsResponseAgentsItemLlmConfigMaxTokens] = None
48
+ enable_reasoner: typing.Optional[TemplatesCreateAgentsResponseAgentsItemLlmConfigEnableReasoner] = None
49
+ max_reasoning_tokens: typing.Optional[TemplatesCreateAgentsResponseAgentsItemLlmConfigMaxReasoningTokens] = None
42
50
 
43
51
  if IS_PYDANTIC_V2:
44
52
  model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
@@ -0,0 +1,5 @@
1
+ # This file was auto-generated by Fern from our API Definition.
2
+
3
+ import typing
4
+
5
+ TemplatesCreateAgentsResponseAgentsItemLlmConfigEnableReasoner = typing.Union[bool, typing.Optional[typing.Any]]
@@ -0,0 +1,5 @@
1
+ # This file was auto-generated by Fern from our API Definition.
2
+
3
+ import typing
4
+
5
+ TemplatesCreateAgentsResponseAgentsItemLlmConfigMaxReasoningTokens = typing.Union[float, typing.Optional[typing.Any]]
@@ -91,6 +91,8 @@ from .group import Group
91
91
  from .group_create import GroupCreate
92
92
  from .group_create_manager_config import GroupCreateManagerConfig
93
93
  from .health import Health
94
+ from .hidden_reasoning_message import HiddenReasoningMessage
95
+ from .hidden_reasoning_message_state import HiddenReasoningMessageState
94
96
  from .http_validation_error import HttpValidationError
95
97
  from .identity import Identity
96
98
  from .identity_create import IdentityCreate
@@ -303,6 +305,8 @@ __all__ = [
303
305
  "GroupCreate",
304
306
  "GroupCreateManagerConfig",
305
307
  "Health",
308
+ "HiddenReasoningMessage",
309
+ "HiddenReasoningMessageState",
306
310
  "HttpValidationError",
307
311
  "Identity",
308
312
  "IdentityCreate",
@@ -0,0 +1,39 @@
1
+ # This file was auto-generated by Fern from our API Definition.
2
+
3
+ from ..core.unchecked_base_model import UncheckedBaseModel
4
+ import datetime as dt
5
+ import typing
6
+ from .hidden_reasoning_message_state import HiddenReasoningMessageState
7
+ from ..core.pydantic_utilities import IS_PYDANTIC_V2
8
+ import pydantic
9
+
10
+
11
+ class HiddenReasoningMessage(UncheckedBaseModel):
12
+ """
13
+ Representation of an agent's internal reasoning where reasoning content
14
+ has been hidden from the response.
15
+
16
+ Args:
17
+ id (str): The ID of the message
18
+ date (datetime): The date the message was created in ISO format
19
+ name (Optional[str]): The name of the sender of the message
20
+ state (Literal["redacted", "omitted"]): Whether the reasoning
21
+ content was redacted by the provider or simply omitted by the API
22
+ hidden_reasoning (Optional[str]): The internal reasoning of the agent
23
+ """
24
+
25
+ id: str
26
+ date: dt.datetime
27
+ name: typing.Optional[str] = None
28
+ message_type: typing.Literal["hidden_reasoning_message"] = "hidden_reasoning_message"
29
+ state: HiddenReasoningMessageState
30
+ hidden_reasoning: typing.Optional[str] = None
31
+
32
+ if IS_PYDANTIC_V2:
33
+ model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
34
+ else:
35
+
36
+ class Config:
37
+ frozen = True
38
+ smart_union = True
39
+ extra = pydantic.Extra.allow
@@ -0,0 +1,5 @@
1
+ # This file was auto-generated by Fern from our API Definition.
2
+
3
+ import typing
4
+
5
+ HiddenReasoningMessageState = typing.Union[typing.Literal["redacted", "omitted"], typing.Any]
@@ -4,10 +4,17 @@ import typing
4
4
  from .system_message import SystemMessage
5
5
  from .user_message import UserMessage
6
6
  from .reasoning_message import ReasoningMessage
7
+ from .hidden_reasoning_message import HiddenReasoningMessage
7
8
  from .tool_call_message import ToolCallMessage
8
9
  from .tool_return_message import ToolReturnMessage
9
10
  from .assistant_message import AssistantMessage
10
11
 
11
12
  LettaMessageUnion = typing.Union[
12
- SystemMessage, UserMessage, ReasoningMessage, ToolCallMessage, ToolReturnMessage, AssistantMessage
13
+ SystemMessage,
14
+ UserMessage,
15
+ ReasoningMessage,
16
+ HiddenReasoningMessage,
17
+ ToolCallMessage,
18
+ ToolReturnMessage,
19
+ AssistantMessage,
13
20
  ]
@@ -67,6 +67,16 @@ class LlmConfig(UncheckedBaseModel):
67
67
  The maximum number of tokens to generate. If not set, the model will use its default value.
68
68
  """
69
69
 
70
+ enable_reasoner: typing.Optional[bool] = pydantic.Field(default=None)
71
+ """
72
+ Whether or not the model should use extended thinking if it is a 'reasoning' style model
73
+ """
74
+
75
+ max_reasoning_tokens: typing.Optional[int] = pydantic.Field(default=None)
76
+ """
77
+ Configurable thinking budget for extended thinking, only used if enable_reasoner is True. Minimum value is 1024.
78
+ """
79
+
70
80
  if IS_PYDANTIC_V2:
71
81
  model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
72
82
  else:
@@ -19,6 +19,7 @@ class ReasoningMessage(UncheckedBaseModel):
19
19
  source (Literal["reasoner_model", "non_reasoner_model"]): Whether the reasoning
20
20
  content was generated natively by a reasoner model or derived via prompting
21
21
  reasoning (str): The internal reasoning of the agent
22
+ signature (Optional[str]): The model-generated signature of the reasoning step
22
23
  """
23
24
 
24
25
  id: str
@@ -27,6 +28,7 @@ class ReasoningMessage(UncheckedBaseModel):
27
28
  message_type: typing.Literal["reasoning_message"] = "reasoning_message"
28
29
  source: typing.Optional[ReasoningMessageSource] = None
29
30
  reasoning: str
31
+ signature: typing.Optional[str] = None
30
32
 
31
33
  if IS_PYDANTIC_V2:
32
34
  model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: letta-client
3
- Version: 0.1.84
3
+ Version: 0.1.85
4
4
  Summary:
5
5
  Requires-Python: >=3.8,<4.0
6
6
  Classifier: Intended Audience :: Developers
@@ -1,8 +1,8 @@
1
- letta_client/__init__.py,sha256=bMJ2QVwB5uaFZFCplyqr74M69gu7egtuZwIs7LOoQUA,65959
2
- letta_client/agents/__init__.py,sha256=5zJALonfv-KgVXgPFZjAlTSo-Fm7Fe3S7i3F8vccAvg,25764
1
+ letta_client/__init__.py,sha256=riWzZSKXFaVAqdWeMV2wHbuOnJjMJHgSLGoZKP0g7mc,66617
2
+ letta_client/agents/__init__.py,sha256=EZeH7kHAWnifaPd0MwY_sD3BCchrB29ZprJzqwKeTMM,26012
3
3
  letta_client/agents/blocks/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-_v1roeGPAKoa_pw,65
4
4
  letta_client/agents/blocks/client.py,sha256=u5zvutxoH_DqfSLWhRtNSRBC9_ezQDx682cxkxDz3JA,23822
5
- letta_client/agents/client.py,sha256=ZUwxURBPy4jW4YgeZR_VijXQB2iUPadzQaSoG2-ZcX0,85152
5
+ letta_client/agents/client.py,sha256=noGlyMJyKLp7HvMogDK3QtxXl0KWic_X6pNsMy06apY,86754
6
6
  letta_client/agents/context/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-_v1roeGPAKoa_pw,65
7
7
  letta_client/agents/context/client.py,sha256=GKKvoG4N_K8Biz9yDjeIHpFG0C8Cwc7tHmEX3pTL_9U,4815
8
8
  letta_client/agents/core_memory/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-_v1roeGPAKoa_pw,65
@@ -16,7 +16,7 @@ letta_client/agents/messages/client.py,sha256=eFYRQUTubKq6N1CTebdLL_oT7LIkOyVEXi
16
16
  letta_client/agents/messages/types/__init__.py,sha256=Oc2j0oGOs96IEFf9xsJIkjBjoq3OMtse64YwWv3F9Io,335
17
17
  letta_client/agents/messages/types/letta_streaming_response.py,sha256=MdE2PxQ1x1AviakHXsWVcFv97a3RchzzzIiD77w4EC8,665
18
18
  letta_client/agents/messages/types/messages_modify_request.py,sha256=7C2X3BKye-YDSXOkdEmxxt34seI4jkLK0-govtc4nhg,475
19
- letta_client/agents/messages/types/messages_modify_response.py,sha256=f2eITUx-zQ4qzcYd1JPS_mFSqJw7xVsxX7GR7d2RYRI,552
19
+ letta_client/agents/messages/types/messages_modify_response.py,sha256=THyiUMxZyzVSp0kk1s0XOLW1LUass7mXcfFER1PTLyw,671
20
20
  letta_client/agents/passages/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-_v1roeGPAKoa_pw,65
21
21
  letta_client/agents/passages/client.py,sha256=hWC-WHKU-0kwkn5ncPhxALL_wGLCu1JmLlmfDaAOVww,15586
22
22
  letta_client/agents/sources/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-_v1roeGPAKoa_pw,65
@@ -28,7 +28,7 @@ letta_client/agents/templates/types/templates_create_response.py,sha256=kKjkyjv3
28
28
  letta_client/agents/templates/types/templates_migrate_response.py,sha256=7N4JtAaiao-LrNdi72K7XB01uXJVkczaKYIJIMf0QYs,577
29
29
  letta_client/agents/tools/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-_v1roeGPAKoa_pw,65
30
30
  letta_client/agents/tools/client.py,sha256=xZMRZhG8mI_h8_QqgI4lXh3FieRCLeoPwdtB56GB-XU,12685
31
- letta_client/agents/types/__init__.py,sha256=vWZEiEPxWIefq0fkaRJUcdzu6hJOoGR_K_stM0v-iig,41009
31
+ letta_client/agents/types/__init__.py,sha256=hVfHOKotPmiacVAvJDd6zq1qjpXG6VPaI5OnBHYYljE,41420
32
32
  letta_client/agents/types/agents_search_request_search_item.py,sha256=9wZPvTP5ESFOhdF9YqdYwv4h_fEFF9TWbGtDO9xkrzA,494
33
33
  letta_client/agents/types/agents_search_request_search_item_field.py,sha256=06cbjgIRD2GL7Ck7ZYxLVNbrKP9HLHNOCi9DSPspATQ,692
34
34
  letta_client/agents/types/agents_search_request_search_item_one.py,sha256=ECWv-hDZen6AomM01zmRsOz0PlXVEwIwLHjid9yko9o,779
@@ -62,9 +62,11 @@ letta_client/agents/types/agents_search_response_agents_item_embedding_config_ha
62
62
  letta_client/agents/types/agents_search_response_agents_item_identity_ids.py,sha256=Me2QPiXqsJiSWcpmnAu-n6runXGk0OCTv2z-FSHx-iA,184
63
63
  letta_client/agents/types/agents_search_response_agents_item_last_updated_by_id.py,sha256=WpPW5-0-xegQTh_-7igpbuFr5wFeFM359kuBqbQDYRE,428
64
64
  letta_client/agents/types/agents_search_response_agents_item_last_updated_by_id_item.py,sha256=ewpvZ8ScpPBI1Vi7cWjTPQ1eeYBqU8BcsdmFwXR3fsM,172
65
- letta_client/agents/types/agents_search_response_agents_item_llm_config.py,sha256=QLL21-fKxgjgHcpAZR1VwyRj8G1i1j7mTx_U6qZ25d8,2226
65
+ letta_client/agents/types/agents_search_response_agents_item_llm_config.py,sha256=JOJ8U4d66KpbgFI68U0x2ylHv53hFFxyu4DitxBDtuE,2718
66
+ letta_client/agents/types/agents_search_response_agents_item_llm_config_enable_reasoner.py,sha256=6q8s7q-UnvHFiGvZp7TdXdw2x2Wgo5dBMKBG502J1dA,184
66
67
  letta_client/agents/types/agents_search_response_agents_item_llm_config_handle.py,sha256=OkFtdvvJ0GCcYZr8p7nHNbC9JTgUky1W8nYmo30WRpo,418
67
68
  letta_client/agents/types/agents_search_response_agents_item_llm_config_handle_item.py,sha256=Ykt36D4KDmb1X-t3R7EaECAHbDq8xQqTBU--lSo2g7g,172
69
+ letta_client/agents/types/agents_search_response_agents_item_llm_config_max_reasoning_tokens.py,sha256=VtcctcNE5cvTe08u9cZYOP6emQUKWDPcJKUhD0IRbQk,189
68
70
  letta_client/agents/types/agents_search_response_agents_item_llm_config_max_tokens.py,sha256=oS9ztkcyf_1lABiND482ROlh7ghOLhhNBLCe69tRkwc,442
69
71
  letta_client/agents/types/agents_search_response_agents_item_llm_config_max_tokens_item.py,sha256=mfB0NNoNIQJPbAv1VLjSKKc2F4UaeOuNkUMzkqay7xY,177
70
72
  letta_client/agents/types/agents_search_response_agents_item_llm_config_model_endpoint.py,sha256=ejEayxna8G74hNJxeVXNRfDnDaQsl81Fi_KoqNsfeCk,456
@@ -234,7 +236,7 @@ letta_client/blocks/client.py,sha256=LE9dsHaBxFLC3G035f0VpNDG7XKWRK8y9OXpeFCMvUw
234
236
  letta_client/client.py,sha256=k2mZqqEWciVmEQHgipjCK4kQILk74hpSqzcdNwdql9A,21212
235
237
  letta_client/core/__init__.py,sha256=OKbX2aCZXgHCDUsCouqv-OiX32xA6eFFCKIUH9M5Vzk,1591
236
238
  letta_client/core/api_error.py,sha256=RE8LELok2QCjABadECTvtDp7qejA1VmINCh6TbqPwSE,426
237
- letta_client/core/client_wrapper.py,sha256=zDW4PmX_uTU1Hjf-_FeeEad1PsKrSZMe6_xtiVes198,1997
239
+ letta_client/core/client_wrapper.py,sha256=FOxgN5Wq-Cq4fA_W3Bp9oXiC_gLMjhJgW2FIWukqxq0,1997
238
240
  letta_client/core/datetime_utils.py,sha256=nBys2IsYrhPdszxGKCNRPSOCwa-5DWOHG95FB8G9PKo,1047
239
241
  letta_client/core/file.py,sha256=d4NNbX8XvXP32z8KpK2Xovv33nFfruIrpz0QWxlgpZk,2663
240
242
  letta_client/core/http_client.py,sha256=Z77OIxIbL4OAB2IDqjRq_sYa5yNYAWfmdhdCSSvh6Y4,19552
@@ -276,9 +278,9 @@ letta_client/steps/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-_v1roeGPAKoa_p
276
278
  letta_client/steps/client.py,sha256=g4XUUtdKzkSiRkxJW6ACrYe8ySvJ_tUMGK4ag6QRZT4,11284
277
279
  letta_client/tag/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-_v1roeGPAKoa_pw,65
278
280
  letta_client/tag/client.py,sha256=TBAotdb0e2_x2pANF4dOE1qmWY3GIgb7nOhvN7iZ3_4,5183
279
- letta_client/templates/__init__.py,sha256=mKuy259yyTkJiU4Vlrhcivwf5yGTdkeNBKj8W4doJZ8,28009
281
+ letta_client/templates/__init__.py,sha256=Bq3bRs6E-kRxhQK-mcuW_9Hg_WIH-Xus0N4bSv6WLec,28293
280
282
  letta_client/templates/client.py,sha256=k1eya9jpfKw8CwKP8U7qIhKVmpk1IBqPG-oeK8C6fOM,7188
281
- letta_client/templates/types/__init__.py,sha256=l5Q0UIzuIgvWxV5-f-BNE5JefHD8NrZt910a54qEERk,45863
283
+ letta_client/templates/types/__init__.py,sha256=y4mGN4JSA-aDZVXfCQRmnakS0LtrE67OVj8TRkODc8I,46330
282
284
  letta_client/templates/types/templates_create_agents_response.py,sha256=UNMZSUckqoug1sq-gqC7luO392eItxxy0NNdUL0CRfQ,725
283
285
  letta_client/templates/types/templates_create_agents_response_agents_item.py,sha256=yp0fKlo9CTbpETu9x5ahDEBDVV8eCSRGW47eFFZRfYc,5445
284
286
  letta_client/templates/types/templates_create_agents_response_agents_item_agent_type.py,sha256=667uXDfYpS48UZIiSpjdgY4xDcApSpW5viyc3T4eKjo,240
@@ -307,9 +309,11 @@ letta_client/templates/types/templates_create_agents_response_agents_item_embedd
307
309
  letta_client/templates/types/templates_create_agents_response_agents_item_identity_ids.py,sha256=-ss5Ifi4HdGrP0g5X8yPPv-V3EGltzmPz2oVec4HGCc,193
308
310
  letta_client/templates/types/templates_create_agents_response_agents_item_last_updated_by_id.py,sha256=h0ZU1lx4hDem3EkXpQfcZe0T6llCxudbWL4D0wzs8Gw,465
309
311
  letta_client/templates/types/templates_create_agents_response_agents_item_last_updated_by_id_item.py,sha256=Gm8rlP1HXMx0kf6yVTiHvn3ak1YfUQ-MrRs4njYg6ds,181
310
- letta_client/templates/types/templates_create_agents_response_agents_item_llm_config.py,sha256=yN1LYm0YhsSNEZvy8IaxMlTDQifhWCHELfyVipg2pUw,2447
312
+ letta_client/templates/types/templates_create_agents_response_agents_item_llm_config.py,sha256=T2-zkGdLW3Ae74F-VlE6rTp6oDCqYY6qFoBqnOCPvo4,2995
313
+ letta_client/templates/types/templates_create_agents_response_agents_item_llm_config_enable_reasoner.py,sha256=VUCyPaiqPp9Qlw4GkgfuA8gLLqjkEWmLsJ2UL4WYHVA,193
311
314
  letta_client/templates/types/templates_create_agents_response_agents_item_llm_config_handle.py,sha256=6iViMHSzqSIYklNasY3z5KxZUunTmdLGDBRHhdveq98,464
312
315
  letta_client/templates/types/templates_create_agents_response_agents_item_llm_config_handle_item.py,sha256=SX6bP8RWlynXRdOjoqg3NLNZNOnpVIn4ZTtEQZmFFD0,181
316
+ letta_client/templates/types/templates_create_agents_response_agents_item_llm_config_max_reasoning_tokens.py,sha256=9zXTM6Mh0P5UrqHKQ-GP30ahClk9GD_vGEiTeTyXf_U,198
313
317
  letta_client/templates/types/templates_create_agents_response_agents_item_llm_config_max_tokens.py,sha256=mk3l02cY3_xWUtUYr0y9i82QwwsODgidF4mNCv0tW6I,479
314
318
  letta_client/templates/types/templates_create_agents_response_agents_item_llm_config_max_tokens_item.py,sha256=7FWlQX8vYiYuiZlFz74EQTPGUKr9m0IWbu1eKPE95Vw,186
315
319
  letta_client/templates/types/templates_create_agents_response_agents_item_llm_config_model_endpoint.py,sha256=ksCAK3zWAtSUDcuk6QZeb4cX824hnvaS5koJzOFliQU,493
@@ -478,7 +482,7 @@ letta_client/tools/types/add_mcp_server_request.py,sha256=EieZjfOT95sjkpxXdqy7gl
478
482
  letta_client/tools/types/add_mcp_server_response_item.py,sha256=TWdsKqGb1INhYtpGnAckz0Pw4nZShumSp4pfocRfxCA,270
479
483
  letta_client/tools/types/delete_mcp_server_response_item.py,sha256=MeZObU-7tMSCd-S5yuUjNDse6A1hUz1LLjbko0pXaro,273
480
484
  letta_client/tools/types/list_mcp_servers_response_value.py,sha256=AIoXu4bO8QNSU7zjL1jj0Rg4313wVtPaTt13W0aevLQ,273
481
- letta_client/types/__init__.py,sha256=jPtbBLUzunOPB2nW-wVHYIi-PiYBOvb1EeCO5i7ulUE,18629
485
+ letta_client/types/__init__.py,sha256=7gQk1HcKIdNShFr6J2RTVb0zzuBYFAHc_T0yTdgSZ74,18827
482
486
  letta_client/types/action_model.py,sha256=y1e2XMv3skFaNJIBdYoBKgiORzGh05aOVvu-qVR9uHg,1240
483
487
  letta_client/types/action_parameters_model.py,sha256=LgKf5aPZG3-OHGxFdXiSokIDgce8c02xPYIAY05VgW8,828
484
488
  letta_client/types/action_response_model.py,sha256=yq2Fd9UU8j7vvtE3VqXUoRRvDzWcfJPj_95ynGdeHCs,824
@@ -570,6 +574,8 @@ letta_client/types/group.py,sha256=Y8iaGI08uSt99AE0GkiGV95YB5ywiAZOWd0jXojgwqU,1
570
574
  letta_client/types/group_create.py,sha256=TPYqXPVtriwhTVwHm_MRDNrKyl2mlice7Q22vVbOHwg,857
571
575
  letta_client/types/group_create_manager_config.py,sha256=4NhJEsVCuLBfHD1lbT7xKAgfKWycrwXV7W_u7ifjh1E,319
572
576
  letta_client/types/health.py,sha256=nQwx5ysn_cJMKUoqsfaPcGNSRSjfwX5S272UiSQJ03w,618
577
+ letta_client/types/hidden_reasoning_message.py,sha256=2ExD6XKtWsMQQQCiZcyAGr-Tzgk-i3L663lT3p778pc,1447
578
+ letta_client/types/hidden_reasoning_message_state.py,sha256=qotAgF_P4T7OEHzbhGDVFaLZYOs1ULMPVHmiFvoRIfM,174
573
579
  letta_client/types/http_validation_error.py,sha256=yHa4_NHIMB-VKNZpk7agjLTwWIg7mv7ml3d7I-Bqiog,661
574
580
  letta_client/types/identity.py,sha256=ODegiJaCyiFFfYd177v-hRdJBnIwbCQImB9U_fk4s4E,1591
575
581
  letta_client/types/identity_create.py,sha256=QuYCfc7dL5aHQqRIt6SlOV00bWyeMouxpKiY3Wx10o0,1538
@@ -588,13 +594,13 @@ letta_client/types/job_status.py,sha256=0Gu5Tku79SDVzCxnjVXQyDPNCizGWUP1ppohAck6
588
594
  letta_client/types/job_type.py,sha256=Roa04Ry0I-8YMYcDHiHSQwqBavZyPonzkZtjf098e-Q,145
589
595
  letta_client/types/json_schema.py,sha256=EHcLKBSGRsSzCKTpujKFHylcLJG6ODQIBrjQkU4lWDQ,870
590
596
  letta_client/types/letta_message_content_union.py,sha256=YxzyXKxUMeqbqWOlDs9LC8HUiqEhgkNCV9a76GS3spg,486
591
- letta_client/types/letta_message_union.py,sha256=FM4Zippr5fJ05AZ2aZRFlqp348xNgLbzVOcrnyNfytI,493
597
+ letta_client/types/letta_message_union.py,sha256=TTQwlur2CZNdZ466Nb_2TFcSFXrgoMliaNzD33t7Ktw,603
592
598
  letta_client/types/letta_request.py,sha256=bCPDRJhSJSo5eILJp0mTw_k26O3dZL1vChfAcaZ0rE8,1240
593
599
  letta_client/types/letta_request_config.py,sha256=b6K4QtDdHjcZKfBb1fugUuoPrT2N4d5TTB0PIRNI2SU,1085
594
600
  letta_client/types/letta_response.py,sha256=i5gAUTgWzIst_RP8I_zSh0GSnLIS3z--1BmK6EF1mkQ,1315
595
601
  letta_client/types/letta_streaming_request.py,sha256=jm0HLzfzWzIRs8uwtX33V5f5Ljw_hFOKOhPjdIZX9cA,1465
596
602
  letta_client/types/letta_usage_statistics.py,sha256=pdlEk_GYVTiDUgW0ZePOdyrJZ6zoSCGEgm_gM3B1wr8,1721
597
- letta_client/types/llm_config.py,sha256=B-LJpzPB5RNSPG-cag65yTIWc0mbD7iKg77N6ejPL64,3045
603
+ letta_client/types/llm_config.py,sha256=cycdnu-lgQsLsFmFQrc9S_O20snEdxRLcvwWwLMFnik,3441
598
604
  letta_client/types/llm_config_model_endpoint_type.py,sha256=HOSM5kIZDCNAVCWmASvAk52K819plqGlD66yKQ1xFkI,620
599
605
  letta_client/types/local_sandbox_config.py,sha256=jfe7akG_YrJJ8csLaLdev04Zg1x-PTN0XCAL4KifaZI,1387
600
606
  letta_client/types/manager_type.py,sha256=hV271989JpEhJQH02MzLpJ34EsbGnyMlckbz2TXBc-E,184
@@ -624,7 +630,7 @@ letta_client/types/passage.py,sha256=1OM19TyVCQEL1P3BC58hmzWfawZM4vejiKr0P11dOUk
624
630
  letta_client/types/pip_requirement.py,sha256=Hmh7VpJhdSfFkafh6QwAehCp0MQUBXv1YAoYP-2wV2M,773
625
631
  letta_client/types/provider.py,sha256=RvdE9dzGFJ4hcmyvk2xeO7RNpxQvXhB_S9DNy8t_z-E,1053
626
632
  letta_client/types/reasoning_content.py,sha256=aId-87QjQ4sm_fuCmzIdZZghr-9DFeVV-Lv9x5iVw3I,995
627
- letta_client/types/reasoning_message.py,sha256=HbSYz0TbnGsFb1MELz0oCDMVC2dg5mY9jdmn3KCeFm0,1354
633
+ letta_client/types/reasoning_message.py,sha256=hlD4UCaCIJjSmhgJTUpHzO_WAkK9B6ilFaN1Xbhh-ok,1484
628
634
  letta_client/types/reasoning_message_source.py,sha256=GYOWGm2mje1yYbR8E2kbAeQS--VDrGlpsobEBQHE2cU,186
629
635
  letta_client/types/redacted_reasoning_content.py,sha256=ROAcdqOjM-kaw23HrVJrh0a49TRYuijanHDaCqcMErM,735
630
636
  letta_client/types/response_format_json_object.py,sha256=ZSWmwdN8itFr5q77mxuBhEWRBh2CubAonJUCi88UjbA,611
@@ -690,6 +696,6 @@ letta_client/voice/__init__.py,sha256=7hX85553PiRMtIMM12a0DSoFzsglNiUziYR2ekS84Q
690
696
  letta_client/voice/client.py,sha256=STjswa5oOLoP59QwTJvQwi73kgn0UzKOaXc2CsTRI4k,6912
691
697
  letta_client/voice/types/__init__.py,sha256=FRc3iKRTONE4N8Lf1IqvnqWZ2kXdrFFvkL7PxVcR8Ew,212
692
698
  letta_client/voice/types/create_voice_chat_completions_request_body.py,sha256=ZLfKgNK1T6IAwLEvaBVFfy7jEAoPUXP28n-nfmHkklc,391
693
- letta_client-0.1.84.dist-info/METADATA,sha256=2oxhbs4fOa6Mc7303-xWZKJ7s_LHZK19DKT9PkLBTK4,5041
694
- letta_client-0.1.84.dist-info/WHEEL,sha256=Zb28QaM1gQi8f4VCBhsUklF61CTlNYfs9YAZn-TOGFk,88
695
- letta_client-0.1.84.dist-info/RECORD,,
699
+ letta_client-0.1.85.dist-info/METADATA,sha256=VJ54060_zlBDtPw1l4RNyNV4dyj62UmknQ154kPup9U,5041
700
+ letta_client-0.1.85.dist-info/WHEEL,sha256=Zb28QaM1gQi8f4VCBhsUklF61CTlNYfs9YAZn-TOGFk,88
701
+ letta_client-0.1.85.dist-info/RECORD,,