letta-client 0.1.224__py3-none-any.whl → 0.1.225__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of letta-client might be problematic; see the registry's advisory page for details.

@@ -146,6 +146,7 @@ class MessagesClient:
146
146
  assistant_message_tool_name: typing.Optional[str] = OMIT,
147
147
  assistant_message_tool_kwarg: typing.Optional[str] = OMIT,
148
148
  include_return_message_types: typing.Optional[typing.Sequence[MessageType]] = OMIT,
149
+ enable_thinking: typing.Optional[str] = OMIT,
149
150
  request_options: typing.Optional[RequestOptions] = None,
150
151
  ) -> LettaResponse:
151
152
  """
@@ -174,6 +175,9 @@ class MessagesClient:
174
175
  include_return_message_types : typing.Optional[typing.Sequence[MessageType]]
175
176
  Only return specified message types in the response. If `None` (default) returns all messages.
176
177
 
178
+ enable_thinking : typing.Optional[str]
179
+ If set to True, enables reasoning before responses or tool calls from the agent.
180
+
177
181
  request_options : typing.Optional[RequestOptions]
178
182
  Request-specific configuration.
179
183
 
@@ -216,6 +220,7 @@ class MessagesClient:
216
220
  "assistant_message_tool_name": assistant_message_tool_name,
217
221
  "assistant_message_tool_kwarg": assistant_message_tool_kwarg,
218
222
  "include_return_message_types": include_return_message_types,
223
+ "enable_thinking": enable_thinking,
219
224
  },
220
225
  request_options=request_options,
221
226
  omit=OMIT,
@@ -330,6 +335,7 @@ class MessagesClient:
330
335
  assistant_message_tool_name: typing.Optional[str] = OMIT,
331
336
  assistant_message_tool_kwarg: typing.Optional[str] = OMIT,
332
337
  include_return_message_types: typing.Optional[typing.Sequence[MessageType]] = OMIT,
338
+ enable_thinking: typing.Optional[str] = OMIT,
333
339
  stream_tokens: typing.Optional[bool] = OMIT,
334
340
  request_options: typing.Optional[RequestOptions] = None,
335
341
  ) -> typing.Iterator[LettaStreamingResponse]:
@@ -360,6 +366,9 @@ class MessagesClient:
360
366
  include_return_message_types : typing.Optional[typing.Sequence[MessageType]]
361
367
  Only return specified message types in the response. If `None` (default) returns all messages.
362
368
 
369
+ enable_thinking : typing.Optional[str]
370
+ If set to True, enables reasoning before responses or tool calls from the agent.
371
+
363
372
  stream_tokens : typing.Optional[bool]
364
373
  Flag to determine if individual tokens should be streamed. Set to True for token streaming (requires stream_steps = True).
365
374
 
@@ -407,6 +416,7 @@ class MessagesClient:
407
416
  "assistant_message_tool_name": assistant_message_tool_name,
408
417
  "assistant_message_tool_kwarg": assistant_message_tool_kwarg,
409
418
  "include_return_message_types": include_return_message_types,
419
+ "enable_thinking": enable_thinking,
410
420
  "stream_tokens": stream_tokens,
411
421
  },
412
422
  request_options=request_options,
@@ -522,6 +532,7 @@ class MessagesClient:
522
532
  assistant_message_tool_name: typing.Optional[str] = OMIT,
523
533
  assistant_message_tool_kwarg: typing.Optional[str] = OMIT,
524
534
  include_return_message_types: typing.Optional[typing.Sequence[MessageType]] = OMIT,
535
+ enable_thinking: typing.Optional[str] = OMIT,
525
536
  callback_url: typing.Optional[str] = OMIT,
526
537
  request_options: typing.Optional[RequestOptions] = None,
527
538
  ) -> Run:
@@ -554,6 +565,9 @@ class MessagesClient:
554
565
  include_return_message_types : typing.Optional[typing.Sequence[MessageType]]
555
566
  Only return specified message types in the response. If `None` (default) returns all messages.
556
567
 
568
+ enable_thinking : typing.Optional[str]
569
+ If set to True, enables reasoning before responses or tool calls from the agent.
570
+
557
571
  callback_url : typing.Optional[str]
558
572
  Optional callback URL to POST to when the job completes
559
573
 
@@ -599,6 +613,7 @@ class MessagesClient:
599
613
  "assistant_message_tool_name": assistant_message_tool_name,
600
614
  "assistant_message_tool_kwarg": assistant_message_tool_kwarg,
601
615
  "include_return_message_types": include_return_message_types,
616
+ "enable_thinking": enable_thinking,
602
617
  "callback_url": callback_url,
603
618
  },
604
619
  headers={
@@ -912,6 +927,7 @@ class AsyncMessagesClient:
912
927
  assistant_message_tool_name: typing.Optional[str] = OMIT,
913
928
  assistant_message_tool_kwarg: typing.Optional[str] = OMIT,
914
929
  include_return_message_types: typing.Optional[typing.Sequence[MessageType]] = OMIT,
930
+ enable_thinking: typing.Optional[str] = OMIT,
915
931
  request_options: typing.Optional[RequestOptions] = None,
916
932
  ) -> LettaResponse:
917
933
  """
@@ -940,6 +956,9 @@ class AsyncMessagesClient:
940
956
  include_return_message_types : typing.Optional[typing.Sequence[MessageType]]
941
957
  Only return specified message types in the response. If `None` (default) returns all messages.
942
958
 
959
+ enable_thinking : typing.Optional[str]
960
+ If set to True, enables reasoning before responses or tool calls from the agent.
961
+
943
962
  request_options : typing.Optional[RequestOptions]
944
963
  Request-specific configuration.
945
964
 
@@ -990,6 +1009,7 @@ class AsyncMessagesClient:
990
1009
  "assistant_message_tool_name": assistant_message_tool_name,
991
1010
  "assistant_message_tool_kwarg": assistant_message_tool_kwarg,
992
1011
  "include_return_message_types": include_return_message_types,
1012
+ "enable_thinking": enable_thinking,
993
1013
  },
994
1014
  request_options=request_options,
995
1015
  omit=OMIT,
@@ -1112,6 +1132,7 @@ class AsyncMessagesClient:
1112
1132
  assistant_message_tool_name: typing.Optional[str] = OMIT,
1113
1133
  assistant_message_tool_kwarg: typing.Optional[str] = OMIT,
1114
1134
  include_return_message_types: typing.Optional[typing.Sequence[MessageType]] = OMIT,
1135
+ enable_thinking: typing.Optional[str] = OMIT,
1115
1136
  stream_tokens: typing.Optional[bool] = OMIT,
1116
1137
  request_options: typing.Optional[RequestOptions] = None,
1117
1138
  ) -> typing.AsyncIterator[LettaStreamingResponse]:
@@ -1142,6 +1163,9 @@ class AsyncMessagesClient:
1142
1163
  include_return_message_types : typing.Optional[typing.Sequence[MessageType]]
1143
1164
  Only return specified message types in the response. If `None` (default) returns all messages.
1144
1165
 
1166
+ enable_thinking : typing.Optional[str]
1167
+ If set to True, enables reasoning before responses or tool calls from the agent.
1168
+
1145
1169
  stream_tokens : typing.Optional[bool]
1146
1170
  Flag to determine if individual tokens should be streamed. Set to True for token streaming (requires stream_steps = True).
1147
1171
 
@@ -1197,6 +1221,7 @@ class AsyncMessagesClient:
1197
1221
  "assistant_message_tool_name": assistant_message_tool_name,
1198
1222
  "assistant_message_tool_kwarg": assistant_message_tool_kwarg,
1199
1223
  "include_return_message_types": include_return_message_types,
1224
+ "enable_thinking": enable_thinking,
1200
1225
  "stream_tokens": stream_tokens,
1201
1226
  },
1202
1227
  request_options=request_options,
@@ -1320,6 +1345,7 @@ class AsyncMessagesClient:
1320
1345
  assistant_message_tool_name: typing.Optional[str] = OMIT,
1321
1346
  assistant_message_tool_kwarg: typing.Optional[str] = OMIT,
1322
1347
  include_return_message_types: typing.Optional[typing.Sequence[MessageType]] = OMIT,
1348
+ enable_thinking: typing.Optional[str] = OMIT,
1323
1349
  callback_url: typing.Optional[str] = OMIT,
1324
1350
  request_options: typing.Optional[RequestOptions] = None,
1325
1351
  ) -> Run:
@@ -1352,6 +1378,9 @@ class AsyncMessagesClient:
1352
1378
  include_return_message_types : typing.Optional[typing.Sequence[MessageType]]
1353
1379
  Only return specified message types in the response. If `None` (default) returns all messages.
1354
1380
 
1381
+ enable_thinking : typing.Optional[str]
1382
+ If set to True, enables reasoning before responses or tool calls from the agent.
1383
+
1355
1384
  callback_url : typing.Optional[str]
1356
1385
  Optional callback URL to POST to when the job completes
1357
1386
 
@@ -1405,6 +1434,7 @@ class AsyncMessagesClient:
1405
1434
  "assistant_message_tool_name": assistant_message_tool_name,
1406
1435
  "assistant_message_tool_kwarg": assistant_message_tool_kwarg,
1407
1436
  "include_return_message_types": include_return_message_types,
1437
+ "enable_thinking": enable_thinking,
1408
1438
  "callback_url": callback_url,
1409
1439
  },
1410
1440
  headers={
@@ -24,7 +24,7 @@ class BaseClientWrapper:
24
24
  headers: typing.Dict[str, str] = {
25
25
  "X-Fern-Language": "Python",
26
26
  "X-Fern-SDK-Name": "letta-client",
27
- "X-Fern-SDK-Version": "0.1.224",
27
+ "X-Fern-SDK-Version": "0.1.225",
28
28
  }
29
29
  if self._project is not None:
30
30
  headers["X-Project"] = self._project
@@ -133,6 +133,7 @@ class MessagesClient:
133
133
  assistant_message_tool_name: typing.Optional[str] = OMIT,
134
134
  assistant_message_tool_kwarg: typing.Optional[str] = OMIT,
135
135
  include_return_message_types: typing.Optional[typing.Sequence[MessageType]] = OMIT,
136
+ enable_thinking: typing.Optional[str] = OMIT,
136
137
  request_options: typing.Optional[RequestOptions] = None,
137
138
  ) -> LettaResponse:
138
139
  """
@@ -161,6 +162,9 @@ class MessagesClient:
161
162
  include_return_message_types : typing.Optional[typing.Sequence[MessageType]]
162
163
  Only return specified message types in the response. If `None` (default) returns all messages.
163
164
 
165
+ enable_thinking : typing.Optional[str]
166
+ If set to True, enables reasoning before responses or tool calls from the agent.
167
+
164
168
  request_options : typing.Optional[RequestOptions]
165
169
  Request-specific configuration.
166
170
 
@@ -203,6 +207,7 @@ class MessagesClient:
203
207
  "assistant_message_tool_name": assistant_message_tool_name,
204
208
  "assistant_message_tool_kwarg": assistant_message_tool_kwarg,
205
209
  "include_return_message_types": include_return_message_types,
210
+ "enable_thinking": enable_thinking,
206
211
  },
207
212
  request_options=request_options,
208
213
  omit=OMIT,
@@ -241,6 +246,7 @@ class MessagesClient:
241
246
  assistant_message_tool_name: typing.Optional[str] = OMIT,
242
247
  assistant_message_tool_kwarg: typing.Optional[str] = OMIT,
243
248
  include_return_message_types: typing.Optional[typing.Sequence[MessageType]] = OMIT,
249
+ enable_thinking: typing.Optional[str] = OMIT,
244
250
  stream_tokens: typing.Optional[bool] = OMIT,
245
251
  request_options: typing.Optional[RequestOptions] = None,
246
252
  ) -> typing.Iterator[LettaStreamingResponse]:
@@ -271,6 +277,9 @@ class MessagesClient:
271
277
  include_return_message_types : typing.Optional[typing.Sequence[MessageType]]
272
278
  Only return specified message types in the response. If `None` (default) returns all messages.
273
279
 
280
+ enable_thinking : typing.Optional[str]
281
+ If set to True, enables reasoning before responses or tool calls from the agent.
282
+
274
283
  stream_tokens : typing.Optional[bool]
275
284
  Flag to determine if individual tokens should be streamed. Set to True for token streaming (requires stream_steps = True).
276
285
 
@@ -318,6 +327,7 @@ class MessagesClient:
318
327
  "assistant_message_tool_name": assistant_message_tool_name,
319
328
  "assistant_message_tool_kwarg": assistant_message_tool_kwarg,
320
329
  "include_return_message_types": include_return_message_types,
330
+ "enable_thinking": enable_thinking,
321
331
  "stream_tokens": stream_tokens,
322
332
  },
323
333
  request_options=request_options,
@@ -606,6 +616,7 @@ class AsyncMessagesClient:
606
616
  assistant_message_tool_name: typing.Optional[str] = OMIT,
607
617
  assistant_message_tool_kwarg: typing.Optional[str] = OMIT,
608
618
  include_return_message_types: typing.Optional[typing.Sequence[MessageType]] = OMIT,
619
+ enable_thinking: typing.Optional[str] = OMIT,
609
620
  request_options: typing.Optional[RequestOptions] = None,
610
621
  ) -> LettaResponse:
611
622
  """
@@ -634,6 +645,9 @@ class AsyncMessagesClient:
634
645
  include_return_message_types : typing.Optional[typing.Sequence[MessageType]]
635
646
  Only return specified message types in the response. If `None` (default) returns all messages.
636
647
 
648
+ enable_thinking : typing.Optional[str]
649
+ If set to True, enables reasoning before responses or tool calls from the agent.
650
+
637
651
  request_options : typing.Optional[RequestOptions]
638
652
  Request-specific configuration.
639
653
 
@@ -684,6 +698,7 @@ class AsyncMessagesClient:
684
698
  "assistant_message_tool_name": assistant_message_tool_name,
685
699
  "assistant_message_tool_kwarg": assistant_message_tool_kwarg,
686
700
  "include_return_message_types": include_return_message_types,
701
+ "enable_thinking": enable_thinking,
687
702
  },
688
703
  request_options=request_options,
689
704
  omit=OMIT,
@@ -722,6 +737,7 @@ class AsyncMessagesClient:
722
737
  assistant_message_tool_name: typing.Optional[str] = OMIT,
723
738
  assistant_message_tool_kwarg: typing.Optional[str] = OMIT,
724
739
  include_return_message_types: typing.Optional[typing.Sequence[MessageType]] = OMIT,
740
+ enable_thinking: typing.Optional[str] = OMIT,
725
741
  stream_tokens: typing.Optional[bool] = OMIT,
726
742
  request_options: typing.Optional[RequestOptions] = None,
727
743
  ) -> typing.AsyncIterator[LettaStreamingResponse]:
@@ -752,6 +768,9 @@ class AsyncMessagesClient:
752
768
  include_return_message_types : typing.Optional[typing.Sequence[MessageType]]
753
769
  Only return specified message types in the response. If `None` (default) returns all messages.
754
770
 
771
+ enable_thinking : typing.Optional[str]
772
+ If set to True, enables reasoning before responses or tool calls from the agent.
773
+
755
774
  stream_tokens : typing.Optional[bool]
756
775
  Flag to determine if individual tokens should be streamed. Set to True for token streaming (requires stream_steps = True).
757
776
 
@@ -807,6 +826,7 @@ class AsyncMessagesClient:
807
826
  "assistant_message_tool_name": assistant_message_tool_name,
808
827
  "assistant_message_tool_kwarg": assistant_message_tool_kwarg,
809
828
  "include_return_message_types": include_return_message_types,
829
+ "enable_thinking": enable_thinking,
810
830
  "stream_tokens": stream_tokens,
811
831
  },
812
832
  request_options=request_options,
@@ -39,6 +39,11 @@ class LettaBatchRequest(UncheckedBaseModel):
39
39
  Only return specified message types in the response. If `None` (default) returns all messages.
40
40
  """
41
41
 
42
+ enable_thinking: typing.Optional[str] = pydantic.Field(default=None)
43
+ """
44
+ If set to True, enables reasoning before responses or tool calls from the agent.
45
+ """
46
+
42
47
  agent_id: str = pydantic.Field()
43
48
  """
44
49
  The ID of the agent to send this batch request for
@@ -39,6 +39,11 @@ class LettaRequest(UncheckedBaseModel):
39
39
  Only return specified message types in the response. If `None` (default) returns all messages.
40
40
  """
41
41
 
42
+ enable_thinking: typing.Optional[str] = pydantic.Field(default=None)
43
+ """
44
+ If set to True, enables reasoning before responses or tool calls from the agent.
45
+ """
46
+
42
47
  if IS_PYDANTIC_V2:
43
48
  model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
44
49
  else:
@@ -39,6 +39,11 @@ class LettaStreamingRequest(UncheckedBaseModel):
39
39
  Only return specified message types in the response. If `None` (default) returns all messages.
40
40
  """
41
41
 
42
+ enable_thinking: typing.Optional[str] = pydantic.Field(default=None)
43
+ """
44
+ If set to True, enables reasoning before responses or tool calls from the agent.
45
+ """
46
+
42
47
  stream_tokens: typing.Optional[bool] = pydantic.Field(default=None)
43
48
  """
44
49
  Flag to determine if individual tokens should be streamed. Set to True for token streaming (requires stream_steps = True).
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: letta-client
3
- Version: 0.1.224
3
+ Version: 0.1.225
4
4
  Summary:
5
5
  Requires-Python: >=3.8,<4.0
6
6
  Classifier: Intended Audience :: Developers
@@ -18,7 +18,7 @@ letta_client/agents/memory_variables/client.py,sha256=DGJvV5k5H-BRE-FWMLNrCqKkHJ
18
18
  letta_client/agents/memory_variables/types/__init__.py,sha256=EoznK0WvhCyFYd4KDdU-cGDQWpSXmq79BSkqVHN-j7A,180
19
19
  letta_client/agents/memory_variables/types/memory_variables_list_response.py,sha256=bsF__n_B4ZXEHzg--OVD6tHHXt_aM-FjHm2x1ZXPnL0,599
20
20
  letta_client/agents/messages/__init__.py,sha256=RSmlezGH90RirElX0LHusjD03EN6UqGsrS-A0Bue2hA,353
21
- letta_client/agents/messages/client.py,sha256=J3VPSf9oSFb9qdyBfOcbRTkKtvnyT903PR6ADsofH5M,58066
21
+ letta_client/agents/messages/client.py,sha256=yh3gXasaH-rwTRyz64CJjR_AbpX05Ncb4LvilqnaN0Y,59548
22
22
  letta_client/agents/messages/types/__init__.py,sha256=uDn0e1-EJ7g2U0EHD8SkP38liALoVCTJ7bJhUAOutwE,473
23
23
  letta_client/agents/messages/types/letta_streaming_response.py,sha256=8VR2F32xjoPFXL4YBvBbAZclaJG4ENPTjk7BrlZkmtw,742
24
24
  letta_client/agents/messages/types/messages_modify_request.py,sha256=7C2X3BKye-YDSXOkdEmxxt34seI4jkLK0-govtc4nhg,475
@@ -75,7 +75,7 @@ letta_client/client_side_access_tokens/types/client_side_access_tokens_list_clie
75
75
  letta_client/client_side_access_tokens/types/client_side_access_tokens_list_client_side_access_tokens_response_tokens_item_policy_data_item_access_item.py,sha256=kNHfEWFl7u71Pu8NPqutod0a2NXfvq8il05Hqm0iBB4,284
76
76
  letta_client/core/__init__.py,sha256=OKbX2aCZXgHCDUsCouqv-OiX32xA6eFFCKIUH9M5Vzk,1591
77
77
  letta_client/core/api_error.py,sha256=RE8LELok2QCjABadECTvtDp7qejA1VmINCh6TbqPwSE,426
78
- letta_client/core/client_wrapper.py,sha256=xSoUSJswIYwHXAHKAT7yDaEoUmNqKq707H1hsvj6AKw,2336
78
+ letta_client/core/client_wrapper.py,sha256=ErqtnwbakfN5oBeIrdNzodPKrJ6xxSjh39WYPUi4SLg,2336
79
79
  letta_client/core/datetime_utils.py,sha256=nBys2IsYrhPdszxGKCNRPSOCwa-5DWOHG95FB8G9PKo,1047
80
80
  letta_client/core/file.py,sha256=d4NNbX8XvXP32z8KpK2Xovv33nFfruIrpz0QWxlgpZk,2663
81
81
  letta_client/core/http_client.py,sha256=Z77OIxIbL4OAB2IDqjRq_sYa5yNYAWfmdhdCSSvh6Y4,19552
@@ -105,7 +105,7 @@ letta_client/folders/passages/client.py,sha256=ejEUmwrwfUBG4UihNTrIk7b1-a1v6QziR
105
105
  letta_client/groups/__init__.py,sha256=WzkNp5Q_5zQj_NHv4hJCOKvW6ftM9EuNxw8hkPRRbko,434
106
106
  letta_client/groups/client.py,sha256=H6PW_E2QUX_hfoa5aunu90KEU1sF5d4xpTL1hxZFifU,30369
107
107
  letta_client/groups/messages/__init__.py,sha256=M7Ar6Rmb8we4dfYE6jj3FCL9UvVFy1bNQIPflUXMWHA,243
108
- letta_client/groups/messages/client.py,sha256=lK8P1J3ETHxdsS8zbdNnxagVSef03Z_tLiqQiEXq6t0,36386
108
+ letta_client/groups/messages/client.py,sha256=POUMzgG6YgvLzfKeWsDPYL706VbNL6lWOOnqW8w9Mrc,37374
109
109
  letta_client/groups/messages/types/__init__.py,sha256=Oc2j0oGOs96IEFf9xsJIkjBjoq3OMtse64YwWv3F9Io,335
110
110
  letta_client/groups/messages/types/letta_streaming_response.py,sha256=MdE2PxQ1x1AviakHXsWVcFv97a3RchzzzIiD77w4EC8,665
111
111
  letta_client/groups/messages/types/messages_modify_request.py,sha256=7C2X3BKye-YDSXOkdEmxxt34seI4jkLK0-govtc4nhg,475
@@ -310,15 +310,15 @@ letta_client/types/json_object_response_format.py,sha256=kz1wkWKO2H9Ad9GgLzLHgnY
310
310
  letta_client/types/json_schema.py,sha256=EHcLKBSGRsSzCKTpujKFHylcLJG6ODQIBrjQkU4lWDQ,870
311
311
  letta_client/types/json_schema_response_format.py,sha256=vTBC5qyuUm9u1uf1IZmNyEH-wSXm8c_7cOwd7ua_aJw,816
312
312
  letta_client/types/letta_batch_messages.py,sha256=kMefbiarujv7hCw3FyU-eVY2RgDV0ZXLOpkOooWNw6g,613
313
- letta_client/types/letta_batch_request.py,sha256=s0lLeWYKxS4ATaElzdZJyTxn7nVnaPTXeLYa01Ok-rA,1773
313
+ letta_client/types/letta_batch_request.py,sha256=cNxnDcz7ob626oCJb4a6tpD3cQqk5f9ZcIyQXV5gJIQ,1948
314
314
  letta_client/types/letta_image.py,sha256=HA__0Gq0BpKHeUUxIT2TmEfyY18F2DrauiGlijWv-HA,1138
315
315
  letta_client/types/letta_message_content_union.py,sha256=ypGyeR8PGqPGaAWtLWjDkWwG9oa4AWcL44y-RfLJVvw,540
316
316
  letta_client/types/letta_message_union.py,sha256=TTQwlur2CZNdZ466Nb_2TFcSFXrgoMliaNzD33t7Ktw,603
317
- letta_client/types/letta_request.py,sha256=vhS3qQYxNN2dlE_kXWPW3BJekBDMTSRp1MXmj4-ztfQ,1659
317
+ letta_client/types/letta_request.py,sha256=icbaI29DCFiCE5Y19WrdXz0VdIu3kq34hlGTlD9uTZE,1834
318
318
  letta_client/types/letta_request_config.py,sha256=OW5TNyTfbBMfRcWp5Aje_-7vjS0zVzYCmQGKLUtJezU,1346
319
319
  letta_client/types/letta_response.py,sha256=5brJ39B8PxTBpm5_clL9XGP1SQ1pox-G0gxMDkitMlw,1395
320
320
  letta_client/types/letta_stop_reason.py,sha256=5uqJibhaT6LFTj6Sf6m0VJKS1FJzIIgymjZTvu2a0Zk,931
321
- letta_client/types/letta_streaming_request.py,sha256=YJ_6ge5xrzQjOs2UTY3FxwhTv6QoCi9XWv5Rod01W1E,1884
321
+ letta_client/types/letta_streaming_request.py,sha256=-mWkurh8AywX0YjIt5AYbxlE3m6iDYCUCkgpkzUE3G4,2059
322
322
  letta_client/types/letta_usage_statistics.py,sha256=k6V72J2TEPd-RQBuUQxF3oylrAMcuSKBskd2nnZmGOw,1886
323
323
  letta_client/types/letta_user_message_content_union.py,sha256=3Gbs3mRk-tJj2z0Mf-BNDomWHEytQd3OTUN4xnEVsuE,229
324
324
  letta_client/types/llm_config.py,sha256=m8IlVCS4uSXVYsuIadilgO32UpuD4_ij0a1hcRQiMLo,4306
@@ -445,6 +445,6 @@ letta_client/types/web_search_options_user_location_approximate.py,sha256=Ywk01J
445
445
  letta_client/version.py,sha256=bttKLbIhO3UonCYQlqs600zzbQgfhCCMjeXR9WRzid4,79
446
446
  letta_client/voice/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-_v1roeGPAKoa_pw,65
447
447
  letta_client/voice/client.py,sha256=47iQYCuW_qpKI4hM3pYVxn3hw7kgQj3emU1_oRpkRMA,5811
448
- letta_client-0.1.224.dist-info/METADATA,sha256=vWY5btWhUxfgAemsZCMFxzK2SDhykSCQkkJAHABYCT8,5177
449
- letta_client-0.1.224.dist-info/WHEEL,sha256=Zb28QaM1gQi8f4VCBhsUklF61CTlNYfs9YAZn-TOGFk,88
450
- letta_client-0.1.224.dist-info/RECORD,,
448
+ letta_client-0.1.225.dist-info/METADATA,sha256=AX3EKG8xTs-A-jtFrRHxJZbCED82DcM-0_m3-EuUE_0,5177
449
+ letta_client-0.1.225.dist-info/WHEEL,sha256=Zb28QaM1gQi8f4VCBhsUklF61CTlNYfs9YAZn-TOGFk,88
450
+ letta_client-0.1.225.dist-info/RECORD,,