letta-client 0.1.224__py3-none-any.whl → 0.1.226__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
letta_client/__init__.py CHANGED
@@ -335,8 +335,10 @@ from .tools import (
     AddMcpServerRequest,
     AddMcpServerResponseItem,
     ConnectMcpServerRequest,
+    ConnectMcpServerResponseEvent,
     DeleteMcpServerResponseItem,
     ListMcpServersResponseValue,
+    StreamingResponse,
     TestMcpServerRequest,
     UpdateMcpServerRequest,
     UpdateMcpServerResponse,
@@ -451,6 +453,7 @@ __all__ = [
     "ConflictError",
     "ConflictErrorBody",
     "ConnectMcpServerRequest",
+    "ConnectMcpServerResponseEvent",
     "ContextWindowOverview",
     "ContinueToolRule",
     "CoreMemoryBlockSchema",
@@ -597,6 +600,7 @@ __all__ = [
    "StepsListRequestFeedback",
    "StopReasonType",
    "StreamableHttpServerConfig",
+    "StreamingResponse",
    "SupervisorManager",
    "SupervisorManagerUpdate",
    "SystemMessage",
letta_client/agents/messages/client.py CHANGED
@@ -146,6 +146,7 @@ class MessagesClient:
         assistant_message_tool_name: typing.Optional[str] = OMIT,
         assistant_message_tool_kwarg: typing.Optional[str] = OMIT,
         include_return_message_types: typing.Optional[typing.Sequence[MessageType]] = OMIT,
+        enable_thinking: typing.Optional[str] = OMIT,
         request_options: typing.Optional[RequestOptions] = None,
     ) -> LettaResponse:
         """
@@ -174,6 +175,9 @@ class MessagesClient:
         include_return_message_types : typing.Optional[typing.Sequence[MessageType]]
             Only return specified message types in the response. If `None` (default) returns all messages.
 
+        enable_thinking : typing.Optional[str]
+            If set to True, enables reasoning before responses or tool calls from the agent.
+
         request_options : typing.Optional[RequestOptions]
             Request-specific configuration.
 
@@ -216,6 +220,7 @@ class MessagesClient:
                 "assistant_message_tool_name": assistant_message_tool_name,
                 "assistant_message_tool_kwarg": assistant_message_tool_kwarg,
                 "include_return_message_types": include_return_message_types,
+                "enable_thinking": enable_thinking,
             },
             request_options=request_options,
             omit=OMIT,
@@ -330,6 +335,7 @@ class MessagesClient:
         assistant_message_tool_name: typing.Optional[str] = OMIT,
         assistant_message_tool_kwarg: typing.Optional[str] = OMIT,
         include_return_message_types: typing.Optional[typing.Sequence[MessageType]] = OMIT,
+        enable_thinking: typing.Optional[str] = OMIT,
         stream_tokens: typing.Optional[bool] = OMIT,
         request_options: typing.Optional[RequestOptions] = None,
     ) -> typing.Iterator[LettaStreamingResponse]:
@@ -360,6 +366,9 @@ class MessagesClient:
         include_return_message_types : typing.Optional[typing.Sequence[MessageType]]
             Only return specified message types in the response. If `None` (default) returns all messages.
 
+        enable_thinking : typing.Optional[str]
+            If set to True, enables reasoning before responses or tool calls from the agent.
+
         stream_tokens : typing.Optional[bool]
             Flag to determine if individual tokens should be streamed. Set to True for token streaming (requires stream_steps = True).
 
@@ -407,6 +416,7 @@ class MessagesClient:
                 "assistant_message_tool_name": assistant_message_tool_name,
                 "assistant_message_tool_kwarg": assistant_message_tool_kwarg,
                 "include_return_message_types": include_return_message_types,
+                "enable_thinking": enable_thinking,
                 "stream_tokens": stream_tokens,
             },
             request_options=request_options,
@@ -522,6 +532,7 @@ class MessagesClient:
         assistant_message_tool_name: typing.Optional[str] = OMIT,
         assistant_message_tool_kwarg: typing.Optional[str] = OMIT,
         include_return_message_types: typing.Optional[typing.Sequence[MessageType]] = OMIT,
+        enable_thinking: typing.Optional[str] = OMIT,
         callback_url: typing.Optional[str] = OMIT,
         request_options: typing.Optional[RequestOptions] = None,
     ) -> Run:
@@ -554,6 +565,9 @@ class MessagesClient:
         include_return_message_types : typing.Optional[typing.Sequence[MessageType]]
             Only return specified message types in the response. If `None` (default) returns all messages.
 
+        enable_thinking : typing.Optional[str]
+            If set to True, enables reasoning before responses or tool calls from the agent.
+
         callback_url : typing.Optional[str]
             Optional callback URL to POST to when the job completes
 
@@ -599,6 +613,7 @@ class MessagesClient:
                 "assistant_message_tool_name": assistant_message_tool_name,
                 "assistant_message_tool_kwarg": assistant_message_tool_kwarg,
                 "include_return_message_types": include_return_message_types,
+                "enable_thinking": enable_thinking,
                 "callback_url": callback_url,
             },
             headers={
@@ -912,6 +927,7 @@ class AsyncMessagesClient:
         assistant_message_tool_name: typing.Optional[str] = OMIT,
         assistant_message_tool_kwarg: typing.Optional[str] = OMIT,
         include_return_message_types: typing.Optional[typing.Sequence[MessageType]] = OMIT,
+        enable_thinking: typing.Optional[str] = OMIT,
         request_options: typing.Optional[RequestOptions] = None,
     ) -> LettaResponse:
         """
@@ -940,6 +956,9 @@ class AsyncMessagesClient:
         include_return_message_types : typing.Optional[typing.Sequence[MessageType]]
             Only return specified message types in the response. If `None` (default) returns all messages.
 
+        enable_thinking : typing.Optional[str]
+            If set to True, enables reasoning before responses or tool calls from the agent.
+
         request_options : typing.Optional[RequestOptions]
             Request-specific configuration.
 
@@ -990,6 +1009,7 @@ class AsyncMessagesClient:
                 "assistant_message_tool_name": assistant_message_tool_name,
                 "assistant_message_tool_kwarg": assistant_message_tool_kwarg,
                 "include_return_message_types": include_return_message_types,
+                "enable_thinking": enable_thinking,
             },
             request_options=request_options,
             omit=OMIT,
@@ -1112,6 +1132,7 @@ class AsyncMessagesClient:
         assistant_message_tool_name: typing.Optional[str] = OMIT,
         assistant_message_tool_kwarg: typing.Optional[str] = OMIT,
         include_return_message_types: typing.Optional[typing.Sequence[MessageType]] = OMIT,
+        enable_thinking: typing.Optional[str] = OMIT,
         stream_tokens: typing.Optional[bool] = OMIT,
         request_options: typing.Optional[RequestOptions] = None,
     ) -> typing.AsyncIterator[LettaStreamingResponse]:
@@ -1142,6 +1163,9 @@ class AsyncMessagesClient:
         include_return_message_types : typing.Optional[typing.Sequence[MessageType]]
             Only return specified message types in the response. If `None` (default) returns all messages.
 
+        enable_thinking : typing.Optional[str]
+            If set to True, enables reasoning before responses or tool calls from the agent.
+
         stream_tokens : typing.Optional[bool]
             Flag to determine if individual tokens should be streamed. Set to True for token streaming (requires stream_steps = True).
 
@@ -1197,6 +1221,7 @@ class AsyncMessagesClient:
                 "assistant_message_tool_name": assistant_message_tool_name,
                 "assistant_message_tool_kwarg": assistant_message_tool_kwarg,
                 "include_return_message_types": include_return_message_types,
+                "enable_thinking": enable_thinking,
                 "stream_tokens": stream_tokens,
             },
             request_options=request_options,
@@ -1320,6 +1345,7 @@ class AsyncMessagesClient:
         assistant_message_tool_name: typing.Optional[str] = OMIT,
         assistant_message_tool_kwarg: typing.Optional[str] = OMIT,
         include_return_message_types: typing.Optional[typing.Sequence[MessageType]] = OMIT,
+        enable_thinking: typing.Optional[str] = OMIT,
         callback_url: typing.Optional[str] = OMIT,
         request_options: typing.Optional[RequestOptions] = None,
     ) -> Run:
@@ -1352,6 +1378,9 @@ class AsyncMessagesClient:
         include_return_message_types : typing.Optional[typing.Sequence[MessageType]]
             Only return specified message types in the response. If `None` (default) returns all messages.
 
+        enable_thinking : typing.Optional[str]
+            If set to True, enables reasoning before responses or tool calls from the agent.
+
         callback_url : typing.Optional[str]
             Optional callback URL to POST to when the job completes
 
@@ -1405,6 +1434,7 @@ class AsyncMessagesClient:
                 "assistant_message_tool_name": assistant_message_tool_name,
                 "assistant_message_tool_kwarg": assistant_message_tool_kwarg,
                 "include_return_message_types": include_return_message_types,
+                "enable_thinking": enable_thinking,
                 "callback_url": callback_url,
             },
             headers={
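
Every create/stream/async variant in this file gains the same optional flag. A usage sketch adapted from the SDK's own README example; note that `enable_thinking` is typed `typing.Optional[str]` in this release, so the docstring's "True" presumably travels as the string `"true"` (an assumption, not confirmed by the diff):

```python
from letta_client import Letta, MessageCreate, TextContent

client = Letta(
    project="YOUR_PROJECT",
    token="YOUR_TOKEN",
)
response = client.agents.messages.create(
    agent_id="agent_id",  # placeholder
    messages=[
        MessageCreate(
            role="user",
            content=[TextContent(text="text")],
        )
    ],
    enable_thinking="true",  # new in 0.1.226; string-typed, not bool
)
```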
letta_client/core/client_wrapper.py CHANGED
@@ -24,7 +24,7 @@ class BaseClientWrapper:
         headers: typing.Dict[str, str] = {
             "X-Fern-Language": "Python",
             "X-Fern-SDK-Name": "letta-client",
-            "X-Fern-SDK-Version": "0.1.224",
+            "X-Fern-SDK-Version": "0.1.226",
         }
         if self._project is not None:
             headers["X-Project"] = self._project
letta_client/groups/messages/client.py CHANGED
@@ -133,6 +133,7 @@ class MessagesClient:
         assistant_message_tool_name: typing.Optional[str] = OMIT,
         assistant_message_tool_kwarg: typing.Optional[str] = OMIT,
         include_return_message_types: typing.Optional[typing.Sequence[MessageType]] = OMIT,
+        enable_thinking: typing.Optional[str] = OMIT,
         request_options: typing.Optional[RequestOptions] = None,
     ) -> LettaResponse:
         """
@@ -161,6 +162,9 @@ class MessagesClient:
         include_return_message_types : typing.Optional[typing.Sequence[MessageType]]
             Only return specified message types in the response. If `None` (default) returns all messages.
 
+        enable_thinking : typing.Optional[str]
+            If set to True, enables reasoning before responses or tool calls from the agent.
+
         request_options : typing.Optional[RequestOptions]
             Request-specific configuration.
 
@@ -203,6 +207,7 @@ class MessagesClient:
                 "assistant_message_tool_name": assistant_message_tool_name,
                 "assistant_message_tool_kwarg": assistant_message_tool_kwarg,
                 "include_return_message_types": include_return_message_types,
+                "enable_thinking": enable_thinking,
             },
             request_options=request_options,
             omit=OMIT,
@@ -241,6 +246,7 @@ class MessagesClient:
         assistant_message_tool_name: typing.Optional[str] = OMIT,
         assistant_message_tool_kwarg: typing.Optional[str] = OMIT,
         include_return_message_types: typing.Optional[typing.Sequence[MessageType]] = OMIT,
+        enable_thinking: typing.Optional[str] = OMIT,
         stream_tokens: typing.Optional[bool] = OMIT,
         request_options: typing.Optional[RequestOptions] = None,
     ) -> typing.Iterator[LettaStreamingResponse]:
@@ -271,6 +277,9 @@ class MessagesClient:
         include_return_message_types : typing.Optional[typing.Sequence[MessageType]]
             Only return specified message types in the response. If `None` (default) returns all messages.
 
+        enable_thinking : typing.Optional[str]
+            If set to True, enables reasoning before responses or tool calls from the agent.
+
         stream_tokens : typing.Optional[bool]
             Flag to determine if individual tokens should be streamed. Set to True for token streaming (requires stream_steps = True).
 
@@ -318,6 +327,7 @@ class MessagesClient:
                 "assistant_message_tool_name": assistant_message_tool_name,
                 "assistant_message_tool_kwarg": assistant_message_tool_kwarg,
                 "include_return_message_types": include_return_message_types,
+                "enable_thinking": enable_thinking,
                 "stream_tokens": stream_tokens,
             },
             request_options=request_options,
@@ -606,6 +616,7 @@ class AsyncMessagesClient:
         assistant_message_tool_name: typing.Optional[str] = OMIT,
         assistant_message_tool_kwarg: typing.Optional[str] = OMIT,
         include_return_message_types: typing.Optional[typing.Sequence[MessageType]] = OMIT,
+        enable_thinking: typing.Optional[str] = OMIT,
         request_options: typing.Optional[RequestOptions] = None,
     ) -> LettaResponse:
         """
@@ -634,6 +645,9 @@ class AsyncMessagesClient:
         include_return_message_types : typing.Optional[typing.Sequence[MessageType]]
             Only return specified message types in the response. If `None` (default) returns all messages.
 
+        enable_thinking : typing.Optional[str]
+            If set to True, enables reasoning before responses or tool calls from the agent.
+
         request_options : typing.Optional[RequestOptions]
             Request-specific configuration.
 
@@ -684,6 +698,7 @@ class AsyncMessagesClient:
                 "assistant_message_tool_name": assistant_message_tool_name,
                 "assistant_message_tool_kwarg": assistant_message_tool_kwarg,
                 "include_return_message_types": include_return_message_types,
+                "enable_thinking": enable_thinking,
             },
             request_options=request_options,
             omit=OMIT,
@@ -722,6 +737,7 @@ class AsyncMessagesClient:
         assistant_message_tool_name: typing.Optional[str] = OMIT,
         assistant_message_tool_kwarg: typing.Optional[str] = OMIT,
         include_return_message_types: typing.Optional[typing.Sequence[MessageType]] = OMIT,
+        enable_thinking: typing.Optional[str] = OMIT,
         stream_tokens: typing.Optional[bool] = OMIT,
         request_options: typing.Optional[RequestOptions] = None,
     ) -> typing.AsyncIterator[LettaStreamingResponse]:
@@ -752,6 +768,9 @@ class AsyncMessagesClient:
         include_return_message_types : typing.Optional[typing.Sequence[MessageType]]
             Only return specified message types in the response. If `None` (default) returns all messages.
 
+        enable_thinking : typing.Optional[str]
+            If set to True, enables reasoning before responses or tool calls from the agent.
+
         stream_tokens : typing.Optional[bool]
             Flag to determine if individual tokens should be streamed. Set to True for token streaming (requires stream_steps = True).
 
@@ -807,6 +826,7 @@ class AsyncMessagesClient:
                 "assistant_message_tool_name": assistant_message_tool_name,
                 "assistant_message_tool_kwarg": assistant_message_tool_kwarg,
                 "include_return_message_types": include_return_message_types,
+                "enable_thinking": enable_thinking,
                 "stream_tokens": stream_tokens,
             },
             request_options=request_options,
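
The group-messages client gains the identical parameter on its create, streaming, and async paths. A hedged sketch of the streaming variant; the `group_id` argument and exact call path are inferred from this file's location, not shown in the hunks above:

```python
from letta_client import Letta, MessageCreate, TextContent

client = Letta(token="YOUR_TOKEN")

# group_id is assumed from the groups/messages client signature.
for chunk in client.groups.messages.create_stream(
    group_id="group_id",
    messages=[MessageCreate(role="user", content=[TextContent(text="text")])],
    enable_thinking="true",
    stream_tokens=True,  # token streaming requires stream_steps = True
):
    print(chunk)
```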
letta_client/tools/__init__.py CHANGED
@@ -4,8 +4,10 @@ from .types import (
     AddMcpServerRequest,
     AddMcpServerResponseItem,
     ConnectMcpServerRequest,
+    ConnectMcpServerResponseEvent,
     DeleteMcpServerResponseItem,
     ListMcpServersResponseValue,
+    StreamingResponse,
     TestMcpServerRequest,
     UpdateMcpServerRequest,
     UpdateMcpServerResponse,
@@ -15,8 +17,10 @@ __all__ = [
     "AddMcpServerRequest",
     "AddMcpServerResponseItem",
     "ConnectMcpServerRequest",
+    "ConnectMcpServerResponseEvent",
     "DeleteMcpServerResponseItem",
     "ListMcpServersResponseValue",
+    "StreamingResponse",
     "TestMcpServerRequest",
     "UpdateMcpServerRequest",
    "UpdateMcpServerResponse",
letta_client/tools/client.py CHANGED
@@ -24,6 +24,9 @@ from .types.update_mcp_server_request import UpdateMcpServerRequest
 from .types.update_mcp_server_response import UpdateMcpServerResponse
 from .types.test_mcp_server_request import TestMcpServerRequest
 from .types.connect_mcp_server_request import ConnectMcpServerRequest
+from .types.streaming_response import StreamingResponse
+import httpx_sse
+import json
 from ..core.client_wrapper import AsyncClientWrapper
 
 # this is used as the default value for optional parameters
@@ -1324,7 +1327,7 @@ class ToolsClient:
     ) -> typing.Optional[typing.Any]:
         """
         Test connection to an MCP server without adding it.
-        Returns the list of available tools if successful, or OAuth information if OAuth is required.
+        Returns the list of available tools if successful.
 
         Parameters
         ----------
@@ -1389,7 +1392,7 @@
 
     def connect_mcp_server(
         self, *, request: ConnectMcpServerRequest, request_options: typing.Optional[RequestOptions] = None
-    ) -> typing.Optional[typing.Any]:
+    ) -> typing.Iterator[StreamingResponse]:
         """
         Connect to an MCP server with support for OAuth via SSE.
         Returns a stream of events handling authorization state and exchange if OAuth is required.
@@ -1401,9 +1404,9 @@
         request_options : typing.Optional[RequestOptions]
             Request-specific configuration.
 
-        Returns
-        -------
-        typing.Optional[typing.Any]
+        Yields
+        ------
+        typing.Iterator[StreamingResponse]
             Successful response
 
         Examples
@@ -1414,15 +1417,17 @@
             project="YOUR_PROJECT",
             token="YOUR_TOKEN",
         )
-        client.tools.connect_mcp_server(
+        response = client.tools.connect_mcp_server(
            request=StdioServerConfig(
                server_name="server_name",
                command="command",
                args=["args"],
            ),
        )
+        for chunk in response:
+            yield chunk
         """
-        _response = self._client_wrapper.httpx_client.request(
+        with self._client_wrapper.httpx_client.stream(
             "v1/tools/mcp/servers/connect",
             method="POST",
             json=convert_and_respect_annotation_metadata(
@@ -1430,107 +1435,37 @@
             ),
             request_options=request_options,
             omit=OMIT,
-        )
-        try:
-            if 200 <= _response.status_code < 300:
-                return typing.cast(
-                    typing.Optional[typing.Any],
-                    construct_type(
-                        type_=typing.Optional[typing.Any],  # type: ignore
-                        object_=_response.json(),
-                    ),
-                )
-            if _response.status_code == 422:
-                raise UnprocessableEntityError(
-                    typing.cast(
-                        HttpValidationError,
-                        construct_type(
-                            type_=HttpValidationError,  # type: ignore
-                            object_=_response.json(),
-                        ),
+        ) as _response:
+            try:
+                if 200 <= _response.status_code < 300:
+                    _event_source = httpx_sse.EventSource(_response)
+                    for _sse in _event_source.iter_sse():
+                        try:
+                            yield typing.cast(
+                                StreamingResponse,
+                                construct_type(
+                                    type_=StreamingResponse,  # type: ignore
+                                    object_=json.loads(_sse.data),
+                                ),
+                            )
+                        except:
+                            pass
+                    return
+                _response.read()
+                if _response.status_code == 422:
+                    raise UnprocessableEntityError(
+                        typing.cast(
+                            HttpValidationError,
+                            construct_type(
+                                type_=HttpValidationError,  # type: ignore
+                                object_=_response.json(),
+                            ),
+                        )
                     )
-                )
-            _response_json = _response.json()
-        except JSONDecodeError:
-            raise ApiError(status_code=_response.status_code, body=_response.text)
-        raise ApiError(status_code=_response.status_code, body=_response_json)
-
-    def mcp_oauth_callback(
-        self,
-        session_id: str,
-        *,
-        code: typing.Optional[str] = None,
-        state: typing.Optional[str] = None,
-        error: typing.Optional[str] = None,
-        error_description: typing.Optional[str] = None,
-        request_options: typing.Optional[RequestOptions] = None,
-    ) -> None:
-        """
-        Handle OAuth callback for MCP server authentication.
-
-        Parameters
-        ----------
-        session_id : str
-
-        code : typing.Optional[str]
-            OAuth authorization code
-
-        state : typing.Optional[str]
-            OAuth state parameter
-
-        error : typing.Optional[str]
-            OAuth error
-
-        error_description : typing.Optional[str]
-            OAuth error description
-
-        request_options : typing.Optional[RequestOptions]
-            Request-specific configuration.
-
-        Returns
-        -------
-        None
-
-        Examples
-        --------
-        from letta_client import Letta
-
-        client = Letta(
-            project="YOUR_PROJECT",
-            token="YOUR_TOKEN",
-        )
-        client.tools.mcp_oauth_callback(
-            session_id="session_id",
-        )
-        """
-        _response = self._client_wrapper.httpx_client.request(
-            f"v1/tools/mcp/oauth/callback/{jsonable_encoder(session_id)}",
-            method="GET",
-            params={
-                "code": code,
-                "state": state,
-                "error": error,
-                "error_description": error_description,
-            },
-            request_options=request_options,
-        )
-        try:
-            if 200 <= _response.status_code < 300:
-                return
-            if _response.status_code == 422:
-                raise UnprocessableEntityError(
-                    typing.cast(
-                        HttpValidationError,
-                        construct_type(
-                            type_=HttpValidationError,  # type: ignore
-                            object_=_response.json(),
-                        ),
-                    )
-                )
-            _response_json = _response.json()
-        except JSONDecodeError:
-            raise ApiError(status_code=_response.status_code, body=_response.text)
-        raise ApiError(status_code=_response.status_code, body=_response_json)
+                _response_json = _response.json()
+            except JSONDecodeError:
+                raise ApiError(status_code=_response.status_code, body=_response.text)
+            raise ApiError(status_code=_response.status_code, body=_response_json)
 
 
 class AsyncToolsClient:
@@ -2971,7 +2906,7 @@ class AsyncToolsClient:
     ) -> typing.Optional[typing.Any]:
         """
         Test connection to an MCP server without adding it.
-        Returns the list of available tools if successful, or OAuth information if OAuth is required.
+        Returns the list of available tools if successful.
 
         Parameters
         ----------
@@ -3044,7 +2979,7 @@
 
     async def connect_mcp_server(
         self, *, request: ConnectMcpServerRequest, request_options: typing.Optional[RequestOptions] = None
-    ) -> typing.Optional[typing.Any]:
+    ) -> typing.AsyncIterator[StreamingResponse]:
         """
         Connect to an MCP server with support for OAuth via SSE.
         Returns a stream of events handling authorization state and exchange if OAuth is required.
@@ -3056,9 +2991,9 @@
         request_options : typing.Optional[RequestOptions]
             Request-specific configuration.
 
-        Returns
-        -------
-        typing.Optional[typing.Any]
+        Yields
+        ------
+        typing.AsyncIterator[StreamingResponse]
             Successful response
 
         Examples
@@ -3074,18 +3009,20 @@
 
 
         async def main() -> None:
-            await client.tools.connect_mcp_server(
+            response = await client.tools.connect_mcp_server(
                 request=StdioServerConfig(
                     server_name="server_name",
                     command="command",
                     args=["args"],
                 ),
            )
+            async for chunk in response:
+                yield chunk
 
 
         asyncio.run(main())
         """
-        _response = await self._client_wrapper.httpx_client.request(
+        async with self._client_wrapper.httpx_client.stream(
             "v1/tools/mcp/servers/connect",
             method="POST",
             json=convert_and_respect_annotation_metadata(
@@ -3093,112 +3030,34 @@
             ),
             request_options=request_options,
             omit=OMIT,
-        )
-        try:
-            if 200 <= _response.status_code < 300:
-                return typing.cast(
-                    typing.Optional[typing.Any],
-                    construct_type(
-                        type_=typing.Optional[typing.Any],  # type: ignore
-                        object_=_response.json(),
-                    ),
-                )
-            if _response.status_code == 422:
-                raise UnprocessableEntityError(
-                    typing.cast(
-                        HttpValidationError,
-                        construct_type(
-                            type_=HttpValidationError,  # type: ignore
-                            object_=_response.json(),
-                        ),
+        ) as _response:
+            try:
+                if 200 <= _response.status_code < 300:
+                    _event_source = httpx_sse.EventSource(_response)
+                    async for _sse in _event_source.aiter_sse():
+                        try:
+                            yield typing.cast(
+                                StreamingResponse,
+                                construct_type(
+                                    type_=StreamingResponse,  # type: ignore
+                                    object_=json.loads(_sse.data),
+                                ),
+                            )
+                        except:
+                            pass
+                    return
+                await _response.aread()
+                if _response.status_code == 422:
+                    raise UnprocessableEntityError(
+                        typing.cast(
+                            HttpValidationError,
+                            construct_type(
+                                type_=HttpValidationError,  # type: ignore
+                                object_=_response.json(),
+                            ),
+                        )
                     )
-                )
-            _response_json = _response.json()
-        except JSONDecodeError:
-            raise ApiError(status_code=_response.status_code, body=_response.text)
-        raise ApiError(status_code=_response.status_code, body=_response_json)
-
-    async def mcp_oauth_callback(
-        self,
-        session_id: str,
-        *,
-        code: typing.Optional[str] = None,
-        state: typing.Optional[str] = None,
-        error: typing.Optional[str] = None,
-        error_description: typing.Optional[str] = None,
-        request_options: typing.Optional[RequestOptions] = None,
-    ) -> None:
-        """
-        Handle OAuth callback for MCP server authentication.
-
-        Parameters
-        ----------
-        session_id : str
-
-        code : typing.Optional[str]
-            OAuth authorization code
-
-        state : typing.Optional[str]
-            OAuth state parameter
-
-        error : typing.Optional[str]
-            OAuth error
-
-        error_description : typing.Optional[str]
-            OAuth error description
-
-        request_options : typing.Optional[RequestOptions]
-            Request-specific configuration.
-
-        Returns
-        -------
-        None
-
-        Examples
-        --------
-        import asyncio
-
-        from letta_client import AsyncLetta
-
-        client = AsyncLetta(
-            project="YOUR_PROJECT",
-            token="YOUR_TOKEN",
-        )
-
-
-        async def main() -> None:
-            await client.tools.mcp_oauth_callback(
-                session_id="session_id",
-            )
-
-
-        asyncio.run(main())
-        """
-        _response = await self._client_wrapper.httpx_client.request(
-            f"v1/tools/mcp/oauth/callback/{jsonable_encoder(session_id)}",
-            method="GET",
-            params={
-                "code": code,
-                "state": state,
-                "error": error,
-                "error_description": error_description,
-            },
-            request_options=request_options,
-        )
-        try:
-            if 200 <= _response.status_code < 300:
-                return
-            if _response.status_code == 422:
-                raise UnprocessableEntityError(
-                    typing.cast(
-                        HttpValidationError,
-                        construct_type(
-                            type_=HttpValidationError,  # type: ignore
-                            object_=_response.json(),
-                        ),
-                    )
-                )
-            _response_json = _response.json()
-        except JSONDecodeError:
-            raise ApiError(status_code=_response.status_code, body=_response.text)
-        raise ApiError(status_code=_response.status_code, body=_response_json)
+                _response_json = _response.json()
+            except JSONDecodeError:
+                raise ApiError(status_code=_response.status_code, body=_response.text)
+            raise ApiError(status_code=_response.status_code, body=_response_json)
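
Both `connect_mcp_server` variants are now generators, so nothing is sent until the caller iterates. The generated docstring's `yield chunk` inside `main()` would itself turn `main` into an async generator, so a real consumer would more likely process the events directly; a hedged async sketch:

```python
import asyncio

from letta_client import AsyncLetta, StdioServerConfig

client = AsyncLetta(
    project="YOUR_PROJECT",
    token="YOUR_TOKEN",
)


async def main() -> None:
    # The method is an async generator; iterating it drives the SSE stream.
    async for chunk in client.tools.connect_mcp_server(
        request=StdioServerConfig(
            server_name="server_name",
            command="command",
            args=["args"],
        ),
    ):
        print(chunk.event, chunk.message)


asyncio.run(main())
```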
letta_client/tools/types/__init__.py CHANGED
@@ -3,8 +3,10 @@
 from .add_mcp_server_request import AddMcpServerRequest
 from .add_mcp_server_response_item import AddMcpServerResponseItem
 from .connect_mcp_server_request import ConnectMcpServerRequest
+from .connect_mcp_server_response_event import ConnectMcpServerResponseEvent
 from .delete_mcp_server_response_item import DeleteMcpServerResponseItem
 from .list_mcp_servers_response_value import ListMcpServersResponseValue
+from .streaming_response import StreamingResponse
 from .test_mcp_server_request import TestMcpServerRequest
 from .update_mcp_server_request import UpdateMcpServerRequest
 from .update_mcp_server_response import UpdateMcpServerResponse
@@ -13,8 +15,10 @@ __all__ = [
     "AddMcpServerRequest",
     "AddMcpServerResponseItem",
     "ConnectMcpServerRequest",
+    "ConnectMcpServerResponseEvent",
     "DeleteMcpServerResponseItem",
     "ListMcpServersResponseValue",
+    "StreamingResponse",
     "TestMcpServerRequest",
     "UpdateMcpServerRequest",
     "UpdateMcpServerResponse",
letta_client/tools/types/connect_mcp_server_response_event.py ADDED
@@ -0,0 +1,8 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+ConnectMcpServerResponseEvent = typing.Union[
+    typing.Literal["connection_attempt", "success", "error", "oauth_required", "authorization_url", "waiting_for_auth"],
+    typing.Any,
+]
letta_client/tools/types/streaming_response.py ADDED
@@ -0,0 +1,23 @@
+# This file was auto-generated by Fern from our API Definition.
+
+from ...core.unchecked_base_model import UncheckedBaseModel
+from .connect_mcp_server_response_event import ConnectMcpServerResponseEvent
+import typing
+from ...types.mcp_tool import McpTool
+from ...core.pydantic_utilities import IS_PYDANTIC_V2
+import pydantic
+
+
+class StreamingResponse(UncheckedBaseModel):
+    event: ConnectMcpServerResponseEvent
+    message: typing.Optional[str] = None
+    tools: typing.Optional[McpTool] = None
+
+    if IS_PYDANTIC_V2:
+        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
+    else:
+
+        class Config:
+            frozen = True
+            smart_union = True
+            extra = pydantic.Extra.allow
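
The `StreamingResponse` model ties each SSE payload to one of the `ConnectMcpServerResponseEvent` literals above. A branching sketch for the sync client; the assumption that the OAuth URL arrives in the `message` field of an `"authorization_url"` event is ours, not stated in the diff:

```python
from letta_client import Letta, StdioServerConfig

client = Letta(token="YOUR_TOKEN")

for event in client.tools.connect_mcp_server(
    request=StdioServerConfig(
        server_name="server_name",
        command="command",
        args=["args"],
    ),
):
    if event.event == "authorization_url":
        # Assumption: the URL to open is carried in `message`.
        print("Authorize at:", event.message)
    elif event.event == "waiting_for_auth":
        print("Waiting for OAuth to complete...")
    elif event.event == "success":
        print("Connected; tools:", event.tools)
    elif event.event == "error":
        raise RuntimeError(event.message or "MCP connection failed")
```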
letta_client/types/letta_batch_request.py CHANGED
@@ -39,6 +39,11 @@ class LettaBatchRequest(UncheckedBaseModel):
     Only return specified message types in the response. If `None` (default) returns all messages.
     """
 
+    enable_thinking: typing.Optional[str] = pydantic.Field(default=None)
+    """
+    If set to True, enables reasoning before responses or tool calls from the agent.
+    """
+
     agent_id: str = pydantic.Field()
     """
     The ID of the agent to send this batch request for
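
The same string-typed flag lands on the request models themselves. A construction sketch; the root-level re-export of `LettaBatchRequest` and its inherited `messages` field are assumptions based on the SDK's usual layout:

```python
from letta_client import LettaBatchRequest, MessageCreate, TextContent

# Assumed: LettaBatchRequest is re-exported at the root and shares the
# `messages` field with LettaRequest.
batch_item = LettaBatchRequest(
    agent_id="agent_id",  # placeholder
    messages=[MessageCreate(role="user", content=[TextContent(text="text")])],
    enable_thinking="true",  # Optional[str], mirroring the field above
)
```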
letta_client/types/letta_request.py CHANGED
@@ -39,6 +39,11 @@ class LettaRequest(UncheckedBaseModel):
     Only return specified message types in the response. If `None` (default) returns all messages.
     """
 
+    enable_thinking: typing.Optional[str] = pydantic.Field(default=None)
+    """
+    If set to True, enables reasoning before responses or tool calls from the agent.
+    """
+
     if IS_PYDANTIC_V2:
         model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
     else:
letta_client/types/letta_streaming_request.py CHANGED
@@ -39,6 +39,11 @@ class LettaStreamingRequest(UncheckedBaseModel):
     Only return specified message types in the response. If `None` (default) returns all messages.
     """
 
+    enable_thinking: typing.Optional[str] = pydantic.Field(default=None)
+    """
+    If set to True, enables reasoning before responses or tool calls from the agent.
+    """
+
     stream_tokens: typing.Optional[bool] = pydantic.Field(default=None)
     """
     Flag to determine if individual tokens should be streamed. Set to True for token streaming (requires stream_steps = True).
letta_client-0.1.226.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: letta-client
-Version: 0.1.224
+Version: 0.1.226
 Summary:
 Requires-Python: >=3.8,<4.0
 Classifier: Intended Audience :: Developers
@@ -102,24 +102,18 @@ except ApiError as e:
 The SDK supports streaming responses, as well, the response will be a generator that you can loop over.
 
 ```python
-from letta_client import Letta, MessageCreate, TextContent
+from letta_client import Letta, StdioServerConfig
 
 client = Letta(
     project="YOUR_PROJECT",
     token="YOUR_TOKEN",
 )
-response = client.agents.messages.create_stream(
-    agent_id="agent_id",
-    messages=[
-        MessageCreate(
-            role="user",
-            content=[
-                TextContent(
-                    text="text",
-                )
-            ],
-        )
-    ],
+response = client.tools.connect_mcp_server(
+    request=StdioServerConfig(
+        server_name="server_name",
+        command="command",
+        args=["args"],
+    ),
 )
 for chunk in response:
     yield chunk
letta_client-0.1.226.dist-info/RECORD CHANGED
@@ -1,4 +1,4 @@
-letta_client/__init__.py,sha256=SHGQJZEiJUN0sO7ZVdwwTenizJTldEs9YqrB-ux8vCk,19484
+letta_client/__init__.py,sha256=kQxkWRS8b6iG1Tb3TvpqswqflQ_k9nexhmX_MxZXRiE,19604
 letta_client/agents/__init__.py,sha256=9wEJMighDL1OFg_7Qh-D50bubPbV4BWo1ZKYxdDJGIQ,2146
 letta_client/agents/blocks/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-_v1roeGPAKoa_pw,65
 letta_client/agents/blocks/client.py,sha256=kCAcU1ACOKRCrjEYPyCQslWBjtsq7m4QboQP0wwLQy8,25392
@@ -18,7 +18,7 @@ letta_client/agents/memory_variables/client.py,sha256=DGJvV5k5H-BRE-FWMLNrCqKkHJ
 letta_client/agents/memory_variables/types/__init__.py,sha256=EoznK0WvhCyFYd4KDdU-cGDQWpSXmq79BSkqVHN-j7A,180
 letta_client/agents/memory_variables/types/memory_variables_list_response.py,sha256=bsF__n_B4ZXEHzg--OVD6tHHXt_aM-FjHm2x1ZXPnL0,599
 letta_client/agents/messages/__init__.py,sha256=RSmlezGH90RirElX0LHusjD03EN6UqGsrS-A0Bue2hA,353
-letta_client/agents/messages/client.py,sha256=J3VPSf9oSFb9qdyBfOcbRTkKtvnyT903PR6ADsofH5M,58066
+letta_client/agents/messages/client.py,sha256=yh3gXasaH-rwTRyz64CJjR_AbpX05Ncb4LvilqnaN0Y,59548
 letta_client/agents/messages/types/__init__.py,sha256=uDn0e1-EJ7g2U0EHD8SkP38liALoVCTJ7bJhUAOutwE,473
 letta_client/agents/messages/types/letta_streaming_response.py,sha256=8VR2F32xjoPFXL4YBvBbAZclaJG4ENPTjk7BrlZkmtw,742
 letta_client/agents/messages/types/messages_modify_request.py,sha256=7C2X3BKye-YDSXOkdEmxxt34seI4jkLK0-govtc4nhg,475
@@ -75,7 +75,7 @@ letta_client/client_side_access_tokens/types/client_side_access_tokens_list_clie
 letta_client/client_side_access_tokens/types/client_side_access_tokens_list_client_side_access_tokens_response_tokens_item_policy_data_item_access_item.py,sha256=kNHfEWFl7u71Pu8NPqutod0a2NXfvq8il05Hqm0iBB4,284
 letta_client/core/__init__.py,sha256=OKbX2aCZXgHCDUsCouqv-OiX32xA6eFFCKIUH9M5Vzk,1591
 letta_client/core/api_error.py,sha256=RE8LELok2QCjABadECTvtDp7qejA1VmINCh6TbqPwSE,426
-letta_client/core/client_wrapper.py,sha256=xSoUSJswIYwHXAHKAT7yDaEoUmNqKq707H1hsvj6AKw,2336
+letta_client/core/client_wrapper.py,sha256=IaGc6llVLIjSdRed41AC3ZZDUFKMyrENDHxGP4yyrcs,2336
 letta_client/core/datetime_utils.py,sha256=nBys2IsYrhPdszxGKCNRPSOCwa-5DWOHG95FB8G9PKo,1047
 letta_client/core/file.py,sha256=d4NNbX8XvXP32z8KpK2Xovv33nFfruIrpz0QWxlgpZk,2663
 letta_client/core/http_client.py,sha256=Z77OIxIbL4OAB2IDqjRq_sYa5yNYAWfmdhdCSSvh6Y4,19552
@@ -105,7 +105,7 @@ letta_client/folders/passages/client.py,sha256=ejEUmwrwfUBG4UihNTrIk7b1-a1v6QziR
 letta_client/groups/__init__.py,sha256=WzkNp5Q_5zQj_NHv4hJCOKvW6ftM9EuNxw8hkPRRbko,434
 letta_client/groups/client.py,sha256=H6PW_E2QUX_hfoa5aunu90KEU1sF5d4xpTL1hxZFifU,30369
 letta_client/groups/messages/__init__.py,sha256=M7Ar6Rmb8we4dfYE6jj3FCL9UvVFy1bNQIPflUXMWHA,243
-letta_client/groups/messages/client.py,sha256=lK8P1J3ETHxdsS8zbdNnxagVSef03Z_tLiqQiEXq6t0,36386
+letta_client/groups/messages/client.py,sha256=POUMzgG6YgvLzfKeWsDPYL706VbNL6lWOOnqW8w9Mrc,37374
 letta_client/groups/messages/types/__init__.py,sha256=Oc2j0oGOs96IEFf9xsJIkjBjoq3OMtse64YwWv3F9Io,335
 letta_client/groups/messages/types/letta_streaming_response.py,sha256=MdE2PxQ1x1AviakHXsWVcFv97a3RchzzzIiD77w4EC8,665
 letta_client/groups/messages/types/messages_modify_request.py,sha256=7C2X3BKye-YDSXOkdEmxxt34seI4jkLK0-govtc4nhg,475
@@ -168,14 +168,16 @@ letta_client/templates/client.py,sha256=wcidkaF0eRgKEYRrDTgKAS_A57MO7H2S_muzznEV
 letta_client/templates/types/__init__.py,sha256=dAr_dEh0BdwUxAcV1sJ9RM07Z8nCv4dCK6fmTltqQ6c,286
 letta_client/templates/types/templates_list_response.py,sha256=HYloMVzk086c6fFGRYZz-Ozc_Yylozp2aPpweHS5uXI,866
 letta_client/templates/types/templates_list_response_templates_item.py,sha256=yyJq8wEOb2XIg99uhRMKoy2qD2CbuvI_5FAspwYWnfI,593
-letta_client/tools/__init__.py,sha256=qPV3aBl81CCoPAvuGdX-GiMyYkz0i49KerSRO-7BWXA,585
-letta_client/tools/client.py,sha256=ljRP0WZUhkgMFGXwM327QJReaW_dt4WRYkcDWJ1SIzY,107013
-letta_client/tools/types/__init__.py,sha256=U6Z2ZrKY4czFQyksSjoeoQHPbNQ0gJBM7QOGPJltqAA,846
+letta_client/tools/__init__.py,sha256=S7wUWB3P-uxAsKEFnzvVBpsDloETL1gks3l6ecdPpBY,705
+letta_client/tools/client.py,sha256=26C9XDrSucsPXxgUYzIYdmKloZPGRevXwFLhoZxyVO8,103238
+letta_client/tools/types/__init__.py,sha256=IodcTbnUxVsTQrNyZnuSfre7Cl2XESyiDkdHmFYfbHY,1035
 letta_client/tools/types/add_mcp_server_request.py,sha256=m3QdTmY2ZHQUWbxMTNsOhPnseWHVipsOTdSXuC7KHQI,371
 letta_client/tools/types/add_mcp_server_response_item.py,sha256=DNrB3LwstJzKrw_GRJ8tb3XCEJWfD16WzBoGrGY_ZQI,376
 letta_client/tools/types/connect_mcp_server_request.py,sha256=KDHYKXnRRf9g8bHUUWvWdyIiJk-enBZ5EO7N80r-i80,375
+letta_client/tools/types/connect_mcp_server_response_event.py,sha256=P_IXBx-DeJ2FdsIzIbRhekUKnU8ugiTgBqOzpRwEK6U,265
 letta_client/tools/types/delete_mcp_server_response_item.py,sha256=YLIBE7OD535NJAncGpzMDGaQRe1831DNKcj2UzS9e0c,379
 letta_client/tools/types/list_mcp_servers_response_value.py,sha256=Eyji5qB7FhowiogsAbpcU_aMyH9zClv9lUMmHOmNPYk,379
+letta_client/tools/types/streaming_response.py,sha256=E7W0OqxHFmhU2slJdzVZBxQA508AF5Im3pAM4UmuLs4,790
 letta_client/tools/types/test_mcp_server_request.py,sha256=sLlOEZdmLfkHqHCkUjntGbr8_MkBhsqpMQ-HwdNOnq0,372
 letta_client/tools/types/update_mcp_server_request.py,sha256=nCpx9-OvpH0l5iJxEi8kgSok1F1r7liEAZm-kaqBtEo,402
 letta_client/tools/types/update_mcp_server_response.py,sha256=muwHagaQBMwQI0of9EBCBtG9lD-jELFAevgTB2MjpFQ,375
@@ -310,15 +312,15 @@ letta_client/types/json_object_response_format.py,sha256=kz1wkWKO2H9Ad9GgLzLHgnY
 letta_client/types/json_schema.py,sha256=EHcLKBSGRsSzCKTpujKFHylcLJG6ODQIBrjQkU4lWDQ,870
 letta_client/types/json_schema_response_format.py,sha256=vTBC5qyuUm9u1uf1IZmNyEH-wSXm8c_7cOwd7ua_aJw,816
 letta_client/types/letta_batch_messages.py,sha256=kMefbiarujv7hCw3FyU-eVY2RgDV0ZXLOpkOooWNw6g,613
-letta_client/types/letta_batch_request.py,sha256=s0lLeWYKxS4ATaElzdZJyTxn7nVnaPTXeLYa01Ok-rA,1773
+letta_client/types/letta_batch_request.py,sha256=cNxnDcz7ob626oCJb4a6tpD3cQqk5f9ZcIyQXV5gJIQ,1948
 letta_client/types/letta_image.py,sha256=HA__0Gq0BpKHeUUxIT2TmEfyY18F2DrauiGlijWv-HA,1138
 letta_client/types/letta_message_content_union.py,sha256=ypGyeR8PGqPGaAWtLWjDkWwG9oa4AWcL44y-RfLJVvw,540
 letta_client/types/letta_message_union.py,sha256=TTQwlur2CZNdZ466Nb_2TFcSFXrgoMliaNzD33t7Ktw,603
-letta_client/types/letta_request.py,sha256=vhS3qQYxNN2dlE_kXWPW3BJekBDMTSRp1MXmj4-ztfQ,1659
+letta_client/types/letta_request.py,sha256=icbaI29DCFiCE5Y19WrdXz0VdIu3kq34hlGTlD9uTZE,1834
 letta_client/types/letta_request_config.py,sha256=OW5TNyTfbBMfRcWp5Aje_-7vjS0zVzYCmQGKLUtJezU,1346
 letta_client/types/letta_response.py,sha256=5brJ39B8PxTBpm5_clL9XGP1SQ1pox-G0gxMDkitMlw,1395
 letta_client/types/letta_stop_reason.py,sha256=5uqJibhaT6LFTj6Sf6m0VJKS1FJzIIgymjZTvu2a0Zk,931
-letta_client/types/letta_streaming_request.py,sha256=YJ_6ge5xrzQjOs2UTY3FxwhTv6QoCi9XWv5Rod01W1E,1884
+letta_client/types/letta_streaming_request.py,sha256=-mWkurh8AywX0YjIt5AYbxlE3m6iDYCUCkgpkzUE3G4,2059
 letta_client/types/letta_usage_statistics.py,sha256=k6V72J2TEPd-RQBuUQxF3oylrAMcuSKBskd2nnZmGOw,1886
 letta_client/types/letta_user_message_content_union.py,sha256=3Gbs3mRk-tJj2z0Mf-BNDomWHEytQd3OTUN4xnEVsuE,229
 letta_client/types/llm_config.py,sha256=m8IlVCS4uSXVYsuIadilgO32UpuD4_ij0a1hcRQiMLo,4306
@@ -445,6 +447,6 @@ letta_client/types/web_search_options_user_location_approximate.py,sha256=Ywk01J
 letta_client/version.py,sha256=bttKLbIhO3UonCYQlqs600zzbQgfhCCMjeXR9WRzid4,79
 letta_client/voice/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-_v1roeGPAKoa_pw,65
 letta_client/voice/client.py,sha256=47iQYCuW_qpKI4hM3pYVxn3hw7kgQj3emU1_oRpkRMA,5811
-letta_client-0.1.224.dist-info/METADATA,sha256=vWY5btWhUxfgAemsZCMFxzK2SDhykSCQkkJAHABYCT8,5177
-letta_client-0.1.224.dist-info/WHEEL,sha256=Zb28QaM1gQi8f4VCBhsUklF61CTlNYfs9YAZn-TOGFk,88
-letta_client-0.1.224.dist-info/RECORD,,
+letta_client-0.1.226.dist-info/METADATA,sha256=M0_FnvWA9H9CyQ4oeGVYPOJhy66xl2ODW5bdV09-hj8,5064
+letta_client-0.1.226.dist-info/WHEEL,sha256=Zb28QaM1gQi8f4VCBhsUklF61CTlNYfs9YAZn-TOGFk,88
+letta_client-0.1.226.dist-info/RECORD,,