letta-client 0.1.196__py3-none-any.whl → 0.1.197__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of letta-client might be problematic; see the registry's advisory page for more details.

@@ -18,7 +18,12 @@ from .types import (
18
18
  )
19
19
  from . import blocks, context, core_memory, groups, memory_variables, messages, passages, sources, templates, tools
20
20
  from .memory_variables import MemoryVariablesListResponse
21
- from .messages import LettaStreamingResponse, MessagesModifyRequest, MessagesModifyResponse
21
+ from .messages import (
22
+ LettaStreamingResponse,
23
+ MessagesModifyRequest,
24
+ MessagesModifyResponse,
25
+ MessagesPreviewRawPayloadRequest,
26
+ )
22
27
  from .templates import TemplatesCreateResponse, TemplatesCreateVersionRequestReturnAgentState, TemplatesMigrateResponse
23
28
 
24
29
  __all__ = [
@@ -38,6 +43,7 @@ __all__ = [
38
43
  "MemoryVariablesListResponse",
39
44
  "MessagesModifyRequest",
40
45
  "MessagesModifyResponse",
46
+ "MessagesPreviewRawPayloadRequest",
41
47
  "TemplatesCreateResponse",
42
48
  "TemplatesCreateVersionRequestReturnAgentState",
43
49
  "TemplatesMigrateResponse",
@@ -32,7 +32,6 @@ from .. import core
32
32
  from .types.update_agent_tool_rules_item import UpdateAgentToolRulesItem
33
33
  from .types.update_agent_response_format import UpdateAgentResponseFormat
34
34
  import datetime as dt
35
- from ..types.message_type import MessageType
36
35
  from .types.agents_search_request_search_item import AgentsSearchRequestSearchItem
37
36
  from .types.agents_search_request_sort_by import AgentsSearchRequestSortBy
38
37
  from .types.agents_search_response import AgentsSearchResponse
@@ -1062,117 +1061,6 @@ class AgentsClient:
1062
1061
  raise ApiError(status_code=_response.status_code, body=_response.text)
1063
1062
  raise ApiError(status_code=_response.status_code, body=_response_json)
1064
1063
 
1065
- def preview_raw_payload(
1066
- self,
1067
- agent_id: str,
1068
- *,
1069
- messages: typing.Sequence[MessageCreate],
1070
- max_steps: typing.Optional[int] = OMIT,
1071
- use_assistant_message: typing.Optional[bool] = OMIT,
1072
- assistant_message_tool_name: typing.Optional[str] = OMIT,
1073
- assistant_message_tool_kwarg: typing.Optional[str] = OMIT,
1074
- include_return_message_types: typing.Optional[typing.Sequence[MessageType]] = OMIT,
1075
- request_options: typing.Optional[RequestOptions] = None,
1076
- ) -> typing.Dict[str, typing.Optional[typing.Any]]:
1077
- """
1078
- Inspect the raw LLM request payload without sending it.
1079
-
1080
- This endpoint processes the message through the agent loop up until
1081
- the LLM request, then returns the raw request payload that would
1082
- be sent to the LLM provider. Useful for debugging and inspection.
1083
-
1084
- Parameters
1085
- ----------
1086
- agent_id : str
1087
-
1088
- messages : typing.Sequence[MessageCreate]
1089
- The messages to be sent to the agent.
1090
-
1091
- max_steps : typing.Optional[int]
1092
- Maximum number of steps the agent should take to process the request.
1093
-
1094
- use_assistant_message : typing.Optional[bool]
1095
- Whether the server should parse specific tool call arguments (default `send_message`) as `AssistantMessage` objects.
1096
-
1097
- assistant_message_tool_name : typing.Optional[str]
1098
- The name of the designated message tool.
1099
-
1100
- assistant_message_tool_kwarg : typing.Optional[str]
1101
- The name of the message argument in the designated message tool.
1102
-
1103
- include_return_message_types : typing.Optional[typing.Sequence[MessageType]]
1104
- Only return specified message types in the response. If `None` (default) returns all messages.
1105
-
1106
- request_options : typing.Optional[RequestOptions]
1107
- Request-specific configuration.
1108
-
1109
- Returns
1110
- -------
1111
- typing.Dict[str, typing.Optional[typing.Any]]
1112
- Successful Response
1113
-
1114
- Examples
1115
- --------
1116
- from letta_client import Letta, MessageCreate, TextContent
1117
-
1118
- client = Letta(
1119
- project="YOUR_PROJECT",
1120
- token="YOUR_TOKEN",
1121
- )
1122
- client.agents.preview_raw_payload(
1123
- agent_id="agent_id",
1124
- messages=[
1125
- MessageCreate(
1126
- role="user",
1127
- content=[
1128
- TextContent(
1129
- text="text",
1130
- )
1131
- ],
1132
- )
1133
- ],
1134
- )
1135
- """
1136
- _response = self._client_wrapper.httpx_client.request(
1137
- f"v1/agents/{jsonable_encoder(agent_id)}/messages/preview-raw-payload",
1138
- method="POST",
1139
- json={
1140
- "messages": convert_and_respect_annotation_metadata(
1141
- object_=messages, annotation=typing.Sequence[MessageCreate], direction="write"
1142
- ),
1143
- "max_steps": max_steps,
1144
- "use_assistant_message": use_assistant_message,
1145
- "assistant_message_tool_name": assistant_message_tool_name,
1146
- "assistant_message_tool_kwarg": assistant_message_tool_kwarg,
1147
- "include_return_message_types": include_return_message_types,
1148
- },
1149
- request_options=request_options,
1150
- omit=OMIT,
1151
- )
1152
- try:
1153
- if 200 <= _response.status_code < 300:
1154
- return typing.cast(
1155
- typing.Dict[str, typing.Optional[typing.Any]],
1156
- construct_type(
1157
- type_=typing.Dict[str, typing.Optional[typing.Any]], # type: ignore
1158
- object_=_response.json(),
1159
- ),
1160
- )
1161
- if _response.status_code == 422:
1162
- raise UnprocessableEntityError(
1163
- typing.cast(
1164
- HttpValidationError,
1165
- construct_type(
1166
- type_=HttpValidationError, # type: ignore
1167
- object_=_response.json(),
1168
- ),
1169
- )
1170
- )
1171
- _response_json = _response.json()
1172
- except JSONDecodeError:
1173
- raise ApiError(status_code=_response.status_code, body=_response.text)
1174
- raise ApiError(status_code=_response.status_code, body=_response_json)
1175
-
1176
1064
  def summarize_agent_conversation(
1177
1065
  self, agent_id: str, *, max_message_length: int, request_options: typing.Optional[RequestOptions] = None
1178
1066
  ) -> AgentState:
@@ -2410,125 +2298,6 @@ class AsyncAgentsClient:
2410
2298
  raise ApiError(status_code=_response.status_code, body=_response.text)
2411
2299
  raise ApiError(status_code=_response.status_code, body=_response_json)
2412
2300
 
2413
- async def preview_raw_payload(
2414
- self,
2415
- agent_id: str,
2416
- *,
2417
- messages: typing.Sequence[MessageCreate],
2418
- max_steps: typing.Optional[int] = OMIT,
2419
- use_assistant_message: typing.Optional[bool] = OMIT,
2420
- assistant_message_tool_name: typing.Optional[str] = OMIT,
2421
- assistant_message_tool_kwarg: typing.Optional[str] = OMIT,
2422
- include_return_message_types: typing.Optional[typing.Sequence[MessageType]] = OMIT,
2423
- request_options: typing.Optional[RequestOptions] = None,
2424
- ) -> typing.Dict[str, typing.Optional[typing.Any]]:
2425
- """
2426
- Inspect the raw LLM request payload without sending it.
2427
-
2428
- This endpoint processes the message through the agent loop up until
2429
- the LLM request, then returns the raw request payload that would
2430
- be sent to the LLM provider. Useful for debugging and inspection.
2431
-
2432
- Parameters
2433
- ----------
2434
- agent_id : str
2435
-
2436
- messages : typing.Sequence[MessageCreate]
2437
- The messages to be sent to the agent.
2438
-
2439
- max_steps : typing.Optional[int]
2440
- Maximum number of steps the agent should take to process the request.
2441
-
2442
- use_assistant_message : typing.Optional[bool]
2443
- Whether the server should parse specific tool call arguments (default `send_message`) as `AssistantMessage` objects.
2444
-
2445
- assistant_message_tool_name : typing.Optional[str]
2446
- The name of the designated message tool.
2447
-
2448
- assistant_message_tool_kwarg : typing.Optional[str]
2449
- The name of the message argument in the designated message tool.
2450
-
2451
- include_return_message_types : typing.Optional[typing.Sequence[MessageType]]
2452
- Only return specified message types in the response. If `None` (default) returns all messages.
2453
-
2454
- request_options : typing.Optional[RequestOptions]
2455
- Request-specific configuration.
2456
-
2457
- Returns
2458
- -------
2459
- typing.Dict[str, typing.Optional[typing.Any]]
2460
- Successful Response
2461
-
2462
- Examples
2463
- --------
2464
- import asyncio
2465
-
2466
- from letta_client import AsyncLetta, MessageCreate, TextContent
2467
-
2468
- client = AsyncLetta(
2469
- project="YOUR_PROJECT",
2470
- token="YOUR_TOKEN",
2471
- )
2472
-
2473
-
2474
- async def main() -> None:
2475
- await client.agents.preview_raw_payload(
2476
- agent_id="agent_id",
2477
- messages=[
2478
- MessageCreate(
2479
- role="user",
2480
- content=[
2481
- TextContent(
2482
- text="text",
2483
- )
2484
- ],
2485
- )
2486
- ],
2487
- )
2488
-
2489
-
2490
- asyncio.run(main())
2491
- """
2492
- _response = await self._client_wrapper.httpx_client.request(
2493
- f"v1/agents/{jsonable_encoder(agent_id)}/messages/preview-raw-payload",
2494
- method="POST",
2495
- json={
2496
- "messages": convert_and_respect_annotation_metadata(
2497
- object_=messages, annotation=typing.Sequence[MessageCreate], direction="write"
2498
- ),
2499
- "max_steps": max_steps,
2500
- "use_assistant_message": use_assistant_message,
2501
- "assistant_message_tool_name": assistant_message_tool_name,
2502
- "assistant_message_tool_kwarg": assistant_message_tool_kwarg,
2503
- "include_return_message_types": include_return_message_types,
2504
- },
2505
- request_options=request_options,
2506
- omit=OMIT,
2507
- )
2508
- try:
2509
- if 200 <= _response.status_code < 300:
2510
- return typing.cast(
2511
- typing.Dict[str, typing.Optional[typing.Any]],
2512
- construct_type(
2513
- type_=typing.Dict[str, typing.Optional[typing.Any]], # type: ignore
2514
- object_=_response.json(),
2515
- ),
2516
- )
2517
- if _response.status_code == 422:
2518
- raise UnprocessableEntityError(
2519
- typing.cast(
2520
- HttpValidationError,
2521
- construct_type(
2522
- type_=HttpValidationError, # type: ignore
2523
- object_=_response.json(),
2524
- ),
2525
- )
2526
- )
2527
- _response_json = _response.json()
2528
- except JSONDecodeError:
2529
- raise ApiError(status_code=_response.status_code, body=_response.text)
2530
- raise ApiError(status_code=_response.status_code, body=_response_json)
2531
-
2532
2301
  async def summarize_agent_conversation(
2533
2302
  self, agent_id: str, *, max_message_length: int, request_options: typing.Optional[RequestOptions] = None
2534
2303
  ) -> AgentState:
@@ -1,5 +1,15 @@
1
1
  # This file was auto-generated by Fern from our API Definition.
2
2
 
3
- from .types import LettaStreamingResponse, MessagesModifyRequest, MessagesModifyResponse
3
+ from .types import (
4
+ LettaStreamingResponse,
5
+ MessagesModifyRequest,
6
+ MessagesModifyResponse,
7
+ MessagesPreviewRawPayloadRequest,
8
+ )
4
9
 
5
- __all__ = ["LettaStreamingResponse", "MessagesModifyRequest", "MessagesModifyResponse"]
10
+ __all__ = [
11
+ "LettaStreamingResponse",
12
+ "MessagesModifyRequest",
13
+ "MessagesModifyResponse",
14
+ "MessagesPreviewRawPayloadRequest",
15
+ ]
@@ -21,6 +21,7 @@ import httpx_sse
21
21
  import json
22
22
  from ...types.run import Run
23
23
  from ...types.agent_state import AgentState
24
+ from .types.messages_preview_raw_payload_request import MessagesPreviewRawPayloadRequest
24
25
  from ...core.client_wrapper import AsyncClientWrapper
25
26
 
26
27
  # this is used as the default value for optional parameters
@@ -694,6 +695,91 @@ class MessagesClient:
694
695
  raise ApiError(status_code=_response.status_code, body=_response.text)
695
696
  raise ApiError(status_code=_response.status_code, body=_response_json)
696
697
 
698
+ def preview_raw_payload(
699
+ self,
700
+ agent_id: str,
701
+ *,
702
+ request: MessagesPreviewRawPayloadRequest,
703
+ request_options: typing.Optional[RequestOptions] = None,
704
+ ) -> typing.Dict[str, typing.Optional[typing.Any]]:
705
+ """
706
+ Inspect the raw LLM request payload without sending it.
707
+
708
+ This endpoint processes the message through the agent loop up until
709
+ the LLM request, then returns the raw request payload that would
710
+ be sent to the LLM provider. Useful for debugging and inspection.
711
+
712
+ Parameters
713
+ ----------
714
+ agent_id : str
715
+
716
+ request : MessagesPreviewRawPayloadRequest
717
+
718
+ request_options : typing.Optional[RequestOptions]
719
+ Request-specific configuration.
720
+
721
+ Returns
722
+ -------
723
+ typing.Dict[str, typing.Optional[typing.Any]]
724
+ Successful Response
725
+
726
+ Examples
727
+ --------
728
+ from letta_client import Letta, LettaRequest, MessageCreate, TextContent
729
+
730
+ client = Letta(
731
+ project="YOUR_PROJECT",
732
+ token="YOUR_TOKEN",
733
+ )
734
+ client.agents.messages.preview_raw_payload(
735
+ agent_id="agent_id",
736
+ request=LettaRequest(
737
+ messages=[
738
+ MessageCreate(
739
+ role="user",
740
+ content=[
741
+ TextContent(
742
+ text="text",
743
+ )
744
+ ],
745
+ )
746
+ ],
747
+ ),
748
+ )
749
+ """
750
+ _response = self._client_wrapper.httpx_client.request(
751
+ f"v1/agents/{jsonable_encoder(agent_id)}/messages/preview-raw-payload",
752
+ method="POST",
753
+ json=convert_and_respect_annotation_metadata(
754
+ object_=request, annotation=MessagesPreviewRawPayloadRequest, direction="write"
755
+ ),
756
+ request_options=request_options,
757
+ omit=OMIT,
758
+ )
759
+ try:
760
+ if 200 <= _response.status_code < 300:
761
+ return typing.cast(
762
+ typing.Dict[str, typing.Optional[typing.Any]],
763
+ construct_type(
764
+ type_=typing.Dict[str, typing.Optional[typing.Any]], # type: ignore
765
+ object_=_response.json(),
766
+ ),
767
+ )
768
+ if _response.status_code == 422:
769
+ raise UnprocessableEntityError(
770
+ typing.cast(
771
+ HttpValidationError,
772
+ construct_type(
773
+ type_=HttpValidationError, # type: ignore
774
+ object_=_response.json(),
775
+ ),
776
+ )
777
+ )
778
+ _response_json = _response.json()
779
+ except JSONDecodeError:
780
+ raise ApiError(status_code=_response.status_code, body=_response.text)
781
+ raise ApiError(status_code=_response.status_code, body=_response_json)
782
+
697
783
 
698
784
  class AsyncMessagesClient:
699
785
  def __init__(self, *, client_wrapper: AsyncClientWrapper):
@@ -1417,3 +1503,96 @@ class AsyncMessagesClient:
1417
1503
  except JSONDecodeError:
1418
1504
  raise ApiError(status_code=_response.status_code, body=_response.text)
1419
1505
  raise ApiError(status_code=_response.status_code, body=_response_json)
1506
+
1507
+ async def preview_raw_payload(
1508
+ self,
1509
+ agent_id: str,
1510
+ *,
1511
+ request: MessagesPreviewRawPayloadRequest,
1512
+ request_options: typing.Optional[RequestOptions] = None,
1513
+ ) -> typing.Dict[str, typing.Optional[typing.Any]]:
1514
+ """
1515
+ Inspect the raw LLM request payload without sending it.
1516
+
1517
+ This endpoint processes the message through the agent loop up until
1518
+ the LLM request, then returns the raw request payload that would
1519
+ be sent to the LLM provider. Useful for debugging and inspection.
1520
+
1521
+ Parameters
1522
+ ----------
1523
+ agent_id : str
1524
+
1525
+ request : MessagesPreviewRawPayloadRequest
1526
+
1527
+ request_options : typing.Optional[RequestOptions]
1528
+ Request-specific configuration.
1529
+
1530
+ Returns
1531
+ -------
1532
+ typing.Dict[str, typing.Optional[typing.Any]]
1533
+ Successful Response
1534
+
1535
+ Examples
1536
+ --------
1537
+ import asyncio
1538
+
1539
+ from letta_client import AsyncLetta, LettaRequest, MessageCreate, TextContent
1540
+
1541
+ client = AsyncLetta(
1542
+ project="YOUR_PROJECT",
1543
+ token="YOUR_TOKEN",
1544
+ )
1545
+
1546
+
1547
+ async def main() -> None:
1548
+ await client.agents.messages.preview_raw_payload(
1549
+ agent_id="agent_id",
1550
+ request=LettaRequest(
1551
+ messages=[
1552
+ MessageCreate(
1553
+ role="user",
1554
+ content=[
1555
+ TextContent(
1556
+ text="text",
1557
+ )
1558
+ ],
1559
+ )
1560
+ ],
1561
+ ),
1562
+ )
1563
+
1564
+
1565
+ asyncio.run(main())
1566
+ """
1567
+ _response = await self._client_wrapper.httpx_client.request(
1568
+ f"v1/agents/{jsonable_encoder(agent_id)}/messages/preview-raw-payload",
1569
+ method="POST",
1570
+ json=convert_and_respect_annotation_metadata(
1571
+ object_=request, annotation=MessagesPreviewRawPayloadRequest, direction="write"
1572
+ ),
1573
+ request_options=request_options,
1574
+ omit=OMIT,
1575
+ )
1576
+ try:
1577
+ if 200 <= _response.status_code < 300:
1578
+ return typing.cast(
1579
+ typing.Dict[str, typing.Optional[typing.Any]],
1580
+ construct_type(
1581
+ type_=typing.Dict[str, typing.Optional[typing.Any]], # type: ignore
1582
+ object_=_response.json(),
1583
+ ),
1584
+ )
1585
+ if _response.status_code == 422:
1586
+ raise UnprocessableEntityError(
1587
+ typing.cast(
1588
+ HttpValidationError,
1589
+ construct_type(
1590
+ type_=HttpValidationError, # type: ignore
1591
+ object_=_response.json(),
1592
+ ),
1593
+ )
1594
+ )
1595
+ _response_json = _response.json()
1596
+ except JSONDecodeError:
1597
+ raise ApiError(status_code=_response.status_code, body=_response.text)
1598
+ raise ApiError(status_code=_response.status_code, body=_response_json)
@@ -3,5 +3,11 @@
3
3
  from .letta_streaming_response import LettaStreamingResponse
4
4
  from .messages_modify_request import MessagesModifyRequest
5
5
  from .messages_modify_response import MessagesModifyResponse
6
+ from .messages_preview_raw_payload_request import MessagesPreviewRawPayloadRequest
6
7
 
7
- __all__ = ["LettaStreamingResponse", "MessagesModifyRequest", "MessagesModifyResponse"]
8
+ __all__ = [
9
+ "LettaStreamingResponse",
10
+ "MessagesModifyRequest",
11
+ "MessagesModifyResponse",
12
+ "MessagesPreviewRawPayloadRequest",
13
+ ]
@@ -0,0 +1,7 @@
1
+ # This file was auto-generated by Fern from our API Definition.
2
+
3
+ import typing
4
+ from ....types.letta_request import LettaRequest
5
+ from ....types.letta_streaming_request import LettaStreamingRequest
6
+
7
+ MessagesPreviewRawPayloadRequest = typing.Union[LettaRequest, LettaStreamingRequest]
@@ -24,7 +24,7 @@ class BaseClientWrapper:
24
24
  headers: typing.Dict[str, str] = {
25
25
  "X-Fern-Language": "Python",
26
26
  "X-Fern-SDK-Name": "letta-client",
27
- "X-Fern-SDK-Version": "0.1.196",
27
+ "X-Fern-SDK-Version": "0.1.197",
28
28
  }
29
29
  if self._project is not None:
30
30
  headers["X-Project"] = self._project
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: letta-client
3
- Version: 0.1.196
3
+ Version: 0.1.197
4
4
  Summary:
5
5
  Requires-Python: >=3.8,<4.0
6
6
  Classifier: Intended Audience :: Developers
@@ -1,8 +1,8 @@
1
1
  letta_client/__init__.py,sha256=PaPQ6XnP5KGKv2jElXjj6d483LOtbaRGJu3n88rW1WA,18256
2
- letta_client/agents/__init__.py,sha256=9L60SAZIihZzh_KhVxu0uX4RS7z2iKKctzQsS8ycXHc,1954
2
+ letta_client/agents/__init__.py,sha256=i9PmBueIWESDLqmpzWt1oZVgZNr1rNkO6j0pl5sgvGo,2049
3
3
  letta_client/agents/blocks/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-_v1roeGPAKoa_pw,65
4
4
  letta_client/agents/blocks/client.py,sha256=4UGPYxfGwNN3ZW-SkIdfVZK6cvCcumVAw0_AM8OmoBY,25046
5
- letta_client/agents/client.py,sha256=yjzXAxIPz8u0zdRzH4ubZT7Bef1QwM6zDVWbMwCDBC8,102846
5
+ letta_client/agents/client.py,sha256=B7ZsJGQqmeTUdP8yybRWh6qaoybopFCujzP99EDwk_k,93753
6
6
  letta_client/agents/context/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-_v1roeGPAKoa_pw,65
7
7
  letta_client/agents/context/client.py,sha256=O1gxStQyfzXi4MblatWalLTWM425gS_fndW3W_es08U,4887
8
8
  letta_client/agents/core_memory/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-_v1roeGPAKoa_pw,65
@@ -13,12 +13,13 @@ letta_client/agents/memory_variables/__init__.py,sha256=goz3kTaLM-v8g-hYEhzyqBYz
13
13
  letta_client/agents/memory_variables/client.py,sha256=DGJvV5k5H-BRE-FWMLNrCqKkHJJuMK9P4K7ncJis8PU,5037
14
14
  letta_client/agents/memory_variables/types/__init__.py,sha256=EoznK0WvhCyFYd4KDdU-cGDQWpSXmq79BSkqVHN-j7A,180
15
15
  letta_client/agents/memory_variables/types/memory_variables_list_response.py,sha256=bsF__n_B4ZXEHzg--OVD6tHHXt_aM-FjHm2x1ZXPnL0,599
16
- letta_client/agents/messages/__init__.py,sha256=M7Ar6Rmb8we4dfYE6jj3FCL9UvVFy1bNQIPflUXMWHA,243
17
- letta_client/agents/messages/client.py,sha256=ZZ6m0PQGvgpkN4gp0N4ouVhqaCMQ8BGKWpaik9Q6F3E,51271
18
- letta_client/agents/messages/types/__init__.py,sha256=Oc2j0oGOs96IEFf9xsJIkjBjoq3OMtse64YwWv3F9Io,335
16
+ letta_client/agents/messages/__init__.py,sha256=RSmlezGH90RirElX0LHusjD03EN6UqGsrS-A0Bue2hA,353
17
+ letta_client/agents/messages/client.py,sha256=dlVqB2PO29NgVhKvZr5vWtrcDsfCrgIh5C1CVeJ5cRM,57596
18
+ letta_client/agents/messages/types/__init__.py,sha256=uDn0e1-EJ7g2U0EHD8SkP38liALoVCTJ7bJhUAOutwE,473
19
19
  letta_client/agents/messages/types/letta_streaming_response.py,sha256=8VR2F32xjoPFXL4YBvBbAZclaJG4ENPTjk7BrlZkmtw,742
20
20
  letta_client/agents/messages/types/messages_modify_request.py,sha256=7C2X3BKye-YDSXOkdEmxxt34seI4jkLK0-govtc4nhg,475
21
21
  letta_client/agents/messages/types/messages_modify_response.py,sha256=THyiUMxZyzVSp0kk1s0XOLW1LUass7mXcfFER1PTLyw,671
22
+ letta_client/agents/messages/types/messages_preview_raw_payload_request.py,sha256=Lat4Nj4LETdWswrF9PJfDZVw23KvQGVUvw9JXXPYY8I,282
22
23
  letta_client/agents/passages/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-_v1roeGPAKoa_pw,65
23
24
  letta_client/agents/passages/client.py,sha256=XgEzJvHv5yOFK--W5mU3Hgqv5-mmlVRLPOb3aQ1QOUA,26434
24
25
  letta_client/agents/sources/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-_v1roeGPAKoa_pw,65
@@ -65,7 +66,7 @@ letta_client/client_side_access_tokens/types/client_side_access_tokens_create_re
65
66
  letta_client/client_side_access_tokens/types/client_side_access_tokens_create_response_policy_data_item_access_item.py,sha256=R-H25IpNp9feSrW8Yj3h9O3UTMVvFniQJElogKxLuoE,254
66
67
  letta_client/core/__init__.py,sha256=OKbX2aCZXgHCDUsCouqv-OiX32xA6eFFCKIUH9M5Vzk,1591
67
68
  letta_client/core/api_error.py,sha256=RE8LELok2QCjABadECTvtDp7qejA1VmINCh6TbqPwSE,426
68
- letta_client/core/client_wrapper.py,sha256=M4wzmiGIyliMl5J4ktQr7yvZyNx480SW0k8ahbdETto,2336
69
+ letta_client/core/client_wrapper.py,sha256=xUxNRGy3q-2inkpv1DjYNocNzmS0b9YoeGy6NAntVIs,2336
69
70
  letta_client/core/datetime_utils.py,sha256=nBys2IsYrhPdszxGKCNRPSOCwa-5DWOHG95FB8G9PKo,1047
70
71
  letta_client/core/file.py,sha256=d4NNbX8XvXP32z8KpK2Xovv33nFfruIrpz0QWxlgpZk,2663
71
72
  letta_client/core/http_client.py,sha256=Z77OIxIbL4OAB2IDqjRq_sYa5yNYAWfmdhdCSSvh6Y4,19552
@@ -418,6 +419,6 @@ letta_client/types/web_search_options_user_location_approximate.py,sha256=Ywk01J
418
419
  letta_client/version.py,sha256=bttKLbIhO3UonCYQlqs600zzbQgfhCCMjeXR9WRzid4,79
419
420
  letta_client/voice/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-_v1roeGPAKoa_pw,65
420
421
  letta_client/voice/client.py,sha256=47iQYCuW_qpKI4hM3pYVxn3hw7kgQj3emU1_oRpkRMA,5811
421
- letta_client-0.1.196.dist-info/METADATA,sha256=HAjEGfvqyk0MiQTDjvv6hDh8Q8Epvwj-B-kgII73P1Y,5177
422
- letta_client-0.1.196.dist-info/WHEEL,sha256=Zb28QaM1gQi8f4VCBhsUklF61CTlNYfs9YAZn-TOGFk,88
423
- letta_client-0.1.196.dist-info/RECORD,,
422
+ letta_client-0.1.197.dist-info/METADATA,sha256=zleshuJ2dHoLmE0SOmUMFd6IM9Znmj4OFQutMG28yP8,5177
423
+ letta_client-0.1.197.dist-info/WHEEL,sha256=Zb28QaM1gQi8f4VCBhsUklF61CTlNYfs9YAZn-TOGFk,88
424
+ letta_client-0.1.197.dist-info/RECORD,,