letta-client 0.1.116__py3-none-any.whl → 0.1.117__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of letta-client might be problematic. Click here for more details.
- letta_client/__init__.py +4 -2
- letta_client/agents/client.py +0 -293
- letta_client/base_client.py +4 -0
- letta_client/core/client_wrapper.py +1 -1
- letta_client/messages/__init__.py +5 -0
- letta_client/messages/batches/__init__.py +2 -0
- letta_client/messages/batches/client.py +521 -0
- letta_client/messages/client.py +150 -0
- letta_client/types/__init__.py +2 -2
- letta_client/types/batch_job.py +61 -0
- letta_client/types/job_type.py +1 -1
- {letta_client-0.1.116.dist-info → letta_client-0.1.117.dist-info}/METADATA +1 -1
- {letta_client-0.1.116.dist-info → letta_client-0.1.117.dist-info}/RECORD +14 -10
- letta_client/types/letta_batch_response.py +0 -44
- {letta_client-0.1.116.dist-info → letta_client-0.1.117.dist-info}/WHEEL +0 -0
letta_client/__init__.py
CHANGED
|
@@ -21,6 +21,7 @@ from .types import (
|
|
|
21
21
|
AuthSchemeField,
|
|
22
22
|
BadRequestErrorBody,
|
|
23
23
|
BaseToolRuleSchema,
|
|
24
|
+
BatchJob,
|
|
24
25
|
Block,
|
|
25
26
|
BlockUpdate,
|
|
26
27
|
ChatCompletionAssistantMessageParam,
|
|
@@ -115,7 +116,6 @@ from .types import (
|
|
|
115
116
|
JobType,
|
|
116
117
|
JsonSchema,
|
|
117
118
|
LettaBatchRequest,
|
|
118
|
-
LettaBatchResponse,
|
|
119
119
|
LettaMessageContentUnion,
|
|
120
120
|
LettaMessageUnion,
|
|
121
121
|
LettaRequest,
|
|
@@ -230,6 +230,7 @@ from . import (
|
|
|
230
230
|
health,
|
|
231
231
|
identities,
|
|
232
232
|
jobs,
|
|
233
|
+
messages,
|
|
233
234
|
models,
|
|
234
235
|
projects,
|
|
235
236
|
providers,
|
|
@@ -310,6 +311,7 @@ __all__ = [
|
|
|
310
311
|
"BadRequestError",
|
|
311
312
|
"BadRequestErrorBody",
|
|
312
313
|
"BaseToolRuleSchema",
|
|
314
|
+
"BatchJob",
|
|
313
315
|
"Block",
|
|
314
316
|
"BlockUpdate",
|
|
315
317
|
"ChatCompletionAssistantMessageParam",
|
|
@@ -418,7 +420,6 @@ __all__ = [
|
|
|
418
420
|
"JsonSchema",
|
|
419
421
|
"Letta",
|
|
420
422
|
"LettaBatchRequest",
|
|
421
|
-
"LettaBatchResponse",
|
|
422
423
|
"LettaEnvironment",
|
|
423
424
|
"LettaMessageContentUnion",
|
|
424
425
|
"LettaMessageUnion",
|
|
@@ -541,6 +542,7 @@ __all__ = [
|
|
|
541
542
|
"health",
|
|
542
543
|
"identities",
|
|
543
544
|
"jobs",
|
|
545
|
+
"messages",
|
|
544
546
|
"models",
|
|
545
547
|
"projects",
|
|
546
548
|
"providers",
|
letta_client/agents/client.py
CHANGED
|
@@ -31,8 +31,6 @@ from .types.update_agent_tool_rules_item import UpdateAgentToolRulesItem
|
|
|
31
31
|
import datetime as dt
|
|
32
32
|
from ..types.passage import Passage
|
|
33
33
|
from ..types.group import Group
|
|
34
|
-
from ..types.letta_batch_request import LettaBatchRequest
|
|
35
|
-
from ..types.letta_batch_response import LettaBatchResponse
|
|
36
34
|
from .types.agents_search_request_search_item import AgentsSearchRequestSearchItem
|
|
37
35
|
from .types.agents_search_response import AgentsSearchResponse
|
|
38
36
|
from ..core.client_wrapper import AsyncClientWrapper
|
|
@@ -1160,141 +1158,6 @@ class AgentsClient:
|
|
|
1160
1158
|
raise ApiError(status_code=_response.status_code, body=_response.text)
|
|
1161
1159
|
raise ApiError(status_code=_response.status_code, body=_response_json)
|
|
1162
1160
|
|
|
1163
|
-
def create_batch_message_request(
|
|
1164
|
-
self, *, request: typing.Sequence[LettaBatchRequest], request_options: typing.Optional[RequestOptions] = None
|
|
1165
|
-
) -> LettaBatchResponse:
|
|
1166
|
-
"""
|
|
1167
|
-
Submit a batch of agent messages for asynchronous processing.
|
|
1168
|
-
Creates a job that will fan out messages to all listed agents and process them in parallel.
|
|
1169
|
-
|
|
1170
|
-
Parameters
|
|
1171
|
-
----------
|
|
1172
|
-
request : typing.Sequence[LettaBatchRequest]
|
|
1173
|
-
|
|
1174
|
-
request_options : typing.Optional[RequestOptions]
|
|
1175
|
-
Request-specific configuration.
|
|
1176
|
-
|
|
1177
|
-
Returns
|
|
1178
|
-
-------
|
|
1179
|
-
LettaBatchResponse
|
|
1180
|
-
Successful Response
|
|
1181
|
-
|
|
1182
|
-
Examples
|
|
1183
|
-
--------
|
|
1184
|
-
from letta_client import Letta, LettaBatchRequest, MessageCreate, TextContent
|
|
1185
|
-
|
|
1186
|
-
client = Letta(
|
|
1187
|
-
token="YOUR_TOKEN",
|
|
1188
|
-
)
|
|
1189
|
-
client.agents.create_batch_message_request(
|
|
1190
|
-
request=[
|
|
1191
|
-
LettaBatchRequest(
|
|
1192
|
-
messages=[
|
|
1193
|
-
MessageCreate(
|
|
1194
|
-
role="user",
|
|
1195
|
-
content=[
|
|
1196
|
-
TextContent(
|
|
1197
|
-
text="text",
|
|
1198
|
-
)
|
|
1199
|
-
],
|
|
1200
|
-
)
|
|
1201
|
-
],
|
|
1202
|
-
agent_id="agent_id",
|
|
1203
|
-
)
|
|
1204
|
-
],
|
|
1205
|
-
)
|
|
1206
|
-
"""
|
|
1207
|
-
_response = self._client_wrapper.httpx_client.request(
|
|
1208
|
-
"v1/agents/messages/batches",
|
|
1209
|
-
method="POST",
|
|
1210
|
-
json=convert_and_respect_annotation_metadata(
|
|
1211
|
-
object_=request, annotation=typing.Sequence[LettaBatchRequest], direction="write"
|
|
1212
|
-
),
|
|
1213
|
-
request_options=request_options,
|
|
1214
|
-
omit=OMIT,
|
|
1215
|
-
)
|
|
1216
|
-
try:
|
|
1217
|
-
if 200 <= _response.status_code < 300:
|
|
1218
|
-
return typing.cast(
|
|
1219
|
-
LettaBatchResponse,
|
|
1220
|
-
construct_type(
|
|
1221
|
-
type_=LettaBatchResponse, # type: ignore
|
|
1222
|
-
object_=_response.json(),
|
|
1223
|
-
),
|
|
1224
|
-
)
|
|
1225
|
-
if _response.status_code == 422:
|
|
1226
|
-
raise UnprocessableEntityError(
|
|
1227
|
-
typing.cast(
|
|
1228
|
-
HttpValidationError,
|
|
1229
|
-
construct_type(
|
|
1230
|
-
type_=HttpValidationError, # type: ignore
|
|
1231
|
-
object_=_response.json(),
|
|
1232
|
-
),
|
|
1233
|
-
)
|
|
1234
|
-
)
|
|
1235
|
-
_response_json = _response.json()
|
|
1236
|
-
except JSONDecodeError:
|
|
1237
|
-
raise ApiError(status_code=_response.status_code, body=_response.text)
|
|
1238
|
-
raise ApiError(status_code=_response.status_code, body=_response_json)
|
|
1239
|
-
|
|
1240
|
-
def retrieve_batch_message_request(
|
|
1241
|
-
self, batch_id: str, *, request_options: typing.Optional[RequestOptions] = None
|
|
1242
|
-
) -> LettaBatchResponse:
|
|
1243
|
-
"""
|
|
1244
|
-
Retrieve the result or current status of a previously submitted batch message request.
|
|
1245
|
-
|
|
1246
|
-
Parameters
|
|
1247
|
-
----------
|
|
1248
|
-
batch_id : str
|
|
1249
|
-
|
|
1250
|
-
request_options : typing.Optional[RequestOptions]
|
|
1251
|
-
Request-specific configuration.
|
|
1252
|
-
|
|
1253
|
-
Returns
|
|
1254
|
-
-------
|
|
1255
|
-
LettaBatchResponse
|
|
1256
|
-
Successful Response
|
|
1257
|
-
|
|
1258
|
-
Examples
|
|
1259
|
-
--------
|
|
1260
|
-
from letta_client import Letta
|
|
1261
|
-
|
|
1262
|
-
client = Letta(
|
|
1263
|
-
token="YOUR_TOKEN",
|
|
1264
|
-
)
|
|
1265
|
-
client.agents.retrieve_batch_message_request(
|
|
1266
|
-
batch_id="batch_id",
|
|
1267
|
-
)
|
|
1268
|
-
"""
|
|
1269
|
-
_response = self._client_wrapper.httpx_client.request(
|
|
1270
|
-
f"v1/agents/messages/batches/{jsonable_encoder(batch_id)}",
|
|
1271
|
-
method="GET",
|
|
1272
|
-
request_options=request_options,
|
|
1273
|
-
)
|
|
1274
|
-
try:
|
|
1275
|
-
if 200 <= _response.status_code < 300:
|
|
1276
|
-
return typing.cast(
|
|
1277
|
-
LettaBatchResponse,
|
|
1278
|
-
construct_type(
|
|
1279
|
-
type_=LettaBatchResponse, # type: ignore
|
|
1280
|
-
object_=_response.json(),
|
|
1281
|
-
),
|
|
1282
|
-
)
|
|
1283
|
-
if _response.status_code == 422:
|
|
1284
|
-
raise UnprocessableEntityError(
|
|
1285
|
-
typing.cast(
|
|
1286
|
-
HttpValidationError,
|
|
1287
|
-
construct_type(
|
|
1288
|
-
type_=HttpValidationError, # type: ignore
|
|
1289
|
-
object_=_response.json(),
|
|
1290
|
-
),
|
|
1291
|
-
)
|
|
1292
|
-
)
|
|
1293
|
-
_response_json = _response.json()
|
|
1294
|
-
except JSONDecodeError:
|
|
1295
|
-
raise ApiError(status_code=_response.status_code, body=_response.text)
|
|
1296
|
-
raise ApiError(status_code=_response.status_code, body=_response_json)
|
|
1297
|
-
|
|
1298
1161
|
def search(
|
|
1299
1162
|
self,
|
|
1300
1163
|
*,
|
|
@@ -2564,162 +2427,6 @@ class AsyncAgentsClient:
|
|
|
2564
2427
|
raise ApiError(status_code=_response.status_code, body=_response.text)
|
|
2565
2428
|
raise ApiError(status_code=_response.status_code, body=_response_json)
|
|
2566
2429
|
|
|
2567
|
-
async def create_batch_message_request(
|
|
2568
|
-
self, *, request: typing.Sequence[LettaBatchRequest], request_options: typing.Optional[RequestOptions] = None
|
|
2569
|
-
) -> LettaBatchResponse:
|
|
2570
|
-
"""
|
|
2571
|
-
Submit a batch of agent messages for asynchronous processing.
|
|
2572
|
-
Creates a job that will fan out messages to all listed agents and process them in parallel.
|
|
2573
|
-
|
|
2574
|
-
Parameters
|
|
2575
|
-
----------
|
|
2576
|
-
request : typing.Sequence[LettaBatchRequest]
|
|
2577
|
-
|
|
2578
|
-
request_options : typing.Optional[RequestOptions]
|
|
2579
|
-
Request-specific configuration.
|
|
2580
|
-
|
|
2581
|
-
Returns
|
|
2582
|
-
-------
|
|
2583
|
-
LettaBatchResponse
|
|
2584
|
-
Successful Response
|
|
2585
|
-
|
|
2586
|
-
Examples
|
|
2587
|
-
--------
|
|
2588
|
-
import asyncio
|
|
2589
|
-
|
|
2590
|
-
from letta_client import (
|
|
2591
|
-
AsyncLetta,
|
|
2592
|
-
LettaBatchRequest,
|
|
2593
|
-
MessageCreate,
|
|
2594
|
-
TextContent,
|
|
2595
|
-
)
|
|
2596
|
-
|
|
2597
|
-
client = AsyncLetta(
|
|
2598
|
-
token="YOUR_TOKEN",
|
|
2599
|
-
)
|
|
2600
|
-
|
|
2601
|
-
|
|
2602
|
-
async def main() -> None:
|
|
2603
|
-
await client.agents.create_batch_message_request(
|
|
2604
|
-
request=[
|
|
2605
|
-
LettaBatchRequest(
|
|
2606
|
-
messages=[
|
|
2607
|
-
MessageCreate(
|
|
2608
|
-
role="user",
|
|
2609
|
-
content=[
|
|
2610
|
-
TextContent(
|
|
2611
|
-
text="text",
|
|
2612
|
-
)
|
|
2613
|
-
],
|
|
2614
|
-
)
|
|
2615
|
-
],
|
|
2616
|
-
agent_id="agent_id",
|
|
2617
|
-
)
|
|
2618
|
-
],
|
|
2619
|
-
)
|
|
2620
|
-
|
|
2621
|
-
|
|
2622
|
-
asyncio.run(main())
|
|
2623
|
-
"""
|
|
2624
|
-
_response = await self._client_wrapper.httpx_client.request(
|
|
2625
|
-
"v1/agents/messages/batches",
|
|
2626
|
-
method="POST",
|
|
2627
|
-
json=convert_and_respect_annotation_metadata(
|
|
2628
|
-
object_=request, annotation=typing.Sequence[LettaBatchRequest], direction="write"
|
|
2629
|
-
),
|
|
2630
|
-
request_options=request_options,
|
|
2631
|
-
omit=OMIT,
|
|
2632
|
-
)
|
|
2633
|
-
try:
|
|
2634
|
-
if 200 <= _response.status_code < 300:
|
|
2635
|
-
return typing.cast(
|
|
2636
|
-
LettaBatchResponse,
|
|
2637
|
-
construct_type(
|
|
2638
|
-
type_=LettaBatchResponse, # type: ignore
|
|
2639
|
-
object_=_response.json(),
|
|
2640
|
-
),
|
|
2641
|
-
)
|
|
2642
|
-
if _response.status_code == 422:
|
|
2643
|
-
raise UnprocessableEntityError(
|
|
2644
|
-
typing.cast(
|
|
2645
|
-
HttpValidationError,
|
|
2646
|
-
construct_type(
|
|
2647
|
-
type_=HttpValidationError, # type: ignore
|
|
2648
|
-
object_=_response.json(),
|
|
2649
|
-
),
|
|
2650
|
-
)
|
|
2651
|
-
)
|
|
2652
|
-
_response_json = _response.json()
|
|
2653
|
-
except JSONDecodeError:
|
|
2654
|
-
raise ApiError(status_code=_response.status_code, body=_response.text)
|
|
2655
|
-
raise ApiError(status_code=_response.status_code, body=_response_json)
|
|
2656
|
-
|
|
2657
|
-
async def retrieve_batch_message_request(
|
|
2658
|
-
self, batch_id: str, *, request_options: typing.Optional[RequestOptions] = None
|
|
2659
|
-
) -> LettaBatchResponse:
|
|
2660
|
-
"""
|
|
2661
|
-
Retrieve the result or current status of a previously submitted batch message request.
|
|
2662
|
-
|
|
2663
|
-
Parameters
|
|
2664
|
-
----------
|
|
2665
|
-
batch_id : str
|
|
2666
|
-
|
|
2667
|
-
request_options : typing.Optional[RequestOptions]
|
|
2668
|
-
Request-specific configuration.
|
|
2669
|
-
|
|
2670
|
-
Returns
|
|
2671
|
-
-------
|
|
2672
|
-
LettaBatchResponse
|
|
2673
|
-
Successful Response
|
|
2674
|
-
|
|
2675
|
-
Examples
|
|
2676
|
-
--------
|
|
2677
|
-
import asyncio
|
|
2678
|
-
|
|
2679
|
-
from letta_client import AsyncLetta
|
|
2680
|
-
|
|
2681
|
-
client = AsyncLetta(
|
|
2682
|
-
token="YOUR_TOKEN",
|
|
2683
|
-
)
|
|
2684
|
-
|
|
2685
|
-
|
|
2686
|
-
async def main() -> None:
|
|
2687
|
-
await client.agents.retrieve_batch_message_request(
|
|
2688
|
-
batch_id="batch_id",
|
|
2689
|
-
)
|
|
2690
|
-
|
|
2691
|
-
|
|
2692
|
-
asyncio.run(main())
|
|
2693
|
-
"""
|
|
2694
|
-
_response = await self._client_wrapper.httpx_client.request(
|
|
2695
|
-
f"v1/agents/messages/batches/{jsonable_encoder(batch_id)}",
|
|
2696
|
-
method="GET",
|
|
2697
|
-
request_options=request_options,
|
|
2698
|
-
)
|
|
2699
|
-
try:
|
|
2700
|
-
if 200 <= _response.status_code < 300:
|
|
2701
|
-
return typing.cast(
|
|
2702
|
-
LettaBatchResponse,
|
|
2703
|
-
construct_type(
|
|
2704
|
-
type_=LettaBatchResponse, # type: ignore
|
|
2705
|
-
object_=_response.json(),
|
|
2706
|
-
),
|
|
2707
|
-
)
|
|
2708
|
-
if _response.status_code == 422:
|
|
2709
|
-
raise UnprocessableEntityError(
|
|
2710
|
-
typing.cast(
|
|
2711
|
-
HttpValidationError,
|
|
2712
|
-
construct_type(
|
|
2713
|
-
type_=HttpValidationError, # type: ignore
|
|
2714
|
-
object_=_response.json(),
|
|
2715
|
-
),
|
|
2716
|
-
)
|
|
2717
|
-
)
|
|
2718
|
-
_response_json = _response.json()
|
|
2719
|
-
except JSONDecodeError:
|
|
2720
|
-
raise ApiError(status_code=_response.status_code, body=_response.text)
|
|
2721
|
-
raise ApiError(status_code=_response.status_code, body=_response_json)
|
|
2722
|
-
|
|
2723
2430
|
async def search(
|
|
2724
2431
|
self,
|
|
2725
2432
|
*,
|
letta_client/base_client.py
CHANGED
|
@@ -17,6 +17,7 @@ from .providers.client import ProvidersClient
|
|
|
17
17
|
from .runs.client import RunsClient
|
|
18
18
|
from .steps.client import StepsClient
|
|
19
19
|
from .tag.client import TagClient
|
|
20
|
+
from .messages.client import MessagesClient
|
|
20
21
|
from .voice.client import VoiceClient
|
|
21
22
|
from .templates.client import TemplatesClient
|
|
22
23
|
from .client_side_access_tokens.client import ClientSideAccessTokensClient
|
|
@@ -35,6 +36,7 @@ from .providers.client import AsyncProvidersClient
|
|
|
35
36
|
from .runs.client import AsyncRunsClient
|
|
36
37
|
from .steps.client import AsyncStepsClient
|
|
37
38
|
from .tag.client import AsyncTagClient
|
|
39
|
+
from .messages.client import AsyncMessagesClient
|
|
38
40
|
from .voice.client import AsyncVoiceClient
|
|
39
41
|
from .templates.client import AsyncTemplatesClient
|
|
40
42
|
from .client_side_access_tokens.client import AsyncClientSideAccessTokensClient
|
|
@@ -112,6 +114,7 @@ class LettaBase:
|
|
|
112
114
|
self.runs = RunsClient(client_wrapper=self._client_wrapper)
|
|
113
115
|
self.steps = StepsClient(client_wrapper=self._client_wrapper)
|
|
114
116
|
self.tag = TagClient(client_wrapper=self._client_wrapper)
|
|
117
|
+
self.messages = MessagesClient(client_wrapper=self._client_wrapper)
|
|
115
118
|
self.voice = VoiceClient(client_wrapper=self._client_wrapper)
|
|
116
119
|
self.templates = TemplatesClient(client_wrapper=self._client_wrapper)
|
|
117
120
|
self.client_side_access_tokens = ClientSideAccessTokensClient(client_wrapper=self._client_wrapper)
|
|
@@ -189,6 +192,7 @@ class AsyncLettaBase:
|
|
|
189
192
|
self.runs = AsyncRunsClient(client_wrapper=self._client_wrapper)
|
|
190
193
|
self.steps = AsyncStepsClient(client_wrapper=self._client_wrapper)
|
|
191
194
|
self.tag = AsyncTagClient(client_wrapper=self._client_wrapper)
|
|
195
|
+
self.messages = AsyncMessagesClient(client_wrapper=self._client_wrapper)
|
|
192
196
|
self.voice = AsyncVoiceClient(client_wrapper=self._client_wrapper)
|
|
193
197
|
self.templates = AsyncTemplatesClient(client_wrapper=self._client_wrapper)
|
|
194
198
|
self.client_side_access_tokens = AsyncClientSideAccessTokensClient(client_wrapper=self._client_wrapper)
|
|
@@ -16,7 +16,7 @@ class BaseClientWrapper:
|
|
|
16
16
|
headers: typing.Dict[str, str] = {
|
|
17
17
|
"X-Fern-Language": "Python",
|
|
18
18
|
"X-Fern-SDK-Name": "letta-client",
|
|
19
|
-
"X-Fern-SDK-Version": "0.1.
|
|
19
|
+
"X-Fern-SDK-Version": "0.1.117",
|
|
20
20
|
}
|
|
21
21
|
if self.token is not None:
|
|
22
22
|
headers["Authorization"] = f"Bearer {self.token}"
|
|
@@ -0,0 +1,521 @@
|
|
|
1
|
+
# This file was auto-generated by Fern from our API Definition.
|
|
2
|
+
|
|
3
|
+
import typing
|
|
4
|
+
from ...core.client_wrapper import SyncClientWrapper
|
|
5
|
+
from ...core.request_options import RequestOptions
|
|
6
|
+
from ...types.batch_job import BatchJob
|
|
7
|
+
from ...core.unchecked_base_model import construct_type
|
|
8
|
+
from ...errors.unprocessable_entity_error import UnprocessableEntityError
|
|
9
|
+
from ...types.http_validation_error import HttpValidationError
|
|
10
|
+
from json.decoder import JSONDecodeError
|
|
11
|
+
from ...core.api_error import ApiError
|
|
12
|
+
from ...types.letta_batch_request import LettaBatchRequest
|
|
13
|
+
from ...core.serialization import convert_and_respect_annotation_metadata
|
|
14
|
+
from ...core.jsonable_encoder import jsonable_encoder
|
|
15
|
+
from ...core.client_wrapper import AsyncClientWrapper
|
|
16
|
+
|
|
17
|
+
# this is used as the default value for optional parameters
|
|
18
|
+
OMIT = typing.cast(typing.Any, ...)
|
|
19
|
+
|
|
20
|
+
|
|
21
|
+
class BatchesClient:
|
|
22
|
+
def __init__(self, *, client_wrapper: SyncClientWrapper):
|
|
23
|
+
self._client_wrapper = client_wrapper
|
|
24
|
+
|
|
25
|
+
def list(self, *, request_options: typing.Optional[RequestOptions] = None) -> typing.List[BatchJob]:
|
|
26
|
+
"""
|
|
27
|
+
List all batch runs.
|
|
28
|
+
|
|
29
|
+
Parameters
|
|
30
|
+
----------
|
|
31
|
+
request_options : typing.Optional[RequestOptions]
|
|
32
|
+
Request-specific configuration.
|
|
33
|
+
|
|
34
|
+
Returns
|
|
35
|
+
-------
|
|
36
|
+
typing.List[BatchJob]
|
|
37
|
+
Successful Response
|
|
38
|
+
|
|
39
|
+
Examples
|
|
40
|
+
--------
|
|
41
|
+
from letta_client import Letta
|
|
42
|
+
|
|
43
|
+
client = Letta(
|
|
44
|
+
token="YOUR_TOKEN",
|
|
45
|
+
)
|
|
46
|
+
client.messages.batches.list()
|
|
47
|
+
"""
|
|
48
|
+
_response = self._client_wrapper.httpx_client.request(
|
|
49
|
+
"v1/messages/batches",
|
|
50
|
+
method="GET",
|
|
51
|
+
request_options=request_options,
|
|
52
|
+
)
|
|
53
|
+
try:
|
|
54
|
+
if 200 <= _response.status_code < 300:
|
|
55
|
+
return typing.cast(
|
|
56
|
+
typing.List[BatchJob],
|
|
57
|
+
construct_type(
|
|
58
|
+
type_=typing.List[BatchJob], # type: ignore
|
|
59
|
+
object_=_response.json(),
|
|
60
|
+
),
|
|
61
|
+
)
|
|
62
|
+
if _response.status_code == 422:
|
|
63
|
+
raise UnprocessableEntityError(
|
|
64
|
+
typing.cast(
|
|
65
|
+
HttpValidationError,
|
|
66
|
+
construct_type(
|
|
67
|
+
type_=HttpValidationError, # type: ignore
|
|
68
|
+
object_=_response.json(),
|
|
69
|
+
),
|
|
70
|
+
)
|
|
71
|
+
)
|
|
72
|
+
_response_json = _response.json()
|
|
73
|
+
except JSONDecodeError:
|
|
74
|
+
raise ApiError(status_code=_response.status_code, body=_response.text)
|
|
75
|
+
raise ApiError(status_code=_response.status_code, body=_response_json)
|
|
76
|
+
|
|
77
|
+
def create(
|
|
78
|
+
self, *, requests: typing.Sequence[LettaBatchRequest], request_options: typing.Optional[RequestOptions] = None
|
|
79
|
+
) -> BatchJob:
|
|
80
|
+
"""
|
|
81
|
+
Submit a batch of agent messages for asynchronous processing.
|
|
82
|
+
Creates a job that will fan out messages to all listed agents and process them in parallel.
|
|
83
|
+
|
|
84
|
+
Parameters
|
|
85
|
+
----------
|
|
86
|
+
requests : typing.Sequence[LettaBatchRequest]
|
|
87
|
+
List of requests to be processed in batch.
|
|
88
|
+
|
|
89
|
+
request_options : typing.Optional[RequestOptions]
|
|
90
|
+
Request-specific configuration.
|
|
91
|
+
|
|
92
|
+
Returns
|
|
93
|
+
-------
|
|
94
|
+
BatchJob
|
|
95
|
+
Successful Response
|
|
96
|
+
|
|
97
|
+
Examples
|
|
98
|
+
--------
|
|
99
|
+
from letta_client import Letta, LettaBatchRequest, MessageCreate, TextContent
|
|
100
|
+
|
|
101
|
+
client = Letta(
|
|
102
|
+
token="YOUR_TOKEN",
|
|
103
|
+
)
|
|
104
|
+
client.messages.batches.create(
|
|
105
|
+
requests=[
|
|
106
|
+
LettaBatchRequest(
|
|
107
|
+
messages=[
|
|
108
|
+
MessageCreate(
|
|
109
|
+
role="user",
|
|
110
|
+
content=[
|
|
111
|
+
TextContent(
|
|
112
|
+
text="text",
|
|
113
|
+
)
|
|
114
|
+
],
|
|
115
|
+
)
|
|
116
|
+
],
|
|
117
|
+
agent_id="agent_id",
|
|
118
|
+
)
|
|
119
|
+
],
|
|
120
|
+
)
|
|
121
|
+
"""
|
|
122
|
+
_response = self._client_wrapper.httpx_client.request(
|
|
123
|
+
"v1/messages/batches",
|
|
124
|
+
method="POST",
|
|
125
|
+
json={
|
|
126
|
+
"requests": convert_and_respect_annotation_metadata(
|
|
127
|
+
object_=requests, annotation=typing.Sequence[LettaBatchRequest], direction="write"
|
|
128
|
+
),
|
|
129
|
+
},
|
|
130
|
+
headers={
|
|
131
|
+
"content-type": "application/json",
|
|
132
|
+
},
|
|
133
|
+
request_options=request_options,
|
|
134
|
+
omit=OMIT,
|
|
135
|
+
)
|
|
136
|
+
try:
|
|
137
|
+
if 200 <= _response.status_code < 300:
|
|
138
|
+
return typing.cast(
|
|
139
|
+
BatchJob,
|
|
140
|
+
construct_type(
|
|
141
|
+
type_=BatchJob, # type: ignore
|
|
142
|
+
object_=_response.json(),
|
|
143
|
+
),
|
|
144
|
+
)
|
|
145
|
+
if _response.status_code == 422:
|
|
146
|
+
raise UnprocessableEntityError(
|
|
147
|
+
typing.cast(
|
|
148
|
+
HttpValidationError,
|
|
149
|
+
construct_type(
|
|
150
|
+
type_=HttpValidationError, # type: ignore
|
|
151
|
+
object_=_response.json(),
|
|
152
|
+
),
|
|
153
|
+
)
|
|
154
|
+
)
|
|
155
|
+
_response_json = _response.json()
|
|
156
|
+
except JSONDecodeError:
|
|
157
|
+
raise ApiError(status_code=_response.status_code, body=_response.text)
|
|
158
|
+
raise ApiError(status_code=_response.status_code, body=_response_json)
|
|
159
|
+
|
|
160
|
+
def retrieve(self, batch_id: str, *, request_options: typing.Optional[RequestOptions] = None) -> BatchJob:
|
|
161
|
+
"""
|
|
162
|
+
Get the status of a batch run.
|
|
163
|
+
|
|
164
|
+
Parameters
|
|
165
|
+
----------
|
|
166
|
+
batch_id : str
|
|
167
|
+
|
|
168
|
+
request_options : typing.Optional[RequestOptions]
|
|
169
|
+
Request-specific configuration.
|
|
170
|
+
|
|
171
|
+
Returns
|
|
172
|
+
-------
|
|
173
|
+
BatchJob
|
|
174
|
+
Successful Response
|
|
175
|
+
|
|
176
|
+
Examples
|
|
177
|
+
--------
|
|
178
|
+
from letta_client import Letta
|
|
179
|
+
|
|
180
|
+
client = Letta(
|
|
181
|
+
token="YOUR_TOKEN",
|
|
182
|
+
)
|
|
183
|
+
client.messages.batches.retrieve(
|
|
184
|
+
batch_id="batch_id",
|
|
185
|
+
)
|
|
186
|
+
"""
|
|
187
|
+
_response = self._client_wrapper.httpx_client.request(
|
|
188
|
+
f"v1/messages/batches/{jsonable_encoder(batch_id)}",
|
|
189
|
+
method="GET",
|
|
190
|
+
request_options=request_options,
|
|
191
|
+
)
|
|
192
|
+
try:
|
|
193
|
+
if 200 <= _response.status_code < 300:
|
|
194
|
+
return typing.cast(
|
|
195
|
+
BatchJob,
|
|
196
|
+
construct_type(
|
|
197
|
+
type_=BatchJob, # type: ignore
|
|
198
|
+
object_=_response.json(),
|
|
199
|
+
),
|
|
200
|
+
)
|
|
201
|
+
if _response.status_code == 422:
|
|
202
|
+
raise UnprocessableEntityError(
|
|
203
|
+
typing.cast(
|
|
204
|
+
HttpValidationError,
|
|
205
|
+
construct_type(
|
|
206
|
+
type_=HttpValidationError, # type: ignore
|
|
207
|
+
object_=_response.json(),
|
|
208
|
+
),
|
|
209
|
+
)
|
|
210
|
+
)
|
|
211
|
+
_response_json = _response.json()
|
|
212
|
+
except JSONDecodeError:
|
|
213
|
+
raise ApiError(status_code=_response.status_code, body=_response.text)
|
|
214
|
+
raise ApiError(status_code=_response.status_code, body=_response_json)
|
|
215
|
+
|
|
216
|
+
def cancel(self, batch_id: str, *, request_options: typing.Optional[RequestOptions] = None) -> None:
|
|
217
|
+
"""
|
|
218
|
+
Parameters
|
|
219
|
+
----------
|
|
220
|
+
batch_id : str
|
|
221
|
+
|
|
222
|
+
request_options : typing.Optional[RequestOptions]
|
|
223
|
+
Request-specific configuration.
|
|
224
|
+
|
|
225
|
+
Returns
|
|
226
|
+
-------
|
|
227
|
+
None
|
|
228
|
+
|
|
229
|
+
Examples
|
|
230
|
+
--------
|
|
231
|
+
from letta_client import Letta
|
|
232
|
+
|
|
233
|
+
client = Letta(
|
|
234
|
+
token="YOUR_TOKEN",
|
|
235
|
+
)
|
|
236
|
+
client.messages.batches.cancel(
|
|
237
|
+
batch_id="batch_id",
|
|
238
|
+
)
|
|
239
|
+
"""
|
|
240
|
+
_response = self._client_wrapper.httpx_client.request(
|
|
241
|
+
f"v1/messages/batches/{jsonable_encoder(batch_id)}",
|
|
242
|
+
method="PATCH",
|
|
243
|
+
request_options=request_options,
|
|
244
|
+
)
|
|
245
|
+
try:
|
|
246
|
+
if 200 <= _response.status_code < 300:
|
|
247
|
+
return
|
|
248
|
+
_response_json = _response.json()
|
|
249
|
+
except JSONDecodeError:
|
|
250
|
+
raise ApiError(status_code=_response.status_code, body=_response.text)
|
|
251
|
+
raise ApiError(status_code=_response.status_code, body=_response_json)
|
|
252
|
+
|
|
253
|
+
|
|
254
|
+
class AsyncBatchesClient:
|
|
255
|
+
def __init__(self, *, client_wrapper: AsyncClientWrapper):
|
|
256
|
+
self._client_wrapper = client_wrapper
|
|
257
|
+
|
|
258
|
+
async def list(self, *, request_options: typing.Optional[RequestOptions] = None) -> typing.List[BatchJob]:
|
|
259
|
+
"""
|
|
260
|
+
List all batch runs.
|
|
261
|
+
|
|
262
|
+
Parameters
|
|
263
|
+
----------
|
|
264
|
+
request_options : typing.Optional[RequestOptions]
|
|
265
|
+
Request-specific configuration.
|
|
266
|
+
|
|
267
|
+
Returns
|
|
268
|
+
-------
|
|
269
|
+
typing.List[BatchJob]
|
|
270
|
+
Successful Response
|
|
271
|
+
|
|
272
|
+
Examples
|
|
273
|
+
--------
|
|
274
|
+
import asyncio
|
|
275
|
+
|
|
276
|
+
from letta_client import AsyncLetta
|
|
277
|
+
|
|
278
|
+
client = AsyncLetta(
|
|
279
|
+
token="YOUR_TOKEN",
|
|
280
|
+
)
|
|
281
|
+
|
|
282
|
+
|
|
283
|
+
async def main() -> None:
|
|
284
|
+
await client.messages.batches.list()
|
|
285
|
+
|
|
286
|
+
|
|
287
|
+
asyncio.run(main())
|
|
288
|
+
"""
|
|
289
|
+
_response = await self._client_wrapper.httpx_client.request(
|
|
290
|
+
"v1/messages/batches",
|
|
291
|
+
method="GET",
|
|
292
|
+
request_options=request_options,
|
|
293
|
+
)
|
|
294
|
+
try:
|
|
295
|
+
if 200 <= _response.status_code < 300:
|
|
296
|
+
return typing.cast(
|
|
297
|
+
typing.List[BatchJob],
|
|
298
|
+
construct_type(
|
|
299
|
+
type_=typing.List[BatchJob], # type: ignore
|
|
300
|
+
object_=_response.json(),
|
|
301
|
+
),
|
|
302
|
+
)
|
|
303
|
+
if _response.status_code == 422:
|
|
304
|
+
raise UnprocessableEntityError(
|
|
305
|
+
typing.cast(
|
|
306
|
+
HttpValidationError,
|
|
307
|
+
construct_type(
|
|
308
|
+
type_=HttpValidationError, # type: ignore
|
|
309
|
+
object_=_response.json(),
|
|
310
|
+
),
|
|
311
|
+
)
|
|
312
|
+
)
|
|
313
|
+
_response_json = _response.json()
|
|
314
|
+
except JSONDecodeError:
|
|
315
|
+
raise ApiError(status_code=_response.status_code, body=_response.text)
|
|
316
|
+
raise ApiError(status_code=_response.status_code, body=_response_json)
|
|
317
|
+
|
|
318
|
+
async def create(
|
|
319
|
+
self, *, requests: typing.Sequence[LettaBatchRequest], request_options: typing.Optional[RequestOptions] = None
|
|
320
|
+
) -> BatchJob:
|
|
321
|
+
"""
|
|
322
|
+
Submit a batch of agent messages for asynchronous processing.
|
|
323
|
+
Creates a job that will fan out messages to all listed agents and process them in parallel.
|
|
324
|
+
|
|
325
|
+
Parameters
|
|
326
|
+
----------
|
|
327
|
+
requests : typing.Sequence[LettaBatchRequest]
|
|
328
|
+
List of requests to be processed in batch.
|
|
329
|
+
|
|
330
|
+
request_options : typing.Optional[RequestOptions]
|
|
331
|
+
Request-specific configuration.
|
|
332
|
+
|
|
333
|
+
Returns
|
|
334
|
+
-------
|
|
335
|
+
BatchJob
|
|
336
|
+
Successful Response
|
|
337
|
+
|
|
338
|
+
Examples
|
|
339
|
+
--------
|
|
340
|
+
import asyncio
|
|
341
|
+
|
|
342
|
+
from letta_client import (
|
|
343
|
+
AsyncLetta,
|
|
344
|
+
LettaBatchRequest,
|
|
345
|
+
MessageCreate,
|
|
346
|
+
TextContent,
|
|
347
|
+
)
|
|
348
|
+
|
|
349
|
+
client = AsyncLetta(
|
|
350
|
+
token="YOUR_TOKEN",
|
|
351
|
+
)
|
|
352
|
+
|
|
353
|
+
|
|
354
|
+
async def main() -> None:
|
|
355
|
+
await client.messages.batches.create(
|
|
356
|
+
requests=[
|
|
357
|
+
LettaBatchRequest(
|
|
358
|
+
messages=[
|
|
359
|
+
MessageCreate(
|
|
360
|
+
role="user",
|
|
361
|
+
content=[
|
|
362
|
+
TextContent(
|
|
363
|
+
text="text",
|
|
364
|
+
)
|
|
365
|
+
],
|
|
366
|
+
)
|
|
367
|
+
],
|
|
368
|
+
agent_id="agent_id",
|
|
369
|
+
)
|
|
370
|
+
],
|
|
371
|
+
)
|
|
372
|
+
|
|
373
|
+
|
|
374
|
+
asyncio.run(main())
|
|
375
|
+
"""
|
|
376
|
+
_response = await self._client_wrapper.httpx_client.request(
|
|
377
|
+
"v1/messages/batches",
|
|
378
|
+
method="POST",
|
|
379
|
+
json={
|
|
380
|
+
"requests": convert_and_respect_annotation_metadata(
|
|
381
|
+
object_=requests, annotation=typing.Sequence[LettaBatchRequest], direction="write"
|
|
382
|
+
),
|
|
383
|
+
},
|
|
384
|
+
headers={
|
|
385
|
+
"content-type": "application/json",
|
|
386
|
+
},
|
|
387
|
+
request_options=request_options,
|
|
388
|
+
omit=OMIT,
|
|
389
|
+
)
|
|
390
|
+
try:
|
|
391
|
+
if 200 <= _response.status_code < 300:
|
|
392
|
+
return typing.cast(
|
|
393
|
+
BatchJob,
|
|
394
|
+
construct_type(
|
|
395
|
+
type_=BatchJob, # type: ignore
|
|
396
|
+
object_=_response.json(),
|
|
397
|
+
),
|
|
398
|
+
)
|
|
399
|
+
if _response.status_code == 422:
|
|
400
|
+
raise UnprocessableEntityError(
|
|
401
|
+
typing.cast(
|
|
402
|
+
HttpValidationError,
|
|
403
|
+
construct_type(
|
|
404
|
+
type_=HttpValidationError, # type: ignore
|
|
405
|
+
object_=_response.json(),
|
|
406
|
+
),
|
|
407
|
+
)
|
|
408
|
+
)
|
|
409
|
+
_response_json = _response.json()
|
|
410
|
+
except JSONDecodeError:
|
|
411
|
+
raise ApiError(status_code=_response.status_code, body=_response.text)
|
|
412
|
+
raise ApiError(status_code=_response.status_code, body=_response_json)
|
|
413
|
+
|
|
414
|
+
async def retrieve(self, batch_id: str, *, request_options: typing.Optional[RequestOptions] = None) -> BatchJob:
|
|
415
|
+
"""
|
|
416
|
+
Get the status of a batch run.
|
|
417
|
+
|
|
418
|
+
Parameters
|
|
419
|
+
----------
|
|
420
|
+
batch_id : str
|
|
421
|
+
|
|
422
|
+
request_options : typing.Optional[RequestOptions]
|
|
423
|
+
Request-specific configuration.
|
|
424
|
+
|
|
425
|
+
Returns
|
|
426
|
+
-------
|
|
427
|
+
BatchJob
|
|
428
|
+
Successful Response
|
|
429
|
+
|
|
430
|
+
Examples
|
|
431
|
+
--------
|
|
432
|
+
import asyncio
|
|
433
|
+
|
|
434
|
+
from letta_client import AsyncLetta
|
|
435
|
+
|
|
436
|
+
client = AsyncLetta(
|
|
437
|
+
token="YOUR_TOKEN",
|
|
438
|
+
)
|
|
439
|
+
|
|
440
|
+
|
|
441
|
+
async def main() -> None:
|
|
442
|
+
await client.messages.batches.retrieve(
|
|
443
|
+
batch_id="batch_id",
|
|
444
|
+
)
|
|
445
|
+
|
|
446
|
+
|
|
447
|
+
asyncio.run(main())
|
|
448
|
+
"""
|
|
449
|
+
_response = await self._client_wrapper.httpx_client.request(
|
|
450
|
+
f"v1/messages/batches/{jsonable_encoder(batch_id)}",
|
|
451
|
+
method="GET",
|
|
452
|
+
request_options=request_options,
|
|
453
|
+
)
|
|
454
|
+
try:
|
|
455
|
+
if 200 <= _response.status_code < 300:
|
|
456
|
+
return typing.cast(
|
|
457
|
+
BatchJob,
|
|
458
|
+
construct_type(
|
|
459
|
+
type_=BatchJob, # type: ignore
|
|
460
|
+
object_=_response.json(),
|
|
461
|
+
),
|
|
462
|
+
)
|
|
463
|
+
if _response.status_code == 422:
|
|
464
|
+
raise UnprocessableEntityError(
|
|
465
|
+
typing.cast(
|
|
466
|
+
HttpValidationError,
|
|
467
|
+
construct_type(
|
|
468
|
+
type_=HttpValidationError, # type: ignore
|
|
469
|
+
object_=_response.json(),
|
|
470
|
+
),
|
|
471
|
+
)
|
|
472
|
+
)
|
|
473
|
+
_response_json = _response.json()
|
|
474
|
+
except JSONDecodeError:
|
|
475
|
+
raise ApiError(status_code=_response.status_code, body=_response.text)
|
|
476
|
+
raise ApiError(status_code=_response.status_code, body=_response_json)
|
|
477
|
+
|
|
478
|
+
async def cancel(self, batch_id: str, *, request_options: typing.Optional[RequestOptions] = None) -> None:
|
|
479
|
+
"""
|
|
480
|
+
Parameters
|
|
481
|
+
----------
|
|
482
|
+
batch_id : str
|
|
483
|
+
|
|
484
|
+
request_options : typing.Optional[RequestOptions]
|
|
485
|
+
Request-specific configuration.
|
|
486
|
+
|
|
487
|
+
Returns
|
|
488
|
+
-------
|
|
489
|
+
None
|
|
490
|
+
|
|
491
|
+
Examples
|
|
492
|
+
--------
|
|
493
|
+
import asyncio
|
|
494
|
+
|
|
495
|
+
from letta_client import AsyncLetta
|
|
496
|
+
|
|
497
|
+
client = AsyncLetta(
|
|
498
|
+
token="YOUR_TOKEN",
|
|
499
|
+
)
|
|
500
|
+
|
|
501
|
+
|
|
502
|
+
async def main() -> None:
|
|
503
|
+
await client.messages.batches.cancel(
|
|
504
|
+
batch_id="batch_id",
|
|
505
|
+
)
|
|
506
|
+
|
|
507
|
+
|
|
508
|
+
asyncio.run(main())
|
|
509
|
+
"""
|
|
510
|
+
_response = await self._client_wrapper.httpx_client.request(
|
|
511
|
+
f"v1/messages/batches/{jsonable_encoder(batch_id)}",
|
|
512
|
+
method="PATCH",
|
|
513
|
+
request_options=request_options,
|
|
514
|
+
)
|
|
515
|
+
try:
|
|
516
|
+
if 200 <= _response.status_code < 300:
|
|
517
|
+
return
|
|
518
|
+
_response_json = _response.json()
|
|
519
|
+
except JSONDecodeError:
|
|
520
|
+
raise ApiError(status_code=_response.status_code, body=_response.text)
|
|
521
|
+
raise ApiError(status_code=_response.status_code, body=_response_json)
|
|
@@ -0,0 +1,150 @@
|
|
|
1
|
+
# This file was auto-generated by Fern from our API Definition.
|
|
2
|
+
|
|
3
|
+
from ..core.client_wrapper import SyncClientWrapper
|
|
4
|
+
from .batches.client import BatchesClient
|
|
5
|
+
import typing
|
|
6
|
+
from ..core.request_options import RequestOptions
|
|
7
|
+
from ..core.jsonable_encoder import jsonable_encoder
|
|
8
|
+
from ..core.unchecked_base_model import construct_type
|
|
9
|
+
from ..errors.unprocessable_entity_error import UnprocessableEntityError
|
|
10
|
+
from ..types.http_validation_error import HttpValidationError
|
|
11
|
+
from json.decoder import JSONDecodeError
|
|
12
|
+
from ..core.api_error import ApiError
|
|
13
|
+
from ..core.client_wrapper import AsyncClientWrapper
|
|
14
|
+
from .batches.client import AsyncBatchesClient
|
|
15
|
+
|
|
16
|
+
|
|
17
|
+
class MessagesClient:
    """Synchronous client for the top-level ``v1/messages`` endpoints."""

    def __init__(self, *, client_wrapper: SyncClientWrapper):
        self._client_wrapper = client_wrapper
        # Sub-client exposing batch-scoped operations (retrieve / cancel by id).
        self.batches = BatchesClient(client_wrapper=self._client_wrapper)

    def cancel_batch_run(
        self, batch_id: str, *, request_options: typing.Optional[RequestOptions] = None
    ) -> typing.Optional[typing.Any]:
        """
        Cancel a batch run.

        Parameters
        ----------
        batch_id : str
            Identifier of the batch run to cancel.

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        typing.Optional[typing.Any]
            Successful Response

        Examples
        --------
        from letta_client import Letta

        client = Letta(
            token="YOUR_TOKEN",
        )
        client.messages.cancel_batch_run(
            batch_id="batch_id",
        )
        """
        response = self._client_wrapper.httpx_client.request(
            f"v1/messages/batches/{jsonable_encoder(batch_id)}/cancel",
            method="PATCH",
            request_options=request_options,
        )
        try:
            code = response.status_code
            if 200 <= code < 300:
                # Success: the endpoint's payload has no fixed schema.
                payload = construct_type(
                    type_=typing.Optional[typing.Any],  # type: ignore
                    object_=response.json(),
                )
                return typing.cast(typing.Optional[typing.Any], payload)
            if code == 422:
                # Validation failure: surface the server-side error detail.
                detail = construct_type(
                    type_=HttpValidationError,  # type: ignore
                    object_=response.json(),
                )
                raise UnprocessableEntityError(typing.cast(HttpValidationError, detail))
            error_body = response.json()
        except JSONDecodeError:
            # Non-JSON error payload: report the raw text instead.
            raise ApiError(status_code=response.status_code, body=response.text)
        raise ApiError(status_code=response.status_code, body=error_body)
|
|
79
|
+
|
|
80
|
+
|
|
81
|
+
class AsyncMessagesClient:
    """Asynchronous client for the top-level ``v1/messages`` endpoints."""

    def __init__(self, *, client_wrapper: AsyncClientWrapper):
        self._client_wrapper = client_wrapper
        # Sub-client exposing batch-scoped operations (retrieve / cancel by id).
        self.batches = AsyncBatchesClient(client_wrapper=self._client_wrapper)

    async def cancel_batch_run(
        self, batch_id: str, *, request_options: typing.Optional[RequestOptions] = None
    ) -> typing.Optional[typing.Any]:
        """
        Cancel a batch run.

        Parameters
        ----------
        batch_id : str
            Identifier of the batch run to cancel.

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        typing.Optional[typing.Any]
            Successful Response

        Examples
        --------
        import asyncio

        from letta_client import AsyncLetta

        client = AsyncLetta(
            token="YOUR_TOKEN",
        )


        async def main() -> None:
            await client.messages.cancel_batch_run(
                batch_id="batch_id",
            )


        asyncio.run(main())
        """
        response = await self._client_wrapper.httpx_client.request(
            f"v1/messages/batches/{jsonable_encoder(batch_id)}/cancel",
            method="PATCH",
            request_options=request_options,
        )
        try:
            code = response.status_code
            if 200 <= code < 300:
                # Success: the endpoint's payload has no fixed schema.
                payload = construct_type(
                    type_=typing.Optional[typing.Any],  # type: ignore
                    object_=response.json(),
                )
                return typing.cast(typing.Optional[typing.Any], payload)
            if code == 422:
                # Validation failure: surface the server-side error detail.
                detail = construct_type(
                    type_=HttpValidationError,  # type: ignore
                    object_=response.json(),
                )
                raise UnprocessableEntityError(typing.cast(HttpValidationError, detail))
            error_body = response.json()
        except JSONDecodeError:
            # Non-JSON error payload: report the raw text instead.
            raise ApiError(status_code=response.status_code, body=response.text)
        raise ApiError(status_code=response.status_code, body=error_body)
|
letta_client/types/__init__.py
CHANGED
|
@@ -20,6 +20,7 @@ from .auth_response import AuthResponse
|
|
|
20
20
|
from .auth_scheme_field import AuthSchemeField
|
|
21
21
|
from .bad_request_error_body import BadRequestErrorBody
|
|
22
22
|
from .base_tool_rule_schema import BaseToolRuleSchema
|
|
23
|
+
from .batch_job import BatchJob
|
|
23
24
|
from .block import Block
|
|
24
25
|
from .block_update import BlockUpdate
|
|
25
26
|
from .chat_completion_assistant_message_param import ChatCompletionAssistantMessageParam
|
|
@@ -114,7 +115,6 @@ from .job_status import JobStatus
|
|
|
114
115
|
from .job_type import JobType
|
|
115
116
|
from .json_schema import JsonSchema
|
|
116
117
|
from .letta_batch_request import LettaBatchRequest
|
|
117
|
-
from .letta_batch_response import LettaBatchResponse
|
|
118
118
|
from .letta_message_content_union import LettaMessageContentUnion
|
|
119
119
|
from .letta_message_union import LettaMessageUnion
|
|
120
120
|
from .letta_request import LettaRequest
|
|
@@ -245,6 +245,7 @@ __all__ = [
|
|
|
245
245
|
"AuthSchemeField",
|
|
246
246
|
"BadRequestErrorBody",
|
|
247
247
|
"BaseToolRuleSchema",
|
|
248
|
+
"BatchJob",
|
|
248
249
|
"Block",
|
|
249
250
|
"BlockUpdate",
|
|
250
251
|
"ChatCompletionAssistantMessageParam",
|
|
@@ -339,7 +340,6 @@ __all__ = [
|
|
|
339
340
|
"JobType",
|
|
340
341
|
"JsonSchema",
|
|
341
342
|
"LettaBatchRequest",
|
|
342
|
-
"LettaBatchResponse",
|
|
343
343
|
"LettaMessageContentUnion",
|
|
344
344
|
"LettaMessageUnion",
|
|
345
345
|
"LettaRequest",
|
|
@@ -0,0 +1,61 @@
|
|
|
1
|
+
# This file was auto-generated by Fern from our API Definition.
|
|
2
|
+
|
|
3
|
+
from ..core.unchecked_base_model import UncheckedBaseModel
|
|
4
|
+
import typing
|
|
5
|
+
import pydantic
|
|
6
|
+
import datetime as dt
|
|
7
|
+
from .job_status import JobStatus
|
|
8
|
+
from .job_type import JobType
|
|
9
|
+
from ..core.pydantic_utilities import IS_PYDANTIC_V2
|
|
10
|
+
|
|
11
|
+
|
|
12
|
+
class BatchJob(UncheckedBaseModel):
    """
    Job record returned by the ``v1/messages/batches`` endpoints.

    All fields are optional: the model tolerates partial server responses and,
    via the ``extra="allow"`` config below, preserves unknown fields rather
    than rejecting them.
    """

    created_by_id: typing.Optional[str] = pydantic.Field(default=None)
    """
    The id of the user that made this object.
    """

    last_updated_by_id: typing.Optional[str] = pydantic.Field(default=None)
    """
    The id of the user that made this object.
    """

    created_at: typing.Optional[dt.datetime] = pydantic.Field(default=None)
    """
    The timestamp when the object was created.
    """

    updated_at: typing.Optional[dt.datetime] = pydantic.Field(default=None)
    """
    The timestamp when the object was last updated.
    """

    status: typing.Optional[JobStatus] = pydantic.Field(default=None)
    """
    The status of the job.
    """

    completed_at: typing.Optional[dt.datetime] = pydantic.Field(default=None)
    """
    The unix timestamp of when the job was completed.
    """

    metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
    """
    The metadata of the job.
    """

    job_type: typing.Optional[JobType] = None
    id: typing.Optional[str] = pydantic.Field(default=None)
    """
    The human-friendly ID of the Job
    """

    # Instances are immutable (frozen) and keep any extra fields the server
    # sends; the two branches express the same config for pydantic v2 and v1.
    if IS_PYDANTIC_V2:
        model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True)  # type: ignore # Pydantic v2
    else:

        class Config:
            frozen = True
            smart_union = True
            extra = pydantic.Extra.allow
|
letta_client/types/job_type.py
CHANGED
|
@@ -1,8 +1,8 @@
|
|
|
1
|
-
letta_client/__init__.py,sha256=
|
|
1
|
+
letta_client/__init__.py,sha256=oU-1Vq1arydqXE9q_Rp4B7VshBLJO4C3ZIZ6Sg2Q70s,16174
|
|
2
2
|
letta_client/agents/__init__.py,sha256=CveigJGrnkw3yZ8S9yZ2DpK1HV0v1fU-khsiLJJ0uaU,1452
|
|
3
3
|
letta_client/agents/blocks/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-_v1roeGPAKoa_pw,65
|
|
4
4
|
letta_client/agents/blocks/client.py,sha256=u5zvutxoH_DqfSLWhRtNSRBC9_ezQDx682cxkxDz3JA,23822
|
|
5
|
-
letta_client/agents/client.py,sha256=
|
|
5
|
+
letta_client/agents/client.py,sha256=ku9SdcBr0WmR5pjwD1kfq4EMuH-BM3WROO-uda2pul4,93545
|
|
6
6
|
letta_client/agents/context/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-_v1roeGPAKoa_pw,65
|
|
7
7
|
letta_client/agents/context/client.py,sha256=GKKvoG4N_K8Biz9yDjeIHpFG0C8Cwc7tHmEX3pTL_9U,4815
|
|
8
8
|
letta_client/agents/core_memory/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-_v1roeGPAKoa_pw,65
|
|
@@ -38,7 +38,7 @@ letta_client/agents/types/agents_search_request_search_item_zero.py,sha256=tGjwn
|
|
|
38
38
|
letta_client/agents/types/agents_search_response.py,sha256=AQJVKps-bjCx2ujqESzW1Iy9ZYFS17hH_UFIeBeK4S8,815
|
|
39
39
|
letta_client/agents/types/create_agent_request_tool_rules_item.py,sha256=L3FNsFTG9kVmuPbQhbCKNg3H2E5bB2Rgp92gWmGd-LM,689
|
|
40
40
|
letta_client/agents/types/update_agent_tool_rules_item.py,sha256=k9MmcVPsK-EGl8XlT3JQwdlBNLgpGw528jmi8fCFS7g,682
|
|
41
|
-
letta_client/base_client.py,sha256=
|
|
41
|
+
letta_client/base_client.py,sha256=37vSxqxIwuotfwpLaJWyqePXUUoNt7ZS6TgJcp4G77c,9503
|
|
42
42
|
letta_client/blocks/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-_v1roeGPAKoa_pw,65
|
|
43
43
|
letta_client/blocks/client.py,sha256=LE9dsHaBxFLC3G035f0VpNDG7XKWRK8y9OXpeFCMvUw,30082
|
|
44
44
|
letta_client/client.py,sha256=k2mZqqEWciVmEQHgipjCK4kQILk74hpSqzcdNwdql9A,21212
|
|
@@ -53,7 +53,7 @@ letta_client/client_side_access_tokens/types/client_side_access_tokens_create_cl
|
|
|
53
53
|
letta_client/client_side_access_tokens/types/client_side_access_tokens_create_client_side_access_token_response_policy_data_item_access_item.py,sha256=vefuUVB3pLCbCNBJACXMsEKZuwhMAGvLwSU3TU3Q4l4,275
|
|
54
54
|
letta_client/core/__init__.py,sha256=OKbX2aCZXgHCDUsCouqv-OiX32xA6eFFCKIUH9M5Vzk,1591
|
|
55
55
|
letta_client/core/api_error.py,sha256=RE8LELok2QCjABadECTvtDp7qejA1VmINCh6TbqPwSE,426
|
|
56
|
-
letta_client/core/client_wrapper.py,sha256=
|
|
56
|
+
letta_client/core/client_wrapper.py,sha256=S7b3SY_tgDb58OaozkpNkuqGQ2s5L2LpUl83woui6nM,1998
|
|
57
57
|
letta_client/core/datetime_utils.py,sha256=nBys2IsYrhPdszxGKCNRPSOCwa-5DWOHG95FB8G9PKo,1047
|
|
58
58
|
letta_client/core/file.py,sha256=d4NNbX8XvXP32z8KpK2Xovv33nFfruIrpz0QWxlgpZk,2663
|
|
59
59
|
letta_client/core/http_client.py,sha256=Z77OIxIbL4OAB2IDqjRq_sYa5yNYAWfmdhdCSSvh6Y4,19552
|
|
@@ -88,6 +88,10 @@ letta_client/identities/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-_v1roeGPA
|
|
|
88
88
|
letta_client/identities/client.py,sha256=kFcShBwJF3RSmOcEVBYM1-IFTFWD3zvCD0JsHSWDeh4,40986
|
|
89
89
|
letta_client/jobs/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-_v1roeGPAKoa_pw,65
|
|
90
90
|
letta_client/jobs/client.py,sha256=z1Zq6dGs2xbf3EAFuD3-m-qbpbUeqpCBYqtIFKkGoMk,15622
|
|
91
|
+
letta_client/messages/__init__.py,sha256=YH5-krRUPFJQGtFgUr_krZfjlEHcRUAIJZ5J1sIE9bM,110
|
|
92
|
+
letta_client/messages/batches/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-_v1roeGPAKoa_pw,65
|
|
93
|
+
letta_client/messages/batches/client.py,sha256=cG5Cq7MwNCtYN3wnQlo4feur9T988XU6lhA7u35ai0E,16797
|
|
94
|
+
letta_client/messages/client.py,sha256=MTBnxo2irqz8Y3i0iDEtW6SWYOIG4XjEqUZRur3edrQ,5016
|
|
91
95
|
letta_client/models/__init__.py,sha256=FTtvy8EDg9nNNg9WCatVgKTRYV8-_v1roeGPAKoa_pw,65
|
|
92
96
|
letta_client/models/client.py,sha256=Rd9IHjSdXRzzZyabpq8pDTc9XDnwLPnmm5by335g1D0,6306
|
|
93
97
|
letta_client/projects/__init__.py,sha256=CccxELBuPLqC6SfAJMNP3I4tEzYBNNA9CUrhuPk-TVI,243
|
|
@@ -123,7 +127,7 @@ letta_client/tools/types/add_mcp_server_request.py,sha256=EieZjfOT95sjkpxXdqy7gl
|
|
|
123
127
|
letta_client/tools/types/add_mcp_server_response_item.py,sha256=TWdsKqGb1INhYtpGnAckz0Pw4nZShumSp4pfocRfxCA,270
|
|
124
128
|
letta_client/tools/types/delete_mcp_server_response_item.py,sha256=MeZObU-7tMSCd-S5yuUjNDse6A1hUz1LLjbko0pXaro,273
|
|
125
129
|
letta_client/tools/types/list_mcp_servers_response_value.py,sha256=AIoXu4bO8QNSU7zjL1jj0Rg4313wVtPaTt13W0aevLQ,273
|
|
126
|
-
letta_client/types/__init__.py,sha256=
|
|
130
|
+
letta_client/types/__init__.py,sha256=X-4mwCyHwKdCmS6dQZeoaZT2fCR6ChlNl9mRH1IAdBI,19827
|
|
127
131
|
letta_client/types/action_model.py,sha256=y1e2XMv3skFaNJIBdYoBKgiORzGh05aOVvu-qVR9uHg,1240
|
|
128
132
|
letta_client/types/action_parameters_model.py,sha256=LgKf5aPZG3-OHGxFdXiSokIDgce8c02xPYIAY05VgW8,828
|
|
129
133
|
letta_client/types/action_response_model.py,sha256=yq2Fd9UU8j7vvtE3VqXUoRRvDzWcfJPj_95ynGdeHCs,824
|
|
@@ -144,6 +148,7 @@ letta_client/types/auth_response.py,sha256=jtG9Nn0voJcOWkBtvnuGGwhpUhYz9A8O7soOJ
|
|
|
144
148
|
letta_client/types/auth_scheme_field.py,sha256=W4-qgKtKUSpBHaSvjLyzLybOIsGo7Ggk4VECpsoPnqQ,881
|
|
145
149
|
letta_client/types/bad_request_error_body.py,sha256=E4_eWEc9xeW9BkXGViBDrevV8Jf6PjgEweeGS3vJLD4,567
|
|
146
150
|
letta_client/types/base_tool_rule_schema.py,sha256=FbnJy6gb8wY_DPiU3Gs-u1Ol_l4K7-nAmPTc1oR3kOo,582
|
|
151
|
+
letta_client/types/batch_job.py,sha256=SyBdYiC0sI2FqtEjaSfecJsfrV-TJWpxzrv4uWIwbeI,1759
|
|
147
152
|
letta_client/types/block.py,sha256=J8McqSpellhd-KsPYontU8DYg3YV41_fQW5rR-85qMk,2900
|
|
148
153
|
letta_client/types/block_update.py,sha256=oIgxvSnav5vxztBdslRMiWOgRaAp3dh43pinZpoLzxk,1496
|
|
149
154
|
letta_client/types/chat_completion_assistant_message_param.py,sha256=QwxAJ9RQqxtZKnt6g6RfDppuMIt-1RAIlpnfSrVdHgg,1219
|
|
@@ -235,10 +240,9 @@ letta_client/types/input_audio_format.py,sha256=QQFfndI9w66wIbGyHwfmJnk2bEJDPmEs
|
|
|
235
240
|
letta_client/types/internal_server_error_body.py,sha256=xR9n1zptgmImbH6apQAuwBblYOWAYNLFzY8s0SUcEug,653
|
|
236
241
|
letta_client/types/job.py,sha256=VJBdFIY0rwqh4hObTchlU2jrloTjZwUEA44pNtY_JBg,2321
|
|
237
242
|
letta_client/types/job_status.py,sha256=lX5Q0QMQFnw-WiirqHD6kgBvGr6I7r8rKLnMJdqhlT8,239
|
|
238
|
-
letta_client/types/job_type.py,sha256=
|
|
243
|
+
letta_client/types/job_type.py,sha256=HXYrfzPwxI54PqV7OVcMhewSJ_pBNHc14s9LcExr7Ss,154
|
|
239
244
|
letta_client/types/json_schema.py,sha256=EHcLKBSGRsSzCKTpujKFHylcLJG6ODQIBrjQkU4lWDQ,870
|
|
240
245
|
letta_client/types/letta_batch_request.py,sha256=HdIuaaUaqINbau98jPqyIc7Ge84VlCf6VhAQpIhCFvQ,1354
|
|
241
|
-
letta_client/types/letta_batch_response.py,sha256=i1KP1aZh710acu5bKNmcmdiheuPrKbkJr4_e2K2ki_k,1161
|
|
242
246
|
letta_client/types/letta_message_content_union.py,sha256=YxzyXKxUMeqbqWOlDs9LC8HUiqEhgkNCV9a76GS3spg,486
|
|
243
247
|
letta_client/types/letta_message_union.py,sha256=TTQwlur2CZNdZ466Nb_2TFcSFXrgoMliaNzD33t7Ktw,603
|
|
244
248
|
letta_client/types/letta_request.py,sha256=bCPDRJhSJSo5eILJp0mTw_k26O3dZL1vChfAcaZ0rE8,1240
|
|
@@ -348,6 +352,6 @@ letta_client/voice/__init__.py,sha256=7hX85553PiRMtIMM12a0DSoFzsglNiUziYR2ekS84Q
|
|
|
348
352
|
letta_client/voice/client.py,sha256=STjswa5oOLoP59QwTJvQwi73kgn0UzKOaXc2CsTRI4k,6912
|
|
349
353
|
letta_client/voice/types/__init__.py,sha256=FRc3iKRTONE4N8Lf1IqvnqWZ2kXdrFFvkL7PxVcR8Ew,212
|
|
350
354
|
letta_client/voice/types/create_voice_chat_completions_request_body.py,sha256=ZLfKgNK1T6IAwLEvaBVFfy7jEAoPUXP28n-nfmHkklc,391
|
|
351
|
-
letta_client-0.1.
|
|
352
|
-
letta_client-0.1.
|
|
353
|
-
letta_client-0.1.
|
|
355
|
+
letta_client-0.1.117.dist-info/METADATA,sha256=-ZUQYGScs53WPyFaYLTG_5jN8S-dPf7W4RKvujbJHpI,5042
|
|
356
|
+
letta_client-0.1.117.dist-info/WHEEL,sha256=Zb28QaM1gQi8f4VCBhsUklF61CTlNYfs9YAZn-TOGFk,88
|
|
357
|
+
letta_client-0.1.117.dist-info/RECORD,,
|
|
@@ -1,44 +0,0 @@
|
|
|
1
|
-
# This file was auto-generated by Fern from our API Definition.
|
|
2
|
-
|
|
3
|
-
from ..core.unchecked_base_model import UncheckedBaseModel
|
|
4
|
-
import pydantic
|
|
5
|
-
from .job_status import JobStatus
|
|
6
|
-
import datetime as dt
|
|
7
|
-
from ..core.pydantic_utilities import IS_PYDANTIC_V2
|
|
8
|
-
import typing
|
|
9
|
-
|
|
10
|
-
|
|
11
|
-
class LettaBatchResponse(UncheckedBaseModel):
|
|
12
|
-
batch_id: str = pydantic.Field()
|
|
13
|
-
"""
|
|
14
|
-
A unique identifier for this batch request.
|
|
15
|
-
"""
|
|
16
|
-
|
|
17
|
-
status: JobStatus = pydantic.Field()
|
|
18
|
-
"""
|
|
19
|
-
The current status of the batch request.
|
|
20
|
-
"""
|
|
21
|
-
|
|
22
|
-
agent_count: int = pydantic.Field()
|
|
23
|
-
"""
|
|
24
|
-
The number of agents in the batch request.
|
|
25
|
-
"""
|
|
26
|
-
|
|
27
|
-
last_polled_at: dt.datetime = pydantic.Field()
|
|
28
|
-
"""
|
|
29
|
-
The timestamp when the batch was last polled for updates.
|
|
30
|
-
"""
|
|
31
|
-
|
|
32
|
-
created_at: dt.datetime = pydantic.Field()
|
|
33
|
-
"""
|
|
34
|
-
The timestamp when the batch request was created.
|
|
35
|
-
"""
|
|
36
|
-
|
|
37
|
-
if IS_PYDANTIC_V2:
|
|
38
|
-
model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
|
|
39
|
-
else:
|
|
40
|
-
|
|
41
|
-
class Config:
|
|
42
|
-
frozen = True
|
|
43
|
-
smart_union = True
|
|
44
|
-
extra = pydantic.Extra.allow
|
|
File without changes
|