letta-client 0.1.116__py3-none-any.whl → 0.1.118__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of letta-client has been flagged as potentially problematic; review the release details before upgrading.

@@ -0,0 +1,537 @@
1
+ # This file was auto-generated by Fern from our API Definition.
2
+
3
+ import typing
4
+ from ...core.client_wrapper import SyncClientWrapper
5
+ from ...core.request_options import RequestOptions
6
+ from ...types.batch_job import BatchJob
7
+ from ...core.unchecked_base_model import construct_type
8
+ from ...errors.unprocessable_entity_error import UnprocessableEntityError
9
+ from ...types.http_validation_error import HttpValidationError
10
+ from json.decoder import JSONDecodeError
11
+ from ...core.api_error import ApiError
12
+ from ...types.letta_batch_request import LettaBatchRequest
13
+ from ...core.serialization import convert_and_respect_annotation_metadata
14
+ from ...core.jsonable_encoder import jsonable_encoder
15
+ from ...core.client_wrapper import AsyncClientWrapper
16
+
17
# Sentinel default for optional parameters: lets the client distinguish
# "argument not supplied" (OMIT) from an explicit ``None``.
OMIT = typing.cast(typing.Any, ...)
19
+
20
+
21
class BatchesClient:
    """Synchronous client for the ``v1/messages/batches`` endpoints."""

    def __init__(self, *, client_wrapper: SyncClientWrapper):
        # Wrapper carrying the configured httpx client, auth and base URL.
        self._client_wrapper = client_wrapper

    def list(self, *, request_options: typing.Optional[RequestOptions] = None) -> typing.List[BatchJob]:
        """
        List all batch runs.

        Parameters
        ----------
        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        typing.List[BatchJob]
            Successful Response

        Raises
        ------
        UnprocessableEntityError
            If the server responds with HTTP 422.
        ApiError
            For any other non-2xx response.

        Examples
        --------
        from letta_client import Letta

        client = Letta(
            token="YOUR_TOKEN",
        )
        client.messages.batches.list()
        """
        response = self._client_wrapper.httpx_client.request(
            "v1/messages/batches",
            method="GET",
            request_options=request_options,
        )
        try:
            if 200 <= response.status_code < 300:
                parsed = construct_type(
                    type_=typing.List[BatchJob],  # type: ignore
                    object_=response.json(),
                )
                return typing.cast(typing.List[BatchJob], parsed)
            if response.status_code == 422:
                detail = construct_type(
                    type_=HttpValidationError,  # type: ignore
                    object_=response.json(),
                )
                raise UnprocessableEntityError(typing.cast(HttpValidationError, detail))
            error_body = response.json()
        except JSONDecodeError:
            # Error payload was not valid JSON; surface the raw text instead.
            raise ApiError(status_code=response.status_code, body=response.text)
        raise ApiError(status_code=response.status_code, body=error_body)

    def create(
        self,
        *,
        requests: typing.Sequence[LettaBatchRequest],
        callback_url: typing.Optional[str] = OMIT,
        request_options: typing.Optional[RequestOptions] = None,
    ) -> BatchJob:
        """
        Submit a batch of agent messages for asynchronous processing.
        Creates a job that will fan out messages to all listed agents and process them in parallel.

        Parameters
        ----------
        requests : typing.Sequence[LettaBatchRequest]
            List of requests to be processed in batch.

        callback_url : typing.Optional[str]
            Optional URL to call via POST when the batch completes.

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        BatchJob
            Successful Response

        Raises
        ------
        UnprocessableEntityError
            If the server responds with HTTP 422.
        ApiError
            For any other non-2xx response.

        Examples
        --------
        from letta_client import Letta, LettaBatchRequest, MessageCreate, TextContent

        client = Letta(
            token="YOUR_TOKEN",
        )
        client.messages.batches.create(
            requests=[
                LettaBatchRequest(
                    messages=[
                        MessageCreate(
                            role="user",
                            content=[
                                TextContent(
                                    text="text",
                                )
                            ],
                        )
                    ],
                    agent_id="agent_id",
                )
            ],
        )
        """
        # Serialize the request body, honoring any field aliases on the models.
        payload = {
            "requests": convert_and_respect_annotation_metadata(
                object_=requests, annotation=typing.Sequence[LettaBatchRequest], direction="write"
            ),
            "callback_url": callback_url,
        }
        response = self._client_wrapper.httpx_client.request(
            "v1/messages/batches",
            method="POST",
            json=payload,
            headers={
                "content-type": "application/json",
            },
            request_options=request_options,
            omit=OMIT,
        )
        try:
            if 200 <= response.status_code < 300:
                parsed = construct_type(
                    type_=BatchJob,  # type: ignore
                    object_=response.json(),
                )
                return typing.cast(BatchJob, parsed)
            if response.status_code == 422:
                detail = construct_type(
                    type_=HttpValidationError,  # type: ignore
                    object_=response.json(),
                )
                raise UnprocessableEntityError(typing.cast(HttpValidationError, detail))
            error_body = response.json()
        except JSONDecodeError:
            raise ApiError(status_code=response.status_code, body=response.text)
        raise ApiError(status_code=response.status_code, body=error_body)

    def retrieve(self, batch_id: str, *, request_options: typing.Optional[RequestOptions] = None) -> BatchJob:
        """
        Get the status of a batch run.

        Parameters
        ----------
        batch_id : str

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        BatchJob
            Successful Response

        Raises
        ------
        UnprocessableEntityError
            If the server responds with HTTP 422.
        ApiError
            For any other non-2xx response.

        Examples
        --------
        from letta_client import Letta

        client = Letta(
            token="YOUR_TOKEN",
        )
        client.messages.batches.retrieve(
            batch_id="batch_id",
        )
        """
        response = self._client_wrapper.httpx_client.request(
            f"v1/messages/batches/{jsonable_encoder(batch_id)}",
            method="GET",
            request_options=request_options,
        )
        try:
            if 200 <= response.status_code < 300:
                parsed = construct_type(
                    type_=BatchJob,  # type: ignore
                    object_=response.json(),
                )
                return typing.cast(BatchJob, parsed)
            if response.status_code == 422:
                detail = construct_type(
                    type_=HttpValidationError,  # type: ignore
                    object_=response.json(),
                )
                raise UnprocessableEntityError(typing.cast(HttpValidationError, detail))
            error_body = response.json()
        except JSONDecodeError:
            raise ApiError(status_code=response.status_code, body=response.text)
        raise ApiError(status_code=response.status_code, body=error_body)

    def cancel(self, batch_id: str, *, request_options: typing.Optional[RequestOptions] = None) -> None:
        """
        Cancel a batch run.

        Parameters
        ----------
        batch_id : str

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        None

        Raises
        ------
        ApiError
            For any non-2xx response.

        Examples
        --------
        from letta_client import Letta

        client = Letta(
            token="YOUR_TOKEN",
        )
        client.messages.batches.cancel(
            batch_id="batch_id",
        )
        """
        response = self._client_wrapper.httpx_client.request(
            f"v1/messages/batches/{jsonable_encoder(batch_id)}",
            method="PATCH",
            request_options=request_options,
        )
        try:
            if 200 <= response.status_code < 300:
                # Success carries no payload of interest.
                return
            error_body = response.json()
        except JSONDecodeError:
            raise ApiError(status_code=response.status_code, body=response.text)
        raise ApiError(status_code=response.status_code, body=error_body)
260
+
261
+
262
class AsyncBatchesClient:
    """Asynchronous client for the ``v1/messages/batches`` endpoints."""

    def __init__(self, *, client_wrapper: AsyncClientWrapper):
        # Wrapper carrying the configured httpx client, auth and base URL.
        self._client_wrapper = client_wrapper

    async def list(self, *, request_options: typing.Optional[RequestOptions] = None) -> typing.List[BatchJob]:
        """
        List all batch runs.

        Parameters
        ----------
        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        typing.List[BatchJob]
            Successful Response

        Raises
        ------
        UnprocessableEntityError
            If the server responds with HTTP 422.
        ApiError
            For any other non-2xx response.

        Examples
        --------
        import asyncio

        from letta_client import AsyncLetta

        client = AsyncLetta(
            token="YOUR_TOKEN",
        )


        async def main() -> None:
            await client.messages.batches.list()


        asyncio.run(main())
        """
        response = await self._client_wrapper.httpx_client.request(
            "v1/messages/batches",
            method="GET",
            request_options=request_options,
        )
        try:
            if 200 <= response.status_code < 300:
                parsed = construct_type(
                    type_=typing.List[BatchJob],  # type: ignore
                    object_=response.json(),
                )
                return typing.cast(typing.List[BatchJob], parsed)
            if response.status_code == 422:
                detail = construct_type(
                    type_=HttpValidationError,  # type: ignore
                    object_=response.json(),
                )
                raise UnprocessableEntityError(typing.cast(HttpValidationError, detail))
            error_body = response.json()
        except JSONDecodeError:
            # Error payload was not valid JSON; surface the raw text instead.
            raise ApiError(status_code=response.status_code, body=response.text)
        raise ApiError(status_code=response.status_code, body=error_body)

    async def create(
        self,
        *,
        requests: typing.Sequence[LettaBatchRequest],
        callback_url: typing.Optional[str] = OMIT,
        request_options: typing.Optional[RequestOptions] = None,
    ) -> BatchJob:
        """
        Submit a batch of agent messages for asynchronous processing.
        Creates a job that will fan out messages to all listed agents and process them in parallel.

        Parameters
        ----------
        requests : typing.Sequence[LettaBatchRequest]
            List of requests to be processed in batch.

        callback_url : typing.Optional[str]
            Optional URL to call via POST when the batch completes.

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        BatchJob
            Successful Response

        Raises
        ------
        UnprocessableEntityError
            If the server responds with HTTP 422.
        ApiError
            For any other non-2xx response.

        Examples
        --------
        import asyncio

        from letta_client import (
            AsyncLetta,
            LettaBatchRequest,
            MessageCreate,
            TextContent,
        )

        client = AsyncLetta(
            token="YOUR_TOKEN",
        )


        async def main() -> None:
            await client.messages.batches.create(
                requests=[
                    LettaBatchRequest(
                        messages=[
                            MessageCreate(
                                role="user",
                                content=[
                                    TextContent(
                                        text="text",
                                    )
                                ],
                            )
                        ],
                        agent_id="agent_id",
                    )
                ],
            )


        asyncio.run(main())
        """
        # Serialize the request body, honoring any field aliases on the models.
        payload = {
            "requests": convert_and_respect_annotation_metadata(
                object_=requests, annotation=typing.Sequence[LettaBatchRequest], direction="write"
            ),
            "callback_url": callback_url,
        }
        response = await self._client_wrapper.httpx_client.request(
            "v1/messages/batches",
            method="POST",
            json=payload,
            headers={
                "content-type": "application/json",
            },
            request_options=request_options,
            omit=OMIT,
        )
        try:
            if 200 <= response.status_code < 300:
                parsed = construct_type(
                    type_=BatchJob,  # type: ignore
                    object_=response.json(),
                )
                return typing.cast(BatchJob, parsed)
            if response.status_code == 422:
                detail = construct_type(
                    type_=HttpValidationError,  # type: ignore
                    object_=response.json(),
                )
                raise UnprocessableEntityError(typing.cast(HttpValidationError, detail))
            error_body = response.json()
        except JSONDecodeError:
            raise ApiError(status_code=response.status_code, body=response.text)
        raise ApiError(status_code=response.status_code, body=error_body)

    async def retrieve(self, batch_id: str, *, request_options: typing.Optional[RequestOptions] = None) -> BatchJob:
        """
        Get the status of a batch run.

        Parameters
        ----------
        batch_id : str

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        BatchJob
            Successful Response

        Raises
        ------
        UnprocessableEntityError
            If the server responds with HTTP 422.
        ApiError
            For any other non-2xx response.

        Examples
        --------
        import asyncio

        from letta_client import AsyncLetta

        client = AsyncLetta(
            token="YOUR_TOKEN",
        )


        async def main() -> None:
            await client.messages.batches.retrieve(
                batch_id="batch_id",
            )


        asyncio.run(main())
        """
        response = await self._client_wrapper.httpx_client.request(
            f"v1/messages/batches/{jsonable_encoder(batch_id)}",
            method="GET",
            request_options=request_options,
        )
        try:
            if 200 <= response.status_code < 300:
                parsed = construct_type(
                    type_=BatchJob,  # type: ignore
                    object_=response.json(),
                )
                return typing.cast(BatchJob, parsed)
            if response.status_code == 422:
                detail = construct_type(
                    type_=HttpValidationError,  # type: ignore
                    object_=response.json(),
                )
                raise UnprocessableEntityError(typing.cast(HttpValidationError, detail))
            error_body = response.json()
        except JSONDecodeError:
            raise ApiError(status_code=response.status_code, body=response.text)
        raise ApiError(status_code=response.status_code, body=error_body)

    async def cancel(self, batch_id: str, *, request_options: typing.Optional[RequestOptions] = None) -> None:
        """
        Cancel a batch run.

        Parameters
        ----------
        batch_id : str

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        None

        Raises
        ------
        ApiError
            For any non-2xx response.

        Examples
        --------
        import asyncio

        from letta_client import AsyncLetta

        client = AsyncLetta(
            token="YOUR_TOKEN",
        )


        async def main() -> None:
            await client.messages.batches.cancel(
                batch_id="batch_id",
            )


        asyncio.run(main())
        """
        response = await self._client_wrapper.httpx_client.request(
            f"v1/messages/batches/{jsonable_encoder(batch_id)}",
            method="PATCH",
            request_options=request_options,
        )
        try:
            if 200 <= response.status_code < 300:
                # Success carries no payload of interest.
                return
            error_body = response.json()
        except JSONDecodeError:
            raise ApiError(status_code=response.status_code, body=response.text)
        raise ApiError(status_code=response.status_code, body=error_body)
@@ -0,0 +1,150 @@
1
+ # This file was auto-generated by Fern from our API Definition.
2
+
3
+ from ..core.client_wrapper import SyncClientWrapper
4
+ from .batches.client import BatchesClient
5
+ import typing
6
+ from ..core.request_options import RequestOptions
7
+ from ..core.jsonable_encoder import jsonable_encoder
8
+ from ..core.unchecked_base_model import construct_type
9
+ from ..errors.unprocessable_entity_error import UnprocessableEntityError
10
+ from ..types.http_validation_error import HttpValidationError
11
+ from json.decoder import JSONDecodeError
12
+ from ..core.api_error import ApiError
13
+ from ..core.client_wrapper import AsyncClientWrapper
14
+ from .batches.client import AsyncBatchesClient
15
+
16
+
17
class MessagesClient:
    """Synchronous client for message endpoints, with nested batch access."""

    def __init__(self, *, client_wrapper: SyncClientWrapper):
        self._client_wrapper = client_wrapper
        # Nested resource client covering the v1/messages/batches endpoints.
        self.batches = BatchesClient(client_wrapper=self._client_wrapper)

    def cancel_batch_run(
        self, batch_id: str, *, request_options: typing.Optional[RequestOptions] = None
    ) -> typing.Optional[typing.Any]:
        """
        Cancel a batch run.

        Parameters
        ----------
        batch_id : str

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        typing.Optional[typing.Any]
            Successful Response

        Raises
        ------
        UnprocessableEntityError
            If the server responds with HTTP 422.
        ApiError
            For any other non-2xx response.

        Examples
        --------
        from letta_client import Letta

        client = Letta(
            token="YOUR_TOKEN",
        )
        client.messages.cancel_batch_run(
            batch_id="batch_id",
        )
        """
        response = self._client_wrapper.httpx_client.request(
            f"v1/messages/batches/{jsonable_encoder(batch_id)}/cancel",
            method="PATCH",
            request_options=request_options,
        )
        try:
            if 200 <= response.status_code < 300:
                parsed = construct_type(
                    type_=typing.Optional[typing.Any],  # type: ignore
                    object_=response.json(),
                )
                return typing.cast(typing.Optional[typing.Any], parsed)
            if response.status_code == 422:
                detail = construct_type(
                    type_=HttpValidationError,  # type: ignore
                    object_=response.json(),
                )
                raise UnprocessableEntityError(typing.cast(HttpValidationError, detail))
            error_body = response.json()
        except JSONDecodeError:
            # Error payload was not valid JSON; surface the raw text instead.
            raise ApiError(status_code=response.status_code, body=response.text)
        raise ApiError(status_code=response.status_code, body=error_body)
79
+
80
+
81
class AsyncMessagesClient:
    """Asynchronous client for message endpoints, with nested batch access."""

    def __init__(self, *, client_wrapper: AsyncClientWrapper):
        self._client_wrapper = client_wrapper
        # Nested resource client covering the v1/messages/batches endpoints.
        self.batches = AsyncBatchesClient(client_wrapper=self._client_wrapper)

    async def cancel_batch_run(
        self, batch_id: str, *, request_options: typing.Optional[RequestOptions] = None
    ) -> typing.Optional[typing.Any]:
        """
        Cancel a batch run.

        Parameters
        ----------
        batch_id : str

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        typing.Optional[typing.Any]
            Successful Response

        Raises
        ------
        UnprocessableEntityError
            If the server responds with HTTP 422.
        ApiError
            For any other non-2xx response.

        Examples
        --------
        import asyncio

        from letta_client import AsyncLetta

        client = AsyncLetta(
            token="YOUR_TOKEN",
        )


        async def main() -> None:
            await client.messages.cancel_batch_run(
                batch_id="batch_id",
            )


        asyncio.run(main())
        """
        response = await self._client_wrapper.httpx_client.request(
            f"v1/messages/batches/{jsonable_encoder(batch_id)}/cancel",
            method="PATCH",
            request_options=request_options,
        )
        try:
            if 200 <= response.status_code < 300:
                parsed = construct_type(
                    type_=typing.Optional[typing.Any],  # type: ignore
                    object_=response.json(),
                )
                return typing.cast(typing.Optional[typing.Any], parsed)
            if response.status_code == 422:
                detail = construct_type(
                    type_=HttpValidationError,  # type: ignore
                    object_=response.json(),
                )
                raise UnprocessableEntityError(typing.cast(HttpValidationError, detail))
            error_body = response.json()
        except JSONDecodeError:
            # Error payload was not valid JSON; surface the raw text instead.
            raise ApiError(status_code=response.status_code, body=response.text)
        raise ApiError(status_code=response.status_code, body=error_body)