letta-client 0.1.115__py3-none-any.whl → 0.1.117__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of letta-client might be problematic. Click here for more details.

@@ -0,0 +1,521 @@
1
+ # This file was auto-generated by Fern from our API Definition.
2
+
3
+ import typing
4
+ from ...core.client_wrapper import SyncClientWrapper
5
+ from ...core.request_options import RequestOptions
6
+ from ...types.batch_job import BatchJob
7
+ from ...core.unchecked_base_model import construct_type
8
+ from ...errors.unprocessable_entity_error import UnprocessableEntityError
9
+ from ...types.http_validation_error import HttpValidationError
10
+ from json.decoder import JSONDecodeError
11
+ from ...core.api_error import ApiError
12
+ from ...types.letta_batch_request import LettaBatchRequest
13
+ from ...core.serialization import convert_and_respect_annotation_metadata
14
+ from ...core.jsonable_encoder import jsonable_encoder
15
+ from ...core.client_wrapper import AsyncClientWrapper
16
+
17
# Sentinel used as the default for optional request parameters so the client
# can distinguish "argument not provided" from an explicit ``None``.
OMIT = typing.cast(typing.Any, ...)
19
+
20
+
21
class BatchesClient:
    """Synchronous client for the ``v1/messages/batches`` endpoints."""

    def __init__(self, *, client_wrapper: SyncClientWrapper):
        self._client_wrapper = client_wrapper

    def _decode(self, response: typing.Any, success_type: typing.Any) -> typing.Any:
        # Shared response handling: build the success model for 2xx, map a 422
        # payload onto UnprocessableEntityError, and wrap every other outcome
        # (including non-JSON bodies) in ApiError.
        try:
            if 200 <= response.status_code < 300:
                return construct_type(
                    type_=success_type,  # type: ignore
                    object_=response.json(),
                )
            if response.status_code == 422:
                raise UnprocessableEntityError(
                    typing.cast(
                        HttpValidationError,
                        construct_type(
                            type_=HttpValidationError,  # type: ignore
                            object_=response.json(),
                        ),
                    )
                )
            error_body = response.json()
        except JSONDecodeError:
            raise ApiError(status_code=response.status_code, body=response.text)
        raise ApiError(status_code=response.status_code, body=error_body)

    def list(self, *, request_options: typing.Optional[RequestOptions] = None) -> typing.List[BatchJob]:
        """
        List all batch runs.

        Parameters
        ----------
        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        typing.List[BatchJob]
            Successful Response
        """
        raw = self._client_wrapper.httpx_client.request(
            "v1/messages/batches",
            method="GET",
            request_options=request_options,
        )
        return typing.cast(typing.List[BatchJob], self._decode(raw, typing.List[BatchJob]))

    def create(
        self, *, requests: typing.Sequence[LettaBatchRequest], request_options: typing.Optional[RequestOptions] = None
    ) -> BatchJob:
        """
        Submit a batch of agent messages for asynchronous processing.
        Creates a job that will fan out messages to all listed agents and process them in parallel.

        Parameters
        ----------
        requests : typing.Sequence[LettaBatchRequest]
            List of requests to be processed in batch.

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        BatchJob
            Successful Response
        """
        payload = {
            "requests": convert_and_respect_annotation_metadata(
                object_=requests, annotation=typing.Sequence[LettaBatchRequest], direction="write"
            ),
        }
        raw = self._client_wrapper.httpx_client.request(
            "v1/messages/batches",
            method="POST",
            json=payload,
            headers={
                "content-type": "application/json",
            },
            request_options=request_options,
            omit=OMIT,
        )
        return typing.cast(BatchJob, self._decode(raw, BatchJob))

    def retrieve(self, batch_id: str, *, request_options: typing.Optional[RequestOptions] = None) -> BatchJob:
        """
        Get the status of a batch run.

        Parameters
        ----------
        batch_id : str

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        BatchJob
            Successful Response
        """
        raw = self._client_wrapper.httpx_client.request(
            f"v1/messages/batches/{jsonable_encoder(batch_id)}",
            method="GET",
            request_options=request_options,
        )
        return typing.cast(BatchJob, self._decode(raw, BatchJob))

    def cancel(self, batch_id: str, *, request_options: typing.Optional[RequestOptions] = None) -> None:
        """
        Cancel a batch run via ``PATCH``; returns nothing on success.

        Parameters
        ----------
        batch_id : str

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        None
        """
        raw = self._client_wrapper.httpx_client.request(
            f"v1/messages/batches/{jsonable_encoder(batch_id)}",
            method="PATCH",
            request_options=request_options,
        )
        # Success carries no body to parse; any other status is surfaced as ApiError.
        if 200 <= raw.status_code < 300:
            return
        try:
            error_body = raw.json()
        except JSONDecodeError:
            raise ApiError(status_code=raw.status_code, body=raw.text)
        raise ApiError(status_code=raw.status_code, body=error_body)
252
+
253
+
254
class AsyncBatchesClient:
    """Asynchronous client for the ``v1/messages/batches`` endpoints."""

    def __init__(self, *, client_wrapper: AsyncClientWrapper):
        self._client_wrapper = client_wrapper

    def _decode(self, response: typing.Any, success_type: typing.Any) -> typing.Any:
        # Shared response handling: build the success model for 2xx, map a 422
        # payload onto UnprocessableEntityError, and wrap every other outcome
        # (including non-JSON bodies) in ApiError. Body parsing is synchronous,
        # so this helper needs no ``await``.
        try:
            if 200 <= response.status_code < 300:
                return construct_type(
                    type_=success_type,  # type: ignore
                    object_=response.json(),
                )
            if response.status_code == 422:
                raise UnprocessableEntityError(
                    typing.cast(
                        HttpValidationError,
                        construct_type(
                            type_=HttpValidationError,  # type: ignore
                            object_=response.json(),
                        ),
                    )
                )
            error_body = response.json()
        except JSONDecodeError:
            raise ApiError(status_code=response.status_code, body=response.text)
        raise ApiError(status_code=response.status_code, body=error_body)

    async def list(self, *, request_options: typing.Optional[RequestOptions] = None) -> typing.List[BatchJob]:
        """
        List all batch runs.

        Parameters
        ----------
        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        typing.List[BatchJob]
            Successful Response
        """
        raw = await self._client_wrapper.httpx_client.request(
            "v1/messages/batches",
            method="GET",
            request_options=request_options,
        )
        return typing.cast(typing.List[BatchJob], self._decode(raw, typing.List[BatchJob]))

    async def create(
        self, *, requests: typing.Sequence[LettaBatchRequest], request_options: typing.Optional[RequestOptions] = None
    ) -> BatchJob:
        """
        Submit a batch of agent messages for asynchronous processing.
        Creates a job that will fan out messages to all listed agents and process them in parallel.

        Parameters
        ----------
        requests : typing.Sequence[LettaBatchRequest]
            List of requests to be processed in batch.

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        BatchJob
            Successful Response
        """
        payload = {
            "requests": convert_and_respect_annotation_metadata(
                object_=requests, annotation=typing.Sequence[LettaBatchRequest], direction="write"
            ),
        }
        raw = await self._client_wrapper.httpx_client.request(
            "v1/messages/batches",
            method="POST",
            json=payload,
            headers={
                "content-type": "application/json",
            },
            request_options=request_options,
            omit=OMIT,
        )
        return typing.cast(BatchJob, self._decode(raw, BatchJob))

    async def retrieve(self, batch_id: str, *, request_options: typing.Optional[RequestOptions] = None) -> BatchJob:
        """
        Get the status of a batch run.

        Parameters
        ----------
        batch_id : str

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        BatchJob
            Successful Response
        """
        raw = await self._client_wrapper.httpx_client.request(
            f"v1/messages/batches/{jsonable_encoder(batch_id)}",
            method="GET",
            request_options=request_options,
        )
        return typing.cast(BatchJob, self._decode(raw, BatchJob))

    async def cancel(self, batch_id: str, *, request_options: typing.Optional[RequestOptions] = None) -> None:
        """
        Cancel a batch run via ``PATCH``; returns nothing on success.

        Parameters
        ----------
        batch_id : str

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        None
        """
        raw = await self._client_wrapper.httpx_client.request(
            f"v1/messages/batches/{jsonable_encoder(batch_id)}",
            method="PATCH",
            request_options=request_options,
        )
        # Success carries no body to parse; any other status is surfaced as ApiError.
        if 200 <= raw.status_code < 300:
            return
        try:
            error_body = raw.json()
        except JSONDecodeError:
            raise ApiError(status_code=raw.status_code, body=raw.text)
        raise ApiError(status_code=raw.status_code, body=error_body)
@@ -0,0 +1,150 @@
1
+ # This file was auto-generated by Fern from our API Definition.
2
+
3
+ from ..core.client_wrapper import SyncClientWrapper
4
+ from .batches.client import BatchesClient
5
+ import typing
6
+ from ..core.request_options import RequestOptions
7
+ from ..core.jsonable_encoder import jsonable_encoder
8
+ from ..core.unchecked_base_model import construct_type
9
+ from ..errors.unprocessable_entity_error import UnprocessableEntityError
10
+ from ..types.http_validation_error import HttpValidationError
11
+ from json.decoder import JSONDecodeError
12
+ from ..core.api_error import ApiError
13
+ from ..core.client_wrapper import AsyncClientWrapper
14
+ from .batches.client import AsyncBatchesClient
15
+
16
+
17
class MessagesClient:
    """Synchronous client for message-level endpoints; batch CRUD lives on ``self.batches``."""

    def __init__(self, *, client_wrapper: SyncClientWrapper):
        self._client_wrapper = client_wrapper
        self.batches = BatchesClient(client_wrapper=self._client_wrapper)

    def cancel_batch_run(
        self, batch_id: str, *, request_options: typing.Optional[RequestOptions] = None
    ) -> typing.Optional[typing.Any]:
        """
        Cancel a batch run.

        Parameters
        ----------
        batch_id : str

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        typing.Optional[typing.Any]
            Successful Response
        """
        response = self._client_wrapper.httpx_client.request(
            f"v1/messages/batches/{jsonable_encoder(batch_id)}/cancel",
            method="PATCH",
            request_options=request_options,
        )
        try:
            if 200 <= response.status_code < 300:
                parsed = construct_type(
                    type_=typing.Optional[typing.Any],  # type: ignore
                    object_=response.json(),
                )
                return typing.cast(typing.Optional[typing.Any], parsed)
            if response.status_code == 422:
                # Validation failures come back with a structured error payload.
                raise UnprocessableEntityError(
                    typing.cast(
                        HttpValidationError,
                        construct_type(
                            type_=HttpValidationError,  # type: ignore
                            object_=response.json(),
                        ),
                    )
                )
            error_body = response.json()
        except JSONDecodeError:
            # Non-JSON body: fall back to the raw text in the error.
            raise ApiError(status_code=response.status_code, body=response.text)
        raise ApiError(status_code=response.status_code, body=error_body)
+
81
class AsyncMessagesClient:
    """Asynchronous client for message-level endpoints; batch CRUD lives on ``self.batches``."""

    def __init__(self, *, client_wrapper: AsyncClientWrapper):
        self._client_wrapper = client_wrapper
        self.batches = AsyncBatchesClient(client_wrapper=self._client_wrapper)

    async def cancel_batch_run(
        self, batch_id: str, *, request_options: typing.Optional[RequestOptions] = None
    ) -> typing.Optional[typing.Any]:
        """
        Cancel a batch run.

        Parameters
        ----------
        batch_id : str

        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        typing.Optional[typing.Any]
            Successful Response
        """
        response = await self._client_wrapper.httpx_client.request(
            f"v1/messages/batches/{jsonable_encoder(batch_id)}/cancel",
            method="PATCH",
            request_options=request_options,
        )
        try:
            if 200 <= response.status_code < 300:
                parsed = construct_type(
                    type_=typing.Optional[typing.Any],  # type: ignore
                    object_=response.json(),
                )
                return typing.cast(typing.Optional[typing.Any], parsed)
            if response.status_code == 422:
                # Validation failures come back with a structured error payload.
                raise UnprocessableEntityError(
                    typing.cast(
                        HttpValidationError,
                        construct_type(
                            type_=HttpValidationError,  # type: ignore
                            object_=response.json(),
                        ),
                    )
                )
            error_body = response.json()
        except JSONDecodeError:
            # Non-JSON body: fall back to the raw text in the error.
            raise ApiError(status_code=response.status_code, body=response.text)
        raise ApiError(status_code=response.status_code, body=error_body)
@@ -0,0 +1,5 @@
1
+ # This file was auto-generated by Fern from our API Definition.
2
+
3
+ from .types import ProjectsListProjectsResponse, ProjectsListProjectsResponseProjectsItem
4
+
5
+ __all__ = ["ProjectsListProjectsResponse", "ProjectsListProjectsResponseProjectsItem"]