letta-client 0.1.276__py3-none-any.whl → 0.1.277__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of letta-client might be problematic. Click here for more details.

letta_client/__init__.py CHANGED
@@ -370,6 +370,7 @@ from .client_side_access_tokens import (
370
370
  from .environment import LettaEnvironment
371
371
  from .groups import GroupCreateManagerConfig, GroupUpdateManagerConfig
372
372
  from .projects import ProjectsListResponse, ProjectsListResponseProjectsItem
373
+ from .runs import LettaStreamingResponse
373
374
  from .steps import StepsListRequestFeedback
374
375
  from .templates import (
375
376
  TemplatesCreateTemplateResponse,
@@ -623,6 +624,7 @@ __all__ = [
623
624
  "LettaSerializeSchemasPydanticAgentSchemaToolSchema",
624
625
  "LettaStopReason",
625
626
  "LettaStreamingRequest",
627
+ "LettaStreamingResponse",
626
628
  "LettaUsageStatistics",
627
629
  "LettaUserMessageContentUnion",
628
630
  "ListMcpServersResponseValue",
@@ -24,10 +24,10 @@ class BaseClientWrapper:
24
24
 
25
25
  def get_headers(self) -> typing.Dict[str, str]:
26
26
  headers: typing.Dict[str, str] = {
27
- "User-Agent": "letta-client/0.1.276",
27
+ "User-Agent": "letta-client/0.1.277",
28
28
  "X-Fern-Language": "Python",
29
29
  "X-Fern-SDK-Name": "letta-client",
30
- "X-Fern-SDK-Version": "0.1.276",
30
+ "X-Fern-SDK-Version": "0.1.277",
31
31
  **(self.get_custom_headers() or {}),
32
32
  }
33
33
  if self._project is not None:
@@ -2,6 +2,7 @@
2
2
 
3
3
  # isort: skip_file
4
4
 
5
+ from .types import LettaStreamingResponse
5
6
  from . import messages, steps, usage
6
7
 
7
- __all__ = ["messages", "steps", "usage"]
8
+ __all__ = ["LettaStreamingResponse", "messages", "steps", "usage"]
@@ -8,8 +8,12 @@ from ..types.run import Run
8
8
  from .messages.client import AsyncMessagesClient, MessagesClient
9
9
  from .raw_client import AsyncRawRunsClient, RawRunsClient
10
10
  from .steps.client import AsyncStepsClient, StepsClient
11
+ from .types.letta_streaming_response import LettaStreamingResponse
11
12
  from .usage.client import AsyncUsageClient, UsageClient
12
13
 
14
+ # this is used as the default value for optional parameters
15
+ OMIT = typing.cast(typing.Any, ...)
16
+
13
17
 
14
18
  class RunsClient:
15
19
  def __init__(self, *, client_wrapper: SyncClientWrapper):
@@ -163,6 +167,65 @@ class RunsClient:
163
167
  _response = self._raw_client.delete(run_id, request_options=request_options)
164
168
  return _response.data
165
169
 
170
+ def stream(
171
+ self,
172
+ run_id: str,
173
+ *,
174
+ starting_after: typing.Optional[int] = OMIT,
175
+ include_pings: typing.Optional[bool] = OMIT,
176
+ poll_interval: typing.Optional[float] = OMIT,
177
+ batch_size: typing.Optional[int] = OMIT,
178
+ request_options: typing.Optional[RequestOptions] = None,
179
+ ) -> typing.Iterator[LettaStreamingResponse]:
180
+ """
181
+ Parameters
182
+ ----------
183
+ run_id : str
184
+
185
+ starting_after : typing.Optional[int]
186
+ Sequence id to use as a cursor for pagination. Response will start streaming after this chunk sequence id
187
+
188
+ include_pings : typing.Optional[bool]
189
+ Whether to include periodic keepalive ping messages in the stream to prevent connection timeouts.
190
+
191
+ poll_interval : typing.Optional[float]
192
+ Seconds to wait between polls when no new data.
193
+
194
+ batch_size : typing.Optional[int]
195
+ Number of entries to read per batch.
196
+
197
+ request_options : typing.Optional[RequestOptions]
198
+ Request-specific configuration.
199
+
200
+ Yields
201
+ ------
202
+ typing.Iterator[LettaStreamingResponse]
203
+ Successful response
204
+
205
+ Examples
206
+ --------
207
+ from letta_client import Letta
208
+
209
+ client = Letta(
210
+ project="YOUR_PROJECT",
211
+ token="YOUR_TOKEN",
212
+ )
213
+ response = client.runs.stream(
214
+ run_id="run_id",
215
+ )
216
+ for chunk in response:
217
+ print(chunk)
218
+ """
219
+ with self._raw_client.stream(
220
+ run_id,
221
+ starting_after=starting_after,
222
+ include_pings=include_pings,
223
+ poll_interval=poll_interval,
224
+ batch_size=batch_size,
225
+ request_options=request_options,
226
+ ) as r:
227
+ yield from r.data
228
+
166
229
 
167
230
  class AsyncRunsClient:
168
231
  def __init__(self, *, client_wrapper: AsyncClientWrapper):
@@ -347,3 +410,71 @@ class AsyncRunsClient:
347
410
  """
348
411
  _response = await self._raw_client.delete(run_id, request_options=request_options)
349
412
  return _response.data
413
+
414
+ async def stream(
415
+ self,
416
+ run_id: str,
417
+ *,
418
+ starting_after: typing.Optional[int] = OMIT,
419
+ include_pings: typing.Optional[bool] = OMIT,
420
+ poll_interval: typing.Optional[float] = OMIT,
421
+ batch_size: typing.Optional[int] = OMIT,
422
+ request_options: typing.Optional[RequestOptions] = None,
423
+ ) -> typing.AsyncIterator[LettaStreamingResponse]:
424
+ """
425
+ Parameters
426
+ ----------
427
+ run_id : str
428
+
429
+ starting_after : typing.Optional[int]
430
+ Sequence id to use as a cursor for pagination. Response will start streaming after this chunk sequence id
431
+
432
+ include_pings : typing.Optional[bool]
433
+ Whether to include periodic keepalive ping messages in the stream to prevent connection timeouts.
434
+
435
+ poll_interval : typing.Optional[float]
436
+ Seconds to wait between polls when no new data.
437
+
438
+ batch_size : typing.Optional[int]
439
+ Number of entries to read per batch.
440
+
441
+ request_options : typing.Optional[RequestOptions]
442
+ Request-specific configuration.
443
+
444
+ Yields
445
+ ------
446
+ typing.AsyncIterator[LettaStreamingResponse]
447
+ Successful response
448
+
449
+ Examples
450
+ --------
451
+ import asyncio
452
+
453
+ from letta_client import AsyncLetta
454
+
455
+ client = AsyncLetta(
456
+ project="YOUR_PROJECT",
457
+ token="YOUR_TOKEN",
458
+ )
459
+
460
+
461
+ async def main() -> None:
462
+ response = await client.runs.stream(
463
+ run_id="run_id",
464
+ )
465
+ async for chunk in response:
466
+ print(chunk)
467
+
468
+
469
+ asyncio.run(main())
470
+ """
471
+ async with self._raw_client.stream(
472
+ run_id,
473
+ starting_after=starting_after,
474
+ include_pings=include_pings,
475
+ poll_interval=poll_interval,
476
+ batch_size=batch_size,
477
+ request_options=request_options,
478
+ ) as r:
479
+ async for _chunk in r.data:
480
+ yield _chunk
@@ -1,8 +1,11 @@
1
1
  # This file was auto-generated by Fern from our API Definition.
2
2
 
3
+ import contextlib
4
+ import json
3
5
  import typing
4
6
  from json.decoder import JSONDecodeError
5
7
 
8
+ import httpx_sse
6
9
  from ..core.api_error import ApiError
7
10
  from ..core.client_wrapper import AsyncClientWrapper, SyncClientWrapper
8
11
  from ..core.http_response import AsyncHttpResponse, HttpResponse
@@ -12,6 +15,10 @@ from ..core.unchecked_base_model import construct_type
12
15
  from ..errors.unprocessable_entity_error import UnprocessableEntityError
13
16
  from ..types.http_validation_error import HttpValidationError
14
17
  from ..types.run import Run
18
+ from .types.letta_streaming_response import LettaStreamingResponse
19
+
20
+ # this is used as the default value for optional parameters
21
+ OMIT = typing.cast(typing.Any, ...)
15
22
 
16
23
 
17
24
  class RawRunsClient:
@@ -224,6 +231,101 @@ class RawRunsClient:
224
231
  raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
225
232
  raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
226
233
 
234
+ @contextlib.contextmanager
235
+ def stream(
236
+ self,
237
+ run_id: str,
238
+ *,
239
+ starting_after: typing.Optional[int] = OMIT,
240
+ include_pings: typing.Optional[bool] = OMIT,
241
+ poll_interval: typing.Optional[float] = OMIT,
242
+ batch_size: typing.Optional[int] = OMIT,
243
+ request_options: typing.Optional[RequestOptions] = None,
244
+ ) -> typing.Iterator[HttpResponse[typing.Iterator[LettaStreamingResponse]]]:
245
+ """
246
+ Parameters
247
+ ----------
248
+ run_id : str
249
+
250
+ starting_after : typing.Optional[int]
251
+ Sequence id to use as a cursor for pagination. Response will start streaming after this chunk sequence id
252
+
253
+ include_pings : typing.Optional[bool]
254
+ Whether to include periodic keepalive ping messages in the stream to prevent connection timeouts.
255
+
256
+ poll_interval : typing.Optional[float]
257
+ Seconds to wait between polls when no new data.
258
+
259
+ batch_size : typing.Optional[int]
260
+ Number of entries to read per batch.
261
+
262
+ request_options : typing.Optional[RequestOptions]
263
+ Request-specific configuration.
264
+
265
+ Yields
266
+ ------
267
+ typing.Iterator[HttpResponse[typing.Iterator[LettaStreamingResponse]]]
268
+ Successful response
269
+ """
270
+ with self._client_wrapper.httpx_client.stream(
271
+ f"v1/runs/{jsonable_encoder(run_id)}/stream",
272
+ method="POST",
273
+ json={
274
+ "starting_after": starting_after,
275
+ "include_pings": include_pings,
276
+ "poll_interval": poll_interval,
277
+ "batch_size": batch_size,
278
+ },
279
+ headers={
280
+ "content-type": "application/json",
281
+ },
282
+ request_options=request_options,
283
+ omit=OMIT,
284
+ ) as _response:
285
+
286
+ def _stream() -> HttpResponse[typing.Iterator[LettaStreamingResponse]]:
287
+ try:
288
+ if 200 <= _response.status_code < 300:
289
+
290
+ def _iter():
291
+ _event_source = httpx_sse.EventSource(_response)
292
+ for _sse in _event_source.iter_sse():
293
+ if _sse.data is None:
294
+ return
295
+ try:
296
+ yield typing.cast(
297
+ LettaStreamingResponse,
298
+ construct_type(
299
+ type_=LettaStreamingResponse, # type: ignore
300
+ object_=json.loads(_sse.data),
301
+ ),
302
+ )
303
+ except Exception:
304
+ pass
305
+ return
306
+
307
+ return HttpResponse(response=_response, data=_iter())
308
+ _response.read()
309
+ if _response.status_code == 422:
310
+ raise UnprocessableEntityError(
311
+ headers=dict(_response.headers),
312
+ body=typing.cast(
313
+ HttpValidationError,
314
+ construct_type(
315
+ type_=HttpValidationError, # type: ignore
316
+ object_=_response.json(),
317
+ ),
318
+ ),
319
+ )
320
+ _response_json = _response.json()
321
+ except JSONDecodeError:
322
+ raise ApiError(
323
+ status_code=_response.status_code, headers=dict(_response.headers), body=_response.text
324
+ )
325
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
326
+
327
+ yield _stream()
328
+
227
329
 
228
330
  class AsyncRawRunsClient:
229
331
  def __init__(self, *, client_wrapper: AsyncClientWrapper):
@@ -438,3 +540,98 @@ class AsyncRawRunsClient:
438
540
  except JSONDecodeError:
439
541
  raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response.text)
440
542
  raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
543
+
544
+ @contextlib.asynccontextmanager
545
+ async def stream(
546
+ self,
547
+ run_id: str,
548
+ *,
549
+ starting_after: typing.Optional[int] = OMIT,
550
+ include_pings: typing.Optional[bool] = OMIT,
551
+ poll_interval: typing.Optional[float] = OMIT,
552
+ batch_size: typing.Optional[int] = OMIT,
553
+ request_options: typing.Optional[RequestOptions] = None,
554
+ ) -> typing.AsyncIterator[AsyncHttpResponse[typing.AsyncIterator[LettaStreamingResponse]]]:
555
+ """
556
+ Parameters
557
+ ----------
558
+ run_id : str
559
+
560
+ starting_after : typing.Optional[int]
561
+ Sequence id to use as a cursor for pagination. Response will start streaming after this chunk sequence id
562
+
563
+ include_pings : typing.Optional[bool]
564
+ Whether to include periodic keepalive ping messages in the stream to prevent connection timeouts.
565
+
566
+ poll_interval : typing.Optional[float]
567
+ Seconds to wait between polls when no new data.
568
+
569
+ batch_size : typing.Optional[int]
570
+ Number of entries to read per batch.
571
+
572
+ request_options : typing.Optional[RequestOptions]
573
+ Request-specific configuration.
574
+
575
+ Yields
576
+ ------
577
+ typing.AsyncIterator[AsyncHttpResponse[typing.AsyncIterator[LettaStreamingResponse]]]
578
+ Successful response
579
+ """
580
+ async with self._client_wrapper.httpx_client.stream(
581
+ f"v1/runs/{jsonable_encoder(run_id)}/stream",
582
+ method="POST",
583
+ json={
584
+ "starting_after": starting_after,
585
+ "include_pings": include_pings,
586
+ "poll_interval": poll_interval,
587
+ "batch_size": batch_size,
588
+ },
589
+ headers={
590
+ "content-type": "application/json",
591
+ },
592
+ request_options=request_options,
593
+ omit=OMIT,
594
+ ) as _response:
595
+
596
+ async def _stream() -> AsyncHttpResponse[typing.AsyncIterator[LettaStreamingResponse]]:
597
+ try:
598
+ if 200 <= _response.status_code < 300:
599
+
600
+ async def _iter():
601
+ _event_source = httpx_sse.EventSource(_response)
602
+ async for _sse in _event_source.aiter_sse():
603
+ if _sse.data is None:
604
+ return
605
+ try:
606
+ yield typing.cast(
607
+ LettaStreamingResponse,
608
+ construct_type(
609
+ type_=LettaStreamingResponse, # type: ignore
610
+ object_=json.loads(_sse.data),
611
+ ),
612
+ )
613
+ except Exception:
614
+ pass
615
+ return
616
+
617
+ return AsyncHttpResponse(response=_response, data=_iter())
618
+ await _response.aread()
619
+ if _response.status_code == 422:
620
+ raise UnprocessableEntityError(
621
+ headers=dict(_response.headers),
622
+ body=typing.cast(
623
+ HttpValidationError,
624
+ construct_type(
625
+ type_=HttpValidationError, # type: ignore
626
+ object_=_response.json(),
627
+ ),
628
+ ),
629
+ )
630
+ _response_json = _response.json()
631
+ except JSONDecodeError:
632
+ raise ApiError(
633
+ status_code=_response.status_code, headers=dict(_response.headers), body=_response.text
634
+ )
635
+ raise ApiError(status_code=_response.status_code, headers=dict(_response.headers), body=_response_json)
636
+
637
+ yield await _stream()
@@ -0,0 +1,7 @@
1
+ # This file was auto-generated by Fern from our API Definition.
2
+
3
+ # isort: skip_file
4
+
5
+ from .letta_streaming_response import LettaStreamingResponse
6
+
7
+ __all__ = ["LettaStreamingResponse"]
@@ -0,0 +1,25 @@
1
+ # This file was auto-generated by Fern from our API Definition.
2
+
3
+ import typing
4
+
5
+ from ...types.assistant_message import AssistantMessage
6
+ from ...types.letta_ping import LettaPing
7
+ from ...types.letta_stop_reason import LettaStopReason
8
+ from ...types.letta_usage_statistics import LettaUsageStatistics
9
+ from ...types.reasoning_message import ReasoningMessage
10
+ from ...types.system_message import SystemMessage
11
+ from ...types.tool_call_message import ToolCallMessage
12
+ from ...types.tool_return_message import ToolReturnMessage
13
+ from ...types.user_message import UserMessage
14
+
15
+ LettaStreamingResponse = typing.Union[
16
+ SystemMessage,
17
+ UserMessage,
18
+ ReasoningMessage,
19
+ ToolCallMessage,
20
+ ToolReturnMessage,
21
+ AssistantMessage,
22
+ LettaPing,
23
+ LettaStopReason,
24
+ LettaUsageStatistics,
25
+ ]
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: letta-client
3
- Version: 0.1.276
3
+ Version: 0.1.277
4
4
  Summary:
5
5
  Requires-Python: >=3.8,<4.0
6
6
  Classifier: Intended Audience :: Developers
@@ -1,4 +1,4 @@
1
- letta_client/__init__.py,sha256=lqKjfPSuq_YX3zT6hJr-dhiio2gsyVjh6y3QuWSfe44,26442
1
+ letta_client/__init__.py,sha256=DpYv21zmjmXHKtq_-pCoVE63jYtsWeAqby9F2y6makk,26513
2
2
  letta_client/agents/__init__.py,sha256=yl1d02BPp-nGZLaUdH9mWcYvHu-1RhRyZUgpZQKOMGo,2010
3
3
  letta_client/agents/blocks/__init__.py,sha256=_VhToAyIt_5axN6CLJwtxg3-CO7THa_23pbUzqhXJa4,85
4
4
  letta_client/agents/blocks/client.py,sha256=Akx-1SYEXkmdtLtytPtdFNhVts8JkjC2aMQnnWgd8Ug,14735
@@ -90,7 +90,7 @@ letta_client/client_side_access_tokens/types/client_side_access_tokens_list_clie
90
90
  letta_client/client_side_access_tokens/types/client_side_access_tokens_list_client_side_access_tokens_response_tokens_item_policy_data_item_access_item.py,sha256=kNHfEWFl7u71Pu8NPqutod0a2NXfvq8il05Hqm0iBB4,284
91
91
  letta_client/core/__init__.py,sha256=tpn7rjb6C2UIkYZYIqdrNpI7Yax2jw88sXh2baxaxAI,1715
92
92
  letta_client/core/api_error.py,sha256=44vPoTyWN59gonCIZMdzw7M1uspygiLnr3GNFOoVL2Q,614
93
- letta_client/core/client_wrapper.py,sha256=2TcuR2LCJC4Kj7RHavzhLX1DTQaE4OMStypfv1DNG1s,2776
93
+ letta_client/core/client_wrapper.py,sha256=qM2jq77f0f7-rxfEu2Bs-StIY2KCCf7374oISiMizqw,2776
94
94
  letta_client/core/datetime_utils.py,sha256=nBys2IsYrhPdszxGKCNRPSOCwa-5DWOHG95FB8G9PKo,1047
95
95
  letta_client/core/file.py,sha256=d4NNbX8XvXP32z8KpK2Xovv33nFfruIrpz0QWxlgpZk,2663
96
96
  letta_client/core/force_multipart.py,sha256=awxh5MtcRYe74ehY8U76jzv6fYM_w_D3Rur7KQQzSDk,429
@@ -163,15 +163,17 @@ letta_client/providers/__init__.py,sha256=_VhToAyIt_5axN6CLJwtxg3-CO7THa_23pbUzq
163
163
  letta_client/providers/client.py,sha256=999OcO9GFtwmgx9PxA3lF-dEOp4ZEADsWDckeIKcKI0,18717
164
164
  letta_client/providers/raw_client.py,sha256=vg3z7P7UOjtLraW6GYb2YS5q496GYoyWN1s7u127q8E,30135
165
165
  letta_client/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
166
- letta_client/runs/__init__.py,sha256=uuC70ZTE_Kcll7LqC18Orc60J5kBAfUo4ZJFB6FRgWA,164
167
- letta_client/runs/client.py,sha256=FCH2UqXqe12RORyO10mcctNiDh5T1725CmKewCM76uk,9235
166
+ letta_client/runs/__init__.py,sha256=rTMzYM1OUKbBt0EOGKEhjNDseImuXG9gUdMCjy2mEbQ,232
167
+ letta_client/runs/client.py,sha256=SOg_BxqMyHafUJnS7Gf-J5pbMsx87RZ-s7D198neiHs,13242
168
168
  letta_client/runs/messages/__init__.py,sha256=_VhToAyIt_5axN6CLJwtxg3-CO7THa_23pbUzqhXJa4,85
169
169
  letta_client/runs/messages/client.py,sha256=Ir9uA6W7kSpMLaOgmJeUKAokT4FgAH3l3sMucH-yqds,6998
170
170
  letta_client/runs/messages/raw_client.py,sha256=PbbkMSucuK-AmhcUyAdMtdtbnDSZGHb0nvw0vJqlb3s,8963
171
- letta_client/runs/raw_client.py,sha256=acA0F0mkLuuLxgwMCR3T4lDnZeHTCzQ8rLrreR6M9Cs,16457
171
+ letta_client/runs/raw_client.py,sha256=oQFNOL2zY46DiaM3qsBuEXm_j0vPZC2EC6NhxNLQ8N8,24658
172
172
  letta_client/runs/steps/__init__.py,sha256=_VhToAyIt_5axN6CLJwtxg3-CO7THa_23pbUzqhXJa4,85
173
173
  letta_client/runs/steps/client.py,sha256=4MiWOwmSiIX-0U_ic5a-x72Svjh2N7q6etXA5VJ9V30,6074
174
174
  letta_client/runs/steps/raw_client.py,sha256=dbM7QYusDn_u4UYZl0I_jK-hCOek_m525we6boGo8jA,7973
175
+ letta_client/runs/types/__init__.py,sha256=_T3fEaCnP6BEwVQKFUrBv8iIjFqUrd-DF3hQFwjSb6Y,184
176
+ letta_client/runs/types/letta_streaming_response.py,sha256=_UJOBKMBZ6S6naOyltSnQwAhDj3MFvAXh-K107dsKKU,792
175
177
  letta_client/runs/usage/__init__.py,sha256=_VhToAyIt_5axN6CLJwtxg3-CO7THa_23pbUzqhXJa4,85
176
178
  letta_client/runs/usage/client.py,sha256=pTs7XvHjETe7lbSWjgy1GdLzS03j9VWHrY3TULhlcVM,2919
177
179
  letta_client/runs/usage/raw_client.py,sha256=cNOsh9fT6MifL9Nh7sEpuNo6LmU0C0AfRv-K2HWPLFo,4628
@@ -564,6 +566,6 @@ letta_client/version.py,sha256=bttKLbIhO3UonCYQlqs600zzbQgfhCCMjeXR9WRzid4,79
564
566
  letta_client/voice/__init__.py,sha256=_VhToAyIt_5axN6CLJwtxg3-CO7THa_23pbUzqhXJa4,85
565
567
  letta_client/voice/client.py,sha256=EbIVOQh4HXqU9McATxwga08STk-HUwPEAUr_UHqyKHg,3748
566
568
  letta_client/voice/raw_client.py,sha256=KvM_3GXuSf51bubM0RVBnxvlf20qZTFMnaA_BzhXzjQ,5938
567
- letta_client-0.1.276.dist-info/METADATA,sha256=YEVmu_Uy9sXwyrkjbp-DvZk4ZdusBx9nG05UTA6HsEM,5781
568
- letta_client-0.1.276.dist-info/WHEEL,sha256=Zb28QaM1gQi8f4VCBhsUklF61CTlNYfs9YAZn-TOGFk,88
569
- letta_client-0.1.276.dist-info/RECORD,,
569
+ letta_client-0.1.277.dist-info/METADATA,sha256=jYPmqfBlNJx64sEmeU4N7WhE20n-rwrAULTToFf_2_I,5781
570
+ letta_client-0.1.277.dist-info/WHEEL,sha256=Zb28QaM1gQi8f4VCBhsUklF61CTlNYfs9YAZn-TOGFk,88
571
+ letta_client-0.1.277.dist-info/RECORD,,