letta-client 0.1.65__py3-none-any.whl → 0.1.67__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of letta-client might be problematic; review the release advisory details before upgrading.
- letta_client/__init__.py +92 -2
- letta_client/agents/__init__.py +36 -0
- letta_client/agents/messages/client.py +12 -10
- letta_client/agents/messages/types/messages_modify_response.py +7 -5
- letta_client/agents/types/__init__.py +68 -0
- letta_client/agents/types/agents_search_response_agents_item.py +2 -0
- letta_client/agents/types/agents_search_response_agents_item_multi_agent_group.py +14 -0
- letta_client/agents/types/agents_search_response_agents_item_multi_agent_group_agent_ids.py +37 -0
- letta_client/agents/types/agents_search_response_agents_item_multi_agent_group_agent_ids_manager_agent_id.py +13 -0
- letta_client/agents/types/agents_search_response_agents_item_multi_agent_group_agent_ids_manager_agent_id_item.py +5 -0
- letta_client/agents/types/agents_search_response_agents_item_multi_agent_group_agent_ids_manager_type.py +7 -0
- letta_client/agents/types/agents_search_response_agents_item_multi_agent_group_agent_ids_max_turns.py +13 -0
- letta_client/agents/types/agents_search_response_agents_item_multi_agent_group_agent_ids_max_turns_item.py +5 -0
- letta_client/agents/types/agents_search_response_agents_item_multi_agent_group_agent_ids_termination_token.py +13 -0
- letta_client/agents/types/agents_search_response_agents_item_multi_agent_group_agent_ids_termination_token_item.py +5 -0
- letta_client/agents/types/agents_search_response_agents_item_multi_agent_group_item.py +10 -0
- letta_client/agents/types/agents_search_response_agents_item_multi_agent_group_item_agent_ids.py +37 -0
- letta_client/agents/types/agents_search_response_agents_item_multi_agent_group_item_agent_ids_manager_agent_id.py +13 -0
- letta_client/agents/types/agents_search_response_agents_item_multi_agent_group_item_agent_ids_manager_agent_id_item.py +5 -0
- letta_client/agents/types/agents_search_response_agents_item_multi_agent_group_item_agent_ids_manager_type.py +7 -0
- letta_client/agents/types/agents_search_response_agents_item_multi_agent_group_item_agent_ids_max_turns.py +13 -0
- letta_client/agents/types/agents_search_response_agents_item_multi_agent_group_item_agent_ids_max_turns_item.py +5 -0
- letta_client/agents/types/agents_search_response_agents_item_multi_agent_group_item_agent_ids_termination_token.py +13 -0
- letta_client/agents/types/agents_search_response_agents_item_multi_agent_group_item_agent_ids_termination_token_item.py +5 -0
- letta_client/base_client.py +4 -0
- letta_client/core/client_wrapper.py +1 -1
- letta_client/groups/__init__.py +2 -0
- letta_client/groups/client.py +1294 -0
- letta_client/templates/__init__.py +36 -0
- letta_client/templates/types/__init__.py +72 -0
- letta_client/templates/types/templates_create_agents_response_agents_item.py +4 -0
- letta_client/templates/types/templates_create_agents_response_agents_item_multi_agent_group.py +16 -0
- letta_client/templates/types/templates_create_agents_response_agents_item_multi_agent_group_agent_ids.py +41 -0
- letta_client/templates/types/templates_create_agents_response_agents_item_multi_agent_group_agent_ids_manager_agent_id.py +13 -0
- letta_client/templates/types/templates_create_agents_response_agents_item_multi_agent_group_agent_ids_manager_agent_id_item.py +7 -0
- letta_client/templates/types/templates_create_agents_response_agents_item_multi_agent_group_agent_ids_manager_type.py +7 -0
- letta_client/templates/types/templates_create_agents_response_agents_item_multi_agent_group_agent_ids_max_turns.py +13 -0
- letta_client/templates/types/templates_create_agents_response_agents_item_multi_agent_group_agent_ids_max_turns_item.py +5 -0
- letta_client/templates/types/templates_create_agents_response_agents_item_multi_agent_group_agent_ids_termination_token.py +13 -0
- letta_client/templates/types/templates_create_agents_response_agents_item_multi_agent_group_agent_ids_termination_token_item.py +7 -0
- letta_client/templates/types/templates_create_agents_response_agents_item_multi_agent_group_item.py +10 -0
- letta_client/templates/types/templates_create_agents_response_agents_item_multi_agent_group_item_agent_ids.py +41 -0
- letta_client/templates/types/templates_create_agents_response_agents_item_multi_agent_group_item_agent_ids_manager_agent_id.py +13 -0
- letta_client/templates/types/templates_create_agents_response_agents_item_multi_agent_group_item_agent_ids_manager_agent_id_item.py +7 -0
- letta_client/templates/types/templates_create_agents_response_agents_item_multi_agent_group_item_agent_ids_manager_type.py +7 -0
- letta_client/templates/types/templates_create_agents_response_agents_item_multi_agent_group_item_agent_ids_max_turns.py +13 -0
- letta_client/templates/types/templates_create_agents_response_agents_item_multi_agent_group_item_agent_ids_max_turns_item.py +7 -0
- letta_client/templates/types/templates_create_agents_response_agents_item_multi_agent_group_item_agent_ids_termination_token.py +15 -0
- letta_client/templates/types/templates_create_agents_response_agents_item_multi_agent_group_item_agent_ids_termination_token_item.py +7 -0
- letta_client/types/__init__.py +18 -2
- letta_client/types/agent_state.py +6 -0
- letta_client/types/assistant_message.py +14 -2
- letta_client/types/assistant_message_content.py +2 -2
- letta_client/types/{update_reasoning_message_reasoning.py → components_schemas_text_content.py} +1 -2
- letta_client/types/dynamic_manager.py +33 -0
- letta_client/types/group.py +53 -0
- letta_client/types/group_create.py +33 -0
- letta_client/types/group_create_manager_config.py +8 -0
- letta_client/types/letta_streaming_request.py +43 -0
- letta_client/types/manager_type.py +5 -0
- letta_client/types/message.py +5 -0
- letta_client/types/reasoning_message.py +2 -2
- letta_client/types/round_robin_manager.py +23 -0
- letta_client/types/supervisor_manager.py +23 -0
- letta_client/types/system_message.py +7 -4
- letta_client/types/system_message_content.py +2 -2
- letta_client/types/tool_call_message.py +2 -2
- letta_client/types/tool_return_message.py +3 -3
- letta_client/types/update_assistant_message.py +7 -4
- letta_client/types/update_assistant_message_content.py +2 -2
- letta_client/types/update_reasoning_message.py +2 -3
- letta_client/types/update_system_message.py +7 -4
- letta_client/types/update_system_message_content.py +2 -2
- letta_client/types/update_user_message.py +7 -4
- letta_client/types/update_user_message_content.py +2 -2
- letta_client/types/user_message.py +7 -4
- letta_client/types/user_message_content.py +2 -2
- {letta_client-0.1.65.dist-info → letta_client-0.1.67.dist-info}/METADATA +1 -1
- {letta_client-0.1.65.dist-info → letta_client-0.1.67.dist-info}/RECORD +80 -34
- {letta_client-0.1.65.dist-info → letta_client-0.1.67.dist-info}/WHEEL +0 -0
|
@@ -0,0 +1,1294 @@
|
|
|
1
|
+
# This file was auto-generated by Fern from our API Definition.
|
|
2
|
+
|
|
3
|
+
import typing
|
|
4
|
+
from ..core.client_wrapper import SyncClientWrapper
|
|
5
|
+
from ..types.manager_type import ManagerType
|
|
6
|
+
from ..core.request_options import RequestOptions
|
|
7
|
+
from ..types.group import Group
|
|
8
|
+
from ..core.unchecked_base_model import construct_type
|
|
9
|
+
from ..errors.unprocessable_entity_error import UnprocessableEntityError
|
|
10
|
+
from ..types.http_validation_error import HttpValidationError
|
|
11
|
+
from json.decoder import JSONDecodeError
|
|
12
|
+
from ..core.api_error import ApiError
|
|
13
|
+
from ..types.group_create_manager_config import GroupCreateManagerConfig
|
|
14
|
+
from ..core.serialization import convert_and_respect_annotation_metadata
|
|
15
|
+
from ..core.jsonable_encoder import jsonable_encoder
|
|
16
|
+
from ..types.letta_message_union import LettaMessageUnion
|
|
17
|
+
from ..types.message_create import MessageCreate
|
|
18
|
+
from ..types.letta_response import LettaResponse
|
|
19
|
+
from ..core.client_wrapper import AsyncClientWrapper
|
|
20
|
+
|
|
21
|
+
# this is used as the default value for optional parameters
|
|
22
|
+
OMIT = typing.cast(typing.Any, ...)
|
|
23
|
+
|
|
24
|
+
|
|
25
|
+
class GroupsClient:
    """
    Synchronous client for the ``v1/groups`` multi-agent group endpoints.

    All methods issue HTTP requests through the shared ``SyncClientWrapper``
    and decode responses with ``construct_type``. On a 422 response a
    ``UnprocessableEntityError`` is raised; any other non-2xx status (or an
    unparseable JSON body) raises ``ApiError``.
    """

    def __init__(self, *, client_wrapper: SyncClientWrapper):
        # Wrapper owning the configured httpx client, base URL and auth headers.
        self._client_wrapper = client_wrapper

    def _handle_response(self, response: typing.Any, type_: typing.Any) -> typing.Any:
        """
        Decode an HTTP response; shared by every endpoint in this client.

        Parameters
        ----------
        response : httpx response object returned by the client wrapper.
        type_ : target type the 2xx JSON body is constructed into.

        Returns
        -------
        The body parsed as ``type_`` on a 2xx status.

        Raises
        ------
        UnprocessableEntityError
            On HTTP 422 (request validation failure), carrying the parsed
            ``HttpValidationError`` detail.
        ApiError
            On any other non-2xx status, or when the body is not valid JSON.
        """
        try:
            if 200 <= response.status_code < 300:
                return construct_type(
                    type_=type_,  # type: ignore
                    object_=response.json(),
                )
            if response.status_code == 422:
                raise UnprocessableEntityError(
                    typing.cast(
                        HttpValidationError,
                        construct_type(
                            type_=HttpValidationError,  # type: ignore
                            object_=response.json(),
                        ),
                    )
                )
            _response_json = response.json()
        except JSONDecodeError:
            # Body was not JSON at all — surface the raw text instead.
            raise ApiError(status_code=response.status_code, body=response.text)
        raise ApiError(status_code=response.status_code, body=_response_json)

    def list_groups(
        self,
        *,
        manager_type: typing.Optional[ManagerType] = None,
        before: typing.Optional[str] = None,
        after: typing.Optional[str] = None,
        limit: typing.Optional[int] = None,
        project_id: typing.Optional[str] = None,
        request_options: typing.Optional[RequestOptions] = None,
    ) -> typing.List[Group]:
        """
        Fetch all multi-agent groups matching query.

        Parameters
        ----------
        manager_type : typing.Optional[ManagerType]
            Search groups by manager type
        before : typing.Optional[str]
            Cursor for pagination
        after : typing.Optional[str]
            Cursor for pagination
        limit : typing.Optional[int]
            Limit for pagination
        project_id : typing.Optional[str]
            Search groups by project id
        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        typing.List[Group]
            Successful Response

        Raises
        ------
        UnprocessableEntityError
            If the server rejects the query parameters (HTTP 422).
        ApiError
            For any other error response.
        """
        _response = self._client_wrapper.httpx_client.request(
            "v1/groups/",
            method="GET",
            params={
                "manager_type": manager_type,
                "before": before,
                "after": after,
                "limit": limit,
                "project_id": project_id,
            },
            request_options=request_options,
        )
        return typing.cast(
            typing.List[Group], self._handle_response(_response, typing.List[Group])
        )

    def create_group(
        self,
        *,
        agent_ids: typing.Sequence[str],
        description: str,
        project: typing.Optional[str] = None,
        manager_config: typing.Optional[GroupCreateManagerConfig] = OMIT,
        request_options: typing.Optional[RequestOptions] = None,
    ) -> Group:
        """
        Create a new multi-agent group with the specified configuration.

        Parameters
        ----------
        agent_ids : typing.Sequence[str]
            Ids of the member agents.
        description : str
            Human-readable description of the group.
        project : typing.Optional[str]
            Sent as the ``X-Project`` header when provided.
        manager_config : typing.Optional[GroupCreateManagerConfig]
            Optional manager configuration; omitted from the body when not set.
        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        Group
            Successful Response

        Raises
        ------
        UnprocessableEntityError
            If the server rejects the payload (HTTP 422).
        ApiError
            For any other error response.
        """
        _response = self._client_wrapper.httpx_client.request(
            "v1/groups/",
            method="POST",
            json={
                "agent_ids": agent_ids,
                "description": description,
                "manager_config": convert_and_respect_annotation_metadata(
                    object_=manager_config, annotation=GroupCreateManagerConfig, direction="write"
                ),
            },
            headers={
                "X-Project": str(project) if project is not None else None,
            },
            request_options=request_options,
            omit=OMIT,
        )
        return typing.cast(Group, self._handle_response(_response, Group))

    def upsert_group(
        self,
        *,
        agent_ids: typing.Sequence[str],
        description: str,
        project: typing.Optional[str] = None,
        manager_config: typing.Optional[GroupCreateManagerConfig] = OMIT,
        request_options: typing.Optional[RequestOptions] = None,
    ) -> Group:
        """
        Create or update a multi-agent group with the specified configuration
        (``PUT v1/groups/``).

        Parameters
        ----------
        agent_ids : typing.Sequence[str]
            Ids of the member agents.
        description : str
            Human-readable description of the group.
        project : typing.Optional[str]
            Sent as the ``X-Project`` header when provided.
        manager_config : typing.Optional[GroupCreateManagerConfig]
            Optional manager configuration; omitted from the body when not set.
        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        Group
            Successful Response

        Raises
        ------
        UnprocessableEntityError
            If the server rejects the payload (HTTP 422).
        ApiError
            For any other error response.
        """
        _response = self._client_wrapper.httpx_client.request(
            "v1/groups/",
            method="PUT",
            json={
                "agent_ids": agent_ids,
                "description": description,
                "manager_config": convert_and_respect_annotation_metadata(
                    object_=manager_config, annotation=GroupCreateManagerConfig, direction="write"
                ),
            },
            headers={
                "X-Project": str(project) if project is not None else None,
            },
            request_options=request_options,
            omit=OMIT,
        )
        return typing.cast(Group, self._handle_response(_response, Group))

    def delete_group(
        self, group_id: str, *, request_options: typing.Optional[RequestOptions] = None
    ) -> typing.Optional[typing.Any]:
        """
        Delete a multi-agent group.

        Parameters
        ----------
        group_id : str
            Id of the group to delete.
        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        typing.Optional[typing.Any]
            Successful Response

        Raises
        ------
        UnprocessableEntityError
            If the server rejects the request (HTTP 422).
        ApiError
            For any other error response.
        """
        _response = self._client_wrapper.httpx_client.request(
            f"v1/groups/{jsonable_encoder(group_id)}",
            method="DELETE",
            request_options=request_options,
        )
        return typing.cast(
            typing.Optional[typing.Any],
            self._handle_response(_response, typing.Optional[typing.Any]),
        )

    def list_group_messages(
        self,
        group_id: str,
        *,
        after: typing.Optional[str] = None,
        before: typing.Optional[str] = None,
        limit: typing.Optional[int] = None,
        use_assistant_message: typing.Optional[bool] = None,
        assistant_message_tool_name: typing.Optional[str] = None,
        assistant_message_tool_kwarg: typing.Optional[str] = None,
        request_options: typing.Optional[RequestOptions] = None,
    ) -> typing.List[LettaMessageUnion]:
        """
        Retrieve message history for a group.

        Parameters
        ----------
        group_id : str
            Id of the group whose messages are listed.
        after : typing.Optional[str]
            Message after which to retrieve the returned messages.
        before : typing.Optional[str]
            Message before which to retrieve the returned messages.
        limit : typing.Optional[int]
            Maximum number of messages to retrieve.
        use_assistant_message : typing.Optional[bool]
            Whether to use assistant messages
        assistant_message_tool_name : typing.Optional[str]
            The name of the designated message tool.
        assistant_message_tool_kwarg : typing.Optional[str]
            The name of the message argument.
        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        typing.List[LettaMessageUnion]
            Successful Response

        Raises
        ------
        UnprocessableEntityError
            If the server rejects the query parameters (HTTP 422).
        ApiError
            For any other error response.
        """
        _response = self._client_wrapper.httpx_client.request(
            f"v1/groups/{jsonable_encoder(group_id)}/messages",
            method="GET",
            params={
                "after": after,
                "before": before,
                "limit": limit,
                "use_assistant_message": use_assistant_message,
                "assistant_message_tool_name": assistant_message_tool_name,
                "assistant_message_tool_kwarg": assistant_message_tool_kwarg,
            },
            request_options=request_options,
        )
        return typing.cast(
            typing.List[LettaMessageUnion],
            self._handle_response(_response, typing.List[LettaMessageUnion]),
        )

    def send_group_message(
        self,
        group_id: str,
        *,
        agent_id: str,
        messages: typing.Sequence[MessageCreate],
        use_assistant_message: typing.Optional[bool] = OMIT,
        assistant_message_tool_name: typing.Optional[str] = OMIT,
        assistant_message_tool_kwarg: typing.Optional[str] = OMIT,
        request_options: typing.Optional[RequestOptions] = None,
    ) -> LettaResponse:
        """
        Process a user message and return the group's response.
        This endpoint accepts a message from a user and processes it through
        agents in the group based on the specified pattern.

        Parameters
        ----------
        group_id : str
            Id of the group receiving the message.
        agent_id : str
            Sent as a query parameter identifying the sending agent.
        messages : typing.Sequence[MessageCreate]
            The messages to be sent to the agent.
        use_assistant_message : typing.Optional[bool]
            Whether the server should parse specific tool call arguments (default `send_message`) as `AssistantMessage` objects.
        assistant_message_tool_name : typing.Optional[str]
            The name of the designated message tool.
        assistant_message_tool_kwarg : typing.Optional[str]
            The name of the message argument in the designated message tool.
        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        LettaResponse
            Successful Response

        Raises
        ------
        UnprocessableEntityError
            If the server rejects the payload (HTTP 422).
        ApiError
            For any other error response.
        """
        _response = self._client_wrapper.httpx_client.request(
            f"v1/groups/{jsonable_encoder(group_id)}/messages",
            method="POST",
            params={
                "agent_id": agent_id,
            },
            json={
                "messages": convert_and_respect_annotation_metadata(
                    object_=messages, annotation=typing.Sequence[MessageCreate], direction="write"
                ),
                "use_assistant_message": use_assistant_message,
                "assistant_message_tool_name": assistant_message_tool_name,
                "assistant_message_tool_kwarg": assistant_message_tool_kwarg,
            },
            request_options=request_options,
            omit=OMIT,
        )
        return typing.cast(LettaResponse, self._handle_response(_response, LettaResponse))

    def send_group_message_streaming(
        self,
        group_id: str,
        *,
        messages: typing.Sequence[MessageCreate],
        use_assistant_message: typing.Optional[bool] = OMIT,
        assistant_message_tool_name: typing.Optional[str] = OMIT,
        assistant_message_tool_kwarg: typing.Optional[str] = OMIT,
        stream_tokens: typing.Optional[bool] = OMIT,
        request_options: typing.Optional[RequestOptions] = None,
    ) -> typing.Optional[typing.Any]:
        """
        Process a user message and return the group's responses.
        This endpoint accepts a message from a user and processes it through agents in the group based on the specified pattern.
        It will stream the steps of the response always, and stream the tokens if 'stream_tokens' is set to True.

        Parameters
        ----------
        group_id : str
            Id of the group receiving the message.
        messages : typing.Sequence[MessageCreate]
            The messages to be sent to the agent.
        use_assistant_message : typing.Optional[bool]
            Whether the server should parse specific tool call arguments (default `send_message`) as `AssistantMessage` objects.
        assistant_message_tool_name : typing.Optional[str]
            The name of the designated message tool.
        assistant_message_tool_kwarg : typing.Optional[str]
            The name of the message argument in the designated message tool.
        stream_tokens : typing.Optional[bool]
            Flag to determine if individual tokens should be streamed. Set to True for token streaming (requires stream_steps = True).
        request_options : typing.Optional[RequestOptions]
            Request-specific configuration.

        Returns
        -------
        typing.Optional[typing.Any]
            Successful response

        Raises
        ------
        UnprocessableEntityError
            If the server rejects the payload (HTTP 422).
        ApiError
            For any other error response.
        """
        _response = self._client_wrapper.httpx_client.request(
            f"v1/groups/{jsonable_encoder(group_id)}/messages/stream",
            method="POST",
            json={
                "messages": convert_and_respect_annotation_metadata(
                    object_=messages, annotation=typing.Sequence[MessageCreate], direction="write"
                ),
                "use_assistant_message": use_assistant_message,
                "assistant_message_tool_name": assistant_message_tool_name,
                "assistant_message_tool_kwarg": assistant_message_tool_kwarg,
                "stream_tokens": stream_tokens,
            },
            request_options=request_options,
            omit=OMIT,
        )
        return typing.cast(
            typing.Optional[typing.Any],
            self._handle_response(_response, typing.Optional[typing.Any]),
        )
|
|
631
|
+
|
|
632
|
+
|
|
633
|
+
class AsyncGroupsClient:
|
|
634
|
+
def __init__(self, *, client_wrapper: AsyncClientWrapper):
    """Keep a reference to the async client wrapper used for all requests."""
    self._client_wrapper = client_wrapper
|
|
636
|
+
|
|
637
|
+
async def list_groups(
    self,
    *,
    manager_type: typing.Optional[ManagerType] = None,
    before: typing.Optional[str] = None,
    after: typing.Optional[str] = None,
    limit: typing.Optional[int] = None,
    project_id: typing.Optional[str] = None,
    request_options: typing.Optional[RequestOptions] = None,
) -> typing.List[Group]:
    """
    Fetch all multi-agent groups matching query.

    Parameters
    ----------
    manager_type : typing.Optional[ManagerType]
        Search groups by manager type
    before : typing.Optional[str]
        Cursor for pagination
    after : typing.Optional[str]
        Cursor for pagination
    limit : typing.Optional[int]
        Limit for pagination
    project_id : typing.Optional[str]
        Search groups by project id
    request_options : typing.Optional[RequestOptions]
        Request-specific configuration.

    Returns
    -------
    typing.List[Group]
        Successful Response

    Examples
    --------
    import asyncio

    from letta_client import AsyncLetta

    client = AsyncLetta(
        token="YOUR_TOKEN",
    )


    async def main() -> None:
        await client.groups.list_groups()


    asyncio.run(main())
    """
    # Assemble the filter/pagination query string up front.
    query_params = {
        "manager_type": manager_type,
        "before": before,
        "after": after,
        "limit": limit,
        "project_id": project_id,
    }
    _response = await self._client_wrapper.httpx_client.request(
        "v1/groups/",
        method="GET",
        params=query_params,
        request_options=request_options,
    )
    try:
        if 200 <= _response.status_code < 300:
            parsed = construct_type(
                type_=typing.List[Group],  # type: ignore
                object_=_response.json(),
            )
            return typing.cast(typing.List[Group], parsed)
        if _response.status_code == 422:
            validation_detail = construct_type(
                type_=HttpValidationError,  # type: ignore
                object_=_response.json(),
            )
            raise UnprocessableEntityError(
                typing.cast(HttpValidationError, validation_detail)
            )
        _response_json = _response.json()
    except JSONDecodeError:
        # Non-JSON error body: report the raw response text.
        raise ApiError(status_code=_response.status_code, body=_response.text)
    raise ApiError(status_code=_response.status_code, body=_response_json)
|
|
727
|
+
|
|
728
|
+
async def create_group(
|
|
729
|
+
self,
|
|
730
|
+
*,
|
|
731
|
+
agent_ids: typing.Sequence[str],
|
|
732
|
+
description: str,
|
|
733
|
+
project: typing.Optional[str] = None,
|
|
734
|
+
manager_config: typing.Optional[GroupCreateManagerConfig] = OMIT,
|
|
735
|
+
request_options: typing.Optional[RequestOptions] = None,
|
|
736
|
+
) -> Group:
|
|
737
|
+
"""
|
|
738
|
+
Create a new multi-agent group with the specified configuration.
|
|
739
|
+
|
|
740
|
+
Parameters
|
|
741
|
+
----------
|
|
742
|
+
agent_ids : typing.Sequence[str]
|
|
743
|
+
|
|
744
|
+
|
|
745
|
+
description : str
|
|
746
|
+
|
|
747
|
+
|
|
748
|
+
project : typing.Optional[str]
|
|
749
|
+
|
|
750
|
+
manager_config : typing.Optional[GroupCreateManagerConfig]
|
|
751
|
+
|
|
752
|
+
|
|
753
|
+
request_options : typing.Optional[RequestOptions]
|
|
754
|
+
Request-specific configuration.
|
|
755
|
+
|
|
756
|
+
Returns
|
|
757
|
+
-------
|
|
758
|
+
Group
|
|
759
|
+
Successful Response
|
|
760
|
+
|
|
761
|
+
Examples
|
|
762
|
+
--------
|
|
763
|
+
import asyncio
|
|
764
|
+
|
|
765
|
+
from letta_client import AsyncLetta
|
|
766
|
+
|
|
767
|
+
client = AsyncLetta(
|
|
768
|
+
token="YOUR_TOKEN",
|
|
769
|
+
)
|
|
770
|
+
|
|
771
|
+
|
|
772
|
+
async def main() -> None:
|
|
773
|
+
await client.groups.create_group(
|
|
774
|
+
agent_ids=["agent_ids"],
|
|
775
|
+
description="description",
|
|
776
|
+
)
|
|
777
|
+
|
|
778
|
+
|
|
779
|
+
asyncio.run(main())
|
|
780
|
+
"""
|
|
781
|
+
_response = await self._client_wrapper.httpx_client.request(
|
|
782
|
+
"v1/groups/",
|
|
783
|
+
method="POST",
|
|
784
|
+
json={
|
|
785
|
+
"agent_ids": agent_ids,
|
|
786
|
+
"description": description,
|
|
787
|
+
"manager_config": convert_and_respect_annotation_metadata(
|
|
788
|
+
object_=manager_config, annotation=GroupCreateManagerConfig, direction="write"
|
|
789
|
+
),
|
|
790
|
+
},
|
|
791
|
+
headers={
|
|
792
|
+
"X-Project": str(project) if project is not None else None,
|
|
793
|
+
},
|
|
794
|
+
request_options=request_options,
|
|
795
|
+
omit=OMIT,
|
|
796
|
+
)
|
|
797
|
+
try:
|
|
798
|
+
if 200 <= _response.status_code < 300:
|
|
799
|
+
return typing.cast(
|
|
800
|
+
Group,
|
|
801
|
+
construct_type(
|
|
802
|
+
type_=Group, # type: ignore
|
|
803
|
+
object_=_response.json(),
|
|
804
|
+
),
|
|
805
|
+
)
|
|
806
|
+
if _response.status_code == 422:
|
|
807
|
+
raise UnprocessableEntityError(
|
|
808
|
+
typing.cast(
|
|
809
|
+
HttpValidationError,
|
|
810
|
+
construct_type(
|
|
811
|
+
type_=HttpValidationError, # type: ignore
|
|
812
|
+
object_=_response.json(),
|
|
813
|
+
),
|
|
814
|
+
)
|
|
815
|
+
)
|
|
816
|
+
_response_json = _response.json()
|
|
817
|
+
except JSONDecodeError:
|
|
818
|
+
raise ApiError(status_code=_response.status_code, body=_response.text)
|
|
819
|
+
raise ApiError(status_code=_response.status_code, body=_response_json)
|
|
820
|
+
|
|
821
|
+
async def upsert_group(
|
|
822
|
+
self,
|
|
823
|
+
*,
|
|
824
|
+
agent_ids: typing.Sequence[str],
|
|
825
|
+
description: str,
|
|
826
|
+
project: typing.Optional[str] = None,
|
|
827
|
+
manager_config: typing.Optional[GroupCreateManagerConfig] = OMIT,
|
|
828
|
+
request_options: typing.Optional[RequestOptions] = None,
|
|
829
|
+
) -> Group:
|
|
830
|
+
"""
|
|
831
|
+
Create a new multi-agent group with the specified configuration.
|
|
832
|
+
|
|
833
|
+
Parameters
|
|
834
|
+
----------
|
|
835
|
+
agent_ids : typing.Sequence[str]
|
|
836
|
+
|
|
837
|
+
|
|
838
|
+
description : str
|
|
839
|
+
|
|
840
|
+
|
|
841
|
+
project : typing.Optional[str]
|
|
842
|
+
|
|
843
|
+
manager_config : typing.Optional[GroupCreateManagerConfig]
|
|
844
|
+
|
|
845
|
+
|
|
846
|
+
request_options : typing.Optional[RequestOptions]
|
|
847
|
+
Request-specific configuration.
|
|
848
|
+
|
|
849
|
+
Returns
|
|
850
|
+
-------
|
|
851
|
+
Group
|
|
852
|
+
Successful Response
|
|
853
|
+
|
|
854
|
+
Examples
|
|
855
|
+
--------
|
|
856
|
+
import asyncio
|
|
857
|
+
|
|
858
|
+
from letta_client import AsyncLetta
|
|
859
|
+
|
|
860
|
+
client = AsyncLetta(
|
|
861
|
+
token="YOUR_TOKEN",
|
|
862
|
+
)
|
|
863
|
+
|
|
864
|
+
|
|
865
|
+
async def main() -> None:
|
|
866
|
+
await client.groups.upsert_group(
|
|
867
|
+
agent_ids=["agent_ids"],
|
|
868
|
+
description="description",
|
|
869
|
+
)
|
|
870
|
+
|
|
871
|
+
|
|
872
|
+
asyncio.run(main())
|
|
873
|
+
"""
|
|
874
|
+
_response = await self._client_wrapper.httpx_client.request(
|
|
875
|
+
"v1/groups/",
|
|
876
|
+
method="PUT",
|
|
877
|
+
json={
|
|
878
|
+
"agent_ids": agent_ids,
|
|
879
|
+
"description": description,
|
|
880
|
+
"manager_config": convert_and_respect_annotation_metadata(
|
|
881
|
+
object_=manager_config, annotation=GroupCreateManagerConfig, direction="write"
|
|
882
|
+
),
|
|
883
|
+
},
|
|
884
|
+
headers={
|
|
885
|
+
"X-Project": str(project) if project is not None else None,
|
|
886
|
+
},
|
|
887
|
+
request_options=request_options,
|
|
888
|
+
omit=OMIT,
|
|
889
|
+
)
|
|
890
|
+
try:
|
|
891
|
+
if 200 <= _response.status_code < 300:
|
|
892
|
+
return typing.cast(
|
|
893
|
+
Group,
|
|
894
|
+
construct_type(
|
|
895
|
+
type_=Group, # type: ignore
|
|
896
|
+
object_=_response.json(),
|
|
897
|
+
),
|
|
898
|
+
)
|
|
899
|
+
if _response.status_code == 422:
|
|
900
|
+
raise UnprocessableEntityError(
|
|
901
|
+
typing.cast(
|
|
902
|
+
HttpValidationError,
|
|
903
|
+
construct_type(
|
|
904
|
+
type_=HttpValidationError, # type: ignore
|
|
905
|
+
object_=_response.json(),
|
|
906
|
+
),
|
|
907
|
+
)
|
|
908
|
+
)
|
|
909
|
+
_response_json = _response.json()
|
|
910
|
+
except JSONDecodeError:
|
|
911
|
+
raise ApiError(status_code=_response.status_code, body=_response.text)
|
|
912
|
+
raise ApiError(status_code=_response.status_code, body=_response_json)
|
|
913
|
+
|
|
914
|
+
async def delete_group(
|
|
915
|
+
self, group_id: str, *, request_options: typing.Optional[RequestOptions] = None
|
|
916
|
+
) -> typing.Optional[typing.Any]:
|
|
917
|
+
"""
|
|
918
|
+
Delete a multi-agent group.
|
|
919
|
+
|
|
920
|
+
Parameters
|
|
921
|
+
----------
|
|
922
|
+
group_id : str
|
|
923
|
+
|
|
924
|
+
request_options : typing.Optional[RequestOptions]
|
|
925
|
+
Request-specific configuration.
|
|
926
|
+
|
|
927
|
+
Returns
|
|
928
|
+
-------
|
|
929
|
+
typing.Optional[typing.Any]
|
|
930
|
+
Successful Response
|
|
931
|
+
|
|
932
|
+
Examples
|
|
933
|
+
--------
|
|
934
|
+
import asyncio
|
|
935
|
+
|
|
936
|
+
from letta_client import AsyncLetta
|
|
937
|
+
|
|
938
|
+
client = AsyncLetta(
|
|
939
|
+
token="YOUR_TOKEN",
|
|
940
|
+
)
|
|
941
|
+
|
|
942
|
+
|
|
943
|
+
async def main() -> None:
|
|
944
|
+
await client.groups.delete_group(
|
|
945
|
+
group_id="group_id",
|
|
946
|
+
)
|
|
947
|
+
|
|
948
|
+
|
|
949
|
+
asyncio.run(main())
|
|
950
|
+
"""
|
|
951
|
+
_response = await self._client_wrapper.httpx_client.request(
|
|
952
|
+
f"v1/groups/{jsonable_encoder(group_id)}",
|
|
953
|
+
method="DELETE",
|
|
954
|
+
request_options=request_options,
|
|
955
|
+
)
|
|
956
|
+
try:
|
|
957
|
+
if 200 <= _response.status_code < 300:
|
|
958
|
+
return typing.cast(
|
|
959
|
+
typing.Optional[typing.Any],
|
|
960
|
+
construct_type(
|
|
961
|
+
type_=typing.Optional[typing.Any], # type: ignore
|
|
962
|
+
object_=_response.json(),
|
|
963
|
+
),
|
|
964
|
+
)
|
|
965
|
+
if _response.status_code == 422:
|
|
966
|
+
raise UnprocessableEntityError(
|
|
967
|
+
typing.cast(
|
|
968
|
+
HttpValidationError,
|
|
969
|
+
construct_type(
|
|
970
|
+
type_=HttpValidationError, # type: ignore
|
|
971
|
+
object_=_response.json(),
|
|
972
|
+
),
|
|
973
|
+
)
|
|
974
|
+
)
|
|
975
|
+
_response_json = _response.json()
|
|
976
|
+
except JSONDecodeError:
|
|
977
|
+
raise ApiError(status_code=_response.status_code, body=_response.text)
|
|
978
|
+
raise ApiError(status_code=_response.status_code, body=_response_json)
|
|
979
|
+
|
|
980
|
+
async def list_group_messages(
|
|
981
|
+
self,
|
|
982
|
+
group_id: str,
|
|
983
|
+
*,
|
|
984
|
+
after: typing.Optional[str] = None,
|
|
985
|
+
before: typing.Optional[str] = None,
|
|
986
|
+
limit: typing.Optional[int] = None,
|
|
987
|
+
use_assistant_message: typing.Optional[bool] = None,
|
|
988
|
+
assistant_message_tool_name: typing.Optional[str] = None,
|
|
989
|
+
assistant_message_tool_kwarg: typing.Optional[str] = None,
|
|
990
|
+
request_options: typing.Optional[RequestOptions] = None,
|
|
991
|
+
) -> typing.List[LettaMessageUnion]:
|
|
992
|
+
"""
|
|
993
|
+
Retrieve message history for an agent.
|
|
994
|
+
|
|
995
|
+
Parameters
|
|
996
|
+
----------
|
|
997
|
+
group_id : str
|
|
998
|
+
|
|
999
|
+
after : typing.Optional[str]
|
|
1000
|
+
Message after which to retrieve the returned messages.
|
|
1001
|
+
|
|
1002
|
+
before : typing.Optional[str]
|
|
1003
|
+
Message before which to retrieve the returned messages.
|
|
1004
|
+
|
|
1005
|
+
limit : typing.Optional[int]
|
|
1006
|
+
Maximum number of messages to retrieve.
|
|
1007
|
+
|
|
1008
|
+
use_assistant_message : typing.Optional[bool]
|
|
1009
|
+
Whether to use assistant messages
|
|
1010
|
+
|
|
1011
|
+
assistant_message_tool_name : typing.Optional[str]
|
|
1012
|
+
The name of the designated message tool.
|
|
1013
|
+
|
|
1014
|
+
assistant_message_tool_kwarg : typing.Optional[str]
|
|
1015
|
+
The name of the message argument.
|
|
1016
|
+
|
|
1017
|
+
request_options : typing.Optional[RequestOptions]
|
|
1018
|
+
Request-specific configuration.
|
|
1019
|
+
|
|
1020
|
+
Returns
|
|
1021
|
+
-------
|
|
1022
|
+
typing.List[LettaMessageUnion]
|
|
1023
|
+
Successful Response
|
|
1024
|
+
|
|
1025
|
+
Examples
|
|
1026
|
+
--------
|
|
1027
|
+
import asyncio
|
|
1028
|
+
|
|
1029
|
+
from letta_client import AsyncLetta
|
|
1030
|
+
|
|
1031
|
+
client = AsyncLetta(
|
|
1032
|
+
token="YOUR_TOKEN",
|
|
1033
|
+
)
|
|
1034
|
+
|
|
1035
|
+
|
|
1036
|
+
async def main() -> None:
|
|
1037
|
+
await client.groups.list_group_messages(
|
|
1038
|
+
group_id="group_id",
|
|
1039
|
+
)
|
|
1040
|
+
|
|
1041
|
+
|
|
1042
|
+
asyncio.run(main())
|
|
1043
|
+
"""
|
|
1044
|
+
_response = await self._client_wrapper.httpx_client.request(
|
|
1045
|
+
f"v1/groups/{jsonable_encoder(group_id)}/messages",
|
|
1046
|
+
method="GET",
|
|
1047
|
+
params={
|
|
1048
|
+
"after": after,
|
|
1049
|
+
"before": before,
|
|
1050
|
+
"limit": limit,
|
|
1051
|
+
"use_assistant_message": use_assistant_message,
|
|
1052
|
+
"assistant_message_tool_name": assistant_message_tool_name,
|
|
1053
|
+
"assistant_message_tool_kwarg": assistant_message_tool_kwarg,
|
|
1054
|
+
},
|
|
1055
|
+
request_options=request_options,
|
|
1056
|
+
)
|
|
1057
|
+
try:
|
|
1058
|
+
if 200 <= _response.status_code < 300:
|
|
1059
|
+
return typing.cast(
|
|
1060
|
+
typing.List[LettaMessageUnion],
|
|
1061
|
+
construct_type(
|
|
1062
|
+
type_=typing.List[LettaMessageUnion], # type: ignore
|
|
1063
|
+
object_=_response.json(),
|
|
1064
|
+
),
|
|
1065
|
+
)
|
|
1066
|
+
if _response.status_code == 422:
|
|
1067
|
+
raise UnprocessableEntityError(
|
|
1068
|
+
typing.cast(
|
|
1069
|
+
HttpValidationError,
|
|
1070
|
+
construct_type(
|
|
1071
|
+
type_=HttpValidationError, # type: ignore
|
|
1072
|
+
object_=_response.json(),
|
|
1073
|
+
),
|
|
1074
|
+
)
|
|
1075
|
+
)
|
|
1076
|
+
_response_json = _response.json()
|
|
1077
|
+
except JSONDecodeError:
|
|
1078
|
+
raise ApiError(status_code=_response.status_code, body=_response.text)
|
|
1079
|
+
raise ApiError(status_code=_response.status_code, body=_response_json)
|
|
1080
|
+
|
|
1081
|
+
async def send_group_message(
|
|
1082
|
+
self,
|
|
1083
|
+
group_id: str,
|
|
1084
|
+
*,
|
|
1085
|
+
agent_id: str,
|
|
1086
|
+
messages: typing.Sequence[MessageCreate],
|
|
1087
|
+
use_assistant_message: typing.Optional[bool] = OMIT,
|
|
1088
|
+
assistant_message_tool_name: typing.Optional[str] = OMIT,
|
|
1089
|
+
assistant_message_tool_kwarg: typing.Optional[str] = OMIT,
|
|
1090
|
+
request_options: typing.Optional[RequestOptions] = None,
|
|
1091
|
+
) -> LettaResponse:
|
|
1092
|
+
"""
|
|
1093
|
+
Process a user message and return the group's response.
|
|
1094
|
+
This endpoint accepts a message from a user and processes it through through agents in the group based on the specified pattern
|
|
1095
|
+
|
|
1096
|
+
Parameters
|
|
1097
|
+
----------
|
|
1098
|
+
group_id : str
|
|
1099
|
+
|
|
1100
|
+
agent_id : str
|
|
1101
|
+
|
|
1102
|
+
messages : typing.Sequence[MessageCreate]
|
|
1103
|
+
The messages to be sent to the agent.
|
|
1104
|
+
|
|
1105
|
+
use_assistant_message : typing.Optional[bool]
|
|
1106
|
+
Whether the server should parse specific tool call arguments (default `send_message`) as `AssistantMessage` objects.
|
|
1107
|
+
|
|
1108
|
+
assistant_message_tool_name : typing.Optional[str]
|
|
1109
|
+
The name of the designated message tool.
|
|
1110
|
+
|
|
1111
|
+
assistant_message_tool_kwarg : typing.Optional[str]
|
|
1112
|
+
The name of the message argument in the designated message tool.
|
|
1113
|
+
|
|
1114
|
+
request_options : typing.Optional[RequestOptions]
|
|
1115
|
+
Request-specific configuration.
|
|
1116
|
+
|
|
1117
|
+
Returns
|
|
1118
|
+
-------
|
|
1119
|
+
LettaResponse
|
|
1120
|
+
Successful Response
|
|
1121
|
+
|
|
1122
|
+
Examples
|
|
1123
|
+
--------
|
|
1124
|
+
import asyncio
|
|
1125
|
+
|
|
1126
|
+
from letta_client import AsyncLetta, MessageCreate
|
|
1127
|
+
|
|
1128
|
+
client = AsyncLetta(
|
|
1129
|
+
token="YOUR_TOKEN",
|
|
1130
|
+
)
|
|
1131
|
+
|
|
1132
|
+
|
|
1133
|
+
async def main() -> None:
|
|
1134
|
+
await client.groups.send_group_message(
|
|
1135
|
+
group_id="group_id",
|
|
1136
|
+
agent_id="agent_id",
|
|
1137
|
+
messages=[
|
|
1138
|
+
MessageCreate(
|
|
1139
|
+
role="user",
|
|
1140
|
+
content="content",
|
|
1141
|
+
)
|
|
1142
|
+
],
|
|
1143
|
+
)
|
|
1144
|
+
|
|
1145
|
+
|
|
1146
|
+
asyncio.run(main())
|
|
1147
|
+
"""
|
|
1148
|
+
_response = await self._client_wrapper.httpx_client.request(
|
|
1149
|
+
f"v1/groups/{jsonable_encoder(group_id)}/messages",
|
|
1150
|
+
method="POST",
|
|
1151
|
+
params={
|
|
1152
|
+
"agent_id": agent_id,
|
|
1153
|
+
},
|
|
1154
|
+
json={
|
|
1155
|
+
"messages": convert_and_respect_annotation_metadata(
|
|
1156
|
+
object_=messages, annotation=typing.Sequence[MessageCreate], direction="write"
|
|
1157
|
+
),
|
|
1158
|
+
"use_assistant_message": use_assistant_message,
|
|
1159
|
+
"assistant_message_tool_name": assistant_message_tool_name,
|
|
1160
|
+
"assistant_message_tool_kwarg": assistant_message_tool_kwarg,
|
|
1161
|
+
},
|
|
1162
|
+
request_options=request_options,
|
|
1163
|
+
omit=OMIT,
|
|
1164
|
+
)
|
|
1165
|
+
try:
|
|
1166
|
+
if 200 <= _response.status_code < 300:
|
|
1167
|
+
return typing.cast(
|
|
1168
|
+
LettaResponse,
|
|
1169
|
+
construct_type(
|
|
1170
|
+
type_=LettaResponse, # type: ignore
|
|
1171
|
+
object_=_response.json(),
|
|
1172
|
+
),
|
|
1173
|
+
)
|
|
1174
|
+
if _response.status_code == 422:
|
|
1175
|
+
raise UnprocessableEntityError(
|
|
1176
|
+
typing.cast(
|
|
1177
|
+
HttpValidationError,
|
|
1178
|
+
construct_type(
|
|
1179
|
+
type_=HttpValidationError, # type: ignore
|
|
1180
|
+
object_=_response.json(),
|
|
1181
|
+
),
|
|
1182
|
+
)
|
|
1183
|
+
)
|
|
1184
|
+
_response_json = _response.json()
|
|
1185
|
+
except JSONDecodeError:
|
|
1186
|
+
raise ApiError(status_code=_response.status_code, body=_response.text)
|
|
1187
|
+
raise ApiError(status_code=_response.status_code, body=_response_json)
|
|
1188
|
+
|
|
1189
|
+
async def send_group_message_streaming(
|
|
1190
|
+
self,
|
|
1191
|
+
group_id: str,
|
|
1192
|
+
*,
|
|
1193
|
+
messages: typing.Sequence[MessageCreate],
|
|
1194
|
+
use_assistant_message: typing.Optional[bool] = OMIT,
|
|
1195
|
+
assistant_message_tool_name: typing.Optional[str] = OMIT,
|
|
1196
|
+
assistant_message_tool_kwarg: typing.Optional[str] = OMIT,
|
|
1197
|
+
stream_tokens: typing.Optional[bool] = OMIT,
|
|
1198
|
+
request_options: typing.Optional[RequestOptions] = None,
|
|
1199
|
+
) -> typing.Optional[typing.Any]:
|
|
1200
|
+
"""
|
|
1201
|
+
Process a user message and return the group's responses.
|
|
1202
|
+
This endpoint accepts a message from a user and processes it through agents in the group based on the specified pattern.
|
|
1203
|
+
It will stream the steps of the response always, and stream the tokens if 'stream_tokens' is set to True.
|
|
1204
|
+
|
|
1205
|
+
Parameters
|
|
1206
|
+
----------
|
|
1207
|
+
group_id : str
|
|
1208
|
+
|
|
1209
|
+
messages : typing.Sequence[MessageCreate]
|
|
1210
|
+
The messages to be sent to the agent.
|
|
1211
|
+
|
|
1212
|
+
use_assistant_message : typing.Optional[bool]
|
|
1213
|
+
Whether the server should parse specific tool call arguments (default `send_message`) as `AssistantMessage` objects.
|
|
1214
|
+
|
|
1215
|
+
assistant_message_tool_name : typing.Optional[str]
|
|
1216
|
+
The name of the designated message tool.
|
|
1217
|
+
|
|
1218
|
+
assistant_message_tool_kwarg : typing.Optional[str]
|
|
1219
|
+
The name of the message argument in the designated message tool.
|
|
1220
|
+
|
|
1221
|
+
stream_tokens : typing.Optional[bool]
|
|
1222
|
+
Flag to determine if individual tokens should be streamed. Set to True for token streaming (requires stream_steps = True).
|
|
1223
|
+
|
|
1224
|
+
request_options : typing.Optional[RequestOptions]
|
|
1225
|
+
Request-specific configuration.
|
|
1226
|
+
|
|
1227
|
+
Returns
|
|
1228
|
+
-------
|
|
1229
|
+
typing.Optional[typing.Any]
|
|
1230
|
+
Successful response
|
|
1231
|
+
|
|
1232
|
+
Examples
|
|
1233
|
+
--------
|
|
1234
|
+
import asyncio
|
|
1235
|
+
|
|
1236
|
+
from letta_client import AsyncLetta, MessageCreate
|
|
1237
|
+
|
|
1238
|
+
client = AsyncLetta(
|
|
1239
|
+
token="YOUR_TOKEN",
|
|
1240
|
+
)
|
|
1241
|
+
|
|
1242
|
+
|
|
1243
|
+
async def main() -> None:
|
|
1244
|
+
await client.groups.send_group_message_streaming(
|
|
1245
|
+
group_id="group_id",
|
|
1246
|
+
messages=[
|
|
1247
|
+
MessageCreate(
|
|
1248
|
+
role="user",
|
|
1249
|
+
content="content",
|
|
1250
|
+
)
|
|
1251
|
+
],
|
|
1252
|
+
)
|
|
1253
|
+
|
|
1254
|
+
|
|
1255
|
+
asyncio.run(main())
|
|
1256
|
+
"""
|
|
1257
|
+
_response = await self._client_wrapper.httpx_client.request(
|
|
1258
|
+
f"v1/groups/{jsonable_encoder(group_id)}/messages/stream",
|
|
1259
|
+
method="POST",
|
|
1260
|
+
json={
|
|
1261
|
+
"messages": convert_and_respect_annotation_metadata(
|
|
1262
|
+
object_=messages, annotation=typing.Sequence[MessageCreate], direction="write"
|
|
1263
|
+
),
|
|
1264
|
+
"use_assistant_message": use_assistant_message,
|
|
1265
|
+
"assistant_message_tool_name": assistant_message_tool_name,
|
|
1266
|
+
"assistant_message_tool_kwarg": assistant_message_tool_kwarg,
|
|
1267
|
+
"stream_tokens": stream_tokens,
|
|
1268
|
+
},
|
|
1269
|
+
request_options=request_options,
|
|
1270
|
+
omit=OMIT,
|
|
1271
|
+
)
|
|
1272
|
+
try:
|
|
1273
|
+
if 200 <= _response.status_code < 300:
|
|
1274
|
+
return typing.cast(
|
|
1275
|
+
typing.Optional[typing.Any],
|
|
1276
|
+
construct_type(
|
|
1277
|
+
type_=typing.Optional[typing.Any], # type: ignore
|
|
1278
|
+
object_=_response.json(),
|
|
1279
|
+
),
|
|
1280
|
+
)
|
|
1281
|
+
if _response.status_code == 422:
|
|
1282
|
+
raise UnprocessableEntityError(
|
|
1283
|
+
typing.cast(
|
|
1284
|
+
HttpValidationError,
|
|
1285
|
+
construct_type(
|
|
1286
|
+
type_=HttpValidationError, # type: ignore
|
|
1287
|
+
object_=_response.json(),
|
|
1288
|
+
),
|
|
1289
|
+
)
|
|
1290
|
+
)
|
|
1291
|
+
_response_json = _response.json()
|
|
1292
|
+
except JSONDecodeError:
|
|
1293
|
+
raise ApiError(status_code=_response.status_code, body=_response.text)
|
|
1294
|
+
raise ApiError(status_code=_response.status_code, body=_response_json)
|