agno 2.3.16__py3-none-any.whl → 2.3.18__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- agno/agent/__init__.py +2 -0
- agno/agent/agent.py +4 -53
- agno/agent/remote.py +351 -0
- agno/client/__init__.py +3 -0
- agno/client/os.py +2669 -0
- agno/db/base.py +20 -0
- agno/db/mongo/async_mongo.py +11 -0
- agno/db/mongo/mongo.py +10 -0
- agno/db/mysql/async_mysql.py +9 -0
- agno/db/mysql/mysql.py +9 -0
- agno/db/postgres/async_postgres.py +9 -0
- agno/db/postgres/postgres.py +9 -0
- agno/db/postgres/utils.py +3 -2
- agno/db/sqlite/async_sqlite.py +9 -0
- agno/db/sqlite/sqlite.py +11 -1
- agno/exceptions.py +23 -0
- agno/knowledge/chunking/semantic.py +123 -46
- agno/knowledge/reader/csv_reader.py +1 -1
- agno/knowledge/reader/field_labeled_csv_reader.py +1 -1
- agno/knowledge/reader/json_reader.py +1 -1
- agno/models/google/gemini.py +5 -0
- agno/os/app.py +108 -25
- agno/os/auth.py +25 -1
- agno/os/interfaces/a2a/a2a.py +7 -6
- agno/os/interfaces/a2a/router.py +13 -13
- agno/os/interfaces/agui/agui.py +5 -3
- agno/os/interfaces/agui/router.py +23 -16
- agno/os/interfaces/base.py +7 -7
- agno/os/interfaces/slack/router.py +6 -6
- agno/os/interfaces/slack/slack.py +7 -7
- agno/os/interfaces/whatsapp/router.py +29 -6
- agno/os/interfaces/whatsapp/whatsapp.py +11 -8
- agno/os/managers.py +326 -0
- agno/os/mcp.py +651 -79
- agno/os/router.py +125 -18
- agno/os/routers/agents/router.py +65 -22
- agno/os/routers/agents/schema.py +16 -4
- agno/os/routers/database.py +5 -0
- agno/os/routers/evals/evals.py +93 -11
- agno/os/routers/evals/utils.py +6 -6
- agno/os/routers/knowledge/knowledge.py +104 -16
- agno/os/routers/memory/memory.py +124 -7
- agno/os/routers/metrics/metrics.py +21 -4
- agno/os/routers/session/session.py +141 -12
- agno/os/routers/teams/router.py +40 -14
- agno/os/routers/teams/schema.py +12 -4
- agno/os/routers/traces/traces.py +54 -4
- agno/os/routers/workflows/router.py +223 -117
- agno/os/routers/workflows/schema.py +65 -1
- agno/os/schema.py +38 -12
- agno/os/utils.py +87 -166
- agno/remote/__init__.py +3 -0
- agno/remote/base.py +484 -0
- agno/run/workflow.py +1 -0
- agno/team/__init__.py +2 -0
- agno/team/remote.py +287 -0
- agno/team/team.py +25 -54
- agno/tracing/exporter.py +10 -6
- agno/tracing/setup.py +2 -1
- agno/utils/agent.py +58 -1
- agno/utils/http.py +68 -20
- agno/utils/os.py +0 -0
- agno/utils/remote.py +23 -0
- agno/vectordb/chroma/chromadb.py +452 -16
- agno/vectordb/pgvector/pgvector.py +7 -0
- agno/vectordb/redis/redisdb.py +1 -1
- agno/workflow/__init__.py +2 -0
- agno/workflow/agent.py +2 -2
- agno/workflow/remote.py +222 -0
- agno/workflow/types.py +0 -73
- agno/workflow/workflow.py +119 -68
- {agno-2.3.16.dist-info → agno-2.3.18.dist-info}/METADATA +1 -1
- {agno-2.3.16.dist-info → agno-2.3.18.dist-info}/RECORD +76 -66
- {agno-2.3.16.dist-info → agno-2.3.18.dist-info}/WHEEL +0 -0
- {agno-2.3.16.dist-info → agno-2.3.18.dist-info}/licenses/LICENSE +0 -0
- {agno-2.3.16.dist-info → agno-2.3.18.dist-info}/top_level.txt +0 -0
agno/client/os.py
ADDED
|
@@ -0,0 +1,2669 @@
|
|
|
1
|
+
import json
|
|
2
|
+
from datetime import date
|
|
3
|
+
from typing import Any, AsyncIterator, Callable, Dict, List, Optional, Sequence, Union
|
|
4
|
+
|
|
5
|
+
from fastapi import UploadFile
|
|
6
|
+
from httpx import ConnectError, ConnectTimeout, TimeoutException
|
|
7
|
+
|
|
8
|
+
from agno.db.base import SessionType
|
|
9
|
+
from agno.db.schemas.evals import EvalFilterType, EvalType
|
|
10
|
+
from agno.exceptions import RemoteServerUnavailableError
|
|
11
|
+
from agno.media import Audio, File, Image, Video
|
|
12
|
+
from agno.media import File as MediaFile
|
|
13
|
+
from agno.models.response import ToolExecution
|
|
14
|
+
from agno.os.routers.agents.schema import AgentResponse
|
|
15
|
+
from agno.os.routers.evals.schemas import (
|
|
16
|
+
DeleteEvalRunsRequest,
|
|
17
|
+
EvalRunInput,
|
|
18
|
+
EvalSchema,
|
|
19
|
+
UpdateEvalRunRequest,
|
|
20
|
+
)
|
|
21
|
+
from agno.os.routers.knowledge.schemas import (
|
|
22
|
+
ConfigResponseSchema as KnowledgeConfigResponse,
|
|
23
|
+
)
|
|
24
|
+
from agno.os.routers.knowledge.schemas import (
|
|
25
|
+
ContentResponseSchema,
|
|
26
|
+
ContentStatusResponse,
|
|
27
|
+
VectorSearchResult,
|
|
28
|
+
)
|
|
29
|
+
from agno.os.routers.memory.schemas import (
|
|
30
|
+
DeleteMemoriesRequest,
|
|
31
|
+
OptimizeMemoriesRequest,
|
|
32
|
+
OptimizeMemoriesResponse,
|
|
33
|
+
UserMemoryCreateSchema,
|
|
34
|
+
UserMemorySchema,
|
|
35
|
+
UserStatsSchema,
|
|
36
|
+
)
|
|
37
|
+
from agno.os.routers.metrics.schemas import DayAggregatedMetrics, MetricsResponse
|
|
38
|
+
from agno.os.routers.teams.schema import TeamResponse
|
|
39
|
+
from agno.os.routers.traces.schemas import (
|
|
40
|
+
TraceDetail,
|
|
41
|
+
TraceNode,
|
|
42
|
+
TraceSessionStats,
|
|
43
|
+
TraceSummary,
|
|
44
|
+
)
|
|
45
|
+
from agno.os.routers.workflows.schema import WorkflowResponse
|
|
46
|
+
from agno.os.schema import (
|
|
47
|
+
AgentSessionDetailSchema,
|
|
48
|
+
AgentSummaryResponse,
|
|
49
|
+
ConfigResponse,
|
|
50
|
+
CreateSessionRequest,
|
|
51
|
+
DeleteSessionRequest,
|
|
52
|
+
Model,
|
|
53
|
+
PaginatedResponse,
|
|
54
|
+
PaginationInfo,
|
|
55
|
+
RunSchema,
|
|
56
|
+
SessionSchema,
|
|
57
|
+
TeamRunSchema,
|
|
58
|
+
TeamSessionDetailSchema,
|
|
59
|
+
TeamSummaryResponse,
|
|
60
|
+
UpdateSessionRequest,
|
|
61
|
+
WorkflowRunSchema,
|
|
62
|
+
WorkflowSessionDetailSchema,
|
|
63
|
+
WorkflowSummaryResponse,
|
|
64
|
+
)
|
|
65
|
+
from agno.run.agent import RunOutput, RunOutputEvent, run_output_event_from_dict
|
|
66
|
+
from agno.run.team import TeamRunOutput, TeamRunOutputEvent, team_run_output_event_from_dict
|
|
67
|
+
from agno.run.workflow import WorkflowRunOutput, WorkflowRunOutputEvent, workflow_run_output_event_from_dict
|
|
68
|
+
from agno.utils.http import get_default_async_client, get_default_sync_client
|
|
69
|
+
|
|
70
|
+
|
|
71
|
+
class AgentOSClient:
|
|
72
|
+
"""Client for interacting with AgentOS API endpoints.
|
|
73
|
+
|
|
74
|
+
Attributes:
|
|
75
|
+
base_url: Base URL of the AgentOS instance
|
|
76
|
+
timeout: Request timeout in seconds
|
|
77
|
+
"""
|
|
78
|
+
|
|
79
|
+
def __init__(
    self,
    base_url: str,
    timeout: float = 60.0,
):
    """Create a client bound to a single AgentOS instance.

    Args:
        base_url: Base URL of the AgentOS instance (e.g., "http://localhost:7777")
        timeout: Request timeout in seconds (default: 60.0)
    """
    # Strip any trailing slash so endpoint paths can always be appended directly.
    self.base_url = base_url.rstrip("/")
    self.timeout = timeout
def _request(
    self,
    method: str,
    endpoint: str,
    data: Optional[Dict[str, Any]] = None,
    params: Optional[Dict[str, Any]] = None,
    headers: Optional[Dict[str, str]] = None,
    as_form: bool = False,
) -> Any:
    """Execute synchronous HTTP request.

    Args:
        method: HTTP method (GET, POST, PATCH, DELETE)
        endpoint: API endpoint path (without base URL)
        data: Request body data (optional)
        params: Query parameters (optional)
        headers: HTTP headers to include in the request (optional)
        as_form: If True, send data as form data instead of JSON

    Returns:
        Parsed JSON response, or None for empty responses

    Raises:
        RemoteServerUnavailableError: When the remote server is unavailable
        HTTPStatusError: On HTTP errors (4xx, 5xx)
    """
    url = f"{self.base_url}{endpoint}"

    kwargs: Dict[str, Any] = {"headers": headers or {}}
    if data is not None:
        # Form-encoded body vs JSON body, depending on the target endpoint.
        kwargs["data" if as_form else "json"] = data
    if params is not None:
        kwargs["params"] = params

    sync_client = get_default_sync_client()

    try:
        response = sync_client.request(method, url, timeout=self.timeout, **kwargs)
        response.raise_for_status()

        # Return None for empty responses (204 No Content, etc.)
        if not response.content:
            return None
        return response.json()
    except (ConnectError, ConnectTimeout) as e:
        raise RemoteServerUnavailableError(
            message=f"Failed to connect to remote server at {self.base_url}",
            base_url=self.base_url,
            original_error=e,
        ) from e
    except TimeoutException as e:
        # Message normalized to match the async variant (_arequest) so logs
        # and error handling see one consistent wording (no trailing period).
        raise RemoteServerUnavailableError(
            message=f"Request to remote server at {self.base_url} timed out after {self.timeout} seconds",
            base_url=self.base_url,
            original_error=e,
        ) from e
async def _arequest(
    self,
    method: str,
    endpoint: str,
    data: Optional[Dict[str, Any]] = None,
    params: Optional[Dict[str, Any]] = None,
    headers: Optional[Dict[str, str]] = None,
    as_form: bool = False,
) -> Any:
    """Issue an asynchronous HTTP request against the AgentOS API.

    Args:
        method: HTTP method (GET, POST, PATCH, DELETE)
        endpoint: API endpoint path (without base URL)
        data: Request body data (optional)
        params: Query parameters (optional)
        headers: HTTP headers to include in the request (optional)
        as_form: If True, send data as form data instead of JSON

    Returns:
        Parsed JSON response, or None for empty responses

    Raises:
        RemoteServerUnavailableError: When the remote server is unavailable
        HTTPStatusError: On HTTP errors (4xx, 5xx)
    """
    target = f"{self.base_url}{endpoint}"

    request_kwargs: Dict[str, Any] = {"headers": headers or {}}
    if data is not None:
        # Body is form-encoded or JSON, depending on the target endpoint.
        request_kwargs["data" if as_form else "json"] = data
    if params is not None:
        request_kwargs["params"] = params

    client = get_default_async_client()

    try:
        response = await client.request(method, target, timeout=self.timeout, **request_kwargs)
        response.raise_for_status()

        # Empty bodies (e.g. 204 No Content) yield None instead of a parse error.
        if not response.content:
            return None
        return response.json()
    except (ConnectError, ConnectTimeout) as e:
        raise RemoteServerUnavailableError(
            message=f"Failed to connect to remote server at {self.base_url}",
            base_url=self.base_url,
            original_error=e,
        ) from e
    except TimeoutException as e:
        raise RemoteServerUnavailableError(
            message=f"Request to remote server at {self.base_url} timed out after {self.timeout} seconds",
            base_url=self.base_url,
            original_error=e,
        ) from e
def _get(
    self,
    endpoint: str,
    params: Optional[Dict[str, Any]] = None,
    headers: Optional[Dict[str, str]] = None,
) -> Any:
    """Synchronous GET helper; delegates to ``_request``.

    Args:
        endpoint: API endpoint path (without base URL)
        params: Query parameters (optional)
        headers: HTTP headers to include in the request (optional)

    Returns:
        Parsed JSON response

    Raises:
        HTTPStatusError: On HTTP errors (4xx, 5xx)
    """
    return self._request("GET", endpoint, params=params, headers=headers)
async def _aget(
    self,
    endpoint: str,
    params: Optional[Dict[str, Any]] = None,
    headers: Optional[Dict[str, str]] = None,
) -> Any:
    """Asynchronous GET helper; delegates to ``_arequest``.

    Args:
        endpoint: API endpoint path (without base URL)
        params: Query parameters (optional)
        headers: HTTP headers to include in the request (optional)

    Returns:
        Parsed JSON response

    Raises:
        HTTPStatusError: On HTTP errors (4xx, 5xx)
    """
    return await self._arequest("GET", endpoint, params=params, headers=headers)
async def _apost(
    self,
    endpoint: str,
    data: Optional[Dict[str, Any]] = None,
    params: Optional[Dict[str, Any]] = None,
    headers: Optional[Dict[str, str]] = None,
    as_form: bool = False,
) -> Any:
    """Asynchronous POST helper; delegates to ``_arequest``.

    Args:
        endpoint: API endpoint path (without base URL)
        data: Request body data (optional)
        params: Query parameters (optional)
        headers: HTTP headers to include in the request (optional)
        as_form: If True, send data as form data instead of JSON

    Returns:
        Parsed JSON response

    Raises:
        HTTPStatusError: On HTTP errors (4xx, 5xx)
    """
    return await self._arequest(
        "POST", endpoint, data=data, params=params, headers=headers, as_form=as_form
    )
async def _apatch(
    self,
    endpoint: str,
    data: Optional[Dict[str, Any]] = None,
    params: Optional[Dict[str, Any]] = None,
    headers: Optional[Dict[str, str]] = None,
) -> Any:
    """Asynchronous PATCH helper; delegates to ``_arequest``.

    Args:
        endpoint: API endpoint path (without base URL)
        data: Request body data
        params: Query parameters (optional)
        headers: HTTP headers to include in the request (optional)

    Returns:
        Parsed JSON response

    Raises:
        HTTPStatusError: On HTTP errors (4xx, 5xx)
    """
    return await self._arequest("PATCH", endpoint, data=data, params=params, headers=headers)
async def _adelete(
    self,
    endpoint: str,
    data: Optional[Dict[str, Any]] = None,
    params: Optional[Dict[str, Any]] = None,
    headers: Optional[Dict[str, str]] = None,
) -> None:
    """Asynchronous DELETE helper; delegates to ``_arequest`` and discards the body.

    Args:
        endpoint: API endpoint path (without base URL)
        data: Optional request body data
        params: Query parameters (optional)
        headers: HTTP headers to include in the request (optional)

    Raises:
        HTTPStatusError: On HTTP errors (4xx, 5xx)
    """
    await self._arequest("DELETE", endpoint, data=data, params=params, headers=headers)
async def _astream_post_form_data(
    self,
    endpoint: str,
    data: Dict[str, Any],
    headers: Optional[Dict[str, str]] = None,
) -> AsyncIterator[str]:
    """POST form data and stream the response line by line.

    Args:
        endpoint: API endpoint path (without base URL)
        data: Form data dictionary
        headers: HTTP headers to include in the request (optional)

    Yields:
        str: Lines from the streaming response

    Raises:
        RemoteServerUnavailableError: When the remote server is unavailable
    """
    target = f"{self.base_url}{endpoint}"
    client = get_default_async_client()

    try:
        async with client.stream(
            "POST", target, data=data, headers=headers or {}, timeout=self.timeout
        ) as response:
            response.raise_for_status()
            async for raw_line in response.aiter_lines():
                yield raw_line
    except (ConnectError, ConnectTimeout) as e:
        raise RemoteServerUnavailableError(
            message=f"Failed to connect to remote server at {self.base_url}",
            base_url=self.base_url,
            original_error=e,
        ) from e
    except TimeoutException as e:
        raise RemoteServerUnavailableError(
            message=f"Request to remote server at {self.base_url} timed out after {self.timeout} seconds",
            base_url=self.base_url,
            original_error=e,
        ) from e
async def _parse_sse_events(
    self,
    raw_stream: AsyncIterator[str],
    event_parser: Callable[[dict], Any],
) -> AsyncIterator[Any]:
    """Turn raw SSE lines into typed event objects.

    Args:
        raw_stream: Raw SSE lines from streaming response
        event_parser: Function to parse event dict into typed object

    Yields:
        Parsed event objects
    """
    from agno.utils.log import logger

    data_prefix = "data: "

    async for line in raw_stream:
        # Blank lines and ":"-prefixed comment lines are SSE protocol noise.
        if not line or line.startswith(":"):
            continue

        # Only "data: ..." lines carry event payloads.
        if not line.startswith(data_prefix):
            continue

        try:
            event_dict = json.loads(line[len(data_prefix):])
            # Delegate to the supplied factory to build the typed event.
            yield event_parser(event_dict)
        except json.JSONDecodeError as e:
            # Malformed payload: log and keep the stream alive.
            logger.error(f"Failed to parse SSE JSON: {line[:100]}... | Error: {e}")
        except ValueError as e:
            # Parser rejected the event type: log and keep the stream alive.
            logger.error(f"Unknown event type: {line[:100]}... | Error: {e}")
def get_config(self, headers: Optional[Dict[str, str]] = None) -> ConfigResponse:
    """Fetch AgentOS configuration and metadata (synchronous).

    The payload covers OS metadata (id, description, version), the lists of
    available agents, teams and workflows, interface configurations, and the
    knowledge, evals and metrics settings.

    A sync variant is kept so it can serve other sync use-cases upstream.

    Args:
        headers: HTTP headers to include in the request (optional)

    Returns:
        ConfigResponse: Complete OS configuration
    """
    payload = self._get("/config", headers=headers)
    return ConfigResponse.model_validate(payload)
async def aget_config(self, headers: Optional[Dict[str, str]] = None) -> ConfigResponse:
    """Fetch AgentOS configuration and metadata (asynchronous).

    The payload covers OS metadata (id, description, version), the lists of
    available agents, teams and workflows, interface configurations, and the
    knowledge, evals and metrics settings.

    Args:
        headers: HTTP headers to include in the request (optional)

    Returns:
        ConfigResponse: Complete OS configuration

    Raises:
        HTTPStatusError: On HTTP errors
    """
    payload = await self._aget("/config", headers=headers)
    return ConfigResponse.model_validate(payload)
async def get_models(self, headers: Optional[Dict[str, str]] = None) -> List[Model]:
    """Fetch every model used by the configured agents and teams.

    Args:
        headers: HTTP headers to include in the request (optional)

    Returns:
        List[Model]: List of model configurations

    Raises:
        HTTPStatusError: On HTTP errors
    """
    payload = await self._aget("/models", headers=headers)
    return [Model.model_validate(entry) for entry in payload]
async def migrate_database(
    self, db_id: str, target_version: Optional[str] = None, headers: Optional[Dict[str, str]] = None
) -> None:
    """Migrate a database to a target version.

    Args:
        db_id: ID of the database to migrate
        target_version: Target version to migrate to. None is sent as-is;
            presumably the server then migrates to its default/latest
            version — TODO confirm server semantics.
        headers: HTTP headers to include in the request (optional)

    Raises:
        HTTPStatusError: On HTTP errors
    """
    # Declared to return None, so do not propagate the migrate endpoint's
    # response body to callers (previously `return`ed, contradicting the
    # annotation).
    await self._apost(
        f"/databases/{db_id}/migrate", data={"target_version": target_version}, headers=headers
    )
async def list_agents(self, headers: Optional[Dict[str, str]] = None) -> List[AgentSummaryResponse]:
    """List every agent configured in the AgentOS instance.

    Each summary carries the agent ID, name, description, model
    configuration, and basic settings.

    Args:
        headers: HTTP headers to include in the request (optional)

    Returns:
        List[AgentSummaryResponse]: List of agent summaries

    Raises:
        HTTPStatusError: On HTTP errors
    """
    payload = await self._aget("/agents", headers=headers)
    return [AgentSummaryResponse.model_validate(entry) for entry in payload]
def get_agent(self, agent_id: str, headers: Optional[Dict[str, str]] = None) -> AgentResponse:
    """Fetch the detailed configuration of one agent (synchronous).

    Args:
        agent_id: ID of the agent to retrieve
        headers: HTTP headers to include in the request (optional)

    Returns:
        AgentResponse: Detailed agent configuration

    Raises:
        HTTPStatusError: On HTTP errors (404 if agent not found)
    """
    payload = self._get(f"/agents/{agent_id}", headers=headers)
    return AgentResponse.model_validate(payload)
async def aget_agent(self, agent_id: str, headers: Optional[Dict[str, str]] = None) -> AgentResponse:
    """Fetch the detailed configuration of one agent (asynchronous).

    Args:
        agent_id: ID of the agent to retrieve
        headers: HTTP headers to include in the request (optional)

    Returns:
        AgentResponse: Detailed agent configuration

    Raises:
        HTTPStatusError: On HTTP errors (404 if agent not found)
    """
    payload = await self._aget(f"/agents/{agent_id}", headers=headers)
    return AgentResponse.model_validate(payload)
async def run_agent(
    self,
    agent_id: str,
    message: str,
    session_id: Optional[str] = None,
    user_id: Optional[str] = None,
    images: Optional[Sequence[Image]] = None,
    audio: Optional[Sequence[Audio]] = None,
    videos: Optional[Sequence[Video]] = None,
    files: Optional[Sequence[MediaFile]] = None,
    headers: Optional[Dict[str, str]] = None,
    **kwargs: Any,
) -> RunOutput:
    """Execute an agent run and return the complete response.

    Args:
        agent_id: ID of the agent to run
        message: The message/prompt for the agent
        session_id: Optional session ID for context
        user_id: Optional user ID
        images: Optional list of Image objects
        audio: Optional list of Audio objects
        videos: Optional list of Video objects
        files: Optional list of MediaFile objects
        headers: HTTP headers to include in the request (optional)
        **kwargs: Additional parameters passed to the agent run, such as:
            - session_state: Dict for session state
            - dependencies: Dict for dependencies
            - metadata: Dict for metadata
            - knowledge_filters: Filters for knowledge search
            - output_schema: JSON schema for structured output

    Returns:
        RunOutput: The run response

    Raises:
        HTTPStatusError: On HTTP errors
    """
    form: Dict[str, Any] = {"message": message, "stream": "false"}
    if session_id:
        form["session_id"] = session_id
    if user_id:
        form["user_id"] = user_id

    # Media attachments travel as JSON-encoded lists of serialized objects.
    for field_name, media in (("images", images), ("audio", audio), ("videos", videos), ("files", files)):
        if media:
            form[field_name] = json.dumps([item.model_dump() for item in media])

    # Extra run options: dicts are JSON-encoded, everything else passes through.
    for key, value in kwargs.items():
        form[key] = json.dumps(value) if isinstance(value, dict) else value

    # Drop unset values so they don't appear as empty form fields.
    form = {k: v for k, v in form.items() if v is not None}

    response_data = await self._apost(f"/agents/{agent_id}/runs", form, headers=headers, as_form=True)
    return RunOutput.from_dict(response_data)
async def run_agent_stream(
    self,
    agent_id: str,
    message: str,
    session_id: Optional[str] = None,
    user_id: Optional[str] = None,
    images: Optional[Sequence[Image]] = None,
    audio: Optional[Sequence[Audio]] = None,
    videos: Optional[Sequence[Video]] = None,
    files: Optional[Sequence[MediaFile]] = None,
    headers: Optional[Dict[str, str]] = None,
    **kwargs: Any,
) -> AsyncIterator[RunOutputEvent]:
    """Execute an agent run and stream typed events as they arrive.

    Args:
        agent_id: ID of the agent to run
        message: The message/prompt for the agent
        session_id: Optional session ID for context
        user_id: Optional user ID
        images: Optional list of Image objects
        audio: Optional list of Audio objects
        videos: Optional list of Video objects
        files: Optional list of MediaFile objects
        headers: HTTP headers to include in the request (optional)
        **kwargs: Additional parameters (session_state, dependencies, metadata, etc.)

    Yields:
        RunOutputEvent: Typed event objects (RunStartedEvent, RunContentEvent, etc.)

    Raises:
        HTTPStatusError: On HTTP errors
    """
    form: Dict[str, Any] = {"message": message, "stream": "true"}
    if session_id:
        form["session_id"] = session_id
    if user_id:
        form["user_id"] = user_id

    # Media attachments travel as JSON-encoded lists of serialized objects.
    for field_name, media in (("images", images), ("audio", audio), ("videos", videos), ("files", files)):
        if media:
            form[field_name] = json.dumps([item.model_dump() for item in media])

    # Extra run options: dicts are JSON-encoded, everything else passes through.
    for key, value in kwargs.items():
        form[key] = json.dumps(value) if isinstance(value, dict) else value

    form = {k: v for k, v in form.items() if v is not None}

    # Stream raw SSE lines and convert them to typed events on the fly.
    raw_stream = self._astream_post_form_data(f"/agents/{agent_id}/runs", form, headers=headers)
    async for event in self._parse_sse_events(raw_stream, run_output_event_from_dict):
        yield event
async def continue_agent_run(
    self,
    agent_id: str,
    run_id: str,
    tools: List[ToolExecution],
    session_id: Optional[str] = None,
    user_id: Optional[str] = None,
    headers: Optional[Dict[str, str]] = None,
    **kwargs: Any,
) -> RunOutput:
    """Continue a paused agent run with tool results.

    Args:
        agent_id: ID of the agent
        run_id: ID of the run to continue
        tools: List of ToolExecution objects with tool results
        session_id: Optional session ID
        user_id: Optional user ID
        headers: HTTP headers to include in the request (optional)
        **kwargs: Additional parameters passed through to the run
            (dicts are JSON-encoded)

    Returns:
        RunOutput: The continued run response

    Raises:
        HTTPStatusError: On HTTP errors
    """
    # Note: the previous docstring documented a non-existent ``stream``
    # parameter; streaming continuation lives in continue_agent_run_stream.
    endpoint = f"/agents/{agent_id}/runs/{run_id}/continue"
    data: Dict[str, Any] = {"tools": json.dumps([tool.to_dict() for tool in tools]), "stream": "false"}
    if session_id:
        data["session_id"] = session_id
    if user_id:
        data["user_id"] = user_id

    for key, value in kwargs.items():
        if isinstance(value, dict):
            data[key] = json.dumps(value)
        else:
            data[key] = value

    response_data = await self._apost(endpoint, data, headers=headers, as_form=True)
    return RunOutput.from_dict(response_data)
async def continue_agent_run_stream(
    self,
    agent_id: str,
    run_id: str,
    tools: List[ToolExecution],
    session_id: Optional[str] = None,
    user_id: Optional[str] = None,
    headers: Optional[Dict[str, str]] = None,
    **kwargs: Any,
) -> AsyncIterator[RunOutputEvent]:
    """Continue a paused agent run with tool results, streaming the response.

    Args:
        agent_id: ID of the agent
        run_id: ID of the run to continue
        tools: List of ToolExecution objects with tool results
        session_id: Optional session ID
        user_id: Optional user ID
        headers: HTTP headers to include in the request (optional)

    Yields:
        RunOutputEvent: Typed event objects (RunStartedEvent, RunContentEvent, etc.)

    Raises:
        HTTPStatusError: On HTTP errors
    """
    form: Dict[str, Any] = {"tools": json.dumps([tool.to_dict() for tool in tools]), "stream": "true"}
    if session_id:
        form["session_id"] = session_id
    if user_id:
        form["user_id"] = user_id

    # Extra run options: dicts are JSON-encoded, everything else passes through.
    for key, value in kwargs.items():
        form[key] = json.dumps(value) if isinstance(value, dict) else value

    raw_stream = self._astream_post_form_data(
        f"/agents/{agent_id}/runs/{run_id}/continue", form, headers=headers
    )
    async for event in self._parse_sse_events(raw_stream, run_output_event_from_dict):
        yield event
async def cancel_agent_run(self, agent_id: str, run_id: str, headers: Optional[Dict[str, str]] = None) -> None:
    """Cancel a running agent run.

    Args:
        agent_id: ID of the agent.
        run_id: ID of the run to cancel.
        headers: Optional HTTP headers for the request.

    Raises:
        HTTPStatusError: On HTTP errors.
    """
    endpoint = f"/agents/{agent_id}/runs/{run_id}/cancel"
    await self._apost(endpoint, headers=headers)
|
|
757
|
+
|
|
758
|
+
async def list_teams(self, headers: Optional[Dict[str, str]] = None) -> List[TeamSummaryResponse]:
    """List every team configured in the AgentOS instance.

    Each summary includes the team ID, name, description, model
    configuration and member information.

    Args:
        headers: Optional HTTP headers for the request.

    Returns:
        List[TeamSummaryResponse]: One summary per configured team.

    Raises:
        HTTPStatusError: On HTTP errors.
    """
    raw_items = await self._aget("/teams", headers=headers)
    return [TeamSummaryResponse.model_validate(raw) for raw in raw_items]
|
|
777
|
+
|
|
778
|
+
def get_team(self, team_id: str, headers: Optional[Dict[str, str]] = None) -> TeamResponse:
    """Fetch the detailed configuration of one team (synchronous variant).

    Args:
        team_id: ID of the team to retrieve.
        headers: Optional HTTP headers for the request.

    Returns:
        TeamResponse: Detailed team configuration.

    Raises:
        HTTPStatusError: On HTTP errors (404 if the team does not exist).
    """
    raw = self._get(f"/teams/{team_id}", headers=headers)
    return TeamResponse.model_validate(raw)
|
|
793
|
+
|
|
794
|
+
async def aget_team(self, team_id: str, headers: Optional[Dict[str, str]] = None) -> TeamResponse:
    """Fetch the detailed configuration of one team (async variant).

    Args:
        team_id: ID of the team to retrieve.
        headers: Optional HTTP headers for the request.

    Returns:
        TeamResponse: Detailed team configuration.

    Raises:
        HTTPStatusError: On HTTP errors (404 if the team does not exist).
    """
    raw = await self._aget(f"/teams/{team_id}", headers=headers)
    return TeamResponse.model_validate(raw)
|
|
809
|
+
|
|
810
|
+
async def run_team(
    self,
    team_id: str,
    message: str,
    session_id: Optional[str] = None,
    user_id: Optional[str] = None,
    images: Optional[Sequence[Image]] = None,
    audio: Optional[Sequence[Audio]] = None,
    videos: Optional[Sequence[Video]] = None,
    files: Optional[Sequence[MediaFile]] = None,
    headers: Optional[Dict[str, str]] = None,
    **kwargs: Any,
) -> TeamRunOutput:
    """Execute a team run and wait for the complete response.

    Args:
        team_id: ID of the team to run.
        message: The message/prompt for the team.
        session_id: Optional session ID for context.
        user_id: Optional user ID.
        images: Optional list of images.
        audio: Optional audio data.
        videos: Optional list of videos.
        files: Optional list of files.
        headers: Optional HTTP headers for the request.
        **kwargs: Extra form fields; dict values are JSON-encoded.

    Returns:
        TeamRunOutput: The team run response.

    Raises:
        HTTPStatusError: On HTTP errors.
    """
    form: Dict[str, Any] = {"message": message, "stream": "false"}
    if session_id:
        form["session_id"] = session_id
    if user_id:
        form["user_id"] = user_id
    # NOTE(review): media sequences are handed to json.dumps directly; this
    # assumes Image/Audio/Video/MediaFile are JSON-serializable as-is — confirm.
    for field_name, media in (("images", images), ("audio", audio), ("videos", videos), ("files", files)):
        if media:
            form[field_name] = json.dumps(media)
    # Extra fields: dicts are JSON-encoded, everything else passes through.
    for key, value in kwargs.items():
        form[key] = json.dumps(value) if isinstance(value, dict) else value
    form = {k: v for k, v in form.items() if v is not None}

    payload = await self._apost(f"/teams/{team_id}/runs", form, headers=headers, as_form=True)
    return TeamRunOutput.from_dict(payload)
|
|
869
|
+
|
|
870
|
+
async def run_team_stream(
    self,
    team_id: str,
    message: str,
    session_id: Optional[str] = None,
    user_id: Optional[str] = None,
    images: Optional[Sequence[Image]] = None,
    audio: Optional[Sequence[Audio]] = None,
    videos: Optional[Sequence[Video]] = None,
    files: Optional[Sequence[MediaFile]] = None,
    headers: Optional[Dict[str, str]] = None,
    **kwargs: Any,
) -> AsyncIterator[TeamRunOutputEvent]:
    """Execute a team run and stream the response as typed events.

    Args:
        team_id: ID of the team to run.
        message: The message/prompt for the team.
        session_id: Optional session ID for context.
        user_id: Optional user ID.
        images: Optional list of images.
        audio: Optional audio data.
        videos: Optional list of videos.
        files: Optional list of files.
        headers: Optional HTTP headers for the request.
        **kwargs: Extra form fields; dict values are JSON-encoded.

    Yields:
        TeamRunOutputEvent: Typed event objects (team and agent events).

    Raises:
        HTTPStatusError: On HTTP errors.
    """
    form: Dict[str, Any] = {"message": message, "stream": "true"}
    if session_id:
        form["session_id"] = session_id
    if user_id:
        form["user_id"] = user_id
    # NOTE(review): media sequences are handed to json.dumps directly; this
    # assumes Image/Audio/Video/MediaFile are JSON-serializable as-is — confirm.
    for field_name, media in (("images", images), ("audio", audio), ("videos", videos), ("files", files)):
        if media:
            form[field_name] = json.dumps(media)
    # Extra fields: dicts are JSON-encoded, everything else passes through.
    for key, value in kwargs.items():
        form[key] = json.dumps(value) if isinstance(value, dict) else value
    form = {k: v for k, v in form.items() if v is not None}

    # Raw SSE stream parsed into typed events before yielding.
    sse_stream = self._astream_post_form_data(f"/teams/{team_id}/runs", form, headers=headers)
    async for parsed_event in self._parse_sse_events(sse_stream, team_run_output_event_from_dict):
        yield parsed_event
|
|
931
|
+
|
|
932
|
+
async def cancel_team_run(self, team_id: str, run_id: str, headers: Optional[Dict[str, str]] = None) -> None:
    """Cancel a running team run.

    Args:
        team_id: ID of the team.
        run_id: ID of the run to cancel.
        headers: Optional HTTP headers for the request.

    Raises:
        HTTPStatusError: On HTTP errors.
    """
    endpoint = f"/teams/{team_id}/runs/{run_id}/cancel"
    await self._apost(endpoint, headers=headers)
|
|
944
|
+
|
|
945
|
+
async def list_workflows(self, headers: Optional[Dict[str, str]] = None) -> List[WorkflowSummaryResponse]:
    """List every workflow configured in the AgentOS instance.

    Each summary includes the workflow ID, name, description and step
    information.

    Args:
        headers: Optional HTTP headers for the request.

    Returns:
        List[WorkflowSummaryResponse]: One summary per configured workflow.

    Raises:
        HTTPStatusError: On HTTP errors.
    """
    raw_items = await self._aget("/workflows", headers=headers)
    return [WorkflowSummaryResponse.model_validate(raw) for raw in raw_items]
|
|
963
|
+
|
|
964
|
+
def get_workflow(self, workflow_id: str, headers: Optional[Dict[str, str]] = None) -> WorkflowResponse:
    """Fetch the detailed configuration of one workflow (synchronous variant).

    Args:
        workflow_id: ID of the workflow to retrieve.
        headers: Optional HTTP headers for the request.

    Returns:
        WorkflowResponse: Detailed workflow configuration.

    Raises:
        HTTPStatusError: On HTTP errors (404 if the workflow does not exist).
    """
    raw = self._get(f"/workflows/{workflow_id}", headers=headers)
    return WorkflowResponse.model_validate(raw)
|
|
979
|
+
|
|
980
|
+
async def aget_workflow(self, workflow_id: str, headers: Optional[Dict[str, str]] = None) -> WorkflowResponse:
    """Fetch the detailed configuration of one workflow (async variant).

    Args:
        workflow_id: ID of the workflow to retrieve.
        headers: Optional HTTP headers for the request.

    Returns:
        WorkflowResponse: Detailed workflow configuration.

    Raises:
        HTTPStatusError: On HTTP errors (404 if the workflow does not exist).
    """
    raw = await self._aget(f"/workflows/{workflow_id}", headers=headers)
    return WorkflowResponse.model_validate(raw)
|
|
995
|
+
|
|
996
|
+
async def run_workflow(
    self,
    workflow_id: str,
    message: str,
    session_id: Optional[str] = None,
    user_id: Optional[str] = None,
    images: Optional[Sequence[Image]] = None,
    audio: Optional[Sequence[Audio]] = None,
    videos: Optional[Sequence[Video]] = None,
    files: Optional[Sequence[MediaFile]] = None,
    headers: Optional[Dict[str, str]] = None,
    **kwargs: Any,
) -> WorkflowRunOutput:
    """Execute a workflow run and wait for the complete response.

    Args:
        workflow_id: ID of the workflow to run.
        message: The message/prompt for the workflow.
        session_id: Optional session ID for context.
        user_id: Optional user ID.
        images: Optional list of images.
        audio: Optional audio data.
        videos: Optional list of videos.
        files: Optional list of files.
        headers: Optional HTTP headers for the request.
        **kwargs: Extra form fields; dict values are JSON-encoded.

    Returns:
        WorkflowRunOutput: The workflow run response.

    Raises:
        HTTPStatusError: On HTTP errors.
    """
    form: Dict[str, Any] = {"message": message, "stream": "false"}
    if session_id:
        form["session_id"] = session_id
    if user_id:
        form["user_id"] = user_id
    # NOTE(review): media sequences are handed to json.dumps directly; this
    # assumes Image/Audio/Video/MediaFile are JSON-serializable as-is — confirm.
    for field_name, media in (("images", images), ("audio", audio), ("videos", videos), ("files", files)):
        if media:
            form[field_name] = json.dumps(media)
    # Extra fields: dicts are JSON-encoded, everything else passes through.
    for key, value in kwargs.items():
        form[key] = json.dumps(value) if isinstance(value, dict) else value
    form = {k: v for k, v in form.items() if v is not None}

    payload = await self._apost(f"/workflows/{workflow_id}/runs", form, headers=headers, as_form=True)
    return WorkflowRunOutput.from_dict(payload)
|
|
1054
|
+
|
|
1055
|
+
async def run_workflow_stream(
    self,
    workflow_id: str,
    message: str,
    session_id: Optional[str] = None,
    user_id: Optional[str] = None,
    images: Optional[Sequence[Image]] = None,
    audio: Optional[Sequence[Audio]] = None,
    videos: Optional[Sequence[Video]] = None,
    files: Optional[Sequence[MediaFile]] = None,
    headers: Optional[Dict[str, str]] = None,
    **kwargs: Any,
) -> AsyncIterator[WorkflowRunOutputEvent]:
    """Execute a workflow run and stream the response as typed events.

    Args:
        workflow_id: ID of the workflow to run.
        message: The message/prompt for the workflow.
        session_id: Optional session ID for context.
        user_id: Optional user ID.
        images: Optional list of images.
        audio: Optional audio data.
        videos: Optional list of videos.
        files: Optional list of files.
        headers: Optional HTTP headers for the request.
        **kwargs: Extra form fields; dict values are JSON-encoded.

    Yields:
        WorkflowRunOutputEvent: Typed event objects (workflow, team, and agent events).

    Raises:
        HTTPStatusError: On HTTP errors.
    """
    form: Dict[str, Any] = {"message": message, "stream": "true"}
    if session_id:
        form["session_id"] = session_id
    if user_id:
        form["user_id"] = user_id
    # NOTE(review): media sequences are handed to json.dumps directly; this
    # assumes Image/Audio/Video/MediaFile are JSON-serializable as-is — confirm.
    for field_name, media in (("images", images), ("audio", audio), ("videos", videos), ("files", files)):
        if media:
            form[field_name] = json.dumps(media)
    # Extra fields: dicts are JSON-encoded, everything else passes through.
    for key, value in kwargs.items():
        form[key] = json.dumps(value) if isinstance(value, dict) else value
    form = {k: v for k, v in form.items() if v is not None}

    # Raw SSE stream parsed into typed events before yielding.
    sse_stream = self._astream_post_form_data(f"/workflows/{workflow_id}/runs", form, headers=headers)
    async for parsed_event in self._parse_sse_events(sse_stream, workflow_run_output_event_from_dict):
        yield parsed_event
|
|
1116
|
+
|
|
1117
|
+
async def cancel_workflow_run(
    self, workflow_id: str, run_id: str, headers: Optional[Dict[str, str]] = None
) -> None:
    """Cancel a running workflow run.

    Args:
        workflow_id: ID of the workflow.
        run_id: ID of the run to cancel.
        headers: Optional HTTP headers for the request.

    Raises:
        HTTPStatusError: On HTTP errors.
    """
    endpoint = f"/workflows/{workflow_id}/runs/{run_id}/cancel"
    await self._apost(endpoint, headers=headers)
|
|
1131
|
+
|
|
1132
|
+
async def create_memory(
    self,
    memory: str,
    user_id: str,
    topics: Optional[List[str]] = None,
    db_id: Optional[str] = None,
    table: Optional[str] = None,
    headers: Optional[Dict[str, str]] = None,
) -> UserMemorySchema:
    """Create a new user memory.

    Args:
        memory: The memory content to store.
        user_id: User ID to associate with the memory.
        topics: Optional topics categorizing the memory.
        db_id: Optional database ID to use.
        table: Optional table name to use.
        headers: Optional HTTP headers for the request.

    Returns:
        UserMemorySchema: The created memory.

    Raises:
        HTTPStatusError: On HTTP errors.
    """
    query = {key: val for key, val in (("db_id", db_id), ("table", table)) if val is not None}

    # Schema keeps the request payload type-safe.
    body = UserMemoryCreateSchema(memory=memory, user_id=user_id, topics=topics)

    created = await self._apost("/memories", body.model_dump(exclude_none=True), params=query, headers=headers)
    return UserMemorySchema.model_validate(created)
|
|
1165
|
+
|
|
1166
|
+
async def get_memory(
    self,
    memory_id: str,
    user_id: Optional[str] = None,
    db_id: Optional[str] = None,
    table: Optional[str] = None,
    headers: Optional[Dict[str, str]] = None,
) -> UserMemorySchema:
    """Fetch a single memory by its ID.

    Args:
        memory_id: ID of the memory to retrieve.
        user_id: Optional user ID filter.
        db_id: Optional database ID to use.
        table: Optional table name to use.
        headers: Optional HTTP headers for the request.

    Returns:
        UserMemorySchema: The requested memory.

    Raises:
        HTTPStatusError: On HTTP errors (404 if the memory does not exist).
    """
    query = {
        key: val
        for key, val in (("db_id", db_id), ("table", table), ("user_id", user_id))
        if val is not None
    }

    raw = await self._aget(f"/memories/{memory_id}", params=query, headers=headers)
    return UserMemorySchema.model_validate(raw)
|
|
1194
|
+
|
|
1195
|
+
async def list_memories(
    self,
    user_id: Optional[str] = None,
    agent_id: Optional[str] = None,
    team_id: Optional[str] = None,
    topics: Optional[List[str]] = None,
    search_content: Optional[str] = None,
    limit: int = 20,
    page: int = 1,
    sort_by: str = "updated_at",
    sort_order: str = "desc",
    db_id: Optional[str] = None,
    table: Optional[str] = None,
    headers: Optional[Dict[str, str]] = None,
) -> PaginatedResponse[UserMemorySchema]:
    """List user memories with filtering, sorting and pagination.

    Args:
        user_id: Filter by user ID.
        agent_id: Filter by agent ID.
        team_id: Filter by team ID.
        topics: Filter by topics.
        search_content: Search within memory content.
        limit: Number of memories per page.
        page: Page number (1-based).
        sort_by: Field to sort by.
        sort_order: Sort order ("asc" or "desc").
        db_id: Optional database ID to use.
        table: Optional table name to use.
        headers: Optional HTTP headers for the request.

    Returns:
        PaginatedResponse[UserMemorySchema]: One page of memories.

    Raises:
        HTTPStatusError: On HTTP errors.
    """
    candidate_params: Dict[str, Any] = {
        "limit": limit,
        "page": page,
        "sort_by": sort_by,
        "sort_order": sort_order,
        "db_id": db_id,
        "table": table,
        "user_id": user_id,
        "agent_id": agent_id,
        "team_id": team_id,
        "topics": topics,
        "search_content": search_content,
    }
    # Drop unset filters so they are not sent as query parameters.
    query = {name: val for name, val in candidate_params.items() if val is not None}

    raw = await self._aget("/memories", params=query, headers=headers)
    return PaginatedResponse[UserMemorySchema].model_validate(raw)
|
|
1249
|
+
|
|
1250
|
+
async def update_memory(
    self,
    memory_id: str,
    memory: str,
    user_id: str,
    topics: Optional[List[str]] = None,
    db_id: Optional[str] = None,
    table: Optional[str] = None,
    headers: Optional[Dict[str, str]] = None,
) -> UserMemorySchema:
    """Update an existing memory in place.

    Args:
        memory_id: ID of the memory to update.
        memory: New memory content.
        user_id: User ID associated with the memory.
        topics: Optional replacement list of topics.
        db_id: Optional database ID to use.
        table: Optional table name to use.
        headers: Optional HTTP headers for the request.

    Returns:
        UserMemorySchema: The updated memory.

    Raises:
        HTTPStatusError: On HTTP errors.
    """
    query = {key: val for key, val in (("db_id", db_id), ("table", table)) if val is not None}

    # Schema keeps the request payload type-safe.
    body = UserMemoryCreateSchema(memory=memory, user_id=user_id, topics=topics)

    updated = await self._apatch(
        f"/memories/{memory_id}", body.model_dump(exclude_none=True), params=query, headers=headers
    )
    return UserMemorySchema.model_validate(updated)
|
|
1287
|
+
|
|
1288
|
+
async def delete_memory(
    self,
    memory_id: str,
    user_id: Optional[str] = None,
    db_id: Optional[str] = None,
    table: Optional[str] = None,
    headers: Optional[Dict[str, str]] = None,
) -> None:
    """Delete a single memory by its ID.

    Args:
        memory_id: ID of the memory to delete.
        user_id: Optional user ID filter.
        db_id: Optional database ID to use.
        table: Optional table name to use.
        headers: Optional HTTP headers for the request.

    Raises:
        HTTPStatusError: On HTTP errors.
    """
    query = {
        key: val
        for key, val in (("db_id", db_id), ("table", table), ("user_id", user_id))
        if val is not None
    }

    await self._adelete(f"/memories/{memory_id}", params=query, headers=headers)
|
|
1312
|
+
|
|
1313
|
+
async def delete_memories(
    self,
    memory_ids: List[str],
    user_id: Optional[str] = None,
    db_id: Optional[str] = None,
    table: Optional[str] = None,
    headers: Optional[Dict[str, str]] = None,
) -> None:
    """Delete several memories in one request.

    Args:
        memory_ids: IDs of the memories to delete.
        user_id: Optional user ID filter.
        db_id: Optional database ID to use.
        table: Optional table name to use.
        headers: Optional HTTP headers for the request.

    Raises:
        HTTPStatusError: On HTTP errors.
    """
    query = {key: val for key, val in (("db_id", db_id), ("table", table)) if val is not None}

    # Schema keeps the request payload type-safe.
    body = DeleteMemoriesRequest(memory_ids=memory_ids, user_id=user_id)

    await self._adelete("/memories", body.model_dump(exclude_none=True), params=query, headers=headers)
|
|
1340
|
+
|
|
1341
|
+
async def get_memory_topics(
    self,
    db_id: Optional[str] = None,
    table: Optional[str] = None,
    headers: Optional[Dict[str, str]] = None,
) -> List[str]:
    """Fetch every unique memory topic.

    Args:
        db_id: Optional database ID to use.
        table: Optional table name to use.
        headers: Optional HTTP headers for the request.

    Returns:
        List[str]: Unique topic names.

    Raises:
        HTTPStatusError: On HTTP errors.
    """
    query = {key: val for key, val in (("db_id", db_id), ("table", table)) if val is not None}

    return await self._aget("/memory_topics", params=query, headers=headers)
|
|
1364
|
+
|
|
1365
|
+
async def get_user_memory_stats(
    self,
    limit: int = 20,
    page: int = 1,
    db_id: Optional[str] = None,
    table: Optional[str] = None,
    headers: Optional[Dict[str, str]] = None,
) -> PaginatedResponse[UserStatsSchema]:
    """Fetch per-user memory statistics, paginated.

    Args:
        limit: Number of stats entries per page.
        page: Page number (1-based).
        db_id: Optional database ID to use.
        table: Optional table name to use.
        headers: Optional HTTP headers for the request.

    Returns:
        PaginatedResponse[UserStatsSchema]: One page of user statistics.

    Raises:
        HTTPStatusError: On HTTP errors.
    """
    candidate_params: Dict[str, Any] = {"limit": limit, "page": page, "db_id": db_id, "table": table}
    query = {name: val for name, val in candidate_params.items() if val is not None}

    raw = await self._aget("/user_memory_stats", params=query, headers=headers)
    return PaginatedResponse[UserStatsSchema].model_validate(raw)
|
|
1393
|
+
|
|
1394
|
+
async def optimize_memories(
    self,
    user_id: str,
    model: Optional[str] = None,
    apply: bool = True,
    db_id: Optional[str] = None,
    table: Optional[str] = None,
    headers: Optional[Dict[str, str]] = None,
) -> OptimizeMemoriesResponse:
    """Run memory optimization for one user.

    Args:
        user_id: User whose memories should be optimized.
        model: Optional model to use for optimization.
        apply: When True, the optimized memories replace the stored ones.
        db_id: Optional database ID to use.
        table: Optional table name to use.
        headers: Optional HTTP headers for the request.

    Returns:
        OptimizeMemoriesResponse: Result of the optimization.
    """
    candidate_params: Dict[str, Any] = {"db_id": db_id, "table": table}
    query = {name: val for name, val in candidate_params.items() if val is not None}

    # Schema keeps the request payload type-safe.
    body = OptimizeMemoriesRequest(user_id=user_id, model=model, apply=apply)

    raw = await self._apost(
        "/optimize-memories", body.model_dump(exclude_none=True), params=query, headers=headers
    )
    return OptimizeMemoriesResponse.model_validate(raw)
|
|
1426
|
+
|
|
1427
|
+
# Session Operations
|
|
1428
|
+
async def create_session(
    self,
    session_type: SessionType = SessionType.AGENT,
    session_id: Optional[str] = None,
    user_id: Optional[str] = None,
    session_name: Optional[str] = None,
    session_state: Optional[Dict[str, Any]] = None,
    metadata: Optional[Dict[str, Any]] = None,
    agent_id: Optional[str] = None,
    team_id: Optional[str] = None,
    workflow_id: Optional[str] = None,
    db_id: Optional[str] = None,
    headers: Optional[Dict[str, str]] = None,
) -> Union[AgentSessionDetailSchema, TeamSessionDetailSchema, WorkflowSessionDetailSchema]:
    """Create a new session of the given type.

    Args:
        session_type: Type of session to create (agent, team, or workflow).
        session_id: Optional session ID (server-generated when omitted).
        user_id: User ID to associate with the session.
        session_name: Optional session name.
        session_state: Optional initial session state.
        metadata: Optional session metadata.
        agent_id: Agent ID (for agent sessions).
        team_id: Team ID (for team sessions).
        workflow_id: Workflow ID (for workflow sessions).
        db_id: Optional database ID to use.
        headers: Optional HTTP headers for the request.

    Returns:
        AgentSessionDetailSchema, TeamSessionDetailSchema, or
        WorkflowSessionDetailSchema, matching ``session_type``.

    Raises:
        HTTPStatusError: On HTTP errors.
    """
    candidate_params: Dict[str, Any] = {"type": session_type.value, "db_id": db_id}
    query = {name: val for name, val in candidate_params.items() if val is not None}

    # Schema keeps the request payload type-safe.
    body = CreateSessionRequest(
        session_id=session_id,
        user_id=user_id,
        session_name=session_name,
        session_state=session_state,
        metadata=metadata,
        agent_id=agent_id,
        team_id=team_id,
        workflow_id=workflow_id,
    )

    raw = await self._apost("/sessions", body.model_dump(), params=query, headers=headers)

    # Pick the detail schema that matches the requested session type.
    if session_type == SessionType.AGENT:
        return AgentSessionDetailSchema.model_validate(raw)
    if session_type == SessionType.TEAM:
        return TeamSessionDetailSchema.model_validate(raw)
    return WorkflowSessionDetailSchema.model_validate(raw)
|
|
1486
|
+
|
|
1487
|
+
async def get_sessions(
    self,
    session_type: Optional[SessionType] = None,
    component_id: Optional[str] = None,
    user_id: Optional[str] = None,
    session_name: Optional[str] = None,
    limit: int = 20,
    page: int = 1,
    sort_by: str = "created_at",
    sort_order: str = "desc",
    db_id: Optional[str] = None,
    table: Optional[str] = None,
    headers: Optional[Dict[str, str]] = None,
) -> PaginatedResponse[SessionSchema]:
    """List sessions with filtering, sorting and pagination.

    Args:
        session_type: Optional session type filter (agent, team, or workflow).
        component_id: Optional component (agent/team/workflow) ID filter.
        user_id: Optional user ID filter.
        session_name: Optional session name filter.
        limit: Number of sessions per page.
        page: Page number (1-based).
        sort_by: Field to sort by.
        sort_order: Sort order ("asc" or "desc").
        db_id: Optional database ID to use.
        table: Optional table name to use.
        headers: Optional HTTP headers for the request.

    Returns:
        PaginatedResponse[SessionSchema]: One page of session summaries.

    Raises:
        HTTPStatusError: On HTTP errors.
    """
    params: Dict[str, Any] = {
        "type": session_type.value if session_type else None,
        "limit": str(limit),
        "page": str(page),
        "sort_by": sort_by,
        "sort_order": sort_order,
        "db_id": db_id,
        "table": table,
        "user_id": user_id,
        "session_name": session_name,
        "component_id": component_id,
    }
    # Drop unset filters so they are not sent as query parameters.
    params = {k: v for k, v in params.items() if v is not None}

    response = await self._aget("/sessions", params=params, headers=headers)
    # The endpoint wraps results as {"data": [...], "meta": {...}}.
    data = response.get("data", [])
    pagination_info = PaginationInfo.model_validate(response.get("meta", {}))
    return PaginatedResponse[SessionSchema](
        data=[SessionSchema.from_dict(session) for session in data],
        meta=pagination_info,
    )
|
|
1541
|
+
|
|
1542
|
+
async def get_session(
    self,
    session_id: str,
    session_type: SessionType = SessionType.AGENT,
    user_id: Optional[str] = None,
    db_id: Optional[str] = None,
    table: Optional[str] = None,
    headers: Optional[Dict[str, str]] = None,
) -> Union[AgentSessionDetailSchema, TeamSessionDetailSchema, WorkflowSessionDetailSchema]:
    """Fetch a single session by its ID.

    Args:
        session_id: ID of the session to retrieve
        session_type: Type of session (agent, team, or workflow)
        user_id: Optional user ID filter
        db_id: Optional database ID to use
        table: Optional table name to use
        headers: HTTP headers to include in the request (optional)

    Returns:
        AgentSessionDetailSchema, TeamSessionDetailSchema, or WorkflowSessionDetailSchema

    Raises:
        HTTPStatusError: On HTTP errors (404 if not found)
    """
    # Build the query string, omitting any filter the caller left unset.
    raw_query: Dict[str, Any] = {
        "type": session_type.value,
        "user_id": user_id,
        "db_id": db_id,
        "table": table,
    }
    params = {key: value for key, value in raw_query.items() if value is not None}

    data = await self._aget(f"/sessions/{session_id}", params=params, headers=headers)

    # Validate with the detail schema matching the requested session type;
    # anything that is neither agent nor team is treated as a workflow session.
    if session_type == SessionType.AGENT:
        return AgentSessionDetailSchema.model_validate(data)
    if session_type == SessionType.TEAM:
        return TeamSessionDetailSchema.model_validate(data)
    return WorkflowSessionDetailSchema.model_validate(data)
|
|
1583
|
+
|
|
1584
|
+
async def get_session_runs(
    self,
    session_id: str,
    session_type: SessionType = SessionType.AGENT,
    user_id: Optional[str] = None,
    created_after: Optional[int] = None,
    created_before: Optional[int] = None,
    db_id: Optional[str] = None,
    table: Optional[str] = None,
    headers: Optional[Dict[str, str]] = None,
) -> List[Union[RunSchema, TeamRunSchema, WorkflowRunSchema]]:
    """Fetch every run recorded for a session.

    Args:
        session_id: ID of the session
        session_type: Type of session (agent, team, or workflow)
        user_id: Optional user ID filter
        created_after: Filter runs created after this Unix timestamp
        created_before: Filter runs created before this Unix timestamp
        db_id: Optional database ID to use
        table: Optional table name to use
        headers: HTTP headers to include in the request (optional)

    Returns:
        List of runs (RunSchema, TeamRunSchema, or WorkflowRunSchema)

    Raises:
        HTTPStatusError: On HTTP errors
    """
    raw_query: Dict[str, Any] = {
        "type": session_type.value,
        "user_id": user_id,
        "created_after": created_after,
        "created_before": created_before,
        "db_id": db_id,
        "table": table,
    }
    params = {key: value for key, value in raw_query.items() if value is not None}

    data = await self._aget(f"/sessions/{session_id}/runs", params=params, headers=headers)

    # Classify each run by the IDs present on it: workflow wins over team,
    # and anything without either is a plain agent run.
    runs: List[Union[RunSchema, TeamRunSchema, WorkflowRunSchema]] = []
    for item in data:
        if item.get("workflow_id") is not None:
            schema_cls = WorkflowRunSchema
        elif item.get("team_id") is not None:
            schema_cls = TeamRunSchema
        else:
            schema_cls = RunSchema
        runs.append(schema_cls.model_validate(item))
    return runs
|
|
1635
|
+
|
|
1636
|
+
async def get_session_run(
    self,
    session_id: str,
    run_id: str,
    session_type: SessionType = SessionType.AGENT,
    user_id: Optional[str] = None,
    db_id: Optional[str] = None,
    table: Optional[str] = None,
    headers: Optional[Dict[str, str]] = None,
) -> Union[RunSchema, TeamRunSchema, WorkflowRunSchema]:
    """Fetch one run belonging to a session.

    Args:
        session_id: ID of the session
        run_id: ID of the run to retrieve
        session_type: Type of session (agent, team, or workflow)
        user_id: Optional user ID filter
        db_id: Optional database ID to use
        table: Optional table name to use
        headers: HTTP headers to include in the request (optional)

    Returns:
        RunSchema, TeamRunSchema, or WorkflowRunSchema

    Raises:
        HTTPStatusError: On HTTP errors (404 if not found)
    """
    raw_query: Dict[str, Any] = {
        "type": session_type.value,
        "user_id": user_id,
        "db_id": db_id,
        "table": table,
    }
    params = {key: value for key, value in raw_query.items() if value is not None}

    data = await self._aget(f"/sessions/{session_id}/runs/{run_id}", params=params, headers=headers)

    # Pick the schema from the IDs on the run payload: workflow_id beats
    # team_id, and neither means a plain agent run.
    if data.get("workflow_id") is not None:
        return WorkflowRunSchema.model_validate(data)
    if data.get("team_id") is not None:
        return TeamRunSchema.model_validate(data)
    return RunSchema.model_validate(data)
|
|
1680
|
+
|
|
1681
|
+
async def delete_session(
    self,
    session_id: str,
    db_id: Optional[str] = None,
    table: Optional[str] = None,
    headers: Optional[Dict[str, str]] = None,
) -> None:
    """Delete a single session by ID.

    Args:
        session_id: ID of the session to delete
        db_id: Optional database ID to use
        table: Optional table name to use
        headers: HTTP headers to include in the request (optional)

    Raises:
        HTTPStatusError: On HTTP errors
    """
    # Only forward the optional parameters the caller actually supplied.
    params: Dict[str, Any] = {}
    if db_id is not None:
        params["db_id"] = db_id
    if table is not None:
        params["table"] = table

    await self._adelete(f"/sessions/{session_id}", params=params, headers=headers)
|
|
1706
|
+
|
|
1707
|
+
async def delete_sessions(
    self,
    session_ids: List[str],
    session_types: List[SessionType],
    db_id: Optional[str] = None,
    table: Optional[str] = None,
    headers: Optional[Dict[str, str]] = None,
) -> None:
    """Delete several sessions in one request.

    Args:
        session_ids: List of session IDs to delete
        session_types: List of session types corresponding to each session ID
        db_id: Optional database ID to use
        table: Optional table name to use
        headers: HTTP headers to include in the request (optional)

    Raises:
        HTTPStatusError: On HTTP errors
    """
    # Only forward the optional parameters the caller actually supplied.
    params: Dict[str, Any] = {}
    if db_id is not None:
        params["db_id"] = db_id
    if table is not None:
        params["table"] = table

    # The request schema keeps the IDs and their types paired and validated.
    body = DeleteSessionRequest(session_ids=session_ids, session_types=session_types)

    await self._adelete("/sessions", body.model_dump(mode="json"), params=params, headers=headers)
|
|
1738
|
+
|
|
1739
|
+
async def rename_session(
    self,
    session_id: str,
    session_name: str,
    session_type: SessionType = SessionType.AGENT,
    db_id: Optional[str] = None,
    table: Optional[str] = None,
    headers: Optional[Dict[str, str]] = None,
) -> Union[AgentSessionDetailSchema, TeamSessionDetailSchema, WorkflowSessionDetailSchema]:
    """Give an existing session a new name.

    Args:
        session_id: ID of the session to rename
        session_name: New name for the session
        session_type: Type of session (agent, team, or workflow)
        db_id: Optional database ID to use
        table: Optional table name to use
        headers: HTTP headers to include in the request (optional)

    Returns:
        AgentSessionDetailSchema, TeamSessionDetailSchema, or WorkflowSessionDetailSchema

    Raises:
        HTTPStatusError: On HTTP errors (404 if not found)
    """
    raw_query: Dict[str, Any] = {
        "type": session_type.value,
        "db_id": db_id,
        "table": table,
    }
    params = {key: value for key, value in raw_query.items() if value is not None}

    data = await self._apost(
        f"/sessions/{session_id}/rename", {"session_name": session_name}, params=params, headers=headers
    )

    # Validate with the detail schema matching the requested session type;
    # non-agent, non-team types fall through to workflow.
    if session_type == SessionType.AGENT:
        return AgentSessionDetailSchema.model_validate(data)
    if session_type == SessionType.TEAM:
        return TeamSessionDetailSchema.model_validate(data)
    return WorkflowSessionDetailSchema.model_validate(data)
|
|
1780
|
+
|
|
1781
|
+
async def update_session(
    self,
    session_id: str,
    session_type: SessionType = SessionType.AGENT,
    session_name: Optional[str] = None,
    session_state: Optional[Dict[str, Any]] = None,
    metadata: Optional[Dict[str, Any]] = None,
    summary: Optional[Dict[str, Any]] = None,
    user_id: Optional[str] = None,
    db_id: Optional[str] = None,
    table: Optional[str] = None,
    headers: Optional[Dict[str, str]] = None,
) -> Union[AgentSessionDetailSchema, TeamSessionDetailSchema, WorkflowSessionDetailSchema]:
    """Patch mutable properties of a session.

    Args:
        session_id: ID of the session to update
        session_type: Type of session (agent, team, or workflow)
        session_name: Optional new session name
        session_state: Optional new session state
        metadata: Optional new metadata
        summary: Optional new summary
        user_id: Optional user ID
        db_id: Optional database ID to use
        table: Optional table name to use
        headers: HTTP headers to include in the request (optional)

    Returns:
        AgentSessionDetailSchema, TeamSessionDetailSchema, or WorkflowSessionDetailSchema

    Raises:
        HTTPStatusError: On HTTP errors (404 if not found)
    """
    raw_query: Dict[str, Any] = {
        "type": session_type.value,
        "user_id": user_id,
        "db_id": db_id,
        "table": table,
    }
    params = {key: value for key, value in raw_query.items() if value is not None}

    # The request schema validates the patch; exclude_none keeps untouched
    # fields out of the body so the server does not overwrite them.
    body = UpdateSessionRequest(
        session_name=session_name,
        session_state=session_state,
        metadata=metadata,
        summary=summary,
    )

    data = await self._apatch(
        f"/sessions/{session_id}", body.model_dump(exclude_none=True), params=params, headers=headers
    )

    if session_type == SessionType.AGENT:
        return AgentSessionDetailSchema.model_validate(data)
    if session_type == SessionType.TEAM:
        return TeamSessionDetailSchema.model_validate(data)
    return WorkflowSessionDetailSchema.model_validate(data)
|
|
1840
|
+
|
|
1841
|
+
# Eval Operations
|
|
1842
|
+
|
|
1843
|
+
async def list_eval_runs(
    self,
    agent_id: Optional[str] = None,
    team_id: Optional[str] = None,
    workflow_id: Optional[str] = None,
    model_id: Optional[str] = None,
    filter_type: Optional[EvalFilterType] = None,
    eval_types: Optional[List[EvalType]] = None,
    limit: int = 20,
    page: int = 1,
    sort_by: str = "created_at",
    sort_order: str = "desc",
    db_id: Optional[str] = None,
    table: Optional[str] = None,
    headers: Optional[Dict[str, str]] = None,
) -> PaginatedResponse[EvalSchema]:
    """List evaluation runs with filtering, sorting and pagination.

    Args:
        agent_id: Filter by agent ID
        team_id: Filter by team ID
        workflow_id: Filter by workflow ID
        model_id: Filter by model ID
        filter_type: Filter type (agent, team, workflow)
        eval_types: List of eval types to filter by (accuracy, performance, reliability)
        limit: Number of eval runs per page
        page: Page number
        sort_by: Field to sort by
        sort_order: Sort order (asc or desc)
        db_id: Optional database ID to use
        table: Optional table name to use
        headers: HTTP headers to include in the request (optional)

    Returns:
        PaginatedResponse[EvalSchema]: Paginated list of evaluation runs

    Raises:
        HTTPStatusError: On HTTP errors
    """
    # eval_types is serialized as a comma-separated list of enum values.
    serialized_eval_types = ",".join(et.value for et in eval_types) if eval_types else None

    raw_query: Dict[str, Any] = {
        "limit": limit,
        "page": page,
        "sort_by": sort_by,
        "sort_order": sort_order,
        "agent_id": agent_id,
        "team_id": team_id,
        "workflow_id": workflow_id,
        "model_id": model_id,
        "type": filter_type.value if filter_type else None,
        "eval_types": serialized_eval_types,
        "db_id": db_id,
        "table": table,
    }
    params = {key: value for key, value in raw_query.items() if value is not None}

    data = await self._aget("/eval-runs", params=params, headers=headers)
    return PaginatedResponse[EvalSchema].model_validate(data)
|
|
1900
|
+
|
|
1901
|
+
async def get_eval_run(
    self,
    eval_run_id: str,
    db_id: Optional[str] = None,
    table: Optional[str] = None,
    headers: Optional[Dict[str, str]] = None,
) -> EvalSchema:
    """Fetch one evaluation run by its ID.

    Args:
        eval_run_id: ID of the evaluation run to retrieve
        db_id: Optional database ID to use
        table: Optional table name to use
        headers: HTTP headers to include in the request (optional)

    Returns:
        EvalSchema: The evaluation run details

    Raises:
        HTTPStatusError: On HTTP errors (404 if not found)
    """
    # Only forward the optional parameters the caller actually supplied.
    params: Dict[str, Any] = {}
    if db_id is not None:
        params["db_id"] = db_id
    if table is not None:
        params["table"] = table

    data = await self._aget(f"/eval-runs/{eval_run_id}", params=params, headers=headers)
    return EvalSchema.model_validate(data)
|
|
1930
|
+
|
|
1931
|
+
async def delete_eval_runs(
    self,
    eval_run_ids: List[str],
    db_id: Optional[str] = None,
    table: Optional[str] = None,
    headers: Optional[Dict[str, str]] = None,
) -> None:
    """Delete several evaluation runs in one request.

    Args:
        eval_run_ids: List of evaluation run IDs to delete
        db_id: Optional database ID to use
        table: Optional table name to use
        headers: HTTP headers to include in the request (optional)

    Raises:
        HTTPStatusError: On HTTP errors
    """
    # Only forward the optional parameters the caller actually supplied.
    params: Dict[str, Any] = {}
    if db_id is not None:
        params["db_id"] = db_id
    if table is not None:
        params["table"] = table

    # The request schema keeps the payload shape type-safe.
    body = DeleteEvalRunsRequest(eval_run_ids=eval_run_ids)
    await self._adelete("/eval-runs", body.model_dump(), params=params, headers=headers)
|
|
1958
|
+
|
|
1959
|
+
async def update_eval_run(
    self,
    eval_run_id: str,
    name: str,
    db_id: Optional[str] = None,
    table: Optional[str] = None,
    headers: Optional[Dict[str, str]] = None,
) -> EvalSchema:
    """Rename an evaluation run.

    Args:
        eval_run_id: ID of the evaluation run to update
        name: New name for the evaluation run
        db_id: Optional database ID to use
        table: Optional table name to use
        headers: HTTP headers to include in the request (optional)

    Returns:
        EvalSchema: The updated evaluation run

    Raises:
        HTTPStatusError: On HTTP errors (404 if not found)
    """
    # Only forward the optional parameters the caller actually supplied.
    params: Dict[str, Any] = {}
    if db_id is not None:
        params["db_id"] = db_id
    if table is not None:
        params["table"] = table

    # The request schema keeps the payload shape type-safe.
    body = UpdateEvalRunRequest(name=name)
    data = await self._apatch(f"/eval-runs/{eval_run_id}", body.model_dump(), params=params, headers=headers)
    return EvalSchema.model_validate(data)
|
|
1992
|
+
|
|
1993
|
+
async def run_eval(
    self,
    eval_type: EvalType,
    input_text: str,
    agent_id: Optional[str] = None,
    team_id: Optional[str] = None,
    model_id: Optional[str] = None,
    model_provider: Optional[str] = None,
    expected_output: Optional[str] = None,
    expected_tool_calls: Optional[List[str]] = None,
    num_iterations: int = 1,
    db_id: Optional[str] = None,
    table: Optional[str] = None,
    headers: Optional[Dict[str, str]] = None,
) -> Optional[EvalSchema]:
    """Kick off an evaluation of an agent or team and return its result.

    Args:
        eval_type: Type of evaluation (accuracy, performance, reliability)
        input_text: Input text for the evaluation
        agent_id: Agent ID to evaluate (mutually exclusive with team_id)
        team_id: Team ID to evaluate (mutually exclusive with agent_id)
        model_id: Optional model ID to use (overrides agent/team default)
        model_provider: Optional model provider to use
        expected_output: Expected output for accuracy evaluations
        expected_tool_calls: Expected tool calls for reliability evaluations
        num_iterations: Number of iterations for performance evaluations
        db_id: Optional database ID to use
        table: Optional table name to use
        headers: HTTP headers to include in the request (optional)

    Returns:
        EvalSchema: The evaluation result, or None if evaluation against remote agents

    Raises:
        HTTPStatusError: On HTTP errors
    """
    # Only forward the optional parameters the caller actually supplied.
    params: Dict[str, Any] = {}
    if db_id is not None:
        params["db_id"] = db_id
    if table is not None:
        params["table"] = table

    # The request schema validates the combination of eval settings.
    body = EvalRunInput(
        eval_type=eval_type,
        input=input_text,
        agent_id=agent_id,
        team_id=team_id,
        model_id=model_id,
        model_provider=model_provider,
        expected_output=expected_output,
        expected_tool_calls=expected_tool_calls,
        num_iterations=num_iterations,
    )

    data = await self._apost(
        "/eval-runs", body.model_dump(exclude_none=True, mode="json"), params=params, headers=headers
    )

    # The server may return no body for evals against remote agents.
    return EvalSchema.model_validate(data) if data is not None else None
|
|
2056
|
+
|
|
2057
|
+
# Knowledge Operations
|
|
2058
|
+
|
|
2059
|
+
async def _apost_multipart(
    self,
    endpoint: str,
    form_data: Dict[str, Any],
    files: Optional[Dict[str, Any]] = None,
    params: Optional[Dict[str, Any]] = None,
    headers: Optional[Dict[str, str]] = None,
) -> Any:
    """Execute asynchronous POST request with multipart form data and optional files.

    Connection-level failures (refused connection, connect timeout, and any
    other timeout) are translated into RemoteServerUnavailableError so callers
    can distinguish "server unreachable" from HTTP-level failures.

    Args:
        endpoint: API endpoint path (without base URL)
        form_data: Form data dictionary
        files: Optional files dictionary for multipart upload
        params: Query parameters (optional)
        headers: HTTP headers to include in the request (optional)

    Returns:
        Parsed JSON response

    Raises:
        RemoteServerUnavailableError: When the remote server is unavailable
        HTTPStatusError: On HTTP errors (4xx, 5xx)
    """
    url = f"{self.base_url}{endpoint}"

    async_client = get_default_async_client()

    try:
        # Only pass `files=` when there is actually a file: httpx switches the
        # request encoding based on its presence.
        if files:
            response = await async_client.post(
                url, data=form_data, files=files, params=params, headers=headers or {}, timeout=self.timeout
            )
        else:
            response = await async_client.post(
                url, data=form_data, params=params, headers=headers or {}, timeout=self.timeout
            )
        response.raise_for_status()
        return response.json()
    except (ConnectError, ConnectTimeout) as e:
        # Could not reach the server at all.
        raise RemoteServerUnavailableError(
            message=f"Failed to connect to remote server at {self.base_url}",
            base_url=self.base_url,
            original_error=e,
        ) from e
    except TimeoutException as e:
        # Reached the server but a read/write/pool timeout fired
        # (ConnectTimeout was already handled by the clause above).
        raise RemoteServerUnavailableError(
            message=f"Request to remote server at {self.base_url} timed out after {self.timeout} seconds.",
            base_url=self.base_url,
            original_error=e,
        ) from e
|
|
2110
|
+
|
|
2111
|
+
async def upload_knowledge_content(
    self,
    name: Optional[str] = None,
    description: Optional[str] = None,
    url: Optional[str] = None,
    metadata: Optional[Dict[str, Any]] = None,
    file: Optional[Union[File, "UploadFile"]] = None,
    text_content: Optional[str] = None,
    reader_id: Optional[str] = None,
    chunker: Optional[str] = None,
    chunk_size: Optional[int] = None,
    chunk_overlap: Optional[int] = None,
    db_id: Optional[str] = None,
    headers: Optional[Dict[str, str]] = None,
) -> ContentResponseSchema:
    """Upload content to the knowledge base.

    Exactly one content source (url, file, or text_content) is expected;
    validation of that is left to the server. NOTE(review): this is inferred
    from the endpoint shape — confirm against the server-side handler.

    Args:
        name: Content name (auto-generated from file/URL if not provided)
        description: Content description
        url: URL to fetch content from (can be a single URL string or a JSON-encoded array of URLs)
        metadata: Metadata dictionary for the content
        file: File object containing content (bytes or file-like object), filename, and mime_type. Can also be a FastAPI UploadFile.
        text_content: Raw text content to process
        reader_id: ID of the reader to use for processing
        chunker: Chunking strategy to apply
        chunk_size: Chunk size for processing
        chunk_overlap: Chunk overlap for processing
        db_id: Optional database ID to use
        headers: HTTP headers to include in the request (optional)

    Returns:
        ContentResponseSchema: The uploaded content info

    Raises:
        HTTPStatusError: On HTTP errors

    """
    params: Dict[str, Any] = {"db_id": db_id}
    params = {k: v for k, v in params.items() if v is not None}

    # Build multipart form data. Values are sent as strings; metadata is
    # JSON-encoded and numeric chunking options are stringified. Note that
    # truthiness checks mean falsy values ("" / 0) are skipped entirely.
    form_data: Dict[str, Any] = {}
    files: Optional[Dict[str, Any]] = None

    if name:
        form_data["name"] = name
    if description:
        form_data["description"] = description
    if url:
        form_data["url"] = url
    if metadata:
        form_data["metadata"] = json.dumps(metadata)
    if text_content:
        form_data["text_content"] = text_content
    if reader_id:
        form_data["reader_id"] = reader_id
    if chunker:
        form_data["chunker"] = chunker
    if chunk_size:
        form_data["chunk_size"] = str(chunk_size)
    if chunk_overlap:
        form_data["chunk_overlap"] = str(chunk_overlap)

    if file:
        # Handle both agno.media.File and FastAPI UploadFile. The multipart
        # tuple is (filename, content, mime type), with generic fallbacks
        # when either is missing. An agno File without content is ignored.
        if isinstance(file, UploadFile):
            files = {
                "file": (file.filename or "upload", file.file, file.content_type or "application/octet-stream")
            }
        elif file.content:
            files = {
                "file": (file.filename or "upload", file.content, file.mime_type or "application/octet-stream")
            }

    data = await self._apost_multipart("/knowledge/content", form_data, files=files, params=params, headers=headers)
    return ContentResponseSchema.model_validate(data)
|
|
2188
|
+
|
|
2189
|
+
async def update_knowledge_content(
    self,
    content_id: str,
    name: Optional[str] = None,
    description: Optional[str] = None,
    metadata: Optional[Dict[str, Any]] = None,
    reader_id: Optional[str] = None,
    db_id: Optional[str] = None,
    headers: Optional[Dict[str, str]] = None,
) -> ContentResponseSchema:
    """Patch properties of an existing knowledge-base content item.

    Args:
        content_id: ID of the content to update
        name: New content name
        description: New content description
        metadata: New metadata dictionary
        reader_id: ID of the reader to use
        db_id: Optional database ID to use
        headers: HTTP headers to include in the request (optional)

    Returns:
        ContentResponseSchema: The updated content

    Raises:
        HTTPStatusError: On HTTP errors (404 if not found)
    """
    raw_query: Dict[str, Any] = {"db_id": db_id}
    params = {key: value for key, value in raw_query.items() if value is not None}

    # Only include the fields the caller set; metadata is JSON-encoded for
    # transport as a form field.
    form_data: Dict[str, Any] = {}
    if name:
        form_data["name"] = name
    if description:
        form_data["description"] = description
    if metadata:
        form_data["metadata"] = json.dumps(metadata)
    if reader_id:
        form_data["reader_id"] = reader_id

    data = await self._arequest(
        "PATCH", f"/knowledge/content/{content_id}", data=form_data, params=params, headers=headers, as_form=True
    )
    return ContentResponseSchema.model_validate(data)
|
|
2233
|
+
|
|
2234
|
+
async def list_knowledge_content(
    self,
    limit: Optional[int] = 20,
    page: Optional[int] = 1,
    sort_by: Optional[str] = "created_at",
    sort_order: Optional[str] = "desc",
    db_id: Optional[str] = None,
    headers: Optional[Dict[str, str]] = None,
) -> PaginatedResponse[ContentResponseSchema]:
    """Page through the content stored in the knowledge base.

    Args:
        limit: Number of content entries per page
        page: Page number
        sort_by: Field to sort by
        sort_order: Sort order (asc or desc)
        db_id: Optional database ID to use
        headers: HTTP headers to include in the request (optional)

    Returns:
        PaginatedResponse[ContentResponseSchema]: Paginated list of content

    Raises:
        HTTPStatusError: On HTTP errors
    """
    # Every parameter here is optional; drop the ones set to None.
    raw_query: Dict[str, Any] = {
        "limit": limit,
        "page": page,
        "sort_by": sort_by,
        "sort_order": sort_order,
        "db_id": db_id,
    }
    params = {key: value for key, value in raw_query.items() if value is not None}

    data = await self._aget("/knowledge/content", params=params, headers=headers)
    return PaginatedResponse[ContentResponseSchema].model_validate(data)
|
|
2270
|
+
|
|
2271
|
+
async def get_knowledge_content(
    self,
    content_id: str,
    db_id: Optional[str] = None,
    headers: Optional[Dict[str, str]] = None,
) -> ContentResponseSchema:
    """Fetch one knowledge-base content item by ID.

    Args:
        content_id: ID of the content to retrieve
        db_id: Optional database ID to use
        headers: HTTP headers to include in the request (optional)

    Returns:
        ContentResponseSchema: The content details

    Raises:
        HTTPStatusError: On HTTP errors (404 if not found)
    """
    # Only forward db_id when the caller supplied one.
    params: Dict[str, Any] = {}
    if db_id is not None:
        params["db_id"] = db_id

    data = await self._aget(f"/knowledge/content/{content_id}", params=params, headers=headers)
    return ContentResponseSchema.model_validate(data)
|
|
2295
|
+
|
|
2296
|
+
async def delete_knowledge_content(
    self,
    content_id: str,
    db_id: Optional[str] = None,
    headers: Optional[Dict[str, str]] = None,
) -> ContentResponseSchema:
    """Delete a specific content.

    Args:
        content_id: ID of the content to delete
        db_id: Optional database ID to use
        headers: HTTP headers to include in the request (optional)

    Returns:
        ContentResponseSchema: The deleted content info

    Raises:
        HTTPStatusError: On HTTP errors (404 if not found)
    """
    # Consistency fix: use the same typed, None-filtered params pattern as
    # every sibling method (the old `if db_id:` also dropped empty strings).
    params: Dict[str, Any] = {"db_id": db_id}
    params = {k: v for k, v in params.items() if v is not None}

    data = await self._arequest("DELETE", f"/knowledge/content/{content_id}", params=params, headers=headers)
    return ContentResponseSchema.model_validate(data)
|
|
2323
|
+
|
|
2324
|
+
async def delete_all_knowledge_content(
|
|
2325
|
+
self,
|
|
2326
|
+
db_id: Optional[str] = None,
|
|
2327
|
+
headers: Optional[Dict[str, str]] = None,
|
|
2328
|
+
) -> str:
|
|
2329
|
+
"""Delete all content from the knowledge base.
|
|
2330
|
+
|
|
2331
|
+
WARNING: This is a destructive operation that cannot be undone.
|
|
2332
|
+
|
|
2333
|
+
Args:
|
|
2334
|
+
db_id: Optional database ID to use
|
|
2335
|
+
headers: HTTP headers to include in the request (optional)
|
|
2336
|
+
|
|
2337
|
+
Returns:
|
|
2338
|
+
str: "success" if successful
|
|
2339
|
+
|
|
2340
|
+
Raises:
|
|
2341
|
+
HTTPStatusError: On HTTP errors
|
|
2342
|
+
"""
|
|
2343
|
+
params = {}
|
|
2344
|
+
if db_id:
|
|
2345
|
+
params["db_id"] = db_id
|
|
2346
|
+
|
|
2347
|
+
endpoint = "/knowledge/content"
|
|
2348
|
+
|
|
2349
|
+
return await self._arequest("DELETE", endpoint, params=params, headers=headers)
|
|
2350
|
+
|
|
2351
|
+
async def get_knowledge_content_status(
|
|
2352
|
+
self,
|
|
2353
|
+
content_id: str,
|
|
2354
|
+
db_id: Optional[str] = None,
|
|
2355
|
+
headers: Optional[Dict[str, str]] = None,
|
|
2356
|
+
) -> ContentStatusResponse:
|
|
2357
|
+
"""Get the processing status of a content item.
|
|
2358
|
+
|
|
2359
|
+
Args:
|
|
2360
|
+
content_id: ID of the content
|
|
2361
|
+
db_id: Optional database ID to use
|
|
2362
|
+
headers: HTTP headers to include in the request (optional)
|
|
2363
|
+
|
|
2364
|
+
Returns:
|
|
2365
|
+
ContentStatusResponse: The content processing status
|
|
2366
|
+
|
|
2367
|
+
Raises:
|
|
2368
|
+
HTTPStatusError: On HTTP errors
|
|
2369
|
+
"""
|
|
2370
|
+
params: Dict[str, Any] = {"db_id": db_id}
|
|
2371
|
+
params = {k: v for k, v in params.items() if v is not None}
|
|
2372
|
+
|
|
2373
|
+
data = await self._aget(f"/knowledge/content/{content_id}/status", params=params, headers=headers)
|
|
2374
|
+
return ContentStatusResponse.model_validate(data)
|
|
2375
|
+
|
|
2376
|
+
async def search_knowledge(
|
|
2377
|
+
self,
|
|
2378
|
+
query: str,
|
|
2379
|
+
max_results: Optional[int] = None,
|
|
2380
|
+
filters: Optional[Dict[str, Any]] = None,
|
|
2381
|
+
search_type: Optional[str] = None,
|
|
2382
|
+
vector_db_ids: Optional[List[str]] = None,
|
|
2383
|
+
limit: int = 20,
|
|
2384
|
+
page: int = 1,
|
|
2385
|
+
db_id: Optional[str] = None,
|
|
2386
|
+
headers: Optional[Dict[str, str]] = None,
|
|
2387
|
+
) -> PaginatedResponse[VectorSearchResult]:
|
|
2388
|
+
"""Search the knowledge base.
|
|
2389
|
+
|
|
2390
|
+
Args:
|
|
2391
|
+
query: Search query string
|
|
2392
|
+
max_results: Maximum number of results to return from search
|
|
2393
|
+
filters: Optional filters to apply
|
|
2394
|
+
search_type: Type of search (vector, keyword, hybrid)
|
|
2395
|
+
vector_db_ids: Optional list of vector DB IDs to search
|
|
2396
|
+
limit: Number of results per page
|
|
2397
|
+
page: Page number
|
|
2398
|
+
db_id: Optional database ID to use
|
|
2399
|
+
headers: HTTP headers to include in the request (optional)
|
|
2400
|
+
|
|
2401
|
+
Returns:
|
|
2402
|
+
PaginatedResponse[VectorSearchResult]: Paginated search results
|
|
2403
|
+
|
|
2404
|
+
Raises:
|
|
2405
|
+
HTTPStatusError: On HTTP errors
|
|
2406
|
+
"""
|
|
2407
|
+
payload: Dict[str, Any] = {"query": query}
|
|
2408
|
+
if max_results:
|
|
2409
|
+
payload["max_results"] = max_results
|
|
2410
|
+
if filters:
|
|
2411
|
+
payload["filters"] = filters
|
|
2412
|
+
if search_type:
|
|
2413
|
+
payload["search_type"] = search_type
|
|
2414
|
+
if vector_db_ids:
|
|
2415
|
+
payload["vector_db_ids"] = vector_db_ids
|
|
2416
|
+
payload["meta"] = {"limit": limit, "page": page}
|
|
2417
|
+
if db_id:
|
|
2418
|
+
payload["db_id"] = db_id
|
|
2419
|
+
|
|
2420
|
+
data = await self._apost("/knowledge/search", payload, headers=headers)
|
|
2421
|
+
return PaginatedResponse[VectorSearchResult].model_validate(data)
|
|
2422
|
+
|
|
2423
|
+
async def get_knowledge_config(
|
|
2424
|
+
self,
|
|
2425
|
+
db_id: Optional[str] = None,
|
|
2426
|
+
headers: Optional[Dict[str, str]] = None,
|
|
2427
|
+
) -> KnowledgeConfigResponse:
|
|
2428
|
+
"""Get knowledge base configuration.
|
|
2429
|
+
|
|
2430
|
+
Returns available readers, chunkers, vector DBs, and filters.
|
|
2431
|
+
|
|
2432
|
+
Args:
|
|
2433
|
+
db_id: Optional database ID to use
|
|
2434
|
+
headers: HTTP headers to include in the request (optional)
|
|
2435
|
+
|
|
2436
|
+
Returns:
|
|
2437
|
+
KnowledgeConfigResponse: Knowledge configuration
|
|
2438
|
+
|
|
2439
|
+
Raises:
|
|
2440
|
+
HTTPStatusError: On HTTP errors
|
|
2441
|
+
"""
|
|
2442
|
+
params: Dict[str, Any] = {"db_id": db_id}
|
|
2443
|
+
params = {k: v for k, v in params.items() if v is not None}
|
|
2444
|
+
|
|
2445
|
+
data = await self._aget("/knowledge/config", params=params, headers=headers)
|
|
2446
|
+
return KnowledgeConfigResponse.model_validate(data)
|
|
2447
|
+
|
|
2448
|
+
# Trace Operations
|
|
2449
|
+
async def get_traces(
|
|
2450
|
+
self,
|
|
2451
|
+
run_id: Optional[str] = None,
|
|
2452
|
+
session_id: Optional[str] = None,
|
|
2453
|
+
user_id: Optional[str] = None,
|
|
2454
|
+
agent_id: Optional[str] = None,
|
|
2455
|
+
team_id: Optional[str] = None,
|
|
2456
|
+
workflow_id: Optional[str] = None,
|
|
2457
|
+
status: Optional[str] = None,
|
|
2458
|
+
start_time: Optional[str] = None,
|
|
2459
|
+
end_time: Optional[str] = None,
|
|
2460
|
+
page: int = 1,
|
|
2461
|
+
limit: int = 20,
|
|
2462
|
+
db_id: Optional[str] = None,
|
|
2463
|
+
headers: Optional[Dict[str, str]] = None,
|
|
2464
|
+
) -> PaginatedResponse[TraceSummary]:
|
|
2465
|
+
"""List execution traces with filtering and pagination.
|
|
2466
|
+
|
|
2467
|
+
Traces provide observability into agent execution flows, model invocations,
|
|
2468
|
+
tool calls, errors, and performance bottlenecks.
|
|
2469
|
+
|
|
2470
|
+
Args:
|
|
2471
|
+
run_id: Filter by run ID
|
|
2472
|
+
session_id: Filter by session ID
|
|
2473
|
+
user_id: Filter by user ID
|
|
2474
|
+
agent_id: Filter by agent ID
|
|
2475
|
+
team_id: Filter by team ID
|
|
2476
|
+
workflow_id: Filter by workflow ID
|
|
2477
|
+
status: Filter by status (OK, ERROR)
|
|
2478
|
+
start_time: Filter traces starting after this time (ISO 8601 format)
|
|
2479
|
+
end_time: Filter traces ending before this time (ISO 8601 format)
|
|
2480
|
+
page: Page number (1-indexed)
|
|
2481
|
+
limit: Number of traces per page
|
|
2482
|
+
db_id: Optional database ID to use
|
|
2483
|
+
headers: HTTP headers to include in the request (optional)
|
|
2484
|
+
|
|
2485
|
+
Returns:
|
|
2486
|
+
PaginatedResponse[TraceSummary]: Paginated list of trace summaries
|
|
2487
|
+
|
|
2488
|
+
Raises:
|
|
2489
|
+
HTTPStatusError: On HTTP errors
|
|
2490
|
+
"""
|
|
2491
|
+
params: Dict[str, Any] = {
|
|
2492
|
+
"page": page,
|
|
2493
|
+
"limit": limit,
|
|
2494
|
+
"run_id": run_id,
|
|
2495
|
+
"session_id": session_id,
|
|
2496
|
+
"user_id": user_id,
|
|
2497
|
+
"agent_id": agent_id,
|
|
2498
|
+
"team_id": team_id,
|
|
2499
|
+
"workflow_id": workflow_id,
|
|
2500
|
+
"status": status,
|
|
2501
|
+
"start_time": start_time,
|
|
2502
|
+
"end_time": end_time,
|
|
2503
|
+
"db_id": db_id,
|
|
2504
|
+
}
|
|
2505
|
+
|
|
2506
|
+
params = {k: v for k, v in params.items() if v is not None}
|
|
2507
|
+
|
|
2508
|
+
data = await self._aget("/traces", params=params, headers=headers)
|
|
2509
|
+
return PaginatedResponse[TraceSummary].model_validate(data)
|
|
2510
|
+
|
|
2511
|
+
async def get_trace(
|
|
2512
|
+
self,
|
|
2513
|
+
trace_id: str,
|
|
2514
|
+
span_id: Optional[str] = None,
|
|
2515
|
+
run_id: Optional[str] = None,
|
|
2516
|
+
db_id: Optional[str] = None,
|
|
2517
|
+
headers: Optional[Dict[str, str]] = None,
|
|
2518
|
+
) -> Union[TraceDetail, TraceNode]:
|
|
2519
|
+
"""Get detailed trace information with hierarchical span tree, or a specific span.
|
|
2520
|
+
|
|
2521
|
+
Without span_id: Returns the full trace with hierarchical span tree including
|
|
2522
|
+
trace metadata, timing, status, and all spans organized hierarchically.
|
|
2523
|
+
|
|
2524
|
+
With span_id: Returns details for a specific span within the trace including
|
|
2525
|
+
span metadata, timing, status, and type-specific attributes.
|
|
2526
|
+
|
|
2527
|
+
Args:
|
|
2528
|
+
trace_id: ID of the trace to retrieve
|
|
2529
|
+
span_id: Optional span ID to retrieve a specific span
|
|
2530
|
+
run_id: Optional run ID to retrieve trace for
|
|
2531
|
+
db_id: Optional database ID to use
|
|
2532
|
+
headers: HTTP headers to include in the request (optional)
|
|
2533
|
+
|
|
2534
|
+
Returns:
|
|
2535
|
+
TraceDetail if no span_id provided, TraceNode if span_id provided
|
|
2536
|
+
|
|
2537
|
+
Raises:
|
|
2538
|
+
HTTPStatusError: On HTTP errors (404 if not found)
|
|
2539
|
+
"""
|
|
2540
|
+
params: Dict[str, Any] = {
|
|
2541
|
+
"span_id": span_id,
|
|
2542
|
+
"run_id": run_id,
|
|
2543
|
+
"db_id": db_id,
|
|
2544
|
+
}
|
|
2545
|
+
params = {k: v for k, v in params.items() if v is not None}
|
|
2546
|
+
|
|
2547
|
+
data = await self._aget(f"/traces/{trace_id}", params=params, headers=headers)
|
|
2548
|
+
|
|
2549
|
+
# If span_id was provided, return TraceNode, otherwise TraceDetail
|
|
2550
|
+
if span_id:
|
|
2551
|
+
return TraceNode.model_validate(data)
|
|
2552
|
+
return TraceDetail.model_validate(data)
|
|
2553
|
+
|
|
2554
|
+
async def get_trace_session_stats(
|
|
2555
|
+
self,
|
|
2556
|
+
user_id: Optional[str] = None,
|
|
2557
|
+
agent_id: Optional[str] = None,
|
|
2558
|
+
team_id: Optional[str] = None,
|
|
2559
|
+
workflow_id: Optional[str] = None,
|
|
2560
|
+
start_time: Optional[str] = None,
|
|
2561
|
+
end_time: Optional[str] = None,
|
|
2562
|
+
page: int = 1,
|
|
2563
|
+
limit: int = 20,
|
|
2564
|
+
db_id: Optional[str] = None,
|
|
2565
|
+
headers: Optional[Dict[str, str]] = None,
|
|
2566
|
+
) -> PaginatedResponse[TraceSessionStats]:
|
|
2567
|
+
"""Get aggregated trace statistics grouped by session ID.
|
|
2568
|
+
|
|
2569
|
+
Provides insights into total traces per session, first and last trace
|
|
2570
|
+
timestamps, and associated user and agent information.
|
|
2571
|
+
|
|
2572
|
+
Args:
|
|
2573
|
+
user_id: Filter by user ID
|
|
2574
|
+
agent_id: Filter by agent ID
|
|
2575
|
+
team_id: Filter by team ID
|
|
2576
|
+
workflow_id: Filter by workflow ID
|
|
2577
|
+
start_time: Filter sessions with traces created after this time (ISO 8601 format)
|
|
2578
|
+
end_time: Filter sessions with traces created before this time (ISO 8601 format)
|
|
2579
|
+
page: Page number (1-indexed)
|
|
2580
|
+
limit: Number of sessions per page
|
|
2581
|
+
db_id: Optional database ID to use
|
|
2582
|
+
headers: HTTP headers to include in the request (optional)
|
|
2583
|
+
|
|
2584
|
+
Returns:
|
|
2585
|
+
PaginatedResponse[TraceSessionStats]: Paginated list of session statistics
|
|
2586
|
+
|
|
2587
|
+
Raises:
|
|
2588
|
+
HTTPStatusError: On HTTP errors
|
|
2589
|
+
"""
|
|
2590
|
+
params: Dict[str, Any] = {
|
|
2591
|
+
"page": page,
|
|
2592
|
+
"limit": limit,
|
|
2593
|
+
"user_id": user_id,
|
|
2594
|
+
"agent_id": agent_id,
|
|
2595
|
+
"team_id": team_id,
|
|
2596
|
+
"workflow_id": workflow_id,
|
|
2597
|
+
"start_time": start_time,
|
|
2598
|
+
"end_time": end_time,
|
|
2599
|
+
"db_id": db_id,
|
|
2600
|
+
}
|
|
2601
|
+
params = {k: v for k, v in params.items() if v is not None}
|
|
2602
|
+
|
|
2603
|
+
data = await self._aget("/trace_session_stats", params=params, headers=headers)
|
|
2604
|
+
return PaginatedResponse[TraceSessionStats].model_validate(data)
|
|
2605
|
+
|
|
2606
|
+
# Metrics Operations
|
|
2607
|
+
async def get_metrics(
|
|
2608
|
+
self,
|
|
2609
|
+
starting_date: Optional[date] = None,
|
|
2610
|
+
ending_date: Optional[date] = None,
|
|
2611
|
+
db_id: Optional[str] = None,
|
|
2612
|
+
table: Optional[str] = None,
|
|
2613
|
+
headers: Optional[Dict[str, str]] = None,
|
|
2614
|
+
) -> MetricsResponse:
|
|
2615
|
+
"""Retrieve AgentOS metrics and analytics data for a specified date range.
|
|
2616
|
+
|
|
2617
|
+
If no date range is specified, returns all available metrics.
|
|
2618
|
+
|
|
2619
|
+
Args:
|
|
2620
|
+
starting_date: Starting date for metrics range (YYYY-MM-DD format)
|
|
2621
|
+
ending_date: Ending date for metrics range (YYYY-MM-DD format)
|
|
2622
|
+
db_id: Optional database ID to use
|
|
2623
|
+
table: Optional database table to use
|
|
2624
|
+
headers: HTTP headers to include in the request (optional)
|
|
2625
|
+
|
|
2626
|
+
Returns:
|
|
2627
|
+
MetricsResponse: Metrics data including daily aggregated metrics
|
|
2628
|
+
|
|
2629
|
+
Raises:
|
|
2630
|
+
HTTPStatusError: On HTTP errors
|
|
2631
|
+
"""
|
|
2632
|
+
params: Dict[str, Any] = {
|
|
2633
|
+
"starting_date": starting_date.strftime("%Y-%m-%d") if starting_date else None,
|
|
2634
|
+
"ending_date": ending_date.strftime("%Y-%m-%d") if ending_date else None,
|
|
2635
|
+
"db_id": db_id,
|
|
2636
|
+
"table": table,
|
|
2637
|
+
}
|
|
2638
|
+
params = {k: v for k, v in params.items() if v is not None}
|
|
2639
|
+
|
|
2640
|
+
data = await self._aget("/metrics", params=params, headers=headers)
|
|
2641
|
+
return MetricsResponse.model_validate(data)
|
|
2642
|
+
|
|
2643
|
+
async def refresh_metrics(
|
|
2644
|
+
self,
|
|
2645
|
+
db_id: Optional[str] = None,
|
|
2646
|
+
table: Optional[str] = None,
|
|
2647
|
+
headers: Optional[Dict[str, str]] = None,
|
|
2648
|
+
) -> List[DayAggregatedMetrics]:
|
|
2649
|
+
"""Manually trigger recalculation of system metrics from raw data.
|
|
2650
|
+
|
|
2651
|
+
This operation analyzes system activity logs and regenerates aggregated metrics.
|
|
2652
|
+
Useful for ensuring metrics are up-to-date or after system maintenance.
|
|
2653
|
+
|
|
2654
|
+
Args:
|
|
2655
|
+
db_id: Optional database ID to use
|
|
2656
|
+
table: Optional database table to use
|
|
2657
|
+
headers: HTTP headers to include in the request (optional)
|
|
2658
|
+
|
|
2659
|
+
Returns:
|
|
2660
|
+
List[DayAggregatedMetrics]: List of refreshed daily aggregated metrics
|
|
2661
|
+
|
|
2662
|
+
Raises:
|
|
2663
|
+
HTTPStatusError: On HTTP errors
|
|
2664
|
+
"""
|
|
2665
|
+
params: Dict[str, Any] = {"db_id": db_id, "table": table}
|
|
2666
|
+
params = {k: v for k, v in params.items() if v is not None}
|
|
2667
|
+
|
|
2668
|
+
data = await self._apost("/metrics/refresh", params=params, headers=headers)
|
|
2669
|
+
return [DayAggregatedMetrics.model_validate(m) for m in data]
|