agno 2.1.10__py3-none-any.whl → 2.2.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- agno/agent/agent.py +1578 -1247
- agno/models/anthropic/claude.py +2 -2
- agno/models/ollama/chat.py +7 -2
- agno/os/app.py +1 -1
- agno/os/interfaces/a2a/router.py +2 -2
- agno/os/interfaces/agui/router.py +2 -2
- agno/os/router.py +7 -7
- agno/os/routers/evals/schemas.py +31 -31
- agno/os/routers/health.py +6 -2
- agno/os/routers/knowledge/schemas.py +49 -47
- agno/os/routers/memory/schemas.py +16 -16
- agno/os/routers/metrics/schemas.py +16 -16
- agno/os/routers/session/session.py +382 -7
- agno/os/schema.py +254 -231
- agno/os/utils.py +1 -1
- agno/run/agent.py +49 -1
- agno/run/team.py +43 -0
- agno/session/summary.py +45 -13
- agno/session/team.py +90 -5
- agno/team/team.py +1117 -856
- agno/utils/agent.py +372 -0
- agno/utils/events.py +144 -2
- agno/utils/print_response/agent.py +10 -6
- agno/utils/print_response/team.py +6 -4
- agno/utils/print_response/workflow.py +7 -5
- agno/utils/team.py +9 -8
- agno/workflow/condition.py +17 -9
- agno/workflow/loop.py +18 -10
- agno/workflow/parallel.py +14 -6
- agno/workflow/router.py +16 -8
- agno/workflow/step.py +14 -6
- agno/workflow/steps.py +14 -6
- agno/workflow/workflow.py +245 -122
- {agno-2.1.10.dist-info → agno-2.2.0.dist-info}/METADATA +60 -23
- {agno-2.1.10.dist-info → agno-2.2.0.dist-info}/RECORD +38 -37
- {agno-2.1.10.dist-info → agno-2.2.0.dist-info}/WHEEL +0 -0
- {agno-2.1.10.dist-info → agno-2.2.0.dist-info}/licenses/LICENSE +0 -0
- {agno-2.1.10.dist-info → agno-2.2.0.dist-info}/top_level.txt +0 -0
agno/models/anthropic/claude.py
CHANGED
@@ -75,7 +75,7 @@ class Claude(Model):
     provider: str = "Anthropic"

     # Request parameters
-    max_tokens: Optional[int] =
+    max_tokens: Optional[int] = 8192
     thinking: Optional[Dict[str, Any]] = None
     temperature: Optional[float] = None
     stop_sequences: Optional[List[str]] = None

@@ -656,7 +656,7 @@ class Claude(Model):

         # Anthropic-specific additional fields
         if response_usage.server_tool_use:
-            metrics.provider_metrics = {"server_tool_use": response_usage.server_tool_use}
+            metrics.provider_metrics = {"server_tool_use": response_usage.server_tool_use.model_dump()}
         if isinstance(response_usage, Usage):
             if response_usage.service_tier:
                 metrics.provider_metrics = metrics.provider_metrics or {}
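The second hunk stores a plain dict in provider_metrics instead of the raw Anthropic usage object, which keeps the metrics JSON-serializable. A minimal sketch of the idea, using a stand-in pydantic model rather than the real SDK type:

# Illustration only: ServerToolUse stands in for the SDK's usage object.
from typing import Any, Dict

from pydantic import BaseModel


class ServerToolUse(BaseModel):
    web_search_requests: int = 0


def build_provider_metrics(server_tool_use: ServerToolUse) -> Dict[str, Any]:
    # Storing the raw pydantic object would break json.dumps() on the metrics;
    # model_dump() converts it to plain Python types first.
    return {"server_tool_use": server_tool_use.model_dump()}


print(build_provider_metrics(ServerToolUse(web_search_requests=2)))
# {'server_tool_use': {'web_search_requests': 2}}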
agno/models/ollama/chat.py
CHANGED
@@ -429,8 +429,13 @@ class Ollama(Model):
         """
         metrics = Metrics()

-
-
+        # Safely handle None values from Ollama Cloud responses
+        input_tokens = response.get("prompt_eval_count")
+        output_tokens = response.get("eval_count")
+
+        # Default to 0 if None
+        metrics.input_tokens = input_tokens if input_tokens is not None else 0
+        metrics.output_tokens = output_tokens if output_tokens is not None else 0
         metrics.total_tokens = metrics.input_tokens + metrics.output_tokens

         return metrics
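Ollama Cloud responses can omit prompt_eval_count or eval_count (or return them as null), so the metrics helper now falls back to 0 instead of failing on the addition. A small sketch of the same pattern against a bare response dict:

# Sketch of the None-safe token accounting; "response" mimics an Ollama chat payload.
from typing import Any, Dict


def count_tokens(response: Dict[str, Any]) -> Dict[str, int]:
    input_tokens = response.get("prompt_eval_count")
    output_tokens = response.get("eval_count")

    # Missing or null counters are treated as 0 so the sum never fails.
    input_tokens = input_tokens if input_tokens is not None else 0
    output_tokens = output_tokens if output_tokens is not None else 0
    return {
        "input_tokens": input_tokens,
        "output_tokens": output_tokens,
        "total_tokens": input_tokens + output_tokens,
    }


print(count_tokens({"prompt_eval_count": 12, "eval_count": None}))
# {'input_tokens': 12, 'output_tokens': 0, 'total_tokens': 12}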
agno/os/app.py
CHANGED
@@ -312,7 +312,7 @@ class AgentOS:
                async with self._mcp_app.lifespan(app):  # type: ignore
                    yield

-            final_lifespan = combined_lifespan
+            final_lifespan = combined_lifespan  # type: ignore

            fastapi_app = self._make_app(lifespan=final_lifespan)
        else:
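The change here is only a type: ignore, but the surrounding context shows the combined-lifespan pattern AgentOS uses to run its own lifespan alongside the MCP app's. A self-contained sketch of composing two FastAPI lifespans the same way; the names below are illustrative, not AgentOS internals:

from contextlib import asynccontextmanager

from fastapi import FastAPI


@asynccontextmanager
async def first_lifespan(app: FastAPI):
    print("first: startup")
    yield
    print("first: shutdown")


@asynccontextmanager
async def second_lifespan(app: FastAPI):
    print("second: startup")
    yield
    print("second: shutdown")


@asynccontextmanager
async def combined_lifespan(app: FastAPI):
    # Enter both lifespans; shutdown runs in reverse order when the app stops.
    async with first_lifespan(app):
        async with second_lifespan(app):
            yield


app = FastAPI(lifespan=combined_lifespan)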
agno/os/interfaces/a2a/router.py
CHANGED
@@ -221,7 +221,7 @@ def attach_routes(
                session_id=context_id,
                user_id=user_id,
                stream=True,
-
+                stream_events=True,
                **kwargs,
            )
        else:

@@ -234,7 +234,7 @@ def attach_routes(
                session_id=context_id,
                user_id=user_id,
                stream=True,
-
+                stream_events=True,
                **kwargs,
            )

agno/os/interfaces/agui/router.py
CHANGED

@@ -44,7 +44,7 @@ async def run_agent(agent: Agent, run_input: RunAgentInput) -> AsyncIterator[BaseEvent]:
        input=messages,
        session_id=run_input.thread_id,
        stream=True,
-
+        stream_events=True,
        user_id=user_id,
    )

@@ -80,7 +80,7 @@ async def run_team(team: Team, input: RunAgentInput) -> AsyncIterator[BaseEvent]
        input=messages,
        session_id=input.thread_id,
        stream=True,
-
+        stream_steps=True,
        user_id=user_id,
    )
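In these interface routers (and in agno/os/router.py below), 2.2.0 passes stream_events=True alongside stream=True; the parameter it replaces is elided in this diff rendering. A hedged sketch of a caller consuming the resulting event stream, assuming the public Agent and OpenAIChat imports from the agno docs:

# Hedged sketch: consuming the streamed run events that the routers forward.
import asyncio

from agno.agent import Agent
from agno.models.openai import OpenAIChat


async def main() -> None:
    agent = Agent(model=OpenAIChat(id="gpt-4o-mini"))
    stream = agent.arun(
        input="Summarize the 2.2.0 streaming changes in one line.",
        session_id="demo-session",
        user_id="demo-user",
        stream=True,
        stream_events=True,  # emit intermediate run events, not just content chunks
    )
    async for event in stream:
        # Each event carries a type plus payload; print just the type here.
        print(type(event).__name__)


asyncio.run(main())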
agno/os/router.py
CHANGED
@@ -250,7 +250,7 @@ async def agent_response_streamer(
            videos=videos,
            files=files,
            stream=True,
-
+            stream_events=True,
            **kwargs,
        )
        async for run_response_chunk in run_response:

@@ -287,7 +287,7 @@ async def agent_continue_response_streamer(
            session_id=session_id,
            user_id=user_id,
            stream=True,
-
+            stream_events=True,
        )
        async for run_response_chunk in continue_response:
            yield format_sse_event(run_response_chunk)  # type: ignore

@@ -335,7 +335,7 @@ async def team_response_streamer(
            videos=videos,
            files=files,
            stream=True,
-
+            stream_events=True,
            **kwargs,
        )
        async for run_response_chunk in run_response:

@@ -389,12 +389,12 @@ async def handle_workflow_via_websocket(websocket: WebSocket, message: dict, os:
        session_id = str(uuid4())

    # Execute workflow in background with streaming
-    workflow_result = await workflow.arun(
+    workflow_result = await workflow.arun(  # type: ignore
        input=user_message,
        session_id=session_id,
        user_id=user_id,
        stream=True,
-
+        stream_events=True,
        background=True,
        websocket=websocket,
    )

@@ -435,12 +435,12 @@ async def workflow_response_streamer(
    **kwargs: Any,
) -> AsyncGenerator:
    try:
-        run_response =
+        run_response = workflow.arun(
            input=input,
            session_id=session_id,
            user_id=user_id,
            stream=True,
-
+            stream_events=True,
            **kwargs,
        )
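Each streamer yields chunks through format_sse_event before they reach the HTTP response. As an illustration of the server-sent-events framing this implies (the helper below is a hypothetical stand-in, not agno's implementation):

# Illustrative SSE framing: each event is serialized as a "data: ..." block
# terminated by a blank line, which is what EventSource clients expect.
import json
from typing import Any


def format_as_sse(chunk: Any) -> str:
    payload = chunk.to_dict() if hasattr(chunk, "to_dict") else chunk
    return f"data: {json.dumps(payload, default=str)}\n\n"


print(format_as_sse({"event": "RunContent", "content": "hello"}), end="")
# data: {"event": "RunContent", "content": "hello"}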
agno/os/routers/evals/schemas.py
CHANGED
@@ -2,7 +2,7 @@ from dataclasses import asdict
 from datetime import datetime, timezone
 from typing import Any, Dict, List, Optional

-from pydantic import BaseModel
+from pydantic import BaseModel, Field

 from agno.db.schemas.evals import EvalType
 from agno.eval import AccuracyResult, PerformanceResult, ReliabilityResult

@@ -12,43 +12,43 @@ from agno.eval.reliability import ReliabilityEval


 class EvalRunInput(BaseModel):
-    agent_id: Optional[str] = None
-    team_id: Optional[str] = None
-
-    model_id: Optional[str] = None
-    model_provider: Optional[str] = None
-    eval_type: EvalType
-    input: str
-    additional_guidelines: Optional[str] = None
-    additional_context: Optional[str] = None
-    num_iterations:
-    name: Optional[str] = None
+    agent_id: Optional[str] = Field(None, description="Agent ID to evaluate")
+    team_id: Optional[str] = Field(None, description="Team ID to evaluate")
+
+    model_id: Optional[str] = Field(None, description="Model ID to use for evaluation")
+    model_provider: Optional[str] = Field(None, description="Model provider name")
+    eval_type: EvalType = Field(..., description="Type of evaluation to run (accuracy, performance, or reliability)")
+    input: str = Field(..., description="Input text/query for the evaluation", min_length=1)
+    additional_guidelines: Optional[str] = Field(None, description="Additional guidelines for the evaluation")
+    additional_context: Optional[str] = Field(None, description="Additional context for the evaluation")
+    num_iterations: int = Field(1, description="Number of times to run the evaluation", ge=1, le=100)
+    name: Optional[str] = Field(None, description="Name for this evaluation run")

     # Accuracy eval specific fields
-    expected_output: Optional[str] = None
+    expected_output: Optional[str] = Field(None, description="Expected output for accuracy evaluation")

     # Performance eval specific fields
-    warmup_runs:
+    warmup_runs: int = Field(0, description="Number of warmup runs before measuring performance", ge=0, le=10)

     # Reliability eval specific fields
-    expected_tool_calls: Optional[List[str]] = None
+    expected_tool_calls: Optional[List[str]] = Field(None, description="Expected tool calls for reliability evaluation")


 class EvalSchema(BaseModel):
-    id: str
-
-    agent_id: Optional[str] = None
-    model_id: Optional[str] = None
-    model_provider: Optional[str] = None
-    team_id: Optional[str] = None
-    workflow_id: Optional[str] = None
-    name: Optional[str] = None
-    evaluated_component_name: Optional[str] = None
-    eval_type: EvalType
-    eval_data: Dict[str, Any]
-    eval_input: Optional[Dict[str, Any]] = None
-    created_at: Optional[datetime] = None
-    updated_at: Optional[datetime] = None
+    id: str = Field(..., description="Unique identifier for the evaluation run")
+
+    agent_id: Optional[str] = Field(None, description="Agent ID that was evaluated")
+    model_id: Optional[str] = Field(None, description="Model ID used in evaluation")
+    model_provider: Optional[str] = Field(None, description="Model provider name")
+    team_id: Optional[str] = Field(None, description="Team ID that was evaluated")
+    workflow_id: Optional[str] = Field(None, description="Workflow ID that was evaluated")
+    name: Optional[str] = Field(None, description="Name of the evaluation run")
+    evaluated_component_name: Optional[str] = Field(None, description="Name of the evaluated component")
+    eval_type: EvalType = Field(..., description="Type of evaluation (accuracy, performance, or reliability)")
+    eval_data: Dict[str, Any] = Field(..., description="Evaluation results and metrics")
+    eval_input: Optional[Dict[str, Any]] = Field(None, description="Input parameters used for the evaluation")
+    created_at: Optional[datetime] = Field(None, description="Timestamp when evaluation was created")
+    updated_at: Optional[datetime] = Field(None, description="Timestamp when evaluation was last updated")

     @classmethod
     def from_dict(cls, eval_run: Dict[str, Any]) -> "EvalSchema":

@@ -135,8 +135,8 @@ class EvalSchema(BaseModel):


 class DeleteEvalRunsRequest(BaseModel):
-    eval_run_ids: List[str]
+    eval_run_ids: List[str] = Field(..., description="List of evaluation run IDs to delete", min_length=1)


 class UpdateEvalRunRequest(BaseModel):
-    name: str
+    name: str = Field(..., description="New name for the evaluation run", min_length=1, max_length=255)
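Beyond documentation, the new ge/le and min_length constraints mean malformed payloads are rejected by pydantic before the route handler runs. A standalone sketch of the same pattern, not the agno classes themselves:

# Standalone illustration of Field constraints like those added above.
from pydantic import BaseModel, Field, ValidationError


class RunInputSketch(BaseModel):
    input: str = Field(..., description="Input text/query", min_length=1)
    num_iterations: int = Field(1, description="Number of runs", ge=1, le=100)


print(RunInputSketch(input="hello").num_iterations)  # defaults to 1

try:
    RunInputSketch(input="hello", num_iterations=500)
except ValidationError as exc:
    # num_iterations=500 violates le=100 and is rejected before any handler code runs.
    print(exc.errors()[0]["type"])  # less_than_equal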
agno/os/routers/health.py
CHANGED
@@ -1,3 +1,5 @@
+from datetime import datetime, timezone
+
 from fastapi import APIRouter

 from agno.os.schema import HealthResponse

@@ -6,6 +8,8 @@ from agno.os.schema import HealthResponse
 def get_health_router() -> APIRouter:
     router = APIRouter(tags=["Health"])

+    started_time_stamp = datetime.now(timezone.utc).timestamp()
+
     @router.get(
         "/health",
         operation_id="health_check",

@@ -15,11 +19,11 @@ def get_health_router() -> APIRouter:
         responses={
             200: {
                 "description": "API is healthy and operational",
-                "content": {"application/json": {"example": {"status": "ok"}}},
+                "content": {"application/json": {"example": {"status": "ok", "instantiated_at": "1760169236.778903"}}},
             }
         },
     )
     async def health_check() -> HealthResponse:
-        return HealthResponse(status="ok")
+        return HealthResponse(status="ok", instantiated_at=str(started_time_stamp))

     return router
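The timestamp is captured once, when the router is built, so /health now reports when the AgentOS process was instantiated rather than the time of each request. A small sketch of the same pattern probed with FastAPI's test client, using a plain dict in place of HealthResponse:

# Sketch: a health route that reports the process start time, probed with TestClient.
from datetime import datetime, timezone

from fastapi import APIRouter, FastAPI
from fastapi.testclient import TestClient


def get_health_router() -> APIRouter:
    router = APIRouter(tags=["Health"])
    started_time_stamp = datetime.now(timezone.utc).timestamp()  # captured once, at router creation

    @router.get("/health")
    async def health_check() -> dict:
        return {"status": "ok", "instantiated_at": str(started_time_stamp)}

    return router


app = FastAPI()
app.include_router(get_health_router())

print(TestClient(app).get("/health").json())
# e.g. {'status': 'ok', 'instantiated_at': '1760169236.778903'}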
agno/os/routers/knowledge/schemas.py
CHANGED

@@ -16,23 +16,23 @@ class ContentStatus(str, Enum):
 class ContentStatusResponse(BaseModel):
     """Response model for content status endpoint."""

-    status: ContentStatus
-    status_message: str = ""
+    status: ContentStatus = Field(..., description="Current processing status of the content")
+    status_message: str = Field("", description="Status message or error details")


 class ContentResponseSchema(BaseModel):
-    id: str
-    name: Optional[str] = None
-    description: Optional[str] = None
-    type: Optional[str] = None
-    size: Optional[str] = None
-    linked_to: Optional[str] = None
-    metadata: Optional[dict] = None
-    access_count: Optional[int] = None
-    status: Optional[ContentStatus] = None
-    status_message: Optional[str] = None
-    created_at: Optional[datetime] = None
-    updated_at: Optional[datetime] = None
+    id: str = Field(..., description="Unique identifier for the content")
+    name: Optional[str] = Field(None, description="Name of the content")
+    description: Optional[str] = Field(None, description="Description of the content")
+    type: Optional[str] = Field(None, description="MIME type of the content")
+    size: Optional[str] = Field(None, description="Size of the content in bytes")
+    linked_to: Optional[str] = Field(None, description="ID of related content if linked")
+    metadata: Optional[dict] = Field(None, description="Additional metadata as key-value pairs")
+    access_count: Optional[int] = Field(None, description="Number of times content has been accessed", ge=0)
+    status: Optional[ContentStatus] = Field(None, description="Processing status of the content")
+    status_message: Optional[str] = Field(None, description="Status message or error details")
+    created_at: Optional[datetime] = Field(None, description="Timestamp when content was created")
+    updated_at: Optional[datetime] = Field(None, description="Timestamp when content was last updated")

     @classmethod
     def from_dict(cls, content: Dict[str, Any]) -> "ContentResponseSchema":

@@ -99,37 +99,39 @@ class ContentUpdateSchema(BaseModel):


 class ReaderSchema(BaseModel):
-    id: str
-    name: Optional[str] = None
-    description: Optional[str] = None
-    chunkers: Optional[List[str]] = None
+    id: str = Field(..., description="Unique identifier for the reader")
+    name: Optional[str] = Field(None, description="Name of the reader")
+    description: Optional[str] = Field(None, description="Description of the reader's capabilities")
+    chunkers: Optional[List[str]] = Field(None, description="List of supported chunking strategies")


 class ChunkerSchema(BaseModel):
-    key: str
-    name: Optional[str] = None
-    description: Optional[str] = None
+    key: str = Field(..., description="Unique key for the chunker")
+    name: Optional[str] = Field(None, description="Name of the chunker")
+    description: Optional[str] = Field(None, description="Description of the chunking strategy")


 class VectorDbSchema(BaseModel):
-    id: str
-    name: Optional[str] = None
-    description: Optional[str] = None
-    search_types: Optional[List[str]] =
+    id: str = Field(..., description="Unique identifier for the vector database")
+    name: Optional[str] = Field(None, description="Name of the vector database")
+    description: Optional[str] = Field(None, description="Description of the vector database")
+    search_types: Optional[List[str]] = Field(
+        None, description="List of supported search types (vector, keyword, hybrid)"
+    )


 class VectorSearchResult(BaseModel):
     """Schema for search result documents."""

-    id: str
-    content: str
-    name: Optional[str] = None
-    meta_data: Optional[Dict[str, Any]] = None
-    usage: Optional[Dict[str, Any]] = None
-    reranking_score: Optional[float] = None
-    content_id: Optional[str] = None
-    content_origin: Optional[str] = None
-    size: Optional[int] = None
+    id: str = Field(..., description="Unique identifier for the search result document")
+    content: str = Field(..., description="Content text of the document")
+    name: Optional[str] = Field(None, description="Name of the document")
+    meta_data: Optional[Dict[str, Any]] = Field(None, description="Metadata associated with the document")
+    usage: Optional[Dict[str, Any]] = Field(None, description="Usage statistics (e.g., token counts)")
+    reranking_score: Optional[float] = Field(None, description="Reranking score for relevance", ge=0.0, le=1.0)
+    content_id: Optional[str] = Field(None, description="ID of the source content")
+    content_origin: Optional[str] = Field(None, description="Origin URL or source of the content")
+    size: Optional[int] = Field(None, description="Size of the content in bytes", ge=0)

     @classmethod
     def from_document(cls, document) -> "VectorSearchResult":

@@ -153,23 +155,23 @@ class VectorSearchRequestSchema(BaseModel):
     class Meta(BaseModel):
         """Inline metadata schema for pagination."""

-        limit:
-        page:
+        limit: int = Field(20, description="Number of results per page", ge=1, le=100)
+        page: int = Field(1, description="Page number", ge=1)

-    query: str = Field(..., description="The search query")
-    db_id: Optional[str] = Field(None, description="The content database
-    vector_db_ids: Optional[List[str]] = Field(None, description="List of vector database
-    search_type: Optional[str] = Field(None, description="The type of search to perform")
-    max_results: Optional[int] = Field(None, description="The maximum number of results to return")
-    filters: Optional[Dict[str, Any]] = Field(None, description="
+    query: str = Field(..., description="The search query text")
+    db_id: Optional[str] = Field(None, description="The content database ID to search in")
+    vector_db_ids: Optional[List[str]] = Field(None, description="List of vector database IDs to search in")
+    search_type: Optional[str] = Field(None, description="The type of search to perform (vector, keyword, hybrid)")
+    max_results: Optional[int] = Field(None, description="The maximum number of results to return", ge=1, le=1000)
+    filters: Optional[Dict[str, Any]] = Field(None, description="Filters to apply to the search results")
     meta: Optional[Meta] = Field(
         None, description="Pagination metadata. Limit and page number to return a subset of results."
     )


 class ConfigResponseSchema(BaseModel):
-    readers: Optional[Dict[str, ReaderSchema]] = None
-    readersForType: Optional[Dict[str, List[str]]] = None
-    chunkers: Optional[Dict[str, ChunkerSchema]] = None
-    filters: Optional[List[str]] = None
-    vector_dbs: Optional[List[VectorDbSchema]] = None
+    readers: Optional[Dict[str, ReaderSchema]] = Field(None, description="Available content readers")
+    readersForType: Optional[Dict[str, List[str]]] = Field(None, description="Mapping of content types to reader IDs")
+    chunkers: Optional[Dict[str, ChunkerSchema]] = Field(None, description="Available chunking strategies")
+    filters: Optional[List[str]] = Field(None, description="Available filter tags")
+    vector_dbs: Optional[List[VectorDbSchema]] = Field(None, description="Configured vector databases")
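Because the descriptions live on the pydantic fields, they flow into each model's JSON schema and from there into the AgentOS OpenAPI docs. A standalone sketch with an illustrative model:

# Standalone sketch: Field descriptions end up in the generated JSON schema.
from typing import Optional

from pydantic import BaseModel, Field


class ReaderSketch(BaseModel):
    id: str = Field(..., description="Unique identifier for the reader")
    name: Optional[str] = Field(None, description="Name of the reader")


schema = ReaderSketch.model_json_schema()
print(schema["properties"]["id"]["description"])    # Unique identifier for the reader
print(schema["properties"]["name"]["description"])  # Name of the reader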
agno/os/routers/memory/schemas.py
CHANGED

@@ -1,24 +1,24 @@
 from datetime import datetime, timezone
 from typing import Any, Dict, List, Optional

-from pydantic import BaseModel
+from pydantic import BaseModel, Field


 class DeleteMemoriesRequest(BaseModel):
-    memory_ids: List[str]
-    user_id: Optional[str] = None
+    memory_ids: List[str] = Field(..., description="List of memory IDs to delete", min_length=1)
+    user_id: Optional[str] = Field(None, description="User ID to filter memories for deletion")


 class UserMemorySchema(BaseModel):
-    memory_id: str
-    memory: str
-    topics: Optional[List[str]]
+    memory_id: str = Field(..., description="Unique identifier for the memory")
+    memory: str = Field(..., description="Memory content text", min_length=1)
+    topics: Optional[List[str]] = Field(None, description="Topics or tags associated with the memory")

-    agent_id: Optional[str]
-    team_id: Optional[str]
-    user_id: Optional[str]
+    agent_id: Optional[str] = Field(None, description="Agent ID associated with this memory")
+    team_id: Optional[str] = Field(None, description="Team ID associated with this memory")
+    user_id: Optional[str] = Field(None, description="User ID who owns this memory")

-    updated_at: Optional[datetime]
+    updated_at: Optional[datetime] = Field(None, description="Timestamp when memory was last updated")

     @classmethod
     def from_dict(cls, memory_dict: Dict[str, Any]) -> "UserMemorySchema":

@@ -36,17 +36,17 @@ class UserMemorySchema(BaseModel):
 class UserMemoryCreateSchema(BaseModel):
     """Define the payload expected for creating a new user memory"""

-    memory: str
-    user_id: Optional[str] = None
-    topics: Optional[List[str]] = None
+    memory: str = Field(..., description="Memory content text", min_length=1, max_length=5000)
+    user_id: Optional[str] = Field(None, description="User ID who owns this memory")
+    topics: Optional[List[str]] = Field(None, description="Topics or tags to categorize the memory")


 class UserStatsSchema(BaseModel):
     """Schema for user memory statistics"""

-    user_id: str
-    total_memories: int
-    last_memory_updated_at: Optional[datetime] = None
+    user_id: str = Field(..., description="User ID")
+    total_memories: int = Field(..., description="Total number of memories for this user", ge=0)
+    last_memory_updated_at: Optional[datetime] = Field(None, description="Timestamp of the most recent memory update")

     @classmethod
     def from_dict(cls, user_stats_dict: Dict[str, Any]) -> "UserStatsSchema":
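There is a behavioral side effect on UserMemorySchema: in pydantic v2 an annotation like topics: Optional[List[str]] with no default is still required, so adding Field(None, ...) defaults makes these fields genuinely optional. A quick standalone check of that rule:

# Pydantic v2: Optional[...] with no default is still required; Field(None, ...) adds the default.
from typing import List, Optional

from pydantic import BaseModel, Field, ValidationError


class Before(BaseModel):
    memory: str
    topics: Optional[List[str]]  # no default -> still required, even though it may be None


class After(BaseModel):
    memory: str = Field(..., description="Memory content text")
    topics: Optional[List[str]] = Field(None, description="Topics for the memory")


try:
    Before(memory="remember this")
except ValidationError as exc:
    print(exc.errors()[0]["loc"])  # ('topics',) -> missing field

print(After(memory="remember this").topics)  # None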
agno/os/routers/metrics/schemas.py
CHANGED

@@ -1,27 +1,27 @@
 from datetime import datetime
 from typing import Any, Dict, List, Optional

-from pydantic import BaseModel
+from pydantic import BaseModel, Field


 class DayAggregatedMetrics(BaseModel):
     """Aggregated metrics for a given day"""

-    id: str
+    id: str = Field(..., description="Unique identifier for the metrics record")

-    agent_runs_count: int
-    agent_sessions_count: int
-    team_runs_count: int
-    team_sessions_count: int
-    workflow_runs_count: int
-    workflow_sessions_count: int
-    users_count: int
-    token_metrics: Dict[str, Any]
-    model_metrics: List[Dict[str, Any]]
+    agent_runs_count: int = Field(..., description="Total number of agent runs", ge=0)
+    agent_sessions_count: int = Field(..., description="Total number of agent sessions", ge=0)
+    team_runs_count: int = Field(..., description="Total number of team runs", ge=0)
+    team_sessions_count: int = Field(..., description="Total number of team sessions", ge=0)
+    workflow_runs_count: int = Field(..., description="Total number of workflow runs", ge=0)
+    workflow_sessions_count: int = Field(..., description="Total number of workflow sessions", ge=0)
+    users_count: int = Field(..., description="Total number of unique users", ge=0)
+    token_metrics: Dict[str, Any] = Field(..., description="Token usage metrics (input, output, cached, etc.)")
+    model_metrics: List[Dict[str, Any]] = Field(..., description="Metrics grouped by model (model_id, provider, count)")

-    date: datetime
-    created_at: int
-    updated_at: int
+    date: datetime = Field(..., description="Date for which these metrics are aggregated")
+    created_at: int = Field(..., description="Unix timestamp when metrics were created", ge=0)
+    updated_at: int = Field(..., description="Unix timestamp when metrics were last updated", ge=0)

     @classmethod
     def from_dict(cls, metrics_dict: Dict[str, Any]) -> "DayAggregatedMetrics":

@@ -43,5 +43,5 @@ class DayAggregatedMetrics(BaseModel):


 class MetricsResponse(BaseModel):
-    metrics: List[DayAggregatedMetrics]
-    updated_at: Optional[datetime]
+    metrics: List[DayAggregatedMetrics] = Field(..., description="List of daily aggregated metrics")
+    updated_at: Optional[datetime] = Field(None, description="Timestamp of the most recent metrics update")