agno 2.1.10__py3-none-any.whl → 2.2.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (45)
  1. agno/agent/agent.py +1594 -1248
  2. agno/knowledge/knowledge.py +11 -0
  3. agno/knowledge/reader/pptx_reader.py +101 -0
  4. agno/knowledge/reader/reader_factory.py +14 -0
  5. agno/knowledge/types.py +1 -0
  6. agno/models/anthropic/claude.py +2 -2
  7. agno/models/base.py +4 -4
  8. agno/models/ollama/chat.py +7 -2
  9. agno/os/app.py +1 -1
  10. agno/os/interfaces/a2a/router.py +2 -2
  11. agno/os/interfaces/agui/router.py +2 -2
  12. agno/os/router.py +7 -7
  13. agno/os/routers/evals/schemas.py +31 -31
  14. agno/os/routers/health.py +6 -2
  15. agno/os/routers/knowledge/schemas.py +49 -47
  16. agno/os/routers/memory/schemas.py +16 -16
  17. agno/os/routers/metrics/schemas.py +16 -16
  18. agno/os/routers/session/session.py +382 -7
  19. agno/os/schema.py +254 -231
  20. agno/os/utils.py +1 -1
  21. agno/run/agent.py +54 -1
  22. agno/run/team.py +48 -0
  23. agno/run/workflow.py +15 -5
  24. agno/session/summary.py +45 -13
  25. agno/session/team.py +90 -5
  26. agno/team/team.py +1130 -849
  27. agno/utils/agent.py +372 -0
  28. agno/utils/events.py +144 -2
  29. agno/utils/message.py +60 -0
  30. agno/utils/print_response/agent.py +10 -6
  31. agno/utils/print_response/team.py +6 -4
  32. agno/utils/print_response/workflow.py +7 -5
  33. agno/utils/team.py +9 -8
  34. agno/workflow/condition.py +17 -9
  35. agno/workflow/loop.py +18 -10
  36. agno/workflow/parallel.py +14 -6
  37. agno/workflow/router.py +16 -8
  38. agno/workflow/step.py +14 -6
  39. agno/workflow/steps.py +14 -6
  40. agno/workflow/workflow.py +331 -123
  41. {agno-2.1.10.dist-info → agno-2.2.1.dist-info}/METADATA +63 -23
  42. {agno-2.1.10.dist-info → agno-2.2.1.dist-info}/RECORD +45 -43
  43. {agno-2.1.10.dist-info → agno-2.2.1.dist-info}/WHEEL +0 -0
  44. {agno-2.1.10.dist-info → agno-2.2.1.dist-info}/licenses/LICENSE +0 -0
  45. {agno-2.1.10.dist-info → agno-2.2.1.dist-info}/top_level.txt +0 -0
agno/knowledge/knowledge.py CHANGED
@@ -547,6 +547,8 @@ class Knowledge:
             reader = self.pdf_reader
         elif file_extension == ".docx":
             reader = self.docx_reader
+        elif file_extension == ".pptx":
+            reader = self.pptx_reader
         elif file_extension == ".json":
             reader = self.json_reader
         elif file_extension == ".markdown":
@@ -835,6 +837,8 @@ class Knowledge:
             reader = self.csv_reader
         elif s3_object.uri.endswith(".docx"):
             reader = self.docx_reader
+        elif s3_object.uri.endswith(".pptx"):
+            reader = self.pptx_reader
         elif s3_object.uri.endswith(".json"):
             reader = self.json_reader
         elif s3_object.uri.endswith(".markdown"):
@@ -917,6 +921,8 @@ class Knowledge:
             reader = self.csv_reader
         elif gcs_object.name.endswith(".docx"):
             reader = self.docx_reader
+        elif gcs_object.name.endswith(".pptx"):
+            reader = self.pptx_reader
         elif gcs_object.name.endswith(".json"):
             reader = self.json_reader
         elif gcs_object.name.endswith(".markdown"):
@@ -1893,6 +1899,11 @@ class Knowledge:
         """Docx reader - lazy loaded via factory."""
         return self._get_reader("docx")
 
+    @property
+    def pptx_reader(self) -> Optional[Reader]:
+        """PPTX reader - lazy loaded via factory."""
+        return self._get_reader("pptx")
+
     @property
     def json_reader(self) -> Optional[Reader]:
         """JSON reader - lazy loaded via factory."""
agno/knowledge/reader/pptx_reader.py ADDED
@@ -0,0 +1,101 @@
+import asyncio
+from pathlib import Path
+from typing import IO, Any, List, Optional, Union
+from uuid import uuid4
+
+from agno.knowledge.chunking.document import DocumentChunking
+from agno.knowledge.chunking.strategy import ChunkingStrategy, ChunkingStrategyType
+from agno.knowledge.document.base import Document
+from agno.knowledge.reader.base import Reader
+from agno.knowledge.types import ContentType
+from agno.utils.log import log_info, logger
+
+try:
+    from pptx import Presentation  # type: ignore
+except ImportError:
+    raise ImportError("The `python-pptx` package is not installed. Please install it via `pip install python-pptx`.")
+
+
+class PPTXReader(Reader):
+    """Reader for PPTX files"""
+
+    def __init__(self, chunking_strategy: Optional[ChunkingStrategy] = DocumentChunking(), **kwargs):
+        super().__init__(chunking_strategy=chunking_strategy, **kwargs)
+
+    @classmethod
+    def get_supported_chunking_strategies(self) -> List[ChunkingStrategyType]:
+        """Get the list of supported chunking strategies for PPTX readers."""
+        return [
+            ChunkingStrategyType.DOCUMENT_CHUNKER,
+            ChunkingStrategyType.FIXED_SIZE_CHUNKER,
+            ChunkingStrategyType.SEMANTIC_CHUNKER,
+            ChunkingStrategyType.AGENTIC_CHUNKER,
+            ChunkingStrategyType.RECURSIVE_CHUNKER,
+        ]
+
+    @classmethod
+    def get_supported_content_types(self) -> List[ContentType]:
+        return [ContentType.PPTX]
+
+    def read(self, file: Union[Path, IO[Any]], name: Optional[str] = None) -> List[Document]:
+        """Read a pptx file and return a list of documents"""
+        try:
+            if isinstance(file, Path):
+                if not file.exists():
+                    raise FileNotFoundError(f"Could not find file: {file}")
+                log_info(f"Reading: {file}")
+                presentation = Presentation(str(file))
+                doc_name = name or file.stem
+            else:
+                log_info(f"Reading uploaded file: {getattr(file, 'name', 'pptx_file')}")
+                presentation = Presentation(file)
+                doc_name = name or (
+                    getattr(file, "name", "pptx_file").split(".")[0] if hasattr(file, "name") else "pptx_file"
+                )
+
+            # Extract text from all slides
+            slide_texts = []
+            for slide_number, slide in enumerate(presentation.slides, 1):
+                slide_text = f"Slide {slide_number}:\n"
+
+                # Extract text from shapes that contain text
+                text_content = []
+                for shape in slide.shapes:
+                    if hasattr(shape, "text") and shape.text.strip():
+                        text_content.append(shape.text.strip())
+
+                if text_content:
+                    slide_text += "\n".join(text_content)
+                else:
+                    slide_text += "(No text content)"
+
+                slide_texts.append(slide_text)
+
+            doc_content = "\n\n".join(slide_texts)
+
+            documents = [
+                Document(
+                    name=doc_name,
+                    id=str(uuid4()),
+                    content=doc_content,
+                )
+            ]
+
+            if self.chunk:
+                chunked_documents = []
+                for document in documents:
+                    chunked_documents.extend(self.chunk_document(document))
+                return chunked_documents
+            return documents
+
+        except Exception as e:
+            logger.error(f"Error reading file: {e}")
+            return []
+
+    async def async_read(self, file: Union[Path, IO[Any]], name: Optional[str] = None) -> List[Document]:
+        """Asynchronously read a pptx file and return a list of documents"""
+        try:
+            return await asyncio.to_thread(self.read, file, name)
+        except Exception as e:
+            logger.error(f"Error reading file asynchronously: {e}")
+            return []
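
The new reader can also be used on its own. A minimal sketch, based only on the `read()` signature and defaults shown above; the deck path is illustrative:

```python
from pathlib import Path

from agno.knowledge.reader.pptx_reader import PPTXReader

# Defaults to DocumentChunking, per the constructor above.
reader = PPTXReader()

# "decks/overview.pptx" is an illustrative path, not part of the package.
documents = reader.read(Path("decks/overview.pptx"))
for doc in documents:
    print(doc.name, len(doc.content))
```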
agno/knowledge/reader/reader_factory.py CHANGED
@@ -58,6 +58,18 @@ class ReaderFactory:
         config.update(kwargs)
         return DocxReader(**config)
 
+    @classmethod
+    def _get_pptx_reader(cls, **kwargs) -> Reader:
+        """Get PPTX reader instance."""
+        from agno.knowledge.reader.pptx_reader import PPTXReader
+
+        config: Dict[str, Any] = {
+            "name": "PPTX Reader",
+            "description": "Extracts text content from Microsoft PowerPoint presentations (.pptx format)",
+        }
+        config.update(kwargs)
+        return PPTXReader(**config)
+
     @classmethod
     def _get_json_reader(cls, **kwargs) -> Reader:
         """Get JSON reader instance."""
@@ -202,6 +214,8 @@ class ReaderFactory:
             return cls.create_reader("csv")
         elif extension in [".docx", ".doc", "application/vnd.openxmlformats-officedocument.wordprocessingml.document"]:
             return cls.create_reader("docx")
+        elif extension == ".pptx":
+            return cls.create_reader("pptx")
         elif extension == ".json":
            return cls.create_reader("json")
         elif extension in [".md", ".markdown"]:
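
For callers that go through the factory instead of importing the reader directly, a sketch assuming `create_reader` is the public entry point implied by the dispatch code above:

```python
from agno.knowledge.reader.reader_factory import ReaderFactory

# The "pptx" key maps to _get_pptx_reader via the factory.
pptx_reader = ReaderFactory.create_reader("pptx")
print(pptx_reader.name)  # expected: "PPTX Reader", the default set in _get_pptx_reader
```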
agno/knowledge/types.py CHANGED
@@ -20,6 +20,7 @@ class ContentType(str, Enum):
     MARKDOWN = ".md"
     DOCX = ".docx"
     DOC = ".doc"
+    PPTX = ".pptx"
     JSON = ".json"
 
     # Spreadsheet file extensions
agno/models/anthropic/claude.py CHANGED
@@ -75,7 +75,7 @@ class Claude(Model):
     provider: str = "Anthropic"
 
     # Request parameters
-    max_tokens: Optional[int] = 4096
+    max_tokens: Optional[int] = 8192
     thinking: Optional[Dict[str, Any]] = None
     temperature: Optional[float] = None
     stop_sequences: Optional[List[str]] = None
@@ -656,7 +656,7 @@ class Claude(Model):
 
         # Anthropic-specific additional fields
         if response_usage.server_tool_use:
-            metrics.provider_metrics = {"server_tool_use": response_usage.server_tool_use}
+            metrics.provider_metrics = {"server_tool_use": response_usage.server_tool_use.model_dump()}
         if isinstance(response_usage, Usage):
             if response_usage.service_tier:
                 metrics.provider_metrics = metrics.provider_metrics or {}
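
Note the default completion budget doubles from 4096 to 8192 tokens. A hedged sketch for pinning the old limit, assuming keyword construction of the model class; the model id shown is illustrative, only the `max_tokens` field comes from the diff:

```python
from agno.models.anthropic.claude import Claude

# Pin the pre-2.2 default explicitly if the larger budget is unwanted.
model = Claude(id="claude-sonnet-4-0", max_tokens=4096)  # id is illustrative
```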
agno/models/base.py CHANGED
@@ -1237,8 +1237,8 @@ class Model(ABC):
                 if function_call.function.show_result:
                     yield ModelResponse(content=item.content)
 
-                if isinstance(item, CustomEvent):
-                    function_call_output += str(item)
+                if isinstance(item, CustomEvent):
+                    function_call_output += str(item)
 
                 # Yield the event itself to bubble it up
                 yield item
@@ -1626,8 +1626,8 @@ class Model(ABC):
                     await event_queue.put(ModelResponse(content=item.content))
                     continue
 
-                if isinstance(item, CustomEvent):
-                    function_call_output += str(item)
+                if isinstance(item, CustomEvent):
+                    function_call_output += str(item)
 
                 # Put the event into the queue to be yielded
                 await event_queue.put(item)
agno/models/ollama/chat.py CHANGED
@@ -429,8 +429,13 @@ class Ollama(Model):
         """
         metrics = Metrics()
 
-        metrics.input_tokens = response.get("prompt_eval_count", 0)
-        metrics.output_tokens = response.get("eval_count", 0)
+        # Safely handle None values from Ollama Cloud responses
+        input_tokens = response.get("prompt_eval_count")
+        output_tokens = response.get("eval_count")
+
+        # Default to 0 if None
+        metrics.input_tokens = input_tokens if input_tokens is not None else 0
+        metrics.output_tokens = output_tokens if output_tokens is not None else 0
         metrics.total_tokens = metrics.input_tokens + metrics.output_tokens
 
         return metrics
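
The reason for the change: `dict.get()` only falls back to its default when the key is missing, so an explicit `None` (which Ollama Cloud responses can return, per the comment above) would previously flow straight into the metrics. A standalone illustration:

```python
response = {"prompt_eval_count": None}

# Old pattern: the default is ignored because the key exists with a None value.
assert response.get("prompt_eval_count", 0) is None

# New pattern: coerce None to 0 before doing arithmetic on the counts.
value = response.get("prompt_eval_count")
assert (value if value is not None else 0) == 0
```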
agno/os/app.py CHANGED
@@ -312,7 +312,7 @@ class AgentOS:
                 async with self._mcp_app.lifespan(app):  # type: ignore
                     yield
 
-            final_lifespan = combined_lifespan
+            final_lifespan = combined_lifespan  # type: ignore
 
             fastapi_app = self._make_app(lifespan=final_lifespan)
         else:
agno/os/interfaces/a2a/router.py CHANGED
@@ -221,7 +221,7 @@ def attach_routes(
                 session_id=context_id,
                 user_id=user_id,
                 stream=True,
-                stream_intermediate_steps=True,
+                stream_events=True,
                 **kwargs,
             )
         else:
@@ -234,7 +234,7 @@ def attach_routes(
                 session_id=context_id,
                 user_id=user_id,
                 stream=True,
-                stream_intermediate_steps=True,
+                stream_events=True,
                 **kwargs,
             )
 
agno/os/interfaces/agui/router.py CHANGED
@@ -44,7 +44,7 @@ async def run_agent(agent: Agent, run_input: RunAgentInput) -> AsyncIterator[BaseEvent]:
         input=messages,
         session_id=run_input.thread_id,
         stream=True,
-        stream_intermediate_steps=True,
+        stream_events=True,
         user_id=user_id,
     )
 
@@ -80,7 +80,7 @@ async def run_team(team: Team, input: RunAgentInput) -> AsyncIterator[BaseEvent]:
         input=messages,
         session_id=input.thread_id,
         stream=True,
-        stream_intermediate_steps=True,
+        stream_steps=True,
         user_id=user_id,
     )
 
agno/os/router.py CHANGED
@@ -250,7 +250,7 @@ async def agent_response_streamer(
         videos=videos,
         files=files,
         stream=True,
-        stream_intermediate_steps=True,
+        stream_events=True,
         **kwargs,
     )
     async for run_response_chunk in run_response:
@@ -287,7 +287,7 @@ async def agent_continue_response_streamer(
         session_id=session_id,
         user_id=user_id,
         stream=True,
-        stream_intermediate_steps=True,
+        stream_events=True,
     )
     async for run_response_chunk in continue_response:
         yield format_sse_event(run_response_chunk)  # type: ignore
@@ -335,7 +335,7 @@ async def team_response_streamer(
         videos=videos,
         files=files,
         stream=True,
-        stream_intermediate_steps=True,
+        stream_events=True,
         **kwargs,
     )
     async for run_response_chunk in run_response:
@@ -389,12 +389,12 @@ async def handle_workflow_via_websocket(websocket: WebSocket, message: dict, os:
         session_id = str(uuid4())
 
     # Execute workflow in background with streaming
-    workflow_result = await workflow.arun(
+    workflow_result = await workflow.arun(  # type: ignore
         input=user_message,
         session_id=session_id,
         user_id=user_id,
         stream=True,
-        stream_intermediate_steps=True,
+        stream_events=True,
         background=True,
         websocket=websocket,
     )
@@ -435,12 +435,12 @@ async def workflow_response_streamer(
     **kwargs: Any,
 ) -> AsyncGenerator:
     try:
-        run_response = await workflow.arun(
+        run_response = workflow.arun(
             input=input,
             session_id=session_id,
             user_id=user_id,
             stream=True,
-            stream_intermediate_steps=True,
+            stream_events=True,
             **kwargs,
         )
 
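Across these routers the streaming flag is renamed from `stream_intermediate_steps` to `stream_events`, and a streaming (non-background) workflow run is no longer awaited before iteration. A hypothetical helper mirroring the calling pattern of `workflow_response_streamer` above; everything beyond the two renamed/changed lines is illustrative:

```python
from typing import Any, AsyncGenerator


async def stream_workflow_events(workflow: Any, session_id: str, user_id: str) -> AsyncGenerator:
    """Hypothetical helper, not part of agno: consume a streaming workflow run."""
    run_response = workflow.arun(        # no await when stream=True, per the diff
        input="weekly digest",           # illustrative input
        session_id=session_id,
        user_id=user_id,
        stream=True,
        stream_events=True,              # formerly stream_intermediate_steps=True
    )
    async for run_response_chunk in run_response:
        yield run_response_chunk
```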
agno/os/routers/evals/schemas.py CHANGED
@@ -2,7 +2,7 @@ from dataclasses import asdict
 from datetime import datetime, timezone
 from typing import Any, Dict, List, Optional
 
-from pydantic import BaseModel
+from pydantic import BaseModel, Field
 
 from agno.db.schemas.evals import EvalType
 from agno.eval import AccuracyResult, PerformanceResult, ReliabilityResult
@@ -12,43 +12,43 @@ from agno.eval.reliability import ReliabilityEval
 
 
 class EvalRunInput(BaseModel):
-    agent_id: Optional[str] = None
-    team_id: Optional[str] = None
-
-    model_id: Optional[str] = None
-    model_provider: Optional[str] = None
-    eval_type: EvalType
-    input: str
-    additional_guidelines: Optional[str] = None
-    additional_context: Optional[str] = None
-    num_iterations: Optional[int] = 1
-    name: Optional[str] = None
+    agent_id: Optional[str] = Field(None, description="Agent ID to evaluate")
+    team_id: Optional[str] = Field(None, description="Team ID to evaluate")
+
+    model_id: Optional[str] = Field(None, description="Model ID to use for evaluation")
+    model_provider: Optional[str] = Field(None, description="Model provider name")
+    eval_type: EvalType = Field(..., description="Type of evaluation to run (accuracy, performance, or reliability)")
+    input: str = Field(..., description="Input text/query for the evaluation", min_length=1)
+    additional_guidelines: Optional[str] = Field(None, description="Additional guidelines for the evaluation")
+    additional_context: Optional[str] = Field(None, description="Additional context for the evaluation")
+    num_iterations: int = Field(1, description="Number of times to run the evaluation", ge=1, le=100)
+    name: Optional[str] = Field(None, description="Name for this evaluation run")
 
     # Accuracy eval specific fields
-    expected_output: Optional[str] = None
+    expected_output: Optional[str] = Field(None, description="Expected output for accuracy evaluation")
 
     # Performance eval specific fields
-    warmup_runs: Optional[int] = 0
+    warmup_runs: int = Field(0, description="Number of warmup runs before measuring performance", ge=0, le=10)
 
     # Reliability eval specific fields
-    expected_tool_calls: Optional[List[str]] = None
+    expected_tool_calls: Optional[List[str]] = Field(None, description="Expected tool calls for reliability evaluation")
 
 
 class EvalSchema(BaseModel):
-    id: str
-
-    agent_id: Optional[str] = None
-    model_id: Optional[str] = None
-    model_provider: Optional[str] = None
-    team_id: Optional[str] = None
-    workflow_id: Optional[str] = None
-    name: Optional[str] = None
-    evaluated_component_name: Optional[str] = None
-    eval_type: EvalType
-    eval_data: Dict[str, Any]
-    eval_input: Optional[Dict[str, Any]] = None
-    created_at: Optional[datetime] = None
-    updated_at: Optional[datetime] = None
+    id: str = Field(..., description="Unique identifier for the evaluation run")
+
+    agent_id: Optional[str] = Field(None, description="Agent ID that was evaluated")
+    model_id: Optional[str] = Field(None, description="Model ID used in evaluation")
+    model_provider: Optional[str] = Field(None, description="Model provider name")
+    team_id: Optional[str] = Field(None, description="Team ID that was evaluated")
+    workflow_id: Optional[str] = Field(None, description="Workflow ID that was evaluated")
+    name: Optional[str] = Field(None, description="Name of the evaluation run")
+    evaluated_component_name: Optional[str] = Field(None, description="Name of the evaluated component")
+    eval_type: EvalType = Field(..., description="Type of evaluation (accuracy, performance, or reliability)")
+    eval_data: Dict[str, Any] = Field(..., description="Evaluation results and metrics")
+    eval_input: Optional[Dict[str, Any]] = Field(None, description="Input parameters used for the evaluation")
+    created_at: Optional[datetime] = Field(None, description="Timestamp when evaluation was created")
+    updated_at: Optional[datetime] = Field(None, description="Timestamp when evaluation was last updated")
 
     @classmethod
     def from_dict(cls, eval_run: Dict[str, Any]) -> "EvalSchema":
@@ -135,8 +135,8 @@ class EvalSchema(BaseModel):
 
 
 class DeleteEvalRunsRequest(BaseModel):
-    eval_run_ids: List[str]
+    eval_run_ids: List[str] = Field(..., description="List of evaluation run IDs to delete", min_length=1)
 
 
 class UpdateEvalRunRequest(BaseModel):
-    name: str
+    name: str = Field(..., description="New name for the evaluation run", min_length=1, max_length=255)
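
The same `Field(...)` migration is applied to the knowledge and memory schemas below. The practical effect, shown with plain Pydantic (not agno code): descriptions land in the generated JSON/OpenAPI schema, and the new constraints are enforced at validation time.

```python
from pydantic import BaseModel, Field, ValidationError


class UpdateEvalRunRequest(BaseModel):
    # Mirrors the field defined in the diff above.
    name: str = Field(..., description="New name for the evaluation run", min_length=1, max_length=255)


schema = UpdateEvalRunRequest.model_json_schema()
print(schema["properties"]["name"]["description"])  # description surfaces in the API docs

try:
    UpdateEvalRunRequest(name="")  # empty names are now rejected
except ValidationError as exc:
    print(exc.error_count(), "validation error")
```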
agno/os/routers/health.py CHANGED
@@ -1,3 +1,5 @@
+from datetime import datetime, timezone
+
 from fastapi import APIRouter
 
 from agno.os.schema import HealthResponse
@@ -6,6 +8,8 @@ from agno.os.schema import HealthResponse
 def get_health_router() -> APIRouter:
     router = APIRouter(tags=["Health"])
 
+    started_time_stamp = datetime.now(timezone.utc).timestamp()
+
     @router.get(
         "/health",
         operation_id="health_check",
@@ -15,11 +19,11 @@ def get_health_router() -> APIRouter:
         responses={
             200: {
                 "description": "API is healthy and operational",
-                "content": {"application/json": {"example": {"status": "ok"}}},
+                "content": {"application/json": {"example": {"status": "ok", "instantiated_at": "1760169236.778903"}}},
             }
         },
     )
     async def health_check() -> HealthResponse:
-        return HealthResponse(status="ok")
+        return HealthResponse(status="ok", instantiated_at=str(started_time_stamp))
 
     return router
agno/os/routers/knowledge/schemas.py CHANGED
@@ -16,23 +16,23 @@ class ContentStatus(str, Enum):
 class ContentStatusResponse(BaseModel):
     """Response model for content status endpoint."""
 
-    status: ContentStatus
-    status_message: str = ""
+    status: ContentStatus = Field(..., description="Current processing status of the content")
+    status_message: str = Field("", description="Status message or error details")
 
 
 class ContentResponseSchema(BaseModel):
-    id: str
-    name: Optional[str] = None
-    description: Optional[str] = None
-    type: Optional[str] = None
-    size: Optional[str] = None
-    linked_to: Optional[str] = None
-    metadata: Optional[dict] = None
-    access_count: Optional[int] = None
-    status: Optional[ContentStatus] = None
-    status_message: Optional[str] = None
-    created_at: Optional[datetime] = None
-    updated_at: Optional[datetime] = None
+    id: str = Field(..., description="Unique identifier for the content")
+    name: Optional[str] = Field(None, description="Name of the content")
+    description: Optional[str] = Field(None, description="Description of the content")
+    type: Optional[str] = Field(None, description="MIME type of the content")
+    size: Optional[str] = Field(None, description="Size of the content in bytes")
+    linked_to: Optional[str] = Field(None, description="ID of related content if linked")
+    metadata: Optional[dict] = Field(None, description="Additional metadata as key-value pairs")
+    access_count: Optional[int] = Field(None, description="Number of times content has been accessed", ge=0)
+    status: Optional[ContentStatus] = Field(None, description="Processing status of the content")
+    status_message: Optional[str] = Field(None, description="Status message or error details")
+    created_at: Optional[datetime] = Field(None, description="Timestamp when content was created")
+    updated_at: Optional[datetime] = Field(None, description="Timestamp when content was last updated")
 
     @classmethod
     def from_dict(cls, content: Dict[str, Any]) -> "ContentResponseSchema":
@@ -99,37 +99,39 @@ class ContentUpdateSchema(BaseModel):
 
 
 class ReaderSchema(BaseModel):
-    id: str
-    name: Optional[str] = None
-    description: Optional[str] = None
-    chunkers: Optional[List[str]] = None
+    id: str = Field(..., description="Unique identifier for the reader")
+    name: Optional[str] = Field(None, description="Name of the reader")
+    description: Optional[str] = Field(None, description="Description of the reader's capabilities")
+    chunkers: Optional[List[str]] = Field(None, description="List of supported chunking strategies")
 
 
 class ChunkerSchema(BaseModel):
-    key: str
-    name: Optional[str] = None
-    description: Optional[str] = None
+    key: str = Field(..., description="Unique key for the chunker")
+    name: Optional[str] = Field(None, description="Name of the chunker")
+    description: Optional[str] = Field(None, description="Description of the chunking strategy")
 
 
 class VectorDbSchema(BaseModel):
-    id: str
-    name: Optional[str] = None
-    description: Optional[str] = None
-    search_types: Optional[List[str]] = None
+    id: str = Field(..., description="Unique identifier for the vector database")
+    name: Optional[str] = Field(None, description="Name of the vector database")
+    description: Optional[str] = Field(None, description="Description of the vector database")
+    search_types: Optional[List[str]] = Field(
+        None, description="List of supported search types (vector, keyword, hybrid)"
+    )
 
 
 class VectorSearchResult(BaseModel):
     """Schema for search result documents."""
 
-    id: str
-    content: str
-    name: Optional[str] = None
-    meta_data: Optional[Dict[str, Any]] = None
-    usage: Optional[Dict[str, Any]] = None
-    reranking_score: Optional[float] = None
-    content_id: Optional[str] = None
-    content_origin: Optional[str] = None
-    size: Optional[int] = None
+    id: str = Field(..., description="Unique identifier for the search result document")
+    content: str = Field(..., description="Content text of the document")
+    name: Optional[str] = Field(None, description="Name of the document")
+    meta_data: Optional[Dict[str, Any]] = Field(None, description="Metadata associated with the document")
+    usage: Optional[Dict[str, Any]] = Field(None, description="Usage statistics (e.g., token counts)")
+    reranking_score: Optional[float] = Field(None, description="Reranking score for relevance", ge=0.0, le=1.0)
+    content_id: Optional[str] = Field(None, description="ID of the source content")
+    content_origin: Optional[str] = Field(None, description="Origin URL or source of the content")
+    size: Optional[int] = Field(None, description="Size of the content in bytes", ge=0)
 
     @classmethod
     def from_document(cls, document) -> "VectorSearchResult":
@@ -153,23 +155,23 @@ class VectorSearchRequestSchema(BaseModel):
     class Meta(BaseModel):
         """Inline metadata schema for pagination."""
 
-        limit: Optional[int] = Field(20, description="Number of results per page", ge=1, le=100)
-        page: Optional[int] = Field(1, description="Page number", ge=1)
+        limit: int = Field(20, description="Number of results per page", ge=1, le=100)
+        page: int = Field(1, description="Page number", ge=1)
 
-    query: str = Field(..., description="The search query")
-    db_id: Optional[str] = Field(None, description="The content database id")
-    vector_db_ids: Optional[List[str]] = Field(None, description="List of vector database ids to search in")
-    search_type: Optional[str] = Field(None, description="The type of search to perform")
-    max_results: Optional[int] = Field(None, description="The maximum number of results to return")
-    filters: Optional[Dict[str, Any]] = Field(None, description="The filters to apply to the search")
+    query: str = Field(..., description="The search query text")
+    db_id: Optional[str] = Field(None, description="The content database ID to search in")
+    vector_db_ids: Optional[List[str]] = Field(None, description="List of vector database IDs to search in")
+    search_type: Optional[str] = Field(None, description="The type of search to perform (vector, keyword, hybrid)")
+    max_results: Optional[int] = Field(None, description="The maximum number of results to return", ge=1, le=1000)
+    filters: Optional[Dict[str, Any]] = Field(None, description="Filters to apply to the search results")
     meta: Optional[Meta] = Field(
         None, description="Pagination metadata. Limit and page number to return a subset of results."
     )
 
 
 class ConfigResponseSchema(BaseModel):
-    readers: Optional[Dict[str, ReaderSchema]] = None
-    readersForType: Optional[Dict[str, List[str]]] = None
-    chunkers: Optional[Dict[str, ChunkerSchema]] = None
-    filters: Optional[List[str]] = None
-    vector_dbs: Optional[List[VectorDbSchema]] = None
+    readers: Optional[Dict[str, ReaderSchema]] = Field(None, description="Available content readers")
+    readersForType: Optional[Dict[str, List[str]]] = Field(None, description="Mapping of content types to reader IDs")
+    chunkers: Optional[Dict[str, ChunkerSchema]] = Field(None, description="Available chunking strategies")
+    filters: Optional[List[str]] = Field(None, description="Available filter tags")
+    vector_dbs: Optional[List[VectorDbSchema]] = Field(None, description="Configured vector databases")
agno/os/routers/memory/schemas.py CHANGED
@@ -1,24 +1,24 @@
 from datetime import datetime, timezone
 from typing import Any, Dict, List, Optional
 
-from pydantic import BaseModel
+from pydantic import BaseModel, Field
 
 
 class DeleteMemoriesRequest(BaseModel):
-    memory_ids: List[str]
-    user_id: Optional[str] = None
+    memory_ids: List[str] = Field(..., description="List of memory IDs to delete", min_length=1)
+    user_id: Optional[str] = Field(None, description="User ID to filter memories for deletion")
 
 
 class UserMemorySchema(BaseModel):
-    memory_id: str
-    memory: str
-    topics: Optional[List[str]]
+    memory_id: str = Field(..., description="Unique identifier for the memory")
+    memory: str = Field(..., description="Memory content text", min_length=1)
+    topics: Optional[List[str]] = Field(None, description="Topics or tags associated with the memory")
 
-    agent_id: Optional[str]
-    team_id: Optional[str]
-    user_id: Optional[str]
+    agent_id: Optional[str] = Field(None, description="Agent ID associated with this memory")
+    team_id: Optional[str] = Field(None, description="Team ID associated with this memory")
+    user_id: Optional[str] = Field(None, description="User ID who owns this memory")
 
-    updated_at: Optional[datetime]
+    updated_at: Optional[datetime] = Field(None, description="Timestamp when memory was last updated")
 
     @classmethod
     def from_dict(cls, memory_dict: Dict[str, Any]) -> "UserMemorySchema":
@@ -36,17 +36,17 @@ class UserMemorySchema(BaseModel):
 class UserMemoryCreateSchema(BaseModel):
     """Define the payload expected for creating a new user memory"""
 
-    memory: str
-    user_id: Optional[str] = None
-    topics: Optional[List[str]] = None
+    memory: str = Field(..., description="Memory content text", min_length=1, max_length=5000)
+    user_id: Optional[str] = Field(None, description="User ID who owns this memory")
+    topics: Optional[List[str]] = Field(None, description="Topics or tags to categorize the memory")
 
 
 class UserStatsSchema(BaseModel):
     """Schema for user memory statistics"""
 
-    user_id: str
-    total_memories: int
-    last_memory_updated_at: Optional[datetime] = None
+    user_id: str = Field(..., description="User ID")
+    total_memories: int = Field(..., description="Total number of memories for this user", ge=0)
+    last_memory_updated_at: Optional[datetime] = Field(None, description="Timestamp of the most recent memory update")
 
     @classmethod
     def from_dict(cls, user_stats_dict: Dict[str, Any]) -> "UserStatsSchema":