agno 2.2.10__py3-none-any.whl → 2.2.11__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
agno/os/routers/health.py CHANGED
@@ -5,13 +5,13 @@ from fastapi import APIRouter
 from agno.os.schema import HealthResponse
 
 
-def get_health_router() -> APIRouter:
+def get_health_router(health_endpoint: str = "/health") -> APIRouter:
     router = APIRouter(tags=["Health"])
 
     started_time_stamp = datetime.now(timezone.utc).timestamp()
 
     @router.get(
-        "/health",
+        health_endpoint,
         operation_id="health_check",
         summary="Health Check",
         description="Check the health status of the AgentOS API. Returns a simple status indicator.",
@@ -19,7 +19,9 @@ def get_health_router() -> APIRouter:
         responses={
             200: {
                 "description": "API is healthy and operational",
-                "content": {"application/json": {"example": {"status": "ok", "instantiated_at": "1760169236.778903"}}},
+                "content": {
+                    "application/json": {"example": {"status": "ok", "instantiated_at": str(started_time_stamp)}}
+                },
             }
         },
     )
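
Usage sketch: the health route path is now configurable through the new health_endpoint parameter. A minimal example of mounting it under a custom path; the bare FastAPI app and the "/healthz" path are illustrative assumptions, not part of this diff.

    from fastapi import FastAPI

    from agno.os.routers.health import get_health_router

    app = FastAPI()
    # Serve the same health check under a custom path instead of the default "/health".
    app.include_router(get_health_router(health_endpoint="/healthz"))
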
@@ -102,6 +102,8 @@ def attach_routes(router: APIRouter, knowledge_instances: List[Knowledge]) -> AP
         text_content: Optional[str] = Form(None, description="Raw text content to process"),
         reader_id: Optional[str] = Form(None, description="ID of the reader to use for content processing"),
         chunker: Optional[str] = Form(None, description="Chunking strategy to apply during processing"),
+        chunk_size: Optional[int] = Form(None, description="Chunk size to use for processing"),
+        chunk_overlap: Optional[int] = Form(None, description="Chunk overlap to use for processing"),
         db_id: Optional[str] = Query(default=None, description="Database ID to use for content storage"),
     ):
         knowledge = get_knowledge_instance_by_db_id(knowledge_instances, db_id)
@@ -172,7 +174,7 @@ def attach_routes(router: APIRouter, knowledge_instances: List[Knowledge]) -> AP
         content.content_hash = content_hash
         content.id = generate_id(content_hash)
 
-        background_tasks.add_task(process_content, knowledge, content, reader_id, chunker)
+        background_tasks.add_task(process_content, knowledge, content, reader_id, chunker, chunk_size, chunk_overlap)
 
         response = ContentResponseSchema(
             id=content.id,
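
Usage sketch: the content upload route now accepts optional chunk_size and chunk_overlap form fields and forwards them to the background process_content task. A hedged client-side example; only the form field names are confirmed by the diff above, while the route path, port, and the "FixedSizeChunker" key (which mirrors the chunker registry shown later in this diff) are assumptions.

    import httpx

    response = httpx.post(
        "http://localhost:7777/knowledge/content",  # assumed route path
        data={
            "text_content": "Raw text to index.",
            "chunker": "FixedSizeChunker",
            "chunk_size": 1000,      # new optional field in 2.2.11
            "chunk_overlap": 100,    # new optional field in 2.2.11
        },
    )
    print(response.status_code, response.json())
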
@@ -801,36 +803,55 @@ def attach_routes(router: APIRouter, knowledge_instances: List[Knowledge]) -> AP
                 "key": "AgenticChunker",
                 "name": "AgenticChunker",
                 "description": "Chunking strategy that uses an LLM to determine natural breakpoints in the text",
+                "metadata": {"chunk_size": 5000},
             },
             "DocumentChunker": {
                 "key": "DocumentChunker",
                 "name": "DocumentChunker",
                 "description": "A chunking strategy that splits text based on document structure like paragraphs and sections",
-            },
-            "RecursiveChunker": {
-                "key": "RecursiveChunker",
-                "name": "RecursiveChunker",
-                "description": "Chunking strategy that recursively splits text into chunks by finding natural break points",
-            },
-            "SemanticChunker": {
-                "key": "SemanticChunker",
-                "name": "SemanticChunker",
-                "description": "Chunking strategy that splits text into semantic chunks using chonkie",
+                "metadata": {
+                    "chunk_size": 5000,
+                    "chunk_overlap": 0,
+                },
             },
             "FixedSizeChunker": {
                 "key": "FixedSizeChunker",
                 "name": "FixedSizeChunker",
                 "description": "Chunking strategy that splits text into fixed-size chunks with optional overlap",
+                "metadata": {
+                    "chunk_size": 5000,
+                    "chunk_overlap": 0,
+                },
+            },
+            "MarkdownChunker": {
+                "key": "MarkdownChunker",
+                "name": "MarkdownChunker",
+                "description": "A chunking strategy that splits markdown based on structure like headers, paragraphs and sections",
+                "metadata": {
+                    "chunk_size": 5000,
+                    "chunk_overlap": 0,
+                },
+            },
+            "RecursiveChunker": {
+                "key": "RecursiveChunker",
+                "name": "RecursiveChunker",
+                "description": "Chunking strategy that recursively splits text into chunks by finding natural break points",
+                "metadata": {
+                    "chunk_size": 5000,
+                    "chunk_overlap": 0,
+                },
             },
             "RowChunker": {
                 "key": "RowChunker",
                 "name": "RowChunker",
                 "description": "RowChunking chunking strategy",
+                "metadata": {},
             },
-            "MarkdownChunker": {
-                "key": "MarkdownChunker",
-                "name": "MarkdownChunker",
-                "description": "A chunking strategy that splits markdown based on structure like headers, paragraphs and sections",
+            "SemanticChunker": {
+                "key": "SemanticChunker",
+                "name": "SemanticChunker",
+                "description": "Chunking strategy that splits text into semantic chunks using chonkie",
+                "metadata": {"chunk_size": 5000},
             },
         },
         "vector_dbs": [
@@ -896,7 +917,10 @@ def attach_routes(router: APIRouter, knowledge_instances: List[Knowledge]) -> AP
         chunker_key = chunker_info.get("key")
         if chunker_key:
             chunkers_dict[chunker_key] = ChunkerSchema(
-                key=chunker_key, name=chunker_info.get("name"), description=chunker_info.get("description")
+                key=chunker_key,
+                name=chunker_info.get("name"),
+                description=chunker_info.get("description"),
+                metadata=chunker_info.get("metadata", {}),
             )
 
     vector_dbs = []
@@ -929,6 +953,8 @@ async def process_content(
     content: Content,
     reader_id: Optional[str] = None,
     chunker: Optional[str] = None,
+    chunk_size: Optional[int] = None,
+    chunk_overlap: Optional[int] = None,
 ):
     """Background task to process the content"""
 
@@ -951,7 +977,7 @@ async def process_content(
         content.reader = reader
     if chunker and content.reader:
         # Set the chunker name on the reader - let the reader handle it internally
-        content.reader.set_chunking_strategy_from_string(chunker)
+        content.reader.set_chunking_strategy_from_string(chunker, chunk_size=chunk_size, overlap=chunk_overlap)
         log_debug(f"Set chunking strategy: {chunker}")
 
     log_debug(f"Using reader: {content.reader.__class__.__name__}")
@@ -106,9 +106,10 @@ class ReaderSchema(BaseModel):
 
 
 class ChunkerSchema(BaseModel):
-    key: str = Field(..., description="Unique key for the chunker")
-    name: Optional[str] = Field(None, description="Name of the chunker")
-    description: Optional[str] = Field(None, description="Description of the chunking strategy")
+    key: str
+    name: Optional[str] = None
+    description: Optional[str] = None
+    metadata: Optional[Dict[str, Any]] = None
 
 
 class VectorDbSchema(BaseModel):
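
Usage sketch: ChunkerSchema gains a metadata field that surfaces chunker defaults such as chunk_size and chunk_overlap to API clients. A minimal construction example; the import path is an assumption, since the file header for this hunk is not shown in the diff, while the field names and example values come from the hunks above.

    from agno.os.schema import ChunkerSchema  # import path assumed

    chunker = ChunkerSchema(
        key="FixedSizeChunker",
        name="FixedSizeChunker",
        description="Chunking strategy that splits text into fixed-size chunks with optional overlap",
        metadata={"chunk_size": 5000, "chunk_overlap": 0},
    )
    print(chunker)
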
agno/run/agent.py CHANGED
@@ -696,7 +696,17 @@ class RunOutput:
             data = data.pop("run")
 
         events = data.pop("events", None)
-        events = [run_output_event_from_dict(event) for event in events] if events else None
+        final_events = []
+        for event in events or []:
+            if "agent_id" in event:
+                event = run_output_event_from_dict(event)
+            else:
+                # Use the factory from response.py for agent events
+                from agno.run.team import team_run_output_event_from_dict
+
+                event = team_run_output_event_from_dict(event)
+            final_events.append(event)
+        events = final_events
 
         messages = data.pop("messages", None)
         messages = [Message.from_dict(message) for message in messages] if messages else None
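
Usage sketch: RunOutput deserialization now dispatches each stored event on the presence of an "agent_id" key, so team events persisted alongside agent events no longer break rehydration. An illustrative, heavily hedged example; the from_dict entry point and the minimal event payload shapes are assumptions inferred from the surrounding code, not verified against the package.

    from agno.run.agent import RunOutput

    payload = {
        "run_id": "run-123",
        "events": [
            {"event": "RunStarted", "agent_id": "agent-1", "run_id": "run-123"},
            {"event": "TeamRunStarted", "team_id": "team-1", "run_id": "run-123"},
        ],
    }
    # Events with "agent_id" go through run_output_event_from_dict; the rest are
    # routed to team_run_output_event_from_dict (payload shapes assumed).
    run_output = RunOutput.from_dict(payload)
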
agno/team/team.py CHANGED
@@ -4271,7 +4271,7 @@ class Team:
         """Calculate session metrics"""
 
         session_messages: List[Message] = []
-        for run in session.runs:  # type: ignore
+        for run in session.runs or []:
             if run.messages is not None:
                 for m in run.messages:
                     # Skipping messages from history to avoid duplicates
@@ -7519,6 +7519,9 @@ class Team:
             session = self.db.get_session(session_id=session_id, session_type=session_type)
             return session  # type: ignore
         except Exception as e:
+            import traceback
+
+            traceback.print_exc(limit=3)
             log_warning(f"Error getting session from db: {e}")
             return None
 
@@ -7533,6 +7536,9 @@ class Team:
             session = await self.db.get_session(session_id=session_id, session_type=session_type)
             return session  # type: ignore
         except Exception as e:
+            import traceback
+
+            traceback.print_exc(limit=3)
             log_warning(f"Error getting session from db: {e}")
             return None
 
@@ -7544,6 +7550,9 @@ class Team:
                 raise ValueError("Db not initialized")
             return self.db.upsert_session(session=session)  # type: ignore
         except Exception as e:
+            import traceback
+
+            traceback.print_exc(limit=3)
             log_warning(f"Error upserting session into db: {e}")
             return None
 
@@ -7555,6 +7564,9 @@ class Team:
                 raise ValueError("Db not initialized")
             return await self.db.upsert_session(session=session)  # type: ignore
         except Exception as e:
+            import traceback
+
+            traceback.print_exc(limit=3)
             log_warning(f"Error upserting session into db: {e}")
             return None
 
@@ -116,7 +116,7 @@ class FileGenerationTools(Toolkit):
                 file_type="json",
                 filename=filename,
                 size=len(json_content.encode("utf-8")),
-                url=f"file://{file_path}" if file_path else None,
+                filepath=file_path if file_path else None,
             )
 
             log_debug("JSON file generated successfully")
@@ -203,7 +203,7 @@ class FileGenerationTools(Toolkit):
                 file_type="csv",
                 filename=filename,
                 size=len(csv_content.encode("utf-8")),
-                url=f"file://{file_path}" if file_path else None,
+                filepath=file_path if file_path else None,
            )
 
             log_debug("CSV file generated successfully")
@@ -287,7 +287,7 @@ class FileGenerationTools(Toolkit):
                 file_type="pdf",
                 filename=filename,
                 size=len(pdf_content),
-                url=f"file://{file_path}" if file_path else None,
+                filepath=file_path if file_path else None,
             )
 
             log_debug("PDF file generated successfully")
@@ -333,7 +333,7 @@ class FileGenerationTools(Toolkit):
                 file_type="txt",
                 filename=filename,
                 size=len(content.encode("utf-8")),
-                url=f"file://{file_path}" if file_path else None,
+                filepath=file_path if file_path else None,
             )
 
             log_debug("Text file generated successfully")
agno/tools/gmail.py CHANGED
@@ -141,6 +141,11 @@ class GmailTools(Toolkit):
             self.create_draft_email,
             self.send_email,
             self.send_email_reply,
+            # Label management
+            self.list_custom_labels,
+            self.apply_label,
+            self.remove_label,
+            self.delete_custom_label,
         ]
 
         super().__init__(name="gmail_tools", tools=tools, **kwargs)
@@ -161,6 +166,7 @@ class GmailTools(Toolkit):
             "get_emails_by_date",
             "get_emails_by_thread",
             "search_emails",
+            "list_custom_labels",
         ]
         modify_operations = ["mark_email_as_read", "mark_email_as_unread"]
         if any(read_operation in self.functions for read_operation in read_operations):
@@ -600,6 +606,179 @@ class GmailTools(Toolkit):
         except Exception as error:
             return f"Error marking email {message_id} as unread: {type(error).__name__}: {error}"
 
+    @authenticate
+    def list_custom_labels(self) -> str:
+        """
+        List only user-created custom labels (filters out system labels) in a numbered format.
+
+        Returns:
+            str: A numbered list of custom labels only
+        """
+        try:
+            results = self.service.users().labels().list(userId="me").execute()  # type: ignore
+            labels = results.get("labels", [])
+
+            # Filter out only user-created labels
+            custom_labels = [label["name"] for label in labels if label.get("type") == "user"]
+
+            if not custom_labels:
+                return "No custom labels found.\nCreate labels using apply_label function!"
+
+            # Create numbered list
+            numbered_labels = [f"{i}. {name}" for i, name in enumerate(custom_labels, 1)]
+            return f"Your Custom Labels ({len(custom_labels)} total):\n\n" + "\n".join(numbered_labels)
+
+        except HttpError as e:
+            return f"Error fetching labels: {e}"
+        except Exception as e:
+            return f"Unexpected error: {type(e).__name__}: {e}"
+
+    @authenticate
+    def apply_label(self, context: str, label_name: str, count: int = 10) -> str:
+        """
+        Find emails matching a context (search query) and apply a label, creating it if necessary.
+
+        Args:
+            context (str): Gmail search query (e.g., 'is:unread category:promotions')
+            label_name (str): Name of the label to apply
+            count (int): Maximum number of emails to process
+        Returns:
+            str: Summary of labeled emails
+        """
+        try:
+            # Fetch messages matching context
+            results = self.service.users().messages().list(userId="me", q=context, maxResults=count).execute()  # type: ignore
+
+            messages = results.get("messages", [])
+            if not messages:
+                return f"No emails found matching: '{context}'"
+
+            # Check if label exists, create if not
+            labels = self.service.users().labels().list(userId="me").execute().get("labels", [])  # type: ignore
+            label_id = None
+            for label in labels:
+                if label["name"].lower() == label_name.lower():
+                    label_id = label["id"]
+                    break
+
+            if not label_id:
+                label = (
+                    self.service.users()  # type: ignore
+                    .labels()
+                    .create(
+                        userId="me",
+                        body={"name": label_name, "labelListVisibility": "labelShow", "messageListVisibility": "show"},
+                    )
+                    .execute()
+                )
+                label_id = label["id"]
+
+            # Apply label to all matching messages
+            for msg in messages:
+                self.service.users().messages().modify(  # type: ignore
+                    userId="me", id=msg["id"], body={"addLabelIds": [label_id]}
+                ).execute()  # type: ignore
+
+            return f"Applied label '{label_name}' to {len(messages)} emails matching '{context}'."
+
+        except HttpError as e:
+            return f"Error applying label '{label_name}': {e}"
+        except Exception as e:
+            return f"Unexpected error: {type(e).__name__}: {e}"
+
+    @authenticate
+    def remove_label(self, context: str, label_name: str, count: int = 10) -> str:
+        """
+        Remove a label from emails matching a context (search query).
+
+        Args:
+            context (str): Gmail search query (e.g., 'is:unread category:promotions')
+            label_name (str): Name of the label to remove
+            count (int): Maximum number of emails to process
+        Returns:
+            str: Summary of emails with label removed
+        """
+        try:
+            # Get all labels to find the target label
+            labels = self.service.users().labels().list(userId="me").execute().get("labels", [])  # type: ignore
+            label_id = None
+
+            for label in labels:
+                if label["name"].lower() == label_name.lower():
+                    label_id = label["id"]
+                    break
+
+            if not label_id:
+                return f"Label '{label_name}' not found."
+
+            # Fetch messages matching context that have this label
+            results = (
+                self.service.users()  # type: ignore
+                .messages()
+                .list(userId="me", q=f"{context} label:{label_name}", maxResults=count)
+                .execute()
+            )
+
+            messages = results.get("messages", [])
+            if not messages:
+                return f"No emails found matching: '{context}' with label '{label_name}'"
+
+            # Remove label from all matching messages
+            removed_count = 0
+            for msg in messages:
+                self.service.users().messages().modify(  # type: ignore
+                    userId="me", id=msg["id"], body={"removeLabelIds": [label_id]}
+                ).execute()  # type: ignore
+                removed_count += 1
+
+            return f"Removed label '{label_name}' from {removed_count} emails matching '{context}'."
+
+        except HttpError as e:
+            return f"Error removing label '{label_name}': {e}"
+        except Exception as e:
+            return f"Unexpected error: {type(e).__name__}: {e}"
+
+    @authenticate
+    def delete_custom_label(self, label_name: str, confirm: bool = False) -> str:
+        """
+        Delete a custom label (with safety confirmation).
+
+        Args:
+            label_name (str): Name of the label to delete
+            confirm (bool): Must be True to actually delete the label
+        Returns:
+            str: Confirmation message or warning
+        """
+        if not confirm:
+            return f"LABEL DELETION REQUIRES CONFIRMATION. This will permanently delete the label '{label_name}' from all emails. Set confirm=True to proceed."
+
+        try:
+            # Get all labels to find the target label
+            labels = self.service.users().labels().list(userId="me").execute().get("labels", [])  # type: ignore
+            target_label = None
+
+            for label in labels:
+                if label["name"].lower() == label_name.lower():
+                    target_label = label
+                    break
+
+            if not target_label:
+                return f"Label '{label_name}' not found."
+
+            # Check if it's a system label using the type field
+            if target_label.get("type") != "user":
+                return f"Cannot delete system label '{label_name}'. Only user-created labels can be deleted."
+
+            # Delete the label
+            self.service.users().labels().delete(userId="me", id=target_label["id"]).execute()  # type: ignore
+
+            return f"Successfully deleted label '{label_name}'. This label has been removed from all emails."
+
+        except HttpError as e:
+            return f"Error deleting label '{label_name}': {e}"
+        except Exception as e:
+            return f"Unexpected error: {type(e).__name__}: {e}"
+
     def _validate_email_params(self, to: str, subject: str, body: str) -> None:
         """Validate email parameters."""
         if not to:
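
Usage sketch: the new label-management tools are registered on the toolkit and can also be called directly. A minimal example; the no-argument constructor assumes GmailTools' usual OAuth credential discovery, and the search query and label name are arbitrary placeholders.

    from agno.tools.gmail import GmailTools

    gmail = GmailTools()  # credentials resolved via the toolkit's @authenticate flow (assumed defaults)

    print(gmail.list_custom_labels())
    print(gmail.apply_label(context="is:unread category:promotions", label_name="Promos", count=5))
    print(gmail.remove_label(context="category:promotions", label_name="Promos", count=5))
    # Destructive: requires an explicit confirm=True.
    print(gmail.delete_custom_label(label_name="Promos", confirm=True))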