agno 2.4.0__py3-none-any.whl → 2.4.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (41)
  1. agno/db/firestore/firestore.py +58 -65
  2. agno/db/mysql/async_mysql.py +47 -55
  3. agno/db/postgres/async_postgres.py +52 -61
  4. agno/db/postgres/postgres.py +25 -12
  5. agno/db/sqlite/async_sqlite.py +52 -61
  6. agno/db/sqlite/sqlite.py +24 -11
  7. agno/integrations/discord/client.py +12 -1
  8. agno/knowledge/knowledge.py +1511 -47
  9. agno/knowledge/reader/csv_reader.py +231 -8
  10. agno/knowledge/reader/field_labeled_csv_reader.py +167 -3
  11. agno/knowledge/reader/reader_factory.py +8 -1
  12. agno/knowledge/remote_content/__init__.py +33 -0
  13. agno/knowledge/remote_content/config.py +266 -0
  14. agno/knowledge/remote_content/remote_content.py +105 -17
  15. agno/models/base.py +12 -2
  16. agno/models/cerebras/cerebras.py +34 -2
  17. agno/models/n1n/__init__.py +3 -0
  18. agno/models/n1n/n1n.py +57 -0
  19. agno/models/ollama/__init__.py +2 -0
  20. agno/models/ollama/responses.py +100 -0
  21. agno/models/openai/__init__.py +2 -0
  22. agno/models/openai/chat.py +18 -1
  23. agno/models/openai/open_responses.py +46 -0
  24. agno/models/openrouter/__init__.py +2 -0
  25. agno/models/openrouter/responses.py +146 -0
  26. agno/models/perplexity/perplexity.py +2 -0
  27. agno/os/interfaces/slack/router.py +10 -1
  28. agno/os/interfaces/whatsapp/router.py +6 -0
  29. agno/os/routers/components/components.py +10 -1
  30. agno/os/routers/knowledge/knowledge.py +125 -0
  31. agno/os/routers/knowledge/schemas.py +12 -0
  32. agno/run/agent.py +2 -0
  33. agno/team/team.py +20 -4
  34. agno/vectordb/lightrag/lightrag.py +7 -6
  35. agno/vectordb/milvus/milvus.py +79 -48
  36. agno/vectordb/pgvector/pgvector.py +3 -3
  37. {agno-2.4.0.dist-info → agno-2.4.2.dist-info}/METADATA +4 -1
  38. {agno-2.4.0.dist-info → agno-2.4.2.dist-info}/RECORD +41 -35
  39. {agno-2.4.0.dist-info → agno-2.4.2.dist-info}/WHEEL +1 -1
  40. {agno-2.4.0.dist-info → agno-2.4.2.dist-info}/licenses/LICENSE +0 -0
  41. {agno-2.4.0.dist-info → agno-2.4.2.dist-info}/top_level.txt +0 -0
agno/models/ollama/responses.py ADDED
@@ -0,0 +1,100 @@
+ from dataclasses import dataclass, field
+ from os import getenv
+ from typing import Any, Dict, Optional
+
+ from agno.models.openai.open_responses import OpenResponses
+ from agno.utils.log import log_debug
+
+
+ @dataclass
+ class OllamaResponses(OpenResponses):
+     """
+     A class for interacting with Ollama models using the OpenAI Responses API.
+
+     This uses Ollama's OpenAI-compatible `/v1/responses` endpoint, which was added
+     in Ollama v0.13.3. It allows using Ollama models with the Responses API format.
+
+     Note: Ollama's Responses API is stateless - it does not support `previous_response_id`
+     or conversation chaining. Each request is independent.
+
+     Requirements:
+     - Ollama v0.13.3 or later
+     - For local usage: Ollama server running at http://localhost:11434
+     - For Ollama Cloud: Set OLLAMA_API_KEY environment variable
+
+     For more information, see: https://docs.ollama.com/api/openai-compatibility
+
+     Attributes:
+         id (str): The model id. Defaults to "gpt-oss:20b".
+         name (str): The model name. Defaults to "OllamaResponses".
+         provider (str): The provider name. Defaults to "Ollama".
+         host (Optional[str]): The Ollama server host. Defaults to "http://localhost:11434".
+         api_key (Optional[str]): The API key for Ollama Cloud. Not required for local usage.
+     """
+
+     id: str = "gpt-oss:20b"
+     name: str = "OllamaResponses"
+     provider: str = "Ollama"
+
+     # Ollama server host - defaults to local instance
+     host: Optional[str] = None
+
+     # API key for Ollama Cloud (not required for local)
+     api_key: Optional[str] = field(default_factory=lambda: getenv("OLLAMA_API_KEY"))
+
+     # Ollama's Responses API is stateless
+     store: Optional[bool] = False
+
+     def _get_client_params(self) -> Dict[str, Any]:
+         """
+         Get client parameters for API requests.
+
+         Returns:
+             Dict[str, Any]: Client parameters including base_url and optional api_key.
+         """
+         # Determine the base URL
+         if self.host:
+             base_url = self.host.rstrip("/")
+             if not base_url.endswith("/v1"):
+                 base_url = f"{base_url}/v1"
+         elif self.api_key:
+             # Ollama Cloud
+             base_url = "https://ollama.com/v1"
+             log_debug(f"Using Ollama Cloud endpoint: {base_url}")
+         else:
+             # Local Ollama instance
+             base_url = "http://localhost:11434/v1"
+
+         # Build client params
+         base_params: Dict[str, Any] = {
+             "base_url": base_url,
+             "timeout": self.timeout,
+             "max_retries": self.max_retries,
+             "default_headers": self.default_headers,
+             "default_query": self.default_query,
+         }
+
+         # Add API key if provided (required for Ollama Cloud, ignored for local)
+         if self.api_key:
+             base_params["api_key"] = self.api_key
+         else:
+             # OpenAI client requires an api_key, but Ollama ignores it locally
+             base_params["api_key"] = "ollama"
+
+         # Filter out None values
+         client_params = {k: v for k, v in base_params.items() if v is not None}
+
+         # Add additional client params if provided
+         if self.client_params:
+             client_params.update(self.client_params)
+
+         return client_params
+
+     def _using_reasoning_model(self) -> bool:
+         """
+         Ollama doesn't have native reasoning models like OpenAI's o-series.
+
+         Some models may support thinking/reasoning through their architecture
+         (like DeepSeek-R1), but they don't use OpenAI's reasoning API format.
+         """
+         return False
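Taken together with the docstring above, minimal usage would look like the sketch below, mirroring the `Agent` example in the `OpenRouterResponses` docstring later in this diff (the prompt is a placeholder):

```python
from agno.agent import Agent
from agno.models.ollama import OllamaResponses

# Local Ollama (v0.13.3+) at the default http://localhost:11434; no api_key needed
agent = Agent(
    model=OllamaResponses(id="gpt-oss:20b"),
    markdown=True,
)
agent.print_response("Write a haiku about coding")

# For Ollama Cloud, set OLLAMA_API_KEY instead; the client then targets
# https://ollama.com/v1 (see _get_client_params above).
```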
agno/models/openai/__init__.py CHANGED
@@ -1,9 +1,11 @@
  from agno.models.openai.chat import OpenAIChat
  from agno.models.openai.like import OpenAILike
+ from agno.models.openai.open_responses import OpenResponses
  from agno.models.openai.responses import OpenAIResponses

  __all__ = [
      "OpenAIChat",
      "OpenAILike",
      "OpenAIResponses",
+     "OpenResponses",
  ]
agno/models/openai/chat.py CHANGED
@@ -43,6 +43,8 @@ class OpenAIChat(Model):
      name: str = "OpenAIChat"
      provider: str = "OpenAI"
      supports_native_structured_outputs: bool = True
+     # If True, only collect metrics on the final streaming chunk (for providers with cumulative token counts)
+     collect_metrics_on_completion: bool = False

      # Request parameters
      store: Optional[bool] = None
@@ -752,6 +754,21 @@ class OpenAIChat(Model):
              tool_call_entry["type"] = _tool_call_type
          return tool_calls

+     def _should_collect_metrics(self, response: ChatCompletionChunk) -> bool:
+         """
+         Determine if metrics should be collected from the response.
+         """
+         if not response.usage:
+             return False
+
+         if not self.collect_metrics_on_completion:
+             return True
+
+         if not response.choices:
+             return False
+
+         return response.choices[0].finish_reason is not None
+
      def _parse_provider_response(
          self,
          response: ChatCompletion,
@@ -920,7 +937,7 @@ class OpenAIChat(Model):
              log_warning(f"Error processing audio: {e}")

          # Add usage metrics if present
-         if response_delta.usage is not None:
+         if self._should_collect_metrics(response_delta) and response_delta.usage is not None:
              model_response.response_usage = self._get_metrics(response_delta.usage)

          return model_response
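The interplay of these two changes is easiest to see in isolation: some providers report *cumulative* token counts on every streaming chunk, so summing per-chunk usage overcounts. Below is a self-contained sketch of the same gating logic, using hypothetical stand-in objects rather than the real `ChatCompletionChunk`:

```python
from dataclasses import dataclass, field
from typing import List, Optional


@dataclass
class Choice:  # stand-in for a streaming chunk choice
    finish_reason: Optional[str] = None


@dataclass
class Chunk:  # stand-in chunk: usage here is cumulative, as Perplexity reports it
    usage: Optional[int] = None
    choices: List[Choice] = field(default_factory=list)


def should_collect_metrics(chunk: Chunk, collect_on_completion: bool) -> bool:
    """Mirrors the OpenAIChat._should_collect_metrics logic above."""
    if not chunk.usage:
        return False
    if not collect_on_completion:
        return True
    if not chunk.choices:
        return False
    return chunk.choices[0].finish_reason is not None


stream = [
    Chunk(usage=10, choices=[Choice()]),
    Chunk(usage=25, choices=[Choice()]),
    Chunk(usage=40, choices=[Choice(finish_reason="stop")]),  # final chunk, cumulative total
]

# With the flag on, only the final chunk's usage is recorded: [40], not [10, 25, 40]
print([c.usage for c in stream if should_collect_metrics(c, collect_on_completion=True)])
```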
agno/models/openai/open_responses.py ADDED
@@ -0,0 +1,46 @@
+ from dataclasses import dataclass
+ from typing import Optional
+
+ from agno.models.openai.responses import OpenAIResponses
+
+
+ @dataclass
+ class OpenResponses(OpenAIResponses):
+     """
+     A base class for interacting with any provider using the Open Responses API specification.
+
+     Open Responses is an open-source specification for building multi-provider, interoperable
+     LLM interfaces based on the OpenAI Responses API. This class provides a foundation for
+     providers that implement the spec (e.g., Ollama, OpenRouter).
+
+     For more information, see: https://openresponses.org
+
+     Key differences from OpenAIResponses:
+     - Configurable base_url for pointing to different API endpoints
+     - Stateless by default (no previous_response_id chaining)
+     - Flexible api_key handling for providers that don't require authentication
+
+     Args:
+         id (str): The model id. Defaults to "not-provided".
+         name (str): The model name. Defaults to "OpenResponses".
+         api_key (Optional[str]): The API key. Defaults to "not-provided".
+     """
+
+     id: str = "not-provided"
+     name: str = "OpenResponses"
+     provider: str = "OpenResponses"
+     api_key: Optional[str] = "not-provided"
+
+     # Disable stateful features by default for compatible providers
+     # Most OpenAI-compatible providers don't support previous_response_id chaining
+     store: Optional[bool] = False
+
+     def _using_reasoning_model(self) -> bool:
+         """
+         Override to disable reasoning model detection for compatible providers.
+
+         Most compatible providers don't support OpenAI's reasoning models,
+         so we disable the special handling by default. Subclasses can override
+         this if they support specific reasoning models.
+         """
+         return False
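Given that `OllamaResponses` (above) and `OpenRouterResponses` (below) both build on this class, a new provider integration is mostly a matter of defaults. A hypothetical sketch, assuming `base_url` is the inherited knob the docstring's "configurable base_url" point refers to (the provider name and endpoint are placeholders):

```python
from dataclasses import dataclass
from typing import Optional

from agno.models.openai.open_responses import OpenResponses


@dataclass
class AcmeResponses(OpenResponses):
    # Hypothetical provider: only the defaults change; the endpoint is a placeholder
    id: str = "acme-small"
    name: str = "AcmeResponses"
    provider: str = "Acme"
    base_url: Optional[str] = "https://api.acme.example/v1"
    api_key: Optional[str] = None  # fetch from env in _get_client_params if required
```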
agno/models/openrouter/__init__.py CHANGED
@@ -1,5 +1,7 @@
  from agno.models.openrouter.openrouter import OpenRouter
+ from agno.models.openrouter.responses import OpenRouterResponses

  __all__ = [
      "OpenRouter",
+     "OpenRouterResponses",
  ]
agno/models/openrouter/responses.py ADDED
@@ -0,0 +1,146 @@
+ from dataclasses import dataclass
+ from os import getenv
+ from typing import Any, Dict, List, Optional, Type, Union
+
+ from pydantic import BaseModel
+
+ from agno.exceptions import ModelAuthenticationError
+ from agno.models.openai.open_responses import OpenResponses
+ from agno.models.message import Message
+
+
+ @dataclass
+ class OpenRouterResponses(OpenResponses):
+     """
+     A class for interacting with OpenRouter models using the OpenAI Responses API.
+
+     OpenRouter's Responses API (currently in beta) provides OpenAI-compatible access
+     to multiple AI models through a unified interface. It supports tools, reasoning,
+     streaming, and plugins.
+
+     Note: OpenRouter's Responses API is stateless - each request is independent and
+     no server-side state is persisted.
+
+     For more information, see: https://openrouter.ai/docs/api/reference/responses/overview
+
+     Attributes:
+         id (str): The model id. Defaults to "openai/gpt-oss-20b".
+         name (str): The model name. Defaults to "OpenRouterResponses".
+         provider (str): The provider name. Defaults to "OpenRouter".
+         api_key (Optional[str]): The API key. Uses OPENROUTER_API_KEY env var if not set.
+         base_url (str): The base URL. Defaults to "https://openrouter.ai/api/v1".
+         models (Optional[List[str]]): List of fallback model IDs to use if the primary model
+             fails due to rate limits, timeouts, or unavailability. OpenRouter will automatically
+             try these models in order. Example: ["anthropic/claude-sonnet-4", "deepseek/deepseek-r1"]
+
+     Example:
+         ```python
+         from agno.agent import Agent
+         from agno.models.openrouter import OpenRouterResponses
+
+         agent = Agent(
+             model=OpenRouterResponses(id="anthropic/claude-sonnet-4"),
+             markdown=True,
+         )
+         agent.print_response("Write a haiku about coding")
+         ```
+     """
+
+     id: str = "openai/gpt-oss-20b"
+     name: str = "OpenRouterResponses"
+     provider: str = "OpenRouter"
+
+     api_key: Optional[str] = None
+     base_url: str = "https://openrouter.ai/api/v1"
+
+     # Dynamic model routing - fallback models if primary fails
+     # https://openrouter.ai/docs/features/model-routing
+     models: Optional[List[str]] = None
+
+     # OpenRouter's Responses API is stateless
+     store: Optional[bool] = False
+
+     def _get_client_params(self) -> Dict[str, Any]:
+         """
+         Returns client parameters for API requests, checking for OPENROUTER_API_KEY.
+
+         Returns:
+             Dict[str, Any]: A dictionary of client parameters for API requests.
+
+         Raises:
+             ModelAuthenticationError: If OPENROUTER_API_KEY is not set.
+         """
+         # Fetch API key from env if not already set
+         if not self.api_key:
+             self.api_key = getenv("OPENROUTER_API_KEY")
+             if not self.api_key:
+                 raise ModelAuthenticationError(
+                     message="OPENROUTER_API_KEY not set. Please set the OPENROUTER_API_KEY environment variable.",
+                     model_name=self.name,
+                 )
+
+         # Build client params
+         base_params: Dict[str, Any] = {
+             "api_key": self.api_key,
+             "base_url": self.base_url,
+             "organization": self.organization,
+             "timeout": self.timeout,
+             "max_retries": self.max_retries,
+             "default_headers": self.default_headers,
+             "default_query": self.default_query,
+         }
+
+         # Filter out None values
+         client_params = {k: v for k, v in base_params.items() if v is not None}
+
+         # Add additional client params if provided
+         if self.client_params:
+             client_params.update(self.client_params)
+
+         return client_params
+
+     def get_request_params(
+         self,
+         messages: Optional[List[Message]] = None,
+         response_format: Optional[Union[Dict, Type[BaseModel]]] = None,
+         tools: Optional[List[Dict[str, Any]]] = None,
+         tool_choice: Optional[Union[str, Dict[str, Any]]] = None,
+     ) -> Dict[str, Any]:
+         """
+         Returns keyword arguments for API requests, including fallback models configuration.
+
+         Returns:
+             Dict[str, Any]: A dictionary of keyword arguments for API requests.
+         """
+         # Get base request params from parent class
+         request_params = super().get_request_params(
+             messages=messages,
+             response_format=response_format,
+             tools=tools,
+             tool_choice=tool_choice,
+         )
+
+         # Add fallback models to extra_body if specified
+         if self.models:
+             # Get existing extra_body or create new dict
+             extra_body = request_params.get("extra_body") or {}
+
+             # Merge fallback models into extra_body
+             extra_body["models"] = self.models
+
+             # Update request params
+             request_params["extra_body"] = extra_body
+
+         return request_params
+
+     def _using_reasoning_model(self) -> bool:
+         """
+         Check if the model is a reasoning model that requires special handling.
+
+         OpenRouter hosts various reasoning models, but they may not all use
+         OpenAI's reasoning API format. We check for known reasoning model patterns.
+         """
+         # Check for OpenAI reasoning models hosted on OpenRouter
+         if self.id.startswith("openai/o3") or self.id.startswith("openai/o4"):
+             return True
+         return False
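Beyond the docstring example, the `models` fallback list is the piece specific to OpenRouter; it is forwarded via `extra_body` in `get_request_params` above. A short sketch, with model IDs taken from the docstring:

```python
from agno.agent import Agent
from agno.models.openrouter import OpenRouterResponses

# Primary model plus ordered fallbacks: OpenRouter retries these on rate
# limits, timeouts, or unavailability (sent as extra_body["models"])
model = OpenRouterResponses(
    id="openai/gpt-oss-20b",
    models=["anthropic/claude-sonnet-4", "deepseek/deepseek-r1"],
)

agent = Agent(model=model, markdown=True)
agent.print_response("Write a haiku about coding")
```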
agno/models/perplexity/perplexity.py CHANGED
@@ -41,6 +41,8 @@ class Perplexity(OpenAILike):
      id: str = "sonar"
      name: str = "Perplexity"
      provider: str = "Perplexity"
+     # Perplexity returns cumulative token counts in each streaming chunk, so only collect on final chunk
+     collect_metrics_on_completion: bool = True

      api_key: Optional[str] = field(default_factory=lambda: getenv("PERPLEXITY_API_KEY"))
      base_url: str = "https://api.perplexity.ai/"
agno/os/interfaces/slack/router.py CHANGED
@@ -7,7 +7,7 @@ from agno.agent import Agent, RemoteAgent
  from agno.os.interfaces.slack.security import verify_slack_signature
  from agno.team import RemoteTeam, Team
  from agno.tools.slack import SlackTools
- from agno.utils.log import log_info
+ from agno.utils.log import log_error, log_info
  from agno.workflow import RemoteWorkflow, Workflow


@@ -112,6 +112,15 @@ def attach_routes(
              response = await workflow.arun(message_text, user_id=user, session_id=session_id)  # type: ignore

          if response:
+             if response.status == "ERROR":
+                 log_error(f"Error processing message: {response.content}")
+                 _send_slack_message(
+                     channel=channel_id,
+                     message="Sorry, there was an error processing your message. Please try again later.",
+                     thread_ts=ts,
+                 )
+                 return
+
              if hasattr(response, "reasoning_content") and response.reasoning_content:
                  _send_slack_message(
                      channel=channel_id,
agno/os/interfaces/whatsapp/router.py CHANGED
@@ -162,6 +162,12 @@
              videos=[Video(content=await get_media_async(message_video))] if message_video else None,
              audio=[Audio(content=await get_media_async(message_audio))] if message_audio else None,
          )
+         if response.status == "ERROR":
+             await _send_whatsapp_message(
+                 phone_number, "Sorry, there was an error processing your message. Please try again later."
+             )
+             log_error(response.content)
+             return

          if response.reasoning_content:
              await _send_whatsapp_message(phone_number, f"Reasoning: \n{response.reasoning_content}", italics=True)
agno/os/routers/components/components.py CHANGED
@@ -25,7 +25,7 @@ from agno.os.schema import (
  )
  from agno.os.settings import AgnoAPISettings
  from agno.registry import Registry
- from agno.utils.log import log_error
+ from agno.utils.log import log_error, log_warning
  from agno.utils.string import generate_id_from_name

  logger = logging.getLogger(__name__)
@@ -167,6 +167,15 @@ def attach_routes(
          config = body.config or {}
          config = _resolve_db_in_config(config, db, registry)

+         # Warn if creating a team without members
+         if body.component_type == ComponentType.TEAM:
+             members = config.get("members")
+             if not members or len(members) == 0:
+                 log_warning(
+                     f"Creating team '{body.name}' without members. "
+                     "If this is unintended, add members to the config."
+                 )
+
          component, _config = db.create_component_with_config(
              component_id=component_id,
              component_type=DbComponentType(body.component_type.value),
agno/os/routers/knowledge/knowledge.py CHANGED
@@ -206,6 +206,113 @@ def attach_routes(router: APIRouter, knowledge_instances: List[Union[Knowledge,
          )
          return response

+     @router.post(
+         "/knowledge/remote-content",
+         response_model=ContentResponseSchema,
+         status_code=202,
+         operation_id="upload_remote_content",
+         summary="Upload Remote Content",
+         description=(
+             "Upload content from a remote source (S3, GCS, SharePoint, GitHub) to the knowledge base. "
+             "Content is processed asynchronously in the background. "
+             "Use the /knowledge/config endpoint to see available remote content sources."
+         ),
+         responses={
+             202: {
+                 "description": "Remote content upload accepted for processing",
+                 "content": {
+                     "application/json": {
+                         "example": {
+                             "id": "content-456",
+                             "name": "reports/q1-2024.pdf",
+                             "description": "Q1 Report from S3",
+                             "metadata": {"source": "s3-docs"},
+                             "status": "processing",
+                         }
+                     }
+                 },
+             },
+             400: {
+                 "description": "Invalid request - unknown config or missing path",
+                 "model": BadRequestResponse,
+             },
+             422: {"description": "Validation error in request body", "model": ValidationErrorResponse},
+         },
+     )
+     async def upload_remote_content(
+         request: Request,
+         background_tasks: BackgroundTasks,
+         config_id: str = Form(..., description="ID of the configured remote content source (from /knowledge/config)"),
+         path: str = Form(..., description="Path to file or folder in the remote source"),
+         name: Optional[str] = Form(None, description="Content name (auto-generated if not provided)"),
+         description: Optional[str] = Form(None, description="Content description"),
+         metadata: Optional[str] = Form(None, description="JSON metadata object"),
+         reader_id: Optional[str] = Form(None, description="ID of the reader to use for processing"),
+         chunker: Optional[str] = Form(None, description="Chunking strategy to apply"),
+         chunk_size: Optional[int] = Form(None, description="Chunk size for processing"),
+         chunk_overlap: Optional[int] = Form(None, description="Chunk overlap for processing"),
+         db_id: Optional[str] = Query(default=None, description="Database ID to use for content storage"),
+     ):
+         knowledge = get_knowledge_instance_by_db_id(knowledge_instances, db_id)
+
+         if isinstance(knowledge, RemoteKnowledge):
+             # TODO: Forward to remote knowledge instance
+             raise HTTPException(status_code=501, detail="Remote content upload not yet supported for RemoteKnowledge")
+
+         # Validate that the config_id exists in configured sources
+         config = knowledge._get_remote_config_by_id(config_id)
+         if config is None:
+             raise HTTPException(
+                 status_code=400,
+                 detail=f"Unknown content source: {config_id}. Check /knowledge/config for available sources.",
+             )
+
+         # Parse metadata if provided
+         parsed_metadata = None
+         if metadata:
+             try:
+                 parsed_metadata = json.loads(metadata)
+             except json.JSONDecodeError:
+                 parsed_metadata = {"value": metadata}
+
+         # Use the config's factory methods to create the remote content object
+         # If path ends with '/', treat as folder, otherwise treat as file
+         is_folder = path.endswith("/")
+         if is_folder:
+             if hasattr(config, "folder"):
+                 remote_content = config.folder(path.rstrip("/"))
+             else:
+                 raise HTTPException(status_code=400, detail=f"Config {config_id} does not support folder uploads")
+         else:
+             if hasattr(config, "file"):
+                 remote_content = config.file(path)
+             else:
+                 raise HTTPException(status_code=400, detail=f"Config {config_id} does not support file uploads")
+
+         # Set name from path if not provided
+         content_name = name or path
+
+         content = Content(
+             name=content_name,
+             description=description,
+             metadata=parsed_metadata,
+             remote_content=remote_content,
+         )
+         content_hash = knowledge._build_content_hash(content)
+         content.content_hash = content_hash
+         content.id = generate_id(content_hash)
+
+         background_tasks.add_task(process_content, knowledge, content, reader_id, chunker, chunk_size, chunk_overlap)
+
+         response = ContentResponseSchema(
+             id=content.id,
+             name=content_name,
+             description=description,
+             metadata=parsed_metadata,
+             status=ContentStatus.PROCESSING,
+         )
+         return response
+
      @router.patch(
          "/knowledge/content/{content_id}",
          response_model=ContentResponseSchema,
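From the client side, the new endpoint accepts form fields matching the `Form(...)` parameters above. A hypothetical call (the host, `config_id`, and `path` are placeholders; the payload mirrors the 202 example documented above):

```python
import requests  # assumes an AgnoOS API reachable at this placeholder host

resp = requests.post(
    "http://localhost:7777/knowledge/remote-content",
    data={
        "config_id": "s3-docs",               # a source id listed by /knowledge/config
        "path": "reports/q1-2024.pdf",        # a trailing "/" would request a folder upload
        "description": "Q1 Report from S3",
        "metadata": '{"source": "s3-docs"}',  # JSON string; parsed server-side
    },
)
print(resp.status_code)       # 202 - accepted, processed in the background
print(resp.json()["status"])  # "processing"
```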
@@ -1048,12 +1155,30 @@ def attach_routes(router: APIRouter, knowledge_instances: List[Union[Knowledge,
              )
          )
          filters = await knowledge.aget_valid_filters()
+
+         # Get remote content sources if available
+         remote_content_sources = None
+         if hasattr(knowledge, "_get_remote_configs") and callable(knowledge._get_remote_configs):
+             remote_configs = knowledge._get_remote_configs()
+             if remote_configs:
+                 from agno.os.routers.knowledge.schemas import RemoteContentSourceSchema
+
+                 remote_content_sources = [
+                     RemoteContentSourceSchema(
+                         id=config.id,
+                         name=config.name,
+                         type=config.__class__.__name__.replace("Config", "").lower(),
+                         metadata=config.metadata,
+                     )
+                     for config in remote_configs
+                 ]
          return ConfigResponseSchema(
              readers=reader_schemas,
              vector_dbs=vector_dbs,
              readersForType=types_of_readers,
              chunkers=chunkers_dict,
              filters=filters,
+             remote_content_sources=remote_content_sources,
          )

      return router
agno/os/routers/knowledge/schemas.py CHANGED
@@ -170,9 +170,21 @@ class VectorSearchRequestSchema(BaseModel):
      )


+ class RemoteContentSourceSchema(BaseModel):
+     """Schema for remote content source configuration."""
+
+     id: str = Field(..., description="Unique identifier for the content source")
+     name: str = Field(..., description="Display name for the content source")
+     type: str = Field(..., description="Type of content source (s3, gcs, sharepoint, github, azureblob)")
+     metadata: Optional[Dict[str, Any]] = Field(None, description="Custom metadata for the content source")
+
+
  class ConfigResponseSchema(BaseModel):
      readers: Optional[Dict[str, ReaderSchema]] = Field(None, description="Available content readers")
      readersForType: Optional[Dict[str, List[str]]] = Field(None, description="Mapping of content types to reader IDs")
      chunkers: Optional[Dict[str, ChunkerSchema]] = Field(None, description="Available chunking strategies")
      filters: Optional[List[str]] = Field(None, description="Available filter tags")
      vector_dbs: Optional[List[VectorDbSchema]] = Field(None, description="Configured vector databases")
+     remote_content_sources: Optional[List[RemoteContentSourceSchema]] = Field(
+         None, description="Configured remote content sources (S3, GCS, SharePoint, GitHub)"
+     )
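As a rough illustration of what this adds to the `/knowledge/config` payload, a `RemoteContentSourceSchema` entry serializes to a small JSON object (values below are invented; `model_dump()` assumes Pydantic v2):

```python
from agno.os.routers.knowledge.schemas import RemoteContentSourceSchema

source = RemoteContentSourceSchema(
    id="s3-docs",
    name="Company Docs (S3)",
    type="s3",  # derived from the config class name, e.g. S3Config -> "s3"
    metadata={"bucket": "company-docs"},
)
print(source.model_dump())
# {'id': 's3-docs', 'name': 'Company Docs (S3)', 'type': 's3', 'metadata': {'bucket': 'company-docs'}}
```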
agno/run/agent.py CHANGED
@@ -485,6 +485,8 @@ class CompressionCompletedEvent(BaseAgentRunEvent):
  @dataclass
  class CustomEvent(BaseAgentRunEvent):
      event: str = RunEvent.custom_event.value
+     # tool_call_id for ToolExecution
+     tool_call_id: Optional[str] = None

      def __init__(self, **kwargs):
          # Store arbitrary attributes directly on the instance
agno/team/team.py CHANGED
@@ -7852,7 +7852,7 @@ class Team:
              stream_events=stream_events or self.stream_member_events,
              debug_mode=debug_mode,
              knowledge_filters=run_context.knowledge_filters
-             if not member_agent.knowledge_filters and member_agent.knowledge
+             if not agent.knowledge_filters and agent.knowledge
              else None,
              dependencies=run_context.dependencies,
              add_dependencies_to_context=add_dependencies_to_context,
@@ -7877,7 +7877,7 @@
          finally:
              _process_delegate_task_to_member(
                  member_agent_run_response,
-                 member_agent,
+                 agent,
                  member_agent_task,  # type: ignore
                  member_session_state_copy,  # type: ignore
              )
@@ -7915,10 +7915,15 @@
          current_agent = member_agent
          member_agent_task, history = _setup_delegate_task_to_member(member_agent=current_agent, task=task)

-         async def run_member_agent(agent=current_agent) -> str:
+         async def run_member_agent(
+             member_agent=current_agent,
+             member_agent_task=member_agent_task,
+             history=history,
+             member_agent_index=member_agent_index,
+         ) -> str:
              member_session_state_copy = copy(run_context.session_state)

-             member_agent_run_response = await agent.arun(
+             member_agent_run_response = await member_agent.arun(
                  input=member_agent_task if not history else history,
                  user_id=user_id,
                  # All members have the same session_id
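The extra default arguments here are the standard fix for Python's late-binding closures: `run_member_agent` is defined inside a loop, and without binding at definition time every coroutine would read the loop variables' final values when it eventually runs. A minimal demonstration of the pitfall and the fix:

```python
import asyncio


async def main() -> None:
    naive, fixed = [], []
    for i in range(3):
        async def run_naive():
            return i  # late binding: "i" is resolved when the coroutine runs

        async def run_fixed(i=i):
            return i  # default argument captures the current value of "i"

        naive.append(run_naive)
        fixed.append(run_fixed)

    print(await asyncio.gather(*(f() for f in naive)))  # [2, 2, 2]
    print(await asyncio.gather(*(f() for f in fixed)))  # [0, 1, 2]


asyncio.run(main())
```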
@@ -8536,6 +8541,7 @@
          # --- Handle Members reconstruction ---
          members: Optional[List[Union[Agent, "Team"]]] = None
          from agno.agent import get_agent_by_id
+         from agno.team import get_team_by_id

          if "members" in config and config["members"]:
              members = []
@@ -8551,6 +8557,16 @@
                          members.append(agent)
                      else:
                          log_warning(f"Agent not found: {member_data['agent_id']}")
+                 elif member_type == "team":
+                     # Handle nested teams as members
+                     if db is None:
+                         log_warning(f"Cannot load member team {member_data['team_id']}: db is None")
+                         continue
+                     nested_team = get_team_by_id(id=member_data["team_id"], db=db, registry=registry)
+                     if nested_team:
+                         members.append(nested_team)
+                     else:
+                         log_warning(f"Team not found: {member_data['team_id']}")

          # --- Handle reasoning_model reconstruction ---
          # TODO: implement reasoning model deserialization