agno 2.0.11__py3-none-any.whl → 2.1.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (93)
  1. agno/agent/agent.py +607 -176
  2. agno/db/in_memory/in_memory_db.py +42 -29
  3. agno/db/mongo/mongo.py +65 -66
  4. agno/db/postgres/postgres.py +6 -4
  5. agno/db/utils.py +50 -22
  6. agno/exceptions.py +62 -1
  7. agno/guardrails/__init__.py +6 -0
  8. agno/guardrails/base.py +19 -0
  9. agno/guardrails/openai.py +144 -0
  10. agno/guardrails/pii.py +94 -0
  11. agno/guardrails/prompt_injection.py +51 -0
  12. agno/knowledge/embedder/aws_bedrock.py +9 -4
  13. agno/knowledge/embedder/azure_openai.py +54 -0
  14. agno/knowledge/embedder/base.py +2 -0
  15. agno/knowledge/embedder/cohere.py +184 -5
  16. agno/knowledge/embedder/google.py +79 -1
  17. agno/knowledge/embedder/huggingface.py +9 -4
  18. agno/knowledge/embedder/jina.py +63 -0
  19. agno/knowledge/embedder/mistral.py +78 -11
  20. agno/knowledge/embedder/ollama.py +5 -0
  21. agno/knowledge/embedder/openai.py +18 -54
  22. agno/knowledge/embedder/voyageai.py +69 -16
  23. agno/knowledge/knowledge.py +11 -4
  24. agno/knowledge/reader/pdf_reader.py +4 -3
  25. agno/knowledge/reader/website_reader.py +3 -2
  26. agno/models/base.py +125 -32
  27. agno/models/cerebras/cerebras.py +1 -0
  28. agno/models/cerebras/cerebras_openai.py +1 -0
  29. agno/models/dashscope/dashscope.py +1 -0
  30. agno/models/google/gemini.py +27 -5
  31. agno/models/openai/chat.py +13 -4
  32. agno/models/openai/responses.py +1 -1
  33. agno/models/perplexity/perplexity.py +2 -3
  34. agno/models/requesty/__init__.py +5 -0
  35. agno/models/requesty/requesty.py +49 -0
  36. agno/models/vllm/vllm.py +1 -0
  37. agno/models/xai/xai.py +1 -0
  38. agno/os/app.py +98 -126
  39. agno/os/interfaces/__init__.py +1 -0
  40. agno/os/interfaces/agui/agui.py +21 -5
  41. agno/os/interfaces/base.py +4 -2
  42. agno/os/interfaces/slack/slack.py +13 -8
  43. agno/os/interfaces/whatsapp/router.py +2 -0
  44. agno/os/interfaces/whatsapp/whatsapp.py +12 -5
  45. agno/os/mcp.py +2 -2
  46. agno/os/middleware/__init__.py +7 -0
  47. agno/os/middleware/jwt.py +233 -0
  48. agno/os/router.py +182 -46
  49. agno/os/routers/home.py +2 -2
  50. agno/os/routers/memory/memory.py +23 -1
  51. agno/os/routers/memory/schemas.py +1 -1
  52. agno/os/routers/session/session.py +20 -3
  53. agno/os/utils.py +74 -8
  54. agno/run/agent.py +120 -77
  55. agno/run/base.py +2 -13
  56. agno/run/team.py +115 -72
  57. agno/run/workflow.py +5 -15
  58. agno/session/summary.py +9 -10
  59. agno/session/team.py +2 -1
  60. agno/team/team.py +721 -169
  61. agno/tools/firecrawl.py +4 -4
  62. agno/tools/function.py +42 -2
  63. agno/tools/knowledge.py +3 -3
  64. agno/tools/searxng.py +2 -2
  65. agno/tools/serper.py +2 -2
  66. agno/tools/spider.py +2 -2
  67. agno/tools/workflow.py +4 -5
  68. agno/utils/events.py +66 -1
  69. agno/utils/hooks.py +57 -0
  70. agno/utils/media.py +11 -9
  71. agno/utils/print_response/agent.py +43 -5
  72. agno/utils/print_response/team.py +48 -12
  73. agno/utils/serialize.py +32 -0
  74. agno/vectordb/cassandra/cassandra.py +44 -4
  75. agno/vectordb/chroma/chromadb.py +79 -8
  76. agno/vectordb/clickhouse/clickhousedb.py +43 -6
  77. agno/vectordb/couchbase/couchbase.py +76 -5
  78. agno/vectordb/lancedb/lance_db.py +38 -3
  79. agno/vectordb/milvus/milvus.py +76 -4
  80. agno/vectordb/mongodb/mongodb.py +76 -4
  81. agno/vectordb/pgvector/pgvector.py +50 -6
  82. agno/vectordb/pineconedb/pineconedb.py +39 -2
  83. agno/vectordb/qdrant/qdrant.py +76 -26
  84. agno/vectordb/singlestore/singlestore.py +77 -4
  85. agno/vectordb/upstashdb/upstashdb.py +42 -2
  86. agno/vectordb/weaviate/weaviate.py +39 -3
  87. agno/workflow/types.py +5 -6
  88. agno/workflow/workflow.py +58 -2
  89. {agno-2.0.11.dist-info → agno-2.1.1.dist-info}/METADATA +4 -3
  90. {agno-2.0.11.dist-info → agno-2.1.1.dist-info}/RECORD +93 -82
  91. {agno-2.0.11.dist-info → agno-2.1.1.dist-info}/WHEEL +0 -0
  92. {agno-2.0.11.dist-info → agno-2.1.1.dist-info}/licenses/LICENSE +0 -0
  93. {agno-2.0.11.dist-info → agno-2.1.1.dist-info}/top_level.txt +0 -0
agno/models/google/gemini.py CHANGED
@@ -26,6 +26,7 @@ try:
     from google.genai.types import (
         Content,
         DynamicRetrievalConfig,
+        FunctionCallingConfigMode,
        GenerateContentConfig,
        GenerateContentResponse,
        GenerateContentResponseUsageMetadata,
@@ -150,6 +151,7 @@ class Gemini(Model):
         system_message: Optional[str] = None,
         response_format: Optional[Union[Dict, Type[BaseModel]]] = None,
         tools: Optional[List[Dict[str, Any]]] = None,
+        tool_choice: Optional[Union[str, Dict[str, Any]]] = None,
     ) -> Dict[str, Any]:
         """
         Returns the request keyword arguments for the GenerativeModel client.
@@ -245,6 +247,18 @@ class Gemini(Model):
         elif tools:
             config["tools"] = [format_function_definitions(tools)]
 
+        if tool_choice is not None:
+            if isinstance(tool_choice, str) and tool_choice.lower() == "auto":
+                config["tool_config"] = {"function_calling_config": {"mode": FunctionCallingConfigMode.AUTO}}
+            elif isinstance(tool_choice, str) and tool_choice.lower() == "none":
+                config["tool_config"] = {"function_calling_config": {"mode": FunctionCallingConfigMode.NONE}}
+            elif isinstance(tool_choice, str) and tool_choice.lower() == "validated":
+                config["tool_config"] = {"function_calling_config": {"mode": FunctionCallingConfigMode.VALIDATED}}
+            elif isinstance(tool_choice, str) and tool_choice.lower() == "any":
+                config["tool_config"] = {"function_calling_config": {"mode": FunctionCallingConfigMode.ANY}}
+            else:
+                config["tool_config"] = {"function_calling_config": {"mode": tool_choice}}
+
         config = {k: v for k, v in config.items() if v is not None}
 
         if config:
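
The new tool_choice handling above maps the common string values onto google.genai's FunctionCallingConfigMode and passes anything else through untouched. A minimal standalone sketch of that resolution logic (resolve_tool_config is a hypothetical helper, not part of agno; FunctionCallingConfigMode is the real enum imported in the first hunk):

    from typing import Any, Dict, Union

    from google.genai.types import FunctionCallingConfigMode

    def resolve_tool_config(tool_choice: Union[str, Dict[str, Any]]) -> Dict[str, Any]:
        # Known strings map to FunctionCallingConfigMode members; dicts or unknown
        # values are passed through as the mode, mirroring the else branch above.
        modes = {
            "auto": FunctionCallingConfigMode.AUTO,
            "none": FunctionCallingConfigMode.NONE,
            "validated": FunctionCallingConfigMode.VALIDATED,
            "any": FunctionCallingConfigMode.ANY,
        }
        mode = modes.get(tool_choice.lower(), tool_choice) if isinstance(tool_choice, str) else tool_choice
        return {"function_calling_config": {"mode": mode}}

    # resolve_tool_config("any") == {"function_calling_config": {"mode": FunctionCallingConfigMode.ANY}}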
@@ -271,7 +285,9 @@ class Gemini(Model):
         Invokes the model with a list of messages and returns the response.
         """
         formatted_messages, system_message = self._format_messages(messages)
-        request_kwargs = self.get_request_params(system_message, response_format=response_format, tools=tools)
+        request_kwargs = self.get_request_params(
+            system_message, response_format=response_format, tools=tools, tool_choice=tool_choice
+        )
         try:
             if run_response and run_response.metrics:
                 run_response.metrics.set_time_to_first_token()
@@ -315,7 +331,9 @@ class Gemini(Model):
         """
         formatted_messages, system_message = self._format_messages(messages)
 
-        request_kwargs = self.get_request_params(system_message, response_format=response_format, tools=tools)
+        request_kwargs = self.get_request_params(
+            system_message, response_format=response_format, tools=tools, tool_choice=tool_choice
+        )
         try:
             if run_response and run_response.metrics:
                 run_response.metrics.set_time_to_first_token()
@@ -356,7 +374,9 @@ class Gemini(Model):
         """
         formatted_messages, system_message = self._format_messages(messages)
 
-        request_kwargs = self.get_request_params(system_message, response_format=response_format, tools=tools)
+        request_kwargs = self.get_request_params(
+            system_message, response_format=response_format, tools=tools, tool_choice=tool_choice
+        )
 
         try:
             if run_response and run_response.metrics:
@@ -400,7 +420,9 @@ class Gemini(Model):
         """
         formatted_messages, system_message = self._format_messages(messages)
 
-        request_kwargs = self.get_request_params(system_message, response_format=response_format, tools=tools)
+        request_kwargs = self.get_request_params(
+            system_message, response_format=response_format, tools=tools, tool_choice=tool_choice
+        )
 
         try:
             if run_response and run_response.metrics:
@@ -1051,9 +1073,9 @@ class Gemini(Model):
 
         metrics.input_tokens = response_usage.prompt_token_count or 0
         metrics.output_tokens = response_usage.candidates_token_count or 0
-        metrics.total_tokens = metrics.input_tokens + metrics.output_tokens
         if response_usage.thoughts_token_count is not None:
             metrics.output_tokens += response_usage.thoughts_token_count or 0
+        metrics.total_tokens = metrics.input_tokens + metrics.output_tokens
 
         metrics.cache_read_tokens = response_usage.cached_content_token_count or 0
 
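The reordering in the metrics hunk matters for responses that report thinking tokens: total_tokens is now computed after thoughts_token_count has been folded into output_tokens. A worked example with 100 prompt tokens, 50 candidate tokens and 20 thought tokens:

    input_tokens, candidates, thoughts = 100, 50, 20

    # Old order: the total was taken before thought tokens were added
    total_before = input_tokens + candidates    # 150 (thoughts not counted)

    # New order: thoughts are added to output_tokens first, then the total is taken
    output_tokens = candidates + thoughts       # 70
    total_after = input_tokens + output_tokens  # 170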
agno/models/openai/chat.py CHANGED
@@ -160,6 +160,7 @@ class OpenAIChat(Model):
         response_format: Optional[Union[Dict, Type[BaseModel]]] = None,
         tools: Optional[List[Dict[str, Any]]] = None,
         tool_choice: Optional[Union[str, Dict[str, Any]]] = None,
+        run_response: Optional[RunOutput] = None,
     ) -> Dict[str, Any]:
         """
         Returns keyword arguments for API requests.
@@ -370,7 +371,9 @@ class OpenAIChat(Model):
             provider_response = self.get_client().chat.completions.create(
                 model=self.id,
                 messages=[self._format_message(m) for m in messages],  # type: ignore
-                **self.get_request_params(response_format=response_format, tools=tools, tool_choice=tool_choice),
+                **self.get_request_params(
+                    response_format=response_format, tools=tools, tool_choice=tool_choice, run_response=run_response
+                ),
             )
             assistant_message.metrics.stop_timer()
 
@@ -447,7 +450,9 @@ class OpenAIChat(Model):
             response = await self.get_async_client().chat.completions.create(
                 model=self.id,
                 messages=[self._format_message(m) for m in messages],  # type: ignore
-                **self.get_request_params(response_format=response_format, tools=tools, tool_choice=tool_choice),
+                **self.get_request_params(
+                    response_format=response_format, tools=tools, tool_choice=tool_choice, run_response=run_response
+                ),
             )
             assistant_message.metrics.stop_timer()
 
@@ -524,7 +529,9 @@ class OpenAIChat(Model):
                 messages=[self._format_message(m) for m in messages],  # type: ignore
                 stream=True,
                 stream_options={"include_usage": True},
-                **self.get_request_params(response_format=response_format, tools=tools, tool_choice=tool_choice),
+                **self.get_request_params(
+                    response_format=response_format, tools=tools, tool_choice=tool_choice, run_response=run_response
+                ),
             ):
                 yield self._parse_provider_response_delta(chunk)
 
@@ -598,7 +605,9 @@ class OpenAIChat(Model):
                 messages=[self._format_message(m) for m in messages],  # type: ignore
                 stream=True,
                 stream_options={"include_usage": True},
-                **self.get_request_params(response_format=response_format, tools=tools, tool_choice=tool_choice),
+                **self.get_request_params(
+                    response_format=response_format, tools=tools, tool_choice=tool_choice, run_response=run_response
+                ),
             )
 
             async for chunk in async_stream:
agno/models/openai/responses.py CHANGED
@@ -45,7 +45,7 @@ class OpenAIResponses(Model):
     parallel_tool_calls: Optional[bool] = None
     reasoning: Optional[Dict[str, Any]] = None
     verbosity: Optional[Literal["low", "medium", "high"]] = None
-    reasoning_effort: Optional[Literal["minimal", "medium", "high"]] = None
+    reasoning_effort: Optional[Literal["minimal", "low", "medium", "high"]] = None
     reasoning_summary: Optional[Literal["auto", "concise", "detailed"]] = None
     store: Optional[bool] = None
     temperature: Optional[float] = None
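
reasoning_effort on OpenAIResponses now accepts "low" alongside "minimal", "medium" and "high". A usage sketch (the model id is a placeholder, not taken from this diff):

    from agno.models.openai.responses import OpenAIResponses

    # "low" was previously rejected by the Literal type
    model = OpenAIResponses(id="o4-mini", reasoning_effort="low", reasoning_summary="auto")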
agno/models/perplexity/perplexity.py CHANGED
@@ -1,6 +1,6 @@
 from dataclasses import dataclass, field
 from os import getenv
-from typing import Any, Dict, List, Optional, Type, Union
+from typing import Any, Dict, Optional, Type, Union
 
 from pydantic import BaseModel
 
@@ -53,8 +53,7 @@ class Perplexity(OpenAILike):
     def get_request_params(
         self,
         response_format: Optional[Union[Dict, Type[BaseModel]]] = None,
-        tools: Optional[List[Dict[str, Any]]] = None,
-        tool_choice: Optional[Union[str, Dict[str, Any]]] = None,
+        **kwargs: Any,
     ) -> Dict[str, Any]:
         """
         Returns keyword arguments for API requests.
agno/models/requesty/__init__.py ADDED
@@ -0,0 +1,5 @@
+from agno.models.requesty.requesty import Requesty
+
+__all__ = [
+    "Requesty",
+]
agno/models/requesty/requesty.py ADDED
@@ -0,0 +1,49 @@
+from dataclasses import dataclass, field
+from os import getenv
+from typing import Any, Dict, List, Optional, Type, Union
+
+from pydantic import BaseModel
+
+from agno.models.openai.like import OpenAILike
+from agno.run.agent import RunOutput
+
+
+@dataclass
+class Requesty(OpenAILike):
+    """
+    A class for using models hosted on Requesty.
+
+    Attributes:
+        id (str): The model id. Defaults to "openai/gpt-4.1".
+        provider (str): The provider name. Defaults to "Requesty".
+        api_key (Optional[str]): The API key.
+        base_url (str): The base URL. Defaults to "https://router.requesty.ai/v1".
+        max_tokens (int): The maximum number of tokens. Defaults to 1024.
+    """
+
+    id: str = "openai/gpt-4.1"
+    name: str = "Requesty"
+    provider: str = "Requesty"
+
+    api_key: Optional[str] = field(default_factory=lambda: getenv("REQUESTY_API_KEY"))
+    base_url: str = "https://router.requesty.ai/v1"
+    max_tokens: int = 1024
+
+    def get_request_params(
+        self,
+        response_format: Optional[Union[Dict, Type[BaseModel]]] = None,
+        tools: Optional[List[Dict[str, Any]]] = None,
+        tool_choice: Optional[Union[str, Dict[str, Any]]] = None,
+        run_response: Optional[RunOutput] = None,
+    ) -> Dict[str, Any]:
+        params = super().get_request_params(response_format=response_format, tools=tools, tool_choice=tool_choice)
+
+        if "extra_body" not in params:
+            params["extra_body"] = {}
+        params["extra_body"]["requesty"] = {}
+        if run_response and run_response.user_id:
+            params["extra_body"]["requesty"]["user_id"] = run_response.user_id
+        if run_response and run_response.session_id:
+            params["extra_body"]["requesty"]["trace_id"] = run_response.session_id
+
+        return params
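
A usage sketch for the new Requesty provider. Only the Requesty fields come from the file above; the Agent wiring and the print_response keyword arguments are assumptions based on how other OpenAILike providers are typically used in agno:

    from agno.agent import Agent
    from agno.models.requesty import Requesty

    # REQUESTY_API_KEY is read from the environment when api_key is not passed
    agent = Agent(model=Requesty(id="openai/gpt-4.1"))

    # user_id / session_id carried on the run output are forwarded to Requesty
    # as extra_body["requesty"]["user_id"] / ["trace_id"] by get_request_params
    agent.print_response("What can you do?", user_id="user-123", session_id="session-456")

This relies on the run_response parameter that the OpenAIChat hunks above thread into get_request_params.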
agno/models/vllm/vllm.py CHANGED
@@ -57,6 +57,7 @@ class VLLM(OpenAILike):
         response_format: Optional[Union[Dict, Type[BaseModel]]] = None,
         tools: Optional[List[Dict[str, Any]]] = None,
         tool_choice: Optional[Union[str, Dict[str, Any]]] = None,
+        **kwargs: Any,
     ) -> Dict[str, Any]:
         request_kwargs = super().get_request_params(
             response_format=response_format, tools=tools, tool_choice=tool_choice
agno/models/xai/xai.py CHANGED
@@ -44,6 +44,7 @@ class xAI(OpenAILike):
         response_format: Optional[Union[Dict, Type[BaseModel]]] = None,
         tools: Optional[List[Dict[str, Any]]] = None,
         tool_choice: Optional[Union[str, Dict[str, Any]]] = None,
+        **kwargs: Any,
     ) -> Dict[str, Any]:
         """
         Returns keyword arguments for API requests, including search parameters.
agno/os/app.py CHANGED
@@ -1,7 +1,7 @@
 from contextlib import asynccontextmanager
 from functools import partial
 from os import getenv
-from typing import Any, Dict, List, Optional, Set, Union
+from typing import Any, Dict, List, Literal, Optional, Union
 from uuid import uuid4
 
 from fastapi import APIRouter, FastAPI, HTTPException
@@ -40,6 +40,8 @@ from agno.os.settings import AgnoAPISettings
 from agno.os.utils import (
     collect_mcp_tools_from_team,
     collect_mcp_tools_from_workflow,
+    find_conflicting_routes,
+    load_yaml_config,
     update_cors_middleware,
 )
 from agno.team.team import Team
@@ -49,7 +51,7 @@ from agno.workflow.workflow import Workflow
 
 
 @asynccontextmanager
-async def mcp_lifespan(app, mcp_tools):
+async def mcp_lifespan(_, mcp_tools):
     """Manage MCP connection lifecycle inside a FastAPI app"""
     # Startup logic: connect to all contextual MCP servers
     for tool in mcp_tools:
@@ -65,7 +67,8 @@ async def mcp_lifespan(app, mcp_tools):
 class AgentOS:
     def __init__(
         self,
-        os_id: Optional[str] = None,
+        id: Optional[str] = None,
+        os_id: Optional[str] = None,  # Deprecated
         name: Optional[str] = None,
         description: Optional[str] = None,
         version: Optional[str] = None,
@@ -75,16 +78,19 @@ class AgentOS:
         interfaces: Optional[List[BaseInterface]] = None,
         config: Optional[Union[str, AgentOSConfig]] = None,
         settings: Optional[AgnoAPISettings] = None,
-        fastapi_app: Optional[FastAPI] = None,
         lifespan: Optional[Any] = None,
-        enable_mcp: bool = False,
-        replace_routes: bool = True,
+        enable_mcp: bool = False,  # Deprecated
+        enable_mcp_server: bool = False,
+        fastapi_app: Optional[FastAPI] = None,  # Deprecated
+        base_app: Optional[FastAPI] = None,
+        replace_routes: Optional[bool] = None,  # Deprecated
+        on_route_conflict: Literal["preserve_agentos", "preserve_base_app", "error"] = "preserve_agentos",
         telemetry: bool = True,
     ):
         """Initialize AgentOS.
 
         Args:
-            os_id: Unique identifier for this AgentOS instance
+            id: Unique identifier for this AgentOS instance
             name: Name of the AgentOS instance
             description: Description of the AgentOS instance
             version: Version of the AgentOS instance
@@ -94,18 +100,16 @@ class AgentOS:
             interfaces: List of interfaces to include in the OS
             config: Configuration file path or AgentOSConfig instance
             settings: API settings for the OS
-            fastapi_app: Optional custom FastAPI app to use instead of creating a new one
             lifespan: Optional lifespan context manager for the FastAPI app
-            enable_mcp: Whether to enable MCP (Model Context Protocol)
-            replace_routes: If False and using a custom fastapi_app, skip AgentOS routes that
-                conflict with existing routes, preferring the user's custom routes.
-                If True (default), AgentOS routes will override conflicting custom routes.
+            enable_mcp_server: Whether to enable MCP (Model Context Protocol)
+            base_app: Optional base FastAPI app to use for the AgentOS. All routes and middleware will be added to this app.
+            on_route_conflict: What to do when a route conflict is detected in case a custom base_app is provided.
             telemetry: Whether to enable telemetry
         """
         if not agents and not workflows and not teams:
             raise ValueError("Either agents, teams or workflows must be provided.")
 
-        self.config = self._load_yaml_config(config) if isinstance(config, str) else config
+        self.config = load_yaml_config(config) if isinstance(config, str) else config
 
         self.agents: Optional[List[Agent]] = agents
         self.workflows: Optional[List[Workflow]] = workflows
@@ -115,27 +119,42 @@ class AgentOS:
         self.settings: AgnoAPISettings = settings or AgnoAPISettings()
 
         self._app_set = False
-        self.fastapi_app: Optional[FastAPI] = None
-        if fastapi_app:
-            self.fastapi_app = fastapi_app
+
+        if base_app:
+            self.base_app: Optional[FastAPI] = base_app
+            self._app_set = True
+            self.on_route_conflict = on_route_conflict
+        elif fastapi_app:
+            self.base_app = fastapi_app
             self._app_set = True
+            if replace_routes is not None:
+                self.on_route_conflict = "preserve_agentos" if replace_routes else "preserve_base_app"
+            else:
+                self.on_route_conflict = on_route_conflict
+        else:
+            self.base_app = None
+            self._app_set = False
+            self.on_route_conflict = on_route_conflict
 
         self.interfaces = interfaces or []
 
-        self.os_id = os_id
         self.name = name
+
+        self.id = id or os_id
+        if not self.id:
+            self.id = generate_id(self.name) if self.name else str(uuid4())
+
         self.version = version
         self.description = description
 
-        self.replace_routes = replace_routes
-
         self.telemetry = telemetry
 
-        self.enable_mcp = enable_mcp
+        self.enable_mcp_server = enable_mcp or enable_mcp_server
         self.lifespan = lifespan
 
         # List of all MCP tools used inside the AgentOS
         self.mcp_tools: List[Any] = []
+        self._mcp_app: Optional[Any] = None
 
         if self.agents:
             for agent in self.agents:
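
Taken together, the constructor changes rename os_id to id, fastapi_app to base_app and enable_mcp to enable_mcp_server, and replace replace_routes with on_route_conflict, while the old names keep working as deprecated aliases. A minimal construction sketch (the Agent below is a placeholder; parameter meanings are as in the docstring hunk above, and the import paths are assumed from agno's usual layout):

    from fastapi import FastAPI

    from agno.agent import Agent
    from agno.os import AgentOS

    agent_os = AgentOS(
        id="my-os",                  # was: os_id="my-os"
        agents=[Agent(name="demo-agent")],
        enable_mcp_server=True,      # was: enable_mcp=True
    )
    app = agent_os.get_app()

    # With a custom base app, AgentOS routes are added onto it and conflicts are
    # handled according to on_route_conflict:
    custom_os = AgentOS(
        id="my-os-on-custom-app",
        agents=[Agent(name="demo-agent")],
        base_app=FastAPI(),                    # was: fastapi_app=...
        on_route_conflict="preserve_agentos",  # or "preserve_base_app" / "error"
    )
    custom_app = custom_os.get_app()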
@@ -177,13 +196,10 @@
             if not workflow.id:
                 workflow.id = generate_id_from_name(workflow.name)
 
-        if not self.os_id:
-            self.os_id = generate_id(self.name) if self.name else str(uuid4())
-
         if self.telemetry:
             from agno.api.os import OSLaunch, log_os_telemetry
 
-            log_os_telemetry(launch=OSLaunch(os_id=self.os_id, data=self._get_telemetry_data()))
+            log_os_telemetry(launch=OSLaunch(os_id=self.id, data=self._get_telemetry_data()))
 
     def _make_app(self, lifespan: Optional[Any] = None) -> FastAPI:
         # Adjust the FastAPI app lifespan to handle MCP connections if relevant
@@ -215,39 +231,41 @@
             )
 
     def get_app(self) -> FastAPI:
-        if not self.fastapi_app:
-            if self.enable_mcp:
+        if self.base_app:
+            fastapi_app = self.base_app
+        else:
+            if self.enable_mcp_server:
                 from contextlib import asynccontextmanager
 
                 from agno.os.mcp import get_mcp_server
 
-                self.mcp_app = get_mcp_server(self)
+                self._mcp_app = get_mcp_server(self)
 
-                final_lifespan = self.mcp_app.lifespan
+                final_lifespan = self._mcp_app.lifespan  # type: ignore
                 if self.lifespan is not None:
                     # Combine both lifespans
                     @asynccontextmanager
                     async def combined_lifespan(app: FastAPI):
                         # Run both lifespans
                         async with self.lifespan(app):  # type: ignore
-                            async with self.mcp_app.lifespan(app):  # type: ignore
+                            async with self._mcp_app.lifespan(app):  # type: ignore
                                 yield
 
                     final_lifespan = combined_lifespan  # type: ignore
 
-                self.fastapi_app = self._make_app(lifespan=final_lifespan)
+                fastapi_app = self._make_app(lifespan=final_lifespan)
             else:
-                self.fastapi_app = self._make_app(lifespan=self.lifespan)
+                fastapi_app = self._make_app(lifespan=self.lifespan)
 
-        # Add routes with conflict detection
-        self._add_router(get_base_router(self, settings=self.settings))
-        self._add_router(get_websocket_router(self, settings=self.settings))
-        self._add_router(get_health_router())
-        self._add_router(get_home_router(self))
+        # Add routes
+        self._add_router(fastapi_app, get_base_router(self, settings=self.settings))
+        self._add_router(fastapi_app, get_websocket_router(self, settings=self.settings))
+        self._add_router(fastapi_app, get_health_router())
+        self._add_router(fastapi_app, get_home_router(self))
 
         for interface in self.interfaces:
             interface_router = interface.get_router()
-            self._add_router(interface_router)
+            self._add_router(fastapi_app, interface_router)
 
         self._auto_discover_databases()
         self._auto_discover_knowledge_instances()
@@ -261,17 +279,19 @@
         ]
 
         for router in routers:
-            self._add_router(router)
+            self._add_router(fastapi_app, router)
 
         # Mount MCP if needed
-        if self.enable_mcp and self.mcp_app:
-            self.fastapi_app.mount("/", self.mcp_app)
+        if self.enable_mcp_server and self._mcp_app:
+            fastapi_app.mount("/", self._mcp_app)
+        else:
+            # Add the home router
+            self._add_router(fastapi_app, get_home_router(self))
 
-        # Add middleware (only if app is not set)
         if not self._app_set:
 
-            @self.fastapi_app.exception_handler(HTTPException)
-            async def http_exception_handler(request: Request, exc: HTTPException) -> JSONResponse:
+            @fastapi_app.exception_handler(HTTPException)
+            async def http_exception_handler(_, exc: HTTPException) -> JSONResponse:
                 return JSONResponse(
                     status_code=exc.status_code,
                     content={"detail": str(exc.detail)},
@@ -286,12 +306,12 @@
                     content={"detail": str(e)},
                 )
 
-            self.fastapi_app.middleware("http")(general_exception_handler)
+            fastapi_app.middleware("http")(general_exception_handler)
 
         # Update CORS middleware
-        update_cors_middleware(self.fastapi_app, self.settings.cors_origin_list)  # type: ignore
+        update_cors_middleware(fastapi_app, self.settings.cors_origin_list)  # type: ignore
 
-        return self.fastapi_app
+        return fastapi_app
 
     def get_routes(self) -> List[Any]:
         """Retrieve all routes from the FastAPI app.
@@ -303,55 +323,37 @@
 
         return app.routes
 
-    def _get_existing_route_paths(self) -> Dict[str, List[str]]:
-        """Get all existing route paths and methods from the FastAPI app.
-
-        Returns:
-            Dict[str, List[str]]: Dictionary mapping paths to list of HTTP methods
-        """
-        if not self.fastapi_app:
-            return {}
-
-        existing_paths: Dict[str, Any] = {}
-        for route in self.fastapi_app.routes:
-            if isinstance(route, APIRoute):
-                path = route.path
-                methods = list(route.methods) if route.methods else []
-                if path in existing_paths:
-                    existing_paths[path].extend(methods)
-                else:
-                    existing_paths[path] = methods
-        return existing_paths
-
-    def _add_router(self, router: APIRouter) -> None:
+    def _add_router(self, fastapi_app: FastAPI, router: APIRouter) -> None:
         """Add a router to the FastAPI app, avoiding route conflicts.
 
         Args:
             router: The APIRouter to add
         """
-        if not self.fastapi_app:
-            return
 
-        # Get existing routes
-        existing_paths = self._get_existing_route_paths()
+        conflicts = find_conflicting_routes(fastapi_app, router)
+        conflicting_routes = [conflict["route"] for conflict in conflicts]
 
-        # Check for conflicts
-        conflicts = []
-        conflicting_routes = []
+        if conflicts and self._app_set:
+            if self.on_route_conflict == "preserve_base_app":
+                # Skip conflicting AgentOS routes, prefer user's existing routes
+                for conflict in conflicts:
+                    methods_str = ", ".join(conflict["methods"])  # type: ignore
+                    logger.debug(
+                        f"Skipping conflicting AgentOS route: {methods_str} {conflict['path']} - "
+                        f"Using existing custom route instead"
+                    )
 
-        for route in router.routes:
-            if isinstance(route, APIRoute):
-                full_path = route.path
-                route_methods = list(route.methods) if route.methods else []
+                # Create a new router without the conflicting routes
+                filtered_router = APIRouter()
+                for route in router.routes:
+                    if route not in conflicting_routes:
+                        filtered_router.routes.append(route)
 
-                if full_path in existing_paths:
-                    conflicting_methods: Set[str] = set(route_methods) & set(existing_paths[full_path])
-                    if conflicting_methods:
-                        conflicts.append({"path": full_path, "methods": list(conflicting_methods), "route": route})
-                        conflicting_routes.append(route)
+                # Use the filtered router if it has any routes left
+                if filtered_router.routes:
+                    fastapi_app.include_router(filtered_router)
 
-        if conflicts and self._app_set:
-            if self.replace_routes:
+            elif self.on_route_conflict == "preserve_agentos":
                 # Log warnings but still add all routes (AgentOS routes will override)
                 for conflict in conflicts:
                     methods_str = ", ".join(conflict["methods"])  # type: ignore
@@ -361,35 +363,21 @@
                     )
 
                 # Remove conflicting routes
-                for route in self.fastapi_app.routes:
+                for route in fastapi_app.routes:
                     for conflict in conflicts:
                         if isinstance(route, APIRoute):
                             if route.path == conflict["path"] and list(route.methods) == list(conflict["methods"]):  # type: ignore
-                                self.fastapi_app.routes.pop(self.fastapi_app.routes.index(route))
+                                fastapi_app.routes.pop(fastapi_app.routes.index(route))
 
-                self.fastapi_app.include_router(router)
+                fastapi_app.include_router(router)
 
-            else:
-                # Skip conflicting AgentOS routes, prefer user's existing routes
-                for conflict in conflicts:
-                    methods_str = ", ".join(conflict["methods"])  # type: ignore
-                    logger.debug(
-                        f"Skipping conflicting AgentOS route: {methods_str} {conflict['path']} - "
-                        f"Using existing custom route instead"
-                    )
+            elif self.on_route_conflict == "error":
+                conflicting_paths = [conflict["path"] for conflict in conflicts]
+                raise ValueError(f"Route conflict detected: {conflicting_paths}")
 
-                # Create a new router without the conflicting routes
-                filtered_router = APIRouter()
-                for route in router.routes:
-                    if route not in conflicting_routes:
-                        filtered_router.routes.append(route)
-
-                # Use the filtered router if it has any routes left
-                if filtered_router.routes:
-                    self.fastapi_app.include_router(filtered_router)
         else:
             # No conflicts, add router normally
-            self.fastapi_app.include_router(router)
+            fastapi_app.include_router(router)
 
     def _get_telemetry_data(self) -> Dict[str, Any]:
         """Get the telemetry data for the OS"""
@@ -400,21 +388,6 @@
             "interfaces": [interface.type for interface in self.interfaces] if self.interfaces else None,
         }
 
-    def _load_yaml_config(self, config_file_path: str) -> AgentOSConfig:
-        """Load a YAML config file and return the configuration as an AgentOSConfig instance."""
-        from pathlib import Path
-
-        import yaml
-
-        # Validate that the path points to a YAML file
-        path = Path(config_file_path)
-        if path.suffix.lower() not in [".yaml", ".yml"]:
-            raise ValueError(f"Config file must have a .yaml or .yml extension, got: {config_file_path}")
-
-        # Load the YAML file
-        with open(config_file_path, "r") as f:
-            return AgentOSConfig.model_validate(yaml.safe_load(f))
-
     def _auto_discover_databases(self) -> None:
         """Auto-discover the databases used by all contextual agents, teams and workflows."""
         from agno.db.base import BaseDb
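
_load_yaml_config is removed from AgentOS; the constructor now calls load_yaml_config imported from agno.os.utils (see the import hunk near the top of this file). The helper presumably mirrors the removed method body; a sketch, with the AgentOSConfig import path assumed:

    from pathlib import Path

    import yaml

    from agno.os.config import AgentOSConfig  # assumed location of AgentOSConfig


    def load_yaml_config(config_file_path: str) -> AgentOSConfig:
        """Load a YAML config file and return it as an AgentOSConfig instance."""
        # Only .yaml / .yml files are accepted
        if Path(config_file_path).suffix.lower() not in [".yaml", ".yml"]:
            raise ValueError(f"Config file must have a .yaml or .yml extension, got: {config_file_path}")

        # Parse the YAML and validate it against the AgentOSConfig model
        with open(config_file_path, "r") as f:
            return AgentOSConfig.model_validate(yaml.safe_load(f))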
@@ -641,11 +614,10 @@
         from rich.align import Align
         from rich.console import Console, Group
 
-        panel_group = []
-        panel_group.append(Align.center(f"[bold cyan]{public_endpoint}[/bold cyan]"))
-        panel_group.append(
-            Align.center(f"\n\n[bold dark_orange]OS running on:[/bold dark_orange] http://{host}:{port}")
-        )
+        panel_group = [
+            Align.center(f"[bold cyan]{public_endpoint}[/bold cyan]"),
+            Align.center(f"\n\n[bold dark_orange]OS running on:[/bold dark_orange] http://{host}:{port}"),
+        ]
         if bool(self.settings.os_security_key):
             panel_group.append(Align.center("\n\n[bold chartreuse3]:lock: Security Enabled[/bold chartreuse3]"))
 
agno/os/interfaces/__init__.py CHANGED
@@ -0,0 +1 @@
+