agno 2.2.10__py3-none-any.whl → 2.2.12__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- agno/agent/agent.py +75 -48
- agno/db/dynamo/utils.py +1 -1
- agno/db/firestore/utils.py +1 -1
- agno/db/gcs_json/utils.py +1 -1
- agno/db/in_memory/utils.py +1 -1
- agno/db/json/utils.py +1 -1
- agno/db/mongo/utils.py +3 -3
- agno/db/mysql/mysql.py +1 -1
- agno/db/mysql/utils.py +1 -1
- agno/db/postgres/utils.py +1 -1
- agno/db/redis/utils.py +1 -1
- agno/db/singlestore/singlestore.py +1 -1
- agno/db/singlestore/utils.py +1 -1
- agno/db/sqlite/async_sqlite.py +1 -1
- agno/db/sqlite/sqlite.py +1 -1
- agno/db/sqlite/utils.py +1 -1
- agno/filters.py +354 -0
- agno/knowledge/chunking/agentic.py +8 -9
- agno/knowledge/chunking/strategy.py +59 -15
- agno/knowledge/embedder/sentence_transformer.py +6 -2
- agno/knowledge/knowledge.py +43 -22
- agno/knowledge/reader/base.py +6 -2
- agno/knowledge/utils.py +20 -0
- agno/models/anthropic/claude.py +45 -9
- agno/models/base.py +4 -0
- agno/os/app.py +23 -7
- agno/os/interfaces/slack/router.py +53 -33
- agno/os/interfaces/slack/slack.py +9 -1
- agno/os/router.py +25 -1
- agno/os/routers/health.py +5 -3
- agno/os/routers/knowledge/knowledge.py +43 -17
- agno/os/routers/knowledge/schemas.py +4 -3
- agno/run/agent.py +11 -1
- agno/run/base.py +3 -2
- agno/session/agent.py +10 -5
- agno/team/team.py +57 -18
- agno/tools/file_generation.py +4 -4
- agno/tools/gmail.py +179 -0
- agno/tools/parallel.py +314 -0
- agno/utils/agent.py +22 -17
- agno/utils/gemini.py +15 -5
- agno/utils/knowledge.py +12 -5
- agno/utils/log.py +1 -0
- agno/utils/models/claude.py +2 -1
- agno/utils/print_response/agent.py +5 -4
- agno/utils/print_response/team.py +5 -4
- agno/vectordb/base.py +2 -4
- agno/vectordb/cassandra/cassandra.py +12 -5
- agno/vectordb/chroma/chromadb.py +10 -4
- agno/vectordb/clickhouse/clickhousedb.py +12 -4
- agno/vectordb/couchbase/couchbase.py +12 -3
- agno/vectordb/lancedb/lance_db.py +69 -144
- agno/vectordb/langchaindb/langchaindb.py +13 -4
- agno/vectordb/lightrag/lightrag.py +8 -3
- agno/vectordb/llamaindex/llamaindexdb.py +10 -4
- agno/vectordb/milvus/milvus.py +16 -5
- agno/vectordb/mongodb/mongodb.py +14 -3
- agno/vectordb/pgvector/pgvector.py +73 -15
- agno/vectordb/pineconedb/pineconedb.py +6 -2
- agno/vectordb/qdrant/qdrant.py +25 -13
- agno/vectordb/redis/redisdb.py +37 -30
- agno/vectordb/singlestore/singlestore.py +9 -4
- agno/vectordb/surrealdb/surrealdb.py +13 -3
- agno/vectordb/upstashdb/upstashdb.py +8 -5
- agno/vectordb/weaviate/weaviate.py +29 -12
- agno/workflow/step.py +3 -2
- agno/workflow/types.py +20 -1
- agno/workflow/workflow.py +103 -14
- {agno-2.2.10.dist-info → agno-2.2.12.dist-info}/METADATA +4 -1
- {agno-2.2.10.dist-info → agno-2.2.12.dist-info}/RECORD +73 -71
- {agno-2.2.10.dist-info → agno-2.2.12.dist-info}/WHEEL +0 -0
- {agno-2.2.10.dist-info → agno-2.2.12.dist-info}/licenses/LICENSE +0 -0
- {agno-2.2.10.dist-info → agno-2.2.12.dist-info}/top_level.txt +0 -0
agno/knowledge/utils.py
CHANGED
@@ -129,12 +129,32 @@ def get_chunker_info(chunker_key: str) -> Dict:
         class_name = chunker_class.__name__
         docstring = chunker_class.__doc__ or f"{class_name} chunking strategy"

+        # Check class __init__ signature for chunk_size and overlap parameters
+        metadata = {}
+        import inspect
+
+        try:
+            sig = inspect.signature(chunker_class.__init__)
+            param_names = set(sig.parameters.keys())
+
+            # If class has chunk_size or max_chunk_size parameter, set default chunk_size
+            if "chunk_size" in param_names or "max_chunk_size" in param_names:
+                metadata["chunk_size"] = 5000
+
+            # If class has overlap parameter, set default overlap
+            if "overlap" in param_names:
+                metadata["chunk_overlap"] = 0
+        except Exception:
+            # If we can't inspect, skip metadata
+            pass
+
         return {
             "key": chunker_key,
             "class_name": class_name,
             "name": chunker_key,
             "description": docstring.strip(),
             "strategy_type": strategy_type.value,
+            "metadata": metadata,
         }
     except ValueError:
         raise ValueError(f"Unknown chunker key: {chunker_key}")
agno/models/anthropic/claude.py
CHANGED
@@ -45,6 +45,8 @@ except ImportError as e:
 # Import Beta types
 try:
     from anthropic.types.beta import BetaRawContentBlockDeltaEvent, BetaTextDelta
+    from anthropic.types.beta.beta_message import BetaMessage
+    from anthropic.types.beta.beta_usage import BetaUsage
 except ImportError as e:
     raise ImportError(
         "`anthropic` not installed or missing beta components. Please install with `pip install anthropic`"
@@ -84,13 +86,14 @@ class Claude(Model):
     cache_system_prompt: Optional[bool] = False
     extended_cache_time: Optional[bool] = False
     request_params: Optional[Dict[str, Any]] = None
-    mcp_servers: Optional[List[MCPServerConfiguration]] = None

-    #
+    # Anthropic beta and experimental features
+    betas: Optional[List[str]] = None  # Enables specific experimental or newly released features.
+    context_management: Optional[Dict[str, Any]] = None
+    mcp_servers: Optional[List[MCPServerConfiguration]] = None
     skills: Optional[List[Dict[str, str]]] = (
         None  # e.g., [{"type": "anthropic", "skill_id": "pptx", "version": "latest"}]
     )
-    betas: Optional[List[str]] = None  # Enables specific experimental or newly released features.

     # Client parameters
     api_key: Optional[str] = None
@@ -129,6 +132,15 @@ class Claude(Model):
         client_params["default_headers"] = self.default_headers
         return client_params

+    def _has_beta_features(self) -> bool:
+        """Check if the model has any Anthropic beta features enabled."""
+        return (
+            self.mcp_servers is not None
+            or self.context_management is not None
+            or self.skills is not None
+            or self.betas is not None
+        )
+
     def get_client(self) -> AnthropicClient:
         """
         Returns an instance of the Anthropic client.
@@ -208,6 +220,10 @@ class Claude(Model):
             _request_params["top_p"] = self.top_p
         if self.top_k:
             _request_params["top_k"] = self.top_k
+        if self.betas:
+            _request_params["betas"] = self.betas
+        if self.context_management:
+            _request_params["context_management"] = self.context_management
         if self.mcp_servers:
             _request_params["mcp_servers"] = [
                 {k: v for k, v in asdict(server).items() if v is not None} for server in self.mcp_servers
@@ -279,7 +295,7 @@ class Claude(Model):
         chat_messages, system_message = format_messages(messages)
         request_kwargs = self._prepare_request_kwargs(system_message, tools)

-        if self.
+        if self._has_beta_features():
             assistant_message.metrics.start_timer()
             provider_response = self.get_client().beta.messages.create(
                 model=self.id,
@@ -346,7 +362,8 @@ class Claude(Model):
         if run_response and run_response.metrics:
             run_response.metrics.set_time_to_first_token()

-
+        # Beta features
+        if self._has_beta_features():
             assistant_message.metrics.start_timer()
             with self.get_client().beta.messages.stream(
                 model=self.id,
@@ -401,7 +418,8 @@ class Claude(Model):
         chat_messages, system_message = format_messages(messages)
         request_kwargs = self._prepare_request_kwargs(system_message, tools)

-
+        # Beta features
+        if self._has_beta_features():
             assistant_message.metrics.start_timer()
             provider_response = await self.get_async_client().beta.messages.create(
                 model=self.id,
@@ -465,7 +483,7 @@ class Claude(Model):
         chat_messages, system_message = format_messages(messages)
         request_kwargs = self._prepare_request_kwargs(system_message, tools)

-        if self.
+        if self._has_beta_features():
             assistant_message.metrics.start_timer()
             async with self.get_async_client().beta.messages.stream(
                 model=self.id,
@@ -507,7 +525,7 @@ class Claude(Model):
             return tool_call_prompt
         return None

-    def _parse_provider_response(self, response: AnthropicMessage, **kwargs) -> ModelResponse:
+    def _parse_provider_response(self, response: Union[AnthropicMessage, BetaMessage], **kwargs) -> ModelResponse:
         """
         Parse the Claude response into a ModelResponse.

@@ -582,6 +600,14 @@ class Claude(Model):
         if response.usage is not None:
             model_response.response_usage = self._get_metrics(response.usage)

+        # Capture context management information if present
+        if self.context_management is not None and hasattr(response, "context_management"):
+            if response.context_management is not None:  # type: ignore
+                model_response.provider_data = model_response.provider_data or {}
+                if hasattr(response.context_management, "model_dump"):
+                    model_response.provider_data["context_management"] = response.context_management.model_dump()  # type: ignore
+                else:
+                    model_response.provider_data["context_management"] = response.context_management  # type: ignore
         # Extract file IDs if skills are enabled
         if self.skills and response.content:
             file_ids: List[str] = []
@@ -676,6 +702,16 @@ class Claude(Model):
                     DocumentCitation(document_title=citation.document_title, cited_text=citation.cited_text)
                 )

+        # Capture context management information if present
+        if self.context_management is not None and hasattr(response.message, "context_management"):  # type: ignore
+            context_mgmt = response.message.context_management  # type: ignore
+            if context_mgmt is not None:
+                model_response.provider_data = model_response.provider_data or {}
+                if hasattr(context_mgmt, "model_dump"):
+                    model_response.provider_data["context_management"] = context_mgmt.model_dump()
+                else:
+                    model_response.provider_data["context_management"] = context_mgmt
+
         if hasattr(response, "message") and hasattr(response.message, "usage") and response.message.usage is not None:  # type: ignore
             model_response.response_usage = self._get_metrics(response.message.usage)  # type: ignore

@@ -692,7 +728,7 @@ class Claude(Model):

         return model_response

-    def _get_metrics(self, response_usage: Union[Usage, MessageDeltaUsage]) -> Metrics:
+    def _get_metrics(self, response_usage: Union[Usage, MessageDeltaUsage, BetaUsage]) -> Metrics:
         """
         Parse the given Anthropic-specific usage into an Agno Metrics object.
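The new betas and context_management fields are forwarded as request parameters whenever any beta feature is enabled, and requests then go through the Anthropic beta client. A hedged usage sketch; the model id, beta flag string, and context_management payload below are illustrative assumptions, not values documented by this diff:

from agno.models.anthropic.claude import Claude

model = Claude(
    id="claude-sonnet-4-5",  # assumed model id
    betas=["context-management-2025-06-27"],  # hypothetical beta flag
    context_management={"edits": [{"type": "clear_tool_uses_20250919"}]},  # hypothetical payload
)

# With any of betas / context_management / skills / mcp_servers set,
# _has_beta_features() returns True, requests use client.beta.messages, and any
# context management info in the response is surfaced under
# model_response.provider_data["context_management"].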
agno/models/base.py
CHANGED
@@ -692,6 +692,8 @@ class Model(ABC):
             if model_response.extra is None:
                 model_response.extra = {}
             model_response.extra.update(provider_response.extra)
+        if provider_response.provider_data is not None:
+            model_response.provider_data = provider_response.provider_data

     async def _aprocess_model_response(
         self,
@@ -745,6 +747,8 @@ class Model(ABC):
             if model_response.extra is None:
                 model_response.extra = {}
             model_response.extra.update(provider_response.extra)
+        if provider_response.provider_data is not None:
+            model_response.provider_data = provider_response.provider_data

     def _populate_assistant_message(
         self,
agno/os/app.py
CHANGED
@@ -232,6 +232,12 @@ class AgentOS:
         self._initialize_workflows()
         self._auto_discover_databases()
         self._auto_discover_knowledge_instances()
+
+        if self.enable_mcp_server:
+            from agno.os.mcp import get_mcp_server
+
+            self._mcp_app = get_mcp_server(self)
+
         self._reprovision_routers(app=app)

     def _reprovision_routers(self, app: FastAPI) -> None:
@@ -248,7 +254,9 @@ class AgentOS:
         app.router.routes = [
             route
             for route in app.router.routes
-            if hasattr(route, "path")
+            if hasattr(route, "path")
+            and route.path in ["/docs", "/redoc", "/openapi.json", "/docs/oauth2-redirect"]
+            or route.path.startswith("/mcp")  # type: ignore
         ]

         # Add the built-in routes
@@ -258,13 +266,17 @@ class AgentOS:
         for router in updated_routers:
             self._add_router(app, router)

+        # Mount MCP if needed
+        if self.enable_mcp_server and self._mcp_app:
+            app.mount("/", self._mcp_app)
+
     def _add_built_in_routes(self, app: FastAPI) -> None:
         """Add all AgentOSbuilt-in routes to the given app."""
         # Add the home router if MCP server is not enabled
         if not self.enable_mcp_server:
             self._add_router(app, get_home_router(self))

-        self._add_router(app, get_health_router())
+        self._add_router(app, get_health_router(health_endpoint="/health"))
         self._add_router(app, get_base_router(self, settings=self.settings))
         self._add_router(app, get_websocket_router(self, settings=self.settings))
@@ -381,20 +393,24 @@ class AgentOS:
         # Collect all lifespans that need to be combined
         lifespans = []

+        # The user provided lifespan
+        if self.lifespan:
+            # Wrap the user lifespan with agent_os parameter
+            wrapped_lifespan = self._add_agent_os_to_lifespan_function(self.lifespan)
+            lifespans.append(wrapped_lifespan)
+
+        # The provided app's existing lifespan
         if fastapi_app.router.lifespan_context:
             lifespans.append(fastapi_app.router.lifespan_context)

+        # The MCP tools lifespan
         if self.mcp_tools:
             lifespans.append(partial(mcp_lifespan, mcp_tools=self.mcp_tools))

+        # The /mcp server lifespan
         if self.enable_mcp_server and self._mcp_app:
             lifespans.append(self._mcp_app.lifespan)

-        if self.lifespan:
-            # Wrap the user lifespan with agent_os parameter
-            wrapped_lifespan = self._add_agent_os_to_lifespan_function(self.lifespan)
-            lifespans.append(wrapped_lifespan)
-
         # Combine lifespans and set them in the app
         if lifespans:
             fastapi_app.router.lifespan_context = _combine_app_lifespans(lifespans)
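With enable_mcp_server set, the MCP app is now built during router reprovisioning, mounted at the root, and its lifespan is combined with any user-provided lifespan. A hedged sketch; the Agent construction and accessor names below are assumptions based on the public agno API, not on this diff:

from agno.agent import Agent
from agno.os import AgentOS

agent = Agent(name="docs-agent")  # assumed minimal construction
agent_os = AgentOS(agents=[agent], enable_mcp_server=True)
app = agent_os.get_app()  # assumed accessor; serves /mcp alongside the REST routes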
agno/os/interfaces/slack/router.py
CHANGED
@@ -1,4 +1,4 @@
-from typing import Optional
+from typing import Optional, Union

 from fastapi import APIRouter, BackgroundTasks, HTTPException, Request
 from pydantic import BaseModel, Field
@@ -24,7 +24,11 @@ class SlackChallengeResponse(BaseModel):


 def attach_routes(
-    router: APIRouter,
+    router: APIRouter,
+    agent: Optional[Agent] = None,
+    team: Optional[Team] = None,
+    workflow: Optional[Workflow] = None,
+    reply_to_mentions_only: bool = True,
 ) -> APIRouter:
     # Determine entity type for documentation
     entity_type = "agent" if agent else "team" if team else "workflow" if workflow else "unknown"
@@ -34,7 +38,7 @@ def attach_routes(
         operation_id=f"slack_events_{entity_type}",
         name="slack_events",
         description="Process incoming Slack events",
-        response_model=SlackEventResponse,
+        response_model=Union[SlackChallengeResponse, SlackEventResponse],
         response_model_exclude_none=True,
         responses={
             200: {"description": "Event processed successfully"},
@@ -71,36 +75,52 @@ def attach_routes(
         return SlackEventResponse(status="ok")

     async def _process_slack_event(event: dict):
-        (previous handler body, old lines 74-103, not rendered in the diff viewer)
+        event_type = event.get("type")
+
+        # Only handle app_mention and message events
+        if event_type not in ("app_mention", "message"):
+            return
+
+        channel_type = event.get("channel_type", "")
+
+        # Handle duplicate replies
+        if not reply_to_mentions_only and event_type == "app_mention":
+            return
+
+        # If reply_to_mentions_only is True, ignore every message that is not a DM
+        if reply_to_mentions_only and event_type == "message" and channel_type != "im":
+            return
+
+        # Extract event data
+        user = None
+        message_text = event.get("text", "")
+        channel_id = event.get("channel", "")
+        user = event.get("user")
+        if event.get("thread_ts"):
+            ts = event.get("thread_ts", "")
+        else:
+            ts = event.get("ts", "")
+
+        # Use the timestamp as the session id, so that each thread is a separate session
+        session_id = ts
+
+        if agent:
+            response = await agent.arun(message_text, user_id=user, session_id=session_id)
+        elif team:
+            response = await team.arun(message_text, user_id=user, session_id=session_id)  # type: ignore
+        elif workflow:
+            response = await workflow.arun(message_text, user_id=user, session_id=session_id)  # type: ignore
+
+        if response:
+            if hasattr(response, "reasoning_content") and response.reasoning_content:
+                _send_slack_message(
+                    channel=channel_id,
+                    message=f"Reasoning: \n{response.reasoning_content}",
+                    thread_ts=ts,
+                    italics=True,
+                )
+
+            _send_slack_message(channel=channel_id, message=response.content or "", thread_ts=ts)

     def _send_slack_message(channel: str, thread_ts: str, message: str, italics: bool = False):
         if len(message) <= 40000:
agno/os/interfaces/slack/slack.py
CHANGED
@@ -21,12 +21,14 @@ class Slack(BaseInterface):
         workflow: Optional[Workflow] = None,
         prefix: str = "/slack",
         tags: Optional[List[str]] = None,
+        reply_to_mentions_only: bool = True,
     ):
         self.agent = agent
         self.team = team
         self.workflow = workflow
         self.prefix = prefix
         self.tags = tags or ["Slack"]
+        self.reply_to_mentions_only = reply_to_mentions_only

         if not (self.agent or self.team or self.workflow):
             raise ValueError("Slack requires an agent, team or workflow")
@@ -34,6 +36,12 @@ class Slack(BaseInterface):
     def get_router(self) -> APIRouter:
         self.router = APIRouter(prefix=self.prefix, tags=self.tags)  # type: ignore

-        self.router = attach_routes(
+        self.router = attach_routes(
+            router=self.router,
+            agent=self.agent,
+            team=self.team,
+            workflow=self.workflow,
+            reply_to_mentions_only=self.reply_to_mentions_only,
+        )

         return self.router
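The Slack interface now exposes reply_to_mentions_only and forwards it to attach_routes; with the default True it only answers mentions and DMs, with False it answers regular channel messages and skips app_mention events to avoid duplicate replies. A hedged sketch; the Agent construction, the Slack import path, and the AgentOS interfaces wiring are assumptions:

from agno.agent import Agent
from agno.os import AgentOS
from agno.os.interfaces.slack import Slack  # assumed export path

agent = Agent(name="slack-agent")  # assumed minimal construction
slack = Slack(agent=agent, reply_to_mentions_only=False)  # reply to channel messages, not only mentions

agent_os = AgentOS(agents=[agent], interfaces=[slack])  # `interfaces=` wiring is assumed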
agno/os/router.py
CHANGED
@@ -106,10 +106,34 @@ async def _get_request_kwargs(request: Request, endpoint_func: Callable) -> Dict
     try:
         if isinstance(knowledge_filters, str):
             knowledge_filters_dict = json.loads(knowledge_filters)  # type: ignore
-
+
+            # Try to deserialize FilterExpr objects
+            from agno.filters import from_dict
+
+            # Check if it's a single FilterExpr dict or a list of FilterExpr dicts
+            if isinstance(knowledge_filters_dict, dict) and "op" in knowledge_filters_dict:
+                # Single FilterExpr - convert to list format
+                kwargs["knowledge_filters"] = [from_dict(knowledge_filters_dict)]
+            elif isinstance(knowledge_filters_dict, list):
+                # List of FilterExprs or mixed content
+                deserialized = []
+                for item in knowledge_filters_dict:
+                    if isinstance(item, dict) and "op" in item:
+                        deserialized.append(from_dict(item))
+                    else:
+                        # Keep non-FilterExpr items as-is
+                        deserialized.append(item)
+                kwargs["knowledge_filters"] = deserialized
+            else:
+                # Regular dict filter
+                kwargs["knowledge_filters"] = knowledge_filters_dict
     except json.JSONDecodeError:
         kwargs.pop("knowledge_filters")
         log_warning(f"Invalid knowledge_filters parameter couldn't be loaded: {knowledge_filters}")
+    except ValueError as e:
+        # Filter deserialization failed
+        kwargs.pop("knowledge_filters")
+        log_warning(f"Invalid FilterExpr in knowledge_filters: {e}")

     # Parse boolean and null values
     for key, value in kwargs.items():
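Callers can now pass serialized FilterExpr objects through the knowledge_filters request parameter; any JSON dict containing an "op" key is deserialized via agno.filters.from_dict, single expressions are wrapped in a list, and plain dict filters pass through unchanged. A hedged illustration of that dispatch only; the exact serialized shape of a FilterExpr (beyond containing an "op" key) is an assumption, not documented by this diff:

import json
from agno.filters import from_dict  # added in agno/filters.py in this release

raw = '{"op": "eq", "key": "department", "value": "engineering"}'  # hypothetical serialized FilterExpr
parsed = json.loads(raw)

if isinstance(parsed, dict) and "op" in parsed:
    knowledge_filters = [from_dict(parsed)]  # single expression becomes a list of FilterExpr
else:
    knowledge_filters = parsed               # plain dict filters pass through unchanged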
agno/os/routers/health.py
CHANGED
@@ -5,13 +5,13 @@ from fastapi import APIRouter
 from agno.os.schema import HealthResponse


-def get_health_router() -> APIRouter:
+def get_health_router(health_endpoint: str = "/health") -> APIRouter:
     router = APIRouter(tags=["Health"])

     started_time_stamp = datetime.now(timezone.utc).timestamp()

     @router.get(
-
+        health_endpoint,
         operation_id="health_check",
         summary="Health Check",
         description="Check the health status of the AgentOS API. Returns a simple status indicator.",
@@ -19,7 +19,9 @@ def get_health_router() -> APIRouter:
         responses={
             200: {
                 "description": "API is healthy and operational",
-                "content": {
+                "content": {
+                    "application/json": {"example": {"status": "ok", "instantiated_at": str(started_time_stamp)}}
+                },
             }
         },
     )
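get_health_router now takes the endpoint path as a parameter, and AgentOS passes "/health" explicitly. A minimal sketch mounting it under a different path on a standalone FastAPI app:

from fastapi import FastAPI
from agno.os.routers.health import get_health_router

app = FastAPI()
app.include_router(get_health_router(health_endpoint="/healthz"))  # GET /healthz -> {"status": "ok", ...}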
agno/os/routers/knowledge/knowledge.py
CHANGED
@@ -102,6 +102,8 @@ def attach_routes(router: APIRouter, knowledge_instances: List[Knowledge]) -> APIRouter:
         text_content: Optional[str] = Form(None, description="Raw text content to process"),
         reader_id: Optional[str] = Form(None, description="ID of the reader to use for content processing"),
         chunker: Optional[str] = Form(None, description="Chunking strategy to apply during processing"),
+        chunk_size: Optional[int] = Form(None, description="Chunk size to use for processing"),
+        chunk_overlap: Optional[int] = Form(None, description="Chunk overlap to use for processing"),
         db_id: Optional[str] = Query(default=None, description="Database ID to use for content storage"),
     ):
         knowledge = get_knowledge_instance_by_db_id(knowledge_instances, db_id)
@@ -172,7 +174,7 @@ def attach_routes(router: APIRouter, knowledge_instances: List[Knowledge]) -> APIRouter:
         content.content_hash = content_hash
         content.id = generate_id(content_hash)

-        background_tasks.add_task(process_content, knowledge, content, reader_id, chunker)
+        background_tasks.add_task(process_content, knowledge, content, reader_id, chunker, chunk_size, chunk_overlap)

         response = ContentResponseSchema(
             id=content.id,
@@ -801,36 +803,55 @@ def attach_routes(router: APIRouter, knowledge_instances: List[Knowledge]) -> APIRouter:
                     "key": "AgenticChunker",
                     "name": "AgenticChunker",
                     "description": "Chunking strategy that uses an LLM to determine natural breakpoints in the text",
+                    "metadata": {"chunk_size": 5000},
                 },
                 "DocumentChunker": {
                     "key": "DocumentChunker",
                     "name": "DocumentChunker",
                     "description": "A chunking strategy that splits text based on document structure like paragraphs and sections",
-                    (old lines 809-812, truncated in the diff viewer)
-                    "description": "Chunking strategy that recursively splits text into chunks by finding natural break points",
-                },
-                "SemanticChunker": {
-                    "key": "SemanticChunker",
-                    "name": "SemanticChunker",
-                    "description": "Chunking strategy that splits text into semantic chunks using chonkie",
+                    "metadata": {
+                        "chunk_size": 5000,
+                        "chunk_overlap": 0,
+                    },
                 },
                 "FixedSizeChunker": {
                     "key": "FixedSizeChunker",
                     "name": "FixedSizeChunker",
                     "description": "Chunking strategy that splits text into fixed-size chunks with optional overlap",
+                    "metadata": {
+                        "chunk_size": 5000,
+                        "chunk_overlap": 0,
+                    },
+                },
+                "MarkdownChunker": {
+                    "key": "MarkdownChunker",
+                    "name": "MarkdownChunker",
+                    "description": "A chunking strategy that splits markdown based on structure like headers, paragraphs and sections",
+                    "metadata": {
+                        "chunk_size": 5000,
+                        "chunk_overlap": 0,
+                    },
+                },
+                "RecursiveChunker": {
+                    "key": "RecursiveChunker",
+                    "name": "RecursiveChunker",
+                    "description": "Chunking strategy that recursively splits text into chunks by finding natural break points",
+                    "metadata": {
+                        "chunk_size": 5000,
+                        "chunk_overlap": 0,
+                    },
                 },
                 "RowChunker": {
                     "key": "RowChunker",
                     "name": "RowChunker",
                     "description": "RowChunking chunking strategy",
+                    "metadata": {},
                 },
-                "
-                "key": "
-                "name": "
-                "description": "
+                "SemanticChunker": {
+                    "key": "SemanticChunker",
+                    "name": "SemanticChunker",
+                    "description": "Chunking strategy that splits text into semantic chunks using chonkie",
+                    "metadata": {"chunk_size": 5000},
                 },
             },
             "vector_dbs": [
@@ -896,7 +917,10 @@ def attach_routes(router: APIRouter, knowledge_instances: List[Knowledge]) -> APIRouter:
             chunker_key = chunker_info.get("key")
             if chunker_key:
                 chunkers_dict[chunker_key] = ChunkerSchema(
-                    key=chunker_key,
+                    key=chunker_key,
+                    name=chunker_info.get("name"),
+                    description=chunker_info.get("description"),
+                    metadata=chunker_info.get("metadata", {}),
                 )

     vector_dbs = []
@@ -929,6 +953,8 @@ async def process_content(
     content: Content,
     reader_id: Optional[str] = None,
     chunker: Optional[str] = None,
+    chunk_size: Optional[int] = None,
+    chunk_overlap: Optional[int] = None,
 ):
     """Background task to process the content"""

@@ -951,7 +977,7 @@ async def process_content(
             content.reader = reader
         if chunker and content.reader:
             # Set the chunker name on the reader - let the reader handle it internally
-            content.reader.set_chunking_strategy_from_string(chunker)
+            content.reader.set_chunking_strategy_from_string(chunker, chunk_size=chunk_size, overlap=chunk_overlap)
             log_debug(f"Set chunking strategy: {chunker}")

         log_debug(f"Using reader: {content.reader.__class__.__name__}")
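The content upload route now accepts chunk_size and chunk_overlap form fields and threads them through process_content to the reader's chunking strategy. A hedged client-side sketch; the base URL and endpoint path are assumptions, only the form field names come from the diff above:

import httpx

response = httpx.post(
    "http://localhost:7777/knowledge/content",  # assumed AgentOS host and path
    data={
        "text_content": "Some raw text to index.",
        "chunker": "FixedSizeChunker",
        "chunk_size": 2000,
        "chunk_overlap": 100,
    },
)
print(response.json())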
agno/os/routers/knowledge/schemas.py
CHANGED
@@ -106,9 +106,10 @@ class ReaderSchema(BaseModel):


 class ChunkerSchema(BaseModel):
-    key: str
-    name: Optional[str] =
-    description: Optional[str] =
+    key: str
+    name: Optional[str] = None
+    description: Optional[str] = None
+    metadata: Optional[Dict[str, Any]] = None


 class VectorDbSchema(BaseModel):
agno/run/agent.py
CHANGED
@@ -696,7 +696,17 @@ class RunOutput:
             data = data.pop("run")

         events = data.pop("events", None)
-
+        final_events = []
+        for event in events or []:
+            if "agent_id" in event:
+                event = run_output_event_from_dict(event)
+            else:
+                # Use the factory from response.py for agent events
+                from agno.run.team import team_run_output_event_from_dict
+
+                event = team_run_output_event_from_dict(event)
+            final_events.append(event)
+        events = final_events

         messages = data.pop("messages", None)
         messages = [Message.from_dict(message) for message in messages] if messages else None
agno/run/base.py
CHANGED
@@ -1,9 +1,10 @@
 from dataclasses import asdict, dataclass
 from enum import Enum
-from typing import Any, Dict, Optional
+from typing import Any, Dict, List, Optional, Union

 from pydantic import BaseModel

+from agno.filters import FilterExpr
 from agno.media import Audio, Image, Video
 from agno.models.message import Citations, Message, MessageReferences
 from agno.models.metrics import Metrics
@@ -18,7 +19,7 @@ class RunContext:
     user_id: Optional[str] = None

     dependencies: Optional[Dict[str, Any]] = None
-    knowledge_filters: Optional[Dict[str, Any]] = None
+    knowledge_filters: Optional[Union[Dict[str, Any], List[FilterExpr]]] = None
     metadata: Optional[Dict[str, Any]] = None
     session_state: Optional[Dict[str, Any]] = None