agno 2.1.2__py3-none-any.whl → 2.3.13__py3-none-any.whl
- agno/agent/agent.py +5540 -2273
- agno/api/api.py +2 -0
- agno/api/os.py +1 -1
- agno/compression/__init__.py +3 -0
- agno/compression/manager.py +247 -0
- agno/culture/__init__.py +3 -0
- agno/culture/manager.py +956 -0
- agno/db/async_postgres/__init__.py +3 -0
- agno/db/base.py +689 -6
- agno/db/dynamo/dynamo.py +933 -37
- agno/db/dynamo/schemas.py +174 -10
- agno/db/dynamo/utils.py +63 -4
- agno/db/firestore/firestore.py +831 -9
- agno/db/firestore/schemas.py +51 -0
- agno/db/firestore/utils.py +102 -4
- agno/db/gcs_json/gcs_json_db.py +660 -12
- agno/db/gcs_json/utils.py +60 -26
- agno/db/in_memory/in_memory_db.py +287 -14
- agno/db/in_memory/utils.py +60 -2
- agno/db/json/json_db.py +590 -14
- agno/db/json/utils.py +60 -26
- agno/db/migrations/manager.py +199 -0
- agno/db/migrations/v1_to_v2.py +43 -13
- agno/db/migrations/versions/__init__.py +0 -0
- agno/db/migrations/versions/v2_3_0.py +938 -0
- agno/db/mongo/__init__.py +15 -1
- agno/db/mongo/async_mongo.py +2760 -0
- agno/db/mongo/mongo.py +879 -11
- agno/db/mongo/schemas.py +42 -0
- agno/db/mongo/utils.py +80 -8
- agno/db/mysql/__init__.py +2 -1
- agno/db/mysql/async_mysql.py +2912 -0
- agno/db/mysql/mysql.py +946 -68
- agno/db/mysql/schemas.py +72 -10
- agno/db/mysql/utils.py +198 -7
- agno/db/postgres/__init__.py +2 -1
- agno/db/postgres/async_postgres.py +2579 -0
- agno/db/postgres/postgres.py +942 -57
- agno/db/postgres/schemas.py +81 -18
- agno/db/postgres/utils.py +164 -2
- agno/db/redis/redis.py +671 -7
- agno/db/redis/schemas.py +50 -0
- agno/db/redis/utils.py +65 -7
- agno/db/schemas/__init__.py +2 -1
- agno/db/schemas/culture.py +120 -0
- agno/db/schemas/evals.py +1 -0
- agno/db/schemas/memory.py +17 -2
- agno/db/singlestore/schemas.py +63 -0
- agno/db/singlestore/singlestore.py +949 -83
- agno/db/singlestore/utils.py +60 -2
- agno/db/sqlite/__init__.py +2 -1
- agno/db/sqlite/async_sqlite.py +2911 -0
- agno/db/sqlite/schemas.py +62 -0
- agno/db/sqlite/sqlite.py +965 -46
- agno/db/sqlite/utils.py +169 -8
- agno/db/surrealdb/__init__.py +3 -0
- agno/db/surrealdb/metrics.py +292 -0
- agno/db/surrealdb/models.py +334 -0
- agno/db/surrealdb/queries.py +71 -0
- agno/db/surrealdb/surrealdb.py +1908 -0
- agno/db/surrealdb/utils.py +147 -0
- agno/db/utils.py +2 -0
- agno/eval/__init__.py +10 -0
- agno/eval/accuracy.py +75 -55
- agno/eval/agent_as_judge.py +861 -0
- agno/eval/base.py +29 -0
- agno/eval/performance.py +16 -7
- agno/eval/reliability.py +28 -16
- agno/eval/utils.py +35 -17
- agno/exceptions.py +27 -2
- agno/filters.py +354 -0
- agno/guardrails/prompt_injection.py +1 -0
- agno/hooks/__init__.py +3 -0
- agno/hooks/decorator.py +164 -0
- agno/integrations/discord/client.py +1 -1
- agno/knowledge/chunking/agentic.py +13 -10
- agno/knowledge/chunking/fixed.py +4 -1
- agno/knowledge/chunking/semantic.py +9 -4
- agno/knowledge/chunking/strategy.py +59 -15
- agno/knowledge/embedder/fastembed.py +1 -1
- agno/knowledge/embedder/nebius.py +1 -1
- agno/knowledge/embedder/ollama.py +8 -0
- agno/knowledge/embedder/openai.py +8 -8
- agno/knowledge/embedder/sentence_transformer.py +6 -2
- agno/knowledge/embedder/vllm.py +262 -0
- agno/knowledge/knowledge.py +1618 -318
- agno/knowledge/reader/base.py +6 -2
- agno/knowledge/reader/csv_reader.py +8 -10
- agno/knowledge/reader/docx_reader.py +5 -6
- agno/knowledge/reader/field_labeled_csv_reader.py +16 -20
- agno/knowledge/reader/json_reader.py +5 -4
- agno/knowledge/reader/markdown_reader.py +8 -8
- agno/knowledge/reader/pdf_reader.py +17 -19
- agno/knowledge/reader/pptx_reader.py +101 -0
- agno/knowledge/reader/reader_factory.py +32 -3
- agno/knowledge/reader/s3_reader.py +3 -3
- agno/knowledge/reader/tavily_reader.py +193 -0
- agno/knowledge/reader/text_reader.py +22 -10
- agno/knowledge/reader/web_search_reader.py +1 -48
- agno/knowledge/reader/website_reader.py +10 -10
- agno/knowledge/reader/wikipedia_reader.py +33 -1
- agno/knowledge/types.py +1 -0
- agno/knowledge/utils.py +72 -7
- agno/media.py +22 -6
- agno/memory/__init__.py +14 -1
- agno/memory/manager.py +544 -83
- agno/memory/strategies/__init__.py +15 -0
- agno/memory/strategies/base.py +66 -0
- agno/memory/strategies/summarize.py +196 -0
- agno/memory/strategies/types.py +37 -0
- agno/models/aimlapi/aimlapi.py +17 -0
- agno/models/anthropic/claude.py +515 -40
- agno/models/aws/bedrock.py +102 -21
- agno/models/aws/claude.py +131 -274
- agno/models/azure/ai_foundry.py +41 -19
- agno/models/azure/openai_chat.py +39 -8
- agno/models/base.py +1249 -525
- agno/models/cerebras/cerebras.py +91 -21
- agno/models/cerebras/cerebras_openai.py +21 -2
- agno/models/cohere/chat.py +40 -6
- agno/models/cometapi/cometapi.py +18 -1
- agno/models/dashscope/dashscope.py +2 -3
- agno/models/deepinfra/deepinfra.py +18 -1
- agno/models/deepseek/deepseek.py +69 -3
- agno/models/fireworks/fireworks.py +18 -1
- agno/models/google/gemini.py +877 -80
- agno/models/google/utils.py +22 -0
- agno/models/groq/groq.py +51 -18
- agno/models/huggingface/huggingface.py +17 -6
- agno/models/ibm/watsonx.py +16 -6
- agno/models/internlm/internlm.py +18 -1
- agno/models/langdb/langdb.py +13 -1
- agno/models/litellm/chat.py +44 -9
- agno/models/litellm/litellm_openai.py +18 -1
- agno/models/message.py +28 -5
- agno/models/meta/llama.py +47 -14
- agno/models/meta/llama_openai.py +22 -17
- agno/models/mistral/mistral.py +8 -4
- agno/models/nebius/nebius.py +6 -7
- agno/models/nvidia/nvidia.py +20 -3
- agno/models/ollama/chat.py +24 -8
- agno/models/openai/chat.py +104 -29
- agno/models/openai/responses.py +101 -81
- agno/models/openrouter/openrouter.py +60 -3
- agno/models/perplexity/perplexity.py +17 -1
- agno/models/portkey/portkey.py +7 -6
- agno/models/requesty/requesty.py +24 -4
- agno/models/response.py +73 -2
- agno/models/sambanova/sambanova.py +20 -3
- agno/models/siliconflow/siliconflow.py +19 -2
- agno/models/together/together.py +20 -3
- agno/models/utils.py +254 -8
- agno/models/vercel/v0.py +20 -3
- agno/models/vertexai/__init__.py +0 -0
- agno/models/vertexai/claude.py +190 -0
- agno/models/vllm/vllm.py +19 -14
- agno/models/xai/xai.py +19 -2
- agno/os/app.py +549 -152
- agno/os/auth.py +190 -3
- agno/os/config.py +23 -0
- agno/os/interfaces/a2a/router.py +8 -11
- agno/os/interfaces/a2a/utils.py +1 -1
- agno/os/interfaces/agui/router.py +18 -3
- agno/os/interfaces/agui/utils.py +152 -39
- agno/os/interfaces/slack/router.py +55 -37
- agno/os/interfaces/slack/slack.py +9 -1
- agno/os/interfaces/whatsapp/router.py +0 -1
- agno/os/interfaces/whatsapp/security.py +3 -1
- agno/os/mcp.py +110 -52
- agno/os/middleware/__init__.py +2 -0
- agno/os/middleware/jwt.py +676 -112
- agno/os/router.py +40 -1478
- agno/os/routers/agents/__init__.py +3 -0
- agno/os/routers/agents/router.py +599 -0
- agno/os/routers/agents/schema.py +261 -0
- agno/os/routers/evals/evals.py +96 -39
- agno/os/routers/evals/schemas.py +65 -33
- agno/os/routers/evals/utils.py +80 -10
- agno/os/routers/health.py +10 -4
- agno/os/routers/knowledge/knowledge.py +196 -38
- agno/os/routers/knowledge/schemas.py +82 -22
- agno/os/routers/memory/memory.py +279 -52
- agno/os/routers/memory/schemas.py +46 -17
- agno/os/routers/metrics/metrics.py +20 -8
- agno/os/routers/metrics/schemas.py +16 -16
- agno/os/routers/session/session.py +462 -34
- agno/os/routers/teams/__init__.py +3 -0
- agno/os/routers/teams/router.py +512 -0
- agno/os/routers/teams/schema.py +257 -0
- agno/os/routers/traces/__init__.py +3 -0
- agno/os/routers/traces/schemas.py +414 -0
- agno/os/routers/traces/traces.py +499 -0
- agno/os/routers/workflows/__init__.py +3 -0
- agno/os/routers/workflows/router.py +624 -0
- agno/os/routers/workflows/schema.py +75 -0
- agno/os/schema.py +256 -693
- agno/os/scopes.py +469 -0
- agno/os/utils.py +514 -36
- agno/reasoning/anthropic.py +80 -0
- agno/reasoning/gemini.py +73 -0
- agno/reasoning/openai.py +5 -0
- agno/reasoning/vertexai.py +76 -0
- agno/run/__init__.py +6 -0
- agno/run/agent.py +155 -32
- agno/run/base.py +55 -3
- agno/run/requirement.py +181 -0
- agno/run/team.py +125 -38
- agno/run/workflow.py +72 -18
- agno/session/agent.py +102 -89
- agno/session/summary.py +56 -15
- agno/session/team.py +164 -90
- agno/session/workflow.py +405 -40
- agno/table.py +10 -0
- agno/team/team.py +3974 -1903
- agno/tools/dalle.py +2 -4
- agno/tools/eleven_labs.py +23 -25
- agno/tools/exa.py +21 -16
- agno/tools/file.py +153 -23
- agno/tools/file_generation.py +16 -10
- agno/tools/firecrawl.py +15 -7
- agno/tools/function.py +193 -38
- agno/tools/gmail.py +238 -14
- agno/tools/google_drive.py +271 -0
- agno/tools/googlecalendar.py +36 -8
- agno/tools/googlesheets.py +20 -5
- agno/tools/jira.py +20 -0
- agno/tools/mcp/__init__.py +10 -0
- agno/tools/mcp/mcp.py +331 -0
- agno/tools/mcp/multi_mcp.py +347 -0
- agno/tools/mcp/params.py +24 -0
- agno/tools/mcp_toolbox.py +3 -3
- agno/tools/models/nebius.py +5 -5
- agno/tools/models_labs.py +20 -10
- agno/tools/nano_banana.py +151 -0
- agno/tools/notion.py +204 -0
- agno/tools/parallel.py +314 -0
- agno/tools/postgres.py +76 -36
- agno/tools/redshift.py +406 -0
- agno/tools/scrapegraph.py +1 -1
- agno/tools/shopify.py +1519 -0
- agno/tools/slack.py +18 -3
- agno/tools/spotify.py +919 -0
- agno/tools/tavily.py +146 -0
- agno/tools/toolkit.py +25 -0
- agno/tools/workflow.py +8 -1
- agno/tools/yfinance.py +12 -11
- agno/tracing/__init__.py +12 -0
- agno/tracing/exporter.py +157 -0
- agno/tracing/schemas.py +276 -0
- agno/tracing/setup.py +111 -0
- agno/utils/agent.py +938 -0
- agno/utils/cryptography.py +22 -0
- agno/utils/dttm.py +33 -0
- agno/utils/events.py +151 -3
- agno/utils/gemini.py +15 -5
- agno/utils/hooks.py +118 -4
- agno/utils/http.py +113 -2
- agno/utils/knowledge.py +12 -5
- agno/utils/log.py +1 -0
- agno/utils/mcp.py +92 -2
- agno/utils/media.py +187 -1
- agno/utils/merge_dict.py +3 -3
- agno/utils/message.py +60 -0
- agno/utils/models/ai_foundry.py +9 -2
- agno/utils/models/claude.py +49 -14
- agno/utils/models/cohere.py +9 -2
- agno/utils/models/llama.py +9 -2
- agno/utils/models/mistral.py +4 -2
- agno/utils/print_response/agent.py +109 -16
- agno/utils/print_response/team.py +223 -30
- agno/utils/print_response/workflow.py +251 -34
- agno/utils/streamlit.py +1 -1
- agno/utils/team.py +98 -9
- agno/utils/tokens.py +657 -0
- agno/vectordb/base.py +39 -7
- agno/vectordb/cassandra/cassandra.py +21 -5
- agno/vectordb/chroma/chromadb.py +43 -12
- agno/vectordb/clickhouse/clickhousedb.py +21 -5
- agno/vectordb/couchbase/couchbase.py +29 -5
- agno/vectordb/lancedb/lance_db.py +92 -181
- agno/vectordb/langchaindb/langchaindb.py +24 -4
- agno/vectordb/lightrag/lightrag.py +17 -3
- agno/vectordb/llamaindex/llamaindexdb.py +25 -5
- agno/vectordb/milvus/milvus.py +50 -37
- agno/vectordb/mongodb/__init__.py +7 -1
- agno/vectordb/mongodb/mongodb.py +36 -30
- agno/vectordb/pgvector/pgvector.py +201 -77
- agno/vectordb/pineconedb/pineconedb.py +41 -23
- agno/vectordb/qdrant/qdrant.py +67 -54
- agno/vectordb/redis/__init__.py +9 -0
- agno/vectordb/redis/redisdb.py +682 -0
- agno/vectordb/singlestore/singlestore.py +50 -29
- agno/vectordb/surrealdb/surrealdb.py +31 -41
- agno/vectordb/upstashdb/upstashdb.py +34 -6
- agno/vectordb/weaviate/weaviate.py +53 -14
- agno/workflow/__init__.py +2 -0
- agno/workflow/agent.py +299 -0
- agno/workflow/condition.py +120 -18
- agno/workflow/loop.py +77 -10
- agno/workflow/parallel.py +231 -143
- agno/workflow/router.py +118 -17
- agno/workflow/step.py +609 -170
- agno/workflow/steps.py +73 -6
- agno/workflow/types.py +96 -21
- agno/workflow/workflow.py +2039 -262
- {agno-2.1.2.dist-info → agno-2.3.13.dist-info}/METADATA +201 -66
- agno-2.3.13.dist-info/RECORD +613 -0
- agno/tools/googlesearch.py +0 -98
- agno/tools/mcp.py +0 -679
- agno/tools/memori.py +0 -339
- agno-2.1.2.dist-info/RECORD +0 -543
- {agno-2.1.2.dist-info → agno-2.3.13.dist-info}/WHEEL +0 -0
- {agno-2.1.2.dist-info → agno-2.3.13.dist-info}/licenses/LICENSE +0 -0
- {agno-2.1.2.dist-info → agno-2.3.13.dist-info}/top_level.txt +0 -0
agno/db/mongo/mongo.py
CHANGED
@@ -1,8 +1,11 @@
 import time
 from datetime import date, datetime, timedelta, timezone
-from typing import Any, Dict, List, Optional, Tuple, Union
+from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
 from uuid import uuid4
 
+if TYPE_CHECKING:
+    from agno.tracing.schemas import Span, Trace
+
 from agno.db.base import BaseDb, SessionType
 from agno.db.mongo.utils import (
     apply_pagination,
@@ -10,9 +13,12 @@ from agno.db.mongo.utils import (
     bulk_upsert_metrics,
     calculate_date_metrics,
     create_collection_indexes,
+    deserialize_cultural_knowledge_from_db,
     fetch_all_sessions_data,
     get_dates_to_calculate_metrics_for,
+    serialize_cultural_knowledge_for_db,
 )
+from agno.db.schemas.culture import CulturalKnowledge
 from agno.db.schemas.evals import EvalFilterType, EvalRunRecord, EvalType
 from agno.db.schemas.knowledge import KnowledgeRow
 from agno.db.schemas.memory import UserMemory
@@ -41,6 +47,9 @@ class MongoDb(BaseDb):
         metrics_collection: Optional[str] = None,
         eval_collection: Optional[str] = None,
         knowledge_collection: Optional[str] = None,
+        culture_collection: Optional[str] = None,
+        traces_collection: Optional[str] = None,
+        spans_collection: Optional[str] = None,
         id: Optional[str] = None,
     ):
         """
@@ -55,6 +64,9 @@ class MongoDb(BaseDb):
             metrics_collection (Optional[str]): Name of the collection to store metrics.
             eval_collection (Optional[str]): Name of the collection to store evaluation runs.
             knowledge_collection (Optional[str]): Name of the collection to store knowledge documents.
+            culture_collection (Optional[str]): Name of the collection to store cultural knowledge.
+            traces_collection (Optional[str]): Name of the collection to store traces.
+            spans_collection (Optional[str]): Name of the collection to store spans.
             id (Optional[str]): ID of the database.
 
         Raises:
@@ -73,6 +85,9 @@ class MongoDb(BaseDb):
             metrics_table=metrics_collection,
             eval_table=eval_collection,
             knowledge_table=knowledge_collection,
+            culture_table=culture_collection,
+            traces_table=traces_collection,
+            spans_table=spans_collection,
         )
 
         _client: Optional[MongoClient] = db_client
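
The three new constructor arguments above wire dedicated collections for cultural knowledge, traces, and spans into BaseDb, alongside the existing session/memory/metrics/eval/knowledge collections. A minimal sketch of opting in, assuming MongoDb is imported from agno.db.mongo as in earlier releases; the collection names are illustrative, and every keyword argument other than the three new ones is an assumption based on the surrounding code:

    from agno.db.mongo import MongoDb

    db = MongoDb(
        db_url="mongodb://localhost:27017",  # assumed parameter, per earlier releases
        db_name="agno",                      # assumed parameter
        session_collection="agno_sessions",  # assumed parameter
        culture_collection="agno_culture",   # new in 2.3.x
        traces_collection="agno_traces",     # new in 2.3.x
        spans_collection="agno_spans",       # new in 2.3.x
    )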
@@ -94,6 +109,31 @@ class MongoDb(BaseDb):
         return self._database
 
     # -- DB methods --
+    def table_exists(self, table_name: str) -> bool:
+        """Check if a collection with the given name exists in the MongoDB database.
+
+        Args:
+            table_name: Name of the collection to check
+
+        Returns:
+            bool: True if the collection exists in the database, False otherwise
+        """
+        return table_name in self.database.list_collection_names()
+
+    def _create_all_tables(self):
+        """Create all configured MongoDB collections if they don't exist."""
+        collections_to_create = [
+            ("sessions", self.session_table_name),
+            ("memories", self.memory_table_name),
+            ("metrics", self.metrics_table_name),
+            ("evals", self.eval_table_name),
+            ("knowledge", self.knowledge_table_name),
+            ("culture", self.culture_table_name),
+        ]
+
+        for collection_type, collection_name in collections_to_create:
+            if collection_name and not self.table_exists(collection_name):
+                self._get_collection(collection_type, create_collection_if_not_found=True)
 
     def _get_collection(
         self, table_type: str, create_collection_if_not_found: Optional[bool] = True
@@ -161,6 +201,39 @@ class MongoDb(BaseDb):
                 )
             return self.knowledge_collection
 
+        if table_type == "culture":
+            if not hasattr(self, "culture_collection"):
+                if self.culture_table_name is None:
+                    raise ValueError("Culture collection was not provided on initialization")
+                self.culture_collection = self._get_or_create_collection(
+                    collection_name=self.culture_table_name,
+                    collection_type="culture",
+                    create_collection_if_not_found=create_collection_if_not_found,
+                )
+            return self.culture_collection
+
+        if table_type == "traces":
+            if not hasattr(self, "traces_collection"):
+                if self.trace_table_name is None:
+                    raise ValueError("Traces collection was not provided on initialization")
+                self.traces_collection = self._get_or_create_collection(
+                    collection_name=self.trace_table_name,
+                    collection_type="traces",
+                    create_collection_if_not_found=create_collection_if_not_found,
+                )
+            return self.traces_collection
+
+        if table_type == "spans":
+            if not hasattr(self, "spans_collection"):
+                if self.span_table_name is None:
+                    raise ValueError("Spans collection was not provided on initialization")
+                self.spans_collection = self._get_or_create_collection(
+                    collection_name=self.span_table_name,
+                    collection_type="spans",
+                    create_collection_if_not_found=create_collection_if_not_found,
+                )
+            return self.spans_collection
+
         raise ValueError(f"Unknown table type: {table_type}")
 
     def _get_or_create_collection(
@@ -194,6 +267,14 @@ class MongoDb(BaseDb):
             log_error(f"Error getting collection {collection_name}: {e}")
             raise
 
+    def get_latest_schema_version(self):
+        """Get the latest version of the database schema."""
+        pass
+
+    def upsert_schema_version(self, version: str) -> None:
+        """Upsert the schema version into the database."""
+        pass
+
     # -- Session methods --
 
     def delete_session(self, session_id: str) -> bool:
@@ -274,8 +355,6 @@ class MongoDb(BaseDb):
             query = {"session_id": session_id}
             if user_id is not None:
                 query["user_id"] = user_id
-            if session_type is not None:
-                query["session_type"] = session_type
 
             result = collection.find_one(query)
             if result is None:
@@ -588,7 +667,7 @@ class MongoDb(BaseDb):
             raise e
 
     def upsert_sessions(
-        self, sessions: List[Session], deserialize: Optional[bool] = True
+        self, sessions: List[Session], deserialize: Optional[bool] = True, preserve_updated_at: bool = False
     ) -> List[Union[Session, Dict[str, Any]]]:
         """
         Bulk upsert multiple sessions for improved performance on large datasets.
@@ -596,6 +675,7 @@ class MongoDb(BaseDb):
         Args:
             sessions (List[Session]): List of sessions to upsert.
             deserialize (Optional[bool]): Whether to deserialize the sessions. Defaults to True.
+            preserve_updated_at (bool): If True, preserve the updated_at from the session object.
 
         Returns:
             List[Union[Session, Dict[str, Any]]]: List of upserted sessions.
@@ -629,6 +709,9 @@ class MongoDb(BaseDb):
 
                 session_dict = session.to_dict()
 
+                # Use preserved updated_at if flag is set and value exists, otherwise use current time
+                updated_at = session_dict.get("updated_at") if preserve_updated_at else int(time.time())
+
                 if isinstance(session, AgentSession):
                     record = {
                         "session_id": session_dict.get("session_id"),
@@ -641,7 +724,7 @@ class MongoDb(BaseDb):
                         "summary": session_dict.get("summary"),
                         "metadata": session_dict.get("metadata"),
                         "created_at": session_dict.get("created_at"),
-                        "updated_at":
+                        "updated_at": updated_at,
                     }
                 elif isinstance(session, TeamSession):
                     record = {
@@ -655,7 +738,7 @@ class MongoDb(BaseDb):
                         "summary": session_dict.get("summary"),
                         "metadata": session_dict.get("metadata"),
                         "created_at": session_dict.get("created_at"),
-                        "updated_at":
+                        "updated_at": updated_at,
                     }
                 elif isinstance(session, WorkflowSession):
                     record = {
@@ -669,7 +752,7 @@ class MongoDb(BaseDb):
                         "summary": session_dict.get("summary"),
                         "metadata": session_dict.get("metadata"),
                         "created_at": session_dict.get("created_at"),
-                        "updated_at":
+                        "updated_at": updated_at,
                     }
                 else:
                     continue
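
The three record hunks above all route through the new updated_at variable: by default upsert_sessions stamps each record with the current time, while preserve_updated_at=True keeps the timestamp already carried by the session dict, which matters when replaying or migrating existing data. A hedged sketch, assuming db is a configured MongoDb and sessions holds AgentSession/TeamSession/WorkflowSession objects:

    # Normal write path: updated_at becomes int(time.time()) per record.
    db.upsert_sessions(sessions)

    # Migration-style copy: keep each session's original updated_at.
    db.upsert_sessions(sessions, preserve_updated_at=True)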
@@ -932,12 +1015,14 @@ class MongoDb(BaseDb):
         self,
         limit: Optional[int] = None,
         page: Optional[int] = None,
+        user_id: Optional[str] = None,
     ) -> Tuple[List[Dict[str, Any]], int]:
         """Get user memories stats.
 
         Args:
             limit (Optional[int]): The limit of the memories to get.
             page (Optional[int]): The page number to get.
+            user_id (Optional[str]): User ID for filtering.
 
         Returns:
             Tuple[List[Dict[str, Any]], int]: A tuple containing the memories stats and the total count.
@@ -950,9 +1035,11 @@ class MongoDb(BaseDb):
             if collection is None:
                 return [], 0
 
-            match_stage = {"user_id": {"$ne": None}}
+            match_stage: Dict[str, Any] = {"user_id": {"$ne": None}}
+            if user_id is not None:
+                match_stage["user_id"] = user_id
 
-            pipeline = [
+            pipeline: List[Dict[str, Any]] = [
                 {"$match": match_stage},
                 {
                     "$group": {
@@ -1044,7 +1131,7 @@ class MongoDb(BaseDb):
             raise e
 
     def upsert_memories(
-        self, memories: List[UserMemory], deserialize: Optional[bool] = True
+        self, memories: List[UserMemory], deserialize: Optional[bool] = True, preserve_updated_at: bool = False
     ) -> List[Union[UserMemory, Dict[str, Any]]]:
         """
         Bulk upsert multiple user memories for improved performance on large datasets.
@@ -1079,6 +1166,7 @@ class MongoDb(BaseDb):
             operations = []
             results: List[Union[UserMemory, Dict[str, Any]]] = []
 
+            current_time = int(time.time())
             for memory in memories:
                 if memory is None:
                     continue
@@ -1086,14 +1174,20 @@ class MongoDb(BaseDb):
                 if memory.memory_id is None:
                     memory.memory_id = str(uuid4())
 
+                # Use preserved updated_at if flag is set and value exists, otherwise use current time
+                updated_at = memory.updated_at if preserve_updated_at else current_time
+
                 record = {
                     "user_id": memory.user_id,
                     "agent_id": memory.agent_id,
                     "team_id": memory.team_id,
                     "memory_id": memory.memory_id,
                     "memory": memory.memory,
+                    "input": memory.input,
+                    "feedback": memory.feedback,
                     "topics": memory.topics,
-                    "
+                    "created_at": memory.created_at,
+                    "updated_at": updated_at,
                 }
 
                 operations.append(ReplaceOne(filter={"memory_id": memory.memory_id}, replacement=record, upsert=True))
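
Memory records now also persist input, feedback, and created_at, and upsert_memories gains the same preserve_updated_at flag as sessions. A sketch under stated assumptions: the field names come from the record built above, but the UserMemory constructor call and the stats method name (get_user_memory_stats, per agno's BaseDb) are assumptions:

    from agno.db.schemas.memory import UserMemory

    memory = UserMemory(
        memory="Prefers concise answers",
        user_id="user-123",                 # illustrative ID
        input="Please keep replies short",  # now persisted
        feedback="helpful",                 # now persisted
    )
    db.upsert_memories([memory])

    # The stats query can now be scoped to a single user.
    stats, total = db.get_user_memory_stats(limit=10, page=1, user_id="user-123")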
@@ -1145,6 +1239,211 @@ class MongoDb(BaseDb):
             log_error(f"Exception deleting all memories: {e}")
             raise e
 
+    # -- Cultural Knowledge methods --
+    def clear_cultural_knowledge(self) -> None:
+        """Delete all cultural knowledge from the database.
+
+        Raises:
+            Exception: If an error occurs during deletion.
+        """
+        try:
+            collection = self._get_collection(table_type="culture")
+            if collection is None:
+                return
+
+            collection.delete_many({})
+
+        except Exception as e:
+            log_error(f"Exception deleting all cultural knowledge: {e}")
+            raise e
+
+    def delete_cultural_knowledge(self, id: str) -> None:
+        """Delete cultural knowledge by ID.
+
+        Args:
+            id (str): The ID of the cultural knowledge to delete.
+
+        Raises:
+            Exception: If an error occurs during deletion.
+        """
+        try:
+            collection = self._get_collection(table_type="culture")
+            if collection is None:
+                return
+
+            collection.delete_one({"id": id})
+            log_debug(f"Deleted cultural knowledge with ID: {id}")
+
+        except Exception as e:
+            log_error(f"Error deleting cultural knowledge: {e}")
+            raise e
+
+    def get_cultural_knowledge(
+        self, id: str, deserialize: Optional[bool] = True
+    ) -> Optional[Union[CulturalKnowledge, Dict[str, Any]]]:
+        """Get cultural knowledge by ID.
+
+        Args:
+            id (str): The ID of the cultural knowledge to retrieve.
+            deserialize (Optional[bool]): Whether to deserialize to CulturalKnowledge object. Defaults to True.
+
+        Returns:
+            Optional[Union[CulturalKnowledge, Dict[str, Any]]]: The cultural knowledge if found, None otherwise.
+
+        Raises:
+            Exception: If an error occurs during retrieval.
+        """
+        try:
+            collection = self._get_collection(table_type="culture")
+            if collection is None:
+                return None
+
+            result = collection.find_one({"id": id})
+            if result is None:
+                return None
+
+            # Remove MongoDB's _id field
+            result_filtered = {k: v for k, v in result.items() if k != "_id"}
+
+            if not deserialize:
+                return result_filtered
+
+            return deserialize_cultural_knowledge_from_db(result_filtered)
+
+        except Exception as e:
+            log_error(f"Error getting cultural knowledge: {e}")
+            raise e
+
+    def get_all_cultural_knowledge(
+        self,
+        agent_id: Optional[str] = None,
+        team_id: Optional[str] = None,
+        name: Optional[str] = None,
+        limit: Optional[int] = None,
+        page: Optional[int] = None,
+        sort_by: Optional[str] = None,
+        sort_order: Optional[str] = None,
+        deserialize: Optional[bool] = True,
+    ) -> Union[List[CulturalKnowledge], Tuple[List[Dict[str, Any]], int]]:
+        """Get all cultural knowledge with filtering and pagination.
+
+        Args:
+            agent_id (Optional[str]): Filter by agent ID.
+            team_id (Optional[str]): Filter by team ID.
+            name (Optional[str]): Filter by name (case-insensitive partial match).
+            limit (Optional[int]): Maximum number of results to return.
+            page (Optional[int]): Page number for pagination.
+            sort_by (Optional[str]): Field to sort by.
+            sort_order (Optional[str]): Sort order ('asc' or 'desc').
+            deserialize (Optional[bool]): Whether to deserialize to CulturalKnowledge objects. Defaults to True.
+
+        Returns:
+            Union[List[CulturalKnowledge], Tuple[List[Dict[str, Any]], int]]:
+                - When deserialize=True: List of CulturalKnowledge objects
+                - When deserialize=False: Tuple with list of dictionaries and total count
+
+        Raises:
+            Exception: If an error occurs during retrieval.
+        """
+        try:
+            collection = self._get_collection(table_type="culture")
+            if collection is None:
+                if not deserialize:
+                    return [], 0
+                return []
+
+            # Build query
+            query: Dict[str, Any] = {}
+            if agent_id is not None:
+                query["agent_id"] = agent_id
+            if team_id is not None:
+                query["team_id"] = team_id
+            if name is not None:
+                query["name"] = {"$regex": name, "$options": "i"}
+
+            # Get total count for pagination
+            total_count = collection.count_documents(query)
+
+            # Apply sorting
+            sort_criteria = apply_sorting({}, sort_by, sort_order)
+
+            # Apply pagination
+            query_args = apply_pagination({}, limit, page)
+
+            cursor = collection.find(query)
+            if sort_criteria:
+                cursor = cursor.sort(sort_criteria)
+            if query_args.get("skip"):
+                cursor = cursor.skip(query_args["skip"])
+            if query_args.get("limit"):
+                cursor = cursor.limit(query_args["limit"])
+
+            # Remove MongoDB's _id field from all results
+            results_filtered = [{k: v for k, v in item.items() if k != "_id"} for item in cursor]
+
+            if not deserialize:
+                return results_filtered, total_count
+
+            return [deserialize_cultural_knowledge_from_db(item) for item in results_filtered]
+
+        except Exception as e:
+            log_error(f"Error getting all cultural knowledge: {e}")
+            raise e
+
+    def upsert_cultural_knowledge(
+        self, cultural_knowledge: CulturalKnowledge, deserialize: Optional[bool] = True
+    ) -> Optional[Union[CulturalKnowledge, Dict[str, Any]]]:
+        """Upsert cultural knowledge in MongoDB.
+
+        Args:
+            cultural_knowledge (CulturalKnowledge): The cultural knowledge to upsert.
+            deserialize (Optional[bool]): Whether to deserialize the result. Defaults to True.
+
+        Returns:
+            Optional[Union[CulturalKnowledge, Dict[str, Any]]]: The upserted cultural knowledge.
+
+        Raises:
+            Exception: If an error occurs during upsert.
+        """
+        try:
+            collection = self._get_collection(table_type="culture", create_collection_if_not_found=True)
+            if collection is None:
+                return None
+
+            # Serialize content, categories, and notes into a dict for DB storage
+            content_dict = serialize_cultural_knowledge_for_db(cultural_knowledge)
+
+            # Create the document with serialized content
+            update_doc = {
+                "id": cultural_knowledge.id,
+                "name": cultural_knowledge.name,
+                "summary": cultural_knowledge.summary,
+                "content": content_dict if content_dict else None,
+                "metadata": cultural_knowledge.metadata,
+                "input": cultural_knowledge.input,
+                "created_at": cultural_knowledge.created_at,
+                "updated_at": int(time.time()),
+                "agent_id": cultural_knowledge.agent_id,
+                "team_id": cultural_knowledge.team_id,
+            }
+
+            result = collection.replace_one({"id": cultural_knowledge.id}, update_doc, upsert=True)
+
+            if result.upserted_id:
+                update_doc["_id"] = result.upserted_id
+
+            # Remove MongoDB's _id field
+            doc_filtered = {k: v for k, v in update_doc.items() if k != "_id"}
+
+            if not deserialize:
+                return doc_filtered
+
+            return deserialize_cultural_knowledge_from_db(doc_filtered)
+
+        except Exception as e:
+            log_error(f"Error upserting cultural knowledge: {e}")
+            raise e
+
     # -- Metrics methods --
 
     def _get_all_sessions_for_metrics_calculation(
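
Together these five methods give the culture collection the same CRUD surface the memory collection already has. A brief usage sketch; the CulturalKnowledge fields mirror what upsert_cultural_knowledge serializes above, but the constructor call and all values are illustrative:

    from agno.db.schemas.culture import CulturalKnowledge

    ck = CulturalKnowledge(
        id="ck-1",  # illustrative
        name="Code review norms",
        summary="Reviews focus on correctness first, style second.",
        agent_id="agent-42",
    )
    db.upsert_cultural_knowledge(ck)

    # Case-insensitive name filter with pagination; raw dicts plus total count.
    rows, total = db.get_all_cultural_knowledge(name="review", limit=20, page=1, deserialize=False)

    db.delete_cultural_knowledge(id="ck-1")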
@@ -1727,3 +2026,572 @@ class MongoDb(BaseDb):
         for memory in memories:
             self.upsert_user_memory(memory)
         log_info(f"Migrated {len(memories)} memories to collection: {self.memory_table_name}")
+
+    # --- Traces ---
+    def _get_component_level(
+        self, workflow_id: Optional[str], team_id: Optional[str], agent_id: Optional[str], name: str
+    ) -> int:
+        """Get the component level for a trace based on its context.
+
+        Component levels (higher = more important):
+        - 3: Workflow root (.run or .arun with workflow_id)
+        - 2: Team root (.run or .arun with team_id)
+        - 1: Agent root (.run or .arun with agent_id)
+        - 0: Child span (not a root)
+
+        Args:
+            workflow_id: The workflow ID of the trace.
+            team_id: The team ID of the trace.
+            agent_id: The agent ID of the trace.
+            name: The name of the trace.
+
+        Returns:
+            int: The component level (0-3).
+        """
+        # Check if name indicates a root span
+        is_root_name = ".run" in name or ".arun" in name
+
+        if not is_root_name:
+            return 0  # Child span (not a root)
+        elif workflow_id:
+            return 3  # Workflow root
+        elif team_id:
+            return 2  # Team root
+        elif agent_id:
+            return 1  # Agent root
+        else:
+            return 0  # Unknown
+
+    def upsert_trace(self, trace: "Trace") -> None:
+        """Create or update a single trace record in the database.
+
+        Uses MongoDB's update_one with upsert=True and aggregation pipeline
+        to handle concurrent inserts atomically and avoid race conditions.
+
+        Args:
+            trace: The Trace object to store (one per trace_id).
+        """
+        try:
+            collection = self._get_collection(table_type="traces", create_collection_if_not_found=True)
+            if collection is None:
+                return
+
+            trace_dict = trace.to_dict()
+            trace_dict.pop("total_spans", None)
+            trace_dict.pop("error_count", None)
+
+            # Calculate the component level for the new trace
+            new_level = self._get_component_level(trace.workflow_id, trace.team_id, trace.agent_id, trace.name)
+
+            # Use MongoDB aggregation pipeline update for atomic upsert
+            # This allows conditional logic within a single atomic operation
+            pipeline: List[Dict[str, Any]] = [
+                {
+                    "$set": {
+                        # Always update these fields
+                        "status": trace.status,
+                        "created_at": {"$ifNull": ["$created_at", trace_dict.get("created_at")]},
+                        # Use $min for start_time (keep earliest)
+                        "start_time": {
+                            "$cond": {
+                                "if": {"$eq": [{"$type": "$start_time"}, "missing"]},
+                                "then": trace_dict.get("start_time"),
+                                "else": {"$min": ["$start_time", trace_dict.get("start_time")]},
+                            }
+                        },
+                        # Use $max for end_time (keep latest)
+                        "end_time": {
+                            "$cond": {
+                                "if": {"$eq": [{"$type": "$end_time"}, "missing"]},
+                                "then": trace_dict.get("end_time"),
+                                "else": {"$max": ["$end_time", trace_dict.get("end_time")]},
+                            }
+                        },
+                        # Preserve existing non-null context values using $ifNull
+                        "run_id": {"$ifNull": [trace.run_id, "$run_id"]},
+                        "session_id": {"$ifNull": [trace.session_id, "$session_id"]},
+                        "user_id": {"$ifNull": [trace.user_id, "$user_id"]},
+                        "agent_id": {"$ifNull": [trace.agent_id, "$agent_id"]},
+                        "team_id": {"$ifNull": [trace.team_id, "$team_id"]},
+                        "workflow_id": {"$ifNull": [trace.workflow_id, "$workflow_id"]},
+                    }
+                },
+                {
+                    "$set": {
+                        # Calculate duration_ms from the (potentially updated) start_time and end_time
+                        # MongoDB stores dates as strings in ISO format, so we need to parse them
+                        "duration_ms": {
+                            "$cond": {
+                                "if": {
+                                    "$and": [
+                                        {"$ne": [{"$type": "$start_time"}, "missing"]},
+                                        {"$ne": [{"$type": "$end_time"}, "missing"]},
+                                    ]
+                                },
+                                "then": {
+                                    "$subtract": [
+                                        {"$toLong": {"$toDate": "$end_time"}},
+                                        {"$toLong": {"$toDate": "$start_time"}},
+                                    ]
+                                },
+                                "else": trace_dict.get("duration_ms", 0),
+                            }
+                        },
+                        # Update name based on component level priority
+                        # Only update if new trace is from a higher-level component
+                        "name": {
+                            "$cond": {
+                                "if": {"$eq": [{"$type": "$name"}, "missing"]},
+                                "then": trace.name,
+                                "else": {
+                                    "$cond": {
+                                        "if": {
+                                            "$gt": [
+                                                new_level,
+                                                {
+                                                    "$switch": {
+                                                        "branches": [
+                                                            # Check if existing name is a root span
+                                                            {
+                                                                "case": {
+                                                                    "$not": {
+                                                                        "$or": [
+                                                                            {
+                                                                                "$regexMatch": {
+                                                                                    "input": {"$ifNull": ["$name", ""]},
+                                                                                    "regex": "\\.run",
+                                                                                }
+                                                                            },
+                                                                            {
+                                                                                "$regexMatch": {
+                                                                                    "input": {"$ifNull": ["$name", ""]},
+                                                                                    "regex": "\\.arun",
+                                                                                }
+                                                                            },
+                                                                        ]
+                                                                    }
+                                                                },
+                                                                "then": 0,
+                                                            },
+                                                            # Workflow root (level 3)
+                                                            {
+                                                                "case": {"$ne": ["$workflow_id", None]},
+                                                                "then": 3,
+                                                            },
+                                                            # Team root (level 2)
+                                                            {
+                                                                "case": {"$ne": ["$team_id", None]},
+                                                                "then": 2,
+                                                            },
+                                                            # Agent root (level 1)
+                                                            {
+                                                                "case": {"$ne": ["$agent_id", None]},
+                                                                "then": 1,
+                                                            },
+                                                        ],
+                                                        "default": 0,
+                                                    }
+                                                },
+                                            ]
+                                        },
+                                        "then": trace.name,
+                                        "else": "$name",
+                                    }
+                                },
+                            }
+                        },
+                    }
+                },
+            ]
+
+            # Perform atomic upsert using aggregation pipeline
+            collection.update_one(
+                {"trace_id": trace.trace_id},
+                pipeline,
+                upsert=True,
+            )
+
+        except Exception as e:
+            log_error(f"Error creating trace: {e}")
+            # Don't raise - tracing should not break the main application flow
+
+    def get_trace(
+        self,
+        trace_id: Optional[str] = None,
+        run_id: Optional[str] = None,
+    ):
+        """Get a single trace by trace_id or other filters.
+
+        Args:
+            trace_id: The unique trace identifier.
+            run_id: Filter by run ID (returns first match).
+
+        Returns:
+            Optional[Trace]: The trace if found, None otherwise.
+
+        Note:
+            If multiple filters are provided, trace_id takes precedence.
+            For other filters, the most recent trace is returned.
+        """
+        try:
+            from agno.tracing.schemas import Trace as TraceSchema
+
+            collection = self._get_collection(table_type="traces")
+            if collection is None:
+                return None
+
+            # Get spans collection for aggregation
+            spans_collection = self._get_collection(table_type="spans")
+
+            query: Dict[str, Any] = {}
+            if trace_id:
+                query["trace_id"] = trace_id
+            elif run_id:
+                query["run_id"] = run_id
+            else:
+                log_debug("get_trace called without any filter parameters")
+                return None
+
+            # Find trace with sorting by most recent
+            result = collection.find_one(query, sort=[("start_time", -1)])
+
+            if result:
+                # Calculate total_spans and error_count from spans collection
+                total_spans = 0
+                error_count = 0
+                if spans_collection is not None:
+                    total_spans = spans_collection.count_documents({"trace_id": result["trace_id"]})
+                    error_count = spans_collection.count_documents(
+                        {"trace_id": result["trace_id"], "status_code": "ERROR"}
+                    )
+
+                result["total_spans"] = total_spans
+                result["error_count"] = error_count
+                # Remove MongoDB's _id field
+                result.pop("_id", None)
+                return TraceSchema.from_dict(result)
+            return None
+
+        except Exception as e:
+            log_error(f"Error getting trace: {e}")
+            return None
+
+    def get_traces(
+        self,
+        run_id: Optional[str] = None,
+        session_id: Optional[str] = None,
+        user_id: Optional[str] = None,
+        agent_id: Optional[str] = None,
+        team_id: Optional[str] = None,
+        workflow_id: Optional[str] = None,
+        status: Optional[str] = None,
+        start_time: Optional[datetime] = None,
+        end_time: Optional[datetime] = None,
+        limit: Optional[int] = 20,
+        page: Optional[int] = 1,
+    ) -> tuple[List, int]:
+        """Get traces matching the provided filters with pagination.
+
+        Args:
+            run_id: Filter by run ID.
+            session_id: Filter by session ID.
+            user_id: Filter by user ID.
+            agent_id: Filter by agent ID.
+            team_id: Filter by team ID.
+            workflow_id: Filter by workflow ID.
+            status: Filter by status (OK, ERROR, UNSET).
+            start_time: Filter traces starting after this datetime.
+            end_time: Filter traces ending before this datetime.
+            limit: Maximum number of traces to return per page.
+            page: Page number (1-indexed).
+
+        Returns:
+            tuple[List[Trace], int]: Tuple of (list of matching traces, total count).
+        """
+        try:
+            from agno.tracing.schemas import Trace as TraceSchema
+
+            collection = self._get_collection(table_type="traces")
+            if collection is None:
+                log_debug("Traces collection not found")
+                return [], 0
+
+            # Get spans collection for aggregation
+            spans_collection = self._get_collection(table_type="spans")
+
+            # Build query
+            query: Dict[str, Any] = {}
+            if run_id:
+                query["run_id"] = run_id
+            if session_id:
+                query["session_id"] = session_id
+            if user_id:
+                query["user_id"] = user_id
+            if agent_id:
+                query["agent_id"] = agent_id
+            if team_id:
+                query["team_id"] = team_id
+            if workflow_id:
+                query["workflow_id"] = workflow_id
+            if status:
+                query["status"] = status
+            if start_time:
+                query["start_time"] = {"$gte": start_time.isoformat()}
+            if end_time:
+                if "end_time" in query:
+                    query["end_time"]["$lte"] = end_time.isoformat()
+                else:
+                    query["end_time"] = {"$lte": end_time.isoformat()}
+
+            # Get total count
+            total_count = collection.count_documents(query)
+
+            # Apply pagination
+            skip = ((page or 1) - 1) * (limit or 20)
+            cursor = collection.find(query).sort("start_time", -1).skip(skip).limit(limit or 20)
+
+            results = list(cursor)
+
+            traces = []
+            for row in results:
+                # Calculate total_spans and error_count from spans collection
+                total_spans = 0
+                error_count = 0
+                if spans_collection is not None:
+                    total_spans = spans_collection.count_documents({"trace_id": row["trace_id"]})
+                    error_count = spans_collection.count_documents(
+                        {"trace_id": row["trace_id"], "status_code": "ERROR"}
+                    )
+
+                row["total_spans"] = total_spans
+                row["error_count"] = error_count
+                # Remove MongoDB's _id field
+                row.pop("_id", None)
+                traces.append(TraceSchema.from_dict(row))
+
+            return traces, total_count
+
+        except Exception as e:
+            log_error(f"Error getting traces: {e}")
+            return [], 0
+
+    def get_trace_stats(
+        self,
+        user_id: Optional[str] = None,
+        agent_id: Optional[str] = None,
+        team_id: Optional[str] = None,
+        workflow_id: Optional[str] = None,
+        start_time: Optional[datetime] = None,
+        end_time: Optional[datetime] = None,
+        limit: Optional[int] = 20,
+        page: Optional[int] = 1,
+    ) -> tuple[List[Dict[str, Any]], int]:
+        """Get trace statistics grouped by session.
+
+        Args:
+            user_id: Filter by user ID.
+            agent_id: Filter by agent ID.
+            team_id: Filter by team ID.
+            workflow_id: Filter by workflow ID.
+            start_time: Filter sessions with traces created after this datetime.
+            end_time: Filter sessions with traces created before this datetime.
+            limit: Maximum number of sessions to return per page.
+            page: Page number (1-indexed).
+
+        Returns:
+            tuple[List[Dict], int]: Tuple of (list of session stats dicts, total count).
+            Each dict contains: session_id, user_id, agent_id, team_id, total_traces,
+            workflow_id, first_trace_at, last_trace_at.
+        """
+        try:
+            collection = self._get_collection(table_type="traces")
+            if collection is None:
+                log_debug("Traces collection not found")
+                return [], 0
+
+            # Build match stage
+            match_stage: Dict[str, Any] = {"session_id": {"$ne": None}}
+            if user_id:
+                match_stage["user_id"] = user_id
+            if agent_id:
+                match_stage["agent_id"] = agent_id
+            if team_id:
+                match_stage["team_id"] = team_id
+            if workflow_id:
+                match_stage["workflow_id"] = workflow_id
+            if start_time:
+                match_stage["created_at"] = {"$gte": start_time.isoformat()}
+            if end_time:
+                if "created_at" in match_stage:
+                    match_stage["created_at"]["$lte"] = end_time.isoformat()
+                else:
+                    match_stage["created_at"] = {"$lte": end_time.isoformat()}
+
+            # Build aggregation pipeline
+            pipeline: List[Dict[str, Any]] = [
+                {"$match": match_stage},
+                {
+                    "$group": {
+                        "_id": "$session_id",
+                        "user_id": {"$first": "$user_id"},
+                        "agent_id": {"$first": "$agent_id"},
+                        "team_id": {"$first": "$team_id"},
+                        "workflow_id": {"$first": "$workflow_id"},
+                        "total_traces": {"$sum": 1},
+                        "first_trace_at": {"$min": "$created_at"},
+                        "last_trace_at": {"$max": "$created_at"},
+                    }
+                },
+                {"$sort": {"last_trace_at": -1}},
+            ]
+
+            # Get total count
+            count_pipeline = pipeline + [{"$count": "total"}]
+            count_result = list(collection.aggregate(count_pipeline))
+            total_count = count_result[0]["total"] if count_result else 0
+
+            # Apply pagination
+            skip = ((page or 1) - 1) * (limit or 20)
+            pipeline.append({"$skip": skip})
+            pipeline.append({"$limit": limit or 20})
+
+            results = list(collection.aggregate(pipeline))
+
+            # Convert to list of dicts with datetime objects
+            stats_list = []
+            for row in results:
+                # Convert ISO strings to datetime objects
+                first_trace_at_str = row["first_trace_at"]
+                last_trace_at_str = row["last_trace_at"]
+
+                # Parse ISO format strings to datetime objects
+                first_trace_at = datetime.fromisoformat(first_trace_at_str.replace("Z", "+00:00"))
+                last_trace_at = datetime.fromisoformat(last_trace_at_str.replace("Z", "+00:00"))
+
+                stats_list.append(
+                    {
+                        "session_id": row["_id"],
+                        "user_id": row["user_id"],
+                        "agent_id": row["agent_id"],
+                        "team_id": row["team_id"],
+                        "workflow_id": row["workflow_id"],
+                        "total_traces": row["total_traces"],
+                        "first_trace_at": first_trace_at,
+                        "last_trace_at": last_trace_at,
+                    }
+                )
+
+            return stats_list, total_count
+
+        except Exception as e:
+            log_error(f"Error getting trace stats: {e}")
+            return [], 0
+
+    # --- Spans ---
+    def create_span(self, span: "Span") -> None:
+        """Create a single span in the database.
+
+        Args:
+            span: The Span object to store.
+        """
+        try:
+            collection = self._get_collection(table_type="spans", create_collection_if_not_found=True)
+            if collection is None:
+                return
+
+            collection.insert_one(span.to_dict())
+
+        except Exception as e:
+            log_error(f"Error creating span: {e}")
+
+    def create_spans(self, spans: List) -> None:
+        """Create multiple spans in the database as a batch.
+
+        Args:
+            spans: List of Span objects to store.
+        """
+        if not spans:
+            return
+
+        try:
+            collection = self._get_collection(table_type="spans", create_collection_if_not_found=True)
+            if collection is None:
+                return
+
+            span_dicts = [span.to_dict() for span in spans]
+            collection.insert_many(span_dicts)
+
+        except Exception as e:
+            log_error(f"Error creating spans batch: {e}")
+
+    def get_span(self, span_id: str):
+        """Get a single span by its span_id.
+
+        Args:
+            span_id: The unique span identifier.
+
+        Returns:
+            Optional[Span]: The span if found, None otherwise.
+        """
+        try:
+            from agno.tracing.schemas import Span as SpanSchema
+
+            collection = self._get_collection(table_type="spans")
+            if collection is None:
+                return None
+
+            result = collection.find_one({"span_id": span_id})
+            if result:
+                # Remove MongoDB's _id field
+                result.pop("_id", None)
+                return SpanSchema.from_dict(result)
+            return None
+
+        except Exception as e:
+            log_error(f"Error getting span: {e}")
+            return None
+
+    def get_spans(
+        self,
+        trace_id: Optional[str] = None,
+        parent_span_id: Optional[str] = None,
+        limit: Optional[int] = 1000,
+    ) -> List:
+        """Get spans matching the provided filters.
+
+        Args:
+            trace_id: Filter by trace ID.
+            parent_span_id: Filter by parent span ID.
+            limit: Maximum number of spans to return.
+
+        Returns:
+            List[Span]: List of matching spans.
+        """
+        try:
+            from agno.tracing.schemas import Span as SpanSchema
+
+            collection = self._get_collection(table_type="spans")
+            if collection is None:
+                return []
+
+            # Build query
+            query: Dict[str, Any] = {}
+            if trace_id:
+                query["trace_id"] = trace_id
+            if parent_span_id:
+                query["parent_span_id"] = parent_span_id
+
+            cursor = collection.find(query).limit(limit or 1000)
+            results = list(cursor)
+
+            spans = []
+            for row in results:
+                # Remove MongoDB's _id field
+                row.pop("_id", None)
+                spans.append(SpanSchema.from_dict(row))
+
+            return spans
+
+        except Exception as e:
+            log_error(f"Error getting spans: {e}")
+            return []