agno-2.1.2-py3-none-any.whl → agno-2.3.13-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- agno/agent/agent.py +5540 -2273
- agno/api/api.py +2 -0
- agno/api/os.py +1 -1
- agno/compression/__init__.py +3 -0
- agno/compression/manager.py +247 -0
- agno/culture/__init__.py +3 -0
- agno/culture/manager.py +956 -0
- agno/db/async_postgres/__init__.py +3 -0
- agno/db/base.py +689 -6
- agno/db/dynamo/dynamo.py +933 -37
- agno/db/dynamo/schemas.py +174 -10
- agno/db/dynamo/utils.py +63 -4
- agno/db/firestore/firestore.py +831 -9
- agno/db/firestore/schemas.py +51 -0
- agno/db/firestore/utils.py +102 -4
- agno/db/gcs_json/gcs_json_db.py +660 -12
- agno/db/gcs_json/utils.py +60 -26
- agno/db/in_memory/in_memory_db.py +287 -14
- agno/db/in_memory/utils.py +60 -2
- agno/db/json/json_db.py +590 -14
- agno/db/json/utils.py +60 -26
- agno/db/migrations/manager.py +199 -0
- agno/db/migrations/v1_to_v2.py +43 -13
- agno/db/migrations/versions/__init__.py +0 -0
- agno/db/migrations/versions/v2_3_0.py +938 -0
- agno/db/mongo/__init__.py +15 -1
- agno/db/mongo/async_mongo.py +2760 -0
- agno/db/mongo/mongo.py +879 -11
- agno/db/mongo/schemas.py +42 -0
- agno/db/mongo/utils.py +80 -8
- agno/db/mysql/__init__.py +2 -1
- agno/db/mysql/async_mysql.py +2912 -0
- agno/db/mysql/mysql.py +946 -68
- agno/db/mysql/schemas.py +72 -10
- agno/db/mysql/utils.py +198 -7
- agno/db/postgres/__init__.py +2 -1
- agno/db/postgres/async_postgres.py +2579 -0
- agno/db/postgres/postgres.py +942 -57
- agno/db/postgres/schemas.py +81 -18
- agno/db/postgres/utils.py +164 -2
- agno/db/redis/redis.py +671 -7
- agno/db/redis/schemas.py +50 -0
- agno/db/redis/utils.py +65 -7
- agno/db/schemas/__init__.py +2 -1
- agno/db/schemas/culture.py +120 -0
- agno/db/schemas/evals.py +1 -0
- agno/db/schemas/memory.py +17 -2
- agno/db/singlestore/schemas.py +63 -0
- agno/db/singlestore/singlestore.py +949 -83
- agno/db/singlestore/utils.py +60 -2
- agno/db/sqlite/__init__.py +2 -1
- agno/db/sqlite/async_sqlite.py +2911 -0
- agno/db/sqlite/schemas.py +62 -0
- agno/db/sqlite/sqlite.py +965 -46
- agno/db/sqlite/utils.py +169 -8
- agno/db/surrealdb/__init__.py +3 -0
- agno/db/surrealdb/metrics.py +292 -0
- agno/db/surrealdb/models.py +334 -0
- agno/db/surrealdb/queries.py +71 -0
- agno/db/surrealdb/surrealdb.py +1908 -0
- agno/db/surrealdb/utils.py +147 -0
- agno/db/utils.py +2 -0
- agno/eval/__init__.py +10 -0
- agno/eval/accuracy.py +75 -55
- agno/eval/agent_as_judge.py +861 -0
- agno/eval/base.py +29 -0
- agno/eval/performance.py +16 -7
- agno/eval/reliability.py +28 -16
- agno/eval/utils.py +35 -17
- agno/exceptions.py +27 -2
- agno/filters.py +354 -0
- agno/guardrails/prompt_injection.py +1 -0
- agno/hooks/__init__.py +3 -0
- agno/hooks/decorator.py +164 -0
- agno/integrations/discord/client.py +1 -1
- agno/knowledge/chunking/agentic.py +13 -10
- agno/knowledge/chunking/fixed.py +4 -1
- agno/knowledge/chunking/semantic.py +9 -4
- agno/knowledge/chunking/strategy.py +59 -15
- agno/knowledge/embedder/fastembed.py +1 -1
- agno/knowledge/embedder/nebius.py +1 -1
- agno/knowledge/embedder/ollama.py +8 -0
- agno/knowledge/embedder/openai.py +8 -8
- agno/knowledge/embedder/sentence_transformer.py +6 -2
- agno/knowledge/embedder/vllm.py +262 -0
- agno/knowledge/knowledge.py +1618 -318
- agno/knowledge/reader/base.py +6 -2
- agno/knowledge/reader/csv_reader.py +8 -10
- agno/knowledge/reader/docx_reader.py +5 -6
- agno/knowledge/reader/field_labeled_csv_reader.py +16 -20
- agno/knowledge/reader/json_reader.py +5 -4
- agno/knowledge/reader/markdown_reader.py +8 -8
- agno/knowledge/reader/pdf_reader.py +17 -19
- agno/knowledge/reader/pptx_reader.py +101 -0
- agno/knowledge/reader/reader_factory.py +32 -3
- agno/knowledge/reader/s3_reader.py +3 -3
- agno/knowledge/reader/tavily_reader.py +193 -0
- agno/knowledge/reader/text_reader.py +22 -10
- agno/knowledge/reader/web_search_reader.py +1 -48
- agno/knowledge/reader/website_reader.py +10 -10
- agno/knowledge/reader/wikipedia_reader.py +33 -1
- agno/knowledge/types.py +1 -0
- agno/knowledge/utils.py +72 -7
- agno/media.py +22 -6
- agno/memory/__init__.py +14 -1
- agno/memory/manager.py +544 -83
- agno/memory/strategies/__init__.py +15 -0
- agno/memory/strategies/base.py +66 -0
- agno/memory/strategies/summarize.py +196 -0
- agno/memory/strategies/types.py +37 -0
- agno/models/aimlapi/aimlapi.py +17 -0
- agno/models/anthropic/claude.py +515 -40
- agno/models/aws/bedrock.py +102 -21
- agno/models/aws/claude.py +131 -274
- agno/models/azure/ai_foundry.py +41 -19
- agno/models/azure/openai_chat.py +39 -8
- agno/models/base.py +1249 -525
- agno/models/cerebras/cerebras.py +91 -21
- agno/models/cerebras/cerebras_openai.py +21 -2
- agno/models/cohere/chat.py +40 -6
- agno/models/cometapi/cometapi.py +18 -1
- agno/models/dashscope/dashscope.py +2 -3
- agno/models/deepinfra/deepinfra.py +18 -1
- agno/models/deepseek/deepseek.py +69 -3
- agno/models/fireworks/fireworks.py +18 -1
- agno/models/google/gemini.py +877 -80
- agno/models/google/utils.py +22 -0
- agno/models/groq/groq.py +51 -18
- agno/models/huggingface/huggingface.py +17 -6
- agno/models/ibm/watsonx.py +16 -6
- agno/models/internlm/internlm.py +18 -1
- agno/models/langdb/langdb.py +13 -1
- agno/models/litellm/chat.py +44 -9
- agno/models/litellm/litellm_openai.py +18 -1
- agno/models/message.py +28 -5
- agno/models/meta/llama.py +47 -14
- agno/models/meta/llama_openai.py +22 -17
- agno/models/mistral/mistral.py +8 -4
- agno/models/nebius/nebius.py +6 -7
- agno/models/nvidia/nvidia.py +20 -3
- agno/models/ollama/chat.py +24 -8
- agno/models/openai/chat.py +104 -29
- agno/models/openai/responses.py +101 -81
- agno/models/openrouter/openrouter.py +60 -3
- agno/models/perplexity/perplexity.py +17 -1
- agno/models/portkey/portkey.py +7 -6
- agno/models/requesty/requesty.py +24 -4
- agno/models/response.py +73 -2
- agno/models/sambanova/sambanova.py +20 -3
- agno/models/siliconflow/siliconflow.py +19 -2
- agno/models/together/together.py +20 -3
- agno/models/utils.py +254 -8
- agno/models/vercel/v0.py +20 -3
- agno/models/vertexai/__init__.py +0 -0
- agno/models/vertexai/claude.py +190 -0
- agno/models/vllm/vllm.py +19 -14
- agno/models/xai/xai.py +19 -2
- agno/os/app.py +549 -152
- agno/os/auth.py +190 -3
- agno/os/config.py +23 -0
- agno/os/interfaces/a2a/router.py +8 -11
- agno/os/interfaces/a2a/utils.py +1 -1
- agno/os/interfaces/agui/router.py +18 -3
- agno/os/interfaces/agui/utils.py +152 -39
- agno/os/interfaces/slack/router.py +55 -37
- agno/os/interfaces/slack/slack.py +9 -1
- agno/os/interfaces/whatsapp/router.py +0 -1
- agno/os/interfaces/whatsapp/security.py +3 -1
- agno/os/mcp.py +110 -52
- agno/os/middleware/__init__.py +2 -0
- agno/os/middleware/jwt.py +676 -112
- agno/os/router.py +40 -1478
- agno/os/routers/agents/__init__.py +3 -0
- agno/os/routers/agents/router.py +599 -0
- agno/os/routers/agents/schema.py +261 -0
- agno/os/routers/evals/evals.py +96 -39
- agno/os/routers/evals/schemas.py +65 -33
- agno/os/routers/evals/utils.py +80 -10
- agno/os/routers/health.py +10 -4
- agno/os/routers/knowledge/knowledge.py +196 -38
- agno/os/routers/knowledge/schemas.py +82 -22
- agno/os/routers/memory/memory.py +279 -52
- agno/os/routers/memory/schemas.py +46 -17
- agno/os/routers/metrics/metrics.py +20 -8
- agno/os/routers/metrics/schemas.py +16 -16
- agno/os/routers/session/session.py +462 -34
- agno/os/routers/teams/__init__.py +3 -0
- agno/os/routers/teams/router.py +512 -0
- agno/os/routers/teams/schema.py +257 -0
- agno/os/routers/traces/__init__.py +3 -0
- agno/os/routers/traces/schemas.py +414 -0
- agno/os/routers/traces/traces.py +499 -0
- agno/os/routers/workflows/__init__.py +3 -0
- agno/os/routers/workflows/router.py +624 -0
- agno/os/routers/workflows/schema.py +75 -0
- agno/os/schema.py +256 -693
- agno/os/scopes.py +469 -0
- agno/os/utils.py +514 -36
- agno/reasoning/anthropic.py +80 -0
- agno/reasoning/gemini.py +73 -0
- agno/reasoning/openai.py +5 -0
- agno/reasoning/vertexai.py +76 -0
- agno/run/__init__.py +6 -0
- agno/run/agent.py +155 -32
- agno/run/base.py +55 -3
- agno/run/requirement.py +181 -0
- agno/run/team.py +125 -38
- agno/run/workflow.py +72 -18
- agno/session/agent.py +102 -89
- agno/session/summary.py +56 -15
- agno/session/team.py +164 -90
- agno/session/workflow.py +405 -40
- agno/table.py +10 -0
- agno/team/team.py +3974 -1903
- agno/tools/dalle.py +2 -4
- agno/tools/eleven_labs.py +23 -25
- agno/tools/exa.py +21 -16
- agno/tools/file.py +153 -23
- agno/tools/file_generation.py +16 -10
- agno/tools/firecrawl.py +15 -7
- agno/tools/function.py +193 -38
- agno/tools/gmail.py +238 -14
- agno/tools/google_drive.py +271 -0
- agno/tools/googlecalendar.py +36 -8
- agno/tools/googlesheets.py +20 -5
- agno/tools/jira.py +20 -0
- agno/tools/mcp/__init__.py +10 -0
- agno/tools/mcp/mcp.py +331 -0
- agno/tools/mcp/multi_mcp.py +347 -0
- agno/tools/mcp/params.py +24 -0
- agno/tools/mcp_toolbox.py +3 -3
- agno/tools/models/nebius.py +5 -5
- agno/tools/models_labs.py +20 -10
- agno/tools/nano_banana.py +151 -0
- agno/tools/notion.py +204 -0
- agno/tools/parallel.py +314 -0
- agno/tools/postgres.py +76 -36
- agno/tools/redshift.py +406 -0
- agno/tools/scrapegraph.py +1 -1
- agno/tools/shopify.py +1519 -0
- agno/tools/slack.py +18 -3
- agno/tools/spotify.py +919 -0
- agno/tools/tavily.py +146 -0
- agno/tools/toolkit.py +25 -0
- agno/tools/workflow.py +8 -1
- agno/tools/yfinance.py +12 -11
- agno/tracing/__init__.py +12 -0
- agno/tracing/exporter.py +157 -0
- agno/tracing/schemas.py +276 -0
- agno/tracing/setup.py +111 -0
- agno/utils/agent.py +938 -0
- agno/utils/cryptography.py +22 -0
- agno/utils/dttm.py +33 -0
- agno/utils/events.py +151 -3
- agno/utils/gemini.py +15 -5
- agno/utils/hooks.py +118 -4
- agno/utils/http.py +113 -2
- agno/utils/knowledge.py +12 -5
- agno/utils/log.py +1 -0
- agno/utils/mcp.py +92 -2
- agno/utils/media.py +187 -1
- agno/utils/merge_dict.py +3 -3
- agno/utils/message.py +60 -0
- agno/utils/models/ai_foundry.py +9 -2
- agno/utils/models/claude.py +49 -14
- agno/utils/models/cohere.py +9 -2
- agno/utils/models/llama.py +9 -2
- agno/utils/models/mistral.py +4 -2
- agno/utils/print_response/agent.py +109 -16
- agno/utils/print_response/team.py +223 -30
- agno/utils/print_response/workflow.py +251 -34
- agno/utils/streamlit.py +1 -1
- agno/utils/team.py +98 -9
- agno/utils/tokens.py +657 -0
- agno/vectordb/base.py +39 -7
- agno/vectordb/cassandra/cassandra.py +21 -5
- agno/vectordb/chroma/chromadb.py +43 -12
- agno/vectordb/clickhouse/clickhousedb.py +21 -5
- agno/vectordb/couchbase/couchbase.py +29 -5
- agno/vectordb/lancedb/lance_db.py +92 -181
- agno/vectordb/langchaindb/langchaindb.py +24 -4
- agno/vectordb/lightrag/lightrag.py +17 -3
- agno/vectordb/llamaindex/llamaindexdb.py +25 -5
- agno/vectordb/milvus/milvus.py +50 -37
- agno/vectordb/mongodb/__init__.py +7 -1
- agno/vectordb/mongodb/mongodb.py +36 -30
- agno/vectordb/pgvector/pgvector.py +201 -77
- agno/vectordb/pineconedb/pineconedb.py +41 -23
- agno/vectordb/qdrant/qdrant.py +67 -54
- agno/vectordb/redis/__init__.py +9 -0
- agno/vectordb/redis/redisdb.py +682 -0
- agno/vectordb/singlestore/singlestore.py +50 -29
- agno/vectordb/surrealdb/surrealdb.py +31 -41
- agno/vectordb/upstashdb/upstashdb.py +34 -6
- agno/vectordb/weaviate/weaviate.py +53 -14
- agno/workflow/__init__.py +2 -0
- agno/workflow/agent.py +299 -0
- agno/workflow/condition.py +120 -18
- agno/workflow/loop.py +77 -10
- agno/workflow/parallel.py +231 -143
- agno/workflow/router.py +118 -17
- agno/workflow/step.py +609 -170
- agno/workflow/steps.py +73 -6
- agno/workflow/types.py +96 -21
- agno/workflow/workflow.py +2039 -262
- {agno-2.1.2.dist-info → agno-2.3.13.dist-info}/METADATA +201 -66
- agno-2.3.13.dist-info/RECORD +613 -0
- agno/tools/googlesearch.py +0 -98
- agno/tools/mcp.py +0 -679
- agno/tools/memori.py +0 -339
- agno-2.1.2.dist-info/RECORD +0 -543
- {agno-2.1.2.dist-info → agno-2.3.13.dist-info}/WHEEL +0 -0
- {agno-2.1.2.dist-info → agno-2.3.13.dist-info}/licenses/LICENSE +0 -0
- {agno-2.1.2.dist-info → agno-2.3.13.dist-info}/top_level.txt +0 -0
agno/db/gcs_json/gcs_json_db.py
CHANGED
@@ -1,16 +1,22 @@
 import json
 import time
 from datetime import date, datetime, timedelta, timezone
-from typing import Any, Dict, List, Optional, Tuple, Union
+from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
 from uuid import uuid4
 
+if TYPE_CHECKING:
+    from agno.tracing.schemas import Span, Trace
+
 from agno.db.base import BaseDb, SessionType
 from agno.db.gcs_json.utils import (
     apply_sorting,
     calculate_date_metrics,
+    deserialize_cultural_knowledge_from_db,
     fetch_all_sessions_data,
     get_dates_to_calculate_metrics_for,
+    serialize_cultural_knowledge_for_db,
 )
+from agno.db.schemas.culture import CulturalKnowledge
 from agno.db.schemas.evals import EvalFilterType, EvalRunRecord, EvalType
 from agno.db.schemas.knowledge import KnowledgeRow
 from agno.db.schemas.memory import UserMemory
@@ -34,6 +40,9 @@ class GcsJsonDb(BaseDb):
         metrics_table: Optional[str] = None,
         eval_table: Optional[str] = None,
         knowledge_table: Optional[str] = None,
+        culture_table: Optional[str] = None,
+        traces_table: Optional[str] = None,
+        spans_table: Optional[str] = None,
         project: Optional[str] = None,
         credentials: Optional[Any] = None,
         id: Optional[str] = None,
@@ -49,6 +58,9 @@ class GcsJsonDb(BaseDb):
             metrics_table (Optional[str]): Name of the JSON file to store metrics.
             eval_table (Optional[str]): Name of the JSON file to store evaluation runs.
             knowledge_table (Optional[str]): Name of the JSON file to store knowledge content.
+            culture_table (Optional[str]): Name of the JSON file to store cultural knowledge.
+            traces_table (Optional[str]): Name of the JSON file to store traces.
+            spans_table (Optional[str]): Name of the JSON file to store spans.
             project (Optional[str]): GCP project ID. If None, uses default project.
             location (Optional[str]): GCS bucket location. If None, uses default location.
             credentials (Optional[Any]): GCP credentials. If None, uses default credentials.
@@ -66,6 +78,9 @@ class GcsJsonDb(BaseDb):
             metrics_table=metrics_table,
             eval_table=eval_table,
             knowledge_table=knowledge_table,
+            culture_table=culture_table,
+            traces_table=traces_table,
+            spans_table=spans_table,
         )
 
         self.bucket_name = bucket_name
@@ -77,6 +92,10 @@ class GcsJsonDb(BaseDb):
         self.client = gcs.Client(project=project, credentials=credentials)
         self.bucket = self.client.bucket(self.bucket_name)
 
+    def table_exists(self, table_name: str) -> bool:
+        """JSON implementation, always returns True."""
+        return True
+
     def _get_blob_name(self, filename: str) -> str:
         """Get the full blob name including prefix for a given filename."""
         return f"{self.prefix}{filename}.json"
@@ -132,6 +151,14 @@ class GcsJsonDb(BaseDb):
             log_error(f"Error writing to the {blob_name} JSON file in GCS: {e}")
             return
 
+    def get_latest_schema_version(self):
+        """Get the latest version of the database schema."""
+        pass
+
+    def upsert_schema_version(self, version: str) -> None:
+        """Upsert the schema version into the database."""
+        pass
+
     # -- Session methods --
 
     def delete_session(self, session_id: str) -> bool:
@@ -214,10 +241,6 @@ class GcsJsonDb(BaseDb):
             if user_id is not None and session_data.get("user_id") != user_id:
                 continue
 
-            session_type_value = session_type.value if isinstance(session_type, SessionType) else session_type
-            if session_data.get("session_type") != session_type_value:
-                continue
-
             if not deserialize:
                 return session_data
 
@@ -412,7 +435,7 @@ class GcsJsonDb(BaseDb):
             raise e
 
     def upsert_sessions(
-        self, sessions: List[Session], deserialize: Optional[bool] = True
+        self, sessions: List[Session], deserialize: Optional[bool] = True, preserve_updated_at: bool = False
    ) -> List[Union[Session, Dict[str, Any]]]:
         """
         Bulk upsert multiple sessions for improved performance on large datasets.
@@ -472,7 +495,8 @@ class GcsJsonDb(BaseDb):
 
             # Filter out the memory, with optional user_id verification
             memories = [
-                m
+                m
+                for m in memories
                 if not (m.get("memory_id") == memory_id and (user_id is None or m.get("user_id") == user_id))
             ]
 
@@ -499,7 +523,8 @@ class GcsJsonDb(BaseDb):
 
             # Filter out memories, with optional user_id verification
             memories = [
-                m
+                m
+                for m in memories
                 if not (m.get("memory_id") in memory_ids and (user_id is None or m.get("user_id") == user_id))
             ]
 
@@ -619,13 +644,14 @@ class GcsJsonDb(BaseDb):
             raise e
 
     def get_user_memory_stats(
-        self, limit: Optional[int] = None, page: Optional[int] = None
+        self, limit: Optional[int] = None, page: Optional[int] = None, user_id: Optional[str] = None
     ) -> Tuple[List[Dict[str, Any]], int]:
         """Get user memory statistics.
 
         Args:
             limit (Optional[int]): Maximum number of results to return.
             page (Optional[int]): Page number for pagination.
+            user_id (Optional[str]): User ID for filtering.
 
         Returns:
             Tuple[List[Dict[str, Any]], int]: List of user memory statistics and total count.
@@ -636,13 +662,15 @@ class GcsJsonDb(BaseDb):
 
             for memory in memories:
                 memory_user_id = memory.get("user_id")
-
+                # filter by user_id if provided
+                if user_id is not None and memory_user_id != user_id:
+                    continue
                 if memory_user_id:
                     if memory_user_id not in user_stats:
                         user_stats[memory_user_id] = {
                             "user_id": memory_user_id,
                             "total_memories": 0,
-                            "last_memory_updated_at": 0
+                            "last_memory_updated_at": 0,
                         }
                     user_stats[memory_user_id]["total_memories"] += 1
                     updated_at = memory.get("updated_at", 0)
@@ -702,7 +730,7 @@ class GcsJsonDb(BaseDb):
             raise e
 
     def upsert_memories(
-        self, memories: List[UserMemory], deserialize: Optional[bool] = True
+        self, memories: List[UserMemory], deserialize: Optional[bool] = True, preserve_updated_at: bool = False
     ) -> List[Union[UserMemory, Dict[str, Any]]]:
         """
         Bulk upsert multiple user memories for improved performance on large datasets.
@@ -1141,3 +1169,623 @@ class GcsJsonDb(BaseDb):
         except Exception as e:
             log_warning(f"Error renaming eval run {eval_run_id}: {e}")
             raise e
+
+    # -- Cultural Knowledge methods --
+    def clear_cultural_knowledge(self) -> None:
+        """Delete all cultural knowledge from the database.
+
+        Raises:
+            Exception: If an error occurs during deletion.
+        """
+        try:
+            self._write_json_file(self.culture_table_name, [])
+        except Exception as e:
+            log_warning(f"Exception deleting all cultural knowledge: {e}")
+            raise e
+
+    def delete_cultural_knowledge(self, id: str) -> None:
+        """Delete cultural knowledge by ID.
+
+        Args:
+            id (str): The ID of the cultural knowledge to delete.
+
+        Raises:
+            Exception: If an error occurs during deletion.
+        """
+        try:
+            cultural_knowledge = self._read_json_file(self.culture_table_name)
+            cultural_knowledge = [item for item in cultural_knowledge if item.get("id") != id]
+            self._write_json_file(self.culture_table_name, cultural_knowledge)
+            log_debug(f"Deleted cultural knowledge with ID: {id}")
+        except Exception as e:
+            log_warning(f"Error deleting cultural knowledge: {e}")
+            raise e
+
+    def get_cultural_knowledge(
+        self, id: str, deserialize: Optional[bool] = True
+    ) -> Optional[Union[CulturalKnowledge, Dict[str, Any]]]:
+        """Get cultural knowledge by ID.
+
+        Args:
+            id (str): The ID of the cultural knowledge to retrieve.
+            deserialize (Optional[bool]): Whether to deserialize to CulturalKnowledge object. Defaults to True.
+
+        Returns:
+            Optional[Union[CulturalKnowledge, Dict[str, Any]]]: The cultural knowledge if found, None otherwise.
+
+        Raises:
+            Exception: If an error occurs during retrieval.
+        """
+        try:
+            cultural_knowledge = self._read_json_file(self.culture_table_name)
+
+            for item in cultural_knowledge:
+                if item.get("id") == id:
+                    if not deserialize:
+                        return item
+                    return deserialize_cultural_knowledge_from_db(item)
+
+            return None
+        except Exception as e:
+            log_warning(f"Error getting cultural knowledge: {e}")
+            raise e
+
+    def get_all_cultural_knowledge(
+        self,
+        agent_id: Optional[str] = None,
+        team_id: Optional[str] = None,
+        name: Optional[str] = None,
+        limit: Optional[int] = None,
+        page: Optional[int] = None,
+        sort_by: Optional[str] = None,
+        sort_order: Optional[str] = None,
+        deserialize: Optional[bool] = True,
+    ) -> Union[List[CulturalKnowledge], Tuple[List[Dict[str, Any]], int]]:
+        """Get all cultural knowledge with filtering and pagination.
+
+        Args:
+            agent_id (Optional[str]): Filter by agent ID.
+            team_id (Optional[str]): Filter by team ID.
+            name (Optional[str]): Filter by name (case-insensitive partial match).
+            limit (Optional[int]): Maximum number of results to return.
+            page (Optional[int]): Page number for pagination.
+            sort_by (Optional[str]): Field to sort by.
+            sort_order (Optional[str]): Sort order ('asc' or 'desc').
+            deserialize (Optional[bool]): Whether to deserialize to CulturalKnowledge objects. Defaults to True.
+
+        Returns:
+            Union[List[CulturalKnowledge], Tuple[List[Dict[str, Any]], int]]:
+                - When deserialize=True: List of CulturalKnowledge objects
+                - When deserialize=False: Tuple with list of dictionaries and total count
+
+        Raises:
+            Exception: If an error occurs during retrieval.
+        """
+        try:
+            cultural_knowledge = self._read_json_file(self.culture_table_name)
+
+            # Apply filters
+            filtered_items = []
+            for item in cultural_knowledge:
+                if agent_id is not None and item.get("agent_id") != agent_id:
+                    continue
+                if team_id is not None and item.get("team_id") != team_id:
+                    continue
+                if name is not None and name.lower() not in item.get("name", "").lower():
+                    continue
+
+                filtered_items.append(item)
+
+            total_count = len(filtered_items)
+
+            # Apply sorting
+            filtered_items = apply_sorting(filtered_items, sort_by, sort_order)
+
+            # Apply pagination
+            if limit is not None:
+                start_idx = 0
+                if page is not None:
+                    start_idx = (page - 1) * limit
+                filtered_items = filtered_items[start_idx : start_idx + limit]
+
+            if not deserialize:
+                return filtered_items, total_count
+
+            return [deserialize_cultural_knowledge_from_db(item) for item in filtered_items]
+
+        except Exception as e:
+            log_warning(f"Error getting all cultural knowledge: {e}")
+            raise e
+
+    def upsert_cultural_knowledge(
+        self, cultural_knowledge: CulturalKnowledge, deserialize: Optional[bool] = True
+    ) -> Optional[Union[CulturalKnowledge, Dict[str, Any]]]:
+        """Upsert cultural knowledge in the GCS JSON file.
+
+        Args:
+            cultural_knowledge (CulturalKnowledge): The cultural knowledge to upsert.
+            deserialize (Optional[bool]): Whether to deserialize the result. Defaults to True.
+
+        Returns:
+            Optional[Union[CulturalKnowledge, Dict[str, Any]]]: The upserted cultural knowledge.
+
+        Raises:
+            Exception: If an error occurs during upsert.
+        """
+        try:
+            cultural_knowledge_list = self._read_json_file(self.culture_table_name, create_table_if_not_found=True)
+
+            # Serialize content, categories, and notes into a dict for DB storage
+            content_dict = serialize_cultural_knowledge_for_db(cultural_knowledge)
+
+            # Create the item dict with serialized content
+            cultural_knowledge_dict = {
+                "id": cultural_knowledge.id,
+                "name": cultural_knowledge.name,
+                "summary": cultural_knowledge.summary,
+                "content": content_dict if content_dict else None,
+                "metadata": cultural_knowledge.metadata,
+                "input": cultural_knowledge.input,
+                "created_at": cultural_knowledge.created_at,
+                "updated_at": int(time.time()),
+                "agent_id": cultural_knowledge.agent_id,
+                "team_id": cultural_knowledge.team_id,
+            }
+
+            # Find existing item to update
+            item_updated = False
+            for i, existing_item in enumerate(cultural_knowledge_list):
+                if existing_item.get("id") == cultural_knowledge.id:
+                    cultural_knowledge_list[i] = cultural_knowledge_dict
+                    item_updated = True
+                    break
+
+            if not item_updated:
+                cultural_knowledge_list.append(cultural_knowledge_dict)
+
+            self._write_json_file(self.culture_table_name, cultural_knowledge_list)
+
+            if not deserialize:
+                return cultural_knowledge_dict
+
+            return deserialize_cultural_knowledge_from_db(cultural_knowledge_dict)
+
+        except Exception as e:
+            log_warning(f"Error upserting cultural knowledge: {e}")
+            raise e
+
+    # --- Traces ---
+    def upsert_trace(self, trace: "Trace") -> None:
+        """Create or update a single trace record in the database.
+
+        Args:
+            trace: The Trace object to store (one per trace_id).
+        """
+        try:
+            traces = self._read_json_file(self.trace_table_name, create_table_if_not_found=True)
+
+            # Check if trace exists
+            existing_idx = None
+            for i, existing in enumerate(traces):
+                if existing.get("trace_id") == trace.trace_id:
+                    existing_idx = i
+                    break
+
+            if existing_idx is not None:
+                existing = traces[existing_idx]
+
+                # workflow (level 3) > team (level 2) > agent (level 1) > child/unknown (level 0)
+                def get_component_level(workflow_id: Any, team_id: Any, agent_id: Any, name: str) -> int:
+                    is_root_name = ".run" in name or ".arun" in name
+                    if not is_root_name:
+                        return 0
+                    elif workflow_id:
+                        return 3
+                    elif team_id:
+                        return 2
+                    elif agent_id:
+                        return 1
+                    else:
+                        return 0
+
+                existing_level = get_component_level(
+                    existing.get("workflow_id"),
+                    existing.get("team_id"),
+                    existing.get("agent_id"),
+                    existing.get("name", ""),
+                )
+                new_level = get_component_level(trace.workflow_id, trace.team_id, trace.agent_id, trace.name)
+                should_update_name = new_level > existing_level
+
+                # Parse existing start_time to calculate correct duration
+                existing_start_time_str = existing.get("start_time")
+                if isinstance(existing_start_time_str, str):
+                    existing_start_time = datetime.fromisoformat(existing_start_time_str.replace("Z", "+00:00"))
+                else:
+                    existing_start_time = trace.start_time
+
+                recalculated_duration_ms = int((trace.end_time - existing_start_time).total_seconds() * 1000)
+
+                # Update existing trace
+                existing["end_time"] = trace.end_time.isoformat()
+                existing["duration_ms"] = recalculated_duration_ms
+                existing["status"] = trace.status
+                if should_update_name:
+                    existing["name"] = trace.name
+
+                # Update context fields only if new value is not None
+                if trace.run_id is not None:
+                    existing["run_id"] = trace.run_id
+                if trace.session_id is not None:
+                    existing["session_id"] = trace.session_id
+                if trace.user_id is not None:
+                    existing["user_id"] = trace.user_id
+                if trace.agent_id is not None:
+                    existing["agent_id"] = trace.agent_id
+                if trace.team_id is not None:
+                    existing["team_id"] = trace.team_id
+                if trace.workflow_id is not None:
+                    existing["workflow_id"] = trace.workflow_id
+
+                traces[existing_idx] = existing
+            else:
+                # Add new trace
+                trace_dict = trace.to_dict()
+                trace_dict.pop("total_spans", None)
+                trace_dict.pop("error_count", None)
+                traces.append(trace_dict)
+
+            self._write_json_file(self.trace_table_name, traces)
+
+        except Exception as e:
+            log_error(f"Error creating trace: {e}")
+
+    def get_trace(
+        self,
+        trace_id: Optional[str] = None,
+        run_id: Optional[str] = None,
+    ):
+        """Get a single trace by trace_id or other filters.
+
+        Args:
+            trace_id: The unique trace identifier.
+            run_id: Filter by run ID (returns first match).
+
+        Returns:
+            Optional[Trace]: The trace if found, None otherwise.
+
+        Note:
+            If multiple filters are provided, trace_id takes precedence.
+            For other filters, the most recent trace is returned.
+        """
+        try:
+            from agno.tracing.schemas import Trace
+
+            traces = self._read_json_file(self.trace_table_name, create_table_if_not_found=False)
+            if not traces:
+                return None
+
+            # Get spans for calculating total_spans and error_count
+            spans = self._read_json_file(self.span_table_name, create_table_if_not_found=False)
+
+            # Filter traces
+            filtered = []
+            for t in traces:
+                if trace_id and t.get("trace_id") == trace_id:
+                    filtered.append(t)
+                    break
+                elif run_id and t.get("run_id") == run_id:
+                    filtered.append(t)
+
+            if not filtered:
+                return None
+
+            # Sort by start_time desc and get first
+            filtered.sort(key=lambda x: x.get("start_time", ""), reverse=True)
+            trace_data = filtered[0]
+
+            # Calculate total_spans and error_count
+            trace_spans = [s for s in spans if s.get("trace_id") == trace_data.get("trace_id")]
+            trace_data["total_spans"] = len(trace_spans)
+            trace_data["error_count"] = sum(1 for s in trace_spans if s.get("status_code") == "ERROR")
+
+            return Trace.from_dict(trace_data)
+
+        except Exception as e:
+            log_error(f"Error getting trace: {e}")
+            return None
+
+    def get_traces(
+        self,
+        run_id: Optional[str] = None,
+        session_id: Optional[str] = None,
+        user_id: Optional[str] = None,
+        agent_id: Optional[str] = None,
+        team_id: Optional[str] = None,
+        workflow_id: Optional[str] = None,
+        status: Optional[str] = None,
+        start_time: Optional[datetime] = None,
+        end_time: Optional[datetime] = None,
+        limit: Optional[int] = 20,
+        page: Optional[int] = 1,
+    ) -> tuple[List, int]:
+        """Get traces matching the provided filters with pagination.
+
+        Args:
+            run_id: Filter by run ID.
+            session_id: Filter by session ID.
+            user_id: Filter by user ID.
+            agent_id: Filter by agent ID.
+            team_id: Filter by team ID.
+            workflow_id: Filter by workflow ID.
+            status: Filter by status (OK, ERROR, UNSET).
+            start_time: Filter traces starting after this datetime.
+            end_time: Filter traces ending before this datetime.
+            limit: Maximum number of traces to return per page.
+            page: Page number (1-indexed).
+
+        Returns:
+            tuple[List[Trace], int]: Tuple of (list of matching traces, total count).
+        """
+        try:
+            from agno.tracing.schemas import Trace
+
+            traces = self._read_json_file(self.trace_table_name, create_table_if_not_found=False)
+            if not traces:
+                return [], 0
+
+            # Get spans for calculating total_spans and error_count
+            spans = self._read_json_file(self.span_table_name, create_table_if_not_found=False)
+
+            # Apply filters
+            filtered = []
+            for t in traces:
+                if run_id and t.get("run_id") != run_id:
+                    continue
+                if session_id and t.get("session_id") != session_id:
+                    continue
+                if user_id and t.get("user_id") != user_id:
+                    continue
+                if agent_id and t.get("agent_id") != agent_id:
+                    continue
+                if team_id and t.get("team_id") != team_id:
+                    continue
+                if workflow_id and t.get("workflow_id") != workflow_id:
+                    continue
+                if status and t.get("status") != status:
+                    continue
+                if start_time:
+                    trace_start = t.get("start_time", "")
+                    if trace_start < start_time.isoformat():
+                        continue
+                if end_time:
+                    trace_end = t.get("end_time", "")
+                    if trace_end > end_time.isoformat():
+                        continue
+                filtered.append(t)
+
+            total_count = len(filtered)
+
+            # Sort by start_time desc
+            filtered.sort(key=lambda x: x.get("start_time", ""), reverse=True)
+
+            # Apply pagination
+            if limit and page:
+                start_idx = (page - 1) * limit
+                filtered = filtered[start_idx : start_idx + limit]
+
+            # Add total_spans and error_count to each trace
+            result_traces = []
+            for t in filtered:
+                trace_spans = [s for s in spans if s.get("trace_id") == t.get("trace_id")]
+                t["total_spans"] = len(trace_spans)
+                t["error_count"] = sum(1 for s in trace_spans if s.get("status_code") == "ERROR")
+                result_traces.append(Trace.from_dict(t))
+
+            return result_traces, total_count
+
+        except Exception as e:
+            log_error(f"Error getting traces: {e}")
+            return [], 0
+
+    def get_trace_stats(
+        self,
+        user_id: Optional[str] = None,
+        agent_id: Optional[str] = None,
+        team_id: Optional[str] = None,
+        workflow_id: Optional[str] = None,
+        start_time: Optional[datetime] = None,
+        end_time: Optional[datetime] = None,
+        limit: Optional[int] = 20,
+        page: Optional[int] = 1,
+    ) -> tuple[List[Dict[str, Any]], int]:
+        """Get trace statistics grouped by session.
+
+        Args:
+            user_id: Filter by user ID.
+            agent_id: Filter by agent ID.
+            team_id: Filter by team ID.
+            workflow_id: Filter by workflow ID.
+            start_time: Filter sessions with traces created after this datetime.
+            end_time: Filter sessions with traces created before this datetime.
+            limit: Maximum number of sessions to return per page.
+            page: Page number (1-indexed).
+
+        Returns:
+            tuple[List[Dict], int]: Tuple of (list of session stats dicts, total count).
+            Each dict contains: session_id, user_id, agent_id, team_id, workflow_id, total_traces,
+            first_trace_at, last_trace_at.
+        """
+        try:
+            traces = self._read_json_file(self.trace_table_name, create_table_if_not_found=False)
+            if not traces:
+                return [], 0
+
+            # Group by session_id
+            session_stats: Dict[str, Dict[str, Any]] = {}
+
+            for t in traces:
+                trace_session_id = t.get("session_id")
+                if not trace_session_id:
+                    continue
+
+                # Apply filters
+                if user_id and t.get("user_id") != user_id:
+                    continue
+                if agent_id and t.get("agent_id") != agent_id:
+                    continue
+                if team_id and t.get("team_id") != team_id:
+                    continue
+                if workflow_id and t.get("workflow_id") != workflow_id:
+                    continue
+
+                created_at = t.get("created_at", "")
+                if start_time and created_at < start_time.isoformat():
+                    continue
+                if end_time and created_at > end_time.isoformat():
+                    continue
+
+                if trace_session_id not in session_stats:
+                    session_stats[trace_session_id] = {
+                        "session_id": trace_session_id,
+                        "user_id": t.get("user_id"),
+                        "agent_id": t.get("agent_id"),
+                        "team_id": t.get("team_id"),
+                        "workflow_id": t.get("workflow_id"),
+                        "total_traces": 0,
+                        "first_trace_at": created_at,
+                        "last_trace_at": created_at,
+                    }
+
+                session_stats[trace_session_id]["total_traces"] += 1
+                if created_at and session_stats[trace_session_id]["first_trace_at"]:
+                    if created_at < session_stats[trace_session_id]["first_trace_at"]:
+                        session_stats[trace_session_id]["first_trace_at"] = created_at
+                if created_at and session_stats[trace_session_id]["last_trace_at"]:
+                    if created_at > session_stats[trace_session_id]["last_trace_at"]:
+                        session_stats[trace_session_id]["last_trace_at"] = created_at
+
+            stats_list = list(session_stats.values())
+            total_count = len(stats_list)
+
+            # Sort by last_trace_at desc
+            stats_list.sort(key=lambda x: x.get("last_trace_at", ""), reverse=True)
+
+            # Apply pagination
+            if limit and page:
+                start_idx = (page - 1) * limit
+                stats_list = stats_list[start_idx : start_idx + limit]
+
+            # Convert ISO strings to datetime objects
+            for stat in stats_list:
+                first_at = stat.get("first_trace_at", "")
+                last_at = stat.get("last_trace_at", "")
+                if first_at:
+                    stat["first_trace_at"] = datetime.fromisoformat(first_at.replace("Z", "+00:00"))
+                if last_at:
+                    stat["last_trace_at"] = datetime.fromisoformat(last_at.replace("Z", "+00:00"))
+
+            return stats_list, total_count
+
+        except Exception as e:
+            log_error(f"Error getting trace stats: {e}")
+            return [], 0
+
+    # --- Spans ---
+    def create_span(self, span: "Span") -> None:
+        """Create a single span in the database.
+
+        Args:
+            span: The Span object to store.
+        """
+        try:
+            spans = self._read_json_file(self.span_table_name, create_table_if_not_found=True)
+            spans.append(span.to_dict())
+            self._write_json_file(self.span_table_name, spans)
+
+        except Exception as e:
+            log_error(f"Error creating span: {e}")
+
+    def create_spans(self, spans: List) -> None:
+        """Create multiple spans in the database as a batch.
+
+        Args:
+            spans: List of Span objects to store.
+        """
+        if not spans:
+            return
+
+        try:
+            existing_spans = self._read_json_file(self.span_table_name, create_table_if_not_found=True)
+            for span in spans:
+                existing_spans.append(span.to_dict())
+            self._write_json_file(self.span_table_name, existing_spans)
+
+        except Exception as e:
+            log_error(f"Error creating spans batch: {e}")
+
+    def get_span(self, span_id: str):
+        """Get a single span by its span_id.
+
+        Args:
+            span_id: The unique span identifier.
+
+        Returns:
+            Optional[Span]: The span if found, None otherwise.
+        """
+        try:
+            from agno.tracing.schemas import Span
+
+            spans = self._read_json_file(self.span_table_name, create_table_if_not_found=False)
+
+            for s in spans:
+                if s.get("span_id") == span_id:
+                    return Span.from_dict(s)
+
+            return None
+
+        except Exception as e:
+            log_error(f"Error getting span: {e}")
+            return None
+
+    def get_spans(
+        self,
+        trace_id: Optional[str] = None,
+        parent_span_id: Optional[str] = None,
+        limit: Optional[int] = 1000,
+    ) -> List:
+        """Get spans matching the provided filters.
+
+        Args:
+            trace_id: Filter by trace ID.
+            parent_span_id: Filter by parent span ID.
+            limit: Maximum number of spans to return.
+
+        Returns:
+            List[Span]: List of matching spans.
+        """
+        try:
+            from agno.tracing.schemas import Span
+
+            spans = self._read_json_file(self.span_table_name, create_table_if_not_found=False)
+            if not spans:
+                return []
+
+            # Apply filters
+            filtered = []
+            for s in spans:
+                if trace_id and s.get("trace_id") != trace_id:
+                    continue
+                if parent_span_id and s.get("parent_span_id") != parent_span_id:
+                    continue
+                filtered.append(s)
+
+            # Apply limit
+            if limit:
+                filtered = filtered[:limit]
+
+            return [Span.from_dict(s) for s in filtered]
+
+        except Exception as e:
+            log_error(f"Error getting spans: {e}")
+            return []