agno 0.1.2__py3-none-any.whl → 2.3.13__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- agno/__init__.py +8 -0
- agno/agent/__init__.py +44 -5
- agno/agent/agent.py +10531 -2975
- agno/api/agent.py +14 -53
- agno/api/api.py +7 -46
- agno/api/evals.py +22 -0
- agno/api/os.py +17 -0
- agno/api/routes.py +6 -25
- agno/api/schemas/__init__.py +9 -0
- agno/api/schemas/agent.py +6 -9
- agno/api/schemas/evals.py +16 -0
- agno/api/schemas/os.py +14 -0
- agno/api/schemas/team.py +10 -10
- agno/api/schemas/utils.py +21 -0
- agno/api/schemas/workflows.py +16 -0
- agno/api/settings.py +53 -0
- agno/api/team.py +22 -26
- agno/api/workflow.py +28 -0
- agno/cloud/aws/base.py +214 -0
- agno/cloud/aws/s3/__init__.py +2 -0
- agno/cloud/aws/s3/api_client.py +43 -0
- agno/cloud/aws/s3/bucket.py +195 -0
- agno/cloud/aws/s3/object.py +57 -0
- agno/compression/__init__.py +3 -0
- agno/compression/manager.py +247 -0
- agno/culture/__init__.py +3 -0
- agno/culture/manager.py +956 -0
- agno/db/__init__.py +24 -0
- agno/db/async_postgres/__init__.py +3 -0
- agno/db/base.py +946 -0
- agno/db/dynamo/__init__.py +3 -0
- agno/db/dynamo/dynamo.py +2781 -0
- agno/db/dynamo/schemas.py +442 -0
- agno/db/dynamo/utils.py +743 -0
- agno/db/firestore/__init__.py +3 -0
- agno/db/firestore/firestore.py +2379 -0
- agno/db/firestore/schemas.py +181 -0
- agno/db/firestore/utils.py +376 -0
- agno/db/gcs_json/__init__.py +3 -0
- agno/db/gcs_json/gcs_json_db.py +1791 -0
- agno/db/gcs_json/utils.py +228 -0
- agno/db/in_memory/__init__.py +3 -0
- agno/db/in_memory/in_memory_db.py +1312 -0
- agno/db/in_memory/utils.py +230 -0
- agno/db/json/__init__.py +3 -0
- agno/db/json/json_db.py +1777 -0
- agno/db/json/utils.py +230 -0
- agno/db/migrations/manager.py +199 -0
- agno/db/migrations/v1_to_v2.py +635 -0
- agno/db/migrations/versions/v2_3_0.py +938 -0
- agno/db/mongo/__init__.py +17 -0
- agno/db/mongo/async_mongo.py +2760 -0
- agno/db/mongo/mongo.py +2597 -0
- agno/db/mongo/schemas.py +119 -0
- agno/db/mongo/utils.py +276 -0
- agno/db/mysql/__init__.py +4 -0
- agno/db/mysql/async_mysql.py +2912 -0
- agno/db/mysql/mysql.py +2923 -0
- agno/db/mysql/schemas.py +186 -0
- agno/db/mysql/utils.py +488 -0
- agno/db/postgres/__init__.py +4 -0
- agno/db/postgres/async_postgres.py +2579 -0
- agno/db/postgres/postgres.py +2870 -0
- agno/db/postgres/schemas.py +187 -0
- agno/db/postgres/utils.py +442 -0
- agno/db/redis/__init__.py +3 -0
- agno/db/redis/redis.py +2141 -0
- agno/db/redis/schemas.py +159 -0
- agno/db/redis/utils.py +346 -0
- agno/db/schemas/__init__.py +4 -0
- agno/db/schemas/culture.py +120 -0
- agno/db/schemas/evals.py +34 -0
- agno/db/schemas/knowledge.py +40 -0
- agno/db/schemas/memory.py +61 -0
- agno/db/singlestore/__init__.py +3 -0
- agno/db/singlestore/schemas.py +179 -0
- agno/db/singlestore/singlestore.py +2877 -0
- agno/db/singlestore/utils.py +384 -0
- agno/db/sqlite/__init__.py +4 -0
- agno/db/sqlite/async_sqlite.py +2911 -0
- agno/db/sqlite/schemas.py +181 -0
- agno/db/sqlite/sqlite.py +2908 -0
- agno/db/sqlite/utils.py +429 -0
- agno/db/surrealdb/__init__.py +3 -0
- agno/db/surrealdb/metrics.py +292 -0
- agno/db/surrealdb/models.py +334 -0
- agno/db/surrealdb/queries.py +71 -0
- agno/db/surrealdb/surrealdb.py +1908 -0
- agno/db/surrealdb/utils.py +147 -0
- agno/db/utils.py +118 -0
- agno/eval/__init__.py +24 -0
- agno/eval/accuracy.py +666 -276
- agno/eval/agent_as_judge.py +861 -0
- agno/eval/base.py +29 -0
- agno/eval/performance.py +779 -0
- agno/eval/reliability.py +241 -62
- agno/eval/utils.py +120 -0
- agno/exceptions.py +143 -1
- agno/filters.py +354 -0
- agno/guardrails/__init__.py +6 -0
- agno/guardrails/base.py +19 -0
- agno/guardrails/openai.py +144 -0
- agno/guardrails/pii.py +94 -0
- agno/guardrails/prompt_injection.py +52 -0
- agno/hooks/__init__.py +3 -0
- agno/hooks/decorator.py +164 -0
- agno/integrations/discord/__init__.py +3 -0
- agno/integrations/discord/client.py +203 -0
- agno/knowledge/__init__.py +5 -1
- agno/{document → knowledge}/chunking/agentic.py +22 -14
- agno/{document → knowledge}/chunking/document.py +2 -2
- agno/{document → knowledge}/chunking/fixed.py +7 -6
- agno/knowledge/chunking/markdown.py +151 -0
- agno/{document → knowledge}/chunking/recursive.py +15 -3
- agno/knowledge/chunking/row.py +39 -0
- agno/knowledge/chunking/semantic.py +91 -0
- agno/knowledge/chunking/strategy.py +165 -0
- agno/knowledge/content.py +74 -0
- agno/knowledge/document/__init__.py +5 -0
- agno/{document → knowledge/document}/base.py +12 -2
- agno/knowledge/embedder/__init__.py +5 -0
- agno/knowledge/embedder/aws_bedrock.py +343 -0
- agno/knowledge/embedder/azure_openai.py +210 -0
- agno/{embedder → knowledge/embedder}/base.py +8 -0
- agno/knowledge/embedder/cohere.py +323 -0
- agno/knowledge/embedder/fastembed.py +62 -0
- agno/{embedder → knowledge/embedder}/fireworks.py +1 -1
- agno/knowledge/embedder/google.py +258 -0
- agno/knowledge/embedder/huggingface.py +94 -0
- agno/knowledge/embedder/jina.py +182 -0
- agno/knowledge/embedder/langdb.py +22 -0
- agno/knowledge/embedder/mistral.py +206 -0
- agno/knowledge/embedder/nebius.py +13 -0
- agno/knowledge/embedder/ollama.py +154 -0
- agno/knowledge/embedder/openai.py +195 -0
- agno/knowledge/embedder/sentence_transformer.py +63 -0
- agno/{embedder → knowledge/embedder}/together.py +1 -1
- agno/knowledge/embedder/vllm.py +262 -0
- agno/knowledge/embedder/voyageai.py +165 -0
- agno/knowledge/knowledge.py +3006 -0
- agno/knowledge/reader/__init__.py +7 -0
- agno/knowledge/reader/arxiv_reader.py +81 -0
- agno/knowledge/reader/base.py +95 -0
- agno/knowledge/reader/csv_reader.py +164 -0
- agno/knowledge/reader/docx_reader.py +82 -0
- agno/knowledge/reader/field_labeled_csv_reader.py +290 -0
- agno/knowledge/reader/firecrawl_reader.py +201 -0
- agno/knowledge/reader/json_reader.py +88 -0
- agno/knowledge/reader/markdown_reader.py +137 -0
- agno/knowledge/reader/pdf_reader.py +431 -0
- agno/knowledge/reader/pptx_reader.py +101 -0
- agno/knowledge/reader/reader_factory.py +313 -0
- agno/knowledge/reader/s3_reader.py +89 -0
- agno/knowledge/reader/tavily_reader.py +193 -0
- agno/knowledge/reader/text_reader.py +127 -0
- agno/knowledge/reader/web_search_reader.py +325 -0
- agno/knowledge/reader/website_reader.py +455 -0
- agno/knowledge/reader/wikipedia_reader.py +91 -0
- agno/knowledge/reader/youtube_reader.py +78 -0
- agno/knowledge/remote_content/remote_content.py +88 -0
- agno/knowledge/reranker/__init__.py +3 -0
- agno/{reranker → knowledge/reranker}/base.py +1 -1
- agno/{reranker → knowledge/reranker}/cohere.py +2 -2
- agno/knowledge/reranker/infinity.py +195 -0
- agno/knowledge/reranker/sentence_transformer.py +54 -0
- agno/knowledge/types.py +39 -0
- agno/knowledge/utils.py +234 -0
- agno/media.py +439 -95
- agno/memory/__init__.py +16 -3
- agno/memory/manager.py +1474 -123
- agno/memory/strategies/__init__.py +15 -0
- agno/memory/strategies/base.py +66 -0
- agno/memory/strategies/summarize.py +196 -0
- agno/memory/strategies/types.py +37 -0
- agno/models/aimlapi/__init__.py +5 -0
- agno/models/aimlapi/aimlapi.py +62 -0
- agno/models/anthropic/__init__.py +4 -0
- agno/models/anthropic/claude.py +960 -496
- agno/models/aws/__init__.py +15 -0
- agno/models/aws/bedrock.py +686 -451
- agno/models/aws/claude.py +190 -183
- agno/models/azure/__init__.py +18 -1
- agno/models/azure/ai_foundry.py +489 -0
- agno/models/azure/openai_chat.py +89 -40
- agno/models/base.py +2477 -550
- agno/models/cerebras/__init__.py +12 -0
- agno/models/cerebras/cerebras.py +565 -0
- agno/models/cerebras/cerebras_openai.py +131 -0
- agno/models/cohere/__init__.py +4 -0
- agno/models/cohere/chat.py +306 -492
- agno/models/cometapi/__init__.py +5 -0
- agno/models/cometapi/cometapi.py +74 -0
- agno/models/dashscope/__init__.py +5 -0
- agno/models/dashscope/dashscope.py +90 -0
- agno/models/deepinfra/__init__.py +5 -0
- agno/models/deepinfra/deepinfra.py +45 -0
- agno/models/deepseek/__init__.py +4 -0
- agno/models/deepseek/deepseek.py +110 -9
- agno/models/fireworks/__init__.py +4 -0
- agno/models/fireworks/fireworks.py +19 -22
- agno/models/google/__init__.py +3 -7
- agno/models/google/gemini.py +1717 -662
- agno/models/google/utils.py +22 -0
- agno/models/groq/__init__.py +4 -0
- agno/models/groq/groq.py +391 -666
- agno/models/huggingface/__init__.py +4 -0
- agno/models/huggingface/huggingface.py +266 -538
- agno/models/ibm/__init__.py +5 -0
- agno/models/ibm/watsonx.py +432 -0
- agno/models/internlm/__init__.py +3 -0
- agno/models/internlm/internlm.py +20 -3
- agno/models/langdb/__init__.py +1 -0
- agno/models/langdb/langdb.py +60 -0
- agno/models/litellm/__init__.py +14 -0
- agno/models/litellm/chat.py +503 -0
- agno/models/litellm/litellm_openai.py +42 -0
- agno/models/llama_cpp/__init__.py +5 -0
- agno/models/llama_cpp/llama_cpp.py +22 -0
- agno/models/lmstudio/__init__.py +5 -0
- agno/models/lmstudio/lmstudio.py +25 -0
- agno/models/message.py +361 -39
- agno/models/meta/__init__.py +12 -0
- agno/models/meta/llama.py +502 -0
- agno/models/meta/llama_openai.py +79 -0
- agno/models/metrics.py +120 -0
- agno/models/mistral/__init__.py +4 -0
- agno/models/mistral/mistral.py +293 -393
- agno/models/nebius/__init__.py +3 -0
- agno/models/nebius/nebius.py +53 -0
- agno/models/nexus/__init__.py +3 -0
- agno/models/nexus/nexus.py +22 -0
- agno/models/nvidia/__init__.py +4 -0
- agno/models/nvidia/nvidia.py +22 -3
- agno/models/ollama/__init__.py +4 -2
- agno/models/ollama/chat.py +257 -492
- agno/models/openai/__init__.py +7 -0
- agno/models/openai/chat.py +725 -770
- agno/models/openai/like.py +16 -2
- agno/models/openai/responses.py +1121 -0
- agno/models/openrouter/__init__.py +4 -0
- agno/models/openrouter/openrouter.py +62 -5
- agno/models/perplexity/__init__.py +5 -0
- agno/models/perplexity/perplexity.py +203 -0
- agno/models/portkey/__init__.py +3 -0
- agno/models/portkey/portkey.py +82 -0
- agno/models/requesty/__init__.py +5 -0
- agno/models/requesty/requesty.py +69 -0
- agno/models/response.py +177 -7
- agno/models/sambanova/__init__.py +4 -0
- agno/models/sambanova/sambanova.py +23 -4
- agno/models/siliconflow/__init__.py +5 -0
- agno/models/siliconflow/siliconflow.py +42 -0
- agno/models/together/__init__.py +4 -0
- agno/models/together/together.py +21 -164
- agno/models/utils.py +266 -0
- agno/models/vercel/__init__.py +3 -0
- agno/models/vercel/v0.py +43 -0
- agno/models/vertexai/__init__.py +0 -1
- agno/models/vertexai/claude.py +190 -0
- agno/models/vllm/__init__.py +3 -0
- agno/models/vllm/vllm.py +83 -0
- agno/models/xai/__init__.py +2 -0
- agno/models/xai/xai.py +111 -7
- agno/os/__init__.py +3 -0
- agno/os/app.py +1027 -0
- agno/os/auth.py +244 -0
- agno/os/config.py +126 -0
- agno/os/interfaces/__init__.py +1 -0
- agno/os/interfaces/a2a/__init__.py +3 -0
- agno/os/interfaces/a2a/a2a.py +42 -0
- agno/os/interfaces/a2a/router.py +249 -0
- agno/os/interfaces/a2a/utils.py +924 -0
- agno/os/interfaces/agui/__init__.py +3 -0
- agno/os/interfaces/agui/agui.py +47 -0
- agno/os/interfaces/agui/router.py +147 -0
- agno/os/interfaces/agui/utils.py +574 -0
- agno/os/interfaces/base.py +25 -0
- agno/os/interfaces/slack/__init__.py +3 -0
- agno/os/interfaces/slack/router.py +148 -0
- agno/os/interfaces/slack/security.py +30 -0
- agno/os/interfaces/slack/slack.py +47 -0
- agno/os/interfaces/whatsapp/__init__.py +3 -0
- agno/os/interfaces/whatsapp/router.py +210 -0
- agno/os/interfaces/whatsapp/security.py +55 -0
- agno/os/interfaces/whatsapp/whatsapp.py +36 -0
- agno/os/mcp.py +293 -0
- agno/os/middleware/__init__.py +9 -0
- agno/os/middleware/jwt.py +797 -0
- agno/os/router.py +258 -0
- agno/os/routers/__init__.py +3 -0
- agno/os/routers/agents/__init__.py +3 -0
- agno/os/routers/agents/router.py +599 -0
- agno/os/routers/agents/schema.py +261 -0
- agno/os/routers/evals/__init__.py +3 -0
- agno/os/routers/evals/evals.py +450 -0
- agno/os/routers/evals/schemas.py +174 -0
- agno/os/routers/evals/utils.py +231 -0
- agno/os/routers/health.py +31 -0
- agno/os/routers/home.py +52 -0
- agno/os/routers/knowledge/__init__.py +3 -0
- agno/os/routers/knowledge/knowledge.py +1008 -0
- agno/os/routers/knowledge/schemas.py +178 -0
- agno/os/routers/memory/__init__.py +3 -0
- agno/os/routers/memory/memory.py +661 -0
- agno/os/routers/memory/schemas.py +88 -0
- agno/os/routers/metrics/__init__.py +3 -0
- agno/os/routers/metrics/metrics.py +190 -0
- agno/os/routers/metrics/schemas.py +47 -0
- agno/os/routers/session/__init__.py +3 -0
- agno/os/routers/session/session.py +997 -0
- agno/os/routers/teams/__init__.py +3 -0
- agno/os/routers/teams/router.py +512 -0
- agno/os/routers/teams/schema.py +257 -0
- agno/os/routers/traces/__init__.py +3 -0
- agno/os/routers/traces/schemas.py +414 -0
- agno/os/routers/traces/traces.py +499 -0
- agno/os/routers/workflows/__init__.py +3 -0
- agno/os/routers/workflows/router.py +624 -0
- agno/os/routers/workflows/schema.py +75 -0
- agno/os/schema.py +534 -0
- agno/os/scopes.py +469 -0
- agno/{playground → os}/settings.py +7 -15
- agno/os/utils.py +973 -0
- agno/reasoning/anthropic.py +80 -0
- agno/reasoning/azure_ai_foundry.py +67 -0
- agno/reasoning/deepseek.py +63 -0
- agno/reasoning/default.py +97 -0
- agno/reasoning/gemini.py +73 -0
- agno/reasoning/groq.py +71 -0
- agno/reasoning/helpers.py +24 -1
- agno/reasoning/ollama.py +67 -0
- agno/reasoning/openai.py +86 -0
- agno/reasoning/step.py +2 -1
- agno/reasoning/vertexai.py +76 -0
- agno/run/__init__.py +6 -0
- agno/run/agent.py +822 -0
- agno/run/base.py +247 -0
- agno/run/cancel.py +81 -0
- agno/run/requirement.py +181 -0
- agno/run/team.py +767 -0
- agno/run/workflow.py +708 -0
- agno/session/__init__.py +10 -0
- agno/session/agent.py +260 -0
- agno/session/summary.py +265 -0
- agno/session/team.py +342 -0
- agno/session/workflow.py +501 -0
- agno/table.py +10 -0
- agno/team/__init__.py +37 -0
- agno/team/team.py +9536 -0
- agno/tools/__init__.py +7 -0
- agno/tools/agentql.py +120 -0
- agno/tools/airflow.py +22 -12
- agno/tools/api.py +122 -0
- agno/tools/apify.py +276 -83
- agno/tools/{arxiv_toolkit.py → arxiv.py} +20 -12
- agno/tools/aws_lambda.py +28 -7
- agno/tools/aws_ses.py +66 -0
- agno/tools/baidusearch.py +11 -4
- agno/tools/bitbucket.py +292 -0
- agno/tools/brandfetch.py +213 -0
- agno/tools/bravesearch.py +106 -0
- agno/tools/brightdata.py +367 -0
- agno/tools/browserbase.py +209 -0
- agno/tools/calcom.py +32 -23
- agno/tools/calculator.py +24 -37
- agno/tools/cartesia.py +187 -0
- agno/tools/{clickup_tool.py → clickup.py} +17 -28
- agno/tools/confluence.py +91 -26
- agno/tools/crawl4ai.py +139 -43
- agno/tools/csv_toolkit.py +28 -22
- agno/tools/dalle.py +36 -22
- agno/tools/daytona.py +475 -0
- agno/tools/decorator.py +169 -14
- agno/tools/desi_vocal.py +23 -11
- agno/tools/discord.py +32 -29
- agno/tools/docker.py +716 -0
- agno/tools/duckdb.py +76 -81
- agno/tools/duckduckgo.py +43 -40
- agno/tools/e2b.py +703 -0
- agno/tools/eleven_labs.py +65 -54
- agno/tools/email.py +13 -5
- agno/tools/evm.py +129 -0
- agno/tools/exa.py +324 -42
- agno/tools/fal.py +39 -35
- agno/tools/file.py +196 -30
- agno/tools/file_generation.py +356 -0
- agno/tools/financial_datasets.py +288 -0
- agno/tools/firecrawl.py +108 -33
- agno/tools/function.py +960 -122
- agno/tools/giphy.py +34 -12
- agno/tools/github.py +1294 -97
- agno/tools/gmail.py +922 -0
- agno/tools/google_bigquery.py +117 -0
- agno/tools/google_drive.py +271 -0
- agno/tools/google_maps.py +253 -0
- agno/tools/googlecalendar.py +607 -107
- agno/tools/googlesheets.py +377 -0
- agno/tools/hackernews.py +20 -12
- agno/tools/jina.py +24 -14
- agno/tools/jira.py +48 -19
- agno/tools/knowledge.py +218 -0
- agno/tools/linear.py +82 -43
- agno/tools/linkup.py +58 -0
- agno/tools/local_file_system.py +15 -7
- agno/tools/lumalab.py +41 -26
- agno/tools/mcp/__init__.py +10 -0
- agno/tools/mcp/mcp.py +331 -0
- agno/tools/mcp/multi_mcp.py +347 -0
- agno/tools/mcp/params.py +24 -0
- agno/tools/mcp_toolbox.py +284 -0
- agno/tools/mem0.py +193 -0
- agno/tools/memory.py +419 -0
- agno/tools/mlx_transcribe.py +11 -9
- agno/tools/models/azure_openai.py +190 -0
- agno/tools/models/gemini.py +203 -0
- agno/tools/models/groq.py +158 -0
- agno/tools/models/morph.py +186 -0
- agno/tools/models/nebius.py +124 -0
- agno/tools/models_labs.py +163 -82
- agno/tools/moviepy_video.py +18 -13
- agno/tools/nano_banana.py +151 -0
- agno/tools/neo4j.py +134 -0
- agno/tools/newspaper.py +15 -4
- agno/tools/newspaper4k.py +19 -6
- agno/tools/notion.py +204 -0
- agno/tools/openai.py +181 -17
- agno/tools/openbb.py +27 -20
- agno/tools/opencv.py +321 -0
- agno/tools/openweather.py +233 -0
- agno/tools/oxylabs.py +385 -0
- agno/tools/pandas.py +25 -15
- agno/tools/parallel.py +314 -0
- agno/tools/postgres.py +238 -185
- agno/tools/pubmed.py +125 -13
- agno/tools/python.py +48 -35
- agno/tools/reasoning.py +283 -0
- agno/tools/reddit.py +207 -29
- agno/tools/redshift.py +406 -0
- agno/tools/replicate.py +69 -26
- agno/tools/resend.py +11 -6
- agno/tools/scrapegraph.py +179 -19
- agno/tools/searxng.py +23 -31
- agno/tools/serpapi.py +15 -10
- agno/tools/serper.py +255 -0
- agno/tools/shell.py +23 -12
- agno/tools/shopify.py +1519 -0
- agno/tools/slack.py +56 -14
- agno/tools/sleep.py +8 -6
- agno/tools/spider.py +35 -11
- agno/tools/spotify.py +919 -0
- agno/tools/sql.py +34 -19
- agno/tools/tavily.py +158 -8
- agno/tools/telegram.py +18 -8
- agno/tools/todoist.py +218 -0
- agno/tools/toolkit.py +134 -9
- agno/tools/trafilatura.py +388 -0
- agno/tools/trello.py +25 -28
- agno/tools/twilio.py +18 -9
- agno/tools/user_control_flow.py +78 -0
- agno/tools/valyu.py +228 -0
- agno/tools/visualization.py +467 -0
- agno/tools/webbrowser.py +28 -0
- agno/tools/webex.py +76 -0
- agno/tools/website.py +23 -19
- agno/tools/webtools.py +45 -0
- agno/tools/whatsapp.py +286 -0
- agno/tools/wikipedia.py +28 -19
- agno/tools/workflow.py +285 -0
- agno/tools/{twitter.py → x.py} +142 -46
- agno/tools/yfinance.py +41 -39
- agno/tools/youtube.py +34 -17
- agno/tools/zendesk.py +15 -5
- agno/tools/zep.py +454 -0
- agno/tools/zoom.py +86 -37
- agno/tracing/__init__.py +12 -0
- agno/tracing/exporter.py +157 -0
- agno/tracing/schemas.py +276 -0
- agno/tracing/setup.py +111 -0
- agno/utils/agent.py +938 -0
- agno/utils/audio.py +37 -1
- agno/utils/certs.py +27 -0
- agno/utils/code_execution.py +11 -0
- agno/utils/common.py +103 -20
- agno/utils/cryptography.py +22 -0
- agno/utils/dttm.py +33 -0
- agno/utils/events.py +700 -0
- agno/utils/functions.py +107 -37
- agno/utils/gemini.py +426 -0
- agno/utils/hooks.py +171 -0
- agno/utils/http.py +185 -0
- agno/utils/json_schema.py +159 -37
- agno/utils/knowledge.py +36 -0
- agno/utils/location.py +19 -0
- agno/utils/log.py +221 -8
- agno/utils/mcp.py +214 -0
- agno/utils/media.py +335 -14
- agno/utils/merge_dict.py +22 -1
- agno/utils/message.py +77 -2
- agno/utils/models/ai_foundry.py +50 -0
- agno/utils/models/claude.py +373 -0
- agno/utils/models/cohere.py +94 -0
- agno/utils/models/llama.py +85 -0
- agno/utils/models/mistral.py +100 -0
- agno/utils/models/openai_responses.py +140 -0
- agno/utils/models/schema_utils.py +153 -0
- agno/utils/models/watsonx.py +41 -0
- agno/utils/openai.py +257 -0
- agno/utils/pickle.py +1 -1
- agno/utils/pprint.py +124 -8
- agno/utils/print_response/agent.py +930 -0
- agno/utils/print_response/team.py +1914 -0
- agno/utils/print_response/workflow.py +1668 -0
- agno/utils/prompts.py +111 -0
- agno/utils/reasoning.py +108 -0
- agno/utils/response.py +163 -0
- agno/utils/serialize.py +32 -0
- agno/utils/shell.py +4 -4
- agno/utils/streamlit.py +487 -0
- agno/utils/string.py +204 -51
- agno/utils/team.py +139 -0
- agno/utils/timer.py +9 -2
- agno/utils/tokens.py +657 -0
- agno/utils/tools.py +19 -1
- agno/utils/whatsapp.py +305 -0
- agno/utils/yaml_io.py +3 -3
- agno/vectordb/__init__.py +2 -0
- agno/vectordb/base.py +87 -9
- agno/vectordb/cassandra/__init__.py +5 -1
- agno/vectordb/cassandra/cassandra.py +383 -27
- agno/vectordb/chroma/__init__.py +4 -0
- agno/vectordb/chroma/chromadb.py +748 -83
- agno/vectordb/clickhouse/__init__.py +7 -1
- agno/vectordb/clickhouse/clickhousedb.py +554 -53
- agno/vectordb/couchbase/__init__.py +3 -0
- agno/vectordb/couchbase/couchbase.py +1446 -0
- agno/vectordb/lancedb/__init__.py +5 -0
- agno/vectordb/lancedb/lance_db.py +730 -98
- agno/vectordb/langchaindb/__init__.py +5 -0
- agno/vectordb/langchaindb/langchaindb.py +163 -0
- agno/vectordb/lightrag/__init__.py +5 -0
- agno/vectordb/lightrag/lightrag.py +388 -0
- agno/vectordb/llamaindex/__init__.py +3 -0
- agno/vectordb/llamaindex/llamaindexdb.py +166 -0
- agno/vectordb/milvus/__init__.py +3 -0
- agno/vectordb/milvus/milvus.py +966 -78
- agno/vectordb/mongodb/__init__.py +9 -1
- agno/vectordb/mongodb/mongodb.py +1175 -172
- agno/vectordb/pgvector/__init__.py +8 -0
- agno/vectordb/pgvector/pgvector.py +599 -115
- agno/vectordb/pineconedb/__init__.py +5 -1
- agno/vectordb/pineconedb/pineconedb.py +406 -43
- agno/vectordb/qdrant/__init__.py +4 -0
- agno/vectordb/qdrant/qdrant.py +914 -61
- agno/vectordb/redis/__init__.py +9 -0
- agno/vectordb/redis/redisdb.py +682 -0
- agno/vectordb/singlestore/__init__.py +8 -1
- agno/vectordb/singlestore/singlestore.py +771 -0
- agno/vectordb/surrealdb/__init__.py +3 -0
- agno/vectordb/surrealdb/surrealdb.py +663 -0
- agno/vectordb/upstashdb/__init__.py +5 -0
- agno/vectordb/upstashdb/upstashdb.py +718 -0
- agno/vectordb/weaviate/__init__.py +8 -0
- agno/vectordb/weaviate/index.py +15 -0
- agno/vectordb/weaviate/weaviate.py +1009 -0
- agno/workflow/__init__.py +23 -1
- agno/workflow/agent.py +299 -0
- agno/workflow/condition.py +759 -0
- agno/workflow/loop.py +756 -0
- agno/workflow/parallel.py +853 -0
- agno/workflow/router.py +723 -0
- agno/workflow/step.py +1564 -0
- agno/workflow/steps.py +613 -0
- agno/workflow/types.py +556 -0
- agno/workflow/workflow.py +4327 -514
- agno-2.3.13.dist-info/METADATA +639 -0
- agno-2.3.13.dist-info/RECORD +613 -0
- {agno-0.1.2.dist-info → agno-2.3.13.dist-info}/WHEEL +1 -1
- agno-2.3.13.dist-info/licenses/LICENSE +201 -0
- agno/api/playground.py +0 -91
- agno/api/schemas/playground.py +0 -22
- agno/api/schemas/user.py +0 -22
- agno/api/schemas/workspace.py +0 -46
- agno/api/user.py +0 -160
- agno/api/workspace.py +0 -151
- agno/cli/auth_server.py +0 -118
- agno/cli/config.py +0 -275
- agno/cli/console.py +0 -88
- agno/cli/credentials.py +0 -23
- agno/cli/entrypoint.py +0 -571
- agno/cli/operator.py +0 -355
- agno/cli/settings.py +0 -85
- agno/cli/ws/ws_cli.py +0 -817
- agno/constants.py +0 -13
- agno/document/__init__.py +0 -1
- agno/document/chunking/semantic.py +0 -47
- agno/document/chunking/strategy.py +0 -31
- agno/document/reader/__init__.py +0 -1
- agno/document/reader/arxiv_reader.py +0 -41
- agno/document/reader/base.py +0 -22
- agno/document/reader/csv_reader.py +0 -84
- agno/document/reader/docx_reader.py +0 -46
- agno/document/reader/firecrawl_reader.py +0 -99
- agno/document/reader/json_reader.py +0 -43
- agno/document/reader/pdf_reader.py +0 -219
- agno/document/reader/s3/pdf_reader.py +0 -46
- agno/document/reader/s3/text_reader.py +0 -51
- agno/document/reader/text_reader.py +0 -41
- agno/document/reader/website_reader.py +0 -175
- agno/document/reader/youtube_reader.py +0 -50
- agno/embedder/__init__.py +0 -1
- agno/embedder/azure_openai.py +0 -86
- agno/embedder/cohere.py +0 -72
- agno/embedder/fastembed.py +0 -37
- agno/embedder/google.py +0 -73
- agno/embedder/huggingface.py +0 -54
- agno/embedder/mistral.py +0 -80
- agno/embedder/ollama.py +0 -57
- agno/embedder/openai.py +0 -74
- agno/embedder/sentence_transformer.py +0 -38
- agno/embedder/voyageai.py +0 -64
- agno/eval/perf.py +0 -201
- agno/file/__init__.py +0 -1
- agno/file/file.py +0 -16
- agno/file/local/csv.py +0 -32
- agno/file/local/txt.py +0 -19
- agno/infra/app.py +0 -240
- agno/infra/base.py +0 -144
- agno/infra/context.py +0 -20
- agno/infra/db_app.py +0 -52
- agno/infra/resource.py +0 -205
- agno/infra/resources.py +0 -55
- agno/knowledge/agent.py +0 -230
- agno/knowledge/arxiv.py +0 -22
- agno/knowledge/combined.py +0 -22
- agno/knowledge/csv.py +0 -28
- agno/knowledge/csv_url.py +0 -19
- agno/knowledge/document.py +0 -20
- agno/knowledge/docx.py +0 -30
- agno/knowledge/json.py +0 -28
- agno/knowledge/langchain.py +0 -71
- agno/knowledge/llamaindex.py +0 -66
- agno/knowledge/pdf.py +0 -28
- agno/knowledge/pdf_url.py +0 -26
- agno/knowledge/s3/base.py +0 -60
- agno/knowledge/s3/pdf.py +0 -21
- agno/knowledge/s3/text.py +0 -23
- agno/knowledge/text.py +0 -30
- agno/knowledge/website.py +0 -88
- agno/knowledge/wikipedia.py +0 -31
- agno/knowledge/youtube.py +0 -22
- agno/memory/agent.py +0 -392
- agno/memory/classifier.py +0 -104
- agno/memory/db/__init__.py +0 -1
- agno/memory/db/base.py +0 -42
- agno/memory/db/mongodb.py +0 -189
- agno/memory/db/postgres.py +0 -203
- agno/memory/db/sqlite.py +0 -193
- agno/memory/memory.py +0 -15
- agno/memory/row.py +0 -36
- agno/memory/summarizer.py +0 -192
- agno/memory/summary.py +0 -19
- agno/memory/workflow.py +0 -38
- agno/models/google/gemini_openai.py +0 -26
- agno/models/ollama/hermes.py +0 -221
- agno/models/ollama/tools.py +0 -362
- agno/models/vertexai/gemini.py +0 -595
- agno/playground/__init__.py +0 -3
- agno/playground/async_router.py +0 -421
- agno/playground/deploy.py +0 -249
- agno/playground/operator.py +0 -92
- agno/playground/playground.py +0 -91
- agno/playground/schemas.py +0 -76
- agno/playground/serve.py +0 -55
- agno/playground/sync_router.py +0 -405
- agno/reasoning/agent.py +0 -68
- agno/run/response.py +0 -112
- agno/storage/agent/__init__.py +0 -0
- agno/storage/agent/base.py +0 -38
- agno/storage/agent/dynamodb.py +0 -350
- agno/storage/agent/json.py +0 -92
- agno/storage/agent/mongodb.py +0 -228
- agno/storage/agent/postgres.py +0 -367
- agno/storage/agent/session.py +0 -79
- agno/storage/agent/singlestore.py +0 -303
- agno/storage/agent/sqlite.py +0 -357
- agno/storage/agent/yaml.py +0 -93
- agno/storage/workflow/__init__.py +0 -0
- agno/storage/workflow/base.py +0 -40
- agno/storage/workflow/mongodb.py +0 -233
- agno/storage/workflow/postgres.py +0 -366
- agno/storage/workflow/session.py +0 -60
- agno/storage/workflow/sqlite.py +0 -359
- agno/tools/googlesearch.py +0 -88
- agno/utils/defaults.py +0 -57
- agno/utils/filesystem.py +0 -39
- agno/utils/git.py +0 -52
- agno/utils/json_io.py +0 -30
- agno/utils/load_env.py +0 -19
- agno/utils/py_io.py +0 -19
- agno/utils/pyproject.py +0 -18
- agno/utils/resource_filter.py +0 -31
- agno/vectordb/singlestore/s2vectordb.py +0 -390
- agno/vectordb/singlestore/s2vectordb2.py +0 -355
- agno/workspace/__init__.py +0 -0
- agno/workspace/config.py +0 -325
- agno/workspace/enums.py +0 -6
- agno/workspace/helpers.py +0 -48
- agno/workspace/operator.py +0 -758
- agno/workspace/settings.py +0 -63
- agno-0.1.2.dist-info/LICENSE +0 -375
- agno-0.1.2.dist-info/METADATA +0 -502
- agno-0.1.2.dist-info/RECORD +0 -352
- agno-0.1.2.dist-info/entry_points.txt +0 -3
- /agno/{cli → db/migrations}/__init__.py +0 -0
- /agno/{cli/ws → db/migrations/versions}/__init__.py +0 -0
- /agno/{document/chunking/__init__.py → db/schemas/metrics.py} +0 -0
- /agno/{document/reader/s3 → integrations}/__init__.py +0 -0
- /agno/{file/local → knowledge/chunking}/__init__.py +0 -0
- /agno/{infra → knowledge/remote_content}/__init__.py +0 -0
- /agno/{knowledge/s3 → tools/models}/__init__.py +0 -0
- /agno/{reranker → utils/models}/__init__.py +0 -0
- /agno/{storage → utils/print_response}/__init__.py +0 -0
- {agno-0.1.2.dist-info → agno-2.3.13.dist-info}/top_level.txt +0 -0
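
Several entries above are renames; they double as a migration map for import paths. The following sketch is inferred purely from the listed paths (the symbols exported by each module are not shown in this diff, so only module-level imports are used):

import agno.knowledge.chunking.fixed   # was agno.document.chunking.fixed
import agno.knowledge.embedder.base    # was agno.embedder.base
import agno.knowledge.reranker.cohere  # was agno.reranker.cohere
import agno.tools.arxiv                # was agno.tools.arxiv_toolkit
import agno.tools.x                    # was agno.tools.twitter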
agno/db/json/json_db.py
ADDED
|
@@ -0,0 +1,1777 @@
|
|
|
1
|
+
import json
|
|
2
|
+
import os
|
|
3
|
+
import time
|
|
4
|
+
from datetime import date, datetime, timedelta, timezone
|
|
5
|
+
from pathlib import Path
|
|
6
|
+
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
|
|
7
|
+
from uuid import uuid4
|
|
8
|
+
|
|
9
|
+
if TYPE_CHECKING:
|
|
10
|
+
from agno.tracing.schemas import Span, Trace
|
|
11
|
+
|
|
12
|
+
from agno.db.base import BaseDb, SessionType
|
|
13
|
+
from agno.db.json.utils import (
|
|
14
|
+
apply_sorting,
|
|
15
|
+
calculate_date_metrics,
|
|
16
|
+
deserialize_cultural_knowledge_from_db,
|
|
17
|
+
fetch_all_sessions_data,
|
|
18
|
+
get_dates_to_calculate_metrics_for,
|
|
19
|
+
serialize_cultural_knowledge_for_db,
|
|
20
|
+
)
|
|
21
|
+
from agno.db.schemas.culture import CulturalKnowledge
|
|
22
|
+
from agno.db.schemas.evals import EvalFilterType, EvalRunRecord, EvalType
|
|
23
|
+
from agno.db.schemas.knowledge import KnowledgeRow
|
|
24
|
+
from agno.db.schemas.memory import UserMemory
|
|
25
|
+
from agno.session import AgentSession, Session, TeamSession, WorkflowSession
|
|
26
|
+
from agno.utils.log import log_debug, log_error, log_info, log_warning
|
|
27
|
+
from agno.utils.string import generate_id
|
|
28
|
+
|
|
29
|
+
|
|
30
|
+
class JsonDb(BaseDb):
|
|
31
|
+
def __init__(
|
|
32
|
+
self,
|
|
33
|
+
db_path: Optional[str] = None,
|
|
34
|
+
session_table: Optional[str] = None,
|
|
35
|
+
culture_table: Optional[str] = None,
|
|
36
|
+
memory_table: Optional[str] = None,
|
|
37
|
+
metrics_table: Optional[str] = None,
|
|
38
|
+
eval_table: Optional[str] = None,
|
|
39
|
+
knowledge_table: Optional[str] = None,
|
|
40
|
+
traces_table: Optional[str] = None,
|
|
41
|
+
spans_table: Optional[str] = None,
|
|
42
|
+
id: Optional[str] = None,
|
|
43
|
+
):
|
|
44
|
+
"""
|
|
45
|
+
Interface for interacting with JSON files as database.
|
|
46
|
+
|
|
47
|
+
Args:
|
|
48
|
+
db_path (Optional[str]): Path to the directory where JSON files will be stored.
|
|
49
|
+
session_table (Optional[str]): Name of the JSON file to store sessions (without .json extension).
|
|
50
|
+
culture_table (Optional[str]): Name of the JSON file to store cultural knowledge.
|
|
51
|
+
memory_table (Optional[str]): Name of the JSON file to store memories.
|
|
52
|
+
metrics_table (Optional[str]): Name of the JSON file to store metrics.
|
|
53
|
+
eval_table (Optional[str]): Name of the JSON file to store evaluation runs.
|
|
54
|
+
knowledge_table (Optional[str]): Name of the JSON file to store knowledge content.
|
|
55
|
+
traces_table (Optional[str]): Name of the JSON file to store run traces.
|
|
56
|
+
spans_table (Optional[str]): Name of the JSON file to store span events.
|
|
57
|
+
id (Optional[str]): ID of the database.
|
|
58
|
+
"""
|
|
59
|
+
if id is None:
|
|
60
|
+
seed = db_path or "agno_json_db"
|
|
61
|
+
id = generate_id(seed)
|
|
62
|
+
|
|
63
|
+
super().__init__(
|
|
64
|
+
id=id,
|
|
65
|
+
session_table=session_table,
|
|
66
|
+
culture_table=culture_table,
|
|
67
|
+
memory_table=memory_table,
|
|
68
|
+
metrics_table=metrics_table,
|
|
69
|
+
eval_table=eval_table,
|
|
70
|
+
knowledge_table=knowledge_table,
|
|
71
|
+
traces_table=traces_table,
|
|
72
|
+
spans_table=spans_table,
|
|
73
|
+
)
|
|
74
|
+
|
|
75
|
+
# Create the directory where the JSON files will be stored, if it doesn't exist
|
|
76
|
+
self.db_path = Path(db_path or os.path.join(os.getcwd(), "agno_json_db"))
|
|
77
|
+
|
|
78
|
+
def table_exists(self, table_name: str) -> bool:
|
|
79
|
+
"""JSON implementation, always returns True."""
|
|
80
|
+
return True
|
|
81
|
+
|
|
82
|
+
def _read_json_file(self, filename: str, create_table_if_not_found: Optional[bool] = True) -> List[Dict[str, Any]]:
|
|
83
|
+
"""Read data from a JSON file, creating it if it doesn't exist.
|
|
84
|
+
|
|
85
|
+
Args:
|
|
86
|
+
filename (str): The name of the JSON file to read.
|
|
87
|
+
|
|
88
|
+
Returns:
|
|
89
|
+
List[Dict[str, Any]]: The data from the JSON file.
|
|
90
|
+
|
|
91
|
+
Raises:
|
|
92
|
+
json.JSONDecodeError: If the JSON file is not valid.
|
|
93
|
+
"""
|
|
94
|
+
file_path = self.db_path / f"{filename}.json"
|
|
95
|
+
|
|
96
|
+
# Create directory if it doesn't exist
|
|
97
|
+
self.db_path.mkdir(parents=True, exist_ok=True)
|
|
98
|
+
|
|
99
|
+
try:
|
|
100
|
+
with open(file_path, "r") as f:
|
|
101
|
+
return json.load(f)
|
|
102
|
+
|
|
103
|
+
except FileNotFoundError:
|
|
104
|
+
if create_table_if_not_found:
|
|
105
|
+
with open(file_path, "w") as f:
|
|
106
|
+
json.dump([], f)
|
|
107
|
+
return []
|
|
108
|
+
|
|
109
|
+
except json.JSONDecodeError as e:
|
|
110
|
+
log_error(f"Error reading the {file_path} JSON file")
|
|
111
|
+
raise e
|
|
112
|
+
|
|
113
|
+
def _write_json_file(self, filename: str, data: List[Dict[str, Any]]) -> None:
|
|
114
|
+
"""Write data to a JSON file.
|
|
115
|
+
|
|
116
|
+
Args:
|
|
117
|
+
filename (str): The name of the JSON file to write.
|
|
118
|
+
data (List[Dict[str, Any]]): The data to write to the JSON file.
|
|
119
|
+
|
|
120
|
+
Raises:
|
|
121
|
+
Exception: If an error occurs while writing to the JSON file.
|
|
122
|
+
"""
|
|
123
|
+
file_path = self.db_path / f"{filename}.json"
|
|
124
|
+
|
|
125
|
+
# Create directory if it doesn't exist
|
|
126
|
+
self.db_path.mkdir(parents=True, exist_ok=True)
|
|
127
|
+
|
|
128
|
+
try:
|
|
129
|
+
with open(file_path, "w") as f:
|
|
130
|
+
json.dump(data, f, indent=2, default=str)
|
|
131
|
+
|
|
132
|
+
except Exception as e:
|
|
133
|
+
log_error(f"Error writing to the {file_path} JSON file: {e}")
|
|
134
|
+
raise e
|
|
135
|
+
|
|
136
|
+
def get_latest_schema_version(self):
|
|
137
|
+
"""Get the latest version of the database schema."""
|
|
138
|
+
pass
|
|
139
|
+
|
|
140
|
+
def upsert_schema_version(self, version: str) -> None:
|
|
141
|
+
"""Upsert the schema version into the database."""
|
|
142
|
+
pass
|
|
143
|
+
|
|
144
|
+
# -- Session methods --
|
|
145
|
+
|
|
146
|
+
def delete_session(self, session_id: str) -> bool:
|
|
147
|
+
"""Delete a session from the JSON file.
|
|
148
|
+
|
|
149
|
+
Args:
|
|
150
|
+
session_id (str): The ID of the session to delete.
|
|
151
|
+
|
|
152
|
+
Returns:
|
|
153
|
+
bool: True if the session was deleted, False otherwise.
|
|
154
|
+
|
|
155
|
+
Raises:
|
|
156
|
+
Exception: If an error occurs during deletion.
|
|
157
|
+
"""
|
|
158
|
+
try:
|
|
159
|
+
sessions = self._read_json_file(self.session_table_name)
|
|
160
|
+
original_count = len(sessions)
|
|
161
|
+
sessions = [s for s in sessions if s.get("session_id") != session_id]
|
|
162
|
+
|
|
163
|
+
if len(sessions) < original_count:
|
|
164
|
+
self._write_json_file(self.session_table_name, sessions)
|
|
165
|
+
log_debug(f"Successfully deleted session with session_id: {session_id}")
|
|
166
|
+
return True
|
|
167
|
+
|
|
168
|
+
else:
|
|
169
|
+
log_debug(f"No session found to delete with session_id: {session_id}")
|
|
170
|
+
return False
|
|
171
|
+
|
|
172
|
+
except Exception as e:
|
|
173
|
+
log_error(f"Error deleting session: {e}")
|
|
174
|
+
raise e
|
|
175
|
+
|
|
176
|
+
def delete_sessions(self, session_ids: List[str]) -> None:
|
|
177
|
+
"""Delete multiple sessions from the JSON file.
|
|
178
|
+
|
|
179
|
+
Args:
|
|
180
|
+
session_ids (List[str]): The IDs of the sessions to delete.
|
|
181
|
+
|
|
182
|
+
Raises:
|
|
183
|
+
Exception: If an error occurs during deletion.
|
|
184
|
+
"""
|
|
185
|
+
try:
|
|
186
|
+
sessions = self._read_json_file(self.session_table_name)
|
|
187
|
+
sessions = [s for s in sessions if s.get("session_id") not in session_ids]
|
|
188
|
+
self._write_json_file(self.session_table_name, sessions)
|
|
189
|
+
log_debug(f"Successfully deleted sessions with ids: {session_ids}")
|
|
190
|
+
|
|
191
|
+
except Exception as e:
|
|
192
|
+
log_error(f"Error deleting sessions: {e}")
|
|
193
|
+
raise e
|
|
194
|
+
|
|
195
|
+
def get_session(
|
|
196
|
+
self,
|
|
197
|
+
session_id: str,
|
|
198
|
+
session_type: SessionType,
|
|
199
|
+
user_id: Optional[str] = None,
|
|
200
|
+
deserialize: Optional[bool] = True,
|
|
201
|
+
) -> Optional[Union[AgentSession, TeamSession, WorkflowSession, Dict[str, Any]]]:
|
|
202
|
+
"""Read a session from the JSON file.
|
|
203
|
+
|
|
204
|
+
Args:
|
|
205
|
+
session_id (str): The ID of the session to read.
|
|
206
|
+
session_type (SessionType): The type of the session to read.
|
|
207
|
+
user_id (Optional[str]): The ID of the user to read the session for.
|
|
208
|
+
deserialize (Optional[bool]): Whether to deserialize the session.
|
|
209
|
+
|
|
210
|
+
Returns:
|
|
211
|
+
Union[Session, Dict[str, Any], None]:
|
|
212
|
+
- When deserialize=True: Session object
|
|
213
|
+
- When deserialize=False: Session dictionary
|
|
214
|
+
|
|
215
|
+
Raises:
|
|
216
|
+
Exception: If an error occurs while reading the session.
|
|
217
|
+
"""
|
|
218
|
+
try:
|
|
219
|
+
sessions = self._read_json_file(self.session_table_name)
|
|
220
|
+
|
|
221
|
+
for session_data in sessions:
|
|
222
|
+
if session_data.get("session_id") == session_id:
|
|
223
|
+
if user_id is not None and session_data.get("user_id") != user_id:
|
|
224
|
+
continue
|
|
225
|
+
|
|
226
|
+
if not deserialize:
|
|
227
|
+
return session_data
|
|
228
|
+
|
|
229
|
+
if session_type == SessionType.AGENT:
|
|
230
|
+
return AgentSession.from_dict(session_data)
|
|
231
|
+
elif session_type == SessionType.TEAM:
|
|
232
|
+
return TeamSession.from_dict(session_data)
|
|
233
|
+
elif session_type == SessionType.WORKFLOW:
|
|
234
|
+
return WorkflowSession.from_dict(session_data)
|
|
235
|
+
else:
|
|
236
|
+
raise ValueError(f"Invalid session type: {session_type}")
|
|
237
|
+
|
|
238
|
+
return None
|
|
239
|
+
|
|
240
|
+
except Exception as e:
|
|
241
|
+
log_error(f"Exception reading from session file: {e}")
|
|
242
|
+
raise e
|
|
243
|
+
|
|
244
|
+
def get_sessions(
|
|
245
|
+
self,
|
|
246
|
+
session_type: Optional[SessionType] = None,
|
|
247
|
+
user_id: Optional[str] = None,
|
|
248
|
+
component_id: Optional[str] = None,
|
|
249
|
+
session_name: Optional[str] = None,
|
|
250
|
+
start_timestamp: Optional[int] = None,
|
|
251
|
+
end_timestamp: Optional[int] = None,
|
|
252
|
+
limit: Optional[int] = None,
|
|
253
|
+
page: Optional[int] = None,
|
|
254
|
+
sort_by: Optional[str] = None,
|
|
255
|
+
sort_order: Optional[str] = None,
|
|
256
|
+
deserialize: Optional[bool] = True,
|
|
257
|
+
) -> Union[List[Session], Tuple[List[Dict[str, Any]], int]]:
|
|
258
|
+
"""Get all sessions from the JSON file with filtering and pagination.
|
|
259
|
+
|
|
260
|
+
Args:
|
|
261
|
+
session_type (Optional[SessionType]): The type of the sessions to read.
|
|
262
|
+
user_id (Optional[str]): The ID of the user to read the sessions for.
|
|
263
|
+
component_id (Optional[str]): The ID of the component to read the sessions for.
|
|
264
|
+
session_name (Optional[str]): The name of the session to read.
|
|
265
|
+
start_timestamp (Optional[int]): The start timestamp of the sessions to read.
|
|
266
|
+
end_timestamp (Optional[int]): The end timestamp of the sessions to read.
|
|
267
|
+
limit (Optional[int]): The limit of the sessions to read.
|
|
268
|
+
page (Optional[int]): The page of the sessions to read.
|
|
269
|
+
sort_by (Optional[str]): The field to sort the sessions by.
|
|
270
|
+
sort_order (Optional[str]): The order to sort the sessions by.
|
|
271
|
+
deserialize (Optional[bool]): Whether to deserialize the sessions.
|
|
272
|
+
create_table_if_not_found (Optional[bool]): Whether to create a json file to track sessions if it doesn't exist.
|
|
273
|
+
|
|
274
|
+
Returns:
|
|
275
|
+
Union[List[AgentSession], List[TeamSession], List[WorkflowSession], Tuple[List[Dict[str, Any]], int]]:
|
|
276
|
+
- When deserialize=True: List of sessions
|
|
277
|
+
- When deserialize=False: Tuple with list of sessions and total count
|
|
278
|
+
|
|
279
|
+
Raises:
|
|
280
|
+
Exception: If an error occurs while reading the sessions.
|
|
281
|
+
"""
|
|
282
|
+
try:
|
|
283
|
+
sessions = self._read_json_file(self.session_table_name)
|
|
284
|
+
|
|
285
|
+
# Apply filters
|
|
286
|
+
filtered_sessions = []
|
|
287
|
+
for session_data in sessions:
|
|
288
|
+
if user_id is not None and session_data.get("user_id") != user_id:
|
|
289
|
+
continue
|
|
290
|
+
if component_id is not None:
|
|
291
|
+
if session_type == SessionType.AGENT and session_data.get("agent_id") != component_id:
|
|
292
|
+
continue
|
|
293
|
+
elif session_type == SessionType.TEAM and session_data.get("team_id") != component_id:
|
|
294
|
+
continue
|
|
295
|
+
elif session_type == SessionType.WORKFLOW and session_data.get("workflow_id") != component_id:
|
|
296
|
+
continue
|
|
297
|
+
if start_timestamp is not None and session_data.get("created_at", 0) < start_timestamp:
|
|
298
|
+
continue
|
|
299
|
+
if end_timestamp is not None and session_data.get("created_at", 0) > end_timestamp:
|
|
300
|
+
continue
|
|
301
|
+
if session_name is not None:
|
|
302
|
+
stored_name = session_data.get("session_data", {}).get("session_name", "")
|
|
303
|
+
if session_name.lower() not in stored_name.lower():
|
|
304
|
+
continue
|
|
305
|
+
session_type_value = session_type.value if isinstance(session_type, SessionType) else session_type
|
|
306
|
+
if session_data.get("session_type") != session_type_value:
|
|
307
|
+
continue
|
|
308
|
+
|
|
309
|
+
filtered_sessions.append(session_data)
|
|
310
|
+
|
|
311
|
+
total_count = len(filtered_sessions)
|
|
312
|
+
|
|
313
|
+
# Apply sorting
|
|
314
|
+
filtered_sessions = apply_sorting(filtered_sessions, sort_by, sort_order)
|
|
315
|
+
|
|
316
|
+
# Apply pagination
|
|
317
|
+
if limit is not None:
|
|
318
|
+
start_idx = 0
|
|
319
|
+
if page is not None:
|
|
320
|
+
start_idx = (page - 1) * limit
|
|
321
|
+
filtered_sessions = filtered_sessions[start_idx : start_idx + limit]
|
|
322
|
+
|
|
323
|
+
if not deserialize:
|
|
324
|
+
return filtered_sessions, total_count
|
|
325
|
+
|
|
326
|
+
if session_type == SessionType.AGENT:
|
|
327
|
+
return [AgentSession.from_dict(session) for session in filtered_sessions] # type: ignore
|
|
328
|
+
elif session_type == SessionType.TEAM:
|
|
329
|
+
return [TeamSession.from_dict(session) for session in filtered_sessions] # type: ignore
|
|
330
|
+
elif session_type == SessionType.WORKFLOW:
|
|
331
|
+
return [WorkflowSession.from_dict(session) for session in filtered_sessions] # type: ignore
|
|
332
|
+
else:
|
|
333
|
+
raise ValueError(f"Invalid session type: {session_type}")
|
|
334
|
+
|
|
335
|
+
except Exception as e:
|
|
336
|
+
log_error(f"Exception reading from session file: {e}")
|
|
337
|
+
raise e
|
|
338
|
+
|
|
339
|
+
def rename_session(
|
|
340
|
+
self, session_id: str, session_type: SessionType, session_name: str, deserialize: Optional[bool] = True
|
|
341
|
+
) -> Optional[Union[Session, Dict[str, Any]]]:
|
|
342
|
+
"""Rename a session in the JSON file."""
|
|
343
|
+
try:
|
|
344
|
+
sessions = self._read_json_file(self.session_table_name)
|
|
345
|
+
|
|
346
|
+
for i, session in enumerate(sessions):
|
|
347
|
+
if session.get("session_id") == session_id and session.get("session_type") == session_type.value:
|
|
348
|
+
# Update session name in session_data
|
|
349
|
+
if "session_data" not in session:
|
|
350
|
+
session["session_data"] = {}
|
|
351
|
+
session["session_data"]["session_name"] = session_name
|
|
352
|
+
|
|
353
|
+
sessions[i] = session
|
|
354
|
+
self._write_json_file(self.session_table_name, sessions)
|
|
355
|
+
|
|
356
|
+
log_debug(f"Renamed session with id '{session_id}' to '{session_name}'")
|
|
357
|
+
|
|
358
|
+
if not deserialize:
|
|
359
|
+
return session
|
|
360
|
+
|
|
361
|
+
if session_type == SessionType.AGENT:
|
|
362
|
+
return AgentSession.from_dict(session)
|
|
363
|
+
elif session_type == SessionType.TEAM:
|
|
364
|
+
return TeamSession.from_dict(session)
|
|
365
|
+
elif session_type == SessionType.WORKFLOW:
|
|
366
|
+
return WorkflowSession.from_dict(session)
|
|
367
|
+
else:
|
|
368
|
+
raise ValueError(f"Invalid session type: {session_type}")
|
|
369
|
+
|
|
370
|
+
return None
|
|
371
|
+
|
|
372
|
+
except Exception as e:
|
|
373
|
+
log_error(f"Exception renaming session: {e}")
|
|
374
|
+
raise e
|
|
375
|
+
|
|
376
|
+
def upsert_session(
|
|
377
|
+
self, session: Session, deserialize: Optional[bool] = True
|
|
378
|
+
) -> Optional[Union[Session, Dict[str, Any]]]:
|
|
379
|
+
"""Insert or update a session in the JSON file."""
|
|
380
|
+
try:
|
|
381
|
+
sessions = self._read_json_file(self.session_table_name, create_table_if_not_found=True)
|
|
382
|
+
session_dict = session.to_dict()
|
|
383
|
+
|
|
384
|
+
# Add session_type based on session instance type
|
|
385
|
+
if isinstance(session, AgentSession):
|
|
386
|
+
session_dict["session_type"] = SessionType.AGENT.value
|
|
387
|
+
elif isinstance(session, TeamSession):
|
|
388
|
+
session_dict["session_type"] = SessionType.TEAM.value
|
|
389
|
+
elif isinstance(session, WorkflowSession):
|
|
390
|
+
session_dict["session_type"] = SessionType.WORKFLOW.value
|
|
391
|
+
|
|
392
|
+
# Find existing session to update
|
|
393
|
+
session_updated = False
|
|
394
|
+
for i, existing_session in enumerate(sessions):
|
|
395
|
+
if existing_session.get("session_id") == session_dict.get("session_id") and self._matches_session_key(
|
|
396
|
+
existing_session, session
|
|
397
|
+
):
|
|
398
|
+
# Update existing session
|
|
399
|
+
session_dict["updated_at"] = int(time.time())
|
|
400
|
+
sessions[i] = session_dict
|
|
401
|
+
session_updated = True
|
|
402
|
+
break
|
|
403
|
+
|
|
404
|
+
if not session_updated:
|
|
405
|
+
# Add new session
|
|
406
|
+
session_dict["created_at"] = session_dict.get("created_at", int(time.time()))
|
|
407
|
+
session_dict["updated_at"] = session_dict.get("created_at")
|
|
408
|
+
sessions.append(session_dict)
|
|
409
|
+
|
|
410
|
+
self._write_json_file(self.session_table_name, sessions)
|
|
411
|
+
|
|
412
|
+
if not deserialize:
|
|
413
|
+
return session_dict
|
|
414
|
+
|
|
415
|
+
return session
|
|
416
|
+
|
|
417
|
+
except Exception as e:
|
|
418
|
+
log_error(f"Exception upserting session: {e}")
|
|
419
|
+
raise e
|
|
420
|
+
|
|
421
|
+
def upsert_sessions(
|
|
422
|
+
self, sessions: List[Session], deserialize: Optional[bool] = True, preserve_updated_at: bool = False
|
|
423
|
+
) -> List[Union[Session, Dict[str, Any]]]:
|
|
424
|
+
"""
|
|
425
|
+
Bulk upsert multiple sessions for improved performance on large datasets.
|
|
426
|
+
|
|
427
|
+
Args:
|
|
428
|
+
sessions (List[Session]): List of sessions to upsert.
|
|
429
|
+
deserialize (Optional[bool]): Whether to deserialize the sessions. Defaults to True.
|
|
430
|
+
|
|
431
|
+
Returns:
|
|
432
|
+
List[Union[Session, Dict[str, Any]]]: List of upserted sessions.
|
|
433
|
+
|
|
434
|
+
Raises:
|
|
435
|
+
Exception: If an error occurs during bulk upsert.
|
|
436
|
+
"""
|
|
437
|
+
if not sessions:
|
|
438
|
+
return []
|
|
439
|
+
|
|
440
|
+
try:
|
|
441
|
+
log_info(
|
|
442
|
+
f"JsonDb doesn't support efficient bulk operations, falling back to individual upserts for {len(sessions)} sessions"
|
|
443
|
+
)
|
|
444
|
+
|
|
445
|
+
# Fall back to individual upserts
|
|
446
|
+
results = []
|
|
447
|
+
for session in sessions:
|
|
448
|
+
if session is not None:
|
|
449
|
+
result = self.upsert_session(session, deserialize=deserialize)
|
|
450
|
+
if result is not None:
|
|
451
|
+
results.append(result)
|
|
452
|
+
return results
|
|
453
|
+
|
|
454
|
+
except Exception as e:
|
|
455
|
+
log_error(f"Exception during bulk session upsert: {e}")
|
|
456
|
+
return []
|
|
457
|
+
|
|
458
|
+
def _matches_session_key(self, existing_session: Dict[str, Any], session: Session) -> bool:
|
|
459
|
+
"""Check if existing session matches the key for the session type."""
|
|
460
|
+
if isinstance(session, AgentSession):
|
|
461
|
+
return existing_session.get("agent_id") == session.agent_id
|
|
462
|
+
elif isinstance(session, TeamSession):
|
|
463
|
+
return existing_session.get("team_id") == session.team_id
|
|
464
|
+
elif isinstance(session, WorkflowSession):
|
|
465
|
+
return existing_session.get("workflow_id") == session.workflow_id
|
|
466
|
+
return False
|
|
467
|
+
|
|
468
|
+
# -- Memory methods --
|
|
469
|
+
def delete_user_memory(self, memory_id: str, user_id: Optional[str] = None):
|
|
470
|
+
"""Delete a user memory from the JSON file.
|
|
471
|
+
|
|
472
|
+
Args:
|
|
473
|
+
memory_id (str): The ID of the memory to delete.
|
|
474
|
+
user_id (Optional[str]): The ID of the user (optional, for filtering).
|
|
475
|
+
"""
|
|
476
|
+
try:
|
|
477
|
+
memories = self._read_json_file(self.memory_table_name)
|
|
478
|
+
original_count = len(memories)
|
|
479
|
+
|
|
480
|
+
# If user_id is provided, verify the memory belongs to the user before deleting
|
|
481
|
+
if user_id:
|
|
482
|
+
memory_to_delete = None
|
|
483
|
+
for m in memories:
|
|
484
|
+
if m.get("memory_id") == memory_id:
|
|
485
|
+
memory_to_delete = m
|
|
486
|
+
break
|
|
487
|
+
|
|
488
|
+
if memory_to_delete and memory_to_delete.get("user_id") != user_id:
|
|
489
|
+
log_debug(f"Memory {memory_id} does not belong to user {user_id}")
|
|
490
|
+
return
|
|
491
|
+
|
|
492
|
+
memories = [m for m in memories if m.get("memory_id") != memory_id]
|
|
493
|
+
|
|
494
|
+
if len(memories) < original_count:
|
|
495
|
+
self._write_json_file(self.memory_table_name, memories)
|
|
496
|
+
log_debug(f"Successfully deleted user memory id: {memory_id}")
|
|
497
|
+
else:
|
|
498
|
+
log_debug(f"No memory found with id: {memory_id}")
|
|
499
|
+
|
|
500
|
+
except Exception as e:
|
|
501
|
+
log_error(f"Error deleting memory: {e}")
|
|
502
|
+
raise e
|
|
503
|
+
|
|
504
|
+
def delete_user_memories(self, memory_ids: List[str], user_id: Optional[str] = None) -> None:
|
|
505
|
+
"""Delete multiple user memories from the JSON file.
|
|
506
|
+
|
|
507
|
+
Args:
|
|
508
|
+
memory_ids (List[str]): List of memory IDs to delete.
|
|
509
|
+
user_id (Optional[str]): The ID of the user (optional, for filtering).
|
|
510
|
+
"""
|
|
511
|
+
try:
|
|
512
|
+
memories = self._read_json_file(self.memory_table_name)
|
|
513
|
+
|
|
514
|
+
# If user_id is provided, filter memory_ids to only those belonging to the user
|
|
515
|
+
if user_id:
|
|
516
|
+
filtered_memory_ids: List[str] = []
|
|
517
|
+
for memory in memories:
|
|
518
|
+
if memory.get("memory_id") in memory_ids and memory.get("user_id") == user_id:
|
|
519
|
+
filtered_memory_ids.append(memory.get("memory_id")) # type: ignore
|
|
520
|
+
memory_ids = filtered_memory_ids
|
|
521
|
+
|
|
522
|
+
memories = [m for m in memories if m.get("memory_id") not in memory_ids]
|
|
523
|
+
self._write_json_file(self.memory_table_name, memories)
|
|
524
|
+
|
|
525
|
+
log_debug(f"Successfully deleted {len(memory_ids)} user memories")
|
|
526
|
+
|
|
527
|
+
except Exception as e:
|
|
528
|
+
log_error(f"Error deleting memories: {e}")
|
|
529
|
+
raise e
|
|
530
|
+
|
|
531
|
+
def get_all_memory_topics(self) -> List[str]:
|
|
532
|
+
"""Get all memory topics from the JSON file.
|
|
533
|
+
|
|
534
|
+
Returns:
|
|
535
|
+
List[str]: List of unique memory topics.
|
|
536
|
+
"""
|
|
537
|
+
try:
|
|
538
|
+
memories = self._read_json_file(self.memory_table_name)
|
|
539
|
+
|
|
540
|
+
topics = set()
|
|
541
|
+
for memory in memories:
|
|
542
|
+
memory_topics = memory.get("topics", [])
|
|
543
|
+
if isinstance(memory_topics, list):
|
|
544
|
+
topics.update(memory_topics)
|
|
545
|
+
return list(topics)
|
|
546
|
+
|
|
547
|
+
except Exception as e:
|
|
548
|
+
log_error(f"Exception reading from memory file: {e}")
|
|
549
|
+
raise e
|
|
550
|
+
|
|
551
|
+
def get_user_memory(
|
|
552
|
+
self,
|
|
553
|
+
memory_id: str,
|
|
554
|
+
deserialize: Optional[bool] = True,
|
|
555
|
+
user_id: Optional[str] = None,
|
|
556
|
+
) -> Optional[Union[UserMemory, Dict[str, Any]]]:
|
|
557
|
+
"""Get a memory from the JSON file.
|
|
558
|
+
|
|
559
|
+
Args:
|
|
560
|
+
memory_id (str): The ID of the memory to get.
|
|
561
|
+
deserialize (Optional[bool]): Whether to deserialize the memory.
|
|
562
|
+
user_id (Optional[str]): The ID of the user (optional, for filtering).
|
|
563
|
+
|
|
564
|
+
Returns:
|
|
565
|
+
Optional[Union[UserMemory, Dict[str, Any]]]: The user memory data if found, None otherwise.
|
|
566
|
+
"""
|
|
567
|
+
try:
|
|
568
|
+
memories = self._read_json_file(self.memory_table_name)
|
|
569
|
+
|
|
570
|
+
for memory_data in memories:
|
|
571
|
+
if memory_data.get("memory_id") == memory_id:
|
|
572
|
+
# Filter by user_id if provided
|
|
573
|
+
if user_id and memory_data.get("user_id") != user_id:
|
|
574
|
+
return None
|
|
575
|
+
|
|
576
|
+
if not deserialize:
|
|
577
|
+
return memory_data
|
|
578
|
+
return UserMemory.from_dict(memory_data)
|
|
579
|
+
|
|
580
|
+
return None
|
|
581
|
+
|
|
582
|
+
except Exception as e:
|
|
583
|
+
log_error(f"Exception reading from memory file: {e}")
|
|
584
|
+
raise e
|
|
585
|
+
|
|
586
|
+
def get_user_memories(
|
|
587
|
+
self,
|
|
588
|
+
user_id: Optional[str] = None,
|
|
589
|
+
agent_id: Optional[str] = None,
|
|
590
|
+
team_id: Optional[str] = None,
|
|
591
|
+
topics: Optional[List[str]] = None,
|
|
592
|
+
search_content: Optional[str] = None,
|
|
593
|
+
limit: Optional[int] = None,
|
|
594
|
+
page: Optional[int] = None,
|
|
595
|
+
sort_by: Optional[str] = None,
|
|
596
|
+
sort_order: Optional[str] = None,
|
|
597
|
+
deserialize: Optional[bool] = True,
|
|
598
|
+
) -> Union[List[UserMemory], Tuple[List[Dict[str, Any]], int]]:
|
|
599
|
+
"""Get all memories from the JSON file with filtering and pagination."""
|
|
600
|
+
try:
|
|
601
|
+
memories = self._read_json_file(self.memory_table_name)
|
|
602
|
+
|
|
603
|
+
# Apply filters
|
|
604
|
+
filtered_memories = []
|
|
605
|
+
for memory_data in memories:
|
|
606
|
+
if user_id is not None and memory_data.get("user_id") != user_id:
|
|
607
|
+
continue
|
|
608
|
+
if agent_id is not None and memory_data.get("agent_id") != agent_id:
|
|
609
|
+
continue
|
|
610
|
+
if team_id is not None and memory_data.get("team_id") != team_id:
|
|
611
|
+
continue
|
|
612
|
+
if topics is not None:
|
|
613
|
+
memory_topics = memory_data.get("topics", [])
|
|
614
|
+
if not any(topic in memory_topics for topic in topics):
|
|
615
|
+
continue
|
|
616
|
+
if search_content is not None:
|
|
617
|
+
memory_content = str(memory_data.get("memory", ""))
|
|
618
|
+
if search_content.lower() not in memory_content.lower():
|
|
619
|
+
continue
|
|
620
|
+
|
|
621
|
+
filtered_memories.append(memory_data)
|
|
622
|
+
|
|
623
|
+
total_count = len(filtered_memories)
|
|
624
|
+
|
|
625
|
+
# Apply sorting
|
|
626
|
+
filtered_memories = apply_sorting(filtered_memories, sort_by, sort_order)
|
|
627
|
+
|
|
628
|
+
# Apply pagination
|
|
629
|
+
if limit is not None:
|
|
630
|
+
start_idx = 0
|
|
631
|
+
if page is not None:
|
|
632
|
+
start_idx = (page - 1) * limit
|
|
633
|
+
filtered_memories = filtered_memories[start_idx : start_idx + limit]
|
|
634
|
+
|
|
635
|
+
if not deserialize:
|
|
636
|
+
return filtered_memories, total_count
|
|
637
|
+
|
|
638
|
+
return [UserMemory.from_dict(memory) for memory in filtered_memories]
|
|
639
|
+
|
|
640
|
+
except Exception as e:
|
|
641
|
+
log_error(f"Exception reading from memory file: {e}")
|
|
642
|
+
raise e
|
|
643
|
+
|
|
    def get_user_memory_stats(
        self, limit: Optional[int] = None, page: Optional[int] = None, user_id: Optional[str] = None
    ) -> Tuple[List[Dict[str, Any]], int]:
        """Get user memory statistics.

        Args:
            limit (Optional[int]): The maximum number of user stats to return.
            page (Optional[int]): The page number.
            user_id (Optional[str]): User ID for filtering.

        Returns:
            Tuple[List[Dict[str, Any]], int]: A list of dictionaries containing user stats and total count.
        """
        try:
            memories = self._read_json_file(self.memory_table_name)
            user_stats = {}

            for memory in memories:
                memory_user_id = memory.get("user_id")
                # Filter by user_id if provided
                if user_id is not None and memory_user_id != user_id:
                    continue
                if memory_user_id:
                    if memory_user_id not in user_stats:
                        user_stats[memory_user_id] = {
                            "user_id": memory_user_id,
                            "total_memories": 0,
                            "last_memory_updated_at": 0,
                        }
                    user_stats[memory_user_id]["total_memories"] += 1
                    updated_at = memory.get("updated_at", 0)
                    if updated_at > user_stats[memory_user_id]["last_memory_updated_at"]:
                        user_stats[memory_user_id]["last_memory_updated_at"] = updated_at

            stats_list = list(user_stats.values())
            stats_list.sort(key=lambda x: x["last_memory_updated_at"], reverse=True)

            total_count = len(stats_list)

            # Apply pagination
            if limit is not None:
                start_idx = 0
                if page is not None:
                    start_idx = (page - 1) * limit
                stats_list = stats_list[start_idx : start_idx + limit]

            return stats_list, total_count

        except Exception as e:
            log_error(f"Exception getting user memory stats: {e}")
            raise e

    def upsert_user_memory(
        self, memory: UserMemory, deserialize: Optional[bool] = True
    ) -> Optional[Union[UserMemory, Dict[str, Any]]]:
        """Upsert a user memory in the JSON file."""
        try:
            memories = self._read_json_file(self.memory_table_name, create_table_if_not_found=True)

            if memory.memory_id is None:
                memory.memory_id = str(uuid4())

            memory_dict = memory.to_dict() if hasattr(memory, "to_dict") else memory.__dict__
            memory_dict["updated_at"] = int(time.time())

            # Find existing memory to update
            memory_updated = False
            for i, existing_memory in enumerate(memories):
                if existing_memory.get("memory_id") == memory.memory_id:
                    memories[i] = memory_dict
                    memory_updated = True
                    break

            if not memory_updated:
                memories.append(memory_dict)

            self._write_json_file(self.memory_table_name, memories)

            if not deserialize:
                return memory_dict
            return UserMemory.from_dict(memory_dict)

        except Exception as e:
            log_warning(f"Exception upserting user memory: {e}")
            raise e

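Upserts are keyed on memory_id: a match is replaced in place, anything else is appended, and a missing id is filled with a fresh uuid4 while updated_at is always refreshed. A sketch of both branches — the UserMemory field names are taken from the filters above, and the setup is the same assumed one as the earlier sketch:

```python
from agno.db.json import JsonDb
from agno.db.schemas.memory import UserMemory  # schema path per this release's layout

db = JsonDb(db_path="./agno_db")  # hypothetical constructor argument

memory = UserMemory(memory="Prefers aisle seats", user_id="user-1")  # memory_id left unset
saved = db.upsert_user_memory(memory)   # id generated, record appended

saved.memory = "Prefers window seats"
db.upsert_user_memory(saved)            # same memory_id: replaced in place, updated_at refreshed
```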
    def upsert_memories(
        self, memories: List[UserMemory], deserialize: Optional[bool] = True, preserve_updated_at: bool = False
    ) -> List[Union[UserMemory, Dict[str, Any]]]:
        """
        Bulk upsert multiple user memories for improved performance on large datasets.

        Args:
            memories (List[UserMemory]): List of memories to upsert.
            deserialize (Optional[bool]): Whether to deserialize the memories. Defaults to True.
            preserve_updated_at (bool): Whether to keep the memories' existing updated_at timestamps. Defaults to False.

        Returns:
            List[Union[UserMemory, Dict[str, Any]]]: List of upserted memories.

        Raises:
            Exception: If an error occurs during bulk upsert.
        """
        if not memories:
            return []

        try:
            log_info(
                f"JsonDb doesn't support efficient bulk operations, falling back to individual upserts for {len(memories)} memories"
            )
            # Fall back to individual upserts
            results = []
            for memory in memories:
                if memory is not None:
                    result = self.upsert_user_memory(memory, deserialize=deserialize)
                    if result is not None:
                        results.append(result)
            return results

        except Exception as e:
            log_error(f"Exception during bulk memory upsert: {e}")
            return []

    def clear_memories(self) -> None:
        """Delete all memories from the database.

        Raises:
            Exception: If an error occurs during deletion.
        """
        try:
            # Simply write an empty list to the memory JSON file
            self._write_json_file(self.memory_table_name, [])

        except Exception as e:
            log_warning(f"Exception deleting all memories: {e}")
            raise e

    # -- Metrics methods --
    def calculate_metrics(self) -> Optional[list[dict]]:
        """Calculate metrics for all dates without complete metrics."""
        try:
            metrics = self._read_json_file(self.metrics_table_name, create_table_if_not_found=True)

            starting_date = self._get_metrics_calculation_starting_date(metrics)
            if starting_date is None:
                log_info("No session data found. Won't calculate metrics.")
                return None

            dates_to_process = get_dates_to_calculate_metrics_for(starting_date)
            if not dates_to_process:
                log_info("Metrics already calculated for all relevant dates.")
                return None

            start_timestamp = int(datetime.combine(dates_to_process[0], datetime.min.time()).timestamp())
            end_timestamp = int(
                datetime.combine(dates_to_process[-1] + timedelta(days=1), datetime.min.time()).timestamp()
            )

            sessions = self._get_all_sessions_for_metrics_calculation(start_timestamp, end_timestamp)
            all_sessions_data = fetch_all_sessions_data(
                sessions=sessions, dates_to_process=dates_to_process, start_timestamp=start_timestamp
            )
            if not all_sessions_data:
                log_info("No new session data found. Won't calculate metrics.")
                return None

            results = []

            for date_to_process in dates_to_process:
                date_key = date_to_process.isoformat()
                sessions_for_date = all_sessions_data.get(date_key, {})

                # Skip dates with no sessions
                if not any(len(sessions) > 0 for sessions in sessions_for_date.values()):
                    continue

                metrics_record = calculate_date_metrics(date_to_process, sessions_for_date)

                # Upsert metrics record
                existing_record_idx = None
                for i, existing_metric in enumerate(metrics):
                    if (
                        existing_metric.get("date") == str(date_to_process)
                        and existing_metric.get("aggregation_period") == "daily"
                    ):
                        existing_record_idx = i
                        break

                if existing_record_idx is not None:
                    metrics[existing_record_idx] = metrics_record
                else:
                    metrics.append(metrics_record)

                results.append(metrics_record)

            if results:
                self._write_json_file(self.metrics_table_name, metrics)

            log_debug("Updated metrics calculations")

            return results

        except Exception as e:
            log_warning(f"Exception refreshing metrics: {e}")
            raise e

    def _get_metrics_calculation_starting_date(self, metrics: List[Dict[str, Any]]) -> Optional[date]:
        """Get the first date for which metrics calculation is needed."""
        if metrics:
            # Sort by date in descending order
            sorted_metrics = sorted(metrics, key=lambda x: x.get("date", ""), reverse=True)
            latest_metric = sorted_metrics[0]

            if latest_metric.get("completed", False):
                latest_date = datetime.strptime(latest_metric["date"], "%Y-%m-%d").date()
                return latest_date + timedelta(days=1)
            else:
                return datetime.strptime(latest_metric["date"], "%Y-%m-%d").date()

        # No metrics records. Return the date of the first recorded session.
        # We need to get sessions of all types, so we'll read directly from the file
        all_sessions = self._read_json_file(self.session_table_name)
        if all_sessions:
            # Sort by created_at
            all_sessions.sort(key=lambda x: x.get("created_at", 0))
            first_session_date = all_sessions[0]["created_at"]
            return datetime.fromtimestamp(first_session_date, tz=timezone.utc).date()

        return None

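The resume rule here is subtle enough to restate: a latest record marked completed pushes the starting date to the next day, while an incomplete one is recalculated from that same day. A self-contained sketch of just that decision (the helper name is mine, not the module's):

```python
from datetime import date, datetime, timedelta
from typing import Any, Dict, List, Optional

def metrics_resume_date(metrics: List[Dict[str, Any]]) -> Optional[date]:
    """Mirror of the starting-date rule: completed -> next day, else redo that day."""
    if not metrics:
        return None
    latest = max(metrics, key=lambda m: m.get("date", ""))
    day = datetime.strptime(latest["date"], "%Y-%m-%d").date()
    return day + timedelta(days=1) if latest.get("completed", False) else day

assert metrics_resume_date([{"date": "2024-05-01", "completed": True}]) == date(2024, 5, 2)
assert metrics_resume_date([{"date": "2024-05-01", "completed": False}]) == date(2024, 5, 1)
```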
    def _get_all_sessions_for_metrics_calculation(
        self, start_timestamp: Optional[int] = None, end_timestamp: Optional[int] = None
    ) -> List[Dict[str, Any]]:
        """Get all sessions for metrics calculation."""
        try:
            sessions = self._read_json_file(self.session_table_name)

            filtered_sessions = []
            for session in sessions:
                created_at = session.get("created_at", 0)
                if start_timestamp is not None and created_at < start_timestamp:
                    continue
                if end_timestamp is not None and created_at >= end_timestamp:
                    continue

                # Only include necessary fields for metrics
                filtered_session = {
                    "user_id": session.get("user_id"),
                    "session_data": session.get("session_data"),
                    "runs": session.get("runs"),
                    "created_at": session.get("created_at"),
                    "session_type": session.get("session_type"),
                }
                filtered_sessions.append(filtered_session)

            return filtered_sessions

        except Exception as e:
            log_error(f"Exception reading sessions for metrics: {e}")
            raise e

    def get_metrics(
        self,
        starting_date: Optional[date] = None,
        ending_date: Optional[date] = None,
    ) -> Tuple[List[dict], Optional[int]]:
        """Get all metrics matching the given date range."""
        try:
            metrics = self._read_json_file(self.metrics_table_name)

            filtered_metrics = []
            latest_updated_at = None

            for metric in metrics:
                metric_date = datetime.strptime(metric.get("date", ""), "%Y-%m-%d").date()

                if starting_date and metric_date < starting_date:
                    continue
                if ending_date and metric_date > ending_date:
                    continue

                filtered_metrics.append(metric)

                updated_at = metric.get("updated_at")
                if updated_at and (latest_updated_at is None or updated_at > latest_updated_at):
                    latest_updated_at = updated_at

            return filtered_metrics, latest_updated_at

        except Exception as e:
            log_error(f"Exception getting metrics: {e}")
            raise e

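A typical read of the daily records, under the same assumed setup as the earlier sketches. The second element of the returned tuple is the newest updated_at across the matched records (or None), which a caller can use as a cheap staleness check:

```python
from datetime import date, timedelta

from agno.db.json import JsonDb

db = JsonDb(db_path="./agno_db")  # hypothetical constructor argument

db.calculate_metrics()  # fill in any missing daily records first
records, last_updated_at = db.get_metrics(
    starting_date=date.today() - timedelta(days=7),
    ending_date=date.today(),
)
for record in records:
    print(record["date"], record.get("aggregation_period"))
```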
    # -- Knowledge methods --

    def delete_knowledge_content(self, id: str):
        """Delete a knowledge row from the database.

        Args:
            id (str): The ID of the knowledge row to delete.

        Raises:
            Exception: If an error occurs during deletion.
        """
        try:
            knowledge_items = self._read_json_file(self.knowledge_table_name)
            knowledge_items = [item for item in knowledge_items if item.get("id") != id]
            self._write_json_file(self.knowledge_table_name, knowledge_items)

        except Exception as e:
            log_error(f"Error deleting knowledge content: {e}")
            raise e

    def get_knowledge_content(self, id: str) -> Optional[KnowledgeRow]:
        """Get a knowledge row from the database.

        Args:
            id (str): The ID of the knowledge row to get.

        Returns:
            Optional[KnowledgeRow]: The knowledge row, or None if it doesn't exist.

        Raises:
            Exception: If an error occurs during retrieval.
        """
        try:
            knowledge_items = self._read_json_file(self.knowledge_table_name)

            for item in knowledge_items:
                if item.get("id") == id:
                    return KnowledgeRow.model_validate(item)

            return None

        except Exception as e:
            log_error(f"Error getting knowledge content: {e}")
            raise e

    def get_knowledge_contents(
        self,
        limit: Optional[int] = None,
        page: Optional[int] = None,
        sort_by: Optional[str] = None,
        sort_order: Optional[str] = None,
    ) -> Tuple[List[KnowledgeRow], int]:
        """Get all knowledge contents from the database.

        Args:
            limit (Optional[int]): The maximum number of knowledge contents to return.
            page (Optional[int]): The page number.
            sort_by (Optional[str]): The column to sort by.
            sort_order (Optional[str]): The order to sort by.

        Returns:
            Tuple[List[KnowledgeRow], int]: The knowledge contents and total count.

        Raises:
            Exception: If an error occurs during retrieval.
        """
        try:
            knowledge_items = self._read_json_file(self.knowledge_table_name)

            total_count = len(knowledge_items)

            # Apply sorting
            knowledge_items = apply_sorting(knowledge_items, sort_by, sort_order)

            # Apply pagination
            if limit is not None:
                start_idx = 0
                if page is not None:
                    start_idx = (page - 1) * limit
                knowledge_items = knowledge_items[start_idx : start_idx + limit]

            return [KnowledgeRow.model_validate(item) for item in knowledge_items], total_count

        except Exception as e:
            log_error(f"Error getting knowledge contents: {e}")
            raise e

    def upsert_knowledge_content(self, knowledge_row: KnowledgeRow):
        """Upsert knowledge content in the database.

        Args:
            knowledge_row (KnowledgeRow): The knowledge row to upsert.

        Returns:
            Optional[KnowledgeRow]: The upserted knowledge row, or None if the operation fails.

        Raises:
            Exception: If an error occurs during upsert.
        """
        try:
            knowledge_items = self._read_json_file(self.knowledge_table_name, create_table_if_not_found=True)
            knowledge_dict = knowledge_row.model_dump()

            # Find existing item to update
            item_updated = False
            for i, existing_item in enumerate(knowledge_items):
                if existing_item.get("id") == knowledge_row.id:
                    knowledge_items[i] = knowledge_dict
                    item_updated = True
                    break

            if not item_updated:
                knowledge_items.append(knowledge_dict)

            self._write_json_file(self.knowledge_table_name, knowledge_items)

            return knowledge_row

        except Exception as e:
            log_error(f"Error upserting knowledge row: {e}")
            raise e

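Knowledge upserts use the same scan-replace-append pattern, keyed on id. A sketch — KnowledgeRow is clearly a pydantic model (model_validate/model_dump above), but any constructor field beyond id shown here is an assumption:

```python
from agno.db.json import JsonDb
from agno.db.schemas.knowledge import KnowledgeRow  # schema path per this release's layout

db = JsonDb(db_path="./agno_db")  # hypothetical constructor argument

row = KnowledgeRow(id="doc-1", name="Onboarding guide")  # 'name' is an assumed field
db.upsert_knowledge_content(row)   # first call appends
db.upsert_knowledge_content(row)   # second call replaces in place, no duplicate
fetched = db.get_knowledge_content("doc-1")
```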
    # -- Eval methods --

    def create_eval_run(self, eval_run: EvalRunRecord) -> Optional[EvalRunRecord]:
        """Create an EvalRunRecord in the JSON file."""
        try:
            eval_runs = self._read_json_file(self.eval_table_name, create_table_if_not_found=True)

            current_time = int(time.time())
            eval_dict = eval_run.model_dump()
            eval_dict["created_at"] = current_time
            eval_dict["updated_at"] = current_time

            eval_runs.append(eval_dict)
            self._write_json_file(self.eval_table_name, eval_runs)

            log_debug(f"Created eval run with id '{eval_run.run_id}'")

            return eval_run

        except Exception as e:
            log_error(f"Error creating eval run: {e}")
            raise e

    def delete_eval_run(self, eval_run_id: str) -> None:
        """Delete an eval run from the JSON file."""
        try:
            eval_runs = self._read_json_file(self.eval_table_name)
            original_count = len(eval_runs)
            eval_runs = [run for run in eval_runs if run.get("run_id") != eval_run_id]

            if len(eval_runs) < original_count:
                self._write_json_file(self.eval_table_name, eval_runs)
                log_debug(f"Deleted eval run with ID: {eval_run_id}")
            else:
                log_debug(f"No eval run found with ID: {eval_run_id}")

        except Exception as e:
            log_error(f"Error deleting eval run {eval_run_id}: {e}")
            raise e

    def delete_eval_runs(self, eval_run_ids: List[str]) -> None:
        """Delete multiple eval runs from the JSON file."""
        try:
            eval_runs = self._read_json_file(self.eval_table_name)
            original_count = len(eval_runs)
            eval_runs = [run for run in eval_runs if run.get("run_id") not in eval_run_ids]

            deleted_count = original_count - len(eval_runs)
            if deleted_count > 0:
                self._write_json_file(self.eval_table_name, eval_runs)
                log_debug(f"Deleted {deleted_count} eval runs")
            else:
                log_debug(f"No eval runs found with IDs: {eval_run_ids}")

        except Exception as e:
            log_error(f"Error deleting eval runs {eval_run_ids}: {e}")
            raise e

    def get_eval_run(
        self, eval_run_id: str, deserialize: Optional[bool] = True
    ) -> Optional[Union[EvalRunRecord, Dict[str, Any]]]:
        """Get an eval run from the JSON file."""
        try:
            eval_runs = self._read_json_file(self.eval_table_name)

            for run_data in eval_runs:
                if run_data.get("run_id") == eval_run_id:
                    if not deserialize:
                        return run_data
                    return EvalRunRecord.model_validate(run_data)

            return None

        except Exception as e:
            log_error(f"Exception getting eval run {eval_run_id}: {e}")
            raise e

    def get_eval_runs(
        self,
        limit: Optional[int] = None,
        page: Optional[int] = None,
        sort_by: Optional[str] = None,
        sort_order: Optional[str] = None,
        agent_id: Optional[str] = None,
        team_id: Optional[str] = None,
        workflow_id: Optional[str] = None,
        model_id: Optional[str] = None,
        filter_type: Optional[EvalFilterType] = None,
        eval_type: Optional[List[EvalType]] = None,
        deserialize: Optional[bool] = True,
    ) -> Union[List[EvalRunRecord], Tuple[List[Dict[str, Any]], int]]:
        """Get all eval runs from the JSON file with filtering and pagination."""
        try:
            eval_runs = self._read_json_file(self.eval_table_name)

            # Apply filters
            filtered_runs = []
            for run_data in eval_runs:
                if agent_id is not None and run_data.get("agent_id") != agent_id:
                    continue
                if team_id is not None and run_data.get("team_id") != team_id:
                    continue
                if workflow_id is not None and run_data.get("workflow_id") != workflow_id:
                    continue
                if model_id is not None and run_data.get("model_id") != model_id:
                    continue
                if eval_type is not None and len(eval_type) > 0:
                    if run_data.get("eval_type") not in eval_type:
                        continue
                if filter_type is not None:
                    if filter_type == EvalFilterType.AGENT and run_data.get("agent_id") is None:
                        continue
                    elif filter_type == EvalFilterType.TEAM and run_data.get("team_id") is None:
                        continue
                    elif filter_type == EvalFilterType.WORKFLOW and run_data.get("workflow_id") is None:
                        continue

                filtered_runs.append(run_data)

            total_count = len(filtered_runs)

            # Apply sorting (default by created_at desc)
            if sort_by is None:
                filtered_runs.sort(key=lambda x: x.get("created_at", 0), reverse=True)
            else:
                filtered_runs = apply_sorting(filtered_runs, sort_by, sort_order)

            # Apply pagination
            if limit is not None:
                start_idx = 0
                if page is not None:
                    start_idx = (page - 1) * limit
                filtered_runs = filtered_runs[start_idx : start_idx + limit]

            if not deserialize:
                return filtered_runs, total_count

            return [EvalRunRecord.model_validate(run) for run in filtered_runs]

        except Exception as e:
            log_error(f"Exception getting eval runs: {e}")
            raise e

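The filters compose as follows: ID filters are exact matches, eval_type is a membership test, and filter_type keeps only runs whose corresponding ID is set; with no sort_by, results come back newest first. A hedged sketch — the enum import path and member name are assumptions:

```python
from agno.db.json import JsonDb
from agno.db.schemas.evals import EvalFilterType, EvalType  # assumed import path

db = JsonDb(db_path="./agno_db")  # hypothetical constructor argument

# Agent-scoped runs of one eval type, newest first, second page of 20
runs, total = db.get_eval_runs(
    agent_id="agent-1",
    filter_type=EvalFilterType.AGENT,
    eval_type=[EvalType.ACCURACY],  # member name is an assumption
    limit=20,
    page=2,
    deserialize=False,
)
```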
    def rename_eval_run(
        self, eval_run_id: str, name: str, deserialize: Optional[bool] = True
    ) -> Optional[Union[EvalRunRecord, Dict[str, Any]]]:
        """Rename an eval run in the JSON file."""
        try:
            eval_runs = self._read_json_file(self.eval_table_name)

            for i, run_data in enumerate(eval_runs):
                if run_data.get("run_id") == eval_run_id:
                    run_data["name"] = name
                    run_data["updated_at"] = int(time.time())
                    eval_runs[i] = run_data
                    self._write_json_file(self.eval_table_name, eval_runs)

                    log_debug(f"Renamed eval run with id '{eval_run_id}' to '{name}'")

                    if not deserialize:
                        return run_data

                    return EvalRunRecord.model_validate(run_data)

            return None

        except Exception as e:
            log_error(f"Error renaming eval run {eval_run_id}: {e}")
            raise e

    # -- Culture methods --

    def clear_cultural_knowledge(self) -> None:
        """Delete all cultural knowledge from the JSON file."""
        try:
            self._write_json_file(self.culture_table_name, [])
        except Exception as e:
            log_error(f"Error clearing cultural knowledge: {e}")
            raise e

    def delete_cultural_knowledge(self, id: str) -> None:
        """Delete a cultural knowledge entry from the JSON file."""
        try:
            cultural_knowledge = self._read_json_file(self.culture_table_name)
            cultural_knowledge = [ck for ck in cultural_knowledge if ck.get("id") != id]
            self._write_json_file(self.culture_table_name, cultural_knowledge)
        except Exception as e:
            log_error(f"Error deleting cultural knowledge: {e}")
            raise e

    def get_cultural_knowledge(
        self, id: str, deserialize: Optional[bool] = True
    ) -> Optional[Union[CulturalKnowledge, Dict[str, Any]]]:
        """Get a cultural knowledge entry from the JSON file."""
        try:
            cultural_knowledge = self._read_json_file(self.culture_table_name)
            for ck in cultural_knowledge:
                if ck.get("id") == id:
                    if not deserialize:
                        return ck
                    return deserialize_cultural_knowledge_from_db(ck)
            return None
        except Exception as e:
            log_error(f"Error getting cultural knowledge: {e}")
            raise e

    def get_all_cultural_knowledge(
        self,
        name: Optional[str] = None,
        agent_id: Optional[str] = None,
        team_id: Optional[str] = None,
        limit: Optional[int] = None,
        page: Optional[int] = None,
        sort_by: Optional[str] = None,
        sort_order: Optional[str] = None,
        deserialize: Optional[bool] = True,
    ) -> Union[List[CulturalKnowledge], Tuple[List[Dict[str, Any]], int]]:
        """Get all cultural knowledge from the JSON file."""
        try:
            cultural_knowledge = self._read_json_file(self.culture_table_name)

            # Filter
            filtered = []
            for ck in cultural_knowledge:
                if name and ck.get("name") != name:
                    continue
                if agent_id and ck.get("agent_id") != agent_id:
                    continue
                if team_id and ck.get("team_id") != team_id:
                    continue
                filtered.append(ck)

            # Sort
            if sort_by:
                filtered = apply_sorting(filtered, sort_by, sort_order)

            total_count = len(filtered)

            # Paginate
            if limit and page:
                start = (page - 1) * limit
                filtered = filtered[start : start + limit]
            elif limit:
                filtered = filtered[:limit]

            if not deserialize:
                return filtered, total_count

            return [deserialize_cultural_knowledge_from_db(ck) for ck in filtered]
        except Exception as e:
            log_error(f"Error getting all cultural knowledge: {e}")
            raise e

    def upsert_cultural_knowledge(
        self, cultural_knowledge: CulturalKnowledge, deserialize: Optional[bool] = True
    ) -> Optional[Union[CulturalKnowledge, Dict[str, Any]]]:
        """Upsert a cultural knowledge entry into the JSON file."""
        try:
            if not cultural_knowledge.id:
                cultural_knowledge.id = str(uuid4())

            all_cultural_knowledge = self._read_json_file(self.culture_table_name, create_table_if_not_found=True)

            # Serialize content, categories, and notes into a dict for DB storage
            content_dict = serialize_cultural_knowledge_for_db(cultural_knowledge)

            # Create the item dict with serialized content
            ck_dict = {
                "id": cultural_knowledge.id,
                "name": cultural_knowledge.name,
                "summary": cultural_knowledge.summary,
                "content": content_dict if content_dict else None,
                "metadata": cultural_knowledge.metadata,
                "input": cultural_knowledge.input,
                "created_at": cultural_knowledge.created_at,
                "updated_at": int(time.time()),
                "agent_id": cultural_knowledge.agent_id,
                "team_id": cultural_knowledge.team_id,
            }

            # Remove existing entry
            all_cultural_knowledge = [ck for ck in all_cultural_knowledge if ck.get("id") != cultural_knowledge.id]

            # Add new entry
            all_cultural_knowledge.append(ck_dict)

            self._write_json_file(self.culture_table_name, all_cultural_knowledge)

            return self.get_cultural_knowledge(cultural_knowledge.id, deserialize=deserialize)
        except Exception as e:
            log_error(f"Error upserting cultural knowledge: {e}")
            raise e

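One behavioral quirk worth noting: the upsert drops any entry with the same id and appends the new dict, so an updated entry moves to the end of the file rather than keeping its position, and the method re-reads the saved entry to honor deserialize. A sketch — the constructor fields mirror the ck_dict above, but the exact CulturalKnowledge signature is an assumption:

```python
from agno.db.json import JsonDb
from agno.db.schemas.culture import CulturalKnowledge  # schema path per this release's layout

db = JsonDb(db_path="./agno_db")  # hypothetical constructor argument

ck = CulturalKnowledge(name="Greeting style", summary="Keep greetings brief and friendly")
saved = db.upsert_cultural_knowledge(ck)     # id generated via uuid4 when missing
saved = db.upsert_cultural_knowledge(saved)  # same id: old entry removed, fresh dict appended last
```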
    # --- Traces ---
    def upsert_trace(self, trace: "Trace") -> None:
        """Create or update a single trace record in the database.

        Args:
            trace: The Trace object to store (one per trace_id).
        """
        try:
            traces = self._read_json_file(self.trace_table_name, create_table_if_not_found=True)

            # Check if trace exists
            existing_idx = None
            for i, existing in enumerate(traces):
                if existing.get("trace_id") == trace.trace_id:
                    existing_idx = i
                    break

            if existing_idx is not None:
                existing = traces[existing_idx]

                # workflow (level 3) > team (level 2) > agent (level 1) > child/unknown (level 0)
                def get_component_level(workflow_id, team_id, agent_id, name):
                    is_root_name = ".run" in name or ".arun" in name
                    if not is_root_name:
                        return 0
                    elif workflow_id:
                        return 3
                    elif team_id:
                        return 2
                    elif agent_id:
                        return 1
                    else:
                        return 0

                existing_level = get_component_level(
                    existing.get("workflow_id"),
                    existing.get("team_id"),
                    existing.get("agent_id"),
                    existing.get("name", ""),
                )
                new_level = get_component_level(trace.workflow_id, trace.team_id, trace.agent_id, trace.name)
                should_update_name = new_level > existing_level

                # Parse existing start_time to calculate correct duration
                existing_start_time_str = existing.get("start_time")
                if isinstance(existing_start_time_str, str):
                    existing_start_time = datetime.fromisoformat(existing_start_time_str.replace("Z", "+00:00"))
                else:
                    existing_start_time = trace.start_time

                recalculated_duration_ms = int((trace.end_time - existing_start_time).total_seconds() * 1000)

                # Update existing trace
                existing["end_time"] = trace.end_time.isoformat()
                existing["duration_ms"] = recalculated_duration_ms
                existing["status"] = trace.status
                if should_update_name:
                    existing["name"] = trace.name

                # Update context fields only if new value is not None
                if trace.run_id is not None:
                    existing["run_id"] = trace.run_id
                if trace.session_id is not None:
                    existing["session_id"] = trace.session_id
                if trace.user_id is not None:
                    existing["user_id"] = trace.user_id
                if trace.agent_id is not None:
                    existing["agent_id"] = trace.agent_id
                if trace.team_id is not None:
                    existing["team_id"] = trace.team_id
                if trace.workflow_id is not None:
                    existing["workflow_id"] = trace.workflow_id

                traces[existing_idx] = existing
            else:
                # Add new trace
                trace_dict = trace.to_dict()
                trace_dict.pop("total_spans", None)
                trace_dict.pop("error_count", None)
                traces.append(trace_dict)

            self._write_json_file(self.trace_table_name, traces)

        except Exception as e:
            log_error(f"Error creating trace: {e}")

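The nested helper decides whose name "wins" when several components report into one trace: only root-style names (containing .run or .arun) can claim it, and workflow outranks team outranks agent. A standalone restatement that makes the precedence concrete:

```python
def component_level(workflow_id, team_id, agent_id, name) -> int:
    # Mirrors the nested helper: root-style names only, workflow > team > agent
    if ".run" not in name and ".arun" not in name:
        return 0
    if workflow_id:
        return 3
    if team_id:
        return 2
    if agent_id:
        return 1
    return 0

# A workflow-level root beats an agent-level root, so the trace name is updated
assert component_level("wf-1", None, None, "Workflow.run") > component_level(None, None, "ag-1", "Agent.run")
assert component_level(None, None, "ag-1", "tool_call") == 0  # non-root names never win
```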
    def get_trace(
        self,
        trace_id: Optional[str] = None,
        run_id: Optional[str] = None,
    ):
        """Get a single trace by trace_id or other filters.

        Args:
            trace_id: The unique trace identifier.
            run_id: Filter by run ID (returns first match).

        Returns:
            Optional[Trace]: The trace if found, None otherwise.
        """
        try:
            from agno.tracing.schemas import Trace

            traces = self._read_json_file(self.trace_table_name, create_table_if_not_found=False)
            if not traces:
                return None

            # Get spans for calculating total_spans and error_count
            spans = self._read_json_file(self.span_table_name, create_table_if_not_found=False)

            # Filter traces
            filtered = []
            for t in traces:
                if trace_id and t.get("trace_id") == trace_id:
                    filtered.append(t)
                    break
                elif run_id and t.get("run_id") == run_id:
                    filtered.append(t)

            if not filtered:
                return None

            # Sort by start_time desc and get first
            filtered.sort(key=lambda x: x.get("start_time", ""), reverse=True)
            trace_data = filtered[0]

            # Calculate total_spans and error_count
            trace_spans = [s for s in spans if s.get("trace_id") == trace_data.get("trace_id")]
            trace_data["total_spans"] = len(trace_spans)
            trace_data["error_count"] = sum(1 for s in trace_spans if s.get("status_code") == "ERROR")

            return Trace.from_dict(trace_data)

        except Exception as e:
            log_error(f"Error getting trace: {e}")
            return None

    def get_traces(
        self,
        run_id: Optional[str] = None,
        session_id: Optional[str] = None,
        user_id: Optional[str] = None,
        agent_id: Optional[str] = None,
        team_id: Optional[str] = None,
        workflow_id: Optional[str] = None,
        status: Optional[str] = None,
        start_time: Optional[datetime] = None,
        end_time: Optional[datetime] = None,
        limit: Optional[int] = 20,
        page: Optional[int] = 1,
    ) -> tuple[List, int]:
        """Get traces matching the provided filters with pagination.

        Args:
            run_id: Filter by run ID.
            session_id: Filter by session ID.
            user_id: Filter by user ID.
            agent_id: Filter by agent ID.
            team_id: Filter by team ID.
            workflow_id: Filter by workflow ID.
            status: Filter by status (OK, ERROR, UNSET).
            start_time: Filter traces starting after this datetime.
            end_time: Filter traces ending before this datetime.
            limit: Maximum number of traces to return per page.
            page: Page number (1-indexed).

        Returns:
            tuple[List[Trace], int]: Tuple of (list of matching traces, total count).
        """
        try:
            from agno.tracing.schemas import Trace

            traces = self._read_json_file(self.trace_table_name, create_table_if_not_found=False)
            if not traces:
                return [], 0

            # Get spans for calculating total_spans and error_count
            spans = self._read_json_file(self.span_table_name, create_table_if_not_found=False)

            # Apply filters
            filtered = []
            for t in traces:
                if run_id and t.get("run_id") != run_id:
                    continue
                if session_id and t.get("session_id") != session_id:
                    continue
                if user_id and t.get("user_id") != user_id:
                    continue
                if agent_id and t.get("agent_id") != agent_id:
                    continue
                if team_id and t.get("team_id") != team_id:
                    continue
                if workflow_id and t.get("workflow_id") != workflow_id:
                    continue
                if status and t.get("status") != status:
                    continue
                if start_time:
                    trace_start = t.get("start_time", "")
                    if trace_start < start_time.isoformat():
                        continue
                if end_time:
                    trace_end = t.get("end_time", "")
                    if trace_end > end_time.isoformat():
                        continue
                filtered.append(t)

            total_count = len(filtered)

            # Sort by start_time desc
            filtered.sort(key=lambda x: x.get("start_time", ""), reverse=True)

            # Apply pagination
            if limit and page:
                start_idx = (page - 1) * limit
                filtered = filtered[start_idx : start_idx + limit]

            # Add total_spans and error_count to each trace
            result_traces = []
            for t in filtered:
                trace_spans = [s for s in spans if s.get("trace_id") == t.get("trace_id")]
                t["total_spans"] = len(trace_spans)
                t["error_count"] = sum(1 for s in trace_spans if s.get("status_code") == "ERROR")
                result_traces.append(Trace.from_dict(t))

            return result_traces, total_count

        except Exception as e:
            log_error(f"Error getting traces: {e}")
            return [], 0

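Note that the time filters compare ISO-8601 strings lexicographically, which is only sound when every stored timestamp shares one timezone and format — so passing timezone-aware UTC datetimes is the safe choice. Usage sketch under the earlier assumed setup:

```python
from datetime import datetime, timedelta, timezone

from agno.db.json import JsonDb

db = JsonDb(db_path="./agno_db")  # hypothetical constructor argument

since = datetime.now(timezone.utc) - timedelta(hours=1)
error_traces, total = db.get_traces(status="ERROR", start_time=since, limit=20, page=1)
for t in error_traces:
    print(t.trace_id, t.duration_ms, t.error_count)  # attribute names inferred from the dict keys above
```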
    def get_trace_stats(
        self,
        user_id: Optional[str] = None,
        agent_id: Optional[str] = None,
        team_id: Optional[str] = None,
        workflow_id: Optional[str] = None,
        start_time: Optional[datetime] = None,
        end_time: Optional[datetime] = None,
        limit: Optional[int] = 20,
        page: Optional[int] = 1,
    ) -> tuple[List[Dict[str, Any]], int]:
        """Get trace statistics grouped by session.

        Args:
            user_id: Filter by user ID.
            agent_id: Filter by agent ID.
            team_id: Filter by team ID.
            workflow_id: Filter by workflow ID.
            start_time: Filter sessions with traces created after this datetime.
            end_time: Filter sessions with traces created before this datetime.
            limit: Maximum number of sessions to return per page.
            page: Page number (1-indexed).

        Returns:
            tuple[List[Dict], int]: Tuple of (list of session stats dicts, total count).
        """
        try:
            traces = self._read_json_file(self.trace_table_name, create_table_if_not_found=False)
            if not traces:
                return [], 0

            # Group by session_id
            session_stats: Dict[str, Dict[str, Any]] = {}

            for t in traces:
                session_id = t.get("session_id")
                if not session_id:
                    continue

                # Apply filters
                if user_id and t.get("user_id") != user_id:
                    continue
                if agent_id and t.get("agent_id") != agent_id:
                    continue
                if team_id and t.get("team_id") != team_id:
                    continue
                if workflow_id and t.get("workflow_id") != workflow_id:
                    continue

                created_at = t.get("created_at", "")
                if start_time and created_at < start_time.isoformat():
                    continue
                if end_time and created_at > end_time.isoformat():
                    continue

                if session_id not in session_stats:
                    session_stats[session_id] = {
                        "session_id": session_id,
                        "user_id": t.get("user_id"),
                        "agent_id": t.get("agent_id"),
                        "team_id": t.get("team_id"),
                        "workflow_id": t.get("workflow_id"),
                        "total_traces": 0,
                        "first_trace_at": created_at,
                        "last_trace_at": created_at,
                    }

                session_stats[session_id]["total_traces"] += 1
                if created_at < session_stats[session_id]["first_trace_at"]:
                    session_stats[session_id]["first_trace_at"] = created_at
                if created_at > session_stats[session_id]["last_trace_at"]:
                    session_stats[session_id]["last_trace_at"] = created_at

            stats_list = list(session_stats.values())
            total_count = len(stats_list)

            # Sort by last_trace_at desc
            stats_list.sort(key=lambda x: x.get("last_trace_at", ""), reverse=True)

            # Apply pagination
            if limit and page:
                start_idx = (page - 1) * limit
                stats_list = stats_list[start_idx : start_idx + limit]

            # Convert ISO strings to datetime objects
            for stat in stats_list:
                first_at = stat.get("first_trace_at", "")
                last_at = stat.get("last_trace_at", "")
                if first_at:
                    stat["first_trace_at"] = datetime.fromisoformat(first_at.replace("Z", "+00:00"))
                if last_at:
                    stat["last_trace_at"] = datetime.fromisoformat(last_at.replace("Z", "+00:00"))

            return stats_list, total_count

        except Exception as e:
            log_error(f"Error getting trace stats: {e}")
            return [], 0

    # --- Spans ---
    def create_span(self, span: "Span") -> None:
        """Create a single span in the database.

        Args:
            span: The Span object to store.
        """
        try:
            spans = self._read_json_file(self.span_table_name, create_table_if_not_found=True)
            spans.append(span.to_dict())
            self._write_json_file(self.span_table_name, spans)

        except Exception as e:
            log_error(f"Error creating span: {e}")

    def create_spans(self, spans: List) -> None:
        """Create multiple spans in the database as a batch.

        Args:
            spans: List of Span objects to store.
        """
        if not spans:
            return

        try:
            existing_spans = self._read_json_file(self.span_table_name, create_table_if_not_found=True)
            for span in spans:
                existing_spans.append(span.to_dict())
            self._write_json_file(self.span_table_name, existing_spans)

        except Exception as e:
            log_error(f"Error creating spans batch: {e}")

    def get_span(self, span_id: str):
        """Get a single span by its span_id.

        Args:
            span_id: The unique span identifier.

        Returns:
            Optional[Span]: The span if found, None otherwise.
        """
        try:
            from agno.tracing.schemas import Span

            spans = self._read_json_file(self.span_table_name, create_table_if_not_found=False)

            for s in spans:
                if s.get("span_id") == span_id:
                    return Span.from_dict(s)

            return None

        except Exception as e:
            log_error(f"Error getting span: {e}")
            return None

    def get_spans(
        self,
        trace_id: Optional[str] = None,
        parent_span_id: Optional[str] = None,
        limit: Optional[int] = 1000,
    ) -> List:
        """Get spans matching the provided filters.

        Args:
            trace_id: Filter by trace ID.
            parent_span_id: Filter by parent span ID.
            limit: Maximum number of spans to return.

        Returns:
            List[Span]: List of matching spans.
        """
        try:
            from agno.tracing.schemas import Span

            spans = self._read_json_file(self.span_table_name, create_table_if_not_found=False)
            if not spans:
                return []

            # Apply filters
            filtered = []
            for s in spans:
                if trace_id and s.get("trace_id") != trace_id:
                    continue
                if parent_span_id and s.get("parent_span_id") != parent_span_id:
                    continue
                filtered.append(s)

            # Apply limit
            if limit:
                filtered = filtered[:limit]

            return [Span.from_dict(s) for s in filtered]

        except Exception as e:
            log_error(f"Error getting spans: {e}")
            return []
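get_spans returns a flat list; since each stored record carries trace_id and parent_span_id, a caller can rebuild the call tree in a few lines. A self-contained sketch over plain dicts shaped like the records above:

```python
from collections import defaultdict
from typing import Any, Dict, List

def build_span_tree(spans: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
    """Attach each span's children under a 'children' key; roots have no parent_span_id."""
    children = defaultdict(list)
    for s in spans:
        children[s.get("parent_span_id")].append(s)

    def attach(node: Dict[str, Any]) -> Dict[str, Any]:
        node["children"] = [attach(c) for c in children.get(node["span_id"], [])]
        return node

    return [attach(root) for root in children.get(None, [])]

spans = [
    {"span_id": "a", "parent_span_id": None, "name": "Agent.run"},
    {"span_id": "b", "parent_span_id": "a", "name": "tool_call"},
]
tree = build_span_tree(spans)
assert tree[0]["children"][0]["span_id"] == "b"
```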