agno 2.2.13__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- agno/__init__.py +8 -0
- agno/agent/__init__.py +51 -0
- agno/agent/agent.py +10405 -0
- agno/api/__init__.py +0 -0
- agno/api/agent.py +28 -0
- agno/api/api.py +40 -0
- agno/api/evals.py +22 -0
- agno/api/os.py +17 -0
- agno/api/routes.py +13 -0
- agno/api/schemas/__init__.py +9 -0
- agno/api/schemas/agent.py +16 -0
- agno/api/schemas/evals.py +16 -0
- agno/api/schemas/os.py +14 -0
- agno/api/schemas/response.py +6 -0
- agno/api/schemas/team.py +16 -0
- agno/api/schemas/utils.py +21 -0
- agno/api/schemas/workflows.py +16 -0
- agno/api/settings.py +53 -0
- agno/api/team.py +30 -0
- agno/api/workflow.py +28 -0
- agno/cloud/aws/base.py +214 -0
- agno/cloud/aws/s3/__init__.py +2 -0
- agno/cloud/aws/s3/api_client.py +43 -0
- agno/cloud/aws/s3/bucket.py +195 -0
- agno/cloud/aws/s3/object.py +57 -0
- agno/culture/__init__.py +3 -0
- agno/culture/manager.py +956 -0
- agno/db/__init__.py +24 -0
- agno/db/async_postgres/__init__.py +3 -0
- agno/db/base.py +598 -0
- agno/db/dynamo/__init__.py +3 -0
- agno/db/dynamo/dynamo.py +2042 -0
- agno/db/dynamo/schemas.py +314 -0
- agno/db/dynamo/utils.py +743 -0
- agno/db/firestore/__init__.py +3 -0
- agno/db/firestore/firestore.py +1795 -0
- agno/db/firestore/schemas.py +140 -0
- agno/db/firestore/utils.py +376 -0
- agno/db/gcs_json/__init__.py +3 -0
- agno/db/gcs_json/gcs_json_db.py +1335 -0
- agno/db/gcs_json/utils.py +228 -0
- agno/db/in_memory/__init__.py +3 -0
- agno/db/in_memory/in_memory_db.py +1160 -0
- agno/db/in_memory/utils.py +230 -0
- agno/db/json/__init__.py +3 -0
- agno/db/json/json_db.py +1328 -0
- agno/db/json/utils.py +230 -0
- agno/db/migrations/__init__.py +0 -0
- agno/db/migrations/v1_to_v2.py +635 -0
- agno/db/mongo/__init__.py +17 -0
- agno/db/mongo/async_mongo.py +2026 -0
- agno/db/mongo/mongo.py +1982 -0
- agno/db/mongo/schemas.py +87 -0
- agno/db/mongo/utils.py +259 -0
- agno/db/mysql/__init__.py +3 -0
- agno/db/mysql/mysql.py +2308 -0
- agno/db/mysql/schemas.py +138 -0
- agno/db/mysql/utils.py +355 -0
- agno/db/postgres/__init__.py +4 -0
- agno/db/postgres/async_postgres.py +1927 -0
- agno/db/postgres/postgres.py +2260 -0
- agno/db/postgres/schemas.py +139 -0
- agno/db/postgres/utils.py +442 -0
- agno/db/redis/__init__.py +3 -0
- agno/db/redis/redis.py +1660 -0
- agno/db/redis/schemas.py +123 -0
- agno/db/redis/utils.py +346 -0
- agno/db/schemas/__init__.py +4 -0
- agno/db/schemas/culture.py +120 -0
- agno/db/schemas/evals.py +33 -0
- agno/db/schemas/knowledge.py +40 -0
- agno/db/schemas/memory.py +46 -0
- agno/db/schemas/metrics.py +0 -0
- agno/db/singlestore/__init__.py +3 -0
- agno/db/singlestore/schemas.py +130 -0
- agno/db/singlestore/singlestore.py +2272 -0
- agno/db/singlestore/utils.py +384 -0
- agno/db/sqlite/__init__.py +4 -0
- agno/db/sqlite/async_sqlite.py +2293 -0
- agno/db/sqlite/schemas.py +133 -0
- agno/db/sqlite/sqlite.py +2288 -0
- agno/db/sqlite/utils.py +431 -0
- agno/db/surrealdb/__init__.py +3 -0
- agno/db/surrealdb/metrics.py +292 -0
- agno/db/surrealdb/models.py +309 -0
- agno/db/surrealdb/queries.py +71 -0
- agno/db/surrealdb/surrealdb.py +1353 -0
- agno/db/surrealdb/utils.py +147 -0
- agno/db/utils.py +116 -0
- agno/debug.py +18 -0
- agno/eval/__init__.py +14 -0
- agno/eval/accuracy.py +834 -0
- agno/eval/performance.py +773 -0
- agno/eval/reliability.py +306 -0
- agno/eval/utils.py +119 -0
- agno/exceptions.py +161 -0
- agno/filters.py +354 -0
- agno/guardrails/__init__.py +6 -0
- agno/guardrails/base.py +19 -0
- agno/guardrails/openai.py +144 -0
- agno/guardrails/pii.py +94 -0
- agno/guardrails/prompt_injection.py +52 -0
- agno/integrations/__init__.py +0 -0
- agno/integrations/discord/__init__.py +3 -0
- agno/integrations/discord/client.py +203 -0
- agno/knowledge/__init__.py +5 -0
- agno/knowledge/chunking/__init__.py +0 -0
- agno/knowledge/chunking/agentic.py +79 -0
- agno/knowledge/chunking/document.py +91 -0
- agno/knowledge/chunking/fixed.py +57 -0
- agno/knowledge/chunking/markdown.py +151 -0
- agno/knowledge/chunking/recursive.py +63 -0
- agno/knowledge/chunking/row.py +39 -0
- agno/knowledge/chunking/semantic.py +86 -0
- agno/knowledge/chunking/strategy.py +165 -0
- agno/knowledge/content.py +74 -0
- agno/knowledge/document/__init__.py +5 -0
- agno/knowledge/document/base.py +58 -0
- agno/knowledge/embedder/__init__.py +5 -0
- agno/knowledge/embedder/aws_bedrock.py +343 -0
- agno/knowledge/embedder/azure_openai.py +210 -0
- agno/knowledge/embedder/base.py +23 -0
- agno/knowledge/embedder/cohere.py +323 -0
- agno/knowledge/embedder/fastembed.py +62 -0
- agno/knowledge/embedder/fireworks.py +13 -0
- agno/knowledge/embedder/google.py +258 -0
- agno/knowledge/embedder/huggingface.py +94 -0
- agno/knowledge/embedder/jina.py +182 -0
- agno/knowledge/embedder/langdb.py +22 -0
- agno/knowledge/embedder/mistral.py +206 -0
- agno/knowledge/embedder/nebius.py +13 -0
- agno/knowledge/embedder/ollama.py +154 -0
- agno/knowledge/embedder/openai.py +195 -0
- agno/knowledge/embedder/sentence_transformer.py +63 -0
- agno/knowledge/embedder/together.py +13 -0
- agno/knowledge/embedder/vllm.py +262 -0
- agno/knowledge/embedder/voyageai.py +165 -0
- agno/knowledge/knowledge.py +1988 -0
- agno/knowledge/reader/__init__.py +7 -0
- agno/knowledge/reader/arxiv_reader.py +81 -0
- agno/knowledge/reader/base.py +95 -0
- agno/knowledge/reader/csv_reader.py +166 -0
- agno/knowledge/reader/docx_reader.py +82 -0
- agno/knowledge/reader/field_labeled_csv_reader.py +292 -0
- agno/knowledge/reader/firecrawl_reader.py +201 -0
- agno/knowledge/reader/json_reader.py +87 -0
- agno/knowledge/reader/markdown_reader.py +137 -0
- agno/knowledge/reader/pdf_reader.py +431 -0
- agno/knowledge/reader/pptx_reader.py +101 -0
- agno/knowledge/reader/reader_factory.py +313 -0
- agno/knowledge/reader/s3_reader.py +89 -0
- agno/knowledge/reader/tavily_reader.py +194 -0
- agno/knowledge/reader/text_reader.py +115 -0
- agno/knowledge/reader/web_search_reader.py +372 -0
- agno/knowledge/reader/website_reader.py +455 -0
- agno/knowledge/reader/wikipedia_reader.py +59 -0
- agno/knowledge/reader/youtube_reader.py +78 -0
- agno/knowledge/remote_content/__init__.py +0 -0
- agno/knowledge/remote_content/remote_content.py +88 -0
- agno/knowledge/reranker/__init__.py +3 -0
- agno/knowledge/reranker/base.py +14 -0
- agno/knowledge/reranker/cohere.py +64 -0
- agno/knowledge/reranker/infinity.py +195 -0
- agno/knowledge/reranker/sentence_transformer.py +54 -0
- agno/knowledge/types.py +39 -0
- agno/knowledge/utils.py +189 -0
- agno/media.py +462 -0
- agno/memory/__init__.py +3 -0
- agno/memory/manager.py +1327 -0
- agno/models/__init__.py +0 -0
- agno/models/aimlapi/__init__.py +5 -0
- agno/models/aimlapi/aimlapi.py +45 -0
- agno/models/anthropic/__init__.py +5 -0
- agno/models/anthropic/claude.py +757 -0
- agno/models/aws/__init__.py +15 -0
- agno/models/aws/bedrock.py +701 -0
- agno/models/aws/claude.py +378 -0
- agno/models/azure/__init__.py +18 -0
- agno/models/azure/ai_foundry.py +485 -0
- agno/models/azure/openai_chat.py +131 -0
- agno/models/base.py +2175 -0
- agno/models/cerebras/__init__.py +12 -0
- agno/models/cerebras/cerebras.py +501 -0
- agno/models/cerebras/cerebras_openai.py +112 -0
- agno/models/cohere/__init__.py +5 -0
- agno/models/cohere/chat.py +389 -0
- agno/models/cometapi/__init__.py +5 -0
- agno/models/cometapi/cometapi.py +57 -0
- agno/models/dashscope/__init__.py +5 -0
- agno/models/dashscope/dashscope.py +91 -0
- agno/models/deepinfra/__init__.py +5 -0
- agno/models/deepinfra/deepinfra.py +28 -0
- agno/models/deepseek/__init__.py +5 -0
- agno/models/deepseek/deepseek.py +61 -0
- agno/models/defaults.py +1 -0
- agno/models/fireworks/__init__.py +5 -0
- agno/models/fireworks/fireworks.py +26 -0
- agno/models/google/__init__.py +5 -0
- agno/models/google/gemini.py +1085 -0
- agno/models/groq/__init__.py +5 -0
- agno/models/groq/groq.py +556 -0
- agno/models/huggingface/__init__.py +5 -0
- agno/models/huggingface/huggingface.py +491 -0
- agno/models/ibm/__init__.py +5 -0
- agno/models/ibm/watsonx.py +422 -0
- agno/models/internlm/__init__.py +3 -0
- agno/models/internlm/internlm.py +26 -0
- agno/models/langdb/__init__.py +1 -0
- agno/models/langdb/langdb.py +48 -0
- agno/models/litellm/__init__.py +14 -0
- agno/models/litellm/chat.py +468 -0
- agno/models/litellm/litellm_openai.py +25 -0
- agno/models/llama_cpp/__init__.py +5 -0
- agno/models/llama_cpp/llama_cpp.py +22 -0
- agno/models/lmstudio/__init__.py +5 -0
- agno/models/lmstudio/lmstudio.py +25 -0
- agno/models/message.py +434 -0
- agno/models/meta/__init__.py +12 -0
- agno/models/meta/llama.py +475 -0
- agno/models/meta/llama_openai.py +78 -0
- agno/models/metrics.py +120 -0
- agno/models/mistral/__init__.py +5 -0
- agno/models/mistral/mistral.py +432 -0
- agno/models/nebius/__init__.py +3 -0
- agno/models/nebius/nebius.py +54 -0
- agno/models/nexus/__init__.py +3 -0
- agno/models/nexus/nexus.py +22 -0
- agno/models/nvidia/__init__.py +5 -0
- agno/models/nvidia/nvidia.py +28 -0
- agno/models/ollama/__init__.py +5 -0
- agno/models/ollama/chat.py +441 -0
- agno/models/openai/__init__.py +9 -0
- agno/models/openai/chat.py +883 -0
- agno/models/openai/like.py +27 -0
- agno/models/openai/responses.py +1050 -0
- agno/models/openrouter/__init__.py +5 -0
- agno/models/openrouter/openrouter.py +66 -0
- agno/models/perplexity/__init__.py +5 -0
- agno/models/perplexity/perplexity.py +187 -0
- agno/models/portkey/__init__.py +3 -0
- agno/models/portkey/portkey.py +81 -0
- agno/models/requesty/__init__.py +5 -0
- agno/models/requesty/requesty.py +52 -0
- agno/models/response.py +199 -0
- agno/models/sambanova/__init__.py +5 -0
- agno/models/sambanova/sambanova.py +28 -0
- agno/models/siliconflow/__init__.py +5 -0
- agno/models/siliconflow/siliconflow.py +25 -0
- agno/models/together/__init__.py +5 -0
- agno/models/together/together.py +25 -0
- agno/models/utils.py +266 -0
- agno/models/vercel/__init__.py +3 -0
- agno/models/vercel/v0.py +26 -0
- agno/models/vertexai/__init__.py +0 -0
- agno/models/vertexai/claude.py +70 -0
- agno/models/vllm/__init__.py +3 -0
- agno/models/vllm/vllm.py +78 -0
- agno/models/xai/__init__.py +3 -0
- agno/models/xai/xai.py +113 -0
- agno/os/__init__.py +3 -0
- agno/os/app.py +876 -0
- agno/os/auth.py +57 -0
- agno/os/config.py +104 -0
- agno/os/interfaces/__init__.py +1 -0
- agno/os/interfaces/a2a/__init__.py +3 -0
- agno/os/interfaces/a2a/a2a.py +42 -0
- agno/os/interfaces/a2a/router.py +250 -0
- agno/os/interfaces/a2a/utils.py +924 -0
- agno/os/interfaces/agui/__init__.py +3 -0
- agno/os/interfaces/agui/agui.py +47 -0
- agno/os/interfaces/agui/router.py +144 -0
- agno/os/interfaces/agui/utils.py +534 -0
- agno/os/interfaces/base.py +25 -0
- agno/os/interfaces/slack/__init__.py +3 -0
- agno/os/interfaces/slack/router.py +148 -0
- agno/os/interfaces/slack/security.py +30 -0
- agno/os/interfaces/slack/slack.py +47 -0
- agno/os/interfaces/whatsapp/__init__.py +3 -0
- agno/os/interfaces/whatsapp/router.py +211 -0
- agno/os/interfaces/whatsapp/security.py +53 -0
- agno/os/interfaces/whatsapp/whatsapp.py +36 -0
- agno/os/mcp.py +292 -0
- agno/os/middleware/__init__.py +7 -0
- agno/os/middleware/jwt.py +233 -0
- agno/os/router.py +1763 -0
- agno/os/routers/__init__.py +3 -0
- agno/os/routers/evals/__init__.py +3 -0
- agno/os/routers/evals/evals.py +430 -0
- agno/os/routers/evals/schemas.py +142 -0
- agno/os/routers/evals/utils.py +162 -0
- agno/os/routers/health.py +31 -0
- agno/os/routers/home.py +52 -0
- agno/os/routers/knowledge/__init__.py +3 -0
- agno/os/routers/knowledge/knowledge.py +997 -0
- agno/os/routers/knowledge/schemas.py +178 -0
- agno/os/routers/memory/__init__.py +3 -0
- agno/os/routers/memory/memory.py +515 -0
- agno/os/routers/memory/schemas.py +62 -0
- agno/os/routers/metrics/__init__.py +3 -0
- agno/os/routers/metrics/metrics.py +190 -0
- agno/os/routers/metrics/schemas.py +47 -0
- agno/os/routers/session/__init__.py +3 -0
- agno/os/routers/session/session.py +997 -0
- agno/os/schema.py +1055 -0
- agno/os/settings.py +43 -0
- agno/os/utils.py +630 -0
- agno/py.typed +0 -0
- agno/reasoning/__init__.py +0 -0
- agno/reasoning/anthropic.py +80 -0
- agno/reasoning/azure_ai_foundry.py +67 -0
- agno/reasoning/deepseek.py +63 -0
- agno/reasoning/default.py +97 -0
- agno/reasoning/gemini.py +73 -0
- agno/reasoning/groq.py +71 -0
- agno/reasoning/helpers.py +63 -0
- agno/reasoning/ollama.py +67 -0
- agno/reasoning/openai.py +86 -0
- agno/reasoning/step.py +31 -0
- agno/reasoning/vertexai.py +76 -0
- agno/run/__init__.py +6 -0
- agno/run/agent.py +787 -0
- agno/run/base.py +229 -0
- agno/run/cancel.py +81 -0
- agno/run/messages.py +32 -0
- agno/run/team.py +753 -0
- agno/run/workflow.py +708 -0
- agno/session/__init__.py +10 -0
- agno/session/agent.py +295 -0
- agno/session/summary.py +265 -0
- agno/session/team.py +392 -0
- agno/session/workflow.py +205 -0
- agno/team/__init__.py +37 -0
- agno/team/team.py +8793 -0
- agno/tools/__init__.py +10 -0
- agno/tools/agentql.py +120 -0
- agno/tools/airflow.py +69 -0
- agno/tools/api.py +122 -0
- agno/tools/apify.py +314 -0
- agno/tools/arxiv.py +127 -0
- agno/tools/aws_lambda.py +53 -0
- agno/tools/aws_ses.py +66 -0
- agno/tools/baidusearch.py +89 -0
- agno/tools/bitbucket.py +292 -0
- agno/tools/brandfetch.py +213 -0
- agno/tools/bravesearch.py +106 -0
- agno/tools/brightdata.py +367 -0
- agno/tools/browserbase.py +209 -0
- agno/tools/calcom.py +255 -0
- agno/tools/calculator.py +151 -0
- agno/tools/cartesia.py +187 -0
- agno/tools/clickup.py +244 -0
- agno/tools/confluence.py +240 -0
- agno/tools/crawl4ai.py +158 -0
- agno/tools/csv_toolkit.py +185 -0
- agno/tools/dalle.py +110 -0
- agno/tools/daytona.py +475 -0
- agno/tools/decorator.py +262 -0
- agno/tools/desi_vocal.py +108 -0
- agno/tools/discord.py +161 -0
- agno/tools/docker.py +716 -0
- agno/tools/duckdb.py +379 -0
- agno/tools/duckduckgo.py +91 -0
- agno/tools/e2b.py +703 -0
- agno/tools/eleven_labs.py +196 -0
- agno/tools/email.py +67 -0
- agno/tools/evm.py +129 -0
- agno/tools/exa.py +396 -0
- agno/tools/fal.py +127 -0
- agno/tools/file.py +240 -0
- agno/tools/file_generation.py +350 -0
- agno/tools/financial_datasets.py +288 -0
- agno/tools/firecrawl.py +143 -0
- agno/tools/function.py +1187 -0
- agno/tools/giphy.py +93 -0
- agno/tools/github.py +1760 -0
- agno/tools/gmail.py +922 -0
- agno/tools/google_bigquery.py +117 -0
- agno/tools/google_drive.py +270 -0
- agno/tools/google_maps.py +253 -0
- agno/tools/googlecalendar.py +674 -0
- agno/tools/googlesearch.py +98 -0
- agno/tools/googlesheets.py +377 -0
- agno/tools/hackernews.py +77 -0
- agno/tools/jina.py +101 -0
- agno/tools/jira.py +170 -0
- agno/tools/knowledge.py +218 -0
- agno/tools/linear.py +426 -0
- agno/tools/linkup.py +58 -0
- agno/tools/local_file_system.py +90 -0
- agno/tools/lumalab.py +183 -0
- agno/tools/mcp/__init__.py +10 -0
- agno/tools/mcp/mcp.py +331 -0
- agno/tools/mcp/multi_mcp.py +347 -0
- agno/tools/mcp/params.py +24 -0
- agno/tools/mcp_toolbox.py +284 -0
- agno/tools/mem0.py +193 -0
- agno/tools/memori.py +339 -0
- agno/tools/memory.py +419 -0
- agno/tools/mlx_transcribe.py +139 -0
- agno/tools/models/__init__.py +0 -0
- agno/tools/models/azure_openai.py +190 -0
- agno/tools/models/gemini.py +203 -0
- agno/tools/models/groq.py +158 -0
- agno/tools/models/morph.py +186 -0
- agno/tools/models/nebius.py +124 -0
- agno/tools/models_labs.py +195 -0
- agno/tools/moviepy_video.py +349 -0
- agno/tools/neo4j.py +134 -0
- agno/tools/newspaper.py +46 -0
- agno/tools/newspaper4k.py +93 -0
- agno/tools/notion.py +204 -0
- agno/tools/openai.py +202 -0
- agno/tools/openbb.py +160 -0
- agno/tools/opencv.py +321 -0
- agno/tools/openweather.py +233 -0
- agno/tools/oxylabs.py +385 -0
- agno/tools/pandas.py +102 -0
- agno/tools/parallel.py +314 -0
- agno/tools/postgres.py +257 -0
- agno/tools/pubmed.py +188 -0
- agno/tools/python.py +205 -0
- agno/tools/reasoning.py +283 -0
- agno/tools/reddit.py +467 -0
- agno/tools/replicate.py +117 -0
- agno/tools/resend.py +62 -0
- agno/tools/scrapegraph.py +222 -0
- agno/tools/searxng.py +152 -0
- agno/tools/serpapi.py +116 -0
- agno/tools/serper.py +255 -0
- agno/tools/shell.py +53 -0
- agno/tools/slack.py +136 -0
- agno/tools/sleep.py +20 -0
- agno/tools/spider.py +116 -0
- agno/tools/sql.py +154 -0
- agno/tools/streamlit/__init__.py +0 -0
- agno/tools/streamlit/components.py +113 -0
- agno/tools/tavily.py +254 -0
- agno/tools/telegram.py +48 -0
- agno/tools/todoist.py +218 -0
- agno/tools/tool_registry.py +1 -0
- agno/tools/toolkit.py +146 -0
- agno/tools/trafilatura.py +388 -0
- agno/tools/trello.py +274 -0
- agno/tools/twilio.py +186 -0
- agno/tools/user_control_flow.py +78 -0
- agno/tools/valyu.py +228 -0
- agno/tools/visualization.py +467 -0
- agno/tools/webbrowser.py +28 -0
- agno/tools/webex.py +76 -0
- agno/tools/website.py +54 -0
- agno/tools/webtools.py +45 -0
- agno/tools/whatsapp.py +286 -0
- agno/tools/wikipedia.py +63 -0
- agno/tools/workflow.py +278 -0
- agno/tools/x.py +335 -0
- agno/tools/yfinance.py +257 -0
- agno/tools/youtube.py +184 -0
- agno/tools/zendesk.py +82 -0
- agno/tools/zep.py +454 -0
- agno/tools/zoom.py +382 -0
- agno/utils/__init__.py +0 -0
- agno/utils/agent.py +820 -0
- agno/utils/audio.py +49 -0
- agno/utils/certs.py +27 -0
- agno/utils/code_execution.py +11 -0
- agno/utils/common.py +132 -0
- agno/utils/dttm.py +13 -0
- agno/utils/enum.py +22 -0
- agno/utils/env.py +11 -0
- agno/utils/events.py +696 -0
- agno/utils/format_str.py +16 -0
- agno/utils/functions.py +166 -0
- agno/utils/gemini.py +426 -0
- agno/utils/hooks.py +57 -0
- agno/utils/http.py +74 -0
- agno/utils/json_schema.py +234 -0
- agno/utils/knowledge.py +36 -0
- agno/utils/location.py +19 -0
- agno/utils/log.py +255 -0
- agno/utils/mcp.py +214 -0
- agno/utils/media.py +352 -0
- agno/utils/merge_dict.py +41 -0
- agno/utils/message.py +118 -0
- agno/utils/models/__init__.py +0 -0
- agno/utils/models/ai_foundry.py +43 -0
- agno/utils/models/claude.py +358 -0
- agno/utils/models/cohere.py +87 -0
- agno/utils/models/llama.py +78 -0
- agno/utils/models/mistral.py +98 -0
- agno/utils/models/openai_responses.py +140 -0
- agno/utils/models/schema_utils.py +153 -0
- agno/utils/models/watsonx.py +41 -0
- agno/utils/openai.py +257 -0
- agno/utils/pickle.py +32 -0
- agno/utils/pprint.py +178 -0
- agno/utils/print_response/__init__.py +0 -0
- agno/utils/print_response/agent.py +842 -0
- agno/utils/print_response/team.py +1724 -0
- agno/utils/print_response/workflow.py +1668 -0
- agno/utils/prompts.py +111 -0
- agno/utils/reasoning.py +108 -0
- agno/utils/response.py +163 -0
- agno/utils/response_iterator.py +17 -0
- agno/utils/safe_formatter.py +24 -0
- agno/utils/serialize.py +32 -0
- agno/utils/shell.py +22 -0
- agno/utils/streamlit.py +487 -0
- agno/utils/string.py +231 -0
- agno/utils/team.py +139 -0
- agno/utils/timer.py +41 -0
- agno/utils/tools.py +102 -0
- agno/utils/web.py +23 -0
- agno/utils/whatsapp.py +305 -0
- agno/utils/yaml_io.py +25 -0
- agno/vectordb/__init__.py +3 -0
- agno/vectordb/base.py +127 -0
- agno/vectordb/cassandra/__init__.py +5 -0
- agno/vectordb/cassandra/cassandra.py +501 -0
- agno/vectordb/cassandra/extra_param_mixin.py +11 -0
- agno/vectordb/cassandra/index.py +13 -0
- agno/vectordb/chroma/__init__.py +5 -0
- agno/vectordb/chroma/chromadb.py +929 -0
- agno/vectordb/clickhouse/__init__.py +9 -0
- agno/vectordb/clickhouse/clickhousedb.py +835 -0
- agno/vectordb/clickhouse/index.py +9 -0
- agno/vectordb/couchbase/__init__.py +3 -0
- agno/vectordb/couchbase/couchbase.py +1442 -0
- agno/vectordb/distance.py +7 -0
- agno/vectordb/lancedb/__init__.py +6 -0
- agno/vectordb/lancedb/lance_db.py +995 -0
- agno/vectordb/langchaindb/__init__.py +5 -0
- agno/vectordb/langchaindb/langchaindb.py +163 -0
- agno/vectordb/lightrag/__init__.py +5 -0
- agno/vectordb/lightrag/lightrag.py +388 -0
- agno/vectordb/llamaindex/__init__.py +3 -0
- agno/vectordb/llamaindex/llamaindexdb.py +166 -0
- agno/vectordb/milvus/__init__.py +4 -0
- agno/vectordb/milvus/milvus.py +1182 -0
- agno/vectordb/mongodb/__init__.py +9 -0
- agno/vectordb/mongodb/mongodb.py +1417 -0
- agno/vectordb/pgvector/__init__.py +12 -0
- agno/vectordb/pgvector/index.py +23 -0
- agno/vectordb/pgvector/pgvector.py +1462 -0
- agno/vectordb/pineconedb/__init__.py +5 -0
- agno/vectordb/pineconedb/pineconedb.py +747 -0
- agno/vectordb/qdrant/__init__.py +5 -0
- agno/vectordb/qdrant/qdrant.py +1134 -0
- agno/vectordb/redis/__init__.py +9 -0
- agno/vectordb/redis/redisdb.py +694 -0
- agno/vectordb/search.py +7 -0
- agno/vectordb/singlestore/__init__.py +10 -0
- agno/vectordb/singlestore/index.py +41 -0
- agno/vectordb/singlestore/singlestore.py +763 -0
- agno/vectordb/surrealdb/__init__.py +3 -0
- agno/vectordb/surrealdb/surrealdb.py +699 -0
- agno/vectordb/upstashdb/__init__.py +5 -0
- agno/vectordb/upstashdb/upstashdb.py +718 -0
- agno/vectordb/weaviate/__init__.py +8 -0
- agno/vectordb/weaviate/index.py +15 -0
- agno/vectordb/weaviate/weaviate.py +1005 -0
- agno/workflow/__init__.py +23 -0
- agno/workflow/agent.py +299 -0
- agno/workflow/condition.py +738 -0
- agno/workflow/loop.py +735 -0
- agno/workflow/parallel.py +824 -0
- agno/workflow/router.py +702 -0
- agno/workflow/step.py +1432 -0
- agno/workflow/steps.py +592 -0
- agno/workflow/types.py +520 -0
- agno/workflow/workflow.py +4321 -0
- agno-2.2.13.dist-info/METADATA +614 -0
- agno-2.2.13.dist-info/RECORD +575 -0
- agno-2.2.13.dist-info/WHEEL +5 -0
- agno-2.2.13.dist-info/licenses/LICENSE +201 -0
- agno-2.2.13.dist-info/top_level.txt +1 -0

--- /dev/null
+++ agno/db/gcs_json/gcs_json_db.py
@@ -0,0 +1,1335 @@

import json
import time
from datetime import date, datetime, timedelta, timezone
from typing import Any, Dict, List, Optional, Tuple, Union
from uuid import uuid4

from agno.db.base import BaseDb, SessionType
from agno.db.gcs_json.utils import (
    apply_sorting,
    calculate_date_metrics,
    deserialize_cultural_knowledge_from_db,
    fetch_all_sessions_data,
    get_dates_to_calculate_metrics_for,
    serialize_cultural_knowledge_for_db,
)
from agno.db.schemas.culture import CulturalKnowledge
from agno.db.schemas.evals import EvalFilterType, EvalRunRecord, EvalType
from agno.db.schemas.knowledge import KnowledgeRow
from agno.db.schemas.memory import UserMemory
from agno.session import AgentSession, Session, TeamSession, WorkflowSession
from agno.utils.log import log_debug, log_error, log_info, log_warning
from agno.utils.string import generate_id

try:
    from google.cloud import storage as gcs  # type: ignore
except ImportError:
    raise ImportError("`google-cloud-storage` not installed. Please install it with `pip install google-cloud-storage`")


class GcsJsonDb(BaseDb):
    def __init__(
        self,
        bucket_name: str,
        prefix: Optional[str] = None,
        session_table: Optional[str] = None,
        memory_table: Optional[str] = None,
        metrics_table: Optional[str] = None,
        eval_table: Optional[str] = None,
        knowledge_table: Optional[str] = None,
        culture_table: Optional[str] = None,
        project: Optional[str] = None,
        credentials: Optional[Any] = None,
        id: Optional[str] = None,
    ):
        """
        Interface for interacting with JSON files stored in Google Cloud Storage as a database.

        Args:
            bucket_name (str): Name of the GCS bucket where JSON files will be stored.
            prefix (Optional[str]): Path prefix for organizing files in the bucket. Defaults to "agno/".
            session_table (Optional[str]): Name of the JSON file to store sessions (without .json extension).
            memory_table (Optional[str]): Name of the JSON file to store user memories.
            metrics_table (Optional[str]): Name of the JSON file to store metrics.
            eval_table (Optional[str]): Name of the JSON file to store evaluation runs.
            knowledge_table (Optional[str]): Name of the JSON file to store knowledge content.
            culture_table (Optional[str]): Name of the JSON file to store cultural knowledge.
            project (Optional[str]): GCP project ID. If None, uses the default project.
            credentials (Optional[Any]): GCP credentials. If None, uses default credentials.
            id (Optional[str]): ID of the database.
        """
        if id is None:
            prefix_suffix = prefix or "agno/"
            seed = f"{bucket_name}_{project}#{prefix_suffix}"
            id = generate_id(seed)

        super().__init__(
            id=id,
            session_table=session_table,
            memory_table=memory_table,
            metrics_table=metrics_table,
            eval_table=eval_table,
            knowledge_table=knowledge_table,
            culture_table=culture_table,
        )

        self.bucket_name = bucket_name
        self.prefix = prefix or "agno/"
        if self.prefix and not self.prefix.endswith("/"):
            self.prefix += "/"

        # Initialize GCS client and bucket
        self.client = gcs.Client(project=project, credentials=credentials)
        self.bucket = self.client.bucket(self.bucket_name)
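
Aside — a minimal usage sketch (not part of the packaged file; the bucket name is a placeholder, and the import path assumes `agno/db/gcs_json/__init__.py` re-exports the class):

from agno.db.gcs_json import GcsJsonDb

# Requires google-cloud-storage and GCP application-default credentials;
# the bucket must already exist and be writable by the caller.
db = GcsJsonDb(
    bucket_name="my-agno-data",   # placeholder
    prefix="agno/",               # default; a trailing "/" is appended if missing
    session_table="sessions",     # stored as gs://my-agno-data/agno/sessions.json
    memory_table="memories",      # stored as gs://my-agno-data/agno/memories.json
)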

    def table_exists(self, table_name: str) -> bool:
        """JSON implementation, always returns True."""
        return True

    def _get_blob_name(self, filename: str) -> str:
        """Get the full blob name including prefix for a given filename."""
        return f"{self.prefix}{filename}.json"

    def _read_json_file(self, filename: str, create_table_if_not_found: Optional[bool] = False) -> List[Dict[str, Any]]:
        """Read data from a JSON file in GCS, optionally creating it if it doesn't exist.

        Args:
            filename (str): The name of the JSON file to read.
            create_table_if_not_found (Optional[bool]): Whether to create an empty file when none exists.

        Returns:
            List[Dict[str, Any]]: The data from the JSON file.

        Raises:
            json.JSONDecodeError: If the JSON file is not valid.
        """
        blob_name = self._get_blob_name(filename)
        blob = self.bucket.blob(blob_name)

        try:
            data_str = blob.download_as_bytes().decode("utf-8")
            return json.loads(data_str)

        except Exception as e:
            # Check if it's a 404 (file not found) error
            if "404" in str(e) or "Not Found" in str(e):
                if create_table_if_not_found:
                    log_debug(f"Creating new GCS JSON file: {blob_name}")
                    blob.upload_from_string("[]", content_type="application/json")
                return []
            else:
                log_error(f"Error reading the {blob_name} JSON file from GCS: {e}")
                raise json.JSONDecodeError(f"Error reading {blob_name}", "", 0)

    def _write_json_file(self, filename: str, data: List[Dict[str, Any]]) -> None:
        """Write data to a JSON file in GCS.

        Args:
            filename (str): The name of the JSON file to write.
            data (List[Dict[str, Any]]): The data to write to the JSON file.

        Note:
            Write errors are logged and swallowed rather than raised.
        """
        blob_name = self._get_blob_name(filename)
        blob = self.bucket.blob(blob_name)

        try:
            json_data = json.dumps(data, indent=2, default=str)
            blob.upload_from_string(json_data, content_type="application/json")

        except Exception as e:
            log_error(f"Error writing to the {blob_name} JSON file in GCS: {e}")
            return
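
Aside — how the "tables" map to blobs (a sketch based on the helpers above; names are placeholders):

db = GcsJsonDb(bucket_name="my-agno-data", prefix="agno")  # prefix is normalized to "agno/"

# _get_blob_name simply joins prefix + filename + ".json":
assert db._get_blob_name("sessions") == "agno/sessions.json"

# Each read downloads the whole JSON array and each write re-uploads it in
# full, so every upsert or delete is O(file size) and concurrent writers can
# overwrite each other's changes.
rows = db._read_json_file("sessions", create_table_if_not_found=True)  # [] on first use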

    # -- Session methods --

    def delete_session(self, session_id: str) -> bool:
        """Delete a session from the GCS JSON file.

        Args:
            session_id (str): The ID of the session to delete.

        Returns:
            bool: True if the session was deleted, False otherwise.

        Raises:
            Exception: If an error occurs during deletion.
        """
        try:
            sessions = self._read_json_file(self.session_table_name)
            original_count = len(sessions)
            sessions = [s for s in sessions if s.get("session_id") != session_id]

            if len(sessions) < original_count:
                self._write_json_file(self.session_table_name, sessions)
                log_debug(f"Successfully deleted session with session_id: {session_id}")
                return True

            else:
                log_debug(f"No session found to delete with session_id: {session_id}")
                return False

        except Exception as e:
            log_warning(f"Error deleting session: {e}")
            raise e

    def delete_sessions(self, session_ids: List[str]) -> None:
        """Delete multiple sessions from the GCS JSON file.

        Args:
            session_ids (List[str]): The IDs of the sessions to delete.

        Raises:
            Exception: If an error occurs during deletion.
        """
        try:
            sessions = self._read_json_file(self.session_table_name)
            sessions = [s for s in sessions if s.get("session_id") not in session_ids]
            self._write_json_file(self.session_table_name, sessions)
            log_debug(f"Successfully deleted sessions with ids: {session_ids}")

        except Exception as e:
            log_warning(f"Error deleting sessions: {e}")
            raise e

    def get_session(
        self,
        session_id: str,
        session_type: SessionType,
        user_id: Optional[str] = None,
        deserialize: Optional[bool] = True,
    ) -> Optional[Union[AgentSession, TeamSession, WorkflowSession, Dict[str, Any]]]:
        """Read a session from the GCS JSON file.

        Args:
            session_id (str): The ID of the session to read.
            session_type (SessionType): The type of the session to read.
            user_id (Optional[str]): The ID of the user to read the session for.
            deserialize (Optional[bool]): Whether to deserialize the session.

        Returns:
            Union[Session, Dict[str, Any], None]:
                - When deserialize=True: Session object
                - When deserialize=False: Session dictionary

        Raises:
            Exception: If an error occurs while reading the session.
        """
        try:
            sessions = self._read_json_file(self.session_table_name)

            for session_data in sessions:
                if session_data.get("session_id") == session_id:
                    if user_id is not None and session_data.get("user_id") != user_id:
                        continue

                    if not deserialize:
                        return session_data

                    if session_type == SessionType.AGENT:
                        return AgentSession.from_dict(session_data)
                    elif session_type == SessionType.TEAM:
                        return TeamSession.from_dict(session_data)
                    elif session_type == SessionType.WORKFLOW:
                        return WorkflowSession.from_dict(session_data)
                    else:
                        raise ValueError(f"Invalid session type: {session_type}")

            return None

        except Exception as e:
            log_warning(f"Exception reading from session file: {e}")
            raise e

    def get_sessions(
        self,
        session_type: Optional[SessionType] = None,
        user_id: Optional[str] = None,
        component_id: Optional[str] = None,
        session_name: Optional[str] = None,
        start_timestamp: Optional[int] = None,
        end_timestamp: Optional[int] = None,
        limit: Optional[int] = None,
        page: Optional[int] = None,
        sort_by: Optional[str] = None,
        sort_order: Optional[str] = None,
        deserialize: Optional[bool] = True,
    ) -> Union[List[Session], Tuple[List[Dict[str, Any]], int]]:
        """Get all sessions from the GCS JSON file with filtering and pagination.

        Args:
            session_type (Optional[SessionType]): The type of the sessions to read.
            user_id (Optional[str]): The ID of the user to read the sessions for.
            component_id (Optional[str]): The ID of the component to read the sessions for.
            session_name (Optional[str]): The name of the session to read.
            start_timestamp (Optional[int]): The start timestamp of the sessions to read.
            end_timestamp (Optional[int]): The end timestamp of the sessions to read.
            limit (Optional[int]): The limit of the sessions to read.
            page (Optional[int]): The page of the sessions to read.
            sort_by (Optional[str]): The field to sort the sessions by.
            sort_order (Optional[str]): The order to sort the sessions by.
            deserialize (Optional[bool]): Whether to deserialize the sessions.

        Returns:
            Union[List[AgentSession], List[TeamSession], List[WorkflowSession], Tuple[List[Dict[str, Any]], int]]:
                - When deserialize=True: List of sessions
                - When deserialize=False: Tuple with list of sessions and total count

        Raises:
            Exception: If an error occurs while reading the sessions.
        """
        try:
            sessions = self._read_json_file(self.session_table_name)

            # Apply filters
            filtered_sessions = []
            for session_data in sessions:
                if user_id is not None and session_data.get("user_id") != user_id:
                    continue
                if component_id is not None:
                    if session_type == SessionType.AGENT and session_data.get("agent_id") != component_id:
                        continue
                    elif session_type == SessionType.TEAM and session_data.get("team_id") != component_id:
                        continue
                    elif session_type == SessionType.WORKFLOW and session_data.get("workflow_id") != component_id:
                        continue
                if start_timestamp is not None and session_data.get("created_at", 0) < start_timestamp:
                    continue
                if end_timestamp is not None and session_data.get("created_at", 0) > end_timestamp:
                    continue
                if session_name is not None:
                    stored_name = session_data.get("session_data", {}).get("session_name", "")
                    if session_name.lower() not in stored_name.lower():
                        continue
                session_type_value = session_type.value if isinstance(session_type, SessionType) else session_type
                if session_data.get("session_type") != session_type_value:
                    continue

                filtered_sessions.append(session_data)

            total_count = len(filtered_sessions)

            # Apply sorting
            filtered_sessions = apply_sorting(filtered_sessions, sort_by, sort_order)

            # Apply pagination
            if limit is not None:
                start_idx = 0
                if page is not None:
                    start_idx = (page - 1) * limit
                filtered_sessions = filtered_sessions[start_idx : start_idx + limit]

            if not deserialize:
                return filtered_sessions, total_count

            if session_type == SessionType.AGENT:
                return [AgentSession.from_dict(session) for session in filtered_sessions]  # type: ignore
            elif session_type == SessionType.TEAM:
                return [TeamSession.from_dict(session) for session in filtered_sessions]  # type: ignore
            elif session_type == SessionType.WORKFLOW:
                return [WorkflowSession.from_dict(session) for session in filtered_sessions]  # type: ignore
            else:
                raise ValueError(f"Invalid session type: {session_type}")

        except Exception as e:
            log_warning(f"Exception reading from session file: {e}")
            raise e
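
Aside — pagination is plain 1-indexed slice arithmetic over the filtered list, start_idx = (page - 1) * limit. A usage sketch (IDs are placeholders, and the sort fields assume apply_sorting understands them):

rows, total = db.get_sessions(
    session_type=SessionType.AGENT,
    user_id="user-123",       # placeholder
    limit=10,
    page=2,                   # rows 10-19 of the filtered, sorted result
    sort_by="created_at",
    sort_order="desc",
    deserialize=False,        # returns (list of dicts, total count before pagination)
)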

    def rename_session(
        self, session_id: str, session_type: SessionType, session_name: str, deserialize: Optional[bool] = True
    ) -> Optional[Union[Session, Dict[str, Any]]]:
        """Rename a session in the GCS JSON file."""
        try:
            sessions = self._read_json_file(self.session_table_name)

            for i, session_data in enumerate(sessions):
                if (
                    session_data.get("session_id") == session_id
                    and session_data.get("session_type") == session_type.value
                ):
                    # Update session name in session_data
                    if "session_data" not in session_data:
                        session_data["session_data"] = {}
                    session_data["session_data"]["session_name"] = session_name

                    sessions[i] = session_data
                    self._write_json_file(self.session_table_name, sessions)

                    if not deserialize:
                        return session_data

                    if session_type == SessionType.AGENT:
                        return AgentSession.from_dict(session_data)
                    elif session_type == SessionType.TEAM:
                        return TeamSession.from_dict(session_data)
                    elif session_type == SessionType.WORKFLOW:
                        return WorkflowSession.from_dict(session_data)

            return None
        except Exception as e:
            log_warning(f"Exception renaming session: {e}")
            raise e

    def upsert_session(
        self, session: Session, deserialize: Optional[bool] = True
    ) -> Optional[Union[Session, Dict[str, Any]]]:
        """Insert or update a session in the GCS JSON file."""
        try:
            sessions = self._read_json_file(self.session_table_name, create_table_if_not_found=True)
            session_dict = session.to_dict()

            # Add session_type based on session instance type
            if isinstance(session, AgentSession):
                session_dict["session_type"] = SessionType.AGENT.value
            elif isinstance(session, TeamSession):
                session_dict["session_type"] = SessionType.TEAM.value
            elif isinstance(session, WorkflowSession):
                session_dict["session_type"] = SessionType.WORKFLOW.value

            # Find existing session to update
            session_updated = False
            for i, existing_session in enumerate(sessions):
                if existing_session.get("session_id") == session_dict.get("session_id") and self._matches_session_key(
                    existing_session, session
                ):
                    # Update existing session
                    session_dict["updated_at"] = int(time.time())
                    sessions[i] = session_dict
                    session_updated = True
                    break

            if not session_updated:
                # Add new session
                session_dict["created_at"] = session_dict.get("created_at", int(time.time()))
                session_dict["updated_at"] = session_dict.get("created_at")
                sessions.append(session_dict)

            self._write_json_file(self.session_table_name, sessions)

            if not deserialize:
                return session_dict

            return session

        except Exception as e:
            log_warning(f"Exception upserting session: {e}")
            raise e

    def upsert_sessions(
        self, sessions: List[Session], deserialize: Optional[bool] = True, preserve_updated_at: bool = False
    ) -> List[Union[Session, Dict[str, Any]]]:
        """
        Bulk upsert multiple sessions for improved performance on large datasets.

        Args:
            sessions (List[Session]): List of sessions to upsert.
            deserialize (Optional[bool]): Whether to deserialize the sessions. Defaults to True.

        Returns:
            List[Union[Session, Dict[str, Any]]]: List of upserted sessions.

        Raises:
            Exception: If an error occurs during bulk upsert.
        """
        if not sessions:
            return []

        try:
            log_info(
                f"GcsJsonDb doesn't support efficient bulk operations, falling back to individual upserts for {len(sessions)} sessions"
            )

            # Fall back to individual upserts
            results = []
            for session in sessions:
                if session is not None:
                    result = self.upsert_session(session, deserialize=deserialize)
                    if result is not None:
                        results.append(result)
            return results

        except Exception as e:
            log_error(f"Exception during bulk session upsert: {e}")
            return []

    def _matches_session_key(self, existing_session: Dict[str, Any], session: Session) -> bool:
        """Check if existing session matches the key for the session type."""
        if isinstance(session, AgentSession):
            return existing_session.get("agent_id") == session.agent_id
        elif isinstance(session, TeamSession):
            return existing_session.get("team_id") == session.team_id
        elif isinstance(session, WorkflowSession):
            return existing_session.get("workflow_id") == session.workflow_id
        return False

    # -- Memory methods --
    def delete_user_memory(self, memory_id: str, user_id: Optional[str] = None) -> None:
        """Delete a user memory from the GCS JSON file.

        Args:
            memory_id (str): The ID of the memory to delete.
            user_id (Optional[str]): The ID of the user. If provided, verifies ownership before deletion.
        """
        try:
            memories = self._read_json_file(self.memory_table_name)
            original_count = len(memories)

            # Filter out the memory, with optional user_id verification
            memories = [
                m
                for m in memories
                if not (m.get("memory_id") == memory_id and (user_id is None or m.get("user_id") == user_id))
            ]

            if len(memories) < original_count:
                self._write_json_file(self.memory_table_name, memories)
                log_debug(f"Successfully deleted user memory id: {memory_id}")

            else:
                log_debug(f"No user memory found with id: {memory_id}")

        except Exception as e:
            log_warning(f"Error deleting user memory: {e}")
            raise e

    def delete_user_memories(self, memory_ids: List[str], user_id: Optional[str] = None) -> None:
        """Delete multiple user memories from the GCS JSON file.

        Args:
            memory_ids (List[str]): The IDs of the memories to delete.
            user_id (Optional[str]): The ID of the user. If provided, verifies ownership before deletion.
        """
        try:
            memories = self._read_json_file(self.memory_table_name)

            # Filter out memories, with optional user_id verification
            memories = [
                m
                for m in memories
                if not (m.get("memory_id") in memory_ids and (user_id is None or m.get("user_id") == user_id))
            ]

            self._write_json_file(self.memory_table_name, memories)
            log_debug(f"Successfully deleted user memories with ids: {memory_ids}")
        except Exception as e:
            log_warning(f"Error deleting user memories: {e}")
            raise e

    def get_all_memory_topics(self) -> List[str]:
        """Get all memory topics from the GCS JSON file.

        Returns:
            List[str]: List of unique memory topics.
        """
        try:
            memories = self._read_json_file(self.memory_table_name)
            topics = set()
            for memory in memories:
                memory_topics = memory.get("topics", [])
                if isinstance(memory_topics, list):
                    topics.update(memory_topics)
            return list(topics)

        except Exception as e:
            log_warning(f"Exception reading from memory file: {e}")
            raise e

    def get_user_memory(
        self, memory_id: str, deserialize: Optional[bool] = True, user_id: Optional[str] = None
    ) -> Optional[Union[UserMemory, Dict[str, Any]]]:
        """Get a memory from the GCS JSON file.

        Args:
            memory_id (str): The ID of the memory to retrieve.
            deserialize (Optional[bool]): Whether to deserialize to UserMemory object. Defaults to True.
            user_id (Optional[str]): The ID of the user. If provided, verifies ownership before returning.

        Returns:
            Optional[Union[UserMemory, Dict[str, Any]]]: The memory if found and ownership matches, None otherwise.
        """
        try:
            memories = self._read_json_file(self.memory_table_name)

            for memory_data in memories:
                if memory_data.get("memory_id") == memory_id:
                    # Verify user ownership if user_id is provided
                    if user_id is not None and memory_data.get("user_id") != user_id:
                        continue

                    if not deserialize:
                        return memory_data

                    return UserMemory.from_dict(memory_data)

            return None
        except Exception as e:
            log_warning(f"Exception reading from memory file: {e}")
            raise e

    def get_user_memories(
        self,
        user_id: Optional[str] = None,
        agent_id: Optional[str] = None,
        team_id: Optional[str] = None,
        topics: Optional[List[str]] = None,
        search_content: Optional[str] = None,
        limit: Optional[int] = None,
        page: Optional[int] = None,
        sort_by: Optional[str] = None,
        sort_order: Optional[str] = None,
        deserialize: Optional[bool] = True,
    ) -> Union[List[UserMemory], Tuple[List[Dict[str, Any]], int]]:
        """Get all memories from the GCS JSON file with filtering and pagination."""
        try:
            memories = self._read_json_file(self.memory_table_name)

            # Apply filters
            filtered_memories = []
            for memory_data in memories:
                if user_id is not None and memory_data.get("user_id") != user_id:
                    continue
                if agent_id is not None and memory_data.get("agent_id") != agent_id:
                    continue
                if team_id is not None and memory_data.get("team_id") != team_id:
                    continue
                if topics is not None:
                    memory_topics = memory_data.get("topics", [])
                    if not any(topic in memory_topics for topic in topics):
                        continue
                if search_content is not None:
                    memory_content = str(memory_data.get("memory", ""))
                    if search_content.lower() not in memory_content.lower():
                        continue

                filtered_memories.append(memory_data)

            total_count = len(filtered_memories)

            # Apply sorting
            filtered_memories = apply_sorting(filtered_memories, sort_by, sort_order)

            # Apply pagination
            if limit is not None:
                start_idx = 0
                if page is not None:
                    start_idx = (page - 1) * limit
                filtered_memories = filtered_memories[start_idx : start_idx + limit]

            if not deserialize:
                return filtered_memories, total_count

            return [UserMemory.from_dict(memory) for memory in filtered_memories]

        except Exception as e:
            log_warning(f"Exception reading from memory file: {e}")
            raise e

    def get_user_memory_stats(
        self, limit: Optional[int] = None, page: Optional[int] = None
    ) -> Tuple[List[Dict[str, Any]], int]:
        """Get user memory statistics.

        Args:
            limit (Optional[int]): Maximum number of results to return.
            page (Optional[int]): Page number for pagination.

        Returns:
            Tuple[List[Dict[str, Any]], int]: List of user memory statistics and total count.
        """
        try:
            memories = self._read_json_file(self.memory_table_name)
            user_stats = {}

            for memory in memories:
                memory_user_id = memory.get("user_id")

                if memory_user_id:
                    if memory_user_id not in user_stats:
                        user_stats[memory_user_id] = {
                            "user_id": memory_user_id,
                            "total_memories": 0,
                            "last_memory_updated_at": 0,
                        }
                    user_stats[memory_user_id]["total_memories"] += 1
                    updated_at = memory.get("updated_at", 0)
                    if updated_at > user_stats[memory_user_id]["last_memory_updated_at"]:
                        user_stats[memory_user_id]["last_memory_updated_at"] = updated_at

            stats_list = list(user_stats.values())
            stats_list.sort(key=lambda x: x["last_memory_updated_at"], reverse=True)

            total_count = len(stats_list)

            # Apply pagination
            if limit is not None:
                start_idx = 0
                if page is not None:
                    start_idx = (page - 1) * limit
                stats_list = stats_list[start_idx : start_idx + limit]

            return stats_list, total_count

        except Exception as e:
            log_warning(f"Exception getting user memory stats: {e}")
            raise e

    def upsert_user_memory(
        self, memory: UserMemory, deserialize: Optional[bool] = True
    ) -> Optional[Union[UserMemory, Dict[str, Any]]]:
        """Upsert a user memory in the GCS JSON file."""
        try:
            memories = self._read_json_file(self.memory_table_name, create_table_if_not_found=True)

            if memory.memory_id is None:
                memory.memory_id = str(uuid4())

            memory_dict = memory.to_dict() if hasattr(memory, "to_dict") else memory.__dict__
            memory_dict["updated_at"] = int(time.time())

            # Find existing memory to update
            memory_updated = False
            for i, existing_memory in enumerate(memories):
                if existing_memory.get("memory_id") == memory.memory_id:
                    memories[i] = memory_dict
                    memory_updated = True
                    break

            if not memory_updated:
                memories.append(memory_dict)

            self._write_json_file(self.memory_table_name, memories)

            if not deserialize:
                return memory_dict
            return UserMemory.from_dict(memory_dict)

        except Exception as e:
            log_error(f"Exception upserting user memory: {e}")
            raise e

    def upsert_memories(
        self, memories: List[UserMemory], deserialize: Optional[bool] = True, preserve_updated_at: bool = False
    ) -> List[Union[UserMemory, Dict[str, Any]]]:
        """
        Bulk upsert multiple user memories for improved performance on large datasets.

        Args:
            memories (List[UserMemory]): List of memories to upsert.
            deserialize (Optional[bool]): Whether to deserialize the memories. Defaults to True.

        Returns:
            List[Union[UserMemory, Dict[str, Any]]]: List of upserted memories.

        Raises:
            Exception: If an error occurs during bulk upsert.
        """
        if not memories:
            return []

        try:
            log_info(
                f"GcsJsonDb doesn't support efficient bulk operations, falling back to individual upserts for {len(memories)} memories"
            )
            # Fall back to individual upserts
            results = []
            for memory in memories:
                if memory is not None:
                    result = self.upsert_user_memory(memory, deserialize=deserialize)
                    if result is not None:
                        results.append(result)
            return results

        except Exception as e:
            log_error(f"Exception during bulk memory upsert: {e}")
            return []

    def clear_memories(self) -> None:
        """Delete all memories from the database.

        Raises:
            Exception: If an error occurs during deletion.
        """
        try:
            # Simply write an empty list to the memory JSON file
            self._write_json_file(self.memory_table_name, [])

        except Exception as e:
            log_warning(f"Exception deleting all memories: {e}")
            raise e
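
Aside — a sketch of the memory round-trip (the UserMemory field names are inferred from the keys this file reads and are not guaranteed to match the schema exactly):

# Assumes UserMemory.from_dict accepts these keys; see agno/db/schemas/memory.py.
memory = UserMemory.from_dict({
    "memory_id": None,                 # upsert_user_memory assigns a uuid4 when None
    "user_id": "user-123",             # placeholder
    "memory": "Prefers metric units",
    "topics": ["preferences"],
})
saved = db.upsert_user_memory(memory)                       # rewrites memories.json in full
hits = db.get_user_memories(user_id="user-123", topics=["preferences"])
db.delete_user_memory(saved.memory_id, user_id="user-123")  # ownership checked before delete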
762
|
+
# -- Metrics methods --
|
|
763
|
+
def calculate_metrics(self) -> Optional[list[dict]]:
|
|
764
|
+
"""Calculate metrics for all dates without complete metrics."""
|
|
765
|
+
try:
|
|
766
|
+
metrics = self._read_json_file(self.metrics_table_name, create_table_if_not_found=True)
|
|
767
|
+
|
|
768
|
+
starting_date = self._get_metrics_calculation_starting_date(metrics)
|
|
769
|
+
if starting_date is None:
|
|
770
|
+
log_info("No session data found. Won't calculate metrics.")
|
|
771
|
+
return None
|
|
772
|
+
|
|
773
|
+
dates_to_process = get_dates_to_calculate_metrics_for(starting_date)
|
|
774
|
+
if not dates_to_process:
|
|
775
|
+
log_info("Metrics already calculated for all relevant dates.")
|
|
776
|
+
return None
|
|
777
|
+
|
|
778
|
+
start_timestamp = int(datetime.combine(dates_to_process[0], datetime.min.time()).timestamp())
|
|
779
|
+
end_timestamp = int(
|
|
780
|
+
datetime.combine(dates_to_process[-1] + timedelta(days=1), datetime.min.time()).timestamp()
|
|
781
|
+
)
|
|
782
|
+

            sessions = self._get_all_sessions_for_metrics_calculation(start_timestamp, end_timestamp)
            all_sessions_data = fetch_all_sessions_data(
                sessions=sessions, dates_to_process=dates_to_process, start_timestamp=start_timestamp
            )
            if not all_sessions_data:
                log_info("No new session data found. Won't calculate metrics.")
                return None

            results = []

            for date_to_process in dates_to_process:
                date_key = date_to_process.isoformat()
                sessions_for_date = all_sessions_data.get(date_key, {})

                # Skip dates with no sessions
                if not any(len(sessions) > 0 for sessions in sessions_for_date.values()):
                    continue

                metrics_record = calculate_date_metrics(date_to_process, sessions_for_date)

                # Upsert metrics record
                existing_record_idx = None
                for i, existing_metric in enumerate(metrics):
                    if (
                        existing_metric.get("date") == str(date_to_process)
                        and existing_metric.get("aggregation_period") == "daily"
                    ):
                        existing_record_idx = i
                        break

                if existing_record_idx is not None:
                    metrics[existing_record_idx] = metrics_record
                else:
                    metrics.append(metrics_record)

                results.append(metrics_record)

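            # Persist once, after all dates are processed, so each call writes
            # the metrics file at most one time.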
            if results:
                self._write_json_file(self.metrics_table_name, metrics)

            return results

        except Exception as e:
            log_warning(f"Exception refreshing metrics: {e}")
            raise e

    def _get_metrics_calculation_starting_date(self, metrics: List[Dict[str, Any]]) -> Optional[date]:
        """Get the first date for which metrics calculation is needed."""
        if metrics:
            # Sort by date in descending order
            sorted_metrics = sorted(metrics, key=lambda x: x.get("date", ""), reverse=True)
            latest_metric = sorted_metrics[0]

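            # If the latest day's metrics are complete, resume from the next
            # day; otherwise recompute that same day, since its numbers may
            # still change.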
            if latest_metric.get("completed", False):
                latest_date = datetime.strptime(latest_metric["date"], "%Y-%m-%d").date()
                return latest_date + timedelta(days=1)
            else:
                return datetime.strptime(latest_metric["date"], "%Y-%m-%d").date()

        # No metrics records. Return the date of the first recorded session.
        # We need to get sessions of all types, so we'll read directly from the file
        all_sessions = self._read_json_file(self.session_table_name)
        if all_sessions:
            # Sort by created_at
            all_sessions.sort(key=lambda x: x.get("created_at", 0))
            first_session_date = all_sessions[0]["created_at"]
            return datetime.fromtimestamp(first_session_date, tz=timezone.utc).date()

        return None

    def _get_all_sessions_for_metrics_calculation(
        self, start_timestamp: Optional[int] = None, end_timestamp: Optional[int] = None
    ) -> List[Dict[str, Any]]:
        """Get all sessions for metrics calculation."""
        try:
            sessions = self._read_json_file(self.session_table_name)

            filtered_sessions = []
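            # created_at is compared as a Unix timestamp; a session missing the
            # field defaults to 0 and is dropped by any start_timestamp filter.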
            for session in sessions:
                created_at = session.get("created_at", 0)
                if start_timestamp is not None and created_at < start_timestamp:
                    continue
                if end_timestamp is not None and created_at >= end_timestamp:
                    continue

                # Only include necessary fields for metrics
                filtered_session = {
                    "user_id": session.get("user_id"),
                    "session_data": session.get("session_data"),
                    "runs": session.get("runs"),
                    "created_at": session.get("created_at"),
                    "session_type": session.get("session_type"),
                }
                filtered_sessions.append(filtered_session)

            return filtered_sessions

        except Exception as e:
            log_warning(f"Exception reading sessions for metrics: {e}")
            raise e

    def get_metrics(
        self,
        starting_date: Optional[date] = None,
        ending_date: Optional[date] = None,
    ) -> Tuple[List[dict], Optional[int]]:
        """Get all metrics matching the given date range."""
        try:
            metrics = self._read_json_file(self.metrics_table_name)

            filtered_metrics = []
            latest_updated_at = None

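            # A record with a missing or malformed "date" raises ValueError in
            # strptime below; the except clause logs it and re-raises.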
            for metric in metrics:
                metric_date = datetime.strptime(metric.get("date", ""), "%Y-%m-%d").date()

                if starting_date and metric_date < starting_date:
                    continue
                if ending_date and metric_date > ending_date:
                    continue

                filtered_metrics.append(metric)

                updated_at = metric.get("updated_at")
                if updated_at and (latest_updated_at is None or updated_at > latest_updated_at):
                    latest_updated_at = updated_at

            return filtered_metrics, latest_updated_at

        except Exception as e:
            log_warning(f"Exception getting metrics: {e}")
            raise e

    # -- Knowledge methods --
    def delete_knowledge_content(self, id: str):
        """Delete knowledge content by ID."""
        try:
            knowledge_items = self._read_json_file(self.knowledge_table_name)
            knowledge_items = [item for item in knowledge_items if item.get("id") != id]
            self._write_json_file(self.knowledge_table_name, knowledge_items)
        except Exception as e:
            log_warning(f"Error deleting knowledge content: {e}")
            raise e

    def get_knowledge_content(self, id: str) -> Optional[KnowledgeRow]:
        """Get knowledge content by ID."""
        try:
            knowledge_items = self._read_json_file(self.knowledge_table_name)

            for item in knowledge_items:
                if item.get("id") == id:
                    return KnowledgeRow.model_validate(item)

            return None
        except Exception as e:
            log_warning(f"Error getting knowledge content: {e}")
            raise e

    def get_knowledge_contents(
        self,
        limit: Optional[int] = None,
        page: Optional[int] = None,
        sort_by: Optional[str] = None,
        sort_order: Optional[str] = None,
    ) -> Tuple[List[KnowledgeRow], int]:
        """Get all knowledge contents from the GCS JSON file."""
        try:
            knowledge_items = self._read_json_file(self.knowledge_table_name)

            total_count = len(knowledge_items)

            # Apply sorting
            knowledge_items = apply_sorting(knowledge_items, sort_by, sort_order)

            # Apply pagination
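            # Pagination is 1-based: page N with a given limit maps to the
            # slice [(N - 1) * limit : N * limit]. A page argument without a
            # limit is ignored. The other list methods below use the same
            # scheme.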
            if limit is not None:
                start_idx = 0
                if page is not None:
                    start_idx = (page - 1) * limit
                knowledge_items = knowledge_items[start_idx : start_idx + limit]

            return [KnowledgeRow.model_validate(item) for item in knowledge_items], total_count

        except Exception as e:
            log_warning(f"Error getting knowledge contents: {e}")
            raise e

    def upsert_knowledge_content(self, knowledge_row: KnowledgeRow):
        """Upsert knowledge content in the GCS JSON file."""
        try:
            knowledge_items = self._read_json_file(self.knowledge_table_name, create_table_if_not_found=True)
            knowledge_dict = knowledge_row.model_dump()

            # Find existing item to update
            item_updated = False
            for i, existing_item in enumerate(knowledge_items):
                if existing_item.get("id") == knowledge_row.id:
                    knowledge_items[i] = knowledge_dict
                    item_updated = True
                    break

            if not item_updated:
                knowledge_items.append(knowledge_dict)

            self._write_json_file(self.knowledge_table_name, knowledge_items)
            return knowledge_row

        except Exception as e:
            log_warning(f"Error upserting knowledge row: {e}")
            raise e

    # -- Eval methods --
    def create_eval_run(self, eval_run: EvalRunRecord) -> Optional[EvalRunRecord]:
        """Create an EvalRunRecord in the GCS JSON file."""
        try:
            eval_runs = self._read_json_file(self.eval_table_name, create_table_if_not_found=True)

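            # created_at/updated_at are stamped here, overriding any values
            # already present on the record.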
            current_time = int(time.time())
            eval_dict = eval_run.model_dump()
            eval_dict["created_at"] = current_time
            eval_dict["updated_at"] = current_time

            eval_runs.append(eval_dict)
            self._write_json_file(self.eval_table_name, eval_runs)

            return eval_run
        except Exception as e:
            log_warning(f"Error creating eval run: {e}")
            raise e

    def delete_eval_run(self, eval_run_id: str) -> None:
        """Delete an eval run from the GCS JSON file."""
        try:
            eval_runs = self._read_json_file(self.eval_table_name)
            original_count = len(eval_runs)
            eval_runs = [run for run in eval_runs if run.get("run_id") != eval_run_id]

            if len(eval_runs) < original_count:
                self._write_json_file(self.eval_table_name, eval_runs)
                log_debug(f"Deleted eval run with ID: {eval_run_id}")
            else:
                log_warning(f"No eval run found with ID: {eval_run_id}")
        except Exception as e:
            log_warning(f"Error deleting eval run {eval_run_id}: {e}")
            raise e

    def delete_eval_runs(self, eval_run_ids: List[str]) -> None:
        """Delete multiple eval runs from the GCS JSON file."""
        try:
            eval_runs = self._read_json_file(self.eval_table_name)
            original_count = len(eval_runs)
            eval_runs = [run for run in eval_runs if run.get("run_id") not in eval_run_ids]

            deleted_count = original_count - len(eval_runs)
            if deleted_count > 0:
                self._write_json_file(self.eval_table_name, eval_runs)
                log_debug(f"Deleted {deleted_count} eval runs")
            else:
                log_warning(f"No eval runs found with IDs: {eval_run_ids}")
        except Exception as e:
            log_warning(f"Error deleting eval runs {eval_run_ids}: {e}")
            raise e

    def get_eval_run(
        self, eval_run_id: str, deserialize: Optional[bool] = True
    ) -> Optional[Union[EvalRunRecord, Dict[str, Any]]]:
        """Get an eval run from the GCS JSON file."""
        try:
            eval_runs = self._read_json_file(self.eval_table_name)

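            # deserialize=False returns the raw stored dict, which callers that
            # re-serialize to JSON can use to skip model validation.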
            for run_data in eval_runs:
                if run_data.get("run_id") == eval_run_id:
                    if not deserialize:
                        return run_data
                    return EvalRunRecord.model_validate(run_data)

            return None
        except Exception as e:
            log_warning(f"Exception getting eval run {eval_run_id}: {e}")
            raise e

    def get_eval_runs(
        self,
        limit: Optional[int] = None,
        page: Optional[int] = None,
        sort_by: Optional[str] = None,
        sort_order: Optional[str] = None,
        agent_id: Optional[str] = None,
        team_id: Optional[str] = None,
        workflow_id: Optional[str] = None,
        model_id: Optional[str] = None,
        filter_type: Optional[EvalFilterType] = None,
        eval_type: Optional[List[EvalType]] = None,
        deserialize: Optional[bool] = True,
    ) -> Union[List[EvalRunRecord], Tuple[List[Dict[str, Any]], int]]:
        """Get all eval runs from the GCS JSON file with filtering and pagination."""
        try:
            eval_runs = self._read_json_file(self.eval_table_name)

            # Apply filters
            filtered_runs = []
            for run_data in eval_runs:
                if agent_id is not None and run_data.get("agent_id") != agent_id:
                    continue
                if team_id is not None and run_data.get("team_id") != team_id:
                    continue
                if workflow_id is not None and run_data.get("workflow_id") != workflow_id:
                    continue
                if model_id is not None and run_data.get("model_id") != model_id:
                    continue
                if eval_type is not None and len(eval_type) > 0:
                    if run_data.get("eval_type") not in eval_type:
                        continue
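                # filter_type narrows results to runs attached to a given
                # entity kind; assuming EvalType is a str-based enum, the
                # membership check above compares equal to the stored string.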
                if filter_type is not None:
                    if filter_type == EvalFilterType.AGENT and run_data.get("agent_id") is None:
                        continue
                    elif filter_type == EvalFilterType.TEAM and run_data.get("team_id") is None:
                        continue
                    elif filter_type == EvalFilterType.WORKFLOW and run_data.get("workflow_id") is None:
                        continue

                filtered_runs.append(run_data)

            total_count = len(filtered_runs)

            # Apply sorting (default by created_at desc)
            if sort_by is None:
                filtered_runs.sort(key=lambda x: x.get("created_at", 0), reverse=True)
            else:
                filtered_runs = apply_sorting(filtered_runs, sort_by, sort_order)

            # Apply pagination
            if limit is not None:
                start_idx = 0
                if page is not None:
                    start_idx = (page - 1) * limit
                filtered_runs = filtered_runs[start_idx : start_idx + limit]

            if not deserialize:
                return filtered_runs, total_count

            return [EvalRunRecord.model_validate(run) for run in filtered_runs]

        except Exception as e:
            log_warning(f"Exception getting eval runs: {e}")
            raise e

    def rename_eval_run(
        self, eval_run_id: str, name: str, deserialize: Optional[bool] = True
    ) -> Optional[Union[EvalRunRecord, Dict[str, Any]]]:
        """Rename an eval run in the GCS JSON file."""
        try:
            eval_runs = self._read_json_file(self.eval_table_name)

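            # Linear scan: the first run whose run_id matches is renamed,
            # written back, and returned; None means no match was found.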
            for i, run_data in enumerate(eval_runs):
                if run_data.get("run_id") == eval_run_id:
                    run_data["name"] = name
                    run_data["updated_at"] = int(time.time())
                    eval_runs[i] = run_data
                    self._write_json_file(self.eval_table_name, eval_runs)

                    if not deserialize:
                        return run_data
                    return EvalRunRecord.model_validate(run_data)

            return None
        except Exception as e:
            log_warning(f"Error renaming eval run {eval_run_id}: {e}")
            raise e

    # -- Cultural Knowledge methods --
    def clear_cultural_knowledge(self) -> None:
        """Delete all cultural knowledge from the database.

        Raises:
            Exception: If an error occurs during deletion.
        """
        try:
            self._write_json_file(self.culture_table_name, [])
        except Exception as e:
            log_warning(f"Exception deleting all cultural knowledge: {e}")
            raise e

    def delete_cultural_knowledge(self, id: str) -> None:
        """Delete cultural knowledge by ID.

        Args:
            id (str): The ID of the cultural knowledge to delete.

        Raises:
            Exception: If an error occurs during deletion.
        """
        try:
            cultural_knowledge = self._read_json_file(self.culture_table_name)
            cultural_knowledge = [item for item in cultural_knowledge if item.get("id") != id]
            self._write_json_file(self.culture_table_name, cultural_knowledge)
            log_debug(f"Deleted cultural knowledge with ID: {id}")
        except Exception as e:
            log_warning(f"Error deleting cultural knowledge: {e}")
            raise e

    def get_cultural_knowledge(
        self, id: str, deserialize: Optional[bool] = True
    ) -> Optional[Union[CulturalKnowledge, Dict[str, Any]]]:
        """Get cultural knowledge by ID.

        Args:
            id (str): The ID of the cultural knowledge to retrieve.
            deserialize (Optional[bool]): Whether to deserialize to CulturalKnowledge object. Defaults to True.

        Returns:
            Optional[Union[CulturalKnowledge, Dict[str, Any]]]: The cultural knowledge if found, None otherwise.

        Raises:
            Exception: If an error occurs during retrieval.
        """
        try:
            cultural_knowledge = self._read_json_file(self.culture_table_name)

            for item in cultural_knowledge:
                if item.get("id") == id:
                    if not deserialize:
                        return item
                    return deserialize_cultural_knowledge_from_db(item)

            return None
        except Exception as e:
            log_warning(f"Error getting cultural knowledge: {e}")
            raise e

    def get_all_cultural_knowledge(
        self,
        agent_id: Optional[str] = None,
        team_id: Optional[str] = None,
        name: Optional[str] = None,
        limit: Optional[int] = None,
        page: Optional[int] = None,
        sort_by: Optional[str] = None,
        sort_order: Optional[str] = None,
        deserialize: Optional[bool] = True,
    ) -> Union[List[CulturalKnowledge], Tuple[List[Dict[str, Any]], int]]:
        """Get all cultural knowledge with filtering and pagination.

        Args:
            agent_id (Optional[str]): Filter by agent ID.
            team_id (Optional[str]): Filter by team ID.
            name (Optional[str]): Filter by name (case-insensitive partial match).
            limit (Optional[int]): Maximum number of results to return.
            page (Optional[int]): Page number for pagination.
            sort_by (Optional[str]): Field to sort by.
            sort_order (Optional[str]): Sort order ('asc' or 'desc').
            deserialize (Optional[bool]): Whether to deserialize to CulturalKnowledge objects. Defaults to True.

        Returns:
            Union[List[CulturalKnowledge], Tuple[List[Dict[str, Any]], int]]:
                - When deserialize=True: List of CulturalKnowledge objects
                - When deserialize=False: Tuple with list of dictionaries and total count

        Raises:
            Exception: If an error occurs during retrieval.
        """
        try:
            cultural_knowledge = self._read_json_file(self.culture_table_name)

            # Apply filters
            filtered_items = []
            for item in cultural_knowledge:
                if agent_id is not None and item.get("agent_id") != agent_id:
                    continue
                if team_id is not None and item.get("team_id") != team_id:
                    continue
                if name is not None and name.lower() not in item.get("name", "").lower():
                    continue

                filtered_items.append(item)

            total_count = len(filtered_items)

            # Apply sorting
            filtered_items = apply_sorting(filtered_items, sort_by, sort_order)

            # Apply pagination
            if limit is not None:
                start_idx = 0
                if page is not None:
                    start_idx = (page - 1) * limit
                filtered_items = filtered_items[start_idx : start_idx + limit]

            if not deserialize:
                return filtered_items, total_count

            return [deserialize_cultural_knowledge_from_db(item) for item in filtered_items]

        except Exception as e:
            log_warning(f"Error getting all cultural knowledge: {e}")
            raise e

    def upsert_cultural_knowledge(
        self, cultural_knowledge: CulturalKnowledge, deserialize: Optional[bool] = True
    ) -> Optional[Union[CulturalKnowledge, Dict[str, Any]]]:
        """Upsert cultural knowledge in the GCS JSON file.

        Args:
            cultural_knowledge (CulturalKnowledge): The cultural knowledge to upsert.
            deserialize (Optional[bool]): Whether to deserialize the result. Defaults to True.

        Returns:
            Optional[Union[CulturalKnowledge, Dict[str, Any]]]: The upserted cultural knowledge.

        Raises:
            Exception: If an error occurs during upsert.
        """
        try:
            cultural_knowledge_list = self._read_json_file(self.culture_table_name, create_table_if_not_found=True)

            # Serialize content, categories, and notes into a dict for DB storage
            content_dict = serialize_cultural_knowledge_for_db(cultural_knowledge)

            # Create the item dict with serialized content
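            # NOTE: an empty serialized content dict is normalized to None, and
            # updated_at is always refreshed to the current time.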
            cultural_knowledge_dict = {
                "id": cultural_knowledge.id,
                "name": cultural_knowledge.name,
                "summary": cultural_knowledge.summary,
                "content": content_dict if content_dict else None,
                "metadata": cultural_knowledge.metadata,
                "input": cultural_knowledge.input,
                "created_at": cultural_knowledge.created_at,
                "updated_at": int(time.time()),
                "agent_id": cultural_knowledge.agent_id,
                "team_id": cultural_knowledge.team_id,
            }

            # Find existing item to update
            item_updated = False
            for i, existing_item in enumerate(cultural_knowledge_list):
                if existing_item.get("id") == cultural_knowledge.id:
                    cultural_knowledge_list[i] = cultural_knowledge_dict
                    item_updated = True
                    break

            if not item_updated:
                cultural_knowledge_list.append(cultural_knowledge_dict)

            self._write_json_file(self.culture_table_name, cultural_knowledge_list)

            if not deserialize:
                return cultural_knowledge_dict

            return deserialize_cultural_knowledge_from_db(cultural_knowledge_dict)

        except Exception as e:
            log_warning(f"Error upserting cultural knowledge: {e}")
            raise e
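
# Usage sketch (illustrative only, not part of the package): assuming a
# GcsJsonDb instance `db` already constructed with this package's bucket
# settings, the methods above compose as follows. Field values are
# hypothetical.
#
#   db.upsert_knowledge_content(KnowledgeRow(id="k1", name="doc"))
#   rows, total = db.get_knowledge_contents(limit=10, page=1, sort_by="name")
#   runs = db.get_eval_runs(agent_id="agent-1", limit=5)
#
# Every call re-reads and rewrites whole JSON objects in GCS, so these APIs
# suit small datasets and development use rather than high-volume workloads.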