agno 2.1.2__py3-none-any.whl → 2.3.13__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- agno/agent/agent.py +5540 -2273
- agno/api/api.py +2 -0
- agno/api/os.py +1 -1
- agno/compression/__init__.py +3 -0
- agno/compression/manager.py +247 -0
- agno/culture/__init__.py +3 -0
- agno/culture/manager.py +956 -0
- agno/db/async_postgres/__init__.py +3 -0
- agno/db/base.py +689 -6
- agno/db/dynamo/dynamo.py +933 -37
- agno/db/dynamo/schemas.py +174 -10
- agno/db/dynamo/utils.py +63 -4
- agno/db/firestore/firestore.py +831 -9
- agno/db/firestore/schemas.py +51 -0
- agno/db/firestore/utils.py +102 -4
- agno/db/gcs_json/gcs_json_db.py +660 -12
- agno/db/gcs_json/utils.py +60 -26
- agno/db/in_memory/in_memory_db.py +287 -14
- agno/db/in_memory/utils.py +60 -2
- agno/db/json/json_db.py +590 -14
- agno/db/json/utils.py +60 -26
- agno/db/migrations/manager.py +199 -0
- agno/db/migrations/v1_to_v2.py +43 -13
- agno/db/migrations/versions/__init__.py +0 -0
- agno/db/migrations/versions/v2_3_0.py +938 -0
- agno/db/mongo/__init__.py +15 -1
- agno/db/mongo/async_mongo.py +2760 -0
- agno/db/mongo/mongo.py +879 -11
- agno/db/mongo/schemas.py +42 -0
- agno/db/mongo/utils.py +80 -8
- agno/db/mysql/__init__.py +2 -1
- agno/db/mysql/async_mysql.py +2912 -0
- agno/db/mysql/mysql.py +946 -68
- agno/db/mysql/schemas.py +72 -10
- agno/db/mysql/utils.py +198 -7
- agno/db/postgres/__init__.py +2 -1
- agno/db/postgres/async_postgres.py +2579 -0
- agno/db/postgres/postgres.py +942 -57
- agno/db/postgres/schemas.py +81 -18
- agno/db/postgres/utils.py +164 -2
- agno/db/redis/redis.py +671 -7
- agno/db/redis/schemas.py +50 -0
- agno/db/redis/utils.py +65 -7
- agno/db/schemas/__init__.py +2 -1
- agno/db/schemas/culture.py +120 -0
- agno/db/schemas/evals.py +1 -0
- agno/db/schemas/memory.py +17 -2
- agno/db/singlestore/schemas.py +63 -0
- agno/db/singlestore/singlestore.py +949 -83
- agno/db/singlestore/utils.py +60 -2
- agno/db/sqlite/__init__.py +2 -1
- agno/db/sqlite/async_sqlite.py +2911 -0
- agno/db/sqlite/schemas.py +62 -0
- agno/db/sqlite/sqlite.py +965 -46
- agno/db/sqlite/utils.py +169 -8
- agno/db/surrealdb/__init__.py +3 -0
- agno/db/surrealdb/metrics.py +292 -0
- agno/db/surrealdb/models.py +334 -0
- agno/db/surrealdb/queries.py +71 -0
- agno/db/surrealdb/surrealdb.py +1908 -0
- agno/db/surrealdb/utils.py +147 -0
- agno/db/utils.py +2 -0
- agno/eval/__init__.py +10 -0
- agno/eval/accuracy.py +75 -55
- agno/eval/agent_as_judge.py +861 -0
- agno/eval/base.py +29 -0
- agno/eval/performance.py +16 -7
- agno/eval/reliability.py +28 -16
- agno/eval/utils.py +35 -17
- agno/exceptions.py +27 -2
- agno/filters.py +354 -0
- agno/guardrails/prompt_injection.py +1 -0
- agno/hooks/__init__.py +3 -0
- agno/hooks/decorator.py +164 -0
- agno/integrations/discord/client.py +1 -1
- agno/knowledge/chunking/agentic.py +13 -10
- agno/knowledge/chunking/fixed.py +4 -1
- agno/knowledge/chunking/semantic.py +9 -4
- agno/knowledge/chunking/strategy.py +59 -15
- agno/knowledge/embedder/fastembed.py +1 -1
- agno/knowledge/embedder/nebius.py +1 -1
- agno/knowledge/embedder/ollama.py +8 -0
- agno/knowledge/embedder/openai.py +8 -8
- agno/knowledge/embedder/sentence_transformer.py +6 -2
- agno/knowledge/embedder/vllm.py +262 -0
- agno/knowledge/knowledge.py +1618 -318
- agno/knowledge/reader/base.py +6 -2
- agno/knowledge/reader/csv_reader.py +8 -10
- agno/knowledge/reader/docx_reader.py +5 -6
- agno/knowledge/reader/field_labeled_csv_reader.py +16 -20
- agno/knowledge/reader/json_reader.py +5 -4
- agno/knowledge/reader/markdown_reader.py +8 -8
- agno/knowledge/reader/pdf_reader.py +17 -19
- agno/knowledge/reader/pptx_reader.py +101 -0
- agno/knowledge/reader/reader_factory.py +32 -3
- agno/knowledge/reader/s3_reader.py +3 -3
- agno/knowledge/reader/tavily_reader.py +193 -0
- agno/knowledge/reader/text_reader.py +22 -10
- agno/knowledge/reader/web_search_reader.py +1 -48
- agno/knowledge/reader/website_reader.py +10 -10
- agno/knowledge/reader/wikipedia_reader.py +33 -1
- agno/knowledge/types.py +1 -0
- agno/knowledge/utils.py +72 -7
- agno/media.py +22 -6
- agno/memory/__init__.py +14 -1
- agno/memory/manager.py +544 -83
- agno/memory/strategies/__init__.py +15 -0
- agno/memory/strategies/base.py +66 -0
- agno/memory/strategies/summarize.py +196 -0
- agno/memory/strategies/types.py +37 -0
- agno/models/aimlapi/aimlapi.py +17 -0
- agno/models/anthropic/claude.py +515 -40
- agno/models/aws/bedrock.py +102 -21
- agno/models/aws/claude.py +131 -274
- agno/models/azure/ai_foundry.py +41 -19
- agno/models/azure/openai_chat.py +39 -8
- agno/models/base.py +1249 -525
- agno/models/cerebras/cerebras.py +91 -21
- agno/models/cerebras/cerebras_openai.py +21 -2
- agno/models/cohere/chat.py +40 -6
- agno/models/cometapi/cometapi.py +18 -1
- agno/models/dashscope/dashscope.py +2 -3
- agno/models/deepinfra/deepinfra.py +18 -1
- agno/models/deepseek/deepseek.py +69 -3
- agno/models/fireworks/fireworks.py +18 -1
- agno/models/google/gemini.py +877 -80
- agno/models/google/utils.py +22 -0
- agno/models/groq/groq.py +51 -18
- agno/models/huggingface/huggingface.py +17 -6
- agno/models/ibm/watsonx.py +16 -6
- agno/models/internlm/internlm.py +18 -1
- agno/models/langdb/langdb.py +13 -1
- agno/models/litellm/chat.py +44 -9
- agno/models/litellm/litellm_openai.py +18 -1
- agno/models/message.py +28 -5
- agno/models/meta/llama.py +47 -14
- agno/models/meta/llama_openai.py +22 -17
- agno/models/mistral/mistral.py +8 -4
- agno/models/nebius/nebius.py +6 -7
- agno/models/nvidia/nvidia.py +20 -3
- agno/models/ollama/chat.py +24 -8
- agno/models/openai/chat.py +104 -29
- agno/models/openai/responses.py +101 -81
- agno/models/openrouter/openrouter.py +60 -3
- agno/models/perplexity/perplexity.py +17 -1
- agno/models/portkey/portkey.py +7 -6
- agno/models/requesty/requesty.py +24 -4
- agno/models/response.py +73 -2
- agno/models/sambanova/sambanova.py +20 -3
- agno/models/siliconflow/siliconflow.py +19 -2
- agno/models/together/together.py +20 -3
- agno/models/utils.py +254 -8
- agno/models/vercel/v0.py +20 -3
- agno/models/vertexai/__init__.py +0 -0
- agno/models/vertexai/claude.py +190 -0
- agno/models/vllm/vllm.py +19 -14
- agno/models/xai/xai.py +19 -2
- agno/os/app.py +549 -152
- agno/os/auth.py +190 -3
- agno/os/config.py +23 -0
- agno/os/interfaces/a2a/router.py +8 -11
- agno/os/interfaces/a2a/utils.py +1 -1
- agno/os/interfaces/agui/router.py +18 -3
- agno/os/interfaces/agui/utils.py +152 -39
- agno/os/interfaces/slack/router.py +55 -37
- agno/os/interfaces/slack/slack.py +9 -1
- agno/os/interfaces/whatsapp/router.py +0 -1
- agno/os/interfaces/whatsapp/security.py +3 -1
- agno/os/mcp.py +110 -52
- agno/os/middleware/__init__.py +2 -0
- agno/os/middleware/jwt.py +676 -112
- agno/os/router.py +40 -1478
- agno/os/routers/agents/__init__.py +3 -0
- agno/os/routers/agents/router.py +599 -0
- agno/os/routers/agents/schema.py +261 -0
- agno/os/routers/evals/evals.py +96 -39
- agno/os/routers/evals/schemas.py +65 -33
- agno/os/routers/evals/utils.py +80 -10
- agno/os/routers/health.py +10 -4
- agno/os/routers/knowledge/knowledge.py +196 -38
- agno/os/routers/knowledge/schemas.py +82 -22
- agno/os/routers/memory/memory.py +279 -52
- agno/os/routers/memory/schemas.py +46 -17
- agno/os/routers/metrics/metrics.py +20 -8
- agno/os/routers/metrics/schemas.py +16 -16
- agno/os/routers/session/session.py +462 -34
- agno/os/routers/teams/__init__.py +3 -0
- agno/os/routers/teams/router.py +512 -0
- agno/os/routers/teams/schema.py +257 -0
- agno/os/routers/traces/__init__.py +3 -0
- agno/os/routers/traces/schemas.py +414 -0
- agno/os/routers/traces/traces.py +499 -0
- agno/os/routers/workflows/__init__.py +3 -0
- agno/os/routers/workflows/router.py +624 -0
- agno/os/routers/workflows/schema.py +75 -0
- agno/os/schema.py +256 -693
- agno/os/scopes.py +469 -0
- agno/os/utils.py +514 -36
- agno/reasoning/anthropic.py +80 -0
- agno/reasoning/gemini.py +73 -0
- agno/reasoning/openai.py +5 -0
- agno/reasoning/vertexai.py +76 -0
- agno/run/__init__.py +6 -0
- agno/run/agent.py +155 -32
- agno/run/base.py +55 -3
- agno/run/requirement.py +181 -0
- agno/run/team.py +125 -38
- agno/run/workflow.py +72 -18
- agno/session/agent.py +102 -89
- agno/session/summary.py +56 -15
- agno/session/team.py +164 -90
- agno/session/workflow.py +405 -40
- agno/table.py +10 -0
- agno/team/team.py +3974 -1903
- agno/tools/dalle.py +2 -4
- agno/tools/eleven_labs.py +23 -25
- agno/tools/exa.py +21 -16
- agno/tools/file.py +153 -23
- agno/tools/file_generation.py +16 -10
- agno/tools/firecrawl.py +15 -7
- agno/tools/function.py +193 -38
- agno/tools/gmail.py +238 -14
- agno/tools/google_drive.py +271 -0
- agno/tools/googlecalendar.py +36 -8
- agno/tools/googlesheets.py +20 -5
- agno/tools/jira.py +20 -0
- agno/tools/mcp/__init__.py +10 -0
- agno/tools/mcp/mcp.py +331 -0
- agno/tools/mcp/multi_mcp.py +347 -0
- agno/tools/mcp/params.py +24 -0
- agno/tools/mcp_toolbox.py +3 -3
- agno/tools/models/nebius.py +5 -5
- agno/tools/models_labs.py +20 -10
- agno/tools/nano_banana.py +151 -0
- agno/tools/notion.py +204 -0
- agno/tools/parallel.py +314 -0
- agno/tools/postgres.py +76 -36
- agno/tools/redshift.py +406 -0
- agno/tools/scrapegraph.py +1 -1
- agno/tools/shopify.py +1519 -0
- agno/tools/slack.py +18 -3
- agno/tools/spotify.py +919 -0
- agno/tools/tavily.py +146 -0
- agno/tools/toolkit.py +25 -0
- agno/tools/workflow.py +8 -1
- agno/tools/yfinance.py +12 -11
- agno/tracing/__init__.py +12 -0
- agno/tracing/exporter.py +157 -0
- agno/tracing/schemas.py +276 -0
- agno/tracing/setup.py +111 -0
- agno/utils/agent.py +938 -0
- agno/utils/cryptography.py +22 -0
- agno/utils/dttm.py +33 -0
- agno/utils/events.py +151 -3
- agno/utils/gemini.py +15 -5
- agno/utils/hooks.py +118 -4
- agno/utils/http.py +113 -2
- agno/utils/knowledge.py +12 -5
- agno/utils/log.py +1 -0
- agno/utils/mcp.py +92 -2
- agno/utils/media.py +187 -1
- agno/utils/merge_dict.py +3 -3
- agno/utils/message.py +60 -0
- agno/utils/models/ai_foundry.py +9 -2
- agno/utils/models/claude.py +49 -14
- agno/utils/models/cohere.py +9 -2
- agno/utils/models/llama.py +9 -2
- agno/utils/models/mistral.py +4 -2
- agno/utils/print_response/agent.py +109 -16
- agno/utils/print_response/team.py +223 -30
- agno/utils/print_response/workflow.py +251 -34
- agno/utils/streamlit.py +1 -1
- agno/utils/team.py +98 -9
- agno/utils/tokens.py +657 -0
- agno/vectordb/base.py +39 -7
- agno/vectordb/cassandra/cassandra.py +21 -5
- agno/vectordb/chroma/chromadb.py +43 -12
- agno/vectordb/clickhouse/clickhousedb.py +21 -5
- agno/vectordb/couchbase/couchbase.py +29 -5
- agno/vectordb/lancedb/lance_db.py +92 -181
- agno/vectordb/langchaindb/langchaindb.py +24 -4
- agno/vectordb/lightrag/lightrag.py +17 -3
- agno/vectordb/llamaindex/llamaindexdb.py +25 -5
- agno/vectordb/milvus/milvus.py +50 -37
- agno/vectordb/mongodb/__init__.py +7 -1
- agno/vectordb/mongodb/mongodb.py +36 -30
- agno/vectordb/pgvector/pgvector.py +201 -77
- agno/vectordb/pineconedb/pineconedb.py +41 -23
- agno/vectordb/qdrant/qdrant.py +67 -54
- agno/vectordb/redis/__init__.py +9 -0
- agno/vectordb/redis/redisdb.py +682 -0
- agno/vectordb/singlestore/singlestore.py +50 -29
- agno/vectordb/surrealdb/surrealdb.py +31 -41
- agno/vectordb/upstashdb/upstashdb.py +34 -6
- agno/vectordb/weaviate/weaviate.py +53 -14
- agno/workflow/__init__.py +2 -0
- agno/workflow/agent.py +299 -0
- agno/workflow/condition.py +120 -18
- agno/workflow/loop.py +77 -10
- agno/workflow/parallel.py +231 -143
- agno/workflow/router.py +118 -17
- agno/workflow/step.py +609 -170
- agno/workflow/steps.py +73 -6
- agno/workflow/types.py +96 -21
- agno/workflow/workflow.py +2039 -262
- {agno-2.1.2.dist-info → agno-2.3.13.dist-info}/METADATA +201 -66
- agno-2.3.13.dist-info/RECORD +613 -0
- agno/tools/googlesearch.py +0 -98
- agno/tools/mcp.py +0 -679
- agno/tools/memori.py +0 -339
- agno-2.1.2.dist-info/RECORD +0 -543
- {agno-2.1.2.dist-info → agno-2.3.13.dist-info}/WHEEL +0 -0
- {agno-2.1.2.dist-info → agno-2.3.13.dist-info}/licenses/LICENSE +0 -0
- {agno-2.1.2.dist-info → agno-2.3.13.dist-info}/top_level.txt +0 -0
agno/os/utils.py
CHANGED
@@ -1,39 +1,225 @@
-
+import json
+from datetime import datetime, timezone
+from typing import Any, Callable, Dict, List, Optional, Set, Type, Union
 
-from fastapi import FastAPI, HTTPException, UploadFile
+from fastapi import FastAPI, HTTPException, Request, UploadFile
 from fastapi.routing import APIRoute, APIRouter
+from pydantic import BaseModel, create_model
 from starlette.middleware.cors import CORSMiddleware
 
 from agno.agent.agent import Agent
-from agno.db.base import BaseDb
+from agno.db.base import AsyncBaseDb, BaseDb
 from agno.knowledge.knowledge import Knowledge
 from agno.media import Audio, Image, Video
 from agno.media import File as FileMedia
+from agno.models.message import Message
 from agno.os.config import AgentOSConfig
+from agno.run.agent import RunOutputEvent
+from agno.run.team import TeamRunOutputEvent
+from agno.run.workflow import WorkflowRunOutputEvent
 from agno.team.team import Team
 from agno.tools import Toolkit
 from agno.tools.function import Function
-from agno.utils.log import logger
+from agno.utils.log import log_warning, logger
 from agno.workflow.workflow import Workflow
 
 
-def
-    """
+async def get_request_kwargs(request: Request, endpoint_func: Callable) -> Dict[str, Any]:
+    """Given a Request and an endpoint function, return a dictionary with all extra form data fields.
+
+    Args:
+        request: The FastAPI Request object
+        endpoint_func: The function exposing the endpoint that received the request
+
+    Returns:
+        A dictionary of kwargs
+    """
+    import inspect
+
+    form_data = await request.form()
+    sig = inspect.signature(endpoint_func)
+    known_fields = set(sig.parameters.keys())
+    kwargs: Dict[str, Any] = {key: value for key, value in form_data.items() if key not in known_fields}
+
+    # Handle JSON parameters. They are passed as strings and need to be deserialized.
+    if session_state := kwargs.get("session_state"):
+        try:
+            if isinstance(session_state, str):
+                session_state_dict = json.loads(session_state)  # type: ignore
+                kwargs["session_state"] = session_state_dict
+        except json.JSONDecodeError:
+            kwargs.pop("session_state")
+            log_warning(f"Invalid session_state parameter couldn't be loaded: {session_state}")
+
+    if dependencies := kwargs.get("dependencies"):
+        try:
+            if isinstance(dependencies, str):
+                dependencies_dict = json.loads(dependencies)  # type: ignore
+                kwargs["dependencies"] = dependencies_dict
+        except json.JSONDecodeError:
+            kwargs.pop("dependencies")
+            log_warning(f"Invalid dependencies parameter couldn't be loaded: {dependencies}")
+
+    if metadata := kwargs.get("metadata"):
+        try:
+            if isinstance(metadata, str):
+                metadata_dict = json.loads(metadata)  # type: ignore
+                kwargs["metadata"] = metadata_dict
+        except json.JSONDecodeError:
+            kwargs.pop("metadata")
+            log_warning(f"Invalid metadata parameter couldn't be loaded: {metadata}")
+
+    if knowledge_filters := kwargs.get("knowledge_filters"):
+        try:
+            if isinstance(knowledge_filters, str):
+                knowledge_filters_dict = json.loads(knowledge_filters)  # type: ignore
+
+                # Try to deserialize FilterExpr objects
+                from agno.filters import from_dict
+
+                # Check if it's a single FilterExpr dict or a list of FilterExpr dicts
+                if isinstance(knowledge_filters_dict, dict) and "op" in knowledge_filters_dict:
+                    # Single FilterExpr - convert to list format
+                    kwargs["knowledge_filters"] = [from_dict(knowledge_filters_dict)]
+                elif isinstance(knowledge_filters_dict, list):
+                    # List of FilterExprs or mixed content
+                    deserialized = []
+                    for item in knowledge_filters_dict:
+                        if isinstance(item, dict) and "op" in item:
+                            deserialized.append(from_dict(item))
+                        else:
+                            # Keep non-FilterExpr items as-is
+                            deserialized.append(item)
+                    kwargs["knowledge_filters"] = deserialized
+                else:
+                    # Regular dict filter
+                    kwargs["knowledge_filters"] = knowledge_filters_dict
+        except json.JSONDecodeError:
+            kwargs.pop("knowledge_filters")
+            log_warning(f"Invalid knowledge_filters parameter couldn't be loaded: {knowledge_filters}")
+        except ValueError as e:
+            # Filter deserialization failed
+            kwargs.pop("knowledge_filters")
+            log_warning(f"Invalid FilterExpr in knowledge_filters: {e}")
+
+    # Handle output_schema - convert JSON schema to dynamic Pydantic model
+    if output_schema := kwargs.get("output_schema"):
+        try:
+            if isinstance(output_schema, str):
+                from agno.os.utils import json_schema_to_pydantic_model
+
+                schema_dict = json.loads(output_schema)
+                dynamic_model = json_schema_to_pydantic_model(schema_dict)
+                kwargs["output_schema"] = dynamic_model
+        except json.JSONDecodeError:
+            kwargs.pop("output_schema")
+            log_warning(f"Invalid output_schema JSON: {output_schema}")
+        except Exception as e:
+            kwargs.pop("output_schema")
+            log_warning(f"Failed to create output_schema model: {e}")
+
+    # Parse boolean and null values
+    for key, value in kwargs.items():
+        if isinstance(value, str) and value.lower() in ["true", "false"]:
+            kwargs[key] = value.lower() == "true"
+        elif isinstance(value, str) and value.lower() in ["null", "none"]:
+            kwargs[key] = None
+
+    return kwargs
+
+
+def format_sse_event(event: Union[RunOutputEvent, TeamRunOutputEvent, WorkflowRunOutputEvent]) -> str:
+    """Parse JSON data into SSE-compliant format.
+
+    Args:
+        event_dict: Dictionary containing the event data
+
+    Returns:
+        SSE-formatted response:
+
+        ```
+        event: EventName
+        data: { ... }
+
+        event: AnotherEventName
+        data: { ... }
+        ```
+    """
+    try:
+        # Parse the JSON to extract the event type
+        event_type = event.event or "message"
+
+        # Serialize to valid JSON with double quotes and no newlines
+        clean_json = event.to_json(separators=(",", ":"), indent=None)
+
+        return f"event: {event_type}\ndata: {clean_json}\n\n"
+    except json.JSONDecodeError:
+        clean_json = event.to_json(separators=(",", ":"), indent=None)
+        return f"event: message\ndata: {clean_json}\n\n"
+
+
+async def get_db(
+    dbs: dict[str, list[Union[BaseDb, AsyncBaseDb]]], db_id: Optional[str] = None, table: Optional[str] = None
+) -> Union[BaseDb, AsyncBaseDb]:
+    """Return the database with the given ID and/or table, or the first database if no ID/table is provided."""
+
+    if table and not db_id:
+        raise HTTPException(status_code=400, detail="The db_id query parameter is required when passing a table")
+
+    async def _has_table(db: Union[BaseDb, AsyncBaseDb], table_name: str) -> bool:
+        """Check if this database has the specified table (configured and actually exists)."""
+        # First check if table name is configured
+        is_configured = (
+            hasattr(db, "session_table_name")
+            and db.session_table_name == table_name
+            or hasattr(db, "memory_table_name")
+            and db.memory_table_name == table_name
+            or hasattr(db, "metrics_table_name")
+            and db.metrics_table_name == table_name
+            or hasattr(db, "eval_table_name")
+            and db.eval_table_name == table_name
+            or hasattr(db, "knowledge_table_name")
+            and db.knowledge_table_name == table_name
+        )
+
+        if not is_configured:
+            return False
+
+        # Then check if table actually exists in the database
+        try:
+            if isinstance(db, AsyncBaseDb):
+                # For async databases, await the check
+                return await db.table_exists(table_name)
+            else:
+                # For sync databases, call directly
+                return db.table_exists(table_name)
+        except (NotImplementedError, AttributeError):
+            # If table_exists not implemented, fall back to configuration check
+            return is_configured
+
+    # If db_id is provided, first find the database with that ID
+    if db_id:
+        target_db_list = dbs.get(db_id)
+        if not target_db_list:
+            raise HTTPException(status_code=404, detail=f"No database found with id '{db_id}'")
+
+        # If table is also specified, search through all databases with this ID to find one with the table
+        if table:
+            for db in target_db_list:
+                if await _has_table(db, table):
+                    return db
+            raise HTTPException(status_code=404, detail=f"No database with id '{db_id}' has table '{table}'")
+
+        # If no table specified, return the first database with this ID
+        return target_db_list[0]
 
     # Raise if multiple databases are provided but no db_id is provided
-    if
+    if len(dbs) > 1:
         raise HTTPException(
             status_code=400, detail="The db_id query parameter is required when using multiple databases"
         )
 
-    #
-
-    db = dbs.get(db_id)
-    if not db:
-        raise HTTPException(status_code=404, detail=f"Database with id '{db_id}' not found")
-    else:
-        db = next(iter(dbs.values()))
-    return db
+    # Return the first (and only) database
+    return next(db for dbs in dbs.values() for db in dbs)
 
 
 def get_knowledge_instance_by_db_id(knowledge_instances: List[Knowledge], db_id: Optional[str] = None) -> Knowledge:
@@ -54,23 +240,31 @@ def get_knowledge_instance_by_db_id(knowledge_instances: List[Knowledge], db_id:
 
 
 def get_run_input(run_dict: Dict[str, Any], is_workflow_run: bool = False) -> str:
-    """Get the run input from the given run dictionary
+    """Get the run input from the given run dictionary
+
+    Uses the RunInput/TeamRunInput object which stores the original user input.
+    """
+
+    # For agent or team runs, use the stored input_content
+    if not is_workflow_run and run_dict.get("input") is not None:
+        input_data = run_dict.get("input")
+        if isinstance(input_data, dict) and input_data.get("input_content") is not None:
+            return stringify_input_content(input_data["input_content"])
 
     if is_workflow_run:
+        # Check the input field directly
+        if run_dict.get("input") is not None:
+            input_value = run_dict.get("input")
+            return str(input_value)
+
+        # Check the step executor runs for fallback
         step_executor_runs = run_dict.get("step_executor_runs", [])
         if step_executor_runs:
             for message in reversed(step_executor_runs[0].get("messages", [])):
                 if message.get("role") == "user":
                     return message.get("content", "")
 
-
-    if run_dict.get("input") is not None:
-        input_value = run_dict.get("input")
-        if isinstance(input_value, str):
-            return input_value
-        else:
-            return str(input_value)
-
+    # Final fallback: scan messages
     if run_dict.get("messages") is not None:
         for message in reversed(run_dict["messages"]):
             if message.get("role") == "user":
@@ -89,16 +283,17 @@ def get_session_name(session: Dict[str, Any]) -> str:
 
     # Otherwise use the original user message
     else:
-        runs = session.get("runs", [])
+        runs = session.get("runs", []) or []
 
         # For teams, identify the first Team run and avoid using the first member's run
         if session.get("session_type") == "team":
             run = None
             for r in runs:
                 # If agent_id is not present, it's a team run
-                if not r.get("agent_id"):
+                if not r.get("agent_id"):
                     run = r
                     break
+
             # Fallback to first run if no team run found
             if run is None and runs:
                 run = runs[0]
@@ -112,6 +307,7 @@ def get_session_name(session: Dict[str, Any]) -> str:
         elif isinstance(workflow_input, dict):
             try:
                 import json
+
                 return json.dumps(workflow_input)
             except (TypeError, ValueError):
                 pass
@@ -138,6 +334,23 @@ def get_session_name(session: Dict[str, Any]) -> str:
     return ""
 
 
+def extract_input_media(run_dict: Dict[str, Any]) -> Dict[str, Any]:
+    input_media: Dict[str, List[Any]] = {
+        "images": [],
+        "videos": [],
+        "audios": [],
+        "files": [],
+    }
+
+    input = run_dict.get("input", {})
+    input_media["images"].extend(input.get("images", []))
+    input_media["videos"].extend(input.get("videos", []))
+    input_media["audios"].extend(input.get("audios", []))
+    input_media["files"].extend(input.get("files", []))
+
+    return input_media
+
+
 def process_image(file: UploadFile) -> Image:
     content = file.file.read()
     if not content:
@@ -204,8 +417,15 @@ def format_tools(agent_tools: List[Union[Dict[str, Any], Toolkit, Function, Call
     return formatted_tools
 
 
-def format_team_tools(team_tools: List[Function]):
-
+def format_team_tools(team_tools: List[Union[Function, dict]]):
+    formatted_tools: List[Dict] = []
+    if team_tools is not None:
+        for tool in team_tools:
+            if isinstance(tool, dict):
+                formatted_tools.append(tool)
+            elif isinstance(tool, Function):
+                formatted_tools.append(tool.to_dict())
+    return formatted_tools
 
 
 def get_agent_by_id(agent_id: str, agents: Optional[List[Agent]] = None) -> Optional[Agent]:
@@ -238,6 +458,33 @@ def get_workflow_by_id(workflow_id: str, workflows: Optional[List[Workflow]] = N
     return None
 
 
+# INPUT SCHEMA VALIDATIONS
+
+
+def get_agent_input_schema_dict(agent: Agent) -> Optional[Dict[str, Any]]:
+    """Get input schema as dictionary for API responses"""
+
+    if agent.input_schema is not None:
+        try:
+            return agent.input_schema.model_json_schema()
+        except Exception:
+            return None
+
+    return None
+
+
+def get_team_input_schema_dict(team: Team) -> Optional[Dict[str, Any]]:
+    """Get input schema as dictionary for API responses"""
+
+    if team.input_schema is not None:
+        try:
+            return team.input_schema.model_json_schema()
+        except Exception:
+            return None
+
+    return None
+
+
 def get_workflow_input_schema_dict(workflow: Workflow) -> Optional[Dict[str, Any]]:
     """Get input schema as dictionary for API responses"""
 
@@ -303,6 +550,31 @@ def _generate_schema_from_params(params: Dict[str, Any]) -> Dict[str, Any]:
     return schema
 
 
+def resolve_origins(user_origins: Optional[List[str]] = None, default_origins: Optional[List[str]] = None) -> List[str]:
+    """
+    Get CORS origins - user-provided origins override defaults.
+
+    Args:
+        user_origins: Optional list of user-provided CORS origins
+
+    Returns:
+        List of allowed CORS origins (user-provided if set, otherwise defaults)
+    """
+    # User-provided origins override defaults
+    if user_origins:
+        return user_origins
+
+    # Default Agno domains
+    return default_origins or [
+        "http://localhost:3000",
+        "https://agno.com",
+        "https://www.agno.com",
+        "https://app.agno.com",
+        "https://os-stg.agno.com",
+        "https://os.agno.com",
+    ]
+
+
 def update_cors_middleware(app: FastAPI, new_origins: list):
     existing_origins: List[str] = []
 
@@ -402,8 +674,10 @@ def collect_mcp_tools_from_team(team: Team, mcp_tools: List[Any]) -> None:
     # Check the team tools
     if team.tools:
         for tool in team.tools:
-
-            if
+            # Alternate method of using isinstance(tool, (MCPTools, MultiMCPTools)) to avoid imports
+            if hasattr(type(tool), "__mro__") and any(
+                c.__name__ in ["MCPTools", "MultiMCPTools"] for c in type(tool).__mro__
+            ):
                 if tool not in mcp_tools:
                     mcp_tools.append(tool)
 
@@ -413,8 +687,10 @@ def collect_mcp_tools_from_team(team: Team, mcp_tools: List[Any]) -> None:
         if isinstance(member, Agent):
             if member.tools:
                 for tool in member.tools:
-
-                    if
+                    # Alternate method of using isinstance(tool, (MCPTools, MultiMCPTools)) to avoid imports
+                    if hasattr(type(tool), "__mro__") and any(
+                        c.__name__ in ["MCPTools", "MultiMCPTools"] for c in type(tool).__mro__
+                    ):
                         if tool not in mcp_tools:
                             mcp_tools.append(tool)
 
@@ -458,8 +734,10 @@ def collect_mcp_tools_from_workflow_step(step: Any, mcp_tools: List[Any]) -> Non
     if step.agent:
         if step.agent.tools:
             for tool in step.agent.tools:
-
-                if
+                # Alternate method of using isinstance(tool, (MCPTools, MultiMCPTools)) to avoid imports
+                if hasattr(type(tool), "__mro__") and any(
+                    c.__name__ in ["MCPTools", "MultiMCPTools"] for c in type(tool).__mro__
+                ):
                     if tool not in mcp_tools:
                         mcp_tools.append(tool)
     # Check step's team
@@ -481,8 +759,10 @@ def collect_mcp_tools_from_workflow_step(step: Any, mcp_tools: List[Any]) -> Non
     # Direct agent in workflow steps
     if step.tools:
        for tool in step.tools:
-
-            if
+            # Alternate method of using isinstance(tool, (MCPTools, MultiMCPTools)) to avoid imports
+            if hasattr(type(tool), "__mro__") and any(
+                c.__name__ in ["MCPTools", "MultiMCPTools"] for c in type(tool).__mro__
+            ):
                 if tool not in mcp_tools:
                     mcp_tools.append(tool)
 
@@ -493,3 +773,201 @@ def collect_mcp_tools_from_workflow_step(step: Any, mcp_tools: List[Any]) -> Non
     elif isinstance(step, Workflow):
         # Nested workflow
         collect_mcp_tools_from_workflow(step, mcp_tools)
+
+
+def stringify_input_content(input_content: Union[str, Dict[str, Any], List[Any], BaseModel]) -> str:
+    """Convert any given input_content into its string representation.
+
+    This handles both serialized (dict) and live (object) input_content formats.
+    """
+    import json
+
+    if isinstance(input_content, str):
+        return input_content
+    elif isinstance(input_content, Message):
+        return json.dumps(input_content.to_dict())
+    elif isinstance(input_content, dict):
+        return json.dumps(input_content, indent=2, default=str)
+    elif isinstance(input_content, list):
+        if input_content:
+            # Handle live Message objects
+            if isinstance(input_content[0], Message):
+                return json.dumps([m.to_dict() for m in input_content])
+            # Handle serialized Message dicts
+            elif isinstance(input_content[0], dict) and input_content[0].get("role") == "user":
+                return input_content[0].get("content", str(input_content))
+        return str(input_content)
+    else:
+        return str(input_content)
+
+
+def _get_python_type_from_json_schema(field_schema: Dict[str, Any], field_name: str = "NestedModel") -> Type:
+    """Map JSON schema type to Python type with recursive handling.
+
+    Args:
+        field_schema: JSON schema dictionary for a single field
+        field_name: Name of the field (used for nested model naming)
+
+    Returns:
+        Python type corresponding to the JSON schema type
+    """
+    if not isinstance(field_schema, dict):
+        return Any
+
+    json_type = field_schema.get("type")
+
+    # Handle basic types
+    if json_type == "string":
+        return str
+    elif json_type == "integer":
+        return int
+    elif json_type == "number":
+        return float
+    elif json_type == "boolean":
+        return bool
+    elif json_type == "null":
+        return type(None)
+    elif json_type == "array":
+        # Handle arrays with item type specification
+        items_schema = field_schema.get("items")
+        if items_schema and isinstance(items_schema, dict):
+            item_type = _get_python_type_from_json_schema(items_schema, f"{field_name}Item")
+            return List[item_type]  # type: ignore
+        else:
+            # No item type specified - use generic list
+            return List[Any]
+    elif json_type == "object":
+        # Recursively create nested Pydantic model
+        nested_properties = field_schema.get("properties", {})
+        nested_required = field_schema.get("required", [])
+        nested_title = field_schema.get("title", field_name)
+
+        # Build field definitions for nested model
+        nested_fields = {}
+        for nested_field_name, nested_field_schema in nested_properties.items():
+            nested_field_type = _get_python_type_from_json_schema(nested_field_schema, nested_field_name)
+
+            if nested_field_name in nested_required:
+                nested_fields[nested_field_name] = (nested_field_type, ...)
+            else:
+                nested_fields[nested_field_name] = (Optional[nested_field_type], None)  # type: ignore[assignment]
+
+        # Create nested model if it has fields
+        if nested_fields:
+            return create_model(nested_title, **nested_fields)  # type: ignore
+        else:
+            # Empty object schema - use generic dict
+            return Dict[str, Any]
+    else:
+        # Unknown or unspecified type - fallback to Any
+        if json_type:
+            logger.warning(f"Unknown JSON schema type '{json_type}' for field '{field_name}', using Any")
+        return Any
+
+
+def json_schema_to_pydantic_model(schema: Dict[str, Any]) -> Type[BaseModel]:
+    """Convert a JSON schema dictionary to a Pydantic BaseModel class.
+
+    This function dynamically creates a Pydantic model from a JSON schema specification,
+    handling nested objects, arrays, and optional fields.
+
+    Args:
+        schema: JSON schema dictionary with 'properties', 'required', 'type', etc.
+
+    Returns:
+        Dynamically created Pydantic BaseModel class
+    """
+    import copy
+
+    # Deep copy to avoid modifying the original schema
+    schema = copy.deepcopy(schema)
+
+    # Extract schema components
+    model_name = schema.get("title", "DynamicModel")
+    properties = schema.get("properties", {})
+    required_fields = schema.get("required", [])
+
+    # Validate schema has properties
+    if not properties:
+        logger.warning(f"JSON schema '{model_name}' has no properties, creating empty model")
+
+    # Build field definitions for create_model
+    field_definitions = {}
+    for field_name, field_schema in properties.items():
+        try:
+            field_type = _get_python_type_from_json_schema(field_schema, field_name)
+
+            if field_name in required_fields:
+                # Required field: (type, ...)
+                field_definitions[field_name] = (field_type, ...)
+            else:
+                # Optional field: (Optional[type], None)
+                field_definitions[field_name] = (Optional[field_type], None)  # type: ignore[assignment]
+        except Exception as e:
+            logger.warning(f"Failed to process field '{field_name}' in schema '{model_name}': {e}")
+            # Skip problematic fields rather than failing entirely
+            continue
+
+    # Create and return the dynamic model
+    try:
+        return create_model(model_name, **field_definitions)  # type: ignore
+    except Exception as e:
+        logger.error(f"Failed to create dynamic model '{model_name}': {e}")
+        # Return a minimal model as fallback
+        return create_model(model_name)
+
+
+def setup_tracing_for_os(db: Union[BaseDb, AsyncBaseDb]) -> None:
+    """Set up OpenTelemetry tracing for this agent/team/workflow."""
+    try:
+        from agno.tracing import setup_tracing
+
+        setup_tracing(db=db)
+    except ImportError:
+        logger.warning(
+            "tracing=True but OpenTelemetry packages not installed. "
+            "Install with: pip install opentelemetry-api opentelemetry-sdk openinference-instrumentation-agno"
+        )
+    except Exception as e:
+        logger.warning(f"Failed to enable tracing: {e}")
+
+
+def format_duration_ms(duration_ms: Optional[int]) -> str:
+    """Format a duration in milliseconds to a human-readable string.
+
+    Args:
+        duration_ms: Duration in milliseconds
+
+    Returns:
+        Formatted string like "150ms" or "1.50s"
+    """
+    if duration_ms is None or duration_ms < 1000:
+        return f"{duration_ms or 0}ms"
+    return f"{duration_ms / 1000:.2f}s"
+
+
+def parse_datetime_to_utc(datetime_str: str, param_name: str = "datetime") -> "datetime":
+    """Parse an ISO 8601 datetime string and convert to UTC.
+
+    Args:
+        datetime_str: ISO 8601 formatted datetime string (e.g., '2025-11-19T10:00:00Z' or '2025-11-19T15:30:00+05:30')
+        param_name: Name of the parameter for error messages
+
+    Returns:
+        datetime object in UTC timezone
+
+    Raises:
+        HTTPException: If the datetime string is invalid
+    """
+    try:
+        dt = datetime.fromisoformat(datetime_str.replace("Z", "+00:00"))
+        # Convert to UTC if timezone-aware, otherwise assume UTC
+        if dt.tzinfo is not None:
+            return dt.astimezone(timezone.utc)
+        else:
+            return dt.replace(tzinfo=timezone.utc)
+    except ValueError as e:
+        raise HTTPException(
+            status_code=400,
+            detail=f"Invalid {param_name} format. Use ISO 8601 format (e.g., '2025-11-19T10:00:00Z' or '2025-11-19T10:00:00+05:30'): {e}",
+        )
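A minimal usage sketch (not part of the published diff) of three helpers this version adds to agno/os/utils.py. The import path and signatures are taken from the diff above; the example schema and values are invented for illustration and assume agno 2.3.13 with pydantic installed.

import json

from agno.os.utils import format_duration_ms, json_schema_to_pydantic_model, parse_datetime_to_utc

# Hypothetical JSON schema, e.g. an "output_schema" form field of the kind
# get_request_kwargs deserializes above.
schema_dict = json.loads(
    '{"title": "MovieScript", "type": "object",'
    ' "properties": {"name": {"type": "string"}, "rating": {"type": "integer"}},'
    ' "required": ["name"]}'
)

# Build a dynamic Pydantic model; "rating" becomes Optional since it is not in "required".
MovieScript = json_schema_to_pydantic_model(schema_dict)
print(MovieScript(name="Dune").model_dump())  # {'name': 'Dune', 'rating': None}

# format_duration_ms switches to seconds at 1000 ms, per its docstring.
print(format_duration_ms(150), format_duration_ms(1500))  # 150ms 1.50s

# parse_datetime_to_utc normalizes ISO 8601 strings to UTC (offset-aware input shown here).
print(parse_datetime_to_utc("2025-11-19T15:30:00+05:30"))  # 2025-11-19 10:00:00+00:00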