agno-2.3.1-py3-none-any.whl → agno-2.3.3-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- agno/agent/agent.py +514 -186
- agno/compression/__init__.py +3 -0
- agno/compression/manager.py +176 -0
- agno/db/dynamo/dynamo.py +11 -0
- agno/db/firestore/firestore.py +5 -1
- agno/db/gcs_json/gcs_json_db.py +5 -2
- agno/db/in_memory/in_memory_db.py +5 -2
- agno/db/json/json_db.py +5 -1
- agno/db/migrations/manager.py +4 -4
- agno/db/mongo/async_mongo.py +158 -34
- agno/db/mongo/mongo.py +6 -2
- agno/db/mysql/mysql.py +48 -54
- agno/db/postgres/async_postgres.py +61 -51
- agno/db/postgres/postgres.py +42 -50
- agno/db/redis/redis.py +5 -0
- agno/db/redis/utils.py +5 -5
- agno/db/schemas/memory.py +7 -5
- agno/db/singlestore/singlestore.py +99 -108
- agno/db/sqlite/async_sqlite.py +32 -30
- agno/db/sqlite/sqlite.py +34 -30
- agno/knowledge/reader/pdf_reader.py +2 -2
- agno/knowledge/reader/tavily_reader.py +0 -1
- agno/memory/__init__.py +14 -1
- agno/memory/manager.py +223 -8
- agno/memory/strategies/__init__.py +15 -0
- agno/memory/strategies/base.py +67 -0
- agno/memory/strategies/summarize.py +196 -0
- agno/memory/strategies/types.py +37 -0
- agno/models/anthropic/claude.py +84 -80
- agno/models/aws/bedrock.py +38 -16
- agno/models/aws/claude.py +97 -277
- agno/models/azure/ai_foundry.py +8 -4
- agno/models/base.py +101 -14
- agno/models/cerebras/cerebras.py +18 -7
- agno/models/cerebras/cerebras_openai.py +4 -2
- agno/models/cohere/chat.py +8 -4
- agno/models/google/gemini.py +578 -20
- agno/models/groq/groq.py +18 -5
- agno/models/huggingface/huggingface.py +17 -6
- agno/models/ibm/watsonx.py +16 -6
- agno/models/litellm/chat.py +17 -7
- agno/models/message.py +19 -5
- agno/models/meta/llama.py +20 -4
- agno/models/mistral/mistral.py +8 -4
- agno/models/ollama/chat.py +17 -6
- agno/models/openai/chat.py +17 -6
- agno/models/openai/responses.py +23 -9
- agno/models/vertexai/claude.py +99 -5
- agno/os/interfaces/agui/router.py +1 -0
- agno/os/interfaces/agui/utils.py +97 -57
- agno/os/router.py +16 -1
- agno/os/routers/memory/memory.py +146 -0
- agno/os/routers/memory/schemas.py +26 -0
- agno/os/schema.py +21 -6
- agno/os/utils.py +134 -10
- agno/run/base.py +2 -1
- agno/run/workflow.py +1 -1
- agno/team/team.py +571 -225
- agno/tools/mcp/mcp.py +1 -1
- agno/utils/agent.py +119 -1
- agno/utils/dttm.py +33 -0
- agno/utils/models/ai_foundry.py +9 -2
- agno/utils/models/claude.py +12 -5
- agno/utils/models/cohere.py +9 -2
- agno/utils/models/llama.py +9 -2
- agno/utils/models/mistral.py +4 -2
- agno/utils/print_response/agent.py +37 -2
- agno/utils/print_response/team.py +52 -0
- agno/utils/tokens.py +41 -0
- agno/workflow/types.py +2 -2
- {agno-2.3.1.dist-info → agno-2.3.3.dist-info}/METADATA +45 -40
- {agno-2.3.1.dist-info → agno-2.3.3.dist-info}/RECORD +75 -68
- {agno-2.3.1.dist-info → agno-2.3.3.dist-info}/WHEEL +0 -0
- {agno-2.3.1.dist-info → agno-2.3.3.dist-info}/licenses/LICENSE +0 -0
- {agno-2.3.1.dist-info → agno-2.3.3.dist-info}/top_level.txt +0 -0
agno/os/routers/memory/memory.py
CHANGED
@@ -8,9 +8,12 @@ from fastapi.routing import APIRouter
 
 from agno.db.base import AsyncBaseDb, BaseDb
 from agno.db.schemas import UserMemory
+from agno.models.utils import get_model
 from agno.os.auth import get_authentication_dependency
 from agno.os.routers.memory.schemas import (
     DeleteMemoriesRequest,
+    OptimizeMemoriesRequest,
+    OptimizeMemoriesResponse,
     UserMemoryCreateSchema,
     UserMemorySchema,
     UserStatsSchema,
@@ -470,6 +473,9 @@ def attach_routes(router: APIRouter, dbs: dict[str, list[Union[BaseDb, AsyncBase
     ) -> PaginatedResponse[UserStatsSchema]:
         db = await get_db(dbs, db_id, table)
         try:
+            # Ensure limit and page are integers
+            limit = int(limit) if limit is not None else 20
+            page = int(page) if page is not None else 1
             if isinstance(db, AsyncBaseDb):
                 db = cast(AsyncBaseDb, db)
                 user_stats, total_count = await db.get_user_memory_stats(
@@ -494,6 +500,146 @@ def attach_routes(router: APIRouter, dbs: dict[str, list[Union[BaseDb, AsyncBase
         except Exception as e:
             raise HTTPException(status_code=500, detail=f"Failed to get user statistics: {str(e)}")
 
+    @router.post(
+        "/optimize-memories",
+        response_model=OptimizeMemoriesResponse,
+        status_code=200,
+        operation_id="optimize_memories",
+        summary="Optimize User Memories",
+        description=(
+            "Optimize all memories for a given user using the default summarize strategy. "
+            "This operation combines all memories into a single comprehensive summary, "
+            "achieving maximum token reduction while preserving all key information. "
+            "To use a custom model, specify the model parameter in 'provider:model_id' format "
+            "(e.g., 'openai:gpt-4o-mini', 'anthropic:claude-3-5-sonnet-20241022'). "
+            "If not specified, uses MemoryManager's default model (gpt-4o). "
+            "Set apply=false to preview optimization results without saving to database."
+        ),
+        responses={
+            200: {
+                "description": "Memories optimized successfully",
+                "content": {
+                    "application/json": {
+                        "example": {
+                            "memories": [
+                                {
+                                    "memory_id": "f9361a69-2997-40c7-ae4e-a5861d434047",
+                                    "memory": "User has a 3-year-old golden retriever named Max who loves fetch and walks. Lives in San Francisco's Mission district, works as a product manager in tech. Enjoys hiking Bay Area trails, trying new restaurants (especially Japanese, Thai, Mexican), and learning piano for 1.5 years.",
+                                    "topics": ["pets", "location", "work", "hobbies", "food_preferences"],
+                                    "user_id": "user2",
+                                    "updated_at": "2025-11-18T10:30:00Z",
+                                }
+                            ],
+                            "memories_before": 4,
+                            "memories_after": 1,
+                            "tokens_before": 450,
+                            "tokens_after": 180,
+                            "tokens_saved": 270,
+                            "reduction_percentage": 60.0,
+                        }
+                    }
+                },
+            },
+            400: {
+                "description": "Bad request - User ID is required or invalid model string format",
+                "model": BadRequestResponse,
+            },
+            404: {"description": "No memories found for user", "model": NotFoundResponse},
+            500: {"description": "Failed to optimize memories", "model": InternalServerErrorResponse},
+        },
+    )
+    async def optimize_memories(
+        request: OptimizeMemoriesRequest,
+        db_id: Optional[str] = Query(default=None, description="Database ID to use for optimization"),
+        table: Optional[str] = Query(default=None, description="Table to use for optimization"),
+    ) -> OptimizeMemoriesResponse:
+        """Optimize user memories using the default summarize strategy."""
+        from agno.memory import MemoryManager
+        from agno.memory.strategies.types import MemoryOptimizationStrategyType
+
+        try:
+            # Get database instance
+            db = await get_db(dbs, db_id, table)
+
+            # Create memory manager with optional model
+            if request.model:
+                try:
+                    model_instance = get_model(request.model)
+                except ValueError as e:
+                    raise HTTPException(status_code=400, detail=str(e))
+                memory_manager = MemoryManager(model=model_instance, db=db)
+            else:
+                # No model specified - use MemoryManager's default
+                memory_manager = MemoryManager(db=db)
+
+            # Get current memories to count tokens before optimization
+            if isinstance(db, AsyncBaseDb):
+                memories_before = await memory_manager.aget_user_memories(user_id=request.user_id)
+            else:
+                memories_before = memory_manager.get_user_memories(user_id=request.user_id)
+
+            if not memories_before:
+                raise HTTPException(status_code=404, detail=f"No memories found for user {request.user_id}")
+
+            # Count tokens before optimization
+            from agno.memory.strategies.summarize import SummarizeStrategy
+
+            strategy = SummarizeStrategy()
+            tokens_before = strategy.count_tokens(memories_before)
+            memories_before_count = len(memories_before)
+
+            # Optimize memories with default SUMMARIZE strategy
+            if isinstance(db, AsyncBaseDb):
+                optimized_memories = await memory_manager.aoptimize_memories(
+                    user_id=request.user_id,
+                    strategy=MemoryOptimizationStrategyType.SUMMARIZE,
+                    apply=request.apply,
+                )
+            else:
+                optimized_memories = memory_manager.optimize_memories(
+                    user_id=request.user_id,
+                    strategy=MemoryOptimizationStrategyType.SUMMARIZE,
+                    apply=request.apply,
+                )
+
+            # Count tokens after optimization
+            tokens_after = strategy.count_tokens(optimized_memories)
+            memories_after_count = len(optimized_memories)
+
+            # Calculate statistics
+            tokens_saved = tokens_before - tokens_after
+            reduction_percentage = (tokens_saved / tokens_before * 100.0) if tokens_before > 0 else 0.0
+
+            # Convert to schema objects
+            optimized_memory_schemas = [
+                UserMemorySchema(
+                    memory_id=mem.memory_id or "",
+                    memory=mem.memory or "",
+                    topics=mem.topics,
+                    agent_id=mem.agent_id,
+                    team_id=mem.team_id,
+                    user_id=mem.user_id,
+                    updated_at=mem.updated_at,
+                )
+                for mem in optimized_memories
+            ]
+
+            return OptimizeMemoriesResponse(
+                memories=optimized_memory_schemas,
+                memories_before=memories_before_count,
+                memories_after=memories_after_count,
+                tokens_before=tokens_before,
+                tokens_after=tokens_after,
+                tokens_saved=tokens_saved,
+                reduction_percentage=reduction_percentage,
+            )
+
+        except HTTPException:
+            raise
+        except Exception as e:
+            logger.error(f"Failed to optimize memories for user {request.user_id}: {str(e)}")
+            raise HTTPException(status_code=500, detail=f"Failed to optimize memories: {str(e)}")
+
     return router
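The new route above delegates to MemoryManager, which now exposes optimize_memories / aoptimize_memories. A minimal sketch of calling it directly follows; the method names, strategy enum, and apply flag are taken from the handler above, while the SqliteDb import path and db_file argument are assumptions for illustration only.

from agno.db.sqlite import SqliteDb  # assumed import path for this example
from agno.memory import MemoryManager
from agno.memory.strategies.types import MemoryOptimizationStrategyType

db = SqliteDb(db_file="agno.db")  # illustrative database configuration
manager = MemoryManager(db=db)

# apply=False returns the summarized memories without persisting them,
# mirroring the preview behaviour of the /optimize-memories route.
preview = manager.optimize_memories(
    user_id="user2",
    strategy=MemoryOptimizationStrategyType.SUMMARIZE,
    apply=False,
)
print(f"Summarized into {len(preview)} memory object(s)")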
agno/os/routers/memory/schemas.py
CHANGED
@@ -60,3 +60,29 @@ class UserStatsSchema(BaseModel):
             total_memories=user_stats_dict["total_memories"],
             last_memory_updated_at=datetime.fromtimestamp(updated_at, tz=timezone.utc) if updated_at else None,
         )
+
+
+class OptimizeMemoriesRequest(BaseModel):
+    """Schema for memory optimization request"""
+
+    user_id: str = Field(..., description="User ID to optimize memories for")
+    model: Optional[str] = Field(
+        default=None,
+        description="Model to use for optimization in format 'provider:model_id' (e.g., 'openai:gpt-4o-mini', 'anthropic:claude-3-5-sonnet-20241022', 'google:gemini-2.0-flash-exp'). If not specified, uses MemoryManager's default model (gpt-4o).",
+    )
+    apply: bool = Field(
+        default=True,
+        description="If True, apply optimization changes to database. If False, return preview only without saving.",
+    )
+
+
+class OptimizeMemoriesResponse(BaseModel):
+    """Schema for memory optimization response"""
+
+    memories: List[UserMemorySchema] = Field(..., description="List of optimized memory objects")
+    memories_before: int = Field(..., description="Number of memories before optimization", ge=0)
+    memories_after: int = Field(..., description="Number of memories after optimization", ge=0)
+    tokens_before: int = Field(..., description="Token count before optimization", ge=0)
+    tokens_after: int = Field(..., description="Token count after optimization", ge=0)
+    tokens_saved: int = Field(..., description="Number of tokens saved through optimization", ge=0)
+    reduction_percentage: float = Field(..., description="Percentage of token reduction achieved", ge=0.0, le=100.0)
agno/os/schema.py
CHANGED
@@ -24,6 +24,7 @@ from agno.run.agent import RunOutput
 from agno.run.team import TeamRunOutput
 from agno.session import AgentSession, TeamSession, WorkflowSession
 from agno.team.team import Team
+from agno.utils.agent import aexecute_instructions, aexecute_system_message
 from agno.workflow.agent import WorkflowAgent
 from agno.workflow.workflow import Workflow
 
@@ -337,12 +338,20 @@ class AgentResponse(BaseModel):
             "read_tool_call_history": agent.read_tool_call_history,
         }
 
+        instructions = agent.instructions if agent.instructions else None
+        if instructions and callable(instructions):
+            instructions = await aexecute_instructions(instructions=instructions, agent=agent)
+
+        system_message = agent.system_message if agent.system_message else None
+        if system_message and callable(system_message):
+            system_message = await aexecute_system_message(system_message=system_message, agent=agent)
+
         system_message_info = {
-            "system_message": str(
+            "system_message": str(system_message) if system_message else None,
             "system_message_role": agent.system_message_role,
             "build_context": agent.build_context,
             "description": agent.description,
-            "instructions":
+            "instructions": instructions,
             "expected_output": agent.expected_output,
             "additional_context": agent.additional_context,
             "markdown": agent.markdown,
@@ -560,12 +569,18 @@
             "get_member_information_tool": team.get_member_information_tool,
         }
 
-        team_instructions =
-
-
+        team_instructions = team.instructions if team.instructions else None
+        if team_instructions and callable(team_instructions):
+            team_instructions = await aexecute_instructions(instructions=team_instructions, agent=team, team=team)
+
+        team_system_message = team.system_message if team.system_message else None
+        if team_system_message and callable(team_system_message):
+            team_system_message = await aexecute_system_message(
+                system_message=team_system_message, agent=team, team=team
+            )
 
         system_message_info = {
-            "system_message":
+            "system_message": team_system_message,
             "system_message_role": team.system_message_role,
             "description": team.description,
             "instructions": team_instructions,
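The AgentResponse and TeamResponse changes resolve callable instructions and system messages before serializing the configuration. Below is a small sketch of the kind of definition this handles and of the resolution call itself; the aexecute_instructions call mirrors the diff above, while the Agent constructor usage and the callable's signature are assumptions for illustration.

import asyncio

from agno.agent.agent import Agent
from agno.utils.agent import aexecute_instructions

def dynamic_instructions(agent: Agent) -> str:
    # Illustrative callable; the config endpoints now resolve it instead of
    # serializing the function object.
    return f"You are {agent.name}. Keep answers short."

agent = Agent(name="support-bot", instructions=dynamic_instructions)

async def main() -> None:
    resolved = await aexecute_instructions(instructions=agent.instructions, agent=agent)
    print(resolved)

asyncio.run(main())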
agno/os/utils.py
CHANGED
@@ -1,8 +1,8 @@
-from typing import Any, Callable, Dict, List, Optional, Set, Union
+from typing import Any, Callable, Dict, List, Optional, Set, Type, Union
 
 from fastapi import FastAPI, HTTPException, UploadFile
 from fastapi.routing import APIRoute, APIRouter
-from pydantic import BaseModel
+from pydantic import BaseModel, create_model
 from starlette.middleware.cors import CORSMiddleware
 
 from agno.agent.agent import Agent
@@ -511,8 +511,10 @@ def collect_mcp_tools_from_team(team: Team, mcp_tools: List[Any]) -> None:
     # Check the team tools
     if team.tools:
         for tool in team.tools:
-
-            if
+            # Alternate method of using isinstance(tool, (MCPTools, MultiMCPTools)) to avoid imports
+            if hasattr(type(tool), "__mro__") and any(
+                c.__name__ in ["MCPTools", "MultiMCPTools"] for c in type(tool).__mro__
+            ):
                 if tool not in mcp_tools:
                     mcp_tools.append(tool)
 
@@ -522,8 +524,10 @@ def collect_mcp_tools_from_team(team: Team, mcp_tools: List[Any]) -> None:
         if isinstance(member, Agent):
             if member.tools:
                 for tool in member.tools:
-
-                    if
+                    # Alternate method of using isinstance(tool, (MCPTools, MultiMCPTools)) to avoid imports
+                    if hasattr(type(tool), "__mro__") and any(
+                        c.__name__ in ["MCPTools", "MultiMCPTools"] for c in type(tool).__mro__
+                    ):
                         if tool not in mcp_tools:
                             mcp_tools.append(tool)
 
@@ -567,8 +571,10 @@ def collect_mcp_tools_from_workflow_step(step: Any, mcp_tools: List[Any]) -> Non
     if step.agent:
         if step.agent.tools:
             for tool in step.agent.tools:
-
-                if
+                # Alternate method of using isinstance(tool, (MCPTools, MultiMCPTools)) to avoid imports
+                if hasattr(type(tool), "__mro__") and any(
+                    c.__name__ in ["MCPTools", "MultiMCPTools"] for c in type(tool).__mro__
+                ):
                     if tool not in mcp_tools:
                         mcp_tools.append(tool)
     # Check step's team
@@ -590,8 +596,10 @@ def collect_mcp_tools_from_workflow_step(step: Any, mcp_tools: List[Any]) -> Non
     # Direct agent in workflow steps
     if step.tools:
        for tool in step.tools:
-
-            if
+            # Alternate method of using isinstance(tool, (MCPTools, MultiMCPTools)) to avoid imports
+            if hasattr(type(tool), "__mro__") and any(
+                c.__name__ in ["MCPTools", "MultiMCPTools"] for c in type(tool).__mro__
+            ):
                 if tool not in mcp_tools:
                     mcp_tools.append(tool)
 
@@ -628,3 +636,119 @@ def stringify_input_content(input_content: Union[str, Dict[str, Any], List[Any],
         return str(input_content)
     else:
         return str(input_content)
+
+
+def _get_python_type_from_json_schema(field_schema: Dict[str, Any], field_name: str = "NestedModel") -> Type:
+    """Map JSON schema type to Python type with recursive handling.
+
+    Args:
+        field_schema: JSON schema dictionary for a single field
+        field_name: Name of the field (used for nested model naming)
+
+    Returns:
+        Python type corresponding to the JSON schema type
+    """
+    if not isinstance(field_schema, dict):
+        return Any
+
+    json_type = field_schema.get("type")
+
+    # Handle basic types
+    if json_type == "string":
+        return str
+    elif json_type == "integer":
+        return int
+    elif json_type == "number":
+        return float
+    elif json_type == "boolean":
+        return bool
+    elif json_type == "null":
+        return type(None)
+    elif json_type == "array":
+        # Handle arrays with item type specification
+        items_schema = field_schema.get("items")
+        if items_schema and isinstance(items_schema, dict):
+            item_type = _get_python_type_from_json_schema(items_schema, f"{field_name}Item")
+            return List[item_type]  # type: ignore
+        else:
+            # No item type specified - use generic list
+            return List[Any]
+    elif json_type == "object":
+        # Recursively create nested Pydantic model
+        nested_properties = field_schema.get("properties", {})
+        nested_required = field_schema.get("required", [])
+        nested_title = field_schema.get("title", field_name)
+
+        # Build field definitions for nested model
+        nested_fields = {}
+        for nested_field_name, nested_field_schema in nested_properties.items():
+            nested_field_type = _get_python_type_from_json_schema(nested_field_schema, nested_field_name)
+
+            if nested_field_name in nested_required:
+                nested_fields[nested_field_name] = (nested_field_type, ...)
+            else:
+                nested_fields[nested_field_name] = (Optional[nested_field_type], None)  # type: ignore[assignment]
+
+        # Create nested model if it has fields
+        if nested_fields:
+            return create_model(nested_title, **nested_fields)  # type: ignore
+        else:
+            # Empty object schema - use generic dict
+            return Dict[str, Any]
+    else:
+        # Unknown or unspecified type - fallback to Any
+        if json_type:
+            logger.warning(f"Unknown JSON schema type '{json_type}' for field '{field_name}', using Any")
+        return Any
+
+
+def json_schema_to_pydantic_model(schema: Dict[str, Any]) -> Type[BaseModel]:
+    """Convert a JSON schema dictionary to a Pydantic BaseModel class.
+
+    This function dynamically creates a Pydantic model from a JSON schema specification,
+    handling nested objects, arrays, and optional fields.
+
+    Args:
+        schema: JSON schema dictionary with 'properties', 'required', 'type', etc.
+
+    Returns:
+        Dynamically created Pydantic BaseModel class
+    """
+    import copy
+
+    # Deep copy to avoid modifying the original schema
+    schema = copy.deepcopy(schema)
+
+    # Extract schema components
+    model_name = schema.get("title", "DynamicModel")
+    properties = schema.get("properties", {})
+    required_fields = schema.get("required", [])
+
+    # Validate schema has properties
+    if not properties:
+        logger.warning(f"JSON schema '{model_name}' has no properties, creating empty model")
+
+    # Build field definitions for create_model
+    field_definitions = {}
+    for field_name, field_schema in properties.items():
+        try:
+            field_type = _get_python_type_from_json_schema(field_schema, field_name)
+
+            if field_name in required_fields:
+                # Required field: (type, ...)
+                field_definitions[field_name] = (field_type, ...)
+            else:
+                # Optional field: (Optional[type], None)
+                field_definitions[field_name] = (Optional[field_type], None)  # type: ignore[assignment]
+        except Exception as e:
+            logger.warning(f"Failed to process field '{field_name}' in schema '{model_name}': {e}")
+            # Skip problematic fields rather than failing entirely
+            continue
+
+    # Create and return the dynamic model
+    try:
+        return create_model(model_name, **field_definitions)  # type: ignore
+    except Exception as e:
+        logger.error(f"Failed to create dynamic model '{model_name}': {e}")
+        # Return a minimal model as fallback
+        return create_model(model_name)
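The new helpers convert a JSON schema into a dynamically created Pydantic model. A short usage sketch follows, importing the helper from agno.os.utils where the diff defines it; the sample schema itself is illustrative.

from agno.os.utils import json_schema_to_pydantic_model

schema = {
    "title": "Person",
    "type": "object",
    "properties": {
        "name": {"type": "string"},
        "age": {"type": "integer"},
        "tags": {"type": "array", "items": {"type": "string"}},
    },
    "required": ["name"],
}

# 'name' becomes a required str field; 'age' and 'tags' become optional fields.
Person = json_schema_to_pydantic_model(schema)
person = Person(name="Ada", age=36, tags=["math"])
print(person.model_dump())  # {'name': 'Ada', 'age': 36, 'tags': ['math']}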
agno/run/base.py
CHANGED
@@ -1,6 +1,6 @@
 from dataclasses import asdict, dataclass
 from enum import Enum
-from typing import Any, Dict, List, Optional, Union
+from typing import Any, Dict, List, Optional, Type, Union
 
 from pydantic import BaseModel
 
@@ -22,6 +22,7 @@ class RunContext:
     knowledge_filters: Optional[Union[Dict[str, Any], List[FilterExpr]]] = None
     metadata: Optional[Dict[str, Any]] = None
     session_state: Optional[Dict[str, Any]] = None
+    output_schema: Optional[Type[BaseModel]] = None
 
 
 @dataclass
agno/run/workflow.py
CHANGED
@@ -597,7 +597,7 @@ class WorkflowRunOutput:
             _dict["input"] = self.input
 
         if self.content and isinstance(self.content, BaseModel):
-            _dict["content"] = self.content.model_dump(exclude_none=True)
+            _dict["content"] = self.content.model_dump(exclude_none=True, mode="json")
 
         if self.events is not None:
             _dict["events"] = [e.to_dict() for e in self.events]
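The workflow change switches content serialization to model_dump(mode="json"). A standalone Pydantic sketch of why that matters: JSON mode coerces values such as datetimes into JSON-safe strings, which plain model_dump() leaves as Python objects. The model below is illustrative and not part of agno.

from datetime import datetime, timezone

from pydantic import BaseModel

class Result(BaseModel):
    created_at: datetime

result = Result(created_at=datetime(2025, 11, 18, tzinfo=timezone.utc))
print(result.model_dump(exclude_none=True))               # datetime object survives
print(result.model_dump(exclude_none=True, mode="json"))  # {'created_at': '2025-11-18T00:00:00Z'}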