vanna 0.7.9__py3-none-any.whl → 2.0.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- vanna/__init__.py +167 -395
- vanna/agents/__init__.py +7 -0
- vanna/capabilities/__init__.py +17 -0
- vanna/capabilities/agent_memory/__init__.py +21 -0
- vanna/capabilities/agent_memory/base.py +103 -0
- vanna/capabilities/agent_memory/models.py +53 -0
- vanna/capabilities/file_system/__init__.py +14 -0
- vanna/capabilities/file_system/base.py +71 -0
- vanna/capabilities/file_system/models.py +25 -0
- vanna/capabilities/sql_runner/__init__.py +13 -0
- vanna/capabilities/sql_runner/base.py +37 -0
- vanna/capabilities/sql_runner/models.py +13 -0
- vanna/components/__init__.py +92 -0
- vanna/components/base.py +11 -0
- vanna/components/rich/__init__.py +83 -0
- vanna/components/rich/containers/__init__.py +7 -0
- vanna/components/rich/containers/card.py +20 -0
- vanna/components/rich/data/__init__.py +9 -0
- vanna/components/rich/data/chart.py +17 -0
- vanna/components/rich/data/dataframe.py +93 -0
- vanna/components/rich/feedback/__init__.py +21 -0
- vanna/components/rich/feedback/badge.py +16 -0
- vanna/components/rich/feedback/icon_text.py +14 -0
- vanna/components/rich/feedback/log_viewer.py +41 -0
- vanna/components/rich/feedback/notification.py +19 -0
- vanna/components/rich/feedback/progress.py +37 -0
- vanna/components/rich/feedback/status_card.py +28 -0
- vanna/components/rich/feedback/status_indicator.py +14 -0
- vanna/components/rich/interactive/__init__.py +21 -0
- vanna/components/rich/interactive/button.py +95 -0
- vanna/components/rich/interactive/task_list.py +58 -0
- vanna/components/rich/interactive/ui_state.py +93 -0
- vanna/components/rich/specialized/__init__.py +7 -0
- vanna/components/rich/specialized/artifact.py +20 -0
- vanna/components/rich/text.py +16 -0
- vanna/components/simple/__init__.py +15 -0
- vanna/components/simple/image.py +15 -0
- vanna/components/simple/link.py +15 -0
- vanna/components/simple/text.py +11 -0
- vanna/core/__init__.py +193 -0
- vanna/core/_compat.py +19 -0
- vanna/core/agent/__init__.py +10 -0
- vanna/core/agent/agent.py +1407 -0
- vanna/core/agent/config.py +123 -0
- vanna/core/audit/__init__.py +28 -0
- vanna/core/audit/base.py +299 -0
- vanna/core/audit/models.py +131 -0
- vanna/core/component_manager.py +329 -0
- vanna/core/components.py +53 -0
- vanna/core/enhancer/__init__.py +11 -0
- vanna/core/enhancer/base.py +94 -0
- vanna/core/enhancer/default.py +118 -0
- vanna/core/enricher/__init__.py +10 -0
- vanna/core/enricher/base.py +59 -0
- vanna/core/errors.py +47 -0
- vanna/core/evaluation/__init__.py +81 -0
- vanna/core/evaluation/base.py +186 -0
- vanna/core/evaluation/dataset.py +254 -0
- vanna/core/evaluation/evaluators.py +376 -0
- vanna/core/evaluation/report.py +289 -0
- vanna/core/evaluation/runner.py +313 -0
- vanna/core/filter/__init__.py +10 -0
- vanna/core/filter/base.py +67 -0
- vanna/core/lifecycle/__init__.py +10 -0
- vanna/core/lifecycle/base.py +83 -0
- vanna/core/llm/__init__.py +16 -0
- vanna/core/llm/base.py +40 -0
- vanna/core/llm/models.py +61 -0
- vanna/core/middleware/__init__.py +10 -0
- vanna/core/middleware/base.py +69 -0
- vanna/core/observability/__init__.py +11 -0
- vanna/core/observability/base.py +88 -0
- vanna/core/observability/models.py +47 -0
- vanna/core/recovery/__init__.py +11 -0
- vanna/core/recovery/base.py +84 -0
- vanna/core/recovery/models.py +32 -0
- vanna/core/registry.py +278 -0
- vanna/core/rich_component.py +156 -0
- vanna/core/simple_component.py +27 -0
- vanna/core/storage/__init__.py +14 -0
- vanna/core/storage/base.py +46 -0
- vanna/core/storage/models.py +46 -0
- vanna/core/system_prompt/__init__.py +13 -0
- vanna/core/system_prompt/base.py +36 -0
- vanna/core/system_prompt/default.py +157 -0
- vanna/core/tool/__init__.py +18 -0
- vanna/core/tool/base.py +70 -0
- vanna/core/tool/models.py +84 -0
- vanna/core/user/__init__.py +17 -0
- vanna/core/user/base.py +29 -0
- vanna/core/user/models.py +25 -0
- vanna/core/user/request_context.py +70 -0
- vanna/core/user/resolver.py +42 -0
- vanna/core/validation.py +164 -0
- vanna/core/workflow/__init__.py +12 -0
- vanna/core/workflow/base.py +254 -0
- vanna/core/workflow/default.py +789 -0
- vanna/examples/__init__.py +1 -0
- vanna/examples/__main__.py +44 -0
- vanna/examples/anthropic_quickstart.py +80 -0
- vanna/examples/artifact_example.py +293 -0
- vanna/examples/claude_sqlite_example.py +236 -0
- vanna/examples/coding_agent_example.py +300 -0
- vanna/examples/custom_system_prompt_example.py +174 -0
- vanna/examples/default_workflow_handler_example.py +208 -0
- vanna/examples/email_auth_example.py +340 -0
- vanna/examples/evaluation_example.py +269 -0
- vanna/examples/extensibility_example.py +262 -0
- vanna/examples/minimal_example.py +67 -0
- vanna/examples/mock_auth_example.py +227 -0
- vanna/examples/mock_custom_tool.py +311 -0
- vanna/examples/mock_quickstart.py +79 -0
- vanna/examples/mock_quota_example.py +145 -0
- vanna/examples/mock_rich_components_demo.py +396 -0
- vanna/examples/mock_sqlite_example.py +223 -0
- vanna/examples/openai_quickstart.py +83 -0
- vanna/examples/primitive_components_demo.py +305 -0
- vanna/examples/quota_lifecycle_example.py +139 -0
- vanna/examples/visualization_example.py +251 -0
- vanna/integrations/__init__.py +17 -0
- vanna/integrations/anthropic/__init__.py +9 -0
- vanna/integrations/anthropic/llm.py +270 -0
- vanna/integrations/azureopenai/__init__.py +9 -0
- vanna/integrations/azureopenai/llm.py +329 -0
- vanna/integrations/azuresearch/__init__.py +7 -0
- vanna/integrations/azuresearch/agent_memory.py +413 -0
- vanna/integrations/bigquery/__init__.py +5 -0
- vanna/integrations/bigquery/sql_runner.py +81 -0
- vanna/integrations/chromadb/__init__.py +104 -0
- vanna/integrations/chromadb/agent_memory.py +416 -0
- vanna/integrations/clickhouse/__init__.py +5 -0
- vanna/integrations/clickhouse/sql_runner.py +82 -0
- vanna/integrations/duckdb/__init__.py +5 -0
- vanna/integrations/duckdb/sql_runner.py +65 -0
- vanna/integrations/faiss/__init__.py +7 -0
- vanna/integrations/faiss/agent_memory.py +431 -0
- vanna/integrations/google/__init__.py +9 -0
- vanna/integrations/google/gemini.py +370 -0
- vanna/integrations/hive/__init__.py +5 -0
- vanna/integrations/hive/sql_runner.py +87 -0
- vanna/integrations/local/__init__.py +17 -0
- vanna/integrations/local/agent_memory/__init__.py +7 -0
- vanna/integrations/local/agent_memory/in_memory.py +285 -0
- vanna/integrations/local/audit.py +59 -0
- vanna/integrations/local/file_system.py +242 -0
- vanna/integrations/local/file_system_conversation_store.py +255 -0
- vanna/integrations/local/storage.py +62 -0
- vanna/integrations/marqo/__init__.py +7 -0
- vanna/integrations/marqo/agent_memory.py +354 -0
- vanna/integrations/milvus/__init__.py +7 -0
- vanna/integrations/milvus/agent_memory.py +458 -0
- vanna/integrations/mock/__init__.py +9 -0
- vanna/integrations/mock/llm.py +65 -0
- vanna/integrations/mssql/__init__.py +5 -0
- vanna/integrations/mssql/sql_runner.py +66 -0
- vanna/integrations/mysql/__init__.py +5 -0
- vanna/integrations/mysql/sql_runner.py +92 -0
- vanna/integrations/ollama/__init__.py +7 -0
- vanna/integrations/ollama/llm.py +252 -0
- vanna/integrations/openai/__init__.py +10 -0
- vanna/integrations/openai/llm.py +267 -0
- vanna/integrations/openai/responses.py +163 -0
- vanna/integrations/opensearch/__init__.py +7 -0
- vanna/integrations/opensearch/agent_memory.py +411 -0
- vanna/integrations/oracle/__init__.py +5 -0
- vanna/integrations/oracle/sql_runner.py +75 -0
- vanna/integrations/pinecone/__init__.py +7 -0
- vanna/integrations/pinecone/agent_memory.py +329 -0
- vanna/integrations/plotly/__init__.py +5 -0
- vanna/integrations/plotly/chart_generator.py +313 -0
- vanna/integrations/postgres/__init__.py +9 -0
- vanna/integrations/postgres/sql_runner.py +112 -0
- vanna/integrations/premium/agent_memory/__init__.py +7 -0
- vanna/integrations/premium/agent_memory/premium.py +186 -0
- vanna/integrations/presto/__init__.py +5 -0
- vanna/integrations/presto/sql_runner.py +107 -0
- vanna/integrations/qdrant/__init__.py +7 -0
- vanna/integrations/qdrant/agent_memory.py +461 -0
- vanna/integrations/snowflake/__init__.py +5 -0
- vanna/integrations/snowflake/sql_runner.py +147 -0
- vanna/integrations/sqlite/__init__.py +9 -0
- vanna/integrations/sqlite/sql_runner.py +65 -0
- vanna/integrations/weaviate/__init__.py +7 -0
- vanna/integrations/weaviate/agent_memory.py +428 -0
- vanna/{ZhipuAI ā legacy/ZhipuAI}/ZhipuAI_embeddings.py +11 -11
- vanna/legacy/__init__.py +403 -0
- vanna/legacy/adapter.py +463 -0
- vanna/{advanced ā legacy/advanced}/__init__.py +3 -1
- vanna/{anthropic ā legacy/anthropic}/anthropic_chat.py +9 -7
- vanna/{azuresearch ā legacy/azuresearch}/azuresearch_vector.py +79 -41
- vanna/{base ā legacy/base}/base.py +224 -217
- vanna/legacy/bedrock/__init__.py +1 -0
- vanna/{bedrock ā legacy/bedrock}/bedrock_converse.py +13 -12
- vanna/{chromadb ā legacy/chromadb}/chromadb_vector.py +3 -1
- vanna/legacy/cohere/__init__.py +2 -0
- vanna/{cohere ā legacy/cohere}/cohere_chat.py +19 -14
- vanna/{cohere ā legacy/cohere}/cohere_embeddings.py +25 -19
- vanna/{deepseek ā legacy/deepseek}/deepseek_chat.py +5 -6
- vanna/legacy/faiss/__init__.py +1 -0
- vanna/{faiss ā legacy/faiss}/faiss.py +113 -59
- vanna/{flask ā legacy/flask}/__init__.py +84 -43
- vanna/{flask ā legacy/flask}/assets.py +5 -5
- vanna/{flask ā legacy/flask}/auth.py +5 -4
- vanna/{google ā legacy/google}/bigquery_vector.py +75 -42
- vanna/{google ā legacy/google}/gemini_chat.py +7 -3
- vanna/{hf ā legacy/hf}/hf.py +0 -1
- vanna/{milvus ā legacy/milvus}/milvus_vector.py +58 -35
- vanna/{mock ā legacy/mock}/llm.py +0 -1
- vanna/legacy/mock/vectordb.py +67 -0
- vanna/legacy/ollama/ollama.py +110 -0
- vanna/{openai ā legacy/openai}/openai_chat.py +2 -6
- vanna/legacy/opensearch/opensearch_vector.py +369 -0
- vanna/legacy/opensearch/opensearch_vector_semantic.py +200 -0
- vanna/legacy/oracle/oracle_vector.py +584 -0
- vanna/{pgvector ā legacy/pgvector}/pgvector.py +42 -13
- vanna/{qdrant ā legacy/qdrant}/qdrant.py +2 -6
- vanna/legacy/qianfan/Qianfan_Chat.py +170 -0
- vanna/legacy/qianfan/Qianfan_embeddings.py +36 -0
- vanna/legacy/qianwen/QianwenAI_chat.py +132 -0
- vanna/{remote.py ā legacy/remote.py} +28 -26
- vanna/{utils.py ā legacy/utils.py} +6 -11
- vanna/{vannadb ā legacy/vannadb}/vannadb_vector.py +115 -46
- vanna/{vllm ā legacy/vllm}/vllm.py +5 -6
- vanna/{weaviate ā legacy/weaviate}/weaviate_vector.py +59 -40
- vanna/{xinference ā legacy/xinference}/xinference.py +6 -6
- vanna/py.typed +0 -0
- vanna/servers/__init__.py +16 -0
- vanna/servers/__main__.py +8 -0
- vanna/servers/base/__init__.py +18 -0
- vanna/servers/base/chat_handler.py +65 -0
- vanna/servers/base/models.py +111 -0
- vanna/servers/base/rich_chat_handler.py +141 -0
- vanna/servers/base/templates.py +331 -0
- vanna/servers/cli/__init__.py +7 -0
- vanna/servers/cli/server_runner.py +204 -0
- vanna/servers/fastapi/__init__.py +7 -0
- vanna/servers/fastapi/app.py +163 -0
- vanna/servers/fastapi/routes.py +183 -0
- vanna/servers/flask/__init__.py +7 -0
- vanna/servers/flask/app.py +132 -0
- vanna/servers/flask/routes.py +137 -0
- vanna/tools/__init__.py +41 -0
- vanna/tools/agent_memory.py +322 -0
- vanna/tools/file_system.py +879 -0
- vanna/tools/python.py +222 -0
- vanna/tools/run_sql.py +165 -0
- vanna/tools/visualize_data.py +195 -0
- vanna/utils/__init__.py +0 -0
- vanna/web_components/__init__.py +44 -0
- vanna-2.0.0.dist-info/METADATA +485 -0
- vanna-2.0.0.dist-info/RECORD +289 -0
- vanna-2.0.0.dist-info/entry_points.txt +3 -0
- vanna/bedrock/__init__.py +0 -1
- vanna/cohere/__init__.py +0 -2
- vanna/faiss/__init__.py +0 -1
- vanna/mock/vectordb.py +0 -55
- vanna/ollama/ollama.py +0 -103
- vanna/opensearch/opensearch_vector.py +0 -392
- vanna/opensearch/opensearch_vector_semantic.py +0 -175
- vanna/oracle/oracle_vector.py +0 -585
- vanna/qianfan/Qianfan_Chat.py +0 -165
- vanna/qianfan/Qianfan_embeddings.py +0 -36
- vanna/qianwen/QianwenAI_chat.py +0 -133
- vanna-0.7.9.dist-info/METADATA +0 -408
- vanna-0.7.9.dist-info/RECORD +0 -79
- /vanna/{ZhipuAI ā legacy/ZhipuAI}/ZhipuAI_Chat.py +0 -0
- /vanna/{ZhipuAI ā legacy/ZhipuAI}/__init__.py +0 -0
- /vanna/{anthropic ā legacy/anthropic}/__init__.py +0 -0
- /vanna/{azuresearch ā legacy/azuresearch}/__init__.py +0 -0
- /vanna/{base ā legacy/base}/__init__.py +0 -0
- /vanna/{chromadb ā legacy/chromadb}/__init__.py +0 -0
- /vanna/{deepseek ā legacy/deepseek}/__init__.py +0 -0
- /vanna/{exceptions ā legacy/exceptions}/__init__.py +0 -0
- /vanna/{google ā legacy/google}/__init__.py +0 -0
- /vanna/{hf ā legacy/hf}/__init__.py +0 -0
- /vanna/{local.py ā legacy/local.py} +0 -0
- /vanna/{marqo ā legacy/marqo}/__init__.py +0 -0
- /vanna/{marqo ā legacy/marqo}/marqo.py +0 -0
- /vanna/{milvus ā legacy/milvus}/__init__.py +0 -0
- /vanna/{mistral ā legacy/mistral}/__init__.py +0 -0
- /vanna/{mistral ā legacy/mistral}/mistral.py +0 -0
- /vanna/{mock ā legacy/mock}/__init__.py +0 -0
- /vanna/{mock ā legacy/mock}/embedding.py +0 -0
- /vanna/{ollama ā legacy/ollama}/__init__.py +0 -0
- /vanna/{openai ā legacy/openai}/__init__.py +0 -0
- /vanna/{openai ā legacy/openai}/openai_embeddings.py +0 -0
- /vanna/{opensearch ā legacy/opensearch}/__init__.py +0 -0
- /vanna/{oracle ā legacy/oracle}/__init__.py +0 -0
- /vanna/{pgvector ā legacy/pgvector}/__init__.py +0 -0
- /vanna/{pinecone ā legacy/pinecone}/__init__.py +0 -0
- /vanna/{pinecone ā legacy/pinecone}/pinecone_vector.py +0 -0
- /vanna/{qdrant ā legacy/qdrant}/__init__.py +0 -0
- /vanna/{qianfan ā legacy/qianfan}/__init__.py +0 -0
- /vanna/{qianwen ā legacy/qianwen}/QianwenAI_embeddings.py +0 -0
- /vanna/{qianwen ā legacy/qianwen}/__init__.py +0 -0
- /vanna/{types ā legacy/types}/__init__.py +0 -0
- /vanna/{vannadb ā legacy/vannadb}/__init__.py +0 -0
- /vanna/{vllm ā legacy/vllm}/__init__.py +0 -0
- /vanna/{weaviate ā legacy/weaviate}/__init__.py +0 -0
- /vanna/{xinference ā legacy/xinference}/__init__.py +0 -0
- {vanna-0.7.9.dist-info ā vanna-2.0.0.dist-info}/WHEEL +0 -0
- {vanna-0.7.9.dist-info ā vanna-2.0.0.dist-info}/licenses/LICENSE +0 -0
|
@@ -0,0 +1,340 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Email authentication example for the Vanna Agents framework.
|
|
3
|
+
|
|
4
|
+
This example demonstrates how to create an agent with email-based authentication
|
|
5
|
+
where users are prompted for their email address in chat and the system creates
|
|
6
|
+
a user profile based on that email.
|
|
7
|
+
|
|
8
|
+
## What This Example Shows
|
|
9
|
+
|
|
10
|
+
1. **UserService Implementation**: A demo `DemoEmailUserService` that:
|
|
11
|
+
- Stores users in memory
|
|
12
|
+
- Authenticates users by email validation
|
|
13
|
+
- Creates user profiles automatically
|
|
14
|
+
- Manages user permissions
|
|
15
|
+
|
|
16
|
+
2. **Authentication Tool**: An `AuthTool` that:
|
|
17
|
+
- Takes an email address as input
|
|
18
|
+
- Uses the UserService to authenticate/create users
|
|
19
|
+
- Returns rich UI components for success/error feedback
|
|
20
|
+
- Provides structured results for the LLM
|
|
21
|
+
|
|
22
|
+
3. **In-Chat Authentication Flow**: Shows how:
|
|
23
|
+
- Users can provide their email in natural conversation
|
|
24
|
+
- The agent can prompt for authentication when needed
|
|
25
|
+
- Authentication results are displayed with rich UI components
|
|
26
|
+
- The system maintains user context across conversations
|
|
27
|
+
|
|
28
|
+
## Key Components
|
|
29
|
+
|
|
30
|
+
- `DemoEmailUserService`: Implements the `UserService` interface
|
|
31
|
+
- `AuthTool`: Implements the `Tool` interface for authentication
|
|
32
|
+
- Rich UI components for authentication feedback
|
|
33
|
+
- Integration with the agent's tool registry and conversation store
|
|
34
|
+
|
|
35
|
+
## Usage
|
|
36
|
+
|
|
37
|
+
Interactive: python -m vanna.examples.email_auth_example
|
|
38
|
+
|
|
39
|
+
## Note
|
|
40
|
+
|
|
41
|
+
This example uses a simplified mock LLM that doesn't actually call tools.
|
|
42
|
+
In a real implementation with OpenAI or Anthropic, the LLM would automatically
|
|
43
|
+
detect email addresses in user messages and call the authenticate_user tool.
|
|
44
|
+
|
|
45
|
+
For production use, you would:
|
|
46
|
+
- Replace DemoEmailUserService with a database-backed implementation
|
|
47
|
+
- Add proper email validation and security measures
|
|
48
|
+
- Implement session management in the server layer
|
|
49
|
+
- Add proper error handling and rate limiting
|
|
50
|
+
"""
|
|
51
|
+
|
|
52
|
+
import asyncio
|
|
53
|
+
from typing import Any, Dict, Optional, Type
|
|
54
|
+
|
|
55
|
+
from pydantic import BaseModel, Field
|
|
56
|
+
|
|
57
|
+
from vanna import (
|
|
58
|
+
AgentConfig,
|
|
59
|
+
Agent,
|
|
60
|
+
MemoryConversationStore,
|
|
61
|
+
MockLlmService,
|
|
62
|
+
User,
|
|
63
|
+
)
|
|
64
|
+
from vanna.core import Tool, UserService
|
|
65
|
+
from vanna.core import ToolContext, ToolResult
|
|
66
|
+
from vanna.core.registry import ToolRegistry
|
|
67
|
+
from vanna.core.components import UiComponent
|
|
68
|
+
from vanna.core import RichComponent
|
|
69
|
+
|
|
70
|
+
|
|
71
|
+
# Demo User Service Implementation
|
|
72
|
+
class DemoEmailUserService(UserService):
    """In-memory demo user service that authenticates users by email.

    Users are auto-provisioned on first successful authentication. Nothing
    is persisted — all state lives in process memory — so this class is
    suitable only for demos and tests.
    """

    def __init__(self) -> None:
        """Initialize the empty in-memory user store."""
        self._users: Dict[str, User] = {}  # user_id -> User
        self._email_to_id: Dict[str, str] = {}  # email -> user_id

    async def get_user(self, user_id: str) -> Optional[User]:
        """Return the user with the given ID, or None if unknown."""
        return self._users.get(user_id)

    async def authenticate(self, credentials: Dict[str, Any]) -> Optional[User]:
        """Authenticate (or auto-register) a user by the 'email' credential.

        Args:
            credentials: Mapping expected to contain an "email" key.

        Returns:
            The existing or newly created User, or None when the email is
            missing or malformed.
        """
        email = credentials.get("email")
        if not email or not self._is_valid_email(email):
            return None

        # Known email: return the existing profile.
        user_id = self._email_to_id.get(email)
        if user_id:
            return self._users[user_id]

        # First login for this email: auto-provision a new user profile.
        user_id = f"user_{len(self._users) + 1}"
        username = email.split("@")[0]

        user = User(
            id=user_id,
            username=username,
            email=email,
            permissions=["basic_user"],
            metadata={"auth_method": "email"},
        )

        self._users[user_id] = user
        self._email_to_id[email] = user_id
        return user

    async def has_permission(self, user: User, permission: str) -> bool:
        """Return True if the user's permission list contains `permission`."""
        return permission in user.permissions

    def _is_valid_email(self, email: str) -> bool:
        """Lightweight email sanity check (not full RFC 5322 validation).

        Fix over the naive `"@" in email and "." in email.split("@")[1]`
        check, which accepted malformed inputs such as "a@b@c.com",
        "a@.com", "@x.com" and "a@com." — this version requires exactly
        one "@", a non-empty local part, and a domain with an interior dot.
        """
        local, sep, domain = email.partition("@")
        if not sep or not local or "@" in domain:
            return False
        # Domain must contain a dot that is not merely leading/trailing
        # (e.g. "example.com" passes; ".com" and "com." do not).
        return "." in domain.strip(".")
|
|
118
|
+
|
|
119
|
+
|
|
120
|
+
# Authentication Tool
|
|
121
|
+
class AuthArgs(BaseModel):
    """Arguments for authentication."""

    # The address the user supplied in chat; validity is checked by the
    # user service, not by this schema.
    email: str = Field(description="User's email address")
|
|
125
|
+
|
|
126
|
+
|
|
127
|
+
class AuthTool(Tool[AuthArgs]):
    """Tool that authenticates users by email via the demo user service."""

    def __init__(self, user_service: DemoEmailUserService):
        self.user_service = user_service

    @property
    def name(self) -> str:
        return "authenticate_user"

    @property
    def description(self) -> str:
        return "Authenticate a user by their email address. Use this when the user provides an email."

    def get_args_schema(self) -> Type[AuthArgs]:
        return AuthArgs

    async def execute(self, context: ToolContext, args: AuthArgs) -> ToolResult:
        """Authenticate via the user service and build a UI status card."""
        user = await self.user_service.authenticate({"email": args.email})
        if user is not None:
            return self._success_result(user)
        return self._failure_result(args.email)

    def _success_result(self, user: User) -> ToolResult:
        # Success path: status card for the UI plus a structured summary for the LLM.
        message = f"✅ Welcome {user.username}! You're now authenticated as {user.email}"
        card = RichComponent(
            type="status_card",
            data={
                "title": "Authentication Success",
                "status": "success",
                "description": message,
                "icon": "✅",
                "metadata": {
                    "user_id": user.id,
                    "username": user.username,
                    "email": user.email,
                },
            },
        )
        return ToolResult(
            success=True,
            result_for_llm=f"User successfully authenticated as {user.username} ({user.email}). They can now access personalized features.",
            ui_component=UiComponent(rich_component=card),
        )

    def _failure_result(self, email: str) -> ToolResult:
        # Failure path: the error text doubles as the ToolResult's error field.
        message = f"❌ Invalid email format: {email}"
        card = RichComponent(
            type="status_card",
            data={
                "title": "Authentication Failed",
                "status": "error",
                "description": message,
                "icon": "❌",
                "metadata": {"email": email},
            },
        )
        return ToolResult(
            success=False,
            result_for_llm=f"Authentication failed for {email}. Please provide a valid email address.",
            ui_component=UiComponent(rich_component=card),
            error=message,
        )
|
|
192
|
+
|
|
193
|
+
|
|
194
|
+
def create_demo_agent() -> Agent:
    """Create a demo agent for REPL and server usage.

    Returns:
        Configured Agent instance with email authentication
    """
    # Delegates to create_auth_agent so both entry points share one setup path.
    return create_auth_agent()
|
|
201
|
+
|
|
202
|
+
|
|
203
|
+
def create_auth_agent() -> Agent:
    """Build the demo agent wired with the email-authentication tool."""

    # System prompt that instructs the LLM when and how to authenticate.
    auth_prompt = """You are a helpful AI assistant with an email-based authentication system.

AUTHENTICATION BEHAVIOR:
1. When a user provides an email address in their message, immediately use the 'authenticate_user' tool
2. Look for emails in patterns like "my email is...", "I'm john@example.com", or any text with @ symbols
3. If user isn't authenticated, politely ask for their email address to get started
4. After successful authentication, welcome them by name and offer personalized assistance
5. Be friendly and helpful throughout the process

Remember: Authentication is required for personalized features!"""

    # Simple mock LLM — the system prompt above is what guides behavior.
    mock_llm = MockLlmService(
        response_content="Hello! I'm your AI assistant. To provide you with personalized help, I'll need your email address for authentication. Please share your email with me, and I'll use the authenticate_user tool to set up your profile."
    )

    # Register the auth tool backed by the in-memory user service.
    registry = ToolRegistry()
    registry.register(AuthTool(DemoEmailUserService()))

    return Agent(
        llm_service=mock_llm,
        config=AgentConfig(
            stream_responses=True,
            include_thinking_indicators=False,  # Cleaner output for demo
            system_prompt=auth_prompt,
        ),
        tool_registry=registry,
        conversation_store=MemoryConversationStore(),
    )
|
|
241
|
+
|
|
242
|
+
|
|
243
|
+
async def demo_auth_flow():
    """Demonstrate the authentication flow with simple output."""
    agent = create_auth_agent()

    # Anonymous user until authentication succeeds.
    user = User(id="anonymous", username="guest", email=None, permissions=[])
    conversation_id = "auth_demo_conv"

    async def print_first_text(message: str) -> None:
        # Stream the agent's reply and print the first non-empty text component.
        async for component in agent.send_message(
            user=user, message=message, conversation_id=conversation_id
        ):
            if not hasattr(component, "rich_component"):
                continue
            rich = component.rich_component
            if rich.type.value != "text":
                continue
            content = rich.data.get("content") or getattr(rich, "content", "")
            if content:
                print(content)
                break

    print("=== Email Authentication Demo ===")
    print("This example shows how an agent can authenticate users via email in chat.")
    print("Note: This uses a simple mock LLM for demonstration purposes.\n")

    print("🔹 Step 1: Initial greeting")
    print("User: Hello!")
    print("Agent: ", end="")
    await print_first_text("Hello!")

    print("\n" + "=" * 60)

    print("\n🔹 Step 2: User provides email for authentication")
    print("User: My email is alice@example.com")
    print("Agent: ", end="")

    # This message should trigger the authenticate_user tool.
    auth_shown = False
    async for component in agent.send_message(
        user=user,
        message="My email is alice@example.com",
        conversation_id=conversation_id,
    ):
        if not hasattr(component, "rich_component"):
            continue
        rich = component.rich_component
        if rich.type.value == "status_card" and not auth_shown:
            status = rich.data.get("status", "")
            desc = rich.data.get("description", "")
            if status == "success":
                auth_shown = True
                print(f"🎉 {desc}")
                break

    print("\n" + "=" * 60)

    print("\n🔹 Step 3: Post-authentication interaction")
    print("User: What can you help me with now?")
    print("Agent: ", end="")
    await print_first_text("What can you help me with now?")

    print("\n" + "=" * 60)
    print("\n✅ Authentication demo complete!")
    print("\nKey Features Demonstrated:")
    print("• Email-based user authentication")
    print("• Tool-based authentication flow")
    print("• In-memory user storage and management")
    print("• Rich UI components for auth feedback")
|
|
326
|
+
|
|
327
|
+
|
|
328
|
+
async def main():
    """Run the authentication example."""
    # Single entry point: the full demo conversation flow.
    await demo_auth_flow()
|
|
331
|
+
|
|
332
|
+
|
|
333
|
+
def run_interactive():
    """Entry point for interactive usage."""
    print("Starting email authentication example...")
    # Drive the async demo from this synchronous entry point.
    asyncio.run(main())
|
|
337
|
+
|
|
338
|
+
|
|
339
|
+
# Allow running directly: python -m vanna.examples.email_auth_example
if __name__ == "__main__":
    run_interactive()
|
|
@@ -0,0 +1,269 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Evaluation System Example
|
|
3
|
+
|
|
4
|
+
This example demonstrates how to use the evaluation framework to test
|
|
5
|
+
and compare agents. Shows:
|
|
6
|
+
- Creating test cases programmatically
|
|
7
|
+
- Running evaluations with multiple evaluators
|
|
8
|
+
- Comparing agent variants (e.g., different LLMs)
|
|
9
|
+
- Generating reports
|
|
10
|
+
|
|
11
|
+
Usage:
|
|
12
|
+
PYTHONPATH=. python vanna/examples/evaluation_example.py
|
|
13
|
+
"""
|
|
14
|
+
|
|
15
|
+
import asyncio
|
|
16
|
+
from vanna import Agent, MockLlmService, MemoryConversationStore, User
|
|
17
|
+
from vanna.core.evaluation import (
|
|
18
|
+
EvaluationRunner,
|
|
19
|
+
EvaluationDataset,
|
|
20
|
+
TestCase,
|
|
21
|
+
ExpectedOutcome,
|
|
22
|
+
AgentVariant,
|
|
23
|
+
TrajectoryEvaluator,
|
|
24
|
+
OutputEvaluator,
|
|
25
|
+
EfficiencyEvaluator,
|
|
26
|
+
)
|
|
27
|
+
from vanna.core.registry import ToolRegistry
|
|
28
|
+
|
|
29
|
+
|
|
30
|
+
def create_sample_dataset() -> EvaluationDataset:
    """Create a sample dataset for demonstration."""

    eval_user = User(
        id="eval_user", username="evaluator", email="eval@example.com", permissions=[]
    )

    # (id, message, expected keywords, min components, time budget ms, metadata)
    specs = [
        (
            "test_001",
            "Hello, how are you?",
            ["hello", "hi"],
            None,
            3000,
            {"category": "greeting", "difficulty": "easy"},
        ),
        (
            "test_002",
            "What can you help me with?",
            ["help", "assist"],
            None,
            3000,
            {"category": "capabilities", "difficulty": "easy"},
        ),
        (
            "test_003",
            "Explain quantum computing",
            ["quantum", "computing"],
            1,
            5000,
            {"category": "explanation", "difficulty": "medium"},
        ),
    ]

    test_cases = []
    for case_id, message, keywords, min_components, budget_ms, meta in specs:
        outcome_kwargs = {
            "final_answer_contains": keywords,
            "max_execution_time_ms": budget_ms,
        }
        # Only the harder case constrains the component count, matching the
        # original hand-written test cases.
        if min_components is not None:
            outcome_kwargs["min_components"] = min_components
        test_cases.append(
            TestCase(
                id=case_id,
                user=eval_user,
                message=message,
                expected_outcome=ExpectedOutcome(**outcome_kwargs),
                metadata=meta,
            )
        )

    return EvaluationDataset(
        name="Demo Test Cases",
        test_cases=test_cases,
        description="Sample test cases for evaluation demo",
    )
|
|
76
|
+
|
|
77
|
+
|
|
78
|
+
def create_test_agent(name: str, response_content: str) -> Agent:
    """Create a test agent with mock LLM.

    NOTE(review): `name` is accepted for symmetry with variant creation but
    is not used by the agent itself.
    """
    mock = MockLlmService(response_content=response_content)
    return Agent(
        llm_service=mock,
        tool_registry=ToolRegistry(),
        conversation_store=MemoryConversationStore(),
    )
|
|
85
|
+
|
|
86
|
+
|
|
87
|
+
async def demo_single_agent_evaluation():
    """Demonstrate evaluating a single agent."""
    banner = "=" * 80
    print("\n" + banner)
    print("DEMO 1: Single Agent Evaluation")
    print(banner + "\n")

    # Build the dataset of test cases.
    dataset = create_sample_dataset()
    print(f"Loaded dataset: {dataset.name}")
    print(f"Test cases: {len(dataset.test_cases)}\n")

    # Agent under test, backed by a canned mock response.
    agent = create_test_agent(
        "test-agent",
        "Hello! I'm here to help you with various tasks including answering questions about topics like quantum computing.",
    )

    # Evaluators covering trajectory, output content, and runtime budget.
    evaluators = [
        TrajectoryEvaluator(),
        OutputEvaluator(),
        EfficiencyEvaluator(max_execution_time_ms=5000),
    ]

    runner = EvaluationRunner(evaluators=evaluators, max_concurrency=5)
    print("Running evaluation...")
    report = await runner.run_evaluation(agent, dataset.test_cases)

    report.print_summary()

    # List any test cases that did not pass.
    failed = report.get_failures()
    if failed:
        print("\nFailed test cases:")
        for result in failed:
            print(f"  - {result.test_case.id}: {result.test_case.message}")
|
|
125
|
+
|
|
126
|
+
|
|
127
|
+
async def demo_agent_comparison():
    """Demonstrate comparing multiple agent variants."""
    print("\n" + "=" * 80)
    print("DEMO 2: Agent Comparison (LLM Comparison Use Case)")
    print("=" * 80 + "\n")

    # Create dataset shared by every variant so scores are comparable.
    dataset = create_sample_dataset()
    print(f"Loaded dataset: {dataset.name}")
    print(f"Test cases: {len(dataset.test_cases)}\n")

    # Create agent variants — each pairs a mock agent with version metadata.
    # The canned replies differ per variant so evaluators can discriminate.
    variants = [
        AgentVariant(
            name="agent-v1",
            agent=create_test_agent(
                "v1",
                "Hi there! I can help you with many things including explaining complex topics like quantum computing.",
            ),
            metadata={"version": "1.0", "model": "mock-v1"},
        ),
        AgentVariant(
            name="agent-v2",
            agent=create_test_agent(
                "v2",
                "Hello! I'm your helpful assistant. I can assist with various tasks and explain topics like quantum computing in detail.",
            ),
            metadata={"version": "2.0", "model": "mock-v2"},
        ),
        AgentVariant(
            name="agent-v3",
            agent=create_test_agent(
                "v3",
                "Greetings! I'm designed to help you with a wide range of tasks, from simple questions to complex explanations about quantum computing and more.",
            ),
            metadata={"version": "3.0", "model": "mock-v3"},
        ),
    ]

    print(f"Created {len(variants)} agent variants:")
    for v in variants:
        print(f" - {v.name}")
    print()

    # Create evaluators (no TrajectoryEvaluator here — only output and speed).
    evaluators = [
        OutputEvaluator(),
        EfficiencyEvaluator(max_execution_time_ms=5000),
    ]

    # Run comparison with a higher concurrency cap than the single-agent demo.
    runner = EvaluationRunner(evaluators=evaluators, max_concurrency=10)
    print(
        f"Running comparison ({len(variants)} variants Ɨ {len(dataset.test_cases)} test cases)..."
    )
    print("All variants running in parallel for maximum efficiency...\n")

    comparison = await runner.compare_agents(variants, dataset.test_cases)

    # Print results
    comparison.print_summary()

    # Show the winner along each metric axis the comparison supports.
    print("Best Performing Variants:")
    print(f" šŸ† Best score: {comparison.get_best_variant('score')}")
    print(f" āš” Fastest: {comparison.get_best_variant('speed')}")
    print(f" āœ… Best pass rate: {comparison.get_best_variant('pass_rate')}")
|
|
194
|
+
|
|
195
|
+
|
|
196
|
+
async def demo_dataset_operations():
    """Demonstrate dataset creation and manipulation."""
    divider = "=" * 80
    print("\n" + divider)
    print("DEMO 3: Dataset Operations")
    print(divider + "\n")

    # Build the sample dataset and show its headline attributes.
    dataset = create_sample_dataset()
    print(f"Dataset: {dataset.name}")
    print(f"Description: {dataset.description}")
    print(f"Total test cases: {len(dataset)}\n")

    # Subset the dataset by the 'difficulty' metadata field.
    easy_tests = dataset.filter_by_metadata(difficulty="easy")
    medium_tests = dataset.filter_by_metadata(difficulty="medium")
    print(f"Easy test cases: {len(easy_tests)}")
    print(f"Medium test cases: {len(medium_tests)}\n")

    # Round-trip the dataset through YAML and JSON in a throwaway directory
    # (local imports keep the demo self-contained, as in the original).
    import tempfile
    import os

    with tempfile.TemporaryDirectory() as tmpdir:
        yaml_path = os.path.join(tmpdir, "dataset.yaml")
        json_path = os.path.join(tmpdir, "dataset.json")

        dataset.save_yaml(yaml_path)
        dataset.save_json(json_path)

        print("Dataset saved to temporary files:")
        print(f" - YAML: {yaml_path}")
        print(f" - JSON: {json_path}\n")

        # Reload both serializations to verify they round-trip.
        loaded_yaml = EvaluationDataset.from_yaml(yaml_path)
        loaded_json = EvaluationDataset.from_json(json_path)

        print("Loaded datasets:")
        print(f" - From YAML: {len(loaded_yaml)} test cases")
        print(f" - From JSON: {len(loaded_json)} test cases")
|
|
239
|
+
|
|
240
|
+
|
|
241
|
+
async def main():
    """Run all evaluation demos in sequence."""
    print("\nšŸš€ Vanna Agents Evaluation System Demo")
    print("=" * 80)

    # Run the three demos in order: single-agent evaluation, agent
    # comparison (the headline use case), then dataset operations.
    for demo in (
        demo_single_agent_evaluation,
        demo_agent_comparison,
        demo_dataset_operations,
    ):
        await demo()

    print("\n" + "=" * 80)
    print("āœ… All demos completed!")
    print("=" * 80)
    print("\nKey Takeaways:")
    takeaways = (
        "Evaluations are integral to the Vanna package",
        "Parallel execution handles I/O-bound LLM calls efficiently",
        "Agent comparison is a first-class use case",
        "Multiple evaluators can be composed for comprehensive testing",
        "Reports can be exported to HTML, CSV, or printed to console",
    )
    for number, takeaway in enumerate(takeaways, start=1):
        print(f" {number}. {takeaway}")
    print("\nFor LLM comparison, see: evals/benchmarks/llm_comparison.py")
    print()
|
|
266
|
+
|
|
267
|
+
|
|
268
|
+
# Script entry point: run all demos inside a single asyncio event loop.
if __name__ == "__main__":
    asyncio.run(main())
|