mindroom 0.0.0__py3-none-any.whl → 0.1.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- mindroom/__init__.py +3 -0
- mindroom/agent_prompts.py +963 -0
- mindroom/agents.py +248 -0
- mindroom/ai.py +421 -0
- mindroom/api/__init__.py +1 -0
- mindroom/api/credentials.py +137 -0
- mindroom/api/google_integration.py +355 -0
- mindroom/api/google_tools_helper.py +40 -0
- mindroom/api/homeassistant_integration.py +421 -0
- mindroom/api/integrations.py +189 -0
- mindroom/api/main.py +506 -0
- mindroom/api/matrix_operations.py +219 -0
- mindroom/api/tools.py +94 -0
- mindroom/background_tasks.py +87 -0
- mindroom/bot.py +2470 -0
- mindroom/cli.py +86 -0
- mindroom/commands.py +377 -0
- mindroom/config.py +343 -0
- mindroom/config_commands.py +324 -0
- mindroom/config_confirmation.py +411 -0
- mindroom/constants.py +52 -0
- mindroom/credentials.py +146 -0
- mindroom/credentials_sync.py +134 -0
- mindroom/custom_tools/__init__.py +8 -0
- mindroom/custom_tools/config_manager.py +765 -0
- mindroom/custom_tools/gmail.py +92 -0
- mindroom/custom_tools/google_calendar.py +92 -0
- mindroom/custom_tools/google_sheets.py +92 -0
- mindroom/custom_tools/homeassistant.py +341 -0
- mindroom/error_handling.py +35 -0
- mindroom/file_watcher.py +49 -0
- mindroom/interactive.py +313 -0
- mindroom/logging_config.py +207 -0
- mindroom/matrix/__init__.py +1 -0
- mindroom/matrix/client.py +782 -0
- mindroom/matrix/event_info.py +173 -0
- mindroom/matrix/identity.py +149 -0
- mindroom/matrix/large_messages.py +267 -0
- mindroom/matrix/mentions.py +141 -0
- mindroom/matrix/message_builder.py +94 -0
- mindroom/matrix/message_content.py +209 -0
- mindroom/matrix/presence.py +178 -0
- mindroom/matrix/rooms.py +311 -0
- mindroom/matrix/state.py +77 -0
- mindroom/matrix/typing.py +91 -0
- mindroom/matrix/users.py +217 -0
- mindroom/memory/__init__.py +21 -0
- mindroom/memory/config.py +137 -0
- mindroom/memory/functions.py +396 -0
- mindroom/py.typed +0 -0
- mindroom/response_tracker.py +128 -0
- mindroom/room_cleanup.py +139 -0
- mindroom/routing.py +107 -0
- mindroom/scheduling.py +758 -0
- mindroom/stop.py +207 -0
- mindroom/streaming.py +203 -0
- mindroom/teams.py +749 -0
- mindroom/thread_utils.py +318 -0
- mindroom/tools/__init__.py +520 -0
- mindroom/tools/agentql.py +64 -0
- mindroom/tools/airflow.py +57 -0
- mindroom/tools/apify.py +49 -0
- mindroom/tools/arxiv.py +64 -0
- mindroom/tools/aws_lambda.py +41 -0
- mindroom/tools/aws_ses.py +57 -0
- mindroom/tools/baidusearch.py +87 -0
- mindroom/tools/brightdata.py +116 -0
- mindroom/tools/browserbase.py +62 -0
- mindroom/tools/cal_com.py +98 -0
- mindroom/tools/calculator.py +112 -0
- mindroom/tools/cartesia.py +84 -0
- mindroom/tools/composio.py +166 -0
- mindroom/tools/config_manager.py +44 -0
- mindroom/tools/confluence.py +73 -0
- mindroom/tools/crawl4ai.py +101 -0
- mindroom/tools/csv.py +104 -0
- mindroom/tools/custom_api.py +106 -0
- mindroom/tools/dalle.py +85 -0
- mindroom/tools/daytona.py +180 -0
- mindroom/tools/discord.py +81 -0
- mindroom/tools/docker.py +73 -0
- mindroom/tools/duckdb.py +124 -0
- mindroom/tools/duckduckgo.py +99 -0
- mindroom/tools/e2b.py +121 -0
- mindroom/tools/eleven_labs.py +77 -0
- mindroom/tools/email.py +74 -0
- mindroom/tools/exa.py +246 -0
- mindroom/tools/fal.py +50 -0
- mindroom/tools/file.py +80 -0
- mindroom/tools/financial_datasets_api.py +112 -0
- mindroom/tools/firecrawl.py +124 -0
- mindroom/tools/gemini.py +85 -0
- mindroom/tools/giphy.py +49 -0
- mindroom/tools/github.py +376 -0
- mindroom/tools/gmail.py +102 -0
- mindroom/tools/google_calendar.py +55 -0
- mindroom/tools/google_maps.py +112 -0
- mindroom/tools/google_sheets.py +86 -0
- mindroom/tools/googlesearch.py +83 -0
- mindroom/tools/groq.py +77 -0
- mindroom/tools/hackernews.py +54 -0
- mindroom/tools/jina.py +108 -0
- mindroom/tools/jira.py +70 -0
- mindroom/tools/linear.py +103 -0
- mindroom/tools/linkup.py +65 -0
- mindroom/tools/lumalabs.py +71 -0
- mindroom/tools/mem0.py +82 -0
- mindroom/tools/modelslabs.py +85 -0
- mindroom/tools/moviepy_video_tools.py +62 -0
- mindroom/tools/newspaper4k.py +63 -0
- mindroom/tools/openai.py +143 -0
- mindroom/tools/openweather.py +89 -0
- mindroom/tools/oxylabs.py +54 -0
- mindroom/tools/pandas.py +35 -0
- mindroom/tools/pubmed.py +64 -0
- mindroom/tools/python.py +120 -0
- mindroom/tools/reddit.py +155 -0
- mindroom/tools/replicate.py +56 -0
- mindroom/tools/resend.py +55 -0
- mindroom/tools/scrapegraph.py +87 -0
- mindroom/tools/searxng.py +120 -0
- mindroom/tools/serpapi.py +55 -0
- mindroom/tools/serper.py +81 -0
- mindroom/tools/shell.py +46 -0
- mindroom/tools/slack.py +80 -0
- mindroom/tools/sleep.py +38 -0
- mindroom/tools/spider.py +62 -0
- mindroom/tools/sql.py +138 -0
- mindroom/tools/tavily.py +104 -0
- mindroom/tools/telegram.py +54 -0
- mindroom/tools/todoist.py +103 -0
- mindroom/tools/trello.py +121 -0
- mindroom/tools/twilio.py +97 -0
- mindroom/tools/web_browser_tools.py +37 -0
- mindroom/tools/webex.py +63 -0
- mindroom/tools/website.py +45 -0
- mindroom/tools/whatsapp.py +81 -0
- mindroom/tools/wikipedia.py +45 -0
- mindroom/tools/x.py +97 -0
- mindroom/tools/yfinance.py +121 -0
- mindroom/tools/youtube.py +81 -0
- mindroom/tools/zendesk.py +62 -0
- mindroom/tools/zep.py +107 -0
- mindroom/tools/zoom.py +62 -0
- mindroom/tools_metadata.json +7643 -0
- mindroom/tools_metadata.py +220 -0
- mindroom/topic_generator.py +153 -0
- mindroom/voice_handler.py +266 -0
- mindroom-0.1.1.dist-info/METADATA +425 -0
- mindroom-0.1.1.dist-info/RECORD +152 -0
- {mindroom-0.0.0.dist-info → mindroom-0.1.1.dist-info}/WHEEL +1 -2
- mindroom-0.1.1.dist-info/entry_points.txt +2 -0
- mindroom-0.0.0.dist-info/METADATA +0 -24
- mindroom-0.0.0.dist-info/RECORD +0 -4
- mindroom-0.0.0.dist-info/top_level.txt +0 -1
mindroom/agents.py
ADDED
|
@@ -0,0 +1,248 @@
|
|
|
1
|
+
"""Agent loader that reads agent configurations from YAML file."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
from datetime import datetime
|
|
6
|
+
from typing import TYPE_CHECKING
|
|
7
|
+
from zoneinfo import ZoneInfo
|
|
8
|
+
|
|
9
|
+
from agno.agent import Agent
|
|
10
|
+
from agno.storage.sqlite import SqliteStorage
|
|
11
|
+
|
|
12
|
+
from . import agent_prompts
|
|
13
|
+
from . import tools as _tools_module # noqa: F401
|
|
14
|
+
from .constants import ROUTER_AGENT_NAME, SESSIONS_DIR
|
|
15
|
+
from .logging_config import get_logger
|
|
16
|
+
from .tools_metadata import get_tool_by_name
|
|
17
|
+
|
|
18
|
+
if TYPE_CHECKING:
|
|
19
|
+
from .config import Config
|
|
20
|
+
|
|
21
|
+
logger = get_logger(__name__)
|
|
22
|
+
|
|
23
|
+
# Maximum length for instruction descriptions to include in agent summary
|
|
24
|
+
MAX_INSTRUCTION_LENGTH = 100
|
|
25
|
+
|
|
26
|
+
|
|
27
|
+
def get_datetime_context(timezone_str: str) -> str:
    """Generate current date and time context for the agent.

    Args:
        timezone_str: Timezone string (e.g., 'America/New_York', 'UTC')

    Returns:
        Formatted string with current date and time information

    """
    now = datetime.now(ZoneInfo(timezone_str))

    # Human-friendly date plus 24-hour time with timezone abbreviation.
    formatted_date = now.strftime("%A, %B %d, %Y")
    formatted_time = now.strftime("%H:%M %Z")

    return (
        "## Current Date and Time\n"
        f"Today is {formatted_date}.\n"
        f"The current time is {formatted_time} ({timezone_str} timezone).\n"
        "\n"
    )
|
|
49
|
+
|
|
50
|
+
|
|
51
|
+
# Rich prompt mapping - agents that use detailed prompts instead of simple roles.
# Keys are agent names (create_agent checks membership by agent_name); values are
# full prompt texts from the agent_prompts module. Agents not listed here fall
# back to the role/instructions defined in the YAML configuration.
RICH_PROMPTS = {
    "code": agent_prompts.CODE_AGENT_PROMPT,
    "research": agent_prompts.RESEARCH_AGENT_PROMPT,
    "calculator": agent_prompts.CALCULATOR_AGENT_PROMPT,
    "general": agent_prompts.GENERAL_AGENT_PROMPT,
    "shell": agent_prompts.SHELL_AGENT_PROMPT,
    "summary": agent_prompts.SUMMARY_AGENT_PROMPT,
    "finance": agent_prompts.FINANCE_AGENT_PROMPT,
    "news": agent_prompts.NEWS_AGENT_PROMPT,
    "data_analyst": agent_prompts.DATA_ANALYST_AGENT_PROMPT,
}
|
|
63
|
+
|
|
64
|
+
|
|
65
|
+
def create_agent(agent_name: str, config: Config) -> Agent:
    """Create an agent instance from configuration.

    Args:
        agent_name: Name of the agent to create
        config: Application configuration

    Returns:
        Configured Agent instance

    Raises:
        ValueError: If agent_name is not found in configuration

    """
    from .ai import get_model_instance  # noqa: PLC0415

    # Use passed config (config_path is deprecated)
    agent_config = config.get_agent(agent_name)
    defaults = config.defaults

    # Resolve tools; a missing tool is logged and skipped rather than fatal.
    tools: list = []  # Use list type to satisfy Agent's parameter type
    for tool_name in agent_config.tools:
        try:
            tools.append(get_tool_by_name(tool_name))
        except ValueError as e:
            logger.warning(f"Could not load tool '{tool_name}' for agent '{agent_name}': {e}")

    SESSIONS_DIR.mkdir(parents=True, exist_ok=True)
    storage = SqliteStorage(table_name=f"{agent_name}_sessions", db_file=str(SESSIONS_DIR / f"{agent_name}.db"))

    # Get model config for identity context
    model_name = agent_config.model or "default"
    if model_name in config.models:
        model_config = config.models[model_name]
        model_provider = model_config.provider.title()  # Capitalize provider name
        model_id = model_config.id
    else:
        # Fallback if model not found
        model_provider = "AI"
        model_id = model_name

    # Add identity context to all agents using the unified template
    identity_context = agent_prompts.AGENT_IDENTITY_CONTEXT.format(
        display_name=agent_config.display_name,
        agent_name=agent_name,
        model_provider=model_provider,
        model_id=model_id,
    )

    # Add current date and time context with user's configured timezone
    datetime_context = get_datetime_context(config.timezone)
    full_context = identity_context + datetime_context

    # Use rich prompt if available, otherwise use YAML config
    if agent_name in RICH_PROMPTS:
        logger.info(f"Using rich prompt for agent: {agent_name}")
        role = full_context + RICH_PROMPTS[agent_name]
        instructions = []  # Instructions are in the rich prompt
    else:
        logger.info(f"Using YAML config for agent: {agent_name}")
        role = full_context + agent_config.role
        # BUGFIX: copy the list so the append below does not mutate the shared
        # config object; appending to agent_config.instructions directly would
        # add INTERACTIVE_QUESTION_PROMPT again on every create_agent call.
        instructions = list(agent_config.instructions)

    # BUGFIX: pass the resolved model_name (which falls back to "default"),
    # not agent_config.model, which may be None.
    model = get_model_instance(config, model_name)
    logger.info(f"Creating agent '{agent_name}' with model: {model.__class__.__name__}(id={model.id})")

    instructions.append(agent_prompts.INTERACTIVE_QUESTION_PROMPT)

    agent = Agent(
        name=agent_config.display_name,
        role=role,
        model=model,
        tools=tools,
        instructions=instructions,
        storage=storage,
        add_history_to_messages=agent_config.add_history_to_messages
        if agent_config.add_history_to_messages is not None
        else defaults.add_history_to_messages,
        num_history_runs=agent_config.num_history_runs or defaults.num_history_runs,
        markdown=agent_config.markdown if agent_config.markdown is not None else defaults.markdown,
    )
    logger.info(f"Created agent '{agent_name}' ({agent_config.display_name}) with {len(tools)} tools")

    return agent
|
|
156
|
+
|
|
157
|
+
|
|
158
|
+
def describe_agent(agent_name: str, config: Config) -> str:
    """Generate a description of an agent or team based on its configuration.

    Args:
        agent_name: Name of the agent or team to describe
        config: Application configuration

    Returns:
        Human-readable description of the agent or team

    """
    # The router is built in and has a fixed description.
    if agent_name == ROUTER_AGENT_NAME:
        return (
            "router\n"
            " - Route messages to the most appropriate agent based on context and expertise.\n"
            " - Analyzes incoming messages and determines which agent is best suited to respond."
        )

    # Teams are described by role, membership, and collaboration mode.
    if agent_name in config.teams:
        team = config.teams[agent_name]
        lines = [agent_name]
        if team.role:
            lines.append(f"- {team.role}")
        lines.append(f"- Team of agents: {', '.join(team.agents)}")
        lines.append(f"- Collaboration mode: {team.mode}")
        return "\n ".join(lines)

    if agent_name not in config.agents:
        return f"{agent_name}: Unknown agent or team"

    agent_cfg = config.agents[agent_name]

    # Use the agent name (not display name) for routing consistency.
    lines = [agent_name]
    if agent_cfg.role:
        lines.append(f"- {agent_cfg.role}")

    if agent_cfg.tools:
        lines.append(f"- Tools: {', '.join(agent_cfg.tools)}")

    if agent_cfg.instructions:
        # The first instruction is usually the most descriptive one;
        # include it only when it is reasonably short.
        lead = agent_cfg.instructions[0]
        if len(lead) < MAX_INSTRUCTION_LENGTH:
            lines.append(f"- {lead}")

    return "\n ".join(lines)
|
|
211
|
+
|
|
212
|
+
|
|
213
|
+
def get_agent_ids_for_room(room_key: str, config: Config) -> list[str]:
    """Get all agent Matrix IDs assigned to a specific room."""
    # The router is always present in every room.
    router_id = config.ids[ROUTER_AGENT_NAME].full_id
    member_ids = [
        config.ids[name].full_id
        for name, agent_cfg in config.agents.items()
        if room_key in agent_cfg.rooms
    ]
    return [router_id, *member_ids]
|
|
223
|
+
|
|
224
|
+
|
|
225
|
+
def get_rooms_for_entity(entity_name: str, config: Config) -> list[str]:
    """Get the list of room aliases that an entity (agent/team) should be in.

    Args:
        entity_name: Name of the agent or team
        config: Configuration object

    Returns:
        List of room aliases the entity should be in

    """
    # Teams take precedence over agents with the same name.
    if entity_name in config.teams:
        return config.teams[entity_name].rooms

    # The router joins every configured room.
    if entity_name == ROUTER_AGENT_NAME:
        return list(config.get_all_configured_rooms())

    # Regular agents list their rooms explicitly.
    if entity_name in config.agents:
        return config.agents[entity_name].rooms

    return []
|
mindroom/ai.py
ADDED
|
@@ -0,0 +1,421 @@
|
|
|
1
|
+
"""AI integration module for MindRoom agents and memory management."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
import functools
|
|
6
|
+
import os
|
|
7
|
+
from typing import TYPE_CHECKING, Any
|
|
8
|
+
|
|
9
|
+
import diskcache
|
|
10
|
+
from agno.models.anthropic import Claude
|
|
11
|
+
from agno.models.cerebras import Cerebras
|
|
12
|
+
from agno.models.google import Gemini
|
|
13
|
+
from agno.models.ollama import Ollama
|
|
14
|
+
from agno.models.openai import OpenAIChat
|
|
15
|
+
from agno.models.openrouter import OpenRouter
|
|
16
|
+
from agno.run.response import (
|
|
17
|
+
RunResponse,
|
|
18
|
+
RunResponseContentEvent,
|
|
19
|
+
ToolCallCompletedEvent,
|
|
20
|
+
ToolCallStartedEvent,
|
|
21
|
+
)
|
|
22
|
+
|
|
23
|
+
from .agents import create_agent
|
|
24
|
+
from .constants import ENABLE_AI_CACHE
|
|
25
|
+
from .credentials_sync import get_api_key_for_provider, get_ollama_host
|
|
26
|
+
from .error_handling import get_user_friendly_error_message
|
|
27
|
+
from .logging_config import get_logger
|
|
28
|
+
from .memory import build_memory_enhanced_prompt
|
|
29
|
+
|
|
30
|
+
if TYPE_CHECKING:
|
|
31
|
+
from collections.abc import AsyncIterator
|
|
32
|
+
from pathlib import Path
|
|
33
|
+
|
|
34
|
+
from agno.agent import Agent
|
|
35
|
+
from agno.models.base import Model
|
|
36
|
+
|
|
37
|
+
from .config import Config, ModelConfig
|
|
38
|
+
|
|
39
|
+
logger = get_logger(__name__)
|
|
40
|
+
|
|
41
|
+
|
|
42
|
+
def _extract_response_content(response: RunResponse) -> str:
|
|
43
|
+
response_parts = []
|
|
44
|
+
|
|
45
|
+
# Add main content if present
|
|
46
|
+
if response.content:
|
|
47
|
+
response_parts.append(response.content)
|
|
48
|
+
|
|
49
|
+
# Add formatted tool calls if present (similar to agno's print_response)
|
|
50
|
+
# Only add if there are actual tool calls to display
|
|
51
|
+
if response.formatted_tool_calls and any(response.formatted_tool_calls):
|
|
52
|
+
tool_calls_section = "\n\n**Tool Calls:**"
|
|
53
|
+
for tool_call in response.formatted_tool_calls:
|
|
54
|
+
tool_calls_section += f"\n• {tool_call}"
|
|
55
|
+
response_parts.append(tool_calls_section)
|
|
56
|
+
|
|
57
|
+
return "\n".join(response_parts) if response_parts else ""
|
|
58
|
+
|
|
59
|
+
|
|
60
|
+
def _format_tool_started_message(event: ToolCallStartedEvent) -> str:
|
|
61
|
+
if not event.tool:
|
|
62
|
+
return ""
|
|
63
|
+
|
|
64
|
+
tool_name = event.tool.tool_name if event.tool.tool_name else "tool"
|
|
65
|
+
tool_args = event.tool.tool_args if event.tool.tool_args else {}
|
|
66
|
+
|
|
67
|
+
# Format similar to agno's formatted_tool_calls
|
|
68
|
+
if tool_args:
|
|
69
|
+
args_str = ", ".join(f"{k}={v}" for k, v in tool_args.items())
|
|
70
|
+
msg = f"\n\n🔧 **Tool Call:** `{tool_name}({args_str})`\n"
|
|
71
|
+
else:
|
|
72
|
+
msg = f"\n\n🔧 **Tool Call:** `{tool_name}()`\n"
|
|
73
|
+
|
|
74
|
+
return msg
|
|
75
|
+
|
|
76
|
+
|
|
77
|
+
def _format_tool_completed_message(event: ToolCallCompletedEvent) -> str:
|
|
78
|
+
if not event.tool:
|
|
79
|
+
return ""
|
|
80
|
+
|
|
81
|
+
tool_name = event.tool.tool_name if event.tool.tool_name else "tool"
|
|
82
|
+
|
|
83
|
+
# Check both event.content and tool.result for the output
|
|
84
|
+
result = event.content or (event.tool.result if event.tool else None)
|
|
85
|
+
|
|
86
|
+
if result:
|
|
87
|
+
# Format the result nicely
|
|
88
|
+
return f"✅ **`{tool_name}` result:**\n{result}\n\n"
|
|
89
|
+
|
|
90
|
+
return f"✅ **`{tool_name}`** completed\n\n"
|
|
91
|
+
|
|
92
|
+
|
|
93
|
+
@functools.cache
def get_cache(storage_path: Path) -> diskcache.Cache | None:
    """Get or create a cache instance for the given storage path.

    Returns None when AI caching is disabled; memoized per storage path.
    """
    if not ENABLE_AI_CACHE:
        return None
    return diskcache.Cache(storage_path / ".ai_cache")
|
|
97
|
+
|
|
98
|
+
|
|
99
|
+
def _set_api_key_env_var(provider: str) -> None:
    """Set environment variable for a provider from CredentialsManager.

    Since we sync from .env to CredentialsManager on startup,
    this will always use the latest keys from .env.

    Args:
        provider: Provider name (e.g., 'openai', 'anthropic')

    """
    # Known providers and the environment variable each one expects.
    env_var_by_provider = {
        "openai": "OPENAI_API_KEY",
        "anthropic": "ANTHROPIC_API_KEY",
        "openrouter": "OPENROUTER_API_KEY",
        "gemini": "GOOGLE_API_KEY",
        "google": "GOOGLE_API_KEY",
        "cerebras": "CEREBRAS_API_KEY",
        "deepseek": "DEEPSEEK_API_KEY",
        "groq": "GROQ_API_KEY",
    }

    env_var = env_var_by_provider.get(provider)
    if env_var is None:
        # Unknown provider: nothing to export.
        return

    # CredentialsManager has already been synced from .env at startup.
    api_key = get_api_key_for_provider(provider)
    if api_key:
        os.environ[env_var] = api_key
        logger.debug(f"Set {env_var} from CredentialsManager")
|
|
131
|
+
|
|
132
|
+
|
|
133
|
+
def _create_model_for_provider(provider: str, model_id: str, model_config: ModelConfig, extra_kwargs: dict) -> Model:
    """Create a model instance for a specific provider.

    Args:
        provider: The AI provider name
        model_id: The model identifier
        model_config: The model configuration object
        extra_kwargs: Additional keyword arguments for the model

    Returns:
        Instantiated model for the provider

    Raises:
        ValueError: If provider not supported

    """
    if provider == "ollama":
        # Priority: model config > env/CredentialsManager > default.
        # This allows per-model host configuration in config.yaml.
        host = model_config.host or get_ollama_host() or "http://localhost:11434"
        logger.debug(f"Using Ollama host: {host}")
        return Ollama(id=model_id, host=host, **extra_kwargs)

    if provider == "openrouter":
        # OpenRouter needs the API key passed explicitly because it captures
        # the environment variable at import time, not at instantiation time.
        api_key = get_api_key_for_provider(provider)
        if not api_key:
            logger.warning("No OpenRouter API key found in environment or CredentialsManager")
        return OpenRouter(id=model_id, api_key=api_key, **extra_kwargs)

    # The remaining providers only need id + extra kwargs; dispatch by class.
    model_classes = {
        "openai": OpenAIChat,
        "anthropic": Claude,
        "gemini": Gemini,
        "google": Gemini,
        "cerebras": Cerebras,
    }
    model_cls = model_classes.get(provider)
    if model_cls is not None:
        return model_cls(id=model_id, **extra_kwargs)

    msg = f"Unsupported AI provider: {provider}"
    raise ValueError(msg)
|
|
173
|
+
|
|
174
|
+
|
|
175
|
+
def get_model_instance(config: Config, model_name: str = "default") -> Model:
    """Get a model instance from config.yaml.

    Args:
        config: Application configuration
        model_name: Name of the model configuration to use (default: "default")

    Returns:
        Instantiated model

    Raises:
        ValueError: If model not found or provider not supported

    """
    if model_name not in config.models:
        known = ", ".join(sorted(config.models.keys()))
        msg = f"Unknown model: {model_name}. Available models: {known}"
        raise ValueError(msg)

    model_config = config.models[model_name]

    logger.info("Using AI model", model=model_name, provider=model_config.provider, id=model_config.id)

    # Export the provider's API key from CredentialsManager so Agno can read it.
    _set_api_key_env_var(model_config.provider)

    return _create_model_for_provider(
        model_config.provider,
        model_config.id,
        model_config,
        model_config.extra_kwargs or {},
    )
|
|
207
|
+
|
|
208
|
+
|
|
209
|
+
def _build_full_prompt(prompt: str, thread_history: list[dict[str, Any]] | None = None) -> str:
|
|
210
|
+
"""Build full prompt with thread history context."""
|
|
211
|
+
if not thread_history:
|
|
212
|
+
return prompt
|
|
213
|
+
|
|
214
|
+
context = "Previous conversation in this thread:\n"
|
|
215
|
+
for msg in thread_history:
|
|
216
|
+
context += f"{msg['sender']}: {msg['body']}\n"
|
|
217
|
+
context += "\nCurrent message:\n"
|
|
218
|
+
return context + prompt
|
|
219
|
+
|
|
220
|
+
|
|
221
|
+
def _build_cache_key(agent: Agent, full_prompt: str, session_id: str) -> str:
|
|
222
|
+
model = agent.model
|
|
223
|
+
assert model is not None
|
|
224
|
+
return f"{agent.name}:{model.__class__.__name__}:{model.id}:{full_prompt}:{session_id}"
|
|
225
|
+
|
|
226
|
+
|
|
227
|
+
async def _cached_agent_run(
    agent: Agent,
    full_prompt: str,
    session_id: str,
    agent_name: str,
    storage_path: Path,
) -> RunResponse:
    """Cached wrapper for agent.arun() calls."""
    cache = get_cache(storage_path)

    # Caching disabled: just run the agent directly.
    if cache is None:
        return await agent.arun(full_prompt, session_id=session_id)  # type: ignore[no-any-return]

    assert agent.model is not None
    key = _build_cache_key(agent, full_prompt, session_id)

    hit = cache.get(key)
    if hit is not None:
        logger.info("Cache hit", agent=agent_name)
        return hit  # type: ignore[no-any-return]

    response = await agent.arun(full_prompt, session_id=session_id)

    cache.set(key, response)
    logger.info("Response cached", agent=agent_name)

    return response  # type: ignore[no-any-return]
|
|
253
|
+
|
|
254
|
+
|
|
255
|
+
async def _prepare_agent_and_prompt(
    agent_name: str,
    prompt: str,
    storage_path: Path,
    room_id: str | None,
    config: Config,
    thread_history: list[dict[str, Any]] | None = None,
) -> tuple[Agent, str]:
    """Prepare agent and full prompt for AI processing.

    Enhances the prompt with memory context, prepends any thread history,
    and instantiates the agent.

    Args:
        agent_name: Name of the agent to create
        prompt: Raw user prompt
        storage_path: Path for storing agent data
        room_id: Optional room ID for room memory access
        config: Application configuration
        thread_history: Optional prior messages in the thread

    Returns:
        Tuple of (agent, full_prompt)

    """
    enhanced_prompt = await build_memory_enhanced_prompt(prompt, agent_name, storage_path, config, room_id)
    full_prompt = _build_full_prompt(enhanced_prompt, thread_history)
    logger.info("Preparing agent and prompt", agent=agent_name, full_prompt=full_prompt)
    agent = create_agent(agent_name, config)
    return agent, full_prompt
|
|
274
|
+
|
|
275
|
+
|
|
276
|
+
async def ai_response(
    agent_name: str,
    prompt: str,
    session_id: str,
    storage_path: Path,
    config: Config,
    thread_history: list[dict[str, Any]] | None = None,
    room_id: str | None = None,
) -> str:
    """Generate a response using the specified agno Agent with memory integration.

    Args:
        agent_name: Name of the agent to use
        prompt: User prompt
        session_id: Session ID for conversation tracking
        storage_path: Path for storing agent data
        config: Application configuration
        thread_history: Optional thread history
        room_id: Optional room ID for room memory access

    Returns:
        Agent response string (or a user-friendly error message on failure)

    """
    logger.info("AI request", agent=agent_name)

    # Agent creation can fail (e.g. a missing API key); report it gracefully.
    try:
        agent, full_prompt = await _prepare_agent_and_prompt(
            agent_name, prompt, storage_path, room_id, config, thread_history
        )
    except Exception as exc:
        logger.exception("Error preparing agent", agent=agent_name)
        return get_user_friendly_error_message(exc, agent_name)

    # The model call itself can fail too (network issues, rate limits, ...).
    try:
        run_result = await _cached_agent_run(agent, full_prompt, session_id, agent_name, storage_path)
    except Exception as exc:
        logger.exception("Error generating AI response", agent=agent_name)
        return get_user_friendly_error_message(exc, agent_name)

    # Content extraction is pure formatting and should not raise.
    return _extract_response_content(run_result)
|
|
325
|
+
|
|
326
|
+
|
|
327
|
+
async def stream_agent_response(  # noqa: C901, PLR0912
    agent_name: str,
    prompt: str,
    session_id: str,
    storage_path: Path,
    config: Config,
    thread_history: list[dict[str, Any]] | None = None,
    room_id: str | None = None,
) -> AsyncIterator[str]:
    """Generate streaming AI response using Agno's streaming API.

    Checks cache first - if found, yields the cached response immediately.
    Otherwise streams the new response and caches it.

    Args:
        agent_name: Name of the agent to use
        prompt: User prompt
        session_id: Session ID for conversation tracking
        storage_path: Path for storing agent data
        config: Application configuration
        thread_history: Optional thread history
        room_id: Optional room ID for room memory access

    Yields:
        Chunks of the AI response as they become available

    """
    logger.info("AI streaming request", agent=agent_name)

    # Prepare agent and prompt - this can fail if agent creation fails
    try:
        agent, full_prompt = await _prepare_agent_and_prompt(
            agent_name,
            prompt,
            storage_path,
            room_id,
            config,
            thread_history,
        )
    except Exception as e:
        logger.exception("Error preparing agent for streaming", agent=agent_name)
        yield get_user_friendly_error_message(e, agent_name)
        return

    # Check cache (this shouldn't fail).
    # NOTE: cache_key is only bound inside this branch; the caching step at the
    # end relies on `cache is not None` to guarantee it exists.
    cache = get_cache(storage_path)
    if cache is not None:
        model = agent.model
        assert model is not None
        cache_key = _build_cache_key(agent, full_prompt, session_id)
        cached_result = cache.get(cache_key)
        if cached_result is not None:
            logger.info("Cache hit", agent=agent_name)
            # A cached hit is yielded as one chunk, not re-streamed.
            response_text = cached_result.content or ""
            yield response_text
            return

    # Accumulates every yielded chunk so the full reply can be cached below.
    full_response = ""

    # Execute the streaming AI call - this can fail for network, rate limits, etc.
    try:
        stream_generator = await agent.arun(full_prompt, session_id=session_id, stream=True)
    except Exception as e:
        logger.exception("Error starting streaming AI response")
        yield get_user_friendly_error_message(e, agent_name)
        return

    # Process the stream events: content chunks are passed through verbatim,
    # tool start/completion events are rendered as human-readable notices,
    # anything else is logged and dropped.
    try:
        async for event in stream_generator:
            if isinstance(event, RunResponseContentEvent) and event.content:
                chunk_text = str(event.content)
                full_response += chunk_text
                yield chunk_text
            elif isinstance(event, ToolCallStartedEvent):
                tool_msg = _format_tool_started_message(event)
                if tool_msg:
                    full_response += tool_msg
                    yield tool_msg
            elif isinstance(event, ToolCallCompletedEvent):
                result_msg = _format_tool_completed_message(event)
                if result_msg:
                    full_response += result_msg
                    yield result_msg
            else:
                logger.warning(f"Unhandled event type: {type(event).__name__} - {event}")
    except Exception as e:
        logger.exception("Error during streaming AI response")
        yield get_user_friendly_error_message(e, agent_name)
        return

    # Cache the assembled response for future identical requests.
    if cache is not None and full_response:
        cached_response = RunResponse(content=full_response)
        cache.set(cache_key, cached_response)
        logger.info("Response cached", agent=agent_name)
|
mindroom/api/__init__.py
ADDED
|
@@ -0,0 +1 @@
|
|
|
1
|
+
"""Backend initialization for the widget module."""
|