mindroom 0.0.0__py3-none-any.whl → 0.1.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- mindroom/__init__.py +3 -0
- mindroom/agent_prompts.py +963 -0
- mindroom/agents.py +248 -0
- mindroom/ai.py +421 -0
- mindroom/api/__init__.py +1 -0
- mindroom/api/credentials.py +137 -0
- mindroom/api/google_integration.py +355 -0
- mindroom/api/google_tools_helper.py +40 -0
- mindroom/api/homeassistant_integration.py +421 -0
- mindroom/api/integrations.py +189 -0
- mindroom/api/main.py +506 -0
- mindroom/api/matrix_operations.py +219 -0
- mindroom/api/tools.py +94 -0
- mindroom/background_tasks.py +87 -0
- mindroom/bot.py +2470 -0
- mindroom/cli.py +86 -0
- mindroom/commands.py +377 -0
- mindroom/config.py +343 -0
- mindroom/config_commands.py +324 -0
- mindroom/config_confirmation.py +411 -0
- mindroom/constants.py +52 -0
- mindroom/credentials.py +146 -0
- mindroom/credentials_sync.py +134 -0
- mindroom/custom_tools/__init__.py +8 -0
- mindroom/custom_tools/config_manager.py +765 -0
- mindroom/custom_tools/gmail.py +92 -0
- mindroom/custom_tools/google_calendar.py +92 -0
- mindroom/custom_tools/google_sheets.py +92 -0
- mindroom/custom_tools/homeassistant.py +341 -0
- mindroom/error_handling.py +35 -0
- mindroom/file_watcher.py +49 -0
- mindroom/interactive.py +313 -0
- mindroom/logging_config.py +207 -0
- mindroom/matrix/__init__.py +1 -0
- mindroom/matrix/client.py +782 -0
- mindroom/matrix/event_info.py +173 -0
- mindroom/matrix/identity.py +149 -0
- mindroom/matrix/large_messages.py +267 -0
- mindroom/matrix/mentions.py +141 -0
- mindroom/matrix/message_builder.py +94 -0
- mindroom/matrix/message_content.py +209 -0
- mindroom/matrix/presence.py +178 -0
- mindroom/matrix/rooms.py +311 -0
- mindroom/matrix/state.py +77 -0
- mindroom/matrix/typing.py +91 -0
- mindroom/matrix/users.py +217 -0
- mindroom/memory/__init__.py +21 -0
- mindroom/memory/config.py +137 -0
- mindroom/memory/functions.py +396 -0
- mindroom/py.typed +0 -0
- mindroom/response_tracker.py +128 -0
- mindroom/room_cleanup.py +139 -0
- mindroom/routing.py +107 -0
- mindroom/scheduling.py +758 -0
- mindroom/stop.py +207 -0
- mindroom/streaming.py +203 -0
- mindroom/teams.py +749 -0
- mindroom/thread_utils.py +318 -0
- mindroom/tools/__init__.py +520 -0
- mindroom/tools/agentql.py +64 -0
- mindroom/tools/airflow.py +57 -0
- mindroom/tools/apify.py +49 -0
- mindroom/tools/arxiv.py +64 -0
- mindroom/tools/aws_lambda.py +41 -0
- mindroom/tools/aws_ses.py +57 -0
- mindroom/tools/baidusearch.py +87 -0
- mindroom/tools/brightdata.py +116 -0
- mindroom/tools/browserbase.py +62 -0
- mindroom/tools/cal_com.py +98 -0
- mindroom/tools/calculator.py +112 -0
- mindroom/tools/cartesia.py +84 -0
- mindroom/tools/composio.py +166 -0
- mindroom/tools/config_manager.py +44 -0
- mindroom/tools/confluence.py +73 -0
- mindroom/tools/crawl4ai.py +101 -0
- mindroom/tools/csv.py +104 -0
- mindroom/tools/custom_api.py +106 -0
- mindroom/tools/dalle.py +85 -0
- mindroom/tools/daytona.py +180 -0
- mindroom/tools/discord.py +81 -0
- mindroom/tools/docker.py +73 -0
- mindroom/tools/duckdb.py +124 -0
- mindroom/tools/duckduckgo.py +99 -0
- mindroom/tools/e2b.py +121 -0
- mindroom/tools/eleven_labs.py +77 -0
- mindroom/tools/email.py +74 -0
- mindroom/tools/exa.py +246 -0
- mindroom/tools/fal.py +50 -0
- mindroom/tools/file.py +80 -0
- mindroom/tools/financial_datasets_api.py +112 -0
- mindroom/tools/firecrawl.py +124 -0
- mindroom/tools/gemini.py +85 -0
- mindroom/tools/giphy.py +49 -0
- mindroom/tools/github.py +376 -0
- mindroom/tools/gmail.py +102 -0
- mindroom/tools/google_calendar.py +55 -0
- mindroom/tools/google_maps.py +112 -0
- mindroom/tools/google_sheets.py +86 -0
- mindroom/tools/googlesearch.py +83 -0
- mindroom/tools/groq.py +77 -0
- mindroom/tools/hackernews.py +54 -0
- mindroom/tools/jina.py +108 -0
- mindroom/tools/jira.py +70 -0
- mindroom/tools/linear.py +103 -0
- mindroom/tools/linkup.py +65 -0
- mindroom/tools/lumalabs.py +71 -0
- mindroom/tools/mem0.py +82 -0
- mindroom/tools/modelslabs.py +85 -0
- mindroom/tools/moviepy_video_tools.py +62 -0
- mindroom/tools/newspaper4k.py +63 -0
- mindroom/tools/openai.py +143 -0
- mindroom/tools/openweather.py +89 -0
- mindroom/tools/oxylabs.py +54 -0
- mindroom/tools/pandas.py +35 -0
- mindroom/tools/pubmed.py +64 -0
- mindroom/tools/python.py +120 -0
- mindroom/tools/reddit.py +155 -0
- mindroom/tools/replicate.py +56 -0
- mindroom/tools/resend.py +55 -0
- mindroom/tools/scrapegraph.py +87 -0
- mindroom/tools/searxng.py +120 -0
- mindroom/tools/serpapi.py +55 -0
- mindroom/tools/serper.py +81 -0
- mindroom/tools/shell.py +46 -0
- mindroom/tools/slack.py +80 -0
- mindroom/tools/sleep.py +38 -0
- mindroom/tools/spider.py +62 -0
- mindroom/tools/sql.py +138 -0
- mindroom/tools/tavily.py +104 -0
- mindroom/tools/telegram.py +54 -0
- mindroom/tools/todoist.py +103 -0
- mindroom/tools/trello.py +121 -0
- mindroom/tools/twilio.py +97 -0
- mindroom/tools/web_browser_tools.py +37 -0
- mindroom/tools/webex.py +63 -0
- mindroom/tools/website.py +45 -0
- mindroom/tools/whatsapp.py +81 -0
- mindroom/tools/wikipedia.py +45 -0
- mindroom/tools/x.py +97 -0
- mindroom/tools/yfinance.py +121 -0
- mindroom/tools/youtube.py +81 -0
- mindroom/tools/zendesk.py +62 -0
- mindroom/tools/zep.py +107 -0
- mindroom/tools/zoom.py +62 -0
- mindroom/tools_metadata.json +7643 -0
- mindroom/tools_metadata.py +220 -0
- mindroom/topic_generator.py +153 -0
- mindroom/voice_handler.py +266 -0
- mindroom-0.1.1.dist-info/METADATA +425 -0
- mindroom-0.1.1.dist-info/RECORD +152 -0
- {mindroom-0.0.0.dist-info → mindroom-0.1.1.dist-info}/WHEEL +1 -2
- mindroom-0.1.1.dist-info/entry_points.txt +2 -0
- mindroom-0.0.0.dist-info/METADATA +0 -24
- mindroom-0.0.0.dist-info/RECORD +0 -4
- mindroom-0.0.0.dist-info/top_level.txt +0 -1
|
@@ -0,0 +1,21 @@
|
|
|
1
|
+
"""Memory management for mindroom agents and rooms."""
|
|
2
|
+
|
|
3
|
+
from .functions import (
|
|
4
|
+
add_agent_memory,
|
|
5
|
+
add_room_memory,
|
|
6
|
+
build_memory_enhanced_prompt,
|
|
7
|
+
format_memories_as_context,
|
|
8
|
+
search_agent_memories,
|
|
9
|
+
search_room_memories,
|
|
10
|
+
store_conversation_memory,
|
|
11
|
+
)
|
|
12
|
+
|
|
13
|
+
__all__ = [
|
|
14
|
+
"add_agent_memory",
|
|
15
|
+
"add_room_memory",
|
|
16
|
+
"build_memory_enhanced_prompt",
|
|
17
|
+
"format_memories_as_context",
|
|
18
|
+
"search_agent_memories",
|
|
19
|
+
"search_room_memories",
|
|
20
|
+
"store_conversation_memory",
|
|
21
|
+
]
|
|
@@ -0,0 +1,137 @@
|
|
|
1
|
+
"""Memory configuration and setup."""
|
|
2
|
+
|
|
3
|
+
import os
|
|
4
|
+
from pathlib import Path
|
|
5
|
+
from typing import Any
|
|
6
|
+
|
|
7
|
+
from mem0 import AsyncMemory
|
|
8
|
+
|
|
9
|
+
from mindroom.config import Config
|
|
10
|
+
from mindroom.credentials import get_credentials_manager
|
|
11
|
+
from mindroom.logging_config import get_logger
|
|
12
|
+
|
|
13
|
+
logger = get_logger(__name__)
|
|
14
|
+
|
|
15
|
+
|
|
16
|
+
def _resolve_ollama_host(creds_manager: Any, fallback: str | None) -> str:
    """Return the Ollama host, preferring stored credentials over config.

    Args:
        creds_manager: Credentials manager used to look up saved Ollama settings
        fallback: Host from config.yaml, if any

    Returns:
        The host URL, defaulting to the local Ollama endpoint

    """
    ollama_creds = creds_manager.load_credentials("ollama")
    if ollama_creds and "host" in ollama_creds:
        return ollama_creds["host"]
    return fallback or "http://localhost:11434"


def _build_embedder_config(config: Config, creds_manager: Any) -> dict[str, Any]:
    """Build the Mem0 embedder section from config.yaml and stored credentials."""
    embedder_config: dict[str, Any] = {
        "provider": config.memory.embedder.provider,
        "config": {
            "model": config.memory.embedder.config.model,
        },
    }

    # Add provider-specific configuration
    if config.memory.embedder.provider == "openai":
        # Mem0 reads the key from the environment, so export it from CredentialsManager
        api_key = creds_manager.get_api_key("openai")
        if api_key:
            os.environ["OPENAI_API_KEY"] = api_key
    elif config.memory.embedder.provider == "ollama":
        embedder_config["config"]["ollama_base_url"] = _resolve_ollama_host(
            creds_manager,
            config.memory.embedder.config.host,
        )

    return embedder_config


def _build_llm_config(config: Config, creds_manager: Any) -> dict[str, Any]:
    """Build the Mem0 LLM section, falling back to a local Ollama default."""
    if not config.memory.llm:
        # Fallback if no LLM configured
        logger.warning("No memory LLM configured, using default ollama/llama3.2")
        return {
            "provider": "ollama",
            "config": {
                "model": "llama3.2",
                "ollama_base_url": _resolve_ollama_host(creds_manager, None),
                "temperature": 0.1,
                "top_p": 1,
            },
        }

    llm_config: dict[str, Any] = {
        "provider": config.memory.llm.provider,
        "config": {},
    }

    # Copy config but handle provider-specific field names
    for key, value in config.memory.llm.config.items():
        if key == "host" and config.memory.llm.provider == "ollama":
            llm_config["config"]["ollama_base_url"] = _resolve_ollama_host(creds_manager, value)
        elif key != "host":  # Skip host for other providers
            llm_config["config"][key] = value

    # Set environment variables from CredentialsManager for Mem0 to use
    if config.memory.llm.provider == "openai":
        api_key = creds_manager.get_api_key("openai")
        if api_key:
            os.environ["OPENAI_API_KEY"] = api_key
    elif config.memory.llm.provider == "anthropic":
        api_key = creds_manager.get_api_key("anthropic")
        if api_key:
            os.environ["ANTHROPIC_API_KEY"] = api_key

    logger.info(
        f"Using {config.memory.llm.provider} model '{config.memory.llm.config.get('model')}' for memory",
    )
    return llm_config


def get_memory_config(storage_path: Path, config: Config) -> dict:
    """Get Mem0 configuration with ChromaDB backend.

    Args:
        storage_path: Base directory for memory storage
        config: Application configuration

    Returns:
        Configuration dictionary for Mem0

    """
    creds_manager = get_credentials_manager()

    # Ensure storage directories exist
    chroma_path = storage_path / "chroma"
    chroma_path.mkdir(parents=True, exist_ok=True)

    return {
        "embedder": _build_embedder_config(config, creds_manager),
        "llm": _build_llm_config(config, creds_manager),
        "vector_store": {
            "provider": "chroma",
            "config": {
                "collection_name": "mindroom_memories",
                "path": str(chroma_path),
            },
        },
    }
|
|
117
|
+
|
|
118
|
+
|
|
119
|
+
async def create_memory_instance(storage_path: Path, config: Config) -> AsyncMemory:
    """Create a Mem0 memory instance with ChromaDB backend.

    Args:
        storage_path: Base directory for memory storage
        config: Application configuration

    Returns:
        Configured AsyncMemory instance

    """
    # Mem0 takes a plain configuration dictionary rather than config objects.
    instance = await AsyncMemory.from_config(get_memory_config(storage_path, config))
    logger.info(f"Created memory instance with ChromaDB at {storage_path}")
    return instance
|
|
@@ -0,0 +1,396 @@
|
|
|
1
|
+
"""Simple memory management functions following Mem0 patterns."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
from typing import TYPE_CHECKING, Any, TypedDict
|
|
6
|
+
|
|
7
|
+
from mindroom.logging_config import get_logger
|
|
8
|
+
|
|
9
|
+
from .config import create_memory_instance
|
|
10
|
+
|
|
11
|
+
if TYPE_CHECKING:
|
|
12
|
+
from pathlib import Path
|
|
13
|
+
|
|
14
|
+
from mindroom.config import Config
|
|
15
|
+
|
|
16
|
+
|
|
17
|
+
class MemoryResult(TypedDict, total=False):
    """Type for memory search results from Mem0.

    All keys are optional (``total=False``): callers access them via ``.get``.
    """

    # Unique identifier assigned by Mem0
    id: str
    # The extracted memory text itself
    memory: str
    # Content hash used by Mem0 for dedup
    hash: str
    # Arbitrary metadata stored alongside the memory (may be absent or None)
    metadata: dict[str, Any] | None
    # Relevance score from vector search
    score: float
    # ISO timestamp of creation
    created_at: str
    # ISO timestamp of last update, if any
    updated_at: str | None
    # Namespace the memory was stored under (e.g. "agent_<name>", "room_<id>")
    user_id: str
|
|
28
|
+
|
|
29
|
+
|
|
30
|
+
logger = get_logger(__name__)
|
|
31
|
+
|
|
32
|
+
|
|
33
|
+
async def add_agent_memory(
    content: str,
    agent_name: str,
    storage_path: Path,
    config: Config,
    metadata: dict | None = None,
) -> None:
    """Add a memory for an agent.

    Args:
        content: The memory content to store
        agent_name: Name of the agent
        storage_path: Storage path for memory
        config: Application configuration
        metadata: Optional metadata to store with memory

    """
    memory = await create_memory_instance(storage_path, config)

    # Tag the memory with its owning agent
    metadata = {} if metadata is None else metadata
    metadata["agent"] = agent_name

    # agent_name namespaces memories so each agent has a private store
    try:
        await memory.add(
            [{"role": "user", "content": content}],
            user_id=f"agent_{agent_name}",
            metadata=metadata,
        )
        logger.info("Memory added", agent=agent_name)
    except Exception as e:
        logger.exception("Failed to add memory", agent=agent_name, error=str(e))
|
|
64
|
+
|
|
65
|
+
|
|
66
|
+
def get_team_ids_for_agent(agent_name: str, config: Config) -> list[str]:
    """Get all team IDs that include the specified agent.

    Args:
        agent_name: Name of the agent to find teams for
        config: Application configuration containing team definitions

    Returns:
        List of team IDs (in the format "team_agent1+agent2+...")

    """
    if not config.teams:
        return []

    # Team IDs sort member names so the same roster always maps to one ID,
    # matching the format used when team memories are stored.
    return [
        f"team_{'+'.join(sorted(team.agents))}"
        for team in config.teams.values()
        if agent_name in team.agents
    ]
|
|
90
|
+
|
|
91
|
+
|
|
92
|
+
async def search_agent_memories(
    query: str,
    agent_name: str,
    storage_path: Path,
    config: Config,
    limit: int = 3,
) -> list[MemoryResult]:
    """Search agent memories including team memories.

    Args:
        query: Search query
        agent_name: Name of the agent
        storage_path: Storage path for memory
        config: Application configuration
        limit: Maximum number of results

    Returns:
        List of relevant memories from both individual and team contexts

    """
    memory = await create_memory_instance(storage_path, config)

    # Search individual agent memories
    search_result = await memory.search(query, user_id=f"agent_{agent_name}", limit=limit)
    results = search_result["results"] if isinstance(search_result, dict) and "results" in search_result else []

    # Track seen memory texts once; previously the set was rebuilt per team and
    # never updated while appending, so duplicates within a single team's
    # result batch could all be added.
    seen_memories = {r.get("memory", "") for r in results}

    # Also search team memories
    team_ids = get_team_ids_for_agent(agent_name, config)
    for team_id in team_ids:
        team_result = await memory.search(query, user_id=team_id, limit=limit)
        team_memories = team_result["results"] if isinstance(team_result, dict) and "results" in team_result else []

        # Merge results, avoiding duplicates based on memory content
        for mem in team_memories:
            content = mem.get("memory", "")
            if content not in seen_memories:
                seen_memories.add(content)
                results.append(mem)

        logger.debug("Team memories found", team_id=team_id, count=len(team_memories))

    logger.debug("Total memories found", count=len(results), agent=agent_name)

    # Return top results after merging
    return results[:limit]
|
|
136
|
+
|
|
137
|
+
|
|
138
|
+
async def add_room_memory(
    content: str,
    room_id: str,
    storage_path: Path,
    config: Config,
    agent_name: str | None = None,
    metadata: dict | None = None,
) -> None:
    """Add a memory for a room.

    Args:
        content: The memory content to store
        room_id: Room ID
        storage_path: Storage path for memory
        config: Application configuration
        agent_name: Optional agent that created this memory
        metadata: Optional metadata to store with memory

    """
    memory = await create_memory_instance(storage_path, config)

    if metadata is None:
        metadata = {}
    metadata["room_id"] = room_id
    if agent_name:
        metadata["contributed_by"] = agent_name

    messages = [{"role": "user", "content": content}]

    # Matrix room IDs contain ":" and "!", which are unsafe in storage keys
    safe_room_id = room_id.replace(":", "_").replace("!", "")
    # Log failures instead of propagating, matching add_agent_memory and the
    # room branch of store_conversation_memory — memory writes are best-effort.
    try:
        await memory.add(messages, user_id=f"room_{safe_room_id}", metadata=metadata)
        logger.debug("Room memory added", room_id=room_id)
    except Exception as e:
        logger.exception("Failed to add room memory", room_id=room_id, error=str(e))
|
|
170
|
+
|
|
171
|
+
|
|
172
|
+
async def search_room_memories(
    query: str,
    room_id: str,
    storage_path: Path,
    config: Config,
    limit: int = 3,
) -> list[MemoryResult]:
    """Search room memories.

    Args:
        query: Search query
        room_id: Room ID
        storage_path: Storage path for memory
        config: Application configuration
        limit: Maximum number of results

    Returns:
        List of relevant memories

    """
    memory = await create_memory_instance(storage_path, config)

    # Matrix room IDs contain ":" and "!", which are unsafe in storage keys
    normalized_room = room_id.replace(":", "_").replace("!", "")
    raw_result = await memory.search(query, user_id=f"room_{normalized_room}", limit=limit)

    matches = raw_result["results"] if isinstance(raw_result, dict) and "results" in raw_result else []

    logger.debug("Room memories found", count=len(matches), room_id=room_id)
    return matches
|
|
200
|
+
|
|
201
|
+
|
|
202
|
+
def format_memories_as_context(memories: list[MemoryResult], context_type: str = "agent") -> str:
    """Format memories into a context string.

    Args:
        memories: List of memory objects from search
        context_type: Type of context ("agent" or "room")

    Returns:
        Formatted context string

    """
    if not memories:
        return ""

    # Header lines warn the model that recalled memories may be off-topic.
    header = [
        f"[Automatically extracted {context_type} memories - may not be relevant to current context]",
        f"Previous {context_type} memories that might be related:",
    ]
    bullets = [f"- {entry.get('memory', '')}" for entry in memories]
    return "\n".join(header + bullets)
|
|
225
|
+
|
|
226
|
+
|
|
227
|
+
async def build_memory_enhanced_prompt(
    prompt: str,
    agent_name: str,
    storage_path: Path,
    config: Config,
    room_id: str | None = None,
) -> str:
    """Build a prompt enhanced with relevant memories.

    Args:
        prompt: The original user prompt
        agent_name: Name of the agent
        storage_path: Path for memory storage
        config: Application configuration
        room_id: Optional room ID for room context

    Returns:
        Enhanced prompt with memory context

    """
    logger.debug("Building enhanced prompt", agent=agent_name)
    enhanced = prompt

    # Agent memories are prepended first, then room memories above them,
    # so the final order is: room context, agent context, original prompt.
    agent_memories = await search_agent_memories(prompt, agent_name, storage_path, config)
    if agent_memories:
        enhanced = f"{format_memories_as_context(agent_memories, 'agent')}\n\n{enhanced}"
        logger.debug("Agent memories added", count=len(agent_memories))

    if room_id:
        room_memories = await search_room_memories(prompt, room_id, storage_path, config)
        if room_memories:
            enhanced = f"{format_memories_as_context(room_memories, 'room')}\n\n{enhanced}"
            logger.debug("Room memories added", count=len(room_memories))

    return enhanced
|
|
264
|
+
|
|
265
|
+
|
|
266
|
+
def _build_conversation_messages(
|
|
267
|
+
thread_history: list[dict],
|
|
268
|
+
current_prompt: str,
|
|
269
|
+
user_id: str,
|
|
270
|
+
) -> list[dict]:
|
|
271
|
+
"""Build conversation messages in mem0 format from thread history.
|
|
272
|
+
|
|
273
|
+
Args:
|
|
274
|
+
thread_history: List of messages with sender and body
|
|
275
|
+
current_prompt: The current user prompt being processed
|
|
276
|
+
user_id: The Matrix user ID to identify user messages
|
|
277
|
+
|
|
278
|
+
Returns:
|
|
279
|
+
List of messages in mem0 format with role and content
|
|
280
|
+
|
|
281
|
+
"""
|
|
282
|
+
messages = []
|
|
283
|
+
|
|
284
|
+
# Process thread history
|
|
285
|
+
for msg in thread_history:
|
|
286
|
+
body = msg.get("body", "").strip()
|
|
287
|
+
if not body:
|
|
288
|
+
continue
|
|
289
|
+
|
|
290
|
+
sender = msg.get("sender", "")
|
|
291
|
+
# Determine role based on sender
|
|
292
|
+
# If sender matches the user, it's a user message; otherwise it's assistant
|
|
293
|
+
role = "user" if sender == user_id else "assistant"
|
|
294
|
+
messages.append({"role": role, "content": body})
|
|
295
|
+
|
|
296
|
+
# Add the current prompt as a user message
|
|
297
|
+
messages.append({"role": "user", "content": current_prompt})
|
|
298
|
+
|
|
299
|
+
return messages
|
|
300
|
+
|
|
301
|
+
|
|
302
|
+
async def store_conversation_memory(
    prompt: str,
    agent_name: str | list[str],
    storage_path: Path,
    session_id: str,
    config: Config,
    room_id: str | None = None,
    thread_history: list[dict] | None = None,
    user_id: str | None = None,
) -> None:
    """Store conversation in memory for future recall.

    Uses mem0's intelligent extraction to identify relevant facts, preferences,
    and context from the conversation. Provides full conversation context when
    available to allow better understanding of user intent.

    For teams, pass a list of agent names to store memory once under a shared
    namespace, avoiding duplicate LLM processing.

    Args:
        prompt: The current user prompt
        agent_name: Name of the agent or list of agent names for teams
        storage_path: Path for memory storage
        session_id: Session ID for the conversation
        config: Application configuration
        room_id: Optional room ID for room memory
        thread_history: Optional thread history for context
        user_id: Optional user ID to identify user messages in thread

    """
    if not prompt:
        return

    # Prefer structured messages with roles for better context; fall back
    # to a single user message when history or user identity is missing.
    if thread_history and user_id:
        messages = _build_conversation_messages(thread_history, prompt, user_id)
    else:
        messages = [{"role": "user", "content": prompt}]

    memory = await create_memory_instance(storage_path, config)

    if isinstance(agent_name, list):
        # Team: store once under a shared namespace (sorted for a stable ID).
        team_id = f"team_{'+'.join(sorted(agent_name))}"
        team_metadata = {
            "type": "conversation",
            "session_id": session_id,
            "is_team": True,
            "team_members": agent_name,  # Keep original order for reference
        }
        try:
            await memory.add(messages, user_id=team_id, metadata=team_metadata)
            logger.info("Team memory added", team_id=team_id, members=agent_name)
        except Exception as e:
            logger.exception("Failed to add team memory", team_id=team_id, error=str(e))
    else:
        # Single agent: store under the agent's own namespace.
        agent_metadata = {
            "type": "conversation",
            "session_id": session_id,
            "agent": agent_name,
        }
        try:
            await memory.add(messages, user_id=f"agent_{agent_name}", metadata=agent_metadata)
            logger.info("Memory added", agent=agent_name)
        except Exception as e:
            logger.exception("Failed to add memory", agent=agent_name, error=str(e))

    if not room_id:
        return

    # Also store for room context
    if isinstance(agent_name, str):
        contributed_by = agent_name
    else:
        contributed_by = f"team:{','.join(agent_name)}"
    room_metadata = {
        "type": "conversation",
        "session_id": session_id,
        "room_id": room_id,
        "contributed_by": contributed_by,
    }

    # Matrix room IDs contain ":" and "!", which are unsafe in storage keys
    safe_room_id = room_id.replace(":", "_").replace("!", "")
    try:
        await memory.add(messages, user_id=f"room_{safe_room_id}", metadata=room_metadata)
        logger.debug("Room memory added", room_id=room_id)
    except Exception as e:
        logger.exception("Failed to add room memory", room_id=room_id, error=str(e))
|
mindroom/py.typed
ADDED
|
File without changes
|
|
@@ -0,0 +1,128 @@
|
|
|
1
|
+
"""Track which messages have been responded to by agents."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
import fcntl
|
|
6
|
+
import json
|
|
7
|
+
import time
|
|
8
|
+
from dataclasses import dataclass, field
|
|
9
|
+
from typing import TYPE_CHECKING, TypedDict
|
|
10
|
+
|
|
11
|
+
from .constants import TRACKING_DIR
|
|
12
|
+
from .logging_config import get_logger
|
|
13
|
+
|
|
14
|
+
if TYPE_CHECKING:
|
|
15
|
+
from pathlib import Path
|
|
16
|
+
|
|
17
|
+
logger = get_logger(__name__)
|
|
18
|
+
|
|
19
|
+
|
|
20
|
+
class ResponseRecord(TypedDict):
    """Record of a response to a user message."""

    # Unix time (seconds) when the response was marked, from time.time()
    timestamp: float
    # Event ID of the agent's response message, or None if not recorded
    response_id: str | None
|
|
25
|
+
|
|
26
|
+
|
|
27
|
+
@dataclass
class ResponseTracker:
    """Track which event IDs have been responded to by an agent.

    State is persisted as JSON under ``base_path`` so duplicate-response
    checks survive restarts; writes are guarded with an exclusive flock.
    """

    agent_name: str
    base_path: Path = TRACKING_DIR
    # Maps user event ID -> record of when/how we responded
    _responses: dict[str, ResponseRecord] = field(default_factory=dict, init=False)
    # JSON file backing _responses, derived from agent_name in __post_init__
    _responses_file: Path = field(init=False)

    def __post_init__(self) -> None:
        """Initialize paths and load existing responses."""
        self.base_path.mkdir(parents=True, exist_ok=True)
        self._responses_file = self.base_path / f"{self.agent_name}_responded.json"
        self._load_responses()
        # Perform automatic cleanup on initialization
        self._cleanup_old_events()

    def has_responded(self, event_id: str) -> bool:
        """Check if we've already responded to this event.

        Args:
            event_id: The Matrix event ID

        Returns:
            True if we've already responded to this event

        """
        return event_id in self._responses

    def mark_responded(self, event_id: str, response_event_id: str | None = None) -> None:
        """Mark an event as responded to with current timestamp.

        Args:
            event_id: The Matrix event ID we responded to
            response_event_id: The event ID of our response message (optional)

        """
        self._responses[event_id] = {
            "timestamp": time.time(),
            "response_id": response_event_id,
        }
        self._save_responses()
        logger.debug(f"Marked event {event_id} as responded for agent {self.agent_name}")

    def get_response_event_id(self, user_event_id: str) -> str | None:
        """Get the response event ID for a given user message event ID.

        Args:
            user_event_id: The user's message event ID

        Returns:
            The agent's response event ID if it exists, None otherwise

        """
        record = self._responses.get(user_event_id)
        return record["response_id"] if record else None

    def _load_responses(self) -> None:
        """Load the responses from disk, tolerating a missing or corrupt file."""
        if not self._responses_file.exists():
            self._responses = {}
            return

        try:
            with self._responses_file.open() as f:
                self._responses = json.load(f)
        except (OSError, json.JSONDecodeError):
            # A corrupt or unreadable tracking file must not crash the agent;
            # start fresh (worst case: a duplicate response is sent).
            logger.warning(f"Could not read {self._responses_file}; resetting response tracking")
            self._responses = {}

    def _save_responses(self) -> None:
        """Save the responses to disk using file locking."""
        with self._responses_file.open("w") as f:
            fcntl.flock(f.fileno(), fcntl.LOCK_EX)
            try:
                json.dump(self._responses, f, indent=2)
            finally:
                fcntl.flock(f.fileno(), fcntl.LOCK_UN)

    def _cleanup_old_events(self, max_events: int = 10000, max_age_days: int = 30) -> None:
        """Remove old events based on count and age.

        Args:
            max_events: Maximum number of events to track
            max_age_days: Maximum age of events in days

        """
        before_count = len(self._responses)
        current_time = time.time()
        max_age_seconds = max_age_days * 24 * 60 * 60

        # First remove events older than max_age_days
        self._responses = {
            event_id: record
            for event_id, record in self._responses.items()
            if current_time - record["timestamp"] < max_age_seconds
        }

        # Then trim to max_events if still over limit
        if len(self._responses) > max_events:
            # Sort by timestamp and keep only the most recent ones
            sorted_events = sorted(self._responses.items(), key=lambda x: x[1]["timestamp"])
            self._responses = dict(sorted_events[-max_events:])

        # Only rewrite the file (and log) when cleanup actually removed
        # something; previously every init rewrote the file unconditionally.
        if len(self._responses) != before_count:
            self._save_responses()
            logger.info(f"Cleaned up old events for {self.agent_name}, keeping {len(self._responses)} events")
|