memorisdk 1.0.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of memorisdk has been flagged as potentially problematic. See the release's details page for more information.
- memoriai/__init__.py +140 -0
- memoriai/agents/__init__.py +7 -0
- memoriai/agents/conscious_agent.py +506 -0
- memoriai/agents/memory_agent.py +322 -0
- memoriai/agents/retrieval_agent.py +579 -0
- memoriai/config/__init__.py +14 -0
- memoriai/config/manager.py +281 -0
- memoriai/config/settings.py +287 -0
- memoriai/core/__init__.py +6 -0
- memoriai/core/database.py +966 -0
- memoriai/core/memory.py +1349 -0
- memoriai/database/__init__.py +5 -0
- memoriai/database/connectors/__init__.py +9 -0
- memoriai/database/connectors/mysql_connector.py +159 -0
- memoriai/database/connectors/postgres_connector.py +158 -0
- memoriai/database/connectors/sqlite_connector.py +148 -0
- memoriai/database/queries/__init__.py +15 -0
- memoriai/database/queries/base_queries.py +204 -0
- memoriai/database/queries/chat_queries.py +157 -0
- memoriai/database/queries/entity_queries.py +236 -0
- memoriai/database/queries/memory_queries.py +178 -0
- memoriai/database/templates/__init__.py +0 -0
- memoriai/database/templates/basic_template.py +0 -0
- memoriai/database/templates/schemas/__init__.py +0 -0
- memoriai/integrations/__init__.py +68 -0
- memoriai/integrations/anthropic_integration.py +194 -0
- memoriai/integrations/litellm_integration.py +11 -0
- memoriai/integrations/openai_integration.py +273 -0
- memoriai/scripts/llm_text.py +50 -0
- memoriai/tools/__init__.py +5 -0
- memoriai/tools/memory_tool.py +544 -0
- memoriai/utils/__init__.py +89 -0
- memoriai/utils/exceptions.py +418 -0
- memoriai/utils/helpers.py +433 -0
- memoriai/utils/logging.py +204 -0
- memoriai/utils/pydantic_models.py +258 -0
- memoriai/utils/schemas.py +0 -0
- memoriai/utils/validators.py +339 -0
- memorisdk-1.0.0.dist-info/METADATA +386 -0
- memorisdk-1.0.0.dist-info/RECORD +44 -0
- memorisdk-1.0.0.dist-info/WHEEL +5 -0
- memorisdk-1.0.0.dist-info/entry_points.txt +2 -0
- memorisdk-1.0.0.dist-info/licenses/LICENSE +203 -0
- memorisdk-1.0.0.dist-info/top_level.txt +1 -0
"""
Memoriai - The Open-Source Memory Layer for AI Agents & Multi-Agent Systems v1.0

Professional-grade memory layer with comprehensive error handling, configuration
management, and modular architecture for production AI systems.
"""

__version__ = "1.0.0"
__author__ = "Harshal More"
__email__ = "harshalmore2468@gmail.com"

# NOTE(review): every subpackage is imported eagerly at package import time,
# which pulls in their transitive dependencies (e.g. openai, loguru) even for
# callers that only need a small part of the API — confirm this is intended.

# Memory agents
from .agents.memory_agent import MemoryAgent
from .agents.retrieval_agent import MemorySearchEngine

# Configuration system
from .config import (
    AgentSettings,
    ConfigManager,
    DatabaseSettings,
    LoggingSettings,
    MemoriSettings,
)
from .core.database import DatabaseManager

# Core components
from .core.memory import Memori

# Database system
from .database.connectors import MySQLConnector, PostgreSQLConnector, SQLiteConnector
from .database.queries import BaseQueries, ChatQueries, EntityQueries, MemoryQueries

# Wrapper integrations
from .integrations import MemoriAnthropic, MemoriOpenAI

# Tools and integrations
from .tools.memory_tool import MemoryTool, create_memory_search_tool, create_memory_tool

# Utils and models
# NOTE(review): the re-exported `TimeoutError` shadows the builtin of the same
# name for anyone doing `from memoriai import *` — verify this is deliberate.
from .utils import (  # Pydantic models; Enhanced exceptions; Validators and helpers; Logging
    AgentError,
    AsyncUtils,
    AuthenticationError,
    ConfigurationError,
    ConversationContext,
    DatabaseError,
    DataValidator,
    DateTimeUtils,
    EntityType,
    ExceptionHandler,
    ExtractedEntities,
    FileUtils,
    IntegrationError,
    JsonUtils,
    LoggingManager,
    MemoriError,
    MemoryCategory,
    MemoryCategoryType,
    MemoryImportance,
    MemoryNotFoundError,
    MemoryValidator,
    PerformanceUtils,
    ProcessedMemory,
    ProcessingError,
    RateLimitError,
    ResourceExhaustedError,
    RetentionType,
    RetryUtils,
    StringUtils,
    TimeoutError,
    ValidationError,
    get_logger,
)

# Explicit public API surface; keep in sync with the imports above.
__all__ = [
    # Core
    "Memori",
    "DatabaseManager",
    # Configuration
    "MemoriSettings",
    "DatabaseSettings",
    "AgentSettings",
    "LoggingSettings",
    "ConfigManager",
    # Agents
    "MemoryAgent",
    "MemorySearchEngine",
    # Database
    "SQLiteConnector",
    "PostgreSQLConnector",
    "MySQLConnector",
    "BaseQueries",
    "MemoryQueries",
    "ChatQueries",
    "EntityQueries",
    # Tools
    "MemoryTool",
    "create_memory_tool",
    "create_memory_search_tool",
    # Integrations
    "MemoriOpenAI",
    "MemoriAnthropic",
    # Pydantic Models
    "ProcessedMemory",
    "MemoryCategory",
    "ExtractedEntities",
    "MemoryImportance",
    "ConversationContext",
    "MemoryCategoryType",
    "RetentionType",
    "EntityType",
    # Enhanced Exceptions
    "MemoriError",
    "DatabaseError",
    "AgentError",
    "ConfigurationError",
    "ValidationError",
    "IntegrationError",
    "AuthenticationError",
    "RateLimitError",
    "MemoryNotFoundError",
    "ProcessingError",
    "TimeoutError",
    "ResourceExhaustedError",
    "ExceptionHandler",
    # Validators
    "DataValidator",
    "MemoryValidator",
    # Helpers
    "StringUtils",
    "DateTimeUtils",
    "JsonUtils",
    "FileUtils",
    "RetryUtils",
    "PerformanceUtils",
    "AsyncUtils",
    # Logging
    "LoggingManager",
    "get_logger",
]
|
"""Intelligent agents for memory processing and retrieval"""

# NOTE(review): "ConsciouscAgent" is misspelled (stray "c"), but it matches the
# class name actually defined in conscious_agent.py; renaming it here would
# break the public API, so any rename must ship with a deprecation alias.
from .conscious_agent import ConsciouscAgent
from .memory_agent import MemoryAgent
from .retrieval_agent import MemorySearchEngine

__all__ = ["MemoryAgent", "MemorySearchEngine", "ConsciouscAgent"]
|
@@ -0,0 +1,506 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Conscious Agent for Background Memory Processing
|
|
3
|
+
|
|
4
|
+
This agent analyzes long-term memory patterns to extract essential personal facts
|
|
5
|
+
and promote them to short-term memory for immediate context injection.
|
|
6
|
+
"""
|
|
7
|
+
|
|
8
|
+
import json
|
|
9
|
+
import os
|
|
10
|
+
import uuid
|
|
11
|
+
from datetime import datetime, timedelta
|
|
12
|
+
from typing import Any, Dict, List, Optional
|
|
13
|
+
|
|
14
|
+
from loguru import logger
|
|
15
|
+
from openai import AsyncOpenAI
|
|
16
|
+
from pydantic import BaseModel, Field
|
|
17
|
+
|
|
18
|
+
|
|
19
|
+
class EssentialMemory(BaseModel):
    """Essential conversation memory identified for short-term storage"""

    # NOTE: field names and `description=` strings are serialized into the JSON
    # schema sent to the LLM for structured output (see
    # ConsciouscAgent._perform_memory_selection), so they are runtime behavior —
    # do not reword them casually.
    memory_id: str = Field(description="Original memory ID from long-term storage")
    summary: str = Field(description="Summary of the conversation")
    category: str = Field(description="Memory category")
    # All three scores are constrained to [0.0, 1.0] by pydantic (ge/le).
    importance_score: float = Field(ge=0.0, le=1.0, description="Importance score")
    frequency_score: float = Field(
        ge=0.0, le=1.0, description="How frequently this is referenced"
    )
    recency_score: float = Field(
        ge=0.0, le=1.0, description="How recent this information is"
    )
    relevance_reasoning: str = Field(description="Why this memory is essential")
|
|
33
|
+
|
|
34
|
+
|
|
35
|
+
class EssentialMemoriesAnalysis(BaseModel):
    """Analysis result containing essential memories to promote to short-term"""

    # Used as `response_format` for OpenAI structured output; the schema
    # (including descriptions) is sent to the model, so treat it as behavior.
    essential_memories: List[EssentialMemory] = Field(
        default_factory=list,
        description="Conversations that should be promoted to short-term memory",
    )
    analysis_reasoning: str = Field(
        description="Overall reasoning for memory selection"
    )
    total_analyzed: int = Field(description="Total memories analyzed")
    # NOTE(review): nothing enforces promoted_count == len(essential_memories);
    # the value is whatever the LLM reports — confirm downstream code does not
    # rely on it being accurate.
    promoted_count: int = Field(
        description="Number of memories recommended for promotion"
    )
|
|
49
|
+
|
|
50
|
+
|
|
51
|
+
class ConsciouscAgent:
    """
    Background agent that analyzes long-term memory to extract essential personal facts.

    This agent mimics the conscious mind's ability to keep essential information
    readily accessible in short-term memory.

    Workflow (see run_background_analysis):
      1. Read up to 100 recent long-term memories for a namespace.
      2. Ask an OpenAI model (structured output) to select the essential ones.
      3. Replace the `essential_*` rows in short_term_memory with copies of the
         selected conversations.
    """

    def __init__(self, api_key: Optional[str] = None, model: str = "gpt-4o") -> None:
        """
        Initialize the conscious agent

        Args:
            api_key: OpenAI API key (if None, uses environment variable)
            model: OpenAI model to use for analysis (gpt-4o recommended)
        """
        self.api_key = api_key
        self.model = model

        # Check if API key is available (either provided or in environment)
        effective_api_key = api_key or os.getenv("OPENAI_API_KEY")

        if effective_api_key:
            # NOTE(review): the client is constructed with the raw `api_key`
            # argument, not `effective_api_key`. This works only because the
            # OpenAI SDK itself falls back to OPENAI_API_KEY when given None —
            # the env var is read twice (here and inside the SDK).
            self.client = AsyncOpenAI(
                api_key=api_key
            )  # AsyncOpenAI handles None api_key automatically
        else:
            self.client = None
            logger.warning(
                "ConsciouscAgent: No OpenAI API key found. Set OPENAI_API_KEY environment variable or provide api_key parameter."
            )

        # Timestamp of the last successful analysis; None means "never ran".
        self.last_analysis = None
        self.analysis_interval = timedelta(hours=6)  # Analyze every 6 hours

        # System prompt for memory selection
        self.system_prompt = """You are a Conscious Agent responsible for selecting essential conversations from long-term memory to promote to short-term memory.

Your role is to identify the most important conversations that should be readily available for immediate context injection.

SELECTION CRITERIA:

1. PERSONAL IDENTITY: Conversations where the user shares their name, occupation, location, or basic info
2. PREFERENCES & HABITS: Conversations revealing likes, dislikes, routines, sleep schedule, work patterns
3. SKILLS & EXPERTISE: Conversations about their technical skills, programming languages, tools they use
4. CURRENT PROJECTS: Conversations about ongoing work, projects, or learning goals
5. RELATIONSHIPS: Conversations mentioning important people, colleagues, or connections
6. REPEATED REFERENCES: Conversations that get referenced or built upon in later discussions

SCORING GUIDELINES:
- **Frequency Score**: How often this information is referenced or mentioned again
- **Recency Score**: How recent and relevant this information remains
- **Importance Score**: How critical this information is for understanding the person

SELECT conversations that:
- Contain foundational information about the person (name, role, preferences)
- Are frequently referenced or built upon in later conversations
- Provide essential context for understanding future conversations
- Represent stable, long-term characteristics rather than temporary states

AVOID conversations that:
- Are purely transactional or generic
- Contain outdated or superseded information
- Are highly specific to a single context that hasn't been revisited"""

    async def analyze_memory_patterns(
        self, db_manager, namespace: str = "default", min_memories: int = 10
    ) -> Optional[EssentialMemoriesAnalysis]:
        """
        Analyze long-term memory patterns to select essential conversations

        Args:
            db_manager: Database manager instance
            namespace: Memory namespace to analyze
            min_memories: Minimum number of memories needed for analysis

        Returns:
            EssentialMemoriesAnalysis with selected conversations or None if insufficient data
        """
        # Without a configured OpenAI client the agent is a no-op.
        if not self.client:
            logger.debug("ConsciouscAgent: No API client available, skipping analysis")
            return None

        try:
            # Get all long-term memories for analysis
            memories = await self._get_long_term_memories(db_manager, namespace)

            if len(memories) < min_memories:
                logger.info(
                    f"ConsciouscAgent: Insufficient memories ({len(memories)}) for analysis"
                )
                return None

            # Prepare memory data for analysis. Rows whose processed_data is not
            # valid JSON are silently skipped (best-effort).
            memory_summaries = []
            for memory in memories:
                try:
                    processed_data = json.loads(memory.get("processed_data", "{}"))
                    memory_summaries.append(
                        {
                            "memory_id": memory.get("memory_id", ""),
                            "summary": memory.get("summary", ""),
                            "category": memory.get("category_primary", ""),
                            "created_at": memory.get("created_at", ""),
                            "entities": processed_data.get("entities", {}),
                            "importance": memory.get("importance_score", 0.0),
                            "access_count": memory.get("access_count", 0),
                        }
                    )
                except json.JSONDecodeError:
                    continue

            if not memory_summaries:
                logger.warning("ConsciouscAgent: No valid memories found for analysis")
                return None

            # Perform AI analysis to select essential conversations
            analysis = await self._perform_memory_selection(memory_summaries)

            if analysis:
                # Only a successful LLM call resets the 6-hour analysis timer.
                self.last_analysis = datetime.now()
                logger.info(
                    f"ConsciouscAgent: Selected {len(analysis.essential_memories)} essential conversations"
                )

            return analysis

        except Exception as e:
            # Best-effort background job: log and degrade to "no analysis".
            logger.error(f"ConsciouscAgent: Memory analysis failed: {e}")
            return None

    async def _get_long_term_memories(
        self, db_manager, namespace: str
    ) -> List[Dict[str, Any]]:
        """Get long-term memories for analysis.

        Returns up to 100 rows from the last 30 days, ordered by importance then
        access count; returns an empty list on any database error.
        """
        try:
            # Get memories from the last 30 days for pattern analysis.
            # NOTE(review): datetime.now() is naive local time and is compared
            # against stored created_at strings — confirm writers use the same
            # timezone convention.
            cutoff_date = datetime.now() - timedelta(days=30)

            # NOTE(review): "?" placeholders assume a sqlite3-style paramstyle;
            # verify this query also works through the MySQL/Postgres connectors.
            query = """
                SELECT memory_id, summary, category_primary, processed_data,
                       importance_score, created_at, access_count
                FROM long_term_memory
                WHERE namespace = ? AND created_at >= ?
                ORDER BY importance_score DESC, access_count DESC
                LIMIT 100
            """

            # Execute query through database manager
            # NOTE(review): reaches into the manager's private _get_connection();
            # a public query API on db_manager would be cleaner.
            with db_manager._get_connection() as connection:
                cursor = connection.execute(query, (namespace, cutoff_date.isoformat()))

                memories = []
                for row in cursor.fetchall():
                    memories.append(
                        {
                            "memory_id": row[0],
                            "summary": row[1],
                            "category_primary": row[2],
                            "processed_data": row[3],
                            "importance_score": row[4],
                            "created_at": row[5],
                            "access_count": row[6],
                        }
                    )

                return memories

        except Exception as e:
            logger.error(f"ConsciouscAgent: Failed to get long-term memories: {e}")
            return []

    async def _perform_memory_selection(
        self, memory_summaries: List[Dict]
    ) -> Optional[EssentialMemoriesAnalysis]:
        """Use AI to select essential conversations from memory patterns.

        Sends the prepared memory context to the configured model using OpenAI
        structured output (`beta.chat.completions.parse`) and returns the parsed
        EssentialMemoriesAnalysis, or None on any API/parsing failure.
        """
        try:
            # Prepare context for AI analysis
            memory_context = self._prepare_memory_context(memory_summaries)

            # Create the analysis prompt
            user_prompt = f"""Analyze the following conversations from long-term memory and select the most essential ones to promote to short-term memory:

AVAILABLE CONVERSATIONS:
{memory_context}

Select conversations that should be promoted to short-term memory for immediate context. Focus on conversations that:
1. Contain foundational personal information (name, occupation, preferences)
2. Are frequently referenced or built upon in later conversations
3. Provide essential context for understanding the person
4. Represent stable, long-term characteristics

For each selected conversation, provide:
- The memory_id
- Frequency score (how often this info is referenced)
- Recency score (how current/relevant this remains)
- Importance score (how critical for understanding the person)
- Clear reasoning for why this conversation is essential

Limit selection to the top 5-10 most essential conversations."""

            # Make API call with structured output
            response = await self.client.beta.chat.completions.parse(
                model=self.model,
                messages=[
                    {"role": "system", "content": self.system_prompt},
                    {"role": "user", "content": user_prompt},
                ],
                response_format=EssentialMemoriesAnalysis,
                # Low temperature for near-deterministic selection.
                temperature=0.1,
            )

            # .parsed may be None if the model refused or output didn't
            # validate; callers already treat None as "no analysis".
            analysis = response.choices[0].message.parsed
            return analysis

        except Exception as e:
            logger.error(f"ConsciouscAgent: Memory selection failed: {e}")
            return None

    def _prepare_memory_context(self, memory_summaries: List[Dict]) -> str:
        """Prepare memory data for AI analysis.

        Formats each memory as a single pipe-delimited line (ID, category,
        summary, scores, up to 5 entity values) and joins them with newlines.
        """
        context_lines = []

        for i, memory in enumerate(
            memory_summaries[:50], 1
        ):  # Limit to 50 most important
            line = f"{i}. ID: {memory['memory_id']} | [{memory['category']}] {memory['summary']}"
            line += f" | Importance: {memory['importance']:.2f} | Access: {memory.get('access_count', 0)}"

            if memory.get("entities"):
                entities = []
                for _entity_type, values in memory["entities"].items():
                    if values and isinstance(values, list):
                        # Handle both string entities and structured entities
                        for value in values:
                            if isinstance(value, str):
                                entities.append(value)
                            elif isinstance(value, dict) and "value" in value:
                                # Handle structured entities
                                entities.append(value["value"])
                            elif hasattr(value, "value"):
                                # Handle Pydantic model entities
                                entities.append(value.value)
                            else:
                                # Convert any other type to string
                                entities.append(str(value))

                if entities:
                    line += f" | Entities: {', '.join(entities[:5])}"

            context_lines.append(line)

        return "\n".join(context_lines)

    async def update_short_term_memories(
        self,
        db_manager,
        analysis: EssentialMemoriesAnalysis,
        namespace: str = "default",
    ) -> int:
        """
        Update short-term memory with selected essential conversations

        Args:
            db_manager: Database manager instance
            analysis: Analysis containing selected essential memories
            namespace: Memory namespace

        Returns:
            Number of conversations copied to short-term memory
        """
        try:
            updated_count = 0

            # Clear existing essential conversations from short-term memory.
            # NOTE(review): clear-then-copy is not atomic — a crash between the
            # two steps leaves short-term memory without essential rows until
            # the next analysis cycle.
            await self._clear_essential_conversations(db_manager, namespace)

            # Copy each essential conversation to short-term memory
            for essential_memory in analysis.essential_memories:
                success = await self._copy_conversation_to_short_term(
                    db_manager, essential_memory, namespace
                )
                if success:
                    updated_count += 1

            logger.info(
                f"ConsciouscAgent: Copied {updated_count} essential conversations to short-term memory"
            )
            return updated_count

        except Exception as e:
            logger.error(f"ConsciouscAgent: Failed to update short-term memories: {e}")
            return 0

    async def _clear_essential_conversations(self, db_manager, namespace: str) -> None:
        """Clear existing essential conversations from short-term memory.

        Deletes rows previously promoted by this agent (identified by the
        'essential_' category prefix set in _copy_conversation_to_short_term).
        Errors are logged and swallowed.
        """
        try:
            with db_manager._get_connection() as connection:
                # Delete conversations marked as essential
                query = """
                    DELETE FROM short_term_memory
                    WHERE namespace = ? AND category_primary LIKE 'essential_%'
                """

                connection.execute(query, (namespace,))
                connection.commit()

        except Exception as e:
            logger.error(
                f"ConsciouscAgent: Failed to clear essential conversations: {e}"
            )

    async def _copy_conversation_to_short_term(
        self, db_manager, essential_memory: EssentialMemory, namespace: str
    ) -> bool:
        """Copy an essential conversation from long-term to short-term memory.

        Creates a new short-term row (fresh UUID, 30-day expiry) whose
        processed_data is the original payload augmented with promotion
        metadata. Returns True on success, False otherwise.
        """
        try:
            # First, get the original conversation from long-term memory
            original_memory = await self._get_original_memory(
                db_manager, essential_memory.memory_id
            )

            if not original_memory:
                # The LLM may hallucinate memory_ids; treat as a soft failure.
                logger.warning(
                    f"ConsciouscAgent: Could not find original memory {essential_memory.memory_id}"
                )
                return False

            # Create new memory ID for short-term storage
            new_memory_id = str(uuid.uuid4())
            now = datetime.now()

            # Create enhanced processed data
            try:
                original_processed_data = json.loads(
                    original_memory.get("processed_data", "{}")
                )
            except json.JSONDecodeError:
                original_processed_data = {}

            enhanced_processed_data = original_processed_data.copy()
            enhanced_processed_data.update(
                {
                    "promoted_by": "conscious_agent",
                    "promoted_at": now.isoformat(),
                    "original_memory_id": essential_memory.memory_id,
                    "frequency_score": essential_memory.frequency_score,
                    "recency_score": essential_memory.recency_score,
                    "promotion_reasoning": essential_memory.relevance_reasoning,
                }
            )

            # Store in short-term memory
            with db_manager._get_connection() as connection:
                query = """
                    INSERT INTO short_term_memory (
                        memory_id, chat_id, processed_data, importance_score,
                        category_primary, retention_type, namespace, created_at,
                        expires_at, searchable_content, summary
                    ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
                """

                # Essential conversations expire after 30 days (refreshed by re-analysis)
                expires_at = now + timedelta(days=30)

                connection.execute(
                    query,
                    (
                        new_memory_id,
                        original_memory.get(
                            "original_chat_id"
                        ),  # Preserve original chat_id link
                        json.dumps(enhanced_processed_data),
                        essential_memory.importance_score,
                        f"essential_{original_memory.get('category_primary', 'conversation')}",  # Mark as essential
                        "short_term",
                        namespace,
                        now.isoformat(),
                        expires_at.isoformat(),
                        original_memory.get(
                            "searchable_content", essential_memory.summary
                        ),
                        essential_memory.summary,
                    ),
                )

                connection.commit()
                return True

        except Exception as e:
            logger.error(
                f"ConsciouscAgent: Failed to copy conversation to short-term: {e}"
            )
            return False

    async def _get_original_memory(self, db_manager, memory_id: str) -> Optional[Dict]:
        """Get original memory from long-term storage.

        Returns the row as a dict keyed by column name, or None if the id does
        not exist or the query fails.
        """
        try:
            with db_manager._get_connection() as connection:
                query = """
                    SELECT memory_id, original_chat_id, processed_data, importance_score,
                           category_primary, searchable_content, summary
                    FROM long_term_memory
                    WHERE memory_id = ?
                """

                cursor = connection.execute(query, (memory_id,))
                row = cursor.fetchone()

                if row:
                    return {
                        "memory_id": row[0],
                        "original_chat_id": row[1],
                        "processed_data": row[2],
                        "importance_score": row[3],
                        "category_primary": row[4],
                        "searchable_content": row[5],
                        "summary": row[6],
                    }
                return None

        except Exception as e:
            logger.error(f"ConsciouscAgent: Failed to get original memory: {e}")
            return None

    def should_run_analysis(self) -> bool:
        """Check if it's time to run memory analysis.

        True on first call (last_analysis is None) and thereafter whenever at
        least `analysis_interval` (6h) has elapsed since the last success.
        """
        if self.last_analysis is None:
            return True

        return datetime.now() - self.last_analysis >= self.analysis_interval

    async def run_background_analysis(self, db_manager, namespace: str = "default") -> None:
        """Run the complete background analysis workflow.

        No-op unless the analysis interval has elapsed; otherwise analyzes
        long-term memory and, on success, refreshes the promoted short-term
        rows. All failures are logged, never raised.
        """
        try:
            if not self.should_run_analysis():
                return

            logger.info("ConsciouscAgent: Starting background memory analysis")

            # Analyze memory patterns
            analysis = await self.analyze_memory_patterns(db_manager, namespace)

            if analysis:
                # Update short-term memory with selected conversations
                await self.update_short_term_memories(db_manager, analysis, namespace)
                logger.info(
                    "ConsciouscAgent: Background analysis completed successfully"
                )
            else:
                logger.info(
                    "ConsciouscAgent: No analysis performed (insufficient data)"
                )

        except Exception as e:
            logger.error(f"ConsciouscAgent: Background analysis failed: {e}")
|