memorisdk 1.0.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of memorisdk might be problematic. Click here for more details.
- memoriai/__init__.py +140 -0
- memoriai/agents/__init__.py +7 -0
- memoriai/agents/conscious_agent.py +506 -0
- memoriai/agents/memory_agent.py +322 -0
- memoriai/agents/retrieval_agent.py +579 -0
- memoriai/config/__init__.py +14 -0
- memoriai/config/manager.py +281 -0
- memoriai/config/settings.py +287 -0
- memoriai/core/__init__.py +6 -0
- memoriai/core/database.py +966 -0
- memoriai/core/memory.py +1349 -0
- memoriai/database/__init__.py +5 -0
- memoriai/database/connectors/__init__.py +9 -0
- memoriai/database/connectors/mysql_connector.py +159 -0
- memoriai/database/connectors/postgres_connector.py +158 -0
- memoriai/database/connectors/sqlite_connector.py +148 -0
- memoriai/database/queries/__init__.py +15 -0
- memoriai/database/queries/base_queries.py +204 -0
- memoriai/database/queries/chat_queries.py +157 -0
- memoriai/database/queries/entity_queries.py +236 -0
- memoriai/database/queries/memory_queries.py +178 -0
- memoriai/database/templates/__init__.py +0 -0
- memoriai/database/templates/basic_template.py +0 -0
- memoriai/database/templates/schemas/__init__.py +0 -0
- memoriai/integrations/__init__.py +68 -0
- memoriai/integrations/anthropic_integration.py +194 -0
- memoriai/integrations/litellm_integration.py +11 -0
- memoriai/integrations/openai_integration.py +273 -0
- memoriai/scripts/llm_text.py +50 -0
- memoriai/tools/__init__.py +5 -0
- memoriai/tools/memory_tool.py +544 -0
- memoriai/utils/__init__.py +89 -0
- memoriai/utils/exceptions.py +418 -0
- memoriai/utils/helpers.py +433 -0
- memoriai/utils/logging.py +204 -0
- memoriai/utils/pydantic_models.py +258 -0
- memoriai/utils/schemas.py +0 -0
- memoriai/utils/validators.py +339 -0
- memorisdk-1.0.0.dist-info/METADATA +386 -0
- memorisdk-1.0.0.dist-info/RECORD +44 -0
- memorisdk-1.0.0.dist-info/WHEEL +5 -0
- memorisdk-1.0.0.dist-info/entry_points.txt +2 -0
- memorisdk-1.0.0.dist-info/licenses/LICENSE +203 -0
- memorisdk-1.0.0.dist-info/top_level.txt +1 -0
|
@@ -0,0 +1,544 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Memory Tool - A tool/function for manual integration with any LLM library
|
|
3
|
+
"""
|
|
4
|
+
|
|
5
|
+
import json
|
|
6
|
+
from typing import Any, Callable, Dict
|
|
7
|
+
|
|
8
|
+
from loguru import logger
|
|
9
|
+
|
|
10
|
+
from ..core.memory import Memori
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
class MemoryTool:
    """
    A tool that can be attached to any LLM library for using Memori functionality.

    This provides a standardized interface for:
    1. Recording conversations manually
    2. Retrieving relevant context
    3. Getting memory statistics
    """

    def __init__(self, memori_instance: Memori):
        """
        Initialize MemoryTool with a Memori instance

        Args:
            memori_instance: The Memori instance to use for memory operations
        """
        self.memori = memori_instance
        # Name advertised to LLMs in the function-calling schema (see get_tool_schema)
        self.tool_name = "memori_memory"
        # Human-readable description of the tool's purpose
        self.description = "Access and manage AI conversation memory"

    def get_tool_schema(self) -> Dict[str, Any]:
        """
        Get the tool schema for function calling in LLMs

        Returns:
            Tool schema compatible with OpenAI function calling format:
            a dict with "name", "description", and a JSON-schema "parameters"
            object exposing a single required "query" string.
        """
        return {
            "name": self.tool_name,
            "description": "Search and retrieve information from conversation memory",
            "parameters": {
                "type": "object",
                "properties": {
                    "query": {
                        "type": "string",
                        "description": "Search query to find relevant memories, conversations, or personal information about the user",
                    },
                },
                "required": ["query"],
            },
        }

    def execute(self, query: str = None, **kwargs) -> str:
        """
        Execute a memory search/retrieve action

        Args:
            query: Search query string. May be omitted (None) and supplied
                via kwargs["query"] instead, for backward compatibility.
            **kwargs: Additional parameters for backward compatibility

        Returns:
            String result of the memory search (human-readable, never raises
            to the caller for search failures — errors come back as strings).
        """
        # Accept query as direct parameter or from kwargs
        if query is None:
            query = kwargs.get("query", "")

        if not query:
            return "Error: Query is required for memory search"

        # Use retrieval agent for intelligent search
        try:
            # Imported lazily so the tool still works (via the ImportError
            # fallback below) when the retrieval agent is unavailable.
            from ..agents.retrieval_agent import MemorySearchEngine

            # Create search engine if not already initialized
            # (cached on the instance so repeated execute() calls reuse it)
            if not hasattr(self, "_search_engine"):
                self._search_engine = MemorySearchEngine()

            # Execute search using retrieval agent
            results = self._search_engine.execute_search(
                query=query,
                db_manager=self.memori.db_manager,
                namespace=self.memori.namespace,
                limit=5,
            )

            if not results:
                return f"No relevant memories found for query: '{query}'"

            # Format results as a readable string
            formatted_output = f"🔍 Memory Search Results for: '{query}'\n\n"

            for i, result in enumerate(results, 1):
                try:
                    # Try to parse processed data for better formatting
                    if "processed_data" in result:
                        # NOTE(review): json is already imported at module
                        # level; this local import is redundant but harmless.
                        import json

                        # processed_data is presumably a JSON-serialized
                        # ProcessedMemory payload — see the raw-row fallback
                        # in the else branch. TODO confirm against writer.
                        processed_data = json.loads(result["processed_data"])
                        summary = processed_data.get("summary", "")
                        category = processed_data.get("category", {}).get(
                            "primary_category", ""
                        )
                    else:
                        summary = result.get(
                            "summary",
                            result.get("searchable_content", "")[:100] + "...",
                        )
                        category = result.get("category_primary", "unknown")

                    importance = result.get("importance_score", 0.0)
                    created_at = result.get("created_at", "")

                    formatted_output += f"{i}. [{category.upper()}] {summary}\n"
                    formatted_output += (
                        f"   📊 Importance: {importance:.2f} | 📅 {created_at}\n"
                    )

                    if result.get("search_reasoning"):
                        formatted_output += f"   🎯 {result['search_reasoning']}\n"

                    formatted_output += "\n"

                except Exception:
                    # Fallback formatting: per-result failures (bad JSON,
                    # missing keys) degrade to a truncated content line
                    # instead of aborting the whole search.
                    content = result.get(
                        "searchable_content", "Memory content available"
                    )[:100]
                    formatted_output += f"{i}. {content}...\n\n"

            return formatted_output.strip()

        except ImportError:
            # Fallback to original search methods if retrieval agent is not available
            # Try different search strategies based on query content
            # NOTE(review): exceptions raised inside this handler are NOT
            # caught by the sibling "except Exception" below; the helpers
            # called here return error dicts rather than raising.
            if any(word in query.lower() for word in ["name", "who am i", "about me"]):
                # Personal information query - try essential conversations first
                essential_result = self._get_essential_conversations()
                if essential_result.get("count", 0) > 0:
                    return self._format_dict_to_string(essential_result)

            # General search
            search_result = self._search_memories(query=query, limit=10)
            if search_result.get("results_count", 0) > 0:
                return self._format_dict_to_string(search_result)

            # Fallback to context retrieval
            context_result = self._retrieve_context(query=query, limit=5)
            return self._format_dict_to_string(context_result)

        except Exception as e:
            return f"Error searching memories: {str(e)}"

    def _format_dict_to_string(self, result_dict: Dict[str, Any]) -> str:
        """
        Helper method to format dictionary results to readable strings.

        Dispatches on which key the dict carries:
        "essential_conversations" / "results" / "context" — each gets its
        own layout; anything else falls back to the dict's "message".
        """
        if result_dict.get("error"):
            return f"Error: {result_dict['error']}"

        if "essential_conversations" in result_dict:
            conversations = result_dict.get("essential_conversations", [])
            if not conversations:
                return "No essential conversations found in memory."

            output = f"🧠 Essential Information ({len(conversations)} items):\n\n"
            for i, conv in enumerate(conversations, 1):
                category = conv.get("category", "").title()
                summary = conv.get("summary", "")
                importance = conv.get("importance", 0.0)
                output += f"{i}. [{category}] {summary}\n"
                output += f"   📊 Importance: {importance:.2f}\n\n"
            return output.strip()

        elif "results" in result_dict:
            results = result_dict.get("results", [])
            if not results:
                return "No memories found for your search."

            output = f"🔍 Memory Search Results ({len(results)} found):\n\n"
            for i, result in enumerate(results, 1):
                # Truncate to keep the tool's output compact for LLM context
                content = result.get("searchable_content", "Memory content")[:100]
                output += f"{i}. {content}...\n\n"
            return output.strip()

        elif "context" in result_dict:
            context_items = result_dict.get("context", [])
            if not context_items:
                return "No relevant context found in memory."

            output = f"📚 Relevant Context ({len(context_items)} items):\n\n"
            for i, item in enumerate(context_items, 1):
                content = item.get("content", "")[:100]
                category = item.get("category", "unknown")
                output += f"{i}. [{category.upper()}] {content}...\n\n"
            return output.strip()

        else:
            # Generic formatting
            message = result_dict.get("message", "Memory search completed")
            return message

    def _record_conversation(self, **kwargs) -> Dict[str, Any]:
        """
        Record a conversation.

        Expects kwargs: user_input (str), ai_output (str), model (str, optional).

        Returns:
            {"success": True, "chat_id": ..., "message": ...} on success,
            or {"error": ...} on validation/recording failure (never raises).
        """
        try:
            user_input = kwargs.get("user_input", "")
            ai_output = kwargs.get("ai_output", "")
            model = kwargs.get("model", "unknown")

            if not user_input or not ai_output:
                return {
                    "error": "Both user_input and ai_output are required for recording"
                }

            chat_id = self.memori.record_conversation(
                user_input=user_input,
                ai_output=ai_output,
                model=model,
                # Mark the record so manually-recorded turns are distinguishable
                metadata={"tool": "memory_tool", "manual_record": True},
            )

            return {
                "success": True,
                "chat_id": chat_id,
                "message": "Conversation recorded successfully",
            }

        except Exception as e:
            logger.error(f"Failed to record conversation: {e}")
            return {"error": f"Failed to record conversation: {str(e)}"}

    def _retrieve_context(self, **kwargs) -> Dict[str, Any]:
        """
        Retrieve relevant context for a query.

        Expects kwargs: query (str, required), limit (int, default 5).

        Returns:
            {"success": True, "query": ..., "context_count": ..., "context": [...],
             "message": ...} on success, or {"error": ...} on failure.
        """
        try:
            query = kwargs.get("query", "")
            limit = kwargs.get("limit", 5)

            if not query:
                return {"error": "Query is required for retrieval"}

            context_items = self.memori.retrieve_context(query, limit)

            # Format context items for easier consumption
            formatted_context = []
            for item in context_items:
                formatted_context.append(
                    {
                        "content": item.get("content", ""),
                        "category": item.get("category", ""),
                        "importance": item.get("importance_score", 0),
                        "created_at": item.get("created_at", ""),
                        "memory_type": item.get("memory_type", ""),
                    }
                )

            return {
                "success": True,
                "query": query,
                "context_count": len(formatted_context),
                "context": formatted_context,
                "message": f"Retrieved {len(formatted_context)} relevant memories",
            }

        except Exception as e:
            logger.error(f"Failed to retrieve context: {e}")
            return {"error": f"Failed to retrieve context: {str(e)}"}

    def _search_memories(self, **kwargs) -> Dict[str, Any]:
        """
        Search memories by content.

        Expects kwargs: query (str, required), limit (int, default 10).

        Returns:
            {"success": True, "query": ..., "results_count": ..., "results": [...],
             "message": ...} on success, or {"error": ...} on failure.
        """
        try:
            query = kwargs.get("query", "")
            limit = kwargs.get("limit", 10)

            if not query:
                return {"error": "Query is required for search"}

            search_results = self.memori.db_manager.search_memories(
                query=query, namespace=self.memori.namespace, limit=limit
            )

            return {
                "success": True,
                "query": query,
                "results_count": len(search_results),
                "results": search_results,
                "message": f"Found {len(search_results)} matching memories",
            }

        except Exception as e:
            logger.error(f"Failed to search memories: {e}")
            return {"error": f"Failed to search memories: {str(e)}"}

    def _get_stats(self, **kwargs) -> Dict[str, Any]:
        """
        Get memory and integration statistics.

        Returns:
            Dict combining memory stats, integration stats, and the current
            namespace / session / enabled flags, or {"error": ...} on failure.
        """
        try:
            memory_stats = self.memori.get_memory_stats()
            integration_stats = self.memori.get_integration_stats()

            return {
                "success": True,
                "memory_stats": memory_stats,
                "integration_stats": integration_stats,
                "namespace": self.memori.namespace,
                "session_id": self.memori.session_id,
                "enabled": self.memori.is_enabled,
            }

        except Exception as e:
            logger.error(f"Failed to get stats: {e}")
            return {"error": f"Failed to get stats: {str(e)}"}

    def _get_essential_conversations(self, **kwargs) -> Dict[str, Any]:
        """
        Get essential conversations from short-term memory.

        Expects kwargs: limit (int, default 10).

        Returns:
            {"success": True, "essential_conversations": [...], "count": ...,
             "message": ...}, or {"error": ...} when the feature is missing
            or retrieval fails. Feature availability is probed with hasattr
            so older Memori instances degrade gracefully.
        """
        try:
            limit = kwargs.get("limit", 10)

            if hasattr(self.memori, "get_essential_conversations"):
                essential_conversations = self.memori.get_essential_conversations(limit)

                # Format for better readability
                formatted_conversations = []
                for conv in essential_conversations:
                    formatted_conversations.append(
                        {
                            "summary": conv.get("summary", ""),
                            # Strip the "essential_" prefix so callers see the
                            # plain category name
                            "category": conv.get("category_primary", "").replace(
                                "essential_", ""
                            ),
                            "importance": conv.get("importance_score", 0),
                            "created_at": conv.get("created_at", ""),
                            "content": conv.get("searchable_content", ""),
                        }
                    )

                return {
                    "success": True,
                    "essential_conversations": formatted_conversations,
                    "count": len(formatted_conversations),
                    "message": f"Retrieved {len(formatted_conversations)} essential conversations from short-term memory",
                }
            else:
                return {"error": "Essential conversations feature not available"}

        except Exception as e:
            logger.error(f"Failed to get essential conversations: {e}")
            return {"error": f"Failed to get essential conversations: {str(e)}"}

    def _trigger_analysis(self, **kwargs) -> Dict[str, Any]:
        """
        Trigger conscious agent analysis.

        Returns:
            {"success": True, "message": ...} when the analysis was started,
            or {"error": ...} when the feature is unavailable or it failed.
        """
        try:
            if hasattr(self.memori, "trigger_conscious_analysis"):
                self.memori.trigger_conscious_analysis()
                return {
                    "success": True,
                    "message": "Conscious agent analysis triggered successfully. This will analyze memory patterns and update essential conversations in short-term memory.",
                }
            else:
                return {"error": "Conscious analysis feature not available"}

        except Exception as e:
            logger.error(f"Failed to trigger analysis: {e}")
            return {"error": f"Failed to trigger analysis: {str(e)}"}
|
|
364
|
+
|
|
365
|
+
|
|
366
|
+
# Helper function to create a tool instance
|
|
367
|
+
def create_memory_tool(memori_instance: Memori) -> MemoryTool:
    """
    Build a MemoryTool bound to the given Memori instance.

    Args:
        memori_instance: The Memori instance to use

    Returns:
        MemoryTool instance
    """
    tool = MemoryTool(memori_instance)
    return tool
|
|
378
|
+
|
|
379
|
+
|
|
380
|
+
# Function calling interface
|
|
381
|
+
def memori_tool_function(memori_instance: Memori, query: str = None, **kwargs) -> str:
    """
    Direct function interface for memory operations.

    This can be used as a function call in LLM libraries that support function calling.

    Args:
        memori_instance: The Memori instance to use
        query: Search query string
        **kwargs: Additional parameters for backward compatibility

    Returns:
        String result of the memory operation
    """
    # One-shot helper: construct the tool and delegate straight to execute()
    return MemoryTool(memori_instance).execute(query=query, **kwargs)
|
|
397
|
+
|
|
398
|
+
|
|
399
|
+
# Decorator for automatic conversation recording
|
|
400
|
+
def record_conversation(memori_instance: Memori):
    """
    Decorator to automatically record LLM conversations.

    Wraps an LLM call so that, after it returns, the latest user message and
    the model's reply are persisted via ``memori_instance.record_conversation``.
    Recording is best-effort: any failure is logged and never propagates to
    the caller, and the wrapped function's return value is always passed
    through unchanged.

    Args:
        memori_instance: The Memori instance to use for recording

    Returns:
        Decorator function
    """
    import functools

    def decorator(func: Callable) -> Callable:
        # functools.wraps preserves __name__/__doc__/signature metadata of the
        # wrapped callable (the original wrapper silently discarded them).
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            # Call the original function first; recording happens afterwards
            result = func(*args, **kwargs)

            try:
                # Try to extract conversation details from common patterns
                if hasattr(result, "choices") and result.choices:
                    # OpenAI-style response object
                    ai_output = result.choices[0].message.content

                    # Find the most recent user message in the request kwargs
                    # (assumes OpenAI-style list of {"role", "content"} dicts)
                    user_input = ""
                    if "messages" in kwargs:
                        for msg in reversed(kwargs["messages"]):
                            if msg.get("role") == "user":
                                user_input = msg.get("content", "")
                                break

                    model = kwargs.get("model", "unknown")

                    # Only record complete exchanges: both sides must be
                    # non-empty (ai_output may be None for tool-call replies)
                    if user_input and ai_output:
                        memori_instance.record_conversation(
                            user_input=user_input,
                            ai_output=ai_output,
                            model=model,
                            metadata={
                                "decorator": "record_conversation",
                                "auto_recorded": True,
                            },
                        )

            except Exception as e:
                # Memory bookkeeping must never break the LLM call itself
                logger.error(f"Failed to auto-record conversation: {e}")

            return result

        return wrapper

    return decorator
|
|
451
|
+
|
|
452
|
+
|
|
453
|
+
def create_memory_search_tool(memori_instance: Memori):
    """
    Create memory search tool for LLM function calling (v1.0 architecture).

    This creates a search function compatible with OpenAI function calling
    that uses SQL-based memory retrieval.

    Args:
        memori_instance: The Memori instance to search

    Returns:
        Memory search function for LLM tool use
    """

    def memory_search(query: str, max_results: int = 5) -> str:
        """
        Search through stored memories for relevant information.

        Args:
            query: Search query for memories
            max_results: Maximum number of results to return

        Returns:
            Formatted string with search results
        """
        try:
            # Use the SQL-based search from the database manager
            hits = memori_instance.db_manager.search_memories(
                query=query, namespace=memori_instance.namespace, limit=max_results
            )

            if not hits:
                return f"No relevant memories found for query: '{query}'"

            # Normalize each row into a flat dict following the v1.0 structure
            normalized = []
            for hit in hits:
                try:
                    # Parse the ProcessedMemory JSON payload
                    payload = json.loads(hit["processed_data"])
                    category_info = payload.get("category", {})
                    normalized.append(
                        {
                            "summary": payload.get("summary", ""),
                            "category": category_info.get("primary_category", ""),
                            "importance_score": hit.get("importance_score", 0.0),
                            "created_at": hit.get("created_at", ""),
                            "entities": payload.get("entities", {}),
                            "confidence": category_info.get("confidence_score", 0.0),
                            "searchable_content": hit.get("searchable_content", ""),
                            "retention_type": payload.get("importance", {}).get(
                                "retention_type", "short_term"
                            ),
                        }
                    )
                except (json.JSONDecodeError, KeyError) as e:
                    logger.error(f"Error parsing memory data: {e}")
                    # Fallback: use the raw row's columns directly
                    normalized.append(
                        {
                            "summary": hit.get("summary", "Memory content available"),
                            "category": hit.get("category_primary", "unknown"),
                            "importance_score": hit.get("importance_score", 0.0),
                            "created_at": hit.get("created_at", ""),
                        }
                    )

            # Render a readable report rather than raw JSON
            chunks = [
                f"🔍 Memory Search Results for: '{query}' ({len(normalized)} found)\n"
            ]
            for rank, entry in enumerate(normalized, 1):
                headline = entry.get("summary", "Memory content available")
                label = entry.get("category", "unknown")
                score = entry.get("importance_score", 0.0)
                stamp = entry.get("created_at", "")
                chunks.append(
                    f"{rank}. [{label.upper()}] {headline}\n"
                    f"   📊 Importance: {score:.2f} | 📅 {stamp}\n"
                )

            return "\n".join(chunks).strip()

        except Exception as e:
            logger.error(f"Memory search error: {e}")
            return f"Error searching memories: {str(e)}"

    return memory_search
|
|
@@ -0,0 +1,89 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Utils package for Memoriai - Comprehensive utilities and helpers
|
|
3
|
+
"""
|
|
4
|
+
|
|
5
|
+
# Enhanced exception handling
|
|
6
|
+
from .exceptions import (
|
|
7
|
+
AgentError,
|
|
8
|
+
AuthenticationError,
|
|
9
|
+
ConfigurationError,
|
|
10
|
+
DatabaseError,
|
|
11
|
+
ExceptionHandler,
|
|
12
|
+
IntegrationError,
|
|
13
|
+
MemoriError,
|
|
14
|
+
MemoryNotFoundError,
|
|
15
|
+
ProcessingError,
|
|
16
|
+
RateLimitError,
|
|
17
|
+
ResourceExhaustedError,
|
|
18
|
+
TimeoutError,
|
|
19
|
+
ValidationError,
|
|
20
|
+
)
|
|
21
|
+
|
|
22
|
+
# Helper utilities
|
|
23
|
+
from .helpers import (
|
|
24
|
+
AsyncUtils,
|
|
25
|
+
DateTimeUtils,
|
|
26
|
+
FileUtils,
|
|
27
|
+
JsonUtils,
|
|
28
|
+
PerformanceUtils,
|
|
29
|
+
RetryUtils,
|
|
30
|
+
StringUtils,
|
|
31
|
+
)
|
|
32
|
+
|
|
33
|
+
# Logging utilities
|
|
34
|
+
from .logging import LoggingManager, get_logger
|
|
35
|
+
|
|
36
|
+
# Core Pydantic models
|
|
37
|
+
from .pydantic_models import (
|
|
38
|
+
ConversationContext,
|
|
39
|
+
EntityType,
|
|
40
|
+
ExtractedEntities,
|
|
41
|
+
MemoryCategory,
|
|
42
|
+
MemoryCategoryType,
|
|
43
|
+
MemoryImportance,
|
|
44
|
+
ProcessedMemory,
|
|
45
|
+
RetentionType,
|
|
46
|
+
)
|
|
47
|
+
|
|
48
|
+
# Validation utilities
|
|
49
|
+
from .validators import DataValidator, MemoryValidator
|
|
50
|
+
|
|
51
|
+
__all__ = [
|
|
52
|
+
# Pydantic Models
|
|
53
|
+
"ProcessedMemory",
|
|
54
|
+
"MemoryCategory",
|
|
55
|
+
"ExtractedEntities",
|
|
56
|
+
"MemoryImportance",
|
|
57
|
+
"ConversationContext",
|
|
58
|
+
"MemoryCategoryType",
|
|
59
|
+
"RetentionType",
|
|
60
|
+
"EntityType",
|
|
61
|
+
# Exceptions
|
|
62
|
+
"MemoriError",
|
|
63
|
+
"DatabaseError",
|
|
64
|
+
"AgentError",
|
|
65
|
+
"ConfigurationError",
|
|
66
|
+
"ValidationError",
|
|
67
|
+
"IntegrationError",
|
|
68
|
+
"AuthenticationError",
|
|
69
|
+
"RateLimitError",
|
|
70
|
+
"MemoryNotFoundError",
|
|
71
|
+
"ProcessingError",
|
|
72
|
+
"TimeoutError",
|
|
73
|
+
"ResourceExhaustedError",
|
|
74
|
+
"ExceptionHandler",
|
|
75
|
+
# Validators
|
|
76
|
+
"DataValidator",
|
|
77
|
+
"MemoryValidator",
|
|
78
|
+
# Helpers
|
|
79
|
+
"StringUtils",
|
|
80
|
+
"DateTimeUtils",
|
|
81
|
+
"JsonUtils",
|
|
82
|
+
"FileUtils",
|
|
83
|
+
"RetryUtils",
|
|
84
|
+
"PerformanceUtils",
|
|
85
|
+
"AsyncUtils",
|
|
86
|
+
# Logging
|
|
87
|
+
"LoggingManager",
|
|
88
|
+
"get_logger",
|
|
89
|
+
]
|