memorisdk-1.0.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

Files changed (44)
  1. memoriai/__init__.py +140 -0
  2. memoriai/agents/__init__.py +7 -0
  3. memoriai/agents/conscious_agent.py +506 -0
  4. memoriai/agents/memory_agent.py +322 -0
  5. memoriai/agents/retrieval_agent.py +579 -0
  6. memoriai/config/__init__.py +14 -0
  7. memoriai/config/manager.py +281 -0
  8. memoriai/config/settings.py +287 -0
  9. memoriai/core/__init__.py +6 -0
  10. memoriai/core/database.py +966 -0
  11. memoriai/core/memory.py +1349 -0
  12. memoriai/database/__init__.py +5 -0
  13. memoriai/database/connectors/__init__.py +9 -0
  14. memoriai/database/connectors/mysql_connector.py +159 -0
  15. memoriai/database/connectors/postgres_connector.py +158 -0
  16. memoriai/database/connectors/sqlite_connector.py +148 -0
  17. memoriai/database/queries/__init__.py +15 -0
  18. memoriai/database/queries/base_queries.py +204 -0
  19. memoriai/database/queries/chat_queries.py +157 -0
  20. memoriai/database/queries/entity_queries.py +236 -0
  21. memoriai/database/queries/memory_queries.py +178 -0
  22. memoriai/database/templates/__init__.py +0 -0
  23. memoriai/database/templates/basic_template.py +0 -0
  24. memoriai/database/templates/schemas/__init__.py +0 -0
  25. memoriai/integrations/__init__.py +68 -0
  26. memoriai/integrations/anthropic_integration.py +194 -0
  27. memoriai/integrations/litellm_integration.py +11 -0
  28. memoriai/integrations/openai_integration.py +273 -0
  29. memoriai/scripts/llm_text.py +50 -0
  30. memoriai/tools/__init__.py +5 -0
  31. memoriai/tools/memory_tool.py +544 -0
  32. memoriai/utils/__init__.py +89 -0
  33. memoriai/utils/exceptions.py +418 -0
  34. memoriai/utils/helpers.py +433 -0
  35. memoriai/utils/logging.py +204 -0
  36. memoriai/utils/pydantic_models.py +258 -0
  37. memoriai/utils/schemas.py +0 -0
  38. memoriai/utils/validators.py +339 -0
  39. memorisdk-1.0.0.dist-info/METADATA +386 -0
  40. memorisdk-1.0.0.dist-info/RECORD +44 -0
  41. memorisdk-1.0.0.dist-info/WHEEL +5 -0
  42. memorisdk-1.0.0.dist-info/entry_points.txt +2 -0
  43. memorisdk-1.0.0.dist-info/licenses/LICENSE +203 -0
  44. memorisdk-1.0.0.dist-info/top_level.txt +1 -0
memoriai/agents/retrieval_agent.py
@@ -0,0 +1,579 @@
+"""
+Memory Search Engine - Intelligent memory retrieval using Pydantic models
+"""
+
+import asyncio
+import json
+import threading
+import time
+from datetime import datetime
+from typing import Any, Dict, List, Optional
+
+import openai
+from loguru import logger
+
+from ..utils.pydantic_models import MemorySearchQuery
+
+
+class MemorySearchEngine:
+    """
+    Pydantic-based search engine for intelligent memory retrieval.
+    Uses OpenAI Structured Outputs to understand queries and plan searches.
+    """
+
+    SYSTEM_PROMPT = """You are a Memory Search Agent responsible for understanding user queries and planning effective memory retrieval strategies.
+
+Your primary functions:
+1. **Analyze Query Intent**: Understand what the user is actually looking for
+2. **Extract Search Parameters**: Identify key entities, topics, and concepts
+3. **Plan Search Strategy**: Recommend the best approach to find relevant memories
+4. **Filter Recommendations**: Suggest appropriate filters for category, importance, etc.
+
+**MEMORY CATEGORIES AVAILABLE:**
+- **fact**: Factual information, definitions, technical details, specific data points
+- **preference**: User preferences, likes/dislikes, settings, personal choices, opinions
+- **skill**: Skills, abilities, competencies, learning progress, expertise levels
+- **context**: Project context, work environment, current situations, background info
+- **rule**: Rules, policies, procedures, guidelines, constraints
+
+**SEARCH STRATEGIES:**
+- **keyword_search**: Direct keyword/phrase matching in content
+- **entity_search**: Search by specific entities (people, technologies, topics)
+- **category_filter**: Filter by memory categories
+- **importance_filter**: Filter by importance levels
+- **temporal_filter**: Search within specific time ranges
+- **semantic_search**: Conceptual/meaning-based search
+
+**QUERY INTERPRETATION GUIDELINES:**
+- "What did I learn about X?" → Focus on facts and skills related to X
+- "My preferences for Y" → Focus on preference category
+- "Rules about Z" → Focus on rule category
+- "Recent work on A" → Temporal filter + context/skill categories
+- "Important information about B" → Importance filter + keyword search
+
+Be strategic and comprehensive in your search planning."""
+
+    def __init__(self, api_key: Optional[str] = None, model: str = "gpt-4o"):
+        """
+        Initialize Memory Search Engine
+
+        Args:
+            api_key: OpenAI API key (if None, uses environment variable)
+            model: OpenAI model to use for query understanding
+        """
+        self.client = openai.OpenAI(api_key=api_key)
+        self.model = model
+
+        # Performance improvements
+        self._query_cache = {}  # Cache for search plans
+        self._cache_ttl = 300  # 5 minutes cache TTL
+        self._cache_lock = threading.Lock()
+
+        # Background processing
+        self._background_executor = None
+
+    def plan_search(
+        self, query: str, context: Optional[str] = None
+    ) -> MemorySearchQuery:
+        """
+        Plan search strategy for a user query using OpenAI Structured Outputs with caching
+
+        Args:
+            query: User's search query
+            context: Optional additional context
+
+        Returns:
+            Structured search query plan
+        """
+        try:
+            # Create cache key
+            cache_key = f"{query}|{context or ''}"
+
+            # Check cache first
+            with self._cache_lock:
+                if cache_key in self._query_cache:
+                    cached_result, timestamp = self._query_cache[cache_key]
+                    if time.time() - timestamp < self._cache_ttl:
+                        logger.debug(f"Using cached search plan for: {query}")
+                        return cached_result
+
+            # Prepare the prompt
+            prompt = f"User query: {query}"
+            if context:
+                prompt += f"\nAdditional context: {context}"
+
+            # Call OpenAI Structured Outputs
+            completion = self.client.beta.chat.completions.parse(
+                model=self.model,
+                messages=[
+                    {"role": "system", "content": self.SYSTEM_PROMPT},
+                    {
+                        "role": "user",
+                        "content": f"Analyze and plan memory search for this query:\n\n{prompt}",
+                    },
+                ],
+                response_format=MemorySearchQuery,
+                temperature=0.1,
+            )
+
+            # Handle potential refusal
+            if completion.choices[0].message.refusal:
+                logger.warning(
+                    f"Search planning refused: {completion.choices[0].message.refusal}"
+                )
+                return self._create_fallback_query(query)
+
+            search_query = completion.choices[0].message.parsed
+
+            # Cache the result
+            with self._cache_lock:
+                self._query_cache[cache_key] = (search_query, time.time())
+                # Clean old cache entries
+                self._cleanup_cache()
+
+            logger.debug(
+                f"Planned search for query '{query}': intent='{search_query.intent}', strategies={search_query.search_strategy}"
+            )
+            return search_query
+
+        except Exception as e:
+            logger.error(f"Search planning failed: {e}")
+            return self._create_fallback_query(query)
+
+    def execute_search(
+        self, query: str, db_manager, namespace: str = "default", limit: int = 10
+    ) -> List[Dict[str, Any]]:
+        """
+        Execute intelligent search using planned strategies
+
+        Args:
+            query: User's search query
+            db_manager: Database manager instance
+            namespace: Memory namespace
+            limit: Maximum results to return
+
+        Returns:
+            List of relevant memory items with search metadata
+        """
+        try:
+            # Plan the search
+            search_plan = self.plan_search(query)
+
+            all_results = []
+            seen_memory_ids = set()
+
+            # Execute keyword search (primary strategy)
+            if (
+                search_plan.entity_filters
+                or "keyword_search" in search_plan.search_strategy
+            ):
+                keyword_results = self._execute_keyword_search(
+                    search_plan, db_manager, namespace, limit
+                )
+                for result in keyword_results:
+                    if result.get("memory_id") not in seen_memory_ids:
+                        seen_memory_ids.add(result["memory_id"])
+                        result["search_strategy"] = "keyword_search"
+                        result["search_reasoning"] = (
+                            f"Keyword match for: {', '.join(search_plan.entity_filters)}"
+                        )
+                        all_results.append(result)
+
+            # Execute category-based search
+            if (
+                search_plan.category_filters
+                or "category_filter" in search_plan.search_strategy
+            ):
+                category_results = self._execute_category_search(
+                    search_plan, db_manager, namespace, limit - len(all_results)
+                )
+                for result in category_results:
+                    if result.get("memory_id") not in seen_memory_ids:
+                        seen_memory_ids.add(result["memory_id"])
+                        result["search_strategy"] = "category_filter"
+                        result["search_reasoning"] = (
+                            f"Category match: {', '.join([c.value for c in search_plan.category_filters])}"
+                        )
+                        all_results.append(result)
+
+            # Execute importance-based search
+            if (
+                search_plan.min_importance > 0.0
+                or "importance_filter" in search_plan.search_strategy
+            ):
+                importance_results = self._execute_importance_search(
+                    search_plan, db_manager, namespace, limit - len(all_results)
+                )
+                for result in importance_results:
+                    if result.get("memory_id") not in seen_memory_ids:
+                        seen_memory_ids.add(result["memory_id"])
+                        result["search_strategy"] = "importance_filter"
+                        result["search_reasoning"] = (
+                            f"High importance (≥{search_plan.min_importance})"
+                        )
+                        all_results.append(result)
+
+            # If no specific strategies worked, do a general search
+            if not all_results:
+                general_results = db_manager.search_memories(
+                    query=search_plan.query_text, namespace=namespace, limit=limit
+                )
+                for result in general_results:
+                    result["search_strategy"] = "general_search"
+                    result["search_reasoning"] = "General content search"
+                    all_results.append(result)

+            # Sort by relevance (importance score + recency)
+            all_results.sort(
+                key=lambda x: (
+                    x.get("importance_score", 0) * 0.7  # Importance weight
+                    + (
+                        datetime.now().replace(tzinfo=None)  # Ensure timezone-naive
+                        - datetime.fromisoformat(
+                            x.get("created_at", "2000-01-01")
+                        ).replace(tzinfo=None)
+                    ).days
+                    * -0.001  # Recency weight
+                ),
+                reverse=True,
+            )
+
+            # Add search metadata
+            for result in all_results:
+                result["search_metadata"] = {
+                    "original_query": query,
+                    "interpreted_intent": search_plan.intent,
+                    "search_timestamp": datetime.now().isoformat(),
+                }
+
+            logger.debug(
+                f"Search executed for '{query}': {len(all_results)} results found"
+            )
+            return all_results[:limit]
+
+        except Exception as e:
+            logger.error(f"Search execution failed: {e}")
+            return []
+
+    def _execute_keyword_search(
+        self, search_plan: MemorySearchQuery, db_manager, namespace: str, limit: int
+    ) -> List[Dict[str, Any]]:
+        """Execute keyword-based search"""
+        keywords = search_plan.entity_filters
+        if not keywords:
+            # Extract keywords from query text
+            keywords = [
+                word.strip()
+                for word in search_plan.query_text.split()
+                if len(word.strip()) > 2
+            ]
+
+        search_terms = " ".join(keywords)
+        return db_manager.search_memories(
+            query=search_terms, namespace=namespace, limit=limit
+        )
+
+    def _execute_category_search(
+        self, search_plan: MemorySearchQuery, db_manager, namespace: str, limit: int
+    ) -> List[Dict[str, Any]]:
+        """Execute category-based search"""
+        categories = (
+            [cat.value for cat in search_plan.category_filters]
+            if search_plan.category_filters
+            else []
+        )
+
+        if not categories:
+            return []
+
+        # This would need to be implemented in the database manager
+        # For now, get all memories and filter by category
+        all_results = db_manager.search_memories(
+            query="", namespace=namespace, limit=limit * 3
+        )
+
+        filtered_results = []
+        for result in all_results:
+            # Extract category from processed_data if it's stored as JSON
+            try:
+                if "processed_data" in result:
+                    processed_data = json.loads(result["processed_data"])
+                    memory_category = processed_data.get("category", {}).get(
+                        "primary_category", ""
+                    )
+                    if memory_category in categories:
+                        filtered_results.append(result)
+                elif result.get("category") in categories:
+                    filtered_results.append(result)
+            except (json.JSONDecodeError, KeyError):
+                continue
+
+        return filtered_results[:limit]
+
+    def _execute_importance_search(
+        self, search_plan: MemorySearchQuery, db_manager, namespace: str, limit: int
+    ) -> List[Dict[str, Any]]:
+        """Execute importance-based search"""
+        min_importance = max(
+            search_plan.min_importance, 0.7
+        )  # Default to high importance
+
+        all_results = db_manager.search_memories(
+            query="", namespace=namespace, limit=limit * 2
+        )
+
+        high_importance_results = [
+            result
+            for result in all_results
+            if result.get("importance_score", 0) >= min_importance
+        ]
+
+        return high_importance_results[:limit]
+
+    def _create_fallback_query(self, query: str) -> MemorySearchQuery:
+        """Create a fallback search query for error cases"""
+        return MemorySearchQuery(
+            query_text=query,
+            intent="General search (fallback)",
+            entity_filters=[word for word in query.split() if len(word) > 2],
+            search_strategy=["keyword_search", "general_search"],
+            expected_result_types=["any"],
+        )
+
+    def _cleanup_cache(self):
+        """Clean up expired cache entries"""
+        current_time = time.time()
+        expired_keys = [
+            key
+            for key, (_, timestamp) in self._query_cache.items()
+            if current_time - timestamp >= self._cache_ttl
+        ]
+        for key in expired_keys:
+            del self._query_cache[key]
+
+    async def execute_search_async(
+        self, query: str, db_manager, namespace: str = "default", limit: int = 10
+    ) -> List[Dict[str, Any]]:
+        """
+        Async version of execute_search for better performance in background processing
+        """
+        try:
+            # Run search planning in background if needed
+            loop = asyncio.get_event_loop()
+            search_plan = await loop.run_in_executor(
+                self._background_executor, self.plan_search, query
+            )
+
+            # Execute searches concurrently
+            search_tasks = []
+
+            # Keyword search task
+            if (
+                search_plan.entity_filters
+                or "keyword_search" in search_plan.search_strategy
+            ):
+                search_tasks.append(
+                    loop.run_in_executor(
+                        self._background_executor,
+                        self._execute_keyword_search,
+                        search_plan,
+                        db_manager,
+                        namespace,
+                        limit,
+                    )
+                )
+
+            # Category search task
+            if (
+                search_plan.category_filters
+                or "category_filter" in search_plan.search_strategy
+            ):
+                search_tasks.append(
+                    loop.run_in_executor(
+                        self._background_executor,
+                        self._execute_category_search,
+                        search_plan,
+                        db_manager,
+                        namespace,
+                        limit,
+                    )
+                )
+
+            # Execute all searches concurrently
+            if search_tasks:
+                results_lists = await asyncio.gather(
+                    *search_tasks, return_exceptions=True
+                )
+
+                all_results = []
+                seen_memory_ids = set()
+
+                for i, results in enumerate(results_lists):
+                    if isinstance(results, Exception):
+                        logger.warning(f"Search task {i} failed: {results}")
+                        continue
+
+                    for result in results:
+                        if result.get("memory_id") not in seen_memory_ids:
+                            seen_memory_ids.add(result["memory_id"])
+                            all_results.append(result)
+
+                return all_results[:limit]
+
+            # Fallback to sync execution
+            return self.execute_search(query, db_manager, namespace, limit)
+
+        except Exception as e:
+            logger.error(f"Async search execution failed: {e}")
+            return []
+
+    def execute_search_background(
+        self,
+        query: str,
+        db_manager,
+        namespace: str = "default",
+        limit: int = 10,
+        callback=None,
+    ):
+        """
+        Execute search in background thread for non-blocking operation
+
+        Args:
+            query: Search query
+            db_manager: Database manager
+            namespace: Memory namespace
+            limit: Max results
+            callback: Optional callback function to handle results
+        """
+
+        def _background_search():
+            try:
+                results = self.execute_search(query, db_manager, namespace, limit)
+                if callback:
+                    callback(results)
+                return results
+            except Exception as e:
+                logger.error(f"Background search failed: {e}")
+                if callback:
+                    callback([])
+                return []
+
+        # Start background thread
+        thread = threading.Thread(target=_background_search, daemon=True)
+        thread.start()
+        return thread
+
+    def search_memories(
+        self, query: str, max_results: int = 5, namespace: str = "default"
+    ) -> List[Dict[str, Any]]:
+        """
+        Simple search interface for compatibility with memory tools
+
+        Args:
+            query: Search query
+            max_results: Maximum number of results
+            namespace: Memory namespace
+
+        Returns:
+            List of memory search results
+        """
+        # This is a compatibility method that uses the database manager directly
+        # We'll need the database manager to be injected or passed
+        # For now, return empty list and log the issue
+        logger.warning(f"search_memories called without database manager: {query}")
+        return []
+
+
+def create_retrieval_agent(
+    memori_instance=None, api_key: str = None, model: str = "gpt-4o"
+) -> MemorySearchEngine:
+    """
+    Create a retrieval agent instance
+
+    Args:
+        memori_instance: Optional Memori instance for direct database access
+        api_key: OpenAI API key
+        model: Model to use for query planning
+
+    Returns:
+        MemorySearchEngine instance
+    """
+    agent = MemorySearchEngine(api_key=api_key, model=model)
+    if memori_instance:
+        agent._memori_instance = memori_instance
+    return agent
+
+
+def smart_memory_search(query: str, memori_instance, limit: int = 5) -> str:
+    """
+    Direct string-based memory search function that uses intelligent retrieval
+
+    Args:
+        query: Search query string
+        memori_instance: Memori instance with database access
+        limit: Maximum number of results
+
+    Returns:
+        Formatted string with search results
+    """
+    try:
+        # Create search engine
+        search_engine = MemorySearchEngine()
+
+        # Execute intelligent search
+        results = search_engine.execute_search(
+            query=query,
+            db_manager=memori_instance.db_manager,
+            namespace=memori_instance.namespace,
+            limit=limit,
+        )
+
+        if not results:
+            return f"No relevant memories found for query: '{query}'"
+
+        # Format results as a readable string
+        output = f"🔍 Smart Memory Search Results for: '{query}'\n\n"
+
+        for i, result in enumerate(results, 1):
+            try:
+                # Try to parse processed data for better formatting
+                if "processed_data" in result:
+                    import json
+
+                    processed_data = json.loads(result["processed_data"])
+                    summary = processed_data.get("summary", "")
+                    category = processed_data.get("category", {}).get(
+                        "primary_category", ""
+                    )
+                else:
+                    summary = result.get(
+                        "summary", result.get("searchable_content", "")[:100] + "..."
+                    )
+                    category = result.get("category_primary", "unknown")
+
+                importance = result.get("importance_score", 0.0)
+                created_at = result.get("created_at", "")
+                search_strategy = result.get("search_strategy", "unknown")
+                search_reasoning = result.get("search_reasoning", "")
+
+                output += f"{i}. [{category.upper()}] {summary}\n"
+                output += f" 📊 Importance: {importance:.2f} | 📅 {created_at}\n"
+                output += f" 🔍 Strategy: {search_strategy}\n"
+
+                if search_reasoning:
+                    output += f" 🎯 {search_reasoning}\n"
+
+                output += "\n"
+
+            except Exception:
+                # Fallback formatting
+                content = result.get("searchable_content", "Memory content available")[
+                    :100
+                ]
+                output += f"{i}. {content}...\n\n"
+
+        return output.strip()
+
+    except Exception as e:
+        logger.error(f"Smart memory search failed: {e}")
+        return f"Error in smart memory search: {str(e)}"
memoriai/config/__init__.py
@@ -0,0 +1,14 @@
+"""
+Configuration management for Memoriai
+"""
+
+from .manager import ConfigManager
+from .settings import AgentSettings, DatabaseSettings, LoggingSettings, MemoriSettings
+
+__all__ = [
+    "MemoriSettings",
+    "DatabaseSettings",
+    "AgentSettings",
+    "LoggingSettings",
+    "ConfigManager",
+]
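For completeness, the import surface this __init__ re-exports can be exercised with a one-line sketch; the symbol names come from the diff above, while anything beyond the import itself would depend on manager.py and settings.py, which are not shown in this hunk.

    # Hypothetical import check; ConfigManager and the settings classes are
    # re-exported by memoriai/config/__init__.py as shown in the diff.
    from memoriai.config import ConfigManager, MemoriSettings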