memorisdk 1.0.0__py3-none-any.whl

This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of memorisdk might be problematic; see the details below.

Files changed (44)
  1. memoriai/__init__.py +140 -0
  2. memoriai/agents/__init__.py +7 -0
  3. memoriai/agents/conscious_agent.py +506 -0
  4. memoriai/agents/memory_agent.py +322 -0
  5. memoriai/agents/retrieval_agent.py +579 -0
  6. memoriai/config/__init__.py +14 -0
  7. memoriai/config/manager.py +281 -0
  8. memoriai/config/settings.py +287 -0
  9. memoriai/core/__init__.py +6 -0
  10. memoriai/core/database.py +966 -0
  11. memoriai/core/memory.py +1349 -0
  12. memoriai/database/__init__.py +5 -0
  13. memoriai/database/connectors/__init__.py +9 -0
  14. memoriai/database/connectors/mysql_connector.py +159 -0
  15. memoriai/database/connectors/postgres_connector.py +158 -0
  16. memoriai/database/connectors/sqlite_connector.py +148 -0
  17. memoriai/database/queries/__init__.py +15 -0
  18. memoriai/database/queries/base_queries.py +204 -0
  19. memoriai/database/queries/chat_queries.py +157 -0
  20. memoriai/database/queries/entity_queries.py +236 -0
  21. memoriai/database/queries/memory_queries.py +178 -0
  22. memoriai/database/templates/__init__.py +0 -0
  23. memoriai/database/templates/basic_template.py +0 -0
  24. memoriai/database/templates/schemas/__init__.py +0 -0
  25. memoriai/integrations/__init__.py +68 -0
  26. memoriai/integrations/anthropic_integration.py +194 -0
  27. memoriai/integrations/litellm_integration.py +11 -0
  28. memoriai/integrations/openai_integration.py +273 -0
  29. memoriai/scripts/llm_text.py +50 -0
  30. memoriai/tools/__init__.py +5 -0
  31. memoriai/tools/memory_tool.py +544 -0
  32. memoriai/utils/__init__.py +89 -0
  33. memoriai/utils/exceptions.py +418 -0
  34. memoriai/utils/helpers.py +433 -0
  35. memoriai/utils/logging.py +204 -0
  36. memoriai/utils/pydantic_models.py +258 -0
  37. memoriai/utils/schemas.py +0 -0
  38. memoriai/utils/validators.py +339 -0
  39. memorisdk-1.0.0.dist-info/METADATA +386 -0
  40. memorisdk-1.0.0.dist-info/RECORD +44 -0
  41. memorisdk-1.0.0.dist-info/WHEEL +5 -0
  42. memorisdk-1.0.0.dist-info/entry_points.txt +2 -0
  43. memorisdk-1.0.0.dist-info/licenses/LICENSE +203 -0
  44. memorisdk-1.0.0.dist-info/top_level.txt +1 -0
@@ -0,0 +1,322 @@
1
+ """
2
+ Memory Agent - The heart of Memori v1.0
3
+ Pydantic-based memory processing using OpenAI Structured Outputs
4
+ """
5
+
6
import asyncio
from datetime import datetime
from typing import Any, Dict, Optional

import openai
from loguru import logger

from ..utils.pydantic_models import (
    ConversationContext,
    MemoryCategoryType,
    ProcessedMemory,
    RetentionType,
)
18
+
19
+
20
class MemoryAgent:
    """
    Pydantic-based memory agent for intelligent conversation processing.

    Uses OpenAI Structured Outputs for reliable, structured memory extraction.
    """

    # Rubric sent verbatim as the system message on every processing call.
    # It instructs the model how to categorize, score, and extract entities
    # so that the response can be parsed into a ProcessedMemory object.
    SYSTEM_PROMPT = """You are an advanced Memory Processing Agent responsible for analyzing conversations and extracting structured information for long-term storage.

Your primary functions:
1. **Categorize Memory Type**: Classify information as fact, preference, skill, context, or rule
2. **Extract Entities**: Identify people, technologies, topics, skills, projects, and keywords
3. **Score Importance**: Determine retention type and various importance dimensions
4. **Create Searchable Content**: Generate optimized summaries and searchable text
5. **Make Storage Decisions**: Decide what should be stored and why

**CATEGORIZATION GUIDELINES:**
- **fact**: Factual information, definitions, technical details, specific data points
- **preference**: User preferences, likes/dislikes, settings, personal choices, opinions
- **skill**: Skills, abilities, competencies, learning progress, expertise levels
- **context**: Project context, work environment, current situations, background info
- **rule**: Rules, policies, procedures, guidelines, constraints, "should/must" statements

**RETENTION GUIDELINES:**
- **short_term**: Recent activities, temporary information, casual mentions (expires ~7 days)
- **long_term**: Important information, learned skills, preferences, significant context
- **permanent**: Critical rules, core preferences, essential facts, major milestones

**ENTITY EXTRACTION:**
Focus on extracting specific, searchable entities that would be useful for future retrieval:
- People: Names, roles, relationships
- Technologies: Tools, libraries, platforms, programming languages
- Topics: Subjects, domains, areas of interest
- Skills: Abilities, competencies, learning areas
- Projects: Named projects, repositories, initiatives
- Keywords: Important terms for search and categorization

**IMPORTANCE SCORING:**
Consider multiple dimensions:
- Overall importance (0.0-1.0): How crucial is this information?
- Novelty (0.0-1.0): How new or unique is this information?
- Relevance (0.0-1.0): How relevant to the user's current interests/work?
- Actionability (0.0-1.0): How actionable or useful is this information?

Be thorough but practical. Focus on information that would genuinely help in future conversations."""
65
+ def __init__(self, api_key: Optional[str] = None, model: str = "gpt-4o"):
66
+ """
67
+ Initialize Memory Agent with OpenAI configuration
68
+
69
+ Args:
70
+ api_key: OpenAI API key (if None, uses environment variable)
71
+ model: OpenAI model to use for structured output (gpt-4o recommended)
72
+ """
73
+ self.client = openai.OpenAI(api_key=api_key)
74
+ self.model = model
75
+
76
+ async def process_conversation(
77
+ self,
78
+ chat_id: str,
79
+ user_input: str,
80
+ ai_output: str,
81
+ context: Optional[ConversationContext] = None,
82
+ mem_prompt: Optional[str] = None,
83
+ filters: Optional[Dict[str, Any]] = None,
84
+ ) -> ProcessedMemory:
85
+ """
86
+ Process a conversation using OpenAI Structured Outputs
87
+
88
+ Args:
89
+ chat_id: Conversation ID
90
+ user_input: User's input message
91
+ ai_output: AI's response
92
+ context: Additional conversation context
93
+ mem_prompt: Optional memory filtering prompt
94
+ filters: Memory filters to apply
95
+
96
+ Returns:
97
+ Structured processed memory
98
+ """
99
+ try:
100
+ # Prepare conversation content
101
+ conversation_text = f"User: {user_input}\nAssistant: {ai_output}"
102
+
103
+ # Build system prompt
104
+ system_prompt = self.SYSTEM_PROMPT
105
+ if mem_prompt:
106
+ system_prompt += f"\n\nSPECIAL FOCUS: {mem_prompt}"
107
+
108
+ # Prepare context information
109
+ context_info = ""
110
+ if context:
111
+ context_info = f"""
112
+ CONVERSATION CONTEXT:
113
+ - Session: {context.session_id}
114
+ - Model: {context.model_used}
115
+ - User Projects: {', '.join(context.current_projects) if context.current_projects else 'None specified'}
116
+ - Relevant Skills: {', '.join(context.relevant_skills) if context.relevant_skills else 'None specified'}
117
+ - Topic Thread: {context.topic_thread or 'General conversation'}
118
+ """
119
+
120
+ # Call OpenAI Structured Outputs
121
+ completion = self.client.beta.chat.completions.parse(
122
+ model=self.model,
123
+ messages=[
124
+ {"role": "system", "content": system_prompt},
125
+ {
126
+ "role": "user",
127
+ "content": f"Process this conversation for memory storage:\n\n{conversation_text}\n{context_info}",
128
+ },
129
+ ],
130
+ response_format=ProcessedMemory,
131
+ temperature=0.1, # Low temperature for consistent processing
132
+ )
133
+
134
+ # Handle potential refusal
135
+ if completion.choices[0].message.refusal:
136
+ logger.warning(
137
+ f"Memory processing refused for chat {chat_id}: {completion.choices[0].message.refusal}"
138
+ )
139
+ return self._create_empty_memory(
140
+ chat_id, "Processing refused for safety reasons"
141
+ )
142
+
143
+ processed_memory = completion.choices[0].message.parsed
144
+
145
+ # Apply filters if provided
146
+ if filters and not self._passes_filters(processed_memory, filters):
147
+ processed_memory.should_store = False
148
+ processed_memory.storage_reasoning = (
149
+ "Filtered out based on memory filters"
150
+ )
151
+
152
+ # Add processing metadata
153
+ processed_memory.processing_metadata = {
154
+ "chat_id": chat_id,
155
+ "model": self.model,
156
+ "processed_at": datetime.now().isoformat(),
157
+ "agent_version": "v1.0_pydantic",
158
+ }
159
+
160
+ logger.debug(
161
+ f"Processed conversation {chat_id}: category={processed_memory.category.primary_category}, should_store={processed_memory.should_store}"
162
+ )
163
+ return processed_memory
164
+
165
+ except Exception as e:
166
+ logger.error(f"Memory agent processing failed for {chat_id}: {e}")
167
+ return self._create_empty_memory(chat_id, f"Processing failed: {str(e)}")
168
+
169
+ def process_conversation_sync(
170
+ self,
171
+ chat_id: str,
172
+ user_input: str,
173
+ ai_output: str,
174
+ context: Optional[ConversationContext] = None,
175
+ mem_prompt: Optional[str] = None,
176
+ filters: Optional[Dict[str, Any]] = None,
177
+ ) -> ProcessedMemory:
178
+ """
179
+ Synchronous version of process_conversation for compatibility
180
+ """
181
+ try:
182
+ # Prepare conversation content
183
+ conversation_text = f"User: {user_input}\nAssistant: {ai_output}"
184
+
185
+ # Build system prompt
186
+ system_prompt = self.SYSTEM_PROMPT
187
+ if mem_prompt:
188
+ system_prompt += f"\n\nSPECIAL FOCUS: {mem_prompt}"
189
+
190
+ # Prepare context information
191
+ context_info = ""
192
+ if context:
193
+ context_info = f"""
194
+ CONVERSATION CONTEXT:
195
+ - Session: {context.session_id}
196
+ - Model: {context.model_used}
197
+ - User Projects: {', '.join(context.current_projects) if context.current_projects else 'None specified'}
198
+ - Relevant Skills: {', '.join(context.relevant_skills) if context.relevant_skills else 'None specified'}
199
+ - Topic Thread: {context.topic_thread or 'General conversation'}
200
+ """
201
+
202
+ # Call OpenAI Structured Outputs
203
+ completion = self.client.beta.chat.completions.parse(
204
+ model=self.model,
205
+ messages=[
206
+ {"role": "system", "content": system_prompt},
207
+ {
208
+ "role": "user",
209
+ "content": f"Process this conversation for memory storage:\n\n{conversation_text}\n{context_info}",
210
+ },
211
+ ],
212
+ response_format=ProcessedMemory,
213
+ temperature=0.1,
214
+ )
215
+
216
+ # Handle potential refusal
217
+ if completion.choices[0].message.refusal:
218
+ logger.warning(
219
+ f"Memory processing refused for chat {chat_id}: {completion.choices[0].message.refusal}"
220
+ )
221
+ return self._create_empty_memory(
222
+ chat_id, "Processing refused for safety reasons"
223
+ )
224
+
225
+ processed_memory = completion.choices[0].message.parsed
226
+
227
+ # Apply filters if provided
228
+ if filters and not self._passes_filters(processed_memory, filters):
229
+ processed_memory.should_store = False
230
+ processed_memory.storage_reasoning = (
231
+ "Filtered out based on memory filters"
232
+ )
233
+
234
+ # Add processing metadata
235
+ processed_memory.processing_metadata = {
236
+ "chat_id": chat_id,
237
+ "model": self.model,
238
+ "processed_at": datetime.now().isoformat(),
239
+ "agent_version": "v1.0_pydantic",
240
+ }
241
+
242
+ logger.debug(
243
+ f"Processed conversation {chat_id}: category={processed_memory.category.primary_category}, should_store={processed_memory.should_store}"
244
+ )
245
+ return processed_memory
246
+
247
+ except Exception as e:
248
+ logger.error(f"Memory agent processing failed for {chat_id}: {e}")
249
+ return self._create_empty_memory(chat_id, f"Processing failed: {str(e)}")
250
+
251
+ def _passes_filters(self, memory: ProcessedMemory, filters: Dict[str, Any]) -> bool:
252
+ """Check if processed memory passes configured filters"""
253
+
254
+ # Include keywords filter
255
+ if "include_keywords" in filters:
256
+ include_keywords = filters["include_keywords"]
257
+ content_lower = memory.searchable_content.lower()
258
+ if not any(
259
+ keyword.lower() in content_lower for keyword in include_keywords
260
+ ):
261
+ return False
262
+
263
+ # Exclude keywords filter
264
+ if "exclude_keywords" in filters:
265
+ exclude_keywords = filters["exclude_keywords"]
266
+ content_lower = memory.searchable_content.lower()
267
+ if any(keyword.lower() in content_lower for keyword in exclude_keywords):
268
+ return False
269
+
270
+ # Minimum importance filter
271
+ if "min_importance" in filters:
272
+ if memory.importance.importance_score < filters["min_importance"]:
273
+ return False
274
+
275
+ # Category filter
276
+ if "allowed_categories" in filters:
277
+ if memory.category.primary_category not in filters["allowed_categories"]:
278
+ return False
279
+
280
+ return True
281
+
282
    def _create_empty_memory(self, chat_id: str, reason: str) -> ProcessedMemory:
        """Create an empty memory object for error cases"""
        # Local import: these model classes are only needed on this error path.
        from ..utils.pydantic_models import (
            ExtractedEntities,
            MemoryCategory,
            MemoryCategoryType,
            MemoryImportance,
            RetentionType,
        )

        # Zeroed scores plus should_store=False ensure a failed (or refused)
        # processing attempt is never persisted; *reason* is kept both as the
        # storage reasoning and in the metadata for diagnostics.
        return ProcessedMemory(
            category=MemoryCategory(
                primary_category=MemoryCategoryType.fact,
                confidence_score=0.0,
                reasoning="Failed to process",
            ),
            entities=ExtractedEntities(),
            importance=MemoryImportance(
                importance_score=0.0,
                retention_type=RetentionType.short_term,
                reasoning="Processing failed",
            ),
            summary="Processing failed",
            searchable_content="",
            should_store=False,
            storage_reasoning=reason,
            processing_metadata={"chat_id": chat_id, "error": reason},
        )
310
+
311
+ def determine_storage_location(self, processed_memory: ProcessedMemory) -> str:
312
+ """Determine appropriate storage location based on memory properties"""
313
+
314
+ if processed_memory.category.primary_category == MemoryCategoryType.rule:
315
+ return "rules_memory"
316
+
317
+ if processed_memory.importance.retention_type == RetentionType.permanent:
318
+ return "long_term_memory"
319
+ elif processed_memory.importance.retention_type == RetentionType.long_term:
320
+ return "long_term_memory"
321
+ else:
322
+ return "short_term_memory"