memorisdk 1.0.1__py3-none-any.whl → 2.0.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of memorisdk has been flagged as potentially problematic; see the registry's advisory page for details.

Files changed (46)
  1. memori/__init__.py +24 -8
  2. memori/agents/conscious_agent.py +252 -414
  3. memori/agents/memory_agent.py +487 -224
  4. memori/agents/retrieval_agent.py +416 -60
  5. memori/config/memory_manager.py +323 -0
  6. memori/core/conversation.py +393 -0
  7. memori/core/database.py +386 -371
  8. memori/core/memory.py +1676 -534
  9. memori/core/providers.py +217 -0
  10. memori/database/adapters/__init__.py +10 -0
  11. memori/database/adapters/mysql_adapter.py +331 -0
  12. memori/database/adapters/postgresql_adapter.py +291 -0
  13. memori/database/adapters/sqlite_adapter.py +229 -0
  14. memori/database/auto_creator.py +320 -0
  15. memori/database/connection_utils.py +207 -0
  16. memori/database/connectors/base_connector.py +283 -0
  17. memori/database/connectors/mysql_connector.py +240 -18
  18. memori/database/connectors/postgres_connector.py +277 -4
  19. memori/database/connectors/sqlite_connector.py +178 -3
  20. memori/database/models.py +400 -0
  21. memori/database/queries/base_queries.py +1 -1
  22. memori/database/queries/memory_queries.py +91 -2
  23. memori/database/query_translator.py +222 -0
  24. memori/database/schema_generators/__init__.py +7 -0
  25. memori/database/schema_generators/mysql_schema_generator.py +215 -0
  26. memori/database/search/__init__.py +8 -0
  27. memori/database/search/mysql_search_adapter.py +255 -0
  28. memori/database/search/sqlite_search_adapter.py +180 -0
  29. memori/database/search_service.py +548 -0
  30. memori/database/sqlalchemy_manager.py +839 -0
  31. memori/integrations/__init__.py +36 -11
  32. memori/integrations/litellm_integration.py +340 -6
  33. memori/integrations/openai_integration.py +506 -240
  34. memori/utils/input_validator.py +395 -0
  35. memori/utils/pydantic_models.py +138 -36
  36. memori/utils/query_builder.py +530 -0
  37. memori/utils/security_audit.py +594 -0
  38. memori/utils/security_integration.py +339 -0
  39. memori/utils/transaction_manager.py +547 -0
  40. {memorisdk-1.0.1.dist-info → memorisdk-2.0.0.dist-info}/METADATA +144 -34
  41. memorisdk-2.0.0.dist-info/RECORD +67 -0
  42. memorisdk-1.0.1.dist-info/RECORD +0 -44
  43. memorisdk-1.0.1.dist-info/entry_points.txt +0 -2
  44. {memorisdk-1.0.1.dist-info → memorisdk-2.0.0.dist-info}/WHEEL +0 -0
  45. {memorisdk-1.0.1.dist-info → memorisdk-2.0.0.dist-info}/licenses/LICENSE +0 -0
  46. {memorisdk-1.0.1.dist-info → memorisdk-2.0.0.dist-info}/top_level.txt +0 -0
@@ -39,28 +39,53 @@ from loguru import logger
39
39
  from . import anthropic_integration, litellm_integration, openai_integration
40
40
 
41
41
# Public API: new interceptor entry points first, then legacy wrappers.
__all__ = [
    "MemoriOpenAIInterceptor",  # new interceptor class (recommended)
    "MemoriOpenAI",  # legacy wrapper for direct SDK usage
    "MemoriAnthropic",  # legacy wrapper for direct SDK usage
    "create_openai_client",  # factory function
    "setup_openai_interceptor",  # factory function
]
46
51
 
47
52
 
48
53
  # For backward compatibility, provide simple passthrough
49
54
  try:
50
55
  from .anthropic_integration import MemoriAnthropic
51
- from .openai_integration import MemoriOpenAI
52
-
53
- # But warn users about the better way
56
+ from .openai_integration import (
57
+ MemoriOpenAI,
58
+ MemoriOpenAIInterceptor,
59
+ create_openai_client,
60
+ setup_openai_interceptor,
61
+ )
62
+
63
+ # But warn users about the better way for deprecated classes
54
64
def __getattr__(name):
    """Module-level attribute fallback (PEP 562).

    Emits a deprecation warning when the legacy wrapper classes are looked
    up, and resolves the new interceptor names.

    Args:
        name: Attribute name being looked up on the module.

    Returns:
        The matching class/function for known names.

    Raises:
        AttributeError: For any unknown attribute name.
    """
    # NOTE(review): because all of these names are also imported at module
    # top level, normal attribute lookup succeeds and this hook never fires
    # for them — the deprecation warnings below are effectively dead code
    # unless the top-level imports are removed. TODO confirm intent.
    if name == "MemoriOpenAI":
        logger.warning(
            "🚨 MemoriOpenAI wrapper class is deprecated!\n"
            "✅ NEW RECOMMENDED WAY: Use MemoriOpenAIInterceptor or memori.create_openai_client()"
        )
        return MemoriOpenAI
    elif name == "MemoriAnthropic":
        logger.warning(
            "🚨 MemoriAnthropic wrapper class is deprecated!\n"
            "✅ NEW SIMPLE WAY: Use memori.enable() and import anthropic normally"
        )
        return MemoriAnthropic
    # New recommended classes/functions — flattened from the original
    # redundant `elif name in [...]` + nested if/elif dispatch.
    elif name == "MemoriOpenAIInterceptor":
        return MemoriOpenAIInterceptor
    elif name == "create_openai_client":
        return create_openai_client
    elif name == "setup_openai_interceptor":
        return setup_openai_interceptor
    raise AttributeError(f"module '{__name__}' has no attribute '{name}'")
65
90
 
66
91
  except ImportError:
@@ -1,11 +1,345 @@
1
1
  """
2
- LiteLLM Integration - DEPRECATED
2
+ LiteLLM Integration - Native Callback System
3
3
 
4
- This integration is deprecated. LiteLLM now uses native callbacks
5
- implemented directly in memori/core/memory.py
4
+ This module handles LiteLLM native callback registration for automatic
5
+ memory recording. It uses LiteLLM's official callback mechanism instead
6
+ of monkey-patching.
6
7
 
7
- The native callback system is more robust and uses LiteLLM's official
8
- extension mechanism instead of monkey-patching.
8
+ Usage:
9
+ from memori import Memori
9
10
 
10
- Use: memori.enable() which registers with LiteLLM's success_callback system.
11
+ memori = Memori(...)
12
+ memori.enable() # Automatically registers LiteLLM callbacks
13
+
14
+ # Now use LiteLLM normally - conversations are auto-recorded
15
+ from litellm import completion
16
+ response = completion(model="gpt-4o", messages=[...])
11
17
  """
18
+
19
+ from typing import Optional
20
+
21
+ from loguru import logger
22
+
23
# Probe for LiteLLM at import time; the integration degrades gracefully
# when the package is missing.
try:
    import litellm
except ImportError:
    LITELLM_AVAILABLE = False
    HAS_MODIFYING_ROUTER = False
    logger.warning("LiteLLM not available - native callback system disabled")
else:
    LITELLM_AVAILABLE = True
    # Context injection support requires LiteLLM's Router pre-call hook.
    HAS_MODIFYING_ROUTER = hasattr(litellm, "Router") and hasattr(
        litellm.Router, "pre_call_hook"
    )
37
+
38
+
39
class LiteLLMCallbackManager:
    """
    Manages LiteLLM native callback registration and integration with Memori.

    Registers a success callback with LiteLLM's official ``success_callback``
    list for post-response recording, and — when conscious/auto ingest is
    enabled — wraps ``litellm.completion`` to inject memory context before
    requests (LiteLLM has no official pre-call hook for plain completion()
    calls, so wrapping is the only reliable injection point).
    """

    def __init__(self, memori_instance):
        """
        Initialize LiteLLM callback manager.

        Args:
            memori_instance: The Memori instance to record conversations to
        """
        self.memori_instance = memori_instance
        self._callback_registered = False
        # Snapshot of litellm.success_callback so unregister can restore it.
        self._original_callbacks = None
        # Saved litellm.completion function (for context injection).
        self._original_completion = None

    def register_callbacks(self) -> bool:
        """
        Register LiteLLM native callbacks for automatic memory recording.

        Returns:
            True if registration successful, False otherwise
        """
        if not LITELLM_AVAILABLE:
            logger.error("LiteLLM not available - cannot register callbacks")
            return False

        if self._callback_registered:
            logger.warning("LiteLLM callbacks already registered")
            return True

        try:
            # Store original callbacks for restoration
            self._original_callbacks = getattr(litellm, "success_callback", [])

            # Normalize litellm.success_callback into a list we can append to
            if not hasattr(litellm, "success_callback"):
                litellm.success_callback = []
            elif not isinstance(litellm.success_callback, list):
                litellm.success_callback = [litellm.success_callback]

            # Add our callback function
            litellm.success_callback.append(self._litellm_success_callback)

            # For context injection, we need to wrap the completion function
            if (
                self.memori_instance.conscious_ingest
                or self.memori_instance.auto_ingest
            ):
                self._setup_context_injection()

            self._callback_registered = True
            logger.info("LiteLLM native callbacks registered successfully")
            return True

        except Exception as e:
            logger.error(f"Failed to register LiteLLM callbacks: {e}")
            return False

    def unregister_callbacks(self) -> bool:
        """
        Unregister LiteLLM callbacks and restore original state.

        Returns:
            True if unregistration successful, False otherwise
        """
        if not LITELLM_AVAILABLE:
            return False

        if not self._callback_registered:
            logger.warning("LiteLLM callbacks not registered")
            return True

        try:
            # Remove all instances of our callback
            if hasattr(litellm, "success_callback") and isinstance(
                litellm.success_callback, list
            ):
                litellm.success_callback = [
                    cb
                    for cb in litellm.success_callback
                    if cb != self._litellm_success_callback
                ]

                # If no callbacks left, restore original state
                if not litellm.success_callback:
                    if self._original_callbacks:
                        litellm.success_callback = self._original_callbacks
                    else:
                        delattr(litellm, "success_callback")

            # Restore original completion function if we wrapped it
            if self._original_completion is not None:
                litellm.completion = self._original_completion
                self._original_completion = None

            self._callback_registered = False
            logger.info("LiteLLM native callbacks unregistered successfully")
            return True

        except Exception as e:
            logger.error(f"Failed to unregister LiteLLM callbacks: {e}")
            return False

    @staticmethod
    def _compute_duration_ms(start_time, end_time):
        """Best-effort elapsed milliseconds between start_time and end_time.

        Supports numeric (int/float epoch seconds) pairs as before, and
        additionally datetime pairs — LiteLLM typically passes datetime
        objects here, for which the previous numeric-only check always
        yielded 0. Returns 0 when the duration cannot be determined.
        """
        if start_time is None or end_time is None:
            return 0
        try:
            if isinstance(start_time, (int, float)) and isinstance(
                end_time, (int, float)
            ):
                return (end_time - start_time) * 1000
            delta = end_time - start_time
            # datetime - datetime yields a timedelta with total_seconds()
            if callable(getattr(delta, "total_seconds", None)):
                return delta.total_seconds() * 1000
        except Exception:
            pass
        return 0

    def _litellm_success_callback(self, kwargs, response, start_time, end_time):
        """
        LiteLLM success callback that records conversations in Memori.

        This function is automatically called by LiteLLM after successful
        completions.

        Args:
            kwargs: Original request parameters
            response: LiteLLM response object
            start_time: Request start time
            end_time: Request end time
        """
        try:
            if not self.memori_instance or not self.memori_instance.is_enabled:
                return

            # Context injection happens BEFORE the request (see
            # _setup_context_injection); this callback only records AFTER.

            # Extract the most recent user message
            user_input = ""
            messages = kwargs.get("messages", [])
            for msg in reversed(messages):
                if msg.get("role") == "user":
                    user_input = msg.get("content", "")
                    break

            # Extract AI output from the first choice
            ai_output = ""
            if hasattr(response, "choices") and response.choices:
                choice = response.choices[0]
                if hasattr(choice, "message") and hasattr(choice.message, "content"):
                    ai_output = choice.message.content or ""

            # Extract model (fallback label for unknown models)
            model = kwargs.get("model", "litellm-unknown")

            # Calculate timing (handles numeric and datetime timestamps)
            duration_ms = self._compute_duration_ms(start_time, end_time)

            # Extract token usage
            tokens_used = 0
            if hasattr(response, "usage") and response.usage:
                tokens_used = getattr(response.usage, "total_tokens", 0)

            # Prepare metadata
            metadata = {
                "integration": "litellm_native",
                "api_type": "completion",
                "tokens_used": tokens_used,
                "auto_recorded": True,
                "duration_ms": duration_ms,
            }

            # Add token details if available
            if hasattr(response, "usage") and response.usage:
                usage = response.usage
                metadata.update(
                    {
                        "prompt_tokens": getattr(usage, "prompt_tokens", 0),
                        "completion_tokens": getattr(usage, "completion_tokens", 0),
                    }
                )

            # Record the conversation only when both sides are non-empty
            if user_input and ai_output:
                self.memori_instance.record_conversation(
                    user_input=user_input,
                    ai_output=ai_output,
                    model=model,
                    metadata=metadata,
                )
                logger.debug(
                    f"LiteLLM callback: Recorded conversation for model {model}"
                )

        except Exception as e:
            # Never let recording failures break the caller's completion flow
            logger.error(f"LiteLLM callback failed: {e}")

    def _setup_context_injection(self):
        """Set up context injection by wrapping LiteLLM's completion function."""
        try:
            if self._original_completion is not None:
                # Already set up
                return

            # Store original completion function
            self._original_completion = litellm.completion

            # Wrapper that injects memory context before delegating
            def completion_with_context(*args, **kwargs):
                kwargs = self._inject_context(kwargs)
                return self._original_completion(*args, **kwargs)

            # Replace LiteLLM's completion function
            litellm.completion = completion_with_context

            logger.debug("Context injection wrapper set up for LiteLLM")

        except Exception as e:
            logger.error(f"Failed to set up context injection: {e}")

    def _inject_context(self, kwargs):
        """Inject memory context into LiteLLM request kwargs.

        Delegates to the Memori instance's _inject_litellm_context; on any
        failure, returns kwargs unchanged so the request still goes through.
        """
        try:
            if not self.memori_instance:
                return kwargs

            if (
                self.memori_instance.conscious_ingest
                or self.memori_instance.auto_ingest
            ):
                logger.debug("LiteLLM: Starting context injection")

                # Determine mode (conscious takes precedence)
                if self.memori_instance.conscious_ingest:
                    mode = "conscious"
                elif self.memori_instance.auto_ingest:
                    mode = "auto"
                else:
                    mode = "auto"  # fallback

                # Extract user input first to debug what we're working with
                messages = kwargs.get("messages", [])
                user_input = ""
                for msg in reversed(messages):
                    if msg.get("role") == "user":
                        user_input = msg.get("content", "")
                        break

                logger.debug(
                    f"LiteLLM: Injecting context in {mode} mode for input: {user_input[:100]}..."
                )

                # Use the existing _inject_litellm_context method
                kwargs = self.memori_instance._inject_litellm_context(kwargs, mode=mode)

                # Verify injection worked (message count should grow)
                updated_messages = kwargs.get("messages", [])
                if len(updated_messages) > len(messages):
                    logger.debug(
                        f"LiteLLM: Context injection successful, message count increased from {len(messages)} to {len(updated_messages)}"
                    )
                else:
                    logger.debug(
                        "LiteLLM: Context injection completed, no new messages added (may be intended)"
                    )

        except Exception as e:
            logger.error(f"Context injection failed in LiteLLM wrapper: {e}")
            import traceback

            logger.debug(f"LiteLLM injection stack trace: {traceback.format_exc()}")

        return kwargs

    @property
    def is_registered(self) -> bool:
        """Check if callbacks are registered."""
        return self._callback_registered
326
+
327
+
328
def setup_litellm_callbacks(memori_instance) -> Optional[LiteLLMCallbackManager]:
    """
    Convenience function to set up LiteLLM callbacks for a Memori instance.

    Args:
        memori_instance: The Memori instance to record conversations to

    Returns:
        LiteLLMCallbackManager instance if successful, None otherwise
    """
    if not LITELLM_AVAILABLE:
        logger.error("LiteLLM not available - cannot set up callbacks")
        return None

    manager = LiteLLMCallbackManager(memori_instance)
    return manager if manager.register_callbacks() else None