memorisdk 1.0.2__py3-none-any.whl → 2.0.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of memorisdk might be problematic. Click here for more details.

Files changed (48) hide show
  1. memori/__init__.py +24 -8
  2. memori/agents/conscious_agent.py +252 -414
  3. memori/agents/memory_agent.py +487 -224
  4. memori/agents/retrieval_agent.py +491 -68
  5. memori/config/memory_manager.py +323 -0
  6. memori/core/conversation.py +393 -0
  7. memori/core/database.py +386 -371
  8. memori/core/memory.py +1683 -532
  9. memori/core/providers.py +217 -0
  10. memori/database/adapters/__init__.py +10 -0
  11. memori/database/adapters/mysql_adapter.py +331 -0
  12. memori/database/adapters/postgresql_adapter.py +291 -0
  13. memori/database/adapters/sqlite_adapter.py +229 -0
  14. memori/database/auto_creator.py +320 -0
  15. memori/database/connection_utils.py +207 -0
  16. memori/database/connectors/base_connector.py +283 -0
  17. memori/database/connectors/mysql_connector.py +240 -18
  18. memori/database/connectors/postgres_connector.py +277 -4
  19. memori/database/connectors/sqlite_connector.py +178 -3
  20. memori/database/models.py +400 -0
  21. memori/database/queries/base_queries.py +1 -1
  22. memori/database/queries/memory_queries.py +91 -2
  23. memori/database/query_translator.py +222 -0
  24. memori/database/schema_generators/__init__.py +7 -0
  25. memori/database/schema_generators/mysql_schema_generator.py +215 -0
  26. memori/database/search/__init__.py +8 -0
  27. memori/database/search/mysql_search_adapter.py +255 -0
  28. memori/database/search/sqlite_search_adapter.py +180 -0
  29. memori/database/search_service.py +700 -0
  30. memori/database/sqlalchemy_manager.py +888 -0
  31. memori/integrations/__init__.py +36 -11
  32. memori/integrations/litellm_integration.py +340 -6
  33. memori/integrations/openai_integration.py +506 -240
  34. memori/tools/memory_tool.py +94 -4
  35. memori/utils/input_validator.py +395 -0
  36. memori/utils/pydantic_models.py +138 -36
  37. memori/utils/query_builder.py +530 -0
  38. memori/utils/security_audit.py +594 -0
  39. memori/utils/security_integration.py +339 -0
  40. memori/utils/transaction_manager.py +547 -0
  41. {memorisdk-1.0.2.dist-info → memorisdk-2.0.1.dist-info}/METADATA +56 -23
  42. memorisdk-2.0.1.dist-info/RECORD +66 -0
  43. memori/scripts/llm_text.py +0 -50
  44. memorisdk-1.0.2.dist-info/RECORD +0 -44
  45. memorisdk-1.0.2.dist-info/entry_points.txt +0 -2
  46. {memorisdk-1.0.2.dist-info → memorisdk-2.0.1.dist-info}/WHEEL +0 -0
  47. {memorisdk-1.0.2.dist-info → memorisdk-2.0.1.dist-info}/licenses/LICENSE +0 -0
  48. {memorisdk-1.0.2.dist-info → memorisdk-2.0.1.dist-info}/top_level.txt +0 -0
@@ -1,273 +1,539 @@
1
1
  """
2
- OpenAI Integration - Clean wrapper without monkey-patching
2
+ OpenAI Integration - Automatic Interception System
3
3
 
4
- RECOMMENDED: Use LiteLLM instead for unified API and native callback support.
5
- This integration is provided for direct OpenAI SDK usage.
4
+ This module provides automatic interception of OpenAI API calls when Memori is enabled.
5
+ Users can import and use the standard OpenAI client normally, and Memori will automatically
6
+ record conversations when enabled.
6
7
 
7
8
  Usage:
8
- from memori.integrations.openai_integration import MemoriOpenAI
9
+ from openai import OpenAI
10
+ from memori import Memori
11
+
12
+ # Initialize Memori and enable it
13
+ openai_memory = Memori(
14
+ database_connect="sqlite:///openai_memory.db",
15
+ conscious_ingest=True,
16
+ verbose=True,
17
+ )
18
+ openai_memory.enable()
19
+
20
+ # Use standard OpenAI client - automatically intercepted!
21
+ client = OpenAI()
22
+ response = client.chat.completions.create(
23
+ model="gpt-4o",
24
+ messages=[{"role": "user", "content": "Hello!"}]
25
+ )
26
+ # Conversation is automatically recorded to Memori
27
+ """
9
28
 
10
- # Initialize with your memori instance
11
- client = MemoriOpenAI(memori_instance, api_key="your-key")
29
+ from loguru import logger
12
30
 
13
- # Use exactly like OpenAI client
14
- response = client.chat.completions.create(...)
15
- """
31
+ # Global registry of enabled Memori instances
32
+ _enabled_memori_instances = []
16
33
 
17
- from typing import Optional
18
34
 
19
- from loguru import logger
35
class OpenAIInterceptor:
    """
    Automatic OpenAI interception system that patches the OpenAI module
    to automatically record conversations when Memori instances are enabled.

    NOTE(review): this patches *private* openai-python client hooks
    (``_process_response`` / ``_prepare_options``); confirm the pinned SDK
    version still exposes them with these signatures.
    """

    # Map of "<client_type>_<hook>" -> original unbound method, used both to
    # avoid recursive re-patching and to restore the SDK in unpatch_openai().
    _original_methods = {}
    # Process-wide flag; patching is idempotent while this is True.
    _is_patched = False

    @classmethod
    def patch_openai(cls):
        """Patch OpenAI module to intercept API calls (no-op if already patched)."""
        if cls._is_patched:
            return

        try:
            import openai

            # Patch sync OpenAI client
            if hasattr(openai, "OpenAI"):
                cls._patch_client_class(openai.OpenAI, "sync")

            # Patch async OpenAI client
            if hasattr(openai, "AsyncOpenAI"):
                cls._patch_async_client_class(openai.AsyncOpenAI, "async")

            # Patch Azure clients if available
            if hasattr(openai, "AzureOpenAI"):
                cls._patch_client_class(openai.AzureOpenAI, "azure_sync")

            if hasattr(openai, "AsyncAzureOpenAI"):
                cls._patch_async_client_class(openai.AsyncAzureOpenAI, "azure_async")

            cls._is_patched = True
            logger.debug("OpenAI module patched for automatic interception")

        except ImportError:
            logger.warning("OpenAI not available - skipping patch")
        except Exception as e:
            logger.error(f"Failed to patch OpenAI module: {e}")

    @classmethod
    def _patch_client_class(cls, client_class, client_type):
        """Patch a sync OpenAI client class.

        Args:
            client_class: the openai client class (e.g. ``openai.OpenAI``).
            client_type: registry key prefix ("sync", "azure_sync", ...).
        """
        # Store the original unbound method (only once per client_type, so
        # repeated patch calls never capture an already-patched wrapper)
        original_key = f"{client_type}_process_response"
        if original_key not in cls._original_methods:
            cls._original_methods[original_key] = client_class._process_response

        original_prepare_key = f"{client_type}_prepare_options"
        if original_prepare_key not in cls._original_methods and hasattr(
            client_class, "_prepare_options"
        ):
            cls._original_methods[original_prepare_key] = client_class._prepare_options

        # Get reference to original method to avoid recursion
        original_process = cls._original_methods[original_key]

        def patched_process_response(
            self, *, cast_to, options, response, stream, stream_cls, **kwargs
        ):
            # Call original method first with all kwargs
            result = original_process(
                self,
                cast_to=cast_to,
                options=options,
                response=response,
                stream=stream,
                stream_cls=stream_cls,
                **kwargs,
            )

            # Record conversation for enabled Memori instances
            if not stream:  # Don't record streaming here - handle separately
                cls._record_conversation_for_enabled_instances(
                    options, result, client_type
                )

            return result

        client_class._process_response = patched_process_response

        # Patch prepare_options if it exists (used for context injection
        # before the request is sent)
        if original_prepare_key in cls._original_methods:
            original_prepare = cls._original_methods[original_prepare_key]

            def patched_prepare_options(self, options):
                # Call original method first
                options = original_prepare(self, options)

                # Inject context for enabled Memori instances
                options = cls._inject_context_for_enabled_instances(
                    options, client_type
                )

                return options

            client_class._prepare_options = patched_prepare_options

    @classmethod
    def _patch_async_client_class(cls, client_class, client_type):
        """Patch an async OpenAI client class.

        Mirrors :meth:`_patch_client_class` but wraps ``_process_response``
        in a coroutine so the original can be awaited.
        """
        # Store the original unbound method
        original_key = f"{client_type}_process_response"
        if original_key not in cls._original_methods:
            cls._original_methods[original_key] = client_class._process_response

        original_prepare_key = f"{client_type}_prepare_options"
        if original_prepare_key not in cls._original_methods and hasattr(
            client_class, "_prepare_options"
        ):
            cls._original_methods[original_prepare_key] = client_class._prepare_options

        # Get reference to original method to avoid recursion
        original_process = cls._original_methods[original_key]

        async def patched_async_process_response(
            self, *, cast_to, options, response, stream, stream_cls, **kwargs
        ):
            # Call original method first with all kwargs
            result = await original_process(
                self,
                cast_to=cast_to,
                options=options,
                response=response,
                stream=stream,
                stream_cls=stream_cls,
                **kwargs,
            )

            # Record conversation for enabled Memori instances
            if not stream:
                cls._record_conversation_for_enabled_instances(
                    options, result, client_type
                )

            return result

        client_class._process_response = patched_async_process_response

        # Patch prepare_options if it exists
        if original_prepare_key in cls._original_methods:
            original_prepare = cls._original_methods[original_prepare_key]

            # NOTE(review): recent openai-python versions define the async
            # client's _prepare_options as a coroutine function; if so, this
            # sync wrapper returns the un-awaited coroutine from
            # original_prepare and the injection below never runs — confirm
            # against the pinned SDK and consider an `async def` wrapper.
            def patched_async_prepare_options(self, options):
                # Call original method first
                options = original_prepare(self, options)

                # Inject context for enabled Memori instances
                options = cls._inject_context_for_enabled_instances(
                    options, client_type
                )

                return options

            client_class._prepare_options = patched_async_prepare_options

    @classmethod
    def _inject_context_for_enabled_instances(cls, options, client_type):
        """Inject context for all enabled Memori instances with conscious/auto ingest.

        Returns the (possibly mutated) ``options`` object. Failures are
        logged and swallowed so the user's API call is never broken.
        """
        for memori_instance in _enabled_memori_instances:
            if memori_instance.is_enabled and (
                memori_instance.conscious_ingest or memori_instance.auto_ingest
            ):
                try:
                    # Get json_data from options - handle multiple attribute
                    # name possibilities (SDK versions differ here)
                    json_data = None
                    for attr_name in ["json_data", "_json_data", "data"]:
                        if hasattr(options, attr_name):
                            json_data = getattr(options, attr_name, None)
                            if json_data:
                                break

                    if not json_data:
                        # Try to reconstruct from other options attributes
                        json_data = {}
                        if hasattr(options, "messages"):
                            json_data["messages"] = options.messages
                        elif hasattr(options, "_messages"):
                            json_data["messages"] = options._messages

                    if json_data and "messages" in json_data:
                        # This is a chat completion request - inject context
                        logger.debug(
                            f"OpenAI: Injecting context for {client_type} with {len(json_data['messages'])} messages"
                        )
                        updated_data = memori_instance._inject_openai_context(
                            {"messages": json_data["messages"]}
                        )

                        if updated_data.get("messages"):
                            # Update the options with modified messages
                            if hasattr(options, "json_data") and options.json_data:
                                options.json_data["messages"] = updated_data["messages"]
                            elif hasattr(options, "messages"):
                                options.messages = updated_data["messages"]

                            logger.debug(
                                f"OpenAI: Successfully injected context for {client_type}"
                            )
                    else:
                        logger.debug(
                            f"OpenAI: No messages found in options for {client_type}, skipping context injection"
                        )

                except Exception as e:
                    logger.error(f"Context injection failed for {client_type}: {e}")

        return options

    @classmethod
    def _is_internal_agent_call(cls, json_data):
        """Check if this is an internal agent processing call that should not be recorded.

        Returns True when any string message content contains one of the
        known internal-agent prompt prefixes; False otherwise (including on
        any error while inspecting the payload).
        """
        try:
            messages = json_data.get("messages", [])
            for message in messages:
                content = message.get("content", "")
                if isinstance(content, str):
                    # Check for internal agent processing patterns
                    internal_patterns = [
                        "Process this conversation for enhanced memory storage:",
                        "User query:",
                        "Enhanced memory processing:",
                        "Memory classification:",
                        "Search for relevant memories:",
                        "Analyze conversation for:",
                        "Extract entities from:",
                        "Categorize the following conversation:",
                    ]

                    for pattern in internal_patterns:
                        if pattern in content:
                            return True

            return False

        except Exception as e:
            logger.debug(f"Failed to check internal agent call: {e}")
            return False

    @classmethod
    def _record_conversation_for_enabled_instances(cls, options, response, client_type):
        """Record conversation for all enabled Memori instances.

        Chat requests ("messages" key) go through the instance's
        _record_openai_conversation; legacy requests ("prompt" key) go
        through :meth:`_record_legacy_completion`. Errors are logged per
        instance and never propagated.
        """
        for memori_instance in _enabled_memori_instances:
            if memori_instance.is_enabled:
                try:
                    json_data = getattr(options, "json_data", None) or {}

                    if "messages" in json_data:
                        # Skip internal agent processing calls
                        if cls._is_internal_agent_call(json_data):
                            continue
                        # Chat completions
                        memori_instance._record_openai_conversation(json_data, response)
                    elif "prompt" in json_data:
                        # Legacy completions
                        cls._record_legacy_completion(
                            memori_instance, json_data, response, client_type
                        )

                except Exception as e:
                    logger.error(
                        f"Failed to record conversation for {client_type}: {e}"
                    )

    @classmethod
    def _record_legacy_completion(
        cls, memori_instance, request_data, response, client_type
    ):
        """Record legacy completion API calls (prompt/text style)."""
        try:
            prompt = request_data.get("prompt", "")
            model = request_data.get("model", "unknown")

            # Extract AI response
            ai_output = ""
            if hasattr(response, "choices") and response.choices:
                choice = response.choices[0]
                if hasattr(choice, "text"):
                    ai_output = choice.text or ""

            # Calculate tokens
            tokens_used = 0
            if hasattr(response, "usage") and response.usage:
                tokens_used = getattr(response.usage, "total_tokens", 0)

            # Record conversation
            memori_instance.record_conversation(
                user_input=prompt,
                ai_output=ai_output,
                model=model,
                metadata={
                    "integration": "openai_auto_intercept",
                    "client_type": client_type,
                    "api_type": "completions",
                    "tokens_used": tokens_used,
                    "auto_recorded": True,
                },
            )
        except Exception as e:
            logger.error(f"Failed to record legacy completion: {e}")

    @classmethod
    def unpatch_openai(cls):
        """Restore original OpenAI module methods and clear the registry."""
        if not cls._is_patched:
            return

        try:
            import openai

            # Restore sync OpenAI client
            if "sync_process_response" in cls._original_methods:
                openai.OpenAI._process_response = cls._original_methods[
                    "sync_process_response"
                ]

            if "sync_prepare_options" in cls._original_methods:
                openai.OpenAI._prepare_options = cls._original_methods[
                    "sync_prepare_options"
                ]

            # Restore async OpenAI client
            if "async_process_response" in cls._original_methods:
                openai.AsyncOpenAI._process_response = cls._original_methods[
                    "async_process_response"
                ]

            if "async_prepare_options" in cls._original_methods:
                openai.AsyncOpenAI._prepare_options = cls._original_methods[
                    "async_prepare_options"
                ]

            # Restore Azure clients
            if (
                hasattr(openai, "AzureOpenAI")
                and "azure_sync_process_response" in cls._original_methods
            ):
                openai.AzureOpenAI._process_response = cls._original_methods[
                    "azure_sync_process_response"
                ]

            if (
                hasattr(openai, "AzureOpenAI")
                and "azure_sync_prepare_options" in cls._original_methods
            ):
                openai.AzureOpenAI._prepare_options = cls._original_methods[
                    "azure_sync_prepare_options"
                ]

            if (
                hasattr(openai, "AsyncAzureOpenAI")
                and "azure_async_process_response" in cls._original_methods
            ):
                openai.AsyncAzureOpenAI._process_response = cls._original_methods[
                    "azure_async_process_response"
                ]

            if (
                hasattr(openai, "AsyncAzureOpenAI")
                and "azure_async_prepare_options" in cls._original_methods
            ):
                openai.AsyncAzureOpenAI._prepare_options = cls._original_methods[
                    "azure_async_prepare_options"
                ]

            cls._is_patched = False
            cls._original_methods.clear()
            logger.debug("OpenAI module patches removed")

        except ImportError:
            pass  # OpenAI not available
        except Exception as e:
            logger.error(f"Failed to unpatch OpenAI module: {e}")
409
+
410
+
411
def register_memori_instance(memori_instance):
    """Add *memori_instance* to the interception registry.

    Registration is idempotent: an instance already present is left alone.
    The first successful registration also ensures the OpenAI client
    classes are patched.

    Args:
        memori_instance: Memori instance to register
    """
    global _enabled_memori_instances

    already_known = memori_instance in _enabled_memori_instances
    if not already_known:
        _enabled_memori_instances.append(memori_instance)
        logger.debug("Registered Memori instance for OpenAI interception")

        # Make sure the OpenAI client classes are patched (no-op if done).
        OpenAIInterceptor.patch_openai()
426
+
427
+
428
def unregister_memori_instance(memori_instance):
    """Remove *memori_instance* from the interception registry.

    Once the registry is empty the OpenAI patches are reverted so the
    client classes regain their original behavior.

    Args:
        memori_instance: Memori instance to unregister
    """
    global _enabled_memori_instances

    if memori_instance in _enabled_memori_instances:
        _enabled_memori_instances.remove(memori_instance)
        logger.debug("Unregistered Memori instance from OpenAI interception")

    # An empty registry means nobody needs interception anymore.
    if not _enabled_memori_instances:
        OpenAIInterceptor.unpatch_openai()
444
+
445
+
446
def get_enabled_instances():
    """Return a snapshot list of the currently registered Memori instances."""
    # A shallow copy keeps callers from mutating the live registry.
    return list(_enabled_memori_instances)
449
+
450
+
451
def is_openai_patched():
    """Report whether the OpenAI client classes are currently patched."""
    return bool(OpenAIInterceptor._is_patched)
454
+
455
+
456
+ # For backward compatibility - keep old classes but mark as deprecated
22
457
class MemoriOpenAI:
    """
    DEPRECATED: Legacy wrapper class.

    Use automatic interception instead:
        memori = Memori(...)
        memori.enable()
        client = OpenAI()  # Automatically intercepted
    """

    def __init__(self, memori_instance, **kwargs):
        """Create a deprecated wrapper around a real ``openai.OpenAI`` client.

        Args:
            memori_instance: Memori instance whose recording should be enabled.
            **kwargs: Forwarded verbatim to ``openai.OpenAI``.

        Raises:
            ImportError: If the ``openai`` package is not installed.
        """
        logger.warning(
            "MemoriOpenAI is deprecated. Use automatic interception instead:\n"
            "memori.enable() then use OpenAI() client directly."
        )

        try:
            import openai

            self._openai = openai.OpenAI(**kwargs)

            # Register for automatic interception
            register_memori_instance(memori_instance)

        except ImportError as err:
            raise ImportError("OpenAI package required: pip install openai") from err

    def __getattr__(self, name):
        # Delegate attribute access lazily to the wrapped client.
        #
        # Fix: the previous implementation eagerly copied every public
        # attribute via dir()/setattr in __init__, which evaluated any
        # property at construction time, snapshotted bound values that could
        # go stale, and silently shadowed the live client. __getattr__ is
        # only consulted for names not found on self, so behavior for
        # callers (client.chat, client.models, ...) is unchanged but always
        # reflects the current state of the underlying client.
        return getattr(self._openai, name)
54
488
 
55
- def _create_chat_wrapper(self):
56
- """Create wrapped chat completions"""
57
-
58
- class ChatWrapper:
59
- def __init__(self, openai_client, memori_instance):
60
- self._openai = openai_client
61
- self._memori = memori_instance
62
- self.completions = self._create_completions_wrapper()
63
-
64
- def _create_completions_wrapper(self):
65
- class CompletionsWrapper:
66
- def __init__(self, openai_client, memori_instance):
67
- self._openai = openai_client
68
- self._memori = memori_instance
69
-
70
- def create(self, **kwargs):
71
- # Inject context if conscious ingestion is enabled
72
- if self._memori.is_enabled and self._memori.conscious_ingest:
73
- kwargs = self._inject_context(kwargs)
74
-
75
- # Make the actual API call
76
- response = self._openai.chat.completions.create(**kwargs)
77
-
78
- # Record conversation if memori is enabled
79
- if self._memori.is_enabled:
80
- self._record_conversation(kwargs, response)
81
-
82
- return response
83
-
84
- def _inject_context(self, kwargs):
85
- """Inject relevant context into messages"""
86
- try:
87
- # Extract user input from messages
88
- user_input = ""
89
- for msg in reversed(kwargs.get("messages", [])):
90
- if msg.get("role") == "user":
91
- user_input = msg.get("content", "")
92
- break
93
-
94
- if user_input:
95
- # Fetch relevant context
96
- context = self._memori.retrieve_context(
97
- user_input, limit=3
98
- )
99
-
100
- if context:
101
- # Create a context prompt
102
- context_prompt = "--- Relevant Memories ---\n"
103
- for mem in context:
104
- if isinstance(mem, dict):
105
- summary = mem.get("summary", "") or mem.get(
106
- "content", ""
107
- )
108
- context_prompt += f"- {summary}\n"
109
- else:
110
- context_prompt += f"- {str(mem)}\n"
111
- context_prompt += "-------------------------\n"
112
-
113
- # Inject context into the system message
114
- messages = kwargs.get("messages", [])
115
- system_message_found = False
116
- for msg in messages:
117
- if msg.get("role") == "system":
118
- msg["content"] = context_prompt + msg.get(
119
- "content", ""
120
- )
121
- system_message_found = True
122
- break
123
-
124
- if not system_message_found:
125
- messages.insert(
126
- 0,
127
- {
128
- "role": "system",
129
- "content": context_prompt,
130
- },
131
- )
132
-
133
- logger.debug(
134
- f"Injected context: {len(context)} memories"
135
- )
136
- except Exception as e:
137
- logger.error(f"Context injection failed: {e}")
138
-
139
- return kwargs
140
-
141
- def _record_conversation(self, kwargs, response):
142
- """Record the conversation"""
143
- try:
144
- # Extract details
145
- messages = kwargs.get("messages", [])
146
- model = kwargs.get("model", "unknown")
147
-
148
- # Find user input (last user message)
149
- user_input = ""
150
- for message in reversed(messages):
151
- if message.get("role") == "user":
152
- user_input = message.get("content", "")
153
- break
154
-
155
- # Extract AI response
156
- ai_output = ""
157
- if hasattr(response, "choices") and response.choices:
158
- choice = response.choices[0]
159
- if hasattr(choice, "message") and choice.message:
160
- ai_output = choice.message.content or ""
161
-
162
- # Calculate tokens used
163
- tokens_used = 0
164
- if hasattr(response, "usage") and response.usage:
165
- tokens_used = getattr(response.usage, "total_tokens", 0)
166
-
167
- # Record conversation
168
- self._memori.record_conversation(
169
- user_input=user_input,
170
- ai_output=ai_output,
171
- model=model,
172
- metadata={
173
- "integration": "openai_wrapper",
174
- "api_type": "chat_completions",
175
- "tokens_used": tokens_used,
176
- "auto_recorded": True,
177
- },
178
- )
179
- except Exception as e:
180
- logger.error(f"Failed to record OpenAI conversation: {e}")
181
-
182
- return CompletionsWrapper(self._openai, self._memori)
183
-
184
- return ChatWrapper(self._openai, self._memori)
185
-
186
- def _create_completions_wrapper(self):
187
- """Create wrapped legacy completions"""
188
-
189
- class CompletionsWrapper:
190
- def __init__(self, openai_client, memori_instance):
191
- self._openai = openai_client
192
- self._memori = memori_instance
193
-
194
- def create(self, **kwargs):
195
- # Inject context if conscious ingestion is enabled
196
- if self._memori.is_enabled and self._memori.conscious_ingest:
197
- kwargs = self._inject_context(kwargs)
198
489
 
199
- # Make the actual API call
200
- response = self._openai.completions.create(**kwargs)
490
class MemoriOpenAIInterceptor(MemoriOpenAI):
    """DEPRECATED: Use automatic interception instead.

    Retained only so existing imports of ``MemoriOpenAIInterceptor`` keep
    working; it behaves exactly like :class:`MemoriOpenAI`.
    """

    def __init__(self, memori_instance, **kwargs):
        # NOTE: the parent initializer emits its own deprecation warning, so
        # two warnings are logged per construction — kept as-is for parity
        # with the released behavior.
        logger.warning(
            "MemoriOpenAIInterceptor is deprecated. Use automatic interception instead:\n"
            "memori.enable() then use OpenAI() client directly."
        )
        super().__init__(memori_instance, **kwargs)
205
499
 
206
- return response
207
500
 
208
- def _inject_context(self, kwargs):
209
- """Inject relevant context into prompt"""
210
- try:
211
- user_input = kwargs.get("prompt", "")
212
-
213
- if user_input:
214
- # Fetch relevant context
215
- context = self._memori.retrieve_context(user_input, limit=3)
216
-
217
- if context:
218
- # Create a context prompt
219
- context_prompt = "--- Relevant Memories ---\n"
220
- for mem in context:
221
- if isinstance(mem, dict):
222
- summary = mem.get("summary", "") or mem.get(
223
- "content", ""
224
- )
225
- context_prompt += f"- {summary}\n"
226
- else:
227
- context_prompt += f"- {str(mem)}\n"
228
- context_prompt += "-------------------------\n"
229
-
230
- # Prepend context to the prompt
231
- kwargs["prompt"] = context_prompt + user_input
232
-
233
- logger.debug(f"Injected context: {len(context)} memories")
234
- except Exception as e:
235
- logger.error(f"Context injection failed: {e}")
501
def create_openai_client(memori_instance, provider_config=None, **kwargs):
    """Build a standard ``openai.OpenAI`` client wired up for Memori recording.

    This is the recommended way to create OpenAI clients with memori
    integration: the instance is registered for automatic interception and
    a plain SDK client is returned.

    Args:
        memori_instance: Memori instance to record conversations to
        provider_config: Provider configuration (optional); its
            ``to_openai_kwargs()`` output seeds the client arguments.
        **kwargs: Additional arguments for the OpenAI client; these take
            precedence over values from ``provider_config``.

    Returns:
        OpenAI client instance with automatic recording

    Raises:
        ImportError: If the ``openai`` package is not installed.
    """
    try:
        import openai

        # Register the memori instance for automatic interception
        register_memori_instance(memori_instance)

        # Merge provider configuration with explicit overrides.
        if provider_config:
            client_kwargs = provider_config.to_openai_kwargs()
            client_kwargs.update(kwargs)  # explicit kwargs win
        else:
            client_kwargs = kwargs

        # A plain SDK client is enough - interception happens via patching.
        client = openai.OpenAI(**client_kwargs)

        logger.info("Created OpenAI client with automatic memori recording")
        return client

    except ImportError as e:
        logger.error(f"Failed to import OpenAI: {e}")
        raise ImportError("OpenAI package required: pip install openai") from e
    except Exception as e:
        logger.error(f"Failed to create OpenAI client: {e}")
        raise