memorisdk 1.0.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release — this version of memorisdk may be problematic; click here for more details.
- memoriai/__init__.py +140 -0
- memoriai/agents/__init__.py +7 -0
- memoriai/agents/conscious_agent.py +506 -0
- memoriai/agents/memory_agent.py +322 -0
- memoriai/agents/retrieval_agent.py +579 -0
- memoriai/config/__init__.py +14 -0
- memoriai/config/manager.py +281 -0
- memoriai/config/settings.py +287 -0
- memoriai/core/__init__.py +6 -0
- memoriai/core/database.py +966 -0
- memoriai/core/memory.py +1349 -0
- memoriai/database/__init__.py +5 -0
- memoriai/database/connectors/__init__.py +9 -0
- memoriai/database/connectors/mysql_connector.py +159 -0
- memoriai/database/connectors/postgres_connector.py +158 -0
- memoriai/database/connectors/sqlite_connector.py +148 -0
- memoriai/database/queries/__init__.py +15 -0
- memoriai/database/queries/base_queries.py +204 -0
- memoriai/database/queries/chat_queries.py +157 -0
- memoriai/database/queries/entity_queries.py +236 -0
- memoriai/database/queries/memory_queries.py +178 -0
- memoriai/database/templates/__init__.py +0 -0
- memoriai/database/templates/basic_template.py +0 -0
- memoriai/database/templates/schemas/__init__.py +0 -0
- memoriai/integrations/__init__.py +68 -0
- memoriai/integrations/anthropic_integration.py +194 -0
- memoriai/integrations/litellm_integration.py +11 -0
- memoriai/integrations/openai_integration.py +273 -0
- memoriai/scripts/llm_text.py +50 -0
- memoriai/tools/__init__.py +5 -0
- memoriai/tools/memory_tool.py +544 -0
- memoriai/utils/__init__.py +89 -0
- memoriai/utils/exceptions.py +418 -0
- memoriai/utils/helpers.py +433 -0
- memoriai/utils/logging.py +204 -0
- memoriai/utils/pydantic_models.py +258 -0
- memoriai/utils/schemas.py +0 -0
- memoriai/utils/validators.py +339 -0
- memorisdk-1.0.0.dist-info/METADATA +386 -0
- memorisdk-1.0.0.dist-info/RECORD +44 -0
- memorisdk-1.0.0.dist-info/WHEEL +5 -0
- memorisdk-1.0.0.dist-info/entry_points.txt +2 -0
- memorisdk-1.0.0.dist-info/licenses/LICENSE +203 -0
- memorisdk-1.0.0.dist-info/top_level.txt +1 -0
|
@@ -0,0 +1,273 @@
|
|
|
1
|
+
"""
|
|
2
|
+
OpenAI Integration - Clean wrapper without monkey-patching
|
|
3
|
+
|
|
4
|
+
RECOMMENDED: Use LiteLLM instead for unified API and native callback support.
|
|
5
|
+
This integration is provided for direct OpenAI SDK usage.
|
|
6
|
+
|
|
7
|
+
Usage:
|
|
8
|
+
from memoriai.integrations.openai_integration import MemoriOpenAI
|
|
9
|
+
|
|
10
|
+
# Initialize with your memori instance
|
|
11
|
+
client = MemoriOpenAI(memori_instance, api_key="your-key")
|
|
12
|
+
|
|
13
|
+
# Use exactly like OpenAI client
|
|
14
|
+
response = client.chat.completions.create(...)
|
|
15
|
+
"""
|
|
16
|
+
|
|
17
|
+
from typing import Optional
|
|
18
|
+
|
|
19
|
+
from loguru import logger
|
|
20
|
+
|
|
21
|
+
|
|
22
|
+
class MemoriOpenAI:
    """
    Clean OpenAI wrapper that automatically records conversations
    without monkey-patching. Drop-in replacement for OpenAI client.

    Chat and legacy completion calls are intercepted to (a) inject
    retrieved memories into the request when conscious ingestion is
    enabled and (b) record each exchange on the memori instance.
    """

    def __init__(self, memori_instance, api_key: Optional[str] = None, **kwargs):
        """
        Initialize MemoriOpenAI wrapper

        Args:
            memori_instance: Memori instance for recording conversations
            api_key: OpenAI API key
            **kwargs: Additional arguments passed to OpenAI client

        Raises:
            ImportError: If the ``openai`` package is not installed.
        """
        # Keep the try body minimal: only a missing package should be
        # translated into the "install openai" ImportError; any failure
        # constructing the client itself must propagate unchanged.
        try:
            import openai
        except ImportError as err:
            raise ImportError("OpenAI package required: pip install openai") from err

        self._openai = openai.OpenAI(api_key=api_key, **kwargs)
        self._memori = memori_instance

        # Create wrapped completions
        self.chat = self._create_chat_wrapper()
        self.completions = self._create_completions_wrapper()

        # Pass through other public attributes so this object behaves as a
        # drop-in replacement for the real client.
        # NOTE(review): this is a one-time snapshot taken at construction.
        for attr in dir(self._openai):
            if not attr.startswith("_") and attr not in ["chat", "completions"]:
                setattr(self, attr, getattr(self._openai, attr))

    @staticmethod
    def _format_context_prompt(context):
        """Render retrieved memories as a single context block.

        Shared by the chat and legacy-completions wrappers so the two
        code paths cannot drift apart.
        """
        context_prompt = "--- Relevant Memories ---\n"
        for mem in context:
            if isinstance(mem, dict):
                summary = mem.get("summary", "") or mem.get("content", "")
                context_prompt += f"- {summary}\n"
            else:
                context_prompt += f"- {str(mem)}\n"
        context_prompt += "-------------------------\n"
        return context_prompt

    def _create_chat_wrapper(self):
        """Create wrapped chat completions"""

        class ChatWrapper:
            def __init__(self, openai_client, memori_instance):
                self._openai = openai_client
                self._memori = memori_instance
                self.completions = self._create_completions_wrapper()

            def _create_completions_wrapper(self):
                class CompletionsWrapper:
                    def __init__(self, openai_client, memori_instance):
                        self._openai = openai_client
                        self._memori = memori_instance

                    def create(self, **kwargs):
                        """Chat-completions call with memory injection/recording."""
                        # Inject context if conscious ingestion is enabled
                        if self._memori.is_enabled and self._memori.conscious_ingest:
                            kwargs = self._inject_context(kwargs)

                        # Make the actual API call
                        response = self._openai.chat.completions.create(**kwargs)

                        # Record conversation if memori is enabled
                        if self._memori.is_enabled:
                            self._record_conversation(kwargs, response)

                        return response

                    def _inject_context(self, kwargs):
                        """Inject relevant context into messages.

                        Returns a new kwargs dict; the caller's message
                        list and message dicts are never mutated.
                        """
                        try:
                            # Extract user input (last user message)
                            user_input = ""
                            for msg in reversed(kwargs.get("messages", [])):
                                if msg.get("role") == "user":
                                    user_input = msg.get("content", "")
                                    break

                            if user_input:
                                # Fetch relevant context
                                context = self._memori.retrieve_context(
                                    user_input, limit=3
                                )

                                if context:
                                    context_prompt = (
                                        MemoriOpenAI._format_context_prompt(context)
                                    )

                                    # Copy before editing: the messages list
                                    # (and its dicts) belong to the caller.
                                    messages = list(kwargs.get("messages", []))
                                    system_message_found = False
                                    for i, msg in enumerate(messages):
                                        if msg.get("role") == "system":
                                            patched = dict(msg)
                                            patched["content"] = (
                                                context_prompt
                                                + msg.get("content", "")
                                            )
                                            messages[i] = patched
                                            system_message_found = True
                                            break

                                    if not system_message_found:
                                        messages.insert(
                                            0,
                                            {
                                                "role": "system",
                                                "content": context_prompt,
                                            },
                                        )

                                    kwargs = dict(kwargs)
                                    kwargs["messages"] = messages

                                    logger.debug(
                                        f"Injected context: {len(context)} memories"
                                    )
                        except Exception as e:
                            # Best-effort: a failed lookup must not break the call.
                            logger.error(f"Context injection failed: {e}")

                        return kwargs

                    def _record_conversation(self, kwargs, response):
                        """Record the conversation"""
                        try:
                            # Extract details
                            messages = kwargs.get("messages", [])
                            model = kwargs.get("model", "unknown")

                            # Find user input (last user message)
                            user_input = ""
                            for message in reversed(messages):
                                if message.get("role") == "user":
                                    user_input = message.get("content", "")
                                    break

                            # Extract AI response
                            ai_output = ""
                            if hasattr(response, "choices") and response.choices:
                                choice = response.choices[0]
                                if hasattr(choice, "message") and choice.message:
                                    ai_output = choice.message.content or ""

                            # Calculate tokens used
                            tokens_used = 0
                            if hasattr(response, "usage") and response.usage:
                                tokens_used = getattr(
                                    response.usage, "total_tokens", 0
                                )

                            # Record conversation
                            self._memori.record_conversation(
                                user_input=user_input,
                                ai_output=ai_output,
                                model=model,
                                metadata={
                                    "integration": "openai_wrapper",
                                    "api_type": "chat_completions",
                                    "tokens_used": tokens_used,
                                    "auto_recorded": True,
                                },
                            )
                        except Exception as e:
                            # Recording must never break the user's API call.
                            logger.error(f"Failed to record OpenAI conversation: {e}")

                return CompletionsWrapper(self._openai, self._memori)

        return ChatWrapper(self._openai, self._memori)

    def _create_completions_wrapper(self):
        """Create wrapped legacy completions"""

        class CompletionsWrapper:
            def __init__(self, openai_client, memori_instance):
                self._openai = openai_client
                self._memori = memori_instance

            def create(self, **kwargs):
                """Legacy completions call with memory injection/recording."""
                # Inject context if conscious ingestion is enabled
                if self._memori.is_enabled and self._memori.conscious_ingest:
                    kwargs = self._inject_context(kwargs)

                # Make the actual API call
                response = self._openai.completions.create(**kwargs)

                # Record conversation if memori is enabled
                if self._memori.is_enabled:
                    self._record_conversation(kwargs, response)

                return response

            def _inject_context(self, kwargs):
                """Inject relevant context into prompt"""
                try:
                    user_input = kwargs.get("prompt", "")

                    if user_input:
                        # Fetch relevant context
                        context = self._memori.retrieve_context(user_input, limit=3)

                        if context:
                            # Prepend the shared context block to the prompt.
                            # (kwargs here is a fresh dict built from **kwargs,
                            # so in-place assignment is safe.)
                            context_prompt = MemoriOpenAI._format_context_prompt(
                                context
                            )
                            kwargs["prompt"] = context_prompt + user_input

                            logger.debug(
                                f"Injected context: {len(context)} memories"
                            )
                except Exception as e:
                    # Best-effort: a failed lookup must not break the call.
                    logger.error(f"Context injection failed: {e}")

                return kwargs

            def _record_conversation(self, kwargs, response):
                """Record the conversation"""
                try:
                    # Extract details
                    prompt = kwargs.get("prompt", "")
                    model = kwargs.get("model", "unknown")

                    # Extract AI response
                    ai_output = ""
                    if hasattr(response, "choices") and response.choices:
                        choice = response.choices[0]
                        if hasattr(choice, "text"):
                            ai_output = choice.text or ""

                    # Calculate tokens used
                    tokens_used = 0
                    if hasattr(response, "usage") and response.usage:
                        tokens_used = getattr(response.usage, "total_tokens", 0)

                    # Record conversation
                    self._memori.record_conversation(
                        user_input=prompt,
                        ai_output=ai_output,
                        model=model,
                        metadata={
                            "integration": "openai_wrapper",
                            "api_type": "completions",
                            "tokens_used": tokens_used,
                            "auto_recorded": True,
                        },
                    )
                except Exception as e:
                    # Recording must never break the user's API call.
                    logger.error(f"Failed to record OpenAI conversation: {e}")

        return CompletionsWrapper(self._openai, self._memori)
|
@@ -0,0 +1,50 @@
|
|
|
1
|
+
import os
|
|
2
|
+
|
|
3
|
+
|
|
4
|
+
def is_text_file(file_path):
    """Return True if *file_path* can be fully decoded as UTF-8 text.

    Reads the file in bounded chunks instead of one ``f.read()`` so an
    arbitrarily large file never has to fit in memory; the text-mode
    incremental decoder handles multibyte characters split across chunk
    boundaries.

    Args:
        file_path: Path to the file to probe.

    Returns:
        True if every byte decodes as UTF-8; False on decode failure or
        any OS-level error (missing file, permission denied, ...).
    """
    try:
        with open(file_path, encoding="utf-8") as f:
            # 1 MiB chunks: decode errors surface as UnicodeDecodeError.
            while f.read(1 << 20):
                pass
        return True
    except (UnicodeDecodeError, OSError):
        return False
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
def ingest_folder_to_txt(
    input_path, output_file="ingested_data.txt", exclude_dirs=None
):
    """Concatenate every readable UTF-8 text file under *input_path* into one file.

    Each ingested file is preceded by a ``### FILE: <relative path> ###``
    header. Binary files, excluded directories, and the output file itself
    are skipped; unreadable files are reported and skipped (best-effort).

    Args:
        input_path: Root folder to walk recursively.
        output_file: Destination path (overwritten if it exists).
        exclude_dirs: Directory names to prune from the walk; defaults to
            [".git", "node_modules", "__pycache__"].
    """
    if exclude_dirs is None:
        exclude_dirs = [".git", "node_modules", "__pycache__"]

    # Resolve once, outside the loop, so the output file is recognised at
    # any depth without repeating the abspath call per file.
    output_abspath = os.path.abspath(output_file)

    with open(output_file, "w", encoding="utf-8") as out_f:
        for root, dirs, files in os.walk(input_path):
            # Filter out excluded directories (in place, so os.walk prunes them)
            dirs[:] = [d for d in dirs if d not in exclude_dirs]

            for file in files:
                file_path = os.path.join(root, file)

                # Check the cheap path comparison FIRST so we never call
                # is_text_file (a full read) on the output file while it is
                # open for writing; then skip binary (non-UTF-8) files.
                if (
                    os.path.abspath(file_path) == output_abspath
                    or not is_text_file(file_path)
                ):
                    continue

                try:
                    with open(file_path, encoding="utf-8") as in_f:
                        content = in_f.read()

                    relative_path = os.path.relpath(file_path, input_path)
                    out_f.write(f"\n### FILE: {relative_path} ###\n")
                    out_f.write(content + "\n")

                except Exception as e:
                    # Best-effort: report the failure and continue ingesting.
                    print(f"Skipping {file_path}: {e}")

    print(f"\n✅ Ingested data written to: {output_file}")
|
|
45
|
+
|
|
46
|
+
|
|
47
|
+
# ---- Run from CLI or script ----
if __name__ == "__main__":
    # Interactive entry point: prompt for a folder and ingest it into the
    # default output file ("ingested_data.txt") in the current directory.
    folder_path = input("Enter the path to the folder: ").strip()
    ingest_folder_to_txt(folder_path)
|