mem0ai-azure-mysql 0.1.115 (mem0ai_azure_mysql-0.1.115-py3-none-any.whl)
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- mem0/__init__.py +6 -0
- mem0/client/__init__.py +0 -0
- mem0/client/main.py +1535 -0
- mem0/client/project.py +860 -0
- mem0/client/utils.py +29 -0
- mem0/configs/__init__.py +0 -0
- mem0/configs/base.py +90 -0
- mem0/configs/dbs/__init__.py +4 -0
- mem0/configs/dbs/base.py +41 -0
- mem0/configs/dbs/mysql.py +25 -0
- mem0/configs/embeddings/__init__.py +0 -0
- mem0/configs/embeddings/base.py +108 -0
- mem0/configs/enums.py +7 -0
- mem0/configs/llms/__init__.py +0 -0
- mem0/configs/llms/base.py +152 -0
- mem0/configs/prompts.py +333 -0
- mem0/configs/vector_stores/__init__.py +0 -0
- mem0/configs/vector_stores/azure_ai_search.py +59 -0
- mem0/configs/vector_stores/baidu.py +29 -0
- mem0/configs/vector_stores/chroma.py +40 -0
- mem0/configs/vector_stores/elasticsearch.py +47 -0
- mem0/configs/vector_stores/faiss.py +39 -0
- mem0/configs/vector_stores/langchain.py +32 -0
- mem0/configs/vector_stores/milvus.py +43 -0
- mem0/configs/vector_stores/mongodb.py +25 -0
- mem0/configs/vector_stores/opensearch.py +41 -0
- mem0/configs/vector_stores/pgvector.py +37 -0
- mem0/configs/vector_stores/pinecone.py +56 -0
- mem0/configs/vector_stores/qdrant.py +49 -0
- mem0/configs/vector_stores/redis.py +26 -0
- mem0/configs/vector_stores/supabase.py +44 -0
- mem0/configs/vector_stores/upstash_vector.py +36 -0
- mem0/configs/vector_stores/vertex_ai_vector_search.py +27 -0
- mem0/configs/vector_stores/weaviate.py +43 -0
- mem0/dbs/__init__.py +4 -0
- mem0/dbs/base.py +68 -0
- mem0/dbs/configs.py +21 -0
- mem0/dbs/mysql.py +321 -0
- mem0/embeddings/__init__.py +0 -0
- mem0/embeddings/aws_bedrock.py +100 -0
- mem0/embeddings/azure_openai.py +43 -0
- mem0/embeddings/base.py +31 -0
- mem0/embeddings/configs.py +30 -0
- mem0/embeddings/gemini.py +39 -0
- mem0/embeddings/huggingface.py +41 -0
- mem0/embeddings/langchain.py +35 -0
- mem0/embeddings/lmstudio.py +29 -0
- mem0/embeddings/mock.py +11 -0
- mem0/embeddings/ollama.py +53 -0
- mem0/embeddings/openai.py +49 -0
- mem0/embeddings/together.py +31 -0
- mem0/embeddings/vertexai.py +54 -0
- mem0/graphs/__init__.py +0 -0
- mem0/graphs/configs.py +96 -0
- mem0/graphs/neptune/__init__.py +0 -0
- mem0/graphs/neptune/base.py +410 -0
- mem0/graphs/neptune/main.py +372 -0
- mem0/graphs/tools.py +371 -0
- mem0/graphs/utils.py +97 -0
- mem0/llms/__init__.py +0 -0
- mem0/llms/anthropic.py +64 -0
- mem0/llms/aws_bedrock.py +270 -0
- mem0/llms/azure_openai.py +114 -0
- mem0/llms/azure_openai_structured.py +76 -0
- mem0/llms/base.py +32 -0
- mem0/llms/configs.py +34 -0
- mem0/llms/deepseek.py +85 -0
- mem0/llms/gemini.py +201 -0
- mem0/llms/groq.py +88 -0
- mem0/llms/langchain.py +65 -0
- mem0/llms/litellm.py +87 -0
- mem0/llms/lmstudio.py +53 -0
- mem0/llms/ollama.py +94 -0
- mem0/llms/openai.py +124 -0
- mem0/llms/openai_structured.py +52 -0
- mem0/llms/sarvam.py +89 -0
- mem0/llms/together.py +88 -0
- mem0/llms/vllm.py +89 -0
- mem0/llms/xai.py +52 -0
- mem0/memory/__init__.py +0 -0
- mem0/memory/base.py +63 -0
- mem0/memory/graph_memory.py +632 -0
- mem0/memory/main.py +1843 -0
- mem0/memory/memgraph_memory.py +630 -0
- mem0/memory/setup.py +56 -0
- mem0/memory/storage.py +218 -0
- mem0/memory/telemetry.py +90 -0
- mem0/memory/utils.py +133 -0
- mem0/proxy/__init__.py +0 -0
- mem0/proxy/main.py +194 -0
- mem0/utils/factory.py +132 -0
- mem0/vector_stores/__init__.py +0 -0
- mem0/vector_stores/azure_ai_search.py +383 -0
- mem0/vector_stores/baidu.py +368 -0
- mem0/vector_stores/base.py +58 -0
- mem0/vector_stores/chroma.py +229 -0
- mem0/vector_stores/configs.py +60 -0
- mem0/vector_stores/elasticsearch.py +235 -0
- mem0/vector_stores/faiss.py +473 -0
- mem0/vector_stores/langchain.py +179 -0
- mem0/vector_stores/milvus.py +245 -0
- mem0/vector_stores/mongodb.py +293 -0
- mem0/vector_stores/opensearch.py +281 -0
- mem0/vector_stores/pgvector.py +294 -0
- mem0/vector_stores/pinecone.py +373 -0
- mem0/vector_stores/qdrant.py +240 -0
- mem0/vector_stores/redis.py +295 -0
- mem0/vector_stores/supabase.py +237 -0
- mem0/vector_stores/upstash_vector.py +293 -0
- mem0/vector_stores/vertex_ai_vector_search.py +629 -0
- mem0/vector_stores/weaviate.py +316 -0
- mem0ai_azure_mysql-0.1.115.data/data/README.md +169 -0
- mem0ai_azure_mysql-0.1.115.dist-info/METADATA +224 -0
- mem0ai_azure_mysql-0.1.115.dist-info/RECORD +116 -0
- mem0ai_azure_mysql-0.1.115.dist-info/WHEEL +4 -0
- mem0ai_azure_mysql-0.1.115.dist-info/licenses/LICENSE +201 -0
mem0/vector_stores/weaviate.py
@@ -0,0 +1,316 @@
import logging
import uuid
from typing import Dict, List, Mapping, Optional

from pydantic import BaseModel

try:
    import weaviate
except ImportError:
    raise ImportError(
        "The 'weaviate' library is required. Please install it using 'pip install weaviate-client'."
    )

import weaviate.classes.config as wvcc
from weaviate.classes.init import Auth
from weaviate.classes.query import Filter, MetadataQuery
from weaviate.util import get_valid_uuid

from mem0.vector_stores.base import VectorStoreBase

logger = logging.getLogger(__name__)


class OutputData(BaseModel):
    id: Optional[str] = None
    score: Optional[float] = None
    payload: Optional[Dict] = None


class Weaviate(VectorStoreBase):
    def __init__(
        self,
        collection_name: str,
        embedding_model_dims: int,
        cluster_url: str = None,
        auth_client_secret: str = None,
        additional_headers: dict = None,
    ):
        """
        Initialize the Weaviate vector store.

        Args:
            collection_name (str): Name of the collection/class in Weaviate.
            embedding_model_dims (int): Dimensions of the embedding model.
            cluster_url (str, optional): URL for the Weaviate server. Defaults to None.
            auth_client_secret (str, optional): API key for Weaviate authentication. Defaults to None.
            additional_headers (dict, optional): Additional headers for requests. Defaults to None.
        """
        if "localhost" in cluster_url:
            self.client = weaviate.connect_to_local(headers=additional_headers)
        else:
            self.client = weaviate.connect_to_wcs(
                cluster_url=cluster_url,
                auth_credentials=Auth.api_key(auth_client_secret),
                headers=additional_headers,
            )

        self.collection_name = collection_name
        self.embedding_model_dims = embedding_model_dims
        self.create_col(embedding_model_dims)

    def _parse_output(self, data: Dict) -> List[OutputData]:
        """
        Parse the output data.

        Args:
            data (Dict): Output data.

        Returns:
            List[OutputData]: Parsed output data.
        """
        keys = ["ids", "distances", "metadatas"]
        values = []

        for key in keys:
            value = data.get(key, [])
            if isinstance(value, list) and value and isinstance(value[0], list):
                value = value[0]
            values.append(value)

        ids, distances, metadatas = values
        max_length = max(len(v) for v in values if isinstance(v, list) and v is not None)

        result = []
        for i in range(max_length):
            entry = OutputData(
                id=ids[i] if isinstance(ids, list) and ids and i < len(ids) else None,
                score=(distances[i] if isinstance(distances, list) and distances and i < len(distances) else None),
                payload=(metadatas[i] if isinstance(metadatas, list) and metadatas and i < len(metadatas) else None),
            )
            result.append(entry)

        return result

    def create_col(self, vector_size, distance="cosine"):
        """
        Create a new collection with the specified schema.

        Args:
            vector_size (int): Size of the vectors to be stored.
            distance (str, optional): Distance metric for vector similarity. Defaults to "cosine".
        """
        if self.client.collections.exists(self.collection_name):
            logger.debug(f"Collection {self.collection_name} already exists. Skipping creation.")
            return

        properties = [
            wvcc.Property(name="ids", data_type=wvcc.DataType.TEXT),
            wvcc.Property(name="hash", data_type=wvcc.DataType.TEXT),
            wvcc.Property(
                name="metadata",
                data_type=wvcc.DataType.TEXT,
                description="Additional metadata",
            ),
            wvcc.Property(name="data", data_type=wvcc.DataType.TEXT),
            wvcc.Property(name="created_at", data_type=wvcc.DataType.TEXT),
            wvcc.Property(name="category", data_type=wvcc.DataType.TEXT),
            wvcc.Property(name="updated_at", data_type=wvcc.DataType.TEXT),
            wvcc.Property(name="user_id", data_type=wvcc.DataType.TEXT),
            wvcc.Property(name="agent_id", data_type=wvcc.DataType.TEXT),
            wvcc.Property(name="run_id", data_type=wvcc.DataType.TEXT),
        ]

        vectorizer_config = wvcc.Configure.Vectorizer.none()
        vector_index_config = wvcc.Configure.VectorIndex.hnsw()

        self.client.collections.create(
            self.collection_name,
            vectorizer_config=vectorizer_config,
            vector_index_config=vector_index_config,
            properties=properties,
        )

    def insert(self, vectors, payloads=None, ids=None):
        """
        Insert vectors into a collection.

        Args:
            vectors (list): List of vectors to insert.
            payloads (list, optional): List of payloads corresponding to vectors. Defaults to None.
            ids (list, optional): List of IDs corresponding to vectors. Defaults to None.
        """
        logger.info(f"Inserting {len(vectors)} vectors into collection {self.collection_name}")
        with self.client.batch.fixed_size(batch_size=100) as batch:
            for idx, vector in enumerate(vectors):
                object_id = ids[idx] if ids and idx < len(ids) else str(uuid.uuid4())
                object_id = get_valid_uuid(object_id)

                data_object = payloads[idx] if payloads and idx < len(payloads) else {}

                # Ensure 'ids' is not duplicated in the properties (the object ID is passed to Weaviate separately)
                if "ids" in data_object:
                    del data_object["ids"]

                batch.add_object(collection=self.collection_name, properties=data_object, uuid=object_id, vector=vector)

    def search(
        self, query: str, vectors: List[float], limit: int = 5, filters: Optional[Dict] = None
    ) -> List[OutputData]:
        """
        Search for similar vectors.
        """
        collection = self.client.collections.get(str(self.collection_name))
        filter_conditions = []
        if filters:
            for key, value in filters.items():
                if value and key in ["user_id", "agent_id", "run_id"]:
                    filter_conditions.append(Filter.by_property(key).equal(value))
        combined_filter = Filter.all_of(filter_conditions) if filter_conditions else None
        response = collection.query.hybrid(
            query="",
            vector=vectors,
            limit=limit,
            filters=combined_filter,
            return_properties=["hash", "created_at", "updated_at", "user_id", "agent_id", "run_id", "data", "category"],
            return_metadata=MetadataQuery(score=True),
        )
        results = []
        for obj in response.objects:
            payload = obj.properties.copy()

            for id_field in ["run_id", "agent_id", "user_id"]:
                if id_field in payload and payload[id_field] is None:
                    del payload[id_field]

            payload["id"] = str(obj.uuid).split("'")[0]  # Include the id in the payload
            results.append(
                OutputData(
                    id=str(obj.uuid),
                    score=1
                    if obj.metadata.distance is None
                    else 1 - obj.metadata.distance,  # Convert distance to score
                    payload=payload,
                )
            )
        return results

    def delete(self, vector_id):
        """
        Delete a vector by ID.

        Args:
            vector_id: ID of the vector to delete.
        """
        collection = self.client.collections.get(str(self.collection_name))
        collection.data.delete_by_id(vector_id)

    def update(self, vector_id, vector=None, payload=None):
        """
        Update a vector and its payload.

        Args:
            vector_id: ID of the vector to update.
            vector (list, optional): Updated vector. Defaults to None.
            payload (dict, optional): Updated payload. Defaults to None.
        """
        collection = self.client.collections.get(str(self.collection_name))

        if payload:
            collection.data.update(uuid=vector_id, properties=payload)

        if vector:
            existing_data = self.get(vector_id)
            if existing_data:
                existing_data = dict(existing_data)
                if "id" in existing_data:
                    del existing_data["id"]
                existing_payload: Mapping[str, str] = existing_data
                collection.data.update(uuid=vector_id, properties=existing_payload, vector=vector)

    def get(self, vector_id):
        """
        Retrieve a vector by ID.

        Args:
            vector_id: ID of the vector to retrieve.

        Returns:
            dict: Retrieved vector and metadata.
        """
        vector_id = get_valid_uuid(vector_id)
        collection = self.client.collections.get(str(self.collection_name))

        response = collection.query.fetch_object_by_id(
            uuid=vector_id,
            return_properties=["hash", "created_at", "updated_at", "user_id", "agent_id", "run_id", "data", "category"],
        )
        payload = response.properties.copy()
        payload["id"] = str(response.uuid).split("'")[0]
        results = OutputData(
            id=str(response.uuid).split("'")[0],
            score=1.0,
            payload=payload,
        )
        return results

    def list_cols(self):
        """
        List all collections.

        Returns:
            list: List of collection names.
        """
        collections = self.client.collections.list_all()
        logger.debug(f"collections: {collections}")
        return {"collections": [{"name": col.name} for col in collections]}

    def delete_col(self):
        """Delete a collection."""
        self.client.collections.delete(self.collection_name)

    def col_info(self):
        """
        Get information about a collection.

        Returns:
            dict: Collection information.
        """
        schema = self.client.collections.get(self.collection_name)
        if schema:
            return schema
        return None

    def list(self, filters=None, limit=100) -> List[OutputData]:
        """
        List all vectors in a collection.
        """
        collection = self.client.collections.get(self.collection_name)
        filter_conditions = []
        if filters:
            for key, value in filters.items():
                if value and key in ["user_id", "agent_id", "run_id"]:
                    filter_conditions.append(Filter.by_property(key).equal(value))
        combined_filter = Filter.all_of(filter_conditions) if filter_conditions else None
        response = collection.query.fetch_objects(
            limit=limit,
            filters=combined_filter,
            return_properties=["hash", "created_at", "updated_at", "user_id", "agent_id", "run_id", "data", "category"],
        )
        results = []
        for obj in response.objects:
            payload = obj.properties.copy()
            payload["id"] = str(obj.uuid).split("'")[0]
            results.append(OutputData(id=str(obj.uuid).split("'")[0], score=1.0, payload=payload))
        return [results]

    def reset(self):
        """Reset the index by deleting and recreating it."""
        logger.warning(f"Resetting index {self.collection_name}...")
        self.delete_col()
        self.create_col(self.embedding_model_dims)
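As context for the store above, a minimal usage sketch follows. Within the package this class is normally driven through `mem0.Memory` via the vector-store config, so the direct calls here are illustrative only; the sketch assumes a Weaviate instance reachable on localhost and 1536-dimensional embeddings, and the vectors are placeholders rather than real embeddings.

```python
from mem0.vector_stores.weaviate import Weaviate

# Assumes a local Weaviate instance (e.g. started via Docker) and 1536-dim embeddings.
store = Weaviate(
    collection_name="mem0",
    embedding_model_dims=1536,
    cluster_url="http://localhost:8080",
)

# Insert one placeholder vector together with its payload properties.
store.insert(
    vectors=[[0.1] * 1536],
    payloads=[{"data": "User prefers dark mode", "user_id": "alice"}],
)

# Vector search filtered to a single user; `query` is unused because the store
# issues the hybrid call with an empty query string internally.
hits = store.search(query="", vectors=[0.1] * 1536, limit=3, filters={"user_id": "alice"})
for hit in hits:
    print(hit.id, hit.score, hit.payload)
```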
mem0ai_azure_mysql-0.1.115.data/data/README.md
@@ -0,0 +1,169 @@
<p align="center">
  <a href="https://github.com/mem0ai/mem0">
    <img src="docs/images/banner-sm.png" width="800px" alt="Mem0 - The Memory Layer for Personalized AI">
  </a>
</p>
<p align="center" style="display: flex; justify-content: center; gap: 20px; align-items: center;">
  <a href="https://trendshift.io/repositories/11194" target="blank">
    <img src="https://trendshift.io/api/badge/repositories/11194" alt="mem0ai%2Fmem0 | Trendshift" width="250" height="55"/>
  </a>
</p>

<p align="center">
  <a href="https://mem0.ai">Learn more</a>
  ·
  <a href="https://mem0.dev/DiG">Join Discord</a>
  ·
  <a href="https://mem0.dev/demo">Demo</a>
  ·
  <a href="https://mem0.dev/openmemory">OpenMemory</a>
</p>

<p align="center">
  <a href="https://mem0.dev/DiG">
    <img src="https://dcbadge.vercel.app/api/server/6PzXDgEjG5?style=flat" alt="Mem0 Discord">
  </a>
  <a href="https://pepy.tech/project/mem0ai">
    <img src="https://img.shields.io/pypi/dm/mem0ai" alt="Mem0 PyPI - Downloads">
  </a>
  <a href="https://github.com/mem0ai/mem0">
    <img src="https://img.shields.io/github/commit-activity/m/mem0ai/mem0?style=flat-square" alt="GitHub commit activity">
  </a>
  <a href="https://pypi.org/project/mem0ai" target="blank">
    <img src="https://img.shields.io/pypi/v/mem0ai?color=%2334D058&label=pypi%20package" alt="Package version">
  </a>
  <a href="https://www.npmjs.com/package/mem0ai" target="blank">
    <img src="https://img.shields.io/npm/v/mem0ai" alt="Npm package">
  </a>
  <a href="https://www.ycombinator.com/companies/mem0">
    <img src="https://img.shields.io/badge/Y%20Combinator-S24-orange?style=flat-square" alt="Y Combinator S24">
  </a>
</p>

<p align="center">
  <a href="https://mem0.ai/research"><strong>Building Production-Ready AI Agents with Scalable Long-Term Memory →</strong></a>
</p>
<p align="center">
  <strong>+26% Accuracy vs. OpenAI Memory • 91% Faster • 90% Fewer Tokens</strong>
</p>

## Research Highlights
- **+26% Accuracy** over OpenAI Memory on the LOCOMO benchmark
- **91% Faster Responses** than full-context, ensuring low latency at scale
- **90% Lower Token Usage** than full-context, cutting costs without compromise
- [Read the full paper](https://mem0.ai/research)

# Introduction

[Mem0](https://mem0.ai) ("mem-zero") enhances AI assistants and agents with an intelligent memory layer, enabling personalized AI interactions. It remembers user preferences, adapts to individual needs, and continuously learns over time, making it ideal for customer support chatbots, AI assistants, and autonomous systems.

### Key Features & Use Cases

**Core Capabilities:**
- **Multi-Level Memory**: Seamlessly retains User, Session, and Agent state with adaptive personalization
- **Developer-Friendly**: Intuitive API, cross-platform SDKs, and a fully managed service option

**Applications:**
- **AI Assistants**: Consistent, context-rich conversations
- **Customer Support**: Recall past tickets and user history for tailored help
- **Healthcare**: Track patient preferences and history for personalized care
- **Productivity & Gaming**: Adaptive workflows and environments based on user behavior

## Quickstart Guide <a name="quickstart"></a>

Choose between our hosted platform or self-hosted package:

### Hosted Platform

Get up and running in minutes with automatic updates, analytics, and enterprise security.

1. Sign up on [Mem0 Platform](https://app.mem0.ai)
2. Embed the memory layer via SDK or API keys (see the sketch below)
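A minimal sketch of what step 2 can look like with the Python SDK, assuming an API key issued from the dashboard; the exact method set is documented in the hosted API Reference, so treat this snippet as illustrative rather than authoritative:

```python
from mem0 import MemoryClient

# Hosted platform client; the key comes from app.mem0.ai (placeholder value here).
client = MemoryClient(api_key="your-mem0-api-key")

messages = [
    {"role": "user", "content": "I'm vegetarian and allergic to nuts."},
    {"role": "assistant", "content": "Got it, I'll keep that in mind."},
]
client.add(messages, user_id="alice")  # store memories for this user

related = client.search("What can Alice eat?", user_id="alice")  # retrieve relevant memories
```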
### Self-Hosted (Open Source)

Install the SDK via pip:

```bash
pip install mem0ai
```

Install the SDK via npm:
```bash
npm install mem0ai
```

### Basic Usage

Mem0 requires an LLM to function, with `gpt-4o-mini` from OpenAI as the default. However, it supports a variety of LLMs; for details, refer to our [Supported LLMs documentation](https://docs.mem0.ai/components/llms/overview).

The first step is to instantiate the memory:

```python
from openai import OpenAI
from mem0 import Memory

openai_client = OpenAI()
memory = Memory()

def chat_with_memories(message: str, user_id: str = "default_user") -> str:
    # Retrieve relevant memories
    relevant_memories = memory.search(query=message, user_id=user_id, limit=3)
    memories_str = "\n".join(f"- {entry['memory']}" for entry in relevant_memories["results"])

    # Generate Assistant response
    system_prompt = f"You are a helpful AI. Answer the question based on query and memories.\nUser Memories:\n{memories_str}"
    messages = [{"role": "system", "content": system_prompt}, {"role": "user", "content": message}]
    response = openai_client.chat.completions.create(model="gpt-4o-mini", messages=messages)
    assistant_response = response.choices[0].message.content

    # Create new memories from the conversation
    messages.append({"role": "assistant", "content": assistant_response})
    memory.add(messages, user_id=user_id)

    return assistant_response

def main():
    print("Chat with AI (type 'exit' to quit)")
    while True:
        user_input = input("You: ").strip()
        if user_input.lower() == 'exit':
            print("Goodbye!")
            break
        print(f"AI: {chat_with_memories(user_input)}")

if __name__ == "__main__":
    main()
```
|
+
|
|
139
|
+
For detailed integration steps, see the [Quickstart](https://docs.mem0.ai/quickstart) and [API Reference](https://docs.mem0.ai/api-reference).
|
|
140
|
+
|
|
141
|
+
## π Integrations & Demos
|
|
142
|
+
|
|
143
|
+
- **ChatGPT with Memory**: Personalized chat powered by Mem0 ([Live Demo](https://mem0.dev/demo))
|
|
144
|
+
- **Browser Extension**: Store memories across ChatGPT, Perplexity, and Claude ([Chrome Extension](https://chromewebstore.google.com/detail/onihkkbipkfeijkadecaafbgagkhglop?utm_source=item-share-cb))
|
|
145
|
+
- **Langgraph Support**: Build a customer bot with Langgraph + Mem0 ([Guide](https://docs.mem0.ai/integrations/langgraph))
|
|
146
|
+
- **CrewAI Integration**: Tailor CrewAI outputs with Mem0 ([Example](https://docs.mem0.ai/integrations/crewai))
|
|
147
|
+
|
|
148
|
+
## π Documentation & Support
|
|
149
|
+
|
|
150
|
+
- Full docs: https://docs.mem0.ai
|
|
151
|
+
- Community: [Discord](https://mem0.dev/DiG) Β· [Twitter](https://x.com/mem0ai)
|
|
152
|
+
- Contact: founders@mem0.ai
|
|
153
|
+
|
|
154
|
+
## Citation
|
|
155
|
+
|
|
156
|
+
We now have a paper you can cite:
|
|
157
|
+
|
|
158
|
+
```bibtex
|
|
159
|
+
@article{mem0,
|
|
160
|
+
title={Mem0: Building Production-Ready AI Agents with Scalable Long-Term Memory},
|
|
161
|
+
author={Chhikara, Prateek and Khant, Dev and Aryan, Saket and Singh, Taranjeet and Yadav, Deshraj},
|
|
162
|
+
journal={arXiv preprint arXiv:2504.19413},
|
|
163
|
+
year={2025}
|
|
164
|
+
}
|
|
165
|
+
```
|
|
166
|
+
|
|
167
|
+
## βοΈ License
|
|
168
|
+
|
|
169
|
+
Apache 2.0 β see the [LICENSE](LICENSE) file for details.
|