mem0ai-azure-mysql 0.1.115__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (116)
  1. mem0/__init__.py +6 -0
  2. mem0/client/__init__.py +0 -0
  3. mem0/client/main.py +1535 -0
  4. mem0/client/project.py +860 -0
  5. mem0/client/utils.py +29 -0
  6. mem0/configs/__init__.py +0 -0
  7. mem0/configs/base.py +90 -0
  8. mem0/configs/dbs/__init__.py +4 -0
  9. mem0/configs/dbs/base.py +41 -0
  10. mem0/configs/dbs/mysql.py +25 -0
  11. mem0/configs/embeddings/__init__.py +0 -0
  12. mem0/configs/embeddings/base.py +108 -0
  13. mem0/configs/enums.py +7 -0
  14. mem0/configs/llms/__init__.py +0 -0
  15. mem0/configs/llms/base.py +152 -0
  16. mem0/configs/prompts.py +333 -0
  17. mem0/configs/vector_stores/__init__.py +0 -0
  18. mem0/configs/vector_stores/azure_ai_search.py +59 -0
  19. mem0/configs/vector_stores/baidu.py +29 -0
  20. mem0/configs/vector_stores/chroma.py +40 -0
  21. mem0/configs/vector_stores/elasticsearch.py +47 -0
  22. mem0/configs/vector_stores/faiss.py +39 -0
  23. mem0/configs/vector_stores/langchain.py +32 -0
  24. mem0/configs/vector_stores/milvus.py +43 -0
  25. mem0/configs/vector_stores/mongodb.py +25 -0
  26. mem0/configs/vector_stores/opensearch.py +41 -0
  27. mem0/configs/vector_stores/pgvector.py +37 -0
  28. mem0/configs/vector_stores/pinecone.py +56 -0
  29. mem0/configs/vector_stores/qdrant.py +49 -0
  30. mem0/configs/vector_stores/redis.py +26 -0
  31. mem0/configs/vector_stores/supabase.py +44 -0
  32. mem0/configs/vector_stores/upstash_vector.py +36 -0
  33. mem0/configs/vector_stores/vertex_ai_vector_search.py +27 -0
  34. mem0/configs/vector_stores/weaviate.py +43 -0
  35. mem0/dbs/__init__.py +4 -0
  36. mem0/dbs/base.py +68 -0
  37. mem0/dbs/configs.py +21 -0
  38. mem0/dbs/mysql.py +321 -0
  39. mem0/embeddings/__init__.py +0 -0
  40. mem0/embeddings/aws_bedrock.py +100 -0
  41. mem0/embeddings/azure_openai.py +43 -0
  42. mem0/embeddings/base.py +31 -0
  43. mem0/embeddings/configs.py +30 -0
  44. mem0/embeddings/gemini.py +39 -0
  45. mem0/embeddings/huggingface.py +41 -0
  46. mem0/embeddings/langchain.py +35 -0
  47. mem0/embeddings/lmstudio.py +29 -0
  48. mem0/embeddings/mock.py +11 -0
  49. mem0/embeddings/ollama.py +53 -0
  50. mem0/embeddings/openai.py +49 -0
  51. mem0/embeddings/together.py +31 -0
  52. mem0/embeddings/vertexai.py +54 -0
  53. mem0/graphs/__init__.py +0 -0
  54. mem0/graphs/configs.py +96 -0
  55. mem0/graphs/neptune/__init__.py +0 -0
  56. mem0/graphs/neptune/base.py +410 -0
  57. mem0/graphs/neptune/main.py +372 -0
  58. mem0/graphs/tools.py +371 -0
  59. mem0/graphs/utils.py +97 -0
  60. mem0/llms/__init__.py +0 -0
  61. mem0/llms/anthropic.py +64 -0
  62. mem0/llms/aws_bedrock.py +270 -0
  63. mem0/llms/azure_openai.py +114 -0
  64. mem0/llms/azure_openai_structured.py +76 -0
  65. mem0/llms/base.py +32 -0
  66. mem0/llms/configs.py +34 -0
  67. mem0/llms/deepseek.py +85 -0
  68. mem0/llms/gemini.py +201 -0
  69. mem0/llms/groq.py +88 -0
  70. mem0/llms/langchain.py +65 -0
  71. mem0/llms/litellm.py +87 -0
  72. mem0/llms/lmstudio.py +53 -0
  73. mem0/llms/ollama.py +94 -0
  74. mem0/llms/openai.py +124 -0
  75. mem0/llms/openai_structured.py +52 -0
  76. mem0/llms/sarvam.py +89 -0
  77. mem0/llms/together.py +88 -0
  78. mem0/llms/vllm.py +89 -0
  79. mem0/llms/xai.py +52 -0
  80. mem0/memory/__init__.py +0 -0
  81. mem0/memory/base.py +63 -0
  82. mem0/memory/graph_memory.py +632 -0
  83. mem0/memory/main.py +1843 -0
  84. mem0/memory/memgraph_memory.py +630 -0
  85. mem0/memory/setup.py +56 -0
  86. mem0/memory/storage.py +218 -0
  87. mem0/memory/telemetry.py +90 -0
  88. mem0/memory/utils.py +133 -0
  89. mem0/proxy/__init__.py +0 -0
  90. mem0/proxy/main.py +194 -0
  91. mem0/utils/factory.py +132 -0
  92. mem0/vector_stores/__init__.py +0 -0
  93. mem0/vector_stores/azure_ai_search.py +383 -0
  94. mem0/vector_stores/baidu.py +368 -0
  95. mem0/vector_stores/base.py +58 -0
  96. mem0/vector_stores/chroma.py +229 -0
  97. mem0/vector_stores/configs.py +60 -0
  98. mem0/vector_stores/elasticsearch.py +235 -0
  99. mem0/vector_stores/faiss.py +473 -0
  100. mem0/vector_stores/langchain.py +179 -0
  101. mem0/vector_stores/milvus.py +245 -0
  102. mem0/vector_stores/mongodb.py +293 -0
  103. mem0/vector_stores/opensearch.py +281 -0
  104. mem0/vector_stores/pgvector.py +294 -0
  105. mem0/vector_stores/pinecone.py +373 -0
  106. mem0/vector_stores/qdrant.py +240 -0
  107. mem0/vector_stores/redis.py +295 -0
  108. mem0/vector_stores/supabase.py +237 -0
  109. mem0/vector_stores/upstash_vector.py +293 -0
  110. mem0/vector_stores/vertex_ai_vector_search.py +629 -0
  111. mem0/vector_stores/weaviate.py +316 -0
  112. mem0ai_azure_mysql-0.1.115.data/data/README.md +169 -0
  113. mem0ai_azure_mysql-0.1.115.dist-info/METADATA +224 -0
  114. mem0ai_azure_mysql-0.1.115.dist-info/RECORD +116 -0
  115. mem0ai_azure_mysql-0.1.115.dist-info/WHEEL +4 -0
  116. mem0ai_azure_mysql-0.1.115.dist-info/licenses/LICENSE +201 -0
mem0/memory/main.py ADDED
@@ -0,0 +1,1843 @@
+ import asyncio
+ import concurrent
+ import gc
+ import hashlib
+ import json
+ import logging
+ import os
+ import uuid
+ import warnings
+ from copy import deepcopy
+ from datetime import datetime
+ from typing import Any, Dict, Optional
+
+ import pytz
+ from pydantic import ValidationError
+
+ from mem0.configs.base import MemoryConfig, MemoryItem
+ from mem0.configs.enums import MemoryType
+ from mem0.configs.prompts import (
+     PROCEDURAL_MEMORY_SYSTEM_PROMPT,
+     get_update_memory_messages,
+ )
+ from mem0.memory.base import MemoryBase
+ from mem0.memory.setup import mem0_dir, setup_config
+ from mem0.memory.telemetry import capture_event
+ from mem0.memory.utils import (
+     get_fact_retrieval_messages,
+     parse_messages,
+     parse_vision_messages,
+     process_telemetry_filters,
+     remove_code_blocks,
+ )
+ from mem0.utils.factory import EmbedderFactory, LlmFactory, VectorStoreFactory, DBFactory
+
+
+ def _build_filters_and_metadata(
+     *,  # Enforce keyword-only arguments
+     user_id: Optional[str] = None,
+     agent_id: Optional[str] = None,
+     run_id: Optional[str] = None,
+     actor_id: Optional[str] = None,  # For query-time filtering
+     input_metadata: Optional[Dict[str, Any]] = None,
+     input_filters: Optional[Dict[str, Any]] = None,
+ ) -> tuple[Dict[str, Any], Dict[str, Any]]:
+     """
+     Constructs metadata for storage and filters for querying based on session and actor identifiers.
+
+     This helper supports multiple session identifiers (`user_id`, `agent_id`, and/or `run_id`)
+     for flexible session scoping and optionally narrows queries to a specific `actor_id`. It returns two dicts:
+
+     1. `base_metadata_template`: Used as a template for metadata when storing new memories.
+        It includes all provided session identifier(s) and any `input_metadata`.
+     2. `effective_query_filters`: Used for querying existing memories. It includes all
+        provided session identifier(s), any `input_filters`, and a resolved actor
+        identifier for targeted filtering if specified by any actor-related inputs.
+
+     Actor filtering precedence: explicit `actor_id` arg → `filters["actor_id"]`.
+     This resolved actor ID is used for querying but is not added to `base_metadata_template`,
+     as the actor for storage is typically derived from message content at a later stage.
+
+     Args:
+         user_id (Optional[str]): User identifier, for session scoping.
+         agent_id (Optional[str]): Agent identifier, for session scoping.
+         run_id (Optional[str]): Run identifier, for session scoping.
+         actor_id (Optional[str]): Explicit actor identifier, used as a potential source for
+             actor-specific filtering. See actor resolution precedence in the main description.
+         input_metadata (Optional[Dict[str, Any]]): Base dictionary to be augmented with
+             session identifiers for the storage metadata template. Defaults to an empty dict.
+         input_filters (Optional[Dict[str, Any]]): Base dictionary to be augmented with
+             session and actor identifiers for query filters. Defaults to an empty dict.
+
+     Returns:
+         tuple[Dict[str, Any], Dict[str, Any]]: A tuple containing:
+             - base_metadata_template (Dict[str, Any]): Metadata template for storing memories,
+               scoped to the provided session(s).
+             - effective_query_filters (Dict[str, Any]): Filters for querying memories,
+               scoped to the provided session(s) and potentially a resolved actor.
+     """
+
+     base_metadata_template = deepcopy(input_metadata) if input_metadata else {}
+     effective_query_filters = deepcopy(input_filters) if input_filters else {}
+
+     # ---------- add all provided session ids ----------
+     session_ids_provided = []
+
+     if user_id:
+         base_metadata_template["user_id"] = user_id
+         effective_query_filters["user_id"] = user_id
+         session_ids_provided.append("user_id")
+
+     if agent_id:
+         base_metadata_template["agent_id"] = agent_id
+         effective_query_filters["agent_id"] = agent_id
+         session_ids_provided.append("agent_id")
+
+     if run_id:
+         base_metadata_template["run_id"] = run_id
+         effective_query_filters["run_id"] = run_id
+         session_ids_provided.append("run_id")
+
+     if not session_ids_provided:
+         raise ValueError("At least one of 'user_id', 'agent_id', or 'run_id' must be provided.")
+
+     # ---------- optional actor filter ----------
+     resolved_actor_id = actor_id or effective_query_filters.get("actor_id")
+     if resolved_actor_id:
+         effective_query_filters["actor_id"] = resolved_actor_id
+
+     return base_metadata_template, effective_query_filters
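For illustration, a minimal sketch of the two dicts this helper returns (ids and metadata below are hypothetical):

    metadata, filters = _build_filters_and_metadata(
        user_id="alice",
        actor_id="assistant",
        input_metadata={"topic": "travel"},
    )
    # metadata -> {"topic": "travel", "user_id": "alice"}        (storage template; no actor_id)
    # filters  -> {"user_id": "alice", "actor_id": "assistant"}  (actor narrows queries only)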
+
+
+ setup_config()
+ logger = logging.getLogger(__name__)
+
+
+ class Memory(MemoryBase):
+     def __init__(self, config: MemoryConfig = MemoryConfig()):
+         self.config = config
+
+         self.custom_fact_extraction_prompt = self.config.custom_fact_extraction_prompt
+         self.custom_update_memory_prompt = self.config.custom_update_memory_prompt
+         self.embedding_model = EmbedderFactory.create(
+             self.config.embedder.provider,
+             self.config.embedder.config,
+             self.config.vector_store.config,
+         )
+         self.vector_store = VectorStoreFactory.create(
+             self.config.vector_store.provider, self.config.vector_store.config
+         )
+         self.llm = LlmFactory.create(self.config.llm.provider, self.config.llm.config)
+         self.db = DBFactory.create(self.config.db.provider, self.config.db.config)
+         self.collection_name = self.config.vector_store.config.collection_name
+         self.api_version = self.config.version
+
+         self.enable_graph = False
+
+         if self.config.graph_store.config:
+             if self.config.graph_store.provider == "memgraph":
+                 from mem0.memory.memgraph_memory import MemoryGraph
+             elif self.config.graph_store.provider == "neptune":
+                 from mem0.graphs.neptune.main import MemoryGraph
+             else:
+                 from mem0.memory.graph_memory import MemoryGraph
+
+             self.graph = MemoryGraph(self.config)
+             self.enable_graph = True
+         else:
+             self.graph = None
+         self.config.vector_store.config.collection_name = "mem0migrations"
+         if self.config.vector_store.provider in ["faiss", "qdrant"]:
+             provider_path = f"migrations_{self.config.vector_store.provider}"
+             self.config.vector_store.config.path = os.path.join(mem0_dir, provider_path)
+             os.makedirs(self.config.vector_store.config.path, exist_ok=True)
+         self._telemetry_vector_store = VectorStoreFactory.create(
+             self.config.vector_store.provider, self.config.vector_store.config
+         )
+         capture_event("mem0.init", self, {"sync_type": "sync"})
+
+     @classmethod
+     def from_config(cls, config_dict: Dict[str, Any]):
+         try:
+             config = cls._process_config(config_dict)
+             config = MemoryConfig(**config_dict)
+         except ValidationError as e:
+             logger.error(f"Configuration validation error: {e}")
+             raise
+         return cls(config)
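As a rough sketch, `from_config` accepts a plain dict mirroring `MemoryConfig`; the provider names and model values below are illustrative, and the exact schema is defined by the config classes under `mem0/configs/`:

    config = {
        "vector_store": {"provider": "qdrant", "config": {"collection_name": "mem0"}},
        "llm": {"provider": "openai", "config": {"model": "gpt-4o-mini"}},
        "embedder": {"provider": "openai", "config": {"model": "text-embedding-3-small"}},
    }
    m = Memory.from_config(config)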
+
+     @staticmethod
+     def _process_config(config_dict: Dict[str, Any]) -> Dict[str, Any]:
+         if "graph_store" in config_dict:
+             if "vector_store" not in config_dict and "embedder" in config_dict:
+                 config_dict["vector_store"] = {}
+                 config_dict["vector_store"]["config"] = {}
+                 config_dict["vector_store"]["config"]["embedding_model_dims"] = config_dict["embedder"]["config"][
+                     "embedding_dims"
+                 ]
+         try:
+             return config_dict
+         except ValidationError as e:
+             logger.error(f"Configuration validation error: {e}")
+             raise
+
+     def add(
+         self,
+         messages,
+         *,
+         user_id: Optional[str] = None,
+         agent_id: Optional[str] = None,
+         run_id: Optional[str] = None,
+         metadata: Optional[Dict[str, Any]] = None,
+         infer: bool = True,
+         memory_type: Optional[str] = None,
+         prompt: Optional[str] = None,
+     ):
+         """
+         Create a new memory.
+
+         Adds new memories scoped to a single session id (e.g. `user_id`, `agent_id`, or `run_id`). One of those ids is required.
+
+         Args:
+             messages (str or List[Dict[str, str]]): The message content or list of messages
+                 (e.g., `[{"role": "user", "content": "Hello"}, {"role": "assistant", "content": "Hi"}]`)
+                 to be processed and stored.
+             user_id (str, optional): ID of the user creating the memory. Defaults to None.
+             agent_id (str, optional): ID of the agent creating the memory. Defaults to None.
+             run_id (str, optional): ID of the run creating the memory. Defaults to None.
+             metadata (dict, optional): Metadata to store with the memory. Defaults to None.
+             infer (bool, optional): If True (default), an LLM is used to extract key facts from
+                 'messages' and decide whether to add, update, or delete related memories.
+                 If False, 'messages' are added as raw memories directly.
+             memory_type (str, optional): Type of memory to create. Defaults to None, which creates
+                 general conversational/factual (semantic and episodic) memories. Pass
+                 `MemoryType.PROCEDURAL.value` ("procedural_memory") to create a procedural
+                 memory instead; this typically requires 'agent_id'.
+             prompt (str, optional): Prompt to use for the memory creation. Defaults to None.
+
+         Returns:
+             dict: A dictionary containing the result of the memory addition operation, typically
+                 including a list of memory items affected (added, updated) under a "results" key,
+                 and potentially "relations" if graph store is enabled.
+                 Example for v1.1+: `{"results": [{"id": "...", "memory": "...", "event": "ADD"}]}`
+         """
+
+         processed_metadata, effective_filters = _build_filters_and_metadata(
+             user_id=user_id,
+             agent_id=agent_id,
+             run_id=run_id,
+             input_metadata=metadata,
+         )
+
+         if memory_type is not None and memory_type != MemoryType.PROCEDURAL.value:
+             raise ValueError(
+                 f"Invalid 'memory_type'. Please pass {MemoryType.PROCEDURAL.value} to create procedural memories."
+             )
+
+         if isinstance(messages, str):
+             messages = [{"role": "user", "content": messages}]
+
+         elif isinstance(messages, dict):
+             messages = [messages]
+
+         elif not isinstance(messages, list):
+             raise ValueError("messages must be str, dict, or list[dict]")
+
+         if agent_id is not None and memory_type == MemoryType.PROCEDURAL.value:
+             results = self._create_procedural_memory(messages, metadata=processed_metadata, prompt=prompt)
+             return results
+
+         if self.config.llm.config.get("enable_vision"):
+             messages = parse_vision_messages(messages, self.llm, self.config.llm.config.get("vision_details"))
+         else:
+             messages = parse_vision_messages(messages)
+
+         with concurrent.futures.ThreadPoolExecutor() as executor:
+             future1 = executor.submit(self._add_to_vector_store, messages, processed_metadata, effective_filters, infer)
+             future2 = executor.submit(self._add_to_graph, messages, effective_filters)
+
+             concurrent.futures.wait([future1, future2])
+
+             vector_store_result = future1.result()
+             graph_result = future2.result()
+
+         if self.api_version == "v1.0":
+             warnings.warn(
+                 "The current add API output format is deprecated. "
+                 "To use the latest format, set `api_version='v1.1'`. "
+                 "The current format will be removed in mem0ai 1.1.0 and later versions.",
+                 category=DeprecationWarning,
+                 stacklevel=2,
+             )
+             return vector_store_result
+
+         if self.enable_graph:
+             return {
+                 "results": vector_store_result,
+                 "relations": graph_result,
+             }
+
+         return {"results": vector_store_result}
+
+     def _add_to_vector_store(self, messages, metadata, filters, infer):
+         if not infer:
+             returned_memories = []
+             for message_dict in messages:
+                 if (
+                     not isinstance(message_dict, dict)
+                     or message_dict.get("role") is None
+                     or message_dict.get("content") is None
+                 ):
+                     logger.warning(f"Skipping invalid message format: {message_dict}")
+                     continue
+
+                 if message_dict["role"] == "system":
+                     continue
+
+                 per_msg_meta = deepcopy(metadata)
+                 per_msg_meta["role"] = message_dict["role"]
+
+                 actor_name = message_dict.get("name")
+                 if actor_name:
+                     per_msg_meta["actor_id"] = actor_name
+
+                 msg_content = message_dict["content"]
+                 msg_embeddings = self.embedding_model.embed(msg_content, "add")
+                 mem_id = self._create_memory(msg_content, msg_embeddings, per_msg_meta)
+
+                 returned_memories.append(
+                     {
+                         "id": mem_id,
+                         "memory": msg_content,
+                         "event": "ADD",
+                         "actor_id": actor_name if actor_name else None,
+                         "role": message_dict["role"],
+                     }
+                 )
+             return returned_memories
+
+         parsed_messages = parse_messages(messages)
+
+         if self.config.custom_fact_extraction_prompt:
+             system_prompt = self.config.custom_fact_extraction_prompt
+             user_prompt = f"Input:\n{parsed_messages}"
+         else:
+             system_prompt, user_prompt = get_fact_retrieval_messages(parsed_messages)
+
+         response = self.llm.generate_response(
+             messages=[
+                 {"role": "system", "content": system_prompt},
+                 {"role": "user", "content": user_prompt},
+             ],
+             response_format={"type": "json_object"},
+         )
+
+         try:
+             response = remove_code_blocks(response)
+             new_retrieved_facts = json.loads(response)["facts"]
+         except Exception as e:
+             logger.error(f"Error in new_retrieved_facts: {e}")
+             new_retrieved_facts = []
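The fact-extraction call above is expected to return a JSON object with a top-level "facts" array, for example (values illustrative):

    # {"facts": ["Lives in Berlin", "Prefers vegetarian food"]}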
+
+         if not new_retrieved_facts:
+             logger.debug("No new facts retrieved from input. Skipping memory update LLM call.")
+
+         retrieved_old_memory = []
+         new_message_embeddings = {}
+         for new_mem in new_retrieved_facts:
+             messages_embeddings = self.embedding_model.embed(new_mem, "add")
+             new_message_embeddings[new_mem] = messages_embeddings
+             existing_memories = self.vector_store.search(
+                 query=new_mem,
+                 vectors=messages_embeddings,
+                 limit=5,
+                 filters=filters,
+             )
+             for mem in existing_memories:
+                 retrieved_old_memory.append({"id": mem.id, "text": mem.payload["data"]})
+
+         unique_data = {}
+         for item in retrieved_old_memory:
+             unique_data[item["id"]] = item
+         retrieved_old_memory = list(unique_data.values())
+         logger.info(f"Total existing memories: {len(retrieved_old_memory)}")
+
+         # map UUIDs to integer indices so the LLM cannot hallucinate unknown UUIDs
+         temp_uuid_mapping = {}
+         for idx, item in enumerate(retrieved_old_memory):
+             temp_uuid_mapping[str(idx)] = item["id"]
+             retrieved_old_memory[idx]["id"] = str(idx)
+
+         if new_retrieved_facts:
+             function_calling_prompt = get_update_memory_messages(
+                 retrieved_old_memory, new_retrieved_facts, self.config.custom_update_memory_prompt
+             )
+
+             try:
+                 response: str = self.llm.generate_response(
+                     messages=[{"role": "user", "content": function_calling_prompt}],
+                     response_format={"type": "json_object"},
+                 )
+             except Exception as e:
+                 logger.error(f"Error in new memory actions response: {e}")
+                 response = ""
+
+             try:
+                 response = remove_code_blocks(response)
+                 new_memories_with_actions = json.loads(response)
+             except Exception as e:
+                 logger.error(f"Invalid JSON response: {e}")
+                 new_memories_with_actions = {}
+         else:
+             new_memories_with_actions = {}
+
+         returned_memories = []
+         try:
+             for resp in new_memories_with_actions.get("memory", []):
+                 logger.info(resp)
+                 try:
+                     action_text = resp.get("text")
+                     if not action_text:
+                         logger.info("Skipping memory entry because of empty `text` field.")
+                         continue
+
+                     event_type = resp.get("event")
+                     if event_type == "ADD":
+                         memory_id = self._create_memory(
+                             data=action_text,
+                             existing_embeddings=new_message_embeddings,
+                             metadata=deepcopy(metadata),
+                         )
+                         returned_memories.append({"id": memory_id, "memory": action_text, "event": event_type})
+                     elif event_type == "UPDATE":
+                         self._update_memory(
+                             memory_id=temp_uuid_mapping[resp.get("id")],
+                             data=action_text,
+                             existing_embeddings=new_message_embeddings,
+                             metadata=deepcopy(metadata),
+                         )
+                         returned_memories.append(
+                             {
+                                 "id": temp_uuid_mapping[resp.get("id")],
+                                 "memory": action_text,
+                                 "event": event_type,
+                                 "previous_memory": resp.get("old_memory"),
+                             }
+                         )
+                     elif event_type == "DELETE":
+                         self._delete_memory(memory_id=temp_uuid_mapping[resp.get("id")])
+                         returned_memories.append(
+                             {
+                                 "id": temp_uuid_mapping[resp.get("id")],
+                                 "memory": action_text,
+                                 "event": event_type,
+                             }
+                         )
+                     elif event_type == "NONE":
+                         logger.info("NOOP for Memory.")
+                 except Exception as e:
+                     logger.error(f"Error processing memory action: {resp}, Error: {e}")
+         except Exception as e:
+             logger.error(f"Error iterating new_memories_with_actions: {e}")
+
+         keys, encoded_ids = process_telemetry_filters(filters)
+         capture_event(
+             "mem0.add",
+             self,
+             {"version": self.api_version, "keys": keys, "encoded_ids": encoded_ids, "sync_type": "sync"},
+         )
+         return returned_memories
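The update-decision call is expected to return JSON keyed by "memory", where each entry's "id" is one of the integer indices from `temp_uuid_mapping` (values illustrative):

    # {"memory": [
    #     {"id": "0", "event": "UPDATE", "text": "Lives in Munich", "old_memory": "Lives in Berlin"},
    #     {"event": "ADD", "text": "Prefers vegetarian food"}
    # ]}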
+
+     def _add_to_graph(self, messages, filters):
+         added_entities = []
+         if self.enable_graph:
+             if filters.get("user_id") is None:
+                 filters["user_id"] = "user"
+
+             data = "\n".join([msg["content"] for msg in messages if "content" in msg and msg["role"] != "system"])
+             added_entities = self.graph.add(data, filters)
+
+         return added_entities
+
+     def get(self, memory_id):
+         """
+         Retrieve a memory by ID.
+
+         Args:
+             memory_id (str): ID of the memory to retrieve.
+
+         Returns:
+             dict: Retrieved memory.
+         """
+         capture_event("mem0.get", self, {"memory_id": memory_id, "sync_type": "sync"})
+         memory = self.vector_store.get(vector_id=memory_id)
+         if not memory:
+             return None
+
+         promoted_payload_keys = [
+             "user_id",
+             "agent_id",
+             "run_id",
+             "actor_id",
+             "role",
+         ]
+
+         core_and_promoted_keys = {"data", "hash", "created_at", "updated_at", "id", *promoted_payload_keys}
+
+         result_item = MemoryItem(
+             id=memory.id,
+             memory=memory.payload["data"],
+             hash=memory.payload.get("hash"),
+             created_at=memory.payload.get("created_at"),
+             updated_at=memory.payload.get("updated_at"),
+         ).model_dump()
+
+         for key in promoted_payload_keys:
+             if key in memory.payload:
+                 result_item[key] = memory.payload[key]
+
+         additional_metadata = {k: v for k, v in memory.payload.items() if k not in core_and_promoted_keys}
+         if additional_metadata:
+             result_item["metadata"] = additional_metadata
+
+         return result_item
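Illustratively, a `get()` result promotes session keys to the top level and nests any remaining payload under "metadata" (values hypothetical):

    # {"id": "3d2c...", "memory": "Lives in Berlin", "hash": "...",
    #  "created_at": "2025-01-01T09:00:00-08:00", "updated_at": None,
    #  "user_id": "alice", "metadata": {"topic": "travel"}}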
+
+     def get_all(
+         self,
+         *,
+         user_id: Optional[str] = None,
+         agent_id: Optional[str] = None,
+         run_id: Optional[str] = None,
+         filters: Optional[Dict[str, Any]] = None,
+         limit: int = 100,
+     ):
+         """
+         List all memories.
+
+         Args:
+             user_id (str, optional): user id
+             agent_id (str, optional): agent id
+             run_id (str, optional): run id
+             filters (dict, optional): Additional custom key-value filters to apply to the search.
+                 These are merged with the ID-based scoping filters. For example,
+                 `filters={"actor_id": "some_user"}`.
+             limit (int, optional): The maximum number of memories to return. Defaults to 100.
+
+         Returns:
+             dict: A dictionary containing a list of memories under the "results" key,
+                 and potentially "relations" if graph store is enabled. For API v1.0,
+                 it might return a direct list (see deprecation warning).
+                 Example for v1.1+: `{"results": [{"id": "...", "memory": "...", ...}]}`
+         """
+
+         _, effective_filters = _build_filters_and_metadata(
+             user_id=user_id, agent_id=agent_id, run_id=run_id, input_filters=filters
+         )
+
+         if not any(key in effective_filters for key in ("user_id", "agent_id", "run_id")):
+             raise ValueError("At least one of 'user_id', 'agent_id', or 'run_id' must be specified.")
+
+         keys, encoded_ids = process_telemetry_filters(effective_filters)
+         capture_event(
+             "mem0.get_all", self, {"limit": limit, "keys": keys, "encoded_ids": encoded_ids, "sync_type": "sync"}
+         )
+
+         with concurrent.futures.ThreadPoolExecutor() as executor:
+             future_memories = executor.submit(self._get_all_from_vector_store, effective_filters, limit)
+             future_graph_entities = (
+                 executor.submit(self.graph.get_all, effective_filters, limit) if self.enable_graph else None
+             )
+
+             concurrent.futures.wait(
+                 [future_memories, future_graph_entities] if future_graph_entities else [future_memories]
+             )
+
+             all_memories_result = future_memories.result()
+             graph_entities_result = future_graph_entities.result() if future_graph_entities else None
+
+         if self.enable_graph:
+             return {"results": all_memories_result, "relations": graph_entities_result}
+
+         if self.api_version == "v1.0":
+             warnings.warn(
+                 "The current get_all API output format is deprecated. "
+                 "To use the latest format, set `api_version='v1.1'` (which returns a dict with a 'results' key). "
+                 "The current format (direct list for v1.0) will be removed in mem0ai 1.1.0 and later versions.",
+                 category=DeprecationWarning,
+                 stacklevel=2,
+             )
+             return all_memories_result
+         else:
+             return {"results": all_memories_result}
+
+     def _get_all_from_vector_store(self, filters, limit):
+         memories_result = self.vector_store.list(filters=filters, limit=limit)
+         actual_memories = (
+             memories_result[0]
+             if isinstance(memories_result, (tuple, list)) and len(memories_result) > 0
+             else memories_result
+         )
+
+         promoted_payload_keys = [
+             "user_id",
+             "agent_id",
+             "run_id",
+             "actor_id",
+             "role",
+         ]
+         core_and_promoted_keys = {"data", "hash", "created_at", "updated_at", "id", *promoted_payload_keys}
+
+         formatted_memories = []
+         for mem in actual_memories:
+             memory_item_dict = MemoryItem(
+                 id=mem.id,
+                 memory=mem.payload["data"],
+                 hash=mem.payload.get("hash"),
+                 created_at=mem.payload.get("created_at"),
+                 updated_at=mem.payload.get("updated_at"),
+             ).model_dump(exclude={"score"})
+
+             for key in promoted_payload_keys:
+                 if key in mem.payload:
+                     memory_item_dict[key] = mem.payload[key]
+
+             additional_metadata = {k: v for k, v in mem.payload.items() if k not in core_and_promoted_keys}
+             if additional_metadata:
+                 memory_item_dict["metadata"] = additional_metadata
+
+             formatted_memories.append(memory_item_dict)
+
+         return formatted_memories
+
+     def search(
+         self,
+         query: str,
+         *,
+         user_id: Optional[str] = None,
+         agent_id: Optional[str] = None,
+         run_id: Optional[str] = None,
+         limit: int = 100,
+         filters: Optional[Dict[str, Any]] = None,
+         threshold: Optional[float] = None,
+     ):
+         """
+         Search for memories based on a query.
+
+         Args:
+             query (str): Query to search for.
+             user_id (str, optional): ID of the user to search for. Defaults to None.
+             agent_id (str, optional): ID of the agent to search for. Defaults to None.
+             run_id (str, optional): ID of the run to search for. Defaults to None.
+             limit (int, optional): Limit the number of results. Defaults to 100.
+             filters (dict, optional): Filters to apply to the search. Defaults to None.
+             threshold (float, optional): Minimum score for a memory to be included in the results. Defaults to None.
+
+         Returns:
+             dict: A dictionary containing the search results, typically under a "results" key,
+                 and potentially "relations" if graph store is enabled.
+                 Example for v1.1+: `{"results": [{"id": "...", "memory": "...", "score": 0.8, ...}]}`
+         """
+         _, effective_filters = _build_filters_and_metadata(
+             user_id=user_id, agent_id=agent_id, run_id=run_id, input_filters=filters
+         )
+
+         if not any(key in effective_filters for key in ("user_id", "agent_id", "run_id")):
+             raise ValueError("At least one of 'user_id', 'agent_id', or 'run_id' must be specified.")
+
+         keys, encoded_ids = process_telemetry_filters(effective_filters)
+         capture_event(
+             "mem0.search",
+             self,
+             {
+                 "limit": limit,
+                 "version": self.api_version,
+                 "keys": keys,
+                 "encoded_ids": encoded_ids,
+                 "sync_type": "sync",
+                 "threshold": threshold,
+             },
+         )
+
+         with concurrent.futures.ThreadPoolExecutor() as executor:
+             future_memories = executor.submit(self._search_vector_store, query, effective_filters, limit, threshold)
+             future_graph_entities = (
+                 executor.submit(self.graph.search, query, effective_filters, limit) if self.enable_graph else None
+             )
+
+             concurrent.futures.wait(
+                 [future_memories, future_graph_entities] if future_graph_entities else [future_memories]
+             )
+
+             original_memories = future_memories.result()
+             graph_entities = future_graph_entities.result() if future_graph_entities else None
+
+         if self.enable_graph:
+             return {"results": original_memories, "relations": graph_entities}
+
+         if self.api_version == "v1.0":
+             warnings.warn(
+                 "The current search API output format is deprecated. "
+                 "To use the latest format, set `api_version='v1.1'`. "
+                 "The current format will be removed in mem0ai 1.1.0 and later versions.",
+                 category=DeprecationWarning,
+                 stacklevel=2,
+             )
+             return {"results": original_memories}
+         else:
+             return {"results": original_memories}
+
+     def _search_vector_store(self, query, filters, limit, threshold: Optional[float] = None):
+         embeddings = self.embedding_model.embed(query, "search")
+         memories = self.vector_store.search(query=query, vectors=embeddings, limit=limit, filters=filters)
+
+         promoted_payload_keys = [
+             "user_id",
+             "agent_id",
+             "run_id",
+             "actor_id",
+             "role",
+         ]
+
+         core_and_promoted_keys = {"data", "hash", "created_at", "updated_at", "id", *promoted_payload_keys}
+
+         original_memories = []
+         for mem in memories:
+             memory_item_dict = MemoryItem(
+                 id=mem.id,
+                 memory=mem.payload["data"],
+                 hash=mem.payload.get("hash"),
+                 created_at=mem.payload.get("created_at"),
+                 updated_at=mem.payload.get("updated_at"),
+                 score=mem.score,
+             ).model_dump()
+
+             for key in promoted_payload_keys:
+                 if key in mem.payload:
+                     memory_item_dict[key] = mem.payload[key]
+
+             additional_metadata = {k: v for k, v in mem.payload.items() if k not in core_and_promoted_keys}
+             if additional_metadata:
+                 memory_item_dict["metadata"] = additional_metadata
+
+             if threshold is None or mem.score >= threshold:
+                 original_memories.append(memory_item_dict)
+
+         return original_memories
+
+     def update(self, memory_id, data):
+         """
+         Update a memory by ID.
+
+         Args:
+             memory_id (str): ID of the memory to update.
+             data (str): New text content for the memory.
+
+         Returns:
+             dict: Updated memory.
+         """
+         capture_event("mem0.update", self, {"memory_id": memory_id, "sync_type": "sync"})
+
+         existing_embeddings = {data: self.embedding_model.embed(data, "update")}
+
+         self._update_memory(memory_id, data, existing_embeddings)
+         return {"message": "Memory updated successfully!"}
+
+     def delete(self, memory_id):
+         """
+         Delete a memory by ID.
+
+         Args:
+             memory_id (str): ID of the memory to delete.
+         """
+         capture_event("mem0.delete", self, {"memory_id": memory_id, "sync_type": "sync"})
+         self._delete_memory(memory_id)
+         return {"message": "Memory deleted successfully!"}
+
+     def delete_all(self, user_id: Optional[str] = None, agent_id: Optional[str] = None, run_id: Optional[str] = None):
+         """
+         Delete all memories matching the given session filters.
+
+         Args:
+             user_id (str, optional): ID of the user to delete memories for. Defaults to None.
+             agent_id (str, optional): ID of the agent to delete memories for. Defaults to None.
+             run_id (str, optional): ID of the run to delete memories for. Defaults to None.
+         """
+         filters: Dict[str, Any] = {}
+         if user_id:
+             filters["user_id"] = user_id
+         if agent_id:
+             filters["agent_id"] = agent_id
+         if run_id:
+             filters["run_id"] = run_id
+
+         if not filters:
+             raise ValueError(
+                 "At least one filter is required to delete all memories. If you want to delete all memories, use the `reset()` method."
+             )
+
+         keys, encoded_ids = process_telemetry_filters(filters)
+         capture_event("mem0.delete_all", self, {"keys": keys, "encoded_ids": encoded_ids, "sync_type": "sync"})
+         memories = self.vector_store.list(filters=filters)[0]
+         for memory in memories:
+             self._delete_memory(memory.id)
+
+         logger.info(f"Deleted {len(memories)} memories")
+
+         if self.enable_graph:
+             self.graph.delete_all(filters)
+
+         return {"message": "Memories deleted successfully!"}
+
+     def history(self, memory_id):
+         """
+         Get the history of changes for a memory by ID.
+
+         Args:
+             memory_id (str): ID of the memory to get history for.
+
+         Returns:
+             list: List of changes for the memory.
+         """
+         capture_event("mem0.history", self, {"memory_id": memory_id, "sync_type": "sync"})
+         return self.db.get_history(memory_id)
+
+     def _create_memory(self, data, existing_embeddings, metadata=None):
+         logger.debug(f"Creating memory with {data=}")
+         if data in existing_embeddings:
+             embeddings = existing_embeddings[data]
+         else:
+             embeddings = self.embedding_model.embed(data, memory_action="add")
+         memory_id = str(uuid.uuid4())
+         metadata = metadata or {}
+         metadata["data"] = data
+         metadata["hash"] = hashlib.md5(data.encode()).hexdigest()
+         metadata["created_at"] = datetime.now(pytz.timezone("US/Pacific")).isoformat()
+
+         self.vector_store.insert(
+             vectors=[embeddings],
+             ids=[memory_id],
+             payloads=[metadata],
+         )
+         self.db.add_history(
+             memory_id,
+             None,
+             data,
+             "ADD",
+             created_at=metadata.get("created_at"),
+             actor_id=metadata.get("actor_id"),
+             role=metadata.get("role"),
+         )
+         capture_event("mem0._create_memory", self, {"memory_id": memory_id, "sync_type": "sync"})
+         return memory_id
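Illustratively, the payload stored alongside each new vector looks like this (values hypothetical; note the MD5 content hash and the US/Pacific timestamp):

    # {"data": "Lives in Berlin",
    #  "hash": "b2a6...",                          # hashlib.md5(data.encode()).hexdigest()
    #  "created_at": "2025-01-01T09:00:00-08:00",  # datetime.now(pytz.timezone("US/Pacific"))
    #  "user_id": "alice", "role": "user"}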
+
+     def _create_procedural_memory(self, messages, metadata=None, prompt=None):
+         """
+         Create a procedural memory.
+
+         Args:
+             messages (list): List of messages to create a procedural memory from.
+             metadata (dict): Metadata to store with the procedural memory.
+             prompt (str, optional): Prompt to use for the procedural memory creation. Defaults to None.
+         """
+         logger.info("Creating procedural memory")
+
+         parsed_messages = [
+             {"role": "system", "content": prompt or PROCEDURAL_MEMORY_SYSTEM_PROMPT},
+             *messages,
+             {
+                 "role": "user",
+                 "content": "Create procedural memory of the above conversation.",
+             },
+         ]
+
+         try:
+             procedural_memory = self.llm.generate_response(messages=parsed_messages)
+         except Exception as e:
+             logger.error(f"Error generating procedural memory summary: {e}")
+             raise
+
+         if metadata is None:
+             raise ValueError("Metadata cannot be None for procedural memory.")
+
+         metadata["memory_type"] = MemoryType.PROCEDURAL.value
+         embeddings = self.embedding_model.embed(procedural_memory, memory_action="add")
+         memory_id = self._create_memory(procedural_memory, {procedural_memory: embeddings}, metadata=metadata)
+         capture_event("mem0._create_procedural_memory", self, {"memory_id": memory_id, "sync_type": "sync"})
+
+         result = {"results": [{"id": memory_id, "memory": procedural_memory, "event": "ADD"}]}
+
+         return result
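Procedural memories are created through the public `add()` API (ids hypothetical); note that `add()` only takes this path when `agent_id` is set:

    m.add(
        [{"role": "user", "content": "Open the dashboard."}, {"role": "assistant", "content": "Done."}],
        agent_id="browser-agent",
        memory_type="procedural_memory",
    )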
+
+     def _update_memory(self, memory_id, data, existing_embeddings, metadata=None):
+         logger.info(f"Updating memory with {data=}")
+
+         try:
+             existing_memory = self.vector_store.get(vector_id=memory_id)
+         except Exception:
+             logger.error(f"Error getting memory with ID {memory_id} during update.")
+             raise ValueError(f"Error getting memory with ID {memory_id}. Please provide a valid 'memory_id'")
+
+         prev_value = existing_memory.payload.get("data")
+
+         new_metadata = deepcopy(metadata) if metadata is not None else {}
+
+         new_metadata["data"] = data
+         new_metadata["hash"] = hashlib.md5(data.encode()).hexdigest()
+         new_metadata["created_at"] = existing_memory.payload.get("created_at")
+         new_metadata["updated_at"] = datetime.now(pytz.timezone("US/Pacific")).isoformat()
+
+         if "user_id" in existing_memory.payload:
+             new_metadata["user_id"] = existing_memory.payload["user_id"]
+         if "agent_id" in existing_memory.payload:
+             new_metadata["agent_id"] = existing_memory.payload["agent_id"]
+         if "run_id" in existing_memory.payload:
+             new_metadata["run_id"] = existing_memory.payload["run_id"]
+         if "actor_id" in existing_memory.payload:
+             new_metadata["actor_id"] = existing_memory.payload["actor_id"]
+         if "role" in existing_memory.payload:
+             new_metadata["role"] = existing_memory.payload["role"]
+
+         if data in existing_embeddings:
+             embeddings = existing_embeddings[data]
+         else:
+             embeddings = self.embedding_model.embed(data, "update")
+
+         self.vector_store.update(
+             vector_id=memory_id,
+             vector=embeddings,
+             payload=new_metadata,
+         )
+         logger.info(f"Updated memory {memory_id} with {data=}")
+
+         self.db.add_history(
+             memory_id,
+             prev_value,
+             data,
+             "UPDATE",
+             created_at=new_metadata["created_at"],
+             updated_at=new_metadata["updated_at"],
+             actor_id=new_metadata.get("actor_id"),
+             role=new_metadata.get("role"),
+         )
+         capture_event("mem0._update_memory", self, {"memory_id": memory_id, "sync_type": "sync"})
+         return memory_id
+
+     def _delete_memory(self, memory_id):
+         logger.info(f"Deleting memory with {memory_id=}")
+         existing_memory = self.vector_store.get(vector_id=memory_id)
+         prev_value = existing_memory.payload["data"]
+         self.vector_store.delete(vector_id=memory_id)
+         self.db.add_history(
+             memory_id,
+             prev_value,
+             None,
+             "DELETE",
+             actor_id=existing_memory.payload.get("actor_id"),
+             role=existing_memory.payload.get("role"),
+             is_deleted=1,
+         )
+         capture_event("mem0._delete_memory", self, {"memory_id": memory_id, "sync_type": "sync"})
+         return memory_id
+
+     def reset(self):
+         """
+         Reset the memory store by deleting the vector store collection, resetting the
+         history database, and recreating the vector store with a new client.
+         """
+         logger.warning("Resetting all memories")
+
+         self.db.reset()
+         self.db.close()
+
+         self.db = DBFactory.create(self.config.db.provider, self.config.db.config)
+
+         if hasattr(self.vector_store, "reset"):
+             self.vector_store = VectorStoreFactory.reset(self.vector_store)
+         else:
+             logger.warning("Vector store does not support reset. Deleting and recreating the collection instead.")
+             self.vector_store.delete_col()
+             self.vector_store = VectorStoreFactory.create(
+                 self.config.vector_store.provider, self.config.vector_store.config
+             )
+         capture_event("mem0.reset", self, {"sync_type": "sync"})
+
+     def chat(self, query):
+         raise NotImplementedError("Chat function not implemented yet.")
+
+
+ class AsyncMemory(MemoryBase):
+     def __init__(self, config: MemoryConfig = MemoryConfig()):
+         self.config = config
+
+         self.embedding_model = EmbedderFactory.create(
+             self.config.embedder.provider,
+             self.config.embedder.config,
+             self.config.vector_store.config,
+         )
+         self.vector_store = VectorStoreFactory.create(
+             self.config.vector_store.provider, self.config.vector_store.config
+         )
+         self.llm = LlmFactory.create(self.config.llm.provider, self.config.llm.config)
+         self.db = DBFactory.create(self.config.db.provider, self.config.db.config)
+         self.collection_name = self.config.vector_store.config.collection_name
+         self.api_version = self.config.version
+
+         self.enable_graph = False
+
+         if self.config.graph_store.config:
+             from mem0.memory.graph_memory import MemoryGraph
+
+             self.graph = MemoryGraph(self.config)
+             self.enable_graph = True
+         else:
+             self.graph = None
+
+         capture_event("mem0.init", self, {"sync_type": "async"})
+
+     @classmethod
+     async def from_config(cls, config_dict: Dict[str, Any]):
+         try:
+             config = cls._process_config(config_dict)
+             config = MemoryConfig(**config_dict)
+         except ValidationError as e:
+             logger.error(f"Configuration validation error: {e}")
+             raise
+         return cls(config)
+
+     @staticmethod
+     def _process_config(config_dict: Dict[str, Any]) -> Dict[str, Any]:
+         if "graph_store" in config_dict:
+             if "vector_store" not in config_dict and "embedder" in config_dict:
+                 config_dict["vector_store"] = {}
+                 config_dict["vector_store"]["config"] = {}
+                 config_dict["vector_store"]["config"]["embedding_model_dims"] = config_dict["embedder"]["config"][
+                     "embedding_dims"
+                 ]
+         try:
+             return config_dict
+         except ValidationError as e:
+             logger.error(f"Configuration validation error: {e}")
+             raise
+
+     async def add(
+         self,
+         messages,
+         *,
+         user_id: Optional[str] = None,
+         agent_id: Optional[str] = None,
+         run_id: Optional[str] = None,
+         metadata: Optional[Dict[str, Any]] = None,
+         infer: bool = True,
+         memory_type: Optional[str] = None,
+         prompt: Optional[str] = None,
+         llm=None,
+     ):
+         """
+         Create a new memory asynchronously.
+
+         Args:
+             messages (str or List[Dict[str, str]]): Messages to store in the memory.
+             user_id (str, optional): ID of the user creating the memory. Defaults to None.
+             agent_id (str, optional): ID of the agent creating the memory. Defaults to None.
+             run_id (str, optional): ID of the run creating the memory. Defaults to None.
+             metadata (dict, optional): Metadata to store with the memory. Defaults to None.
+             infer (bool, optional): Whether to infer memories from the messages. Defaults to True.
+             memory_type (str, optional): Type of memory to create. Defaults to None.
+                 Pass "procedural_memory" to create procedural memories.
+             prompt (str, optional): Prompt to use for the memory creation. Defaults to None.
+             llm (BaseChatModel, optional): LLM instance to use for generating procedural memories.
+                 Defaults to None. Useful when using a LangChain ChatModel.
+
+         Returns:
+             dict: A dictionary containing the result of the memory addition operation.
+         """
+         processed_metadata, effective_filters = _build_filters_and_metadata(
+             user_id=user_id, agent_id=agent_id, run_id=run_id, input_metadata=metadata
+         )
+
+         if memory_type is not None and memory_type != MemoryType.PROCEDURAL.value:
+             raise ValueError(
+                 f"Invalid 'memory_type'. Please pass {MemoryType.PROCEDURAL.value} to create procedural memories."
+             )
+
+         if isinstance(messages, str):
+             messages = [{"role": "user", "content": messages}]
+
+         elif isinstance(messages, dict):
+             messages = [messages]
+
+         elif not isinstance(messages, list):
+             raise ValueError("messages must be str, dict, or list[dict]")
+
+         if agent_id is not None and memory_type == MemoryType.PROCEDURAL.value:
+             results = await self._create_procedural_memory(
+                 messages, metadata=processed_metadata, prompt=prompt, llm=llm
+             )
+             return results
+
+         if self.config.llm.config.get("enable_vision"):
+             messages = parse_vision_messages(messages, self.llm, self.config.llm.config.get("vision_details"))
+         else:
+             messages = parse_vision_messages(messages)
+
+         vector_store_task = asyncio.create_task(
+             self._add_to_vector_store(messages, processed_metadata, effective_filters, infer)
+         )
+         graph_task = asyncio.create_task(self._add_to_graph(messages, effective_filters))
+
+         vector_store_result, graph_result = await asyncio.gather(vector_store_task, graph_task)
+
+         if self.api_version == "v1.0":
+             warnings.warn(
+                 "The current add API output format is deprecated. "
+                 "To use the latest format, set `api_version='v1.1'`. "
+                 "The current format will be removed in mem0ai 1.1.0 and later versions.",
+                 category=DeprecationWarning,
+                 stacklevel=2,
+             )
+             return vector_store_result
+
+         if self.enable_graph:
+             return {
+                 "results": vector_store_result,
+                 "relations": graph_result,
+             }
+
+         return {"results": vector_store_result}
+
+     async def _add_to_vector_store(
+         self,
+         messages: list,
+         metadata: dict,
+         effective_filters: dict,
+         infer: bool,
+     ):
+         if not infer:
+             returned_memories = []
+             for message_dict in messages:
+                 if (
+                     not isinstance(message_dict, dict)
+                     or message_dict.get("role") is None
+                     or message_dict.get("content") is None
+                 ):
+                     logger.warning(f"Skipping invalid message format (async): {message_dict}")
+                     continue
+
+                 if message_dict["role"] == "system":
+                     continue
+
+                 per_msg_meta = deepcopy(metadata)
+                 per_msg_meta["role"] = message_dict["role"]
+
+                 actor_name = message_dict.get("name")
+                 if actor_name:
+                     per_msg_meta["actor_id"] = actor_name
+
+                 msg_content = message_dict["content"]
+                 msg_embeddings = await asyncio.to_thread(self.embedding_model.embed, msg_content, "add")
+                 mem_id = await self._create_memory(msg_content, msg_embeddings, per_msg_meta)
+
+                 returned_memories.append(
+                     {
+                         "id": mem_id,
+                         "memory": msg_content,
+                         "event": "ADD",
+                         "actor_id": actor_name if actor_name else None,
+                         "role": message_dict["role"],
+                     }
+                 )
+             return returned_memories
+
+         parsed_messages = parse_messages(messages)
+         if self.config.custom_fact_extraction_prompt:
+             system_prompt = self.config.custom_fact_extraction_prompt
+             user_prompt = f"Input:\n{parsed_messages}"
+         else:
+             system_prompt, user_prompt = get_fact_retrieval_messages(parsed_messages)
+
+         response = await asyncio.to_thread(
+             self.llm.generate_response,
+             messages=[{"role": "system", "content": system_prompt}, {"role": "user", "content": user_prompt}],
+             response_format={"type": "json_object"},
+         )
+         try:
+             response = remove_code_blocks(response)
+             new_retrieved_facts = json.loads(response)["facts"]
+         except Exception as e:
+             logger.error(f"Error in new_retrieved_facts: {e}")
+             new_retrieved_facts = []
+
+         if not new_retrieved_facts:
+             logger.debug("No new facts retrieved from input. Skipping memory update LLM call.")
+
+         retrieved_old_memory = []
+         new_message_embeddings = {}
+
+         async def process_fact_for_search(new_mem_content):
+             embeddings = await asyncio.to_thread(self.embedding_model.embed, new_mem_content, "add")
+             new_message_embeddings[new_mem_content] = embeddings
+             existing_mems = await asyncio.to_thread(
+                 self.vector_store.search,
+                 query=new_mem_content,
+                 vectors=embeddings,
+                 limit=5,
+                 filters=effective_filters,
+             )
+             return [{"id": mem.id, "text": mem.payload["data"]} for mem in existing_mems]
+
+         search_tasks = [process_fact_for_search(fact) for fact in new_retrieved_facts]
+         search_results_list = await asyncio.gather(*search_tasks)
+         for result_group in search_results_list:
+             retrieved_old_memory.extend(result_group)
+
+         unique_data = {}
+         for item in retrieved_old_memory:
+             unique_data[item["id"]] = item
+         retrieved_old_memory = list(unique_data.values())
+         logger.info(f"Total existing memories: {len(retrieved_old_memory)}")
+         temp_uuid_mapping = {}
+         for idx, item in enumerate(retrieved_old_memory):
+             temp_uuid_mapping[str(idx)] = item["id"]
+             retrieved_old_memory[idx]["id"] = str(idx)
+
+         if new_retrieved_facts:
+             function_calling_prompt = get_update_memory_messages(
+                 retrieved_old_memory, new_retrieved_facts, self.config.custom_update_memory_prompt
+             )
+             try:
+                 response = await asyncio.to_thread(
+                     self.llm.generate_response,
+                     messages=[{"role": "user", "content": function_calling_prompt}],
+                     response_format={"type": "json_object"},
+                 )
+             except Exception as e:
+                 logger.error(f"Error in new memory actions response: {e}")
+                 response = ""
+             try:
+                 response = remove_code_blocks(response)
+                 new_memories_with_actions = json.loads(response)
+             except Exception as e:
+                 logger.error(f"Invalid JSON response: {e}")
+                 new_memories_with_actions = {}
+         else:
+             new_memories_with_actions = {}
+
+         returned_memories = []
+         try:
+             memory_tasks = []
+             for resp in new_memories_with_actions.get("memory", []):
+                 logger.info(resp)
+                 try:
+                     action_text = resp.get("text")
+                     if not action_text:
+                         continue
+                     event_type = resp.get("event")
+
+                     if event_type == "ADD":
+                         task = asyncio.create_task(
+                             self._create_memory(
+                                 data=action_text,
+                                 existing_embeddings=new_message_embeddings,
+                                 metadata=deepcopy(metadata),
+                             )
+                         )
+                         memory_tasks.append((task, resp, "ADD", None))
+                     elif event_type == "UPDATE":
+                         task = asyncio.create_task(
+                             self._update_memory(
+                                 memory_id=temp_uuid_mapping[resp["id"]],
+                                 data=action_text,
+                                 existing_embeddings=new_message_embeddings,
+                                 metadata=deepcopy(metadata),
+                             )
+                         )
+                         memory_tasks.append((task, resp, "UPDATE", temp_uuid_mapping[resp["id"]]))
+                     elif event_type == "DELETE":
+                         task = asyncio.create_task(self._delete_memory(memory_id=temp_uuid_mapping[resp.get("id")]))
+                         memory_tasks.append((task, resp, "DELETE", temp_uuid_mapping[resp.get("id")]))
+                     elif event_type == "NONE":
+                         logger.info("NOOP for Memory (async).")
+                 except Exception as e:
+                     logger.error(f"Error processing memory action (async): {resp}, Error: {e}")
+
+             for task, resp, event_type, mem_id in memory_tasks:
+                 try:
+                     result_id = await task
+                     if event_type == "ADD":
+                         returned_memories.append({"id": result_id, "memory": resp.get("text"), "event": event_type})
+                     elif event_type == "UPDATE":
+                         returned_memories.append(
+                             {
+                                 "id": mem_id,
+                                 "memory": resp.get("text"),
+                                 "event": event_type,
+                                 "previous_memory": resp.get("old_memory"),
+                             }
+                         )
+                     elif event_type == "DELETE":
+                         returned_memories.append({"id": mem_id, "memory": resp.get("text"), "event": event_type})
+                 except Exception as e:
+                     logger.error(f"Error awaiting memory task (async): {e}")
+         except Exception as e:
+             logger.error(f"Error in memory processing loop (async): {e}")
+
+         keys, encoded_ids = process_telemetry_filters(effective_filters)
+         capture_event(
+             "mem0.add",
+             self,
+             {"version": self.api_version, "keys": keys, "encoded_ids": encoded_ids, "sync_type": "async"},
+         )
+         return returned_memories
+
+     async def _add_to_graph(self, messages, filters):
+         added_entities = []
+         if self.enable_graph:
+             if filters.get("user_id") is None:
+                 filters["user_id"] = "user"
+
+             data = "\n".join([msg["content"] for msg in messages if "content" in msg and msg["role"] != "system"])
+             added_entities = await asyncio.to_thread(self.graph.add, data, filters)
+
+         return added_entities
+
+     async def get(self, memory_id):
+         """
+         Retrieve a memory by ID asynchronously.
+
+         Args:
+             memory_id (str): ID of the memory to retrieve.
+
+         Returns:
+             dict: Retrieved memory.
+         """
+         capture_event("mem0.get", self, {"memory_id": memory_id, "sync_type": "async"})
+         memory = await asyncio.to_thread(self.vector_store.get, vector_id=memory_id)
+         if not memory:
+             return None
+
+         promoted_payload_keys = [
+             "user_id",
+             "agent_id",
+             "run_id",
+             "actor_id",
+             "role",
+         ]
+
+         core_and_promoted_keys = {"data", "hash", "created_at", "updated_at", "id", *promoted_payload_keys}
+
+         result_item = MemoryItem(
+             id=memory.id,
+             memory=memory.payload["data"],
+             hash=memory.payload.get("hash"),
+             created_at=memory.payload.get("created_at"),
+             updated_at=memory.payload.get("updated_at"),
+         ).model_dump()
+
+         for key in promoted_payload_keys:
+             if key in memory.payload:
+                 result_item[key] = memory.payload[key]
+
+         additional_metadata = {k: v for k, v in memory.payload.items() if k not in core_and_promoted_keys}
+         if additional_metadata:
+             result_item["metadata"] = additional_metadata
+
+         return result_item
+
+     async def get_all(
+         self,
+         *,
+         user_id: Optional[str] = None,
+         agent_id: Optional[str] = None,
+         run_id: Optional[str] = None,
+         filters: Optional[Dict[str, Any]] = None,
+         limit: int = 100,
+     ):
+         """
+         List all memories.
+
+         Args:
+             user_id (str, optional): user id
+             agent_id (str, optional): agent id
+             run_id (str, optional): run id
+             filters (dict, optional): Additional custom key-value filters to apply to the search.
+                 These are merged with the ID-based scoping filters. For example,
+                 `filters={"actor_id": "some_user"}`.
+             limit (int, optional): The maximum number of memories to return. Defaults to 100.
+
+         Returns:
+             dict: A dictionary containing a list of memories under the "results" key,
+                 and potentially "relations" if graph store is enabled. For API v1.0,
+                 it might return a direct list (see deprecation warning).
+                 Example for v1.1+: `{"results": [{"id": "...", "memory": "...", ...}]}`
+         """
+
+         _, effective_filters = _build_filters_and_metadata(
+             user_id=user_id, agent_id=agent_id, run_id=run_id, input_filters=filters
+         )
+
+         if not any(key in effective_filters for key in ("user_id", "agent_id", "run_id")):
+             raise ValueError(
+                 "At least one of 'user_id', 'agent_id', or 'run_id' must be specified for get_all."
+             )
1381
+
1382
+ keys, encoded_ids = process_telemetry_filters(effective_filters)
1383
+ capture_event(
1384
+ "mem0.get_all", self, {"limit": limit, "keys": keys, "encoded_ids": encoded_ids, "sync_type": "async"}
1385
+ )
1386
+
1387
+ with concurrent.futures.ThreadPoolExecutor() as executor:
1388
+ future_memories = executor.submit(self._get_all_from_vector_store, effective_filters, limit)
1389
+ future_graph_entities = (
1390
+ executor.submit(self.graph.get_all, effective_filters, limit) if self.enable_graph else None
1391
+ )
1392
+
1393
+ concurrent.futures.wait(
1394
+ [future_memories, future_graph_entities] if future_graph_entities else [future_memories]
1395
+ )
1396
+
1397
+ all_memories_result = future_memories.result()
1398
+ graph_entities_result = future_graph_entities.result() if future_graph_entities else None
1399
+
1400
+ if self.enable_graph:
1401
+ return {"results": all_memories_result, "relations": graph_entities_result}
1402
+
1403
+ if self.api_version == "v1.0":
1404
+ warnings.warn(
1405
+ "The current get_all API output format is deprecated. "
1406
+ "To use the latest format, set `api_version='v1.1'` (which returns a dict with a 'results' key). "
1407
+ "The current format (direct list for v1.0) will be removed in mem0ai 1.1.0 and later versions.",
1408
+ category=DeprecationWarning,
1409
+ stacklevel=2,
1410
+ )
1411
+ return all_memories_result
1412
+ else:
1413
+ return {"results": all_memories_result}
1414
+
1415
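A short usage sketch (assuming `api_version` v1.1+ and an illustrative instance `m`):

    # Illustrative only — at least one of user_id / agent_id / run_id is required
    page = await m.get_all(user_id="alice", filters={"actor_id": "alice"}, limit=10)
    for item in page["results"]:
        print(item["id"], item["memory"])
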
+    async def _get_all_from_vector_store(self, filters, limit):
+        memories_result = await asyncio.to_thread(self.vector_store.list, filters=filters, limit=limit)
+        actual_memories = (
+            memories_result[0]
+            if isinstance(memories_result, (tuple, list)) and len(memories_result) > 0
+            else memories_result
+        )
+
+        promoted_payload_keys = [
+            "user_id",
+            "agent_id",
+            "run_id",
+            "actor_id",
+            "role",
+        ]
+        core_and_promoted_keys = {"data", "hash", "created_at", "updated_at", "id", *promoted_payload_keys}
+
+        formatted_memories = []
+        for mem in actual_memories:
+            memory_item_dict = MemoryItem(
+                id=mem.id,
+                memory=mem.payload["data"],
+                hash=mem.payload.get("hash"),
+                created_at=mem.payload.get("created_at"),
+                updated_at=mem.payload.get("updated_at"),
+            ).model_dump(exclude={"score"})
+
+            for key in promoted_payload_keys:
+                if key in mem.payload:
+                    memory_item_dict[key] = mem.payload[key]
+
+            additional_metadata = {k: v for k, v in mem.payload.items() if k not in core_and_promoted_keys}
+            if additional_metadata:
+                memory_item_dict["metadata"] = additional_metadata
+
+            formatted_memories.append(memory_item_dict)
+
+        return formatted_memories
+
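To make the flattening above concrete, a hypothetical stored payload and the item it becomes:

    # Illustrative payload from the vector store
    payload = {"data": "Likes green tea", "hash": "<md5>", "created_at": "<ts>",
               "user_id": "alice", "mood": "calm"}
    # Promoted keys stay top-level; unrecognized keys move under "metadata":
    # {"id": "<id>", "memory": "Likes green tea", "user_id": "alice",
    #  "metadata": {"mood": "calm"}, ...}
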
+    async def search(
+        self,
+        query: str,
+        *,
+        user_id: Optional[str] = None,
+        agent_id: Optional[str] = None,
+        run_id: Optional[str] = None,
+        limit: int = 100,
+        filters: Optional[Dict[str, Any]] = None,
+        threshold: Optional[float] = None,
+    ):
+        """
+        Search for memories based on a query.
+
+        Args:
+            query (str): Query to search for.
+            user_id (str, optional): ID of the user to search for. Defaults to None.
+            agent_id (str, optional): ID of the agent to search for. Defaults to None.
+            run_id (str, optional): ID of the run to search for. Defaults to None.
+            limit (int, optional): Limit the number of results. Defaults to 100.
+            filters (dict, optional): Filters to apply to the search. Defaults to None.
+            threshold (float, optional): Minimum score for a memory to be included in the results. Defaults to None.
+
+        Returns:
+            dict: A dictionary containing the search results, typically under a "results" key,
+                and potentially "relations" if graph store is enabled.
+                Example for v1.1+: `{"results": [{"id": "...", "memory": "...", "score": 0.8, ...}]}`
+        """
+
+        _, effective_filters = _build_filters_and_metadata(
+            user_id=user_id, agent_id=agent_id, run_id=run_id, input_filters=filters
+        )
+
+        if not any(key in effective_filters for key in ("user_id", "agent_id", "run_id")):
+            raise ValueError("At least one of 'user_id', 'agent_id', or 'run_id' must be specified.")
+
+        keys, encoded_ids = process_telemetry_filters(effective_filters)
+        capture_event(
+            "mem0.search",
+            self,
+            {
+                "limit": limit,
+                "version": self.api_version,
+                "keys": keys,
+                "encoded_ids": encoded_ids,
+                "sync_type": "async",
+                "threshold": threshold,
+            },
+        )
+
+        vector_store_task = asyncio.create_task(self._search_vector_store(query, effective_filters, limit, threshold))
+
+        graph_task = None
+        if self.enable_graph:
+            if asyncio.iscoroutinefunction(self.graph.search):  # Check if graph search is async
+                graph_task = asyncio.create_task(self.graph.search(query, effective_filters, limit))
+            else:
+                graph_task = asyncio.create_task(asyncio.to_thread(self.graph.search, query, effective_filters, limit))
+
+        if graph_task:
+            original_memories, graph_entities = await asyncio.gather(vector_store_task, graph_task)
+        else:
+            original_memories = await vector_store_task
+            graph_entities = None
+
+        if self.enable_graph:
+            return {"results": original_memories, "relations": graph_entities}
+
+        if self.api_version == "v1.0":
+            warnings.warn(
+                "The current search API output format is deprecated. "
+                "To use the latest format, set `api_version='v1.1'`. "
+                "The current format will be removed in mem0ai 1.1.0 and later versions.",
+                category=DeprecationWarning,
+                stacklevel=2,
+            )
+        return {"results": original_memories}
+
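A usage sketch (illustrative instance `m`; `threshold` drops low-scoring hits inside `_search_vector_store` below):

    # Illustrative only
    hits = await m.search("what tea does the user like?", user_id="alice",
                          limit=5, threshold=0.5)
    for hit in hits["results"]:
        print(round(hit["score"], 2), hit["memory"])
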
+    async def _search_vector_store(self, query, filters, limit, threshold: Optional[float] = None):
+        embeddings = await asyncio.to_thread(self.embedding_model.embed, query, "search")
+        memories = await asyncio.to_thread(
+            self.vector_store.search, query=query, vectors=embeddings, limit=limit, filters=filters
+        )
+
+        promoted_payload_keys = [
+            "user_id",
+            "agent_id",
+            "run_id",
+            "actor_id",
+            "role",
+        ]
+
+        core_and_promoted_keys = {"data", "hash", "created_at", "updated_at", "id", *promoted_payload_keys}
+
+        original_memories = []
+        for mem in memories:
+            memory_item_dict = MemoryItem(
+                id=mem.id,
+                memory=mem.payload["data"],
+                hash=mem.payload.get("hash"),
+                created_at=mem.payload.get("created_at"),
+                updated_at=mem.payload.get("updated_at"),
+                score=mem.score,
+            ).model_dump()
+
+            for key in promoted_payload_keys:
+                if key in mem.payload:
+                    memory_item_dict[key] = mem.payload[key]
+
+            additional_metadata = {k: v for k, v in mem.payload.items() if k not in core_and_promoted_keys}
+            if additional_metadata:
+                memory_item_dict["metadata"] = additional_metadata
+
+            if threshold is None or mem.score >= threshold:
+                original_memories.append(memory_item_dict)
+
+        return original_memories
+
+    async def update(self, memory_id, data):
+        """
+        Update a memory by ID asynchronously.
+
+        Args:
+            memory_id (str): ID of the memory to update.
+            data (str): New content to update the memory with.
+
+        Returns:
+            dict: Confirmation message.
+        """
+        capture_event("mem0.update", self, {"memory_id": memory_id, "sync_type": "async"})
+
+        embeddings = await asyncio.to_thread(self.embedding_model.embed, data, "update")
+        existing_embeddings = {data: embeddings}
+
+        await self._update_memory(memory_id, data, existing_embeddings)
+        return {"message": "Memory updated successfully!"}
+
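Usage sketch (illustrative instance `m` and `memory_id`):

    # Illustrative only — re-embeds the new text and overwrites the stored payload
    await m.update(memory_id, data="Prefers green tea over black tea")
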
+    async def delete(self, memory_id):
+        """
+        Delete a memory by ID asynchronously.
+
+        Args:
+            memory_id (str): ID of the memory to delete.
+        """
+        capture_event("mem0.delete", self, {"memory_id": memory_id, "sync_type": "async"})
+        await self._delete_memory(memory_id)
+        return {"message": "Memory deleted successfully!"}
+
+    async def delete_all(self, user_id=None, agent_id=None, run_id=None):
+        """
+        Delete all memories asynchronously.
+
+        Args:
+            user_id (str, optional): ID of the user to delete memories for. Defaults to None.
+            agent_id (str, optional): ID of the agent to delete memories for. Defaults to None.
+            run_id (str, optional): ID of the run to delete memories for. Defaults to None.
+        """
+        filters = {}
+        if user_id:
+            filters["user_id"] = user_id
+        if agent_id:
+            filters["agent_id"] = agent_id
+        if run_id:
+            filters["run_id"] = run_id
+
+        if not filters:
+            raise ValueError(
+                "At least one filter is required to delete all memories. If you want to delete all memories, use the `reset()` method."
+            )
+
+        keys, encoded_ids = process_telemetry_filters(filters)
+        capture_event("mem0.delete_all", self, {"keys": keys, "encoded_ids": encoded_ids, "sync_type": "async"})
+        memories = await asyncio.to_thread(self.vector_store.list, filters=filters)
+
+        delete_tasks = []
+        for memory in memories[0]:
+            delete_tasks.append(self._delete_memory(memory.id))
+
+        await asyncio.gather(*delete_tasks)
+
+        logger.info(f"Deleted {len(memories[0])} memories")
+
+        if self.enable_graph:
+            await asyncio.to_thread(self.graph.delete_all, filters)
+
+        return {"message": "Memories deleted successfully!"}
+
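A sketch of the scoping guard in practice (illustrative instance `m`):

    # Illustrative only — deletions for the matched memories run concurrently
    await m.delete_all(user_id="alice")
    # await m.delete_all()   # raises ValueError: at least one filter is required
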
+    async def history(self, memory_id):
+        """
+        Get the history of changes for a memory by ID asynchronously.
+
+        Args:
+            memory_id (str): ID of the memory to get history for.
+
+        Returns:
+            list: List of changes for the memory.
+        """
+        capture_event("mem0.history", self, {"memory_id": memory_id, "sync_type": "async"})
+        return await asyncio.to_thread(self.db.get_history, memory_id)
+
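Usage sketch (illustrative; entries are the ADD/UPDATE/DELETE rows written via `db.add_history`):

    # Illustrative only
    for entry in await m.history(memory_id):
        print(entry)
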
+    async def _create_memory(self, data, existing_embeddings, metadata=None):
+        logger.debug(f"Creating memory with {data=}")
+        if data in existing_embeddings:
+            embeddings = existing_embeddings[data]
+        else:
+            embeddings = await asyncio.to_thread(self.embedding_model.embed, data, memory_action="add")
+
+        memory_id = str(uuid.uuid4())
+        metadata = metadata or {}
+        metadata["data"] = data
+        metadata["hash"] = hashlib.md5(data.encode()).hexdigest()
+        metadata["created_at"] = datetime.now(pytz.timezone("US/Pacific")).isoformat()
+
+        await asyncio.to_thread(
+            self.vector_store.insert,
+            vectors=[embeddings],
+            ids=[memory_id],
+            payloads=[metadata],
+        )
+
+        await asyncio.to_thread(
+            self.db.add_history,
+            memory_id,
+            None,
+            data,
+            "ADD",
+            created_at=metadata.get("created_at"),
+            actor_id=metadata.get("actor_id"),
+            role=metadata.get("role"),
+        )
+
+        capture_event("mem0._create_memory", self, {"memory_id": memory_id, "sync_type": "async"})
+        return memory_id
+
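The payload written above fingerprints each memory by an MD5 content hash; the same value can be reproduced directly:

    import hashlib
    # Matches what _create_memory stores under payload["hash"]
    print(hashlib.md5("Likes green tea".encode()).hexdigest())
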
+    async def _create_procedural_memory(self, messages, metadata=None, llm=None, prompt=None):
+        """
+        Create a procedural memory asynchronously.
+
+        Args:
+            messages (list): List of messages to create a procedural memory from.
+            metadata (dict): Metadata to attach to the procedural memory. Required.
+            llm (LLM, optional): LLM to use for the procedural memory creation. Defaults to None.
+            prompt (str, optional): Prompt to use for the procedural memory creation. Defaults to None.
+        """
+        try:
+            from langchain_core.messages.utils import (
+                convert_to_messages,  # type: ignore
+            )
+        except ImportError:
+            logger.error(
+                "Import error while loading langchain-core. Please install 'langchain-core' to use procedural memory."
+            )
+            raise
+
+        logger.info("Creating procedural memory")
+
+        parsed_messages = [
+            {"role": "system", "content": prompt or PROCEDURAL_MEMORY_SYSTEM_PROMPT},
+            *messages,
+            {"role": "user", "content": "Create procedural memory of the above conversation."},
+        ]
+
+        try:
+            if llm is not None:
+                parsed_messages = convert_to_messages(parsed_messages)
+                response = await asyncio.to_thread(llm.invoke, input=parsed_messages)
+                procedural_memory = response.content
+            else:
+                procedural_memory = await asyncio.to_thread(self.llm.generate_response, messages=parsed_messages)
+        except Exception as e:
+            logger.error(f"Error generating procedural memory summary: {e}")
+            raise
+
+        if metadata is None:
+            raise ValueError("Metadata is required for procedural memory.")
+
+        metadata["memory_type"] = MemoryType.PROCEDURAL.value
+        embeddings = await asyncio.to_thread(self.embedding_model.embed, procedural_memory, memory_action="add")
+        memory_id = await self._create_memory(procedural_memory, {procedural_memory: embeddings}, metadata=metadata)
+        capture_event("mem0._create_procedural_memory", self, {"memory_id": memory_id, "sync_type": "async"})
+
+        result = {"results": [{"id": memory_id, "memory": procedural_memory, "event": "ADD"}]}
+
+        return result
+
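A sketch of exercising this helper (a private method, presumably reached via the public add flow; called directly here only for illustration, with hypothetical arguments):

    # Illustrative only — metadata is mandatory for procedural memories
    result = await m._create_procedural_memory(
        messages=[{"role": "user", "content": "Step 1: open the dashboard"}],
        metadata={"user_id": "alice"},
    )
    print(result["results"][0]["event"])   # "ADD"
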
+    async def _update_memory(self, memory_id, data, existing_embeddings, metadata=None):
+        logger.info(f"Updating memory with {data=}")
+
+        try:
+            existing_memory = await asyncio.to_thread(self.vector_store.get, vector_id=memory_id)
+        except Exception:
+            logger.error(f"Error getting memory with ID {memory_id} during update.")
+            raise ValueError(f"Error getting memory with ID {memory_id}. Please provide a valid 'memory_id'")
+
+        prev_value = existing_memory.payload.get("data")
+
+        new_metadata = deepcopy(metadata) if metadata is not None else {}
+
+        new_metadata["data"] = data
+        new_metadata["hash"] = hashlib.md5(data.encode()).hexdigest()
+        new_metadata["created_at"] = existing_memory.payload.get("created_at")
+        new_metadata["updated_at"] = datetime.now(pytz.timezone("US/Pacific")).isoformat()
+
+        if "user_id" in existing_memory.payload:
+            new_metadata["user_id"] = existing_memory.payload["user_id"]
+        if "agent_id" in existing_memory.payload:
+            new_metadata["agent_id"] = existing_memory.payload["agent_id"]
+        if "run_id" in existing_memory.payload:
+            new_metadata["run_id"] = existing_memory.payload["run_id"]
+
+        if "actor_id" in existing_memory.payload:
+            new_metadata["actor_id"] = existing_memory.payload["actor_id"]
+        if "role" in existing_memory.payload:
+            new_metadata["role"] = existing_memory.payload["role"]
+
+        if data in existing_embeddings:
+            embeddings = existing_embeddings[data]
+        else:
+            embeddings = await asyncio.to_thread(self.embedding_model.embed, data, "update")
+
+        await asyncio.to_thread(
+            self.vector_store.update,
+            vector_id=memory_id,
+            vector=embeddings,
+            payload=new_metadata,
+        )
+        logger.info(f"Updating memory with ID {memory_id=} with {data=}")
+
+        await asyncio.to_thread(
+            self.db.add_history,
+            memory_id,
+            prev_value,
+            data,
+            "UPDATE",
+            created_at=new_metadata["created_at"],
+            updated_at=new_metadata["updated_at"],
+            actor_id=new_metadata.get("actor_id"),
+            role=new_metadata.get("role"),
+        )
+        capture_event("mem0._update_memory", self, {"memory_id": memory_id, "sync_type": "async"})
+        return memory_id
+
+    async def _delete_memory(self, memory_id):
+        logger.info(f"Deleting memory with {memory_id=}")
+        existing_memory = await asyncio.to_thread(self.vector_store.get, vector_id=memory_id)
+        prev_value = existing_memory.payload["data"]
+
+        await asyncio.to_thread(self.vector_store.delete, vector_id=memory_id)
+        await asyncio.to_thread(
+            self.db.add_history,
+            memory_id,
+            prev_value,
+            None,
+            "DELETE",
+            actor_id=existing_memory.payload.get("actor_id"),
+            role=existing_memory.payload.get("role"),
+            is_deleted=1,
+        )
+
+        capture_event("mem0._delete_memory", self, {"memory_id": memory_id, "sync_type": "async"})
+        return memory_id
+
+    async def reset(self):
+        """
+        Reset the memory store asynchronously by:
+            deleting the vector store collection,
+            resetting the database, and
+            recreating the vector store with a new client.
+        """
+        logger.warning("Resetting all memories")
+        await asyncio.to_thread(self.vector_store.delete_col)
+
+        gc.collect()
+
+        if hasattr(self.vector_store, "client") and hasattr(self.vector_store.client, "close"):
+            await asyncio.to_thread(self.vector_store.client.close)
+
+        await asyncio.to_thread(self.db.reset)
+        await asyncio.to_thread(self.db.close)
+
+        self.db = DBFactory.create(self.config.db.provider, self.config.db.config)
+
+        self.vector_store = VectorStoreFactory.create(
+            self.config.vector_store.provider, self.config.vector_store.config
+        )
+        capture_event("mem0.reset", self, {"sync_type": "async"})
+
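Usage sketch (illustrative; unlike the filtered `delete_all`, this is a full, irreversible wipe):

    # Illustrative only — drops the vector collection and resets the history DB
    await m.reset()
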
+    async def chat(self, query):
+        raise NotImplementedError("Chat function not implemented yet.")