agentrun-mem0ai 0.0.11__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- agentrun_mem0/__init__.py +6 -0
- agentrun_mem0/client/__init__.py +0 -0
- agentrun_mem0/client/main.py +1747 -0
- agentrun_mem0/client/project.py +931 -0
- agentrun_mem0/client/utils.py +115 -0
- agentrun_mem0/configs/__init__.py +0 -0
- agentrun_mem0/configs/base.py +90 -0
- agentrun_mem0/configs/embeddings/__init__.py +0 -0
- agentrun_mem0/configs/embeddings/base.py +110 -0
- agentrun_mem0/configs/enums.py +7 -0
- agentrun_mem0/configs/llms/__init__.py +0 -0
- agentrun_mem0/configs/llms/anthropic.py +56 -0
- agentrun_mem0/configs/llms/aws_bedrock.py +192 -0
- agentrun_mem0/configs/llms/azure.py +57 -0
- agentrun_mem0/configs/llms/base.py +62 -0
- agentrun_mem0/configs/llms/deepseek.py +56 -0
- agentrun_mem0/configs/llms/lmstudio.py +59 -0
- agentrun_mem0/configs/llms/ollama.py +56 -0
- agentrun_mem0/configs/llms/openai.py +79 -0
- agentrun_mem0/configs/llms/vllm.py +56 -0
- agentrun_mem0/configs/prompts.py +459 -0
- agentrun_mem0/configs/rerankers/__init__.py +0 -0
- agentrun_mem0/configs/rerankers/base.py +17 -0
- agentrun_mem0/configs/rerankers/cohere.py +15 -0
- agentrun_mem0/configs/rerankers/config.py +12 -0
- agentrun_mem0/configs/rerankers/huggingface.py +17 -0
- agentrun_mem0/configs/rerankers/llm.py +48 -0
- agentrun_mem0/configs/rerankers/sentence_transformer.py +16 -0
- agentrun_mem0/configs/rerankers/zero_entropy.py +28 -0
- agentrun_mem0/configs/vector_stores/__init__.py +0 -0
- agentrun_mem0/configs/vector_stores/alibabacloud_mysql.py +64 -0
- agentrun_mem0/configs/vector_stores/aliyun_tablestore.py +32 -0
- agentrun_mem0/configs/vector_stores/azure_ai_search.py +57 -0
- agentrun_mem0/configs/vector_stores/azure_mysql.py +84 -0
- agentrun_mem0/configs/vector_stores/baidu.py +27 -0
- agentrun_mem0/configs/vector_stores/chroma.py +58 -0
- agentrun_mem0/configs/vector_stores/databricks.py +61 -0
- agentrun_mem0/configs/vector_stores/elasticsearch.py +65 -0
- agentrun_mem0/configs/vector_stores/faiss.py +37 -0
- agentrun_mem0/configs/vector_stores/langchain.py +30 -0
- agentrun_mem0/configs/vector_stores/milvus.py +42 -0
- agentrun_mem0/configs/vector_stores/mongodb.py +25 -0
- agentrun_mem0/configs/vector_stores/neptune.py +27 -0
- agentrun_mem0/configs/vector_stores/opensearch.py +41 -0
- agentrun_mem0/configs/vector_stores/pgvector.py +52 -0
- agentrun_mem0/configs/vector_stores/pinecone.py +55 -0
- agentrun_mem0/configs/vector_stores/qdrant.py +47 -0
- agentrun_mem0/configs/vector_stores/redis.py +24 -0
- agentrun_mem0/configs/vector_stores/s3_vectors.py +28 -0
- agentrun_mem0/configs/vector_stores/supabase.py +44 -0
- agentrun_mem0/configs/vector_stores/upstash_vector.py +34 -0
- agentrun_mem0/configs/vector_stores/valkey.py +15 -0
- agentrun_mem0/configs/vector_stores/vertex_ai_vector_search.py +28 -0
- agentrun_mem0/configs/vector_stores/weaviate.py +41 -0
- agentrun_mem0/embeddings/__init__.py +0 -0
- agentrun_mem0/embeddings/aws_bedrock.py +100 -0
- agentrun_mem0/embeddings/azure_openai.py +55 -0
- agentrun_mem0/embeddings/base.py +31 -0
- agentrun_mem0/embeddings/configs.py +30 -0
- agentrun_mem0/embeddings/gemini.py +39 -0
- agentrun_mem0/embeddings/huggingface.py +44 -0
- agentrun_mem0/embeddings/langchain.py +35 -0
- agentrun_mem0/embeddings/lmstudio.py +29 -0
- agentrun_mem0/embeddings/mock.py +11 -0
- agentrun_mem0/embeddings/ollama.py +53 -0
- agentrun_mem0/embeddings/openai.py +49 -0
- agentrun_mem0/embeddings/together.py +31 -0
- agentrun_mem0/embeddings/vertexai.py +64 -0
- agentrun_mem0/exceptions.py +503 -0
- agentrun_mem0/graphs/__init__.py +0 -0
- agentrun_mem0/graphs/configs.py +105 -0
- agentrun_mem0/graphs/neptune/__init__.py +0 -0
- agentrun_mem0/graphs/neptune/base.py +497 -0
- agentrun_mem0/graphs/neptune/neptunedb.py +511 -0
- agentrun_mem0/graphs/neptune/neptunegraph.py +474 -0
- agentrun_mem0/graphs/tools.py +371 -0
- agentrun_mem0/graphs/utils.py +97 -0
- agentrun_mem0/llms/__init__.py +0 -0
- agentrun_mem0/llms/anthropic.py +87 -0
- agentrun_mem0/llms/aws_bedrock.py +665 -0
- agentrun_mem0/llms/azure_openai.py +141 -0
- agentrun_mem0/llms/azure_openai_structured.py +91 -0
- agentrun_mem0/llms/base.py +131 -0
- agentrun_mem0/llms/configs.py +34 -0
- agentrun_mem0/llms/deepseek.py +107 -0
- agentrun_mem0/llms/gemini.py +201 -0
- agentrun_mem0/llms/groq.py +88 -0
- agentrun_mem0/llms/langchain.py +94 -0
- agentrun_mem0/llms/litellm.py +87 -0
- agentrun_mem0/llms/lmstudio.py +114 -0
- agentrun_mem0/llms/ollama.py +117 -0
- agentrun_mem0/llms/openai.py +147 -0
- agentrun_mem0/llms/openai_structured.py +52 -0
- agentrun_mem0/llms/sarvam.py +89 -0
- agentrun_mem0/llms/together.py +88 -0
- agentrun_mem0/llms/vllm.py +107 -0
- agentrun_mem0/llms/xai.py +52 -0
- agentrun_mem0/memory/__init__.py +0 -0
- agentrun_mem0/memory/base.py +63 -0
- agentrun_mem0/memory/graph_memory.py +698 -0
- agentrun_mem0/memory/kuzu_memory.py +713 -0
- agentrun_mem0/memory/main.py +2229 -0
- agentrun_mem0/memory/memgraph_memory.py +689 -0
- agentrun_mem0/memory/setup.py +56 -0
- agentrun_mem0/memory/storage.py +218 -0
- agentrun_mem0/memory/telemetry.py +90 -0
- agentrun_mem0/memory/utils.py +208 -0
- agentrun_mem0/proxy/__init__.py +0 -0
- agentrun_mem0/proxy/main.py +189 -0
- agentrun_mem0/reranker/__init__.py +9 -0
- agentrun_mem0/reranker/base.py +20 -0
- agentrun_mem0/reranker/cohere_reranker.py +85 -0
- agentrun_mem0/reranker/huggingface_reranker.py +147 -0
- agentrun_mem0/reranker/llm_reranker.py +142 -0
- agentrun_mem0/reranker/sentence_transformer_reranker.py +107 -0
- agentrun_mem0/reranker/zero_entropy_reranker.py +96 -0
- agentrun_mem0/utils/factory.py +283 -0
- agentrun_mem0/utils/gcp_auth.py +167 -0
- agentrun_mem0/vector_stores/__init__.py +0 -0
- agentrun_mem0/vector_stores/alibabacloud_mysql.py +547 -0
- agentrun_mem0/vector_stores/aliyun_tablestore.py +252 -0
- agentrun_mem0/vector_stores/azure_ai_search.py +396 -0
- agentrun_mem0/vector_stores/azure_mysql.py +463 -0
- agentrun_mem0/vector_stores/baidu.py +368 -0
- agentrun_mem0/vector_stores/base.py +58 -0
- agentrun_mem0/vector_stores/chroma.py +332 -0
- agentrun_mem0/vector_stores/configs.py +67 -0
- agentrun_mem0/vector_stores/databricks.py +761 -0
- agentrun_mem0/vector_stores/elasticsearch.py +237 -0
- agentrun_mem0/vector_stores/faiss.py +479 -0
- agentrun_mem0/vector_stores/langchain.py +180 -0
- agentrun_mem0/vector_stores/milvus.py +250 -0
- agentrun_mem0/vector_stores/mongodb.py +310 -0
- agentrun_mem0/vector_stores/neptune_analytics.py +467 -0
- agentrun_mem0/vector_stores/opensearch.py +292 -0
- agentrun_mem0/vector_stores/pgvector.py +404 -0
- agentrun_mem0/vector_stores/pinecone.py +382 -0
- agentrun_mem0/vector_stores/qdrant.py +270 -0
- agentrun_mem0/vector_stores/redis.py +295 -0
- agentrun_mem0/vector_stores/s3_vectors.py +176 -0
- agentrun_mem0/vector_stores/supabase.py +237 -0
- agentrun_mem0/vector_stores/upstash_vector.py +293 -0
- agentrun_mem0/vector_stores/valkey.py +824 -0
- agentrun_mem0/vector_stores/vertex_ai_vector_search.py +635 -0
- agentrun_mem0/vector_stores/weaviate.py +343 -0
- agentrun_mem0ai-0.0.11.data/data/README.md +205 -0
- agentrun_mem0ai-0.0.11.dist-info/METADATA +277 -0
- agentrun_mem0ai-0.0.11.dist-info/RECORD +150 -0
- agentrun_mem0ai-0.0.11.dist-info/WHEEL +4 -0
- agentrun_mem0ai-0.0.11.dist-info/licenses/LICENSE +201 -0
agentrun_mem0/vector_stores/elasticsearch.py
@@ -0,0 +1,237 @@
import logging
from typing import Any, Dict, List, Optional

try:
    from elasticsearch import Elasticsearch
    from elasticsearch.helpers import bulk
except ImportError:
    raise ImportError("Elasticsearch requires extra dependencies. Install with `pip install elasticsearch`") from None

from pydantic import BaseModel

from agentrun_mem0.configs.vector_stores.elasticsearch import ElasticsearchConfig
from agentrun_mem0.vector_stores.base import VectorStoreBase

logger = logging.getLogger(__name__)


class OutputData(BaseModel):
    id: str
    score: float
    payload: Dict


class ElasticsearchDB(VectorStoreBase):
    def __init__(self, **kwargs):
        config = ElasticsearchConfig(**kwargs)

        # Initialize Elasticsearch client
        if config.cloud_id:
            self.client = Elasticsearch(
                cloud_id=config.cloud_id,
                api_key=config.api_key,
                verify_certs=config.verify_certs,
                headers=config.headers or {},
            )
        else:
            self.client = Elasticsearch(
                hosts=[f"{config.host}" if config.port is None else f"{config.host}:{config.port}"],
                basic_auth=(config.user, config.password) if (config.user and config.password) else None,
                verify_certs=config.verify_certs,
                headers=config.headers or {},
            )

        self.collection_name = config.collection_name
        self.embedding_model_dims = config.embedding_model_dims

        # Create index only if auto_create_index is True
        if config.auto_create_index:
            self.create_index()

        if config.custom_search_query:
            self.custom_search_query = config.custom_search_query
        else:
            self.custom_search_query = None

    def create_index(self) -> None:
        """Create Elasticsearch index with proper mappings if it doesn't exist"""
        index_settings = {
            "settings": {"index": {"number_of_replicas": 1, "number_of_shards": 5, "refresh_interval": "1s"}},
            "mappings": {
                "properties": {
                    "text": {"type": "text"},
                    "vector": {
                        "type": "dense_vector",
                        "dims": self.embedding_model_dims,
                        "index": True,
                        "similarity": "cosine",
                    },
                    "metadata": {"type": "object", "properties": {"user_id": {"type": "keyword"}}},
                }
            },
        }

        if not self.client.indices.exists(index=self.collection_name):
            self.client.indices.create(index=self.collection_name, body=index_settings)
            logger.info(f"Created index {self.collection_name}")
        else:
            logger.info(f"Index {self.collection_name} already exists")

    def create_col(self, name: str, vector_size: int, distance: str = "cosine") -> None:
        """Create a new collection (index in Elasticsearch)."""
        index_settings = {
            "mappings": {
                "properties": {
                    "vector": {"type": "dense_vector", "dims": vector_size, "index": True, "similarity": "cosine"},
                    "payload": {"type": "object"},
                    "id": {"type": "keyword"},
                }
            }
        }

        if not self.client.indices.exists(index=name):
            self.client.indices.create(index=name, body=index_settings)
            logger.info(f"Created index {name}")

    def insert(
        self, vectors: List[List[float]], payloads: Optional[List[Dict]] = None, ids: Optional[List[str]] = None
    ) -> List[OutputData]:
        """Insert vectors into the index."""
        if not ids:
            ids = [str(i) for i in range(len(vectors))]

        if payloads is None:
            payloads = [{} for _ in range(len(vectors))]

        actions = []
        for i, (vec, id_) in enumerate(zip(vectors, ids)):
            action = {
                "_index": self.collection_name,
                "_id": id_,
                "_source": {
                    "vector": vec,
                    "metadata": payloads[i],  # Store all metadata in the metadata field
                },
            }
            actions.append(action)

        bulk(self.client, actions)

        results = []
        for i, id_ in enumerate(ids):
            results.append(
                OutputData(
                    id=id_,
                    score=1.0,  # Default score for inserts
                    payload=payloads[i],
                )
            )
        return results

    def search(
        self, query: str, vectors: List[float], limit: int = 5, filters: Optional[Dict] = None
    ) -> List[OutputData]:
        """
        Search with two options:
        1. Use custom search query if provided
        2. Use KNN search on vectors with pre-filtering if no custom search query is provided
        """
        if self.custom_search_query:
            search_query = self.custom_search_query(vectors, limit, filters)
        else:
            search_query = {
                "knn": {"field": "vector", "query_vector": vectors, "k": limit, "num_candidates": limit * 2}
            }
            if filters:
                filter_conditions = []
                for key, value in filters.items():
                    filter_conditions.append({"term": {f"metadata.{key}": value}})
                search_query["knn"]["filter"] = {"bool": {"must": filter_conditions}}

        response = self.client.search(index=self.collection_name, body=search_query)

        results = []
        for hit in response["hits"]["hits"]:
            results.append(
                OutputData(id=hit["_id"], score=hit["_score"], payload=hit.get("_source", {}).get("metadata", {}))
            )

        return results

    def delete(self, vector_id: str) -> None:
        """Delete a vector by ID."""
        self.client.delete(index=self.collection_name, id=vector_id)

    def update(self, vector_id: str, vector: Optional[List[float]] = None, payload: Optional[Dict] = None) -> None:
        """Update a vector and its payload."""
        doc = {}
        if vector is not None:
            doc["vector"] = vector
        if payload is not None:
            doc["metadata"] = payload

        self.client.update(index=self.collection_name, id=vector_id, body={"doc": doc})

    def get(self, vector_id: str) -> Optional[OutputData]:
        """Retrieve a vector by ID."""
        try:
            response = self.client.get(index=self.collection_name, id=vector_id)
            return OutputData(
                id=response["_id"],
                score=1.0,  # Default score for direct get
                payload=response["_source"].get("metadata", {}),
            )
        except KeyError as e:
            logger.warning(f"Missing key in Elasticsearch response: {e}")
            return None
        except TypeError as e:
            logger.warning(f"Invalid response type from Elasticsearch: {e}")
            return None
        except Exception as e:
            logger.error(f"Unexpected error while parsing Elasticsearch response: {e}")
            return None

    def list_cols(self) -> List[str]:
        """List all collections (indices)."""
        return list(self.client.indices.get_alias().keys())

    def delete_col(self) -> None:
        """Delete a collection (index)."""
        self.client.indices.delete(index=self.collection_name)

    def col_info(self, name: str) -> Any:
        """Get information about a collection (index)."""
        return self.client.indices.get(index=name)

    def list(self, filters: Optional[Dict] = None, limit: Optional[int] = None) -> List[List[OutputData]]:
        """List all memories."""
        query: Dict[str, Any] = {"query": {"match_all": {}}}

        if filters:
            filter_conditions = []
            for key, value in filters.items():
                filter_conditions.append({"term": {f"metadata.{key}": value}})
            query["query"] = {"bool": {"must": filter_conditions}}

        if limit:
            query["size"] = limit

        response = self.client.search(index=self.collection_name, body=query)

        results = []
        for hit in response["hits"]["hits"]:
            results.append(
                OutputData(
                    id=hit["_id"],
                    score=1.0,  # Default score for list operation
                    payload=hit.get("_source", {}).get("metadata", {}),
                )
            )

        return [results]

    def reset(self):
        """Reset the index by deleting and recreating it."""
        logger.warning(f"Resetting index {self.collection_name}...")
        self.delete_col()
        self.create_index()