mem0ai-azure-mysql 0.1.115__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- mem0/__init__.py +6 -0
- mem0/client/__init__.py +0 -0
- mem0/client/main.py +1535 -0
- mem0/client/project.py +860 -0
- mem0/client/utils.py +29 -0
- mem0/configs/__init__.py +0 -0
- mem0/configs/base.py +90 -0
- mem0/configs/dbs/__init__.py +4 -0
- mem0/configs/dbs/base.py +41 -0
- mem0/configs/dbs/mysql.py +25 -0
- mem0/configs/embeddings/__init__.py +0 -0
- mem0/configs/embeddings/base.py +108 -0
- mem0/configs/enums.py +7 -0
- mem0/configs/llms/__init__.py +0 -0
- mem0/configs/llms/base.py +152 -0
- mem0/configs/prompts.py +333 -0
- mem0/configs/vector_stores/__init__.py +0 -0
- mem0/configs/vector_stores/azure_ai_search.py +59 -0
- mem0/configs/vector_stores/baidu.py +29 -0
- mem0/configs/vector_stores/chroma.py +40 -0
- mem0/configs/vector_stores/elasticsearch.py +47 -0
- mem0/configs/vector_stores/faiss.py +39 -0
- mem0/configs/vector_stores/langchain.py +32 -0
- mem0/configs/vector_stores/milvus.py +43 -0
- mem0/configs/vector_stores/mongodb.py +25 -0
- mem0/configs/vector_stores/opensearch.py +41 -0
- mem0/configs/vector_stores/pgvector.py +37 -0
- mem0/configs/vector_stores/pinecone.py +56 -0
- mem0/configs/vector_stores/qdrant.py +49 -0
- mem0/configs/vector_stores/redis.py +26 -0
- mem0/configs/vector_stores/supabase.py +44 -0
- mem0/configs/vector_stores/upstash_vector.py +36 -0
- mem0/configs/vector_stores/vertex_ai_vector_search.py +27 -0
- mem0/configs/vector_stores/weaviate.py +43 -0
- mem0/dbs/__init__.py +4 -0
- mem0/dbs/base.py +68 -0
- mem0/dbs/configs.py +21 -0
- mem0/dbs/mysql.py +321 -0
- mem0/embeddings/__init__.py +0 -0
- mem0/embeddings/aws_bedrock.py +100 -0
- mem0/embeddings/azure_openai.py +43 -0
- mem0/embeddings/base.py +31 -0
- mem0/embeddings/configs.py +30 -0
- mem0/embeddings/gemini.py +39 -0
- mem0/embeddings/huggingface.py +41 -0
- mem0/embeddings/langchain.py +35 -0
- mem0/embeddings/lmstudio.py +29 -0
- mem0/embeddings/mock.py +11 -0
- mem0/embeddings/ollama.py +53 -0
- mem0/embeddings/openai.py +49 -0
- mem0/embeddings/together.py +31 -0
- mem0/embeddings/vertexai.py +54 -0
- mem0/graphs/__init__.py +0 -0
- mem0/graphs/configs.py +96 -0
- mem0/graphs/neptune/__init__.py +0 -0
- mem0/graphs/neptune/base.py +410 -0
- mem0/graphs/neptune/main.py +372 -0
- mem0/graphs/tools.py +371 -0
- mem0/graphs/utils.py +97 -0
- mem0/llms/__init__.py +0 -0
- mem0/llms/anthropic.py +64 -0
- mem0/llms/aws_bedrock.py +270 -0
- mem0/llms/azure_openai.py +114 -0
- mem0/llms/azure_openai_structured.py +76 -0
- mem0/llms/base.py +32 -0
- mem0/llms/configs.py +34 -0
- mem0/llms/deepseek.py +85 -0
- mem0/llms/gemini.py +201 -0
- mem0/llms/groq.py +88 -0
- mem0/llms/langchain.py +65 -0
- mem0/llms/litellm.py +87 -0
- mem0/llms/lmstudio.py +53 -0
- mem0/llms/ollama.py +94 -0
- mem0/llms/openai.py +124 -0
- mem0/llms/openai_structured.py +52 -0
- mem0/llms/sarvam.py +89 -0
- mem0/llms/together.py +88 -0
- mem0/llms/vllm.py +89 -0
- mem0/llms/xai.py +52 -0
- mem0/memory/__init__.py +0 -0
- mem0/memory/base.py +63 -0
- mem0/memory/graph_memory.py +632 -0
- mem0/memory/main.py +1843 -0
- mem0/memory/memgraph_memory.py +630 -0
- mem0/memory/setup.py +56 -0
- mem0/memory/storage.py +218 -0
- mem0/memory/telemetry.py +90 -0
- mem0/memory/utils.py +133 -0
- mem0/proxy/__init__.py +0 -0
- mem0/proxy/main.py +194 -0
- mem0/utils/factory.py +132 -0
- mem0/vector_stores/__init__.py +0 -0
- mem0/vector_stores/azure_ai_search.py +383 -0
- mem0/vector_stores/baidu.py +368 -0
- mem0/vector_stores/base.py +58 -0
- mem0/vector_stores/chroma.py +229 -0
- mem0/vector_stores/configs.py +60 -0
- mem0/vector_stores/elasticsearch.py +235 -0
- mem0/vector_stores/faiss.py +473 -0
- mem0/vector_stores/langchain.py +179 -0
- mem0/vector_stores/milvus.py +245 -0
- mem0/vector_stores/mongodb.py +293 -0
- mem0/vector_stores/opensearch.py +281 -0
- mem0/vector_stores/pgvector.py +294 -0
- mem0/vector_stores/pinecone.py +373 -0
- mem0/vector_stores/qdrant.py +240 -0
- mem0/vector_stores/redis.py +295 -0
- mem0/vector_stores/supabase.py +237 -0
- mem0/vector_stores/upstash_vector.py +293 -0
- mem0/vector_stores/vertex_ai_vector_search.py +629 -0
- mem0/vector_stores/weaviate.py +316 -0
- mem0ai_azure_mysql-0.1.115.data/data/README.md +169 -0
- mem0ai_azure_mysql-0.1.115.dist-info/METADATA +224 -0
- mem0ai_azure_mysql-0.1.115.dist-info/RECORD +116 -0
- mem0ai_azure_mysql-0.1.115.dist-info/WHEEL +4 -0
- mem0ai_azure_mysql-0.1.115.dist-info/licenses/LICENSE +201 -0
mem0/embeddings/mock.py
ADDED
@@ -0,0 +1,11 @@
+from typing import Literal, Optional
+
+from mem0.embeddings.base import EmbeddingBase
+
+
+class MockEmbeddings(EmbeddingBase):
+    def embed(self, text, memory_action: Optional[Literal["add", "search", "update"]] = None):
+        """
+        Generate a mock embedding with dimension of 10.
+        """
+        return [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]
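The mock embedder ignores its input and always returns the same 10-dimensional vector, which makes it handy for exercising memory pipelines in tests without network calls or API keys. A minimal test sketch, assuming `EmbeddingBase` (not shown in this diff) permits construction without a config, as its optional-config subclasses below suggest:

    from mem0.embeddings.mock import MockEmbeddings

    embedder = MockEmbeddings()
    vector = embedder.embed("any text at all", memory_action="add")
    assert len(vector) == 10                             # dimension is fixed at 10
    assert vector == embedder.embed("different text")    # input is ignored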
mem0/embeddings/ollama.py
ADDED
@@ -0,0 +1,53 @@
+import subprocess
+import sys
+from typing import Literal, Optional
+
+from mem0.configs.embeddings.base import BaseEmbedderConfig
+from mem0.embeddings.base import EmbeddingBase
+
+try:
+    from ollama import Client
+except ImportError:
+    user_input = input("The 'ollama' library is required. Install it now? [y/N]: ")
+    if user_input.lower() == "y":
+        try:
+            subprocess.check_call([sys.executable, "-m", "pip", "install", "ollama"])
+            from ollama import Client
+        except subprocess.CalledProcessError:
+            print("Failed to install 'ollama'. Please install it manually using 'pip install ollama'.")
+            sys.exit(1)
+    else:
+        print("The required 'ollama' library is not installed.")
+        sys.exit(1)
+
+
+class OllamaEmbedding(EmbeddingBase):
+    def __init__(self, config: Optional[BaseEmbedderConfig] = None):
+        super().__init__(config)
+
+        self.config.model = self.config.model or "nomic-embed-text"
+        self.config.embedding_dims = self.config.embedding_dims or 512
+
+        self.client = Client(host=self.config.ollama_base_url)
+        self._ensure_model_exists()
+
+    def _ensure_model_exists(self):
+        """
+        Ensure the specified model exists locally. If not, pull it from Ollama.
+        """
+        local_models = self.client.list()["models"]
+        if not any(model.get("name") == self.config.model for model in local_models):
+            self.client.pull(self.config.model)
+
+    def embed(self, text, memory_action: Optional[Literal["add", "search", "update"]] = None):
+        """
+        Get the embedding for the given text using Ollama.
+
+        Args:
+            text (str): The text to embed.
+            memory_action (optional): The type of embedding to use. Must be one of "add", "search", or "update". Defaults to None.
+        Returns:
+            list: The embedding vector.
+        """
+        response = self.client.embeddings(model=self.config.model, prompt=text)
+        return response["embedding"]
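A minimal usage sketch. `BaseEmbedderConfig`'s constructor is not shown in this diff, so the keyword arguments below are assumed from the fields the class reads (`model`, `embedding_dims`, `ollama_base_url`), and the host URL is illustrative:

    from mem0.configs.embeddings.base import BaseEmbedderConfig
    from mem0.embeddings.ollama import OllamaEmbedding

    # Assumed config fields; unset values fall back to "nomic-embed-text" / 512 dims.
    config = BaseEmbedderConfig(model="nomic-embed-text", ollama_base_url="http://localhost:11434")
    embedder = OllamaEmbedding(config)  # _ensure_model_exists() pulls the model if it is absent
    vector = embedder.embed("Alice prefers window seats", memory_action="add")

Note that the install-on-failure prompt runs at import time: in a non-interactive process the `input()` call raises `EOFError` rather than reaching either exit branch.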
mem0/embeddings/openai.py
ADDED
@@ -0,0 +1,49 @@
+import os
+import warnings
+from typing import Literal, Optional
+
+from openai import OpenAI
+
+from mem0.configs.embeddings.base import BaseEmbedderConfig
+from mem0.embeddings.base import EmbeddingBase
+
+
+class OpenAIEmbedding(EmbeddingBase):
+    def __init__(self, config: Optional[BaseEmbedderConfig] = None):
+        super().__init__(config)
+
+        self.config.model = self.config.model or "text-embedding-3-small"
+        self.config.embedding_dims = self.config.embedding_dims or 1536
+
+        api_key = self.config.api_key or os.getenv("OPENAI_API_KEY")
+        base_url = (
+            self.config.openai_base_url
+            or os.getenv("OPENAI_API_BASE")
+            or os.getenv("OPENAI_BASE_URL")
+            or "https://api.openai.com/v1"
+        )
+        if os.environ.get("OPENAI_API_BASE"):
+            warnings.warn(
+                "The environment variable 'OPENAI_API_BASE' is deprecated and will be removed in version 0.1.80. "
+                "Please use 'OPENAI_BASE_URL' instead.",
+                DeprecationWarning,
+            )
+
+        self.client = OpenAI(api_key=api_key, base_url=base_url)
+
+    def embed(self, text, memory_action: Optional[Literal["add", "search", "update"]] = None):
+        """
+        Get the embedding for the given text using OpenAI.
+
+        Args:
+            text (str): The text to embed.
+            memory_action (optional): The type of embedding to use. Must be one of "add", "search", or "update". Defaults to None.
+        Returns:
+            list: The embedding vector.
+        """
+        text = text.replace("\n", " ")
+        return (
+            self.client.embeddings.create(input=[text], model=self.config.model, dimensions=self.config.embedding_dims)
+            .data[0]
+            .embedding
+        )
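A usage sketch under the same assumptions about `BaseEmbedderConfig`; the API key is a placeholder:

    import os

    from mem0.configs.embeddings.base import BaseEmbedderConfig
    from mem0.embeddings.openai import OpenAIEmbedding

    os.environ["OPENAI_API_KEY"] = "sk-..."  # placeholder; or pass api_key via the config

    config = BaseEmbedderConfig()  # falls back to "text-embedding-3-small" / 1536 dims
    vector = OpenAIEmbedding(config).embed("Alice prefers window seats", memory_action="search")

Unlike the Ollama embedder, this one forwards `embedding_dims` to the API via the `dimensions` parameter, so a smaller configured value genuinely shortens the returned vector.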
mem0/embeddings/together.py
ADDED
@@ -0,0 +1,31 @@
+import os
+from typing import Literal, Optional
+
+from together import Together
+
+from mem0.configs.embeddings.base import BaseEmbedderConfig
+from mem0.embeddings.base import EmbeddingBase
+
+
+class TogetherEmbedding(EmbeddingBase):
+    def __init__(self, config: Optional[BaseEmbedderConfig] = None):
+        super().__init__(config)
+
+        self.config.model = self.config.model or "togethercomputer/m2-bert-80M-8k-retrieval"
+        api_key = self.config.api_key or os.getenv("TOGETHER_API_KEY")
+        # TODO: check if this is correct
+        self.config.embedding_dims = self.config.embedding_dims or 768
+        self.client = Together(api_key=api_key)
+
+    def embed(self, text, memory_action: Optional[Literal["add", "search", "update"]] = None):
+        """
+        Get the embedding for the given text using Together.
+
+        Args:
+            text (str): The text to embed.
+            memory_action (optional): The type of embedding to use. Must be one of "add", "search", or "update". Defaults to None.
+        Returns:
+            list: The embedding vector.
+        """
+
+        return self.client.embeddings.create(model=self.config.model, input=text).data[0].embedding
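A usage sketch; only the API key is required, since the model has a default. Note the in-code TODO: `embedding_dims` is recorded as 768 but never sent to the API, so it only documents the expected vector size:

    import os

    from mem0.embeddings.together import TogetherEmbedding

    os.environ["TOGETHER_API_KEY"] = "..."  # placeholder; or pass api_key via BaseEmbedderConfig

    embedder = TogetherEmbedding()  # defaults to togethercomputer/m2-bert-80M-8k-retrieval
    vector = embedder.embed("Alice prefers window seats")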
mem0/embeddings/vertexai.py
ADDED
@@ -0,0 +1,54 @@
+import os
+from typing import Literal, Optional
+
+from vertexai.language_models import TextEmbeddingInput, TextEmbeddingModel
+
+from mem0.configs.embeddings.base import BaseEmbedderConfig
+from mem0.embeddings.base import EmbeddingBase
+
+
+class VertexAIEmbedding(EmbeddingBase):
+    def __init__(self, config: Optional[BaseEmbedderConfig] = None):
+        super().__init__(config)
+
+        self.config.model = self.config.model or "text-embedding-004"
+        self.config.embedding_dims = self.config.embedding_dims or 256
+
+        self.embedding_types = {
+            "add": self.config.memory_add_embedding_type or "RETRIEVAL_DOCUMENT",
+            "update": self.config.memory_update_embedding_type or "RETRIEVAL_DOCUMENT",
+            "search": self.config.memory_search_embedding_type or "RETRIEVAL_QUERY",
+        }
+
+        credentials_path = self.config.vertex_credentials_json
+
+        if credentials_path:
+            os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = credentials_path
+        elif not os.getenv("GOOGLE_APPLICATION_CREDENTIALS"):
+            raise ValueError(
+                "Google application credentials JSON is not provided. Please provide a valid JSON path or set the 'GOOGLE_APPLICATION_CREDENTIALS' environment variable."
+            )
+
+        self.model = TextEmbeddingModel.from_pretrained(self.config.model)
+
+    def embed(self, text, memory_action: Optional[Literal["add", "search", "update"]] = None):
+        """
+        Get the embedding for the given text using Vertex AI.
+
+        Args:
+            text (str): The text to embed.
+            memory_action (optional): The type of embedding to use. Must be one of "add", "search", or "update". Defaults to None.
+        Returns:
+            list: The embedding vector.
+        """
+        embedding_type = "SEMANTIC_SIMILARITY"
+        if memory_action is not None:
+            if memory_action not in self.embedding_types:
+                raise ValueError(f"Invalid memory action: {memory_action}")
+
+            embedding_type = self.embedding_types[memory_action]
+
+        text_input = TextEmbeddingInput(text=text, task_type=embedding_type)
+        embeddings = self.model.get_embeddings(texts=[text_input], output_dimensionality=self.config.embedding_dims)
+
+        return embeddings[0].values
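A usage sketch; the credentials path is a placeholder, and the config fields (`vertex_credentials_json`, the `memory_*_embedding_type` overrides) are assumed from what the constructor reads:

    from mem0.configs.embeddings.base import BaseEmbedderConfig
    from mem0.embeddings.vertexai import VertexAIEmbedding

    config = BaseEmbedderConfig(
        model="text-embedding-004",
        vertex_credentials_json="/path/to/service-account.json",  # placeholder path
    )
    embedder = VertexAIEmbedding(config)

    # "search" maps to task_type RETRIEVAL_QUERY; "add"/"update" map to
    # RETRIEVAL_DOCUMENT; omitting memory_action uses SEMANTIC_SIMILARITY.
    vector = embedder.embed("window seats", memory_action="search")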
mem0/graphs/__init__.py
ADDED
File without changes
mem0/graphs/configs.py
ADDED
@@ -0,0 +1,96 @@
+from typing import Optional, Union
+
+from pydantic import BaseModel, Field, field_validator, model_validator
+
+from mem0.llms.configs import LlmConfig
+
+
+class Neo4jConfig(BaseModel):
+    url: Optional[str] = Field(None, description="Host address for the graph database")
+    username: Optional[str] = Field(None, description="Username for the graph database")
+    password: Optional[str] = Field(None, description="Password for the graph database")
+    database: Optional[str] = Field(None, description="Database for the graph database")
+    base_label: Optional[bool] = Field(None, description="Whether to use base node label __Entity__ for all entities")
+
+    @model_validator(mode="before")
+    def check_host_port_or_path(cls, values):
+        url, username, password = (
+            values.get("url"),
+            values.get("username"),
+            values.get("password"),
+        )
+        if not url or not username or not password:
+            raise ValueError("Please provide 'url', 'username' and 'password'.")
+        return values
+
+
+class MemgraphConfig(BaseModel):
+    url: Optional[str] = Field(None, description="Host address for the graph database")
+    username: Optional[str] = Field(None, description="Username for the graph database")
+    password: Optional[str] = Field(None, description="Password for the graph database")
+
+    @model_validator(mode="before")
+    def check_host_port_or_path(cls, values):
+        url, username, password = (
+            values.get("url"),
+            values.get("username"),
+            values.get("password"),
+        )
+        if not url or not username or not password:
+            raise ValueError("Please provide 'url', 'username' and 'password'.")
+        return values
+
+
+class NeptuneConfig(BaseModel):
+    endpoint: Optional[str] = (
+        Field(
+            None,
+            description="Endpoint to connect to a Neptune Analytics Server as neptune-graph://<graphid>",
+        )
+    )
+    base_label: Optional[bool] = Field(None, description="Whether to use base node label __Entity__ for all entities")
+
+    @model_validator(mode="before")
+    def check_host_port_or_path(cls, values):
+        endpoint = values.get("endpoint")
+        if not endpoint:
+            raise ValueError("Please provide 'endpoint' with the format as 'neptune-graph://<graphid>'.")
+        if endpoint.startswith("neptune-db://"):
+            raise ValueError("neptune-db server is not yet supported")
+        elif endpoint.startswith("neptune-graph://"):
+            # This is a Neptune Analytics Graph
+            graph_identifier = endpoint.replace("neptune-graph://", "")
+            if not graph_identifier.startswith("g-"):
+                raise ValueError("Provide a valid 'graph_identifier'.")
+            values["graph_identifier"] = graph_identifier
+            return values
+        else:
+            raise ValueError(
+                "You must provide an endpoint to create a NeptuneServer as either neptune-db://<endpoint> or neptune-graph://<graphid>"
+            )
+
+
+class GraphStoreConfig(BaseModel):
+    provider: str = Field(
+        description="Provider of the data store (e.g., 'neo4j', 'memgraph', 'neptune')",
+        default="neo4j",
+    )
+    config: Union[Neo4jConfig, MemgraphConfig, NeptuneConfig] = Field(
+        description="Configuration for the specific data store", default=None
+    )
+    llm: Optional[LlmConfig] = Field(description="LLM configuration for querying the graph store", default=None)
+    custom_prompt: Optional[str] = Field(
+        description="Custom prompt to fetch entities from the given text", default=None
+    )
+
+    @field_validator("config")
+    def validate_config(cls, v, values):
+        provider = values.data.get("provider")
+        if provider == "neo4j":
+            return Neo4jConfig(**v.model_dump())
+        elif provider == "memgraph":
+            return MemgraphConfig(**v.model_dump())
+        elif provider == "neptune":
+            return NeptuneConfig(**v.model_dump())
+        else:
+            raise ValueError(f"Unsupported graph store provider: {provider}")
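A construction sketch for the Neo4j provider; the connection values are placeholders. Since `validate_config` calls `v.model_dump()`, the `config` value must first parse into one of the Union models before the provider-specific re-validation runs; passing a model instance directly keeps that unambiguous:

    from mem0.graphs.configs import GraphStoreConfig, Neo4jConfig

    graph_store = GraphStoreConfig(
        provider="neo4j",
        config=Neo4jConfig(
            url="neo4j+s://example.databases.neo4j.io",  # placeholder connection values
            username="neo4j",
            password="secret",
        ),
    )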