mem0ai-azure-mysql 0.1.115.1__tar.gz → 0.1.116.1__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {mem0ai_azure_mysql-0.1.115.1 → mem0ai_azure_mysql-0.1.116.1}/.gitignore +1 -0
- mem0ai_azure_mysql-0.1.116.1/PKG-INFO +87 -0
- mem0ai_azure_mysql-0.1.116.1/README.md +24 -0
- {mem0ai_azure_mysql-0.1.115.1 → mem0ai_azure_mysql-0.1.116.1}/mem0/client/main.py +20 -17
- {mem0ai_azure_mysql-0.1.115.1 → mem0ai_azure_mysql-0.1.116.1}/mem0/configs/base.py +19 -5
- mem0ai_azure_mysql-0.1.116.1/mem0/configs/llms/anthropic.py +56 -0
- mem0ai_azure_mysql-0.1.116.1/mem0/configs/llms/aws_bedrock.py +191 -0
- mem0ai_azure_mysql-0.1.116.1/mem0/configs/llms/azure.py +57 -0
- mem0ai_azure_mysql-0.1.116.1/mem0/configs/llms/base.py +62 -0
- mem0ai_azure_mysql-0.1.116.1/mem0/configs/llms/deepseek.py +56 -0
- mem0ai_azure_mysql-0.1.116.1/mem0/configs/llms/lmstudio.py +59 -0
- mem0ai_azure_mysql-0.1.116.1/mem0/configs/llms/ollama.py +56 -0
- mem0ai_azure_mysql-0.1.116.1/mem0/configs/llms/openai.py +76 -0
- mem0ai_azure_mysql-0.1.116.1/mem0/configs/llms/vllm.py +56 -0
- {mem0ai_azure_mysql-0.1.115.1 → mem0ai_azure_mysql-0.1.116.1}/mem0/configs/vector_stores/azure_ai_search.py +3 -2
- mem0ai_azure_mysql-0.1.116.1/mem0/configs/vector_stores/databricks.py +63 -0
- {mem0ai_azure_mysql-0.1.115.1 → mem0ai_azure_mysql-0.1.116.1}/mem0/configs/vector_stores/elasticsearch.py +18 -0
- {mem0ai_azure_mysql-0.1.115.1 → mem0ai_azure_mysql-0.1.116.1}/mem0/configs/vector_stores/milvus.py +1 -0
- {mem0ai_azure_mysql-0.1.115.1 → mem0ai_azure_mysql-0.1.116.1}/mem0/configs/vector_stores/pgvector.py +15 -2
- {mem0ai_azure_mysql-0.1.115.1 → mem0ai_azure_mysql-0.1.116.1}/mem0/configs/vector_stores/pinecone.py +1 -0
- {mem0ai_azure_mysql-0.1.115.1 → mem0ai_azure_mysql-0.1.116.1}/mem0/embeddings/azure_openai.py +7 -9
- {mem0ai_azure_mysql-0.1.115.1 → mem0ai_azure_mysql-0.1.116.1}/mem0/embeddings/ollama.py +1 -1
- {mem0ai_azure_mysql-0.1.115.1 → mem0ai_azure_mysql-0.1.116.1}/mem0/graphs/configs.py +8 -2
- {mem0ai_azure_mysql-0.1.115.1 → mem0ai_azure_mysql-0.1.116.1}/mem0/graphs/neptune/main.py +1 -0
- {mem0ai_azure_mysql-0.1.115.1 → mem0ai_azure_mysql-0.1.116.1}/mem0/graphs/tools.py +6 -6
- {mem0ai_azure_mysql-0.1.115.1 → mem0ai_azure_mysql-0.1.116.1}/mem0/llms/anthropic.py +33 -10
- mem0ai_azure_mysql-0.1.116.1/mem0/llms/aws_bedrock.py +600 -0
- {mem0ai_azure_mysql-0.1.115.1 → mem0ai_azure_mysql-0.1.116.1}/mem0/llms/azure_openai.py +37 -20
- {mem0ai_azure_mysql-0.1.115.1 → mem0ai_azure_mysql-0.1.116.1}/mem0/llms/azure_openai_structured.py +19 -4
- mem0ai_azure_mysql-0.1.116.1/mem0/llms/base.py +131 -0
- {mem0ai_azure_mysql-0.1.115.1 → mem0ai_azure_mysql-0.1.116.1}/mem0/llms/deepseek.py +31 -9
- mem0ai_azure_mysql-0.1.116.1/mem0/llms/lmstudio.py +114 -0
- mem0ai_azure_mysql-0.1.116.1/mem0/llms/ollama.py +106 -0
- {mem0ai_azure_mysql-0.1.115.1 → mem0ai_azure_mysql-0.1.116.1}/mem0/llms/openai.py +39 -22
- {mem0ai_azure_mysql-0.1.115.1 → mem0ai_azure_mysql-0.1.116.1}/mem0/llms/vllm.py +32 -14
- {mem0ai_azure_mysql-0.1.115.1 → mem0ai_azure_mysql-0.1.116.1}/mem0/memory/base.py +2 -2
- {mem0ai_azure_mysql-0.1.115.1 → mem0ai_azure_mysql-0.1.116.1}/mem0/memory/graph_memory.py +111 -45
- mem0ai_azure_mysql-0.1.116.1/mem0/memory/kuzu_memory.py +710 -0
- {mem0ai_azure_mysql-0.1.115.1 → mem0ai_azure_mysql-0.1.116.1}/mem0/memory/main.py +59 -37
- {mem0ai_azure_mysql-0.1.115.1 → mem0ai_azure_mysql-0.1.116.1}/mem0/memory/memgraph_memory.py +43 -35
- {mem0ai_azure_mysql-0.1.115.1 → mem0ai_azure_mysql-0.1.116.1}/mem0/memory/utils.py +51 -0
- {mem0ai_azure_mysql-0.1.115.1 → mem0ai_azure_mysql-0.1.116.1}/mem0/proxy/main.py +5 -10
- mem0ai_azure_mysql-0.1.116.1/mem0/utils/factory.py +239 -0
- {mem0ai_azure_mysql-0.1.115.1 → mem0ai_azure_mysql-0.1.116.1}/mem0/vector_stores/azure_ai_search.py +30 -31
- {mem0ai_azure_mysql-0.1.115.1 → mem0ai_azure_mysql-0.1.116.1}/mem0/vector_stores/chroma.py +27 -2
- {mem0ai_azure_mysql-0.1.115.1 → mem0ai_azure_mysql-0.1.116.1}/mem0/vector_stores/configs.py +1 -0
- mem0ai_azure_mysql-0.1.116.1/mem0/vector_stores/databricks.py +759 -0
- {mem0ai_azure_mysql-0.1.115.1 → mem0ai_azure_mysql-0.1.116.1}/mem0/vector_stores/elasticsearch.py +2 -0
- {mem0ai_azure_mysql-0.1.115.1 → mem0ai_azure_mysql-0.1.116.1}/mem0/vector_stores/langchain.py +3 -2
- {mem0ai_azure_mysql-0.1.115.1 → mem0ai_azure_mysql-0.1.116.1}/mem0/vector_stores/milvus.py +3 -1
- {mem0ai_azure_mysql-0.1.115.1 → mem0ai_azure_mysql-0.1.116.1}/mem0/vector_stores/mongodb.py +20 -1
- {mem0ai_azure_mysql-0.1.115.1 → mem0ai_azure_mysql-0.1.116.1}/mem0/vector_stores/pgvector.py +83 -9
- {mem0ai_azure_mysql-0.1.115.1 → mem0ai_azure_mysql-0.1.116.1}/mem0/vector_stores/pinecone.py +17 -8
- {mem0ai_azure_mysql-0.1.115.1 → mem0ai_azure_mysql-0.1.116.1}/mem0/vector_stores/qdrant.py +30 -0
- {mem0ai_azure_mysql-0.1.115.1 → mem0ai_azure_mysql-0.1.116.1}/pyproject.toml +27 -5
- mem0ai_azure_mysql-0.1.115.1/LICENSE +0 -201
- mem0ai_azure_mysql-0.1.115.1/PKG-INFO +0 -224
- mem0ai_azure_mysql-0.1.115.1/README.md +0 -169
- mem0ai_azure_mysql-0.1.115.1/mem0/configs/llms/base.py +0 -152
- mem0ai_azure_mysql-0.1.115.1/mem0/llms/aws_bedrock.py +0 -270
- mem0ai_azure_mysql-0.1.115.1/mem0/llms/base.py +0 -32
- mem0ai_azure_mysql-0.1.115.1/mem0/llms/lmstudio.py +0 -53
- mem0ai_azure_mysql-0.1.115.1/mem0/llms/ollama.py +0 -94
- mem0ai_azure_mysql-0.1.115.1/mem0/utils/factory.py +0 -132
- {mem0ai_azure_mysql-0.1.115.1 → mem0ai_azure_mysql-0.1.116.1}/mem0/__init__.py +0 -0
- {mem0ai_azure_mysql-0.1.115.1 → mem0ai_azure_mysql-0.1.116.1}/mem0/client/__init__.py +0 -0
- {mem0ai_azure_mysql-0.1.115.1 → mem0ai_azure_mysql-0.1.116.1}/mem0/client/project.py +0 -0
- {mem0ai_azure_mysql-0.1.115.1 → mem0ai_azure_mysql-0.1.116.1}/mem0/client/utils.py +0 -0
- {mem0ai_azure_mysql-0.1.115.1 → mem0ai_azure_mysql-0.1.116.1}/mem0/configs/__init__.py +0 -0
- {mem0ai_azure_mysql-0.1.115.1 → mem0ai_azure_mysql-0.1.116.1}/mem0/configs/dbs/__init__.py +0 -0
- {mem0ai_azure_mysql-0.1.115.1 → mem0ai_azure_mysql-0.1.116.1}/mem0/configs/dbs/base.py +0 -0
- {mem0ai_azure_mysql-0.1.115.1 → mem0ai_azure_mysql-0.1.116.1}/mem0/configs/dbs/mysql.py +0 -0
- {mem0ai_azure_mysql-0.1.115.1 → mem0ai_azure_mysql-0.1.116.1}/mem0/configs/embeddings/__init__.py +0 -0
- {mem0ai_azure_mysql-0.1.115.1 → mem0ai_azure_mysql-0.1.116.1}/mem0/configs/embeddings/base.py +0 -0
- {mem0ai_azure_mysql-0.1.115.1 → mem0ai_azure_mysql-0.1.116.1}/mem0/configs/enums.py +0 -0
- {mem0ai_azure_mysql-0.1.115.1 → mem0ai_azure_mysql-0.1.116.1}/mem0/configs/llms/__init__.py +0 -0
- {mem0ai_azure_mysql-0.1.115.1 → mem0ai_azure_mysql-0.1.116.1}/mem0/configs/prompts.py +0 -0
- {mem0ai_azure_mysql-0.1.115.1 → mem0ai_azure_mysql-0.1.116.1}/mem0/configs/vector_stores/__init__.py +0 -0
- {mem0ai_azure_mysql-0.1.115.1 → mem0ai_azure_mysql-0.1.116.1}/mem0/configs/vector_stores/baidu.py +0 -0
- {mem0ai_azure_mysql-0.1.115.1 → mem0ai_azure_mysql-0.1.116.1}/mem0/configs/vector_stores/chroma.py +0 -0
- {mem0ai_azure_mysql-0.1.115.1 → mem0ai_azure_mysql-0.1.116.1}/mem0/configs/vector_stores/faiss.py +0 -0
- {mem0ai_azure_mysql-0.1.115.1 → mem0ai_azure_mysql-0.1.116.1}/mem0/configs/vector_stores/langchain.py +0 -0
- {mem0ai_azure_mysql-0.1.115.1 → mem0ai_azure_mysql-0.1.116.1}/mem0/configs/vector_stores/mongodb.py +0 -0
- {mem0ai_azure_mysql-0.1.115.1 → mem0ai_azure_mysql-0.1.116.1}/mem0/configs/vector_stores/opensearch.py +0 -0
- {mem0ai_azure_mysql-0.1.115.1 → mem0ai_azure_mysql-0.1.116.1}/mem0/configs/vector_stores/qdrant.py +0 -0
- {mem0ai_azure_mysql-0.1.115.1 → mem0ai_azure_mysql-0.1.116.1}/mem0/configs/vector_stores/redis.py +0 -0
- {mem0ai_azure_mysql-0.1.115.1 → mem0ai_azure_mysql-0.1.116.1}/mem0/configs/vector_stores/supabase.py +0 -0
- {mem0ai_azure_mysql-0.1.115.1 → mem0ai_azure_mysql-0.1.116.1}/mem0/configs/vector_stores/upstash_vector.py +0 -0
- {mem0ai_azure_mysql-0.1.115.1 → mem0ai_azure_mysql-0.1.116.1}/mem0/configs/vector_stores/vertex_ai_vector_search.py +0 -0
- {mem0ai_azure_mysql-0.1.115.1 → mem0ai_azure_mysql-0.1.116.1}/mem0/configs/vector_stores/weaviate.py +0 -0
- {mem0ai_azure_mysql-0.1.115.1 → mem0ai_azure_mysql-0.1.116.1}/mem0/dbs/__init__.py +0 -0
- {mem0ai_azure_mysql-0.1.115.1 → mem0ai_azure_mysql-0.1.116.1}/mem0/dbs/base.py +0 -0
- {mem0ai_azure_mysql-0.1.115.1 → mem0ai_azure_mysql-0.1.116.1}/mem0/dbs/configs.py +0 -0
- {mem0ai_azure_mysql-0.1.115.1 → mem0ai_azure_mysql-0.1.116.1}/mem0/dbs/mysql.py +0 -0
- {mem0ai_azure_mysql-0.1.115.1 → mem0ai_azure_mysql-0.1.116.1}/mem0/embeddings/__init__.py +0 -0
- {mem0ai_azure_mysql-0.1.115.1 → mem0ai_azure_mysql-0.1.116.1}/mem0/embeddings/aws_bedrock.py +0 -0
- {mem0ai_azure_mysql-0.1.115.1 → mem0ai_azure_mysql-0.1.116.1}/mem0/embeddings/base.py +0 -0
- {mem0ai_azure_mysql-0.1.115.1 → mem0ai_azure_mysql-0.1.116.1}/mem0/embeddings/configs.py +0 -0
- {mem0ai_azure_mysql-0.1.115.1 → mem0ai_azure_mysql-0.1.116.1}/mem0/embeddings/gemini.py +0 -0
- {mem0ai_azure_mysql-0.1.115.1 → mem0ai_azure_mysql-0.1.116.1}/mem0/embeddings/huggingface.py +0 -0
- {mem0ai_azure_mysql-0.1.115.1 → mem0ai_azure_mysql-0.1.116.1}/mem0/embeddings/langchain.py +0 -0
- {mem0ai_azure_mysql-0.1.115.1 → mem0ai_azure_mysql-0.1.116.1}/mem0/embeddings/lmstudio.py +0 -0
- {mem0ai_azure_mysql-0.1.115.1 → mem0ai_azure_mysql-0.1.116.1}/mem0/embeddings/mock.py +0 -0
- {mem0ai_azure_mysql-0.1.115.1 → mem0ai_azure_mysql-0.1.116.1}/mem0/embeddings/openai.py +0 -0
- {mem0ai_azure_mysql-0.1.115.1 → mem0ai_azure_mysql-0.1.116.1}/mem0/embeddings/together.py +0 -0
- {mem0ai_azure_mysql-0.1.115.1 → mem0ai_azure_mysql-0.1.116.1}/mem0/embeddings/vertexai.py +0 -0
- {mem0ai_azure_mysql-0.1.115.1 → mem0ai_azure_mysql-0.1.116.1}/mem0/graphs/__init__.py +0 -0
- {mem0ai_azure_mysql-0.1.115.1 → mem0ai_azure_mysql-0.1.116.1}/mem0/graphs/neptune/__init__.py +0 -0
- {mem0ai_azure_mysql-0.1.115.1 → mem0ai_azure_mysql-0.1.116.1}/mem0/graphs/neptune/base.py +0 -0
- {mem0ai_azure_mysql-0.1.115.1 → mem0ai_azure_mysql-0.1.116.1}/mem0/graphs/utils.py +0 -0
- {mem0ai_azure_mysql-0.1.115.1 → mem0ai_azure_mysql-0.1.116.1}/mem0/llms/__init__.py +0 -0
- {mem0ai_azure_mysql-0.1.115.1 → mem0ai_azure_mysql-0.1.116.1}/mem0/llms/configs.py +0 -0
- {mem0ai_azure_mysql-0.1.115.1 → mem0ai_azure_mysql-0.1.116.1}/mem0/llms/gemini.py +0 -0
- {mem0ai_azure_mysql-0.1.115.1 → mem0ai_azure_mysql-0.1.116.1}/mem0/llms/groq.py +0 -0
- {mem0ai_azure_mysql-0.1.115.1 → mem0ai_azure_mysql-0.1.116.1}/mem0/llms/langchain.py +0 -0
- {mem0ai_azure_mysql-0.1.115.1 → mem0ai_azure_mysql-0.1.116.1}/mem0/llms/litellm.py +0 -0
- {mem0ai_azure_mysql-0.1.115.1 → mem0ai_azure_mysql-0.1.116.1}/mem0/llms/openai_structured.py +0 -0
- {mem0ai_azure_mysql-0.1.115.1 → mem0ai_azure_mysql-0.1.116.1}/mem0/llms/sarvam.py +0 -0
- {mem0ai_azure_mysql-0.1.115.1 → mem0ai_azure_mysql-0.1.116.1}/mem0/llms/together.py +0 -0
- {mem0ai_azure_mysql-0.1.115.1 → mem0ai_azure_mysql-0.1.116.1}/mem0/llms/xai.py +0 -0
- {mem0ai_azure_mysql-0.1.115.1 → mem0ai_azure_mysql-0.1.116.1}/mem0/memory/__init__.py +0 -0
- {mem0ai_azure_mysql-0.1.115.1 → mem0ai_azure_mysql-0.1.116.1}/mem0/memory/setup.py +0 -0
- {mem0ai_azure_mysql-0.1.115.1 → mem0ai_azure_mysql-0.1.116.1}/mem0/memory/storage.py +0 -0
- {mem0ai_azure_mysql-0.1.115.1 → mem0ai_azure_mysql-0.1.116.1}/mem0/memory/telemetry.py +0 -0
- {mem0ai_azure_mysql-0.1.115.1 → mem0ai_azure_mysql-0.1.116.1}/mem0/proxy/__init__.py +0 -0
- {mem0ai_azure_mysql-0.1.115.1 → mem0ai_azure_mysql-0.1.116.1}/mem0/vector_stores/__init__.py +0 -0
- {mem0ai_azure_mysql-0.1.115.1 → mem0ai_azure_mysql-0.1.116.1}/mem0/vector_stores/baidu.py +0 -0
- {mem0ai_azure_mysql-0.1.115.1 → mem0ai_azure_mysql-0.1.116.1}/mem0/vector_stores/base.py +0 -0
- {mem0ai_azure_mysql-0.1.115.1 → mem0ai_azure_mysql-0.1.116.1}/mem0/vector_stores/faiss.py +0 -0
- {mem0ai_azure_mysql-0.1.115.1 → mem0ai_azure_mysql-0.1.116.1}/mem0/vector_stores/opensearch.py +0 -0
- {mem0ai_azure_mysql-0.1.115.1 → mem0ai_azure_mysql-0.1.116.1}/mem0/vector_stores/redis.py +0 -0
- {mem0ai_azure_mysql-0.1.115.1 → mem0ai_azure_mysql-0.1.116.1}/mem0/vector_stores/supabase.py +0 -0
- {mem0ai_azure_mysql-0.1.115.1 → mem0ai_azure_mysql-0.1.116.1}/mem0/vector_stores/upstash_vector.py +0 -0
- {mem0ai_azure_mysql-0.1.115.1 → mem0ai_azure_mysql-0.1.116.1}/mem0/vector_stores/vertex_ai_vector_search.py +0 -0
- {mem0ai_azure_mysql-0.1.115.1 → mem0ai_azure_mysql-0.1.116.1}/mem0/vector_stores/weaviate.py +0 -0
|
@@ -0,0 +1,87 @@
|
|
|
1
|
+
Metadata-Version: 2.4
|
|
2
|
+
Name: mem0ai-azure-mysql
|
|
3
|
+
Version: 0.1.116.1
|
|
4
|
+
Summary: Long-term memory for AI Agents with Azure DefaultAzureCredential authentication and MySQL history database support
|
|
5
|
+
Requires-Python: <4.0,>=3.9
|
|
6
|
+
Requires-Dist: azure-identity>=1.23.1
|
|
7
|
+
Requires-Dist: azure-search-documents>=11.5.3
|
|
8
|
+
Requires-Dist: langchain-neo4j>=0.4.0
|
|
9
|
+
Requires-Dist: openai<1.100.0,>=1.90.0
|
|
10
|
+
Requires-Dist: posthog>=3.5.0
|
|
11
|
+
Requires-Dist: protobuf<6.0.0,>=5.29.0
|
|
12
|
+
Requires-Dist: pydantic>=2.7.3
|
|
13
|
+
Requires-Dist: pymysql
|
|
14
|
+
Requires-Dist: pytz>=2024.1
|
|
15
|
+
Requires-Dist: qdrant-client>=1.9.1
|
|
16
|
+
Requires-Dist: rank-bm25>=0.2.2
|
|
17
|
+
Requires-Dist: sqlalchemy>=2.0.31
|
|
18
|
+
Provides-Extra: dev
|
|
19
|
+
Requires-Dist: isort>=5.13.2; extra == 'dev'
|
|
20
|
+
Requires-Dist: pytest>=8.2.2; extra == 'dev'
|
|
21
|
+
Requires-Dist: ruff>=0.6.5; extra == 'dev'
|
|
22
|
+
Provides-Extra: extras
|
|
23
|
+
Requires-Dist: boto3>=1.34.0; extra == 'extras'
|
|
24
|
+
Requires-Dist: elasticsearch>=8.0.0; extra == 'extras'
|
|
25
|
+
Requires-Dist: langchain-community>=0.0.0; extra == 'extras'
|
|
26
|
+
Requires-Dist: langchain-memgraph>=0.1.0; extra == 'extras'
|
|
27
|
+
Requires-Dist: opensearch-py>=2.0.0; extra == 'extras'
|
|
28
|
+
Requires-Dist: sentence-transformers>=5.0.0; extra == 'extras'
|
|
29
|
+
Provides-Extra: graph
|
|
30
|
+
Requires-Dist: kuzu>=0.11.0; extra == 'graph'
|
|
31
|
+
Requires-Dist: langchain-aws>=0.2.23; extra == 'graph'
|
|
32
|
+
Requires-Dist: langchain-neo4j>=0.4.0; extra == 'graph'
|
|
33
|
+
Requires-Dist: neo4j>=5.23.1; extra == 'graph'
|
|
34
|
+
Requires-Dist: rank-bm25>=0.2.2; extra == 'graph'
|
|
35
|
+
Provides-Extra: llms
|
|
36
|
+
Requires-Dist: google-genai>=1.0.0; extra == 'llms'
|
|
37
|
+
Requires-Dist: google-generativeai>=0.3.0; extra == 'llms'
|
|
38
|
+
Requires-Dist: groq>=0.3.0; extra == 'llms'
|
|
39
|
+
Requires-Dist: litellm>=1.74.0; extra == 'llms'
|
|
40
|
+
Requires-Dist: ollama>=0.1.0; extra == 'llms'
|
|
41
|
+
Requires-Dist: openai<1.100.0,>=1.90.0; extra == 'llms'
|
|
42
|
+
Requires-Dist: together>=0.2.10; extra == 'llms'
|
|
43
|
+
Requires-Dist: vertexai>=0.1.0; extra == 'llms'
|
|
44
|
+
Provides-Extra: test
|
|
45
|
+
Requires-Dist: pytest-asyncio>=0.23.7; extra == 'test'
|
|
46
|
+
Requires-Dist: pytest-mock>=3.14.0; extra == 'test'
|
|
47
|
+
Requires-Dist: pytest>=8.2.2; extra == 'test'
|
|
48
|
+
Provides-Extra: vector-stores
|
|
49
|
+
Requires-Dist: azure-identity>=1.24.0; extra == 'vector-stores'
|
|
50
|
+
Requires-Dist: azure-search-documents>=11.4.0b8; extra == 'vector-stores'
|
|
51
|
+
Requires-Dist: chromadb>=0.4.24; extra == 'vector-stores'
|
|
52
|
+
Requires-Dist: databricks-sdk>=0.63.0; extra == 'vector-stores'
|
|
53
|
+
Requires-Dist: faiss-cpu>=1.7.4; extra == 'vector-stores'
|
|
54
|
+
Requires-Dist: pinecone-text>=0.10.0; extra == 'vector-stores'
|
|
55
|
+
Requires-Dist: pinecone<=7.3.0; extra == 'vector-stores'
|
|
56
|
+
Requires-Dist: psycopg>=3.2.8; extra == 'vector-stores'
|
|
57
|
+
Requires-Dist: pymochow>=2.2.9; extra == 'vector-stores'
|
|
58
|
+
Requires-Dist: pymongo>=4.13.2; extra == 'vector-stores'
|
|
59
|
+
Requires-Dist: upstash-vector>=0.1.0; extra == 'vector-stores'
|
|
60
|
+
Requires-Dist: vecs>=0.4.0; extra == 'vector-stores'
|
|
61
|
+
Requires-Dist: weaviate-client<4.15.0,>=4.4.0; extra == 'vector-stores'
|
|
62
|
+
Description-Content-Type: text/markdown
|
|
63
|
+
|
|
64
|
+
# Mem0 - Azure Enhanced Fork
|
|
65
|
+
|
|
66
|
+
This repository is an enhanced fork of [mem0ai/mem0](https://github.com/mem0ai/mem0.git) that provides enterprise-grade improvements for Azure environments and production deployments.
|
|
67
|
+
|
|
68
|
+
## 🚀 Key Enhancements
|
|
69
|
+
|
|
70
|
+
### 1. Azure Entra ID Authentication
|
|
71
|
+
- **Azure AI Search**: Support for Azure Entra ID (Azure AD) authentication using [`DefaultAzureCredential`](mem0/vector_stores/azure_ai_search.py:114)
|
|
72
|
+
- **Azure OpenAI**: Seamless Entra ID integration for both LLM and embedding services using [`DefaultAzureCredential`](mem0/llms/azure_openai.py:37)
|
|
73
|
+
- **Simplified Authentication**: No need to manage API keys when using managed identities or service principals
|
|
74
|
+
|
|
75
|
+
### 2. MySQL Database Support
|
|
76
|
+
- **Production-Ready**: Replace SQLite3 with enterprise-grade [`MySQL`](mem0/dbs/mysql.py:18) for scalable memory history storage
|
|
77
|
+
- **Connection Pooling**: Built-in connection pooling and SSL support for secure connections
|
|
78
|
+
- **Migration Support**: Automatic schema migration from existing SQLite databases
|
|
79
|
+
- **Thread-Safe**: Thread-safe operations with proper connection management
|
|
80
|
+
|
|
81
|
+
## 📦 Installation
|
|
82
|
+
|
|
83
|
+
Install the enhanced package with Azure and MySQL dependencies:
|
|
84
|
+
|
|
85
|
+
```bash
|
|
86
|
+
pip install mem0ai-azure-mysql
|
|
87
|
+
```
|
|
@@ -0,0 +1,24 @@
|
|
|
1
|
+
# Mem0 - Azure Enhanced Fork
|
|
2
|
+
|
|
3
|
+
This repository is an enhanced fork of [mem0ai/mem0](https://github.com/mem0ai/mem0.git) that provides enterprise-grade improvements for Azure environments and production deployments.
|
|
4
|
+
|
|
5
|
+
## 🚀 Key Enhancements
|
|
6
|
+
|
|
7
|
+
### 1. Azure Entra ID Authentication
|
|
8
|
+
- **Azure AI Search**: Support for Azure Entra ID (Azure AD) authentication using [`DefaultAzureCredential`](mem0/vector_stores/azure_ai_search.py:114)
|
|
9
|
+
- **Azure OpenAI**: Seamless Entra ID integration for both LLM and embedding services using [`DefaultAzureCredential`](mem0/llms/azure_openai.py:37)
|
|
10
|
+
- **Simplified Authentication**: No need to manage API keys when using managed identities or service principals
|
|
11
|
+
|
|
12
|
+
### 2. MySQL Database Support
|
|
13
|
+
- **Production-Ready**: Replace SQLite3 with enterprise-grade [`MySQL`](mem0/dbs/mysql.py:18) for scalable memory history storage
|
|
14
|
+
- **Connection Pooling**: Built-in connection pooling and SSL support for secure connections
|
|
15
|
+
- **Migration Support**: Automatic schema migration from existing SQLite databases
|
|
16
|
+
- **Thread-Safe**: Thread-safe operations with proper connection management
|
|
17
|
+
|
|
18
|
+
## 📦 Installation
|
|
19
|
+
|
|
20
|
+
Install the enhanced package with Azure and MySQL dependencies:
|
|
21
|
+
|
|
22
|
+
```bash
|
|
23
|
+
pip install mem0ai-azure-mysql
|
|
24
|
+
```
|
|
@@ -267,10 +267,14 @@ class MemoryClient:
|
|
|
267
267
|
Update a memory by ID.
|
|
268
268
|
Args:
|
|
269
269
|
memory_id (str): Memory ID.
|
|
270
|
-
text (str, optional):
|
|
270
|
+
text (str, optional): New content to update the memory with.
|
|
271
271
|
metadata (dict, optional): Metadata to update in the memory.
|
|
272
|
+
|
|
272
273
|
Returns:
|
|
273
274
|
Dict[str, Any]: The response from the server.
|
|
275
|
+
|
|
276
|
+
Example:
|
|
277
|
+
>>> client.update(memory_id="mem_123", text="Likes to play tennis on weekends")
|
|
274
278
|
"""
|
|
275
279
|
if text is None and metadata is None:
|
|
276
280
|
raise ValueError("Either text or metadata must be provided for update.")
|
|
@@ -447,16 +451,13 @@ class MemoryClient:
|
|
|
447
451
|
"""Batch update memories.
|
|
448
452
|
|
|
449
453
|
Args:
|
|
450
|
-
memories: List of memory dictionaries to update. Each dictionary
|
|
451
|
-
must contain:
|
|
454
|
+
memories: List of memory dictionaries to update. Each dictionary must contain:
|
|
452
455
|
- memory_id (str): ID of the memory to update
|
|
453
|
-
- text (str): New text content for the memory
|
|
456
|
+
- text (str, optional): New text content for the memory
|
|
457
|
+
- metadata (dict, optional): New metadata for the memory
|
|
454
458
|
|
|
455
459
|
Returns:
|
|
456
|
-
str:
|
|
457
|
-
|
|
458
|
-
Raises:
|
|
459
|
-
APIError: If the API request fails.
|
|
460
|
+
Dict[str, Any]: The response from the server.
|
|
460
461
|
"""
|
|
461
462
|
response = self.client.put("/v1/batch/", json={"memories": memories})
|
|
462
463
|
response.raise_for_status()
|
|
@@ -1057,13 +1058,18 @@ class AsyncMemoryClient:
|
|
|
1057
1058
|
self, memory_id: str, text: Optional[str] = None, metadata: Optional[Dict[str, Any]] = None
|
|
1058
1059
|
) -> Dict[str, Any]:
|
|
1059
1060
|
"""
|
|
1060
|
-
Update a memory by ID.
|
|
1061
|
+
Update a memory by ID asynchronously.
|
|
1062
|
+
|
|
1061
1063
|
Args:
|
|
1062
1064
|
memory_id (str): Memory ID.
|
|
1063
|
-
text (str, optional):
|
|
1065
|
+
text (str, optional): New content to update the memory with.
|
|
1064
1066
|
metadata (dict, optional): Metadata to update in the memory.
|
|
1067
|
+
|
|
1065
1068
|
Returns:
|
|
1066
1069
|
Dict[str, Any]: The response from the server.
|
|
1070
|
+
|
|
1071
|
+
Example:
|
|
1072
|
+
>>> await client.update(memory_id="mem_123", text="Likes to play tennis on weekends")
|
|
1067
1073
|
"""
|
|
1068
1074
|
if text is None and metadata is None:
|
|
1069
1075
|
raise ValueError("Either text or metadata must be provided for update.")
|
|
@@ -1232,16 +1238,13 @@ class AsyncMemoryClient:
|
|
|
1232
1238
|
"""Batch update memories.
|
|
1233
1239
|
|
|
1234
1240
|
Args:
|
|
1235
|
-
memories: List of memory dictionaries to update. Each dictionary
|
|
1236
|
-
must contain:
|
|
1241
|
+
memories: List of memory dictionaries to update. Each dictionary must contain:
|
|
1237
1242
|
- memory_id (str): ID of the memory to update
|
|
1238
|
-
- text (str): New text content for the memory
|
|
1243
|
+
- text (str, optional): New text content for the memory
|
|
1244
|
+
- metadata (dict, optional): New metadata for the memory
|
|
1239
1245
|
|
|
1240
1246
|
Returns:
|
|
1241
|
-
str:
|
|
1242
|
-
|
|
1243
|
-
Raises:
|
|
1244
|
-
APIError: If the API request fails.
|
|
1247
|
+
Dict[str, Any]: The response from the server.
|
|
1245
1248
|
"""
|
|
1246
1249
|
response = await self.async_client.put("/v1/batch/", json={"memories": memories})
|
|
1247
1250
|
response.raise_for_status()
|
|
@@ -78,13 +78,27 @@ class AzureConfig(BaseModel):
|
|
|
78
78
|
default_headers (Dict[str, str]): Headers to include in requests to the Azure API.
|
|
79
79
|
"""
|
|
80
80
|
|
|
81
|
-
api_key: str = Field(
|
|
81
|
+
api_key: str | None = Field(
|
|
82
82
|
description="The API key used for authenticating with the Azure service.",
|
|
83
83
|
default=None,
|
|
84
84
|
)
|
|
85
|
-
|
|
86
|
-
|
|
87
|
-
|
|
85
|
+
azure_ad_token: str | None = Field(
|
|
86
|
+
description="The Azure AD token used for authentication.",
|
|
87
|
+
default=None,
|
|
88
|
+
)
|
|
89
|
+
azure_deployment: str | None = Field(
|
|
90
|
+
description="The name of the Azure deployment.",
|
|
91
|
+
default=None,
|
|
92
|
+
)
|
|
93
|
+
azure_endpoint: str | None = Field(
|
|
94
|
+
description="The endpoint URL for the Azure service.",
|
|
95
|
+
default=None,
|
|
96
|
+
)
|
|
97
|
+
api_version: str | None = Field(
|
|
98
|
+
description="The version of the Azure API being used.",
|
|
99
|
+
default=None
|
|
100
|
+
)
|
|
88
101
|
default_headers: Optional[Dict[str, str]] = Field(
|
|
89
|
-
description="Headers to include in requests to the Azure API.",
|
|
102
|
+
description="Headers to include in requests to the Azure API.",
|
|
103
|
+
default=None
|
|
90
104
|
)
|
|
@@ -0,0 +1,56 @@
|
|
|
1
|
+
from typing import Optional
|
|
2
|
+
|
|
3
|
+
from mem0.configs.llms.base import BaseLlmConfig
|
|
4
|
+
|
|
5
|
+
|
|
6
|
+
class AnthropicConfig(BaseLlmConfig):
|
|
7
|
+
"""
|
|
8
|
+
Configuration class for Anthropic-specific parameters.
|
|
9
|
+
Inherits from BaseLlmConfig and adds Anthropic-specific settings.
|
|
10
|
+
"""
|
|
11
|
+
|
|
12
|
+
def __init__(
|
|
13
|
+
self,
|
|
14
|
+
# Base parameters
|
|
15
|
+
model: Optional[str] = None,
|
|
16
|
+
temperature: float = 0.1,
|
|
17
|
+
api_key: Optional[str] = None,
|
|
18
|
+
max_tokens: int = 2000,
|
|
19
|
+
top_p: float = 0.1,
|
|
20
|
+
top_k: int = 1,
|
|
21
|
+
enable_vision: bool = False,
|
|
22
|
+
vision_details: Optional[str] = "auto",
|
|
23
|
+
http_client_proxies: Optional[dict] = None,
|
|
24
|
+
# Anthropic-specific parameters
|
|
25
|
+
anthropic_base_url: Optional[str] = None,
|
|
26
|
+
):
|
|
27
|
+
"""
|
|
28
|
+
Initialize Anthropic configuration.
|
|
29
|
+
|
|
30
|
+
Args:
|
|
31
|
+
model: Anthropic model to use, defaults to None
|
|
32
|
+
temperature: Controls randomness, defaults to 0.1
|
|
33
|
+
api_key: Anthropic API key, defaults to None
|
|
34
|
+
max_tokens: Maximum tokens to generate, defaults to 2000
|
|
35
|
+
top_p: Nucleus sampling parameter, defaults to 0.1
|
|
36
|
+
top_k: Top-k sampling parameter, defaults to 1
|
|
37
|
+
enable_vision: Enable vision capabilities, defaults to False
|
|
38
|
+
vision_details: Vision detail level, defaults to "auto"
|
|
39
|
+
http_client_proxies: HTTP client proxy settings, defaults to None
|
|
40
|
+
anthropic_base_url: Anthropic API base URL, defaults to None
|
|
41
|
+
"""
|
|
42
|
+
# Initialize base parameters
|
|
43
|
+
super().__init__(
|
|
44
|
+
model=model,
|
|
45
|
+
temperature=temperature,
|
|
46
|
+
api_key=api_key,
|
|
47
|
+
max_tokens=max_tokens,
|
|
48
|
+
top_p=top_p,
|
|
49
|
+
top_k=top_k,
|
|
50
|
+
enable_vision=enable_vision,
|
|
51
|
+
vision_details=vision_details,
|
|
52
|
+
http_client_proxies=http_client_proxies,
|
|
53
|
+
)
|
|
54
|
+
|
|
55
|
+
# Anthropic-specific parameters
|
|
56
|
+
self.anthropic_base_url = anthropic_base_url
|
|
@@ -0,0 +1,191 @@
|
|
|
1
|
+
from typing import Optional, Dict, Any, List
|
|
2
|
+
from mem0.configs.llms.base import BaseLlmConfig
|
|
3
|
+
import os
|
|
4
|
+
|
|
5
|
+
|
|
6
|
+
class AWSBedrockConfig(BaseLlmConfig):
|
|
7
|
+
"""
|
|
8
|
+
Configuration class for AWS Bedrock LLM integration.
|
|
9
|
+
|
|
10
|
+
Supports all available Bedrock models with automatic provider detection.
|
|
11
|
+
"""
|
|
12
|
+
|
|
13
|
+
def __init__(
|
|
14
|
+
self,
|
|
15
|
+
model: Optional[str] = None,
|
|
16
|
+
temperature: float = 0.1,
|
|
17
|
+
max_tokens: int = 2000,
|
|
18
|
+
top_p: float = 0.9,
|
|
19
|
+
top_k: int = 1,
|
|
20
|
+
aws_access_key_id: Optional[str] = None,
|
|
21
|
+
aws_secret_access_key: Optional[str] = None,
|
|
22
|
+
aws_region: str = "us-west-2",
|
|
23
|
+
aws_session_token: Optional[str] = None,
|
|
24
|
+
aws_profile: Optional[str] = None,
|
|
25
|
+
model_kwargs: Optional[Dict[str, Any]] = None,
|
|
26
|
+
**kwargs,
|
|
27
|
+
):
|
|
28
|
+
"""
|
|
29
|
+
Initialize AWS Bedrock configuration.
|
|
30
|
+
|
|
31
|
+
Args:
|
|
32
|
+
model: Bedrock model identifier (e.g., "amazon.nova-3-mini-20241119-v1:0")
|
|
33
|
+
temperature: Controls randomness (0.0 to 2.0)
|
|
34
|
+
max_tokens: Maximum tokens to generate
|
|
35
|
+
top_p: Nucleus sampling parameter (0.0 to 1.0)
|
|
36
|
+
top_k: Top-k sampling parameter (1 to 40)
|
|
37
|
+
aws_access_key_id: AWS access key (optional, uses env vars if not provided)
|
|
38
|
+
aws_secret_access_key: AWS secret key (optional, uses env vars if not provided)
|
|
39
|
+
aws_region: AWS region for Bedrock service
|
|
40
|
+
aws_session_token: AWS session token for temporary credentials
|
|
41
|
+
aws_profile: AWS profile name for credentials
|
|
42
|
+
model_kwargs: Additional model-specific parameters
|
|
43
|
+
**kwargs: Additional arguments passed to base class
|
|
44
|
+
"""
|
|
45
|
+
super().__init__(
|
|
46
|
+
model=model or "anthropic.claude-3-5-sonnet-20240620-v1:0",
|
|
47
|
+
temperature=temperature,
|
|
48
|
+
max_tokens=max_tokens,
|
|
49
|
+
top_p=top_p,
|
|
50
|
+
top_k=top_k,
|
|
51
|
+
**kwargs,
|
|
52
|
+
)
|
|
53
|
+
|
|
54
|
+
self.aws_access_key_id = aws_access_key_id
|
|
55
|
+
self.aws_secret_access_key = aws_secret_access_key
|
|
56
|
+
self.aws_region = aws_region
|
|
57
|
+
self.aws_session_token = aws_session_token
|
|
58
|
+
self.aws_profile = aws_profile
|
|
59
|
+
self.model_kwargs = model_kwargs or {}
|
|
60
|
+
|
|
61
|
+
@property
|
|
62
|
+
def provider(self) -> str:
|
|
63
|
+
"""Get the provider from the model identifier."""
|
|
64
|
+
if not self.model or "." not in self.model:
|
|
65
|
+
return "unknown"
|
|
66
|
+
return self.model.split(".")[0]
|
|
67
|
+
|
|
68
|
+
@property
|
|
69
|
+
def model_name(self) -> str:
|
|
70
|
+
"""Get the model name without provider prefix."""
|
|
71
|
+
if not self.model or "." not in self.model:
|
|
72
|
+
return self.model
|
|
73
|
+
return ".".join(self.model.split(".")[1:])
|
|
74
|
+
|
|
75
|
+
def get_model_config(self) -> Dict[str, Any]:
|
|
76
|
+
"""Get model-specific configuration parameters."""
|
|
77
|
+
base_config = {
|
|
78
|
+
"temperature": self.temperature,
|
|
79
|
+
"max_tokens": self.max_tokens,
|
|
80
|
+
"top_p": self.top_p,
|
|
81
|
+
"top_k": self.top_k,
|
|
82
|
+
}
|
|
83
|
+
|
|
84
|
+
# Add custom model kwargs
|
|
85
|
+
base_config.update(self.model_kwargs)
|
|
86
|
+
|
|
87
|
+
return base_config
|
|
88
|
+
|
|
89
|
+
def get_aws_config(self) -> Dict[str, Any]:
|
|
90
|
+
"""Get AWS configuration parameters."""
|
|
91
|
+
config = {
|
|
92
|
+
"region_name": self.aws_region,
|
|
93
|
+
}
|
|
94
|
+
|
|
95
|
+
if self.aws_access_key_id:
|
|
96
|
+
config["aws_access_key_id"] = self.aws_access_key_id or os.getenv("AWS_ACCESS_KEY_ID")
|
|
97
|
+
|
|
98
|
+
if self.aws_secret_access_key:
|
|
99
|
+
config["aws_secret_access_key"] = self.aws_secret_access_key or os.getenv("AWS_SECRET_ACCESS_KEY")
|
|
100
|
+
|
|
101
|
+
if self.aws_session_token:
|
|
102
|
+
config["aws_session_token"] = self.aws_session_token or os.getenv("AWS_SESSION_TOKEN")
|
|
103
|
+
|
|
104
|
+
if self.aws_profile:
|
|
105
|
+
config["profile_name"] = self.aws_profile or os.getenv("AWS_PROFILE")
|
|
106
|
+
|
|
107
|
+
return config
|
|
108
|
+
|
|
109
|
+
def validate_model_format(self) -> bool:
|
|
110
|
+
"""
|
|
111
|
+
Validate that the model identifier follows Bedrock naming convention.
|
|
112
|
+
|
|
113
|
+
Returns:
|
|
114
|
+
True if valid, False otherwise
|
|
115
|
+
"""
|
|
116
|
+
if not self.model:
|
|
117
|
+
return False
|
|
118
|
+
|
|
119
|
+
# Check if model follows provider.model-name format
|
|
120
|
+
if "." not in self.model:
|
|
121
|
+
return False
|
|
122
|
+
|
|
123
|
+
provider, model_name = self.model.split(".", 1)
|
|
124
|
+
|
|
125
|
+
# Validate provider
|
|
126
|
+
valid_providers = [
|
|
127
|
+
"ai21", "amazon", "anthropic", "cohere", "meta", "mistral",
|
|
128
|
+
"stability", "writer", "deepseek", "gpt-oss", "perplexity",
|
|
129
|
+
"snowflake", "titan", "command", "j2", "llama"
|
|
130
|
+
]
|
|
131
|
+
|
|
132
|
+
if provider not in valid_providers:
|
|
133
|
+
return False
|
|
134
|
+
|
|
135
|
+
# Validate model name is not empty
|
|
136
|
+
if not model_name:
|
|
137
|
+
return False
|
|
138
|
+
|
|
139
|
+
return True
|
|
140
|
+
|
|
141
|
+
def get_supported_regions(self) -> List[str]:
|
|
142
|
+
"""Get list of AWS regions that support Bedrock."""
|
|
143
|
+
return [
|
|
144
|
+
"us-east-1",
|
|
145
|
+
"us-west-2",
|
|
146
|
+
"us-east-2",
|
|
147
|
+
"eu-west-1",
|
|
148
|
+
"ap-southeast-1",
|
|
149
|
+
"ap-northeast-1",
|
|
150
|
+
]
|
|
151
|
+
|
|
152
|
+
def get_model_capabilities(self) -> Dict[str, Any]:
    """
    Return a capability-flag map for the configured provider.

    Returns:
        Dict with boolean flags ``supports_tools``, ``supports_vision``,
        ``supports_streaming`` and ``supports_multimodal``. Unknown
        providers get all flags False.
    """
    # Flags enabled per provider; anything not listed keeps the False default.
    provider_features = {
        "anthropic": ("supports_tools", "supports_vision", "supports_streaming", "supports_multimodal"),
        "amazon": ("supports_tools", "supports_vision", "supports_streaming", "supports_multimodal"),
        "cohere": ("supports_tools", "supports_streaming"),
        "meta": ("supports_vision", "supports_streaming"),
        "mistral": ("supports_vision", "supports_streaming"),
    }

    capabilities: Dict[str, Any] = {
        "supports_tools": False,
        "supports_vision": False,
        "supports_streaming": False,
        "supports_multimodal": False,
    }
    for feature in provider_features.get(self.provider, ()):
        capabilities[feature] = True
    return capabilities
|
|
@@ -0,0 +1,57 @@
|
|
|
1
|
+
from typing import Any, Dict, Optional
|
|
2
|
+
|
|
3
|
+
from mem0.configs.base import AzureConfig
|
|
4
|
+
from mem0.configs.llms.base import BaseLlmConfig
|
|
5
|
+
|
|
6
|
+
|
|
7
|
+
class AzureOpenAIConfig(BaseLlmConfig):
    """
    Configuration for the Azure OpenAI provider.

    Extends BaseLlmConfig with an ``azure_kwargs`` bundle that is parsed
    into an AzureConfig instance.
    """

    def __init__(
        self,
        # Common LLM parameters (forwarded unchanged to BaseLlmConfig)
        model: Optional[str] = None,
        temperature: float = 0.1,
        api_key: Optional[str] = None,
        max_tokens: int = 2000,
        top_p: float = 0.1,
        top_k: int = 1,
        enable_vision: bool = False,
        vision_details: Optional[str] = "auto",
        http_client_proxies: Optional[dict] = None,
        # Azure OpenAI-specific parameters
        azure_kwargs: Optional[Dict[str, Any]] = None,
    ):
        """
        Initialize Azure OpenAI configuration.

        Args:
            model: Azure OpenAI model to use, defaults to None
            temperature: Controls randomness, defaults to 0.1
            api_key: Azure OpenAI API key, defaults to None
            max_tokens: Maximum tokens to generate, defaults to 2000
            top_p: Nucleus sampling parameter, defaults to 0.1
            top_k: Top-k sampling parameter, defaults to 1
            enable_vision: Enable vision capabilities, defaults to False
            vision_details: Vision detail level, defaults to "auto"
            http_client_proxies: HTTP client proxy settings, defaults to None
            azure_kwargs: Azure-specific configuration dict, defaults to None
        """
        common_settings = dict(
            model=model,
            temperature=temperature,
            api_key=api_key,
            max_tokens=max_tokens,
            top_p=top_p,
            top_k=top_k,
            enable_vision=enable_vision,
            vision_details=vision_details,
            http_client_proxies=http_client_proxies,
        )
        super().__init__(**common_settings)

        # Azure-specific settings are normalized through AzureConfig;
        # an absent dict yields an AzureConfig built from its defaults.
        self.azure_kwargs = AzureConfig(**(azure_kwargs or {}))
|
|
@@ -0,0 +1,62 @@
|
|
|
1
|
+
from abc import ABC
|
|
2
|
+
from typing import Dict, Optional, Union
|
|
3
|
+
|
|
4
|
+
import httpx
|
|
5
|
+
|
|
6
|
+
|
|
7
|
+
class BaseLlmConfig(ABC):
    """
    Provider-agnostic LLM configuration holding only the parameters shared
    by every provider.

    Provider-specific settings live in dedicated subclasses (e.g. Azure,
    DeepSeek); this base carries the common generation knobs and an
    optional proxied HTTP client.
    """

    def __init__(
        self,
        model: Optional[Union[str, Dict]] = None,
        temperature: float = 0.1,
        api_key: Optional[str] = None,
        max_tokens: int = 2000,
        top_p: float = 0.1,
        top_k: int = 1,
        enable_vision: bool = False,
        vision_details: Optional[str] = "auto",
        http_client_proxies: Optional[Union[Dict, str]] = None,
    ):
        """
        Initialize the shared LLM configuration.

        Args:
            model: Model identifier (e.g. "gpt-4o-mini",
                "claude-3-5-sonnet-20240620"); None lets a provider-specific
                config supply it.
            temperature: Output randomness; higher is more random.
                Range 0.0–2.0, defaults to 0.1.
            api_key: Provider API key; None defers to environment variables.
            max_tokens: Maximum tokens in the response; defaults to 2000.
            top_p: Nucleus sampling parameter, range 0.0–1.0; defaults to 0.1.
            top_k: Top-k sampling parameter, range 1–40; defaults to 1.
            enable_vision: Enable vision capabilities on vision-capable
                models; defaults to False.
            vision_details: Vision detail level ("low", "high", "auto");
                defaults to "auto".
            http_client_proxies: Proxy settings (dict or string) for the
                HTTP client; defaults to None.
        """
        # Common generation parameters shared by every provider.
        self.model = model
        self.temperature = temperature
        self.api_key = api_key
        self.max_tokens = max_tokens
        self.top_p = top_p
        self.top_k = top_k
        self.enable_vision = enable_vision
        self.vision_details = vision_details

        # Build a proxied HTTP client only when proxy settings are supplied.
        # NOTE(review): httpx deprecated and later removed the `proxies`
        # argument (newer releases use `proxy`/`mounts`) — confirm the
        # pinned httpx version still accepts it.
        if http_client_proxies:
            self.http_client = httpx.Client(proxies=http_client_proxies)
        else:
            self.http_client = None
|
|
@@ -0,0 +1,56 @@
|
|
|
1
|
+
from typing import Optional
|
|
2
|
+
|
|
3
|
+
from mem0.configs.llms.base import BaseLlmConfig
|
|
4
|
+
|
|
5
|
+
|
|
6
|
+
class DeepSeekConfig(BaseLlmConfig):
    """
    Configuration for the DeepSeek provider.

    Extends BaseLlmConfig with an optional custom API base URL.
    """

    def __init__(
        self,
        # Common LLM parameters (forwarded unchanged to BaseLlmConfig)
        model: Optional[str] = None,
        temperature: float = 0.1,
        api_key: Optional[str] = None,
        max_tokens: int = 2000,
        top_p: float = 0.1,
        top_k: int = 1,
        enable_vision: bool = False,
        vision_details: Optional[str] = "auto",
        http_client_proxies: Optional[dict] = None,
        # DeepSeek-specific parameters
        deepseek_base_url: Optional[str] = None,
    ):
        """
        Initialize DeepSeek configuration.

        Args:
            model: DeepSeek model to use, defaults to None
            temperature: Controls randomness, defaults to 0.1
            api_key: DeepSeek API key, defaults to None
            max_tokens: Maximum tokens to generate, defaults to 2000
            top_p: Nucleus sampling parameter, defaults to 0.1
            top_k: Top-k sampling parameter, defaults to 1
            enable_vision: Enable vision capabilities, defaults to False
            vision_details: Vision detail level, defaults to "auto"
            http_client_proxies: HTTP client proxy settings, defaults to None
            deepseek_base_url: DeepSeek API base URL, defaults to None
        """
        shared_settings = dict(
            model=model,
            temperature=temperature,
            api_key=api_key,
            max_tokens=max_tokens,
            top_p=top_p,
            top_k=top_k,
            enable_vision=enable_vision,
            vision_details=vision_details,
            http_client_proxies=http_client_proxies,
        )
        super().__init__(**shared_settings)

        # Endpoint override; None means the client's default DeepSeek URL.
        self.deepseek_base_url = deepseek_base_url
|