mem0-open-mcp 0.1.0 (tar.gz)

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,19 @@
+ # mem0-server environment variables
+
+ # OpenAI (default provider)
+ OPENAI_API_KEY=your_openai_api_key_here
+
+ # Anthropic (optional)
+ # ANTHROPIC_API_KEY=your_anthropic_api_key_here
+
+ # Ollama (for local LLM)
+ # No API key needed; just set base_url in the config
+
+ # Vector Store (cloud options)
+ # QDRANT_API_KEY=your_qdrant_cloud_api_key
+ # PINECONE_API_KEY=your_pinecone_api_key
+
+ # Server settings (can also be set in config file)
+ # MEM0_SERVER_HOST=0.0.0.0
+ # MEM0_SERVER_PORT=8765
+ # MEM0_SERVER_USER_ID=default
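These variables are read by the server at startup. Below is a minimal sketch of how the `MEM0_SERVER_*` settings and the `.env` file could be consumed with `python-dotenv` and `pydantic-settings` (both declared dependencies of the package); the `ServerSettings` class and its prefix handling are illustrative assumptions, not the package's actual settings module.

```python
# Illustrative sketch: a settings class mirroring the MEM0_SERVER_* variables above.
from dotenv import load_dotenv
from pydantic_settings import BaseSettings, SettingsConfigDict


class ServerSettings(BaseSettings):
    """Hypothetical settings model; fields mirror MEM0_SERVER_HOST/_PORT/_USER_ID."""

    model_config = SettingsConfigDict(env_prefix="MEM0_SERVER_", env_file=".env")

    host: str = "0.0.0.0"
    port: int = 8765
    user_id: str = "default"


load_dotenv()                # load OPENAI_API_KEY etc. into the process environment
settings = ServerSettings()  # MEM0_SERVER_* variables override the defaults above
print(settings.host, settings.port, settings.user_id)
```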
@@ -0,0 +1,62 @@
+ # Python
+ __pycache__/
+ *.py[cod]
+ *$py.class
+ *.so
+ .Python
+ build/
+ develop-eggs/
+ dist/
+ downloads/
+ eggs/
+ .eggs/
+ lib/
+ lib64/
+ parts/
+ sdist/
+ var/
+ wheels/
+ *.egg-info/
+ .installed.cfg
+ *.egg
+
+ # Virtual environments
+ .venv/
+ venv/
+ ENV/
+ env/
+
+ # IDE
+ .idea/
+ .vscode/
+ *.swp
+ *.swo
+ *~
+
+ # Testing
+ .tox/
+ .nox/
+ .coverage
+ .coverage.*
+ htmlcov/
+ .pytest_cache/
+ .mypy_cache/
+
+ # Ruff
+ .ruff_cache/
+
+ # Environment
+ .env
+ .env.local
+
+ # Config files with secrets
+ mem0-server.yaml
+ !mem0-server.example.yaml
+
+ # Docker
+ .docker/
+
+ # OS
+ .DS_Store
+ Thumbs.db
+ mem0-open-mcp.yaml
@@ -0,0 +1,29 @@
+ FROM python:3.11-slim
+
+ WORKDIR /app
+
+ # Install system dependencies
+ RUN apt-get update && apt-get install -y --no-install-recommends \
+     curl \
+     && rm -rf /var/lib/apt/lists/*
+
+ # Copy package files
+ COPY pyproject.toml README.md ./
+ COPY src/ ./src/
+
+ # Install package
+ RUN pip install --no-cache-dir -e .
+
+ # Copy example config
+ COPY mem0-server.example.yaml /app/mem0-server.yaml
+
+ # Expose port
+ EXPOSE 8765
+
+ # Health check
+ HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \
+     CMD curl -f http://localhost:8765/health || exit 1
+
+ # Run server (console script defined in pyproject.toml)
+ ENTRYPOINT ["mem0-open-mcp"]
+ CMD ["serve", "--config", "/app/mem0-server.yaml"]
@@ -0,0 +1,187 @@
+ Metadata-Version: 2.4
+ Name: mem0-open-mcp
+ Version: 0.1.0
+ Summary: Open-source MCP server for mem0 - local LLMs, self-hosted, Docker-free
+ Author: Alex
+ License-Expression: Apache-2.0
+ Keywords: ai,llm,local,mcp,mem0,memory,ollama,server
+ Classifier: Development Status :: 3 - Alpha
+ Classifier: Intended Audience :: Developers
+ Classifier: License :: OSI Approved :: Apache Software License
+ Classifier: Programming Language :: Python :: 3
+ Classifier: Programming Language :: Python :: 3.10
+ Classifier: Programming Language :: Python :: 3.11
+ Classifier: Programming Language :: Python :: 3.12
+ Requires-Python: >=3.10
+ Requires-Dist: fastapi>=0.115.0
+ Requires-Dist: httpx>=0.27.0
+ Requires-Dist: mcp>=1.0.0
+ Requires-Dist: mem0ai>=1.0.0
+ Requires-Dist: pydantic-settings>=2.0.0
+ Requires-Dist: pydantic>=2.0.0
+ Requires-Dist: python-dotenv>=1.0.0
+ Requires-Dist: pyyaml>=6.0.0
+ Requires-Dist: rich>=13.0.0
+ Requires-Dist: typer[all]>=0.12.0
+ Requires-Dist: uvicorn[standard]>=0.32.0
+ Provides-Extra: chroma
+ Requires-Dist: chromadb>=0.5.0; extra == 'chroma'
+ Provides-Extra: dev
+ Requires-Dist: mypy>=1.13.0; extra == 'dev'
+ Requires-Dist: pytest-asyncio>=0.24.0; extra == 'dev'
+ Requires-Dist: pytest>=8.0.0; extra == 'dev'
+ Requires-Dist: ruff>=0.8.0; extra == 'dev'
+ Provides-Extra: ollama
+ Requires-Dist: ollama>=0.4.0; extra == 'ollama'
+ Provides-Extra: pinecone
+ Requires-Dist: pinecone>=5.0.0; extra == 'pinecone'
+ Provides-Extra: qdrant
+ Requires-Dist: qdrant-client>=1.12.0; extra == 'qdrant'
+ Description-Content-Type: text/markdown
+
+ # mem0-open-mcp
+
+ Open-source MCP server for [mem0](https://mem0.ai) — **local LLMs, self-hosted, Docker-free**.
+
+ Created because the official `mem0-mcp` configuration wasn't working properly for my setup.
+
+ ## Features
+
+ - **Local LLMs**: Ollama (recommended), LMStudio*, or any OpenAI-compatible API
+ - **Self-hosted**: Your data stays on your infrastructure
+ - **Docker-free**: Simple `pip install` + CLI
+ - **Flexible**: YAML config with environment variable support
+ - **Multiple Vector Stores**: Qdrant, Chroma, Pinecone, and more
+
+ > *LMStudio requires a JSON-mode-compatible model
+
+ ## Quick Start
+
+ ### Installation
+
+ Install from source:
+
+ ```bash
+ git clone https://github.com/yourname/mem0-open-mcp.git
+ cd mem0-open-mcp
+ pip install -e .
+ ```
+
+ ### Usage
+
+ ```bash
+ # Create default config
+ mem0-open-mcp init
+
+ # Interactive configuration wizard
+ mem0-open-mcp configure
+
+ # Start the server
+ mem0-open-mcp serve
+
+ # With options
+ mem0-open-mcp serve --port 8765 --user-id alice
+ ```
+
+ ## Configuration
+
+ Create `mem0-open-mcp.yaml`:
+
+ ```yaml
+ server:
+   host: "0.0.0.0"
+   port: 8765
+   user_id: "default"
+
+ llm:
+   provider: "ollama"
+   config:
+     model: "llama3.2"
+     base_url: "http://localhost:11434"
+
+ embedder:
+   provider: "ollama"
+   config:
+     model: "nomic-embed-text"
+     base_url: "http://localhost:11434"
+     embedding_dims: 768
+
+ vector_store:
+   provider: "qdrant"
+   config:
+     collection_name: "mem0_memories"
+     host: "localhost"
+     port: 6333
+     embedding_model_dims: 768
+ ```
+
+ ### With LMStudio
+
+ > **⚠️ Note**: LMStudio requires a model that supports `response_format: json_object`.
+ > mem0 uses structured JSON output for memory extraction. If you get `response_format` errors,
+ > use Ollama instead or select a model with JSON mode support in LMStudio.
+
+ ```yaml
+ llm:
+   provider: "openai"
+   config:
+     model: "your-model-name"
+     base_url: "http://localhost:1234/v1"
+
+ embedder:
+   provider: "openai"
+   config:
+     model: "your-embedding-model"
+     base_url: "http://localhost:1234/v1"
+ ```
+
+ ## MCP Integration
+
+ Connect your MCP client to:
+
+ ```
+ http://localhost:8765/mcp/<client-name>/sse/<user-id>
+ ```
+
+ ### Claude Desktop
+
+ ```json
+ {
+   "mcpServers": {
+     "mem0": {
+       "url": "http://localhost:8765/mcp/claude/sse/default"
+     }
+   }
+ }
+ ```
+
+ ## Available MCP Tools
+
+ | Tool | Description |
+ |------|-------------|
+ | `add_memories` | Store new memories from text |
+ | `search_memory` | Search memories by query |
+ | `list_memories` | List all user memories |
+ | `get_memory` | Get a specific memory by ID |
+ | `delete_memories` | Delete memories by IDs |
+ | `delete_all_memories` | Delete all user memories |
+
+ ## API Endpoints
+
+ | Endpoint | Method | Description |
+ |----------|--------|-------------|
+ | `/health` | GET | Health check |
+ | `/api/v1/status` | GET | Server status |
+ | `/api/v1/config` | GET/PUT | Configuration |
+ | `/api/v1/memories` | GET/POST/DELETE | Memory operations |
+ | `/api/v1/memories/search` | POST | Search memories |
+
+ ## Requirements
+
+ - Python 3.10+
+ - Vector store (Qdrant recommended)
+ - LLM server (Ollama, LMStudio, etc.)
+
+ ## License
+
+ Apache 2.0
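A quick way to exercise the REST endpoints documented above is `curl`. The request bodies below are guesses at the payload shape, since the API schemas are not part of this diff; treat the JSON field names as assumptions.

```bash
# Health and status (documented endpoints)
curl http://localhost:8765/health
curl http://localhost:8765/api/v1/status

# Add a memory -- the JSON fields here are assumed, not taken from the package
curl -X POST http://localhost:8765/api/v1/memories \
  -H "Content-Type: application/json" \
  -d '{"text": "Alice prefers dark roast coffee", "user_id": "default"}'

# Search memories -- again, an assumed payload shape
curl -X POST http://localhost:8765/api/v1/memories/search \
  -H "Content-Type: application/json" \
  -d '{"query": "coffee preferences", "user_id": "default"}'
```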
@@ -0,0 +1,146 @@
+ # mem0-open-mcp
+
+ Open-source MCP server for [mem0](https://mem0.ai) — **local LLMs, self-hosted, Docker-free**.
+
+ Created because the official `mem0-mcp` configuration wasn't working properly for my setup.
+
+ ## Features
+
+ - **Local LLMs**: Ollama (recommended), LMStudio*, or any OpenAI-compatible API
+ - **Self-hosted**: Your data stays on your infrastructure
+ - **Docker-free**: Simple `pip install` + CLI
+ - **Flexible**: YAML config with environment variable support
+ - **Multiple Vector Stores**: Qdrant, Chroma, Pinecone, and more
+
+ > *LMStudio requires a JSON-mode-compatible model
+
+ ## Quick Start
+
+ ### Installation
+
+ Install from source:
+
+ ```bash
+ git clone https://github.com/yourname/mem0-open-mcp.git
+ cd mem0-open-mcp
+ pip install -e .
+ ```
+
+ ### Usage
+
+ ```bash
+ # Create default config
+ mem0-open-mcp init
+
+ # Interactive configuration wizard
+ mem0-open-mcp configure
+
+ # Start the server
+ mem0-open-mcp serve
+
+ # With options
+ mem0-open-mcp serve --port 8765 --user-id alice
+ ```
+
+ ## Configuration
+
+ Create `mem0-open-mcp.yaml`:
+
+ ```yaml
+ server:
+   host: "0.0.0.0"
+   port: 8765
+   user_id: "default"
+
+ llm:
+   provider: "ollama"
+   config:
+     model: "llama3.2"
+     base_url: "http://localhost:11434"
+
+ embedder:
+   provider: "ollama"
+   config:
+     model: "nomic-embed-text"
+     base_url: "http://localhost:11434"
+     embedding_dims: 768
+
+ vector_store:
+   provider: "qdrant"
+   config:
+     collection_name: "mem0_memories"
+     host: "localhost"
+     port: 6333
+     embedding_model_dims: 768
+ ```
+
+ ### With LMStudio
+
+ > **⚠️ Note**: LMStudio requires a model that supports `response_format: json_object`.
+ > mem0 uses structured JSON output for memory extraction. If you get `response_format` errors,
+ > use Ollama instead or select a model with JSON mode support in LMStudio.
+
+ ```yaml
+ llm:
+   provider: "openai"
+   config:
+     model: "your-model-name"
+     base_url: "http://localhost:1234/v1"
+
+ embedder:
+   provider: "openai"
+   config:
+     model: "your-embedding-model"
+     base_url: "http://localhost:1234/v1"
+ ```
+
+ ## MCP Integration
+
+ Connect your MCP client to:
+
+ ```
+ http://localhost:8765/mcp/<client-name>/sse/<user-id>
+ ```
+
+ ### Claude Desktop
+
+ ```json
+ {
+   "mcpServers": {
+     "mem0": {
+       "url": "http://localhost:8765/mcp/claude/sse/default"
+     }
+   }
+ }
+ ```
+
+ ## Available MCP Tools
+
+ | Tool | Description |
+ |------|-------------|
+ | `add_memories` | Store new memories from text |
+ | `search_memory` | Search memories by query |
+ | `list_memories` | List all user memories |
+ | `get_memory` | Get a specific memory by ID |
+ | `delete_memories` | Delete memories by IDs |
+ | `delete_all_memories` | Delete all user memories |
+
+ ## API Endpoints
+
+ | Endpoint | Method | Description |
+ |----------|--------|-------------|
+ | `/health` | GET | Health check |
+ | `/api/v1/status` | GET | Server status |
+ | `/api/v1/config` | GET/PUT | Configuration |
+ | `/api/v1/memories` | GET/POST/DELETE | Memory operations |
+ | `/api/v1/memories/search` | POST | Search memories |
+
+ ## Requirements
+
+ - Python 3.10+
+ - Vector store (Qdrant recommended)
+ - LLM server (Ollama, LMStudio, etc.)
+
+ ## License
+
+ Apache 2.0
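Beyond Claude Desktop, the SSE endpoint can be driven directly with the `mcp` Python SDK (a declared dependency). Below is a minimal sketch, assuming the server is running locally with the default user ID; the tool name comes from the tool table above, but its argument schema is an assumption.

```python
# Illustrative MCP client sketch; assumes mem0-open-mcp is serving on localhost:8765.
import asyncio

from mcp import ClientSession
from mcp.client.sse import sse_client


async def main() -> None:
    url = "http://localhost:8765/mcp/example-client/sse/default"
    async with sse_client(url) as (read_stream, write_stream):
        async with ClientSession(read_stream, write_stream) as session:
            await session.initialize()
            tools = await session.list_tools()
            print([t.name for t in tools.tools])  # expect add_memories, search_memory, ...

            # The argument name ("query") is an assumption about the tool's schema.
            result = await session.call_tool("search_memory", {"query": "coffee"})
            print(result)


asyncio.run(main())
```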
@@ -0,0 +1,49 @@
+ version: "3.8"
+
+ services:
+   # Vector database
+   qdrant:
+     image: qdrant/qdrant:latest
+     ports:
+       - "6333:6333"
+       - "6334:6334"
+     volumes:
+       - qdrant_data:/qdrant/storage
+     restart: unless-stopped
+
+   # mem0-server
+   mem0-server:
+     build:
+       context: .
+       dockerfile: Dockerfile
+     ports:
+       - "8765:8765"
+     environment:
+       - OPENAI_API_KEY=${OPENAI_API_KEY}
+       # For Ollama, uncomment the following:
+       # - OLLAMA_HOST=http://host.docker.internal:11434
+     volumes:
+       - ./mem0-server.yaml:/app/mem0-server.yaml:ro
+     depends_on:
+       - qdrant
+     restart: unless-stopped
+     healthcheck:
+       test: ["CMD", "curl", "-f", "http://localhost:8765/health"]
+       interval: 30s
+       timeout: 10s
+       retries: 3
+       start_period: 10s
+
+   # Optional: Ollama for local LLM
+   # Uncomment to use Ollama
+   # ollama:
+   #   image: ollama/ollama:latest
+   #   ports:
+   #     - "11434:11434"
+   #   volumes:
+   #     - ollama_data:/root/.ollama
+   #   restart: unless-stopped
+
+ volumes:
+   qdrant_data:
+   # ollama_data:
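With `OPENAI_API_KEY` exported (or placed in an `.env` file next to the Compose file) and a `mem0-server.yaml` present for the bind mount, bringing the stack up is standard Compose usage:

```bash
# Start Qdrant and the server in the background
docker compose up -d --build

# Watch the server logs and confirm it is healthy
docker compose logs -f mem0-server
curl http://localhost:8765/health

# Tear everything down (add -v to also drop the qdrant_data volume)
docker compose down
```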
@@ -0,0 +1,32 @@
+ # Ollama configuration example for mem0-server
+ # Use this for fully local LLM and embedding
+
+ server:
+   host: "0.0.0.0"
+   port: 8765
+   user_id: "default"
+   log_level: "info"
+
+ llm:
+   provider: "ollama"
+   config:
+     model: "llama3.2"  # or mistral, gemma2, etc.
+     temperature: 0.1
+     max_tokens: 2000
+     base_url: "http://localhost:11434"
+
+ embedder:
+   provider: "ollama"
+   config:
+     model: "nomic-embed-text"  # or mxbai-embed-large
+     base_url: "http://localhost:11434"
+
+ vector_store:
+   provider: "qdrant"
+   config:
+     collection_name: "mem0_memories"
+     host: "localhost"
+     port: 6333
+
+ openmemory:
+   custom_instructions: null
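For this fully local configuration to work end to end, the referenced Ollama models and a Qdrant instance have to exist first. A setup sketch, assuming Ollama is already installed and Docker is available for Qdrant (a native Qdrant install works just as well if you want to stay Docker-free):

```bash
# Pull the LLM and embedding models referenced in the config
ollama pull llama3.2
ollama pull nomic-embed-text

# Start a local Qdrant instance
docker run -d --name qdrant -p 6333:6333 qdrant/qdrant

# Start the server against this config
# (the --config flag mirrors the Dockerfile's CMD; path is a placeholder)
mem0-open-mcp serve --config path/to/this-config.yaml
```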
@@ -0,0 +1,54 @@
+ # Default configuration file for mem0-server
+ # See https://docs.mem0.ai for provider options
+
+ server:
+   host: "0.0.0.0"
+   port: 8765
+   user_id: "default"
+   log_level: "info"
+   reload: false
+
+ # LLM Configuration
+ # Providers: openai, anthropic, azure_openai, ollama, together, groq,
+ # litellm, mistralai, google_ai, aws_bedrock, gemini, deepseek, xai, lmstudio
+ llm:
+   provider: "openai"
+   config:
+     model: "gpt-4o-mini"
+     temperature: 0.1
+     max_tokens: 2000
+     # Use env:VAR_NAME to read from an environment variable
+     api_key: "env:OPENAI_API_KEY"
+     # For Ollama, set base_url instead:
+     # base_url: "http://localhost:11434"
+
+ # Embedder Configuration
+ # Providers: openai, azure_openai, ollama, huggingface, vertexai, gemini,
+ # lmstudio, together, aws_bedrock
+ embedder:
+   provider: "openai"
+   config:
+     model: "text-embedding-3-small"
+     api_key: "env:OPENAI_API_KEY"
+     # For Ollama:
+     # base_url: "http://localhost:11434"
+
+ # Vector Store Configuration
+ # Providers: qdrant, chroma, pinecone, milvus, weaviate, pgvector,
+ # faiss, redis, azure_ai_search, vertex_ai_vector_search, mongodb_atlas, memory
+ vector_store:
+   provider: "qdrant"
+   config:
+     collection_name: "mem0_memories"
+     host: "localhost"
+     port: 6333
+     # For cloud services:
+     # api_key: "env:QDRANT_API_KEY"
+     # url: "https://your-cluster.qdrant.io"
+
+ # OpenMemory Settings
+ openmemory:
+   # Custom instructions for memory extraction (optional)
+   custom_instructions: null
+   # Custom category definitions (optional)
+   custom_categories: null
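The `env:VAR_NAME` convention implies that the config loader substitutes environment variables before handing the settings to mem0. That loader is not included in this diff, so the following is a hypothetical sketch of such a substitution, not the package's actual code.

```python
# Hypothetical resolver for "env:VAR_NAME" values in a YAML config.
import os
from typing import Any

import yaml


def resolve_env(value: Any) -> Any:
    """Replace 'env:NAME' strings with os.environ['NAME'], recursing into containers."""
    if isinstance(value, str) and value.startswith("env:"):
        return os.environ.get(value[len("env:"):], "")
    if isinstance(value, dict):
        return {key: resolve_env(item) for key, item in value.items()}
    if isinstance(value, list):
        return [resolve_env(item) for item in value]
    return value


with open("mem0-server.yaml") as fh:       # filename as used in the Dockerfile
    config = resolve_env(yaml.safe_load(fh))

print(config["llm"]["config"]["api_key"])  # the resolved OPENAI_API_KEY value
```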
@@ -0,0 +1,73 @@
+ [project]
+ name = "mem0-open-mcp"
+ version = "0.1.0"
+ description = "Open-source MCP server for mem0 - local LLMs, self-hosted, Docker-free"
+ readme = "README.md"
+ license = "Apache-2.0"
+ requires-python = ">=3.10"
+ authors = [
+     { name = "Alex" }
+ ]
+ keywords = ["mem0", "mcp", "memory", "llm", "ai", "server", "ollama", "local"]
+ classifiers = [
+     "Development Status :: 3 - Alpha",
+     "Intended Audience :: Developers",
+     "License :: OSI Approved :: Apache Software License",
+     "Programming Language :: Python :: 3",
+     "Programming Language :: Python :: 3.10",
+     "Programming Language :: Python :: 3.11",
+     "Programming Language :: Python :: 3.12",
+ ]
+
+ dependencies = [
+     "mem0ai>=1.0.0",
+     "typer[all]>=0.12.0",
+     "fastapi>=0.115.0",
+     "uvicorn[standard]>=0.32.0",
+     "pydantic>=2.0.0",
+     "pydantic-settings>=2.0.0",
+     "pyyaml>=6.0.0",
+     "python-dotenv>=1.0.0",
+     "rich>=13.0.0",
+     "mcp>=1.0.0",
+     "httpx>=0.27.0",
+ ]
+
+ [project.optional-dependencies]
+ dev = [
+     "pytest>=8.0.0",
+     "pytest-asyncio>=0.24.0",
+     "ruff>=0.8.0",
+     "mypy>=1.13.0",
+ ]
+ qdrant = ["qdrant-client>=1.12.0"]
+ chroma = ["chromadb>=0.5.0"]
+ pinecone = ["pinecone>=5.0.0"]
+ ollama = ["ollama>=0.4.0"]
+
+ [project.scripts]
+ mem0-open-mcp = "mem0_server.cli:app"
+
+ [build-system]
+ requires = ["hatchling"]
+ build-backend = "hatchling.build"
+
+ [tool.hatch.build.targets.wheel]
+ packages = ["src/mem0_server"]
+
+ [tool.ruff]
+ line-length = 100
+ target-version = "py310"
+
+ [tool.ruff.lint]
+ select = ["E", "F", "I", "UP", "B", "C4"]
+ ignore = ["E501"]
+
+ [tool.mypy]
+ python_version = "3.10"
+ strict = true
+ ignore_missing_imports = true
+
+ [tool.pytest.ini_options]
+ asyncio_mode = "auto"
+ testpaths = ["tests"]
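The optional-dependency groups map directly onto backends, so installation can be tailored to the chosen vector store and LLM; the commands below simply combine the extras and tool configuration declared above.

```bash
# Typical local setup: Qdrant vector store + Ollama client library
pip install "mem0-open-mcp[qdrant,ollama]"

# Contributor setup from a source checkout
pip install -e ".[dev]"
ruff check .   # lint rules from [tool.ruff.lint]
mypy src       # strict typing per [tool.mypy]
pytest         # collects from tests/ per [tool.pytest.ini_options]
```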
@@ -0,0 +1,8 @@
+ """
+ mem0-server: Standalone MCP server for mem0 with web configuration UI.
+
+ This package provides a CLI tool to run mem0 as an MCP server without Docker,
+ with optional web UI for configuration management.
+ """
+
+ __version__ = "0.1.0"
@@ -0,0 +1,5 @@
+ """API package for mem0-server."""
+
+ from mem0_server.api.routes import create_api_router
+
+ __all__ = ["create_api_router"]
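`create_api_router` is defined in `mem0_server.api.routes`, which is not included in this diff. Purely as an illustration of what a factory for the endpoints documented in the README might look like, here is a hypothetical FastAPI sketch; none of the route bodies or signatures are taken from the package.

```python
# Hypothetical reconstruction -- NOT the package's actual routes module.
from fastapi import APIRouter


def create_api_router() -> APIRouter:
    """Build a router exposing the /health and /api/v1/* endpoints from the README."""
    router = APIRouter()

    @router.get("/health")
    def health() -> dict:
        return {"status": "ok"}

    @router.get("/api/v1/status")
    def status() -> dict:
        # A real implementation would report configured providers, memory counts, etc.
        return {"version": "0.1.0"}

    return router
```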