better-mem0-mcp 1.1.0b8__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,5 @@
1
"""better-mem0-mcp: Zero-setup MCP Server for AI memory."""

# Re-export the stdio server entry point; this is the console_scripts
# target (`better-mem0-mcp = better_mem0_mcp:main` in entry_points.txt).
from .server import main

__all__ = ["main"]
@@ -0,0 +1,176 @@
1
+ """
2
+ better-mem0-mcp: Zero-setup MCP Server for AI memory.
3
+
4
+ Configuration via environment variables:
5
+ - DATABASE_URL: PostgreSQL connection string
6
+ - API_KEYS: Provider:key pairs (e.g., "gemini:AIza...,openai:sk-xxx")
7
+ - LLM_MODELS: Model fallback chain (e.g., "gemini/gemini-3-flash-preview,openai/gpt-4o-mini")
8
+ - EMBEDDER_MODELS: Embedding fallback chain
9
+ """
10
+
11
+ import os
12
+ from functools import lru_cache
13
+ from urllib.parse import parse_qs, urlparse
14
+
15
+ from pydantic import ConfigDict
16
+ from pydantic_settings import BaseSettings
17
+
18
+
19
class Settings(BaseSettings):
    """Configuration with multi-key and fallback support.

    All fields are populated from environment variables by pydantic-settings
    (DATABASE_URL, API_KEYS, LLM_MODELS, EMBEDDER_MODELS); unknown env vars
    are ignored via ``extra="ignore"``.
    """

    model_config = ConfigDict(extra="ignore")

    # Database (Required)
    database_url: str = ""

    # API Keys: "provider:key,provider:key,..."
    api_keys: str = ""

    # Models with fallback: "provider/model,provider/model,..."
    # Note: For >2000 dims, HNSW index is disabled (uses exact search)
    llm_models: str = "gemini/gemini-3-flash-preview"
    embedder_models: str = "gemini/gemini-embedding-001"

    def setup_api_keys(self) -> dict[str, list[str]]:
        """
        Parse API_KEYS and set environment variables for LiteLLM.

        Malformed pairs (no ":" or empty key) are silently skipped.

        Returns:
            Dict mapping provider to list of API keys.
        """
        env_map = {
            "gemini": "GOOGLE_API_KEY",
            "openai": "OPENAI_API_KEY",
            "anthropic": "ANTHROPIC_API_KEY",
            "groq": "GROQ_API_KEY",
            "deepseek": "DEEPSEEK_API_KEY",
            "mistral": "MISTRAL_API_KEY",
        }

        keys_by_provider: dict[str, list[str]] = {}

        for pair in self.api_keys.split(","):
            pair = pair.strip()
            if ":" not in pair:
                continue

            provider, key = pair.split(":", 1)
            provider = provider.strip().lower()
            key = key.strip()

            if not key:
                continue

            keys_by_provider.setdefault(provider, []).append(key)

        # Set first key of each provider as env var (LiteLLM reads from env)
        for provider, keys in keys_by_provider.items():
            if provider in env_map and keys:
                os.environ[env_map[provider]] = keys[0]

        return keys_by_provider

    def parse_database_url(self) -> dict:
        """Parse DATABASE_URL into psycopg-style connection parameters.

        Returns:
            Dict with dbname/user/password/host/port/sslmode, or an empty
            dict when DATABASE_URL is unset.
        """
        if not self.database_url:
            return {}

        parsed = urlparse(self.database_url)
        query = parse_qs(parsed.query)

        return {
            "dbname": parsed.path[1:] if parsed.path else "mem0",
            "user": parsed.username or "postgres",
            "password": parsed.password or "",
            "host": parsed.hostname or "localhost",
            "port": parsed.port or 5432,
            "sslmode": query.get("sslmode", ["prefer"])[0],
        }

    def get_llm_config(self) -> dict:
        """Build Mem0 LLM configuration with fallback chain."""
        models = [m.strip() for m in self.llm_models.split(",") if m.strip()]

        if not models:
            models = ["gemini/gemini-3-flash-preview"]

        primary = models[0]
        fallbacks = models[1:] if len(models) > 1 else None

        # Gemini 3 models require temperature=1.0 to avoid infinite loops
        temperature = 1.0 if "gemini-3" in primary else 0.1

        config: dict = {
            "provider": "litellm",
            "config": {
                "model": primary,
                "temperature": temperature,
            },
        }

        if fallbacks:
            config["config"]["fallbacks"] = fallbacks
            config["config"]["num_retries"] = 2

        return config

    def get_embedder_config(self) -> dict:
        """Build Mem0 embedder configuration."""
        models = [m.strip() for m in self.embedder_models.split(",") if m.strip()]

        # BUGFIX: fall back to the field default instead of raising IndexError
        # when EMBEDDER_MODELS is set but empty/whitespace. This mirrors the
        # empty-list handling already present in get_llm_config().
        if not models:
            models = ["gemini/gemini-embedding-001"]

        primary = models[0]

        # Parse "provider/model"; bare model names default to openai.
        if "/" in primary:
            provider, model = primary.split("/", 1)
        else:
            provider = "openai"
            model = primary

        # Fixed 1536 dims for all models:
        # - Compatible with pgvector HNSW index (limit: 2000)
        # - Good balance between quality and storage
        # - Matryoshka models (gemini-embedding-001, text-embedding-3-*) support any dimension
        embedding_dims = 1536

        return {
            "provider": provider,
            "config": {
                "model": model,
                "embedding_dims": embedding_dims,
            },
        }

    def get_mem0_config(self) -> dict:
        """Build complete Mem0 configuration (vector store + llm + embedder)."""
        embedder_config = self.get_embedder_config()

        # Get embedding dimensions from embedder config
        embedding_dims = embedder_config.get("config", {}).get("embedding_dims", 1536)

        # Disable HNSW for dims > 2000 (pgvector HNSW limit)
        # Uses exact search instead of ANN - slower but works with any dimensions
        use_hnsw = embedding_dims <= 2000

        return {
            "vector_store": {
                "provider": "pgvector",
                "config": {
                    "collection_name": "mem0_memories",
                    "connection_string": self.database_url,
                    "embedding_model_dims": embedding_dims,
                    "hnsw": use_hnsw,
                },
            },
            "llm": self.get_llm_config(),
            "embedder": embedder_config,
        }
171
+
172
+
173
@lru_cache
def get_settings() -> Settings:
    """Return the process-wide Settings instance (built once, then cached)."""
    settings = Settings()
    return settings
@@ -0,0 +1,47 @@
1
+ # memory - Full Documentation
2
+
3
+ ## Overview
4
+ Memory operations for AI agents: add, search, list, delete.
5
+
6
+ Uses Mem0 for vector memory (pgvector) and SQL-based graph storage.
7
+
8
+ ## Actions
9
+
10
+ ### add
11
+ Save information to long-term memory. Mem0 automatically:
12
+ - Extracts key facts
13
+ - Creates embeddings for semantic search
14
+ - Deduplicates similar memories
15
+
16
+ ```json
17
+ {"action": "add", "content": "User prefers dark mode and uses FastAPI"}
18
+ ```
19
+
20
+ ### search
21
+ Semantic search across stored memories. Combines vector search with graph context.
22
+
23
+ ```json
24
+ {"action": "search", "query": "coding preferences", "limit": 5}
25
+ ```
26
+
27
+ ### list
28
+ Get all stored memories for a user.
29
+
30
+ ```json
31
+ {"action": "list"}
32
+ ```
33
+
34
+ ### delete
35
+ Remove a memory by ID.
36
+
37
+ ```json
38
+ {"action": "delete", "memory_id": "abc12345-..."}
39
+ ```
40
+
41
+ ## Parameters
42
+ - `action` - Required: add, search, list, delete
43
+ - `content` - Required for add: information to remember
44
+ - `query` - Required for search: what to search for
45
+ - `memory_id` - Required for delete: ID of memory to remove
46
+ - `limit` - Optional for search: max results (default: 5)
47
+ - `user_id` - Optional: scope memories to a specific user
@@ -0,0 +1,172 @@
1
+ """SQL-based Graph Storage - works with any PostgreSQL, no extensions needed."""
2
+
3
+ from typing import Any
4
+
5
+ import psycopg
6
+ from loguru import logger
7
+
8
+
9
class SQLGraphStore:
    """Graph storage using plain SQL tables (nodes + edges).

    Works with any PostgreSQL - no extensions needed. Nodes carry a
    ``user_id`` scope; edges connect node ids and cascade on node deletion.
    All public methods swallow database errors (logged) and return a
    neutral value, so graph failures never break the caller.
    """

    INIT_SQL = """
    CREATE TABLE IF NOT EXISTS graph_nodes (
        id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
        label VARCHAR(50) NOT NULL,
        name VARCHAR(255),
        properties JSONB DEFAULT '{}',
        user_id VARCHAR(100) NOT NULL,
        created_at TIMESTAMPTZ DEFAULT NOW()
    );

    CREATE TABLE IF NOT EXISTS graph_edges (
        id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
        from_node_id UUID REFERENCES graph_nodes(id) ON DELETE CASCADE,
        to_node_id UUID REFERENCES graph_nodes(id) ON DELETE CASCADE,
        relationship VARCHAR(100) NOT NULL,
        properties JSONB DEFAULT '{}',
        created_at TIMESTAMPTZ DEFAULT NOW()
    );

    CREATE INDEX IF NOT EXISTS idx_graph_nodes_user ON graph_nodes(user_id);
    CREATE INDEX IF NOT EXISTS idx_graph_nodes_name ON graph_nodes(name);
    CREATE INDEX IF NOT EXISTS idx_graph_nodes_label ON graph_nodes(label);
    CREATE INDEX IF NOT EXISTS idx_graph_edges_from ON graph_edges(from_node_id);
    CREATE INDEX IF NOT EXISTS idx_graph_edges_to ON graph_edges(to_node_id);
    CREATE INDEX IF NOT EXISTS idx_graph_edges_rel ON graph_edges(relationship);

    -- BUGFIX: add_edge relies on ON CONFLICT DO NOTHING, but without a unique
    -- constraint the conflict clause never fires and duplicate edges pile up.
    -- (On pre-existing databases that already contain duplicates this index
    -- creation fails; _init_tables logs and continues, preserving old behavior.)
    CREATE UNIQUE INDEX IF NOT EXISTS idx_graph_edges_uniq
        ON graph_edges(from_node_id, to_node_id, relationship);
    """

    def __init__(self, conn_params: dict[str, Any]):
        """Initialize graph store with database connection parameters.

        Args:
            conn_params: psycopg-style connection kwargs; only host, port,
                dbname, user, password (and sslmode if present) are used.
        """
        self.conn_params = {
            k: v
            for k, v in conn_params.items()
            if k in ("host", "port", "dbname", "user", "password")
        }
        # Add sslmode if present
        if "sslmode" in conn_params:
            self.conn_params["sslmode"] = conn_params["sslmode"]
        self._init_tables()

    def _get_connection(self):
        """Get a new database connection."""
        return psycopg.connect(**self.conn_params)

    def _init_tables(self):
        """Initialize graph tables/indexes if they don't exist (best-effort)."""
        try:
            with self._get_connection() as conn:
                conn.execute(self.INIT_SQL)
                conn.commit()
                logger.info("Graph tables initialized")
        except Exception as e:
            logger.warning(f"Graph init failed (may already exist): {e}")

    def add_node(
        self,
        label: str,
        name: str,
        user_id: str,
        properties: dict | None = None,
    ) -> str | None:
        """Add or get existing node. Returns node ID, or None on failure."""
        try:
            with self._get_connection() as conn:
                # Check if exists
                result = conn.execute(
                    "SELECT id FROM graph_nodes WHERE label = %s AND name = %s AND user_id = %s",
                    (label, name, user_id),
                ).fetchone()

                if result:
                    return str(result[0])

                # Create new node; Json imported lazily to keep psycopg optional
                # until a write actually happens.
                from psycopg.types.json import Json

                result = conn.execute(
                    """INSERT INTO graph_nodes (label, name, properties, user_id)
                       VALUES (%s, %s, %s, %s) RETURNING id""",
                    (label, name, Json(properties or {}), user_id),
                ).fetchone()
                conn.commit()
                return str(result[0]) if result else None
        except Exception as e:
            logger.error(f"Failed to add node: {e}")
            return None

    def add_edge(
        self,
        from_id: str,
        to_id: str,
        relationship: str,
        properties: dict | None = None,
    ) -> bool:
        """Add edge between nodes. Duplicate (from, to, relationship) triples
        are ignored via the unique index declared in INIT_SQL."""
        try:
            with self._get_connection() as conn:
                from psycopg.types.json import Json

                conn.execute(
                    """INSERT INTO graph_edges (from_node_id, to_node_id, relationship, properties)
                       VALUES (%s, %s, %s, %s)
                       ON CONFLICT DO NOTHING""",
                    (from_id, to_id, relationship, Json(properties or {})),
                )
                conn.commit()
                return True
        except Exception as e:
            logger.error(f"Failed to add edge: {e}")
            return False

    def find_related(self, name: str, user_id: str, limit: int = 10) -> list[dict]:
        """Find nodes connected (either edge direction) to nodes matching *name*.

        BUGFIX: the related node (n2) is now also filtered by user_id; the
        previous query only scoped the starting node, so an edge created
        between nodes of different users could leak another user's data
        into the results.
        """
        try:
            with self._get_connection() as conn:
                results = conn.execute(
                    """
                    SELECT DISTINCT n2.label, n2.name, e.relationship
                    FROM graph_nodes n1
                    JOIN graph_edges e ON n1.id = e.from_node_id OR n1.id = e.to_node_id
                    JOIN graph_nodes n2 ON (e.to_node_id = n2.id OR e.from_node_id = n2.id)
                        AND n2.id != n1.id
                    WHERE n1.name ILIKE %s AND n1.user_id = %s AND n2.user_id = %s
                    LIMIT %s
                    """,
                    (f"%{name}%", user_id, user_id, limit),
                ).fetchall()

                return [
                    {"label": r[0], "name": r[1], "relationship": r[2]} for r in results
                ]
        except Exception as e:
            logger.error(f"Failed to find related: {e}")
            return []

    def get_context(self, query: str, user_id: str) -> str:
        """Get graph context for a query (find related entities).

        Returns a human-readable "Related context:" bullet list, or ""
        when nothing relevant is found.
        """
        # Only look up "significant" words (>3 chars) to cut query noise.
        words = [w for w in query.split() if len(w) > 3]
        related: list[dict] = []

        for word in words[:5]:  # Limit to first 5 significant words
            related.extend(self.find_related(word, user_id, limit=3))

        if not related:
            return ""

        # Deduplicate on (name, relationship) while preserving order
        seen = set()
        unique = []
        for r in related:
            key = (r["name"], r["relationship"])
            if key not in seen:
                seen.add(key)
                unique.append(r)

        if not unique:
            return ""

        lines = [
            f"- {r['name']} ({r['label']}) - {r['relationship']}" for r in unique[:5]
        ]
        return "Related context:\n" + "\n".join(lines)
File without changes
@@ -0,0 +1,177 @@
1
+ """
2
+ better-mem0-mcp MCP Server.
3
+
4
+ Tiered Description Pattern:
5
+ - Tier 1: Compressed descriptions in tool definitions (~150 tokens)
6
+ - Tier 2: Full docs via `help` tool (on-demand, ~500 tokens)
7
+ - Tier 3: MCP Resources for supported clients
8
+ """
9
+
10
+ from pathlib import Path
11
+
12
+ from loguru import logger
13
+ from mcp.server.fastmcp import FastMCP
14
+
15
+ from .config import get_settings
16
+
17
# Initialize MCP Server
mcp = FastMCP("better-mem0-mcp")

# Lazy-initialized globals (populated by _init on first tool call)
_memory = None  # mem0 Memory instance (vector store)
_graph = None  # SQLGraphStore instance, or None if graph init failed
_settings = None  # Settings; non-None short-circuits _init()

# User scope applied when a tool call omits user_id
DEFAULT_USER = "default"
26
+
27
+
28
def _init():
    """Lazy initialization of settings, vector memory, and graph store.

    Called at the top of every tool invocation; returns immediately once
    initialization has succeeded.

    BUGFIX: previously `_settings` was assigned *before* validation and
    Memory construction. If the first call raised (missing env var, bad
    DB, etc.), every subsequent call short-circuited on `_settings is not
    None` and then crashed on `_memory` being None. We now publish
    `_settings` only after the required components are up, so a failed
    init is retried on the next call.

    Raises:
        ValueError: if DATABASE_URL or API_KEYS is not configured.
    """
    global _memory, _graph, _settings

    if _settings is not None:
        return

    settings = get_settings()

    # Validate required settings
    if not settings.database_url:
        raise ValueError("DATABASE_URL is required")
    if not settings.api_keys:
        raise ValueError("API_KEYS is required")

    # Setup API keys (sets env vars for LiteLLM)
    keys_by_provider = settings.setup_api_keys()
    logger.info(f"API keys configured for: {list(keys_by_provider.keys())}")

    # Initialize Mem0 Memory (Vector Store)
    from mem0 import Memory

    mem0_config = settings.get_mem0_config()
    _memory = Memory.from_config(mem0_config)
    logger.info(f"Vector Memory initialized: {settings.llm_models.split(',')[0]}")

    # Initialize Graph Store (SQL-based); best-effort - failure only
    # disables graph context, it does not abort startup.
    try:
        from .graph import SQLGraphStore

        _graph = SQLGraphStore(settings.parse_database_url())
        logger.info("Graph Memory initialized (SQL)")
    except Exception as e:
        logger.warning(f"Graph Memory disabled: {e}")

    # Mark initialization complete only after the required pieces succeeded.
    _settings = settings
62
+
63
+
64
+ def _load_doc(name: str) -> str:
65
+ """Load documentation file from docs/ directory."""
66
+ docs_dir = Path(__file__).parent / "docs"
67
+ doc_file = docs_dir / f"{name}.md"
68
+ if doc_file.exists():
69
+ return doc_file.read_text()
70
+ return f"Documentation not found: {name}"
71
+
72
+
73
+ # =============================================================================
74
+ # Tool: memory
75
+ # Tier 1: Compressed description
76
+ # =============================================================================
77
@mcp.tool()
async def memory(
    action: str,
    content: str | None = None,
    query: str | None = None,
    memory_id: str | None = None,
    user_id: str | None = None,
    limit: int = 5,
) -> str:
    """
    Memory operations: add, search, list, delete.
    - add: Save information (requires content)
    - search: Find memories (requires query)
    - list: Get all memories
    - delete: Remove by ID (requires memory_id)
    Use `help` tool for full documentation.
    """
    # NOTE: the docstring above doubles as the MCP tool description.
    _init()

    uid = user_id or DEFAULT_USER

    try:
        if action == "add":
            if not content:
                return "Error: 'content' required for add action"

            saved = _memory.add(content, user_id=uid)
            logger.info(f"Added memory for {uid}: {content[:50]}...")
            return f"Saved: {saved}"

        if action == "search":
            if not query:
                return "Error: 'query' required for search action"

            # Vector search plus optional graph context
            hits = _memory.search(query, user_id=uid, limit=limit)
            graph_context = _graph.get_context(query, uid) if _graph else ""

            if not hits and not graph_context:
                return "No memories found."

            output = ""
            if hits:
                bullets = "\n".join(f"- {h.get('memory', str(h))}" for h in hits)
                output = "Memories:\n" + bullets
            if graph_context:
                output += f"\n\n{graph_context}"

            return output

        if action == "list":
            stored = _memory.get_all(user_id=uid)
            if not stored:
                return "No memories stored."

            body = "\n".join(f"- [{m['id'][:8]}] {m['memory']}" for m in stored)
            return f"Memories ({len(stored)}):\n" + body

        if action == "delete":
            if not memory_id:
                return "Error: 'memory_id' required for delete action"

            _memory.delete(memory_id)
            logger.info(f"Deleted memory: {memory_id}")
            return f"Deleted: {memory_id}"

        return f"Unknown action: {action}. Use: add, search, list, delete"

    except Exception as e:
        logger.error(f"Memory operation failed: {e}")
        return f"Error: {e}"
155
+
156
+
157
+ # =============================================================================
158
+ # Tool: help
159
+ # Tier 2: Full documentation on-demand
160
+ # =============================================================================
161
@mcp.tool()
async def help(tool_name: str = "memory") -> str:
    """
    Get full documentation for a tool.
    Use when compressed descriptions are insufficient.
    """
    # Tier 2 of the tiered-description pattern: serve the full markdown doc
    # on demand. (The name `help` shadows the builtin, but it is the
    # client-facing tool name and must stay.)
    doc_text = _load_doc(tool_name)
    return doc_text
168
+
169
+
170
def main():
    """Entry point for MCP server.

    Runs the FastMCP server over the stdio transport (the MCP client is
    expected to launch this process and communicate via stdin/stdout).
    """
    logger.info("Starting better-mem0-mcp server (stdio)")
    mcp.run(transport="stdio")


if __name__ == "__main__":
    main()
@@ -0,0 +1,149 @@
1
+ Metadata-Version: 2.4
2
+ Name: better-mem0-mcp
3
+ Version: 1.1.0b8
4
+ Summary: Zero-setup MCP Server for AI memory - works with Neon/Supabase
5
+ Project-URL: Homepage, https://github.com/n24q02m/better-mem0-mcp
6
+ Project-URL: Repository, https://github.com/n24q02m/better-mem0-mcp.git
7
+ Project-URL: Issues, https://github.com/n24q02m/better-mem0-mcp/issues
8
+ Author-email: n24q02m <quangminh2422004@gmail.com>
9
+ License: MIT
10
+ License-File: LICENSE
11
+ Keywords: ai-agent,llm,mcp,mem0,memory,pgvector
12
+ Classifier: Development Status :: 4 - Beta
13
+ Classifier: Environment :: Console
14
+ Classifier: Intended Audience :: Developers
15
+ Classifier: License :: OSI Approved :: MIT License
16
+ Classifier: Operating System :: OS Independent
17
+ Classifier: Programming Language :: Python :: 3
18
+ Classifier: Programming Language :: Python :: 3.13
19
+ Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
20
+ Requires-Python: >=3.13
21
+ Requires-Dist: google-genai>=1.0.0
22
+ Requires-Dist: litellm>=1.0.0
23
+ Requires-Dist: loguru>=0.7.0
24
+ Requires-Dist: mcp[cli]>=1.0.0
25
+ Requires-Dist: mem0ai>=0.1.0
26
+ Requires-Dist: psycopg[binary,pool]>=3.1.0
27
+ Requires-Dist: pydantic-settings>=2.0.0
28
+ Description-Content-Type: text/markdown
29
+
30
+ # better-mem0-mcp
31
+
32
+ **Zero-setup** MCP Server for AI memory. Works with Neon/Supabase free tier.
33
+
34
+ [![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT)
35
+
36
+ ## Quick Start
37
+
38
+ ### 1. Get Prerequisites
39
+
40
+ - **Database**: [Neon](https://neon.tech) or [Supabase](https://supabase.com) (free tier)
41
+ - **API Key**: [Google AI Studio](https://aistudio.google.com/apikey) (free tier)
42
+
43
+ ### 2. Add to mcp.json
44
+
45
+ #### uvx (Recommended)
46
+
47
+ ```json
48
+ {
49
+ "mcpServers": {
50
+ "memory": {
51
+ "command": "uvx",
52
+ "args": ["better-mem0-mcp"],
53
+ "env": {
54
+ "DATABASE_URL": "postgresql://user:pass@xxx.neon.tech/neondb?sslmode=require",
55
+ "API_KEYS": "gemini:AIza..."
56
+ }
57
+ }
58
+ }
59
+ }
60
+ ```
61
+
62
+ #### Docker
63
+
64
+ ```json
65
+ {
66
+ "mcpServers": {
67
+ "memory": {
68
+ "command": "docker",
69
+ "args": ["run", "-i", "--rm", "-e", "DATABASE_URL", "-e", "API_KEYS", "n24q02m/better-mem0-mcp:latest"],
70
+ "env": {
71
+ "DATABASE_URL": "postgresql://...",
72
+ "API_KEYS": "gemini:AIza..."
73
+ }
74
+ }
75
+ }
76
+ }
77
+ ```
78
+
79
+ ### 3. Done!
80
+
81
+ Ask Claude: "Remember that I prefer dark mode and use FastAPI"
82
+
83
+ ---
84
+
85
+ ## Configuration
86
+
87
+ | Variable | Required | Description |
88
+ |----------|----------|-------------|
89
+ | `DATABASE_URL` | Yes | PostgreSQL connection string |
90
+ | `API_KEYS` | Yes | `provider:key,...` (multi-key per provider OK) |
91
+ | `LLM_MODELS` | No | `provider/model,...` (fallback chain) |
92
+ | `EMBEDDER_MODELS` | No | `provider/model,...` (fallback chain) |
93
+
94
+ ### Examples
95
+
96
+ **Minimal (Gemini only):**
97
+ ```
98
+ API_KEYS=gemini:AIza...
99
+ ```
100
+
101
+ **Multi-key with fallback:**
102
+ ```
103
+ API_KEYS=gemini:AIza-1,gemini:AIza-2,openai:sk-xxx
104
+ LLM_MODELS=gemini/gemini-2.5-flash,openai/gpt-4o-mini
105
+ EMBEDDER_MODELS=gemini/gemini-embedding-001,openai/text-embedding-3-small
106
+ ```
107
+
108
+ ### Defaults
109
+
110
+ | Setting | Default |
111
+ |---------|---------|
112
+ | `LLM_MODELS` | `gemini/gemini-3-flash-preview` |
113
+ | `EMBEDDER_MODELS` | `gemini/gemini-embedding-001` |
114
+
115
+ ---
116
+
117
+ ## Tools
118
+
119
+ | Tool | Description |
120
+ |------|-------------|
121
+ | `memory` | `action`: add, search, list, delete |
122
+ | `help` | Detailed documentation |
123
+
124
+ ### Usage
125
+
126
+ ```json
127
+ {"action": "add", "content": "I prefer TypeScript over JavaScript"}
128
+ {"action": "search", "query": "preferences"}
129
+ {"action": "list"}
130
+ {"action": "delete", "memory_id": "abc123"}
131
+ ```
132
+
133
+ ---
134
+
135
+ ## Why better-mem0-mcp?
136
+
137
+ | Feature | Official mem0-mcp | better-mem0-mcp |
138
+ |---------|-------------------|-----------------|
139
+ | Storage | Mem0 Cloud | **Self-hosted PostgreSQL** |
140
+ | Graph Memory | No | **Yes (SQL-based)** |
141
+ | LLM Provider | OpenAI only | **Any (Gemini/OpenAI/Ollama/...)** |
142
+ | Fallback | No | **Yes (multi-key + multi-model)** |
143
+ | Setup | API Key | **DATABASE_URL + API_KEYS** |
144
+
145
+ ---
146
+
147
+ ## License
148
+
149
+ MIT
@@ -0,0 +1,11 @@
1
+ better_mem0_mcp/__init__.py,sha256=fcqgbz2HvMCPidqqoPvtRky5pGIHP2w9oVim7UQkuBc,106
2
+ better_mem0_mcp/config.py,sha256=vgKLIw3jHyeBLTzzBnuHV5x6Nra0Rbav1IJRL8rLCuk,5576
3
+ better_mem0_mcp/graph.py,sha256=rE9z6XECiAktEqDNgmwqCpFpvKSn3azO9H4sRBhj8UU,6195
4
+ better_mem0_mcp/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
5
+ better_mem0_mcp/server.py,sha256=O6s0L1FggB9FDfX4OKF6govrRznwUIAh9RkFRCrYX3Y,5124
6
+ better_mem0_mcp/docs/memory.md,sha256=jqoxBYHVh2N9TZbt77qY5zhL1vSU-Ro-yuaczT1r7Mo,1126
7
+ better_mem0_mcp-1.1.0b8.dist-info/METADATA,sha256=5YzOORgrypbkhJIhVf7sQpcs3ceqGz4QTNMn6Ix8ahM,3873
8
+ better_mem0_mcp-1.1.0b8.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
9
+ better_mem0_mcp-1.1.0b8.dist-info/entry_points.txt,sha256=2b7E3D6yo94mQXP2Ms0bhUlWkK9f664f0GrstImOq30,57
10
+ better_mem0_mcp-1.1.0b8.dist-info/licenses/LICENSE,sha256=d7xQ6sRyeGus6gnvwgqiQtSY7XdFw0Jd0w5-Co_xHnk,1064
11
+ better_mem0_mcp-1.1.0b8.dist-info/RECORD,,
@@ -0,0 +1,4 @@
1
+ Wheel-Version: 1.0
2
+ Generator: hatchling 1.28.0
3
+ Root-Is-Purelib: true
4
+ Tag: py3-none-any
@@ -0,0 +1,2 @@
1
+ [console_scripts]
2
+ better-mem0-mcp = better_mem0_mcp:main
@@ -0,0 +1,21 @@
1
+ MIT License
2
+
3
+ Copyright (c) 2026 n24q02m
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.