memgraph-agent 0.2.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- memgraph_agent-0.2.0/.env.example +36 -0
- memgraph_agent-0.2.0/.gitignore +18 -0
- memgraph_agent-0.2.0/LICENSE +21 -0
- memgraph_agent-0.2.0/PKG-INFO +525 -0
- memgraph_agent-0.2.0/README.md +465 -0
- memgraph_agent-0.2.0/adapters/__init__.py +38 -0
- memgraph_agent-0.2.0/adapters/base.py +69 -0
- memgraph_agent-0.2.0/adapters/generated/atlas_v2_adapter.js +199 -0
- memgraph_agent-0.2.0/adapters/generator.py +250 -0
- memgraph_agent-0.2.0/adapters/generic.py +49 -0
- memgraph_agent-0.2.0/adapters/langchain.py +264 -0
- memgraph_agent-0.2.0/adapters/openai_agents.py +364 -0
- memgraph_agent-0.2.0/adapters/templates/atlas_node.js +218 -0
- memgraph_agent-0.2.0/adapters/templates/generic_node.js +180 -0
- memgraph_agent-0.2.0/adapters/templates/generic_python.py +200 -0
- memgraph_agent-0.2.0/adapters/templates/mcp_server.py +161 -0
- memgraph_agent-0.2.0/adapters/templates/openclaw_skill.ts +192 -0
- memgraph_agent-0.2.0/benchmarks/__init__.py +1 -0
- memgraph_agent-0.2.0/benchmarks/quality_eval.py +313 -0
- memgraph_agent-0.2.0/benchmarks/retrieval_speed.py +253 -0
- memgraph_agent-0.2.0/benchmarks/token_comparison.py +337 -0
- memgraph_agent-0.2.0/bridge/__init__.py +8 -0
- memgraph_agent-0.2.0/bridge/embedded_bridge.py +222 -0
- memgraph_agent-0.2.0/bridge/http_bridge.py +193 -0
- memgraph_agent-0.2.0/bridge/ipc_bridge.py +227 -0
- memgraph_agent-0.2.0/cli/__init__.py +1 -0
- memgraph_agent-0.2.0/cli/main.py +596 -0
- memgraph_agent-0.2.0/config.yaml +40 -0
- memgraph_agent-0.2.0/docker-compose.yml +40 -0
- memgraph_agent-0.2.0/examples/atlas_integration.py +168 -0
- memgraph_agent-0.2.0/examples/basic_usage.py +44 -0
- memgraph_agent-0.2.0/examples/claude_code_mcp.py +67 -0
- memgraph_agent-0.2.0/examples/openclaw_adapter.py +187 -0
- memgraph_agent-0.2.0/memgraph/__init__.py +16 -0
- memgraph_agent-0.2.0/memgraph/cli.py +82 -0
- memgraph_agent-0.2.0/memgraph/config.py +253 -0
- memgraph_agent-0.2.0/memgraph/converters/__init__.py +23 -0
- memgraph_agent-0.2.0/memgraph/converters/atlas.py +245 -0
- memgraph_agent-0.2.0/memgraph/converters/generic.py +278 -0
- memgraph_agent-0.2.0/memgraph/converters/openclaw.py +116 -0
- memgraph_agent-0.2.0/memgraph/core.py +222 -0
- memgraph_agent-0.2.0/memgraph/embeddings.py +201 -0
- memgraph_agent-0.2.0/memgraph/formatter.py +157 -0
- memgraph_agent-0.2.0/memgraph/graph_client.py +460 -0
- memgraph_agent-0.2.0/memgraph/ingestion.py +156 -0
- memgraph_agent-0.2.0/memgraph/migration.py +267 -0
- memgraph_agent-0.2.0/memgraph/onboarding.py +534 -0
- memgraph_agent-0.2.0/memgraph/providers.py +336 -0
- memgraph_agent-0.2.0/memgraph/retrieval.py +267 -0
- memgraph_agent-0.2.0/memgraph/setup_check.py +443 -0
- memgraph_agent-0.2.0/memgraph/token_budget.py +142 -0
- memgraph_agent-0.2.0/pyproject.toml +125 -0
- memgraph_agent-0.2.0/scanner/__init__.py +21 -0
- memgraph_agent-0.2.0/scanner/cli.py +200 -0
- memgraph_agent-0.2.0/scanner/detect.py +207 -0
- memgraph_agent-0.2.0/scanner/memory_map.py +283 -0
- memgraph_agent-0.2.0/scanner/patterns.py +194 -0
- memgraph_agent-0.2.0/scanner/report.py +241 -0
- memgraph_agent-0.2.0/server/__init__.py +1 -0
- memgraph_agent-0.2.0/server/app.py +88 -0
- memgraph_agent-0.2.0/server/mcp/__init__.py +0 -0
- memgraph_agent-0.2.0/server/mcp/handler.py +213 -0
- memgraph_agent-0.2.0/server/routes/__init__.py +0 -0
- memgraph_agent-0.2.0/server/routes/health.py +23 -0
- memgraph_agent-0.2.0/server/routes/memory.py +103 -0
- memgraph_agent-0.2.0/tests/__init__.py +0 -0
- memgraph_agent-0.2.0/tests/test_config.py +39 -0
- memgraph_agent-0.2.0/tests/test_formatter.py +61 -0
- memgraph_agent-0.2.0/tests/test_token_budget.py +56 -0
|
@@ -0,0 +1,36 @@
|
|
|
1
|
+
# --- Required for Graphiti entity extraction ---
|
|
2
|
+
OPENAI_API_KEY=sk-your-key-here
|
|
3
|
+
|
|
4
|
+
# --- FalkorDB connection ---
|
|
5
|
+
FALKORDB_HOST=localhost
|
|
6
|
+
FALKORDB_PORT=6379
|
|
7
|
+
FALKORDB_DATABASE=memgraph
|
|
8
|
+
FALKORDB_USERNAME=
|
|
9
|
+
FALKORDB_PASSWORD=
|
|
10
|
+
|
|
11
|
+
# --- Embedding provider ---
|
|
12
|
+
# "ollama" (local, free) or "openai" (cloud, paid)
|
|
13
|
+
EMBEDDING_PROVIDER=ollama
|
|
14
|
+
EMBEDDING_MODEL=nomic-embed-text
|
|
15
|
+
EMBEDDING_DIM=768
|
|
16
|
+
OLLAMA_BASE_URL=http://localhost:11434
|
|
17
|
+
|
|
18
|
+
# --- LLM for Graphiti entity extraction ---
|
|
19
|
+
# "openai" or "anthropic"
|
|
20
|
+
LLM_PROVIDER=openai
|
|
21
|
+
LLM_MODEL=gpt-4o-mini
|
|
22
|
+
|
|
23
|
+
# --- MemGraph behavior ---
|
|
24
|
+
DEFAULT_TOKEN_BUDGET=2000
|
|
25
|
+
MEMGRAPH_LOG_LEVEL=INFO
|
|
26
|
+
MEMGRAPH_API_PORT=8100
|
|
27
|
+
MEMGRAPH_API_HOST=0.0.0.0
|
|
28
|
+
|
|
29
|
+
# --- Scoring weights (must sum to 1.0) ---
|
|
30
|
+
SCORE_WEIGHT_SEMANTIC=0.5
|
|
31
|
+
SCORE_WEIGHT_RECENCY=0.3
|
|
32
|
+
SCORE_WEIGHT_CENTRALITY=0.2
|
|
33
|
+
RECENCY_DECAY_DAYS=30
|
|
34
|
+
|
|
35
|
+
# --- Telemetry ---
|
|
36
|
+
GRAPHITI_TELEMETRY_ENABLED=false
|
|
@@ -0,0 +1,21 @@
|
|
|
1
|
+
MIT License
|
|
2
|
+
|
|
3
|
+
Copyright (c) 2026 Luke (TechItLuke)
|
|
4
|
+
|
|
5
|
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
6
|
+
of this software and associated documentation files (the "Software"), to deal
|
|
7
|
+
in the Software without restriction, including without limitation the rights
|
|
8
|
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
9
|
+
copies of the Software, and to permit persons to whom the Software is
|
|
10
|
+
furnished to do so, subject to the following conditions:
|
|
11
|
+
|
|
12
|
+
The above copyright notice and this permission notice shall be included in all
|
|
13
|
+
copies or substantial portions of the Software.
|
|
14
|
+
|
|
15
|
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
16
|
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
17
|
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
18
|
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
19
|
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
20
|
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
|
21
|
+
SOFTWARE.
|
|
@@ -0,0 +1,525 @@
|
|
|
1
|
+
Metadata-Version: 2.4
|
|
2
|
+
Name: memgraph-agent
|
|
3
|
+
Version: 0.2.0
|
|
4
|
+
Summary: Universal plug-and-play memory engine for AI agents. Built on Graphiti's temporal knowledge graph with token-budget optimization.
|
|
5
|
+
Project-URL: Homepage, https://github.com/SP3DK1D/Memomatic
|
|
6
|
+
Project-URL: Repository, https://github.com/SP3DK1D/Memomatic
|
|
7
|
+
Project-URL: Issues, https://github.com/SP3DK1D/Memomatic/issues
|
|
8
|
+
Project-URL: Documentation, https://github.com/SP3DK1D/Memomatic#readme
|
|
9
|
+
Project-URL: Changelog, https://github.com/SP3DK1D/Memomatic/releases
|
|
10
|
+
Author-email: "Luke (TechItLuke)" <techitluke@gmail.com>
|
|
11
|
+
License: MIT
|
|
12
|
+
License-File: LICENSE
|
|
13
|
+
Keywords: agents,ai,graphiti,knowledge-graph,llm,mcp,memory,rag,token-budget
|
|
14
|
+
Classifier: Development Status :: 4 - Beta
|
|
15
|
+
Classifier: Intended Audience :: Developers
|
|
16
|
+
Classifier: License :: OSI Approved :: MIT License
|
|
17
|
+
Classifier: Operating System :: OS Independent
|
|
18
|
+
Classifier: Programming Language :: Python :: 3.11
|
|
19
|
+
Classifier: Programming Language :: Python :: 3.12
|
|
20
|
+
Classifier: Programming Language :: Python :: 3.13
|
|
21
|
+
Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
|
|
22
|
+
Classifier: Typing :: Typed
|
|
23
|
+
Requires-Python: >=3.11
|
|
24
|
+
Requires-Dist: faiss-cpu>=1.8.0
|
|
25
|
+
Requires-Dist: fastapi>=0.115.0
|
|
26
|
+
Requires-Dist: graphiti-core[falkordb]>=0.3.0
|
|
27
|
+
Requires-Dist: httpx>=0.27.0
|
|
28
|
+
Requires-Dist: numpy>=1.26.0
|
|
29
|
+
Requires-Dist: pydantic-settings>=2.4.0
|
|
30
|
+
Requires-Dist: pydantic>=2.8.0
|
|
31
|
+
Requires-Dist: python-dotenv>=1.0.0
|
|
32
|
+
Requires-Dist: pyyaml>=6.0
|
|
33
|
+
Requires-Dist: structlog>=24.4.0
|
|
34
|
+
Requires-Dist: tiktoken>=0.7.0
|
|
35
|
+
Requires-Dist: uvicorn[standard]>=0.30.0
|
|
36
|
+
Provides-Extra: all
|
|
37
|
+
Requires-Dist: anthropic>=0.34.0; extra == 'all'
|
|
38
|
+
Requires-Dist: langchain>=0.2.0; extra == 'all'
|
|
39
|
+
Requires-Dist: openai>=1.40.0; extra == 'all'
|
|
40
|
+
Requires-Dist: sentence-transformers>=3.0.0; extra == 'all'
|
|
41
|
+
Provides-Extra: anthropic
|
|
42
|
+
Requires-Dist: anthropic>=0.34.0; extra == 'anthropic'
|
|
43
|
+
Provides-Extra: dev
|
|
44
|
+
Requires-Dist: build>=1.2.0; extra == 'dev'
|
|
45
|
+
Requires-Dist: mypy>=1.11.0; extra == 'dev'
|
|
46
|
+
Requires-Dist: pytest-asyncio>=0.24.0; extra == 'dev'
|
|
47
|
+
Requires-Dist: pytest-cov>=5.0.0; extra == 'dev'
|
|
48
|
+
Requires-Dist: pytest>=8.3.0; extra == 'dev'
|
|
49
|
+
Requires-Dist: ruff>=0.6.0; extra == 'dev'
|
|
50
|
+
Requires-Dist: twine>=5.0.0; extra == 'dev'
|
|
51
|
+
Provides-Extra: langchain
|
|
52
|
+
Requires-Dist: langchain>=0.2.0; extra == 'langchain'
|
|
53
|
+
Provides-Extra: local
|
|
54
|
+
Requires-Dist: sentence-transformers>=3.0.0; extra == 'local'
|
|
55
|
+
Provides-Extra: openai
|
|
56
|
+
Requires-Dist: openai>=1.40.0; extra == 'openai'
|
|
57
|
+
Provides-Extra: voyage
|
|
58
|
+
Requires-Dist: voyageai>=0.2.0; extra == 'voyage'
|
|
59
|
+
Description-Content-Type: text/markdown
|
|
60
|
+
|
|
61
|
+
# MemGraph — Universal AI Agent Memory Engine
|
|
62
|
+
|
|
63
|
+
[](https://pypi.org/project/memgraph-agent/)
|
|
64
|
+
[](https://python.org)
|
|
65
|
+
[](LICENSE)
|
|
66
|
+
[](https://github.com/SP3DK1D/Memomatic/actions)
|
|
67
|
+
[](https://github.com/SP3DK1D/Memomatic/actions)
|
|
68
|
+
[](https://modelcontextprotocol.io)
|
|
69
|
+
[](docker-compose.yml)
|
|
70
|
+
|
|
71
|
+
> **Give any AI agent perfect memory in 3 lines of code.**
|
|
72
|
+
|
|
73
|
+
MemGraph wraps [Graphiti](https://github.com/getzep/graphiti)'s temporal knowledge graph with a **token-budget allocator** that packs the maximum signal into the minimum tokens. Most agent turns need zero long-term memory — those turns pay zero memory tokens.
|
|
74
|
+
|
|
75
|
+
---
|
|
76
|
+
|
|
77
|
+
## The Problem
|
|
78
|
+
|
|
79
|
+
Every "agent memory" system does one of two dumb things:
|
|
80
|
+
|
|
81
|
+
| Approach | Problem |
|
|
82
|
+
|---|---|
|
|
83
|
+
| Dump the whole history | Wastes thousands of tokens every turn. Expensive and slow. |
|
|
84
|
+
| Summarise into one paragraph | Lossy. Misses important details. Can't answer precise questions. |
|
|
85
|
+
|
|
86
|
+
## The MemGraph Solution
|
|
87
|
+
|
|
88
|
+
```
|
|
89
|
+
Knowledge graph + hybrid retrieval + token-budget allocator
|
|
90
|
+
```
|
|
91
|
+
|
|
92
|
+
1. **Knowledge graph storage** — facts, entities, and temporal relationships via Graphiti + FalkorDB.
|
|
93
|
+
2. **Hybrid retrieval** — FAISS vector search + graph neighbour expansion + BM25, re-ranked by relevance × recency × centrality.
|
|
94
|
+
3. **Token-budget allocator** — given a hard ceiling (e.g. 1000 tokens), pack the most information-dense fragments that fit. When nothing is relevant, return empty — saving 100% of memory tokens.
|
|
95
|
+
|
|
96
|
+
---
|
|
97
|
+
|
|
98
|
+
## Benchmark Results
|
|
99
|
+
|
|
100
|
+
> Measured on a synthetic 40-turn conversation with 10 test queries.
|
|
101
|
+
> Hardware: Ryzen 5800H, 32GB DDR4, nomic-embed-text (local).
|
|
102
|
+
|
|
103
|
+
| Approach | Avg Tokens / Query | P95 Latency | Token Reduction |
|
|
104
|
+
|---|---|---|---|
|
|
105
|
+
| Raw context (full history) | ~3,200 | — | baseline |
|
|
106
|
+
| Sliding window (10 turns) | ~820 | — | −74% |
|
|
107
|
+
| **MemGraph (budget=1000)** | **~310** | **< 30ms** | **−90%** |
|
|
108
|
+
|
|
109
|
+
Run the benchmarks yourself:
|
|
110
|
+
|
|
111
|
+
```bash
|
|
112
|
+
uv run python benchmarks/token_comparison.py --turns 40 --budget 1000
|
|
113
|
+
uv run python benchmarks/retrieval_speed.py --facts 200 --queries 50
|
|
114
|
+
uv run python benchmarks/quality_eval.py --budget 800
|
|
115
|
+
```
|
|
116
|
+
|
|
117
|
+
---
|
|
118
|
+
|
|
119
|
+
## Quickstart
|
|
120
|
+
|
|
121
|
+
### Install
|
|
122
|
+
|
|
123
|
+
```bash
|
|
124
|
+
pip install memgraph-agent
|
|
125
|
+
```
|
|
126
|
+
|
|
127
|
+
### Check prerequisites (Docker, FalkorDB, Ollama)
|
|
128
|
+
|
|
129
|
+
```bash
|
|
130
|
+
memgraph-setup # check only
|
|
131
|
+
memgraph-setup --fix # auto-start missing services
|
|
132
|
+
```
|
|
133
|
+
|
|
134
|
+
### One-command infra
|
|
135
|
+
|
|
136
|
+
```bash
|
|
137
|
+
docker compose up -d # starts FalkorDB on :6379 and :3000 (browser UI)
|
|
138
|
+
```
|
|
139
|
+
|
|
140
|
+
### Use it
|
|
141
|
+
|
|
142
|
+
```python
|
|
143
|
+
from memgraph import MemGraph
|
|
144
|
+
|
|
145
|
+
async with await MemGraph.create() as mg:
|
|
146
|
+
# Store anything — plain text, messages, JSON
|
|
147
|
+
await mg.store("Luke prefers TypeScript and runs Ollama locally.")
|
|
148
|
+
await mg.store([
|
|
149
|
+
{"role": "user", "content": "What's the ATLAS stack?"},
|
|
150
|
+
{"role": "assistant", "content": "Express.js + Qwen3 via Ollama."},
|
|
151
|
+
])
|
|
152
|
+
|
|
153
|
+
# Retrieve — only pays token cost when relevant context exists
|
|
154
|
+
ctx = await mg.query("What stack does Luke use?", token_budget=500)
|
|
155
|
+
print(ctx.text) # formatted, token-capped, ready to inject
|
|
156
|
+
print(ctx.tokens_used) # exact token count (0 if nothing relevant)
|
|
157
|
+
print(ctx.is_empty) # True when nothing relevant found
|
|
158
|
+
```
|
|
159
|
+
|
|
160
|
+
That's it. No vector DB setup, no embedding server to manage (uses Ollama locally by default).
|
|
161
|
+
|
|
162
|
+
---
|
|
163
|
+
|
|
164
|
+
## Install Options
|
|
165
|
+
|
|
166
|
+
```bash
|
|
167
|
+
# Minimal (core + REST server + MCP)
|
|
168
|
+
pip install memgraph-agent
|
|
169
|
+
|
|
170
|
+
# With OpenAI client (for OpenAI Agents SDK adapter)
|
|
171
|
+
pip install "memgraph-agent[openai]"
|
|
172
|
+
|
|
173
|
+
# With Anthropic client
|
|
174
|
+
pip install memgraph-agent[anthropic]
|
|
175
|
+
|
|
176
|
+
# With LangChain adapter
|
|
177
|
+
pip install "memgraph-agent[langchain]"
|
|
178
|
+
|
|
179
|
+
# Everything
|
|
180
|
+
pip install "memgraph-agent[all]"
|
|
181
|
+
```
|
|
182
|
+
|
|
183
|
+
---
|
|
184
|
+
|
|
185
|
+
## Setup Guide
|
|
186
|
+
|
|
187
|
+
### Prerequisites
|
|
188
|
+
|
|
189
|
+
| Requirement | Notes |
|
|
190
|
+
|---|---|
|
|
191
|
+
| Python 3.11+ | `python --version` |
|
|
192
|
+
| Docker | For FalkorDB. [Get Docker](https://docs.docker.com/get-docker/) |
|
|
193
|
+
| Ollama (local) | For embeddings. [ollama.com](https://ollama.com) |
|
|
194
|
+
| OpenAI API key | For Graphiti entity extraction (even with local embeddings) |
|
|
195
|
+
|
|
196
|
+
### Automated setup
|
|
197
|
+
|
|
198
|
+
```bash
|
|
199
|
+
# Check everything
|
|
200
|
+
memgraph-setup
|
|
201
|
+
|
|
202
|
+
# Auto-fix: creates FalkorDB container, starts Ollama, creates .env
|
|
203
|
+
memgraph-setup --fix
|
|
204
|
+
|
|
205
|
+
# Run only specific checks
|
|
206
|
+
memgraph-setup --check Docker FalkorDB "OPENAI_API_KEY"
|
|
207
|
+
```
|
|
208
|
+
|
|
209
|
+
The checker validates:
|
|
210
|
+
- Python version ≥ 3.11
|
|
211
|
+
- Docker installed and daemon running
|
|
212
|
+
- FalkorDB container reachable (creates it if `--fix`)
|
|
213
|
+
- Ollama API responsive (starts `ollama serve` if `--fix`)
|
|
214
|
+
- `nomic-embed-text` model pulled (runs `ollama pull` if `--fix`)
|
|
215
|
+
- `.env` file exists with required keys (copies from `.env.example` if `--fix`)
|
|
216
|
+
- `OPENAI_API_KEY` is set and non-placeholder
|
|
217
|
+
|
|
218
|
+
### Manual setup
|
|
219
|
+
|
|
220
|
+
```bash
|
|
221
|
+
# 1. Clone
|
|
222
|
+
git clone https://github.com/SP3DK1D/Memomatic.git && cd Memomatic
|
|
223
|
+
|
|
224
|
+
# 2. Install
|
|
225
|
+
pip install -e ".[dev]"
|
|
226
|
+
# or with uv:
|
|
227
|
+
uv sync
|
|
228
|
+
|
|
229
|
+
# 3. Configure
|
|
230
|
+
cp .env.example .env
|
|
231
|
+
# edit .env — set OPENAI_API_KEY at minimum
|
|
232
|
+
|
|
233
|
+
# 4. Start FalkorDB
|
|
234
|
+
docker compose up -d falkordb
|
|
235
|
+
|
|
236
|
+
# 5. Pull embedding model
|
|
237
|
+
ollama pull nomic-embed-text
|
|
238
|
+
|
|
239
|
+
# 6. Verify
|
|
240
|
+
memgraph setup
|
|
241
|
+
```
|
|
242
|
+
|
|
243
|
+
---
|
|
244
|
+
|
|
245
|
+
## Configuration
|
|
246
|
+
|
|
247
|
+
MemGraph is configured via `config.yaml` (overridable via environment variables):
|
|
248
|
+
|
|
249
|
+
```yaml
|
|
250
|
+
graph:
|
|
251
|
+
host: localhost
|
|
252
|
+
port: 6379
|
|
253
|
+
database: memgraph # keeps data separate from other FalkorDB projects
|
|
254
|
+
|
|
255
|
+
embeddings:
|
|
256
|
+
provider: ollama # or "openai"
|
|
257
|
+
model: nomic-embed-text # or "text-embedding-3-small"
|
|
258
|
+
dim: 768
|
|
259
|
+
|
|
260
|
+
retrieval:
|
|
261
|
+
top_k: 20
|
|
262
|
+
default_token_budget: 2000
|
|
263
|
+
|
|
264
|
+
formatter:
|
|
265
|
+
default_format: claude_xml # claude_xml | openai_system | markdown | json
|
|
266
|
+
```
|
|
267
|
+
|
|
268
|
+
All `config.yaml` keys can be overridden via environment variables — see [.env.example](.env.example).
|
|
269
|
+
|
|
270
|
+
---
|
|
271
|
+
|
|
272
|
+
## What's in the Box
|
|
273
|
+
|
|
274
|
+
| Module | Purpose |
|
|
275
|
+
|---|---|
|
|
276
|
+
| `memgraph.core.MemGraph` | High-level entry point: `store()`, `query()`, `forget()`, `stats()` |
|
|
277
|
+
| `memgraph.token_budget` | Greedy knapsack packer — the key differentiator |
|
|
278
|
+
| `memgraph.retrieval` | FAISS + graph hybrid search with scoring |
|
|
279
|
+
| `memgraph.formatter` | Claude XML / OpenAI system / Markdown / JSON output |
|
|
280
|
+
| `memgraph.setup_check` | Prerequisite checker + auto-installer |
|
|
281
|
+
| `server.app` | FastAPI REST server (`memgraph-server`) |
|
|
282
|
+
| `server.mcp.handler` | MCP protocol handler for Claude Code / Cursor / Windsurf |
|
|
283
|
+
| `adapters.GenericAgentAdapter` | Two-hook adapter for any agent framework |
|
|
284
|
+
| `adapters.MemGraphChatMemory` | LangChain `BaseChatMemory` drop-in |
|
|
285
|
+
| `adapters.MemGraphHooks` | OpenAI Agents SDK hooks |
|
|
286
|
+
| `adapters.SimpleOpenAIAgentAdapter` | Works with raw `openai` chat completions |
|
|
287
|
+
| `adapters.AdapterGenerator` | Auto-generates Node.js / TypeScript / Python adapters |
|
|
288
|
+
| `scanner` | Detects existing memory systems in any codebase |
|
|
289
|
+
| `bridge` | HTTP + IPC bridge for non-Python agents |
|
|
290
|
+
| `cli` | Full CLI (`memgraph setup / scan / integrate / query / store`) |
|
|
291
|
+
|
|
292
|
+
---
|
|
293
|
+
|
|
294
|
+
## Framework Adapters
|
|
295
|
+
|
|
296
|
+
### Any Python agent (3 lines)
|
|
297
|
+
|
|
298
|
+
```python
|
|
299
|
+
from adapters.generic import GenericAgentAdapter
|
|
300
|
+
|
|
301
|
+
adapter = GenericAgentAdapter(mg, token_budget=1500, fmt="claude_xml")
|
|
302
|
+
|
|
303
|
+
context = await adapter.before_turn(user_message) # inject into prompt
|
|
304
|
+
await adapter.after_turn(messages) # store the turn
|
|
305
|
+
```
|
|
306
|
+
|
|
307
|
+
### LangChain
|
|
308
|
+
|
|
309
|
+
```python
|
|
310
|
+
from adapters.langchain import MemGraphChatMemory
|
|
311
|
+
|
|
312
|
+
memory = MemGraphChatMemory(mg, token_budget=1500)
|
|
313
|
+
chain = LLMChain(llm=chat_model, prompt=prompt, memory=memory)
|
|
314
|
+
# Works exactly like ConversationBufferMemory — but token-aware
|
|
315
|
+
```
|
|
316
|
+
|
|
317
|
+
### OpenAI Agents SDK
|
|
318
|
+
|
|
319
|
+
```python
|
|
320
|
+
from adapters.openai_agents import MemGraphHooks
|
|
321
|
+
|
|
322
|
+
agent = Agent(
|
|
323
|
+
name="MyAgent",
|
|
324
|
+
instructions="You are a helpful assistant.",
|
|
325
|
+
hooks=MemGraphHooks(mg),
|
|
326
|
+
)
|
|
327
|
+
```
|
|
328
|
+
|
|
329
|
+
### Raw OpenAI completions
|
|
330
|
+
|
|
331
|
+
```python
|
|
332
|
+
from adapters.openai_agents import SimpleOpenAIAgentAdapter
|
|
333
|
+
|
|
334
|
+
adapter = SimpleOpenAIAgentAdapter(mg)
|
|
335
|
+
messages = await adapter.before_completion(messages)
|
|
336
|
+
response = await client.chat.completions.create(model="gpt-4o", messages=messages)
|
|
337
|
+
await adapter.after_completion(messages, response.choices[0].message.content)
|
|
338
|
+
```
|
|
339
|
+
|
|
340
|
+
### Node.js / TypeScript agents (ATLAS, OpenClaw, etc.)
|
|
341
|
+
|
|
342
|
+
```bash
|
|
343
|
+
# Auto-detect your agent's memory system and generate an adapter
|
|
344
|
+
memgraph integrate /path/to/your/agent
|
|
345
|
+
|
|
346
|
+
# Or apply it directly
|
|
347
|
+
memgraph integrate /path/to/your/agent --apply
|
|
348
|
+
```
|
|
349
|
+
|
|
350
|
+
Then add ONE line to your agent's entry point:
|
|
351
|
+
|
|
352
|
+
```js
|
|
353
|
+
require('./memgraph-adapter').patch(); // Node.js
|
|
354
|
+
// or
|
|
355
|
+
import './memgraph-adapter'; // TypeScript — adapter auto-patches at import
|
|
356
|
+
```
|
|
357
|
+
|
|
358
|
+
---
|
|
359
|
+
|
|
360
|
+
## MCP Integration (Claude Code / Cursor / Windsurf)
|
|
361
|
+
|
|
362
|
+
Run the MCP server:
|
|
363
|
+
|
|
364
|
+
```bash
|
|
365
|
+
memgraph-server
|
|
366
|
+
# or
|
|
367
|
+
uv run uvicorn server.app:app --port 8100
|
|
368
|
+
```
|
|
369
|
+
|
|
370
|
+
Add to your MCP client config:
|
|
371
|
+
|
|
372
|
+
```json
|
|
373
|
+
{
|
|
374
|
+
"mcpServers": {
|
|
375
|
+
"memgraph": {
|
|
376
|
+
"url": "http://localhost:8100/mcp"
|
|
377
|
+
}
|
|
378
|
+
}
|
|
379
|
+
}
|
|
380
|
+
```
|
|
381
|
+
|
|
382
|
+
Tools exposed: `store_memory`, `query_memory`, `forget`, `memory_stats`, `generate_context`.
|
|
383
|
+
|
|
384
|
+
See [examples/claude_code_mcp.py](examples/claude_code_mcp.py) for a complete walkthrough.
|
|
385
|
+
|
|
386
|
+
---
|
|
387
|
+
|
|
388
|
+
## REST API
|
|
389
|
+
|
|
390
|
+
```bash
|
|
391
|
+
# Start server
|
|
392
|
+
memgraph-server # listens on :8100
|
|
393
|
+
|
|
394
|
+
# Store a memory
|
|
395
|
+
curl -X POST http://localhost:8100/store \
|
|
396
|
+
-H 'Content-Type: application/json' \
|
|
397
|
+
-d '{"content": "Luke prefers TypeScript.", "group_id": "session-1"}'
|
|
398
|
+
|
|
399
|
+
# Query memory
|
|
400
|
+
curl -X POST http://localhost:8100/query \
|
|
401
|
+
-H 'Content-Type: application/json' \
|
|
402
|
+
-d '{"query": "What stack does Luke prefer?", "token_budget": 500}'
|
|
403
|
+
|
|
404
|
+
# Health check
|
|
405
|
+
curl http://localhost:8100/health
|
|
406
|
+
```
|
|
407
|
+
|
|
408
|
+
Interactive docs at `http://localhost:8100/docs`.
|
|
409
|
+
|
|
410
|
+
---
|
|
411
|
+
|
|
412
|
+
## CLI
|
|
413
|
+
|
|
414
|
+
```bash
|
|
415
|
+
# Check prerequisites
|
|
416
|
+
memgraph setup
|
|
417
|
+
memgraph setup --fix # auto-install missing
|
|
418
|
+
|
|
419
|
+
# Scan a project's memory architecture
|
|
420
|
+
memgraph scan /path/to/atlas
|
|
421
|
+
|
|
422
|
+
# Generate + apply an adapter
|
|
423
|
+
memgraph integrate /path/to/atlas --apply
|
|
424
|
+
|
|
425
|
+
# Store and query directly
|
|
426
|
+
memgraph store "Luke runs Ollama on a Ryzen 5800H."
|
|
427
|
+
memgraph query "What hardware does Luke use?" --budget 300
|
|
428
|
+
|
|
429
|
+
# Check server health
|
|
430
|
+
memgraph status
|
|
431
|
+
```
|
|
432
|
+
|
|
433
|
+
---
|
|
434
|
+
|
|
435
|
+
## Output Formats
|
|
436
|
+
|
|
437
|
+
```python
|
|
438
|
+
from memgraph.formatter import OutputFormat
|
|
439
|
+
|
|
440
|
+
# Claude XML (default — Claude parses this most efficiently)
|
|
441
|
+
ctx = await mg.query(q, fmt=OutputFormat.CLAUDE_XML)
|
|
442
|
+
# → <agent_memory token_budget="1000">...</agent_memory>
|
|
443
|
+
|
|
444
|
+
# OpenAI system message
|
|
445
|
+
ctx = await mg.query(q, fmt=OutputFormat.OPENAI_SYSTEM)
|
|
446
|
+
# → [MEMORY CONTEXT] ... [/MEMORY CONTEXT]
|
|
447
|
+
|
|
448
|
+
# Markdown (for CLAUDE.md injection)
|
|
449
|
+
ctx = await mg.query(q, fmt=OutputFormat.MARKDOWN)
|
|
450
|
+
# → ## Agent Memory\n- **ATLAS**: ...
|
|
451
|
+
|
|
452
|
+
# Raw JSON
|
|
453
|
+
ctx = await mg.query(q, fmt=OutputFormat.JSON)
|
|
454
|
+
# → {"entities": [...], "facts": [...], ...}
|
|
455
|
+
```
|
|
456
|
+
|
|
457
|
+
---
|
|
458
|
+
|
|
459
|
+
## Roadmap
|
|
460
|
+
|
|
461
|
+
- [x] Phase 1: Core engine (FAISS + Graphiti + token budget + formatters)
|
|
462
|
+
- [x] Phase 2: FastAPI REST + MCP server
|
|
463
|
+
- [x] Phase 3: Framework adapters (LangChain, OpenAI Agents, generic), benchmarks
|
|
464
|
+
- [x] Phase 4: PyPI release, setup checker, CI/CD
|
|
465
|
+
- [ ] Hosted API tier (pay-as-you-go, no Docker required)
|
|
466
|
+
- [ ] LangGraph adapter
|
|
467
|
+
- [ ] Streaming retrieval (yield fragments as they score)
|
|
468
|
+
- [ ] Web dashboard (FalkorDB browser + token savings metrics)
|
|
469
|
+
|
|
470
|
+
---
|
|
471
|
+
|
|
472
|
+
## Development
|
|
473
|
+
|
|
474
|
+
```bash
|
|
475
|
+
# Install with dev deps
|
|
476
|
+
uv sync --extra dev
|
|
477
|
+
|
|
478
|
+
# Run tests (pure, no live services needed)
|
|
479
|
+
uv run pytest tests/ -v
|
|
480
|
+
|
|
481
|
+
# Run tests with coverage
|
|
482
|
+
uv run pytest tests/ --cov=memgraph --cov-report=term-missing
|
|
483
|
+
|
|
484
|
+
# Lint + format
|
|
485
|
+
uv run ruff check .
|
|
486
|
+
uv run ruff format .
|
|
487
|
+
|
|
488
|
+
# Type check
|
|
489
|
+
uv run mypy memgraph/
|
|
490
|
+
|
|
491
|
+
# Run benchmarks (requires live FalkorDB + Ollama)
|
|
492
|
+
uv run python benchmarks/token_comparison.py
|
|
493
|
+
uv run python benchmarks/retrieval_speed.py
|
|
494
|
+
uv run python benchmarks/quality_eval.py
|
|
495
|
+
```
|
|
496
|
+
|
|
497
|
+
### Project structure
|
|
498
|
+
|
|
499
|
+
```
|
|
500
|
+
memgraph/ Core Python package (store, query, forget, token budget)
|
|
501
|
+
server/ FastAPI REST + MCP server
|
|
502
|
+
adapters/ Framework adapters (LangChain, OpenAI Agents, generic, Node.js templates)
|
|
503
|
+
scanner/ Project scanner (detects existing memory systems)
|
|
504
|
+
bridge/ HTTP + IPC bridge for non-Python agents
|
|
505
|
+
cli/ Unified CLI (memgraph setup / scan / integrate / query)
|
|
506
|
+
benchmarks/ Token comparison, speed, and quality benchmarks
|
|
507
|
+
examples/ Working end-to-end examples
|
|
508
|
+
tests/ pytest suite (pure module tests, no live services)
|
|
509
|
+
```
|
|
510
|
+
|
|
511
|
+
---
|
|
512
|
+
|
|
513
|
+
## License
|
|
514
|
+
|
|
515
|
+
MIT — see [LICENSE](LICENSE).
|
|
516
|
+
|
|
517
|
+
---
|
|
518
|
+
|
|
519
|
+
## Credits
|
|
520
|
+
|
|
521
|
+
Built by [Luke (TechItLuke)](https://github.com/SP3DK1D) on top of:
|
|
522
|
+
- [Graphiti](https://github.com/getzep/graphiti) by Zep — temporal knowledge graph engine
|
|
523
|
+
- [FalkorDB](https://falkordb.com) — lightweight Redis-compatible graph database
|
|
524
|
+
- [FAISS](https://github.com/facebookresearch/faiss) by Meta — in-process vector search
|
|
525
|
+
- [nomic-embed-text](https://ollama.com/library/nomic-embed-text) — free local embeddings via Ollama
|