mnemonics 0.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
mnemonics/__init__.py ADDED
@@ -0,0 +1,7 @@
1
+ """mnemonics — verified AI memory. Retrieval that doesn't hallucinate."""
2
+ from mnemonics.store import Store
3
+ from mnemonics.ingest import ingest
4
+ from mnemonics.retrieve import retrieve
5
+
6
+ __all__ = ["Store", "ingest", "retrieve"]
7
+ __version__ = "0.1.0"
mnemonics/cli.py ADDED
@@ -0,0 +1,82 @@
1
+ """mnemonics CLI."""
2
+ from __future__ import annotations
3
+
4
+ import argparse
5
+ import json
6
+ import sys
7
+
8
+
9
def main() -> None:
    """Entry point for the ``mnemonics`` console script.

    Builds the argument parser, dispatches on the chosen sub-command, and
    imports the heavy modules lazily so e.g. ``--help`` stays fast.
    """
    parser = argparse.ArgumentParser(prog="mnemonics")
    commands = parser.add_subparsers(dest="cmd")

    # serve
    serve_cmd = commands.add_parser("serve", help="Start REST server")
    serve_cmd.add_argument("--port", type=int, default=7810)
    serve_cmd.add_argument("--path", default="~/.mnemonics")

    # mcp
    commands.add_parser("mcp", help="Start MCP stdio server")

    # ingest
    ingest_cmd = commands.add_parser("ingest", help="Add text to memory")
    ingest_cmd.add_argument("text", nargs="+")
    ingest_cmd.add_argument("--ns", default="default")
    ingest_cmd.add_argument("--path", default="~/.mnemonics")

    # retrieve
    retrieve_cmd = commands.add_parser("retrieve", help="Search memory")
    retrieve_cmd.add_argument("query")
    retrieve_cmd.add_argument("--ns", default="default")
    retrieve_cmd.add_argument("--top-k", type=int, default=5)
    retrieve_cmd.add_argument("--no-verify", action="store_true")
    retrieve_cmd.add_argument("--path", default="~/.mnemonics")

    # stats
    stats_cmd = commands.add_parser("stats", help="Show memory stats")
    stats_cmd.add_argument("--path", default="~/.mnemonics")

    opts = parser.parse_args()

    if opts.cmd == "serve":
        import os
        # Propagate --path to the server module before it is imported,
        # since mnemonics.server reads MNEMONICS_PATH at import time.
        os.environ["MNEMONICS_PATH"] = opts.path
        from mnemonics.server import serve
        serve(port=opts.port)

    elif opts.cmd == "mcp":
        from mnemonics.server import serve
        serve(mcp=True)

    elif opts.cmd == "ingest":
        from mnemonics.ingest import ingest
        from mnemonics.store import Store
        stored = ingest(texts=[" ".join(opts.text)], store=Store(opts.path), ns=opts.ns)
        print(f"Stored {stored} chunk(s).")

    elif opts.cmd == "retrieve":
        from mnemonics.retrieve import retrieve
        from mnemonics.store import Store
        result = retrieve(
            query=opts.query,
            store=Store(opts.path),
            ns=opts.ns,
            top_k=opts.top_k,
            verify=not opts.no_verify,
        )
        print(f"trust_score: {result['trust_score']} flagged: {result['flagged_count']}")
        for hit in result["results"]:
            marker = " ⚠" if hit.get("flagged") else ""
            print(f" [{hit['score']:.3f}]{marker} {hit['text'][:120]}")

    elif opts.cmd == "stats":
        from mnemonics.store import Store
        store = Store(opts.path)
        for ns in store.list_namespaces():
            print(f" {ns}: {store.count(ns)} chunks")

    else:
        parser.print_help()
        sys.exit(1)
mnemonics/ingest.py ADDED
@@ -0,0 +1,63 @@
1
+ """Ingest text into the store: chunk → embed → save."""
2
+ from __future__ import annotations
3
+
4
+ import re
5
+ from typing import Any
6
+
7
+ import numpy as np
8
+
9
+ from mnemonics.store import Store
10
+
11
# Lazily-created sentence-transformers model, cached at module level because
# loading it is expensive; _get_encoder() populates and reuses it.
_encoder: Any = None
# Name of the model currently held in _encoder; compared against requests to
# detect when a different model must be loaded.
_encoder_name: str = "all-MiniLM-L6-v2"
13
+
14
+
15
def _get_encoder(model: str = _encoder_name) -> Any:
    """Return the cached SentenceTransformer for *model*.

    Loads the model on first use, and reloads whenever a different model
    name is requested; the result is cached in module globals.
    """
    global _encoder, _encoder_name
    cache_miss = _encoder is None or model != _encoder_name
    if cache_miss:
        # Imported lazily: sentence-transformers is heavy and only needed here.
        from sentence_transformers import SentenceTransformer
        _encoder = SentenceTransformer(model)
        _encoder_name = model
    return _encoder
22
+
23
+
24
+ def _chunk(text: str, size: int = 200, overlap: int = 40) -> list[str]:
25
+ words = text.split()
26
+ if len(words) <= size:
27
+ return [text]
28
+ chunks = []
29
+ i = 0
30
+ while i < len(words):
31
+ chunk = " ".join(words[i:i + size])
32
+ chunks.append(chunk)
33
+ i += size - overlap
34
+ return chunks
35
+
36
+
37
def ingest(
    texts: list[str],
    store: Store,
    ns: str = "default",
    meta: list[dict] | None = None,
    model: str = "all-MiniLM-L6-v2",
    chunk_size: int = 200,
    chunk_overlap: int = 40,
) -> int:
    """Chunk, embed and store texts. Returns total chunks stored.

    Args:
        texts: Source documents to remember.
        store: Target Store instance.
        ns: Namespace the chunks are filed under.
        meta: Optional per-text metadata dicts; must be the same length
            as *texts* when given.
        model: sentence-transformers model name used for embedding.
        chunk_size: Chunk length in words.
        chunk_overlap: Words shared between consecutive chunks.

    Raises:
        ValueError: If *meta* is provided but its length differs from *texts*
            (previously this surfaced as an IndexError mid-loop, after some
            texts had already been chunked).
    """
    if meta is not None and len(meta) != len(texts):
        raise ValueError(f"meta length {len(meta)} != texts length {len(texts)}")

    enc = _get_encoder(model)
    all_chunks: list[str] = []
    all_meta: list[dict] = []

    for i, text in enumerate(texts):
        chunks = _chunk(text, chunk_size, chunk_overlap)
        # Each chunk inherits its source text's metadata plus its origin index.
        m = (meta[i] if meta else {}) | {"source_idx": i}
        all_chunks.extend(chunks)
        all_meta.extend([m] * len(chunks))

    if not all_chunks:
        return 0

    vecs = enc.encode(all_chunks, batch_size=64, show_progress_bar=False,
                      normalize_embeddings=True, convert_to_numpy=True)
    store.add(all_chunks, vecs, ns=ns, meta=all_meta)
    return len(all_chunks)
mnemonics/retrieve.py ADDED
@@ -0,0 +1,66 @@
1
+ """Retrieve memories — embed query, search, optionally verify with halluguard."""
2
+ from __future__ import annotations
3
+
4
+ from typing import Any
5
+
6
+ import requests
7
+
8
+ from mnemonics.store import Store
9
+ from mnemonics.ingest import _get_encoder
10
+
11
+
12
def retrieve(
    query: str,
    store: Store,
    ns: str = "default",
    top_k: int = 5,
    model: str = "all-MiniLM-L6-v2",
    verify: bool = True,
    verify_threshold: float = 0.45,
) -> dict[str, Any]:
    """
    Search the store for query.
    If verify=True, runs halluguard on results against the retrieved corpus.
    Returns: {results, verified, trust_score, flagged_count}
    """
    encoder = _get_encoder(model)
    query_vec = encoder.encode([query], normalize_embeddings=True, convert_to_numpy=True)[0]
    hits = store.search(query_vec, ns=ns, top_k=top_k)

    # No matches: an empty result set is trivially trustworthy.
    if not hits:
        return {"results": [], "verified": True, "trust_score": 1.0, "flagged_count": 0}

    trust_score, flagged_count, verified = 1.0, 0, True

    if verify:
        try:
            corpus = [hit["text"] for hit in hits]
            resp = requests.post(
                "http://127.0.0.1:7801/check",
                json={"corpus": corpus, "answer": " ".join(corpus), "threshold": verify_threshold},
                timeout=10,
            )
            if resp.ok:
                payload = resp.json()
                trust_score = payload.get("trust_score", 1.0)
                flagged_count = payload.get("n_flagged", 0)
                verified = payload.get("ok", True)
                # Mark each hit that halluguard flagged as divergent.
                flagged_texts = {item["text"] for item in payload.get("flagged", [])}
                for hit in hits:
                    hit["flagged"] = hit["text"] in flagged_texts
        except Exception:
            # Best-effort: halluguard daemon not running — skip verification.
            pass

    # Ensure every result carries a "flagged" key even when verify was skipped.
    for hit in hits:
        hit.setdefault("flagged", False)

    return {
        "results": hits,
        "verified": verified,
        "trust_score": round(trust_score, 4),
        "flagged_count": flagged_count,
    }
mnemonics/server.py ADDED
@@ -0,0 +1,232 @@
1
+ """MCP + REST server for mnemonics.
2
+
3
+ Endpoints:
4
+ GET /health
5
+ POST /ingest {"texts": [...], "ns": "default", "meta": [...]}
6
+ POST /retrieve {"query": "...", "ns": "default", "top_k": 5, "verify": true}
7
+ GET /namespaces
8
+ GET /count?ns=default
9
+ DELETE /memory/<id>
10
+
11
+ MCP tools (JSON-RPC over stdio):
12
+ mnemonics_ingest — store memories
13
+ mnemonics_retrieve — search + verify
14
+ mnemonics_forget — delete a memory by id
15
+ """
16
+ from __future__ import annotations
17
+
18
+ import json
19
+ import os
20
+ import sys
21
+ from http.server import BaseHTTPRequestHandler, HTTPServer
22
+ from typing import Any
23
+ from pathlib import Path
24
+
25
+ from mnemonics.store import Store
26
+ from mnemonics.ingest import ingest as _ingest
27
+ from mnemonics.retrieve import retrieve as _retrieve
28
+
29
# REST server port; override with the MNEMONICS_PORT environment variable.
MNEMONICS_PORT = int(os.environ.get("MNEMONICS_PORT", "7810"))
# Storage directory; override with MNEMONICS_PATH (the CLI sets this for
# `mnemonics serve --path` before importing this module).
MNEMONICS_PATH = os.environ.get("MNEMONICS_PATH", "~/.mnemonics")

# Process-wide Store singleton, created lazily by _get_store().
_store: Store | None = None
33
+
34
+
35
def _get_store() -> Store:
    """Lazily construct and return the shared Store singleton."""
    global _store
    if _store is None:
        _store = Store(MNEMONICS_PATH)
    return _store
40
+
41
+
42
class _Handler(BaseHTTPRequestHandler):
    """HTTP request handler implementing the mnemonics REST API.

    Routes:
        GET    /health, /namespaces, /count?ns=<ns>
        POST   /ingest, /retrieve
        DELETE /memory/<id>
    """

    def log_message(self, format: str, *args: Any) -> None:
        # Silence BaseHTTPRequestHandler's default per-request stderr logging.
        pass

    def _body(self) -> dict:
        """Read and JSON-decode the request body; {} when there is no body."""
        length = int(self.headers.get("Content-Length", 0))
        if length == 0:
            return {}
        return json.loads(self.rfile.read(length))

    def _json(self, code: int, data: Any) -> None:
        """Serialize *data* as JSON and send it with HTTP status *code*."""
        body = json.dumps(data).encode()
        self.send_response(code)
        self.send_header("Content-Type", "application/json")
        self.send_header("Content-Length", str(len(body)))
        self.end_headers()
        self.wfile.write(body)

    def do_GET(self) -> None:
        # Local stdlib import keeps the module's import block untouched.
        from urllib.parse import parse_qs, urlparse
        parsed = urlparse(self.path)
        path = parsed.path
        if path == "/health":
            self._json(200, {"status": "ok", "version": "0.1.0"})
        elif path == "/namespaces":
            self._json(200, {"namespaces": _get_store().list_namespaces()})
        elif path == "/count":
            # Bug fix: the old `self.path.split("ns=")[-1]` swallowed any
            # parameters after ns= and skipped percent-decoding; parse the
            # query string properly instead.
            ns = parse_qs(parsed.query).get("ns", ["default"])[0]
            self._json(200, {"ns": ns, "count": _get_store().count(ns)})
        else:
            self._json(404, {"error": "not found"})

    def do_POST(self) -> None:
        try:
            body = self._body()
        except Exception:
            self._json(400, {"error": "invalid JSON"})
            return

        if self.path == "/ingest":
            texts = body.get("texts", [])
            if not texts:
                self._json(400, {"error": "texts must not be empty"})
                return
            n = _ingest(
                texts=texts,
                store=_get_store(),
                ns=body.get("ns", "default"),
                meta=body.get("meta"),
            )
            self._json(200, {"ingested": n})

        elif self.path == "/retrieve":
            query = body.get("query", "").strip()
            if not query:
                self._json(400, {"error": "query must not be empty"})
                return
            result = _retrieve(
                query=query,
                store=_get_store(),
                ns=body.get("ns", "default"),
                top_k=int(body.get("top_k", 5)),
                verify=bool(body.get("verify", True)),
            )
            self._json(200, result)

        else:
            self._json(404, {"error": "not found"})

    def do_DELETE(self) -> None:
        if self.path.startswith("/memory/"):
            try:
                # Non-numeric ids raise ValueError and map to a 400 below.
                mid = int(self.path.split("/memory/")[1])
                ok = _get_store().delete(mid)
                self._json(200, {"deleted": ok})
            except ValueError:
                self._json(400, {"error": "invalid id"})
        else:
            self._json(404, {"error": "not found"})
119
+
120
+
121
+ # ── MCP stdio mode ───────────────────────────────────────────────────────────
122
+
123
def _mcp_loop() -> None:
    """JSON-RPC over stdin/stdout for MCP clients (Claude Code, Cursor, Metis).

    Reads one JSON-RPC request per line from stdin and writes one JSON
    response per line to stdout. The loop must never die on a bad request:
    malformed JSON is skipped and failing tool calls are reported as
    JSON-RPC errors.
    """
    for line in sys.stdin:
        line = line.strip()
        if not line:
            continue
        try:
            req = json.loads(line)
        except Exception:
            # Not valid JSON — skip the line rather than crash the server.
            continue

        rid = req.get("id")
        method = req.get("method", "")
        params = req.get("params", {})

        def ok(result: Any) -> None:
            # Success response bound to the current request id.
            print(json.dumps({"jsonrpc": "2.0", "id": rid, "result": result}), flush=True)

        def err(msg: str, code: int = -32600) -> None:
            # Error response bound to the current request id.
            print(json.dumps({"jsonrpc": "2.0", "id": rid, "error": {"code": code, "message": msg}}), flush=True)

        if method == "initialize":
            ok({
                "protocolVersion": "2024-11-05",
                "serverInfo": {"name": "mnemonics", "version": "0.1.0"},
                "capabilities": {"tools": {}},
            })

        elif method == "tools/list":
            ok({"tools": [
                {
                    "name": "mnemonics_ingest",
                    "description": "Store text memories into mnemonics. Chunks, embeds and persists.",
                    "inputSchema": {
                        "type": "object",
                        "properties": {
                            "texts": {"type": "array", "items": {"type": "string"}},
                            "ns": {"type": "string", "description": "Namespace (default: 'default')"},
                        },
                        "required": ["texts"],
                    },
                },
                {
                    "name": "mnemonics_retrieve",
                    "description": "Search memories for a query. Returns top results with hallucination verification.",
                    "inputSchema": {
                        "type": "object",
                        "properties": {
                            "query": {"type": "string"},
                            "ns": {"type": "string"},
                            "top_k": {"type": "integer"},
                            "verify": {"type": "boolean", "description": "Run halluguard verification (default true)"},
                        },
                        "required": ["query"],
                    },
                },
                {
                    "name": "mnemonics_forget",
                    "description": "Delete a memory by id.",
                    "inputSchema": {
                        "type": "object",
                        "properties": {"id": {"type": "integer"}},
                        "required": ["id"],
                    },
                },
            ]})

        elif method == "tools/call":
            name = params.get("name")
            args = params.get("arguments", {})

            try:
                if name == "mnemonics_ingest":
                    texts = args.get("texts", [])
                    n = _ingest(texts=texts, store=_get_store(), ns=args.get("ns", "default"))
                    ok({"content": [{"type": "text", "text": f"Stored {n} chunks."}]})

                elif name == "mnemonics_retrieve":
                    result = _retrieve(
                        query=args["query"],
                        store=_get_store(),
                        ns=args.get("ns", "default"),
                        top_k=int(args.get("top_k", 5)),
                        verify=bool(args.get("verify", True)),
                    )
                    lines = [f"trust_score: {result['trust_score']} flagged: {result['flagged_count']}"]
                    for r in result["results"]:
                        flag = " ⚠ FLAGGED" if r.get("flagged") else ""
                        lines.append(f"[{r['score']:.3f}]{flag} {r['text'][:200]}")
                    ok({"content": [{"type": "text", "text": "\n".join(lines)}]})

                elif name == "mnemonics_forget":
                    deleted = _get_store().delete(int(args["id"]))
                    ok({"content": [{"type": "text", "text": f"Deleted: {deleted}"}]})

                else:
                    err(f"unknown tool: {name}")
            except Exception as exc:
                # Bug fix: a malformed call (e.g. missing "query" or "id")
                # previously raised out of the loop and killed the whole
                # stdio server with no response; report it instead.
                err(f"tool call failed: {exc}", code=-32603)
        else:
            err(f"unknown method: {method}")
221
+
222
+
223
def serve(port: int = MNEMONICS_PORT, mcp: bool = False) -> None:
    """Run the server: MCP stdio loop when mcp=True, otherwise REST on *port*."""
    if mcp:
        _mcp_loop()
        return
    print(f"[mnemonics] listening on :{port}", flush=True)
    httpd = HTTPServer(("127.0.0.1", port), _Handler)
    try:
        httpd.serve_forever()
    except KeyboardInterrupt:
        # Clean Ctrl-C shutdown without a traceback.
        sys.exit(0)
mnemonics/store.py ADDED
@@ -0,0 +1,110 @@
1
+ """Persistent storage: SQLite (metadata) + hnswlib (vectors)."""
2
+ from __future__ import annotations
3
+
4
+ import json
5
+ import sqlite3
6
+ import threading
7
+ from pathlib import Path
8
+ from typing import Any
9
+
10
+ import hnswlib
11
+ import numpy as np
12
+
13
+
14
# SQLite schema: one row per stored chunk. Vectors are NOT stored here —
# they live in per-namespace hnswlib files keyed by this table's rowid.
_SCHEMA = """
CREATE TABLE IF NOT EXISTS memories (
    id INTEGER PRIMARY KEY AUTOINCREMENT,
    ns TEXT NOT NULL DEFAULT 'default',
    text TEXT NOT NULL,
    meta TEXT NOT NULL DEFAULT '{}',
    created TEXT NOT NULL DEFAULT (datetime('now'))
);
CREATE INDEX IF NOT EXISTS idx_ns ON memories(ns);
"""

DIM = 384  # all-MiniLM-L6-v2 output dim
26
+
27
+
28
class Store:
    """Thread-safe memory store backed by SQLite + hnswlib.

    SQLite holds text/metadata rows; one hnswlib cosine index per namespace
    holds the vectors, labeled by the SQLite rowid.
    """

    def __init__(self, path: str | Path = "~/.mnemonics"):
        """Open (creating if needed) the store rooted at *path*."""
        self.root = Path(path).expanduser()
        self.root.mkdir(parents=True, exist_ok=True)
        self._lock = threading.Lock()
        # check_same_thread=False: the connection is shared across the HTTP
        # server's threads; all mutations go through self._lock.
        self._db = sqlite3.connect(str(self.root / "memories.db"), check_same_thread=False)
        self._db.executescript(_SCHEMA)
        self._db.commit()
        # Lazily-loaded hnswlib index per namespace.
        self._index: dict[str, hnswlib.Index] = {}

    def _index_for(self, ns: str) -> hnswlib.Index:
        """Return the hnswlib index for *ns*, loading or initializing it."""
        if ns not in self._index:
            idx_path = self.root / f"index_{ns}.bin"
            idx = hnswlib.Index(space="cosine", dim=DIM)
            if idx_path.exists():
                idx.load_index(str(idx_path))
            else:
                idx.init_index(max_elements=100_000, ef_construction=200, M=16)
            idx.set_ef(64)
            self._index[ns] = idx
        return self._index[ns]

    def add(self, texts: list[str], vectors: np.ndarray, ns: str = "default", meta: list[dict] | None = None) -> list[int]:
        """Insert texts + vectors; returns the new row ids (= hnswlib labels)."""
        if meta is None:
            meta = [{} for _ in texts]
        with self._lock:
            ids = []
            for text, m in zip(texts, meta):
                cur = self._db.execute(
                    "INSERT INTO memories (ns, text, meta) VALUES (?, ?, ?)",
                    (ns, text, json.dumps(m)),
                )
                ids.append(cur.lastrowid)
            self._db.commit()
            idx = self._index_for(ns)
            # Bug fix: the index was created with a fixed 100k capacity and
            # add_items raises once that fills; grow it on demand instead.
            needed = idx.get_current_count() + len(ids)
            if needed > idx.get_max_elements():
                idx.resize_index(max(needed, idx.get_max_elements() * 2))
            idx.add_items(vectors, ids)
            idx.save_index(str(self.root / f"index_{ns}.bin"))
            return ids

    def search(self, vector: np.ndarray, ns: str = "default", top_k: int = 5) -> list[dict[str, Any]]:
        """Return up to *top_k* nearest memories in *ns*, best first.

        Each result dict has: id, text, meta, created, score (1 - cosine
        distance, so higher is better).
        """
        with self._lock:
            idx = self._index_for(ns)
            n = min(top_k, idx.get_current_count())
            if n == 0:
                return []
            labels, distances = idx.knn_query(vector, k=n)
            row_ids = [int(x) for x in labels[0]]
            placeholders = ",".join("?" * len(row_ids))
            rows = self._db.execute(
                f"SELECT id, text, meta, created FROM memories WHERE id IN ({placeholders})",
                row_ids,
            ).fetchall()
            by_id = {r[0]: r for r in rows}
            results = []
            for rid, dist in zip(labels[0], distances[0]):
                row = by_id.get(int(rid))
                if row is None:
                    # Label with no SQLite row (e.g. stale index entry) — skip.
                    continue
                results.append({
                    "id": row[0],
                    "text": row[1],
                    "meta": json.loads(row[2]),
                    "created": row[3],
                    "score": float(1 - dist),
                })
            return results

    def list_namespaces(self) -> list[str]:
        """Return all namespaces that currently hold at least one memory."""
        rows = self._db.execute("SELECT DISTINCT ns FROM memories ORDER BY ns").fetchall()
        return [r[0] for r in rows]

    def count(self, ns: str = "default") -> int:
        """Return the number of memories stored in *ns*."""
        row = self._db.execute("SELECT COUNT(*) FROM memories WHERE ns=?", (ns,)).fetchone()
        return row[0] if row else 0

    def delete(self, memory_id: int) -> bool:
        """Delete a memory row and tombstone its vector. True if a row existed."""
        with self._lock:
            # Look up the namespace first so we can also remove the vector.
            row = self._db.execute("SELECT ns FROM memories WHERE id=?", (memory_id,)).fetchone()
            cur = self._db.execute("DELETE FROM memories WHERE id=?", (memory_id,))
            self._db.commit()
            if row is not None:
                # Bug fix: previously only the SQLite row was removed, so the
                # vector kept matching in knn_query and crowded live results
                # out of the top_k (search drops labels without rows).
                ns = row[0]
                try:
                    idx = self._index_for(ns)
                    idx.mark_deleted(memory_id)
                    idx.save_index(str(self.root / f"index_{ns}.bin"))
                except RuntimeError:
                    pass  # label not present in the index — nothing to tombstone
            return cur.rowcount > 0
@@ -0,0 +1,152 @@
1
+ Metadata-Version: 2.4
2
+ Name: mnemonics
3
+ Version: 0.1.0
4
+ Summary: Verified AI memory — retrieval that doesn't hallucinate.
5
+ Author: atakan
6
+ License-Expression: MIT
7
+ Keywords: llm,memory,retrieval,rag,hallucination,mcp,agent
8
+ Classifier: Development Status :: 3 - Alpha
9
+ Classifier: Programming Language :: Python :: 3
10
+ Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
11
+ Requires-Python: >=3.10
12
+ Description-Content-Type: text/markdown
13
+ Requires-Dist: numpy>=1.24
14
+ Requires-Dist: hnswlib>=0.8
15
+ Requires-Dist: sentence-transformers>=3.0
16
+ Requires-Dist: requests>=2.28
17
+ Provides-Extra: verify
18
+ Requires-Dist: halluguard>=0.3; extra == "verify"
19
+ Provides-Extra: server
20
+ Requires-Dist: halluguard>=0.3; extra == "server"
21
+ Provides-Extra: dev
22
+ Requires-Dist: pytest>=8.0; extra == "dev"
23
+ Requires-Dist: pytest-cov>=5.0; extra == "dev"
24
+ Requires-Dist: ruff>=0.5; extra == "dev"
25
+
26
+ # Mnemonics
27
+
28
+ **Your memory doesn't hallucinate.**
29
+
30
+ Mnemonics is a local-first AI memory layer that stores, retrieves, and *verifies* what it returns. Built on sentence embeddings + HNSW vector search, with optional [halluguard](https://github.com/nakata-app/halluguard) verification to flag results that drift from the indexed corpus.
31
+
32
+ ## Why
33
+
34
+ Every RAG pipeline has the same silent failure mode: the retriever returns plausible-looking chunks, the LLM fills in the gaps, and nobody notices the fabrication until it matters. Mnemonics surfaces that problem at retrieval time, not after.
35
+
36
+ ## Install
37
+
38
+ ```bash
39
+ pip install mnemonics
40
+ # with verification support:
41
+ pip install "mnemonics[verify]"
42
+ ```
43
+
44
+ ## Quick start
45
+
46
+ ```bash
47
+ # Store something
48
+ mnemonics ingest "The Eiffel Tower is 330 meters tall and located in Paris."
49
+
50
+ # Retrieve with hallucination check
51
+ mnemonics retrieve "how tall is the Eiffel Tower"
52
+ # trust_score: 1.0 flagged: 0
53
+ # [0.912] The Eiffel Tower is 330 meters tall and located in Paris.
54
+ ```
55
+
56
+ ## Python API
57
+
58
+ ```python
59
+ from mnemonics.store import Store
60
+ from mnemonics.ingest import ingest
61
+ from mnemonics.retrieve import retrieve
62
+
63
+ store = Store("~/.mnemonics")
64
+
65
+ ingest(["Paris is the capital of France.", "Rome is the capital of Italy."], store)
66
+
67
+ result = retrieve("what is the capital of France", store, top_k=3, verify=True)
68
+ for r in result["results"]:
69
+ flag = " FLAGGED" if r["flagged"] else ""
70
+ print(f"[{r['score']:.3f}]{flag} {r['text']}")
71
+
72
+ print(f"trust_score: {result['trust_score']}")
73
+ ```
74
+
75
+ ## REST server
76
+
77
+ ```bash
78
+ mnemonics serve --port 7810
79
+ ```
80
+
81
+ | Method | Path | Body |
82
+ |--------|------|------|
83
+ | POST | `/ingest` | `{"texts": [...], "ns": "default"}` |
84
+ | POST | `/retrieve` | `{"query": "...", "top_k": 5, "verify": true}` |
85
+ | GET | `/health` | |
86
+ | GET | `/namespaces` | |
87
+ | GET | `/count?ns=default` | |
88
+ | DELETE | `/memory/<id>` | |
89
+
90
+ ## MCP (Claude Code / Cursor / Metis)
91
+
92
+ ```bash
93
+ mnemonics mcp
94
+ ```
95
+
96
+ Add to your MCP config:
97
+
98
+ ```json
99
+ {
100
+ "mcpServers": {
101
+ "mnemonics": {
102
+ "command": "mnemonics",
103
+ "args": ["mcp"]
104
+ }
105
+ }
106
+ }
107
+ ```
108
+
109
+ Tools exposed: `mnemonics_ingest`, `mnemonics_retrieve`, `mnemonics_forget`
110
+
111
+ ## Namespaces
112
+
113
+ Isolate memories by project, user, or any key:
114
+
115
+ ```bash
116
+ mnemonics ingest "project notes..." --ns work
117
+ mnemonics retrieve "deadlines" --ns work
118
+ ```
119
+
120
+ ## Architecture
121
+
122
+ ```
123
+ texts -> chunk (200w / 40w overlap) -> embed (all-MiniLM-L6-v2)
124
+ -> hnswlib cosine index (per namespace)
125
+ -> SQLite metadata store
126
+
127
+ retrieve -> embed query -> knn search -> halluguard verify -> ranked results
128
+ ```
129
+
130
+ Storage layout under `~/.mnemonics`:
131
+
132
+ ```
133
+ memories.db SQLite (text, meta, timestamps)
134
+ index_default.bin hnswlib index for "default" namespace
135
+ index_<ns>.bin one index per namespace
136
+ ```
137
+
138
+ ## Verification
139
+
140
+ When `verify=True`, retrieved chunks are sent to a local halluguard daemon (port 7801) which cross-checks each result against the full retrieved corpus. Results that diverge get flagged and the aggregate `trust_score` drops.
141
+
142
+ ```bash
143
+ pip install "mnemonics[verify]"
144
+ halluguard serve &
145
+ mnemonics retrieve "your query" # auto-verifies
146
+ ```
147
+
148
+ Verification is best-effort: if the daemon is not running, retrieval proceeds normally with `trust_score: 1.0`.
149
+
150
+ ## License
151
+
152
+ MIT
@@ -0,0 +1,11 @@
1
+ mnemonics/__init__.py,sha256=HNcDp5WPgCIcE_1MOaBZO3dWD46iF5gx1FFY8ityeLw,251
2
+ mnemonics/cli.py,sha256=pLy313ls2RazYqhyZsjv4zgEm-GeT6J_BDHKtGnmVAI,2498
3
+ mnemonics/ingest.py,sha256=F7cUWhh4K7LQ8iIleyjS1F484ptmyo7M0scSw9NF4vA,1741
4
+ mnemonics/retrieve.py,sha256=fPdTvNCXC8Jnurlkt7XWgnoEcMrhc5GKlmhirfGKODA,2066
5
+ mnemonics/server.py,sha256=wppzpeRW55Qxc9tIfZxLqcp7i9xHRKjnhHMt1S6hqR4,8390
6
+ mnemonics/store.py,sha256=rD8V4WjKPRjpD69t2-woE9pXUKfcyuN0Rsd5vWkFhpo,3969
7
+ mnemonics-0.1.0.dist-info/METADATA,sha256=nNbx_8bbRj8rmjFlHkhLP08uWCx29sTp3EVrD1Bbgsw,4113
8
+ mnemonics-0.1.0.dist-info/WHEEL,sha256=aeYiig01lYGDzBgS8HxWXOg3uV61G9ijOsup-k9o1sk,91
9
+ mnemonics-0.1.0.dist-info/entry_points.txt,sha256=j0fjDfTRZ5QwkDi56gGycyPP40zBJdTt-ZM5U7kPpWk,49
10
+ mnemonics-0.1.0.dist-info/top_level.txt,sha256=XC8R48GH1xNWd33VtcACI5p30usZwKtm7aUGll0rFak,10
11
+ mnemonics-0.1.0.dist-info/RECORD,,
@@ -0,0 +1,5 @@
1
+ Wheel-Version: 1.0
2
+ Generator: setuptools (82.0.1)
3
+ Root-Is-Purelib: true
4
+ Tag: py3-none-any
5
+
@@ -0,0 +1,2 @@
1
+ [console_scripts]
2
+ mnemonics = mnemonics.cli:main
@@ -0,0 +1 @@
1
+ mnemonics