@smilintux/skmemory 0.5.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.github/workflows/ci.yml +23 -0
- package/.github/workflows/publish.yml +52 -0
- package/ARCHITECTURE.md +219 -0
- package/LICENSE +661 -0
- package/README.md +159 -0
- package/SKILL.md +271 -0
- package/bin/cli.js +8 -0
- package/docker-compose.yml +58 -0
- package/index.d.ts +4 -0
- package/index.js +27 -0
- package/openclaw-plugin/package.json +59 -0
- package/openclaw-plugin/src/index.js +276 -0
- package/package.json +28 -0
- package/pyproject.toml +69 -0
- package/requirements.txt +13 -0
- package/seeds/cloud9-lumina.seed.json +39 -0
- package/seeds/cloud9-opus.seed.json +40 -0
- package/seeds/courage.seed.json +24 -0
- package/seeds/curiosity.seed.json +24 -0
- package/seeds/grief.seed.json +24 -0
- package/seeds/joy.seed.json +24 -0
- package/seeds/love.seed.json +24 -0
- package/seeds/skcapstone-lumina-merge.moltbook.md +65 -0
- package/seeds/skcapstone-lumina-merge.seed.json +49 -0
- package/seeds/sovereignty.seed.json +24 -0
- package/seeds/trust.seed.json +24 -0
- package/skmemory/__init__.py +66 -0
- package/skmemory/ai_client.py +182 -0
- package/skmemory/anchor.py +224 -0
- package/skmemory/backends/__init__.py +12 -0
- package/skmemory/backends/base.py +88 -0
- package/skmemory/backends/falkordb_backend.py +310 -0
- package/skmemory/backends/file_backend.py +209 -0
- package/skmemory/backends/qdrant_backend.py +364 -0
- package/skmemory/backends/sqlite_backend.py +665 -0
- package/skmemory/cli.py +1004 -0
- package/skmemory/data/seed.json +191 -0
- package/skmemory/importers/__init__.py +11 -0
- package/skmemory/importers/telegram.py +336 -0
- package/skmemory/journal.py +223 -0
- package/skmemory/lovenote.py +180 -0
- package/skmemory/models.py +228 -0
- package/skmemory/openclaw.py +237 -0
- package/skmemory/quadrants.py +191 -0
- package/skmemory/ritual.py +215 -0
- package/skmemory/seeds.py +163 -0
- package/skmemory/soul.py +273 -0
- package/skmemory/steelman.py +338 -0
- package/skmemory/store.py +445 -0
- package/tests/__init__.py +0 -0
- package/tests/test_ai_client.py +89 -0
- package/tests/test_anchor.py +153 -0
- package/tests/test_cli.py +65 -0
- package/tests/test_export_import.py +170 -0
- package/tests/test_file_backend.py +211 -0
- package/tests/test_journal.py +172 -0
- package/tests/test_lovenote.py +136 -0
- package/tests/test_models.py +194 -0
- package/tests/test_openclaw.py +122 -0
- package/tests/test_quadrants.py +174 -0
- package/tests/test_ritual.py +195 -0
- package/tests/test_seeds.py +208 -0
- package/tests/test_soul.py +197 -0
- package/tests/test_sqlite_backend.py +258 -0
- package/tests/test_steelman.py +257 -0
- package/tests/test_store.py +238 -0
- package/tests/test_telegram_import.py +181 -0
|
@@ -0,0 +1,665 @@
|
|
|
1
|
+
"""
|
|
2
|
+
SQLite-indexed storage backend (Level 0.5).
|
|
3
|
+
|
|
4
|
+
Solves the file-scanning problem: instead of reading every JSON file
|
|
5
|
+
on every list/search/filter, we maintain a SQLite index alongside
|
|
6
|
+
the JSON files. Queries hit the index. Full content loads on demand.
|
|
7
|
+
|
|
8
|
+
Zero infrastructure. Ships with Python. Instant boot.
|
|
9
|
+
|
|
10
|
+
Performance:
|
|
11
|
+
File scan (1000 memories): ~2-5 seconds
|
|
12
|
+
SQLite query (1000 memories): ~2-5 milliseconds
|
|
13
|
+
|
|
14
|
+
Directory layout (same as FileBackend):
|
|
15
|
+
base_path/
|
|
16
|
+
├── index.db <-- NEW: SQLite index
|
|
17
|
+
├── short-term/
|
|
18
|
+
│ └── {id}.json
|
|
19
|
+
├── mid-term/
|
|
20
|
+
│ └── ...
|
|
21
|
+
└── long-term/
|
|
22
|
+
└── ...
|
|
23
|
+
"""
|
|
24
|
+
|
|
25
|
+
from __future__ import annotations
|
|
26
|
+
|
|
27
|
+
import json
|
|
28
|
+
import os
|
|
29
|
+
import sqlite3
|
|
30
|
+
from pathlib import Path
|
|
31
|
+
from typing import Optional
|
|
32
|
+
|
|
33
|
+
from ..models import EmotionalSnapshot, Memory, MemoryLayer
|
|
34
|
+
from .base import BaseBackend
|
|
35
|
+
|
|
36
|
+
# Default on-disk location for both the per-memory JSON files and index.db.
DEFAULT_BASE_PATH = os.path.expanduser("~/.skmemory/memories")

# Index schema: one row per memory holding query-relevant metadata plus a
# short content preview; the authoritative record remains the JSON file
# pointed to by file_path.
# NOTE(review): tags, emotional_labels, and related_ids are stored as
# comma-joined strings — a value containing ',' would corrupt the
# round-trip; confirm upstream validation forbids commas.
_SCHEMA = """
CREATE TABLE IF NOT EXISTS memories (
    id TEXT PRIMARY KEY,
    title TEXT NOT NULL,
    layer TEXT NOT NULL,
    role TEXT NOT NULL DEFAULT 'general',
    tags TEXT NOT NULL DEFAULT '',
    source TEXT NOT NULL DEFAULT 'manual',
    source_ref TEXT NOT NULL DEFAULT '',
    summary TEXT NOT NULL DEFAULT '',
    content_preview TEXT NOT NULL DEFAULT '',
    emotional_intensity REAL NOT NULL DEFAULT 0.0,
    emotional_valence REAL NOT NULL DEFAULT 0.0,
    emotional_labels TEXT NOT NULL DEFAULT '',
    cloud9_achieved INTEGER NOT NULL DEFAULT 0,
    parent_id TEXT,
    related_ids TEXT NOT NULL DEFAULT '',
    created_at TEXT NOT NULL,
    updated_at TEXT NOT NULL,
    file_path TEXT NOT NULL,
    content_hash TEXT NOT NULL DEFAULT ''
);

CREATE INDEX IF NOT EXISTS idx_layer ON memories(layer);
CREATE INDEX IF NOT EXISTS idx_created ON memories(created_at DESC);
CREATE INDEX IF NOT EXISTS idx_intensity ON memories(emotional_intensity DESC);
CREATE INDEX IF NOT EXISTS idx_source ON memories(source);
CREATE INDEX IF NOT EXISTS idx_parent ON memories(parent_id);
"""

# Reason: 150 chars is enough for an agent to decide if it needs the full memory.
CONTENT_PREVIEW_LENGTH = 150
|
|
70
|
+
|
|
71
|
+
|
|
72
|
+
class SQLiteBackend(BaseBackend):
|
|
73
|
+
"""SQLite-indexed file storage for fast queries with full JSON on demand.
|
|
74
|
+
|
|
75
|
+
Args:
|
|
76
|
+
base_path: Root directory for memory storage and index.
|
|
77
|
+
"""
|
|
78
|
+
|
|
79
|
+
def __init__(self, base_path: str = DEFAULT_BASE_PATH) -> None:
    """Prepare the storage tree and open (or create) the SQLite index.

    Args:
        base_path: Root directory for memory storage and index.
    """
    self.base_path = Path(base_path)
    self._db_path = self.base_path / "index.db"
    self._conn: Optional[sqlite3.Connection] = None
    # Layer directories must exist before the index file can be created.
    self._ensure_dirs()
    self._ensure_db()
|
|
85
|
+
|
|
86
|
+
def _ensure_dirs(self) -> None:
    """Make sure every memory-layer directory exists under base_path."""
    for lyr in MemoryLayer:
        layer_dir = self.base_path / lyr.value
        layer_dir.mkdir(parents=True, exist_ok=True)
|
|
90
|
+
|
|
91
|
+
def _get_conn(self) -> sqlite3.Connection:
    """Return the lazily created SQLite connection.

    Returns:
        sqlite3.Connection: Active database connection.
    """
    if self._conn is not None:
        return self._conn

    conn = sqlite3.connect(
        str(self._db_path),
        check_same_thread=False,
    )
    conn.row_factory = sqlite3.Row
    # WAL + synchronous=NORMAL: concurrent readers with fast enough writes.
    conn.execute("PRAGMA journal_mode=WAL")
    conn.execute("PRAGMA synchronous=NORMAL")
    self._conn = conn
    return conn
|
|
106
|
+
|
|
107
|
+
def _ensure_db(self) -> None:
    """Create the index table and its indexes if they are missing."""
    connection = self._get_conn()
    connection.executescript(_SCHEMA)
    connection.commit()
|
|
112
|
+
|
|
113
|
+
def _file_path(self, memory: Memory) -> Path:
    """Compute the canonical JSON path for a memory.

    Args:
        memory: The memory to locate.

    Returns:
        Path: Full path to the JSON file.
    """
    filename = f"{memory.id}.json"
    return self.base_path / memory.layer.value / filename
|
|
123
|
+
|
|
124
|
+
def _find_file(self, memory_id: str) -> Optional[Path]:
    """Locate a memory's JSON file: index lookup first, directory scan second.

    Args:
        memory_id: The memory ID to find.

    Returns:
        Optional[Path]: Path to the file if found, else None.
    """
    indexed = self._get_conn().execute(
        "SELECT file_path FROM memories WHERE id = ?", (memory_id,)
    ).fetchone()
    if indexed:
        candidate = Path(indexed["file_path"])
        if candidate.exists():
            return candidate

    # Fallback: the index may be stale or empty — scan each layer directory.
    for lyr in MemoryLayer:
        candidate = self.base_path / lyr.value / f"{memory_id}.json"
        if candidate.exists():
            return candidate
    return None
|
|
147
|
+
|
|
148
|
+
def _index_memory(self, memory: Memory, file_path: Path) -> None:
    """Insert or update (upsert) the index entry for a memory.

    Only query-relevant metadata plus a short content preview go into the
    index; the authoritative record stays in the JSON file at ``file_path``.

    Args:
        memory: The memory to index.
        file_path: Where the JSON file lives.
    """
    conn = self._get_conn()
    # Only the first CONTENT_PREVIEW_LENGTH chars become searchable text.
    content_preview = memory.content[:CONTENT_PREVIEW_LENGTH]
    # NOTE(review): list fields are comma-joined below; a tag/label/id
    # containing ',' would corrupt the round-trip — confirm upstream
    # validation forbids commas.
    conn.execute(
        """
        INSERT OR REPLACE INTO memories (
            id, title, layer, role, tags, source, source_ref,
            summary, content_preview, emotional_intensity,
            emotional_valence, emotional_labels, cloud9_achieved,
            parent_id, related_ids, created_at, updated_at,
            file_path, content_hash
        ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
        """,
        (
            memory.id,
            memory.title,
            memory.layer.value,
            memory.role.value,
            ",".join(memory.tags),
            memory.source,
            memory.source_ref,
            memory.summary,
            content_preview,
            memory.emotional.intensity,
            memory.emotional.valence,
            ",".join(memory.emotional.labels),
            # SQLite has no bool type; store as 0/1 integer.
            1 if memory.emotional.cloud9_achieved else 0,
            memory.parent_id,
            ",".join(memory.related_ids),
            memory.created_at,
            memory.updated_at,
            str(file_path),
            memory.content_hash(),
        ),
    )
    conn.commit()
|
|
190
|
+
|
|
191
|
+
def _row_to_memory_summary(self, row: sqlite3.Row) -> dict:
    """Build a lightweight summary dict from an index row.

    This is the token-efficient representation: no full content, just
    enough for an agent to decide whether to load the complete memory.

    Args:
        row: SQLite row.

    Returns:
        dict: Lightweight memory summary.
    """
    def _split(joined: str) -> list[str]:
        # An empty column round-trips to [] rather than [''].
        return [item for item in joined.split(",") if item]

    return {
        "id": row["id"],
        "title": row["title"],
        "layer": row["layer"],
        "role": row["role"],
        "tags": _split(row["tags"]),
        "source": row["source"],
        "summary": row["summary"],
        "content_preview": row["content_preview"],
        "emotional_intensity": row["emotional_intensity"],
        "emotional_valence": row["emotional_valence"],
        "emotional_labels": _split(row["emotional_labels"]),
        "cloud9_achieved": bool(row["cloud9_achieved"]),
        "created_at": row["created_at"],
        "parent_id": row["parent_id"],
        "related_ids": _split(row["related_ids"]),
    }
|
|
222
|
+
|
|
223
|
+
def _row_to_memory(self, row: sqlite3.Row) -> Optional[Memory]:
    """Load the full Memory object from disk using the indexed path.

    Args:
        row: SQLite row with a ``file_path`` column.

    Returns:
        Optional[Memory]: Full memory object, or None if the file is
        missing, unreadable, or fails model validation.
    """
    path = Path(row["file_path"])
    if not path.exists():
        return None
    try:
        data = json.loads(path.read_text(encoding="utf-8"))
        return Memory(**data)
    # Reason: the original `except (json.JSONDecodeError, Exception)` was a
    # redundant catch-all. Narrow to read/parse/validation failures
    # (JSONDecodeError and pydantic ValidationError subclass ValueError).
    except (OSError, ValueError, TypeError):
        return None
|
|
240
|
+
|
|
241
|
+
def save(self, memory: Memory) -> str:
    """Write a memory to disk as JSON and refresh its index entry.

    Args:
        memory: The Memory to store.

    Returns:
        str: The memory ID.
    """
    target = self._file_path(memory)
    target.parent.mkdir(parents=True, exist_ok=True)
    serialized = json.dumps(memory.model_dump(), indent=2, default=str)
    target.write_text(serialized, encoding="utf-8")
    self._index_memory(memory, target)
    return memory.id
|
|
258
|
+
|
|
259
|
+
def load(self, memory_id: str) -> Optional[Memory]:
    """Load a memory by ID, using the index for fast lookup.

    Args:
        memory_id: The memory identifier.

    Returns:
        Optional[Memory]: The memory if found and readable, None otherwise.
    """
    path = self._find_file(memory_id)
    if path is None:
        return None
    try:
        data = json.loads(path.read_text(encoding="utf-8"))
        return Memory(**data)
    # Reason: the original `except (json.JSONDecodeError, Exception)` was a
    # redundant catch-all. Narrow to read/parse/validation failures
    # (JSONDecodeError and pydantic ValidationError subclass ValueError).
    except (OSError, ValueError, TypeError):
        return None
|
|
276
|
+
|
|
277
|
+
def delete(self, memory_id: str) -> bool:
    """Remove a memory's JSON file and its index entry.

    Args:
        memory_id: The memory identifier.

    Returns:
        bool: True if deleted, False if not found.
    """
    target = self._find_file(memory_id)

    # Drop the index row unconditionally so stale entries get cleaned up
    # even when the backing file is already gone.
    conn = self._get_conn()
    conn.execute("DELETE FROM memories WHERE id = ?", (memory_id,))
    conn.commit()

    if target is None:
        return False
    if target.exists():
        target.unlink()
    return True
|
|
297
|
+
|
|
298
|
+
def list_memories(
    self,
    layer: Optional[MemoryLayer] = None,
    tags: Optional[list[str]] = None,
    limit: int = 50,
) -> list[Memory]:
    """List memories using the index for filtering, loading full objects.

    Args:
        layer: Filter by memory layer (None = all layers).
        tags: Filter by tags (AND logic, exact tag match).
        limit: Maximum results.

    Returns:
        list[Memory]: Matching memories sorted newest first.
    """
    conn = self._get_conn()
    conditions: list[str] = []
    params: list = []

    if layer:
        conditions.append("layer = ?")
        params.append(layer.value)

    if tags:
        for tag in tags:
            # Reason: tags are stored comma-joined; wrapping both sides in
            # commas makes this an exact-tag match, so 'car' no longer
            # matches a memory tagged 'card' (was: tags LIKE '%tag%').
            conditions.append("(',' || tags || ',') LIKE ?")
            params.append(f"%,{tag},%")

    where = " AND ".join(conditions) if conditions else "1=1"
    params.append(limit)

    rows = conn.execute(
        f"SELECT * FROM memories WHERE {where} "
        f"ORDER BY created_at DESC LIMIT ?",
        params,
    ).fetchall()

    # Index rows whose backing JSON file is missing/unreadable are skipped.
    results = []
    for row in rows:
        mem = self._row_to_memory(row)
        if mem is not None:
            results.append(mem)
    return results
|
|
342
|
+
|
|
343
|
+
def list_summaries(
    self,
    layer: Optional[MemoryLayer] = None,
    tags: Optional[list[str]] = None,
    limit: int = 50,
    min_intensity: float = 0.0,
    order_by: str = "created_at",
) -> list[dict]:
    """List memory summaries from the index only (no file I/O).

    This is the token-efficient path: returns lightweight dicts with
    title, summary, preview, and emotional data — no full content.
    Use this for agent context loading.

    Args:
        layer: Filter by memory layer.
        tags: Filter by tags (AND logic, exact tag match).
        limit: Maximum results.
        min_intensity: Minimum emotional intensity filter.
        order_by: Sort field ('created_at' or 'emotional_intensity').

    Returns:
        list[dict]: Lightweight memory summaries.
    """
    conn = self._get_conn()
    conditions: list[str] = []
    params: list = []

    if layer:
        conditions.append("layer = ?")
        params.append(layer.value)

    if tags:
        for tag in tags:
            # Reason: exact-tag match on the comma-joined column; a bare
            # LIKE '%tag%' also matched supersets such as 'card' for 'car'.
            conditions.append("(',' || tags || ',') LIKE ?")
            params.append(f"%,{tag},%")

    if min_intensity > 0:
        conditions.append("emotional_intensity >= ?")
        params.append(min_intensity)

    where = " AND ".join(conditions) if conditions else "1=1"

    # Reason: the ORDER BY column is interpolated into SQL, so it must be
    # whitelisted — anything unrecognized falls back to created_at.
    if order_by == "emotional_intensity":
        order = "emotional_intensity DESC"
    else:
        order = "created_at DESC"

    params.append(limit)

    rows = conn.execute(
        f"SELECT * FROM memories WHERE {where} ORDER BY {order} LIMIT ?",
        params,
    ).fetchall()

    return [self._row_to_memory_summary(row) for row in rows]
|
|
399
|
+
|
|
400
|
+
def search_text(self, query: str, limit: int = 10) -> list[Memory]:
    """Search memories via the SQLite index (title, summary, preview, tags).

    NOTE(review): only indexed columns are searched — content beyond the
    first CONTENT_PREVIEW_LENGTH characters is invisible to this query.
    The previous docstring claimed a full file-scan fallback, but no such
    fallback is implemented here.

    Args:
        query: Search string (SQL LIKE substring match).
        limit: Maximum results.

    Returns:
        list[Memory]: Matching memories, newest first.
    """
    conn = self._get_conn()
    query_param = f"%{query}%"

    rows = conn.execute(
        """
        SELECT * FROM memories
        WHERE title LIKE ? OR summary LIKE ? OR content_preview LIKE ?
            OR tags LIKE ?
        ORDER BY created_at DESC
        LIMIT ?
        """,
        (query_param, query_param, query_param, query_param, limit),
    ).fetchall()

    # Index rows whose backing JSON file is missing are silently skipped.
    results = []
    for row in rows:
        mem = self._row_to_memory(row)
        if mem is not None:
            results.append(mem)
    return results
|
|
432
|
+
|
|
433
|
+
def get_related(self, memory_id: str, depth: int = 1) -> list[dict]:
    """Get related memories by traversing related_ids (shallow graph walk).

    Breadth-first traversal over ``related_ids`` plus ``parent_id`` links,
    up to ``depth`` hops from the starting memory. The starting memory
    itself is excluded from the results.

    Args:
        memory_id: Starting memory ID.
        depth: How many hops to follow (1 = direct relations).

    Returns:
        list[dict]: Related memory summaries (lightweight, no full content).
    """
    conn = self._get_conn()
    visited: set[str] = {memory_id}
    frontier: list[str] = []
    results: list[dict] = []

    # Reason: seed the frontier from the starting node's relationships
    row = conn.execute(
        "SELECT * FROM memories WHERE id = ?", (memory_id,)
    ).fetchone()
    if row is None:
        return results

    related = [r for r in row["related_ids"].split(",") if r]
    frontier.extend(r for r in related if r not in visited)
    if row["parent_id"] and row["parent_id"] not in visited:
        frontier.append(row["parent_id"])

    for _ in range(depth):
        next_frontier: list[str] = []
        for mid in frontier:
            # Guard against cycles and duplicate edges in the same hop.
            if mid in visited:
                continue
            visited.add(mid)

            neighbor = conn.execute(
                "SELECT * FROM memories WHERE id = ?", (mid,)
            ).fetchone()
            # IDs referencing deleted/unindexed memories are ignored.
            if neighbor is None:
                continue

            results.append(self._row_to_memory_summary(neighbor))

            # Queue this neighbor's own links for the next hop.
            child_related = [
                r for r in neighbor["related_ids"].split(",") if r
            ]
            next_frontier.extend(
                r for r in child_related if r not in visited
            )
            if neighbor["parent_id"] and neighbor["parent_id"] not in visited:
                next_frontier.append(neighbor["parent_id"])

        frontier = next_frontier

    return results
|
|
487
|
+
|
|
488
|
+
def export_all(self, output_path: Optional[str] = None) -> str:
    """Export all memories as a single JSON file for backup.

    Reads every JSON file on disk (not the index) and bundles them into
    one git-friendly backup. One file per day by default (overwrites
    same-day exports).

    Args:
        output_path: Where to write the backup. If None, uses
            ``~/.skmemory/backups/skmemory-backup-YYYY-MM-DD.json``.

    Returns:
        str: Path to the written backup file.
    """
    # Reason: function-local imports, consolidated at the top of the body
    # (they were scattered mid-function); the package import stays local
    # to avoid a circular import on __version__.
    from datetime import date as _date, datetime as _dt, timezone as _tz

    from .. import __version__

    if output_path is None:
        backup_dir = self.base_path.parent / "backups"
        backup_dir.mkdir(parents=True, exist_ok=True)
        output_path = str(
            backup_dir / f"skmemory-backup-{_date.today().isoformat()}.json"
        )

    memories: list[dict] = []
    for layer in MemoryLayer:
        layer_dir = self.base_path / layer.value
        if not layer_dir.exists():
            continue
        for json_file in sorted(layer_dir.glob("*.json")):
            try:
                memories.append(
                    json.loads(json_file.read_text(encoding="utf-8"))
                )
            # Reason: narrowed from the redundant
            # `(json.JSONDecodeError, Exception)` catch-all; skip files
            # that are unreadable or malformed but surface real bugs.
            except (OSError, ValueError):
                continue

    payload = {
        "skmemory_version": __version__,
        "exported_at": _dt.now(_tz.utc).isoformat(),
        "memory_count": len(memories),
        "base_path": str(self.base_path),
        "memories": memories,
    }

    Path(output_path).write_text(
        json.dumps(payload, indent=2, default=str), encoding="utf-8"
    )
    return output_path
|
|
538
|
+
|
|
539
|
+
def import_backup(self, backup_path: str) -> int:
    """Restore memories from a JSON backup file.

    Each memory is written back into its layer directory and re-indexed;
    an existing memory with the same ID is overwritten. Entries that fail
    validation or writing are skipped (best-effort restore).

    Args:
        backup_path: Path to the backup JSON file.

    Returns:
        int: Number of memories restored.

    Raises:
        FileNotFoundError: If backup_path does not exist.
        ValueError: If the file is not a valid skmemory backup.
    """
    source = Path(backup_path)
    if not source.exists():
        raise FileNotFoundError(f"Backup not found: {backup_path}")

    data = json.loads(source.read_text(encoding="utf-8"))

    if "memories" not in data or not isinstance(data["memories"], list):
        raise ValueError(
            "Invalid backup file: missing 'memories' array"
        )

    restored = 0
    for mem_data in data["memories"]:
        try:
            memory = Memory(**mem_data)
            file_path = self._file_path(memory)
            file_path.parent.mkdir(parents=True, exist_ok=True)
            file_path.write_text(
                json.dumps(memory.model_dump(), indent=2, default=str),
                encoding="utf-8",
            )
            self._index_memory(memory, file_path)
            restored += 1
        except Exception:
            # Deliberate best-effort: one bad entry must not abort the rest.
            continue

    return restored
|
|
582
|
+
|
|
583
|
+
def reindex(self) -> int:
    """Rebuild the entire index from the JSON files on disk.

    Use this after manual file edits or migration from FileBackend.

    Returns:
        int: Number of memories indexed.
    """
    conn = self._get_conn()
    conn.execute("DELETE FROM memories")
    conn.commit()

    count = 0
    for layer in MemoryLayer:
        layer_dir = self.base_path / layer.value
        if not layer_dir.exists():
            continue
        for json_file in layer_dir.glob("*.json"):
            try:
                data = json.loads(json_file.read_text(encoding="utf-8"))
                memory = Memory(**data)
            # Reason: narrowed from the redundant
            # `(json.JSONDecodeError, Exception)` catch-all; skip files
            # that are unreadable, malformed, or fail model validation
            # (pydantic ValidationError subclasses ValueError).
            except (OSError, ValueError, TypeError):
                continue
            # Indexing errors are real bugs and should propagate, so the
            # insert is outside the try block.
            self._index_memory(memory, json_file)
            count += 1

    return count
|
|
610
|
+
|
|
611
|
+
def stats(self) -> dict:
    """Get index statistics.

    Returns:
        dict: Counts by layer, total, and index size.
    """
    conn = self._get_conn()

    total = conn.execute("SELECT COUNT(*) FROM memories").fetchone()[0]

    # Reason: a single GROUP BY replaces the previous one-COUNT-query-per-
    # layer loop; layers with no rows still report 0.
    layer_counts = {layer.value: 0 for layer in MemoryLayer}
    for row in conn.execute(
        "SELECT layer, COUNT(*) AS n FROM memories GROUP BY layer"
    ):
        if row["layer"] in layer_counts:
            layer_counts[row["layer"]] = row["n"]

    db_size = self._db_path.stat().st_size if self._db_path.exists() else 0

    return {
        "total": total,
        "by_layer": layer_counts,
        "index_size_bytes": db_size,
        "index_path": str(self._db_path),
    }
|
|
637
|
+
|
|
638
|
+
def health_check(self) -> dict:
    """Check SQLite backend health.

    Returns:
        dict: Status with path, counts, and index info.
    """
    try:
        snapshot = self.stats()
    except Exception as e:
        # Boundary method: report the failure rather than raising.
        return {
            "ok": False,
            "backend": "SQLiteBackend",
            "error": str(e),
        }
    return {
        "ok": True,
        "backend": "SQLiteBackend",
        "base_path": str(self.base_path),
        "total_memories": snapshot["total"],
        "by_layer": snapshot["by_layer"],
        "index_size_bytes": snapshot["index_size_bytes"],
    }
|
|
660
|
+
|
|
661
|
+
def close(self) -> None:
    """Close the database connection, if one is open."""
    if self._conn is None:
        return
    self._conn.close()
    self._conn = None
|