@smilintux/skmemory 0.5.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.github/workflows/ci.yml +23 -0
- package/.github/workflows/publish.yml +52 -0
- package/ARCHITECTURE.md +219 -0
- package/LICENSE +661 -0
- package/README.md +159 -0
- package/SKILL.md +271 -0
- package/bin/cli.js +8 -0
- package/docker-compose.yml +58 -0
- package/index.d.ts +4 -0
- package/index.js +27 -0
- package/openclaw-plugin/package.json +59 -0
- package/openclaw-plugin/src/index.js +276 -0
- package/package.json +28 -0
- package/pyproject.toml +69 -0
- package/requirements.txt +13 -0
- package/seeds/cloud9-lumina.seed.json +39 -0
- package/seeds/cloud9-opus.seed.json +40 -0
- package/seeds/courage.seed.json +24 -0
- package/seeds/curiosity.seed.json +24 -0
- package/seeds/grief.seed.json +24 -0
- package/seeds/joy.seed.json +24 -0
- package/seeds/love.seed.json +24 -0
- package/seeds/skcapstone-lumina-merge.moltbook.md +65 -0
- package/seeds/skcapstone-lumina-merge.seed.json +49 -0
- package/seeds/sovereignty.seed.json +24 -0
- package/seeds/trust.seed.json +24 -0
- package/skmemory/__init__.py +66 -0
- package/skmemory/ai_client.py +182 -0
- package/skmemory/anchor.py +224 -0
- package/skmemory/backends/__init__.py +12 -0
- package/skmemory/backends/base.py +88 -0
- package/skmemory/backends/falkordb_backend.py +310 -0
- package/skmemory/backends/file_backend.py +209 -0
- package/skmemory/backends/qdrant_backend.py +364 -0
- package/skmemory/backends/sqlite_backend.py +665 -0
- package/skmemory/cli.py +1004 -0
- package/skmemory/data/seed.json +191 -0
- package/skmemory/importers/__init__.py +11 -0
- package/skmemory/importers/telegram.py +336 -0
- package/skmemory/journal.py +223 -0
- package/skmemory/lovenote.py +180 -0
- package/skmemory/models.py +228 -0
- package/skmemory/openclaw.py +237 -0
- package/skmemory/quadrants.py +191 -0
- package/skmemory/ritual.py +215 -0
- package/skmemory/seeds.py +163 -0
- package/skmemory/soul.py +273 -0
- package/skmemory/steelman.py +338 -0
- package/skmemory/store.py +445 -0
- package/tests/__init__.py +0 -0
- package/tests/test_ai_client.py +89 -0
- package/tests/test_anchor.py +153 -0
- package/tests/test_cli.py +65 -0
- package/tests/test_export_import.py +170 -0
- package/tests/test_file_backend.py +211 -0
- package/tests/test_journal.py +172 -0
- package/tests/test_lovenote.py +136 -0
- package/tests/test_models.py +194 -0
- package/tests/test_openclaw.py +122 -0
- package/tests/test_quadrants.py +174 -0
- package/tests/test_ritual.py +195 -0
- package/tests/test_seeds.py +208 -0
- package/tests/test_soul.py +197 -0
- package/tests/test_sqlite_backend.py +258 -0
- package/tests/test_steelman.py +257 -0
- package/tests/test_store.py +238 -0
- package/tests/test_telegram_import.py +181 -0
|
@@ -0,0 +1,310 @@
|
|
|
1
|
+
"""
|
|
2
|
+
FalkorDB graph backend (Level 2 — relationships).
|
|
3
|
+
|
|
4
|
+
Enables graph-based memory traversal: "What memories are connected
|
|
5
|
+
to this person?" or "Show me the seed lineage chain." Uses the
|
|
6
|
+
Cypher query language over a Redis-compatible protocol.
|
|
7
|
+
|
|
8
|
+
Requires:
|
|
9
|
+
pip install falkordb
|
|
10
|
+
|
|
11
|
+
FalkorDB is the successor to RedisGraph. Run locally via Docker
|
|
12
|
+
or point to an external instance.
|
|
13
|
+
|
|
14
|
+
This backend is SUPPLEMENTARY — it indexes relationships alongside
|
|
15
|
+
the primary backend (SQLite or file). It does not store full memory
|
|
16
|
+
content, only the graph edges and key metadata for traversal.
|
|
17
|
+
"""
|
|
18
|
+
|
|
19
|
+
from __future__ import annotations
|
|
20
|
+
|
|
21
|
+
import json
|
|
22
|
+
import logging
|
|
23
|
+
from typing import Optional
|
|
24
|
+
|
|
25
|
+
from ..models import Memory, MemoryLayer
|
|
26
|
+
from .base import BaseBackend
|
|
27
|
+
|
|
28
|
+
# Module-level logger; backend failures are logged as warnings (this index is
# supplementary, so errors degrade gracefully instead of raising).
logger = logging.getLogger(__name__)
|
|
29
|
+
|
|
30
|
+
|
|
31
|
+
class FalkorDBBackend:
    """FalkorDB graph backend for memory relationship traversal.

    Not a full BaseBackend — this is a supplementary index for
    graph queries. The primary backend handles CRUD. Only graph
    edges and key metadata are stored here, never full content.

    All public methods degrade gracefully: if the ``falkordb``
    package is missing or the server is unreachable they log a
    warning and return a falsy/empty result instead of raising.

    Args:
        url: FalkorDB/Redis URL (default: localhost:6379).
        graph_name: Name of the graph (default: 'skmemory').
    """

    def __init__(
        self,
        url: str = "redis://localhost:6379",
        graph_name: str = "skmemory",
    ) -> None:
        self.url = url
        self.graph_name = graph_name
        self._db = None          # FalkorDB client, created lazily
        self._graph = None       # selected graph handle, created lazily
        self._initialized = False

    def _ensure_initialized(self) -> bool:
        """Lazy-initialize the FalkorDB connection.

        Returns:
            bool: True if connection succeeded.
        """
        if self._initialized:
            return True

        try:
            from falkordb import FalkorDB
        except ImportError:
            logger.warning("falkordb not installed: pip install falkordb")
            return False

        try:
            self._db = FalkorDB.from_url(self.url)
            self._graph = self._db.select_graph(self.graph_name)
            self._initialized = True
            return True
        except Exception as e:
            logger.warning("FalkorDB connection failed: %s", e)
            return False

    def index_memory(self, memory: Memory) -> bool:
        """Add a memory node and its relationships to the graph.

        Upserts the Memory node, then edges: ``PROMOTED_FROM`` to its
        parent, ``RELATED_TO`` for each related id, ``TAGGED`` for each
        tag, and ``PLANTED`` from the creator AI for seed memories.

        Args:
            memory: The memory to index.

        Returns:
            bool: True if indexed successfully.
        """
        if not self._ensure_initialized():
            return False

        try:
            # Upsert the node with key metadata only — full content lives
            # in the primary backend.
            self._graph.query(
                """
                MERGE (m:Memory {id: $id})
                SET m.title = $title,
                    m.layer = $layer,
                    m.source = $source,
                    m.intensity = $intensity,
                    m.created_at = $created_at
                """,
                {
                    "id": memory.id,
                    "title": memory.title,
                    "layer": memory.layer.value,
                    "source": memory.source,
                    "intensity": memory.emotional.intensity,
                    "created_at": memory.created_at,
                },
            )

            if memory.parent_id:
                # MERGE the parent so the lineage edge can be created even
                # before the parent itself has been indexed.
                self._graph.query(
                    """
                    MATCH (child:Memory {id: $child_id})
                    MERGE (parent:Memory {id: $parent_id})
                    MERGE (child)-[:PROMOTED_FROM]->(parent)
                    """,
                    {"child_id": memory.id, "parent_id": memory.parent_id},
                )

            for related_id in memory.related_ids:
                self._graph.query(
                    """
                    MATCH (a:Memory {id: $a_id})
                    MERGE (b:Memory {id: $b_id})
                    MERGE (a)-[:RELATED_TO]->(b)
                    """,
                    {"a_id": memory.id, "b_id": related_id},
                )

            for tag in memory.tags:
                self._graph.query(
                    """
                    MATCH (m:Memory {id: $mem_id})
                    MERGE (t:Tag {name: $tag})
                    MERGE (m)-[:TAGGED]->(t)
                    """,
                    {"mem_id": memory.id, "tag": tag},
                )

            if memory.source == "seed":
                # Seed memories carry their planter as a "creator:<name>" tag.
                creator = next(
                    (t.split(":", 1)[1] for t in memory.tags if t.startswith("creator:")),
                    None,
                )
                if creator:
                    self._graph.query(
                        """
                        MATCH (m:Memory {id: $mem_id})
                        MERGE (a:AI {name: $creator})
                        MERGE (a)-[:PLANTED]->(m)
                        """,
                        {"mem_id": memory.id, "creator": creator},
                    )

            return True
        except Exception as e:
            logger.warning("FalkorDB index failed: %s", e)
            return False

    def get_related(self, memory_id: str, depth: int = 2) -> list[dict]:
        """Traverse the graph to find related memories.

        Args:
            memory_id: Starting memory ID.
            depth: How many hops to traverse (clamped to 1-5).

        Returns:
            list[dict]: Related memory nodes with relationship info,
            nearest (then most intense) first, at most 20.
        """
        if not self._ensure_initialized():
            return []

        # Cypher does not support parameters inside a variable-length
        # pattern, so `depth` must be interpolated into the query text.
        # Coerce to int and clamp to 1..5 so a zero/negative value cannot
        # produce an invalid pattern and a non-int value cannot inject
        # arbitrary query syntax.
        hops = max(1, min(int(depth), 5))

        try:
            result = self._graph.query(
                f"""
                MATCH (start:Memory {{id: $id}})
                MATCH path = (start)-[*1..{hops}]-(related:Memory)
                WHERE related.id <> $id
                RETURN DISTINCT related.id AS id,
                       related.title AS title,
                       related.layer AS layer,
                       related.intensity AS intensity,
                       length(path) AS distance
                ORDER BY distance ASC, related.intensity DESC
                LIMIT 20
                """,
                {"id": memory_id},
            )
            return [
                {
                    "id": row[0],
                    "title": row[1],
                    "layer": row[2],
                    "intensity": row[3],
                    "distance": row[4],
                }
                for row in result.result_set
            ]
        except Exception as e:
            logger.warning("FalkorDB query failed: %s", e)
            return []

    def get_lineage(self, memory_id: str) -> list[dict]:
        """Get the promotion/seed lineage chain for a memory.

        Follows outgoing PROMOTED_FROM edges up to 10 hops.

        Args:
            memory_id: Starting memory ID.

        Returns:
            list[dict]: Chain of ancestor memories, nearest first.
        """
        if not self._ensure_initialized():
            return []

        try:
            result = self._graph.query(
                """
                MATCH (start:Memory {id: $id})
                MATCH path = (start)-[:PROMOTED_FROM*1..10]->(ancestor:Memory)
                RETURN ancestor.id AS id,
                       ancestor.title AS title,
                       ancestor.layer AS layer,
                       length(path) AS depth
                ORDER BY depth ASC
                """,
                {"id": memory_id},
            )
            return [
                {
                    "id": row[0],
                    "title": row[1],
                    "layer": row[2],
                    "depth": row[3],
                }
                for row in result.result_set
            ]
        except Exception as e:
            logger.warning("FalkorDB lineage query failed: %s", e)
            return []

    def get_memory_clusters(self, min_connections: int = 2) -> list[dict]:
        """Find clusters of highly connected memories.

        Args:
            min_connections: Minimum edges to be considered a cluster center.

        Returns:
            list[dict]: Cluster centers with connection counts,
            most connected first, at most 20.
        """
        if not self._ensure_initialized():
            return []

        try:
            result = self._graph.query(
                """
                MATCH (m:Memory)-[r]-(connected:Memory)
                WITH m, count(DISTINCT connected) AS connections
                WHERE connections >= $min
                RETURN m.id AS id,
                       m.title AS title,
                       m.layer AS layer,
                       connections
                ORDER BY connections DESC
                LIMIT 20
                """,
                {"min": min_connections},
            )
            return [
                {
                    "id": row[0],
                    "title": row[1],
                    "layer": row[2],
                    "connections": row[3],
                }
                for row in result.result_set
            ]
        except Exception as e:
            logger.warning("FalkorDB cluster query failed: %s", e)
            return []

    def health_check(self) -> dict:
        """Check FalkorDB backend health.

        Returns:
            dict: Status with connection and graph info; ``ok`` is False
            with an ``error`` message when unavailable.
        """
        if not self._ensure_initialized():
            return {
                "ok": False,
                "backend": "FalkorDBBackend",
                "error": "Not initialized",
            }

        try:
            result = self._graph.query(
                "MATCH (n) RETURN count(n) AS nodes"
            )
            node_count = result.result_set[0][0] if result.result_set else 0
            return {
                "ok": True,
                "backend": "FalkorDBBackend",
                "url": self.url,
                "graph": self.graph_name,
                "node_count": node_count,
            }
        except Exception as e:
            return {
                "ok": False,
                "backend": "FalkorDBBackend",
                "error": str(e),
            }
|
|
@@ -0,0 +1,209 @@
|
|
|
1
|
+
"""
|
|
2
|
+
File-based storage backend (Level 1).
|
|
3
|
+
|
|
4
|
+
Zero infrastructure. Memories are stored as individual JSON files
|
|
5
|
+
in a directory tree organized by layer. Works everywhere, today,
|
|
6
|
+
with nothing to install.
|
|
7
|
+
|
|
8
|
+
Directory layout:
|
|
9
|
+
base_path/
|
|
10
|
+
├── short-term/
|
|
11
|
+
│ ├── {id}.json
|
|
12
|
+
│ └── ...
|
|
13
|
+
├── mid-term/
|
|
14
|
+
│ └── ...
|
|
15
|
+
└── long-term/
|
|
16
|
+
└── ...
|
|
17
|
+
"""
|
|
18
|
+
|
|
19
|
+
from __future__ import annotations
|
|
20
|
+
|
|
21
|
+
import json
|
|
22
|
+
import os
|
|
23
|
+
from pathlib import Path
|
|
24
|
+
from typing import Optional
|
|
25
|
+
|
|
26
|
+
from ..models import Memory, MemoryLayer
|
|
27
|
+
from .base import BaseBackend
|
|
28
|
+
|
|
29
|
+
# Default storage root; "~" is expanded at import time for the current user.
DEFAULT_BASE_PATH = os.path.expanduser("~/.skmemory/memories")
|
|
30
|
+
|
|
31
|
+
|
|
32
|
+
class FileBackend(BaseBackend):
    """Stores memories as JSON files on the local filesystem.

    One ``{id}.json`` file per memory, under a per-layer directory
    (see the module docstring for the layout). Unreadable or invalid
    files are skipped silently — this backend is deliberately
    best-effort on reads.

    Args:
        base_path: Root directory for memory storage.
    """

    def __init__(self, base_path: str = DEFAULT_BASE_PATH) -> None:
        self.base_path = Path(base_path)
        self._ensure_dirs()

    def _ensure_dirs(self) -> None:
        """Create layer directories if they don't exist."""
        for layer in MemoryLayer:
            (self.base_path / layer.value).mkdir(parents=True, exist_ok=True)

    def _file_path(self, memory: Memory) -> Path:
        """Get the file path for a memory (depends on its current layer).

        Args:
            memory: The memory to get the path for.

        Returns:
            Path: Full path to the JSON file.
        """
        return self.base_path / memory.layer.value / f"{memory.id}.json"

    def _find_file(self, memory_id: str) -> Optional[Path]:
        """Locate a memory file across all layers.

        Args:
            memory_id: The memory ID to find.

        Returns:
            Optional[Path]: Path to the file if found (first layer, in
            enum order, that contains the id).
        """
        for layer in MemoryLayer:
            path = self.base_path / layer.value / f"{memory_id}.json"
            if path.exists():
                return path
        return None

    def _read_memory(self, path: Path) -> Optional[Memory]:
        """Read and parse one memory file, or None if unreadable/invalid."""
        try:
            data = json.loads(path.read_text(encoding="utf-8"))
            return Memory(**data)
        except Exception:
            # Corrupt JSON, I/O error, or model validation failure —
            # treat all of them as "no usable memory here".
            return None

    def save(self, memory: Memory) -> str:
        """Persist a memory as a JSON file.

        Args:
            memory: The Memory to store.

        Returns:
            str: The memory ID.
        """
        path = self._file_path(memory)
        path.parent.mkdir(parents=True, exist_ok=True)
        path.write_text(
            json.dumps(memory.model_dump(), indent=2, default=str),
            encoding="utf-8",
        )
        # The target path depends on the memory's layer, so a re-save after
        # a layer change (promotion) would otherwise leave a stale copy in
        # the old layer directory that _find_file could return. Remove any
        # same-id file from the other layers.
        for layer in MemoryLayer:
            if layer.value == memory.layer.value:
                continue
            stale = self.base_path / layer.value / f"{memory.id}.json"
            if stale.exists():
                stale.unlink()
        return memory.id

    def load(self, memory_id: str) -> Optional[Memory]:
        """Load a memory by ID from disk.

        Args:
            memory_id: The memory identifier.

        Returns:
            Optional[Memory]: The memory if found, None otherwise.
        """
        path = self._find_file(memory_id)
        if path is None:
            return None
        return self._read_memory(path)

    def delete(self, memory_id: str) -> bool:
        """Delete a memory file.

        Args:
            memory_id: The memory identifier.

        Returns:
            bool: True if deleted, False if not found.
        """
        path = self._find_file(memory_id)
        if path is None:
            return False
        path.unlink()
        return True

    def list_memories(
        self,
        layer: Optional[MemoryLayer] = None,
        tags: Optional[list[str]] = None,
        limit: int = 50,
    ) -> list[Memory]:
        """List memories from disk with optional filtering.

        Args:
            layer: Filter by memory layer (None = all layers).
            tags: Filter by tags (AND logic).
            limit: Maximum results.

        Returns:
            list[Memory]: Matching memories sorted newest first.
        """
        layers = [layer] if layer else list(MemoryLayer)
        results: list[Memory] = []

        for lyr in layers:
            layer_dir = self.base_path / lyr.value
            if not layer_dir.exists():
                continue
            for json_file in layer_dir.glob("*.json"):
                mem = self._read_memory(json_file)
                if mem is None:
                    continue
                if tags and not all(t in mem.tags for t in tags):
                    continue
                results.append(mem)

        results.sort(key=lambda m: m.created_at, reverse=True)
        return results[:limit]

    def search_text(self, query: str, limit: int = 10) -> list[Memory]:
        """Search memories by text substring (case-insensitive).

        Args:
            query: Search string.
            limit: Maximum results.

        Returns:
            list[Memory]: Matching memories, newest first.
        """
        query_lower = query.lower()
        results: list[Memory] = []

        for layer in MemoryLayer:
            layer_dir = self.base_path / layer.value
            if not layer_dir.exists():
                continue
            for json_file in layer_dir.glob("*.json"):
                try:
                    # Cheap raw substring check before paying for JSON
                    # parsing and model validation.
                    raw = json_file.read_text(encoding="utf-8")
                    if query_lower not in raw.lower():
                        continue
                    data = json.loads(raw)
                    results.append(Memory(**data))
                except Exception:
                    continue

        results.sort(key=lambda m: m.created_at, reverse=True)
        return results[:limit]

    def health_check(self) -> dict:
        """Check filesystem backend health.

        Returns:
            dict: Status with path and per-layer/total file counts.
        """
        counts = {}
        for layer in MemoryLayer:
            layer_dir = self.base_path / layer.value
            if layer_dir.exists():
                counts[layer.value] = len(list(layer_dir.glob("*.json")))
            else:
                counts[layer.value] = 0
        return {
            "ok": True,
            "backend": "FileBackend",
            "base_path": str(self.base_path),
            "memory_counts": counts,
            "total": sum(counts.values()),
        }
|