@smilintux/skmemory 0.5.0 → 0.7.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.github/workflows/ci.yml +39 -3
- package/.github/workflows/publish.yml +13 -6
- package/AGENT_REFACTOR_CHANGES.md +192 -0
- package/ARCHITECTURE.md +101 -19
- package/CHANGELOG.md +153 -0
- package/LICENSE +81 -68
- package/MISSION.md +7 -0
- package/README.md +419 -86
- package/SKILL.md +197 -25
- package/docker-compose.yml +15 -15
- package/index.js +6 -5
- package/openclaw-plugin/openclaw.plugin.json +10 -0
- package/openclaw-plugin/src/index.ts +255 -0
- package/openclaw-plugin/src/openclaw.plugin.json +10 -0
- package/package.json +1 -1
- package/pyproject.toml +29 -9
- package/requirements.txt +10 -2
- package/seeds/cloud9-opus.seed.json +7 -7
- package/seeds/lumina-cloud9-breakthrough.seed.json +46 -0
- package/seeds/lumina-cloud9-python-pypi.seed.json +46 -0
- package/seeds/lumina-kingdom-founding.seed.json +47 -0
- package/seeds/lumina-pma-signed.seed.json +46 -0
- package/seeds/lumina-singular-achievement.seed.json +46 -0
- package/seeds/lumina-skcapstone-conscious.seed.json +46 -0
- package/seeds/plant-kingdom-journal.py +203 -0
- package/seeds/plant-lumina-seeds.py +280 -0
- package/skill.yaml +46 -0
- package/skmemory/HA.md +296 -0
- package/skmemory/__init__.py +12 -1
- package/skmemory/agents.py +233 -0
- package/skmemory/ai_client.py +40 -0
- package/skmemory/anchor.py +4 -2
- package/skmemory/backends/__init__.py +11 -4
- package/skmemory/backends/file_backend.py +2 -1
- package/skmemory/backends/skgraph_backend.py +608 -0
- package/skmemory/backends/{qdrant_backend.py → skvector_backend.py} +99 -69
- package/skmemory/backends/sqlite_backend.py +122 -51
- package/skmemory/backends/vaulted_backend.py +286 -0
- package/skmemory/cli.py +1238 -29
- package/skmemory/config.py +173 -0
- package/skmemory/context_loader.py +335 -0
- package/skmemory/endpoint_selector.py +386 -0
- package/skmemory/fortress.py +685 -0
- package/skmemory/graph_queries.py +238 -0
- package/skmemory/importers/__init__.py +9 -1
- package/skmemory/importers/telegram.py +351 -43
- package/skmemory/importers/telegram_api.py +488 -0
- package/skmemory/journal.py +4 -2
- package/skmemory/lovenote.py +4 -2
- package/skmemory/mcp_server.py +706 -0
- package/skmemory/models.py +41 -0
- package/skmemory/openclaw.py +8 -8
- package/skmemory/predictive.py +232 -0
- package/skmemory/promotion.py +524 -0
- package/skmemory/register.py +454 -0
- package/skmemory/register_mcp.py +197 -0
- package/skmemory/ritual.py +121 -47
- package/skmemory/seeds.py +257 -8
- package/skmemory/setup_wizard.py +920 -0
- package/skmemory/sharing.py +402 -0
- package/skmemory/soul.py +71 -20
- package/skmemory/steelman.py +250 -263
- package/skmemory/store.py +271 -60
- package/skmemory/vault.py +228 -0
- package/tests/integration/__init__.py +0 -0
- package/tests/integration/conftest.py +233 -0
- package/tests/integration/test_cross_backend.py +355 -0
- package/tests/integration/test_skgraph_live.py +424 -0
- package/tests/integration/test_skvector_live.py +369 -0
- package/tests/test_backup_rotation.py +327 -0
- package/tests/test_cli.py +6 -6
- package/tests/test_endpoint_selector.py +801 -0
- package/tests/test_fortress.py +255 -0
- package/tests/test_fortress_hardening.py +444 -0
- package/tests/test_openclaw.py +5 -2
- package/tests/test_predictive.py +237 -0
- package/tests/test_promotion.py +340 -0
- package/tests/test_ritual.py +4 -4
- package/tests/test_seeds.py +96 -0
- package/tests/test_setup.py +835 -0
- package/tests/test_sharing.py +250 -0
- package/tests/test_skgraph_backend.py +667 -0
- package/tests/test_skvector_backend.py +326 -0
- package/tests/test_steelman.py +5 -5
- package/tests/test_store_graph_integration.py +245 -0
- package/tests/test_vault.py +186 -0
- package/skmemory/backends/falkordb_backend.py +0 -310
package/skmemory/models.py
CHANGED
|
@@ -128,6 +128,17 @@ class Memory(BaseModel):
|
|
|
128
128
|
description="ID of parent memory (for hierarchical chains)",
|
|
129
129
|
)
|
|
130
130
|
|
|
131
|
+
intent: str = Field(
|
|
132
|
+
default="",
|
|
133
|
+
description="WHY this memory was stored — the purpose, not just the content. "
|
|
134
|
+
"Inspired by Jonathan Clements' AMK (Adaptive Memory Kernel).",
|
|
135
|
+
)
|
|
136
|
+
integrity_hash: str = Field(
|
|
137
|
+
default="",
|
|
138
|
+
description="SHA-256 hash of content at write time for tamper detection. "
|
|
139
|
+
"A memory that can prove it hasn't been altered is a memory you can trust.",
|
|
140
|
+
)
|
|
141
|
+
|
|
131
142
|
metadata: dict[str, Any] = Field(default_factory=dict)
|
|
132
143
|
|
|
133
144
|
@field_validator("title")
|
|
@@ -146,6 +157,36 @@ class Memory(BaseModel):
|
|
|
146
157
|
"""
|
|
147
158
|
return hashlib.sha256(self.content.encode()).hexdigest()[:16]
|
|
148
159
|
|
|
160
|
+
def compute_integrity_hash(self) -> str:
|
|
161
|
+
"""Compute a full SHA-256 integrity hash over content + title + emotional state.
|
|
162
|
+
|
|
163
|
+
This is the AMK-inspired tamper detection hash. If the content,
|
|
164
|
+
title, or emotional signature changes after storage, the hash
|
|
165
|
+
won't match and you know the memory was altered.
|
|
166
|
+
|
|
167
|
+
Returns:
|
|
168
|
+
str: Full 64-char hex SHA-256 digest.
|
|
169
|
+
"""
|
|
170
|
+
payload = f"{self.id}:{self.title}:{self.content}:{self.emotional.signature()}"
|
|
171
|
+
return hashlib.sha256(payload.encode()).hexdigest()
|
|
172
|
+
|
|
173
|
+
def seal(self) -> None:
|
|
174
|
+
"""Seal this memory by computing and storing the integrity hash.
|
|
175
|
+
|
|
176
|
+
Call this at write time. Later, verify with verify_integrity().
|
|
177
|
+
"""
|
|
178
|
+
self.integrity_hash = self.compute_integrity_hash()
|
|
179
|
+
|
|
180
|
+
def verify_integrity(self) -> bool:
|
|
181
|
+
"""Verify that this memory hasn't been tampered with since sealing.
|
|
182
|
+
|
|
183
|
+
Returns:
|
|
184
|
+
bool: True if the integrity hash matches, False if altered or unsealed.
|
|
185
|
+
"""
|
|
186
|
+
if not self.integrity_hash:
|
|
187
|
+
return True
|
|
188
|
+
return self.integrity_hash == self.compute_integrity_hash()
|
|
189
|
+
|
|
149
190
|
def to_embedding_text(self) -> str:
|
|
150
191
|
"""Flatten this memory into a single string for vector embedding.
|
|
151
192
|
|
package/skmemory/openclaw.py
CHANGED
|
@@ -42,21 +42,21 @@ class SKMemoryPlugin:
|
|
|
42
42
|
|
|
43
43
|
Args:
|
|
44
44
|
base_path: Override the memory storage directory.
|
|
45
|
-
|
|
46
|
-
|
|
45
|
+
skvector_url: Optional SKVector server for semantic search.
|
|
46
|
+
skvector_key: Optional SKVector API key.
|
|
47
47
|
"""
|
|
48
48
|
|
|
49
49
|
def __init__(
|
|
50
50
|
self,
|
|
51
51
|
base_path: Optional[str] = None,
|
|
52
|
-
|
|
53
|
-
|
|
52
|
+
skvector_url: Optional[str] = None,
|
|
53
|
+
skvector_key: Optional[str] = None,
|
|
54
54
|
) -> None:
|
|
55
55
|
vector = None
|
|
56
|
-
if
|
|
56
|
+
if skvector_url:
|
|
57
57
|
try:
|
|
58
|
-
from .backends.
|
|
59
|
-
vector =
|
|
58
|
+
from .backends.skvector_backend import SKVectorBackend
|
|
59
|
+
vector = SKVectorBackend(url=skvector_url, api_key=skvector_key)
|
|
60
60
|
except Exception:
|
|
61
61
|
pass
|
|
62
62
|
|
|
@@ -195,7 +195,7 @@ class SKMemoryPlugin:
|
|
|
195
195
|
"""Export all memories to a dated JSON backup.
|
|
196
196
|
|
|
197
197
|
Args:
|
|
198
|
-
output_path: Destination (default: ~/.
|
|
198
|
+
output_path: Destination (default: ~/.skcapstone/backups/).
|
|
199
199
|
|
|
200
200
|
Returns:
|
|
201
201
|
str: Path to the backup file.
|
|
@@ -0,0 +1,232 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Predictive Memory Recall — anticipate what memories you'll need.
|
|
3
|
+
|
|
4
|
+
Inspired by Jonathan Clements' Adaptive Memory Kernel (AMK).
|
|
5
|
+
Instead of waiting for a search query, this module learns access
|
|
6
|
+
patterns and pre-loads the memories most likely to be relevant
|
|
7
|
+
for the current context.
|
|
8
|
+
|
|
9
|
+
The predictor tracks:
|
|
10
|
+
- Which memories are accessed together (co-occurrence)
|
|
11
|
+
- Time-of-day patterns (morning routines vs late-night deep work)
|
|
12
|
+
- Tag affinity (if you access 'cloud9' memories, you probably want 'trust' too)
|
|
13
|
+
- Recency-weighted frequency (recent access patterns matter more)
|
|
14
|
+
|
|
15
|
+
The output is a ranked list of memory IDs to pre-load into context,
|
|
16
|
+
sorted by predicted relevance. This feeds directly into the
|
|
17
|
+
`skmemory context` and `skmemory ritual` commands.
|
|
18
|
+
"""
|
|
19
|
+
|
|
20
|
+
from __future__ import annotations
|
|
21
|
+
|
|
22
|
+
import json
|
|
23
|
+
import logging
|
|
24
|
+
import math
|
|
25
|
+
import time
|
|
26
|
+
from collections import Counter, defaultdict
|
|
27
|
+
from pathlib import Path
|
|
28
|
+
from typing import Optional
|
|
29
|
+
|
|
30
|
+
from pydantic import BaseModel, Field
|
|
31
|
+
|
|
32
|
+
from .config import SKMEMORY_HOME
|
|
33
|
+
|
|
34
|
+
logger = logging.getLogger("skmemory.predictive")
|
|
35
|
+
|
|
36
|
+
DEFAULT_ACCESS_LOG = SKMEMORY_HOME / "access_log.json"
|
|
37
|
+
|
|
38
|
+
|
|
39
|
+
class AccessEvent(BaseModel):
    """A single memory access event for pattern learning."""

    # ID of the memory that was accessed.
    memory_id: str
    # Unix timestamp of the access (seconds since epoch), stamped at creation.
    timestamp: float = Field(default_factory=time.time)
    # Tags carried by the accessed memory; feeds the tag-affinity index.
    tags: list[str] = Field(default_factory=list)
    # Memory layer of the accessed item (short-term / mid-term / long-term);
    # empty string when unknown.
    layer: str = ""
    context: str = Field(
        default="",
        description="What was happening when this memory was accessed",
    )
|
|
51
|
+
|
|
52
|
+
class PredictiveRecall:
    """Learns memory access patterns and predicts what you'll need next.

    Tracks co-occurrence (which memories are accessed together within a
    session), tag affinity, and recency-weighted frequency to generate
    ranked predictions from a persisted access log.

    Args:
        log_path: Path to the access log JSON file.
        max_events: Maximum events to retain (older events are pruned).
    """

    def __init__(
        self,
        log_path: Optional[Path] = None,
        max_events: int = 5000,
    ) -> None:
        self._log_path = log_path or DEFAULT_ACCESS_LOG
        self._max_events = max_events
        self._events: list[AccessEvent] = []
        # memory_id -> Counter of memory_ids seen in the same session.
        self._cooccurrence: dict[str, Counter] = defaultdict(Counter)
        # tag -> Counter of memory_ids accessed while carrying that tag.
        self._tag_affinity: dict[str, Counter] = defaultdict(Counter)
        # memory_id -> total access count.
        self._frequency: Counter = Counter()
        self._loaded = False

    def _ensure_loaded(self) -> None:
        """Load the access log from disk if not already loaded.

        Marks itself loaded up front so a corrupt or missing log is not
        re-read on every call; load failures degrade to an empty history
        with a warning instead of raising.
        """
        if self._loaded:
            return
        self._loaded = True

        if not self._log_path.exists():
            return

        try:
            raw = json.loads(self._log_path.read_text())
            self._events = [AccessEvent(**e) for e in raw]
            self._rebuild_indices()
        except Exception as exc:
            # Broad by design: JSONDecodeError, OSError, and pydantic
            # validation errors all mean "unusable log".  (The original
            # tuple `(json.JSONDecodeError, Exception)` was redundant —
            # Exception already subsumes JSONDecodeError.)
            logger.warning("Failed to load access log: %s", exc)

    def _rebuild_indices(self) -> None:
        """Rebuild co-occurrence, tag affinity, and frequency indices."""
        self._cooccurrence.clear()
        self._tag_affinity.clear()
        self._frequency.clear()

        # Split the chronologically sorted event stream into sessions:
        # a gap of more than session_window seconds starts a new session.
        session_window = 300
        sessions: list[list[AccessEvent]] = []
        current_session: list[AccessEvent] = []

        for event in sorted(self._events, key=lambda e: e.timestamp):
            if current_session and (event.timestamp - current_session[-1].timestamp) > session_window:
                sessions.append(current_session)
                current_session = []
            current_session.append(event)
        if current_session:
            sessions.append(current_session)

        # Memories accessed within the same session co-occur (symmetric).
        for session in sessions:
            ids_in_session = [e.memory_id for e in session]
            for i, mid in enumerate(ids_in_session):
                self._frequency[mid] += 1
                for other in ids_in_session[i + 1:]:
                    if other != mid:
                        self._cooccurrence[mid][other] += 1
                        self._cooccurrence[other][mid] += 1

        for event in self._events:
            for tag in event.tags:
                self._tag_affinity[tag][event.memory_id] += 1

    def log_access(self, memory_id: str, tags: Optional[list[str]] = None, layer: str = "", context: str = "") -> None:
        """Record a memory access event for pattern learning.

        Args:
            memory_id: The accessed memory's ID.
            tags: Tags on the accessed memory.
            layer: Memory layer (short-term, mid-term, long-term).
            context: What was happening during access.
        """
        self._ensure_loaded()

        event = AccessEvent(
            memory_id=memory_id,
            tags=tags or [],
            layer=layer,
            context=context,
        )
        self._events.append(event)

        # Incremental index update; co-occurrence is only refreshed on the
        # full rebuild below (pruning) or on load.
        self._frequency[memory_id] += 1
        for tag in event.tags:
            self._tag_affinity[tag][memory_id] += 1

        if len(self._events) > self._max_events:
            self._events = self._events[-self._max_events:]
            self._rebuild_indices()

        self._save()

    def predict(
        self,
        recent_ids: Optional[list[str]] = None,
        active_tags: Optional[list[str]] = None,
        limit: int = 10,
    ) -> list[dict]:
        """Predict which memories will be needed next.

        Uses co-occurrence patterns, tag affinity, and recency-weighted
        frequency to rank memory IDs by predicted relevance.

        Args:
            recent_ids: Memory IDs accessed in the current session.
            active_tags: Tags active in the current context.
            limit: Maximum predictions to return.

        Returns:
            list[dict]: Ranked predictions with id, score, and reason.
        """
        self._ensure_loaded()

        scores: Counter = Counter()
        reasons: dict[str, list[str]] = defaultdict(list)
        # O(1) membership instead of repeated list scans; empty set when
        # no recent_ids were given (everything is then a candidate).
        exclude = set(recent_ids) if recent_ids else set()

        if recent_ids:
            for mid in recent_ids:
                # .get() (not []) avoids materializing defaultdict entries.
                for co_id, count in self._cooccurrence.get(mid, {}).items():
                    if co_id not in exclude:
                        scores[co_id] += count * 2.0
                        reasons[co_id].append(f"co-occurs with {mid[:8]}")

        if active_tags:
            for tag in active_tags:
                for mid, count in self._tag_affinity.get(tag, {}).items():
                    if mid not in exclude:
                        scores[mid] += count * 1.5
                        reasons[mid].append(f"tag affinity: {tag}")

        # Precompute each memory's most recent access in a single pass
        # instead of rescanning the full event list per memory
        # (was O(events * memories)).
        last_access: dict[str, float] = {}
        for e in self._events:
            if e.timestamp > last_access.get(e.memory_id, 0):
                last_access[e.memory_id] = e.timestamp

        now = time.time()
        for mid, freq in self._frequency.items():
            if mid not in exclude:
                last = last_access.get(mid, 0)
                # Exponential decay with a one-day time constant.
                recency = math.exp(-(now - last) / 86400) if last else 0
                recency_score = freq * recency * 0.5
                if recency_score > 0.1:
                    scores[mid] += recency_score
                    reasons[mid].append(f"frequency={freq}, recency={recency:.2f}")

        ranked = scores.most_common(limit)
        return [
            {
                "memory_id": mid,
                "score": round(score, 2),
                "reasons": reasons.get(mid, []),
            }
            for mid, score in ranked
        ]

    def get_stats(self) -> dict:
        """Return statistics about the prediction engine.

        Returns:
            dict: Event count, unique memories, top accessed, etc.
        """
        self._ensure_loaded()
        return {
            "total_events": len(self._events),
            "unique_memories": len(self._frequency),
            "top_accessed": self._frequency.most_common(5),
            "unique_tags": len(self._tag_affinity),
            "cooccurrence_pairs": sum(len(v) for v in self._cooccurrence.values()),
        }

    def _save(self) -> None:
        """Persist the access log (bounded to max_events) to disk."""
        self._log_path.parent.mkdir(parents=True, exist_ok=True)
        data = [e.model_dump() for e in self._events[-self._max_events:]]
        self._log_path.write_text(json.dumps(data, indent=2))