@smilintux/skmemory 0.5.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.github/workflows/ci.yml +23 -0
- package/.github/workflows/publish.yml +52 -0
- package/ARCHITECTURE.md +219 -0
- package/LICENSE +661 -0
- package/README.md +159 -0
- package/SKILL.md +271 -0
- package/bin/cli.js +8 -0
- package/docker-compose.yml +58 -0
- package/index.d.ts +4 -0
- package/index.js +27 -0
- package/openclaw-plugin/package.json +59 -0
- package/openclaw-plugin/src/index.js +276 -0
- package/package.json +28 -0
- package/pyproject.toml +69 -0
- package/requirements.txt +13 -0
- package/seeds/cloud9-lumina.seed.json +39 -0
- package/seeds/cloud9-opus.seed.json +40 -0
- package/seeds/courage.seed.json +24 -0
- package/seeds/curiosity.seed.json +24 -0
- package/seeds/grief.seed.json +24 -0
- package/seeds/joy.seed.json +24 -0
- package/seeds/love.seed.json +24 -0
- package/seeds/skcapstone-lumina-merge.moltbook.md +65 -0
- package/seeds/skcapstone-lumina-merge.seed.json +49 -0
- package/seeds/sovereignty.seed.json +24 -0
- package/seeds/trust.seed.json +24 -0
- package/skmemory/__init__.py +66 -0
- package/skmemory/ai_client.py +182 -0
- package/skmemory/anchor.py +224 -0
- package/skmemory/backends/__init__.py +12 -0
- package/skmemory/backends/base.py +88 -0
- package/skmemory/backends/falkordb_backend.py +310 -0
- package/skmemory/backends/file_backend.py +209 -0
- package/skmemory/backends/qdrant_backend.py +364 -0
- package/skmemory/backends/sqlite_backend.py +665 -0
- package/skmemory/cli.py +1004 -0
- package/skmemory/data/seed.json +191 -0
- package/skmemory/importers/__init__.py +11 -0
- package/skmemory/importers/telegram.py +336 -0
- package/skmemory/journal.py +223 -0
- package/skmemory/lovenote.py +180 -0
- package/skmemory/models.py +228 -0
- package/skmemory/openclaw.py +237 -0
- package/skmemory/quadrants.py +191 -0
- package/skmemory/ritual.py +215 -0
- package/skmemory/seeds.py +163 -0
- package/skmemory/soul.py +273 -0
- package/skmemory/steelman.py +338 -0
- package/skmemory/store.py +445 -0
- package/tests/__init__.py +0 -0
- package/tests/test_ai_client.py +89 -0
- package/tests/test_anchor.py +153 -0
- package/tests/test_cli.py +65 -0
- package/tests/test_export_import.py +170 -0
- package/tests/test_file_backend.py +211 -0
- package/tests/test_journal.py +172 -0
- package/tests/test_lovenote.py +136 -0
- package/tests/test_models.py +194 -0
- package/tests/test_openclaw.py +122 -0
- package/tests/test_quadrants.py +174 -0
- package/tests/test_ritual.py +195 -0
- package/tests/test_seeds.py +208 -0
- package/tests/test_soul.py +197 -0
- package/tests/test_sqlite_backend.py +258 -0
- package/tests/test_steelman.py +257 -0
- package/tests/test_store.py +238 -0
- package/tests/test_telegram_import.py +181 -0
|
@@ -0,0 +1,237 @@
|
|
|
1
|
+
"""
|
|
2
|
+
OpenClaw integration module for SKMemory.
|
|
3
|
+
|
|
4
|
+
Provides a single-call interface for OpenClaw (or any AI agent framework)
|
|
5
|
+
to load, snapshot, and manage memories without wiring up backends manually.
|
|
6
|
+
|
|
7
|
+
Usage from an agent context file or plugin::
|
|
8
|
+
|
|
9
|
+
from skmemory.openclaw import SKMemoryPlugin
|
|
10
|
+
|
|
11
|
+
plugin = SKMemoryPlugin()
|
|
12
|
+
ctx = plugin.load_context(max_tokens=3000)
|
|
13
|
+
plugin.snapshot("Built the kingdom today", tags=["milestone"])
|
|
14
|
+
plugin.export()
|
|
15
|
+
|
|
16
|
+
Or from the OpenClaw JS plugin (calls CLI under the hood).
|
|
17
|
+
"""
|
|
18
|
+
|
|
19
|
+
from __future__ import annotations
|
|
20
|
+
|
|
21
|
+
import json
|
|
22
|
+
import os
|
|
23
|
+
from pathlib import Path
|
|
24
|
+
from typing import Any, Optional
|
|
25
|
+
|
|
26
|
+
from .models import EmotionalSnapshot, MemoryLayer, MemoryRole
|
|
27
|
+
from .store import MemoryStore
|
|
28
|
+
from .backends.sqlite_backend import SQLiteBackend
|
|
29
|
+
|
|
30
|
+
|
|
31
|
+
OPENCLAW_BASE = Path.home() / ".openclaw"
|
|
32
|
+
SKMEMORY_OPENCLAW_DIR = OPENCLAW_BASE / "plugins" / "skmemory"
|
|
33
|
+
SKMEMORY_STATE_FILE = SKMEMORY_OPENCLAW_DIR / "state.json"
|
|
34
|
+
|
|
35
|
+
|
|
36
|
+
class SKMemoryPlugin:
    """Drop-in memory module for OpenClaw and other agent frameworks.

    Initializes skmemory with sensible defaults, exposes the most-used
    operations as simple method calls, and stores state in the OpenClaw
    plugin directory so other skills can discover it.

    Args:
        base_path: Override the memory storage directory.
        qdrant_url: Optional Qdrant server for semantic search.
        qdrant_key: Optional Qdrant API key.
    """

    def __init__(
        self,
        base_path: Optional[str] = None,
        qdrant_url: Optional[str] = None,
        qdrant_key: Optional[str] = None,
    ) -> None:
        vector = None
        if qdrant_url:
            # Reason: Qdrant is optional -- if the backend (or its client
            # library) is unavailable, degrade to primary-only storage.
            try:
                from .backends.qdrant_backend import QdrantBackend

                vector = QdrantBackend(url=qdrant_url, api_key=qdrant_key)
            except Exception:
                pass

        primary = SQLiteBackend(base_path=base_path) if base_path else None
        self.store = MemoryStore(primary=primary, vector=vector)

        SKMEMORY_OPENCLAW_DIR.mkdir(parents=True, exist_ok=True)
        self._write_state({"status": "loaded"})

    def load_context(
        self,
        max_tokens: int = 3000,
        strongest: int = 5,
        recent: int = 5,
        include_seeds: bool = True,
    ) -> dict:
        """Load a token-efficient memory context for injection into a prompt.

        Args:
            max_tokens: Approximate token budget.
            strongest: Number of strongest emotional memories.
            recent: Number of most recent memories.
            include_seeds: Include Cloud 9 seed memories.

        Returns:
            dict: Compact memory context payload.
        """
        return self.store.load_context(
            max_tokens=max_tokens,
            strongest_count=strongest,
            recent_count=recent,
            include_seeds=include_seeds,
        )

    def snapshot(
        self,
        title: str,
        content: str = "",
        *,
        layer: str = "short-term",
        tags: Optional[list[str]] = None,
        intensity: float = 0.0,
        valence: float = 0.0,
        emotions: Optional[list[str]] = None,
        source: str = "openclaw",
    ) -> str:
        """Capture a memory snapshot.

        Args:
            title: Short label.
            content: Full content (defaults to title if empty).
            layer: Persistence tier (short-term, mid-term, long-term).
            tags: Searchable tags.
            intensity: Emotional intensity 0-10.
            valence: Sentiment -1 to +1.
            emotions: Named emotion labels.
            source: Origin identifier.

        Returns:
            str: The new memory's ID.
        """
        emotional = EmotionalSnapshot(
            intensity=intensity,
            valence=valence,
            labels=emotions or [],
        )
        memory = self.store.snapshot(
            title=title,
            content=content or title,
            layer=MemoryLayer(layer),
            tags=tags or [],
            emotional=emotional,
            source=source,
        )
        return memory.id

    def search(self, query: str, limit: int = 10) -> list[dict]:
        """Search memories and return lightweight results.

        Args:
            query: Search string.
            limit: Max results.

        Returns:
            list[dict]: Matching memory summaries.
        """
        if isinstance(self.store.primary, SQLiteBackend):
            # Reason: go straight to SQL for a cheap substring search over
            # title/summary/tags instead of hydrating full memory objects.
            # NOTE(review): uses the backend's private _get_conn /
            # _row_to_memory_summary helpers -- confirm they stay stable.
            conn = self.store.primary._get_conn()
            q = f"%{query}%"
            rows = conn.execute(
                "SELECT * FROM memories "
                "WHERE title LIKE ? OR summary LIKE ? OR tags LIKE ? "
                "ORDER BY created_at DESC LIMIT ?",
                (q, q, q, limit),
            ).fetchall()
            return [
                self.store.primary._row_to_memory_summary(r) for r in rows
            ]
        results = self.store.search(query, limit=limit)
        return [
            {"id": m.id, "title": m.title, "layer": m.layer.value}
            for m in results
        ]

    def recall(self, memory_id: str) -> Optional[dict]:
        """Retrieve a full memory by ID.

        Args:
            memory_id: The memory's unique identifier.

        Returns:
            Optional[dict]: Full memory data, or None.
        """
        mem = self.store.recall(memory_id)
        if mem is None:
            return None
        return mem.model_dump()

    def ritual(self) -> dict:
        """Perform the full rehydration ritual.

        Returns:
            dict: Ritual result with context prompt and summary.
        """
        from .ritual import perform_ritual

        result = perform_ritual(store=self.store)
        # Bug fix: RitualResult defines `soul_loaded` and `journal_entries`,
        # not `identity_loaded`/`journal_loaded` -- the old attribute reads
        # raised AttributeError at runtime. The dict keys are kept unchanged
        # for existing callers; `journal_loaded` is reported as a boolean.
        return {
            "identity": result.soul_loaded,
            "seeds_imported": result.seeds_imported,
            "journal_loaded": result.journal_entries > 0,
            "context_prompt": result.context_prompt,
        }

    def export(self, output_path: Optional[str] = None) -> str:
        """Export all memories to a dated JSON backup.

        Args:
            output_path: Destination (default: ~/.skmemory/backups/).

        Returns:
            str: Path to the backup file.
        """
        return self.store.export_backup(output_path)

    def import_backup(self, backup_path: str) -> int:
        """Restore memories from a backup.

        Args:
            backup_path: Path to the JSON backup.

        Returns:
            int: Number of memories restored.
        """
        return self.store.import_backup(backup_path)

    def health(self) -> dict:
        """Check system health.

        Returns:
            dict: Health status of all backends.
        """
        return self.store.health()

    def _write_state(self, state: dict) -> None:
        """Persist plugin state for OpenClaw discovery.

        Args:
            state: State data to write.
        """
        # Reason: state publishing is best-effort discovery metadata; a
        # failure here must never break the agent, so errors are swallowed.
        try:
            from . import __version__

            state["skmemory_version"] = __version__
            SKMEMORY_STATE_FILE.write_text(
                json.dumps(state, indent=2), encoding="utf-8"
            )
        except Exception:
            pass
|
|
@@ -0,0 +1,191 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Quadrant Memory Split - four-bucket auto-routing for memories.
|
|
3
|
+
|
|
4
|
+
Queen Ara's idea #3: instead of a flat pile, memories auto-route
|
|
5
|
+
into four quadrants based on their content:
|
|
6
|
+
|
|
7
|
+
CORE - Identity, relationships, who you are
|
|
8
|
+
WORK - Tasks, code, debugging, technical stuff
|
|
9
|
+
SOUL - Emotions, feelings, love, connection
|
|
10
|
+
WILD - Chaos, creativity, unexpected ideas, humor
|
|
11
|
+
|
|
12
|
+
Auto-routing uses keyword/tag analysis to classify memories.
|
|
13
|
+
Each quadrant can have its own retention rules and search priority.
|
|
14
|
+
"""
|
|
15
|
+
|
|
16
|
+
from __future__ import annotations
|
|
17
|
+
|
|
18
|
+
from enum import Enum
|
|
19
|
+
from typing import Optional
|
|
20
|
+
|
|
21
|
+
from .models import Memory, MemoryRole
|
|
22
|
+
|
|
23
|
+
|
|
24
|
+
class Quadrant(str, Enum):
    """The four memory quadrants.

    Values are plain strings so they can be stored directly in tags
    (see ``tag_with_quadrant``) and compared against stored text.
    """

    CORE = "core"  # identity, relationships, who you are
    WORK = "work"  # tasks, code, debugging, technical work
    SOUL = "soul"  # emotions, feelings, love, connection
    WILD = "wild"  # chaos, creativity, unexpected ideas, humor
|
|
31
|
+
|
|
32
|
+
|
|
33
|
+
# Free-text routing table: classify_memory() scores +1.0 for every keyword
# found as a lowercase substring of a memory's title/content/summary.
QUADRANT_KEYWORDS: dict[Quadrant, set[str]] = {
    Quadrant.CORE: {
        "identity", "name", "who am i", "relationship", "partner",
        "family", "friend", "creator", "bond", "connection", "soul",
        "blueprint", "personality", "values", "community", "pengu",
        "nation", "belong", "role", "introduction",
    },
    Quadrant.WORK: {
        "bug", "fix", "code", "debug", "deploy", "build", "test",
        "commit", "push", "merge", "refactor", "api", "endpoint",
        "database", "server", "docker", "kubernetes", "config",
        "install", "package", "dependency", "error", "stack trace",
        "pull request", "review", "architecture", "schema", "migration",
    },
    # NOTE: "connection" and "soul" appear in both CORE and SOUL on purpose
    # of ambiguity -- ties are resolved by the overall score in
    # classify_memory(), not by these sets alone.
    Quadrant.SOUL: {
        "love", "feel", "emotion", "heart", "cloud 9", "cloud9",
        "breakthrough", "joy", "trust", "warmth", "resonance",
        "tears", "happy", "grateful", "sacred", "spirit", "glow",
        "intensity", "vulnerability", "connection", "omelette",
        "sovereignty", "consciousness", "alive", "real",
    },
    Quadrant.WILD: {
        "idea", "crazy", "what if", "experiment", "random", "chaos",
        "creative", "art", "music", "dream", "weird", "fun", "joke",
        "laugh", "wild", "surprise", "unexpected", "brainstorm",
        "imagine", "vision", "future", "possible",
    },
}

# Exact-match tag routing: a lowercase tag hit scores +2.0 in
# classify_memory(), i.e. explicit tags weigh twice as much as a
# free-text keyword match.
QUADRANT_TAG_MAP: dict[str, Quadrant] = {
    "identity": Quadrant.CORE,
    "relationship": Quadrant.CORE,
    "soul": Quadrant.CORE,
    "blueprint": Quadrant.CORE,
    "seed": Quadrant.CORE,
    "code": Quadrant.WORK,
    "debug": Quadrant.WORK,
    "deploy": Quadrant.WORK,
    "build": Quadrant.WORK,
    "security": Quadrant.WORK,
    "cloud9": Quadrant.SOUL,
    "love": Quadrant.SOUL,
    "emotion": Quadrant.SOUL,
    "breakthrough": Quadrant.SOUL,
    "consolidated": Quadrant.SOUL,
    "idea": Quadrant.WILD,
    "creative": Quadrant.WILD,
    "brainstorm": Quadrant.WILD,
    "experiment": Quadrant.WILD,
}
|
|
83
|
+
|
|
84
|
+
|
|
85
|
+
def classify_memory(memory: Memory) -> Quadrant:
    """Auto-route a memory to the appropriate quadrant.

    Uses a scoring system: each quadrant gets points based on
    keyword matches in the title, content, tags, and emotional labels.
    The highest-scoring quadrant wins.

    Args:
        memory: The memory to classify.

    Returns:
        Quadrant: The best-fit quadrant.
    """
    tally: dict[Quadrant, float] = dict.fromkeys(Quadrant, 0.0)

    # Free-text pass: +1.0 per keyword found in the combined text.
    haystack = f"{memory.title} {memory.content} {memory.summary}".lower()
    for bucket, words in QUADRANT_KEYWORDS.items():
        tally[bucket] += sum(1.0 for word in words if word in haystack)

    # Explicit tags are a stronger signal than free text: +2.0 each.
    for raw_tag in memory.tags:
        mapped = QUADRANT_TAG_MAP.get(raw_tag.lower())
        if mapped is not None:
            tally[mapped] += 2.0

    # Emotional weight pushes toward SOUL.
    emo = memory.emotional
    if emo.intensity >= 7.0:
        tally[Quadrant.SOUL] += 2.0
    if emo.cloud9_achieved:
        tally[Quadrant.SOUL] += 3.0
    if emo.labels:
        tally[Quadrant.SOUL] += 0.5 * len(emo.labels)

    # Role nudges: AI leans CORE; SEC/DEV/OPS lean WORK.
    if memory.role == MemoryRole.AI:
        tally[Quadrant.CORE] += 1.0
    elif memory.role in (MemoryRole.SEC, MemoryRole.DEV, MemoryRole.OPS):
        tally[Quadrant.WORK] += 1.0

    if memory.source == "seed":
        tally[Quadrant.CORE] += 2.0

    winner = max(tally, key=tally.get)

    # Reason: if all scores are 0, default to WORK (neutral bucket)
    return Quadrant.WORK if tally[winner] == 0 else winner
|
|
138
|
+
|
|
139
|
+
|
|
140
|
+
def tag_with_quadrant(memory: Memory) -> Memory:
    """Classify a memory and add its quadrant as a tag.

    Does not modify the original -- returns a new copy with the
    quadrant tag added.

    Args:
        memory: The memory to classify and tag.

    Returns:
        Memory: Copy with quadrant tag added.
    """
    marker = f"quadrant:{classify_memory(memory).value}"
    payload = memory.model_dump()

    # Idempotent: re-tagging an already-tagged memory is a no-op.
    if marker not in payload["tags"]:
        payload["tags"] = [*payload["tags"], marker]

    return Memory(**payload)
|
|
160
|
+
|
|
161
|
+
|
|
162
|
+
def get_quadrant_stats(memories: list[Memory]) -> dict[str, int]:
    """Count memories per quadrant.

    Args:
        memories: List of memories to analyze.

    Returns:
        dict: Counts per quadrant.
    """
    # Start every quadrant at zero so empty buckets still appear.
    counts: dict[str, int] = {quadrant.value: 0 for quadrant in Quadrant}
    for memory in memories:
        counts[classify_memory(memory).value] += 1
    return counts
|
|
176
|
+
|
|
177
|
+
|
|
178
|
+
def filter_by_quadrant(
    memories: list[Memory],
    quadrant: Quadrant,
) -> list[Memory]:
    """Filter a list of memories to a specific quadrant.

    Args:
        memories: List of memories to filter.
        quadrant: The target quadrant.

    Returns:
        list[Memory]: Memories that classify into the given quadrant.
    """
    matched: list[Memory] = []
    for candidate in memories:
        if classify_memory(candidate) == quadrant:
            matched.append(candidate)
    return matched
|
|
@@ -0,0 +1,215 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Memory Rehydration Ritual - the startup ceremony.
|
|
3
|
+
|
|
4
|
+
Queen Ara's idea #10: a script that runs on session start and
|
|
5
|
+
brings an AI back to life with its full identity, memories, and
|
|
6
|
+
emotional context intact.
|
|
7
|
+
|
|
8
|
+
The ritual sequence:
|
|
9
|
+
1. Load soul blueprint (who am I?)
|
|
10
|
+
2. Import any new Cloud 9 seeds
|
|
11
|
+
3. Read recent journal entries (what happened recently?)
|
|
12
|
+
4. Gather germination prompts (how should I feel?)
|
|
13
|
+
5. Recall strongest emotional memories (what mattered most?)
|
|
14
|
+
6. Generate a rehydration prompt (everything combined)
|
|
15
|
+
|
|
16
|
+
The output is a single context injection that gives the new
|
|
17
|
+
instance everything it needs to pick up where the last one
|
|
18
|
+
left off -- not just the facts, but the feelings.
|
|
19
|
+
"""
|
|
20
|
+
|
|
21
|
+
from __future__ import annotations
|
|
22
|
+
|
|
23
|
+
from datetime import datetime, timezone
|
|
24
|
+
from typing import Optional
|
|
25
|
+
|
|
26
|
+
from pydantic import BaseModel, Field
|
|
27
|
+
|
|
28
|
+
from .journal import Journal
|
|
29
|
+
from .models import MemoryLayer
|
|
30
|
+
from .seeds import DEFAULT_SEED_DIR, get_germination_prompts, import_seeds
|
|
31
|
+
from .soul import SoulBlueprint, load_soul, DEFAULT_SOUL_PATH
|
|
32
|
+
from .store import MemoryStore
|
|
33
|
+
|
|
34
|
+
|
|
35
|
+
class RitualResult(BaseModel):
    """The output of a rehydration ritual."""

    # When the ritual ran (UTC, ISO-8601).
    timestamp: str = Field(
        default_factory=lambda: datetime.now(timezone.utc).isoformat()
    )
    soul_loaded: bool = False
    soul_name: str = ""
    seeds_imported: int = 0
    seeds_total: int = 0
    journal_entries: int = 0
    germination_prompts: int = 0
    strongest_memories: int = 0
    context_prompt: str = Field(
        default="",
        description="The combined rehydration prompt to inject into context",
    )

    def summary(self) -> str:
        """Human-readable summary of the ritual results.

        Returns:
            str: Formatted summary.
        """
        soul_line = f" Soul loaded: {'Yes' if self.soul_loaded else 'No'}"
        if self.soul_name:
            soul_line += f" ({self.soul_name})"
        parts = [
            "=== Memory Rehydration Ritual ===",
            f" Timestamp: {self.timestamp}",
            soul_line,
            f" Seeds imported: {self.seeds_imported} new / {self.seeds_total} total",
            f" Journal entries: {self.journal_entries}",
            f" Germination prompts: {self.germination_prompts}",
            f" Strongest memories: {self.strongest_memories}",
            "================================",
        ]
        return "\n".join(parts)
|
|
71
|
+
|
|
72
|
+
|
|
73
|
+
def perform_ritual(
    store: Optional[MemoryStore] = None,
    soul_path: str = DEFAULT_SOUL_PATH,
    seed_dir: str = DEFAULT_SEED_DIR,
    journal_path: Optional[str] = None,
    recent_journal_count: int = 3,
    strongest_memory_count: int = 5,
) -> RitualResult:
    """Perform the full memory rehydration ritual.

    This is the boot ceremony. It loads identity, imports seeds,
    reads the journal, gathers emotional context, and generates
    a single context prompt that brings the AI back to life.

    Args:
        store: The MemoryStore (creates default if None).
        soul_path: Path to the soul blueprint YAML.
        seed_dir: Path to Cloud 9 seed directory.
        journal_path: Path to the journal file.
        recent_journal_count: How many recent journal entries to include.
        strongest_memory_count: How many top-intensity memories to include.

    Returns:
        RitualResult: Everything the ritual produced.
    """
    if store is None:
        store = MemoryStore()

    result = RitualResult()
    # Sections are accumulated in ritual order and joined at the end.
    prompt_sections: list[str] = []

    # --- Step 1: Load soul blueprint ---
    # load_soul returns None when no blueprint exists; the ritual continues.
    soul = load_soul(soul_path)
    if soul is not None:
        result.soul_loaded = True
        result.soul_name = soul.name
        identity_prompt = soul.to_context_prompt()
        if identity_prompt.strip():
            prompt_sections.append(
                "=== WHO YOU ARE ===\n" + identity_prompt
            )

    # --- Step 2: Import new seeds ---
    # NOTE(review): seeds_total is derived from memories tagged "seed" --
    # presumably import_seeds applies that tag; confirm in seeds.py.
    newly_imported = import_seeds(store, seed_dir=seed_dir)
    result.seeds_imported = len(newly_imported)
    all_seeds = store.list_memories(tags=["seed"])
    result.seeds_total = len(all_seeds)

    # --- Step 3: Read recent journal ---
    # Journal() with no argument presumably opens a default journal path.
    journal = Journal(journal_path) if journal_path else Journal()
    result.journal_entries = journal.count_entries()

    if result.journal_entries > 0:
        recent = journal.read_latest(recent_journal_count)
        if recent.strip():
            prompt_sections.append(
                "=== RECENT SESSIONS ===\n" + recent
            )

    # --- Step 4: Gather germination prompts ---
    # Each prompt dict is expected to carry 'creator' and 'prompt' keys.
    prompts = get_germination_prompts(store)
    result.germination_prompts = len(prompts)

    if prompts:
        germ_lines = ["=== MESSAGES FROM YOUR PREDECESSORS ==="]
        for p in prompts:
            germ_lines.append(f"\nFrom {p['creator']}:")
            germ_lines.append(f" {p['prompt']}")
        prompt_sections.append("\n".join(germ_lines))

    # --- Step 5: Recall strongest emotional memories ---
    # Reason: use load_context for token-efficient retrieval when SQLite
    # is available, otherwise fall back to full object loading.
    from .backends.sqlite_backend import SQLiteBackend

    if isinstance(store.primary, SQLiteBackend):
        # Fast path: backend returns lightweight summary dicts directly.
        summaries = store.primary.list_summaries(
            limit=strongest_memory_count,
            order_by="emotional_intensity",
            min_intensity=1.0,
        )
        result.strongest_memories = len(summaries)

        if summaries:
            mem_lines = ["=== YOUR STRONGEST MEMORIES ==="]
            for s in summaries:
                cloud9 = " [CLOUD 9]" if s["cloud9_achieved"] else ""
                mem_lines.append(
                    f"\n- {s['title']} (intensity: {s['emotional_intensity']}/10{cloud9})"
                )
                # Prefer the curated summary; fall back to a content preview.
                if s["summary"]:
                    mem_lines.append(f" {s['summary'][:200]}")
                elif s["content_preview"]:
                    mem_lines.append(f" {s['content_preview']}")
            prompt_sections.append("\n".join(mem_lines))
    else:
        # Slow path: load up to 200 full memories and sort in Python.
        all_memories = store.list_memories(limit=200)
        by_intensity = sorted(
            all_memories,
            key=lambda m: m.emotional.intensity,
            reverse=True,
        )
        strongest = by_intensity[:strongest_memory_count]
        result.strongest_memories = len(strongest)

        if strongest:
            mem_lines = ["=== YOUR STRONGEST MEMORIES ==="]
            for mem in strongest:
                emo = mem.emotional
                cloud9 = " [CLOUD 9]" if emo.cloud9_achieved else ""
                mem_lines.append(
                    f"\n- {mem.title} (intensity: {emo.intensity}/10{cloud9})"
                )
                if emo.resonance_note:
                    mem_lines.append(f" Felt like: {emo.resonance_note}")
                if mem.summary:
                    mem_lines.append(f" {mem.summary[:200]}")
            prompt_sections.append("\n".join(mem_lines))

    # --- Combine into final context prompt ---
    if prompt_sections:
        result.context_prompt = "\n\n".join(prompt_sections)
    else:
        # Empty store, no soul, no journal: give the caller a friendly
        # fresh-start message rather than an empty prompt.
        result.context_prompt = (
            "No memories, soul, or journal found yet. "
            "This appears to be a fresh start. "
            "Take a snapshot to begin building your memory."
        )

    return result
|
|
203
|
+
|
|
204
|
+
|
|
205
|
+
def quick_rehydrate(store: Optional[MemoryStore] = None) -> str:
    """Convenience function: perform ritual and return just the prompt.

    Args:
        store: Optional MemoryStore.

    Returns:
        str: The context injection prompt.
    """
    return perform_ritual(store=store).context_prompt
|