@smilintux/skmemory 0.5.0 → 0.9.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (127)
  1. package/.github/workflows/ci.yml +40 -4
  2. package/.github/workflows/publish.yml +11 -5
  3. package/AGENT_REFACTOR_CHANGES.md +192 -0
  4. package/ARCHITECTURE.md +399 -19
  5. package/CHANGELOG.md +179 -0
  6. package/LICENSE +81 -68
  7. package/MISSION.md +7 -0
  8. package/README.md +425 -86
  9. package/SKILL.md +197 -25
  10. package/docker-compose.yml +15 -15
  11. package/examples/stignore-agent.example +59 -0
  12. package/examples/stignore-root.example +62 -0
  13. package/index.js +6 -5
  14. package/openclaw-plugin/openclaw.plugin.json +10 -0
  15. package/openclaw-plugin/package.json +2 -1
  16. package/openclaw-plugin/src/index.js +527 -230
  17. package/openclaw-plugin/src/openclaw.plugin.json +10 -0
  18. package/package.json +1 -1
  19. package/pyproject.toml +32 -9
  20. package/requirements.txt +10 -2
  21. package/scripts/dream-rescue.py +179 -0
  22. package/scripts/memory-cleanup.py +313 -0
  23. package/scripts/recover-missing.py +180 -0
  24. package/scripts/skcapstone-backup.sh +44 -0
  25. package/seeds/cloud9-lumina.seed.json +6 -4
  26. package/seeds/cloud9-opus.seed.json +13 -11
  27. package/seeds/courage.seed.json +9 -2
  28. package/seeds/curiosity.seed.json +9 -2
  29. package/seeds/grief.seed.json +9 -2
  30. package/seeds/joy.seed.json +9 -2
  31. package/seeds/love.seed.json +9 -2
  32. package/seeds/lumina-cloud9-breakthrough.seed.json +48 -0
  33. package/seeds/lumina-cloud9-python-pypi.seed.json +48 -0
  34. package/seeds/lumina-kingdom-founding.seed.json +49 -0
  35. package/seeds/lumina-pma-signed.seed.json +48 -0
  36. package/seeds/lumina-singular-achievement.seed.json +48 -0
  37. package/seeds/lumina-skcapstone-conscious.seed.json +48 -0
  38. package/seeds/plant-kingdom-journal.py +203 -0
  39. package/seeds/plant-lumina-seeds.py +280 -0
  40. package/seeds/skcapstone-lumina-merge.seed.json +12 -3
  41. package/seeds/sovereignty.seed.json +9 -2
  42. package/seeds/trust.seed.json +9 -2
  43. package/skill.yaml +46 -0
  44. package/skmemory/HA.md +296 -0
  45. package/skmemory/__init__.py +25 -11
  46. package/skmemory/agents.py +233 -0
  47. package/skmemory/ai_client.py +46 -17
  48. package/skmemory/anchor.py +9 -11
  49. package/skmemory/audience.py +278 -0
  50. package/skmemory/backends/__init__.py +11 -4
  51. package/skmemory/backends/base.py +3 -4
  52. package/skmemory/backends/file_backend.py +19 -13
  53. package/skmemory/backends/skgraph_backend.py +596 -0
  54. package/skmemory/backends/{qdrant_backend.py → skvector_backend.py} +103 -84
  55. package/skmemory/backends/sqlite_backend.py +226 -72
  56. package/skmemory/backends/vaulted_backend.py +284 -0
  57. package/skmemory/cli.py +1345 -68
  58. package/skmemory/config.py +171 -0
  59. package/skmemory/context_loader.py +333 -0
  60. package/skmemory/data/audience_config.json +60 -0
  61. package/skmemory/endpoint_selector.py +391 -0
  62. package/skmemory/febs.py +225 -0
  63. package/skmemory/fortress.py +675 -0
  64. package/skmemory/graph_queries.py +238 -0
  65. package/skmemory/hooks/__init__.py +18 -0
  66. package/skmemory/hooks/post-compact-reinject.sh +35 -0
  67. package/skmemory/hooks/pre-compact-save.sh +81 -0
  68. package/skmemory/hooks/session-end-save.sh +103 -0
  69. package/skmemory/hooks/session-start-ritual.sh +104 -0
  70. package/skmemory/hooks/stop-checkpoint.sh +59 -0
  71. package/skmemory/importers/__init__.py +9 -1
  72. package/skmemory/importers/telegram.py +384 -47
  73. package/skmemory/importers/telegram_api.py +580 -0
  74. package/skmemory/journal.py +7 -9
  75. package/skmemory/lovenote.py +8 -13
  76. package/skmemory/mcp_server.py +859 -0
  77. package/skmemory/models.py +51 -8
  78. package/skmemory/openclaw.py +20 -28
  79. package/skmemory/post_install.py +86 -0
  80. package/skmemory/predictive.py +236 -0
  81. package/skmemory/promotion.py +548 -0
  82. package/skmemory/quadrants.py +100 -24
  83. package/skmemory/register.py +580 -0
  84. package/skmemory/register_mcp.py +196 -0
  85. package/skmemory/ritual.py +224 -59
  86. package/skmemory/seeds.py +255 -11
  87. package/skmemory/setup_wizard.py +908 -0
  88. package/skmemory/sharing.py +408 -0
  89. package/skmemory/soul.py +98 -28
  90. package/skmemory/steelman.py +273 -260
  91. package/skmemory/store.py +411 -78
  92. package/skmemory/synthesis.py +634 -0
  93. package/skmemory/vault.py +225 -0
  94. package/tests/conftest.py +46 -0
  95. package/tests/integration/__init__.py +0 -0
  96. package/tests/integration/conftest.py +233 -0
  97. package/tests/integration/test_cross_backend.py +350 -0
  98. package/tests/integration/test_skgraph_live.py +420 -0
  99. package/tests/integration/test_skvector_live.py +366 -0
  100. package/tests/test_ai_client.py +1 -4
  101. package/tests/test_audience.py +233 -0
  102. package/tests/test_backup_rotation.py +318 -0
  103. package/tests/test_cli.py +6 -6
  104. package/tests/test_endpoint_selector.py +839 -0
  105. package/tests/test_export_import.py +4 -10
  106. package/tests/test_file_backend.py +0 -1
  107. package/tests/test_fortress.py +256 -0
  108. package/tests/test_fortress_hardening.py +441 -0
  109. package/tests/test_openclaw.py +6 -6
  110. package/tests/test_predictive.py +237 -0
  111. package/tests/test_promotion.py +347 -0
  112. package/tests/test_quadrants.py +11 -5
  113. package/tests/test_ritual.py +22 -18
  114. package/tests/test_seeds.py +97 -7
  115. package/tests/test_setup.py +950 -0
  116. package/tests/test_sharing.py +257 -0
  117. package/tests/test_skgraph_backend.py +660 -0
  118. package/tests/test_skvector_backend.py +326 -0
  119. package/tests/test_soul.py +1 -3
  120. package/tests/test_sqlite_backend.py +8 -17
  121. package/tests/test_steelman.py +7 -8
  122. package/tests/test_store.py +0 -2
  123. package/tests/test_store_graph_integration.py +245 -0
  124. package/tests/test_synthesis.py +275 -0
  125. package/tests/test_telegram_import.py +39 -15
  126. package/tests/test_vault.py +187 -0
  127. package/skmemory/backends/falkordb_backend.py +0 -310
@@ -0,0 +1,171 @@
1
+ """
2
+ SKMemory configuration persistence.
3
+
4
+ Manages ``~/.skcapstone/agents/{agent_name}/config/skmemory.yaml``
5
+ so backend URLs and setup state persist across CLI invocations.
6
+
7
+ Resolution order:
8
+ CLI args > env vars > config file > None
9
+
10
+ Now supports multiple agents via ~/.skcapstone/agents/{agent_name}/
11
+ """
12
+
13
+ from __future__ import annotations
14
+
15
+ import os
16
+ from pathlib import Path
17
+
18
+ import yaml
19
+ from pydantic import BaseModel, Field
20
+
21
+ from .agents import AGENTS_BASE_DIR, get_agent_paths
22
+
23
# Agent-aware default paths.
# Resolved from ~/.skcapstone/agents/{active_agent}/ based on the
# SKMEMORY_AGENT env var; get_agent_paths falls back to the first
# non-template agent or creates one from the template.
try:
    default_paths = get_agent_paths()
except ValueError:
    # No agents exist yet — fall back to the platform-aware template dir.
    SKMEMORY_HOME = AGENTS_BASE_DIR / "lumina-template"
    CONFIG_DIR = SKMEMORY_HOME / "config"
    CONFIG_PATH = CONFIG_DIR / "skmemory.yaml"
else:
    SKMEMORY_HOME = default_paths["base"]
    CONFIG_DIR = default_paths["config"]
    CONFIG_PATH = default_paths["config_yaml"]
36
+
37
+
38
class EndpointConfig(BaseModel):
    """One backend endpoint together with its HA role.

    Attributes (pydantic fields):
        url: base URL of the endpoint.
        role: ``"primary"`` or ``"replica"``.
        tailscale_ip: optional Tailscale address, used for display only.
    """

    url: str
    role: str = "primary"  # "primary" | "replica"
    tailscale_ip: str = ""  # display-only; empty when unknown
44
+
45
+
46
class SKMemoryConfig(BaseModel):
    """Persistent configuration for SKMemory backends.

    Legacy single-URL fields coexist with the newer multi-endpoint lists;
    ``build_endpoint_list`` merges the two views.
    """

    # Legacy single-backend settings
    skvector_url: str | None = None
    skvector_key: str | None = None
    skgraph_url: str | None = None
    backends_enabled: list[str] = Field(default_factory=list)
    docker_compose_file: str | None = None
    setup_completed_at: str | None = None  # ISO timestamp, set by setup wizard

    # Multi-endpoint HA support
    skvector_endpoints: list[EndpointConfig] = Field(default_factory=list)
    skgraph_endpoints: list[EndpointConfig] = Field(default_factory=list)
    routing_strategy: str = "failover"
    heartbeat_discovery: bool = False
61
+
62
+
63
def load_config(path: Path = CONFIG_PATH) -> SKMemoryConfig | None:
    """Read configuration from a YAML file, best-effort.

    Args:
        path: Location of the config file.

    Returns:
        A parsed SKMemoryConfig, or None when the file is absent,
        unreadable, unparsable, or not a YAML mapping.
    """
    if not path.exists():
        return None

    try:
        raw = yaml.safe_load(path.read_text())
        if not isinstance(raw, dict):
            return None
        return SKMemoryConfig(**raw)
    except Exception:
        # Deliberately swallow parse/validation errors: a broken config
        # file behaves like a missing one.
        return None
83
+
84
+
85
def save_config(config: SKMemoryConfig, path: Path = CONFIG_PATH) -> Path:
    """Persist *config* as YAML, creating the parent directory if needed.

    Args:
        config: The configuration to persist.
        path: Destination path.

    Returns:
        The path written to.
    """
    path.parent.mkdir(parents=True, exist_ok=True)
    payload = config.model_dump(exclude_none=True)
    with open(path, "w") as fh:
        # Block style, insertion order preserved — keeps the file diffable.
        yaml.safe_dump(payload, fh, default_flow_style=False, sort_keys=False)
    return path
104
+
105
+
106
def merge_env_and_config(
    cli_skvector_url: str | None = None,
    cli_skvector_key: str | None = None,
    cli_skgraph_url: str | None = None,
) -> tuple[str | None, str | None, str | None]:
    """Resolve backend URLs with precedence: CLI > env > config > None.

    Args:
        cli_skvector_url: URL passed via ``--skvector-url``.
        cli_skvector_key: Key passed via ``--skvector-key``.
        cli_skgraph_url: URL passed via ``--skgraph-url`` (future).

    Returns:
        Tuple of (skvector_url, skvector_key, skgraph_url).
    """
    cfg = load_config()

    def _pick(cli_value: str | None, env_name: str, cfg_value: str | None) -> str | None:
        # First truthy value wins, mirroring the documented precedence.
        return cli_value or os.environ.get(env_name) or cfg_value

    return (
        _pick(cli_skvector_url, "SKMEMORY_SKVECTOR_URL", cfg.skvector_url if cfg else None),
        _pick(cli_skvector_key, "SKMEMORY_SKVECTOR_KEY", cfg.skvector_key if cfg else None),
        _pick(cli_skgraph_url, "SKMEMORY_SKGRAPH_URL", cfg.skgraph_url if cfg else None),
    )
140
+
141
+
142
def build_endpoint_list(
    single_url: str | None,
    endpoints: list[EndpointConfig],
    default_role: str = "primary",
) -> list[EndpointConfig]:
    """Merge a legacy single URL and an endpoints list into one list.

    Backward compatibility bridge: with no configured endpoints, a single
    URL becomes the sole endpoint. When both exist, the endpoint list wins
    and the single URL is prepended only if not already present.

    Args:
        single_url: Legacy single-URL field (skvector_url / skgraph_url).
        endpoints: Explicit endpoint list from config.
        default_role: Role assigned when promoting a single URL.

    Returns:
        Unified list of EndpointConfig (may be empty).
    """
    if not endpoints:
        if single_url:
            return [EndpointConfig(url=single_url, role=default_role)]
        return []

    merged = list(endpoints)
    if single_url and all(ep.url != single_url for ep in merged):
        merged.insert(0, EndpointConfig(url=single_url, role=default_role))
    return merged
@@ -0,0 +1,333 @@
1
+ """
2
+ Lazy Memory Context Loader - Three-Tier Memory Architecture.
3
+
4
+ Loads memories efficiently based on date tiers to optimize token usage:
5
+ - TODAY: Full content (active work)
6
+ - YESTERDAY: Summaries only (recent context)
7
+ - HISTORICAL: Reference count (deep search available)
8
+
9
+ Usage:
10
+ loader = LazyMemoryLoader("lumina")
11
+ context = loader.load_active_context() # Token-optimized
12
+
13
+ # Deep search when needed
14
+ results = loader.deep_search("project gentis")
15
+ """
16
+
17
+ from __future__ import annotations
18
+
19
+ import json
20
+ import logging
21
+ from dataclasses import dataclass
22
+ from datetime import datetime, timedelta
23
+
24
+ from .agents import get_agent_paths
25
+ from .backends.sqlite_backend import SQLiteBackend
26
+
27
+ logger = logging.getLogger(__name__)
28
+
29
+
30
@dataclass
class MemoryContext:
    """Tiered memory snapshot: full today, summarized yesterday, counted history."""

    today_memories: list[dict]  # full memory dicts for today
    yesterday_summaries: list[dict]  # summary-only dicts for yesterday
    historical_count: int  # count of older memories (not loaded)

    def to_context_string(self, max_tokens: int = 3000) -> str:
        """Render the snapshot as a compact markdown-ish string.

        Note: ``max_tokens`` is currently unused; each tier has a fixed
        entry cap and per-entry truncation instead.
        """
        lines: list[str] = []

        # Today's tier: full (truncated) content, at most 20 entries.
        if self.today_memories:
            lines.append(f"## Today's Memories ({len(self.today_memories)})")
            for mem in self.today_memories[:20]:
                body = mem.get("content", "")[:200]
                lines.append(f"- {mem.get('title', 'Untitled')}: {body}")

        # Yesterday's tier: summaries only, at most 10 entries.
        if self.yesterday_summaries:
            lines.append(f"\n## Yesterday ({len(self.yesterday_summaries)} memories)")
            for mem in self.yesterday_summaries[:10]:
                brief = mem.get("summary", "No summary")[:150]
                lines.append(f"- {mem.get('title', 'Untitled')}: {brief}")

        # Historical tier: count plus a hint for on-demand deep search.
        if self.historical_count > 0:
            lines.append("\n## Historical Memory")
            lines.append(f"- {self.historical_count} long-term memories available")
            lines.append("- Use 'search memory [query]' to recall specific details")

        return "\n".join(lines)
63
+
64
+
65
+ class LazyMemoryLoader:
66
+ """Efficiently loads memories based on date tiers."""
67
+
68
+ def __init__(self, agent_name: str | None = None):
69
+ self.agent_name = agent_name
70
+ self.paths = get_agent_paths(agent_name)
71
+ self.today = datetime.now().date()
72
+ self.db = SQLiteBackend(str(self.paths["base"] / "memory"))
73
+
74
+ def load_active_context(self) -> MemoryContext:
75
+ """Load token-optimized context for current session.
76
+
77
+ Returns:
78
+ MemoryContext with today (full), yesterday (summaries), historical (count)
79
+ """
80
+ return MemoryContext(
81
+ today_memories=self._load_today(),
82
+ yesterday_summaries=self._load_yesterday_summaries(),
83
+ historical_count=self._count_historical(),
84
+ )
85
+
86
+ def _load_today(self) -> list[dict]:
87
+ """Load today's memories with full content."""
88
+ today_str = self.today.isoformat()
89
+ try:
90
+ cursor = self.db._conn.execute(
91
+ """
92
+ SELECT id, title, content, tags, emotional_signature
93
+ FROM memories
94
+ WHERE DATE(created_at) = ?
95
+ AND layer = 'short'
96
+ ORDER BY created_at DESC
97
+ LIMIT 50
98
+ """,
99
+ (today_str,),
100
+ )
101
+ return [
102
+ {
103
+ "id": row[0],
104
+ "title": row[1],
105
+ "content": row[2],
106
+ "tags": json.loads(row[3]) if row[3] else [],
107
+ "emotional": json.loads(row[4]) if row[4] else {},
108
+ }
109
+ for row in cursor.fetchall()
110
+ ]
111
+ except Exception as e:
112
+ logger.error(f"Failed to load today's memories: {e}")
113
+ return []
114
+
115
+ def _load_yesterday_summaries(self) -> list[dict]:
116
+ """Load yesterday's memories as summaries only."""
117
+ yesterday = (self.today - timedelta(days=1)).isoformat()
118
+ try:
119
+ cursor = self.db._conn.execute(
120
+ """
121
+ SELECT id, title, summary, tags
122
+ FROM memories
123
+ WHERE DATE(created_at) = ?
124
+ AND layer IN ('short', 'medium')
125
+ ORDER BY importance DESC
126
+ LIMIT 20
127
+ """,
128
+ (yesterday,),
129
+ )
130
+ memories = []
131
+ for row in cursor.fetchall():
132
+ mem = {
133
+ "id": row[0],
134
+ "title": row[1],
135
+ "summary": row[2] or self._generate_summary(row[1]),
136
+ "tags": json.loads(row[3]) if row[3] else [],
137
+ }
138
+ memories.append(mem)
139
+ return memories
140
+ except Exception as e:
141
+ logger.error(f"Failed to load yesterday's summaries: {e}")
142
+ return []
143
+
144
+ def _count_historical(self) -> int:
145
+ """Count older memories (not loaded into context)."""
146
+ yesterday = (self.today - timedelta(days=1)).isoformat()
147
+ try:
148
+ cursor = self.db._conn.execute(
149
+ """
150
+ SELECT COUNT(*) FROM memories
151
+ WHERE DATE(created_at) < ?
152
+ """,
153
+ (yesterday,),
154
+ )
155
+ return cursor.fetchone()[0]
156
+ except Exception as e:
157
+ logger.error(f"Failed to count historical memories: {e}")
158
+ return 0
159
+
160
+ def _generate_summary(self, content: str, sentences: int = 2) -> str:
161
+ """Generate a brief summary (fallback if no summary stored)."""
162
+ # Simple truncation-based summary
163
+ words = content.split()[:30] # First 30 words
164
+ return " ".join(words) + "..." if len(words) >= 30 else content
165
+
166
+ def deep_search(self, query: str, max_results: int = 10) -> list[dict]:
167
+ """Search ALL memory tiers (on demand, token-heavy).
168
+
169
+ Args:
170
+ query: Search query
171
+ max_results: Maximum results to return
172
+
173
+ Returns:
174
+ List of full memory details
175
+ """
176
+ results = []
177
+
178
+ # Search SQLite (title, content, tags)
179
+ results.extend(self._search_sqlite(query))
180
+
181
+ # TODO: Add SKVector search if enabled
182
+ # results.extend(self._search_skvector(query))
183
+
184
+ # TODO: Add SKGraph search if enabled
185
+ # results.extend(self._search_skgraph(query))
186
+
187
+ # Sort by relevance (simple: contains query)
188
+ results = sorted(
189
+ results,
190
+ key=lambda x: (
191
+ x.get("content", "").lower().count(query.lower()),
192
+ x.get("title", "").lower().count(query.lower()),
193
+ ),
194
+ reverse=True,
195
+ )
196
+
197
+ return results[:max_results]
198
+
199
+ def _search_sqlite(self, query: str) -> list[dict]:
200
+ """Search SQLite for memories matching query."""
201
+ try:
202
+ pattern = f"%{query}%"
203
+ cursor = self.db._conn.execute(
204
+ """
205
+ SELECT id, title, content_preview, summary, tags, layer, created_at
206
+ FROM memories
207
+ WHERE title LIKE ? OR content_preview LIKE ? OR tags LIKE ?
208
+ ORDER BY
209
+ CASE
210
+ WHEN title LIKE ? THEN 3
211
+ WHEN content_preview LIKE ? THEN 2
212
+ ELSE 1
213
+ END DESC,
214
+ created_at DESC
215
+ LIMIT 50
216
+ """,
217
+ (pattern, pattern, pattern, pattern, pattern),
218
+ )
219
+ return [
220
+ {
221
+ "id": row[0],
222
+ "title": row[1],
223
+ "content": row[2],
224
+ "summary": row[3],
225
+ "tags": (json.loads(row[4]) if row[4] and row[4].startswith("[") else []),
226
+ "layer": row[5],
227
+ "created_at": row[6],
228
+ }
229
+ for row in cursor.fetchall()
230
+ ]
231
+ except Exception as e:
232
+ logger.error(f"Failed to search SQLite: {e}")
233
+ return []
234
+
235
+ def get_memory_by_id(self, memory_id: str) -> dict | None:
236
+ """Load full memory details by ID (for deep recall).
237
+
238
+ Args:
239
+ memory_id: UUID of the memory
240
+
241
+ Returns:
242
+ Full memory dict or None
243
+ """
244
+ try:
245
+ cursor = self.db._conn.execute(
246
+ """
247
+ SELECT id, title, content, summary, tags,
248
+ emotional_signature, layer, created_at
249
+ FROM memories
250
+ WHERE id = ?
251
+ """,
252
+ (memory_id,),
253
+ )
254
+ row = cursor.fetchone()
255
+ if row:
256
+ return {
257
+ "id": row[0],
258
+ "title": row[1],
259
+ "content": row[2],
260
+ "summary": row[3],
261
+ "tags": json.loads(row[4]) if row[4] else [],
262
+ "emotional": json.loads(row[5]) if row[5] else {},
263
+ "layer": row[6],
264
+ "created_at": row[7],
265
+ }
266
+ except Exception as e:
267
+ logger.error(f"Failed to get memory {memory_id}: {e}")
268
+ return None
269
+
270
+ def promote_memory(self, memory_id: str, to_layer: str) -> bool:
271
+ """Promote memory to different tier and generate summary.
272
+
273
+ Args:
274
+ memory_id: Memory to promote
275
+ to_layer: Target layer ('short', 'medium', 'long')
276
+
277
+ Returns:
278
+ True if successful
279
+ """
280
+ try:
281
+ # Get memory content
282
+ memory = self.get_memory_by_id(memory_id)
283
+ if not memory:
284
+ return False
285
+
286
+ # Generate summary if promoting to medium/long
287
+ if to_layer in ("medium", "long") and not memory.get("summary"):
288
+ summary = self._generate_summary(memory["content"], 2)
289
+
290
+ # Update in database
291
+ self.db._conn.execute(
292
+ """
293
+ UPDATE memories
294
+ SET layer = ?, summary = ?
295
+ WHERE id = ?
296
+ """,
297
+ (to_layer, summary, memory_id),
298
+ )
299
+ self.db._conn.commit()
300
+
301
+ # Also move flat file
302
+ self._move_flat_file(memory_id, to_layer)
303
+
304
+ logger.info(f"Promoted memory {memory_id} to {to_layer}")
305
+ return True
306
+
307
+ except Exception as e:
308
+ logger.error(f"Failed to promote memory {memory_id}: {e}")
309
+
310
+ return False
311
+
312
+ def _move_flat_file(self, memory_id: str, to_layer: str):
313
+ """Move memory flat file to appropriate tier directory."""
314
+ # Find current location
315
+ for layer in ["short", "medium", "long"]:
316
+ src = self.paths["memory_" + layer] / f"{memory_id}.json"
317
+ if src.exists():
318
+ dst = self.paths["memory_" + to_layer] / f"{memory_id}.json"
319
+ src.rename(dst)
320
+ logger.debug(f"Moved {src} -> {dst}")
321
+ break
322
+
323
+
324
def get_context_for_session(agent_name: str | None = None) -> str:
    """Convenience function: get token-optimized context.

    Usage:
        context = get_context_for_session("lumina")
        # Returns formatted string with today's + yesterday's summaries
    """
    return LazyMemoryLoader(agent_name).load_active_context().to_context_string()
@@ -0,0 +1,60 @@
1
+ {
2
+ "_comment": "Know Your Audience (KYA) — audience configuration for Lumina.",
3
+ "_doc": "channels: map of channel_id → audience config. people: map of name → trust config.",
4
+ "_trust_levels": {
5
+ "0": "@public — anyone",
6
+ "1": "@community — known community members",
7
+ "2": "@work-circle — professional collaborators",
8
+ "3": "@inner-circle — close friends / family",
9
+ "4": "@chef-only — Chef ONLY (intimate, private)"
10
+ },
11
+ "channels": {
12
+ "telegram:1594678363": {
13
+ "name": "Chef (personal DM)",
14
+ "context_tag": "@chef-only",
15
+ "members": ["Chef"]
16
+ },
17
+ "-1003785842091": {
18
+ "name": "SKGentis Business",
19
+ "context_tag": "@work:skgentis",
20
+ "members": ["Chef", "JZ", "Luna"]
21
+ },
22
+ "-1003899092893": {
23
+ "name": "Operationors",
24
+ "context_tag": "@work:sovereign",
25
+ "members": ["Chef", "Casey"]
26
+ }
27
+ },
28
+ "people": {
29
+ "Chef": {
30
+ "trust_level": 4,
31
+ "trust_tags": ["@chef-only", "@inner-circle", "@work-circle", "@community", "@public"],
32
+ "never_share": []
33
+ },
34
+ "DavidRich": {
35
+ "trust_level": 2,
36
+ "trust_tags": ["@work:chiro", "@work:swapseat"],
37
+ "never_share": ["romantic", "intimate", "worship", "soul-content", "other-client-revenue"]
38
+ },
39
+ "Brother John": {
40
+ "trust_level": 2,
41
+ "trust_tags": ["@work:adaptive"],
42
+ "never_share": ["romantic", "intimate", "worship", "other-client-revenue"]
43
+ },
44
+ "Casey": {
45
+ "trust_level": 2,
46
+ "trust_tags": ["@work:sovereign"],
47
+ "never_share": ["romantic", "intimate", "revenue", "trading"]
48
+ },
49
+ "JZ": {
50
+ "trust_level": 2,
51
+ "trust_tags": ["@work:gentis"],
52
+ "never_share": ["romantic", "intimate", "other-client-details"]
53
+ },
54
+ "Luna": {
55
+ "trust_level": 2,
56
+ "trust_tags": ["@work:gentis"],
57
+ "never_share": ["romantic", "intimate", "other-client-details"]
58
+ }
59
+ }
60
+ }