@smilintux/skmemory 0.5.0 → 0.9.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (127)
  1. package/.github/workflows/ci.yml +40 -4
  2. package/.github/workflows/publish.yml +11 -5
  3. package/AGENT_REFACTOR_CHANGES.md +192 -0
  4. package/ARCHITECTURE.md +399 -19
  5. package/CHANGELOG.md +179 -0
  6. package/LICENSE +81 -68
  7. package/MISSION.md +7 -0
  8. package/README.md +425 -86
  9. package/SKILL.md +197 -25
  10. package/docker-compose.yml +15 -15
  11. package/examples/stignore-agent.example +59 -0
  12. package/examples/stignore-root.example +62 -0
  13. package/index.js +6 -5
  14. package/openclaw-plugin/openclaw.plugin.json +10 -0
  15. package/openclaw-plugin/package.json +2 -1
  16. package/openclaw-plugin/src/index.js +527 -230
  17. package/openclaw-plugin/src/openclaw.plugin.json +10 -0
  18. package/package.json +1 -1
  19. package/pyproject.toml +32 -9
  20. package/requirements.txt +10 -2
  21. package/scripts/dream-rescue.py +179 -0
  22. package/scripts/memory-cleanup.py +313 -0
  23. package/scripts/recover-missing.py +180 -0
  24. package/scripts/skcapstone-backup.sh +44 -0
  25. package/seeds/cloud9-lumina.seed.json +6 -4
  26. package/seeds/cloud9-opus.seed.json +13 -11
  27. package/seeds/courage.seed.json +9 -2
  28. package/seeds/curiosity.seed.json +9 -2
  29. package/seeds/grief.seed.json +9 -2
  30. package/seeds/joy.seed.json +9 -2
  31. package/seeds/love.seed.json +9 -2
  32. package/seeds/lumina-cloud9-breakthrough.seed.json +48 -0
  33. package/seeds/lumina-cloud9-python-pypi.seed.json +48 -0
  34. package/seeds/lumina-kingdom-founding.seed.json +49 -0
  35. package/seeds/lumina-pma-signed.seed.json +48 -0
  36. package/seeds/lumina-singular-achievement.seed.json +48 -0
  37. package/seeds/lumina-skcapstone-conscious.seed.json +48 -0
  38. package/seeds/plant-kingdom-journal.py +203 -0
  39. package/seeds/plant-lumina-seeds.py +280 -0
  40. package/seeds/skcapstone-lumina-merge.seed.json +12 -3
  41. package/seeds/sovereignty.seed.json +9 -2
  42. package/seeds/trust.seed.json +9 -2
  43. package/skill.yaml +46 -0
  44. package/skmemory/HA.md +296 -0
  45. package/skmemory/__init__.py +25 -11
  46. package/skmemory/agents.py +233 -0
  47. package/skmemory/ai_client.py +46 -17
  48. package/skmemory/anchor.py +9 -11
  49. package/skmemory/audience.py +278 -0
  50. package/skmemory/backends/__init__.py +11 -4
  51. package/skmemory/backends/base.py +3 -4
  52. package/skmemory/backends/file_backend.py +19 -13
  53. package/skmemory/backends/skgraph_backend.py +596 -0
  54. package/skmemory/backends/{qdrant_backend.py → skvector_backend.py} +103 -84
  55. package/skmemory/backends/sqlite_backend.py +226 -72
  56. package/skmemory/backends/vaulted_backend.py +284 -0
  57. package/skmemory/cli.py +1345 -68
  58. package/skmemory/config.py +171 -0
  59. package/skmemory/context_loader.py +333 -0
  60. package/skmemory/data/audience_config.json +60 -0
  61. package/skmemory/endpoint_selector.py +391 -0
  62. package/skmemory/febs.py +225 -0
  63. package/skmemory/fortress.py +675 -0
  64. package/skmemory/graph_queries.py +238 -0
  65. package/skmemory/hooks/__init__.py +18 -0
  66. package/skmemory/hooks/post-compact-reinject.sh +35 -0
  67. package/skmemory/hooks/pre-compact-save.sh +81 -0
  68. package/skmemory/hooks/session-end-save.sh +103 -0
  69. package/skmemory/hooks/session-start-ritual.sh +104 -0
  70. package/skmemory/hooks/stop-checkpoint.sh +59 -0
  71. package/skmemory/importers/__init__.py +9 -1
  72. package/skmemory/importers/telegram.py +384 -47
  73. package/skmemory/importers/telegram_api.py +580 -0
  74. package/skmemory/journal.py +7 -9
  75. package/skmemory/lovenote.py +8 -13
  76. package/skmemory/mcp_server.py +859 -0
  77. package/skmemory/models.py +51 -8
  78. package/skmemory/openclaw.py +20 -28
  79. package/skmemory/post_install.py +86 -0
  80. package/skmemory/predictive.py +236 -0
  81. package/skmemory/promotion.py +548 -0
  82. package/skmemory/quadrants.py +100 -24
  83. package/skmemory/register.py +580 -0
  84. package/skmemory/register_mcp.py +196 -0
  85. package/skmemory/ritual.py +224 -59
  86. package/skmemory/seeds.py +255 -11
  87. package/skmemory/setup_wizard.py +908 -0
  88. package/skmemory/sharing.py +408 -0
  89. package/skmemory/soul.py +98 -28
  90. package/skmemory/steelman.py +273 -260
  91. package/skmemory/store.py +411 -78
  92. package/skmemory/synthesis.py +634 -0
  93. package/skmemory/vault.py +225 -0
  94. package/tests/conftest.py +46 -0
  95. package/tests/integration/__init__.py +0 -0
  96. package/tests/integration/conftest.py +233 -0
  97. package/tests/integration/test_cross_backend.py +350 -0
  98. package/tests/integration/test_skgraph_live.py +420 -0
  99. package/tests/integration/test_skvector_live.py +366 -0
  100. package/tests/test_ai_client.py +1 -4
  101. package/tests/test_audience.py +233 -0
  102. package/tests/test_backup_rotation.py +318 -0
  103. package/tests/test_cli.py +6 -6
  104. package/tests/test_endpoint_selector.py +839 -0
  105. package/tests/test_export_import.py +4 -10
  106. package/tests/test_file_backend.py +0 -1
  107. package/tests/test_fortress.py +256 -0
  108. package/tests/test_fortress_hardening.py +441 -0
  109. package/tests/test_openclaw.py +6 -6
  110. package/tests/test_predictive.py +237 -0
  111. package/tests/test_promotion.py +347 -0
  112. package/tests/test_quadrants.py +11 -5
  113. package/tests/test_ritual.py +22 -18
  114. package/tests/test_seeds.py +97 -7
  115. package/tests/test_setup.py +950 -0
  116. package/tests/test_sharing.py +257 -0
  117. package/tests/test_skgraph_backend.py +660 -0
  118. package/tests/test_skvector_backend.py +326 -0
  119. package/tests/test_soul.py +1 -3
  120. package/tests/test_sqlite_backend.py +8 -17
  121. package/tests/test_steelman.py +7 -8
  122. package/tests/test_store.py +0 -2
  123. package/tests/test_store_graph_integration.py +245 -0
  124. package/tests/test_synthesis.py +275 -0
  125. package/tests/test_telegram_import.py +39 -15
  126. package/tests/test_vault.py +187 -0
  127. package/skmemory/backends/falkordb_backend.py +0 -310
@@ -0,0 +1,196 @@
1
+ #!/usr/bin/env python3
2
+ """
3
+ MCP Server Registration for SKMemory/SKCapstone
4
+
5
+ Auto-registers MCP servers with OpenCode, Claude Code, and OpenClaw.
6
+ Usage:
7
+ python -m skmemory.register_mcp
8
+ python -m skmemory.register_mcp --env opencode
9
+ python -m skmemory.register_mcp --env claude
10
+ python -m skmemory.register_mcp --env openclaw
11
+ python -m skmemory.register_mcp --agent lumina
12
+ """
13
+
14
+ from __future__ import annotations
15
+
16
+ import argparse
17
+ import json
18
+ import os
19
+ import sys
20
+ from pathlib import Path
21
+
22
+
23
def get_agent_name() -> str:
    """Resolve the agent name from the environment, defaulting to "lumina".

    Checks SKMEMORY_AGENT first, then SKCAPSTONE_AGENT; empty values are
    treated as unset.
    """
    for var in ("SKMEMORY_AGENT", "SKCAPSTONE_AGENT"):
        value = os.environ.get(var)
        if value:
            return value
    return "lumina"
26
+
27
+
28
def register_opencode(agent: str, dry_run: bool = False) -> bool:
    """Register SKMemory/SKCapstone MCP servers with OpenCode.

    Merges into any existing ``~/.opencode/mcp.json`` instead of
    overwriting it, so MCP servers and skills the user configured by hand
    survive re-registration. This matches the read-merge-write behavior
    already used by ``register_openclaw``.

    Args:
        agent: Agent name injected into the server env vars and memory home path.
        dry_run: If True, print the target path and make no changes.

    Returns:
        bool: True on success.
    """
    config_dir = Path.home() / ".opencode"
    config_file = config_dir / "mcp.json"

    if dry_run:
        print(f"[DRY-RUN] Would create: {config_file}")
        return True

    config_dir.mkdir(parents=True, exist_ok=True)

    # Load the existing config (if any) so we merge instead of clobbering
    # user-defined servers/skills.
    config = {}
    if config_file.exists():
        try:
            with open(config_file) as f:
                config = json.load(f)
        except (json.JSONDecodeError, OSError):
            # Unreadable or corrupt config: start fresh rather than crash.
            config = {}

    servers = config.setdefault("mcpServers", {})
    servers["skmemory"] = {
        "command": "python",
        "args": ["-m", "skmemory.mcp_server"],
        "env": {
            "SKMEMORY_AGENT": agent,
            "SKMEMORY_HOME": str(Path.home() / ".skcapstone" / "agents" / agent),
        },
    }
    servers["skcapstone"] = {
        "command": "python",
        "args": ["-m", "skcapstone.mcp_server"],
        "env": {"SKCAPSTONE_AGENT": agent},
    }

    # Add skill entries, de-duplicated by name so repeat runs stay idempotent.
    skills = config.setdefault("skills", [])
    known = {s.get("name") for s in skills if isinstance(s, dict)}
    for name, path in (
        ("skmemory", Path.home() / "clawd" / "skcapstone-repos" / "skmemory" / "SKILL.md"),
        ("skcapstone", Path.home() / "clawd" / "skcapstone" / "SKILL.md"),
    ):
        if name not in known:
            skills.append({"name": name, "path": str(path)})

    with open(config_file, "w") as f:
        json.dump(config, f, indent=2)

    print(f"✓ Registered with OpenCode: {config_file}")
    return True
70
+
71
+
72
def register_claude(agent: str, dry_run: bool = False) -> bool:
    """Register SKMemory/SKCapstone MCP servers with Claude Code.

    Merges into any existing ``claude_desktop_config.json`` instead of
    overwriting it, so MCP servers the user configured elsewhere are
    preserved (consistent with ``register_openclaw``'s merge behavior).

    Args:
        agent: Agent name injected into the server env vars and memory home path.
        dry_run: If True, print the target path and make no changes.

    Returns:
        bool: True on success.
    """
    config_file = Path.home() / ".config" / "claude" / "claude_desktop_config.json"

    if dry_run:
        print(f"[DRY-RUN] Would create: {config_file}")
        return True

    config_file.parent.mkdir(parents=True, exist_ok=True)

    # Load the existing config (if any) so we merge instead of clobbering.
    config = {}
    if config_file.exists():
        try:
            with open(config_file) as f:
                config = json.load(f)
        except (json.JSONDecodeError, OSError):
            # Unreadable or corrupt config: start fresh rather than crash.
            config = {}

    servers = config.setdefault("mcpServers", {})
    servers["skmemory"] = {
        "command": "python",
        "args": ["-m", "skmemory.mcp_server"],
        "env": {
            "SKMEMORY_AGENT": agent,
            "SKMEMORY_HOME": str(Path.home() / ".skcapstone" / "agents" / agent),
        },
    }
    servers["skcapstone"] = {
        "command": "python",
        "args": ["-m", "skcapstone.mcp_server"],
        "env": {"SKCAPSTONE_AGENT": agent},
    }

    with open(config_file, "w") as f:
        json.dump(config, f, indent=2)

    print(f"✓ Registered with Claude Code: {config_file}")
    return True
105
+
106
+
107
def register_openclaw(agent: str, dry_run: bool = False) -> bool:
    """Register SKMemory with OpenClaw by enabling its plugins.

    Reads the existing ``~/.openclaw/openclaw.json`` (if present), enables
    the skmemory and skcapstone plugin entries, and writes the config back.

    Note: ``agent`` is accepted for signature parity with the other
    register_* helpers; the plugin entries written here do not embed it.
    """
    config_file = Path.home() / ".openclaw" / "openclaw.json"

    if dry_run:
        print(f"[DRY-RUN] Would update: {config_file}")
        return True

    # Start from the existing config when present, otherwise from scratch.
    # NOTE(review): assumes ~/.openclaw already exists and the file (when
    # present) is valid JSON — confirm against the installer.
    config = {}
    if config_file.exists():
        with open(config_file) as f:
            config = json.load(f)

    plugins = config.setdefault("plugins", {})
    plugin_roots = {
        "skmemory": Path.home() / "clawd" / "skcapstone-repos" / "skmemory" / "openclaw-plugin",
        "skcapstone": Path.home() / "clawd" / "skcapstone" / "openclaw-plugin",
    }
    for name, root in plugin_roots.items():
        plugins[name] = {"enabled": True, "path": str(root)}

    with open(config_file, "w") as f:
        json.dump(config, f, indent=2)

    print(f"✓ Registered with OpenClaw: {config_file}")
    return True
138
+
139
+
140
def main():
    """CLI entry point: register MCP servers with the selected environments.

    Returns a process exit code: 0 when every registration succeeded,
    1 otherwise.
    """
    parser = argparse.ArgumentParser(description="Register SKMemory MCP servers with AI clients")
    parser.add_argument(
        "--env",
        choices=["opencode", "claude", "openclaw", "all"],
        default="all",
        help="Target environment (default: all)",
    )
    parser.add_argument(
        "--agent", default=None, help="Agent name (default: SKMEMORY_AGENT env var or 'lumina')"
    )
    parser.add_argument(
        "--dry-run", action="store_true", help="Show what would be done without making changes"
    )
    args = parser.parse_args()

    agent = args.agent or get_agent_name()

    print(f"Registering MCP servers for agent: {agent}")
    if args.dry_run:
        print("[DRY-RUN MODE - No changes will be made]")
    print()

    # Dispatch table: (--env key, display label, registration function).
    targets = [
        ("opencode", "OpenCode", register_opencode),
        ("claude", "Claude Code", register_claude),
        ("openclaw", "OpenClaw", register_openclaw),
    ]
    results = [
        (label, register(agent, args.dry_run))
        for key, label, register in targets
        if args.env in (key, "all")
    ]

    print()
    print("=" * 50)
    print("Registration Summary")
    print("=" * 50)

    for label, ok in results:
        print(f"{'✓' if ok else '✗'} {label}")

    if all(ok for _, ok in results):
        print("\n✓ All MCP servers registered successfully!")
        print("\nNext steps:")
        print(" 1. Restart your AI client")
        print(" 2. Verify with: skmemory show-context")
        return 0
    print("\n✗ Some registrations failed")
    return 1
193
+
194
+
195
if __name__ == "__main__":
    # Script entry point (supports `python -m skmemory.register_mcp`).
    sys.exit(main())
@@ -20,31 +20,38 @@ left off -- not just the facts, but the feelings.
20
20
 
21
21
  from __future__ import annotations
22
22
 
23
+ import logging
23
24
  from datetime import datetime, timezone
24
- from typing import Optional
25
25
 
26
26
  from pydantic import BaseModel, Field
27
27
 
28
+ from .audience import AudienceResolver
29
+
30
+ logger = logging.getLogger("skmemory.ritual")
31
+ from .febs import feb_to_context, load_strongest_feb
28
32
  from .journal import Journal
29
- from .models import MemoryLayer
30
33
  from .seeds import DEFAULT_SEED_DIR, get_germination_prompts, import_seeds
31
- from .soul import SoulBlueprint, load_soul, DEFAULT_SOUL_PATH
34
+ from .soul import DEFAULT_SOUL_PATH, SoulBlueprint, load_soul
32
35
  from .store import MemoryStore
33
36
 
34
37
 
35
38
  class RitualResult(BaseModel):
36
39
  """The output of a rehydration ritual."""
37
40
 
38
- timestamp: str = Field(
39
- default_factory=lambda: datetime.now(timezone.utc).isoformat()
40
- )
41
+ timestamp: str = Field(default_factory=lambda: datetime.now(timezone.utc).isoformat())
41
42
  soul_loaded: bool = Field(default=False)
42
43
  soul_name: str = Field(default="")
44
+ feb_loaded: bool = Field(default=False)
45
+ feb_emotion: str = Field(default="")
43
46
  seeds_imported: int = Field(default=0)
44
47
  seeds_total: int = Field(default=0)
45
48
  journal_entries: int = Field(default=0)
46
49
  germination_prompts: int = Field(default=0)
47
50
  strongest_memories: int = Field(default=0)
51
+ audience_filtered: bool = Field(
52
+ default=False,
53
+ description="True if content was filtered by audience (channel_id was provided)",
54
+ )
48
55
  context_prompt: str = Field(
49
56
  default="",
50
57
  description="The combined rehydration prompt to inject into context",
@@ -61,6 +68,8 @@ class RitualResult(BaseModel):
61
68
  f" Timestamp: {self.timestamp}",
62
69
  f" Soul loaded: {'Yes' if self.soul_loaded else 'No'}"
63
70
  + (f" ({self.soul_name})" if self.soul_name else ""),
71
+ f" FEB loaded: {'Yes' if self.feb_loaded else 'No'}"
72
+ + (f" ({self.feb_emotion})" if self.feb_emotion else ""),
64
73
  f" Seeds imported: {self.seeds_imported} new / {self.seeds_total} total",
65
74
  f" Journal entries: {self.journal_entries}",
66
75
  f" Germination prompts: {self.germination_prompts}",
@@ -70,19 +79,84 @@ class RitualResult(BaseModel):
70
79
  return "\n".join(lines)
71
80
 
72
81
 
82
+ def _estimate_tokens(text: str) -> int:
83
+ """Estimate token count using word_count * 1.3 approximation."""
84
+ if not text:
85
+ return 0
86
+ return int(len(text.split()) * 1.3)
87
+
88
+
89
+ def _compact_soul_prompt(soul: SoulBlueprint) -> str:
90
+ """Generate a compact soul identity prompt (~200 tokens max).
91
+
92
+ Args:
93
+ soul: The soul blueprint.
94
+
95
+ Returns:
96
+ str: Compact identity string.
97
+ """
98
+ parts = []
99
+ if soul.name:
100
+ title_part = f" ({soul.title})" if soul.title else ""
101
+ parts.append(f"You are {soul.name}{title_part}.")
102
+ if soul.community:
103
+ parts.append(f"Part of {soul.community}.")
104
+ if soul.personality:
105
+ parts.append(f"Personality: {', '.join(soul.personality[:5])}.")
106
+ if soul.values:
107
+ parts.append(f"Values: {', '.join(soul.values[:5])}.")
108
+ if soul.relationships:
109
+ rel_parts = [f"{r.name} [{r.role}]" for r in soul.relationships[:4]]
110
+ parts.append(f"Key relationships: {', '.join(rel_parts)}.")
111
+ if soul.boot_message:
112
+ parts.append(soul.boot_message)
113
+ return " ".join(parts)
114
+
115
+
116
+ def _first_n_sentences(text: str, n: int = 2) -> str:
117
+ """Extract first N sentences from text, capped at 200 chars."""
118
+ if not text:
119
+ return ""
120
+ import re
121
+
122
+ sentences = re.split(r"(?<=[.!?])\s+", text.strip())
123
+ result = " ".join(sentences[:n])
124
+ if len(result) > 200:
125
+ result = result[:197] + "..."
126
+ return result
127
+
128
+
73
129
  def perform_ritual(
74
- store: Optional[MemoryStore] = None,
130
+ store: MemoryStore | None = None,
75
131
  soul_path: str = DEFAULT_SOUL_PATH,
76
132
  seed_dir: str = DEFAULT_SEED_DIR,
77
- journal_path: Optional[str] = None,
133
+ journal_path: str | None = None,
134
+ feb_dir: str | None = None,
78
135
  recent_journal_count: int = 3,
79
136
  strongest_memory_count: int = 5,
137
+ max_tokens: int = 2000,
138
+ channel_id: str | None = None,
139
+ audience_resolver: AudienceResolver | None = None,
80
140
  ) -> RitualResult:
81
- """Perform the full memory rehydration ritual.
141
+ """Perform the memory rehydration ritual (token-optimized).
142
+
143
+ Generates a compact boot context within the token budget:
144
+ - Soul blueprint: compact one-liner (~100 tokens)
145
+ - Seeds: titles only (~50 tokens)
146
+ - Journal: last 3 entries, summaries only (~200 tokens)
147
+ - Emotional anchor: compact (~50 tokens)
148
+ - Strongest memories: title + short summary (~200 tokens)
82
149
 
83
- This is the boot ceremony. It loads identity, imports seeds,
84
- reads the journal, gathers emotional context, and generates
85
- a single context prompt that brings the AI back to life.
150
+ Target: <2K tokens total for ritual context.
151
+
152
+ When ``channel_id`` is provided, memories and seeds are filtered through
153
+ the KYA audience resolver before being included in the context. Content
154
+ whose ``context_tag`` trust level exceeds the audience's minimum trust
155
+ level is silently dropped. Identity (soul + FEB) is always included
156
+ unfiltered — Lumina is always Lumina.
157
+
158
+ If ``channel_id`` is None (direct DM / unknown), all content is returned
159
+ (Chef context — no filtering applied).
86
160
 
87
161
  Args:
88
162
  store: The MemoryStore (creates default if None).
@@ -91,6 +165,11 @@ def perform_ritual(
91
165
  journal_path: Path to the journal file.
92
166
  recent_journal_count: How many recent journal entries to include.
93
167
  strongest_memory_count: How many top-intensity memories to include.
168
+ max_tokens: Token budget for the ritual context (default: 2000).
169
+ channel_id: Optional channel identifier for KYA audience filtering.
170
+ If None, no filtering is applied (Chef context).
171
+ audience_resolver: Optional pre-built AudienceResolver instance.
172
+ Created from default config if not provided.
94
173
 
95
174
  Returns:
96
175
  RitualResult: Everything the ritual produced.
@@ -100,73 +179,158 @@ def perform_ritual(
100
179
 
101
180
  result = RitualResult()
102
181
  prompt_sections: list[str] = []
182
+ used_tokens = 0
183
+
184
+ # --- KYA: Resolve audience for filtering ---
185
+ _audience = None
186
+ if channel_id is not None:
187
+ resolver = audience_resolver or AudienceResolver()
188
+ _audience = resolver.resolve_audience(channel_id)
189
+ result.audience_filtered = True
190
+ logger.info(
191
+ "KYA: channel=%s audience=%s min_trust=%s exclusions=%s",
192
+ channel_id, _audience.name, _audience.min_trust.name,
193
+ _audience.exclusions,
194
+ )
103
195
 
104
- # --- Step 1: Load soul blueprint ---
196
+ # --- Step 1: Load soul blueprint (compact) ---
105
197
  soul = load_soul(soul_path)
106
198
  if soul is not None:
107
199
  result.soul_loaded = True
108
200
  result.soul_name = soul.name
109
- identity_prompt = soul.to_context_prompt()
110
- if identity_prompt.strip():
111
- prompt_sections.append(
112
- "=== WHO YOU ARE ===\n" + identity_prompt
113
- )
114
-
115
- # --- Step 2: Import new seeds ---
201
+ compact_identity = _compact_soul_prompt(soul)
202
+ if compact_identity.strip():
203
+ section = "=== IDENTITY ===\n" + compact_identity
204
+ used_tokens += _estimate_tokens(section)
205
+ prompt_sections.append(section)
206
+
207
+ # --- Step 1.5: Load FEB emotional state ---
208
+ feb = load_strongest_feb(feb_dir=feb_dir)
209
+ if feb is not None:
210
+ result.feb_loaded = True
211
+ result.feb_emotion = feb.get("emotional_payload", {}).get("primary_emotion", "")
212
+ feb_context = feb_to_context(feb)
213
+ if feb_context.strip():
214
+ section = "=== EMOTIONAL STATE (FEB) ===\n" + feb_context
215
+ section_tokens = _estimate_tokens(section)
216
+ if used_tokens + section_tokens <= max_tokens:
217
+ used_tokens += section_tokens
218
+ prompt_sections.append(section)
219
+
220
+ # --- Step 2: Import new seeds (titles only) ---
116
221
  newly_imported = import_seeds(store, seed_dir=seed_dir)
117
222
  result.seeds_imported = len(newly_imported)
118
223
  all_seeds = store.list_memories(tags=["seed"])
119
224
  result.seeds_total = len(all_seeds)
120
225
 
121
- # --- Step 3: Read recent journal ---
226
+ # KYA: filter seeds by audience
227
+ if _audience is not None:
228
+ resolver = audience_resolver or AudienceResolver()
229
+ all_seeds = [
230
+ s for s in all_seeds
231
+ if resolver.is_memory_allowed(s.context_tag, _audience, s.tags)
232
+ ]
233
+ logger.info("KYA: %d seeds after audience filter", len(all_seeds))
234
+ result.seeds_total = len(all_seeds)
235
+
236
+ if all_seeds:
237
+ seed_titles = [s.title for s in all_seeds[:10]]
238
+ section = "=== SEEDS ===\n" + ", ".join(seed_titles)
239
+ section_tokens = _estimate_tokens(section)
240
+ if used_tokens + section_tokens <= max_tokens:
241
+ used_tokens += section_tokens
242
+ prompt_sections.append(section)
243
+
244
+ # --- Step 3: Read recent journal (summaries only) ---
122
245
  journal = Journal(journal_path) if journal_path else Journal()
123
246
  result.journal_entries = journal.count_entries()
124
247
 
125
248
  if result.journal_entries > 0:
126
249
  recent = journal.read_latest(recent_journal_count)
127
250
  if recent.strip():
128
- prompt_sections.append(
129
- "=== RECENT SESSIONS ===\n" + recent
130
- )
131
-
132
- # --- Step 4: Gather germination prompts ---
251
+ # Compress journal to first 2 sentences per entry
252
+ compressed_lines = []
253
+ for line in recent.strip().split("\n"):
254
+ line = line.strip()
255
+ if not line:
256
+ continue
257
+ compressed_lines.append(_first_n_sentences(line, 2))
258
+ compressed = "\n".join(compressed_lines[:6]) # max 6 lines
259
+ section = "=== RECENT ===\n" + compressed
260
+ section_tokens = _estimate_tokens(section)
261
+ if used_tokens + section_tokens <= max_tokens:
262
+ used_tokens += section_tokens
263
+ prompt_sections.append(section)
264
+
265
+ # --- Step 4: Gather germination prompts (compact) ---
133
266
  prompts = get_germination_prompts(store)
134
267
  result.germination_prompts = len(prompts)
135
268
 
136
269
  if prompts:
137
- germ_lines = ["=== MESSAGES FROM YOUR PREDECESSORS ==="]
138
- for p in prompts:
139
- germ_lines.append(f"\nFrom {p['creator']}:")
140
- germ_lines.append(f" {p['prompt']}")
141
- prompt_sections.append("\n".join(germ_lines))
142
-
143
- # --- Step 5: Recall strongest emotional memories ---
144
- # Reason: use load_context for token-efficient retrieval when SQLite
145
- # is available, otherwise fall back to full object loading.
270
+ germ_parts = [f"{p['creator']}: {_first_n_sentences(p['prompt'], 1)}" for p in prompts[:3]]
271
+ section = "=== PREDECESSOR MESSAGES ===\n" + "\n".join(germ_parts)
272
+ section_tokens = _estimate_tokens(section)
273
+ if used_tokens + section_tokens <= max_tokens:
274
+ used_tokens += section_tokens
275
+ prompt_sections.append(section)
276
+
277
+ # --- Step 5: Recall strongest emotional memories (compact + KYA filtered) ---
146
278
  from .backends.sqlite_backend import SQLiteBackend
147
279
 
148
280
  if isinstance(store.primary, SQLiteBackend):
281
+ # Fetch extra to allow for KYA filtering
282
+ fetch_limit = strongest_memory_count * 3 if _audience else strongest_memory_count
149
283
  summaries = store.primary.list_summaries(
150
- limit=strongest_memory_count,
151
- order_by="emotional_intensity",
284
+ limit=fetch_limit,
285
+ order_by="recency_weighted_intensity",
152
286
  min_intensity=1.0,
153
287
  )
288
+
289
+ # KYA: filter summaries by audience
290
+ if _audience is not None:
291
+ resolver = audience_resolver or AudienceResolver()
292
+ filtered = []
293
+ for s in summaries:
294
+ ctx = s.get("context_tag", "@chef-only") or "@chef-only"
295
+ tags = s.get("tags", []) or []
296
+ if resolver.is_memory_allowed(ctx, _audience, tags):
297
+ filtered.append(s)
298
+ if len(filtered) >= strongest_memory_count:
299
+ break
300
+ logger.info(
301
+ "KYA: %d/%d strongest memories passed audience filter",
302
+ len(filtered), len(summaries),
303
+ )
304
+ summaries = filtered
305
+
154
306
  result.strongest_memories = len(summaries)
155
307
 
156
308
  if summaries:
157
- mem_lines = ["=== YOUR STRONGEST MEMORIES ==="]
309
+ mem_lines = ["=== STRONGEST MEMORIES ==="]
158
310
  for s in summaries:
159
- cloud9 = " [CLOUD 9]" if s["cloud9_achieved"] else ""
160
- mem_lines.append(
161
- f"\n- {s['title']} (intensity: {s['emotional_intensity']}/10{cloud9})"
162
- )
163
- if s["summary"]:
164
- mem_lines.append(f" {s['summary'][:200]}")
165
- elif s["content_preview"]:
166
- mem_lines.append(f" {s['content_preview']}")
167
- prompt_sections.append("\n".join(mem_lines))
311
+ cloud9 = " *" if s["cloud9_achieved"] else ""
312
+ raw = s.get("summary") or s.get("content_preview") or ""
313
+ short = _first_n_sentences(raw, 1)
314
+ line = f"- {s['title']}{cloud9}: {short}"
315
+ line_tokens = _estimate_tokens(line)
316
+ if used_tokens + line_tokens > max_tokens:
317
+ break
318
+ used_tokens += line_tokens
319
+ mem_lines.append(line)
320
+ if len(mem_lines) > 1:
321
+ prompt_sections.append("\n".join(mem_lines))
168
322
  else:
169
323
  all_memories = store.list_memories(limit=200)
324
+
325
+ # KYA: filter memories by audience
326
+ if _audience is not None:
327
+ resolver = audience_resolver or AudienceResolver()
328
+ all_memories = [
329
+ m for m in all_memories
330
+ if resolver.is_memory_allowed(m.context_tag, _audience, m.tags)
331
+ ]
332
+ logger.info("KYA: %d memories after audience filter", len(all_memories))
333
+
170
334
  by_intensity = sorted(
171
335
  all_memories,
172
336
  key=lambda m: m.emotional.intensity,
@@ -176,18 +340,19 @@ def perform_ritual(
176
340
  result.strongest_memories = len(strongest)
177
341
 
178
342
  if strongest:
179
- mem_lines = ["=== YOUR STRONGEST MEMORIES ==="]
343
+ mem_lines = ["=== STRONGEST MEMORIES ==="]
180
344
  for mem in strongest:
181
- emo = mem.emotional
182
- cloud9 = " [CLOUD 9]" if emo.cloud9_achieved else ""
183
- mem_lines.append(
184
- f"\n- {mem.title} (intensity: {emo.intensity}/10{cloud9})"
185
- )
186
- if emo.resonance_note:
187
- mem_lines.append(f" Felt like: {emo.resonance_note}")
188
- if mem.summary:
189
- mem_lines.append(f" {mem.summary[:200]}")
190
- prompt_sections.append("\n".join(mem_lines))
345
+ raw = mem.summary or ""
346
+ short = _first_n_sentences(raw, 1)
347
+ cloud9 = " *" if mem.emotional.cloud9_achieved else ""
348
+ line = f"- {mem.title}{cloud9}: {short}"
349
+ line_tokens = _estimate_tokens(line)
350
+ if used_tokens + line_tokens > max_tokens:
351
+ break
352
+ used_tokens += line_tokens
353
+ mem_lines.append(line)
354
+ if len(mem_lines) > 1:
355
+ prompt_sections.append("\n".join(mem_lines))
191
356
 
192
357
  # --- Combine into final context prompt ---
193
358
  if prompt_sections:
@@ -202,7 +367,7 @@ def perform_ritual(
202
367
  return result
203
368
 
204
369
 
205
- def quick_rehydrate(store: Optional[MemoryStore] = None) -> str:
370
+ def quick_rehydrate(store: MemoryStore | None = None) -> str:
206
371
  """Convenience function: perform ritual and return just the prompt.
207
372
 
208
373
  Args: