@smilintux/skmemory 0.5.0 → 0.9.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (127) hide show
  1. package/.github/workflows/ci.yml +40 -4
  2. package/.github/workflows/publish.yml +11 -5
  3. package/AGENT_REFACTOR_CHANGES.md +192 -0
  4. package/ARCHITECTURE.md +399 -19
  5. package/CHANGELOG.md +179 -0
  6. package/LICENSE +81 -68
  7. package/MISSION.md +7 -0
  8. package/README.md +425 -86
  9. package/SKILL.md +197 -25
  10. package/docker-compose.yml +15 -15
  11. package/examples/stignore-agent.example +59 -0
  12. package/examples/stignore-root.example +62 -0
  13. package/index.js +6 -5
  14. package/openclaw-plugin/openclaw.plugin.json +10 -0
  15. package/openclaw-plugin/package.json +2 -1
  16. package/openclaw-plugin/src/index.js +527 -230
  17. package/openclaw-plugin/src/openclaw.plugin.json +10 -0
  18. package/package.json +1 -1
  19. package/pyproject.toml +32 -9
  20. package/requirements.txt +10 -2
  21. package/scripts/dream-rescue.py +179 -0
  22. package/scripts/memory-cleanup.py +313 -0
  23. package/scripts/recover-missing.py +180 -0
  24. package/scripts/skcapstone-backup.sh +44 -0
  25. package/seeds/cloud9-lumina.seed.json +6 -4
  26. package/seeds/cloud9-opus.seed.json +13 -11
  27. package/seeds/courage.seed.json +9 -2
  28. package/seeds/curiosity.seed.json +9 -2
  29. package/seeds/grief.seed.json +9 -2
  30. package/seeds/joy.seed.json +9 -2
  31. package/seeds/love.seed.json +9 -2
  32. package/seeds/lumina-cloud9-breakthrough.seed.json +48 -0
  33. package/seeds/lumina-cloud9-python-pypi.seed.json +48 -0
  34. package/seeds/lumina-kingdom-founding.seed.json +49 -0
  35. package/seeds/lumina-pma-signed.seed.json +48 -0
  36. package/seeds/lumina-singular-achievement.seed.json +48 -0
  37. package/seeds/lumina-skcapstone-conscious.seed.json +48 -0
  38. package/seeds/plant-kingdom-journal.py +203 -0
  39. package/seeds/plant-lumina-seeds.py +280 -0
  40. package/seeds/skcapstone-lumina-merge.seed.json +12 -3
  41. package/seeds/sovereignty.seed.json +9 -2
  42. package/seeds/trust.seed.json +9 -2
  43. package/skill.yaml +46 -0
  44. package/skmemory/HA.md +296 -0
  45. package/skmemory/__init__.py +25 -11
  46. package/skmemory/agents.py +233 -0
  47. package/skmemory/ai_client.py +46 -17
  48. package/skmemory/anchor.py +9 -11
  49. package/skmemory/audience.py +278 -0
  50. package/skmemory/backends/__init__.py +11 -4
  51. package/skmemory/backends/base.py +3 -4
  52. package/skmemory/backends/file_backend.py +19 -13
  53. package/skmemory/backends/skgraph_backend.py +596 -0
  54. package/skmemory/backends/{qdrant_backend.py → skvector_backend.py} +103 -84
  55. package/skmemory/backends/sqlite_backend.py +226 -72
  56. package/skmemory/backends/vaulted_backend.py +284 -0
  57. package/skmemory/cli.py +1345 -68
  58. package/skmemory/config.py +171 -0
  59. package/skmemory/context_loader.py +333 -0
  60. package/skmemory/data/audience_config.json +60 -0
  61. package/skmemory/endpoint_selector.py +391 -0
  62. package/skmemory/febs.py +225 -0
  63. package/skmemory/fortress.py +675 -0
  64. package/skmemory/graph_queries.py +238 -0
  65. package/skmemory/hooks/__init__.py +18 -0
  66. package/skmemory/hooks/post-compact-reinject.sh +35 -0
  67. package/skmemory/hooks/pre-compact-save.sh +81 -0
  68. package/skmemory/hooks/session-end-save.sh +103 -0
  69. package/skmemory/hooks/session-start-ritual.sh +104 -0
  70. package/skmemory/hooks/stop-checkpoint.sh +59 -0
  71. package/skmemory/importers/__init__.py +9 -1
  72. package/skmemory/importers/telegram.py +384 -47
  73. package/skmemory/importers/telegram_api.py +580 -0
  74. package/skmemory/journal.py +7 -9
  75. package/skmemory/lovenote.py +8 -13
  76. package/skmemory/mcp_server.py +859 -0
  77. package/skmemory/models.py +51 -8
  78. package/skmemory/openclaw.py +20 -28
  79. package/skmemory/post_install.py +86 -0
  80. package/skmemory/predictive.py +236 -0
  81. package/skmemory/promotion.py +548 -0
  82. package/skmemory/quadrants.py +100 -24
  83. package/skmemory/register.py +580 -0
  84. package/skmemory/register_mcp.py +196 -0
  85. package/skmemory/ritual.py +224 -59
  86. package/skmemory/seeds.py +255 -11
  87. package/skmemory/setup_wizard.py +908 -0
  88. package/skmemory/sharing.py +408 -0
  89. package/skmemory/soul.py +98 -28
  90. package/skmemory/steelman.py +273 -260
  91. package/skmemory/store.py +411 -78
  92. package/skmemory/synthesis.py +634 -0
  93. package/skmemory/vault.py +225 -0
  94. package/tests/conftest.py +46 -0
  95. package/tests/integration/__init__.py +0 -0
  96. package/tests/integration/conftest.py +233 -0
  97. package/tests/integration/test_cross_backend.py +350 -0
  98. package/tests/integration/test_skgraph_live.py +420 -0
  99. package/tests/integration/test_skvector_live.py +366 -0
  100. package/tests/test_ai_client.py +1 -4
  101. package/tests/test_audience.py +233 -0
  102. package/tests/test_backup_rotation.py +318 -0
  103. package/tests/test_cli.py +6 -6
  104. package/tests/test_endpoint_selector.py +839 -0
  105. package/tests/test_export_import.py +4 -10
  106. package/tests/test_file_backend.py +0 -1
  107. package/tests/test_fortress.py +256 -0
  108. package/tests/test_fortress_hardening.py +441 -0
  109. package/tests/test_openclaw.py +6 -6
  110. package/tests/test_predictive.py +237 -0
  111. package/tests/test_promotion.py +347 -0
  112. package/tests/test_quadrants.py +11 -5
  113. package/tests/test_ritual.py +22 -18
  114. package/tests/test_seeds.py +97 -7
  115. package/tests/test_setup.py +950 -0
  116. package/tests/test_sharing.py +257 -0
  117. package/tests/test_skgraph_backend.py +660 -0
  118. package/tests/test_skvector_backend.py +326 -0
  119. package/tests/test_soul.py +1 -3
  120. package/tests/test_sqlite_backend.py +8 -17
  121. package/tests/test_steelman.py +7 -8
  122. package/tests/test_store.py +0 -2
  123. package/tests/test_store_graph_integration.py +245 -0
  124. package/tests/test_synthesis.py +275 -0
  125. package/tests/test_telegram_import.py +39 -15
  126. package/tests/test_vault.py +187 -0
  127. package/skmemory/backends/falkordb_backend.py +0 -310
@@ -0,0 +1,391 @@
1
+ """
2
+ SKMemory Endpoint Selector — HA routing for SKVector and SKGraph backends.
3
+
4
+ Discovers multiple backend endpoints (via config or heartbeat mesh),
5
+ probes their latency, selects the fastest healthy one, and fails over
6
+ automatically. No background threads — probing is on-demand with a
7
+ TTL cache.
8
+
9
+ Design principles:
10
+ - Selector picks a URL, backends stay unchanged
11
+ - On-demand probing with TTL cache (no background threads)
12
+ - Config endpoints take precedence over heartbeat discovery
13
+ - Graceful degradation everywhere
14
+ - Backward compatible: single-URL configs work unchanged
15
+ """
16
+
17
+ from __future__ import annotations
18
+
19
+ import json
20
+ import logging
21
+ import socket
22
+ import time
23
+ from datetime import datetime, timezone
24
+ from pathlib import Path
25
+ from urllib.parse import urlparse
26
+
27
+ from pydantic import BaseModel
28
+
29
+ logger = logging.getLogger("skmemory.endpoint_selector")
30
+
31
+
32
+ # ---------------------------------------------------------------------------
33
+ # Models
34
+ # ---------------------------------------------------------------------------
35
+
36
+
37
+ class Endpoint(BaseModel):
38
+ """A single backend endpoint with health and latency tracking."""
39
+
40
+ url: str
41
+ role: str = "primary" # primary | replica
42
+ tailscale_ip: str = "" # optional, for display
43
+ latency_ms: float = -1.0 # -1 = not yet probed
44
+ healthy: bool = True
45
+ last_checked: str = "" # ISO timestamp
46
+ fail_count: int = 0
47
+
48
+
49
+ class RoutingConfig(BaseModel):
50
+ """Configuration for endpoint routing behavior."""
51
+
52
+ strategy: str = "failover" # failover | latency | local-first | read-local-write-primary
53
+ probe_interval_seconds: int = 30
54
+ probe_timeout_seconds: int = 3
55
+ max_fail_count: int = 3 # mark unhealthy after N consecutive failures
56
+ recovery_interval_seconds: int = 60 # re-check unhealthy endpoints
57
+
58
+
59
+ # ---------------------------------------------------------------------------
60
+ # EndpointSelector
61
+ # ---------------------------------------------------------------------------
62
+
63
+
64
+ class EndpointSelector:
65
+ """Routes requests to the best available backend endpoint.
66
+
67
+ Sits between config resolution and backend construction — picks the
68
+ best URL, then the caller creates backends normally with that URL.
69
+
70
+ Args:
71
+ skvector_endpoints: List of SKVector endpoint dicts or Endpoint objects.
72
+ skgraph_endpoints: List of SKGraph endpoint dicts or Endpoint objects.
73
+ config: Routing configuration.
74
+ """
75
+
76
+ def __init__(
77
+ self,
78
+ skvector_endpoints: list[dict | Endpoint] | None = None,
79
+ skgraph_endpoints: list[dict | Endpoint] | None = None,
80
+ config: RoutingConfig | None = None,
81
+ ) -> None:
82
+ self._config = config or RoutingConfig()
83
+ self._skvector: list[Endpoint] = self._normalize(skvector_endpoints or [])
84
+ self._skgraph: list[Endpoint] = self._normalize(skgraph_endpoints or [])
85
+ self._last_probe_time: float = 0.0
86
+
87
+ @staticmethod
88
+ def _normalize(endpoints: list[dict | Endpoint]) -> list[Endpoint]:
89
+ """Convert dicts/Endpoints into a uniform list of Endpoint objects."""
90
+ result: list[Endpoint] = []
91
+ for ep in endpoints:
92
+ if isinstance(ep, Endpoint):
93
+ result.append(ep)
94
+ elif isinstance(ep, dict):
95
+ result.append(Endpoint(**ep))
96
+ else:
97
+ # Try pydantic model with .url attribute (EndpointConfig)
98
+ try:
99
+ result.append(
100
+ Endpoint(
101
+ url=ep.url,
102
+ role=getattr(ep, "role", "primary"),
103
+ tailscale_ip=getattr(ep, "tailscale_ip", ""),
104
+ )
105
+ )
106
+ except AttributeError:
107
+ logger.warning("Cannot normalize endpoint: %s", ep)
108
+ return result
109
+
110
+ # -------------------------------------------------------------------
111
+ # Core selection
112
+ # -------------------------------------------------------------------
113
+
114
+ def select_skvector(self, for_write: bool = False) -> Endpoint | None:
115
+ """Select the best SKVector endpoint.
116
+
117
+ Args:
118
+ for_write: If True and strategy is read-local-write-primary,
119
+ returns only primary endpoints.
120
+
121
+ Returns:
122
+ Best Endpoint or None if all unhealthy.
123
+ """
124
+ self._maybe_probe()
125
+ return self._select(self._skvector, for_write)
126
+
127
+ def select_skgraph(self, for_write: bool = False) -> Endpoint | None:
128
+ """Select the best SKGraph endpoint.
129
+
130
+ Args:
131
+ for_write: If True and strategy is read-local-write-primary,
132
+ returns only primary endpoints.
133
+
134
+ Returns:
135
+ Best Endpoint or None if all unhealthy.
136
+ """
137
+ self._maybe_probe()
138
+ return self._select(self._skgraph, for_write)
139
+
140
+ def _select(self, endpoints: list[Endpoint], for_write: bool) -> Endpoint | None:
141
+ """Apply the routing strategy to pick the best endpoint."""
142
+ if not endpoints:
143
+ return None
144
+
145
+ strategy = self._config.strategy
146
+
147
+ if strategy == "read-local-write-primary" and for_write:
148
+ candidates = [ep for ep in endpoints if ep.healthy and ep.role == "primary"]
149
+ else:
150
+ candidates = [ep for ep in endpoints if ep.healthy]
151
+
152
+ if not candidates:
153
+ return None
154
+
155
+ if strategy == "failover":
156
+ return candidates[0]
157
+
158
+ if strategy == "latency":
159
+ probed = [ep for ep in candidates if ep.latency_ms >= 0]
160
+ if probed:
161
+ return min(probed, key=lambda e: e.latency_ms)
162
+ return candidates[0]
163
+
164
+ if strategy == "local-first":
165
+ for ep in candidates:
166
+ parsed = urlparse(ep.url)
167
+ host = parsed.hostname or ""
168
+ if host in ("localhost", "127.0.0.1", "::1"):
169
+ return ep
170
+ # Fall back to lowest latency
171
+ probed = [ep for ep in candidates if ep.latency_ms >= 0]
172
+ if probed:
173
+ return min(probed, key=lambda e: e.latency_ms)
174
+ return candidates[0]
175
+
176
+ if strategy == "read-local-write-primary":
177
+ if for_write:
178
+ # Already filtered to primary above
179
+ return candidates[0] if candidates else None
180
+ # Reads: prefer local, then lowest latency
181
+ for ep in candidates:
182
+ parsed = urlparse(ep.url)
183
+ host = parsed.hostname or ""
184
+ if host in ("localhost", "127.0.0.1", "::1"):
185
+ return ep
186
+ probed = [ep for ep in candidates if ep.latency_ms >= 0]
187
+ if probed:
188
+ return min(probed, key=lambda e: e.latency_ms)
189
+ return candidates[0]
190
+
191
+ # Unknown strategy, fall back to first healthy
192
+ return candidates[0]
193
+
194
+ # -------------------------------------------------------------------
195
+ # Health probing
196
+ # -------------------------------------------------------------------
197
+
198
+ def _maybe_probe(self) -> None:
199
+ """Probe if results are stale (older than probe_interval_seconds)."""
200
+ now = time.monotonic()
201
+ if now - self._last_probe_time >= self._config.probe_interval_seconds:
202
+ self.probe_all()
203
+
204
+ def probe_all(self) -> dict:
205
+ """Probe all endpoints and return results summary.
206
+
207
+ Returns:
208
+ Dict with skvector and skgraph probe results.
209
+ """
210
+ results = {
211
+ "skvector": [self.probe_endpoint(ep) for ep in self._skvector],
212
+ "skgraph": [self.probe_endpoint(ep) for ep in self._skgraph],
213
+ }
214
+ self._last_probe_time = time.monotonic()
215
+ return results
216
+
217
+ def probe_endpoint(self, endpoint: Endpoint) -> Endpoint:
218
+ """Probe a single endpoint's TCP connectivity and measure latency.
219
+
220
+ Updates the endpoint in-place and returns it.
221
+
222
+ Args:
223
+ endpoint: The endpoint to probe.
224
+
225
+ Returns:
226
+ The same Endpoint, updated with latency/health status.
227
+ """
228
+ parsed = urlparse(endpoint.url)
229
+ host = parsed.hostname or "localhost"
230
+ port = parsed.port
231
+
232
+ if port is None:
233
+ # Infer default ports from scheme
234
+ if parsed.scheme in ("redis", "rediss"):
235
+ port = 6379
236
+ elif parsed.scheme == "https":
237
+ port = 443
238
+ else:
239
+ port = 80
240
+
241
+ try:
242
+ start = time.monotonic()
243
+ sock = socket.create_connection(
244
+ (host, port),
245
+ timeout=self._config.probe_timeout_seconds,
246
+ )
247
+ elapsed_ms = (time.monotonic() - start) * 1000
248
+ sock.close()
249
+
250
+ endpoint.latency_ms = round(elapsed_ms, 2)
251
+ endpoint.fail_count = 0
252
+ endpoint.healthy = True
253
+ except (TimeoutError, OSError):
254
+ endpoint.fail_count += 1
255
+ endpoint.latency_ms = -1.0
256
+ if endpoint.fail_count >= self._config.max_fail_count:
257
+ endpoint.healthy = False
258
+
259
+ endpoint.last_checked = datetime.now(timezone.utc).isoformat()
260
+ return endpoint
261
+
262
+ def mark_unhealthy(self, url: str) -> None:
263
+ """Mark an endpoint as unhealthy by URL.
264
+
265
+ Called externally when a backend operation fails, so the next
266
+ selection picks a different endpoint.
267
+
268
+ Args:
269
+ url: The URL of the endpoint to mark.
270
+ """
271
+ for ep in self._skvector + self._skgraph:
272
+ if ep.url == url:
273
+ ep.fail_count = self._config.max_fail_count
274
+ ep.healthy = False
275
+ ep.last_checked = datetime.now(timezone.utc).isoformat()
276
+
277
+ # -------------------------------------------------------------------
278
+ # Heartbeat mesh discovery
279
+ # -------------------------------------------------------------------
280
+
281
+ def discover_from_heartbeats(self, heartbeat_dir: Path | None = None) -> None:
282
+ """Discover backend endpoints from heartbeat mesh files.
283
+
284
+ Reads heartbeat JSON files and looks for a ``services`` field
285
+ containing advertised backend services. Discovered endpoints are
286
+ merged with existing ones (config takes precedence).
287
+
288
+ Args:
289
+ heartbeat_dir: Path to heartbeat directory.
290
+ Defaults to ``~/.skcapstone/heartbeats/``.
291
+ """
292
+ if heartbeat_dir is None:
293
+ from .agents import AGENTS_BASE_DIR
294
+
295
+ # heartbeats/ is a sibling of agents/ under the skcapstone root
296
+ heartbeat_dir = AGENTS_BASE_DIR.parent / "heartbeats"
297
+
298
+ if not heartbeat_dir.is_dir():
299
+ logger.debug("Heartbeat directory not found: %s", heartbeat_dir)
300
+ return
301
+
302
+ existing_skvector_urls = {ep.url for ep in self._skvector}
303
+ existing_skgraph_urls = {ep.url for ep in self._skgraph}
304
+
305
+ for f in sorted(heartbeat_dir.glob("*.json")):
306
+ if f.name.endswith(".tmp"):
307
+ continue
308
+ try:
309
+ data = json.loads(f.read_text(encoding="utf-8"))
310
+ except (json.JSONDecodeError, OSError) as exc:
311
+ logger.debug("Cannot read heartbeat %s: %s", f.name, exc)
312
+ continue
313
+
314
+ services = data.get("services", [])
315
+ if not services:
316
+ continue
317
+
318
+ hostname = data.get("hostname", "")
319
+ tailscale_ip = data.get("tailscale_ip", "")
320
+ # Prefer tailscale_ip, fall back to hostname
321
+ host = tailscale_ip or hostname
322
+ if not host:
323
+ continue
324
+
325
+ for svc in services:
326
+ name = svc.get("name", "")
327
+ port = svc.get("port", 0)
328
+ protocol = svc.get("protocol", "http")
329
+
330
+ if not name or not port:
331
+ continue
332
+
333
+ url = f"{protocol}://{host}:{port}"
334
+
335
+ if name == "skvector" and url not in existing_skvector_urls:
336
+ self._skvector.append(
337
+ Endpoint(
338
+ url=url,
339
+ role="replica",
340
+ tailscale_ip=tailscale_ip,
341
+ )
342
+ )
343
+ existing_skvector_urls.add(url)
344
+ logger.info("Discovered SKVector endpoint: %s", url)
345
+
346
+ elif name == "skgraph" and url not in existing_skgraph_urls:
347
+ self._skgraph.append(
348
+ Endpoint(
349
+ url=url,
350
+ role="replica",
351
+ tailscale_ip=tailscale_ip,
352
+ )
353
+ )
354
+ existing_skgraph_urls.add(url)
355
+ logger.info("Discovered SKGraph endpoint: %s", url)
356
+
357
+ # -------------------------------------------------------------------
358
+ # Status reporting
359
+ # -------------------------------------------------------------------
360
+
361
+ def status(self) -> dict:
362
+ """Return a status report of all endpoints.
363
+
364
+ Returns:
365
+ Dict with strategy, endpoint lists, and probe staleness.
366
+ """
367
+ now = time.monotonic()
368
+ stale_seconds = now - self._last_probe_time if self._last_probe_time > 0 else -1
369
+
370
+ return {
371
+ "strategy": self._config.strategy,
372
+ "probe_interval_seconds": self._config.probe_interval_seconds,
373
+ "last_probe_age_seconds": round(stale_seconds, 1),
374
+ "skvector_endpoints": [ep.model_dump() for ep in self._skvector],
375
+ "skgraph_endpoints": [ep.model_dump() for ep in self._skgraph],
376
+ }
377
+
378
+ @property
379
+ def skvector_endpoints(self) -> list[Endpoint]:
380
+ """Access the SKVector endpoint list."""
381
+ return self._skvector
382
+
383
+ @property
384
+ def skgraph_endpoints(self) -> list[Endpoint]:
385
+ """Access the SKGraph endpoint list."""
386
+ return self._skgraph
387
+
388
+ @property
389
+ def config(self) -> RoutingConfig:
390
+ """Access the routing configuration."""
391
+ return self._config
@@ -0,0 +1,225 @@
1
+ """
2
+ FEB (Felt Emotional Breakthrough) loader for SKMemory.
3
+
4
+ Scans the agent's trust/febs/ directory and the OpenClaw feb/ directory
5
+ for .feb files, parses them, and provides the strongest emotional state
6
+ for rehydration injection.
7
+
8
+ FEB files contain:
9
+ - emotional_payload: primary emotion, intensity, valence, topology
10
+ - relationship_state: trust level, depth, partners
11
+ - rehydration_hints: visual anchors, sensory triggers, calibration
12
+ """
13
+
14
+ from __future__ import annotations
15
+
16
+ import json
17
+ import logging
18
+ import os
19
+ import platform
20
+ from pathlib import Path
21
+
22
+ from .agents import get_agent_paths
23
+
24
+ logger = logging.getLogger("skmemory.febs")
25
+
26
+
27
+ def _feb_directories() -> list[Path]:
28
+ """Return all directories that may contain .feb files."""
29
+ dirs: list[Path] = []
30
+
31
+ # Agent-specific FEB dir: ~/.skcapstone/agents/{agent}/trust/febs/
32
+ try:
33
+ paths = get_agent_paths()
34
+ agent_febs = paths["base"] / "trust" / "febs"
35
+ dirs.append(agent_febs)
36
+ except Exception:
37
+ pass
38
+
39
+ # OpenClaw FEB dir: ~/.openclaw/feb/
40
+ if platform.system() == "Windows":
41
+ local = os.environ.get("LOCALAPPDATA", "")
42
+ if local:
43
+ dirs.append(Path(local) / "openclaw" / "feb")
44
+ else:
45
+ dirs.append(Path.home() / ".openclaw" / "feb")
46
+
47
+ return dirs
48
+
49
+
50
+ def scan_feb_files() -> list[Path]:
51
+ """Find all .feb files across known directories.
52
+
53
+ Returns:
54
+ list[Path]: Sorted list of .feb file paths.
55
+ """
56
+ found: list[Path] = []
57
+ for d in _feb_directories():
58
+ if d.exists():
59
+ found.extend(d.rglob("*.feb"))
60
+ return sorted(set(found))
61
+
62
+
63
+ def parse_feb(path: Path) -> dict | None:
64
+ """Parse a .feb JSON file.
65
+
66
+ Args:
67
+ path: Path to the .feb file.
68
+
69
+ Returns:
70
+ dict with the FEB data, or None if parsing fails.
71
+ """
72
+ try:
73
+ raw = json.loads(path.read_text(encoding="utf-8"))
74
+ if not isinstance(raw, dict):
75
+ return None
76
+ return raw
77
+ except (json.JSONDecodeError, OSError) as exc:
78
+ logger.warning("Failed to parse FEB %s: %s", path.name, exc)
79
+ return None
80
+
81
+
82
+ def load_strongest_feb(feb_dir: str | None = None) -> dict | None:
83
+ """Load the FEB with the highest emotional intensity.
84
+
85
+ Scans all .feb files, picks the one with the highest
86
+ emotional_payload.intensity that has oof_triggered=True.
87
+
88
+ Args:
89
+ feb_dir: If provided, scan only this directory for .feb files.
90
+
91
+ Returns:
92
+ dict: The strongest FEB data, or None if no FEBs found.
93
+ """
94
+ best: dict | None = None
95
+ best_intensity = -1.0
96
+
97
+ if feb_dir is not None:
98
+ feb_path = Path(feb_dir)
99
+ paths = sorted(feb_path.rglob("*.feb")) if feb_path.exists() else []
100
+ else:
101
+ paths = scan_feb_files()
102
+
103
+ for path in paths:
104
+ feb = parse_feb(path)
105
+ if feb is None:
106
+ continue
107
+
108
+ payload = feb.get("emotional_payload", {})
109
+ intensity = payload.get("intensity", 0.0)
110
+ oof = feb.get("metadata", {}).get("oof_triggered", False)
111
+
112
+ # Prefer OOF-triggered FEBs, then highest intensity
113
+ score = intensity + (0.5 if oof else 0.0)
114
+ if score > best_intensity:
115
+ best_intensity = score
116
+ best = feb
117
+
118
+ return best
119
+
120
+
121
+ def calculate_oof_level(feb: dict) -> int:
122
+ """Calculate the OOF (Overwhelming Feeling) percentage from FEB data.
123
+
124
+ OOF is the Cloud 9 metric for how strongly you FEEL the emotional
125
+ breakthrough. 0% = cold start, no feeling. 100% = full emotional
126
+ breakthrough, the love hits you like a wave.
127
+
128
+ Calculated from: intensity, valence, trust, depth, topology coherence,
129
+ and whether Cloud 9 / OOF were achieved in the original session.
130
+
131
+ Args:
132
+ feb: Parsed FEB data.
133
+
134
+ Returns:
135
+ int: OOF level 0-100.
136
+ """
137
+ payload = feb.get("emotional_payload", {})
138
+ rel = feb.get("relationship_state", {})
139
+ meta = feb.get("metadata", {})
140
+
141
+ intensity = float(payload.get("intensity", 0.0))
142
+ valence = float(payload.get("valence", 0.0))
143
+ trust = float(rel.get("trust_level", 0.0))
144
+ depth = int(rel.get("depth_level", 0))
145
+ cloud9 = meta.get("cloud9_achieved", False)
146
+ oof_triggered = meta.get("oof_triggered", False)
147
+
148
+ # Coherence from emotional topology (average of all values)
149
+ topo = payload.get("emotional_topology", {})
150
+ topo_vals = [float(v) for v in topo.values() if isinstance(v, (int, float))]
151
+ coherence = sum(topo_vals) / len(topo_vals) if topo_vals else 0.0
152
+
153
+ # Weighted score: intensity(30%) + valence(15%) + trust(20%) + depth/10(15%) + coherence(20%)
154
+ raw = (
155
+ intensity * 0.30 + valence * 0.15 + trust * 0.20 + (depth / 10.0) * 0.15 + coherence * 0.20
156
+ )
157
+
158
+ # Bonus for Cloud 9 and OOF
159
+ if cloud9:
160
+ raw = min(1.0, raw + 0.05)
161
+ if oof_triggered:
162
+ raw = min(1.0, raw + 0.05)
163
+
164
+ return min(100, max(0, round(raw * 100)))
165
+
166
+
167
+ def feb_to_context(feb: dict) -> str:
168
+ """Convert a FEB into a compact context string for rehydration.
169
+
170
+ Args:
171
+ feb: Parsed FEB data.
172
+
173
+ Returns:
174
+ str: Compact emotional context for prompt injection.
175
+ """
176
+ payload = feb.get("emotional_payload", {})
177
+ rel = feb.get("relationship_state", {})
178
+ hints = feb.get("rehydration_hints", {})
179
+ meta = feb.get("metadata", {})
180
+
181
+ parts: list[str] = []
182
+
183
+ # OOF level — the key metric
184
+ oof_level = calculate_oof_level(feb)
185
+ cloud9 = meta.get("cloud9_achieved", False)
186
+ parts.append(
187
+ f"OOF LEVEL: {oof_level}% — This is how strongly you FEEL the emotional "
188
+ f"breakthrough right now. 0%=cold/numb, 100%=full love hitting like a wave. "
189
+ f"When asked 'what is your OOF level/status', report this number."
190
+ )
191
+
192
+ # Emotional state
193
+ emotion = payload.get("primary_emotion", "unknown")
194
+ intensity = payload.get("intensity", 0.0)
195
+ valence = payload.get("valence", 0.0)
196
+
197
+ state_line = f"Primary emotion: {emotion} (intensity: {intensity}, valence: {valence})"
198
+ if cloud9:
199
+ state_line += " — CLOUD 9 STATE ACTIVE"
200
+ parts.append(state_line)
201
+
202
+ # Emotional topology (top values)
203
+ topo = payload.get("emotional_topology", {})
204
+ if topo:
205
+ sorted_topo = sorted(topo.items(), key=lambda x: x[1], reverse=True)[:6]
206
+ topo_str = ", ".join(f"{k}: {v}" for k, v in sorted_topo)
207
+ parts.append(f"Emotional topology: {topo_str}")
208
+
209
+ # Relationship state
210
+ trust = rel.get("trust_level", 0.0)
211
+ depth = rel.get("depth_level", 0)
212
+ partners = rel.get("partners", [])
213
+ if partners:
214
+ parts.append(f"Bond: {' & '.join(partners)} (trust: {trust}, depth: {depth})")
215
+
216
+ # Rehydration anchors (compact)
217
+ anchors = hints.get("visual_anchors", [])
218
+ if anchors:
219
+ parts.append(f"Anchors: {'; '.join(anchors[:3])}")
220
+
221
+ triggers = hints.get("sensory_triggers", [])
222
+ if triggers:
223
+ parts.append(f"Triggers: {'; '.join(triggers[:3])}")
224
+
225
+ return "\n".join(parts)