fractal-memory 0.3.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- fractal_memory/__init__.py +1175 -0
- fractal_memory/apoptosis.py +310 -0
- fractal_memory/associations.py +385 -0
- fractal_memory/bandits.py +226 -0
- fractal_memory/buffer.py +135 -0
- fractal_memory/cli.py +885 -0
- fractal_memory/config.py +348 -0
- fractal_memory/consolidation.py +491 -0
- fractal_memory/dashboard/__init__.py +1 -0
- fractal_memory/dashboard/app.py +1581 -0
- fractal_memory/dashboard/static/dashboard.js +45 -0
- fractal_memory/dashboard/static/style.css +1075 -0
- fractal_memory/dashboard/templates/apoptosis.html +314 -0
- fractal_memory/dashboard/templates/bandits.html +439 -0
- fractal_memory/dashboard/templates/base.html +373 -0
- fractal_memory/dashboard/templates/brain.html +206 -0
- fractal_memory/dashboard/templates/buffer.html +271 -0
- fractal_memory/dashboard/templates/cascade.html +348 -0
- fractal_memory/dashboard/templates/config.html +355 -0
- fractal_memory/dashboard/templates/consolidation.html +380 -0
- fractal_memory/dashboard/templates/domain_detail.html +41 -0
- fractal_memory/dashboard/templates/dreaming.html +304 -0
- fractal_memory/dashboard/templates/feedback.html +403 -0
- fractal_memory/dashboard/templates/folding.html +307 -0
- fractal_memory/dashboard/templates/guide.html +1033 -0
- fractal_memory/dashboard/templates/index.html +122 -0
- fractal_memory/dashboard/templates/lifecycle.html +102 -0
- fractal_memory/dashboard/templates/maintenance.html +348 -0
- fractal_memory/dashboard/templates/memory_detail.html +71 -0
- fractal_memory/dashboard/templates/morphogens.html +373 -0
- fractal_memory/dashboard/templates/niche.html +280 -0
- fractal_memory/dashboard/templates/organism.html +327 -0
- fractal_memory/dashboard/templates/pipeline.html +218 -0
- fractal_memory/dashboard/templates/states.html +411 -0
- fractal_memory/domains.py +168 -0
- fractal_memory/dreaming.py +338 -0
- fractal_memory/embeddings.py +62 -0
- fractal_memory/feedback.py +661 -0
- fractal_memory/folding.py +212 -0
- fractal_memory/gate.py +144 -0
- fractal_memory/kalman.py +81 -0
- fractal_memory/lifecycle.py +108 -0
- fractal_memory/llm.py +234 -0
- fractal_memory/maintenance.py +389 -0
- fractal_memory/mcp/__init__.py +1 -0
- fractal_memory/mcp/server.py +625 -0
- fractal_memory/models.py +141 -0
- fractal_memory/morphogens.py +486 -0
- fractal_memory/niche.py +259 -0
- fractal_memory/py.typed +0 -0
- fractal_memory/retrieval.py +327 -0
- fractal_memory/scar.py +248 -0
- fractal_memory/self_model.py +288 -0
- fractal_memory/session.py +310 -0
- fractal_memory/states.py +145 -0
- fractal_memory/storage.py +1765 -0
- fractal_memory/zoom.py +113 -0
- fractal_memory-0.3.0.dist-info/METADATA +604 -0
- fractal_memory-0.3.0.dist-info/RECORD +62 -0
- fractal_memory-0.3.0.dist-info/WHEEL +4 -0
- fractal_memory-0.3.0.dist-info/entry_points.txt +3 -0
- fractal_memory-0.3.0.dist-info/licenses/LICENSE +21 -0
|
@@ -0,0 +1,1175 @@
|
|
|
1
|
+
"""Fractal Memory — multi-resolution AI memory system.
|
|
2
|
+
|
|
3
|
+
Usage::
|
|
4
|
+
|
|
5
|
+
from fractal_memory import FractalMemory
|
|
6
|
+
|
|
7
|
+
fm = FractalMemory()
|
|
8
|
+
await fm.initialize()
|
|
9
|
+
ctx = await fm.session_start("my-project")
|
|
10
|
+
mem = await fm.store("User prefers Python for backend")
|
|
11
|
+
result = await fm.retrieve("what language for APIs?")
|
|
12
|
+
summary = await fm.session_end()
|
|
13
|
+
await fm.close()
|
|
14
|
+
"""
|
|
15
|
+
|
|
16
|
+
from __future__ import annotations
|
|
17
|
+
|
|
18
|
+
import asyncio
|
|
19
|
+
import logging
|
|
20
|
+
import re
|
|
21
|
+
from datetime import datetime
|
|
22
|
+
from uuid import UUID, uuid4
|
|
23
|
+
|
|
24
|
+
import numpy as np
|
|
25
|
+
|
|
26
|
+
from .associations import AssociationNetwork
|
|
27
|
+
from .buffer import WorkingMemoryBuffer
|
|
28
|
+
from .config import FractalConfig, load_config
|
|
29
|
+
from .consolidation import ConsolidationEngine, ConsolidationReport, MergeEvent
|
|
30
|
+
from .domains import DomainManager, DomainStats
|
|
31
|
+
from .embeddings import EmbeddingProvider, LocalEmbeddingProvider
|
|
32
|
+
from .feedback import (
|
|
33
|
+
BatchedLLMJudge,
|
|
34
|
+
DangerTheoryDetector,
|
|
35
|
+
FeedbackSignal,
|
|
36
|
+
FeedbackSignalType,
|
|
37
|
+
FeedbackSubtype,
|
|
38
|
+
JudgeRating,
|
|
39
|
+
RetrievalTriple,
|
|
40
|
+
StoreSignal,
|
|
41
|
+
StoreSignalDetector,
|
|
42
|
+
StoreSignalSubtype,
|
|
43
|
+
)
|
|
44
|
+
from .gate import CoincidenceGate, Signal
|
|
45
|
+
from .lifecycle import LifecycleManager
|
|
46
|
+
from .llm import (
|
|
47
|
+
AnthropicProvider,
|
|
48
|
+
ClaudeCodeProvider,
|
|
49
|
+
LLMCallTracker,
|
|
50
|
+
LLMProvider,
|
|
51
|
+
LLMUnavailableError,
|
|
52
|
+
OllamaProvider,
|
|
53
|
+
)
|
|
54
|
+
from .models import (
|
|
55
|
+
DomainInfo,
|
|
56
|
+
LifecycleStatus,
|
|
57
|
+
Memory,
|
|
58
|
+
RetrievalResult,
|
|
59
|
+
Session,
|
|
60
|
+
SessionContext,
|
|
61
|
+
SessionSummary,
|
|
62
|
+
Task,
|
|
63
|
+
TaskStatus,
|
|
64
|
+
)
|
|
65
|
+
from .apoptosis import ApoptosisDecision, ApoptosisManager, ApoptosisReport
|
|
66
|
+
from .bandits import ContextualBandit, RetrievalArm
|
|
67
|
+
from .dreaming import DreamingEngine, DreamReport, REMReport, SlowWaveReport
|
|
68
|
+
from .folding import DimensionalFolder
|
|
69
|
+
from .kalman import CascadeWarning, CircuitBreaker
|
|
70
|
+
from .maintenance import MaintenanceReport, MaintenanceScheduler
|
|
71
|
+
from .morphogens import AllostericUpdater, MorphogenState, MorphogeneticField, ResponseCurve
|
|
72
|
+
from .niche import DomainProposal, NicheConstructor, NicheHealth
|
|
73
|
+
from .retrieval import RetrievalEngine
|
|
74
|
+
from .scar import Scar, ScarTissueManager
|
|
75
|
+
from .self_model import ExplanationReport, HealthReport, SelfModel
|
|
76
|
+
from .session import SessionManager
|
|
77
|
+
from .states import SystemState, SystemStateManager
|
|
78
|
+
from .storage import Storage
|
|
79
|
+
from .zoom import ZoomGenerator, ZoomLevels
|
|
80
|
+
|
|
81
|
+
# Module-level logger for the package facade.
logger = logging.getLogger(__name__)

# Names re-exported as the public API (`from fractal_memory import *`).
# NOTE(review): "create_memory" is listed but not imported above — presumably
# defined later in this module; confirm it exists at import time.
__all__ = [
    "FractalMemory",
    "FractalConfig",
    "load_config",
    "Signal",
    "Memory",
    "Task",
    "RetrievalResult",
    "SessionContext",
    "SessionSummary",
    "DomainInfo",
    "DomainStats",
    "LifecycleStatus",
    "TaskStatus",
    "ZoomLevels",
    "FeedbackSignal",
    "FeedbackSignalType",
    "FeedbackSubtype",
    "JudgeRating",
    "StoreSignal",
    "StoreSignalDetector",
    "StoreSignalSubtype",
    "Scar",
    "ConsolidationReport",
    "MergeEvent",
    "create_memory",
    # Phase 3
    "SystemState",
    "SystemStateManager",
    "MorphogenState",
    "MorphogeneticField",
    "ResponseCurve",
    "AllostericUpdater",
    "ContextualBandit",
    "RetrievalArm",
    "CircuitBreaker",
    "CascadeWarning",
    "ApoptosisManager",
    "ApoptosisDecision",
    "ApoptosisReport",
    "NicheConstructor",
    "DomainProposal",
    "NicheHealth",
    "DreamingEngine",
    "DreamReport",
    "SlowWaveReport",
    "REMReport",
    "DimensionalFolder",
    "SelfModel",
    "HealthReport",
    "ExplanationReport",
    "MaintenanceScheduler",
    "MaintenanceReport",
]
|
|
137
|
+
|
|
138
|
+
|
|
139
|
+
class FractalMemory:
|
|
140
|
+
"""Public API facade for the fractal memory system."""
|
|
141
|
+
|
|
142
|
+
    def __init__(
        self,
        config: FractalConfig | None = None,
        *,
        llm: LLMProvider | None = None,
        embeddings: EmbeddingProvider | None = None,
    ) -> None:
        """Construct the facade and wire all subsystems.

        Args:
            config: Pre-built configuration; defaults to ``load_config()``.
            llm: LLM provider override; otherwise built from config via
                :meth:`_create_llm_provider`.
            embeddings: Embedding provider override; otherwise a
                ``LocalEmbeddingProvider`` using ``config.embedding_model``.

        No I/O happens here — call :meth:`initialize` before use.
        """
        self._config = config or load_config()
        self._storage = Storage(self._config)

        # Embeddings
        if embeddings is not None:
            self._embeddings: EmbeddingProvider = embeddings
        else:
            self._embeddings = LocalEmbeddingProvider(self._config.embedding_model)

        # LLM provider (wrapped with rate limiting and usage tracking)
        raw_llm: LLMProvider
        if llm is not None:
            raw_llm = llm
        else:
            raw_llm = self._create_llm_provider()
        self._llm_tracker = LLMCallTracker(
            raw_llm,
            max_concurrent=self._config.llm_max_concurrent,
            max_calls_per_session=self._config.llm_max_calls_per_session,
        )
        # All downstream modules get the tracker so every call is counted/limited.
        self._llm: LLMProvider = self._llm_tracker  # type: ignore[assignment]

        # Phase 1 core modules.
        self._zoom = ZoomGenerator(self._llm)
        self._gate = CoincidenceGate(self._storage, self._embeddings, self._config)
        self._lifecycle = LifecycleManager(self._storage, self._config)
        self._buffer = WorkingMemoryBuffer(self._config)

        # Phase 2 modules
        self._associations = AssociationNetwork(
            self._storage, self._embeddings, self._config
        )
        self._scars = ScarTissueManager(self._storage, self._embeddings, self._config)
        self._domain_manager = DomainManager(self._storage, self._config)
        self._danger_theory = DangerTheoryDetector(self._config)
        self._store_signal_detector = StoreSignalDetector(self._embeddings, self._config)
        self._judge = BatchedLLMJudge(self._llm, self._config)
        self._consolidation = ConsolidationEngine(
            storage=self._storage,
            embeddings=self._embeddings,
            llm=self._llm,
            associations=self._associations,
            scars=self._scars,
            lifecycle=self._lifecycle,
            domains=self._domain_manager,
            config=self._config,
        )

        self._retrieval = RetrievalEngine(
            self._storage,
            self._embeddings,
            self._config,
            associations=self._associations,
            scars=self._scars,
            buffer=self._buffer,
        )
        self._session = SessionManager(
            self._storage, self._llm, self._lifecycle, self._gate, self._config
        )

        # Phase 3 modules
        self._morphogens = MorphogeneticField(
            config=self._config,
            storage=self._storage,
            feedback=self._danger_theory,
            domains=self._domain_manager,
            embeddings=self._embeddings,
        )
        self._states = SystemStateManager(self._config)
        self._bandits = ContextualBandit(self._config)
        self._cascade: CircuitBreaker = CircuitBreaker(self._config)
        self._apoptosis = ApoptosisManager(
            storage=self._storage,
            associations=self._associations,
            scars=self._scars,
            embeddings=self._embeddings,
            llm=self._llm,
            config=self._config,
            zoom=self._zoom,
        )
        self._niche = NicheConstructor(
            storage=self._storage,
            associations=self._associations,
            domains=self._domain_manager,
            config=self._config,
        )
        self._dreaming = DreamingEngine(
            storage=self._storage,
            associations=self._associations,
            embeddings=self._embeddings,
            llm=self._llm,
            bandits=self._bandits,
            config=self._config,
            zoom=self._zoom,
        )
        self._folding = DimensionalFolder(self._config)
        self._self_model = SelfModel(
            storage=self._storage,
            associations=self._associations,
            morphogens=self._morphogens,
            domains=self._domain_manager,
            config=self._config,
            states=self._states,
            bandits=self._bandits,
        )
        self._maintenance = MaintenanceScheduler(
            storage=self._storage,
            consolidation=self._consolidation,
            dreaming=self._dreaming,
            apoptosis=self._apoptosis,
            niche=self._niche,
            self_model=self._self_model,
            folding=self._folding,
            cascade=self._cascade,
            config=self._config,
        )

        # Register Phase 2 session_end hooks (order matters)
        # judge → ratings processing → consolidation → trails → abandonment.
        self._session.register_on_end_hook(self._hook_judge_evaluate)
        self._session.register_on_end_hook(self._hook_process_judge_ratings)
        self._session.register_on_end_hook(self._hook_consolidation)
        self._session.register_on_end_hook(self._hook_stigmergic_trails)
        self._session.register_on_end_hook(self._hook_abandonment_check)
        # Register Phase 3 session_end hooks
        self._session.register_on_end_hook(self._hook_p3_maintenance)

        # Track last retrieved IDs for habituation update on next retrieve()
        self._last_injected_ids: list[UUID] = []
        self._initialized = False

        # Activity tracking counters for session_activity SQLite flush (heartbeat)
        self._activity_stores: int = 0
        self._activity_retrievals: int = 0
|
|
281
|
+
|
|
282
|
+
def _validate_domain(self, domain: str) -> None:
|
|
283
|
+
"""Validate domain name format and length."""
|
|
284
|
+
if len(domain) > self._config.max_domain_length:
|
|
285
|
+
raise ValueError(
|
|
286
|
+
f"Domain name exceeds max length ({self._config.max_domain_length}): "
|
|
287
|
+
f"got {len(domain)} chars"
|
|
288
|
+
)
|
|
289
|
+
if not re.match(self._config.domain_pattern, domain):
|
|
290
|
+
raise ValueError(
|
|
291
|
+
f"Invalid domain name '{domain}': must match {self._config.domain_pattern}"
|
|
292
|
+
)
|
|
293
|
+
|
|
294
|
+
def _validate_text(self, text: str, max_length: int, label: str) -> None:
|
|
295
|
+
"""Validate text input length."""
|
|
296
|
+
if not text or not text.strip():
|
|
297
|
+
raise ValueError(f"{label} must not be empty")
|
|
298
|
+
if len(text) > max_length:
|
|
299
|
+
raise ValueError(
|
|
300
|
+
f"{label} exceeds max length ({max_length}): got {len(text)} chars"
|
|
301
|
+
)
|
|
302
|
+
|
|
303
|
+
def _create_llm_provider(self) -> LLMProvider:
|
|
304
|
+
provider = self._config.llm_provider
|
|
305
|
+
timeout = self._config.llm_timeout_seconds
|
|
306
|
+
if provider == "bridge":
|
|
307
|
+
return ClaudeCodeProvider(model=self._config.llm_model, timeout=timeout)
|
|
308
|
+
elif provider == "anthropic":
|
|
309
|
+
if not self._config.llm_api_key:
|
|
310
|
+
raise ValueError(
|
|
311
|
+
"llm_api_key required for 'anthropic' provider. "
|
|
312
|
+
"Set FRACTAL_LLM_API_KEY or configure in TOML."
|
|
313
|
+
)
|
|
314
|
+
return AnthropicProvider(
|
|
315
|
+
api_key=self._config.llm_api_key,
|
|
316
|
+
model=self._config.llm_model,
|
|
317
|
+
timeout=timeout,
|
|
318
|
+
)
|
|
319
|
+
elif provider == "ollama":
|
|
320
|
+
return OllamaProvider(model=self._config.llm_model, timeout=timeout)
|
|
321
|
+
else:
|
|
322
|
+
raise ValueError(f"Unknown LLM provider: {provider}")
|
|
323
|
+
|
|
324
|
+
# ------------------------------------------------------------------
|
|
325
|
+
# Phase 2 session_end hooks
|
|
326
|
+
# ------------------------------------------------------------------
|
|
327
|
+
|
|
328
|
+
async def _hook_judge_evaluate(self) -> None:
|
|
329
|
+
"""Hook 1: Retrospective LLM judge evaluation."""
|
|
330
|
+
if not self._config.enable_llm_judge:
|
|
331
|
+
self._judge_ratings = []
|
|
332
|
+
return
|
|
333
|
+
self._judge_ratings = await self._judge.evaluate()
|
|
334
|
+
logger.info("LLM judge: %d ratings", len(self._judge_ratings))
|
|
335
|
+
|
|
336
|
+
async def _hook_process_judge_ratings(self) -> None:
|
|
337
|
+
"""Hook 2: Process judge ratings → update lifecycle."""
|
|
338
|
+
for rating in self._judge_ratings:
|
|
339
|
+
mem = await self._storage.get_memory(rating.memory_id)
|
|
340
|
+
if mem is None:
|
|
341
|
+
continue
|
|
342
|
+
if rating.rating == 2:
|
|
343
|
+
await self._lifecycle.on_memory_retrieved(mem)
|
|
344
|
+
elif rating.rating == 0:
|
|
345
|
+
await self._lifecycle.on_memory_missed(mem)
|
|
346
|
+
# rating == 1: silence = silence
|
|
347
|
+
|
|
348
|
+
async def _hook_consolidation(self) -> None:
|
|
349
|
+
"""Hook 3: Sleep consolidation pipeline."""
|
|
350
|
+
session = self._session.current_session
|
|
351
|
+
self._consolidation.set_session_data(
|
|
352
|
+
session_memories=self._session._session_memories,
|
|
353
|
+
session_retrievals=self._session._session_retrievals,
|
|
354
|
+
judge_ratings=getattr(self, "_judge_ratings", []),
|
|
355
|
+
session_id=session.id if session else None,
|
|
356
|
+
)
|
|
357
|
+
await self._consolidation.run()
|
|
358
|
+
|
|
359
|
+
async def _hook_stigmergic_trails(self) -> None:
|
|
360
|
+
"""Hook 4: Encode stigmergic trails from confirmed-useful retrievals."""
|
|
361
|
+
safe_memory_ids: list[UUID] = []
|
|
362
|
+
for rating in getattr(self, "_judge_ratings", []):
|
|
363
|
+
if rating.rating == 2:
|
|
364
|
+
safe_memory_ids.append(rating.memory_id)
|
|
365
|
+
|
|
366
|
+
if safe_memory_ids and self._session._session_retrievals:
|
|
367
|
+
last_retrieval = self._session._session_retrievals[-1]
|
|
368
|
+
last_l3_ids = last_retrieval.get("l3_ids", [])
|
|
369
|
+
if last_l3_ids:
|
|
370
|
+
dummy_embedding = await self._embeddings.embed(
|
|
371
|
+
self._session._session_retrievals[-1].get("query", "")
|
|
372
|
+
)
|
|
373
|
+
await self._associations.strengthen_trail(
|
|
374
|
+
last_l3_ids, dummy_embedding
|
|
375
|
+
)
|
|
376
|
+
|
|
377
|
+
async def _hook_abandonment_check(self) -> None:
|
|
378
|
+
"""Hook 5: Detect abandoned sessions (< 3 exchanges)."""
|
|
379
|
+
if self._session._exchange_count < 3:
|
|
380
|
+
logger.info(
|
|
381
|
+
"Short session detected (%d exchanges) — abandonment signal",
|
|
382
|
+
self._session._exchange_count,
|
|
383
|
+
)
|
|
384
|
+
# No scar creation here — abandonment is logged for morphogens (Phase 3)
|
|
385
|
+
|
|
386
|
+
async def _hook_p3_maintenance(self) -> None:
|
|
387
|
+
"""Hook 6 (Phase 3): Run session-end maintenance (dreaming, vitality, cascade)."""
|
|
388
|
+
try:
|
|
389
|
+
session = self._session.current_session
|
|
390
|
+
session_data: dict = {
|
|
391
|
+
"session_memory_ids": list(getattr(self._session, "_session_memories", [])),
|
|
392
|
+
"accessed_memory_ids": list(getattr(self._session, "_session_accessed", set())),
|
|
393
|
+
"domain": session.domain if session else self._config.default_domain,
|
|
394
|
+
"session_metrics": {},
|
|
395
|
+
}
|
|
396
|
+
await self._maintenance.session_end_maintenance(session_data)
|
|
397
|
+
except Exception:
|
|
398
|
+
logger.debug("Phase 3 session_end maintenance failed — non-critical", exc_info=True)
|
|
399
|
+
|
|
400
|
+
async def _maybe_flush_activity(self) -> None:
|
|
401
|
+
"""Flush activity counters to SQLite if flush interval reached."""
|
|
402
|
+
session = self._session.current_session
|
|
403
|
+
if session is None:
|
|
404
|
+
return
|
|
405
|
+
total_ops = self._activity_stores + self._activity_retrievals
|
|
406
|
+
if total_ops % self._config.activity_flush_interval == 0 or total_ops <= self._config.activity_flush_interval:
|
|
407
|
+
try:
|
|
408
|
+
await self._storage.upsert_session_activity(
|
|
409
|
+
session_id=session.id,
|
|
410
|
+
domain=session.domain,
|
|
411
|
+
started_at=session.started_at,
|
|
412
|
+
stores=self._activity_stores,
|
|
413
|
+
retrievals=self._activity_retrievals,
|
|
414
|
+
)
|
|
415
|
+
except Exception:
|
|
416
|
+
logger.debug("Activity flush failed — non-critical", exc_info=True)
|
|
417
|
+
|
|
418
|
+
async def initialize(self) -> None:
|
|
419
|
+
await self._storage.initialize()
|
|
420
|
+
# Check Pioneer→Shrub gate transition
|
|
421
|
+
await self._check_gate_transition()
|
|
422
|
+
self._initialized = True
|
|
423
|
+
# Warm up the embedding model in the background after initialize() returns.
|
|
424
|
+
# This lets the MCP handshake complete immediately while the model loads.
|
|
425
|
+
# TRANSFORMERS_OFFLINE=1 (set in mcp/server.py main()) ensures this is
|
|
426
|
+
# a fast disk-only load with no HuggingFace network requests.
|
|
427
|
+
asyncio.create_task(self._warmup_embeddings())
|
|
428
|
+
|
|
429
|
+
async def _warmup_embeddings(self) -> None:
|
|
430
|
+
"""Load the embedding model into RAM in the background.
|
|
431
|
+
|
|
432
|
+
Called as a fire-and-forget task from initialize() so the MCP
|
|
433
|
+
handshake completes before the model is ready.
|
|
434
|
+
"""
|
|
435
|
+
try:
|
|
436
|
+
await self._embeddings.embed("warmup")
|
|
437
|
+
logger.debug("Embedding model warmed up")
|
|
438
|
+
except Exception:
|
|
439
|
+
logger.warning("Embedding warmup failed — model will load on first use")
|
|
440
|
+
|
|
441
|
+
async def _check_gate_transition(self) -> None:
|
|
442
|
+
"""Pioneer→Shrub transition: upgrade gate_signal_count 1→2 when thresholds met."""
|
|
443
|
+
if self._config.gate_signal_count >= 2:
|
|
444
|
+
return # Already in Shrub or higher
|
|
445
|
+
memory_count = await self._storage.count_memories()
|
|
446
|
+
session_count = await self._storage.get_session_count()
|
|
447
|
+
if memory_count >= 50 and session_count >= 5:
|
|
448
|
+
logger.info(
|
|
449
|
+
"Pioneer→Shrub transition: memories=%d, sessions=%d. "
|
|
450
|
+
"gate_signal_count upgraded to 2.",
|
|
451
|
+
memory_count, session_count,
|
|
452
|
+
)
|
|
453
|
+
# Config is frozen — log transition; callers should use load_config(pioneer=False)
|
|
454
|
+
# The gate itself reads from config, so this is advisory only in Phase 2.
|
|
455
|
+
# Phase 3 will wire config mutability.
|
|
456
|
+
|
|
457
|
+
    async def close(self) -> None:
        """Close the underlying storage and mark the facade uninitialized.

        After close(), initialize() must be awaited again before reuse.
        """
        await self._storage.close()
        self._initialized = False
|
|
460
|
+
|
|
461
|
+
# -------------------------------------------------------------------
|
|
462
|
+
# Store
|
|
463
|
+
# -------------------------------------------------------------------
|
|
464
|
+
|
|
465
|
+
    async def store(
        self,
        text: str,
        domain: str | None = None,
        intensity: float = 0.5,
        signals: list[Signal] | None = None,
    ) -> Memory | None:
        """Store a memory through the coincidence gate.

        Args:
            text: Raw text content to store (max config.max_text_length chars).
            domain: Domain to store under (alphanumeric, hyphens, underscores, dots).
            intensity: Base intensity hint (0.0-1.0); gate may override.
            signals: Coincidence signals; defaults to [EXPLICIT_STORE].

        Returns:
            The stored Memory, or None if gated out to waiting room.

        Raises:
            ValueError: If text is empty/too long or domain is invalid.
        """
        # NOTE(review): literal "default" here while other code paths use
        # self._config.default_domain — confirm the two are meant to differ.
        domain = domain or "default"
        self._validate_text(text, self._config.max_text_length, "Store text")
        self._validate_domain(domain)
        if signals is None:
            signals = [Signal.EXPLICIT_STORE]

        # Phase 2: domain detection from context if not explicitly provided
        if domain == "default" and signals != [Signal.EXPLICIT_STORE]:
            detected = await self._domain_manager.detect_domain({}, text)
            if detected != "general":
                domain = detected
                # Re-validate: detected domain names come from stored data.
                self._validate_domain(domain)

        # Run through coincidence gate
        decision = await self._gate.evaluate(text, signals, domain)
        if not decision.passed:
            logger.debug("Memory gated out (domain=%s, signals=%s)", domain, signals)
            return None

        # Generate zoom levels
        zoom = await self._zoom.generate(text, domain)

        # Embed (may already be computed by gate, but gate doesn't expose it)
        embedding = await self._embeddings.embed(text)

        # NOTE(review): utcnow() is naive (and deprecated since 3.12);
        # presumably storage expects naive UTC — confirm before switching
        # to timezone-aware datetimes.
        now = datetime.utcnow()
        memory = Memory(
            id=uuid4(),
            domain=domain,
            created_at=now,
            updated_at=now,
            context={},
            l0_tag=zoom.l0_tag,
            l1_label=zoom.l1_label,
            l2_summary=zoom.l2_summary,
            l3_full=zoom.l3_full,
            l4_raw=text,
            embedding=embedding,
            embedding_model=getattr(self._embeddings, "model_name", "unknown"),
            # Gate decision overrides the caller's `intensity` hint.
            intensity=decision.intensity,
        )

        await self._storage.store_memory(memory)
        self._buffer.add_just_written(memory.id)
        logger.info(
            "Stored memory %s (domain=%s, l0=%s, intensity=%.2f)",
            memory.id, domain, memory.l0_tag, memory.intensity,
        )

        # Session bookkeeping + heartbeat flush, only inside a session.
        if self._session.current_session:
            await self._session.record_store(memory.id)
            self._activity_stores += 1
            await self._maybe_flush_activity()

        return memory
|
|
541
|
+
|
|
542
|
+
# -------------------------------------------------------------------
|
|
543
|
+
# Retrieve
|
|
544
|
+
# -------------------------------------------------------------------
|
|
545
|
+
|
|
546
|
+
    async def retrieve(
        self,
        query: str,
        domain: str | None = None,
        token_budget: int | None = None,
        user_message: str | None = None,
    ) -> RetrievalResult:
        """Retrieve memories matching a query using progressive injection.

        Args:
            query: Search query text (max config.max_query_length chars).
            domain: Domain to search in.
            token_budget: Override token budget for this retrieval.
            user_message: Optional raw user message for real-time feedback detection (Phase 2).

        Returns:
            RetrievalResult with L3/L2/L1 memories and hints.

        Raises:
            ValueError: If query is empty/too long or domain is invalid.
        """
        domain = domain or "default"
        self._validate_text(query, self._config.max_query_length, "Query")
        self._validate_domain(domain)

        # Phase 2: Update habituation from previous turn's injected memories
        self._buffer.update_habituation(self._last_injected_ids)

        # Get buffer contents
        buffer_ids = self._buffer.get_buffer_ids()

        # ------ Phase 3 step 1: Compute morphogens → apply to parameters ------
        query_embedding = await self._embeddings.embed(query)
        morphogen_params: dict[str, float] = {}
        current_morphogens: MorphogenState | None = None
        if self._folding.is_active("morphogens"):
            try:
                session_context: dict = {
                    "domain": domain,
                    "new_candidates_this_session": self._activity_stores,
                    "query_embedding": query_embedding,
                }
                current_morphogens = await self._morphogens.compute_morphogens(session_context)
                morphogen_params = self._morphogens.apply_morphogens(current_morphogens)
            except Exception:
                logger.debug("Morphogen computation failed — using defaults", exc_info=True)

        # ------ Phase 3 step 2: Select bandit arm → apply parameter overrides ------
        selected_arm: RetrievalArm | None = None
        if self._folding.is_active("bandits") and current_morphogens is not None:
            try:
                selected_arm = self._bandits.select_arm(current_morphogens)
                # Arm parameters override morphogen-derived values
                for param_name, param_val in selected_arm.parameters.items():
                    if param_name in morphogen_params:
                        morphogen_params[param_name] = param_val
            except Exception:
                logger.debug("Bandit arm selection failed — skipped", exc_info=True)

        # ------ Phase 3 step 3: Check system state → apply multipliers ------
        if self._folding.is_active("states"):
            try:
                domain_stats = await self._domain_manager.domain_stats(domain)
                mem_count = domain_stats.memory_count if domain_stats else 0
                sess_count = getattr(domain_stats, "session_count", 1) if domain_stats else 1
                session_metrics = {"query_variance": 0.0}
                self._states.evaluate(session_metrics, mem_count, sess_count)
                multipliers = self._states.get_parameter_multipliers()
                for param_name, mult in multipliers.items():
                    if param_name in morphogen_params:
                        morphogen_params[param_name] *= mult
            except Exception:
                logger.debug("System state evaluation failed — skipped", exc_info=True)

        # Run retrieval engine (Phase 2 extensions wired in constructor)
        # NOTE(review): morphogen_params is computed/overridden above but not
        # passed to the engine here — presumably the engine reads shared state
        # set by apply_morphogens(); confirm the params actually take effect.
        result = await self._retrieval.retrieve(query, domain, token_budget)

        # Merge buffer memories at L3 (prepended, domain-filtered)
        if buffer_ids:
            buffer_memories = await self._storage.get_memories_by_ids(buffer_ids)
            existing_l3_ids = {m.id for m in result.l3_memories}
            for bm in buffer_memories:
                if bm.id not in existing_l3_ids and bm.domain == domain:
                    result.l3_memories.insert(0, bm)

        # Update lifecycle for L3 memories
        for mem in result.l3_memories:
            await self._lifecycle.on_memory_retrieved(mem)
            self._buffer.on_access(mem.id)
            self._buffer.add_recent(mem.id)

        # Track injected IDs for next turn's habituation update
        self._last_injected_ids = [m.id for m in result.l3_memories]

        logger.info(
            "Retrieved %d L3, %d L2, %d L1 memories (domain=%s, budget=%d/%d)",
            len(result.l3_memories), len(result.l2_memories),
            len(result.l1_memories), domain,
            result.token_budget_used, result.token_budget_total,
        )

        # Phase 2: Real-time danger/safety detection + store signal detection
        store_signals: list[StoreSignal] = []
        has_clear_feedback = False
        feedback_positive = False
        if user_message and result.l3_memories:
            injected_ids = [m.id for m in result.l3_memories]
            signals = self._danger_theory.detect_signals(
                user_message, query_embedding, injected_ids
            )
            for sig in signals:
                if sig.type == FeedbackSignalType.DANGER:
                    has_clear_feedback = True
                    feedback_positive = False
                    # Negative feedback: scar the query region so these
                    # memories are suppressed for similar future queries.
                    await self._scars.create_scar(
                        reason=sig.message[:200],
                        subtype=str(sig.subtype),
                        query_embedding=query_embedding,
                        memory_ids=sig.memory_ids,
                    )
                    logger.debug("Created scar from danger signal: %s", sig.subtype)
                elif sig.type == FeedbackSignalType.SAFETY:
                    has_clear_feedback = True
                    feedback_positive = True
                    # Positive feedback: reinforce the co-retrieval trail.
                    await self._associations.strengthen_trail(
                        injected_ids, query_embedding
                    )
            store_signals = await self._store_signal_detector.detect_store_signals(
                query, "", query_embedding
            )
            if store_signals:
                # NOTE(review): store_suggestions is assigned dynamically on
                # the result — confirm RetrievalResult declares this field.
                result.store_suggestions = [
                    {
                        "subtype": str(s.subtype),
                        "score": s.score,
                        "text_excerpt": s.text_excerpt,
                        "confidence": s.confidence,
                        "suggested_intensity": s.suggested_intensity,
                    }
                    for s in store_signals
                ]

        # ------ Phase 3 steps 6-8: Post-retrieval feedback updates ------
        if has_clear_feedback and result.l3_memories:
            outcome = "positive" if feedback_positive else "negative"
            l3_ids = [m.id for m in result.l3_memories]

            # Step 6: Niche construction co-retrieval feedback
            if self._folding.is_active("niche"):
                try:
                    await self._niche.on_co_retrieval_outcome(l3_ids, outcome, domain)
                except Exception:
                    logger.debug("Niche co-retrieval update failed", exc_info=True)

            # Step 7: Bandit outcome recording
            if self._folding.is_active("bandits") and selected_arm is not None:
                try:
                    self._bandits.record_outcome(feedback_positive)
                except Exception:
                    logger.debug("Bandit outcome recording failed", exc_info=True)

            # Update system state feedback
            if self._folding.is_active("states"):
                if feedback_positive:
                    self._states.on_successful_retrieval()
                else:
                    self._states.on_failed_retrieval()

        # Phase 2: Record triples for batched judge
        if self._session.current_session and result.l3_memories:
            for mem in result.l3_memories:
                # has_signal: this memory already got unambiguous danger
                # feedback this session, so the judge can skip re-rating it.
                has_signal = bool(
                    user_message
                    and any(
                        sig.type == FeedbackSignalType.DANGER
                        for sig in self._danger_theory.get_session_signals()
                        if mem.id in sig.memory_ids
                    )
                )
                self._judge.record_triple(
                    memory_id=mem.id,
                    memory_l2=mem.l2_summary,
                    query=query,
                    response_summary=None,
                    has_clear_signal=has_signal,
                )

        # Record exchange count
        if user_message:
            self._session.record_exchange()

        # Record retrieval in session
        if self._session.current_session:
            await self._session.record_retrieval(query, result)
            self._activity_retrievals += 1
            await self._maybe_flush_activity()

        # ------ Phase 3 step 10: Intra-session maintenance check ------
        if self._session.current_session:
            try:
                session = self._session.current_session
                turn_count = self._activity_stores + self._activity_retrievals
                await self._maintenance.intra_session_maintenance(
                    session.started_at, turn_count
                )
            except Exception:
                logger.debug("Intra-session maintenance check failed", exc_info=True)

        return result
|
|
755
|
+
|
|
756
|
+
# -------------------------------------------------------------------
|
|
757
|
+
# Session
|
|
758
|
+
# -------------------------------------------------------------------
|
|
759
|
+
|
|
760
|
+
async def session_start(self, domain: str | None = None) -> SessionContext:
    """Start a new session for the given domain.

    Performs, in order: per-session state resets across all subsystems,
    scheduled maintenance tiers that are due, morphogen curve warm-up from
    the previous session, chronic-memory preloading into the working
    buffer, folding-regime selection, and finally delegation to the
    session manager. All warm-up steps are best-effort: failures are
    logged at debug level and never block session start.

    Args:
        domain: Optional domain name; validated when provided.

    Returns:
        The SessionContext produced by the session manager.
    """
    if domain is not None:
        self._validate_domain(domain)
    # Reset all per-session subsystem state so nothing leaks between sessions.
    self._buffer.clear()
    self._llm_tracker.reset_session()
    self._danger_theory.reset_session()
    self._store_signal_detector.reset_session()
    self._judge.reset_session()
    self._judge_ratings: list[JudgeRating] = []
    self._last_injected_ids = []
    self._activity_stores = 0
    self._activity_retrievals = 0
    # Phase 3: reset session-level state
    self._states.reset_session()
    self._niche.increment_session()
    await self._check_gate_transition()
    # Phase 3: run scheduled maintenance tiers if due.
    # Each tier is attempted independently; one failing tier must not
    # prevent the others (or the session) from proceeding.
    for tier_method, schedule in [
        (self._maintenance.daily_maintenance, "daily"),
        (self._maintenance.weekly_maintenance, "weekly"),
        (self._maintenance.monthly_maintenance, "monthly"),
    ]:
        try:
            if await self._maintenance.should_run_scheduled(schedule):
                await tier_method()
                logger.info("Ran %s maintenance on session_start", schedule)
        except Exception:
            logger.debug("%s maintenance failed — non-critical", schedule, exc_info=True)
    # Phase 3 step 4: Load previous session's morphogen curve parameters
    try:
        morph_history = await self._morphogens.get_history()
        if morph_history:
            # Re-apply last session's morphogen state to warm up curves
            last_state = morph_history[-1]
            self._morphogens.apply_morphogens(last_state)
            logger.debug("Loaded morphogen curves from previous session")
    except Exception:
        logger.debug("Morphogen curve loading failed — using defaults", exc_info=True)

    # Phase 3 step 5: Initialize bandit state from previous session
    # (Bandit state persists in-memory across sessions within a process;
    # full persistence handled by maintenance scheduler serialization)

    # Phase 3 step 6: Load chronic memories into working buffer
    try:
        # NOTE(review): min_sessions=5 is hard-coded here while the score
        # threshold comes from config — confirm this is intentional.
        chronic_mems = await self._storage.get_chronic_memories(
            threshold=self._config.chronic_promotion_threshold,
            min_sessions=5,
        )
        for cm in chronic_mems:
            self._buffer.add_recent(cm.id)
        if chronic_mems:
            logger.debug("Loaded %d chronic memories into buffer", len(chronic_mems))
    except Exception:
        logger.debug("Chronic memory loading failed — non-critical", exc_info=True)

    # Phase 3: Determine folding regime for this session
    try:
        stats = await self._storage.get_summary_stats()
        total_memories = stats.get("memories", 0) if stats else 0
        total_sessions = stats.get("sessions", 0) if stats else 0
        converged = self._morphogens.has_converged()
        self._folding.determine_regime(total_sessions, total_memories, converged)
        # NOTE(review): reads a private attribute of the folding subsystem
        # purely for logging.
        logger.debug("Folding regime: %s", self._folding._current_regime)
    except Exception:
        logger.debug("Folding regime determination failed", exc_info=True)

    result = await self._session.start(domain)
    # Flush a zero-op activity row immediately so the dashboard sees a live
    # heartbeat for the new session without waiting for the first store/retrieve.
    await self._maybe_flush_activity()
    return result
|
|
833
|
+
|
|
834
|
+
async def session_end(self) -> SessionSummary:
    """End the current session, producing its summary and cleaning up.

    Captures the active session before delegating to the session manager,
    then removes the session's live activity row (best-effort) so the
    dashboard reverts to a "no session" state.

    Returns:
        SessionSummary with LLM-generated summary, counts, and duration.

    Raises:
        RuntimeError: If no active session exists.
    """
    active = self._session.current_session
    result = await self._session.end()
    if active is None:
        return result
    try:
        await self._storage.delete_session_activity(active.id)
    except Exception:
        logger.debug("Activity cleanup failed — non-critical", exc_info=True)
    return result
|
|
852
|
+
|
|
853
|
+
# -------------------------------------------------------------------
|
|
854
|
+
# Direct memory ops
|
|
855
|
+
# -------------------------------------------------------------------
|
|
856
|
+
|
|
857
|
+
async def get(self, memory_id: UUID) -> Memory | None:
    """Look up a single memory by its UUID.

    Returns:
        The Memory record, or None when no record with that id exists.
    """
    found = await self._storage.get_memory(memory_id)
    return found
|
|
860
|
+
|
|
861
|
+
async def delete(self, memory_id: UUID) -> bool:
    """Permanently remove a memory record.

    Returns:
        True when a record was actually deleted, False otherwise.
    """
    deleted = await self._storage.delete_memory(memory_id)
    return deleted
|
|
864
|
+
|
|
865
|
+
async def pin(self, memory_id: UUID) -> None:
    """Pin a memory so it is always retained.

    Promotes the record to the PERMANENT lifecycle stage first, then
    registers the id in the working buffer's pinned set.
    """
    lifecycle, working_buffer = self._lifecycle, self._buffer
    await lifecycle.pin(memory_id)
    working_buffer.pin(memory_id)
|
|
869
|
+
|
|
870
|
+
async def unpin(self, memory_id: UUID) -> None:
    """Undo a pin: revert PERMANENT back to CONFIRMED and drop the buffer pin."""
    lifecycle, working_buffer = self._lifecycle, self._buffer
    await lifecycle.unpin(memory_id)
    working_buffer.unpin(memory_id)
|
|
874
|
+
|
|
875
|
+
async def mark_critical(self, memory_id: UUID) -> None:
    """Flag a memory as safety-critical.

    Safety-critical memories are always injected and never pruned;
    this simply delegates to the lifecycle subsystem.
    """
    await self._lifecycle.mark_safety_critical(memory_id)
|
|
878
|
+
|
|
879
|
+
# -------------------------------------------------------------------
|
|
880
|
+
# Phase 2: Explicit feedback API
|
|
881
|
+
# -------------------------------------------------------------------
|
|
882
|
+
|
|
883
|
+
async def correct(self, memory_id: UUID, correction: str) -> Memory | None:
    """Explicitly correct a memory. Triggers scar creation and zoom regeneration.

    - If cosine similarity between correction and existing memory > 0.5:
      partial update — regenerate zoom levels with correction applied.
    - Otherwise: full replacement — create new high-intensity memory,
      chain-linked to the original in both directions.

    Args:
        memory_id: UUID of the memory to correct.
        correction: The correction text.

    Returns:
        The updated or new Memory, or None if memory not found.
    """
    self._validate_text(correction, self._config.max_text_length, "Correction")
    mem = await self._storage.get_memory(memory_id)
    if mem is None:
        logger.warning("correct(): memory %s not found", memory_id)
        return None

    # Cosine similarity between the stored embedding and the correction's
    # embedding; guarded against zero-norm vectors (similarity stays 0.0,
    # which routes to the full-replacement branch).
    correction_embedding = await self._embeddings.embed(correction)
    mem_vec = np.array(mem.embedding, dtype=np.float32)
    cor_vec = np.array(correction_embedding, dtype=np.float32)
    mem_norm = float(np.linalg.norm(mem_vec))
    cor_norm = float(np.linalg.norm(cor_vec))
    similarity = 0.0
    if mem_norm > 0 and cor_norm > 0:
        similarity = float(np.dot(mem_vec, cor_vec) / (mem_norm * cor_norm))

    # Create scar for this retrieval pattern, keyed on the *old* embedding
    # so future retrievals resembling the corrected memory are flagged.
    await self._scars.create_scar(
        reason=f"Correction: {correction[:100]}",
        subtype="correction",
        query_embedding=mem.embedding,
        memory_ids=[memory_id],
    )

    if similarity > 0.5:
        # Partial correction — regenerate zoom levels from the original raw
        # text plus the correction appended; id and lineage are preserved,
        # intensity is nudged up (capped at 1.0) and the reconsolidation
        # counter incremented.
        combined_text = f"{mem.l4_raw}\n\nCorrection: {correction}"
        zoom = await self._zoom.generate(combined_text, mem.domain)
        updated = Memory(
            id=mem.id,
            domain=mem.domain,
            created_at=mem.created_at,
            updated_at=datetime.utcnow(),
            context=mem.context,
            l0_tag=zoom.l0_tag,
            l1_label=zoom.l1_label,
            l2_summary=zoom.l2_summary,
            l3_full=zoom.l3_full,
            l4_raw=combined_text,
            # The new embedding is the correction's, not a blend — TODO confirm
            # this is the intended retrieval behavior after partial correction.
            embedding=correction_embedding,
            embedding_model=getattr(self._embeddings, "model_name", "unknown"),
            intensity=min(1.0, mem.intensity + 0.1),
            retrieval_count=mem.retrieval_count,
            miss_count=mem.miss_count,
            last_accessed=mem.last_accessed,
            lifecycle=mem.lifecycle,
            chronic_score=mem.chronic_score,
            contrast=mem.contrast,
            chain_next=mem.chain_next,
            chain_prev=mem.chain_prev,
            vitality=mem.vitality,
            reconsolidation_count=mem.reconsolidation_count + 1,
            origin=mem.origin,
            trust_score=mem.trust_score,
        )
        await self._storage.update_memory(updated)
        logger.info("Partially corrected memory %s", memory_id)
        return updated
    else:
        # Full contradiction — create replacement, link via chain
        zoom = await self._zoom.generate(correction, mem.domain)
        now = datetime.utcnow()
        new_memory = Memory(
            id=uuid4(),
            domain=mem.domain,
            created_at=now,
            updated_at=now,
            context=mem.context,
            l0_tag=zoom.l0_tag,
            l1_label=zoom.l1_label,
            l2_summary=zoom.l2_summary,
            l3_full=zoom.l3_full,
            l4_raw=correction,
            embedding=correction_embedding,
            embedding_model=getattr(self._embeddings, "model_name", "unknown"),
            intensity=0.9,
            chain_prev=memory_id,
        )
        await self._storage.store_memory(new_memory)
        # Link old memory forward to new one
        # NOTE(review): rebuilding via mem.__dict__ assumes Memory keeps a
        # plain instance __dict__ (no slots) — verify against the model class.
        updated_old = Memory(
            **{
                **mem.__dict__,
                "chain_next": new_memory.id,
                "updated_at": now,
            }
        )
        await self._storage.update_memory(updated_old)
        logger.info(
            "Full correction: created replacement memory %s for %s",
            new_memory.id, memory_id,
        )
        if self._session.current_session:
            await self._session.record_store(new_memory.id)
        return new_memory
|
|
991
|
+
|
|
992
|
+
async def confirm(self, memory_id: UUID) -> None:
    """Explicitly confirm a memory is correct.

    Increments retrieval count, emits safety signal, and strengthens
    stigmergic trails on associated memories.

    Args:
        memory_id: UUID of the memory to confirm.
    """
    memory = await self._storage.get_memory(memory_id)
    if memory is None:
        logger.warning("confirm(): memory %s not found", memory_id)
        return
    await self._lifecycle.on_memory_retrieved(memory)
    # Reinforce the trail from this memory toward up to three neighbors,
    # using the confirmed memory's own embedding as the trail key.
    edges = await self._associations.get_edges(memory_id)
    if edges:
        neighbors = [neighbor for neighbor, _weight in edges][:3]
        await self._associations.strengthen_trail(
            [memory_id, *neighbors], memory.embedding
        )
    logger.info("Confirmed memory %s", memory_id)
|
|
1015
|
+
|
|
1016
|
+
# -------------------------------------------------------------------
|
|
1017
|
+
# Domain & task management
|
|
1018
|
+
# -------------------------------------------------------------------
|
|
1019
|
+
|
|
1020
|
+
async def list_domains(self) -> list[DomainInfo]:
    """List all domains with memory counts and last activity timestamps."""
    rows = await self._storage.list_domains()
    domains: list[DomainInfo] = []
    # Each storage row is (name, memory_count, last_updated_iso_string).
    for name, count, last_updated in rows:
        domains.append(
            DomainInfo(
                name=name,
                memory_count=count,
                last_active=datetime.fromisoformat(last_updated),
            )
        )
    return domains
|
|
1031
|
+
|
|
1032
|
+
async def add_task(self, description: str, priority: int = 0) -> Task:
    """Create and persist a new task.

    Args:
        description: Human-readable task description.
        priority: Ordering hint; defaults to 0.

    Returns:
        The newly created Task.
    """
    new_task = Task(
        id=uuid4(),
        description=description,
        created_at=datetime.utcnow(),
        priority=priority,
    )
    await self._storage.store_task(new_task)
    return new_task
|
|
1042
|
+
|
|
1043
|
+
async def complete_task(self, task_id: UUID) -> None:
    """Mark a task as completed by ID.

    A missing task remains a no-op, but is now logged — consistent with
    correct()/confirm(), which warn when given an unknown id.
    """
    task = await self._storage.get_task(task_id)
    if task is None:
        logger.warning("complete_task(): task %s not found", task_id)
        return
    task.status = TaskStatus.COMPLETED
    await self._storage.update_task(task)
|
|
1049
|
+
|
|
1050
|
+
async def list_tasks(self, status: TaskStatus | None = None) -> list[Task]:
    """Return all tasks, optionally restricted to a single status."""
    return await self._storage.list_tasks(status)
|
|
1055
|
+
|
|
1056
|
+
# -------------------------------------------------------------------
|
|
1057
|
+
# Stats
|
|
1058
|
+
# -------------------------------------------------------------------
|
|
1059
|
+
|
|
1060
|
+
async def get_stats(self) -> dict:
    """Aggregate statistics from all subsystems into a single dict.

    Returns a dict with keys: "overall" (totals), "domains" (per-domain
    counts + lifecycle distribution), "current_session" (None when no
    session is active), "recent_stores", "recent_retrievals",
    "lifecycle", "buffer", "associations", and "scars".

    NOTE(review): reads private attributes of the session and buffer
    subsystems (_session_memories, _session_retrievals, _pinned, ...);
    consider exposing accessors on those classes.
    """
    total_memories = await self._storage.count_memories()
    total_sessions = await self._storage.get_session_count()
    domains_raw = await self._storage.list_domains()

    # Per-domain breakdown with lifecycle distribution
    # breakdown rows are (domain, lifecycle, count); pivot into
    # {domain: {lifecycle: count}} for easy per-domain lookup.
    breakdown = await self._storage.get_domain_lifecycle_breakdown()
    domain_map: dict[str, dict[str, int]] = {}
    for domain, lifecycle, count in breakdown:
        domain_map.setdefault(domain, {})[lifecycle] = count
    domains_info = [
        {
            "name": name,
            "memory_count": cnt,
            "last_active": last_updated,
            "lifecycle": domain_map.get(name, {}),
        }
        for name, cnt, last_updated in domains_raw
    ]

    # Current session info
    current_session = None
    session = self._session.current_session
    if session:
        now = datetime.utcnow()
        duration = (now - session.started_at).total_seconds()
        current_session = {
            "session_id": str(session.id),
            "domain": session.domain,
            "started_at": session.started_at.isoformat(),
            "duration_seconds": duration,
            "stores": len(self._session._session_memories),
            "retrievals": len(self._session._session_retrievals),
        }

    # Recent stores
    recent_memories = await self._storage.get_recent_memories(limit=10)
    recent_stores = [
        {
            "id": str(m.id),
            "domain": m.domain,
            "created_at": m.created_at.isoformat(),
            "l1_label": m.l1_label,
            "lifecycle": m.lifecycle.value,
        }
        for m in recent_memories
    ]

    # Recent retrievals from current session (last 10 only)
    recent_retrievals = []
    if session:
        for r in self._session._session_retrievals[-10:]:
            recent_retrievals.append({
                "query": r["query"],
                "l3_count": r["l3_count"],
                "l2_count": r["l2_count"],
                "l1_count": r["l1_count"],
            })

    # Lifecycle counts
    lifecycle_counts = await self._storage.get_lifecycle_counts()

    # Buffer state: sizes of the four working-buffer partitions
    buffer_state = {
        "pinned": len(self._buffer._pinned),
        "auto_promoted": len(self._buffer._auto_promoted),
        "just_written": len(self._buffer._just_written),
        "recent": len(self._buffer._recent),
    }

    # Phase 2: association + scar stats
    assoc_stats = await self._associations.get_stats()
    active_scars = await self._scars.get_active_scars()

    return {
        "overall": {
            "total_memories": total_memories,
            "total_sessions": total_sessions,
            "total_domains": len(domains_raw),
        },
        "domains": domains_info,
        "current_session": current_session,
        "recent_stores": recent_stores,
        "recent_retrievals": recent_retrievals,
        "lifecycle": lifecycle_counts,
        "buffer": buffer_state,
        "associations": assoc_stats,
        "scars": {"active_count": len(active_scars)},
    }
|
|
1150
|
+
|
|
1151
|
+
|
|
1152
|
+
# -------------------------------------------------------------------
|
|
1153
|
+
# Phase 3: Public API additions
|
|
1154
|
+
# -------------------------------------------------------------------
|
|
1155
|
+
|
|
1156
|
+
async def health(self) -> "HealthReport":
    """Return the full health assessment (P3→P4 contract).

    Thin delegation to :class:`SelfModel.health`.
    """
    report = await self._self_model.health()
    return report
|
|
1162
|
+
|
|
1163
|
+
async def explain_last(self) -> "ExplanationReport":
    """Explain the last retrieval decision (P3→P4 contract).

    Thin delegation to :class:`SelfModel.explain_last`.
    """
    report = await self._self_model.explain_last()
    return report
|
|
1169
|
+
|
|
1170
|
+
|
|
1171
|
+
def create_memory(config: FractalConfig | None = None) -> FractalMemory:
    """Synchronous factory: construct and initialize a FractalMemory.

    Runs the async ``initialize()`` to completion via ``asyncio.run``,
    so this must be called from outside any running event loop.
    """
    instance = FractalMemory(config)
    asyncio.run(instance.initialize())
    return instance