@psiclawops/hypermem 0.8.2 → 0.8.3

@@ -0,0 +1,3127 @@
+ /**
+  * hypermem Context Engine Plugin
+  *
+  * Implements OpenClaw's ContextEngine interface backed by hypermem's
+  * four-layer memory architecture:
+  *
+  *   L1 Cache    — SQLite `:memory:` hot session working memory
+  *   L2 Messages — per-agent conversation history (SQLite)
+  *   L3 Vectors  — semantic + keyword search (KNN + FTS5)
+  *   L4 Library  — facts, knowledge, episodes, preferences
+  *
+  * Lifecycle mapping:
+  *   ingest()    → record each message into messages.db
+  *   assemble()  → compositor builds context from all four layers
+  *   compact()   → trim the hot history window directly (ownsCompaction: true)
+  *   afterTurn() → trigger background indexer (fire-and-forget)
+  *   bootstrap() → warm hot-cache session, register agent in fleet
+  *   dispose()   → close hypermem connections
+  *
+  * Session key format expected: "agent:<agentId>:<channel>:<name>"
+  */
+ import { definePluginEntry } from 'openclaw/plugin-sdk/plugin-entry';
+ import { buildPluginConfigSchema } from 'openclaw/plugin-sdk/core';
+ import { z } from 'zod';
+ import { detectTopicShift, stripMessageMetadata, SessionTopicMap, applyToolGradientToWindow, OPENCLAW_BOOTSTRAP_FILES, rotateSessionContext, TRIM_SOFT_TARGET, TRIM_GROWTH_THRESHOLD, TRIM_HEADROOM_FRACTION, resolveTrimBudgets, formatToolChainStub, decideReplayRecovery, isReplayState, } from '@psiclawops/hypermem';
+ import { evictStaleContent } from '@psiclawops/hypermem/image-eviction';
+ import { repairToolPairs } from '@psiclawops/hypermem';
+ import os from 'os';
+ import path from 'path';
+ import fs from 'fs/promises';
+ import { fileURLToPath } from 'url';
+ import fsSync from 'fs';
+ let _telemetryStream = null;
+ let _telemetryStreamFailed = false;
+ let _telemetryTurnCounter = 0;
+ function telemetryEnabled() {
+     return process.env.HYPERMEM_TELEMETRY === '1';
+ }
+ function getTelemetryStream() {
+     if (_telemetryStream || _telemetryStreamFailed)
+         return _telemetryStream;
+     try {
+         const p = process.env.HYPERMEM_TELEMETRY_PATH || './hypermem-telemetry.jsonl';
+         _telemetryStream = fsSync.createWriteStream(p, { flags: 'a' });
+         _telemetryStream.on('error', () => {
+             _telemetryStreamFailed = true;
+             _telemetryStream = null;
+         });
+     }
+     catch {
+         _telemetryStreamFailed = true;
+         _telemetryStream = null;
+     }
+     return _telemetryStream;
+ }
+ function trimTelemetry(fields) {
+     if (!telemetryEnabled())
+         return;
+     const stream = getTelemetryStream();
+     if (!stream)
+         return;
+     try {
+         const record = {
+             event: 'trim',
+             ts: new Date().toISOString(),
+             ...fields,
+         };
+         stream.write(JSON.stringify(record) + '\n');
+     }
+     catch {
+         // Telemetry must never throw
+     }
+ }
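+ // Illustrative sketch (not part of the released file): with HYPERMEM_TELEMETRY=1
+ // set, each emitter appends one JSON line to HYPERMEM_TELEMETRY_PATH (default
+ // ./hypermem-telemetry.jsonl). Only `event` and `ts` are fixed; everything else
+ // is caller-supplied, so the `path`/`budget` fields below are hypothetical.
+ //
+ //   trimTelemetry({ path: 'assemble.normal', budget: 68000 });
+ //   // → {"event":"trim","ts":"2025-01-01T00:00:00.000Z","path":"assemble.normal","budget":68000}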
+ function assembleTrace(fields) {
+     if (!telemetryEnabled())
+         return;
+     const stream = getTelemetryStream();
+     if (!stream)
+         return;
+     try {
+         const record = {
+             event: 'assemble',
+             ts: new Date().toISOString(),
+             ...fields,
+         };
+         stream.write(JSON.stringify(record) + '\n');
+     }
+     catch {
+         // Telemetry must never throw
+     }
+ }
+ function degradationTelemetry(fields) {
+     if (!telemetryEnabled())
+         return;
+     const stream = getTelemetryStream();
+     if (!stream)
+         return;
+     try {
+         const record = {
+             event: 'degradation',
+             ts: new Date().toISOString(),
+             ...fields,
+         };
+         stream.write(JSON.stringify(record) + '\n');
+     }
+     catch {
+         // Telemetry must never throw
+     }
+ }
+ function nextTurnId() {
+     _telemetryTurnCounter = (_telemetryTurnCounter + 1) >>> 0;
+     return `${Date.now().toString(36)}-${_telemetryTurnCounter.toString(36)}`;
+ }
+ // ─── Trim Ownership (Phase A Sprint 2) ───────────────────────────
+ //
+ // Sprint 2 consolidates trim ownership: the assemble-owned family
+ // (assemble.normal, assemble.subagent, assemble.toolLoop) is the single
+ // steady-state trim owner. Compact paths (compact.nuclear, compact.history,
+ // compact.history2) are exempted — they're exception-only. warmstart,
+ // reshape, and afterTurn.secondary are demoted in sub-tasks 2.2 and 2.3.
+ //
+ // This block adds:
+ // 1. A per-session turn context (beginTrimOwnerTurn/endTrimOwnerTurn) scoped
+ //    by the main assemble() flow.
+ // 2. A single shared trimOwner claim helper that lets exactly one **real**
+ //    steady-state trim claim ownership per turn and throws loudly in
+ //    development (NODE_ENV='development') when a second real steady-state
+ //    trim path attempts to claim the same turn.
+ // 3. A non-counting guard/noop telemetry helper (same JSONL channel) that
+ //    demoted paths can emit to preserve visibility of warm-start/reshape
+ //    without consuming a steady-state owner slot.
+ //
+ // Sub-task 2.1 only adds the scaffolding + invariant; no existing trim call
+ // is removed here. Demotions of warm-start/reshape/afterTurn.secondary land
+ // in 2.2 and 2.3.
+ const STEADY_STATE_TRIM_PATHS = new Set([
+     'assemble.normal',
+     'assemble.subagent',
+     'assemble.toolLoop',
+ ]);
+ const COMPACT_TRIM_PATHS = new Set([
+     'compact.nuclear',
+     'compact.history',
+     'compact.history2',
+ ]);
+ // ─── Guard-telemetry reason enum (Phase A Sprint 2.2a) ──────────────────
+ // Plugin-local, constant-backed union of allowed `reason` values on
+ // `event: 'trim-guard'` records. Keeping this bounded prevents ad-hoc
+ // numeric/user strings from leaking into the telemetry JSONL channel and
+ // makes downstream reporting stable. Do NOT widen this to arbitrary
+ // strings — add a new member here first, then reference it at call sites.
+ //
+ // Scope note: this union is plugin-local (per planner 2.2 §C). It is not
+ // re-exported via `src/types.ts` because the shared public types surface
+ // must not gain a telemetry-reason enum as part of this sprint.
+ const GUARD_TELEMETRY_REASONS = [
+     'warmstart-pressure-demoted',
+     'reshape-downshift-demoted',
+     'duplicate-claim-suppressed',
+     'afterturn-secondary-demoted',
+     'window-within-budget-skip',
+     'pressure-accounting-anomaly',
+ ];
+ // Turn-scoped ownership map (Phase A Sprint 2.2a).
+ //
+ // Previously keyed by `sessionKey` alone, which clobbered overlapping same-
+ // session assemble() flows (Sprint 2.1 security eval, medium finding #1).
+ // Now keyed by the composite `sessionKey|turnId` so two concurrent turns on
+ // the same session key remain isolated: each `beginTrimOwnerTurn` gets its
+ // own slot, `claimTrimOwner` checks the exact turn's slot, and
+ // `endTrimOwnerTurn` removes only that turn's slot.
+ const _trimOwnerTurns = new Map();
+ function _trimOwnerKey(sessionKey, turnId) {
+     return `${sessionKey}|${turnId}`;
+ }
+ function beginTrimOwnerTurn(sessionKey, turnId) {
+     _trimOwnerTurns.set(_trimOwnerKey(sessionKey, turnId), { turnId });
+ }
+ function endTrimOwnerTurn(sessionKey, turnId) {
+     _trimOwnerTurns.delete(_trimOwnerKey(sessionKey, turnId));
+ }
+ /**
+  * Claim the steady-state trim owner slot for the current turn.
+  *
+  * Behavior:
+  * - compact.* paths are exception-only and pass through without claiming.
+  * - Non-steady paths (warmstart, reshape, afterTurn.secondary) also pass
+  *   through without claiming. Demoted/no-op sites should normally emit
+  *   via guardTelemetry() instead so they stay visible without contending
+  *   for ownership (sub-tasks 2.2 and 2.3 wire this in).
+  * - Steady-state paths (assemble.normal, assemble.subagent,
+  *   assemble.toolLoop) claim the single owner slot for the current turn.
+  *   The first such claim succeeds. A second steady-state claim against the
+  *   same turn is a duplicate-turn violation: it throws loudly under
+  *   NODE_ENV='development' and warns in other environments (returning
+  *   false so non-dev runtimes keep working).
+  *
+  * Callers should invoke this immediately before the real
+  * trimHistoryToTokenBudget() call. Guard telemetry does NOT route through
+  * this helper — it is explicitly excluded from the steady-state invariant.
+  *
+  * Returns true when the claim succeeds (or is exempt); false on a swallowed
+  * duplicate claim in non-development. In development the duplicate throws
+  * before returning.
+  */
+ function claimTrimOwner(sessionKey, turnId, path) {
+     // Compact paths: exempt — they represent an exceptional pressure path and
+     // never contend for the steady-state slot.
+     if (COMPACT_TRIM_PATHS.has(path))
+         return true;
+     // Non-steady paths: pass through (warmstart/reshape/afterTurn.secondary).
+     // Warmstart + reshape are demoted to guardTelemetry in 2.2a.
+     if (!STEADY_STATE_TRIM_PATHS.has(path))
+         return true;
+     const ctx = _trimOwnerTurns.get(_trimOwnerKey(sessionKey, turnId));
+     if (!ctx)
+         return true; // No active assemble-turn scope — nothing to enforce here.
+     if (ctx.claimedPath) {
+         const msg = `[hypermem-plugin] trimOwner: duplicate steady-state trim claim in turn ` +
+             `${ctx.turnId} (sessionKey=${sessionKey}): first=${ctx.claimedPath} second=${path}`;
+         if (process.env.NODE_ENV === 'development') {
+             throw new Error(msg);
+         }
+         // Non-development: do not throw, but leave a loud trail so telemetry
+         // surfaces the violation. Callers MUST honor the false return and skip
+         // the second real trim (Sprint 2.2a enforcement).
+         console.warn(msg);
+         return false;
+     }
+     ctx.claimedPath = path;
+     return true;
+ }
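+ // Minimal usage sketch (illustrative only — the real call sites land in
+ // sub-tasks 2.2/2.3). The assemble() flow scopes a turn, claims ownership
+ // immediately before the real trim, and always closes the scope.
+ // trimHistoryToTokenBudget is referenced as in the doc comment above; its
+ // signature is assumed here.
+ //
+ //   beginTrimOwnerTurn(sk, turnId);
+ //   try {
+ //       if (claimTrimOwner(sk, turnId, 'assemble.normal')) {
+ //           await trimHistoryToTokenBudget(/* ... */);
+ //       }
+ //   }
+ //   finally {
+ //       endTrimOwnerTurn(sk, turnId);
+ //   }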
+ /**
+  * Non-counting guard / noop telemetry.
+  *
+  * Emits a `trim-guard` record on the same JSONL channel as trimTelemetry()
+  * but with a distinct event name so per-turn reporting (scripts/trim-report.mjs,
+  * future ownership dashboards) can keep it out of `trimCount`. Used by
+  * demoted/no-op call sites in 2.2 and 2.3 so their path labels stay visible
+  * in telemetry without consuming a steady-state owner slot.
+  *
+  * Zero-cost when telemetry is off. Never throws.
+  */
+ function guardTelemetry(fields) {
+     if (!telemetryEnabled())
+         return;
+     const stream = getTelemetryStream();
+     if (!stream)
+         return;
+     try {
+         const record = {
+             event: 'trim-guard',
+             ts: new Date().toISOString(),
+             ...fields,
+         };
+         stream.write(JSON.stringify(record) + '\n');
+     }
+     catch {
+         // Telemetry must never throw
+     }
+ }
+ // ─── B3: Batch trim with growth allowance ────────────────────────────────
+ // Trim fires only when window usage exceeds the soft target by this fraction.
+ // Small natural growth (e.g. a short assistant reply) never triggers a trim;
+ // only genuine spikes (model switch, cold-start, multi-tool overrun) do.
+ // When trim fires, the target is (softTarget * (1 - headroomFraction)) so the
+ // window has room to grow for several turns before the next trim fires.
+ //
+ // softTarget (0.65): matches refreshRedisGradient → steady state never trims
+ // growthThreshold (0.05): 5% overage buffer before trim fires
+ // headroomFraction (0.10): trim target = softTarget * 0.90 → ~58.5% of budget
+ // Canonical values live in the core package so plugin trim guards and compose
+ // paths cannot drift.
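+ // Worked example (illustrative, using the defaults documented above; the
+ // authoritative numbers come from resolveTrimBudgets): with an effective
+ // budget of 68k tokens, trim fires only once window usage exceeds
+ //   68_000 * 0.65 * 1.05 ≈ 46_410 tokens,
+ // and then trims down to
+ //   68_000 * 0.65 * 0.90 ≈ 39_780 tokens — leaving several turns of headroom.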
+ // Test-only: expose emitters so the unit test can exercise them directly
+ // without standing up a real session. Wrapped in a getter object so the flag
+ // guard still runs (zero-cost when off).
+ export const __telemetryForTests = {
+     trimTelemetry,
+     assembleTrace,
+     degradationTelemetry,
+     guardTelemetry,
+     nextTurnId,
+     beginTrimOwnerTurn,
+     endTrimOwnerTurn,
+     claimTrimOwner,
+     // B3/C0.1: Expose the canonical policy surface so tests can assert against
+     // the shared source of truth instead of embedding formulas locally.
+     TRIM_SOFT_TARGET,
+     TRIM_GROWTH_THRESHOLD,
+     TRIM_HEADROOM_FRACTION,
+     resolveTrimBudgets,
+     reset() {
+         if (_telemetryStream) {
+             try {
+                 _telemetryStream.end();
+             }
+             catch { /* ignore */ }
+         }
+         _telemetryStream = null;
+         _telemetryStreamFailed = false;
+         _telemetryTurnCounter = 0;
+         _trimOwnerTurns.clear();
+     },
+ };
+ // ─── hypermem singleton ────────────────────────────────────────
+ // Runtime load is dynamic (hypermem is a sibling package loaded from repo dist,
+ // not installed via npm). Types come from the core package devDependency.
+ // This pattern keeps the runtime path stable while TypeScript resolves types
+ // from the canonical source — no more local shim drift.
+ // Resolved at init time: pluginConfig.hyperMemPath > import.meta.resolve('@psiclawops/hypermem') > dev fallback
+ let HYPERMEM_PATH = '';
+ let _hm = null;
+ let _hmInitPromise = null;
+ let _indexer = null;
+ let _fleetStore = null;
+ let _generateEmbeddings = null;
+ let _embeddingConfig = null;
+ // P1.7: TaskFlow runtime reference — bound at registration time, best-effort.
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
+ let _taskFlowRuntime = null;
+ // ─── Eviction config cache ────────────────────────────────────
+ // Populated from user config during hypermem init. Stored here so
+ // assemble() (which can't await loadUserConfig) can read it without
+ // re-reading disk on every turn.
+ let _evictionConfig;
+ // ─── Context window reserve cache ────────────────────────────
+ // Populated from user config during hypermem init. Ensures hypermem leaves
+ // a guaranteed headroom fraction for system prompts, tool results, and
+ // incoming data — preventing the trim tiers from firing too close to the edge.
+ //
+ // contextWindowSize: full model context window in tokens (default: 128_000)
+ // contextWindowReserve: fraction [0.0–0.5] to keep free (default: 0.25)
+ //
+ // Effective history budget = (windowSize * (1 - reserve)) - overheadFallback
+ // e.g. 128k * 0.75 - 28k = 68k for council agents at 25% reserve
+ let _contextWindowSize = 128_000;
+ let _contextWindowReserve = 0.25;
+ let _deferToolPruning = false;
+ let _verboseLogging = false;
+ let _contextWindowOverrides = {};
+ const _budgetFallbackWarnings = new Set();
+ export const CONTEXT_WINDOW_OVERRIDE_KEY_REGEX = /^[^/\s]+\/[^/\s]+$/;
+ const contextWindowOverrideSchema = z.object({
+     contextTokens: z.number().int().positive().optional(),
+     contextWindow: z.number().int().positive().optional(),
+ }).superRefine((value, ctx) => {
+     if (value.contextTokens == null && value.contextWindow == null) {
+         ctx.addIssue({
+             code: z.ZodIssueCode.custom,
+             message: 'override must declare contextTokens, contextWindow, or both',
+         });
+     }
+     if (value.contextTokens != null &&
+         value.contextWindow != null &&
+         value.contextTokens > value.contextWindow) {
+         ctx.addIssue({
+             code: z.ZodIssueCode.custom,
+             message: 'contextTokens must be less than or equal to contextWindow',
+         });
+     }
+ });
+ export function sanitizeContextWindowOverrides(raw) {
+     if (!raw || typeof raw !== 'object' || Array.isArray(raw)) {
+         return { value: {}, warnings: [] };
+     }
+     const value = {};
+     const warnings = [];
+     for (const [key, candidate] of Object.entries(raw)) {
+         const normalizedKey = key.trim().toLowerCase();
+         if (!CONTEXT_WINDOW_OVERRIDE_KEY_REGEX.test(normalizedKey)) {
+             warnings.push(`ignoring contextWindowOverrides[${JSON.stringify(key)}]: key must be "provider/model"`);
+             continue;
+         }
+         const parsed = contextWindowOverrideSchema.safeParse(candidate);
+         if (!parsed.success) {
+             warnings.push(`ignoring contextWindowOverrides[${JSON.stringify(key)}]: ` +
+                 parsed.error.issues.map(issue => issue.message).join('; '));
+             continue;
+         }
+         value[normalizedKey] = parsed.data;
+     }
+     return { value, warnings };
+ }
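+ // Example (illustrative; the provider/model names are hypothetical): keys are
+ // trimmed and lowercased, must match "provider/model", and each value must
+ // pass the zod schema — anything else is dropped with a warning.
+ //
+ //   sanitizeContextWindowOverrides({
+ //       'Acme/Frontier-1': { contextWindow: 200_000 },  // kept under 'acme/frontier-1'
+ //       'not-a-model-key': { contextTokens: 8_000 },    // dropped: no "/" in key
+ //   });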
+ export function resolveEffectiveBudget(args) {
+     const { tokenBudget, model, contextWindowSize, contextWindowReserve } = args;
+     if (tokenBudget) {
+         return { budget: tokenBudget, source: 'runtime tokenBudget' };
+     }
+     const key = normalizeModelKey(model);
+     const override = key ? args.contextWindowOverrides?.[key] : undefined;
+     const configuredWindow = override?.contextTokens ?? override?.contextWindow;
+     if (configuredWindow) {
+         return {
+             budget: Math.floor(configuredWindow * (1 - contextWindowReserve)),
+             source: `contextWindowOverrides[${key}]`,
+         };
+     }
+     return {
+         budget: Math.floor(contextWindowSize * (1 - contextWindowReserve)),
+         source: 'fallback contextWindowSize',
+     };
+ }
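+ // Resolution order, illustrated (hypothetical numbers and model key): a
+ // runtime tokenBudget always wins; otherwise a matching override window is
+ // discounted by the reserve; otherwise the fallback window is.
+ //
+ //   resolveEffectiveBudget({ tokenBudget: 90_000, model: 'acme/frontier-1',
+ //       contextWindowSize: 128_000, contextWindowReserve: 0.25 });
+ //   // → { budget: 90000, source: 'runtime tokenBudget' }
+ //
+ //   resolveEffectiveBudget({ model: 'acme/frontier-1',
+ //       contextWindowSize: 128_000, contextWindowReserve: 0.25,
+ //       contextWindowOverrides: { 'acme/frontier-1': { contextWindow: 200_000 } } });
+ //   // → { budget: 150000, source: 'contextWindowOverrides[acme/frontier-1]' }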
+ function normalizeModelKey(model) {
+     if (!model)
+         return null;
+     const key = model.trim().toLowerCase();
+     return key.length > 0 ? key : null;
+ }
+ function verboseLog(message) {
+     if (_verboseLogging)
+         console.log(message);
+ }
+ function resolveConfiguredWindow(model) {
+     const key = normalizeModelKey(model);
+     if (!key)
+         return null;
+     const override = _contextWindowOverrides[key];
+     if (!override)
+         return null;
+     return override.contextTokens ?? override.contextWindow ?? null;
+ }
+ // Subagent warming mode: 'full' | 'light' | 'off'. Default: 'light'.
+ // Controls how much HyperMem context is injected into subagent sessions.
+ let _subagentWarming = 'light';
+ // Cache replay threshold: 15min default. Set to 0 in user config to disable.
+ let _cacheReplayThresholdMs = 900_000;
+ // ─── System overhead cache ────────────────────────────────────
+ // Caches the non-history token cost (contextBlock + runtime system prompt)
+ // from the last full compose per session key. Used in tool-loop turns to
+ // return an honest estimatedTokens without re-running the full compose
+ // pipeline. Map key = resolved session key.
+ const _overheadCache = new Map();
+ // Tier-aware conservative fallback when no cached value exists (cold session,
+ // first turn after restart). Over-estimates are safer than under-estimates:
+ // a false-positive compact is cheaper than letting context blow past budget.
+ const OVERHEAD_FALLBACK = {
+     council: 28_000,
+     director: 28_000,
+     specialist: 18_000,
+ };
+ const OVERHEAD_FALLBACK_DEFAULT = 15_000;
+ function getOverheadFallback(tier) {
+     if (!tier)
+         return OVERHEAD_FALLBACK_DEFAULT;
+     return OVERHEAD_FALLBACK[tier] ?? OVERHEAD_FALLBACK_DEFAULT;
+ }
+ /**
+  * Compute the effective history budget for trim and compact operations.
+  *
+  * Priority:
+  *   1. tokenBudget passed by the runtime (most precise)
+  *   2. Derived from context window config: windowSize * (1 - reserve)
+  *
+  * The reserve fraction (default 0.25 = 25%) guarantees headroom for:
+  * - System prompt + identity blocks (~28k for council agents)
+  * - Incoming tool results (can be 10–30k in parallel web_search bursts)
+  * - Response generation buffer (~4k)
+  *
+  * Without the reserve, trim tiers fire at 75–85% of tokenBudget but
+  * total context (history + system) exceeds the model window before trim
+  * completes, causing result stripping.
+  */
+ function computeEffectiveBudget(tokenBudget, model) {
+     const resolved = resolveEffectiveBudget({
+         tokenBudget,
+         model,
+         contextWindowSize: _contextWindowSize,
+         contextWindowReserve: _contextWindowReserve,
+         contextWindowOverrides: _contextWindowOverrides,
+     });
+     if (resolved.source === 'runtime tokenBudget') {
+         verboseLog(`[hypermem-plugin] budget source: runtime tokenBudget=${tokenBudget}${model ? ` model=${model}` : ''}`);
+         return resolved.budget;
+     }
+     const configuredWindow = resolveConfiguredWindow(model);
+     if (configuredWindow) {
+         verboseLog(`[hypermem-plugin] budget source: contextWindowOverrides[${normalizeModelKey(model)}]=${configuredWindow}, ` +
+             `reserve=${_contextWindowReserve}, effective=${resolved.budget}`);
+         return resolved.budget;
+     }
+     verboseLog(`[hypermem-plugin] budget source: fallback contextWindowSize=${_contextWindowSize}, ` +
+         `reserve=${_contextWindowReserve}, effective=${resolved.budget}${model ? ` model=${model}` : ''}`);
+     const warningKey = normalizeModelKey(model) ?? '(unknown-model)';
+     if (!_budgetFallbackWarnings.has(warningKey)) {
+         _budgetFallbackWarnings.add(warningKey);
+         console.warn(`[hypermem-plugin] No runtime tokenBudget${model ? ` for model ${model}` : ''}; ` +
+             `falling back to contextWindowSize=${_contextWindowSize}. ` +
+             `Add contextWindowOverrides["provider/model"] to config.json or openclaw.json if detection is wrong.`);
+     }
+     return resolved.budget;
+ }
+ // ─── Plugin config cache ───────────────────────────────────────
+ // Populated from openclaw.json plugins.entries.hypercompositor.config
+ // during register(). loadUserConfig() merges this over config.json.
+ let _pluginConfig = {};
+ /**
+  * Load user config with priority: pluginConfig (openclaw.json) > config.json (legacy).
+  * pluginConfig values win; config.json provides fallback for keys not set in openclaw.json.
+  * This allows gradual migration from the shadow config.json to central config.
+  */
+ async function loadUserConfig() {
+     // Resolve data dir: pluginConfig > default
+     const dataDir = _pluginConfig.dataDir ?? path.join(os.homedir(), '.openclaw/hypermem');
+     const configPath = path.join(dataDir, 'config.json');
+     let fileConfig = {};
+     try {
+         const raw = await fs.readFile(configPath, 'utf-8');
+         fileConfig = JSON.parse(raw);
+         console.log(`[hypermem-plugin] Loaded legacy config from ${configPath}`);
+     }
+     catch (err) {
+         if (err.code !== 'ENOENT') {
+             console.warn(`[hypermem-plugin] Failed to parse config.json (using defaults):`, err.message);
+         }
+     }
+     // Merge: pluginConfig (openclaw.json) wins over fileConfig (legacy config.json).
+     // Top-level scalar keys from pluginConfig override fileConfig.
+     // Nested objects (compositor, eviction, embedding) are shallow-merged.
+     const merged = { ...fileConfig };
+     if (_pluginConfig.contextWindowSize != null)
+         merged.contextWindowSize = _pluginConfig.contextWindowSize;
+     if (_pluginConfig.contextWindowReserve != null)
+         merged.contextWindowReserve = _pluginConfig.contextWindowReserve;
+     if (_pluginConfig.deferToolPruning != null)
+         merged.deferToolPruning = _pluginConfig.deferToolPruning;
+     if (_pluginConfig.verboseLogging != null)
+         merged.verboseLogging = _pluginConfig.verboseLogging;
+     if (_pluginConfig.contextWindowOverrides != null)
+         merged.contextWindowOverrides = { ...merged.contextWindowOverrides, ..._pluginConfig.contextWindowOverrides };
+     if (_pluginConfig.warmCacheReplayThresholdMs != null)
+         merged.warmCacheReplayThresholdMs = _pluginConfig.warmCacheReplayThresholdMs;
+     if (_pluginConfig.subagentWarming != null)
+         merged.subagentWarming = _pluginConfig.subagentWarming;
+     if (_pluginConfig.compositor)
+         merged.compositor = { ...merged.compositor, ..._pluginConfig.compositor };
+     if (_pluginConfig.eviction)
+         merged.eviction = { ...merged.eviction, ..._pluginConfig.eviction };
+     if (_pluginConfig.embedding)
+         merged.embedding = { ...merged.embedding, ..._pluginConfig.embedding };
+     if (Object.keys(fileConfig).length > 0 && Object.keys(_pluginConfig).filter(k => k !== 'hyperMemPath' && k !== 'dataDir').length > 0) {
+         console.log('[hypermem-plugin] Note: migrating config.json keys to plugins.entries.hypercompositor.config in openclaw.json is recommended');
+     }
+     return merged;
+ }
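+ // Illustrative config sketch (values hypothetical; keys taken from the merge
+ // above): the same shape can live in openclaw.json under
+ // plugins.entries.hypercompositor.config (wins) or in the legacy
+ // ~/.openclaw/hypermem/config.json (fallback). Nested compositor / eviction /
+ // embedding objects are shallow-merged rather than replaced.
+ //
+ //   {
+ //       "contextWindowSize": 128000,
+ //       "contextWindowReserve": 0.25,
+ //       "subagentWarming": "light",
+ //       "contextWindowOverrides": { "acme/frontier-1": { "contextWindow": 200000 } },
+ //       "embedding": { "provider": "ollama", "model": "nomic-embed-text" }
+ //   }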
+ async function getHyperMem() {
+     if (_hm)
+         return _hm;
+     if (_hmInitPromise)
+         return _hmInitPromise;
+     _hmInitPromise = (async () => {
+         // Dynamic import — hypermem is loaded from repo dist
+         const mod = await import(HYPERMEM_PATH);
+         const HyperMem = mod.HyperMem;
+         // Capture generateEmbeddings from the dynamic module for use in afterTurn().
+         // Bind it with the user's embedding config so the pre-compute path uses the
+         // same provider as the indexer (Ollama vs OpenAI).
+         if (typeof mod.generateEmbeddings === 'function') {
+             const rawGenerate = mod.generateEmbeddings;
+             _generateEmbeddings = (texts) => rawGenerate(texts, _embeddingConfig ?? undefined);
+         }
+         // Load optional user config — compositor tuning overrides
+         const userConfig = await loadUserConfig();
+         // Build embedding config from user config. Applied to both HyperMem core
+         // (VectorStore init) and the _generateEmbeddings closure above.
+         if (userConfig.embedding) {
+             const ue = userConfig.embedding;
+             // Provider-specific model/dimension/batch defaults
+             const providerDefaults = ue.provider === 'gemini'
+                 ? { model: 'gemini-embedding-001', dimensions: 3072, batchSize: 100, timeout: 15000 }
+                 : ue.provider === 'openai'
+                     ? { model: 'text-embedding-3-small', dimensions: 1536, batchSize: 128, timeout: 10000 }
+                     : { model: 'nomic-embed-text', dimensions: 768, batchSize: 32, timeout: 10000 };
+             _embeddingConfig = {
+                 provider: ue.provider ?? 'ollama',
+                 ollamaUrl: ue.ollamaUrl ?? 'http://localhost:11434',
+                 openaiBaseUrl: ue.openaiBaseUrl ?? 'https://api.openai.com/v1',
+                 openaiApiKey: ue.openaiApiKey,
+                 geminiBaseUrl: ue.geminiBaseUrl,
+                 geminiIndexTaskType: ue.geminiIndexTaskType,
+                 geminiQueryTaskType: ue.geminiQueryTaskType,
+                 model: ue.model ?? providerDefaults.model,
+                 dimensions: ue.dimensions ?? providerDefaults.dimensions,
+                 timeout: ue.timeout ?? providerDefaults.timeout,
+                 batchSize: ue.batchSize ?? providerDefaults.batchSize,
+             };
+             console.log(`[hypermem-plugin] Embedding provider: ${_embeddingConfig.provider} ` +
+                 `(model: ${_embeddingConfig.model}, ${_embeddingConfig.dimensions}d, batch: ${_embeddingConfig.batchSize})`);
+         }
+         // Cache eviction config at module scope so assemble() can read it
+         // synchronously without re-reading disk on every turn.
+         _evictionConfig = userConfig.eviction ?? {};
+         // Cache context window config so all three trim hotpaths use the same values.
+         if (typeof userConfig.contextWindowSize === 'number' && userConfig.contextWindowSize > 0) {
+             _contextWindowSize = userConfig.contextWindowSize;
+         }
+         if (typeof userConfig.contextWindowReserve === 'number' &&
+             userConfig.contextWindowReserve >= 0 && userConfig.contextWindowReserve <= 0.5) {
+             _contextWindowReserve = userConfig.contextWindowReserve;
+         }
+         _deferToolPruning = userConfig.deferToolPruning === true;
+         if (_deferToolPruning) {
+             console.log('[hypermem-plugin] deferToolPruning: true — tool gradient deferred to host contextPruning');
+         }
+         _verboseLogging = userConfig.verboseLogging === true;
+         const sanitizedOverrides = sanitizeContextWindowOverrides(userConfig.contextWindowOverrides);
+         _contextWindowOverrides = sanitizedOverrides.value;
+         for (const warning of sanitizedOverrides.warnings) {
+             console.warn(`[hypermem-plugin] ${warning}`);
+         }
+         const warmingVal = userConfig.subagentWarming;
+         if (warmingVal === 'full' || warmingVal === 'light' || warmingVal === 'off') {
+             _subagentWarming = warmingVal;
+             console.log(`[hypermem-plugin] subagentWarming: ${_subagentWarming}`);
+         }
+         if (typeof userConfig.warmCacheReplayThresholdMs === 'number') {
+             _cacheReplayThresholdMs = userConfig.warmCacheReplayThresholdMs;
+         }
+         const reservedTokens = Math.floor(_contextWindowSize * _contextWindowReserve);
+         console.log(`[hypermem-plugin] context window: ${_contextWindowSize} tokens, ` +
+             `${Math.round(_contextWindowReserve * 100)}% reserved (${reservedTokens} tokens), ` +
+             `effective history budget: ${_contextWindowSize - reservedTokens} tokens`);
+         verboseLog(`[hypermem-plugin] warmCacheReplayThresholdMs=${_cacheReplayThresholdMs}`);
+         verboseLog(`[hypermem-plugin] contextWindowOverrides keys=${Object.keys(_contextWindowOverrides).join(', ') || '(none)'}`);
+         const instance = await HyperMem.create({
+             dataDir: _pluginConfig.dataDir ?? path.join(os.homedir(), '.openclaw/hypermem'),
+             cache: {
+                 keyPrefix: 'hm:',
+                 sessionTTL: 14400, // 4h for system/identity/meta slots
+                 historyTTL: 86400, // 24h for history — ages out, not count-trimmed
+             },
+             ...(userConfig.compositor ? { compositor: userConfig.compositor } : {}),
+             ...(_embeddingConfig ? { embedding: _embeddingConfig } : {}),
+         });
+         _hm = instance;
+         // Wire up fleet store and background indexer from dynamic module
+         const { FleetStore: FleetStoreClass, createIndexer } = mod;
+         const libraryDb = instance.dbManager.getLibraryDb();
+         _fleetStore = new FleetStoreClass(libraryDb);
+         try {
+             // T1.2: Wire indexer with proper DB accessors and cursor fetcher.
+             // The cursor fetcher enables priority-based indexing: messages the model
+             // hasn't seen yet (post-cursor) are processed first.
+             _indexer = createIndexer((agentId) => instance.dbManager.getMessageDb(agentId), () => instance.dbManager.getLibraryDb(), () => {
+                 // List agents from fleet_agents table (active only)
+                 try {
+                     const rows = instance.dbManager.getLibraryDb()
+                         .prepare("SELECT id FROM fleet_agents WHERE status = 'active'")
+                         .all();
+                     return rows.map(r => r.id);
+                 }
+                 catch {
+                     return [];
+                 }
+             }, {
+                 enabled: true,
+                 periodicInterval: userConfig?.maintenance?.periodicInterval ?? 300000,
+                 maxActiveConversations: userConfig?.maintenance?.maxActiveConversations ?? 5,
+                 recentConversationCooldownMs: userConfig?.maintenance?.recentConversationCooldownMs ?? 30000,
+                 maxCandidatesPerPass: userConfig?.maintenance?.maxCandidatesPerPass ?? 200,
+             },
+             // Cursor fetcher: reads the SQLite-backed session cursor
+             async (agentId, sessionKey) => {
+                 return instance.getSessionCursor(agentId, sessionKey);
+             },
+             // Pass vector store so new facts/episodes are embedded at index time
+             instance.getVectorStore() ?? undefined,
+             // Dreaming config — passed from hypermem user config if set
+             userConfig?.dreaming ?? {},
+             // KL-01: global write policy — passed from hypermem user config
+             userConfig?.globalWritePolicy ?? 'deny');
+             _indexer.start();
+             if (_verboseLogging) {
+                 const mc = userConfig?.maintenance ?? {};
+                 console.log(`[hypermem-plugin] maintenance settings: periodicInterval=${mc.periodicInterval ?? 300000}ms ` +
+                     `maxActiveConversations=${mc.maxActiveConversations ?? 5} ` +
+                     `cooldown=${mc.recentConversationCooldownMs ?? 30000}ms ` +
+                     `maxCandidatesPerPass=${mc.maxCandidatesPerPass ?? 200}`);
+             }
+         }
+         catch {
+             // Non-fatal — indexer wiring can fail without breaking context assembly
+         }
+         return instance;
+     })();
+     return _hmInitPromise;
+ }
+ // ─── Session Key Helpers ────────────────────────────────────────
+ /**
+  * Extract agentId from a session key.
+  * Session keys follow: "agent:<agentId>:<channel>:<name>"
+  * Falls back to "main" if the key doesn't match expected format.
+  */
+ function extractAgentId(sessionKey) {
+     if (!sessionKey)
+         return 'main';
+     const parts = sessionKey.split(':');
+     if (parts[0] === 'agent' && parts.length >= 2) {
+         return parts[1];
+     }
+     return 'main';
+ }
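+ // Examples (illustrative agent/channel names):
+ //   extractAgentId('agent:researcher:discord:general')  → 'researcher'
+ //   extractAgentId('session:3f2a…')                     → 'main' (synthetic key)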
+ /**
+  * Normalize sessionKey — prefer the explicit sessionKey param,
+  * fall back to sessionId (UUID) which we can't parse as a session key.
+  * If neither is useful, use a default.
+  */
+ function resolveSessionKey(sessionId, sessionKey) {
+     if (sessionKey)
+         return sessionKey;
+     // sessionId is a UUID — not a parseable session key.
+     // Use a synthetic key so recording works but note it won't resolve to a named session.
+     return `session:${sessionId}`;
+ }
+ const SYNTHETIC_MISSING_TOOL_RESULT_TEXT = 'No result provided';
+ function extractTextFromInboundContent(content) {
+     if (typeof content === 'string')
+         return content;
+     if (!Array.isArray(content))
+         return '';
+     return content
+         .filter((part) => Boolean(part && typeof part.type === 'string'))
+         .filter(part => part.type === 'text' && typeof part.text === 'string')
+         .map(part => part.text ?? '')
+         .join('\n');
+ }
+ function collectNeutralToolPairStats(messages) {
+     const callIds = new Set();
+     const resultIds = new Set();
+     let toolCallCount = 0;
+     let toolResultCount = 0;
+     let syntheticNoResultCount = 0;
+     for (const msg of messages) {
+         for (const tc of msg.toolCalls ?? []) {
+             toolCallCount++;
+             if (tc.id)
+                 callIds.add(tc.id);
+         }
+         for (const tr of msg.toolResults ?? []) {
+             toolResultCount++;
+             if (tr.callId)
+                 resultIds.add(tr.callId);
+             if ((tr.content ?? '').trim() === SYNTHETIC_MISSING_TOOL_RESULT_TEXT)
+                 syntheticNoResultCount++;
+         }
+     }
+     const missingToolResultIds = [...callIds].filter(id => !resultIds.has(id));
+     const orphanToolResultIds = [...resultIds].filter(id => !callIds.has(id));
+     return {
+         toolCallCount,
+         toolResultCount,
+         missingToolResultCount: missingToolResultIds.length,
+         orphanToolResultCount: orphanToolResultIds.length,
+         syntheticNoResultCount,
+         missingToolResultIds,
+         orphanToolResultIds,
+     };
+ }
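+ // Example (illustrative ids): one answered call, one dangling call —
+ //   collectNeutralToolPairStats([
+ //       { toolCalls: [{ id: 'a' }, { id: 'b' }] },
+ //       { toolResults: [{ callId: 'a', content: 'ok' }] },
+ //   ]);
+ //   // → toolCallCount: 2, toolResultCount: 1, syntheticNoResultCount: 0,
+ //   //   missingToolResultIds: ['b'], orphanToolResultIds: []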
+ function collectAgentToolPairStats(messages) {
+     const callIds = new Set();
+     const resultIds = new Set();
+     let toolCallCount = 0;
+     let toolResultCount = 0;
+     let syntheticNoResultCount = 0;
+     for (const msg of messages) {
+         if (msg.role === 'assistant' && Array.isArray(msg.content)) {
+             for (const block of msg.content) {
+                 if (block.type === 'toolCall' || block.type === 'toolUse') {
+                     toolCallCount++;
+                     if (typeof block.id === 'string' && block.id.length > 0)
+                         callIds.add(block.id);
+                 }
+             }
+         }
+         if (msg.role === 'toolResult') {
+             toolResultCount++;
+             const toolCallId = typeof msg.toolCallId === 'string' ? msg.toolCallId : '';
+             if (toolCallId)
+                 resultIds.add(toolCallId);
+             if (extractTextFromInboundContent(msg.content).trim() === SYNTHETIC_MISSING_TOOL_RESULT_TEXT) {
+                 syntheticNoResultCount++;
+             }
+         }
+     }
+     const missingToolResultIds = [...callIds].filter(id => !resultIds.has(id));
+     const orphanToolResultIds = [...resultIds].filter(id => !callIds.has(id));
+     return {
+         toolCallCount,
+         toolResultCount,
+         missingToolResultCount: missingToolResultIds.length,
+         orphanToolResultCount: orphanToolResultIds.length,
+         syntheticNoResultCount,
+         missingToolResultIds,
+         orphanToolResultIds,
+     };
+ }
+ async function bumpToolPairMetrics(hm, agentId, sessionKey, delta, anomaly) {
+     const slot = 'toolPairMetrics';
+     let stored = {};
+     try {
+         const raw = await hm.cache.getSlot(agentId, sessionKey, slot);
+         if (raw)
+             stored = JSON.parse(raw);
+     }
+     catch {
+         stored = {};
+     }
+     const next = {
+         composeCount: (stored.composeCount ?? 0) + (delta.composeCount ?? 0),
+         syntheticNoResultIngested: (stored.syntheticNoResultIngested ?? 0) + (delta.syntheticNoResultIngested ?? 0),
+         preBridgeMissingToolResults: (stored.preBridgeMissingToolResults ?? 0) + (delta.preBridgeMissingToolResults ?? 0),
+         preBridgeOrphanToolResults: (stored.preBridgeOrphanToolResults ?? 0) + (delta.preBridgeOrphanToolResults ?? 0),
+         postBridgeMissingToolResults: (stored.postBridgeMissingToolResults ?? 0) + (delta.postBridgeMissingToolResults ?? 0),
+         postBridgeOrphanToolResults: (stored.postBridgeOrphanToolResults ?? 0) + (delta.postBridgeOrphanToolResults ?? 0),
+         lastUpdatedAt: new Date().toISOString(),
+         lastAnomaly: anomaly ?? stored.lastAnomaly,
+     };
+     await hm.cache.setSlot(agentId, sessionKey, slot, JSON.stringify(next));
+ }
+ /**
+  * Convert an OpenClaw AgentMessage to hypermem's NeutralMessage format.
+  */
+ function toNeutralMessage(msg) {
+     // Extract text content from string or array format
+     let textContent = null;
+     if (typeof msg.content === 'string') {
+         textContent = msg.content;
+     }
+     else if (Array.isArray(msg.content)) {
+         const textParts = msg.content
+             .filter((c) => c.type === 'text' && typeof c.text === 'string')
+             .map(c => c.text);
+         textContent = textParts.length > 0 ? textParts.join('\n') : null;
+     }
+     // Detect tool calls/results.
+     // OpenClaw stores tool calls as content blocks: { type: 'toolCall' | 'toolUse', id, name, input }
+     // Legacy wire format stores them as a separate msg.tool_calls / msg.toolCalls array
+     // with OpenAI format: { id, type: 'function', function: { name, arguments } }
+     // Normalize everything to NeutralToolCall format: { id, name, arguments: string }
+     const contentBlockToolCalls = Array.isArray(msg.content)
+         ? msg.content
+             .filter(c => c.type === 'toolCall' || c.type === 'toolUse')
+             .map(c => ({
+                 id: c.id ?? 'unknown',
+                 name: c.name ?? 'unknown',
+                 arguments: typeof c.input === 'string' ? c.input : JSON.stringify(c.input ?? {}),
+             }))
+         : [];
+     // Legacy wire format tool calls (OpenAI style)
+     const rawToolCalls = msg.tool_calls
+         ?? msg.toolCalls
+         ?? null;
+     let toolCalls = null;
+     if (rawToolCalls && rawToolCalls.length > 0) {
+         toolCalls = rawToolCalls.map(tc => {
+             // OpenAI wire format: { id, type: 'function', function: { name, arguments } }
+             const fn = tc.function;
+             if (fn) {
+                 return {
+                     id: tc.id ?? 'unknown',
+                     name: fn.name ?? 'unknown',
+                     arguments: typeof fn.arguments === 'string' ? fn.arguments : JSON.stringify(fn.arguments ?? {}),
+                 };
+             }
+             // Already NeutralToolCall-ish or content block format
+             return {
+                 id: tc.id ?? 'unknown',
+                 name: tc.name ?? 'unknown',
+                 arguments: typeof tc.arguments === 'string' ? tc.arguments
+                     : typeof tc.input === 'string' ? tc.input
+                         : JSON.stringify(tc.arguments ?? tc.input ?? {}),
+             };
+         });
+     }
+     else if (contentBlockToolCalls.length > 0) {
+         toolCalls = contentBlockToolCalls;
+     }
+     // OpenClaw uses role 'toolResult' (camelCase). Support all three spellings.
+     const isToolResultMsg = msg.role === 'tool' || msg.role === 'tool_result' || msg.role === 'toolResult';
+     // Tool results must stay on the result side of the transcript. If we persist them as
+     // assistant rows with orphaned toolResults, later replay can retain a tool_result after
+     // trimming away the matching assistant tool_use, which Anthropic rejects with a 400.
+     let toolResults = null;
+     if (isToolResultMsg && textContent) {
+         const toolCallId = msg.tool_call_id ?? msg.toolCallId ?? 'unknown';
+         const toolName = msg.name ?? msg.toolName ?? 'tool';
+         toolResults = [{ callId: toolCallId, name: toolName, content: textContent }];
+         textContent = null; // owned by toolResults now, not duplicated in textContent
+     }
+     const role = isToolResultMsg
+         ? 'user'
+         : msg.role;
+     return {
+         role,
+         textContent,
+         toolCalls: isToolResultMsg ? null : toolCalls,
+         toolResults,
+     };
+ }
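+ // Example (illustrative ids/names): an OpenClaw toolResult message is re-homed
+ // to the user/result side so trims can never strand a tool_result without its
+ // matching call.
+ //
+ //   toNeutralMessage({ role: 'toolResult', toolCallId: 'call_1',
+ //       toolName: 'web_search', content: [{ type: 'text', text: '42 hits' }] });
+ //   // → { role: 'user', textContent: null, toolCalls: null,
+ //   //     toolResults: [{ callId: 'call_1', name: 'web_search', content: '42 hits' }] }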
+ // ─── Context Engine Implementation ─────────────────────────────
+ /**
+  * In-flight warm dedup map.
+  * Key: "agentId::sessionKey" — Value: the in-progress warm() Promise.
+  * Prevents concurrent bootstrap() calls from firing multiple full warms
+  * for the same session key before the first one sets the Redis history key.
+  * Cleared on completion (success or failure) so the next cold start retries.
+  */
+ const _warmInFlight = new Map();
+ // ─── Token estimation ──────────────────────────────────────────
+ /**
+  * Estimate tokens for a string using the same ~4 chars/token heuristic
+  * used by the hypermem compositor. Fast and allocation-free — no tokenizer
+  * library needed for a budget guard.
+  */
+ function estimateTokens(text) {
+     if (!text)
+         return 0;
+     return Math.ceil(text.length / 4);
+ }
+ function estimateMessagePartTokens(part) {
+     if (part.type === 'image' || part.type === 'image_url') {
+         const src = part.source?.data;
+         const url = part.image_url?.url;
+         const dataStr = typeof src === 'string' ? src : (typeof url === 'string' ? url : '');
+         return Math.ceil(dataStr.length / 3);
+     }
+     if (part.type === 'toolCall' || part.type === 'tool_use') {
+         return Math.ceil(JSON.stringify(part).length / 2);
+     }
+     const textVal = typeof part.text === 'string' ? part.text
+         : typeof part.content === 'string' ? part.content
+             : part.content != null ? JSON.stringify(part.content) : null;
+     return estimateTokens(textVal);
+ }
+ function estimateMessageTokens(msg) {
+     let total = estimateTokens(typeof msg.textContent === 'string' ? msg.textContent : null);
+     if (typeof msg.content === 'string' && typeof msg.textContent !== 'string') {
+         total += estimateTokens(msg.content);
+     }
+     if (msg.toolCalls)
+         total += Math.ceil(JSON.stringify(msg.toolCalls).length / 2);
+     if (msg.toolResults)
+         total += Math.ceil(JSON.stringify(msg.toolResults).length / 2);
+     if (Array.isArray(msg.content)) {
+         total += msg.content.reduce((sum, part) => sum + estimateMessagePartTokens(part), 0);
+     }
+     return total;
+ }
+ function estimateMessageArrayTokens(messages) {
+     return messages.reduce((sum, msg) => sum + estimateMessageTokens(msg), 0);
+ }
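+ // Heuristic in action (illustrative): plain text counts at ~4 chars/token,
+ // tool-call/result JSON at ~2 chars/token (denser for the model), and image
+ // payloads at ~3 chars of data per token.
+ //   estimateTokens('a'.repeat(400))  → 100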
+ function maybeLogPressureAccountingAnomaly(fields) {
+     const threshold = Math.max(500, Math.floor(fields.budget * 0.05));
+     const deltas = {
+         runtimeVsComposed: Math.abs(fields.runtimeTokens - fields.composedTokens),
+         redisVsComposed: Math.abs(fields.redisTokens - fields.composedTokens),
+         runtimeVsRedis: Math.abs(fields.runtimeTokens - fields.redisTokens),
+     };
+     // Post-0.6.0: "redis" is actually the L1 SQLite cache window, which lags
+     // behind the runtime message array between trim passes. Cache-vs-runtime
+     // drift is structural and harmless — the runtime array is authoritative
+     // (it's what the model sees). Only warn when runtimeVsComposed diverges,
+     // which indicates an actual trim accounting bug.
+     if (deltas.runtimeVsComposed < threshold) {
+         // Log cache drift at debug level for observability, not as a warning.
+         if (deltas.redisVsComposed >= threshold || deltas.runtimeVsRedis >= threshold) {
+             console.debug(`[hypermem-plugin] cache-drift (non-anomalous): path=${fields.path} ` +
+                 `runtime=${fields.runtimeTokens} cache=${fields.redisTokens} composed=${fields.composedTokens} ` +
+                 `budget=${fields.budget}`);
+         }
+         return;
+     }
+     console.warn(`[hypermem-plugin] pressure-accounting anomaly: path=${fields.path} ` +
+         `runtime=${fields.runtimeTokens} cache=${fields.redisTokens} composed=${fields.composedTokens} ` +
+         `budget=${fields.budget} threshold=${threshold}`);
+     guardTelemetry({
+         path: fields.path,
+         agentId: fields.agentId,
+         sessionKey: fields.sessionKey,
+         reason: 'pressure-accounting-anomaly',
+     });
+ }
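+ // Threshold example (illustrative): at budget=68_000 the anomaly threshold is
+ // max(500, floor(68_000 * 0.05)) = 3_400 tokens — runtime-vs-composed drift
+ // below that is treated as noise, and pure cache drift is only debug-logged.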
+ function normalizeReplayRecoveryState(value) {
+     if (value == null)
+         return null;
+     if (value === '')
+         return '';
+     return isReplayState(value) ? value : null;
+ }
+ async function persistReplayRecoveryState(hm, agentId, sessionKey, nextState) {
+     try {
+         await hm.cache.setSlot(agentId, sessionKey, 'replayRecoveryState', nextState ?? '');
+     }
+     catch {
+         // Non-fatal
+     }
+ }
+ function hasStructuredToolCallMessage(msg) {
+     if (Array.isArray(msg.toolCalls) && msg.toolCalls.length > 0)
+         return true;
+     if (!Array.isArray(msg.content))
+         return false;
+     return msg.content.some(part => part.type === 'toolCall' || part.type === 'tool_use');
+ }
+ function hasStructuredToolResultMessage(msg) {
+     if (Array.isArray(msg.toolResults) && msg.toolResults.length > 0)
+         return true;
+     if (msg.role === 'toolResult' || msg.role === 'tool' || msg.role === 'tool_result')
+         return true;
+     if (!Array.isArray(msg.content))
+         return false;
+     return msg.content.some(part => part.type === 'tool_result' || part.type === 'toolResult');
+ }
+ function getToolCallIds(msg) {
+     const ids = [];
+     if (Array.isArray(msg.toolCalls)) {
+         ids.push(...msg.toolCalls.map(tc => tc.id).filter((id) => typeof id === 'string' && id.length > 0));
+     }
+     if (Array.isArray(msg.content)) {
+         for (const part of msg.content) {
+             if ((part.type === 'toolCall' || part.type === 'tool_use') && typeof part.id === 'string' && part.id.length > 0) {
+                 ids.push(part.id);
+             }
+         }
+     }
+     return ids;
+ }
+ function getToolResultIds(msg) {
+     const ids = [];
+     if (Array.isArray(msg.toolResults)) {
+         ids.push(...msg.toolResults.map(tr => tr.callId).filter((id) => typeof id === 'string' && id.length > 0));
+     }
+     if (typeof msg.toolCallId === 'string' && msg.toolCallId.length > 0) {
+         ids.push(msg.toolCallId);
+     }
+     if (typeof msg.tool_call_id === 'string' && msg.tool_call_id.length > 0) {
+         ids.push(msg.tool_call_id);
+     }
+     return ids;
+ }
+ function clusterTranscriptMessages(messages) {
+     const clusters = [];
+     for (let i = 0; i < messages.length; i++) {
+         const current = messages[i];
+         const cluster = [current];
+         if (hasStructuredToolCallMessage(current)) {
+             const callIds = new Set(getToolCallIds(current));
+             let j = i + 1;
+             while (j < messages.length) {
+                 const candidate = messages[j];
+                 if (!hasStructuredToolResultMessage(candidate))
+                     break;
+                 const resultIds = getToolResultIds(candidate);
+                 if (callIds.size > 0 && resultIds.length > 0 && !resultIds.some(id => callIds.has(id)))
+                     break;
+                 cluster.push(candidate);
+                 j++;
+             }
+             i = j - 1;
+         }
+         else if (hasStructuredToolResultMessage(current)) {
+             let j = i + 1;
+             while (j < messages.length) {
+                 const candidate = messages[j];
+                 if (!hasStructuredToolResultMessage(candidate) || hasStructuredToolCallMessage(candidate))
+                     break;
+                 cluster.push(candidate);
+                 j++;
+             }
+             i = j - 1;
+         }
+         clusters.push(cluster);
+     }
+     return clusters;
+ }
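+ // Clustering sketch (illustrative roles/ids): a tool call and its matching
+ // results travel as one unit so a trim boundary can never split the pair.
+ //
+ //   [user, assistant(toolCall a), toolResult(a), toolResult(a), user]
+ //   // → clusters: [user] [assistant + both results] [user]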
+ /**
+  * Estimate total token cost of the current Redis history window for a session.
+  * Counts text content + tool call/result JSON for each message.
+  */
+ async function estimateWindowTokens(hm, agentId, sessionKey) {
+     try {
+         // Prefer the hot window cache (set after compaction trims the history).
+         // Fall back to the actual history list — the window cache is only populated
+         // after compact() calls setWindow(), so a fresh or never-compacted session
+         // has no window cache entry. Without this fallback, getWindow returns null
+         // → estimateWindowTokens returns 0 → compact() always says within_budget
+         // → overflow loop.
+         const window = await hm.cache.getWindow(agentId, sessionKey)
+             ?? await hm.cache.getHistory(agentId, sessionKey);
+         if (!window || window.length === 0)
+             return 0;
+         return estimateMessageArrayTokens(window);
+     }
+     catch {
+         return 0;
+     }
+ }
+ /**
+  * Truncate a JSONL session file to keep only the last `targetDepth` message
+  * entries plus all non-message entries (header, compaction, model_change, etc).
+  *
+  * This is needed because the runtime loads messages from the JSONL file
+  * (not from Redis) to build its overflow estimate. When ownsCompaction=true,
+  * OpenClaw's truncateSessionAfterCompaction() is never called, so we do it
+  * ourselves.
+  *
+  * Returns true if the file was actually truncated, false if no action was
+  * needed or the file didn't exist.
+  */
+ async function truncateJsonlIfNeeded(sessionFile, targetDepth, force = false, tokenBudgetOverride) {
+     if (!sessionFile || typeof sessionFile !== 'string')
+         return false;
+     try {
+         const raw = await fs.readFile(sessionFile, 'utf-8');
+         const lines = raw.split('\n').filter(l => l.trim());
+         if (lines.length === 0)
+             return false;
+         const header = lines[0];
+         const entries = [];
+         for (let i = 1; i < lines.length; i++) {
+             try {
+                 entries.push({ line: lines[i], parsed: JSON.parse(lines[i]) });
+             }
+             catch {
+                 entries.push({ line: lines[i], parsed: null });
+             }
+             // Yield every 100 entries to avoid blocking the event loop
+             if (i % 100 === 0)
+                 await new Promise(r => setImmediate(r));
+         }
+         const messageEntries = [];
+         const metadataEntries = [];
+         for (const e of entries) {
+             if (e.parsed?.type === 'message') {
+                 messageEntries.push(e);
+             }
+             else {
+                 metadataEntries.push(e);
+             }
+         }
+         // Only rewrite if meaningfully over target — unless force=true (over-budget path)
+         if (!force && messageEntries.length <= targetDepth * 1.5)
+             return false;
+         // If a token budget is specified, keep newest messages within that budget
+         let keptMessages;
+         if (tokenBudgetOverride) {
+             let tokenCount = 0;
+             const kept = [];
+             for (let i = messageEntries.length - 1; i >= 0 && kept.length < targetDepth; i--) {
+                 const m = messageEntries[i].parsed?.message ?? messageEntries[i].parsed;
+                 let t = 0;
+                 if (m?.content)
+                     t += Math.ceil(JSON.stringify(m.content).length / 4);
+                 if (m?.textContent)
+                     t += Math.ceil(String(m.textContent).length / 4);
+                 if (m?.toolResults)
+                     t += Math.ceil(JSON.stringify(m.toolResults).length / 4);
+                 if (m?.toolCalls)
+                     t += Math.ceil(JSON.stringify(m.toolCalls).length / 4);
+                 if (tokenCount + t > tokenBudgetOverride && kept.length > 0)
+                     break;
+                 kept.unshift(messageEntries[i]);
+                 tokenCount += t;
+             }
+             keptMessages = kept;
+         }
+         else {
+             keptMessages = messageEntries.slice(-targetDepth);
+         }
+         const keptSet = new Set(keptMessages.map(e => e.line));
+         const metaSet = new Set(metadataEntries.map(e => e.line));
+         const rebuilt = [header];
+         for (const e of entries) {
+             if (metaSet.has(e.line) || keptSet.has(e.line)) {
+                 rebuilt.push(e.line);
+             }
+         }
+         const tmpPath = `${sessionFile}.hm-compact-${process.pid}-${Date.now()}.tmp`;
+         await fs.writeFile(tmpPath, rebuilt.join('\n') + '\n', 'utf-8');
+         await fs.rename(tmpPath, sessionFile);
+         console.log(`[hypermem-plugin] truncateJsonl: ${entries.length} → ${rebuilt.length - 1} entries ` +
+             `(kept ${keptMessages.length} messages + ${metadataEntries.length} metadata, file=${sessionFile.split('/').pop()})`);
+         return true;
+     }
+     catch (err) {
+         // ENOENT is expected when session file doesn't exist yet — not worth logging
+         if (err.code !== 'ENOENT') {
+             console.warn('[hypermem-plugin] truncateJsonl failed (non-fatal):', err.message);
+         }
+         return false;
+     }
+ }
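+ // File shape assumed by truncateJsonlIfNeeded (illustrative; exact entry
+ // schemas are OpenClaw's): line 1 is the session header, kept verbatim, then
+ // one JSON entry per line. Only entries with type === 'message' count toward
+ // targetDepth; everything else (compaction, model_change, …) is always kept.
+ //
+ //   <header line — kept verbatim>
+ //   {"type":"message","message":{ ... }}   // counted, trimmable
+ //   {"type":"compaction", ... }            // metadata — always kept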
1192
+ function createHyperMemEngine() {
1193
+ return {
1194
+ info: {
1195
+ id: 'hypercompositor',
1196
+ name: 'hypermem context engine',
1197
+ version: '0.6.3',
1198
+ // We own compaction — assemble() trims to budget via the compositor safety
1199
+ // valve, so runtime compaction is never needed. compact() handles any
1200
+ // explicit calls by trimming the Redis history window directly.
1201
+ ownsCompaction: true,
1202
+ },
1203
+ /**
1204
+ * Bootstrap: warm Redis session for this agent, register in fleet if needed.
1205
+ *
1206
+ * Idempotent — skips warming if the session is already hot in Redis.
1207
+ * Without this guard, the OpenClaw runtime calls bootstrap() on every turn
1208
+ * (not just session start), causing:
1209
+ * 1. A SQLite read + Redis pipeline push on every message (lane lock)
1210
+ * 2. 250 messages re-pushed to Redis per turn (dedup in pushHistory helps,
1211
+ * but the read cost still runs)
1212
+ * 3. Followup queue drain blocked until warm completes
1213
+ *
1214
+ * With this guard: cold start = full warm; hot session = single EXISTS check.
1215
+ */
1216
+ async bootstrap({ sessionId, sessionKey }) {
1217
+ try {
1218
+ const hm = await getHyperMem();
1219
+ const sk = resolveSessionKey(sessionId, sessionKey);
1220
+ const agentId = extractAgentId(sk);
1221
+ // EC1 JSONL truncation moved to maintain() — bootstrap stays fast.
1222
+ // B2: Session-restart detection — rotateSessionContext hook.
1223
+ // When the runtime starts a new session (new sessionId) for an existing
1224
+ // sessionKey, archive the old context head and create a fresh active
1225
+ // context so the new conversation starts clean. This prevents the new
1226
+ // session from inheriting a stale context head pointer from the prior run.
1227
+ //
1228
+ // Detection: if a conversation row exists for this sessionKey AND the
1229
+ // stored session_id differs from the incoming sessionId (runtime-assigned),
1230
+ // treat this as a session restart.
1231
+ //
1232
+ // Non-fatal: context rotation is best-effort and never blocks bootstrap.
1233
+ if (sessionId) {
1234
+ try {
1235
+ const _msgDb = hm.dbManager.getMessageDb(agentId);
1236
+ if (_msgDb) {
1237
+ const _existingConv = _msgDb.prepare('SELECT id, session_id FROM conversations WHERE session_key = ? LIMIT 1').get(sk);
1238
+ if (_existingConv &&
1239
+ _existingConv.session_id !== null &&
1240
+ _existingConv.session_id !== sessionId) {
1241
+ // Distinct sessionId — this is a session restart for an existing sessionKey.
1242
+ rotateSessionContext(_msgDb, agentId, sk, _existingConv.id);
1243
+ // Update the stored session_id to the new one.
1244
+ try {
1245
+ _msgDb.prepare('UPDATE conversations SET session_id = ? WHERE id = ?')
1246
+ .run(sessionId, _existingConv.id);
1247
+ }
1248
+ catch {
1249
+ // Best-effort — column may not exist in older schemas
1250
+ }
1251
+ console.log(`[hypermem-plugin] bootstrap: session restart detected for ${agentId}/${sk} ` +
1252
+ `(prev session_id=${_existingConv.session_id}, new=${sessionId}) — context rotated`);
1253
+ }
1254
+ else if (_existingConv && _existingConv.session_id === null && sessionId) {
1255
+ // Conversation exists but session_id was never recorded — stamp it now.
1256
+ try {
1257
+ _msgDb.prepare('UPDATE conversations SET session_id = ? WHERE id = ?')
1258
+ .run(sessionId, _existingConv.id);
1259
+ }
1260
+ catch {
1261
+ // Best-effort
1262
+ }
1263
+ }
1264
+ }
1265
+ }
1266
+ catch (rotateErr) {
1267
+ // Non-fatal — never block bootstrap on context rotation
1268
+ console.warn('[hypermem-plugin] bootstrap: rotateSessionContext failed (non-fatal):', rotateErr.message);
1269
+ }
1270
+ }
1271
+ // Fast path: if session already has history in Redis, skip warm entirely.
1272
+ // sessionExists() is a single EXISTS call — sub-millisecond cost.
1273
+ const alreadyWarm = await hm.cache.sessionExists(agentId, sk);
1274
+ if (alreadyWarm) {
1275
+ return { bootstrapped: true };
1276
+ }
1277
+ // In-flight dedup: if a warm is already running for this session key,
1278
+ // reuse that promise instead of launching a second concurrent warm.
1279
+ const inflightKey = `${agentId}::${sk}`;
1280
+ const existing = _warmInFlight.get(inflightKey);
1281
+ if (existing) {
1282
+ await existing;
1283
+ return { bootstrapped: true };
1284
+ }
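+ // Illustrative sketch of the promise-memoization pattern behind
+ // _warmInFlight (not part of the shipped file): the first caller creates
+ // the promise, later callers await the same one.
+ //
+ //   const inflight = new Map();
+ //   async function once(key, fn) {
+ //     const hit = inflight.get(key);
+ //     if (hit) return hit;
+ //     const p = fn().finally(() => inflight.delete(key));
+ //     inflight.set(key, p);
+ //     return p;
+ //   }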
1285
+ // Cold start: warm Redis with the session — pre-loads history + slots
1286
+ // CRIT-002: Load supplemental identity files (MOTIVATIONS.md, STYLE.md) that are
1287
+ // NOT already injected by OpenClaw's contextInjection into the system prompt.
1288
+ // SOUL.md and IDENTITY.md are filtered out here because OpenClaw injects them
1289
+ // via workspace bootstrap — re-injecting them via the identity slot would cause
1290
+ // duplication. Only agent-specific extras (MOTIVATIONS.md, STYLE.md) are included.
1291
+ // Non-fatal: missing files are silently skipped.
1292
+ let identityBlock;
1293
+ try {
1294
+ // Council agents live at workspace-council/<agentId>/
1295
+ // Other agents at workspace/<agentId>/ — try council path first
1296
+ const homedir = os.homedir();
1297
+ const councilPath = path.join(homedir, '.openclaw', 'workspace-council', agentId);
1298
+ const workspacePath = path.join(homedir, '.openclaw', 'workspace', agentId);
1299
+ let wsPath = councilPath;
1300
+ try {
1301
+ await fs.access(councilPath);
1302
+ }
1303
+ catch {
1304
+ wsPath = workspacePath;
1305
+ }
1306
+ const identityFiles = ['SOUL.md', 'IDENTITY.md', 'MOTIVATIONS.md', 'STYLE.md']
1307
+ .filter(f => !OPENCLAW_BOOTSTRAP_FILES.has(f));
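+ // Example: with OPENCLAW_BOOTSTRAP_FILES containing 'SOUL.md' and
+ // 'IDENTITY.md' (per CRIT-002 above), the filter leaves
+ // identityFiles = ['MOTIVATIONS.md', 'STYLE.md'].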
1308
+ const parts = [];
1309
+ for (const fname of identityFiles) {
1310
+ try {
1311
+ const content = await fs.readFile(path.join(wsPath, fname), 'utf-8');
1312
+ if (content.trim())
1313
+ parts.push(content.trim());
1314
+ }
1315
+ catch {
1316
+ // File absent — skip silently
1317
+ }
1318
+ }
1319
+ if (parts.length > 0)
1320
+ identityBlock = parts.join('\n\n');
1321
+ }
1322
+ catch {
1323
+ // Identity load is best-effort — never block bootstrap on this
1324
+ }
1325
+ // Re-derive wsPath for post-warm seeding: the wsPath above is scoped to the identity try block, so it isn't visible here.
1326
+ let _wsPathForSeed;
1327
+ try {
1328
+ const homedir2 = os.homedir();
1329
+ const councilPath2 = path.join(homedir2, '.openclaw', 'workspace-council', agentId);
1330
+ const workspacePath2 = path.join(homedir2, '.openclaw', 'workspace', agentId);
1331
+ try {
1332
+ await fs.access(councilPath2);
1333
+ _wsPathForSeed = councilPath2;
1334
+ }
1335
+ catch {
1336
+ _wsPathForSeed = workspacePath2;
1337
+ }
1338
+ }
1339
+ catch { /* non-fatal */ }
1340
+ const warmPromise = hm.warm(agentId, sk, identityBlock ? { identity: identityBlock } : undefined).finally(() => {
1341
+ _warmInFlight.delete(inflightKey);
1342
+ });
1343
+ _warmInFlight.set(inflightKey, warmPromise);
1344
+ await warmPromise;
1345
+ // ACA doc seeding — fire-and-forget after warm.
1346
+ // Idempotent: WorkspaceSeeder skips files whose hash hasn't changed.
1347
+ // Seeds SOUL.md, TOOLS.md, AGENTS.md, POLICY.md etc. into library.db
1348
+ // doc_chunks so trigger-based retrieval can serve them at compose time.
1349
+ if (_wsPathForSeed) {
1350
+ const wsPathForSeed = _wsPathForSeed;
1351
+ hm.seedWorkspace(wsPathForSeed, { agentId }).then(seedResult => {
1352
+ if (seedResult.totalInserted > 0 || seedResult.reindexed > 0) {
1353
+ console.log(`[hypermem-plugin] bootstrap: seeded workspace docs for ${agentId} ` +
1354
+ `(+${seedResult.totalInserted} chunks, ${seedResult.reindexed} reindexed, ` +
1355
+ `${seedResult.skipped} unchanged, ${seedResult.errors.length} errors)`);
1356
+ }
1357
+ }).catch(err => {
1358
+ console.warn('[hypermem-plugin] bootstrap: workspace seeding failed (non-fatal):', err.message);
1359
+ });
1360
+ }
1361
+ // Post-warm pressure check: if messages.db had accumulated history,
1362
+ // warm() may have loaded the session straight to 80%+. Pre-trim now
1363
+ // so the first turn has headroom instead of starting saturated.
1364
+ // This is the "restart at 98%" failure mode reported by Helm 2026-04-05:
1365
+ // JSONL truncation + Redis flush isn't enough if messages.db is still full
1366
+ // and warm() reloads it. Trim here closes the loop.
1367
+ try {
1368
+ const postWarmTokens = await estimateWindowTokens(hm, agentId, sk);
1369
+ // Use a conservative 90k default; if the session is genuinely large,
1370
+ // we'll overestimate pressure and the guard fires more readily; that's fine.
1371
+ const warmBudget = 90_000;
1372
+ const warmPressure = postWarmTokens / warmBudget;
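+ // Worked example: postWarmTokens = 76_000 gives 76000 / 90000 ≈ 0.84,
+ // which clears the 0.80 threshold below and emits the trim-guard record.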
1373
+ if (warmPressure > 0.80) {
1374
+ // Sprint 2.2a: demote warmstart to guard telemetry.
1375
+ //
1376
+ // Previously this path performed a real trim + invalidateWindow
1377
+ // and emitted `event:'trim'` with path='warmstart'. Assemble
1378
+ // (tool-loop + normal/subagent) is the steady-state owner now,
1379
+ // so the first turn's assemble.* trim absorbs any remaining
1380
+ // post-warm pressure. Keeping the pressure check + threshold
1381
+ // branch here preserves observability via `event:'trim-guard'`
1382
+ // without mutating Redis history or the window cache.
1383
+ guardTelemetry({
1384
+ path: 'warmstart',
1385
+ agentId, sessionKey: sk,
1386
+ reason: 'warmstart-pressure-demoted',
1387
+ });
1388
+ }
1389
+ }
1390
+ catch {
1391
+ // Non-fatal — first turn's tool-loop trim is the fallback
1392
+ }
1393
+ return { bootstrapped: true };
1394
+ }
1395
+ catch (err) {
1396
+ // Bootstrap failure is non-fatal — log and continue
1397
+ console.warn('[hypermem-plugin] bootstrap failed:', err.message);
1398
+ return { bootstrapped: false, reason: err.message };
1399
+ }
1400
+ },
1401
+ /**
1402
+ * Transcript maintenance — runs after bootstrap, successful turns, or compaction.
1403
+ *
1404
+ * Moved from bootstrap: proactive JSONL truncation is forward-looking (helps
1405
+ * next restart, not current session), so it belongs in maintenance, not init.
1406
+ * Also runs tool pair repair on Redis history to fix orphaned pairs from
1407
+ * trim/compaction passes.
1408
+ */
1409
+ async maintain({ sessionId, sessionKey, sessionFile }) {
1410
+ let changed = false;
1411
+ let bytesFreed = 0;
1412
+ let rewrittenEntries = 0;
1413
+ try {
1414
+ const hm = await getHyperMem();
1415
+ const sk = resolveSessionKey(sessionId, sessionKey);
1416
+ const agentId = extractAgentId(sk);
1417
+ // 1. Proactive JSONL truncation (EC1 guard — next restart loads clean)
1418
+ try {
1419
+ const EC1_MAX_MESSAGES = 60;
1420
+ const EC1_TOKEN_BUDGET = Math.floor(128_000 * 0.40);
1421
+ const truncated = await truncateJsonlIfNeeded(sessionFile, EC1_MAX_MESSAGES, false, EC1_TOKEN_BUDGET);
1422
+ if (truncated) {
1423
+ console.log(`[hypermem-plugin] maintain: proactive JSONL trim for ${agentId} ` +
1424
+ `(EC1 guard — next restart will load clean)`);
1425
+ changed = true;
1426
+ }
1427
+ }
1428
+ catch {
1429
+ // Non-fatal — JSONL truncation is best-effort
1430
+ }
1431
+ // 2. Redis history tool pair repair
1432
+ // Compaction and trim passes can orphan tool_call/tool_result pairs.
1433
+ // Anthropic and Gemini reject orphaned pairs with 400 errors.
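+ // Illustrative shape of an orphan (field names assumed, not from the
+ // shipped code): an assistant turn carries toolCalls [{ id: 'call_1' }]
+ // but no later message holds a tool result for 'call_1' because a trim
+ // dropped it. repairToolPairs removes the stranded half so providers
+ // accept the window again.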
1434
+ try {
1435
+ const history = await hm.cache.getHistory(agentId, sk);
1436
+ if (history && history.length > 0) {
1437
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
1438
+ const repairedHistory = repairToolPairs(history);
1439
+ const removedCount = history.length - repairedHistory.length;
1440
+ if (removedCount > 0) {
1441
+ await hm.cache.replaceHistory(agentId, sk, repairedHistory);
1442
+ await hm.cache.invalidateWindow(agentId, sk);
1443
+ console.log(`[hypermem-plugin] maintain: repaired tool pairs in Redis history ` +
1444
+ `for ${agentId} (removed ${removedCount} orphaned messages)`);
1445
+ changed = true;
1446
+ rewrittenEntries += removedCount;
1447
+ // Rough estimate: ~500 bytes per removed message
1448
+ bytesFreed += removedCount * 500;
1449
+ }
1450
+ }
1451
+ }
1452
+ catch {
1453
+ // Non-fatal
1454
+ }
1455
+ return { changed, bytesFreed, rewrittenEntries };
1456
+ }
1457
+ catch (err) {
1458
+ console.warn('[hypermem-plugin] maintain failed:', err.message);
1459
+ return { changed, bytesFreed, rewrittenEntries, reason: err.message };
1460
+ }
1461
+ },
1462
+ /**
1463
+ * Ingest a single message into hypermem's message store.
1464
+ * Skip heartbeats — they're noise in the memory store.
1465
+ */
1466
+ async ingest({ sessionId, sessionKey, message, isHeartbeat }) {
1467
+ if (isHeartbeat) {
1468
+ return { ingested: false };
1469
+ }
1470
+ // Skip system messages — they come from the runtime, not the conversation
1471
+ const msg = message;
1472
+ if (msg.role === 'system') {
1473
+ return { ingested: false };
1474
+ }
1475
+ try {
1476
+ const hm = await getHyperMem();
1477
+ const sk = resolveSessionKey(sessionId, sessionKey);
1478
+ const agentId = extractAgentId(sk);
1479
+ let neutral = toNeutralMessage(msg);
1480
+ // Route to appropriate record method based on role.
1481
+ // User messages are intentionally NOT recorded here — afterTurn() handles
1482
+ // user recording with proper metadata stripping (stripMessageMetadata).
1483
+ // Recording here too causes dual-write: once raw (here), once clean (afterTurn).
1484
+ if (neutral.role === 'user') {
1485
+ return { ingested: false };
1486
+ }
1487
+ // ── Pre-ingestion wave guard ──────────────────────────────────────────
1488
+ // Tool result payloads can be 10k-50k tokens each. When a parallel tool
1489
+ // batch (4-6 results) lands while the session is already at 70%+, storing
1490
+ // full payloads pushes the hot window past the nuclear path threshold
1491
+ // before the next assemble() can trim. Use current hot-window state as
1492
+ // the pressure signal (appropriate here, we're deciding what to write TO
1493
+ // the window).
1494
+ //
1495
+ // Above 70%: truncate toolResult content in transcript, but keep the
1496
+ // full payload durable in tool_artifacts (schema v9). Stub carries
1497
+ // artifactId so the compositor can hydrate on demand.
1498
+ // Above 85%: full stub replacement in transcript, still with artifactId.
1499
+ // At all levels: the full payload is persisted durably. No data loss.
1500
+ const isInboundToolResult = msg.role === 'tool' || msg.role === 'tool_result' || msg.role === 'toolResult';
1501
+ if (isInboundToolResult && neutral.toolResults && neutral.toolResults.length > 0) {
1502
+ const windowTokens = await estimateWindowTokens(hm, agentId, sk);
1503
+ const effectiveBudget = computeEffectiveBudget(undefined);
1504
+ const windowPressure = windowTokens / effectiveBudget;
1505
+ // Error tool results are always preserved intact: they're small and
1506
+ // the model needs the error signal to understand what went wrong.
1507
+ const hasErrorResult = neutral.toolResults.some(tr => tr.isError);
1508
+ // Only apply degradation / artifact capture above elevated pressure.
1509
+ if (windowPressure > 0.70) {
1510
+ const MAX_TOOL_RESULT_CHARS = 500;
1511
+ const highPressure = windowPressure > 0.85;
1512
+ const reason = highPressure ? 'wave_guard_pressure_high' : 'wave_guard_pressure_elevated';
1513
+ // For each non-error tool result, persist the full payload as a
1514
+ // durable artifact first, then rewrite the transcript entry to
1515
+ // either a full stub (high pressure) or a truncated stub with an
1516
+ // artifact pointer (elevated pressure).
1517
+ const rewrittenResults = await Promise.all(neutral.toolResults.map(async (tr) => {
1518
+ if (tr.isError)
1519
+ return tr;
1520
+ const content = typeof tr.content === 'string'
1521
+ ? tr.content
1522
+ : JSON.stringify(tr.content);
1523
+ // At elevated pressure, small payloads pass through unchanged.
1524
+ if (!highPressure && content.length <= MAX_TOOL_RESULT_CHARS) {
1525
+ return tr;
1526
+ }
1527
+ let artifactId;
1528
+ try {
1529
+ const record = await hm.recordToolArtifact(agentId, sk, {
1530
+ toolName: tr.name || 'tool_result',
1531
+ toolCallId: tr.callId || undefined,
1532
+ isError: false,
1533
+ payload: content,
1534
+ summary: content.slice(0, 160),
1535
+ });
1536
+ artifactId = record.id;
1537
+ }
1538
+ catch (artErr) {
1539
+ console.warn('[hypermem-plugin] tool artifact capture failed (non-fatal):', artErr.message);
1540
+ }
1541
+ const summary = highPressure
1542
+ ? `omitted at ${(windowPressure * 100).toFixed(0)}% window pressure`
1543
+ : `truncated at ${(windowPressure * 100).toFixed(0)}% pressure: ${Math.ceil(content.length / 4)} tokens`;
1544
+ return {
1545
+ ...tr,
1546
+ content: formatToolChainStub({
1547
+ name: tr.name || 'tool_result',
1548
+ id: tr.callId || 'unknown',
1549
+ status: 'ejected',
1550
+ reason,
1551
+ summary,
1552
+ artifactId,
1553
+ }),
1554
+ };
1555
+ }));
1556
+ neutral = { ...neutral, toolResults: rewrittenResults };
1557
+ console.log(`[hypermem] ingest wave-guard: ${highPressure ? 'stubbed' : 'truncated'} toolResult (window pressure ${(windowPressure * 100).toFixed(0)}% > ${highPressure ? 85 : 70}%)${hasErrorResult ? ' + error results preserved' : ''} - full payload persisted to tool_artifacts`);
1558
+ }
1559
+ }
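+ // Worked example of the tiers, assuming a 128k effective budget:
+ //   windowTokens 95_000 → pressure ≈ 0.74 (elevated): non-error results
+ //     over 500 chars are stubbed with an artifact pointer, smaller ones
+ //     pass through unchanged;
+ //   windowTokens 112_000 → pressure ≈ 0.875 (high): every non-error
+ //     result is stubbed. In both tiers the full payload stays durable in
+ //     tool_artifacts.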
1560
+ await hm.recordAssistantMessage(agentId, sk, neutral);
1561
+ return { ingested: true };
1562
+ }
1563
+ catch (err) {
1564
+ // Ingest failure is non-fatal — record is best-effort
1565
+ console.warn('[hypermem-plugin] ingest failed:', err.message);
1566
+ return { ingested: false };
1567
+ }
1568
+ },
1569
+ /**
1570
+ * Batch ingest: process multiple messages in a single call.
1571
+ *
1572
+ * Note: when afterTurn() is defined (which it is), the runtime calls
1573
+ * afterTurn instead of ingest/ingestBatch. This is here for interface
1574
+ * completeness and forward compatibility.
1575
+ */
1576
+ async ingestBatch({ sessionId, sessionKey, messages, isHeartbeat }) {
1577
+ if (isHeartbeat) {
1578
+ return { ingestedCount: 0 };
1579
+ }
1580
+ let ingestedCount = 0;
1581
+ try {
1582
+ const hm = await getHyperMem();
1583
+ const sk = resolveSessionKey(sessionId, sessionKey);
1584
+ const agentId = extractAgentId(sk);
1585
+ for (const message of messages) {
1586
+ const msg = message;
1587
+ if (msg.role === 'system')
1588
+ continue;
1589
+ const neutral = toNeutralMessage(msg);
1590
+ if (neutral.role === 'user' && !neutral.toolResults?.length) {
1591
+ await hm.recordUserMessage(agentId, sk, stripMessageMetadata(neutral.textContent ?? ''));
1592
+ }
1593
+ else {
1594
+ await hm.recordAssistantMessage(agentId, sk, neutral);
1595
+ }
1596
+ ingestedCount++;
1597
+ }
1598
+ }
1599
+ catch (err) {
1600
+ console.warn('[hypermem-plugin] ingestBatch failed:', err.message);
1601
+ }
1602
+ return { ingestedCount };
1603
+ },
1604
+ /**
1605
+ * Assemble model context from all four hypermem layers.
1606
+ *
1607
+ * The `messages` param contains the current conversation history from the
1608
+ * runtime. We pass the prompt (latest user message) as the retrieval query,
1609
+ * and let the compositor build the full context.
1610
+ *
1611
+ * Returns:
1612
+ * messages — full assembled message array for the model
1613
+ * estimatedTokens — token count of assembled context
1614
+ * systemPromptAddition — facts/recall/episodes injected before runtime system prompt
1615
+ */
1616
+ async assemble({ sessionId, sessionKey, messages, tokenBudget, prompt, model }) {
1617
+ // ── Tool-loop guard ──────────────────────────────────────────────────────
1618
+ // When the last message is a toolResult, the runtime is mid tool-loop:
1619
+ // the model already has full context from the initial turn assembly.
1620
+ // Re-running the full compose pipeline here is wasteful and, in long
1621
+ // tool loops, causes cumulative context growth that triggers preemptive
1622
+ // context overflow. Pass the messages through as-is.
1623
+ //
1624
+ // Matches OpenClaw's legacy behavior: the legacy engine's assemble() is a
1625
+ // pass-through that never re-injects context on tool-loop calls.
1626
+ const lastMsg = messages[messages.length - 1];
1627
+ const isToolLoop = lastMsg?.role === 'toolResult' || lastMsg?.role === 'tool';
1628
+ // Telemetry: emit one assembleTrace at entry. Path taxonomy:
1629
+ // 'subagent' - session key matches the subagent pattern
1630
+ // 'cold' - normal full-assembly or tool-loop entry (a separate
1631
+ // 'replay' trace is emitted if the cache replay fast
1632
+ // path is taken below)
1633
+ // Zero-cost when HYPERMEM_TELEMETRY !== '1'.
1634
+ //
1635
+ // Trim-ownership turn context (Sprint 2): the turnId is also used to
1636
+ // scope the shared trim-owner claim helper so duplicate steady-state
1637
+ // trims in a single assemble() turn can be detected and (under
1638
+ // NODE_ENV='development') throw loudly. We always allocate the turnId
1639
+ // and open the scope — the map write is cheap and keeps enforcement
1640
+ // active even when telemetry is off. The scope is closed in the
1641
+ // finally block wrapping the full assemble body below.
1642
+ const _asmSk = resolveSessionKey(sessionId, sessionKey);
1643
+ const _asmTurnId = nextTurnId();
1644
+ beginTrimOwnerTurn(_asmSk, _asmTurnId);
1645
+ if (telemetryEnabled()) {
1646
+ const _agentId = extractAgentId(_asmSk);
1647
+ const _entryPath = _asmSk.includes('subagent:')
1648
+ ? 'subagent'
1649
+ : 'cold';
1650
+ assembleTrace({
1651
+ agentId: _agentId,
1652
+ sessionKey: _asmSk,
1653
+ turnId: _asmTurnId,
1654
+ path: _entryPath,
1655
+ toolLoop: isToolLoop,
1656
+ msgCount: messages.length,
1657
+ });
1658
+ }
1659
+ try {
1660
+ if (isToolLoop) {
1661
+ // Tool-loop turns: pass messages through unchanged but still:
1662
+ // 1. Run the trim guardrail — tool loops accumulate history as fast
1663
+ // as regular turns, and the old path skipped trim entirely, leaving
1664
+ // the compaction guard blind (received estimatedTokens=0).
1665
+ // 2. Return a real estimatedTokens = windowTokens + cached overhead,
1666
+ // so the guard has accurate signal and can fire when needed.
1667
+ //
1668
+ // Fix (ingestion-wave): use pressure-tiered trim instead of fixed 80%.
1669
+ // At 91% with 5 parallel web_search calls incoming (~20-30% of budget),
1670
+ // a fixed 80% trim only frees 11% headroom — the wave overflows anyway
1671
+ // and results strip silently. Tier the trim target based on pre-trim
1672
+ // pressure so high-pressure sessions get real headroom before results land.
1673
+ const effectiveBudget = computeEffectiveBudget(tokenBudget, model);
1674
+ try {
1675
+ const hm = await getHyperMem();
1676
+ const sk = resolveSessionKey(sessionId, sessionKey);
1677
+ const agentId = extractAgentId(sk);
1678
+ // ── Image / heavy-content eviction pre-pass ──────────────────────
1679
+ // Evict stale image payloads and large tool results before measuring
1680
+ // pressure. This frees tokens without compaction — images alone can
1681
+ // account for 30%+ of context from a single screenshot 2 turns ago.
1682
+ const evictionCfg = _evictionConfig;
1683
+ const evictionEnabled = evictionCfg?.enabled !== false;
1684
+ let workingMessages = messages;
1685
+ if (evictionEnabled) {
1686
+ const { messages: evicted, stats: evStats } = evictStaleContent(messages, {
1687
+ imageAgeTurns: evictionCfg?.imageAgeTurns,
1688
+ toolResultAgeTurns: evictionCfg?.toolResultAgeTurns,
1689
+ minTokensToEvict: evictionCfg?.minTokensToEvict,
1690
+ keepPreviewChars: evictionCfg?.keepPreviewChars,
1691
+ });
1692
+ workingMessages = evicted;
1693
+ if (evStats.tokensFreed > 0) {
1694
+ console.log(`[hypermem] eviction: ${evStats.imagesEvicted} images, ` +
1695
+ `${evStats.toolResultsEvicted} tool results, ` +
1696
+ `~${evStats.tokensFreed.toLocaleString()} tokens freed`);
1697
+ }
1698
+ }
1699
+ // Measure pressure from the in-memory message array we are actually about
1700
+ // to shape and return. Redis remains a cross-check only.
1701
+ const runtimeTokens = estimateMessageArrayTokens(workingMessages);
1702
+ const redisTokens = await estimateWindowTokens(hm, agentId, sk);
1703
+ const replayRecovery = decideReplayRecovery({
1704
+ currentState: normalizeReplayRecoveryState(await hm.cache.getSlot(agentId, sk, 'replayRecoveryState').catch(() => '')),
1705
+ runtimeTokens,
1706
+ redisTokens,
1707
+ effectiveBudget,
1708
+ });
1709
+ const replayMarkerText = replayRecovery.emittedText;
1710
+ const preTrimTokens = runtimeTokens;
1711
+ const pressure = preTrimTokens / effectiveBudget;
1712
+ // Pressure-tiered trim targets use a single authority: the working
1713
+ // message array. Redis drift is logged as an anomaly, never used as
1714
+ // a trim trigger. Replay recovery gets its own explicit bounded mode
1715
+ // instead of sharing the steady-state pressure heuristics.
1716
+ let trimTarget;
1717
+ if (typeof replayRecovery.trimTargetOverride === 'number') {
1718
+ trimTarget = replayRecovery.trimTargetOverride;
1719
+ }
1720
+ else if (pressure > 0.85) {
1721
+ trimTarget = 0.40; // critical: 60% headroom for incoming wave
1722
+ }
1723
+ else if (pressure > 0.80) {
1724
+ trimTarget = 0.50; // high: 50% headroom
1725
+ }
1726
+ else if (pressure > 0.75) {
1727
+ trimTarget = 0.55; // elevated: 45% headroom
1728
+ }
1729
+ else {
1730
+ trimTarget = 0.65; // normal: 35% headroom
1731
+ }
1732
+ const trimBudget = Math.floor(effectiveBudget * trimTarget);
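+ // Worked example at effectiveBudget = 128_000:
+ //   pressure 0.90 → trimTarget 0.40 → trimBudget 51_200
+ //   pressure 0.82 → trimTarget 0.50 → trimBudget 64_000
+ //   pressure 0.78 → trimTarget 0.55 → trimBudget 70_400
+ //   pressure 0.60 → trimTarget 0.65 → trimBudget 83_200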
1733
+ // Steady-state trim owner claim (Sprint 2.2a): route through the
1734
+ // shared helper keyed by (sessionKey, turnId). In development a
1735
+ // duplicate steady-state trim in the same assemble() turn throws.
1736
+ // In non-development a duplicate returns false; the real trim +
1737
+ // its `event:'trim'` emission are gated on the successful claim so
1738
+ // a duplicate claim is actually suppressed, not just warned.
1739
+ // Compact.* paths are exempt; this path is assemble-owned.
1740
+ const toolLoopClaimed = claimTrimOwner(sk, _asmTurnId, 'assemble.toolLoop');
1741
+ let trimmed = 0;
1742
+ let toolLoopCacheInvalidated = false;
1743
+ if (toolLoopClaimed) {
1744
+ trimmed = await hm.cache.trimHistoryToTokenBudget(agentId, sk, trimBudget);
1745
+ if (trimmed > 0) {
1746
+ await hm.cache.invalidateWindow(agentId, sk);
1747
+ toolLoopCacheInvalidated = true;
1748
+ }
1749
+ if (telemetryEnabled()) {
1750
+ const postTrimTokens = await estimateWindowTokens(hm, agentId, sk).catch(() => 0);
1751
+ trimTelemetry({
1752
+ path: 'assemble.toolLoop',
1753
+ agentId, sessionKey: sk,
1754
+ preTokens: preTrimTokens,
1755
+ postTokens: postTrimTokens,
1756
+ removed: trimmed,
1757
+ cacheInvalidated: toolLoopCacheInvalidated,
1758
+ reason: `pressure=${(pressure * 100).toFixed(1)}%`,
1759
+ });
1760
+ }
1761
+ }
1762
+ else if (telemetryEnabled()) {
1763
+ // Surface the suppressed-duplicate as a bounded guard record so
1764
+ // downstream reporting can see how often the gate fires. No
1765
+ // history or window mutation here.
1766
+ guardTelemetry({
1767
+ path: 'assemble.toolLoop',
1768
+ agentId, sessionKey: sk,
1769
+ reason: 'duplicate-claim-suppressed',
1770
+ });
1771
+ }
1772
+ // Also trim the messages array itself to match the budget.
1773
+ // Redis trim clears the *next* turn's window. This turn's messages are
1774
+ // still the full runtime array — if we return them unchanged at 94%,
1775
+ // OpenClaw strips tool results before sending to the model regardless
1776
+ // of what estimatedTokens says. We need to return a slimmer array now.
1777
+ //
1778
+ // Strategy: keep system/identity messages at the front, then fill from
1779
+ // the back (most recent) until we hit trimBudget. Drop the middle.
1780
+ let trimmedMessages = workingMessages;
1781
+ if (pressure > trimTarget) {
1782
+ const msgArray = workingMessages;
1783
+ // Separate system messages (always keep) from conversation turns
1784
+ const systemMsgs = msgArray.filter(m => m.role === 'system');
1785
+ const convMsgs = msgArray.filter(m => m.role !== 'system');
1786
+ // Pre-process: inline-truncate large tool results before budget-fill drop.
1787
+ // A message with a 40k-token tool result that barely misses budget gets dropped
1788
+ // entirely. Replacing with a placeholder keeps the turn's metadata in context
1789
+ // while freeing the bulk of the tokens.
1790
+ const MAX_INLINE_TOOL_CHARS = 2000; // ~500 tokens
1791
+ // FIX (Bug 3): handle both NeutralMessage format (m.toolResults) and
1792
+ // OpenClaw native format (m.content array with type='tool_result' blocks).
1793
+ // Old guard `if (!m.toolResults)` skipped every native-format message.
1794
+ // Also fixed: replacement must be valid NeutralToolResult { callId, name, content },
1795
+ // not { type, text } which breaks pair-integrity downstream.
1796
+ const processedConvMsgs = convMsgs.map(m => {
1797
+ // NeutralMessage format
1798
+ if (m.toolResults) {
1799
+ const resultStr = JSON.stringify(m.toolResults);
1800
+ if (resultStr.length <= MAX_INLINE_TOOL_CHARS)
1801
+ return m;
1802
+ const firstResult = m.toolResults[0];
1803
+ return {
1804
+ ...m,
1805
+ toolResults: [{
1806
+ callId: firstResult?.callId ?? 'unknown',
1807
+ name: firstResult?.name ?? 'tool',
1808
+ content: `[tool result truncated: ${Math.ceil(resultStr.length / 4)} tokens]`,
1809
+ }],
1810
+ };
1811
+ }
1812
+ // OpenClaw native format
1813
+ if (Array.isArray(m.content)) {
1814
+ const content = m.content;
1815
+ const hasLarge = content.some(c => {
1816
+ if (c.type !== 'tool_result')
1817
+ return false;
1818
+ const val = typeof c.content === 'string' ? c.content : JSON.stringify(c.content ?? '');
1819
+ return val.length > MAX_INLINE_TOOL_CHARS;
1820
+ });
1821
+ if (!hasLarge)
1822
+ return m;
1823
+ return {
1824
+ ...m,
1825
+ content: content.map(c => {
1826
+ if (c.type !== 'tool_result')
1827
+ return c;
1828
+ const val = typeof c.content === 'string' ? c.content : JSON.stringify(c.content ?? '');
1829
+ if (val.length <= MAX_INLINE_TOOL_CHARS)
1830
+ return c;
1831
+ return { ...c, content: `[tool result truncated: ${Math.ceil(val.length / 4)} tokens]` };
1832
+ }),
1833
+ };
1834
+ }
1835
+ return m;
1836
+ });
1837
+ // Fill from the back within budget
1838
+ let budget = trimBudget;
1839
+ // Reserve tokens for system messages using the same accounting
1840
+ // function as the final composed-array estimate.
1841
+ for (const sm of systemMsgs) {
1842
+ budget -= estimateMessageTokens(sm);
1843
+ }
1844
+ const msgCost = (m) => estimateMessageTokens(m);
1845
+ const clusters = clusterTranscriptMessages(processedConvMsgs);
1846
+ const keptClusters = [];
1847
+ const tailCluster = clusters.length > 0 ? clusters[clusters.length - 1] : [];
1848
+ if (tailCluster.length > 0) {
1849
+ budget -= tailCluster.reduce((sum, msg) => sum + msgCost(msg), 0);
1850
+ keptClusters.unshift(tailCluster);
1851
+ }
1852
+ for (let i = clusters.length - 2; i >= 0 && budget > 0; i--) {
1853
+ const cluster = clusters[i];
1854
+ const clusterCost = cluster.reduce((sum, msg) => sum + msgCost(msg), 0);
1855
+ if (budget - clusterCost >= 0) {
1856
+ keptClusters.unshift(cluster);
1857
+ budget -= clusterCost;
1858
+ }
1859
+ }
1860
+ const kept = keptClusters.flat();
1861
+ const droppedCount = processedConvMsgs.length - kept.length;
1862
+ if (droppedCount > 0) {
1863
+ console.log(`[hypermem-plugin] tool-loop trim: pressure=${(pressure * 100).toFixed(1)}% → ` +
1864
+ `target=${(trimTarget * 100).toFixed(0)}% (redis=${trimmed} msgs, messages=${droppedCount} dropped)`);
1865
+ trimmedMessages = [...systemMsgs, ...kept];
1866
+ }
1867
+ else if (trimmed > 0) {
1868
+ console.log(`[hypermem-plugin] tool-loop trim: pressure=${(pressure * 100).toFixed(1)}% → ` +
1869
+ `target=${(trimTarget * 100).toFixed(0)}% (redis=${trimmed} msgs)`);
1870
+ }
1871
+ }
1872
+ else if (trimmed > 0) {
1873
+ console.log(`[hypermem-plugin] tool-loop trim: pressure=${(pressure * 100).toFixed(1)}% → ` +
1874
+ `target=${(trimTarget * 100).toFixed(0)}% (redis=${trimmed} msgs)`);
1875
+ }
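+ // Worked example of the back-fill above (hypothetical cluster costs):
+ // with 10_000 tokens left after the system reserve and clusters costing
+ // [4_000, 3_000, 5_000, 2_000] oldest→newest, the 2_000 tail is always
+ // kept, then 5_000 and 3_000 fit (budget 10_000→8_000→3_000→0), and the
+ // oldest 4_000 cluster is dropped.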
1876
+ // Apply tool gradient to compress large tool results before returning.
1877
+ // Skip if deferToolPruning is enabled — OpenClaw's contextPruning handles it.
1878
+ if (!_deferToolPruning) {
1879
+ // The full compose path runs applyToolGradientToWindow during reshaping;
1880
+ // the tool-loop path was previously skipping this, leaving a 40k-token
1881
+ // web_search result uncompressed every turn.
1882
+ try {
1883
+ const gradientApplied = applyToolGradientToWindow(trimmedMessages, trimBudget);
1884
+ trimmedMessages = gradientApplied;
1885
+ }
1886
+ catch {
1887
+ // Non-fatal: if gradient fails, continue with untouched trimmedMessages
1888
+ }
1889
+ } // end deferToolPruning gate
1890
+ // Repair orphaned tool pairs in the trimmed message list.
1891
+ // In-memory trim (cluster drop) can strand tool_result messages whose
1892
+ // paired tool_use was in a dropped cluster.
1893
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
1894
+ trimmedMessages = repairToolPairs(trimmedMessages);
1895
+ const composedTokens = estimateMessageArrayTokens(trimmedMessages);
1896
+ maybeLogPressureAccountingAnomaly({
1897
+ path: 'assemble.toolLoop',
1898
+ agentId,
1899
+ sessionKey: sk,
1900
+ runtimeTokens: preTrimTokens,
1901
+ redisTokens,
1902
+ composedTokens,
1903
+ budget: effectiveBudget,
1904
+ });
1905
+ await persistReplayRecoveryState(hm, agentId, sk, replayRecovery.nextState);
1906
+ degradationTelemetry({
1907
+ agentId,
1908
+ sessionKey: sk,
1909
+ turnId: _asmTurnId,
1910
+ path: 'toolLoop',
1911
+ toolChainCoEjections: 0,
1912
+ toolChainStubReplacements: 0,
1913
+ artifactDegradations: 0,
1914
+ replayState: replayRecovery.emittedMarker?.state,
1915
+ replayReason: replayRecovery.emittedMarker?.reason,
1916
+ });
1917
+ const overhead = _overheadCache.get(sk) ?? getOverheadFallback();
1918
+ return {
1919
+ messages: trimmedMessages,
1920
+ estimatedTokens: composedTokens + overhead,
1921
+ systemPromptAddition: replayMarkerText || undefined,
1922
+ };
1923
+ }
1924
+ catch {
1925
+ // Non-fatal: return conservative estimate so guard doesn't go blind
1926
+ return {
1927
+ messages: messages,
1928
+ estimatedTokens: Math.floor(effectiveBudget * 0.8),
1929
+ };
1930
+ }
1931
+ }
1932
+ try {
1933
+ const hm = await getHyperMem();
1934
+ const sk = resolveSessionKey(sessionId, sessionKey);
1935
+ const agentId = extractAgentId(sk);
1936
+ // ── Subagent warming control ─────────────────────────────────────────
1937
+ // Detect subagent sessions by key pattern and apply warming mode.
1938
+ // 'off' = passthrough (no HyperMem context at all)
1939
+ // 'light' = facts + history only (skip library/wiki/semantic/keystones/doc chunks)
1940
+ // 'full' = standard compositor pipeline
1941
+ const isSubagent = sk.includes('subagent:');
1942
+ if (isSubagent && _subagentWarming === 'off') {
1943
+ console.log(`[hypermem-plugin] assemble: subagent warming=off, passthrough (sk: ${sk})`);
1944
+ return {
1945
+ messages: messages,
1946
+ estimatedTokens: estimateMessageArrayTokens(messages),
1947
+ };
1948
+ }
1949
+ if (isSubagent) {
1950
+ console.log(`[hypermem-plugin] assemble: subagent warming=${_subagentWarming} (sk: ${sk})`);
1951
+ }
1952
+ // Resolve agent tier from fleet store (for doc chunk tier filtering)
1953
+ let tier;
1954
+ try {
1955
+ const agent = _fleetStore?.getAgent(agentId);
1956
+ tier = agent?.tier;
1957
+ }
1958
+ catch {
1959
+ // Non-fatal — tier filtering just won't apply
1960
+ }
1961
+ // historyDepth: derive a safe message count from the token budget.
1962
+ // Uses 65% of the budget for history; the remainder goes to L3/L4
1963
+ // context slots. Floor at 50, ceiling at 250.
1964
+ // This is a preventive guard — the compositor's safety valve still trims
1965
+ // by token count post-assembly, but limiting depth up front avoids
1966
+ // feeding the compactor a window it can't reduce.
1967
+ const effectiveBudget = computeEffectiveBudget(tokenBudget, model);
1968
+ const historyDepth = Math.min(250, Math.max(50, Math.floor((effectiveBudget * 0.65) / 500)));
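+ // e.g. effectiveBudget = 128_000 → floor(128000 * 0.65 / 500) = 166,
+ // inside the [50, 250] clamp, so historyDepth = 166.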
1969
+ const runtimeEntryTokens = estimateMessageArrayTokens(messages);
1970
+ const redisEntryTokens = await estimateWindowTokens(hm, agentId, sk);
1971
+ const replayRecovery = decideReplayRecovery({
1972
+ currentState: normalizeReplayRecoveryState(await hm.cache.getSlot(agentId, sk, 'replayRecoveryState').catch(() => '')),
1973
+ runtimeTokens: runtimeEntryTokens,
1974
+ redisTokens: redisEntryTokens,
1975
+ effectiveBudget,
1976
+ });
1977
+ const replayHistoryDepth = replayRecovery.active && replayRecovery.historyDepthCap
1978
+ ? Math.min(historyDepth, replayRecovery.historyDepthCap)
1979
+ : historyDepth;
1980
+ // ── Redis guardrail: trim history to token budget ────────────────────
1981
+ // Prevents model-switch bloat: if an agent previously ran on a larger
1982
+ // context window, Redis history may exceed the current model's budget.
1983
+ // Trimming here (before compose) ensures the compositor never sees a
1984
+ // history window it can't fit.
1985
+ //
1986
+ // Sprint 3 (AfterTurn Rebuild/Trim Loop Fix): the assemble.normal trim now
1987
+ // first checks whether the window is already within trimBudget. When
1988
+ // afterTurn's refreshRedisGradient caps the rebuilt window at the same
1989
+ // 0.65 fraction (Sprint 3 compositor fix), the steady-state path will
1990
+ // find preTokens <= trimBudget and skip the trim entirely. The trim only
1991
+ // fires when real excess exists (pressure spikes, model switch, cold start),
1992
+ // breaking the unconditional afterTurn→assemble trim churn loop.
1993
+ //
1994
+ // B3: Batch trim with growth allowance.
1995
+ // Trim only fires when the window has grown past the soft target by more
1996
+ // than TRIM_GROWTH_THRESHOLD (5%). When it does fire, trim to
1997
+ // softTarget * (1 - TRIM_HEADROOM_FRACTION) so the window has room to
1998
+ // grow for several turns before the next trim fires. This eliminates
1999
+ // per-turn trim churn from minor natural growth (short assistant replies,
2000
+ // small tool outputs) while still catching genuine pressure spikes.
2001
+ try {
2002
+ const { softBudget: trimSoftBudget, triggerBudget: trimTriggerBudget, targetBudget: trimTargetBudget, } = resolveTrimBudgets(effectiveBudget);
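+ // Plausible shape of resolveTrimBudgets, inferred from the B3 comment
+ // above (the constants are the real exports; the arithmetic is an
+ // assumption):
+ //
+ //   const softBudget = Math.floor(budget * TRIM_SOFT_TARGET); // 0.65
+ //   const triggerBudget = Math.floor(softBudget * (1 + TRIM_GROWTH_THRESHOLD)); // +5%
+ //   const targetBudget = Math.floor(softBudget * (1 - TRIM_HEADROOM_FRACTION));
+ //
+ // At budget 128_000 that is soft ≈ 83_200 and trigger ≈ 87_360, with the
+ // target sitting below soft by the headroom fraction.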
2003
+ // Always read preTokens so we can make the skip decision and emit telemetry.
2004
+ const preTokensNormal = await estimateWindowTokens(hm, agentId, sk).catch(() => 0);
2005
+ const normalPath = isSubagent ? 'assemble.subagent' : 'assemble.normal';
2006
+ // B3: Skip trim when window is within the growth-allowance envelope.
2007
+ // This replaces the Sprint 3 `windowAlreadyFits` check (which only
2008
+ // skipped at exactly ≤ softTarget). The growth allowance lets the
2009
+ // window float up to +5% before triggering, avoiding trim on every
2010
+ // turn that ends a few tokens above 65%.
2011
+ const withinGrowthEnvelope = preTokensNormal > 0 && preTokensNormal <= trimTriggerBudget;
2012
+ if (withinGrowthEnvelope) {
2013
+ if (telemetryEnabled()) {
2014
+ guardTelemetry({
2015
+ path: normalPath,
2016
+ agentId, sessionKey: sk,
2017
+ reason: 'window-within-budget-skip',
2018
+ });
2019
+ }
2020
+ }
2021
+ else {
2022
+ // Steady-state trim owner claim (Sprint 2.2a): route assemble.normal
2023
+ // and assemble.subagent through the shared helper keyed by
2024
+ // (sessionKey, _asmTurnId). The real trim + its `event:'trim'`
2025
+ // emission are gated on the claim so a duplicate steady-state claim
2026
+ // in the same turn is actually suppressed in production, not just
2027
+ // warned. In development the duplicate throws.
2028
+ const normalClaimed = claimTrimOwner(sk, _asmTurnId, normalPath);
2029
+ if (normalClaimed) {
2030
+ // B3: trim to the headroom target (below soft target) so the
2031
+ // window has room to grow before the next trim fires.
2032
+ const trimmed = await hm.cache.trimHistoryToTokenBudget(agentId, sk, trimTargetBudget);
2033
+ let normalCacheInvalidated = false;
2034
+ if (trimmed > 0) {
2035
+ // Invalidate window cache since history changed
2036
+ await hm.cache.invalidateWindow(agentId, sk);
2037
+ normalCacheInvalidated = true;
2038
+ }
2039
+ if (telemetryEnabled()) {
2040
+ const postTokensNormal = await estimateWindowTokens(hm, agentId, sk).catch(() => 0);
2041
+ trimTelemetry({
2042
+ path: normalPath,
2043
+ agentId, sessionKey: sk,
2044
+ preTokens: preTokensNormal,
2045
+ postTokens: postTokensNormal,
2046
+ removed: trimmed,
2047
+ cacheInvalidated: normalCacheInvalidated,
2048
+ reason: `b3:trigger=${trimTriggerBudget},target=${trimTargetBudget}`,
2049
+ });
2050
+ }
2051
+ }
2052
+ else if (telemetryEnabled()) {
2053
+ guardTelemetry({
2054
+ path: normalPath,
2055
+ agentId, sessionKey: sk,
2056
+ reason: 'duplicate-claim-suppressed',
2057
+ });
2058
+ }
2059
+ }
2060
+ }
2061
+ catch (trimErr) {
2062
+ // Non-fatal — compositor's budget-fit walk is the second line of defense
2063
+ console.warn('[hypermem-plugin] assemble: Redis trim failed (non-fatal):', trimErr.message);
2064
+ }
2065
+ // ── Budget downshift: proactive reshape pass ───────────────────────────────────────
2066
+ // If this session previously composed at a higher token budget (e.g. gpt-5.4
2067
+ // → claude-sonnet model switch), the Redis window is still sized for the old
2068
+ // budget. trimHistoryToTokenBudget above trims by count but skips tool
2069
+ // gradient logic. A downshift >10% triggers a full reshape: apply tool
2070
+ // gradient at the new budget + trim, then write back before compose runs.
2071
+ // This prevents several turns of compaction churn after a model switch.
2072
+ //
2073
+ // Bug fix: previously read from getWindow() which is always null here
2074
+ // (afterTurn invalidates it every turn). Also fixed: was doing setWindow()
2075
+ // then invalidateWindow() which is a write-then-delete no-op. Now reads
2076
+ // from history list and writes back via replaceHistory().
2077
+ let lastState = null;
2078
+ try {
2079
+ lastState = await hm.cache.getModelState(agentId, sk);
2080
+ const DOWNSHIFT_THRESHOLD = 0.10;
2081
+ const isDownshift = lastState &&
2082
+ (lastState.tokenBudget - effectiveBudget) / lastState.tokenBudget > DOWNSHIFT_THRESHOLD;
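+ // Worked example (hypothetical figures): lastState.tokenBudget = 200_000
+ // and effectiveBudget = 128_000 → (200000 - 128000) / 200000 = 0.36,
+ // well past the 0.10 threshold, so the downshift branch fires.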
2083
+ if (isDownshift && !_deferToolPruning) {
2084
+ // Sprint 2.2a: demote reshape to guard telemetry.
2085
+ //
2086
+ // Previously this branch re-ran applyToolGradientToWindow, wrote
2087
+ // back via replaceHistory, invalidated the window cache, and
2088
+ // stamped `reshapedAt` on model state. Assemble.* is the
2089
+ // steady-state owner, so the subsequent assemble.normal /
2090
+ // assemble.subagent trim (gated by claimTrimOwner) handles any
2091
+ // real downshift pressure. Keeping the detection branch preserves
2092
+ // observability; guardTelemetry records the would-be-reshape
2093
+ // without mutating history, the window, or model state.
2094
+ //
2095
+ // CRITICAL: do NOT call setModelState({ reshapedAt, … }) here.
2096
+ // compact() skips when reshapedAt is recent, which would cause it
2097
+ // to skip on the strength of a reshape that never ran.
2098
+ guardTelemetry({
2099
+ path: 'reshape',
2100
+ agentId, sessionKey: sk,
2101
+ reason: 'reshape-downshift-demoted',
2102
+ });
2103
+ }
2104
+ }
2105
+ catch (reshapeErr) {
2106
+ // Non-fatal — compositor safety valve is still the last defense
2107
+ console.warn('[hypermem-plugin] assemble: reshape pass failed (non-fatal):', reshapeErr.message);
2108
+ }
2109
+ // ── Cache replay fast path ─────────────────────────────────────────────
2110
+ // If the session was active recently, return the cached contextBlock
2111
+ // (systemPromptAddition) to produce a byte-identical system prompt and
2112
+ // hit the provider prefix cache (Anthropic / OpenAI).
2113
+ // The message window is always rebuilt fresh — only the compositor output
2114
+ // (contextBlock) is cached, since that's what determines prefix identity.
2115
+ const cacheReplayThresholdMs = _cacheReplayThresholdMs;
2116
+ let cachedContextBlock = null;
2117
+ if (cacheReplayThresholdMs > 0 && !replayRecovery.shouldSkipCacheReplay) {
2118
+ try {
2119
+ const cachedAt = await hm.cache.getSlot(agentId, sk, 'assemblyContextAt');
2120
+ if (cachedAt && Date.now() - parseInt(cachedAt) < cacheReplayThresholdMs) {
2121
+ cachedContextBlock = await hm.cache.getSlot(agentId, sk, 'assemblyContextBlock');
2122
+ if (cachedContextBlock) {
2123
+ console.log(`[hypermem-plugin] assemble: cache replay hit for ${agentId} (${Math.round((Date.now() - parseInt(cachedAt)) / 1000)}s old)`);
2124
+ if (telemetryEnabled()) {
2125
+ assembleTrace({
2126
+ agentId,
2127
+ sessionKey: sk,
2128
+ turnId: _asmTurnId,
2129
+ path: 'replay',
2130
+ toolLoop: isToolLoop,
2131
+ msgCount: messages.length,
2132
+ });
2133
+ }
2134
+ }
2135
+ }
2136
+ }
2137
+ catch {
2138
+ // Non-fatal — fall through to full assembly
2139
+ }
2140
+ }
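+ // Worked example (hypothetical threshold): with cacheReplayThresholdMs =
+ // 60_000, a contextBlock cached 45s ago replays (45_000 < 60_000) and doc
+ // retrieval is skipped below; one cached 90s ago falls through to a full
+ // compose, which rewrites both slots for the next turn.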
2141
+ // Subagent light mode: skip library/wiki/semantic/keystones/doc chunks.
2142
+ // Keeps: system, identity, history, active facts, output profile, tool gradient.
2143
+ const subagentLight = isSubagent && _subagentWarming === 'light';
2144
+ const request = {
2145
+ agentId,
2146
+ sessionKey: sk,
2147
+ tokenBudget: effectiveBudget,
2148
+ historyDepth: lastState?.historyDepth && lastState.historyDepth < replayHistoryDepth
2149
+ ? lastState.historyDepth
2150
+ : replayHistoryDepth,
2151
+ tier,
2152
+ model, // pass model for provider detection
2153
+ includeDocChunks: subagentLight ? false : !cachedContextBlock, // skip doc retrieval on cache hit or subagent light
2154
+ includeLibrary: subagentLight ? false : undefined, // skip wiki/knowledge/preferences
2155
+ includeSemanticRecall: subagentLight ? false : undefined, // skip vector/FTS recall
2156
+ includeKeystones: subagentLight ? false : undefined, // skip keystone history injection
2157
+ prompt,
2158
+ skipProviderTranslation: true, // runtime handles provider translation
2159
+ };
2160
+ const result = await hm.compose(request);
2161
+ degradationTelemetry({
2162
+ agentId,
2163
+ sessionKey: sk,
2164
+ turnId: _asmTurnId,
2165
+ path: 'compose',
2166
+ toolChainCoEjections: result.diagnostics?.toolChainCoEjections ?? 0,
2167
+ toolChainStubReplacements: result.diagnostics?.toolChainStubReplacements ?? 0,
2168
+ artifactDegradations: result.diagnostics?.artifactDegradations ?? 0,
2169
+ artifactOversizeThresholdTokens: result.diagnostics?.artifactOversizeThresholdTokens,
2170
+ replayState: replayRecovery.emittedMarker?.state,
2171
+ replayReason: replayRecovery.emittedMarker?.reason,
2172
+ });
2173
+ // Use cached contextBlock if available (cache replay), otherwise use fresh result.
2174
+ // After a full compose, write the new contextBlock to cache for the next turn.
2175
+ if (cachedContextBlock) {
2176
+ result.contextBlock = cachedContextBlock;
2177
+ }
2178
+ else if (result.contextBlock && cacheReplayThresholdMs > 0 && !replayRecovery.shouldSkipCacheReplay && !replayRecovery.emittedText) {
2179
+ // Write cache async — never block the assemble() return on this
2180
+ const blockToCache = result.contextBlock;
2181
+ const nowStr = Date.now().toString();
2182
+ const ttlSec = Math.ceil((cacheReplayThresholdMs * 2) / 1000);
2183
+ Promise.all([
2184
+ hm.cache.setSlot(agentId, sk, 'assemblyContextBlock', blockToCache),
2185
+ hm.cache.setSlot(agentId, sk, 'assemblyContextAt', nowStr),
2186
+ ]).then(() => {
2187
+ // Note: ttlSec is computed above but not applied; setSlot uses the
2188
+ // sessionTTL from RedisLayer config, which is an acceptable fallback.
2189
+ }).catch(() => { });
2190
+ }
2191
+ if (replayRecovery.emittedText) {
2192
+ result.contextBlock = result.contextBlock
2193
+ ? `${result.contextBlock}
2194
+ ${replayRecovery.emittedText}`
2195
+ : replayRecovery.emittedText;
2196
+ }
2197
+ // Convert NeutralMessage[] → AgentMessage[] for the OpenClaw runtime.
2198
+ // neutralToAgentMessage can return a single message or an array (tool results
2199
+ // expand to individual ToolResultMessage objects), so we flatMap.
2200
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
2201
+ let outputMessages = result.messages
2202
+ .filter(m => m.role != null)
2203
+ .flatMap(m => neutralToAgentMessage(m));
2204
+ const neutralPairStats = collectNeutralToolPairStats(result.messages);
2205
+ const agentPairStats = collectAgentToolPairStats(outputMessages);
2206
+ const toolPairAnomaly = neutralPairStats.missingToolResultCount > 0 ||
2207
+ neutralPairStats.orphanToolResultCount > 0 ||
2208
+ agentPairStats.missingToolResultCount > 0 ||
2209
+ agentPairStats.orphanToolResultCount > 0 ||
2210
+ agentPairStats.syntheticNoResultCount > 0
2211
+ ? {
2212
+ stage: 'assemble',
2213
+ neutralMissingToolResultIds: neutralPairStats.missingToolResultIds.slice(0, 10),
2214
+ neutralOrphanToolResultIds: neutralPairStats.orphanToolResultIds.slice(0, 10),
2215
+ agentMissingToolResultIds: agentPairStats.missingToolResultIds.slice(0, 10),
2216
+ agentOrphanToolResultIds: agentPairStats.orphanToolResultIds.slice(0, 10),
2217
+ syntheticNoResultCount: agentPairStats.syntheticNoResultCount,
2218
+ }
2219
+ : undefined;
2220
+ await bumpToolPairMetrics(hm, agentId, sk, {
2221
+ composeCount: 1,
2222
+ preBridgeMissingToolResults: neutralPairStats.missingToolResultCount,
2223
+ preBridgeOrphanToolResults: neutralPairStats.orphanToolResultCount,
2224
+ postBridgeMissingToolResults: agentPairStats.missingToolResultCount,
2225
+ postBridgeOrphanToolResults: agentPairStats.orphanToolResultCount,
2226
+ }, toolPairAnomaly);
2227
+ if (toolPairAnomaly) {
2228
+ console.warn(`[hypermem-plugin] tool-pair-integrity: ${agentId}/${sk} ` +
2229
+ `neutralMissing=${neutralPairStats.missingToolResultCount} neutralOrphan=${neutralPairStats.orphanToolResultCount} ` +
2230
+ `agentMissing=${agentPairStats.missingToolResultCount} agentOrphan=${agentPairStats.orphanToolResultCount} ` +
2231
+ `synthetic=${agentPairStats.syntheticNoResultCount}`);
2232
+ }
2233
+ // Repair orphaned tool pairs before returning to provider.
2234
+ // compaction/trim passes can remove tool_use blocks without removing their
2235
+ // paired tool_result messages — Anthropic and Gemini reject these with 400.
2236
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
2237
+ outputMessages = repairToolPairs(outputMessages);
2238
+ // Cache overhead for tool-loop turns: contextBlock tokens (chars/4) +
2239
+ // tier-aware estimate for runtime system prompt (SOUL.md, identity,
2240
+ // workspace files — not visible from inside the plugin).
2241
+ const contextBlockTokens = Math.ceil((result.contextBlock?.length ?? 0) / 4);
2242
+ const runtimeSystemTokens = getOverheadFallback(tier);
2243
+ _overheadCache.set(sk, contextBlockTokens + runtimeSystemTokens);
2244
+ await persistReplayRecoveryState(hm, agentId, sk, replayRecovery.nextState);
2245
+ // Update model state for downshift detection on next turn
2246
+ try {
2247
+ await hm.cache.setModelState(agentId, sk, {
2248
+ model: model ?? 'unknown',
2249
+ tokenBudget: effectiveBudget,
2250
+ composedAt: new Date().toISOString(),
2251
+ historyDepth,
2252
+ });
2253
+ }
2254
+ catch {
2255
+ // Non-fatal
2256
+ }
2257
+ return {
2258
+ messages: outputMessages,
2259
+ estimatedTokens: result.tokenCount ?? 0,
2260
+ // systemPromptAddition injects hypermem context before the runtime system prompt.
2261
+ // This is the facts/recall/episodes block assembled by the compositor.
2262
+ systemPromptAddition: result.contextBlock || undefined,
2263
+ };
2264
+ }
2265
+ catch (err) {
2266
+ console.error('[hypermem-plugin] assemble error (stack):', err.stack ?? err);
2267
+ throw err; // Re-throw so the runtime falls back to legacy pipeline
2268
+ }
2269
+ }
2270
+ finally {
2271
+ // End the trim-owner turn scope opened at assemble entry. Paired
2272
+ // with beginTrimOwnerTurn(_asmSk, _asmTurnId) above; runs on every
2273
+ // exit path (normal return, tool-loop return, replay return, error
2274
+ // re-throw). Turn-scoped keying (Sprint 2.2a) means this only
2275
+ // removes THIS turn's slot, so concurrent same-session turns remain
2276
+ // isolated instead of clobbering each other.
2277
+ endTrimOwnerTurn(_asmSk, _asmTurnId);
2278
+ }
2279
+ },
2280
+ /**
2281
+ * Compact context. hypermem owns compaction.
2282
+ *
2283
+ * Strategy: assemble() already trims the composed message list to the token
2284
+ * budget via the compositor safety valve, so the model never receives an
2285
+ * oversized context. compact() is called by the runtime when it detects
2286
+ * overflow — at that point we:
2287
+ * 1. Estimate tokens in the current Redis history window
2288
+ * 2. If already under budget (compositor already handled it), report clean
2289
+ * 3. If over budget (e.g. window was built before budget cap was applied),
2290
+ * trim the Redis window to a safe depth and invalidate the compose cache
2291
+ *
2292
+ * This prevents the runtime from running its own LLM-summarization compaction
2293
+ * pass, which would destroy message history we're explicitly managing.
2294
+ */
2295
+ async compact({ sessionId, sessionKey, sessionFile, tokenBudget, currentTokenCount }) {
2296
+ try {
2297
+ const hm = await getHyperMem();
2298
+ const sk = resolveSessionKey(sessionId, sessionKey);
2299
+ const agentId = extractAgentId(sk);
2300
+ // Skip if a reshape pass just ran (within last 30s) — avoid double-processing
2301
+ // Cache modelState here for reuse in density-aware JSONL truncation below.
2302
+ let cachedModelState = null;
2303
+ let model;
2304
+ try {
2305
+ cachedModelState = await hm.cache.getModelState(agentId, sk);
2306
+ model = cachedModelState?.model;
2307
+ if (cachedModelState?.reshapedAt) {
2308
+ const reshapeAge = Date.now() - new Date(cachedModelState.reshapedAt).getTime();
2309
+ // Only skip if session is NOT critically full — nuclear path must bypass this guard.
2310
+ // If currentTokenCount > 85% budget, fall through to nuclear compaction below.
2311
+ const isCriticallyFull = currentTokenCount != null &&
2312
+ currentTokenCount > (computeEffectiveBudget(tokenBudget, model) * 0.85);
2313
+ if (reshapeAge < 30_000 && !isCriticallyFull) {
2314
+ console.log(`[hypermem-plugin] compact: skipping — reshape pass ran ${reshapeAge}ms ago`);
2315
+ return { ok: true, compacted: false, reason: 'reshape-recently-ran' };
2316
+ }
2317
+ }
2318
+ }
2319
+ catch {
2320
+ // Non-fatal — proceed with compaction
2321
+ }
2322
+ // Re-estimate from the actual Redis window.
2323
+ // The runtime's estimate (currentTokenCount) includes the full inbound message
2324
+ // and system prompt — our estimate only covers the history window. When they
2325
+ // diverge significantly upward, the difference is "inbound overhead" consuming
2326
+ // budget the history is competing for. We trim history to make room.
2327
+ const effectiveBudget = computeEffectiveBudget(tokenBudget, model);
2328
+ const tokensBefore = await estimateWindowTokens(hm, agentId, sk);
2329
+ // Target depth for both Redis trimming and JSONL truncation.
2330
+ // Target 50% of budget capacity, assume ~500 tokens/message average.
2331
+ const targetDepth = Math.max(20, Math.floor((effectiveBudget * 0.5) / 500));
2332
+ // ── NUCLEAR COMPACTION ────────────────────────────────────────────────
2333
+ // When the runtime reports the session is ≥85% full, trust that signal
2334
+ // over our Redis estimate. The JSONL accumulates full tool results that
2335
+ // the gradient never sees, so Redis can look fine while the transcript
2336
+ // is genuinely saturated. Normal compact() returns compacted=false in
2337
+ // this scenario ("within_budget"), which gives the runtime zero relief.
2338
+ //
2339
+ // Also triggered when reshape ran recently but the session is still
2340
+ // critically full — bypass the reshape guard in that case.
2341
+ const NUCLEAR_THRESHOLD = 0.85;
2342
+ const isNuclear = currentTokenCount != null && currentTokenCount > effectiveBudget * NUCLEAR_THRESHOLD;
2343
+ if (isNuclear) {
2344
+ // Cut deep: target 20% of normal depth = ~25 messages for a 128k session.
2345
+ // Keeps very recent context, clears the long tool-heavy tail.
2346
+ const nuclearDepth = Math.max(10, Math.floor(targetDepth * 0.20));
2347
+ const nuclearBudget = Math.floor(effectiveBudget * 0.25);
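+ // Worked example at effectiveBudget = 128_000: targetDepth above is
+ // max(20, floor(64000 / 500)) = 128, so nuclearDepth = max(10,
+ // floor(128 * 0.20)) = 25 (the "~25 messages for a 128k session" in the
+ // comment) and nuclearBudget = 32_000 tokens.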
2348
+ const nuclearRemoved = await hm.cache.trimHistoryToTokenBudget(agentId, sk, nuclearBudget);
2349
+ await hm.cache.invalidateWindow(agentId, sk).catch(() => { });
2350
+ await truncateJsonlIfNeeded(sessionFile, nuclearDepth, true);
2351
+ const tokensAfter = await estimateWindowTokens(hm, agentId, sk);
2352
+ if (telemetryEnabled()) {
2353
+ trimTelemetry({
2354
+ path: 'compact.nuclear',
2355
+ agentId, sessionKey: sk,
2356
+ preTokens: tokensBefore,
2357
+ postTokens: tokensAfter,
2358
+ removed: nuclearRemoved,
2359
+ cacheInvalidated: true,
2360
+ reason: `currentTokenCount=${currentTokenCount}/${effectiveBudget}`,
2361
+ });
2362
+ }
2363
+ console.log(`[hypermem-plugin] compact: NUCLEAR — session at ${currentTokenCount}/${effectiveBudget} tokens ` +
2364
+ `(${Math.round((currentTokenCount / effectiveBudget) * 100)}% full), ` +
2365
+ `deep-trimmed JSONL to ${nuclearDepth} messages, Redis ${tokensBefore}→${tokensAfter} tokens`);
2366
+ return { ok: true, compacted: true, result: { tokensBefore, tokensAfter } };
2367
+ }
2368
+ // ── END NUCLEAR ───────────────────────────────────────────────────────
2369
+ // Detect large-inbound-content scenario: runtime total significantly exceeds
2370
+ // our history estimate. The gap is the inbound message + system prompt overhead.
2371
+ // Trim history to leave room for it even if history alone is within budget.
2372
+ if (currentTokenCount != null && currentTokenCount > tokensBefore) {
2373
+ const inboundOverhead = currentTokenCount - tokensBefore;
2374
+ if (inboundOverhead > effectiveBudget * 0.15) {
2375
+ // Large inbound content (document review, big tool result, etc.)
2376
+ // Trim history so history + inbound fits within 85% of budget.
2377
+ const budgetForHistory = Math.floor(effectiveBudget * 0.85) - inboundOverhead;
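+ // Worked example: effectiveBudget = 128_000 with inboundOverhead = 30_000
+ // (past the 15% gate of 19_200) → budgetForHistory =
+ // floor(128000 * 0.85) - 30000 = 78_800 tokens left for history.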
2378
+ if (budgetForHistory < tokensBefore && budgetForHistory > 0) {
2379
+ const historyTrimmed = await hm.cache.trimHistoryToTokenBudget(agentId, sk, budgetForHistory);
2380
+ await hm.cache.invalidateWindow(agentId, sk).catch(() => { });
2381
+ const tokensAfter = await estimateWindowTokens(hm, agentId, sk);
2382
+ await truncateJsonlIfNeeded(sessionFile, targetDepth);
2383
+ if (telemetryEnabled()) {
2384
+ trimTelemetry({
2385
+ path: 'compact.history',
2386
+ agentId, sessionKey: sk,
2387
+ preTokens: tokensBefore,
2388
+ postTokens: tokensAfter,
2389
+ removed: historyTrimmed,
2390
+ cacheInvalidated: true,
2391
+ reason: `inbound-overhead=${inboundOverhead}`,
2392
+ });
2393
+ }
2394
+ console.log(`[hypermem-plugin] compact: large-inbound-content (gap=${inboundOverhead} tokens), ` +
2395
+ `trimmed history ${tokensBefore}→${tokensAfter} (budget-for-history=${budgetForHistory}, trimmed=${historyTrimmed} messages)`);
2396
+ return { ok: true, compacted: true, result: { tokensBefore, tokensAfter } };
2397
+ }
2398
+ }
2399
+ }
2400
+ // Under 70% of budget by our own Redis estimate.
2401
+ // We still need to check the JSONL — the runtime's overflow is based on JSONL
2402
+ // message count, not Redis. If the JSONL is bloated (> targetDepth * 1.5 messages)
2403
+ // we truncate it even if Redis looks fine, then return compacted=true so the
2404
+ // runtime retries with the trimmed file instead of killing the session.
2405
+ if (tokensBefore <= effectiveBudget * 0.7) {
2406
+ const jsonlTruncated = await truncateJsonlIfNeeded(sessionFile, targetDepth);
2407
+ if (jsonlTruncated) {
2408
+ console.log(`[hypermem-plugin] compact: Redis within_budget but JSONL was bloated — truncated to ${targetDepth} messages`);
2409
+ return {
2410
+ ok: true,
2411
+ compacted: true,
2412
+ result: { tokensBefore, tokensAfter: tokensBefore },
2413
+ };
2414
+ }
2415
+ return {
2416
+ ok: true,
2417
+ compacted: false,
2418
+ reason: 'within_budget',
2419
+ result: { tokensBefore, tokensAfter: tokensBefore },
2420
+ };
2421
+ }
2422
+ // Over budget: trim both the window cache AND the history list.
2423
+ // Bug fix: if no window cache exists (fresh/never-compacted session),
2424
+ // compact() was only trying to trim the window (which was null) and
2425
+ // the history list was left untouched → 0 actual trimming → timeout
2426
+ // compaction death spiral.
2427
+ const window = await hm.cache.getWindow(agentId, sk);
2428
+ if (window && window.length > targetDepth) {
2429
+ const trimmed = window.slice(-targetDepth);
2430
+ await hm.cache.setWindow(agentId, sk, trimmed);
2431
+ }
2432
+ // Always trim the underlying history list — this is the source of truth
2433
+ // when no window cache exists. trimHistoryToTokenBudget walks newest→oldest
2434
+ // and LTRIMs everything beyond the budget.
2435
+ const trimBudget = Math.floor(effectiveBudget * 0.5);
2436
+ const historyTrimmed = await hm.cache.trimHistoryToTokenBudget(agentId, sk, trimBudget);
2437
+ if (historyTrimmed > 0) {
2438
+ console.log(`[hypermem-plugin] compact: trimmed ${historyTrimmed} messages from history list`);
2439
+ }
2440
+ // Invalidate the compose cache so next assemble() re-builds from trimmed data
2441
+ await hm.cache.invalidateWindow(agentId, sk).catch(() => { });
2442
+ const tokensAfter = await estimateWindowTokens(hm, agentId, sk);
2443
+ if (telemetryEnabled()) {
2444
+ trimTelemetry({
2445
+ path: 'compact.history2',
2446
+ agentId, sessionKey: sk,
2447
+ preTokens: tokensBefore,
2448
+ postTokens: tokensAfter,
2449
+ removed: historyTrimmed,
2450
+ cacheInvalidated: true,
2451
+ reason: `over-budget tokensBefore=${tokensBefore}/${effectiveBudget}`,
2452
+ });
2453
+ }
2454
+ console.log(`[hypermem-plugin] compact: trimmed ${tokensBefore} → ${tokensAfter} tokens (budget: ${effectiveBudget})`);
2455
+ // Density-aware JSONL truncation: derive target depth from actual avg tokens/message
2456
+ // rather than assuming a fixed 500 tokens/message. This prevents a large-message
2457
+ // session (e.g. 145 msgs × 882 tok = 128k) from bypassing the 1.5x guard and
2458
+ // leaving the JSONL untouched while Redis is correctly trimmed.
2459
+ // force=true bypasses the 1.5x early-exit — over-budget always rewrites.
2460
+ const histDepth = cachedModelState?.historyDepth ?? targetDepth;
2461
+ const avgTokPerMsg = histDepth > 0 && tokensBefore > 0 ? tokensBefore / histDepth : 500;
2462
+ const densityTargetDepth = Math.max(10, Math.floor(trimBudget / avgTokPerMsg));
2463
+ await truncateJsonlIfNeeded(sessionFile, densityTargetDepth, true);
2464
+ console.log(`[hypermem-plugin] compact: JSONL density-trim targetDepth=${densityTargetDepth} (histDepth=${histDepth}, avg=${Math.round(avgTokPerMsg)} tok/msg)`);
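+ // Worked example (editor's sketch, reusing the figures from the comment
+ // above): 145 messages averaging 882 tok/msg ≈ 128k tokens. Under the
+ // fixed 500 tok/msg assumption the 1.5x guard would leave the JSONL
+ // untouched; with a hypothetical trimBudget = 50_000,
+ //   densityTargetDepth = max(10, floor(50_000 / 882)) = 56
+ // and the JSONL is rewritten to 56 messages, in line with the trimmed
+ // Redis window.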
2465
+ return {
2466
+ ok: true,
2467
+ compacted: true,
2468
+ result: { tokensBefore, tokensAfter },
2469
+ };
2470
+ }
2471
+ catch (err) {
2472
+ console.warn('[hypermem-plugin] compact failed:', err.message);
2473
+ // Non-fatal: return ok so the runtime doesn't retry with its own compaction
2474
+ return { ok: true, compacted: false, reason: err.message };
2475
+ }
2476
+ },
2477
+ /**
2478
+ * After-turn hook: ingest new messages then trigger background indexer.
2479
+ *
2480
+ * IMPORTANT: When afterTurn is defined, the runtime calls ONLY afterTurn —
2481
+ * it never calls ingest() or ingestBatch(). So we must ingest the new
2482
+ * messages here, using messages.slice(prePromptMessageCount).
2483
+ */
2484
+ async afterTurn({ sessionId, sessionKey, messages, prePromptMessageCount, isHeartbeat }) {
2485
+ if (isHeartbeat)
2486
+ return;
2487
+ try {
2488
+ const hm = await getHyperMem();
2489
+ const sk = resolveSessionKey(sessionId, sessionKey);
2490
+ const agentId = extractAgentId(sk);
2491
+ // Ingest only the new messages produced this turn
2492
+ const newMessages = messages.slice(prePromptMessageCount);
2493
+ for (const msg of newMessages) {
2494
+ const m = msg;
2495
+ // Skip system messages — they come from the runtime, not the conversation
2496
+ if (m.role === 'system')
2497
+ continue;
2498
+ if (m.role === 'toolResult' && extractTextFromInboundContent(m.content).trim() === SYNTHETIC_MISSING_TOOL_RESULT_TEXT) {
2499
+ const toolCallId = typeof m.toolCallId === 'string' ? m.toolCallId : 'unknown';
2500
+ const toolName = typeof m.toolName === 'string' ? m.toolName : 'unknown';
2501
+ await bumpToolPairMetrics(hm, agentId, sk, { syntheticNoResultIngested: 1 }, {
2502
+ stage: 'afterTurn',
2503
+ toolCallId,
2504
+ toolName,
2505
+ });
2506
+ console.warn(`[hypermem-plugin] tool-pair-integrity: observed synthetic missing tool result for ${agentId}/${sk} ` +
2507
+ `tool=${toolName} callId=${toolCallId}`);
2508
+ }
2509
+ const neutral = toNeutralMessage(m);
2510
+ if (neutral.role === 'user' && !neutral.toolResults?.length) {
2511
+ // Record plain user messages here and strip transport envelope metadata
2512
+ // before storage so prompt wrappers like:
2513
+ // Sender (untrusted metadata): { ... }
2514
+ // never enter messages.db / Redis history / downstream facts.
2515
+ //
2516
+ // recordUserMessage() also strips defensively at core level, but we do
2517
+ // it here too so the intended behavior is explicit at the plugin boundary.
2518
+ //
2519
+ // IMPORTANT: tool results arrive as role='user' carriers (toNeutralMessage
2520
+ // sets role='user' + toolResults=[...] + textContent=null). These MUST go
2521
+ // through recordAssistantMessage to persist the toolResults array.
2522
+ // recordUserMessage takes a plain string and would silently discard them.
2523
+ await hm.recordUserMessage(agentId, sk, stripMessageMetadata(neutral.textContent ?? ''));
2524
+ }
2525
+ else {
2526
+ await hm.recordAssistantMessage(agentId, sk, neutral);
2527
+ }
2528
+ }
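+ // Illustration (editor's note): with prePromptMessageCount = 4 and
+ // messages = [sys, u1, a1, t1, u2, a2], newMessages = [u2, a2]. u2 is
+ // stored via recordUserMessage (metadata stripped); a2 and any
+ // tool-result carriers go through recordAssistantMessage.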
2529
+ // P3.1: Topic detection on the inbound user message
2530
+ // Non-fatal: topic detection never blocks afterTurn
2531
+ try {
2532
+ const inboundUserMsg = newMessages.find(m => m.role === 'user');
2535
+ if (inboundUserMsg) {
2536
+ const neutralUser = toNeutralMessage(inboundUserMsg);
2537
+ // Gather recent context: the last 10 non-system messages before the new ones
2538
+ const contextMessages = messages.slice(0, prePromptMessageCount)
2539
+ .filter(m => m.role !== 'system')
2540
+ .slice(-10)
2541
+ .map(m => toNeutralMessage(m));
2542
+ const db = hm.dbManager.getMessageDb(agentId);
2543
+ if (db) {
2544
+ const topicMap = new SessionTopicMap(db);
2545
+ const activeTopic = topicMap.getActiveTopic(sk);
2546
+ const signal = detectTopicShift(neutralUser, contextMessages, activeTopic?.id ?? null);
2547
+ if (signal.isNewTopic && signal.topicName) {
2548
+ const newTopicId = topicMap.createTopic(sk, signal.topicName);
2549
+ // New topic starts with count 1 (the message that triggered the shift)
2550
+ topicMap.incrementMessageCount(newTopicId);
2551
+ // Write topic_id onto the stored user message (best-effort)
2552
+ try {
2553
+ const stored = db.prepare(`
2554
+ SELECT m.id FROM messages m
2555
+ JOIN conversations c ON c.id = m.conversation_id
2556
+ WHERE c.session_key = ? AND m.role = 'user'
2557
+ ORDER BY m.message_index DESC LIMIT 1
2558
+ `).get(sk);
2559
+ if (stored) {
2560
+ db.prepare('UPDATE messages SET topic_id = ? WHERE id = ?')
2561
+ .run(newTopicId, stored.id);
2562
+ }
2563
+ }
2564
+ catch {
2565
+ // Best-effort
2566
+ }
2567
+ }
2568
+ else if (activeTopic) {
2569
+ topicMap.activateTopic(sk, activeTopic.id);
2570
+ topicMap.incrementMessageCount(activeTopic.id);
2571
+ }
2572
+ }
2573
+ }
2574
+ }
2575
+ catch {
2576
+ // Topic detection is entirely non-fatal
2577
+ }
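+ // Shape sketch (editor's assumption, inferred only from the usage
+ // above): a shift signal looks like
+ //   { isNewTopic: true,  topicName: 'deploy pipeline' }
+ // and a continuation like
+ //   { isNewTopic: false, topicName: null }
+ // Only isNewTopic and topicName are relied on here.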
2578
+ // Recompute the Redis hot history from SQLite so turn-age gradient is
2579
+ // materialized after every turn. This prevents warm-compressed history
2580
+ // from drifting back to raw payloads during live sessions.
2581
+ //
2582
+ // Pass the cached model tokenBudget so refreshRedisGradient can cap the
2583
+ // gradient-compressed window to budget before writing to Redis. Without
2584
+ // this, afterTurn writes up to 250 messages regardless of budget, causing
2585
+ // trimHistoryToTokenBudget to fire and trim ~200 messages on every
2586
+ // subsequent assemble() — the churn loop seen in Helm's logs.
2587
+ if (hm.cache.isConnected) {
2588
+ try {
2589
+ const modelState = await hm.cache.getModelState(agentId, sk);
2590
+ const gradientBudget = modelState?.tokenBudget;
2591
+ const gradientDepth = modelState?.historyDepth;
2592
+ await hm.refreshRedisGradient(agentId, sk, gradientBudget, gradientDepth);
2593
+ }
2594
+ catch (refreshErr) {
2595
+ console.warn('[hypermem-plugin] afterTurn: refreshRedisGradient failed (non-fatal):', refreshErr.message);
2596
+ }
2597
+ }
2598
+ // Invalidate the window cache after ingesting new messages.
2599
+ // The next assemble() call will re-compose with the new data.
2600
+ try {
2601
+ await hm.cache.invalidateWindow(agentId, sk);
2602
+ }
2603
+ catch {
2604
+ // Window invalidation is best-effort
2605
+ }
2606
+ // Post-turn pressure guard: detects when a session exits a turn hot.
2607
+ // If a session just finished a turn at >80% pressure, the NEXT turn's
2608
+ // incoming tool results (parallel web searches, large exec output, etc.)
2609
+ // will hit a window with no headroom — the ingestion wave failure mode
2610
+ // (reported by Helm, 2026-04-05). Since Sprint 2.2b (see below) this
2611
+ // path is guard-only; the next turn's assemble() trim restores headroom.
2612
+ //
2613
+ // Uses modelState.tokenBudget if cached; skips if unavailable (non-fatal).
2614
+ try {
2615
+ const modelState = await hm.cache.getModelState(agentId, sk);
2616
+ if (modelState?.tokenBudget) {
2617
+ // Use the runtime message array as the only trim-pressure source.
2618
+ // Redis remains a drift signal for anomaly logging.
2619
+ const runtimePostTokens = estimateMessageArrayTokens(messages);
2620
+ const redisPostTokens = await estimateWindowTokens(hm, agentId, sk);
2621
+ const postTurnTokens = runtimePostTokens;
2622
+ maybeLogPressureAccountingAnomaly({
2623
+ path: 'afterTurn.secondary',
2624
+ agentId,
2625
+ sessionKey: sk,
2626
+ runtimeTokens: runtimePostTokens,
2627
+ redisTokens: redisPostTokens,
2628
+ composedTokens: postTurnTokens,
2629
+ budget: modelState.tokenBudget,
2630
+ });
2631
+ const postTurnPressure = postTurnTokens / modelState.tokenBudget;
2632
+ // Sprint 2.2b: demote afterTurn.secondary to guard-only no-op.
2633
+ //
2634
+ // Previously this path was a two-tier real trim that fired after
2635
+ // every turn ending at >80% pressure, calling
2636
+ // trimHistoryToTokenBudget() and emitting `event:'trim'` with
2637
+ // path='afterTurn.secondary'. Sprint 2 consolidates steady-state
2638
+ // trim ownership in assemble.* (tool-loop + normal/subagent),
2639
+ // with compact.* as the only exception family. The afterTurn
2640
+ // post-turn pressure path is now redundant: the next turn's
2641
+ // assemble.* trim absorbs any residual pressure.
2642
+ //
2643
+ // Pattern matches the warmstart/reshape demotion from 2.2a:
2644
+ // keep the pressure predicate + threshold branch so observability
2645
+ // via `event:'trim-guard'` is preserved, but emit NO real trim,
2646
+ // NO invalidateWindow, NO mutation. The compact skip-gate stays
2647
+ // correct because this path never stamped any model state.
2648
+ if (postTurnPressure > 0.80) {
2649
+ guardTelemetry({
2650
+ path: 'afterTurn.secondary',
2651
+ agentId, sessionKey: sk,
2652
+ reason: 'afterturn-secondary-demoted',
2653
+ });
2654
+ }
2655
+ }
2656
+ }
2657
+ catch {
2658
+ // Non-fatal — next turn's tool-loop trim is the fallback
2659
+ }
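+ // Worked example (editor's sketch; numbers hypothetical): with
+ // tokenBudget = 96_000 and a runtime post-turn estimate of 80_640
+ // tokens, postTurnPressure = 0.84 > 0.80, so a single 'trim-guard'
+ // record is emitted and nothing is trimmed or invalidated.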
2660
+ // Pre-compute embedding for the assistant's reply so the next compose()
2661
+ // can skip the Ollama round-trip entirely (fire-and-forget).
2662
+ //
2663
+ // Why the assistant reply, not the current user message:
2664
+ // The assistant's reply is the strongest semantic predictor of what the
2665
+ // user will ask next — it's the context they're responding to. By the time
2666
+ // the next user message arrives and compose() fires, this embedding is
2667
+ // already warm in Redis. Cache hit rate: near 100% on normal conversation
2668
+ // flow (one reply per turn).
2669
+ //
2670
+ // The previous approach (embedding the current user message) still missed
2671
+ // on every turn because compose() queries against the INCOMING user message,
2672
+ // not the one that was just processed.
2673
+ //
2674
+ // newMessages = messages.slice(prePromptMessageCount) — the assistant reply
2675
+ // is always in here. Walk backwards to find the last assistant text turn.
2676
+ try {
2677
+ let assistantReplyText = null;
2678
+ for (let i = newMessages.length - 1; i >= 0; i--) {
2679
+ const m = newMessages[i];
2680
+ if (m.role === 'assistant') {
2681
+ const neutral = toNeutralMessage(m);
2682
+ if (neutral.textContent) {
2683
+ assistantReplyText = neutral.textContent;
2684
+ break;
2685
+ }
2686
+ }
2687
+ }
2688
+ if (assistantReplyText && _generateEmbeddings) {
2689
+ // Fire-and-forget: don't await, don't block afterTurn
2690
+ _generateEmbeddings([assistantReplyText]).then(async ([embedding]) => {
2691
+ if (embedding) {
2692
+ await hm.cache.setQueryEmbedding(agentId, sk, embedding);
2693
+ }
2694
+ }).catch(() => {
2695
+ // Non-fatal: embedding pre-compute failed, compose() will call Ollama
2696
+ });
2697
+ }
2698
+ }
2699
+ catch {
2700
+ // Pre-embed is entirely non-fatal
2701
+ }
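+ // Timeline sketch (editor's note): turn N ends, the assistant reply is
+ // embedded in the background and cached via setQueryEmbedding; when
+ // turn N+1's user message arrives, compose() finds the warm embedding
+ // (a semantically close stand-in per the rationale above) and skips the
+ // Ollama round-trip.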
2702
+ // P1.7: Direct per-agent tick after each turn — no need to wait for the 5-min interval.
2703
+ if (_indexer) {
2704
+ const _agentIdForTick = agentId;
2705
+ const runTick = async () => {
2706
+ if (_taskFlowRuntime) {
2707
+ // Preflight: only create a managed flow if we can actually tick.
2708
+ // Creating a flow we never finish/fail leaves orphaned queued rows.
2709
+ let flow = null;
2710
+ try {
2711
+ // Use createManaged + finish/fail only — do NOT call runTask().
2712
+ // runTask() writes a task_run row to runs.sqlite with status='running'
2713
+ // and the TaskFlow runtime has no completeTask() method, so those rows
2714
+ // would accumulate forever and block clean restarts.
2715
+ flow = _taskFlowRuntime.createManaged({
2716
+ controllerId: 'hypermem/indexer',
2717
+ goal: `Index messages for ${_agentIdForTick}`,
2718
+ });
2719
+ await _indexer.tick();
2720
+ // expectedRevision is required: finishFlow uses optimistic locking.
2721
+ // A freshly created managed flow always starts at revision 0.
2722
+ // MUST be awaited: finish/fail return Promises. Without await the
2723
+ // result can't be inspected and a rejection goes unhandled, which can
2724
+ // leave the flow permanently in queued state.
2725
+ const finishResult = await Promise.resolve(_taskFlowRuntime.finish({ flowId: flow.flowId, expectedRevision: flow.revision }));
2726
+ if (finishResult && !finishResult.applied) {
2727
+ console.warn('[hypermem-plugin] TaskFlow finish failed:', finishResult.code ?? finishResult.reason, 'flowId:', flow.flowId, 'revision:', flow.revision);
2728
+ }
2729
+ }
2730
+ catch (tickErr) {
2731
+ // Best-effort fail — non-fatal, but always mark the flow so it doesn't leak
2732
+ if (flow) {
2733
+ try {
2734
+ await Promise.resolve(_taskFlowRuntime.fail({ flowId: flow.flowId, expectedRevision: flow.revision }));
2735
+ }
2736
+ catch { /* ignore */ }
2737
+ }
2738
+ throw tickErr;
2739
+ }
2740
+ }
2741
+ else {
2742
+ await _indexer.tick();
2743
+ }
2744
+ };
2745
+ runTick().catch(() => {
2746
+ // Non-fatal: indexer tick failure never blocks afterTurn
2747
+ });
2748
+ }
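+ // Lifecycle sketch (editor's note, condensed from runTick above):
+ //   const flow = _taskFlowRuntime.createManaged({ ... }); // revision 0
+ //   await _indexer.tick();
+ //   await _taskFlowRuntime.finish({ flowId: flow.flowId, expectedRevision: flow.revision });
+ // and on a tick error:
+ //   await _taskFlowRuntime.fail({ flowId: flow.flowId, expectedRevision: flow.revision });
+ // so no flow is left orphaned in the queued state.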
2749
+ }
2750
+ catch (err) {
2751
+ // afterTurn is never fatal
2752
+ console.warn('[hypermem-plugin] afterTurn failed:', err.message);
2753
+ }
2754
+ },
2755
+ /**
2756
+ * Prepare context for a subagent session before it starts.
2757
+ *
2758
+ * Seeds the child session's Redis with parent context based on the
2759
+ * subagentWarming config ('full' | 'light' | 'off').
2760
+ * Returns a rollback handle to clean up if spawn fails.
2761
+ */
2762
+ async prepareSubagentSpawn({ parentSessionKey, childSessionKey }) {
2763
+ if (_subagentWarming === 'off') {
2764
+ return undefined;
2765
+ }
2766
+ try {
2767
+ const hm = await getHyperMem();
2768
+ const parentAgentId = extractAgentId(parentSessionKey);
2769
+ const childAgentId = extractAgentId(childSessionKey);
2770
+ // Seed child with parent's active facts
2771
+ const facts = hm.getActiveFacts(parentAgentId, { limit: 50 });
2772
+ if (facts && facts.length > 0) {
2773
+ const factBlock = facts
2774
+ .map(f => f.content)
2775
+ .join('\n');
2776
+ await hm.cache.setSlot(childAgentId, childSessionKey, 'parentFacts', factBlock);
2777
+ }
2778
+ // For 'full' warming, also seed recent history context
2779
+ if (_subagentWarming === 'full') {
2780
+ const history = await hm.cache.getHistory(parentAgentId, parentSessionKey);
2781
+ if (history && history.length > 0) {
2782
+ const recentHistory = history.slice(-10);
2783
+ await hm.cache.setSlot(childAgentId, childSessionKey, 'parentHistory', JSON.stringify(recentHistory));
2784
+ }
2785
+ }
2786
+ console.log(`[hypermem-plugin] prepareSubagentSpawn: seeded ${childSessionKey} ` +
2787
+ `from ${parentSessionKey} (warming=${_subagentWarming})`);
2788
+ return {
2789
+ async rollback() {
2790
+ try {
2791
+ const hm = await getHyperMem();
2792
+ await hm.cache.setSlot(childAgentId, childSessionKey, 'parentFacts', '');
2793
+ await hm.cache.setSlot(childAgentId, childSessionKey, 'parentHistory', '');
2794
+ }
2795
+ catch {
2796
+ // Rollback is best-effort
2797
+ }
2798
+ },
2799
+ };
2800
+ }
2801
+ catch (err) {
2802
+ console.warn('[hypermem-plugin] prepareSubagentSpawn failed (non-fatal):', err.message);
2803
+ return undefined;
2804
+ }
2805
+ },
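+ // Editor's note: with warming='full' the child session starts with two
+ // seeded slots,
+ //   parentFacts   → newline-joined contents of up to 50 parent facts
+ //   parentHistory → JSON of the parent's 10 most recent history entries
+ // and the returned rollback() blanks both if the spawn fails.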
2806
+ /**
2807
+ * Clean up after a subagent session ends.
2808
+ *
2809
+ * Removes Redis slots and invalidates caches for the dead session
2810
+ * to prevent stale data accumulation.
2811
+ */
2812
+ async onSubagentEnded({ childSessionKey, reason }) {
2813
+ try {
2814
+ const hm = await getHyperMem();
2815
+ const childAgentId = extractAgentId(childSessionKey);
2816
+ await Promise.all([
2817
+ hm.cache.setSlot(childAgentId, childSessionKey, 'parentFacts', ''),
2818
+ hm.cache.setSlot(childAgentId, childSessionKey, 'parentHistory', ''),
2819
+ hm.cache.setSlot(childAgentId, childSessionKey, 'assemblyContextBlock', ''),
2820
+ hm.cache.setSlot(childAgentId, childSessionKey, 'assemblyContextAt', '0'),
2821
+ hm.cache.invalidateWindow(childAgentId, childSessionKey).catch(() => { }),
2822
+ ]);
2823
+ _overheadCache.delete(childSessionKey);
2824
+ console.log(`[hypermem-plugin] onSubagentEnded: cleaned up ${childSessionKey} (reason=${reason})`);
2825
+ }
2826
+ catch (err) {
2827
+ console.warn('[hypermem-plugin] onSubagentEnded failed (non-fatal):', err.message);
2828
+ }
2829
+ },
2830
+ /**
2831
+ * Dispose: intentionally a no-op.
2832
+ *
2833
+ * The runtime calls dispose() at the end of every request cycle, but
2834
+ * hypermem's Redis connection and SQLite handles are gateway-lifetime
2835
+ * singletons — not request-scoped. Closing and nulling _hm here causes
2836
+ * a full reconnect + re-init on every turn (~400-800ms latency per turn).
2837
+ *
2838
+ * ioredis manages its own reconnection on connection loss. If the gateway
2839
+ * process exits, Node.js cleans up file handles automatically.
2840
+ *
2841
+ * If a true shutdown is needed (e.g. gateway restart signal), call
2842
+ * _hm.close() directly from a gateway:shutdown hook instead.
2843
+ */
2844
+ async dispose() {
2845
+ // Intentional no-op — see comment above.
2846
+ },
2847
+ };
2848
+ }
2849
+ // ─── NeutralMessage → AgentMessage ─────────────────────────────
2850
+ /**
2851
+ * Convert hypermem's NeutralMessage back to OpenClaw's AgentMessage format.
2852
+ *
2853
+ * The runtime expects messages conforming to pi-ai's Message union:
2854
+ * UserMessage: { role: 'user', content: string | ContentBlock[], timestamp }
2855
+ * AssistantMessage: { role: 'assistant', content: ContentBlock[], api, provider, model, usage, stopReason, timestamp }
2856
+ * ToolResultMessage: { role: 'toolResult', toolCallId, toolName, content, isError, timestamp }
2857
+ *
2858
+ * hypermem stores tool results as NeutralMessage with role='user' and toolResults[].
2859
+ * These must be expanded into individual ToolResultMessage objects.
2860
+ *
2861
+ * For assistant messages with tool calls, NeutralToolCall.arguments is a JSON string
2862
+ * but the runtime's ToolCall.arguments is Record<string, any>. We parse it here.
2863
+ *
2864
+ * Missing metadata fields (api, provider, model, usage, stopReason) are filled with
2865
+ * sentinel values. The runtime's convertToLlm strips them before the API call, and
2866
+ * the session transcript already has the real values. These are just structural stubs
2867
+ * so the AgentMessage type is satisfied at runtime.
2868
+ */
2869
+ function neutralToAgentMessage(msg) {
2870
+ const now = Date.now();
2871
+ // Tool results: expand to individual ToolResultMessage objects
2872
+ if (msg.toolResults && msg.toolResults.length > 0) {
2873
+ return msg.toolResults.map(tr => ({
2874
+ role: 'toolResult',
2875
+ toolCallId: tr.callId,
2876
+ toolName: tr.name,
2877
+ content: [{ type: 'text', text: tr.content ?? '' }],
2878
+ isError: tr.isError ?? false,
2879
+ timestamp: now,
2880
+ }));
2881
+ }
2882
+ if (msg.role === 'user') {
2883
+ return {
2884
+ role: 'user',
2885
+ content: msg.textContent ?? '',
2886
+ timestamp: now,
2887
+ };
2888
+ }
2889
+ if (msg.role === 'system') {
2890
+ // System messages are passed through as-is; the runtime handles them separately
2891
+ return {
2892
+ role: 'system',
2893
+ content: msg.textContent ?? '',
2894
+ timestamp: now,
2895
+ // Preserve dynamicBoundary metadata for prompt caching
2896
+ ...msg.metadata?.dynamicBoundary
2897
+ ? { metadata: { dynamicBoundary: true } }
2898
+ : {},
2899
+ };
2900
+ }
2901
+ // Assistant message
2902
+ const content = [];
2903
+ if (msg.textContent) {
2904
+ content.push({ type: 'text', text: msg.textContent });
2905
+ }
2906
+ if (msg.toolCalls && msg.toolCalls.length > 0) {
2907
+ for (const tc of msg.toolCalls) {
2908
+ // Parse arguments from JSON string → object (runtime expects Record<string, any>)
2909
+ let args;
2910
+ try {
2911
+ args = typeof tc.arguments === 'string' ? JSON.parse(tc.arguments) : (tc.arguments ?? {});
2912
+ }
2913
+ catch {
2914
+ args = {};
2915
+ }
2916
+ content.push({
2917
+ type: 'toolCall',
2918
+ id: tc.id,
2919
+ name: tc.name,
2920
+ arguments: args,
2921
+ });
2922
+ }
2923
+ }
2924
+ // Stub metadata fields — the runtime needs these structurally but convertToLlm
2925
+ // strips them before the API call. Real values live in the session transcript.
2926
+ return {
2927
+ role: 'assistant',
2928
+ content: content.length > 0 ? content : [{ type: 'text', text: '' }],
2929
+ api: 'unknown',
2930
+ provider: 'unknown',
2931
+ model: 'unknown',
2932
+ usage: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
2933
+ stopReason: 'stop',
2934
+ timestamp: now,
2935
+ };
2936
+ }
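+ // Conversion example (editor's sketch): a stored tool-result carrier
+ //   { role: 'user', textContent: null,
+ //     toolResults: [{ callId: 'c1', name: 'web_search', content: 'ok' }] }
+ // expands to
+ //   [{ role: 'toolResult', toolCallId: 'c1', toolName: 'web_search',
+ //      content: [{ type: 'text', text: 'ok' }], isError: false,
+ //      timestamp: Date.now() }]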
2937
+ // ─── Cache Bust Utility ────────────────────────────────────────────────────
2938
+ /**
2939
+ * Bust the assembly cache for a specific agent+session.
2940
+ * Call this after writing to identity files (SOUL.md, IDENTITY.md, TOOLS.md,
2941
+ * USER.md) to ensure the next assemble() runs full compositor, not a replay.
2942
+ */
2943
+ export async function bustAssemblyCache(agentId, sessionKey) {
2944
+ try {
2945
+ const hm = await getHyperMem();
2946
+ await Promise.all([
2947
+ hm.cache.setSlot(agentId, sessionKey, 'assemblyContextBlock', ''),
2948
+ hm.cache.setSlot(agentId, sessionKey, 'assemblyContextAt', '0'),
2949
+ ]);
2950
+ }
2951
+ catch {
2952
+ // Non-fatal
2953
+ }
2954
+ }
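+ // Usage sketch (editor's note; the write call and identifiers are
+ // illustrative):
+ //   await fs.writeFile(identityPath, updatedSoul);
+ //   await bustAssemblyCache('helm', 'agent:helm:discord:ops');
+ // The next assemble() finds the assemblyContextBlock slot empty and
+ // runs the full compositor instead of replaying a stale block.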
2955
+ // ─── Plugin Config Schema ────────────────────────────────────────
2956
+ // Exposed via openclaw.json → plugins.entries.hypercompositor.config
2957
+ // Validated by OpenClaw on gateway start. Visible via `openclaw config get`.
2958
+ const hypercompositorConfigSchema = z.object({
2959
+ /** Path to HyperMem core dist/index.js. Auto-resolved if omitted. */
2960
+ hyperMemPath: z.string().optional(),
2961
+ /** HyperMem data directory. Default: ~/.openclaw/hypermem */
2962
+ dataDir: z.string().optional(),
2963
+ /** Full model context window size in tokens. Default: 128000 */
2964
+ contextWindowSize: z.number().int().positive().optional(),
2965
+ /** Fraction [0.0–0.5] reserved for system prompts + headroom. Default: 0.25 */
2966
+ contextWindowReserve: z.number().min(0).max(0.5).optional(),
2967
+ /** Defer tool pruning to OpenClaw's contextPruning. Default: false */
2968
+ deferToolPruning: z.boolean().optional(),
2969
+ /** Emit detailed budget-source and trim-decision logs. Default: false */
2970
+ verboseLogging: z.boolean().optional(),
2971
+ /** Manual per-model context window fallback table used when runtime tokenBudget is missing. */
2972
+ contextWindowOverrides: z.record(z.string().regex(CONTEXT_WINDOW_OVERRIDE_KEY_REGEX, 'key must be "provider/model"'), contextWindowOverrideSchema).optional(),
2973
+ /** Treat cache replay snapshots older than this as stale. Default: 120000ms */
2974
+ warmCacheReplayThresholdMs: z.number().int().positive().optional(),
2975
+ /** Subagent context injection: 'full' | 'light' | 'off'. Default: 'light' */
2976
+ subagentWarming: z.enum(['full', 'light', 'off']).optional(),
2977
+ /** Compositor tuning overrides */
2978
+ compositor: z.object({
2979
+ budgetFraction: z.number().min(0).max(1).optional(),
2980
+ reserveFraction: z.number().min(0).max(1).optional(),
2981
+ historyFraction: z.number().min(0).max(1).optional(),
2982
+ memoryFraction: z.number().min(0).max(1).optional(),
2983
+ defaultTokenBudget: z.number().int().positive().optional(),
2984
+ maxHistoryMessages: z.number().int().positive().optional(),
2985
+ maxFacts: z.number().int().positive().optional(),
2986
+ maxExpertisePatterns: z.number().int().positive().optional(),
2987
+ maxCrossSessionContext: z.number().int().nonnegative().optional(),
2988
+ maxTotalTriggerTokens: z.number().int().nonnegative().optional(),
2989
+ maxRecentToolPairs: z.number().int().nonnegative().optional(),
2990
+ maxProseToolPairs: z.number().int().nonnegative().optional(),
2991
+ warmHistoryBudgetFraction: z.number().min(0).max(1).optional(),
2992
+ contextWindowReserve: z.number().min(0).max(1).optional(),
2993
+ dynamicReserveTurnHorizon: z.number().int().positive().optional(),
2994
+ dynamicReserveMax: z.number().min(0).max(1).optional(),
2995
+ dynamicReserveEnabled: z.boolean().optional(),
2996
+ keystoneHistoryFraction: z.number().min(0).max(1).optional(),
2997
+ keystoneMaxMessages: z.number().int().nonnegative().optional(),
2998
+ keystoneMinSignificance: z.number().min(0).max(1).optional(),
2999
+ targetBudgetFraction: z.number().min(0).max(1).optional(),
3000
+ enableFOS: z.boolean().optional(),
3001
+ enableMOD: z.boolean().optional(),
3002
+ hyperformProfile: z.enum(['light', 'standard', 'full', 'starter', 'fleet']).optional(),
3003
+ outputProfile: z.enum(['light', 'standard', 'full', 'starter', 'fleet']).optional(),
3004
+ outputStandard: z.enum(['light', 'standard', 'full', 'starter', 'fleet']).optional(),
3005
+ wikiTokenCap: z.number().int().positive().optional(),
3006
+ zigzagOrdering: z.boolean().optional(),
3007
+ }).optional(),
3008
+ /** Image/tool eviction settings */
3009
+ eviction: z.object({
3010
+ enabled: z.boolean().optional(),
3011
+ imageAgeTurns: z.number().int().nonnegative().optional(),
3012
+ toolResultAgeTurns: z.number().int().nonnegative().optional(),
3013
+ minTokensToEvict: z.number().int().nonnegative().optional(),
3014
+ keepPreviewChars: z.number().int().nonnegative().optional(),
3015
+ }).optional(),
3016
+ /** Embedding provider config */
3017
+ embedding: z.object({
3018
+ provider: z.enum(['ollama', 'openai', 'gemini']).optional(),
3019
+ ollamaUrl: z.string().optional(),
3020
+ openaiApiKey: z.string().optional(),
3021
+ openaiBaseUrl: z.string().optional(),
3022
+ geminiBaseUrl: z.string().optional(),
3023
+ geminiIndexTaskType: z.string().optional(),
3024
+ geminiQueryTaskType: z.string().optional(),
3025
+ model: z.string().optional(),
3026
+ dimensions: z.number().int().positive().optional(),
3027
+ timeout: z.number().int().positive().optional(),
3028
+ batchSize: z.number().int().positive().optional(),
3029
+ }).optional(),
3030
+ });
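+ // Example openclaw.json entry (editor's sketch; values illustrative,
+ // keys follow the schema above):
+ //   "plugins": { "entries": { "hypercompositor": { "config": {
+ //     "contextWindowSize": 200000,
+ //     "contextWindowReserve": 0.25,
+ //     "subagentWarming": "light",
+ //     "compositor": { "historyFraction": 0.5, "maxFacts": 25 },
+ //     "eviction": { "enabled": true, "imageAgeTurns": 3 }
+ //   } } } }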
3031
+ // ─── Plugin Entry ───────────────────────────────────────────────
3032
+ const engine = createHyperMemEngine();
3033
+ export default definePluginEntry({
3034
+ id: 'hypercompositor',
3035
+ name: 'HyperCompositor — context engine',
3036
+ description: 'Four-layer memory architecture for OpenClaw agents: SQLite hot cache, message history, vector search, and structured library.',
3037
+ kind: 'context-engine',
3038
+ configSchema: buildPluginConfigSchema(hypercompositorConfigSchema),
3039
+ register(api) {
3040
+ // ── Resolve plugin config from openclaw.json ──
3041
+ const pluginCfg = (api.pluginConfig ?? {});
3042
+ _pluginConfig = pluginCfg;
3043
+ // ── Resolve HYPERMEM_PATH: pluginConfig > ESM package resolve > dev fallback ──
3044
+ if (pluginCfg.hyperMemPath) {
3045
+ HYPERMEM_PATH = pluginCfg.hyperMemPath;
3046
+ console.log(`[hypermem-plugin] Using configured hyperMemPath: ${HYPERMEM_PATH}`);
3047
+ }
3048
+ else {
3049
+ try {
3050
+ const resolvedUrl = import.meta.resolve('@psiclawops/hypermem');
3051
+ HYPERMEM_PATH = resolvedUrl.startsWith('file:') ? fileURLToPath(resolvedUrl) : resolvedUrl;
3052
+ }
3053
+ catch {
3054
+ // Dev fallback: resolve relative to plugin directory
3055
+ const __pluginDir = path.dirname(fileURLToPath(import.meta.url));
3056
+ HYPERMEM_PATH = path.resolve(__pluginDir, '../../dist/index.js');
3057
+ console.log(`[hypermem-plugin] Falling back to dev path: ${HYPERMEM_PATH}`);
3058
+ }
3059
+ }
3060
+ api.registerContextEngine('hypercompositor', () => engine);
3061
+ // ── HyperForm config dir init ──
3062
+ // Copy defaults and guide to ~/.openclaw/hypermem/config/ on every load.
3063
+ // Defaults are overwritten on plugin update. Active config files are never touched.
3064
+ void (async () => {
3065
+ try {
3066
+ const dataDir = _pluginConfig.dataDir ?? path.join(os.homedir(), '.openclaw/hypermem');
3067
+ const configDir = path.join(dataDir, 'config');
3068
+ await fs.mkdir(configDir, { recursive: true });
3069
+ const __pluginDir = path.dirname(fileURLToPath(import.meta.url));
3070
+ const defaultsSrc = path.resolve(__pluginDir, '../../../config-defaults');
3071
+ const defaultFiles = [
3072
+ 'hyperform-fos-defaults.json',
3073
+ 'hyperform-mod-defaults.json',
3074
+ 'HYPERFORM-GUIDE.md',
3075
+ ];
3076
+ for (const fname of defaultFiles) {
3077
+ const src = path.join(defaultsSrc, fname);
3078
+ const dest = path.join(configDir, fname);
3079
+ try {
3080
+ await fs.copyFile(src, dest);
3081
+ }
3082
+ catch {
3083
+ // defaults may not exist in dev builds — non-fatal
3084
+ }
3085
+ }
3086
+ // On first install, copy defaults as active config if active files don't exist
3087
+ for (const [src, dest] of [
3088
+ ['hyperform-fos-defaults.json', 'hyperform-fos.json'],
3089
+ ['hyperform-mod-defaults.json', 'hyperform-mod.json'],
3090
+ ]) {
3091
+ const destPath = path.join(configDir, dest);
3092
+ try {
3093
+ await fs.access(destPath);
3094
+ }
3095
+ catch {
3096
+ // Active config doesn't exist — copy defaults as starting point
3097
+ try {
3098
+ await fs.copyFile(path.join(configDir, src), destPath);
3099
+ }
3100
+ catch {
3101
+ // non-fatal
3102
+ }
3103
+ }
3104
+ }
3105
+ }
3106
+ catch {
3107
+ // non-fatal — HyperForm config init is best-effort
3108
+ }
3109
+ })();
3110
+ // P1.7: Bind TaskFlow runtime for task visibility — best-effort.
3111
+ // Guard: api.runtime.taskFlow may not exist on older OpenClaw versions.
3112
+ try {
3113
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
3114
+ const tf = api.runtime?.taskFlow;
3115
+ if (tf && typeof tf.bindSession === 'function') {
3116
+ _taskFlowRuntime = tf.bindSession({
3117
+ sessionKey: 'hypermem-plugin',
3118
+ requesterOrigin: 'hypermem-plugin',
3119
+ });
3120
+ }
3121
+ }
3122
+ catch {
3123
+ // TaskFlow binding is best-effort — plugin remains fully functional without it
3124
+ }
3125
+ },
3126
+ });
3127
+ //# sourceMappingURL=index.js.map