@psiclawops/hypercompositor 0.9.0 → 0.9.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/package.json +4 -4
- package/dist/index.d.ts +0 -208
- package/dist/index.d.ts.map +0 -1
- package/dist/index.js +0 -3474
package/dist/index.js
DELETED
|
@@ -1,3474 +0,0 @@
|
|
|
1
|
-
/**
|
|
2
|
-
* hypermem Context Engine Plugin
|
|
3
|
-
*
|
|
4
|
-
* Implements OpenClaw's ContextEngine interface backed by hypermem's
|
|
5
|
-
* four-layer memory architecture:
|
|
6
|
-
*
|
|
7
|
-
* L1 Cache — SQLite `:memory:` hot session working memory
|
|
8
|
-
* L2 Messages — per-agent conversation history (SQLite)
|
|
9
|
-
* L3 Vectors — semantic + keyword search (KNN + FTS5)
|
|
10
|
-
* L4 Library — facts, knowledge, episodes, preferences
|
|
11
|
-
*
|
|
12
|
-
* Lifecycle mapping:
|
|
13
|
-
* ingest() → record each message into messages.db
|
|
14
|
-
* assemble() → compositor builds context from all four layers
|
|
15
|
-
* compact() → delegate to runtime (ownsCompaction: false)
|
|
16
|
-
* afterTurn() → trigger background indexer (fire-and-forget)
|
|
17
|
-
* bootstrap() → warm hot-cache session, register agent in fleet
|
|
18
|
-
* dispose() → close hypermem connections
|
|
19
|
-
*
|
|
20
|
-
* Session key format expected: "agent:<agentId>:<channel>:<name>"
|
|
21
|
-
*/
|
|
22
|
-
import { definePluginEntry } from 'openclaw/plugin-sdk/plugin-entry';
|
|
23
|
-
import { buildPluginConfigSchema } from 'openclaw/plugin-sdk/core';
|
|
24
|
-
import { z } from 'zod';
|
|
25
|
-
import { detectTopicShift, stripMessageMetadata, SessionTopicMap, applyToolGradientToWindow, OPENCLAW_BOOTSTRAP_FILES, rotateSessionContext, TRIM_SOFT_TARGET, TRIM_GROWTH_THRESHOLD, TRIM_HEADROOM_FRACTION, resolveTrimBudgets, resolveAdaptiveLifecyclePolicy, formatToolChainStub, decideReplayRecovery, isReplayState, recordOutputMetrics,
|
|
26
|
-
// Sprint 3: unified pressure signal
|
|
27
|
-
computeUnifiedPressure, PRESSURE_SOURCE, } from '@psiclawops/hypermem';
|
|
28
|
-
import { evictStaleContent } from '@psiclawops/hypermem/image-eviction';
|
|
29
|
-
import { repairToolPairs } from '@psiclawops/hypermem';
|
|
30
|
-
import os from 'os';
|
|
31
|
-
import path from 'path';
|
|
32
|
-
import fs from 'fs/promises';
|
|
33
|
-
import { randomUUID } from 'node:crypto';
|
|
34
|
-
import { fileURLToPath } from 'url';
|
|
35
|
-
import fsSync from 'fs';
|
|
36
|
-
let _telemetryStream = null;
|
|
37
|
-
let _telemetryStreamFailed = false;
|
|
38
|
-
let _telemetryTurnCounter = 0;
|
|
39
|
-
function telemetryEnabled() {
|
|
40
|
-
return process.env.HYPERMEM_TELEMETRY === '1';
|
|
41
|
-
}
|
|
42
|
-
function getTelemetryStream() {
|
|
43
|
-
if (_telemetryStream || _telemetryStreamFailed)
|
|
44
|
-
return _telemetryStream;
|
|
45
|
-
try {
|
|
46
|
-
const p = process.env.HYPERMEM_TELEMETRY_PATH || './hypermem-telemetry.jsonl';
|
|
47
|
-
_telemetryStream = fsSync.createWriteStream(p, { flags: 'a' });
|
|
48
|
-
_telemetryStream.on('error', () => {
|
|
49
|
-
_telemetryStreamFailed = true;
|
|
50
|
-
_telemetryStream = null;
|
|
51
|
-
});
|
|
52
|
-
}
|
|
53
|
-
catch {
|
|
54
|
-
_telemetryStreamFailed = true;
|
|
55
|
-
_telemetryStream = null;
|
|
56
|
-
}
|
|
57
|
-
return _telemetryStream;
|
|
58
|
-
}
|
|
59
|
-
function trimTelemetry(fields) {
|
|
60
|
-
if (!telemetryEnabled())
|
|
61
|
-
return;
|
|
62
|
-
const stream = getTelemetryStream();
|
|
63
|
-
if (!stream)
|
|
64
|
-
return;
|
|
65
|
-
try {
|
|
66
|
-
const record = {
|
|
67
|
-
event: 'trim',
|
|
68
|
-
ts: new Date().toISOString(),
|
|
69
|
-
...fields,
|
|
70
|
-
};
|
|
71
|
-
stream.write(JSON.stringify(record) + '\n');
|
|
72
|
-
}
|
|
73
|
-
catch {
|
|
74
|
-
// Telemetry must never throw
|
|
75
|
-
}
|
|
76
|
-
}
|
|
77
|
-
function assembleTrace(fields) {
|
|
78
|
-
if (!telemetryEnabled())
|
|
79
|
-
return;
|
|
80
|
-
const stream = getTelemetryStream();
|
|
81
|
-
if (!stream)
|
|
82
|
-
return;
|
|
83
|
-
try {
|
|
84
|
-
const record = {
|
|
85
|
-
event: 'assemble',
|
|
86
|
-
ts: new Date().toISOString(),
|
|
87
|
-
...fields,
|
|
88
|
-
};
|
|
89
|
-
stream.write(JSON.stringify(record) + '\n');
|
|
90
|
-
}
|
|
91
|
-
catch {
|
|
92
|
-
// Telemetry must never throw
|
|
93
|
-
}
|
|
94
|
-
}
|
|
95
|
-
function degradationTelemetry(fields) {
|
|
96
|
-
if (!telemetryEnabled())
|
|
97
|
-
return;
|
|
98
|
-
const stream = getTelemetryStream();
|
|
99
|
-
if (!stream)
|
|
100
|
-
return;
|
|
101
|
-
try {
|
|
102
|
-
const record = {
|
|
103
|
-
event: 'degradation',
|
|
104
|
-
ts: new Date().toISOString(),
|
|
105
|
-
...fields,
|
|
106
|
-
};
|
|
107
|
-
stream.write(JSON.stringify(record) + '\n');
|
|
108
|
-
}
|
|
109
|
-
catch {
|
|
110
|
-
// Telemetry must never throw
|
|
111
|
-
}
|
|
112
|
-
}
|
|
113
|
-
function lifecyclePolicyTelemetry(fields) {
|
|
114
|
-
if (!telemetryEnabled())
|
|
115
|
-
return;
|
|
116
|
-
const stream = getTelemetryStream();
|
|
117
|
-
if (!stream)
|
|
118
|
-
return;
|
|
119
|
-
try {
|
|
120
|
-
const record = {
|
|
121
|
-
event: 'lifecycle-policy',
|
|
122
|
-
ts: new Date().toISOString(),
|
|
123
|
-
...fields,
|
|
124
|
-
};
|
|
125
|
-
stream.write(JSON.stringify(record) + '\n');
|
|
126
|
-
}
|
|
127
|
-
catch {
|
|
128
|
-
// Telemetry must never throw
|
|
129
|
-
}
|
|
130
|
-
}
|
|
131
|
-
function nextTurnId() {
|
|
132
|
-
_telemetryTurnCounter = (_telemetryTurnCounter + 1) >>> 0;
|
|
133
|
-
return `${Date.now().toString(36)}-${_telemetryTurnCounter.toString(36)}`;
|
|
134
|
-
}
|
|
135
|
-
// ─── Trim Ownership (Phase A Sprint 2) ───────────────────────────
|
|
136
|
-
//
|
|
137
|
-
// Sprint 2 consolidates trim ownership: the assemble-owned family
|
|
138
|
-
// (assemble.normal, assemble.subagent, assemble.toolLoop) is the single
|
|
139
|
-
// steady-state trim owner. Compact paths (compact.nuclear, compact.history,
|
|
140
|
-
// compact.history2) are exempted — they're exception-only. warmstart,
|
|
141
|
-
// reshape, and afterTurn.secondary are demoted in sub-tasks 2.2 and 2.3.
|
|
142
|
-
//
|
|
143
|
-
// This block adds:
|
|
144
|
-
// 1. A per-session turn context (beginTrimOwnerTurn/endTrimOwnerTurn) scoped
|
|
145
|
-
// by the main assemble() flow.
|
|
146
|
-
// 2. A single shared trimOwner claim helper that lets exactly one **real**
|
|
147
|
-
// steady-state trim claim ownership per turn and throws loudly in
|
|
148
|
-
// development (NODE_ENV='development') when a second real steady-state
|
|
149
|
-
// trim path attempts to claim the same turn.
|
|
150
|
-
// 3. A non-counting guard/noop telemetry helper (same JSONL channel) that
|
|
151
|
-
// demoted paths can emit to preserve visibility of warm-start/reshape
|
|
152
|
-
// without consuming a steady-state owner slot.
|
|
153
|
-
//
|
|
154
|
-
// Sub-task 2.1 only adds the scaffolding + invariant; no existing trim call
|
|
155
|
-
// is removed here. Demotions of warm-start/reshape/afterTurn.secondary land
|
|
156
|
-
// in 2.2 and 2.3.
|
|
157
|
-
const STEADY_STATE_TRIM_PATHS = new Set([
|
|
158
|
-
'assemble.normal',
|
|
159
|
-
'assemble.subagent',
|
|
160
|
-
'assemble.toolLoop',
|
|
161
|
-
]);
|
|
162
|
-
const COMPACT_TRIM_PATHS = new Set([
|
|
163
|
-
'compact.nuclear',
|
|
164
|
-
'compact.history',
|
|
165
|
-
'compact.history2',
|
|
166
|
-
]);
|
|
167
|
-
// ─── Guard-telemetry reason enum (Phase A Sprint 2.2a) ──────────────────
|
|
168
|
-
// Plugin-local, constant-backed union of allowed `reason` values on
|
|
169
|
-
// `event: 'trim-guard'` records. Keeping this bounded prevents ad-hoc
|
|
170
|
-
// numeric/user strings from leaking into the telemetry JSONL channel and
|
|
171
|
-
// makes downstream reporting stable. Do NOT widen this to arbitrary
|
|
172
|
-
// strings — add a new member here first, then reference it at call sites.
|
|
173
|
-
//
|
|
174
|
-
// Scope note: this union is plugin-local (per planner 2.2 §C). It is not
|
|
175
|
-
// re-exported via `src/types.ts` because the shared public types surface
|
|
176
|
-
// must not gain a telemetry-reason enum as part of this sprint.
|
|
177
|
-
const GUARD_TELEMETRY_REASONS = [
|
|
178
|
-
'warmstart-pressure-demoted',
|
|
179
|
-
'reshape-downshift-demoted',
|
|
180
|
-
'duplicate-claim-suppressed',
|
|
181
|
-
'afterturn-secondary-demoted',
|
|
182
|
-
'window-within-budget-skip',
|
|
183
|
-
'pressure-accounting-anomaly',
|
|
184
|
-
];
|
|
185
|
-
// Turn-scoped ownership map (Phase A Sprint 2.2a).
|
|
186
|
-
//
|
|
187
|
-
// Previously keyed by `sessionKey` alone, which clobbered overlapping same-
|
|
188
|
-
// session assemble() flows (Sprint 2.1 security eval, medium finding #1).
|
|
189
|
-
// Now keyed by the composite `sessionKey|turnId` so two concurrent turns on
|
|
190
|
-
// the same session key remain isolated: each `beginTrimOwnerTurn` gets its
|
|
191
|
-
// own slot, `claimTrimOwner` checks the exact turn's slot, and
|
|
192
|
-
// `endTrimOwnerTurn` removes only that turn's slot.
|
|
193
|
-
const _trimOwnerTurns = new Map();
|
|
194
|
-
function _trimOwnerKey(sessionKey, turnId) {
|
|
195
|
-
return `${sessionKey}|${turnId}`;
|
|
196
|
-
}
|
|
197
|
-
function beginTrimOwnerTurn(sessionKey, turnId) {
|
|
198
|
-
_trimOwnerTurns.set(_trimOwnerKey(sessionKey, turnId), { turnId });
|
|
199
|
-
}
|
|
200
|
-
function endTrimOwnerTurn(sessionKey, turnId) {
|
|
201
|
-
_trimOwnerTurns.delete(_trimOwnerKey(sessionKey, turnId));
|
|
202
|
-
}
|
|
203
|
-
/**
|
|
204
|
-
* Claim the steady-state trim owner slot for the current turn.
|
|
205
|
-
*
|
|
206
|
-
* Behavior:
|
|
207
|
-
* - compact.* paths are exception-only and pass through without claiming.
|
|
208
|
-
* - Non-steady paths (warmstart, reshape, afterTurn.secondary) also pass
|
|
209
|
-
* through without claiming. Demoted/no-op sites should normally emit
|
|
210
|
-
* via guardTelemetry() instead so they stay visible without contending
|
|
211
|
-
* for ownership (sub-tasks 2.2 and 2.3 wire this in).
|
|
212
|
-
* - Steady-state paths (assemble.normal, assemble.subagent,
|
|
213
|
-
* assemble.toolLoop) claim the single owner slot for the current turn.
|
|
214
|
-
* The first such claim succeeds. A second steady-state claim against the
|
|
215
|
-
* same turn is a duplicate-turn violation: it throws loudly under
|
|
216
|
-
* NODE_ENV='development' and warns in other environments (returning
|
|
217
|
-
* false so non-dev runtimes keep working).
|
|
218
|
-
*
|
|
219
|
-
* Callers should invoke this immediately before the real
|
|
220
|
-
* trimHistoryToTokenBudget() call. Guard telemetry does NOT route through
|
|
221
|
-
* this helper — it is explicitly excluded from the steady-state invariant.
|
|
222
|
-
*
|
|
223
|
-
* Returns true when the claim succeeds (or is exempt); false on a swallowed
|
|
224
|
-
* duplicate claim in non-development. In development the duplicate throws
|
|
225
|
-
* before returning.
|
|
226
|
-
*/
|
|
227
|
-
function claimTrimOwner(sessionKey, turnId, path) {
|
|
228
|
-
// Compact paths: exempt — they represent an exceptional pressure path and
|
|
229
|
-
// never contend for the steady-state slot.
|
|
230
|
-
if (COMPACT_TRIM_PATHS.has(path))
|
|
231
|
-
return true;
|
|
232
|
-
// Non-steady paths: pass through (warmstart/reshape/afterTurn.secondary).
|
|
233
|
-
// Warmstart + reshape are demoted to guardTelemetry in 2.2a.
|
|
234
|
-
if (!STEADY_STATE_TRIM_PATHS.has(path))
|
|
235
|
-
return true;
|
|
236
|
-
const ctx = _trimOwnerTurns.get(_trimOwnerKey(sessionKey, turnId));
|
|
237
|
-
if (!ctx)
|
|
238
|
-
return true; // No active assemble-turn scope — nothing to enforce here.
|
|
239
|
-
if (ctx.claimedPath) {
|
|
240
|
-
const msg = `[hypermem-plugin] trimOwner: duplicate steady-state trim claim in turn ` +
|
|
241
|
-
`${ctx.turnId} (sessionKey=${sessionKey}): first=${ctx.claimedPath} second=${path}`;
|
|
242
|
-
if (process.env.NODE_ENV === 'development') {
|
|
243
|
-
throw new Error(msg);
|
|
244
|
-
}
|
|
245
|
-
// Non-development: do not throw, but leave a loud trail so telemetry
|
|
246
|
-
// surfaces the violation. Callers MUST honor the false return and skip
|
|
247
|
-
// the second real trim (Sprint 2.2a enforcement).
|
|
248
|
-
console.warn(msg);
|
|
249
|
-
return false;
|
|
250
|
-
}
|
|
251
|
-
ctx.claimedPath = path;
|
|
252
|
-
return true;
|
|
253
|
-
}
|
|
254
|
-
/**
|
|
255
|
-
* Non-counting guard / noop telemetry.
|
|
256
|
-
*
|
|
257
|
-
* Emits a `trim-guard` record on the same JSONL channel as trimTelemetry()
|
|
258
|
-
* but with a distinct event name so per-turn reporting (scripts/trim-report.mjs,
|
|
259
|
-
* future ownership dashboards) can keep it out of `trimCount`. Used by
|
|
260
|
-
* demoted/no-op call sites in 2.2 and 2.3 so their path labels stay visible
|
|
261
|
-
* in telemetry without consuming a steady-state owner slot.
|
|
262
|
-
*
|
|
263
|
-
* Zero-cost when telemetry is off. Never throws.
|
|
264
|
-
*/
|
|
265
|
-
function guardTelemetry(fields) {
|
|
266
|
-
if (!telemetryEnabled())
|
|
267
|
-
return;
|
|
268
|
-
const stream = getTelemetryStream();
|
|
269
|
-
if (!stream)
|
|
270
|
-
return;
|
|
271
|
-
try {
|
|
272
|
-
const record = {
|
|
273
|
-
event: 'trim-guard',
|
|
274
|
-
ts: new Date().toISOString(),
|
|
275
|
-
...fields,
|
|
276
|
-
};
|
|
277
|
-
stream.write(JSON.stringify(record) + '\n');
|
|
278
|
-
}
|
|
279
|
-
catch {
|
|
280
|
-
// Telemetry must never throw
|
|
281
|
-
}
|
|
282
|
-
}
|
|
283
|
-
// ─── B3: Batch trim with growth allowance ────────────────────────────────
|
|
284
|
-
// Trim fires only when window usage exceeds the soft target by this fraction.
|
|
285
|
-
// Small natural growth (e.g. a short assistant reply) never triggers a trim;
|
|
286
|
-
// only genuine spikes (model switch, cold-start, multi-tool overrun) do.
|
|
287
|
-
// When trim fires, the target is (softTarget * (1 - headroomFraction)) so the
|
|
288
|
-
// window has room to grow for several turns before the next trim fires.
|
|
289
|
-
//
|
|
290
|
-
// softTarget (0.65): matches refreshRedisGradient → steady state never trims
|
|
291
|
-
// growthThreshold (0.05): 5% overage buffer before trim fires
|
|
292
|
-
// headroomFraction (0.10): trim target = softTarget * 0.90 → ~58.5% of budget
|
|
293
|
-
// Canonical values live in the core package so plugin trim guards and compose
|
|
294
|
-
// paths cannot drift.
|
|
295
|
-
// Test-only: expose emitters so the unit test can exercise them directly
|
|
296
|
-
// without standing up a real session. Wrapped in a getter object so the flag
|
|
297
|
-
// guard still runs (zero-cost when off).
|
|
298
|
-
export const __telemetryForTests = {
|
|
299
|
-
trimTelemetry,
|
|
300
|
-
assembleTrace,
|
|
301
|
-
degradationTelemetry,
|
|
302
|
-
guardTelemetry,
|
|
303
|
-
lifecyclePolicyTelemetry,
|
|
304
|
-
nextTurnId,
|
|
305
|
-
beginTrimOwnerTurn,
|
|
306
|
-
endTrimOwnerTurn,
|
|
307
|
-
claimTrimOwner,
|
|
308
|
-
// B3/C0.1: Expose the canonical policy surface so tests can assert against
|
|
309
|
-
// the shared source of truth instead of embedding formulas locally.
|
|
310
|
-
TRIM_SOFT_TARGET,
|
|
311
|
-
TRIM_GROWTH_THRESHOLD,
|
|
312
|
-
TRIM_HEADROOM_FRACTION,
|
|
313
|
-
resolveTrimBudgets,
|
|
314
|
-
reset() {
|
|
315
|
-
if (_telemetryStream) {
|
|
316
|
-
try {
|
|
317
|
-
_telemetryStream.end();
|
|
318
|
-
}
|
|
319
|
-
catch { /* ignore */ }
|
|
320
|
-
}
|
|
321
|
-
_telemetryStream = null;
|
|
322
|
-
_telemetryStreamFailed = false;
|
|
323
|
-
_telemetryTurnCounter = 0;
|
|
324
|
-
_trimOwnerTurns.clear();
|
|
325
|
-
},
|
|
326
|
-
};
|
|
327
|
-
// ─── hypermem singleton ────────────────────────────────────────
|
|
328
|
-
// Runtime load is dynamic (hypermem is a sibling package loaded from repo dist,
|
|
329
|
-
// not installed via npm). Types come from the core package devDependency.
|
|
330
|
-
// This pattern keeps the runtime path stable while TypeScript resolves types
|
|
331
|
-
// from the canonical source — no more local shim drift.
|
|
332
|
-
// Resolved at init time: pluginConfig.hyperMemPath > import.meta.resolve('@psiclawops/hypermem') > dev fallback
|
|
333
|
-
let HYPERMEM_PATH = '';
|
|
334
|
-
let _hm = null;
|
|
335
|
-
let _hmInitPromise = null;
|
|
336
|
-
let _indexer = null;
|
|
337
|
-
let _fleetStore = null;
|
|
338
|
-
let _generateEmbeddings = null;
|
|
339
|
-
let _embeddingConfig = null;
|
|
340
|
-
// P1.7: TaskFlow runtime reference — bound at registration time, best-effort.
|
|
341
|
-
// eslint-disable-next-line @typescript-eslint/no-explicit-any
|
|
342
|
-
let _taskFlowRuntime = null;
|
|
343
|
-
// ─── Eviction config cache ────────────────────────────────────
|
|
344
|
-
// Populated from user config during hypermem init. Stored here so
|
|
345
|
-
// assemble() (which can't await loadUserConfig) can read it without
|
|
346
|
-
// re-reading disk on every turn.
|
|
347
|
-
let _evictionConfig;
|
|
348
|
-
// ─── Context window reserve cache ────────────────────────────
|
|
349
|
-
// Populated from user config during hypermem init. Ensures hypermem leaves
|
|
350
|
-
// a guaranteed headroom fraction for system prompts, tool results, and
|
|
351
|
-
// incoming data — preventing the trim tiers from firing too close to the edge.
|
|
352
|
-
//
|
|
353
|
-
// contextWindowSize: full model context window in tokens (default: 128_000)
|
|
354
|
-
// contextWindowReserve: fraction [0.0–0.5] to keep free (default: 0.25)
|
|
355
|
-
//
|
|
356
|
-
// Effective history budget = (windowSize * (1 - reserve)) - overheadFallback
|
|
357
|
-
// e.g. 128k * 0.75 - 28k = 68k for council agents at 25% reserve
|
|
358
|
-
let _contextWindowSize = 128_000;
|
|
359
|
-
let _contextWindowReserve = 0.25;
|
|
360
|
-
let _deferToolPruning = false;
|
|
361
|
-
let _verboseLogging = false;
|
|
362
|
-
let _contextWindowOverrides = {};
|
|
363
|
-
const _budgetFallbackWarnings = new Set();
|
|
364
|
-
export const CONTEXT_WINDOW_OVERRIDE_KEY_REGEX = /^[^/\s]+\/[^/\s]+$/;
|
|
365
|
-
const contextWindowOverrideSchema = z.object({
|
|
366
|
-
contextTokens: z.number().int().positive().optional(),
|
|
367
|
-
contextWindow: z.number().int().positive().optional(),
|
|
368
|
-
}).superRefine((value, ctx) => {
|
|
369
|
-
if (value.contextTokens == null && value.contextWindow == null) {
|
|
370
|
-
ctx.addIssue({
|
|
371
|
-
code: z.ZodIssueCode.custom,
|
|
372
|
-
message: 'override must declare contextTokens, contextWindow, or both',
|
|
373
|
-
});
|
|
374
|
-
}
|
|
375
|
-
if (value.contextTokens != null &&
|
|
376
|
-
value.contextWindow != null &&
|
|
377
|
-
value.contextTokens > value.contextWindow) {
|
|
378
|
-
ctx.addIssue({
|
|
379
|
-
code: z.ZodIssueCode.custom,
|
|
380
|
-
message: 'contextTokens must be less than or equal to contextWindow',
|
|
381
|
-
});
|
|
382
|
-
}
|
|
383
|
-
});
|
|
384
|
-
export function sanitizeContextWindowOverrides(raw) {
|
|
385
|
-
if (!raw || typeof raw !== 'object' || Array.isArray(raw)) {
|
|
386
|
-
return { value: {}, warnings: [] };
|
|
387
|
-
}
|
|
388
|
-
const value = {};
|
|
389
|
-
const warnings = [];
|
|
390
|
-
for (const [key, candidate] of Object.entries(raw)) {
|
|
391
|
-
const normalizedKey = key.trim().toLowerCase();
|
|
392
|
-
if (!CONTEXT_WINDOW_OVERRIDE_KEY_REGEX.test(normalizedKey)) {
|
|
393
|
-
warnings.push(`ignoring contextWindowOverrides[${JSON.stringify(key)}]: key must be "provider/model"`);
|
|
394
|
-
continue;
|
|
395
|
-
}
|
|
396
|
-
const parsed = contextWindowOverrideSchema.safeParse(candidate);
|
|
397
|
-
if (!parsed.success) {
|
|
398
|
-
warnings.push(`ignoring contextWindowOverrides[${JSON.stringify(key)}]: ` +
|
|
399
|
-
parsed.error.issues.map(issue => issue.message).join('; '));
|
|
400
|
-
continue;
|
|
401
|
-
}
|
|
402
|
-
value[normalizedKey] = parsed.data;
|
|
403
|
-
}
|
|
404
|
-
return { value, warnings };
|
|
405
|
-
}
|
|
406
|
-
export function resolveEffectiveBudget(args) {
|
|
407
|
-
const { tokenBudget, model, contextWindowSize, contextWindowReserve } = args;
|
|
408
|
-
if (tokenBudget) {
|
|
409
|
-
return { budget: tokenBudget, source: 'runtime tokenBudget' };
|
|
410
|
-
}
|
|
411
|
-
const key = normalizeModelKey(model);
|
|
412
|
-
const override = key ? args.contextWindowOverrides?.[key] : undefined;
|
|
413
|
-
const configuredWindow = override?.contextTokens ?? override?.contextWindow;
|
|
414
|
-
if (configuredWindow) {
|
|
415
|
-
return {
|
|
416
|
-
budget: Math.floor(configuredWindow * (1 - contextWindowReserve)),
|
|
417
|
-
source: `contextWindowOverrides[${key}]`,
|
|
418
|
-
};
|
|
419
|
-
}
|
|
420
|
-
return {
|
|
421
|
-
budget: Math.floor(contextWindowSize * (1 - contextWindowReserve)),
|
|
422
|
-
source: 'fallback contextWindowSize',
|
|
423
|
-
};
|
|
424
|
-
}
|
|
425
|
-
export function resolveModelIdentity(model) {
|
|
426
|
-
const modelKey = normalizeModelKey(model);
|
|
427
|
-
if (!modelKey) {
|
|
428
|
-
return {
|
|
429
|
-
rawModel: model ?? null,
|
|
430
|
-
modelKey: null,
|
|
431
|
-
provider: null,
|
|
432
|
-
modelId: null,
|
|
433
|
-
};
|
|
434
|
-
}
|
|
435
|
-
const slash = modelKey.indexOf('/');
|
|
436
|
-
return {
|
|
437
|
-
rawModel: model ?? null,
|
|
438
|
-
modelKey,
|
|
439
|
-
provider: slash > 0 ? modelKey.slice(0, slash) : null,
|
|
440
|
-
modelId: slash > 0 && slash < modelKey.length - 1 ? modelKey.slice(slash + 1) : modelKey,
|
|
441
|
-
};
|
|
442
|
-
}
|
|
443
|
-
export function diffModelState(previous, current) {
|
|
444
|
-
const previousIdentity = previous?.modelKey || previous?.provider || previous?.modelId
|
|
445
|
-
? {
|
|
446
|
-
rawModel: previous.model ?? null,
|
|
447
|
-
modelKey: previous.modelKey ?? normalizeModelKey(previous.model),
|
|
448
|
-
provider: previous.provider ?? resolveModelIdentity(previous.model).provider,
|
|
449
|
-
modelId: previous.modelId ?? resolveModelIdentity(previous.model).modelId,
|
|
450
|
-
}
|
|
451
|
-
: resolveModelIdentity(previous?.model);
|
|
452
|
-
const currentIdentity = resolveModelIdentity(current.model);
|
|
453
|
-
const previousBudget = previous?.tokenBudget;
|
|
454
|
-
const currentBudget = current.tokenBudget;
|
|
455
|
-
const budgetChanged = previousBudget != null && currentBudget != null && previousBudget !== currentBudget;
|
|
456
|
-
return {
|
|
457
|
-
previousIdentity,
|
|
458
|
-
currentIdentity,
|
|
459
|
-
modelChanged: previousIdentity.modelKey !== currentIdentity.modelKey,
|
|
460
|
-
providerChanged: previousIdentity.provider !== currentIdentity.provider,
|
|
461
|
-
modelIdChanged: previousIdentity.modelId !== currentIdentity.modelId,
|
|
462
|
-
budgetChanged,
|
|
463
|
-
budgetDownshift: previousBudget != null && currentBudget != null && currentBudget < previousBudget,
|
|
464
|
-
budgetUplift: previousBudget != null && currentBudget != null && currentBudget > previousBudget,
|
|
465
|
-
};
|
|
466
|
-
}
|
|
467
|
-
function normalizeModelKey(model) {
|
|
468
|
-
if (!model)
|
|
469
|
-
return null;
|
|
470
|
-
const key = model.trim().toLowerCase();
|
|
471
|
-
return key.length > 0 ? key : null;
|
|
472
|
-
}
|
|
473
|
-
function verboseLog(message) {
|
|
474
|
-
if (_verboseLogging)
|
|
475
|
-
console.log(message);
|
|
476
|
-
}
|
|
477
|
-
function resolveConfiguredWindow(model) {
|
|
478
|
-
const key = normalizeModelKey(model);
|
|
479
|
-
if (!key)
|
|
480
|
-
return null;
|
|
481
|
-
const override = _contextWindowOverrides[key];
|
|
482
|
-
if (!override)
|
|
483
|
-
return null;
|
|
484
|
-
return override.contextTokens ?? override.contextWindow ?? null;
|
|
485
|
-
}
|
|
486
|
-
// Subagent warming mode: 'full' | 'light' | 'off'. Default: 'light'.
|
|
487
|
-
// Controls how much HyperMem context is injected into subagent sessions.
|
|
488
|
-
let _subagentWarming = 'light';
|
|
489
|
-
const FORKED_CONTEXT_META_SLOT = 'forkedContextMeta';
|
|
490
|
-
// Cache replay threshold: 15min default. Set to 0 in user config to disable.
|
|
491
|
-
let _cacheReplayThresholdMs = 900_000;
|
|
492
|
-
// ─── System overhead cache ────────────────────────────────────
|
|
493
|
-
// Caches the non-history token cost (contextBlock + runtime system prompt)
|
|
494
|
-
// from the last full compose per session key. Used in tool-loop turns to
|
|
495
|
-
// return an honest estimatedTokens without re-running the full compose
|
|
496
|
-
// pipeline. Map key = resolved session key.
|
|
497
|
-
const _overheadCache = new Map();
|
|
498
|
-
// Tier-aware conservative fallback when no cached value exists (cold session,
|
|
499
|
-
// first turn after restart). Over-estimates are safer than under-estimates:
|
|
500
|
-
// a false-positive compact is cheaper than letting context blow past budget.
|
|
501
|
-
const OVERHEAD_FALLBACK = {
|
|
502
|
-
council: 28_000,
|
|
503
|
-
director: 28_000,
|
|
504
|
-
specialist: 18_000,
|
|
505
|
-
};
|
|
506
|
-
const OVERHEAD_FALLBACK_DEFAULT = 15_000;
|
|
507
|
-
function getOverheadFallback(tier) {
|
|
508
|
-
if (!tier)
|
|
509
|
-
return OVERHEAD_FALLBACK_DEFAULT;
|
|
510
|
-
return OVERHEAD_FALLBACK[tier] ?? OVERHEAD_FALLBACK_DEFAULT;
|
|
511
|
-
}
|
|
512
|
-
/**
|
|
513
|
-
* Compute the effective history budget for trim and compact operations.
|
|
514
|
-
*
|
|
515
|
-
* Priority:
|
|
516
|
-
* 1. tokenBudget passed by the runtime (most precise)
|
|
517
|
-
* 2. Derived from context window config: windowSize * (1 - reserve)
|
|
518
|
-
*
|
|
519
|
-
* The reserve fraction (default 0.25 = 25%) guarantees headroom for:
|
|
520
|
-
* - System prompt + identity blocks (~28k for council agents)
|
|
521
|
-
* - Incoming tool results (can be 10–30k in parallel web_search bursts)
|
|
522
|
-
* - Response generation buffer (~4k)
|
|
523
|
-
*
|
|
524
|
-
* Without the reserve, trim tiers fire at 75–85% of tokenBudget but
|
|
525
|
-
* total context (history + system) exceeds the model window before trim
|
|
526
|
-
* completes, causing result stripping.
|
|
527
|
-
*/
|
|
528
|
-
function computeEffectiveBudget(tokenBudget, model) {
|
|
529
|
-
const resolved = resolveEffectiveBudget({
|
|
530
|
-
tokenBudget,
|
|
531
|
-
model,
|
|
532
|
-
contextWindowSize: _contextWindowSize,
|
|
533
|
-
contextWindowReserve: _contextWindowReserve,
|
|
534
|
-
contextWindowOverrides: _contextWindowOverrides,
|
|
535
|
-
});
|
|
536
|
-
if (resolved.source === 'runtime tokenBudget') {
|
|
537
|
-
verboseLog(`[hypermem-plugin] budget source: runtime tokenBudget=${tokenBudget}${model ? ` model=${model}` : ''}`);
|
|
538
|
-
return resolved.budget;
|
|
539
|
-
}
|
|
540
|
-
const configuredWindow = resolveConfiguredWindow(model);
|
|
541
|
-
if (configuredWindow) {
|
|
542
|
-
verboseLog(`[hypermem-plugin] budget source: contextWindowOverrides[${normalizeModelKey(model)}]=${configuredWindow}, ` +
|
|
543
|
-
`reserve=${_contextWindowReserve}, effective=${resolved.budget}`);
|
|
544
|
-
return resolved.budget;
|
|
545
|
-
}
|
|
546
|
-
verboseLog(`[hypermem-plugin] budget source: fallback contextWindowSize=${_contextWindowSize}, ` +
|
|
547
|
-
`reserve=${_contextWindowReserve}, effective=${resolved.budget}${model ? ` model=${model}` : ''}`);
|
|
548
|
-
const warningKey = normalizeModelKey(model) ?? '(unknown-model)';
|
|
549
|
-
if (!_budgetFallbackWarnings.has(warningKey)) {
|
|
550
|
-
_budgetFallbackWarnings.add(warningKey);
|
|
551
|
-
console.warn(`[hypermem-plugin] No runtime tokenBudget${model ? ` for model ${model}` : ''}; ` +
|
|
552
|
-
`falling back to contextWindowSize=${_contextWindowSize}. ` +
|
|
553
|
-
`Add contextWindowOverrides["provider/model"] to config.json or openclaw.json if detection is wrong.`);
|
|
554
|
-
}
|
|
555
|
-
return resolved.budget;
|
|
556
|
-
}
|
|
557
|
-
// ─── Plugin config cache ───────────────────────────────────────
|
|
558
|
-
// Populated from openclaw.json plugins.entries.hypercompositor.config
|
|
559
|
-
// during register(). loadUserConfig() merges this over config.json.
|
|
560
|
-
let _pluginConfig = {};
|
|
561
|
-
/**
|
|
562
|
-
* Load user config with priority: pluginConfig (openclaw.json) > config.json (legacy).
|
|
563
|
-
* pluginConfig values win; config.json provides fallback for keys not set in openclaw.json.
|
|
564
|
-
* This allows gradual migration from the shadow config.json to central config.
|
|
565
|
-
*/
|
|
566
|
-
async function loadUserConfig() {
|
|
567
|
-
// Resolve data dir: pluginConfig > default
|
|
568
|
-
const dataDir = _pluginConfig.dataDir ?? path.join(os.homedir(), '.openclaw/hypermem');
|
|
569
|
-
const configPath = path.join(dataDir, 'config.json');
|
|
570
|
-
let fileConfig = {};
|
|
571
|
-
try {
|
|
572
|
-
const raw = await fs.readFile(configPath, 'utf-8');
|
|
573
|
-
fileConfig = JSON.parse(raw);
|
|
574
|
-
console.log(`[hypermem-plugin] Loaded legacy config from ${configPath}`);
|
|
575
|
-
}
|
|
576
|
-
catch (err) {
|
|
577
|
-
if (err.code !== 'ENOENT') {
|
|
578
|
-
console.warn(`[hypermem-plugin] Failed to parse config.json (using defaults):`, err.message);
|
|
579
|
-
}
|
|
580
|
-
}
|
|
581
|
-
// Merge: pluginConfig (openclaw.json) wins over fileConfig (legacy config.json).
|
|
582
|
-
// Top-level scalar keys from pluginConfig override fileConfig.
|
|
583
|
-
// Nested objects (compositor, eviction, embedding) are shallow-merged.
|
|
584
|
-
const merged = { ...fileConfig };
|
|
585
|
-
if (_pluginConfig.contextWindowSize != null)
|
|
586
|
-
merged.contextWindowSize = _pluginConfig.contextWindowSize;
|
|
587
|
-
if (_pluginConfig.contextWindowReserve != null)
|
|
588
|
-
merged.contextWindowReserve = _pluginConfig.contextWindowReserve;
|
|
589
|
-
if (_pluginConfig.deferToolPruning != null)
|
|
590
|
-
merged.deferToolPruning = _pluginConfig.deferToolPruning;
|
|
591
|
-
if (_pluginConfig.verboseLogging != null)
|
|
592
|
-
merged.verboseLogging = _pluginConfig.verboseLogging;
|
|
593
|
-
if (_pluginConfig.contextWindowOverrides != null)
|
|
594
|
-
merged.contextWindowOverrides = { ...merged.contextWindowOverrides, ..._pluginConfig.contextWindowOverrides };
|
|
595
|
-
if (_pluginConfig.warmCacheReplayThresholdMs != null)
|
|
596
|
-
merged.warmCacheReplayThresholdMs = _pluginConfig.warmCacheReplayThresholdMs;
|
|
597
|
-
if (_pluginConfig.subagentWarming != null)
|
|
598
|
-
merged.subagentWarming = _pluginConfig.subagentWarming;
|
|
599
|
-
if (_pluginConfig.compositor)
|
|
600
|
-
merged.compositor = { ...merged.compositor, ..._pluginConfig.compositor };
|
|
601
|
-
if (_pluginConfig.eviction)
|
|
602
|
-
merged.eviction = { ...merged.eviction, ..._pluginConfig.eviction };
|
|
603
|
-
if (_pluginConfig.embedding)
|
|
604
|
-
merged.embedding = { ...merged.embedding, ..._pluginConfig.embedding };
|
|
605
|
-
if (_pluginConfig.reranker)
|
|
606
|
-
merged.reranker = { ...merged.reranker, ..._pluginConfig.reranker };
|
|
607
|
-
if (Object.keys(fileConfig).length > 0 && Object.keys(_pluginConfig).filter(k => k !== 'hyperMemPath' && k !== 'dataDir').length > 0) {
|
|
608
|
-
console.log('[hypermem-plugin] Note: migrating config.json keys to plugins.entries.hypercompositor.config in openclaw.json is recommended');
|
|
609
|
-
}
|
|
610
|
-
return merged;
|
|
611
|
-
}
|
|
612
|
-
/**
 * Lazily create — and memoize at module scope — the shared HyperMem instance.
 *
 * First call kicks off an async init IIFE and caches its promise in
 * `_hmInitPromise`; concurrent callers await the same promise, and once the
 * instance exists `_hm` short-circuits everything. Init does, in order:
 * dynamic import of hypermem, embedding-config resolution, user-config
 * caching into module globals (trim/eviction/logging knobs), HyperMem.create,
 * then fleet-store + background-indexer wiring (best-effort).
 *
 * NOTE(review): a failed init leaves `_hmInitPromise` holding a rejected
 * promise that is never cleared here, so later calls re-observe the same
 * rejection — confirm whether a retry path exists elsewhere.
 */
async function getHyperMem() {
    if (_hm)
        return _hm;
    if (_hmInitPromise)
        return _hmInitPromise;
    _hmInitPromise = (async () => {
        // Dynamic import — hypermem is loaded from repo dist
        const mod = await import(HYPERMEM_PATH);
        const HyperMem = mod.HyperMem;
        // Capture generateEmbeddings from the dynamic module for use in afterTurn().
        // Bind it with the user's embedding config so the pre-compute path uses the
        // same provider as the indexer (Ollama vs OpenAI).
        if (typeof mod.generateEmbeddings === 'function') {
            const rawGenerate = mod.generateEmbeddings;
            // _embeddingConfig is read at call time (not bind time), so the closure
            // picks up the config resolved further down in this init block.
            _generateEmbeddings = (texts) => rawGenerate(texts, _embeddingConfig ?? undefined);
        }
        // Load optional user config — compositor tuning overrides
        const userConfig = await loadUserConfig();
        // Build embedding config from user config. Applied to both HyperMem core
        // (VectorStore init) and the _generateEmbeddings closure above.
        if (userConfig.embedding) {
            const ue = userConfig.embedding;
            // Provider-specific model/dimension/batch defaults
            const providerDefaults = ue.provider === 'gemini'
                ? { model: 'gemini-embedding-001', dimensions: 3072, batchSize: 100, timeout: 15000 }
                : ue.provider === 'openai'
                    ? { model: 'text-embedding-3-small', dimensions: 1536, batchSize: 128, timeout: 10000 }
                    : { model: 'nomic-embed-text', dimensions: 768, batchSize: 32, timeout: 10000 };
            // Explicit user values win; anything unset falls back to the provider default.
            _embeddingConfig = {
                provider: ue.provider ?? 'ollama',
                ollamaUrl: ue.ollamaUrl ?? 'http://localhost:11434',
                openaiBaseUrl: ue.openaiBaseUrl ?? 'https://api.openai.com/v1',
                openaiApiKey: ue.openaiApiKey,
                geminiBaseUrl: ue.geminiBaseUrl,
                geminiIndexTaskType: ue.geminiIndexTaskType,
                geminiQueryTaskType: ue.geminiQueryTaskType,
                model: ue.model ?? providerDefaults.model,
                dimensions: ue.dimensions ?? providerDefaults.dimensions,
                timeout: ue.timeout ?? providerDefaults.timeout,
                batchSize: ue.batchSize ?? providerDefaults.batchSize,
            };
            console.log(`[hypermem-plugin] Embedding provider: ${_embeddingConfig.provider} ` +
                `(model: ${_embeddingConfig.model}, ${_embeddingConfig.dimensions}d, batch: ${_embeddingConfig.batchSize})`);
        }
        // Cache eviction config at module scope so assemble() can read it
        // synchronously without re-reading disk on every turn.
        _evictionConfig = userConfig.eviction ?? {};
        // Cache context window config so all three trim hotpaths use the same values.
        if (typeof userConfig.contextWindowSize === 'number' && userConfig.contextWindowSize > 0) {
            _contextWindowSize = userConfig.contextWindowSize;
        }
        // Reserve fraction is clamped to [0, 0.5]; out-of-range values are ignored.
        if (typeof userConfig.contextWindowReserve === 'number' &&
            userConfig.contextWindowReserve >= 0 && userConfig.contextWindowReserve <= 0.5) {
            _contextWindowReserve = userConfig.contextWindowReserve;
        }
        // Strict === true checks: anything other than boolean true disables these flags.
        _deferToolPruning = userConfig.deferToolPruning === true;
        if (_deferToolPruning) {
            console.log('[hypermem-plugin] deferToolPruning: true — tool gradient deferred to host contextPruning');
        }
        _verboseLogging = userConfig.verboseLogging === true;
        const sanitizedOverrides = sanitizeContextWindowOverrides(userConfig.contextWindowOverrides);
        _contextWindowOverrides = sanitizedOverrides.value;
        for (const warning of sanitizedOverrides.warnings) {
            console.warn(`[hypermem-plugin] ${warning}`);
        }
        // Only the three known warming modes are accepted; anything else keeps the prior value.
        const warmingVal = userConfig.subagentWarming;
        if (warmingVal === 'full' || warmingVal === 'light' || warmingVal === 'off') {
            _subagentWarming = warmingVal;
            console.log(`[hypermem-plugin] subagentWarming: ${_subagentWarming}`);
        }
        if (typeof userConfig.warmCacheReplayThresholdMs === 'number') {
            _cacheReplayThresholdMs = userConfig.warmCacheReplayThresholdMs;
        }
        const reservedTokens = Math.floor(_contextWindowSize * _contextWindowReserve);
        console.log(`[hypermem-plugin] context window: ${_contextWindowSize} tokens, ` +
            `${Math.round(_contextWindowReserve * 100)}% reserved (${reservedTokens} tokens), ` +
            `effective history budget: ${_contextWindowSize - reservedTokens} tokens`);
        verboseLog(`[hypermem-plugin] warmCacheReplayThresholdMs=${_cacheReplayThresholdMs}`);
        verboseLog(`[hypermem-plugin] contextWindowOverrides keys=${Object.keys(_contextWindowOverrides).join(', ') || '(none)'}`);
        const cacheConfig = userConfig.cache;
        const instance = await HyperMem.create({
            dataDir: _pluginConfig.dataDir ?? path.join(os.homedir(), '.openclaw/hypermem'),
            cache: {
                keyPrefix: cacheConfig?.keyPrefix ?? 'hm:',
                sessionTTL: cacheConfig?.sessionTTL ?? 14400, // 4h default for system/identity/meta slots
                historyTTL: cacheConfig?.historyTTL ?? 86400, // 24h default for history/cursor hot cache
            },
            // Optional sections are spread in only when present so HyperMem.create
            // sees no explicit `undefined` keys.
            ...(userConfig.compositor ? { compositor: userConfig.compositor } : {}),
            ...(_embeddingConfig ? { embedding: _embeddingConfig } : {}),
            ...(userConfig.reranker
                ? { reranker: userConfig.reranker }
                : {}),
        });
        _hm = instance;
        // Wire up fleet store and background indexer from dynamic module
        const { FleetStore: FleetStoreClass, createIndexer } = mod;
        const libraryDb = instance.dbManager.getLibraryDb();
        _fleetStore = new FleetStoreClass(libraryDb);
        try {
            // T1.2: Wire indexer with proper DB accessors and cursor fetcher.
            // The cursor fetcher enables priority-based indexing: messages the model
            // hasn't seen yet (post-cursor) are processed first.
            _indexer = createIndexer((agentId) => instance.dbManager.getMessageDb(agentId), () => instance.dbManager.getLibraryDb(), () => {
                // List agents from fleet_agents table (active only)
                try {
                    const rows = instance.dbManager.getLibraryDb()
                        .prepare("SELECT id FROM fleet_agents WHERE status = 'active'")
                        .all();
                    return rows.map(r => r.id);
                }
                catch {
                    // Table may not exist yet — treat as "no agents".
                    return [];
                }
            }, {
                enabled: true,
                periodicInterval: userConfig?.maintenance?.periodicInterval ?? 300000,
                maxActiveConversations: userConfig?.maintenance?.maxActiveConversations ?? 5,
                recentConversationCooldownMs: userConfig?.maintenance?.recentConversationCooldownMs ?? 30000,
                maxCandidatesPerPass: userConfig?.maintenance?.maxCandidatesPerPass ?? 200,
            },
            // Cursor fetcher: reads the SQLite-backed session cursor
            async (agentId, sessionKey) => {
                return instance.getSessionCursor(agentId, sessionKey);
            },
            // Pass vector store so new facts/episodes are embedded at index time
            instance.getVectorStore() ?? undefined,
            // Dreaming config — passed from hypermem user config if set
            userConfig?.dreaming ?? {},
            // KL-01: global write policy — passed from hypermem user config
            userConfig?.globalWritePolicy ?? 'deny');
            _indexer.start();
            if (_verboseLogging) {
                const mc = userConfig?.maintenance ?? {};
                console.log(`[hypermem-plugin] maintenance settings: periodicInterval=${mc.periodicInterval ?? 300000}ms ` +
                    `maxActiveConversations=${mc.maxActiveConversations ?? 5} ` +
                    `cooldown=${mc.recentConversationCooldownMs ?? 30000}ms ` +
                    `maxCandidatesPerPass=${mc.maxCandidatesPerPass ?? 200}`);
            }
        }
        catch {
            // Non-fatal — indexer wiring can fail without breaking context assembly
        }
        return instance;
    })();
    return _hmInitPromise;
}
|
|
758
|
-
// ─── Session Key Helpers ────────────────────────────────────────
|
|
759
|
-
/**
|
|
760
|
-
* Extract agentId from a session key.
|
|
761
|
-
* Session keys follow: "agent:<agentId>:<channel>:<name>"
|
|
762
|
-
* Falls back to "main" if the key doesn't match expected format.
|
|
763
|
-
*/
|
|
764
|
-
/**
 * Extract the agentId from a session key of the form
 * "agent:<agentId>:<channel>:<name>".
 * Returns "main" for empty keys or keys that don't start with "agent:".
 */
function extractAgentId(sessionKey) {
    if (!sessionKey) {
        return 'main';
    }
    const [prefix, agentId] = sessionKey.split(':');
    return prefix === 'agent' && agentId !== undefined ? agentId : 'main';
}
|
|
773
|
-
/**
|
|
774
|
-
* Normalize sessionKey — prefer the explicit sessionKey param,
|
|
775
|
-
* fall back to sessionId (UUID) which we can't parse as a session key.
|
|
776
|
-
* If neither is useful, use a default.
|
|
777
|
-
*/
|
|
778
|
-
/**
 * Resolve the session key to record under.
 * Prefers the explicit sessionKey; otherwise synthesizes one from the
 * opaque sessionId (a UUID that can't be parsed as a named session key),
 * so recording still works even though it won't resolve to a named session.
 */
function resolveSessionKey(sessionId, sessionKey) {
    // Truthiness on purpose: an empty-string key also falls back to the synthetic form.
    return sessionKey || `session:${sessionId}`;
}
|
|
785
|
-
// Sentinel body of a synthetic tool result inserted when a tool produced no output.
const SYNTHETIC_MISSING_TOOL_RESULT_TEXT = 'No result provided';
/**
 * Flatten inbound message content to plain text.
 * Strings pass through; arrays contribute only well-formed text parts
 * (joined with newlines); anything else yields ''.
 */
function extractTextFromInboundContent(content) {
    if (typeof content === 'string') {
        return content;
    }
    if (!Array.isArray(content)) {
        return '';
    }
    const texts = [];
    for (const part of content) {
        // Skip null/malformed parts and anything that isn't a string text block.
        if (part && typeof part.type === 'string' && part.type === 'text' && typeof part.text === 'string') {
            texts.push(part.text);
        }
    }
    return texts.join('\n');
}
|
|
797
|
-
/**
 * Resolve a total token count for an assistant message.
 * Probes the usage object's known total/output field spellings in priority
 * order, then falls back to the runtime's currentTokenCount. Returns a
 * floored positive integer, or undefined when no usable count exists.
 */
function resolveAssistantTokenCount(msg, runtimeContext) {
    const usage = msg.usage;
    if (usage && typeof usage === 'object') {
        // Totals are preferred over per-direction counts; snake/camel variants covered.
        const candidates = [
            usage.total,
            usage.totalTokens,
            usage.total_tokens,
            usage.output,
            usage.outputTokens,
            usage.output_tokens,
            usage.completionTokens,
            usage.completion_tokens,
        ];
        const hit = candidates.find((c) => typeof c === 'number' && Number.isFinite(c) && c > 0);
        if (hit !== undefined) {
            return Math.floor(hit);
        }
    }
    const runtimeTokenCount = runtimeContext?.currentTokenCount;
    if (typeof runtimeTokenCount === 'number' && Number.isFinite(runtimeTokenCount) && runtimeTokenCount > 0) {
        return Math.floor(runtimeTokenCount);
    }
    return undefined;
}
|
|
822
|
-
/**
 * Resolve an output-side token count for an assistant message.
 * Unlike resolveAssistantTokenCount, output fields are probed before totals,
 * and a ~4 chars/token estimate of the text content is the final fallback.
 * Returns a positive integer, or undefined when even the estimate is 0.
 */
function resolveAssistantOutputTokenCount(msg, runtimeContext) {
    const usage = msg.usage;
    if (usage && typeof usage === 'object') {
        // Output counts first, then completion spellings, then totals as a last resort.
        for (const value of [
            usage.output,
            usage.outputTokens,
            usage.output_tokens,
            usage.completionTokens,
            usage.completion_tokens,
            usage.totalTokens,
            usage.total_tokens,
            usage.total,
        ]) {
            if (typeof value === 'number' && Number.isFinite(value) && value > 0) {
                return Math.floor(value);
            }
        }
    }
    const fromRuntime = runtimeContext?.currentTokenCount;
    if (typeof fromRuntime === 'number' && Number.isFinite(fromRuntime) && fromRuntime > 0) {
        return Math.floor(fromRuntime);
    }
    // Last resort: estimate from the message text at ~4 chars/token.
    const text = extractTextFromInboundContent(msg.content);
    const estimated = Math.ceil(text.length / 4);
    return estimated > 0 ? estimated : undefined;
}
|
|
849
|
-
/**
 * Tally tool-call/tool-result pairing stats over NeutralMessage records
 * (msg.toolCalls / msg.toolResults arrays). Reports counts, synthetic
 * "No result provided" placeholders, and which call/result IDs are unmatched.
 */
function collectNeutralToolPairStats(messages) {
    const seenCallIds = new Set();
    const seenResultIds = new Set();
    const counters = { calls: 0, results: 0, synthetic: 0 };
    for (const msg of messages) {
        const calls = msg.toolCalls ?? [];
        const results = msg.toolResults ?? [];
        counters.calls += calls.length;
        counters.results += results.length;
        for (const tc of calls) {
            if (tc.id) {
                seenCallIds.add(tc.id);
            }
        }
        for (const tr of results) {
            if (tr.callId) {
                seenResultIds.add(tr.callId);
            }
            // Count synthetic placeholders injected for tools that never answered.
            if ((tr.content ?? '').trim() === SYNTHETIC_MISSING_TOOL_RESULT_TEXT) {
                counters.synthetic += 1;
            }
        }
    }
    const missingToolResultIds = [...seenCallIds].filter((id) => !seenResultIds.has(id));
    const orphanToolResultIds = [...seenResultIds].filter((id) => !seenCallIds.has(id));
    return {
        toolCallCount: counters.calls,
        toolResultCount: counters.results,
        missingToolResultCount: missingToolResultIds.length,
        orphanToolResultCount: orphanToolResultIds.length,
        syntheticNoResultCount: counters.synthetic,
        missingToolResultIds,
        orphanToolResultIds,
    };
}
|
|
881
|
-
/**
 * Tally tool-call/tool-result pairing stats over OpenClaw AgentMessages:
 * assistant messages carry toolCall/toolUse content blocks, and results
 * arrive as dedicated role:'toolResult' messages with a toolCallId.
 * Same report shape as collectNeutralToolPairStats.
 */
function collectAgentToolPairStats(messages) {
    const callIds = new Set();
    const resultIds = new Set();
    let toolCallCount = 0;
    let toolResultCount = 0;
    let syntheticNoResultCount = 0;
    for (const msg of messages) {
        if (msg.role === 'assistant' && Array.isArray(msg.content)) {
            for (const block of msg.content) {
                if (block.type !== 'toolCall' && block.type !== 'toolUse') {
                    continue;
                }
                toolCallCount += 1;
                if (typeof block.id === 'string' && block.id.length > 0) {
                    callIds.add(block.id);
                }
            }
        }
        if (msg.role === 'toolResult') {
            toolResultCount += 1;
            const toolCallId = typeof msg.toolCallId === 'string' ? msg.toolCallId : '';
            if (toolCallId) {
                resultIds.add(toolCallId);
            }
            // Synthetic placeholder results count separately for diagnostics.
            if (extractTextFromInboundContent(msg.content).trim() === SYNTHETIC_MISSING_TOOL_RESULT_TEXT) {
                syntheticNoResultCount += 1;
            }
        }
    }
    const missingToolResultIds = [...callIds].filter((id) => !resultIds.has(id));
    const orphanToolResultIds = [...resultIds].filter((id) => !callIds.has(id));
    return {
        toolCallCount,
        toolResultCount,
        missingToolResultCount: missingToolResultIds.length,
        orphanToolResultCount: orphanToolResultIds.length,
        syntheticNoResultCount,
        missingToolResultIds,
        orphanToolResultIds,
    };
}
|
|
919
|
-
/**
 * Accumulate tool-pair metrics into the session's 'toolPairMetrics' cache slot.
 * Reads the stored JSON (tolerating missing/corrupt data), adds each delta
 * counter, stamps lastUpdatedAt, and keeps the previous lastAnomaly unless
 * a new one is supplied. Best-effort: storage errors on read reset to {}.
 */
async function bumpToolPairMetrics(hm, agentId, sessionKey, delta, anomaly) {
    const slot = 'toolPairMetrics';
    let stored = {};
    try {
        const raw = await hm.cache.getSlot(agentId, sessionKey, slot);
        if (raw) {
            stored = JSON.parse(raw);
        }
    }
    catch {
        // Corrupt or unreadable slot — start the counters over.
        stored = {};
    }
    // Sum stored + delta for a counter, treating absent values as 0.
    const add = (key) => (stored[key] ?? 0) + (delta[key] ?? 0);
    const next = {
        composeCount: add('composeCount'),
        syntheticNoResultIngested: add('syntheticNoResultIngested'),
        preBridgeMissingToolResults: add('preBridgeMissingToolResults'),
        preBridgeOrphanToolResults: add('preBridgeOrphanToolResults'),
        postBridgeMissingToolResults: add('postBridgeMissingToolResults'),
        postBridgeOrphanToolResults: add('postBridgeOrphanToolResults'),
        lastUpdatedAt: new Date().toISOString(),
        lastAnomaly: anomaly ?? stored.lastAnomaly,
    };
    await hm.cache.setSlot(agentId, sessionKey, slot, JSON.stringify(next));
}
|
|
942
|
-
/**
|
|
943
|
-
* Convert an OpenClaw AgentMessage to hypermem's NeutralMessage format.
|
|
944
|
-
*/
|
|
945
|
-
/**
 * Convert an OpenClaw AgentMessage to hypermem's NeutralMessage shape
 * ({ role, textContent, toolCalls, toolResults }).
 *
 * Tool calls are normalized from both content blocks
 * ({ type: 'toolCall' | 'toolUse', id, name, input }) and the legacy wire
 * array (msg.tool_calls / msg.toolCalls, OpenAI style). Tool-result
 * messages (role 'tool' / 'tool_result' / 'toolResult') are re-homed to
 * role 'user' with their text moved into toolResults, so trimming can never
 * strand a tool_result without its matching assistant tool_use.
 */
function toNeutralMessage(msg) {
    // Collapse string/array content into one newline-joined text value.
    let textContent = null;
    if (typeof msg.content === 'string') {
        textContent = msg.content;
    }
    else if (Array.isArray(msg.content)) {
        const pieces = [];
        for (const c of msg.content) {
            if (c.type === 'text' && typeof c.text === 'string') {
                pieces.push(c.text);
            }
        }
        textContent = pieces.length > 0 ? pieces.join('\n') : null;
    }
    // Tool calls embedded as content blocks.
    const contentBlockToolCalls = [];
    if (Array.isArray(msg.content)) {
        for (const c of msg.content) {
            if (c.type === 'toolCall' || c.type === 'toolUse') {
                contentBlockToolCalls.push({
                    id: c.id ?? 'unknown',
                    name: c.name ?? 'unknown',
                    arguments: typeof c.input === 'string' ? c.input : JSON.stringify(c.input ?? {}),
                });
            }
        }
    }
    // Legacy wire-format array takes precedence over content blocks.
    const rawToolCalls = msg.tool_calls
        ?? msg.toolCalls
        ?? null;
    let toolCalls = null;
    if (rawToolCalls && rawToolCalls.length > 0) {
        toolCalls = rawToolCalls.map((tc) => {
            const fn = tc.function;
            if (fn) {
                // OpenAI wire format: { id, type: 'function', function: { name, arguments } }
                return {
                    id: tc.id ?? 'unknown',
                    name: fn.name ?? 'unknown',
                    arguments: typeof fn.arguments === 'string' ? fn.arguments : JSON.stringify(fn.arguments ?? {}),
                };
            }
            // Already NeutralToolCall-ish or content-block shaped.
            let args;
            if (typeof tc.arguments === 'string') {
                args = tc.arguments;
            }
            else if (typeof tc.input === 'string') {
                args = tc.input;
            }
            else {
                args = JSON.stringify(tc.arguments ?? tc.input ?? {});
            }
            return { id: tc.id ?? 'unknown', name: tc.name ?? 'unknown', arguments: args };
        });
    }
    else if (contentBlockToolCalls.length > 0) {
        toolCalls = contentBlockToolCalls;
    }
    // All three role spellings mark a tool-result message.
    const isToolResultMsg = msg.role === 'tool' || msg.role === 'tool_result' || msg.role === 'toolResult';
    let toolResults = null;
    if (isToolResultMsg && textContent) {
        toolResults = [{
            callId: msg.tool_call_id ?? msg.toolCallId ?? 'unknown',
            name: msg.name ?? msg.toolName ?? 'tool',
            content: textContent,
        }];
        // Owned by toolResults now — never duplicated in textContent.
        textContent = null;
    }
    return {
        role: isToolResultMsg ? 'user' : msg.role,
        textContent,
        toolCalls: isToolResultMsg ? null : toolCalls,
        toolResults,
    };
}
|
|
1022
|
-
// ─── Context Engine Implementation ─────────────────────────────
|
|
1023
|
-
/**
 * In-flight warm dedup map.
 * Key: "agentId::sessionKey" — Value: the in-progress warm() Promise.
 * Prevents concurrent bootstrap() calls from firing multiple full warms
 * for the same session key before the first one sets the Redis history key.
 * Cleared on completion (success or failure) so the next cold start retries.
 * Module-level mutable state — shared by every engine created in this process.
 */
const _warmInFlight = new Map();
|
|
1031
|
-
// ─── Token estimation ──────────────────────────────────────────
|
|
1032
|
-
/**
|
|
1033
|
-
* Estimate tokens for a string using the same ~4 chars/token heuristic
|
|
1034
|
-
* used by the hypermem compositor. Fast and allocation-free — no tokenizer
|
|
1035
|
-
* library needed for a budget guard.
|
|
1036
|
-
*/
|
|
1037
|
-
/**
 * Estimate token count of a string at ~4 chars/token (matching the hypermem
 * compositor's heuristic). Null/empty input costs 0. No tokenizer needed —
 * this is only a budget guard.
 */
function estimateTokens(text) {
    return text ? Math.ceil(text.length / 4) : 0;
}
|
|
1042
|
-
/**
 * Estimate token cost of one message content part.
 * Images are charged against their inline data / URL length (÷3),
 * tool calls against their serialized JSON (÷2), and everything else
 * against its text at the standard ~4 chars/token heuristic.
 */
function estimateMessagePartTokens(part) {
    switch (part.type) {
        case 'image':
        case 'image_url': {
            const inline = part.source?.data;
            const remote = part.image_url?.url;
            const payload = typeof inline === 'string' ? inline : typeof remote === 'string' ? remote : '';
            return Math.ceil(payload.length / 3);
        }
        case 'toolCall':
        case 'tool_use':
            return Math.ceil(JSON.stringify(part).length / 2);
        default: {
            let textVal = null;
            if (typeof part.text === 'string') {
                textVal = part.text;
            }
            else if (typeof part.content === 'string') {
                textVal = part.content;
            }
            else if (part.content != null) {
                textVal = JSON.stringify(part.content);
            }
            return estimateTokens(textVal);
        }
    }
}
|
|
1057
|
-
/**
 * Estimate the total token cost of a single message: textContent (or the
 * string `content` when textContent is absent), plus serialized tool
 * calls/results at ÷2, plus every array content part.
 */
function estimateMessageTokens(msg) {
    const hasTextContent = typeof msg.textContent === 'string';
    let total = estimateTokens(hasTextContent ? msg.textContent : null);
    // A string `content` only counts when textContent didn't already cover it.
    if (!hasTextContent && typeof msg.content === 'string') {
        total += estimateTokens(msg.content);
    }
    if (msg.toolCalls) {
        total += Math.ceil(JSON.stringify(msg.toolCalls).length / 2);
    }
    if (msg.toolResults) {
        total += Math.ceil(JSON.stringify(msg.toolResults).length / 2);
    }
    if (Array.isArray(msg.content)) {
        for (const part of msg.content) {
            total += estimateMessagePartTokens(part);
        }
    }
    return total;
}
|
|
1071
|
-
/** Sum the estimated token cost of every message in the array. */
function estimateMessageArrayTokens(messages) {
    let total = 0;
    for (const msg of messages) {
        total += estimateMessageTokens(msg);
    }
    return total;
}
|
|
1074
|
-
/**
 * Warn (and emit guard telemetry) when the runtime token count diverges from
 * the composed count by more than max(500, 5% of budget). Divergence between
 * the L1 cache window ("redis") and either of the other two is structural —
 * the cache lags between trim passes — so it's only logged at debug level.
 */
function maybeLogPressureAccountingAnomaly(fields) {
    const { path, agentId, sessionKey, runtimeTokens, redisTokens, composedTokens, budget } = fields;
    const threshold = Math.max(500, Math.floor(budget * 0.05));
    const runtimeVsComposed = Math.abs(runtimeTokens - composedTokens);
    const redisVsComposed = Math.abs(redisTokens - composedTokens);
    const runtimeVsRedis = Math.abs(runtimeTokens - redisTokens);
    if (runtimeVsComposed < threshold) {
        // Cache drift alone is harmless — the runtime array is authoritative.
        if (redisVsComposed >= threshold || runtimeVsRedis >= threshold) {
            console.debug(`[hypermem-plugin] cache-drift (non-anomalous): path=${path} ` +
                `runtime=${runtimeTokens} cache=${redisTokens} composed=${composedTokens} ` +
                `budget=${budget}`);
        }
        return;
    }
    // Runtime vs composed divergence indicates a real trim accounting bug.
    console.warn(`[hypermem-plugin] pressure-accounting anomaly: path=${path} ` +
        `runtime=${runtimeTokens} cache=${redisTokens} composed=${composedTokens} ` +
        `budget=${budget} threshold=${threshold}`);
    guardTelemetry({
        path,
        agentId,
        sessionKey,
        reason: 'pressure-accounting-anomaly',
    });
}
|
|
1105
|
-
/**
 * Normalize a persisted replay-recovery value: null/undefined → null,
 * the empty-string sentinel passes through, any other value is kept only
 * if isReplayState accepts it (otherwise null).
 */
function normalizeReplayRecoveryState(value) {
    if (value == null) {
        return null;
    }
    return value === '' || isReplayState(value) ? value : null;
}
|
|
1112
|
-
/**
 * Best-effort persistence of the replay-recovery state into the session's
 * 'replayRecoveryState' cache slot. A null/undefined state is stored as the
 * empty-string sentinel. Storage failures are swallowed — losing this state
 * is tolerable.
 */
async function persistReplayRecoveryState(hm, agentId, sessionKey, nextState) {
    const payload = nextState ?? '';
    try {
        await hm.cache.setSlot(agentId, sessionKey, 'replayRecoveryState', payload);
    }
    catch {
        // Non-fatal
    }
}
|
|
1120
|
-
/**
 * True when a message carries a structured tool call — either a non-empty
 * toolCalls array or a toolCall/tool_use content block.
 */
function hasStructuredToolCallMessage(msg) {
    if (Array.isArray(msg.toolCalls) && msg.toolCalls.length > 0) {
        return true;
    }
    return Array.isArray(msg.content) &&
        msg.content.some((part) => part.type === 'toolCall' || part.type === 'tool_use');
}
|
|
1127
|
-
/**
 * True when a message carries a structured tool result — a non-empty
 * toolResults array, any tool-result role spelling, or a
 * tool_result/toolResult content block.
 */
function hasStructuredToolResultMessage(msg) {
    if (Array.isArray(msg.toolResults) && msg.toolResults.length > 0) {
        return true;
    }
    if (msg.role === 'toolResult' || msg.role === 'tool' || msg.role === 'tool_result') {
        return true;
    }
    return Array.isArray(msg.content) &&
        msg.content.some((part) => part.type === 'tool_result' || part.type === 'toolResult');
}
|
|
1136
|
-
/**
 * Collect every non-empty tool-call id on a message, from the toolCalls
 * array first, then from toolCall/tool_use content blocks.
 */
function getToolCallIds(msg) {
    const ids = [];
    for (const tc of Array.isArray(msg.toolCalls) ? msg.toolCalls : []) {
        if (typeof tc.id === 'string' && tc.id.length > 0) {
            ids.push(tc.id);
        }
    }
    for (const part of Array.isArray(msg.content) ? msg.content : []) {
        if ((part.type === 'toolCall' || part.type === 'tool_use') &&
            typeof part.id === 'string' && part.id.length > 0) {
            ids.push(part.id);
        }
    }
    return ids;
}
|
|
1150
|
-
/**
 * Collect every non-empty tool-result call id on a message: the toolResults
 * array's callId values, plus the message-level toolCallId / tool_call_id
 * fields when present.
 */
function getToolResultIds(msg) {
    const ids = [];
    if (Array.isArray(msg.toolResults)) {
        for (const tr of msg.toolResults) {
            if (typeof tr.callId === 'string' && tr.callId.length > 0) {
                ids.push(tr.callId);
            }
        }
    }
    for (const key of ['toolCallId', 'tool_call_id']) {
        const candidate = msg[key];
        if (typeof candidate === 'string' && candidate.length > 0) {
            ids.push(candidate);
        }
    }
    return ids;
}
|
|
1163
|
-
/**
 * Group a flat transcript into clusters that must survive or be trimmed
 * together: an assistant tool-call message is clustered with its matching
 * tool-result messages, and leading runs of bare tool-results stick together.
 * Every message lands in exactly one cluster; non-tool messages are
 * singleton clusters. Input order is preserved.
 */
function clusterTranscriptMessages(messages) {
    const clusters = [];
    for (let i = 0; i < messages.length; i++) {
        const current = messages[i];
        const cluster = [current];
        if (hasStructuredToolCallMessage(current)) {
            const callIds = new Set(getToolCallIds(current));
            // Absorb the following tool-result messages that answer this call.
            let j = i + 1;
            while (j < messages.length) {
                const candidate = messages[j];
                if (!hasStructuredToolResultMessage(candidate))
                    break;
                const resultIds = getToolResultIds(candidate);
                // A result with ids that match none of this call's ids belongs
                // to some other call — stop absorbing. (Id-less results on either
                // side are absorbed unconditionally.)
                if (callIds.size > 0 && resultIds.length > 0 && !resultIds.some(id => callIds.has(id)))
                    break;
                cluster.push(candidate);
                j++;
            }
            // Skip past everything absorbed (the loop's i++ lands on index j).
            i = j - 1;
        }
        else if (hasStructuredToolResultMessage(current)) {
            // A result with no visible call: keep consecutive pure-result
            // messages together, stopping at anything that also carries a call.
            let j = i + 1;
            while (j < messages.length) {
                const candidate = messages[j];
                if (!hasStructuredToolResultMessage(candidate) || hasStructuredToolCallMessage(candidate))
                    break;
                cluster.push(candidate);
                j++;
            }
            i = j - 1;
        }
        clusters.push(cluster);
    }
    return clusters;
}
|
|
1198
|
-
/**
|
|
1199
|
-
* Estimate total token cost of the current Redis history window for a session.
|
|
1200
|
-
* Counts text content + tool call/result JSON for each message.
|
|
1201
|
-
*/
|
|
1202
|
-
/**
 * Estimate total token cost of the current cached history window for a session.
 * Prefers the hot window cache (populated after compaction calls setWindow()),
 * then falls back to the full history list — a fresh or never-compacted
 * session has no window entry, and without the fallback this would report 0
 * and compact() would loop on "within_budget" while actually overflowing.
 * Any cache error yields 0.
 */
async function estimateWindowTokens(hm, agentId, sessionKey) {
    try {
        const window = await hm.cache.getWindow(agentId, sessionKey)
            ?? await hm.cache.getHistory(agentId, sessionKey);
        if (!window || window.length === 0) {
            return 0;
        }
        return estimateMessageArrayTokens(window);
    }
    catch {
        return 0;
    }
}
|
|
1220
|
-
/**
|
|
1221
|
-
* Truncate a JSONL session file to keep only the last `targetDepth` message
|
|
1222
|
-
* entries plus all non-message entries (header, compaction, model_change, etc).
|
|
1223
|
-
*
|
|
1224
|
-
* This is needed because the runtime loads messages from the JSONL file
|
|
1225
|
-
* (not from Redis) to build its overflow estimate. When ownsCompaction=true,
|
|
1226
|
-
* OpenClaw's truncateSessionAfterCompaction() is never called, so we do it
|
|
1227
|
-
* ourselves.
|
|
1228
|
-
*
|
|
1229
|
-
* Returns true if the file was actually truncated, false if no action was
|
|
1230
|
-
* needed or the file didn't exist.
|
|
1231
|
-
*/
|
|
1232
|
-
/**
 * Truncate a JSONL session file in place, keeping the header line, every
 * non-message entry (compaction markers, model changes, …) and only the
 * newest `targetDepth` message entries — optionally further limited by a
 * token budget. Rewrites atomically via a temp file + rename.
 *
 * @param sessionFile path to the JSONL file; non-strings are rejected.
 * @param targetDepth max message entries to keep.
 * @param force rewrite even when not meaningfully over target (over-budget path).
 * @param tokenBudgetOverride optional ~4 chars/token budget for kept messages.
 * @returns true if the file was rewritten; false on no-op or any error.
 */
async function truncateJsonlIfNeeded(sessionFile, targetDepth, force = false, tokenBudgetOverride) {
    if (!sessionFile || typeof sessionFile !== 'string')
        return false;
    try {
        const raw = await fs.readFile(sessionFile, 'utf-8');
        // Blank lines are dropped up front; line 0 is the session header.
        const lines = raw.split('\n').filter(l => l.trim());
        if (lines.length === 0)
            return false;
        const header = lines[0];
        const entries = [];
        for (let i = 1; i < lines.length; i++) {
            try {
                entries.push({ line: lines[i], parsed: JSON.parse(lines[i]) });
            }
            catch {
                // Unparseable lines are kept as metadata (parsed: null), never dropped.
                entries.push({ line: lines[i], parsed: null });
            }
            // Yield every 100 entries to avoid blocking the event loop
            if (i % 100 === 0)
                await new Promise(r => setImmediate(r));
        }
        const messageEntries = [];
        const metadataEntries = [];
        for (const e of entries) {
            if (e.parsed?.type === 'message') {
                messageEntries.push(e);
            }
            else {
                metadataEntries.push(e);
            }
        }
        // Only rewrite if meaningfully over target — unless force=true (over-budget path)
        if (!force && messageEntries.length <= targetDepth * 1.5)
            return false;
        // If a token budget is specified, keep newest messages within that budget
        let keptMessages;
        if (tokenBudgetOverride) {
            let tokenCount = 0;
            const kept = [];
            // Walk newest → oldest, accumulating estimated tokens (~4 chars/token)
            // until either the depth cap or the budget is hit. At least one
            // message is always kept, even if it alone exceeds the budget.
            for (let i = messageEntries.length - 1; i >= 0 && kept.length < targetDepth; i--) {
                const m = messageEntries[i].parsed?.message ?? messageEntries[i].parsed;
                let t = 0;
                if (m?.content)
                    t += Math.ceil(JSON.stringify(m.content).length / 4);
                if (m?.textContent)
                    t += Math.ceil(String(m.textContent).length / 4);
                if (m?.toolResults)
                    t += Math.ceil(JSON.stringify(m.toolResults).length / 4);
                if (m?.toolCalls)
                    t += Math.ceil(JSON.stringify(m.toolCalls).length / 4);
                if (tokenCount + t > tokenBudgetOverride && kept.length > 0)
                    break;
                kept.unshift(messageEntries[i]);
                tokenCount += t;
            }
            keptMessages = kept;
        }
        else {
            keptMessages = messageEntries.slice(-targetDepth);
        }
        // Membership is keyed on raw line text.
        // NOTE(review): byte-identical duplicate message lines share one Set key,
        // so a dropped duplicate of a kept line would be retained — confirm
        // whether duplicate lines can occur in practice.
        const keptSet = new Set(keptMessages.map(e => e.line));
        const metaSet = new Set(metadataEntries.map(e => e.line));
        const rebuilt = [header];
        for (const e of entries) {
            if (metaSet.has(e.line) || keptSet.has(e.line)) {
                rebuilt.push(e.line);
            }
        }
        // Atomic replace: write a pid+timestamp temp file, then rename over the original.
        const tmpPath = `${sessionFile}.hm-compact-${process.pid}-${Date.now()}.tmp`;
        await fs.writeFile(tmpPath, rebuilt.join('\n') + '\n', 'utf-8');
        await fs.rename(tmpPath, sessionFile);
        console.log(`[hypermem-plugin] truncateJsonl: ${entries.length} → ${rebuilt.length - 1} entries ` +
            `(kept ${keptMessages.length} messages + ${metadataEntries.length} metadata, file=${sessionFile.split('/').pop()})`);
        return true;
    }
    catch (err) {
        // ENOENT is expected when session file doesn't exist yet — not worth logging
        if (err.code !== 'ENOENT') {
            console.warn('[hypermem-plugin] truncateJsonl failed (non-fatal):', err.message);
        }
        return false;
    }
}
|
|
1315
|
-
function createHyperMemEngine() {
|
|
1316
|
-
return {
|
|
1317
|
-
info: {
|
|
1318
|
-
id: 'hypercompositor',
|
|
1319
|
-
name: 'hypermem context engine',
|
|
1320
|
-
version: '0.6.3',
|
|
1321
|
-
// We own compaction — assemble() trims to budget via the compositor safety
|
|
1322
|
-
// valve, so runtime compaction is never needed. compact() handles any
|
|
1323
|
-
// explicit calls by trimming the Redis history window directly.
|
|
1324
|
-
ownsCompaction: true,
|
|
1325
|
-
},
|
|
1326
|
-
/**
|
|
1327
|
-
* Bootstrap: warm Redis session for this agent, register in fleet if needed.
|
|
1328
|
-
*
|
|
1329
|
-
* Idempotent — skips warming if the session is already hot in Redis.
|
|
1330
|
-
* Without this guard, the OpenClaw runtime calls bootstrap() on every turn
|
|
1331
|
-
* (not just session start), causing:
|
|
1332
|
-
* 1. A SQLite read + Redis pipeline push on every message (lane lock)
|
|
1333
|
-
* 2. 250 messages re-pushed to Redis per turn (dedup in pushHistory helps,
|
|
1334
|
-
* but the read cost still runs)
|
|
1335
|
-
* 3. Followup queue drain blocked until warm completes
|
|
1336
|
-
*
|
|
1337
|
-
* With this guard: cold start = full warm; hot session = single EXISTS check.
|
|
1338
|
-
*/
|
|
1339
|
-
async bootstrap({ sessionId, sessionKey }) {
|
|
1340
|
-
try {
|
|
1341
|
-
const hm = await getHyperMem();
|
|
1342
|
-
const sk = resolveSessionKey(sessionId, sessionKey);
|
|
1343
|
-
const agentId = extractAgentId(sk);
|
|
1344
|
-
// EC1 JSONL truncation moved to maintain() — bootstrap stays fast.
|
|
1345
|
-
// B2: Session-restart detection — rotateSessionContext hook.
|
|
1346
|
-
// When the runtime starts a new session (new sessionId) for an existing
|
|
1347
|
-
// sessionKey, archive the old context head and create a fresh active
|
|
1348
|
-
// context so the new conversation starts clean. This prevents the new
|
|
1349
|
-
// session from inheriting a stale context head pointer from the prior run.
|
|
1350
|
-
//
|
|
1351
|
-
// Detection: if a conversation row exists for this sessionKey AND the
|
|
1352
|
-
// stored session_id differs from the incoming sessionId (runtime-assigned),
|
|
1353
|
-
// treat this as a session restart.
|
|
1354
|
-
//
|
|
1355
|
-
// Non-fatal: context rotation is best-effort and never blocks bootstrap.
|
|
1356
|
-
if (sessionId) {
|
|
1357
|
-
try {
|
|
1358
|
-
const _msgDb = hm.dbManager.getMessageDb(agentId);
|
|
1359
|
-
if (_msgDb) {
|
|
1360
|
-
const _existingConv = _msgDb.prepare('SELECT id, session_id FROM conversations WHERE session_key = ? LIMIT 1').get(sk);
|
|
1361
|
-
if (_existingConv &&
|
|
1362
|
-
_existingConv.session_id !== null &&
|
|
1363
|
-
_existingConv.session_id !== sessionId) {
|
|
1364
|
-
// Distinct sessionId — this is a session restart for an existing sessionKey.
|
|
1365
|
-
rotateSessionContext(_msgDb, agentId, sk, _existingConv.id);
|
|
1366
|
-
// Update the stored session_id to the new one.
|
|
1367
|
-
try {
|
|
1368
|
-
_msgDb.prepare('UPDATE conversations SET session_id = ? WHERE id = ?')
|
|
1369
|
-
.run(sessionId, _existingConv.id);
|
|
1370
|
-
}
|
|
1371
|
-
catch {
|
|
1372
|
-
// Best-effort — column may not exist in older schemas
|
|
1373
|
-
}
|
|
1374
|
-
console.log(`[hypermem-plugin] bootstrap: session restart detected for ${agentId}/${sk} ` +
|
|
1375
|
-
`(prev session_id=${_existingConv.session_id}, new=${sessionId}) — context rotated`);
|
|
1376
|
-
}
|
|
1377
|
-
else if (_existingConv && _existingConv.session_id === null && sessionId) {
|
|
1378
|
-
// Conversation exists but session_id was never recorded — stamp it now.
|
|
1379
|
-
try {
|
|
1380
|
-
_msgDb.prepare('UPDATE conversations SET session_id = ? WHERE id = ?')
|
|
1381
|
-
.run(sessionId, _existingConv.id);
|
|
1382
|
-
}
|
|
1383
|
-
catch {
|
|
1384
|
-
// Best-effort
|
|
1385
|
-
}
|
|
1386
|
-
}
|
|
1387
|
-
}
|
|
1388
|
-
}
|
|
1389
|
-
catch (rotateErr) {
|
|
1390
|
-
// Non-fatal — never block bootstrap on context rotation
|
|
1391
|
-
console.warn('[hypermem-plugin] bootstrap: rotateSessionContext failed (non-fatal):', rotateErr.message);
|
|
1392
|
-
}
|
|
1393
|
-
}
|
|
1394
|
-
// Fast path: if session already has history in Redis, skip warm entirely.
|
|
1395
|
-
// sessionExists() is a single EXISTS call — sub-millisecond cost.
|
|
1396
|
-
const alreadyWarm = await hm.cache.sessionExists(agentId, sk);
|
|
1397
|
-
if (alreadyWarm) {
|
|
1398
|
-
return { bootstrapped: true };
|
|
1399
|
-
}
|
|
1400
|
-
// In-flight dedup: if a warm is already running for this session key,
|
|
1401
|
-
// reuse that promise instead of launching a second concurrent warm.
|
|
1402
|
-
const inflightKey = `${agentId}::${sk}`;
|
|
1403
|
-
const existing = _warmInFlight.get(inflightKey);
|
|
1404
|
-
if (existing) {
|
|
1405
|
-
await existing;
|
|
1406
|
-
return { bootstrapped: true };
|
|
1407
|
-
}
|
|
1408
|
-
// Cold start: warm Redis with the session — pre-loads history + slots
|
|
1409
|
-
// CRIT-002: Load supplemental identity files (MOTIVATIONS.md, STYLE.md) that are
|
|
1410
|
-
// NOT already injected by OpenClaw's contextInjection into the system prompt.
|
|
1411
|
-
// SOUL.md and IDENTITY.md are filtered out here because OpenClaw injects them
|
|
1412
|
-
// via workspace bootstrap — re-injecting them via the identity slot would cause
|
|
1413
|
-
// duplication. Only agent-specific extras (MOTIVATIONS.md, STYLE.md) are included.
|
|
1414
|
-
// Non-fatal: missing files are silently skipped.
|
|
1415
|
-
let identityBlock;
|
|
1416
|
-
try {
|
|
1417
|
-
// Council agents live at workspace/<agentId>/
|
|
1418
|
-
// Other agents at workspace/<agentId>/ — try council path first
|
|
1419
|
-
const homedir = os.homedir();
|
|
1420
|
-
const councilPath = path.join(homedir, '.openclaw', 'workspace', agentId);
|
|
1421
|
-
const workspacePath = path.join(homedir, '.openclaw', 'workspace', agentId);
|
|
1422
|
-
let wsPath = councilPath;
|
|
1423
|
-
try {
|
|
1424
|
-
await fs.access(councilPath);
|
|
1425
|
-
}
|
|
1426
|
-
catch {
|
|
1427
|
-
wsPath = workspacePath;
|
|
1428
|
-
}
|
|
1429
|
-
const identityFiles = ['SOUL.md', 'IDENTITY.md', 'MOTIVATIONS.md', 'STYLE.md']
|
|
1430
|
-
.filter(f => !OPENCLAW_BOOTSTRAP_FILES.has(f));
|
|
1431
|
-
const parts = [];
|
|
1432
|
-
for (const fname of identityFiles) {
|
|
1433
|
-
try {
|
|
1434
|
-
const content = await fs.readFile(path.join(wsPath, fname), 'utf-8');
|
|
1435
|
-
if (content.trim())
|
|
1436
|
-
parts.push(content.trim());
|
|
1437
|
-
}
|
|
1438
|
-
catch {
|
|
1439
|
-
// File absent — skip silently
|
|
1440
|
-
}
|
|
1441
|
-
}
|
|
1442
|
-
if (parts.length > 0)
|
|
1443
|
-
identityBlock = parts.join('\n\n');
|
|
1444
|
-
}
|
|
1445
|
-
catch {
|
|
1446
|
-
// Identity load is best-effort — never block bootstrap on this
|
|
1447
|
-
}
|
|
1448
|
-
// Capture wsPath for post-warm seeding (declared in the identity block above)
|
|
1449
|
-
let _wsPathForSeed;
|
|
1450
|
-
try {
|
|
1451
|
-
const homedir2 = os.homedir();
|
|
1452
|
-
const councilPath2 = path.join(homedir2, '.openclaw', 'workspace', agentId);
|
|
1453
|
-
const workspacePath2 = path.join(homedir2, '.openclaw', 'workspace', agentId);
|
|
1454
|
-
try {
|
|
1455
|
-
await fs.access(councilPath2);
|
|
1456
|
-
_wsPathForSeed = councilPath2;
|
|
1457
|
-
}
|
|
1458
|
-
catch {
|
|
1459
|
-
_wsPathForSeed = workspacePath2;
|
|
1460
|
-
}
|
|
1461
|
-
}
|
|
1462
|
-
catch { /* non-fatal */ }
|
|
1463
|
-
const warmPromise = hm.warm(agentId, sk, identityBlock ? { identity: identityBlock } : undefined).finally(() => {
|
|
1464
|
-
_warmInFlight.delete(inflightKey);
|
|
1465
|
-
});
|
|
1466
|
-
_warmInFlight.set(inflightKey, warmPromise);
|
|
1467
|
-
await warmPromise;
|
|
1468
|
-
// ACA doc seeding — fire-and-forget after warm.
|
|
1469
|
-
// Idempotent: WorkspaceSeeder skips files whose hash hasn't changed.
|
|
1470
|
-
// Seeds SOUL.md, TOOLS.md, AGENTS.md, POLICY.md etc. into library.db
|
|
1471
|
-
// doc_chunks so trigger-based retrieval can serve them at compose time.
|
|
1472
|
-
if (_wsPathForSeed) {
|
|
1473
|
-
const wsPathForSeed = _wsPathForSeed;
|
|
1474
|
-
hm.seedWorkspace(wsPathForSeed, { agentId }).then(seedResult => {
|
|
1475
|
-
if (seedResult.totalInserted > 0 || seedResult.reindexed > 0) {
|
|
1476
|
-
console.log(`[hypermem-plugin] bootstrap: seeded workspace docs for ${agentId} ` +
|
|
1477
|
-
`(+${seedResult.totalInserted} chunks, ${seedResult.reindexed} reindexed, ` +
|
|
1478
|
-
`${seedResult.skipped} unchanged, ${seedResult.errors.length} errors)`);
|
|
1479
|
-
}
|
|
1480
|
-
}).catch(err => {
|
|
1481
|
-
console.warn('[hypermem-plugin] bootstrap: workspace seeding failed (non-fatal):', err.message);
|
|
1482
|
-
});
|
|
1483
|
-
}
|
|
1484
|
-
// Post-warm pressure check: if messages.db had accumulated history,
|
|
1485
|
-
// warm() may have loaded the session straight to 80%+. Pre-trim now
|
|
1486
|
-
// so the first turn has headroom instead of starting saturated.
|
|
1487
|
-
// This is the "restart at 98%" failure mode reported by Eve 2026-04-05:
|
|
1488
|
-
// JSONL truncation + Redis flush isn't enough if messages.db is still full
|
|
1489
|
-
// and warm() reloads it. Trim here closes the loop.
|
|
1490
|
-
try {
|
|
1491
|
-
const postWarmTokens = await estimateWindowTokens(hm, agentId, sk);
|
|
1492
|
-
// Use a conservative 90k default; if the session is genuinely large,
|
|
1493
|
-
// we'll underestimate budget and trim more aggressively — that's fine.
|
|
1494
|
-
const warmBudget = 90_000;
|
|
1495
|
-
const warmPressure = postWarmTokens / warmBudget;
|
|
1496
|
-
if (warmPressure > 0.80) {
|
|
1497
|
-
// Sprint 2.2a: demote warmstart to guard telemetry.
|
|
1498
|
-
//
|
|
1499
|
-
// Previously this path performed a real trim + invalidateWindow
|
|
1500
|
-
// and emitted `event:'trim'` with path='warmstart'. Assemble
|
|
1501
|
-
// (tool-loop + normal/subagent) is the steady-state owner now,
|
|
1502
|
-
// so the first turn's assemble.* trim absorbs any remaining
|
|
1503
|
-
// post-warm pressure. Keeping the pressure check + threshold
|
|
1504
|
-
// branch here preserves observability via `event:'trim-guard'`
|
|
1505
|
-
// without mutating Redis history or the window cache.
|
|
1506
|
-
guardTelemetry({
|
|
1507
|
-
path: 'warmstart',
|
|
1508
|
-
agentId, sessionKey: sk,
|
|
1509
|
-
reason: 'warmstart-pressure-demoted',
|
|
1510
|
-
});
|
|
1511
|
-
}
|
|
1512
|
-
}
|
|
1513
|
-
catch {
|
|
1514
|
-
// Non-fatal — first turn's tool-loop trim is the fallback
|
|
1515
|
-
}
|
|
1516
|
-
return { bootstrapped: true };
|
|
1517
|
-
}
|
|
1518
|
-
catch (err) {
|
|
1519
|
-
// Bootstrap failure is non-fatal — log and continue
|
|
1520
|
-
console.warn('[hypermem-plugin] bootstrap failed:', err.message);
|
|
1521
|
-
return { bootstrapped: false, reason: err.message };
|
|
1522
|
-
}
|
|
1523
|
-
},
|
|
1524
|
-
/**
|
|
1525
|
-
* Transcript maintenance — runs after bootstrap, successful turns, or compaction.
|
|
1526
|
-
*
|
|
1527
|
-
* Moved from bootstrap: proactive JSONL truncation is forward-looking (helps
|
|
1528
|
-
* next restart, not current session), so it belongs in maintenance, not init.
|
|
1529
|
-
* Also runs tool pair repair on Redis history to fix orphaned pairs from
|
|
1530
|
-
* trim/compaction passes.
|
|
1531
|
-
*/
|
|
1532
|
-
async maintain({ sessionId, sessionKey, sessionFile }) {
|
|
1533
|
-
let changed = false;
|
|
1534
|
-
let bytesFreed = 0;
|
|
1535
|
-
let rewrittenEntries = 0;
|
|
1536
|
-
try {
|
|
1537
|
-
const hm = await getHyperMem();
|
|
1538
|
-
const sk = resolveSessionKey(sessionId, sessionKey);
|
|
1539
|
-
const agentId = extractAgentId(sk);
|
|
1540
|
-
// 1. Proactive JSONL truncation (EC1 guard — next restart loads clean)
|
|
1541
|
-
try {
|
|
1542
|
-
const EC1_MAX_MESSAGES = 60;
|
|
1543
|
-
const EC1_TOKEN_BUDGET = Math.floor(128_000 * 0.40);
|
|
1544
|
-
const truncated = await truncateJsonlIfNeeded(sessionFile, EC1_MAX_MESSAGES, false, EC1_TOKEN_BUDGET);
|
|
1545
|
-
if (truncated) {
|
|
1546
|
-
console.log(`[hypermem-plugin] maintain: proactive JSONL trim for ${agentId} ` +
|
|
1547
|
-
`(EC1 guard — next restart will load clean)`);
|
|
1548
|
-
changed = true;
|
|
1549
|
-
}
|
|
1550
|
-
}
|
|
1551
|
-
catch {
|
|
1552
|
-
// Non-fatal — JSONL truncation is best-effort
|
|
1553
|
-
}
|
|
1554
|
-
// 2. Redis history tool pair repair
|
|
1555
|
-
// Compaction and trim passes can orphan tool_call/tool_result pairs.
|
|
1556
|
-
// Anthropic and Gemini reject orphaned pairs with 400 errors.
|
|
1557
|
-
try {
|
|
1558
|
-
const history = await hm.cache.getHistory(agentId, sk);
|
|
1559
|
-
if (history && history.length > 0) {
|
|
1560
|
-
// eslint-disable-next-line @typescript-eslint/no-explicit-any
|
|
1561
|
-
const repairedHistory = repairToolPairs(history);
|
|
1562
|
-
const removedCount = history.length - repairedHistory.length;
|
|
1563
|
-
if (removedCount > 0) {
|
|
1564
|
-
await hm.cache.replaceHistory(agentId, sk, repairedHistory);
|
|
1565
|
-
await hm.cache.invalidateWindow(agentId, sk);
|
|
1566
|
-
console.log(`[hypermem-plugin] maintain: repaired tool pairs in Redis history ` +
|
|
1567
|
-
`for ${agentId} (removed ${removedCount} orphaned messages)`);
|
|
1568
|
-
changed = true;
|
|
1569
|
-
rewrittenEntries += removedCount;
|
|
1570
|
-
// Rough estimate: ~500 bytes per removed message
|
|
1571
|
-
bytesFreed += removedCount * 500;
|
|
1572
|
-
}
|
|
1573
|
-
}
|
|
1574
|
-
}
|
|
1575
|
-
catch {
|
|
1576
|
-
// Non-fatal
|
|
1577
|
-
}
|
|
1578
|
-
return { changed, bytesFreed, rewrittenEntries };
|
|
1579
|
-
}
|
|
1580
|
-
catch (err) {
|
|
1581
|
-
console.warn('[hypermem-plugin] maintain failed:', err.message);
|
|
1582
|
-
return { changed, bytesFreed, rewrittenEntries, reason: err.message };
|
|
1583
|
-
}
|
|
1584
|
-
},
|
|
1585
|
-
/**
|
|
1586
|
-
* Ingest a single message into hypermem's message store.
|
|
1587
|
-
* Skip heartbeats — they're noise in the memory store.
|
|
1588
|
-
*/
|
|
1589
|
-
async ingest({ sessionId, sessionKey, message, isHeartbeat }) {
    // Heartbeats are runtime noise — never persisted.
    if (isHeartbeat) {
        return { ingested: false };
    }
    // Skip system messages — they come from the runtime, not the conversation
    const msg = message;
    if (msg.role === 'system') {
        return { ingested: false };
    }
    try {
        const hm = await getHyperMem();
        const sk = resolveSessionKey(sessionId, sessionKey);
        const agentId = extractAgentId(sk);
        // `let` because the wave guard below may replace it with a copy whose
        // toolResults have been stubbed/truncated.
        let neutral = toNeutralMessage(msg);
        // Route to appropriate record method based on role.
        // User messages are intentionally NOT recorded here — afterTurn() handles
        // user recording with proper metadata stripping (stripMessageMetadata).
        // Recording here too causes dual-write: once raw (here), once clean (afterTurn).
        if (neutral.role === 'user') {
            return { ingested: false };
        }
        // ── Pre-ingestion wave guard ──────────────────────────────────────────
        // Tool result payloads can be 10k-50k tokens each. When a parallel tool
        // batch (4-6 results) lands while the session is already at 70%+, storing
        // full payloads pushes the hot window past the nuclear path threshold
        // before the next assemble() can trim. Use current hot-window state as
        // the pressure signal (appropriate here, we're deciding what to write TO
        // the window).
        //
        // Above 70%: truncate toolResult content in transcript, but keep the
        //   full payload durable in tool_artifacts (schema v9). Stub carries
        //   artifactId so the compositor can hydrate on demand.
        // Above 85%: full stub replacement in transcript, still with artifactId.
        // At all levels: the full payload is persisted durably. No data loss.
        const isInboundToolResult = msg.role === 'tool' || msg.role === 'tool_result' || msg.role === 'toolResult';
        if (isInboundToolResult && neutral.toolResults && neutral.toolResults.length > 0) {
            const windowTokens = await estimateWindowTokens(hm, agentId, sk);
            const effectiveBudget = computeEffectiveBudget(undefined);
            const windowPressure = windowTokens / effectiveBudget;
            // Error tool results are always preserved intact: they're small and
            // the model needs the error signal to understand what went wrong.
            const hasErrorResult = neutral.toolResults.some(tr => tr.isError);
            // Only apply degradation / artifact capture above elevated pressure.
            if (windowPressure > 0.70) {
                const MAX_TOOL_RESULT_CHARS = 500;
                const highPressure = windowPressure > 0.85;
                const reason = highPressure ? 'wave_guard_pressure_high' : 'wave_guard_pressure_elevated';
                // For each non-error tool result, persist the full payload as a
                // durable artifact first, then rewrite the transcript entry to
                // either a full stub (high pressure) or a truncated stub with an
                // artifact pointer (elevated pressure).
                const rewrittenResults = await Promise.all(neutral.toolResults.map(async (tr) => {
                    if (tr.isError)
                        return tr;
                    const content = typeof tr.content === 'string'
                        ? tr.content
                        : JSON.stringify(tr.content);
                    // At elevated pressure, small payloads pass through unchanged.
                    if (!highPressure && content.length <= MAX_TOOL_RESULT_CHARS) {
                        return tr;
                    }
                    // Artifact capture runs BEFORE the transcript rewrite so the
                    // stub can embed the artifactId; capture failure degrades to
                    // a stub without a pointer rather than blocking ingest.
                    let artifactId;
                    try {
                        const record = await hm.recordToolArtifact(agentId, sk, {
                            toolName: tr.name || 'tool_result',
                            toolCallId: tr.callId || undefined,
                            isError: false,
                            payload: content,
                            summary: content.slice(0, 160),
                        });
                        artifactId = record.id;
                    }
                    catch (artErr) {
                        console.warn('[hypermem-plugin] tool artifact capture failed (non-fatal):', artErr.message);
                    }
                    const summary = highPressure
                        ? `omitted at ${(windowPressure * 100).toFixed(0)}% window pressure`
                        : `truncated at ${(windowPressure * 100).toFixed(0)}% pressure: ${Math.ceil(content.length / 4)} tokens`;
                    return {
                        ...tr,
                        content: formatToolChainStub({
                            name: tr.name || 'tool_result',
                            id: tr.callId || 'unknown',
                            status: 'ejected',
                            reason,
                            summary,
                            artifactId,
                        }),
                    };
                }));
                neutral = { ...neutral, toolResults: rewrittenResults };
                console.log(`[hypermem] ingest wave-guard: ${highPressure ? 'stubbed' : 'truncated'} toolResult (window pressure ${(windowPressure * 100).toFixed(0)}% > ${highPressure ? 85 : 70}%)${hasErrorResult ? ' + error results preserved' : ''} - full payload persisted to tool_artifacts`);
            }
        }
        // Everything that isn't user/system (assistant turns, tool results) is
        // recorded through the assistant-message path.
        await hm.recordAssistantMessage(agentId, sk, neutral);
        return { ingested: true };
    }
    catch (err) {
        // Ingest failure is non-fatal — record is best-effort
        console.warn('[hypermem-plugin] ingest failed:', err.message);
        return { ingested: false };
    }
},
|
|
1692
|
-
/**
|
|
1693
|
-
* Batch ingest: process multiple messages in a single call.
|
|
1694
|
-
*
|
|
1695
|
-
* Note: when afterTurn() is defined (which it is), the runtime calls
|
|
1696
|
-
* afterTurn instead of ingest/ingestBatch. This is here for interface
|
|
1697
|
-
* completeness and forward compatibility.
|
|
1698
|
-
*/
|
|
1699
|
-
async ingestBatch({ sessionId, sessionKey, messages, isHeartbeat }) {
|
|
1700
|
-
if (isHeartbeat) {
|
|
1701
|
-
return { ingestedCount: 0 };
|
|
1702
|
-
}
|
|
1703
|
-
let ingestedCount = 0;
|
|
1704
|
-
try {
|
|
1705
|
-
const hm = await getHyperMem();
|
|
1706
|
-
const sk = resolveSessionKey(sessionId, sessionKey);
|
|
1707
|
-
const agentId = extractAgentId(sk);
|
|
1708
|
-
for (const message of messages) {
|
|
1709
|
-
const msg = message;
|
|
1710
|
-
if (msg.role === 'system')
|
|
1711
|
-
continue;
|
|
1712
|
-
const neutral = toNeutralMessage(msg);
|
|
1713
|
-
if (neutral.role === 'user' && !neutral.toolResults?.length) {
|
|
1714
|
-
await hm.recordUserMessage(agentId, sk, stripMessageMetadata(neutral.textContent ?? ''));
|
|
1715
|
-
}
|
|
1716
|
-
else {
|
|
1717
|
-
await hm.recordAssistantMessage(agentId, sk, neutral);
|
|
1718
|
-
}
|
|
1719
|
-
ingestedCount++;
|
|
1720
|
-
}
|
|
1721
|
-
}
|
|
1722
|
-
catch (err) {
|
|
1723
|
-
console.warn('[hypermem-plugin] ingestBatch failed:', err.message);
|
|
1724
|
-
}
|
|
1725
|
-
return { ingestedCount };
|
|
1726
|
-
},
|
|
1727
|
-
/**
|
|
1728
|
-
* Assemble model context from all four hypermem layers.
|
|
1729
|
-
*
|
|
1730
|
-
* The `messages` param contains the current conversation history from the
|
|
1731
|
-
* runtime. We pass the prompt (latest user message) as the retrieval query,
|
|
1732
|
-
* and let the compositor build the full context.
|
|
1733
|
-
*
|
|
1734
|
-
* Returns:
|
|
1735
|
-
* messages — full assembled message array for the model
|
|
1736
|
-
* estimatedTokens — token count of assembled context
|
|
1737
|
-
* systemPromptAddition — facts/recall/episodes injected before runtime system prompt
|
|
1738
|
-
*/
|
|
1739
|
-
async assemble({ sessionId, sessionKey, messages, tokenBudget, prompt, model }) {
|
|
1740
|
-
// ── Tool-loop guard ──────────────────────────────────────────────────────
|
|
1741
|
-
// When the last message is a toolResult, the runtime is mid tool-loop:
|
|
1742
|
-
// the model already has full context from the initial turn assembly.
|
|
1743
|
-
// Re-running the full compose pipeline here is wasteful and, in long
|
|
1744
|
-
// tool loops, causes cumulative context growth that triggers preemptive
|
|
1745
|
-
// context overflow. Pass the messages through as-is.
|
|
1746
|
-
//
|
|
1747
|
-
// Matches OpenClaw's legacy behavior: the legacy engine's assemble() is a
|
|
1748
|
-
// pass-through that never re-injects context on tool-loop calls.
|
|
1749
|
-
const lastMsg = messages[messages.length - 1];
|
|
1750
|
-
const isToolLoop = lastMsg?.role === 'toolResult' || lastMsg?.role === 'tool';
|
|
1751
|
-
// Telemetry: emit one assembleTrace at entry. Path taxonomy:
|
|
1752
|
-
// 'subagent' - session key matches the subagent pattern
|
|
1753
|
-
// 'cold' - normal full-assembly or tool-loop entry (a separate
|
|
1754
|
-
// 'replay' trace is emitted if the cache replay fast
|
|
1755
|
-
// path is taken below)
|
|
1756
|
-
// Zero-cost when HYPERMEM_TELEMETRY !== '1'.
|
|
1757
|
-
//
|
|
1758
|
-
// Trim-ownership turn context (Sprint 2): the turnId is also used to
|
|
1759
|
-
// scope the shared trim-owner claim helper so duplicate steady-state
|
|
1760
|
-
// trims in a single assemble() turn can be detected and (under
|
|
1761
|
-
// NODE_ENV='development') throw loudly. We always allocate the turnId
|
|
1762
|
-
// and open the scope — the map write is cheap and keeps enforcement
|
|
1763
|
-
// active even when telemetry is off. The scope is closed in the
|
|
1764
|
-
// finally block wrapping the full assemble body below.
|
|
1765
|
-
const _asmSk = resolveSessionKey(sessionId, sessionKey);
|
|
1766
|
-
const _asmTurnId = nextTurnId();
|
|
1767
|
-
beginTrimOwnerTurn(_asmSk, _asmTurnId);
|
|
1768
|
-
if (telemetryEnabled()) {
|
|
1769
|
-
const _agentId = extractAgentId(_asmSk);
|
|
1770
|
-
const _entryPath = _asmSk.includes('subagent:')
|
|
1771
|
-
? 'subagent'
|
|
1772
|
-
: 'cold';
|
|
1773
|
-
assembleTrace({
|
|
1774
|
-
agentId: _agentId,
|
|
1775
|
-
sessionKey: _asmSk,
|
|
1776
|
-
turnId: _asmTurnId,
|
|
1777
|
-
path: _entryPath,
|
|
1778
|
-
toolLoop: isToolLoop,
|
|
1779
|
-
msgCount: messages.length,
|
|
1780
|
-
});
|
|
1781
|
-
}
|
|
1782
|
-
try {
|
|
1783
|
-
if (isToolLoop) {
|
|
1784
|
-
// Tool-loop turns: pass messages through unchanged but still:
|
|
1785
|
-
// 1. Run the trim guardrail — tool loops accumulate history as fast
|
|
1786
|
-
// as regular turns, and the old path skipped trim entirely, leaving
|
|
1787
|
-
// the compaction guard blind (received estimatedTokens=0).
|
|
1788
|
-
// 2. Return a real estimatedTokens = windowTokens + cached overhead,
|
|
1789
|
-
// so the guard has accurate signal and can fire when needed.
|
|
1790
|
-
//
|
|
1791
|
-
// Fix (ingestion-wave): use pressure-tiered trim instead of fixed 80%.
|
|
1792
|
-
// At 91% with 5 parallel web_search calls incoming (~20-30% of budget),
|
|
1793
|
-
// a fixed 80% trim only frees 11% headroom — the wave overflows anyway
|
|
1794
|
-
// and results strip silently. Tier the trim target based on pre-trim
|
|
1795
|
-
// pressure so high-pressure sessions get real headroom before results land.
|
|
1796
|
-
const effectiveBudget = computeEffectiveBudget(tokenBudget, model);
|
|
1797
|
-
try {
|
|
1798
|
-
const hm = await getHyperMem();
|
|
1799
|
-
const sk = resolveSessionKey(sessionId, sessionKey);
|
|
1800
|
-
const agentId = extractAgentId(sk);
|
|
1801
|
-
// ── Image / heavy-content eviction pre-pass ──────────────────────
|
|
1802
|
-
// Evict stale image payloads and large tool results before measuring
|
|
1803
|
-
// pressure. This frees tokens without compaction — images alone can
|
|
1804
|
-
// account for 30%+ of context from a single screenshot 2 turns ago.
|
|
1805
|
-
const evictionCfg = _evictionConfig;
|
|
1806
|
-
const evictionEnabled = evictionCfg?.enabled !== false;
|
|
1807
|
-
let workingMessages = messages;
|
|
1808
|
-
if (evictionEnabled) {
|
|
1809
|
-
const { messages: evicted, stats: evStats } = evictStaleContent(messages, {
|
|
1810
|
-
imageAgeTurns: evictionCfg?.imageAgeTurns,
|
|
1811
|
-
toolResultAgeTurns: evictionCfg?.toolResultAgeTurns,
|
|
1812
|
-
minTokensToEvict: evictionCfg?.minTokensToEvict,
|
|
1813
|
-
keepPreviewChars: evictionCfg?.keepPreviewChars,
|
|
1814
|
-
});
|
|
1815
|
-
workingMessages = evicted;
|
|
1816
|
-
if (evStats.tokensFreed > 0) {
|
|
1817
|
-
console.log(`[hypermem] eviction: ${evStats.imagesEvicted} images, ` +
|
|
1818
|
-
`${evStats.toolResultsEvicted} tool results, ` +
|
|
1819
|
-
`~${evStats.tokensFreed.toLocaleString()} tokens freed`);
|
|
1820
|
-
}
|
|
1821
|
-
}
|
|
1822
|
-
// Measure pressure from the in-memory message array we are actually about
|
|
1823
|
-
// to shape and return. Redis remains a cross-check only.
|
|
1824
|
-
const runtimeTokens = estimateMessageArrayTokens(workingMessages);
|
|
1825
|
-
const redisTokens = await estimateWindowTokens(hm, agentId, sk);
|
|
1826
|
-
const replayRecovery = decideReplayRecovery({
|
|
1827
|
-
currentState: normalizeReplayRecoveryState(await hm.cache.getSlot(agentId, sk, 'replayRecoveryState').catch(() => '')),
|
|
1828
|
-
runtimeTokens,
|
|
1829
|
-
redisTokens,
|
|
1830
|
-
effectiveBudget,
|
|
1831
|
-
});
|
|
1832
|
-
const replayMarkerText = replayRecovery.emittedText;
|
|
1833
|
-
const preTrimTokens = runtimeTokens;
|
|
1834
|
-
// Sprint 3: unified pressure signal — tool-loop assemble path
|
|
1835
|
-
const s3ToolLoopPressure = computeUnifiedPressure(preTrimTokens, effectiveBudget, PRESSURE_SOURCE.TOOLLOOP_RUNTIME_ARRAY);
|
|
1836
|
-
const pressure = s3ToolLoopPressure.fraction;
|
|
1837
|
-
// Pressure-tiered trim targets use a single authority: the working
|
|
1838
|
-
// message array. Redis drift is logged as an anomaly, never used as
|
|
1839
|
-
// a trim trigger. Replay recovery gets its own explicit bounded mode
|
|
1840
|
-
// instead of sharing the steady-state pressure heuristics.
|
|
1841
|
-
let trimTarget;
|
|
1842
|
-
if (typeof replayRecovery.trimTargetOverride === 'number') {
|
|
1843
|
-
trimTarget = replayRecovery.trimTargetOverride;
|
|
1844
|
-
}
|
|
1845
|
-
else if (pressure > 0.85) {
|
|
1846
|
-
trimTarget = 0.40; // critical: 60% headroom for incoming wave
|
|
1847
|
-
}
|
|
1848
|
-
else if (pressure > 0.80) {
|
|
1849
|
-
trimTarget = 0.50; // high: 50% headroom
|
|
1850
|
-
}
|
|
1851
|
-
else if (pressure > 0.75) {
|
|
1852
|
-
trimTarget = 0.55; // elevated: 45% headroom
|
|
1853
|
-
}
|
|
1854
|
-
else {
|
|
1855
|
-
trimTarget = 0.65; // normal: 35% headroom
|
|
1856
|
-
}
|
|
1857
|
-
const trimBudget = Math.floor(effectiveBudget * trimTarget);
|
|
1858
|
-
// Steady-state trim owner claim (Sprint 2.2a): route through the
|
|
1859
|
-
// shared helper keyed by (sessionKey, turnId). In development a
|
|
1860
|
-
// duplicate steady-state trim in the same assemble() turn throws.
|
|
1861
|
-
// In non-development a duplicate returns false; the real trim +
|
|
1862
|
-
// its `event:'trim'` emission are gated on the successful claim so
|
|
1863
|
-
// a duplicate claim is actually suppressed, not just warned.
|
|
1864
|
-
// Compact.* paths are exempt; this path is assemble-owned.
|
|
1865
|
-
const toolLoopClaimed = claimTrimOwner(sk, _asmTurnId, 'assemble.toolLoop');
|
|
1866
|
-
let trimmed = 0;
|
|
1867
|
-
let toolLoopCacheInvalidated = false;
|
|
1868
|
-
if (toolLoopClaimed) {
|
|
1869
|
-
trimmed = await hm.cache.trimHistoryToTokenBudget(agentId, sk, trimBudget);
|
|
1870
|
-
if (trimmed > 0) {
|
|
1871
|
-
await hm.cache.invalidateWindow(agentId, sk);
|
|
1872
|
-
toolLoopCacheInvalidated = true;
|
|
1873
|
-
}
|
|
1874
|
-
if (telemetryEnabled()) {
|
|
1875
|
-
const postTrimTokens = await estimateWindowTokens(hm, agentId, sk).catch(() => 0);
|
|
1876
|
-
trimTelemetry({
|
|
1877
|
-
path: 'assemble.toolLoop',
|
|
1878
|
-
agentId, sessionKey: sk,
|
|
1879
|
-
preTokens: preTrimTokens,
|
|
1880
|
-
postTokens: postTrimTokens,
|
|
1881
|
-
removed: trimmed,
|
|
1882
|
-
cacheInvalidated: toolLoopCacheInvalidated,
|
|
1883
|
-
reason: `pressure=${(pressure * 100).toFixed(1)}%`,
|
|
1884
|
-
});
|
|
1885
|
-
}
|
|
1886
|
-
}
|
|
1887
|
-
else if (telemetryEnabled()) {
|
|
1888
|
-
// Surface the suppressed-duplicate as a bounded guard record so
|
|
1889
|
-
// downstream reporting can see how often the gate fires. No
|
|
1890
|
-
// history or window mutation here.
|
|
1891
|
-
guardTelemetry({
|
|
1892
|
-
path: 'assemble.toolLoop',
|
|
1893
|
-
agentId, sessionKey: sk,
|
|
1894
|
-
reason: 'duplicate-claim-suppressed',
|
|
1895
|
-
});
|
|
1896
|
-
}
|
|
1897
|
-
// Also trim the messages array itself to match the budget.
|
|
1898
|
-
// Redis trim clears the *next* turn's window. This turn's messages are
|
|
1899
|
-
// still the full runtime array — if we return them unchanged at 94%,
|
|
1900
|
-
// OpenClaw strips tool results before sending to the model regardless
|
|
1901
|
-
// of what estimatedTokens says. We need to return a slimmer array now.
|
|
1902
|
-
//
|
|
1903
|
-
// Strategy: keep system/identity messages at the front, then fill from
|
|
1904
|
-
// the back (most recent) until we hit trimBudget. Drop the middle.
|
|
1905
|
-
let trimmedMessages = workingMessages;
|
|
1906
|
-
if (pressure > trimTarget) {
|
|
1907
|
-
const msgArray = workingMessages;
|
|
1908
|
-
// Separate system messages (always keep) from conversation turns
|
|
1909
|
-
const systemMsgs = msgArray.filter(m => m.role === 'system');
|
|
1910
|
-
const convMsgs = msgArray.filter(m => m.role !== 'system');
|
|
1911
|
-
// Pre-process: inline-truncate large tool results before budget-fill drop.
|
|
1912
|
-
// A message with a 40k-token tool result that barely misses budget gets dropped
|
|
1913
|
-
// entirely. Replacing with a placeholder keeps the turn's metadata in context
|
|
1914
|
-
// while freeing the bulk of the tokens.
|
|
1915
|
-
const MAX_INLINE_TOOL_CHARS = 2000; // ~500 tokens
|
|
1916
|
-
// FIX (Bug 3): handle both NeutralMessage format (m.toolResults) and
|
|
1917
|
-
// OpenClaw native format (m.content array with type='tool_result' blocks).
|
|
1918
|
-
// Old guard `if (!m.toolResults)` skipped every native-format message.
|
|
1919
|
-
// Also fixed: replacement must be valid NeutralToolResult { callId, name, content },
|
|
1920
|
-
// not { type, text } which breaks pair-integrity downstream.
|
|
1921
|
-
const processedConvMsgs = convMsgs.map(m => {
|
|
1922
|
-
// NeutralMessage format
|
|
1923
|
-
if (m.toolResults) {
|
|
1924
|
-
const resultStr = JSON.stringify(m.toolResults);
|
|
1925
|
-
if (resultStr.length <= MAX_INLINE_TOOL_CHARS)
|
|
1926
|
-
return m;
|
|
1927
|
-
const firstResult = m.toolResults[0];
|
|
1928
|
-
return {
|
|
1929
|
-
...m,
|
|
1930
|
-
toolResults: [{
|
|
1931
|
-
callId: firstResult?.callId ?? 'unknown',
|
|
1932
|
-
name: firstResult?.name ?? 'tool',
|
|
1933
|
-
content: `[tool result truncated: ${Math.ceil(resultStr.length / 4)} tokens]`,
|
|
1934
|
-
}],
|
|
1935
|
-
};
|
|
1936
|
-
}
|
|
1937
|
-
// OpenClaw native format
|
|
1938
|
-
if (Array.isArray(m.content)) {
|
|
1939
|
-
const content = m.content;
|
|
1940
|
-
const hasLarge = content.some(c => {
|
|
1941
|
-
if (c.type !== 'tool_result')
|
|
1942
|
-
return false;
|
|
1943
|
-
const val = typeof c.content === 'string' ? c.content : JSON.stringify(c.content ?? '');
|
|
1944
|
-
return val.length > MAX_INLINE_TOOL_CHARS;
|
|
1945
|
-
});
|
|
1946
|
-
if (!hasLarge)
|
|
1947
|
-
return m;
|
|
1948
|
-
return {
|
|
1949
|
-
...m,
|
|
1950
|
-
content: content.map(c => {
|
|
1951
|
-
if (c.type !== 'tool_result')
|
|
1952
|
-
return c;
|
|
1953
|
-
const val = typeof c.content === 'string' ? c.content : JSON.stringify(c.content ?? '');
|
|
1954
|
-
if (val.length <= MAX_INLINE_TOOL_CHARS)
|
|
1955
|
-
return c;
|
|
1956
|
-
return { ...c, content: `[tool result truncated: ${Math.ceil(val.length / 4)} tokens]` };
|
|
1957
|
-
}),
|
|
1958
|
-
};
|
|
1959
|
-
}
|
|
1960
|
-
return m;
|
|
1961
|
-
});
|
|
1962
|
-
// Fill from the back within budget
|
|
1963
|
-
let budget = trimBudget;
|
|
1964
|
-
// Reserve tokens for system messages using the same accounting
|
|
1965
|
-
// function as the final composed-array estimate.
|
|
1966
|
-
for (const sm of systemMsgs) {
|
|
1967
|
-
budget -= estimateMessageTokens(sm);
|
|
1968
|
-
}
|
|
1969
|
-
const msgCost = (m) => estimateMessageTokens(m);
|
|
1970
|
-
const clusters = clusterTranscriptMessages(processedConvMsgs);
|
|
1971
|
-
const keptClusters = [];
|
|
1972
|
-
const tailCluster = clusters.length > 0 ? clusters[clusters.length - 1] : [];
|
|
1973
|
-
if (tailCluster.length > 0) {
|
|
1974
|
-
budget -= tailCluster.reduce((sum, msg) => sum + msgCost(msg), 0);
|
|
1975
|
-
keptClusters.unshift(tailCluster);
|
|
1976
|
-
}
|
|
1977
|
-
for (let i = clusters.length - 2; i >= 0 && budget > 0; i--) {
|
|
1978
|
-
const cluster = clusters[i];
|
|
1979
|
-
const clusterCost = cluster.reduce((sum, msg) => sum + msgCost(msg), 0);
|
|
1980
|
-
if (budget - clusterCost >= 0) {
|
|
1981
|
-
keptClusters.unshift(cluster);
|
|
1982
|
-
budget -= clusterCost;
|
|
1983
|
-
}
|
|
1984
|
-
}
|
|
1985
|
-
const kept = keptClusters.flat();
|
|
1986
|
-
const keptCount = processedConvMsgs.length - kept.length;
|
|
1987
|
-
if (keptCount > 0) {
|
|
1988
|
-
console.log(`[hypermem-plugin] tool-loop trim: pressure=${s3ToolLoopPressure.pct}% source=${s3ToolLoopPressure.source} → ` +
|
|
1989
|
-
`target=${(trimTarget * 100).toFixed(0)}% (redis=${trimmed} msgs, messages=${keptCount} dropped)`);
|
|
1990
|
-
trimmedMessages = [...systemMsgs, ...kept];
|
|
1991
|
-
}
|
|
1992
|
-
else if (trimmed > 0) {
|
|
1993
|
-
console.log(`[hypermem-plugin] tool-loop trim: pressure=${s3ToolLoopPressure.pct}% source=${s3ToolLoopPressure.source} → ` +
|
|
1994
|
-
`target=${(trimTarget * 100).toFixed(0)}% (redis=${trimmed} msgs)`);
|
|
1995
|
-
}
|
|
1996
|
-
}
|
|
1997
|
-
else if (trimmed > 0) {
|
|
1998
|
-
console.log(`[hypermem-plugin] tool-loop trim: pressure=${s3ToolLoopPressure.pct}% source=${s3ToolLoopPressure.source} → ` +
|
|
1999
|
-
`target=${(trimTarget * 100).toFixed(0)}% (redis=${trimmed} msgs)`);
|
|
2000
|
-
}
|
|
2001
|
-
// Apply tool gradient to compress large tool results before returning.
|
|
2002
|
-
// Skip if deferToolPruning is enabled — OpenClaw's contextPruning handles it.
|
|
2003
|
-
if (!_deferToolPruning) {
|
|
2004
|
-
// The full compose path runs applyToolGradientToWindow during reshaping;
|
|
2005
|
-
// the tool-loop path was previously skipping this, leaving a 40k-token
|
|
2006
|
-
// web_search result uncompressed every turn.
|
|
2007
|
-
try {
|
|
2008
|
-
const gradientApplied = applyToolGradientToWindow(trimmedMessages, trimBudget);
|
|
2009
|
-
trimmedMessages = gradientApplied;
|
|
2010
|
-
}
|
|
2011
|
-
catch {
|
|
2012
|
-
// Non-fatal: if gradient fails, continue with untouched trimmedMessages
|
|
2013
|
-
}
|
|
2014
|
-
} // end deferToolPruning gate
|
|
2015
|
-
// Repair orphaned tool pairs in the trimmed message list.
|
|
2016
|
-
// In-memory trim (cluster drop) can strand tool_result messages whose
|
|
2017
|
-
// paired tool_use was in a dropped cluster.
|
|
2018
|
-
// eslint-disable-next-line @typescript-eslint/no-explicit-any
|
|
2019
|
-
trimmedMessages = repairToolPairs(trimmedMessages);
|
|
2020
|
-
const composedTokens = estimateMessageArrayTokens(trimmedMessages);
|
|
2021
|
-
maybeLogPressureAccountingAnomaly({
|
|
2022
|
-
path: 'assemble.toolLoop',
|
|
2023
|
-
agentId,
|
|
2024
|
-
sessionKey: sk,
|
|
2025
|
-
runtimeTokens: preTrimTokens,
|
|
2026
|
-
redisTokens,
|
|
2027
|
-
composedTokens,
|
|
2028
|
-
budget: effectiveBudget,
|
|
2029
|
-
});
|
|
2030
|
-
await persistReplayRecoveryState(hm, agentId, sk, replayRecovery.nextState);
|
|
2031
|
-
degradationTelemetry({
|
|
2032
|
-
agentId,
|
|
2033
|
-
sessionKey: sk,
|
|
2034
|
-
turnId: _asmTurnId,
|
|
2035
|
-
path: 'toolLoop',
|
|
2036
|
-
toolChainCoEjections: 0,
|
|
2037
|
-
toolChainStubReplacements: 0,
|
|
2038
|
-
artifactDegradations: 0,
|
|
2039
|
-
replayState: replayRecovery.emittedMarker?.state,
|
|
2040
|
-
replayReason: replayRecovery.emittedMarker?.reason,
|
|
2041
|
-
});
|
|
2042
|
-
const overhead = _overheadCache.get(sk) ?? getOverheadFallback();
|
|
2043
|
-
return {
|
|
2044
|
-
messages: trimmedMessages,
|
|
2045
|
-
estimatedTokens: composedTokens + overhead,
|
|
2046
|
-
systemPromptAddition: replayMarkerText || undefined,
|
|
2047
|
-
};
|
|
2048
|
-
}
|
|
2049
|
-
catch {
|
|
2050
|
-
// Non-fatal: return conservative estimate so guard doesn't go blind
|
|
2051
|
-
return {
|
|
2052
|
-
messages: messages,
|
|
2053
|
-
estimatedTokens: Math.floor(effectiveBudget * 0.8),
|
|
2054
|
-
};
|
|
2055
|
-
}
|
|
2056
|
-
}
|
|
2057
|
-
try {
|
|
2058
|
-
const hm = await getHyperMem();
|
|
2059
|
-
const sk = resolveSessionKey(sessionId, sessionKey);
|
|
2060
|
-
const agentId = extractAgentId(sk);
|
|
2061
|
-
// ── Subagent warming control ─────────────────────────────────────────
|
|
2062
|
-
// Detect subagent sessions by key pattern and apply warming mode.
|
|
2063
|
-
// 'off' = passthrough (no HyperMem context at all)
|
|
2064
|
-
// 'light' = facts + history only (skip library/wiki/semantic/keystones/doc chunks)
|
|
2065
|
-
// 'full' = standard compositor pipeline
|
|
2066
|
-
const isSubagent = sk.includes('subagent:');
|
|
2067
|
-
if (isSubagent && _subagentWarming === 'off') {
|
|
2068
|
-
console.log(`[hypermem-plugin] assemble: subagent warming=off, passthrough (sk: ${sk})`);
|
|
2069
|
-
return {
|
|
2070
|
-
messages: messages,
|
|
2071
|
-
estimatedTokens: estimateMessageArrayTokens(messages),
|
|
2072
|
-
};
|
|
2073
|
-
}
|
|
2074
|
-
if (isSubagent) {
|
|
2075
|
-
console.log(`[hypermem-plugin] assemble: subagent warming=${_subagentWarming} (sk: ${sk})`);
|
|
2076
|
-
}
|
|
2077
|
-
// Resolve agent tier from fleet store (for doc chunk tier filtering)
|
|
2078
|
-
let tier;
|
|
2079
|
-
try {
|
|
2080
|
-
const agent = _fleetStore?.getAgent(agentId);
|
|
2081
|
-
tier = agent?.tier;
|
|
2082
|
-
}
|
|
2083
|
-
catch {
|
|
2084
|
-
// Non-fatal — tier filtering just won't apply
|
|
2085
|
-
}
|
|
2086
|
-
// historyDepth: derive a safe message count from the token budget.
|
|
2087
|
-
// Uses 50% of the budget for history (down from 60% — more budget goes to
|
|
2088
|
-
// L3/L4 context slots now). Floor at 50, ceiling at 200.
|
|
2089
|
-
// This is a preventive guard — the compositor's safety valve still trims
|
|
2090
|
-
// by token count post-assembly, but limiting depth up front avoids
|
|
2091
|
-
// feeding the compactor a window it can't reduce.
|
|
2092
|
-
const effectiveBudget = computeEffectiveBudget(tokenBudget, model);
|
|
2093
|
-
const historyDepth = Math.min(250, Math.max(50, Math.floor((effectiveBudget * 0.65) / 500)));
|
|
2094
|
-
const runtimeEntryTokens = estimateMessageArrayTokens(messages);
|
|
2095
|
-
const redisEntryTokens = await estimateWindowTokens(hm, agentId, sk);
|
|
2096
|
-
const replayRecovery = decideReplayRecovery({
|
|
2097
|
-
currentState: normalizeReplayRecoveryState(await hm.cache.getSlot(agentId, sk, 'replayRecoveryState').catch(() => '')),
|
|
2098
|
-
runtimeTokens: runtimeEntryTokens,
|
|
2099
|
-
redisTokens: redisEntryTokens,
|
|
2100
|
-
effectiveBudget,
|
|
2101
|
-
});
|
|
2102
|
-
const replayHistoryDepth = replayRecovery.active && replayRecovery.historyDepthCap
|
|
2103
|
-
? Math.min(historyDepth, replayRecovery.historyDepthCap)
|
|
2104
|
-
: historyDepth;
|
|
2105
|
-
// ── Redis guardrail: trim history to token budget ────────────────────
|
|
2106
|
-
// Prevents model-switch bloat: if an agent previously ran on a larger
|
|
2107
|
-
// context window, Redis history may exceed the current model's budget.
|
|
2108
|
-
// Trimming here (before compose) ensures the compositor never sees a
|
|
2109
|
-
// history window it can't fit.
|
|
2110
|
-
//
|
|
2111
|
-
// Sprint 3 (AfterTurn Rebuild/Trim Loop Fix): the assemble.normal trim now
|
|
2112
|
-
// first checks whether the window is already within trimBudget. When
|
|
2113
|
-
// afterTurn's refreshRedisGradient caps the rebuilt window at the same
|
|
2114
|
-
// 0.65 fraction (Sprint 3 compositor fix), the steady-state path will
|
|
2115
|
-
// find preTokens <= trimBudget and skip the trim entirely. The trim only
|
|
2116
|
-
// fires when real excess exists (pressure spikes, model switch, cold start),
|
|
2117
|
-
// breaking the unconditional afterTurn→assemble trim churn loop.
|
|
2118
|
-
//
|
|
2119
|
-
// B3: Batch trim with growth allowance.
|
|
2120
|
-
// Trim only fires when the window has grown past the soft target by more
|
|
2121
|
-
// than TRIM_GROWTH_THRESHOLD (5%). When it does fire, trim to
|
|
2122
|
-
// softTarget * (1 - TRIM_HEADROOM_FRACTION) so the window has room to
|
|
2123
|
-
// grow for several turns before the next trim fires. This eliminates
|
|
2124
|
-
// per-turn trim churn from minor natural growth (short assistant replies,
|
|
2125
|
-
// small tool outputs) while still catching genuine pressure spikes.
|
|
2126
|
-
try {
|
|
2127
|
-
const { softBudget: trimSoftBudget, triggerBudget: trimTriggerBudget, targetBudget: trimTargetBudget, } = resolveTrimBudgets(effectiveBudget);
|
|
2128
|
-
// Always read preTokens so we can make the skip decision and emit telemetry.
|
|
2129
|
-
const preTokensNormal = await estimateWindowTokens(hm, agentId, sk).catch(() => 0);
|
|
2130
|
-
const normalPath = isSubagent ? 'assemble.subagent' : 'assemble.normal';
|
|
2131
|
-
// B3: Skip trim when window is within the growth-allowance envelope.
|
|
2132
|
-
// This replaces the Sprint 3 `windowAlreadyFits` check (which only
|
|
2133
|
-
// skipped at exactly ≤ softTarget). The growth allowance lets the
|
|
2134
|
-
// window float up to +5% before triggering, avoiding trim on every
|
|
2135
|
-
// turn that ends a few tokens above 65%.
|
|
2136
|
-
const withinGrowthEnvelope = preTokensNormal > 0 && preTokensNormal <= trimTriggerBudget;
|
|
2137
|
-
if (withinGrowthEnvelope) {
|
|
2138
|
-
if (telemetryEnabled()) {
|
|
2139
|
-
guardTelemetry({
|
|
2140
|
-
path: normalPath,
|
|
2141
|
-
agentId, sessionKey: sk,
|
|
2142
|
-
reason: 'window-within-budget-skip',
|
|
2143
|
-
});
|
|
2144
|
-
}
|
|
2145
|
-
}
|
|
2146
|
-
else {
|
|
2147
|
-
// Steady-state trim owner claim (Sprint 2.2a): route assemble.normal
|
|
2148
|
-
// and assemble.subagent through the shared helper keyed by
|
|
2149
|
-
// (sessionKey, _asmTurnId). The real trim + its `event:'trim'`
|
|
2150
|
-
// emission are gated on the claim so a duplicate steady-state claim
|
|
2151
|
-
// in the same turn is actually suppressed in production, not just
|
|
2152
|
-
// warned. In development the duplicate throws.
|
|
2153
|
-
const normalClaimed = claimTrimOwner(sk, _asmTurnId, normalPath);
|
|
2154
|
-
if (normalClaimed) {
|
|
2155
|
-
// B3: trim to the headroom target (below soft target) so the
|
|
2156
|
-
// window has room to grow before the next trim fires.
|
|
2157
|
-
const trimmed = await hm.cache.trimHistoryToTokenBudget(agentId, sk, trimTargetBudget);
|
|
2158
|
-
let normalCacheInvalidated = false;
|
|
2159
|
-
if (trimmed > 0) {
|
|
2160
|
-
// Invalidate window cache since history changed
|
|
2161
|
-
await hm.cache.invalidateWindow(agentId, sk);
|
|
2162
|
-
normalCacheInvalidated = true;
|
|
2163
|
-
}
|
|
2164
|
-
if (telemetryEnabled()) {
|
|
2165
|
-
const postTokensNormal = await estimateWindowTokens(hm, agentId, sk).catch(() => 0);
|
|
2166
|
-
trimTelemetry({
|
|
2167
|
-
path: normalPath,
|
|
2168
|
-
agentId, sessionKey: sk,
|
|
2169
|
-
preTokens: preTokensNormal,
|
|
2170
|
-
postTokens: postTokensNormal,
|
|
2171
|
-
removed: trimmed,
|
|
2172
|
-
cacheInvalidated: normalCacheInvalidated,
|
|
2173
|
-
reason: `b3:trigger=${trimTriggerBudget},target=${trimTargetBudget}`,
|
|
2174
|
-
});
|
|
2175
|
-
}
|
|
2176
|
-
}
|
|
2177
|
-
else if (telemetryEnabled()) {
|
|
2178
|
-
guardTelemetry({
|
|
2179
|
-
path: normalPath,
|
|
2180
|
-
agentId, sessionKey: sk,
|
|
2181
|
-
reason: 'duplicate-claim-suppressed',
|
|
2182
|
-
});
|
|
2183
|
-
}
|
|
2184
|
-
}
|
|
2185
|
-
}
|
|
2186
|
-
catch (trimErr) {
|
|
2187
|
-
// Non-fatal — compositor's budget-fit walk is the second line of defense
|
|
2188
|
-
console.warn('[hypermem-plugin] assemble: Redis trim failed (non-fatal):', trimErr.message);
|
|
2189
|
-
}
|
|
2190
|
-
// ── Budget downshift: proactive reshape pass ───────────────────────────────────────
|
|
2191
|
-
// Detect provider/model identity changes as well as raw budget changes.
|
|
2192
|
-
// Provider routing matters operationally because the same model family can
|
|
2193
|
-
// land on a different effective context window, for example Copilot Sonnet
|
|
2194
|
-
// vs direct Anthropic Sonnet. Only budget downshifts trigger the demoted
|
|
2195
|
-
// reshape guard, but verbose logs now show provider/model swaps even when
|
|
2196
|
-
// the effective budget stays flat or increases.
|
|
2197
|
-
let lastState = null;
|
|
2198
|
-
try {
|
|
2199
|
-
lastState = await hm.cache.getModelState(agentId, sk);
|
|
2200
|
-
const DOWNSHIFT_THRESHOLD = 0.10;
|
|
2201
|
-
const modelDelta = diffModelState(lastState, {
|
|
2202
|
-
model,
|
|
2203
|
-
tokenBudget: effectiveBudget,
|
|
2204
|
-
});
|
|
2205
|
-
const downshiftFraction = lastState?.tokenBudget
|
|
2206
|
-
? (lastState.tokenBudget - effectiveBudget) / lastState.tokenBudget
|
|
2207
|
-
: 0;
|
|
2208
|
-
const isDownshift = modelDelta.budgetDownshift && downshiftFraction > DOWNSHIFT_THRESHOLD;
|
|
2209
|
-
if (lastState && (modelDelta.modelChanged || modelDelta.budgetChanged)) {
|
|
2210
|
-
verboseLog(`[hypermem-plugin] model state change: ` +
|
|
2211
|
-
`prev=${modelDelta.previousIdentity.modelKey ?? 'unknown'} ` +
|
|
2212
|
-
`next=${modelDelta.currentIdentity.modelKey ?? 'unknown'} ` +
|
|
2213
|
-
`providerChanged=${modelDelta.providerChanged} ` +
|
|
2214
|
-
`modelIdChanged=${modelDelta.modelIdChanged} ` +
|
|
2215
|
-
`budget=${lastState.tokenBudget}->${effectiveBudget}`);
|
|
2216
|
-
}
|
|
2217
|
-
if (isDownshift && !_deferToolPruning) {
|
|
2218
|
-
// Sprint 2.2a: demote reshape to guard telemetry.
|
|
2219
|
-
//
|
|
2220
|
-
// Previously this branch re-ran applyToolGradientToWindow, wrote
|
|
2221
|
-
// back via replaceHistory, invalidated the window cache, and
|
|
2222
|
-
// stamped `reshapedAt` on model state. Assemble.* is the
|
|
2223
|
-
// steady-state owner, so the subsequent assemble.normal /
|
|
2224
|
-
// assemble.subagent trim (gated by claimTrimOwner) handles any
|
|
2225
|
-
// real downshift pressure. Keeping the detection branch preserves
|
|
2226
|
-
// observability; guardTelemetry records the would-be-reshape
|
|
2227
|
-
// without mutating history, the window, or model state.
|
|
2228
|
-
//
|
|
2229
|
-
// CRITICAL: do NOT call setModelState({ reshapedAt, … }) here.
|
|
2230
|
-
// compact() skips when reshapedAt is recent, which would cause it
|
|
2231
|
-
// to skip on the strength of a reshape that never ran.
|
|
2232
|
-
guardTelemetry({
|
|
2233
|
-
path: 'reshape',
|
|
2234
|
-
agentId, sessionKey: sk,
|
|
2235
|
-
reason: 'reshape-downshift-demoted',
|
|
2236
|
-
});
|
|
2237
|
-
}
|
|
2238
|
-
}
|
|
2239
|
-
catch (reshapeErr) {
|
|
2240
|
-
// Non-fatal — compositor safety valve is still the last defense
|
|
2241
|
-
console.warn('[hypermem-plugin] assemble: reshape pass failed (non-fatal):', reshapeErr.message);
|
|
2242
|
-
}
|
|
2243
|
-
// ── Cache replay fast path ─────────────────────────────────────────────
|
|
2244
|
-
// If the session was active recently, return the cached contextBlock
|
|
2245
|
-
// (systemPromptAddition) to produce a byte-identical system prompt and
|
|
2246
|
-
// hit the provider prefix cache (Anthropic / OpenAI).
|
|
2247
|
-
// The message window is always rebuilt fresh — only the compositor output
|
|
2248
|
-
// (contextBlock) is cached, since that's what determines prefix identity.
|
|
2249
|
-
const cacheReplayThresholdMs = _cacheReplayThresholdMs;
|
|
2250
|
-
let cachedContextBlock = null;
|
|
2251
|
-
if (cacheReplayThresholdMs > 0 && !replayRecovery.shouldSkipCacheReplay) {
|
|
2252
|
-
try {
|
|
2253
|
-
const cachedAt = await hm.cache.getSlot(agentId, sk, 'assemblyContextAt');
|
|
2254
|
-
if (cachedAt && Date.now() - parseInt(cachedAt) < cacheReplayThresholdMs) {
|
|
2255
|
-
cachedContextBlock = await hm.cache.getSlot(agentId, sk, 'assemblyContextBlock');
|
|
2256
|
-
if (cachedContextBlock) {
|
|
2257
|
-
console.log(`[hypermem-plugin] assemble: cache replay hit for ${agentId} (${Math.round((Date.now() - parseInt(cachedAt)) / 1000)}s old)`);
|
|
2258
|
-
if (telemetryEnabled()) {
|
|
2259
|
-
assembleTrace({
|
|
2260
|
-
agentId,
|
|
2261
|
-
sessionKey: sk,
|
|
2262
|
-
turnId: _asmTurnId,
|
|
2263
|
-
path: 'replay',
|
|
2264
|
-
toolLoop: isToolLoop,
|
|
2265
|
-
msgCount: messages.length,
|
|
2266
|
-
composeTopicTelemetryStatus: 'intentionally-omitted',
|
|
2267
|
-
});
|
|
2268
|
-
}
|
|
2269
|
-
}
|
|
2270
|
-
}
|
|
2271
|
-
}
|
|
2272
|
-
catch {
|
|
2273
|
-
// Non-fatal — fall through to full assembly
|
|
2274
|
-
}
|
|
2275
|
-
}
|
|
2276
|
-
// Subagent light mode: skip library/wiki/semantic/keystones/doc chunks.
|
|
2277
|
-
// Keeps: system, identity, history, active facts, output profile, tool gradient.
|
|
2278
|
-
const subagentLight = isSubagent && _subagentWarming === 'light';
|
|
2279
|
-
let forkedContext;
|
|
2280
|
-
if (isSubagent) {
|
|
2281
|
-
try {
|
|
2282
|
-
const rawForkedContext = await hm.cache.getSlot(agentId, sk, FORKED_CONTEXT_META_SLOT);
|
|
2283
|
-
if (rawForkedContext) {
|
|
2284
|
-
const parsed = JSON.parse(rawForkedContext);
|
|
2285
|
-
if (parsed?.enabled === true)
|
|
2286
|
-
forkedContext = parsed;
|
|
2287
|
-
}
|
|
2288
|
-
}
|
|
2289
|
-
catch {
|
|
2290
|
-
// Fork metadata is advisory; fall back to normal subagent lifecycle.
|
|
2291
|
-
}
|
|
2292
|
-
}
|
|
2293
|
-
const request = {
|
|
2294
|
-
agentId,
|
|
2295
|
-
sessionKey: sk,
|
|
2296
|
-
tokenBudget: effectiveBudget,
|
|
2297
|
-
historyDepth: lastState?.historyDepth && lastState.historyDepth < replayHistoryDepth
|
|
2298
|
-
? lastState.historyDepth
|
|
2299
|
-
: replayHistoryDepth,
|
|
2300
|
-
tier,
|
|
2301
|
-
model, // pass model for provider detection
|
|
2302
|
-
includeDocChunks: subagentLight ? false : !cachedContextBlock, // skip doc retrieval on cache hit or subagent light
|
|
2303
|
-
includeLibrary: subagentLight ? false : undefined, // skip wiki/knowledge/preferences
|
|
2304
|
-
includeSemanticRecall: subagentLight ? false : undefined, // skip vector/FTS recall
|
|
2305
|
-
includeKeystones: subagentLight ? false : undefined, // skip keystone history injection
|
|
2306
|
-
prompt,
|
|
2307
|
-
forkedContext,
|
|
2308
|
-
skipProviderTranslation: true, // runtime handles provider translation
|
|
2309
|
-
};
|
|
2310
|
-
const result = await hm.compose(request);
|
|
2311
|
-
degradationTelemetry({
|
|
2312
|
-
agentId,
|
|
2313
|
-
sessionKey: sk,
|
|
2314
|
-
turnId: _asmTurnId,
|
|
2315
|
-
path: 'compose',
|
|
2316
|
-
toolChainCoEjections: result.diagnostics?.toolChainCoEjections ?? 0,
|
|
2317
|
-
toolChainStubReplacements: result.diagnostics?.toolChainStubReplacements ?? 0,
|
|
2318
|
-
artifactDegradations: result.diagnostics?.artifactDegradations ?? 0,
|
|
2319
|
-
artifactOversizeThresholdTokens: result.diagnostics?.artifactOversizeThresholdTokens,
|
|
2320
|
-
replayState: replayRecovery.emittedMarker?.state,
|
|
2321
|
-
replayReason: replayRecovery.emittedMarker?.reason,
|
|
2322
|
-
});
|
|
2323
|
-
// Sprint 1: emit assemble-level trace with full observability fields
|
|
2324
|
-
// after a full compose (not replay). Surfaces prefix stability,
|
|
2325
|
-
// reranker outcome, slot spans, and compaction eligibility.
|
|
2326
|
-
if (telemetryEnabled() && !cachedContextBlock) {
|
|
2327
|
-
const diag = result.diagnostics;
|
|
2328
|
-
// prefixChanged: compare current prefixHash against prevPrefixHash
|
|
2329
|
-
// (surfaced by the compositor when a cache bypass detected prefix mutation).
|
|
2330
|
-
// When no previous hash is available (first turn), leave prefixChanged undefined.
|
|
2331
|
-
let prefixChanged;
|
|
2332
|
-
if (diag?.prefixHash && diag?.prevPrefixHash) {
|
|
2333
|
-
prefixChanged = diag.prefixHash !== diag.prevPrefixHash;
|
|
2334
|
-
}
|
|
2335
|
-
assembleTrace({
|
|
2336
|
-
agentId,
|
|
2337
|
-
sessionKey: sk,
|
|
2338
|
-
turnId: _asmTurnId,
|
|
2339
|
-
path: isSubagent ? 'subagent' : 'cold',
|
|
2340
|
-
toolLoop: isToolLoop,
|
|
2341
|
-
msgCount: result.messages.length,
|
|
2342
|
-
prefixChanged,
|
|
2343
|
-
prefixHash: diag?.prefixHash,
|
|
2344
|
-
rerankerStatus: diag?.rerankerStatus,
|
|
2345
|
-
rerankerCandidates: diag?.rerankerCandidates,
|
|
2346
|
-
rerankerProvider: diag?.rerankerProvider,
|
|
2347
|
-
slotSpans: diag?.slotSpans,
|
|
2348
|
-
compactionEligibleCount: diag?.compactionEligibleCount,
|
|
2349
|
-
compactionEligibleRatio: diag?.compactionEligibleRatio,
|
|
2350
|
-
compactionProcessedCount: diag?.compactionProcessedCount,
|
|
2351
|
-
composeTopicSource: diag?.composeTopicSource,
|
|
2352
|
-
composeTopicState: diag?.composeTopicState,
|
|
2353
|
-
composeTopicMessageCount: diag?.composeTopicMessageCount,
|
|
2354
|
-
composeTopicStampedMessageCount: diag?.composeTopicStampedMessageCount,
|
|
2355
|
-
composeTopicTelemetryStatus: diag?.composeTopicTelemetryStatus,
|
|
2356
|
-
});
|
|
2357
|
-
if (diag?.adaptiveLifecycleBand) {
|
|
2358
|
-
lifecyclePolicyTelemetry({
|
|
2359
|
-
path: 'compose.preRecall',
|
|
2360
|
-
agentId,
|
|
2361
|
-
sessionKey: sk,
|
|
2362
|
-
band: diag.adaptiveLifecycleBand,
|
|
2363
|
-
pressurePct: diag.adaptiveLifecyclePressurePct,
|
|
2364
|
-
trimSoftTarget: diag.adaptiveTrimSoftTarget,
|
|
2365
|
-
reasons: diag.adaptiveLifecycleReasons,
|
|
2366
|
-
});
|
|
2367
|
-
}
|
|
2368
|
-
if (diag?.adaptiveEvictionLifecycleBand) {
|
|
2369
|
-
lifecyclePolicyTelemetry({
|
|
2370
|
-
path: 'compose.eviction',
|
|
2371
|
-
agentId,
|
|
2372
|
-
sessionKey: sk,
|
|
2373
|
-
band: diag.adaptiveEvictionLifecycleBand,
|
|
2374
|
-
pressurePct: diag.adaptiveEvictionPressurePct,
|
|
2375
|
-
trimSoftTarget: diag.adaptiveTrimSoftTarget,
|
|
2376
|
-
reasons: diag.adaptiveLifecycleBandDiverged ? ['diverged-from-preRecall'] : undefined,
|
|
2377
|
-
});
|
|
2378
|
-
}
|
|
2379
|
-
}
|
|
2380
|
-
// Use cached contextBlock if available (cache replay), otherwise use fresh result.
|
|
2381
|
-
// After a full compose, write the new contextBlock to cache for the next turn.
|
|
2382
|
-
if (cachedContextBlock) {
|
|
2383
|
-
result.contextBlock = cachedContextBlock;
|
|
2384
|
-
}
|
|
2385
|
-
else if (result.contextBlock && cacheReplayThresholdMs > 0 && !replayRecovery.shouldSkipCacheReplay && !replayRecovery.emittedText) {
|
|
2386
|
-
// Write cache async — never block the assemble() return on this
|
|
2387
|
-
const blockToCache = result.contextBlock;
|
|
2388
|
-
const nowStr = Date.now().toString();
|
|
2389
|
-
const ttlSec = Math.ceil((cacheReplayThresholdMs * 2) / 1000);
|
|
2390
|
-
Promise.all([
|
|
2391
|
-
hm.cache.setSlot(agentId, sk, 'assemblyContextBlock', blockToCache),
|
|
2392
|
-
hm.cache.setSlot(agentId, sk, 'assemblyContextAt', nowStr),
|
|
2393
|
-
]).then(() => {
|
|
2394
|
-
// Extend TTL on the cached keys to 2× the threshold
|
|
2395
|
-
// setSlot uses the sessionTTL from RedisLayer config — acceptable fallback
|
|
2396
|
-
}).catch(() => { });
|
|
2397
|
-
}
|
|
2398
|
-
if (replayRecovery.emittedText) {
|
|
2399
|
-
result.contextBlock = result.contextBlock
|
|
2400
|
-
? `${result.contextBlock}
|
|
2401
|
-
${replayRecovery.emittedText}`
|
|
2402
|
-
: replayRecovery.emittedText;
|
|
2403
|
-
}
|
|
2404
|
-
// Convert NeutralMessage[] → AgentMessage[] for the OpenClaw runtime.
|
|
2405
|
-
// neutralToAgentMessage can return a single message or an array (tool results
|
|
2406
|
-
// expand to individual ToolResultMessage objects), so we flatMap.
|
|
2407
|
-
// eslint-disable-next-line @typescript-eslint/no-explicit-any
|
|
2408
|
-
let outputMessages = result.messages
|
|
2409
|
-
.filter(m => m.role != null)
|
|
2410
|
-
.flatMap(m => neutralToAgentMessage(m));
|
|
2411
|
-
const neutralPairStats = collectNeutralToolPairStats(result.messages);
|
|
2412
|
-
const agentPairStats = collectAgentToolPairStats(outputMessages);
|
|
2413
|
-
const toolPairAnomaly = neutralPairStats.missingToolResultCount > 0 ||
|
|
2414
|
-
neutralPairStats.orphanToolResultCount > 0 ||
|
|
2415
|
-
agentPairStats.missingToolResultCount > 0 ||
|
|
2416
|
-
agentPairStats.orphanToolResultCount > 0 ||
|
|
2417
|
-
agentPairStats.syntheticNoResultCount > 0
|
|
2418
|
-
? {
|
|
2419
|
-
stage: 'assemble',
|
|
2420
|
-
neutralMissingToolResultIds: neutralPairStats.missingToolResultIds.slice(0, 10),
|
|
2421
|
-
neutralOrphanToolResultIds: neutralPairStats.orphanToolResultIds.slice(0, 10),
|
|
2422
|
-
agentMissingToolResultIds: agentPairStats.missingToolResultIds.slice(0, 10),
|
|
2423
|
-
agentOrphanToolResultIds: agentPairStats.orphanToolResultIds.slice(0, 10),
|
|
2424
|
-
syntheticNoResultCount: agentPairStats.syntheticNoResultCount,
|
|
2425
|
-
}
|
|
2426
|
-
: undefined;
|
|
2427
|
-
await bumpToolPairMetrics(hm, agentId, sk, {
|
|
2428
|
-
composeCount: 1,
|
|
2429
|
-
preBridgeMissingToolResults: neutralPairStats.missingToolResultCount,
|
|
2430
|
-
preBridgeOrphanToolResults: neutralPairStats.orphanToolResultCount,
|
|
2431
|
-
postBridgeMissingToolResults: agentPairStats.missingToolResultCount,
|
|
2432
|
-
postBridgeOrphanToolResults: agentPairStats.orphanToolResultCount,
|
|
2433
|
-
}, toolPairAnomaly);
|
|
2434
|
-
if (toolPairAnomaly) {
|
|
2435
|
-
console.warn(`[hypermem-plugin] tool-pair-integrity: ${agentId}/${sk} ` +
|
|
2436
|
-
`neutralMissing=${neutralPairStats.missingToolResultCount} neutralOrphan=${neutralPairStats.orphanToolResultCount} ` +
|
|
2437
|
-
`agentMissing=${agentPairStats.missingToolResultCount} agentOrphan=${agentPairStats.orphanToolResultCount} ` +
|
|
2438
|
-
`synthetic=${agentPairStats.syntheticNoResultCount}`);
|
|
2439
|
-
}
|
|
2440
|
-
// Repair orphaned tool pairs before returning to provider.
|
|
2441
|
-
// compaction/trim passes can remove tool_use blocks without removing their
|
|
2442
|
-
// paired tool_result messages — Anthropic and Gemini reject these with 400.
|
|
2443
|
-
// eslint-disable-next-line @typescript-eslint/no-explicit-any
|
|
2444
|
-
outputMessages = repairToolPairs(outputMessages);
|
|
2445
|
-
// Cache overhead for tool-loop turns: contextBlock tokens (chars/4) +
|
|
2446
|
-
// tier-aware estimate for runtime system prompt (SOUL.md, identity,
|
|
2447
|
-
// workspace files — not visible from inside the plugin).
|
|
2448
|
-
const contextBlockTokens = Math.ceil((result.contextBlock?.length ?? 0) / 4);
|
|
2449
|
-
const runtimeSystemTokens = getOverheadFallback(tier);
|
|
2450
|
-
_overheadCache.set(sk, contextBlockTokens + runtimeSystemTokens);
|
|
2451
|
-
await persistReplayRecoveryState(hm, agentId, sk, replayRecovery.nextState);
|
|
2452
|
-
if (forkedContext) {
|
|
2453
|
-
await hm.cache.setSlot(agentId, sk, FORKED_CONTEXT_META_SLOT, '').catch(() => { });
|
|
2454
|
-
}
|
|
2455
|
-
// Update model state for downshift detection on next turn
|
|
2456
|
-
try {
|
|
2457
|
-
const modelIdentity = resolveModelIdentity(model);
|
|
2458
|
-
await hm.cache.setModelState(agentId, sk, {
|
|
2459
|
-
model: model ?? 'unknown',
|
|
2460
|
-
modelKey: modelIdentity.modelKey ?? undefined,
|
|
2461
|
-
provider: modelIdentity.provider ?? undefined,
|
|
2462
|
-
modelId: modelIdentity.modelId ?? undefined,
|
|
2463
|
-
tokenBudget: effectiveBudget,
|
|
2464
|
-
composedAt: new Date().toISOString(),
|
|
2465
|
-
historyDepth,
|
|
2466
|
-
});
|
|
2467
|
-
}
|
|
2468
|
-
catch {
|
|
2469
|
-
// Non-fatal
|
|
2470
|
-
}
|
|
2471
|
-
return {
|
|
2472
|
-
messages: outputMessages,
|
|
2473
|
-
estimatedTokens: result.tokenCount ?? 0,
|
|
2474
|
-
// systemPromptAddition injects hypermem context before the runtime system prompt.
|
|
2475
|
-
// This is the facts/recall/episodes block assembled by the compositor.
|
|
2476
|
-
systemPromptAddition: result.contextBlock || undefined,
|
|
2477
|
-
};
|
|
2478
|
-
}
|
|
2479
|
-
catch (err) {
|
|
2480
|
-
console.error('[hypermem-plugin] assemble error (stack):', err.stack ?? err);
|
|
2481
|
-
throw err; // Re-throw so the runtime falls back to legacy pipeline
|
|
2482
|
-
}
|
|
2483
|
-
}
|
|
2484
|
-
finally {
|
|
2485
|
-
// End the trim-owner turn scope opened at assemble entry. Paired
|
|
2486
|
-
// with beginTrimOwnerTurn(_asmSk, _asmTurnId) above; runs on every
|
|
2487
|
-
// exit path (normal return, tool-loop return, replay return, error
|
|
2488
|
-
// re-throw). Turn-scoped keying (Sprint 2.2a) means this only
|
|
2489
|
-
// removes THIS turn's slot, so concurrent same-session turns remain
|
|
2490
|
-
// isolated instead of clobbering each other.
|
|
2491
|
-
endTrimOwnerTurn(_asmSk, _asmTurnId);
|
|
2492
|
-
}
|
|
2493
|
-
},
|
|
2494
|
-
/**
|
|
2495
|
-
* Compact context. hypermem owns compaction.
|
|
2496
|
-
*
|
|
2497
|
-
* Strategy: assemble() already trims the composed message list to the token
|
|
2498
|
-
* budget via the compositor safety valve, so the model never receives an
|
|
2499
|
-
* oversized context. compact() is called by the runtime when it detects
|
|
2500
|
-
* overflow — at that point we:
|
|
2501
|
-
* 1. Estimate tokens in the current Redis history window
|
|
2502
|
-
* 2. If already under budget (compositor already handled it), report clean
|
|
2503
|
-
* 3. If over budget (e.g. window was built before budget cap was applied),
|
|
2504
|
-
* trim the Redis window to a safe depth and invalidate the compose cache
|
|
2505
|
-
*
|
|
2506
|
-
* This prevents the runtime from running its own LLM-summarization compaction
|
|
2507
|
-
* pass, which would destroy message history we're explicitly managing.
|
|
2508
|
-
*/
|
|
2509
|
-
async compact({ sessionId, sessionKey, sessionFile, tokenBudget, currentTokenCount }) {
|
|
2510
|
-
try {
|
|
2511
|
-
const hm = await getHyperMem();
|
|
2512
|
-
const sk = resolveSessionKey(sessionId, sessionKey);
|
|
2513
|
-
const agentId = extractAgentId(sk);
|
|
2514
|
-
// Skip if a reshape pass just ran (within last 30s) — avoid double-processing
|
|
2515
|
-
// Cache modelState here for reuse in density-aware JSONL truncation below.
|
|
2516
|
-
let cachedModelState = null;
|
|
2517
|
-
let model;
|
|
2518
|
-
try {
|
|
2519
|
-
cachedModelState = await hm.cache.getModelState(agentId, sk);
|
|
2520
|
-
model = cachedModelState?.model;
|
|
2521
|
-
if (cachedModelState?.reshapedAt) {
|
|
2522
|
-
const reshapeAge = Date.now() - new Date(cachedModelState.reshapedAt).getTime();
|
|
2523
|
-
// Only skip if session is NOT critically full — nuclear path must bypass this guard.
|
|
2524
|
-
// If currentTokenCount > 85% budget, fall through to nuclear compaction below.
|
|
2525
|
-
const isCriticallyFull = currentTokenCount != null &&
|
|
2526
|
-
currentTokenCount > (computeEffectiveBudget(tokenBudget, model) * 0.85);
|
|
2527
|
-
if (reshapeAge < 30_000 && !isCriticallyFull) {
|
|
2528
|
-
console.log(`[hypermem-plugin] compact: skipping — reshape pass ran ${reshapeAge}ms ago`);
|
|
2529
|
-
return { ok: true, compacted: false, reason: 'reshape-recently-ran' };
|
|
2530
|
-
}
|
|
2531
|
-
}
|
|
2532
|
-
}
|
|
2533
|
-
catch {
|
|
2534
|
-
// Non-fatal — proceed with compaction
|
|
2535
|
-
}
|
|
2536
|
-
// Re-estimate from the actual Redis window.
|
|
2537
|
-
// The runtime's estimate (currentTokenCount) includes the full inbound message
|
|
2538
|
-
// and system prompt — our estimate only covers the history window. When they
|
|
2539
|
-
// diverge significantly upward, the difference is "inbound overhead" consuming
|
|
2540
|
-
// budget the history is competing for. We trim history to make room.
|
|
2541
|
-
const effectiveBudget = computeEffectiveBudget(tokenBudget, model);
|
|
2542
|
-
const tokensBefore = await estimateWindowTokens(hm, agentId, sk);
|
|
2543
|
-
// Sprint 3: Unified pressure signal — compact path (Redis estimate)
|
|
2544
|
-
const s3CompactPressure = computeUnifiedPressure(tokensBefore, effectiveBudget, PRESSURE_SOURCE.COMPACT_REDIS_ESTIMATE);
|
|
2545
|
-
console.log(`[hypermem-plugin] compact: pressure=${s3CompactPressure.pct}% source=${s3CompactPressure.source} tokens=${tokensBefore}/${effectiveBudget}`);
|
|
2546
|
-
// Target depth for both Redis trimming and JSONL truncation.
|
|
2547
|
-
// Target 50% of budget capacity, assume ~500 tokens/message average.
|
|
2548
|
-
const targetDepth = Math.max(20, Math.floor((effectiveBudget * 0.5) / 500));
|
|
2549
|
-
// ── NUCLEAR COMPACTION ────────────────────────────────────────────────
|
|
2550
|
-
// When the runtime reports the session is ≥85% full, trust that signal
|
|
2551
|
-
// over our Redis estimate. The JSONL accumulates full tool results that
|
|
2552
|
-
// the gradient never sees, so Redis can look fine while the transcript
|
|
2553
|
-
// is genuinely saturated. Normal compact() returns compacted=false in
|
|
2554
|
-
// this scenario ("within_budget"), which gives the runtime zero relief.
|
|
2555
|
-
//
|
|
2556
|
-
// Also triggered when reshape ran recently but the session is still
|
|
2557
|
-
// critically full — bypass the reshape guard in that case.
|
|
2558
|
-
const NUCLEAR_THRESHOLD = 0.85;
|
|
2559
|
-
// Sprint 3: runtime-total pressure for nuclear check uses its own source label
|
|
2560
|
-
const s3NuclearPressure = currentTokenCount != null
|
|
2561
|
-
? computeUnifiedPressure(currentTokenCount, effectiveBudget, PRESSURE_SOURCE.COMPACT_RUNTIME_TOTAL)
|
|
2562
|
-
: s3CompactPressure;
|
|
2563
|
-
const isNuclear = currentTokenCount != null && currentTokenCount > effectiveBudget * NUCLEAR_THRESHOLD;
|
|
2564
|
-
if (isNuclear) {
|
|
2565
|
-
// Cut deep: target 20% of normal depth = ~25 messages for a 128k session.
|
|
2566
|
-
// Keeps very recent context, clears the long tool-heavy tail.
|
|
2567
|
-
const nuclearDepth = Math.max(10, Math.floor(targetDepth * 0.20));
|
|
2568
|
-
const nuclearBudget = Math.floor(effectiveBudget * 0.25);
|
|
2569
|
-
const nuclearRemoved = await hm.cache.trimHistoryToTokenBudget(agentId, sk, nuclearBudget);
|
|
2570
|
-
await hm.cache.invalidateWindow(agentId, sk).catch(() => { });
|
|
2571
|
-
await truncateJsonlIfNeeded(sessionFile, nuclearDepth, true);
|
|
2572
|
-
const tokensAfter = await estimateWindowTokens(hm, agentId, sk);
|
|
2573
|
-
if (telemetryEnabled()) {
|
|
2574
|
-
trimTelemetry({
|
|
2575
|
-
path: 'compact.nuclear',
|
|
2576
|
-
agentId, sessionKey: sk,
|
|
2577
|
-
preTokens: tokensBefore,
|
|
2578
|
-
postTokens: tokensAfter,
|
|
2579
|
-
removed: nuclearRemoved,
|
|
2580
|
-
cacheInvalidated: true,
|
|
2581
|
-
reason: `${s3NuclearPressure.source}:${s3NuclearPressure.pct}% currentTokenCount=${currentTokenCount}/${effectiveBudget}`,
|
|
2582
|
-
});
|
|
2583
|
-
}
|
|
2584
|
-
console.log(`[hypermem-plugin] compact: NUCLEAR — pressure=${s3NuclearPressure.pct}% source=${s3NuclearPressure.source} ` +
|
|
2585
|
-
`session at ${currentTokenCount}/${effectiveBudget} tokens, ` +
|
|
2586
|
-
`deep-trimmed JSONL to ${nuclearDepth} messages, Redis ${tokensBefore}→${tokensAfter} tokens`);
|
|
2587
|
-
return { ok: true, compacted: true, result: { tokensBefore, tokensAfter } };
|
|
2588
|
-
}
|
|
2589
|
-
// ── END NUCLEAR ───────────────────────────────────────────────────────
|
|
2590
|
-
// Detect large-inbound-content scenario: runtime total significantly exceeds
|
|
2591
|
-
// our history estimate. The gap is the inbound message + system prompt overhead.
|
|
2592
|
-
// Trim history to leave room for it even if history alone is within budget.
|
|
2593
|
-
if (currentTokenCount != null && currentTokenCount > tokensBefore) {
|
|
2594
|
-
const inboundOverhead = currentTokenCount - tokensBefore;
|
|
2595
|
-
if (inboundOverhead > effectiveBudget * 0.15) {
|
|
2596
|
-
// Large inbound content (document review, big tool result, etc.)
|
|
2597
|
-
// Trim history so history + inbound fits within 85% of budget.
|
|
2598
|
-
const budgetForHistory = Math.floor(effectiveBudget * 0.85) - inboundOverhead;
|
|
2599
|
-
if (budgetForHistory < tokensBefore && budgetForHistory > 0) {
|
|
2600
|
-
const historyTrimmed = await hm.cache.trimHistoryToTokenBudget(agentId, sk, budgetForHistory);
|
|
2601
|
-
await hm.cache.invalidateWindow(agentId, sk).catch(() => { });
|
|
2602
|
-
const tokensAfter = await estimateWindowTokens(hm, agentId, sk);
|
|
2603
|
-
await truncateJsonlIfNeeded(sessionFile, targetDepth);
|
|
2604
|
-
if (telemetryEnabled()) {
|
|
2605
|
-
trimTelemetry({
|
|
2606
|
-
path: 'compact.history',
|
|
2607
|
-
agentId, sessionKey: sk,
|
|
2608
|
-
preTokens: tokensBefore,
|
|
2609
|
-
postTokens: tokensAfter,
|
|
2610
|
-
removed: historyTrimmed,
|
|
2611
|
-
cacheInvalidated: true,
|
|
2612
|
-
reason: `inbound-overhead=${inboundOverhead}`,
|
|
2613
|
-
});
|
|
2614
|
-
}
|
|
2615
|
-
console.log(`[hypermem-plugin] compact: large-inbound-content (gap=${inboundOverhead} tokens), ` +
|
|
2616
|
-
`trimmed history ${tokensBefore}→${tokensAfter} (budget-for-history=${budgetForHistory}, trimmed=${historyTrimmed} messages)`);
|
|
2617
|
-
return { ok: true, compacted: true, result: { tokensBefore, tokensAfter } };
|
|
2618
|
-
}
|
|
2619
|
-
}
|
|
2620
|
-
}
|
|
2621
|
-
// Under 70% of budget by our own Redis estimate.
|
|
2622
|
-
// We still need to check the JSONL — the runtime's overflow is based on JSONL
|
|
2623
|
-
// message count, not Redis. If the JSONL is bloated (> targetDepth * 1.5 messages)
|
|
2624
|
-
// we truncate it even if Redis looks fine, then return compacted=true so the
|
|
2625
|
-
// runtime retries with the trimmed file instead of killing the session.
|
|
2626
|
-
if (tokensBefore <= effectiveBudget * 0.7) {
|
|
2627
|
-
const jsonlTruncated = await truncateJsonlIfNeeded(sessionFile, targetDepth);
|
|
2628
|
-
if (jsonlTruncated) {
|
|
2629
|
-
console.log(`[hypermem-plugin] compact: Redis within_budget but JSONL was bloated — truncated to ${targetDepth} messages`);
|
|
2630
|
-
return {
|
|
2631
|
-
ok: true,
|
|
2632
|
-
compacted: true,
|
|
2633
|
-
result: { tokensBefore, tokensAfter: tokensBefore },
|
|
2634
|
-
};
|
|
2635
|
-
}
|
|
2636
|
-
return {
|
|
2637
|
-
ok: true,
|
|
2638
|
-
compacted: false,
|
|
2639
|
-
reason: 'within_budget',
|
|
2640
|
-
result: { tokensBefore, tokensAfter: tokensBefore },
|
|
2641
|
-
};
|
|
2642
|
-
}
|
|
2643
|
-
// Over budget: trim both the window cache AND the history list.
|
|
2644
|
-
// Bug fix: if no window cache exists (fresh/never-compacted session),
|
|
2645
|
-
// compact() was only trying to trim the window (which was null) and
|
|
2646
|
-
// the history list was left untouched → 0 actual trimming → timeout
|
|
2647
|
-
// compaction death spiral.
|
|
2648
|
-
const window = await hm.cache.getWindow(agentId, sk);
|
|
2649
|
-
if (window && window.length > targetDepth) {
|
|
2650
|
-
const trimmed = window.slice(-targetDepth);
|
|
2651
|
-
await hm.cache.setWindow(agentId, sk, trimmed);
|
|
2652
|
-
}
|
|
2653
|
-
// Always trim the underlying history list — this is the source of truth
|
|
2654
|
-
// when no window cache exists. trimHistoryToTokenBudget walks newest→oldest
|
|
2655
|
-
// and LTRIMs everything beyond the budget.
|
|
2656
|
-
const trimBudget = Math.floor(effectiveBudget * 0.5);
|
|
2657
|
-
const historyTrimmed = await hm.cache.trimHistoryToTokenBudget(agentId, sk, trimBudget);
|
|
2658
|
-
if (historyTrimmed > 0) {
|
|
2659
|
-
console.log(`[hypermem-plugin] compact: trimmed ${historyTrimmed} messages from history list`);
|
|
2660
|
-
}
|
|
2661
|
-
// Invalidate the compose cache so next assemble() re-builds from trimmed data
|
|
2662
|
-
await hm.cache.invalidateWindow(agentId, sk).catch(() => { });
|
|
2663
|
-
const tokensAfter = await estimateWindowTokens(hm, agentId, sk);
|
|
2664
|
-
if (telemetryEnabled()) {
|
|
2665
|
-
trimTelemetry({
|
|
2666
|
-
path: 'compact.history2',
|
|
2667
|
-
agentId, sessionKey: sk,
|
|
2668
|
-
preTokens: tokensBefore,
|
|
2669
|
-
postTokens: tokensAfter,
|
|
2670
|
-
removed: historyTrimmed,
|
|
2671
|
-
cacheInvalidated: true,
|
|
2672
|
-
reason: `${s3CompactPressure.source}:${s3CompactPressure.pct}% over-budget tokensBefore=${tokensBefore}/${effectiveBudget}`,
|
|
2673
|
-
});
|
|
2674
|
-
}
|
|
2675
|
-
console.log(`[hypermem-plugin] compact: trimmed ${tokensBefore} → ${tokensAfter} tokens (budget: ${effectiveBudget}, pressure=${s3CompactPressure.pct}% source=${s3CompactPressure.source})`);
|
|
2676
|
-
// Density-aware JSONL truncation: derive target depth from actual avg tokens/message
|
|
2677
|
-
// rather than assuming a fixed 500 tokens/message. This prevents a large-message
|
|
2678
|
-
// session (e.g. 145 msgs × 882 tok = 128k) from bypassing the 1.5x guard and
|
|
2679
|
-
// leaving the JSONL untouched while Redis is correctly trimmed.
|
|
2680
|
-
// force=true bypasses the 1.5x early-exit — over-budget always rewrites.
|
|
2681
|
-
const histDepth = cachedModelState?.historyDepth ?? targetDepth;
|
|
2682
|
-
const avgTokPerMsg = histDepth > 0 && tokensBefore > 0 ? tokensBefore / histDepth : 500;
|
|
2683
|
-
const densityTargetDepth = Math.max(10, Math.floor(trimBudget / avgTokPerMsg));
|
|
2684
|
-
await truncateJsonlIfNeeded(sessionFile, densityTargetDepth, true);
|
|
2685
|
-
console.log(`[hypermem-plugin] compact: JSONL density-trim targetDepth=${densityTargetDepth} (histDepth=${histDepth}, avg=${Math.round(avgTokPerMsg)} tok/msg)`);
|
|
2686
|
-
return {
|
|
2687
|
-
ok: true,
|
|
2688
|
-
compacted: true,
|
|
2689
|
-
result: { tokensBefore, tokensAfter },
|
|
2690
|
-
};
|
|
2691
|
-
}
|
|
2692
|
-
catch (err) {
|
|
2693
|
-
console.warn('[hypermem-plugin] compact failed:', err.message);
|
|
2694
|
-
// Non-fatal: return ok so the runtime doesn't retry with its own compaction
|
|
2695
|
-
return { ok: true, compacted: false, reason: err.message };
|
|
2696
|
-
}
|
|
2697
|
-
},
|
|
2698
|
-
/**
|
|
2699
|
-
* After-turn hook: ingest new messages then trigger background indexer.
|
|
2700
|
-
*
|
|
2701
|
-
* IMPORTANT: When afterTurn is defined, the runtime calls ONLY afterTurn —
|
|
2702
|
-
* it never calls ingest() or ingestBatch(). So we must ingest the new
|
|
2703
|
-
* messages here, using messages.slice(prePromptMessageCount).
|
|
2704
|
-
*/
|
|
2705
|
-
async afterTurn({ sessionId, sessionKey, messages, prePromptMessageCount, isHeartbeat, runtimeContext }) {
|
|
2706
|
-
if (isHeartbeat)
|
|
2707
|
-
return;
|
|
2708
|
-
try {
|
|
2709
|
-
const hm = await getHyperMem();
|
|
2710
|
-
const sk = resolveSessionKey(sessionId, sessionKey);
|
|
2711
|
-
const agentId = extractAgentId(sk);
|
|
2712
|
-
// Ingest only the new messages produced this turn
|
|
2713
|
-
const newMessages = messages.slice(prePromptMessageCount);
|
|
2714
|
-
for (const msg of newMessages) {
|
|
2715
|
-
const m = msg;
|
|
2716
|
-
// Skip system messages — they come from the runtime, not the conversation
|
|
2717
|
-
if (m.role === 'system')
|
|
2718
|
-
continue;
|
|
2719
|
-
if (m.role === 'toolResult' && extractTextFromInboundContent(m.content).trim() === SYNTHETIC_MISSING_TOOL_RESULT_TEXT) {
|
|
2720
|
-
const toolCallId = typeof m.toolCallId === 'string' ? m.toolCallId : 'unknown';
|
|
2721
|
-
const toolName = typeof m.toolName === 'string' ? m.toolName : 'unknown';
|
|
2722
|
-
await bumpToolPairMetrics(hm, agentId, sk, { syntheticNoResultIngested: 1 }, {
|
|
2723
|
-
stage: 'afterTurn',
|
|
2724
|
-
toolCallId,
|
|
2725
|
-
toolName,
|
|
2726
|
-
});
|
|
2727
|
-
console.warn(`[hypermem-plugin] tool-pair-integrity: observed synthetic missing tool result for ${agentId}/${sk} ` +
|
|
2728
|
-
`tool=${toolName} callId=${toolCallId}`);
|
|
2729
|
-
}
|
|
2730
|
-
const neutral = toNeutralMessage(m);
|
|
2731
|
-
if (neutral.role === 'user' && !neutral.toolResults?.length) {
|
|
2732
|
-
// Record plain user messages here and strip transport envelope metadata
|
|
2733
|
-
// before storage so prompt wrappers like:
|
|
2734
|
-
// Sender (untrusted metadata): { ... }
|
|
2735
|
-
// never enter messages.db / Redis history / downstream facts.
|
|
2736
|
-
//
|
|
2737
|
-
// recordUserMessage() also strips defensively at core level, but we do
|
|
2738
|
-
// it here too so the intended behavior is explicit at the plugin boundary.
|
|
2739
|
-
//
|
|
2740
|
-
// IMPORTANT: tool results arrive as role='user' carriers (toNeutralMessage
|
|
2741
|
-
// sets role='user' + toolResults=[...] + textContent=null). These MUST go
|
|
2742
|
-
// through recordAssistantMessage to persist the toolResults array.
|
|
2743
|
-
// recordUserMessage takes a plain string and would silently discard them.
|
|
2744
|
-
await hm.recordUserMessage(agentId, sk, stripMessageMetadata(neutral.textContent ?? ''));
|
|
2745
|
-
}
|
|
2746
|
-
else {
|
|
2747
|
-
await hm.recordAssistantMessage(agentId, sk, neutral, {
|
|
2748
|
-
tokenCount: neutral.role === 'assistant' ? resolveAssistantTokenCount(m, runtimeContext) : undefined,
|
|
2749
|
-
});
|
|
2750
|
-
}
|
|
2751
|
-
}
|
|
2752
|
-
try {
|
|
2753
|
-
const lastAssistantMessage = [...newMessages].reverse().find(m => m.role === 'assistant');
|
|
2754
|
-
if (lastAssistantMessage) {
|
|
2755
|
-
const modelState = await hm.cache.getModelState(agentId, sk).catch(() => null);
|
|
2756
|
-
const promptCacheUsage = runtimeContext?.promptCache?.lastCallUsage;
|
|
2757
|
-
const outputTokens = resolveAssistantOutputTokenCount(lastAssistantMessage, runtimeContext) ?? 1;
|
|
2758
|
-
const inputTokens = typeof promptCacheUsage?.input === 'number'
|
|
2759
|
-
? Math.floor(promptCacheUsage.input)
|
|
2760
|
-
: typeof runtimeContext?.currentTokenCount === 'number'
|
|
2761
|
-
? Math.floor(runtimeContext.currentTokenCount)
|
|
2762
|
-
: null;
|
|
2763
|
-
const cacheReadTokens = typeof promptCacheUsage?.cacheRead === 'number'
|
|
2764
|
-
? Math.floor(promptCacheUsage.cacheRead)
|
|
2765
|
-
: null;
|
|
2766
|
-
const modelId = typeof lastAssistantMessage.model === 'string'
|
|
2767
|
-
? lastAssistantMessage.model
|
|
2768
|
-
: modelState?.modelId ?? modelState?.model ?? 'unknown';
|
|
2769
|
-
const provider = typeof lastAssistantMessage.provider === 'string'
|
|
2770
|
-
? lastAssistantMessage.provider
|
|
2771
|
-
: modelState?.provider ?? 'unknown';
|
|
2772
|
-
const taskType = typeof runtimeContext?.taskType === 'string'
|
|
2773
|
-
? runtimeContext.taskType ?? null
|
|
2774
|
-
: null;
|
|
2775
|
-
recordOutputMetrics(hm.dbManager.getLibraryDb(), {
|
|
2776
|
-
id: `turn-metric-${agentId}-${Date.now()}-${randomUUID()}`,
|
|
2777
|
-
timestamp: new Date().toISOString(),
|
|
2778
|
-
agent_id: agentId,
|
|
2779
|
-
session_key: sk,
|
|
2780
|
-
model_id: modelId,
|
|
2781
|
-
provider,
|
|
2782
|
-
fos_version: null,
|
|
2783
|
-
mod_version: null,
|
|
2784
|
-
mod_id: null,
|
|
2785
|
-
task_type: taskType,
|
|
2786
|
-
output_tokens: outputTokens,
|
|
2787
|
-
input_tokens: inputTokens,
|
|
2788
|
-
cache_read_tokens: cacheReadTokens,
|
|
2789
|
-
corrections_fired: [],
|
|
2790
|
-
latency_ms: null,
|
|
2791
|
-
});
|
|
2792
|
-
}
|
|
2793
|
-
}
|
|
2794
|
-
catch {
|
|
2795
|
-
// Non-fatal telemetry path
|
|
2796
|
-
}
|
|
2797
|
-
// P3.1: Topic detection on the inbound user message
|
|
2798
|
-
// Non-fatal: topic detection never blocks afterTurn
|
|
2799
|
-
let adaptiveTopicShiftConfidence;
|
|
2800
|
-
try {
|
|
2801
|
-
const inboundUserMsg = newMessages
|
|
2802
|
-
.map(m => m)
|
|
2803
|
-
.find(m => m.role === 'user');
|
|
2804
|
-
if (inboundUserMsg) {
|
|
2805
|
-
const neutralUser = toNeutralMessage(inboundUserMsg);
|
|
2806
|
-
// Gather recent messages for context (all messages before the new ones)
|
|
2807
|
-
const contextMessages = messages.slice(0, prePromptMessageCount)
|
|
2808
|
-
.filter(m => m.role !== 'system')
|
|
2809
|
-
.slice(-10)
|
|
2810
|
-
.map(m => toNeutralMessage(m));
|
|
2811
|
-
const db = hm.dbManager.getMessageDb(agentId);
|
|
2812
|
-
if (db) {
|
|
2813
|
-
const topicMap = new SessionTopicMap(db);
|
|
2814
|
-
const activeTopic = topicMap.getActiveTopic(sk);
|
|
2815
|
-
const signal = detectTopicShift(neutralUser, contextMessages, activeTopic?.id ?? null);
|
|
2816
|
-
adaptiveTopicShiftConfidence = signal.confidence;
|
|
2817
|
-
if (signal.isNewTopic && signal.topicName) {
|
|
2818
|
-
const newTopicId = topicMap.createTopic(sk, signal.topicName);
|
|
2819
|
-
// New topic starts with count 1 (the message that triggered the shift)
|
|
2820
|
-
topicMap.incrementMessageCount(newTopicId);
|
|
2821
|
-
// Write topic_id onto the stored user message (best-effort)
|
|
2822
|
-
try {
|
|
2823
|
-
const stored = db.prepare(`
|
|
2824
|
-
SELECT m.id FROM messages m
|
|
2825
|
-
JOIN conversations c ON c.id = m.conversation_id
|
|
2826
|
-
WHERE c.session_key = ? AND m.role = 'user'
|
|
2827
|
-
ORDER BY m.message_index DESC LIMIT 1
|
|
2828
|
-
`).get(sk);
|
|
2829
|
-
if (stored) {
|
|
2830
|
-
db.prepare('UPDATE messages SET topic_id = ? WHERE id = ?')
|
|
2831
|
-
.run(newTopicId, stored.id);
|
|
2832
|
-
}
|
|
2833
|
-
}
|
|
2834
|
-
catch {
|
|
2835
|
-
// Best-effort
|
|
2836
|
-
}
|
|
2837
|
-
}
|
|
2838
|
-
else if (activeTopic) {
|
|
2839
|
-
topicMap.activateTopic(sk, activeTopic.id);
|
|
2840
|
-
topicMap.incrementMessageCount(activeTopic.id);
|
|
2841
|
-
}
|
|
2842
|
-
}
|
|
2843
|
-
}
|
|
2844
|
-
}
|
|
2845
|
-
catch {
|
|
2846
|
-
// Topic detection is entirely non-fatal
|
|
2847
|
-
}
|
|
2848
|
-
// Recompute the Redis hot history from SQLite so turn-age gradient is
|
|
2849
|
-
// materialized after every turn. This prevents warm-compressed history
|
|
2850
|
-
// from drifting back to raw payloads during live sessions.
|
|
2851
|
-
//
|
|
2852
|
-
// Pass the cached model tokenBudget so refreshRedisGradient can cap the
|
|
2853
|
-
// gradient-compressed window to budget before writing to Redis. Without
|
|
2854
|
-
// this, afterTurn writes up to 250 messages regardless of budget, causing
|
|
2855
|
-
// trimHistoryToTokenBudget to fire and trim ~200 messages on every
|
|
2856
|
-
// subsequent assemble() — the churn loop seen in Eve's logs.
|
|
2857
|
-
if (hm.cache.isConnected) {
|
|
2858
|
-
try {
|
|
2859
|
-
const modelState = await hm.cache.getModelState(agentId, sk);
|
|
2860
|
-
const gradientBudget = modelState?.tokenBudget;
|
|
2861
|
-
const gradientDepth = modelState?.historyDepth;
|
|
2862
|
-
const inboundUserMsg = newMessages
|
|
2863
|
-
.map(m => m)
|
|
2864
|
-
.find(m => m.role === 'user');
|
|
2865
|
-
const inboundUserText = inboundUserMsg
|
|
2866
|
-
? stripMessageMetadata(extractTextFromInboundContent(inboundUserMsg.content))
|
|
2867
|
-
: '';
|
|
2868
|
-
const lifecyclePolicy = resolveAdaptiveLifecyclePolicy({
|
|
2869
|
-
usedTokens: estimateMessageArrayTokens(messages),
|
|
2870
|
-
effectiveBudget: gradientBudget,
|
|
2871
|
-
userTurnCount: messages.filter(m => m.role === 'user').length,
|
|
2872
|
-
explicitNewSession: /^\/new(?:\s|$)/i.test(inboundUserText.trim()),
|
|
2873
|
-
topicShiftConfidence: adaptiveTopicShiftConfidence,
|
|
2874
|
-
});
|
|
2875
|
-
lifecyclePolicyTelemetry({
|
|
2876
|
-
path: 'afterTurn.gradient',
|
|
2877
|
-
agentId,
|
|
2878
|
-
sessionKey: sk,
|
|
2879
|
-
band: lifecyclePolicy.band,
|
|
2880
|
-
pressurePct: lifecyclePolicy.pressurePct,
|
|
2881
|
-
topicShiftConfidence: adaptiveTopicShiftConfidence,
|
|
2882
|
-
trimSoftTarget: lifecyclePolicy.trimSoftTarget,
|
|
2883
|
-
reasons: lifecyclePolicy.reasons,
|
|
2884
|
-
});
|
|
2885
|
-
await hm.refreshRedisGradient(agentId, sk, gradientBudget, gradientDepth, lifecyclePolicy.trimSoftTarget);
|
|
2886
|
-
}
|
|
2887
|
-
catch (refreshErr) {
|
|
2888
|
-
console.warn('[hypermem-plugin] afterTurn: refreshRedisGradient failed (non-fatal):', refreshErr.message);
|
|
2889
|
-
}
|
|
2890
|
-
}
|
|
2891
|
-
// Invalidate the window cache after ingesting new messages.
|
|
2892
|
-
// The next assemble() call will re-compose with the new data.
|
|
2893
|
-
try {
|
|
2894
|
-
await hm.cache.invalidateWindow(agentId, sk);
|
|
2895
|
-
}
|
|
2896
|
-
catch {
|
|
2897
|
-
// Window invalidation is best-effort
|
|
2898
|
-
}
|
|
2899
|
-
// Pre-emptive secondary trim when session exits a turn hot.
|
|
2900
|
-
// If a session just finished a turn at >80% pressure, the NEXT turn's
|
|
2901
|
-
// incoming tool results (parallel web searches, large exec output, etc.)
|
|
2902
|
-
// will hit a window with no headroom — the ingestion wave failure mode
|
|
2903
|
-
// (reported by Eve, 2026-04-05). Pre-trim here so the tool-loop
|
|
2904
|
-
// assemble() path starts the next turn with meaningful space.
|
|
2905
|
-
//
|
|
2906
|
-
// Uses modelState.tokenBudget if cached; skips if unavailable (non-fatal).
|
|
2907
|
-
try {
|
|
2908
|
-
const modelState = await hm.cache.getModelState(agentId, sk);
|
|
2909
|
-
if (modelState?.tokenBudget) {
|
|
2910
|
-
// Use the runtime message array as the only trim-pressure source.
|
|
2911
|
-
// Redis remains a drift signal for anomaly logging.
|
|
2912
|
-
const runtimePostTokens = estimateMessageArrayTokens(messages);
|
|
2913
|
-
const redisPostTokens = await estimateWindowTokens(hm, agentId, sk);
|
|
2914
|
-
const postTurnTokens = runtimePostTokens;
|
|
2915
|
-
maybeLogPressureAccountingAnomaly({
|
|
2916
|
-
path: 'afterTurn.secondary',
|
|
2917
|
-
agentId,
|
|
2918
|
-
sessionKey: sk,
|
|
2919
|
-
runtimeTokens: runtimePostTokens,
|
|
2920
|
-
redisTokens: redisPostTokens,
|
|
2921
|
-
composedTokens: postTurnTokens,
|
|
2922
|
-
budget: modelState.tokenBudget,
|
|
2923
|
-
});
|
|
2924
|
-
const postTurnPressure = postTurnTokens / modelState.tokenBudget;
|
|
2925
|
-
// Sprint 2.2b: demote afterTurn.secondary to guard-only no-op.
|
|
2926
|
-
//
|
|
2927
|
-
// Previously this path was a two-tier real trim that fired after
|
|
2928
|
-
// every turn ending at >80% pressure, calling
|
|
2929
|
-
// trimHistoryToTokenBudget() and emitting `event:'trim'` with
|
|
2930
|
-
// path='afterTurn.secondary'. Sprint 2 consolidates steady-state
|
|
2931
|
-
// trim ownership in assemble.* (tool-loop + normal/subagent),
|
|
2932
|
-
// with compact.* as the only exception family. The afterTurn
|
|
2933
|
-
// post-turn pressure path is now redundant: the next turn's
|
|
2934
|
-
// assemble.* trim absorbs any residual pressure.
|
|
2935
|
-
//
|
|
2936
|
-
// Pattern matches the warmstart/reshape demotion from 2.2a:
|
|
2937
|
-
// keep the pressure predicate + threshold branch so observability
|
|
2938
|
-
// via `event:'trim-guard'` is preserved, but emit NO real trim,
|
|
2939
|
-
// NO invalidateWindow, NO mutation. The compact skip-gate stays
|
|
2940
|
-
// correct because this path never stamped any model state.
|
|
2941
|
-
if (postTurnPressure > 0.80) {
|
|
2942
|
-
guardTelemetry({
|
|
2943
|
-
path: 'afterTurn.secondary',
|
|
2944
|
-
agentId, sessionKey: sk,
|
|
2945
|
-
reason: 'afterturn-secondary-demoted',
|
|
2946
|
-
});
|
|
2947
|
-
}
|
|
2948
|
-
}
|
|
2949
|
-
}
|
|
2950
|
-
catch {
|
|
2951
|
-
// Non-fatal — next turn's tool-loop trim is the fallback
|
|
2952
|
-
}
|
|
2953
|
-
// Pre-compute embedding for the assistant's reply so the next compose()
|
|
2954
|
-
// can skip the Ollama round-trip entirely (fire-and-forget).
|
|
2955
|
-
//
|
|
2956
|
-
// Why the assistant reply, not the current user message:
|
|
2957
|
-
// The assistant's reply is the strongest semantic predictor of what the
|
|
2958
|
-
// user will ask next — it's the context they're responding to. By the time
|
|
2959
|
-
// the next user message arrives and compose() fires, this embedding is
|
|
2960
|
-
// already warm in Redis. Cache hit rate: near 100% on normal conversation
|
|
2961
|
-
// flow (one reply per turn).
|
|
2962
|
-
//
|
|
2963
|
-
// The previous approach (embedding the current user message) still missed
|
|
2964
|
-
// on every turn because compose() queries against the INCOMING user message,
|
|
2965
|
-
// not the one that was just processed.
|
|
2966
|
-
//
|
|
2967
|
-
// newMessages = messages.slice(prePromptMessageCount) — the assistant reply
|
|
2968
|
-
// is always in here. Walk backwards to find the last assistant text turn.
|
|
2969
|
-
try {
|
|
2970
|
-
let assistantReplyText = null;
|
|
2971
|
-
for (let i = newMessages.length - 1; i >= 0; i--) {
|
|
2972
|
-
const m = newMessages[i];
|
|
2973
|
-
if (m.role === 'assistant') {
|
|
2974
|
-
const neutral = toNeutralMessage(m);
|
|
2975
|
-
if (neutral.textContent) {
|
|
2976
|
-
assistantReplyText = neutral.textContent;
|
|
2977
|
-
break;
|
|
2978
|
-
}
|
|
2979
|
-
}
|
|
2980
|
-
}
|
|
2981
|
-
if (assistantReplyText && _generateEmbeddings) {
|
|
2982
|
-
// Fire-and-forget: don't await, don't block afterTurn
|
|
2983
|
-
_generateEmbeddings([assistantReplyText]).then(async ([embedding]) => {
|
|
2984
|
-
if (embedding) {
|
|
2985
|
-
await hm.cache.setQueryEmbedding(agentId, sk, embedding);
|
|
2986
|
-
}
|
|
2987
|
-
}).catch(() => {
|
|
2988
|
-
// Non-fatal: embedding pre-compute failed, compose() will call Ollama
|
|
2989
|
-
});
|
|
2990
|
-
}
|
|
2991
|
-
}
|
|
2992
|
-
catch {
|
|
2993
|
-
// Pre-embed is entirely non-fatal
|
|
2994
|
-
}
|
|
2995
|
-
// P1.7: Direct per-agent tick after each turn — no need to wait for 5-min interval.
|
|
2996
|
-
if (_indexer) {
|
|
2997
|
-
const _agentIdForTick = agentId;
|
|
2998
|
-
const runTick = async () => {
|
|
2999
|
-
if (_taskFlowRuntime) {
|
|
3000
|
-
// Preflight: only create a managed flow if we can actually tick.
|
|
3001
|
-
// Creating a flow we never finish/fail leaves orphaned queued rows.
|
|
3002
|
-
let flow = null;
|
|
3003
|
-
try {
|
|
3004
|
-
// Use createManaged + finish/fail only — do NOT call runTask().
|
|
3005
|
-
// runTask() writes a task_run row to runs.sqlite with status='running'
|
|
3006
|
-
// and the TaskFlow runtime has no completeTask() method, so those rows
|
|
3007
|
-
// would accumulate forever and block clean restarts.
|
|
3008
|
-
flow = _taskFlowRuntime.createManaged({
|
|
3009
|
-
controllerId: 'hypermem/indexer',
|
|
3010
|
-
goal: `Index messages for ${_agentIdForTick}`,
|
|
3011
|
-
});
|
|
3012
|
-
await _indexer.tick();
|
|
3013
|
-
// expectedRevision is required: finishFlow uses optimistic locking.
|
|
3014
|
-
// A freshly created managed flow always starts at revision 0.
|
|
3015
|
-
// MUST be awaited — finish/fail return Promises. Calling without
|
|
3016
|
-
// await lets the Promise get GC'd before the DB write completes,
|
|
3017
|
-
// leaving the flow permanently in queued state.
|
|
3018
|
-
const finishResult = await Promise.resolve(_taskFlowRuntime.finish({ flowId: flow.flowId, expectedRevision: flow.revision }));
|
|
3019
|
-
if (finishResult && !finishResult.applied) {
|
|
3020
|
-
console.warn('[hypermem-plugin] TaskFlow finish failed:', finishResult.code ?? finishResult.reason, 'flowId:', flow.flowId, 'revision:', flow.revision);
|
|
3021
|
-
}
|
|
3022
|
-
}
|
|
3023
|
-
catch (tickErr) {
|
|
3024
|
-
// Best-effort fail — non-fatal, but always mark the flow so it doesn't leak
|
|
3025
|
-
if (flow) {
|
|
3026
|
-
try {
|
|
3027
|
-
await Promise.resolve(_taskFlowRuntime.fail({ flowId: flow.flowId, expectedRevision: flow.revision }));
|
|
3028
|
-
}
|
|
3029
|
-
catch { /* ignore */ }
|
|
3030
|
-
}
|
|
3031
|
-
throw tickErr;
|
|
3032
|
-
}
|
|
3033
|
-
}
|
|
3034
|
-
else {
|
|
3035
|
-
await _indexer.tick();
|
|
3036
|
-
}
|
|
3037
|
-
};
|
|
3038
|
-
runTick().catch(() => {
|
|
3039
|
-
// Non-fatal: indexer tick failure never blocks afterTurn
|
|
3040
|
-
});
|
|
3041
|
-
}
|
|
3042
|
-
}
|
|
3043
|
-
catch (err) {
|
|
3044
|
-
// afterTurn is never fatal
|
|
3045
|
-
console.warn('[hypermem-plugin] afterTurn failed:', err.message);
|
|
3046
|
-
}
|
|
3047
|
-
},
|
|
3048
|
-
/**
|
|
3049
|
-
* Prepare context for a subagent session before it starts.
|
|
3050
|
-
*
|
|
3051
|
-
* Seeds the child session's Redis with parent context based on the
|
|
3052
|
-
* subagentWarming config ('full' | 'light' | 'off').
|
|
3053
|
-
* Returns a rollback handle to clean up if spawn fails.
|
|
3054
|
-
*/
|
|
3055
|
-
async prepareSubagentSpawn(params) {
|
|
3056
|
-
const { parentSessionKey, childSessionKey } = params;
|
|
3057
|
-
const forkParams = params;
|
|
3058
|
-
const contextMode = forkParams.contextMode;
|
|
3059
|
-
const parentSessionId = forkParams.parentSessionId;
|
|
3060
|
-
const childSessionId = forkParams.childSessionId;
|
|
3061
|
-
if (_subagentWarming === 'off') {
|
|
3062
|
-
return undefined;
|
|
3063
|
-
}
|
|
3064
|
-
try {
|
|
3065
|
-
const hm = await getHyperMem();
|
|
3066
|
-
const parentAgentId = extractAgentId(parentSessionKey);
|
|
3067
|
-
const childAgentId = extractAgentId(childSessionKey);
|
|
3068
|
-
const isForkedContext = contextMode === 'fork';
|
|
3069
|
-
let parentHistoryMessages = 0;
|
|
3070
|
-
let parentUserTurnCount = 0;
|
|
3071
|
-
let parentPressureFraction;
|
|
3072
|
-
// Seed child with parent's active facts. This preserves the historical
|
|
3073
|
-
// slot for compatibility; facts still primarily come from L4 by agent id.
|
|
3074
|
-
const facts = hm.getActiveFacts(parentAgentId, { limit: 50 });
|
|
3075
|
-
if (facts && facts.length > 0) {
|
|
3076
|
-
const factBlock = facts
|
|
3077
|
-
.map(f => f.content)
|
|
3078
|
-
.join('\n');
|
|
3079
|
-
await hm.cache.setSlot(childAgentId, childSessionKey, 'parentFacts', factBlock);
|
|
3080
|
-
}
|
|
3081
|
-
const history = await hm.cache.getHistory(parentAgentId, parentSessionKey);
|
|
3082
|
-
if (history && history.length > 0) {
|
|
3083
|
-
const maxSeededHistory = _subagentWarming === 'full' ? 25 : 12;
|
|
3084
|
-
const recentHistory = history.slice(-maxSeededHistory);
|
|
3085
|
-
parentHistoryMessages = recentHistory.length;
|
|
3086
|
-
parentUserTurnCount = recentHistory.filter(m => m.role === 'user').length;
|
|
3087
|
-
const parentTokens = estimateMessageArrayTokens(recentHistory);
|
|
3088
|
-
const parentModelState = await hm.cache.getModelState(parentAgentId, parentSessionKey).catch(() => null);
|
|
3089
|
-
const parentBudget = parentModelState?.tokenBudget && parentModelState.tokenBudget > 0
|
|
3090
|
-
? parentModelState.tokenBudget
|
|
3091
|
-
: undefined;
|
|
3092
|
-
parentPressureFraction = parentBudget ? parentTokens / parentBudget : undefined;
|
|
3093
|
-
if (isForkedContext || _subagentWarming === 'full') {
|
|
3094
|
-
await hm.cache.replaceHistory(childAgentId, childSessionKey, recentHistory, maxSeededHistory);
|
|
3095
|
-
await hm.cache.invalidateWindow(childAgentId, childSessionKey).catch(() => { });
|
|
3096
|
-
}
|
|
3097
|
-
await hm.cache.setSlot(childAgentId, childSessionKey, 'parentHistory', JSON.stringify(recentHistory));
|
|
3098
|
-
}
|
|
3099
|
-
if (isForkedContext) {
|
|
3100
|
-
const forkedMeta = {
|
|
3101
|
-
enabled: true,
|
|
3102
|
-
parentSessionKey,
|
|
3103
|
-
parentSessionId,
|
|
3104
|
-
childSessionId,
|
|
3105
|
-
parentPressureFraction,
|
|
3106
|
-
parentUserTurnCount,
|
|
3107
|
-
parentHistoryMessages,
|
|
3108
|
-
};
|
|
3109
|
-
await hm.cache.setSlot(childAgentId, childSessionKey, FORKED_CONTEXT_META_SLOT, JSON.stringify(forkedMeta));
|
|
3110
|
-
}
|
|
3111
|
-
console.log(`[hypermem-plugin] prepareSubagentSpawn: seeded ${childSessionKey} ` +
|
|
3112
|
-
`from ${parentSessionKey} (warming=${_subagentWarming}, contextMode=${contextMode ?? 'isolated'}, ` +
|
|
3113
|
-
`history=${parentHistoryMessages})`);
|
|
3114
|
-
return {
|
|
3115
|
-
async rollback() {
|
|
3116
|
-
try {
|
|
3117
|
-
const hm = await getHyperMem();
|
|
3118
|
-
await hm.cache.setSlot(childAgentId, childSessionKey, 'parentFacts', '');
|
|
3119
|
-
await hm.cache.setSlot(childAgentId, childSessionKey, 'parentHistory', '');
|
|
3120
|
-
await hm.cache.setSlot(childAgentId, childSessionKey, FORKED_CONTEXT_META_SLOT, '');
|
|
3121
|
-
await hm.cache.replaceHistory(childAgentId, childSessionKey, [], 0);
|
|
3122
|
-
await hm.cache.invalidateWindow(childAgentId, childSessionKey).catch(() => { });
|
|
3123
|
-
}
|
|
3124
|
-
catch {
|
|
3125
|
-
// Rollback is best-effort
|
|
3126
|
-
}
|
|
3127
|
-
},
|
|
3128
|
-
};
|
|
3129
|
-
}
|
|
3130
|
-
catch (err) {
|
|
3131
|
-
console.warn('[hypermem-plugin] prepareSubagentSpawn failed (non-fatal):', err.message);
|
|
3132
|
-
return undefined;
|
|
3133
|
-
}
|
|
3134
|
-
},
|
|
3135
|
-
/**
|
|
3136
|
-
* Clean up after a subagent session ends.
|
|
3137
|
-
*
|
|
3138
|
-
* Removes Redis slots and invalidates caches for the dead session
|
|
3139
|
-
* to prevent stale data accumulation.
|
|
3140
|
-
*/
|
|
3141
|
-
async onSubagentEnded({ childSessionKey, reason }) {
|
|
3142
|
-
try {
|
|
3143
|
-
const hm = await getHyperMem();
|
|
3144
|
-
const childAgentId = extractAgentId(childSessionKey);
|
|
3145
|
-
await Promise.all([
|
|
3146
|
-
hm.cache.setSlot(childAgentId, childSessionKey, 'parentFacts', ''),
|
|
3147
|
-
hm.cache.setSlot(childAgentId, childSessionKey, 'parentHistory', ''),
|
|
3148
|
-
hm.cache.setSlot(childAgentId, childSessionKey, FORKED_CONTEXT_META_SLOT, ''),
|
|
3149
|
-
hm.cache.setSlot(childAgentId, childSessionKey, 'assemblyContextBlock', ''),
|
|
3150
|
-
hm.cache.setSlot(childAgentId, childSessionKey, 'assemblyContextAt', '0'),
|
|
3151
|
-
hm.cache.invalidateWindow(childAgentId, childSessionKey).catch(() => { }),
|
|
3152
|
-
]);
|
|
3153
|
-
_overheadCache.delete(childSessionKey);
|
|
3154
|
-
console.log(`[hypermem-plugin] onSubagentEnded: cleaned up ${childSessionKey} (reason=${reason})`);
|
|
3155
|
-
}
|
|
3156
|
-
catch (err) {
|
|
3157
|
-
console.warn('[hypermem-plugin] onSubagentEnded failed (non-fatal):', err.message);
|
|
3158
|
-
}
|
|
3159
|
-
},
|
|
3160
|
-
/**
|
|
3161
|
-
* Dispose: intentionally a no-op.
|
|
3162
|
-
*
|
|
3163
|
-
* The runtime calls dispose() at the end of every request cycle, but
|
|
3164
|
-
* hypermem's Redis connection and SQLite handles are gateway-lifetime
|
|
3165
|
-
* singletons — not request-scoped. Closing and nulling _hm here causes
|
|
3166
|
-
* a full reconnect + re-init on every turn (~400-800ms latency per turn).
|
|
3167
|
-
*
|
|
3168
|
-
* ioredis manages its own reconnection on connection loss. If the gateway
|
|
3169
|
-
* process exits, Node.js cleans up file handles automatically.
|
|
3170
|
-
*
|
|
3171
|
-
* If a true shutdown is needed (e.g. gateway restart signal), call
|
|
3172
|
-
* _hm.close() directly from a gateway:shutdown hook instead.
|
|
3173
|
-
*/
|
|
3174
|
-
async dispose() {
|
|
3175
|
-
// Intentional no-op — see comment above.
|
|
3176
|
-
},
|
|
3177
|
-
};
|
|
3178
|
-
}
|
|
3179
|
-
// ─── NeutralMessage → AgentMessage ─────────────────────────────
|
|
3180
|
-
/**
|
|
3181
|
-
* Convert hypermem's NeutralMessage back to OpenClaw's AgentMessage format.
|
|
3182
|
-
*
|
|
3183
|
-
* The runtime expects messages conforming to pi-ai's Message union:
|
|
3184
|
-
* UserMessage: { role: 'user', content: string | ContentBlock[], timestamp }
|
|
3185
|
-
* AssistantMessage: { role: 'assistant', content: ContentBlock[], api, provider, model, usage, stopReason, timestamp }
|
|
3186
|
-
* ToolResultMessage: { role: 'toolResult', toolCallId, toolName, content, isError, timestamp }
|
|
3187
|
-
*
|
|
3188
|
-
* hypermem stores tool results as NeutralMessage with role='user' and toolResults[].
|
|
3189
|
-
* These must be expanded into individual ToolResultMessage objects.
|
|
3190
|
-
*
|
|
3191
|
-
* For assistant messages with tool calls, NeutralToolCall.arguments is a JSON string
|
|
3192
|
-
* but the runtime's ToolCall.arguments is Record<string, any>. We parse it here.
|
|
3193
|
-
*
|
|
3194
|
-
* Missing metadata fields (api, provider, model, usage, stopReason) are filled with
|
|
3195
|
-
* sentinel values. The runtime's convertToLlm strips them before the API call, and
|
|
3196
|
-
* the session transcript already has the real values. These are just structural stubs
|
|
3197
|
-
* so the AgentMessage type is satisfied at runtime.
|
|
3198
|
-
*/
|
|
3199
|
-
function neutralToAgentMessage(msg) {
|
|
3200
|
-
const now = Date.now();
|
|
3201
|
-
// Tool results: expand to individual ToolResultMessage objects
|
|
3202
|
-
if (msg.toolResults && msg.toolResults.length > 0) {
|
|
3203
|
-
return msg.toolResults.map(tr => ({
|
|
3204
|
-
role: 'toolResult',
|
|
3205
|
-
toolCallId: tr.callId,
|
|
3206
|
-
toolName: tr.name,
|
|
3207
|
-
content: [{ type: 'text', text: tr.content ?? '' }],
|
|
3208
|
-
isError: tr.isError ?? false,
|
|
3209
|
-
timestamp: now,
|
|
3210
|
-
}));
|
|
3211
|
-
}
|
|
3212
|
-
if (msg.role === 'user') {
|
|
3213
|
-
return {
|
|
3214
|
-
role: 'user',
|
|
3215
|
-
content: msg.textContent ?? '',
|
|
3216
|
-
timestamp: now,
|
|
3217
|
-
};
|
|
3218
|
-
}
|
|
3219
|
-
if (msg.role === 'system') {
|
|
3220
|
-
// System messages are passed through as-is; the runtime handles them separately
|
|
3221
|
-
return {
|
|
3222
|
-
role: 'system',
|
|
3223
|
-
content: msg.textContent ?? '',
|
|
3224
|
-
timestamp: now,
|
|
3225
|
-
// Preserve dynamicBoundary metadata for prompt caching
|
|
3226
|
-
...msg.metadata?.dynamicBoundary
|
|
3227
|
-
? { metadata: { dynamicBoundary: true } }
|
|
3228
|
-
: {},
|
|
3229
|
-
};
|
|
3230
|
-
}
|
|
3231
|
-
// Assistant message
|
|
3232
|
-
const content = [];
|
|
3233
|
-
if (msg.textContent) {
|
|
3234
|
-
content.push({ type: 'text', text: msg.textContent });
|
|
3235
|
-
}
|
|
3236
|
-
if (msg.toolCalls && msg.toolCalls.length > 0) {
|
|
3237
|
-
for (const tc of msg.toolCalls) {
|
|
3238
|
-
// Parse arguments from JSON string → object (runtime expects Record<string, any>)
|
|
3239
|
-
let args;
|
|
3240
|
-
try {
|
|
3241
|
-
args = typeof tc.arguments === 'string' ? JSON.parse(tc.arguments) : (tc.arguments ?? {});
|
|
3242
|
-
}
|
|
3243
|
-
catch {
|
|
3244
|
-
args = {};
|
|
3245
|
-
}
|
|
3246
|
-
content.push({
|
|
3247
|
-
type: 'toolCall',
|
|
3248
|
-
id: tc.id,
|
|
3249
|
-
name: tc.name,
|
|
3250
|
-
arguments: args,
|
|
3251
|
-
});
|
|
3252
|
-
}
|
|
3253
|
-
}
|
|
3254
|
-
// Stub metadata fields — the runtime needs these structurally but convertToLlm
|
|
3255
|
-
// strips them before the API call. Real values live in the session transcript.
|
|
3256
|
-
return {
|
|
3257
|
-
role: 'assistant',
|
|
3258
|
-
content: content.length > 0 ? content : [{ type: 'text', text: '' }],
|
|
3259
|
-
api: 'unknown',
|
|
3260
|
-
provider: 'unknown',
|
|
3261
|
-
model: 'unknown',
|
|
3262
|
-
usage: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
|
|
3263
|
-
stopReason: 'stop',
|
|
3264
|
-
timestamp: now,
|
|
3265
|
-
};
|
|
3266
|
-
}
|
|
3267
|
-
// ─── Cache Bust Utility ────────────────────────────────────────────────────
|
|
3268
|
-
/**
|
|
3269
|
-
* Bust the assembly cache for a specific agent+session.
|
|
3270
|
-
* Call this after writing to identity files (SOUL.md, IDENTITY.md, TOOLS.md,
|
|
3271
|
-
* USER.md) to ensure the next assemble() runs full compositor, not a replay.
|
|
3272
|
-
*/
|
|
3273
|
-
export async function bustAssemblyCache(agentId, sessionKey) {
|
|
3274
|
-
try {
|
|
3275
|
-
const hm = await getHyperMem();
|
|
3276
|
-
await Promise.all([
|
|
3277
|
-
hm.cache.setSlot(agentId, sessionKey, 'assemblyContextBlock', ''),
|
|
3278
|
-
hm.cache.setSlot(agentId, sessionKey, 'assemblyContextAt', '0'),
|
|
3279
|
-
]);
|
|
3280
|
-
}
|
|
3281
|
-
catch {
|
|
3282
|
-
// Non-fatal
|
|
3283
|
-
}
|
|
3284
|
-
}
|
|
3285
|
-
// ─── Plugin Config Schema ────────────────────────────────────────
|
|
3286
|
-
// Exposed via openclaw.json → plugins.entries.hypercompositor.config
|
|
3287
|
-
// Validated by OpenClaw on gateway start. Visible via `openclaw config get`.
|
|
3288
|
-
const hypercompositorConfigSchema = z.object({
|
|
3289
|
-
/** Path to HyperMem core dist/index.js. Auto-resolved if omitted. */
|
|
3290
|
-
hyperMemPath: z.string().optional(),
|
|
3291
|
-
/** HyperMem data directory. Default: ~/.openclaw/hypermem */
|
|
3292
|
-
dataDir: z.string().optional(),
|
|
3293
|
-
/** Full model context window size in tokens. Default: 128000 */
|
|
3294
|
-
contextWindowSize: z.number().int().positive().optional(),
|
|
3295
|
-
/** Fraction [0.0–0.5] reserved for system prompts + headroom. Default: 0.25 */
|
|
3296
|
-
contextWindowReserve: z.number().min(0).max(0.5).optional(),
|
|
3297
|
-
/** Defer tool pruning to OpenClaw's contextPruning. Default: false */
|
|
3298
|
-
deferToolPruning: z.boolean().optional(),
|
|
3299
|
-
/** Emit detailed budget-source and trim-decision logs. Default: false */
|
|
3300
|
-
verboseLogging: z.boolean().optional(),
|
|
3301
|
-
/** Manual per-model context window fallback table used when runtime tokenBudget is missing. */
|
|
3302
|
-
contextWindowOverrides: z.record(z.string().regex(CONTEXT_WINDOW_OVERRIDE_KEY_REGEX, 'key must be "provider/model"'), contextWindowOverrideSchema).optional(),
|
|
3303
|
-
/** Treat cache replay snapshots older than this as stale. Default: 120000ms */
|
|
3304
|
-
warmCacheReplayThresholdMs: z.number().int().positive().optional(),
|
|
3305
|
-
/** Subagent context injection: 'full' | 'light' | 'off'. Default: 'light' */
|
|
3306
|
-
subagentWarming: z.enum(['full', 'light', 'off']).optional(),
|
|
3307
|
-
/** Compositor tuning overrides */
|
|
3308
|
-
compositor: z.object({
|
|
3309
|
-
budgetFraction: z.number().min(0).max(1).optional(),
|
|
3310
|
-
reserveFraction: z.number().min(0).max(1).optional(),
|
|
3311
|
-
historyFraction: z.number().min(0).max(1).optional(),
|
|
3312
|
-
memoryFraction: z.number().min(0).max(1).optional(),
|
|
3313
|
-
defaultTokenBudget: z.number().int().positive().optional(),
|
|
3314
|
-
maxHistoryMessages: z.number().int().positive().optional(),
|
|
3315
|
-
maxFacts: z.number().int().positive().optional(),
|
|
3316
|
-
maxExpertisePatterns: z.number().int().positive().optional(),
|
|
3317
|
-
maxCrossSessionContext: z.number().int().nonnegative().optional(),
|
|
3318
|
-
maxTotalTriggerTokens: z.number().int().nonnegative().optional(),
|
|
3319
|
-
maxRecentToolPairs: z.number().int().nonnegative().optional(),
|
|
3320
|
-
maxProseToolPairs: z.number().int().nonnegative().optional(),
|
|
3321
|
-
warmHistoryBudgetFraction: z.number().min(0).max(1).optional(),
|
|
3322
|
-
contextWindowReserve: z.number().min(0).max(1).optional(),
|
|
3323
|
-
dynamicReserveTurnHorizon: z.number().int().positive().optional(),
|
|
3324
|
-
dynamicReserveMax: z.number().min(0).max(1).optional(),
|
|
3325
|
-
dynamicReserveEnabled: z.boolean().optional(),
|
|
3326
|
-
keystoneHistoryFraction: z.number().min(0).max(1).optional(),
|
|
3327
|
-
keystoneMaxMessages: z.number().int().nonnegative().optional(),
|
|
3328
|
-
keystoneMinSignificance: z.number().min(0).max(1).optional(),
|
|
3329
|
-
targetBudgetFraction: z.number().min(0).max(1).optional(),
|
|
3330
|
-
enableFOS: z.boolean().optional(),
|
|
3331
|
-
enableMOD: z.boolean().optional(),
|
|
3332
|
-
hyperformProfile: z.enum(['light', 'standard', 'full', 'starter', 'fleet']).optional(),
|
|
3333
|
-
outputProfile: z.enum(['light', 'standard', 'full', 'starter', 'fleet']).optional(),
|
|
3334
|
-
outputStandard: z.enum(['light', 'standard', 'full', 'starter', 'fleet']).optional(),
|
|
3335
|
-
wikiTokenCap: z.number().int().positive().optional(),
|
|
3336
|
-
zigzagOrdering: z.boolean().optional(),
|
|
3337
|
-
}).optional(),
|
|
3338
|
-
/** Image/tool eviction settings */
|
|
3339
|
-
eviction: z.object({
|
|
3340
|
-
enabled: z.boolean().optional(),
|
|
3341
|
-
imageAgeTurns: z.number().int().nonnegative().optional(),
|
|
3342
|
-
toolResultAgeTurns: z.number().int().nonnegative().optional(),
|
|
3343
|
-
minTokensToEvict: z.number().int().nonnegative().optional(),
|
|
3344
|
-
keepPreviewChars: z.number().int().nonnegative().optional(),
|
|
3345
|
-
}).optional(),
|
|
3346
|
-
/** Embedding provider config */
|
|
3347
|
-
embedding: z.object({
|
|
3348
|
-
provider: z.enum(['ollama', 'openai', 'gemini']).optional(),
|
|
3349
|
-
ollamaUrl: z.string().optional(),
|
|
3350
|
-
openaiApiKey: z.string().optional(),
|
|
3351
|
-
openaiBaseUrl: z.string().optional(),
|
|
3352
|
-
geminiBaseUrl: z.string().optional(),
|
|
3353
|
-
geminiIndexTaskType: z.string().optional(),
|
|
3354
|
-
geminiQueryTaskType: z.string().optional(),
|
|
3355
|
-
model: z.string().optional(),
|
|
3356
|
-
dimensions: z.number().int().positive().optional(),
|
|
3357
|
-
timeout: z.number().int().positive().optional(),
|
|
3358
|
-
batchSize: z.number().int().positive().optional(),
|
|
3359
|
-
}).optional(),
|
|
3360
|
-
/**
|
|
3361
|
-
* Optional reranker config. When omitted or provider is 'none', the
|
|
3362
|
-
* compositor runs with RRF-only ordering. See INSTALL.md → Reranker.
|
|
3363
|
-
*/
|
|
3364
|
-
reranker: z.object({
|
|
3365
|
-
provider: z.enum(['zeroentropy', 'openrouter', 'local', 'none']),
|
|
3366
|
-
minCandidates: z.number().int().nonnegative().optional(),
|
|
3367
|
-
maxDocuments: z.number().int().positive().optional(),
|
|
3368
|
-
topK: z.number().int().positive().optional(),
|
|
3369
|
-
timeoutMs: z.number().int().positive().optional(),
|
|
3370
|
-
zeroEntropyApiKey: z.string().optional(),
|
|
3371
|
-
zeroEntropyModel: z.string().optional(),
|
|
3372
|
-
openrouterApiKey: z.string().optional(),
|
|
3373
|
-
openrouterModel: z.string().optional(),
|
|
3374
|
-
ollamaUrl: z.string().optional(),
|
|
3375
|
-
ollamaModel: z.string().optional(),
|
|
3376
|
-
}).optional(),
|
|
3377
|
-
});
|
|
3378
|
-
// ─── Plugin Entry ───────────────────────────────────────────────
|
|
3379
|
-
const engine = createHyperMemEngine();
|
|
3380
|
-
export default definePluginEntry({
|
|
3381
|
-
id: 'hypercompositor',
|
|
3382
|
-
name: 'HyperCompositor — context engine',
|
|
3383
|
-
description: 'Four-layer memory architecture for OpenClaw agents: SQLite hot cache, message history, vector search, and structured library.',
|
|
3384
|
-
kind: 'context-engine',
|
|
3385
|
-
configSchema: buildPluginConfigSchema(hypercompositorConfigSchema),
|
|
3386
|
-
register(api) {
|
|
3387
|
-
// ── Resolve plugin config from openclaw.json ──
|
|
3388
|
-
const pluginCfg = (api.pluginConfig ?? {});
|
|
3389
|
-
_pluginConfig = pluginCfg;
|
|
3390
|
-
// ── Resolve HYPERMEM_PATH: pluginConfig > ESM package resolve > dev fallback ──
|
|
3391
|
-
if (pluginCfg.hyperMemPath) {
|
|
3392
|
-
HYPERMEM_PATH = pluginCfg.hyperMemPath;
|
|
3393
|
-
console.log(`[hypermem-plugin] Using configured hyperMemPath: ${HYPERMEM_PATH}`);
|
|
3394
|
-
}
|
|
3395
|
-
else {
|
|
3396
|
-
try {
|
|
3397
|
-
const resolvedUrl = import.meta.resolve('@psiclawops/hypermem');
|
|
3398
|
-
HYPERMEM_PATH = resolvedUrl.startsWith('file:') ? fileURLToPath(resolvedUrl) : resolvedUrl;
|
|
3399
|
-
}
|
|
3400
|
-
catch {
|
|
3401
|
-
// Dev fallback: resolve relative to plugin directory
|
|
3402
|
-
const __pluginDir = path.dirname(fileURLToPath(import.meta.url));
|
|
3403
|
-
HYPERMEM_PATH = path.resolve(__pluginDir, '../../dist/index.js');
|
|
3404
|
-
console.log(`[hypermem-plugin] Falling back to dev path: ${HYPERMEM_PATH}`);
|
|
3405
|
-
}
|
|
3406
|
-
}
|
|
3407
|
-
api.registerContextEngine('hypercompositor', () => engine);
|
|
3408
|
-
// ── HyperForm config dir init ──
|
|
3409
|
-
// Copy defaults and guide to ~/.openclaw/hypermem/config/ on every load.
|
|
3410
|
-
// Defaults are overwritten on plugin update. Active config files are never touched.
|
|
3411
|
-
void (async () => {
|
|
3412
|
-
try {
|
|
3413
|
-
const dataDir = _pluginConfig.dataDir ?? path.join(os.homedir(), '.openclaw/hypermem');
|
|
3414
|
-
const configDir = path.join(dataDir, 'config');
|
|
3415
|
-
await fs.mkdir(configDir, { recursive: true });
|
|
3416
|
-
const __pluginDir = path.dirname(fileURLToPath(import.meta.url));
|
|
3417
|
-
const defaultsSrc = path.resolve(__pluginDir, '../../../config-defaults');
|
|
3418
|
-
const defaultFiles = [
|
|
3419
|
-
'hyperform-fos-defaults.json',
|
|
3420
|
-
'hyperform-mod-defaults.json',
|
|
3421
|
-
'HYPERFORM-GUIDE.md',
|
|
3422
|
-
];
|
|
3423
|
-
for (const fname of defaultFiles) {
|
|
3424
|
-
const src = path.join(defaultsSrc, fname);
|
|
3425
|
-
const dest = path.join(configDir, fname);
|
|
3426
|
-
try {
|
|
3427
|
-
await fs.copyFile(src, dest);
|
|
3428
|
-
}
|
|
3429
|
-
catch {
|
|
3430
|
-
// defaults may not exist in dev builds — non-fatal
|
|
3431
|
-
}
|
|
3432
|
-
}
|
|
3433
|
-
// On first install, copy defaults as active config if active files don't exist
|
|
3434
|
-
for (const [src, dest] of [
|
|
3435
|
-
['hyperform-fos-defaults.json', 'hyperform-fos.json'],
|
|
3436
|
-
['hyperform-mod-defaults.json', 'hyperform-mod.json'],
|
|
3437
|
-
]) {
|
|
3438
|
-
const destPath = path.join(configDir, dest);
|
|
3439
|
-
try {
|
|
3440
|
-
await fs.access(destPath);
|
|
3441
|
-
}
|
|
3442
|
-
catch {
|
|
3443
|
-
// Active config doesn't exist — copy defaults as starting point
|
|
3444
|
-
try {
|
|
3445
|
-
await fs.copyFile(path.join(configDir, src), destPath);
|
|
3446
|
-
}
|
|
3447
|
-
catch {
|
|
3448
|
-
// non-fatal
|
|
3449
|
-
}
|
|
3450
|
-
}
|
|
3451
|
-
}
|
|
3452
|
-
}
|
|
3453
|
-
catch {
|
|
3454
|
-
// non-fatal — HyperForm config init is best-effort
|
|
3455
|
-
}
|
|
3456
|
-
})();
|
|
3457
|
-
// P1.7: Bind TaskFlow runtime for task visibility — best-effort.
|
|
3458
|
-
// Guard: api.runtime.taskFlow may not exist on older OpenClaw versions.
|
|
3459
|
-
try {
|
|
3460
|
-
// eslint-disable-next-line @typescript-eslint/no-explicit-any
|
|
3461
|
-
const tf = api.runtime?.taskFlow;
|
|
3462
|
-
if (tf && typeof tf.bindSession === 'function') {
|
|
3463
|
-
_taskFlowRuntime = tf.bindSession({
|
|
3464
|
-
sessionKey: 'hypermem-plugin',
|
|
3465
|
-
requesterOrigin: 'hypermem-plugin',
|
|
3466
|
-
});
|
|
3467
|
-
}
|
|
3468
|
-
}
|
|
3469
|
-
catch {
|
|
3470
|
-
// TaskFlow binding is best-effort — plugin remains fully functional without it
|
|
3471
|
-
}
|
|
3472
|
-
},
|
|
3473
|
-
});
|
|
3474
|
-
//# sourceMappingURL=index.js.map
|