@beingmartinbmc/ojas 0.2.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +21 -0
- package/README.md +308 -0
- package/dist/aahar/index.d.ts +179 -0
- package/dist/aahar/index.d.ts.map +1 -0
- package/dist/aahar/index.js +657 -0
- package/dist/aahar/index.js.map +1 -0
- package/dist/aahar/scoring.d.ts +85 -0
- package/dist/aahar/scoring.d.ts.map +1 -0
- package/dist/aahar/scoring.js +268 -0
- package/dist/aahar/scoring.js.map +1 -0
- package/dist/agni/index.d.ts +113 -0
- package/dist/agni/index.d.ts.map +1 -0
- package/dist/agni/index.js +328 -0
- package/dist/agni/index.js.map +1 -0
- package/dist/agni/model-router.d.ts +77 -0
- package/dist/agni/model-router.d.ts.map +1 -0
- package/dist/agni/model-router.js +163 -0
- package/dist/agni/model-router.js.map +1 -0
- package/dist/agni/response-distiller.d.ts +37 -0
- package/dist/agni/response-distiller.d.ts.map +1 -0
- package/dist/agni/response-distiller.js +193 -0
- package/dist/agni/response-distiller.js.map +1 -0
- package/dist/agni/tiktoken-adapter.d.ts +55 -0
- package/dist/agni/tiktoken-adapter.d.ts.map +1 -0
- package/dist/agni/tiktoken-adapter.js +113 -0
- package/dist/agni/tiktoken-adapter.js.map +1 -0
- package/dist/chikitsa/index.d.ts +130 -0
- package/dist/chikitsa/index.d.ts.map +1 -0
- package/dist/chikitsa/index.js +565 -0
- package/dist/chikitsa/index.js.map +1 -0
- package/dist/demo.d.ts +15 -0
- package/dist/demo.d.ts.map +1 -0
- package/dist/demo.js +278 -0
- package/dist/demo.js.map +1 -0
- package/dist/index.d.ts +201 -0
- package/dist/index.d.ts.map +1 -0
- package/dist/index.js +588 -0
- package/dist/index.js.map +1 -0
- package/dist/mcp/audit.d.ts +39 -0
- package/dist/mcp/audit.d.ts.map +1 -0
- package/dist/mcp/audit.js +73 -0
- package/dist/mcp/audit.js.map +1 -0
- package/dist/mcp/contracts.d.ts +76 -0
- package/dist/mcp/contracts.d.ts.map +1 -0
- package/dist/mcp/contracts.js +44 -0
- package/dist/mcp/contracts.js.map +1 -0
- package/dist/mcp/envelope.d.ts +107 -0
- package/dist/mcp/envelope.d.ts.map +1 -0
- package/dist/mcp/envelope.js +162 -0
- package/dist/mcp/envelope.js.map +1 -0
- package/dist/mcp/registry.d.ts +110 -0
- package/dist/mcp/registry.d.ts.map +1 -0
- package/dist/mcp/registry.js +258 -0
- package/dist/mcp/registry.js.map +1 -0
- package/dist/mcp/server.d.ts +26 -0
- package/dist/mcp/server.d.ts.map +1 -0
- package/dist/mcp/server.js +107 -0
- package/dist/mcp/server.js.map +1 -0
- package/dist/mcp/tools/agent.d.ts +4 -0
- package/dist/mcp/tools/agent.d.ts.map +1 -0
- package/dist/mcp/tools/agent.js +300 -0
- package/dist/mcp/tools/agent.js.map +1 -0
- package/dist/mcp/tools/context.d.ts +4 -0
- package/dist/mcp/tools/context.d.ts.map +1 -0
- package/dist/mcp/tools/context.js +261 -0
- package/dist/mcp/tools/context.js.map +1 -0
- package/dist/mcp/tools/index.d.ts +5 -0
- package/dist/mcp/tools/index.d.ts.map +1 -0
- package/dist/mcp/tools/index.js +20 -0
- package/dist/mcp/tools/index.js.map +1 -0
- package/dist/mcp/tools/memory.d.ts +4 -0
- package/dist/mcp/tools/memory.d.ts.map +1 -0
- package/dist/mcp/tools/memory.js +220 -0
- package/dist/mcp/tools/memory.js.map +1 -0
- package/dist/mcp/tools/output.d.ts +4 -0
- package/dist/mcp/tools/output.d.ts.map +1 -0
- package/dist/mcp/tools/output.js +206 -0
- package/dist/mcp/tools/output.js.map +1 -0
- package/dist/mcp/tools/recovery.d.ts +4 -0
- package/dist/mcp/tools/recovery.d.ts.map +1 -0
- package/dist/mcp/tools/recovery.js +165 -0
- package/dist/mcp/tools/recovery.js.map +1 -0
- package/dist/mcp/tools/registrar.d.ts +4 -0
- package/dist/mcp/tools/registrar.d.ts.map +1 -0
- package/dist/mcp/tools/registrar.js +17 -0
- package/dist/mcp/tools/registrar.js.map +1 -0
- package/dist/mcp/tools/report.d.ts +4 -0
- package/dist/mcp/tools/report.d.ts.map +1 -0
- package/dist/mcp/tools/report.js +68 -0
- package/dist/mcp/tools/report.js.map +1 -0
- package/dist/mcp/tools/shared.d.ts +37 -0
- package/dist/mcp/tools/shared.d.ts.map +1 -0
- package/dist/mcp/tools/shared.js +214 -0
- package/dist/mcp/tools/shared.js.map +1 -0
- package/dist/mcp/trace.d.ts +47 -0
- package/dist/mcp/trace.d.ts.map +1 -0
- package/dist/mcp/trace.js +216 -0
- package/dist/mcp/trace.js.map +1 -0
- package/dist/nidra/index.d.ts +275 -0
- package/dist/nidra/index.d.ts.map +1 -0
- package/dist/nidra/index.js +889 -0
- package/dist/nidra/index.js.map +1 -0
- package/dist/persistence/migrations.d.ts +10 -0
- package/dist/persistence/migrations.d.ts.map +1 -0
- package/dist/persistence/migrations.js +77 -0
- package/dist/persistence/migrations.js.map +1 -0
- package/dist/persistence/sqlite.d.ts +30 -0
- package/dist/persistence/sqlite.d.ts.map +1 -0
- package/dist/persistence/sqlite.js +209 -0
- package/dist/persistence/sqlite.js.map +1 -0
- package/dist/persistence/types.d.ts +104 -0
- package/dist/persistence/types.d.ts.map +1 -0
- package/dist/persistence/types.js +5 -0
- package/dist/persistence/types.js.map +1 -0
- package/dist/pulse/index.d.ts +144 -0
- package/dist/pulse/index.d.ts.map +1 -0
- package/dist/pulse/index.js +453 -0
- package/dist/pulse/index.js.map +1 -0
- package/dist/raksha/classifiers/http-classifier.d.ts +26 -0
- package/dist/raksha/classifiers/http-classifier.d.ts.map +1 -0
- package/dist/raksha/classifiers/http-classifier.js +62 -0
- package/dist/raksha/classifiers/http-classifier.js.map +1 -0
- package/dist/raksha/classifiers/index.d.ts +5 -0
- package/dist/raksha/classifiers/index.d.ts.map +1 -0
- package/dist/raksha/classifiers/index.js +8 -0
- package/dist/raksha/classifiers/index.js.map +1 -0
- package/dist/raksha/classifiers/onnx-classifier.d.ts +41 -0
- package/dist/raksha/classifiers/onnx-classifier.d.ts.map +1 -0
- package/dist/raksha/classifiers/onnx-classifier.js +99 -0
- package/dist/raksha/classifiers/onnx-classifier.js.map +1 -0
- package/dist/raksha/hallucination-detectors.d.ts +106 -0
- package/dist/raksha/hallucination-detectors.d.ts.map +1 -0
- package/dist/raksha/hallucination-detectors.js +327 -0
- package/dist/raksha/hallucination-detectors.js.map +1 -0
- package/dist/raksha/index.d.ts +168 -0
- package/dist/raksha/index.d.ts.map +1 -0
- package/dist/raksha/index.js +597 -0
- package/dist/raksha/index.js.map +1 -0
- package/dist/raksha/prompt-injection-detectors.d.ts +30 -0
- package/dist/raksha/prompt-injection-detectors.d.ts.map +1 -0
- package/dist/raksha/prompt-injection-detectors.js +153 -0
- package/dist/raksha/prompt-injection-detectors.js.map +1 -0
- package/dist/types.d.ts +1115 -0
- package/dist/types.d.ts.map +1 -0
- package/dist/types.js +71 -0
- package/dist/types.js.map +1 -0
- package/dist/util/calibration.d.ts +32 -0
- package/dist/util/calibration.d.ts.map +1 -0
- package/dist/util/calibration.js +108 -0
- package/dist/util/calibration.js.map +1 -0
- package/dist/util/id.d.ts +2 -0
- package/dist/util/id.d.ts.map +1 -0
- package/dist/util/id.js +9 -0
- package/dist/util/id.js.map +1 -0
- package/dist/vyayam/index.d.ts +76 -0
- package/dist/vyayam/index.d.ts.map +1 -0
- package/dist/vyayam/index.js +528 -0
- package/dist/vyayam/index.js.map +1 -0
- package/dist/vyayam/tool-fault-proxy.d.ts +95 -0
- package/dist/vyayam/tool-fault-proxy.d.ts.map +1 -0
- package/dist/vyayam/tool-fault-proxy.js +170 -0
- package/dist/vyayam/tool-fault-proxy.js.map +1 -0
- package/docs/ARCHITECTURE.md +162 -0
- package/docs/BACKLOG.md +342 -0
- package/docs/CONFIGURATION.md +305 -0
- package/docs/EVIDENCE.md +232 -0
- package/docs/EVIDENCE_MATRIX.md +293 -0
- package/docs/KNOWN_FAILURES.md +367 -0
- package/docs/MCP.md +614 -0
- package/docs/MODULES.md +368 -0
- package/docs/SECURITY.md +251 -0
- package/docs/TRUST.md +88 -0
- package/docs/assets/ojas-hero.png +0 -0
- package/package.json +101 -0
|
@@ -0,0 +1,889 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
/**
|
|
3
|
+
* Ojas Nidra (ओजस निद्रा) — AI Recovery & Consolidation System
|
|
4
|
+
*
|
|
5
|
+
* Governs how AI agents recover, stabilize, and evolve.
|
|
6
|
+
* Provides memory consolidation, cognitive recovery,
|
|
7
|
+
* and reflective learning cycles.
|
|
8
|
+
*/
|
|
9
|
+
Object.defineProperty(exports, "__esModule", { value: true });
|
|
10
|
+
exports.Nidra = void 0;
|
|
11
|
+
const types_1 = require("../types");
|
|
12
|
+
const id_1 = require("../util/id");
|
|
13
|
+
// ─── Helpers ─────────────────────────────────────────────────────────────────
|
|
14
|
+
/** Current wall-clock time as an ISO-8601 string (UTC, millisecond precision). */
function now() {
    const current = new Date();
    return current.toISOString();
}
|
|
17
|
+
/**
 * Build a timestamped health-score record.
 * @param {number} value - Raw score; clamped into [0, 1].
 * @param {*} source - Originator tag stored alongside the score.
 * @returns {{value: number, timestamp: string, source: *}}
 */
function healthScore(value, source) {
    const clampedValue = clamp(value);
    const timestamp = now();
    return { value: clampedValue, timestamp, source };
}
|
|
20
|
+
/**
 * Constrain `v` to the inclusive range [min, max] (defaults to [0, 1]).
 * NaN propagates unchanged, matching the Math.min/Math.max behavior.
 */
function clamp(v, min = 0, max = 1) {
    if (v < min) {
        return min;
    }
    if (v > max) {
        return max;
    }
    return v;
}
|
|
23
|
+
/** Generate a fresh 'nidra'-prefixed unique ID via the shared id helper. */
function uuid() {
    // `(0, id_1.newId)` is the compiler's unbound-call form of `newId('nidra')`.
    return (0, id_1.newId)('nidra');
}
|
|
26
|
+
/**
 * Earliest ISO timestamp across the given traces, or `undefined` for empty
 * input. ISO-8601 strings order correctly under lexicographic comparison,
 * so plain `<` is sufficient.
 */
function earliestTimestamp(traces) {
    if (traces.length === 0) {
        return undefined;
    }
    return traces.reduce(
        (minSoFar, trace) => (trace.timestamp < minSoFar ? trace.timestamp : minSoFar),
        traces[0].timestamp
    );
}
|
|
37
|
+
/** True iff the two applicability lists share at least one element. */
function applicabilityOverlaps(a, b) {
    if (a.length === 0 || b.length === 0) {
        return false;
    }
    const lookup = new Set(a);
    return b.some((item) => lookup.has(item));
}
|
|
47
|
+
// ─── Nidra Engine ────────────────────────────────────────────────────────────
class Nidra {
    // Validated RecoveryPolicy; defaults merged in by the constructor.
    policy;
    // Raw execution traces awaiting consolidation (bounded by policy.maxTraces).
    traces = [];
    // Consolidated memories from recovery cycles (bounded by policy.maxMemories).
    memories = [];
    // Completed recovery-cycle results (bounded by policy.maxCycleHistory).
    cycleHistory = [];
    // ISO timestamp of the most recent committed recovery; null before the first.
    lastRecovery = null;
    /**
     * Trace IDs that have been processed by a recovery cycle — regardless of
     * whether they produced a retained memory. Without this set, low-confidence
     * traces would be reprocessed forever because the prior implementation only
     * treated a trace as "processed" if it ended up inside a kept memory.
     */
    processedTraceIds = new Set();
    /**
     * IDs of memories that have already emitted a `memory_cold` event
     * during the current run, so we don't re-emit on every `assess()`
     * pass while the temperature stays below the cold threshold.
     * Cleared when a memory is re-warmed via `touchMemory`.
     */
    coldNotifiedIds = new Set();
    /**
     * IDs of memories pruned in this Nidra's lifetime, kept so
     * `getMemoryDelta()` can report them under `removedIds`.
     * Bounded by `maxProcessedTraceIds` to avoid unbounded growth.
     */
    prunedMemoryIds = [];
    /**
     * @param {object} [policy] - Partial RecoveryPolicy; merged over
     *   DEFAULT_RECOVERY_POLICY and validated (throws on malformed fields).
     */
    constructor(policy = {}) {
        this.policy = this.validatePolicy({ ...types_1.DEFAULT_RECOVERY_POLICY, ...policy });
    }
|
|
77
|
+
// ── Memory Temperature (Block 3 — Nidra upgrades) ────────────────────────
|
|
78
|
+
/**
|
|
79
|
+
* Mark a memory as accessed: bump its temperature toward `1.0` and
|
|
80
|
+
* stamp the touch timestamp. Used by agents when they actually read
|
|
81
|
+
* a memory; `Aahar.filter()` integrations can also call this when a
|
|
82
|
+
* memory makes it into the context window.
|
|
83
|
+
*
|
|
84
|
+
* Returns `true` if the touched memory was previously below the
|
|
85
|
+
* cold threshold (i.e. the touch *re-warmed* it). Callers may
|
|
86
|
+
* choose to suppress next-cycle `memory_cold` emission in that case.
|
|
87
|
+
*/
|
|
88
|
+
touchMemory(id) {
|
|
89
|
+
const m = this.memories.find((x) => x.id === id);
|
|
90
|
+
if (!m)
|
|
91
|
+
return false;
|
|
92
|
+
const decayed = this.computeDecayedTemperature(m);
|
|
93
|
+
const boost = this.policy.temperatureBoostOnTouch ?? 0;
|
|
94
|
+
const newTemp = Math.min(1, decayed + boost);
|
|
95
|
+
const wasCold = decayed < (this.policy.coldTemperatureThreshold ?? 0.15);
|
|
96
|
+
m.temperature = newTemp;
|
|
97
|
+
m.temperatureUpdatedAt = now();
|
|
98
|
+
if (wasCold) {
|
|
99
|
+
// Allow a future cold-crossing to emit again now that we've re-warmed.
|
|
100
|
+
this.coldNotifiedIds.delete(id);
|
|
101
|
+
}
|
|
102
|
+
return wasCold;
|
|
103
|
+
}
|
|
104
|
+
/**
|
|
105
|
+
* Current temperature of `id`, with decay applied since the last
|
|
106
|
+
* touch. Returns `undefined` for unknown memory IDs.
|
|
107
|
+
*/
|
|
108
|
+
getMemoryTemperature(id) {
|
|
109
|
+
const m = this.memories.find((x) => x.id === id);
|
|
110
|
+
if (!m)
|
|
111
|
+
return undefined;
|
|
112
|
+
return this.computeDecayedTemperature(m);
|
|
113
|
+
}
|
|
114
|
+
/**
|
|
115
|
+
* Decay model: exponential half-life. Given `temperature` last
|
|
116
|
+
* updated at `temperatureUpdatedAt`, returns the current decayed
|
|
117
|
+
* value. The half-life is configurable via
|
|
118
|
+
* `RecoveryPolicy.temperatureHalfLifeSec` (default one week).
|
|
119
|
+
*/
|
|
120
|
+
computeDecayedTemperature(m) {
|
|
121
|
+
if (m.temperature === undefined)
|
|
122
|
+
return 0.5; // untracked = "warm"
|
|
123
|
+
const halfLife = this.policy.temperatureHalfLifeSec ?? 7 * 24 * 60 * 60;
|
|
124
|
+
if (halfLife <= 0)
|
|
125
|
+
return m.temperature; // decay disabled
|
|
126
|
+
const last = m.temperatureUpdatedAt ? Date.parse(m.temperatureUpdatedAt) : Date.parse(m.createdAt);
|
|
127
|
+
if (!Number.isFinite(last))
|
|
128
|
+
return m.temperature;
|
|
129
|
+
const dtSec = Math.max(0, (Date.now() - last) / 1000);
|
|
130
|
+
const decayFactor = Math.pow(0.5, dtSec / halfLife);
|
|
131
|
+
return clamp(m.temperature * decayFactor);
|
|
132
|
+
}
|
|
133
|
+
/**
|
|
134
|
+
* Returns memories whose decayed temperature is now below the cold
|
|
135
|
+
* threshold. The set is *idempotent across calls* via the
|
|
136
|
+
* `coldNotifiedIds` set, so this can be called from a periodic
|
|
137
|
+
* `assess()` pass without spamming the agent.
|
|
138
|
+
*
|
|
139
|
+
* The returned shape is the data needed to construct a `memory_cold`
|
|
140
|
+
* Pulse event; Ojas wires the actual emission in `Ojas.healthCheck()`.
|
|
141
|
+
*/
|
|
142
|
+
detectColdMemories() {
|
|
143
|
+
const threshold = this.policy.coldTemperatureThreshold ?? 0.15;
|
|
144
|
+
const out = [];
|
|
145
|
+
for (const m of this.memories) {
|
|
146
|
+
if (m.temperature === undefined)
|
|
147
|
+
continue;
|
|
148
|
+
const decayed = this.computeDecayedTemperature(m);
|
|
149
|
+
if (decayed < threshold && !this.coldNotifiedIds.has(m.id)) {
|
|
150
|
+
out.push({ id: m.id, temperature: decayed, abstraction: m.abstraction });
|
|
151
|
+
this.coldNotifiedIds.add(m.id);
|
|
152
|
+
}
|
|
153
|
+
}
|
|
154
|
+
return out;
|
|
155
|
+
}
|
|
156
|
+
// ── Delta sync (Block 3 — Nidra upgrades) ────────────────────────────────
|
|
157
|
+
/**
|
|
158
|
+
* Cursor representing the agent's view of the memory set right now.
|
|
159
|
+
* The hash collapses a fingerprint of every active memory's
|
|
160
|
+
* `(id, createdAt, invalidatedAt?, content)` tuple so any change
|
|
161
|
+
* produces a different cursor.
|
|
162
|
+
*/
|
|
163
|
+
getMemoryCursor() {
|
|
164
|
+
return {
|
|
165
|
+
cursorHash: this.computeMemoryHash(),
|
|
166
|
+
issuedAt: now(),
|
|
167
|
+
};
|
|
168
|
+
}
|
|
169
|
+
    /**
     * Return only the memories that changed since `cursor`. If the
     * cursor is unknown (the agent is calling for the first time, or
     * its cursor is older than our pruned-id horizon), the response is
     * a full resync.
     *
     * @param {{cursorHash: string, issuedAt: string}} [cursor] - Cursor from a
     *   prior `getMemoryCursor()`/`getMemoryDelta()` call; omit for full resync.
     * @returns {{added: object[], modified: object[], removedIds: string[],
     *   nextCursor: object, fullResync: boolean}}
     */
    getMemoryDelta(cursor) {
        const currentHash = this.computeMemoryHash();
        const nextCursor = { cursorHash: currentHash, issuedAt: now() };
        // No cursor: caller has no prior view — send everything.
        if (!cursor) {
            return {
                added: [...this.memories],
                modified: [],
                removedIds: [],
                nextCursor,
                fullResync: true,
            };
        }
        // Hash unchanged: nothing happened since the cursor was issued.
        if (cursor.cursorHash === currentHash) {
            return { added: [], modified: [], removedIds: [], nextCursor, fullResync: false };
        }
        // We don't keep per-cursor snapshots — that would be O(N * cursors).
        // Instead we use timestamps: anything created after `cursor.issuedAt`
        // is `added`; anything updated after `cursor.issuedAt` (but created
        // before) is `modified`; anything in `prunedMemoryIds` whose prune
        // event was after `cursor.issuedAt` is `removed`. This is a
        // best-effort delta — callers needing strict consistency should
        // request a full resync via `getMemoryDelta()` with no argument.
        const since = Date.parse(cursor.issuedAt);
        // Unparseable cursor timestamp: fail safe by resyncing in full.
        if (!Number.isFinite(since)) {
            return {
                added: [...this.memories],
                modified: [],
                removedIds: [],
                nextCursor,
                fullResync: true,
            };
        }
        const added = [];
        const modified = [];
        for (const m of this.memories) {
            const createdAt = Date.parse(m.createdAt);
            // Invalidation (supersession) counts as a modification.
            const invalidatedAt = m.invalidatedAt ? Date.parse(m.invalidatedAt) : 0;
            const lastChange = Math.max(createdAt, invalidatedAt);
            if (createdAt > since)
                added.push(m);
            else if (lastChange > since)
                modified.push(m);
        }
        return {
            added,
            modified,
            removedIds: [...this.prunedMemoryIds], // best-effort: all known prunes
            nextCursor,
            fullResync: false,
        };
    }
|
|
226
|
+
/**
|
|
227
|
+
* Cheap hash for `MemoryCursor.cursorHash`. We just sum a per-memory
|
|
228
|
+
* fingerprint into a 32-bit accumulator. Not collision-resistant in
|
|
229
|
+
* the cryptographic sense — but the cursor only needs to detect
|
|
230
|
+
* "anything changed since last time", and per-id IDs are already
|
|
231
|
+
* unique.
|
|
232
|
+
*/
|
|
233
|
+
computeMemoryHash() {
|
|
234
|
+
let acc = 0xcafe;
|
|
235
|
+
for (const m of this.memories) {
|
|
236
|
+
const sig = `${m.id}|${m.createdAt}|${m.invalidatedAt ?? ''}|${m.confidence.toFixed(3)}`;
|
|
237
|
+
for (let i = 0; i < sig.length; i += 1) {
|
|
238
|
+
acc = ((acc * 33) ^ sig.charCodeAt(i)) >>> 0;
|
|
239
|
+
}
|
|
240
|
+
}
|
|
241
|
+
return acc.toString(36).padStart(8, '0');
|
|
242
|
+
}
|
|
243
|
+
    /**
     * Reject malformed RecoveryPolicy fields at the boundary so a buggy
     * caller can't poison division by `0`, blow up `Math.exp(NaN)`, or
     * silently disable retention with `-1`.
     *
     * @param {object} policy - Fully merged policy (defaults + caller overrides).
     * @returns {object} The same policy object, validated.
     * @throws {Error} On any non-finite, out-of-range, or non-integer field.
     */
    validatePolicy(policy) {
        const finite = (v) => typeof v === 'number' && Number.isFinite(v);
        if (!Number.isInteger(policy.minTracesForConsolidation) || policy.minTracesForConsolidation < 0) {
            throw new Error('Nidra: minTracesForConsolidation must be a non-negative integer');
        }
        if (!finite(policy.recoveryIntervalSec) || policy.recoveryIntervalSec < 0) {
            throw new Error('Nidra: recoveryIntervalSec must be a non-negative finite number');
        }
        if (!finite(policy.maxDriftThreshold) || policy.maxDriftThreshold < 0 || policy.maxDriftThreshold > 1) {
            throw new Error('Nidra: maxDriftThreshold must be a finite number in [0,1]');
        }
        if (!Number.isInteger(policy.failureWindowSize) || policy.failureWindowSize <= 0) {
            throw new Error('Nidra: failureWindowSize must be a positive integer');
        }
        if (!finite(policy.retentionConfidence) || policy.retentionConfidence < 0 || policy.retentionConfidence > 1) {
            throw new Error('Nidra: retentionConfidence must be a finite number in [0,1]');
        }
        // Optional field: validated only when explicitly provided.
        if (policy.supersessionConfidenceDelta !== undefined) {
            const d = policy.supersessionConfidenceDelta;
            if (!finite(d) || d < 0 || d > 1) {
                throw new Error('Nidra: supersessionConfidenceDelta must be a finite number in [0,1] if set');
            }
        }
        // Retention caps are optional; when present they must be non-negative integers
        // (0 / unset conventionally means "unbounded" — see the enforce* methods).
        for (const cap of ['maxTraces', 'maxMemories', 'maxProcessedTraceIds', 'maxCycleHistory']) {
            const v = policy[cap];
            if (v !== undefined && (!Number.isInteger(v) || v < 0)) {
                throw new Error(`Nidra: ${cap} must be a non-negative integer if set`);
            }
        }
        return policy;
    }
|
|
279
|
+
// ── Trace Ingestion ──────────────────────────────────────────────────────
|
|
280
|
+
/**
|
|
281
|
+
* Record an execution trace for later consolidation. Evicts the oldest
|
|
282
|
+
* traces (and any matching processed-id markers) once the configured
|
|
283
|
+
* retention cap is hit, so long-running agents stay bounded in memory.
|
|
284
|
+
*/
|
|
285
|
+
recordTrace(trace) {
|
|
286
|
+
this.traces.push(trace);
|
|
287
|
+
this.enforceTraceRetention();
|
|
288
|
+
}
|
|
289
|
+
/** Record multiple traces at once. */
|
|
290
|
+
recordTraces(traces) {
|
|
291
|
+
this.traces.push(...traces);
|
|
292
|
+
this.enforceTraceRetention();
|
|
293
|
+
}
|
|
294
|
+
    /**
     * Apply the `maxTraces` and `maxProcessedTraceIds` retention caps,
     * evicting oldest entries first. A cap of 0 / unset means "unbounded".
     */
    enforceTraceRetention() {
        const cap = this.policy.maxTraces ?? 0;
        if (cap > 0 && this.traces.length > cap) {
            const drop = this.traces.length - cap;
            const dropped = this.traces.splice(0, drop);
            // Also drop any processed-id markers that referenced the evicted
            // traces so the set itself stays bounded.
            for (const t of dropped)
                this.processedTraceIds.delete(t.id);
        }
        // Independent processed-id cap in case it grew via other paths.
        const idCap = this.policy.maxProcessedTraceIds ?? 0;
        if (idCap > 0 && this.processedTraceIds.size > idCap) {
            // Sets iterate in insertion order, so advancing the iterator and
            // deleting removes the oldest-inserted IDs first. Deleting an
            // already-visited element during iteration is safe in JS.
            const it = this.processedTraceIds.values();
            const drop = this.processedTraceIds.size - idCap;
            for (let i = 0; i < drop; i++) {
                const next = it.next();
                if (next.done)
                    break;
                this.processedTraceIds.delete(next.value);
            }
        }
    }
|
|
317
|
+
enforceMemoryAndCycleRetention() {
|
|
318
|
+
const mCap = this.policy.maxMemories ?? 0;
|
|
319
|
+
if (mCap > 0 && this.memories.length > mCap) {
|
|
320
|
+
const evicted = this.memories.splice(0, this.memories.length - mCap);
|
|
321
|
+
// Remember evicted IDs so `getMemoryDelta()` can report removals.
|
|
322
|
+
// Cap the pruned-id list at the processed-trace-ids horizon so it
|
|
323
|
+
// doesn't grow without bound.
|
|
324
|
+
for (const m of evicted)
|
|
325
|
+
this.prunedMemoryIds.push(m.id);
|
|
326
|
+
const idsHorizon = this.policy.maxProcessedTraceIds ?? 10_000;
|
|
327
|
+
if (this.prunedMemoryIds.length > idsHorizon) {
|
|
328
|
+
this.prunedMemoryIds.splice(0, this.prunedMemoryIds.length - idsHorizon);
|
|
329
|
+
}
|
|
330
|
+
}
|
|
331
|
+
const cCap = this.policy.maxCycleHistory ?? 0;
|
|
332
|
+
if (cCap > 0 && this.cycleHistory.length > cCap) {
|
|
333
|
+
this.cycleHistory.splice(0, this.cycleHistory.length - cCap);
|
|
334
|
+
}
|
|
335
|
+
}
|
|
336
|
+
    // ── Memory Consolidation ─────────────────────────────────────────────────
    /**
     * Analyse unprocessed traces without committing anything. The returned
     * envelope can be inspected, passed to `agent.injectMemory()`, and then
     * finalised via `commitAnalysis()`. This is the building block for
     * transactional recovery — see `Ojas.recover()` for the canonical use.
     *
     * `estimatedHealthAfter` is a forward-looking projection from the same
     * pre-mutation state; for a true post-state, call `assess()` after
     * committing.
     *
     * @returns {object} RecoveryAnalysis envelope; no Nidra state is mutated.
     */
    analyseUnprocessed() {
        const cycleId = uuid();
        const startedAt = now();
        const healthBefore = this.assess();
        const unprocessed = this.getUnprocessedTraces();
        const clusters = this.clusterTraces(unprocessed);
        const memories = this.consolidateClusters(clusters);
        const patterns = this.identifyPatterns(clusters);
        const driftBefore = this.measureDrift(unprocessed);
        // Identical to healthBefore here because we haven't mutated anything yet.
        // It's named `estimatedHealthAfter` to signal that callers wanting the real
        // post-state must re-assess after applying side effects.
        const estimatedHealthAfter = healthBefore;
        return {
            cycleId,
            startedAt,
            tracesProcessed: unprocessed.length,
            coveredTraceIds: unprocessed.map((t) => t.id),
            memories,
            patterns,
            healthBefore,
            estimatedHealthAfter,
            driftBefore,
        };
    }
|
|
372
|
+
/**
|
|
373
|
+
* Pure preview: returns the memories, patterns, and drift estimate a
|
|
374
|
+
* commit would produce, with NO mutation. Memories are not appended,
|
|
375
|
+
* `lastRecovery` is unchanged, no trace IDs are marked processed, and
|
|
376
|
+
* no cycle is added to history. `healthAfter` is an estimate — for the
|
|
377
|
+
* true post-commit state, call `runRecoveryCycle()`.
|
|
378
|
+
*
|
|
379
|
+
* `supersededMemoryIds` lists the IDs that a commit WOULD invalidate
|
|
380
|
+
* given current state, so operators can see the impact of running
|
|
381
|
+
* the cycle without taking the side effect.
|
|
382
|
+
*/
|
|
383
|
+
previewRecoveryCycle() {
|
|
384
|
+
const a = this.analyseUnprocessed();
|
|
385
|
+
const supersessionPlan = this.computeSupersessions(a.memories);
|
|
386
|
+
return {
|
|
387
|
+
cycleId: a.cycleId,
|
|
388
|
+
startedAt: a.startedAt,
|
|
389
|
+
completedAt: now(),
|
|
390
|
+
tracesProcessed: a.tracesProcessed,
|
|
391
|
+
memoriesConsolidated: a.memories,
|
|
392
|
+
patternsIdentified: a.patterns,
|
|
393
|
+
driftReduction: Math.max(0, a.driftBefore - a.estimatedHealthAfter.driftScore),
|
|
394
|
+
healthBefore: a.healthBefore,
|
|
395
|
+
healthAfter: a.estimatedHealthAfter,
|
|
396
|
+
mode: 'preview',
|
|
397
|
+
healthAfterEstimated: true,
|
|
398
|
+
supersededMemoryIds: supersessionPlan.map((p) => p.existingId),
|
|
399
|
+
};
|
|
400
|
+
}
|
|
401
|
+
    /**
     * Commit a precomputed `RecoveryAnalysis`. This is the side-effecting
     * half of the analyse/commit pair: appends memories, marks traces
     * processed, updates `lastRecovery`, and records the cycle. Use this
     * directly to interleave external side effects (e.g. injecting memories
     * into an agent) between analysis and commit, so a failure in those
     * side effects leaves Nidra state untouched.
     *
     * `healthAfter` is captured AFTER mutation, so `driftReduction`
     * reflects the real change.
     *
     * **Stale-analysis semantics.** Between `analyseUnprocessed()` and
     * `commitAnalysis()`, new traces may arrive (carried over to next
     * cycle — fine) AND existing traces may be evicted by retention caps
     * if the trace store filled up. We mark any covered trace ID as
     * processed regardless, because the analysis already encoded what
     * those traces meant. The result reports `coveredTraceIdsStillRetained`
     * vs `coveredTraceIdsEvicted` so the caller can tell when memories
     * reference source traces that are no longer in `getTraces()`.
     *
     * @param {object} a - RecoveryAnalysis produced by `analyseUnprocessed()`.
     * @returns {object} Committed RecoveryCycle result (also pushed onto history).
     */
    commitAnalysis(a) {
        // Snapshot retained-trace IDs BEFORE applying retention so we can
        // attribute eviction correctly.
        const retainedIds = new Set(this.traces.map((t) => t.id));
        const stillRetained = [];
        const evicted = [];
        for (const id of a.coveredTraceIds) {
            if (retainedIds.has(id))
                stillRetained.push(id);
            else
                evicted.push(id);
        }
        // Stamp provenance on each memory: 'degraded' if any of its
        // sourceTraces are no longer in the trace store, 'full' otherwise.
        // sourceTracesEvicted is the exact subset that's now ungrabbable.
        const evictedSet = new Set(evicted);
        const initialTemp = 0.5; // fresh memories start "warm"
        for (const m of a.memories) {
            const evictedSources = (m.sourceTraces ?? []).filter((id) => evictedSet.has(id));
            if (evictedSources.length > 0) {
                m.provenance = 'degraded';
                m.sourceTracesEvicted = evictedSources;
            }
            else {
                m.provenance = 'full';
            }
            // Initialise temperature tracking on every newly committed memory.
            // Existing memories without this field are still supported — `getMemoryTemperature`
            // returns `undefined` for them, and `detectColdMemories` skips them.
            if (m.temperature === undefined) {
                m.temperature = initialTemp;
                m.temperatureUpdatedAt = now();
            }
        }
        // Compute supersession against CURRENT state BEFORE pushing new
        // memories, so we only consider prior memories, not the new batch's
        // siblings. Mutations are applied here — not in `computeSupersessions`
        // — because we want one consistent `supersessionTime` per cycle.
        const supersessionPlan = this.computeSupersessions(a.memories);
        const supersessionTime = now();
        const memoryById = new Map();
        for (const m of this.memories)
            memoryById.set(m.id, m);
        for (const { existingId, supersededBy } of supersessionPlan) {
            const existing = memoryById.get(existingId);
            if (existing) {
                existing.invalidatedAt = supersessionTime;
                existing.supersededBy = supersededBy;
            }
        }
        // Commit mutations.
        this.memories.push(...a.memories);
        for (const id of a.coveredTraceIds)
            this.processedTraceIds.add(id);
        this.lastRecovery = now();
        this.enforceTraceRetention();
        this.enforceMemoryAndCycleRetention();
        // Capture true post-mutation state.
        const healthAfter = this.assess();
        const result = {
            cycleId: a.cycleId,
            startedAt: a.startedAt,
            completedAt: now(),
            tracesProcessed: a.tracesProcessed,
            memoriesConsolidated: a.memories,
            patternsIdentified: a.patterns,
            driftReduction: Math.max(0, a.driftBefore - healthAfter.driftScore),
            healthBefore: a.healthBefore,
            healthAfter,
            mode: 'committed',
            healthAfterEstimated: false,
            coveredTracesEvicted: evicted.length,
            coveredTracesRetained: stillRetained.length,
            supersededMemoryIds: supersessionPlan.map((p) => p.existingId),
        };
        this.cycleHistory.push(result);
        // Re-apply the history cap now that this cycle has been recorded.
        this.enforceMemoryAndCycleRetention();
        return result;
    }
|
|
500
|
+
/**
|
|
501
|
+
* Run a full recovery cycle in-process: analyse, then commit. Equivalent
|
|
502
|
+
* to `commitAnalysis(analyseUnprocessed())`. Convenient when there are
|
|
503
|
+
* no external side effects to sequence in between.
|
|
504
|
+
*/
|
|
505
|
+
runRecoveryCycle() {
|
|
506
|
+
return this.commitAnalysis(this.analyseUnprocessed());
|
|
507
|
+
}
|
|
508
|
+
/**
|
|
509
|
+
* Cluster traces by action pattern for consolidation.
|
|
510
|
+
*/
|
|
511
|
+
clusterTraces(traces) {
|
|
512
|
+
const groups = new Map();
|
|
513
|
+
for (const trace of traces) {
|
|
514
|
+
const key = trace.action;
|
|
515
|
+
if (!groups.has(key))
|
|
516
|
+
groups.set(key, []);
|
|
517
|
+
groups.get(key).push(trace);
|
|
518
|
+
}
|
|
519
|
+
const clusters = [];
|
|
520
|
+
for (const [pattern, groupTraces] of groups) {
|
|
521
|
+
const successes = groupTraces.filter((t) => t.success).length;
|
|
522
|
+
clusters.push({
|
|
523
|
+
pattern,
|
|
524
|
+
traces: groupTraces,
|
|
525
|
+
frequency: groupTraces.length,
|
|
526
|
+
successRate: groupTraces.length > 0 ? successes / groupTraces.length : 0,
|
|
527
|
+
});
|
|
528
|
+
}
|
|
529
|
+
return clusters.sort((a, b) => b.frequency - a.frequency);
|
|
530
|
+
}
|
|
531
|
+
/**
|
|
532
|
+
* Convert pattern clusters into consolidated memories.
|
|
533
|
+
*/
|
|
534
|
+
consolidateClusters(clusters) {
|
|
535
|
+
const memories = [];
|
|
536
|
+
for (const cluster of clusters) {
|
|
537
|
+
if (cluster.traces.length < 2)
|
|
538
|
+
continue;
|
|
539
|
+
const confidence = this.computeConfidence(cluster);
|
|
540
|
+
if (confidence < this.policy.retentionConfidence)
|
|
541
|
+
continue;
|
|
542
|
+
const heuristic = this.extractHeuristic(cluster);
|
|
543
|
+
const abstraction = this.extractAbstraction(cluster);
|
|
544
|
+
memories.push({
|
|
545
|
+
id: uuid(),
|
|
546
|
+
kind: 'procedural',
|
|
547
|
+
createdAt: now(),
|
|
548
|
+
observedAt: earliestTimestamp(cluster.traces),
|
|
549
|
+
sourceTraces: cluster.traces.map((t) => t.id),
|
|
550
|
+
heuristic,
|
|
551
|
+
confidence,
|
|
552
|
+
applicability: [cluster.pattern],
|
|
553
|
+
abstraction,
|
|
554
|
+
});
|
|
555
|
+
}
|
|
556
|
+
return memories;
|
|
557
|
+
}
|
|
558
|
+
/**
|
|
559
|
+
* Pure planning step: compute which existing *active* memories would
|
|
560
|
+
* be superseded by the given new memories. Does **not** mutate. Used
|
|
561
|
+
* by `commitAnalysis()` to invalidate prior memories, and by
|
|
562
|
+
* `previewRecoveryCycle()` to surface the same plan without applying
|
|
563
|
+
* it.
|
|
564
|
+
*
|
|
565
|
+
* A new memory supersedes an existing one when ALL of:
|
|
566
|
+
* - existing.invalidatedAt is unset (still active);
|
|
567
|
+
* - kinds match (defaulting to `'procedural'` when unset);
|
|
568
|
+
* - `applicability` lists share at least one element;
|
|
569
|
+
* - `|existing.confidence - new.confidence|` ≥ the policy's
|
|
570
|
+
* `supersessionConfidenceDelta` (default 0.15).
|
|
571
|
+
*
|
|
572
|
+
* Each existing memory is claimed by at most one new memory per batch.
|
|
573
|
+
*/
|
|
574
|
+
computeSupersessions(newMemories) {
|
|
575
|
+
const delta = this.policy.supersessionConfidenceDelta ?? 0.15;
|
|
576
|
+
const plan = [];
|
|
577
|
+
const claimed = new Set();
|
|
578
|
+
for (const newMem of newMemories) {
|
|
579
|
+
for (const existing of this.memories) {
|
|
580
|
+
if (existing.invalidatedAt)
|
|
581
|
+
continue;
|
|
582
|
+
if (claimed.has(existing.id))
|
|
583
|
+
continue;
|
|
584
|
+
if ((existing.kind ?? 'procedural') !== (newMem.kind ?? 'procedural'))
|
|
585
|
+
continue;
|
|
586
|
+
if (!applicabilityOverlaps(existing.applicability, newMem.applicability))
|
|
587
|
+
continue;
|
|
588
|
+
if (Math.abs(existing.confidence - newMem.confidence) < delta)
|
|
589
|
+
continue;
|
|
590
|
+
plan.push({ existingId: existing.id, supersededBy: newMem.id });
|
|
591
|
+
claimed.add(existing.id);
|
|
592
|
+
}
|
|
593
|
+
}
|
|
594
|
+
return plan;
|
|
595
|
+
}
|
|
596
|
+
/**
|
|
597
|
+
* Compute confidence for a pattern cluster.
|
|
598
|
+
*/
|
|
599
|
+
computeConfidence(cluster) {
|
|
600
|
+
const frequencyScore = Math.min(1, cluster.frequency / 10);
|
|
601
|
+
const successScore = cluster.successRate;
|
|
602
|
+
const consistencyScore = this.measureConsistency(cluster.traces);
|
|
603
|
+
return (frequencyScore * 0.3) + (successScore * 0.4) + (consistencyScore * 0.3);
|
|
604
|
+
}
|
|
605
|
+
/**
|
|
606
|
+
* Measure consistency of outcomes within a cluster.
|
|
607
|
+
*/
|
|
608
|
+
measureConsistency(traces) {
|
|
609
|
+
if (traces.length < 2)
|
|
610
|
+
return 0;
|
|
611
|
+
const durations = traces.map((t) => t.durationMs);
|
|
612
|
+
const mean = durations.reduce((a, b) => a + b, 0) / durations.length;
|
|
613
|
+
const variance = durations.reduce((sum, d) => sum + (d - mean) ** 2, 0) / durations.length;
|
|
614
|
+
const cv = mean > 0 ? Math.sqrt(variance) / mean : 0;
|
|
615
|
+
// Lower coefficient of variation = higher consistency
|
|
616
|
+
return clamp(1 - cv);
|
|
617
|
+
}
|
|
618
|
+
/**
|
|
619
|
+
* Extract a heuristic from a pattern cluster.
|
|
620
|
+
*/
|
|
621
|
+
extractHeuristic(cluster) {
|
|
622
|
+
const successRate = (cluster.successRate * 100).toFixed(0);
|
|
623
|
+
const avgDuration = (cluster.traces.reduce((s, t) => s + t.durationMs, 0) / cluster.traces.length).toFixed(0);
|
|
624
|
+
if (cluster.successRate >= 0.8) {
|
|
625
|
+
return `Action "${cluster.pattern}" is reliable (${successRate}% success, avg ${avgDuration}ms). Prefer for similar tasks.`;
|
|
626
|
+
}
|
|
627
|
+
else if (cluster.successRate >= 0.5) {
|
|
628
|
+
return `Action "${cluster.pattern}" is partially reliable (${successRate}% success). Use with verification.`;
|
|
629
|
+
}
|
|
630
|
+
else {
|
|
631
|
+
const errors = cluster.traces
|
|
632
|
+
.filter((t) => !t.success && t.error)
|
|
633
|
+
.map((t) => t.error)
|
|
634
|
+
.slice(0, 3);
|
|
635
|
+
return `Action "${cluster.pattern}" is unreliable (${successRate}% success). Common errors: ${errors.join('; ') || 'unknown'}. Avoid or redesign.`;
|
|
636
|
+
}
|
|
637
|
+
}
|
|
638
|
+
/**
|
|
639
|
+
* Extract a higher-level abstraction from a cluster.
|
|
640
|
+
*/
|
|
641
|
+
extractAbstraction(cluster) {
|
|
642
|
+
const avgDuration = cluster.traces.reduce((s, t) => s + t.durationMs, 0) / cluster.traces.length;
|
|
643
|
+
const hasErrors = cluster.traces.some((t) => !t.success);
|
|
644
|
+
return [
|
|
645
|
+
`Pattern: ${cluster.pattern}`,
|
|
646
|
+
`Observations: ${cluster.frequency}`,
|
|
647
|
+
`Success: ${(cluster.successRate * 100).toFixed(0)}%`,
|
|
648
|
+
`Avg duration: ${avgDuration.toFixed(0)}ms`,
|
|
649
|
+
hasErrors ? 'Has failure modes — requires error handling' : 'Stable execution pattern',
|
|
650
|
+
].join('. ');
|
|
651
|
+
}
|
|
652
|
+
// ── Pattern Identification ───────────────────────────────────────────────
|
|
653
|
+
/**
|
|
654
|
+
* Identify recurring patterns across trace clusters.
|
|
655
|
+
*/
|
|
656
|
+
identifyPatterns(clusters) {
|
|
657
|
+
const patterns = [];
|
|
658
|
+
// Repeated failure pattern
|
|
659
|
+
const failingActions = clusters.filter((c) => c.successRate < 0.5 && c.frequency >= 3);
|
|
660
|
+
for (const c of failingActions) {
|
|
661
|
+
patterns.push(`REPEATED_FAILURE: "${c.pattern}" fails ${((1 - c.successRate) * 100).toFixed(0)}% of the time over ${c.frequency} attempts`);
|
|
662
|
+
}
|
|
663
|
+
// High-frequency actions
|
|
664
|
+
const frequent = clusters.filter((c) => c.frequency >= 5);
|
|
665
|
+
for (const c of frequent) {
|
|
666
|
+
patterns.push(`HIGH_FREQUENCY: "${c.pattern}" executed ${c.frequency} times — consider optimization`);
|
|
667
|
+
}
|
|
668
|
+
// Slow actions
|
|
669
|
+
const slow = clusters.filter((c) => {
|
|
670
|
+
const avg = c.traces.reduce((s, t) => s + t.durationMs, 0) / c.traces.length;
|
|
671
|
+
return avg > 5000;
|
|
672
|
+
});
|
|
673
|
+
for (const c of slow) {
|
|
674
|
+
const avg = c.traces.reduce((s, t) => s + t.durationMs, 0) / c.traces.length;
|
|
675
|
+
patterns.push(`SLOW_ACTION: "${c.pattern}" averages ${avg.toFixed(0)}ms — bottleneck candidate`);
|
|
676
|
+
}
|
|
677
|
+
return patterns;
|
|
678
|
+
}
|
|
679
|
+
// ── Drift Measurement ────────────────────────────────────────────────────
|
|
680
|
+
/**
|
|
681
|
+
* Measure cognitive drift: how much recent behavior deviates
|
|
682
|
+
* from stable patterns. Higher = more drift.
|
|
683
|
+
*/
|
|
684
|
+
measureDrift(traces) {
|
|
685
|
+
const source = traces || this.traces;
|
|
686
|
+
if (source.length < 2)
|
|
687
|
+
return 0;
|
|
688
|
+
const recent = source.slice(-this.policy.failureWindowSize);
|
|
689
|
+
const recentFailRate = recent.filter((t) => !t.success).length / recent.length;
|
|
690
|
+
// Check for increasing failure trend
|
|
691
|
+
const halfIdx = Math.floor(recent.length / 2);
|
|
692
|
+
const firstHalf = recent.slice(0, halfIdx);
|
|
693
|
+
const secondHalf = recent.slice(halfIdx);
|
|
694
|
+
const firstFailRate = firstHalf.length > 0
|
|
695
|
+
? firstHalf.filter((t) => !t.success).length / firstHalf.length
|
|
696
|
+
: 0;
|
|
697
|
+
const secondFailRate = secondHalf.length > 0
|
|
698
|
+
? secondHalf.filter((t) => !t.success).length / secondHalf.length
|
|
699
|
+
: 0;
|
|
700
|
+
const trendDrift = Math.max(0, secondFailRate - firstFailRate);
|
|
701
|
+
return clamp((recentFailRate * 0.6) + (trendDrift * 0.4));
|
|
702
|
+
}
|
|
703
|
+
// ── Health Assessment ────────────────────────────────────────────────────
|
|
704
|
+
/**
|
|
705
|
+
* Produce a complete recovery health report.
|
|
706
|
+
*
|
|
707
|
+
* `memoryConsolidation` is computed against *active* memories only
|
|
708
|
+
* (those with no `invalidatedAt`). Counting superseded memories
|
|
709
|
+
* would inflate the ratio as the agent revises its beliefs, hiding
|
|
710
|
+
* that the live memory store has actually grown leaner.
|
|
711
|
+
*/
|
|
712
|
+
assess() {
|
|
713
|
+
const drift = this.measureDrift();
|
|
714
|
+
const activeMemoryCount = this.memories.filter((m) => !m.invalidatedAt).length;
|
|
715
|
+
const consolidationRatio = this.traces.length > 0
|
|
716
|
+
? activeMemoryCount / Math.max(1, Math.floor(this.traces.length / this.policy.minTracesForConsolidation))
|
|
717
|
+
: 1;
|
|
718
|
+
const recentCycles = this.cycleHistory.slice(-5);
|
|
719
|
+
const avgDriftReduction = recentCycles.length > 0
|
|
720
|
+
? recentCycles.reduce((s, c) => s + c.driftReduction, 0) / recentCycles.length
|
|
721
|
+
: 0;
|
|
722
|
+
return {
|
|
723
|
+
memoryConsolidation: healthScore(clamp(consolidationRatio), 'nidra.consolidation'),
|
|
724
|
+
cognitiveStability: healthScore(1 - drift, 'nidra.stability'),
|
|
725
|
+
learningRate: healthScore(avgDriftReduction, 'nidra.learning'),
|
|
726
|
+
driftScore: drift,
|
|
727
|
+
lastRecoveryCycle: this.lastRecovery,
|
|
728
|
+
};
|
|
729
|
+
}
|
|
730
|
+
// ── Recommendations ──────────────────────────────────────────────────────
|
|
731
|
+
recommend() {
|
|
732
|
+
const recs = [];
|
|
733
|
+
const health = this.assess();
|
|
734
|
+
if (health.driftScore > this.policy.maxDriftThreshold) {
|
|
735
|
+
recs.push({
|
|
736
|
+
module: 'nidra',
|
|
737
|
+
severity: 'critical',
|
|
738
|
+
message: `Cognitive drift at ${(health.driftScore * 100).toFixed(1)}% — exceeds threshold.`,
|
|
739
|
+
action: 'Initiate immediate recovery cycle.',
|
|
740
|
+
});
|
|
741
|
+
}
|
|
742
|
+
const unprocessed = this.getUnprocessedTraces();
|
|
743
|
+
if (unprocessed.length >= this.policy.minTracesForConsolidation) {
|
|
744
|
+
recs.push({
|
|
745
|
+
module: 'nidra',
|
|
746
|
+
severity: 'warning',
|
|
747
|
+
message: `${unprocessed.length} traces awaiting consolidation.`,
|
|
748
|
+
action: 'Run recovery cycle to consolidate memories.',
|
|
749
|
+
});
|
|
750
|
+
}
|
|
751
|
+
if (health.cognitiveStability.value < 0.5) {
|
|
752
|
+
recs.push({
|
|
753
|
+
module: 'nidra',
|
|
754
|
+
severity: 'critical',
|
|
755
|
+
message: 'Cognitive stability is critically low.',
|
|
756
|
+
action: 'Reduce task load and initiate recovery.',
|
|
757
|
+
});
|
|
758
|
+
}
|
|
759
|
+
if (recs.length === 0) {
|
|
760
|
+
recs.push({
|
|
761
|
+
module: 'nidra',
|
|
762
|
+
severity: 'info',
|
|
763
|
+
message: 'Recovery health is stable.',
|
|
764
|
+
});
|
|
765
|
+
}
|
|
766
|
+
return recs;
|
|
767
|
+
}
|
|
768
|
+
// ── Accessors ────────────────────────────────────────────────────────────
|
|
769
|
+
/**
|
|
770
|
+
* Returns true if enough traces exist to justify a recovery cycle.
|
|
771
|
+
*/
|
|
772
|
+
needsRecovery() {
|
|
773
|
+
const unprocessed = this.getUnprocessedTraces();
|
|
774
|
+
const drift = this.measureDrift();
|
|
775
|
+
return (unprocessed.length >= this.policy.minTracesForConsolidation ||
|
|
776
|
+
drift > this.policy.maxDriftThreshold);
|
|
777
|
+
}
|
|
778
|
+
getUnprocessedTraces() {
|
|
779
|
+
return this.traces.filter((t) => !this.processedTraceIds.has(t.id));
|
|
780
|
+
}
|
|
781
|
+
/**
|
|
782
|
+
* All memories ever consolidated by this Nidra instance, including
|
|
783
|
+
* those that have been invalidated by supersession. Use this for
|
|
784
|
+
* audit / replay; use `getActiveMemories()` for the "what's true now"
|
|
785
|
+
* view and `findMemories(query)` for richer queries.
|
|
786
|
+
*/
|
|
787
|
+
getMemories() {
|
|
788
|
+
return this.memories.map((m) => ({ ...m, sourceTraces: [...m.sourceTraces], applicability: [...m.applicability] }));
|
|
789
|
+
}
|
|
790
|
+
/**
|
|
791
|
+
* The currently authoritative memories — those with no
|
|
792
|
+
* `invalidatedAt` set. This is the live view a consumer should
|
|
793
|
+
* inject into an agent or surface on a dashboard. Memories that
|
|
794
|
+
* have been superseded are excluded but remain in `getMemories()`
|
|
795
|
+
* for provenance.
|
|
796
|
+
*/
|
|
797
|
+
getActiveMemories() {
|
|
798
|
+
return this.getMemories().filter((m) => !m.invalidatedAt);
|
|
799
|
+
}
|
|
800
|
+
/**
|
|
801
|
+
* Bi-temporal / kind-aware query over the memory store: "what was
|
|
802
|
+
* true at any point in time".
|
|
803
|
+
*
|
|
804
|
+
* - `asOf` (ISO timestamp; default = now): include only memories
|
|
805
|
+
* that were valid at that instant — `createdAt <= asOf` AND
|
|
806
|
+
* (`invalidatedAt` is unset OR `invalidatedAt > asOf`). Set
|
|
807
|
+
* `includeInvalidated` to `true` to drop the invalidation cutoff
|
|
808
|
+
* and see every memory created on or before `asOf`.
|
|
809
|
+
* - `kind`: restrict to one or more memory kinds. Memories with no
|
|
810
|
+
* explicit `kind` are treated as `'procedural'`.
|
|
811
|
+
* - `applicability`: exact match against any element of the
|
|
812
|
+
* memory's `applicability` list.
|
|
813
|
+
* - `minConfidence`: drop memories below this confidence.
|
|
814
|
+
* - `limit`: cap the result length (no cap if undefined).
|
|
815
|
+
*
|
|
816
|
+
* Results are returned in insertion order (oldest first).
|
|
817
|
+
*/
|
|
818
|
+
findMemories(query = {}) {
|
|
819
|
+
const asOf = query.asOf ?? now();
|
|
820
|
+
const kinds = query.kind === undefined
|
|
821
|
+
? null
|
|
822
|
+
: new Set(Array.isArray(query.kind) ? query.kind : [query.kind]);
|
|
823
|
+
const out = [];
|
|
824
|
+
for (const m of this.memories) {
|
|
825
|
+
if (m.createdAt > asOf)
|
|
826
|
+
continue;
|
|
827
|
+
if (!query.includeInvalidated && m.invalidatedAt && m.invalidatedAt <= asOf)
|
|
828
|
+
continue;
|
|
829
|
+
if (kinds && !kinds.has((m.kind ?? 'procedural')))
|
|
830
|
+
continue;
|
|
831
|
+
if (query.applicability !== undefined && !m.applicability.includes(query.applicability))
|
|
832
|
+
continue;
|
|
833
|
+
if (query.minConfidence !== undefined && m.confidence < query.minConfidence)
|
|
834
|
+
continue;
|
|
835
|
+
out.push({ ...m, sourceTraces: [...m.sourceTraces], applicability: [...m.applicability] });
|
|
836
|
+
if (query.limit !== undefined && out.length >= query.limit)
|
|
837
|
+
break;
|
|
838
|
+
}
|
|
839
|
+
return out;
|
|
840
|
+
}
|
|
841
|
+
getTraces() {
|
|
842
|
+
return this.traces.map((t) => ({
|
|
843
|
+
...t,
|
|
844
|
+
failures: t.failures?.map((f) => ({ ...f, metadata: f.metadata ? { ...f.metadata } : undefined })),
|
|
845
|
+
}));
|
|
846
|
+
}
|
|
847
|
+
getCycleHistory() {
|
|
848
|
+
return structuredClone(this.cycleHistory);
|
|
849
|
+
}
|
|
850
|
+
exportState() {
|
|
851
|
+
return {
|
|
852
|
+
traces: [...this.getTraces()],
|
|
853
|
+
memories: [...this.getMemories()],
|
|
854
|
+
cycleHistory: structuredClone(this.cycleHistory),
|
|
855
|
+
lastRecovery: this.lastRecovery,
|
|
856
|
+
processedTraceIds: Array.from(this.processedTraceIds),
|
|
857
|
+
coldNotifiedIds: Array.from(this.coldNotifiedIds),
|
|
858
|
+
prunedMemoryIds: [...this.prunedMemoryIds],
|
|
859
|
+
};
|
|
860
|
+
}
|
|
861
|
+
importState(snapshot) {
|
|
862
|
+
if (!snapshot)
|
|
863
|
+
return;
|
|
864
|
+
this.traces = (snapshot.traces ?? []).map((t) => ({
|
|
865
|
+
...t,
|
|
866
|
+
failures: t.failures?.map((f) => ({ ...f, metadata: f.metadata ? { ...f.metadata } : undefined })),
|
|
867
|
+
}));
|
|
868
|
+
this.memories = (snapshot.memories ?? []).map((m) => ({
|
|
869
|
+
...m,
|
|
870
|
+
sourceTraces: [...m.sourceTraces],
|
|
871
|
+
applicability: [...m.applicability],
|
|
872
|
+
}));
|
|
873
|
+
this.cycleHistory = structuredClone(snapshot.cycleHistory ?? []);
|
|
874
|
+
this.lastRecovery = snapshot.lastRecovery ?? null;
|
|
875
|
+
this.processedTraceIds = new Set(snapshot.processedTraceIds ?? []);
|
|
876
|
+
this.coldNotifiedIds = new Set(snapshot.coldNotifiedIds ?? []);
|
|
877
|
+
this.prunedMemoryIds = [...(snapshot.prunedMemoryIds ?? [])];
|
|
878
|
+
}
|
|
879
|
+
getPolicy() {
|
|
880
|
+
return { ...this.policy };
|
|
881
|
+
}
|
|
882
|
+
updatePolicy(updates) {
|
|
883
|
+
this.policy = this.validatePolicy({ ...this.policy, ...updates });
|
|
884
|
+
this.enforceTraceRetention();
|
|
885
|
+
this.enforceMemoryAndCycleRetention();
|
|
886
|
+
}
|
|
887
|
+
}
|
|
888
|
+
// CommonJS named export of the Nidra class defined above.
exports.Nidra = Nidra;
|
|
889
|
+
//# sourceMappingURL=index.js.map
|