@joshuaswarren/openclaw-engram 9.0.92 → 9.0.94

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (36)
  1. package/bin/engram-access.js +9 -0
  2. package/dist/access-cli.js +219 -0
  3. package/dist/access-cli.js.map +1 -0
  4. package/dist/calibration-TLGCHI7D.js +236 -0
  5. package/dist/calibration-TLGCHI7D.js.map +1 -0
  6. package/dist/causal-chain-OJCLNNEH.js +23 -0
  7. package/dist/causal-consolidation-LN3WQ6OV.js +206 -0
  8. package/dist/causal-consolidation-LN3WQ6OV.js.map +1 -0
  9. package/dist/causal-retrieval-MEONMWFL.js +183 -0
  10. package/dist/causal-retrieval-MEONMWFL.js.map +1 -0
  11. package/dist/chunk-3GARBXWR.js +280 -0
  12. package/dist/chunk-3GARBXWR.js.map +1 -0
  13. package/dist/chunk-AFMNQR7H.js +411 -0
  14. package/dist/chunk-AFMNQR7H.js.map +1 -0
  15. package/dist/chunk-AHYRVKIJ.js +28110 -0
  16. package/dist/chunk-AHYRVKIJ.js.map +1 -0
  17. package/dist/chunk-DEIBZP3O.js +49 -0
  18. package/dist/chunk-DEIBZP3O.js.map +1 -0
  19. package/dist/{chunk-DDJJSZIV.js → chunk-H7LMMCVZ.js} +1 -1
  20. package/dist/chunk-H7LMMCVZ.js.map +1 -0
  21. package/dist/chunk-PW5V7OJH.js +364 -0
  22. package/dist/chunk-PW5V7OJH.js.map +1 -0
  23. package/dist/{chunk-LARLNIEW.js → chunk-R2BJ47PU.js} +35 -3
  24. package/dist/chunk-R2BJ47PU.js.map +1 -0
  25. package/dist/chunk-WSXU2GHQ.js +160 -0
  26. package/dist/chunk-WSXU2GHQ.js.map +1 -0
  27. package/dist/{engine-A33VEWKG.js → engine-IWHCC5P6.js} +3 -3
  28. package/dist/index.js +6809 -35533
  29. package/dist/index.js.map +1 -1
  30. package/dist/{storage-IGAVYRWY.js → storage-DO5TURPO.js} +2 -2
  31. package/dist/storage-DO5TURPO.js.map +1 -0
  32. package/package.json +8 -1
  33. package/dist/chunk-DDJJSZIV.js.map +0 -1
  34. package/dist/chunk-LARLNIEW.js.map +0 -1
  35. package/dist/{engine-A33VEWKG.js.map → causal-chain-OJCLNNEH.js.map} +0 -0
  36. package/dist/{storage-IGAVYRWY.js.map → engine-IWHCC5P6.js.map} +0 -0
@@ -0,0 +1,9 @@
1
#!/usr/bin/env node

// Thin launcher: defer all work to the compiled CLI bundle so this stub
// stays loadable even when dist/ is rebuilt under a different hash.
const bundle = import("../dist/access-cli.js");
bundle
  .then((mod) => mod.runCli(process.argv.slice(2)))
  .catch((error) => {
    let message;
    if (error instanceof Error) {
      message = error.message;
    } else {
      message = "unknown load error";
    }
    console.error(`access-cli failed to load dist/access-cli.js: ${message}`);
    process.exit(1);
  });
@@ -0,0 +1,219 @@
1
+ // openclaw-engram: Local-first memory plugin
2
+ import {
3
+ EngramAccessService,
4
+ Orchestrator,
5
+ parseConfig
6
+ } from "./chunk-AHYRVKIJ.js";
7
+ import "./chunk-R2BJ47PU.js";
8
+ import "./chunk-WSXU2GHQ.js";
9
+ import "./chunk-H7LMMCVZ.js";
10
+ import "./chunk-BNBG2XP6.js";
11
+ import "./chunk-AFMNQR7H.js";
12
+ import "./chunk-PW5V7OJH.js";
13
+ import "./chunk-DEIBZP3O.js";
14
+ import "./chunk-SSIIJJKA.js";
15
+
16
+ // src/access-cli.ts
17
+ import fs from "fs";
18
+ import os from "os";
19
+ import path from "path";
20
// Error thrown for malformed CLI invocations. `kind` selects the
// user-facing message in formatUsageError; `optionName` names the
// offending option when one applies.
var UsageError = class extends Error {
  constructor(kind, optionName) {
    super("invalid access-cli arguments");
    Object.assign(this, { kind, optionName });
  }
};
27
// Map a UsageError to the one-line message shown before the usage text.
// Unknown kinds yield undefined, matching the original switch without a
// default branch.
function formatUsageError(error) {
  const option = error.optionName ?? "unknown";
  const messages = {
    "unsupported-command": "unsupported command",
    "unexpected-positional": "unexpected positional argument",
    "missing-option": `missing required option: --${option}`,
    "missing-content": "missing required option: --content or --content-file",
    "invalid-integer": `invalid integer for --${option}`,
    "invalid-number": `invalid number for --${option}`
  };
  return messages[error.kind];
}
43
// Write one newline-terminated line to stdout; no argument = blank line.
function writeCliOutput(text = "") {
  process.stdout.write(`${text}\n`);
}
47
// Build the multi-line help text covering both subcommands.
function usage() {
  const header = [
    "Usage:",
    "  engram-access browse [options]",
    "  engram-access store [options]"
  ];
  const browse = [
    "",
    "Browse options:",
    "  --namespace <name>",
    "  --query <text>",
    "  --category <name>",
    "  --status <name>",
    "  --sort <updated_desc|updated_asc|created_desc|created_asc>",
    "  --limit <n>",
    "  --offset <n>"
  ];
  const store = [
    "",
    "Store options:",
    "  --namespace <name>",
    "  --session-key <key>",
    "  --principal <principal>",
    "  --content <text> | --content-file <path>",
    "  --category <name>",
    "  --confidence <0-1>",
    "  --tag <tag> (repeatable)",
    "  --entity-ref <ref>",
    "  --ttl <duration>",
    "  --source-reason <text>",
    "  --idempotency-key <key>",
    "  --dry-run"
  ];
  return [...header, ...browse, ...store].join("\n");
}
77
// Split argv into { command, options, flags }.
// `--key value` appends value to options[key] (options are repeatable);
// `--key` followed by nothing, an empty string, or another `--token`
// records a bare flag instead. Any non `--` positional is rejected.
function parseArgs(argv) {
  const [commandRaw, ...rest] = argv;
  const isKnownCommand = commandRaw === "browse" || commandRaw === "store";
  if (!isKnownCommand) {
    throw new UsageError("unsupported-command");
  }
  const options = {};
  const flags = new Set();
  let index = 0;
  while (index < rest.length) {
    const token = rest[index];
    if (!token.startsWith("--")) {
      throw new UsageError("unexpected-positional");
    }
    const name = token.slice(2);
    const value = rest[index + 1];
    if (!value || value.startsWith("--")) {
      flags.add(name);
      index += 1;
      continue;
    }
    const bucket = options[name] ?? (options[name] = []);
    bucket.push(value);
    index += 2;
  }
  return { command: commandRaw, options, flags };
}
107
// ─── Option accessors ────────────────────────────────────────────────
// Read values collected by parseArgs. Repeatable options keep every
// occurrence; for scalar options the last occurrence wins.

/** Last value supplied for --<name>, or undefined when absent. */
function getLastOption(args, name) {
  const values = args.options[name];
  if (!values || values.length === 0) return void 0;
  return values[values.length - 1];
}

/** Every value supplied for --<name> (empty array when absent). */
function getAllOptions(args, name) {
  return args.options[name] ?? [];
}

/** Last value for --<name>; throws UsageError when missing or blank. */
function requireOption(args, name) {
  const value = getLastOption(args, name);
  if (!value || value.trim().length === 0) {
    throw new UsageError("missing-option", name);
  }
  return value;
}

/**
 * Integer value for --<name>, or undefined when the option is absent.
 * Uses Number() plus Number.isInteger instead of parseInt so that
 * trailing garbage ("12abc"), fractions ("1.5"), and exponent forms
 * ("1e2" previously parsed as 1) are rejected rather than silently
 * truncated to a wrong value.
 */
function parseIntegerOption(args, name) {
  const raw = getLastOption(args, name);
  if (!raw) return void 0;
  const trimmed = raw.trim();
  const value = trimmed === "" ? NaN : Number(trimmed);
  if (!Number.isInteger(value)) {
    throw new UsageError("invalid-integer", name);
  }
  return value;
}

/**
 * Finite numeric value for --<name>, or undefined when absent.
 * Uses Number() instead of parseFloat so trailing garbage ("1.5abc")
 * is rejected rather than silently truncated.
 */
function parseFloatOption(args, name) {
  const raw = getLastOption(args, name);
  if (!raw) return void 0;
  const trimmed = raw.trim();
  const value = trimmed === "" ? NaN : Number(trimmed);
  if (!Number.isFinite(value)) {
    throw new UsageError("invalid-number", name);
  }
  return value;
}
140
// Resolve this plugin's config object from the OpenClaw config file.
// Path precedence: OPENCLAW_ENGRAM_CONFIG_PATH, then OPENCLAW_CONFIG_PATH,
// then ~/.openclaw/openclaw.json. Missing nested keys fall back to {};
// a missing or malformed file throws, as before.
function loadPluginConfig() {
  const home = process.env.HOME ?? os.homedir();
  const defaultPath = path.join(home, ".openclaw", "openclaw.json");
  const configPath =
    process.env.OPENCLAW_ENGRAM_CONFIG_PATH ||
    process.env.OPENCLAW_CONFIG_PATH ||
    defaultPath;
  const raw = JSON.parse(fs.readFileSync(configPath, "utf8"));
  return raw?.plugins?.entries?.["openclaw-engram"]?.config ?? {};
}
145
// Construct the parsed config plus the access service both subcommands use.
function buildRuntime() {
  const config = parseConfig(loadPluginConfig());
  const orchestrator = new Orchestrator(config);
  return { config, service: new EngramAccessService(orchestrator) };
}
152
// Execute the `browse` subcommand and print the result as pretty JSON.
async function runBrowse(args) {
  const { service } = buildRuntime();
  const request = {
    namespace: getLastOption(args, "namespace"),
    query: getLastOption(args, "query"),
    category: getLastOption(args, "category"),
    status: getLastOption(args, "status"),
    sort: getLastOption(args, "sort"),
    limit: parseIntegerOption(args, "limit"),
    offset: parseIntegerOption(args, "offset")
  };
  const result = await service.memoryBrowse(request);
  console.log(JSON.stringify(result, null, 2));
}
165
// Execute the `store` subcommand. Content may arrive inline (--content)
// or from a file (--content-file); the file, when given, wins. Blank
// content is a usage error.
async function runStore(args) {
  const { config, service } = buildRuntime();
  const contentFile = getLastOption(args, "content-file");
  const content = contentFile
    ? fs.readFileSync(contentFile, "utf8")
    : getLastOption(args, "content");
  if (!content || content.trim().length === 0) {
    throw new UsageError("missing-content");
  }
  const principal =
    getLastOption(args, "principal") ?? config.agentAccessHttp.principal;
  const result = await service.memoryStore({
    namespace: getLastOption(args, "namespace"),
    sessionKey: getLastOption(args, "session-key"),
    authenticatedPrincipal: principal,
    content,
    category: requireOption(args, "category"),
    confidence: parseFloatOption(args, "confidence"),
    tags: getAllOptions(args, "tag"),
    entityRef: getLastOption(args, "entity-ref"),
    ttl: getLastOption(args, "ttl"),
    sourceReason: getLastOption(args, "source-reason"),
    idempotencyKey: getLastOption(args, "idempotency-key"),
    dryRun: args.flags.has("dry-run")
  });
  console.log(JSON.stringify(result, null, 2));
}
189
// Dispatch to the subcommand selected by argv[0]. parseArgs guarantees
// the command is either "browse" or "store".
async function main(argv = process.argv.slice(2)) {
  const args = parseArgs(argv);
  switch (args.command) {
    case "browse":
      await runBrowse(args);
      return;
    default:
      await runStore(args);
  }
}
197
// Emit the usage/help text to stdout.
function printUsage() {
  writeCliOutput(usage());
}
200
/**
 * CLI entry point with top-level error handling.
 *
 * Usage errors print a specific message plus the help text and exit 1.
 * Any other error prints a diagnostic that now includes the underlying
 * error message (previously swallowed as a bare "access-cli failed")
 * and exits 1.
 */
async function runCli(argv = process.argv.slice(2)) {
  try {
    await main(argv);
  } catch (error) {
    if (error instanceof UsageError) {
      writeCliOutput(formatUsageError(error));
      writeCliOutput();
      printUsage();
      process.exit(1);
    }
    // Surface the underlying failure so operators can diagnose it.
    const detail = error instanceof Error ? error.message : String(error);
    console.error(`access-cli failed: ${detail}`);
    process.exit(1);
  }
}
214
+ export {
215
+ main,
216
+ printUsage,
217
+ runCli
218
+ };
219
+ //# sourceMappingURL=access-cli.js.map
@@ -0,0 +1 @@
1
+ {"version":3,"sources":["../src/access-cli.ts"],"sourcesContent":["import fs from \"node:fs\";\nimport os from \"node:os\";\nimport path from \"node:path\";\nimport { parseConfig } from \"./config.js\";\nimport type { PluginConfig } from \"./types.js\";\nimport { Orchestrator } from \"./orchestrator.js\";\nimport { EngramAccessService } from \"./access-service.js\";\n\ntype CommandName = \"browse\" | \"store\";\n\ntype ParsedArgs = {\n command: CommandName;\n options: Record<string, string[]>;\n flags: Set<string>;\n};\n\ntype Runtime = {\n config: PluginConfig;\n service: EngramAccessService;\n};\n\ntype UsageErrorKind =\n | \"unsupported-command\"\n | \"unexpected-positional\"\n | \"missing-option\"\n | \"missing-content\"\n | \"invalid-integer\"\n | \"invalid-number\";\n\nclass UsageError extends Error {\n constructor(\n readonly kind: UsageErrorKind,\n readonly optionName?: string,\n ) {\n super(\"invalid access-cli arguments\");\n }\n}\n\nfunction formatUsageError(error: UsageError): string {\n switch (error.kind) {\n case \"unsupported-command\":\n return \"unsupported command\";\n case \"unexpected-positional\":\n return \"unexpected positional argument\";\n case \"missing-option\":\n return `missing required option: --${error.optionName ?? \"unknown\"}`;\n case \"missing-content\":\n return \"missing required option: --content or --content-file\";\n case \"invalid-integer\":\n return `invalid integer for --${error.optionName ?? \"unknown\"}`;\n case \"invalid-number\":\n return `invalid number for --${error.optionName ?? 
\"unknown\"}`;\n }\n}\n\nfunction writeCliOutput(text: string = \"\"): void {\n process.stdout.write(`${text}\\n`);\n}\n\nfunction usage(): string {\n return [\n \"Usage:\",\n \" engram-access browse [options]\",\n \" engram-access store [options]\",\n \"\",\n \"Browse options:\",\n \" --namespace <name>\",\n \" --query <text>\",\n \" --category <name>\",\n \" --status <name>\",\n \" --sort <updated_desc|updated_asc|created_desc|created_asc>\",\n \" --limit <n>\",\n \" --offset <n>\",\n \"\",\n \"Store options:\",\n \" --namespace <name>\",\n \" --session-key <key>\",\n \" --principal <principal>\",\n \" --content <text> | --content-file <path>\",\n \" --category <name>\",\n \" --confidence <0-1>\",\n \" --tag <tag> (repeatable)\",\n \" --entity-ref <ref>\",\n \" --ttl <duration>\",\n \" --source-reason <text>\",\n \" --idempotency-key <key>\",\n \" --dry-run\",\n ].join(\"\\n\");\n}\n\nfunction parseArgs(argv: string[]): ParsedArgs {\n const [commandRaw, ...rest] = argv;\n if (commandRaw !== \"browse\" && commandRaw !== \"store\") {\n throw new UsageError(\"unsupported-command\");\n }\n\n const options: Record<string, string[]> = {};\n const flags = new Set<string>();\n\n for (let i = 0; i < rest.length; i += 1) {\n const token = rest[i];\n if (!token.startsWith(\"--\")) {\n throw new UsageError(\"unexpected-positional\");\n }\n const key = token.slice(2);\n const next = rest[i + 1];\n if (!next || next.startsWith(\"--\")) {\n flags.add(key);\n continue;\n }\n if (!options[key]) {\n options[key] = [];\n }\n options[key].push(next);\n i += 1;\n }\n\n return {\n command: commandRaw,\n options,\n flags,\n };\n}\n\nfunction getLastOption(args: ParsedArgs, name: string): string | undefined {\n const values = args.options[name];\n if (!values || values.length === 0) return undefined;\n return values[values.length - 1];\n}\n\nfunction getAllOptions(args: ParsedArgs, name: string): string[] {\n return args.options[name] ?? 
[];\n}\n\nfunction requireOption(args: ParsedArgs, name: string): string {\n const value = getLastOption(args, name);\n if (!value || value.trim().length === 0) {\n throw new UsageError(\"missing-option\", name);\n }\n return value;\n}\n\nfunction parseIntegerOption(args: ParsedArgs, name: string): number | undefined {\n const raw = getLastOption(args, name);\n if (!raw) return undefined;\n const value = parseInt(raw, 10);\n if (!Number.isFinite(value)) {\n throw new UsageError(\"invalid-integer\", name);\n }\n return value;\n}\n\nfunction parseFloatOption(args: ParsedArgs, name: string): number | undefined {\n const raw = getLastOption(args, name);\n if (!raw) return undefined;\n const value = Number.parseFloat(raw);\n if (!Number.isFinite(value)) {\n throw new UsageError(\"invalid-number\", name);\n }\n return value;\n}\n\nfunction loadPluginConfig(): Record<string, unknown> {\n const configPath =\n process.env.OPENCLAW_ENGRAM_CONFIG_PATH ||\n process.env.OPENCLAW_CONFIG_PATH ||\n path.join(process.env.HOME ?? os.homedir(), \".openclaw\", \"openclaw.json\");\n const raw = JSON.parse(fs.readFileSync(configPath, \"utf8\"));\n return raw?.plugins?.entries?.[\"openclaw-engram\"]?.config ?? 
{};\n}\n\nfunction buildRuntime(): Runtime {\n const config = parseConfig(loadPluginConfig());\n return {\n config,\n service: new EngramAccessService(new Orchestrator(config)),\n };\n}\n\nasync function runBrowse(args: ParsedArgs): Promise<void> {\n const { service } = buildRuntime();\n const result = await service.memoryBrowse({\n namespace: getLastOption(args, \"namespace\"),\n query: getLastOption(args, \"query\"),\n category: getLastOption(args, \"category\"),\n status: getLastOption(args, \"status\"),\n sort: getLastOption(args, \"sort\") as \"updated_desc\" | \"updated_asc\" | \"created_desc\" | \"created_asc\" | undefined,\n limit: parseIntegerOption(args, \"limit\"),\n offset: parseIntegerOption(args, \"offset\"),\n });\n console.log(JSON.stringify(result, null, 2));\n}\n\nasync function runStore(args: ParsedArgs): Promise<void> {\n const { config, service } = buildRuntime();\n const contentFile = getLastOption(args, \"content-file\");\n const inlineContent = getLastOption(args, \"content\");\n const content = contentFile ? fs.readFileSync(contentFile, \"utf8\") : inlineContent;\n if (!content || content.trim().length === 0) {\n throw new UsageError(\"missing-content\");\n }\n\n const result = await service.memoryStore({\n namespace: getLastOption(args, \"namespace\"),\n sessionKey: getLastOption(args, \"session-key\"),\n authenticatedPrincipal: getLastOption(args, \"principal\") ?? 
config.agentAccessHttp.principal,\n content,\n category: requireOption(args, \"category\"),\n confidence: parseFloatOption(args, \"confidence\"),\n tags: getAllOptions(args, \"tag\"),\n entityRef: getLastOption(args, \"entity-ref\"),\n ttl: getLastOption(args, \"ttl\"),\n sourceReason: getLastOption(args, \"source-reason\"),\n idempotencyKey: getLastOption(args, \"idempotency-key\"),\n dryRun: args.flags.has(\"dry-run\"),\n });\n console.log(JSON.stringify(result, null, 2));\n}\n\nexport async function main(argv: string[] = process.argv.slice(2)): Promise<void> {\n const args = parseArgs(argv);\n if (args.command === \"browse\") {\n await runBrowse(args);\n return;\n }\n await runStore(args);\n}\n\nexport function printUsage(): void {\n writeCliOutput(usage());\n}\n\nexport async function runCli(argv: string[] = process.argv.slice(2)): Promise<void> {\n try {\n await main(argv);\n } catch (error) {\n if (error instanceof UsageError) {\n writeCliOutput(formatUsageError(error));\n writeCliOutput();\n printUsage();\n process.exit(1);\n }\n\n console.error(\"access-cli failed\");\n process.exit(1);\n 
}\n}\n"],"mappings":";;;;;;;;;;;;;;;;AAAA,OAAO,QAAQ;AACf,OAAO,QAAQ;AACf,OAAO,UAAU;AA2BjB,IAAM,aAAN,cAAyB,MAAM;AAAA,EAC7B,YACW,MACA,YACT;AACA,UAAM,8BAA8B;AAH3B;AACA;AAAA,EAGX;AACF;AAEA,SAAS,iBAAiB,OAA2B;AACnD,UAAQ,MAAM,MAAM;AAAA,IAClB,KAAK;AACH,aAAO;AAAA,IACT,KAAK;AACH,aAAO;AAAA,IACT,KAAK;AACH,aAAO,8BAA8B,MAAM,cAAc,SAAS;AAAA,IACpE,KAAK;AACH,aAAO;AAAA,IACT,KAAK;AACH,aAAO,yBAAyB,MAAM,cAAc,SAAS;AAAA,IAC/D,KAAK;AACH,aAAO,wBAAwB,MAAM,cAAc,SAAS;AAAA,EAChE;AACF;AAEA,SAAS,eAAe,OAAe,IAAU;AAC/C,UAAQ,OAAO,MAAM,GAAG,IAAI;AAAA,CAAI;AAClC;AAEA,SAAS,QAAgB;AACvB,SAAO;AAAA,IACL;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,EACF,EAAE,KAAK,IAAI;AACb;AAEA,SAAS,UAAU,MAA4B;AAC7C,QAAM,CAAC,YAAY,GAAG,IAAI,IAAI;AAC9B,MAAI,eAAe,YAAY,eAAe,SAAS;AACrD,UAAM,IAAI,WAAW,qBAAqB;AAAA,EAC5C;AAEA,QAAM,UAAoC,CAAC;AAC3C,QAAM,QAAQ,oBAAI,IAAY;AAE9B,WAAS,IAAI,GAAG,IAAI,KAAK,QAAQ,KAAK,GAAG;AACvC,UAAM,QAAQ,KAAK,CAAC;AACpB,QAAI,CAAC,MAAM,WAAW,IAAI,GAAG;AAC3B,YAAM,IAAI,WAAW,uBAAuB;AAAA,IAC9C;AACA,UAAM,MAAM,MAAM,MAAM,CAAC;AACzB,UAAM,OAAO,KAAK,IAAI,CAAC;AACvB,QAAI,CAAC,QAAQ,KAAK,WAAW,IAAI,GAAG;AAClC,YAAM,IAAI,GAAG;AACb;AAAA,IACF;AACA,QAAI,CAAC,QAAQ,GAAG,GAAG;AACjB,cAAQ,GAAG,IAAI,CAAC;AAAA,IAClB;AACA,YAAQ,GAAG,EAAE,KAAK,IAAI;AACtB,SAAK;AAAA,EACP;AAEA,SAAO;AAAA,IACL,SAAS;AAAA,IACT;AAAA,IACA;AAAA,EACF;AACF;AAEA,SAAS,cAAc,MAAkB,MAAkC;AACzE,QAAM,SAAS,KAAK,QAAQ,IAAI;AAChC,MAAI,CAAC,UAAU,OAAO,WAAW,EAAG,QAAO;AAC3C,SAAO,OAAO,OAAO,SAAS,CAAC;AACjC;AAEA,SAAS,cAAc,MAAkB,MAAwB;AAC/D,SAAO,KAAK,QAAQ,IAAI,KAAK,CAAC;AAChC;AAEA,SAAS,cAAc,MAAkB,MAAsB;AAC7D,QAAM,QAAQ,cAAc,MAAM,IAAI;AACtC,MAAI,CAAC,SAAS,MAAM,KAAK,EAAE,WAAW,GAAG;AACvC,UAAM,IAAI,WAAW,kBAAkB,IAAI;AAAA,EAC7C;AACA,SAAO;AACT;AAEA,SAAS,mBAAmB,MAAkB,MAAkC;AAC9E,QAAM,MAAM,cAAc,MAAM,IAAI;AACpC,MAAI,CAAC,IAAK,QAAO;AACjB,QAAM,QAAQ,SAAS,KAAK,EAAE;AAC9B,MAAI,CAAC,OAAO,SAAS,KAAK
,GAAG;AAC3B,UAAM,IAAI,WAAW,mBAAmB,IAAI;AAAA,EAC9C;AACA,SAAO;AACT;AAEA,SAAS,iBAAiB,MAAkB,MAAkC;AAC5E,QAAM,MAAM,cAAc,MAAM,IAAI;AACpC,MAAI,CAAC,IAAK,QAAO;AACjB,QAAM,QAAQ,OAAO,WAAW,GAAG;AACnC,MAAI,CAAC,OAAO,SAAS,KAAK,GAAG;AAC3B,UAAM,IAAI,WAAW,kBAAkB,IAAI;AAAA,EAC7C;AACA,SAAO;AACT;AAEA,SAAS,mBAA4C;AACnD,QAAM,aACJ,QAAQ,IAAI,+BACZ,QAAQ,IAAI,wBACZ,KAAK,KAAK,QAAQ,IAAI,QAAQ,GAAG,QAAQ,GAAG,aAAa,eAAe;AAC1E,QAAM,MAAM,KAAK,MAAM,GAAG,aAAa,YAAY,MAAM,CAAC;AAC1D,SAAO,KAAK,SAAS,UAAU,iBAAiB,GAAG,UAAU,CAAC;AAChE;AAEA,SAAS,eAAwB;AAC/B,QAAM,SAAS,YAAY,iBAAiB,CAAC;AAC7C,SAAO;AAAA,IACL;AAAA,IACA,SAAS,IAAI,oBAAoB,IAAI,aAAa,MAAM,CAAC;AAAA,EAC3D;AACF;AAEA,eAAe,UAAU,MAAiC;AACxD,QAAM,EAAE,QAAQ,IAAI,aAAa;AACjC,QAAM,SAAS,MAAM,QAAQ,aAAa;AAAA,IACxC,WAAW,cAAc,MAAM,WAAW;AAAA,IAC1C,OAAO,cAAc,MAAM,OAAO;AAAA,IAClC,UAAU,cAAc,MAAM,UAAU;AAAA,IACxC,QAAQ,cAAc,MAAM,QAAQ;AAAA,IACpC,MAAM,cAAc,MAAM,MAAM;AAAA,IAChC,OAAO,mBAAmB,MAAM,OAAO;AAAA,IACvC,QAAQ,mBAAmB,MAAM,QAAQ;AAAA,EAC3C,CAAC;AACD,UAAQ,IAAI,KAAK,UAAU,QAAQ,MAAM,CAAC,CAAC;AAC7C;AAEA,eAAe,SAAS,MAAiC;AACvD,QAAM,EAAE,QAAQ,QAAQ,IAAI,aAAa;AACzC,QAAM,cAAc,cAAc,MAAM,cAAc;AACtD,QAAM,gBAAgB,cAAc,MAAM,SAAS;AACnD,QAAM,UAAU,cAAc,GAAG,aAAa,aAAa,MAAM,IAAI;AACrE,MAAI,CAAC,WAAW,QAAQ,KAAK,EAAE,WAAW,GAAG;AAC3C,UAAM,IAAI,WAAW,iBAAiB;AAAA,EACxC;AAEA,QAAM,SAAS,MAAM,QAAQ,YAAY;AAAA,IACvC,WAAW,cAAc,MAAM,WAAW;AAAA,IAC1C,YAAY,cAAc,MAAM,aAAa;AAAA,IAC7C,wBAAwB,cAAc,MAAM,WAAW,KAAK,OAAO,gBAAgB;AAAA,IACnF;AAAA,IACA,UAAU,cAAc,MAAM,UAAU;AAAA,IACxC,YAAY,iBAAiB,MAAM,YAAY;AAAA,IAC/C,MAAM,cAAc,MAAM,KAAK;AAAA,IAC/B,WAAW,cAAc,MAAM,YAAY;AAAA,IAC3C,KAAK,cAAc,MAAM,KAAK;AAAA,IAC9B,cAAc,cAAc,MAAM,eAAe;AAAA,IACjD,gBAAgB,cAAc,MAAM,iBAAiB;AAAA,IACrD,QAAQ,KAAK,MAAM,IAAI,SAAS;AAAA,EAClC,CAAC;AACD,UAAQ,IAAI,KAAK,UAAU,QAAQ,MAAM,CAAC,CAAC;AAC7C;AAEA,eAAsB,KAAK,OAAiB,QAAQ,KAAK,MAAM,CAAC,GAAkB;AAChF,QAAM,OAAO,UAAU,IAAI;AAC3B,MAAI,KAAK,YAAY,UAAU;AAC7B,UAAM,UAAU,IAAI;AACpB;AAAA,EACF;AACA,QAAM,SAAS,IAAI;AACrB;AAEO,SAAS,aAAmB;AACjC,iBAAe,MAAM,CAAC;AACxB;AAEA,eAAsB,OAAO,OAAiB,QAAQ,KAAK,MAAM,CAAC,GAAkB;AAClF,MAAI;AAC
F,UAAM,KAAK,IAAI;AAAA,EACjB,SAAS,OAAO;AACd,QAAI,iBAAiB,YAAY;AAC/B,qBAAe,iBAAiB,KAAK,CAAC;AACtC,qBAAe;AACf,iBAAW;AACX,cAAQ,KAAK,CAAC;AAAA,IAChB;AAEA,YAAQ,MAAM,mBAAmB;AACjC,YAAQ,KAAK,CAAC;AAAA,EAChB;AACF;","names":[]}
@@ -0,0 +1,236 @@
1
+ // openclaw-engram: Local-first memory plugin
2
+ import {
3
+ FallbackLlmClient
4
+ } from "./chunk-PW5V7OJH.js";
5
+ import {
6
+ listJsonFiles
7
+ } from "./chunk-DEIBZP3O.js";
8
+ import {
9
+ log
10
+ } from "./chunk-SSIIJJKA.js";
11
+
12
+ // src/calibration.ts
13
+ import { createHash } from "crypto";
14
+ import path from "path";
15
+ import { mkdir, readFile, writeFile } from "fs/promises";
16
// ─── Calibration index storage (<memoryDir>/state/calibration) ───────

// Directory that holds calibration state.
function calibrationDir(memoryDir) {
  return path.join(memoryDir, "state", "calibration");
}

// Full path of the JSON calibration index file.
function calibrationIndexPath(memoryDir) {
  return path.join(calibrationDir(memoryDir), "calibration-index.json");
}

// Load the calibration index, coercing each field to a usable value.
// Any read or parse failure yields a fresh, empty index.
async function readCalibrationIndex(memoryDir) {
  try {
    const raw = JSON.parse(await readFile(calibrationIndexPath(memoryDir), "utf8"));
    const rules = Array.isArray(raw.rules) ? raw.rules : [];
    const updatedAt =
      typeof raw.updatedAt === "string" ? raw.updatedAt : new Date().toISOString();
    const totalCorrectionsAnalyzed =
      typeof raw.totalCorrectionsAnalyzed === "number" ? raw.totalCorrectionsAnalyzed : 0;
    return { rules, updatedAt, totalCorrectionsAnalyzed };
  } catch {
    return { rules: [], updatedAt: new Date().toISOString(), totalCorrectionsAnalyzed: 0 };
  }
}

// Persist the index, stamping updatedAt (mutates the passed object, as
// before) and creating the directory if needed.
async function writeCalibrationIndex(memoryDir, index) {
  const dir = calibrationDir(memoryDir);
  await mkdir(dir, { recursive: true });
  index.updatedAt = new Date().toISOString();
  await writeFile(calibrationIndexPath(memoryDir), JSON.stringify(index, null, 2), "utf8");
}
40
/**
 * Collect correction memories from disk.
 *
 * Sources scanned, in order:
 *   1. JSON files under <memoryDir>/corrections (via listJsonFiles);
 *   2. correction-*.md files under <memoryDir>/facts/<YYYY-MM-DD>/;
 *   3. *.md files directly under <memoryDir>/corrections.
 *
 * Each markdown file must carry a `---` frontmatter block. Entries are
 * deduplicated by id (frontmatter `id:` or the file's basename), and
 * files that are missing, unparsable, or shorter than 10 characters of
 * body are skipped silently — this is a best-effort scan.
 */
async function readCorrections(memoryDir) {
  const correctionsDir = path.join(memoryDir, "corrections");
  const files = await listJsonFiles(correctionsDir).catch(() => []);
  // readdir is not part of this module's static fs/promises import, so
  // load it once here instead of once per scan (was duplicated).
  const { readdir } = await import("fs/promises");
  const factsDir = path.join(memoryDir, "facts");
  try {
    const dayDirs = (await readdir(factsDir)).filter((d) => /^\d{4}-\d{2}-\d{2}$/.test(d));
    for (const day of dayDirs) {
      const dayPath = path.join(factsDir, day);
      const dayFiles = (await readdir(dayPath))
        .filter((f) => f.startsWith("correction-") && f.endsWith(".md"))
        .map((f) => path.join(dayPath, f));
      files.push(...dayFiles);
    }
  } catch {
    // facts/ may not exist — ignore.
  }
  try {
    const corrFiles = (await readdir(correctionsDir))
      .filter((f) => f.endsWith(".md"))
      .map((f) => path.join(correctionsDir, f));
    files.push(...corrFiles);
  } catch {
    // corrections/ may not exist — ignore.
  }
  const corrections = [];
  const seen = new Set();
  for (const filePath of files) {
    try {
      const raw = await readFile(filePath, "utf8");
      // Split "---\n<frontmatter>\n---\n<body>".
      const fmMatch = raw.match(/^---\n([\s\S]*?)\n---\n([\s\S]*)$/);
      if (!fmMatch) continue;
      const content = fmMatch[2].trim();
      if (!content || content.length < 10) continue;
      const idMatch = fmMatch[1].match(/^id:\s*(.+)$/m);
      const id = idMatch?.[1]?.trim() ?? path.basename(filePath, ".md");
      if (seen.has(id)) continue;
      seen.add(id);
      const confMatch = fmMatch[1].match(/^confidence:\s*(.+)$/m);
      const confidence = confMatch ? parseFloat(confMatch[1]) : 0.9;
      const entityMatch = fmMatch[1].match(/^entityRef:\s*(.+)$/m);
      const entityRefs = entityMatch ? [entityMatch[1].trim()] : [];
      corrections.push({ id, content, created: "", confidence, entityRefs, tags: [] });
    } catch {
      // Unreadable file — skip.
    }
  }
  return corrections;
}
85
+ // System prompt for the correction-clustering LLM call made by
+ // synthesizeCalibrationRules; the model must answer with JSON only.
+ var CLUSTER_PROMPT = `You are analyzing user corrections to an AI assistant. Each correction represents a moment where the assistant's prediction of what the user wanted was WRONG.
86
+
87
+ Your job: Group these corrections into clusters where the SAME TYPE of misunderstanding is happening. Then for each cluster, synthesize a CalibrationRule.
88
+
89
+ A CalibrationRule describes:
90
+ - condition: When does this type of mistake happen?
91
+ - modelTendency: What does the model tend to assume or do wrong?
92
+ - userExpectation: What does the user actually want instead?
93
+ - calibration: How should the model adjust its behavior?
94
+ - ruleType: One of "model_tendency", "user_expectation", "scope_boundary", "verification_required"
95
+
96
+ Focus on PATTERNS, not individual corrections. A cluster needs at least 2 corrections to be worth a rule.
97
+
98
+ Output valid JSON only:
99
+ {
100
+ "rules": [
101
+ {
102
+ "ruleType": "model_tendency",
103
+ "condition": "When discussing project scope or task boundaries",
104
+ "modelTendency": "The model tends to assume broader scope than the user intends",
105
+ "userExpectation": "The user prefers narrow, specific task definitions and wants to be asked before scope expansion",
106
+ "calibration": "When uncertain about scope, ask for clarification rather than assuming. Default to the narrower interpretation.",
107
+ "confidence": 0.85,
108
+ "evidenceIds": ["correction-id-1", "correction-id-2"]
109
+ }
110
+ ]
111
+ }`;
112
/**
 * Ask the LLM to cluster corrections and synthesize calibration rules.
 *
 * @param corrections - parsed correction memories (id + content are used).
 * @param llm - client exposing chatCompletion(messages, options).
 * @param existingRules - rules already on disk, shown in the prompt so
 *   the model avoids duplicating them.
 * @returns newly synthesized rules; [] when there are fewer than two
 *   corrections, the LLM returns nothing, or its output fails to parse.
 */
async function synthesizeCalibrationRules(corrections, llm, existingRules) {
  if (corrections.length < 2) return [];
  // Cap the prompt at 50 corrections to bound token usage. Fix: the user
  // message previously claimed `corrections.length` corrections even when
  // only 50 were included — report the sampled count instead.
  const sampled = corrections.slice(0, 50);
  const correctionText = sampled.map((c) => `[${c.id}] ${c.content}`).join("\n\n");
  const existingRulesText = existingRules.length > 0
    ? `\n\nExisting calibration rules (avoid duplicating these):\n${existingRules.map((r) => `- ${r.condition}: ${r.calibration}`).join("\n")}`
    : "";
  const response = await llm.chatCompletion(
    [
      { role: "system", content: CLUSTER_PROMPT },
      {
        role: "user",
        content: `Here are ${sampled.length} corrections from this user:\n\n${correctionText}${existingRulesText}`
      }
    ],
    { temperature: 0.3, maxTokens: 3e3 }
  );
  if (!response?.content) return [];
  try {
    let jsonStr = response.content.trim();
    // Strip a ``` / ```json fence when the model wraps its output.
    const fenceMatch = jsonStr.match(/```(?:json)?\s*\n?([\s\S]*?)\n?\s*```/);
    if (fenceMatch) jsonStr = fenceMatch[1];
    const parsed = JSON.parse(jsonStr);
    if (!Array.isArray(parsed.rules)) return [];
    const now = new Date().toISOString();
    return parsed.rules
      .filter((r) => r.condition && r.calibration && r.modelTendency)
      .map((r) => ({
        // Deterministic id (hash of condition + calibration) so that
        // re-synthesized rules merge with existing ones instead of duplicating.
        id: `cal-${createHash("sha256").update(r.condition + r.calibration).digest("hex").slice(0, 12)}`,
        ruleType: r.ruleType ?? "model_tendency",
        condition: String(r.condition),
        modelTendency: String(r.modelTendency),
        userExpectation: String(r.userExpectation ?? ""),
        calibration: String(r.calibration),
        confidence: typeof r.confidence === "number" ? r.confidence : 0.7,
        evidenceCount: Array.isArray(r.evidenceIds) ? r.evidenceIds.length : 1,
        evidenceCorrectionIds: Array.isArray(r.evidenceIds) ? r.evidenceIds : [],
        createdAt: now,
        lastReinforcedAt: now
      }));
  } catch {
    log.warn("[calibration] failed to parse LLM response");
    return [];
  }
}
154
// Render calibration rules as a markdown section for recall prompts,
// truncating rule lines to a character budget. Returns null when there
// are no rules or no rule line fits the budget. (`query` is accepted for
// interface parity but is not used for filtering.)
function buildCalibrationRecallSection(rules, query, maxChars = 1200) {
  if (rules.length === 0) return null;
  const header = [
    "## Model Calibration (learned from past corrections)",
    "",
    "Adjustments for this specific user, learned from patterns in their corrections:",
    ""
  ];
  const body = [];
  let used = header.join("\n").length;
  for (const rule of rules) {
    const entry = `- **${rule.condition}**: ${rule.modelTendency} \u2192 Instead: ${rule.calibration}`;
    const cost = entry.length + 1;
    if (used + cost > maxChars) break;
    body.push(entry);
    used += cost;
  }
  if (body.length === 0) return null;
  return [...header, ...body, ""].join("\n");
}
173
// Full consolidation pass: read corrections from disk, synthesize new
// rules via the LLM, merge them into the persisted index (reinforcing
// rules that reappear), and return the complete rule set. Never throws;
// any failure is logged and yields [].
async function runCalibrationConsolidation(options) {
  try {
    const llm = new FallbackLlmClient(options.gatewayConfig);
    if (!llm.isAvailable()) {
      log.debug("[calibration] no LLM available \u2014 skipping consolidation");
      return [];
    }
    const corrections = await readCorrections(options.memoryDir);
    if (corrections.length < 3) {
      log.debug(`[calibration] only ${corrections.length} corrections \u2014 need at least 3`);
      return [];
    }
    const existingIndex = await readCalibrationIndex(options.memoryDir);
    const newRules = await synthesizeCalibrationRules(corrections, llm, existingIndex.rules);
    if (newRules.length === 0) {
      log.debug("[calibration] no new calibration rules synthesized");
      return existingIndex.rules;
    }
    const merged = new Map(existingIndex.rules.map((rule) => [rule.id, rule]));
    for (const candidate of newRules) {
      const previous = merged.get(candidate.id);
      if (previous) {
        // Same id means same condition+calibration hash: reinforcement.
        previous.lastReinforcedAt = new Date().toISOString();
        previous.evidenceCount += candidate.evidenceCount;
        previous.confidence = Math.min(1, previous.confidence + 0.05);
      } else {
        merged.set(candidate.id, candidate);
      }
    }
    const allRules = [...merged.values()];
    await writeCalibrationIndex(options.memoryDir, {
      rules: allRules,
      updatedAt: new Date().toISOString(),
      totalCorrectionsAnalyzed: corrections.length
    });
    log.debug(`[calibration] synthesized ${newRules.length} new rule(s), ${allRules.length} total`);
    return allRules;
  } catch (error) {
    const reason = error instanceof Error ? error.message : String(error);
    log.warn(`[calibration] consolidation failed (non-fatal): ${reason}`);
    return [];
  }
}
215
// Gate consolidation behind the calibrationEnabled config flag.
async function runCalibrationIfEnabled(options) {
  if (!options.calibrationEnabled) return [];
  const { memoryDir, gatewayConfig } = options;
  return runCalibrationConsolidation({ memoryDir, gatewayConfig });
}
224
// Expose the persisted rules for prompt-time recall.
async function getCalibrationRulesForRecall(memoryDir) {
  return (await readCalibrationIndex(memoryDir)).rules;
}
228
+ export {
229
+ buildCalibrationRecallSection,
230
+ getCalibrationRulesForRecall,
231
+ readCalibrationIndex,
232
+ runCalibrationConsolidation,
233
+ runCalibrationIfEnabled,
234
+ synthesizeCalibrationRules
235
+ };
236
+ //# sourceMappingURL=calibration-TLGCHI7D.js.map
@@ -0,0 +1 @@
1
+ {"version":3,"sources":["../src/calibration.ts"],"sourcesContent":["/**\n * calibration.ts — Prediction-Error-Driven Model-User Calibration\n *\n * Analyzes patterns in user corrections to identify systematic miscalibration\n * between the model's predictions and the user's actual expectations.\n * During consolidation, replays chains of similar corrections through an LLM\n * to synthesize CalibrationRules that adjust model behavior for this specific user.\n *\n * Inspired by:\n * - Cerebellar motor calibration (prediction errors drive lasting adjustments)\n * - Temporal difference learning (dopamine signals prediction error)\n * - Tesla FSD shadow mode (divergence between prediction and reality = training signal)\n */\n\nimport { createHash } from \"node:crypto\";\nimport path from \"node:path\";\nimport { mkdir, readFile, writeFile } from \"node:fs/promises\";\nimport { FallbackLlmClient } from \"./fallback-llm.js\";\nimport type { GatewayConfig, MemoryFile } from \"./types.js\";\nimport { listJsonFiles, readJsonFile } from \"./json-store.js\";\nimport { isRecord } from \"./store-contract.js\";\nimport { log } from \"./logger.js\";\n\n// ─── Types ───────────────────────────────────────────────────────────────────\n\nexport interface CalibrationRule {\n id: string;\n ruleType: \"model_tendency\" | \"user_expectation\" | \"scope_boundary\" | \"verification_required\";\n condition: string;\n modelTendency: string;\n userExpectation: string;\n calibration: string;\n confidence: number;\n evidenceCount: number;\n evidenceCorrectionIds: string[];\n createdAt: string;\n lastReinforcedAt: string;\n}\n\nexport interface CalibrationIndex {\n rules: CalibrationRule[];\n updatedAt: string;\n totalCorrectionsAnalyzed: number;\n}\n\n// ─── Storage ─────────────────────────────────────────────────────────────────\n\nfunction calibrationDir(memoryDir: string): string {\n return path.join(memoryDir, \"state\", \"calibration\");\n}\n\nfunction calibrationIndexPath(memoryDir: 
string): string {\n return path.join(calibrationDir(memoryDir), \"calibration-index.json\");\n}\n\nexport async function readCalibrationIndex(memoryDir: string): Promise<CalibrationIndex> {\n try {\n const raw = JSON.parse(await readFile(calibrationIndexPath(memoryDir), \"utf8\"));\n return {\n rules: Array.isArray(raw.rules) ? raw.rules : [],\n updatedAt: typeof raw.updatedAt === \"string\" ? raw.updatedAt : new Date().toISOString(),\n totalCorrectionsAnalyzed: typeof raw.totalCorrectionsAnalyzed === \"number\" ? raw.totalCorrectionsAnalyzed : 0,\n };\n } catch {\n return { rules: [], updatedAt: new Date().toISOString(), totalCorrectionsAnalyzed: 0 };\n }\n}\n\nasync function writeCalibrationIndex(memoryDir: string, index: CalibrationIndex): Promise<void> {\n const dir = calibrationDir(memoryDir);\n await mkdir(dir, { recursive: true });\n index.updatedAt = new Date().toISOString();\n await writeFile(calibrationIndexPath(memoryDir), JSON.stringify(index, null, 2), \"utf8\");\n}\n\n// ─── Correction Reading ──────────────────────────────────────────────────────\n\ninterface CorrectionMemory {\n id: string;\n content: string;\n created: string;\n confidence: number;\n entityRefs: string[];\n tags: string[];\n}\n\nasync function readCorrections(memoryDir: string): Promise<CorrectionMemory[]> {\n const correctionsDir = path.join(memoryDir, \"corrections\");\n const files = await listJsonFiles(correctionsDir).catch(() => {\n // Corrections might be in facts/ directories too\n return [] as string[];\n });\n\n // Also scan facts directories for correction-category files\n const factsDir = path.join(memoryDir, \"facts\");\n try {\n const { readdir } = await import(\"node:fs/promises\");\n const dayDirs = (await readdir(factsDir)).filter((d: string) => /^\\d{4}-\\d{2}-\\d{2}$/.test(d));\n for (const day of dayDirs) {\n const dayPath = path.join(factsDir, day);\n const dayFiles = (await readdir(dayPath))\n .filter((f: string) => f.startsWith(\"correction-\") && 
f.endsWith(\".md\"))\n .map((f: string) => path.join(dayPath, f));\n files.push(...dayFiles);\n }\n } catch {\n // facts dir might not exist\n }\n\n // Also check the dedicated corrections directory\n try {\n const { readdir } = await import(\"node:fs/promises\");\n const corrFiles = (await readdir(correctionsDir))\n .filter((f: string) => f.endsWith(\".md\"))\n .map((f: string) => path.join(correctionsDir, f));\n files.push(...corrFiles);\n } catch {\n // corrections dir might not exist\n }\n\n const corrections: CorrectionMemory[] = [];\n const seen = new Set<string>();\n\n for (const filePath of files) {\n try {\n const raw = await readFile(filePath, \"utf8\");\n\n // Parse frontmatter\n const fmMatch = raw.match(/^---\\n([\\s\\S]*?)\\n---\\n([\\s\\S]*)$/);\n if (!fmMatch) continue;\n\n const content = fmMatch[2].trim();\n if (!content || content.length < 10) continue;\n\n // Extract id from frontmatter\n const idMatch = fmMatch[1].match(/^id:\\s*(.+)$/m);\n const id = idMatch?.[1]?.trim() ?? path.basename(filePath, \".md\");\n\n if (seen.has(id)) continue;\n seen.add(id);\n\n const confMatch = fmMatch[1].match(/^confidence:\\s*(.+)$/m);\n const confidence = confMatch ? parseFloat(confMatch[1]) : 0.9;\n\n const entityMatch = fmMatch[1].match(/^entityRef:\\s*(.+)$/m);\n const entityRefs = entityMatch ? [entityMatch[1].trim()] : [];\n\n corrections.push({ id, content, created: \"\", confidence, entityRefs, tags: [] });\n } catch {\n // skip unparseable files\n }\n }\n\n return corrections;\n}\n\n// ─── LLM-Assisted Clustering and Replay ──────────────────────────────────────\n\nconst CLUSTER_PROMPT = `You are analyzing user corrections to an AI assistant. Each correction represents a moment where the assistant's prediction of what the user wanted was WRONG.\n\nYour job: Group these corrections into clusters where the SAME TYPE of misunderstanding is happening. 
Then for each cluster, synthesize a CalibrationRule.\n\nA CalibrationRule describes:\n- condition: When does this type of mistake happen?\n- modelTendency: What does the model tend to assume or do wrong?\n- userExpectation: What does the user actually want instead?\n- calibration: How should the model adjust its behavior?\n- ruleType: One of \"model_tendency\", \"user_expectation\", \"scope_boundary\", \"verification_required\"\n\nFocus on PATTERNS, not individual corrections. A cluster needs at least 2 corrections to be worth a rule.\n\nOutput valid JSON only:\n{\n \"rules\": [\n {\n \"ruleType\": \"model_tendency\",\n \"condition\": \"When discussing project scope or task boundaries\",\n \"modelTendency\": \"The model tends to assume broader scope than the user intends\",\n \"userExpectation\": \"The user prefers narrow, specific task definitions and wants to be asked before scope expansion\",\n \"calibration\": \"When uncertain about scope, ask for clarification rather than assuming. Default to the narrower interpretation.\",\n \"confidence\": 0.85,\n \"evidenceIds\": [\"correction-id-1\", \"correction-id-2\"]\n }\n ]\n}`;\n\nexport async function synthesizeCalibrationRules(\n corrections: CorrectionMemory[],\n llm: FallbackLlmClient,\n existingRules: CalibrationRule[],\n): Promise<CalibrationRule[]> {\n if (corrections.length < 2) return [];\n\n // Format corrections for the LLM\n const correctionText = corrections\n .slice(0, 50) // limit to avoid huge prompts\n .map((c, i) => `[${c.id}] ${c.content}`)\n .join(\"\\n\\n\");\n\n const existingRulesText = existingRules.length > 0\n ? 
`\\n\\nExisting calibration rules (avoid duplicating these):\\n${existingRules.map((r) => `- ${r.condition}: ${r.calibration}`).join(\"\\n\")}`\n : \"\";\n\n const response = await llm.chatCompletion(\n [\n { role: \"system\", content: CLUSTER_PROMPT },\n { role: \"user\", content: `Here are ${corrections.length} corrections from this user:\\n\\n${correctionText}${existingRulesText}` },\n ],\n { temperature: 0.3, maxTokens: 3000 },\n );\n\n if (!response?.content) return [];\n\n try {\n let jsonStr = response.content.trim();\n const fenceMatch = jsonStr.match(/```(?:json)?\\s*\\n?([\\s\\S]*?)\\n?\\s*```/);\n if (fenceMatch) jsonStr = fenceMatch[1];\n\n const parsed = JSON.parse(jsonStr);\n if (!Array.isArray(parsed.rules)) return [];\n\n const now = new Date().toISOString();\n return parsed.rules\n .filter((r: any) => r.condition && r.calibration && r.modelTendency)\n .map((r: any) => ({\n id: `cal-${createHash(\"sha256\").update(r.condition + r.calibration).digest(\"hex\").slice(0, 12)}`,\n ruleType: r.ruleType ?? \"model_tendency\",\n condition: String(r.condition),\n modelTendency: String(r.modelTendency),\n userExpectation: String(r.userExpectation ?? \"\"),\n calibration: String(r.calibration),\n confidence: typeof r.confidence === \"number\" ? r.confidence : 0.7,\n evidenceCount: Array.isArray(r.evidenceIds) ? r.evidenceIds.length : 1,\n evidenceCorrectionIds: Array.isArray(r.evidenceIds) ? 
r.evidenceIds : [],\n createdAt: now,\n lastReinforcedAt: now,\n }));\n } catch {\n log.warn(\"[calibration] failed to parse LLM response\");\n return [];\n }\n}\n\n// ─── Recall Section ──────────────────────────────────────────────────────────\n\n/**\n * Build a recall section from calibration rules relevant to the current query.\n * Uses the LLM to select which rules apply to the current context.\n */\nexport function buildCalibrationRecallSection(\n rules: CalibrationRule[],\n query: string,\n maxChars: number = 1200,\n): string | null {\n if (rules.length === 0) return null;\n\n // Simple relevance: include all rules (they're already filtered to this user)\n // In production, could use embedding similarity to filter\n const lines: string[] = [\n \"## Model Calibration (learned from past corrections)\",\n \"\",\n \"Adjustments for this specific user, learned from patterns in their corrections:\",\n \"\",\n ];\n\n let totalChars = lines.join(\"\\n\").length;\n\n for (const rule of rules) {\n const line = `- **${rule.condition}**: ${rule.modelTendency} → Instead: ${rule.calibration}`;\n if (totalChars + line.length + 1 > maxChars) break;\n lines.push(line);\n totalChars += line.length + 1;\n }\n\n if (lines.length <= 4) return null;\n lines.push(\"\");\n return lines.join(\"\\n\");\n}\n\n// ─── Public API ──────────────────────────────────────────────────────────────\n\n/**\n * Run the full calibration pipeline:\n * 1. Read all corrections\n * 2. Send to LLM for clustering and rule synthesis\n * 3. Merge with existing rules\n * 4. 
Write updated index\n */\nexport async function runCalibrationConsolidation(options: {\n memoryDir: string;\n gatewayConfig?: GatewayConfig;\n}): Promise<CalibrationRule[]> {\n try {\n const llm = new FallbackLlmClient(options.gatewayConfig);\n if (!llm.isAvailable()) {\n log.debug(\"[calibration] no LLM available — skipping consolidation\");\n return [];\n }\n\n const corrections = await readCorrections(options.memoryDir);\n if (corrections.length < 3) {\n log.debug(`[calibration] only ${corrections.length} corrections — need at least 3`);\n return [];\n }\n\n const existingIndex = await readCalibrationIndex(options.memoryDir);\n\n const newRules = await synthesizeCalibrationRules(corrections, llm, existingIndex.rules);\n if (newRules.length === 0) {\n log.debug(\"[calibration] no new calibration rules synthesized\");\n return existingIndex.rules;\n }\n\n // Merge: keep existing rules, add new ones (deduplicate by id)\n const ruleMap = new Map(existingIndex.rules.map((r) => [r.id, r]));\n for (const rule of newRules) {\n if (ruleMap.has(rule.id)) {\n // Reinforce existing rule\n const existing = ruleMap.get(rule.id)!;\n existing.lastReinforcedAt = new Date().toISOString();\n existing.evidenceCount += rule.evidenceCount;\n existing.confidence = Math.min(1, existing.confidence + 0.05);\n } else {\n ruleMap.set(rule.id, rule);\n }\n }\n\n const allRules = [...ruleMap.values()];\n await writeCalibrationIndex(options.memoryDir, {\n rules: allRules,\n updatedAt: new Date().toISOString(),\n totalCorrectionsAnalyzed: corrections.length,\n });\n\n log.debug(`[calibration] synthesized ${newRules.length} new rule(s), ${allRules.length} total`);\n return allRules;\n } catch (error) {\n log.warn(`[calibration] consolidation failed (non-fatal): ${error instanceof Error ? error.message : String(error)}`);\n return [];\n }\n}\n\n/**\n * Standalone entry point for calibration consolidation that can be called\n * independently of weekly compounding. 
The compounding engine's\n * `synthesizeWeekly()` is one trigger, but orchestrators or periodic\n * maintenance jobs should call this directly so calibration is not gated\n * on weekly compounding being enabled.\n */\nexport async function runCalibrationIfEnabled(options: {\n memoryDir: string;\n calibrationEnabled: boolean;\n gatewayConfig?: GatewayConfig;\n}): Promise<CalibrationRule[]> {\n if (!options.calibrationEnabled) {\n return [];\n }\n return runCalibrationConsolidation({\n memoryDir: options.memoryDir,\n gatewayConfig: options.gatewayConfig,\n });\n}\n\n/**\n * Get calibration rules for recall injection.\n * Reads the pre-computed calibration index.\n */\nexport async function getCalibrationRulesForRecall(\n memoryDir: string,\n): Promise<CalibrationRule[]> {\n const index = await readCalibrationIndex(memoryDir);\n return index.rules;\n}\n"],"mappings":";;;;;;;;;;;;AAcA,SAAS,kBAAkB;AAC3B,OAAO,UAAU;AACjB,SAAS,OAAO,UAAU,iBAAiB;AA+B3C,SAAS,eAAe,WAA2B;AACjD,SAAO,KAAK,KAAK,WAAW,SAAS,aAAa;AACpD;AAEA,SAAS,qBAAqB,WAA2B;AACvD,SAAO,KAAK,KAAK,eAAe,SAAS,GAAG,wBAAwB;AACtE;AAEA,eAAsB,qBAAqB,WAA8C;AACvF,MAAI;AACF,UAAM,MAAM,KAAK,MAAM,MAAM,SAAS,qBAAqB,SAAS,GAAG,MAAM,CAAC;AAC9E,WAAO;AAAA,MACL,OAAO,MAAM,QAAQ,IAAI,KAAK,IAAI,IAAI,QAAQ,CAAC;AAAA,MAC/C,WAAW,OAAO,IAAI,cAAc,WAAW,IAAI,aAAY,oBAAI,KAAK,GAAE,YAAY;AAAA,MACtF,0BAA0B,OAAO,IAAI,6BAA6B,WAAW,IAAI,2BAA2B;AAAA,IAC9G;AAAA,EACF,QAAQ;AACN,WAAO,EAAE,OAAO,CAAC,GAAG,YAAW,oBAAI,KAAK,GAAE,YAAY,GAAG,0BAA0B,EAAE;AAAA,EACvF;AACF;AAEA,eAAe,sBAAsB,WAAmB,OAAwC;AAC9F,QAAM,MAAM,eAAe,SAAS;AACpC,QAAM,MAAM,KAAK,EAAE,WAAW,KAAK,CAAC;AACpC,QAAM,aAAY,oBAAI,KAAK,GAAE,YAAY;AACzC,QAAM,UAAU,qBAAqB,SAAS,GAAG,KAAK,UAAU,OAAO,MAAM,CAAC,GAAG,MAAM;AACzF;AAaA,eAAe,gBAAgB,WAAgD;AAC7E,QAAM,iBAAiB,KAAK,KAAK,WAAW,aAAa;AACzD,QAAM,QAAQ,MAAM,cAAc,cAAc,EAAE,MAAM,MAAM;AAE5D,WAAO,CAAC;AAAA,EACV,CAAC;AAGD,QAAM,WAAW,KAAK,KAAK,WAAW,OAAO;AAC7C,MAAI;AACF,UAAM,EAAE,QAAQ,IAAI,MAAM,OAAO,aAAkB;AACnD,UAAM,WAAW,MAAM,QAAQ,QAAQ,GAAG,OAAO,CAAC,MAAc,sBAAsB,KAAK,CAAC,CAAC;AAC7F,eAAW,
OAAO,SAAS;AACzB,YAAM,UAAU,KAAK,KAAK,UAAU,GAAG;AACvC,YAAM,YAAY,MAAM,QAAQ,OAAO,GACpC,OAAO,CAAC,MAAc,EAAE,WAAW,aAAa,KAAK,EAAE,SAAS,KAAK,CAAC,EACtE,IAAI,CAAC,MAAc,KAAK,KAAK,SAAS,CAAC,CAAC;AAC3C,YAAM,KAAK,GAAG,QAAQ;AAAA,IACxB;AAAA,EACF,QAAQ;AAAA,EAER;AAGA,MAAI;AACF,UAAM,EAAE,QAAQ,IAAI,MAAM,OAAO,aAAkB;AACnD,UAAM,aAAa,MAAM,QAAQ,cAAc,GAC5C,OAAO,CAAC,MAAc,EAAE,SAAS,KAAK,CAAC,EACvC,IAAI,CAAC,MAAc,KAAK,KAAK,gBAAgB,CAAC,CAAC;AAClD,UAAM,KAAK,GAAG,SAAS;AAAA,EACzB,QAAQ;AAAA,EAER;AAEA,QAAM,cAAkC,CAAC;AACzC,QAAM,OAAO,oBAAI,IAAY;AAE7B,aAAW,YAAY,OAAO;AAC5B,QAAI;AACF,YAAM,MAAM,MAAM,SAAS,UAAU,MAAM;AAG3C,YAAM,UAAU,IAAI,MAAM,mCAAmC;AAC7D,UAAI,CAAC,QAAS;AAEd,YAAM,UAAU,QAAQ,CAAC,EAAE,KAAK;AAChC,UAAI,CAAC,WAAW,QAAQ,SAAS,GAAI;AAGrC,YAAM,UAAU,QAAQ,CAAC,EAAE,MAAM,eAAe;AAChD,YAAM,KAAK,UAAU,CAAC,GAAG,KAAK,KAAK,KAAK,SAAS,UAAU,KAAK;AAEhE,UAAI,KAAK,IAAI,EAAE,EAAG;AAClB,WAAK,IAAI,EAAE;AAEX,YAAM,YAAY,QAAQ,CAAC,EAAE,MAAM,uBAAuB;AAC1D,YAAM,aAAa,YAAY,WAAW,UAAU,CAAC,CAAC,IAAI;AAE1D,YAAM,cAAc,QAAQ,CAAC,EAAE,MAAM,sBAAsB;AAC3D,YAAM,aAAa,cAAc,CAAC,YAAY,CAAC,EAAE,KAAK,CAAC,IAAI,CAAC;AAE5D,kBAAY,KAAK,EAAE,IAAI,SAAS,SAAS,IAAI,YAAY,YAAY,MAAM,CAAC,EAAE,CAAC;AAAA,IACjF,QAAQ;AAAA,IAER;AAAA,EACF;AAEA,SAAO;AACT;AAIA,IAAM,iBAAiB;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AA4BvB,eAAsB,2BACpB,aACA,KACA,eAC4B;AAC5B,MAAI,YAAY,SAAS,EAAG,QAAO,CAAC;AAGpC,QAAM,iBAAiB,YACpB,MAAM,GAAG,EAAE,EACX,IAAI,CAAC,GAAG,MAAM,IAAI,EAAE,EAAE,KAAK,EAAE,OAAO,EAAE,EACtC,KAAK,MAAM;AAEd,QAAM,oBAAoB,cAAc,SAAS,IAC7C;AAAA;AAAA;AAAA,EAA8D,cAAc,IAAI,CAAC,MAAM,KAAK,EAAE,SAAS,KAAK,EAAE,WAAW,EAAE,EAAE,KAAK,IAAI,CAAC,KACvI;AAEJ,QAAM,WAAW,MAAM,IAAI;AAAA,IACzB;AAAA,MACE,EAAE,MAAM,UAAU,SAAS,eAAe;AAAA,MAC1C,EAAE,MAAM,QAAQ,SAAS,YAAY,YAAY,MAAM;AAAA;AAAA,EAAmC,cAAc,GAAG,iBAAiB,GAAG;AAAA,IACjI;AAAA,IACA,EAAE,aAAa,KAAK,WAAW,IAAK;AAAA,EACtC;AAEA,MAAI,CAAC,UAAU,QAAS,QAAO,CAAC;AAEhC,MAAI;AACF,QAAI,UAAU,SAAS,QAAQ,KAAK;AACpC,UAAM,aAAa,QAAQ,MAAM,uCAAuC;AACxE,QAAI,WAAY,WAAU,WAAW,CAAC;A
AEtC,UAAM,SAAS,KAAK,MAAM,OAAO;AACjC,QAAI,CAAC,MAAM,QAAQ,OAAO,KAAK,EAAG,QAAO,CAAC;AAE1C,UAAM,OAAM,oBAAI,KAAK,GAAE,YAAY;AACnC,WAAO,OAAO,MACX,OAAO,CAAC,MAAW,EAAE,aAAa,EAAE,eAAe,EAAE,aAAa,EAClE,IAAI,CAAC,OAAY;AAAA,MAChB,IAAI,OAAO,WAAW,QAAQ,EAAE,OAAO,EAAE,YAAY,EAAE,WAAW,EAAE,OAAO,KAAK,EAAE,MAAM,GAAG,EAAE,CAAC;AAAA,MAC9F,UAAU,EAAE,YAAY;AAAA,MACxB,WAAW,OAAO,EAAE,SAAS;AAAA,MAC7B,eAAe,OAAO,EAAE,aAAa;AAAA,MACrC,iBAAiB,OAAO,EAAE,mBAAmB,EAAE;AAAA,MAC/C,aAAa,OAAO,EAAE,WAAW;AAAA,MACjC,YAAY,OAAO,EAAE,eAAe,WAAW,EAAE,aAAa;AAAA,MAC9D,eAAe,MAAM,QAAQ,EAAE,WAAW,IAAI,EAAE,YAAY,SAAS;AAAA,MACrE,uBAAuB,MAAM,QAAQ,EAAE,WAAW,IAAI,EAAE,cAAc,CAAC;AAAA,MACvE,WAAW;AAAA,MACX,kBAAkB;AAAA,IACpB,EAAE;AAAA,EACN,QAAQ;AACN,QAAI,KAAK,4CAA4C;AACrD,WAAO,CAAC;AAAA,EACV;AACF;AAQO,SAAS,8BACd,OACA,OACA,WAAmB,MACJ;AACf,MAAI,MAAM,WAAW,EAAG,QAAO;AAI/B,QAAM,QAAkB;AAAA,IACtB;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,EACF;AAEA,MAAI,aAAa,MAAM,KAAK,IAAI,EAAE;AAElC,aAAW,QAAQ,OAAO;AACxB,UAAM,OAAO,OAAO,KAAK,SAAS,OAAO,KAAK,aAAa,oBAAe,KAAK,WAAW;AAC1F,QAAI,aAAa,KAAK,SAAS,IAAI,SAAU;AAC7C,UAAM,KAAK,IAAI;AACf,kBAAc,KAAK,SAAS;AAAA,EAC9B;AAEA,MAAI,MAAM,UAAU,EAAG,QAAO;AAC9B,QAAM,KAAK,EAAE;AACb,SAAO,MAAM,KAAK,IAAI;AACxB;AAWA,eAAsB,4BAA4B,SAGnB;AAC7B,MAAI;AACF,UAAM,MAAM,IAAI,kBAAkB,QAAQ,aAAa;AACvD,QAAI,CAAC,IAAI,YAAY,GAAG;AACtB,UAAI,MAAM,8DAAyD;AACnE,aAAO,CAAC;AAAA,IACV;AAEA,UAAM,cAAc,MAAM,gBAAgB,QAAQ,SAAS;AAC3D,QAAI,YAAY,SAAS,GAAG;AAC1B,UAAI,MAAM,sBAAsB,YAAY,MAAM,qCAAgC;AAClF,aAAO,CAAC;AAAA,IACV;AAEA,UAAM,gBAAgB,MAAM,qBAAqB,QAAQ,SAAS;AAElE,UAAM,WAAW,MAAM,2BAA2B,aAAa,KAAK,cAAc,KAAK;AACvF,QAAI,SAAS,WAAW,GAAG;AACzB,UAAI,MAAM,oDAAoD;AAC9D,aAAO,cAAc;AAAA,IACvB;AAGA,UAAM,UAAU,IAAI,IAAI,cAAc,MAAM,IAAI,CAAC,MAAM,CAAC,EAAE,IAAI,CAAC,CAAC,CAAC;AACjE,eAAW,QAAQ,UAAU;AAC3B,UAAI,QAAQ,IAAI,KAAK,EAAE,GAAG;AAExB,cAAM,WAAW,QAAQ,IAAI,KAAK,EAAE;AACpC,iBAAS,oBAAmB,oBAAI,KAAK,GAAE,YAAY;AACnD,iBAAS,iBAAiB,KAAK;AAC/B,iBAAS,aAAa,KAAK,IAAI,GAAG,SAAS,aAAa,IAAI;AAAA,MAC9D,OAAO;AACL,gBAAQ,IAAI,KAAK,IAAI,IAAI;AAAA,MAC3B;AAAA,IACF;AAEA,UAAM,WAAW,CAAC,GAAG,QAAQ,OAAO,CAA
C;AACrC,UAAM,sBAAsB,QAAQ,WAAW;AAAA,MAC7C,OAAO;AAAA,MACP,YAAW,oBAAI,KAAK,GAAE,YAAY;AAAA,MAClC,0BAA0B,YAAY;AAAA,IACxC,CAAC;AAED,QAAI,MAAM,6BAA6B,SAAS,MAAM,iBAAiB,SAAS,MAAM,QAAQ;AAC9F,WAAO;AAAA,EACT,SAAS,OAAO;AACd,QAAI,KAAK,mDAAmD,iBAAiB,QAAQ,MAAM,UAAU,OAAO,KAAK,CAAC,EAAE;AACpH,WAAO,CAAC;AAAA,EACV;AACF;AASA,eAAsB,wBAAwB,SAIf;AAC7B,MAAI,CAAC,QAAQ,oBAAoB;AAC/B,WAAO,CAAC;AAAA,EACV;AACA,SAAO,4BAA4B;AAAA,IACjC,WAAW,QAAQ;AAAA,IACnB,eAAe,QAAQ;AAAA,EACzB,CAAC;AACH;AAMA,eAAsB,6BACpB,WAC4B;AAC5B,QAAM,QAAQ,MAAM,qBAAqB,SAAS;AAClD,SAAO,MAAM;AACf;","names":[]}
@@ -0,0 +1,23 @@
1
+ // openclaw-engram: Local-first memory plugin
2
+ import {
3
+ makeEdgeId,
4
+ readChainIndex,
5
+ resolveChainsDir,
6
+ scoreStitchCandidate,
7
+ stitchCausalChain,
8
+ validateCausalEdge,
9
+ writeChainIndex
10
+ } from "./chunk-3GARBXWR.js";
11
+ import "./chunk-AFMNQR7H.js";
12
+ import "./chunk-DEIBZP3O.js";
13
+ import "./chunk-SSIIJJKA.js";
14
+ export {
15
+ makeEdgeId,
16
+ readChainIndex,
17
+ resolveChainsDir,
18
+ scoreStitchCandidate,
19
+ stitchCausalChain,
20
+ validateCausalEdge,
21
+ writeChainIndex
22
+ };
23
+ //# sourceMappingURL=causal-chain-OJCLNNEH.js.map