agentflow-dashboard 0.8.4 → 0.9.0

@@ -0,0 +1,3223 @@
1
+ import {
2
+ computePatternSignature,
3
+ computeToolPatternSignature,
4
+ createGuardedBuilder,
5
+ createVault,
6
+ detectDrift,
7
+ evaluateAssertions,
8
+ extractDecisionsFromLangChain,
9
+ extractDecisionsFromNodes,
10
+ extractDecisionsFromSession,
11
+ extractWikilinks,
12
+ findVariantsWithModel,
13
+ getAgentBriefingData,
14
+ getDecisionReplayData,
15
+ getEfficiency,
16
+ parseEntity,
17
+ serializeEntity,
18
+ trackConformanceTrend,
19
+ vaultEntityCount,
20
+ vaultFingerprint
21
+ } from "./chunk-LZXEKWRO.js";
22
+ import "./chunk-3RG5ZIWI.js";
23
+
24
+ // ../../../soma/dist/index.js
25
+ import { existsSync, readFileSync, writeFileSync } from "fs";
26
+ import { dirname } from "path";
27
+ import { mkdirSync } from "fs";
28
+ import { existsSync as existsSync2, mkdirSync as mkdirSync2, readdirSync, readFileSync as readFileSync2, renameSync, writeFileSync as writeFileSync2 } from "fs";
29
+ import { join, basename, extname } from "path";
30
+ import { createHash } from "crypto";
31
+ import { existsSync as existsSync3, readFileSync as readFileSync3, writeFileSync as writeFileSync3, mkdirSync as mkdirSync3 } from "fs";
32
+ import { dirname as dirname2 } from "path";
33
+ import { existsSync as existsSync4, readFileSync as readFileSync4, writeFileSync as writeFileSync4, mkdirSync as mkdirSync4 } from "fs";
34
+ import { createHash as createHash2 } from "crypto";
35
+ import { dirname as dirname3 } from "path";
36
+ import { existsSync as existsSync5, readFileSync as readFileSync5, writeFileSync as writeFileSync5, mkdirSync as mkdirSync5 } from "fs";
37
+ import { createHash as createHash3 } from "crypto";
38
+ import { dirname as dirname4 } from "path";
39
+ import { existsSync as existsSync6, readFileSync as readFileSync6, writeFileSync as writeFileSync6, mkdirSync as mkdirSync6 } from "fs";
40
+ import { dirname as dirname5 } from "path";
41
+ import { existsSync as existsSync7, readdirSync as readdirSync2, readFileSync as readFileSync7, statSync } from "fs";
42
+ import { join as join2, extname as extname2, basename as basename2 } from "path";
43
+ import { existsSync as existsSync8, readFileSync as readFileSync8 } from "fs";
44
+ import { join as join3 } from "path";
45
+ import { homedir } from "os";
46
+ var KNOWLEDGE_LAYERS = ["archive", "working", "emerging", "canon"];
47
+ var LAYER_SEMANTIC_WEIGHTS = {
48
+ archive: "historical",
49
+ working: "contextual",
50
+ emerging: "advisory",
51
+ canon: "mandatory"
52
+ };
53
+ var ENTITY_STATUSES = {
54
+ agent: ["active", "inactive", "deprecated"],
55
+ execution: ["completed", "failed", "running", "pending"],
56
+ archetype: ["active", "proposed", "deprecated"],
57
+ insight: ["active", "superseded", "rejected"],
58
+ policy: ["active", "draft", "deprecated", "enforcing"],
59
+ decision: ["active", "superseded", "reversed", "flagged"],
60
+ assumption: ["active", "validated", "invalidated"],
61
+ constraint: ["active", "resolved", "deprecated"],
62
+ contradiction: ["active", "resolved"],
63
+ synthesis: ["active", "superseded"],
64
+ content: ["active", "superseded"]
65
+ };
66
+ function resolveAgentId(entity) {
67
+ return entity.agentId ?? entity.agent_id;
68
+ }
69
+ var WORKER_WRITE_PERMISSIONS = {
70
+ harvester: ["archive"],
71
+ reconciler: ["archive"],
72
+ synthesizer: ["emerging"],
73
+ cartographer: ["emerging"],
74
+ governance: ["canon"],
75
+ "team-context": ["working"],
76
+ "policy-bridge": []
77
+ // read-only
78
+ };
79
+ var LAYER_REQUIRED_FIELDS = {
80
+ archive: ["layer", "source_worker"],
81
+ working: ["layer", "source_worker", "team_id", "decay_at"],
82
+ emerging: ["layer", "source_worker", "confidence_score", "evidence_links", "decay_at"],
83
+ canon: ["layer", "source_worker", "ratified_by", "ratified_at", "origin_l3_id"]
84
+ };
85
+ function cosineSimilarity(a, b) {
86
+ if (a.length !== b.length || a.length === 0) return 0;
87
+ let dotProduct = 0;
88
+ let normA = 0;
89
+ let normB = 0;
90
+ for (let i = 0; i < a.length; i++) {
91
+ dotProduct += a[i] * b[i];
92
+ normA += a[i] * a[i];
93
+ normB += b[i] * b[i];
94
+ }
95
+ const denominator = Math.sqrt(normA) * Math.sqrt(normB);
96
+ return denominator === 0 ? 0 : dotProduct / denominator;
97
+ }
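
A quick sanity check of the similarity helper above, with made-up vectors:

```js
cosineSimilarity([1, 0], [2, 0]); // 1 (same direction; magnitude is normalized away)
cosineSimilarity([1, 0], [0, 5]); // 0 (orthogonal)
cosineSimilarity([1, 2], []);     // 0 (length-mismatch guard)
```
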
98
+ function createJsonVectorStore(filePath) {
99
+ let vectors = /* @__PURE__ */ new Map();
100
+ let dirty = false;
101
+ function load() {
102
+ if (existsSync(filePath)) {
103
+ try {
104
+ const data = JSON.parse(readFileSync(filePath, "utf-8"));
105
+ vectors = new Map(data.map((v) => [v.id, v]));
106
+ } catch {
107
+ vectors = /* @__PURE__ */ new Map();
108
+ }
109
+ }
110
+ }
111
+ function save() {
112
+ if (!dirty) return;
113
+ const dir = dirname(filePath);
114
+ if (!existsSync(dir)) mkdirSync(dir, { recursive: true });
115
+ writeFileSync(filePath, JSON.stringify([...vectors.values()]), "utf-8");
116
+ dirty = false;
117
+ }
118
+ function matchesFilter(metadata, filter) {
119
+ for (const [key, value] of Object.entries(filter)) {
120
+ const actual = metadata[key];
121
+ if (Array.isArray(value)) {
122
+ if (!value.includes(actual)) return false;
123
+ } else if (typeof value === "object" && value !== null) {
124
+ const range = value;
125
+ const num = typeof actual === "number" ? actual : Number.NaN;
126
+ if (range.min !== void 0 && num < range.min) return false;
127
+ if (range.max !== void 0 && num > range.max) return false;
128
+ } else {
129
+ if (actual !== value) return false;
130
+ }
131
+ }
132
+ return true;
133
+ }
134
+ load();
135
+ return {
136
+ async upsert(id, vector, metadata) {
137
+ vectors.set(id, { id, vector, metadata });
138
+ dirty = true;
139
+ save();
140
+ },
141
+ async delete(id) {
142
+ vectors.delete(id);
143
+ dirty = true;
144
+ save();
145
+ },
146
+ async search(queryVector, options) {
147
+ const limit = (options == null ? void 0 : options.limit) ?? 10;
148
+ const filter = options == null ? void 0 : options.filter;
149
+ const scored = [];
150
+ for (const stored of vectors.values()) {
151
+ if (filter && !matchesFilter(stored.metadata, filter)) continue;
152
+ const score = cosineSimilarity(queryVector, stored.vector);
153
+ scored.push({ id: stored.id, score, metadata: stored.metadata });
154
+ }
155
+ scored.sort((a, b) => b.score - a.score);
156
+ return scored.slice(0, limit);
157
+ },
158
+ async count() {
159
+ return vectors.size;
160
+ }
161
+ };
162
+ }
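
A minimal usage sketch for the JSON-file store (the path and metadata are hypothetical). Filters accept an exact value, an array (one-of), or a `{ min, max }` range:

```js
const store = createJsonVectorStore("/tmp/soma-vectors.json"); // hypothetical path
await store.upsert("note-a", [1, 0], { layer: "emerging", confidence: 0.8 });
await store.upsert("note-b", [0, 1], { layer: "archive", confidence: 0.2 });

const hits = await store.search([1, 0], {
  limit: 5,
  filter: { layer: ["emerging", "canon"], confidence: { min: 0.5 } }
});
// hits -> [{ id: "note-a", score: 1, metadata: { layer: "emerging", confidence: 0.8 } }]
```
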
163
+ function createLanceVectorStore(_dirPath) {
164
+ throw new Error(
165
+ "LanceDB vector store not yet implemented. Install @lancedb/lancedb and check back. For now, use createJsonVectorStore() as the default."
166
+ );
167
+ }
168
+ function createMilvusVectorStore(_url) {
169
+ throw new Error(
170
+ "Milvus vector store not yet implemented. For now, use createJsonVectorStore() as the default."
171
+ );
172
+ }
173
+ var _layersConfig = {};
174
+ function setLayersConfig(config) {
175
+ _layersConfig = config;
176
+ }
177
+ function isLayerEnabled(layer) {
178
+ var _a;
179
+ if (layer === "working") return ((_a = _layersConfig.working) == null ? void 0 : _a.enabled) ?? false;
180
+ return true;
181
+ }
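
Only the `working` (L2) layer is config-gated; every other layer is always on. A sketch:

```js
isLayerEnabled("working"); // false (defaults off until configured)
setLayersConfig({ working: { enabled: true } });
isLayerEnabled("working"); // true
isLayerEnabled("canon");   // true (never gated)
```
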
182
+ function validateLayerFields(entity) {
183
+ const errors = [];
184
+ const layer = entity.layer;
185
+ if (!layer) return [];
186
+ if (layer === "working" && !isLayerEnabled("working")) return [];
187
+ const requiredFields = LAYER_REQUIRED_FIELDS[layer];
188
+ if (!requiredFields) {
189
+ errors.push({ field: "layer", message: `Unknown layer: ${layer}` });
190
+ return errors;
191
+ }
192
+ for (const field of requiredFields) {
193
+ const value = entity[field];
194
+ if (value === void 0 || value === null) {
195
+ errors.push({ field, message: `Missing required field '${field}' for layer '${layer}'` });
196
+ }
197
+ }
198
+ if (layer === "working" && entity.team_id !== void 0 && entity.team_id === "") {
199
+ errors.push({ field: "team_id", message: "team_id must be a non-empty string for L2 entries" });
200
+ }
201
+ if (layer === "emerging" && entity.confidence_score !== void 0) {
202
+ if (entity.confidence_score < 0 || entity.confidence_score > 1) {
203
+ errors.push({ field: "confidence_score", message: "confidence_score must be between 0.0 and 1.0" });
204
+ }
205
+ }
206
+ if (layer === "emerging" && entity.evidence_links !== void 0 && !Array.isArray(entity.evidence_links)) {
207
+ errors.push({ field: "evidence_links", message: "evidence_links must be an array" });
208
+ }
209
+ if ((layer === "archive" || layer === "canon") && entity.decay_at) {
210
+ errors.push({ field: "decay_at", message: `L${layer === "archive" ? "1" : "4"} entries must not have decay_at` });
211
+ }
212
+ return errors;
213
+ }
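
What the validator accepts and rejects, following the `LAYER_REQUIRED_FIELDS` table above; the field values here are hypothetical:

```js
// A complete L3 ("emerging") entry passes clean.
validateLayerFields({
  layer: "emerging",
  source_worker: "synthesizer",
  confidence_score: 0.6,
  evidence_links: ["exec-123"],
  decay_at: "2099-01-01T00:00:00.000Z"
}); // -> []

// An L4 ("canon") entry missing ratification fields, plus a forbidden decay_at.
validateLayerFields({ layer: "canon", source_worker: "governance", decay_at: "2099-01-01" });
// -> errors for ratified_by, ratified_at, origin_l3_id, and "L4 entries must not have decay_at"
```
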
214
+ var LayerPermissionError = class extends Error {
215
+ constructor(worker, layer, permittedLayers) {
216
+ super(`Worker '${worker}' is not authorized to write to layer '${layer}'. Permitted layers: [${permittedLayers.join(", ")}]`);
217
+ this.worker = worker;
218
+ this.layer = layer;
219
+ this.permittedLayers = permittedLayers;
220
+ this.name = "LayerPermissionError";
221
+ }
222
+ };
223
+ function enforceWritePermission(worker, layer) {
224
+ const permitted = WORKER_WRITE_PERMISSIONS[worker];
225
+ if (!permitted) {
226
+ throw new LayerPermissionError(worker, layer, []);
227
+ }
228
+ if (!permitted.includes(layer)) {
229
+ throw new LayerPermissionError(worker, layer, permitted);
230
+ }
231
+ }
232
+ function canWrite(worker, layer) {
233
+ const permitted = WORKER_WRITE_PERMISSIONS[worker];
234
+ return permitted !== void 0 && permitted.includes(layer);
235
+ }
236
+ function queryByLayer(vault, layer, filter) {
237
+ return vault.listByLayer(layer, filter);
238
+ }
239
+ function writeToLayer(vault, worker, layer, entity) {
240
+ if (!isLayerEnabled(layer)) {
241
+ throw new Error(`Layer '${layer}' is disabled. Set layers.working.enabled=true to enable.`);
242
+ }
243
+ enforceWritePermission(worker, layer);
244
+ const layered = {
245
+ ...entity,
246
+ layer,
247
+ source_worker: worker
248
+ };
249
+ const errors = validateLayerFields(layered);
250
+ if (errors.length > 0) {
251
+ const messages = errors.map((e) => `${e.field}: ${e.message}`).join("; ");
252
+ throw new Error(`Layer validation failed for '${layer}': ${messages}`);
253
+ }
254
+ return vault.create(layered);
255
+ }
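
The permission and validation gates composed, as a sketch; `vault` is assumed to come from `createVault` (re-exported by the chunk imported at the top of this file):

```js
// harvester may write L1; writeToLayer injects layer + source_worker before validating.
writeToLayer(vault, "harvester", "archive", { type: "execution", name: "run-1", body: "..." });

// synthesizer is only permitted ["emerging"], so this throws LayerPermissionError.
writeToLayer(vault, "synthesizer", "canon", { type: "policy", name: "p1" });

canWrite("policy-bridge", "archive"); // false (declared read-only above)
```
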
256
+ function getNodes(graph) {
257
+ if (graph.nodes instanceof Map) {
258
+ return [...graph.nodes.values()];
259
+ }
260
+ return Object.values(graph.nodes);
261
+ }
262
+ function getNode(graph, id) {
263
+ if (graph.nodes instanceof Map) {
264
+ return graph.nodes.get(id);
265
+ }
266
+ return graph.nodes[id];
267
+ }
268
+ function isExecutionGraph(obj) {
269
+ if (!obj || typeof obj !== "object") return false;
270
+ const o = obj;
271
+ return "nodes" in o && (o.nodes instanceof Map || typeof o.nodes === "object" && o.nodes !== null) && ("edges" in o && Array.isArray(o.edges)) && ("agentId" in o && typeof o.agentId === "string");
272
+ }
273
+ function extractDecisionsFromGraph(graph) {
274
+ var _a;
275
+ const decisions = [];
276
+ const nodes = getNodes(graph);
277
+ const nonRootNodes = nodes.filter((n) => n.parentId !== null);
278
+ const hasExplicitDecisionEvents = (_a = graph.events) == null ? void 0 : _a.some((e) => e.eventType === "decision" || e.eventType === "subagent_spawn");
279
+ if (nonRootNodes.length === 0 && !hasExplicitDecisionEvents) return decisions;
280
+ for (const node of nodes) {
281
+ if (node.type === "tool") {
282
+ decisions.push({
283
+ decision_type: "tool_choice",
284
+ choice: node.name,
285
+ outcome: node.status,
286
+ decision_context: {
287
+ ...node.metadata,
288
+ ...node.state ?? {},
289
+ duration: node.endTime != null && node.startTime ? node.endTime - node.startTime : void 0
290
+ },
291
+ agent_id: graph.agentId,
292
+ graph_id: graph.id,
293
+ node_id: node.id
294
+ });
295
+ }
296
+ }
297
+ for (const edge of graph.edges) {
298
+ if (edge.type === "branched") {
299
+ const targetNode = getNode(graph, edge.to);
300
+ const sourceNode = getNode(graph, edge.from);
301
+ if (!targetNode || !sourceNode) continue;
302
+ const siblings = nodes.filter(
303
+ (n) => n.parentId === sourceNode.id && n.id !== targetNode.id
304
+ );
305
+ decisions.push({
306
+ decision_type: "branch",
307
+ choice: targetNode.name,
308
+ alternatives: siblings.map((s) => s.name),
309
+ outcome: targetNode.status,
310
+ decision_context: {
311
+ source_node: sourceNode.name,
312
+ ...targetNode.metadata
313
+ },
314
+ agent_id: graph.agentId,
315
+ graph_id: graph.id,
316
+ node_id: targetNode.id
317
+ });
318
+ }
319
+ }
320
+ const retryEdges = graph.edges.filter((e) => e.type === "retried");
321
+ if (retryEdges.length > 0) {
322
+ const retryGroups = /* @__PURE__ */ new Map();
323
+ for (const edge of retryEdges) {
324
+ const group = retryGroups.get(edge.to) ?? [];
325
+ group.push(edge);
326
+ retryGroups.set(edge.to, group);
327
+ }
328
+ for (const [targetId, edges] of retryGroups) {
329
+ const targetNode = getNode(graph, targetId);
330
+ if (!targetNode) continue;
331
+ decisions.push({
332
+ decision_type: "retry",
333
+ choice: targetNode.name,
334
+ outcome: targetNode.status,
335
+ decision_context: {
336
+ retry_count: edges.length,
337
+ ...targetNode.metadata
338
+ },
339
+ agent_id: graph.agentId,
340
+ graph_id: graph.id,
341
+ node_id: targetNode.id
342
+ });
343
+ }
344
+ }
345
+ for (const node of nodes) {
346
+ if (node.type === "subagent") {
347
+ decisions.push({
348
+ decision_type: "delegation",
349
+ choice: node.name,
350
+ outcome: node.status,
351
+ decision_context: {
352
+ parent_agent: graph.agentId,
353
+ ...node.metadata
354
+ },
355
+ agent_id: graph.agentId,
356
+ graph_id: graph.id,
357
+ node_id: node.id
358
+ });
359
+ }
360
+ }
361
+ if (graph.events) {
362
+ for (const event of graph.events) {
363
+ if (event.eventType === "subagent_spawn") {
364
+ const alreadyCaptured = decisions.some(
365
+ (d) => d.decision_type === "delegation" && d.node_id === event.nodeId
366
+ );
367
+ if (!alreadyCaptured) {
368
+ decisions.push({
369
+ decision_type: "delegation",
370
+ choice: String(event.data.name ?? event.data.agentId ?? `unattributed:node-${event.nodeId}`),
371
+ outcome: "spawned",
372
+ decision_context: { ...event.data },
373
+ agent_id: graph.agentId,
374
+ graph_id: graph.id,
375
+ node_id: event.nodeId
376
+ });
377
+ }
378
+ }
379
+ }
380
+ }
381
+ const failedNodes = nodes.filter((n) => n.status === "failed");
382
+ for (const failedNode of failedNodes) {
383
+ const path = [];
384
+ let current = failedNode;
385
+ while (current) {
386
+ path.unshift(current.name);
387
+ current = current.parentId ? getNode(graph, current.parentId) : void 0;
388
+ }
389
+ decisions.push({
390
+ decision_type: "failure",
391
+ choice: failedNode.name,
392
+ outcome: "failed",
393
+ decision_context: {
394
+ error: failedNode.metadata.error ?? failedNode.metadata.errorMessage,
395
+ error_stack: failedNode.metadata.errorStack,
396
+ failure_path: path,
397
+ ...failedNode.metadata
398
+ },
399
+ agent_id: graph.agentId,
400
+ graph_id: graph.id,
401
+ node_id: failedNode.id
402
+ });
403
+ }
404
+ if (graph.events) {
405
+ for (const event of graph.events) {
406
+ if (event.eventType === "custom" && event.data.action === "state_update") {
407
+ const matchingDecision = decisions.find((d) => d.node_id === event.nodeId);
408
+ if (matchingDecision) {
409
+ const { action, ...stateData } = event.data;
410
+ Object.assign(matchingDecision.decision_context, stateData);
411
+ }
412
+ }
413
+ }
414
+ }
415
+ if (graph.events) {
416
+ for (const event of graph.events) {
417
+ if (event.eventType === "decision") {
418
+ decisions.push({
419
+ decision_type: event.data.decision_type ?? "tool_choice",
420
+ choice: String(event.data.choice ?? "unattributed"),
421
+ alternatives: Array.isArray(event.data.alternatives) ? event.data.alternatives.map(String) : void 0,
422
+ rationale: event.data.rationale ? String(event.data.rationale) : void 0,
423
+ outcome: String(event.data.outcome ?? "unattributed"),
424
+ decision_context: { ...event.data },
425
+ agent_id: graph.agentId,
426
+ graph_id: graph.id,
427
+ node_id: event.nodeId
428
+ });
429
+ }
430
+ }
431
+ }
432
+ return decisions;
433
+ }
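
A minimal graph and the single decision it yields; the node shapes are inferred from the accessors above and all values are hypothetical:

```js
const graph = {
  id: "g1",
  agentId: "researcher",
  edges: [],
  events: [],
  nodes: new Map([
    ["n0", { id: "n0", parentId: null, type: "agent", name: "root", status: "completed", metadata: {} }],
    ["n1", { id: "n1", parentId: "n0", type: "tool", name: "web_search", status: "completed",
             metadata: {}, startTime: 1, endTime: 5 }]
  ])
};
extractDecisionsFromGraph(graph);
// -> [{ decision_type: "tool_choice", choice: "web_search", outcome: "completed",
//       decision_context: { duration: 4 }, agent_id: "researcher", graph_id: "g1", node_id: "n1" }]
```
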
434
+ function decisionsToEntities(decisions) {
435
+ return decisions.map((d) => ({
436
+ type: "decision",
437
+ name: `${d.decision_type}: ${d.choice} (${d.agent_id})`,
438
+ status: "active",
439
+ decision_type: d.decision_type,
440
+ choice: d.choice,
441
+ alternatives: d.alternatives,
442
+ rationale: d.rationale ?? "",
443
+ outcome: d.outcome,
444
+ decision_context: d.decision_context,
445
+ graph_id: d.graph_id,
446
+ agent_id: d.agent_id,
447
+ claim: `Agent ${d.agent_id} ${d.decision_type === "tool_choice" ? "used tool" : d.decision_type === "branch" ? "chose branch" : d.decision_type === "retry" ? "retried" : d.decision_type === "delegation" ? "delegated to" : "failed at"} "${d.choice}"`,
448
+ confidence: "medium",
449
+ evidence: [],
450
+ sourceIds: [d.graph_id],
451
+ tags: ["graph-inferred", d.decision_type],
452
+ related: [`execution/${d.graph_id}`],
453
+ body: buildDecisionBody(d)
454
+ }));
455
+ }
456
+ function buildDecisionBody(d) {
457
+ var _a;
458
+ const lines = [`## ${d.decision_type}: ${d.choice}
459
+ `];
460
+ lines.push(`Agent **${d.agent_id}** made a ${d.decision_type} decision.`);
461
+ lines.push(`- **Choice:** ${d.choice}`);
462
+ if ((_a = d.alternatives) == null ? void 0 : _a.length) lines.push(`- **Alternatives:** ${d.alternatives.join(", ")}`);
463
+ if (d.rationale) lines.push(`- **Rationale:** ${d.rationale}`);
464
+ lines.push(`- **Outcome:** ${d.outcome}`);
465
+ if (d.decision_context.error) lines.push(`- **Error:** ${d.decision_context.error}`);
466
+ return lines.join("\n");
467
+ }
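
How an extracted decision reads once it becomes a vault entity (hypothetical input):

```js
const [entity] = decisionsToEntities([{
  decision_type: "retry",
  choice: "fetch_page",
  outcome: "completed",
  decision_context: { retry_count: 2 },
  agent_id: "crawler",
  graph_id: "g1",
  node_id: "n3"
}]);
entity.name;  // 'retry: fetch_page (crawler)'
entity.claim; // 'Agent crawler retried "fetch_page"'
```
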
468
+ var DEFAULT_MAX_LENGTH = 2e3;
469
+ var DEFAULT_MIN_LENGTH = 50;
470
+ var DEFAULT_DENYLIST = ["process", "sleep", "poll", "clear", "remove"];
471
+ function extractContentFromSession(events, config) {
472
+ if (!config.enabled) return [];
473
+ const maxLen = config.maxContentLength ?? DEFAULT_MAX_LENGTH;
474
+ const minLen = config.minContentLength ?? DEFAULT_MIN_LENGTH;
475
+ const denylist = new Set(config.toolDenylist ?? DEFAULT_DENYLIST);
476
+ const allowlist = config.toolAllowlist ? new Set(config.toolAllowlist) : null;
477
+ const extractToolResults = config.extractToolResults ?? true;
478
+ const extractReasoning = config.extractReasoning ?? true;
479
+ const extractOutputs = config.extractOutputs ?? true;
480
+ const artifacts = [];
481
+ let lastToolName = "";
482
+ for (const evt of events) {
483
+ const evtType = evt.type;
484
+ if (evtType === "message") {
485
+ const msg = evt.message;
486
+ if (!msg) continue;
487
+ const role = msg.role;
488
+ const content = msg.content;
489
+ const blocks = Array.isArray(content) ? content : [];
490
+ if (role === "assistant") {
491
+ for (const block of blocks) {
492
+ const blockType = block.type;
493
+ if (blockType === "thinking" && extractReasoning) {
494
+ const text = String(block.thinking ?? block.text ?? "");
495
+ if (text.length >= minLen) {
496
+ artifacts.push({
497
+ contentType: "agent_reasoning",
498
+ body: text.slice(0, maxLen),
499
+ tags: ["content", "reasoning"]
500
+ });
501
+ }
502
+ }
503
+ if (blockType === "toolCall" || blockType === "tool_use") {
504
+ lastToolName = String(block.name ?? "");
505
+ }
506
+ if (extractOutputs && (blockType === "toolCall" || blockType === "tool_use")) {
507
+ const toolName = String(block.name ?? "");
508
+ const input = block.input ?? block.arguments ?? {};
509
+ const inputStr = JSON.stringify(input);
510
+ if (toolName === "write" || toolName === "writeFile" || inputStr.includes("writeFileSync") || inputStr.includes("clemo-second-brain") || inputStr.includes("/inbox/")) {
511
+ const filePath = input.path ?? input.filePath ?? input.file_path ?? extractPathFromArgs(inputStr);
512
+ if (filePath) {
513
+ artifacts.push({
514
+ contentType: "generated_output",
515
+ sourceTool: toolName,
516
+ body: `Output written to: ${filePath}`,
517
+ filePath,
518
+ tags: ["content", "generated-output", toolName]
519
+ });
520
+ }
521
+ }
522
+ }
523
+ }
524
+ }
525
+ if (role === "toolResult" && extractToolResults) {
526
+ const toolName = lastToolName || "unknown";
527
+ if (allowlist && !allowlist.has(toolName)) continue;
528
+ if (!allowlist && denylist.has(toolName)) continue;
529
+ const text = blocksToText(blocks);
530
+ if (text.length >= minLen) {
531
+ artifacts.push({
532
+ contentType: "tool_result",
533
+ sourceTool: toolName,
534
+ body: text.slice(0, maxLen),
535
+ tags: ["content", "tool-result", toolName]
536
+ });
537
+ }
538
+ }
539
+ }
540
+ if (evtType === "tool_use" || evtType === "toolCall") {
541
+ lastToolName = String(evt.name ?? evt.toolName ?? "");
542
+ }
543
+ if (evtType === "tool_result" || evtType === "toolResult") {
544
+ if (!extractToolResults) continue;
545
+ const toolName = lastToolName || "unknown";
546
+ if (allowlist && !allowlist.has(toolName)) continue;
547
+ if (!allowlist && denylist.has(toolName)) continue;
548
+ const text = String(evt.content ?? evt.output ?? evt.text ?? "");
549
+ if (text.length >= minLen) {
550
+ artifacts.push({
551
+ contentType: "tool_result",
552
+ sourceTool: toolName,
553
+ body: text.slice(0, maxLen),
554
+ tags: ["content", "tool-result", toolName]
555
+ });
556
+ }
557
+ }
558
+ }
559
+ return artifacts;
560
+ }
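
A sketch of the extractor on a one-message session; the transcript is invented, and only blocks clearing `minContentLength` and the tool denylist survive:

```js
extractContentFromSession([{
  type: "message",
  message: {
    role: "assistant",
    content: [{ type: "thinking", thinking: "The retry loop spins because the mock clock never advances past the backoff deadline." }]
  }
}], { enabled: true, minContentLength: 20 });
// -> [{ contentType: "agent_reasoning", body: "The retry loop spins...", tags: ["content", "reasoning"] }]
```
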
561
+ function blocksToText(blocks) {
562
+ return blocks.map((b) => String(b.text ?? b.content ?? "")).filter(Boolean).join("\n");
563
+ }
564
+ function extractPathFromArgs(argsStr) {
565
+ const match = argsStr.match(/"(?:path|filePath|file_path|destination)"\s*:\s*"([^"]+)"/);
566
+ if (match == null ? void 0 : match[1]) return match[1];
567
+ const pathMatch = argsStr.match(/\/home\/[^"'\s]+\.\w+/);
568
+ return (pathMatch == null ? void 0 : pathMatch[0]) ?? null;
569
+ }
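
Both fallbacks of the path sniffer in action:

```js
extractPathFromArgs('{"file_path":"/srv/out/report.md"}'); // "/srv/out/report.md" (keyed match)
extractPathFromArgs('wrote /home/dev/notes.txt today');    // "/home/dev/notes.txt" (bare-path fallback)
extractPathFromArgs("no path here");                       // null
```
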
570
+ var DEFAULT_STATE_FILE = ".soma/harvester-state.json";
571
+ var jsonParser = (content) => {
572
+ const data = JSON.parse(content);
573
+ const events = Array.isArray(data) ? data : [data];
574
+ return { events };
575
+ };
576
+ var jsonlParser = (content) => {
577
+ const events = content.trim().split("\n").filter((line) => line.trim()).map((line) => JSON.parse(line));
578
+ return { events };
579
+ };
580
+ var markdownParser = (content, fileName) => {
581
+ return {
582
+ entities: [{
583
+ type: "note",
584
+ name: basename(fileName, ".md"),
585
+ body: content,
586
+ tags: ["inbox"]
587
+ }]
588
+ };
589
+ };
590
+ var DEFAULT_PARSERS = {
591
+ ".json": jsonParser,
592
+ ".jsonl": jsonlParser,
593
+ ".md": markdownParser
594
+ };
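
Parsers are pluggable per file extension: each receives `(content, fileName)` and returns `{ events?, entities? }`. A hypothetical CSV parser in that shape:

```js
const csvParser = (content, fileName) => ({
  entities: content.trim().split("\n").slice(1).map((line, i) => ({
    type: "note",
    name: `${basename(fileName, ".csv")}-row-${i}`, // hypothetical naming scheme
    body: line,
    tags: ["inbox", "csv"]
  }))
});
// registered via createHarvester(vault, { parsers: { ".csv": csvParser } })
```
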
595
+ function createHarvester(vault, config) {
596
+ void (config == null ? void 0 : config.concurrency);
597
+ const stateFile = (config == null ? void 0 : config.stateFile) ?? DEFAULT_STATE_FILE;
598
+ const parsers = { ...DEFAULT_PARSERS, ...config == null ? void 0 : config.parsers };
599
+ const contentConfig = (config == null ? void 0 : config.contentExtraction) ?? { enabled: false };
600
+ let state = { processedEventIds: /* @__PURE__ */ new Set(), lastProcessedTimestamp: 0 };
601
+ try {
602
+ if (existsSync2(stateFile)) {
603
+ const raw = JSON.parse(readFileSync2(stateFile, "utf-8"));
604
+ const currentCount = vaultEntityCount(vault.baseDir);
605
+ if (raw.entityCount == null && raw.vaultFingerprint) {
606
+ console.log("[Harvester] Migrating state from vaultFingerprint to entityCount");
607
+ state = { processedEventIds: /* @__PURE__ */ new Set(), lastProcessedTimestamp: 0, entityCount: currentCount };
608
+ } else if (raw.entityCount != null && currentCount < raw.entityCount) {
609
+ console.log(`[Harvester] Vault entity count decreased (${raw.entityCount} \u2192 ${currentCount}) \u2014 resetting state`);
610
+ state = { processedEventIds: /* @__PURE__ */ new Set(), lastProcessedTimestamp: 0, entityCount: currentCount };
611
+ } else {
612
+ state = {
613
+ processedEventIds: new Set(raw.processedEventIds ?? []),
614
+ lastProcessedTimestamp: raw.lastProcessedTimestamp ?? 0,
615
+ entityCount: currentCount
616
+ };
617
+ }
618
+ }
619
+ } catch {
620
+ }
621
+ function saveState() {
622
+ const dir = dirname(stateFile);
623
+ if (!existsSync2(dir)) mkdirSync2(dir, { recursive: true });
624
+ const raw = {
625
+ processedEventIds: [...state.processedEventIds].slice(-1e4),
626
+ // Keep last 10K
627
+ lastProcessedTimestamp: state.lastProcessedTimestamp,
628
+ entityCount: state.entityCount ?? vaultEntityCount(vault.baseDir)
629
+ };
630
+ writeFileSync2(stateFile, JSON.stringify(raw, null, 2), "utf-8");
631
+ }
632
+ function eventId(event) {
633
+ return `${event.agentId}-${event.timestamp}`;
634
+ }
635
+ function normalizeId(name) {
636
+ return name.toLowerCase().replace(/[^a-z0-9]+/g, "-").replace(/^-|-$/g, "");
637
+ }
638
+ function ensureAgentEntity(agentId) {
639
+ const id = normalizeId(agentId);
640
+ const existing = vault.read("agent", id);
641
+ if (!existing) {
642
+ writeToLayer(vault, "harvester", "archive", {
643
+ type: "agent",
644
+ id,
645
+ name: agentId,
646
+ agentId,
647
+ status: "active",
648
+ tags: ["agent-layer"]
649
+ });
650
+ }
651
+ }
652
+ function isDuplicateTrace(traceId) {
653
+ if (state.processedEventIds.has(traceId)) return true;
654
+ const existing = vault.list("execution", { limit: 5e3 });
655
+ return existing.some((e) => e.trace_id === traceId);
656
+ }
657
+ function createExecutionEntity(event, decisions) {
658
+ var _a;
659
+ const agentNormId = normalizeId(event.agentId);
660
+ const execId = `exec-${event.agentId}-${event.timestamp}`;
661
+ const traceId = `${event.agentId}-${event.timestamp}`;
662
+ if (isDuplicateTrace(traceId)) {
663
+ console.log(`[Harvester] Duplicate trace skipped: ${traceId}`);
664
+ return;
665
+ }
666
+ const eventData = event;
667
+ const metadata = eventData.metadata;
668
+ const aicp = metadata == null ? void 0 : metadata.aicp;
669
+ let allDecisions = decisions ? [...decisions] : [];
670
+ if (aicp == null ? void 0 : aicp.consulted) {
671
+ allDecisions.push({
672
+ action: "aicp-preflight",
673
+ outcome: aicp.proceed ? "ok" : "failed",
674
+ reasoning: `AICP: ${aicp.warnings ?? 0} warnings, ${aicp.recommendations ?? 0} recommendations`,
675
+ index: allDecisions.length
676
+ });
677
+ }
678
+ const decisionPattern = allDecisions.length > 0 ? computePatternSignature(allDecisions) : void 0;
679
+ writeToLayer(vault, "harvester", "archive", {
680
+ type: "execution",
681
+ id: normalizeId(execId),
682
+ name: `${event.agentId} execution at ${new Date(event.timestamp).toISOString()}`,
683
+ agentId: event.agentId,
684
+ agent_id: event.agentId,
685
+ trace_id: traceId,
686
+ source_system: "agentflow",
687
+ status: event.status === "completed" ? "completed" : event.status === "failed" ? "failed" : "running",
688
+ duration: event.duration,
689
+ nodeCount: event.nodeCount,
690
+ variant: event.pathSignature,
691
+ conformanceScore: (_a = event.processContext) == null ? void 0 : _a.conformanceScore,
692
+ trigger: "event",
693
+ decisions: allDecisions.length > 0 ? allDecisions : void 0,
694
+ decisionPattern,
695
+ tags: ["agent-layer", event.agentId],
696
+ related: [`agent/${agentNormId}`],
697
+ body: `Execution of ${event.agentId}. Duration: ${event.duration}ms. Nodes: ${event.nodeCount}. Status: ${event.status}.`
698
+ });
699
+ const agent = vault.read("agent", agentNormId);
700
+ if (agent) {
701
+ const execRef = `execution/${normalizeId(execId)}`;
702
+ if (!agent.related.includes(execRef)) {
703
+ const updatedRelated = [...agent.related, execRef].slice(-50);
704
+ vault.update(agentNormId, { related: updatedRelated });
705
+ }
706
+ }
707
+ }
708
+ function updateAgentProfile(event) {
709
+ const id = normalizeId(event.agentId);
710
+ const agent = vault.read("agent", id);
711
+ if (!agent) return;
712
+ const totalExec = (agent.totalExecutions ?? 0) + 1;
713
+ const failCount = agent.failureCount ?? 0;
714
+ const newFails = event.eventType === "execution.failed" ? failCount + 1 : failCount;
715
+ vault.update(id, {
716
+ totalExecutions: totalExec,
717
+ failureCount: newFails,
718
+ failureRate: totalExec > 0 ? newFails / totalExec : 0
719
+ });
720
+ }
721
+ return {
722
+ /**
723
+ * Ingest execution and pattern events from AgentFlow.
724
+ * Returns the number of events ingested (skipping already-processed).
725
+ */
726
+ async ingest(events) {
727
+ let ingested = 0;
728
+ for (const event of events) {
729
+ const eid = eventId(event);
730
+ if (state.processedEventIds.has(eid)) continue;
731
+ ensureAgentEntity(event.agentId);
732
+ if (event.eventType === "execution.completed" || event.eventType === "execution.failed") {
733
+ createExecutionEntity(event);
734
+ updateAgentProfile(event);
735
+ }
736
+ if (event.eventType === "pattern.discovered" || event.eventType === "pattern.updated") {
737
+ }
738
+ state.processedEventIds.add(eid);
739
+ state.lastProcessedTimestamp = Math.max(state.lastProcessedTimestamp, event.timestamp);
740
+ ingested++;
741
+ }
742
+ if (ingested > 0) saveState();
743
+ return ingested;
744
+ },
745
+ /**
746
+ * Process files from an inbox directory.
747
+ * Each file is parsed, entities created, then moved to processed/.
748
+ */
749
+ async processInbox(inboxDir) {
750
+ var _a, _b;
751
+ if (!existsSync2(inboxDir)) return 0;
752
+ const processedDir = join(inboxDir, "..", "processed");
753
+ const errorsDir = join(inboxDir, "..", "errors");
754
+ if (!existsSync2(processedDir)) mkdirSync2(processedDir, { recursive: true });
755
+ if (!existsSync2(errorsDir)) mkdirSync2(errorsDir, { recursive: true });
756
+ const supportedExts = Object.keys(parsers);
757
+ const files = readdirSync(inboxDir).filter((f) => supportedExts.includes(extname(f)));
758
+ let processed = 0;
759
+ for (const file of files) {
760
+ const filePath = join(inboxDir, file);
761
+ try {
762
+ const content = readFileSync2(filePath, "utf-8");
763
+ const ext = extname(file);
764
+ const parser = parsers[ext];
765
+ if (!parser) continue;
766
+ const result = parser(content, file);
767
+ if ((_a = result.events) == null ? void 0 : _a.length) {
768
+ for (const event of result.events) {
769
+ if (isExecutionGraph(event)) {
770
+ await this.ingestGraph(event);
771
+ }
772
+ }
773
+ await this.ingest(result.events.filter((e) => !isExecutionGraph(e)));
774
+ if (contentConfig.enabled) {
775
+ const rawEvents = ext === ".jsonl" ? content.trim().split("\n").filter(Boolean).map((l) => {
776
+ try {
777
+ return JSON.parse(l);
778
+ } catch {
779
+ return null;
780
+ }
781
+ }).filter(Boolean) : result.events;
782
+ const contentArtifacts = extractContentFromSession(rawEvents, contentConfig);
783
+ const firstEvt = result.events[0];
784
+ const execId = (firstEvt == null ? void 0 : firstEvt.agentId) ? `exec-${firstEvt.agentId}-${firstEvt.timestamp}` : null;
785
+ for (let ci = 0; ci < contentArtifacts.length; ci++) {
786
+ const artifact = contentArtifacts[ci];
787
+ const contentId = `content-${file.replace(/\.\w+$/, "")}-${ci}`;
788
+ try {
789
+ const agentIdStr = (firstEvt == null ? void 0 : firstEvt.agentId) ? String(firstEvt.agentId) : void 0;
790
+ writeToLayer(vault, "harvester", "archive", {
791
+ type: "content",
792
+ id: normalizeId(contentId),
793
+ name: `${artifact.contentType}: ${(artifact.sourceTool ?? artifact.contentType).slice(0, 40)}`,
794
+ status: "active",
795
+ content_type: artifact.contentType,
796
+ source_tool: artifact.sourceTool,
797
+ source_execution: execId ? normalizeId(execId) : void 0,
798
+ agent_id: agentIdStr,
799
+ agentId: agentIdStr,
800
+ file_path: artifact.filePath,
801
+ tags: agentIdStr ? [...artifact.tags, agentIdStr] : artifact.tags,
802
+ related: execId ? [`execution/${normalizeId(execId)}`] : [],
803
+ body: artifact.body
804
+ });
805
+ } catch {
806
+ }
807
+ }
808
+ }
809
+ }
810
+ if ((_b = result.entities) == null ? void 0 : _b.length) {
811
+ for (const entity of result.entities) {
812
+ writeToLayer(vault, "harvester", "archive", entity);
813
+ }
814
+ }
815
+ renameSync(filePath, join(processedDir, file));
816
+ processed++;
817
+ } catch (err) {
818
+ try {
819
+ renameSync(filePath, join(errorsDir, file));
820
+ } catch (moveErr) {
821
+ console.warn(`[Harvester] Failed to move ${file} to errors dir:`, moveErr.message);
822
+ }
823
+ console.error(`Harvester error processing ${file}:`, err);
824
+ }
825
+ }
826
+ return processed;
827
+ },
828
+ /**
829
+ * Ingest a full ExecutionGraph, extracting decisions and writing them
830
+ * to L1 with a stable trace_id (decision-<graphId>-<slugified decision name>).
831
+ * Returns the number of decision entities created (or 1 when only the rolled-up execution entity was written).
832
+ */
833
+ async ingestGraph(graph) {
834
+ const graphNodes = graph.nodes;
835
+ const normalizedDecisions = extractDecisionsFromNodes(graphNodes);
836
+ const pattern = normalizedDecisions.length > 0 ? computePatternSignature(normalizedDecisions) : void 0;
837
+ if (graph.agentId && normalizedDecisions.length > 0) {
838
+ const execTraceId = `exec-${graph.agentId}-${Date.now()}`;
839
+ if (!isDuplicateTrace(execTraceId)) {
840
+ try {
841
+ writeToLayer(vault, "harvester", "archive", {
842
+ type: "execution",
843
+ id: normalizeId(execTraceId),
844
+ name: `${graph.agentId} execution`,
845
+ agentId: graph.agentId,
846
+ agent_id: graph.agentId,
847
+ trace_id: execTraceId,
848
+ source_system: "agentflow-graph",
849
+ status: graph.status ?? "completed",
850
+ decisions: normalizedDecisions,
851
+ decisionPattern: pattern,
852
+ tags: ["agent-layer", graph.agentId],
853
+ related: [],
854
+ body: ""
855
+ });
856
+ state.processedEventIds.add(execTraceId);
857
+ } catch {
858
+ }
859
+ }
860
+ }
861
+ const decisions = extractDecisionsFromGraph(graph);
862
+ if (decisions.length === 0) return normalizedDecisions.length > 0 ? 1 : 0;
863
+ const entities = decisionsToEntities(decisions);
864
+ let created = 0;
865
+ for (const entity of entities) {
866
+ const traceId = `decision-${entity.graph_id}-${entity.name.replace(/[^a-z0-9]+/gi, "-").toLowerCase()}`;
867
+ if (isDuplicateTrace(traceId)) continue;
868
+ try {
869
+ writeToLayer(vault, "harvester", "archive", {
870
+ ...entity,
871
+ trace_id: traceId,
872
+ source_system: "agentflow-graph"
873
+ });
874
+ state.processedEventIds.add(traceId);
875
+ created++;
876
+ } catch {
877
+ }
878
+ }
879
+ if (created > 0) saveState();
880
+ return created;
881
+ },
882
+ /** Get current state for debugging. */
883
+ getState() {
884
+ return {
885
+ processedCount: state.processedEventIds.size,
886
+ lastTimestamp: state.lastProcessedTimestamp
887
+ };
888
+ }
889
+ };
890
+ }
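
End to end, a harvester sketch; the `createVault` options shape is assumed, not taken from this file:

```js
const vault = createVault({ baseDir: "./brain" }); // hypothetical options
const harvester = createHarvester(vault, {
  stateFile: ".soma/harvester-state.json",
  contentExtraction: { enabled: true, minContentLength: 80 }
});

await harvester.ingest([{
  agentId: "crawler", eventType: "execution.completed",
  timestamp: Date.now(), status: "completed", duration: 1200, nodeCount: 4
}]); // -> 1

await harvester.processInbox("./inbox"); // parses *.json / *.jsonl / *.md, then moves them to processed/
harvester.getState();                    // { processedCount, lastTimestamp }
```
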
891
+ var DEFAULT_SCORE_THRESHOLD = 0.4;
892
+ var DEFAULT_DEDUP_THRESHOLD = 0.7;
893
+ var SCORING_KEYWORDS = [
894
+ { category: "decision", keywords: ["decided", "chose", "selected", "opted", "picked", "agreed", "concluded"], weight: 0.15 },
895
+ { category: "assumption", keywords: ["assumed", "expected", "believed", "thought", "predicted", "hypothesized"], weight: 0.15 },
896
+ { category: "constraint", keywords: ["must", "cannot", "required", "blocked", "limited", "restricted", "prevented"], weight: 0.15 },
897
+ { category: "contradiction", keywords: ["contradicts", "conflicts", "inconsistent", "disagrees", "but", "however", "although"], weight: 0.15 }
898
+ ];
899
+ function overlapCoefficient(a, b) {
900
+ const setA = new Set(a.toLowerCase().split(/\s+/));
901
+ const setB = new Set(b.toLowerCase().split(/\s+/));
902
+ let intersection = 0;
903
+ for (const word of setA) if (setB.has(word)) intersection++;
904
+ const minSize = Math.min(setA.size, setB.size);
905
+ return minSize > 0 ? intersection / minSize : 0;
906
+ }
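
The dedup metric is the overlap coefficient: shared words divided by the smaller word-set size. Note how the first pair lands just under the 0.7 `DEFAULT_DEDUP_THRESHOLD`:

```js
overlapCoefficient("retry fetch page", "fetch page slowly");
// {retry, fetch, page} vs {fetch, page, slowly}: 2 shared / min(3, 3) ~= 0.67
overlapCoefficient("fetch", "fetch page slowly"); // 1 (a subset always scores 1)
```
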
907
+ function isDuplicateInVault(vault, type, title, threshold = 0.7) {
908
+ const existing = vault.list(type);
909
+ return existing.some((e) => overlapCoefficient(e.name, title) >= threshold);
910
+ }
911
+ function md5(content) {
912
+ return createHash("md5").update(content).digest("hex");
913
+ }
914
+ function createSynthesizer(vault, analysisFn, config) {
915
+ const scoreThreshold = (config == null ? void 0 : config.scoreThreshold) ?? DEFAULT_SCORE_THRESHOLD;
916
+ const dedupThreshold = (config == null ? void 0 : config.dedupThreshold) ?? DEFAULT_DEDUP_THRESHOLD;
917
+ const stateFile = (config == null ? void 0 : config.stateFile) ?? ".soma/synthesizer-state.json";
918
+ let hashes = /* @__PURE__ */ new Map();
919
+ let lastAnalysisHash = "";
920
+ let savedEntityCount = 0;
921
+ try {
922
+ if (existsSync3(stateFile)) {
923
+ const raw = JSON.parse(readFileSync3(stateFile, "utf-8"));
924
+ const currentCount = vaultEntityCount(vault.baseDir);
925
+ if (raw.entityCount == null && raw.vaultFingerprint) {
926
+ console.log("[Synthesizer] Migrating state from vaultFingerprint to entityCount");
927
+ hashes = /* @__PURE__ */ new Map();
928
+ lastAnalysisHash = "";
929
+ } else if (raw.entityCount != null && currentCount < raw.entityCount) {
930
+ console.log(`[Synthesizer] Vault entity count decreased (${raw.entityCount} \u2192 ${currentCount}) \u2014 resetting state`);
931
+ hashes = /* @__PURE__ */ new Map();
932
+ lastAnalysisHash = "";
933
+ } else {
934
+ hashes = new Map(Object.entries(raw.hashes ?? {}));
935
+ lastAnalysisHash = raw.lastAnalysisHash ?? "";
936
+ }
937
+ savedEntityCount = currentCount;
938
+ }
939
+ } catch {
940
+ }
941
+ function saveState() {
942
+ const dir = dirname2(stateFile);
943
+ if (!existsSync3(dir)) mkdirSync3(dir, { recursive: true });
944
+ writeFileSync3(stateFile, JSON.stringify({
945
+ hashes: Object.fromEntries(hashes),
946
+ lastAnalysisHash,
947
+ entityCount: savedEntityCount ?? vaultEntityCount(vault.baseDir)
948
+ }), "utf-8");
949
+ }
950
+ function scoreCandidate(entity) {
951
+ let score = 0;
952
+ const text = `${entity.name} ${entity.body}`.toLowerCase();
953
+ score += Math.min(text.length / 3e3, 0.3);
954
+ for (const { keywords, weight } of SCORING_KEYWORDS) {
955
+ if (keywords.some((kw) => text.includes(kw))) score += weight;
956
+ }
957
+ if (text.includes("## outcome") || text.includes("## result")) score += 0.1;
958
+ if (text.includes("## context") || text.includes("## background")) score += 0.1;
959
+ return score;
960
+ }
961
+ function deduplicateSpecs(specs) {
962
+ const merged = [];
963
+ for (const spec of specs) {
964
+ let found = false;
965
+ for (const existing of merged) {
966
+ if (existing.type === spec.type && overlapCoefficient(existing.title, spec.title) >= dedupThreshold) {
967
+ existing.sourceCount += spec.sourceCount;
968
+ existing.evidence = [.../* @__PURE__ */ new Set([...existing.evidence, ...spec.evidence])];
969
+ existing.sourceIds = [.../* @__PURE__ */ new Set([...existing.sourceIds, ...spec.sourceIds])];
970
+ if (existing.sourceCount >= 3 && existing.confidence === "low") existing.confidence = "medium";
971
+ else if (existing.sourceCount >= 2 && existing.confidence === "medium") existing.confidence = "high";
972
+ found = true;
973
+ break;
974
+ }
975
+ }
976
+ if (!found) merged.push({ ...spec });
977
+ }
978
+ const existingLearnings = [
979
+ ...vault.list("assumption"),
980
+ ...vault.list("decision"),
981
+ ...vault.list("constraint"),
982
+ ...vault.list("contradiction"),
983
+ ...vault.list("synthesis")
984
+ ];
985
+ return merged.filter((spec) => {
986
+ return !existingLearnings.some(
987
+ (existing) => overlapCoefficient(existing.name, spec.title) >= 0.8
988
+ );
989
+ });
990
+ }
991
+ return {
992
+ /**
993
+ * Run the full synthesis pipeline.
994
+ * Returns the number of learning records created.
995
+ */
996
+ async synthesize() {
997
+ const allEntities = [
998
+ ...vault.list("execution"),
999
+ ...vault.list("insight"),
1000
+ ...vault.list("agent"),
1001
+ ...vault.list("decision")
1002
+ ].filter((e) => !e.tags.includes("synthesized") && !e.decayed_from);
1003
+ const candidates = allEntities.filter((e) => {
1004
+ const hash = md5(e.body);
1005
+ if (hashes.get(e.id) === hash) return false;
1006
+ hashes.set(e.id, hash);
1007
+ return true;
1008
+ }).filter((e) => scoreCandidate(e) >= scoreThreshold);
1009
+ if (candidates.length === 0) {
1010
+ saveState();
1011
+ return 0;
1012
+ }
1013
+ const allSpecs = [];
1014
+ for (const candidate of candidates) {
1015
+ try {
1016
+ const prompt = buildExtractionPrompt(candidate);
1017
+ const response = await analysisFn(prompt);
1018
+ const specs = parseExtractionResponse(response, candidate);
1019
+ allSpecs.push(...specs);
1020
+ } catch {
1021
+ }
1022
+ }
1023
+ if (allSpecs.length === 0) {
1024
+ saveState();
1025
+ return 0;
1026
+ }
1027
+ const deduplicated = deduplicateSpecs(allSpecs);
1028
+ let created = 0;
1029
+ for (const spec of deduplicated) {
1030
+ try {
1031
+ const entityType = spec.type;
1032
+ writeToLayer(vault, "synthesizer", "emerging", {
1033
+ type: entityType,
1034
+ name: spec.title,
1035
+ status: "active",
1036
+ claim: spec.claim,
1037
+ confidence: spec.confidence,
1038
+ confidence_score: spec.confidence === "high" ? 0.9 : spec.confidence === "medium" ? 0.6 : 0.3,
1039
+ evidence: spec.evidence,
1040
+ evidence_links: spec.sourceIds,
1041
+ sourceIds: spec.sourceIds,
1042
+ decay_at: new Date(Date.now() + 90 * 24 * 60 * 60 * 1e3).toISOString(),
1043
+ tags: ["synthesized", spec.type],
1044
+ related: spec.sourceIds.map((id) => `execution/${id}`),
1045
+ body: `## ${spec.title}
1046
+
1047
+ ${spec.claim}
1048
+
1049
+ ### Evidence
1050
+ ${spec.evidence.map((e) => `- ${e}`).join("\n")}`
1051
+ });
1052
+ created++;
1053
+ } catch {
1054
+ }
1055
+ }
1056
+ for (const spec of deduplicated) {
1057
+ if (spec.confidence === "high" && (spec.type === "constraint" || spec.type === "decision")) {
1058
+ try {
1059
+ const policyPrompt = `Based on this ${spec.type}: "${spec.claim}", suggest a guard policy. Return JSON: { "scope": "...", "conditions": "...", "enforcement": "warn|error|abort", "thresholds": {} }`;
1060
+ const policyResponse = await analysisFn(policyPrompt);
1061
+ const policyData = JSON.parse(policyResponse);
1062
+ if (!policyData.scope) console.warn(`[Synthesizer] Policy for '${spec.title}' missing scope`);
1063
+ writeToLayer(vault, "synthesizer", "emerging", {
1064
+ type: "policy",
1065
+ name: `Policy: ${spec.title}`,
1066
+ status: "draft",
1067
+ scope: policyData.scope ?? "unattributed",
1068
+ conditions: policyData.conditions ?? spec.claim,
1069
+ enforcement: policyData.enforcement ?? "warn",
1070
+ thresholds: policyData.thresholds,
1071
+ confidence_score: 0.7,
1072
+ evidence_links: spec.sourceIds,
1073
+ decay_at: new Date(Date.now() + 90 * 24 * 60 * 60 * 1e3).toISOString(),
1074
+ tags: ["synthesized", "auto-policy"],
1075
+ related: [`${spec.type}/${spec.title}`],
1076
+ body: `Auto-generated policy from ${spec.type}: ${spec.claim}`
1077
+ });
1078
+ } catch {
1079
+ }
1080
+ }
1081
+ }
1082
+ saveState();
1083
+ return created;
1084
+ },
1085
+ /** Score a single entity (for testing/debugging). */
1086
+ scoreCandidate,
1087
+ /**
1088
+ * Analyze agent-level statistics via LLM.
1089
+ * Unlike synthesize() which scores individual entity bodies,
1090
+ * this builds a rich prompt from aggregate agent data and
1091
+ * asks the LLM to extract insights, patterns, and policy suggestions.
1092
+ * Returns the number of learnings created.
1093
+ */
1094
+ async analyzeAgents() {
1095
+ const agents = vault.list("agent");
1096
+ if (agents.length === 0) return 0;
1097
+ const agentSummaries = [];
1098
+ for (const agent of agents) {
1099
+ const data = agent;
1100
+ const total = data.totalExecutions ?? 0;
1101
+ if (total === 0) continue;
1102
+ const failRate = data.failureRate ?? 0;
1103
+ const failCount = Math.round(total * failRate);
1104
+ agentSummaries.push(`- ${agent.name}: ${total} runs, ${failCount} failures (${(failRate * 100).toFixed(1)}%)`);
1105
+ }
1106
+ const statsString = agentSummaries.join("\n");
1107
+ const currentHash = md5(statsString);
1108
+ if (currentHash === lastAnalysisHash) {
1109
+ console.log("[Soma Synthesizer] Agent stats unchanged \u2014 skipping analysis");
1110
+ return 0;
1111
+ }
1112
+ const existingTypes = ["insight", "decision", "assumption", "constraint", "contradiction", "synthesis"];
1113
+ const existingTitles = [];
1114
+ for (const type of existingTypes) {
1115
+ for (const e of vault.list(type)) {
1116
+ if (e.tags.includes("synthesized")) existingTitles.push(`[${e.type}] ${e.name}`);
1117
+ }
1118
+ }
1119
+ for (const p of vault.list("policy")) {
1120
+ existingTitles.push(`[policy] ${p.name}`);
1121
+ }
1122
+ const existingKnowledgeSection = existingTitles.length > 0 ? `
1123
+ The following insights and policies ALREADY EXIST in the knowledge vault.
1124
+ Do NOT repeat or rephrase these. Only return genuinely NEW findings:
1125
+ ${existingTitles.map((t) => `- ${t}`).join("\n")}
1126
+ ` : "";
1127
+ const prompt = `You are analyzing AI agent execution statistics from an organizational knowledge vault.
1128
+
1129
+ Here are all agents and their performance:
1130
+
1131
+ ${statsString}
1132
+ ${existingKnowledgeSection}
1133
+ Based on this data, extract NEW insights, decisions, constraints, and contradictions that are NOT already covered above.
1134
+
1135
+ For each finding, return a JSON array:
1136
+ [
1137
+ {
1138
+ "type": "insight|decision|constraint|contradiction",
1139
+ "title": "Short descriptive title",
1140
+ "claim": "What was found or should be done",
1141
+ "confidence": "low|medium|high",
1142
+ "evidence": ["supporting data point 1", "supporting data point 2"],
1143
+ "agentIds": ["agent-name-1"]
1144
+ }
1145
+ ]
1146
+
1147
+ Focus on:
1148
+ - Agents with high failure rates \u2014 what policies should be enforced?
1149
+ - Patterns across agents \u2014 do similar agents fail similarly?
1150
+ - Reliability trends \u2014 which agents are healthy vs. problematic?
1151
+ - Suggested guard thresholds based on the data
1152
+
1153
+ Return [] if all meaningful insights are already covered above.`;
1154
+ try {
1155
+ const response = await analysisFn(prompt);
1156
+ if (!response) {
1157
+ console.warn("[Soma Synthesizer] LLM returned empty response for agent analysis");
1158
+ return 0;
1159
+ }
1160
+ const match = response.match(/\[[\s\S]*\]/);
1161
+ if (!match) {
1162
+ console.warn("[Soma Synthesizer] No JSON array found in LLM response (" + response.length + " chars)");
1163
+ return 0;
1164
+ }
1165
+ let parsed;
1166
+ try {
1167
+ parsed = JSON.parse(match[0]);
1168
+ } catch {
1169
+ let fixable = match[0];
1170
+ const lastBrace = fixable.lastIndexOf("}");
1171
+ if (lastBrace > 0) {
1172
+ fixable = fixable.slice(0, lastBrace + 1) + "]";
1173
+ try {
1174
+ parsed = JSON.parse(fixable);
1175
+ } catch {
1176
+ console.warn("[Soma Synthesizer] Could not parse LLM JSON response (even after repair)");
1177
+ return 0;
1178
+ }
1179
+ } else {
1180
+ console.warn("[Soma Synthesizer] Could not parse LLM JSON response");
1181
+ return 0;
1182
+ }
1183
+ }
1184
+ if (!Array.isArray(parsed)) return 0;
1185
+ console.log(`[Soma Synthesizer] Extracted ${parsed.length} insights from LLM`);
1186
+ let created = 0;
1187
+ let superseded = 0;
1188
+ let skippedDupes = 0;
1189
+ const confidenceRank = { high: 3, medium: 2, low: 1 };
1190
+ for (const rawItem of parsed) {
1191
+ const item = rawItem;
1192
+ const entityType = String(item.type ?? "insight");
1193
+ const title = String(item.title ?? "Untitled");
1194
+ const claim = String(item.claim ?? "");
1195
+ const confidence = String(item.confidence ?? "medium");
1196
+ const evidence = Array.isArray(item.evidence) ? item.evidence.map(String) : [];
1197
+ const agentIds = Array.isArray(item.agentIds) ? item.agentIds.map(String) : [];
1198
+ const existingMatch = vault.list(entityType).find(
1199
+ (e) => overlapCoefficient(e.name, title) >= dedupThreshold
1200
+ );
1201
+ if (existingMatch) {
1202
+ const existingConf = existingMatch.confidence ?? "low";
1203
+ const existingEvidence = existingMatch.evidence;
1204
+ const existingEvidenceArr = Array.isArray(existingEvidence) ? existingEvidence.map(String) : [];
1205
+ const newEvidenceItems = evidence.filter((ev) => !existingEvidenceArr.includes(ev));
1206
+ if ((confidenceRank[confidence] ?? 0) > (confidenceRank[existingConf] ?? 0) || newEvidenceItems.length > 0) {
1207
+ const mergedEvidence = [.../* @__PURE__ */ new Set([...existingEvidenceArr, ...evidence])];
1208
+ const bestConfidence = (confidenceRank[confidence] ?? 0) >= (confidenceRank[existingConf] ?? 0) ? confidence : existingConf;
1209
+ vault.update(existingMatch.id, {
1210
+ claim,
1211
+ confidence: bestConfidence,
1212
+ evidence: mergedEvidence,
1213
+ body: `## ${existingMatch.name}
1214
+
1215
+ ${claim}
1216
+
1217
+ ### Evidence
1218
+ ${mergedEvidence.map((e) => `- ${e}`).join("\n")}`
1219
+ });
1220
+ superseded++;
1221
+ } else {
1222
+ skippedDupes++;
1223
+ }
1224
+ continue;
1225
+ }
1226
+ try {
1227
+ writeToLayer(vault, "synthesizer", "emerging", {
1228
+ type: entityType,
1229
+ name: title,
1230
+ status: "active",
1231
+ claim,
1232
+ confidence,
1233
+ confidence_score: confidence === "high" ? 0.9 : confidence === "medium" ? 0.6 : 0.3,
1234
+ evidence,
1235
+ evidence_links: agentIds,
1236
+ sourceIds: agentIds,
1237
+ decay_at: new Date(Date.now() + 90 * 24 * 60 * 60 * 1e3).toISOString(),
1238
+ tags: ["synthesized", entityType, "agent-analysis"],
1239
+ related: agentIds.map((id) => `agent/${id}`),
1240
+ body: `## ${title}
1241
+
1242
+ ${claim}
1243
+
1244
+ ### Evidence
1245
+ ${evidence.map((e) => `- ${e}`).join("\n")}`
1246
+ });
1247
+ created++;
1248
+ } catch {
1249
+ }
1250
+ }
1251
+ if (skippedDupes > 0 || superseded > 0) {
1252
+ console.log(`[Soma Synthesizer] ${skippedDupes} skipped, ${superseded} superseded, ${created} new`);
1253
+ }
1254
+ lastAnalysisHash = currentHash;
1255
+ for (const rawItem2 of parsed) {
1256
+ const item2 = rawItem2;
1257
+ if (item2.confidence === "high" && (item2.type === "constraint" || item2.type === "decision")) {
1258
+ const policyTitle = `Policy: ${item2.title}`;
1259
+ if (isDuplicateInVault(vault, "policy", policyTitle, dedupThreshold)) {
1260
+ continue;
1261
+ }
1262
+ try {
1263
+ const policyPrompt = `Based on this ${item2.type}: "${item2.claim}", suggest a guard policy for an AI agent system. Return JSON: { "scope": "...", "conditions": "...", "enforcement": "warn|error|abort", "thresholds": {} }`;
1264
+ const policyResponse = await analysisFn(policyPrompt);
1265
+ const policyMatch = policyResponse.match(/\{[\s\S]*\}/);
1266
+ if (policyMatch) {
1267
+ const policyData = JSON.parse(policyMatch[0]);
1268
+ if (!policyData.scope) console.warn(`[Synthesizer] Policy '${policyTitle}' missing scope`);
1269
+ writeToLayer(vault, "synthesizer", "emerging", {
1270
+ type: "policy",
1271
+ name: policyTitle,
1272
+ status: "draft",
1273
+ scope: policyData.scope ?? "unattributed",
1274
+ conditions: policyData.conditions ?? item2.claim,
1275
+ enforcement: policyData.enforcement ?? "warn",
1276
+ thresholds: policyData.thresholds,
1277
+ confidence_score: 0.7,
1278
+ evidence_links: item2.agentIds ?? [],
1279
+ decay_at: new Date(Date.now() + 90 * 24 * 60 * 60 * 1e3).toISOString(),
1280
+ tags: ["synthesized", "auto-policy", "agent-analysis"],
1281
+ related: (item2.agentIds ?? []).map((id) => `agent/${id}`),
1282
+ body: `Auto-generated policy from ${item2.type}: ${item2.claim}`
1283
+ });
1284
+ }
1285
+ } catch {
1286
+ }
1287
+ }
1288
+ }
1289
+ saveState();
1290
+ return created;
1291
+ } catch (err) {
1292
+ console.error("[Soma Synthesizer] analyzeAgents error:", err instanceof Error ? err.message : String(err));
1293
+ return 0;
1294
+ }
1295
+ },
1296
+ /**
1297
+ * Synthesize L1 entries into L3 proposals.
1298
+ * Detects recurring patterns across multiple agent traces and
1299
+ * generates L3 (Emerging Knowledge) proposals with confidence scores
1300
+ * and evidence links to source L1 entries.
1301
+ *
1302
+ * Enforces write restriction: only L3 writes allowed.
1303
+ */
1304
+ async synthesizeL3() {
1305
+ enforceWritePermission("synthesizer", "emerging");
1306
+ const l1Entries = queryByLayer(vault, "archive");
1307
+ if (l1Entries.length < 3) return 0;
1308
+ const byAgent = /* @__PURE__ */ new Map();
1309
+ for (const entry of l1Entries) {
1310
+ if (!entry.agent_id) {
1311
+ console.warn(`[Synthesizer] Skipping entry ${entry.id} from agent grouping: missing agent_id`);
1312
+ continue;
1313
+ }
1314
+ if (!byAgent.has(entry.agent_id)) byAgent.set(entry.agent_id, []);
1315
+ byAgent.get(entry.agent_id).push(entry);
1316
+ }
1317
+ const patternCandidates = [];
1318
+ const l1Array = l1Entries.filter((e) => !e.superseded_by);
1319
+ for (let i = 0; i < l1Array.length; i++) {
1320
+ const entry = l1Array[i];
1321
+ const matches = [entry.id];
1322
+ const agents = new Set(entry.agent_id ? [entry.agent_id] : []);
1323
+ for (let j = i + 1; j < l1Array.length; j++) {
1324
+ const other = l1Array[j];
1325
+ const similarity = overlapCoefficient(entry.body, other.body);
1326
+ if (similarity >= 0.5) {
1327
+ matches.push(other.id);
1328
+ if (other.agent_id) agents.add(other.agent_id);
1329
+ }
1330
+ }
1331
+ if (matches.length >= 3) {
1332
+ patternCandidates.push({
1333
+ content: entry.body,
1334
+ evidenceIds: matches,
1335
+ agentCount: agents.size,
1336
+ sourceAgents: [...agents]
1337
+ });
1338
+ }
1339
+ }
1340
+ if (patternCandidates.length === 0) {
1341
+ saveState();
1342
+ return 0;
1343
+ }
1344
+ let created = 0;
1345
+ const decayAt = new Date(Date.now() + 90 * 24 * 60 * 60 * 1e3).toISOString();
1346
+ for (const candidate of patternCandidates.slice(0, 20)) {
1347
+ try {
1348
+ const prompt = `You are analyzing recurring patterns found across ${candidate.agentCount} agents and ${candidate.evidenceIds.length} traces.
1349
+
1350
+ Pattern content sample:
1351
+ ${candidate.content.slice(0, 2e3)}
1352
+
1353
+ Synthesize this into a concise organizational knowledge proposal. Return JSON:
1354
+ { "title": "Short title", "claim": "What was discovered", "confidence": 0.0-1.0 }
1355
+
1356
+ Set confidence based on: number of supporting traces (${candidate.evidenceIds.length}), cross-agent corroboration (${candidate.agentCount} agents).`;
1357
+ const response = await analysisFn(prompt);
1358
+ const match = response.match(/\{[\s\S]*\}/);
1359
+ if (!match) continue;
1360
+ const data = JSON.parse(match[0]);
1361
+ const confidenceScore = Math.max(0, Math.min(1, Number(data.confidence) || 0.5));
1362
+ const existingL3 = queryByLayer(vault, "emerging");
1363
+ const isDuplicate = existingL3.some(
1364
+ (e) => overlapCoefficient(e.name, String(data.title)) >= dedupThreshold
1365
+ );
1366
+ if (isDuplicate) continue;
1367
+ writeToLayer(vault, "synthesizer", "emerging", {
1368
+ type: "insight",
1369
+ name: String(data.title),
1370
+ status: "pending",
1371
+ confidence_score: confidenceScore,
1372
+ evidence_links: candidate.evidenceIds,
1373
+ source_agents: candidate.sourceAgents,
1374
+ decay_at: decayAt,
1375
+ tags: ["synthesized", "l3-proposal"],
1376
+ related: candidate.evidenceIds.map((id) => `execution/${id}`),
1377
+ body: `## ${data.title}
1378
+
1379
+ ${data.claim}
1380
+
1381
+ ### Evidence
1382
+ Based on ${candidate.evidenceIds.length} traces across ${candidate.agentCount} agents.`
1383
+ });
1384
+ created++;
1385
+ } catch {
1386
+ }
1387
+ }
1388
+ saveState();
1389
+ return created;
1390
+ },
1391
+ /**
1392
+ * Synthesize decision entities into L3 proposals via pattern clustering.
1393
+ * Groups decisions by decision_type and agent, detects recurring patterns,
1394
+ * and generates L3 proposals with confidence scoring.
1395
+ *
1396
+ * Returns the number of L3 proposals created.
1397
+ */
1398
+ async synthesizeDecisions() {
1399
+ enforceWritePermission("synthesizer", "emerging");
1400
+ const decisions = vault.list("decision");
1401
+ if (decisions.length < 2) return 0;
1402
+ const byType = /* @__PURE__ */ new Map();
1403
+ for (const d of decisions) {
1404
+ const dtype = d.decision_type ?? "untyped:decision";
1405
+ if (!d.decision_type) console.warn(`[Synthesizer] Decision ${d.id} missing decision_type, using 'untyped:decision'`);
1406
+ if (!byType.has(dtype)) byType.set(dtype, []);
1407
+ byType.get(dtype).push(d);
1408
+ }
1409
+ let created = 0;
1410
+ const decayAt = new Date(Date.now() + 90 * 24 * 60 * 60 * 1e3).toISOString();
1411
+ for (const [decisionType, group] of byType) {
1412
+ if (group.length < 2) continue;
1413
+ const clusters = [];
+ const used = /* @__PURE__ */ new Set();
+ for (let i = 0; i < group.length; i++) {
+ const entry = group[i];
+ if (used.has(entry.id)) continue;
+ const cluster = [entry];
+ used.add(entry.id);
+ for (let j = i + 1; j < group.length; j++) {
+ const other = group[j];
+ if (used.has(other.id)) continue;
+ if (overlapCoefficient(entry.body, other.body) >= 0.4) {
+ cluster.push(other);
+ used.add(other.id);
+ }
+ }
+ if (cluster.length >= 2) {
+ clusters.push(cluster);
+ }
+ }
+ for (const cluster of clusters.slice(0, 10)) {
+ const agents = [...new Set(cluster.map((e) => e.agent_id).filter(Boolean))];
+ const evidenceIds = cluster.map((e) => e.id);
+ const confidenceScore = Math.min(0.9, 0.3 + cluster.length * 0.1 + agents.length * 0.1);
+ const title = `${decisionType} pattern: ${cluster[0].name.slice(0, 60)}`;
+ const existingL3 = queryByLayer(vault, "emerging");
+ const isDuplicate = existingL3.some(
+ (e) => overlapCoefficient(e.name, title) >= dedupThreshold
+ );
+ if (isDuplicate) continue;
+ try {
+ writeToLayer(vault, "synthesizer", "emerging", {
+ type: "insight",
+ name: title,
+ status: "pending",
+ confidence_score: confidenceScore,
+ evidence_links: evidenceIds,
+ source_agents: agents.map(String),
+ decay_at: decayAt,
+ decision_type: decisionType,
+ tags: ["synthesized", "l3-proposal", "decision-pattern"],
+ related: evidenceIds.map((id) => `decision/${id}`),
+ body: `## Decision Pattern: ${decisionType}
+
+ Recurring ${decisionType} pattern detected across ${agents.length} agent(s) and ${cluster.length} decisions.
+
+ ### Evidence
+ ${evidenceIds.map((id) => `- [[decision/${id}]]`).join("\n")}`
+ });
+ created++;
+ } catch {
+ }
+ }
+ }
+ saveState();
+ return created;
+ },
+ /**
+ * Detect decision divergence between agents with similar patterns.
+ * Compares decision chains across agents to find where failing agents
+ * make different choices than successful ones.
+ */
+ synthesizeDecisionDivergence() {
+ const executions = vault.list("execution");
+ const agents = vault.list("agent");
+ const agentDecisions = /* @__PURE__ */ new Map();
+ for (const exec of executions) {
+ const data = exec;
+ const agentId = resolveAgentId(data);
+ const decisions = data.decisions;
+ const pattern = data.decisionPattern;
+ if (!agentId || !decisions || decisions.length === 0) continue;
+ if (!agentDecisions.has(agentId)) {
+ const agent = agents.find((a) => a.name === agentId || a.agentId === agentId);
+ const failureRate = (agent == null ? void 0 : agent.failureRate) ?? 0;
+ agentDecisions.set(agentId, { patterns: [], successRate: 1 - failureRate, decisions: [] });
+ }
+ const entry = agentDecisions.get(agentId);
+ if (pattern) entry.patterns.push(pattern);
+ entry.decisions.push(decisions);
+ }
+ const agentsWithData = [...agentDecisions.entries()].filter(([, v]) => v.decisions.length >= 3);
+ if (agentsWithData.length < 2) return 0;
+ let created = 0;
+ for (let i = 0; i < agentsWithData.length; i++) {
+ for (let j = i + 1; j < agentsWithData.length; j++) {
+ const [agentA, dataA] = agentsWithData[i];
+ const [agentB, dataB] = agentsWithData[j];
+ const gap = Math.abs(dataA.successRate - dataB.successRate);
+ if (gap < 0.2) continue;
+ const [winner, winnerData] = dataA.successRate > dataB.successRate ? [agentA, dataA] : [agentB, dataB];
+ const [loser, loserData] = dataA.successRate > dataB.successRate ? [agentB, dataB] : [agentA, dataA];
+ const winnerActions = /* @__PURE__ */ new Map();
+ const loserActions = /* @__PURE__ */ new Map();
+ for (const chain of winnerData.decisions) {
+ for (const d of chain) {
+ const cur = winnerActions.get(d.action) ?? { total: 0, ok: 0 };
+ cur.total++;
+ if (d.outcome === "ok") cur.ok++;
+ winnerActions.set(d.action, cur);
+ }
+ }
+ for (const chain of loserData.decisions) {
+ for (const d of chain) {
+ const cur = loserActions.get(d.action) ?? { total: 0, ok: 0 };
+ cur.total++;
+ if (d.outcome === "ok") cur.ok++;
+ loserActions.set(d.action, cur);
+ }
+ }
+ const winnerActionSet = new Set(winnerActions.keys());
+ const loserActionSet = new Set(loserActions.keys());
+ let intersection = 0;
+ for (const a of winnerActionSet) {
+ if (loserActionSet.has(a)) intersection++;
+ }
+ const actionOverlap = intersection / Math.min(winnerActionSet.size, loserActionSet.size);
+ if (actionOverlap < 0.3) continue;
+ const winnerOnly = [...winnerActions.keys()].filter((a) => !loserActions.has(a));
+ const loserOnly = [...loserActions.keys()].filter((a) => !winnerActions.has(a));
+ if (winnerOnly.length === 0 && loserOnly.length === 0) continue;
+ const insightName = `Decision divergence: ${loser} vs ${winner}`;
+ const existingL3 = queryByLayer(vault, "emerging");
+ if (existingL3.some((e) => overlapCoefficient(e.name, insightName) >= dedupThreshold)) continue;
+ const fmtRate = (stats) => `${(stats.ok / Math.max(1, stats.total) * 100).toFixed(0)}%`;
+ const claim = `${winner} (${(winnerData.successRate * 100).toFixed(0)}% overall) and ${loser} (${(loserData.successRate * 100).toFixed(0)}% overall) show different decision patterns. ` + (winnerOnly.length > 0 ? `${winner} uses: ${winnerOnly.slice(0, 3).map((a) => `${a} (${fmtRate(winnerActions.get(a))} success)`).join(", ")}. ` : "") + (loserOnly.length > 0 ? `${loser} uses instead: ${loserOnly.slice(0, 3).map((a) => `${a} (${fmtRate(loserActions.get(a))} success)`).join(", ")}.` : "");
+ const hasAicpPreflight = [...winnerData.decisions, ...loserData.decisions].some((chain) => chain.some((d) => d.action === "aicp-preflight"));
+ const via = hasAicpPreflight ? "aicp-preflight" : void 0;
+ const tags = ["synthesized", "divergence", "actionable"];
+ if (via) tags.push("aicp-mediated");
+ try {
+ writeToLayer(vault, "synthesizer", "emerging", {
+ type: "insight",
+ name: insightName,
+ status: "active",
+ tags,
+ claim,
+ via,
+ confidence_score: Math.min(0.9, 0.5 + gap),
+ evidence_links: [winner, loser],
+ source_agents: [winner, loser],
+ body: `## Decision Divergence
+
+ ${claim}
+
+ ### ${winner} actions
+ ${[...winnerActions.entries()].map(([a, s]) => `- ${a} (${s.total}x, ${fmtRate(s)} success)`).join("\n")}
+
+ ### ${loser} actions
+ ${[...loserActions.entries()].map(([a, s]) => `- ${a} (${s.total}x, ${fmtRate(s)} success)`).join("\n")}`
+ });
+ created++;
+ } catch {
+ }
+ }
+ }
+ return created;
+ }
+ };
+ }
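
Editor's note: a hedged usage sketch of the synthesizer in isolation, mirroring how createSoma() wires it further below. The inline analysisFn is a stand-in for a real LLM call and simply returns a fixed JSON proposal; it is not part of the package.

const sketchVault = createVault({ baseDir: ".soma/vault" });
const synthesizer = createSynthesizer(sketchVault, async (prompt) =>
  '{ "title": "Example insight", "claim": "Stub claim", "confidence": 0.4 }'
);
// Cluster decision entities by decision_type and body overlap, then write
// L3 proposals with confidence derived from cluster and agent counts.
const created = await synthesizer.synthesizeDecisions();
console.log(`created ${created} L3 decision-pattern proposals`);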
+ function buildExtractionPrompt(entity) {
+ return `You are analyzing a knowledge record to extract learnings.
+
+ Record type: ${entity.type}
+ Record name: ${entity.name}
+ Content (first 4000 chars):
+ ${entity.body.slice(0, 4e3)}
+
+ Extract any assumptions, decisions, constraints, or contradictions from this record.
+ Return a JSON array of objects with: { "type": "assumption|decision|constraint|contradiction", "title": "...", "claim": "...", "confidence": "low|medium|high", "evidence": ["..."] }
+
+ Return [] if no learnings found.`;
+ }
+ function parseExtractionResponse(response, source) {
+ try {
+ const match = response.match(/\[[\s\S]*\]/);
+ if (!match) return [];
+ const parsed = JSON.parse(match[0]);
+ if (!Array.isArray(parsed)) return [];
+ return parsed.map((item) => ({
+ type: String(item.type ?? "assumption"),
+ title: String(item.title ?? "Untitled"),
+ claim: String(item.claim ?? ""),
+ confidence: item.confidence ?? "low",
+ evidence: Array.isArray(item.evidence) ? item.evidence.map(String) : [],
+ sourceIds: [source.id],
+ sourceCount: 1
+ }));
+ } catch {
+ return [];
+ }
+ }
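
Editor's note: a hedged sketch of how these two helpers pair up. The record literal and analysisFn here are illustrative assumptions, not real vault records or package APIs.

const record = { id: "exec-1", type: "execution", name: "nightly-run", body: "We must not retry failed writes." };
const prompt = buildExtractionPrompt(record);
const response = await analysisFn(prompt); // assumed LLM call returning a JSON array string
const learnings = parseExtractionResponse(response, record);
// each parsed learning carries sourceIds: ["exec-1"] and sourceCount: 1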
+ var DEFAULT_MIN_CLUSTER_SIZE = 3;
+ var DEFAULT_SIMILARITY_THRESHOLD = 0.5;
+ function createCartographer(vault, vectorStore, embedFn, config) {
+ const minClusterSize = (config == null ? void 0 : config.minClusterSize) ?? DEFAULT_MIN_CLUSTER_SIZE;
+ const similarityThreshold = (config == null ? void 0 : config.similarityThreshold) ?? DEFAULT_SIMILARITY_THRESHOLD;
+ const stateFile = (config == null ? void 0 : config.stateFile) ?? ".soma/cartographer-state.json";
+ let state = { embeddedIds: /* @__PURE__ */ new Set(), entityHashes: /* @__PURE__ */ new Map(), clusterAssignments: {} };
+ try {
+ if (existsSync4(stateFile)) {
+ const raw = JSON.parse(readFileSync4(stateFile, "utf-8"));
+ const currentCount = vaultEntityCount(vault.baseDir);
+ if (raw.entityCount == null && raw.vaultFingerprint) {
+ console.log("[Cartographer] Migrating state from vaultFingerprint to entityCount");
+ state = { embeddedIds: /* @__PURE__ */ new Set(), entityHashes: /* @__PURE__ */ new Map(), clusterAssignments: {}, entityCount: currentCount };
+ } else if (raw.entityCount != null && currentCount < raw.entityCount) {
+ console.log(`[Cartographer] Vault entity count decreased (${raw.entityCount} \u2192 ${currentCount}) \u2014 resetting state`);
+ state = { embeddedIds: /* @__PURE__ */ new Set(), entityHashes: /* @__PURE__ */ new Map(), clusterAssignments: {}, entityCount: currentCount };
+ } else {
+ state = {
+ embeddedIds: new Set(raw.embeddedIds ?? []),
+ entityHashes: new Map(Object.entries(raw.entityHashes ?? {})),
+ clusterAssignments: raw.clusterAssignments ?? {},
+ entityCount: currentCount
+ };
+ }
+ }
+ } catch (err) {
+ console.warn("[Cartographer] Failed to load state, starting fresh:", err.message);
+ }
+ function saveState() {
+ const dir = dirname3(stateFile);
+ if (!existsSync4(dir)) mkdirSync4(dir, { recursive: true });
+ writeFileSync4(stateFile, JSON.stringify({
+ embeddedIds: [...state.embeddedIds],
+ entityHashes: Object.fromEntries(state.entityHashes),
+ clusterAssignments: state.clusterAssignments,
+ entityCount: state.entityCount ?? vaultEntityCount(vault.baseDir)
+ }), "utf-8");
+ }
+ function contentHash(text) {
+ return createHash2("md5").update(text).digest("hex");
+ }
+ function entityToText(entity) {
+ return `${entity.type}: ${entity.name}
+ ${entity.tags.join(", ")}
+ ${entity.body}`.slice(0, 2e3);
+ }
+ function clusterByLinks(entities) {
+ const adj = /* @__PURE__ */ new Map();
+ for (const e of entities) {
+ if (!adj.has(e.id)) adj.set(e.id, /* @__PURE__ */ new Set());
+ for (const link of e.related) {
+ const linkedId = link.split("/").pop() ?? "";
+ if (!adj.has(linkedId)) adj.set(linkedId, /* @__PURE__ */ new Set());
+ adj.get(e.id).add(linkedId);
+ adj.get(linkedId).add(e.id);
+ }
+ }
+ const visited = /* @__PURE__ */ new Set();
+ const clusters = /* @__PURE__ */ new Map();
+ let clusterId = 0;
+ for (const [nodeId] of adj) {
+ if (visited.has(nodeId)) continue;
+ const community = [];
+ const queue = [nodeId];
+ while (queue.length > 0) {
+ const current = queue.shift();
+ if (visited.has(current)) continue;
+ visited.add(current);
+ community.push(current);
+ for (const neighbor of adj.get(current) ?? []) {
+ if (!visited.has(neighbor)) queue.push(neighbor);
+ }
+ }
+ if (community.length >= minClusterSize) {
+ clusters.set(clusterId++, community);
+ }
+ }
+ return clusters;
+ }
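+ // Editor's illustration (assumed data) of the connected-components pass
+ // above: a chain of wikilinked entities forms one community, kept once it
+ // reaches minClusterSize (default 3).
+ //   entities: a → [[insight/b]], b → [[insight/c]], c → []
+ //   adjacency: a↔b, b↔c  ⇒  BFS from "a" visits a, b, c
+ //   clusterByLinks(entities) ⇒ Map { 0 => ["a", "b", "c"] }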
+ return {
+ /**
+ * Embed all new/changed entities into the vector store.
+ * Returns the number of entities embedded.
+ */
+ async embed() {
+ if (!embedFn) return 0;
+ const allTypes = ["agent", "execution", "archetype", "insight", "policy", "decision", "assumption", "constraint", "contradiction", "synthesis"];
+ let embedded = 0;
+ for (const type of allTypes) {
+ const entities = vault.list(type);
+ for (const entity of entities) {
+ const text = entityToText(entity);
+ const hash = contentHash(text);
+ if (state.embeddedIds.has(entity.id) && state.entityHashes.get(entity.id) === hash) continue;
+ try {
+ const vector = await embedFn(text);
+ if (!vector) continue;
+ await vectorStore.upsert(entity.id, vector, {
+ type: entity.type,
+ name: entity.name,
+ status: entity.status,
+ tags: entity.tags
+ });
+ state.embeddedIds.add(entity.id);
+ state.entityHashes.set(entity.id, hash);
+ embedded++;
+ } catch {
+ }
+ }
+ }
+ if (embedded > 0) saveState();
+ return embedded;
+ },
+ /**
+ * Discover clusters and archetypes.
+ * Returns the number of archetypes created.
+ */
+ async discover() {
+ const allEntities = [];
+ for (const type of ["agent", "execution", "archetype", "insight", "policy"]) {
+ allEntities.push(...vault.list(type));
+ }
+ if (allEntities.length < minClusterSize) return 0;
+ const linkClusters = clusterByLinks(allEntities);
+ let archetypesCreated = 0;
+ for (const [, members] of linkClusters) {
+ const memberEntities = members.map((id) => allEntities.find((e) => e.id === id)).filter(Boolean);
+ const types = new Set(memberEntities.map((e) => e.type));
+ const agents = new Set(
+ memberEntities.filter((e) => e.type === "execution" || e.type === "agent").map((e) => e.agentId).filter(Boolean)
+ );
+ if (agents.size >= 2 || types.size >= 3) {
+ const name = `archetype-cluster-${Date.now()}-${archetypesCreated}`;
+ writeToLayer(vault, "cartographer", "emerging", {
+ type: "archetype",
+ name,
+ status: "proposed",
+ pattern: `Cluster of ${members.length} entities across ${agents.size} agents and ${types.size} types`,
+ confidence: Math.min(members.length / 10, 1),
+ confidence_score: Math.min(members.length / 10, 1),
+ evidence_links: members,
+ decay_at: new Date(Date.now() + 90 * 24 * 60 * 60 * 1e3).toISOString(),
+ memberAgents: [...agents],
+ memberExecutions: members.filter((id) => {
+ var _a;
+ return ((_a = memberEntities.find((e) => e.id === id)) == null ? void 0 : _a.type) === "execution";
+ }),
+ bottlenecks: [],
+ suggestedPolicies: [],
+ tags: ["archetype", "auto-discovered"],
+ related: members.map((id) => {
+ const e = memberEntities.find((x) => x.id === id);
+ return e ? `${e.type}/${e.id}` : id;
+ }),
+ body: `Auto-discovered archetype spanning ${agents.size} agents.
+
+ Members:
+ ${members.map((id) => `- [[${id}]]`).join("\n")}`
+ });
+ archetypesCreated++;
+ }
+ }
+ saveState();
+ return archetypesCreated;
+ },
+ /**
+ * Semantic search across all entity types.
+ */
+ async search(query, options) {
+ if (!embedFn) return [];
+ const queryVector = await embedFn(query);
+ if (!queryVector) return [];
+ return vectorStore.search(queryVector, options);
+ },
+ /**
+ * Extract entities and relationships from L1, L3, and L4 entries.
+ * Creates relationship mapping proposals in L3 with evidence_links and confidence_score.
+ * Returns the number of relationship proposals created.
+ */
+ async mapRelationships() {
+ const l1Entries = queryByLayer(vault, "archive");
+ const l3Entries = queryByLayer(vault, "emerging");
+ const l4Entries = queryByLayer(vault, "canon");
+ const allLayered = [...l1Entries, ...l3Entries, ...l4Entries];
+ if (allLayered.length < 2) return 0;
+ let created = 0;
+ const decayAt = new Date(Date.now() + 90 * 24 * 60 * 60 * 1e3).toISOString();
+ const CIRCUIT_BREAKER_LIMIT = 100;
+ for (let i = 0; i < allLayered.length && i < 200; i++) {
+ const entry = allLayered[i];
+ for (let j = i + 1; j < allLayered.length && j < 200; j++) {
+ const other = allLayered[j];
+ if (entry.id === other.id) continue;
+ const sharedTags = entry.tags.filter((t) => other.tags.includes(t));
+ if (sharedTags.length === 0) continue;
+ if (entry.related.some((r) => r.includes(other.id))) continue;
+ const confidence = Math.min(sharedTags.length / 5, 0.9);
+ if (confidence < similarityThreshold) continue;
+ try {
+ writeToLayer(vault, "cartographer", "emerging", {
+ type: "synthesis",
+ name: `Relationship: ${entry.name} \u2194 ${other.name}`,
+ status: "pending",
+ confidence_score: confidence,
+ evidence_links: [entry.id, other.id],
+ decay_at: decayAt,
+ tags: ["relationship-proposal", "cartographer"],
+ related: [`${entry.type}/${entry.id}`, `${other.type}/${other.id}`],
+ body: `Proposed relationship between ${entry.name} and ${other.name}.
+ Shared tags: ${sharedTags.join(", ")}`
+ });
+ created++;
+ } catch {
+ }
+ if (created >= CIRCUIT_BREAKER_LIMIT) return created;
+ }
+ }
+ return created;
+ },
+ /**
+ * Detect contradictions between L3 entries and L4 canon.
+ * Flags L3 entries that contradict established L4 truth.
+ * Returns the number of contradictions found.
+ */
+ detectContradictions() {
+ const l3Entries = queryByLayer(vault, "emerging");
+ const l4Entries = queryByLayer(vault, "canon");
+ let flagged = 0;
+ for (const l3 of l3Entries) {
+ for (const l4 of l4Entries) {
+ const sharedTags = l3.tags.filter((t) => l4.tags.includes(t) && t !== "synthesized");
+ if (sharedTags.length === 0) continue;
+ const l3Text = l3.body.toLowerCase();
+ const l4Text = l4.body.toLowerCase();
+ const contradictionPairs = [
+ ["should", "should not"],
+ ["enable", "disable"],
+ ["allow", "deny"],
+ ["increase", "decrease"],
+ ["must", "must not"]
+ ];
+ for (const [a, b] of contradictionPairs) {
+ if (l3Text.includes(a) && l4Text.includes(b) || l3Text.includes(b) && l4Text.includes(a)) {
+ vault.update(l3.id, {
+ tags: [.../* @__PURE__ */ new Set([...l3.tags, "contradicts-canon"])],
+ related: [.../* @__PURE__ */ new Set([...l3.related, `${l4.type}/${l4.id}`])]
+ });
+ flagged++;
+ break;
+ }
+ }
+ }
+ }
+ return flagged;
+ },
+ /** Suggest missing relationships within clusters. */
+ async suggestRelationships() {
+ const suggestions = [];
+ const allEntities = vault.list("insight").concat(vault.list("decision"), vault.list("archetype"));
+ for (const entity of allEntities) {
+ if (!embedFn) break;
+ try {
+ const text = entityToText(entity);
+ const vector = await embedFn(text);
+ if (!vector) continue;
+ const similar = await vectorStore.search(vector, { limit: 5 });
+ for (const result of similar) {
+ if (result.id === entity.id) continue;
+ if (result.score < similarityThreshold) continue;
+ if (entity.related.some((r) => r.includes(result.id))) continue;
+ suggestions.push({
+ from: entity.id,
+ to: result.id,
+ type: "related-to",
+ confidence: result.score
+ });
+ }
+ } catch {
+ }
+ }
+ return suggestions;
+ }
+ };
+ }
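
Editor's note: a hedged usage sketch of the cartographer. createJsonVectorStore is the store createSoma() defaults to below; myEmbedder is an assumption standing in for any async text → number[] embedding function.

const store = createJsonVectorStore(".soma/_vectors.json");
const cartographer = createCartographer(sketchVault, store, myEmbedder);
await cartographer.embed();    // index new/changed entities
await cartographer.discover(); // propose archetypes from link clusters
const hits = await cartographer.search("retry policy", { limit: 5 });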
+ var DEFAULT_STUB_THRESHOLD = 100;
+ function overlapCoefficient2(a, b) {
+ const setA = new Set(a.toLowerCase().split(/\s+/));
+ const setB = new Set(b.toLowerCase().split(/\s+/));
+ let intersection = 0;
+ for (const word of setA) if (setB.has(word)) intersection++;
+ const minSize = Math.min(setA.size, setB.size);
+ return minSize > 0 ? intersection / minSize : 0;
+ }
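
Editor's note: a worked example of the coefficient above, |A ∩ B| / min(|A|, |B|) over lowercased word sets (illustration only):

// "Retry failed writes"  → A = {retry, failed, writes}
// "never retry writes"   → B = {never, retry, writes}
// intersection {retry, writes} ⇒ 2 / min(3, 3) ≈ 0.67
overlapCoefficient2("Retry failed writes", "never retry writes"); // 0.666…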
+ var TYPE_CORRECTIONS = {
+ agents: "agent",
+ persons: "person",
+ projects: "project",
+ tasks: "task",
+ decisions: "decision",
+ assumptions: "assumption",
+ constraints: "constraint",
+ contradictions: "contradiction",
+ insights: "insight",
+ policies: "policy",
+ executions: "execution",
+ archetypes: "archetype",
+ syntheses: "synthesis"
+ };
+ var STATUS_CORRECTIONS = {
+ open: "active",
+ closed: "completed",
+ done: "completed",
+ wip: "active",
+ "in-progress": "active",
+ "in progress": "active",
+ finished: "completed",
+ todo: "pending",
+ cancelled: "deprecated",
+ archived: "deprecated"
+ };
+ function createReconciler(vault, analysisFn, config) {
+ const stubThreshold = (config == null ? void 0 : config.stubThreshold) ?? DEFAULT_STUB_THRESHOLD;
+ const stateFile = (config == null ? void 0 : config.stateFile) ?? ".soma/reconciler-state.json";
+ let hashes = /* @__PURE__ */ new Map();
+ let savedEntityCount = 0;
+ try {
+ if (existsSync5(stateFile)) {
+ const raw = JSON.parse(readFileSync5(stateFile, "utf-8"));
+ const currentCount = vaultEntityCount(vault.baseDir);
+ if (raw.entityCount == null && raw.vaultFingerprint) {
+ console.log("[Reconciler] Migrating state from vaultFingerprint to entityCount");
+ hashes = /* @__PURE__ */ new Map();
+ } else if (raw.entityCount != null && currentCount < raw.entityCount) {
+ console.log(`[Reconciler] Vault entity count decreased (${raw.entityCount} \u2192 ${currentCount}) \u2014 resetting state`);
+ hashes = /* @__PURE__ */ new Map();
+ } else {
+ hashes = new Map(Object.entries(raw.hashes ?? {}));
+ }
+ savedEntityCount = currentCount;
+ }
+ } catch {
+ }
+ function saveState() {
+ const dir = dirname4(stateFile);
+ if (!existsSync5(dir)) mkdirSync5(dir, { recursive: true });
+ writeFileSync5(stateFile, JSON.stringify({
+ hashes: Object.fromEntries(hashes),
+ entityCount: savedEntityCount ?? vaultEntityCount(vault.baseDir)
+ }), "utf-8");
+ }
+ function md52(content) {
+ return createHash3("md5").update(content).digest("hex");
+ }
+ return {
+ /**
+ * Scan vault entities for structural issues.
+ * Returns a list of issues with codes and severities.
+ */
+ scan(options) {
+ const issues = [];
+ const allTypes = ["agent", "execution", "archetype", "insight", "policy", "decision", "assumption", "constraint", "contradiction", "synthesis"];
+ for (const type of allTypes) {
+ const entities = vault.list(type);
+ for (const entity of entities) {
+ const content = `${entity.type}:${entity.name}:${entity.body}`;
+ const hash = md52(content);
+ if (!(options == null ? void 0 : options.fullScan) && hashes.get(entity.id) === hash) continue;
+ hashes.set(entity.id, hash);
+ if (!entity.type) issues.push({ code: "FM001", severity: "error", entityPath: `${type}/${entity.id}`, message: "Missing type field", autoFixable: true });
+ if (!entity.created) issues.push({ code: "FM001", severity: "warning", entityPath: `${type}/${entity.id}`, message: "Missing created field", autoFixable: true });
+ if (!entity.name) issues.push({ code: "FM001", severity: "warning", entityPath: `${type}/${entity.id}`, message: "Missing name field", autoFixable: true });
+ if (TYPE_CORRECTIONS[entity.type]) {
+ issues.push({ code: "FM002", severity: "warning", entityPath: `${type}/${entity.id}`, message: `Invalid type "${entity.type}" (should be "${TYPE_CORRECTIONS[entity.type]}")`, autoFixable: true });
+ }
+ const validStatuses = ENTITY_STATUSES[entity.type];
+ if (validStatuses && !validStatuses.includes(entity.status)) {
+ const corrected = STATUS_CORRECTIONS[entity.status];
+ issues.push({ code: "FM003", severity: "warning", entityPath: `${type}/${entity.id}`, message: `Invalid status "${entity.status}"${corrected ? ` (should be "${corrected}")` : ""}`, autoFixable: !!corrected });
+ }
+ if (entity.tags && !Array.isArray(entity.tags)) {
+ issues.push({ code: "FM004", severity: "warning", entityPath: `${type}/${entity.id}`, message: "tags should be an array", autoFixable: true });
+ }
+ if (entity.related && !Array.isArray(entity.related)) {
+ issues.push({ code: "FM004", severity: "warning", entityPath: `${type}/${entity.id}`, message: "related should be an array", autoFixable: true });
+ }
+ for (const link of entity.related) {
+ const parts = link.split("/");
+ if (parts.length >= 2) {
+ const linkedEntity = vault.read(parts[0], parts.slice(1).join("/"));
+ if (!linkedEntity) {
+ issues.push({ code: "LINK001", severity: "warning", entityPath: `${type}/${entity.id}`, message: `Broken wikilink: [[${link}]]`, autoFixable: false });
+ }
+ }
+ }
+ if (options == null ? void 0 : options.fullScan) {
+ let hasInbound = false;
+ for (const otherType of allTypes) {
+ const others = vault.list(otherType);
+ if (others.some((o) => o.related.some((r) => r.includes(entity.id)))) {
+ hasInbound = true;
+ break;
+ }
+ }
+ if (!hasInbound && entity.type !== "agent") {
+ issues.push({ code: "ORPHAN001", severity: "info", entityPath: `${type}/${entity.id}`, message: "Orphan entity \u2014 no inbound links", autoFixable: false });
+ }
+ }
+ if (entity.body.length < stubThreshold) {
+ issues.push({ code: "STUB001", severity: "info", entityPath: `${type}/${entity.id}`, message: `Stub entity (body: ${entity.body.length} chars, threshold: ${stubThreshold})`, autoFixable: false });
+ }
+ }
+ }
+ saveState();
+ return issues;
+ },
+ /**
+ * Auto-fix deterministic issues (no LLM needed).
+ * Returns the number of fixes applied.
+ */
+ autofix(issues) {
+ let fixed = 0;
+ for (const issue of issues) {
+ if (!issue.autoFixable) continue;
+ const [type, ...idParts] = issue.entityPath.split("/");
+ const id = idParts.join("/");
+ if (!type || !id) continue;
+ const entity = vault.read(type, id);
+ if (!entity) continue;
+ const patch = {};
+ switch (issue.code) {
+ case "FM001": {
+ if (!entity.type) patch.type = type;
+ if (!entity.created) patch.created = (/* @__PURE__ */ new Date()).toISOString();
+ if (!entity.name) patch.name = id;
+ break;
+ }
+ case "FM002": {
+ const corrected = TYPE_CORRECTIONS[entity.type];
+ if (corrected) patch.type = corrected;
+ break;
+ }
+ case "FM003": {
+ const corrected = STATUS_CORRECTIONS[entity.status];
+ if (corrected) patch.status = corrected;
+ break;
+ }
+ case "FM004": {
+ if (entity.tags && !Array.isArray(entity.tags)) patch.tags = [String(entity.tags)];
+ if (entity.related && !Array.isArray(entity.related)) patch.related = [String(entity.related)];
+ break;
+ }
+ }
+ if (Object.keys(patch).length > 0) {
+ vault.update(id, patch);
+ fixed++;
+ }
+ }
+ return fixed;
+ },
+ /**
+ * Reconcile L1 entries: detect near-duplicates, merge overlapping entries,
+ * and resolve conflicts. Only operates on L1 (archive) knowledge entries.
+ *
+ * Execution and agent entities are excluded — they are event logs, not
+ * duplicatable knowledge. Superseded entries are also excluded.
+ *
+ * Returns { reconciled, mergeErrors }.
+ */
+ reconcileL1() {
+ const KNOWLEDGE_TYPES = /* @__PURE__ */ new Set(["insight", "decision", "policy", "constraint", "contradiction", "synthesis", "archetype", "assumption"]);
+ const allL1 = queryByLayer(vault, "archive");
+ const l1Entries = allL1.filter((e) => {
+ if (!KNOWLEDGE_TYPES.has(e.type)) return false;
+ if (e.superseded_by) return false;
+ if (Array.isArray(e.reconciled_from)) return false;
+ return true;
+ });
+ let reconciled = 0;
+ let mergeErrors = 0;
+ const processed = /* @__PURE__ */ new Set();
+ const existingMerges = allL1.map((e) => e.reconciled_from).filter((rf) => Array.isArray(rf));
+ for (let i = 0; i < l1Entries.length; i++) {
+ const entry = l1Entries[i];
+ if (processed.has(entry.id)) continue;
+ const duplicates = [];
+ for (let j = i + 1; j < l1Entries.length; j++) {
+ const other = l1Entries[j];
+ if (processed.has(other.id)) continue;
+ const similarity = overlapCoefficient2(entry.body, other.body);
+ if (similarity >= 0.7) {
+ duplicates.push(other);
+ }
+ }
+ if (duplicates.length === 0) continue;
+ const allEntries = [entry, ...duplicates];
+ const sourceIds = allEntries.map((e) => e.id);
+ const alreadyCovered = existingMerges.some(
+ (rf) => sourceIds.every((id) => rf.includes(id))
+ );
+ if (alreadyCovered) {
+ for (const dup of duplicates) processed.add(dup.id);
+ processed.add(entry.id);
+ continue;
+ }
+ const newestEntry = allEntries.reduce(
+ (a, b) => new Date(b.updated).getTime() > new Date(a.updated).getTime() ? b : a
+ );
+ const allAgentIds = [...new Set(allEntries.map((e) => e.agent_id).filter(Boolean))];
+ try {
+ const mergedId = writeToLayer(vault, "reconciler", "archive", {
+ type: newestEntry.type,
+ name: newestEntry.name,
+ status: newestEntry.status,
+ agent_id: allAgentIds.join(","),
+ trace_id: newestEntry.trace_id,
+ source_system: newestEntry.source_system,
+ reconciled_from: sourceIds,
+ tags: [...new Set(allEntries.flatMap((e) => e.tags))],
+ related: [...new Set(allEntries.flatMap((e) => e.related))],
+ body: newestEntry.body
+ });
+ for (const source of allEntries) {
+ vault.update(source.id, { superseded_by: mergedId });
+ processed.add(source.id);
+ }
+ reconciled++;
+ } catch (err) {
+ console.error("[Reconciler] Merge failed:", err instanceof Error ? err.message : String(err));
+ mergeErrors++;
+ }
+ }
+ return { reconciled, mergeErrors };
+ },
+ /**
+ * Run the full reconciliation pipeline.
+ * Returns { scanned, issues, fixed }.
+ */
+ async run(options) {
+ const issues = this.scan(options);
+ const autoFixable = issues.filter((i) => i.autoFixable);
+ const fixed = this.autofix(autoFixable);
+ if (analysisFn) {
+ const brokenLinks = issues.filter((i) => i.code === "LINK001");
+ for (const _issue of brokenLinks.slice(0, 10)) {
+ }
+ const stubs = issues.filter((i) => i.code === "STUB001");
+ for (const _issue of stubs.slice(0, 5)) {
+ }
+ }
+ return {
+ scanned: issues.length > 0 ? issues.length : 0,
+ issues: issues.length,
+ fixed
+ };
+ }
+ };
+ }
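
Editor's note: a hedged sketch of the reconciliation passes run in isolation (the analysisFn parameter is optional and omitted here):

const reconciler = createReconciler(sketchVault);
const issues = reconciler.scan({ fullScan: true }); // fullScan also reports orphans
const fixed = reconciler.autofix(issues.filter((i) => i.autoFixable));
const { reconciled, mergeErrors } = reconciler.reconcileL1(); // merge near-duplicate L1 entries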
+ var INTENT_TO_LAYER = {
+ enforce: "canon",
+ advise: "emerging",
+ brief: "working",
+ route: "archive"
+ };
+ var PolicyBridgeError = class extends Error {
+ constructor(message) {
+ super(message);
+ this.name = "PolicyBridgeError";
+ }
+ };
+ function createPolicyBridge(vault) {
+ function tagResults(entries, layer) {
+ return entries.map((entry) => ({
+ entry,
+ source_layer: layer,
+ semantic_weight: LAYER_SEMANTIC_WEIGHTS[layer]
+ }));
+ }
+ function filterByTopic(entries, topic) {
+ if (!topic) return entries;
+ const lower = topic.toLowerCase();
+ return entries.filter(
+ (e) => e.name.toLowerCase().includes(lower) || e.body.toLowerCase().includes(lower) || e.tags.some((t) => t.toLowerCase().includes(lower))
+ );
+ }
+ return {
+ query(intent, options) {
+ const topic = options == null ? void 0 : options.topic;
+ const teamId = options == null ? void 0 : options.team_id;
+ const limit = (options == null ? void 0 : options.limit) ?? 100;
+ if (intent === "all") {
+ const result = {
+ canon: tagResults(filterByTopic(queryByLayer(vault, "canon", { limit }), topic), "canon"),
+ emerging: tagResults(filterByTopic(queryByLayer(vault, "emerging", { limit }), topic), "emerging"),
+ working: tagResults(
+ filterByTopic(queryByLayer(vault, "working", { limit, team_id: teamId }), topic),
+ "working"
+ ),
+ archive: tagResults(filterByTopic(queryByLayer(vault, "archive", { limit }), topic), "archive")
+ };
+ return result;
+ }
+ const layer = INTENT_TO_LAYER[intent];
+ if (!layer) {
+ throw new PolicyBridgeError(`Unknown intent: '${intent}'. Use: enforce, advise, brief, route, or all.`);
+ }
+ if (intent === "brief" && !teamId) {
+ throw new PolicyBridgeError("team_id is required for brief (L2) queries.");
+ }
+ const entries = queryByLayer(vault, layer, { limit, team_id: teamId });
+ return tagResults(filterByTopic(entries, topic), layer);
+ },
+ // Legacy PolicySource for backward compatibility with AgentFlow guards
+ policySource: createSomaPolicySource(vault)
+ };
+ }
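
Editor's note: a hedged sketch of intent-based querying. "enforce" reads L4 canon, "brief" requires a team_id, and "all" returns every layer tagged with its semantic weight.

const bridge = createPolicyBridge(sketchVault);
const canon = bridge.query("enforce", { topic: "retries" });
const brief = bridge.query("brief", { team_id: "team-a", limit: 20 });
// bridge.query("brief") without a team_id throws PolicyBridgeError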
+ function createSomaPolicySource(vault) {
+ return {
+ recentFailureRate(agentId) {
+ const normalized = agentId.toLowerCase().replace(/[^a-z0-9]+/g, "-");
+ const agent = vault.read("agent", normalized);
+ return (agent == null ? void 0 : agent.failureRate) ?? 0;
+ },
+ isKnownBottleneck(nodeName) {
+ const archetypes = vault.list("archetype");
+ return archetypes.some((a) => {
+ const bottlenecks = a.bottlenecks;
+ return Array.isArray(bottlenecks) && bottlenecks.includes(nodeName);
+ });
+ },
+ lastConformanceScore(agentId) {
+ const normalized = agentId.toLowerCase().replace(/[^a-z0-9]+/g, "-");
+ const executions = vault.list("execution", { agentId: normalized, limit: 1 });
+ if (executions.length === 0) return null;
+ return executions[0].conformanceScore ?? null;
+ },
+ getAgentProfile(agentId) {
+ const normalized = agentId.toLowerCase().replace(/[^a-z0-9]+/g, "-");
+ const agent = vault.read("agent", normalized);
+ return (agent == null ? void 0 : agent.profile) ?? null;
+ }
+ };
+ }
+ var DEFAULT_L2_DECAY_DAYS = 14;
+ var DEFAULT_L3_DECAY_DAYS = 90;
+ function updateEvidenceReferences(vault, oldId, newId) {
+ for (const layer of ["emerging", "canon"]) {
+ const entries = queryByLayer(vault, layer);
+ for (const entry of entries) {
+ const links = entry.evidence_links;
+ if (!Array.isArray(links) || !links.includes(oldId)) continue;
+ const updatedLinks = links.map((id) => id === oldId ? newId : id);
+ vault.update(entry.id, { evidence_links: updatedLinks });
+ }
+ }
+ }
+ function checkDanglingReferences(vault) {
+ const dangling = [];
+ for (const layer of ["emerging", "canon"]) {
+ const entries = queryByLayer(vault, layer);
+ for (const entry of entries) {
+ const links = entry.evidence_links;
+ if (!Array.isArray(links)) continue;
+ for (const linkId of links) {
+ let found = false;
+ for (const type of ["execution", "insight", "decision", "policy", "agent", "archetype", "assumption", "constraint", "contradiction", "synthesis"]) {
+ if (vault.read(type, linkId)) {
+ found = true;
+ break;
+ }
+ }
+ if (!found) {
+ dangling.push({ entryId: entry.id, missingTargetId: linkId });
+ }
+ }
+ }
+ }
+ return dangling;
+ }
+ function createDecayProcessor(vault, config) {
+ const l2DefaultDays = (config == null ? void 0 : config.l2DefaultDays) ?? DEFAULT_L2_DECAY_DAYS;
+ const l3DefaultDays = (config == null ? void 0 : config.l3DefaultDays) ?? DEFAULT_L3_DECAY_DAYS;
+ const teamDecayDays = (config == null ? void 0 : config.teamDecayDays) ?? {};
+ function computeL2DecayAt(teamId) {
+ const days = teamDecayDays[teamId] ?? l2DefaultDays;
+ return new Date(Date.now() + days * 24 * 60 * 60 * 1e3).toISOString();
+ }
+ function computeL3DecayAt() {
+ return new Date(Date.now() + l3DefaultDays * 24 * 60 * 60 * 1e3).toISOString();
+ }
+ function extendDecayOnAccess(entity) {
+ if (!entity.layer || !entity.decay_at) return;
+ if (entity.layer !== "working" && entity.layer !== "emerging") return;
+ let newDecayAt;
+ if (entity.layer === "working") {
+ const days = (entity.team_id ? teamDecayDays[entity.team_id] : void 0) ?? l2DefaultDays;
+ newDecayAt = new Date(Date.now() + days * 24 * 60 * 60 * 1e3).toISOString();
+ } else {
+ newDecayAt = new Date(Date.now() + l3DefaultDays * 24 * 60 * 60 * 1e3).toISOString();
+ }
+ vault.update(entity.id, { decay_at: newDecayAt });
+ }
+ function processDecay() {
+ const now = Date.now();
+ let l2Decayed = 0;
+ let l3Decayed = 0;
+ const l2Entries = queryByLayer(vault, "working");
+ for (const entry of l2Entries) {
+ if (!entry.decay_at) continue;
+ if (new Date(entry.decay_at).getTime() > now) continue;
+ try {
+ writeToLayer(vault, "reconciler", "archive", {
+ type: entry.type,
+ id: `decayed-${entry.id}`,
+ name: entry.name,
+ status: entry.status,
+ tags: [...entry.tags, "decayed"],
+ related: entry.related,
+ body: entry.body,
+ decayed_from: "working",
+ // Preserve original metadata
+ team_id: entry.team_id,
+ agent_id: entry.agent_id,
+ trace_id: entry.trace_id,
+ source_system: entry.source_system
+ });
+ updateEvidenceReferences(vault, entry.id, `decayed-${entry.id}`);
+ vault.remove(entry.id);
+ l2Decayed++;
+ } catch {
+ }
+ }
+ const l3Entries = queryByLayer(vault, "emerging");
+ for (const entry of l3Entries) {
+ if (!entry.decay_at) continue;
+ if (new Date(entry.decay_at).getTime() > now) continue;
+ if (entry.status === "promoted" || entry.status === "rejected") continue;
+ try {
+ writeToLayer(vault, "reconciler", "archive", {
+ type: entry.type,
+ id: `decayed-${entry.id}`,
+ name: entry.name,
+ status: entry.status,
+ tags: [...entry.tags, "decayed"],
+ related: entry.related,
+ body: entry.body,
+ decayed_from: "emerging",
+ // Preserve original metadata
+ confidence_score: entry.confidence_score,
+ evidence_links: entry.evidence_links,
+ agent_id: entry.agent_id
+ });
+ updateEvidenceReferences(vault, entry.id, `decayed-${entry.id}`);
+ vault.remove(entry.id);
+ l3Decayed++;
+ } catch {
+ }
+ }
+ return { l2Decayed, l3Decayed, total: l2Decayed + l3Decayed };
+ }
+ return {
+ computeL2DecayAt,
+ computeL3DecayAt,
+ extendDecayOnAccess,
+ processDecay,
+ /** Get the configured decay windows. */
+ getConfig() {
+ return { l2DefaultDays, l3DefaultDays, teamDecayDays };
+ }
+ };
+ }
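
Editor's note: a hedged sketch of the decay windows above. Per-team overrides beat the 14-day L2 default; L3 proposals default to 90 days.

const decay = createDecayProcessor(sketchVault, { teamDecayDays: { "team-a": 30 } });
decay.computeL2DecayAt("team-a"); // now + 30 days (team override)
decay.computeL2DecayAt("team-b"); // now + 14 days (l2DefaultDays)
decay.computeL3DecayAt();         // now + 90 days (l3DefaultDays)
const { l2Decayed, l3Decayed, total } = decay.processDecay();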
+ var GovernanceError = class extends Error {
+ constructor(message) {
+ super(message);
+ this.name = "GovernanceError";
+ }
+ };
+ function createGovernanceAPI(vault) {
+ return {
+ list_pending() {
+ const l3Entries = queryByLayer(vault, "emerging");
+ return l3Entries.filter((e) => e.status === "pending").sort((a, b) => (b.confidence_score ?? 0) - (a.confidence_score ?? 0));
+ },
+ promote(entryId, reviewerId) {
+ const l3Entries = queryByLayer(vault, "emerging");
+ const entry = l3Entries.find((e) => e.id === entryId);
+ if (!entry) {
+ const l2Entries = queryByLayer(vault, "working");
+ const l2Entry = l2Entries.find((e) => e.id === entryId);
+ if (l2Entry) {
+ throw new GovernanceError(`L2 entries cannot be promoted. Entry '${entryId}' is in Working Memory (L2).`);
+ }
+ throw new GovernanceError(`Entry '${entryId}' not found in L3 (Emerging Knowledge).`);
+ }
+ if (entry.layer === "working") {
+ throw new GovernanceError(`L2 entries cannot be promoted. Entry '${entryId}' is in Working Memory (L2).`);
+ }
+ if (entry.status === "promoted") {
+ throw new GovernanceError(`Entry '${entryId}' has already been promoted.`);
+ }
+ if (entry.status === "rejected") {
+ throw new GovernanceError(`Entry '${entryId}' has been rejected. Resubmit to promote.`);
+ }
+ const now = (/* @__PURE__ */ new Date()).toISOString();
+ const entryData = entry;
+ const l4Id = writeToLayer(vault, "governance", "canon", {
+ type: entry.type,
+ id: `canon-${entryId}`,
+ name: entry.name,
+ status: "active",
+ ratified_by: reviewerId,
+ ratified_at: now,
+ origin_l3_id: entryId,
+ // Preserve evidence chain from L3
+ evidence_links: entry.evidence_links ?? [],
+ confidence_score: entry.confidence_score,
+ claim: entryData.claim,
+ source_agents: entryData.source_agents,
+ tags: [...entry.tags.filter((t) => t !== "l3-proposal"), "ratified", "canon"],
+ related: entry.related,
+ body: entry.body
+ });
+ vault.update(entryId, { status: "promoted" });
+ return l4Id;
+ },
+ reject(entryId, reviewerId, reason) {
+ const l3Entries = queryByLayer(vault, "emerging");
+ const entry = l3Entries.find((e) => e.id === entryId);
+ if (!entry) {
+ const l2Entries = queryByLayer(vault, "working");
+ const l2Entry = l2Entries.find((e) => e.id === entryId);
+ if (l2Entry) {
+ throw new GovernanceError(`L2 entries cannot be rejected via governance. Entry '${entryId}' is in Working Memory (L2).`);
+ }
+ throw new GovernanceError(`Entry '${entryId}' not found in L3 (Emerging Knowledge).`);
+ }
+ if (entry.status === "promoted") {
+ throw new GovernanceError(`Entry '${entryId}' has already been promoted.`);
+ }
+ if (entry.status === "rejected") {
+ throw new GovernanceError(`Entry '${entryId}' has already been rejected.`);
+ }
+ vault.update(entryId, {
+ status: "rejected",
+ rejected_by: reviewerId,
+ rejected_at: (/* @__PURE__ */ new Date()).toISOString(),
+ rejection_reason: reason
+ });
+ },
+ autoPromote(config) {
+ const enabled = (config == null ? void 0 : config.enabled) ?? false;
+ if (!enabled) return { promoted: [], skipped: 0 };
+ const minConfidence = (config == null ? void 0 : config.minConfidence) ?? 0.9;
+ const minAgentCount = (config == null ? void 0 : config.minAgentCount) ?? 5;
+ const pending = this.list_pending();
+ const promoted = [];
+ let skipped = 0;
+ for (const entry of pending) {
+ const confidence = entry.confidence_score ?? 0;
+ if (confidence < minConfidence) {
+ skipped++;
+ continue;
+ }
+ const evidenceLinks = entry.evidence_links ?? [];
+ const l1Entries = queryByLayer(vault, "archive");
+ const agentIds = /* @__PURE__ */ new Set();
+ for (const linkId of evidenceLinks) {
+ const linked = l1Entries.find((e) => e.id === linkId);
+ if (linked) {
+ const agentId = linked.agent_id ?? linked.agentId;
+ if (agentId) agentIds.add(agentId);
+ }
+ }
+ if (agentIds.size < minAgentCount) {
+ skipped++;
+ continue;
+ }
+ try {
+ const l4Id = this.promote(entry.id, "auto-promote");
+ promoted.push(l4Id);
+ console.log(`[Governance] Auto-promoted '${entry.name}' (confidence: ${confidence}, agents: ${agentIds.size})`);
+ } catch {
+ skipped++;
+ }
+ }
+ return { promoted, skipped };
+ },
+ get_evidence(entryId) {
+ const l3Entries = queryByLayer(vault, "emerging");
+ const entry = l3Entries.find((e) => e.id === entryId);
+ if (!entry) {
+ throw new GovernanceError(`Entry '${entryId}' not found in L3 (Emerging Knowledge).`);
+ }
+ const evidence = [];
+ const evidenceLinks = entry.evidence_links ?? [];
+ for (const linkId of evidenceLinks) {
+ const l1Entries = queryByLayer(vault, "archive");
+ const linked = l1Entries.find((e) => e.id === linkId);
+ if (linked) evidence.push(linked);
+ }
+ return { entry, evidence };
+ }
+ };
+ }
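
Editor's note: a hedged sketch of the human-in-the-loop review flow. The evidence-count heuristic below is illustrative only, not a rule the package imposes.

const governance = createGovernanceAPI(sketchVault);
for (const entry of governance.list_pending()) { // sorted by confidence, descending
  const { evidence } = governance.get_evidence(entry.id);
  if (evidence.length >= 3) governance.promote(entry.id, "reviewer-1"); // ratify into L4 canon
  else governance.reject(entry.id, "reviewer-1", "insufficient evidence");
}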
+ function createSoma(config) {
+ const vaultDir = (config == null ? void 0 : config.vaultDir) ?? ".soma/vault";
+ const inboxDir = (config == null ? void 0 : config.inboxDir) ?? ".soma/inbox";
+ if (config == null ? void 0 : config.layers) setLayersConfig(config.layers);
+ const vault = createVault({ baseDir: vaultDir });
+ const vectorStore = (config == null ? void 0 : config.vectorStore) ?? createJsonVectorStore(`${vaultDir}/../_vectors.json`);
+ const policyBridge = createPolicyBridge(vault);
+ const policySource = policyBridge.policySource;
+ const governance = createGovernanceAPI(vault);
+ const decayProcessor = createDecayProcessor(vault, config == null ? void 0 : config.decay);
+ vault.setOnRead((entity) => decayProcessor.extendDecayOnAccess(entity));
+ const harvester = createHarvester(vault, config == null ? void 0 : config.harvester);
+ const synthesizer = (config == null ? void 0 : config.analysisFn) ? createSynthesizer(vault, config.analysisFn, config == null ? void 0 : config.synthesizer) : void 0;
+ const cartographer = createCartographer(vault, vectorStore, config == null ? void 0 : config.embedFn, config == null ? void 0 : config.cartographer);
+ const reconciler = createReconciler(vault, config == null ? void 0 : config.analysisFn, config == null ? void 0 : config.reconciler);
+ return {
+ vault,
+ vectorStore,
+ policySource,
+ policyBridge,
+ governance,
+ harvester,
+ synthesizer,
+ cartographer,
+ reconciler,
+ decayProcessor,
+ async run() {
+ var _a, _b;
+ const harvested = await harvester.processInbox(inboxDir);
+ const reconcileResult = await reconciler.run();
+ const l1Result = reconciler.reconcileL1();
+ let synthesized = 0;
+ if (synthesizer) {
+ synthesized = await synthesizer.synthesize();
+ synthesized += await synthesizer.synthesizeL3();
+ synthesized += await synthesizer.synthesizeDecisions();
+ synthesized += synthesizer.synthesizeDecisionDivergence();
+ }
+ const autoPromoteResult = governance.autoPromote((_a = config == null ? void 0 : config.governance) == null ? void 0 : _a.autoPromote);
+ const driftStateFile = `${vaultDir}/../conformance-history.json`;
+ let conformanceHistory = [];
+ try {
+ if (existsSync6(driftStateFile)) {
+ conformanceHistory = JSON.parse(readFileSync6(driftStateFile, "utf-8"));
+ }
+ } catch {
+ }
+ const agents = vault.list("agent");
+ let driftTracked = 0;
+ let driftAlerts = 0;
+ for (const agent of agents) {
+ const data = agent;
+ const failureRate = data.failureRate;
+ const totalExecutions = data.totalExecutions;
+ const conformanceScore = data.lastConformanceScore ?? (failureRate != null ? 1 - failureRate : void 0);
+ if (conformanceScore != null && (totalExecutions ?? 0) > 0) {
+ conformanceHistory = trackConformanceTrend(conformanceHistory, {
+ agentId: agent.name,
+ score: conformanceScore,
+ runId: `run-${Date.now()}`
+ });
+ driftTracked++;
+ }
+ }
+ const agentIds = [...new Set(conformanceHistory.map((e) => e.agentId))];
+ for (const agentId of agentIds) {
+ const agentHistory = conformanceHistory.filter((e) => e.agentId === agentId);
+ const driftReport = detectDrift(agentHistory);
+ if (driftReport.status === "degrading" && driftReport.alert) {
+ try {
+ vault.create({
+ type: "insight",
+ name: `Drift alert: ${agentId}`,
+ status: "active",
+ tags: ["drift-alert", "auto-generated"],
+ body: driftReport.alert.message
+ });
+ driftAlerts++;
+ } catch {
+ }
+ }
+ }
+ const trimmedHistory = conformanceHistory.slice(-5e3);
+ try {
+ const dir = dirname5(driftStateFile);
+ if (!existsSync6(dir)) mkdirSync6(dir, { recursive: true });
+ writeFileSync6(driftStateFile, JSON.stringify(trimmedHistory), "utf-8");
+ } catch {
+ }
+ const embedded = await cartographer.embed();
+ const archetypes = await cartographer.discover();
+ await cartographer.mapRelationships();
+ cartographer.detectContradictions();
+ const decayResult = decayProcessor.processDecay();
+ let assertionFailures = 0;
+ if ((config == null ? void 0 : config.assertions) && config.assertions.length > 0) {
+ const violations = await evaluateAssertions(
+ config.assertions,
+ "soma-pipeline"
+ );
+ for (const v of violations) {
+ try {
+ vault.create({
+ type: "insight",
+ name: `Assertion failed: ${((_b = v.explanation) == null ? void 0 : _b.rule) ?? v.message}`,
+ status: "active",
+ tags: ["assertion-failure", "auto-generated"],
+ body: v.message
+ });
+ assertionFailures++;
+ } catch {
+ }
+ }
+ }
+ return {
+ harvested,
+ reconciled: { issues: reconcileResult.issues, fixed: reconcileResult.fixed, mergeErrors: l1Result.mergeErrors },
+ synthesized,
+ autoPromoted: autoPromoteResult.promoted.length,
+ mapped: embedded + archetypes,
+ decayed: decayResult.total,
+ drift: { tracked: driftTracked, alerts: driftAlerts },
+ assertionFailures
+ };
+ },
+ watch(watchDir) {
+ const dir = watchDir ?? inboxDir;
+ const interval = setInterval(async () => {
+ try {
+ const count = await harvester.processInbox(dir);
+ if (count > 0) {
+ console.log(`Soma: Harvested ${count} files from inbox`);
+ }
+ } catch (err) {
+ console.error("Soma watch error:", err);
+ }
+ }, 1e4);
+ return () => clearInterval(interval);
+ }
+ };
+ }
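
Editor's note: a hedged end-to-end sketch of the factory above. myEmbedder and myLlm are assumptions standing in for real embedding and LLM providers; every other key is from the config shape createSoma() reads.

const soma = createSoma({
  vaultDir: ".soma/vault",
  embedFn: async (text) => myEmbedder(text),
  analysisFn: async (prompt) => myLlm(prompt)
});
const report = await soma.run(); // { harvested, reconciled, synthesized, autoPromoted, mapped, decayed, drift, assertionFailures }
const stop = soma.watch();       // polls the inbox every 10 seconds
// later: stop();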
+ function migrateToLayers(vault) {
+ const allTypes = [
+ "agent",
+ "execution",
+ "archetype",
+ "insight",
+ "policy",
+ "decision",
+ "assumption",
+ "constraint",
+ "contradiction",
+ "synthesis"
+ ];
+ let migrated = 0;
+ let skipped = 0;
+ let errors = 0;
+ for (const type of allTypes) {
+ const entities = vault.list(type, { limit: 1e5 });
+ for (const entity of entities) {
+ if (entity.layer) {
+ skipped++;
+ continue;
+ }
+ try {
+ vault.update(entity.id, {
+ layer: "archive",
+ source_worker: "migration"
+ });
+ migrated++;
+ } catch (err) {
+ console.error(`[Migration] Failed to migrate ${type}/${entity.id}: ${err instanceof Error ? err.message : String(err)}`);
+ errors++;
+ }
+ }
+ }
+ return { migrated, skipped, errors };
+ }
+ function isLangChainRun(obj) {
+ if (typeof obj !== "object" || obj === null) return false;
+ const o = obj;
+ return typeof o.id === "string" && typeof o.name === "string" && typeof o.run_type === "string" && typeof o.start_time === "number" && typeof o.status === "string";
+ }
+ function mapStatus(status) {
+ if (status === "success") return "completed";
+ if (status === "error") return "failed";
+ if (status === "pending") return "running";
+ return "completed";
+ }
+ function mapRunType(runType) {
+ switch (runType) {
+ case "chain":
+ return "agent";
+ case "tool":
+ return "tool";
+ case "llm":
+ return "tool";
+ case "retriever":
+ return "tool";
+ case "prompt":
+ return "tool";
+ case "parser":
+ return "tool";
+ default:
+ return "custom";
+ }
+ }
+ function buildGraph(run, parentId, result) {
+ const nodeId = run.id;
+ const metadata = {};
+ if (run.error) metadata.error = run.error;
+ if (run.inputs) metadata.inputs = run.inputs;
+ if (run.outputs) metadata.outputs = run.outputs;
+ const childIds = [];
+ result.nodes[nodeId] = {
+ id: nodeId,
+ type: mapRunType(run.run_type),
+ name: run.name,
+ startTime: run.start_time,
+ endTime: run.end_time ?? null,
+ status: mapStatus(run.status),
+ parentId,
+ children: childIds,
+ metadata
+ };
+ if (parentId) {
+ result.edges.push({ from: parentId, to: nodeId, type: "called" });
+ // child ids are appended to parent.children exactly once, via childIds in the loop below
+ }
+ if (run.child_runs) {
+ for (const child of run.child_runs) {
+ childIds.push(child.id);
+ buildGraph(child, nodeId, result);
+ }
+ }
+ }
+ function langchainRunToGraphLike(run) {
+ const result = { nodes: {}, edges: [] };
+ buildGraph(run, null, result);
+ return {
+ id: run.id,
+ agentId: run.name,
+ nodes: result.nodes,
+ edges: result.edges,
+ status: mapStatus(run.status),
+ rootNodeId: run.id
+ };
+ }
+ var langchainAdapter = {
+ canAdapt: isLangChainRun,
+ adapt: langchainRunToGraphLike
+ };
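
Editor's note: a hedged sketch adapting a minimal LangChain-style run record. The run literal is illustrative; timestamps are epoch milliseconds.

const run = {
  id: "run-1", name: "qa-agent", run_type: "chain",
  start_time: 1700000000000, status: "success",
  child_runs: [{ id: "run-2", name: "search", run_type: "tool", start_time: 1700000000100, status: "success" }]
};
if (langchainAdapter.canAdapt(run)) {
  const graph = langchainAdapter.adapt(run);
  // graph.status === "completed"; graph.nodes["run-1"].children lists "run-2"
}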
+ var DEFAULT_FAILURE_RATE_THRESHOLD = 0.2;
+ function evaluatePreflight(vault, agentId) {
+ const start = Date.now();
+ const agents = vault.list("agent");
+ const agent = agents.find(
+ (a) => a.name === agentId || a.agentId === agentId
+ );
+ if (!agent) {
+ return {
+ proceed: true,
+ warnings: [],
+ recommendations: [{
+ insight: `Agent '${agentId}' is not registered in the vault. Run the pipeline to create an agent profile.`,
+ sourceAgents: [],
+ confidence: 0
+ }],
+ available: true,
+ _meta: { durationMs: Date.now() - start }
+ };
+ }
+ const data = agent;
+ const failureRate = data.failureRate ?? 0;
+ const totalExecutions = data.totalExecutions ?? 0;
+ const warnings = [];
+ const recommendations = [];
+ let proceed = true;
+ if (failureRate > DEFAULT_FAILURE_RATE_THRESHOLD && totalExecutions >= 5) {
+ warnings.push({
+ rule: "max-failure-rate",
+ threshold: DEFAULT_FAILURE_RATE_THRESHOLD,
+ actual: failureRate,
+ message: `Failure rate ${(failureRate * 100).toFixed(1)}% exceeds threshold ${(DEFAULT_FAILURE_RATE_THRESHOLD * 100).toFixed(0)}%`,
+ source: "agent-profile"
+ });
+ }
+ const l4Entries = queryByLayer(vault, "canon");
+ for (const entry of l4Entries) {
+ const eData = entry;
+ if (eData.enforcement !== "error") continue;
+ const scope = eData.scope ?? "";
+ const body = entry.body + " " + entry.related.join(" ") + " " + scope;
+ if (body.includes(agentId) || body.includes(agent.name) || scope === "*") {
+ warnings.push({
+ rule: entry.name,
+ message: `L4 enforcement policy: ${entry.name}`,
+ source: "L4 canon"
+ });
+ proceed = false;
+ }
+ }
+ const constraints = vault.list("constraint");
+ for (const c of constraints) {
+ if (c.status !== "active") continue;
+ const body = c.body + " " + c.related.join(" ");
+ if (body.includes(agentId) || body.includes(agent.name)) {
+ warnings.push({
+ rule: c.name,
+ message: `Active constraint: ${c.name}`,
+ source: c.layer ? `L${c.layer === "emerging" ? "3" : c.layer === "canon" ? "4" : "?"} ${c.layer}` : "vault"
+ });
+ }
+ }
+ const l3Entries = queryByLayer(vault, "emerging");
+ for (const entry of l3Entries) {
+ if (entry.status !== "pending") continue;
2826
+ const body = entry.body + " " + entry.related.join(" ");
2827
+ if (body.includes(agentId) || body.includes(agent.name)) {
2828
+ warnings.push({
2829
+ rule: entry.name,
2830
+ message: `Pending L3 proposal: ${entry.name}`,
2831
+ source: "L3 emerging"
2832
+ });
2833
+ }
2834
+ }
2835
+ for (const peer of agents) {
2836
+ if (peer.name === agent.name) continue;
2837
+ const peerData = peer;
2838
+ const peerRate = peerData.failureRate ?? 0;
2839
+ const peerExec = peerData.totalExecutions ?? 0;
2840
+ if (peerExec < 5) continue;
2841
+ const gap = failureRate - peerRate;
2842
+ if (gap >= 0.2) {
2843
+ const insights = vault.list("insight");
2844
+ for (const insight of insights) {
2845
+ const sa = insight.source_agents;
2846
+ if (sa && sa.includes(agentId) && sa.includes(peer.name)) {
2847
+ recommendations.push({
2848
+ insight: `${peer.name} has ${((1 - peerRate) * 100).toFixed(0)}% success rate. ${insight.claim ?? insight.name}`,
2849
+ sourceAgents: sa,
2850
+ confidence: insight.confidence_score ?? 0.5
2851
+ });
2852
+ }
2853
+ }
2854
+ if (recommendations.length === 0) {
2855
+ recommendations.push({
2856
+ insight: `Consider approach from ${peer.name} (${((1 - peerRate) * 100).toFixed(0)}% success vs your ${((1 - failureRate) * 100).toFixed(0)}%)`,
2857
+ sourceAgents: [peer.name],
2858
+ confidence: 0.5
2859
+ });
2860
+ }
2861
+ }
2862
+ }
2863
+ return {
2864
+ proceed,
2865
+ warnings,
2866
+ recommendations,
2867
+ available: true,
2868
+ _meta: { durationMs: Date.now() - start }
2869
+ };
2870
+ }
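+ // Helpers for normalizing raw trace data into execution events: status to
+ // eventType/status mapping, plus a path signature built by sorting nodes by
+ // startTime and joining "type:name" pairs with an arrow.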
+ function mapEventType(status) {
+ if (status === "error" || status === "failed") return "execution.failed";
+ return "execution.completed";
+ }
+ function mapStatus2(status) {
+ if (status === "success") return "completed";
+ if (status === "error" || status === "failed") return "failed";
+ if (status === "running") return "running";
+ return "completed";
+ }
+ function computePathSignature(nodes) {
+ const nodeList = Object.values(nodes);
+ nodeList.sort((a, b) => (a.startTime ?? 0) - (b.startTime ?? 0));
+ return nodeList.map((n) => `${n.type ?? "?"}:${n.name ?? "?"}`).join("\u2192");
+ }
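+ // Converts a raw execution graph into a normalized event; returns null when
+ // agentId or startTime is missing rather than guessing.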
+ function traceToEvent(raw) {
+ const agentId = raw.agentId;
+ const status = raw.status;
+ const startTime = raw.startTime;
+ const endTime = raw.endTime;
+ const nodes = raw.nodes ?? {};
+ const graphId = raw.id ?? `${agentId}-${startTime}`;
+ if (!agentId || !startTime) return null;
+ const duration = (endTime ?? startTime) - startTime;
+ const nodeCount = Object.keys(nodes).length;
+ const pathSignature = computePathSignature(nodes);
+ return {
+ eventType: mapEventType(status),
+ graphId,
+ agentId,
+ timestamp: startTime,
+ // Use original timestamp, not Date.now()
+ schemaVersion: 1,
+ status: mapStatus2(status),
+ duration,
+ nodeCount,
+ pathSignature,
+ violations: []
+ };
+ }
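+ // Maps an OpenClaw cron log entry to an event; only "finished" entries
+ // count, the job id falls back to the .jsonl filename, and the agent id is
+ // namespaced as `openclaw:<jobId>`.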
+ function cronRunToEvent(entry, filePath) {
+ if (entry.action !== "finished") return null;
+ const jobId = entry.jobId ?? basename2(filePath, ".jsonl");
+ const agentId = `openclaw:${jobId}`;
+ const startTime = entry.runAtMs ?? entry.ts;
+ const duration = entry.durationMs ?? 0;
+ const status = entry.status;
+ if (!startTime) return null;
+ return {
+ eventType: status === "ok" ? "execution.completed" : "execution.failed",
+ graphId: entry.sessionId ?? `${jobId}-${startTime}`,
+ agentId,
+ timestamp: startTime,
+ schemaVersion: 1,
+ status: status === "ok" ? "completed" : "failed",
+ duration,
+ nodeCount: 1,
+ pathSignature: `cron-job:${jobId}`,
+ violations: []
+ };
+ }
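+ // Tolerant JSONL reader: malformed lines and unreadable files are silently
+ // skipped so one bad log cannot abort a scan.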
+ function parseJsonlFile(filePath) {
+ const events = [];
+ try {
+ const content = readFileSync7(filePath, "utf-8");
+ for (const line of content.split("\n")) {
+ if (!line.trim()) continue;
+ try {
+ const entry = JSON.parse(line);
+ const event = cronRunToEvent(entry, filePath);
+ if (event) events.push(event);
+ } catch {
+ }
+ }
+ } catch {
+ }
+ return events;
+ }
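+ // Recursively collects .json/.jsonl trace files, skipping dotfiles,
+ // underscore-prefixed entries, and .deleted./.reset. artifacts.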
+ function findTraceFiles(dir) {
+ const results = [];
+ if (!existsSync7(dir)) return results;
+ const entries = readdirSync2(dir);
+ for (const entry of entries) {
+ if (entry.startsWith(".") || entry.startsWith("_")) continue;
+ if (entry.includes(".deleted.") || entry.includes(".reset.")) continue;
+ const fullPath = join2(dir, entry);
+ try {
+ const stat = statSync(fullPath);
+ if (stat.isDirectory()) {
+ results.push(...findTraceFiles(fullPath));
+ } else if (extname2(entry) === ".json" || extname2(entry) === ".jsonl") {
+ results.push(fullPath);
+ }
+ } catch {
+ }
+ }
+ return results;
+ }
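+ // Walks the given directories and aggregates events from three shapes:
+ // cron JSONL logs, already-normalized event JSON, and raw execution graphs.
+ // Illustrative usage (the directory path is a placeholder):
+ //   const { events, filesScanned, errors } = scanTraces(["./traces"]);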
+ function scanTraces(dirs) {
+ const events = [];
+ const errors = [];
+ let filesScanned = 0;
+ let filesSkipped = 0;
+ for (const dir of dirs) {
+ const files = findTraceFiles(dir);
+ for (const file of files) {
+ filesScanned++;
+ if (extname2(file) === ".jsonl") {
+ const jsonlEvents = parseJsonlFile(file);
+ events.push(...jsonlEvents);
+ if (jsonlEvents.length === 0) filesSkipped++;
+ continue;
+ }
+ try {
+ const content = readFileSync7(file, "utf-8");
+ const raw = JSON.parse(content);
+ if (raw.eventType && raw.agentId && raw.timestamp) {
+ const event2 = {
+ eventType: mapEventType(String(raw.status ?? raw.eventType)),
+ graphId: String(raw.graphId ?? ""),
+ agentId: String(raw.agentId),
+ timestamp: Number(raw.timestamp),
+ schemaVersion: Number(raw.schemaVersion ?? 1),
+ status: mapStatus2(String(raw.status ?? "completed")),
+ duration: Number(raw.duration ?? 0),
+ nodeCount: Number(raw.nodeCount ?? 1),
+ pathSignature: String(raw.pathSignature ?? ""),
+ violations: Array.isArray(raw.violations) ? raw.violations : []
+ };
+ events.push(event2);
+ continue;
+ }
+ if (!raw.agentId || !raw.nodes) {
+ filesSkipped++;
+ continue;
+ }
+ const event = traceToEvent(raw);
+ if (event) {
+ events.push(event);
+ } else {
+ filesSkipped++;
+ }
+ } catch (err) {
+ filesSkipped++;
+ errors.push(`${file}: ${err instanceof Error ? err.message : String(err)}`);
+ }
+ }
+ }
+ return { events, filesScanned, filesSkipped, errors };
+ }
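+ // API key resolution order: explicit config.apiKey, then SOMA_API_KEY, then
+ // the provider-specific env var, then that var in ~/.env; otherwise throws.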
+ var PROVIDER_ENV_VARS = {
+ openrouter: "OPENROUTER_API_KEY",
+ anthropic: "ANTHROPIC_API_KEY",
+ openai: "OPENAI_API_KEY",
+ custom: "SOMA_API_KEY"
+ };
+ function resolveApiKey(config) {
+ if (config.apiKey) return config.apiKey;
+ if (process.env.SOMA_API_KEY) return process.env.SOMA_API_KEY;
+ const envVar = PROVIDER_ENV_VARS[config.provider];
+ if (envVar && process.env[envVar]) return process.env[envVar];
+ if (envVar) {
+ const dotenvKey = readDotenvKey(envVar);
+ if (dotenvKey) return dotenvKey;
+ }
+ throw new Error(
+ `No API key found for provider "${config.provider}". Set ${envVar ?? "SOMA_API_KEY"} env var or pass --api-key.`
+ );
+ }
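+ // Minimal ~/.env parser: the first KEY=VALUE match wins; comments and lines
+ // without "=" are ignored (no quote stripping is attempted).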
+ function readDotenvKey(varName) {
+ try {
+ const envPath = join3(homedir(), ".env");
+ if (!existsSync8(envPath)) return null;
+ const content = readFileSync8(envPath, "utf-8");
+ for (const line of content.split("\n")) {
+ const trimmed = line.trim();
+ if (trimmed.startsWith("#") || !trimmed.includes("=")) continue;
+ const eqIdx = trimmed.indexOf("=");
+ const key = trimmed.slice(0, eqIdx).trim();
+ const value = trimmed.slice(eqIdx + 1).trim();
+ if (key === varName) return value;
+ }
+ return null;
+ } catch (err) {
+ if (err.code === "ENOENT") return null;
+ throw new Error(`Failed to read ${join3(homedir(), ".env")}: ${err.message}`);
+ }
+ }
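+ // Default base URLs for OpenAI-compatible providers; "custom" has no default
+ // and must supply config.baseUrl. The analysis fn POSTs a single user message
+ // to /chat/completions at temperature 0.3 and returns the first choice's text.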
+ var BASE_URLS = {
+ openrouter: "https://openrouter.ai/api/v1",
+ openai: "https://api.openai.com/v1"
+ };
+ function createOpenAICompatibleAnalysisFn(baseUrl, model, apiKey, maxTokens) {
+ return async (prompt) => {
+ var _a, _b, _c;
+ const response = await fetch(`${baseUrl}/chat/completions`, {
+ method: "POST",
+ headers: {
+ "Content-Type": "application/json",
+ "Authorization": `Bearer ${apiKey}`
+ },
+ body: JSON.stringify({
+ model,
+ messages: [{ role: "user", content: prompt }],
+ max_tokens: maxTokens,
+ temperature: 0.3
+ })
+ });
+ if (!response.ok) {
+ const body = await response.text();
+ throw new Error(`LLM API error ${response.status}: ${body.slice(0, 200)}`);
+ }
+ const data = await response.json();
+ return ((_c = (_b = (_a = data.choices) == null ? void 0 : _a[0]) == null ? void 0 : _b.message) == null ? void 0 : _c.content) ?? "";
+ };
+ }
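+ // Anthropic variant: same prompt-in/text-out contract, but against the
+ // Messages API with x-api-key and anthropic-version headers, returning the
+ // first "text" content block.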
+ function createAnthropicAnalysisFn(apiKey, model, maxTokens) {
+ return async (prompt) => {
+ var _a, _b;
+ const response = await fetch("https://api.anthropic.com/v1/messages", {
+ method: "POST",
+ headers: {
+ "Content-Type": "application/json",
+ "x-api-key": apiKey,
+ "anthropic-version": "2023-06-01"
+ },
+ body: JSON.stringify({
+ model,
+ max_tokens: maxTokens,
+ messages: [{ role: "user", content: prompt }],
+ temperature: 0.3
+ })
+ });
+ if (!response.ok) {
+ const body = await response.text();
+ throw new Error(`Anthropic API error ${response.status}: ${body.slice(0, 200)}`);
+ }
+ const data = await response.json();
+ return ((_b = (_a = data.content) == null ? void 0 : _a.find((c) => c.type === "text")) == null ? void 0 : _b.text) ?? "";
+ };
+ }
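+ // Embedding caller for OpenAI-compatible /embeddings endpoints; input is
+ // truncated to 8000 characters, and a missing embedding yields null (with a
+ // console warning) instead of throwing.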
+ function createOpenAICompatibleEmbedFn(baseUrl, model, apiKey) {
+ return async (text) => {
+ var _a, _b;
+ const response = await fetch(`${baseUrl}/embeddings`, {
+ method: "POST",
+ headers: {
+ "Content-Type": "application/json",
+ "Authorization": `Bearer ${apiKey}`
+ },
+ body: JSON.stringify({
+ model,
+ input: text.slice(0, 8e3)
+ // Limit input length
+ })
+ });
+ if (!response.ok) {
+ const body = await response.text();
+ throw new Error(`Embedding API error ${response.status}: ${body.slice(0, 200)}`);
+ }
+ const data = await response.json();
+ const embedding = (_b = (_a = data.data) == null ? void 0 : _a[0]) == null ? void 0 : _b.embedding;
+ if (!embedding) {
+ console.warn("[Embedding] API response missing embedding data, returning null");
+ return null;
+ }
+ return embedding;
+ };
+ }
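+ // Wires a provider config into { analysisFn, embedFn }: Anthropic gets its
+ // native client, everything else the OpenAI-compatible one; embedding calls
+ // for Anthropic fall back to the OpenAI endpoint, since Anthropic offers no
+ // embeddings API. Illustrative usage (model names are placeholders):
+ //   const p = createProvider({ provider: "openrouter", model: "some/model" });
+ //   const answer = await p.analysisFn("Summarize recent failures");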
+ function createProvider(config) {
+ const apiKey = resolveApiKey(config);
+ const maxTokens = config.maxTokens ?? 4096;
+ let analysisFn;
+ if (config.provider === "anthropic") {
+ analysisFn = createAnthropicAnalysisFn(apiKey, config.model, maxTokens);
+ } else {
+ const baseUrl = config.baseUrl ?? BASE_URLS[config.provider];
+ if (!baseUrl) throw new Error(`No base URL for provider "${config.provider}". Set baseUrl in config.`);
+ analysisFn = createOpenAICompatibleAnalysisFn(baseUrl, config.model, apiKey, maxTokens);
+ }
+ let embedFn = null;
+ if (config.embeddingModel) {
+ const embedBaseUrl = config.provider === "anthropic" ? "https://api.openai.com/v1" : config.baseUrl ?? BASE_URLS[config.provider];
+ if (embedBaseUrl) {
+ embedFn = createOpenAICompatibleEmbedFn(embedBaseUrl, config.embeddingModel, apiKey);
+ }
+ }
+ return {
+ analysisFn,
+ embedFn,
+ provider: config.provider,
+ model: config.model
+ };
+ }
+ export {
+ GovernanceError,
+ KNOWLEDGE_LAYERS,
+ LAYER_REQUIRED_FIELDS,
+ LAYER_SEMANTIC_WEIGHTS,
+ LayerPermissionError,
+ WORKER_WRITE_PERMISSIONS,
+ canWrite,
+ checkDanglingReferences,
+ computePatternSignature,
+ computeToolPatternSignature,
+ cosineSimilarity,
+ createCartographer,
+ createDecayProcessor,
+ createGovernanceAPI,
+ createGuardedBuilder,
+ createHarvester,
+ createJsonVectorStore,
+ createLanceVectorStore,
+ createMilvusVectorStore,
+ createPolicyBridge,
+ createProvider,
+ createReconciler,
+ createSoma,
+ createSomaPolicySource,
+ createSynthesizer,
+ createVault,
+ decisionsToEntities,
+ detectDrift,
+ enforceWritePermission,
+ evaluateAssertions,
+ evaluatePreflight,
+ extractContentFromSession,
+ extractDecisionsFromGraph,
+ extractDecisionsFromLangChain,
+ extractDecisionsFromNodes,
+ extractDecisionsFromSession,
+ extractWikilinks,
+ findVariantsWithModel,
+ getAgentBriefingData,
+ getDecisionReplayData,
+ getEfficiency,
+ isExecutionGraph,
+ isLangChainRun,
+ isLayerEnabled,
+ langchainAdapter,
+ langchainRunToGraphLike,
+ migrateToLayers,
+ parseEntity,
+ queryByLayer,
+ scanTraces,
+ serializeEntity,
+ setLayersConfig,
+ trackConformanceTrend,
+ validateLayerFields,
+ vaultEntityCount,
+ vaultFingerprint,
+ writeToLayer
+ };