amalfa 1.0.28 → 1.0.30

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,662 @@
1
+ import { existsSync, mkdirSync } from "node:fs";
2
+ import { join } from "node:path";
3
+ import type { GraphEngine } from "@src/core/GraphEngine";
4
+ import type { GraphGardener } from "@src/core/GraphGardener";
5
+ import { VectorEngine } from "@src/core/VectorEngine";
6
+ import type { ResonanceDB } from "@src/resonance/db";
7
+ import { getLogger } from "@src/utils/Logger";
8
+ import { TagInjector } from "@src/utils/TagInjector";
9
+ import { callOllama, inferenceState } from "./sonar-inference";
10
+ import {
11
+ extractDate,
12
+ judgeRelationship,
13
+ summarizeCommunity,
14
+ } from "./sonar-strategies";
15
+ import type { ChatSession, Message, SonarTask } from "./sonar-types";
16
+
17
// Module-scoped structured logger, tagged with this module's component name.
const log = getLogger("SonarLogic");
18
+
19
/** Shared dependencies threaded through every Sonar handler in this module. */
export interface SonarContext {
  /** Resonance database: node lookup, metadata/date updates, raw DB handle. */
  db: ResonanceDB;
  /** In-memory graph used for neighbor traversal, attributes, and stats. */
  graphEngine: GraphEngine;
  /** Higher-level curation helpers: content fetch, gap finding, clustering, hubs. */
  gardener: GraphGardener;
  /** Live chat sessions keyed by session id (created lazily in handleChat). */
  chatSessions: Map<string, ChatSession>;
}
25
+
26
+ /**
27
+ * Handle metadata enhancement for a single document
28
+ */
29
+ export async function handleMetadataEnhancement(
30
+ nodeId: string,
31
+ context: SonarContext,
32
+ ): Promise<void> {
33
+ if (!inferenceState.ollamaAvailable) return;
34
+
35
+ try {
36
+ const node = context.db.getNode(nodeId);
37
+ if (!node) return;
38
+
39
+ const content = await context.gardener.getContent(nodeId);
40
+ if (!content) return;
41
+
42
+ const response = await callOllama(
43
+ [
44
+ {
45
+ role: "system",
46
+ content:
47
+ 'Extract 3-5 keywords and a 1-sentence summary from the following text. Return JSON: { "keywords": [], "summary": "" }',
48
+ },
49
+ { role: "user", content },
50
+ ],
51
+ { temperature: 0.2, format: "json" },
52
+ );
53
+
54
+ const metadata = JSON.parse(response.message.content);
55
+ context.db.updateNodeMeta(nodeId, {
56
+ ...node.meta,
57
+ keywords: metadata.keywords,
58
+ summary: metadata.summary,
59
+ sonar_enhanced: true,
60
+ enhanced_at: new Date().toISOString(),
61
+ });
62
+ } catch (error) {
63
+ log.error({ nodeId, error }, "Metadata enhancement failed");
64
+ }
65
+ }
66
+
67
+ /**
68
+ * Handle batch enhancement of documents
69
+ */
70
+ export async function handleBatchEnhancement(
71
+ limit: number,
72
+ context: SonarContext,
73
+ ): Promise<{ successful: number; failed: number; total: number }> {
74
+ const allNodes = context.db.getNodes({ excludeContent: true });
75
+ const unenhanced = allNodes
76
+ .filter((n: { meta?: Record<string, unknown> }) => {
77
+ try {
78
+ const meta = n.meta || {};
79
+ return !meta.sonar_enhanced && !meta.phi3_enhanced;
80
+ } catch {
81
+ return false;
82
+ }
83
+ })
84
+ .map((row: { id: string }) => ({ id: row.id }));
85
+
86
+ const batch = unenhanced.slice(0, limit);
87
+ log.info(`🔄 Enhancing ${batch.length} docs with Sonar...`);
88
+
89
+ const results = await Promise.allSettled(
90
+ batch.map((node: { id: string }) =>
91
+ handleMetadataEnhancement(node.id, context),
92
+ ),
93
+ );
94
+
95
+ const successful = results.filter((r) => r.status === "fulfilled").length;
96
+ const failed = results.filter((r) => r.status === "rejected").length;
97
+
98
+ return { successful, failed, total: batch.length };
99
+ }
100
+
101
/**
 * Handle chat request.
 *
 * Answers a user message with RAG: vector-search the corpus for the top 3
 * matches, collect one-hop graph neighbors of those hits, and append both as
 * context to the message sent to the model. Only the plain user message
 * (without the injected context) is persisted in the session history.
 *
 * @param sessionId - Session key; a new session is created on first use.
 * @param userMessage - Raw user input.
 * @param context - Shared Sonar dependencies.
 * @param modelOverride - Optional model name forwarded to callOllama.
 * @throws Error when Ollama is unavailable; rethrows search/inference failures.
 */
export async function handleChat(
  sessionId: string,
  userMessage: string,
  context: SonarContext,
  modelOverride?: string,
): Promise<{ message: Message; sessionId: string }> {
  if (!inferenceState.ollamaAvailable) {
    throw new Error("Sonar is not available");
  }

  // Lazily create the session, seeding it with a system prompt stamped with
  // today's date (YYYY-MM-DD slice of the ISO timestamp).
  let session = context.chatSessions.get(sessionId);
  if (!session) {
    session = {
      id: sessionId,
      messages: [
        {
          role: "system",
          content: `You are AMALFA Corpus Assistant. Help users understand and explore their knowledge base.
Current Date: ${new Date().toISOString().split("T")[0]}`,
        },
      ],
      startedAt: new Date(),
    };
    context.chatSessions.set(sessionId, session);
  }

  const vectors = new VectorEngine(context.db.getRawDb());
  try {
    // Direct hits (top 3 by vector similarity) plus graph neighbors of those
    // hits that were not themselves direct hits.
    const results = await vectors.search(userMessage, 3);
    const directNodeIds = new Set(results.map((r) => r.id));
    const relatedNodeIds = new Set<string>();

    for (const r of results) {
      const neighbors = context.graphEngine.getNeighbors(r.id);
      for (const neighborId of neighbors) {
        if (!directNodeIds.has(neighborId)) {
          relatedNodeIds.add(neighborId);
        }
      }
    }

    // Build the context blob appended to the outgoing user turn (not stored).
    let augmentContext = "\n\nRELEVANT CONTEXT FROM KNOWLEDGE BASE:\n";
    if (results.length > 0) {
      augmentContext += `\n--- [DIRECT SEARCH RESULTS] ---\n`;
      results.forEach((r) => {
        const node = context.db.getNode(r.id);
        const content = node?.content ?? "";
        // 800-char cap per direct hit keeps the prompt bounded.
        augmentContext += `[Document: ${r.id}] (Similarity: ${r.score.toFixed(2)})\n${content.slice(0, 800)}\n\n`;
      });

      if (relatedNodeIds.size > 0) {
        augmentContext += `\n--- [RELATED NEIGHBORS (GRAPH DISCOVERY)] ---\n`;
        // At most 5 neighbor snippets, shorter (400 chars) than direct hits.
        Array.from(relatedNodeIds)
          .slice(0, 5)
          .forEach((nrId) => {
            const node = context.db.getNode(nrId);
            augmentContext += `[Related: ${nrId}] (Via: ${node?.label || nrId})\n${(node?.content ?? "").slice(0, 400)}\n\n`;
          });
      }
    }

    const response = await callOllama(
      [
        ...session.messages,
        { role: "user", content: userMessage + augmentContext },
      ],
      { model: modelOverride },
    );

    // History keeps the bare user message; the RAG context is not persisted,
    // so later turns re-augment from fresh searches.
    session.messages.push({ role: "user", content: userMessage });
    session.messages.push(response.message);

    return { message: response.message, sessionId };
  } catch (error) {
    log.error({ error, userMessage }, "Chat failed");
    throw error;
  }
}
182
+
183
+ /**
184
+ * Handle search query analysis
185
+ */
186
+ export async function handleSearchAnalysis(
187
+ query: string,
188
+ _context: SonarContext, // Reserved for future graph-aware analysis
189
+ ): Promise<unknown> {
190
+ if (!inferenceState.ollamaAvailable) {
191
+ throw new Error("Sonar is not available");
192
+ }
193
+
194
+ try {
195
+ const response = await callOllama(
196
+ [
197
+ {
198
+ role: "system",
199
+ content:
200
+ 'Analyze search queries and extract intent, entities and suggested filters. Return JSON: { "intent": "", "entities": [], "filters": {} }',
201
+ },
202
+ { role: "user", content: query },
203
+ ],
204
+ { temperature: 0.1, format: "json" },
205
+ );
206
+
207
+ return JSON.parse(response.message.content);
208
+ } catch (error) {
209
+ log.error({ error, query }, "Query analysis failed");
210
+ throw error;
211
+ }
212
+ }
213
+
214
+ /**
215
+ * Handle result re-ranking
216
+ */
217
+ export async function handleResultReranking(
218
+ results: Array<{ id: string; content: string; score: number }>,
219
+ query: string,
220
+ intent?: string,
221
+ ): Promise<
222
+ Array<{
223
+ id: string;
224
+ content: string;
225
+ score: number;
226
+ relevance_score: number;
227
+ }>
228
+ > {
229
+ if (!inferenceState.ollamaAvailable) {
230
+ throw new Error("Sonar is not available");
231
+ }
232
+
233
+ try {
234
+ const response = await callOllama(
235
+ [
236
+ {
237
+ role: "system",
238
+ content:
239
+ "You are a search result re-ranker. Analyze relevance and provide scores.",
240
+ },
241
+ {
242
+ role: "user",
243
+ content: `Re-rank these search results for query: "${query}"${
244
+ intent ? `\nQuery intent: ${intent}` : ""
245
+ }\n\nResults:\n${results.map((r, i) => `${i + 1}. ${r.content.slice(0, 200)}`).join("\n")}\n\nReturn JSON array with relevance scores (0.0 to 1.0): [{"index": 1, "relevance": 0.95, "reason": ""}]`,
246
+ },
247
+ ],
248
+ { temperature: 0.2, format: "json" },
249
+ );
250
+
251
+ const content = response.message.content;
252
+ try {
253
+ const rankings = JSON.parse(content);
254
+ return results.map((result, idx) => {
255
+ const ranking = rankings.find(
256
+ (r: { index: number }) => r.index === idx + 1,
257
+ );
258
+ return { ...result, relevance_score: ranking?.relevance || 0.5 };
259
+ });
260
+ } catch {
261
+ return results.map((r) => ({ ...r, relevance_score: r.score }));
262
+ }
263
+ } catch (error) {
264
+ log.error({ error, query }, "Result re-ranking failed");
265
+ throw error;
266
+ }
267
+ }
268
+
269
+ /**
270
+ * Handle context extraction (smart snippets)
271
+ */
272
+ export async function handleContextExtraction(
273
+ result: { id: string; content: string },
274
+ query: string,
275
+ ): Promise<unknown> {
276
+ if (!inferenceState.ollamaAvailable) {
277
+ throw new Error("Sonar is not available");
278
+ }
279
+
280
+ try {
281
+ const response = await callOllama(
282
+ [
283
+ {
284
+ role: "system",
285
+ content:
286
+ "Extract the most relevant 200-300 character snippet from the document for the given query.",
287
+ },
288
+ {
289
+ role: "user",
290
+ content: `Query: ${query}\nDocument [${result.id}]:\n${result.content.slice(0, 4000)}`,
291
+ },
292
+ ],
293
+ { temperature: 0 },
294
+ );
295
+
296
+ return {
297
+ id: result.id,
298
+ snippet: response.message.content.trim(),
299
+ };
300
+ } catch (error) {
301
+ log.error({ error, docId: result.id }, "Context extraction failed");
302
+ throw error;
303
+ }
304
+ }
305
/**
 * Handle synthesis task (Phase 2).
 *
 * Summarizes each graph community of at least `task.minSize` nodes (default
 * 5) from up to 4 representative members. With `task.autoApply`, writes each
 * summary as a markdown "synthesis node" (frontmatter + wiki-links to the
 * cluster members) under docs/synthesis/.
 *
 * @param task - minSize filters small clusters; autoApply persists files.
 * @param context - Shared Sonar dependencies.
 * @param taskModel - Optional model override passed to summarizeCommunity.
 * @returns Markdown report, one section per synthesized community.
 */
export async function handleSynthesisTask(
  task: SonarTask,
  context: SonarContext,
  taskModel?: string,
): Promise<string> {
  let output = "";
  const minSize = task.minSize || 5;
  const validClusters = context.gardener
    .analyzeCommunities()
    .filter((c) => c.nodes.length >= minSize);

  for (const cluster of validClusters) {
    // Summarize from up to 4 representative members, skipping any whose
    // content cannot be fetched.
    const reps = context.gardener.getClusterRepresentatives(cluster.nodes, 4);
    const nodeData = [];
    for (const id of reps) {
      const content = await context.gardener.getContent(id);
      if (content) nodeData.push({ id, content });
    }

    if (nodeData.length > 0) {
      const synthesis = await summarizeCommunity(nodeData, taskModel);
      // Filename slug: lowercase label with non-alphanumerics collapsed to
      // single hyphens (may still yield leading/trailing hyphens).
      const slug = synthesis.label
        .toLowerCase()
        .replace(/[^a-z0-9]/g, "-")
        .replace(/-+/g, "-");
      const filename = `synthesis-${cluster.clusterId}-${slug}.md`;

      output += `#### Community: ${synthesis.label}\n- **Members:** ${cluster.nodes.length} nodes\n- **Summary:** ${synthesis.summary}\n\n`;

      if (task.autoApply) {
        // Persist the synthesis node; Bun.write requires the Bun runtime.
        const synthDir = join(process.cwd(), "docs/synthesis");
        if (!existsSync(synthDir)) mkdirSync(synthDir, { recursive: true });
        await Bun.write(
          join(synthDir, filename),
          `---\ntitle: "${synthesis.label}"\ntype: synthesis\nnodes: [${cluster.nodes.join(", ")}]\n---\n\n# ${synthesis.label}\n\n${synthesis.summary}\n\n## Cluster Members\n${cluster.nodes.map((id) => `- [[${id}]]`).join("\n")}\n`,
        );
        output += `- **Action:** 📝 Created synthesis node at \`${join("docs/synthesis", filename)}\`\n\n`;
      }
    }
  }
  return output;
}
350
+
351
+ /**
352
+ * Handle timeline task (Phase 3)
353
+ */
354
+ export async function handleTimelineTask(
355
+ task: SonarTask,
356
+ context: SonarContext,
357
+ taskModel?: string,
358
+ ): Promise<string> {
359
+ let output = "";
360
+ const limit = task.limit || 50;
361
+ const nodes = context.db.getNodes({ limit, excludeContent: true });
362
+ let updatedCount = 0;
363
+
364
+ for (const node of nodes) {
365
+ if (node.date) continue;
366
+ const content = await context.gardener.getContent(node.id);
367
+ if (!content) continue;
368
+ const date = await extractDate(node.id, content, taskModel);
369
+ if (date) {
370
+ if (task.autoApply) context.db.updateNodeDate(node.id, date);
371
+ output += `- ${task.autoApply ? "✅" : "🔍"} **${node.id}**: Anchored to ${date}\n`;
372
+ updatedCount++;
373
+ }
374
+ }
375
+ output += `\n**Total Updated:** ${updatedCount} nodes\n`;
376
+ return output;
377
+ }
378
+
379
+ /**
380
+ * Handle garden task (Phase 1 & 4)
381
+ */
382
+ export async function handleGardenTask(
383
+ task: SonarTask,
384
+ context: SonarContext,
385
+ taskModel?: string,
386
+ ): Promise<string> {
387
+ let output = "";
388
+ const limit = task.limit || 5;
389
+ const semanticSuggestions = await context.gardener.findGaps(limit);
390
+ const structuralSuggestions = context.gardener.findStructuralGaps(limit);
391
+ const temporal = context.gardener.weaveTimeline();
392
+
393
+ output += `### Semantic Gaps (Vector)\n`;
394
+ for (const sug of semanticSuggestions) {
395
+ const sourceContent = await context.gardener.getContent(sug.sourceId);
396
+ const targetContent = await context.gardener.getContent(sug.targetId);
397
+ if (sourceContent && targetContent) {
398
+ const judgment = await judgeRelationship(
399
+ { id: sug.sourceId, content: sourceContent },
400
+ { id: sug.targetId, content: targetContent },
401
+ taskModel,
402
+ );
403
+ if (judgment.related) {
404
+ const relType = judgment.type || "SEE_ALSO";
405
+ const sourcePath = context.gardener.resolveSource(sug.sourceId);
406
+ if (task.autoApply && sourcePath)
407
+ TagInjector.injectTag(sourcePath, relType, sug.targetId);
408
+ output += `- ${task.autoApply ? "💉" : "⚖️"} **${sug.sourceId} ↔ ${sug.targetId}**: ${relType} (${judgment.reason})\n`;
409
+ } else {
410
+ output += `- ❌ **${sug.sourceId} ↔ ${sug.targetId}**: DISMISSED (${judgment.reason || "Not related"})\n`;
411
+ }
412
+ if (taskModel?.includes(":free"))
413
+ await new Promise((r) => setTimeout(r, 1000));
414
+ }
415
+ }
416
+
417
+ output += `\n### Structural Gaps (Adamic-Adar)\n`;
418
+ for (const sug of structuralSuggestions) {
419
+ const sourceContent = await context.gardener.getContent(sug.sourceId);
420
+ const targetContent = await context.gardener.getContent(sug.targetId);
421
+ if (sourceContent && targetContent) {
422
+ const judgment = await judgeRelationship(
423
+ { id: sug.sourceId, content: sourceContent },
424
+ { id: sug.targetId, content: targetContent },
425
+ taskModel,
426
+ );
427
+ if (judgment.related) {
428
+ const relType = judgment.type || "SEE_ALSO";
429
+ const sourcePath = context.gardener.resolveSource(sug.sourceId);
430
+ if (task.autoApply && sourcePath)
431
+ TagInjector.injectTag(sourcePath, relType, sug.targetId);
432
+ output += `- ${task.autoApply ? "💉" : "⚖️"} **${sug.sourceId} ↔ ${sug.targetId}**: ${relType} (${judgment.reason})\n`;
433
+ } else {
434
+ output += `- ❌ **${sug.sourceId} ↔ ${sug.targetId}**: DISMISSED (${judgment.reason || "Not related"})\n`;
435
+ }
436
+ if (taskModel?.includes(":free"))
437
+ await new Promise((r) => setTimeout(r, 1000));
438
+ }
439
+ }
440
+
441
+ output += `\n### Temporal Sequence\n`;
442
+ for (const sug of temporal) {
443
+ const sourcePath = context.gardener.resolveSource(sug.sourceId);
444
+ if (task.autoApply && sourcePath)
445
+ TagInjector.injectTag(sourcePath, "FOLLOWS", sug.targetId);
446
+ output += `- ${task.autoApply ? "💉" : "🕒"} **${sug.sourceId} → ${sug.targetId}**: FOLLOWS (${sug.reason})\n`;
447
+ }
448
+
449
+ return output;
450
+ }
451
+
452
/**
 * Handle autonomous research task (Phase 5).
 *
 * Runs an agent loop (max 5 steps). Each step the model chooses one action:
 * SEARCH (vector search), READ (fetch a node's content), EXPLORE (list a
 * node's graph neighbors), or FINISH (emit the final answer). Outcomes are
 * accumulated into `findings` and fed back into the next step's prompt. A
 * final "auditor" inference verifies the findings against the query and
 * produces the polished answer (or a raw-findings fallback).
 *
 * @param task - Must carry `query`; otherwise an error string is returned.
 * @param context - Shared Sonar dependencies.
 * @param taskModel - Optional model override; ":free" models throttled 1s/step.
 * @returns Markdown transcript of the investigation plus the conclusion.
 */
export async function handleResearchTask(
  task: SonarTask,
  context: SonarContext,
  taskModel?: string,
): Promise<string> {
  if (!task.query) return "❌ Error: Research task requires a query.";

  let output = `## Recursive Discovery: "${task.query}"\n\n`;
  const maxSteps = 5;
  const findings: string[] = [];
  // Tracks READ node ids and "explore-"-prefixed EXPLORE ids to avoid repeats.
  const visitedNodes = new Set<string>();

  log.info({ query: task.query }, "🕵️‍♂️ Starting recursive research");
  // Keyword heuristic: timeline/history-style queries switch the prompt into
  // a chronology-first "narrative" mode.
  const isNarrative =
    task.query.toLowerCase().includes("timeline") ||
    task.query.toLowerCase().includes("history") ||
    task.query.toLowerCase().includes("how did");

  // Top-3 structural hubs are offered to the agent as entry points.
  const hubs = context.gardener.identifyHubs(3);
  const hubContext = hubs
    .map((h) => `- [[${h.id}]] (Centrality: ${h.score.toFixed(2)})`)
    .join("\n");

  for (let step = 1; step <= maxSteps; step++) {
    output += `### Step ${step}: Analysis\n`;

    // 1. Analyze findings and decide next move
    const prompt = `
You are the AMALFA Research Agent. Your goal is to answer this query: "${
      task.query
    }"
${
  isNarrative
    ? "MODE: Narrative Investigation (Prioritize dates and chronological sequence)"
    : ""
}

Graph Context: ${context.graphEngine.getStats().nodes} nodes available.
Structural Hubs (Important Entry Points):
${hubContext}

Current Findings:
${findings.length > 0 ? findings.join("\n") : "None yet."}

Based on these findings, what is your next step?
You can:
- "SEARCH": Provide a vector search query to find more docs.
- "READ": Provide a specific Node ID to read its full content.
- "EXPLORE": Provide a Node ID to see its direct graph neighbors (traversal).
- "FINISH": Provide the final comprehensive answer.

IMPORTANT: Return ONLY raw JSON. No preamble, no explanation outside JSON.
Return JSON: { "action": "SEARCH"|"READ"|"EXPLORE"|"FINISH", "query": "...", "nodeId": "...", "reasoning": "...", "answer": "..." }
`;

    try {
      const actionResponse = await callOllama(
        [{ role: "user", content: prompt }],
        { model: taskModel, temperature: 0.1, format: "json" },
      );

      const content = actionResponse.message.content;
      let decision: {
        action: "SEARCH" | "READ" | "EXPLORE" | "FINISH";
        query?: string;
        nodeId?: string;
        reasoning: string;
        answer?: string;
      };
      try {
        decision = JSON.parse(content);
      } catch {
        // Try to extract JSON from markdown blocks
        const match = content.match(/\{[\s\S]*\}/);
        if (match) {
          decision = JSON.parse(match[0]);
        } else {
          throw new Error("Could not parse JSON from response");
        }
      }
      output += `> **Reasoning:** ${decision.reasoning}\n\n`;

      if (decision.action === "FINISH") {
        output += `### Final Conclusion\n${decision.answer}\n`;
        break;
      }

      if (decision.action === "SEARCH") {
        // SEARCH: vector search, top 3, summarized into findings.
        const searchQuery = decision.query || task.query;
        output += `🔍 **Action:** Searching for \`${searchQuery}\`\n`;
        const results = await context.gardener.findRelated(searchQuery, 3);
        const summaries = results
          .map(
            (r) =>
              `- [[${r.id}]] (Score: ${r.score.toFixed(2)}${
                r.date ? `, Date: ${r.date}` : ""
              })`,
          )
          .join("\n");
        findings.push(`Search results for "${searchQuery}":\n${summaries}`);
        output += `${summaries}\n\n`;
      } else if (decision.action === "READ") {
        // READ: fetch one node's content (first 1000 chars go into findings).
        const nodeId = decision.nodeId;
        if (!nodeId || visitedNodes.has(nodeId)) {
          findings.push(`Already visited or invalid node: ${nodeId}`);
          continue;
        }
        output += `📖 **Action:** Reading node \`${nodeId}\`\n`;
        const content = await context.gardener.getContent(nodeId);
        visitedNodes.add(nodeId);
        if (content) {
          findings.push(`Content of ${nodeId}:\n${content.slice(0, 1000)}...`);
          output += `Successfully read ${nodeId} (${content.length} chars)\n\n`;
        } else {
          findings.push(`Node not found: ${nodeId}`);
          output += `⚠️ Node not found: ${nodeId}\n\n`;
        }
      } else if (decision.action === "EXPLORE") {
        // EXPLORE: list up to 8 graph neighbors (with dates when attributed).
        const nodeId = decision.nodeId;
        if (!nodeId || visitedNodes.has(`explore-${nodeId}`)) {
          findings.push(`Already explored or invalid node: ${nodeId}`);
          continue;
        }
        output += `🌐 **Action:** Exploring neighborhood of \`${nodeId}\`\n`;
        const neighbors = context.graphEngine.getNeighbors(nodeId);
        visitedNodes.add(`explore-${nodeId}`);
        if (neighbors.length > 0) {
          const neighborDetails = neighbors
            .slice(0, 8)
            .map((n) => {
              const attrs = context.graphEngine.getNodeAttributes(n);
              return `- [[${n}]]${attrs?.date ? ` (Date: ${attrs.date})` : ""}`;
            })
            .join("\n");
          findings.push(`Graph neighbors of ${nodeId}:\n${neighborDetails}`);
          output += `Found ${neighbors.length} neighbors. Leads injected into findings.\n\n`;
        } else {
          findings.push(`Node ${nodeId} has no graph neighbors.`);
          output += `⚠️ No neighbors found for ${nodeId}\n\n`;
        }
      }

      // Throttling for free tiers
      if (taskModel?.includes(":free"))
        await new Promise((r) => setTimeout(r, 1000));
    } catch (error) {
      output += `❌ Step failed: ${error}\n`;
      break;
    }
  }

  // Final summary and Chain Verification
  // (FINISH already wrote "### Final Conclusion"; otherwise the auditor's
  // answer doubles as an auto-summarized conclusion.)
  if (!output.includes("### Final Conclusion")) {
    output += `### Final Conclusion (Auto-Summarized)\n`;
  } else {
    output += `\n### Chain Verification\n`;
  }

  const verificationPrompt = `
You are the AMALFA Auditor. Review the following research findings and the original query.
Query: "${task.query}"
Findings:
${findings.join("\n")}

1. Does the gathered information fully answer the query?
2. If not, what specifically is missing?
3. Provide a final, polished answer based ON ONLY the findings.

Return JSON: { "answered": true|false, "missing_info": "...", "final_answer": "..." }
`;

  try {
    const verificationResponse = await callOllama(
      [{ role: "user", content: verificationPrompt }],
      { model: taskModel, temperature: 0.1, format: "json" },
    );

    const resultSnippet = verificationResponse.message.content;
    type AuditResult = {
      answered: boolean;
      missing_info: string;
      final_answer: string;
    };
    let audit: AuditResult | null = null;
    try {
      audit = JSON.parse(resultSnippet);
    } catch {
      // Same markdown-block salvage as the step loop.
      const match = resultSnippet.match(/\{[\s\S]*\}/);
      audit = match ? JSON.parse(match[0]) : null;
    }

    if (audit) {
      if (!audit.answered) {
        output += `⚠️ **Auditor Note:** Research incomplete. Missing: ${audit.missing_info}\n\n`;
      } else {
        output += `✅ **Auditor Note:** Research verified. Query fully addressed.\n\n`;
      }
      output += audit.final_answer;
    } else {
      output += `⚠️ Verification failed to parse. Returning raw summary fallback.\n`;
      output += findings.join("\n\n");
    }
  } catch (e) {
    output += `⚠️ Verification failed: ${e}`;
  }

  return output;
}