amalfa 1.0.28 → 1.0.30

@@ -1,28 +1,63 @@
 #!/usr/bin/env bun
+
 /**
  * AMALFA Sonar Multi-Purpose Sub-Agent
- * Daemon for search intelligence, metadata enhancement, and interactive chat
+ * Main Entry Point & Daemon Controller
  */
 
-import { join } from "path";
-import { readdirSync, existsSync, renameSync, writeFileSync } from "node:fs";
-import { loadConfig, AMALFA_DIRS } from "@src/config/defaults";
+import { existsSync, mkdirSync } from "node:fs";
+import { readdir, rename } from "node:fs/promises";
+
+import { join } from "node:path";
+import { AMALFA_DIRS, loadConfig } from "@src/config/defaults";
+import { GraphEngine } from "@src/core/GraphEngine";
+import { GraphGardener } from "@src/core/GraphGardener";
+import { VectorEngine } from "@src/core/VectorEngine";
+import { ResonanceDB } from "@src/resonance/db";
 import { getLogger } from "@src/utils/Logger";
-import { ServiceLifecycle } from "@src/utils/ServiceLifecycle";
 import { sendNotification } from "@src/utils/Notifications";
 import {
   checkOllamaHealth,
   discoverOllamaCapabilities,
 } from "@src/utils/ollama-discovery";
+import { ServiceLifecycle } from "@src/utils/ServiceLifecycle";
+import {
+  handleBatchEnhancement,
+  handleChat,
+  handleContextExtraction,
+  handleGardenTask,
+  handleMetadataEnhancement,
+  handleResearchTask,
+  handleResultReranking,
+  handleSearchAnalysis,
+  handleSynthesisTask,
+  handleTimelineTask,
+  type SonarContext,
+} from "./sonar-logic";
+import { getTaskModel } from "./sonar-strategies";
+import type {
+  ChatRequest,
+  ChatSession,
+  MetadataEnhanceRequest,
+  SearchAnalyzeRequest,
+  SearchContextRequest,
+  SearchRerankRequest,
+  SonarTask,
+} from "./sonar-types";
+import { inferenceState } from "./sonar-inference";
 
 const args = process.argv.slice(2);
 const command = args[0] || "serve";
 const log = getLogger("SonarAgent");
 
-// Database initialization
-import { ResonanceDB } from "@src/resonance/db";
-import { VectorEngine } from "@src/core/VectorEngine";
 let DB_PATH: string;
+let db: ResonanceDB;
+const graphEngine = new GraphEngine();
+let gardener: GraphGardener;
+let vectorEngine: VectorEngine;
+
+// Global state
+const chatSessions = new Map<string, ChatSession>();
 
 // Service lifecycle management
 const lifecycle = new ServiceLifecycle({
@@ -32,927 +67,221 @@ const lifecycle = new ServiceLifecycle({
   entryPoint: "src/daemon/sonar-agent.ts",
 });
 
-// Global state
-// Global state
-let server: Bun.Server<unknown> | null = null;
-let ollamaAvailable = false;
-let ollamaModel = "phi3:latest";
-
-// Chat Session Management
-const chatSessions = new Map<string, ChatSession>();
-
-interface ChatSession {
-  id: string;
-  messages: Message[];
-  startedAt: Date;
-}
-
-/**
- * Message interface for chat API
- */
-interface Message {
-  role: "system" | "user" | "assistant";
-  content: string;
-}
-
-/**
- * Request options for Ollama API
- */
-interface RequestOptions {
-  temperature?: number;
-  num_predict?: number;
-  stream?: boolean;
-  format?: "json"; // Enable GBNF-constrained JSON output
-  model?: string; // Override model for this specific call (tiered strategy)
-}
-
 /**
- * Call Ollama HTTP API for inference
- * This is the preferred method for inference (faster, supports streaming)
+ * Main logical loop for the Sonar Agent
  */
-async function callOllama(
-  messages: Message[],
-  options: RequestOptions = {},
-): Promise<{ message: Message }> {
+async function main() {
   const config = await loadConfig();
-  // @ts-ignore - backward compatibility with phi3 config
-  const hostArgs = config.sonar || config.phi3 || {};
-
-  // Cloud toggle: dev-cloud/prod-local strategy
-  const cloudConfig = hostArgs.cloud;
-  const useCloud = cloudConfig?.enabled === true;
-  const provider = useCloud ? cloudConfig.provider || "ollama" : "ollama";
-
-  // Tiered model strategy: options.model > cloud.model > discovered > config > default
-  const { format, model: overrideModel, ...modelOptions } = options;
-  const model =
-    overrideModel ||
-    (useCloud ? cloudConfig.model : null) ||
-    ollamaModel ||
-    hostArgs.model ||
-    "qwen2.5:1.5b";
-
-  // Build headers
-  const headers: Record<string, string> = {
-    "Content-Type": "application/json",
-  };
-  // API key: prefer env var (OPENROUTER_API_KEY) over config
-  const apiKey = process.env.OPENROUTER_API_KEY || cloudConfig?.apiKey;
-  if (useCloud && apiKey) {
-    headers["Authorization"] = `Bearer ${apiKey}`;
-    log.info(
-      { provider, hasKey: !!apiKey, keyLength: apiKey?.length },
-      "Cloud request with API key",
-    );
-  } else if (useCloud) {
-    log.warn("Cloud enabled but no API key found in env or config!");
-  }
-  // OpenRouter requires site headers for tracking
-  if (provider === "openrouter") {
-    headers["HTTP-Referer"] = "https://github.com/pjsvis/amalfa";
-    headers["X-Title"] = "AMALFA Knowledge Graph";
-  }
-
-  // Determine endpoint and request format based on provider
-  let endpoint: string;
-  let body: string;
-
-  if (provider === "openrouter") {
-    // OpenRouter uses OpenAI-compatible format at openrouter.ai/api/v1
-    endpoint = "https://openrouter.ai/api/v1/chat/completions";
-    body = JSON.stringify({
-      model,
-      messages,
-      stream: false,
-      temperature: modelOptions.temperature ?? 0.1,
-      max_tokens: modelOptions.num_predict ?? 500,
-    });
-  } else {
-    // Ollama format (local or cloud Ollama server)
-    const host = useCloud
-      ? cloudConfig.host
-      : hostArgs.host || "localhost:11434";
-    endpoint = `http://${host}/api/chat`;
-    body = JSON.stringify({
-      model,
-      messages,
-      stream: false,
-      format, // Pass format (e.g. "json") for GBNF grammar
-      options: {
-        temperature: 0.1,
-        num_predict: 200,
-        ...modelOptions,
-      },
-    });
+  if (!config.sonar.enabled) {
+    log.info("Sonar Agent is disabled in config. Exiting.");
+    return;
   }
 
-  const response = await fetch(endpoint, {
-    method: "POST",
-    headers,
-    body,
+  DB_PATH = config.database;
+  db = new ResonanceDB(DB_PATH);
+  vectorEngine = new VectorEngine(db.getRawDb());
+  gardener = new GraphGardener(db, graphEngine, vectorEngine);
+
+  // Ensure task directories exist
+  [
+    AMALFA_DIRS.tasks.pending,
+    AMALFA_DIRS.tasks.processing,
+    AMALFA_DIRS.tasks.completed,
+  ].forEach((dir) => {
+    if (!existsSync(dir)) mkdirSync(dir, { recursive: true });
   });
 
-  if (!response.ok) {
-    // Try to get error details from response body
-    let errorBody = "";
-    try {
-      errorBody = await response.text();
-    } catch {}
-    log.error(
-      {
-        status: response.status,
-        statusText: response.statusText,
-        body: errorBody,
-      },
-      "API request failed",
-    );
-    throw new Error(`${provider} API error: ${response.statusText}`);
-  }
-
-  const result = await response.json();
-
-  // Normalize response format (OpenRouter uses OpenAI format)
-  if (provider === "openrouter") {
-    // OpenAI format: { choices: [{ message: { role, content } }] }
-    const openaiResult = result as { choices: { message: Message }[] };
-    return {
-      message: openaiResult.choices[0]?.message || {
-        role: "assistant",
-        content: "",
-      },
-    };
-  }
-
-  // Ollama format: { message: { role, content } }
-  return result as { message: Message };
-}
-
-/**
- * Handle search analysis task
- * Analyzes query intent, entities, and technical level
- */
-async function handleSearchAnalysis(query: string): Promise<unknown> {
-  if (!ollamaAvailable) {
-    throw new Error("Sonar is not available");
-  }
+  // Initial health check
+  inferenceState.ollamaAvailable = await checkOllamaHealth();
 
-  try {
-    const response = await callOllama(
-      [
-        {
-          role: "system",
-          content: "You are a search query analyzer. Return JSON only.",
-        },
-        {
-          role: "user",
-          content: `Analyze this query: "${query}"
-
-Return JSON:
-{
-  "intent": "implementation|conceptual|example",
-  "entities": ["term1", "term2"],
-  "technical_level": "high|medium|low",
-  "suggested_queries": ["query1", "query2"]
-}`,
-        },
-      ],
-      {
-        temperature: 0.1,
-        num_predict: 200,
-        format: "json", // Force valid JSON output
-      },
-    );
-
-    // Parse JSON response
-    const content = response.message.content;
-    try {
-      return JSON.parse(content);
-    } catch {
-      // Fallback if not JSON
-      return {
-        intent: "unknown",
-        entities: [],
-        technical_level: "medium",
-        suggested_queries: [],
-      };
+  if (inferenceState.ollamaAvailable) {
+    const capabilities = await discoverOllamaCapabilities();
+    const firstModel = capabilities.allModels?.[0];
+    if (capabilities.available && firstModel) {
+      inferenceState.ollamaModel = config.sonar.model || firstModel.name;
+      log.info({ model: inferenceState.ollamaModel }, "Sonar Agent ready");
     }
-  } catch (error) {
-    log.error({ error, query }, "Search analysis failed");
-    throw error;
-  }
-}
-
-/**
- * Handle metadata enhancement task
- * Comprehensive document analysis for enhanced metadata
- */
-async function handleMetadataEnhancement(docId: string): Promise<unknown> {
-  if (!ollamaAvailable) {
-    throw new Error("Sonar is not available");
+  } else if (config.sonar.cloud?.enabled) {
+    log.info("Local Ollama not found, but Cloud is enabled. Proceeding.");
+    inferenceState.ollamaAvailable = true;
+  } else {
+    log.warn("Sonar Agent limited: Ollama unreachable and Cloud disabled.");
  }
 
-  try {
-    // Connect to DB and fetch node source path
-    const db = new ResonanceDB(DB_PATH);
-    const node = db.getNode(docId);
-    if (!node) {
-      throw new Error(`Node not found: ${docId}`);
-    }
-
-    const meta = node.meta || {};
-    const sourcePath = meta.source as string | undefined;
-    if (!sourcePath) {
-      throw new Error(`No source file for node: ${docId}`);
-    }
-
-    // Read content from filesystem
-    const file = Bun.file(sourcePath);
-    if (!(await file.exists())) {
-      throw new Error(`File not found: ${sourcePath}`);
-    }
-    const content = await file.text();
-
-    const response = await callOllama(
-      [
-        {
-          role: "system",
-          content:
-            "You are a document analyzer. Extract comprehensive metadata.",
-        },
-        {
-          role: "user",
-          content: `Analyze this document comprehensively:
-
-Content: ${content}
-
-Return JSON:
-{
-  "themes": ["theme1", "theme2"],
-  "code_patterns": ["pattern1", "pattern2"],
-  "summary": "2-3 sentence summary",
-  "doc_type": "implementation|conceptual|architecture|reference",
-  "technical_depth": "deep|medium|shallow",
-  "audience": "developer|user|architect",
-  "related_docs": ["doc1", "doc2"]
-}`,
-        },
-      ],
-      {
-        temperature: 0.2,
-        num_predict: 500,
-        format: "json", // Force valid JSON output
-      },
-    );
-
-    // Save enhanced metadata back to DB
-    const contentStr = response.message.content;
-    let enhancedMeta: Record<string, unknown>;
-    try {
-      enhancedMeta = JSON.parse(contentStr);
-    } catch {
-      enhancedMeta = {
-        themes: [],
-        code_patterns: [],
-        summary: "",
-        doc_type: "unknown",
-        technical_depth: "medium",
-        audience: "developer",
-        related_docs: [],
-      };
-    }
-
-    // Update node metadata
-    const newMeta = {
-      ...node.meta,
-      sonar_enhanced: true,
-      sonar_enhanced_at: new Date().toISOString(),
-      ...enhancedMeta,
-    };
-
-    db.updateNodeMeta(docId, newMeta);
-    return enhancedMeta;
-  } catch (error) {
-    log.error({ error, docId }, "Metadata enhancement failed");
-    throw error;
-  }
-}
+  // Initial graph load
+  await graphEngine.load(db.getRawDb());
 
-/**
- * Handle batch enhancement task
- * Processes multiple documents for metadata enhancement
- */
-async function handleBatchEnhancement(limit = 50): Promise<{
-  successful: number;
-  failed: number;
-  total: number;
-}> {
-  if (!ollamaAvailable) {
-    throw new Error("Sonar is not available");
+  // Start HTTP API if serve mode
+  if (command === "serve") {
+    startServer(config.sonar.port || 3030);
   }
 
-  const db = new ResonanceDB(DB_PATH);
-
-  // Find unenhanced nodes
-  // Note: We need to query nodes that don't have 'sonar_enhanced' in meta
-  const allNodes = db.getRawDb().query("SELECT id, meta FROM nodes").all() as {
-    id: string;
-    meta: string;
-  }[];
-
-  const unenhanced = allNodes
-    .filter((row) => {
-      try {
-        const meta = JSON.parse(row.meta);
-        // Check for sonar_enhanced OR phi3_enhanced (migration)
-        return !meta.sonar_enhanced && !meta.phi3_enhanced;
-      } catch {
-        return false;
-      }
-    })
-    .map((row) => ({ id: row.id }));
-
-  const batch = unenhanced.slice(0, limit);
-
-  log.info(`🔄 Enhancing ${batch.length} docs with Sonar...`);
-
-  const results = await Promise.allSettled(
-    batch.map((node) => handleMetadataEnhancement(node.id)),
+  // Task Watcher Loop
+  log.info(
+    "Watcher started: Listening for tasks in .amalfa/agent/tasks/pending",
   );
-
-  const successful = results.filter((r) => r.status === "fulfilled").length;
-  const failed = results.filter((r) => r.status === "rejected").length;
-
-  log.info(`✅ Enhanced: ${successful}, ❌ Failed: ${failed}`);
-
-  return { successful, failed, total: batch.length };
-}
-
-/**
- * Handle result re-ranking
- * Re-ranks search results based on query intent and context
- */
-async function handleResultReranking(
-  results: Array<{ id: string; content: string; score: number }>,
-  query: string,
-  intent?: string,
-): Promise<
-  Array<{ id: string; content: string; score: number; relevance_score: number }>
-> {
-  if (!ollamaAvailable) {
-    throw new Error("Sonar is not available");
-  }
-
-  try {
-    const response = await callOllama(
-      [
-        {
-          role: "system",
-          content:
-            "You are a search result re-ranker. Analyze relevance and provide scores.",
-        },
-        {
-          role: "user",
-          content: `Re-rank these search results for query: "${query}"${
-            intent ? `\nQuery intent: ${intent}` : ""
-          }
-
-Results:
-${results.map((r, i) => `${i + 1}. ${r.content.slice(0, 200)}`).join("\n")}
-
-Return JSON array with relevance scores (0.0 to 1.0):
-[
-  {"index": 1, "relevance": 0.95, "reason": "Direct match"},
-  {"index": 2, "relevance": 0.7, "reason": "Related concept"}
-]`,
-        },
-      ],
-      {
-        temperature: 0.2,
-        num_predict: 300,
-        format: "json", // Force valid JSON output
-      },
-    );
-
-    const content = response.message.content;
-    try {
-      const rankings = JSON.parse(content);
-
-      // Apply rankings to results
-      return results.map((result, idx) => {
-        const ranking = rankings.find(
-          (r: { index: number }) => r.index === idx + 1,
-        );
-        return {
-          ...result,
-          relevance_score: ranking?.relevance || 0.5,
-        };
-      });
-    } catch {
-      // Fallback: return original scores
-      return results.map((r) => ({ ...r, relevance_score: r.score }));
-    }
-  } catch (error) {
-    log.error({ error, query }, "Result re-ranking failed");
-    throw error;
+  while (true) {
+    // Reload graph to pick up new edges before processing tasks
+    await graphEngine.load(db.getRawDb());
+    await processPendingTasks();
+    await new Promise((resolve) => setTimeout(resolve, 5000));
   }
 }
 
 /**
- * Handle chat request
- * Maintains session context and converses with user
+ * Start Bun HTTP Server
  */
-async function handleChat(
-  sessionId: string,
-  userMessage: string,
-  modelOverride?: string, // Optional: Use specific model (e.g., mistral-nemo for research)
-): Promise<{ message: Message; sessionId: string }> {
-  if (!ollamaAvailable) {
-    throw new Error("Sonar is not available");
-  }
-
-  // Get or create session
-  let session = chatSessions.get(sessionId);
-  if (!session) {
-    session = {
-      id: sessionId,
-      messages: [
-        {
-          role: "system",
-          content: `You are AMALFA Corpus Assistant. Help users understand and explore their knowledge base.
-Current Date: ${new Date().toISOString().split("T")[0]}
-
-User can ask you about:
-1. Corpus structure and themes
-2. What you're currently working on
-3. Search for documents by theme/type
-4. Guide enhancement process
-5. Natural language queries to knowledge base`,
-        },
-      ],
-      startedAt: new Date(),
-    };
-    chatSessions.set(sessionId, session);
-  }
-
-  // Add user message
-
-  // RAG: Perform vector search to augment context
-  const db = new ResonanceDB(DB_PATH);
-  const vectors = new VectorEngine(db.getRawDb());
-  try {
-    const results = await vectors.search(userMessage, 3);
-
-    let augmentContext = "";
-    if (results.length > 0) {
-      augmentContext = `\n\nRELEVANT CONTEXT FROM KNOWLEDGE BASE:\n`;
-      results.forEach((r: { id: string; score: number }, i: number) => {
-        // Read full node content if possible, or just use what we have
-        const node = db.getNode(r.id);
-        // Truncate content to avoid blowing up context window
-        const content = node?.content ?? "";
-        const snippet = content.slice(0, 1000);
-        augmentContext += `[Document ${i + 1}: ${r.id}] (Score: ${r.score.toFixed(2)})\n${snippet}\n\n`;
-      });
-      augmentContext += `INSTRUCTIONS: Use the above context to answer the user's question. Cite sources if possible.\n`;
-    }
-
-    // Append context to user message
-    session.messages.push({
-      role: "user",
-      content: userMessage + augmentContext,
-    });
-  } catch (e) {
-    // Fallback to ignoring RAG on error
-    log.warn({ err: e }, "RAG search failed, proceeding without context");
-    session.messages.push({ role: "user", content: userMessage });
-  }
-
-  // Maintain context window (keep system msg + last 10 messages)
-  const contextMessages = [
-    session.messages[0],
-    ...session.messages.slice(-10),
-  ].filter((m): m is Message => m !== undefined);
-
-  try {
-    // NOTE: No format: "json" for chat! We want natural language.
-    // Use modelOverride if provided (e.g., mistral-nemo for research)
-    const response = await callOllama(contextMessages, {
-      temperature: 0.7,
-      num_predict: 500,
-      model: modelOverride,
-    });
-
-    // Add assistant response to history
-    session.messages.push(response.message);
-
-    return {
-      message: response.message,
-      sessionId: session.id,
-    };
-  } catch (error) {
-    const errorMessage = error instanceof Error ? error.message : String(error);
-    log.error({ err: error, sessionId, errorMessage }, "Chat request failed");
-    throw error;
-  }
-}
-
-/**
- * Handle context extraction
- * Generates smart snippets with context awareness
- */
-async function handleContextExtraction(
-  result: { id: string; content: string },
-  query: string,
-): Promise<{ snippet: string; context: string; confidence: number }> {
-  if (!ollamaAvailable) {
-    throw new Error("Sonar is not available");
-  }
-
-  try {
-    const response = await callOllama(
-      [
-        {
-          role: "system",
-          content:
-            "You are a context extractor. Provide relevant snippets with context.",
-        },
-        {
-          role: "user",
-          content: `Extract relevant context for query: "${query}"
-
-Content:
-${result.content}
-
-Return JSON:
-{
-  "snippet": "Most relevant 2-3 sentences",
-  "context": "Brief explanation of relevance",
-  "confidence": 0.9
-}`,
-        },
-      ],
-      {
-        temperature: 0.1,
-        num_predict: 200,
-        format: "json", // Force valid JSON output
-      },
-    );
-
-    const content = response.message.content;
-    try {
-      const parsed = JSON.parse(content);
-      if (!parsed.snippet && !parsed.context) {
-        throw new Error("Missing snippet/context in JSON");
-      }
-      return parsed;
-    } catch {
-      // Fallback: return simple snippet
-      const words = result.content.split(" ");
-      const snippet = words.slice(0, 50).join(" ");
-      return {
-        snippet,
-        context: "Full content available",
-        confidence: 0.5,
-      };
-    }
-  } catch (error) {
-    log.error({ error, resultId: result.id }, "Context extraction failed");
-    throw error;
-  }
-}
-
-/**
- * Main daemon logic
- */
-async function main() {
-  const config = await loadConfig();
-  DB_PATH = join(process.cwd(), config.database);
-
-  // @ts-ignore
-  const isEnabled = config.sonar?.enabled ?? config.phi3?.enabled;
-
-  if (!isEnabled) {
-    log.warn("⚠️ Sonar is disabled in configuration. Exiting.");
-    process.exit(0);
-  }
-
-  log.info("🚀 Sonar Agent starting...");
-
-  // Check Ollama availability
-  log.info("🔍 Checking Ollama availability...");
-  const capabilities = await discoverOllamaCapabilities();
-  ollamaAvailable = capabilities.available;
-
-  if (ollamaAvailable) {
-    log.info("✅ Ollama is available and healthy");
-    // Use discovered preferred model (e.g., tinydolphin) unless overridden in config
-    // @ts-ignore
-    ollamaModel =
-      config.sonar?.model ||
-      config.phi3?.model ||
-      capabilities.model ||
-      "phi3:latest";
-    log.info(`✅ Using model: ${ollamaModel}`);
-  } else {
-    log.warn("⚠️ Ollama is not available");
-    log.warn(" Sonar features will be disabled");
-    log.info(" Install: curl -fsSL https://ollama.ai/install.sh | sh");
-    log.info(" Then run: ollama pull phi3:latest (or minidolphin)");
-  }
-
-  log.info("✅ Sonar Agent ready");
-
-  // Register signal handlers for graceful shutdown
-  const shutdown = async (signal: string) => {
-    log.info(`🛑 Received ${signal}, shutting down...`);
-    if (server) {
-      server.stop();
-      server = null;
-    }
-    process.exit(0);
+function startServer(port: number) {
+  const corsHeaders = {
+    "Access-Control-Allow-Origin": "*",
+    "Access-Control-Allow-Methods": "GET, POST, OPTIONS",
+    "Access-Control-Allow-Headers": "Content-Type",
   };
 
-  process.on("SIGTERM", () => shutdown("SIGTERM"));
-  process.on("SIGINT", () => shutdown("SIGINT"));
+  const context: SonarContext = { db, graphEngine, gardener, chatSessions };
 
-  // Start HTTP server
-  // @ts-ignore
-  const port = (config.sonar || config.phi3)?.port || 3012;
-
-  log.info(`🚀 Starting HTTP server on port ${port}`);
-  log.info("📋 Available endpoints:");
-  log.info(" POST /search/analyze - Query analysis");
-  log.info(" POST /search/rerank - Result re-ranking");
-  log.info(" POST /search/context - Smart snippet generation");
-  log.info(" GET /health - Health check");
-
-  server = Bun.serve({
+  Bun.serve({
     port,
     async fetch(req) {
-      const url = new URL(req.url);
-
-      // CORS headers
-      const corsHeaders = {
-        "Access-Control-Allow-Origin": "*",
-        "Access-Control-Allow-Methods": "GET, POST, OPTIONS",
-        "Access-Control-Allow-Headers": "Content-Type",
-      };
-
-      // Handle preflight requests
-      if (req.method === "OPTIONS") {
+      if (req.method === "OPTIONS")
         return new Response(null, { headers: corsHeaders });
-      }
+      const url = new URL(req.url);
 
-      // Health check endpoint
+      // Health check
       if (url.pathname === "/health") {
-        const healthy = await checkOllamaHealth();
         return Response.json(
-          {
-            status: healthy ? "healthy" : "unhealthy",
-            ollama_available: ollamaAvailable,
-            model: ollamaModel,
-          },
+          { status: "ok", ollama: inferenceState.ollamaAvailable },
           { headers: corsHeaders },
         );
       }
 
-      // Search analysis endpoint
-      if (url.pathname === "/search/analyze" && req.method === "POST") {
+      // Chat endpoint
+      if (url.pathname === "/chat" && req.method === "POST") {
         try {
-          const body = (await req.json()) as { query: unknown };
-          const { query } = body;
-
-          if (!query || typeof query !== "string") {
-            return Response.json(
-              { error: "Missing or invalid 'query' parameter" },
-              { status: 400, headers: corsHeaders },
-            );
-          }
-
-          const analysis = await handleSearchAnalysis(query);
-          return Response.json(analysis, { headers: corsHeaders });
+          const body = (await req.json()) as ChatRequest;
+          const { sessionId, message, model } = body;
+          const result = await handleChat(sessionId, message, context, model);
+          return Response.json(result, { headers: corsHeaders });
         } catch (error) {
-          log.error({ error }, "Search analysis failed");
           return Response.json(
-            { error: error instanceof Error ? error.message : "Unknown error" },
+            { error: String(error) },
             { status: 500, headers: corsHeaders },
           );
         }
       }
 
-      // Result re-ranking endpoint
-      if (url.pathname === "/search/rerank" && req.method === "POST") {
+      // Metadata enhancement endpoint
+      if (url.pathname === "/metadata/enhance" && req.method === "POST") {
         try {
-          const body = (await req.json()) as {
-            results: unknown;
-            query: unknown;
-            intent: unknown;
-          };
-          const { results, query, intent } = body;
-
-          if (
-            !results ||
-            !Array.isArray(results) ||
-            !query ||
-            typeof query !== "string"
-          ) {
-            return Response.json(
-              { error: "Missing or invalid 'results' parameter" },
-              { status: 400, headers: corsHeaders },
-            );
-          }
-
-          const ranked = await handleResultReranking(
-            results as { id: string; content: string; score: number }[],
-            query,
-            intent as string | undefined,
-          );
-          return Response.json(ranked, { headers: corsHeaders });
+          const body = (await req.json()) as MetadataEnhanceRequest;
+          const { docId } = body;
+          await handleMetadataEnhancement(docId, context);
+          return Response.json({ status: "success" }, { headers: corsHeaders });
         } catch (error) {
-          log.error({ error }, "Result re-ranking failed");
           return Response.json(
-            { error: error instanceof Error ? error.message : "Unknown error" },
            { status: 500, headers: corsHeaders },
          );
        }
      }
 
-      // Context extraction endpoint
-      if (url.pathname === "/search/context" && req.method === "POST") {
-        try {
-          const body = (await req.json()) as {
-            result: unknown;
-            query: unknown;
-          };
-          const { result, query } = body;
-
-          if (!result || !query || typeof query !== "string") {
-            return Response.json(
-              { error: "Missing 'result' or 'query' parameter" },
-              { status: 400, headers: corsHeaders },
-            );
-          }
-
-          const context = await handleContextExtraction(
-            result as { id: string; content: string },
-            query,
-          );
-          return Response.json(context, { headers: corsHeaders });
-        } catch (error) {
-          log.error({ error }, "Context extraction failed");
-          return Response.json(
-            { error: error instanceof Error ? error.message : "Unknown error" },
-            { status: 500, headers: corsHeaders },
-          );
-        }
+      // Graph Stats endpoint
+      if (url.pathname === "/graph/stats" && req.method === "GET") {
+        return Response.json(graphEngine.getStats(), { headers: corsHeaders });
       }
 
-      // Metadata enhancement endpoint
-      if (url.pathname === "/metadata/enhance" && req.method === "POST") {
+      // Search endpoints (analysis, rerank, context)
+      if (url.pathname === "/search/analyze" && req.method === "POST") {
         try {
-          const body = (await req.json()) as { docId: unknown };
-          const { docId } = body;
-
-          if (!docId || typeof docId !== "string") {
-            return Response.json(
-              { error: "Missing 'docId' parameter" },
-              { status: 400, headers: corsHeaders },
-            );
-          }
-
-          const enhancement = await handleMetadataEnhancement(docId);
-          return Response.json(enhancement, { headers: corsHeaders });
+          const body = (await req.json()) as SearchAnalyzeRequest;
+          const { query } = body;
+          const result = await handleSearchAnalysis(query, context);
+          return Response.json(result, { headers: corsHeaders });
         } catch (error) {
-          log.error({ error }, "Metadata enhancement endpoint failed");
           return Response.json(
-            { error: error instanceof Error ? error.message : "Unknown error" },
+            { error: String(error) },
             { status: 500, headers: corsHeaders },
           );
         }
       }
 
-      // Batch enhancement endpoint
-      if (url.pathname === "/metadata/batch" && req.method === "POST") {
+      if (url.pathname === "/search/rerank" && req.method === "POST") {
         try {
-          const body = (await req.json()) as { limit: unknown };
-          const limit = typeof body.limit === "number" ? body.limit : 50;
-
-          const result = await handleBatchEnhancement(limit);
+          const body = (await req.json()) as SearchRerankRequest;
+          const { results, query, intent } = body;
+          const result = await handleResultReranking(results, query, intent);
           return Response.json(result, { headers: corsHeaders });
         } catch (error) {
-          log.error({ error }, "Batch enhancement endpoint failed");
           return Response.json(
-            { error: error instanceof Error ? error.message : "Unknown error" },
+            { error: String(error) },
             { status: 500, headers: corsHeaders },
          );
        }
      }
 
-      // Chat endpoint
-      if (url.pathname === "/chat" && req.method === "POST") {
+      if (url.pathname === "/search/context" && req.method === "POST") {
         try {
-          const body = (await req.json()) as {
-            sessionId?: unknown;
-            message: unknown;
-          };
-          const sessionId =
-            typeof body.sessionId === "string"
-              ? body.sessionId
-              : crypto.randomUUID();
-          const message = body.message;
-
-          if (!message || typeof message !== "string") {
-            return Response.json(
-              { error: "Missing 'message' parameter" },
-              { status: 400, headers: corsHeaders },
-            );
-          }
-
-          const response = await handleChat(sessionId, message);
-          return Response.json(response, { headers: corsHeaders });
+          const body = (await req.json()) as SearchContextRequest;
+          const { result, query } = body;
+          const contextResult = await handleContextExtraction(result, query);
+          return Response.json(contextResult, { headers: corsHeaders });
         } catch (error) {
-          log.error({ error }, "Chat endpoint failed");
           return Response.json(
-            { error: error instanceof Error ? error.message : "Unknown error" },
+            { error: String(error) },
             { status: 500, headers: corsHeaders },
           );
         }
       }
 
-      // 404 for unknown endpoints
-      return Response.json(
-        { error: "Not found" },
-        { status: 404, headers: corsHeaders },
-      );
+      return new Response("Not Found", { status: 404, headers: corsHeaders });
     },
   });
 
-  log.info(`✅ HTTP server listening on port ${port}`);
-  log.info("⏳ Daemon ready to handle requests");
-
-  // Task Watcher Loop
-  log.info(`👀 Watching for tasks in ${AMALFA_DIRS.tasks.pending}`);
-
-  // Check every 5 seconds
-  setInterval(async () => {
-    try {
-      await processPendingTasks();
-    } catch (error) {
-      log.error({ error }, "Task processing error");
-    }
-  }, 5000);
+  log.info(`Server started on port ${port}`);
 }
 
 /**
- * Scan and process pending tasks
+ * Process tasks from the pending directory
  */
 async function processPendingTasks() {
-  if (!ollamaAvailable) return;
-
   const pendingDir = AMALFA_DIRS.tasks.pending;
   if (!existsSync(pendingDir)) return;
 
-  const files = readdirSync(pendingDir);
-  for (const file of files) {
-    if (!file.endsWith(".json")) continue;
+  const files = (await readdir(pendingDir)).filter((f: string) =>
+    f.endsWith(".json"),
+  );
 
-    const taskPath = join(pendingDir, file);
+  for (const file of files) {
+    const pendingPath = join(pendingDir, file);
     const processingPath = join(AMALFA_DIRS.tasks.processing, file);
 
     try {
-      // Move to processing
-      renameSync(taskPath, processingPath);
-      log.info({ file }, "🔄 Processing task...");
+      await rename(pendingPath, processingPath);
+      const taskContent = JSON.parse(
+        await Bun.file(processingPath).text(),
+      ) as SonarTask;
 
-      const taskContent = await Bun.file(processingPath).json();
       const report = await executeTask(taskContent);
-
-      // Save report
       const reportName = file.replace(".json", "-report.md");
       const reportPath = join(AMALFA_DIRS.tasks.completed, reportName);
-      writeFileSync(reportPath, report);
-
-      // Move original task to completed
-      const completedPath = join(AMALFA_DIRS.tasks.completed, file);
-      renameSync(processingPath, completedPath);
+      await Bun.write(reportPath, report);
 
+      await rename(processingPath, join(AMALFA_DIRS.tasks.completed, file));
       log.info({ file }, "✅ Task completed");
 
-      // Notification
      if (taskContent.notify !== false) {
        await sendNotification("Sonar Agent", `Task Complete: ${file}`);
      }
    } catch (error) {
      log.error({ file, error }, "❌ Task failed");
-      // Move back to pending? Or to a failed dir? For now, leave in processing or move to failed could be better.
-      // Let's create a failed report so user knows.
-      const reportName = file.replace(".json", "-FAILED.md");
-      const reportPath = join(AMALFA_DIRS.tasks.completed, reportName);
-      writeFileSync(reportPath, `# Task Failed\n\nError: ${error}`);
-
-      // Move to completed so we don't loop forever
-      const completedPath = join(AMALFA_DIRS.tasks.completed, file);
-      renameSync(processingPath, completedPath);
+      const failedReport = join(
+        AMALFA_DIRS.tasks.completed,
+        file.replace(".json", "-FAILED.md"),
+      );
+      await Bun.write(failedReport, `# Task Failed\n\nError: ${error}`);
+      if (existsSync(processingPath)) {
+        await rename(processingPath, join(AMALFA_DIRS.tasks.completed, file));
+      }
    }
  }
 }
@@ -960,47 +289,38 @@ async function processPendingTasks() {
 /**
  * Execute a specific task based on its type
  */
-async function executeTask(task: any): Promise<string> {
+async function executeTask(task: SonarTask): Promise<string> {
+  log.info({ type: task.type }, "🚀 Starting executeTask");
   const startTime = Date.now();
   let output = `# Task Report: ${task.type}\nDate: ${new Date().toISOString()}\n\n`;
 
-  if (task.type === "enhance_batch") {
-    const limit = task.limit || 10;
-    output += `## Objective\nEnhance ${limit} documents with metadata.\n\n`;
-
-    const result = await handleBatchEnhancement(limit);
-
-    output += `## Results\n`;
-    output += `- Total: ${result.total}\n`;
-    output += `- Successful: ${result.successful}\n`;
-    output += `- Failed: ${result.failed}\n\n`;
+  const taskModel = task.model || (await getTaskModel(task.type));
+  if (taskModel)
+    output += `> **Routing:** Task assigned to model \`${taskModel}\`\n\n`;
 
-    output += `Check daemon logs for detailed errors per document.\n`;
-  } else if (task.type === "research") {
-    output += `## Objective\nResearch Query: "${task.query}"\n\n`;
+  const context: SonarContext = { db, graphEngine, gardener, chatSessions };
 
-    try {
-      const sessionId = `task-${Date.now()}`;
-      // For research: use task.model if specified, otherwise let the cloud/local config decide
-      // Don't hardcode mistral-nemo since it's not valid on OpenRouter
-      const researchModel = task.model || undefined;
-      const response = await handleChat(sessionId, task.query, researchModel);
-
-      output += `## Analysis\n${response.message.content}\n\n`;
-      output += `(Model: ${researchModel || "default"})\n`;
-
-      // Note: chat doesn't return structured sources yet
-      output += `(Source citation not available in simple research task)\n`;
-    } catch (e) {
-      output += `## Error\nResearch failed: ${e instanceof Error ? e.message : String(e)}\n`;
+  try {
+    if (task.type === "synthesis") {
+      output += await handleSynthesisTask(task, context, taskModel);
+    } else if (task.type === "timeline") {
+      output += await handleTimelineTask(task, context, taskModel);
+    } else if (task.type === "garden") {
+      output += await handleGardenTask(task, context, taskModel);
+    } else if (task.type === "research") {
+      output += await handleResearchTask(task, context, taskModel);
+    } else if (task.type === "enhance_batch") {
+      const result = await handleBatchEnhancement(task.limit || 10, context);
+      output += `## Results\n- Successful: ${result.successful}\n- Failed: ${result.failed}\n- Total: ${result.total}\n`;
+    } else {
+      output += `⚠️ Unknown task type: ${task.type}\n`;
    }
-  } else {
-    output += `Error: Unknown task type '${task.type}'\n`;
+  } catch (error) {
+    output += `❌ Error during task execution: ${error}\n`;
+    throw error;
  }
 
-  const duration = ((Date.now() - startTime) / 1000).toFixed(1);
-  output += `\n---\n**Duration:** ${duration}s\n`;
-
+  output += `\n---\n**Duration:** ${((Date.now() - startTime) / 1000).toFixed(1)}s\n`;
  return output;
 }
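
For orientation: 1.0.30 moves inference state into `./sonar-inference` and the HTTP API into `startServer`. Below is a minimal client sketch against the endpoints visible in this diff, assuming an agent running locally on the default port (`config.sonar.port || 3030`). The endpoint paths and request field names (`sessionId`, `message`, `model`, `docId`, `query`) are read from the destructuring above; the full `ChatRequest` and `SonarTask` shapes live in `./sonar-types` and may carry more fields than shown here.

```ts
// Hypothetical usage sketch (run with Bun). Paths and field names come from
// the diff above; everything else (port, ids, payload values) is assumed.
const BASE = "http://localhost:3030"; // default is config.sonar.port || 3030

// GET /health -> { status: "ok", ollama: boolean }
console.log(await fetch(`${BASE}/health`).then((r) => r.json()));

// POST /chat with a ChatRequest-shaped body ({ sessionId, message, model? })
const reply = await fetch(`${BASE}/chat`, {
  method: "POST",
  headers: { "Content-Type": "application/json" },
  body: JSON.stringify({
    sessionId: "demo-session", // any string; keys the in-memory session map
    message: "What themes are in my corpus?",
  }),
}).then((r) => r.json());
console.log(reply);

// File-based task queue: the watcher polls .amalfa/agent/tasks/pending every
// 5s, moves the file through processing/, and writes <name>-report.md (or
// <name>-FAILED.md) into completed/. The "query" field mirrors the 1.0.28
// research branch; 1.0.30 delegates to handleResearchTask, so the exact
// payload may differ.
await Bun.write(
  ".amalfa/agent/tasks/pending/demo-research.json",
  JSON.stringify({ type: "research", query: "vector search", notify: false }),
);
```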