@hamp10/agentforge 0.2.0 → 0.2.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/bin/agentforge.js +246 -36
- package/package.json +1 -1
- package/src/OllamaAgent.js +92 -15
package/bin/agentforge.js
CHANGED
@@ -54,7 +54,7 @@ function saveConfig(config) {
 program
   .name('agentforge')
   .description('AgentForge worker - connect your machine to agentforge.ai')
-  .version('0.1
+  .version('0.2.1');
 
 program
   .command('login')
@@ -450,66 +450,276 @@ program
     console.log('');
   });
 
+program
+  .command('doctor')
+  .description('Check your AgentForge worker health — diagnose any issues')
+  .action(async () => {
+    console.log('');
+    console.log('🩺 AgentForge Doctor');
+    console.log('================================');
+    console.log('');
+
+    let allGood = true;
+
+    // 1. Authentication
+    const config = loadConfig();
+    if (config.token) {
+      console.log('✅ Authenticated');
+    } else {
+      console.log('❌ Not authenticated');
+      console.log(' Fix: agentforge login');
+      allGood = false;
+    }
+
+    // 2. Server reachability
+    const serverUrl = config.url || 'https://agentforgeai-production.up.railway.app';
+    try {
+      const res = await fetch(`${serverUrl}/api/wsstatus`, { signal: AbortSignal.timeout(5000) });
+      if (res.ok || res.status === 401 || res.status === 403) {
+        console.log(`✅ Server reachable (${serverUrl})`);
+      } else {
+        console.log(`⚠️ Server returned ${res.status}`);
+      }
+    } catch (err) {
+      console.log(`❌ Cannot reach server: ${err.message}`);
+      allGood = false;
+    }
+
+    // 3. AI Backend
+    if (config.provider === 'local') {
+      const localUrl = config.localUrl || 'http://localhost:11434';
+      try {
+        const res = await fetch(`${localUrl}/v1/models`, { signal: AbortSignal.timeout(5000) });
+        if (res.ok) {
+          const data = await res.json();
+          const models = (data.data ?? data.models ?? []).map(m => m.id || m.name);
+          const configured = config.localModel || '(not set)';
+          const found = models.includes(configured);
+          console.log(`✅ Local model server running (${localUrl})`);
+          console.log(` Configured model: ${configured} ${found ? '✅' : '⚠️ (not found in model list)'}`);
+          if (!found && models.length > 0) {
+            console.log(` Available models: ${models.slice(0, 5).join(', ')}`);
+            console.log(` Fix: agentforge local --model ${models[0]}`);
+            allGood = false;
+          }
+        } else {
+          console.log(`❌ Local server at ${localUrl} returned ${res.status}`);
+          allGood = false;
+        }
+      } catch {
+        console.log(`❌ Local server not running at ${localUrl}`);
+        console.log(` Make sure Ollama/LM Studio/Jan is running, then: agentforge start`);
+        allGood = false;
+      }
+    } else if (OpenClawCLI.isAvailable()) {
+      console.log('✅ openclaw backend available');
+    } else {
+      // Auto-detect Ollama
+      try {
+        const res = await fetch('http://localhost:11434/v1/models', { signal: AbortSignal.timeout(2000) });
+        if (res.ok) {
+          const data = await res.json();
+          const models = (data.data ?? data.models ?? []).map(m => m.id || m.name);
+          console.log(`⚠️ Ollama is running but not configured as your backend`);
+          if (models.length > 0) console.log(` Models available: ${models.slice(0, 5).join(', ')}`);
+          console.log(` Fix: agentforge local --model ${models[0] || 'llama3.1:8b'}`);
+          allGood = false;
+        }
+      } catch {
+        console.log('❌ No AI backend configured');
+        console.log(' Fix: install Ollama (https://ollama.ai) then: agentforge local --model llama3.1:8b');
+        allGood = false;
+      }
+    }
+
+    // 4. Worker running
+    const supervisorPidFile = path.join(CONFIG_DIR, 'supervisor.pid');
+    const workerPidFile = path.join(CONFIG_DIR, 'worker.pid');
+    if (fs.existsSync(supervisorPidFile)) {
+      const pid = parseInt(fs.readFileSync(supervisorPidFile, 'utf8').trim());
+      try {
+        process.kill(pid, 0);
+        const wpid = fs.existsSync(workerPidFile) ? fs.readFileSync(workerPidFile, 'utf8').trim() : null;
+        console.log(`✅ Worker running (supervisor PID ${pid}${wpid ? ', worker PID ' + wpid : ''})`);
+      } catch {
+        console.log('⚠️ Supervisor PID file exists but process is not running');
+        console.log(' Fix: agentforge start');
+        allGood = false;
+      }
+    } else {
+      console.log('⚠️ Worker not running');
+      console.log(' Fix: agentforge start');
+    }
+
+    console.log('');
+    if (allGood) {
+      console.log('✅ Everything looks healthy!');
+    } else {
+      console.log('Fix the issues above, then run: agentforge start');
+    }
+    console.log('');
+  });
+
 program
   .command('setup')
-  .description('
+  .description('Interactive setup wizard — gets AgentForge running in minutes')
   .option('--tailscale-key <key>', 'Tailscale auth key (from tailscale.com/admin/settings/keys)')
   .action(async (options) => {
     const { execSync, spawnSync } = await import('child_process');
+    const readline = await import('readline');
+
+    const rl = readline.createInterface({ input: process.stdin, output: process.stdout });
+    const ask = (q) => new Promise(resolve => rl.question(q, resolve));
 
     console.log('');
-    console.log('🚀 AgentForge
+    console.log('🚀 AgentForge Setup');
     console.log('================================');
-    console.log('');
+    console.log('Getting your machine ready to run AI agents.\n');
 
-    // Step 1:
-
-
-
-
+    // ── Step 1: Authentication ──────────────────────────────────────────────
+    const config = loadConfig();
+    if (config.token) {
+      console.log('✅ Step 1/2: Already logged in\n');
+    } else {
+      console.log('Step 1/2: Log in to AgentForge\n');
+      console.log('A browser window will open — log in there and come back.\n');
+      rl.close();
+
+      // Spawn login as a child process with inherited stdio so the interactive
+      // OAuth flow works (readline on stdin, browser opens, polling, etc.)
+      const { spawnSync: sp } = await import('child_process');
+      const loginResult = sp(process.execPath, [process.argv[1], 'login'], { stdio: 'inherit' });
+      if (loginResult.status !== 0) {
+        console.error('\n❌ Login failed — run: agentforge login');
+        process.exit(1);
+      }
 
-
-
-
-
-
+      // Re-open readline for the rest of setup
+      const rl2 = readline.createInterface({ input: process.stdin, output: process.stdout });
+      Object.assign(rl, rl2); // replace for remaining ask() calls (won't be used further)
+      console.log('');
+    }
 
-    // Step
-    console.log('Step
-    console.log('');
+    // ── Step 2: AI Backend ──────────────────────────────────────────────────
+    console.log('Step 2/2: AI Backend\n');
 
-    const
+    const freshConfig = loadConfig();
 
-    if (
-
-    } else {
-      console.log('Installing Tailscale...');
+    if (freshConfig.provider === 'local') {
+      const localUrl = freshConfig.localUrl || 'http://localhost:11434';
       try {
-
-
-
-
+        const res = await fetch(`${localUrl}/v1/models`, { signal: AbortSignal.timeout(3000) });
+        if (res.ok) {
+          const data = await res.json();
+          const models = (data.data ?? data.models ?? []).map(m => m.id || m.name);
+          console.log(`✅ Already configured: ${localUrl} with model "${freshConfig.localModel}"`);
+          if (models.length > 0 && !models.includes(freshConfig.localModel)) {
+            console.log(` ⚠️ Model "${freshConfig.localModel}" not found. Available: ${models.slice(0, 5).join(', ')}`);
+          }
+          console.log('');
+        }
+      } catch {
+        console.log(`⚠️ Configured local server at ${localUrl} is not reachable. Make sure it's running.\n`);
+      }
+    } else if (OpenClawCLI.isAvailable()) {
+      console.log('✅ openclaw detected and ready\n');
+    } else {
+      // Probe all common local model servers
+      const probes = [
+        { name: 'Ollama', url: 'http://localhost:11434' },
+        { name: 'LM Studio', url: 'http://localhost:1234' },
+        { name: 'Jan', url: 'http://localhost:1337' },
+        { name: 'llama.cpp', url: 'http://localhost:8080' },
+        { name: 'vLLM', url: 'http://localhost:8000' },
+      ];
+
+      const found = [];
+      process.stdout.write('Detecting local model servers...');
+      for (const probe of probes) {
+        try {
+          const res = await fetch(`${probe.url}/v1/models`, { signal: AbortSignal.timeout(1500) });
+          if (res.ok) {
+            const data = await res.json();
+            const models = (data.data ?? data.models ?? []).map(m => m.id || m.name).filter(Boolean);
+            found.push({ ...probe, models });
+          }
+        } catch {}
+      }
+      console.log('');
+
+      if (found.length === 0) {
+        console.log('');
+        console.log('No local model server detected.\n');
+        console.log('AgentForge needs an AI model to power your agents. The easiest option is Ollama:\n');
+        console.log(' 1. Go to https://ollama.ai and install it');
+        console.log(' 2. Run: ollama pull llama3.1:8b');
+        console.log(' 3. Run: agentforge setup (come back here when done)');
+        console.log('');
+        console.log('You can also use LM Studio, Jan, llama.cpp, or openclaw.');
+        rl.close();
+        process.exit(0);
+      }
+
+      // Let user pick if multiple found, or auto-select if only one
+      let chosen = found[0];
+      if (found.length > 1) {
+        console.log('\nFound these model servers:\n');
+        found.forEach((b, i) => {
+          console.log(` ${i + 1}. ${b.name} (${b.url}) — ${b.models.length} model(s): ${b.models.slice(0, 3).join(', ')}`);
+        });
+        const ans = await ask(`\nWhich one to use? [1]: `);
+        const idx = parseInt(ans.trim()) - 1;
+        chosen = found[isNaN(idx) || idx < 0 || idx >= found.length ? 0 : idx];
+      } else {
+        console.log(`\nFound: ${chosen.name} (${chosen.url}) with ${chosen.models.length} model(s)`);
       }
+
+      // Pick model
+      let model = chosen.models[0] || 'llama3.1:8b';
+      if (chosen.models.length > 1) {
+        console.log(`\nAvailable models: ${chosen.models.join(', ')}`);
+        const ans = await ask(`Model to use [${model}]: `);
+        if (ans.trim()) model = ans.trim();
+      } else if (chosen.models.length === 1) {
+        console.log(`Using model: ${model}`);
+      }
+
+      const newConfig = loadConfig();
+      newConfig.provider = 'local';
+      newConfig.localUrl = chosen.url;
+      newConfig.localModel = model;
+      saveConfig(newConfig);
+      console.log(`\n✅ Configured ${chosen.name} with model "${model}"\n`);
     }
 
+    rl.close();
+
+    // ── Optional: Tailscale (only shown if installed or key provided) ────────
+    const tailscaleInstalled = spawnSync('which', ['tailscale'], { encoding: 'utf-8' }).status === 0;
     if (options.tailscaleKey) {
       try {
         execSync(`sudo tailscale up --authkey=${options.tailscaleKey}`, { stdio: 'inherit' });
-        console.log('✅ Tailscale connected');
         const result = spawnSync('tailscale', ['ip', '--4'], { encoding: 'utf-8' });
-
-      } catch
-        console.
+        console.log(`✅ Tailscale connected${result.stdout ? ' — IP: ' + result.stdout.trim() : ''}`);
+      } catch {
+        console.log('⚠️ Tailscale key failed — run: sudo tailscale up');
       }
-    } else {
-      console.log('
-      console.log(' sudo tailscale up');
-      console.log(' (or: agentforge setup --tailscale-key <key>)');
-      console.log(' Get a key at: tailscale.com/admin/settings/keys');
+    } else if (tailscaleInstalled) {
+      console.log('✅ Tailscale installed (run "sudo tailscale up" to connect remotely)');
     }
 
     console.log('');
-    console.log('
+    console.log('━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━');
+    console.log('✅ Setup complete!');
+    console.log('');
+    console.log(' Run this to start your worker:');
+    console.log('');
+    console.log(' agentforge start');
+    console.log('');
+    console.log(' Run this to check your health:');
+    console.log('');
+    console.log(' agentforge doctor');
+    console.log('━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━');
     console.log('');
   });
 
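Both the new `doctor` command and the rewritten `setup` rely on the same detection trick: Ollama, LM Studio, Jan, llama.cpp, and vLLM all expose an OpenAI-compatible GET /v1/models endpoint, so a single probe loop covers every backend. Below is a minimal standalone sketch of that pattern, assuming Node 18+ (global fetch and AbortSignal.timeout); the function name and timeout are illustrative, while the `data.data ?? data.models` fallback mirrors the diff:

// Sketch only: probe OpenAI-compatible servers for their model lists.
const PROBES = [
  { name: 'Ollama', url: 'http://localhost:11434' },
  { name: 'LM Studio', url: 'http://localhost:1234' },
];

async function detectServers(probes = PROBES, timeoutMs = 1500) {
  const found = [];
  for (const probe of probes) {
    try {
      const res = await fetch(`${probe.url}/v1/models`, { signal: AbortSignal.timeout(timeoutMs) });
      if (!res.ok) continue;
      const data = await res.json();
      // Some servers return { data: [...] }, others { models: [...] }
      const models = (data.data ?? data.models ?? []).map(m => m.id || m.name).filter(Boolean);
      found.push({ ...probe, models });
    } catch {
      // unreachable or timed out: not an error, just not running
    }
  }
  return found;
}

detectServers().then(servers => console.log(servers));

Because any HTTP error or timeout simply skips that probe, the loop degrades gracefully when nothing is listening on a given port.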
package/package.json
CHANGED
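(The viewer collapses this hunk; given the +1 -1 summary and the 0.2.0 → 0.2.1 release in the title, it is presumably just the version field bump.)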
package/src/OllamaAgent.js
CHANGED
@@ -185,7 +185,7 @@ export class OllamaAgent extends EventEmitter {
           messages,
           tools: TOOLS,
           tool_choice: 'auto',
-          stream:
+          stream: true
         })
       });
     } catch (fetchErr) {
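The change here is `stream: true`: the chat-completions response switches from a single JSON body to Server-Sent Events, which the new reader loop in the next hunk consumes. For orientation, a request of this shape against an OpenAI-compatible endpoint looks like the sketch below; the URL and model name are placeholders, not values taken from the package:

// Sketch only (Node 18+ ESM): a streaming chat-completions request.
const response = await fetch('http://localhost:11434/v1/chat/completions', {
  method: 'POST',
  headers: { 'Content-Type': 'application/json' },
  body: JSON.stringify({
    model: 'llama3.1:8b',                           // placeholder model
    messages: [{ role: 'user', content: 'hello' }],
    stream: true                                    // body arrives as "data: {...}" SSE lines
  })
});
for await (const chunk of response.body) {
  process.stdout.write(new TextDecoder().decode(chunk)); // raw SSE passthrough
}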
@@ -198,9 +198,86 @@
       throw new Error(`Local model error ${response.status}: ${body}`);
     }
 
-
-    //
-
+    // ── Stream the SSE response ──
+    // Accumulate content and tool calls from streaming deltas.
+    // Filter out <think>...</think> blocks (qwen3 chain-of-thought) — never show to user.
+    let streamContent = '';
+    let streamToolCalls = {};
+    let inThinkBlock = false;
+    let thinkBuffer = '';
+
+    const reader = response.body.getReader();
+    const decoder = new TextDecoder();
+    let buf = '';
+
+    while (true) {
+      if (controller.signal.aborted) break;
+      const { done, value } = await reader.read();
+      if (done) break;
+
+      buf += decoder.decode(value, { stream: true });
+      const lines = buf.split('\n');
+      buf = lines.pop(); // keep incomplete line
+
+      for (const line of lines) {
+        if (!line.startsWith('data: ')) continue;
+        const payload = line.slice(6).trim();
+        if (payload === '[DONE]') continue;
+        let evt;
+        try { evt = JSON.parse(payload); } catch { continue; }
+
+        const delta = evt.choices?.[0]?.delta;
+        if (!delta) continue;
+
+        // Accumulate tool call deltas
+        if (delta.tool_calls) {
+          for (const tc of delta.tool_calls) {
+            const idx = tc.index ?? 0;
+            if (!streamToolCalls[idx]) streamToolCalls[idx] = { id: tc.id || '', type: 'function', function: { name: '', arguments: '' } };
+            if (tc.id) streamToolCalls[idx].id = tc.id;
+            if (tc.function?.name) streamToolCalls[idx].function.name += tc.function.name;
+            if (tc.function?.arguments) streamToolCalls[idx].function.arguments += tc.function.arguments;
+          }
+        }
+
+        // Stream content tokens, filtering <think>...</think> blocks
+        if (delta.content) {
+          thinkBuffer += delta.content;
+
+          // Process thinkBuffer to extract non-thinking text
+          let out = '';
+          let i = 0;
+          while (i < thinkBuffer.length) {
+            if (!inThinkBlock) {
+              const thinkStart = thinkBuffer.indexOf('<think>', i);
+              if (thinkStart === -1) {
+                out += thinkBuffer.slice(i);
+                i = thinkBuffer.length;
+              } else {
+                out += thinkBuffer.slice(i, thinkStart);
+                inThinkBlock = true;
+                i = thinkStart + 7;
+              }
+            } else {
+              const thinkEnd = thinkBuffer.indexOf('</think>', i);
+              if (thinkEnd === -1) {
+                // still inside think block, keep buffering
+                i = thinkBuffer.length;
+              } else {
+                inThinkBlock = false;
+                i = thinkEnd + 8;
+              }
+            }
+          }
+          thinkBuffer = inThinkBlock ? thinkBuffer.slice(thinkBuffer.lastIndexOf('<think>')) : '';
+
+          streamContent += out;
+          if (out) {
+            this.emit('agent_output', { agentId, output: out });
+          }
+        }
+      }
+    }
 
     this.emit('tool_activity', {
       agentId,
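The `<think>` filtering above is the delicate part: a closing tag can arrive split across several deltas, so the code keeps an `inThinkBlock` flag and re-buffers from the last unclosed `<think>` until `</think>` shows up. The same state machine, extracted as a pure closure for illustration (the names here are mine, not the package's):

// Sketch only: the diff's <think>-stripping logic as a reusable closure.
function makeThinkFilter() {
  let inThink = false;
  let pending = '';
  return function feed(chunk) {
    pending += chunk;
    let out = '';
    let i = 0;
    while (i < pending.length) {
      if (!inThink) {
        const start = pending.indexOf('<think>', i);
        if (start === -1) { out += pending.slice(i); i = pending.length; }
        else { out += pending.slice(i, start); inThink = true; i = start + 7; }
      } else {
        const end = pending.indexOf('</think>', i);
        if (end === -1) { i = pending.length; }  // tag not closed yet: keep buffering
        else { inThink = false; i = end + 8; }
      }
    }
    pending = inThink ? pending.slice(pending.lastIndexOf('<think>')) : '';
    return out;
  };
}

const filter = makeThinkFilter();
console.log(filter('Hello <think>secret'));      // "Hello "
console.log(filter(' reasoning</think> world')); // " world"

Note that, as in the diff, an opening tag split across two deltas (e.g. '<thi' then 'nk>') would leak to the output, since already-scanned text is flushed before the rest of the tag arrives.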
@@ -208,6 +285,14 @@ export class OllamaAgent extends EventEmitter {
       description: `✅ Ollama responded`
     });
 
+    // Reconstruct message from streamed parts
+    const toolCallsArray = Object.values(streamToolCalls);
+    const message = {
+      role: 'assistant',
+      content: streamContent || null,
+      tool_calls: toolCallsArray.length > 0 ? toolCallsArray : undefined
+    };
+
     messages.push(message);
 
     // ── Handle tool calls ──
@@ -242,17 +327,9 @@ export class OllamaAgent extends EventEmitter {
         continue;
       }
 
-      // ── No tool calls:
-      if (
-        finalContent =
-        // Stream the response in chunks so the UI feels live
-        const words = message.content.split(' ');
-        const CHUNK_SIZE = 8;
-        for (let i = 0; i < words.length; i += CHUNK_SIZE) {
-          if (controller.signal.aborted) break;
-          const chunk = words.slice(i, i + CHUNK_SIZE).join(' ') + (i + CHUNK_SIZE < words.length ? ' ' : '');
-          this.emit('agent_output', { agentId, output: chunk });
-        }
+      // ── No tool calls: final answer already streamed above ──
+      if (streamContent) {
+        finalContent = streamContent;
       }
       break;
     }