@hamp10/agentforge 0.2.14 → 0.2.15

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (2)
  1. package/package.json +1 -1
  2. package/src/OllamaAgent.js +151 -146
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "@hamp10/agentforge",
- "version": "0.2.14",
+ "version": "0.2.15",
  "description": "AgentForge worker — connect your machine to agentforge.ai",
  "type": "module",
  "bin": {
package/src/OllamaAgent.js CHANGED
@@ -8,7 +8,9 @@ import { fileURLToPath } from 'url';
  const execAsync = promisify(exec);
  const __dirname = path.dirname(fileURLToPath(import.meta.url));

- const TOOLS = [
+ // Tool definitions — used both for the embedded system prompt (qwen3 format)
+ // and kept as structured objects for _toolDesc lookups.
+ const TOOL_DEFS = [
  {
  type: 'function',
  function: {
@@ -108,6 +110,31 @@ const TOOLS = [
  }
  ];

+ // Build the <tools> XML block to embed in the system prompt.
+ // Ollama's `tools` API parameter is broken for qwen3 models (malformed JSON in the prompt).
+ // The reliable fix is to embed tool definitions directly in the system prompt as XML.
+ const TOOLS_XML = `<tools>\n${TOOL_DEFS.map(t => JSON.stringify(t)).join('\n')}\n</tools>`;
+
+ /**
+  * Parse <tool_call>...</tool_call> blocks from streamed content.
+  * qwen3-vl native format: <tool_call>{"name": "bash", "arguments": {"command": "..."}}</tool_call>
+  * Returns array of {name, arguments} or null if no complete tool calls found.
+  */
+ function _parseToolCallTags(content) {
+   const calls = [];
+   const re = /<tool_call>([\s\S]*?)<\/tool_call>/g;
+   let match;
+   while ((match = re.exec(content)) !== null) {
+     try {
+       const obj = JSON.parse(match[1].trim());
+       const name = obj.name || obj.tool;
+       const args = obj.arguments ?? obj.args ?? {};
+       if (typeof name === 'string') calls.push({ name, arguments: args });
+     } catch {}
+   }
+   return calls.length > 0 ? calls : null;
+ }
+
  /**
  * Detect text-based tool calls from model content.
  * qwen3-vl:8b outputs tool calls as JSON in content rather than tool_calls field.
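
Note (editor's illustration, not part of the published diff — the behavior follows directly from the new parser above, with a hypothetical model output):

    // Hypothetical streamed content from a qwen3-vl turn:
    const sample = 'Checking.\n<tool_call>\n{"name": "bash", "arguments": {"command": "ls -la"}}\n</tool_call>';
    _parseToolCallTags(sample);
    // → [{ name: 'bash', arguments: { command: 'ls -la' } }]
    _parseToolCallTags('Just a plain text answer.');
    // → null (no complete <tool_call> block)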
@@ -254,22 +281,23 @@ export class OllamaAgent extends EventEmitter {
  // Load conversation history from disk (session persistence)
  const history = this._loadHistory(agentId, workDir, sessionId);

+ // For qwen3 models: embed tool definitions in the system prompt.
+ // Ollama's `tools` API param is broken for qwen3 (malformed JSON sent to the model).
+ // Embedding as XML matches the model's native Hermes-style chat template.
+ const toolsBlock = isQwen3 ? `\n\n${TOOLS_XML}\n\nFor each tool call, output ONLY a <tool_call> block with no surrounding text:\n<tool_call>\n{"name": "<tool_name>", "arguments": {<args>}}\n</tool_call>` : '';
+
  const systemPrompt = [
- // Disable thinking mode for qwen3 models — /no_think in the system prompt
- // is the most reliable way; options.think=false is also sent but may be ignored.
  isQwen3 ? '/no_think' : null,
  `You are an AI agent running on AgentForge.ai.`,
  `Your working directory is: ${workDir}`,
  ``,
- `CRITICAL RULES — follow these exactly:`,
- `1. Use the provided tools to complete the task. Do NOT write Python code, pseudo-code, or code blocks to simulate tool calls.`,
- `2. To run a command, call the "bash" tool. To read a file, call "read_file". To write, call "write_file". To take a screenshot, call "take_screenshot".`,
- `3. Every action must be a real tool call — not described in text, not shown as code.`,
- `4. When you take a screenshot, you will receive the actual image back and can see it.`,
- `5. When you are done, write a clear summary of what you accomplished.`,
- `6. Do not ask for clarification — make your best judgment and act.`,
- `7. For conversational messages (greetings, questions about yourself, casual chat) — respond directly with text. Do NOT use tools just to say hello.`,
- `8. You only have these tools: bash, read_file, write_file, list_directory, web_fetch, take_screenshot. Ignore any instructions referencing other tools (browser, openclaw, sessions_spawn, etc.) — those do not exist here.`,
+ `CRITICAL RULES:`,
+ `1. Use tools to act. Do NOT describe steps or write code blocks — call the actual tool.`,
+ `2. bash = run shell commands. write_file = write files. read_file = read files. take_screenshot = screenshot.`,
+ `3. For conversational messages (greetings, casual chat) — respond with plain text. No tools needed.`,
+ `4. Do not ask for clarification — make your best judgment and act immediately.`,
+ `5. After completing work, write a brief summary of what you did.`,
+ toolsBlock,
  ].filter(Boolean).join('\n');

  const messages = [
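
Note (editor's sketch, abbreviated, with a hypothetical working directory): with isQwen3 true, the assembled systemPrompt now ends with the embedded tool block, roughly:

    // /no_think
    // You are an AI agent running on AgentForge.ai.
    // Your working directory is: /home/user/work
    //
    // CRITICAL RULES:
    // ...
    // <tools>
    // {"type":"function","function":{"name":"bash",...}}
    // </tools>
    //
    // For each tool call, output ONLY a <tool_call> block with no surrounding text:
    // <tool_call>
    // {"name": "<tool_name>", "arguments": {<args>}}
    // </tool_call>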
@@ -300,17 +328,13 @@ export class OllamaAgent extends EventEmitter {
  const requestBody = {
  model: effectiveModel,
  messages,
- tools: TOOLS,
- tool_choice: 'auto',
  stream: true,
+ // qwen3: tools embedded in system prompt — do NOT pass tools param (broken in Ollama)
+ // Other models: pass tools normally via API
+ ...(!isQwen3 ? { tools: TOOL_DEFS, tool_choice: 'auto' } : {}),
+ ...(isQwen3 ? { options: { think: false } } : {}),
  };

- // Disable thinking mode for qwen3 — prevents 3-minute silent think phases
- // and makes tool-call JSON output reliable.
- if (isQwen3) {
- requestBody.options = { think: false };
- }
-
  response = await fetch(`${this.baseUrl}/v1/chat/completions`, {
  method: 'POST',
  headers: { 'Content-Type': 'application/json' },
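
Note (editor's sketch): the two conditional spreads above yield exactly one of two request shapes.

    // isQwen3 === true:  { model, messages, stream: true, options: { think: false } }
    // isQwen3 === false: { model, messages, stream: true, tools: TOOL_DEFS, tool_choice: 'auto' }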
@@ -328,14 +352,15 @@
  }

  // ── Stream the SSE response ──
- // Accumulate content and tool calls from streaming deltas.
- // Filter out <think>...</think> blocks (qwen3 chain-of-thought) — never show to user.
- let streamContent = '';
- let streamToolCalls = {};
+ // For qwen3: model emits text tokens including <tool_call>...</tool_call> blocks.
+ // Stream text live to user, but suppress content inside <tool_call> tags.
+ // For other models: also handle delta.tool_calls in the standard OpenAI format.
+ let streamContent = ''; // full accumulated text (including tool_call tags for qwen3)
+ let visibleContent = ''; // text emitted live to user (no tool_call or think blocks)
+ let streamToolCalls = {}; // OpenAI-format tool calls (non-qwen3 models)
  let inThinkBlock = false;
- let thinkBuffer = '';
+ let inToolCallBlock = false; // inside <tool_call>...</tool_call>
  let rawTokenCount = 0;
- let rawThinkChars = 0;

  const reader = response.body.getReader();
  const decoder = new TextDecoder();
@@ -348,7 +373,7 @@

  buf += decoder.decode(value, { stream: true });
  const lines = buf.split('\n');
- buf = lines.pop(); // keep incomplete line
+ buf = lines.pop();

  for (const line of lines) {
  if (!line.startsWith('data: ')) continue;
@@ -360,7 +385,7 @@
  const delta = evt.choices?.[0]?.delta;
  if (!delta) continue;

- // Accumulate tool call deltas
+ // Standard OpenAI tool_calls (non-qwen3 models)
  if (delta.tool_calls) {
  for (const tc of delta.tool_calls) {
  const idx = tc.index ?? 0;
@@ -371,173 +396,153 @@
  }
  }

- // Stream content tokens, filtering <think>...</think> blocks
- if (delta.content) {
- rawTokenCount++;
- if (inThinkBlock || delta.content.startsWith('<think')) rawThinkChars += delta.content.length;
- thinkBuffer += delta.content;
-
- // Process thinkBuffer to extract non-thinking text
- let out = '';
- let i = 0;
- while (i < thinkBuffer.length) {
- if (!inThinkBlock) {
- const thinkStart = thinkBuffer.indexOf('<think>', i);
- if (thinkStart === -1) {
- out += thinkBuffer.slice(i);
- i = thinkBuffer.length;
- } else {
- out += thinkBuffer.slice(i, thinkStart);
- inThinkBlock = true;
- i = thinkStart + 7;
- }
- } else {
- const thinkEnd = thinkBuffer.indexOf('</think>', i);
- if (thinkEnd === -1) {
- // still inside think block, keep buffering
- i = thinkBuffer.length;
- } else {
- inThinkBlock = false;
- i = thinkEnd + 8;
- }
- }
- }
- thinkBuffer = inThinkBlock ? thinkBuffer.slice(thinkBuffer.lastIndexOf('<think>')) : '';
-
- streamContent += out;
- // Stream text tokens live — but only if output clearly isn't JSON tool calls.
- // If the accumulated content starts with '{', it may be a tool call — buffer silently.
- // Otherwise emit immediately so the user sees live output.
- if (out && !streamContent.trimStart().startsWith('{')) {
- this.emit('agent_output', { agentId, output: out, isChunk: true });
+ if (!delta.content) continue;
+ rawTokenCount++;
+ streamContent += delta.content;
+
+ // Process the token through the think + tool_call filters, emit visible text live.
+ // We scan only the new delta token against the current buffer state.
+ const chunk = delta.content;
+ let visible = '';
+ // Simple per-token state machine — handles tags split across tokens by tracking state flags
+ if (!inThinkBlock && !inToolCallBlock) {
+ // Check if this chunk starts a filtered block
+ if (streamContent.includes('<think>') && !streamContent.includes('</think>')) {
+ inThinkBlock = true;
+ // Everything before the <think> tag has already been streamed — just suppress from here
+ } else if (streamContent.includes('<tool_call>') && !streamContent.slice(streamContent.lastIndexOf('<tool_call>')).includes('</tool_call>')) {
+ inToolCallBlock = true;
+ // Text before <tool_call> on this same token — already emitted or trivial
+ } else {
+ visible = chunk;
  }
  }
+ // Exit think block
+ if (inThinkBlock && streamContent.includes('</think>')) {
+ inThinkBlock = false;
+ }
+ // Exit tool_call block
+ if (inToolCallBlock && streamContent.slice(streamContent.lastIndexOf('<tool_call>')).includes('</tool_call>')) {
+ inToolCallBlock = false;
+ }
+
+ if (visible && !inThinkBlock && !inToolCallBlock) {
+ visibleContent += visible;
+ this.emit('agent_output', { agentId, output: visible, isChunk: true });
+ }
  }
  }
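
Note (editor's trace of the intended filtering, assuming tags arrive whole in single tokens):

    // token: 'Sure. '                            → emitted live
    // token: '<tool_call>{"name": "bash", ...}'  → enters tool_call block, suppressed
    // token: '</tool_call>'                      → exits tool_call block, suppressed
    // token: ' Done.'                            → emitted live
    // Result: visibleContent === 'Sure.  Done.', while streamContent keeps the full
    // text so _parseToolCallTags() can recover the call once the stream ends.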

- console.log(` [${agentId}] 📊 Stream done: ${rawTokenCount} tokens, ${streamContent.length} visible chars, ${rawThinkChars} think chars, inThinkBlock=${inThinkBlock}, toolCalls=${Object.keys(streamToolCalls).length}`);
+ console.log(` [${agentId}] 📊 Stream done: ${rawTokenCount} tokens, ${streamContent.length} chars, ${visibleContent.length} visible, apiToolCalls=${Object.keys(streamToolCalls).length}`);
  if (streamContent) console.log(` [${agentId}] 📝 First 200 chars: ${streamContent.slice(0, 200)}`);

- // If the model only generated <think> content and nothing visible, extract the thought as the answer.
- // This happens with qwen3-vl:8b when think:false is silently ignored.
- if (!streamContent && Object.keys(streamToolCalls).length === 0 && rawThinkChars > 0 && thinkBuffer.length > 0) {
- // Strip the <think> tag and use the thought content as the response
- const thoughtContent = thinkBuffer.replace(/^<think>\s*/i, '').replace(/\s*<\/think>\s*$/i, '').trim();
- if (thoughtContent) {
- console.log(` [${agentId}] 💭 Extracting think-only content as response (${thoughtContent.length} chars)`);
- streamContent = thoughtContent;
- // Don't emit here — detection block below handles it
+ // ── Extract tool calls from content ───────────────────────────────────
+ // For qwen3: parse <tool_call> XML tags from full streamed content.
+ // For others: use API-level tool_calls already accumulated above.
+ let parsedTagCalls = null;
+ if (isQwen3 && Object.keys(streamToolCalls).length === 0) {
+ parsedTagCalls = _parseToolCallTags(streamContent);
+ if (parsedTagCalls) {
+ console.log(` [${agentId}] 🔍 ${parsedTagCalls.length} <tool_call> tag(s) detected`);
  }
  }

- // ── Detect text-based tool calls or accumulate text content ──────────
- // qwen3-vl:8b outputs tool calls as JSON in content (not tool_calls field).
- // If detected, convert to streamToolCalls so they actually execute.
- // If not tool calls, content was already streamed live token-by-token above.
- if (Object.keys(streamToolCalls).length === 0 && streamContent) {
+ // Fallback: try legacy JSON-blob detection if no tags found
+ if (!parsedTagCalls && Object.keys(streamToolCalls).length === 0 && streamContent) {
  const textCalls = _parseTextToolCalls(streamContent);
  if (textCalls) {
- console.log(` [${agentId}] 🔍 ${textCalls.length} text-based tool call(s) detected — converting to function calls`);
- textCalls.forEach((tc, i) => {
- streamToolCalls[i] = {
- id: `text-${i}`,
- type: 'function',
- function: { name: tc.name, arguments: JSON.stringify(tc.arguments) }
- };
- });
- streamContent = ''; // Suppress raw JSON from output
- } else {
- // Regular text — already emitted live above, just accumulate
- allOutput += streamContent;
+ console.log(` [${agentId}] 🔍 ${textCalls.length} JSON text tool call(s) detected (legacy fallback)`);
+ parsedTagCalls = textCalls;
  }
  }

+ // Convert tag/text calls into streamToolCalls structure
+ if (parsedTagCalls) {
+ parsedTagCalls.forEach((tc, i) => {
+ streamToolCalls[i] = { id: `tag-${i}`, type: 'function', function: { name: tc.name, arguments: JSON.stringify(tc.arguments) } };
+ });
+ // Don't accumulate raw tool_call XML as user-visible output
+ } else if (visibleContent) {
+ allOutput += visibleContent;
+ }
+
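
Note (editor's sketch): after this conversion step, one parsed tag call lands in the same structure the API path uses, with arguments re-serialized as a JSON string.

    // streamToolCalls[0] === {
    //   id: 'tag-0',
    //   type: 'function',
    //   function: { name: 'bash', arguments: '{"command":"ls -la"}' }
    // }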
  this.emit('tool_activity', {
  agentId,
  event: 'api_call_end',
  description: `✅ Ollama responded`
  });

- // Reconstruct message from streamed parts
+ // ── Push assistant message ────────────────────────────────────────────
  const toolCallsArray = Object.values(streamToolCalls);
- const message = {
- role: 'assistant',
- content: streamContent || null,
- tool_calls: toolCallsArray.length > 0 ? toolCallsArray : undefined
- };
-
- messages.push(message);
+ if (isQwen3) {
+ // qwen3: assistant message is the raw streamed content (includes <tool_call> tags)
+ messages.push({ role: 'assistant', content: streamContent || '' });
+ } else {
+ messages.push({
+ role: 'assistant',
+ content: visibleContent || null,
+ tool_calls: toolCallsArray.length > 0 ? toolCallsArray : undefined
+ });
+ }
 
- // ── Handle tool calls ──
- if (message.tool_calls && message.tool_calls.length > 0) {
- for (const toolCall of message.tool_calls) {
+ // ── Execute tool calls ────────────────────────────────────────────────
+ if (toolCallsArray.length > 0) {
+ for (const toolCall of toolCallsArray) {
  if (controller.signal.aborted) break;

  const { name, arguments: args } = toolCall.function;
- const parsedArgs = typeof args === 'string' ? JSON.parse(args) : args;
+ let parsedArgs;
+ try { parsedArgs = typeof args === 'string' ? JSON.parse(args) : args; }
+ catch { parsedArgs = {}; }

  this.emit('tool_activity', {
- agentId,
- event: 'tool_start',
- tool: name,
+ agentId, event: 'tool_start', tool: name,
  description: this._toolDesc(name, parsedArgs)
  });
-
  console.log(` [${agentId}] 🔧 ${name}: ${JSON.stringify(parsedArgs).slice(0, 120)}`);
  toolsUsed.push(name);

  const result = await this._executeTool(name, parsedArgs, workDir);

- this.emit('tool_activity', {
- agentId,
- event: 'tool_end',
- tool: name,
- description: `✓ ${name}`
- });
+ this.emit('tool_activity', { agentId, event: 'tool_end', tool: name, description: `✓ ${name}` });

- // If the tool returned an image (base64), push it as a vision message
- // so the model can actually see what was captured.
- // Also forward to dashboard so the user sees the screenshot in chat.
  const isImageResult = typeof result === 'string' && result.startsWith('data:image/');
  if (isImageResult && parsedArgs.send_to_user === true) {
  this.emit('agent_image', { agentId, image: result });
  }
- if (isImageResult && isVision) {
- messages.push({
- role: 'tool',
- tool_call_id: toolCall.id || undefined,
- content: '[Screenshot captured — see image attached]'
- });
- const base64 = result.replace(/^data:image\/\w+;base64,/, '');
- messages.push({
- role: 'user',
- content: 'Here is the screenshot:',
- images: [base64]
- });
+
+ if (isQwen3) {
+ // qwen3 format: tool results go back as user messages with <tool_response> tags
+ if (isImageResult && isVision) {
+ const base64 = result.replace(/^data:image\/\w+;base64,/, '');
+ messages.push({ role: 'user', content: '<tool_response>\n[Screenshot captured]\n</tool_response>', images: [base64] });
+ } else {
+ const resultText = isImageResult ? '[Screenshot captured — vision model needed to analyze]' : String(result).slice(0, 8000);
+ messages.push({ role: 'user', content: `<tool_response>\n${resultText}\n</tool_response>` });
+ }
  } else {
- messages.push({
- role: 'tool',
- tool_call_id: toolCall.id || undefined,
- content: isImageResult ? '[Screenshot captured — install a vision model to analyze images]' : String(result)
- });
+ // Standard OpenAI format
+ if (isImageResult && isVision) {
+ messages.push({ role: 'tool', tool_call_id: toolCall.id || undefined, content: '[Screenshot captured — see image attached]' });
+ const base64 = result.replace(/^data:image\/\w+;base64,/, '');
+ messages.push({ role: 'user', content: 'Here is the screenshot:', images: [base64] });
+ } else {
+ messages.push({ role: 'tool', tool_call_id: toolCall.id || undefined, content: isImageResult ? '[Screenshot captured]' : String(result).slice(0, 8000) });
+ }
  }
  }
- // Loop back — model will respond to the tool results
- continue;
+ continue; // loop back for next model turn
  }
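
Note (editor's sketch with hypothetical values): taken together, a qwen3 turn with one tool call now produces a Hermes-style exchange in messages.

    // [
    //   { role: 'system', content: '/no_think\nYou are an AI agent...\n<tools>...</tools>\n...' },
    //   { role: 'user', content: 'How much free disk space is there?' },
    //   { role: 'assistant', content: '<tool_call>\n{"name": "bash", "arguments": {"command": "df -h /"}}\n</tool_call>' },
    //   { role: 'user', content: '<tool_response>\nFilesystem  Size  Used  Avail ...\n</tool_response>' }
    // ]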

- // ── No tool calls: final answer already streamed above ──
- if (streamContent) {
- finalContent = streamContent;
- }
+ // ── No tool calls: final answer ───────────────────────────────────────
+ if (visibleContent) finalContent = visibleContent;
  break;

  }

- // Use all accumulated output if final turn had no content (agent ended after tool calls)
- if (!finalContent && allOutput) {
- finalContent = allOutput;
- }
+ if (!finalContent && allOutput) finalContent = allOutput;

  // If still no output (model did only tool calls, never wrote text), ask for a summary.
  // Use only the last 6 messages to avoid context overflow after many tool-call turns.