@hamp10/agentforge 0.2.8 → 0.2.9

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (2) hide show
  1. package/package.json +1 -1
  2. package/src/OllamaAgent.js +25 -2
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@hamp10/agentforge",
-  "version": "0.2.8",
+  "version": "0.2.9",
   "description": "AgentForge worker — connect your machine to agentforge.ai",
   "type": "module",
   "bin": {
@@ -186,6 +186,9 @@ export class OllamaAgent extends EventEmitter {
     const history = this._loadHistory(agentId, workDir, sessionId);

     const systemPrompt = [
+      // Disable thinking mode for qwen3 models — /no_think in the system prompt
+      // is the most reliable way; options.think=false is also sent but may be ignored.
+      isQwen3 ? '/no_think' : null,
       `You are an AI agent running on AgentForge.ai.`,
       `Your working directory is: ${workDir}`,
       ``,
@@ -198,7 +201,7 @@ export class OllamaAgent extends EventEmitter {
       `6. Do not ask for clarification — make your best judgment and act.`,
       `7. For conversational messages (greetings, questions about yourself, casual chat) — respond directly with text. Do NOT use tools just to say hello.`,
       `8. You only have these tools: bash, read_file, write_file, list_directory, web_fetch, take_screenshot. Ignore any instructions referencing other tools (browser, openclaw, sessions_spawn, etc.) — those do not exist here.`,
-    ].join('\n');
+    ].filter(Boolean).join('\n');

     const messages = [
       { role: 'system', content: systemPrompt },
@@ -262,6 +265,8 @@ export class OllamaAgent extends EventEmitter {
     let streamToolCalls = {};
     let inThinkBlock = false;
     let thinkBuffer = '';
+    let rawTokenCount = 0;
+    let rawThinkChars = 0;

     const reader = response.body.getReader();
     const decoder = new TextDecoder();
@@ -299,6 +304,8 @@ export class OllamaAgent extends EventEmitter {

     // Stream content tokens, filtering <think>...</think> blocks
     if (delta.content) {
+      rawTokenCount++;
+      if (inThinkBlock || delta.content.startsWith('<think')) rawThinkChars += delta.content.length;
       thinkBuffer += delta.content;

       // Process thinkBuffer to extract non-thinking text
@@ -337,6 +344,22 @@ export class OllamaAgent extends EventEmitter {
       }
     }

+    console.log(`  [${agentId}] 📊 Stream done: ${rawTokenCount} tokens, ${streamContent.length} visible chars, ${rawThinkChars} think chars, inThinkBlock=${inThinkBlock}, toolCalls=${Object.keys(streamToolCalls).length}`);
+    if (streamContent) console.log(`  [${agentId}] 📝 First 200 chars: ${streamContent.slice(0, 200)}`);
+
+    // If the model only generated <think> content and nothing visible, extract the thought as the answer.
+    // This happens with qwen3-vl:8b when think:false is silently ignored.
+    if (!streamContent && Object.keys(streamToolCalls).length === 0 && rawThinkChars > 0 && thinkBuffer.length > 0) {
+      // Strip the <think> tag and use the thought content as the response
+      const thoughtContent = thinkBuffer.replace(/^<think>\s*/i, '').replace(/\s*<\/think>\s*$/i, '').trim();
+      if (thoughtContent) {
+        console.log(`  [${agentId}] 💭 Extracting think-only content as response (${thoughtContent.length} chars)`);
+        streamContent = thoughtContent;
+        allOutput += thoughtContent;
+        this.emit('agent_output', { agentId, output: thoughtContent });
+      }
+    }
+
     this.emit('tool_activity', {
       agentId,
       event: 'api_call_end',
@@ -501,7 +524,7 @@ export class OllamaAgent extends EventEmitter {
     });

     console.log(`\n✅ [Ollama] Agent ${agentId} completed in ${(duration / 1000).toFixed(2)}s\n`);
-    return { success: true, agentId, duration };
+    return { success: true, agentId, duration, result: { output: finalContent } };

   } catch (err) {
     this.activeAgents.delete(agentId);