open-agents-ai 0.11.6 → 0.11.7

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (2)
  1. package/dist/index.js +172 -12
  2. package/package.json +1 -1
package/dist/index.js CHANGED
@@ -6588,7 +6588,9 @@ Commands run non-interactively (CI=true). When running scaffolding tools:
6588
6588
  taskTimeoutMs: options?.taskTimeoutMs ?? 12e5,
6589
6589
  compactionThreshold: options?.compactionThreshold ?? 4e4,
6590
6590
  dynamicContext: options?.dynamicContext ?? "",
6591
- streamEnabled: options?.streamEnabled ?? false
6591
+ streamEnabled: options?.streamEnabled ?? false,
6592
+ bruteForce: options?.bruteForce ?? false,
6593
+ bruteForceMaxCycles: options?.bruteForceMaxCycles ?? 3
6592
6594
  };
6593
6595
  }
6594
6596
  /** Register a tool for the agent to use */
@@ -6638,9 +6640,11 @@ TASK: ${task}` : task }
6638
6640
  ];
6639
6641
  const toolDefs = this.buildToolDefinitions();
6640
6642
  let totalTokens = 0;
6643
+ let estimatedTokens = 0;
6641
6644
  let toolCallCount = 0;
6642
6645
  let completed = false;
6643
6646
  let summary = "";
6647
+ let bruteForceCycle = 0;
6644
6648
  for (let turn = 0; turn < this.options.maxTurns; turn++) {
6645
6649
  if (this.aborted) {
6646
6650
  this.emit({ type: "error", content: "Task aborted by user", timestamp: (/* @__PURE__ */ new Date()).toISOString() });
@@ -6693,6 +6697,9 @@ Integrate this guidance into your current approach. Continue working on the task
6693
6697
  };
6694
6698
  const response = this.options.streamEnabled && this.hasStreamingSupport() ? await this.streamingRequest(chatRequest, turn) : await this.backend.chatCompletion(chatRequest);
6695
6699
  totalTokens += response.usage?.totalTokens ?? 0;
6700
+ const choiceContent = response.choices[0]?.message?.content ?? "";
6701
+ const choiceArgs = response.choices[0]?.message?.toolCalls?.map((tc) => JSON.stringify(tc.arguments)).join("") ?? "";
6702
+ estimatedTokens += Math.ceil((choiceContent.length + choiceArgs.length) / 4);
6696
6703
  const choice = response.choices[0];
6697
6704
  if (!choice)
6698
6705
  break;
@@ -6771,6 +6778,122 @@ ${result.output}`;
6771
6778
  });
6772
6779
  }
6773
6780
  }
6781
+ if (!completed && !this.aborted && this.options.bruteForce && bruteForceCycle < this.options.bruteForceMaxCycles && Date.now() < deadline) {
6782
+ bruteForceCycle++;
6783
+ const totalTurns = messages.filter((m) => m.role === "assistant").length;
6784
+ this.emit({
6785
+ type: "compaction",
6786
+ content: `Brute-force cycle ${bruteForceCycle}/${this.options.bruteForceMaxCycles} \u2014 re-engaging agent`,
6787
+ timestamp: (/* @__PURE__ */ new Date()).toISOString()
6788
+ });
6789
+ messages.push({
6790
+ role: "user",
6791
+ content: `[BRUTE-FORCE RE-ENGAGEMENT \u2014 Cycle ${bruteForceCycle}/${this.options.bruteForceMaxCycles}]
6792
+
6793
+ You have used ${totalTurns} turns and ${toolCallCount} tool calls without completing the task. DO NOT give up. Reassess your approach:
6794
+
6795
+ 1. DIAGNOSE: What exactly is blocking progress? Read error messages carefully.
6796
+ 2. PIVOT: If the current approach isn't working, try a completely different strategy.
6797
+ 3. INVESTIGATE: Use grep_search, find_files, and web_search to find solutions.
6798
+ 4. SIMPLIFY: Break the problem into smaller steps. Fix one thing at a time.
6799
+ 5. VERIFY: Run tests/build after EVERY change \u2014 don't batch multiple changes.
6800
+
6801
+ You have ${this.options.maxTurns} more turns. Be creative and investigative. If a file is confusing, re-read it. If a test fails, read the FULL error. Call task_complete when done.`
6802
+ });
6803
+ const compacted = this.compactMessages(messages);
6804
+ messages.length = 0;
6805
+ messages.push(...compacted);
6806
+ for (let turn = 0; turn < this.options.maxTurns; turn++) {
6807
+ if (this.aborted) {
6808
+ this.emit({ type: "error", content: "Task aborted by user", timestamp: (/* @__PURE__ */ new Date()).toISOString() });
6809
+ break;
6810
+ }
6811
+ if (Date.now() > deadline) {
6812
+ this.emit({ type: "error", content: "Task timeout reached", timestamp: (/* @__PURE__ */ new Date()).toISOString() });
6813
+ break;
6814
+ }
6815
+ while (this.pendingUserMessages.length > 0) {
6816
+ const userMsg = this.pendingUserMessages.shift();
6817
+ const imagePattern = /\[IMAGE_BASE64:([^:]+):([^\]]+)\]/;
6818
+ const imgMatch = userMsg.match(imagePattern);
6819
+ if (imgMatch) {
6820
+ const mime = imgMatch[1];
6821
+ const base64 = imgMatch[2];
6822
+ const textContent = userMsg.replace(imagePattern, "").trim();
6823
+ const parts = [];
6824
+ if (textContent) {
6825
+ parts.push({ type: "text", text: `[User added context]: ${textContent}
6826
+
6827
+ Describe what you see and integrate this into your current approach.` });
6828
+ } else {
6829
+ parts.push({ type: "text", text: "[User shared an image]. Describe what you see and integrate this into your current approach." });
6830
+ }
6831
+ parts.push({ type: "image_url", image_url: { url: `data:${mime};base64,${base64}` } });
6832
+ messages.push({ role: "user", content: parts });
6833
+ } else {
6834
+ messages.push({ role: "user", content: `[User added context]: ${userMsg}
6835
+
6836
+ Integrate this guidance into your current approach. Continue working on the task.` });
6837
+ }
6838
+ this.emit({ type: "user_interrupt", content: userMsg.replace(/\[IMAGE_BASE64:[^\]]+\]/, "[image]").slice(0, 200), turn, timestamp: (/* @__PURE__ */ new Date()).toISOString() });
6839
+ }
6840
+ const compactedMsgs = this.compactMessages(messages);
6841
+ const chatRequest = { messages: compactedMsgs, tools: toolDefs, temperature: this.options.temperature, maxTokens: this.options.maxTokens, timeoutMs: this.options.requestTimeoutMs };
6842
+ const response = this.options.streamEnabled && this.hasStreamingSupport() ? await this.streamingRequest(chatRequest, turn) : await this.backend.chatCompletion(chatRequest);
6843
+ totalTokens += response.usage?.totalTokens ?? 0;
6844
+ const choiceContent2 = response.choices[0]?.message?.content ?? "";
6845
+ const choiceArgs2 = response.choices[0]?.message?.toolCalls?.map((tc) => JSON.stringify(tc.arguments)).join("") ?? "";
6846
+ estimatedTokens += Math.ceil((choiceContent2.length + choiceArgs2.length) / 4);
6847
+ const choice = response.choices[0];
6848
+ if (!choice)
6849
+ break;
6850
+ const msg = choice.message;
6851
+ if (msg.toolCalls && msg.toolCalls.length > 0) {
6852
+ messages.push({ role: "assistant", content: msg.content || null, tool_calls: msg.toolCalls.map((tc) => ({ id: tc.id, type: "function", function: { name: tc.name, arguments: JSON.stringify(tc.arguments) } })) });
6853
+ for (const tc of msg.toolCalls) {
6854
+ if (this.aborted)
6855
+ break;
6856
+ toolCallCount++;
6857
+ this.emit({ type: "tool_call", toolName: tc.name, toolArgs: tc.arguments, turn, timestamp: (/* @__PURE__ */ new Date()).toISOString() });
6858
+ const tool = this.tools.get(tc.name);
6859
+ let result;
6860
+ if (!tool) {
6861
+ result = { success: false, output: "", error: `Unknown tool: ${tc.name}` };
6862
+ } else {
6863
+ try {
6864
+ result = await tool.execute(tc.arguments);
6865
+ } catch (err) {
6866
+ result = { success: false, output: "", error: err instanceof Error ? err.message : String(err) };
6867
+ }
6868
+ }
6869
+ const maxLen = 8e3;
6870
+ const output = result.success ? result.output.length > maxLen ? result.output.slice(0, maxLen) + `
6871
+ ...(truncated)` : result.output : `Error: ${result.error || "unknown error"}
6872
+ ${result.output}`;
6873
+ this.emit({ type: "tool_result", toolName: tc.name, content: output.slice(0, 200), success: result.success, turn, timestamp: (/* @__PURE__ */ new Date()).toISOString() });
6874
+ const toolMsg = this.buildToolMessage(output, tc.id);
6875
+ messages.push(toolMsg);
6876
+ if (tc.name === "task_complete") {
6877
+ completed = true;
6878
+ summary = tc.arguments.summary || "";
6879
+ break;
6880
+ }
6881
+ }
6882
+ if (completed)
6883
+ break;
6884
+ } else {
6885
+ const content = msg.content || "";
6886
+ messages.push({ role: "assistant", content });
6887
+ this.emit({ type: "model_response", content: content.slice(0, 200), turn, timestamp: (/* @__PURE__ */ new Date()).toISOString() });
6888
+ if (/task.?complete|all tests pass/i.test(content)) {
6889
+ completed = true;
6890
+ summary = content;
6891
+ break;
6892
+ }
6893
+ messages.push({ role: "user", content: "Continue working. Use tools to read files, make changes, and run validation. Call task_complete when done." });
6894
+ }
6895
+ }
6896
+ }
6774
6897
  const durationMs = Date.now() - start;
6775
6898
  this.emit({
6776
6899
  type: "complete",
@@ -6778,7 +6901,7 @@ ${result.output}`;
6778
6901
  success: completed,
6779
6902
  timestamp: (/* @__PURE__ */ new Date()).toISOString()
6780
6903
  });
6781
- return { completed, turns: messages.filter((m) => m.role === "assistant").length, toolCalls: toolCallCount, totalTokens, summary, durationMs };
6904
+ return { completed, turns: messages.filter((m) => m.role === "assistant").length, toolCalls: toolCallCount, totalTokens, estimatedTokens, summary, durationMs };
6782
6905
  }
6783
6906
  // -------------------------------------------------------------------------
6784
6907
  // Image / multimodal support
@@ -7460,11 +7583,16 @@ function renderToolResult(toolName, success, output) {
7460
7583
  `);
7461
7584
  }
7462
7585
  }
7463
- function renderTaskComplete(summary, turns, toolCalls, durationMs) {
7586
+ function renderTaskComplete(summary, turns, toolCalls, durationMs, tokens) {
7464
7587
  const duration = formatDuration2(durationMs);
7588
+ const tokenStr = tokens ? ` ${formatTokenCount(tokens)}` : "";
7465
7589
  process.stdout.write(`
7466
7590
  ${c2.green("\u2714")} ${c2.bold("Task completed")} ${c2.dim(`(${turns} turns, ${toolCalls} tool calls, ${duration})`)}
7467
7591
  `);
7592
+ if (tokenStr) {
7593
+ process.stdout.write(` ${c2.dim(tokenStr)}
7594
+ `);
7595
+ }
7468
7596
  if (summary) {
7469
7597
  const lines = summary.split("\n");
7470
7598
  for (const line of lines) {
@@ -7474,12 +7602,26 @@ ${c2.green("\u2714")} ${c2.bold("Task completed")} ${c2.dim(`(${turns} turns, ${
7474
7602
  }
7475
7603
  process.stdout.write("\n");
7476
7604
  }
7477
- function renderTaskIncomplete(turns, toolCalls, durationMs) {
7605
+ function renderTaskIncomplete(turns, toolCalls, durationMs, tokens) {
7478
7606
  const duration = formatDuration2(durationMs);
7607
+ const tokenStr = tokens ? ` ${formatTokenCount(tokens)}` : "";
7479
7608
  process.stdout.write(`
7480
7609
  ${c2.yellow("\u26A0")} ${c2.bold("Task incomplete")} ${c2.dim(`(${turns} turns, ${toolCalls} tool calls, ${duration})`)}
7481
-
7482
7610
  `);
7611
+ if (tokenStr) {
7612
+ process.stdout.write(` ${c2.dim(tokenStr)}
7613
+ `);
7614
+ }
7615
+ process.stdout.write("\n");
7616
+ }
7617
+ function formatTokenCount(tokens) {
7618
+ if (tokens.total > 0) {
7619
+ return `Tokens: ${tokens.total.toLocaleString()}`;
7620
+ }
7621
+ if (tokens.estimated > 0) {
7622
+ return `Tokens: ~${tokens.estimated.toLocaleString()} (estimated)`;
7623
+ }
7624
+ return "";
7483
7625
  }
7484
7626
  function renderError(message) {
7485
7627
  process.stdout.write(`
@@ -7565,6 +7707,7 @@ function renderSlashHelp() {
7565
7707
  ["/voice", "Toggle TTS voice feedback (GLaDOS)"],
7566
7708
  ["/voice <model>", "Set voice: glados, overwatch"],
7567
7709
  ["/stream", "Toggle real-time token streaming (pastel syntax highlighting)"],
7710
+ ["/bruteforce", "Toggle brute-force mode (auto re-engage on turn limit)"],
7568
7711
  ["/verbose", "Toggle verbose mode"],
7569
7712
  ["/clear", "Clear the screen"],
7570
7713
  ["/help", "Show this help"],
@@ -7844,6 +7987,7 @@ var init_render = __esm({
7844
7987
  "/voice",
7845
7988
  "/stream",
7846
7989
  "/verbose",
7990
+ "/bruteforce",
7847
7991
  "/clear",
7848
7992
  "/help",
7849
7993
  "/quit"
@@ -7936,6 +8080,14 @@ async function handleSlashCommand(input, ctx) {
7936
8080
  renderInfo(`Token streaming: ${isOn ? "on" : "off"}${hasLocal ? " (project-local)" : ""}` + (isOn ? " \u2014 thinking tokens in grey italics, responses with pastel syntax highlighting" : ""));
7937
8081
  return "handled";
7938
8082
  }
8083
+ case "bruteforce":
8084
+ case "brute": {
8085
+ const isOn = ctx.bruteForceToggle();
8086
+ const save = hasLocal ? ctx.saveLocalSettings.bind(ctx) : ctx.saveSettings.bind(ctx);
8087
+ save({ bruteforce: isOn });
8088
+ renderInfo(`Brute-force mode: ${isOn ? "on" : "off"}${hasLocal ? " (project-local)" : ""}` + (isOn ? " \u2014 agent will auto re-engage when turn limit is hit, reassess and try creative strategies" : ""));
8089
+ return "handled";
8090
+ }
7939
8091
  default:
7940
8092
  renderWarning(`Unknown command: /${cmd}. Type /help for available commands.`);
7941
8093
  return "handled";
@@ -10721,7 +10873,7 @@ Use task_status("${taskId}") or task_output("${taskId}") to check progress.`
10721
10873
  }
10722
10874
  };
10723
10875
  }
10724
- function startTask(task, config, repoRoot, voice, stream, taskStores) {
10876
+ function startTask(task, config, repoRoot, voice, stream, taskStores, bruteForce) {
10725
10877
  const projectCtx = buildProjectContext(repoRoot, taskStores?.contextStores);
10726
10878
  const dynamicContext = formatContextForPrompt(projectCtx);
10727
10879
  const backend = new OllamaAgenticBackend(config.backendUrl.replace(/\/$/, ""), config.model);
@@ -10733,7 +10885,8 @@ function startTask(task, config, repoRoot, voice, stream, taskStores) {
10733
10885
  taskTimeoutMs: config.timeoutMs * 4,
10734
10886
  compactionThreshold: 4e4,
10735
10887
  dynamicContext,
10736
- streamEnabled: stream?.enabled ?? false
10888
+ streamEnabled: stream?.enabled ?? false,
10889
+ bruteForce: bruteForce ?? false
10737
10890
  });
10738
10891
  runner.registerTools(buildTools(repoRoot, config));
10739
10892
  const filesTouched = /* @__PURE__ */ new Set();
@@ -10795,14 +10948,15 @@ function startTask(task, config, repoRoot, voice, stream, taskStores) {
10795
10948
  });
10796
10949
  const sessionId = `${Date.now()}-${Math.random().toString(36).slice(2, 8)}`;
10797
10950
  const promise = runner.run(task, `Working directory: ${repoRoot}`).then((result) => {
10951
+ const tokens = { total: result.totalTokens, estimated: result.estimatedTokens };
10798
10952
  if (result.completed) {
10799
- renderTaskComplete(result.summary, result.turns, result.toolCalls, result.durationMs);
10953
+ renderTaskComplete(result.summary, result.turns, result.toolCalls, result.durationMs, tokens);
10800
10954
  if (voice?.enabled && result.summary) {
10801
10955
  const ttsText = result.summary.length > 300 ? result.summary.slice(0, 300) + "..." : result.summary;
10802
10956
  voice.speak(`Task complete. ${ttsText}`);
10803
10957
  }
10804
10958
  } else {
10805
- renderTaskIncomplete(result.turns, result.toolCalls, result.durationMs);
10959
+ renderTaskIncomplete(result.turns, result.toolCalls, result.durationMs, tokens);
10806
10960
  if (voice?.enabled) {
10807
10961
  voice.speak("Task did not complete.");
10808
10962
  }
@@ -10878,6 +11032,7 @@ async function startInteractive(config, repoPath) {
10878
11032
  if (savedSettings.dbPath)
10879
11033
  config = { ...config, dbPath: savedSettings.dbPath };
10880
11034
  let streamEnabled = savedSettings.stream ?? false;
11035
+ let bruteForceEnabled = savedSettings.bruteforce ?? false;
10881
11036
  if (!isResumed) {
10882
11037
  const needsSetup = isFirstRun() || !await isModelAvailable(config);
10883
11038
  if (needsSetup && config.backendType === "ollama") {
@@ -11006,6 +11161,10 @@ async function startInteractive(config, repoPath) {
11006
11161
  streamEnabled = !streamEnabled;
11007
11162
  return streamEnabled;
11008
11163
  },
11164
+ bruteForceToggle() {
11165
+ bruteForceEnabled = !bruteForceEnabled;
11166
+ return bruteForceEnabled;
11167
+ },
11009
11168
  saveSettings(settings) {
11010
11169
  try {
11011
11170
  saveProjectSettings(repoRoot, settings);
@@ -11089,7 +11248,7 @@ ${c2.dim("Goodbye!")}
11089
11248
  contextStores,
11090
11249
  taskMemoryStore: taskMemoryStore ?? void 0,
11091
11250
  failureStore: failureStore ?? void 0
11092
- });
11251
+ }, bruteForceEnabled);
11093
11252
  activeTask = task;
11094
11253
  showPrompt();
11095
11254
  await task.promise;
@@ -11813,10 +11972,11 @@ var init_config3 = __esm({
11813
11972
  dbPath: "Path to SQLite memory database",
11814
11973
  voice: "Enable TTS voice feedback (true/false)",
11815
11974
  voiceModel: "TTS voice model: glados, overwatch",
11816
- stream: "Enable real-time token streaming with pastel syntax highlighting (true/false)"
11975
+ stream: "Enable real-time token streaming with pastel syntax highlighting (true/false)",
11976
+ bruteforce: "Brute-force mode: auto re-engage agent when turn limit hit (true/false)"
11817
11977
  };
11818
11978
  INT_KEYS = /* @__PURE__ */ new Set(["maxRetries", "timeoutMs"]);
11819
- BOOL_KEYS = /* @__PURE__ */ new Set(["dryRun", "verbose", "voice", "stream"]);
11979
+ BOOL_KEYS = /* @__PURE__ */ new Set(["dryRun", "verbose", "voice", "stream", "bruteforce"]);
11820
11980
  }
11821
11981
  });
11822
11982
 
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "open-agents-ai",
3
- "version": "0.11.6",
3
+ "version": "0.11.7",
4
4
  "description": "AI coding agent powered by open-source models (Ollama/vLLM) — interactive TUI with agentic tool-calling loop",
5
5
  "type": "module",
6
6
  "main": "./dist/index.js",