open-agents-ai 0.187.470 → 0.187.471

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.js CHANGED
@@ -518311,10 +518311,17 @@ var init_agenticRunner = __esm({
  const sections = [];
  const pressureCue = pressureCheck(task);
  const basePrompt = getSystemPromptForTier(this.options.modelTier) + pressureCue;
+ const _BATCH_GUIDANCE = {
+ small: "\n\n## Response batching\n\nEmit AT MOST 2 tool calls per response. After observing their results, plan the next 2 in your following response. Smaller batches let the orchestrator deliver cache/failure/progress signals to you between actions. Tool calls beyond the cap are dropped. Use todo_write between batches to mark progress.",
+ medium: "\n\n## Response batching\n\nEmit AT MOST 4 tool calls per response. After observing their results, plan the next batch in your following response. Smaller batches let the orchestrator deliver cache/failure/progress signals to you between actions. Tool calls beyond the cap are dropped. Use todo_write between batches to mark progress.",
+ large: "\n\n## Response batching\n\nEmit AT MOST 6 tool calls per response. Smaller batches receive better feedback (cache/failure/progress signals between actions). Tool calls beyond the cap are dropped. Use todo_write between batches."
+ };
+ const batchGuidance = _BATCH_GUIDANCE[this.options.modelTier ?? "large"] ?? _BATCH_GUIDANCE.large;
+ const basePromptWithBatching = basePrompt + batchGuidance;
  sections.push({
  label: "c_instr",
- content: basePrompt,
- tokenEstimate: Math.ceil(basePrompt.length / 4)
+ content: basePromptWithBatching,
+ tokenEstimate: Math.ceil(basePromptWithBatching.length / 4)
  });
  const personalitySuffix = this.options.personality ? compilePersonalityPrompt(this.options.personality, this.options.personalityName) : "";
  if (personalitySuffix) {
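In effect, this hunk appends a tier-specific "Response batching" section to the base system prompt before it is pushed into the prompt sections. A minimal standalone sketch of the same lookup-then-append pattern, under the assumption that modelTier is one of "small" | "medium" | "large" (the function name and truncated guidance strings below are illustrative, not the package's actual exports):

// Sketch only: mirrors the per-tier guidance lookup from the hunk above.
// The guidance strings are truncated; see the diff for the full prompt text.
const BATCH_GUIDANCE = {
  small: "\n\n## Response batching\n\nEmit AT MOST 2 tool calls per response. ...",
  medium: "\n\n## Response batching\n\nEmit AT MOST 4 tool calls per response. ...",
  large: "\n\n## Response batching\n\nEmit AT MOST 6 tool calls per response. ..."
};

function buildSystemPrompt(basePrompt, modelTier) {
  // Unknown or missing tiers fall back to the "large" guidance, as in the diff.
  const guidance = BATCH_GUIDANCE[modelTier ?? "large"] ?? BATCH_GUIDANCE.large;
  return basePrompt + guidance;
}

// Example: a "small"-tier prompt ends with the 2-calls-per-response instruction.
console.log(buildSystemPrompt("You are a coding agent.", "small"));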
@@ -520820,6 +520827,23 @@ ${memoryLines.join("\n")}`
  if (msg.toolCalls && msg.toolCalls.length > 0) {
  consecutiveTextOnly = 0;
  consecutiveThinkOnly = 0;
+ const _RESPONSE_CALL_CAPS = {
+ small: 2,
+ medium: 4,
+ large: 6
+ };
+ const _responseCap = _RESPONSE_CALL_CAPS[this.options.modelTier ?? "large"] ?? 6;
+ const _originalCallCount = msg.toolCalls.length;
+ let _capDeferredCount = 0;
+ if (msg.toolCalls.length > _responseCap) {
+ _capDeferredCount = msg.toolCalls.length - _responseCap;
+ msg.toolCalls = msg.toolCalls.slice(0, _responseCap);
+ this.emit({
+ type: "status",
+ content: `Response cap: ${_capDeferredCount} of ${_originalCallCount} tool call(s) deferred (tier=${this.options.modelTier ?? "large"}, cap=${_responseCap})`,
+ timestamp: (/* @__PURE__ */ new Date()).toISOString()
+ });
+ }
  messages2.push({
  role: "assistant",
  content: msg.content || null,
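In plainer terms, this hunk clamps the number of tool calls executed per assistant response to a per-tier cap and emits a status event when calls are dropped. A simplified sketch of that check, with the emit payload paraphrased from the diff rather than taken from a documented API:

// Sketch: per-tier cap on tool calls per response, as in the hunk above.
const RESPONSE_CALL_CAPS = { small: 2, medium: 4, large: 6 };

function enforceResponseCap(toolCalls, modelTier, emit) {
  const cap = RESPONSE_CALL_CAPS[modelTier ?? "large"] ?? 6;
  const deferred = Math.max(0, toolCalls.length - cap);
  if (deferred > 0) {
    emit({
      type: "status",
      content: `Response cap: ${deferred} of ${toolCalls.length} tool call(s) deferred (tier=${modelTier ?? "large"}, cap=${cap})`
    });
  }
  // Only the first `cap` calls survive; the overflow is dropped for this turn.
  return { kept: toolCalls.slice(0, cap), deferred };
}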
@@ -520829,6 +520853,21 @@ ${memoryLines.join("\n")}`
  function: { name: tc.name, arguments: JSON.stringify(tc.arguments) }
  }))
  });
+ if (_capDeferredCount > 0) {
+ messages2.push({
+ role: "system",
+ content: [
+ `[BATCH CAPPED]`,
+ `You emitted ${_originalCallCount} tool calls in one response. The orchestrator caps responses at ${_responseCap} for the ${this.options.modelTier ?? "large"} model tier.`,
+ `The first ${_responseCap} executed; the remaining ${_capDeferredCount} were DROPPED.`,
+ ``,
+ `Best practice for this model tier: emit at most ${_responseCap} tool calls per turn.`,
+ `Observe the results of the first ${_responseCap}, then plan the next batch in the NEXT turn.`,
+ `Smaller batches let cache/dedup/progress-gate/failure signals reach you BEFORE you commit to more work.`,
+ `Use todo_write between batches to track which items are done.`
+ ].join("\n")
+ });
+ }
  const executeSingle = async (tc) => {
  if (this.aborted)
  return null;
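When calls were dropped, this hunk injects a synthetic system message after the assistant turn so the model learns, in-band, why only part of its batch ran. A hedged sketch of building that notice, where the helper name is invented for illustration and the wording follows the diff:

// Sketch: the "[BATCH CAPPED]" feedback message appended to the conversation.
function batchCappedNotice(originalCount, cap, deferred, modelTier) {
  return {
    role: "system",
    content: [
      "[BATCH CAPPED]",
      `You emitted ${originalCount} tool calls in one response. The orchestrator caps responses at ${cap} for the ${modelTier} model tier.`,
      `The first ${cap} executed; the remaining ${deferred} were DROPPED.`,
      "",
      `Best practice for this model tier: emit at most ${cap} tool calls per turn.`
    ].join("\n")
  };
}

// Usage (per the diff, only when deferred > 0): messages2.push(batchCappedNotice(9, 6, 3, "large"));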
@@ -1,12 +1,12 @@
  {
  "name": "open-agents-ai",
- "version": "0.187.470",
+ "version": "0.187.471",
  "lockfileVersion": 3,
  "requires": true,
  "packages": {
  "": {
  "name": "open-agents-ai",
- "version": "0.187.470",
+ "version": "0.187.471",
  "hasInstallScript": true,
  "license": "CC-BY-NC-4.0",
  "dependencies": {
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "open-agents-ai",
- "version": "0.187.470",
+ "version": "0.187.471",
  "description": "AI coding agent powered by open-source models (Ollama/vLLM) — interactive TUI with agentic tool-calling loop",
  "type": "module",
  "main": "./dist/index.js",