codebot-ai 1.4.1 → 1.4.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/agent.d.ts CHANGED
@@ -28,9 +28,16 @@ export declare class Agent {
28
28
  after: number;
29
29
  };
30
30
  getMessages(): Message[];
31
- /** Ensure every assistant message with tool_calls has matching tool response messages.
32
- * OpenAI returns 400 if any tool_call_id lacks a response. This can happen if
33
- * a previous LLM call errored out mid-flow. */
31
+ /**
32
+ * Validate and repair message history to prevent OpenAI 400 errors.
33
+ * Handles three types of corruption:
34
+ * 1. Orphaned tool messages — tool_call_id doesn't match any preceding assistant's tool_calls
35
+ * 2. Duplicate tool responses — multiple tool messages for the same tool_call_id
36
+ * 3. Missing tool responses — assistant has tool_calls but no matching tool response
37
+ *
38
+ * This runs before every LLM call to self-heal from stream errors, compaction artifacts,
39
+ * or session resume corruption.
40
+ */
34
41
  private repairToolCallMessages;
35
42
  private buildSystemPrompt;
36
43
  }
package/dist/agent.js CHANGED
@@ -260,10 +260,45 @@ class Agent {
260
260
  getMessages() {
261
261
  return [...this.messages];
262
262
  }
263
- /** Ensure every assistant message with tool_calls has matching tool response messages.
264
- * OpenAI returns 400 if any tool_call_id lacks a response. This can happen if
265
- * a previous LLM call errored out mid-flow. */
263
+ /**
264
+ * Validate and repair message history to prevent OpenAI 400 errors.
265
+ * Handles three types of corruption:
266
+ * 1. Orphaned tool messages — tool_call_id doesn't match any preceding assistant's tool_calls
267
+ * 2. Duplicate tool responses — multiple tool messages for the same tool_call_id
268
+ * 3. Missing tool responses — assistant has tool_calls but no matching tool response
269
+ *
270
+ * This runs before every LLM call to self-heal from stream errors, compaction artifacts,
271
+ * or session resume corruption.
272
+ */
266
273
  repairToolCallMessages() {
274
+ // Phase 1: Collect all valid tool_call_ids from assistant messages (in order)
275
+ const validToolCallIds = new Set();
276
+ for (const msg of this.messages) {
277
+ if (msg.role === 'assistant' && msg.tool_calls?.length) {
278
+ for (const tc of msg.tool_calls) {
279
+ validToolCallIds.add(tc.id);
280
+ }
281
+ }
282
+ }
283
+ // Phase 2: Remove orphaned tool messages and duplicates
284
+ const seenToolResponseIds = new Set();
285
+ this.messages = this.messages.filter(msg => {
286
+ if (msg.role !== 'tool')
287
+ return true;
288
+ const tcId = msg.tool_call_id;
289
+ // No tool_call_id at all — malformed, remove
290
+ if (!tcId)
291
+ return false;
292
+ // Orphaned: tool_call_id doesn't match any assistant's tool_calls
293
+ if (!validToolCallIds.has(tcId))
294
+ return false;
295
+ // Duplicate: already have a response for this tool_call_id
296
+ if (seenToolResponseIds.has(tcId))
297
+ return false;
298
+ seenToolResponseIds.add(tcId);
299
+ return true;
300
+ });
301
+ // Phase 3: Add missing tool responses (assistant has tool_calls but no tool response)
267
302
  const toolResponseIds = new Set();
268
303
  for (const msg of this.messages) {
269
304
  if (msg.role === 'tool' && msg.tool_call_id) {
@@ -275,13 +310,12 @@ class Agent {
275
310
  if (msg.role === 'assistant' && msg.tool_calls?.length) {
276
311
  for (const tc of msg.tool_calls) {
277
312
  if (!toolResponseIds.has(tc.id)) {
278
- // Missing tool response — inject one right after the assistant message
279
313
  const repairMsg = {
280
314
  role: 'tool',
281
315
  content: 'Error: tool call was not executed (interrupted).',
282
316
  tool_call_id: tc.id,
283
317
  };
284
- // Find the right position: after the assistant message and any existing tool responses
318
+ // Insert after the assistant message and any existing tool responses
285
319
  let insertAt = i + 1;
286
320
  while (insertAt < this.messages.length && this.messages[insertAt].role === 'tool') {
287
321
  insertAt++;
package/dist/cli.js CHANGED
@@ -44,7 +44,7 @@ const setup_1 = require("./setup");
44
44
  const banner_1 = require("./banner");
45
45
  const tools_1 = require("./tools");
46
46
  const scheduler_1 = require("./scheduler");
47
- const VERSION = '1.4.1';
47
+ const VERSION = '1.4.3';
48
48
  // Session-wide token tracking
49
49
  let sessionTokens = { input: 0, output: 0, total: 0 };
50
50
  const C = {
package/dist/context.d.ts CHANGED
@@ -14,9 +14,16 @@ export declare class ContextManager {
14
14
  availableTokens(): number;
15
15
  /** Check if messages fit within budget */
16
16
  fitsInBudget(messages: Message[]): boolean;
17
- /** Compact conversation by dropping old messages and inserting a summary placeholder */
17
+ /**
18
+ * Group messages into atomic blocks that must never be split.
19
+ * An assistant message with tool_calls + its following tool responses = one block.
20
+ * All other messages are individual blocks.
21
+ * This prevents compaction from creating orphaned tool messages.
22
+ */
23
+ private groupMessages;
24
+ /** Compact conversation by dropping old messages. Never splits tool_call groups. */
18
25
  compact(messages: Message[], force?: boolean): Message[];
19
- /** Smart compaction: use LLM to summarize dropped messages instead of just discarding */
26
+ /** Smart compaction: use LLM to summarize dropped messages. Never splits tool_call groups. */
20
27
  compactWithSummary(messages: Message[]): Promise<{
21
28
  messages: Message[];
22
29
  summary: string;
package/dist/context.js CHANGED
@@ -29,23 +29,55 @@ class ContextManager {
29
29
  const total = messages.reduce((sum, m) => sum + this.estimateTokens(m.content), 0);
30
30
  return total <= this.availableTokens();
31
31
  }
32
- /** Compact conversation by dropping old messages and inserting a summary placeholder */
32
+ /**
33
+ * Group messages into atomic blocks that must never be split.
34
+ * An assistant message with tool_calls + its following tool responses = one block.
35
+ * All other messages are individual blocks.
36
+ * This prevents compaction from creating orphaned tool messages.
37
+ */
38
+ groupMessages(messages) {
39
+ const groups = [];
40
+ let i = 0;
41
+ while (i < messages.length) {
42
+ const msg = messages[i];
43
+ if (msg.role === 'assistant' && msg.tool_calls?.length) {
44
+ // Start of a tool_call group — keep assistant + all following tool messages together
45
+ const group = [msg];
46
+ i++;
47
+ while (i < messages.length && messages[i].role === 'tool') {
48
+ group.push(messages[i]);
49
+ i++;
50
+ }
51
+ groups.push(group);
52
+ }
53
+ else {
54
+ groups.push([msg]);
55
+ i++;
56
+ }
57
+ }
58
+ return groups;
59
+ }
60
+ /** Compact conversation by dropping old messages. Never splits tool_call groups. */
33
61
  compact(messages, force = false) {
34
62
  if (!force && this.fitsInBudget(messages))
35
63
  return messages;
36
64
  const system = messages[0]?.role === 'system' ? messages[0] : null;
37
65
  const rest = system ? messages.slice(1) : [...messages];
38
- // Keep recent messages that fit within 80% of budget
39
- const kept = [];
66
+ // Group messages into atomic blocks (assistant + tool responses stay together)
67
+ const groups = this.groupMessages(rest);
68
+ // Keep recent groups that fit within 80% of budget
69
+ const keptGroups = [];
40
70
  let tokenCount = 0;
41
71
  const budget = this.availableTokens();
42
- for (let i = rest.length - 1; i >= 0; i--) {
43
- const msgTokens = this.estimateTokens(rest[i].content);
44
- if (tokenCount + msgTokens > budget * 0.8)
72
+ for (let i = groups.length - 1; i >= 0; i--) {
73
+ const group = groups[i];
74
+ const groupTokens = group.reduce((sum, m) => sum + this.estimateTokens(m.content), 0);
75
+ if (tokenCount + groupTokens > budget * 0.8)
45
76
  break;
46
- kept.unshift(rest[i]);
47
- tokenCount += msgTokens;
77
+ keptGroups.unshift(group);
78
+ tokenCount += groupTokens;
48
79
  }
80
+ const kept = keptGroups.flat();
49
81
  const dropped = rest.length - kept.length;
50
82
  if (dropped > 0) {
51
83
  kept.unshift({
@@ -57,21 +89,25 @@ class ContextManager {
57
89
  kept.unshift(system);
58
90
  return kept;
59
91
  }
60
- /** Smart compaction: use LLM to summarize dropped messages instead of just discarding */
92
+ /** Smart compaction: use LLM to summarize dropped messages. Never splits tool_call groups. */
61
93
  async compactWithSummary(messages) {
62
94
  const system = messages[0]?.role === 'system' ? messages[0] : null;
63
95
  const rest = system ? messages.slice(1) : [...messages];
64
- // Determine which messages to keep vs summarize
65
- const kept = [];
96
+ // Group messages into atomic blocks
97
+ const groups = this.groupMessages(rest);
98
+ // Keep recent groups that fit within 80% of budget
99
+ const keptGroups = [];
66
100
  let tokenCount = 0;
67
101
  const budget = this.availableTokens();
68
- for (let i = rest.length - 1; i >= 0; i--) {
69
- const msgTokens = this.estimateTokens(rest[i].content);
70
- if (tokenCount + msgTokens > budget * 0.8)
102
+ for (let i = groups.length - 1; i >= 0; i--) {
103
+ const group = groups[i];
104
+ const groupTokens = group.reduce((sum, m) => sum + this.estimateTokens(m.content), 0);
105
+ if (tokenCount + groupTokens > budget * 0.8)
71
106
  break;
72
- kept.unshift(rest[i]);
73
- tokenCount += msgTokens;
107
+ keptGroups.unshift(group);
108
+ tokenCount += groupTokens;
74
109
  }
110
+ const kept = keptGroups.flat();
75
111
  const droppedCount = rest.length - kept.length;
76
112
  if (droppedCount === 0) {
77
113
  return { messages, summary: '' };
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "codebot-ai",
3
- "version": "1.4.1",
3
+ "version": "1.4.3",
4
4
  "description": "Zero-dependency autonomous AI agent. Code, browse, search, automate. Works with any LLM — Ollama, Claude, GPT, Gemini, DeepSeek, Groq, Mistral, Grok.",
5
5
  "main": "dist/index.js",
6
6
  "types": "dist/index.d.ts",