@illuma-ai/agents 1.1.3 → 1.1.4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -64,6 +64,15 @@ const SUMMARIZATION_CONTEXT_THRESHOLD = 80;
  *   100% → graceful: use existing summary + recent messages, never block
  */
 const PROACTIVE_SUMMARY_THRESHOLD = 0.8;
+/**
+ * Number of recent conversation rounds (human+AI pairs) to keep in the
+ * windowed view when a summary is available. Everything older is covered
+ * by the summary. 2 rounds = last 2 user questions + 2 AI responses.
+ *
+ * This prevents wasting tokens on raw messages the summary already covers
+ * and keeps context tight for the LLM.
+ */
+const COMPACTION_RECENT_ROUNDS = 2;
 /**
  * Default reserve ratio (0-1) — fraction of context window to preserve as recent messages.
  * 0.3 means 30% of the context budget is reserved for the most recent messages,
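
The doc comment and constant describe a simple windowing rule: walk the history newest to oldest and stop after the Nth human message. A minimal sketch of that rule in isolation (the Msg shape and keepRecentRounds are illustrative, not the package's internals):

// Simplified message shape for illustration only.
interface Msg { role: 'human' | 'ai' | 'tool'; text: string; }

// A "round" starts at a human message; walk newest → oldest and stop once
// maxRounds human messages have entered the window.
function keepRecentRounds(messages: Msg[], maxRounds: number): Msg[] {
  let windowStart = messages.length;
  let roundsSeen = 0;
  for (let i = messages.length - 1; i >= 0; i--) {
    windowStart = i;
    if (messages[i].role === 'human' && ++roundsSeen >= maxRounds) break;
  }
  return messages.slice(windowStart);
}

// With maxRounds = 2 (the COMPACTION_RECENT_ROUNDS default), a six-message
// history keeps only the last two human/AI pairs:
const history: Msg[] = [
  { role: 'human', text: 'q1' }, { role: 'ai', text: 'a1' },
  { role: 'human', text: 'q2' }, { role: 'ai', text: 'a2' },
  { role: 'human', text: 'q3' }, { role: 'ai', text: 'a3' },
];
console.log(keepRecentRounds(history, 2).map((m) => m.text)); // ['q2','a2','q3','a3']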
@@ -101,6 +110,7 @@ const TOOL_DISCOVERY_CACHE_MAX_SIZE = 200;
  */
 const DEDUP_MAX_CONTENT_LENGTH = 10000;
 
+exports.COMPACTION_RECENT_ROUNDS = COMPACTION_RECENT_ROUNDS;
 exports.CONTEXT_SAFETY_BUFFER = CONTEXT_SAFETY_BUFFER;
 exports.DEDUP_MAX_CONTENT_LENGTH = DEDUP_MAX_CONTENT_LENGTH;
 exports.MIN_THINKING_BUDGET = MIN_THINKING_BUDGET;
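
With the export in place, code consuming the compiled module can align other limits with the compaction window. Whether the package's public entry point re-exports this constant is not visible in this diff, so the import below is an assumption:

// Assumption: the package root re-exports constants; only constants.cjs is shown here.
import { COMPACTION_RECENT_ROUNDS } from '@illuma-ai/agents';

// Example: cap a transcript preview at what the agent will actually see verbatim.
const previewMessages = COMPACTION_RECENT_ROUNDS * 2; // one human + one AI per round
console.log(`showing last ${previewMessages} messages (${COMPACTION_RECENT_ROUNDS} rounds)`);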
@@ -1 +1 @@
[constants.cjs.map regenerated: the embedded sourcesContent for src/common/constants.ts now includes the COMPACTION_RECENT_ROUNDS constant and its doc comment, and the mappings string gains a corresponding segment; the rest of the single-line source map is identical between 1.1.3 and 1.1.4.]
@@ -1181,17 +1181,51 @@ class StandardGraph extends Graph {
         : 0;
     // Budget for recent messages = total - system - summary - 3 (assistant priming)
     const recentBudget = calibratedMax - systemTokens - summaryTokens - 3;
-    // Step 3: Walk newest→oldest, collect messages that fit in the budget
+    // Step 3: Determine window of recent messages to include.
+    //
+    // Two modes:
+    //   A) No summary available → fill the budget (all messages that fit)
+    //   B) Summary available → keep last 2 conversation rounds (H+A pairs)
+    //      + any trailing tool messages. The summary covers everything else.
+    //      This avoids wasting tokens on raw messages the summary already covers.
+    //
+    // A "round" = one human message + one AI response (+ any tool messages between).
     const contentStart = systemMsg != null ? 1 : 0;
     let usedTokens = 0;
     let windowStart = messages$1.length; // index where the recent window begins
-    for (let i = messages$1.length - 1; i >= contentStart; i--) {
-        const msgTokens = agentContext.indexTokenCountMap[i] ?? 0;
-        if (usedTokens + msgTokens > recentBudget) {
-            break;
+    if (summary == null || summary === '') {
+        // Mode A: No summary — include as many recent messages as fit in budget
+        for (let i = messages$1.length - 1; i >= contentStart; i--) {
+            const msgTokens = agentContext.indexTokenCountMap[i] ?? 0;
+            if (usedTokens + msgTokens > recentBudget) {
+                break;
+            }
+            usedTokens += msgTokens;
+            windowStart = i;
+        }
+    }
+    else {
+        // Mode B: Summary exists — keep last 2 rounds (4 core messages: H+A+H+A)
+        // Walk backward counting human messages as round boundaries.
+        const MAX_RECENT_ROUNDS = constants.COMPACTION_RECENT_ROUNDS;
+        let roundsSeen = 0;
+        for (let i = messages$1.length - 1; i >= contentStart; i--) {
+            const msgType = messages$1[i]?.getType();
+            const msgTokens = agentContext.indexTokenCountMap[i] ?? 0;
+            // Budget guard — even in round-limited mode, don't exceed budget
+            if (usedTokens + msgTokens > recentBudget) {
+                break;
+            }
+            usedTokens += msgTokens;
+            windowStart = i;
+            // Count a human message as a round boundary
+            if (msgType === 'human') {
+                roundsSeen++;
+                if (roundsSeen >= MAX_RECENT_ROUNDS) {
+                    break;
+                }
+            }
         }
-        usedTokens += msgTokens;
-        windowStart = i;
     }
     // Ensure we don't split tool-call / tool-result pairs.
     // If windowStart lands on a ToolMessage, walk back to include its AI message.
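
The two context lines that close this hunk point at logic the diff itself doesn't show: repairing a window that begins on a tool result. A minimal sketch of such a walk-back, under the assumption that tool results directly follow the AI message that requested them (alignToToolBoundary and the message shape are illustrative):

// Simplified shape; real ToolMessages carry tool_call_ids and payloads.
interface ToolAwareMsg { role: 'human' | 'ai' | 'tool'; text: string; }

// If the window opens on a tool result, pull the window start backward until
// it sits on the AI message that issued the tool calls, so call/result pairs
// are never split across the summary boundary.
function alignToToolBoundary(messages: ToolAwareMsg[], windowStart: number): number {
  let start = windowStart;
  while (start > 0 && messages[start].role === 'tool') {
    start--; // loop exits on the first non-tool message, i.e. the AI turn
  }
  return start;
}

Calling it on a window that opens at the second of two consecutive tool results walks back two slots, landing on the 'ai' message so the provider never sees an orphaned result.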
@@ -1214,6 +1248,23 @@ class StandardGraph extends Graph {
     }
     viewParts.push(...recentMessages);
     messagesToUse = viewParts;
+    // Rebuild indexTokenCountMap for the windowed view so downstream
+    // analytics and summarization triggers see accurate token counts.
+    const viewTokenMap = {};
+    let viewIdx = 0;
+    if (systemMsg != null) {
+        viewTokenMap[viewIdx] = systemTokens;
+        viewIdx++;
+    }
+    if (summaryMsg != null) {
+        viewTokenMap[viewIdx] = summaryTokens;
+        viewIdx++;
+    }
+    for (let i = windowStart; i < messages$1.length; i++) {
+        viewTokenMap[viewIdx] = agentContext.indexTokenCountMap[i];
+        viewIdx++;
+    }
+    agentContext.indexTokenCountMap = viewTokenMap;
     console.debug(`[Graph:Compaction] View: ${messages$1.length}→${viewParts.length} msgs ` +
         `(${compactedMessages.length} behind summary, ${recentMessages.length} in window) | ` +
         `summary=${summarySource}${summary ? ` (len=${summary.length})` : ''} | ` +
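
Why the remap matters: after windowing, message i in the view is no longer message i in the full history, so stale indexTokenCountMap keys would make later budget math read the wrong entries. A small worked example of the re-keying (all counts invented for illustration):

// All counts are invented; the real map comes from agentContext.indexTokenCountMap.
const fullTokenMap: Record<number, number> = { 0: 300, 1: 40, 2: 220, 3: 35, 4: 180 };
const fullHistoryLength = 5;
const windowStart = 3;    // messages 3..4 survive the windowing
const systemTokens = 300; // view layout: [system, summary, msg3, msg4]
const summaryTokens = 90;

const viewTokenMap: Record<number, number> = {};
let viewIdx = 0;
viewTokenMap[viewIdx++] = systemTokens;
viewTokenMap[viewIdx++] = summaryTokens;
for (let i = windowStart; i < fullHistoryLength; i++) {
  viewTokenMap[viewIdx++] = fullTokenMap[i];
}
console.log(viewTokenMap); // { 0: 300, 1: 90, 2: 35, 3: 180 }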