illuma-agents 1.0.70 → 1.0.72

This diff shows the changes between publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects the packages as they appear in their respective public registries.
@@ -571,6 +571,78 @@ class StandardGraph extends Graph {
571
571
  this.signal.removeEventListener('abort', client.abortHandler);
572
572
  client.abortHandler = undefined;
573
573
  }
574
+ /**
575
+ * Perform structured output invocation: creates a fresh model without tools bound,
576
+ * removes thinking configuration, invokes with the schema, emits the event,
577
+ * and returns a clean AIMessageChunk without tool_calls.
578
+ *
579
+ * Used by both the immediate path (no tools) and the deferred path (after tool use).
580
+ */
581
+ async performStructuredOutput({ agentContext, finalMessages, config, }) {
582
+ const schema = agentContext.getStructuredOutputSchema();
583
+ if (!schema) {
584
+ throw new Error('Structured output schema is not configured');
585
+ }
586
+ // Get a fresh model WITHOUT tools bound
587
+ // bindTools() returns RunnableBinding which lacks withStructuredOutput
588
+ // Also disable thinking mode - Anthropic/Bedrock doesn't allow tool_choice with thinking enabled
589
+ const structuredClientOptions = { ...agentContext.clientOptions };
590
+ // Disable streaming for structured output
591
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
592
+ structuredClientOptions.streaming = false;
593
+ // Remove thinking configuration for Bedrock
594
+ if (agentContext.provider === _enum.Providers.BEDROCK) {
595
+ const bedrockOpts = structuredClientOptions;
596
+ if (bedrockOpts.additionalModelRequestFields) {
597
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
598
+ const additionalFields = Object.assign({}, bedrockOpts.additionalModelRequestFields);
599
+ delete additionalFields.thinking;
600
+ delete additionalFields.budgetTokens;
601
+ bedrockOpts.additionalModelRequestFields = additionalFields;
602
+ }
603
+ }
604
+ // Remove thinking configuration for Anthropic direct API
605
+ if (agentContext.provider === _enum.Providers.ANTHROPIC) {
606
+ const anthropicOpts = structuredClientOptions;
607
+ if (anthropicOpts.thinking) {
608
+ delete anthropicOpts.thinking;
609
+ }
610
+ }
611
+ const structuredModel = this.getNewModel({
612
+ provider: agentContext.provider,
613
+ clientOptions: structuredClientOptions,
614
+ });
615
+ const { structuredResponse, rawMessage } = await this.attemptStructuredInvoke({
616
+ currentModel: structuredModel,
617
+ finalMessages,
618
+ schema,
619
+ structuredOutputConfig: agentContext.structuredOutput,
620
+ provider: agentContext.provider,
621
+ }, config);
622
+ // Emit structured output event
623
+ await events.safeDispatchCustomEvent(_enum.GraphEvents.ON_STRUCTURED_OUTPUT, {
624
+ structuredResponse,
625
+ schema,
626
+ raw: rawMessage,
627
+ }, config);
628
+ // Create a clean message WITHOUT tool_calls for structured output.
629
+ // The rawMessage contains a tool_call for the structured output schema (e.g., "response"),
630
+ // which would cause the graph router to send it to the tool node.
631
+ // We return a clean AI message that ends the graph.
632
+ let cleanMessage;
633
+ if (rawMessage) {
634
+ cleanMessage = new messages.AIMessageChunk({
635
+ content: JSON.stringify(structuredResponse, null, 2),
636
+ id: rawMessage.id,
637
+ response_metadata: rawMessage.response_metadata,
638
+ usage_metadata: rawMessage.usage_metadata,
639
+ });
640
+ }
641
+ return {
642
+ messages: cleanMessage ? [cleanMessage] : [],
643
+ structuredResponse,
644
+ };
645
+ }
574
646
  createCallModel(agentId = 'default') {
575
647
  return async (state, config) => {
576
648
  /**
@@ -624,6 +696,38 @@ class StandardGraph extends Graph {
624
696
  }
625
697
  this.config = config;
626
698
  let messagesToUse = messages$1;
699
+ // ====================================================================
700
+ // PRE-PRUNING DELEGATION CHECK
701
+ // Before pruning strips messages (losing context), check if we should
702
+ // delegate instead. If context would be pruned AND the agent has the
703
+ // task tool, inject a delegation hint and SKIP pruning — preserving
704
+ // the content for the LLM to understand what to delegate.
705
+ // ====================================================================
706
+ let delegationInjectedPrePrune = false;
707
+ const hasTaskToolPrePrune = agentContext.tools?.some((tool) => {
708
+ const toolName = typeof tool === 'object' && 'name' in tool
709
+ ? tool.name
710
+ : '';
711
+ return toolName === 'task';
712
+ });
713
+ if (hasTaskToolPrePrune &&
714
+ agentContext.tokenCounter &&
715
+ agentContext.maxContextTokens != null) {
716
+ // Estimate total tokens in messages BEFORE pruning
717
+ let prePruneTokens = 0;
718
+ for (const msg of messages$1) {
719
+ prePruneTokens += agentContext.tokenCounter(msg);
720
+ }
721
+ // Add instruction tokens (system prompt)
722
+ prePruneTokens += agentContext.instructionTokens ?? 0;
723
+ const prePruneUtilization = (prePruneTokens / agentContext.maxContextTokens) * 100;
724
+ if (prePruneUtilization > 70) {
725
+ console.warn(`[Graph] PRE-PRUNE delegation check: ${prePruneUtilization.toFixed(1)}% utilization ` +
726
+ `(${prePruneTokens}/${agentContext.maxContextTokens} tokens). ` +
727
+ `Injecting delegation hint INSTEAD of pruning.`);
728
+ delegationInjectedPrePrune = true;
729
+ }
730
+ }
627
731
  if (!agentContext.pruneMessages &&
628
732
  agentContext.tokenCounter &&
629
733
  agentContext.maxContextTokens != null &&
@@ -646,7 +750,7 @@ class StandardGraph extends Graph {
646
750
  indexTokenCountMap: agentContext.indexTokenCountMap,
647
751
  });
648
752
  }
649
- if (agentContext.pruneMessages) {
753
+ if (agentContext.pruneMessages && !delegationInjectedPrePrune) {
650
754
  const { context, indexTokenCountMap } = agentContext.pruneMessages({
651
755
  messages: messages$1,
652
756
  usageMetadata: agentContext.currentUsage,
@@ -655,6 +759,9 @@ class StandardGraph extends Graph {
655
759
  agentContext.indexTokenCountMap = indexTokenCountMap;
656
760
  messagesToUse = context;
657
761
  }
762
+ else if (delegationInjectedPrePrune) {
763
+ console.info('[Graph] Skipping pruning — delegation will handle context pressure');
764
+ }
658
765
  let finalMessages = messagesToUse;
659
766
  if (agentContext.useLegacyContent) {
660
767
  finalMessages = content.formatContentStrings(finalMessages);
@@ -768,12 +875,13 @@ class StandardGraph extends Graph {
768
875
  // ====================================================================
769
876
  // CONTEXT PRESSURE AWARENESS — Intelligent Sub-Agent Delegation
770
877
  //
771
- // At EVERY iteration of the agent loop (not just pre-flight), check
772
- // context utilization. If the agent has a "task" tool (sub-agents)
773
- // and context is filling up, inject a hint telling the LLM to
774
- // delegate remaining work to sub-agents instead of continuing to
775
- // accumulate context. The LLM then makes the intelligent decision
776
- // about WHAT to delegate and HOW to split the work.
878
+ // Two triggers for delegation hints:
879
+ // 1. DOCUMENT COUNT: When 3+ documents are detected in the conversation,
880
+ // inject a delegation hint on the FIRST iteration (before the LLM
881
+ // has called any tools). This ensures the agent delegates upfront
882
+ // rather than trying to process all documents itself.
883
+ // 2. TOKEN UTILIZATION: At EVERY iteration, if context is filling up
884
+ // (70%/85%), inject escalating hints to delegate remaining work.
777
885
  //
778
886
  // This runs mid-chain — so even if tool responses push context up
779
887
  // after the first LLM call, subsequent iterations get the hint.
@@ -791,22 +899,80 @@ class StandardGraph extends Graph {
791
899
  const totalTokens = contextAnalytics$1.totalTokens;
792
900
  const maxTokens = contextAnalytics$1.maxContextTokens;
793
901
  const remainingTokens = maxTokens - totalTokens;
902
+ // Count attached documents by scanning for document patterns in HumanMessages:
903
+ // 1. # "filename" headers in "Attached document(s):" blocks (text content)
904
+ // 2. **filename1, filename2** in "The user has attached:" blocks (embedded files)
905
+ // 3. Filenames in file_search tool results
906
+ let documentCount = 0;
907
+ const documentNames = [];
908
+ for (const msg of finalMessages) {
909
+ const content = typeof msg.content === 'string'
910
+ ? msg.content
911
+ : Array.isArray(msg.content)
912
+ ? msg.content.map((p) => {
913
+ const part = p;
914
+ return String(part.text || part.content || '');
915
+ }).join(' ')
916
+ : '';
917
+ // Pattern 1: # "filename" headers in attached document blocks
918
+ const docMatches = content.match(/# "([^"]+)"/g);
919
+ if (docMatches) {
920
+ for (const match of docMatches) {
921
+ const name = match.replace(/# "/, '').replace(/"$/, '');
922
+ if (!documentNames.includes(name)) {
923
+ documentNames.push(name);
924
+ documentCount++;
925
+ }
926
+ }
927
+ }
928
+ // Pattern 2: "The user has attached: **file1, file2**" (embedded files)
929
+ const attachedMatch = content.match(/user has attached:\s*\*\*([^*]+)\*\*/i);
930
+ if (attachedMatch) {
931
+ const names = attachedMatch[1].split(',').map((n) => n.trim()).filter(Boolean);
932
+ for (const name of names) {
933
+ if (!documentNames.includes(name)) {
934
+ documentNames.push(name);
935
+ documentCount++;
936
+ }
937
+ }
938
+ }
939
+ }
794
940
  // BASELINE LOG: Always fires so we can verify this code path runs
795
941
  console.info(`[Graph] Context utilization: ${utilization.toFixed(1)}% ` +
796
942
  `(${totalTokens}/${maxTokens} tokens, ${remainingTokens} remaining) | ` +
797
- `hasTaskTool: true | messages: ${finalMessages.length}`);
798
- if (utilization > 85) {
799
- // CRITICAL: Context nearly full MANDATE delegation
943
+ `hasTaskTool: true | messages: ${finalMessages.length} | docs: ${documentCount}`);
944
+ // TRIGGER 1: Multi-document delegation (3+ documents detected)
945
+ // Only inject on first iteration (no AI messages yet = agent hasn't responded)
946
+ const hasAiResponse = finalMessages.some((m) => m._getType?.() === 'ai' || m._getType?.() === 'tool');
947
+ if (documentCount >= 3 && !hasAiResponse) {
948
+ const pressureMsg = new messages.HumanMessage({
949
+ content: `[MULTI-DOCUMENT PROCESSING — ${documentCount} documents detected]\n` +
950
+ `Documents: ${documentNames.join(', ')}\n\n` +
951
+ `You have ${documentCount} documents attached. For thorough analysis, use the "task" tool ` +
952
+ `to delegate each document (or group of related documents) to a sub-agent.\n` +
953
+ `Each sub-agent has its own fresh context window and can use file_search to retrieve the full document content.\n` +
954
+ `After all sub-agents complete, synthesize their results into a comprehensive response.\n\n` +
955
+ `This approach ensures each document gets full attention without context limitations.`,
956
+ });
957
+ finalMessages = [...finalMessages, pressureMsg];
958
+ console.info(`[Graph] Multi-document delegation hint injected for ${documentCount} documents: ` +
959
+ `${documentNames.join(', ')}`);
960
+ }
961
+ // TRIGGER 2: Token utilization thresholds (mid-chain safety net)
962
+ // Also fires when we skipped pruning due to delegationInjectedPrePrune
963
+ if (utilization > 85 || (delegationInjectedPrePrune && utilization > 50)) {
964
+ // CRITICAL: Context is high — MANDATE delegation
800
965
  const pressureMsg = new messages.HumanMessage({
801
966
  content: `[CONTEXT BUDGET CRITICAL — ${utilization.toFixed(0)}% used]\n` +
802
967
  `You have used ${totalTokens} of ${maxTokens} tokens (${remainingTokens} remaining).\n` +
803
- `Your context is almost full. You MUST use the "task" tool to delegate any remaining work to sub-agents.\n` +
968
+ `Your context is very large. You MUST use the "task" tool to delegate work to sub-agents.\n` +
804
969
  `Each sub-agent runs in its own fresh context window and can use file_search to access documents.\n` +
805
- `Do NOT make any more tool calls yourself except "task". Delegate and then synthesize the results.`,
970
+ `Do NOT attempt to process documents directly delegate each document to a sub-agent, then synthesize results.`,
806
971
  });
807
972
  finalMessages = [...finalMessages, pressureMsg];
808
973
  console.warn(`[Graph] Context pressure CRITICAL (${utilization.toFixed(0)}%): ` +
809
- `Injected mandatory delegation hint. ${remainingTokens} tokens remaining.`);
974
+ `Injected mandatory delegation hint. ${remainingTokens} tokens remaining. ` +
975
+ `prePruneSkipped: ${delegationInjectedPrePrune}`);
810
976
  }
811
977
  else if (utilization > 70) {
812
978
  // WARNING: Context filling up — suggest delegation
@@ -821,85 +987,22 @@ class StandardGraph extends Graph {
821
987
  `Injected delegation suggestion. ${remainingTokens} tokens remaining.`);
822
988
  }
823
989
  }
824
- // Check if structured output mode is enabled
990
+ // Structured output mode: when the agent has NO tools, produce structured JSON immediately.
991
+ // When the agent HAS tools, we defer structured output until after tool use completes
992
+ // (see the deferred structured output block after attemptInvoke below).
993
+ const hasTools = (toolsForBinding?.length ?? 0) > 0;
825
994
  if (agentContext.isStructuredOutputMode &&
826
- agentContext.structuredOutput) {
827
- const schema = agentContext.getStructuredOutputSchema();
828
- if (!schema) {
829
- throw new Error('Structured output schema is not configured');
830
- }
995
+ agentContext.structuredOutput &&
996
+ !hasTools) {
831
997
  try {
832
- // Use structured output invocation (non-streaming)
833
- // Get a fresh model WITHOUT tools bound - bindTools() returns RunnableBinding which lacks withStructuredOutput
834
- // Also disable thinking mode - Anthropic/Bedrock doesn't allow tool_choice with thinking enabled
835
- const structuredClientOptions = { ...agentContext.clientOptions };
836
- // CRITICAL: Disable streaming for structured output
837
- // Structured output uses model.invoke() with tool binding, not streaming
838
- // eslint-disable-next-line @typescript-eslint/no-explicit-any
839
- structuredClientOptions.streaming = false;
840
- // Remove thinking configuration for Bedrock
841
- // Bedrock uses additionalModelRequestFields.thinking for extended thinking
842
- if (agentContext.provider === _enum.Providers.BEDROCK) {
843
- const bedrockOpts = structuredClientOptions;
844
- if (bedrockOpts.additionalModelRequestFields) {
845
- // eslint-disable-next-line @typescript-eslint/no-explicit-any
846
- const additionalFields = Object.assign({}, bedrockOpts.additionalModelRequestFields);
847
- // Remove thinking configuration
848
- delete additionalFields.thinking;
849
- // Remove budget tokens which is also related to thinking
850
- delete additionalFields.budgetTokens;
851
- bedrockOpts.additionalModelRequestFields = additionalFields;
852
- }
853
- }
854
- // Remove thinking configuration for Anthropic direct API
855
- if (agentContext.provider === _enum.Providers.ANTHROPIC) {
856
- const anthropicOpts = structuredClientOptions;
857
- if (anthropicOpts.thinking) {
858
- delete anthropicOpts.thinking;
859
- }
860
- }
861
- const structuredModel = this.getNewModel({
862
- provider: agentContext.provider,
863
- clientOptions: structuredClientOptions,
864
- });
865
- const { structuredResponse, rawMessage } = await this.attemptStructuredInvoke({
866
- currentModel: structuredModel,
998
+ const structuredResult = await this.performStructuredOutput({
999
+ agentContext,
867
1000
  finalMessages,
868
- schema,
869
- structuredOutputConfig: agentContext.structuredOutput,
870
- provider: agentContext.provider,
871
- }, config);
872
- // Emit structured output event
873
- await events.safeDispatchCustomEvent(_enum.GraphEvents.ON_STRUCTURED_OUTPUT, {
874
- structuredResponse,
875
- schema,
876
- raw: rawMessage,
877
- }, config);
878
- agentContext.currentUsage = rawMessage
879
- ? this.getUsageMetadata(rawMessage)
880
- : undefined;
1001
+ config,
1002
+ });
1003
+ agentContext.currentUsage = this.getUsageMetadata(structuredResult.messages?.[0]);
881
1004
  this.cleanupSignalListener();
882
- // CRITICAL: Create a clean message WITHOUT tool_calls for structured output
883
- // The rawMessage contains a tool_call for the structured output schema (e.g., "response"),
884
- // which would cause the graph router to send it to the tool node, failing with
885
- // "Tool 'response' not found". We need to return a clean AI message that ends the graph.
886
- let cleanMessage;
887
- if (rawMessage) {
888
- // Create a new AIMessageChunk with the structured response as JSON content
889
- // but WITHOUT the tool_calls that would trigger tool execution
890
- cleanMessage = new messages.AIMessageChunk({
891
- content: JSON.stringify(structuredResponse, null, 2),
892
- id: rawMessage.id,
893
- response_metadata: rawMessage.response_metadata,
894
- usage_metadata: rawMessage.usage_metadata,
895
- // Explicitly exclude tool_calls to prevent routing to tool node
896
- });
897
- }
898
- // Return the clean message (no tool_calls) so the graph ends here
899
- return {
900
- messages: cleanMessage ? [cleanMessage] : [],
901
- structuredResponse,
902
- };
1005
+ return structuredResult;
903
1006
  }
904
1007
  catch (structuredError) {
905
1008
  console.error('[Graph] Structured output failed:', structuredError);
@@ -1075,6 +1178,57 @@ If I seem to be missing something we discussed earlier, just give me a quick rem
1075
1178
  }
1076
1179
  agentContext.currentUsage = this.getUsageMetadata(result.messages?.[0]);
1077
1180
  this.cleanupSignalListener();
1181
+ // DEFERRED STRUCTURED OUTPUT: When the agent has tools AND structured output configured,
1182
+ // we let the agent use tools normally via attemptInvoke(). Once the agent's response
1183
+ // has NO tool_calls (it's done with tools), we produce the final structured JSON response.
1184
+ if (agentContext.isStructuredOutputMode &&
1185
+ agentContext.structuredOutput &&
1186
+ result) {
1187
+ const lastMessage = result.messages?.[0];
1188
+ const resultHasToolCalls = lastMessage &&
1189
+ 'tool_calls' in lastMessage &&
1190
+ (lastMessage.tool_calls?.length ?? 0) > 0;
1191
+ if (!resultHasToolCalls) {
1192
+ try {
1193
+ // Build messages for structured output: include the full conversation
1194
+ // plus the agent's text response from attemptInvoke, so the structured
1195
+ // output model has full context (tool results + agent reasoning).
1196
+ const messagesForStructured = [...finalMessages];
1197
+ if (lastMessage) {
1198
+ messagesForStructured.push(lastMessage);
1199
+ }
1200
+ const structuredResult = await this.performStructuredOutput({
1201
+ agentContext,
1202
+ finalMessages: messagesForStructured,
1203
+ config,
1204
+ });
1205
+ // Accumulate token usage from both API calls
1206
+ const structuredUsage = this.getUsageMetadata(structuredResult.messages?.[0]);
1207
+ if (structuredUsage && agentContext.currentUsage) {
1208
+ agentContext.currentUsage = {
1209
+ input_tokens: (agentContext.currentUsage.input_tokens ?? 0) +
1210
+ (structuredUsage.input_tokens ?? 0),
1211
+ output_tokens: (agentContext.currentUsage.output_tokens ?? 0) +
1212
+ (structuredUsage.output_tokens ?? 0),
1213
+ total_tokens: (agentContext.currentUsage.total_tokens ?? 0) +
1214
+ (structuredUsage.total_tokens ?? 0),
1215
+ };
1216
+ }
1217
+ else if (structuredUsage) {
1218
+ agentContext.currentUsage = structuredUsage;
1219
+ }
1220
+ return structuredResult;
1221
+ }
1222
+ catch (structuredError) {
1223
+ // Graceful fallback: the agent completed its work with tools,
1224
+ // but we couldn't format the output as structured JSON.
1225
+ // Return the unstructured text response from attemptInvoke.
1226
+ console.error('[Graph] Deferred structured output failed after successful tool use:', structuredError);
1227
+ console.warn('[Graph] Falling back to unstructured response from tool-use phase');
1228
+ return result;
1229
+ }
1230
+ }
1231
+ }
1078
1232
  return result;
1079
1233
  };
1080
1234
  }