@illuma-ai/agents 1.1.21 → 1.1.22

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (241) hide show
  1. package/dist/cjs/graphs/Graph.cjs +12 -1
  2. package/dist/cjs/graphs/Graph.cjs.map +1 -1
  3. package/dist/cjs/graphs/MultiAgentGraph.cjs +85 -1
  4. package/dist/cjs/graphs/MultiAgentGraph.cjs.map +1 -1
  5. package/dist/cjs/run.cjs +20 -9
  6. package/dist/cjs/run.cjs.map +1 -1
  7. package/dist/esm/graphs/Graph.mjs +12 -1
  8. package/dist/esm/graphs/Graph.mjs.map +1 -1
  9. package/dist/esm/graphs/MultiAgentGraph.mjs +85 -1
  10. package/dist/esm/graphs/MultiAgentGraph.mjs.map +1 -1
  11. package/dist/esm/run.mjs +20 -9
  12. package/dist/esm/run.mjs.map +1 -1
  13. package/dist/types/graphs/MultiAgentGraph.d.ts +17 -0
  14. package/package.json +1 -1
  15. package/src/graphs/Graph.ts +12 -1
  16. package/src/graphs/MultiAgentGraph.ts +105 -1
  17. package/src/graphs/__tests__/multi-agent-delegate.test.ts +191 -0
  18. package/src/run.ts +20 -11
  19. package/src/scripts/test-bedrock-handoff-autonomous.ts +231 -0
  20. package/src/agents/AgentContext.js +0 -782
  21. package/src/agents/AgentContext.test.js +0 -421
  22. package/src/agents/__tests__/AgentContext.test.js +0 -678
  23. package/src/agents/__tests__/resolveStructuredOutputMode.test.js +0 -117
  24. package/src/common/enum.js +0 -192
  25. package/src/common/index.js +0 -3
  26. package/src/events.js +0 -166
  27. package/src/graphs/Graph.js +0 -1857
  28. package/src/graphs/MultiAgentGraph.js +0 -1092
  29. package/src/graphs/__tests__/structured-output.integration.test.js +0 -624
  30. package/src/graphs/__tests__/structured-output.test.js +0 -144
  31. package/src/graphs/contextManagement.e2e.test.js +0 -718
  32. package/src/graphs/contextManagement.test.js +0 -485
  33. package/src/graphs/handoffValidation.test.js +0 -276
  34. package/src/graphs/index.js +0 -3
  35. package/src/index.js +0 -28
  36. package/src/instrumentation.js +0 -21
  37. package/src/llm/anthropic/index.js +0 -319
  38. package/src/llm/anthropic/types.js +0 -46
  39. package/src/llm/anthropic/utils/message_inputs.js +0 -627
  40. package/src/llm/anthropic/utils/message_outputs.js +0 -290
  41. package/src/llm/anthropic/utils/output_parsers.js +0 -89
  42. package/src/llm/anthropic/utils/tools.js +0 -25
  43. package/src/llm/bedrock/__tests__/bedrock-caching.test.js +0 -392
  44. package/src/llm/bedrock/index.js +0 -303
  45. package/src/llm/bedrock/types.js +0 -2
  46. package/src/llm/bedrock/utils/index.js +0 -6
  47. package/src/llm/bedrock/utils/message_inputs.js +0 -463
  48. package/src/llm/bedrock/utils/message_outputs.js +0 -269
  49. package/src/llm/fake.js +0 -92
  50. package/src/llm/google/index.js +0 -215
  51. package/src/llm/google/types.js +0 -12
  52. package/src/llm/google/utils/common.js +0 -670
  53. package/src/llm/google/utils/tools.js +0 -111
  54. package/src/llm/google/utils/zod_to_genai_parameters.js +0 -47
  55. package/src/llm/openai/index.js +0 -1033
  56. package/src/llm/openai/types.js +0 -2
  57. package/src/llm/openai/utils/index.js +0 -756
  58. package/src/llm/openai/utils/isReasoningModel.test.js +0 -79
  59. package/src/llm/openrouter/index.js +0 -261
  60. package/src/llm/openrouter/reasoning.test.js +0 -181
  61. package/src/llm/providers.js +0 -36
  62. package/src/llm/text.js +0 -65
  63. package/src/llm/vertexai/index.js +0 -402
  64. package/src/messages/__tests__/tools.test.js +0 -392
  65. package/src/messages/cache.js +0 -404
  66. package/src/messages/cache.test.js +0 -1167
  67. package/src/messages/content.js +0 -48
  68. package/src/messages/content.test.js +0 -314
  69. package/src/messages/core.js +0 -359
  70. package/src/messages/ensureThinkingBlock.test.js +0 -997
  71. package/src/messages/format.js +0 -973
  72. package/src/messages/formatAgentMessages.test.js +0 -2278
  73. package/src/messages/formatAgentMessages.tools.test.js +0 -362
  74. package/src/messages/formatMessage.test.js +0 -608
  75. package/src/messages/ids.js +0 -18
  76. package/src/messages/index.js +0 -9
  77. package/src/messages/labelContentByAgent.test.js +0 -725
  78. package/src/messages/prune.js +0 -438
  79. package/src/messages/reducer.js +0 -60
  80. package/src/messages/shiftIndexTokenCountMap.test.js +0 -63
  81. package/src/messages/summarize.js +0 -146
  82. package/src/messages/summarize.test.js +0 -332
  83. package/src/messages/tools.js +0 -90
  84. package/src/mockStream.js +0 -81
  85. package/src/prompts/collab.js +0 -7
  86. package/src/prompts/index.js +0 -3
  87. package/src/prompts/taskmanager.js +0 -58
  88. package/src/run.js +0 -427
  89. package/src/schemas/index.js +0 -3
  90. package/src/schemas/schema-preparation.test.js +0 -370
  91. package/src/schemas/validate.js +0 -314
  92. package/src/schemas/validate.test.js +0 -264
  93. package/src/scripts/abort.js +0 -127
  94. package/src/scripts/ant_web_search.js +0 -130
  95. package/src/scripts/ant_web_search_edge_case.js +0 -133
  96. package/src/scripts/ant_web_search_error_edge_case.js +0 -119
  97. package/src/scripts/args.js +0 -41
  98. package/src/scripts/bedrock-cache-debug.js +0 -186
  99. package/src/scripts/bedrock-content-aggregation-test.js +0 -195
  100. package/src/scripts/bedrock-merge-test.js +0 -80
  101. package/src/scripts/bedrock-parallel-tools-test.js +0 -150
  102. package/src/scripts/caching.js +0 -106
  103. package/src/scripts/cli.js +0 -152
  104. package/src/scripts/cli2.js +0 -119
  105. package/src/scripts/cli3.js +0 -163
  106. package/src/scripts/cli4.js +0 -165
  107. package/src/scripts/cli5.js +0 -165
  108. package/src/scripts/code_exec.js +0 -171
  109. package/src/scripts/code_exec_files.js +0 -180
  110. package/src/scripts/code_exec_multi_session.js +0 -185
  111. package/src/scripts/code_exec_ptc.js +0 -265
  112. package/src/scripts/code_exec_session.js +0 -217
  113. package/src/scripts/code_exec_simple.js +0 -120
  114. package/src/scripts/content.js +0 -111
  115. package/src/scripts/empty_input.js +0 -125
  116. package/src/scripts/handoff-test.js +0 -96
  117. package/src/scripts/image.js +0 -138
  118. package/src/scripts/memory.js +0 -83
  119. package/src/scripts/multi-agent-chain.js +0 -271
  120. package/src/scripts/multi-agent-conditional.js +0 -185
  121. package/src/scripts/multi-agent-document-review-chain.js +0 -171
  122. package/src/scripts/multi-agent-hybrid-flow.js +0 -264
  123. package/src/scripts/multi-agent-parallel-start.js +0 -214
  124. package/src/scripts/multi-agent-parallel.js +0 -346
  125. package/src/scripts/multi-agent-sequence.js +0 -184
  126. package/src/scripts/multi-agent-supervisor.js +0 -324
  127. package/src/scripts/multi-agent-test.js +0 -147
  128. package/src/scripts/parallel-asymmetric-tools-test.js +0 -202
  129. package/src/scripts/parallel-full-metadata-test.js +0 -176
  130. package/src/scripts/parallel-tools-test.js +0 -256
  131. package/src/scripts/programmatic_exec.js +0 -277
  132. package/src/scripts/programmatic_exec_agent.js +0 -168
  133. package/src/scripts/search.js +0 -118
  134. package/src/scripts/sequential-full-metadata-test.js +0 -143
  135. package/src/scripts/simple.js +0 -174
  136. package/src/scripts/single-agent-metadata-test.js +0 -152
  137. package/src/scripts/stream.js +0 -113
  138. package/src/scripts/test-custom-prompt-key.js +0 -132
  139. package/src/scripts/test-handoff-input.js +0 -143
  140. package/src/scripts/test-handoff-preamble.js +0 -227
  141. package/src/scripts/test-handoff-steering.js +0 -353
  142. package/src/scripts/test-multi-agent-list-handoff.js +0 -318
  143. package/src/scripts/test-parallel-agent-labeling.js +0 -253
  144. package/src/scripts/test-parallel-handoffs.js +0 -229
  145. package/src/scripts/test-thinking-handoff-bedrock.js +0 -132
  146. package/src/scripts/test-thinking-handoff.js +0 -132
  147. package/src/scripts/test-thinking-to-thinking-handoff-bedrock.js +0 -140
  148. package/src/scripts/test-tool-before-handoff-role-order.js +0 -223
  149. package/src/scripts/test-tools-before-handoff.js +0 -187
  150. package/src/scripts/test_code_api.js +0 -263
  151. package/src/scripts/thinking-bedrock.js +0 -128
  152. package/src/scripts/thinking-vertexai.js +0 -130
  153. package/src/scripts/thinking.js +0 -134
  154. package/src/scripts/tool_search.js +0 -114
  155. package/src/scripts/tools.js +0 -125
  156. package/src/specs/agent-handoffs-bedrock.integration.test.js +0 -280
  157. package/src/specs/agent-handoffs.test.js +0 -924
  158. package/src/specs/anthropic.simple.test.js +0 -287
  159. package/src/specs/azure.simple.test.js +0 -381
  160. package/src/specs/cache.simple.test.js +0 -282
  161. package/src/specs/custom-event-await.test.js +0 -148
  162. package/src/specs/deepseek.simple.test.js +0 -189
  163. package/src/specs/emergency-prune.test.js +0 -308
  164. package/src/specs/moonshot.simple.test.js +0 -237
  165. package/src/specs/observability.integration.test.js +0 -1337
  166. package/src/specs/openai.simple.test.js +0 -233
  167. package/src/specs/openrouter.simple.test.js +0 -202
  168. package/src/specs/prune.test.js +0 -733
  169. package/src/specs/reasoning.test.js +0 -144
  170. package/src/specs/spec.utils.js +0 -4
  171. package/src/specs/thinking-handoff.test.js +0 -486
  172. package/src/specs/thinking-prune.test.js +0 -600
  173. package/src/specs/token-distribution-edge-case.test.js +0 -246
  174. package/src/specs/token-memoization.test.js +0 -32
  175. package/src/specs/tokens.test.js +0 -49
  176. package/src/specs/tool-error.test.js +0 -139
  177. package/src/splitStream.js +0 -204
  178. package/src/splitStream.test.js +0 -504
  179. package/src/stream.js +0 -650
  180. package/src/stream.test.js +0 -225
  181. package/src/test/mockTools.js +0 -340
  182. package/src/tools/BrowserTools.js +0 -245
  183. package/src/tools/Calculator.js +0 -38
  184. package/src/tools/Calculator.test.js +0 -225
  185. package/src/tools/CodeExecutor.js +0 -233
  186. package/src/tools/ProgrammaticToolCalling.js +0 -602
  187. package/src/tools/StreamingToolCallBuffer.js +0 -179
  188. package/src/tools/ToolNode.js +0 -930
  189. package/src/tools/ToolSearch.js +0 -904
  190. package/src/tools/__tests__/BrowserTools.test.js +0 -306
  191. package/src/tools/__tests__/ProgrammaticToolCalling.integration.test.js +0 -276
  192. package/src/tools/__tests__/ProgrammaticToolCalling.test.js +0 -807
  193. package/src/tools/__tests__/StreamingToolCallBuffer.test.js +0 -175
  194. package/src/tools/__tests__/ToolApproval.test.js +0 -675
  195. package/src/tools/__tests__/ToolNode.recovery.test.js +0 -200
  196. package/src/tools/__tests__/ToolNode.session.test.js +0 -319
  197. package/src/tools/__tests__/ToolSearch.integration.test.js +0 -125
  198. package/src/tools/__tests__/ToolSearch.test.js +0 -812
  199. package/src/tools/__tests__/handlers.test.js +0 -799
  200. package/src/tools/__tests__/truncation-recovery.integration.test.js +0 -362
  201. package/src/tools/handlers.js +0 -306
  202. package/src/tools/schema.js +0 -25
  203. package/src/tools/search/anthropic.js +0 -34
  204. package/src/tools/search/content.js +0 -116
  205. package/src/tools/search/content.test.js +0 -133
  206. package/src/tools/search/firecrawl.js +0 -173
  207. package/src/tools/search/format.js +0 -198
  208. package/src/tools/search/highlights.js +0 -241
  209. package/src/tools/search/index.js +0 -3
  210. package/src/tools/search/jina-reranker.test.js +0 -106
  211. package/src/tools/search/rerankers.js +0 -165
  212. package/src/tools/search/schema.js +0 -102
  213. package/src/tools/search/search.js +0 -561
  214. package/src/tools/search/serper-scraper.js +0 -126
  215. package/src/tools/search/test.js +0 -129
  216. package/src/tools/search/tool.js +0 -453
  217. package/src/tools/search/types.js +0 -2
  218. package/src/tools/search/utils.js +0 -59
  219. package/src/types/graph.js +0 -24
  220. package/src/types/graph.test.js +0 -192
  221. package/src/types/index.js +0 -7
  222. package/src/types/llm.js +0 -2
  223. package/src/types/messages.js +0 -2
  224. package/src/types/run.js +0 -2
  225. package/src/types/stream.js +0 -2
  226. package/src/types/tools.js +0 -2
  227. package/src/utils/contextAnalytics.js +0 -79
  228. package/src/utils/contextAnalytics.test.js +0 -166
  229. package/src/utils/events.js +0 -26
  230. package/src/utils/graph.js +0 -11
  231. package/src/utils/handlers.js +0 -65
  232. package/src/utils/index.js +0 -10
  233. package/src/utils/llm.js +0 -21
  234. package/src/utils/llmConfig.js +0 -205
  235. package/src/utils/logging.js +0 -37
  236. package/src/utils/misc.js +0 -51
  237. package/src/utils/run.js +0 -69
  238. package/src/utils/schema.js +0 -21
  239. package/src/utils/title.js +0 -119
  240. package/src/utils/tokens.js +0 -92
  241. package/src/utils/toonFormat.js +0 -379
@@ -1,973 +0,0 @@
1
- /* eslint-disable @typescript-eslint/no-explicit-any */
2
- import { AIMessage, AIMessageChunk, ToolMessage, HumanMessage, SystemMessage, } from '@langchain/core/messages';
3
- import { Providers, ContentTypes, Constants } from '@/common';
4
- import { processToolOutput } from '@/utils/toonFormat';
5
- /**
6
- * Formats a message with media content (images, documents, videos, audios) to API payload format.
7
- *
8
- * @param params - The parameters for formatting.
9
- * @returns - The formatted message.
10
- */
11
- export const formatMediaMessage = ({ message, endpoint, mediaParts, }) => {
12
- // Create a new object to avoid mutating the input
13
- const result = {
14
- ...message,
15
- content: [],
16
- };
17
- if (endpoint === Providers.ANTHROPIC) {
18
- result.content = [
19
- ...mediaParts,
20
- { type: ContentTypes.TEXT, text: message.content },
21
- ];
22
- return result;
23
- }
24
- result.content = [
25
- { type: ContentTypes.TEXT, text: message.content },
26
- ...mediaParts,
27
- ];
28
- return result;
29
- };
30
- /**
31
- * Formats a message to OpenAI payload format based on the provided options.
32
- *
33
- * @param params - The parameters for formatting.
34
- * @returns - The formatted message.
35
- */
36
- export const formatMessage = ({ message, userName, endpoint, assistantName, langChain = false, }) => {
37
- // eslint-disable-next-line prefer-const
38
- let { role: _role, _name, sender, text, content: _content, lc_id } = message;
39
- if (lc_id && lc_id[2] && !langChain) {
40
- const roleMapping = {
41
- SystemMessage: 'system',
42
- HumanMessage: 'user',
43
- AIMessage: 'assistant',
44
- };
45
- _role = roleMapping[lc_id[2]] || _role;
46
- }
47
- const role = _role ??
48
- (sender != null && sender && sender.toLowerCase() === 'user'
49
- ? 'user'
50
- : 'assistant');
51
- const content = _content ?? text ?? '';
52
- const formattedMessage = {
53
- role,
54
- content,
55
- };
56
- // Set name fields first
57
- if (_name != null && _name) {
58
- formattedMessage.name = _name;
59
- }
60
- if (userName != null && userName && formattedMessage.role === 'user') {
61
- formattedMessage.name = userName;
62
- }
63
- if (assistantName != null &&
64
- assistantName &&
65
- formattedMessage.role === 'assistant') {
66
- formattedMessage.name = assistantName;
67
- }
68
- if (formattedMessage.name != null && formattedMessage.name) {
69
- // Conform to API regex: ^[a-zA-Z0-9_-]{1,64}$
70
- // https://community.openai.com/t/the-format-of-the-name-field-in-the-documentation-is-incorrect/175684/2
71
- formattedMessage.name = formattedMessage.name.replace(/[^a-zA-Z0-9_-]/g, '_');
72
- if (formattedMessage.name.length > 64) {
73
- formattedMessage.name = formattedMessage.name.substring(0, 64);
74
- }
75
- }
76
- const { image_urls, documents, videos, audios } = message;
77
- const mediaParts = [];
78
- if (Array.isArray(documents) && documents.length > 0) {
79
- mediaParts.push(...documents);
80
- }
81
- if (Array.isArray(videos) && videos.length > 0) {
82
- mediaParts.push(...videos);
83
- }
84
- if (Array.isArray(audios) && audios.length > 0) {
85
- mediaParts.push(...audios);
86
- }
87
- if (Array.isArray(image_urls) && image_urls.length > 0) {
88
- mediaParts.push(...image_urls);
89
- }
90
- if (mediaParts.length > 0 && role === 'user') {
91
- const mediaMessage = formatMediaMessage({
92
- message: {
93
- ...formattedMessage,
94
- content: typeof formattedMessage.content === 'string'
95
- ? formattedMessage.content
96
- : '',
97
- },
98
- mediaParts,
99
- endpoint,
100
- });
101
- if (!langChain) {
102
- return mediaMessage;
103
- }
104
- return new HumanMessage(mediaMessage);
105
- }
106
- if (!langChain) {
107
- return formattedMessage;
108
- }
109
- if (role === 'user') {
110
- return new HumanMessage(formattedMessage);
111
- }
112
- else if (role === 'assistant') {
113
- return new AIMessage(formattedMessage);
114
- }
115
- else {
116
- return new SystemMessage(formattedMessage);
117
- }
118
- };
119
- /**
120
- * Formats an array of messages for LangChain.
121
- *
122
- * @param messages - The array of messages to format.
123
- * @param formatOptions - The options for formatting each message.
124
- * @returns - The array of formatted LangChain messages.
125
- */
126
- export const formatLangChainMessages = (messages, formatOptions) => {
127
- return messages.map((msg) => {
128
- const formatted = formatMessage({
129
- ...formatOptions,
130
- message: msg,
131
- langChain: true,
132
- });
133
- return formatted;
134
- });
135
- };
136
- /**
137
- * Formats a LangChain message object by merging properties from `lc_kwargs` or `kwargs` and `additional_kwargs`.
138
- *
139
- * @param message - The message object to format.
140
- * @returns - The formatted LangChain message.
141
- */
142
- export const formatFromLangChain = (message) => {
143
- const kwargs = message.lc_kwargs ?? message.kwargs ?? {};
144
- const { additional_kwargs = {}, ...message_kwargs } = kwargs;
145
- return {
146
- ...message_kwargs,
147
- ...additional_kwargs,
148
- };
149
- };
150
- /**
151
- * Helper function to format an assistant message
152
- * @param message The message to format
153
- * @returns Array of formatted messages
154
- */
155
- function formatAssistantMessage(message) {
156
- const formattedMessages = [];
157
- let currentContent = [];
158
- let lastAIMessage = null;
159
- let hasReasoning = false;
160
- if (Array.isArray(message.content)) {
161
- for (const part of message.content) {
162
- if (part == null) {
163
- continue;
164
- }
165
- if (part.type === ContentTypes.TEXT && part.tool_call_ids) {
166
- /*
167
- If there's pending content, it needs to be aggregated as a single string to prepare for tool calls.
168
- For Anthropic models, the "tool_calls" field on a message is only respected if content is a string.
169
- */
170
- if (currentContent.length > 0) {
171
- let content = currentContent.reduce((acc, curr) => {
172
- if (curr.type === ContentTypes.TEXT) {
173
- return `${acc}${String(curr[ContentTypes.TEXT] ?? '')}\n`;
174
- }
175
- return acc;
176
- }, '');
177
- content =
178
- `${content}\n${part[ContentTypes.TEXT] ?? part.text ?? ''}`.trim();
179
- lastAIMessage = new AIMessage({ content });
180
- formattedMessages.push(lastAIMessage);
181
- currentContent = [];
182
- continue;
183
- }
184
- // Create a new AIMessage with this text and prepare for tool calls
185
- lastAIMessage = new AIMessage({
186
- content: part.text != null ? part.text : '',
187
- });
188
- formattedMessages.push(lastAIMessage);
189
- }
190
- else if (part.type === ContentTypes.TOOL_CALL) {
191
- // Skip malformed tool call entries without tool_call property
192
- if (part.tool_call == null) {
193
- continue;
194
- }
195
- // Note: `tool_calls` list is defined when constructed by `AIMessage` class, and outputs should be excluded from it
196
- const { output, args: _args, ..._tool_call } = part.tool_call;
197
- // Skip invalid tool calls that have no name AND no output
198
- if (_tool_call.name == null ||
199
- (_tool_call.name === '' && (output == null || output === ''))) {
200
- continue;
201
- }
202
- if (!lastAIMessage) {
203
- // "Heal" the payload by creating an AIMessage to precede the tool call
204
- lastAIMessage = new AIMessage({ content: '' });
205
- formattedMessages.push(lastAIMessage);
206
- }
207
- const tool_call = _tool_call;
208
- // TODO: investigate; args as dictionary may need to be providers-or-tool-specific
209
- let args = _args;
210
- try {
211
- if (typeof _args === 'string') {
212
- args = JSON.parse(_args);
213
- }
214
- }
215
- catch {
216
- if (typeof _args === 'string') {
217
- args = { input: _args };
218
- }
219
- }
220
- tool_call.args = args;
221
- if (!lastAIMessage.tool_calls) {
222
- lastAIMessage.tool_calls = [];
223
- }
224
- lastAIMessage.tool_calls.push(tool_call);
225
- // Apply TOON compression to historical tool outputs for context efficiency
226
- // processToolOutput handles: JSON→TOON conversion, already-TOON detection (skip), truncation
227
- // Skip TOON for content_tool — its output is line-numbered source code that must stay verbatim
228
- // for accurate edit (str_replace) matching.
229
- const processedOutput = output != null
230
- ? processToolOutput(output, {
231
- enableToon: tool_call.name !== 'content_tool',
232
- }).content
233
- : '';
234
- formattedMessages.push(new ToolMessage({
235
- tool_call_id: tool_call.id ?? '',
236
- name: tool_call.name,
237
- content: processedOutput,
238
- }));
239
- }
240
- else if (part.type === ContentTypes.THINK ||
241
- part.type === ContentTypes.THINKING ||
242
- part.type === ContentTypes.REASONING_CONTENT ||
243
- part.type === 'redacted_thinking') {
244
- hasReasoning = true;
245
- continue;
246
- }
247
- else if (part.type === ContentTypes.ERROR ||
248
- part.type === ContentTypes.AGENT_UPDATE) {
249
- continue;
250
- }
251
- else {
252
- if (part.type === ContentTypes.TEXT &&
253
- !String(part.text ?? '').trim()) {
254
- continue;
255
- }
256
- currentContent.push(part);
257
- }
258
- }
259
- }
260
- if (hasReasoning && currentContent.length > 0) {
261
- const content = currentContent
262
- .reduce((acc, curr) => {
263
- if (curr.type === ContentTypes.TEXT) {
264
- return `${acc}${String(curr[ContentTypes.TEXT] ?? '')}\n`;
265
- }
266
- return acc;
267
- }, '')
268
- .trim();
269
- if (content) {
270
- formattedMessages.push(new AIMessage({ content }));
271
- }
272
- }
273
- else if (currentContent.length > 0) {
274
- formattedMessages.push(new AIMessage({ content: currentContent }));
275
- }
276
- return formattedMessages;
277
- }
278
- /**
279
- * Labels all agent content for parallel patterns (fan-out/fan-in)
280
- * Groups consecutive content by agent and wraps with clear labels
281
- */
282
- function labelAllAgentContent(contentParts, agentIdMap, agentNames) {
283
- const result = [];
284
- let currentAgentId;
285
- let agentContentBuffer = [];
286
- const flushAgentBuffer = () => {
287
- if (agentContentBuffer.length === 0) {
288
- return;
289
- }
290
- if (currentAgentId != null && currentAgentId !== '') {
291
- const agentName = (agentNames?.[currentAgentId] ?? '') || currentAgentId;
292
- const formattedParts = [];
293
- formattedParts.push(`--- ${agentName} ---`);
294
- for (const part of agentContentBuffer) {
295
- if (part.type === ContentTypes.THINK) {
296
- const thinkContent = part.think || '';
297
- if (thinkContent) {
298
- formattedParts.push(`${agentName}: ${JSON.stringify({
299
- type: 'think',
300
- think: thinkContent,
301
- })}`);
302
- }
303
- }
304
- else if (part.type === ContentTypes.TEXT) {
305
- const textContent = part.text ?? '';
306
- if (textContent) {
307
- formattedParts.push(`${agentName}: ${textContent}`);
308
- }
309
- }
310
- else if (part.type === ContentTypes.TOOL_CALL) {
311
- formattedParts.push(`${agentName}: ${JSON.stringify({
312
- type: 'tool_call',
313
- tool_call: part.tool_call,
314
- })}`);
315
- }
316
- }
317
- formattedParts.push(`--- End of ${agentName} ---`);
318
- // Create a single text content part with all agent content
319
- result.push({
320
- type: ContentTypes.TEXT,
321
- text: formattedParts.join('\n\n'),
322
- });
323
- }
324
- else {
325
- // No agent ID, pass through as-is
326
- result.push(...agentContentBuffer);
327
- }
328
- agentContentBuffer = [];
329
- };
330
- for (let i = 0; i < contentParts.length; i++) {
331
- const part = contentParts[i];
332
- const agentId = agentIdMap[i];
333
- // If agent changed, flush previous buffer
334
- if (agentId !== currentAgentId && currentAgentId !== undefined) {
335
- flushAgentBuffer();
336
- }
337
- currentAgentId = agentId;
338
- agentContentBuffer.push(part);
339
- }
340
- // Flush any remaining content
341
- flushAgentBuffer();
342
- return result;
343
- }
344
- /**
345
- * Groups content parts by agent and formats them with agent labels
346
- * This preprocesses multi-agent content to prevent identity confusion
347
- *
348
- * @param contentParts - The content parts from a run
349
- * @param agentIdMap - Map of content part index to agent ID
350
- * @param agentNames - Optional map of agent ID to display name
351
- * @param options - Configuration options
352
- * @param options.labelNonTransferContent - If true, labels all agent transitions (for parallel patterns)
353
- * @returns Modified content parts with agent labels where appropriate
354
- */
355
- export const labelContentByAgent = (contentParts, agentIdMap, agentNames, options) => {
356
- if (!agentIdMap || Object.keys(agentIdMap).length === 0) {
357
- return contentParts;
358
- }
359
- // If labelNonTransferContent is true, use a different strategy for parallel patterns
360
- if (options?.labelNonTransferContent === true) {
361
- return labelAllAgentContent(contentParts, agentIdMap, agentNames);
362
- }
363
- const result = [];
364
- let currentAgentId;
365
- let agentContentBuffer = [];
366
- let transferToolCallIndex;
367
- let transferToolCallId;
368
- const flushAgentBuffer = () => {
369
- if (agentContentBuffer.length === 0) {
370
- return;
371
- }
372
- // If this is content from a transferred agent, format it specially
373
- if (currentAgentId != null &&
374
- currentAgentId !== '' &&
375
- transferToolCallIndex !== undefined) {
376
- const agentName = (agentNames?.[currentAgentId] ?? '') || currentAgentId;
377
- const formattedParts = [];
378
- formattedParts.push(`--- Transfer to ${agentName} ---`);
379
- for (const part of agentContentBuffer) {
380
- if (part.type === ContentTypes.THINK) {
381
- formattedParts.push(`${agentName}: ${JSON.stringify({
382
- type: 'think',
383
- think: part.think,
384
- })}`);
385
- }
386
- else if ('text' in part && part.type === ContentTypes.TEXT) {
387
- const textContent = part.text ?? '';
388
- if (textContent) {
389
- formattedParts.push(`${agentName}: ${JSON.stringify({
390
- type: 'text',
391
- text: textContent,
392
- })}`);
393
- }
394
- }
395
- else if (part.type === ContentTypes.TOOL_CALL) {
396
- formattedParts.push(`${agentName}: ${JSON.stringify({
397
- type: 'tool_call',
398
- tool_call: part.tool_call,
399
- })}`);
400
- }
401
- }
402
- formattedParts.push(`--- End of ${agentName} response ---`);
403
- // Find the tool call that triggered this transfer and update its output
404
- if (transferToolCallIndex < result.length) {
405
- const transferToolCall = result[transferToolCallIndex];
406
- if (transferToolCall.type === ContentTypes.TOOL_CALL &&
407
- transferToolCall.tool_call?.id === transferToolCallId) {
408
- transferToolCall.tool_call.output = formattedParts.join('\n\n');
409
- }
410
- }
411
- }
412
- else {
413
- // Not from a transfer, add as-is
414
- result.push(...agentContentBuffer);
415
- }
416
- agentContentBuffer = [];
417
- transferToolCallIndex = undefined;
418
- transferToolCallId = undefined;
419
- };
420
- for (let i = 0; i < contentParts.length; i++) {
421
- const part = contentParts[i];
422
- const agentId = agentIdMap[i];
423
- // Check if this is a transfer tool call
424
- const isTransferTool = (part.type === ContentTypes.TOOL_CALL &&
425
- part.tool_call?.name?.startsWith('lc_transfer_to_')) ??
426
- false;
427
- // If agent changed, flush previous buffer
428
- if (agentId !== currentAgentId && currentAgentId !== undefined) {
429
- flushAgentBuffer();
430
- }
431
- currentAgentId = agentId;
432
- if (isTransferTool) {
433
- // Flush any existing buffer first
434
- flushAgentBuffer();
435
- // Add the transfer tool call to result
436
- result.push(part);
437
- // Mark that the next agent's content should be captured
438
- transferToolCallIndex = result.length - 1;
439
- transferToolCallId = part.tool_call?.id;
440
- currentAgentId = undefined; // Reset to capture the next agent
441
- }
442
- else {
443
- agentContentBuffer.push(part);
444
- }
445
- }
446
- flushAgentBuffer();
447
- return result;
448
- };
449
- /** Extracts tool names from a tool_search output JSON string. */
450
- function extractToolNamesFromSearchOutput(output) {
451
- try {
452
- const parsed = JSON.parse(output);
453
- if (typeof parsed === 'object' &&
454
- parsed !== null &&
455
- Array.isArray(parsed.tools)) {
456
- return parsed.tools
457
- .map((t) => t.name)
458
- .filter((name) => typeof name === 'string');
459
- }
460
- }
461
- catch {
462
- /** Output may have warnings prepended, try to find JSON within it */
463
- const jsonMatch = output.match(/\{[\s\S]*\}/);
464
- if (jsonMatch) {
465
- try {
466
- const parsed = JSON.parse(jsonMatch[0]);
467
- if (typeof parsed === 'object' &&
468
- parsed !== null &&
469
- Array.isArray(parsed.tools)) {
470
- return parsed.tools
471
- .map((t) => t.name)
472
- .filter((name) => typeof name === 'string');
473
- }
474
- }
475
- catch {
476
- /* ignore */
477
- }
478
- }
479
- }
480
- return [];
481
- }
482
- /**
483
- * Formats an array of messages for LangChain, handling tool calls and creating ToolMessage instances.
484
- *
485
- * @param payload - The array of messages to format.
486
- * @param indexTokenCountMap - Optional map of message indices to token counts.
487
- * @param tools - Optional set of tool names that are allowed in the request.
488
- * @returns - Object containing formatted messages and updated indexTokenCountMap if provided.
489
- */
490
export const formatAgentMessages = (payload, indexTokenCountMap, tools) => {
    const messages = [];
    // If indexTokenCountMap is provided, this map tracks token counts keyed by the
    // indices of the *result* messages (one payload message may expand into several).
    const updatedIndexTokenCountMap = {};
    // Maps each original payload index to the array of result indices it produced.
    const indexMapping = {};
    /**
     * Create a mutable copy of the tools set that can be expanded dynamically.
     * When we encounter tool_search results, we add discovered tools to this set,
     * making their subsequent tool calls valid.
     */
    const discoveredTools = tools ? new Set(tools) : undefined;
    // Process messages with tool conversion if tools set is provided
    for (let i = 0; i < payload.length; i++) {
        const message = payload[i];
        // Normalize string content into the structured part-array form.
        // NOTE(review): this mutates the caller's message object in place —
        // confirm no caller relies on `content` remaining a string.
        if (typeof message.content === 'string') {
            message.content = [
                { type: ContentTypes.TEXT, [ContentTypes.TEXT]: message.content },
            ];
        }
        // Non-assistant messages map 1:1 into the result.
        if (message.role !== 'assistant') {
            messages.push(formatMessage({
                message: message,
                langChain: true,
            }));
            // Update the index mapping for this message
            indexMapping[i] = [messages.length - 1];
            continue;
        }
        // For assistant messages, track the starting index before processing,
        // since formatAssistantMessage may emit multiple result messages.
        const startMessageIndex = messages.length;
        /**
         * If tools set is provided, process tool_calls:
         * - Keep valid tool_calls (tools in the set or dynamically discovered)
         * - Convert invalid tool_calls to string representation for context preservation
         * - Dynamically expand the set when tool_search results are encountered
         */
        let processedMessage = message;
        if (discoveredTools) {
            const content = message.content;
            if (content && Array.isArray(content)) {
                const filteredContent = [];
                const invalidToolCallIds = new Set();
                const invalidToolStrings = [];
                for (const part of content) {
                    // Non-tool_call parts pass through untouched.
                    if (part.type !== ContentTypes.TOOL_CALL) {
                        filteredContent.push(part);
                        continue;
                    }
                    /** Skip malformed tool_call entries (missing or empty name),
                        but remember their ids so dangling references get cleaned up. */
                    if (part.tool_call == null ||
                        part.tool_call.name == null ||
                        part.tool_call.name === '') {
                        if (typeof part.tool_call?.id === 'string' &&
                            part.tool_call.id !== '') {
                            invalidToolCallIds.add(part.tool_call.id);
                        }
                        continue;
                    }
                    const toolName = part.tool_call.name;
                    /**
                     * If this is a tool_search result with output, extract discovered tool names
                     * and add them to the discoveredTools set for subsequent validation.
                     */
                    if (toolName === Constants.TOOL_SEARCH &&
                        typeof part.tool_call.output === 'string' &&
                        part.tool_call.output !== '') {
                        const extracted = extractToolNamesFromSearchOutput(part.tool_call.output);
                        for (const name of extracted) {
                            discoveredTools.add(name);
                        }
                    }
                    if (discoveredTools.has(toolName)) {
                        /** Valid tool - keep it */
                        filteredContent.push(part);
                    }
                    else {
                        /** Invalid tool - convert to string for context preservation */
                        if (typeof part.tool_call.id === 'string' &&
                            part.tool_call.id !== '') {
                            invalidToolCallIds.add(part.tool_call.id);
                        }
                        const output = part.tool_call.output ?? '';
                        invalidToolStrings.push(`Tool: ${toolName}, ${output}`);
                    }
                }
                /** Remove tool_call_ids references to invalid tools from text parts */
                if (invalidToolCallIds.size > 0) {
                    for (const part of filteredContent) {
                        if (part.type === ContentTypes.TEXT &&
                            Array.isArray(part.tool_call_ids)) {
                            part.tool_call_ids = part.tool_call_ids.filter((id) => !invalidToolCallIds.has(id));
                            if (part.tool_call_ids.length === 0) {
                                delete part.tool_call_ids;
                            }
                        }
                    }
                }
                /** Append invalid tool strings to the content for context preservation */
                if (invalidToolStrings.length > 0) {
                    /** Find the last text part or create one */
                    let lastTextPartIndex = -1;
                    for (let j = filteredContent.length - 1; j >= 0; j--) {
                        if (filteredContent[j].type === ContentTypes.TEXT) {
                            lastTextPartIndex = j;
                            break;
                        }
                    }
                    const invalidToolText = invalidToolStrings.join('\n');
                    if (lastTextPartIndex >= 0) {
                        const lastTextPart = filteredContent[lastTextPartIndex];
                        // Support both `[ContentTypes.TEXT]` and legacy `.text` storage.
                        const existingText = lastTextPart[ContentTypes.TEXT] ?? lastTextPart.text ?? '';
                        filteredContent[lastTextPartIndex] = {
                            ...lastTextPart,
                            [ContentTypes.TEXT]: existingText
                                ? `${existingText}\n${invalidToolText}`
                                : invalidToolText,
                        };
                    }
                    else {
                        /** No text part exists, create one */
                        filteredContent.push({
                            type: ContentTypes.TEXT,
                            [ContentTypes.TEXT]: invalidToolText,
                        });
                    }
                }
                /** Use filtered content if we made any changes */
                if (filteredContent.length !== content.length ||
                    invalidToolStrings.length > 0) {
                    processedMessage = { ...message, content: filteredContent };
                }
            }
        }
        // Process the assistant message using the helper function
        const formattedMessages = formatAssistantMessage(processedMessage);
        messages.push(...formattedMessages);
        // Update the index mapping for this assistant message.
        // Store all result indices that were created from this original message.
        const endMessageIndex = messages.length;
        const resultIndices = [];
        for (let j = startMessageIndex; j < endMessageIndex; j++) {
            resultIndices.push(j);
        }
        indexMapping[i] = resultIndices;
    }
    // Redistribute each original message's token count across the result
    // messages it expanded into, proportionally to their serialized lengths.
    if (indexTokenCountMap) {
        for (let originalIndex = 0; originalIndex < payload.length; originalIndex++) {
            const resultIndices = indexMapping[originalIndex] || [];
            const tokenCount = indexTokenCountMap[originalIndex];
            if (tokenCount === undefined) {
                continue;
            }
            const msgCount = resultIndices.length;
            // 1:1 mapping — carry the count over directly.
            if (msgCount === 1) {
                updatedIndexTokenCountMap[resultIndices[0]] = tokenCount;
                continue;
            }
            // Zero result messages — nothing to assign the count to.
            if (msgCount < 2) {
                continue;
            }
            let totalLength = 0;
            const lastIdx = msgCount - 1;
            const lengths = new Array(msgCount);
            // Measure each result message: text content plus tool-call names/args.
            for (let k = 0; k < msgCount; k++) {
                const msg = messages[resultIndices[k]];
                const { content } = msg;
                let len = 0;
                if (typeof content === 'string') {
                    len = content.length;
                }
                else if (Array.isArray(content)) {
                    for (const part of content) {
                        if (typeof part === 'string') {
                            len += part.length;
                        }
                        else if (part != null && typeof part === 'object') {
                            const val = part.text ?? part.content;
                            if (typeof val === 'string') {
                                len += val.length;
                            }
                        }
                    }
                }
                const toolCalls = msg.tool_calls;
                if (Array.isArray(toolCalls)) {
                    for (const tc of toolCalls) {
                        if (typeof tc.name === 'string') {
                            len += tc.name.length;
                        }
                        const { args } = tc;
                        if (typeof args === 'string') {
                            len += args.length;
                        }
                        else if (args != null) {
                            len += JSON.stringify(args).length;
                        }
                    }
                }
                lengths[k] = len;
                totalLength += len;
            }
            if (totalLength === 0) {
                // All result messages are empty — split the count evenly, giving
                // the remainder to the last message so the total is preserved.
                const countPerMessage = Math.floor(tokenCount / msgCount);
                for (let k = 0; k < lastIdx; k++) {
                    updatedIndexTokenCountMap[resultIndices[k]] = countPerMessage;
                }
                updatedIndexTokenCountMap[resultIndices[lastIdx]] =
                    tokenCount - countPerMessage * lastIdx;
            }
            else {
                // Proportional split; the last message absorbs rounding residue
                // so the shares always sum to the original tokenCount.
                let distributed = 0;
                for (let k = 0; k < lastIdx; k++) {
                    const share = Math.floor((lengths[k] / totalLength) * tokenCount);
                    updatedIndexTokenCountMap[resultIndices[k]] = share;
                    distributed += share;
                }
                updatedIndexTokenCountMap[resultIndices[lastIdx]] =
                    tokenCount - distributed;
            }
        }
    }
    return {
        messages,
        indexTokenCountMap: indexTokenCountMap
            ? updatedIndexTokenCountMap
            : undefined,
    };
};
/**
 * Prepends a system-message token count at key 0 and shifts every existing
 * index in an indexTokenCountMap up by one. Useful when a system message is
 * inserted at the start of a conversation.
 *
 * @param indexTokenCountMap - The original map of message indices to token counts
 * @param instructionsTokenCount - The token count for the system message at index 0
 * @returns A new map (the input is not modified) with the system message at
 *   index 0 and all other indices shifted by 1
 */
export function shiftIndexTokenCountMap(indexTokenCountMap, instructionsTokenCount) {
    // Seed the result with the system-message entry; never touch the input map.
    const shiftedMap = { 0: instructionsTokenCount };
    for (const key of Object.keys(indexTokenCountMap)) {
        shiftedMap[Number(key) + 1] = indexTokenCountMap[key];
    }
    return shiftedMap;
}
/** Block types that contain binary image data and must be preserved structurally
    (never serialized to text) when messages are flattened. */
const IMAGE_BLOCK_TYPES = new Set(['image_url', 'image']);
/** Checks whether a BaseMessage is a tool-role message. Accepts ToolMessage
    instances as well as plain objects with `role: 'tool'` (duck-typed, for
    objects that may have lost the LangChain prototype). */
const isToolMessage = (m) => m instanceof ToolMessage || ('role' in m && m.role === 'tool');
/**
 * Flushes accumulated text chunks into `parts` as a single joined text block.
 * No-op when there is nothing accumulated; otherwise the chunk array is
 * drained in place so the caller's accumulator is reset.
 */
function flushTextChunks(textChunks, parts) {
    if (textChunks.length === 0) {
        return;
    }
    // Drain the accumulator in place, then emit one combined text part.
    const merged = textChunks.splice(0, textChunks.length).join('\n');
    parts.push({
        type: ContentTypes.TEXT,
        text: merged,
    });
}
/**
 * Appends a single message's content to the running `textChunks` / `parts`
 * accumulators. Image blocks are shallow-copied into `parts` as-is so that
 * binary data (base64 images) never becomes text tokens. All other block
 * types are serialized to text — unrecognized types are JSON-serialized
 * rather than silently dropped.
 *
 * When `content` is an array containing tool_use blocks, `tool_calls` is NOT
 * additionally serialized (avoiding double output). `tool_calls` is used as
 * a fallback when `content` is a plain string or an array with no tool_use.
 */
function appendMessageContent(msg, role, textChunks, parts) {
    const content = msg.content;
    // Non-array content: serialize the string (if any), then fall back to
    // tool_calls metadata.
    if (!Array.isArray(content)) {
        if (typeof content === 'string' && content !== '') {
            textChunks.push(`${role}: ${content}`);
        }
        appendToolCalls(msg, role, textChunks);
        return;
    }
    let sawToolUse = false;
    for (const block of content) {
        if (IMAGE_BLOCK_TYPES.has(block.type ?? '')) {
            // Preserve image blocks structurally — flush pending text first so
            // ordering is maintained.
            flushTextChunks(textChunks, parts);
            parts.push({ ...block });
        }
        else if (block.type === 'tool_use') {
            sawToolUse = true;
            textChunks.push(`${role}: [tool_use] ${String(block.name ?? '')} ${JSON.stringify(block.input ?? {})}`);
        }
        else {
            const text = block.text ?? block.input;
            if (typeof text === 'string' && text) {
                textChunks.push(`${role}: ${text}`);
            }
            else if (block.type != null && block.type !== '') {
                // Fallback: serialize unrecognized block types to preserve context.
                textChunks.push(`${role}: [${block.type}] ${JSON.stringify(block)}`);
            }
        }
    }
    // If the content array had no tool_use blocks, fall back to tool_calls
    // metadata (handles edge case: empty content array with tool_calls populated).
    if (!sawToolUse) {
        appendToolCalls(msg, role, textChunks);
    }
}
/**
 * Serializes an AI message's `tool_calls` metadata into text chunks.
 * No-op for non-AI roles or when there are no tool calls.
 */
function appendToolCalls(msg, role, textChunks) {
    if (role !== 'AI') {
        return;
    }
    const toolCalls = msg.tool_calls;
    if (!toolCalls || toolCalls.length === 0) {
        return;
    }
    for (const call of toolCalls) {
        textChunks.push(`AI: [tool_call] ${call.name}(${JSON.stringify(call.args)})`);
    }
}
/**
 * Ensures compatibility when switching from a non-thinking agent to a thinking-enabled agent.
 * Converts trailing AI+Tool sequences whose AI messages have tool calls but lack
 * thinking/reasoning blocks into a single HumanMessage carrying the serialized context,
 * avoiding the thinking block signature requirement.
 *
 * Recognizes the following as valid thinking/reasoning blocks:
 * - ContentTypes.THINKING (Anthropic)
 * - ContentTypes.REASONING_CONTENT (Bedrock)
 * - ContentTypes.REASONING (VertexAI / Google)
 * - 'redacted_thinking'
 *
 * @param messages - Array of messages to process
 * @param _provider - The provider being used (unused but kept for future compatibility)
 * @returns The messages array with tool sequences converted to HumanMessages if necessary
 */
export function ensureThinkingBlockInMessages(messages, _provider) {
    if (messages.length === 0) {
        return messages;
    }
    // If the last message is already a HumanMessage, there is no trailing tool
    // sequence to convert — return early to preserve prompt caching and avoid
    // redundant token overhead from re-processing the entire history.
    const lastMsg = messages[messages.length - 1];
    const lastIsHuman = lastMsg instanceof HumanMessage ||
        ('role' in lastMsg && lastMsg.role === 'user');
    if (lastIsHuman) {
        return messages;
    }
    const result = [];
    let i = 0;
    while (i < messages.length) {
        const msg = messages[i];
        /** Detect AI messages by instanceof OR by role, in case cache-control cloning
            produced a plain object that lost the LangChain prototype. */
        const isAI = msg instanceof AIMessage ||
            msg instanceof AIMessageChunk ||
            ('role' in msg && msg.role === 'assistant');
        if (!isAI) {
            result.push(msg);
            i++;
            continue;
        }
        const aiMsg = msg;
        const hasToolCalls = aiMsg.tool_calls && aiMsg.tool_calls.length > 0;
        const contentIsArray = Array.isArray(aiMsg.content);
        // Check if the message has tool calls or tool_use content
        let hasToolUse = hasToolCalls ?? false;
        let hasThinkingBlock = false;
        if (contentIsArray && aiMsg.content.length > 0) {
            for (const c of aiMsg.content) {
                if (typeof c !== 'object') {
                    continue;
                }
                if (c.type === 'tool_use') {
                    hasToolUse = true;
                }
                else if (c.type === ContentTypes.THINKING ||
                    c.type === ContentTypes.REASONING_CONTENT ||
                    c.type === ContentTypes.REASONING ||
                    c.type === 'redacted_thinking') {
                    hasThinkingBlock = true;
                }
                if (hasToolUse && hasThinkingBlock) {
                    break;
                }
            }
        }
        // Bedrock also stores reasoning in additional_kwargs (may not be in content array).
        // Guarded with `?.`: messages detected by role may be plain objects without
        // `additional_kwargs` at all, and an unguarded access would throw TypeError.
        if (!hasThinkingBlock &&
            aiMsg.additional_kwargs?.reasoning_content != null) {
            hasThinkingBlock = true;
        }
        // If message has tool use but no thinking block, check whether this is a
        // continuation of a thinking-enabled agent's chain before converting.
        // Bedrock reasoning models can produce multiple AI→Tool rounds after an
        // initial reasoning response: the first AI message has reasoning_content,
        // but follow-ups have content: "" with only tool_calls. These are the
        // same agent's turn and must NOT be converted to HumanMessages.
        if (hasToolUse && !hasThinkingBlock) {
            // Walk backwards — if an earlier AI message in the same chain (before
            // the nearest HumanMessage) has a thinking/reasoning block, this is a
            // continuation of a thinking-enabled turn, not a non-thinking handoff.
            if (chainHasThinkingBlock(messages, i)) {
                result.push(msg);
                i++;
                continue;
            }
            // Build structured content in a single pass over the AI + following
            // ToolMessages — preserves image blocks as-is to avoid serializing
            // binary data as text (which caused 174× token amplification).
            const parts = [];
            const textChunks = ['[Previous agent context]'];
            appendMessageContent(msg, 'AI', textChunks, parts);
            let j = i + 1;
            while (j < messages.length && isToolMessage(messages[j])) {
                appendMessageContent(messages[j], 'Tool', textChunks, parts);
                j++;
            }
            flushTextChunks(textChunks, parts);
            result.push(new HumanMessage({ content: parts }));
            i = j;
        }
        else {
            // Keep the message as is
            result.push(msg);
            i++;
        }
    }
    return result;
}
/**
 * Walks backwards from `currentIndex` through the message array to check
 * whether an earlier AI message in the same "chain" (no HumanMessage boundary)
 * contains a thinking/reasoning block.
 *
 * A "chain" is a contiguous sequence of AI + Tool messages with no intervening
 * HumanMessage. Bedrock reasoning models produce reasoning on the first AI
 * response, then issue follow-up tool calls with `content: ""` and no
 * reasoning block. These follow-ups are part of the same thinking-enabled
 * turn and should not be converted.
 *
 * @param messages - The full message array being scanned
 * @param currentIndex - Index of the AI message whose chain is inspected
 * @returns true if an earlier AI message in the same chain carries a
 *   thinking/reasoning block, false otherwise
 */
function chainHasThinkingBlock(messages, currentIndex) {
    for (let k = currentIndex - 1; k >= 0; k--) {
        const prev = messages[k];
        // HumanMessage = turn boundary — stop searching
        if (prev instanceof HumanMessage ||
            ('role' in prev && prev.role === 'user')) {
            return false;
        }
        // Check AI messages for thinking/reasoning blocks
        const isPrevAI = prev instanceof AIMessage ||
            prev instanceof AIMessageChunk ||
            ('role' in prev && prev.role === 'assistant');
        if (isPrevAI) {
            const prevAiMsg = prev;
            if (Array.isArray(prevAiMsg.content) && prevAiMsg.content.length > 0) {
                const content = prevAiMsg.content;
                if (content.some((c) => typeof c === 'object' &&
                    (c.type === ContentTypes.THINKING ||
                        c.type === ContentTypes.REASONING_CONTENT ||
                        c.type === ContentTypes.REASONING ||
                        c.type === 'redacted_thinking'))) {
                    return true;
                }
            }
            // Bedrock also stores reasoning in additional_kwargs. Guarded with `?.`:
            // role-detected messages may be plain objects with no additional_kwargs,
            // and an unguarded access would throw TypeError mid-scan.
            if (prevAiMsg.additional_kwargs?.reasoning_content != null) {
                return true;
            }
        }
        // ToolMessages are part of the chain — keep walking back
    }
    return false;
}
973
- //# sourceMappingURL=format.js.map