@illuma-ai/agents 1.1.21 → 1.1.22

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (241) hide show
  1. package/dist/cjs/graphs/Graph.cjs +12 -1
  2. package/dist/cjs/graphs/Graph.cjs.map +1 -1
  3. package/dist/cjs/graphs/MultiAgentGraph.cjs +85 -1
  4. package/dist/cjs/graphs/MultiAgentGraph.cjs.map +1 -1
  5. package/dist/cjs/run.cjs +20 -9
  6. package/dist/cjs/run.cjs.map +1 -1
  7. package/dist/esm/graphs/Graph.mjs +12 -1
  8. package/dist/esm/graphs/Graph.mjs.map +1 -1
  9. package/dist/esm/graphs/MultiAgentGraph.mjs +85 -1
  10. package/dist/esm/graphs/MultiAgentGraph.mjs.map +1 -1
  11. package/dist/esm/run.mjs +20 -9
  12. package/dist/esm/run.mjs.map +1 -1
  13. package/dist/types/graphs/MultiAgentGraph.d.ts +17 -0
  14. package/package.json +1 -1
  15. package/src/graphs/Graph.ts +12 -1
  16. package/src/graphs/MultiAgentGraph.ts +105 -1
  17. package/src/graphs/__tests__/multi-agent-delegate.test.ts +191 -0
  18. package/src/run.ts +20 -11
  19. package/src/scripts/test-bedrock-handoff-autonomous.ts +231 -0
  20. package/src/agents/AgentContext.js +0 -782
  21. package/src/agents/AgentContext.test.js +0 -421
  22. package/src/agents/__tests__/AgentContext.test.js +0 -678
  23. package/src/agents/__tests__/resolveStructuredOutputMode.test.js +0 -117
  24. package/src/common/enum.js +0 -192
  25. package/src/common/index.js +0 -3
  26. package/src/events.js +0 -166
  27. package/src/graphs/Graph.js +0 -1857
  28. package/src/graphs/MultiAgentGraph.js +0 -1092
  29. package/src/graphs/__tests__/structured-output.integration.test.js +0 -624
  30. package/src/graphs/__tests__/structured-output.test.js +0 -144
  31. package/src/graphs/contextManagement.e2e.test.js +0 -718
  32. package/src/graphs/contextManagement.test.js +0 -485
  33. package/src/graphs/handoffValidation.test.js +0 -276
  34. package/src/graphs/index.js +0 -3
  35. package/src/index.js +0 -28
  36. package/src/instrumentation.js +0 -21
  37. package/src/llm/anthropic/index.js +0 -319
  38. package/src/llm/anthropic/types.js +0 -46
  39. package/src/llm/anthropic/utils/message_inputs.js +0 -627
  40. package/src/llm/anthropic/utils/message_outputs.js +0 -290
  41. package/src/llm/anthropic/utils/output_parsers.js +0 -89
  42. package/src/llm/anthropic/utils/tools.js +0 -25
  43. package/src/llm/bedrock/__tests__/bedrock-caching.test.js +0 -392
  44. package/src/llm/bedrock/index.js +0 -303
  45. package/src/llm/bedrock/types.js +0 -2
  46. package/src/llm/bedrock/utils/index.js +0 -6
  47. package/src/llm/bedrock/utils/message_inputs.js +0 -463
  48. package/src/llm/bedrock/utils/message_outputs.js +0 -269
  49. package/src/llm/fake.js +0 -92
  50. package/src/llm/google/index.js +0 -215
  51. package/src/llm/google/types.js +0 -12
  52. package/src/llm/google/utils/common.js +0 -670
  53. package/src/llm/google/utils/tools.js +0 -111
  54. package/src/llm/google/utils/zod_to_genai_parameters.js +0 -47
  55. package/src/llm/openai/index.js +0 -1033
  56. package/src/llm/openai/types.js +0 -2
  57. package/src/llm/openai/utils/index.js +0 -756
  58. package/src/llm/openai/utils/isReasoningModel.test.js +0 -79
  59. package/src/llm/openrouter/index.js +0 -261
  60. package/src/llm/openrouter/reasoning.test.js +0 -181
  61. package/src/llm/providers.js +0 -36
  62. package/src/llm/text.js +0 -65
  63. package/src/llm/vertexai/index.js +0 -402
  64. package/src/messages/__tests__/tools.test.js +0 -392
  65. package/src/messages/cache.js +0 -404
  66. package/src/messages/cache.test.js +0 -1167
  67. package/src/messages/content.js +0 -48
  68. package/src/messages/content.test.js +0 -314
  69. package/src/messages/core.js +0 -359
  70. package/src/messages/ensureThinkingBlock.test.js +0 -997
  71. package/src/messages/format.js +0 -973
  72. package/src/messages/formatAgentMessages.test.js +0 -2278
  73. package/src/messages/formatAgentMessages.tools.test.js +0 -362
  74. package/src/messages/formatMessage.test.js +0 -608
  75. package/src/messages/ids.js +0 -18
  76. package/src/messages/index.js +0 -9
  77. package/src/messages/labelContentByAgent.test.js +0 -725
  78. package/src/messages/prune.js +0 -438
  79. package/src/messages/reducer.js +0 -60
  80. package/src/messages/shiftIndexTokenCountMap.test.js +0 -63
  81. package/src/messages/summarize.js +0 -146
  82. package/src/messages/summarize.test.js +0 -332
  83. package/src/messages/tools.js +0 -90
  84. package/src/mockStream.js +0 -81
  85. package/src/prompts/collab.js +0 -7
  86. package/src/prompts/index.js +0 -3
  87. package/src/prompts/taskmanager.js +0 -58
  88. package/src/run.js +0 -427
  89. package/src/schemas/index.js +0 -3
  90. package/src/schemas/schema-preparation.test.js +0 -370
  91. package/src/schemas/validate.js +0 -314
  92. package/src/schemas/validate.test.js +0 -264
  93. package/src/scripts/abort.js +0 -127
  94. package/src/scripts/ant_web_search.js +0 -130
  95. package/src/scripts/ant_web_search_edge_case.js +0 -133
  96. package/src/scripts/ant_web_search_error_edge_case.js +0 -119
  97. package/src/scripts/args.js +0 -41
  98. package/src/scripts/bedrock-cache-debug.js +0 -186
  99. package/src/scripts/bedrock-content-aggregation-test.js +0 -195
  100. package/src/scripts/bedrock-merge-test.js +0 -80
  101. package/src/scripts/bedrock-parallel-tools-test.js +0 -150
  102. package/src/scripts/caching.js +0 -106
  103. package/src/scripts/cli.js +0 -152
  104. package/src/scripts/cli2.js +0 -119
  105. package/src/scripts/cli3.js +0 -163
  106. package/src/scripts/cli4.js +0 -165
  107. package/src/scripts/cli5.js +0 -165
  108. package/src/scripts/code_exec.js +0 -171
  109. package/src/scripts/code_exec_files.js +0 -180
  110. package/src/scripts/code_exec_multi_session.js +0 -185
  111. package/src/scripts/code_exec_ptc.js +0 -265
  112. package/src/scripts/code_exec_session.js +0 -217
  113. package/src/scripts/code_exec_simple.js +0 -120
  114. package/src/scripts/content.js +0 -111
  115. package/src/scripts/empty_input.js +0 -125
  116. package/src/scripts/handoff-test.js +0 -96
  117. package/src/scripts/image.js +0 -138
  118. package/src/scripts/memory.js +0 -83
  119. package/src/scripts/multi-agent-chain.js +0 -271
  120. package/src/scripts/multi-agent-conditional.js +0 -185
  121. package/src/scripts/multi-agent-document-review-chain.js +0 -171
  122. package/src/scripts/multi-agent-hybrid-flow.js +0 -264
  123. package/src/scripts/multi-agent-parallel-start.js +0 -214
  124. package/src/scripts/multi-agent-parallel.js +0 -346
  125. package/src/scripts/multi-agent-sequence.js +0 -184
  126. package/src/scripts/multi-agent-supervisor.js +0 -324
  127. package/src/scripts/multi-agent-test.js +0 -147
  128. package/src/scripts/parallel-asymmetric-tools-test.js +0 -202
  129. package/src/scripts/parallel-full-metadata-test.js +0 -176
  130. package/src/scripts/parallel-tools-test.js +0 -256
  131. package/src/scripts/programmatic_exec.js +0 -277
  132. package/src/scripts/programmatic_exec_agent.js +0 -168
  133. package/src/scripts/search.js +0 -118
  134. package/src/scripts/sequential-full-metadata-test.js +0 -143
  135. package/src/scripts/simple.js +0 -174
  136. package/src/scripts/single-agent-metadata-test.js +0 -152
  137. package/src/scripts/stream.js +0 -113
  138. package/src/scripts/test-custom-prompt-key.js +0 -132
  139. package/src/scripts/test-handoff-input.js +0 -143
  140. package/src/scripts/test-handoff-preamble.js +0 -227
  141. package/src/scripts/test-handoff-steering.js +0 -353
  142. package/src/scripts/test-multi-agent-list-handoff.js +0 -318
  143. package/src/scripts/test-parallel-agent-labeling.js +0 -253
  144. package/src/scripts/test-parallel-handoffs.js +0 -229
  145. package/src/scripts/test-thinking-handoff-bedrock.js +0 -132
  146. package/src/scripts/test-thinking-handoff.js +0 -132
  147. package/src/scripts/test-thinking-to-thinking-handoff-bedrock.js +0 -140
  148. package/src/scripts/test-tool-before-handoff-role-order.js +0 -223
  149. package/src/scripts/test-tools-before-handoff.js +0 -187
  150. package/src/scripts/test_code_api.js +0 -263
  151. package/src/scripts/thinking-bedrock.js +0 -128
  152. package/src/scripts/thinking-vertexai.js +0 -130
  153. package/src/scripts/thinking.js +0 -134
  154. package/src/scripts/tool_search.js +0 -114
  155. package/src/scripts/tools.js +0 -125
  156. package/src/specs/agent-handoffs-bedrock.integration.test.js +0 -280
  157. package/src/specs/agent-handoffs.test.js +0 -924
  158. package/src/specs/anthropic.simple.test.js +0 -287
  159. package/src/specs/azure.simple.test.js +0 -381
  160. package/src/specs/cache.simple.test.js +0 -282
  161. package/src/specs/custom-event-await.test.js +0 -148
  162. package/src/specs/deepseek.simple.test.js +0 -189
  163. package/src/specs/emergency-prune.test.js +0 -308
  164. package/src/specs/moonshot.simple.test.js +0 -237
  165. package/src/specs/observability.integration.test.js +0 -1337
  166. package/src/specs/openai.simple.test.js +0 -233
  167. package/src/specs/openrouter.simple.test.js +0 -202
  168. package/src/specs/prune.test.js +0 -733
  169. package/src/specs/reasoning.test.js +0 -144
  170. package/src/specs/spec.utils.js +0 -4
  171. package/src/specs/thinking-handoff.test.js +0 -486
  172. package/src/specs/thinking-prune.test.js +0 -600
  173. package/src/specs/token-distribution-edge-case.test.js +0 -246
  174. package/src/specs/token-memoization.test.js +0 -32
  175. package/src/specs/tokens.test.js +0 -49
  176. package/src/specs/tool-error.test.js +0 -139
  177. package/src/splitStream.js +0 -204
  178. package/src/splitStream.test.js +0 -504
  179. package/src/stream.js +0 -650
  180. package/src/stream.test.js +0 -225
  181. package/src/test/mockTools.js +0 -340
  182. package/src/tools/BrowserTools.js +0 -245
  183. package/src/tools/Calculator.js +0 -38
  184. package/src/tools/Calculator.test.js +0 -225
  185. package/src/tools/CodeExecutor.js +0 -233
  186. package/src/tools/ProgrammaticToolCalling.js +0 -602
  187. package/src/tools/StreamingToolCallBuffer.js +0 -179
  188. package/src/tools/ToolNode.js +0 -930
  189. package/src/tools/ToolSearch.js +0 -904
  190. package/src/tools/__tests__/BrowserTools.test.js +0 -306
  191. package/src/tools/__tests__/ProgrammaticToolCalling.integration.test.js +0 -276
  192. package/src/tools/__tests__/ProgrammaticToolCalling.test.js +0 -807
  193. package/src/tools/__tests__/StreamingToolCallBuffer.test.js +0 -175
  194. package/src/tools/__tests__/ToolApproval.test.js +0 -675
  195. package/src/tools/__tests__/ToolNode.recovery.test.js +0 -200
  196. package/src/tools/__tests__/ToolNode.session.test.js +0 -319
  197. package/src/tools/__tests__/ToolSearch.integration.test.js +0 -125
  198. package/src/tools/__tests__/ToolSearch.test.js +0 -812
  199. package/src/tools/__tests__/handlers.test.js +0 -799
  200. package/src/tools/__tests__/truncation-recovery.integration.test.js +0 -362
  201. package/src/tools/handlers.js +0 -306
  202. package/src/tools/schema.js +0 -25
  203. package/src/tools/search/anthropic.js +0 -34
  204. package/src/tools/search/content.js +0 -116
  205. package/src/tools/search/content.test.js +0 -133
  206. package/src/tools/search/firecrawl.js +0 -173
  207. package/src/tools/search/format.js +0 -198
  208. package/src/tools/search/highlights.js +0 -241
  209. package/src/tools/search/index.js +0 -3
  210. package/src/tools/search/jina-reranker.test.js +0 -106
  211. package/src/tools/search/rerankers.js +0 -165
  212. package/src/tools/search/schema.js +0 -102
  213. package/src/tools/search/search.js +0 -561
  214. package/src/tools/search/serper-scraper.js +0 -126
  215. package/src/tools/search/test.js +0 -129
  216. package/src/tools/search/tool.js +0 -453
  217. package/src/tools/search/types.js +0 -2
  218. package/src/tools/search/utils.js +0 -59
  219. package/src/types/graph.js +0 -24
  220. package/src/types/graph.test.js +0 -192
  221. package/src/types/index.js +0 -7
  222. package/src/types/llm.js +0 -2
  223. package/src/types/messages.js +0 -2
  224. package/src/types/run.js +0 -2
  225. package/src/types/stream.js +0 -2
  226. package/src/types/tools.js +0 -2
  227. package/src/utils/contextAnalytics.js +0 -79
  228. package/src/utils/contextAnalytics.test.js +0 -166
  229. package/src/utils/events.js +0 -26
  230. package/src/utils/graph.js +0 -11
  231. package/src/utils/handlers.js +0 -65
  232. package/src/utils/index.js +0 -10
  233. package/src/utils/llm.js +0 -21
  234. package/src/utils/llmConfig.js +0 -205
  235. package/src/utils/logging.js +0 -37
  236. package/src/utils/misc.js +0 -51
  237. package/src/utils/run.js +0 -69
  238. package/src/utils/schema.js +0 -21
  239. package/src/utils/title.js +0 -119
  240. package/src/utils/tokens.js +0 -92
  241. package/src/utils/toonFormat.js +0 -379
@@ -1,79 +0,0 @@
1
- import { isReasoningModel } from './index';
2
- describe('isReasoningModel', () => {
3
- describe('should return true for reasoning models', () => {
4
- test('basic o-series models', () => {
5
- expect(isReasoningModel('o1')).toBe(true);
6
- expect(isReasoningModel('o2')).toBe(true);
7
- expect(isReasoningModel('o9')).toBe(true);
8
- expect(isReasoningModel('o1-preview')).toBe(true);
9
- expect(isReasoningModel('o1-mini')).toBe(true);
10
- });
11
- test('gpt-5+ models', () => {
12
- expect(isReasoningModel('gpt-5')).toBe(true);
13
- expect(isReasoningModel('gpt-6')).toBe(true);
14
- expect(isReasoningModel('gpt-7')).toBe(true);
15
- expect(isReasoningModel('gpt-8')).toBe(true);
16
- expect(isReasoningModel('gpt-9')).toBe(true);
17
- });
18
- test('with provider prefixes', () => {
19
- expect(isReasoningModel('azure/o1')).toBe(true);
20
- expect(isReasoningModel('azure/gpt-5')).toBe(true);
21
- expect(isReasoningModel('openai/o1')).toBe(true);
22
- expect(isReasoningModel('openai/gpt-5')).toBe(true);
23
- });
24
- test('with custom prefixes', () => {
25
- expect(isReasoningModel('custom-provider/o1')).toBe(true);
26
- expect(isReasoningModel('my-deployment/gpt-5')).toBe(true);
27
- expect(isReasoningModel('company/azure/gpt-5')).toBe(true);
28
- });
29
- test('case insensitive', () => {
30
- expect(isReasoningModel('O1')).toBe(true);
31
- expect(isReasoningModel('GPT-5')).toBe(true);
32
- expect(isReasoningModel('gPt-6')).toBe(true);
33
- expect(isReasoningModel('Azure/O1')).toBe(true);
34
- });
35
- });
36
- describe('should return false for non-reasoning models', () => {
37
- test('older GPT models', () => {
38
- expect(isReasoningModel('gpt-3.5-turbo')).toBe(false);
39
- expect(isReasoningModel('gpt-4')).toBe(false);
40
- expect(isReasoningModel('gpt-4-turbo')).toBe(false);
41
- expect(isReasoningModel('gpt-4o')).toBe(false);
42
- expect(isReasoningModel('gpt-4o-mini')).toBe(false);
43
- });
44
- test('other model families', () => {
45
- expect(isReasoningModel('claude-3')).toBe(false);
46
- expect(isReasoningModel('claude-3-opus')).toBe(false);
47
- expect(isReasoningModel('llama-2')).toBe(false);
48
- expect(isReasoningModel('gemini-pro')).toBe(false);
49
- });
50
- test('partial matches that should not match', () => {
51
- expect(isReasoningModel('proto1')).toBe(false);
52
- expect(isReasoningModel('version-o1')).toBe(true);
53
- expect(isReasoningModel('gpt-40')).toBe(false);
54
- expect(isReasoningModel('gpt-3.5')).toBe(false);
55
- });
56
- test('empty, null, and undefined', () => {
57
- expect(isReasoningModel('')).toBe(false);
58
- expect(isReasoningModel()).toBe(false);
59
- expect(isReasoningModel(undefined)).toBe(false);
60
- });
61
- });
62
- describe('edge cases', () => {
63
- test('with special characters', () => {
64
- expect(isReasoningModel('deployment_o1_model')).toBe(false);
65
- expect(isReasoningModel('gpt-5-deployment')).toBe(true);
66
- expect(isReasoningModel('o1@latest')).toBe(true);
67
- expect(isReasoningModel('gpt-5.0')).toBe(true);
68
- });
69
- test('word boundary behavior', () => {
70
- // These should match because o1 and gpt-5 are whole words
71
- expect(isReasoningModel('use-o1-model')).toBe(true);
72
- expect(isReasoningModel('model-gpt-5-latest')).toBe(true);
73
- // These should not match because o1/gpt-5 are not whole words
74
- expect(isReasoningModel('proto1model')).toBe(false);
75
- expect(isReasoningModel('supergpt-50')).toBe(false);
76
- });
77
- });
78
- });
79
- //# sourceMappingURL=isReasoningModel.test.js.map
@@ -1,261 +0,0 @@
1
- import { ChatOpenAI } from '@/llm/openai';
2
- import { ChatGenerationChunk } from '@langchain/core/outputs';
3
- import { AIMessageChunk as AIMessageChunkClass } from '@langchain/core/messages';
4
- import { _convertMessagesToOpenAIParams } from '@/llm/openai/utils';
5
- export class ChatOpenRouter extends ChatOpenAI {
6
- openRouterReasoning;
7
- /** @deprecated Use `reasoning` object instead */
8
- includeReasoning;
9
- constructor(_fields) {
10
- const { include_reasoning, reasoning: openRouterReasoning, modelKwargs = {}, ...fields } = _fields;
11
- // Extract reasoning from modelKwargs if provided there (e.g., from LLMConfig)
12
- const { reasoning: mkReasoning, ...restModelKwargs } = modelKwargs;
13
- super({
14
- ...fields,
15
- modelKwargs: restModelKwargs,
16
- });
17
- // Merge reasoning config: modelKwargs.reasoning < constructor reasoning
18
- if (mkReasoning != null || openRouterReasoning != null) {
19
- this.openRouterReasoning = {
20
- ...mkReasoning,
21
- ...openRouterReasoning,
22
- };
23
- }
24
- this.includeReasoning = include_reasoning;
25
- }
26
- static lc_name() {
27
- return 'IllumaOpenRouter';
28
- }
29
- // @ts-expect-error - OpenRouter reasoning extends OpenAI Reasoning with additional
30
- // effort levels ('xhigh' | 'none' | 'minimal') not in ReasoningEffort.
31
- // The parent's generic conditional return type cannot be widened in an override.
32
- invocationParams(options, extra) {
33
- const params = super.invocationParams(options, extra);
34
- // Remove the OpenAI-native reasoning_effort that the parent sets;
35
- // OpenRouter uses a `reasoning` object instead
36
- delete params.reasoning_effort;
37
- // Build the OpenRouter reasoning config
38
- const reasoning = this.buildOpenRouterReasoning(options);
39
- if (reasoning != null) {
40
- params.reasoning = reasoning;
41
- }
42
- else {
43
- delete params.reasoning;
44
- }
45
- return params;
46
- }
47
- buildOpenRouterReasoning(options) {
48
- let reasoning;
49
- // 1. Instance-level reasoning config (from constructor)
50
- if (this.openRouterReasoning != null) {
51
- reasoning = { ...this.openRouterReasoning };
52
- }
53
- // 2. LangChain-style reasoning params (from parent's `this.reasoning`)
54
- const lcReasoning = this.getReasoningParams(options);
55
- if (lcReasoning?.effort != null) {
56
- reasoning = {
57
- ...reasoning,
58
- effort: lcReasoning.effort,
59
- };
60
- }
61
- // 3. Call-level reasoning override
62
- const callReasoning = options
63
- ?.reasoning;
64
- if (callReasoning != null) {
65
- reasoning = { ...reasoning, ...callReasoning };
66
- }
67
- // 4. Legacy include_reasoning backward compatibility
68
- if (reasoning == null && this.includeReasoning === true) {
69
- reasoning = { enabled: true };
70
- }
71
- return reasoning;
72
- }
73
- _convertOpenAIDeltaToBaseMessageChunk(
74
- // eslint-disable-next-line @typescript-eslint/no-explicit-any
75
- delta, rawResponse, defaultRole) {
76
- const messageChunk = super._convertOpenAIDeltaToBaseMessageChunk(delta, rawResponse, defaultRole);
77
- if (delta.reasoning != null) {
78
- messageChunk.additional_kwargs.reasoning = delta.reasoning;
79
- }
80
- if (delta.reasoning_details != null) {
81
- messageChunk.additional_kwargs.reasoning_details =
82
- delta.reasoning_details;
83
- }
84
- return messageChunk;
85
- }
86
- async *_streamResponseChunks2(messages, options, runManager) {
87
- const messagesMapped = _convertMessagesToOpenAIParams(messages, this.model, {
88
- includeReasoningDetails: true,
89
- convertReasoningDetailsToContent: true,
90
- });
91
- const params = {
92
- ...this.invocationParams(options, {
93
- streaming: true,
94
- }),
95
- messages: messagesMapped,
96
- stream: true,
97
- };
98
- let defaultRole;
99
- const streamIterable = await this.completionWithRetry(params, options);
100
- let usage;
101
- // Store reasoning_details keyed by unique identifier to prevent incorrect merging
102
- // eslint-disable-next-line @typescript-eslint/no-explicit-any
103
- const reasoningTextByIndex = new Map();
104
- // eslint-disable-next-line @typescript-eslint/no-explicit-any
105
- const reasoningEncryptedById = new Map();
106
- for await (const data of streamIterable) {
107
- const choice = data.choices[0];
108
- if (data.usage) {
109
- usage = data.usage;
110
- }
111
- if (!choice) {
112
- continue;
113
- }
114
- const { delta } = choice;
115
- if (!delta) {
116
- continue;
117
- }
118
- // Accumulate reasoning_details from each delta
119
- // eslint-disable-next-line @typescript-eslint/no-explicit-any
120
- const deltaAny = delta;
121
- // Extract current chunk's reasoning text for streaming (before accumulation)
122
- let currentChunkReasoningText = '';
123
- if (deltaAny.reasoning_details != null &&
124
- Array.isArray(deltaAny.reasoning_details)) {
125
- for (const detail of deltaAny.reasoning_details) {
126
- // For encrypted reasoning (thought signatures), store by ID - MUST be separate
127
- if (detail.type === 'reasoning.encrypted' && detail.id) {
128
- reasoningEncryptedById.set(detail.id, {
129
- type: detail.type,
130
- id: detail.id,
131
- data: detail.data,
132
- format: detail.format,
133
- index: detail.index,
134
- });
135
- }
136
- else if (detail.type === 'reasoning.text') {
137
- // Extract current chunk's text for streaming
138
- currentChunkReasoningText += detail.text || '';
139
- // For text reasoning, accumulate text by index for final message
140
- const idx = detail.index ?? 0;
141
- const existing = reasoningTextByIndex.get(idx);
142
- if (existing) {
143
- // Only append text, keep other fields from first entry
144
- existing.text = (existing.text || '') + (detail.text || '');
145
- }
146
- else {
147
- reasoningTextByIndex.set(idx, {
148
- type: detail.type,
149
- text: detail.text || '',
150
- format: detail.format,
151
- index: idx,
152
- });
153
- }
154
- }
155
- }
156
- }
157
- const chunk = this._convertOpenAIDeltaToBaseMessageChunk(delta, data, defaultRole);
158
- // For models that send reasoning_details (Gemini style) instead of reasoning (DeepSeek style),
159
- // set the current chunk's reasoning text to additional_kwargs.reasoning for streaming
160
- if (currentChunkReasoningText && !chunk.additional_kwargs.reasoning) {
161
- chunk.additional_kwargs.reasoning = currentChunkReasoningText;
162
- }
163
- // IMPORTANT: Only set reasoning_details on the FINAL chunk to prevent
164
- // LangChain's chunk concatenation from corrupting the array
165
- // Check if this is the final chunk (has finish_reason)
166
- if (choice.finish_reason != null) {
167
- // Build properly structured reasoning_details array
168
- // Text entries first (but we only need the encrypted ones for thought signatures)
169
- // eslint-disable-next-line @typescript-eslint/no-explicit-any
170
- const finalReasoningDetails = [
171
- ...reasoningTextByIndex.values(),
172
- ...reasoningEncryptedById.values(),
173
- ];
174
- if (finalReasoningDetails.length > 0) {
175
- chunk.additional_kwargs.reasoning_details = finalReasoningDetails;
176
- }
177
- }
178
- else {
179
- // Clear reasoning_details from intermediate chunks to prevent concatenation issues
180
- delete chunk.additional_kwargs.reasoning_details;
181
- }
182
- defaultRole = delta.role ?? defaultRole;
183
- const newTokenIndices = {
184
- prompt: options.promptIndex ?? 0,
185
- completion: choice.index ?? 0,
186
- };
187
- if (typeof chunk.content !== 'string') {
188
- // eslint-disable-next-line no-console
189
- console.log('[WARNING]: Received non-string content from OpenAI. This is currently not supported.');
190
- continue;
191
- }
192
- // eslint-disable-next-line @typescript-eslint/no-explicit-any
193
- const generationInfo = { ...newTokenIndices };
194
- if (choice.finish_reason != null) {
195
- generationInfo.finish_reason = choice.finish_reason;
196
- generationInfo.system_fingerprint = data.system_fingerprint;
197
- generationInfo.model_name = data.model;
198
- generationInfo.service_tier = data.service_tier;
199
- }
200
- if (this.logprobs == true) {
201
- generationInfo.logprobs = choice.logprobs;
202
- }
203
- const generationChunk = new ChatGenerationChunk({
204
- message: chunk,
205
- text: chunk.content,
206
- generationInfo,
207
- });
208
- yield generationChunk;
209
- if (this._lc_stream_delay != null) {
210
- await new Promise((resolve) => setTimeout(resolve, this._lc_stream_delay));
211
- }
212
- await runManager?.handleLLMNewToken(generationChunk.text || '', newTokenIndices, undefined, undefined, undefined, { chunk: generationChunk });
213
- }
214
- if (usage) {
215
- const inputTokenDetails = {
216
- ...(usage.prompt_tokens_details?.audio_tokens != null && {
217
- audio: usage.prompt_tokens_details.audio_tokens,
218
- }),
219
- ...(usage.prompt_tokens_details?.cached_tokens != null && {
220
- cache_read: usage.prompt_tokens_details.cached_tokens,
221
- }),
222
- };
223
- const outputTokenDetails = {
224
- ...(usage.completion_tokens_details?.audio_tokens != null && {
225
- audio: usage.completion_tokens_details.audio_tokens,
226
- }),
227
- ...(usage.completion_tokens_details?.reasoning_tokens != null && {
228
- reasoning: usage.completion_tokens_details.reasoning_tokens,
229
- }),
230
- };
231
- const generationChunk = new ChatGenerationChunk({
232
- message: new AIMessageChunkClass({
233
- content: '',
234
- response_metadata: {
235
- usage: { ...usage },
236
- },
237
- usage_metadata: {
238
- input_tokens: usage.prompt_tokens,
239
- output_tokens: usage.completion_tokens,
240
- total_tokens: usage.total_tokens,
241
- ...(Object.keys(inputTokenDetails).length > 0 && {
242
- input_token_details: inputTokenDetails,
243
- }),
244
- ...(Object.keys(outputTokenDetails).length > 0 && {
245
- output_token_details: outputTokenDetails,
246
- }),
247
- },
248
- }),
249
- text: '',
250
- });
251
- yield generationChunk;
252
- if (this._lc_stream_delay != null) {
253
- await new Promise((resolve) => setTimeout(resolve, this._lc_stream_delay));
254
- }
255
- }
256
- if (options.signal?.aborted === true) {
257
- throw new Error('AbortError');
258
- }
259
- }
260
- }
261
- //# sourceMappingURL=index.js.map
@@ -1,181 +0,0 @@
1
- import { ChatOpenRouter } from './index';
2
- function createRouter(overrides = {}) {
3
- return new ChatOpenRouter({
4
- model: 'openrouter/test-model',
5
- apiKey: 'test-key',
6
- ...overrides,
7
- });
8
- }
9
- describe('ChatOpenRouter reasoning handling', () => {
10
- // ---------------------------------------------------------------
11
- // 1. Constructor reasoning config
12
- // ---------------------------------------------------------------
13
- describe('constructor reasoning config', () => {
14
- it('stores reasoning when passed directly', () => {
15
- const router = createRouter({ reasoning: { effort: 'high' } });
16
- const params = router.invocationParams();
17
- expect(params.reasoning).toEqual({ effort: 'high' });
18
- });
19
- });
20
- // ---------------------------------------------------------------
21
- // 2. modelKwargs reasoning extraction
22
- // ---------------------------------------------------------------
23
- describe('modelKwargs reasoning extraction', () => {
24
- it('extracts reasoning from modelKwargs and places it into params.reasoning', () => {
25
- const router = createRouter({
26
- modelKwargs: { reasoning: { effort: 'medium' } },
27
- });
28
- const params = router.invocationParams();
29
- expect(params.reasoning).toEqual({ effort: 'medium' });
30
- });
31
- it('does not leak reasoning into modelKwargs that reach the parent', () => {
32
- const router = createRouter({
33
- modelKwargs: {
34
- reasoning: { effort: 'medium' },
35
- },
36
- });
37
- const params = router.invocationParams();
38
- // reasoning should be the structured OpenRouter object, not buried in modelKwargs
39
- expect(params.reasoning).toEqual({ effort: 'medium' });
40
- });
41
- });
42
- // ---------------------------------------------------------------
43
- // 3. Reasoning merge precedence
44
- // ---------------------------------------------------------------
45
- describe('reasoning merge precedence', () => {
46
- it('constructor reasoning overrides modelKwargs.reasoning', () => {
47
- const router = createRouter({
48
- reasoning: { effort: 'high' },
49
- modelKwargs: { reasoning: { effort: 'low' } },
50
- });
51
- const params = router.invocationParams();
52
- expect(params.reasoning).toEqual({ effort: 'high' });
53
- });
54
- it('merges non-overlapping keys from modelKwargs.reasoning and constructor reasoning', () => {
55
- const router = createRouter({
56
- reasoning: { effort: 'high' },
57
- modelKwargs: { reasoning: { max_tokens: 5000 } },
58
- });
59
- const params = router.invocationParams();
60
- expect(params.reasoning).toEqual({ effort: 'high', max_tokens: 5000 });
61
- });
62
- });
63
- // ---------------------------------------------------------------
64
- // 4. invocationParams output
65
- // ---------------------------------------------------------------
66
- describe('invocationParams output', () => {
67
- it('includes reasoning object in params', () => {
68
- const router = createRouter({ reasoning: { effort: 'high' } });
69
- const params = router.invocationParams();
70
- expect(params.reasoning).toBeDefined();
71
- expect(params.reasoning).toEqual({ effort: 'high' });
72
- });
73
- it('does NOT include reasoning_effort in params', () => {
74
- const router = createRouter({ reasoning: { effort: 'high' } });
75
- const params = router.invocationParams();
76
- expect(params.reasoning_effort).toBeUndefined();
77
- });
78
- it('does not include reasoning when none is configured', () => {
79
- const router = createRouter();
80
- const params = router.invocationParams();
81
- expect(params.reasoning).toBeUndefined();
82
- expect(params.reasoning_effort).toBeUndefined();
83
- });
84
- });
85
- // ---------------------------------------------------------------
86
- // 5. Legacy include_reasoning
87
- // ---------------------------------------------------------------
88
- describe('legacy include_reasoning', () => {
89
- it('produces { enabled: true } when only include_reasoning is true', () => {
90
- const router = createRouter({ include_reasoning: true });
91
- const params = router.invocationParams();
92
- expect(params.reasoning).toEqual({ enabled: true });
93
- });
94
- it('does not produce reasoning when include_reasoning is false', () => {
95
- const router = createRouter({ include_reasoning: false });
96
- const params = router.invocationParams();
97
- expect(params.reasoning).toBeUndefined();
98
- });
99
- });
100
- // ---------------------------------------------------------------
101
- // 6. Legacy include_reasoning ignored when reasoning is provided
102
- // ---------------------------------------------------------------
103
- describe('legacy include_reasoning ignored when reasoning provided', () => {
104
- it('reasoning wins over include_reasoning', () => {
105
- const router = createRouter({
106
- reasoning: { effort: 'medium' },
107
- include_reasoning: true,
108
- });
109
- const params = router.invocationParams();
110
- // Should use the structured reasoning, NOT fall back to { enabled: true }
111
- expect(params.reasoning).toEqual({ effort: 'medium' });
112
- });
113
- it('reasoning from modelKwargs also wins over include_reasoning', () => {
114
- const router = createRouter({
115
- modelKwargs: { reasoning: { effort: 'low' } },
116
- include_reasoning: true,
117
- });
118
- const params = router.invocationParams();
119
- expect(params.reasoning).toEqual({ effort: 'low' });
120
- });
121
- });
122
- // ---------------------------------------------------------------
123
- // 7. Various effort levels (OpenRouter-specific)
124
- // ---------------------------------------------------------------
125
- describe('various effort levels', () => {
126
- const efforts = [
127
- { effort: 'xhigh' },
128
- { effort: 'none' },
129
- { effort: 'minimal' },
130
- { effort: 'high' },
131
- { effort: 'medium' },
132
- { effort: 'low' },
133
- ];
134
- it.each(efforts)('supports effort level "$effort"', ({ effort }) => {
135
- const router = createRouter({ reasoning: { effort } });
136
- const params = router.invocationParams();
137
- expect(params.reasoning).toEqual({ effort });
138
- expect(params.reasoning_effort).toBeUndefined();
139
- });
140
- });
141
- // ---------------------------------------------------------------
142
- // 8. max_tokens reasoning
143
- // ---------------------------------------------------------------
144
- describe('max_tokens reasoning', () => {
145
- it('passes max_tokens in reasoning object', () => {
146
- const router = createRouter({
147
- reasoning: { max_tokens: 8000 },
148
- });
149
- const params = router.invocationParams();
150
- expect(params.reasoning).toEqual({ max_tokens: 8000 });
151
- });
152
- it('combines max_tokens with effort', () => {
153
- const router = createRouter({
154
- reasoning: { effort: 'high', max_tokens: 8000 },
155
- });
156
- const params = router.invocationParams();
157
- expect(params.reasoning).toEqual({ effort: 'high', max_tokens: 8000 });
158
- expect(params.reasoning_effort).toBeUndefined();
159
- });
160
- });
161
- // ---------------------------------------------------------------
162
- // 9. exclude reasoning
163
- // ---------------------------------------------------------------
164
- describe('exclude reasoning', () => {
165
- it('passes exclude flag in reasoning object', () => {
166
- const router = createRouter({
167
- reasoning: { effort: 'high', exclude: true },
168
- });
169
- const params = router.invocationParams();
170
- expect(params.reasoning).toEqual({ effort: 'high', exclude: true });
171
- });
172
- it('supports exclude without effort', () => {
173
- const router = createRouter({
174
- reasoning: { exclude: true },
175
- });
176
- const params = router.invocationParams();
177
- expect(params.reasoning).toEqual({ exclude: true });
178
- });
179
- });
180
- });
181
- //# sourceMappingURL=reasoning.test.js.map
@@ -1,36 +0,0 @@
1
- // src/llm/providers.ts
2
- import { ChatMistralAI } from '@langchain/mistralai';
3
- import { AzureChatOpenAI, ChatDeepSeek, ChatMoonshot, ChatOpenAI, ChatXAI, } from '@/llm/openai';
4
- import { CustomChatGoogleGenerativeAI } from '@/llm/google';
5
- import { CustomChatBedrockConverse } from '@/llm/bedrock';
6
- import { CustomAnthropic } from '@/llm/anthropic';
7
- import { ChatOpenRouter } from '@/llm/openrouter';
8
- import { ChatVertexAI } from '@/llm/vertexai';
9
- import { Providers } from '@/common';
10
// Registry mapping each supported provider identifier to the chat-model
// class used to instantiate it. Note that MISTRALAI and MISTRAL are aliases
// resolving to the same ChatMistralAI implementation, and ANTHROPIC is wired
// to the project's CustomAnthropic wrapper rather than the upstream
// ChatAnthropic (see the commented-out entry below).
export const llmProviders = {
    [Providers.XAI]: ChatXAI,
    [Providers.OPENAI]: ChatOpenAI,
    [Providers.AZURE]: AzureChatOpenAI,
    [Providers.VERTEXAI]: ChatVertexAI,
    [Providers.DEEPSEEK]: ChatDeepSeek,
    [Providers.MISTRALAI]: ChatMistralAI,
    [Providers.MISTRAL]: ChatMistralAI,
    [Providers.ANTHROPIC]: CustomAnthropic,
    [Providers.OPENROUTER]: ChatOpenRouter,
    [Providers.BEDROCK]: CustomChatBedrockConverse,
    // [Providers.ANTHROPIC]: ChatAnthropic,
    [Providers.GOOGLE]: CustomChatGoogleGenerativeAI,
    [Providers.MOONSHOT]: ChatMoonshot,
};
25
// Providers whose tool-call streaming must be assembled manually by the
// caller instead of being handled natively by the underlying client.
export const manualToolStreamProviders = new Set([Providers.ANTHROPIC, Providers.BEDROCK]);
29
/**
 * Resolves the chat-model class registered for a provider.
 *
 * @param provider - Provider identifier used as a key into `llmProviders`.
 * @returns The chat-model class constructor for that provider.
 * @throws Error when no class is registered for the given provider.
 */
export const getChatModelClass = (provider) => {
    const modelClass = llmProviders[provider];
    if (!modelClass) {
        throw new Error(`Unsupported LLM provider: ${provider}`);
    }
    return modelClass;
};
36
- //# sourceMappingURL=providers.js.map
package/src/llm/text.js DELETED
@@ -1,65 +0,0 @@
1
/**
 * Streams a fixed string as a sequence of small chunks with a configurable
 * delay between them, simulating incremental (token-by-token) LLM output.
 */
export class TextStream {
    text;
    currentIndex;
    minChunkSize;
    maxChunkSize;
    delay;
    firstWordChunk;

    // Characters that terminate a "word" chunk when firstWordChunk is enabled.
    static BOUNDARIES = new Set([' ', '.', ',', '!', '?', ';', ':']);

    /**
     * @param text - Full text to stream.
     * @param options - Optional tuning knobs: minChunkSize (default 4),
     *   maxChunkSize (default 8), delay in ms between chunks (default 20),
     *   firstWordChunk to break on word boundaries (default true).
     */
    constructor(text, options = {}) {
        this.text = text;
        this.currentIndex = 0;
        this.minChunkSize = options.minChunkSize ?? 4;
        this.maxChunkSize = options.maxChunkSize ?? 8;
        this.delay = options.delay ?? 20;
        this.firstWordChunk = options.firstWordChunk ?? true;
    }

    // Uniform random integer in the half-open range [min, max).
    randomInt(min, max) {
        return min + Math.floor(Math.random() * (max - min));
    }

    /**
     * Returns the smallest cut length >= minSize that lands just past a
     * boundary character, or the full remaining length when no boundary
     * (or not enough text) exists.
     */
    findFirstWordBoundary(text, minSize) {
        if (minSize >= text.length) {
            return text.length;
        }
        // Honor the minimum size, then scan forward for the next boundary.
        for (let i = minSize; i < text.length; i++) {
            if (TextStream.BOUNDARIES.has(text[i])) {
                return i + 1; // include the boundary character itself
            }
        }
        return text.length;
    }

    /**
     * Async generator yielding successive chunks of `text`. Stops early when
     * `signal` is aborted; `progressCallback` (if given) receives each chunk
     * just before it is yielded.
     */
    async *generateText(signal, progressCallback) {
        while (this.currentIndex < this.text.length) {
            if (signal?.aborted === true) {
                break;
            }
            await new Promise((resolve) => setTimeout(resolve, this.delay));
            const rest = this.text.slice(this.currentIndex);
            const size = this.firstWordChunk
                ? this.findFirstWordBoundary(rest, this.minChunkSize)
                : Math.min(this.randomInt(this.minChunkSize, this.maxChunkSize + 1), rest.length);
            const piece = rest.slice(0, size);
            progressCallback?.(piece);
            yield piece;
            this.currentIndex += size;
        }
    }
}
65
- //# sourceMappingURL=text.js.map