@librechat/agents 2.4.322 → 3.0.0-rc10

This diff shows the published contents of the two package versions as they appear in their public registries and is provided for informational purposes only.
Files changed (279)
  1. package/dist/cjs/agents/AgentContext.cjs +218 -0
  2. package/dist/cjs/agents/AgentContext.cjs.map +1 -0
  3. package/dist/cjs/common/enum.cjs +15 -5
  4. package/dist/cjs/common/enum.cjs.map +1 -1
  5. package/dist/cjs/events.cjs +10 -6
  6. package/dist/cjs/events.cjs.map +1 -1
  7. package/dist/cjs/graphs/Graph.cjs +309 -213
  8. package/dist/cjs/graphs/Graph.cjs.map +1 -1
  9. package/dist/cjs/graphs/MultiAgentGraph.cjs +507 -0
  10. package/dist/cjs/graphs/MultiAgentGraph.cjs.map +1 -0
  11. package/dist/cjs/llm/anthropic/index.cjs +54 -9
  12. package/dist/cjs/llm/anthropic/index.cjs.map +1 -1
  13. package/dist/cjs/llm/anthropic/types.cjs.map +1 -1
  14. package/dist/cjs/llm/anthropic/utils/message_inputs.cjs +52 -6
  15. package/dist/cjs/llm/anthropic/utils/message_inputs.cjs.map +1 -1
  16. package/dist/cjs/llm/anthropic/utils/message_outputs.cjs +22 -2
  17. package/dist/cjs/llm/anthropic/utils/message_outputs.cjs.map +1 -1
  18. package/dist/cjs/llm/anthropic/utils/tools.cjs +29 -0
  19. package/dist/cjs/llm/anthropic/utils/tools.cjs.map +1 -0
  20. package/dist/cjs/llm/google/index.cjs +144 -0
  21. package/dist/cjs/llm/google/index.cjs.map +1 -0
  22. package/dist/cjs/llm/google/utils/common.cjs +477 -0
  23. package/dist/cjs/llm/google/utils/common.cjs.map +1 -0
  24. package/dist/cjs/llm/ollama/index.cjs +67 -0
  25. package/dist/cjs/llm/ollama/index.cjs.map +1 -0
  26. package/dist/cjs/llm/ollama/utils.cjs +158 -0
  27. package/dist/cjs/llm/ollama/utils.cjs.map +1 -0
  28. package/dist/cjs/llm/openai/index.cjs +422 -3
  29. package/dist/cjs/llm/openai/index.cjs.map +1 -1
  30. package/dist/cjs/llm/openai/utils/index.cjs +672 -0
  31. package/dist/cjs/llm/openai/utils/index.cjs.map +1 -0
  32. package/dist/cjs/llm/providers.cjs +15 -15
  33. package/dist/cjs/llm/providers.cjs.map +1 -1
  34. package/dist/cjs/llm/text.cjs +14 -3
  35. package/dist/cjs/llm/text.cjs.map +1 -1
  36. package/dist/cjs/llm/vertexai/index.cjs +330 -0
  37. package/dist/cjs/llm/vertexai/index.cjs.map +1 -0
  38. package/dist/cjs/main.cjs +11 -0
  39. package/dist/cjs/main.cjs.map +1 -1
  40. package/dist/cjs/run.cjs +137 -85
  41. package/dist/cjs/run.cjs.map +1 -1
  42. package/dist/cjs/stream.cjs +86 -52
  43. package/dist/cjs/stream.cjs.map +1 -1
  44. package/dist/cjs/tools/ToolNode.cjs +10 -4
  45. package/dist/cjs/tools/ToolNode.cjs.map +1 -1
  46. package/dist/cjs/tools/handlers.cjs +119 -13
  47. package/dist/cjs/tools/handlers.cjs.map +1 -1
  48. package/dist/cjs/tools/search/anthropic.cjs +40 -0
  49. package/dist/cjs/tools/search/anthropic.cjs.map +1 -0
  50. package/dist/cjs/tools/search/firecrawl.cjs +55 -9
  51. package/dist/cjs/tools/search/firecrawl.cjs.map +1 -1
  52. package/dist/cjs/tools/search/format.cjs +6 -6
  53. package/dist/cjs/tools/search/format.cjs.map +1 -1
  54. package/dist/cjs/tools/search/rerankers.cjs +7 -29
  55. package/dist/cjs/tools/search/rerankers.cjs.map +1 -1
  56. package/dist/cjs/tools/search/search.cjs +86 -16
  57. package/dist/cjs/tools/search/search.cjs.map +1 -1
  58. package/dist/cjs/tools/search/tool.cjs +4 -2
  59. package/dist/cjs/tools/search/tool.cjs.map +1 -1
  60. package/dist/cjs/tools/search/utils.cjs +1 -1
  61. package/dist/cjs/tools/search/utils.cjs.map +1 -1
  62. package/dist/cjs/utils/events.cjs +31 -0
  63. package/dist/cjs/utils/events.cjs.map +1 -0
  64. package/dist/cjs/utils/title.cjs +57 -21
  65. package/dist/cjs/utils/title.cjs.map +1 -1
  66. package/dist/cjs/utils/tokens.cjs +54 -7
  67. package/dist/cjs/utils/tokens.cjs.map +1 -1
  68. package/dist/esm/agents/AgentContext.mjs +216 -0
  69. package/dist/esm/agents/AgentContext.mjs.map +1 -0
  70. package/dist/esm/common/enum.mjs +16 -6
  71. package/dist/esm/common/enum.mjs.map +1 -1
  72. package/dist/esm/events.mjs +10 -6
  73. package/dist/esm/events.mjs.map +1 -1
  74. package/dist/esm/graphs/Graph.mjs +311 -215
  75. package/dist/esm/graphs/Graph.mjs.map +1 -1
  76. package/dist/esm/graphs/MultiAgentGraph.mjs +505 -0
  77. package/dist/esm/graphs/MultiAgentGraph.mjs.map +1 -0
  78. package/dist/esm/llm/anthropic/index.mjs +54 -9
  79. package/dist/esm/llm/anthropic/index.mjs.map +1 -1
  80. package/dist/esm/llm/anthropic/types.mjs.map +1 -1
  81. package/dist/esm/llm/anthropic/utils/message_inputs.mjs +52 -6
  82. package/dist/esm/llm/anthropic/utils/message_inputs.mjs.map +1 -1
  83. package/dist/esm/llm/anthropic/utils/message_outputs.mjs +22 -2
  84. package/dist/esm/llm/anthropic/utils/message_outputs.mjs.map +1 -1
  85. package/dist/esm/llm/anthropic/utils/tools.mjs +27 -0
  86. package/dist/esm/llm/anthropic/utils/tools.mjs.map +1 -0
  87. package/dist/esm/llm/google/index.mjs +142 -0
  88. package/dist/esm/llm/google/index.mjs.map +1 -0
  89. package/dist/esm/llm/google/utils/common.mjs +471 -0
  90. package/dist/esm/llm/google/utils/common.mjs.map +1 -0
  91. package/dist/esm/llm/ollama/index.mjs +65 -0
  92. package/dist/esm/llm/ollama/index.mjs.map +1 -0
  93. package/dist/esm/llm/ollama/utils.mjs +155 -0
  94. package/dist/esm/llm/ollama/utils.mjs.map +1 -0
  95. package/dist/esm/llm/openai/index.mjs +421 -4
  96. package/dist/esm/llm/openai/index.mjs.map +1 -1
  97. package/dist/esm/llm/openai/utils/index.mjs +666 -0
  98. package/dist/esm/llm/openai/utils/index.mjs.map +1 -0
  99. package/dist/esm/llm/providers.mjs +5 -5
  100. package/dist/esm/llm/providers.mjs.map +1 -1
  101. package/dist/esm/llm/text.mjs +14 -3
  102. package/dist/esm/llm/text.mjs.map +1 -1
  103. package/dist/esm/llm/vertexai/index.mjs +328 -0
  104. package/dist/esm/llm/vertexai/index.mjs.map +1 -0
  105. package/dist/esm/main.mjs +6 -5
  106. package/dist/esm/main.mjs.map +1 -1
  107. package/dist/esm/run.mjs +138 -87
  108. package/dist/esm/run.mjs.map +1 -1
  109. package/dist/esm/stream.mjs +88 -55
  110. package/dist/esm/stream.mjs.map +1 -1
  111. package/dist/esm/tools/ToolNode.mjs +10 -4
  112. package/dist/esm/tools/ToolNode.mjs.map +1 -1
  113. package/dist/esm/tools/handlers.mjs +119 -15
  114. package/dist/esm/tools/handlers.mjs.map +1 -1
  115. package/dist/esm/tools/search/anthropic.mjs +37 -0
  116. package/dist/esm/tools/search/anthropic.mjs.map +1 -0
  117. package/dist/esm/tools/search/firecrawl.mjs +55 -9
  118. package/dist/esm/tools/search/firecrawl.mjs.map +1 -1
  119. package/dist/esm/tools/search/format.mjs +7 -7
  120. package/dist/esm/tools/search/format.mjs.map +1 -1
  121. package/dist/esm/tools/search/rerankers.mjs +7 -29
  122. package/dist/esm/tools/search/rerankers.mjs.map +1 -1
  123. package/dist/esm/tools/search/search.mjs +86 -16
  124. package/dist/esm/tools/search/search.mjs.map +1 -1
  125. package/dist/esm/tools/search/tool.mjs +4 -2
  126. package/dist/esm/tools/search/tool.mjs.map +1 -1
  127. package/dist/esm/tools/search/utils.mjs +1 -1
  128. package/dist/esm/tools/search/utils.mjs.map +1 -1
  129. package/dist/esm/utils/events.mjs +29 -0
  130. package/dist/esm/utils/events.mjs.map +1 -0
  131. package/dist/esm/utils/title.mjs +57 -22
  132. package/dist/esm/utils/title.mjs.map +1 -1
  133. package/dist/esm/utils/tokens.mjs +54 -8
  134. package/dist/esm/utils/tokens.mjs.map +1 -1
  135. package/dist/types/agents/AgentContext.d.ts +91 -0
  136. package/dist/types/common/enum.d.ts +17 -7
  137. package/dist/types/events.d.ts +5 -4
  138. package/dist/types/graphs/Graph.d.ts +64 -67
  139. package/dist/types/graphs/MultiAgentGraph.d.ts +47 -0
  140. package/dist/types/graphs/index.d.ts +1 -0
  141. package/dist/types/llm/anthropic/index.d.ts +11 -0
  142. package/dist/types/llm/anthropic/types.d.ts +9 -3
  143. package/dist/types/llm/anthropic/utils/message_inputs.d.ts +1 -1
  144. package/dist/types/llm/anthropic/utils/output_parsers.d.ts +4 -4
  145. package/dist/types/llm/anthropic/utils/tools.d.ts +3 -0
  146. package/dist/types/llm/google/index.d.ts +13 -0
  147. package/dist/types/llm/google/types.d.ts +32 -0
  148. package/dist/types/llm/google/utils/common.d.ts +19 -0
  149. package/dist/types/llm/google/utils/tools.d.ts +10 -0
  150. package/dist/types/llm/google/utils/zod_to_genai_parameters.d.ts +14 -0
  151. package/dist/types/llm/ollama/index.d.ts +7 -0
  152. package/dist/types/llm/ollama/utils.d.ts +7 -0
  153. package/dist/types/llm/openai/index.d.ts +82 -3
  154. package/dist/types/llm/openai/types.d.ts +10 -0
  155. package/dist/types/llm/openai/utils/index.d.ts +20 -0
  156. package/dist/types/llm/text.d.ts +1 -1
  157. package/dist/types/llm/vertexai/index.d.ts +293 -0
  158. package/dist/types/messages/reducer.d.ts +9 -0
  159. package/dist/types/run.d.ts +19 -12
  160. package/dist/types/stream.d.ts +10 -3
  161. package/dist/types/tools/CodeExecutor.d.ts +2 -2
  162. package/dist/types/tools/ToolNode.d.ts +1 -1
  163. package/dist/types/tools/handlers.d.ts +17 -4
  164. package/dist/types/tools/search/anthropic.d.ts +16 -0
  165. package/dist/types/tools/search/firecrawl.d.ts +15 -0
  166. package/dist/types/tools/search/rerankers.d.ts +0 -1
  167. package/dist/types/tools/search/types.d.ts +30 -9
  168. package/dist/types/types/graph.d.ts +129 -15
  169. package/dist/types/types/llm.d.ts +25 -10
  170. package/dist/types/types/run.d.ts +50 -8
  171. package/dist/types/types/stream.d.ts +16 -2
  172. package/dist/types/types/tools.d.ts +1 -1
  173. package/dist/types/utils/events.d.ts +6 -0
  174. package/dist/types/utils/title.d.ts +2 -1
  175. package/dist/types/utils/tokens.d.ts +24 -0
  176. package/package.json +41 -17
  177. package/src/agents/AgentContext.ts +315 -0
  178. package/src/common/enum.ts +15 -5
  179. package/src/events.ts +24 -13
  180. package/src/graphs/Graph.ts +495 -313
  181. package/src/graphs/MultiAgentGraph.ts +598 -0
  182. package/src/graphs/index.ts +2 -1
  183. package/src/llm/anthropic/Jacob_Lee_Resume_2023.pdf +0 -0
  184. package/src/llm/anthropic/index.ts +78 -13
  185. package/src/llm/anthropic/llm.spec.ts +491 -115
  186. package/src/llm/anthropic/types.ts +39 -3
  187. package/src/llm/anthropic/utils/message_inputs.ts +67 -11
  188. package/src/llm/anthropic/utils/message_outputs.ts +21 -2
  189. package/src/llm/anthropic/utils/output_parsers.ts +25 -6
  190. package/src/llm/anthropic/utils/tools.ts +29 -0
  191. package/src/llm/google/index.ts +218 -0
  192. package/src/llm/google/types.ts +43 -0
  193. package/src/llm/google/utils/common.ts +646 -0
  194. package/src/llm/google/utils/tools.ts +160 -0
  195. package/src/llm/google/utils/zod_to_genai_parameters.ts +86 -0
  196. package/src/llm/ollama/index.ts +89 -0
  197. package/src/llm/ollama/utils.ts +193 -0
  198. package/src/llm/openai/index.ts +641 -14
  199. package/src/llm/openai/types.ts +24 -0
  200. package/src/llm/openai/utils/index.ts +912 -0
  201. package/src/llm/openai/utils/isReasoningModel.test.ts +90 -0
  202. package/src/llm/providers.ts +10 -9
  203. package/src/llm/text.ts +26 -7
  204. package/src/llm/vertexai/index.ts +360 -0
  205. package/src/messages/reducer.ts +80 -0
  206. package/src/run.ts +196 -116
  207. package/src/scripts/ant_web_search.ts +158 -0
  208. package/src/scripts/args.ts +12 -8
  209. package/src/scripts/cli4.ts +29 -21
  210. package/src/scripts/cli5.ts +29 -21
  211. package/src/scripts/code_exec.ts +54 -23
  212. package/src/scripts/code_exec_files.ts +48 -17
  213. package/src/scripts/code_exec_simple.ts +46 -27
  214. package/src/scripts/handoff-test.ts +135 -0
  215. package/src/scripts/image.ts +52 -20
  216. package/src/scripts/multi-agent-chain.ts +278 -0
  217. package/src/scripts/multi-agent-conditional.ts +220 -0
  218. package/src/scripts/multi-agent-document-review-chain.ts +197 -0
  219. package/src/scripts/multi-agent-hybrid-flow.ts +310 -0
  220. package/src/scripts/multi-agent-parallel.ts +341 -0
  221. package/src/scripts/multi-agent-sequence.ts +212 -0
  222. package/src/scripts/multi-agent-supervisor.ts +362 -0
  223. package/src/scripts/multi-agent-test.ts +186 -0
  224. package/src/scripts/search.ts +1 -9
  225. package/src/scripts/simple.ts +25 -10
  226. package/src/scripts/test-custom-prompt-key.ts +145 -0
  227. package/src/scripts/test-handoff-input.ts +170 -0
  228. package/src/scripts/test-multi-agent-list-handoff.ts +261 -0
  229. package/src/scripts/test-tools-before-handoff.ts +233 -0
  230. package/src/scripts/tools.ts +48 -18
  231. package/src/specs/anthropic.simple.test.ts +150 -34
  232. package/src/specs/azure.simple.test.ts +325 -0
  233. package/src/specs/openai.simple.test.ts +140 -33
  234. package/src/specs/openrouter.simple.test.ts +107 -0
  235. package/src/specs/prune.test.ts +4 -9
  236. package/src/specs/reasoning.test.ts +80 -44
  237. package/src/specs/token-memoization.test.ts +39 -0
  238. package/src/stream.test.ts +94 -0
  239. package/src/stream.ts +143 -61
  240. package/src/tools/ToolNode.ts +21 -7
  241. package/src/tools/handlers.ts +192 -18
  242. package/src/tools/search/anthropic.ts +51 -0
  243. package/src/tools/search/firecrawl.ts +69 -20
  244. package/src/tools/search/format.ts +6 -8
  245. package/src/tools/search/rerankers.ts +7 -40
  246. package/src/tools/search/search.ts +97 -16
  247. package/src/tools/search/tool.ts +5 -2
  248. package/src/tools/search/types.ts +30 -10
  249. package/src/tools/search/utils.ts +1 -1
  250. package/src/types/graph.ts +318 -103
  251. package/src/types/llm.ts +26 -12
  252. package/src/types/run.ts +56 -13
  253. package/src/types/stream.ts +22 -1
  254. package/src/types/tools.ts +16 -10
  255. package/src/utils/events.ts +32 -0
  256. package/src/utils/llmConfig.ts +19 -7
  257. package/src/utils/title.ts +104 -30
  258. package/src/utils/tokens.ts +69 -10
  259. package/dist/types/scripts/abort.d.ts +0 -1
  260. package/dist/types/scripts/args.d.ts +0 -6
  261. package/dist/types/scripts/caching.d.ts +0 -1
  262. package/dist/types/scripts/cli.d.ts +0 -1
  263. package/dist/types/scripts/cli2.d.ts +0 -1
  264. package/dist/types/scripts/cli3.d.ts +0 -1
  265. package/dist/types/scripts/cli4.d.ts +0 -1
  266. package/dist/types/scripts/cli5.d.ts +0 -1
  267. package/dist/types/scripts/code_exec.d.ts +0 -1
  268. package/dist/types/scripts/code_exec_files.d.ts +0 -1
  269. package/dist/types/scripts/code_exec_simple.d.ts +0 -1
  270. package/dist/types/scripts/content.d.ts +0 -1
  271. package/dist/types/scripts/empty_input.d.ts +0 -1
  272. package/dist/types/scripts/image.d.ts +0 -1
  273. package/dist/types/scripts/memory.d.ts +0 -1
  274. package/dist/types/scripts/search.d.ts +0 -1
  275. package/dist/types/scripts/simple.d.ts +0 -1
  276. package/dist/types/scripts/stream.d.ts +0 -1
  277. package/dist/types/scripts/thinking.d.ts +0 -1
  278. package/dist/types/scripts/tools.d.ts +0 -1
  279. package/dist/types/specs/spec.utils.d.ts +0 -1
package/dist/cjs/graphs/Graph.cjs
@@ -4,28 +4,26 @@ var nanoid = require('nanoid');
  var stream = require('@langchain/core/utils/stream');
  var googleVertexai = require('@langchain/google-vertexai');
  var langgraph = require('@langchain/langgraph');
- var dispatch = require('@langchain/core/callbacks/dispatch');
+ var runnables = require('@langchain/core/runnables');
  var messages = require('@langchain/core/messages');
  var _enum = require('../common/enum.cjs');
- var providers = require('../llm/providers.cjs');
- var ToolNode = require('../tools/ToolNode.cjs');
  var core = require('../messages/core.cjs');
  var prune = require('../messages/prune.cjs');
  var graph = require('../utils/graph.cjs');
  var llm = require('../utils/llm.cjs');
  var run = require('../utils/run.cjs');
  require('js-tiktoken/lite');
+ var providers = require('../llm/providers.cjs');
+ var ToolNode = require('../tools/ToolNode.cjs');
  var index = require('../llm/openai/index.cjs');
+ var events = require('../utils/events.cjs');
+ var AgentContext = require('../agents/AgentContext.cjs');
  var fake = require('../llm/fake.cjs');

  /* eslint-disable no-console */
  // src/graphs/Graph.ts
  const { AGENT, TOOLS } = _enum.GraphNodeKeys;
  class Graph {
- lastToken;
- tokenTypeSwitch;
- reasoningKey = 'reasoning_content';
- currentTokenType = _enum.ContentTypes.TEXT;
  messageStepHasToolCalls = new Map();
  messageIdsByStepKey = new Map();
  prelimMessageIdsByStepKey = new Map();
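The hunk above swaps the direct `@langchain/core/callbacks/dispatch` import for a new `../utils/events` module whose `safeDispatchCustomEvent` is used throughout the rest of this file. The wrapper below is a hypothetical sketch of that kind of guard, not the published `utils/events.cjs` source; only the function name and call shape are taken from this diff.

```ts
import { dispatchCustomEvent } from '@langchain/core/callbacks/dispatch';
import type { RunnableConfig } from '@langchain/core/runnables';

/**
 * Hypothetical guard around LangChain's custom-event dispatcher:
 * dispatchCustomEvent throws when invoked outside a runnable context,
 * so event emission should never be able to abort a run.
 */
export async function safeDispatchCustomEvent(
  name: string,
  payload: unknown,
  config?: RunnableConfig
): Promise<void> {
  try {
    await dispatchCustomEvent(name, payload, config);
  } catch (error) {
    console.warn(`Skipped custom event "${name}":`, error);
  }
}
```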
@@ -34,71 +32,37 @@ class Graph {
  stepKeyIds = new Map();
  contentIndexMap = new Map();
  toolCallStepIds = new Map();
- currentUsage;
- indexTokenCountMap = {};
- maxContextTokens;
- pruneMessages;
- /** The amount of time that should pass before another consecutive API call */
- streamBuffer;
- tokenCounter;
  signal;
+ /** Set of invoked tool call IDs from non-message run steps completed mid-run, if any */
+ invokedToolIds;
+ handlerRegistry;
  }
  class StandardGraph extends Graph {
- graphState;
- clientOptions;
- boundModel;
- /** The last recorded timestamp that a stream API call was invoked */
- lastStreamCall;
- handlerRegistry;
- systemMessage;
+ overrideModel;
+ /** Optional compile options passed into workflow.compile() */
+ compileOptions;
  messages = [];
  runId;
- tools;
- toolMap;
  startIndex = 0;
- provider;
- toolEnd;
  signal;
- constructor({ runId, tools, signal, toolMap, provider, streamBuffer, instructions, reasoningKey, clientOptions, toolEnd = false, additional_instructions = '', }) {
+ /** Map of agent contexts by agent ID */
+ agentContexts = new Map();
+ /** Default agent ID to use */
+ defaultAgentId;
+ constructor({
+ // parent-level graph inputs
+ runId, signal, agents, tokenCounter, indexTokenCountMap, }) {
  super();
  this.runId = runId;
- this.tools = tools;
  this.signal = signal;
- this.toolEnd = toolEnd;
- this.toolMap = toolMap;
- this.provider = provider;
- this.streamBuffer = streamBuffer;
- this.clientOptions = clientOptions;
- this.graphState = this.createGraphState();
- this.boundModel = this.initializeModel();
- if (reasoningKey) {
- this.reasoningKey = reasoningKey;
+ if (agents.length === 0) {
+ throw new Error('At least one agent configuration is required');
  }
- let finalInstructions = instructions;
- if (additional_instructions) {
- finalInstructions =
- finalInstructions != null && finalInstructions
- ? `${finalInstructions}\n\n${additional_instructions}`
- : additional_instructions;
- }
- if (finalInstructions != null &&
- finalInstructions &&
- provider === _enum.Providers.ANTHROPIC &&
- (clientOptions.clientOptions?.defaultHeaders?.['anthropic-beta']?.includes('prompt-caching') ??
- false)) {
- finalInstructions = {
- content: [
- {
- type: 'text',
- text: instructions,
- cache_control: { type: 'ephemeral' },
- },
- ],
- };
- }
- if (finalInstructions != null && finalInstructions !== '') {
- this.systemMessage = new messages.SystemMessage(finalInstructions);
+ for (const agentConfig of agents) {
+ const agentContext = AgentContext.AgentContext.fromConfig(agentConfig, tokenCounter, indexTokenCountMap);
+ this.agentContexts.set(agentConfig.agentId, agentContext);
  }
+ this.defaultAgentId = agents[0].agentId;
  }
  /* Init */
  resetValues(keepContent) {
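For callers, the visible effect of this hunk is the constructor signature: the flat single-agent options give way to a parent-level `{ runId, signal, agents, tokenCounter, indexTokenCountMap }` object. The sketch below illustrates the new shape; only those parent-level keys and `agentId` appear verbatim here, so the remaining per-agent fields are inferred from the AgentContext properties used later in this file and may not match the published types exactly. It also assumes `StandardGraph` and `Providers` stay exported from the package root as in 2.x.

```ts
import { StandardGraph, Providers } from '@librechat/agents';

const controller = new AbortController();

// Hypothetical usage of the 3.0.0-rc constructor; field names beyond
// runId/signal/agents/tokenCounter/indexTokenCountMap/agentId are assumptions.
const graph = new StandardGraph({
  runId: 'run_123',
  signal: controller.signal,
  // any text -> count function; a naive word count keeps the sketch self-contained
  tokenCounter: (text: string) => text.split(/\s+/).length,
  indexTokenCountMap: {},
  agents: [
    {
      agentId: 'default', // keys the internal agentContexts Map
      provider: Providers.OPENAI, // inferred per-agent field
      clientOptions: { model: 'gpt-4o-mini' }, // inferred per-agent field
      instructions: 'You are a helpful assistant.', // inferred per-agent field
      tools: [], // inferred per-agent field
    },
  ],
});
```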
@@ -111,15 +75,12 @@ class StandardGraph extends Graph {
  this.stepKeyIds = graph.resetIfNotEmpty(this.stepKeyIds, new Map());
  this.toolCallStepIds = graph.resetIfNotEmpty(this.toolCallStepIds, new Map());
  this.messageIdsByStepKey = graph.resetIfNotEmpty(this.messageIdsByStepKey, new Map());
- this.messageStepHasToolCalls = graph.resetIfNotEmpty(this.prelimMessageIdsByStepKey, new Map());
+ this.messageStepHasToolCalls = graph.resetIfNotEmpty(this.messageStepHasToolCalls, new Map());
  this.prelimMessageIdsByStepKey = graph.resetIfNotEmpty(this.prelimMessageIdsByStepKey, new Map());
- this.currentTokenType = graph.resetIfNotEmpty(this.currentTokenType, _enum.ContentTypes.TEXT);
- this.lastToken = graph.resetIfNotEmpty(this.lastToken, undefined);
- this.tokenTypeSwitch = graph.resetIfNotEmpty(this.tokenTypeSwitch, undefined);
- this.indexTokenCountMap = graph.resetIfNotEmpty(this.indexTokenCountMap, {});
- this.currentUsage = graph.resetIfNotEmpty(this.currentUsage, undefined);
- this.tokenCounter = graph.resetIfNotEmpty(this.tokenCounter, undefined);
- this.maxContextTokens = graph.resetIfNotEmpty(this.maxContextTokens, undefined);
+ this.invokedToolIds = graph.resetIfNotEmpty(this.invokedToolIds, undefined);
+ for (const context of this.agentContexts.values()) {
+ context.reset();
+ }
  }
  /* Run Step Processing */
  getRunStep(stepId) {
@@ -129,6 +90,27 @@ class StandardGraph extends Graph {
  }
  return undefined;
  }
+ getAgentContext(metadata) {
+ if (!metadata) {
+ throw new Error('No metadata provided to retrieve agent context');
+ }
+ const currentNode = metadata.langgraph_node;
+ if (!currentNode) {
+ throw new Error('No langgraph_node in metadata to retrieve agent context');
+ }
+ let agentId;
+ if (currentNode.startsWith(AGENT)) {
+ agentId = currentNode.substring(AGENT.length);
+ }
+ else if (currentNode.startsWith(TOOLS)) {
+ agentId = currentNode.substring(TOOLS.length);
+ }
+ const agentContext = this.agentContexts.get(agentId ?? '');
+ if (!agentContext) {
+ throw new Error(`No agent context found for agent ID ${agentId}`);
+ }
+ return agentContext;
+ }
  getStepKey(metadata) {
  if (!metadata)
  return '';
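The new `getAgentContext` recovers an agent ID from `metadata.langgraph_node` by stripping the `AGENT`/`TOOLS` prefixes that `createAgentNode` later uses when naming nodes (`${AGENT}${agentId}` and `${TOOLS}${agentId}`). A minimal standalone illustration, assuming plain 'agent'/'tools' prefix strings since the actual `GraphNodeKeys` values are not shown in this diff:

```ts
// Assumed prefix values; the real ones come from GraphNodeKeys in common/enum.
const AGENT = 'agent';
const TOOLS = 'tools';

// Mirrors the prefix-stripping logic in getAgentContext above.
function agentIdFromNode(langgraphNode: string): string | undefined {
  if (langgraphNode.startsWith(AGENT)) return langgraphNode.substring(AGENT.length);
  if (langgraphNode.startsWith(TOOLS)) return langgraphNode.substring(TOOLS.length);
  return undefined;
}

console.log(agentIdFromNode(`${AGENT}researcher`)); // "researcher"
console.log(agentIdFromNode(`${TOOLS}researcher`)); // "researcher"
console.log(agentIdFromNode('unknown_node'));       // undefined
```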
@@ -174,10 +156,14 @@ class StandardGraph extends Graph {
  metadata.langgraph_step,
  metadata.checkpoint_ns,
  ];
- if (this.currentTokenType === _enum.ContentTypes.THINK ||
- this.currentTokenType === 'think_and_text') {
+ const agentContext = this.getAgentContext(metadata);
+ if (agentContext.currentTokenType === _enum.ContentTypes.THINK ||
+ agentContext.currentTokenType === 'think_and_text') {
  keyList.push('reasoning');
  }
+ if (this.invokedToolIds != null && this.invokedToolIds.size > 0) {
+ keyList.push(this.invokedToolIds.size + '');
+ }
  return keyList;
  }
  checkKeyList(keyList) {
@@ -191,113 +177,158 @@ class StandardGraph extends Graph {
  return core.convertMessagesToContent(this.messages.slice(this.startIndex));
  }
  /* Graph */
- createGraphState() {
- return {
- messages: {
- value: (x, y) => {
- if (!x.length) {
- if (this.systemMessage) {
- x.push(this.systemMessage);
- }
- this.startIndex = x.length + y.length;
- }
- const current = x.concat(y);
- this.messages = current;
- return current;
- },
- default: () => [],
- },
- };
+ createSystemRunnable({ provider, clientOptions, instructions, additional_instructions, }) {
+ let finalInstructions = instructions;
+ if (additional_instructions != null && additional_instructions !== '') {
+ finalInstructions =
+ finalInstructions != null && finalInstructions
+ ? `${finalInstructions}\n\n${additional_instructions}`
+ : additional_instructions;
+ }
+ if (finalInstructions != null &&
+ finalInstructions &&
+ provider === _enum.Providers.ANTHROPIC &&
+ (clientOptions.clientOptions
+ ?.defaultHeaders?.['anthropic-beta']?.includes('prompt-caching') ??
+ false)) {
+ finalInstructions = {
+ content: [
+ {
+ type: 'text',
+ text: instructions,
+ cache_control: { type: 'ephemeral' },
+ },
+ ],
+ };
+ }
+ if (finalInstructions != null && finalInstructions !== '') {
+ const systemMessage = new messages.SystemMessage(finalInstructions);
+ return runnables.RunnableLambda.from((messages) => {
+ return [systemMessage, ...messages];
+ }).withConfig({ runName: 'prompt' });
+ }
  }
- initializeTools() {
- // return new ToolNode<t.BaseGraphState>(this.tools);
+ initializeTools({ currentTools, currentToolMap, }) {
  return new ToolNode.ToolNode({
- tools: this.tools || [],
- toolMap: this.toolMap,
+ tools: currentTools ?? [],
+ toolMap: currentToolMap,
  toolCallStepIds: this.toolCallStepIds,
  errorHandler: (data, metadata) => StandardGraph.handleToolCallErrorStatic(this, data, metadata),
  });
  }
- initializeModel() {
- const ChatModelClass = providers.getChatModelClass(this.provider);
- const model = new ChatModelClass(this.clientOptions);
- if (llm.isOpenAILike(this.provider) &&
+ initializeModel({ provider, tools, clientOptions, }) {
+ const ChatModelClass = providers.getChatModelClass(provider);
+ const model = new ChatModelClass(clientOptions ?? {});
+ if (llm.isOpenAILike(provider) &&
  (model instanceof index.ChatOpenAI || model instanceof index.AzureChatOpenAI)) {
- model.temperature = this.clientOptions
+ model.temperature = clientOptions
  .temperature;
- model.topP = this.clientOptions.topP;
- model.frequencyPenalty = this.clientOptions
+ model.topP = clientOptions.topP;
+ model.frequencyPenalty = clientOptions
  .frequencyPenalty;
- model.presencePenalty = this.clientOptions
+ model.presencePenalty = clientOptions
  .presencePenalty;
- model.n = this.clientOptions.n;
+ model.n = clientOptions.n;
  }
- else if (this.provider === _enum.Providers.VERTEXAI &&
+ else if (provider === _enum.Providers.VERTEXAI &&
  model instanceof googleVertexai.ChatVertexAI) {
- model.temperature = this.clientOptions
+ model.temperature = clientOptions
  .temperature;
- model.topP = this.clientOptions
- .topP;
- model.topK = this.clientOptions
- .topK;
- model.topLogprobs = this.clientOptions
+ model.topP = clientOptions.topP;
+ model.topK = clientOptions.topK;
+ model.topLogprobs = clientOptions
  .topLogprobs;
- model.frequencyPenalty = this.clientOptions
+ model.frequencyPenalty = clientOptions
  .frequencyPenalty;
- model.presencePenalty = this.clientOptions
+ model.presencePenalty = clientOptions
  .presencePenalty;
- model.maxOutputTokens = this.clientOptions
+ model.maxOutputTokens = clientOptions
  .maxOutputTokens;
  }
- if (!this.tools || this.tools.length === 0) {
+ if (!tools || tools.length === 0) {
  return model;
  }
- return model.bindTools(this.tools);
+ return model.bindTools(tools);
  }
  overrideTestModel(responses, sleep, toolCalls) {
- this.boundModel = fake.createFakeStreamingLLM({
+ this.overrideModel = fake.createFakeStreamingLLM({
  responses,
  sleep,
  toolCalls,
  });
  }
- getNewModel({ clientOptions = {}, omitOriginalOptions, }) {
- const ChatModelClass = providers.getChatModelClass(this.provider);
- const _options = omitOriginalOptions
- ? Object.fromEntries(Object.entries(this.clientOptions).filter(([key]) => !omitOriginalOptions.has(key)))
- : this.clientOptions;
- const options = Object.assign(_options, clientOptions);
- return new ChatModelClass(options);
+ getNewModel({ provider, clientOptions, }) {
+ const ChatModelClass = providers.getChatModelClass(provider);
+ return new ChatModelClass(clientOptions ?? {});
  }
- storeUsageMetadata(finalMessage) {
+ getUsageMetadata(finalMessage) {
  if (finalMessage &&
  'usage_metadata' in finalMessage &&
  finalMessage.usage_metadata != null) {
- this.currentUsage = finalMessage.usage_metadata;
+ return finalMessage.usage_metadata;
+ }
+ }
+ /** Execute model invocation with streaming support */
+ async attemptInvoke({ currentModel, finalMessages, provider, tools, }, config) {
+ const model = this.overrideModel ?? currentModel;
+ if (!model) {
+ throw new Error('No model found');
+ }
+ if ((tools?.length ?? 0) > 0 && providers.manualToolStreamProviders.has(provider)) {
+ if (!model.stream) {
+ throw new Error('Model does not support stream');
+ }
+ const stream$1 = await model.stream(finalMessages, config);
+ let finalChunk;
+ for await (const chunk of stream$1) {
+ await events.safeDispatchCustomEvent(_enum.GraphEvents.CHAT_MODEL_STREAM, { chunk, emitted: true }, config);
+ finalChunk = finalChunk ? stream.concat(finalChunk, chunk) : chunk;
+ }
+ finalChunk = core.modifyDeltaProperties(provider, finalChunk);
+ return { messages: [finalChunk] };
+ }
+ else {
+ const finalMessage = await model.invoke(finalMessages, config);
+ if ((finalMessage.tool_calls?.length ?? 0) > 0) {
+ finalMessage.tool_calls = finalMessage.tool_calls?.filter((tool_call) => !!tool_call.name);
+ }
+ return { messages: [finalMessage] };
  }
  }
- cleanupSignalListener() {
+ cleanupSignalListener(currentModel) {
  if (!this.signal) {
  return;
  }
- if (!this.boundModel) {
+ const model = this.overrideModel ?? currentModel;
+ if (!model) {
  return;
  }
- const client = this.boundModel?.exposedClient;
+ const client = model?.exposedClient;
  if (!client?.abortHandler) {
  return;
  }
  this.signal.removeEventListener('abort', client.abortHandler);
  client.abortHandler = undefined;
  }
- createCallModel() {
+ createCallModel(agentId = 'default', currentModel) {
  return async (state, config) => {
- const { provider = '' } = config?.configurable ?? {};
- if (this.boundModel == null) {
+ /**
+ * Get agent context - it must exist by this point
+ */
+ const agentContext = this.agentContexts.get(agentId);
+ if (!agentContext) {
+ throw new Error(`Agent context not found for agentId: ${agentId}`);
+ }
+ const model = this.overrideModel ?? currentModel;
+ if (!model) {
  throw new Error('No Graph model found');
  }
- if (!config || !provider) {
- throw new Error(`No ${config ? 'provider' : 'config'} provided`);
+ if (!config) {
+ throw new Error('No config provided');
+ }
+ // Ensure token calculations are complete before proceeding
+ if (agentContext.tokenCalculationPromise) {
+ await agentContext.tokenCalculationPromise;
  }
  if (!config.signal) {
  config.signal = this.signal;
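Instructions are no longer pushed into graph state by a custom `messages` channel; `createSystemRunnable` now returns a `RunnableLambda` that prepends the `SystemMessage`, and `createAgentNode` pipes it in front of the model. A standalone sketch of that pattern, with an arbitrary chat model standing in for whatever `initializeModel` returns:

```ts
import { RunnableLambda } from '@langchain/core/runnables';
import { SystemMessage, type BaseMessage } from '@langchain/core/messages';
import { ChatOpenAI } from '@langchain/openai';

const systemMessage = new SystemMessage('You are a helpful assistant.');

// Same shape as the runnable built in createSystemRunnable above.
const systemRunnable = RunnableLambda.from((messages: BaseMessage[]) => {
  return [systemMessage, ...messages];
}).withConfig({ runName: 'prompt' });

// Equivalent to `agentContext.systemRunnable.pipe(currentModel)` in createAgentNode;
// the model choice here is illustrative, not part of the diff.
const model = new ChatOpenAI({ model: 'gpt-4o-mini' });
const chain = systemRunnable.pipe(model);

// await chain.invoke([new HumanMessage('Hello!')]); // import HumanMessage and set OPENAI_API_KEY to run
```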
@@ -305,32 +336,32 @@ class StandardGraph extends Graph {
  this.config = config;
  const { messages: messages$1 } = state;
  let messagesToUse = messages$1;
- if (!this.pruneMessages &&
- this.tokenCounter &&
- this.maxContextTokens != null &&
- this.indexTokenCountMap[0] != null) {
- const isAnthropicWithThinking = (this.provider === _enum.Providers.ANTHROPIC &&
- this.clientOptions.thinking !=
+ if (!agentContext.pruneMessages &&
+ agentContext.tokenCounter &&
+ agentContext.maxContextTokens != null &&
+ agentContext.indexTokenCountMap[0] != null) {
+ const isAnthropicWithThinking = (agentContext.provider === _enum.Providers.ANTHROPIC &&
+ agentContext.clientOptions.thinking !=
  null) ||
- (this.provider === _enum.Providers.BEDROCK &&
- this.clientOptions
+ (agentContext.provider === _enum.Providers.BEDROCK &&
+ agentContext.clientOptions
  .additionalModelRequestFields?.['thinking'] != null);
- this.pruneMessages = prune.createPruneMessages({
- provider: this.provider,
- indexTokenCountMap: this.indexTokenCountMap,
- maxTokens: this.maxContextTokens,
- tokenCounter: this.tokenCounter,
+ agentContext.pruneMessages = prune.createPruneMessages({
  startIndex: this.startIndex,
+ provider: agentContext.provider,
+ tokenCounter: agentContext.tokenCounter,
+ maxTokens: agentContext.maxContextTokens,
  thinkingEnabled: isAnthropicWithThinking,
+ indexTokenCountMap: agentContext.indexTokenCountMap,
  });
  }
- if (this.pruneMessages) {
- const { context, indexTokenCountMap } = this.pruneMessages({
+ if (agentContext.pruneMessages) {
+ const { context, indexTokenCountMap } = agentContext.pruneMessages({
  messages: messages$1,
- usageMetadata: this.currentUsage,
+ usageMetadata: agentContext.currentUsage,
  // startOnMessageType: 'human',
  });
- this.indexTokenCountMap = indexTokenCountMap;
+ agentContext.indexTokenCountMap = indexTokenCountMap;
  messagesToUse = context;
  }
  const finalMessages = messagesToUse;
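Pruning state (`pruneMessages`, `tokenCounter`, `maxContextTokens`, `indexTokenCountMap`, `currentUsage`) now lives on the per-agent context rather than on the graph. As a rough idea of what a token counter of this kind can look like, here is a sketch built on `js-tiktoken/lite`, which Graph.cjs already requires; the exact signature `createPruneMessages` expects is not shown in this diff, so a plain text-to-count function is assumed:

```ts
import { Tiktoken } from 'js-tiktoken/lite';
import o200k_base from 'js-tiktoken/ranks/o200k_base';

// Assumed shape: a simple text -> token-count function supplied by the caller.
const encoder = new Tiktoken(o200k_base);

export function tokenCounter(text: string): number {
  return encoder.encode(text).length;
}

// Seed counts for messages already in context (message index -> token count).
const indexTokenCountMap: Record<number, number> = {
  0: tokenCounter('You are a helpful assistant.'),
};

console.log(indexTokenCountMap[0]);
```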
@@ -340,87 +371,145 @@ class StandardGraph extends Graph {
  const lastMessageY = finalMessages.length >= 1
  ? finalMessages[finalMessages.length - 1]
  : null;
- if (provider === _enum.Providers.BEDROCK &&
+ if (agentContext.provider === _enum.Providers.BEDROCK &&
  lastMessageX instanceof messages.AIMessageChunk &&
  lastMessageY instanceof messages.ToolMessage &&
  typeof lastMessageX.content === 'string') {
  finalMessages[finalMessages.length - 2].content = '';
  }
  const isLatestToolMessage = lastMessageY instanceof messages.ToolMessage;
- if (isLatestToolMessage && provider === _enum.Providers.ANTHROPIC) {
+ if (isLatestToolMessage &&
+ agentContext.provider === _enum.Providers.ANTHROPIC) {
  core.formatAnthropicArtifactContent(finalMessages);
  }
  else if (isLatestToolMessage &&
- (llm.isOpenAILike(provider) || llm.isGoogleLike(provider))) {
+ (llm.isOpenAILike(agentContext.provider) ||
+ llm.isGoogleLike(agentContext.provider))) {
  core.formatArtifactPayload(finalMessages);
  }
- if (this.lastStreamCall != null && this.streamBuffer != null) {
- const timeSinceLastCall = Date.now() - this.lastStreamCall;
- if (timeSinceLastCall < this.streamBuffer) {
- const timeToWait = Math.ceil((this.streamBuffer - timeSinceLastCall) / 1000) * 1000;
+ if (agentContext.lastStreamCall != null &&
+ agentContext.streamBuffer != null) {
+ const timeSinceLastCall = Date.now() - agentContext.lastStreamCall;
+ if (timeSinceLastCall < agentContext.streamBuffer) {
+ const timeToWait = Math.ceil((agentContext.streamBuffer - timeSinceLastCall) / 1000) *
+ 1000;
  await run.sleep(timeToWait);
  }
  }
- this.lastStreamCall = Date.now();
+ agentContext.lastStreamCall = Date.now();
  let result;
- if ((this.tools?.length ?? 0) > 0 &&
- providers.manualToolStreamProviders.has(provider)) {
- const stream$1 = await this.boundModel.stream(finalMessages, config);
- let finalChunk;
- for await (const chunk of stream$1) {
- dispatch.dispatchCustomEvent(_enum.GraphEvents.CHAT_MODEL_STREAM, { chunk }, config);
- if (!finalChunk) {
- finalChunk = chunk;
+ const fallbacks = agentContext.clientOptions?.fallbacks ??
+ [];
+ try {
+ result = await this.attemptInvoke({
+ currentModel: model,
+ finalMessages,
+ provider: agentContext.provider,
+ tools: agentContext.tools,
+ }, config);
+ }
+ catch (primaryError) {
+ let lastError = primaryError;
+ for (const fb of fallbacks) {
+ try {
+ let model = this.getNewModel({
+ provider: fb.provider,
+ clientOptions: fb.clientOptions,
+ });
+ const bindableTools = agentContext.tools;
+ model = (!bindableTools || bindableTools.length === 0
+ ? model
+ : model.bindTools(bindableTools));
+ result = await this.attemptInvoke({
+ currentModel: model,
+ finalMessages,
+ provider: fb.provider,
+ tools: agentContext.tools,
+ }, config);
+ lastError = undefined;
+ break;
  }
- else {
- finalChunk = stream.concat(finalChunk, chunk);
+ catch (e) {
+ lastError = e;
+ continue;
  }
  }
- finalChunk = core.modifyDeltaProperties(this.provider, finalChunk);
- result = { messages: [finalChunk] };
- }
- else {
- const finalMessage = (await this.boundModel.invoke(finalMessages, config));
- if ((finalMessage.tool_calls?.length ?? 0) > 0) {
- finalMessage.tool_calls = finalMessage.tool_calls?.filter((tool_call) => {
- if (!tool_call.name) {
- return false;
- }
- return true;
- });
+ if (lastError !== undefined) {
+ throw lastError;
  }
- result = { messages: [finalMessage] };
  }
- this.storeUsageMetadata(result.messages?.[0]);
+ if (!result) {
+ throw new Error('No result after model invocation');
+ }
+ agentContext.currentUsage = this.getUsageMetadata(result.messages?.[0]);
  this.cleanupSignalListener();
  return result;
  };
  }
- createWorkflow() {
+ createAgentNode(agentId) {
+ const agentContext = this.agentContexts.get(agentId);
+ if (!agentContext) {
+ throw new Error(`Agent context not found for agentId: ${agentId}`);
+ }
+ let currentModel = this.initializeModel({
+ tools: agentContext.tools,
+ provider: agentContext.provider,
+ clientOptions: agentContext.clientOptions,
+ });
+ if (agentContext.systemRunnable) {
+ currentModel = agentContext.systemRunnable.pipe(currentModel);
+ }
+ const agentNode = `${AGENT}${agentId}`;
+ const toolNode = `${TOOLS}${agentId}`;
  const routeMessage = (state, config) => {
  this.config = config;
- // const lastMessage = state.messages[state.messages.length - 1] as AIMessage;
- // if (!lastMessage?.tool_calls?.length) {
- // return END;
- // }
- // return TOOLS;
- return ToolNode.toolsCondition(state);
+ return ToolNode.toolsCondition(state, toolNode, this.invokedToolIds);
  };
- const workflow = new langgraph.StateGraph({
- channels: this.graphState,
- })
- .addNode(AGENT, this.createCallModel())
- .addNode(TOOLS, this.initializeTools())
- .addEdge(langgraph.START, AGENT)
- .addConditionalEdges(AGENT, routeMessage)
- .addEdge(TOOLS, this.toolEnd ? langgraph.END : AGENT);
- return workflow.compile();
+ const StateAnnotation = langgraph.Annotation.Root({
+ messages: langgraph.Annotation({
+ reducer: langgraph.messagesStateReducer,
+ default: () => [],
+ }),
+ });
+ const workflow = new langgraph.StateGraph(StateAnnotation)
+ .addNode(agentNode, this.createCallModel(agentId, currentModel))
+ .addNode(toolNode, this.initializeTools({
+ currentTools: agentContext.tools,
+ currentToolMap: agentContext.toolMap,
+ }))
+ .addEdge(langgraph.START, agentNode)
+ .addConditionalEdges(agentNode, routeMessage)
+ .addEdge(toolNode, agentContext.toolEnd ? langgraph.END : agentNode);
+ // Cast to unknown to avoid tight coupling to external types; options are opt-in
+ return workflow.compile(this.compileOptions);
+ }
+ createWorkflow() {
+ /** Use the default (first) agent for now */
+ const agentNode = this.createAgentNode(this.defaultAgentId);
+ const StateAnnotation = langgraph.Annotation.Root({
+ messages: langgraph.Annotation({
+ reducer: (a, b) => {
+ if (!a.length) {
+ this.startIndex = a.length + b.length;
+ }
+ const result = langgraph.messagesStateReducer(a, b);
+ this.messages = result;
+ return result;
+ },
+ default: () => [],
+ }),
+ });
+ const workflow = new langgraph.StateGraph(StateAnnotation)
+ .addNode(this.defaultAgentId, agentNode, { ends: [langgraph.END] })
+ .addEdge(langgraph.START, this.defaultAgentId)
+ .compile();
+ return workflow;
  }
  /* Dispatchers */
  /**
  * Dispatches a run step to the client, returns the step ID
  */
- dispatchRunStep(stepKey, stepDetails) {
+ async dispatchRunStep(stepKey, stepDetails) {
  if (!this.config) {
  throw new Error('No config provided');
  }
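The rewritten invocation path above adds provider fallbacks: `createCallModel` reads `clientOptions.fallbacks` off the agent's client options and, if the primary call throws, retries each `{ provider, clientOptions }` entry in order via `getNewModel` before rethrowing the last error. A hypothetical config exercising that field; the `fallbacks` key and its entry shape come from this hunk, while the surrounding agent-config layout and the `Providers` import are assumptions carried over from the earlier constructor sketch:

```ts
import { Providers } from '@librechat/agents';

// Hypothetical per-agent configuration exercising the fallback path.
const agentConfig = {
  agentId: 'default',
  provider: Providers.OPENAI,
  clientOptions: {
    model: 'gpt-4o-mini',
    // Tried in order when the primary model invocation throws.
    fallbacks: [
      {
        provider: Providers.ANTHROPIC,
        clientOptions: { model: 'claude-3-5-sonnet-latest' },
      },
      {
        provider: Providers.VERTEXAI,
        clientOptions: { model: 'gemini-1.5-pro' },
      },
    ],
  },
};

export default agentConfig;
```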
@@ -448,17 +537,21 @@ class StandardGraph extends Graph {
  }
  this.contentData.push(runStep);
  this.contentIndexMap.set(stepId, runStep.index);
- dispatch.dispatchCustomEvent(_enum.GraphEvents.ON_RUN_STEP, runStep, this.config);
+ await events.safeDispatchCustomEvent(_enum.GraphEvents.ON_RUN_STEP, runStep, this.config);
  return stepId;
  }
- handleToolCallCompleted(data, metadata) {
+ async handleToolCallCompleted(data, metadata, omitOutput) {
  if (!this.config) {
  throw new Error('No config provided');
  }
  if (!data.output) {
  return;
  }
- const { input, output } = data;
+ const { input, output: _output } = data;
+ if (_output?.lg_name === 'Command') {
+ return;
+ }
+ const output = _output;
  const { tool_call_id } = output;
  const stepId = this.toolCallStepIds.get(tool_call_id) ?? '';
  if (!stepId) {
@@ -468,17 +561,20 @@ class StandardGraph extends Graph {
  if (!runStep) {
  throw new Error(`No run step found for stepId ${stepId}`);
  }
+ const dispatchedOutput = typeof output.content === 'string'
+ ? output.content
+ : JSON.stringify(output.content);
  const args = typeof input === 'string' ? input : input.input;
  const tool_call = {
  args: typeof args === 'string' ? args : JSON.stringify(args),
  name: output.name ?? '',
  id: output.tool_call_id,
- output: typeof output.content === 'string'
- ? output.content
- : JSON.stringify(output.content),
+ output: omitOutput === true ? '' : dispatchedOutput,
  progress: 1,
  };
- this.handlerRegistry?.getHandler(_enum.GraphEvents.ON_RUN_STEP_COMPLETED)?.handle(_enum.GraphEvents.ON_RUN_STEP_COMPLETED, {
+ await this.handlerRegistry
+ ?.getHandler(_enum.GraphEvents.ON_RUN_STEP_COMPLETED)
+ ?.handle(_enum.GraphEvents.ON_RUN_STEP_COMPLETED, {
  result: {
  id: stepId,
  index: runStep.index,
@@ -491,7 +587,7 @@ class StandardGraph extends Graph {
  * Static version of handleToolCallError to avoid creating strong references
  * that prevent garbage collection
  */
- static handleToolCallErrorStatic(graph, data, metadata) {
+ static async handleToolCallErrorStatic(graph, data, metadata) {
  if (!graph.config) {
  throw new Error('No config provided');
  }
@@ -515,7 +611,7 @@ class StandardGraph extends Graph {
  output: `Error processing tool${error?.message != null ? `: ${error.message}` : ''}`,
  progress: 1,
  };
- graph.handlerRegistry
+ await graph.handlerRegistry
  ?.getHandler(_enum.GraphEvents.ON_RUN_STEP_COMPLETED)
  ?.handle(_enum.GraphEvents.ON_RUN_STEP_COMPLETED, {
  result: {
@@ -530,10 +626,10 @@ class StandardGraph extends Graph {
  * Instance method that delegates to the static method
  * Kept for backward compatibility
  */
- handleToolCallError(data, metadata) {
- StandardGraph.handleToolCallErrorStatic(this, data, metadata);
+ async handleToolCallError(data, metadata) {
+ await StandardGraph.handleToolCallErrorStatic(this, data, metadata);
  }
- dispatchRunStepDelta(id, delta) {
+ async dispatchRunStepDelta(id, delta) {
  if (!this.config) {
  throw new Error('No config provided');
  }
@@ -544,9 +640,9 @@ class StandardGraph extends Graph {
  id,
  delta,
  };
- dispatch.dispatchCustomEvent(_enum.GraphEvents.ON_RUN_STEP_DELTA, runStepDelta, this.config);
+ await events.safeDispatchCustomEvent(_enum.GraphEvents.ON_RUN_STEP_DELTA, runStepDelta, this.config);
  }
- dispatchMessageDelta(id, delta) {
+ async dispatchMessageDelta(id, delta) {
  if (!this.config) {
  throw new Error('No config provided');
  }
@@ -554,9 +650,9 @@ class StandardGraph extends Graph {
  id,
  delta,
  };
- dispatch.dispatchCustomEvent(_enum.GraphEvents.ON_MESSAGE_DELTA, messageDelta, this.config);
+ await events.safeDispatchCustomEvent(_enum.GraphEvents.ON_MESSAGE_DELTA, messageDelta, this.config);
  }
- dispatchReasoningDelta = (stepId, delta) => {
+ dispatchReasoningDelta = async (stepId, delta) => {
  if (!this.config) {
  throw new Error('No config provided');
  }
@@ -564,7 +660,7 @@ class StandardGraph extends Graph {
  id: stepId,
  delta,
  };
- dispatch.dispatchCustomEvent(_enum.GraphEvents.ON_REASONING_DELTA, reasoningDelta, this.config);
+ await events.safeDispatchCustomEvent(_enum.GraphEvents.ON_REASONING_DELTA, reasoningDelta, this.config);
  };
  }