illuma-agents 1.0.2 → 1.0.4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (225) hide show
  1. package/LICENSE +25 -21
  2. package/dist/cjs/agents/AgentContext.cjs +222 -0
  3. package/dist/cjs/agents/AgentContext.cjs.map +1 -0
  4. package/dist/cjs/common/enum.cjs +5 -4
  5. package/dist/cjs/common/enum.cjs.map +1 -1
  6. package/dist/cjs/events.cjs +7 -5
  7. package/dist/cjs/events.cjs.map +1 -1
  8. package/dist/cjs/graphs/Graph.cjs +328 -207
  9. package/dist/cjs/graphs/Graph.cjs.map +1 -1
  10. package/dist/cjs/graphs/MultiAgentGraph.cjs +507 -0
  11. package/dist/cjs/graphs/MultiAgentGraph.cjs.map +1 -0
  12. package/dist/cjs/llm/anthropic/index.cjs.map +1 -1
  13. package/dist/cjs/llm/google/index.cjs.map +1 -1
  14. package/dist/cjs/llm/ollama/index.cjs.map +1 -1
  15. package/dist/cjs/llm/openai/index.cjs +35 -0
  16. package/dist/cjs/llm/openai/index.cjs.map +1 -1
  17. package/dist/cjs/llm/openai/utils/index.cjs +3 -1
  18. package/dist/cjs/llm/openai/utils/index.cjs.map +1 -1
  19. package/dist/cjs/llm/openrouter/index.cjs.map +1 -1
  20. package/dist/cjs/llm/providers.cjs +0 -2
  21. package/dist/cjs/llm/providers.cjs.map +1 -1
  22. package/dist/cjs/llm/vertexai/index.cjs.map +1 -1
  23. package/dist/cjs/main.cjs +12 -1
  24. package/dist/cjs/main.cjs.map +1 -1
  25. package/dist/cjs/messages/cache.cjs +123 -0
  26. package/dist/cjs/messages/cache.cjs.map +1 -0
  27. package/dist/cjs/messages/content.cjs +53 -0
  28. package/dist/cjs/messages/content.cjs.map +1 -0
  29. package/dist/cjs/messages/format.cjs +17 -29
  30. package/dist/cjs/messages/format.cjs.map +1 -1
  31. package/dist/cjs/run.cjs +119 -74
  32. package/dist/cjs/run.cjs.map +1 -1
  33. package/dist/cjs/stream.cjs +77 -73
  34. package/dist/cjs/stream.cjs.map +1 -1
  35. package/dist/cjs/tools/Calculator.cjs +45 -0
  36. package/dist/cjs/tools/Calculator.cjs.map +1 -0
  37. package/dist/cjs/tools/CodeExecutor.cjs +22 -22
  38. package/dist/cjs/tools/CodeExecutor.cjs.map +1 -1
  39. package/dist/cjs/tools/ToolNode.cjs +5 -3
  40. package/dist/cjs/tools/ToolNode.cjs.map +1 -1
  41. package/dist/cjs/tools/handlers.cjs +20 -20
  42. package/dist/cjs/tools/handlers.cjs.map +1 -1
  43. package/dist/cjs/utils/events.cjs +31 -0
  44. package/dist/cjs/utils/events.cjs.map +1 -0
  45. package/dist/cjs/utils/handlers.cjs +70 -0
  46. package/dist/cjs/utils/handlers.cjs.map +1 -0
  47. package/dist/cjs/utils/tokens.cjs +54 -7
  48. package/dist/cjs/utils/tokens.cjs.map +1 -1
  49. package/dist/esm/agents/AgentContext.mjs +220 -0
  50. package/dist/esm/agents/AgentContext.mjs.map +1 -0
  51. package/dist/esm/common/enum.mjs +5 -4
  52. package/dist/esm/common/enum.mjs.map +1 -1
  53. package/dist/esm/events.mjs +7 -5
  54. package/dist/esm/events.mjs.map +1 -1
  55. package/dist/esm/graphs/Graph.mjs +330 -209
  56. package/dist/esm/graphs/Graph.mjs.map +1 -1
  57. package/dist/esm/graphs/MultiAgentGraph.mjs +505 -0
  58. package/dist/esm/graphs/MultiAgentGraph.mjs.map +1 -0
  59. package/dist/esm/llm/anthropic/index.mjs.map +1 -1
  60. package/dist/esm/llm/google/index.mjs.map +1 -1
  61. package/dist/esm/llm/ollama/index.mjs.map +1 -1
  62. package/dist/esm/llm/openai/index.mjs +35 -0
  63. package/dist/esm/llm/openai/index.mjs.map +1 -1
  64. package/dist/esm/llm/openai/utils/index.mjs +3 -1
  65. package/dist/esm/llm/openai/utils/index.mjs.map +1 -1
  66. package/dist/esm/llm/openrouter/index.mjs.map +1 -1
  67. package/dist/esm/llm/providers.mjs +0 -2
  68. package/dist/esm/llm/providers.mjs.map +1 -1
  69. package/dist/esm/llm/vertexai/index.mjs.map +1 -1
  70. package/dist/esm/main.mjs +7 -2
  71. package/dist/esm/main.mjs.map +1 -1
  72. package/dist/esm/messages/cache.mjs +120 -0
  73. package/dist/esm/messages/cache.mjs.map +1 -0
  74. package/dist/esm/messages/content.mjs +51 -0
  75. package/dist/esm/messages/content.mjs.map +1 -0
  76. package/dist/esm/messages/format.mjs +18 -29
  77. package/dist/esm/messages/format.mjs.map +1 -1
  78. package/dist/esm/run.mjs +119 -74
  79. package/dist/esm/run.mjs.map +1 -1
  80. package/dist/esm/stream.mjs +77 -73
  81. package/dist/esm/stream.mjs.map +1 -1
  82. package/dist/esm/tools/Calculator.mjs +24 -0
  83. package/dist/esm/tools/Calculator.mjs.map +1 -0
  84. package/dist/esm/tools/CodeExecutor.mjs +22 -22
  85. package/dist/esm/tools/CodeExecutor.mjs.map +1 -1
  86. package/dist/esm/tools/ToolNode.mjs +5 -3
  87. package/dist/esm/tools/ToolNode.mjs.map +1 -1
  88. package/dist/esm/tools/handlers.mjs +20 -20
  89. package/dist/esm/tools/handlers.mjs.map +1 -1
  90. package/dist/esm/utils/events.mjs +29 -0
  91. package/dist/esm/utils/events.mjs.map +1 -0
  92. package/dist/esm/utils/handlers.mjs +68 -0
  93. package/dist/esm/utils/handlers.mjs.map +1 -0
  94. package/dist/esm/utils/tokens.mjs +54 -8
  95. package/dist/esm/utils/tokens.mjs.map +1 -1
  96. package/dist/types/agents/AgentContext.d.ts +94 -0
  97. package/dist/types/common/enum.d.ts +7 -5
  98. package/dist/types/events.d.ts +3 -3
  99. package/dist/types/graphs/Graph.d.ts +60 -66
  100. package/dist/types/graphs/MultiAgentGraph.d.ts +47 -0
  101. package/dist/types/graphs/index.d.ts +1 -0
  102. package/dist/types/index.d.ts +1 -0
  103. package/dist/types/llm/openai/index.d.ts +10 -0
  104. package/dist/types/messages/cache.d.ts +20 -0
  105. package/dist/types/messages/content.d.ts +7 -0
  106. package/dist/types/messages/format.d.ts +1 -7
  107. package/dist/types/messages/index.d.ts +2 -0
  108. package/dist/types/messages/reducer.d.ts +9 -0
  109. package/dist/types/run.d.ts +16 -10
  110. package/dist/types/stream.d.ts +4 -3
  111. package/dist/types/tools/Calculator.d.ts +8 -0
  112. package/dist/types/tools/ToolNode.d.ts +1 -1
  113. package/dist/types/tools/handlers.d.ts +9 -7
  114. package/dist/types/tools/search/tool.d.ts +4 -4
  115. package/dist/types/types/graph.d.ts +124 -11
  116. package/dist/types/types/llm.d.ts +13 -9
  117. package/dist/types/types/messages.d.ts +4 -0
  118. package/dist/types/types/run.d.ts +46 -8
  119. package/dist/types/types/stream.d.ts +3 -2
  120. package/dist/types/utils/events.d.ts +6 -0
  121. package/dist/types/utils/handlers.d.ts +34 -0
  122. package/dist/types/utils/index.d.ts +1 -0
  123. package/dist/types/utils/tokens.d.ts +24 -0
  124. package/package.json +162 -145
  125. package/src/agents/AgentContext.ts +323 -0
  126. package/src/common/enum.ts +177 -176
  127. package/src/events.ts +197 -191
  128. package/src/graphs/Graph.ts +1058 -846
  129. package/src/graphs/MultiAgentGraph.ts +598 -0
  130. package/src/graphs/index.ts +2 -1
  131. package/src/index.ts +25 -24
  132. package/src/llm/anthropic/index.ts +413 -413
  133. package/src/llm/google/index.ts +222 -222
  134. package/src/llm/google/utils/zod_to_genai_parameters.ts +86 -88
  135. package/src/llm/ollama/index.ts +92 -92
  136. package/src/llm/openai/index.ts +894 -853
  137. package/src/llm/openai/utils/index.ts +920 -918
  138. package/src/llm/openrouter/index.ts +60 -60
  139. package/src/llm/providers.ts +55 -57
  140. package/src/llm/vertexai/index.ts +360 -360
  141. package/src/messages/cache.test.ts +461 -0
  142. package/src/messages/cache.ts +151 -0
  143. package/src/messages/content.test.ts +362 -0
  144. package/src/messages/content.ts +63 -0
  145. package/src/messages/format.ts +611 -625
  146. package/src/messages/formatAgentMessages.test.ts +1144 -917
  147. package/src/messages/index.ts +6 -4
  148. package/src/messages/reducer.ts +80 -0
  149. package/src/run.ts +447 -381
  150. package/src/scripts/abort.ts +157 -138
  151. package/src/scripts/ant_web_search.ts +158 -158
  152. package/src/scripts/cli.ts +172 -167
  153. package/src/scripts/cli2.ts +133 -125
  154. package/src/scripts/cli3.ts +184 -178
  155. package/src/scripts/cli4.ts +191 -184
  156. package/src/scripts/cli5.ts +191 -184
  157. package/src/scripts/code_exec.ts +213 -214
  158. package/src/scripts/code_exec_simple.ts +147 -129
  159. package/src/scripts/content.ts +138 -120
  160. package/src/scripts/handoff-test.ts +135 -0
  161. package/src/scripts/multi-agent-chain.ts +278 -0
  162. package/src/scripts/multi-agent-conditional.ts +220 -0
  163. package/src/scripts/multi-agent-document-review-chain.ts +197 -0
  164. package/src/scripts/multi-agent-hybrid-flow.ts +310 -0
  165. package/src/scripts/multi-agent-parallel.ts +343 -0
  166. package/src/scripts/multi-agent-sequence.ts +212 -0
  167. package/src/scripts/multi-agent-supervisor.ts +364 -0
  168. package/src/scripts/multi-agent-test.ts +186 -0
  169. package/src/scripts/search.ts +146 -150
  170. package/src/scripts/simple.ts +225 -225
  171. package/src/scripts/stream.ts +140 -122
  172. package/src/scripts/test-custom-prompt-key.ts +145 -0
  173. package/src/scripts/test-handoff-input.ts +170 -0
  174. package/src/scripts/test-multi-agent-list-handoff.ts +261 -0
  175. package/src/scripts/test-tools-before-handoff.ts +222 -0
  176. package/src/scripts/tools.ts +153 -155
  177. package/src/specs/agent-handoffs.test.ts +889 -0
  178. package/src/specs/anthropic.simple.test.ts +320 -317
  179. package/src/specs/azure.simple.test.ts +325 -316
  180. package/src/specs/openai.simple.test.ts +311 -316
  181. package/src/specs/openrouter.simple.test.ts +107 -0
  182. package/src/specs/prune.test.ts +758 -763
  183. package/src/specs/reasoning.test.ts +201 -165
  184. package/src/specs/thinking-prune.test.ts +769 -703
  185. package/src/specs/token-memoization.test.ts +39 -0
  186. package/src/stream.ts +664 -651
  187. package/src/tools/Calculator.test.ts +278 -0
  188. package/src/tools/Calculator.ts +25 -0
  189. package/src/tools/CodeExecutor.ts +220 -220
  190. package/src/tools/ToolNode.ts +170 -170
  191. package/src/tools/handlers.ts +341 -336
  192. package/src/types/graph.ts +372 -185
  193. package/src/types/llm.ts +141 -140
  194. package/src/types/messages.ts +4 -0
  195. package/src/types/run.ts +128 -89
  196. package/src/types/stream.ts +401 -400
  197. package/src/utils/events.ts +32 -0
  198. package/src/utils/handlers.ts +107 -0
  199. package/src/utils/index.ts +6 -5
  200. package/src/utils/llmConfig.ts +183 -183
  201. package/src/utils/tokens.ts +129 -70
  202. package/dist/types/scripts/abort.d.ts +0 -1
  203. package/dist/types/scripts/ant_web_search.d.ts +0 -1
  204. package/dist/types/scripts/args.d.ts +0 -7
  205. package/dist/types/scripts/caching.d.ts +0 -1
  206. package/dist/types/scripts/cli.d.ts +0 -1
  207. package/dist/types/scripts/cli2.d.ts +0 -1
  208. package/dist/types/scripts/cli3.d.ts +0 -1
  209. package/dist/types/scripts/cli4.d.ts +0 -1
  210. package/dist/types/scripts/cli5.d.ts +0 -1
  211. package/dist/types/scripts/code_exec.d.ts +0 -1
  212. package/dist/types/scripts/code_exec_files.d.ts +0 -1
  213. package/dist/types/scripts/code_exec_simple.d.ts +0 -1
  214. package/dist/types/scripts/content.d.ts +0 -1
  215. package/dist/types/scripts/empty_input.d.ts +0 -1
  216. package/dist/types/scripts/image.d.ts +0 -1
  217. package/dist/types/scripts/memory.d.ts +0 -1
  218. package/dist/types/scripts/search.d.ts +0 -1
  219. package/dist/types/scripts/simple.d.ts +0 -1
  220. package/dist/types/scripts/stream.d.ts +0 -1
  221. package/dist/types/scripts/thinking.d.ts +0 -1
  222. package/dist/types/scripts/tools.d.ts +0 -1
  223. package/dist/types/specs/spec.utils.d.ts +0 -1
  224. package/dist/types/tools/example.d.ts +0 -78
  225. package/src/tools/example.ts +0 -129
@@ -1,846 +1,1058 @@
1
- /* eslint-disable no-console */
2
- // src/graphs/Graph.ts
3
- import { nanoid } from 'nanoid';
4
- import { concat } from '@langchain/core/utils/stream';
5
- import { ToolNode } from '@langchain/langgraph/prebuilt';
6
- import { ChatVertexAI } from '@langchain/google-vertexai';
7
- import { START, END, StateGraph } from '@langchain/langgraph';
8
- import { Runnable, RunnableConfig } from '@langchain/core/runnables';
9
- import { dispatchCustomEvent } from '@langchain/core/callbacks/dispatch';
10
- import {
11
- AIMessageChunk,
12
- ToolMessage,
13
- SystemMessage,
14
- } from '@langchain/core/messages';
15
- import type {
16
- BaseMessage,
17
- BaseMessageFields,
18
- UsageMetadata,
19
- } from '@langchain/core/messages';
20
- import type * as t from '@/types';
21
- import {
22
- Providers,
23
- GraphEvents,
24
- GraphNodeKeys,
25
- StepTypes,
26
- Callback,
27
- ContentTypes,
28
- } from '@/common';
29
- import type { ToolCall } from '@langchain/core/messages/tool';
30
- import { getChatModelClass, manualToolStreamProviders } from '@/llm/providers';
31
- import { ToolNode as CustomToolNode, toolsCondition } from '@/tools/ToolNode';
32
- import {
33
- createPruneMessages,
34
- modifyDeltaProperties,
35
- formatArtifactPayload,
36
- convertMessagesToContent,
37
- formatAnthropicArtifactContent,
38
- } from '@/messages';
39
- import {
40
- resetIfNotEmpty,
41
- isOpenAILike,
42
- isGoogleLike,
43
- joinKeys,
44
- sleep,
45
- } from '@/utils';
46
- import { ChatOpenAI, AzureChatOpenAI } from '@/llm/openai';
47
- import { createFakeStreamingLLM } from '@/llm/fake';
48
- import { HandlerRegistry } from '@/events';
49
-
50
- const { AGENT, TOOLS } = GraphNodeKeys;
51
- export type GraphNode = GraphNodeKeys | typeof START;
52
- export type ClientCallback<T extends unknown[]> = (
53
- graph: StandardGraph,
54
- ...args: T
55
- ) => void;
56
- export type ClientCallbacks = {
57
- [Callback.TOOL_ERROR]?: ClientCallback<[Error, string]>;
58
- [Callback.TOOL_START]?: ClientCallback<unknown[]>;
59
- [Callback.TOOL_END]?: ClientCallback<unknown[]>;
60
- };
61
- export type SystemCallbacks = {
62
- [K in keyof ClientCallbacks]: ClientCallbacks[K] extends ClientCallback<
63
- infer Args
64
- >
65
- ? (...args: Args) => void
66
- : never;
67
- };
68
-
69
- export abstract class Graph<
70
- T extends t.BaseGraphState = t.BaseGraphState,
71
- // eslint-disable-next-line @typescript-eslint/no-unused-vars
72
- TNodeName extends string = string,
73
- > {
74
- abstract resetValues(): void;
75
- abstract createGraphState(): t.GraphStateChannels<T>;
76
- abstract initializeTools(): CustomToolNode<T> | ToolNode<T>;
77
- abstract initializeModel(): Runnable;
78
- abstract getRunMessages(): BaseMessage[] | undefined;
79
- abstract getContentParts(): t.MessageContentComplex[] | undefined;
80
- abstract generateStepId(stepKey: string): [string, number];
81
- abstract getKeyList(
82
- metadata: Record<string, unknown> | undefined
83
- ): (string | number | undefined)[];
84
- abstract getStepKey(metadata: Record<string, unknown> | undefined): string;
85
- abstract checkKeyList(keyList: (string | number | undefined)[]): boolean;
86
- abstract getStepIdByKey(stepKey: string, index?: number): string;
87
- abstract getRunStep(stepId: string): t.RunStep | undefined;
88
- abstract dispatchRunStep(stepKey: string, stepDetails: t.StepDetails): string;
89
- abstract dispatchRunStepDelta(id: string, delta: t.ToolCallDelta): void;
90
- abstract dispatchMessageDelta(id: string, delta: t.MessageDelta): void;
91
- abstract dispatchReasoningDelta(
92
- stepId: string,
93
- delta: t.ReasoningDelta
94
- ): void;
95
- abstract handleToolCallCompleted(
96
- data: t.ToolEndData,
97
- metadata?: Record<string, unknown>,
98
- omitOutput?: boolean
99
- ): void;
100
-
101
- abstract createCallModel(): (
102
- state: T,
103
- config?: RunnableConfig
104
- ) => Promise<Partial<T>>;
105
- abstract createWorkflow(): t.CompiledWorkflow<T>;
106
- lastToken?: string;
107
- tokenTypeSwitch?: 'reasoning' | 'content';
108
- reasoningKey: 'reasoning_content' | 'reasoning' = 'reasoning_content';
109
- currentTokenType: ContentTypes.TEXT | ContentTypes.THINK | 'think_and_text' =
110
- ContentTypes.TEXT;
111
- messageStepHasToolCalls: Map<string, boolean> = new Map();
112
- messageIdsByStepKey: Map<string, string> = new Map();
113
- prelimMessageIdsByStepKey: Map<string, string> = new Map();
114
- config: RunnableConfig | undefined;
115
- contentData: t.RunStep[] = [];
116
- stepKeyIds: Map<string, string[]> = new Map<string, string[]>();
117
- contentIndexMap: Map<string, number> = new Map();
118
- toolCallStepIds: Map<string, string> = new Map();
119
- currentUsage: Partial<UsageMetadata> | undefined;
120
- indexTokenCountMap: Record<string, number | undefined> = {};
121
- maxContextTokens: number | undefined;
122
- pruneMessages?: ReturnType<typeof createPruneMessages>;
123
- /** The amount of time that should pass before another consecutive API call */
124
- streamBuffer: number | undefined;
125
- tokenCounter?: t.TokenCounter;
126
- signal?: AbortSignal;
127
- /** Set of invoked tool call IDs from non-message run steps completed mid-run, if any */
128
- invokedToolIds?: Set<string>;
129
- handlerRegistry: HandlerRegistry | undefined;
130
- }
131
-
132
- export class StandardGraph extends Graph<t.BaseGraphState, GraphNode> {
133
- private graphState: t.GraphStateChannels<t.BaseGraphState>;
134
- clientOptions: t.ClientOptions;
135
- boundModel?: Runnable;
136
- /** The last recorded timestamp that a stream API call was invoked */
137
- lastStreamCall: number | undefined;
138
- systemMessage: SystemMessage | undefined;
139
- messages: BaseMessage[] = [];
140
- runId: string | undefined;
141
- tools?: t.GraphTools;
142
- toolMap?: t.ToolMap;
143
- startIndex: number = 0;
144
- provider: Providers;
145
- toolEnd: boolean;
146
- signal?: AbortSignal;
147
-
148
- constructor({
149
- runId,
150
- tools,
151
- signal,
152
- toolMap,
153
- provider,
154
- streamBuffer,
155
- instructions,
156
- reasoningKey,
157
- clientOptions,
158
- toolEnd = false,
159
- additional_instructions = '',
160
- }: t.StandardGraphInput) {
161
- super();
162
- this.runId = runId;
163
- this.tools = tools;
164
- this.signal = signal;
165
- this.toolEnd = toolEnd;
166
- this.toolMap = toolMap;
167
- this.provider = provider;
168
- this.streamBuffer = streamBuffer;
169
- this.clientOptions = clientOptions;
170
- this.graphState = this.createGraphState();
171
- this.boundModel = this.initializeModel();
172
- if (reasoningKey) {
173
- this.reasoningKey = reasoningKey;
174
- }
175
-
176
- let finalInstructions: string | BaseMessageFields | undefined =
177
- instructions;
178
- if (additional_instructions) {
179
- finalInstructions =
180
- finalInstructions != null && finalInstructions
181
- ? `${finalInstructions}\n\n${additional_instructions}`
182
- : additional_instructions;
183
- }
184
-
185
- if (
186
- finalInstructions != null &&
187
- finalInstructions &&
188
- provider === Providers.ANTHROPIC &&
189
- ((
190
- (clientOptions as t.AnthropicClientOptions).clientOptions
191
- ?.defaultHeaders as Record<string, string> | undefined
192
- )?.['anthropic-beta']?.includes('prompt-caching') ??
193
- false)
194
- ) {
195
- finalInstructions = {
196
- content: [
197
- {
198
- type: 'text',
199
- text: instructions,
200
- cache_control: { type: 'ephemeral' },
201
- },
202
- ],
203
- };
204
- }
205
-
206
- if (finalInstructions != null && finalInstructions !== '') {
207
- this.systemMessage = new SystemMessage(finalInstructions);
208
- }
209
- }
210
-
211
- /* Init */
212
-
213
- resetValues(keepContent?: boolean): void {
214
- this.messages = [];
215
- this.config = resetIfNotEmpty(this.config, undefined);
216
- if (keepContent !== true) {
217
- this.contentData = resetIfNotEmpty(this.contentData, []);
218
- this.contentIndexMap = resetIfNotEmpty(this.contentIndexMap, new Map());
219
- }
220
- this.stepKeyIds = resetIfNotEmpty(this.stepKeyIds, new Map());
221
- this.toolCallStepIds = resetIfNotEmpty(this.toolCallStepIds, new Map());
222
- this.messageIdsByStepKey = resetIfNotEmpty(
223
- this.messageIdsByStepKey,
224
- new Map()
225
- );
226
- this.messageStepHasToolCalls = resetIfNotEmpty(
227
- this.prelimMessageIdsByStepKey,
228
- new Map()
229
- );
230
- this.prelimMessageIdsByStepKey = resetIfNotEmpty(
231
- this.prelimMessageIdsByStepKey,
232
- new Map()
233
- );
234
- this.currentTokenType = resetIfNotEmpty(
235
- this.currentTokenType,
236
- ContentTypes.TEXT
237
- );
238
- this.lastToken = resetIfNotEmpty(this.lastToken, undefined);
239
- this.tokenTypeSwitch = resetIfNotEmpty(this.tokenTypeSwitch, undefined);
240
- this.indexTokenCountMap = resetIfNotEmpty(this.indexTokenCountMap, {});
241
- this.currentUsage = resetIfNotEmpty(this.currentUsage, undefined);
242
- this.tokenCounter = resetIfNotEmpty(this.tokenCounter, undefined);
243
- this.maxContextTokens = resetIfNotEmpty(this.maxContextTokens, undefined);
244
- this.invokedToolIds = resetIfNotEmpty(this.invokedToolIds, undefined);
245
- }
246
-
247
- /* Run Step Processing */
248
-
249
- getRunStep(stepId: string): t.RunStep | undefined {
250
- const index = this.contentIndexMap.get(stepId);
251
- if (index !== undefined) {
252
- return this.contentData[index];
253
- }
254
- return undefined;
255
- }
256
-
257
- getStepKey(metadata: Record<string, unknown> | undefined): string {
258
- if (!metadata) return '';
259
-
260
- const keyList = this.getKeyList(metadata);
261
- if (this.checkKeyList(keyList)) {
262
- throw new Error('Missing metadata');
263
- }
264
-
265
- return joinKeys(keyList);
266
- }
267
-
268
- getStepIdByKey(stepKey: string, index?: number): string {
269
- const stepIds = this.stepKeyIds.get(stepKey);
270
- if (!stepIds) {
271
- throw new Error(`No step IDs found for stepKey ${stepKey}`);
272
- }
273
-
274
- if (index === undefined) {
275
- return stepIds[stepIds.length - 1];
276
- }
277
-
278
- return stepIds[index];
279
- }
280
-
281
- generateStepId(stepKey: string): [string, number] {
282
- const stepIds = this.stepKeyIds.get(stepKey);
283
- let newStepId: string | undefined;
284
- let stepIndex = 0;
285
- if (stepIds) {
286
- stepIndex = stepIds.length;
287
- newStepId = `step_${nanoid()}`;
288
- stepIds.push(newStepId);
289
- this.stepKeyIds.set(stepKey, stepIds);
290
- } else {
291
- newStepId = `step_${nanoid()}`;
292
- this.stepKeyIds.set(stepKey, [newStepId]);
293
- }
294
-
295
- return [newStepId, stepIndex];
296
- }
297
-
298
- getKeyList(
299
- metadata: Record<string, unknown> | undefined
300
- ): (string | number | undefined)[] {
301
- if (!metadata) return [];
302
-
303
- const keyList = [
304
- metadata.run_id as string,
305
- metadata.thread_id as string,
306
- metadata.langgraph_node as string,
307
- metadata.langgraph_step as number,
308
- metadata.checkpoint_ns as string,
309
- ];
310
- if (
311
- this.currentTokenType === ContentTypes.THINK ||
312
- this.currentTokenType === 'think_and_text'
313
- ) {
314
- keyList.push('reasoning');
315
- } else if (this.tokenTypeSwitch === 'content') {
316
- keyList.push('post-reasoning');
317
- }
318
-
319
- if (this.invokedToolIds != null && this.invokedToolIds.size > 0) {
320
- keyList.push(this.invokedToolIds.size + '');
321
- }
322
-
323
- return keyList;
324
- }
325
-
326
- checkKeyList(keyList: (string | number | undefined)[]): boolean {
327
- return keyList.some((key) => key === undefined);
328
- }
329
-
330
- /* Misc.*/
331
-
332
- getRunMessages(): BaseMessage[] | undefined {
333
- return this.messages.slice(this.startIndex);
334
- }
335
-
336
- getContentParts(): t.MessageContentComplex[] | undefined {
337
- return convertMessagesToContent(this.messages.slice(this.startIndex));
338
- }
339
-
340
- /* Graph */
341
-
342
- createGraphState(): t.GraphStateChannels<t.BaseGraphState> {
343
- return {
344
- messages: {
345
- value: (x: BaseMessage[], y: BaseMessage[]): BaseMessage[] => {
346
- if (!x.length) {
347
- if (this.systemMessage) {
348
- x.push(this.systemMessage);
349
- }
350
-
351
- this.startIndex = x.length + y.length;
352
- }
353
- const current = x.concat(y);
354
- this.messages = current;
355
- return current;
356
- },
357
- default: () => [],
358
- },
359
- };
360
- }
361
-
362
- initializeTools():
363
- | CustomToolNode<t.BaseGraphState>
364
- | ToolNode<t.BaseGraphState> {
365
- // return new ToolNode<t.BaseGraphState>(this.tools);
366
- return new CustomToolNode<t.BaseGraphState>({
367
- tools: (this.tools as t.GenericTool[] | undefined) || [],
368
- toolMap: this.toolMap,
369
- toolCallStepIds: this.toolCallStepIds,
370
- errorHandler: (data, metadata) =>
371
- StandardGraph.handleToolCallErrorStatic(this, data, metadata),
372
- });
373
- }
374
-
375
- initializeModel(): Runnable {
376
- const ChatModelClass = getChatModelClass(this.provider);
377
- const model = new ChatModelClass(this.clientOptions);
378
-
379
- if (
380
- isOpenAILike(this.provider) &&
381
- (model instanceof ChatOpenAI || model instanceof AzureChatOpenAI)
382
- ) {
383
- model.temperature = (this.clientOptions as t.OpenAIClientOptions)
384
- .temperature as number;
385
- model.topP = (this.clientOptions as t.OpenAIClientOptions).topP as number;
386
- model.frequencyPenalty = (this.clientOptions as t.OpenAIClientOptions)
387
- .frequencyPenalty as number;
388
- model.presencePenalty = (this.clientOptions as t.OpenAIClientOptions)
389
- .presencePenalty as number;
390
- model.n = (this.clientOptions as t.OpenAIClientOptions).n as number;
391
- } else if (
392
- this.provider === Providers.VERTEXAI &&
393
- model instanceof ChatVertexAI
394
- ) {
395
- model.temperature = (this.clientOptions as t.VertexAIClientOptions)
396
- .temperature as number;
397
- model.topP = (this.clientOptions as t.VertexAIClientOptions)
398
- .topP as number;
399
- model.topK = (this.clientOptions as t.VertexAIClientOptions)
400
- .topK as number;
401
- model.topLogprobs = (this.clientOptions as t.VertexAIClientOptions)
402
- .topLogprobs as number;
403
- model.frequencyPenalty = (this.clientOptions as t.VertexAIClientOptions)
404
- .frequencyPenalty as number;
405
- model.presencePenalty = (this.clientOptions as t.VertexAIClientOptions)
406
- .presencePenalty as number;
407
- model.maxOutputTokens = (this.clientOptions as t.VertexAIClientOptions)
408
- .maxOutputTokens as number;
409
- }
410
-
411
- if (!this.tools || this.tools.length === 0) {
412
- return model as unknown as Runnable;
413
- }
414
-
415
- return (model as t.ModelWithTools).bindTools(this.tools);
416
- }
417
- overrideTestModel(
418
- responses: string[],
419
- sleep?: number,
420
- toolCalls?: ToolCall[]
421
- ): void {
422
- this.boundModel = createFakeStreamingLLM({
423
- responses,
424
- sleep,
425
- toolCalls,
426
- });
427
- }
428
-
429
- getNewModel({
430
- provider,
431
- clientOptions,
432
- omitOptions,
433
- }: {
434
- provider: Providers;
435
- clientOptions?: t.ClientOptions;
436
- omitOptions?: Set<string>;
437
- }): t.ChatModelInstance {
438
- const ChatModelClass = getChatModelClass(provider);
439
- const options =
440
- omitOptions && clientOptions == null
441
- ? Object.assign(
442
- Object.fromEntries(
443
- Object.entries(this.clientOptions).filter(
444
- ([key]) => !omitOptions.has(key)
445
- )
446
- ),
447
- clientOptions
448
- )
449
- : (clientOptions ?? this.clientOptions);
450
- return new ChatModelClass(options);
451
- }
452
-
453
- storeUsageMetadata(finalMessage?: BaseMessage): void {
454
- if (
455
- finalMessage &&
456
- 'usage_metadata' in finalMessage &&
457
- finalMessage.usage_metadata != null
458
- ) {
459
- this.currentUsage = finalMessage.usage_metadata as Partial<UsageMetadata>;
460
- }
461
- }
462
-
463
- cleanupSignalListener(): void {
464
- if (!this.signal) {
465
- return;
466
- }
467
- if (!this.boundModel) {
468
- return;
469
- }
470
- const client = (this.boundModel as ChatOpenAI | undefined)?.exposedClient;
471
- if (!client?.abortHandler) {
472
- return;
473
- }
474
- this.signal.removeEventListener('abort', client.abortHandler);
475
- client.abortHandler = undefined;
476
- }
477
-
478
- createCallModel() {
479
- return async (
480
- state: t.BaseGraphState,
481
- config?: RunnableConfig
482
- ): Promise<Partial<t.BaseGraphState>> => {
483
- const { provider = '' } =
484
- (config?.configurable as t.GraphConfig | undefined) ?? {};
485
- if (this.boundModel == null) {
486
- throw new Error('No Graph model found');
487
- }
488
- if (!config || !provider) {
489
- throw new Error(`No ${config ? 'provider' : 'config'} provided`);
490
- }
491
- if (!config.signal) {
492
- config.signal = this.signal;
493
- }
494
- this.config = config;
495
- const { messages } = state;
496
-
497
- let messagesToUse = messages;
498
- if (
499
- !this.pruneMessages &&
500
- this.tokenCounter &&
501
- this.maxContextTokens != null &&
502
- this.indexTokenCountMap[0] != null
503
- ) {
504
- const isAnthropicWithThinking =
505
- (this.provider === Providers.ANTHROPIC &&
506
- (this.clientOptions as t.AnthropicClientOptions).thinking !=
507
- null) ||
508
- (this.provider === Providers.BEDROCK &&
509
- (this.clientOptions as t.BedrockAnthropicInput)
510
- .additionalModelRequestFields?.['thinking'] != null) ||
511
- (this.provider === Providers.OPENAI &&
512
- (
513
- (this.clientOptions as t.OpenAIClientOptions).modelKwargs
514
- ?.thinking as t.AnthropicClientOptions['thinking']
515
- )?.type === 'enabled');
516
-
517
- this.pruneMessages = createPruneMessages({
518
- provider: this.provider,
519
- indexTokenCountMap: this.indexTokenCountMap,
520
- maxTokens: this.maxContextTokens,
521
- tokenCounter: this.tokenCounter,
522
- startIndex: this.startIndex,
523
- thinkingEnabled: isAnthropicWithThinking,
524
- });
525
- }
526
- if (this.pruneMessages) {
527
- const { context, indexTokenCountMap } = this.pruneMessages({
528
- messages,
529
- usageMetadata: this.currentUsage,
530
- // startOnMessageType: 'human',
531
- });
532
- this.indexTokenCountMap = indexTokenCountMap;
533
- messagesToUse = context;
534
- }
535
-
536
- const finalMessages = messagesToUse;
537
- const lastMessageX =
538
- finalMessages.length >= 2
539
- ? finalMessages[finalMessages.length - 2]
540
- : null;
541
- const lastMessageY =
542
- finalMessages.length >= 1
543
- ? finalMessages[finalMessages.length - 1]
544
- : null;
545
-
546
- if (
547
- provider === Providers.BEDROCK &&
548
- lastMessageX instanceof AIMessageChunk &&
549
- lastMessageY instanceof ToolMessage &&
550
- typeof lastMessageX.content === 'string'
551
- ) {
552
- finalMessages[finalMessages.length - 2].content = '';
553
- }
554
-
555
- const isLatestToolMessage = lastMessageY instanceof ToolMessage;
556
-
557
- if (isLatestToolMessage && provider === Providers.ANTHROPIC) {
558
- formatAnthropicArtifactContent(finalMessages);
559
- } else if (
560
- isLatestToolMessage &&
561
- (isOpenAILike(provider) || isGoogleLike(provider))
562
- ) {
563
- formatArtifactPayload(finalMessages);
564
- }
565
-
566
- if (this.lastStreamCall != null && this.streamBuffer != null) {
567
- const timeSinceLastCall = Date.now() - this.lastStreamCall;
568
- if (timeSinceLastCall < this.streamBuffer) {
569
- const timeToWait =
570
- Math.ceil((this.streamBuffer - timeSinceLastCall) / 1000) * 1000;
571
- await sleep(timeToWait);
572
- }
573
- }
574
-
575
- this.lastStreamCall = Date.now();
576
-
577
- let result: Partial<t.BaseGraphState>;
578
- if (
579
- (this.tools?.length ?? 0) > 0 &&
580
- manualToolStreamProviders.has(provider)
581
- ) {
582
- const stream = await this.boundModel.stream(finalMessages, config);
583
- let finalChunk: AIMessageChunk | undefined;
584
- for await (const chunk of stream) {
585
- dispatchCustomEvent(GraphEvents.CHAT_MODEL_STREAM, { chunk }, config);
586
- if (!finalChunk) {
587
- finalChunk = chunk;
588
- } else {
589
- finalChunk = concat(finalChunk, chunk);
590
- }
591
- }
592
-
593
- finalChunk = modifyDeltaProperties(this.provider, finalChunk);
594
- result = { messages: [finalChunk as AIMessageChunk] };
595
- } else {
596
- const finalMessage = (await this.boundModel.invoke(
597
- finalMessages,
598
- config
599
- )) as AIMessageChunk;
600
- if ((finalMessage.tool_calls?.length ?? 0) > 0) {
601
- finalMessage.tool_calls = finalMessage.tool_calls?.filter(
602
- (tool_call) => {
603
- if (!tool_call.name) {
604
- return false;
605
- }
606
- return true;
607
- }
608
- );
609
- }
610
- result = { messages: [finalMessage] };
611
- }
612
-
613
- this.storeUsageMetadata(result.messages?.[0]);
614
- this.cleanupSignalListener();
615
- return result;
616
- };
617
- }
618
-
619
- createWorkflow(): t.CompiledWorkflow<t.BaseGraphState> {
620
- const routeMessage = (
621
- state: t.BaseGraphState,
622
- config?: RunnableConfig
623
- ): string => {
624
- this.config = config;
625
- return toolsCondition(state, this.invokedToolIds);
626
- };
627
-
628
- const workflow = new StateGraph<t.BaseGraphState>({
629
- channels: this.graphState,
630
- })
631
- .addNode(AGENT, this.createCallModel())
632
- .addNode(TOOLS, this.initializeTools())
633
- .addEdge(START, AGENT)
634
- .addConditionalEdges(AGENT, routeMessage)
635
- .addEdge(TOOLS, this.toolEnd ? END : AGENT);
636
-
637
- return workflow.compile();
638
- }
639
-
640
- /* Dispatchers */
641
-
642
- /**
643
- * Dispatches a run step to the client, returns the step ID
644
- */
645
- dispatchRunStep(stepKey: string, stepDetails: t.StepDetails): string {
646
- if (!this.config) {
647
- throw new Error('No config provided');
648
- }
649
-
650
- const [stepId, stepIndex] = this.generateStepId(stepKey);
651
- if (stepDetails.type === StepTypes.TOOL_CALLS && stepDetails.tool_calls) {
652
- for (const tool_call of stepDetails.tool_calls) {
653
- const toolCallId = tool_call.id ?? '';
654
- if (!toolCallId || this.toolCallStepIds.has(toolCallId)) {
655
- continue;
656
- }
657
- this.toolCallStepIds.set(toolCallId, stepId);
658
- }
659
- }
660
-
661
- const runStep: t.RunStep = {
662
- stepIndex,
663
- id: stepId,
664
- type: stepDetails.type,
665
- index: this.contentData.length,
666
- stepDetails,
667
- usage: null,
668
- };
669
-
670
- const runId = this.runId ?? '';
671
- if (runId) {
672
- runStep.runId = runId;
673
- }
674
-
675
- this.contentData.push(runStep);
676
- this.contentIndexMap.set(stepId, runStep.index);
677
- dispatchCustomEvent(GraphEvents.ON_RUN_STEP, runStep, this.config);
678
- return stepId;
679
- }
680
-
681
- handleToolCallCompleted(
682
- data: t.ToolEndData,
683
- metadata?: Record<string, unknown>,
684
- omitOutput?: boolean
685
- ): void {
686
- if (!this.config) {
687
- throw new Error('No config provided');
688
- }
689
-
690
- if (!data.output) {
691
- return;
692
- }
693
-
694
- const { input, output } = data;
695
- const { tool_call_id } = output;
696
- const stepId = this.toolCallStepIds.get(tool_call_id) ?? '';
697
- if (!stepId) {
698
- throw new Error(`No stepId found for tool_call_id ${tool_call_id}`);
699
- }
700
-
701
- const runStep = this.getRunStep(stepId);
702
- if (!runStep) {
703
- throw new Error(`No run step found for stepId ${stepId}`);
704
- }
705
-
706
- const dispatchedOutput =
707
- typeof output.content === 'string'
708
- ? output.content
709
- : JSON.stringify(output.content);
710
-
711
- const args = typeof input === 'string' ? input : input.input;
712
- const tool_call = {
713
- args: typeof args === 'string' ? args : JSON.stringify(args),
714
- name: output.name ?? '',
715
- id: output.tool_call_id,
716
- output: omitOutput === true ? '' : dispatchedOutput,
717
- progress: 1,
718
- };
719
-
720
- this.handlerRegistry?.getHandler(GraphEvents.ON_RUN_STEP_COMPLETED)?.handle(
721
- GraphEvents.ON_RUN_STEP_COMPLETED,
722
- {
723
- result: {
724
- id: stepId,
725
- index: runStep.index,
726
- type: 'tool_call',
727
- tool_call,
728
- } as t.ToolCompleteEvent,
729
- },
730
- metadata,
731
- this
732
- );
733
- }
734
- /**
735
- * Static version of handleToolCallError to avoid creating strong references
736
- * that prevent garbage collection
737
- */
738
- static handleToolCallErrorStatic(
739
- graph: StandardGraph,
740
- data: t.ToolErrorData,
741
- metadata?: Record<string, unknown>
742
- ): void {
743
- if (!graph.config) {
744
- throw new Error('No config provided');
745
- }
746
-
747
- if (!data.id) {
748
- console.warn('No Tool ID provided for Tool Error');
749
- return;
750
- }
751
-
752
- const stepId = graph.toolCallStepIds.get(data.id) ?? '';
753
- if (!stepId) {
754
- throw new Error(`No stepId found for tool_call_id ${data.id}`);
755
- }
756
-
757
- const { name, input: args, error } = data;
758
-
759
- const runStep = graph.getRunStep(stepId);
760
- if (!runStep) {
761
- throw new Error(`No run step found for stepId ${stepId}`);
762
- }
763
-
764
- const tool_call: t.ProcessedToolCall = {
765
- id: data.id,
766
- name: name || '',
767
- args: typeof args === 'string' ? args : JSON.stringify(args),
768
- output: `Error processing tool${error?.message != null ? `: ${error.message}` : ''}`,
769
- progress: 1,
770
- };
771
-
772
- graph.handlerRegistry
773
- ?.getHandler(GraphEvents.ON_RUN_STEP_COMPLETED)
774
- ?.handle(
775
- GraphEvents.ON_RUN_STEP_COMPLETED,
776
- {
777
- result: {
778
- id: stepId,
779
- index: runStep.index,
780
- type: 'tool_call',
781
- tool_call,
782
- } as t.ToolCompleteEvent,
783
- },
784
- metadata,
785
- graph
786
- );
787
- }
788
-
789
- /**
790
- * Instance method that delegates to the static method
791
- * Kept for backward compatibility
792
- */
793
- handleToolCallError(
794
- data: t.ToolErrorData,
795
- metadata?: Record<string, unknown>
796
- ): void {
797
- StandardGraph.handleToolCallErrorStatic(this, data, metadata);
798
- }
799
-
800
- dispatchRunStepDelta(id: string, delta: t.ToolCallDelta): void {
801
- if (!this.config) {
802
- throw new Error('No config provided');
803
- } else if (!id) {
804
- throw new Error('No step ID found');
805
- }
806
- const runStepDelta: t.RunStepDeltaEvent = {
807
- id,
808
- delta,
809
- };
810
- dispatchCustomEvent(
811
- GraphEvents.ON_RUN_STEP_DELTA,
812
- runStepDelta,
813
- this.config
814
- );
815
- }
816
-
817
- dispatchMessageDelta(id: string, delta: t.MessageDelta): void {
818
- if (!this.config) {
819
- throw new Error('No config provided');
820
- }
821
- const messageDelta: t.MessageDeltaEvent = {
822
- id,
823
- delta,
824
- };
825
- dispatchCustomEvent(
826
- GraphEvents.ON_MESSAGE_DELTA,
827
- messageDelta,
828
- this.config
829
- );
830
- }
831
-
832
- dispatchReasoningDelta = (stepId: string, delta: t.ReasoningDelta): void => {
833
- if (!this.config) {
834
- throw new Error('No config provided');
835
- }
836
- const reasoningDelta: t.ReasoningDeltaEvent = {
837
- id: stepId,
838
- delta,
839
- };
840
- dispatchCustomEvent(
841
- GraphEvents.ON_REASONING_DELTA,
842
- reasoningDelta,
843
- this.config
844
- );
845
- };
846
- }
1
+ /* eslint-disable no-console */
2
+ // src/graphs/Graph.ts
3
+ import { nanoid } from 'nanoid';
4
+ import { concat } from '@langchain/core/utils/stream';
5
+ import { ToolNode } from '@langchain/langgraph/prebuilt';
6
+ import { ChatVertexAI } from '@langchain/google-vertexai';
7
+ import {
8
+ START,
9
+ END,
10
+ Command,
11
+ StateGraph,
12
+ Annotation,
13
+ messagesStateReducer,
14
+ } from '@langchain/langgraph';
15
+ import {
16
+ Runnable,
17
+ RunnableConfig,
18
+ RunnableLambda,
19
+ } from '@langchain/core/runnables';
20
+ import {
21
+ ToolMessage,
22
+ SystemMessage,
23
+ AIMessageChunk,
24
+ } from '@langchain/core/messages';
25
+ import type {
26
+ BaseMessageFields,
27
+ UsageMetadata,
28
+ BaseMessage,
29
+ } from '@langchain/core/messages';
30
+ import type { ToolCall } from '@langchain/core/messages/tool';
31
+ import type * as t from '@/types';
32
+ import {
33
+ GraphNodeKeys,
34
+ ContentTypes,
35
+ GraphEvents,
36
+ Providers,
37
+ StepTypes,
38
+ } from '@/common';
39
+ import {
40
+ formatAnthropicArtifactContent,
41
+ convertMessagesToContent,
42
+ addBedrockCacheControl,
43
+ modifyDeltaProperties,
44
+ formatArtifactPayload,
45
+ formatContentStrings,
46
+ createPruneMessages,
47
+ addCacheControl,
48
+ } from '@/messages';
49
+ import {
50
+ resetIfNotEmpty,
51
+ isOpenAILike,
52
+ isGoogleLike,
53
+ joinKeys,
54
+ sleep,
55
+ } from '@/utils';
56
+ import { getChatModelClass, manualToolStreamProviders } from '@/llm/providers';
57
+ import { ToolNode as CustomToolNode, toolsCondition } from '@/tools/ToolNode';
58
+ import { ChatOpenAI, AzureChatOpenAI } from '@/llm/openai';
59
+ import { safeDispatchCustomEvent } from '@/utils/events';
60
+ import { AgentContext } from '@/agents/AgentContext';
61
+ import { createFakeStreamingLLM } from '@/llm/fake';
62
+ import { HandlerRegistry } from '@/events';
63
+
64
+ const { AGENT, TOOLS } = GraphNodeKeys;
65
+
66
+ export abstract class Graph<
67
+ T extends t.BaseGraphState = t.BaseGraphState,
68
+ _TNodeName extends string = string,
69
+ > {
70
+ abstract resetValues(): void;
71
+ abstract initializeTools({
72
+ currentTools,
73
+ currentToolMap,
74
+ }: {
75
+ currentTools?: t.GraphTools;
76
+ currentToolMap?: t.ToolMap;
77
+ }): CustomToolNode<T> | ToolNode<T>;
78
+ abstract initializeModel({
79
+ currentModel,
80
+ tools,
81
+ clientOptions,
82
+ }: {
83
+ currentModel?: t.ChatModel;
84
+ tools?: t.GraphTools;
85
+ clientOptions?: t.ClientOptions;
86
+ }): Runnable;
87
+ abstract getRunMessages(): BaseMessage[] | undefined;
88
+ abstract getContentParts(): t.MessageContentComplex[] | undefined;
89
+ abstract generateStepId(stepKey: string): [string, number];
90
+ abstract getKeyList(
91
+ metadata: Record<string, unknown> | undefined
92
+ ): (string | number | undefined)[];
93
+ abstract getStepKey(metadata: Record<string, unknown> | undefined): string;
94
+ abstract checkKeyList(keyList: (string | number | undefined)[]): boolean;
95
+ abstract getStepIdByKey(stepKey: string, index?: number): string;
96
+ abstract getRunStep(stepId: string): t.RunStep | undefined;
97
+ abstract dispatchRunStep(
98
+ stepKey: string,
99
+ stepDetails: t.StepDetails
100
+ ): Promise<string>;
101
+ abstract dispatchRunStepDelta(
102
+ id: string,
103
+ delta: t.ToolCallDelta
104
+ ): Promise<void>;
105
+ abstract dispatchMessageDelta(
106
+ id: string,
107
+ delta: t.MessageDelta
108
+ ): Promise<void>;
109
+ abstract dispatchReasoningDelta(
110
+ stepId: string,
111
+ delta: t.ReasoningDelta
112
+ ): Promise<void>;
113
+ abstract handleToolCallCompleted(
114
+ data: t.ToolEndData,
115
+ metadata?: Record<string, unknown>,
116
+ omitOutput?: boolean
117
+ ): Promise<void>;
118
+
119
+ abstract createCallModel(
120
+ agentId?: string,
121
+ currentModel?: t.ChatModel
122
+ ): (state: T, config?: RunnableConfig) => Promise<Partial<T>>;
123
+ messageStepHasToolCalls: Map<string, boolean> = new Map();
124
+ messageIdsByStepKey: Map<string, string> = new Map();
125
+ prelimMessageIdsByStepKey: Map<string, string> = new Map();
126
+ config: RunnableConfig | undefined;
127
+ contentData: t.RunStep[] = [];
128
+ stepKeyIds: Map<string, string[]> = new Map<string, string[]>();
129
+ contentIndexMap: Map<string, number> = new Map();
130
+ toolCallStepIds: Map<string, string> = new Map();
131
+ signal?: AbortSignal;
132
+ /** Set of invoked tool call IDs from non-message run steps completed mid-run, if any */
133
+ invokedToolIds?: Set<string>;
134
+ handlerRegistry: HandlerRegistry | undefined;
135
+ }
136
+
137
+ export class StandardGraph extends Graph<t.BaseGraphState, t.GraphNode> {
138
+ overrideModel?: t.ChatModel;
139
+ /** Optional compile options passed into workflow.compile() */
140
+ compileOptions?: t.CompileOptions | undefined;
141
+ messages: BaseMessage[] = [];
142
+ runId: string | undefined;
143
+ startIndex: number = 0;
144
+ signal?: AbortSignal;
145
+ /** Map of agent contexts by agent ID */
146
+ agentContexts: Map<string, AgentContext> = new Map();
147
+ /** Default agent ID to use */
148
+ defaultAgentId: string;
149
+
150
+ constructor({
151
+ // parent-level graph inputs
152
+ runId,
153
+ signal,
154
+ agents,
155
+ tokenCounter,
156
+ indexTokenCountMap,
157
+ }: t.StandardGraphInput) {
158
+ super();
159
+ this.runId = runId;
160
+ this.signal = signal;
161
+
162
+ if (agents.length === 0) {
163
+ throw new Error('At least one agent configuration is required');
164
+ }
165
+
166
+ for (const agentConfig of agents) {
167
+ const agentContext = AgentContext.fromConfig(
168
+ agentConfig,
169
+ tokenCounter,
170
+ indexTokenCountMap
171
+ );
172
+
173
+ this.agentContexts.set(agentConfig.agentId, agentContext);
174
+ }
175
+
176
+ this.defaultAgentId = agents[0].agentId;
177
+ }
178
+
179
+ /* Init */
180
+
181
+ resetValues(keepContent?: boolean): void {
182
+ this.messages = [];
183
+ this.config = resetIfNotEmpty(this.config, undefined);
184
+ if (keepContent !== true) {
185
+ this.contentData = resetIfNotEmpty(this.contentData, []);
186
+ this.contentIndexMap = resetIfNotEmpty(this.contentIndexMap, new Map());
187
+ }
188
+ this.stepKeyIds = resetIfNotEmpty(this.stepKeyIds, new Map());
189
+ this.toolCallStepIds = resetIfNotEmpty(this.toolCallStepIds, new Map());
190
+ this.messageIdsByStepKey = resetIfNotEmpty(
191
+ this.messageIdsByStepKey,
192
+ new Map()
193
+ );
194
+ this.messageStepHasToolCalls = resetIfNotEmpty(
195
+ this.messageStepHasToolCalls,
196
+ new Map()
197
+ );
198
+ this.prelimMessageIdsByStepKey = resetIfNotEmpty(
199
+ this.prelimMessageIdsByStepKey,
200
+ new Map()
201
+ );
202
+ this.invokedToolIds = resetIfNotEmpty(this.invokedToolIds, undefined);
203
+ for (const context of this.agentContexts.values()) {
204
+ context.reset();
205
+ }
206
+ }
207
+
208
+ /* Run Step Processing */
209
+
210
+ getRunStep(stepId: string): t.RunStep | undefined {
211
+ const index = this.contentIndexMap.get(stepId);
212
+ if (index !== undefined) {
213
+ return this.contentData[index];
214
+ }
215
+ return undefined;
216
+ }
217
+
218
+ getAgentContext(metadata: Record<string, unknown> | undefined): AgentContext {
219
+ if (!metadata) {
220
+ throw new Error('No metadata provided to retrieve agent context');
221
+ }
222
+
223
+ const currentNode = metadata.langgraph_node as string;
224
+ if (!currentNode) {
225
+ throw new Error(
226
+ 'No langgraph_node in metadata to retrieve agent context'
227
+ );
228
+ }
229
+
230
+ let agentId: string | undefined;
231
+ if (currentNode.startsWith(AGENT)) {
232
+ agentId = currentNode.substring(AGENT.length);
233
+ } else if (currentNode.startsWith(TOOLS)) {
234
+ agentId = currentNode.substring(TOOLS.length);
235
+ }
236
+
237
+ const agentContext = this.agentContexts.get(agentId ?? '');
238
+ if (!agentContext) {
239
+ throw new Error(`No agent context found for agent ID ${agentId}`);
240
+ }
241
+
242
+ return agentContext;
243
+ }
244
+
245
+ getStepKey(metadata: Record<string, unknown> | undefined): string {
246
+ if (!metadata) return '';
247
+
248
+ const keyList = this.getKeyList(metadata);
249
+ if (this.checkKeyList(keyList)) {
250
+ throw new Error('Missing metadata');
251
+ }
252
+
253
+ return joinKeys(keyList);
254
+ }
255
+
256
+ getStepIdByKey(stepKey: string, index?: number): string {
257
+ const stepIds = this.stepKeyIds.get(stepKey);
258
+ if (!stepIds) {
259
+ throw new Error(`No step IDs found for stepKey ${stepKey}`);
260
+ }
261
+
262
+ if (index === undefined) {
263
+ return stepIds[stepIds.length - 1];
264
+ }
265
+
266
+ return stepIds[index];
267
+ }
268
+
269
+ generateStepId(stepKey: string): [string, number] {
270
+ const stepIds = this.stepKeyIds.get(stepKey);
271
+ let newStepId: string | undefined;
272
+ let stepIndex = 0;
273
+ if (stepIds) {
274
+ stepIndex = stepIds.length;
275
+ newStepId = `step_${nanoid()}`;
276
+ stepIds.push(newStepId);
277
+ this.stepKeyIds.set(stepKey, stepIds);
278
+ } else {
279
+ newStepId = `step_${nanoid()}`;
280
+ this.stepKeyIds.set(stepKey, [newStepId]);
281
+ }
282
+
283
+ return [newStepId, stepIndex];
284
+ }
285
+
286
+ getKeyList(
287
+ metadata: Record<string, unknown> | undefined
288
+ ): (string | number | undefined)[] {
289
+ if (!metadata) return [];
290
+
291
+ const keyList = [
292
+ metadata.run_id as string,
293
+ metadata.thread_id as string,
294
+ metadata.langgraph_node as string,
295
+ metadata.langgraph_step as number,
296
+ metadata.checkpoint_ns as string,
297
+ ];
298
+
299
+ const agentContext = this.getAgentContext(metadata);
300
+ if (
301
+ agentContext.currentTokenType === ContentTypes.THINK ||
302
+ agentContext.currentTokenType === 'think_and_text'
303
+ ) {
304
+ keyList.push('reasoning');
305
+ } else if (agentContext.tokenTypeSwitch === 'content') {
306
+ keyList.push('post-reasoning');
307
+ }
308
+
309
+ if (this.invokedToolIds != null && this.invokedToolIds.size > 0) {
310
+ keyList.push(this.invokedToolIds.size + '');
311
+ }
312
+
313
+ return keyList;
314
+ }
315
+
316
+ checkKeyList(keyList: (string | number | undefined)[]): boolean {
317
+ return keyList.some((key) => key === undefined);
318
+ }
319
+
320
+ /* Misc.*/
321
+
322
+ getRunMessages(): BaseMessage[] | undefined {
323
+ return this.messages.slice(this.startIndex);
324
+ }
325
+
326
+ getContentParts(): t.MessageContentComplex[] | undefined {
327
+ return convertMessagesToContent(this.messages.slice(this.startIndex));
328
+ }
329
+
330
+ /* Graph */
331
+
332
+ createSystemRunnable({
333
+ provider,
334
+ clientOptions,
335
+ instructions,
336
+ additional_instructions,
337
+ }: {
338
+ provider?: Providers;
339
+ clientOptions?: t.ClientOptions;
340
+ instructions?: string;
341
+ additional_instructions?: string;
342
+ }): t.SystemRunnable | undefined {
343
+ let finalInstructions: string | BaseMessageFields | undefined =
344
+ instructions;
345
+ if (additional_instructions != null && additional_instructions !== '') {
346
+ finalInstructions =
347
+ finalInstructions != null && finalInstructions
348
+ ? `${finalInstructions}\n\n${additional_instructions}`
349
+ : additional_instructions;
350
+ }
351
+
352
+ if (
353
+ finalInstructions != null &&
354
+ finalInstructions &&
355
+ provider === Providers.ANTHROPIC &&
356
+ ((
357
+ (clientOptions as t.AnthropicClientOptions).clientOptions
358
+ ?.defaultHeaders as Record<string, string> | undefined
359
+ )?.['anthropic-beta']?.includes('prompt-caching') ??
360
+ false)
361
+ ) {
362
+ finalInstructions = {
363
+ content: [
364
+ {
365
+ type: 'text',
366
+ text: instructions,
367
+ cache_control: { type: 'ephemeral' },
368
+ },
369
+ ],
370
+ };
371
+ }
372
+
373
+ if (finalInstructions != null && finalInstructions !== '') {
374
+ const systemMessage = new SystemMessage(finalInstructions);
375
+ return RunnableLambda.from((messages: BaseMessage[]) => {
376
+ return [systemMessage, ...messages];
377
+ }).withConfig({ runName: 'prompt' });
378
+ }
379
+ }
380
+
381
+ initializeTools({
382
+ currentTools,
383
+ currentToolMap,
384
+ }: {
385
+ currentTools?: t.GraphTools;
386
+ currentToolMap?: t.ToolMap;
387
+ }): CustomToolNode<t.BaseGraphState> | ToolNode<t.BaseGraphState> {
388
+ return new CustomToolNode<t.BaseGraphState>({
389
+ tools: (currentTools as t.GenericTool[] | undefined) ?? [],
390
+ toolMap: currentToolMap,
391
+ toolCallStepIds: this.toolCallStepIds,
392
+ errorHandler: (data, metadata) =>
393
+ StandardGraph.handleToolCallErrorStatic(this, data, metadata),
394
+ });
395
+ }
396
+
397
+ initializeModel({
398
+ provider,
399
+ tools,
400
+ clientOptions,
401
+ }: {
402
+ provider: Providers;
403
+ tools?: t.GraphTools;
404
+ clientOptions?: t.ClientOptions;
405
+ }): Runnable {
406
+ const ChatModelClass = getChatModelClass(provider);
407
+ const model = new ChatModelClass(clientOptions ?? {});
408
+
409
+ if (
410
+ isOpenAILike(provider) &&
411
+ (model instanceof ChatOpenAI || model instanceof AzureChatOpenAI)
412
+ ) {
413
+ model.temperature = (clientOptions as t.OpenAIClientOptions)
414
+ .temperature as number;
415
+ model.topP = (clientOptions as t.OpenAIClientOptions).topP as number;
416
+ model.frequencyPenalty = (clientOptions as t.OpenAIClientOptions)
417
+ .frequencyPenalty as number;
418
+ model.presencePenalty = (clientOptions as t.OpenAIClientOptions)
419
+ .presencePenalty as number;
420
+ model.n = (clientOptions as t.OpenAIClientOptions).n as number;
421
+ } else if (
422
+ provider === Providers.VERTEXAI &&
423
+ model instanceof ChatVertexAI
424
+ ) {
425
+ model.temperature = (clientOptions as t.VertexAIClientOptions)
426
+ .temperature as number;
427
+ model.topP = (clientOptions as t.VertexAIClientOptions).topP as number;
428
+ model.topK = (clientOptions as t.VertexAIClientOptions).topK as number;
429
+ model.topLogprobs = (clientOptions as t.VertexAIClientOptions)
430
+ .topLogprobs as number;
431
+ model.frequencyPenalty = (clientOptions as t.VertexAIClientOptions)
432
+ .frequencyPenalty as number;
433
+ model.presencePenalty = (clientOptions as t.VertexAIClientOptions)
434
+ .presencePenalty as number;
435
+ model.maxOutputTokens = (clientOptions as t.VertexAIClientOptions)
436
+ .maxOutputTokens as number;
437
+ }
438
+
439
+ if (!tools || tools.length === 0) {
440
+ return model as unknown as Runnable;
441
+ }
442
+
443
+ return (model as t.ModelWithTools).bindTools(tools);
444
+ }
445
+
446
+ overrideTestModel(
447
+ responses: string[],
448
+ sleep?: number,
449
+ toolCalls?: ToolCall[]
450
+ ): void {
451
+ this.overrideModel = createFakeStreamingLLM({
452
+ responses,
453
+ sleep,
454
+ toolCalls,
455
+ });
456
+ }
457
+
458
+ getNewModel({
459
+ provider,
460
+ clientOptions,
461
+ }: {
462
+ provider: Providers;
463
+ clientOptions?: t.ClientOptions;
464
+ }): t.ChatModelInstance {
465
+ const ChatModelClass = getChatModelClass(provider);
466
+ return new ChatModelClass(clientOptions ?? {});
467
+ }
468
+
469
+ getUsageMetadata(
470
+ finalMessage?: BaseMessage
471
+ ): Partial<UsageMetadata> | undefined {
472
+ if (
473
+ finalMessage &&
474
+ 'usage_metadata' in finalMessage &&
475
+ finalMessage.usage_metadata != null
476
+ ) {
477
+ return finalMessage.usage_metadata as Partial<UsageMetadata>;
478
+ }
479
+ }
480
+
481
+ /** Execute model invocation with streaming support */
482
+ private async attemptInvoke(
483
+ {
484
+ currentModel,
485
+ finalMessages,
486
+ provider,
487
+ tools,
488
+ }: {
489
+ currentModel?: t.ChatModel;
490
+ finalMessages: BaseMessage[];
491
+ provider: Providers;
492
+ tools?: t.GraphTools;
493
+ },
494
+ config?: RunnableConfig
495
+ ): Promise<Partial<t.BaseGraphState>> {
496
+ const model = this.overrideModel ?? currentModel;
497
+ if (!model) {
498
+ throw new Error('No model found');
499
+ }
500
+
501
+ if ((tools?.length ?? 0) > 0 && manualToolStreamProviders.has(provider)) {
502
+ if (!model.stream) {
503
+ throw new Error('Model does not support stream');
504
+ }
505
+ const stream = await model.stream(finalMessages, config);
506
+ let finalChunk: AIMessageChunk | undefined;
507
+ for await (const chunk of stream) {
508
+ await safeDispatchCustomEvent(
509
+ GraphEvents.CHAT_MODEL_STREAM,
510
+ { chunk, emitted: true },
511
+ config
512
+ );
513
+ finalChunk = finalChunk ? concat(finalChunk, chunk) : chunk;
514
+ }
515
+ finalChunk = modifyDeltaProperties(provider, finalChunk);
516
+ return { messages: [finalChunk as AIMessageChunk] };
517
+ } else {
518
+ const finalMessage = await model.invoke(finalMessages, config);
519
+ if ((finalMessage.tool_calls?.length ?? 0) > 0) {
520
+ finalMessage.tool_calls = finalMessage.tool_calls?.filter(
521
+ (tool_call: ToolCall) => !!tool_call.name
522
+ );
523
+ }
524
+ return { messages: [finalMessage] };
525
+ }
526
+ }
527
+
528
+ cleanupSignalListener(currentModel?: t.ChatModel): void {
529
+ if (!this.signal) {
530
+ return;
531
+ }
532
+ const model = this.overrideModel ?? currentModel;
533
+ if (!model) {
534
+ return;
535
+ }
536
+ const client = (model as ChatOpenAI | undefined)?.exposedClient;
537
+ if (!client?.abortHandler) {
538
+ return;
539
+ }
540
+ this.signal.removeEventListener('abort', client.abortHandler);
541
+ client.abortHandler = undefined;
542
+ }
543
+
544
+ createCallModel(agentId = 'default', currentModel?: t.ChatModel) {
545
+ return async (
546
+ state: t.BaseGraphState,
547
+ config?: RunnableConfig
548
+ ): Promise<Partial<t.BaseGraphState>> => {
549
+ /**
550
+ * Get agent context - it must exist by this point
551
+ */
552
+ const agentContext = this.agentContexts.get(agentId);
553
+ if (!agentContext) {
554
+ throw new Error(`Agent context not found for agentId: ${agentId}`);
555
+ }
556
+
557
+ const model = this.overrideModel ?? currentModel;
558
+ if (!model) {
559
+ throw new Error('No Graph model found');
560
+ }
561
+ if (!config) {
562
+ throw new Error('No config provided');
563
+ }
564
+
565
+ // Ensure token calculations are complete before proceeding
566
+ if (agentContext.tokenCalculationPromise) {
567
+ await agentContext.tokenCalculationPromise;
568
+ }
569
+ if (!config.signal) {
570
+ config.signal = this.signal;
571
+ }
572
+ this.config = config;
573
+ const { messages } = state;
574
+
575
+ let messagesToUse = messages;
576
+ if (
577
+ !agentContext.pruneMessages &&
578
+ agentContext.tokenCounter &&
579
+ agentContext.maxContextTokens != null &&
580
+ agentContext.indexTokenCountMap[0] != null
581
+ ) {
582
+ const isAnthropicWithThinking =
583
+ (agentContext.provider === Providers.ANTHROPIC &&
584
+ (agentContext.clientOptions as t.AnthropicClientOptions).thinking !=
585
+ null) ||
586
+ (agentContext.provider === Providers.BEDROCK &&
587
+ (agentContext.clientOptions as t.BedrockAnthropicInput)
588
+ .additionalModelRequestFields?.['thinking'] != null) ||
589
+ (agentContext.provider === Providers.OPENAI &&
590
+ (
591
+ (agentContext.clientOptions as t.OpenAIClientOptions).modelKwargs
592
+ ?.thinking as t.AnthropicClientOptions['thinking']
593
+ )?.type === 'enabled');
594
+
595
+ agentContext.pruneMessages = createPruneMessages({
596
+ startIndex: this.startIndex,
597
+ provider: agentContext.provider,
598
+ tokenCounter: agentContext.tokenCounter,
599
+ maxTokens: agentContext.maxContextTokens,
600
+ thinkingEnabled: isAnthropicWithThinking,
601
+ indexTokenCountMap: agentContext.indexTokenCountMap,
602
+ });
603
+ }
604
+ if (agentContext.pruneMessages) {
605
+ const { context, indexTokenCountMap } = agentContext.pruneMessages({
606
+ messages,
607
+ usageMetadata: agentContext.currentUsage,
608
+ // startOnMessageType: 'human',
609
+ });
610
+ agentContext.indexTokenCountMap = indexTokenCountMap;
611
+ messagesToUse = context;
612
+ }
613
+
614
+ let finalMessages = messagesToUse;
615
+ if (agentContext.useLegacyContent) {
616
+ finalMessages = formatContentStrings(finalMessages);
617
+ }
618
+
619
+ const lastMessageX =
620
+ finalMessages.length >= 2
621
+ ? finalMessages[finalMessages.length - 2]
622
+ : null;
623
+ const lastMessageY =
624
+ finalMessages.length >= 1
625
+ ? finalMessages[finalMessages.length - 1]
626
+ : null;
627
+
628
+ if (
629
+ agentContext.provider === Providers.BEDROCK &&
630
+ lastMessageX instanceof AIMessageChunk &&
631
+ lastMessageY instanceof ToolMessage &&
632
+ typeof lastMessageX.content === 'string'
633
+ ) {
634
+ finalMessages[finalMessages.length - 2].content = '';
635
+ }
636
+
637
+ const isLatestToolMessage = lastMessageY instanceof ToolMessage;
638
+
639
+ if (
640
+ isLatestToolMessage &&
641
+ agentContext.provider === Providers.ANTHROPIC
642
+ ) {
643
+ formatAnthropicArtifactContent(finalMessages);
644
+ } else if (
645
+ isLatestToolMessage &&
646
+ (isOpenAILike(agentContext.provider) ||
647
+ isGoogleLike(agentContext.provider))
648
+ ) {
649
+ formatArtifactPayload(finalMessages);
650
+ }
651
+
652
+ if (agentContext.provider === Providers.ANTHROPIC) {
653
+ const anthropicOptions = agentContext.clientOptions as
654
+ | t.AnthropicClientOptions
655
+ | undefined;
656
+ const defaultHeaders = anthropicOptions?.clientOptions
657
+ ?.defaultHeaders as Record<string, string> | undefined;
658
+ const anthropicBeta = defaultHeaders?.['anthropic-beta'];
659
+
660
+ if (
661
+ typeof anthropicBeta === 'string' &&
662
+ anthropicBeta.includes('prompt-caching')
663
+ ) {
664
+ finalMessages = addCacheControl<BaseMessage>(finalMessages);
665
+ }
666
+ } else if (agentContext.provider === Providers.BEDROCK) {
667
+ const bedrockOptions = agentContext.clientOptions as
668
+ | t.BedrockAnthropicClientOptions
669
+ | undefined;
670
+ if (bedrockOptions?.promptCache === true) {
671
+ finalMessages = addBedrockCacheControl<BaseMessage>(finalMessages);
672
+ }
673
+ }
674
+
675
+ if (
676
+ agentContext.lastStreamCall != null &&
677
+ agentContext.streamBuffer != null
678
+ ) {
679
+ const timeSinceLastCall = Date.now() - agentContext.lastStreamCall;
680
+ if (timeSinceLastCall < agentContext.streamBuffer) {
681
+ const timeToWait =
682
+ Math.ceil((agentContext.streamBuffer - timeSinceLastCall) / 1000) *
683
+ 1000;
684
+ await sleep(timeToWait);
685
+ }
686
+ }
687
+
688
+ agentContext.lastStreamCall = Date.now();
689
+
690
+ let result: Partial<t.BaseGraphState> | undefined;
691
+ const fallbacks =
692
+ (agentContext.clientOptions as t.LLMConfig | undefined)?.fallbacks ??
693
+ [];
694
+
695
+ if (finalMessages.length === 0) {
696
+ throw new Error(
697
+ JSON.stringify({
698
+ type: 'empty_messages',
699
+ info: 'Message pruning removed all messages as none fit in the context window. Please increase the context window size or make your message shorter.',
700
+ })
701
+ );
702
+ }
703
+
704
+ try {
705
+ result = await this.attemptInvoke(
706
+ {
707
+ currentModel: model,
708
+ finalMessages,
709
+ provider: agentContext.provider,
710
+ tools: agentContext.tools,
711
+ },
712
+ config
713
+ );
714
+ } catch (primaryError) {
715
+ let lastError: unknown = primaryError;
716
+ for (const fb of fallbacks) {
717
+ try {
718
+ let model = this.getNewModel({
719
+ provider: fb.provider,
720
+ clientOptions: fb.clientOptions,
721
+ });
722
+ const bindableTools = agentContext.tools;
723
+ model = (
724
+ !bindableTools || bindableTools.length === 0
725
+ ? model
726
+ : model.bindTools(bindableTools)
727
+ ) as t.ChatModelInstance;
728
+ result = await this.attemptInvoke(
729
+ {
730
+ currentModel: model,
731
+ finalMessages,
732
+ provider: fb.provider,
733
+ tools: agentContext.tools,
734
+ },
735
+ config
736
+ );
737
+ lastError = undefined;
738
+ break;
739
+ } catch (e) {
740
+ lastError = e;
741
+ continue;
742
+ }
743
+ }
744
+ if (lastError !== undefined) {
745
+ throw lastError;
746
+ }
747
+ }
748
+
749
+ if (!result) {
750
+ throw new Error('No result after model invocation');
751
+ }
752
+ agentContext.currentUsage = this.getUsageMetadata(result.messages?.[0]);
753
+ this.cleanupSignalListener();
754
+ return result;
755
+ };
756
+ }
757
+
758
+ createAgentNode(agentId: string): t.CompiledAgentWorfklow {
759
+ const agentContext = this.agentContexts.get(agentId);
760
+ if (!agentContext) {
761
+ throw new Error(`Agent context not found for agentId: ${agentId}`);
762
+ }
763
+
764
+ let currentModel = this.initializeModel({
765
+ tools: agentContext.tools,
766
+ provider: agentContext.provider,
767
+ clientOptions: agentContext.clientOptions,
768
+ });
769
+
770
+ if (agentContext.systemRunnable) {
771
+ currentModel = agentContext.systemRunnable.pipe(currentModel);
772
+ }
773
+
774
+ const agentNode = `${AGENT}${agentId}` as const;
775
+ const toolNode = `${TOOLS}${agentId}` as const;
776
+
777
+ const routeMessage = (
778
+ state: t.BaseGraphState,
779
+ config?: RunnableConfig
780
+ ): string => {
781
+ this.config = config;
782
+ return toolsCondition(state, toolNode, this.invokedToolIds);
783
+ };
784
+
785
+ const StateAnnotation = Annotation.Root({
786
+ messages: Annotation<BaseMessage[]>({
787
+ reducer: messagesStateReducer,
788
+ default: () => [],
789
+ }),
790
+ });
791
+
792
+ const workflow = new StateGraph(StateAnnotation)
793
+ .addNode(agentNode, this.createCallModel(agentId, currentModel))
794
+ .addNode(
795
+ toolNode,
796
+ this.initializeTools({
797
+ currentTools: agentContext.tools,
798
+ currentToolMap: agentContext.toolMap,
799
+ })
800
+ )
801
+ .addEdge(START, agentNode)
802
+ .addConditionalEdges(agentNode, routeMessage)
803
+ .addEdge(toolNode, agentContext.toolEnd ? END : agentNode);
804
+
805
+ // Cast to unknown to avoid tight coupling to external types; options are opt-in
806
+ return workflow.compile(this.compileOptions as unknown as never);
807
+ }
808
+
809
+ createWorkflow(): t.CompiledStateWorkflow {
810
+ /** Use the default (first) agent for now */
811
+ const agentNode = this.createAgentNode(this.defaultAgentId);
812
+ const StateAnnotation = Annotation.Root({
813
+ messages: Annotation<BaseMessage[]>({
814
+ reducer: (a, b) => {
815
+ if (!a.length) {
816
+ this.startIndex = a.length + b.length;
817
+ }
818
+ const result = messagesStateReducer(a, b);
819
+ this.messages = result;
820
+ return result;
821
+ },
822
+ default: () => [],
823
+ }),
824
+ });
825
+ const workflow = new StateGraph(StateAnnotation)
826
+ .addNode(this.defaultAgentId, agentNode, { ends: [END] })
827
+ .addEdge(START, this.defaultAgentId)
828
+ .compile();
829
+
830
+ return workflow;
831
+ }
832
+
833
+ /* Dispatchers */
834
+
835
+ /**
836
+ * Dispatches a run step to the client, returns the step ID
837
+ */
838
+ async dispatchRunStep(
839
+ stepKey: string,
840
+ stepDetails: t.StepDetails
841
+ ): Promise<string> {
842
+ if (!this.config) {
843
+ throw new Error('No config provided');
844
+ }
845
+
846
+ const [stepId, stepIndex] = this.generateStepId(stepKey);
847
+ if (stepDetails.type === StepTypes.TOOL_CALLS && stepDetails.tool_calls) {
848
+ for (const tool_call of stepDetails.tool_calls) {
849
+ const toolCallId = tool_call.id ?? '';
850
+ if (!toolCallId || this.toolCallStepIds.has(toolCallId)) {
851
+ continue;
852
+ }
853
+ this.toolCallStepIds.set(toolCallId, stepId);
854
+ }
855
+ }
856
+
857
+ const runStep: t.RunStep = {
858
+ stepIndex,
859
+ id: stepId,
860
+ type: stepDetails.type,
861
+ index: this.contentData.length,
862
+ stepDetails,
863
+ usage: null,
864
+ };
865
+
866
+ const runId = this.runId ?? '';
867
+ if (runId) {
868
+ runStep.runId = runId;
869
+ }
870
+
871
+ this.contentData.push(runStep);
872
+ this.contentIndexMap.set(stepId, runStep.index);
873
+ await safeDispatchCustomEvent(
874
+ GraphEvents.ON_RUN_STEP,
875
+ runStep,
876
+ this.config
877
+ );
878
+ return stepId;
879
+ }
880
+
881
+ async handleToolCallCompleted(
882
+ data: t.ToolEndData,
883
+ metadata?: Record<string, unknown>,
884
+ omitOutput?: boolean
885
+ ): Promise<void> {
886
+ if (!this.config) {
887
+ throw new Error('No config provided');
888
+ }
889
+
890
+ if (!data.output) {
891
+ return;
892
+ }
893
+
894
+ const { input, output: _output } = data;
895
+ if ((_output as Command | undefined)?.lg_name === 'Command') {
896
+ return;
897
+ }
898
+ const output = _output as ToolMessage;
899
+ const { tool_call_id } = output;
900
+ const stepId = this.toolCallStepIds.get(tool_call_id) ?? '';
901
+ if (!stepId) {
902
+ throw new Error(`No stepId found for tool_call_id ${tool_call_id}`);
903
+ }
904
+
905
+ const runStep = this.getRunStep(stepId);
906
+ if (!runStep) {
907
+ throw new Error(`No run step found for stepId ${stepId}`);
908
+ }
909
+
910
+ const dispatchedOutput =
911
+ typeof output.content === 'string'
912
+ ? output.content
913
+ : JSON.stringify(output.content);
914
+
915
+ const args = typeof input === 'string' ? input : input.input;
916
+ const tool_call = {
917
+ args: typeof args === 'string' ? args : JSON.stringify(args),
918
+ name: output.name ?? '',
919
+ id: output.tool_call_id,
920
+ output: omitOutput === true ? '' : dispatchedOutput,
921
+ progress: 1,
922
+ };
923
+
924
+ await this.handlerRegistry
925
+ ?.getHandler(GraphEvents.ON_RUN_STEP_COMPLETED)
926
+ ?.handle(
927
+ GraphEvents.ON_RUN_STEP_COMPLETED,
928
+ {
929
+ result: {
930
+ id: stepId,
931
+ index: runStep.index,
932
+ type: 'tool_call',
933
+ tool_call,
934
+ } as t.ToolCompleteEvent,
935
+ },
936
+ metadata,
937
+ this
938
+ );
939
+ }
940
+ /**
941
+ * Static version of handleToolCallError to avoid creating strong references
942
+ * that prevent garbage collection
943
+ */
944
+ static async handleToolCallErrorStatic(
945
+ graph: StandardGraph,
946
+ data: t.ToolErrorData,
947
+ metadata?: Record<string, unknown>
948
+ ): Promise<void> {
949
+ if (!graph.config) {
950
+ throw new Error('No config provided');
951
+ }
952
+
953
+ if (!data.id) {
954
+ console.warn('No Tool ID provided for Tool Error');
955
+ return;
956
+ }
957
+
958
+ const stepId = graph.toolCallStepIds.get(data.id) ?? '';
959
+ if (!stepId) {
960
+ throw new Error(`No stepId found for tool_call_id ${data.id}`);
961
+ }
962
+
963
+ const { name, input: args, error } = data;
964
+
965
+ const runStep = graph.getRunStep(stepId);
966
+ if (!runStep) {
967
+ throw new Error(`No run step found for stepId ${stepId}`);
968
+ }
969
+
970
+ const tool_call: t.ProcessedToolCall = {
971
+ id: data.id,
972
+ name: name || '',
973
+ args: typeof args === 'string' ? args : JSON.stringify(args),
974
+ output: `Error processing tool${error?.message != null ? `: ${error.message}` : ''}`,
975
+ progress: 1,
976
+ };
977
+
978
+ await graph.handlerRegistry
979
+ ?.getHandler(GraphEvents.ON_RUN_STEP_COMPLETED)
980
+ ?.handle(
981
+ GraphEvents.ON_RUN_STEP_COMPLETED,
982
+ {
983
+ result: {
984
+ id: stepId,
985
+ index: runStep.index,
986
+ type: 'tool_call',
987
+ tool_call,
988
+ } as t.ToolCompleteEvent,
989
+ },
990
+ metadata,
991
+ graph
992
+ );
993
+ }
994
+
995
  /**
   * Instance method that delegates to the static method
   * `handleToolCallErrorStatic`. Kept for backward compatibility with
   * callers that invoke it on the instance.
   *
   * @param data - Tool error payload (id, name, input, error) for the failed call.
   * @param metadata - Optional event metadata forwarded to the completion handler.
   */
  async handleToolCallError(
    data: t.ToolErrorData,
    metadata?: Record<string, unknown>
  ): Promise<void> {
    await StandardGraph.handleToolCallErrorStatic(this, data, metadata);
  }
1005
+
1006
+ async dispatchRunStepDelta(
1007
+ id: string,
1008
+ delta: t.ToolCallDelta
1009
+ ): Promise<void> {
1010
+ if (!this.config) {
1011
+ throw new Error('No config provided');
1012
+ } else if (!id) {
1013
+ throw new Error('No step ID found');
1014
+ }
1015
+ const runStepDelta: t.RunStepDeltaEvent = {
1016
+ id,
1017
+ delta,
1018
+ };
1019
+ await safeDispatchCustomEvent(
1020
+ GraphEvents.ON_RUN_STEP_DELTA,
1021
+ runStepDelta,
1022
+ this.config
1023
+ );
1024
+ }
1025
+
1026
+ async dispatchMessageDelta(id: string, delta: t.MessageDelta): Promise<void> {
1027
+ if (!this.config) {
1028
+ throw new Error('No config provided');
1029
+ }
1030
+ const messageDelta: t.MessageDeltaEvent = {
1031
+ id,
1032
+ delta,
1033
+ };
1034
+ await safeDispatchCustomEvent(
1035
+ GraphEvents.ON_MESSAGE_DELTA,
1036
+ messageDelta,
1037
+ this.config
1038
+ );
1039
+ }
1040
+
1041
+ dispatchReasoningDelta = async (
1042
+ stepId: string,
1043
+ delta: t.ReasoningDelta
1044
+ ): Promise<void> => {
1045
+ if (!this.config) {
1046
+ throw new Error('No config provided');
1047
+ }
1048
+ const reasoningDelta: t.ReasoningDeltaEvent = {
1049
+ id: stepId,
1050
+ delta,
1051
+ };
1052
+ await safeDispatchCustomEvent(
1053
+ GraphEvents.ON_REASONING_DELTA,
1054
+ reasoningDelta,
1055
+ this.config
1056
+ );
1057
+ };
1058
+ }