@librechat/agents 3.0.0-rc1 → 3.0.0-rc10

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (78)
  1. package/dist/cjs/common/enum.cjs +1 -0
  2. package/dist/cjs/common/enum.cjs.map +1 -1
  3. package/dist/cjs/graphs/Graph.cjs +0 -1
  4. package/dist/cjs/graphs/Graph.cjs.map +1 -1
  5. package/dist/cjs/graphs/MultiAgentGraph.cjs +229 -44
  6. package/dist/cjs/graphs/MultiAgentGraph.cjs.map +1 -1
  7. package/dist/cjs/llm/openai/index.cjs +33 -0
  8. package/dist/cjs/llm/openai/index.cjs.map +1 -1
  9. package/dist/cjs/run.cjs +28 -15
  10. package/dist/cjs/run.cjs.map +1 -1
  11. package/dist/cjs/stream.cjs +1 -1
  12. package/dist/cjs/stream.cjs.map +1 -1
  13. package/dist/esm/common/enum.mjs +1 -0
  14. package/dist/esm/common/enum.mjs.map +1 -1
  15. package/dist/esm/graphs/Graph.mjs +0 -1
  16. package/dist/esm/graphs/Graph.mjs.map +1 -1
  17. package/dist/esm/graphs/MultiAgentGraph.mjs +230 -45
  18. package/dist/esm/graphs/MultiAgentGraph.mjs.map +1 -1
  19. package/dist/esm/llm/openai/index.mjs +33 -0
  20. package/dist/esm/llm/openai/index.mjs.map +1 -1
  21. package/dist/esm/run.mjs +28 -15
  22. package/dist/esm/run.mjs.map +1 -1
  23. package/dist/esm/stream.mjs +1 -1
  24. package/dist/esm/stream.mjs.map +1 -1
  25. package/dist/types/common/enum.d.ts +2 -1
  26. package/dist/types/graphs/MultiAgentGraph.d.ts +12 -2
  27. package/dist/types/llm/openai/index.d.ts +10 -0
  28. package/dist/types/run.d.ts +1 -1
  29. package/dist/types/types/graph.d.ts +38 -4
  30. package/dist/types/types/llm.d.ts +1 -0
  31. package/dist/types/types/run.d.ts +5 -1
  32. package/package.json +10 -2
  33. package/src/common/enum.ts +1 -0
  34. package/src/graphs/Graph.ts +0 -1
  35. package/src/graphs/MultiAgentGraph.ts +267 -50
  36. package/src/llm/openai/index.ts +41 -0
  37. package/src/run.ts +38 -27
  38. package/src/scripts/multi-agent-chain.ts +278 -0
  39. package/src/scripts/multi-agent-document-review-chain.ts +197 -0
  40. package/src/scripts/multi-agent-hybrid-flow.ts +310 -0
  41. package/src/scripts/multi-agent-parallel.ts +27 -23
  42. package/src/scripts/multi-agent-supervisor.ts +362 -0
  43. package/src/scripts/test-custom-prompt-key.ts +145 -0
  44. package/src/scripts/test-handoff-input.ts +170 -0
  45. package/src/scripts/test-multi-agent-list-handoff.ts +261 -0
  46. package/src/scripts/test-tools-before-handoff.ts +233 -0
  47. package/src/stream.ts +4 -1
  48. package/src/types/graph.ts +51 -5
  49. package/src/types/llm.ts +1 -0
  50. package/src/types/run.ts +6 -1
  51. package/dist/types/scripts/abort.d.ts +0 -1
  52. package/dist/types/scripts/ant_web_search.d.ts +0 -1
  53. package/dist/types/scripts/args.d.ts +0 -7
  54. package/dist/types/scripts/caching.d.ts +0 -1
  55. package/dist/types/scripts/cli.d.ts +0 -1
  56. package/dist/types/scripts/cli2.d.ts +0 -1
  57. package/dist/types/scripts/cli3.d.ts +0 -1
  58. package/dist/types/scripts/cli4.d.ts +0 -1
  59. package/dist/types/scripts/cli5.d.ts +0 -1
  60. package/dist/types/scripts/code_exec.d.ts +0 -1
  61. package/dist/types/scripts/code_exec_files.d.ts +0 -1
  62. package/dist/types/scripts/code_exec_simple.d.ts +0 -1
  63. package/dist/types/scripts/content.d.ts +0 -1
  64. package/dist/types/scripts/empty_input.d.ts +0 -1
  65. package/dist/types/scripts/handoff-test.d.ts +0 -1
  66. package/dist/types/scripts/image.d.ts +0 -1
  67. package/dist/types/scripts/memory.d.ts +0 -1
  68. package/dist/types/scripts/multi-agent-conditional.d.ts +0 -1
  69. package/dist/types/scripts/multi-agent-parallel.d.ts +0 -1
  70. package/dist/types/scripts/multi-agent-sequence.d.ts +0 -1
  71. package/dist/types/scripts/multi-agent-test.d.ts +0 -1
  72. package/dist/types/scripts/search.d.ts +0 -1
  73. package/dist/types/scripts/simple.d.ts +0 -1
  74. package/dist/types/scripts/stream.d.ts +0 -1
  75. package/dist/types/scripts/thinking.d.ts +0 -1
  76. package/dist/types/scripts/tools.d.ts +0 -1
  77. package/dist/types/specs/spec.utils.d.ts +0 -1
  78. package/src/scripts/multi-agent-example-output.md +0 -110
package/src/graphs/MultiAgentGraph.ts
@@ -1,6 +1,11 @@
 import { z } from 'zod';
 import { tool } from '@langchain/core/tools';
-import { ToolMessage, HumanMessage } from '@langchain/core/messages';
+import { PromptTemplate } from '@langchain/core/prompts';
+import {
+  ToolMessage,
+  HumanMessage,
+  getBufferString,
+} from '@langchain/core/messages';
 import {
   END,
   START,
@@ -14,10 +19,21 @@ import type { ToolRunnableConfig } from '@langchain/core/tools';
 import type { BaseMessage } from '@langchain/core/messages';
 import type * as t from '@/types';
 import { StandardGraph } from './Graph';
+import { Constants } from '@/common';

 /**
  * MultiAgentGraph extends StandardGraph to support dynamic multi-agent workflows
- * with handoffs, fan-in/fan-out, and other composable patterns
+ * with handoffs, fan-in/fan-out, and other composable patterns.
+ *
+ * Key behavior:
+ * - Agents with ONLY handoff edges: Can dynamically route to any handoff destination
+ * - Agents with ONLY direct edges: Always follow their direct edges
+ * - Agents with BOTH: Use Command for exclusive routing (handoff OR direct, not both)
+ *   - If handoff occurs: Only the handoff destination executes
+ *   - If no handoff: Direct edges execute (potentially in parallel)
+ *
+ * This enables the common pattern where an agent either delegates (handoff)
+ * OR continues its workflow (direct edges), but not both simultaneously.
  */
 export class MultiAgentGraph extends StandardGraph {
   private edges: t.GraphEdge[];
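
For orientation, the sketch below is a minimal, standalone LangGraph example of the exclusive-routing pattern the comment above describes: a node that returns a Command picks exactly one destination, which is what the new agent wrapper in this release does when an agent has both handoff and direct edges. This is not code from the package; the node names and the `handoffRequested` flag are invented for illustration, and only the LangGraph APIs (StateGraph, Annotation, Command, addNode with `ends`) match what the diff itself uses.

import { Annotation, Command, END, START, StateGraph } from '@langchain/langgraph';

const State = Annotation.Root({
  /** Stand-in for "the agent called a transfer_to_* handoff tool" */
  handoffRequested: Annotation<boolean>({ reducer: (_a, b) => b, default: () => false }),
  log: Annotation<string[]>({ reducer: (a, b) => a.concat(b), default: () => [] }),
});

const builder = new StateGraph(State)
  /**
   * 'supervisor' has both a handoff destination and a direct destination,
   * so it returns a Command and exactly one of them runs (handoff OR direct).
   */
  .addNode(
    'supervisor',
    async (state) =>
      new Command({
        update: { log: ['supervisor ran'] },
        goto: state.handoffRequested ? 'specialist' : 'summarizer',
      }),
    { ends: ['specialist', 'summarizer'] }
  )
  .addNode('specialist', async () => ({ log: ['specialist ran'] }))
  .addNode('summarizer', async () => ({ log: ['summarizer ran'] }))
  .addEdge(START, 'supervisor')
  .addEdge('specialist', END)
  .addEdge('summarizer', END);

const graph = builder.compile();
/** Only 'specialist' runs after supervisor here; 'summarizer' is skipped entirely. */
await graph.invoke({ handoffRequested: true });
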
@@ -119,14 +135,6 @@ export class MultiAgentGraph extends StandardGraph {
         agentContext.tools = [];
       }
       agentContext.tools.push(...handoffTools);
-
-      // Update tool map
-      for (const tool of handoffTools) {
-        if (!agentContext.toolMap) {
-          agentContext.toolMap = new Map();
-        }
-        agentContext.toolMap.set(tool.name, tool);
-      }
     }
   }

@@ -137,37 +145,52 @@ export class MultiAgentGraph extends StandardGraph {
     const tools: t.GenericTool[] = [];
     const destinations = Array.isArray(edge.to) ? edge.to : [edge.to];

-    // If there's a condition, create a single conditional handoff tool
+    /** If there's a condition, create a single conditional handoff tool */
     if (edge.condition != null) {
       const toolName = 'conditional_transfer';
       const toolDescription =
         edge.description ?? 'Conditionally transfer control based on state';

+      /** Check if we have a prompt for handoff input */
+      const hasHandoffInput =
+        edge.prompt != null && typeof edge.prompt === 'string';
+      const handoffInputDescription = hasHandoffInput ? edge.prompt : undefined;
+      const promptKey = edge.promptKey ?? 'instructions';
+
       tools.push(
         tool(
-          async (_, config) => {
+          async (input: Record<string, unknown>, config) => {
             const state = getCurrentTaskInput() as t.BaseGraphState;
             const toolCallId =
               (config as ToolRunnableConfig | undefined)?.toolCall?.id ??
               'unknown';

-            // Evaluate condition
+            /** Evaluated condition */
            const result = edge.condition!(state);
            let destination: string;

            if (typeof result === 'boolean') {
-              // If true, use first destination; if false, don't transfer
+              /** If true, use first destination; if false, don't transfer */
              if (!result) return null;
              destination = destinations[0];
            } else if (typeof result === 'string') {
              destination = result;
            } else {
-              // Array of destinations - for now, use the first
+              /** Array of destinations - for now, use the first */
              destination = Array.isArray(result) ? result[0] : destinations[0];
            }

+            let content = `Conditionally transferred to ${destination}`;
+            if (
+              hasHandoffInput &&
+              promptKey in input &&
+              input[promptKey] != null
+            ) {
+              content += `\n\n${promptKey.charAt(0).toUpperCase() + promptKey.slice(1)}: ${input[promptKey]}`;
+            }
+
            const toolMessage = new ToolMessage({
-              content: `Conditionally transferred to ${destination}`,
+              content,
              name: toolName,
              tool_call_id: toolCallId,
            });
@@ -180,26 +203,51 @@ export class MultiAgentGraph extends StandardGraph {
          },
          {
            name: toolName,
-            schema: z.object({}),
+            schema: hasHandoffInput
+              ? z.object({
+                  [promptKey]: z
+                    .string()
+                    .optional()
+                    .describe(handoffInputDescription as string),
+                })
+              : z.object({}),
            description: toolDescription,
          }
        )
      );
    } else {
-      // Create individual tools for each destination
+      /** Create individual tools for each destination */
      for (const destination of destinations) {
-        const toolName = `transfer_to_${destination}`;
+        const toolName = `${Constants.LC_TRANSFER_TO_}${destination}`;
        const toolDescription =
          edge.description ?? `Transfer control to agent '${destination}'`;

+        /** Check if we have a prompt for handoff input */
+        const hasHandoffInput =
+          edge.prompt != null && typeof edge.prompt === 'string';
+        const handoffInputDescription = hasHandoffInput
+          ? edge.prompt
+          : undefined;
+        const promptKey = edge.promptKey ?? 'instructions';
+
        tools.push(
          tool(
-            async (_, config) => {
+            async (input: Record<string, unknown>, config) => {
              const toolCallId =
                (config as ToolRunnableConfig | undefined)?.toolCall?.id ??
                'unknown';
+
+              let content = `Successfully transferred to ${destination}`;
+              if (
+                hasHandoffInput &&
+                promptKey in input &&
+                input[promptKey] != null
+              ) {
+                content += `\n\n${promptKey.charAt(0).toUpperCase() + promptKey.slice(1)}: ${input[promptKey]}`;
+              }
+
              const toolMessage = new ToolMessage({
-                content: `Successfully transferred to ${destination}`,
+                content,
                name: toolName,
                tool_call_id: toolCallId,
              });
@@ -214,7 +262,14 @@ export class MultiAgentGraph extends StandardGraph {
            },
            {
              name: toolName,
-              schema: z.object({}),
+              schema: hasHandoffInput
+                ? z.object({
+                    [promptKey]: z
+                      .string()
+                      .optional()
+                      .describe(handoffInputDescription as string),
+                  })
+                : z.object({}),
              description: toolDescription,
            }
          )
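
To make the schema change above concrete: when an edge sets `prompt`, the generated transfer tool exposes one optional string field whose name comes from `promptKey` (default 'instructions') and whose description is the prompt text. The following is a hedged reconstruction, not package code; the key name 'task' and the prompt text are invented, and only the z.object shape mirrors the diff.

import { z } from 'zod';

/** Hypothetical edge values */
const promptKey = 'task';
const prompt = 'Describe the work the receiving agent should perform';

/** Shape of a transfer_to_<agent> tool's input when `prompt` is set */
const handoffSchema = z.object({
  [promptKey]: z.string().optional().describe(prompt),
});
type HandoffInput = z.infer<typeof handoffSchema>;

/**
 * If the calling agent supplies { task: 'Summarize the findings' }, the
 * resulting ToolMessage content becomes roughly:
 *   "Successfully transferred to <agent>\n\nTask: Summarize the findings"
 */
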
@@ -229,14 +284,14 @@ export class MultiAgentGraph extends StandardGraph {
    * Create a complete agent subgraph (similar to createReactAgent)
    */
   private createAgentSubgraph(agentId: string): t.CompiledAgentWorfklow {
-    // This is essentially the same as createAgentNode from StandardGraph
+    /** This is essentially the same as `createAgentNode` from `StandardGraph` */
     return this.createAgentNode(agentId);
   }

   /**
    * Create the multi-agent workflow with dynamic handoffs
    */
-  override createWorkflow(): t.CompiledStateWorkflow {
+  override createWorkflow(): t.CompiledMultiAgentWorkflow {
     const StateAnnotation = Annotation.Root({
       messages: Annotation<BaseMessage[]>({
         reducer: (a, b) => {
@@ -249,6 +304,12 @@ export class MultiAgentGraph extends StandardGraph {
         },
         default: () => [],
       }),
+      /** Channel for passing filtered messages to agents when excludeResults is true */
+      agentMessages: Annotation<BaseMessage[]>({
+        /** Replaces state entirely */
+        reducer: (a, b) => b,
+        default: () => [],
+      }),
     });

     const builder = new StateGraph(StateAnnotation);
@@ -277,19 +338,123 @@ export class MultiAgentGraph extends StandardGraph {
        }
      }

-      // If agent has handoff destinations, add END to possible ends
-      // If agent only has direct destinations, it naturally ends without explicit END
-      const destinations = new Set([...handoffDestinations]);
+      /** Check if this agent has BOTH handoff and direct edges */
+      const hasHandoffEdges = handoffDestinations.size > 0;
+      const hasDirectEdges = directDestinations.size > 0;
+      const needsCommandRouting = hasHandoffEdges && hasDirectEdges;
+
+      /** Collect all possible destinations for this agent */
+      const allDestinations = new Set([
+        ...handoffDestinations,
+        ...directDestinations,
+      ]);
      if (handoffDestinations.size > 0 || directDestinations.size === 0) {
-        destinations.add(END);
+        allDestinations.add(END);
      }

-      // Create the agent subgraph (includes agent + tools)
+      /** Agent subgraph (includes agent + tools) */
      const agentSubgraph = this.createAgentSubgraph(agentId);

-      // Add the agent as a node with its possible destinations
-      builder.addNode(agentId, agentSubgraph, {
-        ends: Array.from(destinations),
+      /** Wrapper function that handles agentMessages channel and conditional routing */
+      const agentWrapper = async (
+        state: t.MultiAgentGraphState
+      ): Promise<t.MultiAgentGraphState | Command> => {
+        let result: t.MultiAgentGraphState;
+
+        if (state.agentMessages != null && state.agentMessages.length > 0) {
+          /**
+           * When using agentMessages (excludeResults=true), we need to update
+           * the token map to account for the new prompt message
+           */
+          const agentContext = this.agentContexts.get(agentId);
+          if (agentContext && agentContext.tokenCounter) {
+            // The agentMessages contains:
+            // 1. Filtered messages (0 to startIndex) - already have token counts
+            // 2. New prompt message - needs token counting
+
+            const freshTokenMap: Record<string, number> = {};
+
+            // Copy existing token counts for filtered messages (0 to startIndex)
+            for (let i = 0; i < this.startIndex; i++) {
+              const tokenCount = agentContext.indexTokenCountMap[i];
+              if (tokenCount !== undefined) {
+                freshTokenMap[i] = tokenCount;
+              }
+            }
+
+            // Calculate tokens only for the new prompt message (last message)
+            const promptMessageIndex = state.agentMessages.length - 1;
+            if (promptMessageIndex >= this.startIndex) {
+              const promptMessage = state.agentMessages[promptMessageIndex];
+              freshTokenMap[promptMessageIndex] =
+                agentContext.tokenCounter(promptMessage);
+            }
+
+            // Update the agent's token map with instructions added
+            agentContext.updateTokenMapWithInstructions(freshTokenMap);
+          }
+
+          /** Temporary state with messages replaced by `agentMessages` */
+          const transformedState: t.MultiAgentGraphState = {
+            ...state,
+            messages: state.agentMessages,
+          };
+          result = await agentSubgraph.invoke(transformedState);
+          result = {
+            ...result,
+            /** Clear agentMessages for next agent */
+            agentMessages: [],
+          };
+        } else {
+          result = await agentSubgraph.invoke(state);
+        }
+
+        /** If agent has both handoff and direct edges, use Command for exclusive routing */
+        if (needsCommandRouting) {
+          /** Check if a handoff occurred */
+          const lastMessage = result.messages[
+            result.messages.length - 1
+          ] as BaseMessage | null;
+          if (
+            lastMessage != null &&
+            lastMessage.getType() === 'tool' &&
+            typeof lastMessage.name === 'string' &&
+            lastMessage.name.startsWith(Constants.LC_TRANSFER_TO_)
+          ) {
+            /** Handoff occurred - extract destination and navigate there exclusively */
+            const handoffDest = lastMessage.name.replace(
+              Constants.LC_TRANSFER_TO_,
+              ''
+            );
+            return new Command({
+              update: result,
+              goto: handoffDest,
+            });
+          } else {
+            /** No handoff - proceed with direct edges */
+            const directDests = Array.from(directDestinations);
+            if (directDests.length === 1) {
+              return new Command({
+                update: result,
+                goto: directDests[0],
+              });
+            } else if (directDests.length > 1) {
+              /** Multiple direct destinations - they'll run in parallel */
+              return new Command({
+                update: result,
+                goto: directDests,
+              });
+            }
+          }
+        }
+
+        /** No special routing needed - return state normally */
+        return result;
+      };
+
+      /** Wrapped agent as a node with its possible destinations */
+      builder.addNode(agentId, agentWrapper, {
+        ends: Array.from(allDestinations),
      });
    }

@@ -300,7 +465,8 @@ export class MultiAgentGraph extends StandardGraph {
      builder.addEdge(START, startNode);
    }

-    /** Add direct edges for automatic transitions
+    /**
+     * Add direct edges for automatic transitions
     * Group edges by destination to handle fan-in scenarios
     */
    const edgesByDestination = new Map<string, t.GraphEdge[]>();
@@ -318,38 +484,74 @@ export class MultiAgentGraph extends StandardGraph {
    for (const [destination, edges] of edgesByDestination) {
      /** Checks if this is a fan-in scenario with prompt instructions */
      const edgesWithPrompt = edges.filter(
-        (edge) =>
-          edge.promptInstructions != null && edge.promptInstructions !== ''
+        (edge) => edge.prompt != null && edge.prompt !== ''
      );

      if (edgesWithPrompt.length > 0) {
-        // Fan-in with prompt: create a single wrapper node for this destination
+        /**
+         * Single wrapper node for destination (Fan-in with prompt)
+         */
        const wrapperNodeId = `fan_in_${destination}_prompt`;
-
-        // Use the first edge's prompt instructions (they should all be the same for fan-in)
-        const promptInstructions = edgesWithPrompt[0].promptInstructions;
+        /**
+         * First edge's `prompt`
+         * (they should all be the same for fan-in)
+         */
+        const prompt = edgesWithPrompt[0].prompt;
+        /**
+         * First edge's `excludeResults` flag
+         * (they should all be the same for fan-in)
+         */
+        const excludeResults = edgesWithPrompt[0].excludeResults;

        builder.addNode(wrapperNodeId, async (state: t.BaseGraphState) => {
          let promptText: string | undefined;
-
-          if (typeof promptInstructions === 'function') {
-            promptText = promptInstructions(state.messages);
-          } else {
-            promptText = promptInstructions;
+          let effectiveExcludeResults = excludeResults;
+
+          if (typeof prompt === 'function') {
+            promptText = await prompt(state.messages, this.startIndex);
+          } else if (prompt != null) {
+            if (prompt.includes('{results}')) {
+              const resultsMessages = state.messages.slice(this.startIndex);
+              const resultsString = getBufferString(resultsMessages);
+              const promptTemplate = PromptTemplate.fromTemplate(prompt);
+              const result = await promptTemplate.invoke({
+                results: resultsString,
+              });
+              promptText = result.value;
+              effectiveExcludeResults =
+                excludeResults !== false && promptText !== '';
+            } else {
+              promptText = prompt;
+            }
          }

          if (promptText != null && promptText !== '') {
-            // Return state with the prompt message added
+            if (
+              effectiveExcludeResults == null ||
+              effectiveExcludeResults === false
+            ) {
+              return {
+                messages: [new HumanMessage(promptText)],
+              };
+            }
+
+            /** When `excludeResults` is true, use agentMessages channel
+             * to pass filtered messages + prompt to the destination agent
+             */
+            const filteredMessages = state.messages.slice(0, this.startIndex);
            return {
-              messages: [...state.messages, new HumanMessage(promptText)],
+              messages: [new HumanMessage(promptText)],
+              agentMessages: messagesStateReducer(filteredMessages, [
+                new HumanMessage(promptText),
+              ]),
            };
          }

-          // No prompt needed, return empty update
+          /** No prompt needed, return empty update */
          return {};
        });

-        // Add edges from all sources to the wrapper, then wrapper to destination
+        /** Add edges from all sources to the wrapper, then wrapper to destination */
        for (const edge of edges) {
          const sources = Array.isArray(edge.from) ? edge.from : [edge.from];
          for (const source of sources) {
@@ -359,15 +561,30 @@ export class MultiAgentGraph extends StandardGraph {
          }
        }

-        // Single edge from wrapper to destination
+        /** Single edge from wrapper to destination */
        // eslint-disable-next-line @typescript-eslint/ban-ts-comment
        /** @ts-ignore */
        builder.addEdge(wrapperNodeId, destination);
      } else {
-        // No prompt instructions, add direct edges
+        /** No prompt instructions, add direct edges (skip if source uses Command routing) */
        for (const edge of edges) {
          const sources = Array.isArray(edge.from) ? edge.from : [edge.from];
          for (const source of sources) {
+            /** Check if this source node has both handoff and direct edges */
+            const sourceHandoffEdges = this.handoffEdges.filter((e) => {
+              const eSources = Array.isArray(e.from) ? e.from : [e.from];
+              return eSources.includes(source);
+            });
+            const sourceDirectEdges = this.directEdges.filter((e) => {
+              const eSources = Array.isArray(e.from) ? e.from : [e.from];
+              return eSources.includes(source);
+            });
+
+            /** Skip adding edge if source uses Command routing (has both types) */
+            if (sourceHandoffEdges.length > 0 && sourceDirectEdges.length > 0) {
+              continue;
+            }
+
            // eslint-disable-next-line @typescript-eslint/ban-ts-comment
            /** @ts-ignore */
            builder.addEdge(source, destination);
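
The fan-in wrapper earlier in this file supports string prompts containing a {results} placeholder: messages after startIndex are serialized with getBufferString and rendered into the template. Below is a small standalone sketch of just that rendering step, using the same @langchain/core APIs the diff imports; the messages and prompt text are invented for illustration.

import { AIMessage, HumanMessage, getBufferString } from '@langchain/core/messages';
import { PromptTemplate } from '@langchain/core/prompts';

/** Invented intermediate results from upstream agents (the messages after startIndex) */
const intermediateResults = [
  new AIMessage('Researcher: found three relevant sources.'),
  new AIMessage('Analyst: the metric improved by roughly 12%.'),
];

const prompt = 'Review the results below and produce a final summary:\n\n{results}';
const rendered = await PromptTemplate.fromTemplate(prompt).invoke({
  results: getBufferString(intermediateResults),
});

/**
 * With excludeResults left enabled, the destination agent receives the messages
 * before startIndex plus only this single prompt message, instead of every raw
 * intermediate result.
 */
const promptMessage = new HumanMessage(rendered.value);
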
package/src/llm/openai/index.ts
@@ -30,6 +30,7 @@ import {
   _convertOpenAIResponsesDeltaToBaseMessageChunk,
   type ResponseReturnStreamEvents,
 } from './utils';
+import { sleep } from '@/utils';

 // eslint-disable-next-line @typescript-eslint/explicit-function-return-type
 const iife = <T>(fn: () => T) => fn();
@@ -195,6 +196,17 @@ export class CustomAzureOpenAIClient extends AzureOpenAIClient {

 /** @ts-expect-error We are intentionally overriding `getReasoningParams` */
 export class ChatOpenAI extends OriginalChatOpenAI<t.ChatOpenAICallOptions> {
+  _lc_stream_delay?: number;
+
+  constructor(
+    fields?: t.ChatOpenAICallOptions & {
+      _lc_stream_delay?: number;
+    } & t.OpenAIChatInput['modelKwargs']
+  ) {
+    super(fields);
+    this._lc_stream_delay = fields?._lc_stream_delay;
+  }
+
   public get exposedClient(): CustomOpenAIClient {
     return this.client;
   }
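
A hedged usage sketch for the new `_lc_stream_delay` field: the constructors in this file store it, and the streaming loops in the hunks below sleep after each yielded chunk. The delay unit (milliseconds) and whether `ChatOpenAI` is re-exported from the package root are assumptions; the model name and prompt are invented.

import { ChatOpenAI } from '@librechat/agents'; // import path assumed

const llm = new ChatOpenAI({
  model: 'gpt-4o-mini',
  /** Assumed to be milliseconds: pause ~25 ms after each streamed chunk */
  _lc_stream_delay: 25,
});

const stream = await llm.stream('Write a haiku about package diffs');
for await (const chunk of stream) {
  if (typeof chunk.content === 'string') {
    process.stdout.write(chunk.content);
  }
}
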
@@ -288,6 +300,9 @@ export class ChatOpenAI extends OriginalChatOpenAI<t.ChatOpenAICallOptions> {
         );
         if (chunk == null) continue;
         yield chunk;
+        if (this._lc_stream_delay != null) {
+          await sleep(this._lc_stream_delay);
+        }
         await runManager?.handleLLMNewToken(
           chunk.text || '',
           undefined,
@@ -376,6 +391,9 @@ export class ChatOpenAI extends OriginalChatOpenAI<t.ChatOpenAICallOptions> {
           generationInfo,
         });
         yield generationChunk;
+        if (this._lc_stream_delay != null) {
+          await sleep(this._lc_stream_delay);
+        }
         await runManager?.handleLLMNewToken(
           generationChunk.text || '',
           newTokenIndices,
@@ -423,6 +441,9 @@ export class ChatOpenAI extends OriginalChatOpenAI<t.ChatOpenAICallOptions> {
           text: '',
         });
         yield generationChunk;
+        if (this._lc_stream_delay != null) {
+          await sleep(this._lc_stream_delay);
+        }
       }
       if (options.signal?.aborted === true) {
         throw new Error('AbortError');
@@ -432,6 +453,13 @@ export class ChatOpenAI extends OriginalChatOpenAI<t.ChatOpenAICallOptions> {

 /** @ts-expect-error We are intentionally overriding `getReasoningParams` */
 export class AzureChatOpenAI extends OriginalAzureChatOpenAI {
+  _lc_stream_delay?: number;
+
+  constructor(fields?: t.AzureOpenAIInput & { _lc_stream_delay?: number }) {
+    super(fields);
+    this._lc_stream_delay = fields?._lc_stream_delay;
+  }
+
   public get exposedClient(): CustomOpenAIClient {
     return this.client;
   }
@@ -559,6 +587,9 @@ export class AzureChatOpenAI extends OriginalAzureChatOpenAI {
         );
         if (chunk == null) continue;
         yield chunk;
+        if (this._lc_stream_delay != null) {
+          await sleep(this._lc_stream_delay);
+        }
         await runManager?.handleLLMNewToken(
           chunk.text || '',
           undefined,
@@ -624,13 +655,17 @@ export interface XAIUsageMetadata
 }

 export class ChatXAI extends OriginalChatXAI {
+  _lc_stream_delay?: number;
+
   constructor(
     fields?: Partial<ChatXAIInput> & {
       configuration?: { baseURL?: string };
       clientConfig?: { baseURL?: string };
+      _lc_stream_delay?: number;
     }
   ) {
     super(fields);
+    this._lc_stream_delay = fields?._lc_stream_delay;
     const customBaseURL =
       fields?.configuration?.baseURL ?? fields?.clientConfig?.baseURL;
     if (customBaseURL != null && customBaseURL) {
@@ -759,6 +794,9 @@ export class ChatXAI extends OriginalChatXAI {
           generationInfo,
         });
         yield generationChunk;
+        if (this._lc_stream_delay != null) {
+          await sleep(this._lc_stream_delay);
+        }
         await runManager?.handleLLMNewToken(
           generationChunk.text || '',
           newTokenIndices,
@@ -832,6 +870,9 @@ export class ChatXAI extends OriginalChatXAI {
           text: '',
         });
         yield generationChunk;
+        if (this._lc_stream_delay != null) {
+          await sleep(this._lc_stream_delay);
+        }
       }
       if (options.signal?.aborted === true) {
         throw new Error('AbortError');