@librechat/agents 3.1.45 → 3.1.50

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (38)
  1. package/dist/cjs/events.cjs +9 -4
  2. package/dist/cjs/events.cjs.map +1 -1
  3. package/dist/cjs/graphs/Graph.cjs +142 -106
  4. package/dist/cjs/graphs/Graph.cjs.map +1 -1
  5. package/dist/cjs/run.cjs +0 -4
  6. package/dist/cjs/run.cjs.map +1 -1
  7. package/dist/cjs/tools/ToolNode.cjs +100 -1
  8. package/dist/cjs/tools/ToolNode.cjs.map +1 -1
  9. package/dist/cjs/tools/handlers.cjs.map +1 -1
  10. package/dist/esm/events.mjs +9 -4
  11. package/dist/esm/events.mjs.map +1 -1
  12. package/dist/esm/graphs/Graph.mjs +138 -102
  13. package/dist/esm/graphs/Graph.mjs.map +1 -1
  14. package/dist/esm/run.mjs +1 -5
  15. package/dist/esm/run.mjs.map +1 -1
  16. package/dist/esm/tools/ToolNode.mjs +100 -1
  17. package/dist/esm/tools/ToolNode.mjs.map +1 -1
  18. package/dist/esm/tools/handlers.mjs.map +1 -1
  19. package/dist/types/events.d.ts +10 -3
  20. package/dist/types/graphs/Graph.d.ts +0 -2
  21. package/dist/types/tools/ToolNode.d.ts +13 -1
  22. package/dist/types/tools/handlers.d.ts +2 -2
  23. package/package.json +1 -1
  24. package/src/events.ts +11 -14
  25. package/src/graphs/Graph.ts +181 -144
  26. package/src/run.ts +0 -6
  27. package/src/specs/anthropic.simple.test.ts +1 -2
  28. package/src/specs/azure.simple.test.ts +1 -2
  29. package/src/specs/cache.simple.test.ts +1 -2
  30. package/src/specs/custom-event-await.test.ts +2 -4
  31. package/src/specs/deepseek.simple.test.ts +1 -2
  32. package/src/specs/moonshot.simple.test.ts +1 -2
  33. package/src/specs/openai.simple.test.ts +1 -2
  34. package/src/specs/openrouter.simple.test.ts +1 -2
  35. package/src/specs/reasoning.test.ts +1 -2
  36. package/src/specs/tool-error.test.ts +1 -2
  37. package/src/tools/ToolNode.ts +130 -1
  38. package/src/tools/handlers.ts +2 -2
@@ -1,4 +1,4 @@
1
- import type { UsageMetadata, BaseMessageFields } from '@langchain/core/messages';
1
+ import type { BaseMessageFields, UsageMetadata } from '@langchain/core/messages';
2
2
  import type { MultiAgentGraph, StandardGraph } from '@/graphs';
3
3
  import type { Logger } from 'winston';
4
4
  import type * as t from '@/types';
@@ -15,8 +15,15 @@ export declare class ModelEndHandler implements t.EventHandler {
15
15
  export declare class ToolEndHandler implements t.EventHandler {
16
16
  private callback?;
17
17
  private logger?;
18
- private omitOutput?;
19
- constructor(callback?: t.ToolEndCallback, logger?: Logger, omitOutput?: (name?: string) => boolean);
18
+ constructor(callback?: t.ToolEndCallback, logger?: Logger);
19
+ /**
20
+ * Handles on_tool_end events from the for-await stream consumer.
21
+ *
22
+ * This handler is now purely a consumer callback — tool completion
23
+ * (ON_RUN_STEP_COMPLETED dispatch + session context storage) is handled
24
+ * in graph context by ToolNode directly, eliminating the race between
25
+ * the stream consumer and graph execution.
26
+ */
20
27
  handle(event: string, data: t.StreamEventData | undefined, metadata?: Record<string, unknown>, graph?: StandardGraph | MultiAgentGraph): Promise<void>;
21
28
  }
22
29
  export declare class TestLLMStreamHandler implements t.EventHandler {
@@ -30,7 +30,6 @@ export declare abstract class Graph<T extends t.BaseGraphState = t.BaseGraphStat
30
30
  abstract dispatchRunStepDelta(id: string, delta: t.ToolCallDelta): Promise<void>;
31
31
  abstract dispatchMessageDelta(id: string, delta: t.MessageDelta): Promise<void>;
32
32
  abstract dispatchReasoningDelta(stepId: string, delta: t.ReasoningDelta): Promise<void>;
33
- abstract handleToolCallCompleted(data: t.ToolEndData, metadata?: Record<string, unknown>, omitOutput?: boolean): Promise<void>;
34
33
  abstract createCallModel(agentId?: string, currentModel?: t.ChatModel): (state: T, config?: RunnableConfig) => Promise<Partial<T>>;
35
34
  messageStepHasToolCalls: Map<string, boolean>;
36
35
  messageIdsByStepKey: Map<string, string>;
@@ -137,7 +136,6 @@ export declare class StandardGraph extends Graph<t.BaseGraphState, t.GraphNode>
137
136
  * Dispatches a run step to the client, returns the step ID
138
137
  */
139
138
  dispatchRunStep(stepKey: string, stepDetails: t.StepDetails, metadata?: Record<string, unknown>): Promise<string>;
140
- handleToolCallCompleted(data: t.ToolEndData, metadata?: Record<string, unknown>, omitOutput?: boolean): Promise<void>;
141
139
  /**
142
140
  * Static version of handleToolCallError to avoid creating strong references
143
141
  * that prevent garbage collection
@@ -12,6 +12,8 @@ export declare class ToolNode<T = any> extends RunnableCallable<T, T> {
12
12
  toolCallStepIds?: Map<string, string>;
13
13
  errorHandler?: t.ToolNodeConstructorParams['errorHandler'];
14
14
  private toolUsageCount;
15
+ /** Maps toolCallId → turn captured in runTool, used by handleRunToolCompletions */
16
+ private toolCallTurns;
15
17
  /** Tool registry for filtering (lazy computation of programmatic maps) */
16
18
  private toolRegistry?;
17
19
  /** Cached programmatic tools (computed once on first PTC call) */
@@ -46,9 +48,19 @@ export declare class ToolNode<T = any> extends RunnableCallable<T, T> {
46
48
  private getCodeSessionContext;
47
49
  /**
48
50
  * Extracts code execution session context from tool results and stores in Graph.sessions.
49
- * Mirrors the session storage logic in Graph.handleToolCallCompleted() for direct execution.
51
+ * Mirrors the session storage logic in handleRunToolCompletions for direct execution.
50
52
  */
51
53
  private storeCodeSessionFromResults;
54
+ /**
55
+ * Post-processes standard runTool outputs: dispatches ON_RUN_STEP_COMPLETED
56
+ * and stores code session context. Mirrors the completion handling in
57
+ * dispatchToolEvents for the event-driven path.
58
+ *
59
+ * By handling completions here in graph context (rather than in the
60
+ * stream consumer via ToolEndHandler), the race between the stream
61
+ * consumer and graph execution is eliminated.
62
+ */
63
+ private handleRunToolCompletions;
52
64
  /**
53
65
  * Dispatches tool calls to the host via ON_TOOL_EXECUTE event and returns raw ToolMessages.
54
66
  * Core logic for event-driven execution, separated from output shaping.
@@ -1,5 +1,5 @@
1
1
  import type { ToolCall, ToolCallChunk } from '@langchain/core/messages/tool';
2
- import type { MultiAgentGraph, StandardGraph } from '@/graphs';
2
+ import type { Graph, MultiAgentGraph, StandardGraph } from '@/graphs';
3
3
  import type { AgentContext } from '@/agents/AgentContext';
4
4
  import type * as t from '@/types';
5
5
  export declare function handleToolCallChunks({ graph, stepKey, toolCallChunks, metadata, }: {
@@ -8,7 +8,7 @@ export declare function handleToolCallChunks({ graph, stepKey, toolCallChunks, m
8
8
  toolCallChunks: ToolCallChunk[];
9
9
  metadata?: Record<string, unknown>;
10
10
  }): Promise<void>;
11
- export declare const handleToolCalls: (toolCalls?: ToolCall[], metadata?: Record<string, unknown>, graph?: StandardGraph | MultiAgentGraph) => Promise<void>;
11
+ export declare const handleToolCalls: (toolCalls?: ToolCall[], metadata?: Record<string, unknown>, graph?: Graph | StandardGraph | MultiAgentGraph) => Promise<void>;
12
12
  export declare const toolResultTypes: Set<string>;
13
13
  /**
14
14
  * Handles the result of a server tool call; in other words, a provider's built-in tool.
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@librechat/agents",
3
- "version": "3.1.45",
3
+ "version": "3.1.50",
4
4
  "main": "./dist/cjs/main.cjs",
5
5
  "module": "./dist/esm/main.mjs",
6
6
  "types": "./dist/types/index.d.ts",
package/src/events.ts CHANGED
@@ -1,9 +1,8 @@
1
1
  /* eslint-disable no-console */
2
2
  // src/events.ts
3
3
  import type {
4
- ToolMessage,
5
- UsageMetadata,
6
4
  BaseMessageFields,
5
+ UsageMetadata,
7
6
  } from '@langchain/core/messages';
8
7
  import type { MultiAgentGraph, StandardGraph } from '@/graphs';
9
8
  import type { Logger } from 'winston';
@@ -76,16 +75,19 @@ export class ModelEndHandler implements t.EventHandler {
76
75
  export class ToolEndHandler implements t.EventHandler {
77
76
  private callback?: t.ToolEndCallback;
78
77
  private logger?: Logger;
79
- private omitOutput?: (name?: string) => boolean;
80
- constructor(
81
- callback?: t.ToolEndCallback,
82
- logger?: Logger,
83
- omitOutput?: (name?: string) => boolean
84
- ) {
78
+ constructor(callback?: t.ToolEndCallback, logger?: Logger) {
85
79
  this.callback = callback;
86
80
  this.logger = logger;
87
- this.omitOutput = omitOutput;
88
81
  }
82
+
83
+ /**
84
+ * Handles on_tool_end events from the for-await stream consumer.
85
+ *
86
+ * This handler is now purely a consumer callback — tool completion
87
+ * (ON_RUN_STEP_COMPLETED dispatch + session context storage) is handled
88
+ * in graph context by ToolNode directly, eliminating the race between
89
+ * the stream consumer and graph execution.
90
+ */
89
91
  async handle(
90
92
  event: string,
91
93
  data: t.StreamEventData | undefined,
@@ -119,11 +121,6 @@ export class ToolEndHandler implements t.EventHandler {
119
121
  if (this.callback) {
120
122
  await this.callback(toolEndData, metadata);
121
123
  }
122
- await graph.handleToolCallCompleted(
123
- { input: toolEndData.input, output: toolEndData.output },
124
- metadata,
125
- this.omitOutput?.((toolEndData.output as ToolMessage | undefined)?.name)
126
- );
127
124
  } catch (error) {
128
125
  if (this.logger) {
129
126
  this.logger.error('Error handling tool_end event:', error);
@@ -7,7 +7,6 @@ import { ChatVertexAI } from '@langchain/google-vertexai';
7
7
  import {
8
8
  START,
9
9
  END,
10
- Command,
11
10
  StateGraph,
12
11
  Annotation,
13
12
  messagesStateReducer,
@@ -24,31 +23,32 @@ import {
24
23
  } from '@langchain/core/messages';
25
24
  import type {
26
25
  BaseMessageFields,
26
+ MessageContent,
27
27
  UsageMetadata,
28
28
  BaseMessage,
29
29
  } from '@langchain/core/messages';
30
30
  import type { ToolCall } from '@langchain/core/messages/tool';
31
31
  import type * as t from '@/types';
32
- import {
33
- GraphNodeKeys,
34
- ContentTypes,
35
- GraphEvents,
36
- Providers,
37
- StepTypes,
38
- Constants,
39
- } from '@/common';
40
32
  import {
41
33
  formatAnthropicArtifactContent,
42
34
  ensureThinkingBlockInMessages,
43
35
  convertMessagesToContent,
44
36
  addBedrockCacheControl,
37
+ extractToolDiscoveries,
45
38
  modifyDeltaProperties,
46
39
  formatArtifactPayload,
47
40
  formatContentStrings,
48
41
  createPruneMessages,
49
42
  addCacheControl,
50
- extractToolDiscoveries,
43
+ getMessageId,
51
44
  } from '@/messages';
45
+ import {
46
+ GraphNodeKeys,
47
+ ContentTypes,
48
+ GraphEvents,
49
+ Providers,
50
+ StepTypes,
51
+ } from '@/common';
52
52
  import {
53
53
  resetIfNotEmpty,
54
54
  isOpenAILike,
@@ -63,6 +63,8 @@ import { safeDispatchCustomEvent } from '@/utils/events';
63
63
  import { createSchemaOnlyTools } from '@/tools/schema';
64
64
  import { AgentContext } from '@/agents/AgentContext';
65
65
  import { createFakeStreamingLLM } from '@/llm/fake';
66
+ import { handleToolCalls } from '@/tools/handlers';
67
+ import { ChatModelStreamHandler } from '@/stream';
66
68
  import { HandlerRegistry } from '@/events';
67
69
 
68
70
  const { AGENT, TOOLS } = GraphNodeKeys;
@@ -115,12 +117,6 @@ export abstract class Graph<
115
117
  stepId: string,
116
118
  delta: t.ReasoningDelta
117
119
  ): Promise<void>;
118
- abstract handleToolCallCompleted(
119
- data: t.ToolEndData,
120
- metadata?: Record<string, unknown>,
121
- omitOutput?: boolean
122
- ): Promise<void>;
123
-
124
120
  abstract createCallModel(
125
121
  agentId?: string,
126
122
  currentModel?: t.ChatModel
@@ -197,7 +193,13 @@ export class StandardGraph extends Graph<t.BaseGraphState, t.GraphNode> {
197
193
  this.contentIndexMap = resetIfNotEmpty(this.contentIndexMap, new Map());
198
194
  }
199
195
  this.stepKeyIds = resetIfNotEmpty(this.stepKeyIds, new Map());
200
- this.toolCallStepIds = resetIfNotEmpty(this.toolCallStepIds, new Map());
196
+ /**
197
+ * Clear in-place instead of replacing with a new Map to preserve the
198
+ * shared reference held by ToolNode (passed at construction time).
199
+ * Using resetIfNotEmpty would create a new Map, leaving ToolNode with
200
+ * a stale reference on 2nd+ processStream calls.
201
+ */
202
+ this.toolCallStepIds.clear();
201
203
  this.messageIdsByStepKey = resetIfNotEmpty(
202
204
  this.messageIdsByStepKey,
203
205
  new Map()
@@ -613,7 +615,7 @@ export class StandardGraph extends Graph<t.BaseGraphState, t.GraphNode> {
613
615
  currentModel,
614
616
  finalMessages,
615
617
  provider,
616
- tools,
618
+ tools: _tools,
617
619
  }: {
618
620
  currentModel?: t.ChatModel;
619
621
  finalMessages: BaseMessage[];
@@ -627,23 +629,54 @@ export class StandardGraph extends Graph<t.BaseGraphState, t.GraphNode> {
627
629
  throw new Error('No model found');
628
630
  }
629
631
 
630
- if ((tools?.length ?? 0) > 0 && manualToolStreamProviders.has(provider)) {
631
- if (!model.stream) {
632
- throw new Error('Model does not support stream');
633
- }
632
+ if (model.stream) {
633
+ /**
634
+ * Process all model output through a local ChatModelStreamHandler in the
635
+ * graph execution context. Each chunk is awaited before the next one is
636
+ * consumed, so by the time the stream is exhausted every run step
637
+ * (MESSAGE_CREATION, TOOL_CALLS) has been created and toolCallStepIds is
638
+ * fully populated — the graph will not transition to ToolNode until this
639
+ * is done.
640
+ *
641
+ * This replaces the previous pattern where ChatModelStreamHandler lived
642
+ * in the for-await stream consumer (handler registry). That consumer
643
+ * runs concurrently with graph execution, so the graph could advance to
644
+ * ToolNode before the consumer had processed all events. By handling
645
+ * chunks here, inside the agent node, the race is eliminated.
646
+ *
647
+ * The for-await consumer no longer needs a ChatModelStreamHandler; its
648
+ * on_chat_model_stream events are simply ignored (no handler registered).
649
+ * The dispatched custom events (ON_RUN_STEP, ON_MESSAGE_DELTA, etc.)
650
+ * still reach the content aggregator and SSE handlers through the custom
651
+ * event callback in Run.createCustomEventCallback.
652
+ */
653
+ const metadata = config?.metadata as Record<string, unknown> | undefined;
654
+ const streamHandler = new ChatModelStreamHandler();
634
655
  const stream = await model.stream(finalMessages, config);
635
656
  let finalChunk: AIMessageChunk | undefined;
636
657
  for await (const chunk of stream) {
637
- await safeDispatchCustomEvent(
658
+ await streamHandler.handle(
638
659
  GraphEvents.CHAT_MODEL_STREAM,
639
- { chunk, emitted: true },
640
- config
660
+ { chunk },
661
+ metadata,
662
+ this
641
663
  );
642
664
  finalChunk = finalChunk ? concat(finalChunk, chunk) : chunk;
643
665
  }
644
- finalChunk = modifyDeltaProperties(provider, finalChunk);
666
+
667
+ if (manualToolStreamProviders.has(provider)) {
668
+ finalChunk = modifyDeltaProperties(provider, finalChunk);
669
+ }
670
+
671
+ if ((finalChunk?.tool_calls?.length ?? 0) > 0) {
672
+ finalChunk!.tool_calls = finalChunk!.tool_calls?.filter(
673
+ (tool_call: ToolCall) => !!tool_call.name
674
+ );
675
+ }
676
+
645
677
  return { messages: [finalChunk as AIMessageChunk] };
646
678
  } else {
679
+ /** Fallback for models without stream support. */
647
680
  const finalMessage = await model.invoke(finalMessages, config);
648
681
  if ((finalMessage.tool_calls?.length ?? 0) > 0) {
649
682
  finalMessage.tool_calls = finalMessage.tool_calls?.filter(
@@ -907,6 +940,128 @@ export class StandardGraph extends Graph<t.BaseGraphState, t.GraphNode> {
907
940
  if (!result) {
908
941
  throw new Error('No result after model invocation');
909
942
  }
943
+
944
+ /**
945
+ * Fallback: populate toolCallStepIds in the graph execution context.
946
+ *
947
+ * When model.stream() is available (the common case), attemptInvoke
948
+ * processes all chunks through a local ChatModelStreamHandler which
949
+ * creates run steps and populates toolCallStepIds before returning.
950
+ * The code below is a fallback for the rare case where model.stream
951
+ * is unavailable and model.invoke() was used instead.
952
+ *
953
+ * Text content is dispatched FIRST so that MESSAGE_CREATION is the
954
+ * current step when handleToolCalls runs. handleToolCalls then creates
955
+ * TOOL_CALLS on top of it. The dedup in getMessageId and
956
+ * toolCallStepIds.has makes this safe when attemptInvoke already
957
+ * handled everything — both paths become no-ops.
958
+ */
959
+ const responseMessage = result.messages?.[0];
960
+ const toolCalls = (responseMessage as AIMessageChunk | undefined)
961
+ ?.tool_calls;
962
+ const hasToolCalls = Array.isArray(toolCalls) && toolCalls.length > 0;
963
+
964
+ if (hasToolCalls) {
965
+ const metadata = config.metadata as Record<string, unknown>;
966
+ const stepKey = this.getStepKey(metadata);
967
+ const content = responseMessage?.content as MessageContent | undefined;
968
+ const hasTextContent =
969
+ content != null &&
970
+ (typeof content === 'string'
971
+ ? content !== ''
972
+ : Array.isArray(content) && content.length > 0);
973
+
974
+ /**
975
+ * Dispatch text content BEFORE creating TOOL_CALLS steps.
976
+ * getMessageId returns a new ID only on the first call for a step key;
977
+ * if the for-await consumer already claimed it, this is a no-op.
978
+ */
979
+ if (hasTextContent) {
980
+ const messageId = getMessageId(stepKey, this) ?? '';
981
+ if (messageId) {
982
+ await this.dispatchRunStep(
983
+ stepKey,
984
+ {
985
+ type: StepTypes.MESSAGE_CREATION,
986
+ message_creation: { message_id: messageId },
987
+ },
988
+ metadata
989
+ );
990
+ const stepId = this.getStepIdByKey(stepKey);
991
+ if (typeof content === 'string') {
992
+ await this.dispatchMessageDelta(stepId, {
993
+ content: [{ type: ContentTypes.TEXT, text: content }],
994
+ });
995
+ } else if (
996
+ Array.isArray(content) &&
997
+ content.every(
998
+ (c) =>
999
+ typeof c === 'object' &&
1000
+ 'type' in c &&
1001
+ typeof c.type === 'string' &&
1002
+ c.type.startsWith('text')
1003
+ )
1004
+ ) {
1005
+ await this.dispatchMessageDelta(stepId, {
1006
+ content: content as t.MessageDelta['content'],
1007
+ });
1008
+ }
1009
+ }
1010
+ }
1011
+
1012
+ await handleToolCalls(toolCalls as ToolCall[], metadata, this);
1013
+ }
1014
+
1015
+ /**
1016
+ * When streaming is disabled, on_chat_model_stream events are never
1017
+ * emitted so ChatModelStreamHandler never fires. Dispatch the text
1018
+ * content as MESSAGE_CREATION + MESSAGE_DELTA here.
1019
+ */
1020
+ const disableStreaming =
1021
+ (agentContext.clientOptions as t.OpenAIClientOptions | undefined)
1022
+ ?.disableStreaming === true;
1023
+
1024
+ if (
1025
+ disableStreaming &&
1026
+ !hasToolCalls &&
1027
+ responseMessage != null &&
1028
+ (responseMessage.content as MessageContent | undefined) != null
1029
+ ) {
1030
+ const metadata = config.metadata as Record<string, unknown>;
1031
+ const stepKey = this.getStepKey(metadata);
1032
+ const messageId = getMessageId(stepKey, this) ?? '';
1033
+ if (messageId) {
1034
+ await this.dispatchRunStep(
1035
+ stepKey,
1036
+ {
1037
+ type: StepTypes.MESSAGE_CREATION,
1038
+ message_creation: { message_id: messageId },
1039
+ },
1040
+ metadata
1041
+ );
1042
+ }
1043
+ const stepId = this.getStepIdByKey(stepKey);
1044
+ const content = responseMessage.content;
1045
+ if (typeof content === 'string') {
1046
+ await this.dispatchMessageDelta(stepId, {
1047
+ content: [{ type: ContentTypes.TEXT, text: content }],
1048
+ });
1049
+ } else if (
1050
+ Array.isArray(content) &&
1051
+ content.every(
1052
+ (c) =>
1053
+ typeof c === 'object' &&
1054
+ 'type' in c &&
1055
+ typeof c.type === 'string' &&
1056
+ c.type.startsWith('text')
1057
+ )
1058
+ ) {
1059
+ await this.dispatchMessageDelta(stepId, {
1060
+ content: content as t.MessageDelta['content'],
1061
+ });
1062
+ }
1063
+ }
1064
+
910
1065
  agentContext.currentUsage = this.getUsageMetadata(result.messages?.[0]);
911
1066
  this.cleanupSignalListener();
912
1067
  return result;
@@ -1070,124 +1225,6 @@ export class StandardGraph extends Graph<t.BaseGraphState, t.GraphNode> {
1070
1225
  return stepId;
1071
1226
  }
1072
1227
 
1073
- async handleToolCallCompleted(
1074
- data: t.ToolEndData,
1075
- metadata?: Record<string, unknown>,
1076
- omitOutput?: boolean
1077
- ): Promise<void> {
1078
- if (!this.config) {
1079
- throw new Error('No config provided');
1080
- }
1081
-
1082
- if (!data.output) {
1083
- return;
1084
- }
1085
-
1086
- const { input, output: _output } = data;
1087
- if ((_output as Command | undefined)?.lg_name === 'Command') {
1088
- return;
1089
- }
1090
- const output = _output as ToolMessage;
1091
- const { tool_call_id } = output;
1092
- const stepId = this.toolCallStepIds.get(tool_call_id) ?? '';
1093
- if (!stepId) {
1094
- throw new Error(`No stepId found for tool_call_id ${tool_call_id}`);
1095
- }
1096
-
1097
- const runStep = this.getRunStep(stepId);
1098
- if (!runStep) {
1099
- throw new Error(`No run step found for stepId ${stepId}`);
1100
- }
1101
-
1102
- /**
1103
- * Extract and store code execution session context from artifacts.
1104
- * Each file is stamped with its source session_id to support multi-session file tracking.
1105
- * When the same filename appears in a later execution, the newer version replaces the old.
1106
- */
1107
- const toolName = output.name;
1108
- if (
1109
- toolName === Constants.EXECUTE_CODE ||
1110
- toolName === Constants.PROGRAMMATIC_TOOL_CALLING
1111
- ) {
1112
- const artifact = output.artifact as t.CodeExecutionArtifact | undefined;
1113
- if (artifact?.session_id != null && artifact.session_id !== '') {
1114
- const newFiles = artifact.files ?? [];
1115
- const existingSession = this.sessions.get(Constants.EXECUTE_CODE) as
1116
- | t.CodeSessionContext
1117
- | undefined;
1118
- const existingFiles = existingSession?.files ?? [];
1119
-
1120
- if (newFiles.length > 0) {
1121
- /**
1122
- * Stamp each new file with its source session_id.
1123
- * This enables files from different executions (parallel or sequential)
1124
- * to be tracked and passed to subsequent calls.
1125
- */
1126
- const filesWithSession: t.FileRefs = newFiles.map((file) => ({
1127
- ...file,
1128
- session_id: artifact.session_id,
1129
- }));
1130
-
1131
- /**
1132
- * Merge files, preferring latest versions by name.
1133
- * If a file with the same name exists, replace it with the new version.
1134
- * This handles cases where files are edited/recreated in subsequent executions.
1135
- */
1136
- const newFileNames = new Set(filesWithSession.map((f) => f.name));
1137
- const filteredExisting = existingFiles.filter(
1138
- (f) => !newFileNames.has(f.name)
1139
- );
1140
-
1141
- this.sessions.set(Constants.EXECUTE_CODE, {
1142
- session_id: artifact.session_id,
1143
- files: [...filteredExisting, ...filesWithSession],
1144
- lastUpdated: Date.now(),
1145
- });
1146
- } else {
1147
- /**
1148
- * Store session_id even without new files for session continuity.
1149
- * The CodeExecutor can fall back to the /files endpoint to discover
1150
- * session files not explicitly returned in the exec response.
1151
- */
1152
- this.sessions.set(Constants.EXECUTE_CODE, {
1153
- session_id: artifact.session_id,
1154
- files: existingFiles,
1155
- lastUpdated: Date.now(),
1156
- });
1157
- }
1158
- }
1159
- }
1160
-
1161
- const dispatchedOutput =
1162
- typeof output.content === 'string'
1163
- ? output.content
1164
- : JSON.stringify(output.content);
1165
-
1166
- const args = typeof input === 'string' ? input : input.input;
1167
- const tool_call = {
1168
- args: typeof args === 'string' ? args : JSON.stringify(args),
1169
- name: output.name ?? '',
1170
- id: output.tool_call_id,
1171
- output: omitOutput === true ? '' : dispatchedOutput,
1172
- progress: 1,
1173
- };
1174
-
1175
- await this.handlerRegistry
1176
- ?.getHandler(GraphEvents.ON_RUN_STEP_COMPLETED)
1177
- ?.handle(
1178
- GraphEvents.ON_RUN_STEP_COMPLETED,
1179
- {
1180
- result: {
1181
- id: stepId,
1182
- index: runStep.index,
1183
- type: 'tool_call',
1184
- tool_call,
1185
- } as t.ToolCompleteEvent,
1186
- },
1187
- metadata,
1188
- this
1189
- );
1190
- }
1191
1228
  /**
1192
1229
  * Static version of handleToolCallError to avoid creating strong references
1193
1230
  * that prevent garbage collection
package/src/run.ts CHANGED
@@ -190,12 +190,6 @@ export class Run<_T extends t.BaseGraphState> {
190
190
  tags?: string[],
191
191
  metadata?: Record<string, unknown>
192
192
  ): Promise<void> => {
193
- if (
194
- (data as t.StreamEventData)['emitted'] === true &&
195
- eventName === GraphEvents.CHAT_MODEL_STREAM
196
- ) {
197
- return;
198
- }
199
193
  const handler = this.handlerRegistry?.getHandler(eventName);
200
194
  if (handler && this.Graph) {
201
195
  return await handler.handle(
@@ -16,8 +16,8 @@ import {
16
16
  createMetadataAggregator,
17
17
  } from '@/events';
18
18
  import { ContentTypes, GraphEvents, Providers, TitleMethod } from '@/common';
19
- import { ChatModelStreamHandler, createContentAggregator } from '@/stream';
20
19
  import { capitalizeFirstLetter } from './spec.utils';
20
+ import { createContentAggregator } from '@/stream';
21
21
  import { getLLMConfig } from '@/utils/llmConfig';
22
22
  import { getArgs } from '@/scripts/args';
23
23
  import { Run } from '@/run';
@@ -63,7 +63,6 @@ describe(`${capitalizeFirstLetter(provider)} Streaming Tests`, () => {
63
63
  > => ({
64
64
  [GraphEvents.TOOL_END]: new ToolEndHandler(),
65
65
  [GraphEvents.CHAT_MODEL_END]: new ModelEndHandler(collectedUsage),
66
- [GraphEvents.CHAT_MODEL_STREAM]: new ChatModelStreamHandler(),
67
66
  [GraphEvents.ON_RUN_STEP_COMPLETED]: {
68
67
  handle: (
69
68
  event: GraphEvents.ON_RUN_STEP_COMPLETED,
@@ -16,8 +16,8 @@ import {
16
16
  createMetadataAggregator,
17
17
  } from '@/events';
18
18
  import { ContentTypes, GraphEvents, Providers, TitleMethod } from '@/common';
19
- import { ChatModelStreamHandler, createContentAggregator } from '@/stream';
20
19
  import { capitalizeFirstLetter } from './spec.utils';
20
+ import { createContentAggregator } from '@/stream';
21
21
  import { getLLMConfig } from '@/utils/llmConfig';
22
22
  import { Run } from '@/run';
23
23
 
@@ -83,7 +83,6 @@ describeIfAzure(`${capitalizeFirstLetter(provider)} Streaming Tests`, () => {
83
83
  > => ({
84
84
  [GraphEvents.TOOL_END]: new ToolEndHandler(),
85
85
  [GraphEvents.CHAT_MODEL_END]: new ModelEndHandler(collectedUsage),
86
- [GraphEvents.CHAT_MODEL_STREAM]: new ChatModelStreamHandler(),
87
86
  [GraphEvents.ON_RUN_STEP_COMPLETED]: {
88
87
  handle: (
89
88
  event: GraphEvents.ON_RUN_STEP_COMPLETED,
@@ -10,9 +10,9 @@ import {
10
10
  UsageMetadata,
11
11
  } from '@langchain/core/messages';
12
12
  import type * as t from '@/types';
13
- import { ChatModelStreamHandler, createContentAggregator } from '@/stream';
14
13
  import { ModelEndHandler, ToolEndHandler } from '@/events';
15
14
  import { capitalizeFirstLetter } from './spec.utils';
15
+ import { createContentAggregator } from '@/stream';
16
16
  import { GraphEvents, Providers } from '@/common';
17
17
  import { getLLMConfig } from '@/utils/llmConfig';
18
18
  import { getArgs } from '@/scripts/args';
@@ -36,7 +36,6 @@ describe('Prompt Caching Integration Tests', () => {
36
36
  const customHandlers: Record<string | GraphEvents, t.EventHandler> = {
37
37
  [GraphEvents.TOOL_END]: new ToolEndHandler(),
38
38
  [GraphEvents.CHAT_MODEL_END]: new ModelEndHandler(collectedUsage),
39
- [GraphEvents.CHAT_MODEL_STREAM]: new ChatModelStreamHandler(),
40
39
  [GraphEvents.ON_RUN_STEP_COMPLETED]: {
41
40
  handle: (
42
41
  event: GraphEvents.ON_RUN_STEP_COMPLETED,
@@ -1,8 +1,8 @@
1
1
  import { HumanMessage } from '@langchain/core/messages';
2
2
  import type * as t from '@/types';
3
- import { ToolEndHandler, ModelEndHandler } from '@/events';
4
3
  import { ContentTypes, GraphEvents, Providers } from '@/common';
5
- import { ChatModelStreamHandler, createContentAggregator } from '@/stream';
4
+ import { ToolEndHandler, ModelEndHandler } from '@/events';
5
+ import { createContentAggregator } from '@/stream';
6
6
  import { Run } from '@/run';
7
7
 
8
8
  describe('Custom event handler awaitHandlers behavior', () => {
@@ -39,7 +39,6 @@ describe('Custom event handler awaitHandlers behavior', () => {
39
39
  const customHandlers: Record<string | GraphEvents, t.EventHandler> = {
40
40
  [GraphEvents.TOOL_END]: new ToolEndHandler(),
41
41
  [GraphEvents.CHAT_MODEL_END]: new ModelEndHandler(),
42
- [GraphEvents.CHAT_MODEL_STREAM]: new ChatModelStreamHandler(),
43
42
  [GraphEvents.ON_RUN_STEP_COMPLETED]: {
44
43
  handle: (
45
44
  event: GraphEvents.ON_RUN_STEP_COMPLETED,
@@ -133,7 +132,6 @@ describe('Custom event handler awaitHandlers behavior', () => {
133
132
  const customHandlers: Record<string | GraphEvents, t.EventHandler> = {
134
133
  [GraphEvents.TOOL_END]: new ToolEndHandler(),
135
134
  [GraphEvents.CHAT_MODEL_END]: new ModelEndHandler(),
136
- [GraphEvents.CHAT_MODEL_STREAM]: new ChatModelStreamHandler(),
137
135
  [GraphEvents.ON_RUN_STEP_COMPLETED]: {
138
136
  handle: (
139
137
  event: GraphEvents.ON_RUN_STEP_COMPLETED,
@@ -11,8 +11,8 @@ import {
11
11
  import type * as t from '@/types';
12
12
  import { ToolEndHandler, ModelEndHandler } from '@/events';
13
13
  import { ContentTypes, GraphEvents, Providers } from '@/common';
14
- import { ChatModelStreamHandler, createContentAggregator } from '@/stream';
15
14
  import { capitalizeFirstLetter } from './spec.utils';
15
+ import { createContentAggregator } from '@/stream';
16
16
  import { getLLMConfig } from '@/utils/llmConfig';
17
17
  import { Run } from '@/run';
18
18
 
@@ -64,7 +64,6 @@ const skipTests = process.env.DEEPSEEK_API_KEY == null;
64
64
  > => ({
65
65
  [GraphEvents.TOOL_END]: new ToolEndHandler(),
66
66
  [GraphEvents.CHAT_MODEL_END]: new ModelEndHandler(collectedUsage),
67
- [GraphEvents.CHAT_MODEL_STREAM]: new ChatModelStreamHandler(),
68
67
  [GraphEvents.ON_RUN_STEP_COMPLETED]: {
69
68
  handle: (
70
69
  event: GraphEvents.ON_RUN_STEP_COMPLETED,