@librechat/agents 3.0.81 → 3.0.771

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (42)
  1. package/dist/cjs/graphs/Graph.cjs +5 -19
  2. package/dist/cjs/graphs/Graph.cjs.map +1 -1
  3. package/dist/cjs/llm/bedrock/index.cjs +25 -98
  4. package/dist/cjs/llm/bedrock/index.cjs.map +1 -1
  5. package/dist/cjs/messages/core.cjs +1 -1
  6. package/dist/cjs/messages/core.cjs.map +1 -1
  7. package/dist/cjs/stream.cjs +2 -4
  8. package/dist/cjs/stream.cjs.map +1 -1
  9. package/dist/cjs/tools/ToolNode.cjs +5 -9
  10. package/dist/cjs/tools/ToolNode.cjs.map +1 -1
  11. package/dist/esm/graphs/Graph.mjs +5 -19
  12. package/dist/esm/graphs/Graph.mjs.map +1 -1
  13. package/dist/esm/llm/bedrock/index.mjs +24 -97
  14. package/dist/esm/llm/bedrock/index.mjs.map +1 -1
  15. package/dist/esm/messages/core.mjs +1 -1
  16. package/dist/esm/messages/core.mjs.map +1 -1
  17. package/dist/esm/stream.mjs +2 -4
  18. package/dist/esm/stream.mjs.map +1 -1
  19. package/dist/esm/tools/ToolNode.mjs +5 -9
  20. package/dist/esm/tools/ToolNode.mjs.map +1 -1
  21. package/dist/types/llm/bedrock/index.d.ts +7 -86
  22. package/dist/types/types/tools.d.ts +0 -2
  23. package/package.json +3 -6
  24. package/src/graphs/Graph.ts +5 -23
  25. package/src/llm/bedrock/index.ts +43 -180
  26. package/src/messages/core.ts +1 -1
  27. package/src/scripts/thinking.ts +18 -39
  28. package/src/scripts/tools.ts +3 -7
  29. package/src/stream.ts +2 -4
  30. package/src/tools/ToolNode.ts +5 -9
  31. package/src/types/tools.ts +0 -2
  32. package/dist/types/llm/bedrock/types.d.ts +0 -27
  33. package/dist/types/llm/bedrock/utils/index.d.ts +0 -5
  34. package/dist/types/llm/bedrock/utils/message_inputs.d.ts +0 -31
  35. package/dist/types/llm/bedrock/utils/message_outputs.d.ts +0 -33
  36. package/src/llm/bedrock/llm.spec.ts +0 -616
  37. package/src/llm/bedrock/types.ts +0 -51
  38. package/src/llm/bedrock/utils/index.ts +0 -18
  39. package/src/llm/bedrock/utils/message_inputs.ts +0 -563
  40. package/src/llm/bedrock/utils/message_outputs.ts +0 -310
  41. package/src/scripts/code_exec_multi_session.ts +0 -241
  42. package/src/scripts/thinking-bedrock.ts +0 -159
package/dist/types/llm/bedrock/index.d.ts CHANGED
@@ -1,9 +1,5 @@
  /**
  * Optimized ChatBedrockConverse wrapper that fixes contentBlockIndex conflicts
- * and adds support for latest @langchain/aws features:
- *
- * - Application Inference Profiles (PR #9129)
- * - Service Tiers (Priority/Standard/Flex) (PR #9785) - requires AWS SDK 3.966.0+
  *
  * Bedrock sends the same contentBlockIndex for both text and tool_use content blocks,
  * causing LangChain's merge logic to fail with "field[contentBlockIndex] already exists"
@@ -15,94 +11,19 @@
  * the purpose of tracking tool call ordering.
  */
  import { ChatBedrockConverse } from '@langchain/aws';
- import { ChatGenerationChunk, ChatResult } from '@langchain/core/outputs';
- import type { CallbackManagerForLLMRun } from '@langchain/core/callbacks/manager';
  import type { ChatBedrockConverseInput } from '@langchain/aws';
  import type { BaseMessage } from '@langchain/core/messages';
- /**
- * Service tier type for Bedrock invocations.
- * Requires AWS SDK >= 3.966.0 to actually work.
- * @see https://docs.aws.amazon.com/bedrock/latest/userguide/service-tiers-inference.html
- */
- export type ServiceTierType = 'priority' | 'default' | 'flex' | 'reserved';
- /**
- * Extended input interface with additional features:
- * - applicationInferenceProfile: Use an inference profile ARN instead of model ID
- * - serviceTier: Specify service tier (Priority, Standard, Flex, Reserved)
- */
- export interface CustomChatBedrockConverseInput extends ChatBedrockConverseInput {
- /**
- * Application Inference Profile ARN to use for the model.
- * For example, "arn:aws:bedrock:eu-west-1:123456789102:application-inference-profile/fm16bt65tzgx"
- * When provided, this ARN will be used for the actual inference calls instead of the model ID.
- * Must still provide `model` as normal modelId to benefit from all the metadata.
- * @see https://docs.aws.amazon.com/bedrock/latest/userguide/inference-profiles-create.html
- */
- applicationInferenceProfile?: string;
- /**
- * Service tier for model invocation.
- * Specifies the processing tier type used for serving the request.
- * Supported values are 'priority', 'default', 'flex', and 'reserved'.
- *
- * - 'priority': Prioritized processing for lower latency
- * - 'default': Standard processing tier
- * - 'flex': Flexible processing tier with lower cost
- * - 'reserved': Reserved capacity for consistent performance
- *
- * If not provided, AWS uses the default tier.
- * Note: Requires AWS SDK >= 3.966.0 to work.
- * @see https://docs.aws.amazon.com/bedrock/latest/userguide/service-tiers-inference.html
- */
- serviceTier?: ServiceTierType;
- }
- /**
- * Extended call options with serviceTier override support.
- */
- export interface CustomChatBedrockConverseCallOptions {
- serviceTier?: ServiceTierType;
- }
+ import { ChatGenerationChunk } from '@langchain/core/outputs';
+ import type { CallbackManagerForLLMRun } from '@langchain/core/callbacks/manager';
  export declare class CustomChatBedrockConverse extends ChatBedrockConverse {
- /**
- * Application Inference Profile ARN to use instead of model ID.
- */
- applicationInferenceProfile?: string;
- /**
- * Service tier for model invocation.
- */
- serviceTier?: ServiceTierType;
- constructor(fields?: CustomChatBedrockConverseInput);
+ constructor(fields?: ChatBedrockConverseInput);
  static lc_name(): string;
  /**
- * Get the model ID to use for API calls.
- * Returns applicationInferenceProfile if set, otherwise returns this.model.
- */
- protected getModelId(): string;
- /**
- * Override invocationParams to add serviceTier support.
- */
- invocationParams(options?: this['ParsedCallOptions'] & CustomChatBedrockConverseCallOptions): ReturnType<ChatBedrockConverse['invocationParams']> & {
- serviceTier?: {
- type: ServiceTierType;
- };
- };
- /**
- * Override _generateNonStreaming to use applicationInferenceProfile as modelId.
- * Uses the same model-swapping pattern as streaming for consistency.
- */
- _generateNonStreaming(messages: BaseMessage[], options: this['ParsedCallOptions'] & CustomChatBedrockConverseCallOptions, runManager?: CallbackManagerForLLMRun): Promise<ChatResult>;
- /**
- * Override _streamResponseChunks to:
- * 1. Use applicationInferenceProfile as modelId (by temporarily swapping this.model)
- * 2. Strip contentBlockIndex from response_metadata to prevent merge conflicts
- *
- * Note: We delegate to super._streamResponseChunks() to preserve @langchain/aws's
- * internal chunk handling which correctly preserves array content for reasoning blocks.
- */
- _streamResponseChunks(messages: BaseMessage[], options: this['ParsedCallOptions'] & CustomChatBedrockConverseCallOptions, runManager?: CallbackManagerForLLMRun): AsyncGenerator<ChatGenerationChunk>;
- /**
- * Clean a chunk by removing contentBlockIndex from response_metadata.
+ * Override _streamResponseChunks to strip contentBlockIndex from response_metadata
+ * This prevents LangChain's merge conflicts when the same index is used for
+ * different content types (text vs tool calls)
  */
- private cleanChunk;
+ _streamResponseChunks(messages: BaseMessage[], options: this['ParsedCallOptions'], runManager?: CallbackManagerForLLMRun): AsyncGenerator<ChatGenerationChunk>;
  /**
  * Check if contentBlockIndex exists at any level in the object
  */
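After this change, the wrapper's public surface is just construction with the stock ChatBedrockConverseInput plus the streaming override. A minimal usage sketch, assuming the class is exported from the package entry point; the model ID and region below are placeholder values, not taken from this diff:

import { HumanMessage } from '@langchain/core/messages';
import { CustomChatBedrockConverse } from '@librechat/agents';

// Placeholder model/region; any standard ChatBedrockConverseInput fields apply.
const llm = new CustomChatBedrockConverse({
  model: 'anthropic.claude-3-5-sonnet-20240620-v1:0',
  region: 'us-east-1',
});

// Streaming routes through the overridden _streamResponseChunks, which strips
// contentBlockIndex from response_metadata before LangChain merges chunks.
const stream = await llm.stream([new HumanMessage('Hello!')]);
for await (const chunk of stream) {
  if (typeof chunk.content === 'string') process.stdout.write(chunk.content);
}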
@@ -57,8 +57,6 @@ export type FileRef = {
  id: string;
  name: string;
  path?: string;
- /** Session ID this file belongs to (for multi-session file tracking) */
- session_id?: string;
  };
  export type FileRefs = FileRef[];
  export type ExecuteResult = {
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "@librechat/agents",
- "version": "3.0.81",
+ "version": "3.0.771",
  "main": "./dist/cjs/main.cjs",
  "module": "./dist/esm/main.mjs",
  "types": "./dist/types/index.d.ts",
@@ -51,14 +51,12 @@
  "image": "node -r dotenv/config --loader ./tsconfig-paths-bootstrap.mjs --experimental-specifier-resolution=node ./src/scripts/image.ts --provider 'google' --name 'Jo' --location 'New York, NY'",
  "code_exec_files": "node -r dotenv/config --loader ./tsconfig-paths-bootstrap.mjs --experimental-specifier-resolution=node ./src/scripts/code_exec_files.ts --provider 'openAI' --name 'Jo' --location 'New York, NY'",
  "code_exec_session": "node -r dotenv/config --loader ./tsconfig-paths-bootstrap.mjs --experimental-specifier-resolution=node ./src/scripts/code_exec_session.ts --provider 'openAI' --name 'Jo' --location 'New York, NY'",
- "code_exec_multi_session": "node -r dotenv/config --loader ./tsconfig-paths-bootstrap.mjs --experimental-specifier-resolution=node ./src/scripts/code_exec_multi_session.ts --provider 'openAI' --name 'Jo' --location 'New York, NY'",
  "code_exec_simple": "node -r dotenv/config --loader ./tsconfig-paths-bootstrap.mjs --experimental-specifier-resolution=node ./src/scripts/code_exec_simple.ts --provider 'openAI' --name 'Jo' --location 'New York, NY'",
- "simple": "node -r dotenv/config --loader ./tsconfig-paths-bootstrap.mjs --experimental-specifier-resolution=node ./src/scripts/simple.ts --provider 'bedrock' --name 'Jo' --location 'New York, NY'",
+ "simple": "node -r dotenv/config --loader ./tsconfig-paths-bootstrap.mjs --experimental-specifier-resolution=node ./src/scripts/simple.ts --provider 'openrouter' --name 'Jo' --location 'New York, NY'",
  "caching": "node -r dotenv/config --loader ./tsconfig-paths-bootstrap.mjs --experimental-specifier-resolution=node ./src/scripts/caching.ts --name 'Jo' --location 'New York, NY'",
  "thinking": "node -r dotenv/config --loader ./tsconfig-paths-bootstrap.mjs --experimental-specifier-resolution=node ./src/scripts/thinking.ts --name 'Jo' --location 'New York, NY'",
- "thinking:bedrock": "node -r dotenv/config --loader ./tsconfig-paths-bootstrap.mjs --experimental-specifier-resolution=node ./src/scripts/thinking-bedrock.ts --name 'Jo' --location 'New York, NY'",
  "memory": "node -r dotenv/config --loader ./tsconfig-paths-bootstrap.mjs --experimental-specifier-resolution=node ./src/scripts/memory.ts --provider 'openAI' --name 'Jo' --location 'New York, NY'",
- "tool": "node --trace-warnings -r dotenv/config --loader ./tsconfig-paths-bootstrap.mjs --experimental-specifier-resolution=node ./src/scripts/tools.ts --provider 'bedrock' --name 'Jo' --location 'New York, NY'",
+ "tool": "node --trace-warnings -r dotenv/config --loader ./tsconfig-paths-bootstrap.mjs --experimental-specifier-resolution=node ./src/scripts/tools.ts --provider 'openrouter' --name 'Jo' --location 'New York, NY'",
  "search": "node -r dotenv/config --loader ./tsconfig-paths-bootstrap.mjs --experimental-specifier-resolution=node ./src/scripts/search.ts --provider 'bedrock' --name 'Jo' --location 'New York, NY'",
  "tool_search": "node -r dotenv/config --loader ./tsconfig-paths-bootstrap.mjs --experimental-specifier-resolution=node ./src/scripts/tool_search.ts",
  "programmatic_exec": "node -r dotenv/config --loader ./tsconfig-paths-bootstrap.mjs --experimental-specifier-resolution=node ./src/scripts/programmatic_exec.ts",
@@ -112,7 +110,6 @@
  }
  },
  "dependencies": {
- "@aws-sdk/client-bedrock-runtime": "^3.970.0",
  "@langchain/anthropic": "^0.3.26",
  "@langchain/aws": "^0.1.15",
  "@langchain/core": "^0.3.80",
package/src/graphs/Graph.ts CHANGED
@@ -1042,8 +1042,8 @@ export class StandardGraph extends Graph<t.BaseGraphState, t.GraphNode> {

  /**
  * Extract and store code execution session context from artifacts.
- * Each file is stamped with its source session_id to support multi-session file tracking.
- * When the same filename appears in a later execution, the newer version replaces the old.
+ * Only update session_id when files are generated - this ensures we don't
+ * lose the original session that contains the files.
  */
  const toolName = output.name;
  if (
@@ -1060,35 +1060,17 @@ export class StandardGraph extends Graph<t.BaseGraphState, t.GraphNode> {
  artifact.session_id !== ''
  ) {
  /**
- * Stamp each new file with its source session_id.
- * This enables files from different executions (parallel or sequential)
- * to be tracked and passed to subsequent calls.
+ * Files were generated - update session with the new session_id.
+ * The new session_id is the one that contains these files.
  */
- const filesWithSession: t.FileRefs = newFiles.map((file) => ({
- ...file,
- session_id: artifact.session_id,
- }));
-
  const existingSession = this.sessions.get(Constants.EXECUTE_CODE) as
  | t.CodeSessionContext
  | undefined;
  const existingFiles = existingSession?.files ?? [];

- /**
- * Merge files, preferring latest versions by name.
- * If a file with the same name exists, replace it with the new version.
- * This handles cases where files are edited/recreated in subsequent executions.
- */
- const newFileNames = new Set(filesWithSession.map((f) => f.name));
- const filteredExisting = existingFiles.filter(
- (f) => !newFileNames.has(f.name)
- );
-
  this.sessions.set(Constants.EXECUTE_CODE, {
- /** Keep latest session_id for reference/fallback */
  session_id: artifact.session_id,
- /** Accumulated files with latest versions preferred */
- files: [...filteredExisting, ...filesWithSession],
+ files: [...existingFiles, ...newFiles],
  lastUpdated: Date.now(),
  });
  }
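In effect, the new Graph.ts logic overwrites the stored session_id and appends new files, dropping the per-filename deduplication the old version performed. A self-contained sketch of that behavior, using stand-in types rather than the package's actual t.FileRef / t.CodeSessionContext definitions:

// Stand-in types for illustration only; not the package's actual module.
type FileRef = { id: string; name: string; path?: string };
type CodeSessionContext = { session_id: string; files: FileRef[]; lastUpdated: number };

function updateCodeSession(
  sessions: Map<string, CodeSessionContext>,
  key: string,
  sessionId: string,
  newFiles: FileRef[]
): void {
  // session_id is refreshed only when new files arrive, so the stored id
  // always points at a session that actually contains files.
  const existingFiles = sessions.get(key)?.files ?? [];
  sessions.set(key, {
    session_id: sessionId,
    files: [...existingFiles, ...newFiles], // append; no per-name dedup in this version
    lastUpdated: Date.now(),
  });
}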
package/src/llm/bedrock/index.ts CHANGED
@@ -1,9 +1,5 @@
  /**
  * Optimized ChatBedrockConverse wrapper that fixes contentBlockIndex conflicts
- * and adds support for latest @langchain/aws features:
- *
- * - Application Inference Profiles (PR #9129)
- * - Service Tiers (Priority/Standard/Flex) (PR #9785) - requires AWS SDK 3.966.0+
  *
  * Bedrock sends the same contentBlockIndex for both text and tool_use content blocks,
  * causing LangChain's merge logic to fail with "field[contentBlockIndex] already exists"
@@ -16,74 +12,15 @@
  */

  import { ChatBedrockConverse } from '@langchain/aws';
- import { AIMessageChunk } from '@langchain/core/messages';
- import { ChatGenerationChunk, ChatResult } from '@langchain/core/outputs';
- import type { CallbackManagerForLLMRun } from '@langchain/core/callbacks/manager';
  import type { ChatBedrockConverseInput } from '@langchain/aws';
+ import { AIMessageChunk } from '@langchain/core/messages';
  import type { BaseMessage } from '@langchain/core/messages';
-
- /**
- * Service tier type for Bedrock invocations.
- * Requires AWS SDK >= 3.966.0 to actually work.
- * @see https://docs.aws.amazon.com/bedrock/latest/userguide/service-tiers-inference.html
- */
- export type ServiceTierType = 'priority' | 'default' | 'flex' | 'reserved';
-
- /**
- * Extended input interface with additional features:
- * - applicationInferenceProfile: Use an inference profile ARN instead of model ID
- * - serviceTier: Specify service tier (Priority, Standard, Flex, Reserved)
- */
- export interface CustomChatBedrockConverseInput
- extends ChatBedrockConverseInput {
- /**
- * Application Inference Profile ARN to use for the model.
- * For example, "arn:aws:bedrock:eu-west-1:123456789102:application-inference-profile/fm16bt65tzgx"
- * When provided, this ARN will be used for the actual inference calls instead of the model ID.
- * Must still provide `model` as normal modelId to benefit from all the metadata.
- * @see https://docs.aws.amazon.com/bedrock/latest/userguide/inference-profiles-create.html
- */
- applicationInferenceProfile?: string;
-
- /**
- * Service tier for model invocation.
- * Specifies the processing tier type used for serving the request.
- * Supported values are 'priority', 'default', 'flex', and 'reserved'.
- *
- * - 'priority': Prioritized processing for lower latency
- * - 'default': Standard processing tier
- * - 'flex': Flexible processing tier with lower cost
- * - 'reserved': Reserved capacity for consistent performance
- *
- * If not provided, AWS uses the default tier.
- * Note: Requires AWS SDK >= 3.966.0 to work.
- * @see https://docs.aws.amazon.com/bedrock/latest/userguide/service-tiers-inference.html
- */
- serviceTier?: ServiceTierType;
- }
-
- /**
- * Extended call options with serviceTier override support.
- */
- export interface CustomChatBedrockConverseCallOptions {
- serviceTier?: ServiceTierType;
- }
+ import { ChatGenerationChunk } from '@langchain/core/outputs';
+ import type { CallbackManagerForLLMRun } from '@langchain/core/callbacks/manager';

  export class CustomChatBedrockConverse extends ChatBedrockConverse {
- /**
- * Application Inference Profile ARN to use instead of model ID.
- */
- applicationInferenceProfile?: string;
-
- /**
- * Service tier for model invocation.
- */
- serviceTier?: ServiceTierType;
-
- constructor(fields?: CustomChatBedrockConverseInput) {
+ constructor(fields?: ChatBedrockConverseInput) {
  super(fields);
- this.applicationInferenceProfile = fields?.applicationInferenceProfile;
- this.serviceTier = fields?.serviceTier;
  }

  static lc_name(): string {
@@ -91,126 +28,52 @@ export class CustomChatBedrockConverse extends ChatBedrockConverse {
  }

  /**
- * Get the model ID to use for API calls.
- * Returns applicationInferenceProfile if set, otherwise returns this.model.
- */
- protected getModelId(): string {
- return this.applicationInferenceProfile ?? this.model;
- }
-
- /**
- * Override invocationParams to add serviceTier support.
- */
- override invocationParams(
- options?: this['ParsedCallOptions'] & CustomChatBedrockConverseCallOptions
- ): ReturnType<ChatBedrockConverse['invocationParams']> & {
- serviceTier?: { type: ServiceTierType };
- } {
- const baseParams = super.invocationParams(options);
-
- /** Service tier from options or fall back to class-level setting */
- const serviceTierType = options?.serviceTier ?? this.serviceTier;
-
- return {
- ...baseParams,
- serviceTier: serviceTierType ? { type: serviceTierType } : undefined,
- };
- }
-
- /**
- * Override _generateNonStreaming to use applicationInferenceProfile as modelId.
- * Uses the same model-swapping pattern as streaming for consistency.
+ * Override _streamResponseChunks to strip contentBlockIndex from response_metadata
+ * This prevents LangChain's merge conflicts when the same index is used for
+ * different content types (text vs tool calls)
  */
- override async _generateNonStreaming(
+ async *_streamResponseChunks(
  messages: BaseMessage[],
- options: this['ParsedCallOptions'] & CustomChatBedrockConverseCallOptions,
- runManager?: CallbackManagerForLLMRun
- ): Promise<ChatResult> {
- // Temporarily swap model for applicationInferenceProfile support
- const originalModel = this.model;
- if (
- this.applicationInferenceProfile != null &&
- this.applicationInferenceProfile !== ''
- ) {
- this.model = this.applicationInferenceProfile;
- }
-
- try {
- return await super._generateNonStreaming(messages, options, runManager);
- } finally {
- // Restore original model
- this.model = originalModel;
- }
- }
-
- /**
- * Override _streamResponseChunks to:
- * 1. Use applicationInferenceProfile as modelId (by temporarily swapping this.model)
- * 2. Strip contentBlockIndex from response_metadata to prevent merge conflicts
- *
- * Note: We delegate to super._streamResponseChunks() to preserve @langchain/aws's
- * internal chunk handling which correctly preserves array content for reasoning blocks.
- */
- override async *_streamResponseChunks(
- messages: BaseMessage[],
- options: this['ParsedCallOptions'] & CustomChatBedrockConverseCallOptions,
+ options: this['ParsedCallOptions'],
  runManager?: CallbackManagerForLLMRun
  ): AsyncGenerator<ChatGenerationChunk> {
- // Temporarily swap model for applicationInferenceProfile support
- const originalModel = this.model;
- if (
- this.applicationInferenceProfile != null &&
- this.applicationInferenceProfile !== ''
- ) {
- this.model = this.applicationInferenceProfile;
- }
-
- try {
- // Use parent's streaming logic which correctly handles reasoning content
- const baseStream = super._streamResponseChunks(
- messages,
- options,
- runManager
- );
-
- for await (const chunk of baseStream) {
- // Clean contentBlockIndex from response_metadata to prevent merge conflicts
- yield this.cleanChunk(chunk);
+ const baseStream = super._streamResponseChunks(
+ messages,
+ options,
+ runManager
+ );
+
+ for await (const chunk of baseStream) {
+ // Only process if we have response_metadata
+ if (
+ chunk.message instanceof AIMessageChunk &&
+ (chunk.message as Partial<AIMessageChunk>).response_metadata &&
+ typeof chunk.message.response_metadata === 'object'
+ ) {
+ // Check if contentBlockIndex exists anywhere in response_metadata (top level or nested)
+ const hasContentBlockIndex = this.hasContentBlockIndex(
+ chunk.message.response_metadata
+ );
+
+ if (hasContentBlockIndex) {
+ const cleanedMetadata = this.removeContentBlockIndex(
+ chunk.message.response_metadata
+ ) as Record<string, unknown>;
+
+ yield new ChatGenerationChunk({
+ text: chunk.text,
+ message: new AIMessageChunk({
+ ...chunk.message,
+ response_metadata: cleanedMetadata,
+ }),
+ generationInfo: chunk.generationInfo,
+ });
+ continue;
+ }
  }
- } finally {
- // Restore original model
- this.model = originalModel;
- }
- }

- /**
- * Clean a chunk by removing contentBlockIndex from response_metadata.
- */
- private cleanChunk(chunk: ChatGenerationChunk): ChatGenerationChunk {
- const message = chunk.message;
- if (!(message instanceof AIMessageChunk)) {
- return chunk;
+ yield chunk;
  }
-
- const metadata = message.response_metadata as Record<string, unknown>;
- const hasContentBlockIndex = this.hasContentBlockIndex(metadata);
- if (!hasContentBlockIndex) {
- return chunk;
- }
-
- const cleanedMetadata = this.removeContentBlockIndex(metadata) as Record<
- string,
- unknown
- >;
-
- return new ChatGenerationChunk({
- text: chunk.text,
- message: new AIMessageChunk({
- ...message,
- response_metadata: cleanedMetadata,
- }),
- generationInfo: chunk.generationInfo,
- });
  }

  /**
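The override relies on hasContentBlockIndex and removeContentBlockIndex, whose bodies fall outside this diff's hunks. One plausible recursive implementation of such helpers, offered as an illustration rather than the package's actual code:

// Hypothetical standalone versions of the helpers the wrapper calls.
function hasContentBlockIndex(value: unknown): boolean {
  if (Array.isArray(value)) return value.some((v) => hasContentBlockIndex(v));
  if (value !== null && typeof value === 'object') {
    const obj = value as Record<string, unknown>;
    if ('contentBlockIndex' in obj) return true;
    return Object.values(obj).some((v) => hasContentBlockIndex(v));
  }
  return false;
}

function removeContentBlockIndex(value: unknown): unknown {
  if (Array.isArray(value)) return value.map((v) => removeContentBlockIndex(v));
  if (value !== null && typeof value === 'object') {
    const result: Record<string, unknown> = {};
    for (const [key, val] of Object.entries(value as Record<string, unknown>)) {
      if (key === 'contentBlockIndex') continue; // drop the conflicting field
      result[key] = removeContentBlockIndex(val); // recurse into nested objects/arrays
    }
    return result;
  }
  return value;
}

Note this sketch rebuilds plain objects, which is enough for response_metadata; it would flatten class instances if any were present.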
package/src/messages/core.ts CHANGED
@@ -41,7 +41,7 @@ User: ${userMessage[1]}
  const _allowedTypes = ['image_url', 'text', 'tool_use', 'tool_result'];
  const allowedTypesByProvider: Record<string, string[]> = {
  default: _allowedTypes,
- [Providers.ANTHROPIC]: [..._allowedTypes, 'thinking', 'redacted_thinking'],
+ [Providers.ANTHROPIC]: [..._allowedTypes, 'thinking'],
  [Providers.BEDROCK]: [..._allowedTypes, 'reasoning_content'],
  [Providers.OPENAI]: _allowedTypes,
  };
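Since 'redacted_thinking' is no longer in the Anthropic allow-list, content parts of that type would now be dropped wherever the list is applied. A simplified illustration of that kind of allow-list filter; the provider keys and part shape here are stand-ins, not the module's actual API:

// Simplified allow-list mirroring the structure above (stand-in keys).
const allowedTypesByProvider: Record<string, string[]> = {
  default: ['image_url', 'text', 'tool_use', 'tool_result'],
  anthropic: ['image_url', 'text', 'tool_use', 'tool_result', 'thinking'],
  bedrock: ['image_url', 'text', 'tool_use', 'tool_result', 'reasoning_content'],
};

function filterContentParts(
  provider: string,
  parts: Array<{ type?: string }>
): Array<{ type?: string }> {
  const allowed = allowedTypesByProvider[provider] ?? allowedTypesByProvider.default;
  // With 'redacted_thinking' removed from the Anthropic list, such blocks
  // would be filtered out here rather than forwarded to the model.
  return parts.filter((p) => p.type !== undefined && allowed.includes(p.type));
}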
package/src/scripts/thinking.ts CHANGED
@@ -1,11 +1,7 @@
  // src/scripts/test-thinking.ts
  import { config } from 'dotenv';
  config();
- import {
- HumanMessage,
- SystemMessage,
- BaseMessage,
- } from '@langchain/core/messages';
+ import { HumanMessage, SystemMessage, BaseMessage } from '@langchain/core/messages';
  import type { UsageMetadata } from '@langchain/core/messages';
  import * as t from '@/types';
  import { ChatModelStreamHandler, createContentAggregator } from '@/stream';
@@ -25,23 +21,17 @@ async function testThinking(): Promise<void> {
  const instructions = `You are a helpful AI assistant for ${userName}. When answering questions, be thorough in your reasoning.`;
  const { contentParts, aggregateContent } = createContentAggregator();
  _contentParts = contentParts as t.MessageContentComplex[];
-
+
  // Set up event handlers
  const customHandlers = {
  [GraphEvents.TOOL_END]: new ToolEndHandler(),
  [GraphEvents.CHAT_MODEL_END]: new ModelEndHandler(collectedUsage),
  [GraphEvents.CHAT_MODEL_STREAM]: new ChatModelStreamHandler(),
  [GraphEvents.ON_RUN_STEP_COMPLETED]: {
- handle: (
- event: GraphEvents.ON_RUN_STEP_COMPLETED,
- data: t.StreamEventData
- ): void => {
+ handle: (event: GraphEvents.ON_RUN_STEP_COMPLETED, data: t.StreamEventData): void => {
  console.log('====== ON_RUN_STEP_COMPLETED ======');
- aggregateContent({
- event,
- data: data as unknown as { result: t.ToolEndEvent },
- });
- },
+ aggregateContent({ event, data: data as unknown as { result: t.ToolEndEvent } });
+ }
  },
  [GraphEvents.ON_RUN_STEP]: {
  handle: (event: GraphEvents.ON_RUN_STEP, data: t.RunStep) => {
@@ -49,38 +39,29 @@ async function testThinking(): Promise<void> {
  },
  },
  [GraphEvents.ON_RUN_STEP_DELTA]: {
- handle: (
- event: GraphEvents.ON_RUN_STEP_DELTA,
- data: t.RunStepDeltaEvent
- ) => {
+ handle: (event: GraphEvents.ON_RUN_STEP_DELTA, data: t.RunStepDeltaEvent) => {
  aggregateContent({ event, data });
  },
  },
  [GraphEvents.ON_MESSAGE_DELTA]: {
- handle: (
- event: GraphEvents.ON_MESSAGE_DELTA,
- data: t.MessageDeltaEvent
- ) => {
+ handle: (event: GraphEvents.ON_MESSAGE_DELTA, data: t.MessageDeltaEvent) => {
  aggregateContent({ event, data });
  },
  },
  [GraphEvents.ON_REASONING_DELTA]: {
- handle: (
- event: GraphEvents.ON_REASONING_DELTA,
- data: t.ReasoningDeltaEvent
- ) => {
+ handle: (event: GraphEvents.ON_REASONING_DELTA, data: t.ReasoningDeltaEvent) => {
  aggregateContent({ event, data });
  },
  },
  };

  const baseLlmConfig: t.LLMConfig = getLLMConfig(Providers.ANTHROPIC);
-
+
  // Enable thinking with token budget
  const llmConfig = {
  ...baseLlmConfig,
  model: 'claude-3-7-sonnet-latest',
- thinking: { type: 'enabled', budget_tokens: 2000 },
+ thinking: { type: "enabled", budget_tokens: 2000 }
  };

  const run = await Run.create<t.IState>({
@@ -112,7 +93,7 @@ async function testThinking(): Promise<void> {
  console.log('Running first query with thinking enabled...');
  const firstInputs = { messages: [...conversationHistory] };
  await run.processStream(firstInputs, config);
-
+
  // Extract and display thinking blocks
  const finalMessages = run.getRunMessages();

@@ -120,32 +101,30 @@ async function testThinking(): Promise<void> {
  console.log('\n\nTest 2: Multi-turn conversation with thinking enabled');
  const userMessage2 = `Given your previous analysis, what would be the most significant technical challenges in making this transition?`;
  conversationHistory.push(new HumanMessage(userMessage2));
-
+
  console.log('Running second query with thinking enabled...');
  const secondInputs = { messages: [...conversationHistory] };
  await run.processStream(secondInputs, config);
-
+
  // Display thinking blocks for second response
  const finalMessages2 = run.getRunMessages();

  // Test 3: Redacted thinking mode
  console.log('\n\nTest 3: Redacted thinking mode');
- const magicString =
- 'ANTHROPIC_MAGIC_STRING_TRIGGER_REDACTED_THINKING_46C9A13E193C177646C7398A98432ECCCE4C1253D5E2D82641AC0E52CC2876CB';
+ const magicString = "ANTHROPIC_MAGIC_STRING_TRIGGER_REDACTED_THINKING_46C9A13E193C177646C7398A98432ECCCE4C1253D5E2D82641AC0E52CC2876CB";
  const userMessage3 = `${magicString}\n\nExplain how quantum computing works in simple terms.`;
-
+
  // Reset conversation for clean test
  conversationHistory.length = 0;
  conversationHistory.push(new HumanMessage(userMessage3));
-
+
  console.log('Running query with redacted thinking...');
  const thirdInputs = { messages: [...conversationHistory] };
  await run.processStream(thirdInputs, config);
-
+
  // Display redacted thinking blocks
  const finalMessages3 = run.getRunMessages();
  console.log('\n\nThinking feature test completed!');
- console.dir(finalMessages3, { depth: null });
  }

  process.on('unhandledRejection', (reason, promise) => {
@@ -168,4 +147,4 @@ testThinking().catch((err) => {
  console.log('Content parts:');
  console.dir(_contentParts, { depth: null });
  process.exit(1);
- });
+ });
package/src/scripts/tools.ts CHANGED
@@ -18,13 +18,9 @@ async function testStandardStreaming(): Promise<void> {
  const { userName, location, provider, currentDate } = await getArgs();
  const { contentParts, aggregateContent } = createContentAggregator();
  const customHandlers = {
- [GraphEvents.TOOL_END]: new ToolEndHandler(
- undefined,
- undefined,
- (name?: string) => {
- return true;
- }
- ),
+ [GraphEvents.TOOL_END]: new ToolEndHandler(undefined, (name?: string) => {
+ return true;
+ }),
  [GraphEvents.CHAT_MODEL_END]: {
  handle: (
  _event: string,
package/src/stream.ts CHANGED
@@ -339,8 +339,7 @@ hasToolCallChunks: ${hasToolCallChunks}
  (c) =>
  (c.type?.startsWith(ContentTypes.THINKING) ?? false) ||
  (c.type?.startsWith(ContentTypes.REASONING) ?? false) ||
- (c.type?.startsWith(ContentTypes.REASONING_CONTENT) ?? false) ||
- c.type === 'redacted_thinking'
+ (c.type?.startsWith(ContentTypes.REASONING_CONTENT) ?? false)
  )
  ) {
  await graph.dispatchReasoningDelta(stepId, {
@@ -366,8 +365,7 @@ hasToolCallChunks: ${hasToolCallChunks}
  Array.isArray(chunk.content) &&
  (chunk.content[0]?.type === ContentTypes.THINKING ||
  chunk.content[0]?.type === ContentTypes.REASONING ||
- chunk.content[0]?.type === ContentTypes.REASONING_CONTENT ||
- chunk.content[0]?.type === 'redacted_thinking')
+ chunk.content[0]?.type === ContentTypes.REASONING_CONTENT)
  ) {
  reasoning_content = 'valid';
  } else if (