@librechat/agents 3.0.776 → 3.1.0

This diff compares the contents of publicly released package versions as they appear in their public registry, and is provided for informational purposes only.
Files changed (42)
  1. package/dist/cjs/graphs/Graph.cjs +19 -5
  2. package/dist/cjs/graphs/Graph.cjs.map +1 -1
  3. package/dist/cjs/llm/bedrock/index.cjs +98 -25
  4. package/dist/cjs/llm/bedrock/index.cjs.map +1 -1
  5. package/dist/cjs/messages/core.cjs +1 -1
  6. package/dist/cjs/messages/core.cjs.map +1 -1
  7. package/dist/cjs/stream.cjs +4 -2
  8. package/dist/cjs/stream.cjs.map +1 -1
  9. package/dist/cjs/tools/ToolNode.cjs +9 -5
  10. package/dist/cjs/tools/ToolNode.cjs.map +1 -1
  11. package/dist/esm/graphs/Graph.mjs +19 -5
  12. package/dist/esm/graphs/Graph.mjs.map +1 -1
  13. package/dist/esm/llm/bedrock/index.mjs +97 -24
  14. package/dist/esm/llm/bedrock/index.mjs.map +1 -1
  15. package/dist/esm/messages/core.mjs +1 -1
  16. package/dist/esm/messages/core.mjs.map +1 -1
  17. package/dist/esm/stream.mjs +4 -2
  18. package/dist/esm/stream.mjs.map +1 -1
  19. package/dist/esm/tools/ToolNode.mjs +9 -5
  20. package/dist/esm/tools/ToolNode.mjs.map +1 -1
  21. package/dist/types/llm/bedrock/index.d.ts +86 -7
  22. package/dist/types/llm/bedrock/types.d.ts +27 -0
  23. package/dist/types/llm/bedrock/utils/index.d.ts +5 -0
  24. package/dist/types/llm/bedrock/utils/message_inputs.d.ts +31 -0
  25. package/dist/types/llm/bedrock/utils/message_outputs.d.ts +33 -0
  26. package/dist/types/types/tools.d.ts +2 -0
  27. package/package.json +5 -2
  28. package/src/graphs/Graph.ts +23 -5
  29. package/src/llm/bedrock/index.ts +180 -43
  30. package/src/llm/bedrock/llm.spec.ts +616 -0
  31. package/src/llm/bedrock/types.ts +51 -0
  32. package/src/llm/bedrock/utils/index.ts +18 -0
  33. package/src/llm/bedrock/utils/message_inputs.ts +563 -0
  34. package/src/llm/bedrock/utils/message_outputs.ts +310 -0
  35. package/src/messages/core.ts +1 -1
  36. package/src/scripts/code_exec_multi_session.ts +241 -0
  37. package/src/scripts/thinking-bedrock.ts +159 -0
  38. package/src/scripts/thinking.ts +39 -18
  39. package/src/scripts/tools.ts +7 -3
  40. package/src/stream.ts +4 -2
  41. package/src/tools/ToolNode.ts +9 -5
  42. package/src/types/tools.ts +2 -0
package/dist/types/llm/bedrock/index.d.ts CHANGED
@@ -1,5 +1,9 @@
  /**
  * Optimized ChatBedrockConverse wrapper that fixes contentBlockIndex conflicts
+ * and adds support for latest @langchain/aws features:
+ *
+ * - Application Inference Profiles (PR #9129)
+ * - Service Tiers (Priority/Standard/Flex) (PR #9785) - requires AWS SDK 3.966.0+
  *
  * Bedrock sends the same contentBlockIndex for both text and tool_use content blocks,
  * causing LangChain's merge logic to fail with "field[contentBlockIndex] already exists"
@@ -11,19 +15,94 @@
  * the purpose of tracking tool call ordering.
  */
  import { ChatBedrockConverse } from '@langchain/aws';
+ import { ChatGenerationChunk, ChatResult } from '@langchain/core/outputs';
+ import type { CallbackManagerForLLMRun } from '@langchain/core/callbacks/manager';
  import type { ChatBedrockConverseInput } from '@langchain/aws';
  import type { BaseMessage } from '@langchain/core/messages';
- import { ChatGenerationChunk } from '@langchain/core/outputs';
- import type { CallbackManagerForLLMRun } from '@langchain/core/callbacks/manager';
+ /**
+ * Service tier type for Bedrock invocations.
+ * Requires AWS SDK >= 3.966.0 to actually work.
+ * @see https://docs.aws.amazon.com/bedrock/latest/userguide/service-tiers-inference.html
+ */
+ export type ServiceTierType = 'priority' | 'default' | 'flex' | 'reserved';
+ /**
+ * Extended input interface with additional features:
+ * - applicationInferenceProfile: Use an inference profile ARN instead of model ID
+ * - serviceTier: Specify service tier (Priority, Standard, Flex, Reserved)
+ */
+ export interface CustomChatBedrockConverseInput extends ChatBedrockConverseInput {
+ /**
+ * Application Inference Profile ARN to use for the model.
+ * For example, "arn:aws:bedrock:eu-west-1:123456789102:application-inference-profile/fm16bt65tzgx"
+ * When provided, this ARN will be used for the actual inference calls instead of the model ID.
+ * Must still provide `model` as normal modelId to benefit from all the metadata.
+ * @see https://docs.aws.amazon.com/bedrock/latest/userguide/inference-profiles-create.html
+ */
+ applicationInferenceProfile?: string;
+ /**
+ * Service tier for model invocation.
+ * Specifies the processing tier type used for serving the request.
+ * Supported values are 'priority', 'default', 'flex', and 'reserved'.
+ *
+ * - 'priority': Prioritized processing for lower latency
+ * - 'default': Standard processing tier
+ * - 'flex': Flexible processing tier with lower cost
+ * - 'reserved': Reserved capacity for consistent performance
+ *
+ * If not provided, AWS uses the default tier.
+ * Note: Requires AWS SDK >= 3.966.0 to work.
+ * @see https://docs.aws.amazon.com/bedrock/latest/userguide/service-tiers-inference.html
+ */
+ serviceTier?: ServiceTierType;
+ }
+ /**
+ * Extended call options with serviceTier override support.
+ */
+ export interface CustomChatBedrockConverseCallOptions {
+ serviceTier?: ServiceTierType;
+ }
  export declare class CustomChatBedrockConverse extends ChatBedrockConverse {
- constructor(fields?: ChatBedrockConverseInput);
+ /**
+ * Application Inference Profile ARN to use instead of model ID.
+ */
+ applicationInferenceProfile?: string;
+ /**
+ * Service tier for model invocation.
+ */
+ serviceTier?: ServiceTierType;
+ constructor(fields?: CustomChatBedrockConverseInput);
  static lc_name(): string;
  /**
- * Override _streamResponseChunks to strip contentBlockIndex from response_metadata
- * This prevents LangChain's merge conflicts when the same index is used for
- * different content types (text vs tool calls)
+ * Get the model ID to use for API calls.
+ * Returns applicationInferenceProfile if set, otherwise returns this.model.
+ */
+ protected getModelId(): string;
+ /**
+ * Override invocationParams to add serviceTier support.
+ */
+ invocationParams(options?: this['ParsedCallOptions'] & CustomChatBedrockConverseCallOptions): ReturnType<ChatBedrockConverse['invocationParams']> & {
+ serviceTier?: {
+ type: ServiceTierType;
+ };
+ };
+ /**
+ * Override _generateNonStreaming to use applicationInferenceProfile as modelId.
+ * Uses the same model-swapping pattern as streaming for consistency.
+ */
+ _generateNonStreaming(messages: BaseMessage[], options: this['ParsedCallOptions'] & CustomChatBedrockConverseCallOptions, runManager?: CallbackManagerForLLMRun): Promise<ChatResult>;
+ /**
+ * Override _streamResponseChunks to:
+ * 1. Use applicationInferenceProfile as modelId (by temporarily swapping this.model)
+ * 2. Strip contentBlockIndex from response_metadata to prevent merge conflicts
+ *
+ * Note: We delegate to super._streamResponseChunks() to preserve @langchain/aws's
+ * internal chunk handling which correctly preserves array content for reasoning blocks.
+ */
+ _streamResponseChunks(messages: BaseMessage[], options: this['ParsedCallOptions'] & CustomChatBedrockConverseCallOptions, runManager?: CallbackManagerForLLMRun): AsyncGenerator<ChatGenerationChunk>;
+ /**
+ * Clean a chunk by removing contentBlockIndex from response_metadata.
  */
- _streamResponseChunks(messages: BaseMessage[], options: this['ParsedCallOptions'], runManager?: CallbackManagerForLLMRun): AsyncGenerator<ChatGenerationChunk>;
+ private cleanChunk;
  /**
  * Check if contentBlockIndex exists at any level in the object
  */
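
Note: a minimal usage sketch of the new options, assuming the class is exported from the package root (the model ID, region, and ARN below are placeholders):

import { CustomChatBedrockConverse } from '@librechat/agents';

const llm = new CustomChatBedrockConverse({
  model: 'anthropic.claude-3-5-sonnet-20240620-v1:0',
  region: 'eu-west-1',
  // Inference requests go to this ARN; `model` stays set for metadata.
  applicationInferenceProfile:
    'arn:aws:bedrock:eu-west-1:123456789102:application-inference-profile/fm16bt65tzgx',
  // Class-level default tier; takes effect only with AWS SDK >= 3.966.0.
  serviceTier: 'flex',
});

const response = await llm.invoke('Summarize the Converse API in one sentence.');
console.log(response.content);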
package/dist/types/llm/bedrock/types.d.ts CHANGED
@@ -0,0 +1,27 @@
+ /**
+ * Type definitions for Bedrock Converse utilities.
+ */
+ import type { Message as BedrockMessage, SystemContentBlock as BedrockSystemContentBlock, ContentBlock as BedrockContentBlock, ConverseResponse, ContentBlockDeltaEvent, ConverseStreamMetadataEvent, ContentBlockStartEvent, ReasoningContentBlock, ReasoningContentBlockDelta } from '@aws-sdk/client-bedrock-runtime';
+ /**
+ * Reasoning content block type for LangChain messages.
+ */
+ export interface MessageContentReasoningBlock {
+ type: 'reasoning_content';
+ reasoningText?: {
+ text?: string;
+ signature?: string;
+ };
+ redactedContent?: string;
+ }
+ export interface MessageContentReasoningBlockReasoningTextPartial {
+ type: 'reasoning_content';
+ reasoningText: {
+ text?: string;
+ signature?: string;
+ };
+ }
+ export interface MessageContentReasoningBlockRedacted {
+ type: 'reasoning_content';
+ redactedContent: string;
+ }
+ export type { BedrockMessage, BedrockSystemContentBlock, BedrockContentBlock, ConverseResponse, ContentBlockDeltaEvent, ConverseStreamMetadataEvent, ContentBlockStartEvent, ReasoningContentBlock, ReasoningContentBlockDelta, };
package/dist/types/llm/bedrock/utils/index.d.ts CHANGED
@@ -0,0 +1,5 @@
+ /**
+ * Bedrock Converse utility exports.
+ */
+ export { convertToConverseMessages, extractImageInfo, langchainReasoningBlockToBedrockReasoningBlock, concatenateLangchainReasoningBlocks, } from './message_inputs';
+ export { convertConverseMessageToLangChainMessage, handleConverseStreamContentBlockStart, handleConverseStreamContentBlockDelta, handleConverseStreamMetadata, bedrockReasoningBlockToLangchainReasoningBlock, bedrockReasoningDeltaToLangchainPartialReasoningBlock, } from './message_outputs';
package/dist/types/llm/bedrock/utils/message_inputs.d.ts CHANGED
@@ -0,0 +1,31 @@
+ /**
+ * Utility functions for converting LangChain messages to Bedrock Converse messages.
+ * Ported from @langchain/aws common.js
+ */
+ import { type BaseMessage, MessageContentComplex } from '@langchain/core/messages';
+ import type { BedrockMessage, BedrockSystemContentBlock, BedrockContentBlock, MessageContentReasoningBlock } from '../types';
+ /**
+ * Convert a LangChain reasoning block to a Bedrock reasoning block.
+ */
+ export declare function langchainReasoningBlockToBedrockReasoningBlock(content: MessageContentReasoningBlock): {
+ reasoningText?: {
+ text?: string;
+ signature?: string;
+ };
+ redactedContent?: Uint8Array;
+ };
+ /**
+ * Concatenate consecutive reasoning blocks in content array.
+ */
+ export declare function concatenateLangchainReasoningBlocks(content: Array<MessageContentComplex | MessageContentReasoningBlock>): Array<MessageContentComplex | MessageContentReasoningBlock>;
+ /**
+ * Extract image info from a base64 string or URL.
+ */
+ export declare function extractImageInfo(base64: string): BedrockContentBlock;
+ /**
+ * Convert LangChain messages to Bedrock Converse messages.
+ */
+ export declare function convertToConverseMessages(messages: BaseMessage[]): {
+ converseMessages: BedrockMessage[];
+ converseSystem: BedrockSystemContentBlock[];
+ };
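
Note: a short sketch of the main conversion entry point under the declared signature (the import path is assumed; the commented output shapes follow the Bedrock Converse types, where system prompts are split into a dedicated field):

import { HumanMessage, SystemMessage } from '@langchain/core/messages';
import { convertToConverseMessages } from './utils';

const { converseMessages, converseSystem } = convertToConverseMessages([
  new SystemMessage('You are a helpful assistant.'),
  new HumanMessage('What is the capital of France?'),
]);

// converseSystem:   [{ text: 'You are a helpful assistant.' }]
// converseMessages: [{ role: 'user', content: [{ text: 'What is the capital of France?' }] }]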
package/dist/types/llm/bedrock/utils/message_outputs.d.ts CHANGED
@@ -0,0 +1,33 @@
+ /**
+ * Utility functions for converting Bedrock Converse responses to LangChain messages.
+ * Ported from @langchain/aws common.js
+ */
+ import { AIMessage } from '@langchain/core/messages';
+ import { ChatGenerationChunk } from '@langchain/core/outputs';
+ import type { BedrockMessage, ConverseResponse, ContentBlockDeltaEvent, ConverseStreamMetadataEvent, ContentBlockStartEvent, ReasoningContentBlock, ReasoningContentBlockDelta, MessageContentReasoningBlock, MessageContentReasoningBlockReasoningTextPartial, MessageContentReasoningBlockRedacted } from '../types';
+ /**
+ * Convert a Bedrock reasoning block delta to a LangChain partial reasoning block.
+ */
+ export declare function bedrockReasoningDeltaToLangchainPartialReasoningBlock(reasoningContent: ReasoningContentBlockDelta): MessageContentReasoningBlockReasoningTextPartial | MessageContentReasoningBlockRedacted;
+ /**
+ * Convert a Bedrock reasoning block to a LangChain reasoning block.
+ */
+ export declare function bedrockReasoningBlockToLangchainReasoningBlock(reasoningContent: ReasoningContentBlock): MessageContentReasoningBlock;
+ /**
+ * Convert a Bedrock Converse message to a LangChain message.
+ */
+ export declare function convertConverseMessageToLangChainMessage(message: BedrockMessage, responseMetadata: Omit<ConverseResponse, 'output'>): AIMessage;
+ /**
+ * Handle a content block delta event from Bedrock Converse stream.
+ */
+ export declare function handleConverseStreamContentBlockDelta(contentBlockDelta: ContentBlockDeltaEvent): ChatGenerationChunk;
+ /**
+ * Handle a content block start event from Bedrock Converse stream.
+ */
+ export declare function handleConverseStreamContentBlockStart(contentBlockStart: ContentBlockStartEvent): ChatGenerationChunk | null;
+ /**
+ * Handle a metadata event from Bedrock Converse stream.
+ */
+ export declare function handleConverseStreamMetadata(metadata: ConverseStreamMetadataEvent, extra: {
+ streamUsage: boolean;
+ }): ChatGenerationChunk;
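
Note: a sketch of how these handlers could sit in a Converse stream loop; the event names follow @aws-sdk/client-bedrock-runtime's ConverseStreamOutput union, and the surrounding generator is an assumption:

import {
  handleConverseStreamContentBlockStart,
  handleConverseStreamContentBlockDelta,
  handleConverseStreamMetadata,
} from './utils';
import type { ConverseStreamOutput } from '@aws-sdk/client-bedrock-runtime';

async function* toChunks(stream: AsyncIterable<ConverseStreamOutput>) {
  for await (const event of stream) {
    if (event.contentBlockStart) {
      // Returns null when the start event has nothing to emit (e.g. no tool use).
      const chunk = handleConverseStreamContentBlockStart(event.contentBlockStart);
      if (chunk) yield chunk;
    } else if (event.contentBlockDelta) {
      yield handleConverseStreamContentBlockDelta(event.contentBlockDelta);
    } else if (event.metadata) {
      yield handleConverseStreamMetadata(event.metadata, { streamUsage: true });
    }
  }
}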
package/dist/types/types/tools.d.ts CHANGED
@@ -57,6 +57,8 @@ export type FileRef = {
  id: string;
  name: string;
  path?: string;
+ /** Session ID this file belongs to (for multi-session file tracking) */
+ session_id?: string;
  };
  export type FileRefs = FileRef[];
  export type ExecuteResult = {
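
Note: illustratively, a file reference produced by one code-execution session now carries that session's ID (the root import is an assumption; values are made up):

import type { FileRef } from '@librechat/agents';

const ref: FileRef = {
  id: 'file_01',
  name: 'report.csv',
  path: '/mnt/data/report.csv',
  // New in 3.1.0: records which execution session produced the file.
  session_id: 'sess_abc123',
};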
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "@librechat/agents",
- "version": "3.0.776",
+ "version": "3.1.0",
  "main": "./dist/cjs/main.cjs",
  "module": "./dist/esm/main.mjs",
  "types": "./dist/types/index.d.ts",
@@ -51,12 +51,14 @@
  "image": "node -r dotenv/config --loader ./tsconfig-paths-bootstrap.mjs --experimental-specifier-resolution=node ./src/scripts/image.ts --provider 'google' --name 'Jo' --location 'New York, NY'",
  "code_exec_files": "node -r dotenv/config --loader ./tsconfig-paths-bootstrap.mjs --experimental-specifier-resolution=node ./src/scripts/code_exec_files.ts --provider 'openAI' --name 'Jo' --location 'New York, NY'",
  "code_exec_session": "node -r dotenv/config --loader ./tsconfig-paths-bootstrap.mjs --experimental-specifier-resolution=node ./src/scripts/code_exec_session.ts --provider 'openAI' --name 'Jo' --location 'New York, NY'",
+ "code_exec_multi_session": "node -r dotenv/config --loader ./tsconfig-paths-bootstrap.mjs --experimental-specifier-resolution=node ./src/scripts/code_exec_multi_session.ts --provider 'openAI' --name 'Jo' --location 'New York, NY'",
  "code_exec_simple": "node -r dotenv/config --loader ./tsconfig-paths-bootstrap.mjs --experimental-specifier-resolution=node ./src/scripts/code_exec_simple.ts --provider 'openAI' --name 'Jo' --location 'New York, NY'",
  "simple": "node -r dotenv/config --loader ./tsconfig-paths-bootstrap.mjs --experimental-specifier-resolution=node ./src/scripts/simple.ts --provider 'bedrock' --name 'Jo' --location 'New York, NY'",
  "caching": "node -r dotenv/config --loader ./tsconfig-paths-bootstrap.mjs --experimental-specifier-resolution=node ./src/scripts/caching.ts --name 'Jo' --location 'New York, NY'",
  "thinking": "node -r dotenv/config --loader ./tsconfig-paths-bootstrap.mjs --experimental-specifier-resolution=node ./src/scripts/thinking.ts --name 'Jo' --location 'New York, NY'",
+ "thinking:bedrock": "node -r dotenv/config --loader ./tsconfig-paths-bootstrap.mjs --experimental-specifier-resolution=node ./src/scripts/thinking-bedrock.ts --name 'Jo' --location 'New York, NY'",
  "memory": "node -r dotenv/config --loader ./tsconfig-paths-bootstrap.mjs --experimental-specifier-resolution=node ./src/scripts/memory.ts --provider 'openAI' --name 'Jo' --location 'New York, NY'",
- "tool": "node --trace-warnings -r dotenv/config --loader ./tsconfig-paths-bootstrap.mjs --experimental-specifier-resolution=node ./src/scripts/tools.ts --provider 'openrouter' --name 'Jo' --location 'New York, NY'",
+ "tool": "node --trace-warnings -r dotenv/config --loader ./tsconfig-paths-bootstrap.mjs --experimental-specifier-resolution=node ./src/scripts/tools.ts --provider 'bedrock' --name 'Jo' --location 'New York, NY'",
  "search": "node -r dotenv/config --loader ./tsconfig-paths-bootstrap.mjs --experimental-specifier-resolution=node ./src/scripts/search.ts --provider 'bedrock' --name 'Jo' --location 'New York, NY'",
  "tool_search": "node -r dotenv/config --loader ./tsconfig-paths-bootstrap.mjs --experimental-specifier-resolution=node ./src/scripts/tool_search.ts",
  "programmatic_exec": "node -r dotenv/config --loader ./tsconfig-paths-bootstrap.mjs --experimental-specifier-resolution=node ./src/scripts/programmatic_exec.ts",
@@ -110,6 +112,7 @@
  }
  },
  "dependencies": {
+ "@aws-sdk/client-bedrock-runtime": "^3.970.0",
  "@langchain/anthropic": "^0.3.26",
  "@langchain/aws": "^0.1.15",
  "@langchain/core": "^0.3.80",
package/src/graphs/Graph.ts CHANGED
@@ -1042,8 +1042,8 @@ export class StandardGraph extends Graph<t.BaseGraphState, t.GraphNode> {
 
  /**
  * Extract and store code execution session context from artifacts.
- * Only update session_id when files are generated - this ensures we don't
- * lose the original session that contains the files.
+ * Each file is stamped with its source session_id to support multi-session file tracking.
+ * When the same filename appears in a later execution, the newer version replaces the old.
  */
  const toolName = output.name;
  if (
@@ -1060,17 +1060,35 @@
  artifact.session_id !== ''
  ) {
  /**
- * Files were generated - update session with the new session_id.
- * The new session_id is the one that contains these files.
+ * Stamp each new file with its source session_id.
+ * This enables files from different executions (parallel or sequential)
+ * to be tracked and passed to subsequent calls.
  */
+ const filesWithSession: t.FileRefs = newFiles.map((file) => ({
+ ...file,
+ session_id: artifact.session_id,
+ }));
+
  const existingSession = this.sessions.get(Constants.EXECUTE_CODE) as
  | t.CodeSessionContext
  | undefined;
  const existingFiles = existingSession?.files ?? [];
 
+ /**
+ * Merge files, preferring latest versions by name.
+ * If a file with the same name exists, replace it with the new version.
+ * This handles cases where files are edited/recreated in subsequent executions.
+ */
+ const newFileNames = new Set(filesWithSession.map((f) => f.name));
+ const filteredExisting = existingFiles.filter(
+ (f) => !newFileNames.has(f.name)
+ );
+
  this.sessions.set(Constants.EXECUTE_CODE, {
+ /** Keep latest session_id for reference/fallback */
  session_id: artifact.session_id,
- files: [...existingFiles, ...newFiles],
+ /** Accumulated files with latest versions preferred */
+ files: [...filteredExisting, ...filesWithSession],
  lastUpdated: Date.now(),
  });
  }
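
Note: the merge rule above in isolation, as a self-contained sketch (types inlined, data made up):

type FileRef = { id: string; name: string; path?: string; session_id?: string };

/** Keep older files unless a newer execution produced a file with the same name. */
function mergeByName(existing: FileRef[], incoming: FileRef[]): FileRef[] {
  const incomingNames = new Set(incoming.map((f) => f.name));
  return [...existing.filter((f) => !incomingNames.has(f.name)), ...incoming];
}

const merged = mergeByName(
  [{ id: '1', name: 'report.csv', session_id: 'sess_a' }],
  [{ id: '2', name: 'report.csv', session_id: 'sess_b' }]
);
// merged keeps only the sess_b version of report.csv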
package/src/llm/bedrock/index.ts CHANGED
@@ -1,5 +1,9 @@
  /**
  * Optimized ChatBedrockConverse wrapper that fixes contentBlockIndex conflicts
+ * and adds support for latest @langchain/aws features:
+ *
+ * - Application Inference Profiles (PR #9129)
+ * - Service Tiers (Priority/Standard/Flex) (PR #9785) - requires AWS SDK 3.966.0+
  *
  * Bedrock sends the same contentBlockIndex for both text and tool_use content blocks,
  * causing LangChain's merge logic to fail with "field[contentBlockIndex] already exists"
@@ -12,15 +16,74 @@
  */
 
  import { ChatBedrockConverse } from '@langchain/aws';
- import type { ChatBedrockConverseInput } from '@langchain/aws';
  import { AIMessageChunk } from '@langchain/core/messages';
- import type { BaseMessage } from '@langchain/core/messages';
- import { ChatGenerationChunk } from '@langchain/core/outputs';
+ import { ChatGenerationChunk, ChatResult } from '@langchain/core/outputs';
  import type { CallbackManagerForLLMRun } from '@langchain/core/callbacks/manager';
+ import type { ChatBedrockConverseInput } from '@langchain/aws';
+ import type { BaseMessage } from '@langchain/core/messages';
+
+ /**
+ * Service tier type for Bedrock invocations.
+ * Requires AWS SDK >= 3.966.0 to actually work.
+ * @see https://docs.aws.amazon.com/bedrock/latest/userguide/service-tiers-inference.html
+ */
+ export type ServiceTierType = 'priority' | 'default' | 'flex' | 'reserved';
+
+ /**
+ * Extended input interface with additional features:
+ * - applicationInferenceProfile: Use an inference profile ARN instead of model ID
+ * - serviceTier: Specify service tier (Priority, Standard, Flex, Reserved)
+ */
+ export interface CustomChatBedrockConverseInput
+ extends ChatBedrockConverseInput {
+ /**
+ * Application Inference Profile ARN to use for the model.
+ * For example, "arn:aws:bedrock:eu-west-1:123456789102:application-inference-profile/fm16bt65tzgx"
+ * When provided, this ARN will be used for the actual inference calls instead of the model ID.
+ * Must still provide `model` as normal modelId to benefit from all the metadata.
+ * @see https://docs.aws.amazon.com/bedrock/latest/userguide/inference-profiles-create.html
+ */
+ applicationInferenceProfile?: string;
+
+ /**
+ * Service tier for model invocation.
+ * Specifies the processing tier type used for serving the request.
+ * Supported values are 'priority', 'default', 'flex', and 'reserved'.
+ *
+ * - 'priority': Prioritized processing for lower latency
+ * - 'default': Standard processing tier
+ * - 'flex': Flexible processing tier with lower cost
+ * - 'reserved': Reserved capacity for consistent performance
+ *
+ * If not provided, AWS uses the default tier.
+ * Note: Requires AWS SDK >= 3.966.0 to work.
+ * @see https://docs.aws.amazon.com/bedrock/latest/userguide/service-tiers-inference.html
+ */
+ serviceTier?: ServiceTierType;
+ }
+
+ /**
+ * Extended call options with serviceTier override support.
+ */
+ export interface CustomChatBedrockConverseCallOptions {
+ serviceTier?: ServiceTierType;
+ }
 
  export class CustomChatBedrockConverse extends ChatBedrockConverse {
- constructor(fields?: ChatBedrockConverseInput) {
+ /**
+ * Application Inference Profile ARN to use instead of model ID.
+ */
+ applicationInferenceProfile?: string;
+
+ /**
+ * Service tier for model invocation.
+ */
+ serviceTier?: ServiceTierType;
+
+ constructor(fields?: CustomChatBedrockConverseInput) {
  super(fields);
+ this.applicationInferenceProfile = fields?.applicationInferenceProfile;
+ this.serviceTier = fields?.serviceTier;
  }
 
  static lc_name(): string {
@@ -28,52 +91,126 @@ export class CustomChatBedrockConverse extends ChatBedrockConverse {
  }
 
  /**
- * Override _streamResponseChunks to strip contentBlockIndex from response_metadata
- * This prevents LangChain's merge conflicts when the same index is used for
- * different content types (text vs tool calls)
+ * Get the model ID to use for API calls.
+ * Returns applicationInferenceProfile if set, otherwise returns this.model.
+ */
+ protected getModelId(): string {
+ return this.applicationInferenceProfile ?? this.model;
+ }
+
+ /**
+ * Override invocationParams to add serviceTier support.
+ */
+ override invocationParams(
+ options?: this['ParsedCallOptions'] & CustomChatBedrockConverseCallOptions
+ ): ReturnType<ChatBedrockConverse['invocationParams']> & {
+ serviceTier?: { type: ServiceTierType };
+ } {
+ const baseParams = super.invocationParams(options);
+
+ /** Service tier from options or fall back to class-level setting */
+ const serviceTierType = options?.serviceTier ?? this.serviceTier;
+
+ return {
+ ...baseParams,
+ serviceTier: serviceTierType ? { type: serviceTierType } : undefined,
+ };
+ }
+
+ /**
+ * Override _generateNonStreaming to use applicationInferenceProfile as modelId.
+ * Uses the same model-swapping pattern as streaming for consistency.
  */
- async *_streamResponseChunks(
+ override async _generateNonStreaming(
  messages: BaseMessage[],
- options: this['ParsedCallOptions'],
+ options: this['ParsedCallOptions'] & CustomChatBedrockConverseCallOptions,
+ runManager?: CallbackManagerForLLMRun
+ ): Promise<ChatResult> {
+ // Temporarily swap model for applicationInferenceProfile support
+ const originalModel = this.model;
+ if (
+ this.applicationInferenceProfile != null &&
+ this.applicationInferenceProfile !== ''
+ ) {
+ this.model = this.applicationInferenceProfile;
+ }
+
+ try {
+ return await super._generateNonStreaming(messages, options, runManager);
+ } finally {
+ // Restore original model
+ this.model = originalModel;
+ }
+ }
+
+ /**
+ * Override _streamResponseChunks to:
+ * 1. Use applicationInferenceProfile as modelId (by temporarily swapping this.model)
+ * 2. Strip contentBlockIndex from response_metadata to prevent merge conflicts
+ *
+ * Note: We delegate to super._streamResponseChunks() to preserve @langchain/aws's
+ * internal chunk handling which correctly preserves array content for reasoning blocks.
+ */
+ override async *_streamResponseChunks(
+ messages: BaseMessage[],
+ options: this['ParsedCallOptions'] & CustomChatBedrockConverseCallOptions,
  runManager?: CallbackManagerForLLMRun
  ): AsyncGenerator<ChatGenerationChunk> {
- const baseStream = super._streamResponseChunks(
- messages,
- options,
- runManager
- );
-
- for await (const chunk of baseStream) {
- // Only process if we have response_metadata
- if (
- chunk.message instanceof AIMessageChunk &&
- (chunk.message as Partial<AIMessageChunk>).response_metadata &&
- typeof chunk.message.response_metadata === 'object'
- ) {
- // Check if contentBlockIndex exists anywhere in response_metadata (top level or nested)
- const hasContentBlockIndex = this.hasContentBlockIndex(
- chunk.message.response_metadata
- );
-
- if (hasContentBlockIndex) {
- const cleanedMetadata = this.removeContentBlockIndex(
- chunk.message.response_metadata
- ) as Record<string, unknown>;
-
- yield new ChatGenerationChunk({
- text: chunk.text,
- message: new AIMessageChunk({
- ...chunk.message,
- response_metadata: cleanedMetadata,
- }),
- generationInfo: chunk.generationInfo,
- });
- continue;
- }
+ // Temporarily swap model for applicationInferenceProfile support
+ const originalModel = this.model;
+ if (
+ this.applicationInferenceProfile != null &&
+ this.applicationInferenceProfile !== ''
+ ) {
+ this.model = this.applicationInferenceProfile;
+ }
+
+ try {
+ // Use parent's streaming logic which correctly handles reasoning content
+ const baseStream = super._streamResponseChunks(
+ messages,
+ options,
+ runManager
+ );
+
+ for await (const chunk of baseStream) {
+ // Clean contentBlockIndex from response_metadata to prevent merge conflicts
+ yield this.cleanChunk(chunk);
  }
+ } finally {
+ // Restore original model
+ this.model = originalModel;
+ }
+ }
 
- yield chunk;
+ /**
+ * Clean a chunk by removing contentBlockIndex from response_metadata.
+ */
+ private cleanChunk(chunk: ChatGenerationChunk): ChatGenerationChunk {
+ const message = chunk.message;
+ if (!(message instanceof AIMessageChunk)) {
+ return chunk;
  }
+
+ const metadata = message.response_metadata as Record<string, unknown>;
+ const hasContentBlockIndex = this.hasContentBlockIndex(metadata);
+ if (!hasContentBlockIndex) {
+ return chunk;
+ }
+
+ const cleanedMetadata = this.removeContentBlockIndex(metadata) as Record<
+ string,
+ unknown
+ >;
+
+ return new ChatGenerationChunk({
+ text: chunk.text,
+ message: new AIMessageChunk({
+ ...message,
+ response_metadata: cleanedMetadata,
+ }),
+ generationInfo: chunk.generationInfo,
+ });
  }
 
  /**