@librechat/agents 3.0.29 → 3.0.31

This diff shows the changes between publicly released versions of the package as they appear in their public registries. It is provided for informational purposes only.
package/src/llm/bedrock.ts ADDED
@@ -0,0 +1,128 @@
+ /**
+  * Optimized ChatBedrockConverse wrapper that fixes contentBlockIndex conflicts
+  *
+  * Bedrock sends the same contentBlockIndex for both text and tool_use content blocks,
+  * causing LangChain's merge logic to fail with "field[contentBlockIndex] already exists"
+  * errors. This wrapper simply strips contentBlockIndex from response_metadata to avoid
+  * the conflict.
+  *
+  * The contentBlockIndex field is only used internally by Bedrock's streaming protocol
+  * and isn't needed by application logic - the index field on tool_call_chunks serves
+  * the purpose of tracking tool call ordering.
+  */
+
+ import { ChatBedrockConverse } from '@langchain/aws';
+ import type { ChatBedrockConverseInput } from '@langchain/aws';
+ import { AIMessageChunk } from '@langchain/core/messages';
+ import type { BaseMessage } from '@langchain/core/messages';
+ import { ChatGenerationChunk } from '@langchain/core/outputs';
+ import type { CallbackManagerForLLMRun } from '@langchain/core/callbacks/manager';
+
+ export class CustomChatBedrockConverse extends ChatBedrockConverse {
+   constructor(fields?: ChatBedrockConverseInput) {
+     super(fields);
+   }
+
+   static lc_name(): string {
+     return 'LibreChatBedrockConverse';
+   }
+
+   /**
+    * Override _streamResponseChunks to strip contentBlockIndex from response_metadata
+    * This prevents LangChain's merge conflicts when the same index is used for
+    * different content types (text vs tool calls)
+    */
+   async *_streamResponseChunks(
+     messages: BaseMessage[],
+     options: this['ParsedCallOptions'],
+     runManager?: CallbackManagerForLLMRun
+   ): AsyncGenerator<ChatGenerationChunk> {
+     const baseStream = super._streamResponseChunks(
+       messages,
+       options,
+       runManager
+     );
+
+     for await (const chunk of baseStream) {
+       // Only process if we have response_metadata
+       if (
+         chunk.message instanceof AIMessageChunk &&
+         (chunk.message as Partial<AIMessageChunk>).response_metadata &&
+         typeof chunk.message.response_metadata === 'object'
+       ) {
+         // Check if contentBlockIndex exists anywhere in response_metadata (top level or nested)
+         const hasContentBlockIndex = this.hasContentBlockIndex(
+           chunk.message.response_metadata
+         );
+
+         if (hasContentBlockIndex) {
+           const cleanedMetadata = this.removeContentBlockIndex(
+             chunk.message.response_metadata
+           ) as Record<string, unknown>;
+
+           yield new ChatGenerationChunk({
+             text: chunk.text,
+             message: new AIMessageChunk({
+               ...chunk.message,
+               response_metadata: cleanedMetadata,
+             }),
+             generationInfo: chunk.generationInfo,
+           });
+           continue;
+         }
+       }
+
+       yield chunk;
+     }
+   }
+
+   /**
+    * Check if contentBlockIndex exists at any level in the object
+    */
+   private hasContentBlockIndex(obj: unknown): boolean {
+     if (obj === null || obj === undefined || typeof obj !== 'object') {
+       return false;
+     }
+
+     if ('contentBlockIndex' in obj) {
+       return true;
+     }
+
+     for (const value of Object.values(obj)) {
+       if (typeof value === 'object' && value !== null) {
+         if (this.hasContentBlockIndex(value)) {
+           return true;
+         }
+       }
+     }
+
+     return false;
+   }
+
+   /**
+    * Recursively remove contentBlockIndex from all levels of an object
+    */
+   private removeContentBlockIndex(obj: unknown): unknown {
+     if (obj === null || obj === undefined) {
+       return obj;
+     }
+
+     if (Array.isArray(obj)) {
+       return obj.map((item) => this.removeContentBlockIndex(item));
+     }
+
+     if (typeof obj === 'object') {
+       const cleaned: Record<string, unknown> = {};
+       for (const [key, value] of Object.entries(obj)) {
+         if (key !== 'contentBlockIndex') {
+           cleaned[key] = this.removeContentBlockIndex(value);
+         }
+       }
+       return cleaned;
+     }
+
+     return obj;
+   }
+ }
+
+ export type { ChatBedrockConverseInput };
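
For illustration, a minimal usage sketch of the wrapper. The import path from the package root and the model ID are assumptions for the example; in-repo code imports from '@/llm/bedrock', and AWS credentials are assumed to be available in the environment:

import { HumanMessage } from '@langchain/core/messages';
import { CustomChatBedrockConverse } from '@librechat/agents'; // assumed export path

const model = new CustomChatBedrockConverse({
  model: 'anthropic.claude-3-5-sonnet-20240620-v1:0', // illustrative model ID
  region: 'us-east-1',
});

// Streamed chunks arrive with contentBlockIndex stripped from
// response_metadata, so concatenating text and tool_use chunks no longer
// fails with "field[contentBlockIndex] already exists".
const stream = await model.stream([new HumanMessage('Summarize this repo.')]);
for await (const chunk of stream) {
  if (typeof chunk.content === 'string') {
    process.stdout.write(chunk.content);
  }
}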
package/src/llm/providers.ts CHANGED
@@ -1,8 +1,5 @@
  // src/llm/providers.ts
  import { ChatMistralAI } from '@langchain/mistralai';
- import { ChatBedrockConverse } from '@langchain/aws';
- // import { ChatAnthropic } from '@langchain/anthropic';
- // import { ChatVertexAI } from '@langchain/google-vertexai';
  import type {
    ChatModelConstructorMap,
    ProviderOptionsMap,
@@ -15,6 +12,7 @@ import {
    ChatXAI,
  } from '@/llm/openai';
  import { CustomChatGoogleGenerativeAI } from '@/llm/google';
+ import { CustomChatBedrockConverse } from '@/llm/bedrock';
  import { CustomAnthropic } from '@/llm/anthropic';
  import { ChatOpenRouter } from '@/llm/openrouter';
  import { ChatVertexAI } from '@/llm/vertexai';
@@ -32,7 +30,7 @@ export const llmProviders: Partial<ChatModelConstructorMap> = {
    [Providers.MISTRAL]: ChatMistralAI,
    [Providers.ANTHROPIC]: CustomAnthropic,
    [Providers.OPENROUTER]: ChatOpenRouter,
-   [Providers.BEDROCK]: ChatBedrockConverse,
+   [Providers.BEDROCK]: CustomChatBedrockConverse,
    // [Providers.ANTHROPIC]: ChatAnthropic,
    [Providers.GOOGLE]: CustomChatGoogleGenerativeAI,
  };
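
Because llmProviders maps each provider to a constructor, swapping in the subclass is transparent to callers. A rough sketch of the lookup pattern, using llmProviders and Providers from the file above (the surrounding factory code is illustrative, not the package's actual API):

// Hypothetical consumer of the constructor map:
const BedrockConstructor = llmProviders[Providers.BEDROCK];
if (BedrockConstructor) {
  // After this change, this instantiates CustomChatBedrockConverse.
  const model = new BedrockConstructor({ region: 'us-east-1' });
}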
package/src/scripts/ant_web_search_error_edge_case.ts ADDED
@@ -0,0 +1,148 @@
+ /* eslint-disable no-console */
+ // src/scripts/ant_web_search_error_edge_case.ts
+ import { config } from 'dotenv';
+ config();
+ import { HumanMessage, BaseMessage } from '@langchain/core/messages';
+ import type * as t from '@/types';
+ import { ChatModelStreamHandler, createContentAggregator } from '@/stream';
+ import { ToolEndHandler, ModelEndHandler } from '@/events';
+
+ import { getArgs } from '@/scripts/args';
+ import { Run } from '@/run';
+ import { GraphEvents, Providers } from '@/common';
+ import { getLLMConfig } from '@/utils/llmConfig';
+
+ const conversationHistory: BaseMessage[] = [];
+ let _contentParts: (t.MessageContentComplex | undefined)[] = [];
+ async function testStandardStreaming(): Promise<void> {
+   const { userName, location, currentDate } = await getArgs();
+   const { contentParts, aggregateContent } = createContentAggregator();
+   _contentParts = contentParts;
+   const customHandlers = {
+     [GraphEvents.TOOL_END]: new ToolEndHandler(),
+     [GraphEvents.CHAT_MODEL_END]: new ModelEndHandler(),
+     [GraphEvents.CHAT_MODEL_STREAM]: new ChatModelStreamHandler(),
+     [GraphEvents.ON_RUN_STEP_COMPLETED]: {
+       handle: (
+         event: GraphEvents.ON_RUN_STEP_COMPLETED,
+         data: t.StreamEventData
+       ): void => {
+         console.log('====== ON_RUN_STEP_COMPLETED ======');
+         // console.dir(data, { depth: null });
+         aggregateContent({
+           event,
+           data: data as unknown as { result: t.ToolEndEvent },
+         });
+       },
+     },
+     [GraphEvents.ON_RUN_STEP]: {
+       handle: (
+         event: GraphEvents.ON_RUN_STEP,
+         data: t.StreamEventData
+       ): void => {
+         console.log('====== ON_RUN_STEP ======');
+         console.dir(data, { depth: null });
+         aggregateContent({ event, data: data as t.RunStep });
+       },
+     },
+     [GraphEvents.ON_RUN_STEP_DELTA]: {
+       handle: (
+         event: GraphEvents.ON_RUN_STEP_DELTA,
+         data: t.StreamEventData
+       ): void => {
+         console.log('====== ON_RUN_STEP_DELTA ======');
+         console.dir(data, { depth: null });
+         aggregateContent({ event, data: data as t.RunStepDeltaEvent });
+       },
+     },
+     [GraphEvents.ON_MESSAGE_DELTA]: {
+       handle: (
+         event: GraphEvents.ON_MESSAGE_DELTA,
+         data: t.StreamEventData
+       ): void => {
+         // console.log('====== ON_MESSAGE_DELTA ======');
+         // console.dir(data, { depth: null });
+         aggregateContent({ event, data: data as t.MessageDeltaEvent });
+       },
+     },
+     [GraphEvents.TOOL_START]: {
+       handle: (
+         _event: string,
+         data: t.StreamEventData,
+         metadata?: Record<string, unknown>
+       ): void => {
+         console.log('====== TOOL_START ======');
+         // console.dir(data, { depth: null });
+       },
+     },
+   };
+
+   const llmConfig = getLLMConfig(
+     Providers.ANTHROPIC
+   ) as t.AnthropicClientOptions & t.SharedLLMConfig;
+   llmConfig.model = 'claude-haiku-4-5';
+
+   const run = await Run.create<t.IState>({
+     runId: 'test-run-id',
+     graphConfig: {
+       type: 'standard',
+       llmConfig,
+       tools: [
+         {
+           type: 'web_search_20250305',
+           name: 'web_search',
+           max_uses: 5,
+         },
+       ],
+       instructions: 'You are a helpful AI research assistant.',
+     },
+     returnContent: true,
+     customHandlers,
+   });
+
+   const config = {
+     configurable: {
+       provider: Providers.ANTHROPIC,
+       thread_id: 'conversation-num-1',
+     },
+     streamMode: 'values',
+     version: 'v2' as const,
+   };
+
+   console.log('Test: Web search with multiple searches (error edge case test)');
+
+   // This prompt should trigger multiple web searches which may result in errors
+   const userMessage =
+     'Do a deep deep research on CoreWeave. I need you to perform multiple searches before you generate the answer. The basis of our research should be to investigate if this is a solid long term investment.';
+
+   conversationHistory.push(new HumanMessage(userMessage));
+
+   const inputs = {
+     messages: conversationHistory,
+   };
+   const finalContentParts = await run.processStream(inputs, config);
+   const finalMessages = run.getRunMessages();
+   if (finalMessages) {
+     conversationHistory.push(...finalMessages);
+     console.dir(conversationHistory, { depth: null });
+   }
+   // console.dir(finalContentParts, { depth: null });
+   console.log('\n\n====================\n\n');
+   // console.dir(contentParts, { depth: null });
+ }
+
+ process.on('unhandledRejection', (reason, promise) => {
+   console.error('Unhandled Rejection at:', promise, 'reason:', reason);
+   console.log('Content Parts:');
+   console.dir(_contentParts, { depth: null });
+   process.exit(1);
+ });
+
+ testStandardStreaming().catch((err) => {
+   console.error(err);
+   console.log('Conversation history:');
+   console.dir(conversationHistory, { depth: null });
+   console.log('Content Parts:');
+   console.dir(_contentParts, { depth: null });
+   process.exit(1);
+ });
@@ -87,13 +87,27 @@ export async function handleToolCallChunks({
    const alreadyDispatched =
      prevRunStep?.type === StepTypes.MESSAGE_CREATION &&
      graph.messageStepHasToolCalls.has(prevStepId);
-   if (!alreadyDispatched && tool_calls?.length === toolCallChunks.length) {
+
+   if (prevRunStep?.type === StepTypes.TOOL_CALLS) {
+     /**
+      * If previous step is already a tool_calls step, use that step ID
+      * This ensures tool call deltas are dispatched to the correct step
+      */
+     stepId = prevStepId;
+   } else if (
+     !alreadyDispatched &&
+     prevRunStep?.type === StepTypes.MESSAGE_CREATION
+   ) {
+     /**
+      * Create tool_calls step as soon as we receive the first tool call chunk
+      * This ensures deltas are always associated with the correct step
+      */
      await graph.dispatchMessageDelta(prevStepId, {
        content: [
          {
            type: ContentTypes.TEXT,
            text: '',
-           tool_call_ids: tool_calls.map((tc) => tc.id ?? ''),
+           tool_call_ids: tool_calls?.map((tc) => tc.id ?? '') ?? [],
          },
        ],
      });
@@ -102,7 +116,7 @@ export async function handleToolCallChunks({
      stepKey,
      {
        type: StepTypes.TOOL_CALLS,
-       tool_calls,
+       tool_calls: tool_calls ?? [],
      },
      metadata
    );
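
The new `?? []` fallbacks matter because a streamed chunk can carry partial tool_call_chunks before any fully parsed tool_calls exist. A toy illustration of that shape (the values are made up, not taken from a real stream):

// Early in a stream: arguments are still partial JSON, so the parsed
// tool_calls array may be undefined while tool_call_chunks is populated.
const earlyChunk = {
  tool_call_chunks: [{ name: 'web_search', args: '{"qu', id: 'call_1', index: 0 }],
  tool_calls: undefined as { id?: string }[] | undefined,
};

// Previously `tool_calls.map(...)` would throw a TypeError here;
// the optional chain plus fallback yields [] instead.
const toolCallIds = earlyChunk.tool_calls?.map((tc) => tc.id ?? '') ?? [];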
package/src/types/llm.ts CHANGED
@@ -1,7 +1,6 @@
  // src/types/llm.ts
  import { ChatOllama } from '@langchain/ollama';
  import { ChatMistralAI } from '@langchain/mistralai';
- import { ChatBedrockConverse } from '@langchain/aws';
  import type {
    BindToolsInput,
    BaseChatModelParams,
@@ -33,6 +32,7 @@ import {
    ChatXAI,
  } from '@/llm/openai';
  import { CustomChatGoogleGenerativeAI } from '@/llm/google';
+ import { CustomChatBedrockConverse } from '@/llm/bedrock';
  import { CustomAnthropic } from '@/llm/anthropic';
  import { ChatOpenRouter } from '@/llm/openrouter';
  import { ChatVertexAI } from '@/llm/vertexai';
@@ -126,7 +126,7 @@ export type ChatModelMap = {
    [Providers.MISTRALAI]: ChatMistralAI;
    [Providers.MISTRAL]: ChatMistralAI;
    [Providers.OPENROUTER]: ChatOpenRouter;
-   [Providers.BEDROCK]: ChatBedrockConverse;
+   [Providers.BEDROCK]: CustomChatBedrockConverse;
    [Providers.GOOGLE]: CustomChatGoogleGenerativeAI;
  };
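
Since ChatModelMap[Providers.BEDROCK] now resolves to the subclass, code typed against the map picks up the override without further changes. A small type-level sketch (the helper function is illustrative, not part of the package):

// Indexed access resolves to CustomChatBedrockConverse after this change.
type BedrockModel = ChatModelMap[Providers.BEDROCK];

// A generic helper keyed by the map continues to compile unchanged:
function assertModel<P extends keyof ChatModelMap>(
  provider: P,
  instance: ChatModelMap[P]
): ChatModelMap[P] {
  return instance;
}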