@librechat/agents 3.1.51 → 3.1.53

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (105)
  1. package/dist/cjs/graphs/Graph.cjs +43 -16
  2. package/dist/cjs/graphs/Graph.cjs.map +1 -1
  3. package/dist/cjs/llm/google/index.cjs.map +1 -1
  4. package/dist/cjs/llm/openrouter/index.cjs +59 -5
  5. package/dist/cjs/llm/openrouter/index.cjs.map +1 -1
  6. package/dist/cjs/llm/vertexai/index.cjs +16 -2
  7. package/dist/cjs/llm/vertexai/index.cjs.map +1 -1
  8. package/dist/cjs/main.cjs +2 -0
  9. package/dist/cjs/main.cjs.map +1 -1
  10. package/dist/cjs/run.cjs +32 -2
  11. package/dist/cjs/run.cjs.map +1 -1
  12. package/dist/cjs/utils/run.cjs +3 -1
  13. package/dist/cjs/utils/run.cjs.map +1 -1
  14. package/dist/esm/graphs/Graph.mjs +43 -16
  15. package/dist/esm/graphs/Graph.mjs.map +1 -1
  16. package/dist/esm/llm/google/index.mjs.map +1 -1
  17. package/dist/esm/llm/openrouter/index.mjs +59 -5
  18. package/dist/esm/llm/openrouter/index.mjs.map +1 -1
  19. package/dist/esm/llm/vertexai/index.mjs +16 -2
  20. package/dist/esm/llm/vertexai/index.mjs.map +1 -1
  21. package/dist/esm/main.mjs +1 -0
  22. package/dist/esm/main.mjs.map +1 -1
  23. package/dist/esm/run.mjs +32 -2
  24. package/dist/esm/run.mjs.map +1 -1
  25. package/dist/esm/utils/run.mjs +3 -1
  26. package/dist/esm/utils/run.mjs.map +1 -1
  27. package/dist/types/graphs/Graph.d.ts +7 -0
  28. package/dist/types/index.d.ts +2 -0
  29. package/dist/types/llm/google/index.d.ts +2 -3
  30. package/dist/types/llm/openrouter/index.d.ts +21 -1
  31. package/dist/types/llm/vertexai/index.d.ts +2 -1
  32. package/dist/types/run.d.ts +1 -0
  33. package/dist/types/types/llm.d.ts +7 -2
  34. package/dist/types/types/run.d.ts +2 -0
  35. package/package.json +1 -1
  36. package/src/graphs/Graph.ts +49 -20
  37. package/src/index.ts +6 -0
  38. package/src/llm/google/index.ts +2 -3
  39. package/src/llm/openrouter/index.ts +117 -6
  40. package/src/llm/openrouter/reasoning.test.ts +207 -0
  41. package/src/llm/vertexai/index.ts +20 -3
  42. package/src/run.ts +40 -2
  43. package/src/scripts/ant_web_search.ts +1 -0
  44. package/src/scripts/ant_web_search_edge_case.ts +1 -0
  45. package/src/scripts/ant_web_search_error_edge_case.ts +1 -0
  46. package/src/scripts/bedrock-content-aggregation-test.ts +1 -0
  47. package/src/scripts/bedrock-parallel-tools-test.ts +1 -0
  48. package/src/scripts/caching.ts +1 -0
  49. package/src/scripts/code_exec.ts +1 -0
  50. package/src/scripts/code_exec_files.ts +1 -0
  51. package/src/scripts/code_exec_multi_session.ts +1 -0
  52. package/src/scripts/code_exec_ptc.ts +1 -0
  53. package/src/scripts/code_exec_session.ts +1 -0
  54. package/src/scripts/code_exec_simple.ts +1 -0
  55. package/src/scripts/content.ts +1 -0
  56. package/src/scripts/image.ts +1 -0
  57. package/src/scripts/memory.ts +16 -6
  58. package/src/scripts/multi-agent-chain.ts +1 -0
  59. package/src/scripts/multi-agent-conditional.ts +1 -0
  60. package/src/scripts/multi-agent-document-review-chain.ts +1 -0
  61. package/src/scripts/multi-agent-hybrid-flow.ts +1 -0
  62. package/src/scripts/multi-agent-parallel-start.ts +1 -0
  63. package/src/scripts/multi-agent-parallel.ts +1 -0
  64. package/src/scripts/multi-agent-sequence.ts +1 -0
  65. package/src/scripts/multi-agent-supervisor.ts +1 -0
  66. package/src/scripts/multi-agent-test.ts +1 -0
  67. package/src/scripts/parallel-asymmetric-tools-test.ts +1 -0
  68. package/src/scripts/parallel-full-metadata-test.ts +1 -0
  69. package/src/scripts/parallel-tools-test.ts +1 -0
  70. package/src/scripts/programmatic_exec_agent.ts +1 -0
  71. package/src/scripts/search.ts +1 -0
  72. package/src/scripts/sequential-full-metadata-test.ts +1 -0
  73. package/src/scripts/simple.ts +1 -0
  74. package/src/scripts/single-agent-metadata-test.ts +1 -0
  75. package/src/scripts/stream.ts +1 -0
  76. package/src/scripts/test-handoff-preamble.ts +1 -0
  77. package/src/scripts/test-handoff-steering.ts +3 -0
  78. package/src/scripts/test-multi-agent-list-handoff.ts +1 -0
  79. package/src/scripts/test-parallel-agent-labeling.ts +2 -0
  80. package/src/scripts/test-parallel-handoffs.ts +1 -0
  81. package/src/scripts/test-thinking-handoff-bedrock.ts +1 -0
  82. package/src/scripts/test-thinking-handoff.ts +1 -0
  83. package/src/scripts/test-thinking-to-thinking-handoff-bedrock.ts +1 -0
  84. package/src/scripts/test-tool-before-handoff-role-order.ts +1 -0
  85. package/src/scripts/test-tools-before-handoff.ts +1 -0
  86. package/src/scripts/thinking-bedrock.ts +1 -0
  87. package/src/scripts/thinking.ts +1 -0
  88. package/src/scripts/tools.ts +1 -0
  89. package/src/specs/agent-handoffs.test.ts +1 -0
  90. package/src/specs/anthropic.simple.test.ts +4 -0
  91. package/src/specs/azure.simple.test.ts +142 -3
  92. package/src/specs/cache.simple.test.ts +8 -0
  93. package/src/specs/custom-event-await.test.ts +2 -0
  94. package/src/specs/deepseek.simple.test.ts +3 -0
  95. package/src/specs/moonshot.simple.test.ts +5 -0
  96. package/src/specs/openai.simple.test.ts +3 -0
  97. package/src/specs/openrouter.simple.test.ts +164 -2
  98. package/src/specs/prune.test.ts +1 -0
  99. package/src/specs/reasoning.test.ts +1 -0
  100. package/src/specs/thinking-handoff.test.ts +1 -0
  101. package/src/specs/tool-error.test.ts +1 -0
  102. package/src/types/llm.ts +7 -2
  103. package/src/types/run.ts +2 -0
  104. package/src/utils/llmConfig.ts +3 -4
  105. package/src/utils/run.ts +4 -2
@@ -131,6 +131,7 @@ describe(`${capitalizeFirstLetter(provider)} Streaming Tests`, () => {
131
131
  additional_instructions: `The user's name is ${userName} and they are located in ${location}.`,
132
132
  },
133
133
  returnContent: true,
134
+ skipCleanup: true,
134
135
  customHandlers,
135
136
  });
136
137
 
@@ -202,6 +203,7 @@ describe(`${capitalizeFirstLetter(provider)} Streaming Tests`, () => {
202
203
  additional_instructions: `The user's name is ${userName} and they are located in ${location}.`,
203
204
  },
204
205
  returnContent: true,
206
+ skipCleanup: true,
205
207
  customHandlers,
206
208
  });
207
209
 
@@ -261,6 +263,7 @@ describe(`${capitalizeFirstLetter(provider)} Streaming Tests`, () => {
261
263
  additional_instructions: `The user's name is ${userName} and they are located in ${location}.`,
262
264
  },
263
265
  returnContent: true,
266
+ skipCleanup: true,
264
267
  customHandlers,
265
268
  });
266
269
 
@@ -7,6 +7,7 @@ import {
7
7
  UsageMetadata,
8
8
  } from '@langchain/core/messages';
9
9
  import type * as t from '@/types';
10
+ import type { ChatOpenRouterCallOptions } from '@/llm/openrouter';
10
11
  import { ToolEndHandler, ModelEndHandler } from '@/events';
11
12
  import { ContentTypes, GraphEvents, Providers, TitleMethod } from '@/common';
12
13
  import { capitalizeFirstLetter } from './spec.utils';
@@ -33,6 +34,8 @@ describeIf(`${capitalizeFirstLetter(provider)} Streaming Tests`, () => {
33
34
  version: 'v2' as const,
34
35
  };
35
36
 
37
+ const baseLLMConfig = getLLMConfig(provider);
38
+
36
39
  beforeEach(async () => {
37
40
  conversationHistory = [];
38
41
  collectedUsage = [];
@@ -56,21 +59,87 @@ describeIf(`${capitalizeFirstLetter(provider)} Streaming Tests`, () => {
56
59
  [GraphEvents.CHAT_MODEL_END]: new ModelEndHandler(collectedUsage),
57
60
  });
58
61
 
62
+ /**
63
+ * Helper: run a reasoning test against a specific model with the given reasoning config.
64
+ * Asserts that reasoning tokens are reported and content is produced.
65
+ */
66
+ // eslint-disable-next-line @typescript-eslint/explicit-function-return-type
67
+ async function runReasoningTest(opts: {
68
+ model: string;
69
+ reasoning?: ChatOpenRouterCallOptions['reasoning'];
70
+ threadId: string;
71
+ runId: string;
72
+ }) {
73
+ const { reasoning: _baseReasoning, ...baseWithoutReasoning } =
74
+ baseLLMConfig as unknown as Record<string, unknown>;
75
+ const llmConfig = {
76
+ ...baseWithoutReasoning,
77
+ model: opts.model,
78
+ ...(opts.reasoning != null ? { reasoning: opts.reasoning } : {}),
79
+ } as t.LLMConfig;
80
+ const customHandlers = setupCustomHandlers();
81
+
82
+ run = await Run.create<t.IState>({
83
+ runId: opts.runId,
84
+ graphConfig: {
85
+ type: 'standard',
86
+ llmConfig,
87
+ instructions: 'You are a helpful AI assistant. Think step by step.',
88
+ },
89
+ returnContent: true,
90
+ skipCleanup: true,
91
+ customHandlers,
92
+ });
93
+
94
+ const userMessage = 'What is 15 * 37 + 128 / 4? Show your work.';
95
+ conversationHistory.push(new HumanMessage(userMessage));
96
+
97
+ const finalContentParts = await run.processStream(
98
+ { messages: conversationHistory },
99
+ { ...configV2, configurable: { thread_id: opts.threadId } }
100
+ );
101
+
102
+ expect(finalContentParts).toBeDefined();
103
+ expect(finalContentParts?.length).toBeGreaterThan(0);
104
+
105
+ // Verify usage metadata was collected
106
+ expect(collectedUsage.length).toBeGreaterThan(0);
107
+ const usage = collectedUsage[0];
108
+ expect(usage.input_tokens).toBeGreaterThan(0);
109
+ expect(usage.output_tokens).toBeGreaterThan(0);
110
+
111
+ // Verify reasoning tokens are reported in output_token_details
112
+ const reasoningTokens =
113
+ (usage.output_token_details as Record<string, number> | undefined)
114
+ ?.reasoning ?? 0;
115
+ expect(reasoningTokens).toBeGreaterThan(0);
116
+
117
+ // Verify the final message has content
118
+ const finalMessages = run.getRunMessages();
119
+ expect(finalMessages).toBeDefined();
120
+ expect(finalMessages?.length).toBeGreaterThan(0);
121
+ const assistantMsg = finalMessages?.[0];
122
+ expect(typeof assistantMsg?.content).toBe('string');
123
+ expect((assistantMsg?.content as string).length).toBeGreaterThan(0);
124
+
125
+ return { usage, reasoningTokens, finalMessages };
126
+ }
127
+
59
128
  test(`${capitalizeFirstLetter(provider)}: simple stream + title`, async () => {
60
129
  const { userName, location } = await getArgs();
61
- const llmConfig = getLLMConfig(provider);
62
130
  const customHandlers = setupCustomHandlers();
63
131
 
64
132
  run = await Run.create<t.IState>({
65
133
  runId: 'or-run-1',
66
134
  graphConfig: {
67
135
  type: 'standard',
68
- llmConfig,
136
+ llmConfig: baseLLMConfig,
69
137
  tools: [new Calculator()],
70
138
  instructions: 'You are a friendly AI assistant.',
71
139
  additional_instructions: `The user's name is ${userName} and they are located in ${location}.`,
72
140
  },
73
141
  returnContent: true,
142
+ skipCleanup: true,
74
143
  customHandlers,
75
144
  });
76
145
 
@@ -103,4 +172,97 @@ describeIf(`${capitalizeFirstLetter(provider)} Streaming Tests`, () => {
103
172
  });
104
173
  expect(titleRes.title).toBeDefined();
105
174
  });
175
+
176
+ test(`${capitalizeFirstLetter(provider)}: Anthropic does NOT reason by default (no config)`, async () => {
177
+ const { reasoning: _baseReasoning, ...baseWithoutReasoning } =
178
+ baseLLMConfig as unknown as Record<string, unknown>;
179
+ const llmConfig = {
180
+ ...baseWithoutReasoning,
181
+ model: 'anthropic/claude-sonnet-4',
182
+ } as t.LLMConfig;
183
+ const customHandlers = setupCustomHandlers();
184
+
185
+ run = await Run.create<t.IState>({
186
+ runId: 'or-anthropic-default-1',
187
+ graphConfig: {
188
+ type: 'standard',
189
+ llmConfig,
190
+ instructions: 'You are a helpful AI assistant.',
191
+ },
192
+ returnContent: true,
193
+ skipCleanup: true,
194
+ customHandlers,
195
+ });
196
+
197
+ conversationHistory.push(
198
+ new HumanMessage('What is 15 * 37 + 128 / 4? Show your work.')
199
+ );
200
+
201
+ await run.processStream(
202
+ { messages: conversationHistory },
203
+ { ...configV2, configurable: { thread_id: 'or-anthropic-default-1' } }
204
+ );
205
+
206
+ expect(collectedUsage.length).toBeGreaterThan(0);
207
+ const usage = collectedUsage[0];
208
+ // Anthropic requires explicit reasoning config — no reasoning tokens by default
209
+ const reasoningTokens =
210
+ (usage.output_token_details as Record<string, number> | undefined)
211
+ ?.reasoning ?? 0;
212
+ expect(reasoningTokens).toBe(0);
213
+ });
214
+
215
+ test(`${capitalizeFirstLetter(provider)}: Gemini 3 reasons by default (no config)`, async () => {
216
+ await runReasoningTest({
217
+ model: 'google/gemini-3-pro-preview',
218
+ reasoning: undefined,
219
+ threadId: 'or-gemini-default-1',
220
+ runId: 'or-gemini-default-1',
221
+ });
222
+ });
223
+
224
+ test(`${capitalizeFirstLetter(provider)}: Gemini reasoning with max_tokens`, async () => {
225
+ await runReasoningTest({
226
+ model: 'google/gemini-3-pro-preview',
227
+ reasoning: { max_tokens: 4000 },
228
+ threadId: 'or-gemini-reasoning-1',
229
+ runId: 'or-gemini-reasoning-1',
230
+ });
231
+ });
232
+
233
+ test(`${capitalizeFirstLetter(provider)}: Gemini reasoning with effort`, async () => {
234
+ await runReasoningTest({
235
+ model: 'google/gemini-3-flash-preview',
236
+ reasoning: { effort: 'low' },
237
+ threadId: 'or-gemini-effort-1',
238
+ runId: 'or-gemini-effort-1',
239
+ });
240
+ });
241
+
242
+ test(`${capitalizeFirstLetter(provider)}: Anthropic reasoning with max_tokens`, async () => {
243
+ await runReasoningTest({
244
+ model: 'anthropic/claude-sonnet-4',
245
+ reasoning: { max_tokens: 4000 },
246
+ threadId: 'or-anthropic-reasoning-1',
247
+ runId: 'or-anthropic-reasoning-1',
248
+ });
249
+ });
250
+
251
+ test(`${capitalizeFirstLetter(provider)}: Anthropic sonnet-4 reasoning with effort`, async () => {
252
+ await runReasoningTest({
253
+ model: 'anthropic/claude-sonnet-4',
254
+ reasoning: { effort: 'medium' },
255
+ threadId: 'or-anthropic-effort-s4-1',
256
+ runId: 'or-anthropic-effort-s4-1',
257
+ });
258
+ });
259
+
260
+ test(`${capitalizeFirstLetter(provider)}: Anthropic sonnet-4-6 reasoning with effort`, async () => {
261
+ await runReasoningTest({
262
+ model: 'anthropic/claude-sonnet-4-6',
263
+ reasoning: { effort: 'medium' },
264
+ threadId: 'or-anthropic-effort-s46-1',
265
+ runId: 'or-anthropic-effort-s46-1',
266
+ });
267
+ });
106
268
  });
@@ -728,6 +728,7 @@ describe('Prune Messages Tests', () => {
728
728
  maxContextTokens: 1000,
729
729
  },
730
730
  returnContent: true,
731
+ skipCleanup: true,
731
732
  tokenCounter,
732
733
  indexTokenCountMap: {},
733
734
  });
@@ -158,6 +158,7 @@ describe(`${capitalizeFirstLetter(provider)} Streaming Tests`, () => {
158
158
  additional_instructions: `The user's name is ${userName} and they are located in ${location}.`,
159
159
  },
160
160
  returnContent: true,
161
+ skipCleanup: true,
161
162
  customHandlers,
162
163
  });
163
164
 
@@ -40,6 +40,7 @@ describe('Thinking-Enabled Agent Handoff Tests', () => {
40
40
  edges,
41
41
  },
42
42
  returnContent: true,
43
+ skipCleanup: true,
43
44
  });
44
45
 
45
46
  describe('OpenAI to Anthropic with Thinking', () => {
@@ -150,6 +150,7 @@ describe('Tool Error Handling Tests', () => {
150
150
  additional_instructions: `The user's name is ${userName} and they are located in ${location}.`,
151
151
  },
152
152
  returnContent: true,
153
+ skipCleanup: true,
153
154
  customHandlers,
154
155
  });
155
156
 
package/src/types/llm.ts CHANGED
@@ -11,7 +11,6 @@ import type {
11
11
  ClientOptions as OAIClientOptions,
12
12
  } from '@langchain/openai';
13
13
  import type { GoogleGenerativeAIChatInput } from '@langchain/google-genai';
14
- import type { GeminiGenerationConfig } from '@langchain/google-common';
15
14
  import type { ChatVertexAIInput } from '@langchain/google-vertexai';
16
15
  import type { ChatDeepSeekCallOptions } from '@langchain/deepseek';
17
16
  import type { ChatOpenRouterCallOptions } from '@/llm/openrouter';
@@ -55,6 +54,11 @@ export type AnthropicReasoning = {
55
54
  thinking?: ThinkingConfig | boolean;
56
55
  thinkingBudget?: number;
57
56
  };
57
+ export type GoogleThinkingConfig = {
58
+ thinkingBudget?: number;
59
+ includeThoughts?: boolean;
60
+ thinkingLevel?: string;
61
+ };
58
62
  export type OpenAIClientOptions = ChatOpenAIFields;
59
63
  export type AnthropicClientOptions = AnthropicInput & {
60
64
  promptCache?: boolean;
@@ -62,6 +66,7 @@ export type AnthropicClientOptions = AnthropicInput & {
62
66
  export type MistralAIClientOptions = ChatMistralAIInput;
63
67
  export type VertexAIClientOptions = ChatVertexAIInput & {
64
68
  includeThoughts?: boolean;
69
+ thinkingConfig?: GoogleThinkingConfig;
65
70
  };
66
71
  export type BedrockAnthropicInput = ChatBedrockConverseInput & {
67
72
  additionalModelRequestFields?: ChatBedrockConverseInput['additionalModelRequestFields'] &
@@ -72,7 +77,7 @@ export type BedrockConverseClientOptions = ChatBedrockConverseInput;
72
77
  export type BedrockAnthropicClientOptions = BedrockAnthropicInput;
73
78
  export type GoogleClientOptions = GoogleGenerativeAIChatInput & {
74
79
  customHeaders?: RequestOptions['customHeaders'];
75
- thinkingConfig?: GeminiGenerationConfig['thinkingConfig'];
80
+ thinkingConfig?: GoogleThinkingConfig;
76
81
  };
77
82
  export type DeepSeekClientOptions = ChatDeepSeekCallOptions;
78
83
  export type XAIClientOptions = ChatXAIInput;
package/src/types/run.ts CHANGED
@@ -115,6 +115,8 @@ export type RunConfig = {
115
115
  returnContent?: boolean;
116
116
  tokenCounter?: TokenCounter;
117
117
  indexTokenCountMap?: Record<string, number>;
118
+ /** Skip post-stream cleanup (clearHeavyState) — useful for tests that inspect graph state after processStream */
119
+ skipCleanup?: boolean;
118
120
  };
119
121
 
120
122
  export type ProvidedCallbacks =
@@ -67,11 +67,10 @@ export const llmConfigs: Record<string, t.LLMConfig | undefined> = {
67
67
  'X-Title': 'LibreChat',
68
68
  },
69
69
  },
70
- include_reasoning: true,
70
+ reasoning: {
71
+ max_tokens: 8000,
72
+ },
71
73
  modelKwargs: {
72
- reasoning: {
73
- max_tokens: 8000,
74
- },
75
74
  max_tokens: 10000,
76
75
  },
77
76
  } as or.ChatOpenRouterCallOptions & t.LLMConfig,
package/src/utils/run.ts CHANGED
@@ -56,7 +56,7 @@ export class RunnableCallable<I = unknown, O = unknown> extends Runnable<I, O> {
56
56
  runManager?: CallbackManagerForChainRun
57
57
  ): Promise<O> {
58
58
  return new Promise<O>((resolve, reject) => {
59
- const childConfig = patchConfig(config, {
59
+ let childConfig: Partial<RunnableConfig> | null = patchConfig(config, {
60
60
  callbacks: runManager?.getChild(),
61
61
  });
62
62
  void AsyncLocalStorageProviderSingleton.runWithConfig(
@@ -64,8 +64,10 @@ export class RunnableCallable<I = unknown, O = unknown> extends Runnable<I, O> {
64
64
  async () => {
65
65
  try {
66
66
  const output = await this.func(input, childConfig);
67
+ childConfig = null;
67
68
  resolve(output);
68
69
  } catch (e) {
70
+ childConfig = null;
69
71
  reject(e);
70
72
  }
71
73
  }
@@ -98,4 +100,4 @@ export class RunnableCallable<I = unknown, O = unknown> extends Runnable<I, O> {
98
100
 
99
101
  return returnValue;
100
102
  }
101
- }
103
+ }