@librechat/agents 3.1.74 → 3.1.75-dev.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (203)
  1. package/README.md +66 -0
  2. package/dist/cjs/agents/AgentContext.cjs +84 -37
  3. package/dist/cjs/agents/AgentContext.cjs.map +1 -1
  4. package/dist/cjs/graphs/Graph.cjs +13 -3
  5. package/dist/cjs/graphs/Graph.cjs.map +1 -1
  6. package/dist/cjs/langchain/google-common.cjs +3 -0
  7. package/dist/cjs/langchain/google-common.cjs.map +1 -0
  8. package/dist/cjs/langchain/index.cjs +86 -0
  9. package/dist/cjs/langchain/index.cjs.map +1 -0
  10. package/dist/cjs/langchain/language_models/chat_models.cjs +3 -0
  11. package/dist/cjs/langchain/language_models/chat_models.cjs.map +1 -0
  12. package/dist/cjs/langchain/messages/tool.cjs +3 -0
  13. package/dist/cjs/langchain/messages/tool.cjs.map +1 -0
  14. package/dist/cjs/langchain/messages.cjs +51 -0
  15. package/dist/cjs/langchain/messages.cjs.map +1 -0
  16. package/dist/cjs/langchain/openai.cjs +3 -0
  17. package/dist/cjs/langchain/openai.cjs.map +1 -0
  18. package/dist/cjs/langchain/prompts.cjs +11 -0
  19. package/dist/cjs/langchain/prompts.cjs.map +1 -0
  20. package/dist/cjs/langchain/runnables.cjs +19 -0
  21. package/dist/cjs/langchain/runnables.cjs.map +1 -0
  22. package/dist/cjs/langchain/tools.cjs +23 -0
  23. package/dist/cjs/langchain/tools.cjs.map +1 -0
  24. package/dist/cjs/langchain/utils/env.cjs +11 -0
  25. package/dist/cjs/langchain/utils/env.cjs.map +1 -0
  26. package/dist/cjs/llm/anthropic/index.cjs +145 -52
  27. package/dist/cjs/llm/anthropic/index.cjs.map +1 -1
  28. package/dist/cjs/llm/anthropic/types.cjs.map +1 -1
  29. package/dist/cjs/llm/anthropic/utils/message_inputs.cjs +25 -15
  30. package/dist/cjs/llm/anthropic/utils/message_inputs.cjs.map +1 -1
  31. package/dist/cjs/llm/anthropic/utils/message_outputs.cjs +84 -70
  32. package/dist/cjs/llm/anthropic/utils/message_outputs.cjs.map +1 -1
  33. package/dist/cjs/llm/bedrock/index.cjs +1 -1
  34. package/dist/cjs/llm/bedrock/index.cjs.map +1 -1
  35. package/dist/cjs/llm/bedrock/utils/message_inputs.cjs +213 -3
  36. package/dist/cjs/llm/bedrock/utils/message_inputs.cjs.map +1 -1
  37. package/dist/cjs/llm/bedrock/utils/message_outputs.cjs +2 -1
  38. package/dist/cjs/llm/bedrock/utils/message_outputs.cjs.map +1 -1
  39. package/dist/cjs/llm/google/utils/common.cjs +5 -4
  40. package/dist/cjs/llm/google/utils/common.cjs.map +1 -1
  41. package/dist/cjs/llm/openai/index.cjs +468 -647
  42. package/dist/cjs/llm/openai/index.cjs.map +1 -1
  43. package/dist/cjs/llm/openai/utils/index.cjs +1 -448
  44. package/dist/cjs/llm/openai/utils/index.cjs.map +1 -1
  45. package/dist/cjs/llm/openrouter/index.cjs +57 -175
  46. package/dist/cjs/llm/openrouter/index.cjs.map +1 -1
  47. package/dist/cjs/llm/vertexai/index.cjs +5 -3
  48. package/dist/cjs/llm/vertexai/index.cjs.map +1 -1
  49. package/dist/cjs/main.cjs +83 -3
  50. package/dist/cjs/main.cjs.map +1 -1
  51. package/dist/cjs/messages/cache.cjs +39 -4
  52. package/dist/cjs/messages/cache.cjs.map +1 -1
  53. package/dist/cjs/messages/core.cjs +7 -6
  54. package/dist/cjs/messages/core.cjs.map +1 -1
  55. package/dist/cjs/messages/format.cjs +7 -6
  56. package/dist/cjs/messages/format.cjs.map +1 -1
  57. package/dist/cjs/messages/langchain.cjs +26 -0
  58. package/dist/cjs/messages/langchain.cjs.map +1 -0
  59. package/dist/cjs/messages/prune.cjs +7 -6
  60. package/dist/cjs/messages/prune.cjs.map +1 -1
  61. package/dist/cjs/tools/ToolNode.cjs +5 -1
  62. package/dist/cjs/tools/ToolNode.cjs.map +1 -1
  63. package/dist/esm/agents/AgentContext.mjs +85 -38
  64. package/dist/esm/agents/AgentContext.mjs.map +1 -1
  65. package/dist/esm/graphs/Graph.mjs +13 -3
  66. package/dist/esm/graphs/Graph.mjs.map +1 -1
  67. package/dist/esm/langchain/google-common.mjs +2 -0
  68. package/dist/esm/langchain/google-common.mjs.map +1 -0
  69. package/dist/esm/langchain/index.mjs +5 -0
  70. package/dist/esm/langchain/index.mjs.map +1 -0
  71. package/dist/esm/langchain/language_models/chat_models.mjs +2 -0
  72. package/dist/esm/langchain/language_models/chat_models.mjs.map +1 -0
  73. package/dist/esm/langchain/messages/tool.mjs +2 -0
  74. package/dist/esm/langchain/messages/tool.mjs.map +1 -0
  75. package/dist/esm/langchain/messages.mjs +2 -0
  76. package/dist/esm/langchain/messages.mjs.map +1 -0
  77. package/dist/esm/langchain/openai.mjs +2 -0
  78. package/dist/esm/langchain/openai.mjs.map +1 -0
  79. package/dist/esm/langchain/prompts.mjs +2 -0
  80. package/dist/esm/langchain/prompts.mjs.map +1 -0
  81. package/dist/esm/langchain/runnables.mjs +2 -0
  82. package/dist/esm/langchain/runnables.mjs.map +1 -0
  83. package/dist/esm/langchain/tools.mjs +2 -0
  84. package/dist/esm/langchain/tools.mjs.map +1 -0
  85. package/dist/esm/langchain/utils/env.mjs +2 -0
  86. package/dist/esm/langchain/utils/env.mjs.map +1 -0
  87. package/dist/esm/llm/anthropic/index.mjs +146 -54
  88. package/dist/esm/llm/anthropic/index.mjs.map +1 -1
  89. package/dist/esm/llm/anthropic/types.mjs.map +1 -1
  90. package/dist/esm/llm/anthropic/utils/message_inputs.mjs +25 -15
  91. package/dist/esm/llm/anthropic/utils/message_inputs.mjs.map +1 -1
  92. package/dist/esm/llm/anthropic/utils/message_outputs.mjs +84 -71
  93. package/dist/esm/llm/anthropic/utils/message_outputs.mjs.map +1 -1
  94. package/dist/esm/llm/bedrock/index.mjs +1 -1
  95. package/dist/esm/llm/bedrock/index.mjs.map +1 -1
  96. package/dist/esm/llm/bedrock/utils/message_inputs.mjs +214 -4
  97. package/dist/esm/llm/bedrock/utils/message_inputs.mjs.map +1 -1
  98. package/dist/esm/llm/bedrock/utils/message_outputs.mjs +2 -1
  99. package/dist/esm/llm/bedrock/utils/message_outputs.mjs.map +1 -1
  100. package/dist/esm/llm/google/utils/common.mjs +5 -4
  101. package/dist/esm/llm/google/utils/common.mjs.map +1 -1
  102. package/dist/esm/llm/openai/index.mjs +469 -648
  103. package/dist/esm/llm/openai/index.mjs.map +1 -1
  104. package/dist/esm/llm/openai/utils/index.mjs +4 -449
  105. package/dist/esm/llm/openai/utils/index.mjs.map +1 -1
  106. package/dist/esm/llm/openrouter/index.mjs +57 -175
  107. package/dist/esm/llm/openrouter/index.mjs.map +1 -1
  108. package/dist/esm/llm/vertexai/index.mjs +5 -3
  109. package/dist/esm/llm/vertexai/index.mjs.map +1 -1
  110. package/dist/esm/main.mjs +4 -0
  111. package/dist/esm/main.mjs.map +1 -1
  112. package/dist/esm/messages/cache.mjs +39 -4
  113. package/dist/esm/messages/cache.mjs.map +1 -1
  114. package/dist/esm/messages/core.mjs +7 -6
  115. package/dist/esm/messages/core.mjs.map +1 -1
  116. package/dist/esm/messages/format.mjs +7 -6
  117. package/dist/esm/messages/format.mjs.map +1 -1
  118. package/dist/esm/messages/langchain.mjs +23 -0
  119. package/dist/esm/messages/langchain.mjs.map +1 -0
  120. package/dist/esm/messages/prune.mjs +7 -6
  121. package/dist/esm/messages/prune.mjs.map +1 -1
  122. package/dist/esm/tools/ToolNode.mjs +5 -1
  123. package/dist/esm/tools/ToolNode.mjs.map +1 -1
  124. package/dist/types/agents/AgentContext.d.ts +14 -4
  125. package/dist/types/agents/__tests__/promptCacheLiveHelpers.d.ts +46 -0
  126. package/dist/types/index.d.ts +1 -0
  127. package/dist/types/langchain/google-common.d.ts +1 -0
  128. package/dist/types/langchain/index.d.ts +8 -0
  129. package/dist/types/langchain/language_models/chat_models.d.ts +1 -0
  130. package/dist/types/langchain/messages/tool.d.ts +1 -0
  131. package/dist/types/langchain/messages.d.ts +2 -0
  132. package/dist/types/langchain/openai.d.ts +1 -0
  133. package/dist/types/langchain/prompts.d.ts +1 -0
  134. package/dist/types/langchain/runnables.d.ts +2 -0
  135. package/dist/types/langchain/tools.d.ts +2 -0
  136. package/dist/types/langchain/utils/env.d.ts +1 -0
  137. package/dist/types/llm/anthropic/index.d.ts +22 -9
  138. package/dist/types/llm/anthropic/types.d.ts +5 -1
  139. package/dist/types/llm/anthropic/utils/message_outputs.d.ts +13 -6
  140. package/dist/types/llm/anthropic/utils/output_parsers.d.ts +1 -1
  141. package/dist/types/llm/openai/index.d.ts +21 -24
  142. package/dist/types/llm/openrouter/index.d.ts +11 -9
  143. package/dist/types/llm/vertexai/index.d.ts +1 -0
  144. package/dist/types/messages/cache.d.ts +4 -1
  145. package/dist/types/messages/langchain.d.ts +27 -0
  146. package/dist/types/types/graph.d.ts +26 -38
  147. package/dist/types/types/llm.d.ts +3 -3
  148. package/dist/types/types/run.d.ts +2 -0
  149. package/dist/types/types/stream.d.ts +1 -1
  150. package/package.json +80 -17
  151. package/src/agents/AgentContext.ts +123 -44
  152. package/src/agents/__tests__/AgentContext.anthropic.live.test.ts +116 -0
  153. package/src/agents/__tests__/AgentContext.bedrock.live.test.ts +149 -0
  154. package/src/agents/__tests__/AgentContext.test.ts +155 -2
  155. package/src/agents/__tests__/promptCacheLiveHelpers.ts +165 -0
  156. package/src/graphs/Graph.ts +24 -4
  157. package/src/graphs/__tests__/composition.smoke.test.ts +188 -0
  158. package/src/index.ts +3 -0
  159. package/src/langchain/google-common.ts +1 -0
  160. package/src/langchain/index.ts +8 -0
  161. package/src/langchain/language_models/chat_models.ts +1 -0
  162. package/src/langchain/messages/tool.ts +5 -0
  163. package/src/langchain/messages.ts +21 -0
  164. package/src/langchain/openai.ts +1 -0
  165. package/src/langchain/prompts.ts +1 -0
  166. package/src/langchain/runnables.ts +7 -0
  167. package/src/langchain/tools.ts +8 -0
  168. package/src/langchain/utils/env.ts +1 -0
  169. package/src/llm/anthropic/index.ts +252 -84
  170. package/src/llm/anthropic/llm.spec.ts +751 -102
  171. package/src/llm/anthropic/types.ts +9 -1
  172. package/src/llm/anthropic/utils/message_inputs.ts +43 -20
  173. package/src/llm/anthropic/utils/message_outputs.ts +119 -101
  174. package/src/llm/anthropic/utils/server-tool-inputs.test.ts +77 -0
  175. package/src/llm/bedrock/index.ts +2 -2
  176. package/src/llm/bedrock/llm.spec.ts +341 -0
  177. package/src/llm/bedrock/utils/message_inputs.ts +303 -4
  178. package/src/llm/bedrock/utils/message_outputs.ts +2 -1
  179. package/src/llm/custom-chat-models.smoke.test.ts +662 -0
  180. package/src/llm/google/llm.spec.ts +339 -57
  181. package/src/llm/google/utils/common.ts +53 -48
  182. package/src/llm/openai/contentBlocks.test.ts +346 -0
  183. package/src/llm/openai/index.ts +736 -837
  184. package/src/llm/openai/utils/index.ts +84 -64
  185. package/src/llm/openrouter/index.ts +124 -247
  186. package/src/llm/openrouter/reasoning.test.ts +8 -1
  187. package/src/llm/vertexai/index.ts +11 -5
  188. package/src/llm/vertexai/llm.spec.ts +28 -1
  189. package/src/messages/cache.test.ts +106 -4
  190. package/src/messages/cache.ts +57 -5
  191. package/src/messages/core.ts +16 -9
  192. package/src/messages/format.ts +9 -6
  193. package/src/messages/langchain.ts +39 -0
  194. package/src/messages/prune.ts +12 -8
  195. package/src/scripts/caching.ts +2 -3
  196. package/src/specs/anthropic.simple.test.ts +61 -0
  197. package/src/specs/summarization.test.ts +58 -61
  198. package/src/tools/ToolNode.ts +5 -1
  199. package/src/types/graph.ts +35 -88
  200. package/src/types/llm.ts +3 -3
  201. package/src/types/run.ts +2 -0
  202. package/src/types/stream.ts +1 -1
  203. package/src/utils/llmConfig.ts +1 -6
@@ -4,18 +4,31 @@ import type { BaseChatModelParams } from '@langchain/core/language_models/chat_m
4
4
  import type { BaseMessage, UsageMetadata } from '@langchain/core/messages';
5
5
  import type { CallbackManagerForLLMRun } from '@langchain/core/callbacks/manager';
6
6
  import type { AnthropicInput } from '@langchain/anthropic';
7
- import type { AnthropicMessageCreateParams, AnthropicStreamingMessageCreateParams, AnthropicOutputConfig } from '@/llm/anthropic/types';
7
+ import type { Anthropic } from '@anthropic-ai/sdk';
8
+ import type { AnthropicMessageCreateParams, AnthropicStreamingMessageCreateParams, AnthropicOutputConfig, AnthropicBeta, AnthropicMCPServerURLDefinition, AnthropicContextManagementConfigParam } from '@/llm/anthropic/types';
9
+ export declare function _documentsInParams(params: AnthropicMessageCreateParams | AnthropicStreamingMessageCreateParams): boolean;
8
10
  export type CustomAnthropicInput = AnthropicInput & {
9
11
  _lc_stream_delay?: number;
10
12
  outputConfig?: AnthropicOutputConfig;
11
13
  inferenceGeo?: string;
12
- contextManagement?: any;
14
+ contextManagement?: AnthropicContextManagementConfigParam;
13
15
  } & BaseChatModelParams;
14
- /**
15
- * A type representing additional parameters that can be passed to the
16
- * Anthropic API.
17
- */
18
- type Kwargs = Record<string, any>;
16
+ export type CustomAnthropicCallOptions = {
17
+ outputConfig?: AnthropicOutputConfig;
18
+ outputFormat?: Anthropic.Messages.JSONOutputFormat;
19
+ inferenceGeo?: string;
20
+ betas?: AnthropicBeta[];
21
+ container?: string;
22
+ mcp_servers?: AnthropicMCPServerURLDefinition[];
23
+ };
24
+ type CustomAnthropicInvocationParams = {
25
+ betas?: AnthropicBeta[];
26
+ container?: string;
27
+ context_management?: AnthropicContextManagementConfigParam;
28
+ inference_geo?: string;
29
+ mcp_servers?: AnthropicMCPServerURLDefinition[];
30
+ output_config?: AnthropicOutputConfig;
31
+ };
19
32
  export declare class CustomAnthropic extends ChatAnthropicMessages {
20
33
  _lc_stream_delay: number;
21
34
  private message_start;
@@ -25,13 +38,13 @@ export declare class CustomAnthropic extends ChatAnthropicMessages {
25
38
  top_k: number | undefined;
26
39
  outputConfig?: AnthropicOutputConfig;
27
40
  inferenceGeo?: string;
28
- contextManagement?: any;
41
+ contextManagement?: AnthropicContextManagementConfigParam;
29
42
  constructor(fields?: CustomAnthropicInput);
30
43
  static lc_name(): 'LibreChatAnthropic';
31
44
  /**
32
45
  * Get the parameters used to invoke the model
33
46
  */
34
- invocationParams(options?: this['ParsedCallOptions']): Omit<AnthropicMessageCreateParams | AnthropicStreamingMessageCreateParams, 'messages'> & Kwargs;
47
+ invocationParams(options?: this['ParsedCallOptions'] & CustomAnthropicCallOptions): Omit<AnthropicMessageCreateParams | AnthropicStreamingMessageCreateParams, 'messages'> & CustomAnthropicInvocationParams;
35
48
  /**
36
49
  * Get stream usage as returned by this client's API response.
37
50
  * @returns The stream usage object.
@@ -1,5 +1,7 @@
1
1
  import Anthropic from '@anthropic-ai/sdk';
2
- import { BindToolsInput } from '@langchain/core/language_models/chat_models';
2
+ import type { BindToolsInput } from '@langchain/core/language_models/chat_models';
3
+ import type { AnthropicBeta } from '@anthropic-ai/sdk/resources';
4
+ export type { AnthropicBeta };
3
5
  export type AnthropicStreamUsage = Anthropic.Usage;
4
6
  export type AnthropicMessageDeltaEvent = Anthropic.MessageDeltaEvent;
5
7
  export type AnthropicMessageStartEvent = Anthropic.MessageStartEvent;
@@ -14,8 +16,10 @@ export type AnthropicMessageResponse = Anthropic.ContentBlock | AnthropicToolRes
14
16
  export type AnthropicMessageCreateParams = Anthropic.MessageCreateParamsNonStreaming;
15
17
  export type AnthropicStreamingMessageCreateParams = Anthropic.MessageCreateParamsStreaming;
16
18
  export type AnthropicThinkingConfigParam = Anthropic.ThinkingConfigParam;
19
+ export type AnthropicContextManagementConfigParam = Anthropic.Beta.BetaContextManagementConfig;
17
20
  export type AnthropicMessageStreamEvent = Anthropic.MessageStreamEvent;
18
21
  export type AnthropicRequestOptions = Anthropic.RequestOptions;
22
+ export type AnthropicMCPServerURLDefinition = Anthropic.Beta.Messages.BetaRequestMCPServerURLDefinition;
19
23
  export type AnthropicToolChoice = {
20
24
  type: 'tool';
21
25
  name: string;
@@ -1,10 +1,16 @@
1
- /**
2
- * This util file contains functions for converting Anthropic messages to LangChain messages.
3
- */
4
- import Anthropic from '@anthropic-ai/sdk';
1
+ /** This util file contains functions for converting Anthropic messages to LangChain messages. */
5
2
  import { AIMessageChunk } from '@langchain/core/messages';
6
- import { ChatGeneration } from '@langchain/core/outputs';
7
- import { AnthropicMessageResponse } from '../types';
3
+ import type Anthropic from '@anthropic-ai/sdk';
4
+ import type { UsageMetadata } from '@langchain/core/messages';
5
+ import type { ChatGeneration } from '@langchain/core/outputs';
6
+ import type { AnthropicMessageResponse } from '../types';
7
+ interface AnthropicUsageData {
8
+ input_tokens?: number | null;
9
+ output_tokens?: number | null;
10
+ cache_creation_input_tokens?: number | null;
11
+ cache_read_input_tokens?: number | null;
12
+ }
13
+ export declare function getAnthropicUsageMetadata(usage: AnthropicUsageData | null | undefined): UsageMetadata | undefined;
8
14
  export declare function _makeMessageChunkFromAnthropicEvent(data: Anthropic.Beta.Messages.BetaRawMessageStreamEvent, fields: {
9
15
  streamUsage: boolean;
10
16
  coerceContentToString: boolean;
@@ -12,3 +18,4 @@ export declare function _makeMessageChunkFromAnthropicEvent(data: Anthropic.Beta
12
18
  chunk: AIMessageChunk;
13
19
  } | null;
14
20
  export declare function anthropicResponseToChatMessages(messages: AnthropicMessageResponse[], additionalKwargs: Record<string, unknown>): ChatGeneration[];
21
+ export {};
@@ -18,5 +18,5 @@ export declare class AnthropicToolsOutputParser<T extends Record<string, any> =
18
18
  protected _validateResult(result: unknown): Promise<T>;
19
19
  parseResult(generations: ChatGeneration[]): Promise<T>;
20
20
  }
21
- export declare function extractToolCalls(content: Record<string, any>[]): ToolCall[];
21
+ export declare function extractToolCalls(content: Record<string, any>[]): ToolCall<string, Record<string, any>>[];
22
22
  export {};
@@ -1,20 +1,26 @@
1
1
  import { AzureOpenAI as AzureOpenAIClient } from 'openai';
2
2
  import { ChatXAI as OriginalChatXAI } from '@langchain/xai';
3
3
  import { ChatGenerationChunk } from '@langchain/core/outputs';
4
- import { AIMessage } from '@langchain/core/messages';
5
4
  import { ChatDeepSeek as OriginalChatDeepSeek } from '@langchain/deepseek';
6
5
  import { CallbackManagerForLLMRun } from '@langchain/core/callbacks/manager';
7
6
  import { OpenAIClient, ChatOpenAI as OriginalChatOpenAI, AzureChatOpenAI as OriginalAzureChatOpenAI } from '@langchain/openai';
8
7
  import type { HeaderValue, HeadersLike } from './types';
9
- import type { BindToolsInput } from '@langchain/core/language_models/chat_models';
10
8
  import type { BaseMessage } from '@langchain/core/messages';
11
- import type { ChatResult } from '@langchain/core/outputs';
9
+ import type { BindToolsInput } from '@langchain/core/language_models/chat_models';
12
10
  import type { ChatXAIInput } from '@langchain/xai';
13
11
  import type * as t from '@langchain/openai';
14
12
  export declare function isHeaders(headers: unknown): headers is Headers;
15
13
  export declare function normalizeHeaders(headers: HeadersLike): Record<string, HeaderValue | readonly HeaderValue[]>;
16
- type OpenAICompletionParam = OpenAIClient.Chat.Completions.ChatCompletionMessageParam;
17
14
  type OpenAICoreRequestOptions = OpenAIClient.RequestOptions;
15
+ type LibreChatOpenAIFields = t.ChatOpenAIFields & {
16
+ _lc_stream_delay?: number;
17
+ includeReasoningContent?: boolean;
18
+ includeReasoningDetails?: boolean;
19
+ convertReasoningDetailsToContent?: boolean;
20
+ };
21
+ type LibreChatAzureOpenAIFields = t.AzureOpenAIInput & {
22
+ _lc_stream_delay?: number;
23
+ };
18
24
  /**
19
25
  * Formats a tool in either OpenAI format, or LangChain structured tool format
20
26
  * into an OpenAI tool format. If the tool is already in OpenAI format, return without
@@ -41,15 +47,12 @@ export declare class CustomAzureOpenAIClient extends AzureOpenAIClient {
41
47
  abortHandler?: () => void;
42
48
  fetchWithTimeout(url: RequestInfo, init: RequestInit | undefined, ms: number, controller: AbortController): Promise<Response>;
43
49
  }
44
- /** @ts-expect-error We are intentionally overriding `getReasoningParams` */
45
50
  export declare class ChatOpenAI extends OriginalChatOpenAI<t.ChatOpenAICallOptions> {
46
51
  _lc_stream_delay?: number;
47
- constructor(fields?: t.ChatOpenAICallOptions & {
48
- _lc_stream_delay?: number;
49
- } & t.OpenAIChatInput['modelKwargs']);
52
+ constructor(fields?: LibreChatOpenAIFields & t.OpenAIChatInput['modelKwargs']);
50
53
  get exposedClient(): CustomOpenAIClient;
51
54
  static lc_name(): string;
52
- protected _getClientOptions(options?: OpenAICoreRequestOptions): OpenAICoreRequestOptions;
55
+ _getClientOptions(options?: OpenAICoreRequestOptions): OpenAICoreRequestOptions;
53
56
  /**
54
57
  * Returns backwards compatible reasoning parameters from constructor params and call options
55
58
  * @internal
@@ -57,14 +60,10 @@ export declare class ChatOpenAI extends OriginalChatOpenAI<t.ChatOpenAICallOptio
57
60
  getReasoningParams(options?: this['ParsedCallOptions']): OpenAIClient.Reasoning | undefined;
58
61
  protected _getReasoningParams(options?: this['ParsedCallOptions']): OpenAIClient.Reasoning | undefined;
59
62
  _streamResponseChunks(messages: BaseMessage[], options: this['ParsedCallOptions'], runManager?: CallbackManagerForLLMRun): AsyncGenerator<ChatGenerationChunk>;
60
- _streamResponseChunks2(messages: BaseMessage[], options: this['ParsedCallOptions'], runManager?: CallbackManagerForLLMRun): AsyncGenerator<ChatGenerationChunk>;
61
63
  }
62
- /** @ts-expect-error We are intentionally overriding `getReasoningParams` */
63
64
  export declare class AzureChatOpenAI extends OriginalAzureChatOpenAI {
64
65
  _lc_stream_delay?: number;
65
- constructor(fields?: t.AzureOpenAIInput & {
66
- _lc_stream_delay?: number;
67
- });
66
+ constructor(fields?: LibreChatAzureOpenAIFields);
68
67
  get exposedClient(): CustomOpenAIClient;
69
68
  static lc_name(): 'LibreChatAzureOpenAI';
70
69
  /**
@@ -73,16 +72,17 @@ export declare class AzureChatOpenAI extends OriginalAzureChatOpenAI {
73
72
  */
74
73
  getReasoningParams(options?: this['ParsedCallOptions']): OpenAIClient.Reasoning | undefined;
75
74
  protected _getReasoningParams(options?: this['ParsedCallOptions']): OpenAIClient.Reasoning | undefined;
76
- protected _getClientOptions(options: OpenAICoreRequestOptions | undefined): OpenAICoreRequestOptions;
75
+ _getClientOptions(options: OpenAICoreRequestOptions | undefined): OpenAICoreRequestOptions;
77
76
  _streamResponseChunks(messages: BaseMessage[], options: this['ParsedCallOptions'], runManager?: CallbackManagerForLLMRun): AsyncGenerator<ChatGenerationChunk>;
78
77
  }
79
78
  export declare class ChatDeepSeek extends OriginalChatDeepSeek {
79
+ _lc_stream_delay?: number;
80
+ constructor(fields?: ConstructorParameters<typeof OriginalChatDeepSeek>[0] & {
81
+ _lc_stream_delay?: number;
82
+ });
80
83
  get exposedClient(): CustomOpenAIClient;
81
84
  static lc_name(): 'LibreChatDeepSeek';
82
- protected _convertMessages(messages: BaseMessage[]): OpenAICompletionParam[];
83
- _generate(messages: BaseMessage[], options: this['ParsedCallOptions'] | undefined, runManager?: CallbackManagerForLLMRun): Promise<ChatResult>;
84
- protected _convertResponseToMessage(choice: OpenAIClient.Chat.Completions.ChatCompletion.Choice, data: OpenAIClient.Chat.Completions.ChatCompletion): AIMessage;
85
- protected _getClientOptions(options?: OpenAICoreRequestOptions): OpenAICoreRequestOptions;
85
+ _getClientOptions(options?: OpenAICoreRequestOptions): OpenAICoreRequestOptions;
86
86
  _streamResponseChunks(messages: BaseMessage[], options: this['ParsedCallOptions'], runManager?: CallbackManagerForLLMRun): AsyncGenerator<ChatGenerationChunk>;
87
87
  }
88
88
  /** xAI-specific usage metadata type */
@@ -102,11 +102,8 @@ export interface XAIUsageMetadata extends OpenAIClient.Completions.CompletionUsa
102
102
  num_sources_used?: number;
103
103
  }
104
104
  export declare class ChatMoonshot extends ChatOpenAI {
105
+ constructor(fields?: LibreChatOpenAIFields & t.OpenAIChatInput['modelKwargs']);
105
106
  static lc_name(): 'LibreChatMoonshot';
106
- protected _convertMessages(messages: BaseMessage[]): OpenAICompletionParam[];
107
- _generate(messages: BaseMessage[], options: this['ParsedCallOptions'], runManager?: CallbackManagerForLLMRun): Promise<ChatResult>;
108
- protected _convertResponseToMessage(choice: OpenAIClient.Chat.Completions.ChatCompletion.Choice, data: OpenAIClient.Chat.Completions.ChatCompletion): AIMessage;
109
- _streamResponseChunks(messages: BaseMessage[], options: this['ParsedCallOptions'], runManager?: CallbackManagerForLLMRun): AsyncGenerator<ChatGenerationChunk>;
110
107
  }
111
108
  export declare class ChatXAI extends OriginalChatXAI {
112
109
  _lc_stream_delay?: number;
@@ -121,7 +118,7 @@ export declare class ChatXAI extends OriginalChatXAI {
121
118
  });
122
119
  static lc_name(): 'LibreChatXAI';
123
120
  get exposedClient(): CustomOpenAIClient;
124
- protected _getClientOptions(options?: OpenAICoreRequestOptions): OpenAICoreRequestOptions;
121
+ _getClientOptions(options?: OpenAICoreRequestOptions): OpenAICoreRequestOptions;
125
122
  _streamResponseChunks(messages: BaseMessage[], options: this['ParsedCallOptions'], runManager?: CallbackManagerForLLMRun): AsyncGenerator<ChatGenerationChunk>;
126
123
  }
127
124
  export {};
@@ -1,7 +1,7 @@
1
1
  import { ChatOpenAI } from '@/llm/openai';
2
- import { ChatGenerationChunk } from '@langchain/core/outputs';
3
- import { CallbackManagerForLLMRun } from '@langchain/core/callbacks/manager';
4
- import type { FunctionMessageChunk, SystemMessageChunk, HumanMessageChunk, ToolMessageChunk, ChatMessageChunk, AIMessageChunk, BaseMessage } from '@langchain/core/messages';
2
+ import type { BaseMessage } from '@langchain/core/messages';
3
+ import type { ChatGenerationChunk } from '@langchain/core/outputs';
4
+ import type { CallbackManagerForLLMRun } from '@langchain/core/callbacks/manager';
5
5
  import type { ChatOpenAICallOptions, OpenAIChatInput, OpenAIClient } from '@langchain/openai';
6
6
  export type OpenRouterReasoningEffort = 'xhigh' | 'high' | 'medium' | 'low' | 'minimal' | 'none';
7
7
  export interface OpenRouterReasoning {
@@ -16,20 +16,22 @@ export interface ChatOpenRouterCallOptions extends Omit<ChatOpenAICallOptions, '
16
16
  reasoning?: OpenRouterReasoning;
17
17
  modelKwargs?: OpenAIChatInput['modelKwargs'];
18
18
  }
19
+ export type ChatOpenRouterInput = Partial<ChatOpenRouterCallOptions & OpenAIChatInput>;
19
20
  /** invocationParams return type extended with OpenRouter reasoning */
20
21
  export type OpenRouterInvocationParams = Omit<OpenAIClient.Chat.ChatCompletionCreateParams, 'messages'> & {
21
22
  reasoning?: OpenRouterReasoning;
22
23
  };
24
+ type InvocationParamsExtra = {
25
+ streaming?: boolean;
26
+ };
23
27
  export declare class ChatOpenRouter extends ChatOpenAI {
24
28
  private openRouterReasoning?;
25
29
  /** @deprecated Use `reasoning` object instead */
26
30
  private includeReasoning?;
27
- constructor(_fields: Partial<ChatOpenRouterCallOptions>);
31
+ constructor(_fields: ChatOpenRouterInput);
28
32
  static lc_name(): 'LibreChatOpenRouter';
29
- invocationParams(options?: this['ParsedCallOptions'], extra?: {
30
- streaming?: boolean;
31
- }): OpenRouterInvocationParams;
33
+ invocationParams(options?: this['ParsedCallOptions'], extra?: InvocationParamsExtra): OpenRouterInvocationParams;
32
34
  private buildOpenRouterReasoning;
33
- protected _convertOpenAIDeltaToBaseMessageChunk(delta: Record<string, any>, rawResponse: OpenAIClient.ChatCompletionChunk, defaultRole?: 'function' | 'user' | 'system' | 'developer' | 'assistant' | 'tool'): AIMessageChunk | HumanMessageChunk | SystemMessageChunk | FunctionMessageChunk | ToolMessageChunk | ChatMessageChunk;
34
- _streamResponseChunks2(messages: BaseMessage[], options: this['ParsedCallOptions'], runManager?: CallbackManagerForLLMRun): AsyncGenerator<ChatGenerationChunk>;
35
+ _streamResponseChunks(messages: BaseMessage[], options: this['ParsedCallOptions'], runManager?: CallbackManagerForLLMRun): AsyncGenerator<ChatGenerationChunk>;
35
36
  }
37
+ export {};
@@ -288,6 +288,7 @@ export declare class ChatVertexAI extends ChatGoogle {
288
288
  dynamicThinkingBudget: boolean;
289
289
  thinkingConfig?: GoogleThinkingConfig;
290
290
  static lc_name(): 'LibreChatVertexAI';
291
+ constructor(model: string, fields?: Omit<VertexAIClientOptions, 'model'>);
291
292
  constructor(fields?: VertexAIClientOptions);
292
293
  invocationParams(options?: this['ParsedCallOptions'] | undefined): GoogleAIModelRequestParams;
293
294
  buildConnection(fields: VertexAIClientOptions | undefined, client: GoogleAbstractedClient): void;
@@ -36,5 +36,8 @@ export declare function stripBedrockCacheControl<T extends MessageWithContent>(m
36
36
  * @param messages - The array of message objects.
37
37
  * @returns - A new array of message objects with cache points added.
38
38
  */
39
- export declare function addBedrockCacheControl<T extends Partial<BaseMessage> & MessageWithContent>(messages: T[]): T[];
39
+ export declare function addBedrockCacheControl<T extends MessageWithContent & {
40
+ getType?: () => string;
41
+ role?: string;
42
+ }>(messages: T[]): T[];
40
43
  export {};
@@ -0,0 +1,27 @@
1
+ import type { MessageContent } from '@langchain/core/messages';
2
+ import type * as t from '@/types';
3
+ type LibreChatMessageContent = MessageContent | string | t.MessageContentComplex[] | t.ExtendedMessageContent[];
4
+ type WithLangChainContent<T extends {
5
+ content: LibreChatMessageContent;
6
+ }> = Omit<T, 'content'> & {
7
+ content: MessageContent;
8
+ };
9
+ /**
10
+ * Bridges LibreChat's extended content blocks to LangChain 1.x MessageContent.
11
+ *
12
+ * LangChain 1.x narrowed message constructor types around ContentBlock, while
13
+ * LibreChat still carries provider-specific blocks through the same content
14
+ * field. This helper keeps the runtime shape unchanged during the dependency
15
+ * upgrade; tracking issue: https://github.com/danny-avila/agents/issues/130.
16
+ */
17
+ export declare function toLangChainContent(content: LibreChatMessageContent): MessageContent;
18
+ /**
19
+ * Applies the same LangChain 1.x content bridge to message constructor fields.
20
+ *
21
+ * Keep this cast-only helper local to constructor boundaries so follow-up work
22
+ * can replace it with aligned content types or explicit conversion logic.
23
+ */
24
+ export declare function toLangChainMessageFields<T extends {
25
+ content: LibreChatMessageContent;
26
+ }>(message: T): WithLangChainContent<T>;
27
+ export {};
@@ -1,4 +1,4 @@
1
- import type { START, StateType, UpdateType, StateGraph, StateGraphArgs, StateDefinition, CompiledStateGraph, BinaryOperatorAggregate } from '@langchain/langgraph';
1
+ import type { START, StateGraph, StateGraphArgs } from '@langchain/langgraph';
2
2
  import type { BindToolsInput } from '@langchain/core/language_models/chat_models';
3
3
  import type { BaseMessage, AIMessageChunk, SystemMessage } from '@langchain/core/messages';
4
4
  import type { RunnableConfig, Runnable } from '@langchain/core/runnables';
@@ -51,51 +51,36 @@ export interface EventHandler {
51
51
  }
52
52
  export type GraphStateChannels<T extends BaseGraphState> = StateGraphArgs<T>['channels'];
53
53
  export type Workflow<T extends BaseGraphState = BaseGraphState, U extends Partial<T> = Partial<T>, N extends string = string> = StateGraph<T, U, N>;
54
- export type CompiledWorkflow<T extends BaseGraphState = BaseGraphState, U extends Partial<T> = Partial<T>, N extends string = string> = CompiledStateGraph<T, U, N>;
55
- export type CompiledStateWorkflow = CompiledStateGraph<StateType<{
56
- messages: BinaryOperatorAggregate<BaseMessage[], BaseMessage[]>;
57
- }>, UpdateType<{
58
- messages: BinaryOperatorAggregate<BaseMessage[], BaseMessage[]>;
59
- }>, string, {
60
- messages: BinaryOperatorAggregate<BaseMessage[], BaseMessage[]>;
61
- }, {
62
- messages: BinaryOperatorAggregate<BaseMessage[], BaseMessage[]>;
63
- }, StateDefinition>;
64
- export type CompiledMultiAgentWorkflow = CompiledStateGraph<StateType<{
65
- messages: BinaryOperatorAggregate<BaseMessage[], BaseMessage[]>;
66
- agentMessages: BinaryOperatorAggregate<BaseMessage[], BaseMessage[]>;
67
- }>, UpdateType<{
68
- messages: BinaryOperatorAggregate<BaseMessage[], BaseMessage[]>;
69
- agentMessages: BinaryOperatorAggregate<BaseMessage[], BaseMessage[]>;
70
- }>, string, {
71
- messages: BinaryOperatorAggregate<BaseMessage[], BaseMessage[]>;
72
- agentMessages: BinaryOperatorAggregate<BaseMessage[], BaseMessage[]>;
73
- }, {
74
- messages: BinaryOperatorAggregate<BaseMessage[], BaseMessage[]>;
75
- agentMessages: BinaryOperatorAggregate<BaseMessage[], BaseMessage[]>;
76
- }, StateDefinition>;
77
- export type CompiledAgentWorfklow = CompiledStateGraph<AgentSubgraphState, Partial<AgentSubgraphState>, '__start__' | `agent=${string}` | `tools=${string}` | `summarize=${string}`, {
78
- messages: BinaryOperatorAggregate<BaseMessage[], BaseMessage[]>;
79
- summarizationRequest: BinaryOperatorAggregate<SummarizationNodeInput | undefined, SummarizationNodeInput | undefined>;
80
- }, {
81
- messages: BinaryOperatorAggregate<BaseMessage[], BaseMessage[]>;
82
- summarizationRequest: BinaryOperatorAggregate<SummarizationNodeInput | undefined, SummarizationNodeInput | undefined>;
83
- }, StateDefinition, {
84
- [x: `agent=${string}`]: Partial<BaseGraphState>;
85
- [x: `tools=${string}`]: any;
86
- [x: `summarize=${string}`]: any;
87
- }>;
54
+ type LangChainEventStreamCallbackHandlerInput = NonNullable<Parameters<Runnable['streamEvents']>[2]>;
55
+ export type EventStreamCallbackHandlerInput = LangChainEventStreamCallbackHandlerInput & {
56
+ autoClose?: boolean;
57
+ raiseError?: boolean;
58
+ ignoreCustomEvent?: boolean;
59
+ };
60
+ export type WorkflowValuesStreamConfig = RunnableConfig & {
61
+ streamMode: 'values';
62
+ };
63
+ /**
64
+ * LangGraph stream output is mode-dependent (`values`, `updates`, SSE, etc.).
65
+ * Keep the base Runnable stream output as unknown and narrow at callsites that
66
+ * choose a concrete streamMode.
67
+ */
68
+ export type CompiledWorkflow<TInput extends BaseGraphState = BaseGraphState, TOutput extends BaseGraphState = TInput> = Omit<Runnable<TInput, unknown>, 'invoke'> & {
69
+ invoke(input: TInput, config?: RunnableConfig): Promise<TOutput>;
70
+ };
71
+ export type CompiledStateWorkflow = CompiledWorkflow;
72
+ export type CompiledMultiAgentWorkflow = CompiledWorkflow<MultiAgentGraphState>;
73
+ export type CompiledAgentWorfklow = CompiledWorkflow<AgentSubgraphState, AgentSubgraphState>;
88
74
  export type SystemRunnable = Runnable<BaseMessage[], (BaseMessage | SystemMessage)[], RunnableConfig<Record<string, unknown>>> | undefined;
89
75
  /**
90
76
  * Optional compile options passed to workflow.compile().
91
77
  * These are intentionally untyped to avoid coupling to library internals.
92
78
  */
93
79
  export type CompileOptions = {
94
- checkpointer?: any;
80
+ checkpointer?: unknown;
95
81
  interruptBefore?: string[];
96
82
  interruptAfter?: string[];
97
83
  };
98
- export type EventStreamCallbackHandlerInput = Parameters<CompiledWorkflow['streamEvents']>[2] extends Omit<infer T, 'autoClose'> ? T : never;
99
84
  export type StreamChunk = (ChatGenerationChunk & {
100
85
  message: AIMessageChunk;
101
86
  }) | AIMessageChunk;
@@ -311,10 +296,12 @@ export interface AgentInputs {
311
296
  toolMap?: ToolMap;
312
297
  tools?: GraphTools;
313
298
  provider: Providers;
299
+ /** Stable/cacheable system instructions. */
314
300
  instructions?: string;
315
301
  streamBuffer?: number;
316
302
  maxContextTokens?: number;
317
303
  clientOptions?: ClientOptions;
304
+ /** Dynamic system tail appended after stable instructions without provider cache markers. */
318
305
  additional_instructions?: string;
319
306
  reasoningKey?: 'reasoning_content' | 'reasoning';
320
307
  /** Format content blocks as strings (for legacy compatibility i.e. Ollama/Azure Serverless) */
@@ -340,7 +327,7 @@ export interface AgentInputs {
340
327
  summarizationEnabled?: boolean;
341
328
  summarizationConfig?: SummarizationConfig;
342
329
  /** Cross-run summary from a previous run, forwarded from formatAgentMessages.
343
- * Injected into the system message via AgentContext.buildInstructionsString(). */
330
+ * Injected into the dynamic system tail via AgentContext. */
344
331
  initialSummary?: {
345
332
  text: string;
346
333
  tokenCount: number;
@@ -370,3 +357,4 @@ export interface ContextPruningConfig {
370
357
  placeholder?: string;
371
358
  };
372
359
  }
360
+ export {};
@@ -3,7 +3,7 @@ import type { BindToolsInput, BaseChatModelParams } from '@langchain/core/langua
3
3
  import type { OpenAIChatInput, ChatOpenAIFields, AzureOpenAIInput, ClientOptions as OAIClientOptions } from '@langchain/openai';
4
4
  import type { GoogleGenerativeAIChatInput } from '@langchain/google-genai';
5
5
  import type { ChatVertexAIInput } from '@langchain/google-vertexai';
6
- import type { ChatDeepSeekCallOptions } from '@langchain/deepseek';
6
+ import type { ChatDeepSeekInput } from '@langchain/deepseek';
7
7
  import type { ChatOpenRouterCallOptions } from '@/llm/openrouter';
8
8
  import type { ChatBedrockConverseInput } from '@langchain/aws';
9
9
  import type { ChatMistralAIInput } from '@langchain/mistralai';
@@ -49,7 +49,7 @@ export type AnthropicReasoning = {
49
49
  export type GoogleThinkingConfig = {
50
50
  thinkingBudget?: number;
51
51
  includeThoughts?: boolean;
52
- thinkingLevel?: string;
52
+ thinkingLevel?: 'THINKING_LEVEL_UNSPECIFIED' | 'LOW' | 'MEDIUM' | 'HIGH';
53
53
  };
54
54
  export type OpenAIClientOptions = ChatOpenAIFields;
55
55
  export type AnthropicClientOptions = Omit<AnthropicInput, 'thinking'> & {
@@ -71,7 +71,7 @@ export type GoogleClientOptions = GoogleGenerativeAIChatInput & {
71
71
  customHeaders?: RequestOptions['customHeaders'];
72
72
  thinkingConfig?: GoogleThinkingConfig;
73
73
  };
74
- export type DeepSeekClientOptions = ChatDeepSeekCallOptions;
74
+ export type DeepSeekClientOptions = Partial<ChatDeepSeekInput>;
75
75
  export type XAIClientOptions = ChatXAIInput;
76
76
  export type ClientOptions = OpenAIClientOptions | AzureClientOptions | AnthropicClientOptions | MistralAIClientOptions | VertexAIClientOptions | BedrockConverseClientOptions | GoogleClientOptions | DeepSeekClientOptions | XAIClientOptions;
77
77
  export type SharedLLMConfig = {
@@ -63,7 +63,9 @@ export interface AgentStateChannels {
63
63
  messages: BaseMessage[];
64
64
  next: string;
65
65
  [key: string]: unknown;
66
+ /** Stable/cacheable system instructions. */
66
67
  instructions?: string;
68
+ /** Dynamic system tail appended after stable instructions. */
67
69
  additional_instructions?: string;
68
70
  }
69
71
  export interface Member {
@@ -138,7 +138,7 @@ export interface ExtendedMessageContent {
138
138
  type?: string;
139
139
  text?: string;
140
140
  input?: string;
141
- index?: number;
141
+ index?: string | number;
142
142
  id?: string;
143
143
  name?: string;
144
144
  }
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@librechat/agents",
3
- "version": "3.1.74",
3
+ "version": "3.1.75-dev.1",
4
4
  "main": "./dist/cjs/main.cjs",
5
5
  "module": "./dist/esm/main.mjs",
6
6
  "types": "./dist/types/index.d.ts",
@@ -9,6 +9,66 @@
9
9
  "import": "./dist/esm/main.mjs",
10
10
  "require": "./dist/cjs/main.cjs",
11
11
  "types": "./dist/types/index.d.ts"
12
+ },
13
+ "./langchain": {
14
+ "import": "./dist/esm/langchain/index.mjs",
15
+ "require": "./dist/cjs/langchain/index.cjs",
16
+ "types": "./dist/types/langchain/index.d.ts"
17
+ },
18
+ "./langchain/language_models/chat_models": {
19
+ "import": "./dist/esm/langchain/language_models/chat_models.mjs",
20
+ "require": "./dist/cjs/langchain/language_models/chat_models.cjs",
21
+ "types": "./dist/types/langchain/language_models/chat_models.d.ts"
22
+ },
23
+ "./langchain/messages": {
24
+ "import": "./dist/esm/langchain/messages.mjs",
25
+ "require": "./dist/cjs/langchain/messages.cjs",
26
+ "types": "./dist/types/langchain/messages.d.ts"
27
+ },
28
+ "./langchain/messages/tool": {
29
+ "import": "./dist/esm/langchain/messages/tool.mjs",
30
+ "require": "./dist/cjs/langchain/messages/tool.cjs",
31
+ "types": "./dist/types/langchain/messages/tool.d.ts"
32
+ },
33
+ "./langchain/google-common": {
34
+ "import": "./dist/esm/langchain/google-common.mjs",
35
+ "require": "./dist/cjs/langchain/google-common.cjs",
36
+ "types": "./dist/types/langchain/google-common.d.ts"
37
+ },
38
+ "./langchain/openai": {
39
+ "import": "./dist/esm/langchain/openai.mjs",
40
+ "require": "./dist/cjs/langchain/openai.cjs",
41
+ "types": "./dist/types/langchain/openai.d.ts"
42
+ },
43
+ "./langchain/prompts": {
44
+ "import": "./dist/esm/langchain/prompts.mjs",
45
+ "require": "./dist/cjs/langchain/prompts.cjs",
46
+ "types": "./dist/types/langchain/prompts.d.ts"
47
+ },
48
+ "./langchain/runnables": {
49
+ "import": "./dist/esm/langchain/runnables.mjs",
50
+ "require": "./dist/cjs/langchain/runnables.cjs",
51
+ "types": "./dist/types/langchain/runnables.d.ts"
52
+ },
53
+ "./langchain/tools": {
54
+ "import": "./dist/esm/langchain/tools.mjs",
55
+ "require": "./dist/cjs/langchain/tools.cjs",
56
+ "types": "./dist/types/langchain/tools.d.ts"
57
+ },
58
+ "./langchain/utils/env": {
59
+ "import": "./dist/esm/langchain/utils/env.mjs",
60
+ "require": "./dist/cjs/langchain/utils/env.cjs",
61
+ "types": "./dist/types/langchain/utils/env.d.ts"
62
+ }
63
+ },
64
+ "typesVersions": {
65
+ "*": {
66
+ "langchain": [
67
+ "dist/types/langchain/index.d.ts"
68
+ ],
69
+ "langchain/*": [
70
+ "dist/types/langchain/*"
71
+ ]
12
72
  }
13
73
  },
14
74
  "type": "module",
@@ -27,7 +87,7 @@
27
87
  ],
28
88
  "packageManager": "npm@10.5.2",
29
89
  "engines": {
30
- "node": ">=14.0.0"
90
+ "node": ">=20.0.0"
31
91
  },
32
92
  "files": [
33
93
  "dist",
@@ -111,29 +171,31 @@
111
171
  "format": "prettier --write ."
112
172
  },
113
173
  "overrides": {
114
- "@langchain/openai": "0.5.18",
174
+ "@langchain/openai": "1.4.5",
115
175
  "@anthropic-ai/sdk": "$@anthropic-ai/sdk",
116
176
  "@browserbasehq/stagehand": {
117
177
  "openai": "$openai"
118
178
  },
119
- "fast-xml-parser": "5.5.7",
179
+ "fast-xml-parser": "5.7.2",
120
180
  "ajv": "6.14.0",
121
181
  "minimatch": "3.1.4"
122
182
  },
123
183
  "dependencies": {
124
- "@anthropic-ai/sdk": "^0.73.0",
184
+ "@anthropic-ai/sdk": "^0.92.0",
125
185
  "@aws-sdk/client-bedrock-runtime": "^3.1013.0",
126
- "@langchain/anthropic": "^0.3.26",
127
- "@langchain/aws": "^0.1.15",
128
- "@langchain/core": "^0.3.80",
129
- "@langchain/deepseek": "^0.0.2",
130
- "@langchain/google-genai": "^0.2.18",
131
- "@langchain/google-vertexai": "^0.2.18",
132
- "@langchain/langgraph": "^0.4.9",
133
- "@langchain/mistralai": "^0.2.1",
134
- "@langchain/openai": "0.5.18",
135
- "@langchain/textsplitters": "^0.1.0",
136
- "@langchain/xai": "^0.0.3",
186
+ "@langchain/anthropic": "^1.3.28",
187
+ "@langchain/aws": "^1.3.5",
188
+ "@langchain/core": "1.1.44",
189
+ "@langchain/deepseek": "^1.0.25",
190
+ "@langchain/google-common": "2.1.30",
191
+ "@langchain/google-gauth": "2.1.30",
192
+ "@langchain/google-genai": "2.1.30",
193
+ "@langchain/google-vertexai": "2.1.30",
194
+ "@langchain/langgraph": "^1.2.9",
195
+ "@langchain/mistralai": "^1.0.8",
196
+ "@langchain/openai": "1.4.5",
197
+ "@langchain/textsplitters": "^1.0.1",
198
+ "@langchain/xai": "^1.3.17",
137
199
  "@langfuse/langchain": "^4.3.0",
138
200
  "@langfuse/otel": "^4.3.0",
139
201
  "@langfuse/tracing": "^4.3.0",
@@ -147,7 +209,8 @@
147
209
  "mathjs": "^15.2.0",
148
210
  "nanoid": "^3.3.7",
149
211
  "okapibm25": "^1.4.1",
150
- "openai": "5.8.2"
212
+ "openai": "^6.35.0",
213
+ "uuid": "^11.1.1"
151
214
  },
152
215
  "imports": {
153
216
  "@/*": "./src/*",