@librechat/agents 3.1.73 → 3.1.75-dev.0

This diff shows the changes between publicly released versions of the package as they appear in their respective public registries, and is provided for informational purposes only.
Files changed (159)
  1. package/README.md +66 -0
  2. package/dist/cjs/agents/AgentContext.cjs +146 -57
  3. package/dist/cjs/agents/AgentContext.cjs.map +1 -1
  4. package/dist/cjs/graphs/Graph.cjs +13 -3
  5. package/dist/cjs/graphs/Graph.cjs.map +1 -1
  6. package/dist/cjs/llm/anthropic/index.cjs +145 -52
  7. package/dist/cjs/llm/anthropic/index.cjs.map +1 -1
  8. package/dist/cjs/llm/anthropic/types.cjs.map +1 -1
  9. package/dist/cjs/llm/anthropic/utils/message_inputs.cjs +25 -15
  10. package/dist/cjs/llm/anthropic/utils/message_inputs.cjs.map +1 -1
  11. package/dist/cjs/llm/anthropic/utils/message_outputs.cjs +84 -70
  12. package/dist/cjs/llm/anthropic/utils/message_outputs.cjs.map +1 -1
  13. package/dist/cjs/llm/bedrock/index.cjs +1 -1
  14. package/dist/cjs/llm/bedrock/index.cjs.map +1 -1
  15. package/dist/cjs/llm/bedrock/utils/message_inputs.cjs +213 -3
  16. package/dist/cjs/llm/bedrock/utils/message_inputs.cjs.map +1 -1
  17. package/dist/cjs/llm/bedrock/utils/message_outputs.cjs +2 -1
  18. package/dist/cjs/llm/bedrock/utils/message_outputs.cjs.map +1 -1
  19. package/dist/cjs/llm/google/utils/common.cjs +5 -4
  20. package/dist/cjs/llm/google/utils/common.cjs.map +1 -1
  21. package/dist/cjs/llm/openai/index.cjs +468 -647
  22. package/dist/cjs/llm/openai/index.cjs.map +1 -1
  23. package/dist/cjs/llm/openai/utils/index.cjs +1 -448
  24. package/dist/cjs/llm/openai/utils/index.cjs.map +1 -1
  25. package/dist/cjs/llm/openrouter/index.cjs +57 -175
  26. package/dist/cjs/llm/openrouter/index.cjs.map +1 -1
  27. package/dist/cjs/llm/vertexai/index.cjs +5 -3
  28. package/dist/cjs/llm/vertexai/index.cjs.map +1 -1
  29. package/dist/cjs/main.cjs +1 -0
  30. package/dist/cjs/main.cjs.map +1 -1
  31. package/dist/cjs/messages/cache.cjs +39 -4
  32. package/dist/cjs/messages/cache.cjs.map +1 -1
  33. package/dist/cjs/messages/core.cjs +7 -6
  34. package/dist/cjs/messages/core.cjs.map +1 -1
  35. package/dist/cjs/messages/format.cjs +7 -6
  36. package/dist/cjs/messages/format.cjs.map +1 -1
  37. package/dist/cjs/messages/langchain.cjs +26 -0
  38. package/dist/cjs/messages/langchain.cjs.map +1 -0
  39. package/dist/cjs/messages/prune.cjs +7 -6
  40. package/dist/cjs/messages/prune.cjs.map +1 -1
  41. package/dist/cjs/tools/BashExecutor.cjs +21 -11
  42. package/dist/cjs/tools/BashExecutor.cjs.map +1 -1
  43. package/dist/cjs/tools/CodeExecutor.cjs +37 -10
  44. package/dist/cjs/tools/CodeExecutor.cjs.map +1 -1
  45. package/dist/cjs/tools/ProgrammaticToolCalling.cjs +16 -11
  46. package/dist/cjs/tools/ProgrammaticToolCalling.cjs.map +1 -1
  47. package/dist/cjs/tools/ToolNode.cjs +5 -1
  48. package/dist/cjs/tools/ToolNode.cjs.map +1 -1
  49. package/dist/esm/agents/AgentContext.mjs +147 -58
  50. package/dist/esm/agents/AgentContext.mjs.map +1 -1
  51. package/dist/esm/graphs/Graph.mjs +13 -3
  52. package/dist/esm/graphs/Graph.mjs.map +1 -1
  53. package/dist/esm/llm/anthropic/index.mjs +146 -54
  54. package/dist/esm/llm/anthropic/index.mjs.map +1 -1
  55. package/dist/esm/llm/anthropic/types.mjs.map +1 -1
  56. package/dist/esm/llm/anthropic/utils/message_inputs.mjs +25 -15
  57. package/dist/esm/llm/anthropic/utils/message_inputs.mjs.map +1 -1
  58. package/dist/esm/llm/anthropic/utils/message_outputs.mjs +84 -71
  59. package/dist/esm/llm/anthropic/utils/message_outputs.mjs.map +1 -1
  60. package/dist/esm/llm/bedrock/index.mjs +1 -1
  61. package/dist/esm/llm/bedrock/index.mjs.map +1 -1
  62. package/dist/esm/llm/bedrock/utils/message_inputs.mjs +214 -4
  63. package/dist/esm/llm/bedrock/utils/message_inputs.mjs.map +1 -1
  64. package/dist/esm/llm/bedrock/utils/message_outputs.mjs +2 -1
  65. package/dist/esm/llm/bedrock/utils/message_outputs.mjs.map +1 -1
  66. package/dist/esm/llm/google/utils/common.mjs +5 -4
  67. package/dist/esm/llm/google/utils/common.mjs.map +1 -1
  68. package/dist/esm/llm/openai/index.mjs +469 -648
  69. package/dist/esm/llm/openai/index.mjs.map +1 -1
  70. package/dist/esm/llm/openai/utils/index.mjs +4 -449
  71. package/dist/esm/llm/openai/utils/index.mjs.map +1 -1
  72. package/dist/esm/llm/openrouter/index.mjs +57 -175
  73. package/dist/esm/llm/openrouter/index.mjs.map +1 -1
  74. package/dist/esm/llm/vertexai/index.mjs +5 -3
  75. package/dist/esm/llm/vertexai/index.mjs.map +1 -1
  76. package/dist/esm/main.mjs +1 -1
  77. package/dist/esm/messages/cache.mjs +39 -4
  78. package/dist/esm/messages/cache.mjs.map +1 -1
  79. package/dist/esm/messages/core.mjs +7 -6
  80. package/dist/esm/messages/core.mjs.map +1 -1
  81. package/dist/esm/messages/format.mjs +7 -6
  82. package/dist/esm/messages/format.mjs.map +1 -1
  83. package/dist/esm/messages/langchain.mjs +23 -0
  84. package/dist/esm/messages/langchain.mjs.map +1 -0
  85. package/dist/esm/messages/prune.mjs +7 -6
  86. package/dist/esm/messages/prune.mjs.map +1 -1
  87. package/dist/esm/tools/BashExecutor.mjs +22 -12
  88. package/dist/esm/tools/BashExecutor.mjs.map +1 -1
  89. package/dist/esm/tools/CodeExecutor.mjs +37 -11
  90. package/dist/esm/tools/CodeExecutor.mjs.map +1 -1
  91. package/dist/esm/tools/ProgrammaticToolCalling.mjs +17 -12
  92. package/dist/esm/tools/ProgrammaticToolCalling.mjs.map +1 -1
  93. package/dist/esm/tools/ToolNode.mjs +5 -1
  94. package/dist/esm/tools/ToolNode.mjs.map +1 -1
  95. package/dist/types/agents/AgentContext.d.ts +29 -4
  96. package/dist/types/agents/__tests__/promptCacheLiveHelpers.d.ts +46 -0
  97. package/dist/types/llm/anthropic/index.d.ts +22 -9
  98. package/dist/types/llm/anthropic/types.d.ts +5 -1
  99. package/dist/types/llm/anthropic/utils/message_outputs.d.ts +13 -6
  100. package/dist/types/llm/anthropic/utils/output_parsers.d.ts +1 -1
  101. package/dist/types/llm/openai/index.d.ts +21 -24
  102. package/dist/types/llm/openrouter/index.d.ts +11 -9
  103. package/dist/types/llm/vertexai/index.d.ts +1 -0
  104. package/dist/types/messages/cache.d.ts +4 -1
  105. package/dist/types/messages/langchain.d.ts +27 -0
  106. package/dist/types/tools/CodeExecutor.d.ts +6 -0
  107. package/dist/types/types/graph.d.ts +26 -38
  108. package/dist/types/types/llm.d.ts +3 -3
  109. package/dist/types/types/run.d.ts +2 -0
  110. package/dist/types/types/stream.d.ts +1 -1
  111. package/dist/types/types/tools.d.ts +9 -0
  112. package/package.json +17 -16
  113. package/src/agents/AgentContext.ts +189 -71
  114. package/src/agents/__tests__/AgentContext.anthropic.live.test.ts +116 -0
  115. package/src/agents/__tests__/AgentContext.bedrock.live.test.ts +149 -0
  116. package/src/agents/__tests__/AgentContext.test.ts +333 -2
  117. package/src/agents/__tests__/promptCacheLiveHelpers.ts +165 -0
  118. package/src/graphs/Graph.ts +24 -4
  119. package/src/graphs/__tests__/composition.smoke.test.ts +188 -0
  120. package/src/llm/anthropic/index.ts +252 -84
  121. package/src/llm/anthropic/llm.spec.ts +751 -102
  122. package/src/llm/anthropic/types.ts +9 -1
  123. package/src/llm/anthropic/utils/message_inputs.ts +43 -20
  124. package/src/llm/anthropic/utils/message_outputs.ts +119 -101
  125. package/src/llm/anthropic/utils/server-tool-inputs.test.ts +77 -0
  126. package/src/llm/bedrock/index.ts +2 -2
  127. package/src/llm/bedrock/llm.spec.ts +341 -0
  128. package/src/llm/bedrock/utils/message_inputs.ts +303 -4
  129. package/src/llm/bedrock/utils/message_outputs.ts +2 -1
  130. package/src/llm/custom-chat-models.smoke.test.ts +662 -0
  131. package/src/llm/google/llm.spec.ts +339 -57
  132. package/src/llm/google/utils/common.ts +53 -48
  133. package/src/llm/openai/contentBlocks.test.ts +346 -0
  134. package/src/llm/openai/index.ts +736 -837
  135. package/src/llm/openai/utils/index.ts +84 -64
  136. package/src/llm/openrouter/index.ts +124 -247
  137. package/src/llm/openrouter/reasoning.test.ts +8 -1
  138. package/src/llm/vertexai/index.ts +11 -5
  139. package/src/llm/vertexai/llm.spec.ts +28 -1
  140. package/src/messages/cache.test.ts +106 -4
  141. package/src/messages/cache.ts +57 -5
  142. package/src/messages/core.ts +16 -9
  143. package/src/messages/format.ts +9 -6
  144. package/src/messages/langchain.ts +39 -0
  145. package/src/messages/prune.ts +12 -8
  146. package/src/scripts/caching.ts +2 -3
  147. package/src/specs/anthropic.simple.test.ts +61 -0
  148. package/src/specs/summarization.test.ts +58 -61
  149. package/src/tools/BashExecutor.ts +37 -13
  150. package/src/tools/CodeExecutor.ts +55 -11
  151. package/src/tools/ProgrammaticToolCalling.ts +29 -14
  152. package/src/tools/ToolNode.ts +5 -1
  153. package/src/tools/__tests__/ProgrammaticToolCalling.test.ts +60 -0
  154. package/src/types/graph.ts +35 -88
  155. package/src/types/llm.ts +3 -3
  156. package/src/types/run.ts +2 -0
  157. package/src/types/stream.ts +1 -1
  158. package/src/types/tools.ts +9 -0
  159. package/src/utils/llmConfig.ts +1 -6
package/dist/types/llm/anthropic/index.d.ts CHANGED
@@ -4,18 +4,31 @@ import type { BaseChatModelParams } from '@langchain/core/language_models/chat_m
 import type { BaseMessage, UsageMetadata } from '@langchain/core/messages';
 import type { CallbackManagerForLLMRun } from '@langchain/core/callbacks/manager';
 import type { AnthropicInput } from '@langchain/anthropic';
-import type { AnthropicMessageCreateParams, AnthropicStreamingMessageCreateParams, AnthropicOutputConfig } from '@/llm/anthropic/types';
+import type { Anthropic } from '@anthropic-ai/sdk';
+import type { AnthropicMessageCreateParams, AnthropicStreamingMessageCreateParams, AnthropicOutputConfig, AnthropicBeta, AnthropicMCPServerURLDefinition, AnthropicContextManagementConfigParam } from '@/llm/anthropic/types';
+export declare function _documentsInParams(params: AnthropicMessageCreateParams | AnthropicStreamingMessageCreateParams): boolean;
 export type CustomAnthropicInput = AnthropicInput & {
     _lc_stream_delay?: number;
     outputConfig?: AnthropicOutputConfig;
     inferenceGeo?: string;
-    contextManagement?: any;
+    contextManagement?: AnthropicContextManagementConfigParam;
 } & BaseChatModelParams;
-/**
- * A type representing additional parameters that can be passed to the
- * Anthropic API.
- */
-type Kwargs = Record<string, any>;
+export type CustomAnthropicCallOptions = {
+    outputConfig?: AnthropicOutputConfig;
+    outputFormat?: Anthropic.Messages.JSONOutputFormat;
+    inferenceGeo?: string;
+    betas?: AnthropicBeta[];
+    container?: string;
+    mcp_servers?: AnthropicMCPServerURLDefinition[];
+};
+type CustomAnthropicInvocationParams = {
+    betas?: AnthropicBeta[];
+    container?: string;
+    context_management?: AnthropicContextManagementConfigParam;
+    inference_geo?: string;
+    mcp_servers?: AnthropicMCPServerURLDefinition[];
+    output_config?: AnthropicOutputConfig;
+};
 export declare class CustomAnthropic extends ChatAnthropicMessages {
     _lc_stream_delay: number;
     private message_start;
@@ -25,13 +38,13 @@ export declare class CustomAnthropic extends ChatAnthropicMessages {
     top_k: number | undefined;
     outputConfig?: AnthropicOutputConfig;
     inferenceGeo?: string;
-    contextManagement?: any;
+    contextManagement?: AnthropicContextManagementConfigParam;
     constructor(fields?: CustomAnthropicInput);
     static lc_name(): 'LibreChatAnthropic';
     /**
      * Get the parameters used to invoke the model
      */
-    invocationParams(options?: this['ParsedCallOptions']): Omit<AnthropicMessageCreateParams | AnthropicStreamingMessageCreateParams, 'messages'> & Kwargs;
+    invocationParams(options?: this['ParsedCallOptions'] & CustomAnthropicCallOptions): Omit<AnthropicMessageCreateParams | AnthropicStreamingMessageCreateParams, 'messages'> & CustomAnthropicInvocationParams;
     /**
      * Get stream usage as returned by this client's API response.
      * @returns The stream usage object.
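
The loose `Kwargs` bag is gone: per-call beta options, and the snake_case invocation params they map to, are now named types. A minimal usage sketch of what that surface suggests, assuming `CustomAnthropic` is re-exported from the package root and that the camelCase call options map onto the snake_case invocation params shown above:

```typescript
import { CustomAnthropic } from '@librechat/agents';

// Field names come from the .d.ts above; the option values are illustrative
// and the export path is an assumption.
const model = new CustomAnthropic({
  model: 'claude-sonnet-4-5',
  apiKey: process.env.ANTHROPIC_API_KEY,
  contextManagement: { edits: [{ type: 'clear_tool_uses_20250919' }] },
});

// Per-call beta features are typed instead of falling into Record<string, any>:
const params = model.invocationParams({
  betas: ['context-management-2025-06-27'],
  mcp_servers: [{ type: 'url', url: 'https://example.com/mcp', name: 'example' }],
});
// params.context_management and params.mcp_servers are now typed via
// CustomAnthropicInvocationParams.
```
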
package/dist/types/llm/anthropic/types.d.ts CHANGED
@@ -1,5 +1,7 @@
 import Anthropic from '@anthropic-ai/sdk';
-import { BindToolsInput } from '@langchain/core/language_models/chat_models';
+import type { BindToolsInput } from '@langchain/core/language_models/chat_models';
+import type { AnthropicBeta } from '@anthropic-ai/sdk/resources';
+export type { AnthropicBeta };
 export type AnthropicStreamUsage = Anthropic.Usage;
 export type AnthropicMessageDeltaEvent = Anthropic.MessageDeltaEvent;
 export type AnthropicMessageStartEvent = Anthropic.MessageStartEvent;
@@ -14,8 +16,10 @@ export type AnthropicMessageResponse = Anthropic.ContentBlock | AnthropicToolRes
 export type AnthropicMessageCreateParams = Anthropic.MessageCreateParamsNonStreaming;
 export type AnthropicStreamingMessageCreateParams = Anthropic.MessageCreateParamsStreaming;
 export type AnthropicThinkingConfigParam = Anthropic.ThinkingConfigParam;
+export type AnthropicContextManagementConfigParam = Anthropic.Beta.BetaContextManagementConfig;
 export type AnthropicMessageStreamEvent = Anthropic.MessageStreamEvent;
 export type AnthropicRequestOptions = Anthropic.RequestOptions;
+export type AnthropicMCPServerURLDefinition = Anthropic.Beta.Messages.BetaRequestMCPServerURLDefinition;
 export type AnthropicToolChoice = {
     type: 'tool';
     name: string;
package/dist/types/llm/anthropic/utils/message_outputs.d.ts CHANGED
@@ -1,10 +1,16 @@
-/**
- * This util file contains functions for converting Anthropic messages to LangChain messages.
- */
-import Anthropic from '@anthropic-ai/sdk';
+/** This util file contains functions for converting Anthropic messages to LangChain messages. */
 import { AIMessageChunk } from '@langchain/core/messages';
-import { ChatGeneration } from '@langchain/core/outputs';
-import { AnthropicMessageResponse } from '../types';
+import type Anthropic from '@anthropic-ai/sdk';
+import type { UsageMetadata } from '@langchain/core/messages';
+import type { ChatGeneration } from '@langchain/core/outputs';
+import type { AnthropicMessageResponse } from '../types';
+interface AnthropicUsageData {
+    input_tokens?: number | null;
+    output_tokens?: number | null;
+    cache_creation_input_tokens?: number | null;
+    cache_read_input_tokens?: number | null;
+}
+export declare function getAnthropicUsageMetadata(usage: AnthropicUsageData | null | undefined): UsageMetadata | undefined;
 export declare function _makeMessageChunkFromAnthropicEvent(data: Anthropic.Beta.Messages.BetaRawMessageStreamEvent, fields: {
     streamUsage: boolean;
     coerceContentToString: boolean;
@@ -12,3 +18,4 @@ export declare function _makeMessageChunkFromAnthropicEvent(data: Anthropic.Beta
     chunk: AIMessageChunk;
 } | null;
 export declare function anthropicResponseToChatMessages(messages: AnthropicMessageResponse[], additionalKwargs: Record<string, unknown>): ChatGeneration[];
+export {};
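
The new `getAnthropicUsageMetadata` export centralizes conversion of Anthropic usage counters (including the cache fields) into LangChain's `UsageMetadata`. A sketch of calling it, assuming it is re-exported from the package root; how exactly the cache tokens are folded in is not visible in this diff:

```typescript
import { getAnthropicUsageMetadata } from '@librechat/agents';

// Input shape follows the AnthropicUsageData interface above; every field
// may be null or absent, and the function can return undefined.
const usage = getAnthropicUsageMetadata({
  input_tokens: 12,
  output_tokens: 256,
  cache_creation_input_tokens: 1024,
  cache_read_input_tokens: 0,
});

if (usage) {
  // LangChain convention puts cache counters under input_token_details
  // (an assumption about this helper, not confirmed by the diff).
  console.log(usage.input_tokens, usage.output_tokens, usage.input_token_details);
}
```
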
package/dist/types/llm/anthropic/utils/output_parsers.d.ts CHANGED
@@ -18,5 +18,5 @@ export declare class AnthropicToolsOutputParser<T extends Record<string, any> =
     protected _validateResult(result: unknown): Promise<T>;
     parseResult(generations: ChatGeneration[]): Promise<T>;
 }
-export declare function extractToolCalls(content: Record<string, any>[]): ToolCall[];
+export declare function extractToolCalls(content: Record<string, any>[]): ToolCall<string, Record<string, any>>[];
 export {};
package/dist/types/llm/openai/index.d.ts CHANGED
@@ -1,20 +1,26 @@
 import { AzureOpenAI as AzureOpenAIClient } from 'openai';
 import { ChatXAI as OriginalChatXAI } from '@langchain/xai';
 import { ChatGenerationChunk } from '@langchain/core/outputs';
-import { AIMessage } from '@langchain/core/messages';
 import { ChatDeepSeek as OriginalChatDeepSeek } from '@langchain/deepseek';
 import { CallbackManagerForLLMRun } from '@langchain/core/callbacks/manager';
 import { OpenAIClient, ChatOpenAI as OriginalChatOpenAI, AzureChatOpenAI as OriginalAzureChatOpenAI } from '@langchain/openai';
 import type { HeaderValue, HeadersLike } from './types';
-import type { BindToolsInput } from '@langchain/core/language_models/chat_models';
 import type { BaseMessage } from '@langchain/core/messages';
-import type { ChatResult } from '@langchain/core/outputs';
+import type { BindToolsInput } from '@langchain/core/language_models/chat_models';
 import type { ChatXAIInput } from '@langchain/xai';
 import type * as t from '@langchain/openai';
 export declare function isHeaders(headers: unknown): headers is Headers;
 export declare function normalizeHeaders(headers: HeadersLike): Record<string, HeaderValue | readonly HeaderValue[]>;
-type OpenAICompletionParam = OpenAIClient.Chat.Completions.ChatCompletionMessageParam;
 type OpenAICoreRequestOptions = OpenAIClient.RequestOptions;
+type LibreChatOpenAIFields = t.ChatOpenAIFields & {
+    _lc_stream_delay?: number;
+    includeReasoningContent?: boolean;
+    includeReasoningDetails?: boolean;
+    convertReasoningDetailsToContent?: boolean;
+};
+type LibreChatAzureOpenAIFields = t.AzureOpenAIInput & {
+    _lc_stream_delay?: number;
+};
 /**
  * Formats a tool in either OpenAI format, or LangChain structured tool format
  * into an OpenAI tool format. If the tool is already in OpenAI format, return without
@@ -41,15 +47,12 @@ export declare class CustomAzureOpenAIClient extends AzureOpenAIClient {
     abortHandler?: () => void;
     fetchWithTimeout(url: RequestInfo, init: RequestInit | undefined, ms: number, controller: AbortController): Promise<Response>;
 }
-/** @ts-expect-error We are intentionally overriding `getReasoningParams` */
 export declare class ChatOpenAI extends OriginalChatOpenAI<t.ChatOpenAICallOptions> {
     _lc_stream_delay?: number;
-    constructor(fields?: t.ChatOpenAICallOptions & {
-        _lc_stream_delay?: number;
-    } & t.OpenAIChatInput['modelKwargs']);
+    constructor(fields?: LibreChatOpenAIFields & t.OpenAIChatInput['modelKwargs']);
     get exposedClient(): CustomOpenAIClient;
     static lc_name(): string;
-    protected _getClientOptions(options?: OpenAICoreRequestOptions): OpenAICoreRequestOptions;
+    _getClientOptions(options?: OpenAICoreRequestOptions): OpenAICoreRequestOptions;
     /**
      * Returns backwards compatible reasoning parameters from constructor params and call options
      * @internal
@@ -57,14 +60,10 @@ export declare class ChatOpenAI extends OriginalChatOpenAI<t.ChatOpenAICallOptio
     getReasoningParams(options?: this['ParsedCallOptions']): OpenAIClient.Reasoning | undefined;
     protected _getReasoningParams(options?: this['ParsedCallOptions']): OpenAIClient.Reasoning | undefined;
     _streamResponseChunks(messages: BaseMessage[], options: this['ParsedCallOptions'], runManager?: CallbackManagerForLLMRun): AsyncGenerator<ChatGenerationChunk>;
-    _streamResponseChunks2(messages: BaseMessage[], options: this['ParsedCallOptions'], runManager?: CallbackManagerForLLMRun): AsyncGenerator<ChatGenerationChunk>;
 }
-/** @ts-expect-error We are intentionally overriding `getReasoningParams` */
 export declare class AzureChatOpenAI extends OriginalAzureChatOpenAI {
     _lc_stream_delay?: number;
-    constructor(fields?: t.AzureOpenAIInput & {
-        _lc_stream_delay?: number;
-    });
+    constructor(fields?: LibreChatAzureOpenAIFields);
     get exposedClient(): CustomOpenAIClient;
     static lc_name(): 'LibreChatAzureOpenAI';
     /**
@@ -73,16 +72,17 @@ export declare class AzureChatOpenAI extends OriginalAzureChatOpenAI {
      */
     getReasoningParams(options?: this['ParsedCallOptions']): OpenAIClient.Reasoning | undefined;
     protected _getReasoningParams(options?: this['ParsedCallOptions']): OpenAIClient.Reasoning | undefined;
-    protected _getClientOptions(options: OpenAICoreRequestOptions | undefined): OpenAICoreRequestOptions;
+    _getClientOptions(options: OpenAICoreRequestOptions | undefined): OpenAICoreRequestOptions;
     _streamResponseChunks(messages: BaseMessage[], options: this['ParsedCallOptions'], runManager?: CallbackManagerForLLMRun): AsyncGenerator<ChatGenerationChunk>;
 }
 export declare class ChatDeepSeek extends OriginalChatDeepSeek {
+    _lc_stream_delay?: number;
+    constructor(fields?: ConstructorParameters<typeof OriginalChatDeepSeek>[0] & {
+        _lc_stream_delay?: number;
+    });
     get exposedClient(): CustomOpenAIClient;
     static lc_name(): 'LibreChatDeepSeek';
-    protected _convertMessages(messages: BaseMessage[]): OpenAICompletionParam[];
-    _generate(messages: BaseMessage[], options: this['ParsedCallOptions'] | undefined, runManager?: CallbackManagerForLLMRun): Promise<ChatResult>;
-    protected _convertResponseToMessage(choice: OpenAIClient.Chat.Completions.ChatCompletion.Choice, data: OpenAIClient.Chat.Completions.ChatCompletion): AIMessage;
-    protected _getClientOptions(options?: OpenAICoreRequestOptions): OpenAICoreRequestOptions;
+    _getClientOptions(options?: OpenAICoreRequestOptions): OpenAICoreRequestOptions;
     _streamResponseChunks(messages: BaseMessage[], options: this['ParsedCallOptions'], runManager?: CallbackManagerForLLMRun): AsyncGenerator<ChatGenerationChunk>;
 }
 /** xAI-specific usage metadata type */
@@ -102,11 +102,8 @@ export interface XAIUsageMetadata extends OpenAIClient.Completions.CompletionUsa
     num_sources_used?: number;
 }
 export declare class ChatMoonshot extends ChatOpenAI {
+    constructor(fields?: LibreChatOpenAIFields & t.OpenAIChatInput['modelKwargs']);
     static lc_name(): 'LibreChatMoonshot';
-    protected _convertMessages(messages: BaseMessage[]): OpenAICompletionParam[];
-    _generate(messages: BaseMessage[], options: this['ParsedCallOptions'], runManager?: CallbackManagerForLLMRun): Promise<ChatResult>;
-    protected _convertResponseToMessage(choice: OpenAIClient.Chat.Completions.ChatCompletion.Choice, data: OpenAIClient.Chat.Completions.ChatCompletion): AIMessage;
-    _streamResponseChunks(messages: BaseMessage[], options: this['ParsedCallOptions'], runManager?: CallbackManagerForLLMRun): AsyncGenerator<ChatGenerationChunk>;
 }
 export declare class ChatXAI extends OriginalChatXAI {
     _lc_stream_delay?: number;
@@ -121,7 +118,7 @@ export declare class ChatXAI extends OriginalChatXAI {
     });
     static lc_name(): 'LibreChatXAI';
     get exposedClient(): CustomOpenAIClient;
-    protected _getClientOptions(options?: OpenAICoreRequestOptions): OpenAICoreRequestOptions;
+    _getClientOptions(options?: OpenAICoreRequestOptions): OpenAICoreRequestOptions;
     _streamResponseChunks(messages: BaseMessage[], options: this['ParsedCallOptions'], runManager?: CallbackManagerForLLMRun): AsyncGenerator<ChatGenerationChunk>;
 }
 export {};
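
`LibreChatOpenAIFields` makes the previously inline constructor intersection reusable across `ChatOpenAI` and `ChatMoonshot`, and `_getClientOptions` drops its `protected` modifier. A sketch under the assumption that `ChatOpenAI` is re-exported from the package root and that `_lc_stream_delay` paces streamed chunks in milliseconds (the diff shows the field, not its semantics):

```typescript
import { ChatOpenAI } from '@librechat/agents';

const model = new ChatOpenAI({
  model: 'gpt-4.1-mini',
  apiKey: process.env.OPENAI_API_KEY,
  _lc_stream_delay: 25, // presumed per-chunk delay; name-based assumption
  includeReasoningContent: true, // presumably surfaces reasoning deltas in content
});

// Now public rather than protected, so tooling outside the class hierarchy
// can inspect the resolved client options:
const clientOptions = model._getClientOptions();
console.log(Object.keys(clientOptions));
```
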
package/dist/types/llm/openrouter/index.d.ts CHANGED
@@ -1,7 +1,7 @@
 import { ChatOpenAI } from '@/llm/openai';
-import { ChatGenerationChunk } from '@langchain/core/outputs';
-import { CallbackManagerForLLMRun } from '@langchain/core/callbacks/manager';
-import type { FunctionMessageChunk, SystemMessageChunk, HumanMessageChunk, ToolMessageChunk, ChatMessageChunk, AIMessageChunk, BaseMessage } from '@langchain/core/messages';
+import type { BaseMessage } from '@langchain/core/messages';
+import type { ChatGenerationChunk } from '@langchain/core/outputs';
+import type { CallbackManagerForLLMRun } from '@langchain/core/callbacks/manager';
 import type { ChatOpenAICallOptions, OpenAIChatInput, OpenAIClient } from '@langchain/openai';
 export type OpenRouterReasoningEffort = 'xhigh' | 'high' | 'medium' | 'low' | 'minimal' | 'none';
 export interface OpenRouterReasoning {
@@ -16,20 +16,22 @@ export interface ChatOpenRouterCallOptions extends Omit<ChatOpenAICallOptions, '
     reasoning?: OpenRouterReasoning;
     modelKwargs?: OpenAIChatInput['modelKwargs'];
 }
+export type ChatOpenRouterInput = Partial<ChatOpenRouterCallOptions & OpenAIChatInput>;
 /** invocationParams return type extended with OpenRouter reasoning */
 export type OpenRouterInvocationParams = Omit<OpenAIClient.Chat.ChatCompletionCreateParams, 'messages'> & {
     reasoning?: OpenRouterReasoning;
 };
+type InvocationParamsExtra = {
+    streaming?: boolean;
+};
 export declare class ChatOpenRouter extends ChatOpenAI {
     private openRouterReasoning?;
     /** @deprecated Use `reasoning` object instead */
     private includeReasoning?;
-    constructor(_fields: Partial<ChatOpenRouterCallOptions>);
+    constructor(_fields: ChatOpenRouterInput);
     static lc_name(): 'LibreChatOpenRouter';
-    invocationParams(options?: this['ParsedCallOptions'], extra?: {
-        streaming?: boolean;
-    }): OpenRouterInvocationParams;
+    invocationParams(options?: this['ParsedCallOptions'], extra?: InvocationParamsExtra): OpenRouterInvocationParams;
     private buildOpenRouterReasoning;
-    protected _convertOpenAIDeltaToBaseMessageChunk(delta: Record<string, any>, rawResponse: OpenAIClient.ChatCompletionChunk, defaultRole?: 'function' | 'user' | 'system' | 'developer' | 'assistant' | 'tool'): AIMessageChunk | HumanMessageChunk | SystemMessageChunk | FunctionMessageChunk | ToolMessageChunk | ChatMessageChunk;
-    _streamResponseChunks2(messages: BaseMessage[], options: this['ParsedCallOptions'], runManager?: CallbackManagerForLLMRun): AsyncGenerator<ChatGenerationChunk>;
+    _streamResponseChunks(messages: BaseMessage[], options: this['ParsedCallOptions'], runManager?: CallbackManagerForLLMRun): AsyncGenerator<ChatGenerationChunk>;
 }
+export {};
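
`ChatOpenRouterInput` widens the constructor from call options only to the full partial `OpenAIChatInput`, so model, key, and reasoning controls can be configured in one place. A hedged sketch, assuming a root export and that `OpenRouterReasoning` exposes an `effort` field typed as `OpenRouterReasoningEffort` (its body is truncated above):

```typescript
import { ChatOpenRouter } from '@librechat/agents';

const model = new ChatOpenRouter({
  model: 'anthropic/claude-sonnet-4.5',
  apiKey: process.env.OPENROUTER_API_KEY,
  reasoning: { effort: 'high' },
});

// The typed `reasoning` block rides through invocationParams into the
// chat.completions payload (presumably via buildOpenRouterReasoning):
const params = model.invocationParams(undefined, { streaming: true });
console.log(params.reasoning);
```
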
package/dist/types/llm/vertexai/index.d.ts CHANGED
@@ -288,6 +288,7 @@ export declare class ChatVertexAI extends ChatGoogle {
     dynamicThinkingBudget: boolean;
     thinkingConfig?: GoogleThinkingConfig;
     static lc_name(): 'LibreChatVertexAI';
+    constructor(model: string, fields?: Omit<VertexAIClientOptions, 'model'>);
     constructor(fields?: VertexAIClientOptions);
     invocationParams(options?: this['ParsedCallOptions'] | undefined): GoogleAIModelRequestParams;
     buildConnection(fields: VertexAIClientOptions | undefined, client: GoogleAbstractedClient): void;
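
`ChatVertexAI` gains a positional-model overload alongside the original fields-object form. A short sketch; the root export and the option beyond `model` are illustrative assumptions:

```typescript
import { ChatVertexAI } from '@librechat/agents';

// New overload: model name first, remaining options (sans `model`) second.
const a = new ChatVertexAI('gemini-2.5-pro', { temperature: 0 });

// The original single-fields form still type-checks:
const b = new ChatVertexAI({ model: 'gemini-2.5-pro', temperature: 0 });
```
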
package/dist/types/messages/cache.d.ts CHANGED
@@ -36,5 +36,8 @@ export declare function stripBedrockCacheControl<T extends MessageWithContent>(m
  * @param messages - The array of message objects.
  * @returns - A new array of message objects with cache points added.
  */
-export declare function addBedrockCacheControl<T extends Partial<BaseMessage> & MessageWithContent>(messages: T[]): T[];
+export declare function addBedrockCacheControl<T extends MessageWithContent & {
+    getType?: () => string;
+    role?: string;
+}>(messages: T[]): T[];
 export {};
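
The loosened generic bound means callers no longer have to pretend their payloads are `Partial<BaseMessage>`: anything with content plus either `getType()` or `role` qualifies. A sketch, assuming a root export; where the cache points actually land is not shown in this hunk:

```typescript
import { HumanMessage } from '@langchain/core/messages';
import { addBedrockCacheControl } from '@librechat/agents';

// LangChain message instances satisfy the bound via getType():
const fromInstances = addBedrockCacheControl([
  new HumanMessage({ content: 'Summarize the attached report.' }),
]);

// Plain role-based payloads now satisfy it via `role`:
const fromPlain = addBedrockCacheControl([
  { role: 'user', content: [{ type: 'text', text: 'Hello' }] },
]);
console.log(fromInstances.length, fromPlain.length);
```
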
package/dist/types/messages/langchain.d.ts ADDED
@@ -0,0 +1,27 @@
+import type { MessageContent } from '@langchain/core/messages';
+import type * as t from '@/types';
+type LibreChatMessageContent = MessageContent | string | t.MessageContentComplex[] | t.ExtendedMessageContent[];
+type WithLangChainContent<T extends {
+    content: LibreChatMessageContent;
+}> = Omit<T, 'content'> & {
+    content: MessageContent;
+};
+/**
+ * Bridges LibreChat's extended content blocks to LangChain 1.x MessageContent.
+ *
+ * LangChain 1.x narrowed message constructor types around ContentBlock, while
+ * LibreChat still carries provider-specific blocks through the same content
+ * field. This helper keeps the runtime shape unchanged during the dependency
+ * upgrade; tracking issue: https://github.com/danny-avila/agents/issues/130.
+ */
+export declare function toLangChainContent(content: LibreChatMessageContent): MessageContent;
+/**
+ * Applies the same LangChain 1.x content bridge to message constructor fields.
+ *
+ * Keep this cast-only helper local to constructor boundaries so follow-up work
+ * can replace it with aligned content types or explicit conversion logic.
+ */
+export declare function toLangChainMessageFields<T extends {
+    content: LibreChatMessageContent;
+}>(message: T): WithLangChainContent<T>;
+export {};
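
Per its doc comments, this new module is a cast-only bridge applied at message constructor boundaries. A sketch of the intended call pattern; the extended block below is hypothetical and the root export is assumed:

```typescript
import { AIMessage } from '@langchain/core/messages';
import { toLangChainMessageFields } from '@librechat/agents';

const fields = {
  content: [
    { type: 'text', text: 'Here is the answer.' },
    // Hypothetical provider-specific block that LangChain 1.x's narrowed
    // constructor types would otherwise reject:
    { type: 'thinking', thinking: 'scratchpad text', signature: 'sig' },
  ],
};

// Runtime shape is unchanged; only the static type is reconciled.
const message = new AIMessage(toLangChainMessageFields(fields));
console.log(Array.isArray(message.content)); // true
```
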
package/dist/types/tools/CodeExecutor.d.ts CHANGED
@@ -3,6 +3,12 @@ import type * as t from '@/types';
 import { Constants } from '@/common';
 export declare const imageExtRegex: RegExp;
 export declare const getCodeBaseURL: () => string;
+/**
+ * Renders one section of the post-execution file listing. Used by the
+ * code/bash tool formatters to keep generated outputs and inherited
+ * inputs visually separated. See BashExecutor for full docs.
+ */
+export declare function renderFileSection(header: string, files: t.FileRefs, defaultMessage: string): string;
 export declare const CodeExecutionToolSchema: {
     readonly type: "object";
     readonly properties: {
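
`renderFileSection` is the shared formatter behind the "Generated files" / "Available files" split described later in this diff (see the `FileRef.inherited` note under types/tools.d.ts). A usage sketch; the headers, default messages, and the `name` field are illustrative assumptions, and the root export is assumed:

```typescript
import { renderFileSection } from '@librechat/agents';

const generated = renderFileSection(
  'Generated files:',
  [{ name: 'report.csv', path: '/mnt/data/report.csv' }],
  'No files generated.'
);
const available = renderFileSection(
  'Available files:',
  [{ name: 'skill.py', path: '/mnt/data/skill.py', inherited: true }],
  'No files available.'
);
console.log(`${generated}\n${available}`);
```
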
package/dist/types/types/graph.d.ts CHANGED
@@ -1,4 +1,4 @@
-import type { START, StateType, UpdateType, StateGraph, StateGraphArgs, StateDefinition, CompiledStateGraph, BinaryOperatorAggregate } from '@langchain/langgraph';
+import type { START, StateGraph, StateGraphArgs } from '@langchain/langgraph';
 import type { BindToolsInput } from '@langchain/core/language_models/chat_models';
 import type { BaseMessage, AIMessageChunk, SystemMessage } from '@langchain/core/messages';
 import type { RunnableConfig, Runnable } from '@langchain/core/runnables';
@@ -51,51 +51,36 @@ export interface EventHandler {
 }
 export type GraphStateChannels<T extends BaseGraphState> = StateGraphArgs<T>['channels'];
 export type Workflow<T extends BaseGraphState = BaseGraphState, U extends Partial<T> = Partial<T>, N extends string = string> = StateGraph<T, U, N>;
-export type CompiledWorkflow<T extends BaseGraphState = BaseGraphState, U extends Partial<T> = Partial<T>, N extends string = string> = CompiledStateGraph<T, U, N>;
-export type CompiledStateWorkflow = CompiledStateGraph<StateType<{
-    messages: BinaryOperatorAggregate<BaseMessage[], BaseMessage[]>;
-}>, UpdateType<{
-    messages: BinaryOperatorAggregate<BaseMessage[], BaseMessage[]>;
-}>, string, {
-    messages: BinaryOperatorAggregate<BaseMessage[], BaseMessage[]>;
-}, {
-    messages: BinaryOperatorAggregate<BaseMessage[], BaseMessage[]>;
-}, StateDefinition>;
-export type CompiledMultiAgentWorkflow = CompiledStateGraph<StateType<{
-    messages: BinaryOperatorAggregate<BaseMessage[], BaseMessage[]>;
-    agentMessages: BinaryOperatorAggregate<BaseMessage[], BaseMessage[]>;
-}>, UpdateType<{
-    messages: BinaryOperatorAggregate<BaseMessage[], BaseMessage[]>;
-    agentMessages: BinaryOperatorAggregate<BaseMessage[], BaseMessage[]>;
-}>, string, {
-    messages: BinaryOperatorAggregate<BaseMessage[], BaseMessage[]>;
-    agentMessages: BinaryOperatorAggregate<BaseMessage[], BaseMessage[]>;
-}, {
-    messages: BinaryOperatorAggregate<BaseMessage[], BaseMessage[]>;
-    agentMessages: BinaryOperatorAggregate<BaseMessage[], BaseMessage[]>;
-}, StateDefinition>;
-export type CompiledAgentWorfklow = CompiledStateGraph<AgentSubgraphState, Partial<AgentSubgraphState>, '__start__' | `agent=${string}` | `tools=${string}` | `summarize=${string}`, {
-    messages: BinaryOperatorAggregate<BaseMessage[], BaseMessage[]>;
-    summarizationRequest: BinaryOperatorAggregate<SummarizationNodeInput | undefined, SummarizationNodeInput | undefined>;
-}, {
-    messages: BinaryOperatorAggregate<BaseMessage[], BaseMessage[]>;
-    summarizationRequest: BinaryOperatorAggregate<SummarizationNodeInput | undefined, SummarizationNodeInput | undefined>;
-}, StateDefinition, {
-    [x: `agent=${string}`]: Partial<BaseGraphState>;
-    [x: `tools=${string}`]: any;
-    [x: `summarize=${string}`]: any;
-}>;
+type LangChainEventStreamCallbackHandlerInput = NonNullable<Parameters<Runnable['streamEvents']>[2]>;
+export type EventStreamCallbackHandlerInput = LangChainEventStreamCallbackHandlerInput & {
+    autoClose?: boolean;
+    raiseError?: boolean;
+    ignoreCustomEvent?: boolean;
+};
+export type WorkflowValuesStreamConfig = RunnableConfig & {
+    streamMode: 'values';
+};
+/**
+ * LangGraph stream output is mode-dependent (`values`, `updates`, SSE, etc.).
+ * Keep the base Runnable stream output as unknown and narrow at callsites that
+ * choose a concrete streamMode.
+ */
+export type CompiledWorkflow<TInput extends BaseGraphState = BaseGraphState, TOutput extends BaseGraphState = TInput> = Omit<Runnable<TInput, unknown>, 'invoke'> & {
+    invoke(input: TInput, config?: RunnableConfig): Promise<TOutput>;
+};
+export type CompiledStateWorkflow = CompiledWorkflow;
+export type CompiledMultiAgentWorkflow = CompiledWorkflow<MultiAgentGraphState>;
+export type CompiledAgentWorfklow = CompiledWorkflow<AgentSubgraphState, AgentSubgraphState>;
 export type SystemRunnable = Runnable<BaseMessage[], (BaseMessage | SystemMessage)[], RunnableConfig<Record<string, unknown>>> | undefined;
 /**
  * Optional compile options passed to workflow.compile().
  * These are intentionally untyped to avoid coupling to library internals.
  */
 export type CompileOptions = {
-    checkpointer?: any;
+    checkpointer?: unknown;
     interruptBefore?: string[];
     interruptAfter?: string[];
 };
-export type EventStreamCallbackHandlerInput = Parameters<CompiledWorkflow['streamEvents']>[2] extends Omit<infer T, 'autoClose'> ? T : never;
 export type StreamChunk = (ChatGenerationChunk & {
     message: AIMessageChunk;
 }) | AIMessageChunk;
@@ -311,10 +296,12 @@ export interface AgentInputs {
     toolMap?: ToolMap;
     tools?: GraphTools;
     provider: Providers;
+    /** Stable/cacheable system instructions. */
     instructions?: string;
     streamBuffer?: number;
     maxContextTokens?: number;
     clientOptions?: ClientOptions;
+    /** Dynamic system tail appended after stable instructions without provider cache markers. */
     additional_instructions?: string;
     reasoningKey?: 'reasoning_content' | 'reasoning';
     /** Format content blocks as strings (for legacy compatibility i.e. Ollama/Azure Serverless) */
@@ -340,7 +327,7 @@
     summarizationEnabled?: boolean;
     summarizationConfig?: SummarizationConfig;
     /** Cross-run summary from a previous run, forwarded from formatAgentMessages.
-     * Injected into the system message via AgentContext.buildInstructionsString(). */
+     * Injected into the dynamic system tail via AgentContext. */
     initialSummary?: {
         text: string;
         tokenCount: number;
@@ -370,3 +357,4 @@ export interface ContextPruningConfig {
         placeholder?: string;
     };
 }
+export {};
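
The doc comment above says to keep stream output `unknown` and narrow where a concrete `streamMode` is chosen. A self-contained sketch of that narrowing pattern, with the workflow surface declared locally (hypothetical) rather than imported:

```typescript
import type { RunnableConfig } from '@langchain/core/runnables';
import type { BaseMessage } from '@langchain/core/messages';

// Hypothetical state and workflow shapes mirroring CompiledWorkflow above.
interface MyState {
  messages: BaseMessage[];
}
declare const workflow: {
  invoke(input: MyState, config?: RunnableConfig): Promise<MyState>;
  stream(input: MyState, config?: RunnableConfig & { streamMode: 'values' }): Promise<AsyncIterable<unknown>>;
};

async function run(input: MyState): Promise<void> {
  // invoke stays strongly typed through the CompiledWorkflow override:
  const finalState = await workflow.invoke(input);
  console.log(finalState.messages.length);

  // Stream chunks arrive as unknown; with streamMode 'values' each chunk is
  // a full state snapshot, so the callsite narrows accordingly:
  for await (const chunk of await workflow.stream(input, { streamMode: 'values' })) {
    const state = chunk as MyState;
    console.log(state.messages.at(-1)?.getType());
  }
}
```
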
package/dist/types/types/llm.d.ts CHANGED
@@ -3,7 +3,7 @@ import type { BindToolsInput, BaseChatModelParams } from '@langchain/core/langua
 import type { OpenAIChatInput, ChatOpenAIFields, AzureOpenAIInput, ClientOptions as OAIClientOptions } from '@langchain/openai';
 import type { GoogleGenerativeAIChatInput } from '@langchain/google-genai';
 import type { ChatVertexAIInput } from '@langchain/google-vertexai';
-import type { ChatDeepSeekCallOptions } from '@langchain/deepseek';
+import type { ChatDeepSeekInput } from '@langchain/deepseek';
 import type { ChatOpenRouterCallOptions } from '@/llm/openrouter';
 import type { ChatBedrockConverseInput } from '@langchain/aws';
 import type { ChatMistralAIInput } from '@langchain/mistralai';
@@ -49,7 +49,7 @@ export type AnthropicReasoning = {
 export type GoogleThinkingConfig = {
     thinkingBudget?: number;
     includeThoughts?: boolean;
-    thinkingLevel?: string;
+    thinkingLevel?: 'THINKING_LEVEL_UNSPECIFIED' | 'LOW' | 'MEDIUM' | 'HIGH';
 };
 export type OpenAIClientOptions = ChatOpenAIFields;
 export type AnthropicClientOptions = Omit<AnthropicInput, 'thinking'> & {
@@ -71,7 +71,7 @@ export type GoogleClientOptions = GoogleGenerativeAIChatInput & {
     customHeaders?: RequestOptions['customHeaders'];
     thinkingConfig?: GoogleThinkingConfig;
 };
-export type DeepSeekClientOptions = ChatDeepSeekCallOptions;
+export type DeepSeekClientOptions = Partial<ChatDeepSeekInput>;
 export type XAIClientOptions = ChatXAIInput;
 export type ClientOptions = OpenAIClientOptions | AzureClientOptions | AnthropicClientOptions | MistralAIClientOptions | VertexAIClientOptions | BedrockConverseClientOptions | GoogleClientOptions | DeepSeekClientOptions | XAIClientOptions;
 export type SharedLLMConfig = {
package/dist/types/types/run.d.ts CHANGED
@@ -63,7 +63,9 @@ export interface AgentStateChannels {
     messages: BaseMessage[];
     next: string;
     [key: string]: unknown;
+    /** Stable/cacheable system instructions. */
     instructions?: string;
+    /** Dynamic system tail appended after stable instructions. */
     additional_instructions?: string;
 }
 export interface Member {
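
The same stable/dynamic split now appears on both `AgentInputs` and `AgentStateChannels`, which matters for prompt caching: the stable prefix can carry provider cache markers while the per-turn tail (injected summaries, ephemeral context) stays outside the cached region. A minimal illustration of the intended division; the values are invented:

```typescript
import type { BaseMessage } from '@langchain/core/messages';

const channels: {
  messages: BaseMessage[];
  next: string;
  instructions?: string;
  additional_instructions?: string;
} = {
  messages: [],
  next: 'agent',
  // Stable prefix: identical across turns, safe to mark cacheable
  // (e.g. with Anthropic cache_control breakpoints).
  instructions: 'You are a careful research assistant.',
  // Dynamic tail: per-turn context such as an injected cross-run summary;
  // kept out of the cached prefix so it never invalidates the cache.
  additional_instructions: 'Summary of prior run: user is comparing SDK versions.',
};
console.log(channels.next);
```
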
package/dist/types/types/stream.d.ts CHANGED
@@ -138,7 +138,7 @@ export interface ExtendedMessageContent {
     type?: string;
     text?: string;
     input?: string;
-    index?: number;
+    index?: string | number;
     id?: string;
     name?: string;
 }
package/dist/types/types/tools.d.ts CHANGED
@@ -96,6 +96,15 @@ export type FileRef = {
     path?: string;
     /** Session ID this file belongs to (for multi-session file tracking) */
     session_id?: string;
+    /**
+     * `true` when the codeapi sandbox echoed this entry as an unchanged
+     * passthrough of an input the caller already owns (skill files,
+     * downloaded inputs whose hash matched the baseline, inherited
+     * `.dirkeep` markers). The tool-result formatter renders these as
+     * "Available files" rather than "Generated files" so the LLM doesn't
+     * conflate infrastructure inputs with newly-produced outputs.
+     */
+    inherited?: true;
 };
 export type FileRefs = FileRef[];
 export type ExecuteResult = {
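
A plausible consumer of the new `inherited` flag, consistent with the doc comment's "Generated files" vs "Available files" split; the partitioning helper itself is hypothetical:

```typescript
// Local FileRef stand-in typed from the .d.ts above.
type FileRef = { path?: string; session_id?: string; inherited?: true };

function partitionFiles(files: FileRef[]): { generated: FileRef[]; available: FileRef[] } {
  return {
    generated: files.filter((f) => f.inherited !== true),
    available: files.filter((f) => f.inherited === true),
  };
}

const { generated, available } = partitionFiles([
  { path: 'outputs/plot.png' },
  { path: 'skills/helper.py', inherited: true },
]);
console.log(generated.length, available.length); // 1 1
```
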
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@librechat/agents",
-  "version": "3.1.73",
+  "version": "3.1.75-dev.0",
   "main": "./dist/cjs/main.cjs",
   "module": "./dist/esm/main.mjs",
   "types": "./dist/types/index.d.ts",
@@ -27,7 +27,7 @@
   ],
   "packageManager": "npm@10.5.2",
   "engines": {
-    "node": ">=14.0.0"
+    "node": ">=20.0.0"
   },
   "files": [
     "dist",
@@ -111,7 +111,7 @@
     "format": "prettier --write ."
   },
   "overrides": {
-    "@langchain/openai": "0.5.18",
+    "@langchain/openai": "1.4.5",
     "@anthropic-ai/sdk": "$@anthropic-ai/sdk",
     "@browserbasehq/stagehand": {
       "openai": "$openai"
@@ -121,19 +121,19 @@
     "minimatch": "3.1.4"
   },
   "dependencies": {
-    "@anthropic-ai/sdk": "^0.73.0",
+    "@anthropic-ai/sdk": "^0.92.0",
     "@aws-sdk/client-bedrock-runtime": "^3.1013.0",
-    "@langchain/anthropic": "^0.3.26",
-    "@langchain/aws": "^0.1.15",
-    "@langchain/core": "^0.3.80",
-    "@langchain/deepseek": "^0.0.2",
-    "@langchain/google-genai": "^0.2.18",
-    "@langchain/google-vertexai": "^0.2.18",
-    "@langchain/langgraph": "^0.4.9",
-    "@langchain/mistralai": "^0.2.1",
-    "@langchain/openai": "0.5.18",
-    "@langchain/textsplitters": "^0.1.0",
-    "@langchain/xai": "^0.0.3",
+    "@langchain/anthropic": "^1.3.28",
+    "@langchain/aws": "^1.3.5",
+    "@langchain/core": "^1.1.42",
+    "@langchain/deepseek": "^1.0.25",
+    "@langchain/google-genai": "^2.1.29",
+    "@langchain/google-vertexai": "^2.1.29",
+    "@langchain/langgraph": "^1.2.9",
+    "@langchain/mistralai": "^1.0.8",
+    "@langchain/openai": "1.4.5",
+    "@langchain/textsplitters": "^1.0.1",
+    "@langchain/xai": "^1.3.17",
     "@langfuse/langchain": "^4.3.0",
     "@langfuse/otel": "^4.3.0",
     "@langfuse/tracing": "^4.3.0",
@@ -147,7 +147,8 @@
     "mathjs": "^15.2.0",
     "nanoid": "^3.3.7",
     "okapibm25": "^1.4.1",
-    "openai": "5.8.2"
+    "openai": "^6.35.0",
+    "uuid": "^11.1.1"
   },
   "imports": {
     "@/*": "./src/*",