@librechat/agents 3.1.75 → 3.1.77-dev.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (272)
  1. package/dist/cjs/graphs/Graph.cjs +22 -3
  2. package/dist/cjs/graphs/Graph.cjs.map +1 -1
  3. package/dist/cjs/hitl/askUserQuestion.cjs +67 -0
  4. package/dist/cjs/hitl/askUserQuestion.cjs.map +1 -0
  5. package/dist/cjs/hooks/HookRegistry.cjs +54 -0
  6. package/dist/cjs/hooks/HookRegistry.cjs.map +1 -1
  7. package/dist/cjs/hooks/createToolPolicyHook.cjs +115 -0
  8. package/dist/cjs/hooks/createToolPolicyHook.cjs.map +1 -0
  9. package/dist/cjs/hooks/executeHooks.cjs +40 -1
  10. package/dist/cjs/hooks/executeHooks.cjs.map +1 -1
  11. package/dist/cjs/hooks/types.cjs +1 -0
  12. package/dist/cjs/hooks/types.cjs.map +1 -1
  13. package/dist/cjs/langchain/google-common.cjs +3 -0
  14. package/dist/cjs/langchain/google-common.cjs.map +1 -0
  15. package/dist/cjs/langchain/index.cjs +86 -0
  16. package/dist/cjs/langchain/index.cjs.map +1 -0
  17. package/dist/cjs/langchain/language_models/chat_models.cjs +3 -0
  18. package/dist/cjs/langchain/language_models/chat_models.cjs.map +1 -0
  19. package/dist/cjs/langchain/messages/tool.cjs +3 -0
  20. package/dist/cjs/langchain/messages/tool.cjs.map +1 -0
  21. package/dist/cjs/langchain/messages.cjs +51 -0
  22. package/dist/cjs/langchain/messages.cjs.map +1 -0
  23. package/dist/cjs/langchain/openai.cjs +3 -0
  24. package/dist/cjs/langchain/openai.cjs.map +1 -0
  25. package/dist/cjs/langchain/prompts.cjs +11 -0
  26. package/dist/cjs/langchain/prompts.cjs.map +1 -0
  27. package/dist/cjs/langchain/runnables.cjs +19 -0
  28. package/dist/cjs/langchain/runnables.cjs.map +1 -0
  29. package/dist/cjs/langchain/tools.cjs +23 -0
  30. package/dist/cjs/langchain/tools.cjs.map +1 -0
  31. package/dist/cjs/langchain/utils/env.cjs +11 -0
  32. package/dist/cjs/langchain/utils/env.cjs.map +1 -0
  33. package/dist/cjs/llm/anthropic/index.cjs +145 -52
  34. package/dist/cjs/llm/anthropic/index.cjs.map +1 -1
  35. package/dist/cjs/llm/anthropic/types.cjs.map +1 -1
  36. package/dist/cjs/llm/anthropic/utils/message_inputs.cjs +21 -14
  37. package/dist/cjs/llm/anthropic/utils/message_inputs.cjs.map +1 -1
  38. package/dist/cjs/llm/anthropic/utils/message_outputs.cjs +84 -70
  39. package/dist/cjs/llm/anthropic/utils/message_outputs.cjs.map +1 -1
  40. package/dist/cjs/llm/bedrock/index.cjs +1 -1
  41. package/dist/cjs/llm/bedrock/index.cjs.map +1 -1
  42. package/dist/cjs/llm/bedrock/utils/message_inputs.cjs +213 -3
  43. package/dist/cjs/llm/bedrock/utils/message_inputs.cjs.map +1 -1
  44. package/dist/cjs/llm/bedrock/utils/message_outputs.cjs +2 -1
  45. package/dist/cjs/llm/bedrock/utils/message_outputs.cjs.map +1 -1
  46. package/dist/cjs/llm/google/utils/common.cjs +5 -4
  47. package/dist/cjs/llm/google/utils/common.cjs.map +1 -1
  48. package/dist/cjs/llm/openai/index.cjs +519 -655
  49. package/dist/cjs/llm/openai/index.cjs.map +1 -1
  50. package/dist/cjs/llm/openai/utils/index.cjs +20 -458
  51. package/dist/cjs/llm/openai/utils/index.cjs.map +1 -1
  52. package/dist/cjs/llm/openrouter/index.cjs +57 -175
  53. package/dist/cjs/llm/openrouter/index.cjs.map +1 -1
  54. package/dist/cjs/llm/vertexai/index.cjs +5 -3
  55. package/dist/cjs/llm/vertexai/index.cjs.map +1 -1
  56. package/dist/cjs/main.cjs +112 -3
  57. package/dist/cjs/main.cjs.map +1 -1
  58. package/dist/cjs/messages/cache.cjs +2 -1
  59. package/dist/cjs/messages/cache.cjs.map +1 -1
  60. package/dist/cjs/messages/core.cjs +7 -6
  61. package/dist/cjs/messages/core.cjs.map +1 -1
  62. package/dist/cjs/messages/format.cjs +73 -15
  63. package/dist/cjs/messages/format.cjs.map +1 -1
  64. package/dist/cjs/messages/langchain.cjs +26 -0
  65. package/dist/cjs/messages/langchain.cjs.map +1 -0
  66. package/dist/cjs/messages/prune.cjs +7 -6
  67. package/dist/cjs/messages/prune.cjs.map +1 -1
  68. package/dist/cjs/run.cjs +400 -42
  69. package/dist/cjs/run.cjs.map +1 -1
  70. package/dist/cjs/tools/ToolNode.cjs +556 -56
  71. package/dist/cjs/tools/ToolNode.cjs.map +1 -1
  72. package/dist/cjs/tools/search/search.cjs +55 -66
  73. package/dist/cjs/tools/search/search.cjs.map +1 -1
  74. package/dist/cjs/tools/search/tavily-scraper.cjs +189 -0
  75. package/dist/cjs/tools/search/tavily-scraper.cjs.map +1 -0
  76. package/dist/cjs/tools/search/tavily-search.cjs +372 -0
  77. package/dist/cjs/tools/search/tavily-search.cjs.map +1 -0
  78. package/dist/cjs/tools/search/tool.cjs +26 -4
  79. package/dist/cjs/tools/search/tool.cjs.map +1 -1
  80. package/dist/cjs/tools/search/utils.cjs +10 -3
  81. package/dist/cjs/tools/search/utils.cjs.map +1 -1
  82. package/dist/esm/graphs/Graph.mjs +22 -3
  83. package/dist/esm/graphs/Graph.mjs.map +1 -1
  84. package/dist/esm/hitl/askUserQuestion.mjs +65 -0
  85. package/dist/esm/hitl/askUserQuestion.mjs.map +1 -0
  86. package/dist/esm/hooks/HookRegistry.mjs +54 -0
  87. package/dist/esm/hooks/HookRegistry.mjs.map +1 -1
  88. package/dist/esm/hooks/createToolPolicyHook.mjs +113 -0
  89. package/dist/esm/hooks/createToolPolicyHook.mjs.map +1 -0
  90. package/dist/esm/hooks/executeHooks.mjs +40 -1
  91. package/dist/esm/hooks/executeHooks.mjs.map +1 -1
  92. package/dist/esm/hooks/types.mjs +1 -0
  93. package/dist/esm/hooks/types.mjs.map +1 -1
  94. package/dist/esm/langchain/google-common.mjs +2 -0
  95. package/dist/esm/langchain/google-common.mjs.map +1 -0
  96. package/dist/esm/langchain/index.mjs +5 -0
  97. package/dist/esm/langchain/index.mjs.map +1 -0
  98. package/dist/esm/langchain/language_models/chat_models.mjs +2 -0
  99. package/dist/esm/langchain/language_models/chat_models.mjs.map +1 -0
  100. package/dist/esm/langchain/messages/tool.mjs +2 -0
  101. package/dist/esm/langchain/messages/tool.mjs.map +1 -0
  102. package/dist/esm/langchain/messages.mjs +2 -0
  103. package/dist/esm/langchain/messages.mjs.map +1 -0
  104. package/dist/esm/langchain/openai.mjs +2 -0
  105. package/dist/esm/langchain/openai.mjs.map +1 -0
  106. package/dist/esm/langchain/prompts.mjs +2 -0
  107. package/dist/esm/langchain/prompts.mjs.map +1 -0
  108. package/dist/esm/langchain/runnables.mjs +2 -0
  109. package/dist/esm/langchain/runnables.mjs.map +1 -0
  110. package/dist/esm/langchain/tools.mjs +2 -0
  111. package/dist/esm/langchain/tools.mjs.map +1 -0
  112. package/dist/esm/langchain/utils/env.mjs +2 -0
  113. package/dist/esm/langchain/utils/env.mjs.map +1 -0
  114. package/dist/esm/llm/anthropic/index.mjs +146 -54
  115. package/dist/esm/llm/anthropic/index.mjs.map +1 -1
  116. package/dist/esm/llm/anthropic/types.mjs.map +1 -1
  117. package/dist/esm/llm/anthropic/utils/message_inputs.mjs +21 -14
  118. package/dist/esm/llm/anthropic/utils/message_inputs.mjs.map +1 -1
  119. package/dist/esm/llm/anthropic/utils/message_outputs.mjs +84 -71
  120. package/dist/esm/llm/anthropic/utils/message_outputs.mjs.map +1 -1
  121. package/dist/esm/llm/bedrock/index.mjs +1 -1
  122. package/dist/esm/llm/bedrock/index.mjs.map +1 -1
  123. package/dist/esm/llm/bedrock/utils/message_inputs.mjs +214 -4
  124. package/dist/esm/llm/bedrock/utils/message_inputs.mjs.map +1 -1
  125. package/dist/esm/llm/bedrock/utils/message_outputs.mjs +2 -1
  126. package/dist/esm/llm/bedrock/utils/message_outputs.mjs.map +1 -1
  127. package/dist/esm/llm/google/utils/common.mjs +5 -4
  128. package/dist/esm/llm/google/utils/common.mjs.map +1 -1
  129. package/dist/esm/llm/openai/index.mjs +520 -656
  130. package/dist/esm/llm/openai/index.mjs.map +1 -1
  131. package/dist/esm/llm/openai/utils/index.mjs +23 -459
  132. package/dist/esm/llm/openai/utils/index.mjs.map +1 -1
  133. package/dist/esm/llm/openrouter/index.mjs +57 -175
  134. package/dist/esm/llm/openrouter/index.mjs.map +1 -1
  135. package/dist/esm/llm/vertexai/index.mjs +5 -3
  136. package/dist/esm/llm/vertexai/index.mjs.map +1 -1
  137. package/dist/esm/main.mjs +7 -0
  138. package/dist/esm/main.mjs.map +1 -1
  139. package/dist/esm/messages/cache.mjs +2 -1
  140. package/dist/esm/messages/cache.mjs.map +1 -1
  141. package/dist/esm/messages/core.mjs +7 -6
  142. package/dist/esm/messages/core.mjs.map +1 -1
  143. package/dist/esm/messages/format.mjs +73 -15
  144. package/dist/esm/messages/format.mjs.map +1 -1
  145. package/dist/esm/messages/langchain.mjs +23 -0
  146. package/dist/esm/messages/langchain.mjs.map +1 -0
  147. package/dist/esm/messages/prune.mjs +7 -6
  148. package/dist/esm/messages/prune.mjs.map +1 -1
  149. package/dist/esm/run.mjs +400 -42
  150. package/dist/esm/run.mjs.map +1 -1
  151. package/dist/esm/tools/ToolNode.mjs +557 -57
  152. package/dist/esm/tools/ToolNode.mjs.map +1 -1
  153. package/dist/esm/tools/search/search.mjs +55 -66
  154. package/dist/esm/tools/search/search.mjs.map +1 -1
  155. package/dist/esm/tools/search/tavily-scraper.mjs +186 -0
  156. package/dist/esm/tools/search/tavily-scraper.mjs.map +1 -0
  157. package/dist/esm/tools/search/tavily-search.mjs +370 -0
  158. package/dist/esm/tools/search/tavily-search.mjs.map +1 -0
  159. package/dist/esm/tools/search/tool.mjs +26 -4
  160. package/dist/esm/tools/search/tool.mjs.map +1 -1
  161. package/dist/esm/tools/search/utils.mjs +10 -3
  162. package/dist/esm/tools/search/utils.mjs.map +1 -1
  163. package/dist/types/graphs/Graph.d.ts +7 -0
  164. package/dist/types/hitl/askUserQuestion.d.ts +55 -0
  165. package/dist/types/hitl/index.d.ts +6 -0
  166. package/dist/types/hooks/HookRegistry.d.ts +58 -0
  167. package/dist/types/hooks/createToolPolicyHook.d.ts +87 -0
  168. package/dist/types/hooks/index.d.ts +4 -1
  169. package/dist/types/hooks/types.d.ts +109 -3
  170. package/dist/types/index.d.ts +10 -0
  171. package/dist/types/langchain/google-common.d.ts +1 -0
  172. package/dist/types/langchain/index.d.ts +8 -0
  173. package/dist/types/langchain/language_models/chat_models.d.ts +1 -0
  174. package/dist/types/langchain/messages/tool.d.ts +1 -0
  175. package/dist/types/langchain/messages.d.ts +2 -0
  176. package/dist/types/langchain/openai.d.ts +1 -0
  177. package/dist/types/langchain/prompts.d.ts +1 -0
  178. package/dist/types/langchain/runnables.d.ts +2 -0
  179. package/dist/types/langchain/tools.d.ts +2 -0
  180. package/dist/types/langchain/utils/env.d.ts +1 -0
  181. package/dist/types/llm/anthropic/index.d.ts +22 -9
  182. package/dist/types/llm/anthropic/types.d.ts +5 -1
  183. package/dist/types/llm/anthropic/utils/message_outputs.d.ts +13 -6
  184. package/dist/types/llm/anthropic/utils/output_parsers.d.ts +1 -1
  185. package/dist/types/llm/openai/index.d.ts +21 -24
  186. package/dist/types/llm/openrouter/index.d.ts +11 -9
  187. package/dist/types/llm/vertexai/index.d.ts +1 -0
  188. package/dist/types/messages/cache.d.ts +4 -1
  189. package/dist/types/messages/format.d.ts +4 -1
  190. package/dist/types/messages/langchain.d.ts +27 -0
  191. package/dist/types/run.d.ts +117 -1
  192. package/dist/types/tools/ToolNode.d.ts +26 -1
  193. package/dist/types/tools/search/tavily-scraper.d.ts +19 -0
  194. package/dist/types/tools/search/tavily-search.d.ts +4 -0
  195. package/dist/types/tools/search/types.d.ts +99 -5
  196. package/dist/types/tools/search/utils.d.ts +2 -2
  197. package/dist/types/types/graph.d.ts +23 -37
  198. package/dist/types/types/hitl.d.ts +272 -0
  199. package/dist/types/types/index.d.ts +1 -0
  200. package/dist/types/types/llm.d.ts +3 -3
  201. package/dist/types/types/run.d.ts +33 -0
  202. package/dist/types/types/stream.d.ts +1 -1
  203. package/dist/types/types/tools.d.ts +19 -0
  204. package/package.json +80 -17
  205. package/src/graphs/Graph.ts +33 -4
  206. package/src/graphs/__tests__/composition.smoke.test.ts +188 -0
  207. package/src/hitl/askUserQuestion.ts +72 -0
  208. package/src/hitl/index.ts +7 -0
  209. package/src/hooks/HookRegistry.ts +71 -0
  210. package/src/hooks/__tests__/createToolPolicyHook.test.ts +259 -0
  211. package/src/hooks/createToolPolicyHook.ts +184 -0
  212. package/src/hooks/executeHooks.ts +50 -1
  213. package/src/hooks/index.ts +6 -0
  214. package/src/hooks/types.ts +112 -0
  215. package/src/index.ts +22 -0
  216. package/src/langchain/google-common.ts +1 -0
  217. package/src/langchain/index.ts +8 -0
  218. package/src/langchain/language_models/chat_models.ts +1 -0
  219. package/src/langchain/messages/tool.ts +5 -0
  220. package/src/langchain/messages.ts +21 -0
  221. package/src/langchain/openai.ts +1 -0
  222. package/src/langchain/prompts.ts +1 -0
  223. package/src/langchain/runnables.ts +7 -0
  224. package/src/langchain/tools.ts +8 -0
  225. package/src/langchain/utils/env.ts +1 -0
  226. package/src/llm/anthropic/index.ts +252 -84
  227. package/src/llm/anthropic/llm.spec.ts +751 -102
  228. package/src/llm/anthropic/types.ts +9 -1
  229. package/src/llm/anthropic/utils/message_inputs.ts +37 -19
  230. package/src/llm/anthropic/utils/message_outputs.ts +119 -101
  231. package/src/llm/bedrock/index.ts +2 -2
  232. package/src/llm/bedrock/llm.spec.ts +341 -0
  233. package/src/llm/bedrock/utils/message_inputs.ts +303 -4
  234. package/src/llm/bedrock/utils/message_outputs.ts +2 -1
  235. package/src/llm/custom-chat-models.smoke.test.ts +836 -0
  236. package/src/llm/google/llm.spec.ts +339 -57
  237. package/src/llm/google/utils/common.ts +53 -48
  238. package/src/llm/openai/contentBlocks.test.ts +346 -0
  239. package/src/llm/openai/index.ts +856 -833
  240. package/src/llm/openai/utils/index.ts +107 -78
  241. package/src/llm/openai/utils/messages.test.ts +159 -0
  242. package/src/llm/openrouter/index.ts +124 -247
  243. package/src/llm/openrouter/reasoning.test.ts +8 -1
  244. package/src/llm/vertexai/index.ts +11 -5
  245. package/src/llm/vertexai/llm.spec.ts +28 -1
  246. package/src/messages/cache.test.ts +4 -3
  247. package/src/messages/cache.ts +3 -2
  248. package/src/messages/core.ts +16 -9
  249. package/src/messages/format.ts +96 -16
  250. package/src/messages/formatAgentMessages.test.ts +166 -1
  251. package/src/messages/langchain.ts +39 -0
  252. package/src/messages/prune.ts +12 -8
  253. package/src/run.ts +456 -47
  254. package/src/scripts/caching.ts +2 -3
  255. package/src/specs/summarization.test.ts +51 -58
  256. package/src/tools/ToolNode.ts +706 -63
  257. package/src/tools/__tests__/hitl.test.ts +3593 -0
  258. package/src/tools/search/search.ts +83 -73
  259. package/src/tools/search/tavily-scraper.ts +235 -0
  260. package/src/tools/search/tavily-search.ts +424 -0
  261. package/src/tools/search/tavily.test.ts +965 -0
  262. package/src/tools/search/tool.ts +36 -26
  263. package/src/tools/search/types.ts +133 -8
  264. package/src/tools/search/utils.ts +13 -5
  265. package/src/types/graph.ts +32 -87
  266. package/src/types/hitl.ts +303 -0
  267. package/src/types/index.ts +1 -0
  268. package/src/types/llm.ts +3 -3
  269. package/src/types/run.ts +33 -0
  270. package/src/types/stream.ts +1 -1
  271. package/src/types/tools.ts +19 -0
  272. package/src/utils/llmConfig.ts +1 -6
@@ -1,33 +1,12 @@
1
1
  import { ChatOpenAI } from '@/llm/openai';
2
- import { ChatGenerationChunk } from '@langchain/core/outputs';
3
- import { CallbackManagerForLLMRun } from '@langchain/core/callbacks/manager';
4
- import { AIMessageChunk as AIMessageChunkClass } from '@langchain/core/messages';
5
- import type {
6
- FunctionMessageChunk,
7
- SystemMessageChunk,
8
- HumanMessageChunk,
9
- ToolMessageChunk,
10
- ChatMessageChunk,
11
- AIMessageChunk,
12
- BaseMessage,
13
- } from '@langchain/core/messages';
2
+ import type { BaseMessage } from '@langchain/core/messages';
3
+ import type { ChatGenerationChunk } from '@langchain/core/outputs';
4
+ import type { CallbackManagerForLLMRun } from '@langchain/core/callbacks/manager';
14
5
  import type {
15
6
  ChatOpenAICallOptions,
16
7
  OpenAIChatInput,
17
8
  OpenAIClient,
18
9
  } from '@langchain/openai';
19
- import { _convertMessagesToOpenAIParams } from '@/llm/openai/utils';
20
-
21
- type OpenAICompletionParam =
22
- OpenAIClient.Chat.Completions.ChatCompletionMessageParam;
23
-
24
- type OpenAIRoleEnum =
25
- | 'system'
26
- | 'developer'
27
- | 'assistant'
28
- | 'user'
29
- | 'function'
30
- | 'tool';
31
10
 
32
11
  export type OpenRouterReasoningEffort =
33
12
  | 'xhigh'
@@ -52,6 +31,10 @@ export interface ChatOpenRouterCallOptions
52
31
  modelKwargs?: OpenAIChatInput['modelKwargs'];
53
32
  }
54
33
 
34
+ export type ChatOpenRouterInput = Partial<
35
+ ChatOpenRouterCallOptions & OpenAIChatInput
36
+ >;
37
+
55
38
  /** invocationParams return type extended with OpenRouter reasoning */
56
39
  export type OpenRouterInvocationParams = Omit<
57
40
  OpenAIClient.Chat.ChatCompletionCreateParams,
@@ -59,12 +42,68 @@ export type OpenRouterInvocationParams = Omit<
59
42
  > & {
60
43
  reasoning?: OpenRouterReasoning;
61
44
  };
45
+
46
+ type InvocationParamsExtra = {
47
+ streaming?: boolean;
48
+ };
49
+
50
+ interface OpenRouterReasoningTextDetail {
51
+ type: 'reasoning.text';
52
+ text?: string;
53
+ format?: string;
54
+ index?: number;
55
+ }
56
+
57
+ interface OpenRouterReasoningEncryptedDetail {
58
+ type: 'reasoning.encrypted';
59
+ id?: string;
60
+ data?: string;
61
+ format?: string;
62
+ index?: number;
63
+ }
64
+
65
+ type OpenRouterReasoningDetail =
66
+ | OpenRouterReasoningTextDetail
67
+ | OpenRouterReasoningEncryptedDetail;
68
+
69
+ function isReasoningTextDetail(
70
+ value: unknown
71
+ ): value is OpenRouterReasoningTextDetail {
72
+ return (
73
+ typeof value === 'object' &&
74
+ value !== null &&
75
+ 'type' in value &&
76
+ value.type === 'reasoning.text'
77
+ );
78
+ }
79
+
80
+ function isReasoningEncryptedDetail(
81
+ value: unknown
82
+ ): value is OpenRouterReasoningEncryptedDetail {
83
+ return (
84
+ typeof value === 'object' &&
85
+ value !== null &&
86
+ 'type' in value &&
87
+ value.type === 'reasoning.encrypted'
88
+ );
89
+ }
90
+
91
+ function getReasoningDetails(value: unknown): OpenRouterReasoningDetail[] {
92
+ if (!Array.isArray(value)) {
93
+ return [];
94
+ }
95
+ return value.filter(
96
+ (detail): detail is OpenRouterReasoningDetail =>
97
+ isReasoningTextDetail(detail) || isReasoningEncryptedDetail(detail)
98
+ );
99
+ }
100
+
62
101
  export class ChatOpenRouter extends ChatOpenAI {
63
102
  private openRouterReasoning?: OpenRouterReasoning;
64
103
  /** @deprecated Use `reasoning` object instead */
65
104
  private includeReasoning?: boolean;
66
105
 
67
- constructor(_fields: Partial<ChatOpenRouterCallOptions>) {
106
+ constructor(_fields: ChatOpenRouterInput) {
68
107
  const {
69
108
  include_reasoning,
70
109
  reasoning: openRouterReasoning,
@@ -80,6 +119,8 @@ export class ChatOpenRouter extends ChatOpenAI {
80
119
  super({
81
120
  ...fields,
82
121
  modelKwargs: restModelKwargs,
122
+ includeReasoningDetails: true,
123
+ convertReasoningDetailsToContent: true,
83
124
  });
84
125
 
85
126
  // Merge reasoning config: modelKwargs.reasoning < constructor reasoning
@@ -101,21 +142,26 @@ export class ChatOpenRouter extends ChatOpenAI {
101
142
  // The parent's generic conditional return type cannot be widened in an override.
102
143
  override invocationParams(
103
144
  options?: this['ParsedCallOptions'],
104
- extra?: { streaming?: boolean }
145
+ extra?: InvocationParamsExtra
105
146
  ): OpenRouterInvocationParams {
106
147
  type MutableParams = Omit<
107
148
  OpenAIClient.Chat.ChatCompletionCreateParams,
108
149
  'messages'
109
150
  > & { reasoning_effort?: string; reasoning?: OpenRouterReasoning };
110
151
 
111
- const params = super.invocationParams(options, extra) as MutableParams;
152
+ const optionsWithDefaults = this._combineCallOptions(options);
153
+ const params = (
154
+ this._useResponsesApi(options)
155
+ ? this.responses.invocationParams(optionsWithDefaults)
156
+ : this.completions.invocationParams(optionsWithDefaults, extra)
157
+ ) as MutableParams;
112
158
 
113
159
  // Remove the OpenAI-native reasoning_effort that the parent sets;
114
160
  // OpenRouter uses a `reasoning` object instead
115
161
  delete params.reasoning_effort;
116
162
 
117
163
  // Build the OpenRouter reasoning config
118
- const reasoning = this.buildOpenRouterReasoning(options);
164
+ const reasoning = this.buildOpenRouterReasoning(optionsWithDefaults);
119
165
  if (reasoning != null) {
120
166
  params.reasoning = reasoning;
121
167
  } else {
@@ -158,245 +204,76 @@ export class ChatOpenRouter extends ChatOpenAI {
158
204
 
159
205
  return reasoning;
160
206
  }
161
- protected override _convertOpenAIDeltaToBaseMessageChunk(
162
- // eslint-disable-next-line @typescript-eslint/no-explicit-any
163
- delta: Record<string, any>,
164
- rawResponse: OpenAIClient.ChatCompletionChunk,
165
- defaultRole?:
166
- | 'function'
167
- | 'user'
168
- | 'system'
169
- | 'developer'
170
- | 'assistant'
171
- | 'tool'
172
- ):
173
- | AIMessageChunk
174
- | HumanMessageChunk
175
- | SystemMessageChunk
176
- | FunctionMessageChunk
177
- | ToolMessageChunk
178
- | ChatMessageChunk {
179
- const messageChunk = super._convertOpenAIDeltaToBaseMessageChunk(
180
- delta,
181
- rawResponse,
182
- defaultRole
183
- );
184
- if (delta.reasoning != null) {
185
- messageChunk.additional_kwargs.reasoning = delta.reasoning;
186
- }
187
- if (delta.reasoning_details != null) {
188
- messageChunk.additional_kwargs.reasoning_details =
189
- delta.reasoning_details;
190
- }
191
- return messageChunk;
192
- }
193
207
 
194
- async *_streamResponseChunks2(
208
+ override async *_streamResponseChunks(
195
209
  messages: BaseMessage[],
196
210
  options: this['ParsedCallOptions'],
197
211
  runManager?: CallbackManagerForLLMRun
198
212
  ): AsyncGenerator<ChatGenerationChunk> {
199
- const messagesMapped: OpenAICompletionParam[] =
200
- _convertMessagesToOpenAIParams(messages, this.model, {
201
- includeReasoningDetails: true,
202
- convertReasoningDetailsToContent: true,
203
- });
204
-
205
- const params = {
206
- ...this.invocationParams(options, {
207
- streaming: true,
208
- }),
209
- messages: messagesMapped,
210
- stream: true as const,
211
- };
212
- let defaultRole: OpenAIRoleEnum | undefined;
213
-
214
- const streamIterable = await this.completionWithRetry(params, options);
215
- let usage: OpenAIClient.Completions.CompletionUsage | undefined;
216
-
217
- // Store reasoning_details keyed by unique identifier to prevent incorrect merging
218
- // eslint-disable-next-line @typescript-eslint/no-explicit-any
219
- const reasoningTextByIndex: Map<number, Record<string, any>> = new Map();
220
- // eslint-disable-next-line @typescript-eslint/no-explicit-any
221
- const reasoningEncryptedById: Map<string, Record<string, any>> = new Map();
222
-
223
- for await (const data of streamIterable) {
224
- const choice = data.choices[0] as
225
- | Partial<OpenAIClient.Chat.Completions.ChatCompletionChunk.Choice>
226
- | undefined;
227
- if (data.usage) {
228
- usage = data.usage;
229
- }
230
- if (!choice) {
231
- continue;
232
- }
233
-
234
- const { delta } = choice;
235
- if (!delta) {
236
- continue;
237
- }
213
+ const reasoningTextByIndex = new Map<
214
+ number,
215
+ OpenRouterReasoningTextDetail
216
+ >();
217
+ const reasoningEncryptedById = new Map<
218
+ string,
219
+ OpenRouterReasoningEncryptedDetail
220
+ >();
221
+
222
+ for await (const generationChunk of super._streamResponseChunks(
223
+ messages,
224
+ options,
225
+ runManager
226
+ )) {
227
+ let currentReasoningText = '';
228
+ const reasoningDetails = getReasoningDetails(
229
+ generationChunk.message.additional_kwargs.reasoning_details
230
+ );
238
231
 
239
- // Accumulate reasoning_details from each delta
240
- // eslint-disable-next-line @typescript-eslint/no-explicit-any
241
- const deltaAny = delta as Record<string, any>;
242
- // Extract current chunk's reasoning text for streaming (before accumulation)
243
- let currentChunkReasoningText = '';
244
- if (
245
- deltaAny.reasoning_details != null &&
246
- Array.isArray(deltaAny.reasoning_details)
247
- ) {
248
- for (const detail of deltaAny.reasoning_details) {
249
- // For encrypted reasoning (thought signatures), store by ID - MUST be separate
250
- if (detail.type === 'reasoning.encrypted' && detail.id) {
251
- reasoningEncryptedById.set(detail.id, {
252
- type: detail.type,
253
- id: detail.id,
254
- data: detail.data,
255
- format: detail.format,
256
- index: detail.index,
257
- });
258
- } else if (detail.type === 'reasoning.text') {
259
- // Extract current chunk's text for streaming
260
- currentChunkReasoningText += detail.text || '';
261
- // For text reasoning, accumulate text by index for final message
262
- const idx = detail.index ?? 0;
263
- const existing = reasoningTextByIndex.get(idx);
264
- if (existing) {
265
- // Only append text, keep other fields from first entry
266
- existing.text = (existing.text || '') + (detail.text || '');
267
- } else {
268
- reasoningTextByIndex.set(idx, {
269
- type: detail.type,
270
- text: detail.text || '',
271
- format: detail.format,
272
- index: idx,
273
- });
274
- }
232
+ for (const detail of reasoningDetails) {
233
+ if (detail.type === 'reasoning.text') {
234
+ currentReasoningText += detail.text ?? '';
235
+ const index = detail.index ?? 0;
236
+ const existing = reasoningTextByIndex.get(index);
237
+ if (existing != null) {
238
+ existing.text = `${existing.text ?? ''}${detail.text ?? ''}`;
239
+ continue;
275
240
  }
241
+ reasoningTextByIndex.set(index, {
242
+ ...detail,
243
+ text: detail.text ?? '',
244
+ });
245
+ continue;
246
+ }
247
+ if (detail.id != null) {
248
+ reasoningEncryptedById.set(detail.id, { ...detail });
276
249
  }
277
250
  }
278
251
 
279
- const chunk = this._convertOpenAIDeltaToBaseMessageChunk(
280
- delta,
281
- data,
282
- defaultRole
283
- );
284
-
285
- // For models that send reasoning_details (Gemini style) instead of reasoning (DeepSeek style),
286
- // set the current chunk's reasoning text to additional_kwargs.reasoning for streaming
287
- if (currentChunkReasoningText && !chunk.additional_kwargs.reasoning) {
288
- chunk.additional_kwargs.reasoning = currentChunkReasoningText;
252
+ if (
253
+ currentReasoningText.length > 0 &&
254
+ generationChunk.message.additional_kwargs.reasoning == null
255
+ ) {
256
+ generationChunk.message.additional_kwargs.reasoning =
257
+ currentReasoningText;
289
258
  }
290
259
 
291
- // IMPORTANT: Only set reasoning_details on the FINAL chunk to prevent
292
- // LangChain's chunk concatenation from corrupting the array
293
- // Check if this is the final chunk (has finish_reason)
294
- if (choice.finish_reason != null) {
295
- // Build properly structured reasoning_details array
296
- // Text entries first (but we only need the encrypted ones for thought signatures)
297
- // eslint-disable-next-line @typescript-eslint/no-explicit-any
298
- const finalReasoningDetails: Record<string, any>[] = [
260
+ if (generationChunk.generationInfo?.finish_reason != null) {
261
+ const finalReasoningDetails = [
299
262
  ...reasoningTextByIndex.values(),
300
263
  ...reasoningEncryptedById.values(),
301
264
  ];
302
-
303
265
  if (finalReasoningDetails.length > 0) {
304
- chunk.additional_kwargs.reasoning_details = finalReasoningDetails;
266
+ generationChunk.message.additional_kwargs.reasoning_details =
267
+ finalReasoningDetails;
268
+ } else {
269
+ delete generationChunk.message.additional_kwargs.reasoning_details;
305
270
  }
306
- } else {
307
- // Clear reasoning_details from intermediate chunks to prevent concatenation issues
308
- delete chunk.additional_kwargs.reasoning_details;
309
- }
310
-
311
- defaultRole = delta.role ?? defaultRole;
312
- const newTokenIndices = {
313
- prompt: options.promptIndex ?? 0,
314
- completion: choice.index ?? 0,
315
- };
316
- if (typeof chunk.content !== 'string') {
317
- // eslint-disable-next-line no-console
318
- console.log(
319
- '[WARNING]: Received non-string content from OpenAI. This is currently not supported.'
320
- );
271
+ yield generationChunk;
321
272
  continue;
322
273
  }
323
- // eslint-disable-next-line @typescript-eslint/no-explicit-any
324
- const generationInfo: Record<string, any> = { ...newTokenIndices };
325
- if (choice.finish_reason != null) {
326
- generationInfo.finish_reason = choice.finish_reason;
327
- generationInfo.system_fingerprint = data.system_fingerprint;
328
- generationInfo.model_name = data.model;
329
- generationInfo.service_tier = data.service_tier;
330
- }
331
- if (this.logprobs == true) {
332
- generationInfo.logprobs = choice.logprobs;
333
- }
334
- const generationChunk = new ChatGenerationChunk({
335
- message: chunk,
336
- text: chunk.content,
337
- generationInfo,
338
- });
339
- yield generationChunk;
340
- if (this._lc_stream_delay != null) {
341
- await new Promise((resolve) =>
342
- setTimeout(resolve, this._lc_stream_delay)
343
- );
344
- }
345
- await runManager?.handleLLMNewToken(
346
- generationChunk.text || '',
347
- newTokenIndices,
348
- undefined,
349
- undefined,
350
- undefined,
351
- { chunk: generationChunk }
352
- );
353
- }
354
- if (usage) {
355
- const inputTokenDetails = {
356
- ...(usage.prompt_tokens_details?.audio_tokens != null && {
357
- audio: usage.prompt_tokens_details.audio_tokens,
358
- }),
359
- ...(usage.prompt_tokens_details?.cached_tokens != null && {
360
- cache_read: usage.prompt_tokens_details.cached_tokens,
361
- }),
362
- };
363
- const outputTokenDetails = {
364
- ...(usage.completion_tokens_details?.audio_tokens != null && {
365
- audio: usage.completion_tokens_details.audio_tokens,
366
- }),
367
- ...(usage.completion_tokens_details?.reasoning_tokens != null && {
368
- reasoning: usage.completion_tokens_details.reasoning_tokens,
369
- }),
370
- };
371
- const generationChunk = new ChatGenerationChunk({
372
- message: new AIMessageChunkClass({
373
- content: '',
374
- response_metadata: {
375
- usage: { ...usage },
376
- },
377
- usage_metadata: {
378
- input_tokens: usage.prompt_tokens,
379
- output_tokens: usage.completion_tokens,
380
- total_tokens: usage.total_tokens,
381
- ...(Object.keys(inputTokenDetails).length > 0 && {
382
- input_token_details: inputTokenDetails,
383
- }),
384
- ...(Object.keys(outputTokenDetails).length > 0 && {
385
- output_token_details: outputTokenDetails,
386
- }),
387
- },
388
- }),
389
- text: '',
390
- });
274
+
275
+ delete generationChunk.message.additional_kwargs.reasoning_details;
391
276
  yield generationChunk;
392
- if (this._lc_stream_delay != null) {
393
- await new Promise((resolve) =>
394
- setTimeout(resolve, this._lc_stream_delay)
395
- );
396
- }
397
- }
398
- if (options.signal?.aborted === true) {
399
- throw new Error('AbortError');
400
277
  }
401
278
  }
402
279
  }
@@ -3,7 +3,8 @@ import type { OpenRouterReasoning, ChatOpenRouterCallOptions } from './index';
3
3
  import type { OpenAIChatInput } from '@langchain/openai';
4
4
 
5
5
  type CreateRouterOptions = Partial<
6
- ChatOpenRouterCallOptions & Pick<OpenAIChatInput, 'model' | 'apiKey'>
6
+ ChatOpenRouterCallOptions &
7
+ Pick<OpenAIChatInput, 'model' | 'apiKey' | 'streamUsage'>
7
8
  >;
8
9
 
9
10
  function createRouter(overrides: CreateRouterOptions = {}): ChatOpenRouter {
@@ -96,6 +97,12 @@ describe('ChatOpenRouter reasoning handling', () => {
96
97
  expect(params.reasoning).toBeUndefined();
97
98
  expect(params.reasoning_effort).toBeUndefined();
98
99
  });
100
+
101
+ it('preserves streaming extras from parent invocation params', () => {
102
+ const router = createRouter({ streamUsage: true });
103
+ const params = router.invocationParams(undefined, { streaming: true });
104
+ expect(params.stream_options).toEqual({ include_usage: true });
105
+ });
99
106
  });
100
107
 
101
108
  // ---------------------------------------------------------------
@@ -97,10 +97,7 @@ class CustomChatConnection extends ChatConnection<VertexAIClientOptions> {
97
97
  }
98
98
  delete formattedData.generationConfig.thinkingConfig.thinkingBudget;
99
99
  }
100
- if (
101
- this.thinkingConfig?.thinkingLevel != null &&
102
- this.thinkingConfig.thinkingLevel !== ''
103
- ) {
100
+ if (this.thinkingConfig?.thinkingLevel != null) {
104
101
  formattedData.generationConfig ??= {};
105
102
  // thinkingLevel and thinkingBudget cannot coexist — the API rejects the request.
106
103
  // Remove thinkingBudget when thinkingLevel is set.
@@ -422,7 +419,16 @@ export class ChatVertexAI extends ChatGoogle {
422
419
  return 'LibreChatVertexAI';
423
420
  }
424
421
 
425
- constructor(fields?: VertexAIClientOptions) {
422
+ constructor(model: string, fields?: Omit<VertexAIClientOptions, 'model'>);
423
+ constructor(fields?: VertexAIClientOptions);
424
+ constructor(
425
+ modelOrFields?: string | VertexAIClientOptions,
426
+ params?: Omit<VertexAIClientOptions, 'model'>
427
+ ) {
428
+ const fields =
429
+ typeof modelOrFields === 'string'
430
+ ? { ...(params ?? {}), model: modelOrFields }
431
+ : modelOrFields;
426
432
  const dynamicThinkingBudget = fields?.thinkingBudget === -1;
427
433
  super({
428
434
  ...fields,
@@ -1,6 +1,6 @@
1
1
  import { config } from 'dotenv';
2
2
  config();
3
- import { test, describe, jest } from '@jest/globals';
3
+ import { expect, test, describe, jest } from '@jest/globals';
4
4
 
5
5
  jest.setTimeout(90000);
6
6
  import {
@@ -26,6 +26,33 @@ const weatherTool = tool(async () => 'The weather is 80 degrees and sunny', {
26
26
  }),
27
27
  });
28
28
 
29
+ describe('ChatVertexAI upstream compatibility', () => {
30
+ test('serialization uses the LibreChat constructor name on the Vertex namespace', () => {
31
+ const model = new ChatVertexAI();
32
+ expect(JSON.stringify(model)).toEqual(
33
+ '{"lc":1,"type":"constructor","id":["langchain","chat_models","vertexai","LibreChatVertexAI"],"kwargs":{"platform_type":"gcp"}}'
34
+ );
35
+ });
36
+
37
+ test('labels parameter support', () => {
38
+ expect(() => {
39
+ const model = new ChatVertexAI({
40
+ labels: {
41
+ team: 'test',
42
+ environment: 'development',
43
+ },
44
+ });
45
+ expect(model.platform).toEqual('gcp');
46
+ }).not.toThrow();
47
+ });
48
+
49
+ test('constructor overload supports model string', () => {
50
+ const model = new ChatVertexAI('gemini-1.5-pro');
51
+ expect(model.model).toEqual('gemini-1.5-pro');
52
+ expect(model.platform).toEqual('gcp');
53
+ });
54
+ });
55
+
29
56
  describe.each(gemini3Models)(
30
57
  'Vertex AI reasoning with thinkingLevel (%s)',
31
58
  (modelName) => {
@@ -4,8 +4,8 @@ import {
4
4
  HumanMessage,
5
5
  SystemMessage,
6
6
  ToolMessage,
7
- MessageContentComplex,
8
7
  } from '@langchain/core/messages';
8
+ import type { MessageContentComplex } from '@langchain/core/messages';
9
9
  import type Anthropic from '@anthropic-ai/sdk';
10
10
  import type { AnthropicMessages } from '@/types/messages';
11
11
  import {
@@ -14,6 +14,7 @@ import {
14
14
  addBedrockCacheControl,
15
15
  addCacheControl,
16
16
  } from './cache';
17
+ import { toLangChainContent } from './langchain';
17
18
  import { ContentTypes } from '@/common/enum';
18
19
 
19
20
  describe('addCacheControl', () => {
@@ -412,7 +413,7 @@ describe('addBedrockCacheControl (Bedrock cache checkpoints)', () => {
412
413
  { type: ContentTypes.TEXT, text: 'Dynamic system text' },
413
414
  ] as MessageContentComplex[];
414
415
  const messages: BaseMessage[] = [
415
- new SystemMessage({ content: systemContent }),
416
+ new SystemMessage({ content: toLangChainContent(systemContent) }),
416
417
  new HumanMessage('Hello'),
417
418
  new AIMessage('Hi'),
418
419
  ];
@@ -456,7 +457,7 @@ describe('addBedrockCacheControl (Bedrock cache checkpoints)', () => {
456
457
  } as MessageContentComplex,
457
458
  ] as MessageContentComplex[];
458
459
  const messages: BaseMessage[] = [
459
- new SystemMessage({ content: systemContent }),
460
+ new SystemMessage({ content: toLangChainContent(systemContent) }),
460
461
  new HumanMessage('Hello'),
461
462
  new AIMessage('Hi'),
462
463
  ];
@@ -9,6 +9,7 @@ import {
9
9
  import type { AnthropicMessage } from '@/types/messages';
10
10
  import type Anthropic from '@anthropic-ai/sdk';
11
11
  import { ContentTypes } from '@/common/enum';
12
+ import { toLangChainContent } from './langchain';
12
13
 
13
14
  type MessageWithContent = {
14
15
  content?: string | MessageContentComplex[];
@@ -45,7 +46,7 @@ function cloneMessage<T extends MessageWithContent>(
45
46
  ): T {
46
47
  if (message instanceof BaseMessage) {
47
48
  const baseParams = {
48
- content,
49
+ content: toLangChainContent(content),
49
50
  additional_kwargs: { ...message.additional_kwargs },
50
51
  response_metadata: { ...message.response_metadata },
51
52
  id: message.id,
@@ -337,7 +338,7 @@ export function stripBedrockCacheControl<T extends MessageWithContent>(
337
338
  * @returns - A new array of message objects with cache points added.
338
339
  */
339
340
  export function addBedrockCacheControl<
340
- T extends Partial<BaseMessage> & MessageWithContent,
341
+ T extends MessageWithContent & { getType?: () => string; role?: string },
341
342
  >(messages: T[]): T[] {
342
343
  if (!Array.isArray(messages) || messages.length < 2) {
343
344
  return messages;
@@ -9,6 +9,7 @@ import {
9
9
  import type { ToolCall } from '@langchain/core/messages/tool';
10
10
  import type * as t from '@/types';
11
11
  import { Providers } from '@/common';
12
+ import { toLangChainContent } from './langchain';
12
13
 
13
14
  export function getConverseOverrideMessage({
14
15
  userMessage,
@@ -153,14 +154,18 @@ export function modifyDeltaProperties(
153
154
  : '';
154
155
 
155
156
  if (provider === Providers.BEDROCK && Array.isArray(obj.content)) {
156
- obj.content = reduceBlocks(obj.content as ContentBlock[]);
157
+ obj.content = toLangChainContent(
158
+ reduceBlocks(obj.content as ContentBlock[])
159
+ );
157
160
  }
158
161
  if (Array.isArray(obj.content)) {
159
- obj.content = modifyContent({
160
- provider,
161
- messageType,
162
- content: obj.content,
163
- }) as t.MessageContentComplex[];
162
+ obj.content = toLangChainContent(
163
+ modifyContent({
164
+ provider,
165
+ messageType,
166
+ content: obj.content as t.ExtendedMessageContent[],
167
+ }) as t.MessageContentComplex[]
168
+ );
164
169
  }
165
170
  if (
166
171
  (obj as Partial<AIMessageChunk>).lc_kwargs &&
@@ -182,7 +187,7 @@ export function modifyDeltaProperties(
182
187
 
183
188
  export function formatAnthropicMessage(message: AIMessageChunk): AIMessage {
184
189
  if (!message.tool_calls || message.tool_calls.length === 0) {
185
- return new AIMessage({ content: message.content });
190
+ return new AIMessage({ content: toLangChainContent(message.content) });
186
191
  }
187
192
 
188
193
  const toolCallMap = new Map(message.tool_calls.map((tc) => [tc.id, tc]));
@@ -269,7 +274,7 @@ export function formatAnthropicMessage(message: AIMessageChunk): AIMessage {
269
274
  );
270
275
 
271
276
  return new AIMessage({
272
- content: formattedContent,
277
+ content: toLangChainContent(formattedContent),
273
278
  tool_calls: formattedToolCalls as ToolCall[],
274
279
  additional_kwargs: {
275
280
  ...message.additional_kwargs,
@@ -437,7 +442,9 @@ export function formatArtifactPayload(messages: BaseMessage[]): void {
437
442
  }
438
443
 
439
444
  if (aggregatedContent.length > 0) {
440
- messages.push(new HumanMessage({ content: aggregatedContent }));
445
+ messages.push(
446
+ new HumanMessage({ content: toLangChainContent(aggregatedContent) })
447
+ );
441
448
  }
442
449
  }
443
450