@librechat/agents 3.1.75 → 3.1.77-dev.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (272)
  1. package/dist/cjs/graphs/Graph.cjs +22 -3
  2. package/dist/cjs/graphs/Graph.cjs.map +1 -1
  3. package/dist/cjs/hitl/askUserQuestion.cjs +67 -0
  4. package/dist/cjs/hitl/askUserQuestion.cjs.map +1 -0
  5. package/dist/cjs/hooks/HookRegistry.cjs +54 -0
  6. package/dist/cjs/hooks/HookRegistry.cjs.map +1 -1
  7. package/dist/cjs/hooks/createToolPolicyHook.cjs +115 -0
  8. package/dist/cjs/hooks/createToolPolicyHook.cjs.map +1 -0
  9. package/dist/cjs/hooks/executeHooks.cjs +40 -1
  10. package/dist/cjs/hooks/executeHooks.cjs.map +1 -1
  11. package/dist/cjs/hooks/types.cjs +1 -0
  12. package/dist/cjs/hooks/types.cjs.map +1 -1
  13. package/dist/cjs/langchain/google-common.cjs +3 -0
  14. package/dist/cjs/langchain/google-common.cjs.map +1 -0
  15. package/dist/cjs/langchain/index.cjs +86 -0
  16. package/dist/cjs/langchain/index.cjs.map +1 -0
  17. package/dist/cjs/langchain/language_models/chat_models.cjs +3 -0
  18. package/dist/cjs/langchain/language_models/chat_models.cjs.map +1 -0
  19. package/dist/cjs/langchain/messages/tool.cjs +3 -0
  20. package/dist/cjs/langchain/messages/tool.cjs.map +1 -0
  21. package/dist/cjs/langchain/messages.cjs +51 -0
  22. package/dist/cjs/langchain/messages.cjs.map +1 -0
  23. package/dist/cjs/langchain/openai.cjs +3 -0
  24. package/dist/cjs/langchain/openai.cjs.map +1 -0
  25. package/dist/cjs/langchain/prompts.cjs +11 -0
  26. package/dist/cjs/langchain/prompts.cjs.map +1 -0
  27. package/dist/cjs/langchain/runnables.cjs +19 -0
  28. package/dist/cjs/langchain/runnables.cjs.map +1 -0
  29. package/dist/cjs/langchain/tools.cjs +23 -0
  30. package/dist/cjs/langchain/tools.cjs.map +1 -0
  31. package/dist/cjs/langchain/utils/env.cjs +11 -0
  32. package/dist/cjs/langchain/utils/env.cjs.map +1 -0
  33. package/dist/cjs/llm/anthropic/index.cjs +145 -52
  34. package/dist/cjs/llm/anthropic/index.cjs.map +1 -1
  35. package/dist/cjs/llm/anthropic/types.cjs.map +1 -1
  36. package/dist/cjs/llm/anthropic/utils/message_inputs.cjs +21 -14
  37. package/dist/cjs/llm/anthropic/utils/message_inputs.cjs.map +1 -1
  38. package/dist/cjs/llm/anthropic/utils/message_outputs.cjs +84 -70
  39. package/dist/cjs/llm/anthropic/utils/message_outputs.cjs.map +1 -1
  40. package/dist/cjs/llm/bedrock/index.cjs +1 -1
  41. package/dist/cjs/llm/bedrock/index.cjs.map +1 -1
  42. package/dist/cjs/llm/bedrock/utils/message_inputs.cjs +213 -3
  43. package/dist/cjs/llm/bedrock/utils/message_inputs.cjs.map +1 -1
  44. package/dist/cjs/llm/bedrock/utils/message_outputs.cjs +2 -1
  45. package/dist/cjs/llm/bedrock/utils/message_outputs.cjs.map +1 -1
  46. package/dist/cjs/llm/google/utils/common.cjs +5 -4
  47. package/dist/cjs/llm/google/utils/common.cjs.map +1 -1
  48. package/dist/cjs/llm/openai/index.cjs +519 -655
  49. package/dist/cjs/llm/openai/index.cjs.map +1 -1
  50. package/dist/cjs/llm/openai/utils/index.cjs +20 -458
  51. package/dist/cjs/llm/openai/utils/index.cjs.map +1 -1
  52. package/dist/cjs/llm/openrouter/index.cjs +57 -175
  53. package/dist/cjs/llm/openrouter/index.cjs.map +1 -1
  54. package/dist/cjs/llm/vertexai/index.cjs +5 -3
  55. package/dist/cjs/llm/vertexai/index.cjs.map +1 -1
  56. package/dist/cjs/main.cjs +112 -3
  57. package/dist/cjs/main.cjs.map +1 -1
  58. package/dist/cjs/messages/cache.cjs +2 -1
  59. package/dist/cjs/messages/cache.cjs.map +1 -1
  60. package/dist/cjs/messages/core.cjs +7 -6
  61. package/dist/cjs/messages/core.cjs.map +1 -1
  62. package/dist/cjs/messages/format.cjs +73 -15
  63. package/dist/cjs/messages/format.cjs.map +1 -1
  64. package/dist/cjs/messages/langchain.cjs +26 -0
  65. package/dist/cjs/messages/langchain.cjs.map +1 -0
  66. package/dist/cjs/messages/prune.cjs +7 -6
  67. package/dist/cjs/messages/prune.cjs.map +1 -1
  68. package/dist/cjs/run.cjs +400 -42
  69. package/dist/cjs/run.cjs.map +1 -1
  70. package/dist/cjs/tools/ToolNode.cjs +556 -56
  71. package/dist/cjs/tools/ToolNode.cjs.map +1 -1
  72. package/dist/cjs/tools/search/search.cjs +55 -66
  73. package/dist/cjs/tools/search/search.cjs.map +1 -1
  74. package/dist/cjs/tools/search/tavily-scraper.cjs +189 -0
  75. package/dist/cjs/tools/search/tavily-scraper.cjs.map +1 -0
  76. package/dist/cjs/tools/search/tavily-search.cjs +372 -0
  77. package/dist/cjs/tools/search/tavily-search.cjs.map +1 -0
  78. package/dist/cjs/tools/search/tool.cjs +26 -4
  79. package/dist/cjs/tools/search/tool.cjs.map +1 -1
  80. package/dist/cjs/tools/search/utils.cjs +10 -3
  81. package/dist/cjs/tools/search/utils.cjs.map +1 -1
  82. package/dist/esm/graphs/Graph.mjs +22 -3
  83. package/dist/esm/graphs/Graph.mjs.map +1 -1
  84. package/dist/esm/hitl/askUserQuestion.mjs +65 -0
  85. package/dist/esm/hitl/askUserQuestion.mjs.map +1 -0
  86. package/dist/esm/hooks/HookRegistry.mjs +54 -0
  87. package/dist/esm/hooks/HookRegistry.mjs.map +1 -1
  88. package/dist/esm/hooks/createToolPolicyHook.mjs +113 -0
  89. package/dist/esm/hooks/createToolPolicyHook.mjs.map +1 -0
  90. package/dist/esm/hooks/executeHooks.mjs +40 -1
  91. package/dist/esm/hooks/executeHooks.mjs.map +1 -1
  92. package/dist/esm/hooks/types.mjs +1 -0
  93. package/dist/esm/hooks/types.mjs.map +1 -1
  94. package/dist/esm/langchain/google-common.mjs +2 -0
  95. package/dist/esm/langchain/google-common.mjs.map +1 -0
  96. package/dist/esm/langchain/index.mjs +5 -0
  97. package/dist/esm/langchain/index.mjs.map +1 -0
  98. package/dist/esm/langchain/language_models/chat_models.mjs +2 -0
  99. package/dist/esm/langchain/language_models/chat_models.mjs.map +1 -0
  100. package/dist/esm/langchain/messages/tool.mjs +2 -0
  101. package/dist/esm/langchain/messages/tool.mjs.map +1 -0
  102. package/dist/esm/langchain/messages.mjs +2 -0
  103. package/dist/esm/langchain/messages.mjs.map +1 -0
  104. package/dist/esm/langchain/openai.mjs +2 -0
  105. package/dist/esm/langchain/openai.mjs.map +1 -0
  106. package/dist/esm/langchain/prompts.mjs +2 -0
  107. package/dist/esm/langchain/prompts.mjs.map +1 -0
  108. package/dist/esm/langchain/runnables.mjs +2 -0
  109. package/dist/esm/langchain/runnables.mjs.map +1 -0
  110. package/dist/esm/langchain/tools.mjs +2 -0
  111. package/dist/esm/langchain/tools.mjs.map +1 -0
  112. package/dist/esm/langchain/utils/env.mjs +2 -0
  113. package/dist/esm/langchain/utils/env.mjs.map +1 -0
  114. package/dist/esm/llm/anthropic/index.mjs +146 -54
  115. package/dist/esm/llm/anthropic/index.mjs.map +1 -1
  116. package/dist/esm/llm/anthropic/types.mjs.map +1 -1
  117. package/dist/esm/llm/anthropic/utils/message_inputs.mjs +21 -14
  118. package/dist/esm/llm/anthropic/utils/message_inputs.mjs.map +1 -1
  119. package/dist/esm/llm/anthropic/utils/message_outputs.mjs +84 -71
  120. package/dist/esm/llm/anthropic/utils/message_outputs.mjs.map +1 -1
  121. package/dist/esm/llm/bedrock/index.mjs +1 -1
  122. package/dist/esm/llm/bedrock/index.mjs.map +1 -1
  123. package/dist/esm/llm/bedrock/utils/message_inputs.mjs +214 -4
  124. package/dist/esm/llm/bedrock/utils/message_inputs.mjs.map +1 -1
  125. package/dist/esm/llm/bedrock/utils/message_outputs.mjs +2 -1
  126. package/dist/esm/llm/bedrock/utils/message_outputs.mjs.map +1 -1
  127. package/dist/esm/llm/google/utils/common.mjs +5 -4
  128. package/dist/esm/llm/google/utils/common.mjs.map +1 -1
  129. package/dist/esm/llm/openai/index.mjs +520 -656
  130. package/dist/esm/llm/openai/index.mjs.map +1 -1
  131. package/dist/esm/llm/openai/utils/index.mjs +23 -459
  132. package/dist/esm/llm/openai/utils/index.mjs.map +1 -1
  133. package/dist/esm/llm/openrouter/index.mjs +57 -175
  134. package/dist/esm/llm/openrouter/index.mjs.map +1 -1
  135. package/dist/esm/llm/vertexai/index.mjs +5 -3
  136. package/dist/esm/llm/vertexai/index.mjs.map +1 -1
  137. package/dist/esm/main.mjs +7 -0
  138. package/dist/esm/main.mjs.map +1 -1
  139. package/dist/esm/messages/cache.mjs +2 -1
  140. package/dist/esm/messages/cache.mjs.map +1 -1
  141. package/dist/esm/messages/core.mjs +7 -6
  142. package/dist/esm/messages/core.mjs.map +1 -1
  143. package/dist/esm/messages/format.mjs +73 -15
  144. package/dist/esm/messages/format.mjs.map +1 -1
  145. package/dist/esm/messages/langchain.mjs +23 -0
  146. package/dist/esm/messages/langchain.mjs.map +1 -0
  147. package/dist/esm/messages/prune.mjs +7 -6
  148. package/dist/esm/messages/prune.mjs.map +1 -1
  149. package/dist/esm/run.mjs +400 -42
  150. package/dist/esm/run.mjs.map +1 -1
  151. package/dist/esm/tools/ToolNode.mjs +557 -57
  152. package/dist/esm/tools/ToolNode.mjs.map +1 -1
  153. package/dist/esm/tools/search/search.mjs +55 -66
  154. package/dist/esm/tools/search/search.mjs.map +1 -1
  155. package/dist/esm/tools/search/tavily-scraper.mjs +186 -0
  156. package/dist/esm/tools/search/tavily-scraper.mjs.map +1 -0
  157. package/dist/esm/tools/search/tavily-search.mjs +370 -0
  158. package/dist/esm/tools/search/tavily-search.mjs.map +1 -0
  159. package/dist/esm/tools/search/tool.mjs +26 -4
  160. package/dist/esm/tools/search/tool.mjs.map +1 -1
  161. package/dist/esm/tools/search/utils.mjs +10 -3
  162. package/dist/esm/tools/search/utils.mjs.map +1 -1
  163. package/dist/types/graphs/Graph.d.ts +7 -0
  164. package/dist/types/hitl/askUserQuestion.d.ts +55 -0
  165. package/dist/types/hitl/index.d.ts +6 -0
  166. package/dist/types/hooks/HookRegistry.d.ts +58 -0
  167. package/dist/types/hooks/createToolPolicyHook.d.ts +87 -0
  168. package/dist/types/hooks/index.d.ts +4 -1
  169. package/dist/types/hooks/types.d.ts +109 -3
  170. package/dist/types/index.d.ts +10 -0
  171. package/dist/types/langchain/google-common.d.ts +1 -0
  172. package/dist/types/langchain/index.d.ts +8 -0
  173. package/dist/types/langchain/language_models/chat_models.d.ts +1 -0
  174. package/dist/types/langchain/messages/tool.d.ts +1 -0
  175. package/dist/types/langchain/messages.d.ts +2 -0
  176. package/dist/types/langchain/openai.d.ts +1 -0
  177. package/dist/types/langchain/prompts.d.ts +1 -0
  178. package/dist/types/langchain/runnables.d.ts +2 -0
  179. package/dist/types/langchain/tools.d.ts +2 -0
  180. package/dist/types/langchain/utils/env.d.ts +1 -0
  181. package/dist/types/llm/anthropic/index.d.ts +22 -9
  182. package/dist/types/llm/anthropic/types.d.ts +5 -1
  183. package/dist/types/llm/anthropic/utils/message_outputs.d.ts +13 -6
  184. package/dist/types/llm/anthropic/utils/output_parsers.d.ts +1 -1
  185. package/dist/types/llm/openai/index.d.ts +21 -24
  186. package/dist/types/llm/openrouter/index.d.ts +11 -9
  187. package/dist/types/llm/vertexai/index.d.ts +1 -0
  188. package/dist/types/messages/cache.d.ts +4 -1
  189. package/dist/types/messages/format.d.ts +4 -1
  190. package/dist/types/messages/langchain.d.ts +27 -0
  191. package/dist/types/run.d.ts +117 -1
  192. package/dist/types/tools/ToolNode.d.ts +26 -1
  193. package/dist/types/tools/search/tavily-scraper.d.ts +19 -0
  194. package/dist/types/tools/search/tavily-search.d.ts +4 -0
  195. package/dist/types/tools/search/types.d.ts +99 -5
  196. package/dist/types/tools/search/utils.d.ts +2 -2
  197. package/dist/types/types/graph.d.ts +23 -37
  198. package/dist/types/types/hitl.d.ts +272 -0
  199. package/dist/types/types/index.d.ts +1 -0
  200. package/dist/types/types/llm.d.ts +3 -3
  201. package/dist/types/types/run.d.ts +33 -0
  202. package/dist/types/types/stream.d.ts +1 -1
  203. package/dist/types/types/tools.d.ts +19 -0
  204. package/package.json +80 -17
  205. package/src/graphs/Graph.ts +33 -4
  206. package/src/graphs/__tests__/composition.smoke.test.ts +188 -0
  207. package/src/hitl/askUserQuestion.ts +72 -0
  208. package/src/hitl/index.ts +7 -0
  209. package/src/hooks/HookRegistry.ts +71 -0
  210. package/src/hooks/__tests__/createToolPolicyHook.test.ts +259 -0
  211. package/src/hooks/createToolPolicyHook.ts +184 -0
  212. package/src/hooks/executeHooks.ts +50 -1
  213. package/src/hooks/index.ts +6 -0
  214. package/src/hooks/types.ts +112 -0
  215. package/src/index.ts +22 -0
  216. package/src/langchain/google-common.ts +1 -0
  217. package/src/langchain/index.ts +8 -0
  218. package/src/langchain/language_models/chat_models.ts +1 -0
  219. package/src/langchain/messages/tool.ts +5 -0
  220. package/src/langchain/messages.ts +21 -0
  221. package/src/langchain/openai.ts +1 -0
  222. package/src/langchain/prompts.ts +1 -0
  223. package/src/langchain/runnables.ts +7 -0
  224. package/src/langchain/tools.ts +8 -0
  225. package/src/langchain/utils/env.ts +1 -0
  226. package/src/llm/anthropic/index.ts +252 -84
  227. package/src/llm/anthropic/llm.spec.ts +751 -102
  228. package/src/llm/anthropic/types.ts +9 -1
  229. package/src/llm/anthropic/utils/message_inputs.ts +37 -19
  230. package/src/llm/anthropic/utils/message_outputs.ts +119 -101
  231. package/src/llm/bedrock/index.ts +2 -2
  232. package/src/llm/bedrock/llm.spec.ts +341 -0
  233. package/src/llm/bedrock/utils/message_inputs.ts +303 -4
  234. package/src/llm/bedrock/utils/message_outputs.ts +2 -1
  235. package/src/llm/custom-chat-models.smoke.test.ts +836 -0
  236. package/src/llm/google/llm.spec.ts +339 -57
  237. package/src/llm/google/utils/common.ts +53 -48
  238. package/src/llm/openai/contentBlocks.test.ts +346 -0
  239. package/src/llm/openai/index.ts +856 -833
  240. package/src/llm/openai/utils/index.ts +107 -78
  241. package/src/llm/openai/utils/messages.test.ts +159 -0
  242. package/src/llm/openrouter/index.ts +124 -247
  243. package/src/llm/openrouter/reasoning.test.ts +8 -1
  244. package/src/llm/vertexai/index.ts +11 -5
  245. package/src/llm/vertexai/llm.spec.ts +28 -1
  246. package/src/messages/cache.test.ts +4 -3
  247. package/src/messages/cache.ts +3 -2
  248. package/src/messages/core.ts +16 -9
  249. package/src/messages/format.ts +96 -16
  250. package/src/messages/formatAgentMessages.test.ts +166 -1
  251. package/src/messages/langchain.ts +39 -0
  252. package/src/messages/prune.ts +12 -8
  253. package/src/run.ts +456 -47
  254. package/src/scripts/caching.ts +2 -3
  255. package/src/specs/summarization.test.ts +51 -58
  256. package/src/tools/ToolNode.ts +706 -63
  257. package/src/tools/__tests__/hitl.test.ts +3593 -0
  258. package/src/tools/search/search.ts +83 -73
  259. package/src/tools/search/tavily-scraper.ts +235 -0
  260. package/src/tools/search/tavily-search.ts +424 -0
  261. package/src/tools/search/tavily.test.ts +965 -0
  262. package/src/tools/search/tool.ts +36 -26
  263. package/src/tools/search/types.ts +133 -8
  264. package/src/tools/search/utils.ts +13 -5
  265. package/src/types/graph.ts +32 -87
  266. package/src/types/hitl.ts +303 -0
  267. package/src/types/index.ts +1 -0
  268. package/src/types/llm.ts +3 -3
  269. package/src/types/run.ts +33 -0
  270. package/src/types/stream.ts +1 -1
  271. package/src/types/tools.ts +19 -0
  272. package/src/utils/llmConfig.ts +1 -6
@@ -1,8 +1,8 @@
1
1
  import { config } from 'dotenv';
2
2
  config();
3
- import { test, jest } from '@jest/globals';
3
+ import { beforeEach, test, jest } from '@jest/globals';
4
4
 
5
- jest.setTimeout(90000);
5
+ jest.setTimeout(Number(process.env.GOOGLE_TEST_TIMEOUT_MS ?? 120000));
6
6
  import * as fs from 'node:fs/promises';
7
7
  import * as path from 'node:path';
8
8
  import {
@@ -19,29 +19,177 @@ import {
19
19
  import { StructuredTool, tool } from '@langchain/core/tools';
20
20
  import { z } from 'zod/v3';
21
21
  import {
22
- CodeExecutionTool,
23
22
  DynamicRetrievalMode,
24
23
  SchemaType as FunctionDeclarationSchemaType,
25
- GoogleSearchRetrievalTool,
26
24
  } from '@google/generative-ai';
27
25
  import { concat } from '@langchain/core/utils/stream';
26
+ import type {
27
+ CodeExecutionTool,
28
+ GoogleSearchRetrievalTool,
29
+ } from '@google/generative-ai';
30
+ import type { ContentBlock } from '@langchain/core/messages';
28
31
  import { CustomChatGoogleGenerativeAI as ChatGoogleGenerativeAI } from './index';
29
32
  import { _FUNCTION_CALL_THOUGHT_SIGNATURES_MAP_KEY } from './utils/common';
30
33
 
31
34
  // Save the original value of the 'LANGCHAIN_CALLBACKS_BACKGROUND' environment variable
32
35
  const originalBackground = process.env.LANGCHAIN_CALLBACKS_BACKGROUND;
33
36
 
37
+ const GOOGLE_TEST_MAX_RETRIES = Number(
38
+ process.env.GOOGLE_TEST_MAX_RETRIES ?? 2
39
+ );
40
+ const GOOGLE_TEST_CALL_GAP_MS = Number(
41
+ process.env.GOOGLE_TEST_CALL_GAP_MS ?? 1000
42
+ );
43
+ const GOOGLE_TEST_MAX_RETRY_DELAY_MS = Number(
44
+ process.env.GOOGLE_TEST_MAX_RETRY_DELAY_MS ?? 50000
45
+ );
46
+ let lastGoogleTestCallAt = 0;
47
+
48
+ const wait = async (ms: number): Promise<void> =>
49
+ new Promise((resolve) => {
50
+ setTimeout(resolve, ms);
51
+ });
52
+
53
+ async function waitForGoogleTestSlot(): Promise<void> {
54
+ const waitMs = Math.max(
55
+ 0,
56
+ lastGoogleTestCallAt + GOOGLE_TEST_CALL_GAP_MS - Date.now()
57
+ );
58
+ if (waitMs > 0) {
59
+ await wait(waitMs);
60
+ }
61
+ lastGoogleTestCallAt = Date.now();
62
+ }
63
+
64
+ const GOOGLE_NO_RETRY_STATUSES = new Set([
65
+ 400, 401, 402, 403, 404, 405, 406, 407, 409,
66
+ ]);
67
+
68
+ interface GoogleRetryInfoDetail {
69
+ '@type'?: string;
70
+ retryDelay?: string;
71
+ }
72
+
73
+ interface GoogleFetchErrorLike extends Error {
74
+ status?: number;
75
+ errorDetails?: GoogleRetryInfoDetail[];
76
+ }
77
+
78
+ interface ErrorWithCode extends Error {
79
+ code?: string;
80
+ }
81
+
82
+ function throwIfNonRetryableError(error: unknown): void {
83
+ if (!(error instanceof Error)) {
84
+ return;
85
+ }
86
+ if (
87
+ error.message.startsWith('Cancel') ||
88
+ error.message.startsWith('AbortError') ||
89
+ error.name === 'AbortError'
90
+ ) {
91
+ throw error;
92
+ }
93
+ const errorCode = (error as Partial<ErrorWithCode>).code;
94
+ if (errorCode === 'ECONNABORTED') {
95
+ throw error;
96
+ }
97
+ }
98
+
99
+ function getGoogleFetchError(error: unknown): GoogleFetchErrorLike | undefined {
100
+ if (!(error instanceof Error)) {
101
+ return undefined;
102
+ }
103
+ const possibleError = error as Partial<GoogleFetchErrorLike>;
104
+ if (
105
+ typeof possibleError.status !== 'number' &&
106
+ !Array.isArray(possibleError.errorDetails)
107
+ ) {
108
+ return undefined;
109
+ }
110
+ return possibleError as GoogleFetchErrorLike;
111
+ }
112
+
113
+ function parseGoogleRetryDelayMs(retryDelay: string): number | undefined {
114
+ const retryDelaySeconds = /^(\d+(?:\.\d+)?)s$/.exec(retryDelay)?.[1];
115
+ if (retryDelaySeconds === undefined) {
116
+ return undefined;
117
+ }
118
+ return Math.ceil(Number(retryDelaySeconds) * 1000);
119
+ }
120
+
121
+ function getGoogleRetryDelayMs(
122
+ error: GoogleFetchErrorLike
123
+ ): number | undefined {
124
+ const retryInfoDelay = error.errorDetails?.find(
125
+ (detail) => detail['@type'] === 'type.googleapis.com/google.rpc.RetryInfo'
126
+ )?.retryDelay;
127
+ if (retryInfoDelay !== undefined) {
128
+ return parseGoogleRetryDelayMs(retryInfoDelay);
129
+ }
130
+ const messageDelay = /Please retry in (\d+(?:\.\d+)?)s/.exec(
131
+ error.message
132
+ )?.[1];
133
+ if (messageDelay === undefined) {
134
+ return undefined;
135
+ }
136
+ return Math.ceil(Number(messageDelay) * 1000);
137
+ }
138
+
139
+ async function handleGoogleFailedAttempt(error: unknown): Promise<void> {
140
+ throwIfNonRetryableError(error);
141
+ const googleError = getGoogleFetchError(error);
142
+ if (googleError === undefined) {
143
+ return;
144
+ }
145
+ if (
146
+ googleError.status !== undefined &&
147
+ GOOGLE_NO_RETRY_STATUSES.has(googleError.status)
148
+ ) {
149
+ throw googleError;
150
+ }
151
+ if (googleError.status !== 429) {
152
+ return;
153
+ }
154
+ if (/\blimit:\s*0\b/.test(googleError.message)) {
155
+ throw googleError;
156
+ }
157
+ const retryDelayMs = getGoogleRetryDelayMs(googleError);
158
+ if (retryDelayMs === undefined) {
159
+ return;
160
+ }
161
+ await wait(Math.min(retryDelayMs, GOOGLE_TEST_MAX_RETRY_DELAY_MS));
162
+ }
163
+
164
+ function createGoogleModel(
165
+ fields: ConstructorParameters<typeof ChatGoogleGenerativeAI>[0]
166
+ ): ChatGoogleGenerativeAI {
167
+ return new ChatGoogleGenerativeAI({
168
+ ...fields,
169
+ maxRetries: GOOGLE_TEST_MAX_RETRIES,
170
+ onFailedAttempt: handleGoogleFailedAttempt,
171
+ });
172
+ }
173
+
174
+ beforeEach(async () => {
175
+ await waitForGoogleTestSlot();
176
+ });
177
+
34
178
  const dummyToolResponse =
35
179
  "[{\"title\":\"Weather in New York City\",\"url\":\"https://www.weatherapi.com/\",\"content\":\"{'location': {'name': 'New York', 'region': 'New York', 'country': 'United States of America', 'lat': 40.71, 'lon': -74.01, 'tz_id': 'America/New_York', 'localtime_epoch': 1718659486, 'localtime': '2024-06-17 17:24'}, 'current': {'last_updated_epoch': 1718658900, 'last_updated': '2024-06-17 17:15', 'temp_c': 27.8, 'temp_f': 82.0, 'is_day': 1, 'condition': {'text': 'Partly cloudy', 'icon': '//cdn.weatherapi.com/weather/64x64/day/116.png', 'code': 1003}, 'wind_mph': 2.2, 'wind_kph': 3.6, 'wind_degree': 159, 'wind_dir': 'SSE', 'pressure_mb': 1021.0, 'pressure_in': 30.15, 'precip_mm': 0.0, 'precip_in': 0.0, 'humidity': 58, 'cloud': 25, 'feelslike_c': 29.0, 'feelslike_f': 84.2, 'windchill_c': 26.9, 'windchill_f': 80.5, 'heatindex_c': 27.9, 'heatindex_f': 82.2, 'dewpoint_c': 17.1, 'dewpoint_f': 62.8, 'vis_km': 16.0, 'vis_miles': 9.0, 'uv': 7.0, 'gust_mph': 18.3, 'gust_kph': 29.4}}\",\"score\":0.98192,\"raw_content\":null},{\"title\":\"New York, NY Monthly Weather | AccuWeather\",\"url\":\"https://www.accuweather.com/en/us/new-york/10021/june-weather/349727\",\"content\":\"Get the monthly weather forecast for New York, NY, including daily high/low, historical averages, to help you plan ahead.\",\"score\":0.97504,\"raw_content\":null}]";
36
180
 
37
181
  test('Test Google AI', async () => {
38
- const model = new ChatGoogleGenerativeAI({ model: 'gemini-2.0-flash' });
182
+ const model = createGoogleModel({
183
+ model: 'gemini-2.0-flash',
184
+ });
39
185
  const res = await model.invoke('what is 1 + 1?');
40
186
  expect(res).toBeTruthy();
41
187
  });
42
188
 
43
189
  test('Test Google AI generation', async () => {
44
- const model = new ChatGoogleGenerativeAI({ model: 'gemini-2.0-flash' });
190
+ const model = createGoogleModel({
191
+ model: 'gemini-2.0-flash',
192
+ });
45
193
  const res = await model.generate([
46
194
  [['human', 'Translate "I love programming" into Korean.']],
47
195
  ]);
@@ -49,7 +197,7 @@ test('Test Google AI generation', async () => {
49
197
  });
50
198
 
51
199
  test('Test Google AI generation with a stop sequence', async () => {
52
- const model = new ChatGoogleGenerativeAI({
200
+ const model = createGoogleModel({
53
201
  model: 'gemini-2.0-flash',
54
202
  stopSequences: ['two', '2'],
55
203
  });
@@ -63,7 +211,9 @@ test('Test Google AI generation with a stop sequence', async () => {
63
211
  });
64
212
 
65
213
  test('Test Google AI generation with a system message', async () => {
66
- const model = new ChatGoogleGenerativeAI({ model: 'gemini-2.0-flash' });
214
+ const model = createGoogleModel({
215
+ model: 'gemini-2.0-flash',
216
+ });
67
217
  const res = await model.generate([
68
218
  [
69
219
  ['system', 'You are an amazing translator.'],
@@ -77,7 +227,7 @@ test('Test Google AI multimodal generation', async () => {
77
227
  const imageData = (
78
228
  await fs.readFile(path.join(__dirname, '/data/hotdog.jpg'))
79
229
  ).toString('base64');
80
- const model = new ChatGoogleGenerativeAI({
230
+ const model = createGoogleModel({
81
231
  model: 'gemini-2.0-flash',
82
232
  });
83
233
  const res = await model.invoke([
@@ -104,19 +254,19 @@ test('Test Google AI handleLLMNewToken callback', async () => {
104
254
  process.env.LANGCHAIN_CALLBACKS_BACKGROUND = 'false';
105
255
 
106
256
  try {
107
- const model = new ChatGoogleGenerativeAI({ model: 'gemini-2.0-flash' });
257
+ const model = createGoogleModel({
258
+ model: 'gemini-2.0-flash',
259
+ });
108
260
  let tokens = '';
109
- const res = await model.call(
110
- [new HumanMessage('what is 1 + 1?')],
111
- undefined,
112
- [
261
+ const res = await model.invoke([new HumanMessage('what is 1 + 1?')], {
262
+ callbacks: [
113
263
  {
114
264
  handleLLMNewToken(token: string): void {
115
265
  tokens += token;
116
266
  },
117
267
  },
118
- ]
119
- );
268
+ ],
269
+ });
120
270
  const responseContent = typeof res.content === 'string' ? res.content : '';
121
271
  expect(tokens).toBe(responseContent);
122
272
  } finally {
@@ -132,7 +282,9 @@ test('Test Google AI handleLLMNewToken callback with streaming', async () => {
132
282
  process.env.LANGCHAIN_CALLBACKS_BACKGROUND = 'false';
133
283
 
134
284
  try {
135
- const model = new ChatGoogleGenerativeAI({ model: 'gemini-2.0-flash' });
285
+ const model = createGoogleModel({
286
+ model: 'gemini-2.0-flash',
287
+ });
136
288
  let tokens = '';
137
289
  const res = await model.stream([new HumanMessage('what is 1 + 1?')], {
138
290
  callbacks: [
@@ -161,7 +313,7 @@ test('Test Google AI in streaming mode', async () => {
161
313
  process.env.LANGCHAIN_CALLBACKS_BACKGROUND = 'false';
162
314
 
163
315
  try {
164
- const model = new ChatGoogleGenerativeAI({
316
+ const model = createGoogleModel({
165
317
  model: 'gemini-2.0-flash',
166
318
  streaming: true,
167
319
  });
@@ -198,10 +350,9 @@ test('Gemini can understand audio', async () => {
198
350
  const audioPath = path.join(__dirname, 'data/gettysburg10.wav');
199
351
  const audioMimeType = 'audio/wav';
200
352
 
201
- const model = new ChatGoogleGenerativeAI({
353
+ const model = createGoogleModel({
202
354
  model: 'gemini-2.0-flash',
203
355
  temperature: 0,
204
- maxRetries: 0,
205
356
  });
206
357
 
207
358
  const audioBase64 = await fileToBase64(audioPath);
@@ -272,7 +423,9 @@ const prompt = new HumanMessage(
272
423
  );
273
424
 
274
425
  test('ChatGoogleGenerativeAI can bind and invoke langchain tools', async () => {
275
- const model = new ChatGoogleGenerativeAI({ model: 'gemini-2.5-flash' });
426
+ const model = createGoogleModel({
427
+ model: 'gemini-2.5-flash',
428
+ });
276
429
 
277
430
  const modelWithTools = model.bindTools([new FakeBrowserTool()]);
278
431
  const res = await modelWithTools.invoke([prompt]);
@@ -287,7 +440,7 @@ test('ChatGoogleGenerativeAI can bind and invoke langchain tools', async () => {
287
440
  });
288
441
 
289
442
  test('ChatGoogleGenerativeAI can bind and stream langchain tools', async () => {
290
- const model = new ChatGoogleGenerativeAI({
443
+ const model = createGoogleModel({
291
444
  model: 'gemini-2.5-flash',
292
445
  });
293
446
 
@@ -315,9 +468,8 @@ test('ChatGoogleGenerativeAI can bind and stream langchain tools', async () => {
315
468
  });
316
469
 
317
470
  test('ChatGoogleGenerativeAI can handle streaming tool messages.', async () => {
318
- const model = new ChatGoogleGenerativeAI({
471
+ const model = createGoogleModel({
319
472
  model: 'gemini-2.5-flash',
320
- maxRetries: 1,
321
473
  });
322
474
 
323
475
  const browserTool = new FakeBrowserTool();
@@ -359,9 +511,8 @@ test('ChatGoogleGenerativeAI can handle streaming tool messages.', async () => {
359
511
  });
360
512
 
361
513
  test('ChatGoogleGenerativeAI can handle invoking tool messages.', async () => {
362
- const model = new ChatGoogleGenerativeAI({
514
+ const model = createGoogleModel({
363
515
  model: 'gemini-2.5-flash',
364
- maxRetries: 1,
365
516
  });
366
517
 
367
518
  const browserTool = new FakeBrowserTool();
@@ -393,7 +544,9 @@ test('ChatGoogleGenerativeAI can handle invoking tool messages.', async () => {
393
544
  });
394
545
 
395
546
  test('ChatGoogleGenerativeAI can bind and invoke genai tools', async () => {
396
- const model = new ChatGoogleGenerativeAI({ model: 'gemini-2.5-flash' });
547
+ const model = createGoogleModel({
548
+ model: 'gemini-2.5-flash',
549
+ });
397
550
 
398
551
  const modelWithTools = model.bindTools([googleGenAITool]);
399
552
  const res = await modelWithTools.invoke([prompt]);
@@ -408,7 +561,9 @@ test('ChatGoogleGenerativeAI can bind and invoke genai tools', async () => {
408
561
  });
409
562
 
410
563
  test('ChatGoogleGenerativeAI can bindTools with langchain tools and invoke', async () => {
411
- const model = new ChatGoogleGenerativeAI({ model: 'gemini-2.5-flash' });
564
+ const model = createGoogleModel({
565
+ model: 'gemini-2.5-flash',
566
+ });
412
567
 
413
568
  const modelWithTools = model.bindTools([new FakeBrowserTool()]);
414
569
  const res = await modelWithTools.invoke([prompt]);
@@ -423,7 +578,9 @@ test('ChatGoogleGenerativeAI can bindTools with langchain tools and invoke', asy
423
578
  });
424
579
 
425
580
  test('ChatGoogleGenerativeAI can bindTools with genai tools and invoke', async () => {
426
- const model = new ChatGoogleGenerativeAI({ model: 'gemini-2.5-flash' });
581
+ const model = createGoogleModel({
582
+ model: 'gemini-2.5-flash',
583
+ });
427
584
 
428
585
  const modelWithTools = model.bindTools([googleGenAITool]);
429
586
  const res = await modelWithTools.invoke([prompt]);
@@ -438,7 +595,9 @@ test('ChatGoogleGenerativeAI can bindTools with genai tools and invoke', async (
438
595
  });
439
596
 
440
597
  test('ChatGoogleGenerativeAI can call withStructuredOutput langchain tools and invoke', async () => {
441
- const model = new ChatGoogleGenerativeAI({ model: 'gemini-2.0-flash' });
598
+ const model = createGoogleModel({
599
+ model: 'gemini-2.0-flash',
600
+ });
442
601
 
443
602
  const modelWithTools = model.withStructuredOutput(
444
603
  z.object({
@@ -451,7 +610,9 @@ test('ChatGoogleGenerativeAI can call withStructuredOutput langchain tools and i
451
610
  });
452
611
 
453
612
  test('ChatGoogleGenerativeAI can call withStructuredOutput genai tools and invoke', async () => {
454
- const model = new ChatGoogleGenerativeAI({ model: 'gemini-2.0-flash' });
613
+ const model = createGoogleModel({
614
+ model: 'gemini-2.0-flash',
615
+ });
455
616
 
456
617
  type GeminiTool = {
457
618
  url: string;
@@ -466,7 +627,7 @@ test('ChatGoogleGenerativeAI can call withStructuredOutput genai tools and invok
466
627
  });
467
628
 
468
629
  test('Stream token count usage_metadata', async () => {
469
- const model = new ChatGoogleGenerativeAI({
630
+ const model = createGoogleModel({
470
631
  temperature: 0,
471
632
  model: 'gemini-2.0-flash',
472
633
  maxOutputTokens: 10,
@@ -497,10 +658,9 @@ describe('ChatGoogleGenerativeAI should count tokens correctly', () => {
497
658
  test.each(['gemini-2.5-flash', 'gemini-2.5-pro'])(
498
659
  'with %s',
499
660
  async (modelName) => {
500
- const model = new ChatGoogleGenerativeAI({
661
+ const model = createGoogleModel({
501
662
  model: modelName,
502
663
  temperature: 0,
503
- maxRetries: 0,
504
664
  });
505
665
  const res = await model.stream('Why is the sky blue? Be concise.');
506
666
  let full: AIMessageChunk | undefined;
@@ -516,7 +676,7 @@ describe('ChatGoogleGenerativeAI should count tokens correctly', () => {
516
676
  });
517
677
 
518
678
  test('streamUsage excludes token usage', async () => {
519
- const model = new ChatGoogleGenerativeAI({
679
+ const model = createGoogleModel({
520
680
  temperature: 0,
521
681
  model: 'gemini-2.0-flash',
522
682
  streamUsage: false,
@@ -535,7 +695,7 @@ test('streamUsage excludes token usage', async () => {
535
695
  });
536
696
 
537
697
  test('Invoke token count usage_metadata', async () => {
538
- const model = new ChatGoogleGenerativeAI({
698
+ const model = createGoogleModel({
539
699
  temperature: 0,
540
700
  model: 'gemini-2.0-flash',
541
701
  maxOutputTokens: 10,
@@ -553,7 +713,7 @@ test('Invoke token count usage_metadata', async () => {
553
713
  });
554
714
 
555
715
  test('Invoke with JSON mode', async () => {
556
- const model = new ChatGoogleGenerativeAI({
716
+ const model = createGoogleModel({
557
717
  model: 'gemini-2.0-flash',
558
718
  temperature: 0,
559
719
  maxOutputTokens: 10,
@@ -572,7 +732,9 @@ test('Invoke with JSON mode', async () => {
572
732
  });
573
733
 
574
734
  test('Supports tool_choice', async () => {
575
- const model = new ChatGoogleGenerativeAI({ model: 'gemini-2.0-flash' });
735
+ const model = createGoogleModel({
736
+ model: 'gemini-2.0-flash',
737
+ });
576
738
  const tools = [
577
739
  {
578
740
  name: 'get_weather',
@@ -600,16 +762,63 @@ test('Supports tool_choice', async () => {
600
762
  expect(response.tool_calls?.length).toBe(1);
601
763
  });
602
764
 
765
+ describe('GoogleSearchRetrievalTool', () => {
766
+ test('Supports GoogleSearchRetrievalTool', async () => {
767
+ const searchRetrievalTool: GoogleSearchRetrievalTool = {
768
+ googleSearchRetrieval: {
769
+ dynamicRetrievalConfig: {
770
+ mode: DynamicRetrievalMode.MODE_DYNAMIC,
771
+ dynamicThreshold: 0.7,
772
+ },
773
+ },
774
+ };
775
+ const model = createGoogleModel({
776
+ model: 'gemini-1.5-pro',
777
+ temperature: 0,
778
+ }).bindTools([searchRetrievalTool]);
779
+
780
+ const result = await model.invoke('Who won the 2024 MLB World Series?');
781
+
782
+ expect(result.response_metadata?.groundingMetadata).toBeDefined();
783
+ expect(result.content as string).toContain('Dodgers');
784
+ });
785
+
786
+ test('Can stream GoogleSearchRetrievalTool', async () => {
787
+ const searchRetrievalTool: GoogleSearchRetrievalTool = {
788
+ googleSearchRetrieval: {
789
+ dynamicRetrievalConfig: {
790
+ mode: DynamicRetrievalMode.MODE_DYNAMIC,
791
+ dynamicThreshold: 0.7,
792
+ },
793
+ },
794
+ };
795
+ const model = createGoogleModel({
796
+ model: 'gemini-1.5-pro',
797
+ temperature: 0,
798
+ }).bindTools([searchRetrievalTool]);
799
+
800
+ const stream = await model.stream('Who won the 2024 MLB World Series?');
801
+ let finalMsg: AIMessageChunk | undefined;
802
+ for await (const msg of stream) {
803
+ finalMsg = finalMsg ? concat(finalMsg, msg) : msg;
804
+ }
805
+ if (!finalMsg) {
806
+ throw new Error('finalMsg is undefined');
807
+ }
808
+ expect(finalMsg.response_metadata?.groundingMetadata).toBeDefined();
809
+ expect(finalMsg.content as string).toContain('Dodgers');
810
+ });
811
+ });
812
+
603
813
  describe('GoogleSearch (new API)', () => {
604
814
  test('Supports GoogleSearch tool', async () => {
605
815
  // New google_search tool for Gemini 2.0+ models
606
816
  const googleSearchTool = {
607
817
  googleSearch: {},
608
818
  };
609
- const model = new ChatGoogleGenerativeAI({
819
+ const model = createGoogleModel({
610
820
  model: 'gemini-2.5-flash',
611
821
  temperature: 0,
612
- maxRetries: 0,
613
822
  }).bindTools([googleSearchTool]);
614
823
 
615
824
  // Ask about something that requires current web data beyond training cutoff
@@ -636,10 +845,9 @@ describe('GoogleSearch (new API)', () => {
636
845
  const googleSearchTool = {
637
846
  googleSearch: {},
638
847
  };
639
- const model = new ChatGoogleGenerativeAI({
848
+ const model = createGoogleModel({
640
849
  model: 'gemini-2.5-flash',
641
850
  temperature: 0,
642
- maxRetries: 0,
643
851
  }).bindTools([googleSearchTool]);
644
852
 
645
853
  const stream = await model.stream(
@@ -674,10 +882,9 @@ describe('CodeExecutionTool', () => {
674
882
  const codeExecutionTool: CodeExecutionTool = {
675
883
  codeExecution: {}, // Simply pass an empty object to enable it.
676
884
  };
677
- const model = new ChatGoogleGenerativeAI({
885
+ const model = createGoogleModel({
678
886
  model: 'gemini-2.5-flash',
679
887
  temperature: 0,
680
- maxRetries: 0,
681
888
  }).bindTools([codeExecutionTool]);
682
889
 
683
890
  const result = await model.invoke(
@@ -707,10 +914,9 @@ describe('CodeExecutionTool', () => {
707
914
  const codeExecutionTool: CodeExecutionTool = {
708
915
  codeExecution: {}, // Simply pass an empty object to enable it.
709
916
  };
710
- const model = new ChatGoogleGenerativeAI({
917
+ const model = createGoogleModel({
711
918
  model: 'gemini-2.5-flash',
712
919
  temperature: 0,
713
- maxRetries: 0,
714
920
  }).bindTools([codeExecutionTool]);
715
921
 
716
922
  const codeResult = await model.invoke(
@@ -740,10 +946,9 @@ describe('CodeExecutionTool', () => {
740
946
  const codeExecutionTool: CodeExecutionTool = {
741
947
  codeExecution: {}, // Simply pass an empty object to enable it.
742
948
  };
743
- const model = new ChatGoogleGenerativeAI({
949
+ const model = createGoogleModel({
744
950
  model: 'gemini-2.5-flash',
745
951
  temperature: 0,
746
- maxRetries: 0,
747
952
  }).bindTools([codeExecutionTool]);
748
953
 
749
954
  const stream = await model.stream(
@@ -778,10 +983,9 @@ describe('CodeExecutionTool', () => {
778
983
  });
779
984
 
780
985
  test('pass pdf to request', async () => {
781
- const model = new ChatGoogleGenerativeAI({
986
+ const model = createGoogleModel({
782
987
  model: 'gemini-2.0-flash-exp',
783
988
  temperature: 0,
784
- maxRetries: 0,
785
989
  });
786
990
  const pdfPath = path.join(
787
991
  __dirname,
@@ -810,9 +1014,8 @@ test('pass pdf to request', async () => {
810
1014
  });
811
1015
 
812
1016
  test('calling tool with no args should work', async () => {
813
- const llm = new ChatGoogleGenerativeAI({
1017
+ const llm = createGoogleModel({
814
1018
  model: 'gemini-2.0-flash',
815
- maxRetries: 0,
816
1019
  });
817
1020
  const sfWeatherTool = tool(
818
1021
  async () => 'The weather is 80 degrees and sunny',
@@ -843,9 +1046,8 @@ test('calling tool with no args should work', async () => {
843
1046
  });
844
1047
 
845
1048
  describe('tool calling with thought signatures', () => {
846
- const model = new ChatGoogleGenerativeAI({
1049
+ const model = createGoogleModel({
847
1050
  model: 'gemini-3-pro-preview',
848
- maxRetries: 0,
849
1051
  });
850
1052
  const weatherTool = tool(async () => 'The weather is 80 degrees and sunny', {
851
1053
  name: 'weather',
@@ -921,14 +1123,94 @@ describe('tool calling with thought signatures', () => {
921
1123
  });
922
1124
 
923
1125
  test('works with thinking config', async () => {
924
- const model = new ChatGoogleGenerativeAI({
1126
+ const model = createGoogleModel({
925
1127
  model: 'gemini-3-pro-preview',
926
- maxRetries: 0,
927
1128
  thinkingConfig: {
928
1129
  includeThoughts: true,
929
1130
  thinkingBudget: 100,
930
1131
  },
931
1132
  });
932
- const result = await model.invoke('What is the current weather in SF?');
1133
+ const result = await model.invoke('What is 2+2?');
933
1134
  expect(result.content).toBeDefined();
1135
+
1136
+ if (Array.isArray(result.content)) {
1137
+ const thinkingBlocks = result.content.filter(
1138
+ (block): block is ContentBlock =>
1139
+ typeof block === 'object' &&
1140
+ block !== null &&
1141
+ 'type' in block &&
1142
+ block.type === 'thinking'
1143
+ );
1144
+ const textBlocks = result.content.filter(
1145
+ (block): block is ContentBlock =>
1146
+ typeof block === 'object' &&
1147
+ block !== null &&
1148
+ 'type' in block &&
1149
+ block.type === 'text'
1150
+ );
1151
+
1152
+ expect(thinkingBlocks.length).toBeGreaterThan(0);
1153
+
1154
+ thinkingBlocks.forEach((block) => {
1155
+ expect(block).toHaveProperty('thinking');
1156
+ expect(typeof block.thinking).toBe('string');
1157
+ });
1158
+
1159
+ textBlocks.forEach((block) => {
1160
+ expect(block).toHaveProperty('text');
1161
+ expect(typeof block.text).toBe('string');
1162
+ });
1163
+ }
1164
+ });
1165
+
1166
+ describe('Google GenAI Reasoning with contentBlocks', () => {
1167
+ test('invoke returns thinking as reasoning in contentBlocks', async () => {
1168
+ const model = createGoogleModel({
1169
+ model: 'gemini-3-pro-preview',
1170
+ thinkingConfig: {
1171
+ includeThoughts: true,
1172
+ thinkingBudget: 100,
1173
+ },
1174
+ });
1175
+
1176
+ const result = await model.invoke('What is 2 + 2?');
1177
+ const blocks = result.contentBlocks;
1178
+
1179
+ expect(blocks.length).toBeGreaterThan(0);
1180
+
1181
+ const reasoningBlocks = blocks.filter(
1182
+ (block): block is ContentBlock.Reasoning => block.type === 'reasoning'
1183
+ );
1184
+ expect(reasoningBlocks.length).toBeGreaterThan(0);
1185
+ expect(reasoningBlocks[0].reasoning.length).toBeGreaterThan(0);
1186
+
1187
+ const textBlocks = blocks.filter((block) => block.type === 'text');
1188
+ expect(textBlocks.length).toBeGreaterThan(0);
1189
+ });
1190
+
1191
+ test('stream returns thinking as reasoning in contentBlocks', async () => {
1192
+ const model = createGoogleModel({
1193
+ model: 'gemini-3-pro-preview',
1194
+ thinkingConfig: {
1195
+ includeThoughts: true,
1196
+ thinkingBudget: 100,
1197
+ },
1198
+ });
1199
+
1200
+ let fullMessage: AIMessageChunk | null = null;
1201
+ for await (const chunk of await model.stream('What is 3 + 3?')) {
1202
+ fullMessage = fullMessage ? concat(fullMessage, chunk) : chunk;
1203
+ }
1204
+
1205
+ expect(fullMessage).toBeDefined();
1206
+
1207
+ const blocks = fullMessage!.contentBlocks;
1208
+ expect(blocks.length).toBeGreaterThan(0);
1209
+
1210
+ const reasoningBlocks = blocks.filter(
1211
+ (block): block is ContentBlock.Reasoning => block.type === 'reasoning'
1212
+ );
1213
+ expect(reasoningBlocks.length).toBeGreaterThan(0);
1214
+ expect(reasoningBlocks[0].reasoning.length).toBeGreaterThan(0);
1215
+ });
934
1216
  });