@threadwell/ai 0.0.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (178) hide show
  1. package/README.md +1313 -0
  2. package/dist/api-registry.d.ts +20 -0
  3. package/dist/api-registry.d.ts.map +1 -0
  4. package/dist/api-registry.js +44 -0
  5. package/dist/api-registry.js.map +1 -0
  6. package/dist/bedrock-provider.d.ts +5 -0
  7. package/dist/bedrock-provider.d.ts.map +1 -0
  8. package/dist/bedrock-provider.js +6 -0
  9. package/dist/bedrock-provider.js.map +1 -0
  10. package/dist/cli.d.ts +3 -0
  11. package/dist/cli.d.ts.map +1 -0
  12. package/dist/cli.js +116 -0
  13. package/dist/cli.js.map +1 -0
  14. package/dist/env-api-keys.d.ts +18 -0
  15. package/dist/env-api-keys.d.ts.map +1 -0
  16. package/dist/env-api-keys.js +169 -0
  17. package/dist/env-api-keys.js.map +1 -0
  18. package/dist/index.d.ts +28 -0
  19. package/dist/index.d.ts.map +1 -0
  20. package/dist/index.js +16 -0
  21. package/dist/index.js.map +1 -0
  22. package/dist/models.d.ts +18 -0
  23. package/dist/models.d.ts.map +1 -0
  24. package/dist/models.generated.d.ts +16993 -0
  25. package/dist/models.generated.d.ts.map +1 -0
  26. package/dist/models.generated.js +16228 -0
  27. package/dist/models.generated.js.map +1 -0
  28. package/dist/models.js +71 -0
  29. package/dist/models.js.map +1 -0
  30. package/dist/oauth.d.ts +2 -0
  31. package/dist/oauth.d.ts.map +1 -0
  32. package/dist/oauth.js +2 -0
  33. package/dist/oauth.js.map +1 -0
  34. package/dist/providers/amazon-bedrock.d.ts +38 -0
  35. package/dist/providers/amazon-bedrock.d.ts.map +1 -0
  36. package/dist/providers/amazon-bedrock.js +759 -0
  37. package/dist/providers/amazon-bedrock.js.map +1 -0
  38. package/dist/providers/anthropic.d.ts +54 -0
  39. package/dist/providers/anthropic.d.ts.map +1 -0
  40. package/dist/providers/anthropic.js +951 -0
  41. package/dist/providers/anthropic.js.map +1 -0
  42. package/dist/providers/azure-openai-responses.d.ts +15 -0
  43. package/dist/providers/azure-openai-responses.d.ts.map +1 -0
  44. package/dist/providers/azure-openai-responses.js +208 -0
  45. package/dist/providers/azure-openai-responses.js.map +1 -0
  46. package/dist/providers/cloudflare.d.ts +13 -0
  47. package/dist/providers/cloudflare.d.ts.map +1 -0
  48. package/dist/providers/cloudflare.js +26 -0
  49. package/dist/providers/cloudflare.js.map +1 -0
  50. package/dist/providers/faux.d.ts +56 -0
  51. package/dist/providers/faux.d.ts.map +1 -0
  52. package/dist/providers/faux.js +368 -0
  53. package/dist/providers/faux.js.map +1 -0
  54. package/dist/providers/github-copilot-headers.d.ts +8 -0
  55. package/dist/providers/github-copilot-headers.d.ts.map +1 -0
  56. package/dist/providers/github-copilot-headers.js +29 -0
  57. package/dist/providers/github-copilot-headers.js.map +1 -0
  58. package/dist/providers/google-shared.d.ts +70 -0
  59. package/dist/providers/google-shared.d.ts.map +1 -0
  60. package/dist/providers/google-shared.js +329 -0
  61. package/dist/providers/google-shared.js.map +1 -0
  62. package/dist/providers/google-vertex.d.ts +15 -0
  63. package/dist/providers/google-vertex.d.ts.map +1 -0
  64. package/dist/providers/google-vertex.js +442 -0
  65. package/dist/providers/google-vertex.js.map +1 -0
  66. package/dist/providers/google.d.ts +13 -0
  67. package/dist/providers/google.d.ts.map +1 -0
  68. package/dist/providers/google.js +400 -0
  69. package/dist/providers/google.js.map +1 -0
  70. package/dist/providers/mistral.d.ts +25 -0
  71. package/dist/providers/mistral.d.ts.map +1 -0
  72. package/dist/providers/mistral.js +535 -0
  73. package/dist/providers/mistral.js.map +1 -0
  74. package/dist/providers/openai-codex-responses.d.ts +30 -0
  75. package/dist/providers/openai-codex-responses.d.ts.map +1 -0
  76. package/dist/providers/openai-codex-responses.js +1034 -0
  77. package/dist/providers/openai-codex-responses.js.map +1 -0
  78. package/dist/providers/openai-completions.d.ts +19 -0
  79. package/dist/providers/openai-completions.d.ts.map +1 -0
  80. package/dist/providers/openai-completions.js +925 -0
  81. package/dist/providers/openai-completions.js.map +1 -0
  82. package/dist/providers/openai-responses-shared.d.ts +18 -0
  83. package/dist/providers/openai-responses-shared.d.ts.map +1 -0
  84. package/dist/providers/openai-responses-shared.js +492 -0
  85. package/dist/providers/openai-responses-shared.js.map +1 -0
  86. package/dist/providers/openai-responses.d.ts +13 -0
  87. package/dist/providers/openai-responses.d.ts.map +1 -0
  88. package/dist/providers/openai-responses.js +220 -0
  89. package/dist/providers/openai-responses.js.map +1 -0
  90. package/dist/providers/register-builtins.d.ts +35 -0
  91. package/dist/providers/register-builtins.d.ts.map +1 -0
  92. package/dist/providers/register-builtins.js +243 -0
  93. package/dist/providers/register-builtins.js.map +1 -0
  94. package/dist/providers/simple-options.d.ts +8 -0
  95. package/dist/providers/simple-options.d.ts.map +1 -0
  96. package/dist/providers/simple-options.js +39 -0
  97. package/dist/providers/simple-options.js.map +1 -0
  98. package/dist/providers/transform-messages.d.ts +8 -0
  99. package/dist/providers/transform-messages.d.ts.map +1 -0
  100. package/dist/providers/transform-messages.js +184 -0
  101. package/dist/providers/transform-messages.js.map +1 -0
  102. package/dist/session-resources.d.ts +4 -0
  103. package/dist/session-resources.d.ts.map +1 -0
  104. package/dist/session-resources.js +22 -0
  105. package/dist/session-resources.js.map +1 -0
  106. package/dist/stream.d.ts +8 -0
  107. package/dist/stream.d.ts.map +1 -0
  108. package/dist/stream.js +27 -0
  109. package/dist/stream.js.map +1 -0
  110. package/dist/types.d.ts +405 -0
  111. package/dist/types.d.ts.map +1 -0
  112. package/dist/types.js +2 -0
  113. package/dist/types.js.map +1 -0
  114. package/dist/utils/diagnostics.d.ts +19 -0
  115. package/dist/utils/diagnostics.d.ts.map +1 -0
  116. package/dist/utils/diagnostics.js +25 -0
  117. package/dist/utils/diagnostics.js.map +1 -0
  118. package/dist/utils/event-stream.d.ts +21 -0
  119. package/dist/utils/event-stream.d.ts.map +1 -0
  120. package/dist/utils/event-stream.js +81 -0
  121. package/dist/utils/event-stream.js.map +1 -0
  122. package/dist/utils/hash.d.ts +3 -0
  123. package/dist/utils/hash.d.ts.map +1 -0
  124. package/dist/utils/hash.js +14 -0
  125. package/dist/utils/hash.js.map +1 -0
  126. package/dist/utils/headers.d.ts +2 -0
  127. package/dist/utils/headers.d.ts.map +1 -0
  128. package/dist/utils/headers.js +8 -0
  129. package/dist/utils/headers.js.map +1 -0
  130. package/dist/utils/json-parse.d.ts +16 -0
  131. package/dist/utils/json-parse.d.ts.map +1 -0
  132. package/dist/utils/json-parse.js +113 -0
  133. package/dist/utils/json-parse.js.map +1 -0
  134. package/dist/utils/oauth/anthropic.d.ts +25 -0
  135. package/dist/utils/oauth/anthropic.d.ts.map +1 -0
  136. package/dist/utils/oauth/anthropic.js +335 -0
  137. package/dist/utils/oauth/anthropic.js.map +1 -0
  138. package/dist/utils/oauth/github-copilot.d.ts +30 -0
  139. package/dist/utils/oauth/github-copilot.d.ts.map +1 -0
  140. package/dist/utils/oauth/github-copilot.js +292 -0
  141. package/dist/utils/oauth/github-copilot.js.map +1 -0
  142. package/dist/utils/oauth/index.d.ts +57 -0
  143. package/dist/utils/oauth/index.d.ts.map +1 -0
  144. package/dist/utils/oauth/index.js +121 -0
  145. package/dist/utils/oauth/index.js.map +1 -0
  146. package/dist/utils/oauth/oauth-page.d.ts +3 -0
  147. package/dist/utils/oauth/oauth-page.d.ts.map +1 -0
  148. package/dist/utils/oauth/oauth-page.js +105 -0
  149. package/dist/utils/oauth/oauth-page.js.map +1 -0
  150. package/dist/utils/oauth/openai-codex.d.ts +34 -0
  151. package/dist/utils/oauth/openai-codex.d.ts.map +1 -0
  152. package/dist/utils/oauth/openai-codex.js +385 -0
  153. package/dist/utils/oauth/openai-codex.js.map +1 -0
  154. package/dist/utils/oauth/pkce.d.ts +13 -0
  155. package/dist/utils/oauth/pkce.d.ts.map +1 -0
  156. package/dist/utils/oauth/pkce.js +31 -0
  157. package/dist/utils/oauth/pkce.js.map +1 -0
  158. package/dist/utils/oauth/types.d.ts +57 -0
  159. package/dist/utils/oauth/types.d.ts.map +1 -0
  160. package/dist/utils/oauth/types.js +2 -0
  161. package/dist/utils/oauth/types.js.map +1 -0
  162. package/dist/utils/overflow.d.ts +55 -0
  163. package/dist/utils/overflow.d.ts.map +1 -0
  164. package/dist/utils/overflow.js +146 -0
  165. package/dist/utils/overflow.js.map +1 -0
  166. package/dist/utils/sanitize-unicode.d.ts +22 -0
  167. package/dist/utils/sanitize-unicode.d.ts.map +1 -0
  168. package/dist/utils/sanitize-unicode.js +26 -0
  169. package/dist/utils/sanitize-unicode.js.map +1 -0
  170. package/dist/utils/typebox-helpers.d.ts +17 -0
  171. package/dist/utils/typebox-helpers.d.ts.map +1 -0
  172. package/dist/utils/typebox-helpers.js +21 -0
  173. package/dist/utils/typebox-helpers.js.map +1 -0
  174. package/dist/utils/validation.d.ts +18 -0
  175. package/dist/utils/validation.d.ts.map +1 -0
  176. package/dist/utils/validation.js +281 -0
  177. package/dist/utils/validation.js.map +1 -0
  178. package/package.json +108 -0
@@ -0,0 +1,329 @@
1
+ /**
2
+ * Shared utilities for Google Generative AI and Google Vertex providers.
3
+ */
4
+ import { FinishReason, FunctionCallingConfigMode } from "@google/genai";
5
+ import { sanitizeSurrogates } from "../utils/sanitize-unicode.js";
6
+ import { transformMessages } from "./transform-messages.js";
7
/**
 * Determines whether a streamed Gemini `Part` should be treated as "thinking".
 *
 * Protocol note (Gemini / Vertex AI thought signatures):
 * - `thought: true` is the definitive marker for thinking content (thought summaries).
 * - `thoughtSignature` is an encrypted representation of the model's internal thought process
 *   used to preserve reasoning context across multi-turn interactions.
 * - `thoughtSignature` can appear on ANY part type (text, functionCall, etc.) - it does NOT
 *   indicate the part itself is thinking content, so it is deliberately ignored here.
 * - For non-functionCall responses, the signature appears on the last part for context replay.
 * - When persisting/replaying model outputs, signature-bearing parts must be preserved as-is;
 *   do not merge/move signatures across parts.
 *
 * See: https://ai.google.dev/gemini-api/docs/thought-signatures
 */
export function isThinkingPart(part) {
    const { thought } = part;
    return thought === true;
}
25
/**
 * Retain thought signatures during streaming.
 *
 * Some backends only send `thoughtSignature` on the first delta for a given part/block;
 * later deltas may omit it. This helper keeps the most recent non-empty signature for
 * the current block.
 *
 * Note: signatures are never merged or moved across distinct response parts; this only
 * prevents a known signature from being clobbered by `undefined` within one streamed block.
 */
export function retainThoughtSignature(existing, incoming) {
    const incomingIsUsable = typeof incoming === "string" && incoming.length > 0;
    return incomingIsUsable ? incoming : existing;
}
39
// Thought signatures must be base64 for Google APIs (TYPE_BYTES).
const base64SignaturePattern = /^[A-Za-z0-9+/]+={0,2}$/;
// True only for a non-empty, correctly padded (length % 4 === 0) base64 string.
function isValidThoughtSignature(signature) {
    if (!signature || signature.length % 4 !== 0) {
        return false;
    }
    return base64SignaturePattern.test(signature);
}
48
/**
 * Only keep signatures from the same provider/model and with valid base64.
 * Returns `undefined` otherwise so spread-insertion sites drop the field entirely.
 */
function resolveThoughtSignature(isSameProviderAndModel, signature) {
    if (!isSameProviderAndModel) {
        return undefined;
    }
    return isValidThoughtSignature(signature) ? signature : undefined;
}
54
/**
 * Models via Google APIs that require explicit tool call IDs in function calls/responses.
 */
export function requiresToolCallId(modelId) {
    const idRequiringPrefixes = ["claude-", "gpt-oss-"];
    return idRequiringPrefixes.some((prefix) => modelId.startsWith(prefix));
}
60
// Parse the major version from a Gemini model id, e.g. "gemini-2.5-pro" -> 2.
// Returns undefined for ids that do not look like Gemini models.
function getGeminiMajorVersion(modelId) {
    const versionMatch = modelId.toLowerCase().match(/^gemini(?:-live)?-(\d+)/);
    return versionMatch ? Number.parseInt(versionMatch[1], 10) : undefined;
}
// Whether the model accepts images nested inside functionResponse parts.
// Gemini 3+ does; ids that are not recognizably Gemini are assumed capable.
function supportsMultimodalFunctionResponse(modelId) {
    const majorVersion = getGeminiMajorVersion(modelId);
    return majorVersion === undefined || majorVersion >= 3;
}
73
/**
 * Convert internal messages to Gemini Content[] format.
 *
 * Role mapping:
 * - "user"       -> role "user" with text / inlineData parts.
 * - "assistant"  -> role "model" with text, thought, and functionCall parts.
 * - "toolResult" -> role "user" with a functionResponse part, merged into the
 *                   previous user turn when that turn already carries function responses.
 *
 * @param model   Target model; reads `id`, `provider`, and `input` capabilities.
 * @param context Source context; only `context.messages` is read.
 * @returns Gemini-compatible Content[] array.
 */
export function convertMessages(model, context) {
    const contents = [];
    // Models flagged by requiresToolCallId need IDs restricted to
    // [a-zA-Z0-9_-] and at most 64 characters; other models keep IDs untouched.
    const normalizeToolCallId = (id) => {
        if (!requiresToolCallId(model.id))
            return id;
        return id.replace(/[^a-zA-Z0-9_-]/g, "_").slice(0, 64);
    };
    const transformedMessages = transformMessages(context.messages, model, normalizeToolCallId);
    for (const msg of transformedMessages) {
        if (msg.role === "user") {
            if (typeof msg.content === "string") {
                contents.push({
                    role: "user",
                    parts: [{ text: sanitizeSurrogates(msg.content) }],
                });
            }
            else {
                // Mixed content: text items become text parts; any other item is
                // forwarded as inlineData (mimeType + data).
                const parts = msg.content.map((item) => {
                    if (item.type === "text") {
                        return { text: sanitizeSurrogates(item.text) };
                    }
                    else {
                        return {
                            inlineData: {
                                mimeType: item.mimeType,
                                data: item.data,
                            },
                        };
                    }
                });
                if (parts.length === 0)
                    continue;
                contents.push({
                    role: "user",
                    parts,
                });
            }
        }
        else if (msg.role === "assistant") {
            const parts = [];
            // Check if message is from same provider and model - only then keep thinking blocks
            const isSameProviderAndModel = msg.provider === model.provider && msg.model === model.id;
            for (const block of msg.content) {
                if (block.type === "text") {
                    // Skip empty text blocks
                    if (!block.text || block.text.trim() === "")
                        continue;
                    const thoughtSignature = resolveThoughtSignature(isSameProviderAndModel, block.textSignature);
                    parts.push({
                        text: sanitizeSurrogates(block.text),
                        // Spread so the key is absent (not undefined) when there is no signature.
                        ...(thoughtSignature && { thoughtSignature }),
                    });
                }
                else if (block.type === "thinking") {
                    // Skip empty thinking blocks
                    if (!block.thinking || block.thinking.trim() === "")
                        continue;
                    // Only keep as thinking block if same provider AND same model.
                    // Otherwise convert to plain text (no tags to avoid model mimicking them).
                    if (isSameProviderAndModel) {
                        const thoughtSignature = resolveThoughtSignature(isSameProviderAndModel, block.thinkingSignature);
                        parts.push({
                            thought: true,
                            text: sanitizeSurrogates(block.thinking),
                            ...(thoughtSignature && { thoughtSignature }),
                        });
                    }
                    else {
                        parts.push({
                            text: sanitizeSurrogates(block.thinking),
                        });
                    }
                }
                else if (block.type === "toolCall") {
                    const thoughtSignature = resolveThoughtSignature(isSameProviderAndModel, block.thoughtSignature);
                    const part = {
                        functionCall: {
                            name: block.name,
                            args: block.arguments ?? {},
                            // Tool call IDs are only emitted for models that require them.
                            ...(requiresToolCallId(model.id) ? { id: block.id } : {}),
                        },
                        ...(thoughtSignature && { thoughtSignature }),
                    };
                    parts.push(part);
                }
            }
            if (parts.length === 0)
                continue;
            contents.push({
                role: "model",
                parts,
            });
        }
        else if (msg.role === "toolResult") {
            // Extract text and image content
            const textContent = msg.content.filter((c) => c.type === "text");
            const textResult = textContent.map((c) => c.text).join("\n");
            // Images are only forwarded when the model declares image input support.
            const imageContent = model.input.includes("image")
                ? msg.content.filter((c) => c.type === "image")
                : [];
            const hasText = textResult.length > 0;
            const hasImages = imageContent.length > 0;
            // Gemini 3+ models support multimodal function responses with images nested inside
            // functionResponse.parts. Claude and other non-Gemini models behind Cloud Code Assist /
            // Gemini < 3 still need a separate user image turn.
            const modelSupportsMultimodalFunctionResponse = supportsMultimodalFunctionResponse(model.id);
            // Use "output" key for success, "error" key for errors as per SDK documentation
            const responseValue = hasText ? sanitizeSurrogates(textResult) : hasImages ? "(see attached image)" : "";
            const imageParts = imageContent.map((imageBlock) => ({
                inlineData: {
                    mimeType: imageBlock.mimeType,
                    data: imageBlock.data,
                },
            }));
            const includeId = requiresToolCallId(model.id);
            const functionResponsePart = {
                functionResponse: {
                    name: msg.toolName,
                    response: msg.isError ? { error: responseValue } : { output: responseValue },
                    ...(hasImages && modelSupportsMultimodalFunctionResponse && { parts: imageParts }),
                    ...(includeId ? { id: msg.toolCallId } : {}),
                },
            };
            // Cloud Code Assist API requires all function responses to be in a single user turn.
            // Check if the last content is already a user turn with function responses and merge.
            const lastContent = contents[contents.length - 1];
            if (lastContent?.role === "user" && lastContent.parts?.some((p) => p.functionResponse)) {
                lastContent.parts.push(functionResponsePart);
            }
            else {
                contents.push({
                    role: "user",
                    parts: [functionResponsePart],
                });
            }
            // For Gemini < 3, add images in a separate user message
            if (hasImages && !modelSupportsMultimodalFunctionResponse) {
                contents.push({
                    role: "user",
                    parts: [{ text: "Tool result image:" }, ...imageParts],
                });
            }
        }
    }
    return contents;
}
222
const JSON_SCHEMA_META_DECLARATIONS = new Set([
    "$schema",
    "$id",
    "$anchor",
    "$dynamicAnchor",
    "$vocabulary",
    "$comment",
    "$defs",
    "definitions", // pre-draft-2019-09 equivalent of $defs
]);
/**
 * Strip JSON Schema meta-declarations ($schema, $defs, ...) from a schema object
 * so it fits the OpenAPI-style subset accepted by the legacy `parameters` field.
 *
 * Recurses into nested objects AND arrays: schema members under combinators such
 * as `anyOf`/`oneOf`/`allOf` live inside arrays, and the previous implementation
 * returned arrays untouched, leaving meta keys inside those members.
 * Primitives and null are returned unchanged.
 */
function sanitizeForOpenApi(schema) {
    if (typeof schema !== "object" || schema === null) {
        return schema;
    }
    // Fix: sanitize array elements element-wise instead of passing arrays through.
    if (Array.isArray(schema)) {
        return schema.map((item) => sanitizeForOpenApi(item));
    }
    const result = {};
    for (const [key, value] of Object.entries(schema)) {
        if (JSON_SCHEMA_META_DECLARATIONS.has(key))
            continue;
        result[key] = sanitizeForOpenApi(value);
    }
    return result;
}
247
/**
 * Convert tools to Gemini function declarations format.
 *
 * By default uses `parametersJsonSchema`, which supports full JSON Schema (including
 * anyOf, oneOf, const, etc.). Pass `useParameters = true` to emit the legacy
 * `parameters` field instead (OpenAPI 3.0.3 Schema); this is needed for Cloud Code
 * Assist with Claude models, where the API translates `parameters` into Anthropic's
 * `input_schema`.
 *
 * Returns `undefined` when there are no tools.
 */
export function convertTools(tools, useParameters = false) {
    if (tools.length === 0) {
        return undefined;
    }
    const functionDeclarations = tools.map((tool) => {
        const schemaField = useParameters
            ? { parameters: sanitizeForOpenApi(tool.parameters) }
            : { parametersJsonSchema: tool.parameters };
        return {
            name: tool.name,
            description: tool.description,
            ...schemaField,
        };
    });
    return [{ functionDeclarations }];
}
270
/**
 * Map tool choice string to Gemini FunctionCallingConfigMode.
 * Unrecognized values fall back to AUTO, matching the "auto" case.
 */
export function mapToolChoice(choice) {
    if (choice === "none") {
        return FunctionCallingConfigMode.NONE;
    }
    if (choice === "any") {
        return FunctionCallingConfigMode.ANY;
    }
    // "auto" and anything unrecognized both map to AUTO.
    return FunctionCallingConfigMode.AUTO;
}
285
/**
 * Map Gemini FinishReason to our StopReason.
 *
 * STOP -> "stop", MAX_TOKENS -> "length", every other known FinishReason -> "error".
 * Any value outside the known enum throws so new upstream reasons surface loudly
 * instead of being silently misreported.
 */
export function mapStopReason(reason) {
    if (reason === FinishReason.STOP) {
        return "stop";
    }
    if (reason === FinishReason.MAX_TOKENS) {
        return "length";
    }
    // All remaining known finish reasons collapse to "error".
    const errorReasons = [
        FinishReason.BLOCKLIST,
        FinishReason.PROHIBITED_CONTENT,
        FinishReason.SPII,
        FinishReason.SAFETY,
        FinishReason.IMAGE_SAFETY,
        FinishReason.IMAGE_PROHIBITED_CONTENT,
        FinishReason.IMAGE_RECITATION,
        FinishReason.IMAGE_OTHER,
        FinishReason.RECITATION,
        FinishReason.FINISH_REASON_UNSPECIFIED,
        FinishReason.OTHER,
        FinishReason.LANGUAGE,
        FinishReason.MALFORMED_FUNCTION_CALL,
        FinishReason.UNEXPECTED_TOOL_CALL,
        FinishReason.NO_IMAGE,
    ];
    if (errorReasons.includes(reason)) {
        return "error";
    }
    throw new Error(`Unhandled stop reason: ${reason}`);
}
316
/**
 * Map string finish reason to our StopReason (for raw API responses).
 * "STOP" -> "stop", "MAX_TOKENS" -> "length", anything else -> "error".
 */
export function mapStopReasonString(reason) {
    if (reason === "STOP") {
        return "stop";
    }
    return reason === "MAX_TOKENS" ? "length" : "error";
}
329
+ //# sourceMappingURL=google-shared.js.map
@@ -0,0 +1 @@
1
+ {"version":3,"file":"google-shared.js","sourceRoot":"","sources":["../../src/providers/google-shared.ts"],"names":[],"mappings":"AAAA;;GAEG;AAEH,OAAO,EAAgB,YAAY,EAAE,yBAAyB,EAAa,MAAM,eAAe,CAAC;AAEjG,OAAO,EAAE,kBAAkB,EAAE,MAAM,8BAA8B,CAAC;AAClE,OAAO,EAAE,iBAAiB,EAAE,MAAM,yBAAyB,CAAC;AAU5D;;;;;;;;;;;;;;GAcG;AACH,MAAM,UAAU,cAAc,CAAC,IAAgD,EAAW;IACzF,OAAO,IAAI,CAAC,OAAO,KAAK,IAAI,CAAC;AAAA,CAC7B;AAED;;;;;;;;GAQG;AACH,MAAM,UAAU,sBAAsB,CAAC,QAA4B,EAAE,QAA4B,EAAsB;IACtH,IAAI,OAAO,QAAQ,KAAK,QAAQ,IAAI,QAAQ,CAAC,MAAM,GAAG,CAAC;QAAE,OAAO,QAAQ,CAAC;IACzE,OAAO,QAAQ,CAAC;AAAA,CAChB;AAED,kEAAkE;AAClE,MAAM,sBAAsB,GAAG,wBAAwB,CAAC;AAExD,SAAS,uBAAuB,CAAC,SAA6B,EAAW;IACxE,IAAI,CAAC,SAAS;QAAE,OAAO,KAAK,CAAC;IAC7B,IAAI,SAAS,CAAC,MAAM,GAAG,CAAC,KAAK,CAAC;QAAE,OAAO,KAAK,CAAC;IAC7C,OAAO,sBAAsB,CAAC,IAAI,CAAC,SAAS,CAAC,CAAC;AAAA,CAC9C;AAED;;GAEG;AACH,SAAS,uBAAuB,CAAC,sBAA+B,EAAE,SAA6B,EAAsB;IACpH,OAAO,sBAAsB,IAAI,uBAAuB,CAAC,SAAS,CAAC,CAAC,CAAC,CAAC,SAAS,CAAC,CAAC,CAAC,SAAS,CAAC;AAAA,CAC5F;AAED;;GAEG;AACH,MAAM,UAAU,kBAAkB,CAAC,OAAe,EAAW;IAC5D,OAAO,OAAO,CAAC,UAAU,CAAC,SAAS,CAAC,IAAI,OAAO,CAAC,UAAU,CAAC,UAAU,CAAC,CAAC;AAAA,CACvE;AAED,SAAS,qBAAqB,CAAC,OAAe,EAAsB;IACnE,MAAM,KAAK,GAAG,OAAO,CAAC,WAAW,EAAE,CAAC,KAAK,CAAC,yBAAyB,CAAC,CAAC;IACrE,IAAI,CAAC,KAAK;QAAE,OAAO,SAAS,CAAC;IAC7B,OAAO,MAAM,CAAC,QAAQ,CAAC,KAAK,CAAC,CAAC,CAAC,EAAE,EAAE,CAAC,CAAC;AAAA,CACrC;AAED,SAAS,kCAAkC,CAAC,OAAe,EAAW;IACrE,MAAM,kBAAkB,GAAG,qBAAqB,CAAC,OAAO,CAAC,CAAC;IAC1D,IAAI,kBAAkB,KAAK,SAAS,EAAE,CAAC;QACtC,OAAO,kBAAkB,IAAI,CAAC,CAAC;IAChC,CAAC;IACD,OAAO,IAAI,CAAC;AAAA,CACZ;AAED;;GAEG;AACH,MAAM,UAAU,eAAe,CAA0B,KAAe,EAAE,OAAgB,EAAa;IACtG,MAAM,QAAQ,GAAc,EAAE,CAAC;IAC/B,MAAM,mBAAmB,GAAG,CAAC,EAAU,EAAU,EAAE,CAAC;QACnD,IAAI,CAAC,kBAAkB,CAAC,KAAK,CAAC,EAAE,CAAC;YAAE,OAAO,EAAE,CAAC;QAC7C,OAAO,EAAE,CAAC,OAAO,CAAC,iBAAiB,EAAE,GAAG,CAAC,CAAC,KAAK,CAAC,CAAC,EAAE,EAAE,CAAC,CAAC;IAAA,CACvD,CAAC;IAEF,MAAM,mBAAmB,GAAG,iBAAiB,CAAC,OAAO,CAAC,QAAQ,EAAE,KAAK,EAAE,mBAAmB,CAAC,CAAC;IAE5F,KAAK,MAAM,GAAG,IAAI,mBAAmB,EAAE,CAAC;QACvC,IAAI,GAAG,C
AAC,IAAI,KAAK,MAAM,EAAE,CAAC;YACzB,IAAI,OAAO,GAAG,CAAC,OAAO,KAAK,QAAQ,EAAE,CAAC;gBACrC,QAAQ,CAAC,IAAI,CAAC;oBACb,IAAI,EAAE,MAAM;oBACZ,KAAK,EAAE,CAAC,EAAE,IAAI,EAAE,kBAAkB,CAAC,GAAG,CAAC,OAAO,CAAC,EAAE,CAAC;iBAClD,CAAC,CAAC;YACJ,CAAC;iBAAM,CAAC;gBACP,MAAM,KAAK,GAAW,GAAG,CAAC,OAAO,CAAC,GAAG,CAAC,CAAC,IAAI,EAAE,EAAE,CAAC;oBAC/C,IAAI,IAAI,CAAC,IAAI,KAAK,MAAM,EAAE,CAAC;wBAC1B,OAAO,EAAE,IAAI,EAAE,kBAAkB,CAAC,IAAI,CAAC,IAAI,CAAC,EAAE,CAAC;oBAChD,CAAC;yBAAM,CAAC;wBACP,OAAO;4BACN,UAAU,EAAE;gCACX,QAAQ,EAAE,IAAI,CAAC,QAAQ;gCACvB,IAAI,EAAE,IAAI,CAAC,IAAI;6BACf;yBACD,CAAC;oBACH,CAAC;gBAAA,CACD,CAAC,CAAC;gBACH,IAAI,KAAK,CAAC,MAAM,KAAK,CAAC;oBAAE,SAAS;gBACjC,QAAQ,CAAC,IAAI,CAAC;oBACb,IAAI,EAAE,MAAM;oBACZ,KAAK;iBACL,CAAC,CAAC;YACJ,CAAC;QACF,CAAC;aAAM,IAAI,GAAG,CAAC,IAAI,KAAK,WAAW,EAAE,CAAC;YACrC,MAAM,KAAK,GAAW,EAAE,CAAC;YACzB,oFAAoF;YACpF,MAAM,sBAAsB,GAAG,GAAG,CAAC,QAAQ,KAAK,KAAK,CAAC,QAAQ,IAAI,GAAG,CAAC,KAAK,KAAK,KAAK,CAAC,EAAE,CAAC;YAEzF,KAAK,MAAM,KAAK,IAAI,GAAG,CAAC,OAAO,EAAE,CAAC;gBACjC,IAAI,KAAK,CAAC,IAAI,KAAK,MAAM,EAAE,CAAC;oBAC3B,yBAAyB;oBACzB,IAAI,CAAC,KAAK,CAAC,IAAI,IAAI,KAAK,CAAC,IAAI,CAAC,IAAI,EAAE,KAAK,EAAE;wBAAE,SAAS;oBACtD,MAAM,gBAAgB,GAAG,uBAAuB,CAAC,sBAAsB,EAAE,KAAK,CAAC,aAAa,CAAC,CAAC;oBAC9F,KAAK,CAAC,IAAI,CAAC;wBACV,IAAI,EAAE,kBAAkB,CAAC,KAAK,CAAC,IAAI,CAAC;wBACpC,GAAG,CAAC,gBAAgB,IAAI,EAAE,gBAAgB,EAAE,CAAC;qBAC7C,CAAC,CAAC;gBACJ,CAAC;qBAAM,IAAI,KAAK,CAAC,IAAI,KAAK,UAAU,EAAE,CAAC;oBACtC,6BAA6B;oBAC7B,IAAI,CAAC,KAAK,CAAC,QAAQ,IAAI,KAAK,CAAC,QAAQ,CAAC,IAAI,EAAE,KAAK,EAAE;wBAAE,SAAS;oBAC9D,8DAA8D;oBAC9D,0EAA0E;oBAC1E,IAAI,sBAAsB,EAAE,CAAC;wBAC5B,MAAM,gBAAgB,GAAG,uBAAuB,CAAC,sBAAsB,EAAE,KAAK,CAAC,iBAAiB,CAAC,CAAC;wBAClG,KAAK,CAAC,IAAI,CAAC;4BACV,OAAO,EAAE,IAAI;4BACb,IAAI,EAAE,kBAAkB,CAAC,KAAK,CAAC,QAAQ,CAAC;4BACxC,GAAG,CAAC,gBAAgB,IAAI,EAAE,gBAAgB,EAAE,CAAC;yBAC7C,CAAC,CAAC;oBACJ,CAAC;yBAAM,CAAC;wBACP,KAAK,CAAC,IAAI,CAAC;4BACV,IAAI,EAAE,kBAAkB,CAAC,KAAK,CAAC,QAAQ,CAAC;yBACxC,CAAC,CAAC;oBACJ,CAAC;gBACF,CAAC;qBAAM,IAAI,KAAK,CAAC,IAAI,KAAK,UAAU,EAAE,CAAC;oBACtC,MAAM,gBAA
gB,GAAG,uBAAuB,CAAC,sBAAsB,EAAE,KAAK,CAAC,gBAAgB,CAAC,CAAC;oBACjG,MAAM,IAAI,GAAS;wBAClB,YAAY,EAAE;4BACb,IAAI,EAAE,KAAK,CAAC,IAAI;4BAChB,IAAI,EAAE,KAAK,CAAC,SAAS,IAAI,EAAE;4BAC3B,GAAG,CAAC,kBAAkB,CAAC,KAAK,CAAC,EAAE,CAAC,CAAC,CAAC,CAAC,EAAE,EAAE,EAAE,KAAK,CAAC,EAAE,EAAE,CAAC,CAAC,CAAC,EAAE,CAAC;yBACzD;wBACD,GAAG,CAAC,gBAAgB,IAAI,EAAE,gBAAgB,EAAE,CAAC;qBAC7C,CAAC;oBACF,KAAK,CAAC,IAAI,CAAC,IAAI,CAAC,CAAC;gBAClB,CAAC;YACF,CAAC;YAED,IAAI,KAAK,CAAC,MAAM,KAAK,CAAC;gBAAE,SAAS;YACjC,QAAQ,CAAC,IAAI,CAAC;gBACb,IAAI,EAAE,OAAO;gBACb,KAAK;aACL,CAAC,CAAC;QACJ,CAAC;aAAM,IAAI,GAAG,CAAC,IAAI,KAAK,YAAY,EAAE,CAAC;YACtC,iCAAiC;YACjC,MAAM,WAAW,GAAG,GAAG,CAAC,OAAO,CAAC,MAAM,CAAC,CAAC,CAAC,EAAoB,EAAE,CAAC,CAAC,CAAC,IAAI,KAAK,MAAM,CAAC,CAAC;YACnF,MAAM,UAAU,GAAG,WAAW,CAAC,GAAG,CAAC,CAAC,CAAC,EAAE,EAAE,CAAC,CAAC,CAAC,IAAI,CAAC,CAAC,IAAI,CAAC,IAAI,CAAC,CAAC;YAC7D,MAAM,YAAY,GAAG,KAAK,CAAC,KAAK,CAAC,QAAQ,CAAC,OAAO,CAAC;gBACjD,CAAC,CAAC,GAAG,CAAC,OAAO,CAAC,MAAM,CAAC,CAAC,CAAC,EAAqB,EAAE,CAAC,CAAC,CAAC,IAAI,KAAK,OAAO,CAAC;gBAClE,CAAC,CAAC,EAAE,CAAC;YAEN,MAAM,OAAO,GAAG,UAAU,CAAC,MAAM,GAAG,CAAC,CAAC;YACtC,MAAM,SAAS,GAAG,YAAY,CAAC,MAAM,GAAG,CAAC,CAAC;YAE1C,mFAAmF;YACnF,wFAAwF;YACxF,qDAAqD;YACrD,MAAM,uCAAuC,GAAG,kCAAkC,CAAC,KAAK,CAAC,EAAE,CAAC,CAAC;YAE7F,gFAAgF;YAChF,MAAM,aAAa,GAAG,OAAO,CAAC,CAAC,CAAC,kBAAkB,CAAC,UAAU,CAAC,CAAC,CAAC,CAAC,SAAS,CAAC,CAAC,CAAC,sBAAsB,CAAC,CAAC,CAAC,EAAE,CAAC;YAEzG,MAAM,UAAU,GAAW,YAAY,CAAC,GAAG,CAAC,CAAC,UAAU,EAAE,EAAE,CAAC,CAAC;gBAC5D,UAAU,EAAE;oBACX,QAAQ,EAAE,UAAU,CAAC,QAAQ;oBAC7B,IAAI,EAAE,UAAU,CAAC,IAAI;iBACrB;aACD,CAAC,CAAC,CAAC;YAEJ,MAAM,SAAS,GAAG,kBAAkB,CAAC,KAAK,CAAC,EAAE,CAAC,CAAC;YAC/C,MAAM,oBAAoB,GAAS;gBAClC,gBAAgB,EAAE;oBACjB,IAAI,EAAE,GAAG,CAAC,QAAQ;oBAClB,QAAQ,EAAE,GAAG,CAAC,OAAO,CAAC,CAAC,CAAC,EAAE,KAAK,EAAE,aAAa,EAAE,CAAC,CAAC,CAAC,EAAE,MAAM,EAAE,aAAa,EAAE;oBAC5E,GAAG,CAAC,SAAS,IAAI,uCAAuC,IAAI,EAAE,KAAK,EAAE,UAAU,EAAE,CAAC;oBAClF,GAAG,CAAC,SAAS,CAAC,CAAC,CAAC,EAAE,EAAE,EAAE,GAAG,CAAC,UAAU,EAAE,CAAC,CAAC,CAAC,EAAE,CAAC;iBAC5C;aACD,CAAC;YAEF,qFAAqF;YACrF,sF
AAsF;YACtF,MAAM,WAAW,GAAG,QAAQ,CAAC,QAAQ,CAAC,MAAM,GAAG,CAAC,CAAC,CAAC;YAClD,IAAI,WAAW,EAAE,IAAI,KAAK,MAAM,IAAI,WAAW,CAAC,KAAK,EAAE,IAAI,CAAC,CAAC,CAAC,EAAE,EAAE,CAAC,CAAC,CAAC,gBAAgB,CAAC,EAAE,CAAC;gBACxF,WAAW,CAAC,KAAK,CAAC,IAAI,CAAC,oBAAoB,CAAC,CAAC;YAC9C,CAAC;iBAAM,CAAC;gBACP,QAAQ,CAAC,IAAI,CAAC;oBACb,IAAI,EAAE,MAAM;oBACZ,KAAK,EAAE,CAAC,oBAAoB,CAAC;iBAC7B,CAAC,CAAC;YACJ,CAAC;YAED,wDAAwD;YACxD,IAAI,SAAS,IAAI,CAAC,uCAAuC,EAAE,CAAC;gBAC3D,QAAQ,CAAC,IAAI,CAAC;oBACb,IAAI,EAAE,MAAM;oBACZ,KAAK,EAAE,CAAC,EAAE,IAAI,EAAE,oBAAoB,EAAE,EAAE,GAAG,UAAU,CAAC;iBACtD,CAAC,CAAC;YACJ,CAAC;QACF,CAAC;IACF,CAAC;IAED,OAAO,QAAQ,CAAC;AAAA,CAChB;AAED,MAAM,6BAA6B,GAAG,IAAI,GAAG,CAAC;IAC7C,SAAS;IACT,KAAK;IACL,SAAS;IACT,gBAAgB;IAChB,aAAa;IACb,UAAU;IACV,OAAO;IACP,aAAa,EAAE,wCAAwC;CACvD,CAAC,CAAC;AAEH;;GAEG;AACH,SAAS,kBAAkB,CAAC,MAAe,EAAW;IACrD,IAAI,OAAO,MAAM,KAAK,QAAQ,IAAI,MAAM,KAAK,IAAI,IAAI,KAAK,CAAC,OAAO,CAAC,MAAM,CAAC,EAAE,CAAC;QAC5E,OAAO,MAAM,CAAC;IACf,CAAC;IAED,MAAM,MAAM,GAA4B,EAAE,CAAC;IAC3C,KAAK,MAAM,CAAC,GAAG,EAAE,KAAK,CAAC,IAAI,MAAM,CAAC,OAAO,CAAC,MAAM,CAAC,EAAE,CAAC;QACnD,IAAI,6BAA6B,CAAC,GAAG,CAAC,GAAG,CAAC;YAAE,SAAS;QACrD,MAAM,CAAC,GAAG,CAAC,GAAG,kBAAkB,CAAC,KAAK,CAAC,CAAC;IACzC,CAAC;IACD,OAAO,MAAM,CAAC;AAAA,CACd;AAED;;;;;;;GAOG;AACH,MAAM,UAAU,YAAY,CAC3B,KAAa,EACb,aAAa,GAAG,KAAK,EAC+C;IACpE,IAAI,KAAK,CAAC,MAAM,KAAK,CAAC;QAAE,OAAO,SAAS,CAAC;IACzC,OAAO;QACN;YACC,oBAAoB,EAAE,KAAK,CAAC,GAAG,CAAC,CAAC,IAAI,EAAE,EAAE,CAAC,CAAC;gBAC1C,IAAI,EAAE,IAAI,CAAC,IAAI;gBACf,WAAW,EAAE,IAAI,CAAC,WAAW;gBAC7B,GAAG,CAAC,aAAa;oBAChB,CAAC,CAAC,EAAE,UAAU,EAAE,kBAAkB,CAAC,IAAI,CAAC,UAAqB,CAAC,EAAE;oBAChE,CAAC,CAAC,EAAE,oBAAoB,EAAE,IAAI,CAAC,UAAU,EAAE,CAAC;aAC7C,CAAC,CAAC;SACH;KACD,CAAC;AAAA,CACF;AAED;;GAEG;AACH,MAAM,UAAU,aAAa,CAAC,MAAc,EAA6B;IACxE,QAAQ,MAAM,EAAE,CAAC;QAChB,KAAK,MAAM;YACV,OAAO,yBAAyB,CAAC,IAAI,CAAC;QACvC,KAAK,MAAM;YACV,OAAO,yBAAyB,CAAC,IAAI,CAAC;QACvC,KAAK,KAAK;YACT,OAAO,yBAAyB,CAAC,GAAG,CAAC;QACtC;YACC,OAAO,yBAAyB,CAAC,IAAI,CAAC;IACxC,CAAC;AAAA,CACD;AAED;;GAEG;AACH,MAAM,UAAU,aAAa,CAAC,MA
AoB,EAAc;IAC/D,QAAQ,MAAM,EAAE,CAAC;QAChB,KAAK,YAAY,CAAC,IAAI;YACrB,OAAO,MAAM,CAAC;QACf,KAAK,YAAY,CAAC,UAAU;YAC3B,OAAO,QAAQ,CAAC;QACjB,KAAK,YAAY,CAAC,SAAS,CAAC;QAC5B,KAAK,YAAY,CAAC,kBAAkB,CAAC;QACrC,KAAK,YAAY,CAAC,IAAI,CAAC;QACvB,KAAK,YAAY,CAAC,MAAM,CAAC;QACzB,KAAK,YAAY,CAAC,YAAY,CAAC;QAC/B,KAAK,YAAY,CAAC,wBAAwB,CAAC;QAC3C,KAAK,YAAY,CAAC,gBAAgB,CAAC;QACnC,KAAK,YAAY,CAAC,WAAW,CAAC;QAC9B,KAAK,YAAY,CAAC,UAAU,CAAC;QAC7B,KAAK,YAAY,CAAC,yBAAyB,CAAC;QAC5C,KAAK,YAAY,CAAC,KAAK,CAAC;QACxB,KAAK,YAAY,CAAC,QAAQ,CAAC;QAC3B,KAAK,YAAY,CAAC,uBAAuB,CAAC;QAC1C,KAAK,YAAY,CAAC,oBAAoB,CAAC;QACvC,KAAK,YAAY,CAAC,QAAQ;YACzB,OAAO,OAAO,CAAC;QAChB,SAAS,CAAC;YACT,MAAM,WAAW,GAAU,MAAM,CAAC;YAClC,MAAM,IAAI,KAAK,CAAC,0BAA0B,WAAW,EAAE,CAAC,CAAC;QAC1D,CAAC;IACF,CAAC;AAAA,CACD;AAED;;GAEG;AACH,MAAM,UAAU,mBAAmB,CAAC,MAAc,EAAc;IAC/D,QAAQ,MAAM,EAAE,CAAC;QAChB,KAAK,MAAM;YACV,OAAO,MAAM,CAAC;QACf,KAAK,YAAY;YAChB,OAAO,QAAQ,CAAC;QACjB;YACC,OAAO,OAAO,CAAC;IACjB,CAAC;AAAA,CACD","sourcesContent":["/**\n * Shared utilities for Google Generative AI and Google Vertex providers.\n */\n\nimport { type Content, FinishReason, FunctionCallingConfigMode, type Part } from \"@google/genai\";\nimport type { Context, ImageContent, Model, StopReason, TextContent, Tool } from \"../types.js\";\nimport { sanitizeSurrogates } from \"../utils/sanitize-unicode.js\";\nimport { transformMessages } from \"./transform-messages.js\";\n\ntype GoogleApiType = \"google-generative-ai\" | \"google-vertex\";\n\n/**\n * Thinking level for Gemini 3 models.\n * Mirrors Google's ThinkingLevel enum values.\n */\nexport type GoogleThinkingLevel = \"THINKING_LEVEL_UNSPECIFIED\" | \"MINIMAL\" | \"LOW\" | \"MEDIUM\" | \"HIGH\";\n\n/**\n * Determines whether a streamed Gemini `Part` should be treated as \"thinking\".\n *\n * Protocol note (Gemini / Vertex AI thought signatures):\n * - `thought: true` is the definitive marker for thinking content (thought summaries).\n * - `thoughtSignature` is an encrypted representation of the model's internal thought 
process\n * used to preserve reasoning context across multi-turn interactions.\n * - `thoughtSignature` can appear on ANY part type (text, functionCall, etc.) - it does NOT\n * indicate the part itself is thinking content.\n * - For non-functionCall responses, the signature appears on the last part for context replay.\n * - When persisting/replaying model outputs, signature-bearing parts must be preserved as-is;\n * do not merge/move signatures across parts.\n *\n * See: https://ai.google.dev/gemini-api/docs/thought-signatures\n */\nexport function isThinkingPart(part: Pick<Part, \"thought\" | \"thoughtSignature\">): boolean {\n\treturn part.thought === true;\n}\n\n/**\n * Retain thought signatures during streaming.\n *\n * Some backends only send `thoughtSignature` on the first delta for a given part/block; later deltas may omit it.\n * This helper preserves the last non-empty signature for the current block.\n *\n * Note: this does NOT merge or move signatures across distinct response parts. It only prevents\n * a signature from being overwritten with `undefined` within the same streamed block.\n */\nexport function retainThoughtSignature(existing: string | undefined, incoming: string | undefined): string | undefined {\n\tif (typeof incoming === \"string\" && incoming.length > 0) return incoming;\n\treturn existing;\n}\n\n// Thought signatures must be base64 for Google APIs (TYPE_BYTES).\nconst base64SignaturePattern = /^[A-Za-z0-9+/]+={0,2}$/;\n\nfunction isValidThoughtSignature(signature: string | undefined): boolean {\n\tif (!signature) return false;\n\tif (signature.length % 4 !== 0) return false;\n\treturn base64SignaturePattern.test(signature);\n}\n\n/**\n * Only keep signatures from the same provider/model and with valid base64.\n */\nfunction resolveThoughtSignature(isSameProviderAndModel: boolean, signature: string | undefined): string | undefined {\n\treturn isSameProviderAndModel && isValidThoughtSignature(signature) ? 
signature : undefined;\n}\n\n/**\n * Models via Google APIs that require explicit tool call IDs in function calls/responses.\n */\nexport function requiresToolCallId(modelId: string): boolean {\n\treturn modelId.startsWith(\"claude-\") || modelId.startsWith(\"gpt-oss-\");\n}\n\nfunction getGeminiMajorVersion(modelId: string): number | undefined {\n\tconst match = modelId.toLowerCase().match(/^gemini(?:-live)?-(\\d+)/);\n\tif (!match) return undefined;\n\treturn Number.parseInt(match[1], 10);\n}\n\nfunction supportsMultimodalFunctionResponse(modelId: string): boolean {\n\tconst geminiMajorVersion = getGeminiMajorVersion(modelId);\n\tif (geminiMajorVersion !== undefined) {\n\t\treturn geminiMajorVersion >= 3;\n\t}\n\treturn true;\n}\n\n/**\n * Convert internal messages to Gemini Content[] format.\n */\nexport function convertMessages<T extends GoogleApiType>(model: Model<T>, context: Context): Content[] {\n\tconst contents: Content[] = [];\n\tconst normalizeToolCallId = (id: string): string => {\n\t\tif (!requiresToolCallId(model.id)) return id;\n\t\treturn id.replace(/[^a-zA-Z0-9_-]/g, \"_\").slice(0, 64);\n\t};\n\n\tconst transformedMessages = transformMessages(context.messages, model, normalizeToolCallId);\n\n\tfor (const msg of transformedMessages) {\n\t\tif (msg.role === \"user\") {\n\t\t\tif (typeof msg.content === \"string\") {\n\t\t\t\tcontents.push({\n\t\t\t\t\trole: \"user\",\n\t\t\t\t\tparts: [{ text: sanitizeSurrogates(msg.content) }],\n\t\t\t\t});\n\t\t\t} else {\n\t\t\t\tconst parts: Part[] = msg.content.map((item) => {\n\t\t\t\t\tif (item.type === \"text\") {\n\t\t\t\t\t\treturn { text: sanitizeSurrogates(item.text) };\n\t\t\t\t\t} else {\n\t\t\t\t\t\treturn {\n\t\t\t\t\t\t\tinlineData: {\n\t\t\t\t\t\t\t\tmimeType: item.mimeType,\n\t\t\t\t\t\t\t\tdata: item.data,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t};\n\t\t\t\t\t}\n\t\t\t\t});\n\t\t\t\tif (parts.length === 0) continue;\n\t\t\t\tcontents.push({\n\t\t\t\t\trole: 
\"user\",\n\t\t\t\t\tparts,\n\t\t\t\t});\n\t\t\t}\n\t\t} else if (msg.role === \"assistant\") {\n\t\t\tconst parts: Part[] = [];\n\t\t\t// Check if message is from same provider and model - only then keep thinking blocks\n\t\t\tconst isSameProviderAndModel = msg.provider === model.provider && msg.model === model.id;\n\n\t\t\tfor (const block of msg.content) {\n\t\t\t\tif (block.type === \"text\") {\n\t\t\t\t\t// Skip empty text blocks\n\t\t\t\t\tif (!block.text || block.text.trim() === \"\") continue;\n\t\t\t\t\tconst thoughtSignature = resolveThoughtSignature(isSameProviderAndModel, block.textSignature);\n\t\t\t\t\tparts.push({\n\t\t\t\t\t\ttext: sanitizeSurrogates(block.text),\n\t\t\t\t\t\t...(thoughtSignature && { thoughtSignature }),\n\t\t\t\t\t});\n\t\t\t\t} else if (block.type === \"thinking\") {\n\t\t\t\t\t// Skip empty thinking blocks\n\t\t\t\t\tif (!block.thinking || block.thinking.trim() === \"\") continue;\n\t\t\t\t\t// Only keep as thinking block if same provider AND same model\n\t\t\t\t\t// Otherwise convert to plain text (no tags to avoid model mimicking them)\n\t\t\t\t\tif (isSameProviderAndModel) {\n\t\t\t\t\t\tconst thoughtSignature = resolveThoughtSignature(isSameProviderAndModel, block.thinkingSignature);\n\t\t\t\t\t\tparts.push({\n\t\t\t\t\t\t\tthought: true,\n\t\t\t\t\t\t\ttext: sanitizeSurrogates(block.thinking),\n\t\t\t\t\t\t\t...(thoughtSignature && { thoughtSignature }),\n\t\t\t\t\t\t});\n\t\t\t\t\t} else {\n\t\t\t\t\t\tparts.push({\n\t\t\t\t\t\t\ttext: sanitizeSurrogates(block.thinking),\n\t\t\t\t\t\t});\n\t\t\t\t\t}\n\t\t\t\t} else if (block.type === \"toolCall\") {\n\t\t\t\t\tconst thoughtSignature = resolveThoughtSignature(isSameProviderAndModel, block.thoughtSignature);\n\t\t\t\t\tconst part: Part = {\n\t\t\t\t\t\tfunctionCall: {\n\t\t\t\t\t\t\tname: block.name,\n\t\t\t\t\t\t\targs: block.arguments ?? {},\n\t\t\t\t\t\t\t...(requiresToolCallId(model.id) ? 
{ id: block.id } : {}),\n\t\t\t\t\t\t},\n\t\t\t\t\t\t...(thoughtSignature && { thoughtSignature }),\n\t\t\t\t\t};\n\t\t\t\t\tparts.push(part);\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif (parts.length === 0) continue;\n\t\t\tcontents.push({\n\t\t\t\trole: \"model\",\n\t\t\t\tparts,\n\t\t\t});\n\t\t} else if (msg.role === \"toolResult\") {\n\t\t\t// Extract text and image content\n\t\t\tconst textContent = msg.content.filter((c): c is TextContent => c.type === \"text\");\n\t\t\tconst textResult = textContent.map((c) => c.text).join(\"\\n\");\n\t\t\tconst imageContent = model.input.includes(\"image\")\n\t\t\t\t? msg.content.filter((c): c is ImageContent => c.type === \"image\")\n\t\t\t\t: [];\n\n\t\t\tconst hasText = textResult.length > 0;\n\t\t\tconst hasImages = imageContent.length > 0;\n\n\t\t\t// Gemini 3+ models support multimodal function responses with images nested inside\n\t\t\t// functionResponse.parts. Claude and other non-Gemini models behind Cloud Code Assist /\n\t\t\t// Gemini < 3 still needs a separate user image turn.\n\t\t\tconst modelSupportsMultimodalFunctionResponse = supportsMultimodalFunctionResponse(model.id);\n\n\t\t\t// Use \"output\" key for success, \"error\" key for errors as per SDK documentation\n\t\t\tconst responseValue = hasText ? sanitizeSurrogates(textResult) : hasImages ? \"(see attached image)\" : \"\";\n\n\t\t\tconst imageParts: Part[] = imageContent.map((imageBlock) => ({\n\t\t\t\tinlineData: {\n\t\t\t\t\tmimeType: imageBlock.mimeType,\n\t\t\t\t\tdata: imageBlock.data,\n\t\t\t\t},\n\t\t\t}));\n\n\t\t\tconst includeId = requiresToolCallId(model.id);\n\t\t\tconst functionResponsePart: Part = {\n\t\t\t\tfunctionResponse: {\n\t\t\t\t\tname: msg.toolName,\n\t\t\t\t\tresponse: msg.isError ? { error: responseValue } : { output: responseValue },\n\t\t\t\t\t...(hasImages && modelSupportsMultimodalFunctionResponse && { parts: imageParts }),\n\t\t\t\t\t...(includeId ? 
{ id: msg.toolCallId } : {}),\n\t\t\t\t},\n\t\t\t};\n\n\t\t\t// Cloud Code Assist API requires all function responses to be in a single user turn.\n\t\t\t// Check if the last content is already a user turn with function responses and merge.\n\t\t\tconst lastContent = contents[contents.length - 1];\n\t\t\tif (lastContent?.role === \"user\" && lastContent.parts?.some((p) => p.functionResponse)) {\n\t\t\t\tlastContent.parts.push(functionResponsePart);\n\t\t\t} else {\n\t\t\t\tcontents.push({\n\t\t\t\t\trole: \"user\",\n\t\t\t\t\tparts: [functionResponsePart],\n\t\t\t\t});\n\t\t\t}\n\n\t\t\t// For Gemini < 3, add images in a separate user message\n\t\t\tif (hasImages && !modelSupportsMultimodalFunctionResponse) {\n\t\t\t\tcontents.push({\n\t\t\t\t\trole: \"user\",\n\t\t\t\t\tparts: [{ text: \"Tool result image:\" }, ...imageParts],\n\t\t\t\t});\n\t\t\t}\n\t\t}\n\t}\n\n\treturn contents;\n}\n\nconst JSON_SCHEMA_META_DECLARATIONS = new Set([\n\t\"$schema\",\n\t\"$id\",\n\t\"$anchor\",\n\t\"$dynamicAnchor\",\n\t\"$vocabulary\",\n\t\"$comment\",\n\t\"$defs\",\n\t\"definitions\", // pre-draft-2019-09 equivalent of $defs\n]);\n\n/**\n * Strip meta-declarations from a schema obj\n */\nfunction sanitizeForOpenApi(schema: unknown): unknown {\n\tif (typeof schema !== \"object\" || schema === null || Array.isArray(schema)) {\n\t\treturn schema;\n\t}\n\n\tconst result: Record<string, unknown> = {};\n\tfor (const [key, value] of Object.entries(schema)) {\n\t\tif (JSON_SCHEMA_META_DECLARATIONS.has(key)) continue;\n\t\tresult[key] = sanitizeForOpenApi(value);\n\t}\n\treturn result;\n}\n\n/**\n * Convert tools to Gemini function declarations format.\n *\n * By default uses `parametersJsonSchema` which supports full JSON Schema (including\n * anyOf, oneOf, const, etc.). Set `useParameters` to true to use the legacy `parameters`\n * field instead (OpenAPI 3.03 Schema). 
This is needed for Cloud Code Assist with Claude\n * models, where the API translates `parameters` into Anthropic's `input_schema`.\n */\nexport function convertTools(\n\ttools: Tool[],\n\tuseParameters = false,\n): { functionDeclarations: Record<string, unknown>[] }[] | undefined {\n\tif (tools.length === 0) return undefined;\n\treturn [\n\t\t{\n\t\t\tfunctionDeclarations: tools.map((tool) => ({\n\t\t\t\tname: tool.name,\n\t\t\t\tdescription: tool.description,\n\t\t\t\t...(useParameters\n\t\t\t\t\t? { parameters: sanitizeForOpenApi(tool.parameters as unknown) }\n\t\t\t\t\t: { parametersJsonSchema: tool.parameters }),\n\t\t\t})),\n\t\t},\n\t];\n}\n\n/**\n * Map tool choice string to Gemini FunctionCallingConfigMode.\n */\nexport function mapToolChoice(choice: string): FunctionCallingConfigMode {\n\tswitch (choice) {\n\t\tcase \"auto\":\n\t\t\treturn FunctionCallingConfigMode.AUTO;\n\t\tcase \"none\":\n\t\t\treturn FunctionCallingConfigMode.NONE;\n\t\tcase \"any\":\n\t\t\treturn FunctionCallingConfigMode.ANY;\n\t\tdefault:\n\t\t\treturn FunctionCallingConfigMode.AUTO;\n\t}\n}\n\n/**\n * Map Gemini FinishReason to our StopReason.\n */\nexport function mapStopReason(reason: FinishReason): StopReason {\n\tswitch (reason) {\n\t\tcase FinishReason.STOP:\n\t\t\treturn \"stop\";\n\t\tcase FinishReason.MAX_TOKENS:\n\t\t\treturn \"length\";\n\t\tcase FinishReason.BLOCKLIST:\n\t\tcase FinishReason.PROHIBITED_CONTENT:\n\t\tcase FinishReason.SPII:\n\t\tcase FinishReason.SAFETY:\n\t\tcase FinishReason.IMAGE_SAFETY:\n\t\tcase FinishReason.IMAGE_PROHIBITED_CONTENT:\n\t\tcase FinishReason.IMAGE_RECITATION:\n\t\tcase FinishReason.IMAGE_OTHER:\n\t\tcase FinishReason.RECITATION:\n\t\tcase FinishReason.FINISH_REASON_UNSPECIFIED:\n\t\tcase FinishReason.OTHER:\n\t\tcase FinishReason.LANGUAGE:\n\t\tcase FinishReason.MALFORMED_FUNCTION_CALL:\n\t\tcase FinishReason.UNEXPECTED_TOOL_CALL:\n\t\tcase FinishReason.NO_IMAGE:\n\t\t\treturn \"error\";\n\t\tdefault: {\n\t\t\tconst _exhaustive: never 
= reason;\n\t\t\tthrow new Error(`Unhandled stop reason: ${_exhaustive}`);\n\t\t}\n\t}\n}\n\n/**\n * Map string finish reason to our StopReason (for raw API responses).\n */\nexport function mapStopReasonString(reason: string): StopReason {\n\tswitch (reason) {\n\t\tcase \"STOP\":\n\t\t\treturn \"stop\";\n\t\tcase \"MAX_TOKENS\":\n\t\t\treturn \"length\";\n\t\tdefault:\n\t\t\treturn \"error\";\n\t}\n}\n"]}
@@ -0,0 +1,15 @@
1
+ import type { SimpleStreamOptions, StreamFunction, StreamOptions } from "../types.js";
2
+ import type { GoogleThinkingLevel } from "./google-shared.js";
3
+ export interface GoogleVertexOptions extends StreamOptions {
4
+ toolChoice?: "auto" | "none" | "any";
5
+ thinking?: {
6
+ enabled: boolean;
7
+ budgetTokens?: number;
8
+ level?: GoogleThinkingLevel;
9
+ };
10
+ project?: string;
11
+ location?: string;
12
+ }
13
+ export declare const streamGoogleVertex: StreamFunction<"google-vertex", GoogleVertexOptions>;
14
+ export declare const streamSimpleGoogleVertex: StreamFunction<"google-vertex", SimpleStreamOptions>;
15
+ //# sourceMappingURL=google-vertex.d.ts.map
@@ -0,0 +1 @@
1
+ {"version":3,"file":"google-vertex.d.ts","sourceRoot":"","sources":["../../src/providers/google-vertex.ts"],"names":[],"mappings":"AAUA,OAAO,KAAK,EAMX,mBAAmB,EACnB,cAAc,EACd,aAAa,EAKb,MAAM,aAAa,CAAC;AAGrB,OAAO,KAAK,EAAE,mBAAmB,EAAE,MAAM,oBAAoB,CAAC;AAW9D,MAAM,WAAW,mBAAoB,SAAQ,aAAa;IACzD,UAAU,CAAC,EAAE,MAAM,GAAG,MAAM,GAAG,KAAK,CAAC;IACrC,QAAQ,CAAC,EAAE;QACV,OAAO,EAAE,OAAO,CAAC;QACjB,YAAY,CAAC,EAAE,MAAM,CAAC;QACtB,KAAK,CAAC,EAAE,mBAAmB,CAAC;KAC5B,CAAC;IACF,OAAO,CAAC,EAAE,MAAM,CAAC;IACjB,QAAQ,CAAC,EAAE,MAAM,CAAC;CAClB;AAgBD,eAAO,MAAM,kBAAkB,EAAE,cAAc,CAAC,eAAe,EAAE,mBAAmB,CAsOnF,CAAC;AAEF,eAAO,MAAM,wBAAwB,EAAE,cAAc,CAAC,eAAe,EAAE,mBAAmB,CAkCzF,CAAC","sourcesContent":["import {\n\ttype GenerateContentConfig,\n\ttype GenerateContentParameters,\n\tGoogleGenAI,\n\ttype HttpOptions,\n\tResourceScope,\n\ttype ThinkingConfig,\n\tThinkingLevel,\n} from \"@google/genai\";\nimport { calculateCost, clampThinkingLevel } from \"../models.js\";\nimport type {\n\tApi,\n\tAssistantMessage,\n\tContext,\n\tModel,\n\tThinkingLevel as PiThinkingLevel,\n\tSimpleStreamOptions,\n\tStreamFunction,\n\tStreamOptions,\n\tTextContent,\n\tThinkingBudgets,\n\tThinkingContent,\n\tToolCall,\n} from \"../types.js\";\nimport { AssistantMessageEventStream } from \"../utils/event-stream.js\";\nimport { sanitizeSurrogates } from \"../utils/sanitize-unicode.js\";\nimport type { GoogleThinkingLevel } from \"./google-shared.js\";\nimport {\n\tconvertMessages,\n\tconvertTools,\n\tisThinkingPart,\n\tmapStopReason,\n\tmapToolChoice,\n\tretainThoughtSignature,\n} from \"./google-shared.js\";\nimport { buildBaseOptions } from \"./simple-options.js\";\n\nexport interface GoogleVertexOptions extends StreamOptions {\n\ttoolChoice?: \"auto\" | \"none\" | \"any\";\n\tthinking?: {\n\t\tenabled: boolean;\n\t\tbudgetTokens?: number; // -1 for dynamic, 0 to disable\n\t\tlevel?: GoogleThinkingLevel;\n\t};\n\tproject?: string;\n\tlocation?: string;\n}\n\nconst API_VERSION = \"v1\";\nconst GCP_VERTEX_CREDENTIALS_MARKER = 
\"gcp-vertex-credentials\";\n\nconst THINKING_LEVEL_MAP: Record<GoogleThinkingLevel, ThinkingLevel> = {\n\tTHINKING_LEVEL_UNSPECIFIED: ThinkingLevel.THINKING_LEVEL_UNSPECIFIED,\n\tMINIMAL: ThinkingLevel.MINIMAL,\n\tLOW: ThinkingLevel.LOW,\n\tMEDIUM: ThinkingLevel.MEDIUM,\n\tHIGH: ThinkingLevel.HIGH,\n};\n\n// Counter for generating unique tool call IDs\nlet toolCallCounter = 0;\n\nexport const streamGoogleVertex: StreamFunction<\"google-vertex\", GoogleVertexOptions> = (\n\tmodel: Model<\"google-vertex\">,\n\tcontext: Context,\n\toptions?: GoogleVertexOptions,\n): AssistantMessageEventStream => {\n\tconst stream = new AssistantMessageEventStream();\n\n\t(async () => {\n\t\tconst output: AssistantMessage = {\n\t\t\trole: \"assistant\",\n\t\t\tcontent: [],\n\t\t\tapi: \"google-vertex\" as Api,\n\t\t\tprovider: model.provider,\n\t\t\tmodel: model.id,\n\t\t\tusage: {\n\t\t\t\tinput: 0,\n\t\t\t\toutput: 0,\n\t\t\t\tcacheRead: 0,\n\t\t\t\tcacheWrite: 0,\n\t\t\t\ttotalTokens: 0,\n\t\t\t\tcost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0, total: 0 },\n\t\t\t},\n\t\t\tstopReason: \"stop\",\n\t\t\ttimestamp: Date.now(),\n\t\t};\n\n\t\ttry {\n\t\t\tconst apiKey = resolveApiKey(options);\n\t\t\t// Create the client using either a Vertex API key, if provided, or ADC with project and location\n\t\t\tconst client = apiKey\n\t\t\t\t? 
createClientWithApiKey(model, apiKey, options?.headers)\n\t\t\t\t: createClient(model, resolveProject(options), resolveLocation(options), options?.headers);\n\t\t\tlet params = buildParams(model, context, options);\n\t\t\tconst nextParams = await options?.onPayload?.(params, model);\n\t\t\tif (nextParams !== undefined) {\n\t\t\t\tparams = nextParams as GenerateContentParameters;\n\t\t\t}\n\t\t\tconst googleStream = await client.models.generateContentStream(params);\n\n\t\t\tstream.push({ type: \"start\", partial: output });\n\t\t\tlet currentBlock: TextContent | ThinkingContent | null = null;\n\t\t\tconst blocks = output.content;\n\t\t\tconst blockIndex = () => blocks.length - 1;\n\t\t\tfor await (const chunk of googleStream) {\n\t\t\t\t// Vertex uses the same @google/genai GenerateContentResponse type as Gemini.\n\t\t\t\t// responseId is documented there as an output-only identifier for each response.\n\t\t\t\toutput.responseId ||= chunk.responseId;\n\t\t\t\tconst candidate = chunk.candidates?.[0];\n\t\t\t\tif (candidate?.content?.parts) {\n\t\t\t\t\tfor (const part of candidate.content.parts) {\n\t\t\t\t\t\tif (part.text !== undefined) {\n\t\t\t\t\t\t\tconst isThinking = isThinkingPart(part);\n\t\t\t\t\t\t\tif (\n\t\t\t\t\t\t\t\t!currentBlock ||\n\t\t\t\t\t\t\t\t(isThinking && currentBlock.type !== \"thinking\") ||\n\t\t\t\t\t\t\t\t(!isThinking && currentBlock.type !== \"text\")\n\t\t\t\t\t\t\t) {\n\t\t\t\t\t\t\t\tif (currentBlock) {\n\t\t\t\t\t\t\t\t\tif (currentBlock.type === \"text\") {\n\t\t\t\t\t\t\t\t\t\tstream.push({\n\t\t\t\t\t\t\t\t\t\t\ttype: \"text_end\",\n\t\t\t\t\t\t\t\t\t\t\tcontentIndex: blocks.length - 1,\n\t\t\t\t\t\t\t\t\t\t\tcontent: currentBlock.text,\n\t\t\t\t\t\t\t\t\t\t\tpartial: output,\n\t\t\t\t\t\t\t\t\t\t});\n\t\t\t\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\t\t\t\tstream.push({\n\t\t\t\t\t\t\t\t\t\t\ttype: \"thinking_end\",\n\t\t\t\t\t\t\t\t\t\t\tcontentIndex: blockIndex(),\n\t\t\t\t\t\t\t\t\t\t\tcontent: 
currentBlock.thinking,\n\t\t\t\t\t\t\t\t\t\t\tpartial: output,\n\t\t\t\t\t\t\t\t\t\t});\n\t\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\tif (isThinking) {\n\t\t\t\t\t\t\t\t\tcurrentBlock = { type: \"thinking\", thinking: \"\", thinkingSignature: undefined };\n\t\t\t\t\t\t\t\t\toutput.content.push(currentBlock);\n\t\t\t\t\t\t\t\t\tstream.push({ type: \"thinking_start\", contentIndex: blockIndex(), partial: output });\n\t\t\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\t\t\tcurrentBlock = { type: \"text\", text: \"\" };\n\t\t\t\t\t\t\t\t\toutput.content.push(currentBlock);\n\t\t\t\t\t\t\t\t\tstream.push({ type: \"text_start\", contentIndex: blockIndex(), partial: output });\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tif (currentBlock.type === \"thinking\") {\n\t\t\t\t\t\t\t\tcurrentBlock.thinking += part.text;\n\t\t\t\t\t\t\t\tcurrentBlock.thinkingSignature = retainThoughtSignature(\n\t\t\t\t\t\t\t\t\tcurrentBlock.thinkingSignature,\n\t\t\t\t\t\t\t\t\tpart.thoughtSignature,\n\t\t\t\t\t\t\t\t);\n\t\t\t\t\t\t\t\tstream.push({\n\t\t\t\t\t\t\t\t\ttype: \"thinking_delta\",\n\t\t\t\t\t\t\t\t\tcontentIndex: blockIndex(),\n\t\t\t\t\t\t\t\t\tdelta: part.text,\n\t\t\t\t\t\t\t\t\tpartial: output,\n\t\t\t\t\t\t\t\t});\n\t\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\t\tcurrentBlock.text += part.text;\n\t\t\t\t\t\t\t\tcurrentBlock.textSignature = retainThoughtSignature(\n\t\t\t\t\t\t\t\t\tcurrentBlock.textSignature,\n\t\t\t\t\t\t\t\t\tpart.thoughtSignature,\n\t\t\t\t\t\t\t\t);\n\t\t\t\t\t\t\t\tstream.push({\n\t\t\t\t\t\t\t\t\ttype: \"text_delta\",\n\t\t\t\t\t\t\t\t\tcontentIndex: blockIndex(),\n\t\t\t\t\t\t\t\t\tdelta: part.text,\n\t\t\t\t\t\t\t\t\tpartial: output,\n\t\t\t\t\t\t\t\t});\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tif (part.functionCall) {\n\t\t\t\t\t\t\tif (currentBlock) {\n\t\t\t\t\t\t\t\tif (currentBlock.type === \"text\") {\n\t\t\t\t\t\t\t\t\tstream.push({\n\t\t\t\t\t\t\t\t\t\ttype: \"text_end\",\n\t\t\t\t\t\t\t\t\t\tcontentIndex: 
blockIndex(),\n\t\t\t\t\t\t\t\t\t\tcontent: currentBlock.text,\n\t\t\t\t\t\t\t\t\t\tpartial: output,\n\t\t\t\t\t\t\t\t\t});\n\t\t\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\t\t\tstream.push({\n\t\t\t\t\t\t\t\t\t\ttype: \"thinking_end\",\n\t\t\t\t\t\t\t\t\t\tcontentIndex: blockIndex(),\n\t\t\t\t\t\t\t\t\t\tcontent: currentBlock.thinking,\n\t\t\t\t\t\t\t\t\t\tpartial: output,\n\t\t\t\t\t\t\t\t\t});\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\tcurrentBlock = null;\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\tconst providedId = part.functionCall.id;\n\t\t\t\t\t\t\tconst needsNewId =\n\t\t\t\t\t\t\t\t!providedId || output.content.some((b) => b.type === \"toolCall\" && b.id === providedId);\n\t\t\t\t\t\t\tconst toolCallId = needsNewId\n\t\t\t\t\t\t\t\t? `${part.functionCall.name}_${Date.now()}_${++toolCallCounter}`\n\t\t\t\t\t\t\t\t: providedId;\n\n\t\t\t\t\t\t\tconst toolCall: ToolCall = {\n\t\t\t\t\t\t\t\ttype: \"toolCall\",\n\t\t\t\t\t\t\t\tid: toolCallId,\n\t\t\t\t\t\t\t\tname: part.functionCall.name || \"\",\n\t\t\t\t\t\t\t\targuments: (part.functionCall.args as Record<string, any>) ?? 
{},\n\t\t\t\t\t\t\t\t...(part.thoughtSignature && { thoughtSignature: part.thoughtSignature }),\n\t\t\t\t\t\t\t};\n\n\t\t\t\t\t\t\toutput.content.push(toolCall);\n\t\t\t\t\t\t\tstream.push({ type: \"toolcall_start\", contentIndex: blockIndex(), partial: output });\n\t\t\t\t\t\t\tstream.push({\n\t\t\t\t\t\t\t\ttype: \"toolcall_delta\",\n\t\t\t\t\t\t\t\tcontentIndex: blockIndex(),\n\t\t\t\t\t\t\t\tdelta: JSON.stringify(toolCall.arguments),\n\t\t\t\t\t\t\t\tpartial: output,\n\t\t\t\t\t\t\t});\n\t\t\t\t\t\t\tstream.push({ type: \"toolcall_end\", contentIndex: blockIndex(), toolCall, partial: output });\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tif (candidate?.finishReason) {\n\t\t\t\t\toutput.stopReason = mapStopReason(candidate.finishReason);\n\t\t\t\t\tif (output.content.some((b) => b.type === \"toolCall\")) {\n\t\t\t\t\t\toutput.stopReason = \"toolUse\";\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tif (chunk.usageMetadata) {\n\t\t\t\t\toutput.usage = {\n\t\t\t\t\t\tinput:\n\t\t\t\t\t\t\t(chunk.usageMetadata.promptTokenCount || 0) - (chunk.usageMetadata.cachedContentTokenCount || 0),\n\t\t\t\t\t\toutput:\n\t\t\t\t\t\t\t(chunk.usageMetadata.candidatesTokenCount || 0) + (chunk.usageMetadata.thoughtsTokenCount || 0),\n\t\t\t\t\t\tcacheRead: chunk.usageMetadata.cachedContentTokenCount || 0,\n\t\t\t\t\t\tcacheWrite: 0,\n\t\t\t\t\t\ttotalTokens: chunk.usageMetadata.totalTokenCount || 0,\n\t\t\t\t\t\tcost: {\n\t\t\t\t\t\t\tinput: 0,\n\t\t\t\t\t\t\toutput: 0,\n\t\t\t\t\t\t\tcacheRead: 0,\n\t\t\t\t\t\t\tcacheWrite: 0,\n\t\t\t\t\t\t\ttotal: 0,\n\t\t\t\t\t\t},\n\t\t\t\t\t};\n\t\t\t\t\tcalculateCost(model, output.usage);\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif (currentBlock) {\n\t\t\t\tif (currentBlock.type === \"text\") {\n\t\t\t\t\tstream.push({\n\t\t\t\t\t\ttype: \"text_end\",\n\t\t\t\t\t\tcontentIndex: blockIndex(),\n\t\t\t\t\t\tcontent: currentBlock.text,\n\t\t\t\t\t\tpartial: output,\n\t\t\t\t\t});\n\t\t\t\t} else {\n\t\t\t\t\tstream.push({\n\t\t\t\t\t\ttype: 
\"thinking_end\",\n\t\t\t\t\t\tcontentIndex: blockIndex(),\n\t\t\t\t\t\tcontent: currentBlock.thinking,\n\t\t\t\t\t\tpartial: output,\n\t\t\t\t\t});\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif (options?.signal?.aborted) {\n\t\t\t\tthrow new Error(\"Request was aborted\");\n\t\t\t}\n\n\t\t\tif (output.stopReason === \"aborted\" || output.stopReason === \"error\") {\n\t\t\t\tthrow new Error(\"An unknown error occurred\");\n\t\t\t}\n\n\t\t\tstream.push({ type: \"done\", reason: output.stopReason, message: output });\n\t\t\tstream.end();\n\t\t} catch (error) {\n\t\t\t// Remove internal index property used during streaming\n\t\t\tfor (const block of output.content) {\n\t\t\t\tif (\"index\" in block) {\n\t\t\t\t\tdelete (block as { index?: number }).index;\n\t\t\t\t}\n\t\t\t}\n\t\t\toutput.stopReason = options?.signal?.aborted ? \"aborted\" : \"error\";\n\t\t\toutput.errorMessage = error instanceof Error ? error.message : JSON.stringify(error);\n\t\t\tstream.push({ type: \"error\", reason: output.stopReason, error: output });\n\t\t\tstream.end();\n\t\t}\n\t})();\n\n\treturn stream;\n};\n\nexport const streamSimpleGoogleVertex: StreamFunction<\"google-vertex\", SimpleStreamOptions> = (\n\tmodel: Model<\"google-vertex\">,\n\tcontext: Context,\n\toptions?: SimpleStreamOptions,\n): AssistantMessageEventStream => {\n\tconst base = buildBaseOptions(model, options, undefined);\n\tif (!options?.reasoning) {\n\t\treturn streamGoogleVertex(model, context, {\n\t\t\t...base,\n\t\t\tthinking: { enabled: false },\n\t\t} satisfies GoogleVertexOptions);\n\t}\n\n\tconst clampedReasoning = clampThinkingLevel(model, options.reasoning);\n\tconst effort = (clampedReasoning === \"off\" ? 
\"high\" : clampedReasoning) as ClampedThinkingLevel;\n\tconst geminiModel = model as unknown as Model<\"google-generative-ai\">;\n\n\tif (isGemini3ProModel(geminiModel) || isGemini3FlashModel(geminiModel)) {\n\t\treturn streamGoogleVertex(model, context, {\n\t\t\t...base,\n\t\t\tthinking: {\n\t\t\t\tenabled: true,\n\t\t\t\tlevel: getGemini3ThinkingLevel(effort, geminiModel),\n\t\t\t},\n\t\t} satisfies GoogleVertexOptions);\n\t}\n\n\treturn streamGoogleVertex(model, context, {\n\t\t...base,\n\t\tthinking: {\n\t\t\tenabled: true,\n\t\t\tbudgetTokens: getGoogleBudget(geminiModel, effort, options.thinkingBudgets),\n\t\t},\n\t} satisfies GoogleVertexOptions);\n};\n\nfunction createClient(\n\tmodel: Model<\"google-vertex\">,\n\tproject: string,\n\tlocation: string,\n\toptionsHeaders?: Record<string, string>,\n): GoogleGenAI {\n\treturn new GoogleGenAI({\n\t\tvertexai: true,\n\t\tproject,\n\t\tlocation,\n\t\tapiVersion: API_VERSION,\n\t\thttpOptions: buildHttpOptions(model, optionsHeaders),\n\t});\n}\n\nfunction createClientWithApiKey(\n\tmodel: Model<\"google-vertex\">,\n\tapiKey: string,\n\toptionsHeaders?: Record<string, string>,\n): GoogleGenAI {\n\treturn new GoogleGenAI({\n\t\tvertexai: true,\n\t\tapiKey,\n\t\tapiVersion: API_VERSION,\n\t\thttpOptions: buildHttpOptions(model, optionsHeaders),\n\t});\n}\n\nfunction buildHttpOptions(\n\tmodel: Model<\"google-vertex\">,\n\toptionsHeaders?: Record<string, string>,\n): HttpOptions | undefined {\n\tconst httpOptions: HttpOptions = {};\n\tconst baseUrl = resolveCustomBaseUrl(model.baseUrl);\n\tif (baseUrl) {\n\t\thttpOptions.baseUrl = baseUrl;\n\t\thttpOptions.baseUrlResourceScope = ResourceScope.COLLECTION;\n\t\tif (baseUrlIncludesApiVersion(baseUrl)) {\n\t\t\thttpOptions.apiVersion = \"\";\n\t\t}\n\t}\n\n\tif (model.headers || optionsHeaders) {\n\t\thttpOptions.headers = { ...model.headers, ...optionsHeaders };\n\t}\n\n\treturn Object.keys(httpOptions).length > 0 ? 
httpOptions : undefined;\n}\n\nfunction resolveCustomBaseUrl(baseUrl: string): string | undefined {\n\tconst trimmed = baseUrl.trim();\n\tif (!trimmed || trimmed.includes(\"{location}\")) {\n\t\treturn undefined;\n\t}\n\treturn trimmed;\n}\n\nfunction baseUrlIncludesApiVersion(baseUrl: string): boolean {\n\ttry {\n\t\tconst url = new URL(baseUrl);\n\t\treturn url.pathname.split(\"/\").some((part) => /^v\\d+(?:beta\\d*)?$/.test(part));\n\t} catch {\n\t\treturn /(?:^|\\/)v\\d+(?:beta\\d*)?(?:\\/|$)/.test(baseUrl);\n\t}\n}\n\nfunction resolveApiKey(options?: GoogleVertexOptions): string | undefined {\n\tconst apiKey = options?.apiKey?.trim() || process.env.GOOGLE_CLOUD_API_KEY?.trim();\n\tif (!apiKey || apiKey === GCP_VERTEX_CREDENTIALS_MARKER || isPlaceholderApiKey(apiKey)) {\n\t\treturn undefined;\n\t}\n\treturn apiKey;\n}\n\nfunction isPlaceholderApiKey(apiKey: string): boolean {\n\treturn /^<[^>]+>$/.test(apiKey);\n}\n\nfunction resolveProject(options?: GoogleVertexOptions): string {\n\tconst project = options?.project || process.env.GOOGLE_CLOUD_PROJECT || process.env.GCLOUD_PROJECT;\n\tif (!project) {\n\t\tthrow new Error(\n\t\t\t\"Vertex AI requires a project ID. Set GOOGLE_CLOUD_PROJECT/GCLOUD_PROJECT or pass project in options.\",\n\t\t);\n\t}\n\treturn project;\n}\n\nfunction resolveLocation(options?: GoogleVertexOptions): string {\n\tconst location = options?.location || process.env.GOOGLE_CLOUD_LOCATION;\n\tif (!location) {\n\t\tthrow new Error(\"Vertex AI requires a location. 
Set GOOGLE_CLOUD_LOCATION or pass location in options.\");\n\t}\n\treturn location;\n}\n\nfunction buildParams(\n\tmodel: Model<\"google-vertex\">,\n\tcontext: Context,\n\toptions: GoogleVertexOptions = {},\n): GenerateContentParameters {\n\tconst contents = convertMessages(model, context);\n\n\tconst generationConfig: GenerateContentConfig = {};\n\tif (options.temperature !== undefined) {\n\t\tgenerationConfig.temperature = options.temperature;\n\t}\n\tif (options.maxTokens !== undefined) {\n\t\tgenerationConfig.maxOutputTokens = options.maxTokens;\n\t}\n\n\tconst config: GenerateContentConfig = {\n\t\t...(Object.keys(generationConfig).length > 0 && generationConfig),\n\t\t...(context.systemPrompt && { systemInstruction: sanitizeSurrogates(context.systemPrompt) }),\n\t\t...(context.tools && context.tools.length > 0 && { tools: convertTools(context.tools) }),\n\t};\n\n\tif (context.tools && context.tools.length > 0 && options.toolChoice) {\n\t\tconfig.toolConfig = {\n\t\t\tfunctionCallingConfig: {\n\t\t\t\tmode: mapToolChoice(options.toolChoice),\n\t\t\t},\n\t\t};\n\t} else {\n\t\tconfig.toolConfig = undefined;\n\t}\n\n\tif (options.thinking?.enabled && model.reasoning) {\n\t\tconst thinkingConfig: ThinkingConfig = { includeThoughts: true };\n\t\tif (options.thinking.level !== undefined) {\n\t\t\tthinkingConfig.thinkingLevel = THINKING_LEVEL_MAP[options.thinking.level];\n\t\t} else if (options.thinking.budgetTokens !== undefined) {\n\t\t\tthinkingConfig.thinkingBudget = options.thinking.budgetTokens;\n\t\t}\n\t\tconfig.thinkingConfig = thinkingConfig;\n\t} else if (model.reasoning && options.thinking && !options.thinking.enabled) {\n\t\tconfig.thinkingConfig = getDisabledThinkingConfig(model);\n\t}\n\n\tif (options.signal) {\n\t\tif (options.signal.aborted) {\n\t\t\tthrow new Error(\"Request aborted\");\n\t\t}\n\t\tconfig.abortSignal = options.signal;\n\t}\n\n\tconst params: GenerateContentParameters = {\n\t\tmodel: 
model.id,\n\t\tcontents,\n\t\tconfig,\n\t};\n\n\treturn params;\n}\n\ntype ClampedThinkingLevel = Exclude<PiThinkingLevel, \"xhigh\">;\n\nfunction isGemini3ProModel(model: Model<\"google-generative-ai\">): boolean {\n\treturn /gemini-3(?:\\.\\d+)?-pro/.test(model.id.toLowerCase());\n}\n\nfunction isGemini3FlashModel(model: Model<\"google-generative-ai\">): boolean {\n\treturn /gemini-3(?:\\.\\d+)?-flash/.test(model.id.toLowerCase());\n}\n\nfunction getDisabledThinkingConfig(model: Model<\"google-vertex\">): ThinkingConfig {\n\t// Google docs: Gemini 3.1 Pro cannot disable thinking, and Gemini 3 Flash / Flash-Lite\n\t// do not support full thinking-off either. For Gemini 3 models, use the lowest supported\n\t// thinkingLevel without includeThoughts so hidden thinking remains invisible to Threadwell.\n\tconst geminiModel = model as unknown as Model<\"google-generative-ai\">;\n\tif (isGemini3ProModel(geminiModel)) {\n\t\treturn { thinkingLevel: ThinkingLevel.LOW };\n\t}\n\tif (isGemini3FlashModel(geminiModel)) {\n\t\treturn { thinkingLevel: ThinkingLevel.MINIMAL };\n\t}\n\n\t// Gemini 2.x supports disabling via thinkingBudget = 0.\n\treturn { thinkingBudget: 0 };\n}\n\nfunction getGemini3ThinkingLevel(\n\teffort: ClampedThinkingLevel,\n\tmodel: Model<\"google-generative-ai\">,\n): GoogleThinkingLevel {\n\tif (isGemini3ProModel(model)) {\n\t\tswitch (effort) {\n\t\t\tcase \"minimal\":\n\t\t\tcase \"low\":\n\t\t\t\treturn \"LOW\";\n\t\t\tcase \"medium\":\n\t\t\tcase \"high\":\n\t\t\t\treturn \"HIGH\";\n\t\t}\n\t}\n\tswitch (effort) {\n\t\tcase \"minimal\":\n\t\t\treturn \"MINIMAL\";\n\t\tcase \"low\":\n\t\t\treturn \"LOW\";\n\t\tcase \"medium\":\n\t\t\treturn \"MEDIUM\";\n\t\tcase \"high\":\n\t\t\treturn \"HIGH\";\n\t}\n}\n\nfunction getGoogleBudget(\n\tmodel: Model<\"google-generative-ai\">,\n\teffort: ClampedThinkingLevel,\n\tcustomBudgets?: ThinkingBudgets,\n): number {\n\tif (customBudgets?.[effort] !== undefined) {\n\t\treturn customBudgets[effort]!;\n\t}\n\n\tif 
(model.id.includes(\"2.5-pro\")) {\n\t\tconst budgets: Record<ClampedThinkingLevel, number> = {\n\t\t\tminimal: 128,\n\t\t\tlow: 2048,\n\t\t\tmedium: 8192,\n\t\t\thigh: 32768,\n\t\t};\n\t\treturn budgets[effort];\n\t}\n\n\tif (model.id.includes(\"2.5-flash\")) {\n\t\tconst budgets: Record<ClampedThinkingLevel, number> = {\n\t\t\tminimal: 128,\n\t\t\tlow: 2048,\n\t\t\tmedium: 8192,\n\t\t\thigh: 24576,\n\t\t};\n\t\treturn budgets[effort];\n\t}\n\n\treturn -1;\n}\n"]}