@ai-sdk/google 4.0.0-beta.9 → 4.0.0-canary.51

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (47)
  1. package/CHANGELOG.md +354 -4
  2. package/README.md +6 -4
  3. package/dist/index.d.ts +97 -54
  4. package/dist/index.js +1643 -575
  5. package/dist/index.js.map +1 -1
  6. package/dist/internal/index.d.ts +62 -22
  7. package/dist/internal/index.js +1261 -449
  8. package/dist/internal/index.js.map +1 -1
  9. package/docs/{15-google-generative-ai.mdx → 15-google.mdx} +46 -40
  10. package/package.json +13 -14
  11. package/src/{convert-google-generative-ai-usage.ts → convert-google-usage.ts} +11 -4
  12. package/src/convert-json-schema-to-openapi-schema.ts +1 -1
  13. package/src/convert-to-google-messages.ts +585 -0
  14. package/src/{google-generative-ai-embedding-options.ts → google-embedding-model-options.ts} +2 -2
  15. package/src/{google-generative-ai-embedding-model.ts → google-embedding-model.ts} +28 -15
  16. package/src/google-error.ts +1 -1
  17. package/src/google-files.ts +225 -0
  18. package/src/google-image-model-options.ts +23 -0
  19. package/src/{google-generative-ai-image-model.ts → google-image-model.ts} +61 -49
  20. package/src/{google-generative-ai-image-settings.ts → google-image-settings.ts} +2 -2
  21. package/src/google-json-accumulator.ts +336 -0
  22. package/src/{google-generative-ai-options.ts → google-language-model-options.ts} +32 -5
  23. package/src/{google-generative-ai-language-model.ts → google-language-model.ts} +586 -191
  24. package/src/google-prepare-tools.ts +68 -8
  25. package/src/google-prompt.ts +82 -0
  26. package/src/google-provider.ts +56 -47
  27. package/src/google-video-model-options.ts +43 -0
  28. package/src/{google-generative-ai-video-model.ts → google-video-model.ts} +11 -50
  29. package/src/{google-generative-ai-video-settings.ts → google-video-settings.ts} +2 -1
  30. package/src/index.ts +28 -9
  31. package/src/internal/index.ts +2 -2
  32. package/src/{map-google-generative-ai-finish-reason.ts → map-google-finish-reason.ts} +2 -2
  33. package/src/tool/code-execution.ts +2 -2
  34. package/src/tool/enterprise-web-search.ts +9 -3
  35. package/src/tool/file-search.ts +5 -7
  36. package/src/tool/google-maps.ts +3 -2
  37. package/src/tool/google-search.ts +10 -11
  38. package/src/tool/url-context.ts +4 -2
  39. package/src/tool/vertex-rag-store.ts +9 -6
  40. package/dist/index.d.mts +0 -384
  41. package/dist/index.mjs +0 -2519
  42. package/dist/index.mjs.map +0 -1
  43. package/dist/internal/index.d.mts +0 -287
  44. package/dist/internal/index.mjs +0 -1708
  45. package/dist/internal/index.mjs.map +0 -1
  46. package/src/convert-to-google-generative-ai-messages.ts +0 -239
  47. package/src/google-generative-ai-prompt.ts +0 -47
package/dist/index.js CHANGED
@@ -1,81 +1,78 @@
1
- "use strict";
2
- var __defProp = Object.defineProperty;
3
- var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
4
- var __getOwnPropNames = Object.getOwnPropertyNames;
5
- var __hasOwnProp = Object.prototype.hasOwnProperty;
6
- var __export = (target, all) => {
7
- for (var name in all)
8
- __defProp(target, name, { get: all[name], enumerable: true });
9
- };
10
- var __copyProps = (to, from, except, desc) => {
11
- if (from && typeof from === "object" || typeof from === "function") {
12
- for (let key of __getOwnPropNames(from))
13
- if (!__hasOwnProp.call(to, key) && key !== except)
14
- __defProp(to, key, { get: () => from[key], enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable });
15
- }
16
- return to;
17
- };
18
- var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);
19
-
20
- // src/index.ts
21
- var src_exports = {};
22
- __export(src_exports, {
23
- VERSION: () => VERSION,
24
- createGoogleGenerativeAI: () => createGoogleGenerativeAI,
25
- google: () => google
26
- });
27
- module.exports = __toCommonJS(src_exports);
28
-
29
1
  // src/google-provider.ts
30
- var import_provider_utils16 = require("@ai-sdk/provider-utils");
2
+ import {
3
+ generateId as generateId2,
4
+ loadApiKey,
5
+ withoutTrailingSlash,
6
+ withUserAgentSuffix
7
+ } from "@ai-sdk/provider-utils";
31
8
 
32
9
  // src/version.ts
33
- var VERSION = true ? "4.0.0-beta.9" : "0.0.0-test";
10
+ var VERSION = true ? "4.0.0-canary.51" : "0.0.0-test";
34
11
 
35
- // src/google-generative-ai-embedding-model.ts
36
- var import_provider = require("@ai-sdk/provider");
37
- var import_provider_utils3 = require("@ai-sdk/provider-utils");
38
- var import_v43 = require("zod/v4");
12
+ // src/google-embedding-model.ts
13
+ import {
14
+ TooManyEmbeddingValuesForCallError
15
+ } from "@ai-sdk/provider";
16
+ import {
17
+ combineHeaders,
18
+ createJsonResponseHandler,
19
+ lazySchema as lazySchema3,
20
+ parseProviderOptions,
21
+ postJsonToApi,
22
+ resolve,
23
+ serializeModelOptions,
24
+ WORKFLOW_SERIALIZE,
25
+ WORKFLOW_DESERIALIZE,
26
+ zodSchema as zodSchema3
27
+ } from "@ai-sdk/provider-utils";
28
+ import { z as z3 } from "zod/v4";
39
29
 
40
30
  // src/google-error.ts
41
- var import_provider_utils = require("@ai-sdk/provider-utils");
42
- var import_v4 = require("zod/v4");
43
- var googleErrorDataSchema = (0, import_provider_utils.lazySchema)(
44
- () => (0, import_provider_utils.zodSchema)(
45
- import_v4.z.object({
46
- error: import_v4.z.object({
47
- code: import_v4.z.number().nullable(),
48
- message: import_v4.z.string(),
49
- status: import_v4.z.string()
31
+ import {
32
+ createJsonErrorResponseHandler,
33
+ lazySchema,
34
+ zodSchema
35
+ } from "@ai-sdk/provider-utils";
36
+ import { z } from "zod/v4";
37
+ var googleErrorDataSchema = lazySchema(
38
+ () => zodSchema(
39
+ z.object({
40
+ error: z.object({
41
+ code: z.number().nullable(),
42
+ message: z.string(),
43
+ status: z.string()
50
44
  })
51
45
  })
52
46
  )
53
47
  );
54
- var googleFailedResponseHandler = (0, import_provider_utils.createJsonErrorResponseHandler)({
48
+ var googleFailedResponseHandler = createJsonErrorResponseHandler({
55
49
  errorSchema: googleErrorDataSchema,
56
50
  errorToMessage: (data) => data.error.message
57
51
  });
58
52
 
59
- // src/google-generative-ai-embedding-options.ts
60
- var import_provider_utils2 = require("@ai-sdk/provider-utils");
61
- var import_v42 = require("zod/v4");
62
- var googleEmbeddingContentPartSchema = import_v42.z.union([
63
- import_v42.z.object({ text: import_v42.z.string() }),
64
- import_v42.z.object({
65
- inlineData: import_v42.z.object({
66
- mimeType: import_v42.z.string(),
67
- data: import_v42.z.string()
53
+ // src/google-embedding-model-options.ts
54
+ import {
55
+ lazySchema as lazySchema2,
56
+ zodSchema as zodSchema2
57
+ } from "@ai-sdk/provider-utils";
58
+ import { z as z2 } from "zod/v4";
59
+ var googleEmbeddingContentPartSchema = z2.union([
60
+ z2.object({ text: z2.string() }),
61
+ z2.object({
62
+ inlineData: z2.object({
63
+ mimeType: z2.string(),
64
+ data: z2.string()
68
65
  })
69
66
  })
70
67
  ]);
71
- var googleEmbeddingModelOptions = (0, import_provider_utils2.lazySchema)(
72
- () => (0, import_provider_utils2.zodSchema)(
73
- import_v42.z.object({
68
+ var googleEmbeddingModelOptions = lazySchema2(
69
+ () => zodSchema2(
70
+ z2.object({
74
71
  /**
75
72
  * Optional. Optional reduced dimension for the output embedding.
76
73
  * If set, excessive values in the output embedding are truncated from the end.
77
74
  */
78
- outputDimensionality: import_v42.z.number().optional(),
75
+ outputDimensionality: z2.number().optional(),
79
76
  /**
80
77
  * Optional. Specifies the task type for generating embeddings.
81
78
  * Supported task types:
@@ -88,7 +85,7 @@ var googleEmbeddingModelOptions = (0, import_provider_utils2.lazySchema)(
88
85
  * - FACT_VERIFICATION: Optimized for verifying factual information.
89
86
  * - CODE_RETRIEVAL_QUERY: Optimized for retrieving code blocks based on natural language queries.
90
87
  */
91
- taskType: import_v42.z.enum([
88
+ taskType: z2.enum([
92
89
  "SEMANTIC_SIMILARITY",
93
90
  "CLASSIFICATION",
94
91
  "CLUSTERING",
@@ -107,13 +104,13 @@ var googleEmbeddingModelOptions = (0, import_provider_utils2.lazySchema)(
107
104
  * The array length must match the number of values being embedded. In
108
105
  * the case of a single embedding, the array length must be 1.
109
106
  */
110
- content: import_v42.z.array(import_v42.z.array(googleEmbeddingContentPartSchema).min(1).nullable()).optional()
107
+ content: z2.array(z2.array(googleEmbeddingContentPartSchema).min(1).nullable()).optional()
111
108
  })
112
109
  )
113
110
  );
114
111
 
115
- // src/google-generative-ai-embedding-model.ts
116
- var GoogleGenerativeAIEmbeddingModel = class {
112
+ // src/google-embedding-model.ts
113
+ var GoogleEmbeddingModel = class _GoogleEmbeddingModel {
117
114
  constructor(modelId, config) {
118
115
  this.specificationVersion = "v4";
119
116
  this.maxEmbeddingsPerCall = 2048;
@@ -121,6 +118,15 @@ var GoogleGenerativeAIEmbeddingModel = class {
121
118
  this.modelId = modelId;
122
119
  this.config = config;
123
120
  }
121
+ static [WORKFLOW_SERIALIZE](model) {
122
+ return serializeModelOptions({
123
+ modelId: model.modelId,
124
+ config: model.config
125
+ });
126
+ }
127
+ static [WORKFLOW_DESERIALIZE](options) {
128
+ return new _GoogleEmbeddingModel(options.modelId, options.config);
129
+ }
124
130
  get provider() {
125
131
  return this.config.provider;
126
132
  }
@@ -130,21 +136,21 @@ var GoogleGenerativeAIEmbeddingModel = class {
130
136
  abortSignal,
131
137
  providerOptions
132
138
  }) {
133
- const googleOptions = await (0, import_provider_utils3.parseProviderOptions)({
139
+ const googleOptions = await parseProviderOptions({
134
140
  provider: "google",
135
141
  providerOptions,
136
142
  schema: googleEmbeddingModelOptions
137
143
  });
138
144
  if (values.length > this.maxEmbeddingsPerCall) {
139
- throw new import_provider.TooManyEmbeddingValuesForCallError({
145
+ throw new TooManyEmbeddingValuesForCallError({
140
146
  provider: this.provider,
141
147
  modelId: this.modelId,
142
148
  maxEmbeddingsPerCall: this.maxEmbeddingsPerCall,
143
149
  values
144
150
  });
145
151
  }
146
- const mergedHeaders = (0, import_provider_utils3.combineHeaders)(
147
- await (0, import_provider_utils3.resolve)(this.config.headers),
152
+ const mergedHeaders = combineHeaders(
153
+ this.config.headers ? await resolve(this.config.headers) : void 0,
148
154
  headers
149
155
  );
150
156
  const multimodalContent = googleOptions == null ? void 0 : googleOptions.content;
@@ -161,7 +167,7 @@ var GoogleGenerativeAIEmbeddingModel = class {
161
167
  responseHeaders: responseHeaders2,
162
168
  value: response2,
163
169
  rawValue: rawValue2
164
- } = await (0, import_provider_utils3.postJsonToApi)({
170
+ } = await postJsonToApi({
165
171
  url: `${this.config.baseURL}/models/${this.modelId}:embedContent`,
166
172
  headers: mergedHeaders,
167
173
  body: {
@@ -173,7 +179,7 @@ var GoogleGenerativeAIEmbeddingModel = class {
173
179
  taskType: googleOptions == null ? void 0 : googleOptions.taskType
174
180
  },
175
181
  failedResponseHandler: googleFailedResponseHandler,
176
- successfulResponseHandler: (0, import_provider_utils3.createJsonResponseHandler)(
182
+ successfulResponseHandler: createJsonResponseHandler(
177
183
  googleGenerativeAISingleEmbeddingResponseSchema
178
184
  ),
179
185
  abortSignal,
@@ -190,7 +196,7 @@ var GoogleGenerativeAIEmbeddingModel = class {
190
196
  responseHeaders,
191
197
  value: response,
192
198
  rawValue
193
- } = await (0, import_provider_utils3.postJsonToApi)({
199
+ } = await postJsonToApi({
194
200
  url: `${this.config.baseURL}/models/${this.modelId}:batchEmbedContents`,
195
201
  headers: mergedHeaders,
196
202
  body: {
@@ -209,7 +215,7 @@ var GoogleGenerativeAIEmbeddingModel = class {
209
215
  })
210
216
  },
211
217
  failedResponseHandler: googleFailedResponseHandler,
212
- successfulResponseHandler: (0, import_provider_utils3.createJsonResponseHandler)(
218
+ successfulResponseHandler: createJsonResponseHandler(
213
219
  googleGenerativeAITextEmbeddingResponseSchema
214
220
  ),
215
221
  abortSignal,
@@ -223,27 +229,43 @@ var GoogleGenerativeAIEmbeddingModel = class {
223
229
  };
224
230
  }
225
231
  };
226
- var googleGenerativeAITextEmbeddingResponseSchema = (0, import_provider_utils3.lazySchema)(
227
- () => (0, import_provider_utils3.zodSchema)(
228
- import_v43.z.object({
229
- embeddings: import_v43.z.array(import_v43.z.object({ values: import_v43.z.array(import_v43.z.number()) }))
232
+ var googleGenerativeAITextEmbeddingResponseSchema = lazySchema3(
233
+ () => zodSchema3(
234
+ z3.object({
235
+ embeddings: z3.array(z3.object({ values: z3.array(z3.number()) }))
230
236
  })
231
237
  )
232
238
  );
233
- var googleGenerativeAISingleEmbeddingResponseSchema = (0, import_provider_utils3.lazySchema)(
234
- () => (0, import_provider_utils3.zodSchema)(
235
- import_v43.z.object({
236
- embedding: import_v43.z.object({ values: import_v43.z.array(import_v43.z.number()) })
239
+ var googleGenerativeAISingleEmbeddingResponseSchema = lazySchema3(
240
+ () => zodSchema3(
241
+ z3.object({
242
+ embedding: z3.object({ values: z3.array(z3.number()) })
237
243
  })
238
244
  )
239
245
  );
240
246
 
241
- // src/google-generative-ai-language-model.ts
242
- var import_provider_utils6 = require("@ai-sdk/provider-utils");
243
- var import_v45 = require("zod/v4");
247
+ // src/google-language-model.ts
248
+ import {
249
+ combineHeaders as combineHeaders2,
250
+ createEventSourceResponseHandler,
251
+ createJsonResponseHandler as createJsonResponseHandler2,
252
+ generateId,
253
+ isCustomReasoning,
254
+ lazySchema as lazySchema5,
255
+ mapReasoningToProviderBudget,
256
+ mapReasoningToProviderEffort,
257
+ parseProviderOptions as parseProviderOptions2,
258
+ postJsonToApi as postJsonToApi2,
259
+ resolve as resolve2,
260
+ serializeModelOptions as serializeModelOptions2,
261
+ WORKFLOW_SERIALIZE as WORKFLOW_SERIALIZE2,
262
+ WORKFLOW_DESERIALIZE as WORKFLOW_DESERIALIZE2,
263
+ zodSchema as zodSchema5
264
+ } from "@ai-sdk/provider-utils";
265
+ import { z as z5 } from "zod/v4";
244
266
 
245
- // src/convert-google-generative-ai-usage.ts
246
- function convertGoogleGenerativeAIUsage(usage) {
267
+ // src/convert-google-usage.ts
268
+ function convertGoogleUsage(usage) {
247
269
  var _a, _b, _c, _d;
248
270
  if (usage == null) {
249
271
  return {
@@ -397,21 +419,151 @@ function isEmptyObjectSchema(jsonSchema) {
397
419
  return jsonSchema != null && typeof jsonSchema === "object" && jsonSchema.type === "object" && (jsonSchema.properties == null || Object.keys(jsonSchema.properties).length === 0) && !jsonSchema.additionalProperties;
398
420
  }
399
421
 
400
- // src/convert-to-google-generative-ai-messages.ts
401
- var import_provider2 = require("@ai-sdk/provider");
402
- var import_provider_utils4 = require("@ai-sdk/provider-utils");
403
- function convertToGoogleGenerativeAIMessages(prompt, options) {
404
- var _a, _b, _c;
422
+ // src/convert-to-google-messages.ts
423
+ import {
424
+ UnsupportedFunctionalityError
425
+ } from "@ai-sdk/provider";
426
+ import {
427
+ convertToBase64,
428
+ getTopLevelMediaType,
429
+ isFullMediaType,
430
+ resolveFullMediaType,
431
+ resolveProviderReference
432
+ } from "@ai-sdk/provider-utils";
433
+ var dataUrlRegex = /^data:([^;,]+);base64,(.+)$/s;
434
+ function parseBase64DataUrl(value) {
435
+ const match = dataUrlRegex.exec(value);
436
+ if (match == null) {
437
+ return void 0;
438
+ }
439
+ return {
440
+ mediaType: match[1],
441
+ data: match[2]
442
+ };
443
+ }
444
+ function convertUrlToolResultPart(url) {
445
+ const parsedDataUrl = parseBase64DataUrl(url);
446
+ if (parsedDataUrl == null) {
447
+ return void 0;
448
+ }
449
+ return {
450
+ inlineData: {
451
+ mimeType: parsedDataUrl.mediaType,
452
+ data: parsedDataUrl.data
453
+ }
454
+ };
455
+ }
456
+ function appendToolResultParts(parts, toolName, outputValue) {
457
+ const functionResponseParts = [];
458
+ const responseTextParts = [];
459
+ for (const contentPart of outputValue) {
460
+ switch (contentPart.type) {
461
+ case "text": {
462
+ responseTextParts.push(contentPart.text);
463
+ break;
464
+ }
465
+ case "file": {
466
+ if (contentPart.data.type === "data") {
467
+ functionResponseParts.push({
468
+ inlineData: {
469
+ mimeType: resolveFullMediaType({ part: contentPart }),
470
+ data: convertToBase64(contentPart.data.data)
471
+ }
472
+ });
473
+ } else if (contentPart.data.type === "url") {
474
+ const functionResponsePart = convertUrlToolResultPart(
475
+ contentPart.data.url.toString()
476
+ );
477
+ if (functionResponsePart != null) {
478
+ functionResponseParts.push(functionResponsePart);
479
+ } else {
480
+ responseTextParts.push(JSON.stringify(contentPart));
481
+ }
482
+ } else {
483
+ responseTextParts.push(JSON.stringify(contentPart));
484
+ }
485
+ break;
486
+ }
487
+ default: {
488
+ responseTextParts.push(JSON.stringify(contentPart));
489
+ break;
490
+ }
491
+ }
492
+ }
493
+ parts.push({
494
+ functionResponse: {
495
+ name: toolName,
496
+ response: {
497
+ name: toolName,
498
+ content: responseTextParts.length > 0 ? responseTextParts.join("\n") : "Tool executed successfully."
499
+ },
500
+ ...functionResponseParts.length > 0 ? { parts: functionResponseParts } : {}
501
+ }
502
+ });
503
+ }
504
+ function appendLegacyToolResultParts(parts, toolName, outputValue) {
505
+ for (const contentPart of outputValue) {
506
+ switch (contentPart.type) {
507
+ case "text":
508
+ parts.push({
509
+ functionResponse: {
510
+ name: toolName,
511
+ response: {
512
+ name: toolName,
513
+ content: contentPart.text
514
+ }
515
+ }
516
+ });
517
+ break;
518
+ case "file": {
519
+ if (contentPart.data.type === "data" && getTopLevelMediaType(contentPart.mediaType) === "image") {
520
+ parts.push(
521
+ {
522
+ inlineData: {
523
+ mimeType: resolveFullMediaType({ part: contentPart }),
524
+ data: convertToBase64(contentPart.data.data)
525
+ }
526
+ },
527
+ {
528
+ text: "Tool executed successfully and returned this image as a response"
529
+ }
530
+ );
531
+ } else {
532
+ parts.push({ text: JSON.stringify(contentPart) });
533
+ }
534
+ break;
535
+ }
536
+ default:
537
+ parts.push({ text: JSON.stringify(contentPart) });
538
+ break;
539
+ }
540
+ }
541
+ }
542
+ function convertToGoogleMessages(prompt, options) {
543
+ var _a, _b, _c, _d;
405
544
  const systemInstructionParts = [];
406
545
  const contents = [];
407
546
  let systemMessagesAllowed = true;
408
547
  const isGemmaModel = (_a = options == null ? void 0 : options.isGemmaModel) != null ? _a : false;
409
- const providerOptionsName = (_b = options == null ? void 0 : options.providerOptionsName) != null ? _b : "google";
548
+ const providerOptionsNames = (_b = options == null ? void 0 : options.providerOptionsNames) != null ? _b : ["google"];
549
+ const isVertexLike = !providerOptionsNames.includes("google");
550
+ const supportsFunctionResponseParts = (_c = options == null ? void 0 : options.supportsFunctionResponseParts) != null ? _c : true;
551
+ const readProviderOpts = (part) => {
552
+ var _a2, _b2, _c2, _d2, _e;
553
+ for (const name of providerOptionsNames) {
554
+ const v = (_a2 = part.providerOptions) == null ? void 0 : _a2[name];
555
+ if (v != null) return v;
556
+ }
557
+ if (isVertexLike) {
558
+ return (_b2 = part.providerOptions) == null ? void 0 : _b2.google;
559
+ }
560
+ return (_e = (_c2 = part.providerOptions) == null ? void 0 : _c2.googleVertex) != null ? _e : (_d2 = part.providerOptions) == null ? void 0 : _d2.vertex;
561
+ };
410
562
  for (const { role, content } of prompt) {
411
563
  switch (role) {
412
564
  case "system": {
413
565
  if (!systemMessagesAllowed) {
414
- throw new import_provider2.UnsupportedFunctionalityError({
566
+ throw new UnsupportedFunctionalityError({
415
567
  functionality: "system messages are only supported at the beginning of the conversation"
416
568
  });
417
569
  }
@@ -428,20 +580,54 @@ function convertToGoogleGenerativeAIMessages(prompt, options) {
428
580
  break;
429
581
  }
430
582
  case "file": {
431
- const mediaType = part.mediaType === "image/*" ? "image/jpeg" : part.mediaType;
432
- parts.push(
433
- part.data instanceof URL ? {
434
- fileData: {
435
- mimeType: mediaType,
436
- fileUri: part.data.toString()
437
- }
438
- } : {
439
- inlineData: {
440
- mimeType: mediaType,
441
- data: (0, import_provider_utils4.convertToBase64)(part.data)
583
+ switch (part.data.type) {
584
+ case "url": {
585
+ parts.push({
586
+ fileData: {
587
+ mimeType: resolveFullMediaType({ part }),
588
+ fileUri: part.data.url.toString()
589
+ }
590
+ });
591
+ break;
592
+ }
593
+ case "reference": {
594
+ if (isVertexLike) {
595
+ throw new UnsupportedFunctionalityError({
596
+ functionality: "file parts with provider references"
597
+ });
442
598
  }
599
+ parts.push({
600
+ fileData: {
601
+ mimeType: resolveFullMediaType({ part }),
602
+ fileUri: resolveProviderReference({
603
+ reference: part.data.reference,
604
+ provider: "google"
605
+ })
606
+ }
607
+ });
608
+ break;
609
+ }
610
+ case "text": {
611
+ parts.push({
612
+ inlineData: {
613
+ mimeType: isFullMediaType(part.mediaType) ? part.mediaType : "text/plain",
614
+ data: convertToBase64(
615
+ new TextEncoder().encode(part.data.text)
616
+ )
617
+ }
618
+ });
619
+ break;
443
620
  }
444
- );
621
+ case "data": {
622
+ parts.push({
623
+ inlineData: {
624
+ mimeType: resolveFullMediaType({ part }),
625
+ data: convertToBase64(part.data.data)
626
+ }
627
+ });
628
+ break;
629
+ }
630
+ }
445
631
  break;
446
632
  }
447
633
  }
@@ -454,8 +640,7 @@ function convertToGoogleGenerativeAIMessages(prompt, options) {
454
640
  contents.push({
455
641
  role: "model",
456
642
  parts: content.map((part) => {
457
- var _a2, _b2, _c2, _d;
458
- const providerOpts = (_d = (_a2 = part.providerOptions) == null ? void 0 : _a2[providerOptionsName]) != null ? _d : providerOptionsName !== "google" ? (_b2 = part.providerOptions) == null ? void 0 : _b2.google : (_c2 = part.providerOptions) == null ? void 0 : _c2.vertex;
643
+ const providerOpts = readProviderOpts(part);
459
644
  const thoughtSignature = (providerOpts == null ? void 0 : providerOpts.thoughtSignature) != null ? String(providerOpts.thoughtSignature) : void 0;
460
645
  switch (part.type) {
461
646
  case "text": {
@@ -471,22 +656,89 @@ function convertToGoogleGenerativeAIMessages(prompt, options) {
471
656
  thoughtSignature
472
657
  };
473
658
  }
659
+ case "reasoning-file": {
660
+ switch (part.data.type) {
661
+ case "url": {
662
+ throw new UnsupportedFunctionalityError({
663
+ functionality: "File data URLs in assistant messages are not supported"
664
+ });
665
+ }
666
+ case "data": {
667
+ return {
668
+ inlineData: {
669
+ mimeType: part.mediaType,
670
+ data: convertToBase64(part.data.data)
671
+ },
672
+ thought: true,
673
+ thoughtSignature
674
+ };
675
+ }
676
+ }
677
+ break;
678
+ }
474
679
  case "file": {
475
- if (part.data instanceof URL) {
476
- throw new import_provider2.UnsupportedFunctionalityError({
477
- functionality: "File data URLs in assistant messages are not supported"
478
- });
680
+ switch (part.data.type) {
681
+ case "url": {
682
+ throw new UnsupportedFunctionalityError({
683
+ functionality: "File data URLs in assistant messages are not supported"
684
+ });
685
+ }
686
+ case "reference": {
687
+ if (isVertexLike) {
688
+ throw new UnsupportedFunctionalityError({
689
+ functionality: "file parts with provider references"
690
+ });
691
+ }
692
+ return {
693
+ fileData: {
694
+ mimeType: part.mediaType,
695
+ fileUri: resolveProviderReference({
696
+ reference: part.data.reference,
697
+ provider: "google"
698
+ })
699
+ },
700
+ ...(providerOpts == null ? void 0 : providerOpts.thought) === true ? { thought: true } : {},
701
+ thoughtSignature
702
+ };
703
+ }
704
+ case "text": {
705
+ return {
706
+ inlineData: {
707
+ mimeType: isFullMediaType(part.mediaType) ? part.mediaType : "text/plain",
708
+ data: convertToBase64(
709
+ new TextEncoder().encode(part.data.text)
710
+ )
711
+ },
712
+ ...(providerOpts == null ? void 0 : providerOpts.thought) === true ? { thought: true } : {},
713
+ thoughtSignature
714
+ };
715
+ }
716
+ case "data": {
717
+ return {
718
+ inlineData: {
719
+ mimeType: part.mediaType,
720
+ data: convertToBase64(part.data.data)
721
+ },
722
+ ...(providerOpts == null ? void 0 : providerOpts.thought) === true ? { thought: true } : {},
723
+ thoughtSignature
724
+ };
725
+ }
479
726
  }
480
- return {
481
- inlineData: {
482
- mimeType: part.mediaType,
483
- data: (0, import_provider_utils4.convertToBase64)(part.data)
484
- },
485
- ...(providerOpts == null ? void 0 : providerOpts.thought) === true ? { thought: true } : {},
486
- thoughtSignature
487
- };
727
+ break;
488
728
  }
489
729
  case "tool-call": {
730
+ const serverToolCallId = (providerOpts == null ? void 0 : providerOpts.serverToolCallId) != null ? String(providerOpts.serverToolCallId) : void 0;
731
+ const serverToolType = (providerOpts == null ? void 0 : providerOpts.serverToolType) != null ? String(providerOpts.serverToolType) : void 0;
732
+ if (serverToolCallId && serverToolType) {
733
+ return {
734
+ toolCall: {
735
+ toolType: serverToolType,
736
+ args: typeof part.input === "string" ? JSON.parse(part.input) : part.input,
737
+ id: serverToolCallId
738
+ },
739
+ thoughtSignature
740
+ };
741
+ }
490
742
  return {
491
743
  functionCall: {
492
744
  name: part.toolName,
@@ -495,6 +747,21 @@ function convertToGoogleGenerativeAIMessages(prompt, options) {
495
747
  thoughtSignature
496
748
  };
497
749
  }
750
+ case "tool-result": {
751
+ const serverToolCallId = (providerOpts == null ? void 0 : providerOpts.serverToolCallId) != null ? String(providerOpts.serverToolCallId) : void 0;
752
+ const serverToolType = (providerOpts == null ? void 0 : providerOpts.serverToolType) != null ? String(providerOpts.serverToolType) : void 0;
753
+ if (serverToolCallId && serverToolType) {
754
+ return {
755
+ toolResponse: {
756
+ toolType: serverToolType,
757
+ response: part.output.type === "json" ? part.output.value : {},
758
+ id: serverToolCallId
759
+ },
760
+ thoughtSignature
761
+ };
762
+ }
763
+ return void 0;
764
+ }
498
765
  }
499
766
  }).filter((part) => part !== void 0)
500
767
  });
@@ -507,38 +774,32 @@ function convertToGoogleGenerativeAIMessages(prompt, options) {
507
774
  if (part.type === "tool-approval-response") {
508
775
  continue;
509
776
  }
777
+ const partProviderOpts = readProviderOpts(part);
778
+ const serverToolCallId = (partProviderOpts == null ? void 0 : partProviderOpts.serverToolCallId) != null ? String(partProviderOpts.serverToolCallId) : void 0;
779
+ const serverToolType = (partProviderOpts == null ? void 0 : partProviderOpts.serverToolType) != null ? String(partProviderOpts.serverToolType) : void 0;
780
+ if (serverToolCallId && serverToolType) {
781
+ const serverThoughtSignature = (partProviderOpts == null ? void 0 : partProviderOpts.thoughtSignature) != null ? String(partProviderOpts.thoughtSignature) : void 0;
782
+ if (contents.length > 0) {
783
+ const lastContent = contents[contents.length - 1];
784
+ if (lastContent.role === "model") {
785
+ lastContent.parts.push({
786
+ toolResponse: {
787
+ toolType: serverToolType,
788
+ response: part.output.type === "json" ? part.output.value : {},
789
+ id: serverToolCallId
790
+ },
791
+ thoughtSignature: serverThoughtSignature
792
+ });
793
+ continue;
794
+ }
795
+ }
796
+ }
510
797
  const output = part.output;
511
798
  if (output.type === "content") {
512
- for (const contentPart of output.value) {
513
- switch (contentPart.type) {
514
- case "text":
515
- parts.push({
516
- functionResponse: {
517
- name: part.toolName,
518
- response: {
519
- name: part.toolName,
520
- content: contentPart.text
521
- }
522
- }
523
- });
524
- break;
525
- case "image-data":
526
- parts.push(
527
- {
528
- inlineData: {
529
- mimeType: contentPart.mediaType,
530
- data: contentPart.data
531
- }
532
- },
533
- {
534
- text: "Tool executed successfully and returned this image as a response"
535
- }
536
- );
537
- break;
538
- default:
539
- parts.push({ text: JSON.stringify(contentPart) });
540
- break;
541
- }
799
+ if (supportsFunctionResponseParts) {
800
+ appendToolResultParts(parts, part.toolName, output.value);
801
+ } else {
802
+ appendLegacyToolResultParts(parts, part.toolName, output.value);
542
803
  }
543
804
  } else {
544
805
  parts.push({
@@ -546,7 +807,7 @@ function convertToGoogleGenerativeAIMessages(prompt, options) {
546
807
  name: part.toolName,
547
808
  response: {
548
809
  name: part.toolName,
549
- content: output.type === "execution-denied" ? (_c = output.reason) != null ? _c : "Tool execution denied." : output.value
810
+ content: output.type === "execution-denied" ? (_d = output.reason) != null ? _d : "Tool call execution denied." : output.value
550
811
  }
551
812
  }
552
813
  });
@@ -575,40 +836,43 @@ function getModelPath(modelId) {
575
836
  return modelId.includes("/") ? modelId : `models/${modelId}`;
576
837
  }
577
838
 
578
- // src/google-generative-ai-options.ts
579
- var import_provider_utils5 = require("@ai-sdk/provider-utils");
580
- var import_v44 = require("zod/v4");
581
- var googleLanguageModelOptions = (0, import_provider_utils5.lazySchema)(
582
- () => (0, import_provider_utils5.zodSchema)(
583
- import_v44.z.object({
584
- responseModalities: import_v44.z.array(import_v44.z.enum(["TEXT", "IMAGE"])).optional(),
585
- thinkingConfig: import_v44.z.object({
586
- thinkingBudget: import_v44.z.number().optional(),
587
- includeThoughts: import_v44.z.boolean().optional(),
839
+ // src/google-language-model-options.ts
840
+ import {
841
+ lazySchema as lazySchema4,
842
+ zodSchema as zodSchema4
843
+ } from "@ai-sdk/provider-utils";
844
+ import { z as z4 } from "zod/v4";
845
+ var googleLanguageModelOptions = lazySchema4(
846
+ () => zodSchema4(
847
+ z4.object({
848
+ responseModalities: z4.array(z4.enum(["TEXT", "IMAGE"])).optional(),
849
+ thinkingConfig: z4.object({
850
+ thinkingBudget: z4.number().optional(),
851
+ includeThoughts: z4.boolean().optional(),
588
852
  // https://ai.google.dev/gemini-api/docs/gemini-3?thinking=high#thinking_level
589
- thinkingLevel: import_v44.z.enum(["minimal", "low", "medium", "high"]).optional()
853
+ thinkingLevel: z4.enum(["minimal", "low", "medium", "high"]).optional()
590
854
  }).optional(),
591
855
  /**
592
856
  * Optional.
593
857
  * The name of the cached content used as context to serve the prediction.
594
858
  * Format: cachedContents/{cachedContent}
595
859
  */
596
- cachedContent: import_v44.z.string().optional(),
860
+ cachedContent: z4.string().optional(),
597
861
  /**
598
862
  * Optional. Enable structured output. Default is true.
599
863
  *
600
864
  * This is useful when the JSON Schema contains elements that are
601
865
  * not supported by the OpenAPI schema version that
602
- * Google Generative AI uses. You can use this to disable
866
+ * Google uses. You can use this to disable
603
867
  * structured outputs if you need to.
604
868
  */
605
- structuredOutputs: import_v44.z.boolean().optional(),
869
+ structuredOutputs: z4.boolean().optional(),
606
870
  /**
607
871
  * Optional. A list of unique safety settings for blocking unsafe content.
608
872
  */
609
- safetySettings: import_v44.z.array(
610
- import_v44.z.object({
611
- category: import_v44.z.enum([
873
+ safetySettings: z4.array(
874
+ z4.object({
875
+ category: z4.enum([
612
876
  "HARM_CATEGORY_UNSPECIFIED",
613
877
  "HARM_CATEGORY_HATE_SPEECH",
614
878
  "HARM_CATEGORY_DANGEROUS_CONTENT",
@@ -616,7 +880,7 @@ var googleLanguageModelOptions = (0, import_provider_utils5.lazySchema)(
616
880
  "HARM_CATEGORY_SEXUALLY_EXPLICIT",
617
881
  "HARM_CATEGORY_CIVIC_INTEGRITY"
618
882
  ]),
619
- threshold: import_v44.z.enum([
883
+ threshold: z4.enum([
620
884
  "HARM_BLOCK_THRESHOLD_UNSPECIFIED",
621
885
  "BLOCK_LOW_AND_ABOVE",
622
886
  "BLOCK_MEDIUM_AND_ABOVE",
@@ -626,7 +890,7 @@ var googleLanguageModelOptions = (0, import_provider_utils5.lazySchema)(
626
890
  ])
627
891
  })
628
892
  ).optional(),
629
- threshold: import_v44.z.enum([
893
+ threshold: z4.enum([
630
894
  "HARM_BLOCK_THRESHOLD_UNSPECIFIED",
631
895
  "BLOCK_LOW_AND_ABOVE",
632
896
  "BLOCK_MEDIUM_AND_ABOVE",
@@ -639,19 +903,19 @@ var googleLanguageModelOptions = (0, import_provider_utils5.lazySchema)(
639
903
  *
640
904
  * https://cloud.google.com/vertex-ai/generative-ai/docs/multimodal/audio-understanding
641
905
  */
642
- audioTimestamp: import_v44.z.boolean().optional(),
906
+ audioTimestamp: z4.boolean().optional(),
643
907
  /**
644
908
  * Optional. Defines labels used in billing reports. Available on Vertex AI only.
645
909
  *
646
910
  * https://cloud.google.com/vertex-ai/generative-ai/docs/multimodal/add-labels-to-api-calls
647
911
  */
648
- labels: import_v44.z.record(import_v44.z.string(), import_v44.z.string()).optional(),
912
+ labels: z4.record(z4.string(), z4.string()).optional(),
649
913
  /**
650
914
  * Optional. If specified, the media resolution specified will be used.
651
915
  *
652
916
  * https://ai.google.dev/api/generate-content#MediaResolution
653
917
  */
654
- mediaResolution: import_v44.z.enum([
918
+ mediaResolution: z4.enum([
655
919
  "MEDIA_RESOLUTION_UNSPECIFIED",
656
920
  "MEDIA_RESOLUTION_LOW",
657
921
  "MEDIA_RESOLUTION_MEDIUM",
@@ -662,8 +926,8 @@ var googleLanguageModelOptions = (0, import_provider_utils5.lazySchema)(
662
926
  *
663
927
  * https://ai.google.dev/gemini-api/docs/image-generation#aspect_ratios
664
928
  */
665
- imageConfig: import_v44.z.object({
666
- aspectRatio: import_v44.z.enum([
929
+ imageConfig: z4.object({
930
+ aspectRatio: z4.enum([
667
931
  "1:1",
668
932
  "2:3",
669
933
  "3:2",
@@ -679,7 +943,7 @@ var googleLanguageModelOptions = (0, import_provider_utils5.lazySchema)(
679
943
  "1:4",
680
944
  "4:1"
681
945
  ]).optional(),
682
- imageSize: import_v44.z.enum(["1K", "2K", "4K", "512"]).optional()
946
+ imageSize: z4.enum(["1K", "2K", "4K", "512"]).optional()
683
947
  }).optional(),
684
948
  /**
685
949
  * Optional. Configuration for grounding retrieval.
@@ -687,24 +951,47 @@ var googleLanguageModelOptions = (0, import_provider_utils5.lazySchema)(
687
951
  *
688
952
  * https://cloud.google.com/vertex-ai/generative-ai/docs/grounding/grounding-with-google-maps
689
953
  */
690
- retrievalConfig: import_v44.z.object({
691
- latLng: import_v44.z.object({
692
- latitude: import_v44.z.number(),
693
- longitude: import_v44.z.number()
954
+ retrievalConfig: z4.object({
955
+ latLng: z4.object({
956
+ latitude: z4.number(),
957
+ longitude: z4.number()
694
958
  }).optional()
695
- }).optional()
959
+ }).optional(),
960
+ /**
961
+ * Optional. When set to true, function call arguments will be streamed
962
+ * incrementally via partialArgs in streaming responses. Only supported
963
+ * on the Vertex AI API (not the Gemini API) and only for Gemini 3+
964
+ * models.
965
+ *
966
+ * @default false
967
+ *
968
+ * https://docs.cloud.google.com/vertex-ai/generative-ai/docs/multimodal/function-calling#streaming-fc
969
+ */
970
+ streamFunctionCallArguments: z4.boolean().optional(),
971
+ /**
972
+ * Optional. The service tier to use for the request.
973
+ */
974
+ serviceTier: z4.enum(["standard", "flex", "priority"]).optional()
696
975
  })
697
976
  )
698
977
  );
978
+ var VertexServiceTierMap = {
979
+ standard: "SERVICE_TIER_STANDARD",
980
+ flex: "SERVICE_TIER_FLEX",
981
+ priority: "SERVICE_TIER_PRIORITY"
982
+ };
699
983
 
700
984
  // src/google-prepare-tools.ts
701
- var import_provider3 = require("@ai-sdk/provider");
985
+ import {
986
+ UnsupportedFunctionalityError as UnsupportedFunctionalityError2
987
+ } from "@ai-sdk/provider";
702
988
  function prepareTools({
703
989
  tools,
704
990
  toolChoice,
705
- modelId
991
+ modelId,
992
+ isVertexProvider = false
706
993
  }) {
707
- var _a;
994
+ var _a, _b;
708
995
  tools = (tools == null ? void 0 : tools.length) ? tools : void 0;
709
996
  const toolWarnings = [];
710
997
  const isLatest = [
@@ -713,13 +1000,14 @@ function prepareTools({
713
1000
  "gemini-pro-latest"
714
1001
  ].some((id) => id === modelId);
715
1002
  const isGemini2orNewer = modelId.includes("gemini-2") || modelId.includes("gemini-3") || modelId.includes("nano-banana") || isLatest;
1003
+ const isGemini3orNewer = modelId.includes("gemini-3");
716
1004
  const supportsFileSearch = modelId.includes("gemini-2.5") || modelId.includes("gemini-3");
717
1005
  if (tools == null) {
718
1006
  return { tools: void 0, toolConfig: void 0, toolWarnings };
719
1007
  }
720
1008
  const hasFunctionTools = tools.some((tool) => tool.type === "function");
721
1009
  const hasProviderTools = tools.some((tool) => tool.type === "provider");
722
- if (hasFunctionTools && hasProviderTools) {
1010
+ if (hasFunctionTools && hasProviderTools && !isGemini3orNewer) {
723
1011
  toolWarnings.push({
724
1012
  type: "unsupported",
725
1013
  feature: `combination of function and provider-defined tools`
@@ -770,7 +1058,7 @@ function prepareTools({
770
1058
  toolWarnings.push({
771
1059
  type: "unsupported",
772
1060
  feature: `provider-defined tool ${tool.id}`,
773
- details: "The code execution tools is not supported with other Gemini models than Gemini 2."
1061
+ details: "The code execution tool is not supported with other Gemini models than Gemini 2."
774
1062
  });
775
1063
  }
776
1064
  break;
@@ -824,6 +1112,47 @@ function prepareTools({
824
1112
  break;
825
1113
  }
826
1114
  });
1115
+ if (hasFunctionTools && isGemini3orNewer && googleTools2.length > 0) {
1116
+ const functionDeclarations2 = [];
1117
+ for (const tool of tools) {
1118
+ if (tool.type === "function") {
1119
+ functionDeclarations2.push({
1120
+ name: tool.name,
1121
+ description: (_a = tool.description) != null ? _a : "",
1122
+ parameters: convertJSONSchemaToOpenAPISchema(tool.inputSchema)
1123
+ });
1124
+ }
1125
+ }
1126
+ const combinedToolConfig = {
1127
+ functionCallingConfig: { mode: "VALIDATED" },
1128
+ ...!isVertexProvider && {
1129
+ includeServerSideToolInvocations: true
1130
+ }
1131
+ };
1132
+ if (toolChoice != null) {
1133
+ switch (toolChoice.type) {
1134
+ case "auto":
1135
+ break;
1136
+ case "none":
1137
+ combinedToolConfig.functionCallingConfig = { mode: "NONE" };
1138
+ break;
1139
+ case "required":
1140
+ combinedToolConfig.functionCallingConfig = { mode: "ANY" };
1141
+ break;
1142
+ case "tool":
1143
+ combinedToolConfig.functionCallingConfig = {
1144
+ mode: "ANY",
1145
+ allowedFunctionNames: [toolChoice.toolName]
1146
+ };
1147
+ break;
1148
+ }
1149
+ }
1150
+ return {
1151
+ tools: [...googleTools2, { functionDeclarations: functionDeclarations2 }],
1152
+ toolConfig: combinedToolConfig,
1153
+ toolWarnings
1154
+ };
1155
+ }
827
1156
  return {
828
1157
  tools: googleTools2.length > 0 ? googleTools2 : void 0,
829
1158
  toolConfig: void 0,
@@ -837,7 +1166,7 @@ function prepareTools({
837
1166
  case "function":
838
1167
  functionDeclarations.push({
839
1168
  name: tool.name,
840
- description: (_a = tool.description) != null ? _a : "",
1169
+ description: (_b = tool.description) != null ? _b : "",
841
1170
  parameters: convertJSONSchemaToOpenAPISchema(tool.inputSchema)
842
1171
  });
843
1172
  if (tool.strict === true) {
@@ -900,15 +1229,238 @@ function prepareTools({
900
1229
  };
901
1230
  default: {
902
1231
  const _exhaustiveCheck = type;
903
- throw new import_provider3.UnsupportedFunctionalityError({
1232
+ throw new UnsupportedFunctionalityError2({
904
1233
  functionality: `tool choice type: ${_exhaustiveCheck}`
905
1234
  });
906
1235
  }
907
1236
  }
908
1237
  }
909
1238
 
910
- // src/map-google-generative-ai-finish-reason.ts
911
- function mapGoogleGenerativeAIFinishReason({
1239
+ // src/google-json-accumulator.ts
1240
+ var GoogleJSONAccumulator = class {
1241
+ constructor() {
1242
+ this.accumulatedArgs = {};
1243
+ this.jsonText = "";
1244
+ /**
1245
+ * Stack representing the currently "open" containers in the JSON output.
1246
+ * Entry 0 is always the root `{` object once the first value is written.
1247
+ */
1248
+ this.pathStack = [];
1249
+ /**
1250
+ * Whether a string value is currently "open" (willContinue was true),
1251
+ * meaning the closing quote has not yet been emitted.
1252
+ */
1253
+ this.stringOpen = false;
1254
+ }
1255
+ /**
1256
+ * Input: [{jsonPath:"$.brightness",numberValue:50}]
1257
+ * Output: { currentJSON:{brightness:50}, textDelta:'{"brightness":50' }
1258
+ */
1259
+ processPartialArgs(partialArgs) {
1260
+ let delta = "";
1261
+ for (const arg of partialArgs) {
1262
+ const rawPath = arg.jsonPath.replace(/^\$\./, "");
1263
+ if (!rawPath) continue;
1264
+ const segments = parsePath(rawPath);
1265
+ const existingValue = getNestedValue(this.accumulatedArgs, segments);
1266
+ const isStringContinuation = arg.stringValue != null && existingValue !== void 0;
1267
+ if (isStringContinuation) {
1268
+ const escaped = JSON.stringify(arg.stringValue).slice(1, -1);
1269
+ setNestedValue(
1270
+ this.accumulatedArgs,
1271
+ segments,
1272
+ existingValue + arg.stringValue
1273
+ );
1274
+ delta += escaped;
1275
+ continue;
1276
+ }
1277
+ const resolved = resolvePartialArgValue(arg);
1278
+ if (resolved == null) continue;
1279
+ setNestedValue(this.accumulatedArgs, segments, resolved.value);
1280
+ delta += this.emitNavigationTo(segments, arg, resolved.json);
1281
+ }
1282
+ this.jsonText += delta;
1283
+ return {
1284
+ currentJSON: this.accumulatedArgs,
1285
+ textDelta: delta
1286
+ };
1287
+ }
1288
+ /**
1289
+ * Input: jsonText='{"brightness":50', accumulatedArgs={brightness:50}
1290
+ * Output: { finalJSON:'{"brightness":50}', closingDelta:'}' }
1291
+ */
1292
+ finalize() {
1293
+ const finalArgs = JSON.stringify(this.accumulatedArgs);
1294
+ const closingDelta = finalArgs.slice(this.jsonText.length);
1295
+ return { finalJSON: finalArgs, closingDelta };
1296
+ }
1297
+ /**
1298
+ * Input: pathStack=[] (first call) or pathStack=[root,...] (subsequent calls)
1299
+ * Output: '{' (first call) or '' (subsequent calls)
1300
+ */
1301
+ ensureRoot() {
1302
+ if (this.pathStack.length === 0) {
1303
+ this.pathStack.push({ segment: "", isArray: false, childCount: 0 });
1304
+ return "{";
1305
+ }
1306
+ return "";
1307
+ }
1308
+ /**
1309
+ * Emits the JSON text fragment needed to navigate from the current open
1310
+ * path to the new leaf at `targetSegments`, then writes the value.
1311
+ *
1312
+ * Input: targetSegments=["recipe","name"], arg={jsonPath:"$.recipe.name",stringValue:"Lasagna"}, valueJson='"Lasagna"'
1313
+ * Output: '{"recipe":{"name":"Lasagna"'
1314
+ */
1315
+ emitNavigationTo(targetSegments, arg, valueJson) {
1316
+ let fragment = "";
1317
+ if (this.stringOpen) {
1318
+ fragment += '"';
1319
+ this.stringOpen = false;
1320
+ }
1321
+ fragment += this.ensureRoot();
1322
+ const targetContainerSegments = targetSegments.slice(0, -1);
1323
+ const leafSegment = targetSegments[targetSegments.length - 1];
1324
+ const commonDepth = this.findCommonStackDepth(targetContainerSegments);
1325
+ fragment += this.closeDownTo(commonDepth);
1326
+ fragment += this.openDownTo(targetContainerSegments, leafSegment);
1327
+ fragment += this.emitLeaf(leafSegment, arg, valueJson);
1328
+ return fragment;
1329
+ }
1330
+ /**
1331
+ * Returns the stack depth to preserve when navigating to a new target
1332
+ * container path. Always >= 1 (the root is never popped).
1333
+ *
1334
+ * Input: stack=[root,"recipe","ingredients",0], target=["recipe","ingredients",1]
1335
+ * Output: 3 (keep root+"recipe"+"ingredients")
1336
+ */
1337
+ findCommonStackDepth(targetContainer) {
1338
+ const maxDepth = Math.min(
1339
+ this.pathStack.length - 1,
1340
+ targetContainer.length
1341
+ );
1342
+ let common = 0;
1343
+ for (let i = 0; i < maxDepth; i++) {
1344
+ if (this.pathStack[i + 1].segment === targetContainer[i]) {
1345
+ common++;
1346
+ } else {
1347
+ break;
1348
+ }
1349
+ }
1350
+ return common + 1;
1351
+ }
1352
+ /**
1353
+ * Closes containers from the current stack depth back down to `targetDepth`.
1354
+ *
1355
+ * Input: this.pathStack=[root,"recipe","ingredients",0], targetDepth=3
1356
+ * Output: '}'
1357
+ */
1358
+ closeDownTo(targetDepth) {
1359
+ let fragment = "";
1360
+ while (this.pathStack.length > targetDepth) {
1361
+ const entry = this.pathStack.pop();
1362
+ fragment += entry.isArray ? "]" : "}";
1363
+ }
1364
+ return fragment;
1365
+ }
1366
+ /**
1367
+ * Opens containers from the current stack depth down to the full target
1368
+ * container path, emitting opening `{`, `[`, keys, and commas as needed.
1369
+ * `leafSegment` is used to determine if the innermost container is an array.
1370
+ *
1371
+ * Input: this.pathStack=[root], targetContainer=["recipe","ingredients"], leafSegment=0
1372
+ * Output: '"recipe":{"ingredients":['
1373
+ */
1374
+ openDownTo(targetContainer, leafSegment) {
1375
+ let fragment = "";
1376
+ const startIdx = this.pathStack.length - 1;
1377
+ for (let i = startIdx; i < targetContainer.length; i++) {
1378
+ const seg = targetContainer[i];
1379
+ const parentEntry = this.pathStack[this.pathStack.length - 1];
1380
+ if (parentEntry.childCount > 0) {
1381
+ fragment += ",";
1382
+ }
1383
+ parentEntry.childCount++;
1384
+ if (typeof seg === "string") {
1385
+ fragment += `${JSON.stringify(seg)}:`;
1386
+ }
1387
+ const childSeg = i + 1 < targetContainer.length ? targetContainer[i + 1] : leafSegment;
1388
+ const isArray = typeof childSeg === "number";
1389
+ fragment += isArray ? "[" : "{";
1390
+ this.pathStack.push({ segment: seg, isArray, childCount: 0 });
1391
+ }
1392
+ return fragment;
1393
+ }
1394
+ /**
1395
+ * Emits the comma, key, and value for a leaf entry in the current container.
1396
+ *
1397
+ * Input: leafSegment="name", arg={stringValue:"Lasagna"}, valueJson='"Lasagna"'
1398
+ * Output: '"name":"Lasagna"' (or ',"name":"Lasagna"' if container.childCount > 0)
1399
+ */
1400
+ emitLeaf(leafSegment, arg, valueJson) {
1401
+ let fragment = "";
1402
+ const container = this.pathStack[this.pathStack.length - 1];
1403
+ if (container.childCount > 0) {
1404
+ fragment += ",";
1405
+ }
1406
+ container.childCount++;
1407
+ if (typeof leafSegment === "string") {
1408
+ fragment += `${JSON.stringify(leafSegment)}:`;
1409
+ }
1410
+ if (arg.stringValue != null && arg.willContinue) {
1411
+ fragment += valueJson.slice(0, -1);
1412
+ this.stringOpen = true;
1413
+ } else {
1414
+ fragment += valueJson;
1415
+ }
1416
+ return fragment;
1417
+ }
1418
+ };
1419
+ function parsePath(rawPath) {
1420
+ const segments = [];
1421
+ for (const part of rawPath.split(".")) {
1422
+ const bracketIdx = part.indexOf("[");
1423
+ if (bracketIdx === -1) {
1424
+ segments.push(part);
1425
+ } else {
1426
+ if (bracketIdx > 0) segments.push(part.slice(0, bracketIdx));
1427
+ for (const m of part.matchAll(/\[(\d+)\]/g)) {
1428
+ segments.push(parseInt(m[1], 10));
1429
+ }
1430
+ }
1431
+ }
1432
+ return segments;
1433
+ }
1434
+ function getNestedValue(obj, segments) {
1435
+ let current = obj;
1436
+ for (const seg of segments) {
1437
+ if (current == null || typeof current !== "object") return void 0;
1438
+ current = current[seg];
1439
+ }
1440
+ return current;
1441
+ }
1442
+ function setNestedValue(obj, segments, value) {
1443
+ let current = obj;
1444
+ for (let i = 0; i < segments.length - 1; i++) {
1445
+ const seg = segments[i];
1446
+ const nextSeg = segments[i + 1];
1447
+ if (current[seg] == null) {
1448
+ current[seg] = typeof nextSeg === "number" ? [] : {};
1449
+ }
1450
+ current = current[seg];
1451
+ }
1452
+ current[segments[segments.length - 1]] = value;
1453
+ }
1454
+ function resolvePartialArgValue(arg) {
1455
+ var _a, _b;
1456
+ const value = (_b = (_a = arg.stringValue) != null ? _a : arg.numberValue) != null ? _b : arg.boolValue;
1457
+ if (value != null) return { value, json: JSON.stringify(value) };
1458
+ if ("nullValue" in arg) return { value: null, json: "null" };
1459
+ return void 0;
1460
+ }
1461
+
1462
+ // src/map-google-finish-reason.ts
1463
+ function mapGoogleFinishReason({
912
1464
  finishReason,
913
1465
  hasToolCalls
914
1466
  }) {
@@ -933,14 +1485,23 @@ function mapGoogleGenerativeAIFinishReason({
933
1485
  }
934
1486
  }
935
1487
 
936
- // src/google-generative-ai-language-model.ts
937
- var GoogleGenerativeAILanguageModel = class {
1488
+ // src/google-language-model.ts
1489
+ var GoogleLanguageModel = class _GoogleLanguageModel {
938
1490
  constructor(modelId, config) {
939
1491
  this.specificationVersion = "v4";
940
1492
  var _a;
941
1493
  this.modelId = modelId;
942
1494
  this.config = config;
943
- this.generateId = (_a = config.generateId) != null ? _a : import_provider_utils6.generateId;
1495
+ this.generateId = (_a = config.generateId) != null ? _a : generateId;
1496
+ }
1497
+ static [WORKFLOW_SERIALIZE2](model) {
1498
+ return serializeModelOptions2({
1499
+ modelId: model.modelId,
1500
+ config: model.config
1501
+ });
1502
+ }
1503
+ static [WORKFLOW_DESERIALIZE2](options) {
1504
+ return new _GoogleLanguageModel(options.modelId, options.config);
944
1505
  }
945
1506
  get provider() {
946
1507
  return this.config.provider;
@@ -962,36 +1523,54 @@ var GoogleGenerativeAILanguageModel = class {
962
1523
  seed,
963
1524
  tools,
964
1525
  toolChoice,
1526
+ reasoning,
965
1527
  providerOptions
966
- }) {
967
- var _a;
1528
+ }, { isStreaming = false } = {}) {
1529
+ var _a, _b;
968
1530
  const warnings = [];
969
- const providerOptionsName = this.config.provider.includes("vertex") ? "vertex" : "google";
970
- let googleOptions = await (0, import_provider_utils6.parseProviderOptions)({
971
- provider: providerOptionsName,
972
- providerOptions,
973
- schema: googleLanguageModelOptions
974
- });
975
- if (googleOptions == null && providerOptionsName !== "google") {
976
- googleOptions = await (0, import_provider_utils6.parseProviderOptions)({
1531
+ const providerOptionsNames = this.config.provider.includes("vertex") ? ["googleVertex", "vertex"] : ["google"];
1532
+ let googleOptions;
1533
+ for (const name of providerOptionsNames) {
1534
+ googleOptions = await parseProviderOptions2({
1535
+ provider: name,
1536
+ providerOptions,
1537
+ schema: googleLanguageModelOptions
1538
+ });
1539
+ if (googleOptions != null) break;
1540
+ }
1541
+ if (googleOptions == null && !providerOptionsNames.includes("google")) {
1542
+ googleOptions = await parseProviderOptions2({
977
1543
  provider: "google",
978
1544
  providerOptions,
979
1545
  schema: googleLanguageModelOptions
980
1546
  });
981
1547
  }
1548
+ const isVertexProvider = this.config.provider.startsWith("google.vertex.");
982
1549
  if ((tools == null ? void 0 : tools.some(
983
1550
  (tool) => tool.type === "provider" && tool.id === "google.vertex_rag_store"
984
- )) && !this.config.provider.startsWith("google.vertex.")) {
1551
+ )) && !isVertexProvider) {
985
1552
  warnings.push({
986
1553
  type: "other",
987
1554
  message: `The 'vertex_rag_store' tool is only supported with the Google Vertex provider and might not be supported or could behave unexpectedly with the current Google provider (${this.config.provider}).`
988
1555
  });
989
1556
  }
1557
+ if ((googleOptions == null ? void 0 : googleOptions.streamFunctionCallArguments) && !isVertexProvider) {
1558
+ warnings.push({
1559
+ type: "other",
1560
+ message: `'streamFunctionCallArguments' is only supported on the Vertex AI API and will be ignored with the current Google provider (${this.config.provider}). See https://docs.cloud.google.com/vertex-ai/generative-ai/docs/multimodal/function-calling#streaming-fc`
1561
+ });
1562
+ }
1563
+ let sanitizedServiceTier = googleOptions == null ? void 0 : googleOptions.serviceTier;
1564
+ if ((googleOptions == null ? void 0 : googleOptions.serviceTier) && isVertexProvider) {
1565
+ sanitizedServiceTier = VertexServiceTierMap[googleOptions.serviceTier];
1566
+ }
990
1567
  const isGemmaModel = this.modelId.toLowerCase().startsWith("gemma-");
991
- const { contents, systemInstruction } = convertToGoogleGenerativeAIMessages(
992
- prompt,
993
- { isGemmaModel, providerOptionsName }
994
- );
1568
+ const supportsFunctionResponseParts = this.modelId.startsWith("gemini-3");
1569
+ const { contents, systemInstruction } = convertToGoogleMessages(prompt, {
1570
+ isGemmaModel,
1571
+ providerOptionsNames,
1572
+ supportsFunctionResponseParts
1573
+ });
995
1574
  const {
996
1575
  tools: googleTools2,
997
1576
  toolConfig: googleToolConfig,
@@ -999,8 +1578,28 @@ var GoogleGenerativeAILanguageModel = class {
999
1578
  } = prepareTools({
1000
1579
  tools,
1001
1580
  toolChoice,
1002
- modelId: this.modelId
1581
+ modelId: this.modelId,
1582
+ isVertexProvider
1583
+ });
1584
+ const resolvedThinking = resolveThinkingConfig({
1585
+ reasoning,
1586
+ modelId: this.modelId,
1587
+ warnings
1003
1588
  });
1589
+ const thinkingConfig = (googleOptions == null ? void 0 : googleOptions.thinkingConfig) || resolvedThinking ? { ...resolvedThinking, ...googleOptions == null ? void 0 : googleOptions.thinkingConfig } : void 0;
1590
+ const streamFunctionCallArguments = isStreaming && isVertexProvider ? (_a = googleOptions == null ? void 0 : googleOptions.streamFunctionCallArguments) != null ? _a : false : void 0;
1591
+ const toolConfig = googleToolConfig || streamFunctionCallArguments || (googleOptions == null ? void 0 : googleOptions.retrievalConfig) ? {
1592
+ ...googleToolConfig,
1593
+ ...streamFunctionCallArguments && {
1594
+ functionCallingConfig: {
1595
+ ...googleToolConfig == null ? void 0 : googleToolConfig.functionCallingConfig,
1596
+ streamFunctionCallArguments: true
1597
+ }
1598
+ },
1599
+ ...(googleOptions == null ? void 0 : googleOptions.retrievalConfig) && {
1600
+ retrievalConfig: googleOptions.retrievalConfig
1601
+ }
1602
+ } : void 0;
1004
1603
  return {
1005
1604
  args: {
1006
1605
  generationConfig: {
@@ -1018,13 +1617,13 @@ var GoogleGenerativeAILanguageModel = class {
1018
1617
  responseSchema: (responseFormat == null ? void 0 : responseFormat.type) === "json" && responseFormat.schema != null && // Google GenAI does not support all OpenAPI Schema features,
1019
1618
  // so this is needed as an escape hatch:
1020
1619
  // TODO convert into provider option
1021
- ((_a = googleOptions == null ? void 0 : googleOptions.structuredOutputs) != null ? _a : true) ? convertJSONSchemaToOpenAPISchema(responseFormat.schema) : void 0,
1620
+ ((_b = googleOptions == null ? void 0 : googleOptions.structuredOutputs) != null ? _b : true) ? convertJSONSchemaToOpenAPISchema(responseFormat.schema) : void 0,
1022
1621
  ...(googleOptions == null ? void 0 : googleOptions.audioTimestamp) && {
1023
1622
  audioTimestamp: googleOptions.audioTimestamp
1024
1623
  },
1025
1624
  // provider options:
1026
1625
  responseModalities: googleOptions == null ? void 0 : googleOptions.responseModalities,
1027
- thinkingConfig: googleOptions == null ? void 0 : googleOptions.thinkingConfig,
1626
+ thinkingConfig,
1028
1627
  ...(googleOptions == null ? void 0 : googleOptions.mediaResolution) && {
1029
1628
  mediaResolution: googleOptions.mediaResolution
1030
1629
  },
@@ -1036,36 +1635,37 @@ var GoogleGenerativeAILanguageModel = class {
1036
1635
  systemInstruction: isGemmaModel ? void 0 : systemInstruction,
1037
1636
  safetySettings: googleOptions == null ? void 0 : googleOptions.safetySettings,
1038
1637
  tools: googleTools2,
1039
- toolConfig: (googleOptions == null ? void 0 : googleOptions.retrievalConfig) ? {
1040
- ...googleToolConfig,
1041
- retrievalConfig: googleOptions.retrievalConfig
1042
- } : googleToolConfig,
1638
+ toolConfig,
1043
1639
  cachedContent: googleOptions == null ? void 0 : googleOptions.cachedContent,
1044
- labels: googleOptions == null ? void 0 : googleOptions.labels
1640
+ labels: googleOptions == null ? void 0 : googleOptions.labels,
1641
+ serviceTier: sanitizedServiceTier
1045
1642
  },
1046
1643
  warnings: [...warnings, ...toolWarnings],
1047
- providerOptionsName
1644
+ providerOptionsNames
1048
1645
  };
1049
1646
  }
1050
1647
  async doGenerate(options) {
1051
- var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k;
1052
- const { args, warnings, providerOptionsName } = await this.getArgs(options);
1053
- const mergedHeaders = (0, import_provider_utils6.combineHeaders)(
1054
- await (0, import_provider_utils6.resolve)(this.config.headers),
1648
+ var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p;
1649
+ const { args, warnings, providerOptionsNames } = await this.getArgs(options);
1650
+ const wrapProviderMetadata = (payload) => Object.fromEntries(
1651
+ providerOptionsNames.map((name) => [name, payload])
1652
+ );
1653
+ const mergedHeaders = combineHeaders2(
1654
+ this.config.headers ? await resolve2(this.config.headers) : void 0,
1055
1655
  options.headers
1056
1656
  );
1057
1657
  const {
1058
1658
  responseHeaders,
1059
1659
  value: response,
1060
1660
  rawValue: rawResponse
1061
- } = await (0, import_provider_utils6.postJsonToApi)({
1661
+ } = await postJsonToApi2({
1062
1662
  url: `${this.config.baseURL}/${getModelPath(
1063
1663
  this.modelId
1064
1664
  )}:generateContent`,
1065
1665
  headers: mergedHeaders,
1066
1666
  body: args,
1067
1667
  failedResponseHandler: googleFailedResponseHandler,
1068
- successfulResponseHandler: (0, import_provider_utils6.createJsonResponseHandler)(responseSchema),
1668
+ successfulResponseHandler: createJsonResponseHandler2(responseSchema),
1069
1669
  abortSignal: options.abortSignal,
1070
1670
  fetch: this.config.fetch
1071
1671
  });
@@ -1074,6 +1674,7 @@ var GoogleGenerativeAILanguageModel = class {
1074
1674
  const parts = (_b = (_a = candidate.content) == null ? void 0 : _a.parts) != null ? _b : [];
1075
1675
  const usageMetadata = response.usageMetadata;
1076
1676
  let lastCodeExecutionToolCallId;
1677
+ let lastServerToolCallId;
1077
1678
  for (const part of parts) {
1078
1679
  if ("executableCode" in part && ((_c = part.executableCode) == null ? void 0 : _c.code)) {
1079
1680
  const toolCallId = this.config.generateId();
@@ -1098,11 +1699,9 @@ var GoogleGenerativeAILanguageModel = class {
1098
1699
  });
1099
1700
  lastCodeExecutionToolCallId = void 0;
1100
1701
  } else if ("text" in part && part.text != null) {
1101
- const thoughtSignatureMetadata = part.thoughtSignature ? {
1102
- [providerOptionsName]: {
1103
- thoughtSignature: part.thoughtSignature
1104
- }
1105
- } : void 0;
1702
+ const thoughtSignatureMetadata = part.thoughtSignature ? wrapProviderMetadata({
1703
+ thoughtSignature: part.thoughtSignature
1704
+ }) : void 0;
1106
1705
  if (part.text.length === 0) {
1107
1706
  if (thoughtSignatureMetadata != null && content.length > 0) {
1108
1707
  const lastContent = content[content.length - 1];
@@ -1115,65 +1714,95 @@ var GoogleGenerativeAILanguageModel = class {
1115
1714
  providerMetadata: thoughtSignatureMetadata
1116
1715
  });
1117
1716
  }
1118
- } else if ("functionCall" in part) {
1717
+ } else if ("functionCall" in part && part.functionCall.name != null && part.functionCall.args != null) {
1119
1718
  content.push({
1120
1719
  type: "tool-call",
1121
1720
  toolCallId: this.config.generateId(),
1122
1721
  toolName: part.functionCall.name,
1123
1722
  input: JSON.stringify(part.functionCall.args),
1124
- providerMetadata: part.thoughtSignature ? {
1125
- [providerOptionsName]: {
1126
- thoughtSignature: part.thoughtSignature
1127
- }
1128
- } : void 0
1723
+ providerMetadata: part.thoughtSignature ? wrapProviderMetadata({
1724
+ thoughtSignature: part.thoughtSignature
1725
+ }) : void 0
1129
1726
  });
1130
1727
  } else if ("inlineData" in part) {
1131
1728
  const hasThought = part.thought === true;
1132
1729
  const hasThoughtSignature = !!part.thoughtSignature;
1133
1730
  content.push({
1134
- type: "file",
1135
- data: part.inlineData.data,
1731
+ type: hasThought ? "reasoning-file" : "file",
1732
+ data: { type: "data", data: part.inlineData.data },
1136
1733
  mediaType: part.inlineData.mimeType,
1137
- providerMetadata: hasThought || hasThoughtSignature ? {
1138
- [providerOptionsName]: {
1139
- ...hasThought ? { thought: true } : {},
1140
- ...hasThoughtSignature ? { thoughtSignature: part.thoughtSignature } : {}
1141
- }
1142
- } : void 0
1734
+ providerMetadata: hasThoughtSignature ? wrapProviderMetadata({
1735
+ thoughtSignature: part.thoughtSignature
1736
+ }) : void 0
1143
1737
  });
1738
+ } else if ("toolCall" in part && part.toolCall) {
1739
+ const toolCallId = (_e = part.toolCall.id) != null ? _e : this.config.generateId();
1740
+ lastServerToolCallId = toolCallId;
1741
+ content.push({
1742
+ type: "tool-call",
1743
+ toolCallId,
1744
+ toolName: `server:${part.toolCall.toolType}`,
1745
+ input: JSON.stringify((_f = part.toolCall.args) != null ? _f : {}),
1746
+ providerExecuted: true,
1747
+ dynamic: true,
1748
+ providerMetadata: part.thoughtSignature ? wrapProviderMetadata({
1749
+ thoughtSignature: part.thoughtSignature,
1750
+ serverToolCallId: toolCallId,
1751
+ serverToolType: part.toolCall.toolType
1752
+ }) : wrapProviderMetadata({
1753
+ serverToolCallId: toolCallId,
1754
+ serverToolType: part.toolCall.toolType
1755
+ })
1756
+ });
1757
+ } else if ("toolResponse" in part && part.toolResponse) {
1758
+ const responseToolCallId = (_g = lastServerToolCallId != null ? lastServerToolCallId : part.toolResponse.id) != null ? _g : this.config.generateId();
1759
+ content.push({
1760
+ type: "tool-result",
1761
+ toolCallId: responseToolCallId,
1762
+ toolName: `server:${part.toolResponse.toolType}`,
1763
+ result: (_h = part.toolResponse.response) != null ? _h : {},
1764
+ providerMetadata: part.thoughtSignature ? wrapProviderMetadata({
1765
+ thoughtSignature: part.thoughtSignature,
1766
+ serverToolCallId: responseToolCallId,
1767
+ serverToolType: part.toolResponse.toolType
1768
+ }) : wrapProviderMetadata({
1769
+ serverToolCallId: responseToolCallId,
1770
+ serverToolType: part.toolResponse.toolType
1771
+ })
1772
+ });
1773
+ lastServerToolCallId = void 0;
1144
1774
  }
1145
1775
  }
1146
- const sources = (_e = extractSources({
1776
+ const sources = (_i = extractSources({
1147
1777
  groundingMetadata: candidate.groundingMetadata,
1148
1778
  generateId: this.config.generateId
1149
- })) != null ? _e : [];
1779
+ })) != null ? _i : [];
1150
1780
  for (const source of sources) {
1151
1781
  content.push(source);
1152
1782
  }
1153
1783
  return {
1154
1784
  content,
1155
1785
  finishReason: {
1156
- unified: mapGoogleGenerativeAIFinishReason({
1786
+ unified: mapGoogleFinishReason({
1157
1787
  finishReason: candidate.finishReason,
1158
1788
  // Only count client-executed tool calls for finish reason determination.
1159
1789
  hasToolCalls: content.some(
1160
1790
  (part) => part.type === "tool-call" && !part.providerExecuted
1161
1791
  )
1162
1792
  }),
1163
- raw: (_f = candidate.finishReason) != null ? _f : void 0
1793
+ raw: (_j = candidate.finishReason) != null ? _j : void 0
1164
1794
  },
1165
- usage: convertGoogleGenerativeAIUsage(usageMetadata),
1795
+ usage: convertGoogleUsage(usageMetadata),
1166
1796
  warnings,
1167
- providerMetadata: {
1168
- [providerOptionsName]: {
1169
- promptFeedback: (_g = response.promptFeedback) != null ? _g : null,
1170
- groundingMetadata: (_h = candidate.groundingMetadata) != null ? _h : null,
1171
- urlContextMetadata: (_i = candidate.urlContextMetadata) != null ? _i : null,
1172
- safetyRatings: (_j = candidate.safetyRatings) != null ? _j : null,
1173
- usageMetadata: usageMetadata != null ? usageMetadata : null,
1174
- finishMessage: (_k = candidate.finishMessage) != null ? _k : null
1175
- }
1176
- },
1797
+ providerMetadata: wrapProviderMetadata({
1798
+ promptFeedback: (_k = response.promptFeedback) != null ? _k : null,
1799
+ groundingMetadata: (_l = candidate.groundingMetadata) != null ? _l : null,
1800
+ urlContextMetadata: (_m = candidate.urlContextMetadata) != null ? _m : null,
1801
+ safetyRatings: (_n = candidate.safetyRatings) != null ? _n : null,
1802
+ usageMetadata: usageMetadata != null ? usageMetadata : null,
1803
+ finishMessage: (_o = candidate.finishMessage) != null ? _o : null,
1804
+ serviceTier: (_p = response.serviceTier) != null ? _p : null
1805
+ }),
1177
1806
  request: { body: args },
1178
1807
  response: {
1179
1808
  // TODO timestamp, model id, id
@@ -1183,19 +1812,25 @@ var GoogleGenerativeAILanguageModel = class {
1183
1812
  };
1184
1813
  }
1185
1814
  async doStream(options) {
1186
- const { args, warnings, providerOptionsName } = await this.getArgs(options);
1187
- const headers = (0, import_provider_utils6.combineHeaders)(
1188
- await (0, import_provider_utils6.resolve)(this.config.headers),
1815
+ const { args, warnings, providerOptionsNames } = await this.getArgs(
1816
+ options,
1817
+ { isStreaming: true }
1818
+ );
1819
+ const wrapProviderMetadata = (payload) => Object.fromEntries(
1820
+ providerOptionsNames.map((name) => [name, payload])
1821
+ );
1822
+ const headers = combineHeaders2(
1823
+ this.config.headers ? await resolve2(this.config.headers) : void 0,
1189
1824
  options.headers
1190
1825
  );
1191
- const { responseHeaders, value: response } = await (0, import_provider_utils6.postJsonToApi)({
1826
+ const { responseHeaders, value: response } = await postJsonToApi2({
1192
1827
  url: `${this.config.baseURL}/${getModelPath(
1193
1828
  this.modelId
1194
1829
  )}:streamGenerateContent?alt=sse`,
1195
1830
  headers,
1196
1831
  body: args,
1197
1832
  failedResponseHandler: googleFailedResponseHandler,
1198
- successfulResponseHandler: (0, import_provider_utils6.createEventSourceResponseHandler)(chunkSchema),
1833
+ successfulResponseHandler: createEventSourceResponseHandler(chunkSchema),
1199
1834
  abortSignal: options.abortSignal,
1200
1835
  fetch: this.config.fetch
1201
1836
  });
@@ -1207,6 +1842,7 @@ var GoogleGenerativeAILanguageModel = class {
1207
1842
  let providerMetadata = void 0;
1208
1843
  let lastGroundingMetadata = null;
1209
1844
  let lastUrlContextMetadata = null;
1845
+ let serviceTier = null;
1210
1846
  const generateId3 = this.config.generateId;
1211
1847
  let hasToolCalls = false;
1212
1848
  let currentTextBlockId = null;
@@ -1214,6 +1850,8 @@ var GoogleGenerativeAILanguageModel = class {
1214
1850
  let blockCounter = 0;
1215
1851
  const emittedSourceUrls = /* @__PURE__ */ new Set();
1216
1852
  let lastCodeExecutionToolCallId;
1853
+ let lastServerToolCallId;
1854
+ const activeStreamingToolCalls = [];
1217
1855
  return {
1218
1856
  stream: response.pipeThrough(
1219
1857
  new TransformStream({
@@ -1221,7 +1859,7 @@ var GoogleGenerativeAILanguageModel = class {
1221
1859
  controller.enqueue({ type: "stream-start", warnings });
1222
1860
  },
1223
1861
  transform(chunk, controller) {
1224
- var _a, _b, _c, _d, _e, _f, _g;
1862
+ var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l;
1225
1863
  if (options.includeRawChunks) {
1226
1864
  controller.enqueue({ type: "raw", rawValue: chunk.rawValue });
1227
1865
  }
@@ -1234,6 +1872,9 @@ var GoogleGenerativeAILanguageModel = class {
1234
1872
  if (usageMetadata != null) {
1235
1873
  usage = usageMetadata;
1236
1874
  }
1875
+ if (value.serviceTier != null) {
1876
+ serviceTier = value.serviceTier;
1877
+ }
1237
1878
  const candidate = (_a = value.candidates) == null ? void 0 : _a[0];
1238
1879
  if (candidate == null) {
1239
1880
  return;
@@ -1285,11 +1926,9 @@ var GoogleGenerativeAILanguageModel = class {
1285
1926
  lastCodeExecutionToolCallId = void 0;
1286
1927
  }
1287
1928
  } else if ("text" in part && part.text != null) {
1288
- const thoughtSignatureMetadata = part.thoughtSignature ? {
1289
- [providerOptionsName]: {
1290
- thoughtSignature: part.thoughtSignature
1291
- }
1292
- } : void 0;
1929
+ const thoughtSignatureMetadata = part.thoughtSignature ? wrapProviderMetadata({
1930
+ thoughtSignature: part.thoughtSignature
1931
+ }) : void 0;
1293
1932
  if (part.text.length === 0) {
1294
1933
  if (thoughtSignatureMetadata != null && currentTextBlockId !== null) {
1295
1934
  controller.enqueue({
@@ -1361,50 +2000,151 @@ var GoogleGenerativeAILanguageModel = class {
1361
2000
  }
1362
2001
  const hasThought = part.thought === true;
1363
2002
  const hasThoughtSignature = !!part.thoughtSignature;
1364
- const fileMeta = hasThought || hasThoughtSignature ? {
1365
- [providerOptionsName]: {
1366
- ...hasThought ? { thought: true } : {},
1367
- ...hasThoughtSignature ? { thoughtSignature: part.thoughtSignature } : {}
1368
- }
1369
- } : void 0;
2003
+ const fileMeta = hasThoughtSignature ? wrapProviderMetadata({
2004
+ thoughtSignature: part.thoughtSignature
2005
+ }) : void 0;
1370
2006
  controller.enqueue({
1371
- type: "file",
2007
+ type: hasThought ? "reasoning-file" : "file",
1372
2008
  mediaType: part.inlineData.mimeType,
1373
- data: part.inlineData.data,
2009
+ data: { type: "data", data: part.inlineData.data },
1374
2010
  providerMetadata: fileMeta
1375
2011
  });
2012
+ } else if ("toolCall" in part && part.toolCall) {
2013
+ const toolCallId = (_e = part.toolCall.id) != null ? _e : generateId3();
2014
+ lastServerToolCallId = toolCallId;
2015
+ const serverMeta = wrapProviderMetadata({
2016
+ ...part.thoughtSignature ? { thoughtSignature: part.thoughtSignature } : {},
2017
+ serverToolCallId: toolCallId,
2018
+ serverToolType: part.toolCall.toolType
2019
+ });
2020
+ controller.enqueue({
2021
+ type: "tool-call",
2022
+ toolCallId,
2023
+ toolName: `server:${part.toolCall.toolType}`,
2024
+ input: JSON.stringify((_f = part.toolCall.args) != null ? _f : {}),
2025
+ providerExecuted: true,
2026
+ dynamic: true,
2027
+ providerMetadata: serverMeta
2028
+ });
2029
+ } else if ("toolResponse" in part && part.toolResponse) {
2030
+ const responseToolCallId = (_g = lastServerToolCallId != null ? lastServerToolCallId : part.toolResponse.id) != null ? _g : generateId3();
2031
+ const serverMeta = wrapProviderMetadata({
2032
+ ...part.thoughtSignature ? { thoughtSignature: part.thoughtSignature } : {},
2033
+ serverToolCallId: responseToolCallId,
2034
+ serverToolType: part.toolResponse.toolType
2035
+ });
2036
+ controller.enqueue({
2037
+ type: "tool-result",
2038
+ toolCallId: responseToolCallId,
2039
+ toolName: `server:${part.toolResponse.toolType}`,
2040
+ result: (_h = part.toolResponse.response) != null ? _h : {},
2041
+ providerMetadata: serverMeta
2042
+ });
2043
+ lastServerToolCallId = void 0;
1376
2044
  }
1377
2045
  }
1378
- const toolCallDeltas = getToolCallsFromParts({
1379
- parts: content.parts,
1380
- generateId: generateId3,
1381
- providerOptionsName
1382
- });
1383
- if (toolCallDeltas != null) {
1384
- for (const toolCall of toolCallDeltas) {
2046
+ for (const part of parts) {
2047
+ if (!("functionCall" in part)) continue;
2048
+ const providerMeta = part.thoughtSignature ? wrapProviderMetadata({
2049
+ thoughtSignature: part.thoughtSignature
2050
+ }) : void 0;
2051
+ const isStreamingChunk = part.functionCall.partialArgs != null || part.functionCall.name != null && part.functionCall.willContinue === true;
2052
+ const isTerminalChunk = part.functionCall.name == null && part.functionCall.args == null && part.functionCall.partialArgs == null && part.functionCall.willContinue == null;
2053
+ const isCompleteCall = part.functionCall.name != null && part.functionCall.args != null && part.functionCall.partialArgs == null;
2054
+ if (isStreamingChunk) {
2055
+ if (part.functionCall.name != null && part.functionCall.willContinue === true) {
2056
+ const toolCallId = generateId3();
2057
+ const accumulator = new GoogleJSONAccumulator();
2058
+ activeStreamingToolCalls.push({
2059
+ toolCallId,
2060
+ toolName: part.functionCall.name,
2061
+ accumulator,
2062
+ providerMetadata: providerMeta
2063
+ });
2064
+ controller.enqueue({
2065
+ type: "tool-input-start",
2066
+ id: toolCallId,
2067
+ toolName: part.functionCall.name,
2068
+ providerMetadata: providerMeta
2069
+ });
2070
+ if (part.functionCall.partialArgs != null) {
2071
+ const { textDelta } = accumulator.processPartialArgs(
2072
+ part.functionCall.partialArgs
2073
+ );
2074
+ if (textDelta.length > 0) {
2075
+ controller.enqueue({
2076
+ type: "tool-input-delta",
2077
+ id: toolCallId,
2078
+ delta: textDelta,
2079
+ providerMetadata: providerMeta
2080
+ });
2081
+ }
2082
+ }
2083
+ } else if (part.functionCall.partialArgs != null && activeStreamingToolCalls.length > 0) {
2084
+ const active = activeStreamingToolCalls[activeStreamingToolCalls.length - 1];
2085
+ const { textDelta } = active.accumulator.processPartialArgs(
2086
+ part.functionCall.partialArgs
2087
+ );
2088
+ if (textDelta.length > 0) {
2089
+ controller.enqueue({
2090
+ type: "tool-input-delta",
2091
+ id: active.toolCallId,
2092
+ delta: textDelta,
2093
+ providerMetadata: providerMeta
2094
+ });
2095
+ }
2096
+ }
2097
+ } else if (isTerminalChunk && activeStreamingToolCalls.length > 0) {
2098
+ const active = activeStreamingToolCalls.pop();
2099
+ const { finalJSON, closingDelta } = active.accumulator.finalize();
2100
+ if (closingDelta.length > 0) {
2101
+ controller.enqueue({
2102
+ type: "tool-input-delta",
2103
+ id: active.toolCallId,
2104
+ delta: closingDelta,
2105
+ providerMetadata: active.providerMetadata
2106
+ });
2107
+ }
2108
+ controller.enqueue({
2109
+ type: "tool-input-end",
2110
+ id: active.toolCallId,
2111
+ providerMetadata: active.providerMetadata
2112
+ });
2113
+ controller.enqueue({
2114
+ type: "tool-call",
2115
+ toolCallId: active.toolCallId,
2116
+ toolName: active.toolName,
2117
+ input: finalJSON,
2118
+ providerMetadata: active.providerMetadata
2119
+ });
2120
+ hasToolCalls = true;
2121
+ } else if (isCompleteCall) {
2122
+ const toolCallId = generateId3();
2123
+ const toolName = part.functionCall.name;
2124
+ const args2 = typeof part.functionCall.args === "string" ? part.functionCall.args : JSON.stringify((_i = part.functionCall.args) != null ? _i : {});
1385
2125
  controller.enqueue({
1386
2126
  type: "tool-input-start",
1387
- id: toolCall.toolCallId,
1388
- toolName: toolCall.toolName,
1389
- providerMetadata: toolCall.providerMetadata
2127
+ id: toolCallId,
2128
+ toolName,
2129
+ providerMetadata: providerMeta
1390
2130
  });
1391
2131
  controller.enqueue({
1392
2132
  type: "tool-input-delta",
1393
- id: toolCall.toolCallId,
1394
- delta: toolCall.args,
1395
- providerMetadata: toolCall.providerMetadata
2133
+ id: toolCallId,
2134
+ delta: args2,
2135
+ providerMetadata: providerMeta
1396
2136
  });
1397
2137
  controller.enqueue({
1398
2138
  type: "tool-input-end",
1399
- id: toolCall.toolCallId,
1400
- providerMetadata: toolCall.providerMetadata
2139
+ id: toolCallId,
2140
+ providerMetadata: providerMeta
1401
2141
  });
1402
2142
  controller.enqueue({
1403
2143
  type: "tool-call",
1404
- toolCallId: toolCall.toolCallId,
1405
- toolName: toolCall.toolName,
1406
- input: toolCall.args,
1407
- providerMetadata: toolCall.providerMetadata
2144
+ toolCallId,
2145
+ toolName,
2146
+ input: args2,
2147
+ providerMetadata: providerMeta
1408
2148
  });
1409
2149
  hasToolCalls = true;
1410
2150
  }
@@ -1412,22 +2152,21 @@ var GoogleGenerativeAILanguageModel = class {
1412
2152
  }
1413
2153
  if (candidate.finishReason != null) {
1414
2154
  finishReason = {
1415
- unified: mapGoogleGenerativeAIFinishReason({
2155
+ unified: mapGoogleFinishReason({
1416
2156
  finishReason: candidate.finishReason,
1417
2157
  hasToolCalls
1418
2158
  }),
1419
2159
  raw: candidate.finishReason
1420
2160
  };
1421
- providerMetadata = {
1422
- [providerOptionsName]: {
1423
- promptFeedback: (_e = value.promptFeedback) != null ? _e : null,
1424
- groundingMetadata: lastGroundingMetadata,
1425
- urlContextMetadata: lastUrlContextMetadata,
1426
- safetyRatings: (_f = candidate.safetyRatings) != null ? _f : null,
1427
- usageMetadata: usageMetadata != null ? usageMetadata : null,
1428
- finishMessage: (_g = candidate.finishMessage) != null ? _g : null
1429
- }
1430
- };
2161
+ providerMetadata = wrapProviderMetadata({
2162
+ promptFeedback: (_j = value.promptFeedback) != null ? _j : null,
2163
+ groundingMetadata: lastGroundingMetadata,
2164
+ urlContextMetadata: lastUrlContextMetadata,
2165
+ safetyRatings: (_k = candidate.safetyRatings) != null ? _k : null,
2166
+ usageMetadata: usageMetadata != null ? usageMetadata : null,
2167
+ finishMessage: (_l = candidate.finishMessage) != null ? _l : null,
2168
+ serviceTier
2169
+ });
1431
2170
  }
1432
2171
  },
1433
2172
  flush(controller) {
@@ -1446,7 +2185,7 @@ var GoogleGenerativeAILanguageModel = class {
1446
2185
  controller.enqueue({
1447
2186
  type: "finish",
1448
2187
  finishReason,
1449
- usage: convertGoogleGenerativeAIUsage(usage),
2188
+ usage: convertGoogleUsage(usage),
1450
2189
  providerMetadata
1451
2190
  });
1452
2191
  }
@@ -1457,25 +2196,74 @@ var GoogleGenerativeAILanguageModel = class {
1457
2196
  };
1458
2197
  }
1459
2198
  };
1460
- function getToolCallsFromParts({
1461
- parts,
1462
- generateId: generateId3,
1463
- providerOptionsName
2199
+ function isGemini3Model(modelId) {
2200
+ return /gemini-3[\.\-]/i.test(modelId) || /gemini-3$/i.test(modelId);
2201
+ }
2202
+ function getMaxOutputTokensForGemini25Model() {
2203
+ return 65536;
2204
+ }
2205
+ function getMaxThinkingTokensForGemini25Model(modelId) {
2206
+ const id = modelId.toLowerCase();
2207
+ if (id.includes("2.5-pro") || id.includes("gemini-3-pro-image")) {
2208
+ return 32768;
2209
+ }
2210
+ return 24576;
2211
+ }
2212
+ function resolveThinkingConfig({
2213
+ reasoning,
2214
+ modelId,
2215
+ warnings
1464
2216
  }) {
1465
- const functionCallParts = parts == null ? void 0 : parts.filter(
1466
- (part) => "functionCall" in part
1467
- );
1468
- return functionCallParts == null || functionCallParts.length === 0 ? void 0 : functionCallParts.map((part) => ({
1469
- type: "tool-call",
1470
- toolCallId: generateId3(),
1471
- toolName: part.functionCall.name,
1472
- args: JSON.stringify(part.functionCall.args),
1473
- providerMetadata: part.thoughtSignature ? {
1474
- [providerOptionsName]: {
1475
- thoughtSignature: part.thoughtSignature
1476
- }
1477
- } : void 0
1478
- }));
2217
+ if (!isCustomReasoning(reasoning)) {
2218
+ return void 0;
2219
+ }
2220
+ if (isGemini3Model(modelId) && !modelId.includes("gemini-3-pro-image")) {
2221
+ return resolveGemini3ThinkingConfig({ reasoning, warnings });
2222
+ }
2223
+ return resolveGemini25ThinkingConfig({ reasoning, modelId, warnings });
2224
+ }
2225
+ function resolveGemini3ThinkingConfig({
2226
+ reasoning,
2227
+ warnings
2228
+ }) {
2229
+ if (reasoning === "none") {
2230
+ return { thinkingLevel: "minimal" };
2231
+ }
2232
+ const thinkingLevel = mapReasoningToProviderEffort({
2233
+ reasoning,
2234
+ effortMap: {
2235
+ minimal: "minimal",
2236
+ low: "low",
2237
+ medium: "medium",
2238
+ high: "high",
2239
+ xhigh: "high"
2240
+ },
2241
+ warnings
2242
+ });
2243
+ if (thinkingLevel == null) {
2244
+ return void 0;
2245
+ }
2246
+ return { thinkingLevel };
2247
+ }
2248
+ function resolveGemini25ThinkingConfig({
2249
+ reasoning,
2250
+ modelId,
2251
+ warnings
2252
+ }) {
2253
+ if (reasoning === "none") {
2254
+ return { thinkingBudget: 0 };
2255
+ }
2256
+ const thinkingBudget = mapReasoningToProviderBudget({
2257
+ reasoning,
2258
+ maxOutputTokens: getMaxOutputTokensForGemini25Model(),
2259
+ maxReasoningBudget: getMaxThinkingTokensForGemini25Model(modelId),
2260
+ minReasoningBudget: 0,
2261
+ warnings
2262
+ });
2263
+ if (thinkingBudget == null) {
2264
+ return void 0;
2265
+ }
2266
+ return { thinkingBudget };
1479
2267
  }
1480
2268
  function extractSources({
1481
2269
  groundingMetadata,
@@ -1571,256 +2359,312 @@ function extractSources({
1571
2359
  }
1572
2360
  return sources.length > 0 ? sources : void 0;
1573
2361
  }
1574
- var getGroundingMetadataSchema = () => import_v45.z.object({
1575
- webSearchQueries: import_v45.z.array(import_v45.z.string()).nullish(),
1576
- imageSearchQueries: import_v45.z.array(import_v45.z.string()).nullish(),
1577
- retrievalQueries: import_v45.z.array(import_v45.z.string()).nullish(),
1578
- searchEntryPoint: import_v45.z.object({ renderedContent: import_v45.z.string() }).nullish(),
1579
- groundingChunks: import_v45.z.array(
1580
- import_v45.z.object({
1581
- web: import_v45.z.object({ uri: import_v45.z.string(), title: import_v45.z.string().nullish() }).nullish(),
1582
- image: import_v45.z.object({
1583
- sourceUri: import_v45.z.string(),
1584
- imageUri: import_v45.z.string(),
1585
- title: import_v45.z.string().nullish(),
1586
- domain: import_v45.z.string().nullish()
2362
+ var getGroundingMetadataSchema = () => z5.object({
2363
+ webSearchQueries: z5.array(z5.string()).nullish(),
2364
+ imageSearchQueries: z5.array(z5.string()).nullish(),
2365
+ retrievalQueries: z5.array(z5.string()).nullish(),
2366
+ searchEntryPoint: z5.object({ renderedContent: z5.string() }).nullish(),
2367
+ groundingChunks: z5.array(
2368
+ z5.object({
2369
+ web: z5.object({ uri: z5.string(), title: z5.string().nullish() }).nullish(),
2370
+ image: z5.object({
2371
+ sourceUri: z5.string(),
2372
+ imageUri: z5.string(),
2373
+ title: z5.string().nullish(),
2374
+ domain: z5.string().nullish()
1587
2375
  }).nullish(),
1588
- retrievedContext: import_v45.z.object({
1589
- uri: import_v45.z.string().nullish(),
1590
- title: import_v45.z.string().nullish(),
1591
- text: import_v45.z.string().nullish(),
1592
- fileSearchStore: import_v45.z.string().nullish()
2376
+ retrievedContext: z5.object({
2377
+ uri: z5.string().nullish(),
2378
+ title: z5.string().nullish(),
2379
+ text: z5.string().nullish(),
2380
+ fileSearchStore: z5.string().nullish()
1593
2381
  }).nullish(),
1594
- maps: import_v45.z.object({
1595
- uri: import_v45.z.string().nullish(),
1596
- title: import_v45.z.string().nullish(),
1597
- text: import_v45.z.string().nullish(),
1598
- placeId: import_v45.z.string().nullish()
2382
+ maps: z5.object({
2383
+ uri: z5.string().nullish(),
2384
+ title: z5.string().nullish(),
2385
+ text: z5.string().nullish(),
2386
+ placeId: z5.string().nullish()
1599
2387
  }).nullish()
1600
2388
  })
1601
2389
  ).nullish(),
1602
- groundingSupports: import_v45.z.array(
1603
- import_v45.z.object({
1604
- segment: import_v45.z.object({
1605
- startIndex: import_v45.z.number().nullish(),
1606
- endIndex: import_v45.z.number().nullish(),
1607
- text: import_v45.z.string().nullish()
2390
+ groundingSupports: z5.array(
2391
+ z5.object({
2392
+ segment: z5.object({
2393
+ startIndex: z5.number().nullish(),
2394
+ endIndex: z5.number().nullish(),
2395
+ text: z5.string().nullish()
1608
2396
  }).nullish(),
1609
- segment_text: import_v45.z.string().nullish(),
1610
- groundingChunkIndices: import_v45.z.array(import_v45.z.number()).nullish(),
1611
- supportChunkIndices: import_v45.z.array(import_v45.z.number()).nullish(),
1612
- confidenceScores: import_v45.z.array(import_v45.z.number()).nullish(),
1613
- confidenceScore: import_v45.z.array(import_v45.z.number()).nullish()
2397
+ segment_text: z5.string().nullish(),
2398
+ groundingChunkIndices: z5.array(z5.number()).nullish(),
2399
+ supportChunkIndices: z5.array(z5.number()).nullish(),
2400
+ confidenceScores: z5.array(z5.number()).nullish(),
2401
+ confidenceScore: z5.array(z5.number()).nullish()
1614
2402
  })
1615
2403
  ).nullish(),
1616
- retrievalMetadata: import_v45.z.union([
1617
- import_v45.z.object({
1618
- webDynamicRetrievalScore: import_v45.z.number()
2404
+ retrievalMetadata: z5.union([
2405
+ z5.object({
2406
+ webDynamicRetrievalScore: z5.number()
1619
2407
  }),
1620
- import_v45.z.object({})
2408
+ z5.object({})
1621
2409
  ]).nullish()
1622
2410
  });
1623
- var getContentSchema = () => import_v45.z.object({
1624
- parts: import_v45.z.array(
1625
- import_v45.z.union([
2411
+ var partialArgSchema = z5.object({
2412
+ jsonPath: z5.string(),
2413
+ stringValue: z5.string().nullish(),
2414
+ numberValue: z5.number().nullish(),
2415
+ boolValue: z5.boolean().nullish(),
2416
+ nullValue: z5.unknown().nullish(),
2417
+ willContinue: z5.boolean().nullish()
2418
+ });
2419
+ var getContentSchema = () => z5.object({
2420
+ parts: z5.array(
2421
+ z5.union([
1626
2422
  // note: order matters since text can be fully empty
1627
- import_v45.z.object({
1628
- functionCall: import_v45.z.object({
1629
- name: import_v45.z.string(),
1630
- args: import_v45.z.unknown()
2423
+ z5.object({
2424
+ functionCall: z5.object({
2425
+ name: z5.string().nullish(),
2426
+ args: z5.unknown().nullish(),
2427
+ partialArgs: z5.array(partialArgSchema).nullish(),
2428
+ willContinue: z5.boolean().nullish()
2429
+ }),
2430
+ thoughtSignature: z5.string().nullish()
2431
+ }),
2432
+ z5.object({
2433
+ inlineData: z5.object({
2434
+ mimeType: z5.string(),
2435
+ data: z5.string()
2436
+ }),
2437
+ thought: z5.boolean().nullish(),
2438
+ thoughtSignature: z5.string().nullish()
2439
+ }),
2440
+ z5.object({
2441
+ toolCall: z5.object({
2442
+ toolType: z5.string(),
2443
+ args: z5.unknown().nullish(),
2444
+ id: z5.string()
1631
2445
  }),
1632
- thoughtSignature: import_v45.z.string().nullish()
2446
+ thoughtSignature: z5.string().nullish()
1633
2447
  }),
1634
- import_v45.z.object({
1635
- inlineData: import_v45.z.object({
1636
- mimeType: import_v45.z.string(),
1637
- data: import_v45.z.string()
2448
+ z5.object({
2449
+ toolResponse: z5.object({
2450
+ toolType: z5.string(),
2451
+ response: z5.unknown().nullish(),
2452
+ id: z5.string()
1638
2453
  }),
1639
- thought: import_v45.z.boolean().nullish(),
1640
- thoughtSignature: import_v45.z.string().nullish()
2454
+ thoughtSignature: z5.string().nullish()
1641
2455
  }),
1642
- import_v45.z.object({
1643
- executableCode: import_v45.z.object({
1644
- language: import_v45.z.string(),
1645
- code: import_v45.z.string()
2456
+ z5.object({
2457
+ executableCode: z5.object({
2458
+ language: z5.string(),
2459
+ code: z5.string()
1646
2460
  }).nullish(),
1647
- codeExecutionResult: import_v45.z.object({
1648
- outcome: import_v45.z.string(),
1649
- output: import_v45.z.string().nullish()
2461
+ codeExecutionResult: z5.object({
2462
+ outcome: z5.string(),
2463
+ output: z5.string().nullish()
1650
2464
  }).nullish(),
1651
- text: import_v45.z.string().nullish(),
1652
- thought: import_v45.z.boolean().nullish(),
1653
- thoughtSignature: import_v45.z.string().nullish()
2465
+ text: z5.string().nullish(),
2466
+ thought: z5.boolean().nullish(),
2467
+ thoughtSignature: z5.string().nullish()
1654
2468
  })
1655
2469
  ])
1656
2470
  ).nullish()
1657
2471
  });
1658
- var getSafetyRatingSchema = () => import_v45.z.object({
1659
- category: import_v45.z.string().nullish(),
1660
- probability: import_v45.z.string().nullish(),
1661
- probabilityScore: import_v45.z.number().nullish(),
1662
- severity: import_v45.z.string().nullish(),
1663
- severityScore: import_v45.z.number().nullish(),
1664
- blocked: import_v45.z.boolean().nullish()
2472
+ var getSafetyRatingSchema = () => z5.object({
2473
+ category: z5.string().nullish(),
2474
+ probability: z5.string().nullish(),
2475
+ probabilityScore: z5.number().nullish(),
2476
+ severity: z5.string().nullish(),
2477
+ severityScore: z5.number().nullish(),
2478
+ blocked: z5.boolean().nullish()
1665
2479
  });
1666
- var usageSchema = import_v45.z.object({
1667
- cachedContentTokenCount: import_v45.z.number().nullish(),
1668
- thoughtsTokenCount: import_v45.z.number().nullish(),
1669
- promptTokenCount: import_v45.z.number().nullish(),
1670
- candidatesTokenCount: import_v45.z.number().nullish(),
1671
- totalTokenCount: import_v45.z.number().nullish(),
2480
+ var tokenDetailsSchema = z5.array(
2481
+ z5.object({
2482
+ modality: z5.string(),
2483
+ tokenCount: z5.number()
2484
+ })
2485
+ ).nullish();
2486
+ var usageSchema = z5.object({
2487
+ cachedContentTokenCount: z5.number().nullish(),
2488
+ thoughtsTokenCount: z5.number().nullish(),
2489
+ promptTokenCount: z5.number().nullish(),
2490
+ candidatesTokenCount: z5.number().nullish(),
2491
+ totalTokenCount: z5.number().nullish(),
1672
2492
  // https://cloud.google.com/vertex-ai/generative-ai/docs/reference/rest/v1/GenerateContentResponse#TrafficType
1673
- trafficType: import_v45.z.string().nullish()
2493
+ trafficType: z5.string().nullish(),
2494
+ // https://ai.google.dev/api/generate-content#Modality
2495
+ promptTokensDetails: tokenDetailsSchema,
2496
+ candidatesTokensDetails: tokenDetailsSchema
1674
2497
  });
1675
- var getUrlContextMetadataSchema = () => import_v45.z.object({
1676
- urlMetadata: import_v45.z.array(
1677
- import_v45.z.object({
1678
- retrievedUrl: import_v45.z.string(),
1679
- urlRetrievalStatus: import_v45.z.string()
2498
+ var getUrlContextMetadataSchema = () => z5.object({
2499
+ urlMetadata: z5.array(
2500
+ z5.object({
2501
+ retrievedUrl: z5.string(),
2502
+ urlRetrievalStatus: z5.string()
1680
2503
  })
1681
2504
  ).nullish()
1682
2505
  });
1683
- var responseSchema = (0, import_provider_utils6.lazySchema)(
1684
- () => (0, import_provider_utils6.zodSchema)(
1685
- import_v45.z.object({
1686
- candidates: import_v45.z.array(
1687
- import_v45.z.object({
1688
- content: getContentSchema().nullish().or(import_v45.z.object({}).strict()),
1689
- finishReason: import_v45.z.string().nullish(),
1690
- finishMessage: import_v45.z.string().nullish(),
1691
- safetyRatings: import_v45.z.array(getSafetyRatingSchema()).nullish(),
2506
+ var responseSchema = lazySchema5(
2507
+ () => zodSchema5(
2508
+ z5.object({
2509
+ candidates: z5.array(
2510
+ z5.object({
2511
+ content: getContentSchema().nullish().or(z5.object({}).strict()),
2512
+ finishReason: z5.string().nullish(),
2513
+ finishMessage: z5.string().nullish(),
2514
+ safetyRatings: z5.array(getSafetyRatingSchema()).nullish(),
1692
2515
  groundingMetadata: getGroundingMetadataSchema().nullish(),
1693
2516
  urlContextMetadata: getUrlContextMetadataSchema().nullish()
1694
2517
  })
1695
2518
  ),
1696
2519
  usageMetadata: usageSchema.nullish(),
1697
- promptFeedback: import_v45.z.object({
1698
- blockReason: import_v45.z.string().nullish(),
1699
- safetyRatings: import_v45.z.array(getSafetyRatingSchema()).nullish()
1700
- }).nullish()
2520
+ promptFeedback: z5.object({
2521
+ blockReason: z5.string().nullish(),
2522
+ safetyRatings: z5.array(getSafetyRatingSchema()).nullish()
2523
+ }).nullish(),
2524
+ serviceTier: z5.string().nullish()
1701
2525
  })
1702
2526
  )
1703
2527
  );
1704
- var chunkSchema = (0, import_provider_utils6.lazySchema)(
1705
- () => (0, import_provider_utils6.zodSchema)(
1706
- import_v45.z.object({
1707
- candidates: import_v45.z.array(
1708
- import_v45.z.object({
2528
+ var chunkSchema = lazySchema5(
2529
+ () => zodSchema5(
2530
+ z5.object({
2531
+ candidates: z5.array(
2532
+ z5.object({
1709
2533
  content: getContentSchema().nullish(),
1710
- finishReason: import_v45.z.string().nullish(),
1711
- finishMessage: import_v45.z.string().nullish(),
1712
- safetyRatings: import_v45.z.array(getSafetyRatingSchema()).nullish(),
2534
+ finishReason: z5.string().nullish(),
2535
+ finishMessage: z5.string().nullish(),
2536
+ safetyRatings: z5.array(getSafetyRatingSchema()).nullish(),
1713
2537
  groundingMetadata: getGroundingMetadataSchema().nullish(),
1714
2538
  urlContextMetadata: getUrlContextMetadataSchema().nullish()
1715
2539
  })
1716
2540
  ).nullish(),
1717
2541
  usageMetadata: usageSchema.nullish(),
1718
- promptFeedback: import_v45.z.object({
1719
- blockReason: import_v45.z.string().nullish(),
1720
- safetyRatings: import_v45.z.array(getSafetyRatingSchema()).nullish()
1721
- }).nullish()
2542
+ promptFeedback: z5.object({
2543
+ blockReason: z5.string().nullish(),
2544
+ safetyRatings: z5.array(getSafetyRatingSchema()).nullish()
2545
+ }).nullish(),
2546
+ serviceTier: z5.string().nullish()
1722
2547
  })
1723
2548
  )
1724
2549
  );
1725
2550
 
1726
2551
  // src/tool/code-execution.ts
1727
- var import_provider_utils7 = require("@ai-sdk/provider-utils");
1728
- var import_v46 = require("zod/v4");
1729
- var codeExecution = (0, import_provider_utils7.createProviderToolFactoryWithOutputSchema)({
2552
+ import { createProviderExecutedToolFactory } from "@ai-sdk/provider-utils";
2553
+ import { z as z6 } from "zod/v4";
2554
+ var codeExecution = createProviderExecutedToolFactory({
1730
2555
  id: "google.code_execution",
1731
- inputSchema: import_v46.z.object({
1732
- language: import_v46.z.string().describe("The programming language of the code."),
1733
- code: import_v46.z.string().describe("The code to be executed.")
2556
+ inputSchema: z6.object({
2557
+ language: z6.string().describe("The programming language of the code."),
2558
+ code: z6.string().describe("The code to be executed.")
1734
2559
  }),
1735
- outputSchema: import_v46.z.object({
1736
- outcome: import_v46.z.string().describe('The outcome of the execution (e.g., "OUTCOME_OK").'),
1737
- output: import_v46.z.string().describe("The output from the code execution.")
2560
+ outputSchema: z6.object({
2561
+ outcome: z6.string().describe('The outcome of the execution (e.g., "OUTCOME_OK").'),
2562
+ output: z6.string().describe("The output from the code execution.")
1738
2563
  })
1739
2564
  });
1740
2565
 
1741
2566
  // src/tool/enterprise-web-search.ts
1742
- var import_provider_utils8 = require("@ai-sdk/provider-utils");
1743
- var import_v47 = require("zod/v4");
1744
- var enterpriseWebSearch = (0, import_provider_utils8.createProviderToolFactory)({
2567
+ import {
2568
+ createProviderExecutedToolFactory as createProviderExecutedToolFactory2,
2569
+ lazySchema as lazySchema6,
2570
+ zodSchema as zodSchema6
2571
+ } from "@ai-sdk/provider-utils";
2572
+ import { z as z7 } from "zod/v4";
2573
+ var enterpriseWebSearch = createProviderExecutedToolFactory2({
1745
2574
  id: "google.enterprise_web_search",
1746
- inputSchema: (0, import_provider_utils8.lazySchema)(() => (0, import_provider_utils8.zodSchema)(import_v47.z.object({})))
2575
+ inputSchema: lazySchema6(() => zodSchema6(z7.object({}))),
2576
+ outputSchema: lazySchema6(() => zodSchema6(z7.object({})))
1747
2577
  });
1748
2578
 
1749
2579
  // src/tool/file-search.ts
1750
- var import_provider_utils9 = require("@ai-sdk/provider-utils");
1751
- var import_v48 = require("zod/v4");
1752
- var fileSearchArgsBaseSchema = import_v48.z.object({
2580
+ import {
2581
+ createProviderExecutedToolFactory as createProviderExecutedToolFactory3,
2582
+ lazySchema as lazySchema7,
2583
+ zodSchema as zodSchema7
2584
+ } from "@ai-sdk/provider-utils";
2585
+ import { z as z8 } from "zod/v4";
2586
+ var fileSearchArgsBaseSchema = z8.object({
1753
2587
  /** The names of the file_search_stores to retrieve from.
1754
2588
  * Example: `fileSearchStores/my-file-search-store-123`
1755
2589
  */
1756
- fileSearchStoreNames: import_v48.z.array(import_v48.z.string()).describe(
2590
+ fileSearchStoreNames: z8.array(z8.string()).describe(
1757
2591
  "The names of the file_search_stores to retrieve from. Example: `fileSearchStores/my-file-search-store-123`"
1758
2592
  ),
1759
2593
  /** The number of file search retrieval chunks to retrieve. */
1760
- topK: import_v48.z.number().int().positive().describe("The number of file search retrieval chunks to retrieve.").optional(),
2594
+ topK: z8.number().int().positive().describe("The number of file search retrieval chunks to retrieve.").optional(),
1761
2595
  /** Metadata filter to apply to the file search retrieval documents.
1762
2596
  * See https://google.aip.dev/160 for the syntax of the filter expression.
1763
2597
  */
1764
- metadataFilter: import_v48.z.string().describe(
2598
+ metadataFilter: z8.string().describe(
1765
2599
  "Metadata filter to apply to the file search retrieval documents. See https://google.aip.dev/160 for the syntax of the filter expression."
1766
2600
  ).optional()
1767
2601
  }).passthrough();
1768
- var fileSearchArgsSchema = (0, import_provider_utils9.lazySchema)(
1769
- () => (0, import_provider_utils9.zodSchema)(fileSearchArgsBaseSchema)
1770
- );
1771
- var fileSearch = (0, import_provider_utils9.createProviderToolFactory)({
2602
+ var fileSearch = createProviderExecutedToolFactory3({
1772
2603
  id: "google.file_search",
1773
- inputSchema: fileSearchArgsSchema
2604
+ inputSchema: lazySchema7(() => zodSchema7(z8.object({}))),
2605
+ outputSchema: lazySchema7(() => zodSchema7(z8.object({})))
1774
2606
  });
1775
2607
 
1776
2608
  // src/tool/google-maps.ts
1777
- var import_provider_utils10 = require("@ai-sdk/provider-utils");
1778
- var import_v49 = require("zod/v4");
1779
- var googleMaps = (0, import_provider_utils10.createProviderToolFactory)({
2609
+ import {
2610
+ createProviderExecutedToolFactory as createProviderExecutedToolFactory4,
2611
+ lazySchema as lazySchema8,
2612
+ zodSchema as zodSchema8
2613
+ } from "@ai-sdk/provider-utils";
2614
+ import { z as z9 } from "zod/v4";
2615
+ var googleMaps = createProviderExecutedToolFactory4({
1780
2616
  id: "google.google_maps",
1781
- inputSchema: (0, import_provider_utils10.lazySchema)(() => (0, import_provider_utils10.zodSchema)(import_v49.z.object({})))
2617
+ inputSchema: lazySchema8(() => zodSchema8(z9.object({}))),
2618
+ outputSchema: lazySchema8(() => zodSchema8(z9.object({})))
1782
2619
  });
1783
2620
 
1784
2621
  // src/tool/google-search.ts
1785
- var import_provider_utils11 = require("@ai-sdk/provider-utils");
1786
- var import_v410 = require("zod/v4");
1787
- var googleSearchToolArgsBaseSchema = import_v410.z.object({
1788
- searchTypes: import_v410.z.object({
1789
- webSearch: import_v410.z.object({}).optional(),
1790
- imageSearch: import_v410.z.object({}).optional()
2622
+ import {
2623
+ createProviderExecutedToolFactory as createProviderExecutedToolFactory5,
2624
+ lazySchema as lazySchema9,
2625
+ zodSchema as zodSchema9
2626
+ } from "@ai-sdk/provider-utils";
2627
+ import { z as z10 } from "zod/v4";
2628
+ var googleSearchToolArgsBaseSchema = z10.object({
2629
+ searchTypes: z10.object({
2630
+ webSearch: z10.object({}).optional(),
2631
+ imageSearch: z10.object({}).optional()
1791
2632
  }).optional(),
1792
- timeRangeFilter: import_v410.z.object({
1793
- startTime: import_v410.z.string(),
1794
- endTime: import_v410.z.string()
2633
+ timeRangeFilter: z10.object({
2634
+ startTime: z10.string(),
2635
+ endTime: z10.string()
1795
2636
  }).optional()
1796
2637
  }).passthrough();
1797
- var googleSearchToolArgsSchema = (0, import_provider_utils11.lazySchema)(
1798
- () => (0, import_provider_utils11.zodSchema)(googleSearchToolArgsBaseSchema)
1799
- );
1800
- var googleSearch = (0, import_provider_utils11.createProviderToolFactory)(
1801
- {
1802
- id: "google.google_search",
1803
- inputSchema: googleSearchToolArgsSchema
1804
- }
1805
- );
2638
+ var googleSearch = createProviderExecutedToolFactory5({
2639
+ id: "google.google_search",
2640
+ inputSchema: lazySchema9(() => zodSchema9(z10.object({}))),
2641
+ outputSchema: lazySchema9(() => zodSchema9(z10.object({})))
2642
+ });
1806
2643
 
1807
2644
  // src/tool/url-context.ts
1808
- var import_provider_utils12 = require("@ai-sdk/provider-utils");
1809
- var import_v411 = require("zod/v4");
1810
- var urlContext = (0, import_provider_utils12.createProviderToolFactory)({
2645
+ import {
2646
+ createProviderExecutedToolFactory as createProviderExecutedToolFactory6,
2647
+ lazySchema as lazySchema10,
2648
+ zodSchema as zodSchema10
2649
+ } from "@ai-sdk/provider-utils";
2650
+ import { z as z11 } from "zod/v4";
2651
+ var urlContext = createProviderExecutedToolFactory6({
1811
2652
  id: "google.url_context",
1812
- inputSchema: (0, import_provider_utils12.lazySchema)(() => (0, import_provider_utils12.zodSchema)(import_v411.z.object({})))
2653
+ inputSchema: lazySchema10(() => zodSchema10(z11.object({}))),
2654
+ outputSchema: lazySchema10(() => zodSchema10(z11.object({})))
1813
2655
  });
1814
2656
 
1815
2657
  // src/tool/vertex-rag-store.ts
1816
- var import_provider_utils13 = require("@ai-sdk/provider-utils");
1817
- var import_v412 = require("zod/v4");
1818
- var vertexRagStore = (0, import_provider_utils13.createProviderToolFactory)({
2658
+ import {
2659
+ createProviderExecutedToolFactory as createProviderExecutedToolFactory7,
2660
+ lazySchema as lazySchema11,
2661
+ zodSchema as zodSchema11
2662
+ } from "@ai-sdk/provider-utils";
2663
+ import { z as z12 } from "zod/v4";
2664
+ var vertexRagStore = createProviderExecutedToolFactory7({
1819
2665
  id: "google.vertex_rag_store",
1820
- inputSchema: import_v412.z.object({
1821
- ragCorpus: import_v412.z.string(),
1822
- topK: import_v412.z.number().optional()
1823
- })
2666
+ inputSchema: lazySchema11(() => zodSchema11(z12.object({}))),
2667
+ outputSchema: lazySchema11(() => zodSchema11(z12.object({})))
1824
2668
  });
1825
2669
 
1826
2670
  // src/google-tools.ts
@@ -1883,16 +2727,55 @@ var googleTools = {
1883
2727
  vertexRagStore
1884
2728
  };
1885
2729
 
1886
- // src/google-generative-ai-image-model.ts
1887
- var import_provider_utils14 = require("@ai-sdk/provider-utils");
1888
- var import_v413 = require("zod/v4");
1889
- var GoogleGenerativeAIImageModel = class {
2730
+ // src/google-image-model.ts
2731
+ import {
2732
+ combineHeaders as combineHeaders3,
2733
+ convertToBase64 as convertToBase642,
2734
+ createJsonResponseHandler as createJsonResponseHandler3,
2735
+ generateId as defaultGenerateId,
2736
+ lazySchema as lazySchema13,
2737
+ parseProviderOptions as parseProviderOptions3,
2738
+ postJsonToApi as postJsonToApi3,
2739
+ resolve as resolve3,
2740
+ serializeModelOptions as serializeModelOptions3,
2741
+ WORKFLOW_SERIALIZE as WORKFLOW_SERIALIZE3,
2742
+ WORKFLOW_DESERIALIZE as WORKFLOW_DESERIALIZE3,
2743
+ zodSchema as zodSchema13
2744
+ } from "@ai-sdk/provider-utils";
2745
+ import { z as z14 } from "zod/v4";
2746
+
2747
+ // src/google-image-model-options.ts
2748
+ import {
2749
+ lazySchema as lazySchema12,
2750
+ zodSchema as zodSchema12
2751
+ } from "@ai-sdk/provider-utils";
2752
+ import { z as z13 } from "zod/v4";
2753
+ var googleImageModelOptionsSchema = lazySchema12(
2754
+ () => zodSchema12(
2755
+ z13.object({
2756
+ personGeneration: z13.enum(["dont_allow", "allow_adult", "allow_all"]).nullish(),
2757
+ aspectRatio: z13.enum(["1:1", "3:4", "4:3", "9:16", "16:9"]).nullish()
2758
+ })
2759
+ )
2760
+ );
2761
+
2762
+ // src/google-image-model.ts
2763
+ var GoogleImageModel = class _GoogleImageModel {
1890
2764
  constructor(modelId, settings, config) {
1891
2765
  this.modelId = modelId;
1892
2766
  this.settings = settings;
1893
2767
  this.config = config;
1894
2768
  this.specificationVersion = "v4";
1895
2769
  }
2770
+ static [WORKFLOW_SERIALIZE3](model) {
2771
+ return serializeModelOptions3({
2772
+ modelId: model.modelId,
2773
+ config: model.config
2774
+ });
2775
+ }
2776
+ static [WORKFLOW_DESERIALIZE3](options) {
2777
+ return new _GoogleImageModel(options.modelId, {}, options.config);
2778
+ }
1896
2779
  get maxImagesPerCall() {
1897
2780
  if (this.settings.maxImagesPerCall != null) {
1898
2781
  return this.settings.maxImagesPerCall;
@@ -1928,12 +2811,12 @@ var GoogleGenerativeAIImageModel = class {
1928
2811
  const warnings = [];
1929
2812
  if (files != null && files.length > 0) {
1930
2813
  throw new Error(
1931
- "Google Generative AI does not support image editing with Imagen models. Use Google Vertex AI (@ai-sdk/google-vertex) for image editing capabilities."
2814
+ "Google Gemini API does not support image editing with Imagen models. Use Google Vertex AI (@ai-sdk/google-vertex) for image editing capabilities."
1932
2815
  );
1933
2816
  }
1934
2817
  if (mask != null) {
1935
2818
  throw new Error(
1936
- "Google Generative AI does not support image editing with masks. Use Google Vertex AI (@ai-sdk/google-vertex) for image editing capabilities."
2819
+ "Google Gemini API does not support image editing with masks. Use Google Vertex AI (@ai-sdk/google-vertex) for image editing capabilities."
1937
2820
  );
1938
2821
  }
1939
2822
  if (size != null) {
@@ -1950,7 +2833,7 @@ var GoogleGenerativeAIImageModel = class {
1950
2833
  details: "This model does not support the `seed` option through this provider."
1951
2834
  });
1952
2835
  }
1953
- const googleOptions = await (0, import_provider_utils14.parseProviderOptions)({
2836
+ const googleOptions = await parseProviderOptions3({
1954
2837
  provider: "google",
1955
2838
  providerOptions,
1956
2839
  schema: googleImageModelOptionsSchema
@@ -1969,12 +2852,15 @@ var GoogleGenerativeAIImageModel = class {
1969
2852
  instances: [{ prompt }],
1970
2853
  parameters
1971
2854
  };
1972
- const { responseHeaders, value: response } = await (0, import_provider_utils14.postJsonToApi)({
2855
+ const { responseHeaders, value: response } = await postJsonToApi3({
1973
2856
  url: `${this.config.baseURL}/models/${this.modelId}:predict`,
1974
- headers: (0, import_provider_utils14.combineHeaders)(await (0, import_provider_utils14.resolve)(this.config.headers), headers),
2857
+ headers: combineHeaders3(
2858
+ this.config.headers ? await resolve3(this.config.headers) : void 0,
2859
+ headers
2860
+ ),
1975
2861
  body,
1976
2862
  failedResponseHandler: googleFailedResponseHandler,
1977
- successfulResponseHandler: (0, import_provider_utils14.createJsonResponseHandler)(
2863
+ successfulResponseHandler: createJsonResponseHandler3(
1978
2864
  googleImageResponseSchema
1979
2865
  ),
1980
2866
  abortSignal,
@@ -2040,13 +2926,16 @@ var GoogleGenerativeAIImageModel = class {
2040
2926
  if (file.type === "url") {
2041
2927
  userContent.push({
2042
2928
  type: "file",
2043
- data: new URL(file.url),
2929
+ data: { type: "url", url: new URL(file.url) },
2044
2930
  mediaType: "image/*"
2045
2931
  });
2046
2932
  } else {
2047
2933
  userContent.push({
2048
2934
  type: "file",
2049
- data: typeof file.data === "string" ? file.data : new Uint8Array(file.data),
2935
+ data: {
2936
+ type: "data",
2937
+ data: typeof file.data === "string" ? file.data : new Uint8Array(file.data)
2938
+ },
2050
2939
  mediaType: file.mediaType
2051
2940
  });
2052
2941
  }
@@ -2055,12 +2944,12 @@ var GoogleGenerativeAIImageModel = class {
2055
2944
  const languageModelPrompt = [
2056
2945
  { role: "user", content: userContent }
2057
2946
  ];
2058
- const languageModel = new GoogleGenerativeAILanguageModel(this.modelId, {
2947
+ const languageModel = new GoogleLanguageModel(this.modelId, {
2059
2948
  provider: this.config.provider,
2060
2949
  baseURL: this.config.baseURL,
2061
2950
  headers: (_a = this.config.headers) != null ? _a : {},
2062
2951
  fetch: this.config.fetch,
2063
- generateId: (_b = this.config.generateId) != null ? _b : import_provider_utils14.generateId
2952
+ generateId: (_b = this.config.generateId) != null ? _b : defaultGenerateId
2064
2953
  });
2065
2954
  const result = await languageModel.doGenerate({
2066
2955
  prompt: languageModelPrompt,
@@ -2080,8 +2969,8 @@ var GoogleGenerativeAIImageModel = class {
2080
2969
  const currentDate = (_f = (_e = (_d = this.config._internal) == null ? void 0 : _d.currentDate) == null ? void 0 : _e.call(_d)) != null ? _f : /* @__PURE__ */ new Date();
2081
2970
  const images = [];
2082
2971
  for (const part of result.content) {
2083
- if (part.type === "file" && part.mediaType.startsWith("image/")) {
2084
- images.push((0, import_provider_utils14.convertToBase64)(part.data));
2972
+ if (part.type === "file" && part.mediaType.startsWith("image/") && part.data.type === "data") {
2973
+ images.push(convertToBase642(part.data.data));
2085
2974
  }
2086
2975
  }
2087
2976
  return {
@@ -2108,27 +2997,215 @@ var GoogleGenerativeAIImageModel = class {
2108
2997
  function isGeminiModel(modelId) {
2109
2998
  return modelId.startsWith("gemini-");
2110
2999
  }
2111
- var googleImageResponseSchema = (0, import_provider_utils14.lazySchema)(
2112
- () => (0, import_provider_utils14.zodSchema)(
2113
- import_v413.z.object({
2114
- predictions: import_v413.z.array(import_v413.z.object({ bytesBase64Encoded: import_v413.z.string() })).default([])
3000
+ var googleImageResponseSchema = lazySchema13(
3001
+ () => zodSchema13(
3002
+ z14.object({
3003
+ predictions: z14.array(z14.object({ bytesBase64Encoded: z14.string() })).default([])
2115
3004
  })
2116
3005
  )
2117
3006
  );
2118
- var googleImageModelOptionsSchema = (0, import_provider_utils14.lazySchema)(
2119
- () => (0, import_provider_utils14.zodSchema)(
2120
- import_v413.z.object({
2121
- personGeneration: import_v413.z.enum(["dont_allow", "allow_adult", "allow_all"]).nullish(),
2122
- aspectRatio: import_v413.z.enum(["1:1", "3:4", "4:3", "9:16", "16:9"]).nullish()
3007
+
3008
+ // src/google-files.ts
3009
+ import {
3010
+ AISDKError
3011
+ } from "@ai-sdk/provider";
3012
+ import {
3013
+ combineHeaders as combineHeaders4,
3014
+ convertInlineFileDataToUint8Array,
3015
+ createJsonResponseHandler as createJsonResponseHandler4,
3016
+ delay,
3017
+ lazySchema as lazySchema14,
3018
+ parseProviderOptions as parseProviderOptions4,
3019
+ zodSchema as zodSchema14,
3020
+ getFromApi
3021
+ } from "@ai-sdk/provider-utils";
3022
+ import { z as z15 } from "zod/v4";
3023
+ var GoogleFiles = class {
3024
+ constructor(config) {
3025
+ this.config = config;
3026
+ this.specificationVersion = "v4";
3027
+ }
3028
+ get provider() {
3029
+ return this.config.provider;
3030
+ }
3031
+ async uploadFile(options) {
3032
+ var _a, _b, _c, _d;
3033
+ const googleOptions = await parseProviderOptions4({
3034
+ provider: "google",
3035
+ providerOptions: options.providerOptions,
3036
+ schema: googleFilesUploadOptionsSchema
3037
+ });
3038
+ const resolvedHeaders = this.config.headers();
3039
+ const fetchFn = (_a = this.config.fetch) != null ? _a : globalThis.fetch;
3040
+ const warnings = [];
3041
+ if (options.filename != null) {
3042
+ warnings.push({ type: "unsupported", feature: "filename" });
3043
+ }
3044
+ const fileBytes = convertInlineFileDataToUint8Array(options.data);
3045
+ const mediaType = options.mediaType;
3046
+ const displayName = googleOptions == null ? void 0 : googleOptions.displayName;
3047
+ const baseOrigin = this.config.baseURL.replace(/\/v1beta$/, "");
3048
+ const initResponse = await fetchFn(`${baseOrigin}/upload/v1beta/files`, {
3049
+ method: "POST",
3050
+ headers: {
3051
+ ...resolvedHeaders,
3052
+ "X-Goog-Upload-Protocol": "resumable",
3053
+ "X-Goog-Upload-Command": "start",
3054
+ "X-Goog-Upload-Header-Content-Length": String(fileBytes.length),
3055
+ "X-Goog-Upload-Header-Content-Type": mediaType,
3056
+ "Content-Type": "application/json"
3057
+ },
3058
+ body: JSON.stringify({
3059
+ file: {
3060
+ ...displayName != null ? { display_name: displayName } : {}
3061
+ }
3062
+ })
3063
+ });
3064
+ if (!initResponse.ok) {
3065
+ const errorBody = await initResponse.text();
3066
+ throw new AISDKError({
3067
+ name: "GOOGLE_FILES_UPLOAD_ERROR",
3068
+ message: `Failed to initiate resumable upload: ${initResponse.status} ${errorBody}`
3069
+ });
3070
+ }
3071
+ const uploadUrl = initResponse.headers.get("x-goog-upload-url");
3072
+ if (!uploadUrl) {
3073
+ throw new AISDKError({
3074
+ name: "GOOGLE_FILES_UPLOAD_ERROR",
3075
+ message: "No upload URL returned from initiation request"
3076
+ });
3077
+ }
3078
+ const uploadResponse = await fetchFn(uploadUrl, {
3079
+ method: "POST",
3080
+ headers: {
3081
+ "Content-Length": String(fileBytes.length),
3082
+ "X-Goog-Upload-Offset": "0",
3083
+ "X-Goog-Upload-Command": "upload, finalize"
3084
+ },
3085
+ body: fileBytes
3086
+ });
3087
+ if (!uploadResponse.ok) {
3088
+ const errorBody = await uploadResponse.text();
3089
+ throw new AISDKError({
3090
+ name: "GOOGLE_FILES_UPLOAD_ERROR",
3091
+ message: `Failed to upload file data: ${uploadResponse.status} ${errorBody}`
3092
+ });
3093
+ }
3094
+ const uploadResult = await uploadResponse.json();
3095
+ let file = uploadResult.file;
3096
+ const pollIntervalMs = (_b = googleOptions == null ? void 0 : googleOptions.pollIntervalMs) != null ? _b : 2e3;
3097
+ const pollTimeoutMs = (_c = googleOptions == null ? void 0 : googleOptions.pollTimeoutMs) != null ? _c : 3e5;
3098
+ const startTime = Date.now();
3099
+ while (file.state === "PROCESSING") {
3100
+ if (Date.now() - startTime > pollTimeoutMs) {
3101
+ throw new AISDKError({
3102
+ name: "GOOGLE_FILES_UPLOAD_TIMEOUT",
3103
+ message: `File processing timed out after ${pollTimeoutMs}ms`
3104
+ });
3105
+ }
3106
+ await delay(pollIntervalMs);
3107
+ const { value: fileStatus } = await getFromApi({
3108
+ url: `${this.config.baseURL}/${file.name}`,
3109
+ headers: combineHeaders4(resolvedHeaders),
3110
+ successfulResponseHandler: createJsonResponseHandler4(
3111
+ googleFileResponseSchema
3112
+ ),
3113
+ failedResponseHandler: googleFailedResponseHandler,
3114
+ fetch: this.config.fetch
3115
+ });
3116
+ file = fileStatus;
3117
+ }
3118
+ if (file.state === "FAILED") {
3119
+ throw new AISDKError({
3120
+ name: "GOOGLE_FILES_UPLOAD_FAILED",
3121
+ message: `File processing failed for ${file.name}`
3122
+ });
3123
+ }
3124
+ return {
3125
+ warnings,
3126
+ providerReference: { google: file.uri },
3127
+ mediaType: (_d = file.mimeType) != null ? _d : options.mediaType,
3128
+ providerMetadata: {
3129
+ google: {
3130
+ name: file.name,
3131
+ displayName: file.displayName,
3132
+ mimeType: file.mimeType,
3133
+ sizeBytes: file.sizeBytes,
3134
+ state: file.state,
3135
+ uri: file.uri,
3136
+ ...file.createTime != null ? { createTime: file.createTime } : {},
3137
+ ...file.updateTime != null ? { updateTime: file.updateTime } : {},
3138
+ ...file.expirationTime != null ? { expirationTime: file.expirationTime } : {},
3139
+ ...file.sha256Hash != null ? { sha256Hash: file.sha256Hash } : {}
3140
+ }
3141
+ }
3142
+ };
3143
+ }
3144
+ };
3145
+ var googleFileResponseSchema = lazySchema14(
3146
+ () => zodSchema14(
3147
+ z15.object({
3148
+ name: z15.string(),
3149
+ displayName: z15.string().nullish(),
3150
+ mimeType: z15.string(),
3151
+ sizeBytes: z15.string().nullish(),
3152
+ createTime: z15.string().nullish(),
3153
+ updateTime: z15.string().nullish(),
3154
+ expirationTime: z15.string().nullish(),
3155
+ sha256Hash: z15.string().nullish(),
3156
+ uri: z15.string(),
3157
+ state: z15.string()
2123
3158
  })
2124
3159
  )
2125
3160
  );
3161
+ var googleFilesUploadOptionsSchema = lazySchema14(
3162
+ () => zodSchema14(
3163
+ z15.object({
3164
+ displayName: z15.string().nullish(),
3165
+ pollIntervalMs: z15.number().positive().nullish(),
3166
+ pollTimeoutMs: z15.number().positive().nullish()
3167
+ }).passthrough()
3168
+ )
3169
+ );
3170
+
3171
+ // src/google-video-model.ts
3172
+ import {
3173
+ AISDKError as AISDKError2
3174
+ } from "@ai-sdk/provider";
3175
+ import {
3176
+ combineHeaders as combineHeaders5,
3177
+ convertUint8ArrayToBase64,
3178
+ createJsonResponseHandler as createJsonResponseHandler5,
3179
+ delay as delay2,
3180
+ getFromApi as getFromApi2,
3181
+ parseProviderOptions as parseProviderOptions5,
3182
+ postJsonToApi as postJsonToApi4,
3183
+ resolve as resolve4
3184
+ } from "@ai-sdk/provider-utils";
3185
+ import { z as z17 } from "zod/v4";
3186
+
3187
+ // src/google-video-model-options.ts
3188
+ import { lazySchema as lazySchema15, zodSchema as zodSchema15 } from "@ai-sdk/provider-utils";
3189
+ import { z as z16 } from "zod/v4";
3190
+ var googleVideoModelOptionsSchema = lazySchema15(
3191
+ () => zodSchema15(
3192
+ z16.object({
3193
+ pollIntervalMs: z16.number().positive().nullish(),
3194
+ pollTimeoutMs: z16.number().positive().nullish(),
3195
+ personGeneration: z16.enum(["dont_allow", "allow_adult", "allow_all"]).nullish(),
3196
+ negativePrompt: z16.string().nullish(),
3197
+ referenceImages: z16.array(
3198
+ z16.object({
3199
+ bytesBase64Encoded: z16.string().nullish(),
3200
+ gcsUri: z16.string().nullish()
3201
+ })
3202
+ ).nullish()
3203
+ }).passthrough()
3204
+ )
3205
+ );
2126
3206
 
2127
- // src/google-generative-ai-video-model.ts
2128
- var import_provider4 = require("@ai-sdk/provider");
2129
- var import_provider_utils15 = require("@ai-sdk/provider-utils");
2130
- var import_v414 = require("zod/v4");
2131
- var GoogleGenerativeAIVideoModel = class {
3207
+ // src/google-video-model.ts
3208
+ var GoogleVideoModel = class {
2132
3209
  constructor(modelId, config) {
2133
3210
  this.modelId = modelId;
2134
3211
  this.config = config;
@@ -2144,7 +3221,7 @@ var GoogleGenerativeAIVideoModel = class {
2144
3221
  var _a, _b, _c, _d, _e, _f, _g, _h;
2145
3222
  const currentDate = (_c = (_b = (_a = this.config._internal) == null ? void 0 : _a.currentDate) == null ? void 0 : _b.call(_a)) != null ? _c : /* @__PURE__ */ new Date();
2146
3223
  const warnings = [];
2147
- const googleOptions = await (0, import_provider_utils15.parseProviderOptions)({
3224
+ const googleOptions = await parseProviderOptions5({
2148
3225
  provider: "google",
2149
3226
  providerOptions: options.providerOptions,
2150
3227
  schema: googleVideoModelOptionsSchema
@@ -2162,7 +3239,7 @@ var GoogleGenerativeAIVideoModel = class {
2162
3239
  details: "Google Generative AI video models require base64-encoded images. URL will be ignored."
2163
3240
  });
2164
3241
  } else {
2165
- const base64Data = typeof options.image.data === "string" ? options.image.data : (0, import_provider_utils15.convertUint8ArrayToBase64)(options.image.data);
3242
+ const base64Data = typeof options.image.data === "string" ? options.image.data : convertUint8ArrayToBase64(options.image.data);
2166
3243
  instance.image = {
2167
3244
  inlineData: {
2168
3245
  mimeType: options.image.mediaType || "image/png",
@@ -2228,17 +3305,17 @@ var GoogleGenerativeAIVideoModel = class {
2228
3305
  }
2229
3306
  }
2230
3307
  }
2231
- const { value: operation } = await (0, import_provider_utils15.postJsonToApi)({
3308
+ const { value: operation } = await postJsonToApi4({
2232
3309
  url: `${this.config.baseURL}/models/${this.modelId}:predictLongRunning`,
2233
- headers: (0, import_provider_utils15.combineHeaders)(
2234
- await (0, import_provider_utils15.resolve)(this.config.headers),
3310
+ headers: combineHeaders5(
3311
+ await resolve4(this.config.headers),
2235
3312
  options.headers
2236
3313
  ),
2237
3314
  body: {
2238
3315
  instances,
2239
3316
  parameters
2240
3317
  },
2241
- successfulResponseHandler: (0, import_provider_utils15.createJsonResponseHandler)(
3318
+ successfulResponseHandler: createJsonResponseHandler5(
2242
3319
  googleOperationSchema
2243
3320
  ),
2244
3321
  failedResponseHandler: googleFailedResponseHandler,
@@ -2247,7 +3324,7 @@ var GoogleGenerativeAIVideoModel = class {
2247
3324
  });
2248
3325
  const operationName = operation.name;
2249
3326
  if (!operationName) {
2250
- throw new import_provider4.AISDKError({
3327
+ throw new AISDKError2({
2251
3328
  name: "GOOGLE_VIDEO_GENERATION_ERROR",
2252
3329
  message: "No operation name returned from API"
2253
3330
  });
@@ -2259,25 +3336,25 @@ var GoogleGenerativeAIVideoModel = class {
2259
3336
  let responseHeaders;
2260
3337
  while (!finalOperation.done) {
2261
3338
  if (Date.now() - startTime > pollTimeoutMs) {
2262
- throw new import_provider4.AISDKError({
3339
+ throw new AISDKError2({
2263
3340
  name: "GOOGLE_VIDEO_GENERATION_TIMEOUT",
2264
3341
  message: `Video generation timed out after ${pollTimeoutMs}ms`
2265
3342
  });
2266
3343
  }
2267
- await (0, import_provider_utils15.delay)(pollIntervalMs);
3344
+ await delay2(pollIntervalMs);
2268
3345
  if ((_f = options.abortSignal) == null ? void 0 : _f.aborted) {
2269
- throw new import_provider4.AISDKError({
3346
+ throw new AISDKError2({
2270
3347
  name: "GOOGLE_VIDEO_GENERATION_ABORTED",
2271
3348
  message: "Video generation request was aborted"
2272
3349
  });
2273
3350
  }
2274
- const { value: statusOperation, responseHeaders: pollHeaders } = await (0, import_provider_utils15.getFromApi)({
3351
+ const { value: statusOperation, responseHeaders: pollHeaders } = await getFromApi2({
2275
3352
  url: `${this.config.baseURL}/${operationName}`,
2276
- headers: (0, import_provider_utils15.combineHeaders)(
2277
- await (0, import_provider_utils15.resolve)(this.config.headers),
3353
+ headers: combineHeaders5(
3354
+ await resolve4(this.config.headers),
2278
3355
  options.headers
2279
3356
  ),
2280
- successfulResponseHandler: (0, import_provider_utils15.createJsonResponseHandler)(
3357
+ successfulResponseHandler: createJsonResponseHandler5(
2281
3358
  googleOperationSchema
2282
3359
  ),
2283
3360
  failedResponseHandler: googleFailedResponseHandler,
@@ -2288,21 +3365,21 @@ var GoogleGenerativeAIVideoModel = class {
2288
3365
  responseHeaders = pollHeaders;
2289
3366
  }
2290
3367
  if (finalOperation.error) {
2291
- throw new import_provider4.AISDKError({
3368
+ throw new AISDKError2({
2292
3369
  name: "GOOGLE_VIDEO_GENERATION_FAILED",
2293
3370
  message: `Video generation failed: ${finalOperation.error.message}`
2294
3371
  });
2295
3372
  }
2296
3373
  const response = finalOperation.response;
2297
3374
  if (!((_g = response == null ? void 0 : response.generateVideoResponse) == null ? void 0 : _g.generatedSamples) || response.generateVideoResponse.generatedSamples.length === 0) {
2298
- throw new import_provider4.AISDKError({
3375
+ throw new AISDKError2({
2299
3376
  name: "GOOGLE_VIDEO_GENERATION_ERROR",
2300
3377
  message: `No videos in response. Response: ${JSON.stringify(finalOperation)}`
2301
3378
  });
2302
3379
  }
2303
3380
  const videos = [];
2304
3381
  const videoMetadata = [];
2305
- const resolvedHeaders = await (0, import_provider_utils15.resolve)(this.config.headers);
3382
+ const resolvedHeaders = await resolve4(this.config.headers);
2306
3383
  const apiKey = resolvedHeaders == null ? void 0 : resolvedHeaders["x-goog-api-key"];
2307
3384
  for (const generatedSample of response.generateVideoResponse.generatedSamples) {
2308
3385
  if ((_h = generatedSample.video) == null ? void 0 : _h.uri) {
@@ -2318,7 +3395,7 @@ var GoogleGenerativeAIVideoModel = class {
2318
3395
  }
2319
3396
  }
2320
3397
  if (videos.length === 0) {
2321
- throw new import_provider4.AISDKError({
3398
+ throw new AISDKError2({
2322
3399
  name: "GOOGLE_VIDEO_GENERATION_ERROR",
2323
3400
  message: "No valid videos in response"
2324
3401
  });
@@ -2339,51 +3416,35 @@ var GoogleGenerativeAIVideoModel = class {
2339
3416
  };
2340
3417
  }
2341
3418
  };
2342
- var googleOperationSchema = import_v414.z.object({
2343
- name: import_v414.z.string().nullish(),
2344
- done: import_v414.z.boolean().nullish(),
2345
- error: import_v414.z.object({
2346
- code: import_v414.z.number().nullish(),
2347
- message: import_v414.z.string(),
2348
- status: import_v414.z.string().nullish()
3419
+ var googleOperationSchema = z17.object({
3420
+ name: z17.string().nullish(),
3421
+ done: z17.boolean().nullish(),
3422
+ error: z17.object({
3423
+ code: z17.number().nullish(),
3424
+ message: z17.string(),
3425
+ status: z17.string().nullish()
2349
3426
  }).nullish(),
2350
- response: import_v414.z.object({
2351
- generateVideoResponse: import_v414.z.object({
2352
- generatedSamples: import_v414.z.array(
2353
- import_v414.z.object({
2354
- video: import_v414.z.object({
2355
- uri: import_v414.z.string().nullish()
3427
+ response: z17.object({
3428
+ generateVideoResponse: z17.object({
3429
+ generatedSamples: z17.array(
3430
+ z17.object({
3431
+ video: z17.object({
3432
+ uri: z17.string().nullish()
2356
3433
  }).nullish()
2357
3434
  })
2358
3435
  ).nullish()
2359
3436
  }).nullish()
2360
3437
  }).nullish()
2361
3438
  });
2362
- var googleVideoModelOptionsSchema = (0, import_provider_utils15.lazySchema)(
2363
- () => (0, import_provider_utils15.zodSchema)(
2364
- import_v414.z.object({
2365
- pollIntervalMs: import_v414.z.number().positive().nullish(),
2366
- pollTimeoutMs: import_v414.z.number().positive().nullish(),
2367
- personGeneration: import_v414.z.enum(["dont_allow", "allow_adult", "allow_all"]).nullish(),
2368
- negativePrompt: import_v414.z.string().nullish(),
2369
- referenceImages: import_v414.z.array(
2370
- import_v414.z.object({
2371
- bytesBase64Encoded: import_v414.z.string().nullish(),
2372
- gcsUri: import_v414.z.string().nullish()
2373
- })
2374
- ).nullish()
2375
- }).passthrough()
2376
- )
2377
- );
2378
3439
 
2379
3440
  // src/google-provider.ts
2380
- function createGoogleGenerativeAI(options = {}) {
3441
+ function createGoogle(options = {}) {
2381
3442
  var _a, _b;
2382
- const baseURL = (_a = (0, import_provider_utils16.withoutTrailingSlash)(options.baseURL)) != null ? _a : "https://generativelanguage.googleapis.com/v1beta";
3443
+ const baseURL = (_a = withoutTrailingSlash(options.baseURL)) != null ? _a : "https://generativelanguage.googleapis.com/v1beta";
2383
3444
  const providerName = (_b = options.name) != null ? _b : "google.generative-ai";
2384
- const getHeaders = () => (0, import_provider_utils16.withUserAgentSuffix)(
3445
+ const getHeaders = () => withUserAgentSuffix(
2385
3446
  {
2386
- "x-goog-api-key": (0, import_provider_utils16.loadApiKey)({
3447
+ "x-goog-api-key": loadApiKey({
2387
3448
  apiKey: options.apiKey,
2388
3449
  environmentVariableName: "GOOGLE_GENERATIVE_AI_API_KEY",
2389
3450
  description: "Google Generative AI"
@@ -2394,11 +3455,11 @@ function createGoogleGenerativeAI(options = {}) {
2394
3455
  );
2395
3456
  const createChatModel = (modelId) => {
2396
3457
  var _a2;
2397
- return new GoogleGenerativeAILanguageModel(modelId, {
3458
+ return new GoogleLanguageModel(modelId, {
2398
3459
  provider: providerName,
2399
3460
  baseURL,
2400
3461
  headers: getHeaders,
2401
- generateId: (_a2 = options.generateId) != null ? _a2 : import_provider_utils16.generateId,
3462
+ generateId: (_a2 = options.generateId) != null ? _a2 : generateId2,
2402
3463
  supportedUrls: () => ({
2403
3464
  "*": [
2404
3465
  // Google Generative Language "files" endpoint
@@ -2414,13 +3475,19 @@ function createGoogleGenerativeAI(options = {}) {
2414
3475
  fetch: options.fetch
2415
3476
  });
2416
3477
  };
2417
- const createEmbeddingModel = (modelId) => new GoogleGenerativeAIEmbeddingModel(modelId, {
3478
+ const createEmbeddingModel = (modelId) => new GoogleEmbeddingModel(modelId, {
3479
+ provider: providerName,
3480
+ baseURL,
3481
+ headers: getHeaders,
3482
+ fetch: options.fetch
3483
+ });
3484
+ const createImageModel = (modelId, settings = {}) => new GoogleImageModel(modelId, settings, {
2418
3485
  provider: providerName,
2419
3486
  baseURL,
2420
3487
  headers: getHeaders,
2421
3488
  fetch: options.fetch
2422
3489
  });
2423
- const createImageModel = (modelId, settings = {}) => new GoogleGenerativeAIImageModel(modelId, settings, {
3490
+ const createFiles = () => new GoogleFiles({
2424
3491
  provider: providerName,
2425
3492
  baseURL,
2426
3493
  headers: getHeaders,
@@ -2428,12 +3495,12 @@ function createGoogleGenerativeAI(options = {}) {
2428
3495
  });
2429
3496
  const createVideoModel = (modelId) => {
2430
3497
  var _a2;
2431
- return new GoogleGenerativeAIVideoModel(modelId, {
3498
+ return new GoogleVideoModel(modelId, {
2432
3499
  provider: providerName,
2433
3500
  baseURL,
2434
3501
  headers: getHeaders,
2435
3502
  fetch: options.fetch,
2436
- generateId: (_a2 = options.generateId) != null ? _a2 : import_provider_utils16.generateId
3503
+ generateId: (_a2 = options.generateId) != null ? _a2 : generateId2
2437
3504
  });
2438
3505
  };
2439
3506
  const provider = function(modelId) {
@@ -2456,14 +3523,15 @@ function createGoogleGenerativeAI(options = {}) {
2456
3523
  provider.imageModel = createImageModel;
2457
3524
  provider.video = createVideoModel;
2458
3525
  provider.videoModel = createVideoModel;
3526
+ provider.files = createFiles;
2459
3527
  provider.tools = googleTools;
2460
3528
  return provider;
2461
3529
  }
2462
- var google = createGoogleGenerativeAI();
2463
- // Annotate the CommonJS export names for ESM import in node:
2464
- 0 && (module.exports = {
3530
+ var google = createGoogle();
3531
+ export {
2465
3532
  VERSION,
2466
- createGoogleGenerativeAI,
3533
+ createGoogle,
3534
+ createGoogle as createGoogleGenerativeAI,
2467
3535
  google
2468
- });
3536
+ };
2469
3537
  //# sourceMappingURL=index.js.map