@ai-sdk/google 4.0.0-beta.2 → 4.0.0-beta.21

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.js CHANGED
@@ -18,19 +18,19 @@ var __copyProps = (to, from, except, desc) => {
18
18
  var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);
19
19
 
20
20
  // src/index.ts
21
- var src_exports = {};
22
- __export(src_exports, {
21
+ var index_exports = {};
22
+ __export(index_exports, {
23
23
  VERSION: () => VERSION,
24
24
  createGoogleGenerativeAI: () => createGoogleGenerativeAI,
25
25
  google: () => google
26
26
  });
27
- module.exports = __toCommonJS(src_exports);
27
+ module.exports = __toCommonJS(index_exports);
28
28
 
29
29
  // src/google-provider.ts
30
30
  var import_provider_utils16 = require("@ai-sdk/provider-utils");
31
31
 
32
32
  // src/version.ts
33
- var VERSION = true ? "4.0.0-beta.2" : "0.0.0-test";
33
+ var VERSION = true ? "4.0.0-beta.21" : "0.0.0-test";
34
34
 
35
35
  // src/google-generative-ai-embedding-model.ts
36
36
  var import_provider = require("@ai-sdk/provider");
@@ -59,6 +59,15 @@ var googleFailedResponseHandler = (0, import_provider_utils.createJsonErrorRespo
59
59
  // src/google-generative-ai-embedding-options.ts
60
60
  var import_provider_utils2 = require("@ai-sdk/provider-utils");
61
61
  var import_v42 = require("zod/v4");
62
+ var googleEmbeddingContentPartSchema = import_v42.z.union([
63
+ import_v42.z.object({ text: import_v42.z.string() }),
64
+ import_v42.z.object({
65
+ inlineData: import_v42.z.object({
66
+ mimeType: import_v42.z.string(),
67
+ data: import_v42.z.string()
68
+ })
69
+ })
70
+ ]);
62
71
  var googleEmbeddingModelOptions = (0, import_provider_utils2.lazySchema)(
63
72
  () => (0, import_provider_utils2.zodSchema)(
64
73
  import_v42.z.object({
@@ -88,7 +97,17 @@ var googleEmbeddingModelOptions = (0, import_provider_utils2.lazySchema)(
88
97
  "QUESTION_ANSWERING",
89
98
  "FACT_VERIFICATION",
90
99
  "CODE_RETRIEVAL_QUERY"
91
- ]).optional()
100
+ ]).optional(),
101
+ /**
102
+ * Optional. Per-value multimodal content parts for embedding non-text
103
+ * content (images, video, PDF, audio). Each entry corresponds to the
104
+ * embedding value at the same index and its parts are merged with the
105
+ * text value in the request. Use `null` for entries that are text-only.
106
+ *
107
+ * The array length must match the number of values being embedded. In
108
+ * the case of a single embedding, the array length must be 1.
109
+ */
110
+ content: import_v42.z.array(import_v42.z.array(googleEmbeddingContentPartSchema).min(1).nullable()).optional()
92
111
  })
93
112
  )
94
113
  );
@@ -96,7 +115,7 @@ var googleEmbeddingModelOptions = (0, import_provider_utils2.lazySchema)(
96
115
  // src/google-generative-ai-embedding-model.ts
97
116
  var GoogleGenerativeAIEmbeddingModel = class {
98
117
  constructor(modelId, config) {
99
- this.specificationVersion = "v3";
118
+ this.specificationVersion = "v4";
100
119
  this.maxEmbeddingsPerCall = 2048;
101
120
  this.supportsParallelCalls = true;
102
121
  this.modelId = modelId;
@@ -128,7 +147,16 @@ var GoogleGenerativeAIEmbeddingModel = class {
128
147
  await (0, import_provider_utils3.resolve)(this.config.headers),
129
148
  headers
130
149
  );
150
+ const multimodalContent = googleOptions == null ? void 0 : googleOptions.content;
151
+ if (multimodalContent != null && multimodalContent.length !== values.length) {
152
+ throw new Error(
153
+ `The number of multimodal content entries (${multimodalContent.length}) must match the number of values (${values.length}).`
154
+ );
155
+ }
131
156
  if (values.length === 1) {
157
+ const valueParts = multimodalContent == null ? void 0 : multimodalContent[0];
158
+ const textPart = values[0] ? [{ text: values[0] }] : [];
159
+ const parts = valueParts != null ? [...textPart, ...valueParts] : [{ text: values[0] }];
132
160
  const {
133
161
  responseHeaders: responseHeaders2,
134
162
  value: response2,
@@ -139,7 +167,7 @@ var GoogleGenerativeAIEmbeddingModel = class {
139
167
  body: {
140
168
  model: `models/${this.modelId}`,
141
169
  content: {
142
- parts: [{ text: values[0] }]
170
+ parts
143
171
  },
144
172
  outputDimensionality: googleOptions == null ? void 0 : googleOptions.outputDimensionality,
145
173
  taskType: googleOptions == null ? void 0 : googleOptions.taskType
@@ -166,12 +194,19 @@ var GoogleGenerativeAIEmbeddingModel = class {
166
194
  url: `${this.config.baseURL}/models/${this.modelId}:batchEmbedContents`,
167
195
  headers: mergedHeaders,
168
196
  body: {
169
- requests: values.map((value) => ({
170
- model: `models/${this.modelId}`,
171
- content: { role: "user", parts: [{ text: value }] },
172
- outputDimensionality: googleOptions == null ? void 0 : googleOptions.outputDimensionality,
173
- taskType: googleOptions == null ? void 0 : googleOptions.taskType
174
- }))
197
+ requests: values.map((value, index) => {
198
+ const valueParts = multimodalContent == null ? void 0 : multimodalContent[index];
199
+ const textPart = value ? [{ text: value }] : [];
200
+ return {
201
+ model: `models/${this.modelId}`,
202
+ content: {
203
+ role: "user",
204
+ parts: valueParts != null ? [...textPart, ...valueParts] : [{ text: value }]
205
+ },
206
+ outputDimensionality: googleOptions == null ? void 0 : googleOptions.outputDimensionality,
207
+ taskType: googleOptions == null ? void 0 : googleOptions.taskType
208
+ };
209
+ })
175
210
  },
176
211
  failedResponseHandler: googleFailedResponseHandler,
177
212
  successfulResponseHandler: (0, import_provider_utils3.createJsonResponseHandler)(
@@ -365,13 +400,118 @@ function isEmptyObjectSchema(jsonSchema) {
365
400
  // src/convert-to-google-generative-ai-messages.ts
366
401
  var import_provider2 = require("@ai-sdk/provider");
367
402
  var import_provider_utils4 = require("@ai-sdk/provider-utils");
403
+ var dataUrlRegex = /^data:([^;,]+);base64,(.+)$/s;
404
+ function parseBase64DataUrl(value) {
405
+ const match = dataUrlRegex.exec(value);
406
+ if (match == null) {
407
+ return void 0;
408
+ }
409
+ return {
410
+ mediaType: match[1],
411
+ data: match[2]
412
+ };
413
+ }
414
+ function convertUrlToolResultPart(url) {
415
+ const parsedDataUrl = parseBase64DataUrl(url);
416
+ if (parsedDataUrl == null) {
417
+ return void 0;
418
+ }
419
+ return {
420
+ inlineData: {
421
+ mimeType: parsedDataUrl.mediaType,
422
+ data: parsedDataUrl.data
423
+ }
424
+ };
425
+ }
426
+ function appendToolResultParts(parts, toolName, outputValue) {
427
+ const functionResponseParts = [];
428
+ const responseTextParts = [];
429
+ for (const contentPart of outputValue) {
430
+ switch (contentPart.type) {
431
+ case "text": {
432
+ responseTextParts.push(contentPart.text);
433
+ break;
434
+ }
435
+ case "image-data":
436
+ case "file-data": {
437
+ functionResponseParts.push({
438
+ inlineData: {
439
+ mimeType: contentPart.mediaType,
440
+ data: contentPart.data
441
+ }
442
+ });
443
+ break;
444
+ }
445
+ case "image-url":
446
+ case "file-url": {
447
+ const functionResponsePart = convertUrlToolResultPart(
448
+ contentPart.url
449
+ );
450
+ if (functionResponsePart != null) {
451
+ functionResponseParts.push(functionResponsePart);
452
+ } else {
453
+ responseTextParts.push(JSON.stringify(contentPart));
454
+ }
455
+ break;
456
+ }
457
+ default: {
458
+ responseTextParts.push(JSON.stringify(contentPart));
459
+ break;
460
+ }
461
+ }
462
+ }
463
+ parts.push({
464
+ functionResponse: {
465
+ name: toolName,
466
+ response: {
467
+ name: toolName,
468
+ content: responseTextParts.length > 0 ? responseTextParts.join("\n") : "Tool executed successfully."
469
+ },
470
+ ...functionResponseParts.length > 0 ? { parts: functionResponseParts } : {}
471
+ }
472
+ });
473
+ }
474
+ function appendLegacyToolResultParts(parts, toolName, outputValue) {
475
+ for (const contentPart of outputValue) {
476
+ switch (contentPart.type) {
477
+ case "text":
478
+ parts.push({
479
+ functionResponse: {
480
+ name: toolName,
481
+ response: {
482
+ name: toolName,
483
+ content: contentPart.text
484
+ }
485
+ }
486
+ });
487
+ break;
488
+ case "image-data":
489
+ parts.push(
490
+ {
491
+ inlineData: {
492
+ mimeType: String(contentPart.mediaType),
493
+ data: String(contentPart.data)
494
+ }
495
+ },
496
+ {
497
+ text: "Tool executed successfully and returned this image as a response"
498
+ }
499
+ );
500
+ break;
501
+ default:
502
+ parts.push({ text: JSON.stringify(contentPart) });
503
+ break;
504
+ }
505
+ }
506
+ }
368
507
  function convertToGoogleGenerativeAIMessages(prompt, options) {
369
- var _a, _b, _c;
508
+ var _a, _b, _c, _d, _e, _f, _g, _h;
370
509
  const systemInstructionParts = [];
371
510
  const contents = [];
372
511
  let systemMessagesAllowed = true;
373
512
  const isGemmaModel = (_a = options == null ? void 0 : options.isGemmaModel) != null ? _a : false;
374
513
  const providerOptionsName = (_b = options == null ? void 0 : options.providerOptionsName) != null ? _b : "google";
514
+ const supportsFunctionResponseParts = (_c = options == null ? void 0 : options.supportsFunctionResponseParts) != null ? _c : true;
375
515
  for (const { role, content } of prompt) {
376
516
  switch (role) {
377
517
  case "system": {
@@ -419,8 +559,8 @@ function convertToGoogleGenerativeAIMessages(prompt, options) {
419
559
  contents.push({
420
560
  role: "model",
421
561
  parts: content.map((part) => {
422
- var _a2, _b2, _c2, _d;
423
- const providerOpts = (_d = (_a2 = part.providerOptions) == null ? void 0 : _a2[providerOptionsName]) != null ? _d : providerOptionsName !== "google" ? (_b2 = part.providerOptions) == null ? void 0 : _b2.google : (_c2 = part.providerOptions) == null ? void 0 : _c2.vertex;
562
+ var _a2, _b2, _c2, _d2;
563
+ const providerOpts = (_d2 = (_a2 = part.providerOptions) == null ? void 0 : _a2[providerOptionsName]) != null ? _d2 : providerOptionsName !== "google" ? (_b2 = part.providerOptions) == null ? void 0 : _b2.google : (_c2 = part.providerOptions) == null ? void 0 : _c2.vertex;
424
564
  const thoughtSignature = (providerOpts == null ? void 0 : providerOpts.thoughtSignature) != null ? String(providerOpts.thoughtSignature) : void 0;
425
565
  switch (part.type) {
426
566
  case "text": {
@@ -436,6 +576,21 @@ function convertToGoogleGenerativeAIMessages(prompt, options) {
436
576
  thoughtSignature
437
577
  };
438
578
  }
579
+ case "reasoning-file": {
580
+ if (part.data instanceof URL) {
581
+ throw new import_provider2.UnsupportedFunctionalityError({
582
+ functionality: "File data URLs in assistant messages are not supported"
583
+ });
584
+ }
585
+ return {
586
+ inlineData: {
587
+ mimeType: part.mediaType,
588
+ data: (0, import_provider_utils4.convertToBase64)(part.data)
589
+ },
590
+ thought: true,
591
+ thoughtSignature
592
+ };
593
+ }
439
594
  case "file": {
440
595
  if (part.data instanceof URL) {
441
596
  throw new import_provider2.UnsupportedFunctionalityError({
@@ -447,10 +602,23 @@ function convertToGoogleGenerativeAIMessages(prompt, options) {
447
602
  mimeType: part.mediaType,
448
603
  data: (0, import_provider_utils4.convertToBase64)(part.data)
449
604
  },
605
+ ...(providerOpts == null ? void 0 : providerOpts.thought) === true ? { thought: true } : {},
450
606
  thoughtSignature
451
607
  };
452
608
  }
453
609
  case "tool-call": {
610
+ const serverToolCallId = (providerOpts == null ? void 0 : providerOpts.serverToolCallId) != null ? String(providerOpts.serverToolCallId) : void 0;
611
+ const serverToolType = (providerOpts == null ? void 0 : providerOpts.serverToolType) != null ? String(providerOpts.serverToolType) : void 0;
612
+ if (serverToolCallId && serverToolType) {
613
+ return {
614
+ toolCall: {
615
+ toolType: serverToolType,
616
+ args: typeof part.input === "string" ? JSON.parse(part.input) : part.input,
617
+ id: serverToolCallId
618
+ },
619
+ thoughtSignature
620
+ };
621
+ }
454
622
  return {
455
623
  functionCall: {
456
624
  name: part.toolName,
@@ -459,6 +627,21 @@ function convertToGoogleGenerativeAIMessages(prompt, options) {
459
627
  thoughtSignature
460
628
  };
461
629
  }
630
+ case "tool-result": {
631
+ const serverToolCallId = (providerOpts == null ? void 0 : providerOpts.serverToolCallId) != null ? String(providerOpts.serverToolCallId) : void 0;
632
+ const serverToolType = (providerOpts == null ? void 0 : providerOpts.serverToolType) != null ? String(providerOpts.serverToolType) : void 0;
633
+ if (serverToolCallId && serverToolType) {
634
+ return {
635
+ toolResponse: {
636
+ toolType: serverToolType,
637
+ response: part.output.type === "json" ? part.output.value : {},
638
+ id: serverToolCallId
639
+ },
640
+ thoughtSignature
641
+ };
642
+ }
643
+ return void 0;
644
+ }
462
645
  }
463
646
  }).filter((part) => part !== void 0)
464
647
  });
@@ -471,38 +654,32 @@ function convertToGoogleGenerativeAIMessages(prompt, options) {
471
654
  if (part.type === "tool-approval-response") {
472
655
  continue;
473
656
  }
657
+ const partProviderOpts = (_g = (_d = part.providerOptions) == null ? void 0 : _d[providerOptionsName]) != null ? _g : providerOptionsName !== "google" ? (_e = part.providerOptions) == null ? void 0 : _e.google : (_f = part.providerOptions) == null ? void 0 : _f.vertex;
658
+ const serverToolCallId = (partProviderOpts == null ? void 0 : partProviderOpts.serverToolCallId) != null ? String(partProviderOpts.serverToolCallId) : void 0;
659
+ const serverToolType = (partProviderOpts == null ? void 0 : partProviderOpts.serverToolType) != null ? String(partProviderOpts.serverToolType) : void 0;
660
+ if (serverToolCallId && serverToolType) {
661
+ const serverThoughtSignature = (partProviderOpts == null ? void 0 : partProviderOpts.thoughtSignature) != null ? String(partProviderOpts.thoughtSignature) : void 0;
662
+ if (contents.length > 0) {
663
+ const lastContent = contents[contents.length - 1];
664
+ if (lastContent.role === "model") {
665
+ lastContent.parts.push({
666
+ toolResponse: {
667
+ toolType: serverToolType,
668
+ response: part.output.type === "json" ? part.output.value : {},
669
+ id: serverToolCallId
670
+ },
671
+ thoughtSignature: serverThoughtSignature
672
+ });
673
+ continue;
674
+ }
675
+ }
676
+ }
474
677
  const output = part.output;
475
678
  if (output.type === "content") {
476
- for (const contentPart of output.value) {
477
- switch (contentPart.type) {
478
- case "text":
479
- parts.push({
480
- functionResponse: {
481
- name: part.toolName,
482
- response: {
483
- name: part.toolName,
484
- content: contentPart.text
485
- }
486
- }
487
- });
488
- break;
489
- case "image-data":
490
- parts.push(
491
- {
492
- inlineData: {
493
- mimeType: contentPart.mediaType,
494
- data: contentPart.data
495
- }
496
- },
497
- {
498
- text: "Tool executed successfully and returned this image as a response"
499
- }
500
- );
501
- break;
502
- default:
503
- parts.push({ text: JSON.stringify(contentPart) });
504
- break;
505
- }
679
+ if (supportsFunctionResponseParts) {
680
+ appendToolResultParts(parts, part.toolName, output.value);
681
+ } else {
682
+ appendLegacyToolResultParts(parts, part.toolName, output.value);
506
683
  }
507
684
  } else {
508
685
  parts.push({
@@ -510,7 +687,7 @@ function convertToGoogleGenerativeAIMessages(prompt, options) {
510
687
  name: part.toolName,
511
688
  response: {
512
689
  name: part.toolName,
513
- content: output.type === "execution-denied" ? (_c = output.reason) != null ? _c : "Tool execution denied." : output.value
690
+ content: output.type === "execution-denied" ? (_h = output.reason) != null ? _h : "Tool execution denied." : output.value
514
691
  }
515
692
  }
516
693
  });
@@ -656,7 +833,15 @@ var googleLanguageModelOptions = (0, import_provider_utils5.lazySchema)(
656
833
  latitude: import_v44.z.number(),
657
834
  longitude: import_v44.z.number()
658
835
  }).optional()
659
- }).optional()
836
+ }).optional(),
837
+ /**
838
+ * Optional. The service tier to use for the request.
839
+ */
840
+ serviceTier: import_v44.z.enum([
841
+ "SERVICE_TIER_STANDARD",
842
+ "SERVICE_TIER_FLEX",
843
+ "SERVICE_TIER_PRIORITY"
844
+ ]).optional()
660
845
  })
661
846
  )
662
847
  );
@@ -668,7 +853,7 @@ function prepareTools({
668
853
  toolChoice,
669
854
  modelId
670
855
  }) {
671
- var _a;
856
+ var _a, _b;
672
857
  tools = (tools == null ? void 0 : tools.length) ? tools : void 0;
673
858
  const toolWarnings = [];
674
859
  const isLatest = [
@@ -677,13 +862,14 @@ function prepareTools({
677
862
  "gemini-pro-latest"
678
863
  ].some((id) => id === modelId);
679
864
  const isGemini2orNewer = modelId.includes("gemini-2") || modelId.includes("gemini-3") || modelId.includes("nano-banana") || isLatest;
865
+ const isGemini3orNewer = modelId.includes("gemini-3");
680
866
  const supportsFileSearch = modelId.includes("gemini-2.5") || modelId.includes("gemini-3");
681
867
  if (tools == null) {
682
868
  return { tools: void 0, toolConfig: void 0, toolWarnings };
683
869
  }
684
870
  const hasFunctionTools = tools.some((tool) => tool.type === "function");
685
871
  const hasProviderTools = tools.some((tool) => tool.type === "provider");
686
- if (hasFunctionTools && hasProviderTools) {
872
+ if (hasFunctionTools && hasProviderTools && !isGemini3orNewer) {
687
873
  toolWarnings.push({
688
874
  type: "unsupported",
689
875
  feature: `combination of function and provider-defined tools`
@@ -788,6 +974,45 @@ function prepareTools({
788
974
  break;
789
975
  }
790
976
  });
977
+ if (hasFunctionTools && isGemini3orNewer && googleTools2.length > 0) {
978
+ const functionDeclarations2 = [];
979
+ for (const tool of tools) {
980
+ if (tool.type === "function") {
981
+ functionDeclarations2.push({
982
+ name: tool.name,
983
+ description: (_a = tool.description) != null ? _a : "",
984
+ parameters: convertJSONSchemaToOpenAPISchema(tool.inputSchema)
985
+ });
986
+ }
987
+ }
988
+ const combinedToolConfig = {
989
+ functionCallingConfig: { mode: "VALIDATED" },
990
+ includeServerSideToolInvocations: true
991
+ };
992
+ if (toolChoice != null) {
993
+ switch (toolChoice.type) {
994
+ case "auto":
995
+ break;
996
+ case "none":
997
+ combinedToolConfig.functionCallingConfig = { mode: "NONE" };
998
+ break;
999
+ case "required":
1000
+ combinedToolConfig.functionCallingConfig = { mode: "ANY" };
1001
+ break;
1002
+ case "tool":
1003
+ combinedToolConfig.functionCallingConfig = {
1004
+ mode: "ANY",
1005
+ allowedFunctionNames: [toolChoice.toolName]
1006
+ };
1007
+ break;
1008
+ }
1009
+ }
1010
+ return {
1011
+ tools: [...googleTools2, { functionDeclarations: functionDeclarations2 }],
1012
+ toolConfig: combinedToolConfig,
1013
+ toolWarnings
1014
+ };
1015
+ }
791
1016
  return {
792
1017
  tools: googleTools2.length > 0 ? googleTools2 : void 0,
793
1018
  toolConfig: void 0,
@@ -801,7 +1026,7 @@ function prepareTools({
801
1026
  case "function":
802
1027
  functionDeclarations.push({
803
1028
  name: tool.name,
804
- description: (_a = tool.description) != null ? _a : "",
1029
+ description: (_b = tool.description) != null ? _b : "",
805
1030
  parameters: convertJSONSchemaToOpenAPISchema(tool.inputSchema)
806
1031
  });
807
1032
  if (tool.strict === true) {
@@ -900,7 +1125,7 @@ function mapGoogleGenerativeAIFinishReason({
900
1125
  // src/google-generative-ai-language-model.ts
901
1126
  var GoogleGenerativeAILanguageModel = class {
902
1127
  constructor(modelId, config) {
903
- this.specificationVersion = "v3";
1128
+ this.specificationVersion = "v4";
904
1129
  var _a;
905
1130
  this.modelId = modelId;
906
1131
  this.config = config;
@@ -926,6 +1151,7 @@ var GoogleGenerativeAILanguageModel = class {
926
1151
  seed,
927
1152
  tools,
928
1153
  toolChoice,
1154
+ reasoning,
929
1155
  providerOptions
930
1156
  }) {
931
1157
  var _a;
@@ -952,9 +1178,14 @@ var GoogleGenerativeAILanguageModel = class {
952
1178
  });
953
1179
  }
954
1180
  const isGemmaModel = this.modelId.toLowerCase().startsWith("gemma-");
1181
+ const supportsFunctionResponseParts = this.modelId.startsWith("gemini-3");
955
1182
  const { contents, systemInstruction } = convertToGoogleGenerativeAIMessages(
956
1183
  prompt,
957
- { isGemmaModel, providerOptionsName }
1184
+ {
1185
+ isGemmaModel,
1186
+ providerOptionsName,
1187
+ supportsFunctionResponseParts
1188
+ }
958
1189
  );
959
1190
  const {
960
1191
  tools: googleTools2,
@@ -965,6 +1196,12 @@ var GoogleGenerativeAILanguageModel = class {
965
1196
  toolChoice,
966
1197
  modelId: this.modelId
967
1198
  });
1199
+ const resolvedThinking = resolveThinkingConfig({
1200
+ reasoning,
1201
+ modelId: this.modelId,
1202
+ warnings
1203
+ });
1204
+ const thinkingConfig = (googleOptions == null ? void 0 : googleOptions.thinkingConfig) || resolvedThinking ? { ...resolvedThinking, ...googleOptions == null ? void 0 : googleOptions.thinkingConfig } : void 0;
968
1205
  return {
969
1206
  args: {
970
1207
  generationConfig: {
@@ -988,7 +1225,7 @@ var GoogleGenerativeAILanguageModel = class {
988
1225
  },
989
1226
  // provider options:
990
1227
  responseModalities: googleOptions == null ? void 0 : googleOptions.responseModalities,
991
- thinkingConfig: googleOptions == null ? void 0 : googleOptions.thinkingConfig,
1228
+ thinkingConfig,
992
1229
  ...(googleOptions == null ? void 0 : googleOptions.mediaResolution) && {
993
1230
  mediaResolution: googleOptions.mediaResolution
994
1231
  },
@@ -1005,14 +1242,15 @@ var GoogleGenerativeAILanguageModel = class {
1005
1242
  retrievalConfig: googleOptions.retrievalConfig
1006
1243
  } : googleToolConfig,
1007
1244
  cachedContent: googleOptions == null ? void 0 : googleOptions.cachedContent,
1008
- labels: googleOptions == null ? void 0 : googleOptions.labels
1245
+ labels: googleOptions == null ? void 0 : googleOptions.labels,
1246
+ serviceTier: googleOptions == null ? void 0 : googleOptions.serviceTier
1009
1247
  },
1010
1248
  warnings: [...warnings, ...toolWarnings],
1011
1249
  providerOptionsName
1012
1250
  };
1013
1251
  }
1014
1252
  async doGenerate(options) {
1015
- var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j;
1253
+ var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p;
1016
1254
  const { args, warnings, providerOptionsName } = await this.getArgs(options);
1017
1255
  const mergedHeaders = (0, import_provider_utils6.combineHeaders)(
1018
1256
  await (0, import_provider_utils6.resolve)(this.config.headers),
@@ -1038,6 +1276,7 @@ var GoogleGenerativeAILanguageModel = class {
1038
1276
  const parts = (_b = (_a = candidate.content) == null ? void 0 : _a.parts) != null ? _b : [];
1039
1277
  const usageMetadata = response.usageMetadata;
1040
1278
  let lastCodeExecutionToolCallId;
1279
+ let lastServerToolCallId;
1041
1280
  for (const part of parts) {
1042
1281
  if ("executableCode" in part && ((_c = part.executableCode) == null ? void 0 : _c.code)) {
1043
1282
  const toolCallId = this.config.generateId();
@@ -1092,22 +1331,68 @@ var GoogleGenerativeAILanguageModel = class {
1092
1331
  } : void 0
1093
1332
  });
1094
1333
  } else if ("inlineData" in part) {
1334
+ const hasThought = part.thought === true;
1335
+ const hasThoughtSignature = !!part.thoughtSignature;
1095
1336
  content.push({
1096
- type: "file",
1337
+ type: hasThought ? "reasoning-file" : "file",
1097
1338
  data: part.inlineData.data,
1098
1339
  mediaType: part.inlineData.mimeType,
1099
- providerMetadata: part.thoughtSignature ? {
1340
+ providerMetadata: hasThoughtSignature ? {
1100
1341
  [providerOptionsName]: {
1101
1342
  thoughtSignature: part.thoughtSignature
1102
1343
  }
1103
1344
  } : void 0
1104
1345
  });
1346
+ } else if ("toolCall" in part && part.toolCall) {
1347
+ const toolCallId = (_e = part.toolCall.id) != null ? _e : this.config.generateId();
1348
+ lastServerToolCallId = toolCallId;
1349
+ content.push({
1350
+ type: "tool-call",
1351
+ toolCallId,
1352
+ toolName: `server:${part.toolCall.toolType}`,
1353
+ input: JSON.stringify((_f = part.toolCall.args) != null ? _f : {}),
1354
+ providerExecuted: true,
1355
+ dynamic: true,
1356
+ providerMetadata: part.thoughtSignature ? {
1357
+ [providerOptionsName]: {
1358
+ thoughtSignature: part.thoughtSignature,
1359
+ serverToolCallId: toolCallId,
1360
+ serverToolType: part.toolCall.toolType
1361
+ }
1362
+ } : {
1363
+ [providerOptionsName]: {
1364
+ serverToolCallId: toolCallId,
1365
+ serverToolType: part.toolCall.toolType
1366
+ }
1367
+ }
1368
+ });
1369
+ } else if ("toolResponse" in part && part.toolResponse) {
1370
+ const responseToolCallId = (_g = lastServerToolCallId != null ? lastServerToolCallId : part.toolResponse.id) != null ? _g : this.config.generateId();
1371
+ content.push({
1372
+ type: "tool-result",
1373
+ toolCallId: responseToolCallId,
1374
+ toolName: `server:${part.toolResponse.toolType}`,
1375
+ result: (_h = part.toolResponse.response) != null ? _h : {},
1376
+ providerMetadata: part.thoughtSignature ? {
1377
+ [providerOptionsName]: {
1378
+ thoughtSignature: part.thoughtSignature,
1379
+ serverToolCallId: responseToolCallId,
1380
+ serverToolType: part.toolResponse.toolType
1381
+ }
1382
+ } : {
1383
+ [providerOptionsName]: {
1384
+ serverToolCallId: responseToolCallId,
1385
+ serverToolType: part.toolResponse.toolType
1386
+ }
1387
+ }
1388
+ });
1389
+ lastServerToolCallId = void 0;
1105
1390
  }
1106
1391
  }
1107
- const sources = (_e = extractSources({
1392
+ const sources = (_i = extractSources({
1108
1393
  groundingMetadata: candidate.groundingMetadata,
1109
1394
  generateId: this.config.generateId
1110
- })) != null ? _e : [];
1395
+ })) != null ? _i : [];
1111
1396
  for (const source of sources) {
1112
1397
  content.push(source);
1113
1398
  }
@@ -1121,17 +1406,19 @@ var GoogleGenerativeAILanguageModel = class {
1121
1406
  (part) => part.type === "tool-call" && !part.providerExecuted
1122
1407
  )
1123
1408
  }),
1124
- raw: (_f = candidate.finishReason) != null ? _f : void 0
1409
+ raw: (_j = candidate.finishReason) != null ? _j : void 0
1125
1410
  },
1126
1411
  usage: convertGoogleGenerativeAIUsage(usageMetadata),
1127
1412
  warnings,
1128
1413
  providerMetadata: {
1129
1414
  [providerOptionsName]: {
1130
- promptFeedback: (_g = response.promptFeedback) != null ? _g : null,
1131
- groundingMetadata: (_h = candidate.groundingMetadata) != null ? _h : null,
1132
- urlContextMetadata: (_i = candidate.urlContextMetadata) != null ? _i : null,
1133
- safetyRatings: (_j = candidate.safetyRatings) != null ? _j : null,
1134
- usageMetadata: usageMetadata != null ? usageMetadata : null
1415
+ promptFeedback: (_k = response.promptFeedback) != null ? _k : null,
1416
+ groundingMetadata: (_l = candidate.groundingMetadata) != null ? _l : null,
1417
+ urlContextMetadata: (_m = candidate.urlContextMetadata) != null ? _m : null,
1418
+ safetyRatings: (_n = candidate.safetyRatings) != null ? _n : null,
1419
+ usageMetadata: usageMetadata != null ? usageMetadata : null,
1420
+ finishMessage: (_o = candidate.finishMessage) != null ? _o : null,
1421
+ serviceTier: (_p = response.serviceTier) != null ? _p : null
1135
1422
  }
1136
1423
  },
1137
1424
  request: { body: args },
@@ -1167,6 +1454,7 @@ var GoogleGenerativeAILanguageModel = class {
1167
1454
  let providerMetadata = void 0;
1168
1455
  let lastGroundingMetadata = null;
1169
1456
  let lastUrlContextMetadata = null;
1457
+ let serviceTier = null;
1170
1458
  const generateId3 = this.config.generateId;
1171
1459
  let hasToolCalls = false;
1172
1460
  let currentTextBlockId = null;
@@ -1174,6 +1462,7 @@ var GoogleGenerativeAILanguageModel = class {
1174
1462
  let blockCounter = 0;
1175
1463
  const emittedSourceUrls = /* @__PURE__ */ new Set();
1176
1464
  let lastCodeExecutionToolCallId;
1465
+ let lastServerToolCallId;
1177
1466
  return {
1178
1467
  stream: response.pipeThrough(
1179
1468
  new TransformStream({
@@ -1181,7 +1470,7 @@ var GoogleGenerativeAILanguageModel = class {
1181
1470
  controller.enqueue({ type: "stream-start", warnings });
1182
1471
  },
1183
1472
  transform(chunk, controller) {
1184
- var _a, _b, _c, _d, _e, _f;
1473
+ var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k;
1185
1474
  if (options.includeRawChunks) {
1186
1475
  controller.enqueue({ type: "raw", rawValue: chunk.rawValue });
1187
1476
  }
@@ -1194,6 +1483,9 @@ var GoogleGenerativeAILanguageModel = class {
1194
1483
  if (usageMetadata != null) {
1195
1484
  usage = usageMetadata;
1196
1485
  }
1486
+ if (value.serviceTier != null) {
1487
+ serviceTier = value.serviceTier;
1488
+ }
1197
1489
  const candidate = (_a = value.candidates) == null ? void 0 : _a[0];
1198
1490
  if (candidate == null) {
1199
1491
  return;
@@ -1319,17 +1611,55 @@ var GoogleGenerativeAILanguageModel = class {
1319
1611
  });
1320
1612
  currentReasoningBlockId = null;
1321
1613
  }
1322
- const thoughtSignatureMetadata = part.thoughtSignature ? {
1614
+ const hasThought = part.thought === true;
1615
+ const hasThoughtSignature = !!part.thoughtSignature;
1616
+ const fileMeta = hasThoughtSignature ? {
1323
1617
  [providerOptionsName]: {
1324
1618
  thoughtSignature: part.thoughtSignature
1325
1619
  }
1326
1620
  } : void 0;
1327
1621
  controller.enqueue({
1328
- type: "file",
1622
+ type: hasThought ? "reasoning-file" : "file",
1329
1623
  mediaType: part.inlineData.mimeType,
1330
1624
  data: part.inlineData.data,
1331
- providerMetadata: thoughtSignatureMetadata
1625
+ providerMetadata: fileMeta
1332
1626
  });
1627
+ } else if ("toolCall" in part && part.toolCall) {
1628
+ const toolCallId = (_e = part.toolCall.id) != null ? _e : generateId3();
1629
+ lastServerToolCallId = toolCallId;
1630
+ const serverMeta = {
1631
+ [providerOptionsName]: {
1632
+ ...part.thoughtSignature ? { thoughtSignature: part.thoughtSignature } : {},
1633
+ serverToolCallId: toolCallId,
1634
+ serverToolType: part.toolCall.toolType
1635
+ }
1636
+ };
1637
+ controller.enqueue({
1638
+ type: "tool-call",
1639
+ toolCallId,
1640
+ toolName: `server:${part.toolCall.toolType}`,
1641
+ input: JSON.stringify((_f = part.toolCall.args) != null ? _f : {}),
1642
+ providerExecuted: true,
1643
+ dynamic: true,
1644
+ providerMetadata: serverMeta
1645
+ });
1646
+ } else if ("toolResponse" in part && part.toolResponse) {
1647
+ const responseToolCallId = (_g = lastServerToolCallId != null ? lastServerToolCallId : part.toolResponse.id) != null ? _g : generateId3();
1648
+ const serverMeta = {
1649
+ [providerOptionsName]: {
1650
+ ...part.thoughtSignature ? { thoughtSignature: part.thoughtSignature } : {},
1651
+ serverToolCallId: responseToolCallId,
1652
+ serverToolType: part.toolResponse.toolType
1653
+ }
1654
+ };
1655
+ controller.enqueue({
1656
+ type: "tool-result",
1657
+ toolCallId: responseToolCallId,
1658
+ toolName: `server:${part.toolResponse.toolType}`,
1659
+ result: (_h = part.toolResponse.response) != null ? _h : {},
1660
+ providerMetadata: serverMeta
1661
+ });
1662
+ lastServerToolCallId = void 0;
1333
1663
  }
1334
1664
  }
1335
1665
  const toolCallDeltas = getToolCallsFromParts({
@@ -1377,15 +1707,15 @@ var GoogleGenerativeAILanguageModel = class {
1377
1707
  };
1378
1708
  providerMetadata = {
1379
1709
  [providerOptionsName]: {
1380
- promptFeedback: (_e = value.promptFeedback) != null ? _e : null,
1710
+ promptFeedback: (_i = value.promptFeedback) != null ? _i : null,
1381
1711
  groundingMetadata: lastGroundingMetadata,
1382
1712
  urlContextMetadata: lastUrlContextMetadata,
1383
- safetyRatings: (_f = candidate.safetyRatings) != null ? _f : null
1713
+ safetyRatings: (_j = candidate.safetyRatings) != null ? _j : null,
1714
+ usageMetadata: usageMetadata != null ? usageMetadata : null,
1715
+ finishMessage: (_k = candidate.finishMessage) != null ? _k : null,
1716
+ serviceTier
1384
1717
  }
1385
1718
  };
1386
- if (usageMetadata != null) {
1387
- providerMetadata[providerOptionsName].usageMetadata = usageMetadata;
1388
- }
1389
1719
  }
1390
1720
  },
1391
1721
  flush(controller) {
@@ -1415,6 +1745,75 @@ var GoogleGenerativeAILanguageModel = class {
1415
1745
  };
1416
1746
  }
1417
1747
  };
1748
// Detects Gemini 3 model ids: "gemini-3" followed by "." or "-" anywhere
// in the id, or an id that ends in "gemini-3" (case-insensitive).
function isGemini3Model(modelId) {
  return /gemini-3([.-]|$)/i.test(modelId);
}
1751
// Maximum output tokens assumed when sizing a Gemini 2.5 thinking budget.
const GEMINI_25_MAX_OUTPUT_TOKENS = 65536;
// Returns the fixed output-token ceiling used by the 2.5 budget mapping.
function getMaxOutputTokensForGemini25Model() {
  return GEMINI_25_MAX_OUTPUT_TOKENS;
}
1754
// Upper bound for the thinking-token budget of a given model id.
// Pro-class ids (containing "2.5-pro" or "gemini-3-pro-image") get the
// larger 32768 budget; all other ids get the default 24576.
// The check is case-insensitive.
function getMaxThinkingTokensForGemini25Model(modelId) {
  const normalized = modelId.toLowerCase();
  const isProClass = ["2.5-pro", "gemini-3-pro-image"].some((marker) =>
    normalized.includes(marker)
  );
  return isProClass ? 32768 : 24576;
}
1761
// Maps the requested reasoning setting onto a Google "thinkingConfig".
// Non-custom reasoning values yield no config at all. Gemini 3 models
// (except the gemini-3-pro-image variant) use the level-based scheme;
// everything else falls back to the Gemini 2.5 token-budget scheme.
function resolveThinkingConfig({ reasoning, modelId, warnings }) {
  const isCustom = (0, import_provider_utils6.isCustomReasoning)(reasoning);
  if (!isCustom) {
    return undefined;
  }
  const usesThinkingLevels =
    isGemini3Model(modelId) && !modelId.includes("gemini-3-pro-image");
  return usesThinkingLevels
    ? resolveGemini3ThinkingConfig({ reasoning, warnings })
    : resolveGemini25ThinkingConfig({ reasoning, modelId, warnings });
}
1774
// Translates a reasoning setting into a Gemini 3 { thinkingLevel } config.
// "none" is clamped to the provider's lowest level ("minimal"); any other
// setting goes through mapReasoningToProviderEffort, where "xhigh"
// collapses onto "high". Returns undefined when no level can be derived.
function resolveGemini3ThinkingConfig({ reasoning, warnings }) {
  if (reasoning === "none") {
    return { thinkingLevel: "minimal" };
  }
  const effortMap = {
    minimal: "minimal",
    low: "low",
    medium: "medium",
    high: "high",
    xhigh: "high"
  };
  const level = (0, import_provider_utils6.mapReasoningToProviderEffort)({
    reasoning,
    effortMap,
    warnings
  });
  return level == null ? undefined : { thinkingLevel: level };
}
1797
// Translates a reasoning setting into a Gemini 2.5 { thinkingBudget } config.
// "none" disables thinking outright (budget 0); any other setting is scaled
// by mapReasoningToProviderBudget against the model's output and thinking
// token ceilings. Returns undefined when no budget can be derived.
function resolveGemini25ThinkingConfig({ reasoning, modelId, warnings }) {
  if (reasoning === "none") {
    return { thinkingBudget: 0 };
  }
  const budget = (0, import_provider_utils6.mapReasoningToProviderBudget)({
    reasoning,
    maxOutputTokens: getMaxOutputTokensForGemini25Model(),
    maxReasoningBudget: getMaxThinkingTokensForGemini25Model(modelId),
    minReasoningBudget: 0,
    warnings
  });
  return budget == null ? undefined : { thinkingBudget: budget };
}
1418
1817
  function getToolCallsFromParts({
1419
1818
  parts,
1420
1819
  generateId: generateId3,
@@ -1594,6 +1993,23 @@ var getContentSchema = () => import_v45.z.object({
1594
1993
  mimeType: import_v45.z.string(),
1595
1994
  data: import_v45.z.string()
1596
1995
  }),
1996
+ thought: import_v45.z.boolean().nullish(),
1997
+ thoughtSignature: import_v45.z.string().nullish()
1998
+ }),
1999
+ import_v45.z.object({
2000
+ toolCall: import_v45.z.object({
2001
+ toolType: import_v45.z.string(),
2002
+ args: import_v45.z.unknown().nullish(),
2003
+ id: import_v45.z.string()
2004
+ }),
2005
+ thoughtSignature: import_v45.z.string().nullish()
2006
+ }),
2007
+ import_v45.z.object({
2008
+ toolResponse: import_v45.z.object({
2009
+ toolType: import_v45.z.string(),
2010
+ response: import_v45.z.unknown().nullish(),
2011
+ id: import_v45.z.string()
2012
+ }),
1597
2013
  thoughtSignature: import_v45.z.string().nullish()
1598
2014
  }),
1599
2015
  import_v45.z.object({
@@ -1644,6 +2060,7 @@ var responseSchema = (0, import_provider_utils6.lazySchema)(
1644
2060
  import_v45.z.object({
1645
2061
  content: getContentSchema().nullish().or(import_v45.z.object({}).strict()),
1646
2062
  finishReason: import_v45.z.string().nullish(),
2063
+ finishMessage: import_v45.z.string().nullish(),
1647
2064
  safetyRatings: import_v45.z.array(getSafetyRatingSchema()).nullish(),
1648
2065
  groundingMetadata: getGroundingMetadataSchema().nullish(),
1649
2066
  urlContextMetadata: getUrlContextMetadataSchema().nullish()
@@ -1653,7 +2070,8 @@ var responseSchema = (0, import_provider_utils6.lazySchema)(
1653
2070
  promptFeedback: import_v45.z.object({
1654
2071
  blockReason: import_v45.z.string().nullish(),
1655
2072
  safetyRatings: import_v45.z.array(getSafetyRatingSchema()).nullish()
1656
- }).nullish()
2073
+ }).nullish(),
2074
+ serviceTier: import_v45.z.string().nullish()
1657
2075
  })
1658
2076
  )
1659
2077
  );
@@ -1664,6 +2082,7 @@ var chunkSchema = (0, import_provider_utils6.lazySchema)(
1664
2082
  import_v45.z.object({
1665
2083
  content: getContentSchema().nullish(),
1666
2084
  finishReason: import_v45.z.string().nullish(),
2085
+ finishMessage: import_v45.z.string().nullish(),
1667
2086
  safetyRatings: import_v45.z.array(getSafetyRatingSchema()).nullish(),
1668
2087
  groundingMetadata: getGroundingMetadataSchema().nullish(),
1669
2088
  urlContextMetadata: getUrlContextMetadataSchema().nullish()
@@ -1673,7 +2092,8 @@ var chunkSchema = (0, import_provider_utils6.lazySchema)(
1673
2092
  promptFeedback: import_v45.z.object({
1674
2093
  blockReason: import_v45.z.string().nullish(),
1675
2094
  safetyRatings: import_v45.z.array(getSafetyRatingSchema()).nullish()
1676
- }).nullish()
2095
+ }).nullish(),
2096
+ serviceTier: import_v45.z.string().nullish()
1677
2097
  })
1678
2098
  )
1679
2099
  );
@@ -1846,7 +2266,7 @@ var GoogleGenerativeAIImageModel = class {
1846
2266
  this.modelId = modelId;
1847
2267
  this.settings = settings;
1848
2268
  this.config = config;
1849
- this.specificationVersion = "v3";
2269
+ this.specificationVersion = "v4";
1850
2270
  }
1851
2271
  get maxImagesPerCall() {
1852
2272
  if (this.settings.maxImagesPerCall != null) {
@@ -2087,7 +2507,7 @@ var GoogleGenerativeAIVideoModel = class {
2087
2507
  constructor(modelId, config) {
2088
2508
  this.modelId = modelId;
2089
2509
  this.config = config;
2090
- this.specificationVersion = "v3";
2510
+ this.specificationVersion = "v4";
2091
2511
  }
2092
2512
  get provider() {
2093
2513
  return this.config.provider;
@@ -2399,7 +2819,7 @@ function createGoogleGenerativeAI(options = {}) {
2399
2819
  }
2400
2820
  return createChatModel(modelId);
2401
2821
  };
2402
- provider.specificationVersion = "v3";
2822
+ provider.specificationVersion = "v4";
2403
2823
  provider.languageModel = createChatModel;
2404
2824
  provider.chat = createChatModel;
2405
2825
  provider.generativeAI = createChatModel;