@ai-sdk/google 4.0.0-beta.2 → 4.0.0-beta.20

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.mjs CHANGED
@@ -7,7 +7,7 @@ import {
7
7
  } from "@ai-sdk/provider-utils";
8
8
 
9
9
  // src/version.ts
10
- var VERSION = true ? "4.0.0-beta.2" : "0.0.0-test";
10
+ var VERSION = true ? "4.0.0-beta.20" : "0.0.0-test";
11
11
 
12
12
  // src/google-generative-ai-embedding-model.ts
13
13
  import {
@@ -53,6 +53,15 @@ import {
53
53
  zodSchema as zodSchema2
54
54
  } from "@ai-sdk/provider-utils";
55
55
  import { z as z2 } from "zod/v4";
56
+ var googleEmbeddingContentPartSchema = z2.union([
57
+ z2.object({ text: z2.string() }),
58
+ z2.object({
59
+ inlineData: z2.object({
60
+ mimeType: z2.string(),
61
+ data: z2.string()
62
+ })
63
+ })
64
+ ]);
56
65
  var googleEmbeddingModelOptions = lazySchema2(
57
66
  () => zodSchema2(
58
67
  z2.object({
@@ -82,7 +91,17 @@ var googleEmbeddingModelOptions = lazySchema2(
82
91
  "QUESTION_ANSWERING",
83
92
  "FACT_VERIFICATION",
84
93
  "CODE_RETRIEVAL_QUERY"
85
- ]).optional()
94
+ ]).optional(),
95
+ /**
96
+ * Optional. Per-value multimodal content parts for embedding non-text
97
+ * content (images, video, PDF, audio). Each entry corresponds to the
98
+ * embedding value at the same index and its parts are merged with the
99
+ * text value in the request. Use `null` for entries that are text-only.
100
+ *
101
+ * The array length must match the number of values being embedded. In
102
+ * the case of a single embedding, the array length must be 1.
103
+ */
104
+ content: z2.array(z2.array(googleEmbeddingContentPartSchema).min(1).nullable()).optional()
86
105
  })
87
106
  )
88
107
  );
@@ -90,7 +109,7 @@ var googleEmbeddingModelOptions = lazySchema2(
90
109
  // src/google-generative-ai-embedding-model.ts
91
110
  var GoogleGenerativeAIEmbeddingModel = class {
92
111
  constructor(modelId, config) {
93
- this.specificationVersion = "v3";
112
+ this.specificationVersion = "v4";
94
113
  this.maxEmbeddingsPerCall = 2048;
95
114
  this.supportsParallelCalls = true;
96
115
  this.modelId = modelId;
@@ -122,7 +141,16 @@ var GoogleGenerativeAIEmbeddingModel = class {
122
141
  await resolve(this.config.headers),
123
142
  headers
124
143
  );
144
+ const multimodalContent = googleOptions == null ? void 0 : googleOptions.content;
145
+ if (multimodalContent != null && multimodalContent.length !== values.length) {
146
+ throw new Error(
147
+ `The number of multimodal content entries (${multimodalContent.length}) must match the number of values (${values.length}).`
148
+ );
149
+ }
125
150
  if (values.length === 1) {
151
+ const valueParts = multimodalContent == null ? void 0 : multimodalContent[0];
152
+ const textPart = values[0] ? [{ text: values[0] }] : [];
153
+ const parts = valueParts != null ? [...textPart, ...valueParts] : [{ text: values[0] }];
126
154
  const {
127
155
  responseHeaders: responseHeaders2,
128
156
  value: response2,
@@ -133,7 +161,7 @@ var GoogleGenerativeAIEmbeddingModel = class {
133
161
  body: {
134
162
  model: `models/${this.modelId}`,
135
163
  content: {
136
- parts: [{ text: values[0] }]
164
+ parts
137
165
  },
138
166
  outputDimensionality: googleOptions == null ? void 0 : googleOptions.outputDimensionality,
139
167
  taskType: googleOptions == null ? void 0 : googleOptions.taskType
@@ -160,12 +188,19 @@ var GoogleGenerativeAIEmbeddingModel = class {
160
188
  url: `${this.config.baseURL}/models/${this.modelId}:batchEmbedContents`,
161
189
  headers: mergedHeaders,
162
190
  body: {
163
- requests: values.map((value) => ({
164
- model: `models/${this.modelId}`,
165
- content: { role: "user", parts: [{ text: value }] },
166
- outputDimensionality: googleOptions == null ? void 0 : googleOptions.outputDimensionality,
167
- taskType: googleOptions == null ? void 0 : googleOptions.taskType
168
- }))
191
+ requests: values.map((value, index) => {
192
+ const valueParts = multimodalContent == null ? void 0 : multimodalContent[index];
193
+ const textPart = value ? [{ text: value }] : [];
194
+ return {
195
+ model: `models/${this.modelId}`,
196
+ content: {
197
+ role: "user",
198
+ parts: valueParts != null ? [...textPart, ...valueParts] : [{ text: value }]
199
+ },
200
+ outputDimensionality: googleOptions == null ? void 0 : googleOptions.outputDimensionality,
201
+ taskType: googleOptions == null ? void 0 : googleOptions.taskType
202
+ };
203
+ })
169
204
  },
170
205
  failedResponseHandler: googleFailedResponseHandler,
171
206
  successfulResponseHandler: createJsonResponseHandler(
@@ -203,7 +238,10 @@ import {
203
238
  createEventSourceResponseHandler,
204
239
  createJsonResponseHandler as createJsonResponseHandler2,
205
240
  generateId,
241
+ isCustomReasoning,
206
242
  lazySchema as lazySchema5,
243
+ mapReasoningToProviderBudget,
244
+ mapReasoningToProviderEffort,
207
245
  parseProviderOptions as parseProviderOptions2,
208
246
  postJsonToApi as postJsonToApi2,
209
247
  resolve as resolve2,
@@ -371,13 +409,118 @@ import {
371
409
  UnsupportedFunctionalityError
372
410
  } from "@ai-sdk/provider";
373
411
  import { convertToBase64 } from "@ai-sdk/provider-utils";
412
+ var dataUrlRegex = /^data:([^;,]+);base64,(.+)$/s;
413
+ function parseBase64DataUrl(value) {
414
+ const match = dataUrlRegex.exec(value);
415
+ if (match == null) {
416
+ return void 0;
417
+ }
418
+ return {
419
+ mediaType: match[1],
420
+ data: match[2]
421
+ };
422
+ }
423
+ function convertUrlToolResultPart(url) {
424
+ const parsedDataUrl = parseBase64DataUrl(url);
425
+ if (parsedDataUrl == null) {
426
+ return void 0;
427
+ }
428
+ return {
429
+ inlineData: {
430
+ mimeType: parsedDataUrl.mediaType,
431
+ data: parsedDataUrl.data
432
+ }
433
+ };
434
+ }
435
+ function appendToolResultParts(parts, toolName, outputValue) {
436
+ const functionResponseParts = [];
437
+ const responseTextParts = [];
438
+ for (const contentPart of outputValue) {
439
+ switch (contentPart.type) {
440
+ case "text": {
441
+ responseTextParts.push(contentPart.text);
442
+ break;
443
+ }
444
+ case "image-data":
445
+ case "file-data": {
446
+ functionResponseParts.push({
447
+ inlineData: {
448
+ mimeType: contentPart.mediaType,
449
+ data: contentPart.data
450
+ }
451
+ });
452
+ break;
453
+ }
454
+ case "image-url":
455
+ case "file-url": {
456
+ const functionResponsePart = convertUrlToolResultPart(
457
+ contentPart.url
458
+ );
459
+ if (functionResponsePart != null) {
460
+ functionResponseParts.push(functionResponsePart);
461
+ } else {
462
+ responseTextParts.push(JSON.stringify(contentPart));
463
+ }
464
+ break;
465
+ }
466
+ default: {
467
+ responseTextParts.push(JSON.stringify(contentPart));
468
+ break;
469
+ }
470
+ }
471
+ }
472
+ parts.push({
473
+ functionResponse: {
474
+ name: toolName,
475
+ response: {
476
+ name: toolName,
477
+ content: responseTextParts.length > 0 ? responseTextParts.join("\n") : "Tool executed successfully."
478
+ },
479
+ ...functionResponseParts.length > 0 ? { parts: functionResponseParts } : {}
480
+ }
481
+ });
482
+ }
483
+ function appendLegacyToolResultParts(parts, toolName, outputValue) {
484
+ for (const contentPart of outputValue) {
485
+ switch (contentPart.type) {
486
+ case "text":
487
+ parts.push({
488
+ functionResponse: {
489
+ name: toolName,
490
+ response: {
491
+ name: toolName,
492
+ content: contentPart.text
493
+ }
494
+ }
495
+ });
496
+ break;
497
+ case "image-data":
498
+ parts.push(
499
+ {
500
+ inlineData: {
501
+ mimeType: String(contentPart.mediaType),
502
+ data: String(contentPart.data)
503
+ }
504
+ },
505
+ {
506
+ text: "Tool executed successfully and returned this image as a response"
507
+ }
508
+ );
509
+ break;
510
+ default:
511
+ parts.push({ text: JSON.stringify(contentPart) });
512
+ break;
513
+ }
514
+ }
515
+ }
374
516
  function convertToGoogleGenerativeAIMessages(prompt, options) {
375
- var _a, _b, _c;
517
+ var _a, _b, _c, _d, _e, _f, _g, _h;
376
518
  const systemInstructionParts = [];
377
519
  const contents = [];
378
520
  let systemMessagesAllowed = true;
379
521
  const isGemmaModel = (_a = options == null ? void 0 : options.isGemmaModel) != null ? _a : false;
380
522
  const providerOptionsName = (_b = options == null ? void 0 : options.providerOptionsName) != null ? _b : "google";
523
+ const supportsFunctionResponseParts = (_c = options == null ? void 0 : options.supportsFunctionResponseParts) != null ? _c : true;
381
524
  for (const { role, content } of prompt) {
382
525
  switch (role) {
383
526
  case "system": {
@@ -425,8 +568,8 @@ function convertToGoogleGenerativeAIMessages(prompt, options) {
425
568
  contents.push({
426
569
  role: "model",
427
570
  parts: content.map((part) => {
428
- var _a2, _b2, _c2, _d;
429
- const providerOpts = (_d = (_a2 = part.providerOptions) == null ? void 0 : _a2[providerOptionsName]) != null ? _d : providerOptionsName !== "google" ? (_b2 = part.providerOptions) == null ? void 0 : _b2.google : (_c2 = part.providerOptions) == null ? void 0 : _c2.vertex;
571
+ var _a2, _b2, _c2, _d2;
572
+ const providerOpts = (_d2 = (_a2 = part.providerOptions) == null ? void 0 : _a2[providerOptionsName]) != null ? _d2 : providerOptionsName !== "google" ? (_b2 = part.providerOptions) == null ? void 0 : _b2.google : (_c2 = part.providerOptions) == null ? void 0 : _c2.vertex;
430
573
  const thoughtSignature = (providerOpts == null ? void 0 : providerOpts.thoughtSignature) != null ? String(providerOpts.thoughtSignature) : void 0;
431
574
  switch (part.type) {
432
575
  case "text": {
@@ -442,6 +585,21 @@ function convertToGoogleGenerativeAIMessages(prompt, options) {
442
585
  thoughtSignature
443
586
  };
444
587
  }
588
+ case "reasoning-file": {
589
+ if (part.data instanceof URL) {
590
+ throw new UnsupportedFunctionalityError({
591
+ functionality: "File data URLs in assistant messages are not supported"
592
+ });
593
+ }
594
+ return {
595
+ inlineData: {
596
+ mimeType: part.mediaType,
597
+ data: convertToBase64(part.data)
598
+ },
599
+ thought: true,
600
+ thoughtSignature
601
+ };
602
+ }
445
603
  case "file": {
446
604
  if (part.data instanceof URL) {
447
605
  throw new UnsupportedFunctionalityError({
@@ -453,10 +611,23 @@ function convertToGoogleGenerativeAIMessages(prompt, options) {
453
611
  mimeType: part.mediaType,
454
612
  data: convertToBase64(part.data)
455
613
  },
614
+ ...(providerOpts == null ? void 0 : providerOpts.thought) === true ? { thought: true } : {},
456
615
  thoughtSignature
457
616
  };
458
617
  }
459
618
  case "tool-call": {
619
+ const serverToolCallId = (providerOpts == null ? void 0 : providerOpts.serverToolCallId) != null ? String(providerOpts.serverToolCallId) : void 0;
620
+ const serverToolType = (providerOpts == null ? void 0 : providerOpts.serverToolType) != null ? String(providerOpts.serverToolType) : void 0;
621
+ if (serverToolCallId && serverToolType) {
622
+ return {
623
+ toolCall: {
624
+ toolType: serverToolType,
625
+ args: typeof part.input === "string" ? JSON.parse(part.input) : part.input,
626
+ id: serverToolCallId
627
+ },
628
+ thoughtSignature
629
+ };
630
+ }
460
631
  return {
461
632
  functionCall: {
462
633
  name: part.toolName,
@@ -465,6 +636,21 @@ function convertToGoogleGenerativeAIMessages(prompt, options) {
465
636
  thoughtSignature
466
637
  };
467
638
  }
639
+ case "tool-result": {
640
+ const serverToolCallId = (providerOpts == null ? void 0 : providerOpts.serverToolCallId) != null ? String(providerOpts.serverToolCallId) : void 0;
641
+ const serverToolType = (providerOpts == null ? void 0 : providerOpts.serverToolType) != null ? String(providerOpts.serverToolType) : void 0;
642
+ if (serverToolCallId && serverToolType) {
643
+ return {
644
+ toolResponse: {
645
+ toolType: serverToolType,
646
+ response: part.output.type === "json" ? part.output.value : {},
647
+ id: serverToolCallId
648
+ },
649
+ thoughtSignature
650
+ };
651
+ }
652
+ return void 0;
653
+ }
468
654
  }
469
655
  }).filter((part) => part !== void 0)
470
656
  });
@@ -477,38 +663,32 @@ function convertToGoogleGenerativeAIMessages(prompt, options) {
477
663
  if (part.type === "tool-approval-response") {
478
664
  continue;
479
665
  }
666
+ const partProviderOpts = (_g = (_d = part.providerOptions) == null ? void 0 : _d[providerOptionsName]) != null ? _g : providerOptionsName !== "google" ? (_e = part.providerOptions) == null ? void 0 : _e.google : (_f = part.providerOptions) == null ? void 0 : _f.vertex;
667
+ const serverToolCallId = (partProviderOpts == null ? void 0 : partProviderOpts.serverToolCallId) != null ? String(partProviderOpts.serverToolCallId) : void 0;
668
+ const serverToolType = (partProviderOpts == null ? void 0 : partProviderOpts.serverToolType) != null ? String(partProviderOpts.serverToolType) : void 0;
669
+ if (serverToolCallId && serverToolType) {
670
+ const serverThoughtSignature = (partProviderOpts == null ? void 0 : partProviderOpts.thoughtSignature) != null ? String(partProviderOpts.thoughtSignature) : void 0;
671
+ if (contents.length > 0) {
672
+ const lastContent = contents[contents.length - 1];
673
+ if (lastContent.role === "model") {
674
+ lastContent.parts.push({
675
+ toolResponse: {
676
+ toolType: serverToolType,
677
+ response: part.output.type === "json" ? part.output.value : {},
678
+ id: serverToolCallId
679
+ },
680
+ thoughtSignature: serverThoughtSignature
681
+ });
682
+ continue;
683
+ }
684
+ }
685
+ }
480
686
  const output = part.output;
481
687
  if (output.type === "content") {
482
- for (const contentPart of output.value) {
483
- switch (contentPart.type) {
484
- case "text":
485
- parts.push({
486
- functionResponse: {
487
- name: part.toolName,
488
- response: {
489
- name: part.toolName,
490
- content: contentPart.text
491
- }
492
- }
493
- });
494
- break;
495
- case "image-data":
496
- parts.push(
497
- {
498
- inlineData: {
499
- mimeType: contentPart.mediaType,
500
- data: contentPart.data
501
- }
502
- },
503
- {
504
- text: "Tool executed successfully and returned this image as a response"
505
- }
506
- );
507
- break;
508
- default:
509
- parts.push({ text: JSON.stringify(contentPart) });
510
- break;
511
- }
688
+ if (supportsFunctionResponseParts) {
689
+ appendToolResultParts(parts, part.toolName, output.value);
690
+ } else {
691
+ appendLegacyToolResultParts(parts, part.toolName, output.value);
512
692
  }
513
693
  } else {
514
694
  parts.push({
@@ -516,7 +696,7 @@ function convertToGoogleGenerativeAIMessages(prompt, options) {
516
696
  name: part.toolName,
517
697
  response: {
518
698
  name: part.toolName,
519
- content: output.type === "execution-denied" ? (_c = output.reason) != null ? _c : "Tool execution denied." : output.value
699
+ content: output.type === "execution-denied" ? (_h = output.reason) != null ? _h : "Tool execution denied." : output.value
520
700
  }
521
701
  }
522
702
  });
@@ -662,7 +842,15 @@ var googleLanguageModelOptions = lazySchema4(
662
842
  latitude: z4.number(),
663
843
  longitude: z4.number()
664
844
  }).optional()
665
- }).optional()
845
+ }).optional(),
846
+ /**
847
+ * Optional. The service tier to use for the request.
848
+ */
849
+ serviceTier: z4.enum([
850
+ "SERVICE_TIER_STANDARD",
851
+ "SERVICE_TIER_FLEX",
852
+ "SERVICE_TIER_PRIORITY"
853
+ ]).optional()
666
854
  })
667
855
  )
668
856
  );
@@ -676,7 +864,7 @@ function prepareTools({
676
864
  toolChoice,
677
865
  modelId
678
866
  }) {
679
- var _a;
867
+ var _a, _b;
680
868
  tools = (tools == null ? void 0 : tools.length) ? tools : void 0;
681
869
  const toolWarnings = [];
682
870
  const isLatest = [
@@ -685,13 +873,14 @@ function prepareTools({
685
873
  "gemini-pro-latest"
686
874
  ].some((id) => id === modelId);
687
875
  const isGemini2orNewer = modelId.includes("gemini-2") || modelId.includes("gemini-3") || modelId.includes("nano-banana") || isLatest;
876
+ const isGemini3orNewer = modelId.includes("gemini-3");
688
877
  const supportsFileSearch = modelId.includes("gemini-2.5") || modelId.includes("gemini-3");
689
878
  if (tools == null) {
690
879
  return { tools: void 0, toolConfig: void 0, toolWarnings };
691
880
  }
692
881
  const hasFunctionTools = tools.some((tool) => tool.type === "function");
693
882
  const hasProviderTools = tools.some((tool) => tool.type === "provider");
694
- if (hasFunctionTools && hasProviderTools) {
883
+ if (hasFunctionTools && hasProviderTools && !isGemini3orNewer) {
695
884
  toolWarnings.push({
696
885
  type: "unsupported",
697
886
  feature: `combination of function and provider-defined tools`
@@ -796,6 +985,45 @@ function prepareTools({
796
985
  break;
797
986
  }
798
987
  });
988
+ if (hasFunctionTools && isGemini3orNewer && googleTools2.length > 0) {
989
+ const functionDeclarations2 = [];
990
+ for (const tool of tools) {
991
+ if (tool.type === "function") {
992
+ functionDeclarations2.push({
993
+ name: tool.name,
994
+ description: (_a = tool.description) != null ? _a : "",
995
+ parameters: convertJSONSchemaToOpenAPISchema(tool.inputSchema)
996
+ });
997
+ }
998
+ }
999
+ const combinedToolConfig = {
1000
+ functionCallingConfig: { mode: "VALIDATED" },
1001
+ includeServerSideToolInvocations: true
1002
+ };
1003
+ if (toolChoice != null) {
1004
+ switch (toolChoice.type) {
1005
+ case "auto":
1006
+ break;
1007
+ case "none":
1008
+ combinedToolConfig.functionCallingConfig = { mode: "NONE" };
1009
+ break;
1010
+ case "required":
1011
+ combinedToolConfig.functionCallingConfig = { mode: "ANY" };
1012
+ break;
1013
+ case "tool":
1014
+ combinedToolConfig.functionCallingConfig = {
1015
+ mode: "ANY",
1016
+ allowedFunctionNames: [toolChoice.toolName]
1017
+ };
1018
+ break;
1019
+ }
1020
+ }
1021
+ return {
1022
+ tools: [...googleTools2, { functionDeclarations: functionDeclarations2 }],
1023
+ toolConfig: combinedToolConfig,
1024
+ toolWarnings
1025
+ };
1026
+ }
799
1027
  return {
800
1028
  tools: googleTools2.length > 0 ? googleTools2 : void 0,
801
1029
  toolConfig: void 0,
@@ -809,7 +1037,7 @@ function prepareTools({
809
1037
  case "function":
810
1038
  functionDeclarations.push({
811
1039
  name: tool.name,
812
- description: (_a = tool.description) != null ? _a : "",
1040
+ description: (_b = tool.description) != null ? _b : "",
813
1041
  parameters: convertJSONSchemaToOpenAPISchema(tool.inputSchema)
814
1042
  });
815
1043
  if (tool.strict === true) {
@@ -908,7 +1136,7 @@ function mapGoogleGenerativeAIFinishReason({
908
1136
  // src/google-generative-ai-language-model.ts
909
1137
  var GoogleGenerativeAILanguageModel = class {
910
1138
  constructor(modelId, config) {
911
- this.specificationVersion = "v3";
1139
+ this.specificationVersion = "v4";
912
1140
  var _a;
913
1141
  this.modelId = modelId;
914
1142
  this.config = config;
@@ -934,6 +1162,7 @@ var GoogleGenerativeAILanguageModel = class {
934
1162
  seed,
935
1163
  tools,
936
1164
  toolChoice,
1165
+ reasoning,
937
1166
  providerOptions
938
1167
  }) {
939
1168
  var _a;
@@ -960,9 +1189,14 @@ var GoogleGenerativeAILanguageModel = class {
960
1189
  });
961
1190
  }
962
1191
  const isGemmaModel = this.modelId.toLowerCase().startsWith("gemma-");
1192
+ const supportsFunctionResponseParts = this.modelId.startsWith("gemini-3");
963
1193
  const { contents, systemInstruction } = convertToGoogleGenerativeAIMessages(
964
1194
  prompt,
965
- { isGemmaModel, providerOptionsName }
1195
+ {
1196
+ isGemmaModel,
1197
+ providerOptionsName,
1198
+ supportsFunctionResponseParts
1199
+ }
966
1200
  );
967
1201
  const {
968
1202
  tools: googleTools2,
@@ -973,6 +1207,12 @@ var GoogleGenerativeAILanguageModel = class {
973
1207
  toolChoice,
974
1208
  modelId: this.modelId
975
1209
  });
1210
+ const resolvedThinking = resolveThinkingConfig({
1211
+ reasoning,
1212
+ modelId: this.modelId,
1213
+ warnings
1214
+ });
1215
+ const thinkingConfig = (googleOptions == null ? void 0 : googleOptions.thinkingConfig) || resolvedThinking ? { ...resolvedThinking, ...googleOptions == null ? void 0 : googleOptions.thinkingConfig } : void 0;
976
1216
  return {
977
1217
  args: {
978
1218
  generationConfig: {
@@ -996,7 +1236,7 @@ var GoogleGenerativeAILanguageModel = class {
996
1236
  },
997
1237
  // provider options:
998
1238
  responseModalities: googleOptions == null ? void 0 : googleOptions.responseModalities,
999
- thinkingConfig: googleOptions == null ? void 0 : googleOptions.thinkingConfig,
1239
+ thinkingConfig,
1000
1240
  ...(googleOptions == null ? void 0 : googleOptions.mediaResolution) && {
1001
1241
  mediaResolution: googleOptions.mediaResolution
1002
1242
  },
@@ -1013,14 +1253,15 @@ var GoogleGenerativeAILanguageModel = class {
1013
1253
  retrievalConfig: googleOptions.retrievalConfig
1014
1254
  } : googleToolConfig,
1015
1255
  cachedContent: googleOptions == null ? void 0 : googleOptions.cachedContent,
1016
- labels: googleOptions == null ? void 0 : googleOptions.labels
1256
+ labels: googleOptions == null ? void 0 : googleOptions.labels,
1257
+ serviceTier: googleOptions == null ? void 0 : googleOptions.serviceTier
1017
1258
  },
1018
1259
  warnings: [...warnings, ...toolWarnings],
1019
1260
  providerOptionsName
1020
1261
  };
1021
1262
  }
1022
1263
  async doGenerate(options) {
1023
- var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j;
1264
+ var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n, _o, _p;
1024
1265
  const { args, warnings, providerOptionsName } = await this.getArgs(options);
1025
1266
  const mergedHeaders = combineHeaders2(
1026
1267
  await resolve2(this.config.headers),
@@ -1046,6 +1287,7 @@ var GoogleGenerativeAILanguageModel = class {
1046
1287
  const parts = (_b = (_a = candidate.content) == null ? void 0 : _a.parts) != null ? _b : [];
1047
1288
  const usageMetadata = response.usageMetadata;
1048
1289
  let lastCodeExecutionToolCallId;
1290
+ let lastServerToolCallId;
1049
1291
  for (const part of parts) {
1050
1292
  if ("executableCode" in part && ((_c = part.executableCode) == null ? void 0 : _c.code)) {
1051
1293
  const toolCallId = this.config.generateId();
@@ -1100,22 +1342,68 @@ var GoogleGenerativeAILanguageModel = class {
1100
1342
  } : void 0
1101
1343
  });
1102
1344
  } else if ("inlineData" in part) {
1345
+ const hasThought = part.thought === true;
1346
+ const hasThoughtSignature = !!part.thoughtSignature;
1103
1347
  content.push({
1104
- type: "file",
1348
+ type: hasThought ? "reasoning-file" : "file",
1105
1349
  data: part.inlineData.data,
1106
1350
  mediaType: part.inlineData.mimeType,
1107
- providerMetadata: part.thoughtSignature ? {
1351
+ providerMetadata: hasThoughtSignature ? {
1108
1352
  [providerOptionsName]: {
1109
1353
  thoughtSignature: part.thoughtSignature
1110
1354
  }
1111
1355
  } : void 0
1112
1356
  });
1357
+ } else if ("toolCall" in part && part.toolCall) {
1358
+ const toolCallId = (_e = part.toolCall.id) != null ? _e : this.config.generateId();
1359
+ lastServerToolCallId = toolCallId;
1360
+ content.push({
1361
+ type: "tool-call",
1362
+ toolCallId,
1363
+ toolName: `server:${part.toolCall.toolType}`,
1364
+ input: JSON.stringify((_f = part.toolCall.args) != null ? _f : {}),
1365
+ providerExecuted: true,
1366
+ dynamic: true,
1367
+ providerMetadata: part.thoughtSignature ? {
1368
+ [providerOptionsName]: {
1369
+ thoughtSignature: part.thoughtSignature,
1370
+ serverToolCallId: toolCallId,
1371
+ serverToolType: part.toolCall.toolType
1372
+ }
1373
+ } : {
1374
+ [providerOptionsName]: {
1375
+ serverToolCallId: toolCallId,
1376
+ serverToolType: part.toolCall.toolType
1377
+ }
1378
+ }
1379
+ });
1380
+ } else if ("toolResponse" in part && part.toolResponse) {
1381
+ const responseToolCallId = (_g = lastServerToolCallId != null ? lastServerToolCallId : part.toolResponse.id) != null ? _g : this.config.generateId();
1382
+ content.push({
1383
+ type: "tool-result",
1384
+ toolCallId: responseToolCallId,
1385
+ toolName: `server:${part.toolResponse.toolType}`,
1386
+ result: (_h = part.toolResponse.response) != null ? _h : {},
1387
+ providerMetadata: part.thoughtSignature ? {
1388
+ [providerOptionsName]: {
1389
+ thoughtSignature: part.thoughtSignature,
1390
+ serverToolCallId: responseToolCallId,
1391
+ serverToolType: part.toolResponse.toolType
1392
+ }
1393
+ } : {
1394
+ [providerOptionsName]: {
1395
+ serverToolCallId: responseToolCallId,
1396
+ serverToolType: part.toolResponse.toolType
1397
+ }
1398
+ }
1399
+ });
1400
+ lastServerToolCallId = void 0;
1113
1401
  }
1114
1402
  }
1115
- const sources = (_e = extractSources({
1403
+ const sources = (_i = extractSources({
1116
1404
  groundingMetadata: candidate.groundingMetadata,
1117
1405
  generateId: this.config.generateId
1118
- })) != null ? _e : [];
1406
+ })) != null ? _i : [];
1119
1407
  for (const source of sources) {
1120
1408
  content.push(source);
1121
1409
  }
@@ -1129,17 +1417,19 @@ var GoogleGenerativeAILanguageModel = class {
1129
1417
  (part) => part.type === "tool-call" && !part.providerExecuted
1130
1418
  )
1131
1419
  }),
1132
- raw: (_f = candidate.finishReason) != null ? _f : void 0
1420
+ raw: (_j = candidate.finishReason) != null ? _j : void 0
1133
1421
  },
1134
1422
  usage: convertGoogleGenerativeAIUsage(usageMetadata),
1135
1423
  warnings,
1136
1424
  providerMetadata: {
1137
1425
  [providerOptionsName]: {
1138
- promptFeedback: (_g = response.promptFeedback) != null ? _g : null,
1139
- groundingMetadata: (_h = candidate.groundingMetadata) != null ? _h : null,
1140
- urlContextMetadata: (_i = candidate.urlContextMetadata) != null ? _i : null,
1141
- safetyRatings: (_j = candidate.safetyRatings) != null ? _j : null,
1142
- usageMetadata: usageMetadata != null ? usageMetadata : null
1426
+ promptFeedback: (_k = response.promptFeedback) != null ? _k : null,
1427
+ groundingMetadata: (_l = candidate.groundingMetadata) != null ? _l : null,
1428
+ urlContextMetadata: (_m = candidate.urlContextMetadata) != null ? _m : null,
1429
+ safetyRatings: (_n = candidate.safetyRatings) != null ? _n : null,
1430
+ usageMetadata: usageMetadata != null ? usageMetadata : null,
1431
+ finishMessage: (_o = candidate.finishMessage) != null ? _o : null,
1432
+ serviceTier: (_p = response.serviceTier) != null ? _p : null
1143
1433
  }
1144
1434
  },
1145
1435
  request: { body: args },
@@ -1175,6 +1465,7 @@ var GoogleGenerativeAILanguageModel = class {
1175
1465
  let providerMetadata = void 0;
1176
1466
  let lastGroundingMetadata = null;
1177
1467
  let lastUrlContextMetadata = null;
1468
+ let serviceTier = null;
1178
1469
  const generateId3 = this.config.generateId;
1179
1470
  let hasToolCalls = false;
1180
1471
  let currentTextBlockId = null;
@@ -1182,6 +1473,7 @@ var GoogleGenerativeAILanguageModel = class {
1182
1473
  let blockCounter = 0;
1183
1474
  const emittedSourceUrls = /* @__PURE__ */ new Set();
1184
1475
  let lastCodeExecutionToolCallId;
1476
+ let lastServerToolCallId;
1185
1477
  return {
1186
1478
  stream: response.pipeThrough(
1187
1479
  new TransformStream({
@@ -1189,7 +1481,7 @@ var GoogleGenerativeAILanguageModel = class {
1189
1481
  controller.enqueue({ type: "stream-start", warnings });
1190
1482
  },
1191
1483
  transform(chunk, controller) {
1192
- var _a, _b, _c, _d, _e, _f;
1484
+ var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k;
1193
1485
  if (options.includeRawChunks) {
1194
1486
  controller.enqueue({ type: "raw", rawValue: chunk.rawValue });
1195
1487
  }
@@ -1202,6 +1494,9 @@ var GoogleGenerativeAILanguageModel = class {
1202
1494
  if (usageMetadata != null) {
1203
1495
  usage = usageMetadata;
1204
1496
  }
1497
+ if (value.serviceTier != null) {
1498
+ serviceTier = value.serviceTier;
1499
+ }
1205
1500
  const candidate = (_a = value.candidates) == null ? void 0 : _a[0];
1206
1501
  if (candidate == null) {
1207
1502
  return;
@@ -1327,17 +1622,55 @@ var GoogleGenerativeAILanguageModel = class {
1327
1622
  });
1328
1623
  currentReasoningBlockId = null;
1329
1624
  }
1330
- const thoughtSignatureMetadata = part.thoughtSignature ? {
1625
+ const hasThought = part.thought === true;
1626
+ const hasThoughtSignature = !!part.thoughtSignature;
1627
+ const fileMeta = hasThoughtSignature ? {
1331
1628
  [providerOptionsName]: {
1332
1629
  thoughtSignature: part.thoughtSignature
1333
1630
  }
1334
1631
  } : void 0;
1335
1632
  controller.enqueue({
1336
- type: "file",
1633
+ type: hasThought ? "reasoning-file" : "file",
1337
1634
  mediaType: part.inlineData.mimeType,
1338
1635
  data: part.inlineData.data,
1339
- providerMetadata: thoughtSignatureMetadata
1636
+ providerMetadata: fileMeta
1340
1637
  });
1638
+ } else if ("toolCall" in part && part.toolCall) {
1639
+ const toolCallId = (_e = part.toolCall.id) != null ? _e : generateId3();
1640
+ lastServerToolCallId = toolCallId;
1641
+ const serverMeta = {
1642
+ [providerOptionsName]: {
1643
+ ...part.thoughtSignature ? { thoughtSignature: part.thoughtSignature } : {},
1644
+ serverToolCallId: toolCallId,
1645
+ serverToolType: part.toolCall.toolType
1646
+ }
1647
+ };
1648
+ controller.enqueue({
1649
+ type: "tool-call",
1650
+ toolCallId,
1651
+ toolName: `server:${part.toolCall.toolType}`,
1652
+ input: JSON.stringify((_f = part.toolCall.args) != null ? _f : {}),
1653
+ providerExecuted: true,
1654
+ dynamic: true,
1655
+ providerMetadata: serverMeta
1656
+ });
1657
+ } else if ("toolResponse" in part && part.toolResponse) {
1658
+ const responseToolCallId = (_g = lastServerToolCallId != null ? lastServerToolCallId : part.toolResponse.id) != null ? _g : generateId3();
1659
+ const serverMeta = {
1660
+ [providerOptionsName]: {
1661
+ ...part.thoughtSignature ? { thoughtSignature: part.thoughtSignature } : {},
1662
+ serverToolCallId: responseToolCallId,
1663
+ serverToolType: part.toolResponse.toolType
1664
+ }
1665
+ };
1666
+ controller.enqueue({
1667
+ type: "tool-result",
1668
+ toolCallId: responseToolCallId,
1669
+ toolName: `server:${part.toolResponse.toolType}`,
1670
+ result: (_h = part.toolResponse.response) != null ? _h : {},
1671
+ providerMetadata: serverMeta
1672
+ });
1673
+ lastServerToolCallId = void 0;
1341
1674
  }
1342
1675
  }
1343
1676
  const toolCallDeltas = getToolCallsFromParts({
@@ -1385,15 +1718,15 @@ var GoogleGenerativeAILanguageModel = class {
1385
1718
  };
1386
1719
  providerMetadata = {
1387
1720
  [providerOptionsName]: {
1388
- promptFeedback: (_e = value.promptFeedback) != null ? _e : null,
1721
+ promptFeedback: (_i = value.promptFeedback) != null ? _i : null,
1389
1722
  groundingMetadata: lastGroundingMetadata,
1390
1723
  urlContextMetadata: lastUrlContextMetadata,
1391
- safetyRatings: (_f = candidate.safetyRatings) != null ? _f : null
1724
+ safetyRatings: (_j = candidate.safetyRatings) != null ? _j : null,
1725
+ usageMetadata: usageMetadata != null ? usageMetadata : null,
1726
+ finishMessage: (_k = candidate.finishMessage) != null ? _k : null,
1727
+ serviceTier
1392
1728
  }
1393
1729
  };
1394
- if (usageMetadata != null) {
1395
- providerMetadata[providerOptionsName].usageMetadata = usageMetadata;
1396
- }
1397
1730
  }
1398
1731
  },
1399
1732
  flush(controller) {
@@ -1423,6 +1756,75 @@ var GoogleGenerativeAILanguageModel = class {
1423
1756
  };
1424
1757
  }
1425
1758
  };
1759
// True when the model id belongs to the Gemini 3 family: the id contains
// "gemini-3." or "gemini-3-", or ends with "gemini-3" (case-insensitive).
// "gemini-30" and similar ids do NOT match.
function isGemini3Model(modelId) {
  return /gemini-3(?:[.-]|$)/i.test(modelId);
}
1762
// Maximum output-token count assumed for Gemini 2.5-era models when
// deriving a thinking budget. Fixed at 65536 (2^16) for all model ids.
function getMaxOutputTokensForGemini25Model() {
  const GEMINI_25_MAX_OUTPUT_TOKENS = 65536;
  return GEMINI_25_MAX_OUTPUT_TOKENS;
}
1765
// Upper bound on the thinking-token budget for a Gemini 2.5-era model.
// Pro-tier ids ("2.5-pro" and "gemini-3-pro-image") get the larger cap
// of 32768 tokens; every other id gets 24576. Matching is case-insensitive.
function getMaxThinkingTokensForGemini25Model(modelId) {
  const normalized = modelId.toLowerCase();
  const proTierMarkers = ["2.5-pro", "gemini-3-pro-image"];
  const isProTier = proTierMarkers.some(
    (marker) => normalized.includes(marker)
  );
  return isProTier ? 32768 : 24576;
}
1772
// Routes a reasoning setting to the model-family-specific thinking config.
// Returns undefined unless the setting is a custom reasoning value
// (per `isCustomReasoning`). Gemini 3 models use the level-based config,
// except "gemini-3-pro-image", which — like 2.5 models — uses the
// budget-based config.
function resolveThinkingConfig({
  reasoning,
  modelId,
  warnings
}) {
  if (!isCustomReasoning(reasoning)) {
    return undefined;
  }
  const usesThinkingLevel = isGemini3Model(modelId) && !modelId.includes("gemini-3-pro-image");
  return usesThinkingLevel ? resolveGemini3ThinkingConfig({ reasoning, warnings }) : resolveGemini25ThinkingConfig({ reasoning, modelId, warnings });
}
1785
// Maps a reasoning setting onto a Gemini 3 `thinkingLevel`.
// "none" is expressed as the "minimal" level; all other settings go
// through `mapReasoningToProviderEffort` (xhigh is folded into "high").
// Returns undefined when no level could be derived.
function resolveGemini3ThinkingConfig({
  reasoning,
  warnings
}) {
  if (reasoning === "none") {
    return { thinkingLevel: "minimal" };
  }
  const effortMap = {
    minimal: "minimal",
    low: "low",
    medium: "medium",
    high: "high",
    xhigh: "high"
  };
  const level = mapReasoningToProviderEffort({ reasoning, effortMap, warnings });
  return level == null ? undefined : { thinkingLevel: level };
}
1808
// Maps a reasoning setting onto a Gemini 2.5-style `thinkingBudget`
// (a token count). "none" is expressed as a zero budget; other settings
// are converted by `mapReasoningToProviderBudget` using the model's
// output-token and thinking-token caps. Returns undefined when no
// budget could be derived.
function resolveGemini25ThinkingConfig({
  reasoning,
  modelId,
  warnings
}) {
  if (reasoning === "none") {
    return { thinkingBudget: 0 };
  }
  const budget = mapReasoningToProviderBudget({
    reasoning,
    maxOutputTokens: getMaxOutputTokensForGemini25Model(),
    maxReasoningBudget: getMaxThinkingTokensForGemini25Model(modelId),
    minReasoningBudget: 0,
    warnings
  });
  return budget == null ? undefined : { thinkingBudget: budget };
}
1426
1828
  function getToolCallsFromParts({
1427
1829
  parts,
1428
1830
  generateId: generateId3,
@@ -1602,6 +2004,23 @@ var getContentSchema = () => z5.object({
1602
2004
  mimeType: z5.string(),
1603
2005
  data: z5.string()
1604
2006
  }),
2007
+ thought: z5.boolean().nullish(),
2008
+ thoughtSignature: z5.string().nullish()
2009
+ }),
2010
+ z5.object({
2011
+ toolCall: z5.object({
2012
+ toolType: z5.string(),
2013
+ args: z5.unknown().nullish(),
2014
+ id: z5.string()
2015
+ }),
2016
+ thoughtSignature: z5.string().nullish()
2017
+ }),
2018
+ z5.object({
2019
+ toolResponse: z5.object({
2020
+ toolType: z5.string(),
2021
+ response: z5.unknown().nullish(),
2022
+ id: z5.string()
2023
+ }),
1605
2024
  thoughtSignature: z5.string().nullish()
1606
2025
  }),
1607
2026
  z5.object({
@@ -1652,6 +2071,7 @@ var responseSchema = lazySchema5(
1652
2071
  z5.object({
1653
2072
  content: getContentSchema().nullish().or(z5.object({}).strict()),
1654
2073
  finishReason: z5.string().nullish(),
2074
+ finishMessage: z5.string().nullish(),
1655
2075
  safetyRatings: z5.array(getSafetyRatingSchema()).nullish(),
1656
2076
  groundingMetadata: getGroundingMetadataSchema().nullish(),
1657
2077
  urlContextMetadata: getUrlContextMetadataSchema().nullish()
@@ -1661,7 +2081,8 @@ var responseSchema = lazySchema5(
1661
2081
  promptFeedback: z5.object({
1662
2082
  blockReason: z5.string().nullish(),
1663
2083
  safetyRatings: z5.array(getSafetyRatingSchema()).nullish()
1664
- }).nullish()
2084
+ }).nullish(),
2085
+ serviceTier: z5.string().nullish()
1665
2086
  })
1666
2087
  )
1667
2088
  );
@@ -1672,6 +2093,7 @@ var chunkSchema = lazySchema5(
1672
2093
  z5.object({
1673
2094
  content: getContentSchema().nullish(),
1674
2095
  finishReason: z5.string().nullish(),
2096
+ finishMessage: z5.string().nullish(),
1675
2097
  safetyRatings: z5.array(getSafetyRatingSchema()).nullish(),
1676
2098
  groundingMetadata: getGroundingMetadataSchema().nullish(),
1677
2099
  urlContextMetadata: getUrlContextMetadataSchema().nullish()
@@ -1681,7 +2103,8 @@ var chunkSchema = lazySchema5(
1681
2103
  promptFeedback: z5.object({
1682
2104
  blockReason: z5.string().nullish(),
1683
2105
  safetyRatings: z5.array(getSafetyRatingSchema()).nullish()
1684
- }).nullish()
2106
+ }).nullish(),
2107
+ serviceTier: z5.string().nullish()
1685
2108
  })
1686
2109
  )
1687
2110
  );
@@ -1884,7 +2307,7 @@ var GoogleGenerativeAIImageModel = class {
1884
2307
  this.modelId = modelId;
1885
2308
  this.settings = settings;
1886
2309
  this.config = config;
1887
- this.specificationVersion = "v3";
2310
+ this.specificationVersion = "v4";
1888
2311
  }
1889
2312
  get maxImagesPerCall() {
1890
2313
  if (this.settings.maxImagesPerCall != null) {
@@ -2138,7 +2561,7 @@ var GoogleGenerativeAIVideoModel = class {
2138
2561
  constructor(modelId, config) {
2139
2562
  this.modelId = modelId;
2140
2563
  this.config = config;
2141
- this.specificationVersion = "v3";
2564
+ this.specificationVersion = "v4";
2142
2565
  }
2143
2566
  get provider() {
2144
2567
  return this.config.provider;
@@ -2450,7 +2873,7 @@ function createGoogleGenerativeAI(options = {}) {
2450
2873
  }
2451
2874
  return createChatModel(modelId);
2452
2875
  };
2453
- provider.specificationVersion = "v3";
2876
+ provider.specificationVersion = "v4";
2454
2877
  provider.languageModel = createChatModel;
2455
2878
  provider.chat = createChatModel;
2456
2879
  provider.generativeAI = createChatModel;