@ai-sdk/google 2.0.62 → 2.0.64

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -191,6 +191,7 @@ declare const responseSchema: _ai_sdk_provider_utils.LazySchema<{
191
191
  blocked?: boolean | null | undefined;
192
192
  }[] | null | undefined;
193
193
  } | null | undefined;
194
+ serviceTier?: string | null | undefined;
194
195
  }>;
195
196
  type GroundingMetadataSchema = NonNullable<InferValidator<typeof responseSchema>['candidates'][number]['groundingMetadata']>;
196
197
  type UrlContextMetadataSchema = NonNullable<InferValidator<typeof responseSchema>['candidates'][number]['urlContextMetadata']>;
@@ -191,6 +191,7 @@ declare const responseSchema: _ai_sdk_provider_utils.LazySchema<{
191
191
  blocked?: boolean | null | undefined;
192
192
  }[] | null | undefined;
193
193
  } | null | undefined;
194
+ serviceTier?: string | null | undefined;
194
195
  }>;
195
196
  type GroundingMetadataSchema = NonNullable<InferValidator<typeof responseSchema>['candidates'][number]['groundingMetadata']>;
196
197
  type UrlContextMetadataSchema = NonNullable<InferValidator<typeof responseSchema>['candidates'][number]['urlContextMetadata']>;
@@ -18,14 +18,14 @@ var __copyProps = (to, from, except, desc) => {
18
18
  var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);
19
19
 
20
20
  // src/internal/index.ts
21
- var internal_exports = {};
22
- __export(internal_exports, {
21
+ var index_exports = {};
22
+ __export(index_exports, {
23
23
  GoogleGenerativeAILanguageModel: () => GoogleGenerativeAILanguageModel,
24
24
  getGroundingMetadataSchema: () => getGroundingMetadataSchema,
25
25
  getUrlContextMetadataSchema: () => getUrlContextMetadataSchema,
26
26
  googleTools: () => googleTools
27
27
  });
28
- module.exports = __toCommonJS(internal_exports);
28
+ module.exports = __toCommonJS(index_exports);
29
29
 
30
30
  // src/google-generative-ai-language-model.ts
31
31
  var import_provider_utils4 = require("@ai-sdk/provider-utils");
@@ -151,11 +151,12 @@ function isEmptyObjectSchema(jsonSchema) {
151
151
  var import_provider = require("@ai-sdk/provider");
152
152
  var import_provider_utils = require("@ai-sdk/provider-utils");
153
153
  function convertToGoogleGenerativeAIMessages(prompt, options) {
154
- var _a;
154
+ var _a, _b;
155
155
  const systemInstructionParts = [];
156
156
  const contents = [];
157
157
  let systemMessagesAllowed = true;
158
158
  const isGemmaModel = (_a = options == null ? void 0 : options.isGemmaModel) != null ? _a : false;
159
+ const supportsFunctionResponseParts = (_b = options == null ? void 0 : options.supportsFunctionResponseParts) != null ? _b : true;
159
160
  for (const { role, content } of prompt) {
160
161
  switch (role) {
161
162
  case "system": {
@@ -203,8 +204,8 @@ function convertToGoogleGenerativeAIMessages(prompt, options) {
203
204
  contents.push({
204
205
  role: "model",
205
206
  parts: content.map((part) => {
206
- var _a2, _b, _c;
207
- const thoughtSignature = ((_b = (_a2 = part.providerOptions) == null ? void 0 : _a2.google) == null ? void 0 : _b.thoughtSignature) != null ? String((_c = part.providerOptions.google) == null ? void 0 : _c.thoughtSignature) : void 0;
207
+ var _a2, _b2, _c;
208
+ const thoughtSignature = ((_b2 = (_a2 = part.providerOptions) == null ? void 0 : _a2.google) == null ? void 0 : _b2.thoughtSignature) != null ? String((_c = part.providerOptions.google) == null ? void 0 : _c.thoughtSignature) : void 0;
208
209
  switch (part.type) {
209
210
  case "text": {
210
211
  return part.text.length === 0 ? void 0 : {
@@ -257,36 +258,10 @@ function convertToGoogleGenerativeAIMessages(prompt, options) {
257
258
  for (const part of content) {
258
259
  const output = part.output;
259
260
  if (output.type === "content") {
260
- for (const contentPart of output.value) {
261
- switch (contentPart.type) {
262
- case "text":
263
- parts.push({
264
- functionResponse: {
265
- name: part.toolName,
266
- response: {
267
- name: part.toolName,
268
- content: contentPart.text
269
- }
270
- }
271
- });
272
- break;
273
- case "media":
274
- parts.push(
275
- {
276
- inlineData: {
277
- mimeType: contentPart.mediaType,
278
- data: contentPart.data
279
- }
280
- },
281
- {
282
- text: "Tool executed successfully and returned this image as a response"
283
- }
284
- );
285
- break;
286
- default:
287
- parts.push({ text: JSON.stringify(contentPart) });
288
- break;
289
- }
261
+ if (supportsFunctionResponseParts) {
262
+ appendToolResultParts({ parts, part, output });
263
+ } else {
264
+ appendLegacyToolResultParts({ parts, part, output });
290
265
  }
291
266
  } else {
292
267
  parts.push({
@@ -317,6 +292,77 @@ function convertToGoogleGenerativeAIMessages(prompt, options) {
317
292
  contents
318
293
  };
319
294
  }
295
/**
 * Converts one tool result into a single `functionResponse` part (newer
 * Gemini wire format): every text chunk is merged into one response string,
 * and media chunks ride along as inline-data sub-parts of that response.
 *
 * @param {Object} args
 * @param {Array}  args.parts  - destination array; exactly one entry is appended.
 * @param {Object} args.part   - tool-result content part; supplies `toolName`.
 * @param {Object} args.output - tool output whose `value` is the content array.
 */
function appendToolResultParts({ parts, part, output }) {
  const textChunks = [];
  const mediaSubParts = [];

  for (const chunk of output.value) {
    if (chunk.type === "text") {
      textChunks.push(chunk.text);
    } else if (chunk.type === "media") {
      mediaSubParts.push({
        inlineData: {
          mimeType: chunk.mediaType,
          data: chunk.data,
        },
      });
    }
    // Any other chunk type is intentionally dropped on this code path.
  }

  // Fall back to a generic success message when the tool produced no text.
  const content =
    textChunks.length === 0
      ? "Tool executed successfully."
      : textChunks.join("\n");

  const functionResponse = {
    name: part.toolName,
    response: { name: part.toolName, content },
  };
  if (mediaSubParts.length > 0) {
    functionResponse.parts = mediaSubParts;
  }
  parts.push({ functionResponse });
}
329
/**
 * Converts one tool result using the legacy (pre function-response-parts)
 * Gemini wire format: each content chunk becomes its own part — text chunks
 * become individual `functionResponse` parts, media chunks become an
 * `inlineData` part followed by an explanatory text part, and anything else
 * is JSON-stringified into a plain text part.
 *
 * @param {Object} args
 * @param {Array}  args.parts  - destination array; entries are appended per chunk.
 * @param {Object} args.part   - tool-result content part; supplies `toolName`.
 * @param {Object} args.output - tool output whose `value` is the content array.
 */
function appendLegacyToolResultParts({ parts, part, output }) {
  for (const chunk of output.value) {
    if (chunk.type === "text") {
      parts.push({
        functionResponse: {
          name: part.toolName,
          response: {
            name: part.toolName,
            content: chunk.text,
          },
        },
      });
    } else if (chunk.type === "media") {
      // Legacy format cannot nest media inside a functionResponse, so the
      // image is emitted standalone with a trailing explanatory text part.
      parts.push(
        {
          inlineData: {
            mimeType: chunk.mediaType,
            data: chunk.data,
          },
        },
        {
          text: "Tool executed successfully and returned this image as a response",
        },
      );
    } else {
      parts.push({ text: JSON.stringify(chunk) });
    }
  }
}
320
366
 
321
367
  // src/get-model-path.ts
322
368
  function getModelPath(modelId) {
@@ -459,7 +505,15 @@ var googleGenerativeAIProviderOptions = (0, import_provider_utils3.lazySchema)(
459
505
  latitude: import_v42.z.number(),
460
506
  longitude: import_v42.z.number()
461
507
  }).optional()
462
- }).optional()
508
+ }).optional(),
509
+ /**
510
+ * Optional. The service tier to use for the request.
511
+ */
512
+ serviceTier: import_v42.z.enum([
513
+ "SERVICE_TIER_STANDARD",
514
+ "SERVICE_TIER_FLEX",
515
+ "SERVICE_TIER_PRIORITY"
516
+ ]).optional()
463
517
  })
464
518
  )
465
519
  );
@@ -736,9 +790,10 @@ var GoogleGenerativeAILanguageModel = class {
736
790
  });
737
791
  }
738
792
  const isGemmaModel = this.modelId.toLowerCase().startsWith("gemma-");
793
+ const supportsFunctionResponseParts = this.modelId.startsWith("gemini-3");
739
794
  const { contents, systemInstruction } = convertToGoogleGenerativeAIMessages(
740
795
  prompt,
741
- { isGemmaModel }
796
+ { isGemmaModel, supportsFunctionResponseParts }
742
797
  );
743
798
  const {
744
799
  tools: googleTools2,
@@ -789,13 +844,14 @@ var GoogleGenerativeAILanguageModel = class {
789
844
  retrievalConfig: googleOptions.retrievalConfig
790
845
  } : googleToolConfig,
791
846
  cachedContent: googleOptions == null ? void 0 : googleOptions.cachedContent,
792
- labels: googleOptions == null ? void 0 : googleOptions.labels
847
+ labels: googleOptions == null ? void 0 : googleOptions.labels,
848
+ serviceTier: googleOptions == null ? void 0 : googleOptions.serviceTier
793
849
  },
794
850
  warnings: [...warnings, ...toolWarnings]
795
851
  };
796
852
  }
797
853
  async doGenerate(options) {
798
- var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m;
854
+ var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n;
799
855
  const { args, warnings } = await this.getArgs(options);
800
856
  const body = JSON.stringify(args);
801
857
  const mergedHeaders = (0, import_provider_utils4.combineHeaders)(
@@ -905,7 +961,7 @@ var GoogleGenerativeAILanguageModel = class {
905
961
  groundingMetadata: (_k = candidate.groundingMetadata) != null ? _k : null,
906
962
  urlContextMetadata: (_l = candidate.urlContextMetadata) != null ? _l : null,
907
963
  safetyRatings: (_m = candidate.safetyRatings) != null ? _m : null,
908
- usageMetadata: usageMetadata != null ? usageMetadata : null
964
+ serviceTier: (_n = response.serviceTier) != null ? _n : null
909
965
  }
910
966
  },
911
967
  request: { body },
@@ -943,6 +999,7 @@ var GoogleGenerativeAILanguageModel = class {
943
999
  let providerMetadata = void 0;
944
1000
  let lastGroundingMetadata = null;
945
1001
  let lastUrlContextMetadata = null;
1002
+ let serviceTier = null;
946
1003
  const generateId2 = this.config.generateId;
947
1004
  let hasToolCalls = false;
948
1005
  let currentTextBlockId = null;
@@ -974,6 +1031,9 @@ var GoogleGenerativeAILanguageModel = class {
974
1031
  usage.reasoningTokens = (_d = usageMetadata.thoughtsTokenCount) != null ? _d : void 0;
975
1032
  usage.cachedInputTokens = (_e = usageMetadata.cachedContentTokenCount) != null ? _e : void 0;
976
1033
  }
1034
+ if (value.serviceTier != null) {
1035
+ serviceTier = value.serviceTier;
1036
+ }
977
1037
  const candidate = (_f = value.candidates) == null ? void 0 : _f[0];
978
1038
  if (candidate == null) {
979
1039
  return;
@@ -1138,7 +1198,8 @@ var GoogleGenerativeAILanguageModel = class {
1138
1198
  promptFeedback: (_i = value.promptFeedback) != null ? _i : null,
1139
1199
  groundingMetadata: lastGroundingMetadata,
1140
1200
  urlContextMetadata: lastUrlContextMetadata,
1141
- safetyRatings: (_j = candidate.safetyRatings) != null ? _j : null
1201
+ safetyRatings: (_j = candidate.safetyRatings) != null ? _j : null,
1202
+ serviceTier
1142
1203
  }
1143
1204
  };
1144
1205
  if (usageMetadata != null) {
@@ -1405,7 +1466,8 @@ var responseSchema = (0, import_provider_utils4.lazySchema)(
1405
1466
  promptFeedback: import_v43.z.object({
1406
1467
  blockReason: import_v43.z.string().nullish(),
1407
1468
  safetyRatings: import_v43.z.array(getSafetyRatingSchema()).nullish()
1408
- }).nullish()
1469
+ }).nullish(),
1470
+ serviceTier: import_v43.z.string().nullish()
1409
1471
  })
1410
1472
  )
1411
1473
  );
@@ -1425,7 +1487,8 @@ var chunkSchema = (0, import_provider_utils4.lazySchema)(
1425
1487
  promptFeedback: import_v43.z.object({
1426
1488
  blockReason: import_v43.z.string().nullish(),
1427
1489
  safetyRatings: import_v43.z.array(getSafetyRatingSchema()).nullish()
1428
- }).nullish()
1490
+ }).nullish(),
1491
+ serviceTier: import_v43.z.string().nullish()
1429
1492
  })
1430
1493
  )
1431
1494
  );