@ai-sdk/google 2.0.62 → 2.0.64

This diff represents the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
@@ -134,11 +134,12 @@ import {
134
134
  } from "@ai-sdk/provider";
135
135
  import { convertToBase64 } from "@ai-sdk/provider-utils";
136
136
  function convertToGoogleGenerativeAIMessages(prompt, options) {
137
- var _a;
137
+ var _a, _b;
138
138
  const systemInstructionParts = [];
139
139
  const contents = [];
140
140
  let systemMessagesAllowed = true;
141
141
  const isGemmaModel = (_a = options == null ? void 0 : options.isGemmaModel) != null ? _a : false;
142
+ const supportsFunctionResponseParts = (_b = options == null ? void 0 : options.supportsFunctionResponseParts) != null ? _b : true;
142
143
  for (const { role, content } of prompt) {
143
144
  switch (role) {
144
145
  case "system": {
@@ -186,8 +187,8 @@ function convertToGoogleGenerativeAIMessages(prompt, options) {
186
187
  contents.push({
187
188
  role: "model",
188
189
  parts: content.map((part) => {
189
- var _a2, _b, _c;
190
- const thoughtSignature = ((_b = (_a2 = part.providerOptions) == null ? void 0 : _a2.google) == null ? void 0 : _b.thoughtSignature) != null ? String((_c = part.providerOptions.google) == null ? void 0 : _c.thoughtSignature) : void 0;
190
+ var _a2, _b2, _c;
191
+ const thoughtSignature = ((_b2 = (_a2 = part.providerOptions) == null ? void 0 : _a2.google) == null ? void 0 : _b2.thoughtSignature) != null ? String((_c = part.providerOptions.google) == null ? void 0 : _c.thoughtSignature) : void 0;
191
192
  switch (part.type) {
192
193
  case "text": {
193
194
  return part.text.length === 0 ? void 0 : {
@@ -240,36 +241,10 @@ function convertToGoogleGenerativeAIMessages(prompt, options) {
240
241
  for (const part of content) {
241
242
  const output = part.output;
242
243
  if (output.type === "content") {
243
- for (const contentPart of output.value) {
244
- switch (contentPart.type) {
245
- case "text":
246
- parts.push({
247
- functionResponse: {
248
- name: part.toolName,
249
- response: {
250
- name: part.toolName,
251
- content: contentPart.text
252
- }
253
- }
254
- });
255
- break;
256
- case "media":
257
- parts.push(
258
- {
259
- inlineData: {
260
- mimeType: contentPart.mediaType,
261
- data: contentPart.data
262
- }
263
- },
264
- {
265
- text: "Tool executed successfully and returned this image as a response"
266
- }
267
- );
268
- break;
269
- default:
270
- parts.push({ text: JSON.stringify(contentPart) });
271
- break;
272
- }
244
+ if (supportsFunctionResponseParts) {
245
+ appendToolResultParts({ parts, part, output });
246
+ } else {
247
+ appendLegacyToolResultParts({ parts, part, output });
273
248
  }
274
249
  } else {
275
250
  parts.push({
@@ -300,6 +275,77 @@ function convertToGoogleGenerativeAIMessages(prompt, options) {
300
275
  contents
301
276
  };
302
277
  }
278
+ function appendToolResultParts({
279
+ parts,
280
+ part,
281
+ output
282
+ }) {
283
+ const responseTextParts = [];
284
+ const functionResponseParts = [];
285
+ for (const contentPart of output.value) {
286
+ switch (contentPart.type) {
287
+ case "text":
288
+ responseTextParts.push(contentPart.text);
289
+ break;
290
+ case "media":
291
+ functionResponseParts.push({
292
+ inlineData: {
293
+ mimeType: contentPart.mediaType,
294
+ data: contentPart.data
295
+ }
296
+ });
297
+ break;
298
+ }
299
+ }
300
+ const responseText = responseTextParts.length > 0 ? responseTextParts.join("\n") : "Tool executed successfully.";
301
+ parts.push({
302
+ functionResponse: {
303
+ name: part.toolName,
304
+ response: {
305
+ name: part.toolName,
306
+ content: responseText
307
+ },
308
+ ...functionResponseParts.length > 0 ? { parts: functionResponseParts } : {}
309
+ }
310
+ });
311
+ }
312
+ function appendLegacyToolResultParts({
313
+ parts,
314
+ part,
315
+ output
316
+ }) {
317
+ for (const contentPart of output.value) {
318
+ switch (contentPart.type) {
319
+ case "text":
320
+ parts.push({
321
+ functionResponse: {
322
+ name: part.toolName,
323
+ response: {
324
+ name: part.toolName,
325
+ content: contentPart.text
326
+ }
327
+ }
328
+ });
329
+ break;
330
+ case "media":
331
+ parts.push(
332
+ {
333
+ inlineData: {
334
+ mimeType: contentPart.mediaType,
335
+ data: contentPart.data
336
+ }
337
+ },
338
+ {
339
+ text: "Tool executed successfully and returned this image as a response"
340
+ }
341
+ );
342
+ break;
343
+ default:
344
+ parts.push({ text: JSON.stringify(contentPart) });
345
+ break;
346
+ }
347
+ }
348
+ }
303
349
 
304
350
  // src/get-model-path.ts
305
351
  function getModelPath(modelId) {
@@ -449,7 +495,15 @@ var googleGenerativeAIProviderOptions = lazySchema2(
449
495
  latitude: z2.number(),
450
496
  longitude: z2.number()
451
497
  }).optional()
452
- }).optional()
498
+ }).optional(),
499
+ /**
500
+ * Optional. The service tier to use for the request.
501
+ */
502
+ serviceTier: z2.enum([
503
+ "SERVICE_TIER_STANDARD",
504
+ "SERVICE_TIER_FLEX",
505
+ "SERVICE_TIER_PRIORITY"
506
+ ]).optional()
453
507
  })
454
508
  )
455
509
  );
@@ -728,9 +782,10 @@ var GoogleGenerativeAILanguageModel = class {
728
782
  });
729
783
  }
730
784
  const isGemmaModel = this.modelId.toLowerCase().startsWith("gemma-");
785
+ const supportsFunctionResponseParts = this.modelId.startsWith("gemini-3");
731
786
  const { contents, systemInstruction } = convertToGoogleGenerativeAIMessages(
732
787
  prompt,
733
- { isGemmaModel }
788
+ { isGemmaModel, supportsFunctionResponseParts }
734
789
  );
735
790
  const {
736
791
  tools: googleTools2,
@@ -781,13 +836,14 @@ var GoogleGenerativeAILanguageModel = class {
781
836
  retrievalConfig: googleOptions.retrievalConfig
782
837
  } : googleToolConfig,
783
838
  cachedContent: googleOptions == null ? void 0 : googleOptions.cachedContent,
784
- labels: googleOptions == null ? void 0 : googleOptions.labels
839
+ labels: googleOptions == null ? void 0 : googleOptions.labels,
840
+ serviceTier: googleOptions == null ? void 0 : googleOptions.serviceTier
785
841
  },
786
842
  warnings: [...warnings, ...toolWarnings]
787
843
  };
788
844
  }
789
845
  async doGenerate(options) {
790
- var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m;
846
+ var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n;
791
847
  const { args, warnings } = await this.getArgs(options);
792
848
  const body = JSON.stringify(args);
793
849
  const mergedHeaders = combineHeaders(
@@ -897,7 +953,7 @@ var GoogleGenerativeAILanguageModel = class {
897
953
  groundingMetadata: (_k = candidate.groundingMetadata) != null ? _k : null,
898
954
  urlContextMetadata: (_l = candidate.urlContextMetadata) != null ? _l : null,
899
955
  safetyRatings: (_m = candidate.safetyRatings) != null ? _m : null,
900
- usageMetadata: usageMetadata != null ? usageMetadata : null
956
+ serviceTier: (_n = response.serviceTier) != null ? _n : null
901
957
  }
902
958
  },
903
959
  request: { body },
@@ -935,6 +991,7 @@ var GoogleGenerativeAILanguageModel = class {
935
991
  let providerMetadata = void 0;
936
992
  let lastGroundingMetadata = null;
937
993
  let lastUrlContextMetadata = null;
994
+ let serviceTier = null;
938
995
  const generateId2 = this.config.generateId;
939
996
  let hasToolCalls = false;
940
997
  let currentTextBlockId = null;
@@ -966,6 +1023,9 @@ var GoogleGenerativeAILanguageModel = class {
966
1023
  usage.reasoningTokens = (_d = usageMetadata.thoughtsTokenCount) != null ? _d : void 0;
967
1024
  usage.cachedInputTokens = (_e = usageMetadata.cachedContentTokenCount) != null ? _e : void 0;
968
1025
  }
1026
+ if (value.serviceTier != null) {
1027
+ serviceTier = value.serviceTier;
1028
+ }
969
1029
  const candidate = (_f = value.candidates) == null ? void 0 : _f[0];
970
1030
  if (candidate == null) {
971
1031
  return;
@@ -1130,7 +1190,8 @@ var GoogleGenerativeAILanguageModel = class {
1130
1190
  promptFeedback: (_i = value.promptFeedback) != null ? _i : null,
1131
1191
  groundingMetadata: lastGroundingMetadata,
1132
1192
  urlContextMetadata: lastUrlContextMetadata,
1133
- safetyRatings: (_j = candidate.safetyRatings) != null ? _j : null
1193
+ safetyRatings: (_j = candidate.safetyRatings) != null ? _j : null,
1194
+ serviceTier
1134
1195
  }
1135
1196
  };
1136
1197
  if (usageMetadata != null) {
@@ -1397,7 +1458,8 @@ var responseSchema = lazySchema3(
1397
1458
  promptFeedback: z3.object({
1398
1459
  blockReason: z3.string().nullish(),
1399
1460
  safetyRatings: z3.array(getSafetyRatingSchema()).nullish()
1400
- }).nullish()
1461
+ }).nullish(),
1462
+ serviceTier: z3.string().nullish()
1401
1463
  })
1402
1464
  )
1403
1465
  );
@@ -1417,7 +1479,8 @@ var chunkSchema = lazySchema3(
1417
1479
  promptFeedback: z3.object({
1418
1480
  blockReason: z3.string().nullish(),
1419
1481
  safetyRatings: z3.array(getSafetyRatingSchema()).nullish()
1420
- }).nullish()
1482
+ }).nullish(),
1483
+ serviceTier: z3.string().nullish()
1421
1484
  })
1422
1485
  )
1423
1486
  );