@ai-sdk/google 2.0.63 → 2.0.65

This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
@@ -191,6 +191,7 @@ declare const responseSchema: _ai_sdk_provider_utils.LazySchema<{
191
191
  blocked?: boolean | null | undefined;
192
192
  }[] | null | undefined;
193
193
  } | null | undefined;
194
+ serviceTier?: string | null | undefined;
194
195
  }>;
195
196
  type GroundingMetadataSchema = NonNullable<InferValidator<typeof responseSchema>['candidates'][number]['groundingMetadata']>;
196
197
  type UrlContextMetadataSchema = NonNullable<InferValidator<typeof responseSchema>['candidates'][number]['urlContextMetadata']>;
@@ -191,6 +191,7 @@ declare const responseSchema: _ai_sdk_provider_utils.LazySchema<{
191
191
  blocked?: boolean | null | undefined;
192
192
  }[] | null | undefined;
193
193
  } | null | undefined;
194
+ serviceTier?: string | null | undefined;
194
195
  }>;
195
196
  type GroundingMetadataSchema = NonNullable<InferValidator<typeof responseSchema>['candidates'][number]['groundingMetadata']>;
196
197
  type UrlContextMetadataSchema = NonNullable<InferValidator<typeof responseSchema>['candidates'][number]['urlContextMetadata']>;
@@ -505,7 +505,15 @@ var googleGenerativeAIProviderOptions = (0, import_provider_utils3.lazySchema)(
505
505
  latitude: import_v42.z.number(),
506
506
  longitude: import_v42.z.number()
507
507
  }).optional()
508
- }).optional()
508
+ }).optional(),
509
+ /**
510
+ * Optional. The service tier to use for the request.
511
+ */
512
+ serviceTier: import_v42.z.enum([
513
+ "SERVICE_TIER_STANDARD",
514
+ "SERVICE_TIER_FLEX",
515
+ "SERVICE_TIER_PRIORITY"
516
+ ]).optional()
509
517
  })
510
518
  )
511
519
  );
@@ -836,13 +844,14 @@ var GoogleGenerativeAILanguageModel = class {
836
844
  retrievalConfig: googleOptions.retrievalConfig
837
845
  } : googleToolConfig,
838
846
  cachedContent: googleOptions == null ? void 0 : googleOptions.cachedContent,
839
- labels: googleOptions == null ? void 0 : googleOptions.labels
847
+ labels: googleOptions == null ? void 0 : googleOptions.labels,
848
+ serviceTier: googleOptions == null ? void 0 : googleOptions.serviceTier
840
849
  },
841
850
  warnings: [...warnings, ...toolWarnings]
842
851
  };
843
852
  }
844
853
  async doGenerate(options) {
845
- var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m;
854
+ var _a, _b, _c, _d, _e, _f, _g, _h, _i, _j, _k, _l, _m, _n;
846
855
  const { args, warnings } = await this.getArgs(options);
847
856
  const body = JSON.stringify(args);
848
857
  const mergedHeaders = (0, import_provider_utils4.combineHeaders)(
@@ -952,7 +961,7 @@ var GoogleGenerativeAILanguageModel = class {
952
961
  groundingMetadata: (_k = candidate.groundingMetadata) != null ? _k : null,
953
962
  urlContextMetadata: (_l = candidate.urlContextMetadata) != null ? _l : null,
954
963
  safetyRatings: (_m = candidate.safetyRatings) != null ? _m : null,
955
- usageMetadata: usageMetadata != null ? usageMetadata : null
964
+ serviceTier: (_n = response.serviceTier) != null ? _n : null
956
965
  }
957
966
  },
958
967
  request: { body },
@@ -990,6 +999,7 @@ var GoogleGenerativeAILanguageModel = class {
990
999
  let providerMetadata = void 0;
991
1000
  let lastGroundingMetadata = null;
992
1001
  let lastUrlContextMetadata = null;
1002
+ let serviceTier = null;
993
1003
  const generateId2 = this.config.generateId;
994
1004
  let hasToolCalls = false;
995
1005
  let currentTextBlockId = null;
@@ -1021,6 +1031,9 @@ var GoogleGenerativeAILanguageModel = class {
1021
1031
  usage.reasoningTokens = (_d = usageMetadata.thoughtsTokenCount) != null ? _d : void 0;
1022
1032
  usage.cachedInputTokens = (_e = usageMetadata.cachedContentTokenCount) != null ? _e : void 0;
1023
1033
  }
1034
+ if (value.serviceTier != null) {
1035
+ serviceTier = value.serviceTier;
1036
+ }
1024
1037
  const candidate = (_f = value.candidates) == null ? void 0 : _f[0];
1025
1038
  if (candidate == null) {
1026
1039
  return;
@@ -1185,7 +1198,8 @@ var GoogleGenerativeAILanguageModel = class {
1185
1198
  promptFeedback: (_i = value.promptFeedback) != null ? _i : null,
1186
1199
  groundingMetadata: lastGroundingMetadata,
1187
1200
  urlContextMetadata: lastUrlContextMetadata,
1188
- safetyRatings: (_j = candidate.safetyRatings) != null ? _j : null
1201
+ safetyRatings: (_j = candidate.safetyRatings) != null ? _j : null,
1202
+ serviceTier
1189
1203
  }
1190
1204
  };
1191
1205
  if (usageMetadata != null) {
@@ -1452,7 +1466,8 @@ var responseSchema = (0, import_provider_utils4.lazySchema)(
1452
1466
  promptFeedback: import_v43.z.object({
1453
1467
  blockReason: import_v43.z.string().nullish(),
1454
1468
  safetyRatings: import_v43.z.array(getSafetyRatingSchema()).nullish()
1455
- }).nullish()
1469
+ }).nullish(),
1470
+ serviceTier: import_v43.z.string().nullish()
1456
1471
  })
1457
1472
  )
1458
1473
  );
@@ -1472,7 +1487,8 @@ var chunkSchema = (0, import_provider_utils4.lazySchema)(
1472
1487
  promptFeedback: import_v43.z.object({
1473
1488
  blockReason: import_v43.z.string().nullish(),
1474
1489
  safetyRatings: import_v43.z.array(getSafetyRatingSchema()).nullish()
1475
- }).nullish()
1490
+ }).nullish(),
1491
+ serviceTier: import_v43.z.string().nullish()
1476
1492
  })
1477
1493
  )
1478
1494
  );