@ai-sdk/google 2.0.65 → 2.0.67

This diff compares publicly available package versions as released to a supported registry. It is provided for informational purposes only and reflects the packages exactly as they appear in their public registries.
package/CHANGELOG.md CHANGED
@@ -1,5 +1,17 @@
  # @ai-sdk/google

+ ## 2.0.67
+
+ ### Patch Changes
+
+ - 84bbfee: feat(provider/google): preserve per-modality token details in usage data
+
+ ## 2.0.66
+
+ ### Patch Changes
+
+ - d634323: fix(provider/google): fix Gemini service tier enum after upstream update
+
  ## 2.0.65

  ### Patch Changes
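For context on the 2.0.66 entry: the Gemini API's accepted service tier values changed upstream from the `SERVICE_TIER_*` enum names to lowercase strings, and the provider option follows suit (see the type change below). A minimal sketch of the option in use, assuming the AI SDK's `generateText`; the model id and prompt are illustrative:

```ts
import { generateText } from 'ai';
import { google } from '@ai-sdk/google';

const result = await generateText({
  model: google('gemini-2.5-flash'),
  prompt: 'Summarize the release notes.',
  providerOptions: {
    google: {
      // As of 2.0.66, lowercase wire values replace the old enum
      // names, e.g. "SERVICE_TIER_FLEX" becomes "flex".
      serviceTier: 'flex',
    },
  },
});
```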
package/dist/index.d.mts CHANGED
@@ -39,7 +39,7 @@ declare const googleGenerativeAIProviderOptions: _ai_sdk_provider_utils.LazySche
  longitude: number;
  } | undefined;
  } | undefined;
- serviceTier?: "SERVICE_TIER_STANDARD" | "SERVICE_TIER_FLEX" | "SERVICE_TIER_PRIORITY" | undefined;
+ serviceTier?: "standard" | "flex" | "priority" | undefined;
  }>;
  type GoogleGenerativeAIProviderOptions = InferValidator<typeof googleGenerativeAIProviderOptions>;

@@ -141,6 +141,14 @@ declare const responseSchema: _ai_sdk_provider_utils.LazySchema<{
  candidatesTokenCount?: number | null | undefined;
  totalTokenCount?: number | null | undefined;
  trafficType?: string | null | undefined;
+ promptTokensDetails?: {
+   modality: string;
+   tokenCount: number;
+ }[] | null | undefined;
+ candidatesTokensDetails?: {
+   modality: string;
+   tokenCount: number;
+ }[] | null | undefined;
  } | null | undefined;
  promptFeedback?: {
  blockReason?: string | null | undefined;
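The new `promptTokensDetails` and `candidatesTokensDetails` fields carry Gemini's per-modality token breakdown (see the Modality reference linked in the compiled schema further below). As a sketch of consuming that shape, here is a hypothetical helper (`summarizeModalities` is not part of this package) that folds a details array into per-modality totals:

```ts
type TokenDetails =
  | Array<{ modality: string; tokenCount: number }>
  | null
  | undefined;

// Hypothetical helper: aggregate token counts by modality
// (modality strings come from Gemini, e.g. "TEXT", "IMAGE", "AUDIO").
function summarizeModalities(details: TokenDetails): Record<string, number> {
  const totals: Record<string, number> = {};
  for (const { modality, tokenCount } of details ?? []) {
    totals[modality] = (totals[modality] ?? 0) + tokenCount;
  }
  return totals;
}

// summarizeModalities([
//   { modality: 'TEXT', tokenCount: 42 },
//   { modality: 'IMAGE', tokenCount: 258 },
// ]) => { TEXT: 42, IMAGE: 258 }
```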
package/dist/index.d.ts CHANGED
@@ -39,7 +39,7 @@ declare const googleGenerativeAIProviderOptions: _ai_sdk_provider_utils.LazySche
  longitude: number;
  } | undefined;
  } | undefined;
- serviceTier?: "SERVICE_TIER_STANDARD" | "SERVICE_TIER_FLEX" | "SERVICE_TIER_PRIORITY" | undefined;
+ serviceTier?: "standard" | "flex" | "priority" | undefined;
  }>;
  type GoogleGenerativeAIProviderOptions = InferValidator<typeof googleGenerativeAIProviderOptions>;

@@ -141,6 +141,14 @@ declare const responseSchema: _ai_sdk_provider_utils.LazySchema<{
  candidatesTokenCount?: number | null | undefined;
  totalTokenCount?: number | null | undefined;
  trafficType?: string | null | undefined;
+ promptTokensDetails?: {
+   modality: string;
+   tokenCount: number;
+ }[] | null | undefined;
+ candidatesTokensDetails?: {
+   modality: string;
+   tokenCount: number;
+ }[] | null | undefined;
  } | null | undefined;
  promptFeedback?: {
  blockReason?: string | null | undefined;
package/dist/index.js CHANGED
@@ -30,7 +30,7 @@ module.exports = __toCommonJS(index_exports);
  var import_provider_utils15 = require("@ai-sdk/provider-utils");

  // src/version.ts
- var VERSION = true ? "2.0.65" : "0.0.0-test";
+ var VERSION = true ? "2.0.67" : "0.0.0-test";

  // src/google-generative-ai-embedding-model.ts
  var import_provider = require("@ai-sdk/provider");
@@ -699,11 +699,7 @@ var googleGenerativeAIProviderOptions = (0, import_provider_utils5.lazySchema)(
  /**
  * Optional. The service tier to use for the request.
  */
- serviceTier: import_v44.z.enum([
-   "SERVICE_TIER_STANDARD",
-   "SERVICE_TIER_FLEX",
-   "SERVICE_TIER_PRIORITY"
- ]).optional()
+ serviceTier: import_v44.z.enum(["standard", "flex", "priority"]).optional()
  })
  )
  );
@@ -1623,6 +1619,12 @@ var getSafetyRatingSchema = () => import_v45.z.object({
  severityScore: import_v45.z.number().nullish(),
  blocked: import_v45.z.boolean().nullish()
  });
+ var tokenDetailsSchema = import_v45.z.array(
+   import_v45.z.object({
+     modality: import_v45.z.string(),
+     tokenCount: import_v45.z.number()
+   })
+ ).nullish();
  var usageSchema = import_v45.z.object({
  cachedContentTokenCount: import_v45.z.number().nullish(),
  thoughtsTokenCount: import_v45.z.number().nullish(),
@@ -1630,7 +1632,10 @@ var usageSchema = import_v45.z.object({
  candidatesTokenCount: import_v45.z.number().nullish(),
  totalTokenCount: import_v45.z.number().nullish(),
  // https://cloud.google.com/vertex-ai/generative-ai/docs/reference/rest/v1/GenerateContentResponse#TrafficType
- trafficType: import_v45.z.string().nullish()
+ trafficType: import_v45.z.string().nullish(),
+ // https://ai.google.dev/api/generate-content#Modality
+ promptTokensDetails: tokenDetailsSchema,
+ candidatesTokensDetails: tokenDetailsSchema
  });
  var getUrlContextMetadataSchema = () => import_v45.z.object({
  urlMetadata: import_v45.z.array(
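To restate the schema change above in standalone form: the detail arrays were previously not declared on `usageSchema`, and a zod object parse strips unknown keys by default, so Gemini's per-modality breakdown was dropped; declaring the fields preserves it. A minimal sketch against plain `zod`, trimmed to the fields this diff shows:

```ts
import { z } from 'zod';

const tokenDetailsSchema = z
  .array(z.object({ modality: z.string(), tokenCount: z.number() }))
  .nullish();

const usageSchema = z.object({
  candidatesTokenCount: z.number().nullish(),
  totalTokenCount: z.number().nullish(),
  promptTokensDetails: tokenDetailsSchema,
  candidatesTokensDetails: tokenDetailsSchema,
});

// The per-modality arrays now survive parsing instead of being
// stripped as unknown keys:
usageSchema.parse({
  candidatesTokenCount: 268,
  totalTokenCount: 310,
  promptTokensDetails: [{ modality: 'TEXT', tokenCount: 42 }],
  candidatesTokensDetails: [
    { modality: 'TEXT', tokenCount: 10 },
    { modality: 'IMAGE', tokenCount: 258 },
  ],
});
```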