@ai-sdk/provider 3.0.0-beta.24 → 3.0.0-beta.26

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md CHANGED
@@ -1,5 +1,17 @@
1
1
  # @ai-sdk/provider
2
2
 
3
+ ## 3.0.0-beta.26
4
+
5
+ ### Patch Changes
6
+
7
+ - 3bd2689: feat: extended token usage
8
+
9
+ ## 3.0.0-beta.25
10
+
11
+ ### Patch Changes
12
+
13
+ - 53f3368: feat(provider): support embedding model call warnings in specification
14
+
3
15
  ## 3.0.0-beta.24
4
16
 
5
17
  ### Patch Changes
package/dist/index.d.mts CHANGED
@@ -247,6 +247,10 @@ type EmbeddingModelV3 = {
247
247
  */
248
248
  body?: unknown;
249
249
  };
250
+ /**
251
+ Warnings for the call, e.g. unsupported settings.
252
+ */
253
+ warnings: Array<SharedV3Warning>;
250
254
  }>;
251
255
  };
252
256
 
@@ -1599,34 +1603,54 @@ interface LanguageModelV3ResponseMetadata {
1599
1603
  }
1600
1604
 
1601
1605
  /**
1602
- Usage information for a language model call.
1603
-
1604
- If your API return additional usage information, you can add it to the
1605
- provider metadata under your provider's key.
1606
+ * Usage information for a language model call.
1606
1607
  */
1607
1608
  type LanguageModelV3Usage = {
1608
1609
  /**
1609
- The number of input (prompt) tokens used.
1610
+ * Information about the input tokens.
1610
1611
  */
1611
- inputTokens: number | undefined;
1612
- /**
1613
- The number of output (completion) tokens used.
1614
- */
1615
- outputTokens: number | undefined;
1616
- /**
1617
- The total number of tokens as reported by the provider.
1618
- This number might be different from the sum of `inputTokens` and `outputTokens`
1619
- and e.g. include reasoning tokens or other overhead.
1620
- */
1621
- totalTokens: number | undefined;
1612
+ inputTokens: {
1613
+ /**
1614
+ * The total number of input (prompt) tokens used.
1615
+ */
1616
+ total: number | undefined;
1617
+ /**
1618
+ * The number of non-cached input (prompt) tokens used.
1619
+ */
1620
+ noCache: number | undefined;
1621
+ /**
1622
+ * The number of cached input (prompt) tokens read.
1623
+ */
1624
+ cacheRead: number | undefined;
1625
+ /**
1626
+ * The number of cached input (prompt) tokens written.
1627
+ */
1628
+ cacheWrite: number | undefined;
1629
+ };
1622
1630
  /**
1623
- The number of reasoning tokens used.
1631
+ * Information about the output tokens.
1624
1632
  */
1625
- reasoningTokens?: number | undefined;
1633
+ outputTokens: {
1634
+ /**
1635
+ * The total number of output (completion) tokens used.
1636
+ */
1637
+ total: number | undefined;
1638
+ /**
1639
+ * The number of text tokens used.
1640
+ */
1641
+ text: number | undefined;
1642
+ /**
1643
+ * The number of reasoning tokens used.
1644
+ */
1645
+ reasoning: number | undefined;
1646
+ };
1626
1647
  /**
1627
- The number of cached input tokens.
1648
+ * Raw usage information from the provider.
1649
+ *
1650
+ * This is the usage information in the shape that the provider returns.
1651
+ * It can include additional information that is not part of the standard usage information.
1628
1652
  */
1629
- cachedInputTokens?: number | undefined;
1653
+ raw?: JSONObject;
1630
1654
  };
1631
1655
 
1632
1656
  type LanguageModelV3StreamPart = {
package/dist/index.d.ts CHANGED
@@ -247,6 +247,10 @@ type EmbeddingModelV3 = {
247
247
  */
248
248
  body?: unknown;
249
249
  };
250
+ /**
251
+ Warnings for the call, e.g. unsupported settings.
252
+ */
253
+ warnings: Array<SharedV3Warning>;
250
254
  }>;
251
255
  };
252
256
 
@@ -1599,34 +1603,54 @@ interface LanguageModelV3ResponseMetadata {
1599
1603
  }
1600
1604
 
1601
1605
  /**
1602
- Usage information for a language model call.
1603
-
1604
- If your API return additional usage information, you can add it to the
1605
- provider metadata under your provider's key.
1606
+ * Usage information for a language model call.
1606
1607
  */
1607
1608
  type LanguageModelV3Usage = {
1608
1609
  /**
1609
- The number of input (prompt) tokens used.
1610
+ * Information about the input tokens.
1610
1611
  */
1611
- inputTokens: number | undefined;
1612
- /**
1613
- The number of output (completion) tokens used.
1614
- */
1615
- outputTokens: number | undefined;
1616
- /**
1617
- The total number of tokens as reported by the provider.
1618
- This number might be different from the sum of `inputTokens` and `outputTokens`
1619
- and e.g. include reasoning tokens or other overhead.
1620
- */
1621
- totalTokens: number | undefined;
1612
+ inputTokens: {
1613
+ /**
1614
+ * The total number of input (prompt) tokens used.
1615
+ */
1616
+ total: number | undefined;
1617
+ /**
1618
+ * The number of non-cached input (prompt) tokens used.
1619
+ */
1620
+ noCache: number | undefined;
1621
+ /**
1622
+ * The number of cached input (prompt) tokens read.
1623
+ */
1624
+ cacheRead: number | undefined;
1625
+ /**
1626
+ * The number of cached input (prompt) tokens written.
1627
+ */
1628
+ cacheWrite: number | undefined;
1629
+ };
1622
1630
  /**
1623
- The number of reasoning tokens used.
1631
+ * Information about the output tokens.
1624
1632
  */
1625
- reasoningTokens?: number | undefined;
1633
+ outputTokens: {
1634
+ /**
1635
+ * The total number of output (completion) tokens used.
1636
+ */
1637
+ total: number | undefined;
1638
+ /**
1639
+ * The number of text tokens used.
1640
+ */
1641
+ text: number | undefined;
1642
+ /**
1643
+ * The number of reasoning tokens used.
1644
+ */
1645
+ reasoning: number | undefined;
1646
+ };
1626
1647
  /**
1627
- The number of cached input tokens.
1648
+ * Raw usage information from the provider.
1649
+ *
1650
+ * This is the usage information in the shape that the provider returns.
1651
+ * It can include additional information that is not part of the standard usage information.
1628
1652
  */
1629
- cachedInputTokens?: number | undefined;
1653
+ raw?: JSONObject;
1630
1654
  };
1631
1655
 
1632
1656
  type LanguageModelV3StreamPart = {
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@ai-sdk/provider",
3
- "version": "3.0.0-beta.24",
3
+ "version": "3.0.0-beta.26",
4
4
  "license": "Apache-2.0",
5
5
  "sideEffects": false,
6
6
  "main": "./dist/index.js",