@ai-sdk/provider 3.0.0-beta.25 → 3.0.0-beta.26

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md CHANGED
@@ -1,5 +1,11 @@
1
1
  # @ai-sdk/provider
2
2
 
3
+ ## 3.0.0-beta.26
4
+
5
+ ### Patch Changes
6
+
7
+ - 3bd2689: feat: extended token usage
8
+
3
9
  ## 3.0.0-beta.25
4
10
 
5
11
  ### Patch Changes
package/dist/index.d.mts CHANGED
@@ -1603,34 +1603,54 @@ interface LanguageModelV3ResponseMetadata {
1603
1603
  }
1604
1604
 
1605
1605
  /**
1606
- Usage information for a language model call.
1607
-
1608
- If your API return additional usage information, you can add it to the
1609
- provider metadata under your provider's key.
1606
+ * Usage information for a language model call.
1610
1607
  */
1611
1608
  type LanguageModelV3Usage = {
1612
1609
  /**
1613
- The number of input (prompt) tokens used.
1614
- */
1615
- inputTokens: number | undefined;
1616
- /**
1617
- The number of output (completion) tokens used.
1610
+ * Information about the input tokens.
1618
1611
  */
1619
- outputTokens: number | undefined;
1620
- /**
1621
- The total number of tokens as reported by the provider.
1622
- This number might be different from the sum of `inputTokens` and `outputTokens`
1623
- and e.g. include reasoning tokens or other overhead.
1624
- */
1625
- totalTokens: number | undefined;
1612
+ inputTokens: {
1613
+ /**
1614
+ * The total number of input (prompt) tokens used.
1615
+ */
1616
+ total: number | undefined;
1617
+ /**
1618
+ * The number of non-cached input (prompt) tokens used.
1619
+ */
1620
+ noCache: number | undefined;
1621
+ /**
1622
+ * The number of cached input (prompt) tokens read.
1623
+ */
1624
+ cacheRead: number | undefined;
1625
+ /**
1626
+ * The number of cached input (prompt) tokens written.
1627
+ */
1628
+ cacheWrite: number | undefined;
1629
+ };
1626
1630
  /**
1627
- The number of reasoning tokens used.
1631
+ * Information about the output tokens.
1628
1632
  */
1629
- reasoningTokens?: number | undefined;
1633
+ outputTokens: {
1634
+ /**
1635
+ * The total number of output (completion) tokens used.
1636
+ */
1637
+ total: number | undefined;
1638
+ /**
1639
+ * The number of text tokens used.
1640
+ */
1641
+ text: number | undefined;
1642
+ /**
1643
+ * The number of reasoning tokens used.
1644
+ */
1645
+ reasoning: number | undefined;
1646
+ };
1630
1647
  /**
1631
- The number of cached input tokens.
1648
+ * Raw usage information from the provider.
1649
+ *
1650
+ * This is the usage information in the shape that the provider returns.
1651
+ * It can include additional information that is not part of the standard usage information.
1632
1652
  */
1633
- cachedInputTokens?: number | undefined;
1653
+ raw?: JSONObject;
1634
1654
  };
1635
1655
 
1636
1656
  type LanguageModelV3StreamPart = {
package/dist/index.d.ts CHANGED
@@ -1603,34 +1603,54 @@ interface LanguageModelV3ResponseMetadata {
1603
1603
  }
1604
1604
 
1605
1605
  /**
1606
- Usage information for a language model call.
1607
-
1608
- If your API return additional usage information, you can add it to the
1609
- provider metadata under your provider's key.
1606
+ * Usage information for a language model call.
1610
1607
  */
1611
1608
  type LanguageModelV3Usage = {
1612
1609
  /**
1613
- The number of input (prompt) tokens used.
1614
- */
1615
- inputTokens: number | undefined;
1616
- /**
1617
- The number of output (completion) tokens used.
1610
+ * Information about the input tokens.
1618
1611
  */
1619
- outputTokens: number | undefined;
1620
- /**
1621
- The total number of tokens as reported by the provider.
1622
- This number might be different from the sum of `inputTokens` and `outputTokens`
1623
- and e.g. include reasoning tokens or other overhead.
1624
- */
1625
- totalTokens: number | undefined;
1612
+ inputTokens: {
1613
+ /**
1614
+ * The total number of input (prompt) tokens used.
1615
+ */
1616
+ total: number | undefined;
1617
+ /**
1618
+ * The number of non-cached input (prompt) tokens used.
1619
+ */
1620
+ noCache: number | undefined;
1621
+ /**
1622
+ * The number of cached input (prompt) tokens read.
1623
+ */
1624
+ cacheRead: number | undefined;
1625
+ /**
1626
+ * The number of cached input (prompt) tokens written.
1627
+ */
1628
+ cacheWrite: number | undefined;
1629
+ };
1626
1630
  /**
1627
- The number of reasoning tokens used.
1631
+ * Information about the output tokens.
1628
1632
  */
1629
- reasoningTokens?: number | undefined;
1633
+ outputTokens: {
1634
+ /**
1635
+ * The total number of output (completion) tokens used.
1636
+ */
1637
+ total: number | undefined;
1638
+ /**
1639
+ * The number of text tokens used.
1640
+ */
1641
+ text: number | undefined;
1642
+ /**
1643
+ * The number of reasoning tokens used.
1644
+ */
1645
+ reasoning: number | undefined;
1646
+ };
1630
1647
  /**
1631
- The number of cached input tokens.
1648
+ * Raw usage information from the provider.
1649
+ *
1650
+ * This is the usage information in the shape that the provider returns.
1651
+ * It can include additional information that is not part of the standard usage information.
1632
1652
  */
1633
- cachedInputTokens?: number | undefined;
1653
+ raw?: JSONObject;
1634
1654
  };
1635
1655
 
1636
1656
  type LanguageModelV3StreamPart = {
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@ai-sdk/provider",
3
- "version": "3.0.0-beta.25",
3
+ "version": "3.0.0-beta.26",
4
4
  "license": "Apache-2.0",
5
5
  "sideEffects": false,
6
6
  "main": "./dist/index.js",