@ai-sdk/provider 3.0.0-beta.25 → 3.0.0-beta.27

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md CHANGED
@@ -1,5 +1,17 @@
1
1
  # @ai-sdk/provider
2
2
 
3
+ ## 3.0.0-beta.27
4
+
5
+ ### Patch Changes
6
+
7
+ - 366f50b: chore(provider): add deprecated textEmbeddingModel and textEmbedding aliases
8
+
9
+ ## 3.0.0-beta.26
10
+
11
+ ### Patch Changes
12
+
13
+ - 3bd2689: feat: extended token usage
14
+
3
15
  ## 3.0.0-beta.25
4
16
 
5
17
  ### Patch Changes
package/dist/index.d.mts CHANGED
@@ -1603,34 +1603,54 @@ interface LanguageModelV3ResponseMetadata {
1603
1603
  }
1604
1604
 
1605
1605
  /**
1606
- Usage information for a language model call.
1607
-
1608
- If your API return additional usage information, you can add it to the
1609
- provider metadata under your provider's key.
1606
+ * Usage information for a language model call.
1610
1607
  */
1611
1608
  type LanguageModelV3Usage = {
1612
1609
  /**
1613
- The number of input (prompt) tokens used.
1614
- */
1615
- inputTokens: number | undefined;
1616
- /**
1617
- The number of output (completion) tokens used.
1618
- */
1619
- outputTokens: number | undefined;
1620
- /**
1621
- The total number of tokens as reported by the provider.
1622
- This number might be different from the sum of `inputTokens` and `outputTokens`
1623
- and e.g. include reasoning tokens or other overhead.
1610
+ * Information about the input tokens.
1624
1611
  */
1625
- totalTokens: number | undefined;
1612
+ inputTokens: {
1613
+ /**
1614
+ * The total number of input (prompt) tokens used.
1615
+ */
1616
+ total: number | undefined;
1617
+ /**
1618
+ * The number of non-cached input (prompt) tokens used.
1619
+ */
1620
+ noCache: number | undefined;
1621
+ /**
1622
+ * The number of cached input (prompt) tokens read.
1623
+ */
1624
+ cacheRead: number | undefined;
1625
+ /**
1626
+ * The number of cached input (prompt) tokens written.
1627
+ */
1628
+ cacheWrite: number | undefined;
1629
+ };
1626
1630
  /**
1627
- The number of reasoning tokens used.
1631
+ * Information about the output tokens.
1628
1632
  */
1629
- reasoningTokens?: number | undefined;
1633
+ outputTokens: {
1634
+ /**
1635
+ * The total number of output (completion) tokens used.
1636
+ */
1637
+ total: number | undefined;
1638
+ /**
1639
+ * The number of text tokens used.
1640
+ */
1641
+ text: number | undefined;
1642
+ /**
1643
+ * The number of reasoning tokens used.
1644
+ */
1645
+ reasoning: number | undefined;
1646
+ };
1630
1647
  /**
1631
- The number of cached input tokens.
1648
+ * Raw usage information from the provider.
1649
+ *
1650
+ * This is the usage information in the shape that the provider returns.
1651
+ * It can include additional information that is not part of the standard usage information.
1632
1652
  */
1633
- cachedInputTokens?: number | undefined;
1653
+ raw?: JSONObject;
1634
1654
  };
1635
1655
 
1636
1656
  type LanguageModelV3StreamPart = {
@@ -3190,6 +3210,19 @@ interface ProviderV3 {
3190
3210
  */
3191
3211
  embeddingModel(modelId: string): EmbeddingModelV3;
3192
3212
  /**
3213
+ Returns the text embedding model with the given id.
3214
+ The model id is then passed to the provider function to get the model.
3215
+
3216
+ @param {string} modelId - The id of the model to return.
3217
+
3218
+ @returns {EmbeddingModelV3} The embedding model associated with the id.
3219
+
3220
+ @throws {NoSuchModelError} If no such model exists.
3221
+
3222
+ @deprecated Use `embeddingModel` instead.
3223
+ */
3224
+ textEmbeddingModel?(modelId: string): EmbeddingModelV3;
3225
+ /**
3193
3226
  Returns the image model with the given id.
3194
3227
  The model id is then passed to the provider function to get the model.
3195
3228
 
package/dist/index.d.ts CHANGED
@@ -1603,34 +1603,54 @@ interface LanguageModelV3ResponseMetadata {
1603
1603
  }
1604
1604
 
1605
1605
  /**
1606
- Usage information for a language model call.
1607
-
1608
- If your API return additional usage information, you can add it to the
1609
- provider metadata under your provider's key.
1606
+ * Usage information for a language model call.
1610
1607
  */
1611
1608
  type LanguageModelV3Usage = {
1612
1609
  /**
1613
- The number of input (prompt) tokens used.
1614
- */
1615
- inputTokens: number | undefined;
1616
- /**
1617
- The number of output (completion) tokens used.
1618
- */
1619
- outputTokens: number | undefined;
1620
- /**
1621
- The total number of tokens as reported by the provider.
1622
- This number might be different from the sum of `inputTokens` and `outputTokens`
1623
- and e.g. include reasoning tokens or other overhead.
1610
+ * Information about the input tokens.
1624
1611
  */
1625
- totalTokens: number | undefined;
1612
+ inputTokens: {
1613
+ /**
1614
+ * The total number of input (prompt) tokens used.
1615
+ */
1616
+ total: number | undefined;
1617
+ /**
1618
+ * The number of non-cached input (prompt) tokens used.
1619
+ */
1620
+ noCache: number | undefined;
1621
+ /**
1622
+ * The number of cached input (prompt) tokens read.
1623
+ */
1624
+ cacheRead: number | undefined;
1625
+ /**
1626
+ * The number of cached input (prompt) tokens written.
1627
+ */
1628
+ cacheWrite: number | undefined;
1629
+ };
1626
1630
  /**
1627
- The number of reasoning tokens used.
1631
+ * Information about the output tokens.
1628
1632
  */
1629
- reasoningTokens?: number | undefined;
1633
+ outputTokens: {
1634
+ /**
1635
+ * The total number of output (completion) tokens used.
1636
+ */
1637
+ total: number | undefined;
1638
+ /**
1639
+ * The number of text tokens used.
1640
+ */
1641
+ text: number | undefined;
1642
+ /**
1643
+ * The number of reasoning tokens used.
1644
+ */
1645
+ reasoning: number | undefined;
1646
+ };
1630
1647
  /**
1631
- The number of cached input tokens.
1648
+ * Raw usage information from the provider.
1649
+ *
1650
+ * This is the usage information in the shape that the provider returns.
1651
+ * It can include additional information that is not part of the standard usage information.
1632
1652
  */
1633
- cachedInputTokens?: number | undefined;
1653
+ raw?: JSONObject;
1634
1654
  };
1635
1655
 
1636
1656
  type LanguageModelV3StreamPart = {
@@ -3190,6 +3210,19 @@ interface ProviderV3 {
3190
3210
  */
3191
3211
  embeddingModel(modelId: string): EmbeddingModelV3;
3192
3212
  /**
3213
+ Returns the text embedding model with the given id.
3214
+ The model id is then passed to the provider function to get the model.
3215
+
3216
+ @param {string} modelId - The id of the model to return.
3217
+
3218
+ @returns {EmbeddingModelV3} The embedding model associated with the id.
3219
+
3220
+ @throws {NoSuchModelError} If no such model exists.
3221
+
3222
+ @deprecated Use `embeddingModel` instead.
3223
+ */
3224
+ textEmbeddingModel?(modelId: string): EmbeddingModelV3;
3225
+ /**
3193
3226
  Returns the image model with the given id.
3194
3227
  The model id is then passed to the provider function to get the model.
3195
3228
 
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@ai-sdk/provider",
3
- "version": "3.0.0-beta.25",
3
+ "version": "3.0.0-beta.27",
4
4
  "license": "Apache-2.0",
5
5
  "sideEffects": false,
6
6
  "main": "./dist/index.js",