@ai-sdk/mistral 0.0.0-70e0935a-20260114150030 → 0.0.0-98261322-20260122142521

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md CHANGED
@@ -1,12 +1,39 @@
1
1
  # @ai-sdk/mistral
2
2
 
3
- ## 0.0.0-70e0935a-20260114150030
3
+ ## 0.0.0-98261322-20260122142521
4
4
 
5
5
  ### Patch Changes
6
6
 
7
- - Updated dependencies [9fbe723]
8
- - @ai-sdk/provider-utils@0.0.0-70e0935a-20260114150030
9
- - @ai-sdk/provider@0.0.0-70e0935a-20260114150030
7
+ - 080559b: chore: add docs to package dist
8
+
9
+ ## 3.0.10
10
+
11
+ ### Patch Changes
12
+
13
+ - 8dc54db: chore: add src folders to package bundle
14
+
15
+ ## 3.0.9
16
+
17
+ ### Patch Changes
18
+
19
+ - Updated dependencies [5c090e7]
20
+ - @ai-sdk/provider@3.0.4
21
+ - @ai-sdk/provider-utils@4.0.8
22
+
23
+ ## 3.0.8
24
+
25
+ ### Patch Changes
26
+
27
+ - Updated dependencies [46f46e4]
28
+ - @ai-sdk/provider-utils@4.0.7
29
+
30
+ ## 3.0.7
31
+
32
+ ### Patch Changes
33
+
34
+ - Updated dependencies [1b11dcb]
35
+ - @ai-sdk/provider-utils@4.0.6
36
+ - @ai-sdk/provider@3.0.3
10
37
 
11
38
  ## 3.0.6
12
39
 
package/dist/index.js CHANGED
@@ -828,7 +828,7 @@ var MistralTextEmbeddingResponseSchema = import_v44.z.object({
828
828
  });
829
829
 
830
830
  // src/version.ts
831
- var VERSION = true ? "0.0.0-70e0935a-20260114150030" : "0.0.0-test";
831
+ var VERSION = true ? "0.0.0-98261322-20260122142521" : "0.0.0-test";
832
832
 
833
833
  // src/mistral-provider.ts
834
834
  function createMistral(options = {}) {
package/dist/index.mjs CHANGED
@@ -824,7 +824,7 @@ var MistralTextEmbeddingResponseSchema = z4.object({
824
824
  });
825
825
 
826
826
  // src/version.ts
827
- var VERSION = true ? "0.0.0-70e0935a-20260114150030" : "0.0.0-test";
827
+ var VERSION = true ? "0.0.0-98261322-20260122142521" : "0.0.0-test";
828
828
 
829
829
  // src/mistral-provider.ts
830
830
  function createMistral(options = {}) {
@@ -0,0 +1,327 @@
1
+ ---
2
+ title: Mistral AI
3
+ description: Learn how to use Mistral.
4
+ ---
5
+
6
+ # Mistral AI Provider
7
+
8
+ The [Mistral AI](https://mistral.ai/) provider contains language model support for the Mistral chat API.
9
+
10
+ ## Setup
11
+
12
+ The Mistral provider is available in the `@ai-sdk/mistral` module. You can install it with:
13
+
14
+ <Tabs items={['pnpm', 'npm', 'yarn', 'bun']}>
15
+ <Tab>
16
+ <Snippet text="pnpm add @ai-sdk/mistral" dark />
17
+ </Tab>
18
+ <Tab>
19
+ <Snippet text="npm install @ai-sdk/mistral" dark />
20
+ </Tab>
21
+ <Tab>
22
+ <Snippet text="yarn add @ai-sdk/mistral" dark />
23
+ </Tab>
24
+
25
+ <Tab>
26
+ <Snippet text="bun add @ai-sdk/mistral" dark />
27
+ </Tab>
28
+ </Tabs>
29
+
30
+ ## Provider Instance
31
+
32
+ You can import the default provider instance `mistral` from `@ai-sdk/mistral`:
33
+
34
+ ```ts
35
+ import { mistral } from '@ai-sdk/mistral';
36
+ ```
37
+
38
+ If you need a customized setup, you can import `createMistral` from `@ai-sdk/mistral`
39
+ and create a provider instance with your settings:
40
+
41
+ ```ts
42
+ import { createMistral } from '@ai-sdk/mistral';
43
+
44
+ const mistral = createMistral({
45
+ // custom settings
46
+ });
47
+ ```
48
+
49
+ You can use the following optional settings to customize the Mistral provider instance:
50
+
51
+ - **baseURL** _string_
52
+
53
+ Use a different URL prefix for API calls, e.g. to use proxy servers.
54
+ The default prefix is `https://api.mistral.ai/v1`.
55
+
56
+ - **apiKey** _string_
57
+
58
+ API key that is being sent using the `Authorization` header.
59
+ It defaults to the `MISTRAL_API_KEY` environment variable.
60
+
61
+ - **headers** _Record&lt;string,string&gt;_
62
+
63
+ Custom headers to include in the requests.
64
+
65
+ - **fetch** _(input: RequestInfo, init?: RequestInit) => Promise&lt;Response&gt;_
66
+
67
+ Custom [fetch](https://developer.mozilla.org/en-US/docs/Web/API/fetch) implementation.
68
+ Defaults to the global `fetch` function.
69
+ You can use it as a middleware to intercept requests,
70
+ or to provide a custom fetch implementation for e.g. testing.
71
+
72
+ ## Language Models
73
+
74
+ You can create models that call the [Mistral chat API](https://docs.mistral.ai/api/#operation/createChatCompletion) using a provider instance.
75
+ The first argument is the model id, e.g. `mistral-large-latest`.
76
+ Some Mistral chat models support tool calls.
77
+
78
+ ```ts
79
+ const model = mistral('mistral-large-latest');
80
+ ```
81
+
82
+ Mistral chat models also support additional model settings that are not part of the [standard call settings](/docs/ai-sdk-core/settings).
83
+ You can pass them as an options argument and utilize `MistralLanguageModelOptions` for typing:
84
+
85
+ ```ts
86
+ import { mistral, type MistralLanguageModelOptions } from '@ai-sdk/mistral';
87
+ const model = mistral('mistral-large-latest');
88
+
89
+ await generateText({
90
+ model,
91
+ providerOptions: {
92
+ mistral: {
93
+ safePrompt: true, // optional safety prompt injection
94
+ parallelToolCalls: false, // disable parallel tool calls (one tool per response)
95
+ } satisfies MistralLanguageModelOptions,
96
+ },
97
+ });
98
+ ```
99
+
100
+ The following optional provider options are available for Mistral models:
101
+
102
+ - **safePrompt** _boolean_
103
+
104
+ Whether to inject a safety prompt before all conversations.
105
+
106
+ Defaults to `false`.
107
+
108
+ - **documentImageLimit** _number_
109
+
110
+ Maximum number of images to process in a document.
111
+
112
+ - **documentPageLimit** _number_
113
+
114
+ Maximum number of pages to process in a document.
115
+
116
+ - **strictJsonSchema** _boolean_
117
+
118
+ Whether to use strict JSON schema validation for structured outputs. Only applies when a schema is provided and only sets the [`strict` flag](https://docs.mistral.ai/api/#tag/chat/operation/chat_completion_v1_chat_completions_post) in addition to using [Custom Structured Outputs](https://docs.mistral.ai/capabilities/structured-output/custom_structured_output/), which is used by default if a schema is provided.
119
+
120
+ Defaults to `false`.
121
+
122
+ - **structuredOutputs** _boolean_
123
+
124
+ Whether to use [structured outputs](#structured-outputs). When enabled, tool calls and object generation will be strict and follow the provided schema.
125
+
126
+ Defaults to `true`.
127
+
128
+ - **parallelToolCalls** _boolean_
129
+
130
+ Whether to enable parallel function calling during tool use. When set to false, the model will use at most one tool per response.
131
+
132
+ Defaults to `true`.
133
+
134
+ ### Document OCR
135
+
136
+ Mistral chat models support document OCR for PDF files.
137
+ You can optionally set image and page limits using the provider options.
138
+
139
+ ```ts
140
+ const result = await generateText({
141
+ model: mistral('mistral-small-latest'),
142
+ messages: [
143
+ {
144
+ role: 'user',
145
+ content: [
146
+ {
147
+ type: 'text',
148
+ text: 'What is an embedding model according to this document?',
149
+ },
150
+ {
151
+ type: 'file',
152
+ data: new URL(
153
+ 'https://github.com/vercel/ai/blob/main/examples/ai-functions/data/ai.pdf?raw=true',
154
+ ),
155
+ mediaType: 'application/pdf',
156
+ },
157
+ ],
158
+ },
159
+ ],
160
+ // optional settings:
161
+ providerOptions: {
162
+ mistral: {
163
+ documentImageLimit: 8,
164
+ documentPageLimit: 64,
165
+ },
166
+ },
167
+ });
168
+ ```
169
+
170
+ ### Reasoning Models
171
+
172
+ Mistral offers reasoning models that provide step-by-step thinking capabilities:
173
+
174
+ - **magistral-small-2506**: Smaller reasoning model for efficient step-by-step thinking
175
+ - **magistral-medium-2506**: More powerful reasoning model balancing performance and cost
176
+
177
+ These models return content that includes `<think>...</think>` tags containing the reasoning process. To properly extract and separate the reasoning from the final answer, use the [extract reasoning middleware](/docs/reference/ai-sdk-core/extract-reasoning-middleware):
178
+
179
+ ```ts
180
+ import { mistral } from '@ai-sdk/mistral';
181
+ import {
182
+ extractReasoningMiddleware,
183
+ generateText,
184
+ wrapLanguageModel,
185
+ } from 'ai';
186
+
187
+ const result = await generateText({
188
+ model: wrapLanguageModel({
189
+ model: mistral('magistral-small-2506'),
190
+ middleware: extractReasoningMiddleware({
191
+ tagName: 'think',
192
+ }),
193
+ }),
194
+ prompt: 'What is 15 * 24?',
195
+ });
196
+
197
+ console.log('REASONING:', result.reasoningText);
198
+ // Output: "Let me calculate this step by step..."
199
+
200
+ console.log('ANSWER:', result.text);
201
+ // Output: "360"
202
+ ```
203
+
204
+ The middleware automatically parses the `<think>` tags and provides separate `reasoningText` and `text` properties in the result.
205
+
206
+ ### Example
207
+
208
+ You can use Mistral language models to generate text with the `generateText` function:
209
+
210
+ ```ts
211
+ import { mistral } from '@ai-sdk/mistral';
212
+ import { generateText } from 'ai';
213
+
214
+ const { text } = await generateText({
215
+ model: mistral('mistral-large-latest'),
216
+ prompt: 'Write a vegetarian lasagna recipe for 4 people.',
217
+ });
218
+ ```
219
+
220
+ Mistral language models can also be used in the `streamText`, `generateObject`, and `streamObject` functions
221
+ (see [AI SDK Core](/docs/ai-sdk-core)).
222
+
223
+ #### Structured Outputs
224
+
225
+ Mistral chat models support structured outputs using JSON Schema. You can use `generateObject` or `streamObject`
226
+ with Zod, Valibot, or raw JSON Schema. The SDK sends your schema via Mistral's `response_format: { type: 'json_schema' }`.
227
+
228
+ ```ts
229
+ import { mistral } from '@ai-sdk/mistral';
230
+ import { generateObject } from 'ai';
231
+ import { z } from 'zod';
232
+
233
+ const result = await generateObject({
234
+ model: mistral('mistral-large-latest'),
235
+ schema: z.object({
236
+ recipe: z.object({
237
+ name: z.string(),
238
+ ingredients: z.array(z.string()),
239
+ instructions: z.array(z.string()),
240
+ }),
241
+ }),
242
+ prompt: 'Generate a simple pasta recipe.',
243
+ });
244
+
245
+ console.log(JSON.stringify(result.object, null, 2));
246
+ ```
247
+
248
+ You can enable strict JSON Schema validation using a provider option:
249
+
250
+ ```ts highlight="7-11"
251
+ import { mistral } from '@ai-sdk/mistral';
252
+ import { generateObject } from 'ai';
253
+ import { z } from 'zod';
254
+
255
+ const result = await generateObject({
256
+ model: mistral('mistral-large-latest'),
257
+ providerOptions: {
258
+ mistral: {
259
+ strictJsonSchema: true, // reject outputs that don't strictly match the schema
260
+ },
261
+ },
262
+ schema: z.object({
263
+ title: z.string(),
264
+ items: z.array(z.object({ id: z.string(), qty: z.number().int().min(1) })),
265
+ }),
266
+ prompt: 'Generate a small shopping list.',
267
+ });
268
+ ```
269
+
270
+ <Note>
271
+ When using structured outputs, the SDK no longer injects an extra "answer with
272
+ JSON" instruction. It relies on Mistral's native `json_schema`/`json_object`
273
+ response formats instead. You can customize the schema name/description via
274
+ the standard structured-output APIs.
275
+ </Note>
276
+
277
+ ### Model Capabilities
278
+
279
+ | Model | Image Input | Object Generation | Tool Usage | Tool Streaming |
280
+ | ----------------------- | ------------------- | ------------------- | ------------------- | ------------------- |
281
+ | `pixtral-large-latest` | <Check size={18} /> | <Check size={18} /> | <Check size={18} /> | <Check size={18} /> |
282
+ | `mistral-large-latest` | <Cross size={18} /> | <Check size={18} /> | <Check size={18} /> | <Check size={18} /> |
283
+ | `mistral-medium-latest` | <Cross size={18} /> | <Check size={18} /> | <Check size={18} /> | <Check size={18} /> |
284
+ | `mistral-medium-2505` | <Cross size={18} /> | <Check size={18} /> | <Check size={18} /> | <Check size={18} /> |
285
+ | `mistral-small-latest` | <Cross size={18} /> | <Check size={18} /> | <Check size={18} /> | <Check size={18} /> |
286
+ | `magistral-small-2506` | <Cross size={18} /> | <Check size={18} /> | <Check size={18} /> | <Check size={18} /> |
287
+ | `magistral-medium-2506` | <Cross size={18} /> | <Check size={18} /> | <Check size={18} /> | <Check size={18} /> |
288
+ | `ministral-3b-latest` | <Cross size={18} /> | <Check size={18} /> | <Check size={18} /> | <Check size={18} /> |
289
+ | `ministral-8b-latest` | <Cross size={18} /> | <Check size={18} /> | <Check size={18} /> | <Check size={18} /> |
290
+ | `pixtral-12b-2409` | <Check size={18} /> | <Check size={18} /> | <Check size={18} /> | <Check size={18} /> |
291
+ | `open-mistral-7b` | <Cross size={18} /> | <Check size={18} /> | <Check size={18} /> | <Check size={18} /> |
292
+ | `open-mixtral-8x7b` | <Cross size={18} /> | <Check size={18} /> | <Check size={18} /> | <Check size={18} /> |
293
+ | `open-mixtral-8x22b` | <Cross size={18} /> | <Check size={18} /> | <Check size={18} /> | <Check size={18} /> |
294
+
295
+ <Note>
296
+ The table above lists popular models. Please see the [Mistral
297
+ docs](https://docs.mistral.ai/getting-started/models/models_overview/) for a
298
+ full list of available models. You can also pass any available provider
299
+ model ID as a string if needed.
300
+ </Note>
301
+
302
+ ## Embedding Models
303
+
304
+ You can create models that call the [Mistral embeddings API](https://docs.mistral.ai/api/#operation/createEmbedding)
305
+ using the `.embedding()` factory method.
306
+
307
+ ```ts
308
+ const model = mistral.embedding('mistral-embed');
309
+ ```
310
+
311
+ You can use Mistral embedding models to generate embeddings with the `embed` function:
312
+
313
+ ```ts
314
+ import { mistral } from '@ai-sdk/mistral';
315
+ import { embed } from 'ai';
316
+
317
+ const { embedding } = await embed({
318
+ model: mistral.embedding('mistral-embed'),
319
+ value: 'sunny day at the beach',
320
+ });
321
+ ```
322
+
323
+ ### Model Capabilities
324
+
325
+ | Model | Default Dimensions |
326
+ | --------------- | ------------------ |
327
+ | `mistral-embed` | 1024 |
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@ai-sdk/mistral",
3
- "version": "0.0.0-70e0935a-20260114150030",
3
+ "version": "0.0.0-98261322-20260122142521",
4
4
  "license": "Apache-2.0",
5
5
  "sideEffects": false,
6
6
  "main": "./dist/index.js",
@@ -8,9 +8,14 @@
8
8
  "types": "./dist/index.d.ts",
9
9
  "files": [
10
10
  "dist/**/*",
11
+ "docs/**/*",
12
+ "src",
11
13
  "CHANGELOG.md",
12
14
  "README.md"
13
15
  ],
16
+ "directories": {
17
+ "doc": "./docs"
18
+ },
14
19
  "exports": {
15
20
  "./package.json": "./package.json",
16
21
  ".": {
@@ -20,15 +25,15 @@
20
25
  }
21
26
  },
22
27
  "dependencies": {
23
- "@ai-sdk/provider": "0.0.0-70e0935a-20260114150030",
24
- "@ai-sdk/provider-utils": "0.0.0-70e0935a-20260114150030"
28
+ "@ai-sdk/provider": "3.0.4",
29
+ "@ai-sdk/provider-utils": "4.0.8"
25
30
  },
26
31
  "devDependencies": {
27
32
  "@types/node": "20.17.24",
28
33
  "tsup": "^8",
29
34
  "typescript": "5.8.3",
30
35
  "zod": "3.25.76",
31
- "@ai-sdk/test-server": "1.0.1",
36
+ "@ai-sdk/test-server": "1.0.2",
32
37
  "@vercel/ai-tsconfig": "0.0.0"
33
38
  },
34
39
  "peerDependencies": {
@@ -54,7 +59,7 @@
54
59
  "scripts": {
55
60
  "build": "pnpm clean && tsup --tsconfig tsconfig.build.json",
56
61
  "build:watch": "pnpm clean && tsup --watch",
57
- "clean": "del-cli dist *.tsbuildinfo",
62
+ "clean": "del-cli dist docs *.tsbuildinfo",
58
63
  "lint": "eslint \"./**/*.ts*\"",
59
64
  "type-check": "tsc --build",
60
65
  "prettier-check": "prettier --check \"./**/*.ts*\"",
@@ -0,0 +1,22 @@
1
+ {
2
+ "id": "5319bd0299614c679a0068a4f2c8ffd0",
3
+ "created": 1769088720,
4
+ "model": "mistral-small-latest",
5
+ "usage": {
6
+ "prompt_tokens": 13,
7
+ "total_tokens": 447,
8
+ "completion_tokens": 434
9
+ },
10
+ "object": "chat.completion",
11
+ "choices": [
12
+ {
13
+ "index": 0,
14
+ "finish_reason": "stop",
15
+ "message": {
16
+ "role": "assistant",
17
+ "tool_calls": null,
18
+ "content": "**Holiday Name: \"World Kindness Day of Sharing\"**\n\n**Date:** The third Saturday in October\n\n**Purpose:** To celebrate and promote acts of kindness, generosity, and connection by sharing something meaningful with others—whether it's time, skills, stories, or physical items.\n\n### **Traditions & Customs:**\n\n1. **\"Share a Skill\" Exchanges:**\n - People teach others something they're good at—cooking, crafting, gardening, or even life advice—free of charge. Community centers, schools, and online platforms host skill-sharing sessions.\n\n2. **\"Story Swap\" Gatherings:**\n - Friends and strangers meet in parks, cafes, or virtual spaces to share personal stories, jokes, or wisdom. The goal is to foster empathy and connection through storytelling.\n\n3. **\"Kindness Kits\" for Strangers:**\n - People assemble small care packages (handwritten notes, snacks, seeds, or handmade crafts) and leave them in public places (libraries, bus stops, parks) for others to find.\n\n4. **\"Pay It Forward\" Chains:**\n - Individuals perform random acts of kindness (buying coffee for the next person, donating to a cause, or helping a neighbor) and encourage others to do the same.\n\n5. **\"Memory Lane\" Sharing:**\n - Families and friends gather to share old photos, heirlooms, or family recipes, passing down traditions and creating new ones.\n\n6. **\"Global Kindness Map\":**\n - An interactive online map where people pin acts of kindness they've done or received, inspiring others to contribute.\n\n**Symbol:** A **hand holding a heart** (representing giving and compassion).\n\n**Food & Drink:** \"Kindness Cookies\" (homemade treats shared with neighbors) and \"Unity Tea\" (a blend of herbs from different cultures, symbolizing harmony).\n\n**Why It's Special:** Unlike commercial holidays, this day focuses on **meaningful human connection**—reminding everyone that kindness is a universal language.\n\nWould you celebrate it? What would you share? 😊"
19
+ }
20
+ }
21
+ ]
22
+ }
@@ -0,0 +1,57 @@
1
+ // Vitest Snapshot v1, https://vitest.dev/guide/snapshot.html
2
+
3
+ exports[`assistant messages > should add prefix true to trailing assistant messages 1`] = `
4
+ [
5
+ {
6
+ "content": [
7
+ {
8
+ "text": "Hello",
9
+ "type": "text",
10
+ },
11
+ ],
12
+ "role": "user",
13
+ },
14
+ {
15
+ "content": "Hello!",
16
+ "prefix": true,
17
+ "role": "assistant",
18
+ "tool_calls": undefined,
19
+ },
20
+ ]
21
+ `;
22
+
23
+ exports[`user messages > should convert messages with PDF file parts using URL 1`] = `
24
+ [
25
+ {
26
+ "content": [
27
+ {
28
+ "text": "Please analyze this document",
29
+ "type": "text",
30
+ },
31
+ {
32
+ "document_url": "https://example.com/document.pdf",
33
+ "type": "document_url",
34
+ },
35
+ ],
36
+ "role": "user",
37
+ },
38
+ ]
39
+ `;
40
+
41
+ exports[`user messages > should convert messages with image parts 1`] = `
42
+ [
43
+ {
44
+ "content": [
45
+ {
46
+ "text": "Hello",
47
+ "type": "text",
48
+ },
49
+ {
50
+ "image_url": "data:image/png;base64,AAECAw==",
51
+ "type": "image_url",
52
+ },
53
+ ],
54
+ "role": "user",
55
+ },
56
+ ]
57
+ `;
@@ -0,0 +1,44 @@
1
+ // Vitest Snapshot v1, https://vitest.dev/guide/snapshot.html
2
+
3
+ exports[`doEmbed > should expose the raw response 1`] = `
4
+ {
5
+ "body": {
6
+ "data": [
7
+ {
8
+ "embedding": [
9
+ 0.1,
10
+ 0.2,
11
+ 0.3,
12
+ 0.4,
13
+ 0.5,
14
+ ],
15
+ "index": 0,
16
+ "object": "embedding",
17
+ },
18
+ {
19
+ "embedding": [
20
+ 0.6,
21
+ 0.7,
22
+ 0.8,
23
+ 0.9,
24
+ 1,
25
+ ],
26
+ "index": 1,
27
+ "object": "embedding",
28
+ },
29
+ ],
30
+ "id": "b322cfc2b9d34e2f8e14fc99874faee5",
31
+ "model": "mistral-embed",
32
+ "object": "list",
33
+ "usage": {
34
+ "prompt_tokens": 8,
35
+ "total_tokens": 8,
36
+ },
37
+ },
38
+ "headers": {
39
+ "content-length": "267",
40
+ "content-type": "application/json",
41
+ "test-header": "test-value",
42
+ },
43
+ }
44
+ `;
@@ -0,0 +1,46 @@
1
+ import { LanguageModelV3Usage } from '@ai-sdk/provider';
2
+
3
+ export type MistralUsage = {
4
+ prompt_tokens: number;
5
+ completion_tokens: number;
6
+ total_tokens: number;
7
+ };
8
+
9
+ export function convertMistralUsage(
10
+ usage: MistralUsage | undefined | null,
11
+ ): LanguageModelV3Usage {
12
+ if (usage == null) {
13
+ return {
14
+ inputTokens: {
15
+ total: undefined,
16
+ noCache: undefined,
17
+ cacheRead: undefined,
18
+ cacheWrite: undefined,
19
+ },
20
+ outputTokens: {
21
+ total: undefined,
22
+ text: undefined,
23
+ reasoning: undefined,
24
+ },
25
+ raw: undefined,
26
+ };
27
+ }
28
+
29
+ const promptTokens = usage.prompt_tokens;
30
+ const completionTokens = usage.completion_tokens;
31
+
32
+ return {
33
+ inputTokens: {
34
+ total: promptTokens,
35
+ noCache: promptTokens,
36
+ cacheRead: undefined,
37
+ cacheWrite: undefined,
38
+ },
39
+ outputTokens: {
40
+ total: completionTokens,
41
+ text: completionTokens,
42
+ reasoning: undefined,
43
+ },
44
+ raw: usage,
45
+ };
46
+ }