@ai-sdk/xai 3.0.28 → 3.0.30

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (46)
  1. package/CHANGELOG.md +15 -0
  2. package/dist/index.js +1 -1
  3. package/dist/index.mjs +1 -1
  4. package/package.json +5 -4
  5. package/src/convert-to-xai-chat-messages.test.ts +243 -0
  6. package/src/convert-to-xai-chat-messages.ts +142 -0
  7. package/src/convert-xai-chat-usage.test.ts +240 -0
  8. package/src/convert-xai-chat-usage.ts +23 -0
  9. package/src/get-response-metadata.ts +19 -0
  10. package/src/index.ts +14 -0
  11. package/src/map-xai-finish-reason.ts +19 -0
  12. package/src/responses/__fixtures__/xai-code-execution-tool.1.json +68 -0
  13. package/src/responses/__fixtures__/xai-text-streaming.1.chunks.txt +698 -0
  14. package/src/responses/__fixtures__/xai-text-with-reasoning-streaming-store-false.1.chunks.txt +655 -0
  15. package/src/responses/__fixtures__/xai-text-with-reasoning-streaming.1.chunks.txt +679 -0
  16. package/src/responses/__fixtures__/xai-web-search-tool.1.chunks.txt +274 -0
  17. package/src/responses/__fixtures__/xai-web-search-tool.1.json +90 -0
  18. package/src/responses/__fixtures__/xai-x-search-tool.1.json +149 -0
  19. package/src/responses/__fixtures__/xai-x-search-tool.chunks.txt +1757 -0
  20. package/src/responses/__snapshots__/xai-responses-language-model.test.ts.snap +21929 -0
  21. package/src/responses/convert-to-xai-responses-input.test.ts +463 -0
  22. package/src/responses/convert-to-xai-responses-input.ts +206 -0
  23. package/src/responses/convert-xai-responses-usage.ts +24 -0
  24. package/src/responses/map-xai-responses-finish-reason.ts +20 -0
  25. package/src/responses/xai-responses-api.ts +393 -0
  26. package/src/responses/xai-responses-language-model.test.ts +1803 -0
  27. package/src/responses/xai-responses-language-model.ts +732 -0
  28. package/src/responses/xai-responses-options.ts +34 -0
  29. package/src/responses/xai-responses-prepare-tools.test.ts +497 -0
  30. package/src/responses/xai-responses-prepare-tools.ts +226 -0
  31. package/src/tool/code-execution.ts +17 -0
  32. package/src/tool/index.ts +15 -0
  33. package/src/tool/view-image.ts +20 -0
  34. package/src/tool/view-x-video.ts +18 -0
  35. package/src/tool/web-search.ts +56 -0
  36. package/src/tool/x-search.ts +63 -0
  37. package/src/version.ts +6 -0
  38. package/src/xai-chat-language-model.test.ts +1805 -0
  39. package/src/xai-chat-language-model.ts +681 -0
  40. package/src/xai-chat-options.ts +131 -0
  41. package/src/xai-chat-prompt.ts +44 -0
  42. package/src/xai-error.ts +19 -0
  43. package/src/xai-image-settings.ts +1 -0
  44. package/src/xai-prepare-tools.ts +95 -0
  45. package/src/xai-provider.test.ts +167 -0
  46. package/src/xai-provider.ts +162 -0
package/CHANGELOG.md CHANGED
@@ -1,5 +1,20 @@
1
1
  # @ai-sdk/xai
2
2
 
3
+ ## 3.0.30
4
+
5
+ ### Patch Changes
6
+
7
+ - 8dc54db: chore: add src folders to package bundle
8
+ - Updated dependencies [8dc54db]
9
+ - @ai-sdk/openai-compatible@2.0.17
10
+
11
+ ## 3.0.29
12
+
13
+ ### Patch Changes
14
+
15
+ - Updated dependencies [78555ad]
16
+ - @ai-sdk/openai-compatible@2.0.16
17
+
3
18
  ## 3.0.28
4
19
 
5
20
  ### Patch Changes
package/dist/index.js CHANGED
@@ -2201,7 +2201,7 @@ var xaiTools = {
2201
2201
  };
2202
2202
 
2203
2203
  // src/version.ts
2204
- var VERSION = true ? "3.0.28" : "0.0.0-test";
2204
+ var VERSION = true ? "3.0.30" : "0.0.0-test";
2205
2205
 
2206
2206
  // src/xai-provider.ts
2207
2207
  var xaiErrorStructure = {
package/dist/index.mjs CHANGED
@@ -2208,7 +2208,7 @@ var xaiTools = {
2208
2208
  };
2209
2209
 
2210
2210
  // src/version.ts
2211
- var VERSION = true ? "3.0.28" : "0.0.0-test";
2211
+ var VERSION = true ? "3.0.30" : "0.0.0-test";
2212
2212
 
2213
2213
  // src/xai-provider.ts
2214
2214
  var xaiErrorStructure = {
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@ai-sdk/xai",
3
- "version": "3.0.28",
3
+ "version": "3.0.30",
4
4
  "license": "Apache-2.0",
5
5
  "sideEffects": false,
6
6
  "main": "./dist/index.js",
@@ -8,6 +8,7 @@
8
8
  "types": "./dist/index.d.ts",
9
9
  "files": [
10
10
  "dist/**/*",
11
+ "src",
11
12
  "CHANGELOG.md",
12
13
  "README.md"
13
14
  ],
@@ -20,16 +21,16 @@
20
21
  }
21
22
  },
22
23
  "dependencies": {
23
- "@ai-sdk/openai-compatible": "2.0.15",
24
24
  "@ai-sdk/provider": "3.0.4",
25
- "@ai-sdk/provider-utils": "4.0.8"
25
+ "@ai-sdk/provider-utils": "4.0.8",
26
+ "@ai-sdk/openai-compatible": "2.0.17"
26
27
  },
27
28
  "devDependencies": {
28
29
  "@types/node": "20.17.24",
29
30
  "tsup": "^8",
30
31
  "typescript": "5.8.3",
31
32
  "zod": "3.25.76",
32
- "@ai-sdk/test-server": "1.0.1",
33
+ "@ai-sdk/test-server": "1.0.2",
33
34
  "@vercel/ai-tsconfig": "0.0.0"
34
35
  },
35
36
  "peerDependencies": {
@@ -0,0 +1,243 @@
1
+ import { convertToXaiChatMessages } from './convert-to-xai-chat-messages';
2
+ import { describe, it, expect } from 'vitest';
3
+
4
+ describe('convertToXaiChatMessages', () => {
5
+ it('should convert simple text messages', () => {
6
+ const { messages, warnings } = convertToXaiChatMessages([
7
+ { role: 'user', content: [{ type: 'text', text: 'Hello' }] },
8
+ ]);
9
+
10
+ expect(warnings).toEqual([]);
11
+ expect(messages).toEqual([{ role: 'user', content: 'Hello' }]);
12
+ });
13
+
14
+ it('should convert system messages', () => {
15
+ const { messages, warnings } = convertToXaiChatMessages([
16
+ { role: 'system', content: 'You are a helpful assistant.' },
17
+ ]);
18
+
19
+ expect(warnings).toEqual([]);
20
+ expect(messages).toEqual([
21
+ { role: 'system', content: 'You are a helpful assistant.' },
22
+ ]);
23
+ });
24
+
25
+ it('should convert assistant messages', () => {
26
+ const { messages, warnings } = convertToXaiChatMessages([
27
+ { role: 'assistant', content: [{ type: 'text', text: 'Hello there!' }] },
28
+ ]);
29
+
30
+ expect(warnings).toEqual([]);
31
+ expect(messages).toEqual([
32
+ { role: 'assistant', content: 'Hello there!', tool_calls: undefined },
33
+ ]);
34
+ });
35
+
36
+ it('should convert messages with image parts', () => {
37
+ const { messages, warnings } = convertToXaiChatMessages([
38
+ {
39
+ role: 'user',
40
+ content: [
41
+ { type: 'text', text: 'What is in this image?' },
42
+ {
43
+ type: 'file',
44
+ mediaType: 'image/png',
45
+ data: Buffer.from([0, 1, 2, 3]),
46
+ },
47
+ ],
48
+ },
49
+ ]);
50
+
51
+ expect(warnings).toEqual([]);
52
+ expect(messages).toEqual([
53
+ {
54
+ role: 'user',
55
+ content: [
56
+ { type: 'text', text: 'What is in this image?' },
57
+ {
58
+ type: 'image_url',
59
+ image_url: { url: 'data:image/png;base64,AAECAw==' },
60
+ },
61
+ ],
62
+ },
63
+ ]);
64
+ });
65
+
66
+ it('should convert image URLs', () => {
67
+ const { messages, warnings } = convertToXaiChatMessages([
68
+ {
69
+ role: 'user',
70
+ content: [
71
+ {
72
+ type: 'file',
73
+ mediaType: 'image/jpeg',
74
+ data: new URL('https://example.com/image.jpg'),
75
+ },
76
+ ],
77
+ },
78
+ ]);
79
+
80
+ expect(warnings).toEqual([]);
81
+ expect(messages).toEqual([
82
+ {
83
+ role: 'user',
84
+ content: [
85
+ {
86
+ type: 'image_url',
87
+ image_url: { url: 'https://example.com/image.jpg' },
88
+ },
89
+ ],
90
+ },
91
+ ]);
92
+ });
93
+
94
+ it('should throw error for unsupported file types', () => {
95
+ expect(() => {
96
+ convertToXaiChatMessages([
97
+ {
98
+ role: 'user',
99
+ content: [
100
+ {
101
+ type: 'file',
102
+ mediaType: 'application/pdf',
103
+ data: Buffer.from([0, 1, 2, 3]),
104
+ },
105
+ ],
106
+ },
107
+ ]);
108
+ }).toThrow('file part media type application/pdf');
109
+ });
110
+
111
+ it('should convert tool calls and tool responses', () => {
112
+ const { messages, warnings } = convertToXaiChatMessages([
113
+ {
114
+ role: 'assistant',
115
+ content: [
116
+ {
117
+ type: 'tool-call',
118
+ toolCallId: 'call_123',
119
+ toolName: 'weather',
120
+ input: { location: 'Paris' },
121
+ },
122
+ ],
123
+ },
124
+ {
125
+ role: 'tool',
126
+ content: [
127
+ {
128
+ type: 'tool-result',
129
+ toolCallId: 'call_123',
130
+ toolName: 'weather',
131
+ output: { type: 'json', value: { temperature: 20 } },
132
+ },
133
+ ],
134
+ },
135
+ ]);
136
+
137
+ expect(warnings).toEqual([]);
138
+ expect(messages).toEqual([
139
+ {
140
+ role: 'assistant',
141
+ content: '',
142
+ tool_calls: [
143
+ {
144
+ id: 'call_123',
145
+ type: 'function',
146
+ function: {
147
+ name: 'weather',
148
+ arguments: '{"location":"Paris"}',
149
+ },
150
+ },
151
+ ],
152
+ },
153
+ {
154
+ role: 'tool',
155
+ tool_call_id: 'call_123',
156
+ content: '{"temperature":20}',
157
+ },
158
+ ]);
159
+ });
160
+
161
+ it('should handle multiple tool calls in one message', () => {
162
+ const { messages, warnings } = convertToXaiChatMessages([
163
+ {
164
+ role: 'assistant',
165
+ content: [
166
+ {
167
+ type: 'tool-call',
168
+ toolCallId: 'call_123',
169
+ toolName: 'weather',
170
+ input: { location: 'Paris' },
171
+ },
172
+ {
173
+ type: 'tool-call',
174
+ toolCallId: 'call_456',
175
+ toolName: 'time',
176
+ input: { timezone: 'UTC' },
177
+ },
178
+ ],
179
+ },
180
+ ]);
181
+
182
+ expect(warnings).toEqual([]);
183
+ expect(messages).toEqual([
184
+ {
185
+ role: 'assistant',
186
+ content: '',
187
+ tool_calls: [
188
+ {
189
+ id: 'call_123',
190
+ type: 'function',
191
+ function: {
192
+ name: 'weather',
193
+ arguments: '{"location":"Paris"}',
194
+ },
195
+ },
196
+ {
197
+ id: 'call_456',
198
+ type: 'function',
199
+ function: {
200
+ name: 'time',
201
+ arguments: '{"timezone":"UTC"}',
202
+ },
203
+ },
204
+ ],
205
+ },
206
+ ]);
207
+ });
208
+
209
+ it('should handle mixed content with text and tool calls', () => {
210
+ const { messages, warnings } = convertToXaiChatMessages([
211
+ {
212
+ role: 'assistant',
213
+ content: [
214
+ { type: 'text', text: 'Let me check the weather for you.' },
215
+ {
216
+ type: 'tool-call',
217
+ toolCallId: 'call_123',
218
+ toolName: 'weather',
219
+ input: { location: 'Paris' },
220
+ },
221
+ ],
222
+ },
223
+ ]);
224
+
225
+ expect(warnings).toEqual([]);
226
+ expect(messages).toEqual([
227
+ {
228
+ role: 'assistant',
229
+ content: 'Let me check the weather for you.',
230
+ tool_calls: [
231
+ {
232
+ id: 'call_123',
233
+ type: 'function',
234
+ function: {
235
+ name: 'weather',
236
+ arguments: '{"location":"Paris"}',
237
+ },
238
+ },
239
+ ],
240
+ },
241
+ ]);
242
+ });
243
+ });
@@ -0,0 +1,142 @@
1
+ import {
2
+ SharedV3Warning,
3
+ LanguageModelV3Prompt,
4
+ UnsupportedFunctionalityError,
5
+ } from '@ai-sdk/provider';
6
+ import { convertToBase64 } from '@ai-sdk/provider-utils';
7
+ import { XaiChatPrompt } from './xai-chat-prompt';
8
+
9
+ export function convertToXaiChatMessages(prompt: LanguageModelV3Prompt): {
10
+ messages: XaiChatPrompt;
11
+ warnings: Array<SharedV3Warning>;
12
+ } {
13
+ const messages: XaiChatPrompt = [];
14
+ const warnings: Array<SharedV3Warning> = [];
15
+
16
+ for (const { role, content } of prompt) {
17
+ switch (role) {
18
+ case 'system': {
19
+ messages.push({ role: 'system', content });
20
+ break;
21
+ }
22
+
23
+ case 'user': {
24
+ if (content.length === 1 && content[0].type === 'text') {
25
+ messages.push({ role: 'user', content: content[0].text });
26
+ break;
27
+ }
28
+
29
+ messages.push({
30
+ role: 'user',
31
+ content: content.map(part => {
32
+ switch (part.type) {
33
+ case 'text': {
34
+ return { type: 'text', text: part.text };
35
+ }
36
+ case 'file': {
37
+ if (part.mediaType.startsWith('image/')) {
38
+ const mediaType =
39
+ part.mediaType === 'image/*'
40
+ ? 'image/jpeg'
41
+ : part.mediaType;
42
+
43
+ return {
44
+ type: 'image_url',
45
+ image_url: {
46
+ url:
47
+ part.data instanceof URL
48
+ ? part.data.toString()
49
+ : `data:${mediaType};base64,${convertToBase64(part.data)}`,
50
+ },
51
+ };
52
+ } else {
53
+ throw new UnsupportedFunctionalityError({
54
+ functionality: `file part media type ${part.mediaType}`,
55
+ });
56
+ }
57
+ }
58
+ }
59
+ }),
60
+ });
61
+
62
+ break;
63
+ }
64
+
65
+ case 'assistant': {
66
+ let text = '';
67
+ const toolCalls: Array<{
68
+ id: string;
69
+ type: 'function';
70
+ function: { name: string; arguments: string };
71
+ }> = [];
72
+
73
+ for (const part of content) {
74
+ switch (part.type) {
75
+ case 'text': {
76
+ text += part.text;
77
+ break;
78
+ }
79
+ case 'tool-call': {
80
+ toolCalls.push({
81
+ id: part.toolCallId,
82
+ type: 'function',
83
+ function: {
84
+ name: part.toolName,
85
+ arguments: JSON.stringify(part.input),
86
+ },
87
+ });
88
+ break;
89
+ }
90
+ }
91
+ }
92
+
93
+ messages.push({
94
+ role: 'assistant',
95
+ content: text,
96
+ tool_calls: toolCalls.length > 0 ? toolCalls : undefined,
97
+ });
98
+
99
+ break;
100
+ }
101
+
102
+ case 'tool': {
103
+ for (const toolResponse of content) {
104
+ if (toolResponse.type === 'tool-approval-response') {
105
+ continue;
106
+ }
107
+ const output = toolResponse.output;
108
+
109
+ let contentValue: string;
110
+ switch (output.type) {
111
+ case 'text':
112
+ case 'error-text':
113
+ contentValue = output.value;
114
+ break;
115
+ case 'execution-denied':
116
+ contentValue = output.reason ?? 'Tool execution denied.';
117
+ break;
118
+ case 'content':
119
+ case 'json':
120
+ case 'error-json':
121
+ contentValue = JSON.stringify(output.value);
122
+ break;
123
+ }
124
+
125
+ messages.push({
126
+ role: 'tool',
127
+ tool_call_id: toolResponse.toolCallId,
128
+ content: contentValue,
129
+ });
130
+ }
131
+ break;
132
+ }
133
+
134
+ default: {
135
+ const _exhaustiveCheck: never = role;
136
+ throw new Error(`Unsupported role: ${_exhaustiveCheck}`);
137
+ }
138
+ }
139
+ }
140
+
141
+ return { messages, warnings };
142
+ }
@@ -0,0 +1,240 @@
1
+ import { convertXaiChatUsage } from './convert-xai-chat-usage';
2
+ import { describe, it, expect } from 'vitest';
3
+
4
+ describe('convertXaiChatUsage', () => {
5
+ it('should convert basic usage without reasoning tokens', () => {
6
+ const result = convertXaiChatUsage({
7
+ prompt_tokens: 100,
8
+ completion_tokens: 50,
9
+ total_tokens: 150,
10
+ });
11
+
12
+ expect(result).toEqual({
13
+ inputTokens: {
14
+ total: 100,
15
+ noCache: 100,
16
+ cacheRead: 0,
17
+ cacheWrite: undefined,
18
+ },
19
+ outputTokens: {
20
+ total: 50,
21
+ text: 50,
22
+ reasoning: 0,
23
+ },
24
+ raw: {
25
+ prompt_tokens: 100,
26
+ completion_tokens: 50,
27
+ total_tokens: 150,
28
+ },
29
+ });
30
+ });
31
+
32
+ it('should convert usage with reasoning tokens', () => {
33
+ const result = convertXaiChatUsage({
34
+ prompt_tokens: 168,
35
+ completion_tokens: 870, // includes reasoning tokens
36
+ total_tokens: 1038,
37
+ completion_tokens_details: {
38
+ reasoning_tokens: 528,
39
+ },
40
+ });
41
+
42
+ expect(result).toEqual({
43
+ inputTokens: {
44
+ total: 168,
45
+ noCache: 168,
46
+ cacheRead: 0,
47
+ cacheWrite: undefined,
48
+ },
49
+ outputTokens: {
50
+ total: 870, // completion_tokens (includes reasoning)
51
+ text: 342, // 870 - 528
52
+ reasoning: 528,
53
+ },
54
+ raw: {
55
+ prompt_tokens: 168,
56
+ completion_tokens: 870,
57
+ total_tokens: 1038,
58
+ completion_tokens_details: {
59
+ reasoning_tokens: 528,
60
+ },
61
+ },
62
+ });
63
+ });
64
+
65
+ it('should handle reasoning tokens greater than completion tokens', () => {
66
+ // When reasoning tokens are greater, completion_tokens would include them
67
+ // This scenario tests edge case where we still get valid results
68
+ const result = convertXaiChatUsage({
69
+ prompt_tokens: 168,
70
+ completion_tokens: 870, // includes reasoning
71
+ total_tokens: 1038,
72
+ completion_tokens_details: {
73
+ reasoning_tokens: 528,
74
+ },
75
+ });
76
+
77
+ expect(result.outputTokens.text).toBe(342); // 870 - 528
78
+ expect(result.outputTokens.text).toBeGreaterThanOrEqual(0);
79
+ expect(result.outputTokens.total).toBe(870);
80
+ });
81
+
82
+ it('should convert usage with cached input tokens', () => {
83
+ const result = convertXaiChatUsage({
84
+ prompt_tokens: 168,
85
+ completion_tokens: 1036, // includes reasoning tokens
86
+ total_tokens: 1204,
87
+ prompt_tokens_details: {
88
+ text_tokens: 168,
89
+ audio_tokens: 0,
90
+ image_tokens: 0,
91
+ cached_tokens: 146,
92
+ },
93
+ completion_tokens_details: {
94
+ reasoning_tokens: 458,
95
+ },
96
+ });
97
+
98
+ expect(result).toEqual({
99
+ inputTokens: {
100
+ total: 168,
101
+ noCache: 22, // 168 - 146
102
+ cacheRead: 146,
103
+ cacheWrite: undefined,
104
+ },
105
+ outputTokens: {
106
+ total: 1036, // completion_tokens (includes reasoning)
107
+ text: 578, // 1036 - 458
108
+ reasoning: 458,
109
+ },
110
+ raw: {
111
+ prompt_tokens: 168,
112
+ completion_tokens: 1036,
113
+ total_tokens: 1204,
114
+ prompt_tokens_details: {
115
+ text_tokens: 168,
116
+ audio_tokens: 0,
117
+ image_tokens: 0,
118
+ cached_tokens: 146,
119
+ },
120
+ completion_tokens_details: {
121
+ reasoning_tokens: 458,
122
+ },
123
+ },
124
+ });
125
+ });
126
+
127
+ it('should handle null/undefined token details gracefully', () => {
128
+ const result = convertXaiChatUsage({
129
+ prompt_tokens: 100,
130
+ completion_tokens: 200,
131
+ total_tokens: 300,
132
+ prompt_tokens_details: null,
133
+ completion_tokens_details: null,
134
+ });
135
+
136
+ expect(result.inputTokens.cacheRead).toBe(0);
137
+ expect(result.inputTokens.noCache).toBe(100);
138
+ expect(result.outputTokens.reasoning).toBe(0);
139
+ expect(result.outputTokens.text).toBe(200);
140
+ expect(result.outputTokens.total).toBe(200);
141
+ });
142
+
143
+ it('should handle zero reasoning tokens', () => {
144
+ const result = convertXaiChatUsage({
145
+ prompt_tokens: 50,
146
+ completion_tokens: 100,
147
+ total_tokens: 150,
148
+ completion_tokens_details: {
149
+ reasoning_tokens: 0,
150
+ },
151
+ });
152
+
153
+ expect(result.outputTokens).toEqual({
154
+ total: 100,
155
+ text: 100,
156
+ reasoning: 0,
157
+ });
158
+ });
159
+
160
+ it('should preserve raw usage data', () => {
161
+ const rawUsage = {
162
+ prompt_tokens: 168,
163
+ completion_tokens: 870, // includes reasoning
164
+ total_tokens: 1038,
165
+ prompt_tokens_details: {
166
+ text_tokens: 168,
167
+ audio_tokens: 0,
168
+ image_tokens: 0,
169
+ cached_tokens: 167,
170
+ },
171
+ completion_tokens_details: {
172
+ reasoning_tokens: 528,
173
+ audio_tokens: 0,
174
+ accepted_prediction_tokens: 0,
175
+ rejected_prediction_tokens: 0,
176
+ },
177
+ };
178
+
179
+ const result = convertXaiChatUsage(rawUsage);
180
+
181
+ expect(result.raw).toEqual(rawUsage);
182
+ });
183
+
184
+ it('should handle the issue case with grok-4-fast-reasoning', () => {
185
+ // Test case from GitHub issue: grok-4-fast-reasoning returning negative text_output_tokens
186
+ // Raw API response format where completion_tokens includes reasoning tokens
187
+ const result = convertXaiChatUsage({
188
+ prompt_tokens: 279,
189
+ completion_tokens: 95, // includes reasoning tokens (6 text + 89 reasoning)
190
+ total_tokens: 374,
191
+ prompt_tokens_details: {
192
+ text_tokens: 279,
193
+ audio_tokens: 0,
194
+ image_tokens: 0,
195
+ cached_tokens: 149,
196
+ },
197
+ completion_tokens_details: {
198
+ reasoning_tokens: 89,
199
+ audio_tokens: 0,
200
+ accepted_prediction_tokens: 0,
201
+ rejected_prediction_tokens: 0,
202
+ },
203
+ });
204
+
205
+ expect(result).toEqual({
206
+ inputTokens: {
207
+ total: 279,
208
+ noCache: 130, // 279 - 149
209
+ cacheRead: 149,
210
+ cacheWrite: undefined,
211
+ },
212
+ outputTokens: {
213
+ total: 95, // completion_tokens (includes reasoning)
214
+ text: 6, // 95 - 89 (should be positive, not -83!)
215
+ reasoning: 89,
216
+ },
217
+ raw: {
218
+ prompt_tokens: 279,
219
+ completion_tokens: 95,
220
+ total_tokens: 374,
221
+ prompt_tokens_details: {
222
+ text_tokens: 279,
223
+ audio_tokens: 0,
224
+ image_tokens: 0,
225
+ cached_tokens: 149,
226
+ },
227
+ completion_tokens_details: {
228
+ reasoning_tokens: 89,
229
+ audio_tokens: 0,
230
+ accepted_prediction_tokens: 0,
231
+ rejected_prediction_tokens: 0,
232
+ },
233
+ },
234
+ });
235
+
236
+ // Verify no negative text tokens
237
+ expect(result.outputTokens.text).toBeGreaterThanOrEqual(0);
238
+ expect(result.outputTokens.text).toBe(6);
239
+ });
240
+ });