@ai-sdk/openai 0.0.0-2f1ae29d-20260122140908 → 0.0.0-4115c213-20260122152721

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (63)
  1. package/CHANGELOG.md +17 -2
  2. package/dist/index.js +15 -1
  3. package/dist/index.js.map +1 -1
  4. package/dist/index.mjs +15 -1
  5. package/dist/index.mjs.map +1 -1
  6. package/dist/internal/index.js +14 -0
  7. package/dist/internal/index.js.map +1 -1
  8. package/dist/internal/index.mjs +14 -0
  9. package/dist/internal/index.mjs.map +1 -1
  10. package/package.json +9 -5
  11. package/src/responses/convert-to-openai-responses-input.ts +20 -1
  12. package/src/chat/__fixtures__/azure-model-router.1.chunks.txt +0 -8
  13. package/src/chat/__snapshots__/openai-chat-language-model.test.ts.snap +0 -88
  14. package/src/chat/convert-to-openai-chat-messages.test.ts +0 -516
  15. package/src/chat/openai-chat-language-model.test.ts +0 -3496
  16. package/src/chat/openai-chat-prepare-tools.test.ts +0 -322
  17. package/src/completion/openai-completion-language-model.test.ts +0 -752
  18. package/src/embedding/__snapshots__/openai-embedding-model.test.ts.snap +0 -43
  19. package/src/embedding/openai-embedding-model.test.ts +0 -146
  20. package/src/image/openai-image-model.test.ts +0 -722
  21. package/src/openai-error.test.ts +0 -34
  22. package/src/openai-language-model-capabilities.test.ts +0 -93
  23. package/src/openai-provider.test.ts +0 -98
  24. package/src/responses/__fixtures__/openai-apply-patch-tool-delete.1.chunks.txt +0 -5
  25. package/src/responses/__fixtures__/openai-apply-patch-tool.1.chunks.txt +0 -38
  26. package/src/responses/__fixtures__/openai-apply-patch-tool.1.json +0 -69
  27. package/src/responses/__fixtures__/openai-code-interpreter-tool.1.chunks.txt +0 -393
  28. package/src/responses/__fixtures__/openai-code-interpreter-tool.1.json +0 -137
  29. package/src/responses/__fixtures__/openai-error.1.chunks.txt +0 -4
  30. package/src/responses/__fixtures__/openai-error.1.json +0 -8
  31. package/src/responses/__fixtures__/openai-file-search-tool.1.chunks.txt +0 -94
  32. package/src/responses/__fixtures__/openai-file-search-tool.1.json +0 -89
  33. package/src/responses/__fixtures__/openai-file-search-tool.2.chunks.txt +0 -93
  34. package/src/responses/__fixtures__/openai-file-search-tool.2.json +0 -112
  35. package/src/responses/__fixtures__/openai-image-generation-tool.1.chunks.txt +0 -16
  36. package/src/responses/__fixtures__/openai-image-generation-tool.1.json +0 -96
  37. package/src/responses/__fixtures__/openai-local-shell-tool.1.chunks.txt +0 -7
  38. package/src/responses/__fixtures__/openai-local-shell-tool.1.json +0 -70
  39. package/src/responses/__fixtures__/openai-mcp-tool-approval.1.chunks.txt +0 -11
  40. package/src/responses/__fixtures__/openai-mcp-tool-approval.1.json +0 -169
  41. package/src/responses/__fixtures__/openai-mcp-tool-approval.2.chunks.txt +0 -123
  42. package/src/responses/__fixtures__/openai-mcp-tool-approval.2.json +0 -176
  43. package/src/responses/__fixtures__/openai-mcp-tool-approval.3.chunks.txt +0 -11
  44. package/src/responses/__fixtures__/openai-mcp-tool-approval.3.json +0 -169
  45. package/src/responses/__fixtures__/openai-mcp-tool-approval.4.chunks.txt +0 -84
  46. package/src/responses/__fixtures__/openai-mcp-tool-approval.4.json +0 -182
  47. package/src/responses/__fixtures__/openai-mcp-tool.1.chunks.txt +0 -373
  48. package/src/responses/__fixtures__/openai-mcp-tool.1.json +0 -159
  49. package/src/responses/__fixtures__/openai-reasoning-encrypted-content.1.chunks.txt +0 -110
  50. package/src/responses/__fixtures__/openai-reasoning-encrypted-content.1.json +0 -117
  51. package/src/responses/__fixtures__/openai-shell-tool.1.chunks.txt +0 -182
  52. package/src/responses/__fixtures__/openai-shell-tool.1.json +0 -73
  53. package/src/responses/__fixtures__/openai-web-search-tool.1.chunks.txt +0 -185
  54. package/src/responses/__fixtures__/openai-web-search-tool.1.json +0 -266
  55. package/src/responses/__snapshots__/openai-responses-language-model.test.ts.snap +0 -10955
  56. package/src/responses/convert-to-openai-responses-input.test.ts +0 -2976
  57. package/src/responses/openai-responses-api.test.ts +0 -89
  58. package/src/responses/openai-responses-language-model.test.ts +0 -6927
  59. package/src/responses/openai-responses-prepare-tools.test.ts +0 -924
  60. package/src/speech/openai-speech-model.test.ts +0 -202
  61. package/src/tool/local-shell.test-d.ts +0 -20
  62. package/src/tool/web-search.test-d.ts +0 -13
  63. package/src/transcription/openai-transcription-model.test.ts +0 -507
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "@ai-sdk/openai",
- "version": "0.0.0-2f1ae29d-20260122140908",
+ "version": "0.0.0-4115c213-20260122152721",
  "license": "Apache-2.0",
  "sideEffects": false,
  "main": "./dist/index.js",
@@ -10,6 +10,10 @@
  "dist/**/*",
  "docs/**/*",
  "src",
+ "!src/**/*.test.ts",
+ "!src/**/*.test-d.ts",
+ "!src/**/__snapshots__",
+ "!src/**/__fixtures__",
  "CHANGELOG.md",
  "README.md",
  "internal.d.ts"
@@ -32,16 +36,16 @@
  }
  },
  "dependencies": {
- "@ai-sdk/provider-utils": "4.0.8",
- "@ai-sdk/provider": "3.0.4"
+ "@ai-sdk/provider": "0.0.0-4115c213-20260122152721",
+ "@ai-sdk/provider-utils": "0.0.0-4115c213-20260122152721"
  },
  "devDependencies": {
  "@types/node": "20.17.24",
  "tsup": "^8",
  "typescript": "5.8.3",
  "zod": "3.25.76",
- "@vercel/ai-tsconfig": "0.0.0",
- "@ai-sdk/test-server": "1.0.2"
+ "@ai-sdk/test-server": "0.0.0-4115c213-20260122152721",
+ "@vercel/ai-tsconfig": "0.0.0"
  },
  "peerDependencies": {
  "zod": "^3.25.76 || ^4.1.8"
package/src/responses/convert-to-openai-responses-input.ts CHANGED
@@ -12,7 +12,10 @@ import {
  validateTypes,
  } from '@ai-sdk/provider-utils';
  import { z } from 'zod/v4';
- import { applyPatchOutputSchema } from '../tool/apply-patch';
+ import {
+ applyPatchInputSchema,
+ applyPatchOutputSchema,
+ } from '../tool/apply-patch';
  import {
  localShellInputSchema,
  localShellOutputSchema,
@@ -253,6 +256,22 @@ export async function convertToOpenAIResponsesInput({
  break;
  }

+ if (hasApplyPatchTool && resolvedToolName === 'apply_patch') {
+ const parsedInput = await validateTypes({
+ value: part.input,
+ schema: applyPatchInputSchema,
+ });
+ input.push({
+ type: 'apply_patch_call',
+ call_id: parsedInput.callId,
+ id: id!,
+ status: 'completed',
+ operation: parsedInput.operation,
+ });
+
+ break;
+ }
+
  input.push({
  type: 'function_call',
  call_id: part.toolCallId,
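For context, a rough sketch of what the new branch produces. It assumes an assistant tool-call part for the provider-defined apply_patch tool; the identifiers and the operation payload below are illustrative, and the real input shape is whatever applyPatchInputSchema in ../tool/apply-patch validates:

// Hypothetical assistant tool-call part (values made up for illustration):
const applyPatchToolCall = {
  type: 'tool-call',
  toolCallId: 'call_123',
  toolName: 'apply_patch',
  input: {
    callId: 'call_123',
    operation: { type: 'update_file', path: 'src/example.ts', diff: '@@ ... @@' }, // assumed shape
  },
};

// With the apply_patch tool active, convertToOpenAIResponsesInput now forwards
// this part as an apply_patch_call item instead of a generic function_call item:
// {
//   type: 'apply_patch_call',
//   call_id: 'call_123',           // parsedInput.callId
//   id: '<responses item id>',     // provider-assigned item id, when present
//   status: 'completed',
//   operation: { ... },            // parsedInput.operation, forwarded as-is
// }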
package/src/chat/__fixtures__/azure-model-router.1.chunks.txt DELETED
@@ -1,8 +0,0 @@
- {"choices":[],"created":0,"id":"","model":"","object":"","prompt_filter_results":[{"prompt_index":0,"content_filter_results":{"hate":{"filtered":false,"severity":"safe"},"jailbreak":{"filtered":false,"detected":false},"self_harm":{"filtered":false,"severity":"safe"},"sexual":{"filtered":false,"severity":"safe"},"violence":{"filtered":false,"severity":"safe"}}}]}
- {"choices":[{"content_filter_results":{},"delta":{"content":"","refusal":null,"role":"assistant"},"finish_reason":null,"index":0,"logprobs":null}],"created":1762317021,"id":"chatcmpl-CYPS1lijGoK8gd9lYzY3r9Sx50nbt","model":"gpt-5-nano-2025-08-07","obfuscation":"D3WbtIxo1Q2j1Q","object":"chat.completion.chunk","system_fingerprint":null,"usage":null}
- {"choices":[{"content_filter_results":{"hate":{"filtered":false,"severity":"safe"},"self_harm":{"filtered":false,"severity":"safe"},"sexual":{"filtered":false,"severity":"safe"},"violence":{"filtered":false,"severity":"safe"}},"delta":{"content":"Capital"},"finish_reason":null,"index":0,"logprobs":null}],"created":1762317021,"id":"chatcmpl-CYPS1lijGoK8gd9lYzY3r9Sx50nbt","model":"gpt-5-nano-2025-08-07","obfuscation":"NNpA6Dj2U","object":"chat.completion.chunk","system_fingerprint":null,"usage":null}
- {"choices":[{"content_filter_results":{"hate":{"filtered":false,"severity":"safe"},"self_harm":{"filtered":false,"severity":"safe"},"sexual":{"filtered":false,"severity":"safe"},"violence":{"filtered":false,"severity":"safe"}},"delta":{"content":" of"},"finish_reason":null,"index":0,"logprobs":null}],"created":1762317021,"id":"chatcmpl-CYPS1lijGoK8gd9lYzY3r9Sx50nbt","model":"gpt-5-nano-2025-08-07","obfuscation":"etvV32yk5dbxb","object":"chat.completion.chunk","system_fingerprint":null,"usage":null}
- {"choices":[{"content_filter_results":{"hate":{"filtered":false,"severity":"safe"},"self_harm":{"filtered":false,"severity":"safe"},"sexual":{"filtered":false,"severity":"safe"},"violence":{"filtered":false,"severity":"safe"}},"delta":{"content":" Denmark"},"finish_reason":null,"index":0,"logprobs":null}],"created":1762317021,"id":"chatcmpl-CYPS1lijGoK8gd9lYzY3r9Sx50nbt","model":"gpt-5-nano-2025-08-07","obfuscation":"iDOuV7Jz","object":"chat.completion.chunk","system_fingerprint":null,"usage":null}
- {"choices":[{"content_filter_results":{"hate":{"filtered":false,"severity":"safe"},"self_harm":{"filtered":false,"severity":"safe"},"sexual":{"filtered":false,"severity":"safe"},"violence":{"filtered":false,"severity":"safe"}},"delta":{"content":"."},"finish_reason":null,"index":0,"logprobs":null}],"created":1762317021,"id":"chatcmpl-CYPS1lijGoK8gd9lYzY3r9Sx50nbt","model":"gpt-5-nano-2025-08-07","obfuscation":"ywLH2r1kcaeOOkq","object":"chat.completion.chunk","system_fingerprint":null,"usage":null}
- {"choices":[{"content_filter_results":{},"delta":{},"finish_reason":"stop","index":0,"logprobs":null}],"created":1762317021,"id":"chatcmpl-CYPS1lijGoK8gd9lYzY3r9Sx50nbt","model":"gpt-5-nano-2025-08-07","obfuscation":"Zarov0xJhP","object":"chat.completion.chunk","system_fingerprint":null,"usage":null}
- {"choices":[],"created":1762317021,"id":"chatcmpl-CYPS1lijGoK8gd9lYzY3r9Sx50nbt","model":"gpt-5-nano-2025-08-07","obfuscation":"DjqQ9RbEQMJ3PX","object":"chat.completion.chunk","system_fingerprint":null,"usage":{"completion_tokens":78,"completion_tokens_details":{"accepted_prediction_tokens":0,"audio_tokens":0,"reasoning_tokens":64,"rejected_prediction_tokens":0},"prompt_tokens":15,"prompt_tokens_details":{"audio_tokens":0,"cached_tokens":0},"total_tokens":93}}
package/src/chat/__snapshots__/openai-chat-language-model.test.ts.snap DELETED
@@ -1,88 +0,0 @@
- // Vitest Snapshot v1, https://vitest.dev/guide/snapshot.html
-
- exports[`doStream > should set .modelId for model-router request 1`] = `
- [
- {
- "type": "stream-start",
- "warnings": [],
- },
- {
- "id": "chatcmpl-CYPS1lijGoK8gd9lYzY3r9Sx50nbt",
- "modelId": "gpt-5-nano-2025-08-07",
- "timestamp": 2025-11-05T04:30:21.000Z,
- "type": "response-metadata",
- },
- {
- "id": "0",
- "type": "text-start",
- },
- {
- "delta": "",
- "id": "0",
- "type": "text-delta",
- },
- {
- "delta": "Capital",
- "id": "0",
- "type": "text-delta",
- },
- {
- "delta": " of",
- "id": "0",
- "type": "text-delta",
- },
- {
- "delta": " Denmark",
- "id": "0",
- "type": "text-delta",
- },
- {
- "delta": ".",
- "id": "0",
- "type": "text-delta",
- },
- {
- "id": "0",
- "type": "text-end",
- },
- {
- "finishReason": {
- "raw": "stop",
- "unified": "stop",
- },
- "providerMetadata": {
- "openai": {
- "acceptedPredictionTokens": 0,
- "rejectedPredictionTokens": 0,
- },
- },
- "type": "finish",
- "usage": {
- "inputTokens": {
- "cacheRead": 0,
- "cacheWrite": undefined,
- "noCache": 15,
- "total": 15,
- },
- "outputTokens": {
- "reasoning": 64,
- "text": 14,
- "total": 78,
- },
- "raw": {
- "completion_tokens": 78,
- "completion_tokens_details": {
- "accepted_prediction_tokens": 0,
- "reasoning_tokens": 64,
- "rejected_prediction_tokens": 0,
- },
- "prompt_tokens": 15,
- "prompt_tokens_details": {
- "cached_tokens": 0,
- },
- "total_tokens": 93,
- },
- },
- },
- ]
- `;
package/src/chat/convert-to-openai-chat-messages.test.ts DELETED
@@ -1,516 +0,0 @@
- import { convertToOpenAIChatMessages } from './convert-to-openai-chat-messages';
- import { describe, it, expect } from 'vitest';
-
- describe('system messages', () => {
- it('should forward system messages', async () => {
- const result = convertToOpenAIChatMessages({
- prompt: [{ role: 'system', content: 'You are a helpful assistant.' }],
- });
-
- expect(result.messages).toEqual([
- { role: 'system', content: 'You are a helpful assistant.' },
- ]);
- });
-
- it('should convert system messages to developer messages when requested', async () => {
- const result = convertToOpenAIChatMessages({
- prompt: [{ role: 'system', content: 'You are a helpful assistant.' }],
- systemMessageMode: 'developer',
- });
-
- expect(result.messages).toEqual([
- { role: 'developer', content: 'You are a helpful assistant.' },
- ]);
- });
-
- it('should remove system messages when requested', async () => {
- const result = convertToOpenAIChatMessages({
- prompt: [{ role: 'system', content: 'You are a helpful assistant.' }],
- systemMessageMode: 'remove',
- });
-
- expect(result.messages).toEqual([]);
- });
- });
-
- describe('user messages', () => {
- it('should convert messages with only a text part to a string content', async () => {
- const result = convertToOpenAIChatMessages({
- prompt: [
- {
- role: 'user',
- content: [{ type: 'text', text: 'Hello' }],
- },
- ],
- });
-
- expect(result.messages).toEqual([{ role: 'user', content: 'Hello' }]);
- });
-
- it('should convert messages with image parts', async () => {
- const result = convertToOpenAIChatMessages({
- prompt: [
- {
- role: 'user',
- content: [
- { type: 'text', text: 'Hello' },
- {
- type: 'file',
- mediaType: 'image/png',
- data: Buffer.from([0, 1, 2, 3]).toString('base64'),
- },
- ],
- },
- ],
- });
-
- expect(result.messages).toEqual([
- {
- role: 'user',
- content: [
- { type: 'text', text: 'Hello' },
- {
- type: 'image_url',
- image_url: { url: 'data:image/png;base64,AAECAw==' },
- },
- ],
- },
- ]);
- });
-
- it('should add image detail when specified through extension', async () => {
- const result = convertToOpenAIChatMessages({
- prompt: [
- {
- role: 'user',
- content: [
- {
- type: 'file',
- mediaType: 'image/png',
- data: Buffer.from([0, 1, 2, 3]).toString('base64'),
- providerOptions: {
- openai: {
- imageDetail: 'low',
- },
- },
- },
- ],
- },
- ],
- });
-
- expect(result.messages).toEqual([
- {
- role: 'user',
- content: [
- {
- type: 'image_url',
- image_url: {
- url: 'data:image/png;base64,AAECAw==',
- detail: 'low',
- },
- },
- ],
- },
- ]);
- });
-
- describe('file parts', () => {
- it('should throw for unsupported mime types', () => {
- expect(() =>
- convertToOpenAIChatMessages({
- prompt: [
- {
- role: 'user',
- content: [
- {
- type: 'file',
- data: 'AAECAw==',
- mediaType: 'application/something',
- },
- ],
- },
- ],
- }),
- ).toThrow('file part media type application/something');
- });
-
- it('should throw for URL data', () => {
- expect(() =>
- convertToOpenAIChatMessages({
- prompt: [
- {
- role: 'user',
- content: [
- {
- type: 'file',
- data: new URL('https://example.com/foo.wav'),
- mediaType: 'audio/wav',
- },
- ],
- },
- ],
- }),
- ).toThrow('audio file parts with URLs');
- });
-
- it('should add audio content for audio/wav file parts', () => {
- const result = convertToOpenAIChatMessages({
- prompt: [
- {
- role: 'user',
- content: [
- {
- type: 'file',
- data: 'AAECAw==',
- mediaType: 'audio/wav',
- },
- ],
- },
- ],
- });
-
- expect(result.messages).toEqual([
- {
- role: 'user',
- content: [
- {
- type: 'input_audio',
- input_audio: { data: 'AAECAw==', format: 'wav' },
- },
- ],
- },
- ]);
- });
-
- it('should add audio content for audio/mpeg file parts', () => {
- const result = convertToOpenAIChatMessages({
- prompt: [
- {
- role: 'user',
- content: [
- {
- type: 'file',
- data: 'AAECAw==',
- mediaType: 'audio/mpeg',
- },
- ],
- },
- ],
- });
-
- expect(result.messages).toEqual([
- {
- role: 'user',
- content: [
- {
- type: 'input_audio',
- input_audio: { data: 'AAECAw==', format: 'mp3' },
- },
- ],
- },
- ]);
- });
-
- it('should add audio content for audio/mp3 file parts', () => {
- const result = convertToOpenAIChatMessages({
- prompt: [
- {
- role: 'user',
- content: [
- {
- type: 'file',
- data: 'AAECAw==',
- mediaType: 'audio/mp3', // not official but sometimes used
- },
- ],
- },
- ],
- });
-
- expect(result.messages).toEqual([
- {
- role: 'user',
- content: [
- {
- type: 'input_audio',
- input_audio: { data: 'AAECAw==', format: 'mp3' },
- },
- ],
- },
- ]);
- });
-
- it('should convert messages with PDF file parts', async () => {
- const base64Data = 'AQIDBAU='; // Base64 encoding of pdfData
-
- const result = convertToOpenAIChatMessages({
- prompt: [
- {
- role: 'user',
- content: [
- {
- type: 'file',
- mediaType: 'application/pdf',
- data: base64Data,
- filename: 'document.pdf',
- },
- ],
- },
- ],
- systemMessageMode: 'system',
- });
-
- expect(result.messages).toEqual([
- {
- role: 'user',
- content: [
- {
- type: 'file',
- file: {
- filename: 'document.pdf',
- file_data: 'data:application/pdf;base64,AQIDBAU=',
- },
- },
- ],
- },
- ]);
- });
-
- it('should convert messages with binary PDF file parts', async () => {
- const data = Uint8Array.from([1, 2, 3, 4, 5]);
-
- const result = convertToOpenAIChatMessages({
- prompt: [
- {
- role: 'user',
- content: [
- {
- type: 'file',
- mediaType: 'application/pdf',
- data,
- filename: 'document.pdf',
- },
- ],
- },
- ],
- systemMessageMode: 'system',
- });
-
- expect(result.messages).toEqual([
- {
- role: 'user',
- content: [
- {
- type: 'file',
- file: {
- filename: 'document.pdf',
- file_data: 'data:application/pdf;base64,AQIDBAU=',
- },
- },
- ],
- },
- ]);
- });
-
- it('should convert messages with PDF file parts using file_id', async () => {
- const result = convertToOpenAIChatMessages({
- prompt: [
- {
- role: 'user',
- content: [
- {
- type: 'file',
- mediaType: 'application/pdf',
- data: 'file-pdf-12345',
- },
- ],
- },
- ],
- });
-
- expect(result.messages).toEqual([
- {
- role: 'user',
- content: [
- {
- type: 'file',
- file: {
- file_id: 'file-pdf-12345',
- },
- },
- ],
- },
- ]);
- });
-
- it('should use default filename for PDF file parts when not provided', async () => {
- const base64Data = 'AQIDBAU=';
-
- const result = convertToOpenAIChatMessages({
- prompt: [
- {
- role: 'user',
- content: [
- {
- type: 'file',
- mediaType: 'application/pdf',
- data: base64Data,
- },
- ],
- },
- ],
- systemMessageMode: 'system',
- });
-
- expect(result.messages).toEqual([
- {
- role: 'user',
- content: [
- {
- type: 'file',
- file: {
- filename: 'part-0.pdf',
- file_data: 'data:application/pdf;base64,AQIDBAU=',
- },
- },
- ],
- },
- ]);
- });
-
- it('should throw error for unsupported file types', async () => {
- const base64Data = 'AQIDBAU=';
-
- expect(() => {
- convertToOpenAIChatMessages({
- prompt: [
- {
- role: 'user',
- content: [
- {
- type: 'file',
- mediaType: 'text/plain',
- data: base64Data,
- },
- ],
- },
- ],
- systemMessageMode: 'system',
- });
- }).toThrow('file part media type text/plain');
- });
-
- it('should throw error for file URLs', async () => {
- expect(() => {
- convertToOpenAIChatMessages({
- prompt: [
- {
- role: 'user',
- content: [
- {
- type: 'file',
- mediaType: 'application/pdf',
- data: new URL('https://example.com/document.pdf'),
- },
- ],
- },
- ],
- systemMessageMode: 'system',
- });
- }).toThrow('PDF file parts with URLs');
- });
- });
- });
-
- describe('tool calls', () => {
- it('should stringify arguments to tool calls', () => {
- const result = convertToOpenAIChatMessages({
- prompt: [
- {
- role: 'assistant',
- content: [
- {
- type: 'tool-call',
- input: { foo: 'bar123' },
- toolCallId: 'quux',
- toolName: 'thwomp',
- },
- ],
- },
- {
- role: 'tool',
- content: [
- {
- type: 'tool-result',
- toolCallId: 'quux',
- toolName: 'thwomp',
- output: { type: 'json', value: { oof: '321rab' } },
- },
- ],
- },
- ],
- });
-
- expect(result.messages).toEqual([
- {
- role: 'assistant',
- content: '',
- tool_calls: [
- {
- type: 'function',
- id: 'quux',
- function: {
- name: 'thwomp',
- arguments: JSON.stringify({ foo: 'bar123' }),
- },
- },
- ],
- },
- {
- role: 'tool',
- content: JSON.stringify({ oof: '321rab' }),
- tool_call_id: 'quux',
- },
- ]);
- });
-
- it('should handle different tool output types', () => {
- const result = convertToOpenAIChatMessages({
- prompt: [
- {
- role: 'tool',
- content: [
- {
- type: 'tool-result',
- toolCallId: 'text-tool',
- toolName: 'text-tool',
- output: { type: 'text', value: 'Hello world' },
- },
- {
- type: 'tool-result',
- toolCallId: 'error-tool',
- toolName: 'error-tool',
- output: { type: 'error-text', value: 'Something went wrong' },
- },
- ],
- },
- ],
- });
-
- expect(result.messages).toMatchInlineSnapshot(`
- [
- {
- "content": "Hello world",
- "role": "tool",
- "tool_call_id": "text-tool",
- },
- {
- "content": "Something went wrong",
- "role": "tool",
- "tool_call_id": "error-tool",
- },
- ]
- `);
- });
- });