@ai-sdk/openai 3.0.13 → 3.0.15

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (118)
  1. package/CHANGELOG.md +12 -0
  2. package/dist/index.d.mts +1 -1
  3. package/dist/index.d.ts +1 -1
  4. package/dist/index.js +1 -1
  5. package/dist/index.js.map +1 -1
  6. package/dist/index.mjs +1 -1
  7. package/dist/index.mjs.map +1 -1
  8. package/dist/internal/index.d.mts +1 -1
  9. package/dist/internal/index.d.ts +1 -1
  10. package/dist/internal/index.js.map +1 -1
  11. package/dist/internal/index.mjs.map +1 -1
  12. package/package.json +5 -4
  13. package/src/chat/__fixtures__/azure-model-router.1.chunks.txt +8 -0
  14. package/src/chat/__snapshots__/openai-chat-language-model.test.ts.snap +88 -0
  15. package/src/chat/convert-openai-chat-usage.ts +57 -0
  16. package/src/chat/convert-to-openai-chat-messages.test.ts +516 -0
  17. package/src/chat/convert-to-openai-chat-messages.ts +225 -0
  18. package/src/chat/get-response-metadata.ts +15 -0
  19. package/src/chat/map-openai-finish-reason.ts +19 -0
  20. package/src/chat/openai-chat-api.ts +198 -0
  21. package/src/chat/openai-chat-language-model.test.ts +3496 -0
  22. package/src/chat/openai-chat-language-model.ts +700 -0
  23. package/src/chat/openai-chat-options.ts +186 -0
  24. package/src/chat/openai-chat-prepare-tools.test.ts +322 -0
  25. package/src/chat/openai-chat-prepare-tools.ts +84 -0
  26. package/src/chat/openai-chat-prompt.ts +70 -0
  27. package/src/completion/convert-openai-completion-usage.ts +46 -0
  28. package/src/completion/convert-to-openai-completion-prompt.ts +93 -0
  29. package/src/completion/get-response-metadata.ts +15 -0
  30. package/src/completion/map-openai-finish-reason.ts +19 -0
  31. package/src/completion/openai-completion-api.ts +81 -0
  32. package/src/completion/openai-completion-language-model.test.ts +752 -0
  33. package/src/completion/openai-completion-language-model.ts +336 -0
  34. package/src/completion/openai-completion-options.ts +58 -0
  35. package/src/embedding/__snapshots__/openai-embedding-model.test.ts.snap +43 -0
  36. package/src/embedding/openai-embedding-api.ts +13 -0
  37. package/src/embedding/openai-embedding-model.test.ts +146 -0
  38. package/src/embedding/openai-embedding-model.ts +95 -0
  39. package/src/embedding/openai-embedding-options.ts +30 -0
  40. package/src/image/openai-image-api.ts +35 -0
  41. package/src/image/openai-image-model.test.ts +722 -0
  42. package/src/image/openai-image-model.ts +305 -0
  43. package/src/image/openai-image-options.ts +28 -0
  44. package/src/index.ts +9 -0
  45. package/src/internal/index.ts +19 -0
  46. package/src/openai-config.ts +18 -0
  47. package/src/openai-error.test.ts +34 -0
  48. package/src/openai-error.ts +22 -0
  49. package/src/openai-language-model-capabilities.test.ts +93 -0
  50. package/src/openai-language-model-capabilities.ts +54 -0
  51. package/src/openai-provider.test.ts +98 -0
  52. package/src/openai-provider.ts +270 -0
  53. package/src/openai-tools.ts +114 -0
  54. package/src/responses/__fixtures__/openai-apply-patch-tool-delete.1.chunks.txt +5 -0
  55. package/src/responses/__fixtures__/openai-apply-patch-tool.1.chunks.txt +38 -0
  56. package/src/responses/__fixtures__/openai-apply-patch-tool.1.json +69 -0
  57. package/src/responses/__fixtures__/openai-code-interpreter-tool.1.chunks.txt +393 -0
  58. package/src/responses/__fixtures__/openai-code-interpreter-tool.1.json +137 -0
  59. package/src/responses/__fixtures__/openai-error.1.chunks.txt +4 -0
  60. package/src/responses/__fixtures__/openai-error.1.json +8 -0
  61. package/src/responses/__fixtures__/openai-file-search-tool.1.chunks.txt +94 -0
  62. package/src/responses/__fixtures__/openai-file-search-tool.1.json +89 -0
  63. package/src/responses/__fixtures__/openai-file-search-tool.2.chunks.txt +93 -0
  64. package/src/responses/__fixtures__/openai-file-search-tool.2.json +112 -0
  65. package/src/responses/__fixtures__/openai-image-generation-tool.1.chunks.txt +16 -0
  66. package/src/responses/__fixtures__/openai-image-generation-tool.1.json +96 -0
  67. package/src/responses/__fixtures__/openai-local-shell-tool.1.chunks.txt +7 -0
  68. package/src/responses/__fixtures__/openai-local-shell-tool.1.json +70 -0
  69. package/src/responses/__fixtures__/openai-mcp-tool-approval.1.chunks.txt +11 -0
  70. package/src/responses/__fixtures__/openai-mcp-tool-approval.1.json +169 -0
  71. package/src/responses/__fixtures__/openai-mcp-tool-approval.2.chunks.txt +123 -0
  72. package/src/responses/__fixtures__/openai-mcp-tool-approval.2.json +176 -0
  73. package/src/responses/__fixtures__/openai-mcp-tool-approval.3.chunks.txt +11 -0
  74. package/src/responses/__fixtures__/openai-mcp-tool-approval.3.json +169 -0
  75. package/src/responses/__fixtures__/openai-mcp-tool-approval.4.chunks.txt +84 -0
  76. package/src/responses/__fixtures__/openai-mcp-tool-approval.4.json +182 -0
  77. package/src/responses/__fixtures__/openai-mcp-tool.1.chunks.txt +373 -0
  78. package/src/responses/__fixtures__/openai-mcp-tool.1.json +159 -0
  79. package/src/responses/__fixtures__/openai-reasoning-encrypted-content.1.chunks.txt +110 -0
  80. package/src/responses/__fixtures__/openai-reasoning-encrypted-content.1.json +117 -0
  81. package/src/responses/__fixtures__/openai-shell-tool.1.chunks.txt +182 -0
  82. package/src/responses/__fixtures__/openai-shell-tool.1.json +73 -0
  83. package/src/responses/__fixtures__/openai-web-search-tool.1.chunks.txt +185 -0
  84. package/src/responses/__fixtures__/openai-web-search-tool.1.json +266 -0
  85. package/src/responses/__snapshots__/openai-responses-language-model.test.ts.snap +10955 -0
  86. package/src/responses/convert-openai-responses-usage.ts +53 -0
  87. package/src/responses/convert-to-openai-responses-input.test.ts +2976 -0
  88. package/src/responses/convert-to-openai-responses-input.ts +578 -0
  89. package/src/responses/map-openai-responses-finish-reason.ts +22 -0
  90. package/src/responses/openai-responses-api.test.ts +89 -0
  91. package/src/responses/openai-responses-api.ts +1086 -0
  92. package/src/responses/openai-responses-language-model.test.ts +6927 -0
  93. package/src/responses/openai-responses-language-model.ts +1932 -0
  94. package/src/responses/openai-responses-options.ts +312 -0
  95. package/src/responses/openai-responses-prepare-tools.test.ts +924 -0
  96. package/src/responses/openai-responses-prepare-tools.ts +264 -0
  97. package/src/responses/openai-responses-provider-metadata.ts +39 -0
  98. package/src/speech/openai-speech-api.ts +38 -0
  99. package/src/speech/openai-speech-model.test.ts +202 -0
  100. package/src/speech/openai-speech-model.ts +137 -0
  101. package/src/speech/openai-speech-options.ts +22 -0
  102. package/src/tool/apply-patch.ts +141 -0
  103. package/src/tool/code-interpreter.ts +104 -0
  104. package/src/tool/file-search.ts +145 -0
  105. package/src/tool/image-generation.ts +126 -0
  106. package/src/tool/local-shell.test-d.ts +20 -0
  107. package/src/tool/local-shell.ts +72 -0
  108. package/src/tool/mcp.ts +125 -0
  109. package/src/tool/shell.ts +85 -0
  110. package/src/tool/web-search-preview.ts +139 -0
  111. package/src/tool/web-search.test-d.ts +13 -0
  112. package/src/tool/web-search.ts +179 -0
  113. package/src/transcription/openai-transcription-api.ts +37 -0
  114. package/src/transcription/openai-transcription-model.test.ts +507 -0
  115. package/src/transcription/openai-transcription-model.ts +232 -0
  116. package/src/transcription/openai-transcription-options.ts +50 -0
  117. package/src/transcription/transcription-test.mp3 +0 -0
  118. package/src/version.ts +6 -0
@@ -0,0 +1,722 @@
1
+ import { createTestServer } from '@ai-sdk/test-server/with-vitest';
2
+ import { createOpenAI } from '../openai-provider';
3
+ import { OpenAIImageModel } from './openai-image-model';
4
+ import { describe, it, expect, vi } from 'vitest';
5
+
6
+ vi.mock('../version', () => ({
7
+ VERSION: '0.0.0-test',
8
+ }));
9
+
10
+ const prompt = 'A cute baby sea otter';
11
+
12
+ const provider = createOpenAI({ apiKey: 'test-api-key' });
13
+ const model = provider.image('dall-e-3');
14
+
15
+ const server = createTestServer({
16
+ 'https://api.openai.com/v1/images/generations': {},
17
+ 'https://api.openai.com/v1/images/edits': {},
18
+ });
19
+
20
+ describe('doGenerate', () => {
21
+ function prepareJsonResponse({
22
+ headers,
23
+ }: {
24
+ headers?: Record<string, string>;
25
+ } = {}) {
26
+ server.urls['https://api.openai.com/v1/images/generations'].response = {
27
+ type: 'json-value',
28
+ headers,
29
+ body: {
30
+ created: 1733837122,
31
+ data: [
32
+ {
33
+ revised_prompt:
34
+ 'A charming visual illustration of a baby sea otter swimming joyously.',
35
+ b64_json: 'base64-image-1',
36
+ },
37
+ {
38
+ b64_json: 'base64-image-2',
39
+ },
40
+ ],
41
+ },
42
+ };
43
+ }
44
+
45
+ it('should pass the model and the settings', async () => {
46
+ prepareJsonResponse();
47
+
48
+ await model.doGenerate({
49
+ prompt,
50
+ files: undefined,
51
+ mask: undefined,
52
+ n: 1,
53
+ size: '1024x1024',
54
+ aspectRatio: undefined,
55
+ seed: undefined,
56
+ providerOptions: { openai: { style: 'vivid' } },
57
+ });
58
+
59
+ expect(await server.calls[0].requestBodyJson).toStrictEqual({
60
+ model: 'dall-e-3',
61
+ prompt,
62
+ n: 1,
63
+ size: '1024x1024',
64
+ style: 'vivid',
65
+ response_format: 'b64_json',
66
+ });
67
+ });
68
+
69
+ it('should pass headers', async () => {
70
+ prepareJsonResponse();
71
+
72
+ const provider = createOpenAI({
73
+ apiKey: 'test-api-key',
74
+ organization: 'test-organization',
75
+ project: 'test-project',
76
+ headers: {
77
+ 'Custom-Provider-Header': 'provider-header-value',
78
+ },
79
+ });
80
+
81
+ await provider.image('dall-e-3').doGenerate({
82
+ prompt,
83
+ files: undefined,
84
+ mask: undefined,
85
+ n: 1,
86
+ size: '1024x1024',
87
+ aspectRatio: undefined,
88
+ seed: undefined,
89
+ providerOptions: { openai: { style: 'vivid' } },
90
+ headers: {
91
+ 'Custom-Request-Header': 'request-header-value',
92
+ },
93
+ });
94
+
95
+ expect(server.calls[0].requestHeaders).toStrictEqual({
96
+ authorization: 'Bearer test-api-key',
97
+ 'content-type': 'application/json',
98
+ 'custom-provider-header': 'provider-header-value',
99
+ 'custom-request-header': 'request-header-value',
100
+ 'openai-organization': 'test-organization',
101
+ 'openai-project': 'test-project',
102
+ });
103
+ expect(server.calls[0].requestUserAgent).toContain(
104
+ `ai-sdk/openai/0.0.0-test`,
105
+ );
106
+ });
107
+
108
+ it('should extract the generated images', async () => {
109
+ prepareJsonResponse();
110
+
111
+ const result = await model.doGenerate({
112
+ prompt,
113
+ files: undefined,
114
+ mask: undefined,
115
+ n: 1,
116
+ size: undefined,
117
+ aspectRatio: undefined,
118
+ seed: undefined,
119
+ providerOptions: {},
120
+ });
121
+
122
+ expect(result.images).toStrictEqual(['base64-image-1', 'base64-image-2']);
123
+ });
124
+
125
+ it('should return warnings for unsupported settings', async () => {
126
+ prepareJsonResponse();
127
+
128
+ const result = await model.doGenerate({
129
+ prompt,
130
+ files: undefined,
131
+ mask: undefined,
132
+ n: 1,
133
+ size: '1024x1024',
134
+ aspectRatio: '1:1',
135
+ seed: 123,
136
+ providerOptions: {},
137
+ });
138
+
139
+ expect(result.warnings).toMatchInlineSnapshot(`
140
+ [
141
+ {
142
+ "details": "This model does not support aspect ratio. Use \`size\` instead.",
143
+ "feature": "aspectRatio",
144
+ "type": "unsupported",
145
+ },
146
+ {
147
+ "feature": "seed",
148
+ "type": "unsupported",
149
+ },
150
+ ]
151
+ `);
152
+ });
153
+
154
+ it('should respect maxImagesPerCall setting', async () => {
155
+ const defaultModel = provider.image('dall-e-2');
156
+ expect(defaultModel.maxImagesPerCall).toBe(10); // dall-e-2's default from settings
157
+
158
+ const unknownModel = provider.image('unknown-model' as any);
159
+ expect(unknownModel.maxImagesPerCall).toBe(1); // fallback for unknown models
160
+ });
161
+
162
+ it('should include response data with timestamp, modelId and headers', async () => {
163
+ prepareJsonResponse({
164
+ headers: {
165
+ 'x-request-id': 'test-request-id',
166
+ 'x-ratelimit-remaining': '123',
167
+ },
168
+ });
169
+
170
+ const testDate = new Date('2024-03-15T12:00:00Z');
171
+
172
+ const customModel = new OpenAIImageModel('dall-e-3', {
173
+ provider: 'test-provider',
174
+ url: () => 'https://api.openai.com/v1/images/generations',
175
+ headers: () => ({}),
176
+ _internal: {
177
+ currentDate: () => testDate,
178
+ },
179
+ });
180
+
181
+ const result = await customModel.doGenerate({
182
+ prompt,
183
+ files: undefined,
184
+ mask: undefined,
185
+ n: 1,
186
+ size: '1024x1024',
187
+ aspectRatio: undefined,
188
+ seed: undefined,
189
+ providerOptions: {},
190
+ });
191
+
192
+ expect(result.response).toStrictEqual({
193
+ timestamp: testDate,
194
+ modelId: 'dall-e-3',
195
+ headers: {
196
+ 'content-length': '180',
197
+ 'content-type': 'application/json',
198
+ 'x-request-id': 'test-request-id',
199
+ 'x-ratelimit-remaining': '123',
200
+ },
201
+ });
202
+ });
203
+
204
+ it('should use real date when no custom date provider is specified', async () => {
205
+ prepareJsonResponse();
206
+ const beforeDate = new Date();
207
+
208
+ const result = await model.doGenerate({
209
+ prompt,
210
+ files: undefined,
211
+ mask: undefined,
212
+ n: 1,
213
+ size: '1024x1024',
214
+ aspectRatio: undefined,
215
+ seed: undefined,
216
+ providerOptions: {},
217
+ });
218
+
219
+ const afterDate = new Date();
220
+
221
+ expect(result.response.timestamp.getTime()).toBeGreaterThanOrEqual(
222
+ beforeDate.getTime(),
223
+ );
224
+ expect(result.response.timestamp.getTime()).toBeLessThanOrEqual(
225
+ afterDate.getTime(),
226
+ );
227
+ expect(result.response.modelId).toBe('dall-e-3');
228
+ });
229
+
230
+ it('should not include response_format for gpt-image-1', async () => {
231
+ prepareJsonResponse();
232
+
233
+ const gptImageModel = provider.image('gpt-image-1');
234
+ await gptImageModel.doGenerate({
235
+ prompt,
236
+ files: undefined,
237
+ mask: undefined,
238
+ n: 1,
239
+ size: '1024x1024',
240
+ aspectRatio: undefined,
241
+ seed: undefined,
242
+ providerOptions: {},
243
+ });
244
+
245
+ const requestBody =
246
+ await server.calls[server.calls.length - 1].requestBodyJson;
247
+ expect(requestBody).toStrictEqual({
248
+ model: 'gpt-image-1',
249
+ prompt,
250
+ n: 1,
251
+ size: '1024x1024',
252
+ });
253
+
254
+ expect(requestBody).not.toHaveProperty('response_format');
255
+ });
256
+
257
+ it('should not include response_format for date-suffixed gpt-image model IDs (Azure deployment names)', async () => {
258
+ prepareJsonResponse();
259
+
260
+ // Azure OpenAI allows custom deployment names like 'gpt-image-1.5-2025-12-16'
261
+ const azureDeploymentModel = provider.image('gpt-image-1.5-2025-12-16');
262
+ await azureDeploymentModel.doGenerate({
263
+ prompt,
264
+ files: undefined,
265
+ mask: undefined,
266
+ n: 1,
267
+ size: '1024x1024',
268
+ aspectRatio: undefined,
269
+ seed: undefined,
270
+ providerOptions: {},
271
+ });
272
+
273
+ const requestBody =
274
+ await server.calls[server.calls.length - 1].requestBodyJson;
275
+ expect(requestBody).toStrictEqual({
276
+ model: 'gpt-image-1.5-2025-12-16',
277
+ prompt,
278
+ n: 1,
279
+ size: '1024x1024',
280
+ });
281
+
282
+ expect(requestBody).not.toHaveProperty('response_format');
283
+ });
284
+
285
+ it('should handle null revised_prompt responses', async () => {
286
+ server.urls['https://api.openai.com/v1/images/generations'].response = {
287
+ type: 'json-value',
288
+ body: {
289
+ created: 1733837122,
290
+ data: [
291
+ {
292
+ revised_prompt: null,
293
+ b64_json: 'base64-image-1',
294
+ },
295
+ ],
296
+ },
297
+ };
298
+
299
+ const result = await provider.image('gpt-image-1').doGenerate({
300
+ prompt,
301
+ files: undefined,
302
+ mask: undefined,
303
+ n: 1,
304
+ size: '1024x1024',
305
+ aspectRatio: undefined,
306
+ seed: undefined,
307
+ providerOptions: {},
308
+ });
309
+
310
+ expect(result.images).toStrictEqual(['base64-image-1']);
311
+ expect(result.warnings).toStrictEqual([]);
312
+ expect(result.providerMetadata).toStrictEqual({
313
+ openai: {
314
+ images: [
315
+ {
316
+ created: 1733837122,
317
+ size: undefined,
318
+ quality: undefined,
319
+ background: undefined,
320
+ outputFormat: undefined,
321
+ },
322
+ ],
323
+ },
324
+ });
325
+ });
326
+
327
+ it('should include response_format for dall-e-3', async () => {
328
+ prepareJsonResponse();
329
+
330
+ await model.doGenerate({
331
+ prompt,
332
+ files: undefined,
333
+ mask: undefined,
334
+ n: 1,
335
+ size: '1024x1024',
336
+ aspectRatio: undefined,
337
+ seed: undefined,
338
+ providerOptions: {},
339
+ });
340
+
341
+ const requestBody =
342
+ await server.calls[server.calls.length - 1].requestBodyJson;
343
+ expect(requestBody).toHaveProperty('response_format', 'b64_json');
344
+ });
345
+
346
+ it('should return image meta data', async () => {
347
+ prepareJsonResponse();
348
+
349
+ const result = await model.doGenerate({
350
+ prompt,
351
+ files: undefined,
352
+ mask: undefined,
353
+ n: 1,
354
+ size: '1024x1024',
355
+ aspectRatio: undefined,
356
+ seed: undefined,
357
+ providerOptions: { openai: { style: 'vivid' } },
358
+ });
359
+
360
+ expect(result.providerMetadata).toStrictEqual({
361
+ openai: {
362
+ images: [
363
+ {
364
+ revisedPrompt:
365
+ 'A charming visual illustration of a baby sea otter swimming joyously.',
366
+ created: 1733837122,
367
+ size: undefined,
368
+ quality: undefined,
369
+ background: undefined,
370
+ outputFormat: undefined,
371
+ },
372
+ {
373
+ created: 1733837122,
374
+ size: undefined,
375
+ quality: undefined,
376
+ background: undefined,
377
+ outputFormat: undefined,
378
+ },
379
+ ],
380
+ },
381
+ });
382
+ });
383
+
384
+ it('should map OpenAI usage to usage', async () => {
385
+ server.urls['https://api.openai.com/v1/images/generations'].response = {
386
+ type: 'json-value',
387
+ body: {
388
+ created: 1733837122,
389
+ data: [
390
+ {
391
+ b64_json: 'base64-image-1',
392
+ },
393
+ ],
394
+ usage: {
395
+ input_tokens: 12,
396
+ output_tokens: 0,
397
+ total_tokens: 12,
398
+ input_tokens_details: {
399
+ image_tokens: 7,
400
+ text_tokens: 5,
401
+ },
402
+ },
403
+ },
404
+ };
405
+
406
+ const result = await provider.image('gpt-image-1').doGenerate({
407
+ prompt,
408
+ files: undefined,
409
+ mask: undefined,
410
+ n: 1,
411
+ size: '1024x1024',
412
+ aspectRatio: undefined,
413
+ seed: undefined,
414
+ providerOptions: {},
415
+ });
416
+
417
+ expect(result.usage).toStrictEqual({
418
+ inputTokens: 12,
419
+ outputTokens: 0,
420
+ totalTokens: 12,
421
+ });
422
+ });
423
+ });
424
+
425
+ describe('doGenerate - image editing', () => {
426
+ function prepareEditResponse({
427
+ headers,
428
+ }: {
429
+ headers?: Record<string, string>;
430
+ } = {}) {
431
+ server.urls['https://api.openai.com/v1/images/edits'].response = {
432
+ type: 'json-value',
433
+ headers,
434
+ body: {
435
+ created: 1733837122,
436
+ data: [
437
+ {
438
+ b64_json: 'edited-base64-image-1',
439
+ },
440
+ ],
441
+ },
442
+ };
443
+ }
444
+
445
+ it('should call /images/edits endpoint when files are provided', async () => {
446
+ prepareEditResponse();
447
+
448
+ await provider.image('gpt-image-1').doGenerate({
449
+ prompt,
450
+ files: [
451
+ {
452
+ type: 'file',
453
+ mediaType: 'image/png',
454
+ data: new Uint8Array([137, 80, 78, 71]), // PNG magic bytes
455
+ },
456
+ ],
457
+ mask: undefined,
458
+ n: 1,
459
+ size: '1024x1024',
460
+ aspectRatio: undefined,
461
+ seed: undefined,
462
+ providerOptions: {},
463
+ });
464
+
465
+ expect(server.calls[0].requestUrl).toBe(
466
+ 'https://api.openai.com/v1/images/edits',
467
+ );
468
+ });
469
+
470
+ it('should send image as form data with Uint8Array input', async () => {
471
+ prepareEditResponse();
472
+
473
+ await provider.image('gpt-image-1').doGenerate({
474
+ prompt,
475
+ files: [
476
+ {
477
+ type: 'file',
478
+ mediaType: 'image/png',
479
+ data: new Uint8Array([137, 80, 78, 71]),
480
+ },
481
+ ],
482
+ mask: undefined,
483
+ n: 1,
484
+ size: '1024x1024',
485
+ aspectRatio: undefined,
486
+ seed: undefined,
487
+ providerOptions: {},
488
+ });
489
+
490
+ expect(await server.calls[0].requestBodyMultipart).toMatchObject({
491
+ model: 'gpt-image-1',
492
+ prompt,
493
+ n: '1',
494
+ size: '1024x1024',
495
+ });
496
+ });
497
+
498
+ it('should send image as form data with base64 string input', async () => {
499
+ prepareEditResponse();
500
+
501
+ await provider.image('gpt-image-1').doGenerate({
502
+ prompt,
503
+ files: [
504
+ {
505
+ type: 'file',
506
+ mediaType: 'image/png',
507
+ data: 'iVBORw0KGgo=', // base64 encoded PNG header
508
+ },
509
+ ],
510
+ mask: undefined,
511
+ n: 1,
512
+ size: undefined,
513
+ aspectRatio: undefined,
514
+ seed: undefined,
515
+ providerOptions: {},
516
+ });
517
+
518
+ const multipart = await server.calls[0].requestBodyMultipart;
519
+ expect(multipart?.model).toBe('gpt-image-1');
520
+ expect(multipart?.image).toBeDefined();
521
+ });
522
+
523
+ it('should send multiple images as form data array', async () => {
524
+ prepareEditResponse();
525
+
526
+ await provider.image('gpt-image-1').doGenerate({
527
+ prompt,
528
+ files: [
529
+ {
530
+ type: 'file',
531
+ mediaType: 'image/png',
532
+ data: new Uint8Array([137, 80, 78, 71]),
533
+ },
534
+ {
535
+ type: 'file',
536
+ mediaType: 'image/jpeg',
537
+ data: new Uint8Array([255, 216, 255, 224]),
538
+ },
539
+ ],
540
+ mask: undefined,
541
+ n: 1,
542
+ size: undefined,
543
+ aspectRatio: undefined,
544
+ seed: undefined,
545
+ providerOptions: {},
546
+ });
547
+
548
+ const multipart = await server.calls[0].requestBodyMultipart;
549
+ expect(multipart?.['image[]']).toBeDefined();
550
+ });
551
+
552
+ it('should pass provider options in form data', async () => {
553
+ prepareEditResponse();
554
+
555
+ await provider.image('gpt-image-1').doGenerate({
556
+ prompt,
557
+ files: [
558
+ {
559
+ type: 'file',
560
+ mediaType: 'image/png',
561
+ data: new Uint8Array([137, 80, 78, 71]),
562
+ },
563
+ ],
564
+ mask: undefined,
565
+ n: 1,
566
+ size: '1024x1024',
567
+ aspectRatio: undefined,
568
+ seed: undefined,
569
+ providerOptions: {
570
+ openai: {
571
+ quality: 'high',
572
+ background: 'transparent',
573
+ },
574
+ },
575
+ });
576
+
577
+ expect(await server.calls[0].requestBodyMultipart).toMatchObject({
578
+ quality: 'high',
579
+ background: 'transparent',
580
+ });
581
+ });
582
+
583
+ it('should extract the edited images from response', async () => {
584
+ prepareEditResponse();
585
+
586
+ const result = await provider.image('gpt-image-1').doGenerate({
587
+ prompt,
588
+ files: [
589
+ {
590
+ type: 'file',
591
+ mediaType: 'image/png',
592
+ data: new Uint8Array([137, 80, 78, 71]),
593
+ },
594
+ ],
595
+ mask: undefined,
596
+ n: 1,
597
+ size: undefined,
598
+ aspectRatio: undefined,
599
+ seed: undefined,
600
+ providerOptions: {},
601
+ });
602
+
603
+ expect(result.images).toStrictEqual(['edited-base64-image-1']);
604
+ });
605
+
606
+ it('should include response metadata for edited images', async () => {
607
+ prepareEditResponse({
608
+ headers: {
609
+ 'x-request-id': 'edit-request-id',
610
+ },
611
+ });
612
+
613
+ const testDate = new Date('2024-03-15T12:00:00Z');
614
+
615
+ const customModel = new OpenAIImageModel('gpt-image-1', {
616
+ provider: 'test-provider',
617
+ url: ({ path }) => `https://api.openai.com/v1${path}`,
618
+ headers: () => ({}),
619
+ _internal: {
620
+ currentDate: () => testDate,
621
+ },
622
+ });
623
+
624
+ const result = await customModel.doGenerate({
625
+ prompt,
626
+ files: [
627
+ {
628
+ type: 'file',
629
+ mediaType: 'image/png',
630
+ data: new Uint8Array([137, 80, 78, 71]),
631
+ },
632
+ ],
633
+ mask: undefined,
634
+ n: 1,
635
+ size: undefined,
636
+ aspectRatio: undefined,
637
+ seed: undefined,
638
+ providerOptions: {},
639
+ });
640
+
641
+ expect(result.response.timestamp).toEqual(testDate);
642
+ expect(result.response.modelId).toBe('gpt-image-1');
643
+ expect(result.response.headers?.['x-request-id']).toBe('edit-request-id');
644
+ });
645
+
646
+ it('should return warnings for unsupported settings in edit mode', async () => {
647
+ prepareEditResponse();
648
+
649
+ const result = await provider.image('gpt-image-1').doGenerate({
650
+ prompt,
651
+ files: [
652
+ {
653
+ type: 'file',
654
+ mediaType: 'image/png',
655
+ data: new Uint8Array([137, 80, 78, 71]),
656
+ },
657
+ ],
658
+ mask: undefined,
659
+ n: 1,
660
+ size: '1024x1024',
661
+ aspectRatio: '16:9',
662
+ seed: 42,
663
+ providerOptions: {},
664
+ });
665
+
666
+ expect(result.warnings).toMatchInlineSnapshot(`
667
+ [
668
+ {
669
+ "details": "This model does not support aspect ratio. Use \`size\` instead.",
670
+ "feature": "aspectRatio",
671
+ "type": "unsupported",
672
+ },
673
+ {
674
+ "feature": "seed",
675
+ "type": "unsupported",
676
+ },
677
+ ]
678
+ `);
679
+ });
680
+
681
+ it('should return usage information for edited images', async () => {
682
+ server.urls['https://api.openai.com/v1/images/edits'].response = {
683
+ type: 'json-value',
684
+ body: {
685
+ created: 1733837122,
686
+ data: [
687
+ {
688
+ b64_json: 'edited-base64-image-1',
689
+ },
690
+ ],
691
+ usage: {
692
+ input_tokens: 25,
693
+ output_tokens: 0,
694
+ total_tokens: 25,
695
+ },
696
+ },
697
+ };
698
+
699
+ const result = await provider.image('gpt-image-1').doGenerate({
700
+ prompt,
701
+ files: [
702
+ {
703
+ type: 'file',
704
+ mediaType: 'image/png',
705
+ data: new Uint8Array([137, 80, 78, 71]),
706
+ },
707
+ ],
708
+ mask: undefined,
709
+ n: 1,
710
+ size: undefined,
711
+ aspectRatio: undefined,
712
+ seed: undefined,
713
+ providerOptions: {},
714
+ });
715
+
716
+ expect(result.usage).toStrictEqual({
717
+ inputTokens: 25,
718
+ outputTokens: 0,
719
+ totalTokens: 25,
720
+ });
721
+ });
722
+ });