@ai-sdk/openai 0.0.0-2f1ae29d-20260122140908 → 0.0.0-4115c213-20260122152721

This diff represents the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
Files changed (63)
  1. package/CHANGELOG.md +17 -2
  2. package/dist/index.js +15 -1
  3. package/dist/index.js.map +1 -1
  4. package/dist/index.mjs +15 -1
  5. package/dist/index.mjs.map +1 -1
  6. package/dist/internal/index.js +14 -0
  7. package/dist/internal/index.js.map +1 -1
  8. package/dist/internal/index.mjs +14 -0
  9. package/dist/internal/index.mjs.map +1 -1
  10. package/package.json +9 -5
  11. package/src/responses/convert-to-openai-responses-input.ts +20 -1
  12. package/src/chat/__fixtures__/azure-model-router.1.chunks.txt +0 -8
  13. package/src/chat/__snapshots__/openai-chat-language-model.test.ts.snap +0 -88
  14. package/src/chat/convert-to-openai-chat-messages.test.ts +0 -516
  15. package/src/chat/openai-chat-language-model.test.ts +0 -3496
  16. package/src/chat/openai-chat-prepare-tools.test.ts +0 -322
  17. package/src/completion/openai-completion-language-model.test.ts +0 -752
  18. package/src/embedding/__snapshots__/openai-embedding-model.test.ts.snap +0 -43
  19. package/src/embedding/openai-embedding-model.test.ts +0 -146
  20. package/src/image/openai-image-model.test.ts +0 -722
  21. package/src/openai-error.test.ts +0 -34
  22. package/src/openai-language-model-capabilities.test.ts +0 -93
  23. package/src/openai-provider.test.ts +0 -98
  24. package/src/responses/__fixtures__/openai-apply-patch-tool-delete.1.chunks.txt +0 -5
  25. package/src/responses/__fixtures__/openai-apply-patch-tool.1.chunks.txt +0 -38
  26. package/src/responses/__fixtures__/openai-apply-patch-tool.1.json +0 -69
  27. package/src/responses/__fixtures__/openai-code-interpreter-tool.1.chunks.txt +0 -393
  28. package/src/responses/__fixtures__/openai-code-interpreter-tool.1.json +0 -137
  29. package/src/responses/__fixtures__/openai-error.1.chunks.txt +0 -4
  30. package/src/responses/__fixtures__/openai-error.1.json +0 -8
  31. package/src/responses/__fixtures__/openai-file-search-tool.1.chunks.txt +0 -94
  32. package/src/responses/__fixtures__/openai-file-search-tool.1.json +0 -89
  33. package/src/responses/__fixtures__/openai-file-search-tool.2.chunks.txt +0 -93
  34. package/src/responses/__fixtures__/openai-file-search-tool.2.json +0 -112
  35. package/src/responses/__fixtures__/openai-image-generation-tool.1.chunks.txt +0 -16
  36. package/src/responses/__fixtures__/openai-image-generation-tool.1.json +0 -96
  37. package/src/responses/__fixtures__/openai-local-shell-tool.1.chunks.txt +0 -7
  38. package/src/responses/__fixtures__/openai-local-shell-tool.1.json +0 -70
  39. package/src/responses/__fixtures__/openai-mcp-tool-approval.1.chunks.txt +0 -11
  40. package/src/responses/__fixtures__/openai-mcp-tool-approval.1.json +0 -169
  41. package/src/responses/__fixtures__/openai-mcp-tool-approval.2.chunks.txt +0 -123
  42. package/src/responses/__fixtures__/openai-mcp-tool-approval.2.json +0 -176
  43. package/src/responses/__fixtures__/openai-mcp-tool-approval.3.chunks.txt +0 -11
  44. package/src/responses/__fixtures__/openai-mcp-tool-approval.3.json +0 -169
  45. package/src/responses/__fixtures__/openai-mcp-tool-approval.4.chunks.txt +0 -84
  46. package/src/responses/__fixtures__/openai-mcp-tool-approval.4.json +0 -182
  47. package/src/responses/__fixtures__/openai-mcp-tool.1.chunks.txt +0 -373
  48. package/src/responses/__fixtures__/openai-mcp-tool.1.json +0 -159
  49. package/src/responses/__fixtures__/openai-reasoning-encrypted-content.1.chunks.txt +0 -110
  50. package/src/responses/__fixtures__/openai-reasoning-encrypted-content.1.json +0 -117
  51. package/src/responses/__fixtures__/openai-shell-tool.1.chunks.txt +0 -182
  52. package/src/responses/__fixtures__/openai-shell-tool.1.json +0 -73
  53. package/src/responses/__fixtures__/openai-web-search-tool.1.chunks.txt +0 -185
  54. package/src/responses/__fixtures__/openai-web-search-tool.1.json +0 -266
  55. package/src/responses/__snapshots__/openai-responses-language-model.test.ts.snap +0 -10955
  56. package/src/responses/convert-to-openai-responses-input.test.ts +0 -2976
  57. package/src/responses/openai-responses-api.test.ts +0 -89
  58. package/src/responses/openai-responses-language-model.test.ts +0 -6927
  59. package/src/responses/openai-responses-prepare-tools.test.ts +0 -924
  60. package/src/speech/openai-speech-model.test.ts +0 -202
  61. package/src/tool/local-shell.test-d.ts +0 -20
  62. package/src/tool/web-search.test-d.ts +0 -13
  63. package/src/transcription/openai-transcription-model.test.ts +0 -507
package/src/completion/openai-completion-language-model.test.ts
@@ -1,752 +0,0 @@
- import { LanguageModelV3Prompt } from '@ai-sdk/provider';
- import { createTestServer } from '@ai-sdk/test-server/with-vitest';
- import {
-   convertReadableStreamToArray,
-   isNodeVersion,
- } from '@ai-sdk/provider-utils/test';
- import { createOpenAI } from '../openai-provider';
- import { describe, it, expect, vi } from 'vitest';
-
- vi.mock('../version', () => ({
-   VERSION: '0.0.0-test',
- }));
-
- const TEST_PROMPT: LanguageModelV3Prompt = [
-   { role: 'user', content: [{ type: 'text', text: 'Hello' }] },
- ];
-
- const TEST_LOGPROBS = {
-   tokens: [' ever', ' after', '.\n\n', 'The', ' end', '.'],
-   token_logprobs: [
-     -0.0664508, -0.014520033, -1.3820221, -0.7890417, -0.5323165, -0.10247037,
-   ],
-   top_logprobs: [
-     {
-       ' ever': -0.0664508,
-     },
-     {
-       ' after': -0.014520033,
-     },
-     {
-       '.\n\n': -1.3820221,
-     },
-     {
-       The: -0.7890417,
-     },
-     {
-       ' end': -0.5323165,
-     },
-     {
-       '.': -0.10247037,
-     },
-   ] as Record<string, number>[],
- };
-
- const provider = createOpenAI({
-   apiKey: 'test-api-key',
- });
-
- const model = provider.completion('gpt-3.5-turbo-instruct');
-
- const server = createTestServer({
-   'https://api.openai.com/v1/completions': {},
- });
-
- describe('doGenerate', () => {
-   function prepareJsonResponse({
-     content = '',
-     usage = {
-       prompt_tokens: 4,
-       total_tokens: 34,
-       completion_tokens: 30,
-     },
-     logprobs = null,
-     finish_reason = 'stop',
-     id = 'cmpl-96cAM1v77r4jXa4qb2NSmRREV5oWB',
-     created = 1711363706,
-     model = 'gpt-3.5-turbo-instruct',
-     headers,
-   }: {
-     content?: string;
-     usage?: {
-       prompt_tokens: number;
-       total_tokens: number;
-       completion_tokens: number;
-     };
-     logprobs?: {
-       tokens: string[];
-       token_logprobs: number[];
-       top_logprobs: Record<string, number>[];
-     } | null;
-     finish_reason?: string;
-     id?: string;
-     created?: number;
-     model?: string;
-     headers?: Record<string, string>;
-   }) {
-     server.urls['https://api.openai.com/v1/completions'].response = {
-       type: 'json-value',
-       headers,
-       body: {
-         id,
-         object: 'text_completion',
-         created,
-         model,
-         choices: [
-           {
-             text: content,
-             index: 0,
-             ...(logprobs ? { logprobs } : {}),
-             finish_reason,
-           },
-         ],
-         usage,
-       },
-     };
-   }
-
-   it('should extract text response', async () => {
-     prepareJsonResponse({ content: 'Hello, World!' });
-
-     const { content } = await model.doGenerate({
-       prompt: TEST_PROMPT,
-     });
-
-     expect(content).toMatchInlineSnapshot(`
-       [
-         {
-           "text": "Hello, World!",
-           "type": "text",
-         },
-       ]
-     `);
-   });
-
-   it('should extract usage', async () => {
-     prepareJsonResponse({
-       usage: { prompt_tokens: 20, total_tokens: 25, completion_tokens: 5 },
-     });
-
-     const { usage } = await model.doGenerate({
-       prompt: TEST_PROMPT,
-     });
-
-     expect(usage).toMatchInlineSnapshot(`
-       {
-         "inputTokens": {
-           "cacheRead": undefined,
-           "cacheWrite": undefined,
-           "noCache": 20,
-           "total": 20,
-         },
-         "outputTokens": {
-           "reasoning": undefined,
-           "text": 5,
-           "total": 5,
-         },
-         "raw": {
-           "completion_tokens": 5,
-           "prompt_tokens": 20,
-           "total_tokens": 25,
-         },
-       }
-     `);
-   });
-
-   it('should send request body', async () => {
-     prepareJsonResponse({});
-
-     const { request } = await model.doGenerate({
-       prompt: TEST_PROMPT,
-     });
-
-     expect(request).toMatchInlineSnapshot(`
-       {
-         "body": {
-           "echo": undefined,
-           "frequency_penalty": undefined,
-           "logit_bias": undefined,
-           "logprobs": undefined,
-           "max_tokens": undefined,
-           "model": "gpt-3.5-turbo-instruct",
-           "presence_penalty": undefined,
-           "prompt": "user:
- Hello
-
- assistant:
- ",
-           "seed": undefined,
-           "stop": [
-             "
- user:",
-           ],
-           "suffix": undefined,
-           "temperature": undefined,
-           "top_p": undefined,
-           "user": undefined,
-         },
-       }
-     `);
-   });
-
-   it('should send additional response information', async () => {
-     prepareJsonResponse({
-       id: 'test-id',
-       created: 123,
-       model: 'test-model',
-     });
-
-     const { response } = await model.doGenerate({
-       prompt: TEST_PROMPT,
-     });
-
-     expect({
-       id: response?.id,
-       timestamp: response?.timestamp,
-       modelId: response?.modelId,
-     }).toStrictEqual({
-       id: 'test-id',
-       timestamp: new Date(123 * 1000),
-       modelId: 'test-model',
-     });
-   });
-
-   it('should extract logprobs', async () => {
-     prepareJsonResponse({ logprobs: TEST_LOGPROBS });
-
-     const provider = createOpenAI({ apiKey: 'test-api-key' });
-
-     const response = await provider.completion('gpt-3.5-turbo').doGenerate({
-       prompt: TEST_PROMPT,
-       providerOptions: {
-         openai: {
-           logprobs: 1,
-         },
-       },
-     });
-     expect(response.providerMetadata?.openai.logprobs).toStrictEqual(
-       TEST_LOGPROBS,
-     );
-   });
-
-   it('should extract finish reason', async () => {
-     prepareJsonResponse({
-       finish_reason: 'stop',
-     });
-
-     const { finishReason } = await provider
-       .completion('gpt-3.5-turbo-instruct')
-       .doGenerate({
-         prompt: TEST_PROMPT,
-       });
-
-     expect(finishReason).toMatchInlineSnapshot(`
-       {
-         "raw": "stop",
-         "unified": "stop",
-       }
-     `);
-   });
-
-   it('should support unknown finish reason', async () => {
-     prepareJsonResponse({
-       finish_reason: 'eos',
-     });
-
-     const { finishReason } = await provider
-       .completion('gpt-3.5-turbo-instruct')
-       .doGenerate({
-         prompt: TEST_PROMPT,
-       });
-
-     expect(finishReason).toMatchInlineSnapshot(`
-       {
-         "raw": "eos",
-         "unified": "other",
-       }
-     `);
-   });
-
-   it('should expose the raw response headers', async () => {
-     prepareJsonResponse({
-       headers: {
-         'test-header': 'test-value',
-       },
-     });
-
-     const { response } = await model.doGenerate({
-       prompt: TEST_PROMPT,
-     });
-
-     expect(response?.headers).toMatchInlineSnapshot(`
-       {
-         "content-length": "250",
-         "content-type": "application/json",
-         "test-header": "test-value",
-       }
-     `);
-   });
-
-   it('should pass the model and the prompt', async () => {
-     prepareJsonResponse({ content: '' });
-
-     await model.doGenerate({
-       prompt: TEST_PROMPT,
-     });
-
-     expect(await server.calls[0].requestBodyJson).toMatchInlineSnapshot(`
-       {
-         "model": "gpt-3.5-turbo-instruct",
-         "prompt": "user:
- Hello
-
- assistant:
- ",
-         "stop": [
-           "
- user:",
-         ],
-       }
-     `);
-   });
-
-   it('should pass headers', async () => {
-     prepareJsonResponse({ content: '' });
-
-     const provider = createOpenAI({
-       apiKey: 'test-api-key',
-       organization: 'test-organization',
-       project: 'test-project',
-       headers: {
-         'Custom-Provider-Header': 'provider-header-value',
-       },
-     });
-
-     await provider.completion('gpt-3.5-turbo-instruct').doGenerate({
-       prompt: TEST_PROMPT,
-       headers: {
-         'Custom-Request-Header': 'request-header-value',
-       },
-     });
-
-     expect(server.calls[0].requestHeaders).toStrictEqual({
-       authorization: 'Bearer test-api-key',
-       'content-type': 'application/json',
-       'custom-provider-header': 'provider-header-value',
-       'custom-request-header': 'request-header-value',
-       'openai-organization': 'test-organization',
-       'openai-project': 'test-project',
-     });
-   });
- });
-
- describe('doStream', () => {
-   function prepareStreamResponse({
-     content = [],
-     finish_reason = 'stop',
-     usage = {
-       prompt_tokens: 10,
-       total_tokens: 372,
-       completion_tokens: 362,
-     },
-     logprobs = null,
-     headers,
-   }: {
-     content?: string[];
-     usage?: {
-       prompt_tokens: number;
-       total_tokens: number;
-       completion_tokens: number;
-     };
-     logprobs?: {
-       tokens: string[];
-       token_logprobs: number[];
-       top_logprobs: Record<string, number>[];
-     } | null;
-     finish_reason?: string;
-     headers?: Record<string, string>;
-   }) {
-     server.urls['https://api.openai.com/v1/completions'].response = {
-       type: 'stream-chunks',
-       headers,
-       chunks: [
-         ...content.map(text => {
-           return (
-             `data: {"id":"cmpl-96c64EdfhOw8pjFFgVpLuT8k2MtdT","object":"text_completion","created":1711363440,` +
-             `"choices":[{"text":"${text}","index":0,"logprobs":${JSON.stringify(
-               logprobs,
-             )},"finish_reason":null}],"model":"gpt-3.5-turbo-instruct"}\n\n`
-           );
-         }),
-         `data: {"id":"cmpl-96c3yLQE1TtZCd6n6OILVmzev8M8H","object":"text_completion","created":1711363310,` +
-           `"choices":[{"text":"","index":0,"logprobs":${JSON.stringify(logprobs)},"finish_reason":"${finish_reason}"}],"model":"gpt-3.5-turbo-instruct"}\n\n`,
-         `data: {"id":"cmpl-96c3yLQE1TtZCd6n6OILVmzev8M8H","object":"text_completion","created":1711363310,` +
-           `"model":"gpt-3.5-turbo-instruct","usage":${JSON.stringify(
-             usage,
-           )},"choices":[]}\n\n`,
-         'data: [DONE]\n\n',
-       ],
-     };
-   }
-
-   it('should stream text deltas', async () => {
-     prepareStreamResponse({
-       content: ['Hello', ', ', 'World!'],
-       finish_reason: 'stop',
-       usage: {
-         prompt_tokens: 10,
-         total_tokens: 372,
-         completion_tokens: 362,
-       },
-       logprobs: TEST_LOGPROBS,
-     });
-
-     const { stream } = await model.doStream({
-       prompt: TEST_PROMPT,
-       includeRawChunks: false,
-     });
-
-     expect(await convertReadableStreamToArray(stream)).toMatchInlineSnapshot(`
-       [
-         {
-           "type": "stream-start",
-           "warnings": [],
-         },
-         {
-           "id": "cmpl-96c64EdfhOw8pjFFgVpLuT8k2MtdT",
-           "modelId": "gpt-3.5-turbo-instruct",
-           "timestamp": 2024-03-25T10:44:00.000Z,
-           "type": "response-metadata",
-         },
-         {
-           "id": "0",
-           "type": "text-start",
-         },
-         {
-           "delta": "Hello",
-           "id": "0",
-           "type": "text-delta",
-         },
-         {
-           "delta": ", ",
-           "id": "0",
-           "type": "text-delta",
-         },
-         {
-           "delta": "World!",
-           "id": "0",
-           "type": "text-delta",
-         },
-         {
-           "id": "0",
-           "type": "text-end",
-         },
-         {
-           "finishReason": {
-             "raw": "stop",
-             "unified": "stop",
-           },
-           "providerMetadata": {
-             "openai": {
-               "logprobs": {
-                 "token_logprobs": [
-                   -0.0664508,
-                   -0.014520033,
-                   -1.3820221,
-                   -0.7890417,
-                   -0.5323165,
-                   -0.10247037,
-                 ],
-                 "tokens": [
-                   " ever",
-                   " after",
-                   ".
-
- ",
-                   "The",
-                   " end",
-                   ".",
-                 ],
-                 "top_logprobs": [
-                   {
-                     " ever": -0.0664508,
-                   },
-                   {
-                     " after": -0.014520033,
-                   },
-                   {
-                     ".
-
- ": -1.3820221,
-                   },
-                   {
-                     "The": -0.7890417,
-                   },
-                   {
-                     " end": -0.5323165,
-                   },
-                   {
-                     ".": -0.10247037,
-                   },
-                 ],
-               },
-             },
-           },
-           "type": "finish",
-           "usage": {
-             "inputTokens": {
-               "cacheRead": undefined,
-               "cacheWrite": undefined,
-               "noCache": 10,
-               "total": 10,
-             },
-             "outputTokens": {
-               "reasoning": undefined,
-               "text": 362,
-               "total": 362,
-             },
-             "raw": {
-               "completion_tokens": 362,
-               "prompt_tokens": 10,
-               "total_tokens": 372,
-             },
-           },
-         },
-       ]
-     `);
-   });
-
-   it('should handle error stream parts', async () => {
-     server.urls['https://api.openai.com/v1/completions'].response = {
-       type: 'stream-chunks',
-       chunks: [
-         `data: {"error":{"message": "The server had an error processing your request. Sorry about that! You can retry your request, or contact us through our ` +
-           `help center at help.openai.com if you keep seeing this error.","type":"server_error","param":null,"code":null}}\n\n`,
-         'data: [DONE]\n\n',
-       ],
-     };
-
-     const { stream } = await model.doStream({
-       prompt: TEST_PROMPT,
-       includeRawChunks: false,
-     });
-
-     expect(await convertReadableStreamToArray(stream)).toMatchInlineSnapshot(`
-       [
-         {
-           "type": "stream-start",
-           "warnings": [],
-         },
-         {
-           "error": {
-             "code": null,
-             "message": "The server had an error processing your request. Sorry about that! You can retry your request, or contact us through our help center at help.openai.com if you keep seeing this error.",
-             "param": null,
-             "type": "server_error",
-           },
-           "type": "error",
-         },
-         {
-           "finishReason": {
-             "raw": undefined,
-             "unified": "error",
-           },
-           "providerMetadata": {
-             "openai": {},
-           },
-           "type": "finish",
-           "usage": {
-             "inputTokens": {
-               "cacheRead": undefined,
-               "cacheWrite": undefined,
-               "noCache": undefined,
-               "total": undefined,
-             },
-             "outputTokens": {
-               "reasoning": undefined,
-               "text": undefined,
-               "total": undefined,
-             },
-             "raw": undefined,
-           },
-         },
-       ]
-     `);
-   });
-
-   it.skipIf(isNodeVersion(20))(
-     'should handle unparsable stream parts',
-     async () => {
-       server.urls['https://api.openai.com/v1/completions'].response = {
-         type: 'stream-chunks',
-         chunks: [`data: {unparsable}\n\n`, 'data: [DONE]\n\n'],
-       };
-
-       const { stream } = await model.doStream({
-         prompt: TEST_PROMPT,
-         includeRawChunks: false,
-       });
-
-       expect(await convertReadableStreamToArray(stream)).toMatchInlineSnapshot(`
-         [
-           {
-             "type": "stream-start",
-             "warnings": [],
-           },
-           {
-             "error": [AI_JSONParseError: JSON parsing failed: Text: {unparsable}.
- Error message: Expected property name or '}' in JSON at position 1 (line 1 column 2)],
-             "type": "error",
-           },
-           {
-             "finishReason": {
-               "raw": undefined,
-               "unified": "error",
-             },
-             "providerMetadata": {
-               "openai": {},
-             },
-             "type": "finish",
-             "usage": {
-               "inputTokens": {
-                 "cacheRead": undefined,
-                 "cacheWrite": undefined,
-                 "noCache": undefined,
-                 "total": undefined,
-               },
-               "outputTokens": {
-                 "reasoning": undefined,
-                 "text": undefined,
-                 "total": undefined,
-               },
-               "raw": undefined,
-             },
-           },
-         ]
-       `);
-     },
-   );
-
-   it('should send request body', async () => {
-     prepareStreamResponse({ content: [] });
-
-     const { request } = await model.doStream({
-       prompt: TEST_PROMPT,
-       includeRawChunks: false,
-     });
-
-     expect(request).toMatchInlineSnapshot(`
-       {
-         "body": {
-           "echo": undefined,
-           "frequency_penalty": undefined,
-           "logit_bias": undefined,
-           "logprobs": undefined,
-           "max_tokens": undefined,
-           "model": "gpt-3.5-turbo-instruct",
-           "presence_penalty": undefined,
-           "prompt": "user:
- Hello
-
- assistant:
- ",
-           "seed": undefined,
-           "stop": [
-             "
- user:",
-           ],
-           "stream": true,
-           "stream_options": {
-             "include_usage": true,
-           },
-           "suffix": undefined,
-           "temperature": undefined,
-           "top_p": undefined,
-           "user": undefined,
-         },
-       }
-     `);
-   });
-
-   it('should expose the raw response headers', async () => {
-     prepareStreamResponse({
-       headers: { 'test-header': 'test-value' },
-     });
-
-     const { response } = await model.doStream({
-       prompt: TEST_PROMPT,
-       includeRawChunks: false,
-     });
-
-     expect(response?.headers).toStrictEqual({
-       // default headers:
-       'content-type': 'text/event-stream',
-       'cache-control': 'no-cache',
-       connection: 'keep-alive',
-
-       // custom header
-       'test-header': 'test-value',
-     });
-   });
-
-   it('should pass the model and the prompt', async () => {
-     prepareStreamResponse({ content: [] });
-
-     await model.doStream({
-       prompt: TEST_PROMPT,
-       includeRawChunks: false,
-     });
-
-     expect(await server.calls[0].requestBodyJson).toMatchInlineSnapshot(`
-       {
-         "model": "gpt-3.5-turbo-instruct",
-         "prompt": "user:
- Hello
-
- assistant:
- ",
-         "stop": [
-           "
- user:",
-         ],
-         "stream": true,
-         "stream_options": {
-           "include_usage": true,
-         },
-       }
-     `);
-   });
-
-   it('should pass headers', async () => {
-     prepareStreamResponse({ content: [] });
-
-     const provider = createOpenAI({
-       apiKey: 'test-api-key',
-       organization: 'test-organization',
-       project: 'test-project',
-       headers: {
-         'Custom-Provider-Header': 'provider-header-value',
-       },
-     });
-
-     await provider.completion('gpt-3.5-turbo-instruct').doStream({
-       prompt: TEST_PROMPT,
-       headers: {
-         'Custom-Request-Header': 'request-header-value',
-       },
-       includeRawChunks: false,
-     });
-
-     expect(server.calls[0].requestHeaders).toStrictEqual({
-       authorization: 'Bearer test-api-key',
-       'content-type': 'application/json',
-       'custom-provider-header': 'provider-header-value',
-       'custom-request-header': 'request-header-value',
-       'openai-organization': 'test-organization',
-       'openai-project': 'test-project',
-     });
-     expect(server.calls[0].requestUserAgent).toContain(
-       `ai-sdk/openai/0.0.0-test`,
-     );
-   });
- });
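
For orientation: every test in the deleted suite follows one pattern — stub the completions endpoint with createTestServer, call doGenerate (or doStream) on a completion model, and assert on the result. Below is a minimal sketch of that pattern using only APIs that appear verbatim in the diff above; the public import path '@ai-sdk/openai' stands in for the repo-internal '../openai-provider', and the canned response values are illustrative, not taken from the package.

import { createTestServer } from '@ai-sdk/test-server/with-vitest';
import { createOpenAI } from '@ai-sdk/openai';
import { describe, it, expect } from 'vitest';

// Intercept the endpoint that the completion model calls.
const server = createTestServer({
  'https://api.openai.com/v1/completions': {},
});

const model = createOpenAI({ apiKey: 'test-api-key' }).completion(
  'gpt-3.5-turbo-instruct',
);

describe('completion model (illustrative sketch)', () => {
  it('extracts text from a mocked response', async () => {
    // Queue a canned completions body, as prepareJsonResponse did above.
    server.urls['https://api.openai.com/v1/completions'].response = {
      type: 'json-value',
      body: {
        id: 'cmpl-test',
        object: 'text_completion',
        created: 0,
        model: 'gpt-3.5-turbo-instruct',
        choices: [{ text: 'Hello, World!', index: 0, finish_reason: 'stop' }],
        usage: { prompt_tokens: 4, completion_tokens: 3, total_tokens: 7 },
      },
    };

    const { content } = await model.doGenerate({
      prompt: [{ role: 'user', content: [{ type: 'text', text: 'Hello' }] }],
    });

    expect(content).toEqual([{ type: 'text', text: 'Hello, World!' }]);
  });
});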