@ai-sdk/openai-compatible 2.0.15 → 2.0.17

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (39)
  1. package/CHANGELOG.md +12 -0
  2. package/dist/index.d.mts +5 -0
  3. package/dist/index.d.ts +5 -0
  4. package/dist/index.js +23 -6
  5. package/dist/index.js.map +1 -1
  6. package/dist/index.mjs +23 -6
  7. package/dist/index.mjs.map +1 -1
  8. package/package.json +3 -2
  9. package/src/chat/convert-openai-compatible-chat-usage.ts +55 -0
  10. package/src/chat/convert-to-openai-compatible-chat-messages.test.ts +1238 -0
  11. package/src/chat/convert-to-openai-compatible-chat-messages.ts +246 -0
  12. package/src/chat/get-response-metadata.ts +15 -0
  13. package/src/chat/map-openai-compatible-finish-reason.ts +19 -0
  14. package/src/chat/openai-compatible-api-types.ts +86 -0
  15. package/src/chat/openai-compatible-chat-language-model.test.ts +3292 -0
  16. package/src/chat/openai-compatible-chat-language-model.ts +830 -0
  17. package/src/chat/openai-compatible-chat-options.ts +34 -0
  18. package/src/chat/openai-compatible-metadata-extractor.ts +48 -0
  19. package/src/chat/openai-compatible-prepare-tools.test.ts +336 -0
  20. package/src/chat/openai-compatible-prepare-tools.ts +98 -0
  21. package/src/completion/convert-openai-compatible-completion-usage.ts +46 -0
  22. package/src/completion/convert-to-openai-compatible-completion-prompt.ts +93 -0
  23. package/src/completion/get-response-metadata.ts +15 -0
  24. package/src/completion/map-openai-compatible-finish-reason.ts +19 -0
  25. package/src/completion/openai-compatible-completion-language-model.test.ts +773 -0
  26. package/src/completion/openai-compatible-completion-language-model.ts +390 -0
  27. package/src/completion/openai-compatible-completion-options.ts +33 -0
  28. package/src/embedding/openai-compatible-embedding-model.test.ts +171 -0
  29. package/src/embedding/openai-compatible-embedding-model.ts +166 -0
  30. package/src/embedding/openai-compatible-embedding-options.ts +21 -0
  31. package/src/image/openai-compatible-image-model.test.ts +494 -0
  32. package/src/image/openai-compatible-image-model.ts +205 -0
  33. package/src/image/openai-compatible-image-settings.ts +1 -0
  34. package/src/index.ts +27 -0
  35. package/src/internal/index.ts +4 -0
  36. package/src/openai-compatible-error.ts +30 -0
  37. package/src/openai-compatible-provider.test.ts +329 -0
  38. package/src/openai-compatible-provider.ts +189 -0
  39. package/src/version.ts +5 -0
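
Apart from small changes to CHANGELOG.md, package.json, and the dist/ bundles, the diff consists of src/ files that are new to the published package, organized into chat/, completion/, embedding/, and image/ subdirectories. Only one of those files is reproduced in full below: the completion model test suite (+773 lines). As orientation for reading it, here is a minimal sketch of the call shape that suite exercises; the import path assumes the package's public entry point, and the base URL, provider name, and API key are fixtures taken from the tests, not real values:

    import { createOpenAICompatible } from '@ai-sdk/openai-compatible';

    // Provider wired against a stub endpoint (values copied from the test fixtures).
    const provider = createOpenAICompatible({
      baseURL: 'https://my.api.com/v1/',
      name: 'test-provider',
      headers: { Authorization: `Bearer test-api-key` },
    });

    // Completion models are created by model id; doGenerate returns content,
    // usage, and finishReason, all asserted via inline snapshots in the suite.
    const model = provider.completionModel('gpt-3.5-turbo-instruct');
    const { content, usage, finishReason } = await model.doGenerate({
      prompt: [{ role: 'user', content: [{ type: 'text', text: 'Hello' }] }],
    });
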
package/src/completion/openai-compatible-completion-language-model.test.ts
@@ -0,0 +1,773 @@
+import { describe, it, expect } from 'vitest';
+import { LanguageModelV3Prompt } from '@ai-sdk/provider';
+import { createTestServer } from '@ai-sdk/test-server/with-vitest';
+import {
+  convertReadableStreamToArray,
+  isNodeVersion,
+} from '@ai-sdk/provider-utils/test';
+import { createOpenAICompatible } from '../openai-compatible-provider';
+import { OpenAICompatibleCompletionLanguageModel } from './openai-compatible-completion-language-model';
+
+const TEST_PROMPT: LanguageModelV3Prompt = [
+  { role: 'user', content: [{ type: 'text', text: 'Hello' }] },
+];
+
+const provider = createOpenAICompatible({
+  baseURL: 'https://my.api.com/v1/',
+  name: 'test-provider',
+  headers: {
+    Authorization: `Bearer test-api-key`,
+  },
+});
+
+const model = provider.completionModel('gpt-3.5-turbo-instruct');
+
+const server = createTestServer({
+  'https://my.api.com/v1/completions': {},
+});
+
+describe('config', () => {
+  it('should extract base name from provider string', () => {
+    const model = new OpenAICompatibleCompletionLanguageModel('gpt-4', {
+      provider: 'anthropic.beta',
+      url: () => '',
+      headers: () => ({}),
+    });
+
+    expect(model['providerOptionsName']).toBe('anthropic');
+  });
+
+  it('should handle provider without dot notation', () => {
+    const model = new OpenAICompatibleCompletionLanguageModel('gpt-4', {
+      provider: 'openai',
+      url: () => '',
+      headers: () => ({}),
+    });
+
+    expect(model['providerOptionsName']).toBe('openai');
+  });
+
+  it('should return empty for empty provider', () => {
+    const model = new OpenAICompatibleCompletionLanguageModel(
+      'gpt-4',
+
+      {
+        provider: '',
+        url: () => '',
+        headers: () => ({}),
+      },
+    );
+
+    expect(model['providerOptionsName']).toBe('');
+  });
+});
+
+describe('doGenerate', () => {
+  function prepareJsonResponse({
+    content = '',
+    usage = {
+      prompt_tokens: 4,
+      total_tokens: 34,
+      completion_tokens: 30,
+    },
+    finish_reason = 'stop',
+    id = 'cmpl-96cAM1v77r4jXa4qb2NSmRREV5oWB',
+    created = 1711363706,
+    model = 'gpt-3.5-turbo-instruct',
+    headers,
+  }: {
+    content?: string;
+    usage?: {
+      prompt_tokens: number;
+      total_tokens: number;
+      completion_tokens: number;
+    };
+    finish_reason?: string;
+    id?: string;
+    created?: number;
+    model?: string;
+    headers?: Record<string, string>;
+  }) {
+    server.urls['https://my.api.com/v1/completions'].response = {
+      type: 'json-value',
+      headers,
+      body: {
+        id,
+        object: 'text_completion',
+        created,
+        model,
+        choices: [
+          {
+            text: content,
+            index: 0,
+            finish_reason,
+          },
+        ],
+        usage,
+      },
+    };
+  }
+
+  it('should extract text response', async () => {
+    prepareJsonResponse({ content: 'Hello, World!' });
+
+    const { content } = await model.doGenerate({
+      prompt: TEST_PROMPT,
+    });
+
+    expect(content).toMatchInlineSnapshot(`
+      [
+        {
+          "text": "Hello, World!",
+          "type": "text",
+        },
+      ]
+    `);
+  });
+
+  it('should extract usage', async () => {
+    prepareJsonResponse({
+      usage: { prompt_tokens: 20, total_tokens: 25, completion_tokens: 5 },
+    });
+
+    const { usage } = await model.doGenerate({
+      prompt: TEST_PROMPT,
+    });
+
+    expect(usage).toMatchInlineSnapshot(`
+      {
+        "inputTokens": {
+          "cacheRead": undefined,
+          "cacheWrite": undefined,
+          "noCache": 20,
+          "total": 20,
+        },
+        "outputTokens": {
+          "reasoning": undefined,
+          "text": 5,
+          "total": 5,
+        },
+        "raw": {
+          "completion_tokens": 5,
+          "prompt_tokens": 20,
+          "total_tokens": 25,
+        },
+      }
+    `);
+  });
+  it('should send request body', async () => {
+    prepareJsonResponse({});
+
+    const { request } = await model.doGenerate({
+      prompt: TEST_PROMPT,
+    });
+
+    expect(request).toMatchInlineSnapshot(`
+      {
+        "body": {
+          "echo": undefined,
+          "frequency_penalty": undefined,
+          "logit_bias": undefined,
+          "max_tokens": undefined,
+          "model": "gpt-3.5-turbo-instruct",
+          "presence_penalty": undefined,
+          "prompt": "user:
+      Hello
+
+      assistant:
+      ",
+          "seed": undefined,
+          "stop": [
+            "
+      user:",
+          ],
+          "suffix": undefined,
+          "temperature": undefined,
+          "top_p": undefined,
+          "user": undefined,
+        },
+      }
+    `);
+  });
+
+  it('should send additional response information', async () => {
+    prepareJsonResponse({
+      id: 'test-id',
+      created: 123,
+      model: 'test-model',
+    });
+
+    const { response } = await model.doGenerate({
+      prompt: TEST_PROMPT,
+    });
+
+    expect(response).toMatchInlineSnapshot(`
+      {
+        "body": {
+          "choices": [
+            {
+              "finish_reason": "stop",
+              "index": 0,
+              "text": "",
+            },
+          ],
+          "created": 123,
+          "id": "test-id",
+          "model": "test-model",
+          "object": "text_completion",
+          "usage": {
+            "completion_tokens": 30,
+            "prompt_tokens": 4,
+            "total_tokens": 34,
+          },
+        },
+        "headers": {
+          "content-length": "204",
+          "content-type": "application/json",
+        },
+        "id": "test-id",
+        "modelId": "test-model",
+        "timestamp": 1970-01-01T00:02:03.000Z,
+      }
+    `);
+  });
+
+  it('should extract finish reason', async () => {
+    prepareJsonResponse({
+      finish_reason: 'stop',
+    });
+
+    const { finishReason } = await provider
+      .completionModel('gpt-3.5-turbo-instruct')
+      .doGenerate({
+        prompt: TEST_PROMPT,
+      });
+
+    expect(finishReason).toMatchInlineSnapshot(`
+      {
+        "raw": "stop",
+        "unified": "stop",
+      }
+    `);
+  });
+
+  it('should support unknown finish reason', async () => {
+    prepareJsonResponse({
+      finish_reason: 'eos',
+    });
+
+    const { finishReason } = await provider
+      .completionModel('gpt-3.5-turbo-instruct')
+      .doGenerate({
+        prompt: TEST_PROMPT,
+      });
+
+    expect(finishReason).toMatchInlineSnapshot(`
+      {
+        "raw": "eos",
+        "unified": "other",
+      }
+    `);
+  });
+
+  it('should expose the raw response headers', async () => {
+    prepareJsonResponse({
+      headers: { 'test-header': 'test-value' },
+    });
+
+    const { response } = await model.doGenerate({
+      prompt: TEST_PROMPT,
+    });
+
+    expect(response?.headers).toStrictEqual({
+      // default headers:
+      'content-length': '250',
+      'content-type': 'application/json',
+
+      // custom header
+      'test-header': 'test-value',
+    });
+  });
+
+  it('should pass the model and the prompt', async () => {
+    prepareJsonResponse({ content: '' });
+
+    await model.doGenerate({
+      prompt: TEST_PROMPT,
+    });
+
+    expect(await server.calls[0].requestBodyJson).toMatchInlineSnapshot(`
+      {
+        "model": "gpt-3.5-turbo-instruct",
+        "prompt": "user:
+      Hello
+
+      assistant:
+      ",
+        "stop": [
+          "
+      user:",
+        ],
+      }
+    `);
+  });
+
+  it('should pass headers', async () => {
+    prepareJsonResponse({ content: '' });
+
+    const provider = createOpenAICompatible({
+      baseURL: 'https://my.api.com/v1/',
+      name: 'test-provider',
+      headers: {
+        Authorization: `Bearer test-api-key`,
+        'Custom-Provider-Header': 'provider-header-value',
+      },
+    });
+
+    await provider.completionModel('gpt-3.5-turbo-instruct').doGenerate({
+      prompt: TEST_PROMPT,
+      headers: {
+        'Custom-Request-Header': 'request-header-value',
+      },
+    });
+
+    expect(server.calls[0].requestHeaders).toStrictEqual({
+      authorization: 'Bearer test-api-key',
+      'content-type': 'application/json',
+      'custom-provider-header': 'provider-header-value',
+      'custom-request-header': 'request-header-value',
+    });
+  });
+
+  it('should include provider-specific options', async () => {
+    prepareJsonResponse({ content: '' });
+
+    await provider.completionModel('gpt-3.5-turbo-instruct').doGenerate({
+      prompt: TEST_PROMPT,
+      providerOptions: {
+        'test-provider': {
+          someCustomOption: 'test-value',
+        },
+      },
+    });
+
+    expect(await server.calls[0].requestBodyJson).toMatchInlineSnapshot(`
+      {
+        "model": "gpt-3.5-turbo-instruct",
+        "prompt": "user:
+      Hello
+
+      assistant:
+      ",
+        "someCustomOption": "test-value",
+        "stop": [
+          "
+      user:",
+        ],
+      }
+    `);
+  });
+
+  it('should not include provider-specific options for different provider', async () => {
+    prepareJsonResponse({ content: '' });
+
+    await provider.completionModel('gpt-3.5-turbo-instruct').doGenerate({
+      prompt: TEST_PROMPT,
+      providerOptions: {
+        notThisProviderName: {
+          someCustomOption: 'test-value',
+        },
+      },
+    });
+
+    expect(await server.calls[0].requestBodyJson).toMatchInlineSnapshot(`
+      {
+        "model": "gpt-3.5-turbo-instruct",
+        "prompt": "user:
+      Hello
+
+      assistant:
+      ",
+        "stop": [
+          "
+      user:",
+        ],
+      }
+    `);
+  });
+});
+
+describe('doStream', () => {
+  function prepareEmptyStreamResponse(headers?: Record<string, string>) {
+    server.urls['https://my.api.com/v1/completions'].response = {
+      type: 'stream-chunks',
+      headers,
+      chunks: [
+        `data: {"id":"cmpl-96c3yLQE1TtZCd6n6OILVmzev8M8H","object":"text_completion","created":1711363310,"model":"gpt-3.5-turbo-instruct","choices":[{"text":"","index":0,"logprobs":null,"finish_reason":"stop"}]}\n\n`,
+        `data: {"id":"cmpl-96c3yLQE1TtZCd6n6OILVmzev8M8H","object":"text_completion","created":1711363310,"model":"gpt-3.5-turbo-instruct","usage":{"prompt_tokens":10,"completion_tokens":0,"total_tokens":10},"choices":[]}\n\n`,
+        'data: [DONE]\n\n',
+      ],
+    };
+  }
+
+  it('should stream text deltas', async () => {
+    server.urls['https://my.api.com/v1/completions'].response = {
+      type: 'stream-chunks',
+      chunks: [
+        `data: {"id":"cmpl-96c64EdfhOw8pjFFgVpLuT8k2MtdT","object":"text_completion","created":1711363440,"model":"gpt-3.5-turbo-instruct","choices":[{"text":"Hello","index":0,"logprobs":null,"finish_reason":null}]}\n\n`,
+        `data: {"id":"cmpl-96c64EdfhOw8pjFFgVpLuT8k2MtdT","object":"text_completion","created":1711363440,"model":"gpt-3.5-turbo-instruct","choices":[{"text":",","index":0,"logprobs":null,"finish_reason":null}]}\n\n`,
+        `data: {"id":"cmpl-96c64EdfhOw8pjFFgVpLuT8k2MtdT","object":"text_completion","created":1711363440,"model":"gpt-3.5-turbo-instruct","choices":[{"text":" World!","index":0,"logprobs":null,"finish_reason":null}]}\n\n`,
+        `data: {"id":"cmpl-96c3yLQE1TtZCd6n6OILVmzev8M8H","object":"text_completion","created":1711363310,"model":"gpt-3.5-turbo-instruct","choices":[{"text":"","index":0,"logprobs":null,"finish_reason":"stop"}]}\n\n`,
+        `data: {"id":"cmpl-96c3yLQE1TtZCd6n6OILVmzev8M8H","object":"text_completion","created":1711363310,"model":"gpt-3.5-turbo-instruct","usage":{"prompt_tokens":10,"completion_tokens":362,"total_tokens":372},"choices":[]}\n\n`,
+        'data: [DONE]\n\n',
+      ],
+    };
+
+    const { stream } = await model.doStream({
+      prompt: TEST_PROMPT,
+      includeRawChunks: false,
+    });
+    expect(await convertReadableStreamToArray(stream)).toMatchInlineSnapshot(`
+      [
+        {
+          "type": "stream-start",
+          "warnings": [],
+        },
+        {
+          "id": "cmpl-96c64EdfhOw8pjFFgVpLuT8k2MtdT",
+          "modelId": "gpt-3.5-turbo-instruct",
+          "timestamp": 2024-03-25T10:44:00.000Z,
+          "type": "response-metadata",
+        },
+        {
+          "id": "0",
+          "type": "text-start",
+        },
+        {
+          "delta": "Hello",
+          "id": "0",
+          "type": "text-delta",
+        },
+        {
+          "delta": ",",
+          "id": "0",
+          "type": "text-delta",
+        },
+        {
+          "delta": " World!",
+          "id": "0",
+          "type": "text-delta",
+        },
+        {
+          "delta": "",
+          "id": "0",
+          "type": "text-delta",
+        },
+        {
+          "id": "0",
+          "type": "text-end",
+        },
+        {
+          "finishReason": {
+            "raw": "stop",
+            "unified": "stop",
+          },
+          "type": "finish",
+          "usage": {
+            "inputTokens": {
+              "cacheRead": undefined,
+              "cacheWrite": undefined,
+              "noCache": 10,
+              "total": 10,
+            },
+            "outputTokens": {
+              "reasoning": undefined,
+              "text": 362,
+              "total": 362,
+            },
+            "raw": {
+              "completion_tokens": 362,
+              "prompt_tokens": 10,
+              "total_tokens": 372,
+            },
+          },
+        },
+      ]
+    `);
+  });
+
+  it('should handle error stream parts', async () => {
+    server.urls['https://my.api.com/v1/completions'].response = {
+      type: 'stream-chunks',
+      chunks: [
+        `data: {"error":{"message":"The server had an error processing your request. Sorry about that! You can retry your request, or contact us through our help center at help.openai.com if you keep seeing this error.","type":"server_error","param":null,"code":null}}\n\n`,
+        'data: [DONE]\n\n',
+      ],
+    };
+
+    const { stream } = await model.doStream({
+      prompt: TEST_PROMPT,
+      includeRawChunks: false,
+    });
+
+    expect(await convertReadableStreamToArray(stream)).toMatchInlineSnapshot(`
+      [
+        {
+          "type": "stream-start",
+          "warnings": [],
+        },
+        {
+          "error": {
+            "code": null,
+            "message": "The server had an error processing your request. Sorry about that! You can retry your request, or contact us through our help center at help.openai.com if you keep seeing this error.",
+            "param": null,
+            "type": "server_error",
+          },
+          "type": "error",
+        },
+        {
+          "finishReason": {
+            "raw": undefined,
+            "unified": "error",
+          },
+          "type": "finish",
+          "usage": {
+            "inputTokens": {
+              "cacheRead": undefined,
+              "cacheWrite": undefined,
+              "noCache": undefined,
+              "total": undefined,
+            },
+            "outputTokens": {
+              "reasoning": undefined,
+              "text": undefined,
+              "total": undefined,
+            },
+            "raw": undefined,
+          },
+        },
+      ]
+    `);
+  });
+
+  it.skipIf(isNodeVersion(20))(
+    'should handle unparsable stream parts',
+    async () => {
+      server.urls['https://my.api.com/v1/completions'].response = {
+        type: 'stream-chunks',
+        chunks: [`data: {unparsable}\n\n`, 'data: [DONE]\n\n'],
+      };
+
+      const { stream } = await model.doStream({
+        prompt: TEST_PROMPT,
+        includeRawChunks: false,
+      });
+
+      expect(await convertReadableStreamToArray(stream)).toMatchInlineSnapshot(`
+        [
+          {
+            "type": "stream-start",
+            "warnings": [],
+          },
+          {
+            "error": [AI_JSONParseError: JSON parsing failed: Text: {unparsable}.
+        Error message: Expected property name or '}' in JSON at position 1 (line 1 column 2)],
+            "type": "error",
+          },
+          {
+            "finishReason": {
+              "raw": undefined,
+              "unified": "error",
+            },
+            "type": "finish",
+            "usage": {
+              "inputTokens": {
+                "cacheRead": undefined,
+                "cacheWrite": undefined,
+                "noCache": undefined,
+                "total": undefined,
+              },
+              "outputTokens": {
+                "reasoning": undefined,
+                "text": undefined,
+                "total": undefined,
+              },
+              "raw": undefined,
+            },
+          },
+        ]
+      `);
+    },
+  );
+
+  it('should send request body', async () => {
+    prepareEmptyStreamResponse();
+
+    const { request } = await model.doStream({
+      prompt: TEST_PROMPT,
+      includeRawChunks: false,
+    });
+
+    expect(request).toMatchInlineSnapshot(`
+      {
+        "body": {
+          "echo": undefined,
+          "frequency_penalty": undefined,
+          "logit_bias": undefined,
+          "max_tokens": undefined,
+          "model": "gpt-3.5-turbo-instruct",
+          "presence_penalty": undefined,
+          "prompt": "user:
+      Hello
+
+      assistant:
+      ",
+          "seed": undefined,
+          "stop": [
+            "
+      user:",
+          ],
+          "stream": true,
+          "stream_options": undefined,
+          "suffix": undefined,
+          "temperature": undefined,
+          "top_p": undefined,
+          "user": undefined,
+        },
+      }
+    `);
+  });
+
+  it('should expose the raw response headers', async () => {
+    prepareEmptyStreamResponse({ 'test-header': 'test-value' });
+
+    const { response } = await model.doStream({
+      prompt: TEST_PROMPT,
+      includeRawChunks: false,
+    });
+
+    expect(response?.headers).toStrictEqual({
+      // default headers:
+      'content-type': 'text/event-stream',
+      'cache-control': 'no-cache',
+      connection: 'keep-alive',
+
+      // custom header
+      'test-header': 'test-value',
+    });
+  });
+
+  it('should pass the model and the prompt', async () => {
+    prepareEmptyStreamResponse();
+
+    await model.doStream({
+      prompt: TEST_PROMPT,
+      includeRawChunks: false,
+    });
+
+    expect(await server.calls[0].requestBodyJson).toMatchInlineSnapshot(`
+      {
+        "model": "gpt-3.5-turbo-instruct",
+        "prompt": "user:
+      Hello
+
+      assistant:
+      ",
+        "stop": [
+          "
+      user:",
+        ],
+        "stream": true,
+      }
+    `);
+  });
+
+  it('should pass headers', async () => {
+    prepareEmptyStreamResponse();
+
+    const provider = createOpenAICompatible({
+      baseURL: 'https://my.api.com/v1/',
+      name: 'test-provider',
+      headers: {
+        Authorization: `Bearer test-api-key`,
+        'Custom-Provider-Header': 'provider-header-value',
+      },
+    });
+
+    await provider.completionModel('gpt-3.5-turbo-instruct').doStream({
+      prompt: TEST_PROMPT,
+      includeRawChunks: false,
+      headers: {
+        'Custom-Request-Header': 'request-header-value',
+      },
+    });
+
+    expect(server.calls[0].requestHeaders).toStrictEqual({
+      authorization: 'Bearer test-api-key',
+      'content-type': 'application/json',
+      'custom-provider-header': 'provider-header-value',
+      'custom-request-header': 'request-header-value',
+    });
+  });
+
+  it('should include provider-specific options', async () => {
+    prepareEmptyStreamResponse();
+
+    await model.doStream({
+      providerOptions: {
+        'test-provider': {
+          someCustomOption: 'test-value',
+        },
+      },
+      prompt: TEST_PROMPT,
+      includeRawChunks: false,
+    });
+
+    expect(await server.calls[0].requestBodyJson).toMatchInlineSnapshot(`
+      {
+        "model": "gpt-3.5-turbo-instruct",
+        "prompt": "user:
+      Hello
+
+      assistant:
+      ",
+        "someCustomOption": "test-value",
+        "stop": [
+          "
+      user:",
+        ],
+        "stream": true,
+      }
+    `);
+  });
+
+  it('should not include provider-specific options for different provider', async () => {
+    prepareEmptyStreamResponse();
+
+    await model.doStream({
+      providerOptions: {
+        notThisProviderName: {
+          someCustomOption: 'test-value',
+        },
+      },
+      prompt: TEST_PROMPT,
+      includeRawChunks: false,
+    });
+
+    expect(await server.calls[0].requestBodyJson).toMatchInlineSnapshot(`
+      {
+        "model": "gpt-3.5-turbo-instruct",
+        "prompt": "user:
+      Hello
+
+      assistant:
+      ",
+        "stop": [
+          "
+      user:",
+        ],
+        "stream": true,
+      }
+    `);
+  });
+});
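
Two shapes recur throughout the inline snapshots above: token usage is reported as a structured object with the provider's raw counts attached, and finish reasons carry both the provider's raw value and a unified mapping ('stop', 'other', 'error' in these tests). The following is a minimal sketch of those shapes as inferred from the snapshot output; the type names are illustrative, not the package's exported declarations:

    // Shapes inferred from the inline snapshots above; illustrative only,
    // not the package's exported type declarations.
    type SnapshotUsage = {
      inputTokens: {
        cacheRead: number | undefined;
        cacheWrite: number | undefined;
        noCache: number | undefined;
        total: number | undefined;
      };
      outputTokens: {
        reasoning: number | undefined;
        text: number | undefined;
        total: number | undefined;
      };
      // Raw provider counts, e.g. { prompt_tokens, completion_tokens, total_tokens }.
      raw: Record<string, number> | undefined;
    };

    type SnapshotFinishReason = {
      raw: string | undefined; // provider value: 'stop', 'eos', or undefined on error
      unified: string; // mapped values observed in the tests: 'stop', 'other', 'error'
    };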