@ai-sdk/gateway 0.0.0-64aae7dd-20260114144918 → 0.0.0-98261322-20260122142521

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (47)
  1. package/CHANGELOG.md +49 -4
  2. package/dist/index.d.mts +20 -10
  3. package/dist/index.d.ts +20 -10
  4. package/dist/index.js +62 -25
  5. package/dist/index.js.map +1 -1
  6. package/dist/index.mjs +62 -25
  7. package/dist/index.mjs.map +1 -1
  8. package/docs/00-ai-gateway.mdx +625 -0
  9. package/package.json +12 -5
  10. package/src/errors/as-gateway-error.ts +33 -0
  11. package/src/errors/create-gateway-error.test.ts +590 -0
  12. package/src/errors/create-gateway-error.ts +132 -0
  13. package/src/errors/extract-api-call-response.test.ts +270 -0
  14. package/src/errors/extract-api-call-response.ts +15 -0
  15. package/src/errors/gateway-authentication-error.ts +84 -0
  16. package/src/errors/gateway-error-types.test.ts +278 -0
  17. package/src/errors/gateway-error.ts +47 -0
  18. package/src/errors/gateway-internal-server-error.ts +33 -0
  19. package/src/errors/gateway-invalid-request-error.ts +33 -0
  20. package/src/errors/gateway-model-not-found-error.ts +47 -0
  21. package/src/errors/gateway-rate-limit-error.ts +33 -0
  22. package/src/errors/gateway-response-error.ts +42 -0
  23. package/src/errors/index.ts +16 -0
  24. package/src/errors/parse-auth-method.test.ts +136 -0
  25. package/src/errors/parse-auth-method.ts +23 -0
  26. package/src/gateway-config.ts +7 -0
  27. package/src/gateway-embedding-model-settings.ts +22 -0
  28. package/src/gateway-embedding-model.test.ts +213 -0
  29. package/src/gateway-embedding-model.ts +109 -0
  30. package/src/gateway-fetch-metadata.test.ts +774 -0
  31. package/src/gateway-fetch-metadata.ts +127 -0
  32. package/src/gateway-image-model-settings.ts +12 -0
  33. package/src/gateway-image-model.test.ts +823 -0
  34. package/src/gateway-image-model.ts +145 -0
  35. package/src/gateway-language-model-settings.ts +159 -0
  36. package/src/gateway-language-model.test.ts +1485 -0
  37. package/src/gateway-language-model.ts +212 -0
  38. package/src/gateway-model-entry.ts +58 -0
  39. package/src/gateway-provider-options.ts +66 -0
  40. package/src/gateway-provider.test.ts +1210 -0
  41. package/src/gateway-provider.ts +284 -0
  42. package/src/gateway-tools.ts +15 -0
  43. package/src/index.ts +27 -0
  44. package/src/tool/perplexity-search.ts +294 -0
  45. package/src/vercel-environment.test.ts +65 -0
  46. package/src/vercel-environment.ts +6 -0
  47. package/src/version.ts +6 -0
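
Most of this release is the new typed Gateway error hierarchy (package/src/errors/) plus the gateway model wrappers; the expanded hunk below is the language-model test suite, which exercises the public error surface (isInstance, statusCode, type, message). For orientation only, here is a minimal consumer-side TypeScript sketch. It assumes the error classes are re-exported from the package root (src/index.ts, not shown in this diff) and uses createGateway/generateText wiring as an illustration, not as confirmed API of this exact version:

import { generateText } from 'ai';
import {
  createGateway,
  GatewayAuthenticationError,
  GatewayRateLimitError,
} from '@ai-sdk/gateway'; // root re-export is an assumption

// apiKey option and env var name are illustrative
const gateway = createGateway({ apiKey: process.env.AI_GATEWAY_API_KEY });

try {
  const { text } = await generateText({
    model: gateway('openai/gpt-4o'), // hypothetical model id
    prompt: 'Hello',
  });
  console.log(text);
} catch (error: unknown) {
  if (GatewayAuthenticationError.isInstance(error)) {
    // 401; per the tests below, the message embeds a vercel.com/d?to= link
    console.error(error.statusCode, error.message);
  } else if (GatewayRateLimitError.isInstance(error)) {
    console.error('Rate limited:', error.message); // statusCode 429
  } else {
    throw error;
  }
}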
package/src/gateway-language-model.test.ts
@@ -0,0 +1,1485 @@
+ import type {
+   LanguageModelV3Prompt,
+   LanguageModelV3FilePart,
+ } from '@ai-sdk/provider';
+ import { createTestServer } from '@ai-sdk/test-server/with-vitest';
+ import { convertReadableStreamToArray } from '@ai-sdk/provider-utils/test';
+ import { GatewayLanguageModel } from './gateway-language-model';
+ import type { GatewayConfig } from './gateway-config';
+ import {
+   GatewayAuthenticationError,
+   GatewayRateLimitError,
+   GatewayInternalServerError,
+   GatewayInvalidRequestError,
+   GatewayModelNotFoundError,
+   GatewayResponseError,
+ } from './errors';
+ import { describe, it, expect, vi } from 'vitest';
+
+ const TEST_PROMPT: LanguageModelV3Prompt = [
+   { role: 'user', content: [{ type: 'text', text: 'Hello' }] },
+ ];
+
+ const createTestModel = (
+   config: Partial<
+     GatewayConfig & { o11yHeaders?: Record<string, string> }
+   > = {},
+ ) => {
+   return new GatewayLanguageModel('test-model', {
+     provider: 'test-provider',
+     baseURL: 'https://api.test.com',
+     headers: () => ({
+       Authorization: 'Bearer test-token',
+       'ai-gateway-auth-method': 'api-key',
+     }),
+     fetch: globalThis.fetch,
+     o11yHeaders: config.o11yHeaders || {},
+     ...config,
+   });
+ };
+
+ describe('GatewayLanguageModel', () => {
+   const server = createTestServer({
+     'https://api.test.com/language-model': {},
+   });
+
+   describe('constructor', () => {
+     it('should set basic properties', () => {
+       const model = createTestModel();
+       expect(model.modelId).toBe('test-model');
+       expect(model.provider).toBe('test-provider');
+       expect(model.specificationVersion).toBe('v3');
+     });
+   });
+
+   describe('doGenerate', () => {
+     function prepareJsonResponse({
+       content = { type: 'text', text: '' },
+       usage = {
+         prompt_tokens: 4,
+         completion_tokens: 30,
+       },
+       finish_reason = 'stop',
+       id = 'test-id',
+       created = 1711115037,
+       model = 'test-model',
+     } = {}) {
+       server.urls['https://api.test.com/language-model'].response = {
+         type: 'json-value',
+         body: {
+           id,
+           created,
+           model,
+           content,
+           finish_reason,
+           usage,
+         },
+       };
+     }
+
+     it('should pass headers correctly', async () => {
+       prepareJsonResponse({ content: { type: 'text', text: 'Hello, World!' } });
+
+       await createTestModel().doGenerate({
+         prompt: TEST_PROMPT,
+         headers: {
+           'Custom-Header': 'test-value',
+         },
+       });
+
+       const headers = server.calls[0].requestHeaders;
+       expect(headers).toMatchObject({
+         authorization: 'Bearer test-token',
+         'custom-header': 'test-value',
+         'ai-language-model-specification-version': '3',
+         'ai-language-model-id': 'test-model',
+         'ai-language-model-streaming': 'false',
+       });
+     });
+
+     it('should extract text response', async () => {
+       prepareJsonResponse({ content: { type: 'text', text: 'Hello, World!' } });
+
+       const { content } = await createTestModel().doGenerate({
+         prompt: TEST_PROMPT,
+       });
+
+       expect(content).toEqual({ type: 'text', text: 'Hello, World!' });
+     });
+
+     it('should extract usage information', async () => {
+       prepareJsonResponse({
+         content: { type: 'text', text: 'Test' },
+         usage: {
+           prompt_tokens: 10,
+           completion_tokens: 20,
+         },
+       });
+
+       const { usage } = await createTestModel().doGenerate({
+         prompt: TEST_PROMPT,
+       });
+
+       expect(usage).toEqual({
+         prompt_tokens: 10,
+         completion_tokens: 20,
+       });
+     });
+
+     it('should remove abortSignal from the request body', async () => {
+       prepareJsonResponse({ content: { type: 'text', text: 'Test response' } });
+
+       const controller = new AbortController();
+       const signal = controller.signal;
+
+       await createTestModel().doGenerate({
+         prompt: TEST_PROMPT,
+         abortSignal: signal,
+       });
+
+       const requestBody = await server.calls[0].requestBodyJson;
+       expect(requestBody).not.toHaveProperty('abortSignal');
+     });
+
+     it('should pass abortSignal to fetch when provided', async () => {
+       prepareJsonResponse({ content: { type: 'text', text: 'Test response' } });
+
+       const mockFetch = vi.fn().mockImplementation(globalThis.fetch);
+
+       const controller = new AbortController();
+       const signal = controller.signal;
+
+       await createTestModel({
+         fetch: mockFetch,
+       }).doGenerate({
+         prompt: TEST_PROMPT,
+         abortSignal: signal,
+       });
+
+       expect(mockFetch).toHaveBeenCalled();
+       const fetchCallArgs = mockFetch.mock.calls[0];
+       expect(fetchCallArgs[1].signal).toBe(signal);
+     });
+
+     it('should not pass abortSignal to fetch when not provided', async () => {
+       prepareJsonResponse({ content: { type: 'text', text: 'Test response' } });
+
+       const mockFetch = vi.fn().mockImplementation(globalThis.fetch);
+
+       await createTestModel({
+         fetch: mockFetch,
+       }).doGenerate({
+         prompt: TEST_PROMPT,
+       });
+
+       expect(mockFetch).toHaveBeenCalled();
+       const fetchCallArgs = mockFetch.mock.calls[0];
+       expect(fetchCallArgs[1].signal).toBeUndefined();
+     });
+
+     it('should include o11y headers in the request', async () => {
+       prepareJsonResponse({ content: { type: 'text', text: 'Hello, World!' } });
+
+       const o11yHeaders = {
+         'ai-o11y-deployment-id': 'test-deployment',
+         'ai-o11y-environment': 'production',
+         'ai-o11y-region': 'iad1',
+       };
+
+       await createTestModel({ o11yHeaders }).doGenerate({
+         prompt: TEST_PROMPT,
+       });
+
+       const headers = server.calls[0].requestHeaders;
+       expect(headers).toMatchObject(o11yHeaders);
+     });
+     it('should convert API call errors to Gateway errors', async () => {
+       server.urls['https://api.test.com/language-model'].response = {
+         type: 'error',
+         status: 401,
+         body: JSON.stringify({
+           error: {
+             message: 'Invalid API key provided',
+             type: 'authentication_error',
+           },
+         }),
+       };
+
+       const model = createTestModel();
+
+       try {
+         await model.doGenerate({ prompt: TEST_PROMPT });
+         expect.fail('Should have thrown an error');
+       } catch (error) {
+         expect(GatewayAuthenticationError.isInstance(error)).toBe(true);
+         const authError = error as GatewayAuthenticationError;
+         expect(authError.message).toContain('Invalid API key');
+         expect(authError.message).toContain('vercel.com/d?to=');
+         expect(authError.statusCode).toBe(401);
+         expect(authError.type).toBe('authentication_error');
+       }
+     });
+
+     it('should handle malformed error responses', async () => {
+       server.urls['https://api.test.com/language-model'].response = {
+         type: 'error',
+         status: 500,
+         body: 'Not JSON',
+       };
+
+       const model = createTestModel();
+
+       try {
+         await model.doGenerate({ prompt: TEST_PROMPT });
+         expect.fail('Should have thrown an error');
+       } catch (error) {
+         expect(GatewayResponseError.isInstance(error)).toBe(true);
+         const responseError = error as GatewayResponseError;
+         expect(responseError.statusCode).toBe(500);
+         expect(responseError.type).toBe('response_error');
+       }
+     });
+
+     it('should handle rate limit errors', async () => {
+       server.urls['https://api.test.com/language-model'].response = {
+         type: 'error',
+         status: 429,
+         body: JSON.stringify({
+           error: {
+             message: 'Rate limit exceeded. Try again later.',
+             type: 'rate_limit_exceeded',
+           },
+         }),
+       };
+
+       const model = createTestModel();
+
+       try {
+         await model.doGenerate({ prompt: TEST_PROMPT });
+         expect.fail('Should have thrown an error');
+       } catch (error) {
+         expect(GatewayRateLimitError.isInstance(error)).toBe(true);
+         const rateLimitError = error as GatewayRateLimitError;
+         expect(rateLimitError.message).toBe(
+           'Rate limit exceeded. Try again later.',
+         );
+         expect(rateLimitError.statusCode).toBe(429);
+         expect(rateLimitError.type).toBe('rate_limit_exceeded');
+       }
+     });
+
+     it('should handle invalid request errors', async () => {
+       server.urls['https://api.test.com/language-model'].response = {
+         type: 'error',
+         status: 400,
+         body: JSON.stringify({
+           error: {
+             message: 'Invalid prompt format',
+             type: 'invalid_request_error',
+           },
+         }),
+       };
+
+       const model = createTestModel();
+
+       try {
+         await model.doGenerate({ prompt: TEST_PROMPT });
+         expect.fail('Should have thrown an error');
+       } catch (error) {
+         expect(GatewayInvalidRequestError.isInstance(error)).toBe(true);
+         const invalidError = error as GatewayInvalidRequestError;
+         expect(invalidError.message).toBe('Invalid prompt format');
+         expect(invalidError.statusCode).toBe(400);
+         expect(invalidError.type).toBe('invalid_request_error');
+       }
+     });
+
+     describe('Image part encoding', () => {
+       it('should not modify prompt without image parts', async () => {
+         prepareJsonResponse({ content: { type: 'text', text: 'response' } });
+
+         await createTestModel().doGenerate({
+           prompt: TEST_PROMPT,
+         });
+
+         const requestBody = await server.calls[0].requestBodyJson;
+         expect(requestBody.prompt).toEqual(TEST_PROMPT);
+       });
+
+       it('should encode Uint8Array image part to base64 data URL with default mime type', async () => {
+         prepareJsonResponse({ content: { type: 'text', text: 'response' } });
+         const imageBytes = new Uint8Array([1, 2, 3, 4]);
+         const expectedBase64 = Buffer.from(imageBytes).toString('base64');
+         const imagePrompt: LanguageModelV3Prompt = [
+           {
+             role: 'user',
+             content: [
+               { type: 'text', text: 'Describe this image:' },
+               { type: 'file', data: imageBytes, mediaType: 'image/jpeg' },
+             ],
+           },
+         ];
+
+         await createTestModel().doGenerate({
+           prompt: imagePrompt,
+         });
+
+         const requestBody = await server.calls[0].requestBodyJson;
+         const imagePart = requestBody.prompt[0]
+           .content[1] as LanguageModelV3FilePart;
+
+         expect(imagePart.type).toBe('file');
+         expect(imagePart.data).toBe(`data:image/jpeg;base64,${expectedBase64}`);
+         expect(imagePart.mediaType).toBe('image/jpeg');
+       });
+
+       it('should encode Uint8Array image part to base64 data URL with specified mime type', async () => {
+         prepareJsonResponse({ content: { type: 'text', text: 'response' } });
+         const imageBytes = new Uint8Array([5, 6, 7, 8]);
+         const expectedBase64 = Buffer.from(imageBytes).toString('base64');
+         const mimeType = 'image/png';
+         const imagePrompt: LanguageModelV3Prompt = [
+           {
+             role: 'user',
+             content: [{ type: 'file', data: imageBytes, mediaType: mimeType }],
+           },
+         ];
+
+         await createTestModel().doGenerate({
+           prompt: imagePrompt,
+         });
+
+         const requestBody = await server.calls[0].requestBodyJson;
+         const imagePart = requestBody.prompt[0]
+           .content[0] as LanguageModelV3FilePart;
+
+         expect(imagePart.type).toBe('file');
+         expect(imagePart.data).toBe(
+           `data:${mimeType};base64,${expectedBase64}`,
+         );
+         expect(imagePart.mediaType).toBe(mimeType);
+       });
+
+       it('should not modify image part with URL', async () => {
+         prepareJsonResponse({ content: { type: 'text', text: 'response' } });
+         const imageUrl = new URL('https://example.com/image.jpg');
+         const imagePrompt: LanguageModelV3Prompt = [
+           {
+             role: 'user',
+             content: [
+               { type: 'text', text: 'Image URL:' },
+               { type: 'file', data: imageUrl, mediaType: 'image/jpeg' },
+             ],
+           },
+         ];
+
+         await createTestModel().doGenerate({
+           prompt: imagePrompt,
+         });
+
+         const requestBody = await server.calls[0].requestBodyJson;
+         const imagePart = requestBody.prompt[0]
+           .content[1] as LanguageModelV3FilePart;
+
+         expect(imagePart.type).toBe('file');
+         expect(imagePart.data).toBe(imageUrl.toString());
+       });
+
+       it('should handle mixed content types correctly', async () => {
+         prepareJsonResponse({ content: { type: 'text', text: 'response' } });
+         const imageBytes = new Uint8Array([1, 2, 3, 4]);
+         const expectedBase64 = Buffer.from(imageBytes).toString('base64');
+         const imageUrl = new URL('https://example.com/image2.png');
+         const imagePrompt: LanguageModelV3Prompt = [
+           {
+             role: 'user',
+             content: [
+               { type: 'text', text: 'First text.' },
+               { type: 'file', data: imageBytes, mediaType: 'image/gif' },
+               { type: 'text', text: 'Second text.' },
+               { type: 'file', data: imageUrl, mediaType: 'image/png' },
+             ],
+           },
+         ];
+
+         await createTestModel().doGenerate({
+           prompt: imagePrompt,
+         });
+
+         const requestBody = await server.calls[0].requestBodyJson;
+         const content = requestBody.prompt[0].content;
+
+         expect(content[0]).toEqual({ type: 'text', text: 'First text.' });
+         expect(content[1]).toEqual({
+           type: 'file',
+           data: `data:image/gif;base64,${expectedBase64}`,
+           mediaType: 'image/gif',
+         });
+         expect(content[2]).toEqual({ type: 'text', text: 'Second text.' });
+         expect(content[3]).toEqual({
+           type: 'file',
+           data: imageUrl.toString(),
+           mediaType: 'image/png',
+         });
+       });
+     });
+
+     it('should handle various error types with proper conversion', async () => {
+       const model = createTestModel();
+
+       server.urls['https://api.test.com/language-model'].response = {
+         type: 'error',
+         status: 400,
+         body: JSON.stringify({
+           error: {
+             message: 'Invalid request format',
+             type: 'invalid_request_error',
+           },
+         }),
+       };
+
+       try {
+         await model.doGenerate({ prompt: TEST_PROMPT });
+         expect.fail('Should have thrown an error');
+       } catch (error) {
+         expect(GatewayInvalidRequestError.isInstance(error)).toBe(true);
+         const invalidError = error as GatewayInvalidRequestError;
+         expect(invalidError.message).toBe('Invalid request format');
+         expect(invalidError.statusCode).toBe(400);
+         expect(invalidError.type).toBe('invalid_request_error');
+       }
+
+       // Test model not found error
+       server.urls['https://api.test.com/language-model'].response = {
+         type: 'error',
+         status: 404,
+         body: JSON.stringify({
+           error: {
+             message: 'Model xyz not found',
+             type: 'model_not_found',
+             param: { modelId: 'xyz' },
+           },
+         }),
+       };
+
+       try {
+         await model.doGenerate({ prompt: TEST_PROMPT });
+         expect.fail('Should have thrown an error');
+       } catch (error) {
+         expect(GatewayModelNotFoundError.isInstance(error)).toBe(true);
+         const modelError = error as GatewayModelNotFoundError;
+         expect(modelError.message).toBe('Model xyz not found');
+         expect(modelError.statusCode).toBe(404);
+         expect(modelError.type).toBe('model_not_found');
+         expect(modelError.modelId).toBe('xyz');
+       }
+
+       // Test internal server error
+       server.urls['https://api.test.com/language-model'].response = {
+         type: 'error',
+         status: 500,
+         body: JSON.stringify({
+           error: {
+             message: 'Database connection failed',
+             type: 'internal_server_error',
+           },
+         }),
+       };
+
+       try {
+         await model.doGenerate({ prompt: TEST_PROMPT });
+         expect.fail('Should have thrown an error');
+       } catch (error) {
+         expect(GatewayInternalServerError.isInstance(error)).toBe(true);
+         const serverError = error as GatewayInternalServerError;
+         expect(serverError.message).toBe('Database connection failed');
+         expect(serverError.statusCode).toBe(500);
+         expect(serverError.type).toBe('internal_server_error');
+       }
+     });
+
+     describe('Gateway error handling for malformed responses', () => {
+       it('should include actual response body when APICallError has no data', async () => {
+         const malformedResponse = {
+           ferror: { message: 'Model not found', type: 'model_not_found' },
+         };
+
+         // Mock the server to return malformed response that can't be parsed by AI SDK
+         server.urls['https://api.test.com/language-model'].response = {
+           type: 'error',
+           status: 404,
+           body: JSON.stringify(malformedResponse),
+         };
+
+         const model = createTestModel();
+
+         try {
+           await model.doGenerate({
+             prompt: [
+               { role: 'user', content: [{ type: 'text', text: 'test' }] },
+             ],
+           });
+           expect.fail('Expected error to be thrown');
+         } catch (error) {
+           expect(GatewayResponseError.isInstance(error)).toBe(true);
+           const gatewayError = error as GatewayResponseError;
+           expect(gatewayError.response).toEqual(malformedResponse);
+           expect(gatewayError.validationError).toBeDefined();
+         }
+       });
+
+       it('should use raw response body when JSON parsing fails', async () => {
+         const invalidJson = 'invalid json response';
+
+         // Mock the server to return invalid JSON
+         server.urls['https://api.test.com/language-model'].response = {
+           type: 'error',
+           status: 500,
+           body: invalidJson,
+         };
+
+         const model = createTestModel();
+
+         try {
+           await model.doGenerate({
+             prompt: [
+               { role: 'user', content: [{ type: 'text', text: 'test' }] },
+             ],
+           });
+           expect.fail('Expected error to be thrown');
+         } catch (error) {
+           expect(GatewayResponseError.isInstance(error)).toBe(true);
+           const gatewayError = error as GatewayResponseError;
+           expect(gatewayError.response).toBe(invalidJson);
+           expect(gatewayError.validationError).toBeDefined();
+         }
+       });
+     });
+   });
+
+   describe('doStream', () => {
+     function prepareStreamResponse({
+       content,
+       finish_reason = 'stop',
+     }: {
+       content: string[];
+       finish_reason?: string;
+     }) {
+       server.urls['https://api.test.com/language-model'].response = {
+         type: 'stream-chunks',
+         chunks: [
+           ...content.map(
+             text =>
+               `data: ${JSON.stringify({
+                 type: 'text-delta',
+                 textDelta: text,
+               })}\n\n`,
+           ),
+           `data: ${JSON.stringify({
+             type: 'finish',
+             finishReason: finish_reason,
+             usage: {
+               prompt_tokens: 10,
+               completion_tokens: 20,
+             },
+           })}\n\n`,
+         ],
+       };
+     }
+
+     it('should stream text deltas', async () => {
+       prepareStreamResponse({
+         content: ['Hello', ', ', 'World!'],
+       });
+
+       const { stream } = await createTestModel().doStream({
+         prompt: TEST_PROMPT,
+         includeRawChunks: false,
+       });
+
+       expect(await convertReadableStreamToArray(stream)).toMatchInlineSnapshot(`
+         [
+           {
+             "textDelta": "Hello",
+             "type": "text-delta",
+           },
+           {
+             "textDelta": ", ",
+             "type": "text-delta",
+           },
+           {
+             "textDelta": "World!",
+             "type": "text-delta",
+           },
+           {
+             "finishReason": "stop",
+             "type": "finish",
+             "usage": {
+               "completion_tokens": 20,
+               "prompt_tokens": 10,
+             },
+           },
+         ]
+       `);
+     });
+
+     it('should pass streaming headers', async () => {
+       prepareStreamResponse({
+         content: ['Test'],
+       });
+
+       await createTestModel().doStream({
+         prompt: TEST_PROMPT,
+         includeRawChunks: false,
+       });
+
+       const headers = server.calls[0].requestHeaders;
+       expect(headers).toMatchObject({
+         'ai-language-model-specification-version': '3',
+         'ai-language-model-id': 'test-model',
+         'ai-language-model-streaming': 'true',
+       });
+     });
+
+     it('should remove abortSignal from the streaming request body', async () => {
+       prepareStreamResponse({
+         content: ['Test content'],
+       });
+
+       const controller = new AbortController();
+       const signal = controller.signal;
+
+       await createTestModel().doStream({
+         prompt: TEST_PROMPT,
+         abortSignal: signal,
+         includeRawChunks: false,
+       });
+
+       const requestBody = await server.calls[0].requestBodyJson;
+       expect(requestBody).not.toHaveProperty('abortSignal');
+     });
+
+     it('should pass abortSignal to fetch when provided for streaming', async () => {
+       prepareStreamResponse({
+         content: ['Test content'],
+       });
+
+       const mockFetch = vi.fn().mockImplementation(globalThis.fetch);
+
+       const controller = new AbortController();
+       const signal = controller.signal;
+
+       await createTestModel({
+         fetch: mockFetch,
+       }).doStream({
+         prompt: TEST_PROMPT,
+         abortSignal: signal,
+         includeRawChunks: false,
+       });
+
+       expect(mockFetch).toHaveBeenCalled();
+       const fetchCallArgs = mockFetch.mock.calls[0];
+       expect(fetchCallArgs[1].signal).toBe(signal);
+     });
+
+     it('should not pass abortSignal to fetch when not provided for streaming', async () => {
+       prepareStreamResponse({
+         content: ['Test content'],
+       });
+
+       const mockFetch = vi.fn().mockImplementation(globalThis.fetch);
+
+       await createTestModel({
+         fetch: mockFetch,
+       }).doStream({
+         prompt: TEST_PROMPT,
+         includeRawChunks: false,
+       });
+
+       expect(mockFetch).toHaveBeenCalled();
+       const fetchCallArgs = mockFetch.mock.calls[0];
+       expect(fetchCallArgs[1].signal).toBeUndefined();
+     });
+
+     it('should include o11y headers in the streaming request', async () => {
+       prepareStreamResponse({
+         content: ['Test content'],
+       });
+
+       const o11yHeaders = {
+         'ai-o11y-deployment-id': 'test-deployment',
+         'ai-o11y-environment': 'production',
+         'ai-o11y-region': 'iad1',
+       };
+
+       await createTestModel({ o11yHeaders }).doStream({
+         prompt: TEST_PROMPT,
+         includeRawChunks: false,
+       });
+
+       const headers = server.calls[0].requestHeaders;
+       expect(headers).toMatchObject(o11yHeaders);
+     });
+
+     it('should convert API call errors to Gateway errors in streaming', async () => {
+       server.urls['https://api.test.com/language-model'].response = {
+         type: 'error',
+         status: 429,
+         body: JSON.stringify({
+           error: {
+             message: 'Rate limit exceeded',
+             type: 'rate_limit_exceeded',
+           },
+         }),
+       };
+
+       const model = createTestModel();
+
+       try {
+         await model.doStream({ prompt: TEST_PROMPT, includeRawChunks: false });
+         expect.fail('Should have thrown an error');
+       } catch (error) {
+         expect(GatewayRateLimitError.isInstance(error)).toBe(true);
+         const rateLimitError = error as GatewayRateLimitError;
+         expect(rateLimitError.message).toBe('Rate limit exceeded');
+         expect(rateLimitError.statusCode).toBe(429);
+         expect(rateLimitError.type).toBe('rate_limit_exceeded');
+       }
+     });
+
+     it('should handle authentication errors in streaming', async () => {
+       server.urls['https://api.test.com/language-model'].response = {
+         type: 'error',
+         status: 401,
+         body: JSON.stringify({
+           error: {
+             message: 'Authentication failed for streaming',
+             type: 'authentication_error',
+           },
+         }),
+       };
+
+       const model = createTestModel();
+
+       try {
+         await model.doStream({ prompt: TEST_PROMPT, includeRawChunks: false });
+         expect.fail('Should have thrown an error');
+       } catch (error) {
+         expect(GatewayAuthenticationError.isInstance(error)).toBe(true);
+         const authError = error as GatewayAuthenticationError;
+         expect(authError.message).toContain('Invalid API key');
+         expect(authError.message).toContain('vercel.com/d?to=');
+         expect(authError.statusCode).toBe(401);
+         expect(authError.type).toBe('authentication_error');
+       }
+     });
+
+     it('should handle invalid request errors in streaming', async () => {
+       server.urls['https://api.test.com/language-model'].response = {
+         type: 'error',
+         status: 400,
+         body: JSON.stringify({
+           error: {
+             message: 'Invalid streaming request',
+             type: 'invalid_request_error',
+           },
+         }),
+       };
+
+       const model = createTestModel();
+
+       try {
+         await model.doStream({ prompt: TEST_PROMPT, includeRawChunks: false });
+         expect.fail('Should have thrown an error');
+       } catch (error) {
+         expect(GatewayInvalidRequestError.isInstance(error)).toBe(true);
+         const invalidError = error as GatewayInvalidRequestError;
+         expect(invalidError.message).toBe('Invalid streaming request');
+         expect(invalidError.statusCode).toBe(400);
+         expect(invalidError.type).toBe('invalid_request_error');
+       }
+     });
+
+     it('should handle malformed error responses in streaming', async () => {
+       server.urls['https://api.test.com/language-model'].response = {
+         type: 'error',
+         status: 500,
+         body: 'Invalid JSON for streaming',
+       };
+
+       const model = createTestModel();
+
+       try {
+         await model.doStream({ prompt: TEST_PROMPT, includeRawChunks: false });
+         expect.fail('Should have thrown an error');
+       } catch (error) {
+         expect(GatewayResponseError.isInstance(error)).toBe(true);
+         const responseError = error as GatewayResponseError;
+         expect(responseError.statusCode).toBe(500);
+         expect(responseError.type).toBe('response_error');
+       }
+     });
+
+     describe('Image part encoding', () => {
+       it('should not modify prompt without image parts', async () => {
+         prepareStreamResponse({ content: ['response'] });
+
+         await createTestModel().doStream({
+           prompt: TEST_PROMPT,
+           includeRawChunks: false,
+         });
+
+         const requestBody = await server.calls[0].requestBodyJson;
+         expect(requestBody.prompt).toEqual(TEST_PROMPT);
+       });
+
+       it('should encode Uint8Array image part to base64 data URL with default mime type', async () => {
+         prepareStreamResponse({ content: ['response'] });
+         const imageBytes = new Uint8Array([1, 2, 3, 4]);
+         const expectedBase64 = Buffer.from(imageBytes).toString('base64');
+         const imagePrompt: LanguageModelV3Prompt = [
+           {
+             role: 'user',
+             content: [
+               { type: 'text', text: 'Describe:' },
+               { type: 'file', data: imageBytes, mediaType: 'image/jpeg' },
+             ],
+           },
+         ];
+
+         await createTestModel().doStream({
+           prompt: imagePrompt,
+           includeRawChunks: false,
+         });
+
+         const requestBody = await server.calls[0].requestBodyJson;
+         const imagePart = requestBody.prompt[0]
+           .content[1] as LanguageModelV3FilePart;
+
+         expect(imagePart.type).toBe('file');
+         expect(imagePart.data).toBe(`data:image/jpeg;base64,${expectedBase64}`);
+         expect(imagePart.mediaType).toBe('image/jpeg');
+       });
+
+       it('should encode Uint8Array image part to base64 data URL with specified mime type', async () => {
+         prepareStreamResponse({ content: ['response'] });
+         const imageBytes = new Uint8Array([5, 6, 7, 8]);
+         const expectedBase64 = Buffer.from(imageBytes).toString('base64');
+         const mimeType = 'image/png';
+         const imagePrompt: LanguageModelV3Prompt = [
+           {
+             role: 'user',
+             content: [
+               { type: 'text', text: 'Describe:' },
+               { type: 'file', data: imageBytes, mediaType: mimeType },
+             ],
+           },
+         ];
+
+         await createTestModel().doStream({
+           prompt: imagePrompt,
+           includeRawChunks: false,
+         });
+
+         const requestBody = await server.calls[0].requestBodyJson;
+         const imagePart = requestBody.prompt[0]
+           .content[1] as LanguageModelV3FilePart;
+
+         expect(imagePart.type).toBe('file');
+         expect(imagePart.data).toBe(
+           `data:${mimeType};base64,${expectedBase64}`,
+         );
+         expect(imagePart.mediaType).toBe(mimeType);
+       });
+
+       it('should not modify image part with URL', async () => {
+         prepareStreamResponse({ content: ['response'] });
+         const imageUrl = new URL('https://example.com/image.jpg');
+         const imagePrompt: LanguageModelV3Prompt = [
+           {
+             role: 'user',
+             content: [
+               { type: 'text', text: 'URL:' },
+               { type: 'file', data: imageUrl, mediaType: 'image/jpeg' },
+             ],
+           },
+         ];
+
+         await createTestModel().doStream({
+           prompt: imagePrompt,
+           includeRawChunks: false,
+         });
+
+         const requestBody = await server.calls[0].requestBodyJson;
+         const imagePart = requestBody.prompt[0]
+           .content[1] as LanguageModelV3FilePart;
+
+         expect(imagePart.type).toBe('file');
+         expect(imagePart.data).toBe(imageUrl.toString());
+         expect(imagePart.mediaType).toBe('image/jpeg');
+       });
+
+       it('should handle mixed content types correctly for streaming', async () => {
+         prepareStreamResponse({ content: ['response'] });
+         const imageBytes = new Uint8Array([1, 2, 3, 4]);
+         const expectedBase64 = Buffer.from(imageBytes).toString('base64');
+         const imageUrl = new URL('https://example.com/image2.png');
+         const imagePrompt: LanguageModelV3Prompt = [
+           {
+             role: 'user',
+             content: [
+               { type: 'text', text: 'First text.' },
+               { type: 'file', data: imageBytes, mediaType: 'image/gif' },
+               { type: 'text', text: 'Second text.' },
+               { type: 'file', data: imageUrl, mediaType: 'image/png' },
+             ],
+           },
+         ];
+
+         await createTestModel().doStream({
+           prompt: imagePrompt,
+           includeRawChunks: false,
+         });
+
+         const requestBody = await server.calls[0].requestBodyJson;
+         const content = requestBody.prompt[0].content;
+
+         expect(content[0]).toEqual({ type: 'text', text: 'First text.' });
+         expect(content[1]).toEqual({
+           type: 'file',
+           data: `data:image/gif;base64,${expectedBase64}`,
+           mediaType: 'image/gif',
+         });
+         expect(content[2]).toEqual({ type: 'text', text: 'Second text.' });
+         expect(content[3]).toEqual({
+           type: 'file',
+           data: imageUrl.toString(),
+           mediaType: 'image/png',
+         });
+       });
+     });
+
+     describe('Error handling', () => {
+       it('should not double-wrap existing Gateway errors', async () => {
+         // Mock fetch to throw a Gateway error directly
+         const existingGatewayError = new GatewayAuthenticationError({
+           message: 'Already a Gateway error',
+           statusCode: 401,
+         });
+
+         const mockFetch = vi.fn().mockRejectedValue(existingGatewayError);
+         const model = createTestModel({ fetch: mockFetch });
+
+         try {
+           await model.doGenerate({ prompt: TEST_PROMPT });
+           expect.fail('Should have thrown an error');
+         } catch (error: unknown) {
+           // Should be the same instance, not wrapped
+           expect(error).toBe(existingGatewayError);
+           expect((error as GatewayAuthenticationError).message).toBe(
+             'Already a Gateway error',
+           );
+         }
+       });
+
+       it('should handle network errors gracefully', async () => {
+         // Mock fetch to throw a network error
+         const networkError = new Error('Network connection failed');
+         const mockFetch = vi.fn().mockRejectedValue(networkError);
+         const model = createTestModel({ fetch: mockFetch });
+
+         try {
+           await model.doGenerate({ prompt: TEST_PROMPT });
+           expect.fail('Should have thrown an error');
+         } catch (error: unknown) {
+           expect(GatewayResponseError.isInstance(error)).toBe(true);
+           const responseError = error as GatewayResponseError;
+           expect(responseError.message).toBe(
+             'Invalid error response format: Gateway request failed: Network connection failed',
+           );
+           expect(responseError.cause).toBe(networkError);
+         }
+       });
+
+       it('should handle network errors gracefully in streaming', async () => {
+         // Mock fetch to throw a network error during streaming
+         const networkError = new Error('Network connection failed');
+         const mockFetch = vi.fn().mockRejectedValue(networkError);
+         const model = createTestModel({ fetch: mockFetch });
+
+         try {
+           await model.doStream({
+             prompt: TEST_PROMPT,
+             includeRawChunks: false,
+           });
+           expect.fail('Should have thrown an error');
+         } catch (error: unknown) {
+           expect(GatewayResponseError.isInstance(error)).toBe(true);
+           const responseError = error as GatewayResponseError;
+           expect(responseError.message).toBe(
+             'Invalid error response format: Gateway request failed: Network connection failed',
+           );
+           expect(responseError.cause).toBe(networkError);
+         }
+       });
+
+       it('should preserve error cause chain', async () => {
+         server.urls['https://api.test.com/language-model'].response = {
+           type: 'error',
+           status: 401,
+           body: JSON.stringify({
+             error: {
+               message: 'Token expired',
+               type: 'authentication_error',
+             },
+           }),
+         };
+
+         const model = createTestModel();
+
+         try {
+           await model.doGenerate({ prompt: TEST_PROMPT });
+           expect.fail('Should have thrown an error');
+         } catch (error: unknown) {
+           expect(GatewayAuthenticationError.isInstance(error)).toBe(true);
+           const authError = error as GatewayAuthenticationError;
+           expect(authError.cause).toBeDefined();
+         }
+       });
+     });
+   });
+
+   describe('raw chunks filtering', () => {
+     it('should filter raw chunks based on includeRawChunks option', async () => {
+       server.urls['https://api.test.com/language-model'].response = {
+         type: 'stream-chunks',
+         chunks: [
+           `data: {"type":"stream-start","warnings":[]}\n\n`,
+           `data: {"type":"raw","rawValue":{"id":"test-chunk","object":"chat.completion.chunk","choices":[{"delta":{"content":"Hello"}}]}}\n\n`,
+           `data: {"type":"text-delta","textDelta":"Hello"}\n\n`,
+           `data: {"type":"raw","rawValue":{"id":"test-chunk-2","object":"chat.completion.chunk","choices":[{"delta":{"content":" world"}}]}}\n\n`,
+           `data: {"type":"text-delta","textDelta":" world"}\n\n`,
+           `data: {"type":"finish","finishReason":"stop","usage":{"prompt_tokens":10,"completion_tokens":5}}\n\n`,
+         ],
+       };
+
+       const { stream } = await createTestModel().doStream({
+         prompt: TEST_PROMPT,
+         includeRawChunks: false, // Raw chunks should be filtered out
+       });
+
+       const chunks = await convertReadableStreamToArray(stream);
+
+       expect(chunks).toMatchInlineSnapshot(`
+         [
+           {
+             "type": "stream-start",
+             "warnings": [],
+           },
+           {
+             "textDelta": "Hello",
+             "type": "text-delta",
+           },
+           {
+             "textDelta": " world",
+             "type": "text-delta",
+           },
+           {
+             "finishReason": "stop",
+             "type": "finish",
+             "usage": {
+               "completion_tokens": 5,
+               "prompt_tokens": 10,
+             },
+           },
+         ]
+       `);
+     });
+
+     it('should include raw chunks when includeRawChunks is true', async () => {
+       server.urls['https://api.test.com/language-model'].response = {
+         type: 'stream-chunks',
+         chunks: [
+           `data: {"type":"stream-start","warnings":[]}\n\n`,
+           `data: {"type":"raw","rawValue":{"id":"test-chunk","object":"chat.completion.chunk","choices":[{"delta":{"content":"Hello"}}]}}\n\n`,
+           `data: {"type":"text-delta","textDelta":"Hello"}\n\n`,
+           `data: {"type":"finish","finishReason":"stop","usage":{"prompt_tokens":10,"completion_tokens":5}}\n\n`,
+         ],
+       };
+
+       const { stream } = await createTestModel().doStream({
+         prompt: TEST_PROMPT,
+         includeRawChunks: true, // Raw chunks should be included
+       });
+
+       const chunks = await convertReadableStreamToArray(stream);
+
+       expect(chunks).toMatchInlineSnapshot(`
+         [
+           {
+             "type": "stream-start",
+             "warnings": [],
+           },
+           {
+             "rawValue": {
+               "choices": [
+                 {
+                   "delta": {
+                     "content": "Hello",
+                   },
+                 },
+               ],
+               "id": "test-chunk",
+               "object": "chat.completion.chunk",
+             },
+             "type": "raw",
+           },
+           {
+             "textDelta": "Hello",
+             "type": "text-delta",
+           },
+           {
+             "finishReason": "stop",
+             "type": "finish",
+             "usage": {
+               "completion_tokens": 5,
+               "prompt_tokens": 10,
+             },
+           },
+         ]
+       `);
+     });
+   });
+
+   describe('timestamp conversion', () => {
+     it('should convert timestamp strings to Date objects in response-metadata chunks', async () => {
+       const timestampString = '2023-12-07T10:30:00.000Z';
+
+       server.urls['https://api.test.com/language-model'].response = {
+         type: 'stream-chunks',
+         chunks: [
+           `data: {"type":"stream-start","warnings":[]}\n\n`,
+           `data: {"type":"response-metadata","id":"test-id","modelId":"test-model","timestamp":"${timestampString}"}\n\n`,
+           `data: {"type":"text-delta","textDelta":"Hello"}\n\n`,
+           `data: {"type":"finish","finishReason":"stop","usage":{"prompt_tokens":10,"completion_tokens":5}}\n\n`,
+         ],
+       };
+
+       const { stream } = await createTestModel().doStream({
+         prompt: TEST_PROMPT,
+         includeRawChunks: false,
+       });
+
+       const chunks = await convertReadableStreamToArray(stream);
+
+       expect(chunks).toHaveLength(4);
+       expect(chunks[0]).toEqual({
+         type: 'stream-start',
+         warnings: [],
+       });
+
+       // Check that the response-metadata chunk has timestamp converted to Date
+       const responseMetadataChunk = chunks[1] as any;
+       expect(responseMetadataChunk).toMatchObject({
+         type: 'response-metadata',
+         id: 'test-id',
+         modelId: 'test-model',
+       });
+       expect(responseMetadataChunk.timestamp).toBeInstanceOf(Date);
+       expect(responseMetadataChunk.timestamp.toISOString()).toBe(
+         timestampString,
+       );
+     });
+
+     it('should not modify timestamp if it is already a Date object', async () => {
+       const timestampDate = new Date('2023-12-07T10:30:00.000Z');
+
+       // Use standard stream-chunks format with Date serialized as string, then manually parse
+       server.urls['https://api.test.com/language-model'].response = {
+         type: 'stream-chunks',
+         chunks: [
+           `data: {"type":"stream-start","warnings":[]}\n\n`,
+           `data: {"type":"response-metadata","id":"test-id","modelId":"test-model","timestamp":"${timestampDate.toISOString()}"}\n\n`,
+           `data: {"type":"text-delta","textDelta":"Hello"}\n\n`,
+           `data: {"type":"finish","finishReason":"stop","usage":{"prompt_tokens":10,"completion_tokens":5}}\n\n`,
+         ],
+       };
+
+       const { stream } = await createTestModel().doStream({
+         prompt: TEST_PROMPT,
+         includeRawChunks: false,
+       });
+
+       const chunks = await convertReadableStreamToArray(stream);
+
+       expect(chunks).toHaveLength(4);
+
+       // Check that the response-metadata chunk timestamp is converted to Date
+       const responseMetadataChunk = chunks[1] as any;
+       expect(responseMetadataChunk).toMatchObject({
+         type: 'response-metadata',
+         id: 'test-id',
+         modelId: 'test-model',
+       });
+       expect(responseMetadataChunk.timestamp).toBeInstanceOf(Date);
+     });
+
+     it('should not modify response-metadata chunks without timestamp', async () => {
+       server.urls['https://api.test.com/language-model'].response = {
+         type: 'stream-chunks',
+         chunks: [
+           `data: {"type":"stream-start","warnings":[]}\n\n`,
+           `data: {"type":"response-metadata","id":"test-id","modelId":"test-model"}\n\n`,
+           `data: {"type":"text-delta","textDelta":"Hello"}\n\n`,
+           `data: {"type":"finish","finishReason":"stop","usage":{"prompt_tokens":10,"completion_tokens":5}}\n\n`,
+         ],
+       };
+
+       const { stream } = await createTestModel().doStream({
+         prompt: TEST_PROMPT,
+         includeRawChunks: false,
+       });
+
+       const chunks = await convertReadableStreamToArray(stream);
+
+       expect(chunks).toHaveLength(4);
+
+       // Check that the response-metadata chunk without timestamp is unchanged
+       const responseMetadataChunk = chunks[1] as any;
+       expect(responseMetadataChunk).toEqual({
+         type: 'response-metadata',
+         id: 'test-id',
+         modelId: 'test-model',
+       });
+       expect(responseMetadataChunk.timestamp).toBeUndefined();
+     });
+
+     it('should handle null timestamp values gracefully', async () => {
+       server.urls['https://api.test.com/language-model'].response = {
+         type: 'stream-chunks',
+         chunks: [
+           `data: {"type":"stream-start","warnings":[]}\n\n`,
+           `data: {"type":"response-metadata","id":"test-id","modelId":"test-model","timestamp":null}\n\n`,
+           `data: {"type":"text-delta","textDelta":"Hello"}\n\n`,
+           `data: {"type":"finish","finishReason":"stop","usage":{"prompt_tokens":10,"completion_tokens":5}}\n\n`,
+         ],
+       };
+
+       const { stream } = await createTestModel().doStream({
+         prompt: TEST_PROMPT,
+         includeRawChunks: false,
+       });
+
+       const chunks = await convertReadableStreamToArray(stream);
+
+       expect(chunks).toHaveLength(4);
+
+       // Check that null timestamp is left as null
+       const responseMetadataChunk = chunks[1] as any;
+       expect(responseMetadataChunk).toEqual({
+         type: 'response-metadata',
+         id: 'test-id',
+         modelId: 'test-model',
+         timestamp: null,
+       });
+     });
+
+     it('should only convert timestamps for response-metadata chunks, not other chunk types', async () => {
+       const timestampString = '2023-12-07T10:30:00.000Z';
+
+       server.urls['https://api.test.com/language-model'].response = {
+         type: 'stream-chunks',
+         chunks: [
+           `data: {"type":"stream-start","warnings":[]}\n\n`,
+           `data: {"type":"text-delta","textDelta":"Hello","timestamp":"${timestampString}"}\n\n`,
+           `data: {"type":"finish","finishReason":"stop","usage":{"prompt_tokens":10,"completion_tokens":5},"timestamp":"${timestampString}"}\n\n`,
+         ],
+       };
+
+       const { stream } = await createTestModel().doStream({
+         prompt: TEST_PROMPT,
+         includeRawChunks: false,
+       });
+
+       // Check that timestamps in non-response-metadata chunks are left as strings
+       // Note: These chunks don't typically have timestamp properties in the real types,
+       // but this test verifies our conversion logic only affects response-metadata chunks
+       expect(await convertReadableStreamToArray(stream)).toMatchInlineSnapshot(`
+         [
+           {
+             "type": "stream-start",
+             "warnings": [],
+           },
+           {
+             "textDelta": "Hello",
+             "timestamp": "2023-12-07T10:30:00.000Z",
+             "type": "text-delta",
+           },
+           {
+             "finishReason": "stop",
+             "timestamp": "2023-12-07T10:30:00.000Z",
+             "type": "finish",
+             "usage": {
+               "completion_tokens": 5,
+               "prompt_tokens": 10,
+             },
+           },
+         ]
+       `);
+     });
+   });
+
+   describe('Provider Options', () => {
+     function prepareJsonResponse({
+       content = { type: 'text', text: '' },
+       usage = {
+         prompt_tokens: 4,
+         completion_tokens: 30,
+       },
+       finish_reason = 'stop',
+       id = 'test-id',
+       created = 1711115037,
+       model = 'test-model',
+     } = {}) {
+       server.urls['https://api.test.com/language-model'].response = {
+         type: 'json-value',
+         body: {
+           id,
+           created,
+           model,
+           content,
+           finish_reason,
+           usage,
+         },
+       };
+     }
+
+     function prepareStreamResponse({
+       content,
+       finish_reason = 'stop',
+     }: {
+       content: string[];
+       finish_reason?: string;
+     }) {
+       server.urls['https://api.test.com/language-model'].response = {
+         type: 'stream-chunks',
+         chunks: [
+           ...content.map(
+             text =>
+               `data: ${JSON.stringify({
+                 type: 'text-delta',
+                 textDelta: text,
+               })}\n\n`,
+           ),
+           `data: ${JSON.stringify({
+             type: 'finish',
+             finishReason: finish_reason,
+             usage: {
+               prompt_tokens: 10,
+               completion_tokens: 20,
+             },
+           })}\n\n`,
+         ],
+       };
+     }
+
+     it('should pass provider routing order for doGenerate', async () => {
+       prepareJsonResponse({
+         content: { type: 'text', text: 'Test response' },
+       });
+
+       await createTestModel().doGenerate({
+         prompt: TEST_PROMPT,
+         providerOptions: {
+           gateway: {
+             order: ['bedrock', 'anthropic'],
+           },
+         },
+       });
+
+       const requestBody = await server.calls[0].requestBodyJson;
+       expect(requestBody.providerOptions).toEqual({
+         gateway: { order: ['bedrock', 'anthropic'] },
+       });
+     });
+
+     it('should pass single provider in order array', async () => {
+       prepareJsonResponse({
+         content: { type: 'text', text: 'Test response' },
+       });
+
+       await createTestModel().doGenerate({
+         prompt: TEST_PROMPT,
+         providerOptions: {
+           gateway: {
+             order: ['openai'],
+           },
+         },
+       });
+
+       const requestBody = await server.calls[0].requestBodyJson;
+       expect(requestBody.providerOptions).toEqual({
+         gateway: { order: ['openai'] },
+       });
+     });
+
+     it('should work without provider options', async () => {
+       prepareJsonResponse({
+         content: { type: 'text', text: 'Test response' },
+       });
+
+       const result = await createTestModel().doGenerate({
+         prompt: TEST_PROMPT,
+       });
+
+       const requestBody = await server.calls[0].requestBodyJson;
+       expect(requestBody.providerOptions).toBeUndefined();
+       expect(result.content).toEqual({
+         type: 'text',
+         text: 'Test response',
+       });
+     });
+
+     it('should pass provider routing order for doStream', async () => {
+       prepareStreamResponse({
+         content: ['Hello', ' world'],
+       });
+
+       const { stream } = await createTestModel().doStream({
+         prompt: TEST_PROMPT,
+         providerOptions: {
+           gateway: {
+             order: ['groq', 'openai'],
+           },
+         },
+       });
+
+       await convertReadableStreamToArray(stream);
+
+       const requestBody = await server.calls[0].requestBodyJson;
+       expect(requestBody.providerOptions).toEqual({
+         gateway: { order: ['groq', 'openai'] },
+       });
+     });
+
+     it('should validate provider options against schema', async () => {
+       prepareJsonResponse({
+         content: { type: 'text', text: 'Test response' },
+       });
+
+       await createTestModel().doGenerate({
+         prompt: TEST_PROMPT,
+         providerOptions: {
+           gateway: {
+             order: ['anthropic', 'bedrock', 'openai'],
+           },
+         },
+       });
+
+       const requestBody = await server.calls[0].requestBodyJson;
+       expect(requestBody.providerOptions).toEqual({
+         gateway: { order: ['anthropic', 'bedrock', 'openai'] },
+       });
+     });
+   });
+ });
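
The Provider Options tests above only assert that doGenerate/doStream forward providerOptions.gateway verbatim in the request body. At the application level that object is reached through the AI SDK's providerOptions argument — a sketch, reusing the assumed gateway instance from the earlier example (model id illustrative; the fallback-ordering semantics of order are the gateway's, not asserted by these tests):

import { generateText } from 'ai';
import { createGateway } from '@ai-sdk/gateway';

const gateway = createGateway({ apiKey: process.env.AI_GATEWAY_API_KEY });

const { text } = await generateText({
  model: gateway('anthropic/claude-sonnet-4'), // hypothetical model id
  prompt: 'Hello',
  providerOptions: {
    // Forwarded as-is to the gateway request body, per the routing tests above
    gateway: { order: ['bedrock', 'anthropic'] },
  },
});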