@ai-sdk/huggingface 1.0.16 → 1.0.18

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,1544 @@
1
+ import { LanguageModelV3Prompt } from '@ai-sdk/provider';
2
+ import {
3
+ convertReadableStreamToArray,
4
+ mockId,
5
+ } from '@ai-sdk/provider-utils/test';
6
+ import { createTestServer } from '@ai-sdk/test-server/with-vitest';
7
+ import { describe, it, expect, beforeEach } from 'vitest';
8
+ import { HuggingFaceResponsesLanguageModel } from './huggingface-responses-language-model';
9
+
10
+ const TEST_PROMPT: LanguageModelV3Prompt = [
11
+ { role: 'user', content: [{ type: 'text', text: 'Hello' }] },
12
+ ];
13
+
14
+ function createModel(modelId: string) {
15
+ return new HuggingFaceResponsesLanguageModel(modelId, {
16
+ provider: 'huggingface.responses',
17
+ url: ({ path }) => `https://router.huggingface.co/v1${path}`,
18
+ headers: () => ({ Authorization: `Bearer APIKEY` }),
19
+ generateId: mockId(),
20
+ });
21
+ }
22
+
23
+ describe('HuggingFaceResponsesLanguageModel', () => {
24
+ const server = createTestServer({
25
+ 'https://router.huggingface.co/v1/responses': {},
26
+ });
27
+
28
+ describe('doGenerate', () => {
29
+ describe('basic text response', () => {
30
+ beforeEach(() => {
31
+ server.urls['https://router.huggingface.co/v1/responses'].response = {
32
+ type: 'json-value',
33
+ body: {
34
+ id: 'resp_67c97c0203188190a025beb4a75242bc',
35
+ model: 'deepseek-ai/DeepSeek-V3-0324',
36
+ object: 'response',
37
+ created_at: 1741257730,
38
+ status: 'completed',
39
+ error: null,
40
+ instructions: null,
41
+ max_output_tokens: null,
42
+ metadata: null,
43
+ tool_choice: 'auto',
44
+ tools: [],
45
+ temperature: 1.0,
46
+ top_p: 1.0,
47
+ incomplete_details: null,
48
+ usage: {
49
+ input_tokens: 12,
50
+ output_tokens: 25,
51
+ total_tokens: 37,
52
+ },
53
+ output: [
54
+ {
55
+ id: 'msg_67c97c02656c81908e080dfdf4a03cd1',
56
+ type: 'message',
57
+ role: 'assistant',
58
+ status: 'completed',
59
+ content: [
60
+ {
61
+ type: 'output_text',
62
+ text: 'Hello! How can I help you today?',
63
+ },
64
+ ],
65
+ },
66
+ ],
67
+ output_text: 'Hello! How can I help you today?',
68
+ },
69
+ };
70
+ });
71
+
72
+ it('should generate text', async () => {
73
+ const result = await createModel(
74
+ 'deepseek-ai/DeepSeek-V3-0324',
75
+ ).doGenerate({
76
+ prompt: TEST_PROMPT,
77
+ });
78
+
79
+ expect(result.content).toMatchInlineSnapshot(`
80
+ [
81
+ {
82
+ "providerMetadata": {
83
+ "huggingface": {
84
+ "itemId": "msg_67c97c02656c81908e080dfdf4a03cd1",
85
+ },
86
+ },
87
+ "text": "Hello! How can I help you today?",
88
+ "type": "text",
89
+ },
90
+ ]
91
+ `);
92
+ });
93
+
94
+ it('should extract usage', async () => {
95
+ const result = await createModel(
96
+ 'deepseek-ai/DeepSeek-V3-0324',
97
+ ).doGenerate({
98
+ prompt: TEST_PROMPT,
99
+ });
100
+
101
+ expect(result.usage).toMatchInlineSnapshot(`
102
+ {
103
+ "inputTokens": {
104
+ "cacheRead": 0,
105
+ "cacheWrite": undefined,
106
+ "noCache": 12,
107
+ "total": 12,
108
+ },
109
+ "outputTokens": {
110
+ "reasoning": 0,
111
+ "text": 25,
112
+ "total": 25,
113
+ },
114
+ "raw": {
115
+ "input_tokens": 12,
116
+ "output_tokens": 25,
117
+ "total_tokens": 37,
118
+ },
119
+ }
120
+ `);
121
+ });
122
+
123
+ it('should extract text from output array when output_text is missing', async () => {
124
+ server.urls['https://router.huggingface.co/v1/responses'].response = {
125
+ type: 'json-value',
126
+ body: {
127
+ id: 'resp_test',
128
+ model: 'deepseek-ai/DeepSeek-V3-0324',
129
+ object: 'response',
130
+ created_at: 1741257730,
131
+ status: 'completed',
132
+ error: null,
133
+ instructions: null,
134
+ max_output_tokens: null,
135
+ metadata: null,
136
+ tool_choice: 'auto',
137
+ tools: [],
138
+ temperature: 1.0,
139
+ top_p: 1.0,
140
+ incomplete_details: null,
141
+ usage: null,
142
+ output: [
143
+ {
144
+ id: 'msg_test',
145
+ type: 'message',
146
+ role: 'assistant',
147
+ status: 'completed',
148
+ content: [
149
+ {
150
+ type: 'output_text',
151
+ text: 'Extracted from output array',
152
+ },
153
+ ],
154
+ },
155
+ ],
156
+ output_text: null,
157
+ },
158
+ };
159
+
160
+ const result = await createModel(
161
+ 'deepseek-ai/DeepSeek-V3-0324',
162
+ ).doGenerate({
163
+ prompt: TEST_PROMPT,
164
+ });
165
+
166
+ expect(result.content).toMatchInlineSnapshot(`
167
+ [
168
+ {
169
+ "providerMetadata": {
170
+ "huggingface": {
171
+ "itemId": "msg_test",
172
+ },
173
+ },
174
+ "text": "Extracted from output array",
175
+ "type": "text",
176
+ },
177
+ ]
178
+ `);
179
+ });
180
+
181
+ it('should handle missing usage gracefully', async () => {
182
+ server.urls['https://router.huggingface.co/v1/responses'].response = {
183
+ type: 'json-value',
184
+ body: {
185
+ id: 'resp_test',
186
+ model: 'deepseek-ai/DeepSeek-V3-0324',
187
+ object: 'response',
188
+ created_at: 1741257730,
189
+ status: 'completed',
190
+ error: null,
191
+ instructions: null,
192
+ max_output_tokens: null,
193
+ metadata: null,
194
+ tool_choice: 'auto',
195
+ tools: [],
196
+ temperature: 1.0,
197
+ top_p: 1.0,
198
+ incomplete_details: null,
199
+ usage: null,
200
+ output: [],
201
+ output_text: 'Test response',
202
+ },
203
+ };
204
+
205
+ const result = await createModel(
206
+ 'deepseek-ai/DeepSeek-V3-0324',
207
+ ).doGenerate({
208
+ prompt: TEST_PROMPT,
209
+ });
210
+
211
+ expect(result.usage).toMatchInlineSnapshot(`
212
+ {
213
+ "inputTokens": {
214
+ "cacheRead": undefined,
215
+ "cacheWrite": undefined,
216
+ "noCache": undefined,
217
+ "total": undefined,
218
+ },
219
+ "outputTokens": {
220
+ "reasoning": undefined,
221
+ "text": undefined,
222
+ "total": undefined,
223
+ },
224
+ "raw": undefined,
225
+ }
226
+ `);
227
+ });
228
+
229
+ it('should send model id, settings, and input', async () => {
230
+ await createModel('deepseek-ai/DeepSeek-V3-0324').doGenerate({
231
+ prompt: [
232
+ { role: 'system', content: 'You are a helpful assistant.' },
233
+ { role: 'user', content: [{ type: 'text', text: 'Hello' }] },
234
+ ],
235
+ temperature: 0.5,
236
+ topP: 0.3,
237
+ maxOutputTokens: 100,
238
+ });
239
+
240
+ expect(await server.calls[0].requestBodyJson).toStrictEqual({
241
+ model: 'deepseek-ai/DeepSeek-V3-0324',
242
+ temperature: 0.5,
243
+ top_p: 0.3,
244
+ max_output_tokens: 100,
245
+ stream: false,
246
+ input: [
247
+ { role: 'system', content: 'You are a helpful assistant.' },
248
+ {
249
+ role: 'user',
250
+ content: [{ type: 'input_text', text: 'Hello' }],
251
+ },
252
+ ],
253
+ });
254
+ });
255
+
256
+ it('should handle unsupported settings with warnings', async () => {
257
+ const { warnings } = await createModel(
258
+ 'deepseek-ai/DeepSeek-V3-0324',
259
+ ).doGenerate({
260
+ prompt: TEST_PROMPT,
261
+ topK: 10,
262
+ seed: 123,
263
+ presencePenalty: 0.5,
264
+ frequencyPenalty: 0.3,
265
+ stopSequences: ['stop'],
266
+ });
267
+
268
+ expect(warnings).toMatchInlineSnapshot(`
269
+ [
270
+ {
271
+ "feature": "topK",
272
+ "type": "unsupported",
273
+ },
274
+ {
275
+ "feature": "seed",
276
+ "type": "unsupported",
277
+ },
278
+ {
279
+ "feature": "presencePenalty",
280
+ "type": "unsupported",
281
+ },
282
+ {
283
+ "feature": "frequencyPenalty",
284
+ "type": "unsupported",
285
+ },
286
+ {
287
+ "feature": "stopSequences",
288
+ "type": "unsupported",
289
+ },
290
+ ]
291
+ `);
292
+ });
293
+
294
+ it('should generate text and sources from annotations', async () => {
295
+ server.urls['https://router.huggingface.co/v1/responses'].response = {
296
+ type: 'json-value',
297
+ body: {
298
+ id: 'resp_test_annotations',
299
+ model: 'deepseek-ai/DeepSeek-V3-0324',
300
+ object: 'response',
301
+ created_at: 1741257730,
302
+ status: 'completed',
303
+ error: null,
304
+ instructions: null,
305
+ max_output_tokens: null,
306
+ metadata: null,
307
+ tool_choice: 'auto',
308
+ tools: [],
309
+ temperature: 1.0,
310
+ top_p: 1.0,
311
+ incomplete_details: null,
312
+ usage: {
313
+ input_tokens: 20,
314
+ output_tokens: 50,
315
+ total_tokens: 70,
316
+ },
317
+ output: [
318
+ {
319
+ id: 'msg_test_annotations',
320
+ type: 'message',
321
+ role: 'assistant',
322
+ status: 'completed',
323
+ content: [
324
+ {
325
+ type: 'output_text',
326
+ text: 'Here are some recent articles about AI: The first article discusses new developments ([example.com](https://example.com/article1)). Another piece covers industry trends ([test.com](https://test.com/article2)).',
327
+ annotations: [
328
+ {
329
+ type: 'url_citation',
330
+ url: 'https://example.com/article1',
331
+ title: 'AI Developments Article',
332
+ },
333
+ {
334
+ type: 'url_citation',
335
+ url: 'https://test.com/article2',
336
+ title: 'Industry Trends Report',
337
+ },
338
+ ],
339
+ },
340
+ ],
341
+ },
342
+ ],
343
+ output_text: null,
344
+ },
345
+ };
346
+
347
+ const result = await createModel(
348
+ 'deepseek-ai/DeepSeek-V3-0324',
349
+ ).doGenerate({
350
+ prompt: TEST_PROMPT,
351
+ });
352
+
353
+ expect(result.content).toMatchInlineSnapshot(`
354
+ [
355
+ {
356
+ "providerMetadata": {
357
+ "huggingface": {
358
+ "itemId": "msg_test_annotations",
359
+ },
360
+ },
361
+ "text": "Here are some recent articles about AI: The first article discusses new developments ([example.com](https://example.com/article1)). Another piece covers industry trends ([test.com](https://test.com/article2)).",
362
+ "type": "text",
363
+ },
364
+ {
365
+ "id": "id-0",
366
+ "sourceType": "url",
367
+ "title": "AI Developments Article",
368
+ "type": "source",
369
+ "url": "https://example.com/article1",
370
+ },
371
+ {
372
+ "id": "id-1",
373
+ "sourceType": "url",
374
+ "title": "Industry Trends Report",
375
+ "type": "source",
376
+ "url": "https://test.com/article2",
377
+ },
378
+ ]
379
+ `);
380
+ });
381
+
382
+ it('should handle MCP tools with annotations', async () => {
383
+ server.urls['https://router.huggingface.co/v1/responses'].response = {
384
+ type: 'json-value',
385
+ body: {
386
+ id: 'resp_mcp_test',
387
+ model: 'deepseek-ai/DeepSeek-V3-0324',
388
+ object: 'response',
389
+ created_at: 1741257730,
390
+ status: 'completed',
391
+ error: null,
392
+ instructions: null,
393
+ max_output_tokens: null,
394
+ metadata: null,
395
+ tool_choice: 'auto',
396
+ tools: [],
397
+ temperature: 1.0,
398
+ top_p: 1.0,
399
+ incomplete_details: null,
400
+ usage: {
401
+ input_tokens: 50,
402
+ output_tokens: 100,
403
+ total_tokens: 150,
404
+ },
405
+ output: [
406
+ {
407
+ id: 'mcp_search_test',
408
+ type: 'mcp_call',
409
+ server_label: 'web_search',
410
+ name: 'search',
411
+ arguments: '{"query": "San Francisco tech events"}',
412
+ output: 'Found 25 tech events in San Francisco',
413
+ },
414
+ {
415
+ id: 'msg_mcp_response',
416
+ type: 'message',
417
+ role: 'assistant',
418
+ status: 'completed',
419
+ content: [
420
+ {
421
+ type: 'output_text',
422
+ text: 'Based on the search results, here are the latest tech events in San Francisco: There are several AI conferences ([techevents.com](https://techevents.com/sf-ai)) and startup meetups ([eventbrite.com](https://eventbrite.com/sf-startups)) happening this week.',
423
+ annotations: [
424
+ {
425
+ type: 'url_citation',
426
+ url: 'https://techevents.com/sf-ai',
427
+ title: 'SF AI Conference 2025',
428
+ },
429
+ {
430
+ type: 'url_citation',
431
+ url: 'https://eventbrite.com/sf-startups',
432
+ title: 'SF Startup Meetups',
433
+ },
434
+ ],
435
+ },
436
+ ],
437
+ },
438
+ ],
439
+ output_text: null,
440
+ },
441
+ };
442
+
443
+ const result = await createModel(
444
+ 'deepseek-ai/DeepSeek-V3-0324',
445
+ ).doGenerate({
446
+ prompt: TEST_PROMPT,
447
+ });
448
+
449
+ expect(result.content).toMatchInlineSnapshot(`
450
+ [
451
+ {
452
+ "input": "{"query": "San Francisco tech events"}",
453
+ "providerExecuted": true,
454
+ "toolCallId": "mcp_search_test",
455
+ "toolName": "search",
456
+ "type": "tool-call",
457
+ },
458
+ {
459
+ "result": "Found 25 tech events in San Francisco",
460
+ "toolCallId": "mcp_search_test",
461
+ "toolName": "search",
462
+ "type": "tool-result",
463
+ },
464
+ {
465
+ "providerMetadata": {
466
+ "huggingface": {
467
+ "itemId": "msg_mcp_response",
468
+ },
469
+ },
470
+ "text": "Based on the search results, here are the latest tech events in San Francisco: There are several AI conferences ([techevents.com](https://techevents.com/sf-ai)) and startup meetups ([eventbrite.com](https://eventbrite.com/sf-startups)) happening this week.",
471
+ "type": "text",
472
+ },
473
+ {
474
+ "id": "id-0",
475
+ "sourceType": "url",
476
+ "title": "SF AI Conference 2025",
477
+ "type": "source",
478
+ "url": "https://techevents.com/sf-ai",
479
+ },
480
+ {
481
+ "id": "id-1",
482
+ "sourceType": "url",
483
+ "title": "SF Startup Meetups",
484
+ "type": "source",
485
+ "url": "https://eventbrite.com/sf-startups",
486
+ },
487
+ ]
488
+ `);
489
+ });
490
+ });
491
+ });
492
+
493
+ describe('doStream', () => {
494
+ it('should stream text deltas', async () => {
495
+ server.urls['https://router.huggingface.co/v1/responses'].response = {
496
+ type: 'stream-chunks',
497
+ chunks: [
498
+ `data:{"type":"response.created","response":{"id":"resp_test","object":"response","created_at":1741269019,"status":"in_progress","model":"deepseek-ai/DeepSeek-V3-0324"}}\n\n`,
499
+ `data:{"type":"response.in_progress","response":{"id":"resp_test","object":"response","created_at":1741269019,"status":"in_progress"}}\n\n`,
500
+ `data:{"type":"response.output_item.added","output_index":0,"item":{"id":"msg_test","type":"message","role":"assistant","status":"in_progress","content":[]},"sequence_number":1}\n\n`,
501
+ `data:{"type":"response.output_text.delta","item_id":"msg_test","output_index":0,"content_index":0,"delta":"Hello,","sequence_number":2}\n\n`,
502
+ `data:{"type":"response.output_text.delta","item_id":"msg_test","output_index":0,"content_index":0,"delta":" World!","sequence_number":3}\n\n`,
503
+ `data:{"type":"response.output_item.done","output_index":0,"item":{"id":"msg_test","type":"message","role":"assistant","status":"completed","content":[{"type":"output_text","text":"Hello, World!"}]},"sequence_number":4}\n\n`,
504
+ `data:{"type":"response.completed","response":{"id":"resp_test","model":"deepseek-ai/DeepSeek-V3-0324","object":"response","created_at":1741269112,"status":"completed","incomplete_details":null,"usage":{"input_tokens":12,"output_tokens":25,"total_tokens":37},"output":[{"id":"msg_test","type":"message","role":"assistant","status":"completed","content":[{"type":"output_text","text":"Hello, World!"}]}]},"sequence_number":5}\n\n`,
505
+ ],
506
+ };
507
+
508
+ const { stream } = await createModel(
509
+ 'deepseek-ai/DeepSeek-V3-0324',
510
+ ).doStream({
511
+ prompt: TEST_PROMPT,
512
+ });
513
+
514
+ expect(await convertReadableStreamToArray(stream)).toMatchInlineSnapshot(`
515
+ [
516
+ {
517
+ "type": "stream-start",
518
+ "warnings": [],
519
+ },
520
+ {
521
+ "id": "resp_test",
522
+ "modelId": "deepseek-ai/DeepSeek-V3-0324",
523
+ "timestamp": 2025-03-06T13:50:19.000Z,
524
+ "type": "response-metadata",
525
+ },
526
+ {
527
+ "id": "msg_test",
528
+ "providerMetadata": {
529
+ "huggingface": {
530
+ "itemId": "msg_test",
531
+ },
532
+ },
533
+ "type": "text-start",
534
+ },
535
+ {
536
+ "delta": "Hello,",
537
+ "id": "msg_test",
538
+ "type": "text-delta",
539
+ },
540
+ {
541
+ "delta": " World!",
542
+ "id": "msg_test",
543
+ "type": "text-delta",
544
+ },
545
+ {
546
+ "id": "msg_test",
547
+ "type": "text-end",
548
+ },
549
+ {
550
+ "finishReason": {
551
+ "raw": undefined,
552
+ "unified": "stop",
553
+ },
554
+ "providerMetadata": {
555
+ "huggingface": {
556
+ "responseId": "resp_test",
557
+ },
558
+ },
559
+ "type": "finish",
560
+ "usage": {
561
+ "inputTokens": {
562
+ "cacheRead": 0,
563
+ "cacheWrite": undefined,
564
+ "noCache": 12,
565
+ "total": 12,
566
+ },
567
+ "outputTokens": {
568
+ "reasoning": 0,
569
+ "text": 25,
570
+ "total": 25,
571
+ },
572
+ "raw": {
573
+ "input_tokens": 12,
574
+ "output_tokens": 25,
575
+ "total_tokens": 37,
576
+ },
577
+ },
578
+ },
579
+ ]
580
+ `);
581
+ });
582
+
583
+ it('should handle streaming without usage', async () => {
584
+ server.urls['https://router.huggingface.co/v1/responses'].response = {
585
+ type: 'stream-chunks',
586
+ chunks: [
587
+ `data:{"type":"response.output_item.added","output_index":0,"item":{"id":"msg_test","type":"message","role":"assistant","status":"in_progress"},"sequence_number":1}\n\n`,
588
+ `data:{"type":"response.output_text.delta","item_id":"msg_test","output_index":0,"content_index":0,"delta":"Hi!","sequence_number":2}\n\n`,
589
+ `data:{"type":"response.output_item.done","output_index":0,"item":{"id":"msg_test","type":"message","role":"assistant","status":"completed"},"sequence_number":3}\n\n`,
590
+ `data:{"type":"response.completed","response":{"id":"resp_test","status":"completed","incomplete_details":null,"usage":null},"sequence_number":4}\n\n`,
591
+ ],
592
+ };
593
+
594
+ const { stream } = await createModel(
595
+ 'deepseek-ai/DeepSeek-V3-0324',
596
+ ).doStream({
597
+ prompt: TEST_PROMPT,
598
+ });
599
+
600
+ const chunks = await convertReadableStreamToArray(stream);
601
+ const finishChunk = chunks.find(chunk => chunk.type === 'finish');
602
+
603
+ expect(finishChunk?.usage).toMatchInlineSnapshot(`
604
+ {
605
+ "inputTokens": {
606
+ "cacheRead": undefined,
607
+ "cacheWrite": undefined,
608
+ "noCache": undefined,
609
+ "total": undefined,
610
+ },
611
+ "outputTokens": {
612
+ "reasoning": undefined,
613
+ "text": undefined,
614
+ "total": undefined,
615
+ },
616
+ "raw": undefined,
617
+ }
618
+ `);
619
+ });
620
+
621
+ it('should handle non-message item types', async () => {
622
+ server.urls['https://router.huggingface.co/v1/responses'].response = {
623
+ type: 'stream-chunks',
624
+ chunks: [
625
+ `data:{"type":"response.output_item.added","output_index":0,"item":{"id":"mcp_test","type":"mcp_list_tools","server_label":"test"},"sequence_number":1}\n\n`,
626
+ `data:{"type":"response.output_item.done","output_index":0,"item":{"id":"mcp_test","type":"mcp_list_tools","server_label":"test"},"sequence_number":2}\n\n`,
627
+ `data:{"type":"response.completed","response":{"id":"resp_test","status":"completed","incomplete_details":null},"sequence_number":3}\n\n`,
628
+ ],
629
+ };
630
+
631
+ const { stream } = await createModel(
632
+ 'deepseek-ai/DeepSeek-V3-0324',
633
+ ).doStream({
634
+ prompt: TEST_PROMPT,
635
+ });
636
+
637
+ const chunks = await convertReadableStreamToArray(stream);
638
+
639
+ // Should only have stream-start and finish events (no text events for non-message items)
640
+ expect(chunks.map(chunk => chunk.type)).toEqual([
641
+ 'stream-start',
642
+ 'finish',
643
+ ]);
644
+ });
645
+
646
+ it('should handle streaming errors', async () => {
647
+ server.urls['https://router.huggingface.co/v1/responses'].response = {
648
+ type: 'stream-chunks',
649
+ chunks: [
650
+ `data:{"type":"response.output_item.added","output_index":0,"item":{"id":"msg_test","type":"message","role":"assistant"},"sequence_number":1}\n\n`,
651
+ `data:invalid json}\n\n`, // malformed JSON that will cause parsing error
652
+ ],
653
+ };
654
+
655
+ const { stream } = await createModel(
656
+ 'deepseek-ai/DeepSeek-V3-0324',
657
+ ).doStream({
658
+ prompt: TEST_PROMPT,
659
+ });
660
+
661
+ const chunks = await convertReadableStreamToArray(stream);
662
+ const errorChunk = chunks.find(chunk => chunk.type === 'error');
663
+ const finishChunk = chunks.find(chunk => chunk.type === 'finish');
664
+
665
+ expect(errorChunk).toBeDefined();
666
+ expect(errorChunk?.type).toBe('error');
667
+ expect(finishChunk?.finishReason).toMatchInlineSnapshot(`
668
+ {
669
+ "raw": undefined,
670
+ "unified": "error",
671
+ }
672
+ `);
673
+ });
674
+
675
+ it('should send correct streaming request', async () => {
676
+ server.urls['https://router.huggingface.co/v1/responses'].response = {
677
+ type: 'stream-chunks',
678
+ chunks: [
679
+ `data:{"type":"response.completed","response":{"id":"resp_test","status":"completed"},"sequence_number":1}\n\n`,
680
+ ],
681
+ };
682
+
683
+ await createModel('deepseek-ai/DeepSeek-V3-0324').doStream({
684
+ prompt: TEST_PROMPT,
685
+ temperature: 0.7,
686
+ });
687
+
688
+ expect(await server.calls[0].requestBodyJson).toStrictEqual({
689
+ model: 'deepseek-ai/DeepSeek-V3-0324',
690
+ temperature: 0.7,
691
+ stream: true,
692
+ input: [
693
+ {
694
+ role: 'user',
695
+ content: [{ type: 'input_text', text: 'Hello' }],
696
+ },
697
+ ],
698
+ });
699
+ });
700
+ });
701
+
702
+ describe('message conversion', () => {
703
+ beforeEach(() => {
704
+ server.urls['https://router.huggingface.co/v1/responses'].response = {
705
+ type: 'json-value',
706
+ body: {
707
+ id: 'resp_test',
708
+ model: 'moonshotai/Kimi-K2-Instruct',
709
+ object: 'response',
710
+ created_at: 1741257730,
711
+ status: 'completed',
712
+ error: null,
713
+ instructions: null,
714
+ max_output_tokens: null,
715
+ metadata: null,
716
+ tool_choice: 'auto',
717
+ tools: [],
718
+ temperature: 1.0,
719
+ top_p: 1.0,
720
+ incomplete_details: null,
721
+ usage: null,
722
+ output: [],
723
+ output_text: 'Test response',
724
+ },
725
+ };
726
+ });
727
+
728
+ it('should convert user messages with images', async () => {
729
+ await createModel('deepseek-ai/DeepSeek-V3-0324').doGenerate({
730
+ prompt: [
731
+ {
732
+ role: 'user',
733
+ content: [
734
+ { type: 'text', text: 'What do you see?' },
735
+ {
736
+ type: 'file',
737
+ mediaType: 'image/jpeg',
738
+ data: 'AQIDBA==',
739
+ },
740
+ ],
741
+ },
742
+ ],
743
+ });
744
+
745
+ const requestBody = await server.calls[0].requestBodyJson;
746
+ expect(requestBody.input[0].content).toMatchInlineSnapshot(`
747
+ [
748
+ {
749
+ "text": "What do you see?",
750
+ "type": "input_text",
751
+ },
752
+ {
753
+ "image_url": "data:image/jpeg;base64,AQIDBA==",
754
+ "type": "input_image",
755
+ },
756
+ ]
757
+ `);
758
+ });
759
+
760
+ it('should handle assistant messages', async () => {
761
+ await createModel('deepseek-ai/DeepSeek-V3-0324').doGenerate({
762
+ prompt: [
763
+ { role: 'user', content: [{ type: 'text', text: 'Hello' }] },
764
+ { role: 'assistant', content: [{ type: 'text', text: 'Hi there!' }] },
765
+ { role: 'user', content: [{ type: 'text', text: 'How are you?' }] },
766
+ ],
767
+ });
768
+
769
+ const requestBody = await server.calls[0].requestBodyJson;
770
+ expect(requestBody.input).toMatchInlineSnapshot(`
771
+ [
772
+ {
773
+ "content": [
774
+ {
775
+ "text": "Hello",
776
+ "type": "input_text",
777
+ },
778
+ ],
779
+ "role": "user",
780
+ },
781
+ {
782
+ "content": [
783
+ {
784
+ "text": "Hi there!",
785
+ "type": "output_text",
786
+ },
787
+ ],
788
+ "role": "assistant",
789
+ },
790
+ {
791
+ "content": [
792
+ {
793
+ "text": "How are you?",
794
+ "type": "input_text",
795
+ },
796
+ ],
797
+ "role": "user",
798
+ },
799
+ ]
800
+ `);
801
+ });
802
+
803
+ it('should warn about unsupported assistant content types', async () => {
804
+ const { warnings } = await createModel(
805
+ 'deepseek-ai/DeepSeek-V3-0324',
806
+ ).doGenerate({
807
+ prompt: [
808
+ {
809
+ role: 'assistant',
810
+ content: [
811
+ {
812
+ type: 'tool-call',
813
+ toolCallId: 'test',
814
+ toolName: 'test',
815
+ input: {},
816
+ },
817
+ {
818
+ type: 'tool-result',
819
+ toolCallId: 'test',
820
+ toolName: 'test',
821
+ output: { type: 'text', value: 'test' },
822
+ },
823
+ { type: 'reasoning', text: 'thinking...' },
824
+ ],
825
+ },
826
+ ],
827
+ });
828
+
829
+ expect(warnings).toMatchInlineSnapshot(`[]`);
830
+ });
831
+
832
+ it('should warn about tool messages', async () => {
833
+ const { warnings } = await createModel(
834
+ 'deepseek-ai/DeepSeek-V3-0324',
835
+ ).doGenerate({
836
+ prompt: [
837
+ {
838
+ role: 'tool',
839
+ content: [
840
+ {
841
+ type: 'tool-result',
842
+ toolCallId: 'test',
843
+ toolName: 'test',
844
+ output: { type: 'text', value: 'test' },
845
+ },
846
+ ],
847
+ },
848
+ ],
849
+ });
850
+
851
+ expect(warnings).toMatchInlineSnapshot(`
852
+ [
853
+ {
854
+ "feature": "tool messages",
855
+ "type": "unsupported",
856
+ },
857
+ ]
858
+ `);
859
+ });
860
+ });
861
+
862
+ describe('tool calls', () => {
863
+ it('should handle function_call tool responses', async () => {
864
+ server.urls['https://router.huggingface.co/v1/responses'].response = {
865
+ type: 'json-value',
866
+ body: {
867
+ id: 'resp_tool_test',
868
+ model: 'deepseek-ai/DeepSeek-V3-0324',
869
+ object: 'response',
870
+ created_at: 1741257730,
871
+ status: 'completed',
872
+ error: null,
873
+ instructions: null,
874
+ max_output_tokens: null,
875
+ metadata: null,
876
+ tool_choice: 'auto',
877
+ tools: [],
878
+ temperature: 1.0,
879
+ top_p: 1.0,
880
+ incomplete_details: null,
881
+ usage: {
882
+ input_tokens: 50,
883
+ output_tokens: 30,
884
+ total_tokens: 80,
885
+ },
886
+ output: [
887
+ {
888
+ id: 'fc_test',
889
+ type: 'function_call',
890
+ call_id: 'call_123',
891
+ name: 'getWeather',
892
+ arguments: '{"location": "New York"}',
893
+ output: '{"temperature": "72°F", "condition": "sunny"}',
894
+ },
895
+ {
896
+ id: 'msg_after_tool',
897
+ type: 'message',
898
+ role: 'assistant',
899
+ status: 'completed',
900
+ content: [
901
+ {
902
+ type: 'output_text',
903
+ text: 'The weather in New York is 72°F and sunny.',
904
+ },
905
+ ],
906
+ },
907
+ ],
908
+ output_text: null,
909
+ },
910
+ };
911
+
912
+ const result = await createModel(
913
+ 'deepseek-ai/DeepSeek-V3-0324',
914
+ ).doGenerate({
915
+ prompt: TEST_PROMPT,
916
+ });
917
+
918
+ expect(result.content).toMatchInlineSnapshot(`
919
+ [
920
+ {
921
+ "input": "{"location": "New York"}",
922
+ "toolCallId": "call_123",
923
+ "toolName": "getWeather",
924
+ "type": "tool-call",
925
+ },
926
+ {
927
+ "result": "{"temperature": "72°F", "condition": "sunny"}",
928
+ "toolCallId": "call_123",
929
+ "toolName": "getWeather",
930
+ "type": "tool-result",
931
+ },
932
+ {
933
+ "providerMetadata": {
934
+ "huggingface": {
935
+ "itemId": "msg_after_tool",
936
+ },
937
+ },
938
+ "text": "The weather in New York is 72°F and sunny.",
939
+ "type": "text",
940
+ },
941
+ ]
942
+ `);
943
+ });
944
+
945
+ it('should stream tool calls', async () => {
946
+ server.urls['https://router.huggingface.co/v1/responses'].response = {
947
+ type: 'stream-chunks',
948
+ chunks: [
949
+ `data:{"type":"response.created","response":{"id":"resp_tool_stream","object":"response","created_at":1741269019,"status":"in_progress","model":"deepseek-ai/DeepSeek-V3-0324"}}\n\n`,
950
+ `data:{"type":"response.output_item.added","output_index":0,"item":{"id":"fc_stream","type":"function_call","call_id":"call_456","name":"calculator","arguments":""},"sequence_number":1}\n\n`,
951
+ `data:{"type":"response.function_call_arguments.delta","item_id":"fc_stream","output_index":0,"delta":"{\\"operation\\"","sequence_number":2}\n\n`,
952
+ `data:{"type":"response.function_call_arguments.delta","item_id":"fc_stream","output_index":0,"delta":": \\"add\\", \\"a\\": 5, \\"b\\": 3}","sequence_number":3}\n\n`,
953
+ `data:{"type":"response.output_item.done","output_index":0,"item":{"id":"fc_stream","type":"function_call","call_id":"call_456","name":"calculator","arguments":"{\\"operation\\": \\"add\\", \\"a\\": 5, \\"b\\": 3}","output":"8"},"sequence_number":4}\n\n`,
954
+ `data:{"type":"response.completed","response":{"id":"resp_tool_stream","status":"completed","usage":{"input_tokens":20,"output_tokens":15,"total_tokens":35}},"sequence_number":5}\n\n`,
955
+ ],
956
+ };
957
+
958
+ const { stream } = await createModel(
959
+ 'deepseek-ai/DeepSeek-V3-0324',
960
+ ).doStream({
961
+ prompt: TEST_PROMPT,
962
+ });
963
+
964
+ const chunks = await convertReadableStreamToArray(stream);
965
+
966
+ expect(chunks).toMatchInlineSnapshot(`
967
+ [
968
+ {
969
+ "type": "stream-start",
970
+ "warnings": [],
971
+ },
972
+ {
973
+ "id": "resp_tool_stream",
974
+ "modelId": "deepseek-ai/DeepSeek-V3-0324",
975
+ "timestamp": 2025-03-06T13:50:19.000Z,
976
+ "type": "response-metadata",
977
+ },
978
+ {
979
+ "id": "call_456",
980
+ "toolName": "calculator",
981
+ "type": "tool-input-start",
982
+ },
983
+ {
984
+ "id": "call_456",
985
+ "type": "tool-input-end",
986
+ },
987
+ {
988
+ "input": "{"operation": "add", "a": 5, "b": 3}",
989
+ "toolCallId": "call_456",
990
+ "toolName": "calculator",
991
+ "type": "tool-call",
992
+ },
993
+ {
994
+ "result": "8",
995
+ "toolCallId": "call_456",
996
+ "toolName": "calculator",
997
+ "type": "tool-result",
998
+ },
999
+ {
1000
+ "finishReason": {
1001
+ "raw": undefined,
1002
+ "unified": "stop",
1003
+ },
1004
+ "providerMetadata": {
1005
+ "huggingface": {
1006
+ "responseId": "resp_tool_stream",
1007
+ },
1008
+ },
1009
+ "type": "finish",
1010
+ "usage": {
1011
+ "inputTokens": {
1012
+ "cacheRead": 0,
1013
+ "cacheWrite": undefined,
1014
+ "noCache": 20,
1015
+ "total": 20,
1016
+ },
1017
+ "outputTokens": {
1018
+ "reasoning": 0,
1019
+ "text": 15,
1020
+ "total": 15,
1021
+ },
1022
+ "raw": {
1023
+ "input_tokens": 20,
1024
+ "output_tokens": 15,
1025
+ "total_tokens": 35,
1026
+ },
1027
+ },
1028
+ },
1029
+ ]
1030
+ `);
1031
+ });
1032
+ });
1033
+
1034
+ describe('structured output', () => {
1035
+ it('should send text.format for structured output', async () => {
1036
+ server.urls['https://router.huggingface.co/v1/responses'].response = {
1037
+ type: 'json-value',
1038
+ body: {
1039
+ id: 'resp_structured',
1040
+ model: 'moonshotai/Kimi-K2-Instruct',
1041
+ object: 'response',
1042
+ created_at: 1741257730,
1043
+ status: 'completed',
1044
+ error: null,
1045
+ instructions: null,
1046
+ max_output_tokens: null,
1047
+ metadata: null,
1048
+ tool_choice: 'auto',
1049
+ tools: [],
1050
+ temperature: 1.0,
1051
+ top_p: 1.0,
1052
+ incomplete_details: null,
1053
+ usage: null,
1054
+ output: [
1055
+ {
1056
+ id: 'msg_structured',
1057
+ type: 'message',
1058
+ role: 'assistant',
1059
+ status: 'completed',
1060
+ content: [
1061
+ {
1062
+ type: 'output_text',
1063
+ text: '{"name": "John Doe", "age": 30}',
1064
+ },
1065
+ ],
1066
+ },
1067
+ ],
1068
+ output_text: null,
1069
+ },
1070
+ };
1071
+
1072
+ await createModel('moonshotai/Kimi-K2-Instruct').doGenerate({
1073
+ prompt: TEST_PROMPT,
1074
+ responseFormat: {
1075
+ type: 'json',
1076
+ schema: {
1077
+ type: 'object',
1078
+ properties: {
1079
+ name: { type: 'string' },
1080
+ age: { type: 'number' },
1081
+ },
1082
+ required: ['name', 'age'],
1083
+ },
1084
+ },
1085
+ });
1086
+
1087
+ const requestBody = await server.calls[0].requestBodyJson;
1088
+ expect(requestBody.text).toMatchInlineSnapshot(`
1089
+ {
1090
+ "format": {
1091
+ "name": "response",
1092
+ "schema": {
1093
+ "properties": {
1094
+ "age": {
1095
+ "type": "number",
1096
+ },
1097
+ "name": {
1098
+ "type": "string",
1099
+ },
1100
+ },
1101
+ "required": [
1102
+ "name",
1103
+ "age",
1104
+ ],
1105
+ "type": "object",
1106
+ },
1107
+ "strict": false,
1108
+ "type": "json_schema",
1109
+ },
1110
+ }
1111
+ `);
1112
+ });
1113
+
1114
+ it('should handle structured output with custom name and description', async () => {
1115
+ server.urls['https://router.huggingface.co/v1/responses'].response = {
1116
+ type: 'json-value',
1117
+ body: {
1118
+ id: 'resp_structured',
1119
+ model: 'moonshotai/Kimi-K2-Instruct',
1120
+ object: 'response',
1121
+ created_at: 1741257730,
1122
+ status: 'completed',
1123
+ error: null,
1124
+ instructions: null,
1125
+ max_output_tokens: null,
1126
+ metadata: null,
1127
+ tool_choice: 'auto',
1128
+ tools: [],
1129
+ temperature: 1.0,
1130
+ top_p: 1.0,
1131
+ incomplete_details: null,
1132
+ usage: null,
1133
+ output: [],
1134
+ output_text: '{}',
1135
+ },
1136
+ };
1137
+
1138
+ await createModel('moonshotai/Kimi-K2-Instruct').doGenerate({
1139
+ prompt: TEST_PROMPT,
1140
+ responseFormat: {
1141
+ type: 'json',
1142
+ name: 'person_profile',
1143
+ description: 'A person profile with basic information',
1144
+ schema: {
1145
+ type: 'object',
1146
+ properties: {
1147
+ name: { type: 'string' },
1148
+ },
1149
+ },
1150
+ },
1151
+ });
1152
+
1153
+ const requestBody = await server.calls[0].requestBodyJson;
1154
+ expect(requestBody.text?.format?.name).toBe('person_profile');
1155
+ expect(requestBody.text?.format?.description).toBe(
1156
+ 'A person profile with basic information',
1157
+ );
1158
+ });
1159
+ });
1160
+
1161
describe('reasoning', () => {
  it('should handle reasoning content in responses', async () => {
    // Completed (non-streaming) response whose output array contains a
    // reasoning item followed by a normal assistant message, mirroring
    // DeepSeek-R1 style output.
    server.urls['https://router.huggingface.co/v1/responses'].response = {
      type: 'json-value',
      body: {
        id: 'resp_reasoning',
        model: 'deepseek-ai/DeepSeek-R1',
        object: 'response',
        created_at: 1741257730,
        status: 'completed',
        error: null,
        instructions: null,
        max_output_tokens: null,
        metadata: null,
        tool_choice: 'auto',
        tools: [],
        temperature: 1.0,
        top_p: 1.0,
        incomplete_details: null,
        usage: {
          input_tokens: 10,
          output_tokens: 50,
          total_tokens: 60,
        },
        output: [
          {
            id: 'reasoning_1',
            type: 'reasoning',
            content: [
              {
                type: 'reasoning_text',
                text: 'Let me think about this problem step by step...',
              },
            ],
          },
          {
            id: 'msg_after_reasoning',
            type: 'message',
            role: 'assistant',
            status: 'completed',
            content: [
              {
                type: 'output_text',
                text: 'The answer is 42.',
              },
            ],
          },
        ],
        output_text: null,
      },
    };

    const result = await createModel('deepseek-ai/DeepSeek-R1').doGenerate({
      prompt: TEST_PROMPT,
    });

    // Each output item maps to one content part ('reasoning' then 'text'),
    // and the originating item id is surfaced under
    // providerMetadata.huggingface.itemId.
    expect(result.content).toMatchInlineSnapshot(`
      [
        {
          "providerMetadata": {
            "huggingface": {
              "itemId": "reasoning_1",
            },
          },
          "text": "Let me think about this problem step by step...",
          "type": "reasoning",
        },
        {
          "providerMetadata": {
            "huggingface": {
              "itemId": "msg_after_reasoning",
            },
          },
          "text": "The answer is 42.",
          "type": "text",
        },
      ]
    `);
  });

  it('should stream reasoning content', async () => {
    // SSE stream: a reasoning item (added -> deltas -> done) followed by a
    // message item, then response.completed carrying the usage totals.
    server.urls['https://router.huggingface.co/v1/responses'].response = {
      type: 'stream-chunks',
      chunks: [
        `data:{"type":"response.created","response":{"id":"resp_reasoning_stream","object":"response","created_at":1741269019,"status":"in_progress","model":"deepseek-ai/DeepSeek-R1"}}\n\n`,
        `data:{"type":"response.output_item.added","output_index":0,"item":{"id":"reasoning_stream","type":"reasoning"},"sequence_number":1}\n\n`,
        `data:{"type":"response.reasoning_text.delta","item_id":"reasoning_stream","output_index":0,"content_index":0,"delta":"Thinking about","sequence_number":2}\n\n`,
        `data:{"type":"response.reasoning_text.delta","item_id":"reasoning_stream","output_index":0,"content_index":0,"delta":" the problem...","sequence_number":3}\n\n`,
        `data:{"type":"response.reasoning_text.done","item_id":"reasoning_stream","output_index":0,"content_index":0,"text":"Thinking about the problem...","sequence_number":4}\n\n`,
        `data:{"type":"response.output_item.done","output_index":0,"item":{"id":"reasoning_stream","type":"reasoning","content":[{"type":"reasoning_text","text":"Thinking about the problem..."}]},"sequence_number":5}\n\n`,
        `data:{"type":"response.output_item.added","output_index":1,"item":{"id":"msg_stream","type":"message","role":"assistant"},"sequence_number":6}\n\n`,
        `data:{"type":"response.output_text.delta","item_id":"msg_stream","output_index":1,"content_index":0,"delta":"The solution is","sequence_number":7}\n\n`,
        `data:{"type":"response.output_text.delta","item_id":"msg_stream","output_index":1,"content_index":0,"delta":" simple.","sequence_number":8}\n\n`,
        `data:{"type":"response.output_item.done","output_index":1,"item":{"id":"msg_stream","type":"message","role":"assistant","status":"completed","content":[{"type":"output_text","text":"The solution is simple."}]},"sequence_number":9}\n\n`,
        `data:{"type":"response.completed","response":{"id":"resp_reasoning_stream","status":"completed","usage":{"input_tokens":10,"output_tokens":20,"total_tokens":30}},"sequence_number":10}\n\n`,
      ],
    };

    const { stream } = await createModel('deepseek-ai/DeepSeek-R1').doStream({
      prompt: TEST_PROMPT,
    });

    const chunks = await convertReadableStreamToArray(stream);

    // Expected part sequence: stream-start, response-metadata,
    // reasoning-start/delta/delta/end, text-start/delta/delta/end, finish.
    // NOTE(review): the snapshot shows output usage counted entirely as
    // "text" (reasoning: 0) even though a reasoning item streamed —
    // presumably the API's usage does not split reasoning tokens; confirm
    // against the model implementation.
    expect(chunks).toMatchInlineSnapshot(`
      [
        {
          "type": "stream-start",
          "warnings": [],
        },
        {
          "id": "resp_reasoning_stream",
          "modelId": "deepseek-ai/DeepSeek-R1",
          "timestamp": 2025-03-06T13:50:19.000Z,
          "type": "response-metadata",
        },
        {
          "id": "reasoning_stream",
          "providerMetadata": {
            "huggingface": {
              "itemId": "reasoning_stream",
            },
          },
          "type": "reasoning-start",
        },
        {
          "delta": "Thinking about",
          "id": "reasoning_stream",
          "type": "reasoning-delta",
        },
        {
          "delta": " the problem...",
          "id": "reasoning_stream",
          "type": "reasoning-delta",
        },
        {
          "id": "reasoning_stream",
          "type": "reasoning-end",
        },
        {
          "id": "msg_stream",
          "providerMetadata": {
            "huggingface": {
              "itemId": "msg_stream",
            },
          },
          "type": "text-start",
        },
        {
          "delta": "The solution is",
          "id": "msg_stream",
          "type": "text-delta",
        },
        {
          "delta": " simple.",
          "id": "msg_stream",
          "type": "text-delta",
        },
        {
          "id": "msg_stream",
          "type": "text-end",
        },
        {
          "finishReason": {
            "raw": undefined,
            "unified": "stop",
          },
          "providerMetadata": {
            "huggingface": {
              "responseId": "resp_reasoning_stream",
            },
          },
          "type": "finish",
          "usage": {
            "inputTokens": {
              "cacheRead": 0,
              "cacheWrite": undefined,
              "noCache": 10,
              "total": 10,
            },
            "outputTokens": {
              "reasoning": 0,
              "text": 20,
              "total": 20,
            },
            "raw": {
              "input_tokens": 10,
              "output_tokens": 20,
              "total_tokens": 30,
            },
          },
        },
      ]
    `);
  });
});
1357
+
1358
+ describe('provider options', () => {
1359
+ it('should send provider-specific options', async () => {
1360
+ server.urls['https://router.huggingface.co/v1/responses'].response = {
1361
+ type: 'json-value',
1362
+ body: {
1363
+ id: 'resp_provider_options',
1364
+ model: 'deepseek-ai/DeepSeek-V3-0324',
1365
+ object: 'response',
1366
+ created_at: 1741257730,
1367
+ status: 'completed',
1368
+ error: null,
1369
+ instructions: null,
1370
+ max_output_tokens: null,
1371
+ metadata: null,
1372
+ tool_choice: 'auto',
1373
+ tools: [],
1374
+ temperature: 1.0,
1375
+ top_p: 1.0,
1376
+ incomplete_details: null,
1377
+ usage: null,
1378
+ output: [],
1379
+ output_text: 'Test',
1380
+ },
1381
+ };
1382
+
1383
+ await createModel('deepseek-ai/DeepSeek-V3-0324').doGenerate({
1384
+ prompt: TEST_PROMPT,
1385
+ providerOptions: {
1386
+ huggingface: {
1387
+ metadata: { key: 'value' },
1388
+ instructions: 'Be concise',
1389
+ strictJsonSchema: true,
1390
+ },
1391
+ },
1392
+ responseFormat: {
1393
+ type: 'json',
1394
+ schema: { type: 'object' },
1395
+ },
1396
+ });
1397
+
1398
+ const requestBody = await server.calls[0].requestBodyJson;
1399
+ expect(requestBody.metadata).toMatchInlineSnapshot(`
1400
+ {
1401
+ "key": "value",
1402
+ }
1403
+ `);
1404
+ expect(requestBody.instructions).toBe('Be concise');
1405
+ expect(requestBody.text?.format?.strict).toBe(true);
1406
+ });
1407
+ });
1408
+
1409
describe('tool preparation', () => {
  it('should prepare tools correctly', async () => {
    // Minimal completed response stub; assertions target the request the
    // model builds from `tools` and `toolChoice`.
    server.urls['https://router.huggingface.co/v1/responses'].response = {
      type: 'json-value',
      body: {
        id: 'resp_tools',
        model: 'deepseek-ai/DeepSeek-V3-0324',
        object: 'response',
        created_at: 1741257730,
        status: 'completed',
        error: null,
        instructions: null,
        max_output_tokens: null,
        metadata: null,
        tool_choice: 'auto',
        tools: [],
        temperature: 1.0,
        top_p: 1.0,
        incomplete_details: null,
        usage: null,
        output: [],
        output_text: 'Test',
      },
    };

    await createModel('deepseek-ai/DeepSeek-V3-0324').doGenerate({
      prompt: TEST_PROMPT,
      tools: [
        {
          type: 'function',
          name: 'getWeather',
          description: 'Get weather information',
          inputSchema: {
            type: 'object',
            properties: {
              location: { type: 'string' },
            },
            required: ['location'],
          },
        },
      ],
      toolChoice: {
        type: 'tool',
        toolName: 'getWeather',
      },
    });

    // SDK tool definitions are mapped to the API shape: inputSchema becomes
    // `parameters`, and a specific-tool choice becomes
    // { type: 'function', function: { name } }.
    const requestBody = await server.calls[0].requestBodyJson;
    expect(requestBody.tools).toMatchInlineSnapshot(`
      [
        {
          "description": "Get weather information",
          "name": "getWeather",
          "parameters": {
            "properties": {
              "location": {
                "type": "string",
              },
            },
            "required": [
              "location",
            ],
            "type": "object",
          },
          "type": "function",
        },
      ]
    `);
    expect(requestBody.tool_choice).toMatchInlineSnapshot(`
      {
        "function": {
          "name": "getWeather",
        },
        "type": "function",
      }
    `);
  });

  it('should handle auto and required tool choices', async () => {
    // Shared stub response; both doGenerate calls below reuse it, and the
    // assertions index server.calls[0] / server.calls[1] in call order.
    server.urls['https://router.huggingface.co/v1/responses'].response = {
      type: 'json-value',
      body: {
        id: 'resp_tools',
        model: 'deepseek-ai/DeepSeek-V3-0324',
        object: 'response',
        created_at: 1741257730,
        status: 'completed',
        error: null,
        instructions: null,
        max_output_tokens: null,
        metadata: null,
        tool_choice: 'auto',
        tools: [],
        temperature: 1.0,
        top_p: 1.0,
        incomplete_details: null,
        usage: null,
        output: [],
        output_text: 'Test',
      },
    };

    // Test auto: { type: 'auto' } is serialized as the bare string 'auto'.
    await createModel('deepseek-ai/DeepSeek-V3-0324').doGenerate({
      prompt: TEST_PROMPT,
      tools: [
        {
          type: 'function',
          name: 'test',
          inputSchema: { type: 'object' },
        },
      ],
      toolChoice: { type: 'auto' },
    });

    let requestBody = await server.calls[0].requestBodyJson;
    expect(requestBody.tool_choice).toBe('auto');

    // Test required: { type: 'required' } is serialized as 'required'.
    await createModel('deepseek-ai/DeepSeek-V3-0324').doGenerate({
      prompt: TEST_PROMPT,
      tools: [
        {
          type: 'function',
          name: 'test',
          inputSchema: { type: 'object' },
        },
      ],
      toolChoice: { type: 'required' },
    });

    requestBody = await server.calls[1].requestBodyJson;
    expect(requestBody.tool_choice).toBe('required');
  });
});
1544
+ });