@ai-sdk/huggingface 1.0.16 → 1.0.18

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,826 @@
1
+ import {
2
+ APICallError,
3
+ LanguageModelV3,
4
+ LanguageModelV3CallOptions,
5
+ LanguageModelV3Content,
6
+ LanguageModelV3FinishReason,
7
+ LanguageModelV3GenerateResult,
8
+ LanguageModelV3StreamPart,
9
+ LanguageModelV3StreamResult,
10
+ SharedV3Warning,
11
+ } from '@ai-sdk/provider';
12
+ import {
13
+ combineHeaders,
14
+ createEventSourceResponseHandler,
15
+ createJsonResponseHandler,
16
+ generateId,
17
+ parseProviderOptions,
18
+ ParseResult,
19
+ postJsonToApi,
20
+ } from '@ai-sdk/provider-utils';
21
+ import { z } from 'zod/v4';
22
+ import { HuggingFaceConfig } from '../huggingface-config';
23
+ import { huggingfaceFailedResponseHandler } from '../huggingface-error';
24
+ import {
25
+ convertHuggingFaceResponsesUsage,
26
+ HuggingFaceResponsesUsage,
27
+ } from './convert-huggingface-responses-usage';
28
+ import { convertToHuggingFaceResponsesMessages } from './convert-to-huggingface-responses-messages';
29
+ import { prepareResponsesTools } from './huggingface-responses-prepare-tools';
30
+ import { HuggingFaceResponsesModelId } from './huggingface-responses-settings';
31
+ import { mapHuggingFaceResponsesFinishReason } from './map-huggingface-responses-finish-reason';
32
+
33
/**
 * AI SDK `LanguageModelV3` implementation backed by the HuggingFace
 * Responses API (`POST /responses`).
 *
 * Supports plain generation (`doGenerate`), SSE streaming (`doStream`),
 * JSON-schema structured output, reasoning output, and tool calls
 * (client-side function calls plus provider-executed MCP calls).
 */
export class HuggingFaceResponsesLanguageModel implements LanguageModelV3 {
  readonly specificationVersion = 'v3';

  readonly modelId: HuggingFaceResponsesModelId;

  private readonly config: HuggingFaceConfig;

  constructor(modelId: HuggingFaceResponsesModelId, config: HuggingFaceConfig) {
    this.modelId = modelId;
    this.config = config;
  }

  // http(s) image URLs are passed through to the API as-is instead of being
  // downloaded and inlined by the SDK.
  readonly supportedUrls: Record<string, RegExp[]> = {
    'image/*': [/^https?:\/\/.*$/],
  };

  get provider(): string {
    return this.config.provider;
  }

  /**
   * Translates AI SDK call options into a Responses API request body.
   *
   * Options the API does not support (topK, seed, presencePenalty,
   * frequencyPenalty, stopSequences) are dropped and reported via
   * `warnings` rather than rejected.
   *
   * @returns `args` — the JSON request body (without the `stream` flag,
   *   which the callers add) — and the accumulated `warnings`.
   */
  private async getArgs({
    maxOutputTokens,
    temperature,
    stopSequences,
    topP,
    topK,
    presencePenalty,
    frequencyPenalty,
    seed,
    prompt,
    providerOptions,
    tools,
    toolChoice,
    responseFormat,
  }: LanguageModelV3CallOptions) {
    const warnings: SharedV3Warning[] = [];

    if (topK != null) {
      warnings.push({ type: 'unsupported', feature: 'topK' });
    }

    if (seed != null) {
      warnings.push({ type: 'unsupported', feature: 'seed' });
    }

    if (presencePenalty != null) {
      warnings.push({ type: 'unsupported', feature: 'presencePenalty' });
    }

    if (frequencyPenalty != null) {
      warnings.push({ type: 'unsupported', feature: 'frequencyPenalty' });
    }

    if (stopSequences != null) {
      warnings.push({ type: 'unsupported', feature: 'stopSequences' });
    }

    // Convert the AI SDK prompt into the Responses API `input` format;
    // conversion may itself produce warnings (e.g. for dropped parts).
    const { input, warnings: messageWarnings } =
      await convertToHuggingFaceResponsesMessages({
        prompt,
      });

    warnings.push(...messageWarnings);

    // Validated options from `providerOptions.huggingface` (may be undefined).
    const huggingfaceOptions = await parseProviderOptions({
      provider: 'huggingface',
      providerOptions,
      schema: huggingfaceResponsesProviderOptionsSchema,
    });

    const {
      tools: preparedTools,
      toolChoice: preparedToolChoice,
      toolWarnings,
    } = prepareResponsesTools({
      tools,
      toolChoice,
    });

    warnings.push(...toolWarnings);

    const baseArgs = {
      model: this.modelId,
      input,
      temperature,
      top_p: topP,
      max_output_tokens: maxOutputTokens,

      // HuggingFace Responses API uses text.format for structured output.
      // NOTE(review): a `responseFormat` of type 'json' WITHOUT a schema is
      // silently ignored here (no warning emitted) — confirm that is intended.
      ...(responseFormat?.type === 'json' &&
        responseFormat.schema && {
          text: {
            format: {
              type: 'json_schema',
              strict: huggingfaceOptions?.strictJsonSchema ?? false,
              name: responseFormat.name ?? 'response',
              description: responseFormat.description,
              schema: responseFormat.schema,
            },
          },
        }),

      metadata: huggingfaceOptions?.metadata,
      instructions: huggingfaceOptions?.instructions,

      ...(preparedTools && { tools: preparedTools }),
      ...(preparedToolChoice && { tool_choice: preparedToolChoice }),
      // NOTE(review): the inner `!= null` guard repeats the outer one and is
      // redundant; left untouched in this documentation-only pass.
      ...(huggingfaceOptions?.reasoningEffort != null && {
        reasoning: {
          ...(huggingfaceOptions?.reasoningEffort != null && {
            effort: huggingfaceOptions.reasoningEffort,
          }),
        },
      }),
    };

    return { args: baseArgs, warnings };
  }

  /**
   * Non-streaming generation: POSTs to `/responses` with `stream: false`,
   * validates the JSON response, and maps each item of `response.output`
   * to AI SDK content parts (text, sources, reasoning, tool calls/results).
   *
   * @throws APICallError when the (otherwise successful) response body
   *   carries an `error` payload.
   */
  async doGenerate(
    options: LanguageModelV3CallOptions,
  ): Promise<LanguageModelV3GenerateResult> {
    const { args, warnings } = await this.getArgs(options);

    const body = {
      ...args,
      stream: false,
    };

    const url = this.config.url({
      path: '/responses',
      modelId: this.modelId,
    });

    const {
      value: response,
      responseHeaders,
      rawValue: rawResponse,
    } = await postJsonToApi({
      url,
      headers: combineHeaders(this.config.headers(), options.headers),
      body,
      failedResponseHandler: huggingfaceFailedResponseHandler,
      successfulResponseHandler: createJsonResponseHandler(
        huggingfaceResponsesResponseSchema,
      ),
      abortSignal: options.abortSignal,
      fetch: this.config.fetch,
    });

    // The API can report an error inside a successfully parsed response
    // body. NOTE(review): statusCode is hard-coded to 400 here, regardless
    // of the actual HTTP status — confirm against the API's behavior.
    if (response.error) {
      throw new APICallError({
        message: response.error.message,
        url,
        requestBodyValues: body,
        statusCode: 400,
        responseHeaders,
        responseBody: rawResponse as string,
        isRetryable: false,
      });
    }

    const content: Array<LanguageModelV3Content> = [];

    // Map each output item to AI SDK content parts.
    for (const part of response.output) {
      switch (part.type) {
        case 'message': {
          for (const contentPart of part.content) {
            content.push({
              type: 'text',
              text: contentPart.text,
              providerMetadata: {
                huggingface: {
                  itemId: part.id,
                },
              },
            });

            // URL annotations become separate 'source' parts.
            if (contentPart.annotations) {
              for (const annotation of contentPart.annotations) {
                content.push({
                  type: 'source',
                  sourceType: 'url',
                  // Prefer the injectable id generator (for tests); fall
                  // back to the provider-utils default.
                  id: this.config.generateId?.() ?? generateId(),
                  url: annotation.url,
                  title: annotation.title,
                });
              }
            }
          }
          break;
        }

        case 'reasoning': {
          for (const contentPart of part.content) {
            content.push({
              type: 'reasoning',
              text: contentPart.text,
              providerMetadata: {
                huggingface: {
                  itemId: part.id,
                },
              },
            });
          }
          break;
        }

        // Provider-executed MCP tool call: emit the call and, when the
        // server echoed an output, the matching result. Note: MCP calls use
        // `part.id` as the tool-call id (they have no `call_id`).
        case 'mcp_call': {
          content.push({
            type: 'tool-call',
            toolCallId: part.id,
            toolName: part.name,
            input: part.arguments,
            providerExecuted: true,
          });

          if (part.output) {
            content.push({
              type: 'tool-result',
              toolCallId: part.id,
              toolName: part.name,
              result: part.output,
            });
          }
          break;
        }

        // MCP tool discovery is surfaced as a synthetic 'list_tools' call.
        case 'mcp_list_tools': {
          content.push({
            type: 'tool-call',
            toolCallId: part.id,
            toolName: 'list_tools',
            input: JSON.stringify({ server_label: part.server_label }),
            providerExecuted: true,
          });

          if (part.tools) {
            content.push({
              type: 'tool-result',
              toolCallId: part.id,
              toolName: 'list_tools',
              result: { tools: part.tools },
            });
          }
          break;
        }

        // Client-side function call: uses `call_id` (not `id`) so results
        // can be correlated by the caller.
        case 'function_call': {
          content.push({
            type: 'tool-call',
            toolCallId: part.call_id,
            toolName: part.name,
            input: part.arguments,
          });

          if (part.output) {
            content.push({
              type: 'tool-result',
              toolCallId: part.call_id,
              toolName: part.name,
              result: part.output,
            });
          }
          break;
        }

        // Unknown output item types are ignored.
        default: {
          break;
        }
      }
    }

    return {
      content,
      // An absent `incomplete_details.reason` means a normal stop.
      finishReason: {
        unified: mapHuggingFaceResponsesFinishReason(
          response.incomplete_details?.reason ?? 'stop',
        ),
        raw: response.incomplete_details?.reason ?? undefined,
      },
      usage: convertHuggingFaceResponsesUsage(response.usage),
      request: { body },
      response: {
        id: response.id,
        // `created_at` is in seconds; Date expects milliseconds.
        timestamp: new Date(response.created_at * 1000),
        modelId: response.model,
        headers: responseHeaders,
        body: rawResponse,
      },
      providerMetadata: {
        huggingface: {
          responseId: response.id,
        },
      },
      warnings,
    };
  }

  /**
   * Streaming generation: POSTs with `stream: true`, parses the SSE event
   * stream, and re-emits AI SDK stream parts. Per-stream state
   * (finishReason, responseId, usage) lives in the closure and is flushed
   * as a final 'finish' part when the source stream ends.
   */
  async doStream(
    options: LanguageModelV3CallOptions,
  ): Promise<LanguageModelV3StreamResult> {
    const { args, warnings } = await this.getArgs(options);

    const body = {
      ...args,
      stream: true,
    };

    const { value: response, responseHeaders } = await postJsonToApi({
      url: this.config.url({
        path: '/responses',
        modelId: this.modelId,
      }),
      headers: combineHeaders(this.config.headers(), options.headers),
      body,
      failedResponseHandler: huggingfaceFailedResponseHandler,
      successfulResponseHandler: createEventSourceResponseHandler(
        huggingfaceResponsesChunkSchema,
      ),
      abortSignal: options.abortSignal,
      fetch: this.config.fetch,
    });

    // Mutable per-stream state, finalized in flush().
    let finishReason: LanguageModelV3FinishReason = {
      unified: 'other',
      raw: undefined,
    };
    let responseId: string | null = null;
    let usage: HuggingFaceResponsesUsage | undefined = undefined;

    return {
      stream: response.pipeThrough(
        new TransformStream<
          ParseResult<z.infer<typeof huggingfaceResponsesChunkSchema>>,
          LanguageModelV3StreamPart
        >({
          start(controller) {
            controller.enqueue({ type: 'stream-start', warnings });
          },

          transform(chunk, controller) {
            // Schema-parse failures are surfaced as 'error' parts and mark
            // the whole stream as errored.
            if (!chunk.success) {
              finishReason = {
                unified: 'error',
                raw: undefined,
              };
              controller.enqueue({ type: 'error', error: chunk.error });
              return;
            }

            const value = chunk.value;

            // response.created → response metadata.
            if (isResponseCreatedChunk(value)) {
              responseId = value.response.id;
              controller.enqueue({
                type: 'response-metadata',
                id: value.response.id,
                // `created_at` is in seconds; Date expects milliseconds.
                timestamp: new Date(value.response.created_at * 1000),
                modelId: value.response.model,
              });
              return;
            }

            // response.output_item.added → open a text / tool-input /
            // reasoning part, keyed by the item id.
            if (isResponseOutputItemAddedChunk(value)) {
              if (
                value.item.type === 'message' &&
                value.item.role === 'assistant'
              ) {
                controller.enqueue({
                  type: 'text-start',
                  id: value.item.id,
                  providerMetadata: {
                    huggingface: {
                      itemId: value.item.id,
                    },
                  },
                });
              } else if (value.item.type === 'function_call') {
                controller.enqueue({
                  type: 'tool-input-start',
                  id: value.item.call_id,
                  toolName: value.item.name,
                });
              } else if (value.item.type === 'reasoning') {
                controller.enqueue({
                  type: 'reasoning-start',
                  id: value.item.id,
                  providerMetadata: {
                    huggingface: {
                      itemId: value.item.id,
                    },
                  },
                });
              }
              return;
            }

            // response.output_item.done → close the part; for function
            // calls, also emit the complete tool-call (and result, if any).
            if (isResponseOutputItemDoneChunk(value)) {
              if (
                value.item.type === 'message' &&
                value.item.role === 'assistant'
              ) {
                controller.enqueue({
                  type: 'text-end',
                  id: value.item.id,
                });
              } else if (value.item.type === 'function_call') {
                controller.enqueue({
                  type: 'tool-input-end',
                  id: value.item.call_id,
                });

                controller.enqueue({
                  type: 'tool-call',
                  toolCallId: value.item.call_id,
                  toolName: value.item.name,
                  input: value.item.arguments,
                });

                if (value.item.output) {
                  controller.enqueue({
                    type: 'tool-result',
                    toolCallId: value.item.call_id,
                    toolName: value.item.name,
                    result: value.item.output,
                  });
                }
              }
              return;
            }

            // response.completed → record finish reason / usage; the
            // 'finish' part itself is emitted in flush().
            if (isResponseCompletedChunk(value)) {
              responseId = value.response.id;
              finishReason = {
                unified: mapHuggingFaceResponsesFinishReason(
                  value.response.incomplete_details?.reason ?? 'stop',
                ),
                raw: value.response.incomplete_details?.reason ?? undefined,
              };
              if (value.response.usage) {
                usage = value.response.usage;
              }
              return;
            }

            if (isReasoningDeltaChunk(value)) {
              controller.enqueue({
                type: 'reasoning-delta',
                id: value.item_id,
                delta: value.delta,
              });
              return;
            }

            if (isReasoningEndChunk(value)) {
              controller.enqueue({
                type: 'reasoning-end',
                id: value.item_id,
              });
              return;
            }

            if (isTextDeltaChunk(value)) {
              controller.enqueue({
                type: 'text-delta',
                id: value.item_id,
                delta: value.delta,
              });
              return;
            }
            // Unknown chunk types (matched by the loose fallback schema)
            // fall through here and are ignored.
          },

          flush(controller) {
            controller.enqueue({
              type: 'finish',
              finishReason,
              usage: convertHuggingFaceResponsesUsage(usage),
              providerMetadata: {
                huggingface: {
                  responseId,
                },
              },
            });
          },
        }),
      ),
      request: { body },
      response: { headers: responseHeaders },
    };
  }
}
526
+
527
// Options accepted under `providerOptions.huggingface`; validated in
// `getArgs` via `parseProviderOptions`.
const huggingfaceResponsesProviderOptionsSchema = z.object({
  // Arbitrary string key/value metadata, forwarded as `metadata`.
  metadata: z.record(z.string(), z.string()).optional(),
  // Forwarded as the request's `instructions` field.
  instructions: z.string().optional(),
  // When true, `text.format.strict` is set for JSON-schema output.
  strictJsonSchema: z.boolean().optional(),
  // Forwarded as `reasoning.effort` in the request body.
  reasoningEffort: z.string().optional(),
});
533
+
534
// One item of the non-streaming response's `output` array, discriminated
// on `type`. Variants mirror the shapes consumed in `doGenerate`.
const huggingfaceResponsesOutputSchema = z.discriminatedUnion('type', [
  // Assistant message with text parts and optional URL annotations.
  z.object({
    type: z.literal('message'),
    id: z.string(),
    role: z.string().optional(),
    status: z.string().optional(),
    content: z.array(
      z.object({
        type: z.literal('output_text'),
        text: z.string(),
        // Consumed as `{ url, title }` source annotations in `doGenerate`.
        annotations: z.array(z.any()).optional(),
      }),
    ),
  }),
  // Reasoning trace from reasoning-capable models.
  z.object({
    type: z.literal('reasoning'),
    id: z.string(),
    status: z.string().optional(),
    content: z.array(
      z.object({
        type: z.literal('reasoning_text'),
        text: z.string(),
      }),
    ),
    // NOTE(review): `.optional()` is applied to the array *element*, so
    // `summary` may contain undefined entries — confirm this is intended
    // (vs. making the whole array element required).
    summary: z
      .array(
        z
          .object({
            type: z.literal('reasoning_summary'),
            text: z.string(),
          })
          .optional(),
      )
      .optional(),
  }),
  // Client-side function tool call; correlated via `call_id`.
  z.object({
    type: z.literal('function_call'),
    id: z.string(),
    call_id: z.string(),
    name: z.string(),
    arguments: z.string(),
    output: z.string().optional(),
    status: z.string().optional(),
  }),
  // Provider-executed MCP tool call; correlated via `id` (no `call_id`).
  z.object({
    type: z.literal('mcp_call'),
    id: z.string(),
    name: z.string(),
    arguments: z.string(),
    output: z.string().optional(),
    status: z.string().optional(),
  }),
  // Tool listing from an MCP server.
  z.object({
    type: z.literal('mcp_list_tools'),
    id: z.string(),
    server_label: z.string(),
    tools: z.array(z.any()).optional(),
    status: z.string().optional(),
  }),
]);
594
+
595
// Full (non-streaming) `/responses` response body. Also embedded in the
// `response.completed` stream event.
const huggingfaceResponsesResponseSchema = z.object({
  id: z.string(),
  model: z.string(),
  object: z.string(),
  // Unix timestamp in seconds (multiplied by 1000 before constructing Date).
  created_at: z.number(),
  status: z.string(),
  // Non-null when the API reports an error inside a parseable body;
  // `doGenerate` converts it into an APICallError.
  error: z.any().nullable(),
  instructions: z.any().nullable(),
  max_output_tokens: z.any().nullable(),
  metadata: z.any().nullable(),
  tool_choice: z.any(),
  tools: z.array(z.any()),
  temperature: z.number(),
  top_p: z.number(),
  // Present (with a `reason`) only when generation stopped early; absence
  // is treated as a normal 'stop'.
  incomplete_details: z
    .object({
      reason: z.string(),
    })
    .nullable()
    .optional(),
  // Token accounting; converted by `convertHuggingFaceResponsesUsage`.
  usage: z
    .object({
      input_tokens: z.number(),
      input_tokens_details: z
        .object({
          cached_tokens: z.number(),
        })
        .optional(),
      output_tokens: z.number(),
      output_tokens_details: z
        .object({
          reasoning_tokens: z.number(),
        })
        .optional(),
      total_tokens: z.number(),
    })
    .nullable()
    .optional(),
  output: z.array(huggingfaceResponsesOutputSchema),
  output_text: z.string().nullable().optional(),
});
636
+
637
// Stream event: a new output item has started. `doStream` uses it to open
// text / tool-input / reasoning parts.
const responseOutputItemAddedSchema = z.object({
  type: z.literal('response.output_item.added'),
  output_index: z.number(),
  item: z.discriminatedUnion('type', [
    z.object({
      type: z.literal('message'),
      id: z.string(),
      role: z.string().optional(),
      status: z.string().optional(),
      content: z.array(z.any()).optional(),
    }),
    z.object({
      type: z.literal('reasoning'),
      id: z.string(),
      status: z.string().optional(),
      content: z.array(z.any()).optional(),
      summary: z.array(z.any()).optional(),
    }),
    z.object({
      type: z.literal('mcp_list_tools'),
      id: z.string(),
      server_label: z.string(),
      tools: z.array(z.any()).optional(),
      error: z.string().optional(),
    }),
    z.object({
      type: z.literal('mcp_call'),
      id: z.string(),
      server_label: z.string(),
      name: z.string(),
      arguments: z.string(),
      output: z.string().optional(),
      error: z.string().optional(),
    }),
    z.object({
      type: z.literal('function_call'),
      id: z.string(),
      call_id: z.string(),
      name: z.string(),
      arguments: z.string(),
      output: z.string().optional(),
      error: z.string().optional(),
    }),
  ]),
  sequence_number: z.number(),
});
683
+
684
// Stream event: an output item is complete. `doStream` uses it to close
// parts and to emit the finished tool-call (and its result, if echoed).
const responseOutputItemDoneSchema = z.object({
  type: z.literal('response.output_item.done'),
  output_index: z.number(),
  item: z.discriminatedUnion('type', [
    z.object({
      type: z.literal('message'),
      id: z.string(),
      role: z.string().optional(),
      status: z.string().optional(),
      content: z.array(z.any()).optional(),
    }),
    z.object({
      type: z.literal('mcp_list_tools'),
      id: z.string(),
      server_label: z.string(),
      tools: z.array(z.any()).optional(),
      error: z.string().optional(),
    }),
    z.object({
      type: z.literal('mcp_call'),
      id: z.string(),
      server_label: z.string(),
      name: z.string(),
      arguments: z.string(),
      output: z.string().optional(),
      error: z.string().optional(),
    }),
    z.object({
      type: z.literal('function_call'),
      id: z.string(),
      call_id: z.string(),
      name: z.string(),
      arguments: z.string(),
      output: z.string().optional(),
      error: z.string().optional(),
    }),
    z.object({
      type: z.literal('reasoning'),
      id: z.string(),
      status: z.string().optional(),
      content: z.array(z.any()).optional(),
      summary: z.array(z.any()).optional(),
    }),
  ]),
  sequence_number: z.number(),
});
730
+
731
// Stream event: an incremental piece of assistant text, keyed by `item_id`.
const textDeltaChunkSchema = z.object({
  type: z.literal('response.output_text.delta'),
  item_id: z.string(),
  output_index: z.number(),
  content_index: z.number(),
  delta: z.string(),
  sequence_number: z.number(),
});
739
+
740
// Stream event: an incremental piece of reasoning text, keyed by `item_id`.
const reasoningTextDeltaChunkSchema = z.object({
  type: z.literal('response.reasoning_text.delta'),
  item_id: z.string(),
  output_index: z.number(),
  content_index: z.number(),
  delta: z.string(),
  sequence_number: z.number(),
});
748
+
749
// Stream event: reasoning text for an item is complete. Carries the full
// `text`, though `doStream` only uses `item_id` to emit 'reasoning-end'.
const reasoningTextEndChunkSchema = z.object({
  type: z.literal('response.reasoning_text.done'),
  item_id: z.string(),
  output_index: z.number(),
  content_index: z.number(),
  text: z.string(),
  sequence_number: z.number(),
});
757
+
758
// Stream event: the terminal event carrying the full response body
// (finish reason and usage are read from it in `doStream`).
const responseCompletedChunkSchema = z.object({
  type: z.literal('response.completed'),
  response: huggingfaceResponsesResponseSchema,
  sequence_number: z.number(),
});
763
+
764
// Stream event: first event of a stream; supplies response id, creation
// time (seconds), and model for the 'response-metadata' part.
const responseCreatedChunkSchema = z.object({
  type: z.literal('response.created'),
  response: z.object({
    id: z.string(),
    object: z.string(),
    created_at: z.number(),
    status: z.string(),
    model: z.string(),
  }),
});
774
+
775
// Union of all recognized SSE chunk shapes. zod tries union members in
// order, so the loose `{ type: string }` fallback must stay LAST: it keeps
// unknown event types from failing the stream (they are ignored in
// `doStream`'s transform).
const huggingfaceResponsesChunkSchema = z.union([
  responseOutputItemAddedSchema,
  responseOutputItemDoneSchema,
  reasoningTextDeltaChunkSchema,
  reasoningTextEndChunkSchema,
  textDeltaChunkSchema,
  responseCompletedChunkSchema,
  responseCreatedChunkSchema,
  z.object({ type: z.string() }).loose(), // fallback for unknown chunks
]);
785
+
786
+ function isResponseOutputItemAddedChunk(
787
+ chunk: z.infer<typeof huggingfaceResponsesChunkSchema>,
788
+ ): chunk is z.infer<typeof responseOutputItemAddedSchema> {
789
+ return chunk.type === 'response.output_item.added';
790
+ }
791
+
792
+ function isResponseOutputItemDoneChunk(
793
+ chunk: z.infer<typeof huggingfaceResponsesChunkSchema>,
794
+ ): chunk is z.infer<typeof responseOutputItemDoneSchema> {
795
+ return chunk.type === 'response.output_item.done';
796
+ }
797
+
798
+ function isTextDeltaChunk(
799
+ chunk: z.infer<typeof huggingfaceResponsesChunkSchema>,
800
+ ): chunk is z.infer<typeof textDeltaChunkSchema> {
801
+ return chunk.type === 'response.output_text.delta';
802
+ }
803
+
804
+ function isReasoningDeltaChunk(
805
+ chunk: z.infer<typeof huggingfaceResponsesChunkSchema>,
806
+ ): chunk is z.infer<typeof reasoningTextDeltaChunkSchema> {
807
+ return chunk.type === 'response.reasoning_text.delta';
808
+ }
809
+
810
+ function isReasoningEndChunk(
811
+ chunk: z.infer<typeof huggingfaceResponsesChunkSchema>,
812
+ ): chunk is z.infer<typeof reasoningTextEndChunkSchema> {
813
+ return chunk.type === 'response.reasoning_text.done';
814
+ }
815
+
816
+ function isResponseCompletedChunk(
817
+ chunk: z.infer<typeof huggingfaceResponsesChunkSchema>,
818
+ ): chunk is z.infer<typeof responseCompletedChunkSchema> {
819
+ return chunk.type === 'response.completed';
820
+ }
821
+
822
+ function isResponseCreatedChunk(
823
+ chunk: z.infer<typeof huggingfaceResponsesChunkSchema>,
824
+ ): chunk is z.infer<typeof responseCreatedChunkSchema> {
825
+ return chunk.type === 'response.created';
826
+ }