@ai-sdk/huggingface 1.0.0-beta.8 → 1.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +528 -0
- package/dist/index.d.mts +4 -0
- package/dist/index.d.ts +4 -0
- package/dist/index.js +362 -199
- package/dist/index.js.map +1 -1
- package/dist/index.mjs +243 -70
- package/dist/index.mjs.map +1 -1
- package/package.json +7 -6
package/dist/index.js
CHANGED
@@ -1,9 +1,7 @@
 "use strict";
-var __create = Object.create;
 var __defProp = Object.defineProperty;
 var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
 var __getOwnPropNames = Object.getOwnPropertyNames;
-var __getProtoOf = Object.getPrototypeOf;
 var __hasOwnProp = Object.prototype.hasOwnProperty;
 var __export = (target, all) => {
   for (var name in all)
@@ -17,14 +15,6 @@ var __copyProps = (to, from, except, desc) => {
   }
   return to;
 };
-var __toESM = (mod, isNodeMode, target) => (target = mod != null ? __create(__getProtoOf(mod)) : {}, __copyProps(
-  // If the importer is in node compatibility mode or this is not an ESM
-  // file that has been converted to a CommonJS file using a Babel-
-  // compatible transform (i.e. "__esModule" has not been set), then set
-  // "default" to the CommonJS "module.exports" for node compatibility.
-  isNodeMode || !mod || !mod.__esModule ? __defProp(target, "default", { value: mod, enumerable: true }) : target,
-  mod
-));
 var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);

 // src/index.ts
@@ -42,16 +32,16 @@ var import_provider_utils3 = require("@ai-sdk/provider-utils");
 // src/responses/huggingface-responses-language-model.ts
 var import_provider2 = require("@ai-sdk/provider");
 var import_provider_utils2 = require("@ai-sdk/provider-utils");
-var
+var import_v42 = require("zod/v4");

 // src/huggingface-error.ts
 var import_provider_utils = require("@ai-sdk/provider-utils");
-var
-var huggingfaceErrorDataSchema = z.object({
-  error: z.object({
-    message: z.string(),
-    type: z.string().optional(),
-    code: z.string().optional()
+var import_v4 = require("zod/v4");
+var huggingfaceErrorDataSchema = import_v4.z.object({
+  error: import_v4.z.object({
+    message: import_v4.z.string(),
+    type: import_v4.z.string().optional(),
+    code: import_v4.z.string().optional()
   })
 });
 var huggingfaceFailedResponseHandler = (0, import_provider_utils.createJsonErrorResponseHandler)({
@@ -59,6 +49,45 @@ var huggingfaceFailedResponseHandler = (0, import_provider_utils.createJsonError
   errorToMessage: (data) => data.error.message
 });

+// src/responses/convert-huggingface-responses-usage.ts
+function convertHuggingFaceResponsesUsage(usage) {
+  var _a, _b, _c, _d;
+  if (usage == null) {
+    return {
+      inputTokens: {
+        total: void 0,
+        noCache: void 0,
+        cacheRead: void 0,
+        cacheWrite: void 0
+      },
+      outputTokens: {
+        total: void 0,
+        text: void 0,
+        reasoning: void 0
+      },
+      raw: void 0
+    };
+  }
+  const inputTokens = usage.input_tokens;
+  const outputTokens = usage.output_tokens;
+  const cachedTokens = (_b = (_a = usage.input_tokens_details) == null ? void 0 : _a.cached_tokens) != null ? _b : 0;
+  const reasoningTokens = (_d = (_c = usage.output_tokens_details) == null ? void 0 : _c.reasoning_tokens) != null ? _d : 0;
+  return {
+    inputTokens: {
+      total: inputTokens,
+      noCache: inputTokens - cachedTokens,
+      cacheRead: cachedTokens,
+      cacheWrite: void 0
+    },
+    outputTokens: {
+      total: outputTokens,
+      text: outputTokens - reasoningTokens,
+      reasoning: reasoningTokens
+    },
+    raw: usage
+  };
+}
+
 // src/responses/convert-to-huggingface-responses-messages.ts
 var import_provider = require("@ai-sdk/provider");
 async function convertToHuggingFaceResponsesMessages({
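The new usage converter above normalizes the Responses API token counts into the SDK's nested usage shape. As a worked illustration (the raw payload below is hypothetical; the arithmetic follows the function as written):

// Hypothetical raw usage object from the Responses API:
//   { input_tokens: 120, input_tokens_details: { cached_tokens: 20 },
//     output_tokens: 80, output_tokens_details: { reasoning_tokens: 30 }, total_tokens: 200 }
// convertHuggingFaceResponsesUsage(raw) would then yield:
//   inputTokens:  { total: 120, noCache: 100, cacheRead: 20, cacheWrite: undefined }
//   outputTokens: { total: 80, text: 50, reasoning: 30 }
//   raw:          the original usage object
// When usage is missing entirely, every field comes back undefined.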
@@ -130,10 +159,7 @@ async function convertToHuggingFaceResponsesMessages({
         break;
       }
       case "tool": {
-        warnings.push({
-          type: "unsupported-setting",
-          setting: "tool messages"
-        });
+        warnings.push({ type: "unsupported", feature: "tool messages" });
         break;
       }
       default: {
@@ -145,24 +171,6 @@ async function convertToHuggingFaceResponsesMessages({
   return { input: messages, warnings };
 }

-// src/responses/map-huggingface-responses-finish-reason.ts
-function mapHuggingFaceResponsesFinishReason(finishReason) {
-  switch (finishReason) {
-    case "stop":
-      return "stop";
-    case "length":
-      return "length";
-    case "content_filter":
-      return "content-filter";
-    case "tool_calls":
-      return "tool-calls";
-    case "error":
-      return "error";
-    default:
-      return "unknown";
-  }
-}
-
 // src/responses/huggingface-responses-prepare-tools.ts
 function prepareResponsesTools({
   tools,
@@ -184,10 +192,10 @@ function prepareResponsesTools({
         parameters: tool.inputSchema
       });
       break;
-    case "provider
+    case "provider":
       toolWarnings.push({
-        type: "unsupported
-        tool
+        type: "unsupported",
+        feature: `provider-defined tool ${tool.id}`
       });
       break;
     default: {
@@ -226,6 +234,24 @@ function prepareResponsesTools({
   };
 }

+// src/responses/map-huggingface-responses-finish-reason.ts
+function mapHuggingFaceResponsesFinishReason(finishReason) {
+  switch (finishReason) {
+    case "stop":
+      return "stop";
+    case "length":
+      return "length";
+    case "content_filter":
+      return "content-filter";
+    case "tool_calls":
+      return "tool-calls";
+    case "error":
+      return "error";
+    default:
+      return "other";
+  }
+}
+
 // src/responses/huggingface-responses-language-model.ts
 var HuggingFaceResponsesLanguageModel = class {
   constructor(modelId, config) {
@@ -257,25 +283,19 @@ var HuggingFaceResponsesLanguageModel = class {
     var _a, _b;
     const warnings = [];
     if (topK != null) {
-      warnings.push({ type: "unsupported
+      warnings.push({ type: "unsupported", feature: "topK" });
     }
     if (seed != null) {
-      warnings.push({ type: "unsupported
+      warnings.push({ type: "unsupported", feature: "seed" });
     }
     if (presencePenalty != null) {
-      warnings.push({
-        type: "unsupported-setting",
-        setting: "presencePenalty"
-      });
+      warnings.push({ type: "unsupported", feature: "presencePenalty" });
     }
     if (frequencyPenalty != null) {
-      warnings.push({
-        type: "unsupported-setting",
-        setting: "frequencyPenalty"
-      });
+      warnings.push({ type: "unsupported", feature: "frequencyPenalty" });
     }
     if (stopSequences != null) {
-      warnings.push({ type: "unsupported
+      warnings.push({ type: "unsupported", feature: "stopSequences" });
     }
     const { input, warnings: messageWarnings } = await convertToHuggingFaceResponsesMessages({
       prompt
@@ -316,12 +336,19 @@ var HuggingFaceResponsesLanguageModel = class {
       metadata: huggingfaceOptions == null ? void 0 : huggingfaceOptions.metadata,
       instructions: huggingfaceOptions == null ? void 0 : huggingfaceOptions.instructions,
       ...preparedTools && { tools: preparedTools },
-      ...preparedToolChoice && { tool_choice: preparedToolChoice }
+      ...preparedToolChoice && { tool_choice: preparedToolChoice },
+      ...(huggingfaceOptions == null ? void 0 : huggingfaceOptions.reasoningEffort) != null && {
+        reasoning: {
+          ...(huggingfaceOptions == null ? void 0 : huggingfaceOptions.reasoningEffort) != null && {
+            effort: huggingfaceOptions.reasoningEffort
+          }
+        }
+      }
     };
     return { args: baseArgs, warnings };
   }
   async doGenerate(options) {
-    var _a, _b, _c, _d, _e, _f, _g
+    var _a, _b, _c, _d, _e, _f, _g;
     const { args, warnings } = await this.getArgs(options);
     const body = {
       ...args,
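The request body now forwards a reasoningEffort provider option as reasoning.effort. A minimal sketch of how a caller might set it, assuming the AI SDK's generateText API and a placeholder model ID (neither appears in this diff):

import { generateText } from 'ai';
import { createHuggingFace } from '@ai-sdk/huggingface';

const huggingface = createHuggingFace();

const { text } = await generateText({
  model: huggingface('openai/gpt-oss-120b'), // placeholder model id
  prompt: 'Summarize the trade-offs.',
  providerOptions: {
    // validated by huggingfaceResponsesProviderOptionsSchema further down in this file
    huggingface: { reasoningEffort: 'low' },
  },
});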
@@ -385,6 +412,20 @@ var HuggingFaceResponsesLanguageModel = class {
         }
         break;
       }
+      case "reasoning": {
+        for (const contentPart of part.content) {
+          content.push({
+            type: "reasoning",
+            text: contentPart.text,
+            providerMetadata: {
+              huggingface: {
+                itemId: part.id
+              }
+            }
+          });
+        }
+        break;
+      }
       case "mcp_call": {
         content.push({
           type: "tool-call",
@@ -398,8 +439,7 @@ var HuggingFaceResponsesLanguageModel = class {
           type: "tool-result",
           toolCallId: part.id,
           toolName: part.name,
-          result: part.output
-          providerExecuted: true
+          result: part.output
         });
       }
       break;
@@ -417,8 +457,7 @@ var HuggingFaceResponsesLanguageModel = class {
           type: "tool-result",
           toolCallId: part.id,
           toolName: "list_tools",
-          result: { tools: part.tools }
-          providerExecuted: true
+          result: { tools: part.tools }
         });
       }
       break;
@@ -447,14 +486,13 @@ var HuggingFaceResponsesLanguageModel = class {
     }
     return {
       content,
-      finishReason:
-
-
-
-
-        outputTokens: (_i = (_h = response.usage) == null ? void 0 : _h.output_tokens) != null ? _i : 0,
-        totalTokens: (_o = (_j = response.usage) == null ? void 0 : _j.total_tokens) != null ? _o : ((_l = (_k = response.usage) == null ? void 0 : _k.input_tokens) != null ? _l : 0) + ((_n = (_m = response.usage) == null ? void 0 : _m.output_tokens) != null ? _n : 0)
+      finishReason: {
+        unified: mapHuggingFaceResponsesFinishReason(
+          (_e = (_d = response.incomplete_details) == null ? void 0 : _d.reason) != null ? _e : "stop"
+        ),
+        raw: (_g = (_f = response.incomplete_details) == null ? void 0 : _f.reason) != null ? _g : void 0
       },
+      usage: convertHuggingFaceResponsesUsage(response.usage),
       request: { body },
       response: {
         id: response.id,
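doGenerate now returns finishReason as a { unified, raw } pair and routes usage through the converter added earlier. Roughly, following the mapping function above (values illustrative):

// response.incomplete_details absent            -> finishReason: { unified: "stop", raw: undefined }
// response.incomplete_details.reason === "length" -> finishReason: { unified: "length", raw: "length" }
// result.usage -> { inputTokens: {...}, outputTokens: {...}, raw: {...} } from convertHuggingFaceResponsesUsage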
@@ -491,13 +529,12 @@ var HuggingFaceResponsesLanguageModel = class {
       abortSignal: options.abortSignal,
       fetch: this.config.fetch
     });
-    let finishReason =
-
-
-      inputTokens: void 0,
-      outputTokens: void 0,
-      totalTokens: void 0
+    let finishReason = {
+      unified: "other",
+      raw: void 0
     };
+    let responseId = null;
+    let usage = void 0;
     return {
       stream: response.pipeThrough(
         new TransformStream({
@@ -505,9 +542,12 @@ var HuggingFaceResponsesLanguageModel = class {
             controller.enqueue({ type: "stream-start", warnings });
           },
           transform(chunk, controller) {
-            var _a, _b, _c;
+            var _a, _b, _c, _d;
             if (!chunk.success) {
-              finishReason =
+              finishReason = {
+                unified: "error",
+                raw: void 0
+              };
               controller.enqueue({ type: "error", error: chunk.error });
               return;
             }
@@ -539,6 +579,16 @@ var HuggingFaceResponsesLanguageModel = class {
                   id: value.item.call_id,
                   toolName: value.item.name
                 });
+              } else if (value.item.type === "reasoning") {
+                controller.enqueue({
+                  type: "reasoning-start",
+                  id: value.item.id,
+                  providerMetadata: {
+                    huggingface: {
+                      itemId: value.item.id
+                    }
+                  }
+                });
               }
               return;
             }
@@ -572,16 +622,32 @@ var HuggingFaceResponsesLanguageModel = class {
             }
             if (isResponseCompletedChunk(value)) {
               responseId = value.response.id;
-              finishReason =
-
-
+              finishReason = {
+                unified: mapHuggingFaceResponsesFinishReason(
+                  (_b = (_a = value.response.incomplete_details) == null ? void 0 : _a.reason) != null ? _b : "stop"
+                ),
+                raw: (_d = (_c = value.response.incomplete_details) == null ? void 0 : _c.reason) != null ? _d : void 0
+              };
               if (value.response.usage) {
-                usage
-                usage.outputTokens = value.response.usage.output_tokens;
-                usage.totalTokens = (_c = value.response.usage.total_tokens) != null ? _c : value.response.usage.input_tokens + value.response.usage.output_tokens;
+                usage = value.response.usage;
               }
               return;
             }
+            if (isReasoningDeltaChunk(value)) {
+              controller.enqueue({
+                type: "reasoning-delta",
+                id: value.item_id,
+                delta: value.delta
+              });
+              return;
+            }
+            if (isReasoningEndChunk(value)) {
+              controller.enqueue({
+                type: "reasoning-end",
+                id: value.item_id
+              });
+              return;
+            }
             if (isTextDeltaChunk(value)) {
               controller.enqueue({
                 type: "text-delta",
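With these changes the stream now emits dedicated reasoning parts. A sketch of the part sequence a consumer would observe for a reasoning-capable model (IDs and text are made up; the shapes come from the enqueue calls above):

// { type: "stream-start", warnings: [...] }
// { type: "reasoning-start", id: "rs_1", providerMetadata: { huggingface: { itemId: "rs_1" } } }
// { type: "reasoning-delta", id: "rs_1", delta: "First, consider..." }
// { type: "reasoning-end", id: "rs_1" }
// { type: "text-delta", ... }   // answer tokens, unchanged
// { type: "finish", finishReason: { unified: "stop", raw: undefined },
//   usage: convertHuggingFaceResponsesUsage(usage), providerMetadata: { huggingface: { responseId } } }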
@@ -595,7 +661,7 @@ var HuggingFaceResponsesLanguageModel = class {
             controller.enqueue({
               type: "finish",
               finishReason,
-              usage,
+              usage: convertHuggingFaceResponsesUsage(usage),
               providerMetadata: {
                 huggingface: {
                   responseId
@@ -610,150 +676,239 @@ var HuggingFaceResponsesLanguageModel = class {
     };
   }
 };
-var huggingfaceResponsesProviderOptionsSchema =
-  metadata:
-  instructions:
-  strictJsonSchema:
+var huggingfaceResponsesProviderOptionsSchema = import_v42.z.object({
+  metadata: import_v42.z.record(import_v42.z.string(), import_v42.z.string()).optional(),
+  instructions: import_v42.z.string().optional(),
+  strictJsonSchema: import_v42.z.boolean().optional(),
+  reasoningEffort: import_v42.z.string().optional()
 });
-var
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+var huggingfaceResponsesOutputSchema = import_v42.z.discriminatedUnion("type", [
+  import_v42.z.object({
+    type: import_v42.z.literal("message"),
+    id: import_v42.z.string(),
+    role: import_v42.z.string().optional(),
+    status: import_v42.z.string().optional(),
+    content: import_v42.z.array(
+      import_v42.z.object({
+        type: import_v42.z.literal("output_text"),
+        text: import_v42.z.string(),
+        annotations: import_v42.z.array(import_v42.z.any()).optional()
+      })
+    )
+  }),
+  import_v42.z.object({
+    type: import_v42.z.literal("reasoning"),
+    id: import_v42.z.string(),
+    status: import_v42.z.string().optional(),
+    content: import_v42.z.array(
+      import_v42.z.object({
+        type: import_v42.z.literal("reasoning_text"),
+        text: import_v42.z.string()
+      })
+    ),
+    summary: import_v42.z.array(
+      import_v42.z.object({
+        type: import_v42.z.literal("reasoning_summary"),
+        text: import_v42.z.string()
+      }).optional()
+    ).optional()
+  }),
+  import_v42.z.object({
+    type: import_v42.z.literal("function_call"),
+    id: import_v42.z.string(),
+    call_id: import_v42.z.string(),
+    name: import_v42.z.string(),
+    arguments: import_v42.z.string(),
+    output: import_v42.z.string().optional(),
+    status: import_v42.z.string().optional()
+  }),
+  import_v42.z.object({
+    type: import_v42.z.literal("mcp_call"),
+    id: import_v42.z.string(),
+    name: import_v42.z.string(),
+    arguments: import_v42.z.string(),
+    output: import_v42.z.string().optional(),
+    status: import_v42.z.string().optional()
+  }),
+  import_v42.z.object({
+    type: import_v42.z.literal("mcp_list_tools"),
+    id: import_v42.z.string(),
+    server_label: import_v42.z.string(),
+    tools: import_v42.z.array(import_v42.z.any()).optional(),
+    status: import_v42.z.string().optional()
+  })
+]);
+var huggingfaceResponsesResponseSchema = import_v42.z.object({
+  id: import_v42.z.string(),
+  model: import_v42.z.string(),
+  object: import_v42.z.string(),
+  created_at: import_v42.z.number(),
+  status: import_v42.z.string(),
+  error: import_v42.z.any().nullable(),
+  instructions: import_v42.z.any().nullable(),
+  max_output_tokens: import_v42.z.any().nullable(),
+  metadata: import_v42.z.any().nullable(),
+  tool_choice: import_v42.z.any(),
+  tools: import_v42.z.array(import_v42.z.any()),
+  temperature: import_v42.z.number(),
+  top_p: import_v42.z.number(),
+  incomplete_details: import_v42.z.object({
+    reason: import_v42.z.string()
   }).nullable().optional(),
-  usage:
-    input_tokens:
-    input_tokens_details:
-      cached_tokens:
+  usage: import_v42.z.object({
+    input_tokens: import_v42.z.number(),
+    input_tokens_details: import_v42.z.object({
+      cached_tokens: import_v42.z.number()
     }).optional(),
-    output_tokens:
-    output_tokens_details:
-      reasoning_tokens:
+    output_tokens: import_v42.z.number(),
+    output_tokens_details: import_v42.z.object({
+      reasoning_tokens: import_v42.z.number()
    }).optional(),
-    total_tokens:
+    total_tokens: import_v42.z.number()
  }).nullable().optional(),
-  output:
-  output_text:
+  output: import_v42.z.array(huggingfaceResponsesOutputSchema),
+  output_text: import_v42.z.string().nullable().optional()
 });
-var responseOutputItemAddedSchema =
-  type:
-  output_index:
-  item:
-
-      type:
-      id:
-      role:
-      status:
-      content:
+var responseOutputItemAddedSchema = import_v42.z.object({
+  type: import_v42.z.literal("response.output_item.added"),
+  output_index: import_v42.z.number(),
+  item: import_v42.z.discriminatedUnion("type", [
+    import_v42.z.object({
+      type: import_v42.z.literal("message"),
+      id: import_v42.z.string(),
+      role: import_v42.z.string().optional(),
+      status: import_v42.z.string().optional(),
+      content: import_v42.z.array(import_v42.z.any()).optional()
+    }),
+    import_v42.z.object({
+      type: import_v42.z.literal("reasoning"),
+      id: import_v42.z.string(),
+      status: import_v42.z.string().optional(),
+      content: import_v42.z.array(import_v42.z.any()).optional(),
+      summary: import_v42.z.array(import_v42.z.any()).optional()
     }),
-
-      type:
-      id:
-      server_label:
-      tools:
-      error:
+    import_v42.z.object({
+      type: import_v42.z.literal("mcp_list_tools"),
+      id: import_v42.z.string(),
+      server_label: import_v42.z.string(),
+      tools: import_v42.z.array(import_v42.z.any()).optional(),
+      error: import_v42.z.string().optional()
     }),
-
-      type:
-      id:
-      server_label:
-      name:
-      arguments:
-      output:
-      error:
+    import_v42.z.object({
+      type: import_v42.z.literal("mcp_call"),
+      id: import_v42.z.string(),
+      server_label: import_v42.z.string(),
+      name: import_v42.z.string(),
+      arguments: import_v42.z.string(),
+      output: import_v42.z.string().optional(),
+      error: import_v42.z.string().optional()
     }),
-
-      type:
-      id:
-      call_id:
-      name:
-      arguments:
-      output:
-      error:
+    import_v42.z.object({
+      type: import_v42.z.literal("function_call"),
+      id: import_v42.z.string(),
+      call_id: import_v42.z.string(),
+      name: import_v42.z.string(),
+      arguments: import_v42.z.string(),
+      output: import_v42.z.string().optional(),
+      error: import_v42.z.string().optional()
     })
   ]),
-  sequence_number:
+  sequence_number: import_v42.z.number()
 });
-var responseOutputItemDoneSchema =
-  type:
-  output_index:
-  item:
-
-      type:
-      id:
-      role:
-      status:
-      content:
+var responseOutputItemDoneSchema = import_v42.z.object({
+  type: import_v42.z.literal("response.output_item.done"),
+  output_index: import_v42.z.number(),
+  item: import_v42.z.discriminatedUnion("type", [
+    import_v42.z.object({
+      type: import_v42.z.literal("message"),
+      id: import_v42.z.string(),
+      role: import_v42.z.string().optional(),
+      status: import_v42.z.string().optional(),
+      content: import_v42.z.array(import_v42.z.any()).optional()
     }),
-
-      type:
-      id:
-      server_label:
-      tools:
-      error:
+    import_v42.z.object({
+      type: import_v42.z.literal("mcp_list_tools"),
+      id: import_v42.z.string(),
+      server_label: import_v42.z.string(),
+      tools: import_v42.z.array(import_v42.z.any()).optional(),
+      error: import_v42.z.string().optional()
     }),
-
-      type:
-      id:
-      server_label:
-      name:
-      arguments:
-      output:
-      error:
+    import_v42.z.object({
+      type: import_v42.z.literal("mcp_call"),
+      id: import_v42.z.string(),
+      server_label: import_v42.z.string(),
+      name: import_v42.z.string(),
+      arguments: import_v42.z.string(),
+      output: import_v42.z.string().optional(),
+      error: import_v42.z.string().optional()
     }),
-
-      type:
-      id:
-      call_id:
-      name:
-      arguments:
-      output:
-      error:
+    import_v42.z.object({
+      type: import_v42.z.literal("function_call"),
+      id: import_v42.z.string(),
+      call_id: import_v42.z.string(),
+      name: import_v42.z.string(),
+      arguments: import_v42.z.string(),
+      output: import_v42.z.string().optional(),
+      error: import_v42.z.string().optional()
+    }),
+    import_v42.z.object({
+      type: import_v42.z.literal("reasoning"),
+      id: import_v42.z.string(),
+      status: import_v42.z.string().optional(),
+      content: import_v42.z.array(import_v42.z.any()).optional(),
+      summary: import_v42.z.array(import_v42.z.any()).optional()
     })
   ]),
-  sequence_number:
+  sequence_number: import_v42.z.number()
+});
+var textDeltaChunkSchema = import_v42.z.object({
+  type: import_v42.z.literal("response.output_text.delta"),
+  item_id: import_v42.z.string(),
+  output_index: import_v42.z.number(),
+  content_index: import_v42.z.number(),
+  delta: import_v42.z.string(),
+  sequence_number: import_v42.z.number()
+});
+var reasoningTextDeltaChunkSchema = import_v42.z.object({
+  type: import_v42.z.literal("response.reasoning_text.delta"),
+  item_id: import_v42.z.string(),
+  output_index: import_v42.z.number(),
+  content_index: import_v42.z.number(),
+  delta: import_v42.z.string(),
+  sequence_number: import_v42.z.number()
 });
-var
-  type:
-  item_id:
-  output_index:
-  content_index:
-
-  sequence_number:
+var reasoningTextEndChunkSchema = import_v42.z.object({
+  type: import_v42.z.literal("response.reasoning_text.done"),
+  item_id: import_v42.z.string(),
+  output_index: import_v42.z.number(),
+  content_index: import_v42.z.number(),
+  text: import_v42.z.string(),
+  sequence_number: import_v42.z.number()
 });
-var responseCompletedChunkSchema =
-  type:
+var responseCompletedChunkSchema = import_v42.z.object({
+  type: import_v42.z.literal("response.completed"),
   response: huggingfaceResponsesResponseSchema,
-  sequence_number:
+  sequence_number: import_v42.z.number()
 });
-var responseCreatedChunkSchema =
-  type:
-  response:
-    id:
-    object:
-    created_at:
-    status:
-    model:
+var responseCreatedChunkSchema = import_v42.z.object({
+  type: import_v42.z.literal("response.created"),
+  response: import_v42.z.object({
+    id: import_v42.z.string(),
+    object: import_v42.z.string(),
+    created_at: import_v42.z.number(),
+    status: import_v42.z.string(),
+    model: import_v42.z.string()
   })
 });
-var huggingfaceResponsesChunkSchema =
+var huggingfaceResponsesChunkSchema = import_v42.z.union([
   responseOutputItemAddedSchema,
   responseOutputItemDoneSchema,
+  reasoningTextDeltaChunkSchema,
+  reasoningTextEndChunkSchema,
   textDeltaChunkSchema,
   responseCompletedChunkSchema,
   responseCreatedChunkSchema,
-
+  import_v42.z.object({ type: import_v42.z.string() }).loose()
   // fallback for unknown chunks
 ]);
 function isResponseOutputItemAddedChunk(chunk) {
@@ -765,6 +920,12 @@ function isResponseOutputItemDoneChunk(chunk) {
 function isTextDeltaChunk(chunk) {
   return chunk.type === "response.output_text.delta";
 }
+function isReasoningDeltaChunk(chunk) {
+  return chunk.type === "response.reasoning_text.delta";
+}
+function isReasoningEndChunk(chunk) {
+  return chunk.type === "response.reasoning_text.done";
+}
 function isResponseCompletedChunk(chunk) {
   return chunk.type === "response.completed";
 }
@@ -795,15 +956,17 @@ function createHuggingFace(options = {}) {
     });
   };
   const provider = (modelId) => createResponsesModel(modelId);
+  provider.specificationVersion = "v3";
   provider.languageModel = createResponsesModel;
   provider.responses = createResponsesModel;
-  provider.
+  provider.embeddingModel = (modelId) => {
     throw new import_provider3.NoSuchModelError({
       modelId,
-      modelType: "
+      modelType: "embeddingModel",
       message: "Hugging Face Responses API does not support text embeddings. Use the Hugging Face Inference API directly for embeddings."
     });
   };
+  provider.textEmbeddingModel = provider.embeddingModel;
   provider.imageModel = (modelId) => {
     throw new import_provider3.NoSuchModelError({
       modelId,