@huggingface/inference 3.5.2 → 3.6.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (82)
  1. package/dist/browser/index.cjs +1652 -0
  2. package/dist/browser/index.js +1652 -0
  3. package/dist/index.cjs +277 -971
  4. package/dist/index.js +268 -982
  5. package/dist/src/index.d.ts.map +1 -1
  6. package/dist/src/lib/makeRequestOptions.d.ts +16 -1
  7. package/dist/src/lib/makeRequestOptions.d.ts.map +1 -1
  8. package/dist/src/providers/novita.d.ts.map +1 -1
  9. package/dist/src/snippets/getInferenceSnippets.d.ts +4 -0
  10. package/dist/src/snippets/getInferenceSnippets.d.ts.map +1 -0
  11. package/dist/src/snippets/index.d.ts +1 -4
  12. package/dist/src/snippets/index.d.ts.map +1 -1
  13. package/dist/src/tasks/cv/textToVideo.d.ts.map +1 -1
  14. package/package.json +15 -6
  15. package/src/index.ts +1 -1
  16. package/src/lib/makeRequestOptions.ts +37 -10
  17. package/src/providers/fireworks-ai.ts +1 -1
  18. package/src/providers/hf-inference.ts +1 -1
  19. package/src/providers/nebius.ts +3 -3
  20. package/src/providers/novita.ts +7 -6
  21. package/src/providers/sambanova.ts +1 -1
  22. package/src/providers/together.ts +3 -3
  23. package/src/snippets/getInferenceSnippets.ts +398 -0
  24. package/src/snippets/index.ts +1 -5
  25. package/src/snippets/templates/js/fetch/basic.jinja +19 -0
  26. package/src/snippets/templates/js/fetch/basicAudio.jinja +19 -0
  27. package/src/snippets/templates/js/fetch/basicImage.jinja +19 -0
  28. package/src/snippets/templates/js/fetch/textToAudio.jinja +41 -0
  29. package/src/snippets/templates/js/fetch/textToImage.jinja +19 -0
  30. package/src/snippets/templates/js/fetch/zeroShotClassification.jinja +22 -0
  31. package/src/snippets/templates/js/huggingface.js/basic.jinja +11 -0
  32. package/src/snippets/templates/js/huggingface.js/basicAudio.jinja +13 -0
  33. package/src/snippets/templates/js/huggingface.js/basicImage.jinja +13 -0
  34. package/src/snippets/templates/js/huggingface.js/conversational.jinja +11 -0
  35. package/src/snippets/templates/js/huggingface.js/conversationalStream.jinja +19 -0
  36. package/src/snippets/templates/js/huggingface.js/textToImage.jinja +11 -0
  37. package/src/snippets/templates/js/huggingface.js/textToVideo.jinja +10 -0
  38. package/src/snippets/templates/js/openai/conversational.jinja +13 -0
  39. package/src/snippets/templates/js/openai/conversationalStream.jinja +22 -0
  40. package/src/snippets/templates/python/fal_client/textToImage.jinja +11 -0
  41. package/src/snippets/templates/python/huggingface_hub/basic.jinja +4 -0
  42. package/src/snippets/templates/python/huggingface_hub/basicAudio.jinja +1 -0
  43. package/src/snippets/templates/python/huggingface_hub/basicImage.jinja +1 -0
  44. package/src/snippets/templates/python/huggingface_hub/conversational.jinja +6 -0
  45. package/src/snippets/templates/python/huggingface_hub/conversationalStream.jinja +8 -0
  46. package/src/snippets/templates/python/huggingface_hub/documentQuestionAnswering.jinja +5 -0
  47. package/src/snippets/templates/python/huggingface_hub/imageToImage.jinja +6 -0
  48. package/src/snippets/templates/python/huggingface_hub/importInferenceClient.jinja +6 -0
  49. package/src/snippets/templates/python/huggingface_hub/textToImage.jinja +5 -0
  50. package/src/snippets/templates/python/huggingface_hub/textToVideo.jinja +4 -0
  51. package/src/snippets/templates/python/openai/conversational.jinja +13 -0
  52. package/src/snippets/templates/python/openai/conversationalStream.jinja +15 -0
  53. package/src/snippets/templates/python/requests/basic.jinja +7 -0
  54. package/src/snippets/templates/python/requests/basicAudio.jinja +7 -0
  55. package/src/snippets/templates/python/requests/basicImage.jinja +7 -0
  56. package/src/snippets/templates/python/requests/conversational.jinja +9 -0
  57. package/src/snippets/templates/python/requests/conversationalStream.jinja +16 -0
  58. package/src/snippets/templates/python/requests/documentQuestionAnswering.jinja +13 -0
  59. package/src/snippets/templates/python/requests/imageToImage.jinja +15 -0
  60. package/src/snippets/templates/python/requests/importRequests.jinja +10 -0
  61. package/src/snippets/templates/python/requests/tabular.jinja +9 -0
  62. package/src/snippets/templates/python/requests/textToAudio.jinja +23 -0
  63. package/src/snippets/templates/python/requests/textToImage.jinja +14 -0
  64. package/src/snippets/templates/python/requests/zeroShotClassification.jinja +8 -0
  65. package/src/snippets/templates/python/requests/zeroShotImageClassification.jinja +14 -0
  66. package/src/snippets/templates/sh/curl/basic.jinja +7 -0
  67. package/src/snippets/templates/sh/curl/basicAudio.jinja +5 -0
  68. package/src/snippets/templates/sh/curl/basicImage.jinja +5 -0
  69. package/src/snippets/templates/sh/curl/conversational.jinja +7 -0
  70. package/src/snippets/templates/sh/curl/conversationalStream.jinja +7 -0
  71. package/src/snippets/templates/sh/curl/zeroShotClassification.jinja +5 -0
  72. package/src/tasks/cv/textToVideo.ts +25 -5
  73. package/src/vendor/fetch-event-source/LICENSE +21 -0
  74. package/dist/src/snippets/curl.d.ts +0 -17
  75. package/dist/src/snippets/curl.d.ts.map +0 -1
  76. package/dist/src/snippets/js.d.ts +0 -21
  77. package/dist/src/snippets/js.d.ts.map +0 -1
  78. package/dist/src/snippets/python.d.ts +0 -4
  79. package/dist/src/snippets/python.d.ts.map +0 -1
  80. package/src/snippets/curl.ts +0 -177
  81. package/src/snippets/js.ts +0 -475
  82. package/src/snippets/python.ts +0 -563
package/dist/browser/index.cjs
@@ -0,0 +1,1652 @@
+ "use strict";Object.defineProperty(exports, "__esModule", {value: true});var __defProp = Object.defineProperty;
+ var __defNormalProp = (obj, key, value) => key in obj ? __defProp(obj, key, { enumerable: true, configurable: true, writable: true, value }) : obj[key] = value;
+ var __export = (target, all) => {
+ for (var name2 in all)
+ __defProp(target, name2, { get: all[name2], enumerable: true });
+ };
+ var __publicField = (obj, key, value) => {
+ __defNormalProp(obj, typeof key !== "symbol" ? key + "" : key, value);
+ return value;
+ };
+
+ // src/tasks/index.ts
+ var tasks_exports = {};
+ __export(tasks_exports, {
+ audioClassification: () => audioClassification,
+ audioToAudio: () => audioToAudio,
+ automaticSpeechRecognition: () => automaticSpeechRecognition,
+ chatCompletion: () => chatCompletion,
+ chatCompletionStream: () => chatCompletionStream,
+ documentQuestionAnswering: () => documentQuestionAnswering,
+ featureExtraction: () => featureExtraction,
+ fillMask: () => fillMask,
+ imageClassification: () => imageClassification,
+ imageSegmentation: () => imageSegmentation,
+ imageToImage: () => imageToImage,
+ imageToText: () => imageToText,
+ objectDetection: () => objectDetection,
+ questionAnswering: () => questionAnswering,
+ request: () => request,
+ sentenceSimilarity: () => sentenceSimilarity,
+ streamingRequest: () => streamingRequest,
+ summarization: () => summarization,
+ tableQuestionAnswering: () => tableQuestionAnswering,
+ tabularClassification: () => tabularClassification,
+ tabularRegression: () => tabularRegression,
+ textClassification: () => textClassification,
+ textGeneration: () => textGeneration,
+ textGenerationStream: () => textGenerationStream,
+ textToImage: () => textToImage,
+ textToSpeech: () => textToSpeech,
+ textToVideo: () => textToVideo,
+ tokenClassification: () => tokenClassification,
+ translation: () => translation,
+ visualQuestionAnswering: () => visualQuestionAnswering,
+ zeroShotClassification: () => zeroShotClassification,
+ zeroShotImageClassification: () => zeroShotImageClassification
+ });
+
+ // src/config.ts
+ var HF_HUB_URL = "https://huggingface.co";
+ var HF_ROUTER_URL = "https://router.huggingface.co";
+
+ // src/providers/black-forest-labs.ts
+ var BLACK_FOREST_LABS_AI_API_BASE_URL = "https://api.us1.bfl.ai";
+ var makeBody = (params) => {
+ return params.args;
+ };
+ var makeHeaders = (params) => {
+ if (params.authMethod === "provider-key") {
+ return { "X-Key": `${params.accessToken}` };
+ } else {
+ return { Authorization: `Bearer ${params.accessToken}` };
+ }
+ };
+ var makeUrl = (params) => {
+ return `${params.baseUrl}/v1/${params.model}`;
+ };
+ var BLACK_FOREST_LABS_CONFIG = {
+ baseUrl: BLACK_FOREST_LABS_AI_API_BASE_URL,
+ makeBody,
+ makeHeaders,
+ makeUrl
+ };
+
+ // src/providers/cerebras.ts
+ var CEREBRAS_API_BASE_URL = "https://api.cerebras.ai";
+ var makeBody2 = (params) => {
+ return {
+ ...params.args,
+ model: params.model
+ };
+ };
+ var makeHeaders2 = (params) => {
+ return { Authorization: `Bearer ${params.accessToken}` };
+ };
+ var makeUrl2 = (params) => {
+ return `${params.baseUrl}/v1/chat/completions`;
+ };
+ var CEREBRAS_CONFIG = {
+ baseUrl: CEREBRAS_API_BASE_URL,
+ makeBody: makeBody2,
+ makeHeaders: makeHeaders2,
+ makeUrl: makeUrl2
+ };
+
+ // src/providers/cohere.ts
+ var COHERE_API_BASE_URL = "https://api.cohere.com";
+ var makeBody3 = (params) => {
+ return {
+ ...params.args,
+ model: params.model
+ };
+ };
+ var makeHeaders3 = (params) => {
+ return { Authorization: `Bearer ${params.accessToken}` };
+ };
+ var makeUrl3 = (params) => {
+ return `${params.baseUrl}/compatibility/v1/chat/completions`;
+ };
+ var COHERE_CONFIG = {
+ baseUrl: COHERE_API_BASE_URL,
+ makeBody: makeBody3,
+ makeHeaders: makeHeaders3,
+ makeUrl: makeUrl3
+ };
+
+ // src/providers/fal-ai.ts
+ var FAL_AI_API_BASE_URL = "https://fal.run";
+ var makeBody4 = (params) => {
+ return params.args;
+ };
+ var makeHeaders4 = (params) => {
+ return {
+ Authorization: params.authMethod === "provider-key" ? `Key ${params.accessToken}` : `Bearer ${params.accessToken}`
+ };
+ };
+ var makeUrl4 = (params) => {
+ return `${params.baseUrl}/${params.model}`;
+ };
+ var FAL_AI_CONFIG = {
+ baseUrl: FAL_AI_API_BASE_URL,
+ makeBody: makeBody4,
+ makeHeaders: makeHeaders4,
+ makeUrl: makeUrl4
+ };
+
+ // src/providers/fireworks-ai.ts
+ var FIREWORKS_AI_API_BASE_URL = "https://api.fireworks.ai";
+ var makeBody5 = (params) => {
+ return {
+ ...params.args,
+ ...params.chatCompletion ? { model: params.model } : void 0
+ };
+ };
+ var makeHeaders5 = (params) => {
+ return { Authorization: `Bearer ${params.accessToken}` };
+ };
+ var makeUrl5 = (params) => {
+ if (params.chatCompletion) {
+ return `${params.baseUrl}/inference/v1/chat/completions`;
+ }
+ return `${params.baseUrl}/inference`;
+ };
+ var FIREWORKS_AI_CONFIG = {
+ baseUrl: FIREWORKS_AI_API_BASE_URL,
+ makeBody: makeBody5,
+ makeHeaders: makeHeaders5,
+ makeUrl: makeUrl5
+ };
+
+ // src/providers/hf-inference.ts
+ var makeBody6 = (params) => {
+ return {
+ ...params.args,
+ ...params.chatCompletion ? { model: params.model } : void 0
+ };
+ };
+ var makeHeaders6 = (params) => {
+ return { Authorization: `Bearer ${params.accessToken}` };
+ };
+ var makeUrl6 = (params) => {
+ if (params.task && ["feature-extraction", "sentence-similarity"].includes(params.task)) {
+ return `${params.baseUrl}/pipeline/${params.task}/${params.model}`;
+ }
+ if (params.chatCompletion) {
+ return `${params.baseUrl}/models/${params.model}/v1/chat/completions`;
+ }
+ return `${params.baseUrl}/models/${params.model}`;
+ };
+ var HF_INFERENCE_CONFIG = {
+ baseUrl: `${HF_ROUTER_URL}/hf-inference`,
+ makeBody: makeBody6,
+ makeHeaders: makeHeaders6,
+ makeUrl: makeUrl6
+ };
+
+ // src/providers/hyperbolic.ts
+ var HYPERBOLIC_API_BASE_URL = "https://api.hyperbolic.xyz";
+ var makeBody7 = (params) => {
+ return {
+ ...params.args,
+ ...params.task === "text-to-image" ? { model_name: params.model } : { model: params.model }
+ };
+ };
+ var makeHeaders7 = (params) => {
+ return { Authorization: `Bearer ${params.accessToken}` };
+ };
+ var makeUrl7 = (params) => {
+ if (params.task === "text-to-image") {
+ return `${params.baseUrl}/v1/images/generations`;
+ }
+ return `${params.baseUrl}/v1/chat/completions`;
+ };
+ var HYPERBOLIC_CONFIG = {
+ baseUrl: HYPERBOLIC_API_BASE_URL,
+ makeBody: makeBody7,
+ makeHeaders: makeHeaders7,
+ makeUrl: makeUrl7
+ };
+
+ // src/providers/nebius.ts
+ var NEBIUS_API_BASE_URL = "https://api.studio.nebius.ai";
+ var makeBody8 = (params) => {
+ return {
+ ...params.args,
+ model: params.model
+ };
+ };
+ var makeHeaders8 = (params) => {
+ return { Authorization: `Bearer ${params.accessToken}` };
+ };
+ var makeUrl8 = (params) => {
+ if (params.task === "text-to-image") {
+ return `${params.baseUrl}/v1/images/generations`;
+ }
+ if (params.chatCompletion) {
+ return `${params.baseUrl}/v1/chat/completions`;
+ }
+ if (params.task === "text-generation") {
+ return `${params.baseUrl}/v1/completions`;
+ }
+ return params.baseUrl;
+ };
+ var NEBIUS_CONFIG = {
+ baseUrl: NEBIUS_API_BASE_URL,
+ makeBody: makeBody8,
+ makeHeaders: makeHeaders8,
+ makeUrl: makeUrl8
+ };
+
+ // src/providers/novita.ts
+ var NOVITA_API_BASE_URL = "https://api.novita.ai";
+ var makeBody9 = (params) => {
+ return {
+ ...params.args,
+ ...params.chatCompletion ? { model: params.model } : void 0
+ };
+ };
+ var makeHeaders9 = (params) => {
+ return { Authorization: `Bearer ${params.accessToken}` };
+ };
+ var makeUrl9 = (params) => {
+ if (params.chatCompletion) {
+ return `${params.baseUrl}/v3/openai/chat/completions`;
+ } else if (params.task === "text-generation") {
+ return `${params.baseUrl}/v3/openai/completions`;
+ } else if (params.task === "text-to-video") {
+ return `${params.baseUrl}/v3/hf/${params.model}`;
+ }
+ return params.baseUrl;
+ };
+ var NOVITA_CONFIG = {
+ baseUrl: NOVITA_API_BASE_URL,
+ makeBody: makeBody9,
+ makeHeaders: makeHeaders9,
+ makeUrl: makeUrl9
+ };
+
+ // src/providers/replicate.ts
+ var REPLICATE_API_BASE_URL = "https://api.replicate.com";
+ var makeBody10 = (params) => {
+ return {
+ input: params.args,
+ version: params.model.includes(":") ? params.model.split(":")[1] : void 0
+ };
+ };
+ var makeHeaders10 = (params) => {
+ return { Authorization: `Bearer ${params.accessToken}`, Prefer: "wait" };
+ };
+ var makeUrl10 = (params) => {
+ if (params.model.includes(":")) {
+ return `${params.baseUrl}/v1/predictions`;
+ }
+ return `${params.baseUrl}/v1/models/${params.model}/predictions`;
+ };
+ var REPLICATE_CONFIG = {
+ baseUrl: REPLICATE_API_BASE_URL,
+ makeBody: makeBody10,
+ makeHeaders: makeHeaders10,
+ makeUrl: makeUrl10
+ };
+
+ // src/providers/sambanova.ts
+ var SAMBANOVA_API_BASE_URL = "https://api.sambanova.ai";
+ var makeBody11 = (params) => {
+ return {
+ ...params.args,
+ ...params.chatCompletion ? { model: params.model } : void 0
+ };
+ };
+ var makeHeaders11 = (params) => {
+ return { Authorization: `Bearer ${params.accessToken}` };
+ };
+ var makeUrl11 = (params) => {
+ if (params.chatCompletion) {
+ return `${params.baseUrl}/v1/chat/completions`;
+ }
+ return params.baseUrl;
+ };
+ var SAMBANOVA_CONFIG = {
+ baseUrl: SAMBANOVA_API_BASE_URL,
+ makeBody: makeBody11,
+ makeHeaders: makeHeaders11,
+ makeUrl: makeUrl11
+ };
+
+ // src/providers/together.ts
+ var TOGETHER_API_BASE_URL = "https://api.together.xyz";
+ var makeBody12 = (params) => {
+ return {
+ ...params.args,
+ model: params.model
+ };
+ };
+ var makeHeaders12 = (params) => {
+ return { Authorization: `Bearer ${params.accessToken}` };
+ };
+ var makeUrl12 = (params) => {
+ if (params.task === "text-to-image") {
+ return `${params.baseUrl}/v1/images/generations`;
+ }
+ if (params.chatCompletion) {
+ return `${params.baseUrl}/v1/chat/completions`;
+ }
+ if (params.task === "text-generation") {
+ return `${params.baseUrl}/v1/completions`;
+ }
+ return params.baseUrl;
+ };
+ var TOGETHER_CONFIG = {
+ baseUrl: TOGETHER_API_BASE_URL,
+ makeBody: makeBody12,
+ makeHeaders: makeHeaders12,
+ makeUrl: makeUrl12
+ };
+
+ // src/providers/openai.ts
+ var OPENAI_API_BASE_URL = "https://api.openai.com";
+ var makeBody13 = (params) => {
+ if (!params.chatCompletion) {
+ throw new Error("OpenAI only supports chat completions.");
+ }
+ return {
+ ...params.args,
+ model: params.model
+ };
+ };
+ var makeHeaders13 = (params) => {
+ return { Authorization: `Bearer ${params.accessToken}` };
+ };
+ var makeUrl13 = (params) => {
+ if (!params.chatCompletion) {
+ throw new Error("OpenAI only supports chat completions.");
+ }
+ return `${params.baseUrl}/v1/chat/completions`;
+ };
+ var OPENAI_CONFIG = {
+ baseUrl: OPENAI_API_BASE_URL,
+ makeBody: makeBody13,
+ makeHeaders: makeHeaders13,
+ makeUrl: makeUrl13,
+ clientSideRoutingOnly: true
+ };
+
+ // src/lib/isUrl.ts
+ function isUrl(modelOrUrl) {
+ return /^http(s?):/.test(modelOrUrl) || modelOrUrl.startsWith("/");
+ }
+
+ // package.json
+ var name = "@huggingface/inference";
+ var version = "3.6.0";
+
+ // src/providers/consts.ts
+ var HARDCODED_MODEL_ID_MAPPING = {
+ /**
+ * "HF model ID" => "Model ID on Inference Provider's side"
+ *
+ * Example:
+ * "Qwen/Qwen2.5-Coder-32B-Instruct": "Qwen2.5-Coder-32B-Instruct",
+ */
+ "black-forest-labs": {},
+ cerebras: {},
+ cohere: {},
+ "fal-ai": {},
+ "fireworks-ai": {},
+ "hf-inference": {},
+ hyperbolic: {},
+ nebius: {},
+ novita: {},
+ openai: {},
+ replicate: {},
+ sambanova: {},
+ together: {}
+ };
+
+ // src/lib/getProviderModelId.ts
+ var inferenceProviderMappingCache = /* @__PURE__ */ new Map();
+ async function getProviderModelId(params, args, options = {}) {
+ var _a, _b, _c;
+ if (params.provider === "hf-inference") {
+ return params.model;
+ }
+ if (!options.task) {
+ throw new Error("task must be specified when using a third-party provider");
+ }
+ const task = options.task === "text-generation" && options.chatCompletion ? "conversational" : options.task;
+ if ((_a = HARDCODED_MODEL_ID_MAPPING[params.provider]) == null ? void 0 : _a[params.model]) {
+ return HARDCODED_MODEL_ID_MAPPING[params.provider][params.model];
+ }
+ let inferenceProviderMapping;
+ if (inferenceProviderMappingCache.has(params.model)) {
+ inferenceProviderMapping = inferenceProviderMappingCache.get(params.model);
+ } else {
+ inferenceProviderMapping = await ((_b = options == null ? void 0 : options.fetch) != null ? _b : fetch)(
+ `${HF_HUB_URL}/api/models/${params.model}?expand[]=inferenceProviderMapping`,
+ {
+ headers: ((_c = args.accessToken) == null ? void 0 : _c.startsWith("hf_")) ? { Authorization: `Bearer ${args.accessToken}` } : {}
+ }
+ ).then((resp) => resp.json()).then((json) => json.inferenceProviderMapping).catch(() => null);
+ }
+ if (!inferenceProviderMapping) {
+ throw new Error(`We have not been able to find inference provider information for model ${params.model}.`);
+ }
+ const providerMapping = inferenceProviderMapping[params.provider];
+ if (providerMapping) {
+ if (providerMapping.task !== task) {
+ throw new Error(
+ `Model ${params.model} is not supported for task ${task} and provider ${params.provider}. Supported task: ${providerMapping.task}.`
+ );
+ }
+ if (providerMapping.status === "staging") {
+ console.warn(
+ `Model ${params.model} is in staging mode for provider ${params.provider}. Meant for test purposes only.`
+ );
+ }
+ return providerMapping.providerId;
+ }
+ throw new Error(`Model ${params.model} is not supported for provider ${params.provider}.`);
+ }
+
+ // src/lib/makeRequestOptions.ts
+ var HF_HUB_INFERENCE_PROXY_TEMPLATE = `${HF_ROUTER_URL}/{{PROVIDER}}`;
+ var tasks = null;
+ var providerConfigs = {
+ "black-forest-labs": BLACK_FOREST_LABS_CONFIG,
+ cerebras: CEREBRAS_CONFIG,
+ cohere: COHERE_CONFIG,
+ "fal-ai": FAL_AI_CONFIG,
+ "fireworks-ai": FIREWORKS_AI_CONFIG,
+ "hf-inference": HF_INFERENCE_CONFIG,
+ hyperbolic: HYPERBOLIC_CONFIG,
+ openai: OPENAI_CONFIG,
+ nebius: NEBIUS_CONFIG,
+ novita: NOVITA_CONFIG,
+ replicate: REPLICATE_CONFIG,
+ sambanova: SAMBANOVA_CONFIG,
+ together: TOGETHER_CONFIG
+ };
+ async function makeRequestOptions(args, options) {
+ const { provider: maybeProvider, model: maybeModel } = args;
+ const provider = maybeProvider != null ? maybeProvider : "hf-inference";
+ const providerConfig = providerConfigs[provider];
+ const { task, chatCompletion: chatCompletion2 } = options != null ? options : {};
+ if (args.endpointUrl && provider !== "hf-inference") {
+ throw new Error(`Cannot use endpointUrl with a third-party provider.`);
+ }
+ if (maybeModel && isUrl(maybeModel)) {
+ throw new Error(`Model URLs are no longer supported. Use endpointUrl instead.`);
+ }
+ if (!maybeModel && !task) {
+ throw new Error("No model provided, and no task has been specified.");
+ }
+ if (!providerConfig) {
+ throw new Error(`No provider config found for provider ${provider}`);
+ }
+ if (providerConfig.clientSideRoutingOnly && !maybeModel) {
+ throw new Error(`Provider ${provider} requires a model ID to be passed directly.`);
+ }
+ const hfModel = maybeModel != null ? maybeModel : await loadDefaultModel(task);
+ const resolvedModel = providerConfig.clientSideRoutingOnly ? (
+ // eslint-disable-next-line @typescript-eslint/no-non-null-assertion
+ removeProviderPrefix(maybeModel, provider)
+ ) : await getProviderModelId({ model: hfModel, provider }, args, {
+ task,
+ chatCompletion: chatCompletion2,
+ fetch: options == null ? void 0 : options.fetch
+ });
+ return makeRequestOptionsFromResolvedModel(resolvedModel, args, options);
+ }
+ function makeRequestOptionsFromResolvedModel(resolvedModel, args, options) {
+ const { accessToken, endpointUrl, provider: maybeProvider, model, ...remainingArgs } = args;
+ const provider = maybeProvider != null ? maybeProvider : "hf-inference";
+ const providerConfig = providerConfigs[provider];
+ const { includeCredentials, task, chatCompletion: chatCompletion2, signal } = options != null ? options : {};
+ const authMethod = (() => {
+ if (providerConfig.clientSideRoutingOnly) {
+ if (accessToken && accessToken.startsWith("hf_")) {
+ throw new Error(`Provider ${provider} is closed-source and does not support HF tokens.`);
+ }
+ return "provider-key";
+ }
+ if (accessToken) {
+ return accessToken.startsWith("hf_") ? "hf-token" : "provider-key";
+ }
+ if (includeCredentials === "include") {
+ return "credentials-include";
+ }
+ return "none";
+ })();
+ const url = endpointUrl ? chatCompletion2 ? endpointUrl + `/v1/chat/completions` : endpointUrl : providerConfig.makeUrl({
+ baseUrl: authMethod !== "provider-key" ? HF_HUB_INFERENCE_PROXY_TEMPLATE.replace("{{PROVIDER}}", provider) : providerConfig.baseUrl,
+ model: resolvedModel,
+ chatCompletion: chatCompletion2,
+ task
+ });
+ const binary = "data" in args && !!args.data;
+ const headers = providerConfig.makeHeaders({
+ accessToken,
+ authMethod
+ });
+ if (!binary) {
+ headers["Content-Type"] = "application/json";
+ }
+ const ownUserAgent = `${name}/${version}`;
+ const userAgent = [ownUserAgent, typeof navigator !== "undefined" ? navigator.userAgent : void 0].filter((x) => x !== void 0).join(" ");
+ headers["User-Agent"] = userAgent;
+ const body = binary ? args.data : JSON.stringify(
+ providerConfig.makeBody({
+ args: remainingArgs,
+ model: resolvedModel,
+ task,
+ chatCompletion: chatCompletion2
+ })
+ );
+ let credentials;
+ if (typeof includeCredentials === "string") {
+ credentials = includeCredentials;
+ } else if (includeCredentials === true) {
+ credentials = "include";
+ }
+ const info = {
+ headers,
+ method: "POST",
+ body,
+ ...credentials ? { credentials } : void 0,
+ signal
+ };
+ return { url, info };
+ }
+ async function loadDefaultModel(task) {
+ var _a;
+ if (!tasks) {
+ tasks = await loadTaskInfo();
+ }
+ const taskInfo = tasks[task];
+ if (((_a = taskInfo == null ? void 0 : taskInfo.models.length) != null ? _a : 0) <= 0) {
+ throw new Error(`No default model defined for task ${task}, please define the model explicitly.`);
+ }
+ return taskInfo.models[0].id;
+ }
+ async function loadTaskInfo() {
+ const res = await fetch(`${HF_HUB_URL}/api/tasks`);
+ if (!res.ok) {
+ throw new Error("Failed to load tasks definitions from Hugging Face Hub.");
+ }
+ return await res.json();
+ }
+ function removeProviderPrefix(model, provider) {
+ if (!model.startsWith(`${provider}/`)) {
+ throw new Error(`Models from ${provider} must be prefixed by "${provider}/". Got "${model}".`);
+ }
+ return model.slice(provider.length + 1);
+ }
+
+ // src/tasks/custom/request.ts
+ async function request(args, options) {
+ var _a, _b, _c;
+ const { url, info } = await makeRequestOptions(args, options);
+ const response = await ((_a = options == null ? void 0 : options.fetch) != null ? _a : fetch)(url, info);
+ if ((options == null ? void 0 : options.retry_on_error) !== false && response.status === 503) {
+ return request(args, options);
+ }
+ if (!response.ok) {
+ const contentType = response.headers.get("Content-Type");
+ if (["application/json", "application/problem+json"].some((ct) => contentType == null ? void 0 : contentType.startsWith(ct))) {
+ const output = await response.json();
+ if ([400, 422, 404, 500].includes(response.status) && (options == null ? void 0 : options.chatCompletion)) {
+ throw new Error(
+ `Server ${args.model} does not seem to support chat completion. Error: ${JSON.stringify(output.error)}`
+ );
+ }
+ if (output.error || output.detail) {
+ throw new Error(JSON.stringify((_b = output.error) != null ? _b : output.detail));
+ } else {
+ throw new Error(output);
+ }
+ }
+ const message = (contentType == null ? void 0 : contentType.startsWith("text/plain;")) ? await response.text() : void 0;
+ throw new Error(message != null ? message : "An error occurred while fetching the blob");
+ }
+ if ((_c = response.headers.get("Content-Type")) == null ? void 0 : _c.startsWith("application/json")) {
+ return await response.json();
+ }
+ return await response.blob();
+ }
+
+ // src/vendor/fetch-event-source/parse.ts
+ function getLines(onLine) {
+ let buffer;
+ let position;
+ let fieldLength;
+ let discardTrailingNewline = false;
+ return function onChunk(arr) {
+ if (buffer === void 0) {
+ buffer = arr;
+ position = 0;
+ fieldLength = -1;
+ } else {
+ buffer = concat(buffer, arr);
+ }
+ const bufLength = buffer.length;
+ let lineStart = 0;
+ while (position < bufLength) {
+ if (discardTrailingNewline) {
+ if (buffer[position] === 10 /* NewLine */) {
+ lineStart = ++position;
+ }
+ discardTrailingNewline = false;
+ }
+ let lineEnd = -1;
+ for (; position < bufLength && lineEnd === -1; ++position) {
+ switch (buffer[position]) {
+ case 58 /* Colon */:
+ if (fieldLength === -1) {
+ fieldLength = position - lineStart;
+ }
+ break;
+ case 13 /* CarriageReturn */:
+ discardTrailingNewline = true;
+ case 10 /* NewLine */:
+ lineEnd = position;
+ break;
+ }
+ }
+ if (lineEnd === -1) {
+ break;
+ }
+ onLine(buffer.subarray(lineStart, lineEnd), fieldLength);
+ lineStart = position;
+ fieldLength = -1;
+ }
+ if (lineStart === bufLength) {
+ buffer = void 0;
+ } else if (lineStart !== 0) {
+ buffer = buffer.subarray(lineStart);
+ position -= lineStart;
+ }
+ };
+ }
+ function getMessages(onId, onRetry, onMessage) {
+ let message = newMessage();
+ const decoder = new TextDecoder();
+ return function onLine(line, fieldLength) {
+ if (line.length === 0) {
+ onMessage == null ? void 0 : onMessage(message);
+ message = newMessage();
+ } else if (fieldLength > 0) {
+ const field = decoder.decode(line.subarray(0, fieldLength));
+ const valueOffset = fieldLength + (line[fieldLength + 1] === 32 /* Space */ ? 2 : 1);
+ const value = decoder.decode(line.subarray(valueOffset));
+ switch (field) {
+ case "data":
+ message.data = message.data ? message.data + "\n" + value : value;
+ break;
+ case "event":
+ message.event = value;
+ break;
+ case "id":
+ onId(message.id = value);
+ break;
+ case "retry":
+ const retry = parseInt(value, 10);
+ if (!isNaN(retry)) {
+ onRetry(message.retry = retry);
+ }
+ break;
+ }
+ }
+ };
+ }
+ function concat(a, b) {
+ const res = new Uint8Array(a.length + b.length);
+ res.set(a);
+ res.set(b, a.length);
+ return res;
+ }
+ function newMessage() {
+ return {
+ data: "",
+ event: "",
+ id: "",
+ retry: void 0
+ };
+ }
+
+ // src/tasks/custom/streamingRequest.ts
+ async function* streamingRequest(args, options) {
+ var _a, _b, _c;
+ const { url, info } = await makeRequestOptions({ ...args, stream: true }, options);
+ const response = await ((_a = options == null ? void 0 : options.fetch) != null ? _a : fetch)(url, info);
+ if ((options == null ? void 0 : options.retry_on_error) !== false && response.status === 503) {
+ return yield* streamingRequest(args, options);
+ }
+ if (!response.ok) {
+ if ((_b = response.headers.get("Content-Type")) == null ? void 0 : _b.startsWith("application/json")) {
+ const output = await response.json();
+ if ([400, 422, 404, 500].includes(response.status) && (options == null ? void 0 : options.chatCompletion)) {
+ throw new Error(`Server ${args.model} does not seem to support chat completion. Error: ${output.error}`);
+ }
+ if (typeof output.error === "string") {
+ throw new Error(output.error);
+ }
+ if (output.error && "message" in output.error && typeof output.error.message === "string") {
+ throw new Error(output.error.message);
+ }
+ }
+ throw new Error(`Server response contains error: ${response.status}`);
+ }
+ if (!((_c = response.headers.get("content-type")) == null ? void 0 : _c.startsWith("text/event-stream"))) {
+ throw new Error(
+ `Server does not support event stream content type, it returned ` + response.headers.get("content-type")
+ );
+ }
+ if (!response.body) {
+ return;
+ }
+ const reader = response.body.getReader();
+ let events = [];
+ const onEvent = (event) => {
+ events.push(event);
+ };
+ const onChunk = getLines(
+ getMessages(
+ () => {
+ },
+ () => {
+ },
+ onEvent
+ )
+ );
+ try {
+ while (true) {
+ const { done, value } = await reader.read();
+ if (done) {
+ return;
+ }
+ onChunk(value);
+ for (const event of events) {
+ if (event.data.length > 0) {
+ if (event.data === "[DONE]") {
+ return;
+ }
+ const data = JSON.parse(event.data);
+ if (typeof data === "object" && data !== null && "error" in data) {
+ const errorStr = typeof data.error === "string" ? data.error : typeof data.error === "object" && data.error && "message" in data.error && typeof data.error.message === "string" ? data.error.message : JSON.stringify(data.error);
+ throw new Error(`Error forwarded from backend: ` + errorStr);
+ }
+ yield data;
+ }
+ }
+ events = [];
+ }
+ } finally {
+ reader.releaseLock();
+ }
+ }
+
+ // src/lib/InferenceOutputError.ts
+ var InferenceOutputError = class extends TypeError {
+ constructor(message) {
+ super(
+ `Invalid inference output: ${message}. Use the 'request' method with the same parameters to do a custom call with no type checking.`
+ );
+ this.name = "InferenceOutputError";
+ }
+ };
+
+ // src/utils/pick.ts
+ function pick(o, props) {
+ return Object.assign(
+ {},
+ ...props.map((prop) => {
+ if (o[prop] !== void 0) {
+ return { [prop]: o[prop] };
+ }
+ })
+ );
+ }
+
+ // src/utils/typedInclude.ts
+ function typedInclude(arr, v) {
+ return arr.includes(v);
+ }
+
+ // src/utils/omit.ts
+ function omit(o, props) {
+ const propsArr = Array.isArray(props) ? props : [props];
+ const letsKeep = Object.keys(o).filter((prop) => !typedInclude(propsArr, prop));
+ return pick(o, letsKeep);
+ }
+
+ // src/tasks/audio/utils.ts
+ function preparePayload(args) {
+ return "data" in args ? args : {
+ ...omit(args, "inputs"),
+ data: args.inputs
+ };
+ }
+
+ // src/tasks/audio/audioClassification.ts
+ async function audioClassification(args, options) {
+ const payload = preparePayload(args);
+ const res = await request(payload, {
+ ...options,
+ task: "audio-classification"
+ });
+ const isValidOutput = Array.isArray(res) && res.every((x) => typeof x.label === "string" && typeof x.score === "number");
+ if (!isValidOutput) {
+ throw new InferenceOutputError("Expected Array<{label: string, score: number}>");
+ }
+ return res;
+ }
+
+ // src/utils/base64FromBytes.ts
+ function base64FromBytes(arr) {
+ if (globalThis.Buffer) {
+ return globalThis.Buffer.from(arr).toString("base64");
+ } else {
+ const bin = [];
+ arr.forEach((byte) => {
+ bin.push(String.fromCharCode(byte));
+ });
+ return globalThis.btoa(bin.join(""));
+ }
+ }
+
+ // src/tasks/audio/automaticSpeechRecognition.ts
+ async function automaticSpeechRecognition(args, options) {
+ const payload = await buildPayload(args);
+ const res = await request(payload, {
+ ...options,
+ task: "automatic-speech-recognition"
+ });
+ const isValidOutput = typeof (res == null ? void 0 : res.text) === "string";
+ if (!isValidOutput) {
+ throw new InferenceOutputError("Expected {text: string}");
+ }
+ return res;
+ }
+ var FAL_AI_SUPPORTED_BLOB_TYPES = ["audio/mpeg", "audio/mp4", "audio/wav", "audio/x-wav"];
+ async function buildPayload(args) {
+ if (args.provider === "fal-ai") {
+ const blob = "data" in args && args.data instanceof Blob ? args.data : "inputs" in args ? args.inputs : void 0;
+ const contentType = blob == null ? void 0 : blob.type;
+ if (!contentType) {
+ throw new Error(
+ `Unable to determine the input's content-type. Make sure you are passing a Blob when using provider fal-ai.`
+ );
+ }
+ if (!FAL_AI_SUPPORTED_BLOB_TYPES.includes(contentType)) {
+ throw new Error(
+ `Provider fal-ai does not support blob type ${contentType} - supported content types are: ${FAL_AI_SUPPORTED_BLOB_TYPES.join(
+ ", "
+ )}`
+ );
+ }
+ const base64audio = base64FromBytes(new Uint8Array(await blob.arrayBuffer()));
+ return {
+ ..."data" in args ? omit(args, "data") : omit(args, "inputs"),
+ audio_url: `data:${contentType};base64,${base64audio}`
+ };
+ } else {
+ return preparePayload(args);
+ }
+ }
+
+ // src/tasks/audio/textToSpeech.ts
+ async function textToSpeech(args, options) {
+ const payload = args.provider === "replicate" ? {
+ ...omit(args, ["inputs", "parameters"]),
+ ...args.parameters,
+ text: args.inputs
+ } : args;
+ const res = await request(payload, {
+ ...options,
+ task: "text-to-speech"
+ });
+ if (res instanceof Blob) {
+ return res;
+ }
+ if (res && typeof res === "object") {
+ if ("output" in res) {
+ if (typeof res.output === "string") {
+ const urlResponse = await fetch(res.output);
+ const blob = await urlResponse.blob();
+ return blob;
+ } else if (Array.isArray(res.output)) {
+ const urlResponse = await fetch(res.output[0]);
+ const blob = await urlResponse.blob();
+ return blob;
+ }
+ }
+ }
+ throw new InferenceOutputError("Expected Blob or object with output");
+ }
+
+ // src/tasks/audio/audioToAudio.ts
+ async function audioToAudio(args, options) {
+ const payload = preparePayload(args);
+ const res = await request(payload, {
+ ...options,
+ task: "audio-to-audio"
+ });
+ return validateOutput(res);
+ }
+ function validateOutput(output) {
+ if (!Array.isArray(output)) {
+ throw new InferenceOutputError("Expected Array");
+ }
+ if (!output.every((elem) => {
+ return typeof elem === "object" && elem && "label" in elem && typeof elem.label === "string" && "content-type" in elem && typeof elem["content-type"] === "string" && "blob" in elem && typeof elem.blob === "string";
+ })) {
+ throw new InferenceOutputError("Expected Array<{label: string, audio: Blob}>");
+ }
+ return output;
+ }
+
+ // src/tasks/cv/utils.ts
+ function preparePayload2(args) {
+ return "data" in args ? args : { ...omit(args, "inputs"), data: args.inputs };
+ }
+
+ // src/tasks/cv/imageClassification.ts
+ async function imageClassification(args, options) {
+ const payload = preparePayload2(args);
+ const res = await request(payload, {
+ ...options,
+ task: "image-classification"
+ });
+ const isValidOutput = Array.isArray(res) && res.every((x) => typeof x.label === "string" && typeof x.score === "number");
+ if (!isValidOutput) {
+ throw new InferenceOutputError("Expected Array<{label: string, score: number}>");
+ }
+ return res;
+ }
+
+ // src/tasks/cv/imageSegmentation.ts
+ async function imageSegmentation(args, options) {
+ const payload = preparePayload2(args);
+ const res = await request(payload, {
+ ...options,
+ task: "image-segmentation"
+ });
+ const isValidOutput = Array.isArray(res) && res.every((x) => typeof x.label === "string" && typeof x.mask === "string" && typeof x.score === "number");
+ if (!isValidOutput) {
+ throw new InferenceOutputError("Expected Array<{label: string, mask: string, score: number}>");
+ }
+ return res;
+ }
+
+ // src/tasks/cv/imageToText.ts
+ async function imageToText(args, options) {
+ var _a;
+ const payload = preparePayload2(args);
+ const res = (_a = await request(payload, {
+ ...options,
+ task: "image-to-text"
+ })) == null ? void 0 : _a[0];
+ if (typeof (res == null ? void 0 : res.generated_text) !== "string") {
+ throw new InferenceOutputError("Expected {generated_text: string}");
+ }
+ return res;
+ }
+
+ // src/tasks/cv/objectDetection.ts
+ async function objectDetection(args, options) {
+ const payload = preparePayload2(args);
+ const res = await request(payload, {
+ ...options,
+ task: "object-detection"
+ });
+ const isValidOutput = Array.isArray(res) && res.every(
+ (x) => typeof x.label === "string" && typeof x.score === "number" && typeof x.box.xmin === "number" && typeof x.box.ymin === "number" && typeof x.box.xmax === "number" && typeof x.box.ymax === "number"
+ );
+ if (!isValidOutput) {
+ throw new InferenceOutputError(
+ "Expected Array<{label:string; score:number; box:{xmin:number; ymin:number; xmax:number; ymax:number}}>"
+ );
+ }
+ return res;
+ }
+
+ // src/utils/delay.ts
+ function delay(ms) {
+ return new Promise((resolve) => {
+ setTimeout(() => resolve(), ms);
+ });
+ }
+
+ // src/tasks/cv/textToImage.ts
+ function getResponseFormatArg(provider) {
+ switch (provider) {
+ case "fal-ai":
+ return { sync_mode: true };
+ case "nebius":
+ return { response_format: "b64_json" };
+ case "replicate":
+ return void 0;
+ case "together":
+ return { response_format: "base64" };
+ default:
+ return void 0;
+ }
+ }
+ async function textToImage(args, options) {
+ const payload = !args.provider || args.provider === "hf-inference" || args.provider === "sambanova" ? args : {
+ ...omit(args, ["inputs", "parameters"]),
+ ...args.parameters,
+ ...getResponseFormatArg(args.provider),
+ prompt: args.inputs
+ };
+ const res = await request(payload, {
+ ...options,
+ task: "text-to-image"
+ });
+ if (res && typeof res === "object") {
+ if (args.provider === "black-forest-labs" && "polling_url" in res && typeof res.polling_url === "string") {
+ return await pollBflResponse(res.polling_url, options == null ? void 0 : options.outputType);
+ }
+ if (args.provider === "fal-ai" && "images" in res && Array.isArray(res.images) && res.images[0].url) {
+ if ((options == null ? void 0 : options.outputType) === "url") {
+ return res.images[0].url;
+ } else {
+ const image = await fetch(res.images[0].url);
+ return await image.blob();
+ }
+ }
+ if (args.provider === "hyperbolic" && "images" in res && Array.isArray(res.images) && res.images[0] && typeof res.images[0].image === "string") {
+ if ((options == null ? void 0 : options.outputType) === "url") {
+ return `data:image/jpeg;base64,${res.images[0].image}`;
+ }
+ const base64Response = await fetch(`data:image/jpeg;base64,${res.images[0].image}`);
+ return await base64Response.blob();
+ }
+ if ("data" in res && Array.isArray(res.data) && res.data[0].b64_json) {
+ const base64Data = res.data[0].b64_json;
+ if ((options == null ? void 0 : options.outputType) === "url") {
+ return `data:image/jpeg;base64,${base64Data}`;
+ }
+ const base64Response = await fetch(`data:image/jpeg;base64,${base64Data}`);
+ return await base64Response.blob();
+ }
+ if ("output" in res && Array.isArray(res.output)) {
+ if ((options == null ? void 0 : options.outputType) === "url") {
+ return res.output[0];
+ }
+ const urlResponse = await fetch(res.output[0]);
+ const blob = await urlResponse.blob();
+ return blob;
+ }
+ }
+ const isValidOutput = res && res instanceof Blob;
+ if (!isValidOutput) {
+ throw new InferenceOutputError("Expected Blob");
+ }
+ if ((options == null ? void 0 : options.outputType) === "url") {
+ const b64 = await res.arrayBuffer().then((buf) => Buffer.from(buf).toString("base64"));
+ return `data:image/jpeg;base64,${b64}`;
+ }
+ return res;
+ }
+ async function pollBflResponse(url, outputType) {
+ const urlObj = new URL(url);
+ for (let step = 0; step < 5; step++) {
+ await delay(1e3);
+ console.debug(`Polling Black Forest Labs API for the result... ${step + 1}/5`);
+ urlObj.searchParams.set("attempt", step.toString(10));
+ const resp = await fetch(urlObj, { headers: { "Content-Type": "application/json" } });
+ if (!resp.ok) {
+ throw new InferenceOutputError("Failed to fetch result from black forest labs API");
+ }
+ const payload = await resp.json();
+ if (typeof payload === "object" && payload && "status" in payload && typeof payload.status === "string" && payload.status === "Ready" && "result" in payload && typeof payload.result === "object" && payload.result && "sample" in payload.result && typeof payload.result.sample === "string") {
+ if (outputType === "url") {
+ return payload.result.sample;
+ }
+ const image = await fetch(payload.result.sample);
+ return await image.blob();
+ }
+ }
+ throw new InferenceOutputError("Failed to fetch result from black forest labs API");
+ }
+
+ // src/tasks/cv/imageToImage.ts
+ async function imageToImage(args, options) {
+ let reqArgs;
+ if (!args.parameters) {
+ reqArgs = {
+ accessToken: args.accessToken,
+ model: args.model,
+ data: args.inputs
+ };
+ } else {
+ reqArgs = {
+ ...args,
+ inputs: base64FromBytes(
+ new Uint8Array(args.inputs instanceof ArrayBuffer ? args.inputs : await args.inputs.arrayBuffer())
+ )
+ };
+ }
+ const res = await request(reqArgs, {
+ ...options,
+ task: "image-to-image"
+ });
+ const isValidOutput = res && res instanceof Blob;
+ if (!isValidOutput) {
+ throw new InferenceOutputError("Expected Blob");
+ }
+ return res;
+ }
+
+ // src/tasks/cv/zeroShotImageClassification.ts
+ async function preparePayload3(args) {
+ if (args.inputs instanceof Blob) {
+ return {
+ ...args,
+ inputs: {
+ image: base64FromBytes(new Uint8Array(await args.inputs.arrayBuffer()))
+ }
+ };
+ } else {
+ return {
+ ...args,
+ inputs: {
+ image: base64FromBytes(
+ new Uint8Array(
+ args.inputs.image instanceof ArrayBuffer ? args.inputs.image : await args.inputs.image.arrayBuffer()
+ )
+ )
+ }
+ };
+ }
+ }
+ async function zeroShotImageClassification(args, options) {
+ const payload = await preparePayload3(args);
+ const res = await request(payload, {
+ ...options,
+ task: "zero-shot-image-classification"
+ });
+ const isValidOutput = Array.isArray(res) && res.every((x) => typeof x.label === "string" && typeof x.score === "number");
+ if (!isValidOutput) {
+ throw new InferenceOutputError("Expected Array<{label: string, score: number}>");
+ }
+ return res;
+ }
+
+ // src/tasks/cv/textToVideo.ts
+ var SUPPORTED_PROVIDERS = ["fal-ai", "novita", "replicate"];
+ async function textToVideo(args, options) {
+ if (!args.provider || !typedInclude(SUPPORTED_PROVIDERS, args.provider)) {
+ throw new Error(
+ `textToVideo inference is only supported for the following providers: ${SUPPORTED_PROVIDERS.join(", ")}`
+ );
+ }
+ const payload = args.provider === "fal-ai" || args.provider === "replicate" || args.provider === "novita" ? { ...omit(args, ["inputs", "parameters"]), ...args.parameters, prompt: args.inputs } : args;
+ const res = await request(payload, {
+ ...options,
+ task: "text-to-video"
+ });
+ if (args.provider === "fal-ai") {
+ const isValidOutput = typeof res === "object" && !!res && "video" in res && typeof res.video === "object" && !!res.video && "url" in res.video && typeof res.video.url === "string" && isUrl(res.video.url);
+ if (!isValidOutput) {
+ throw new InferenceOutputError("Expected { video: { url: string } }");
+ }
+ const urlResponse = await fetch(res.video.url);
+ return await urlResponse.blob();
+ } else if (args.provider === "novita") {
+ const isValidOutput = typeof res === "object" && !!res && "video" in res && typeof res.video === "object" && !!res.video && "video_url" in res.video && typeof res.video.video_url === "string" && isUrl(res.video.video_url);
+ if (!isValidOutput) {
+ throw new InferenceOutputError("Expected { video: { video_url: string } }");
+ }
+ const urlResponse = await fetch(res.video.video_url);
+ return await urlResponse.blob();
+ } else {
+ const isValidOutput = typeof res === "object" && !!res && "output" in res && typeof res.output === "string" && isUrl(res.output);
+ if (!isValidOutput) {
+ throw new InferenceOutputError("Expected { output: string }");
+ }
+ const urlResponse = await fetch(res.output);
+ return await urlResponse.blob();
+ }
+ }
+
+ // src/tasks/nlp/featureExtraction.ts
+ async function featureExtraction(args, options) {
+ const res = await request(args, {
+ ...options,
+ task: "feature-extraction"
+ });
+ let isValidOutput = true;
+ const isNumArrayRec = (arr, maxDepth, curDepth = 0) => {
+ if (curDepth > maxDepth)
+ return false;
+ if (arr.every((x) => Array.isArray(x))) {
+ return arr.every((x) => isNumArrayRec(x, maxDepth, curDepth + 1));
+ } else {
+ return arr.every((x) => typeof x === "number");
+ }
+ };
+ isValidOutput = Array.isArray(res) && isNumArrayRec(res, 3, 0);
+ if (!isValidOutput) {
+ throw new InferenceOutputError("Expected Array<number[][][] | number[][] | number[] | number>");
+ }
+ return res;
+ }
+
+ // src/tasks/nlp/fillMask.ts
+ async function fillMask(args, options) {
+ const res = await request(args, {
+ ...options,
+ task: "fill-mask"
+ });
+ const isValidOutput = Array.isArray(res) && res.every(
+ (x) => typeof x.score === "number" && typeof x.sequence === "string" && typeof x.token === "number" && typeof x.token_str === "string"
+ );
+ if (!isValidOutput) {
+ throw new InferenceOutputError(
+ "Expected Array<{score: number, sequence: string, token: number, token_str: string}>"
+ );
+ }
+ return res;
+ }
+
+ // src/tasks/nlp/questionAnswering.ts
+ async function questionAnswering(args, options) {
+ const res = await request(args, {
+ ...options,
+ task: "question-answering"
+ });
+ const isValidOutput = Array.isArray(res) ? res.every(
+ (elem) => typeof elem === "object" && !!elem && typeof elem.answer === "string" && typeof elem.end === "number" && typeof elem.score === "number" && typeof elem.start === "number"
+ ) : typeof res === "object" && !!res && typeof res.answer === "string" && typeof res.end === "number" && typeof res.score === "number" && typeof res.start === "number";
+ if (!isValidOutput) {
+ throw new InferenceOutputError("Expected Array<{answer: string, end: number, score: number, start: number}>");
+ }
+ return Array.isArray(res) ? res[0] : res;
+ }
+
+ // src/tasks/nlp/sentenceSimilarity.ts
+ async function sentenceSimilarity(args, options) {
+ const res = await request(prepareInput(args), {
+ ...options,
+ task: "sentence-similarity"
+ });
+ const isValidOutput = Array.isArray(res) && res.every((x) => typeof x === "number");
+ if (!isValidOutput) {
+ throw new InferenceOutputError("Expected number[]");
+ }
+ return res;
+ }
+ function prepareInput(args) {
+ return {
+ ...omit(args, ["inputs", "parameters"]),
+ inputs: { ...omit(args.inputs, "sourceSentence") },
+ parameters: { source_sentence: args.inputs.sourceSentence, ...args.parameters }
+ };
+ }
+
+ // src/tasks/nlp/summarization.ts
+ async function summarization(args, options) {
+ const res = await request(args, {
+ ...options,
+ task: "summarization"
+ });
+ const isValidOutput = Array.isArray(res) && res.every((x) => typeof (x == null ? void 0 : x.summary_text) === "string");
+ if (!isValidOutput) {
+ throw new InferenceOutputError("Expected Array<{summary_text: string}>");
+ }
+ return res == null ? void 0 : res[0];
+ }
+
+ // src/tasks/nlp/tableQuestionAnswering.ts
+ async function tableQuestionAnswering(args, options) {
+ const res = await request(args, {
+ ...options,
+ task: "table-question-answering"
+ });
+ const isValidOutput = Array.isArray(res) ? res.every((elem) => validate(elem)) : validate(res);
+ if (!isValidOutput) {
+ throw new InferenceOutputError(
+ "Expected {aggregator: string, answer: string, cells: string[], coordinates: number[][]}"
+ );
+ }
+ return Array.isArray(res) ? res[0] : res;
+ }
+ function validate(elem) {
+ return typeof elem === "object" && !!elem && "aggregator" in elem && typeof elem.aggregator === "string" && "answer" in elem && typeof elem.answer === "string" && "cells" in elem && Array.isArray(elem.cells) && elem.cells.every((x) => typeof x === "string") && "coordinates" in elem && Array.isArray(elem.coordinates) && elem.coordinates.every(
+ (coord) => Array.isArray(coord) && coord.every((x) => typeof x === "number")
+ );
+ }
1322
+
1323
+ // src/tasks/nlp/textClassification.ts
1324
+ async function textClassification(args, options) {
1325
+ var _a;
1326
+ const res = (_a = await request(args, {
1327
+ ...options,
1328
+ task: "text-classification"
1329
+ })) == null ? void 0 : _a[0];
1330
+ const isValidOutput = Array.isArray(res) && res.every((x) => typeof (x == null ? void 0 : x.label) === "string" && typeof x.score === "number");
1331
+ if (!isValidOutput) {
1332
+ throw new InferenceOutputError("Expected Array<{label: string, score: number}>");
1333
+ }
1334
+ return res;
1335
+ }
1336
+
1337
+ // src/utils/toArray.ts
1338
+ function toArray(obj) {
1339
+ if (Array.isArray(obj)) {
1340
+ return obj;
1341
+ }
1342
+ return [obj];
1343
+ }
1344
+
1345
+ // src/tasks/nlp/textGeneration.ts
+ async function textGeneration(args, options) {
+   if (args.provider === "together") {
+     args.prompt = args.inputs;
+     const raw = await request(args, {
+       ...options,
+       task: "text-generation"
+     });
+     const isValidOutput = typeof raw === "object" && "choices" in raw && Array.isArray(raw == null ? void 0 : raw.choices) && typeof (raw == null ? void 0 : raw.model) === "string";
+     if (!isValidOutput) {
+       throw new InferenceOutputError("Expected ChatCompletionOutput");
+     }
+     const completion = raw.choices[0];
+     return {
+       generated_text: completion.text
+     };
+   } else if (args.provider === "hyperbolic") {
+     const payload = {
+       messages: [{ content: args.inputs, role: "user" }],
+       ...args.parameters ? {
+         max_tokens: args.parameters.max_new_tokens,
+         ...omit(args.parameters, "max_new_tokens")
+       } : void 0,
+       ...omit(args, ["inputs", "parameters"])
+     };
+     const raw = await request(payload, {
+       ...options,
+       task: "text-generation"
+     });
+     const isValidOutput = typeof raw === "object" && "choices" in raw && Array.isArray(raw == null ? void 0 : raw.choices) && typeof (raw == null ? void 0 : raw.model) === "string";
+     if (!isValidOutput) {
+       throw new InferenceOutputError("Expected ChatCompletionOutput");
+     }
+     const completion = raw.choices[0];
+     return {
+       generated_text: completion.message.content
+     };
+   } else {
+     const res = toArray(
+       await request(args, {
+         ...options,
+         task: "text-generation"
+       })
+     );
+     const isValidOutput = Array.isArray(res) && res.every((x) => "generated_text" in x && typeof (x == null ? void 0 : x.generated_text) === "string");
+     if (!isValidOutput) {
+       throw new InferenceOutputError("Expected Array<{generated_text: string}>");
+     }
+     return res == null ? void 0 : res[0];
+   }
+ }
+
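The three branches above normalize provider-specific shapes — Together's `prompt`/`choices[].text` and Hyperbolic's chat-style `messages`/`choices[].message.content` — back to a single `{ generated_text }` result. A default-route sketch (model ID and token are placeholders):

import { textGeneration } from "@huggingface/inference";

const out = await textGeneration({
  accessToken: "hf_***", // placeholder token
  model: "openai-community/gpt2", // illustrative model
  inputs: "The answer to the universe is",
  parameters: { max_new_tokens: 20 },
});
console.log(out.generated_text); // same shape regardless of which provider branch ran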
+ // src/tasks/nlp/textGenerationStream.ts
+ async function* textGenerationStream(args, options) {
+   yield* streamingRequest(args, {
+     ...options,
+     task: "text-generation"
+   });
+ }
+
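`textGenerationStream` simply delegates to `streamingRequest`, so consumption is plain async iteration. A sketch with the same placeholders as above:

import { textGenerationStream } from "@huggingface/inference";

for await (const event of textGenerationStream({
  accessToken: "hf_***", // placeholder token
  model: "openai-community/gpt2", // illustrative model
  inputs: "Once upon a time,",
})) {
  // Incremental tokens arrive on event.token; the final event also carries generated_text.
  process.stdout.write(event.token.text);
}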
+ // src/tasks/nlp/tokenClassification.ts
+ async function tokenClassification(args, options) {
+   const res = toArray(
+     await request(args, {
+       ...options,
+       task: "token-classification"
+     })
+   );
+   const isValidOutput = Array.isArray(res) && res.every(
+     (x) => typeof x.end === "number" && typeof x.entity_group === "string" && typeof x.score === "number" && typeof x.start === "number" && typeof x.word === "string"
+   );
+   if (!isValidOutput) {
+     throw new InferenceOutputError(
+       "Expected Array<{end: number, entity_group: string, score: number, start: number, word: string}>"
+     );
+   }
+   return res;
+ }
+
+ // src/tasks/nlp/translation.ts
+ async function translation(args, options) {
+   const res = await request(args, {
+     ...options,
+     task: "translation"
+   });
+   const isValidOutput = Array.isArray(res) && res.every((x) => typeof (x == null ? void 0 : x.translation_text) === "string");
+   if (!isValidOutput) {
+     throw new InferenceOutputError("Expected type Array<{translation_text: string}>");
+   }
+   return (res == null ? void 0 : res.length) === 1 ? res == null ? void 0 : res[0] : res;
+ }
+
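Note the unwrapping rule in the return above: a single-element result is flattened to one object, while multiple inputs keep the array. Sketch (model ID and token are placeholders):

import { translation } from "@huggingface/inference";

const res = await translation({
  accessToken: "hf_***", // placeholder token
  model: "Helsinki-NLP/opus-mt-en-fr", // illustrative model
  inputs: "Hello, how are you?",
});
// One input → one { translation_text } object rather than a one-element array.
console.log(res);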
+ // src/tasks/nlp/zeroShotClassification.ts
+ async function zeroShotClassification(args, options) {
+   const res = toArray(
+     await request(args, {
+       ...options,
+       task: "zero-shot-classification"
+     })
+   );
+   const isValidOutput = Array.isArray(res) && res.every(
+     (x) => Array.isArray(x.labels) && x.labels.every((_label) => typeof _label === "string") && Array.isArray(x.scores) && x.scores.every((_score) => typeof _score === "number") && typeof x.sequence === "string"
+   );
+   if (!isValidOutput) {
+     throw new InferenceOutputError("Expected Array<{labels: string[], scores: number[], sequence: string}>");
+   }
+   return res;
+ }
+
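`zeroShotClassification` takes its candidate labels via `parameters`. Sketch (model ID, token, and labels are placeholders):

import { zeroShotClassification } from "@huggingface/inference";

const res = await zeroShotClassification({
  accessToken: "hf_***", // placeholder token
  model: "facebook/bart-large-mnli", // illustrative model
  inputs: "I have a problem with my iphone that needs to be resolved asap!!",
  parameters: { candidate_labels: ["urgent", "not urgent", "phone", "tablet"] },
});
// res: Array<{ labels: string[], scores: number[], sequence: string }>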
+ // src/tasks/nlp/chatCompletion.ts
+ async function chatCompletion(args, options) {
+   const res = await request(args, {
+     ...options,
+     task: "text-generation",
+     chatCompletion: true
+   });
+   const isValidOutput = typeof res === "object" && Array.isArray(res == null ? void 0 : res.choices) && typeof (res == null ? void 0 : res.created) === "number" && typeof (res == null ? void 0 : res.id) === "string" && typeof (res == null ? void 0 : res.model) === "string" && /// Together.ai and Nebius do not output a system_fingerprint
+   (res.system_fingerprint === void 0 || res.system_fingerprint === null || typeof res.system_fingerprint === "string") && typeof (res == null ? void 0 : res.usage) === "object";
+   if (!isValidOutput) {
+     throw new InferenceOutputError("Expected ChatCompletionOutput");
+   }
+   return res;
+ }
+
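`chatCompletion` reuses the `text-generation` task with `chatCompletion: true`, so routing and provider selection are shared with `textGeneration`; the `system_fingerprint` check is deliberately relaxed because Together and Nebius omit the field. Sketch (model ID and token are placeholders):

import { chatCompletion } from "@huggingface/inference";

const res = await chatCompletion({
  accessToken: "hf_***", // placeholder token
  model: "meta-llama/Llama-3.1-8B-Instruct", // illustrative model
  messages: [{ role: "user", content: "What is the capital of France?" }],
  max_tokens: 128,
});
console.log(res.choices[0].message.content);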
+ // src/tasks/nlp/chatCompletionStream.ts
+ async function* chatCompletionStream(args, options) {
+   yield* streamingRequest(args, {
+     ...options,
+     task: "text-generation",
+     chatCompletion: true
+   });
+ }
+
+ // src/tasks/multimodal/documentQuestionAnswering.ts
+ async function documentQuestionAnswering(args, options) {
+   const reqArgs = {
+     ...args,
+     inputs: {
+       question: args.inputs.question,
+       // convert Blob or ArrayBuffer to base64
+       image: base64FromBytes(new Uint8Array(await args.inputs.image.arrayBuffer()))
+     }
+   };
+   const res = toArray(
+     await request(reqArgs, {
+       ...options,
+       task: "document-question-answering"
+     })
+   );
+   const isValidOutput = Array.isArray(res) && res.every(
+     (elem) => typeof elem === "object" && !!elem && typeof (elem == null ? void 0 : elem.answer) === "string" && (typeof elem.end === "number" || typeof elem.end === "undefined") && (typeof elem.score === "number" || typeof elem.score === "undefined") && (typeof elem.start === "number" || typeof elem.start === "undefined")
+   );
+   if (!isValidOutput) {
+     throw new InferenceOutputError("Expected Array<{answer: string, end?: number, score?: number, start?: number}>");
+   }
+   return res[0];
+ }
+
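Since `documentQuestionAnswering` base64-encodes the image itself, callers pass a `Blob` directly. Sketch — the URL, model ID, and token are placeholders:

import { documentQuestionAnswering } from "@huggingface/inference";

const image = await (await fetch("https://example.com/invoice.png")).blob(); // placeholder URL
const res = await documentQuestionAnswering({
  accessToken: "hf_***", // placeholder token
  model: "impira/layoutlm-document-qa", // illustrative model
  inputs: { image, question: "What is the invoice number?" },
});
console.log(res.answer); // start/end/score may be undefined depending on the model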
+ // src/tasks/multimodal/visualQuestionAnswering.ts
+ async function visualQuestionAnswering(args, options) {
+   const reqArgs = {
+     ...args,
+     inputs: {
+       question: args.inputs.question,
+       // convert Blob or ArrayBuffer to base64
+       image: base64FromBytes(new Uint8Array(await args.inputs.image.arrayBuffer()))
+     }
+   };
+   const res = await request(reqArgs, {
+     ...options,
+     task: "visual-question-answering"
+   });
+   const isValidOutput = Array.isArray(res) && res.every(
+     (elem) => typeof elem === "object" && !!elem && typeof (elem == null ? void 0 : elem.answer) === "string" && typeof elem.score === "number"
+   );
+   if (!isValidOutput) {
+     throw new InferenceOutputError("Expected Array<{answer: string, score: number}>");
+   }
+   return res[0];
+ }
+
+ // src/tasks/tabular/tabularRegression.ts
+ async function tabularRegression(args, options) {
+   const res = await request(args, {
+     ...options,
+     task: "tabular-regression"
+   });
+   const isValidOutput = Array.isArray(res) && res.every((x) => typeof x === "number");
+   if (!isValidOutput) {
+     throw new InferenceOutputError("Expected number[]");
+   }
+   return res;
+ }
+
+ // src/tasks/tabular/tabularClassification.ts
+ async function tabularClassification(args, options) {
+   const res = await request(args, {
+     ...options,
+     task: "tabular-classification"
+   });
+   const isValidOutput = Array.isArray(res) && res.every((x) => typeof x === "number");
+   if (!isValidOutput) {
+     throw new InferenceOutputError("Expected number[]");
+   }
+   return res;
+ }
+
+ // src/InferenceClient.ts
+ var InferenceClient = class {
+   constructor(accessToken = "", defaultOptions = {}) {
+     __publicField(this, "accessToken");
+     __publicField(this, "defaultOptions");
+     this.accessToken = accessToken;
+     this.defaultOptions = defaultOptions;
+     for (const [name2, fn] of Object.entries(tasks_exports)) {
+       Object.defineProperty(this, name2, {
+         enumerable: false,
+         value: (params, options) => (
+           // eslint-disable-next-line @typescript-eslint/no-explicit-any
+           fn({ ...params, accessToken }, { ...defaultOptions, ...options })
+         )
+       });
+     }
+   }
+   /**
+    * Returns copy of InferenceClient tied to a specified endpoint.
+    */
+   endpoint(endpointUrl) {
+     return new InferenceClientEndpoint(endpointUrl, this.accessToken, this.defaultOptions);
+   }
+ };
+ var InferenceClientEndpoint = class {
+   constructor(endpointUrl, accessToken = "", defaultOptions = {}) {
+     accessToken;
+     defaultOptions;
+     for (const [name2, fn] of Object.entries(tasks_exports)) {
+       Object.defineProperty(this, name2, {
+         enumerable: false,
+         value: (params, options) => (
+           // eslint-disable-next-line @typescript-eslint/no-explicit-any
+           fn({ ...params, accessToken, endpointUrl }, { ...defaultOptions, ...options })
+         )
+       });
+     }
+   }
+ };
+ var HfInference = class extends InferenceClient {
+ };
+
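The client builds its task methods dynamically from `tasks_exports`, pre-binding the access token (and, on `InferenceClientEndpoint`, the endpoint URL) into every call; `HfInference` remains as an empty subclass for backwards compatibility. A construction sketch — the token and endpoint URL are placeholders:

import { InferenceClient } from "@huggingface/inference";

const client = new InferenceClient("hf_***"); // placeholder token
const out = await client.textGeneration({ model: "openai-community/gpt2", inputs: "Hi" });

// Pin every call to a dedicated Inference Endpoint instead of the routed API:
const endpoint = client.endpoint("https://my-endpoint.endpoints.huggingface.cloud"); // placeholder URL
const pinned = await endpoint.textGeneration({ inputs: "Hi" });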
+ // src/types.ts
+ var INFERENCE_PROVIDERS = [
+   "black-forest-labs",
+   "cerebras",
+   "cohere",
+   "fal-ai",
+   "fireworks-ai",
+   "hf-inference",
+   "hyperbolic",
+   "nebius",
+   "novita",
+   "openai",
+   "replicate",
+   "sambanova",
+   "together"
+ ];
+
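`INFERENCE_PROVIDERS` is the closed list that the `provider` request option is typed against; requests without a provider appear to default to the `hf-inference` route. Sketch (model ID and token are placeholders):

import { chatCompletion } from "@huggingface/inference";

const res = await chatCompletion({
  accessToken: "hf_***", // placeholder token
  provider: "sambanova", // must be one of INFERENCE_PROVIDERS
  model: "meta-llama/Llama-3.1-8B-Instruct", // illustrative model
  messages: [{ role: "user", content: "Hello!" }],
});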
+ // (disabled):src/snippets/index.js
+ var snippets_exports = {};
+
+ exports.HfInference = HfInference; exports.INFERENCE_PROVIDERS = INFERENCE_PROVIDERS; exports.InferenceClient = InferenceClient; exports.InferenceClientEndpoint = InferenceClientEndpoint; exports.InferenceOutputError = InferenceOutputError; exports.audioClassification = audioClassification; exports.audioToAudio = audioToAudio; exports.automaticSpeechRecognition = automaticSpeechRecognition; exports.chatCompletion = chatCompletion; exports.chatCompletionStream = chatCompletionStream; exports.documentQuestionAnswering = documentQuestionAnswering; exports.featureExtraction = featureExtraction; exports.fillMask = fillMask; exports.imageClassification = imageClassification; exports.imageSegmentation = imageSegmentation; exports.imageToImage = imageToImage; exports.imageToText = imageToText; exports.objectDetection = objectDetection; exports.questionAnswering = questionAnswering; exports.request = request; exports.sentenceSimilarity = sentenceSimilarity; exports.snippets = snippets_exports; exports.streamingRequest = streamingRequest; exports.summarization = summarization; exports.tableQuestionAnswering = tableQuestionAnswering; exports.tabularClassification = tabularClassification; exports.tabularRegression = tabularRegression; exports.textClassification = textClassification; exports.textGeneration = textGeneration; exports.textGenerationStream = textGenerationStream; exports.textToImage = textToImage; exports.textToSpeech = textToSpeech; exports.textToVideo = textToVideo; exports.tokenClassification = tokenClassification; exports.translation = translation; exports.visualQuestionAnswering = visualQuestionAnswering; exports.zeroShotClassification = zeroShotClassification; exports.zeroShotImageClassification = zeroShotImageClassification;