@huggingface/inference 3.6.1 → 3.7.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (100)
  1. package/README.md +0 -25
  2. package/dist/index.cjs +256 -154
  3. package/dist/index.js +256 -154
  4. package/dist/src/config.d.ts +1 -0
  5. package/dist/src/config.d.ts.map +1 -1
  6. package/dist/src/lib/makeRequestOptions.d.ts.map +1 -1
  7. package/dist/src/providers/black-forest-labs.d.ts.map +1 -1
  8. package/dist/src/providers/cerebras.d.ts.map +1 -1
  9. package/dist/src/providers/cohere.d.ts.map +1 -1
  10. package/dist/src/providers/fal-ai.d.ts +6 -16
  11. package/dist/src/providers/fal-ai.d.ts.map +1 -1
  12. package/dist/src/providers/fireworks-ai.d.ts.map +1 -1
  13. package/dist/src/providers/hf-inference.d.ts.map +1 -1
  14. package/dist/src/providers/hyperbolic.d.ts.map +1 -1
  15. package/dist/src/providers/nebius.d.ts.map +1 -1
  16. package/dist/src/providers/novita.d.ts.map +1 -1
  17. package/dist/src/providers/openai.d.ts.map +1 -1
  18. package/dist/src/providers/replicate.d.ts.map +1 -1
  19. package/dist/src/providers/sambanova.d.ts.map +1 -1
  20. package/dist/src/providers/together.d.ts.map +1 -1
  21. package/dist/src/tasks/audio/automaticSpeechRecognition.d.ts.map +1 -1
  22. package/dist/src/tasks/custom/request.d.ts +1 -0
  23. package/dist/src/tasks/custom/request.d.ts.map +1 -1
  24. package/dist/src/tasks/custom/streamingRequest.d.ts +1 -0
  25. package/dist/src/tasks/custom/streamingRequest.d.ts.map +1 -1
  26. package/dist/src/tasks/cv/imageToText.d.ts.map +1 -1
  27. package/dist/src/tasks/cv/objectDetection.d.ts +1 -1
  28. package/dist/src/tasks/cv/objectDetection.d.ts.map +1 -1
  29. package/dist/src/tasks/cv/textToVideo.d.ts +1 -1
  30. package/dist/src/tasks/cv/textToVideo.d.ts.map +1 -1
  31. package/dist/src/tasks/cv/zeroShotImageClassification.d.ts +1 -1
  32. package/dist/src/tasks/cv/zeroShotImageClassification.d.ts.map +1 -1
  33. package/dist/src/tasks/multimodal/documentQuestionAnswering.d.ts +1 -1
  34. package/dist/src/tasks/multimodal/documentQuestionAnswering.d.ts.map +1 -1
  35. package/dist/src/tasks/multimodal/visualQuestionAnswering.d.ts.map +1 -1
  36. package/dist/src/tasks/nlp/chatCompletion.d.ts +1 -1
  37. package/dist/src/tasks/nlp/chatCompletion.d.ts.map +1 -1
  38. package/dist/src/tasks/nlp/chatCompletionStream.d.ts +1 -1
  39. package/dist/src/tasks/nlp/chatCompletionStream.d.ts.map +1 -1
  40. package/dist/src/tasks/nlp/questionAnswering.d.ts.map +1 -1
  41. package/dist/src/tasks/nlp/sentenceSimilarity.d.ts.map +1 -1
  42. package/dist/src/tasks/nlp/textClassification.d.ts.map +1 -1
  43. package/dist/src/tasks/nlp/tokenClassification.d.ts.map +1 -1
  44. package/dist/src/tasks/nlp/zeroShotClassification.d.ts.map +1 -1
  45. package/dist/src/types.d.ts +11 -2
  46. package/dist/src/types.d.ts.map +1 -1
  47. package/dist/src/utils/request.d.ts +27 -0
  48. package/dist/src/utils/request.d.ts.map +1 -0
  49. package/dist/test/InferenceClient.spec.d.ts.map +1 -1
  50. package/package.json +2 -2
  51. package/src/config.ts +1 -0
  52. package/src/lib/makeRequestOptions.ts +8 -3
  53. package/src/providers/black-forest-labs.ts +6 -2
  54. package/src/providers/cerebras.ts +6 -2
  55. package/src/providers/cohere.ts +6 -2
  56. package/src/providers/fal-ai.ts +85 -3
  57. package/src/providers/fireworks-ai.ts +6 -2
  58. package/src/providers/hf-inference.ts +6 -2
  59. package/src/providers/hyperbolic.ts +6 -2
  60. package/src/providers/nebius.ts +6 -2
  61. package/src/providers/novita.ts +5 -2
  62. package/src/providers/openai.ts +6 -2
  63. package/src/providers/replicate.ts +6 -2
  64. package/src/providers/sambanova.ts +6 -2
  65. package/src/providers/together.ts +6 -2
  66. package/src/snippets/templates.exported.ts +1 -1
  67. package/src/tasks/audio/audioClassification.ts +2 -2
  68. package/src/tasks/audio/audioToAudio.ts +2 -2
  69. package/src/tasks/audio/automaticSpeechRecognition.ts +3 -3
  70. package/src/tasks/audio/textToSpeech.ts +2 -2
  71. package/src/tasks/custom/request.ts +7 -32
  72. package/src/tasks/custom/streamingRequest.ts +5 -85
  73. package/src/tasks/cv/imageClassification.ts +2 -2
  74. package/src/tasks/cv/imageSegmentation.ts +2 -2
  75. package/src/tasks/cv/imageToImage.ts +2 -2
  76. package/src/tasks/cv/imageToText.ts +7 -9
  77. package/src/tasks/cv/objectDetection.ts +4 -4
  78. package/src/tasks/cv/textToImage.ts +3 -3
  79. package/src/tasks/cv/textToVideo.ts +23 -36
  80. package/src/tasks/cv/zeroShotImageClassification.ts +4 -5
  81. package/src/tasks/multimodal/documentQuestionAnswering.ts +13 -13
  82. package/src/tasks/multimodal/visualQuestionAnswering.ts +4 -2
  83. package/src/tasks/nlp/chatCompletion.ts +3 -4
  84. package/src/tasks/nlp/chatCompletionStream.ts +3 -3
  85. package/src/tasks/nlp/featureExtraction.ts +2 -2
  86. package/src/tasks/nlp/fillMask.ts +2 -2
  87. package/src/tasks/nlp/questionAnswering.ts +3 -2
  88. package/src/tasks/nlp/sentenceSimilarity.ts +2 -11
  89. package/src/tasks/nlp/summarization.ts +2 -2
  90. package/src/tasks/nlp/tableQuestionAnswering.ts +2 -2
  91. package/src/tasks/nlp/textClassification.ts +8 -9
  92. package/src/tasks/nlp/textGeneration.ts +16 -16
  93. package/src/tasks/nlp/textGenerationStream.ts +2 -2
  94. package/src/tasks/nlp/tokenClassification.ts +9 -10
  95. package/src/tasks/nlp/translation.ts +2 -2
  96. package/src/tasks/nlp/zeroShotClassification.ts +9 -10
  97. package/src/tasks/tabular/tabularClassification.ts +2 -2
  98. package/src/tasks/tabular/tabularRegression.ts +2 -2
  99. package/src/types.ts +13 -2
  100. package/src/utils/request.ts +161 -0
package/dist/index.js CHANGED
@@ -44,9 +44,13 @@ __export(tasks_exports, {
44
44
  // src/config.ts
45
45
  var HF_HUB_URL = "https://huggingface.co";
46
46
  var HF_ROUTER_URL = "https://router.huggingface.co";
47
+ var HF_HEADER_X_BILL_TO = "X-HF-Bill-To";
47
48
 
48
49
  // src/providers/black-forest-labs.ts
49
50
  var BLACK_FOREST_LABS_AI_API_BASE_URL = "https://api.us1.bfl.ai";
51
+ var makeBaseUrl = () => {
52
+ return BLACK_FOREST_LABS_AI_API_BASE_URL;
53
+ };
50
54
  var makeBody = (params) => {
51
55
  return params.args;
52
56
  };
@@ -61,7 +65,7 @@ var makeUrl = (params) => {
61
65
  return `${params.baseUrl}/v1/${params.model}`;
62
66
  };
63
67
  var BLACK_FOREST_LABS_CONFIG = {
64
- baseUrl: BLACK_FOREST_LABS_AI_API_BASE_URL,
68
+ makeBaseUrl,
65
69
  makeBody,
66
70
  makeHeaders,
67
71
  makeUrl
@@ -69,6 +73,9 @@ var BLACK_FOREST_LABS_CONFIG = {
69
73
 
70
74
  // src/providers/cerebras.ts
71
75
  var CEREBRAS_API_BASE_URL = "https://api.cerebras.ai";
76
+ var makeBaseUrl2 = () => {
77
+ return CEREBRAS_API_BASE_URL;
78
+ };
72
79
  var makeBody2 = (params) => {
73
80
  return {
74
81
  ...params.args,
@@ -82,7 +89,7 @@ var makeUrl2 = (params) => {
82
89
  return `${params.baseUrl}/v1/chat/completions`;
83
90
  };
84
91
  var CEREBRAS_CONFIG = {
85
- baseUrl: CEREBRAS_API_BASE_URL,
92
+ makeBaseUrl: makeBaseUrl2,
86
93
  makeBody: makeBody2,
87
94
  makeHeaders: makeHeaders2,
88
95
  makeUrl: makeUrl2
@@ -90,6 +97,9 @@ var CEREBRAS_CONFIG = {
90
97
 
91
98
  // src/providers/cohere.ts
92
99
  var COHERE_API_BASE_URL = "https://api.cohere.com";
100
+ var makeBaseUrl3 = () => {
101
+ return COHERE_API_BASE_URL;
102
+ };
93
103
  var makeBody3 = (params) => {
94
104
  return {
95
105
  ...params.args,
@@ -103,14 +113,40 @@ var makeUrl3 = (params) => {
103
113
  return `${params.baseUrl}/compatibility/v1/chat/completions`;
104
114
  };
105
115
  var COHERE_CONFIG = {
106
- baseUrl: COHERE_API_BASE_URL,
116
+ makeBaseUrl: makeBaseUrl3,
107
117
  makeBody: makeBody3,
108
118
  makeHeaders: makeHeaders3,
109
119
  makeUrl: makeUrl3
110
120
  };
111
121
 
122
+ // src/lib/InferenceOutputError.ts
123
+ var InferenceOutputError = class extends TypeError {
124
+ constructor(message) {
125
+ super(
126
+ `Invalid inference output: ${message}. Use the 'request' method with the same parameters to do a custom call with no type checking.`
127
+ );
128
+ this.name = "InferenceOutputError";
129
+ }
130
+ };
131
+
132
+ // src/lib/isUrl.ts
133
+ function isUrl(modelOrUrl) {
134
+ return /^http(s?):/.test(modelOrUrl) || modelOrUrl.startsWith("/");
135
+ }
136
+
137
+ // src/utils/delay.ts
138
+ function delay(ms) {
139
+ return new Promise((resolve) => {
140
+ setTimeout(() => resolve(), ms);
141
+ });
142
+ }
143
+
112
144
  // src/providers/fal-ai.ts
113
145
  var FAL_AI_API_BASE_URL = "https://fal.run";
146
+ var FAL_AI_API_BASE_URL_QUEUE = "https://queue.fal.run";
147
+ var makeBaseUrl4 = (task) => {
148
+ return task === "text-to-video" ? FAL_AI_API_BASE_URL_QUEUE : FAL_AI_API_BASE_URL;
149
+ };
114
150
  var makeBody4 = (params) => {
115
151
  return params.args;
116
152
  };
@@ -120,17 +156,64 @@ var makeHeaders4 = (params) => {
120
156
  };
121
157
  };
122
158
  var makeUrl4 = (params) => {
123
- return `${params.baseUrl}/${params.model}`;
159
+ const baseUrl = `${params.baseUrl}/${params.model}`;
160
+ if (params.authMethod !== "provider-key" && params.task === "text-to-video") {
161
+ return `${baseUrl}?_subdomain=queue`;
162
+ }
163
+ return baseUrl;
124
164
  };
125
165
  var FAL_AI_CONFIG = {
126
- baseUrl: FAL_AI_API_BASE_URL,
166
+ makeBaseUrl: makeBaseUrl4,
127
167
  makeBody: makeBody4,
128
168
  makeHeaders: makeHeaders4,
129
169
  makeUrl: makeUrl4
130
170
  };
171
+ async function pollFalResponse(res, url, headers) {
172
+ const requestId = res.request_id;
173
+ if (!requestId) {
174
+ throw new InferenceOutputError("No request ID found in the response");
175
+ }
176
+ let status = res.status;
177
+ const parsedUrl = new URL(url);
178
+ const baseUrl = `${parsedUrl.protocol}//${parsedUrl.host}${parsedUrl.host === "router.huggingface.co" ? "/fal-ai" : ""}`;
179
+ const modelId = new URL(res.response_url).pathname;
180
+ const queryParams = parsedUrl.search;
181
+ const statusUrl = `${baseUrl}${modelId}/status${queryParams}`;
182
+ const resultUrl = `${baseUrl}${modelId}${queryParams}`;
183
+ while (status !== "COMPLETED") {
184
+ await delay(500);
185
+ const statusResponse = await fetch(statusUrl, { headers });
186
+ if (!statusResponse.ok) {
187
+ throw new InferenceOutputError("Failed to fetch response status from fal-ai API");
188
+ }
189
+ try {
190
+ status = (await statusResponse.json()).status;
191
+ } catch (error) {
192
+ throw new InferenceOutputError("Failed to parse status response from fal-ai API");
193
+ }
194
+ }
195
+ const resultResponse = await fetch(resultUrl, { headers });
196
+ let result;
197
+ try {
198
+ result = await resultResponse.json();
199
+ } catch (error) {
200
+ throw new InferenceOutputError("Failed to parse result response from fal-ai API");
201
+ }
202
+ if (typeof result === "object" && !!result && "video" in result && typeof result.video === "object" && !!result.video && "url" in result.video && typeof result.video.url === "string" && isUrl(result.video.url)) {
203
+ const urlResponse = await fetch(result.video.url);
204
+ return await urlResponse.blob();
205
+ } else {
206
+ throw new InferenceOutputError(
207
+ "Expected { video: { url: string } } result format, got instead: " + JSON.stringify(result)
208
+ );
209
+ }
210
+ }
131
211
 
132
212
  // src/providers/fireworks-ai.ts
133
213
  var FIREWORKS_AI_API_BASE_URL = "https://api.fireworks.ai";
214
+ var makeBaseUrl5 = () => {
215
+ return FIREWORKS_AI_API_BASE_URL;
216
+ };
134
217
  var makeBody5 = (params) => {
135
218
  return {
136
219
  ...params.args,
@@ -147,13 +230,16 @@ var makeUrl5 = (params) => {
147
230
  return `${params.baseUrl}/inference`;
148
231
  };
149
232
  var FIREWORKS_AI_CONFIG = {
150
- baseUrl: FIREWORKS_AI_API_BASE_URL,
233
+ makeBaseUrl: makeBaseUrl5,
151
234
  makeBody: makeBody5,
152
235
  makeHeaders: makeHeaders5,
153
236
  makeUrl: makeUrl5
154
237
  };
155
238
 
156
239
  // src/providers/hf-inference.ts
240
+ var makeBaseUrl6 = () => {
241
+ return `${HF_ROUTER_URL}/hf-inference`;
242
+ };
157
243
  var makeBody6 = (params) => {
158
244
  return {
159
245
  ...params.args,
@@ -173,7 +259,7 @@ var makeUrl6 = (params) => {
173
259
  return `${params.baseUrl}/models/${params.model}`;
174
260
  };
175
261
  var HF_INFERENCE_CONFIG = {
176
- baseUrl: `${HF_ROUTER_URL}/hf-inference`,
262
+ makeBaseUrl: makeBaseUrl6,
177
263
  makeBody: makeBody6,
178
264
  makeHeaders: makeHeaders6,
179
265
  makeUrl: makeUrl6
@@ -181,6 +267,9 @@ var HF_INFERENCE_CONFIG = {
181
267
 
182
268
  // src/providers/hyperbolic.ts
183
269
  var HYPERBOLIC_API_BASE_URL = "https://api.hyperbolic.xyz";
270
+ var makeBaseUrl7 = () => {
271
+ return HYPERBOLIC_API_BASE_URL;
272
+ };
184
273
  var makeBody7 = (params) => {
185
274
  return {
186
275
  ...params.args,
@@ -197,7 +286,7 @@ var makeUrl7 = (params) => {
197
286
  return `${params.baseUrl}/v1/chat/completions`;
198
287
  };
199
288
  var HYPERBOLIC_CONFIG = {
200
- baseUrl: HYPERBOLIC_API_BASE_URL,
289
+ makeBaseUrl: makeBaseUrl7,
201
290
  makeBody: makeBody7,
202
291
  makeHeaders: makeHeaders7,
203
292
  makeUrl: makeUrl7
@@ -205,6 +294,9 @@ var HYPERBOLIC_CONFIG = {
205
294
 
206
295
  // src/providers/nebius.ts
207
296
  var NEBIUS_API_BASE_URL = "https://api.studio.nebius.ai";
297
+ var makeBaseUrl8 = () => {
298
+ return NEBIUS_API_BASE_URL;
299
+ };
208
300
  var makeBody8 = (params) => {
209
301
  return {
210
302
  ...params.args,
@@ -227,7 +319,7 @@ var makeUrl8 = (params) => {
227
319
  return params.baseUrl;
228
320
  };
229
321
  var NEBIUS_CONFIG = {
230
- baseUrl: NEBIUS_API_BASE_URL,
322
+ makeBaseUrl: makeBaseUrl8,
231
323
  makeBody: makeBody8,
232
324
  makeHeaders: makeHeaders8,
233
325
  makeUrl: makeUrl8
@@ -235,6 +327,9 @@ var NEBIUS_CONFIG = {
235
327
 
236
328
  // src/providers/novita.ts
237
329
  var NOVITA_API_BASE_URL = "https://api.novita.ai";
330
+ var makeBaseUrl9 = () => {
331
+ return NOVITA_API_BASE_URL;
332
+ };
238
333
  var makeBody9 = (params) => {
239
334
  return {
240
335
  ...params.args,
@@ -255,7 +350,7 @@ var makeUrl9 = (params) => {
255
350
  return params.baseUrl;
256
351
  };
257
352
  var NOVITA_CONFIG = {
258
- baseUrl: NOVITA_API_BASE_URL,
353
+ makeBaseUrl: makeBaseUrl9,
259
354
  makeBody: makeBody9,
260
355
  makeHeaders: makeHeaders9,
261
356
  makeUrl: makeUrl9
@@ -263,6 +358,9 @@ var NOVITA_CONFIG = {
263
358
 
264
359
  // src/providers/replicate.ts
265
360
  var REPLICATE_API_BASE_URL = "https://api.replicate.com";
361
+ var makeBaseUrl10 = () => {
362
+ return REPLICATE_API_BASE_URL;
363
+ };
266
364
  var makeBody10 = (params) => {
267
365
  return {
268
366
  input: params.args,
@@ -279,7 +377,7 @@ var makeUrl10 = (params) => {
279
377
  return `${params.baseUrl}/v1/models/${params.model}/predictions`;
280
378
  };
281
379
  var REPLICATE_CONFIG = {
282
- baseUrl: REPLICATE_API_BASE_URL,
380
+ makeBaseUrl: makeBaseUrl10,
283
381
  makeBody: makeBody10,
284
382
  makeHeaders: makeHeaders10,
285
383
  makeUrl: makeUrl10
@@ -287,6 +385,9 @@ var REPLICATE_CONFIG = {
287
385
 
288
386
  // src/providers/sambanova.ts
289
387
  var SAMBANOVA_API_BASE_URL = "https://api.sambanova.ai";
388
+ var makeBaseUrl11 = () => {
389
+ return SAMBANOVA_API_BASE_URL;
390
+ };
290
391
  var makeBody11 = (params) => {
291
392
  return {
292
393
  ...params.args,
@@ -303,7 +404,7 @@ var makeUrl11 = (params) => {
303
404
  return params.baseUrl;
304
405
  };
305
406
  var SAMBANOVA_CONFIG = {
306
- baseUrl: SAMBANOVA_API_BASE_URL,
407
+ makeBaseUrl: makeBaseUrl11,
307
408
  makeBody: makeBody11,
308
409
  makeHeaders: makeHeaders11,
309
410
  makeUrl: makeUrl11
@@ -311,6 +412,9 @@ var SAMBANOVA_CONFIG = {
311
412
 
312
413
  // src/providers/together.ts
313
414
  var TOGETHER_API_BASE_URL = "https://api.together.xyz";
415
+ var makeBaseUrl12 = () => {
416
+ return TOGETHER_API_BASE_URL;
417
+ };
314
418
  var makeBody12 = (params) => {
315
419
  return {
316
420
  ...params.args,
@@ -333,7 +437,7 @@ var makeUrl12 = (params) => {
333
437
  return params.baseUrl;
334
438
  };
335
439
  var TOGETHER_CONFIG = {
336
- baseUrl: TOGETHER_API_BASE_URL,
440
+ makeBaseUrl: makeBaseUrl12,
337
441
  makeBody: makeBody12,
338
442
  makeHeaders: makeHeaders12,
339
443
  makeUrl: makeUrl12
@@ -341,6 +445,9 @@ var TOGETHER_CONFIG = {
341
445
 
342
446
  // src/providers/openai.ts
343
447
  var OPENAI_API_BASE_URL = "https://api.openai.com";
448
+ var makeBaseUrl13 = () => {
449
+ return OPENAI_API_BASE_URL;
450
+ };
344
451
  var makeBody13 = (params) => {
345
452
  if (!params.chatCompletion) {
346
453
  throw new Error("OpenAI only supports chat completions.");
@@ -360,21 +467,16 @@ var makeUrl13 = (params) => {
360
467
  return `${params.baseUrl}/v1/chat/completions`;
361
468
  };
362
469
  var OPENAI_CONFIG = {
363
- baseUrl: OPENAI_API_BASE_URL,
470
+ makeBaseUrl: makeBaseUrl13,
364
471
  makeBody: makeBody13,
365
472
  makeHeaders: makeHeaders13,
366
473
  makeUrl: makeUrl13,
367
474
  clientSideRoutingOnly: true
368
475
  };
369
476
 
370
- // src/lib/isUrl.ts
371
- function isUrl(modelOrUrl) {
372
- return /^http(s?):/.test(modelOrUrl) || modelOrUrl.startsWith("/");
373
- }
374
-
375
477
  // package.json
376
478
  var name = "@huggingface/inference";
377
- var version = "3.6.1";
479
+ var version = "3.7.0";
378
480
 
379
481
  // src/providers/consts.ts
380
482
  var HARDCODED_MODEL_ID_MAPPING = {
@@ -496,7 +598,7 @@ function makeRequestOptionsFromResolvedModel(resolvedModel, args, options) {
496
598
  const { accessToken, endpointUrl, provider: maybeProvider, model, ...remainingArgs } = args;
497
599
  const provider = maybeProvider ?? "hf-inference";
498
600
  const providerConfig = providerConfigs[provider];
499
- const { includeCredentials, task, chatCompletion: chatCompletion2, signal } = options ?? {};
601
+ const { includeCredentials, task, chatCompletion: chatCompletion2, signal, billTo } = options ?? {};
500
602
  const authMethod = (() => {
501
603
  if (providerConfig.clientSideRoutingOnly) {
502
604
  if (accessToken && accessToken.startsWith("hf_")) {
@@ -513,7 +615,8 @@ function makeRequestOptionsFromResolvedModel(resolvedModel, args, options) {
513
615
  return "none";
514
616
  })();
515
617
  const url = endpointUrl ? chatCompletion2 ? endpointUrl + `/v1/chat/completions` : endpointUrl : providerConfig.makeUrl({
516
- baseUrl: authMethod !== "provider-key" ? HF_HUB_INFERENCE_PROXY_TEMPLATE.replace("{{PROVIDER}}", provider) : providerConfig.baseUrl,
618
+ authMethod,
619
+ baseUrl: authMethod !== "provider-key" ? HF_HUB_INFERENCE_PROXY_TEMPLATE.replace("{{PROVIDER}}", provider) : providerConfig.makeBaseUrl(task),
517
620
  model: resolvedModel,
518
621
  chatCompletion: chatCompletion2,
519
622
  task
@@ -523,6 +626,9 @@ function makeRequestOptionsFromResolvedModel(resolvedModel, args, options) {
523
626
  accessToken,
524
627
  authMethod
525
628
  });
629
+ if (billTo) {
630
+ headers[HF_HEADER_X_BILL_TO] = billTo;
631
+ }
526
632
  if (!binary) {
527
633
  headers["Content-Type"] = "application/json";
528
634
  }
@@ -576,37 +682,6 @@ function removeProviderPrefix(model, provider) {
576
682
  return model.slice(provider.length + 1);
577
683
  }
578
684
 
579
- // src/tasks/custom/request.ts
580
- async function request(args, options) {
581
- const { url, info } = await makeRequestOptions(args, options);
582
- const response = await (options?.fetch ?? fetch)(url, info);
583
- if (options?.retry_on_error !== false && response.status === 503) {
584
- return request(args, options);
585
- }
586
- if (!response.ok) {
587
- const contentType = response.headers.get("Content-Type");
588
- if (["application/json", "application/problem+json"].some((ct) => contentType?.startsWith(ct))) {
589
- const output = await response.json();
590
- if ([400, 422, 404, 500].includes(response.status) && options?.chatCompletion) {
591
- throw new Error(
592
- `Server ${args.model} does not seem to support chat completion. Error: ${JSON.stringify(output.error)}`
593
- );
594
- }
595
- if (output.error || output.detail) {
596
- throw new Error(JSON.stringify(output.error ?? output.detail));
597
- } else {
598
- throw new Error(output);
599
- }
600
- }
601
- const message = contentType?.startsWith("text/plain;") ? await response.text() : void 0;
602
- throw new Error(message ?? "An error occurred while fetching the blob");
603
- }
604
- if (response.headers.get("Content-Type")?.startsWith("application/json")) {
605
- return await response.json();
606
- }
607
- return await response.blob();
608
- }
609
-
610
685
  // src/vendor/fetch-event-source/parse.ts
611
686
  function getLines(onLine) {
612
687
  let buffer;
@@ -706,12 +781,44 @@ function newMessage() {
706
781
  };
707
782
  }
708
783
 
709
- // src/tasks/custom/streamingRequest.ts
710
- async function* streamingRequest(args, options) {
784
+ // src/utils/request.ts
785
+ async function innerRequest(args, options) {
786
+ const { url, info } = await makeRequestOptions(args, options);
787
+ const response = await (options?.fetch ?? fetch)(url, info);
788
+ const requestContext = { url, info };
789
+ if (options?.retry_on_error !== false && response.status === 503) {
790
+ return innerRequest(args, options);
791
+ }
792
+ if (!response.ok) {
793
+ const contentType = response.headers.get("Content-Type");
794
+ if (["application/json", "application/problem+json"].some((ct) => contentType?.startsWith(ct))) {
795
+ const output = await response.json();
796
+ if ([400, 422, 404, 500].includes(response.status) && options?.chatCompletion) {
797
+ throw new Error(
798
+ `Server ${args.model} does not seem to support chat completion. Error: ${JSON.stringify(output.error)}`
799
+ );
800
+ }
801
+ if (output.error || output.detail) {
802
+ throw new Error(JSON.stringify(output.error ?? output.detail));
803
+ } else {
804
+ throw new Error(output);
805
+ }
806
+ }
807
+ const message = contentType?.startsWith("text/plain;") ? await response.text() : void 0;
808
+ throw new Error(message ?? "An error occurred while fetching the blob");
809
+ }
810
+ if (response.headers.get("Content-Type")?.startsWith("application/json")) {
811
+ const data = await response.json();
812
+ return { data, requestContext };
813
+ }
814
+ const blob = await response.blob();
815
+ return { data: blob, requestContext };
816
+ }
817
+ async function* innerStreamingRequest(args, options) {
711
818
  const { url, info } = await makeRequestOptions({ ...args, stream: true }, options);
712
819
  const response = await (options?.fetch ?? fetch)(url, info);
713
820
  if (options?.retry_on_error !== false && response.status === 503) {
714
- return yield* streamingRequest(args, options);
821
+ return yield* innerStreamingRequest(args, options);
715
822
  }
716
823
  if (!response.ok) {
717
824
  if (response.headers.get("Content-Type")?.startsWith("application/json")) {
@@ -725,6 +832,9 @@ async function* streamingRequest(args, options) {
725
832
  if (output.error && "message" in output.error && typeof output.error.message === "string") {
726
833
  throw new Error(output.error.message);
727
834
  }
835
+ if (typeof output.message === "string") {
836
+ throw new Error(output.message);
837
+ }
728
838
  }
729
839
  throw new Error(`Server response contains error: ${response.status}`);
730
840
  }
@@ -777,15 +887,22 @@ async function* streamingRequest(args, options) {
777
887
  }
778
888
  }
779
889
 
780
- // src/lib/InferenceOutputError.ts
781
- var InferenceOutputError = class extends TypeError {
782
- constructor(message) {
783
- super(
784
- `Invalid inference output: ${message}. Use the 'request' method with the same parameters to do a custom call with no type checking.`
785
- );
786
- this.name = "InferenceOutputError";
787
- }
788
- };
890
+ // src/tasks/custom/request.ts
891
+ async function request(args, options) {
892
+ console.warn(
893
+ "The request method is deprecated and will be removed in a future version of huggingface.js. Use specific task functions instead."
894
+ );
895
+ const result = await innerRequest(args, options);
896
+ return result.data;
897
+ }
898
+
899
+ // src/tasks/custom/streamingRequest.ts
900
+ async function* streamingRequest(args, options) {
901
+ console.warn(
902
+ "The streamingRequest method is deprecated and will be removed in a future version of huggingface.js. Use specific task functions instead."
903
+ );
904
+ yield* innerStreamingRequest(args, options);
905
+ }
789
906
 
790
907
  // src/utils/pick.ts
791
908
  function pick(o, props) {
@@ -822,7 +939,7 @@ function preparePayload(args) {
822
939
  // src/tasks/audio/audioClassification.ts
823
940
  async function audioClassification(args, options) {
824
941
  const payload = preparePayload(args);
825
- const res = await request(payload, {
942
+ const { data: res } = await innerRequest(payload, {
826
943
  ...options,
827
944
  task: "audio-classification"
828
945
  });
@@ -849,7 +966,7 @@ function base64FromBytes(arr) {
849
966
  // src/tasks/audio/automaticSpeechRecognition.ts
850
967
  async function automaticSpeechRecognition(args, options) {
851
968
  const payload = await buildPayload(args);
852
- const res = await request(payload, {
969
+ const { data: res } = await innerRequest(payload, {
853
970
  ...options,
854
971
  task: "automatic-speech-recognition"
855
972
  });
@@ -893,7 +1010,7 @@ async function textToSpeech(args, options) {
893
1010
  ...args.parameters,
894
1011
  text: args.inputs
895
1012
  } : args;
896
- const res = await request(payload, {
1013
+ const { data: res } = await innerRequest(payload, {
897
1014
  ...options,
898
1015
  task: "text-to-speech"
899
1016
  });
@@ -919,7 +1036,7 @@ async function textToSpeech(args, options) {
919
1036
  // src/tasks/audio/audioToAudio.ts
920
1037
  async function audioToAudio(args, options) {
921
1038
  const payload = preparePayload(args);
922
- const res = await request(payload, {
1039
+ const { data: res } = await innerRequest(payload, {
923
1040
  ...options,
924
1041
  task: "audio-to-audio"
925
1042
  });
@@ -945,7 +1062,7 @@ function preparePayload2(args) {
945
1062
  // src/tasks/cv/imageClassification.ts
946
1063
  async function imageClassification(args, options) {
947
1064
  const payload = preparePayload2(args);
948
- const res = await request(payload, {
1065
+ const { data: res } = await innerRequest(payload, {
949
1066
  ...options,
950
1067
  task: "image-classification"
951
1068
  });
@@ -959,7 +1076,7 @@ async function imageClassification(args, options) {
959
1076
  // src/tasks/cv/imageSegmentation.ts
960
1077
  async function imageSegmentation(args, options) {
961
1078
  const payload = preparePayload2(args);
962
- const res = await request(payload, {
1079
+ const { data: res } = await innerRequest(payload, {
963
1080
  ...options,
964
1081
  task: "image-segmentation"
965
1082
  });
@@ -973,20 +1090,20 @@ async function imageSegmentation(args, options) {
973
1090
  // src/tasks/cv/imageToText.ts
974
1091
  async function imageToText(args, options) {
975
1092
  const payload = preparePayload2(args);
976
- const res = (await request(payload, {
1093
+ const { data: res } = await innerRequest(payload, {
977
1094
  ...options,
978
1095
  task: "image-to-text"
979
- }))?.[0];
980
- if (typeof res?.generated_text !== "string") {
1096
+ });
1097
+ if (typeof res?.[0]?.generated_text !== "string") {
981
1098
  throw new InferenceOutputError("Expected {generated_text: string}");
982
1099
  }
983
- return res;
1100
+ return res?.[0];
984
1101
  }
985
1102
 
986
1103
  // src/tasks/cv/objectDetection.ts
987
1104
  async function objectDetection(args, options) {
988
1105
  const payload = preparePayload2(args);
989
- const res = await request(payload, {
1106
+ const { data: res } = await innerRequest(payload, {
990
1107
  ...options,
991
1108
  task: "object-detection"
992
1109
  });
@@ -1001,13 +1118,6 @@ async function objectDetection(args, options) {
1001
1118
  return res;
1002
1119
  }
1003
1120
 
1004
- // src/utils/delay.ts
1005
- function delay(ms) {
1006
- return new Promise((resolve) => {
1007
- setTimeout(() => resolve(), ms);
1008
- });
1009
- }
1010
-
1011
1121
  // src/tasks/cv/textToImage.ts
1012
1122
  function getResponseFormatArg(provider) {
1013
1123
  switch (provider) {
@@ -1030,7 +1140,7 @@ async function textToImage(args, options) {
1030
1140
  ...getResponseFormatArg(args.provider),
1031
1141
  prompt: args.inputs
1032
1142
  };
1033
- const res = await request(payload, {
1143
+ const { data: res } = await innerRequest(payload, {
1034
1144
  ...options,
1035
1145
  task: "text-to-image"
1036
1146
  });
@@ -1119,7 +1229,7 @@ async function imageToImage(args, options) {
1119
1229
  )
1120
1230
  };
1121
1231
  }
1122
- const res = await request(reqArgs, {
1232
+ const { data: res } = await innerRequest(reqArgs, {
1123
1233
  ...options,
1124
1234
  task: "image-to-image"
1125
1235
  });
@@ -1154,7 +1264,7 @@ async function preparePayload3(args) {
1154
1264
  }
1155
1265
  async function zeroShotImageClassification(args, options) {
1156
1266
  const payload = await preparePayload3(args);
1157
- const res = await request(payload, {
1267
+ const { data: res } = await innerRequest(payload, {
1158
1268
  ...options,
1159
1269
  task: "zero-shot-image-classification"
1160
1270
  });
@@ -1174,37 +1284,36 @@ async function textToVideo(args, options) {
1174
1284
  );
1175
1285
  }
1176
1286
  const payload = args.provider === "fal-ai" || args.provider === "replicate" || args.provider === "novita" ? { ...omit(args, ["inputs", "parameters"]), ...args.parameters, prompt: args.inputs } : args;
1177
- const res = await request(payload, {
1287
+ const { data, requestContext } = await innerRequest(payload, {
1178
1288
  ...options,
1179
1289
  task: "text-to-video"
1180
1290
  });
1181
1291
  if (args.provider === "fal-ai") {
1182
- const isValidOutput = typeof res === "object" && !!res && "video" in res && typeof res.video === "object" && !!res.video && "url" in res.video && typeof res.video.url === "string" && isUrl(res.video.url);
1183
- if (!isValidOutput) {
1184
- throw new InferenceOutputError("Expected { video: { url: string } }");
1185
- }
1186
- const urlResponse = await fetch(res.video.url);
1187
- return await urlResponse.blob();
1292
+ return await pollFalResponse(
1293
+ data,
1294
+ requestContext.url,
1295
+ requestContext.info.headers
1296
+ );
1188
1297
  } else if (args.provider === "novita") {
1189
- const isValidOutput = typeof res === "object" && !!res && "video" in res && typeof res.video === "object" && !!res.video && "video_url" in res.video && typeof res.video.video_url === "string" && isUrl(res.video.video_url);
1298
+ const isValidOutput = typeof data === "object" && !!data && "video" in data && typeof data.video === "object" && !!data.video && "video_url" in data.video && typeof data.video.video_url === "string" && isUrl(data.video.video_url);
1190
1299
  if (!isValidOutput) {
1191
1300
  throw new InferenceOutputError("Expected { video: { video_url: string } }");
1192
1301
  }
1193
- const urlResponse = await fetch(res.video.video_url);
1302
+ const urlResponse = await fetch(data.video.video_url);
1194
1303
  return await urlResponse.blob();
1195
1304
  } else {
1196
- const isValidOutput = typeof res === "object" && !!res && "output" in res && typeof res.output === "string" && isUrl(res.output);
1305
+ const isValidOutput = typeof data === "object" && !!data && "output" in data && typeof data.output === "string" && isUrl(data.output);
1197
1306
  if (!isValidOutput) {
1198
1307
  throw new InferenceOutputError("Expected { output: string }");
1199
1308
  }
1200
- const urlResponse = await fetch(res.output);
1309
+ const urlResponse = await fetch(data.output);
1201
1310
  return await urlResponse.blob();
1202
1311
  }
1203
1312
  }
1204
1313
 
1205
1314
  // src/tasks/nlp/featureExtraction.ts
1206
1315
  async function featureExtraction(args, options) {
1207
- const res = await request(args, {
1316
+ const { data: res } = await innerRequest(args, {
1208
1317
  ...options,
1209
1318
  task: "feature-extraction"
1210
1319
  });
@@ -1227,7 +1336,7 @@ async function featureExtraction(args, options) {
1227
1336
 
1228
1337
  // src/tasks/nlp/fillMask.ts
1229
1338
  async function fillMask(args, options) {
1230
- const res = await request(args, {
1339
+ const { data: res } = await innerRequest(args, {
1231
1340
  ...options,
1232
1341
  task: "fill-mask"
1233
1342
  });
@@ -1244,7 +1353,7 @@ async function fillMask(args, options) {
1244
1353
 
1245
1354
  // src/tasks/nlp/questionAnswering.ts
1246
1355
  async function questionAnswering(args, options) {
1247
- const res = await request(args, {
1356
+ const { data: res } = await innerRequest(args, {
1248
1357
  ...options,
1249
1358
  task: "question-answering"
1250
1359
  });
@@ -1259,7 +1368,7 @@ async function questionAnswering(args, options) {
1259
1368
 
1260
1369
  // src/tasks/nlp/sentenceSimilarity.ts
1261
1370
  async function sentenceSimilarity(args, options) {
1262
- const res = await request(prepareInput(args), {
1371
+ const { data: res } = await innerRequest(args, {
1263
1372
  ...options,
1264
1373
  task: "sentence-similarity"
1265
1374
  });
@@ -1269,17 +1378,10 @@ async function sentenceSimilarity(args, options) {
1269
1378
  }
1270
1379
  return res;
1271
1380
  }
1272
- function prepareInput(args) {
1273
- return {
1274
- ...omit(args, ["inputs", "parameters"]),
1275
- inputs: { ...omit(args.inputs, "sourceSentence") },
1276
- parameters: { source_sentence: args.inputs.sourceSentence, ...args.parameters }
1277
- };
1278
- }
1279
1381
 
1280
1382
  // src/tasks/nlp/summarization.ts
1281
1383
  async function summarization(args, options) {
1282
- const res = await request(args, {
1384
+ const { data: res } = await innerRequest(args, {
1283
1385
  ...options,
1284
1386
  task: "summarization"
1285
1387
  });
@@ -1292,7 +1394,7 @@ async function summarization(args, options) {
1292
1394
 
1293
1395
  // src/tasks/nlp/tableQuestionAnswering.ts
1294
1396
  async function tableQuestionAnswering(args, options) {
1295
- const res = await request(args, {
1397
+ const { data: res } = await innerRequest(args, {
1296
1398
  ...options,
1297
1399
  task: "table-question-answering"
1298
1400
  });
@@ -1312,15 +1414,16 @@ function validate(elem) {
1312
1414
 
1313
1415
  // src/tasks/nlp/textClassification.ts
1314
1416
  async function textClassification(args, options) {
1315
- const res = (await request(args, {
1417
+ const { data: res } = await innerRequest(args, {
1316
1418
  ...options,
1317
1419
  task: "text-classification"
1318
- }))?.[0];
1319
- const isValidOutput = Array.isArray(res) && res.every((x) => typeof x?.label === "string" && typeof x.score === "number");
1420
+ });
1421
+ const output = res?.[0];
1422
+ const isValidOutput = Array.isArray(output) && output.every((x) => typeof x?.label === "string" && typeof x.score === "number");
1320
1423
  if (!isValidOutput) {
1321
1424
  throw new InferenceOutputError("Expected Array<{label: string, score: number}>");
1322
1425
  }
1323
- return res;
1426
+ return output;
1324
1427
  }
1325
1428
 
1326
1429
  // src/utils/toArray.ts
@@ -1335,7 +1438,7 @@ function toArray(obj) {
1335
1438
  async function textGeneration(args, options) {
1336
1439
  if (args.provider === "together") {
1337
1440
  args.prompt = args.inputs;
1338
- const raw = await request(args, {
1441
+ const { data: raw } = await innerRequest(args, {
1339
1442
  ...options,
1340
1443
  task: "text-generation"
1341
1444
  });
@@ -1356,10 +1459,10 @@ async function textGeneration(args, options) {
1356
1459
  } : void 0,
1357
1460
  ...omit(args, ["inputs", "parameters"])
1358
1461
  };
1359
- const raw = await request(payload, {
1462
+ const raw = (await innerRequest(payload, {
1360
1463
  ...options,
1361
1464
  task: "text-generation"
1362
- });
1465
+ })).data;
1363
1466
  const isValidOutput = typeof raw === "object" && "choices" in raw && Array.isArray(raw?.choices) && typeof raw?.model === "string";
1364
1467
  if (!isValidOutput) {
1365
1468
  throw new InferenceOutputError("Expected ChatCompletionOutput");
@@ -1369,23 +1472,22 @@ async function textGeneration(args, options) {
1369
1472
  generated_text: completion.message.content
1370
1473
  };
1371
1474
  } else {
1372
- const res = toArray(
1373
- await request(args, {
1374
- ...options,
1375
- task: "text-generation"
1376
- })
1377
- );
1378
- const isValidOutput = Array.isArray(res) && res.every((x) => "generated_text" in x && typeof x?.generated_text === "string");
1475
+ const { data: res } = await innerRequest(args, {
1476
+ ...options,
1477
+ task: "text-generation"
1478
+ });
1479
+ const output = toArray(res);
1480
+ const isValidOutput = Array.isArray(output) && output.every((x) => "generated_text" in x && typeof x?.generated_text === "string");
1379
1481
  if (!isValidOutput) {
1380
1482
  throw new InferenceOutputError("Expected Array<{generated_text: string}>");
1381
1483
  }
1382
- return res?.[0];
1484
+ return output?.[0];
1383
1485
  }
1384
1486
  }
1385
1487
 
1386
1488
  // src/tasks/nlp/textGenerationStream.ts
1387
1489
  async function* textGenerationStream(args, options) {
1388
- yield* streamingRequest(args, {
1490
+ yield* innerStreamingRequest(args, {
1389
1491
  ...options,
1390
1492
  task: "text-generation"
1391
1493
  });
@@ -1393,13 +1495,12 @@ async function* textGenerationStream(args, options) {
1393
1495
 
1394
1496
  // src/tasks/nlp/tokenClassification.ts
1395
1497
  async function tokenClassification(args, options) {
1396
- const res = toArray(
1397
- await request(args, {
1398
- ...options,
1399
- task: "token-classification"
1400
- })
1401
- );
1402
- const isValidOutput = Array.isArray(res) && res.every(
1498
+ const { data: res } = await innerRequest(args, {
1499
+ ...options,
1500
+ task: "token-classification"
1501
+ });
1502
+ const output = toArray(res);
1503
+ const isValidOutput = Array.isArray(output) && output.every(
1403
1504
  (x) => typeof x.end === "number" && typeof x.entity_group === "string" && typeof x.score === "number" && typeof x.start === "number" && typeof x.word === "string"
1404
1505
  );
1405
1506
  if (!isValidOutput) {
@@ -1407,12 +1508,12 @@ async function tokenClassification(args, options) {
1407
1508
  "Expected Array<{end: number, entity_group: string, score: number, start: number, word: string}>"
1408
1509
  );
1409
1510
  }
1410
- return res;
1511
+ return output;
1411
1512
  }
1412
1513
 
1413
1514
  // src/tasks/nlp/translation.ts
1414
1515
  async function translation(args, options) {
1415
- const res = await request(args, {
1516
+ const { data: res } = await innerRequest(args, {
1416
1517
  ...options,
1417
1518
  task: "translation"
1418
1519
  });
@@ -1425,24 +1526,23 @@ async function translation(args, options) {
1425
1526
 
1426
1527
  // src/tasks/nlp/zeroShotClassification.ts
1427
1528
  async function zeroShotClassification(args, options) {
1428
- const res = toArray(
1429
- await request(args, {
1430
- ...options,
1431
- task: "zero-shot-classification"
1432
- })
1433
- );
1434
- const isValidOutput = Array.isArray(res) && res.every(
1529
+ const { data: res } = await innerRequest(args, {
1530
+ ...options,
1531
+ task: "zero-shot-classification"
1532
+ });
1533
+ const output = toArray(res);
1534
+ const isValidOutput = Array.isArray(output) && output.every(
1435
1535
  (x) => Array.isArray(x.labels) && x.labels.every((_label) => typeof _label === "string") && Array.isArray(x.scores) && x.scores.every((_score) => typeof _score === "number") && typeof x.sequence === "string"
1436
1536
  );
1437
1537
  if (!isValidOutput) {
1438
1538
  throw new InferenceOutputError("Expected Array<{labels: string[], scores: number[], sequence: string}>");
1439
1539
  }
1440
- return res;
1540
+ return output;
1441
1541
  }
1442
1542
 
1443
1543
  // src/tasks/nlp/chatCompletion.ts
1444
1544
  async function chatCompletion(args, options) {
1445
- const res = await request(args, {
1545
+ const { data: res } = await innerRequest(args, {
1446
1546
  ...options,
1447
1547
  task: "text-generation",
1448
1548
  chatCompletion: true
@@ -1457,7 +1557,7 @@ async function chatCompletion(args, options) {
1457
1557
 
1458
1558
  // src/tasks/nlp/chatCompletionStream.ts
1459
1559
  async function* chatCompletionStream(args, options) {
1460
- yield* streamingRequest(args, {
1560
+ yield* innerStreamingRequest(args, {
1461
1561
  ...options,
1462
1562
  task: "text-generation",
1463
1563
  chatCompletion: true
@@ -1474,19 +1574,21 @@ async function documentQuestionAnswering(args, options) {
1474
1574
  image: base64FromBytes(new Uint8Array(await args.inputs.image.arrayBuffer()))
1475
1575
  }
1476
1576
  };
1477
- const res = toArray(
1478
- await request(reqArgs, {
1577
+ const { data: res } = await innerRequest(
1578
+ reqArgs,
1579
+ {
1479
1580
  ...options,
1480
1581
  task: "document-question-answering"
1481
- })
1582
+ }
1482
1583
  );
1483
- const isValidOutput = Array.isArray(res) && res.every(
1584
+ const output = toArray(res);
1585
+ const isValidOutput = Array.isArray(output) && output.every(
1484
1586
  (elem) => typeof elem === "object" && !!elem && typeof elem?.answer === "string" && (typeof elem.end === "number" || typeof elem.end === "undefined") && (typeof elem.score === "number" || typeof elem.score === "undefined") && (typeof elem.start === "number" || typeof elem.start === "undefined")
1485
1587
  );
1486
1588
  if (!isValidOutput) {
1487
1589
  throw new InferenceOutputError("Expected Array<{answer: string, end?: number, score?: number, start?: number}>");
1488
1590
  }
1489
- return res[0];
1591
+ return output[0];
1490
1592
  }
1491
1593
 
1492
1594
  // src/tasks/multimodal/visualQuestionAnswering.ts
@@ -1499,7 +1601,7 @@ async function visualQuestionAnswering(args, options) {
1499
1601
  image: base64FromBytes(new Uint8Array(await args.inputs.image.arrayBuffer()))
1500
1602
  }
1501
1603
  };
1502
- const res = await request(reqArgs, {
1604
+ const { data: res } = await innerRequest(reqArgs, {
1503
1605
  ...options,
1504
1606
  task: "visual-question-answering"
1505
1607
  });
@@ -1514,7 +1616,7 @@ async function visualQuestionAnswering(args, options) {
1514
1616
 
1515
1617
  // src/tasks/tabular/tabularRegression.ts
1516
1618
  async function tabularRegression(args, options) {
1517
- const res = await request(args, {
1619
+ const { data: res } = await innerRequest(args, {
1518
1620
  ...options,
1519
1621
  task: "tabular-regression"
1520
1622
  });
@@ -1527,7 +1629,7 @@ async function tabularRegression(args, options) {
1527
1629
 
1528
1630
  // src/tasks/tabular/tabularClassification.ts
1529
1631
  async function tabularClassification(args, options) {
1530
- const res = await request(args, {
1632
+ const { data: res } = await innerRequest(args, {
1531
1633
  ...options,
1532
1634
  task: "tabular-classification"
1533
1635
  });
@@ -1618,7 +1720,7 @@ var templates = {
1618
1720
  "basicAudio": 'async function query(data) {\n const response = await fetch(\n "{{ fullUrl }}",\n {\n headers: {\n Authorization: "{{ authorizationHeader }}",\n "Content-Type": "audio/flac"\n },\n method: "POST",\n body: JSON.stringify(data),\n }\n );\n const result = await response.json();\n return result;\n}\n\nquery({ inputs: {{ providerInputs.asObj.inputs }} }).then((response) => {\n console.log(JSON.stringify(response));\n});',
1619
1721
  "basicImage": 'async function query(data) {\n const response = await fetch(\n "{{ fullUrl }}",\n {\n headers: {\n Authorization: "{{ authorizationHeader }}",\n "Content-Type": "image/jpeg"\n },\n method: "POST",\n body: JSON.stringify(data),\n }\n );\n const result = await response.json();\n return result;\n}\n\nquery({ inputs: {{ providerInputs.asObj.inputs }} }).then((response) => {\n console.log(JSON.stringify(response));\n});',
1620
1722
  "textToAudio": '{% if model.library_name == "transformers" %}\nasync function query(data) {\n const response = await fetch(\n "{{ fullUrl }}",\n {\n headers: {\n Authorization: "{{ authorizationHeader }}",\n "Content-Type": "application/json",\n },\n method: "POST",\n body: JSON.stringify(data),\n }\n );\n const result = await response.blob();\n return result;\n}\n\nquery({ inputs: {{ providerInputs.asObj.inputs }} }).then((response) => {\n // Returns a byte object of the Audio wavform. Use it directly!\n});\n{% else %}\nasync function query(data) {\n const response = await fetch(\n "{{ fullUrl }}",\n {\n headers: {\n Authorization: "{{ authorizationHeader }}",\n "Content-Type": "application/json",\n },\n method: "POST",\n body: JSON.stringify(data),\n }\n );\n const result = await response.json();\n return result;\n}\n\nquery({ inputs: {{ providerInputs.asObj.inputs }} }).then((response) => {\n console.log(JSON.stringify(response));\n});\n{% endif %} ',
1621
- "textToImage": 'async function query(data) {\n const response = await fetch(\n "{{ fullUrl }}",\n {\n headers: {\n Authorization: "{{ authorizationHeader }}",\n "Content-Type": "application/json",\n },\n method: "POST",\n body: JSON.stringify(data),\n }\n );\n const result = await response.blob();\n return result;\n}\n\nquery({ inputs: {{ providerInputs.asObj.inputs }} }).then((response) => {\n // Use image\n});',
1723
+ "textToImage": 'async function query(data) {\n const response = await fetch(\n "{{ fullUrl }}",\n {\n headers: {\n Authorization: "{{ authorizationHeader }}",\n "Content-Type": "application/json",\n },\n method: "POST",\n body: JSON.stringify(data),\n }\n );\n const result = await response.blob();\n return result;\n}\n\n\nquery({ {{ providerInputs.asTsString }} }).then((response) => {\n // Use image\n});',
1622
1724
  "zeroShotClassification": 'async function query(data) {\n const response = await fetch(\n "{{ fullUrl }}",\n {\n headers: {\n Authorization: "{{ authorizationHeader }}",\n "Content-Type": "application/json",\n },\n method: "POST",\n body: JSON.stringify(data),\n }\n );\n const result = await response.json();\n return result;\n}\n\nquery({\n inputs: {{ providerInputs.asObj.inputs }},\n parameters: { candidate_labels: ["refund", "legal", "faq"] }\n}).then((response) => {\n console.log(JSON.stringify(response));\n});'
1623
1725
  },
1624
1726
  "huggingface.js": {