@huggingface/inference 2.6.4 → 2.6.5

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -1,12 +1,17 @@
- # 🤗 Hugging Face Inference API
+ # 🤗 Hugging Face Inference Endpoints

- A Typescript powered wrapper for the Hugging Face Inference API. Learn more about the Inference API at [Hugging Face](https://huggingface.co/docs/api-inference/index). It also works with [Inference Endpoints](https://huggingface.co/docs/inference-endpoints/index).
+ A Typescript powered wrapper for the Hugging Face Inference Endpoints API. Learn more about Inference Endpoints at [Hugging Face](https://huggingface.co/inference-endpoints).
+ It works with both [Inference API (serverless)](https://huggingface.co/docs/api-inference/index) and [Inference Endpoints (dedicated)](https://huggingface.co/docs/inference-endpoints/index).

  Check out the [full documentation](https://huggingface.co/docs/huggingface.js/inference/README).

- You can also try out a live [interactive notebook](https://observablehq.com/@huggingface/hello-huggingface-js-inference) or see some demos on [hf.co/huggingfacejs](https://huggingface.co/huggingfacejs).
+ You can also try out a live [interactive notebook](https://observablehq.com/@huggingface/hello-huggingface-js-inference), see some demos on [hf.co/huggingfacejs](https://huggingface.co/huggingfacejs), or watch a [Scrimba tutorial that explains how Inference Endpoints works](https://scrimba.com/scrim/cod8248f5adfd6e129582c523).

- ## Install
+ ## Getting Started
+
+ ### Install
+
+ #### Node

  ```console
  npm install @huggingface/inference
@@ -16,7 +21,7 @@ pnpm add @huggingface/inference
  yarn add @huggingface/inference
  ```

- ### Deno
+ #### Deno

  ```ts
  // esm.sh
@@ -25,26 +30,55 @@ import { HfInference } from "https://esm.sh/@huggingface/inference"
  import { HfInference } from "npm:@huggingface/inference"
  ```

- ## Usage
+
+ ### Initialize
+
+ ```typescript
+ import { HfInference } from '@huggingface/inference'
+
+ const hf = new HfInference('your access token')
+ ```

  ❗**Important note:** Using an access token is optional to get started, however you will be rate limited eventually. Join [Hugging Face](https://huggingface.co/join) and then visit [access tokens](https://huggingface.co/settings/tokens) to generate your access token for **free**.

  Your access token should be kept private. If you need to protect it in front-end applications, we suggest setting up a proxy server that stores the access token.
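> The paragraph above recommends a token-storing proxy but stops there. As a minimal sketch of the idea, purely illustrative and not part of this package (assumes Node 18+ for the global `fetch`; the model, port, and route are my choices, and `HF_TOKEN` mirrors the test command later in this diff):

```ts
// Hypothetical proxy: the browser calls this server, which attaches the
// secret token server-side and forwards the request to the Inference API.
import { createServer } from "node:http";

createServer(async (req, res) => {
  // Collect the client's JSON payload
  const body = await new Promise<string>((resolve) => {
    let data = "";
    req.on("data", (chunk) => (data += chunk));
    req.on("end", () => resolve(data));
  });
  // Forward it upstream; the token never reaches the front end
  const upstream = await fetch("https://api-inference.huggingface.co/models/gpt2", {
    method: "POST",
    headers: {
      Authorization: `Bearer ${process.env.HF_TOKEN}`,
      "Content-Type": "application/json",
    },
    body,
  });
  res.writeHead(upstream.status, { "Content-Type": "application/json" });
  res.end(await upstream.text());
}).listen(3000);
```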

- ### Basic examples

- ```typescript
- import { HfInference } from '@huggingface/inference'
+ #### Tree-shaking

- const hf = new HfInference('your access token')
+ You can import the functions you need directly from the module instead of using the `HfInference` class.
+
+ ```ts
+ import { textGeneration } from "@huggingface/inference";
+
+ await textGeneration({
+   accessToken: "hf_...",
+   model: "model_or_endpoint",
+   inputs: ...,
+   parameters: ...
+ })
+ ```
+
+ This will enable tree-shaking by your bundler.
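> The snippet above keeps the README's `...` placeholders. For illustration only, a filled-in call might look like this (model and prompt borrowed from the Text Generation example later in this README):

```ts
import { textGeneration } from "@huggingface/inference";

// Concrete stand-in for the placeholder arguments above
const { generated_text } = await textGeneration({
  accessToken: "hf_...",
  model: "gpt2",
  inputs: "The answer to the universe is",
});
console.log(generated_text);
```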

- // Natural Language
+ ## Natural Language Processing

+ ### Fill Mask
+
+ Tries to fill in a hole with a missing word (a token, to be precise).
+
+ ```typescript
  await hf.fillMask({
    model: 'bert-base-uncased',
    inputs: '[MASK] world!'
  })
+ ```
+
+ ### Summarization
+
+ Summarizes longer text into shorter text. Be careful: some models have a maximum input length.

+ ```typescript
  await hf.summarization({
    model: 'facebook/bart-large-cnn',
    inputs:
@@ -53,7 +87,13 @@ await hf.summarization({
      max_length: 100
    }
  })
+ ```
+
+ ### Question Answering

+ Answers questions based on the context you provide.
+
+ ```typescript
  await hf.questionAnswering({
    model: 'deepset/roberta-base-squad2',
    inputs: {
@@ -61,7 +101,11 @@ await hf.questionAnswering({
      context: 'The capital of France is Paris.'
    }
  })
+ ```

+ ### Table Question Answering
+
+ ```typescript
  await hf.tableQuestionAnswering({
    model: 'google/tapas-base-finetuned-wtq',
    inputs: {
@@ -74,12 +118,26 @@ await hf.tableQuestionAnswering({
      }
    }
  })
+ ```

+ ### Text Classification
+
+ Often used for sentiment analysis, this method assigns labels to the given text along with a probability score for each label.
+
+ ```typescript
  await hf.textClassification({
    model: 'distilbert-base-uncased-finetuned-sst-2-english',
    inputs: 'I like you. I love you.'
  })
+ ```
+
+ ### Text Generation
+
+ Generates text from an input prompt.

+ [Demo](https://huggingface.co/spaces/huggingfacejs/streaming-text-generation)
+
+ ```typescript
  await hf.textGeneration({
    model: 'gpt2',
    inputs: 'The answer to the universe is'
@@ -92,17 +150,44 @@ for await (const output of hf.textGenerationStream({
  })) {
    console.log(output.token.text, output.generated_text);
  }
+ ```

+ ### Token Classification
+
+ Used for sentence parsing, either grammatical or Named Entity Recognition (NER), to understand keywords contained within text.
+
+ ```typescript
  await hf.tokenClassification({
    model: 'dbmdz/bert-large-cased-finetuned-conll03-english',
    inputs: 'My name is Sarah Jessica Parker but you can call me Jessica'
  })
+ ```
+
+ ### Translation
+
+ Converts text from one language to another.

+ ```typescript
  await hf.translation({
    model: 't5-base',
    inputs: 'My name is Wolfgang and I live in Berlin'
  })

+ await hf.translation({
+   model: 'facebook/mbart-large-50-many-to-many-mmt',
+   inputs: textToTranslate,
+   parameters: {
+     "src_lang": "en_XX",
+     "tgt_lang": "fr_XX"
+   }
+ })
+ ```
+
+ ### Zero-Shot Classification
+
+ Checks how well an input text fits into a set of labels you provide.
+
+ ```typescript
  await hf.zeroShotClassification({
    model: 'facebook/bart-large-mnli',
    inputs: [
@@ -110,7 +195,13 @@ await hf.zeroShotClassification({
    ],
    parameters: { candidate_labels: ['refund', 'legal', 'faq'] }
  })
+ ```
+
+ ### Conversational
+
+ This task corresponds to any chatbot-like structure. Models tend to have a short max_length, so check carefully whether a given model supports the long-range dependencies you need.

+ ```typescript
  await hf.conversational({
    model: 'microsoft/DialoGPT-large',
    inputs: {
@@ -119,7 +210,13 @@ await hf.conversational({
      text: 'Can you explain why ?'
    }
  })
+ ```
+
+ ### Sentence Similarity

+ Calculates the semantic similarity between one text and a list of other sentences.
+
+ ```typescript
  await hf.sentenceSimilarity({
    model: 'sentence-transformers/paraphrase-xlm-r-multilingual-v1',
    inputs: {
@@ -131,51 +228,117 @@ await hf.sentenceSimilarity({
      ]
    }
  })
+ ```

- await hf.featureExtraction({
-   model: "sentence-transformers/distilbert-base-nli-mean-tokens",
-   inputs: "That is a happy person",
- });
+ ## Audio
+
+ ### Automatic Speech Recognition
+
+ Transcribes speech from an audio file.

- // Audio
+ [Demo](https://huggingface.co/spaces/huggingfacejs/speech-recognition-vue)

+ ```typescript
  await hf.automaticSpeechRecognition({
    model: 'facebook/wav2vec2-large-960h-lv60-self',
    data: readFileSync('test/sample1.flac')
  })
+ ```
+
+ ### Audio Classification

+ Assigns labels to the given audio along with a probability score for each label.
+
+ [Demo](https://huggingface.co/spaces/huggingfacejs/audio-classification-vue)
+
+ ```typescript
  await hf.audioClassification({
    model: 'superb/hubert-large-superb-er',
    data: readFileSync('test/sample1.flac')
  })
+ ```
+
+ ### Text To Speech
+
+ Generates natural-sounding speech from text input.

+ [Interactive tutorial](https://scrimba.com/scrim/co8da4d23b49b648f77f4848a?pl=pkVnrP7uP)
+
+ ```typescript
  await hf.textToSpeech({
    model: 'espnet/kan-bayashi_ljspeech_vits',
    inputs: 'Hello world!'
  })
+ ```

+ ### Audio To Audio
+
+ Outputs one or more generated audio files from an input audio file, commonly used for speech enhancement and source separation.
+
+ ```typescript
  await hf.audioToAudio({
    model: 'speechbrain/sepformer-wham',
    data: readFileSync('test/sample1.flac')
  })
+ ```
+
+ ## Computer Vision
+
+ ### Image Classification

- // Computer Vision
+ Assigns labels to a given image along with a probability score for each label.

+ [Demo](https://huggingface.co/spaces/huggingfacejs/image-classification-vue)
+
+ ```typescript
  await hf.imageClassification({
    data: readFileSync('test/cheetah.png'),
    model: 'google/vit-base-patch16-224'
  })
+ ```
+
+ ### Object Detection
+
+ Detects objects within an image and returns labels with corresponding bounding boxes and probability scores.
+
+ [Demo](https://huggingface.co/spaces/huggingfacejs/object-detection-vue)

+ ```typescript
  await hf.objectDetection({
    data: readFileSync('test/cats.png'),
    model: 'facebook/detr-resnet-50'
  })
+ ```
+
+ ### Image Segmentation

+ Detects segments within an image and returns labels with corresponding masks and probability scores.
+
+ ```typescript
  await hf.imageSegmentation({
    data: readFileSync('test/cats.png'),
    model: 'facebook/detr-resnet-50-panoptic'
  })
+ ```
+
+ ### Image To Text
+
+ Outputs text from a given image, commonly used for captioning or optical character recognition.
+
+ ```typescript
+ await hf.imageToText({
+   data: readFileSync('test/cats.png'),
+   model: 'nlpconnect/vit-gpt2-image-captioning'
+ })
+ ```
+
+ ### Text To Image
+
+ Creates an image from a text prompt.
+
+ [Demo](https://huggingface.co/spaces/huggingfacejs/image-to-text)

+ ```typescript
  await hf.textToImage({
    inputs: 'award winning high resolution photo of a giant tortoise/((ladybird)) hybrid, [trending on artstation]',
    model: 'stabilityai/stable-diffusion-2',
@@ -183,12 +346,15 @@ await hf.textToImage({
      negative_prompt: 'blurry',
    }
  })
+ ```

- await hf.imageToText({
-   data: readFileSync('test/cats.png'),
-   model: 'nlpconnect/vit-gpt2-image-captioning'
- })
+ ### Image To Image

+ Image-to-image is the task of transforming a source image to match the characteristics of a target image or a target image domain.
+
+ [Interactive tutorial](https://scrimba.com/scrim/co4834bf9a91cc81cfab07969?pl=pkVnrP7uP)
+
+ ```typescript
  await hf.imageToImage({
    inputs: new Blob([readFileSync("test/stormtrooper_depth.png")]),
    parameters: {
@@ -196,7 +362,13 @@ await hf.imageToImage({
    },
    model: "lllyasviel/sd-controlnet-depth",
  });
+ ```
+
+ ### Zero Shot Image Classification
+
+ Checks how well an input image fits into a set of labels you provide.

+ ```typescript
  await hf.zeroShotImageClassification({
    model: 'openai/clip-vit-large-patch14-336',
    inputs: {
@@ -206,9 +378,28 @@ await hf.zeroShotImageClassification({
      candidate_labels: ['cat', 'dog']
    }
  })
+ ```
+
+ ## Multimodal
+
+ ### Feature Extraction
+
+ This task reads some text and outputs raw float values that are typically consumed as part of a semantic database or semantic search.
+
+ ```typescript
+ await hf.featureExtraction({
+   model: "sentence-transformers/distilbert-base-nli-mean-tokens",
+   inputs: "That is a happy person",
+ });
+ ```

- // Multimodal
+ ### Visual Question Answering

+ Visual Question Answering is the task of answering open-ended questions based on an image. These models output natural language responses to natural language questions.
+
+ [Demo](https://huggingface.co/spaces/huggingfacejs/doc-vis-qa)
+
+ ```typescript
  await hf.visualQuestionAnswering({
    model: 'dandelin/vilt-b32-finetuned-vqa',
    inputs: {
@@ -216,7 +407,15 @@ await hf.visualQuestionAnswering({
      image: await (await fetch('https://placekitten.com/300/300')).blob()
    }
  })
+ ```
+
+ ### Document Question Answering
+
+ Document question answering models take a (document, question) pair as input and return an answer in natural language.

+ [Demo](https://huggingface.co/spaces/huggingfacejs/doc-vis-qa)
+
+ ```typescript
  await hf.documentQuestionAnswering({
    model: 'impira/layoutlm-document-qa',
    inputs: {
@@ -224,9 +423,15 @@ await hf.documentQuestionAnswering({
      image: await (await fetch('https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png')).blob(),
    }
  })
+ ```

- // Tabular
+ ## Tabular

+ ### Tabular Regression
+
+ Tabular regression is the task of predicting a numerical value given a set of attributes.
+
+ ```typescript
  await hf.tabularRegression({
    model: "scikit-learn/Fish-Weight",
    inputs: {
@@ -240,7 +445,13 @@ await hf.tabularRegression({
      },
    },
  })
+ ```
+
+ ### Tabular Classification
+
+ Tabular classification is the task of classifying a target category (a group) based on a set of attributes.

+ ```typescript
  await hf.tabularClassification({
    model: "vvmnnnkv/wine-quality",
    inputs: {
@@ -259,8 +470,13 @@ await hf.tabularClassification({
      },
    },
  })
+ ```
+
+ ## Custom Calls

- // Custom call, for models with custom parameters / outputs
+ For models with custom parameters / outputs.
+
+ ```typescript
  await hf.request({
    model: 'my-custom-model',
    inputs: 'hello world',
@@ -279,79 +495,21 @@ for await (const output of hf.streamingRequest({
  })) {
    ...
  }
-
- // Using your own inference endpoint: https://hf.co/docs/inference-endpoints/
- const gpt2 = hf.endpoint('https://xyz.eu-west-1.aws.endpoints.huggingface.cloud/gpt2');
- const { generated_text } = await gpt2.textGeneration({inputs: 'The answer to the universe is'});
  ```

- ## Supported Tasks
-
- ### Natural Language Processing
+ ## Custom Inference Endpoints

- - [x] Fill mask
- - [x] Summarization
- - [x] Question answering
- - [x] Table question answering
- - [x] Text classification
- - [x] Text generation - [demo](https://huggingface.co/spaces/huggingfacejs/streaming-text-generation)
- - [x] Text2Text generation
- - [x] Token classification
- - [x] Named entity recognition
- - [x] Translation
- - [x] Zero-shot classification
- - [x] Conversational
- - [x] Feature extraction
- - [x] Sentence Similarity
+ Learn more about using your own inference endpoints [here](https://hf.co/docs/inference-endpoints/).

- ### Audio
-
- - [x] Automatic speech recognition
- - [x] Audio classification
- - [x] Text to speech
- - [x] Audio to audio
-
- ### Computer Vision
-
- - [x] Image classification
- - [x] Object detection
- - [x] Image segmentation
- - [x] Text to image
- - [x] Image to text - [demo](https://huggingface.co/spaces/huggingfacejs/image-to-text)
- - [x] Image to Image
- - [x] Zero-shot image classification
-
- ### Multimodal
-
- - [x] Document question answering - [demo](https://huggingface.co/spaces/huggingfacejs/doc-vis-qa)
- - [x] Visual question answering - [demo](https://huggingface.co/spaces/huggingfacejs/doc-vis-qa)
-
- ### Tabular
-
- - [x] Tabular regression
- - [x] Tabular classification
-
- ## Tree-shaking
-
- You can import the functions you need directly from the module, rather than using the `HfInference` class:
-
- ```ts
- import {textGeneration} from "@huggingface/inference";
-
- await textGeneration({
-   accessToken: "hf_...",
-   model: "model_or_endpoint",
-   inputs: ...,
-   parameters: ...
- })
+ ```typescript
+ const gpt2 = hf.endpoint('https://xyz.eu-west-1.aws.endpoints.huggingface.cloud/gpt2');
+ const { generated_text } = await gpt2.textGeneration({inputs: 'The answer to the universe is'});
  ```

- This will enable tree-shaking by your bundler.
-
  ## Running tests

  ```console
- HF_ACCESS_TOKEN="your access token" pnpm run test
+ HF_TOKEN="your access token" pnpm run test
  ```

  ## Finding appropriate models
@@ -1,9 +1,60 @@
  /// <reference path="./index.d.ts" />
+ "use strict";
  var __defProp = Object.defineProperty;
+ var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
+ var __getOwnPropNames = Object.getOwnPropertyNames;
+ var __hasOwnProp = Object.prototype.hasOwnProperty;
  var __export = (target, all) => {
    for (var name in all)
      __defProp(target, name, { get: all[name], enumerable: true });
  };
+ var __copyProps = (to, from, except, desc) => {
+   if (from && typeof from === "object" || typeof from === "function") {
+     for (let key of __getOwnPropNames(from))
+       if (!__hasOwnProp.call(to, key) && key !== except)
+         __defProp(to, key, { get: () => from[key], enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable });
+   }
+   return to;
+ };
+ var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);
+
+ // src/index.ts
+ var src_exports = {};
+ __export(src_exports, {
+   HfInference: () => HfInference,
+   HfInferenceEndpoint: () => HfInferenceEndpoint,
+   InferenceOutputError: () => InferenceOutputError,
+   audioClassification: () => audioClassification,
+   audioToAudio: () => audioToAudio,
+   automaticSpeechRecognition: () => automaticSpeechRecognition,
+   documentQuestionAnswering: () => documentQuestionAnswering,
+   featureExtraction: () => featureExtraction,
+   fillMask: () => fillMask,
+   imageClassification: () => imageClassification,
+   imageSegmentation: () => imageSegmentation,
+   imageToImage: () => imageToImage,
+   imageToText: () => imageToText,
+   objectDetection: () => objectDetection,
+   questionAnswering: () => questionAnswering,
+   request: () => request,
+   sentenceSimilarity: () => sentenceSimilarity,
+   streamingRequest: () => streamingRequest,
+   summarization: () => summarization,
+   tableQuestionAnswering: () => tableQuestionAnswering,
+   tabularClassification: () => tabularClassification,
+   tabularRegression: () => tabularRegression,
+   textClassification: () => textClassification,
+   textGeneration: () => textGeneration,
+   textGenerationStream: () => textGenerationStream,
+   textToImage: () => textToImage,
+   textToSpeech: () => textToSpeech,
+   tokenClassification: () => tokenClassification,
+   translation: () => translation,
+   visualQuestionAnswering: () => visualQuestionAnswering,
+   zeroShotClassification: () => zeroShotClassification,
+   zeroShotImageClassification: () => zeroShotImageClassification
+ });
+ module.exports = __toCommonJS(src_exports);

  // src/tasks/index.ts
  var tasks_exports = {};
@@ -11,7 +62,6 @@ __export(tasks_exports, {
    audioClassification: () => audioClassification,
    audioToAudio: () => audioToAudio,
    automaticSpeechRecognition: () => automaticSpeechRecognition,
-   conversational: () => conversational,
    documentQuestionAnswering: () => documentQuestionAnswering,
    featureExtraction: () => featureExtraction,
    fillMask: () => fillMask,
@@ -275,7 +325,7 @@ async function* streamingRequest(args, options) {
    const { url, info } = await makeRequestOptions({ ...args, stream: true }, options);
    const response = await (options?.fetch ?? fetch)(url, info);
    if (options?.retry_on_error !== false && response.status === 503 && !options?.wait_for_model) {
-     return streamingRequest(args, {
+     return yield* streamingRequest(args, {
      ...options,
      wait_for_model: true
    });
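> The hunk above fixes the 503 retry path: inside a generator, `return streamingRequest(...)` ends the outer generator and discards everything the retried stream would have produced, while `return yield* streamingRequest(...)` delegates so the caller receives those values. A self-contained illustration with made-up names (not code from this package):

```ts
// An inner async generator standing in for the retried streamingRequest call
async function* inner(): AsyncGenerator<number> {
  yield 1;
  yield 2;
}

// Before the fix: the inner generator object is returned, never iterated,
// so a for-await loop over broken() sees no values at all.
async function* broken(): AsyncGenerator<number, unknown> {
  return inner();
}

// After the fix: yield* forwards every value from the retry.
async function* fixed(): AsyncGenerator<number> {
  return yield* inner();
}

for await (const v of broken()) console.log("broken:", v); // prints nothing
for await (const v of fixed()) console.log("fixed:", v); // fixed: 1, fixed: 2
```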
@@ -533,18 +583,6 @@ async function zeroShotImageClassification(args, options) {
    return res;
  }

- // src/tasks/nlp/conversational.ts
- async function conversational(args, options) {
-   const res = await request(args, { ...options, taskHint: "conversational" });
-   const isValidOutput = Array.isArray(res.conversation.generated_responses) && res.conversation.generated_responses.every((x) => typeof x === "string") && Array.isArray(res.conversation.past_user_inputs) && res.conversation.past_user_inputs.every((x) => typeof x === "string") && typeof res.generated_text === "string" && (typeof res.warnings === "undefined" || Array.isArray(res.warnings) && res.warnings.every((x) => typeof x === "string"));
-   if (!isValidOutput) {
-     throw new InferenceOutputError(
-       "Expected {conversation: {generated_responses: string[], past_user_inputs: string[]}, generated_text: string, warnings: string[]}"
-     );
-   }
-   return res;
- }
-
  // src/tasks/nlp/featureExtraction.ts
  async function featureExtraction(args, options) {
    const defaultTask = args.model ? await getDefaultTask(args.model, args.accessToken, options) : void 0;
@@ -714,7 +752,7 @@ async function translation(args, options) {
    if (!isValidOutput) {
      throw new InferenceOutputError("Expected type Array<{translation_text: string}>");
    }
-   return res?.[0];
+   return res?.length === 1 ? res?.[0] : res;
  }

  // src/tasks/nlp/zeroShotClassification.ts
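> The change above alters `translation`'s return shape: one result is still unwrapped to a single object, but when the API returns several results, 2.6.5 now returns the whole array where 2.6.4 silently kept only `res[0]`. A hedged sketch of the caller-visible difference (model and first input come from the README earlier in this diff; batched string inputs are my assumption, and the published typings may be narrower than the runtime):

```ts
import { HfInference } from "@huggingface/inference";

const hf = new HfInference("hf_...");

// Single input: unwrapped object, identical behavior in 2.6.4 and 2.6.5
const single = await hf.translation({
  model: "t5-base",
  inputs: "My name is Wolfgang and I live in Berlin",
});

// Multiple inputs: 2.6.5 returns one entry per input instead of only the first
const batch = await hf.translation({
  model: "t5-base",
  inputs: ["Hello world", "Good morning"] as unknown as string, // typings lag the runtime here (assumption)
});

console.log(single, batch);
```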
@@ -851,14 +889,14 @@ var HfInferenceEndpoint = class {
      }
    }
  };
- export {
+ // Annotate the CommonJS export names for ESM import in node:
+ 0 && (module.exports = {
    HfInference,
    HfInferenceEndpoint,
    InferenceOutputError,
    audioClassification,
    audioToAudio,
    automaticSpeechRecognition,
-   conversational,
    documentQuestionAnswering,
    featureExtraction,
    fillMask,
@@ -885,4 +923,4 @@ export {
    visualQuestionAnswering,
    zeroShotClassification,
    zeroShotImageClassification
- };
+ });
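> One note on the `0 && (module.exports = { ... })` block above: it never executes, but Node's CommonJS named-export detection reads it statically, which is what lets ESM consumers keep named imports against this CJS build (this is the standard esbuild annotation pattern; the usage below is my sketch, not from the package):

```ts
// In a Node ESM module, named imports from the CJS bundle work because
// Node can statically discover the export names from the annotation.
import { HfInference, textGeneration } from "@huggingface/inference";

const hf = new HfInference(process.env.HF_TOKEN ?? "");
console.log(typeof hf.textGeneration, typeof textGeneration); // "function" "function"
```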