@huggingface/inference 2.3.0 → 2.3.2

package/README.md CHANGED
@@ -16,6 +16,13 @@ yarn add @huggingface/inference
  pnpm add @huggingface/inference
  ```
 
+ ### Deno
+
+ ```
+ // This should automatically load the types located at https://www.unpkg.com/@huggingface/inference@2.3.2/dist/index.d.ts
+ import { HfInference } from "https://www.unpkg.com/@huggingface/inference@2.3.2/dist/index.mjs"
+ ```
+
  ## Usage
 
  ❗**Important note:** Using an access token is optional to get started; however, you will eventually be rate-limited. Join [Hugging Face](https://huggingface.co/join) and then visit [access tokens](https://huggingface.co/settings/tokens) to generate your access token for **free**.
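
For context, here is a minimal usage sketch against the 2.3.2 API surface declared in the new `index.d.ts` below; the model and prompt are illustrative, not prescribed by the package:

```ts
import { HfInference } from "@huggingface/inference";

// The access token is optional but avoids rate limits (hf.co/settings/tokens).
const hf = new HfInference("hf_...");

const { generated_text } = await hf.textGeneration({
  model: "gpt2", // the recommended starter model from the typings below
  inputs: "The answer to the universe is",
});
console.log(generated_text);
```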
@@ -0,0 +1,1260 @@
+ export interface Options {
+   /**
+    * (Default: true) Boolean. If a request 503s and wait_for_model is set to false, the request will be retried with the same parameters but with wait_for_model set to true.
+    */
+   retry_on_error?: boolean;
+   /**
+    * (Default: true). Boolean. There is a cache layer on the inference API to speed up requests we have already seen. Most models can use those results as-is, since models are deterministic (meaning the results will be the same anyway). However, if you use a non-deterministic model, you can set this parameter to prevent the caching mechanism from being used, resulting in a genuinely new query.
+    */
+   use_cache?: boolean;
+   /**
+    * (Default: false). Boolean. Do not load the model if it's not already available.
+    */
+   dont_load_model?: boolean;
+   /**
+    * (Default: false). Boolean to use GPU instead of CPU for inference (requires the Startup plan at least).
+    */
+   use_gpu?: boolean;
+
+   /**
+    * (Default: false) Boolean. If the model is not ready, wait for it instead of receiving 503. It limits the number of requests required to get your inference done. It is advised to only set this flag to true after receiving a 503 error, as it will limit hanging in your application to known places.
+    */
+   wait_for_model?: boolean;
+   /**
+    * Custom fetch function to use instead of the default one, for example to use a proxy or edit headers.
+    */
+   fetch?: typeof fetch;
+ }
+
+ export interface BaseArgs {
+   /**
+    * The access token to use. Without it, you'll get rate-limited quickly.
+    *
+    * Can be created for free at hf.co/settings/tokens
+    */
+   accessToken?: string;
+   /**
+    * The model to use. Can be a full URL for HF inference endpoints.
+    */
+   model: string;
+ }
+
+ export type RequestArgs = BaseArgs &
+   ({ data: Blob | ArrayBuffer } | { inputs: unknown }) & {
+     parameters?: Record<string, unknown>;
+     accessToken?: string;
+   };
+
+ export class InferenceOutputError extends TypeError {}
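
A minimal sketch of how `BaseArgs` and `Options` combine in a call (the model name and input are illustrative):

```ts
import { HfInference } from "@huggingface/inference";

const hf = new HfInference("hf_..."); // access token from hf.co/settings/tokens

// Options are passed per call; wait_for_model avoids 503s while a model loads.
const result = await hf.textClassification(
  { model: "distilbert-base-uncased-finetuned-sst-2-english", inputs: "Great library!" },
  { wait_for_model: true, use_cache: false }
);
```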
+ export type AudioClassificationArgs = BaseArgs & {
+   /**
+    * Binary audio data
+    */
+   data: Blob | ArrayBuffer;
+ };
+ export type AudioClassificationReturn = AudioClassificationOutputValue[];
+ export interface AudioClassificationOutputValue {
+   /**
+    * The label for the class (model specific)
+    */
+   label: string;
+
+   /**
+    * A float that represents how likely it is that the audio file belongs to this class.
+    */
+   score: number;
+ }
+ /**
+  * This task reads some audio input and outputs the likelihood of classes.
+  * Recommended model: superb/hubert-large-superb-er
+  */
+ export function audioClassification(
+   args: AudioClassificationArgs,
+   options?: Options
+ ): Promise<AudioClassificationReturn>;
+ export type AutomaticSpeechRecognitionArgs = BaseArgs & {
+   /**
+    * Binary audio data
+    */
+   data: Blob | ArrayBuffer;
+ };
+ export interface AutomaticSpeechRecognitionOutput {
+   /**
+    * The text that was recognized from the audio
+    */
+   text: string;
+ }
+ /**
+  * This task reads some audio input and outputs the words spoken in the audio file.
+  * Recommended model (English language): facebook/wav2vec2-large-960h-lv60-self
+  */
+ export function automaticSpeechRecognition(
+   args: AutomaticSpeechRecognitionArgs,
+   options?: Options
+ ): Promise<AutomaticSpeechRecognitionOutput>;
+ export type TextToSpeechArgs = BaseArgs & {
+   /**
+    * The text to generate audio from
+    */
+   inputs: string;
+ };
+ export type TextToSpeechOutput = Blob;
+ /**
+  * This task synthesizes audio of a voice pronouncing a given text.
+  * Recommended model: espnet/kan-bayashi_ljspeech_vits
+  */
+ export function textToSpeech(args: TextToSpeechArgs, options?: Options): Promise<TextToSpeechOutput>;
+ /**
+  * Primitive to make custom calls to the inference API
+  */
+ export function request<T>(
+   args: RequestArgs,
+   options?: Options & {
+     /** For internal HF use, which is why it's not exposed in {@link Options} */
+     includeCredentials?: boolean;
+   }
+ ): Promise<T>;
+ /**
+  * Primitive to make custom inference calls that expect server-sent events, and returns the response through a generator
+  */
+ export function streamingRequest<T>(
+   args: RequestArgs,
+   options?: Options & {
+     /** For internal HF use, which is why it's not exposed in {@link Options} */
+     includeCredentials?: boolean;
+   }
+ ): AsyncGenerator<T>;
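
The two primitives above accept arbitrary payloads; a hedged sketch (the model and the response type parameter are illustrative, chosen by the caller, not enforced by the typings):

```ts
import { request } from "@huggingface/inference";

// The response type parameter is whatever JSON shape the model's task returns;
// for a gpt2-style text-generation model that is an array of generations.
const out = await request<Array<{ generated_text: string }>>({
  model: "gpt2", // illustrative
  inputs: "Hello",
});
```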
+ export type ImageClassificationArgs = BaseArgs & {
+   /**
+    * Binary image data
+    */
+   data: Blob | ArrayBuffer;
+ };
+ export type ImageClassificationOutput = ImageClassificationOutputValue[];
+ export interface ImageClassificationOutputValue {
+   /**
+    * The label for the class (model specific)
+    */
+   label: string;
+   /**
+    * A float that represents how likely it is that the image file belongs to this class.
+    */
+   score: number;
+ }
+ /**
+  * This task reads some image input and outputs the likelihood of classes.
+  * Recommended model: google/vit-base-patch16-224
+  */
+ export function imageClassification(
+   args: ImageClassificationArgs,
+   options?: Options
+ ): Promise<ImageClassificationOutput>;
+ export type ImageSegmentationArgs = BaseArgs & {
+   /**
+    * Binary image data
+    */
+   data: Blob | ArrayBuffer;
+ };
+ export type ImageSegmentationOutput = ImageSegmentationOutputValue[];
+ export interface ImageSegmentationOutputValue {
+   /**
+    * The label for the class (model specific) of a segment.
+    */
+   label: string;
+   /**
+    * A string (base64 of a single-channel black-and-white image) representing the mask of a segment.
+    */
+   mask: string;
+   /**
+    * A float that represents how likely it is that the detected object belongs to the given class.
+    */
+   score: number;
+ }
+ /**
+  * This task reads some image input and outputs the likelihood of classes & bounding boxes of detected objects.
+  * Recommended model: facebook/detr-resnet-50-panoptic
+  */
+ export function imageSegmentation(
+   args: ImageSegmentationArgs,
+   options?: Options
+ ): Promise<ImageSegmentationOutput>;
+ export type ImageToImageArgs = BaseArgs & {
+   /**
+    * The initial image condition
+    */
+   inputs: Blob | ArrayBuffer;
+
+   parameters?: {
+     /**
+      * The text prompt to guide the image generation.
+      */
+     prompt?: string;
+     /**
+      * The strength param only works for SD img2img and alt diffusion img2img models.
+      * Conceptually, indicates how much to transform the reference `image`. Must be between 0 and 1. `image`
+      * will be used as a starting point, adding more noise to it the larger the `strength`. The number of
+      * denoising steps depends on the amount of noise initially added. When `strength` is 1, added noise will
+      * be maximum and the denoising process will run for the full number of iterations specified in
+      * `num_inference_steps`. A value of 1, therefore, essentially ignores `image`.
+      */
+     strength?: number;
+     /**
+      * An optional negative prompt for the image generation
+      */
+     negative_prompt?: string;
+     /**
+      * The height in pixels of the generated image
+      */
+     height?: number;
+     /**
+      * The width in pixels of the generated image
+      */
+     width?: number;
+     /**
+      * The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference.
+      */
+     num_inference_steps?: number;
+     /**
+      * Guidance scale: a higher guidance scale encourages the model to generate images that are closely linked to the text `prompt`, usually at the expense of lower image quality.
+      */
+     guidance_scale?: number;
+     /**
+      * guess_mode only works for ControlNet models; defaults to false. In this mode, the ControlNet encoder will try its best to recognize the content of the input image even if
+      * you remove all prompts. A `guidance_scale` between 3.0 and 5.0 is recommended.
+      */
+     guess_mode?: boolean;
+   };
+ };
+ export type ImageToImageOutput = Blob;
+ /**
+  * This task reads an image input (optionally guided by a text prompt) and outputs an image.
+  * Recommended model: lllyasviel/sd-controlnet-depth
+  */
+ export function imageToImage(args: ImageToImageArgs, options?: Options): Promise<ImageToImageOutput>;
+ export type ImageToTextArgs = BaseArgs & {
+   /**
+    * Binary image data
+    */
+   data: Blob | ArrayBuffer;
+ };
+ export interface ImageToTextOutput {
+   /**
+    * The generated caption
+    */
+   generated_text: string;
+ }
+ /**
+  * This task reads some image input and outputs the text caption.
+  */
+ export function imageToText(args: ImageToTextArgs, options?: Options): Promise<ImageToTextOutput>;
+ export type ObjectDetectionArgs = BaseArgs & {
+   /**
+    * Binary image data
+    */
+   data: Blob | ArrayBuffer;
+ };
+ export type ObjectDetectionOutput = ObjectDetectionOutputValue[];
+ export interface ObjectDetectionOutputValue {
+   /**
+    * A dict (with keys [xmin, ymin, xmax, ymax]) representing the bounding box of a detected object.
+    */
+   box: {
+     xmax: number;
+     xmin: number;
+     ymax: number;
+     ymin: number;
+   };
+   /**
+    * The label for the class (model specific) of a detected object.
+    */
+   label: string;
+
+   /**
+    * A float that represents how likely it is that the detected object belongs to the given class.
+    */
+   score: number;
+ }
+ /**
+  * This task reads some image input and outputs the likelihood of classes & bounding boxes of detected objects.
+  * Recommended model: facebook/detr-resnet-50
+  */
+ export function objectDetection(args: ObjectDetectionArgs, options?: Options): Promise<ObjectDetectionOutput>;
+ export type TextToImageArgs = BaseArgs & {
+   /**
+    * The text to generate an image from
+    */
+   inputs: string;
+
+   parameters?: {
+     /**
+      * An optional negative prompt for the image generation
+      */
+     negative_prompt?: string;
+     /**
+      * The height in pixels of the generated image
+      */
+     height?: number;
+     /**
+      * The width in pixels of the generated image
+      */
+     width?: number;
+     /**
+      * The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference.
+      */
+     num_inference_steps?: number;
+     /**
+      * Guidance scale: a higher guidance scale encourages the model to generate images that are closely linked to the text `prompt`, usually at the expense of lower image quality.
+      */
+     guidance_scale?: number;
+   };
+ };
+ export type TextToImageOutput = Blob;
+ /**
+  * This task reads some text input and outputs an image.
+  * Recommended model: stabilityai/stable-diffusion-2
+  */
+ export function textToImage(args: TextToImageArgs, options?: Options): Promise<TextToImageOutput>;
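
A hedged sketch of calling textToImage from Node and saving the returned Blob; the prompt and file name are arbitrary:

```ts
import { writeFile } from "node:fs/promises";
import { textToImage } from "@huggingface/inference";

const image = await textToImage({
  accessToken: "hf_...", // optional, but avoids rate limits
  model: "stabilityai/stable-diffusion-2",
  inputs: "an astronaut riding a horse",
  parameters: { negative_prompt: "blurry" },
});
// TextToImageOutput is a Blob; convert to a Buffer to save it.
await writeFile("astronaut.png", Buffer.from(await image.arrayBuffer()));
```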
+ export type DocumentQuestionAnsweringArgs = BaseArgs & {
+   inputs: {
+     /**
+      * Raw image
+      *
+      * You can use native `File` in browsers, or `new Blob([buffer])` in node, or for a base64 image `new Blob([btoa(base64String)])`, or even `await (await fetch('...')).blob()`
+      */
+     image: Blob | ArrayBuffer;
+     question: string;
+   };
+ };
+ export interface DocumentQuestionAnsweringOutput {
+   /**
+    * A string that’s the answer within the document.
+    */
+   answer: string;
+   /**
+    * ?
+    */
+   end?: number;
+   /**
+    * A float that represents how likely it is that the answer is correct
+    */
+   score?: number;
+   /**
+    * ?
+    */
+   start?: number;
+ }
+ /**
+  * Answers a question on a document image. Recommended model: impira/layoutlm-document-qa.
+  */
+ export function documentQuestionAnswering(
+   args: DocumentQuestionAnsweringArgs,
+   options?: Options
+ ): Promise<DocumentQuestionAnsweringOutput>;
+ export type VisualQuestionAnsweringArgs = BaseArgs & {
+   inputs: {
+     /**
+      * Raw image
+      *
+      * You can use native `File` in browsers, or `new Blob([buffer])` in node, or for a base64 image `new Blob([btoa(base64String)])`, or even `await (await fetch('...')).blob()`
+      */
+     image: Blob | ArrayBuffer;
+     question: string;
+   };
+ };
+ export interface VisualQuestionAnsweringOutput {
+   /**
+    * A string that’s the answer to a visual question.
+    */
+   answer: string;
+   /**
+    * Answer correctness score.
+    */
+   score: number;
+ }
+ /**
+  * Answers a question on an image. Recommended model: dandelin/vilt-b32-finetuned-vqa.
+  */
+ export function visualQuestionAnswering(
+   args: VisualQuestionAnsweringArgs,
+   options?: Options
+ ): Promise<VisualQuestionAnsweringOutput>;
+ export type ConversationalArgs = BaseArgs & {
+   inputs: {
+     /**
+      * A list of strings corresponding to the earlier replies from the model.
+      */
+     generated_responses?: string[];
+     /**
+      * A list of strings corresponding to the earlier replies from the user. Should be the same length as generated_responses.
+      */
+     past_user_inputs?: string[];
+     /**
+      * The last input from the user in the conversation.
+      */
+     text: string;
+   };
+   parameters?: {
+     /**
+      * (Default: None). Integer to define the maximum length in tokens of the output.
+      */
+     max_length?: number;
+     /**
+      * (Default: None). Float (0-120.0). The maximum amount of time in seconds that the query should take. Network can cause some overhead, so it will be a soft limit.
+      */
+     max_time?: number;
+     /**
+      * (Default: None). Integer to define the minimum length in tokens of the output.
+      */
+     min_length?: number;
+     /**
+      * (Default: None). Float (0.0-100.0). The more a token is used within generation, the more it is penalized, so as not to be picked in successive generation passes.
+      */
+     repetition_penalty?: number;
+     /**
+      * (Default: 1.0). Float (0.0-100.0). The temperature of the sampling operation. 1 means regular sampling, 0 means always take the highest score, 100.0 is getting closer to uniform probability.
+      */
+     temperature?: number;
+     /**
+      * (Default: None). Integer to define the top tokens considered within the sample operation to create new text.
+      */
+     top_k?: number;
+     /**
+      * (Default: None). Float to define the tokens that are within the sample operation of text generation. Tokens are added to the sample, from most probable to least probable, until the sum of the probabilities is greater than top_p.
+      */
+     top_p?: number;
+   };
+ };
+ export interface ConversationalOutput {
+   conversation: {
+     generated_responses: string[];
+     past_user_inputs: string[];
+   };
+   generated_text: string;
+   warnings: string[];
+ }
+ /**
+  * This task corresponds to any chatbot-like structure. Models tend to have shorter max_length, so please check carefully whether a given model suits your needs when you require long-range dependencies. Recommended model: microsoft/DialoGPT-large.
+  */
+ export function conversational(args: ConversationalArgs, options?: Options): Promise<ConversationalOutput>;
+ export type FeatureExtractionArgs = BaseArgs & {
+   /**
+    * The input is a string or a list of strings to get the features from.
+    *
+    * inputs: "That is a happy person",
+    */
+   inputs: string | string[];
+ };
+ /**
+  * Returned values are a list of floats, or a list of lists of floats (depending on whether you sent a string or a list of strings, and whether an automatic reduction, usually mean pooling, was applied for you or not). This should be explained in the model's README.
+  */
+ export type FeatureExtractionOutput = (number | number[])[];
+ /**
+  * This task reads some text and outputs raw float values, which are usually consumed as part of a semantic database/semantic search.
+  */
+ export function featureExtraction(
+   args: FeatureExtractionArgs,
+   options?: Options
+ ): Promise<FeatureExtractionOutput>;
+ export type FillMaskArgs = BaseArgs & {
+   inputs: string;
+ };
+ export type FillMaskOutput = {
+   /**
+    * The probability for this token.
+    */
+   score: number;
+   /**
+    * The actual sequence of tokens that ran against the model (may contain special tokens)
+    */
+   sequence: string;
+   /**
+    * The id of the token
+    */
+   token: number;
+   /**
+    * The string representation of the token
+    */
+   token_str: string;
+ }[];
+ /**
+  * Tries to fill in a hole with a missing word (token, to be precise). That’s the base task for BERT models.
+  */
+ export function fillMask(args: FillMaskArgs, options?: Options): Promise<FillMaskOutput>;
+ export type QuestionAnsweringArgs = BaseArgs & {
+   inputs: {
+     context: string;
+     question: string;
+   };
+ };
+ export interface QuestionAnsweringOutput {
+   /**
+    * A string that’s the answer within the text.
+    */
+   answer: string;
+   /**
+    * The index (string-wise) of the end of the answer within context.
+    */
+   end: number;
+   /**
+    * A float that represents how likely it is that the answer is correct
+    */
+   score: number;
+   /**
+    * The index (string-wise) of the start of the answer within context.
+    */
+   start: number;
+ }
+ /**
+  * Want to have a nice know-it-all bot that can answer any question? Recommended model: deepset/roberta-base-squad2
+  */
+ export function questionAnswering(
+   args: QuestionAnsweringArgs,
+   options?: Options
+ ): Promise<QuestionAnsweringOutput>;
+ export type SentenceSimilarityArgs = BaseArgs & {
+   /**
+    * The inputs vary based on the model.
+    *
+    * For example, when using sentence-transformers/paraphrase-xlm-r-multilingual-v1 the inputs will have a `source_sentence` string and
+    * a `sentences` array of strings
+    */
+   inputs: Record<string, unknown> | Record<string, unknown>[];
+ };
+ /**
+  * Returned values are a list of floats
+  */
+ export type SentenceSimilarityOutput = number[];
+ /**
+  * Calculate the semantic similarity between one text and a list of other sentences by comparing their embeddings.
+  */
+ export function sentenceSimilarity(
+   args: SentenceSimilarityArgs,
+   options?: Options
+ ): Promise<SentenceSimilarityOutput>;
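
As the comment above notes, the input shape is model-dependent; a sketch for the sentence-transformers example mentioned in the typings, with one score returned per candidate sentence in order:

```ts
import { sentenceSimilarity } from "@huggingface/inference";

const scores = await sentenceSimilarity({
  model: "sentence-transformers/paraphrase-xlm-r-multilingual-v1",
  inputs: {
    source_sentence: "That is a happy person",
    sentences: ["That is a happy dog", "Today is a sunny day"],
  },
});
```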
+ export type SummarizationArgs = BaseArgs & {
+   /**
+    * A string to be summarized
+    */
+   inputs: string;
+   parameters?: {
+     /**
+      * (Default: None). Integer to define the maximum length in tokens of the output summary.
+      */
+     max_length?: number;
+     /**
+      * (Default: None). Float (0-120.0). The maximum amount of time in seconds that the query should take. Network can cause some overhead, so it will be a soft limit.
+      */
+     max_time?: number;
+     /**
+      * (Default: None). Integer to define the minimum length in tokens of the output summary.
+      */
+     min_length?: number;
+     /**
+      * (Default: None). Float (0.0-100.0). The more a token is used within generation, the more it is penalized, so as not to be picked in successive generation passes.
+      */
+     repetition_penalty?: number;
+     /**
+      * (Default: 1.0). Float (0.0-100.0). The temperature of the sampling operation. 1 means regular sampling, 0 means always take the highest score, 100.0 is getting closer to uniform probability.
+      */
+     temperature?: number;
+     /**
+      * (Default: None). Integer to define the top tokens considered within the sample operation to create new text.
+      */
+     top_k?: number;
+     /**
+      * (Default: None). Float to define the tokens that are within the sample operation of text generation. Tokens are added to the sample, from most probable to least probable, until the sum of the probabilities is greater than top_p.
+      */
+     top_p?: number;
+   };
+ };
+ export interface SummarizationOutput {
+   /**
+    * The summarized text
+    */
+   summary_text: string;
+ }
+ /**
+  * This task is well known for summarizing longer text into shorter text. Be careful: some models have a maximum input length, which means the summarizer cannot handle full books, for instance. Choose your model carefully.
+  */
+ export function summarization(args: SummarizationArgs, options?: Options): Promise<SummarizationOutput>;
+ export type TableQuestionAnsweringArgs = BaseArgs & {
+   inputs: {
+     /**
+      * The query in plain text that you want to ask the table
+      */
+     query: string;
+     /**
+      * A table of data represented as a dict of lists where entries are headers and the lists are all the values; all lists must have the same size.
+      */
+     table: Record<string, string[]>;
+   };
+ };
+ export interface TableQuestionAnsweringOutput {
+   /**
+    * The aggregator used to get the answer
+    */
+   aggregator: string;
+   /**
+    * The plaintext answer
+    */
+   answer: string;
+   /**
+    * A list of the contents of the cells referenced in the answer
+    */
+   cells: string[];
+   /**
+    * A list of coordinates of the cells referenced in the answer
+    */
+   coordinates: number[][];
+ }
+ /**
+  * Don’t know SQL? Don’t want to dive into a large spreadsheet? Ask questions in plain English! Recommended model: google/tapas-base-finetuned-wtq.
+  */
+ export function tableQuestionAnswering(
+   args: TableQuestionAnsweringArgs,
+   options?: Options
+ ): Promise<TableQuestionAnsweringOutput>;
+ export type TextClassificationArgs = BaseArgs & {
+   /**
+    * A string to be classified
+    */
+   inputs: string;
+ };
+ export type TextClassificationOutput = {
+   /**
+    * The label for the class (model specific)
+    */
+   label: string;
+   /**
+    * A float that represents how likely it is that the text belongs to this class.
+    */
+   score: number;
+ }[];
+ /**
+  * Usually used for sentiment analysis, this will output the likelihood of classes of an input. Recommended model: distilbert-base-uncased-finetuned-sst-2-english
+  */
+ export function textClassification(
+   args: TextClassificationArgs,
+   options?: Options
+ ): Promise<TextClassificationOutput>;
+ export type TextGenerationArgs = BaseArgs & {
+   /**
+    * A string to be generated from
+    */
+   inputs: string;
+   parameters?: {
+     /**
+      * (Default: true). Boolean. Whether or not to use sampling; use greedy decoding otherwise.
+      */
+     do_sample?: boolean;
+     /**
+      * (Default: None). Int (0-250). The number of new tokens to be generated; this does not include the input length. It is an estimate of the size of the generated text you want. Each new token slows down the request, so look for a balance between response time and length of text generated.
+      */
+     max_new_tokens?: number;
+     /**
+      * (Default: None). Float (0-120.0). The maximum amount of time in seconds that the query should take. Network can cause some overhead, so it will be a soft limit. Use it in combination with max_new_tokens for best results.
+      */
+     max_time?: number;
+     /**
+      * (Default: 1). Integer. The number of propositions you want to be returned.
+      */
+     num_return_sequences?: number;
+     /**
+      * (Default: None). Float (0.0-100.0). The more a token is used within generation, the more it is penalized, so as not to be picked in successive generation passes.
+      */
+     repetition_penalty?: number;
+     /**
+      * (Default: true). Boolean. If set to false, the returned results will not contain the original query, making prompting easier.
+      */
+     return_full_text?: boolean;
+     /**
+      * (Default: 1.0). Float (0.0-100.0). The temperature of the sampling operation. 1 means regular sampling, 0 means always take the highest score, 100.0 is getting closer to uniform probability.
+      */
+     temperature?: number;
+     /**
+      * (Default: None). Integer to define the top tokens considered within the sample operation to create new text.
+      */
+     top_k?: number;
+     /**
+      * (Default: None). Float to define the tokens that are within the sample operation of text generation. Tokens are added to the sample, from most probable to least probable, until the sum of the probabilities is greater than top_p.
+      */
+     top_p?: number;
+     /**
+      * (Default: None). Integer. The maximum number of tokens from the input.
+      */
+     truncate?: number;
+   };
+ };
+ export interface TextGenerationOutput {
+   /**
+    * The continued string
+    */
+   generated_text: string;
+ }
+ /**
+  * Use to continue text from a prompt. This is a very generic task. Recommended model: gpt2 (it’s a simple model, but fun to play with).
+  */
+ export function textGeneration(args: TextGenerationArgs, options?: Options): Promise<TextGenerationOutput>;
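
A short sketch of a textGeneration call using the parameters documented above:

```ts
import { textGeneration } from "@huggingface/inference";

const { generated_text } = await textGeneration({
  model: "gpt2",
  inputs: "The answer to the universe is",
  parameters: {
    max_new_tokens: 50,
    temperature: 0.7,
    return_full_text: false, // omit the prompt from the result
  },
});
```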
+ export type TextGenerationStreamFinishReason =
+   /** number of generated tokens == `max_new_tokens` */
+   | "length"
+   /** the model generated its end of sequence token */
+   | "eos_token"
+   /** the model generated a text included in `stop_sequences` */
+   | "stop_sequence";
+ export interface TextGenerationStreamToken {
+   /** Token ID from the model tokenizer */
+   id: number;
+   /** Token text */
+   text: string;
+   /** Logprob */
+   logprob: number;
+   /**
+    * Is the token a special token
+    * Can be used to ignore tokens when concatenating
+    */
+   special: boolean;
+ }
+ export interface TextGenerationStreamPrefillToken {
+   /** Token ID from the model tokenizer */
+   id: number;
+   /** Token text */
+   text: string;
+   /**
+    * Logprob
+    * Optional since the logprob of the first token cannot be computed
+    */
+   logprob?: number;
+ }
+ export interface TextGenerationStreamBestOfSequence {
+   /** Generated text */
+   generated_text: string;
+   /** Generation finish reason */
+   finish_reason: TextGenerationStreamFinishReason;
+   /** Number of generated tokens */
+   generated_tokens: number;
+   /** Sampling seed if sampling was activated */
+   seed?: number;
+   /** Prompt tokens */
+   prefill: TextGenerationStreamPrefillToken[];
+   /** Generated tokens */
+   tokens: TextGenerationStreamToken[];
+ }
+ export interface TextGenerationStreamDetails {
+   /** Generation finish reason */
+   finish_reason: TextGenerationStreamFinishReason;
+   /** Number of generated tokens */
+   generated_tokens: number;
+   /** Sampling seed if sampling was activated */
+   seed?: number;
+   /** Prompt tokens */
+   prefill: TextGenerationStreamPrefillToken[];
+   /** Generated tokens */
+   tokens: TextGenerationStreamToken[];
+   /** Additional sequences when using the `best_of` parameter */
+   best_of_sequences?: TextGenerationStreamBestOfSequence[];
+ }
+ export interface TextGenerationStreamOutput {
+   /** Generated token; one at a time */
+   token: TextGenerationStreamToken;
+   /**
+    * Complete generated text
+    * Only available when the generation is finished
+    */
+   generated_text: string | null;
+   /**
+    * Generation details
+    * Only available when the generation is finished
+    */
+   details: TextGenerationStreamDetails | null;
+ }
+ /**
+  * Use to continue text from a prompt. Same as `textGeneration`, but returns a generator that can be read one token at a time.
+  */
+ export function textGenerationStream(
+   args: TextGenerationArgs,
+   options?: Options
+ ): AsyncGenerator<TextGenerationStreamOutput>;
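
A minimal sketch of consuming the stream with for await, using the fields documented above:

```ts
import { textGenerationStream } from "@huggingface/inference";

let text = "";
for await (const output of textGenerationStream({
  model: "gpt2",
  inputs: "Once upon a time",
  parameters: { max_new_tokens: 30 },
})) {
  if (!output.token.special) text += output.token.text; // skip special tokens
  if (output.generated_text) console.log("done:", output.generated_text);
}
```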
+ export type TokenClassificationArgs = BaseArgs & {
+   /**
+    * A string to be classified
+    */
+   inputs: string;
+   parameters?: {
+     /**
+      * (Default: simple). There are several aggregation strategies:
+      *
+      * none: Every token gets classified without further aggregation.
+      *
+      * simple: Entities are grouped according to the default schema (B-, I- tags get merged when the tag is similar).
+      *
+      * first: Same as the simple strategy, except words cannot end up with different tags. Words will use the tag of the first token when there is ambiguity.
+      *
+      * average: Same as the simple strategy, except words cannot end up with different tags. Scores are averaged across tokens and then the maximum label is applied.
+      *
+      * max: Same as the simple strategy, except words cannot end up with different tags. The word entity will be the token with the maximum score.
+      */
+     aggregation_strategy?: "none" | "simple" | "first" | "average" | "max";
+   };
+ };
+ export type TokenClassificationOutput = TokenClassificationOutputValue[];
+ export interface TokenClassificationOutputValue {
+   /**
+    * The string-wise offset of the end of the entity. Useful to disambiguate if the word occurs multiple times.
+    */
+   end: number;
+   /**
+    * The type for the entity being recognized (model specific).
+    */
+   entity_group: string;
+   /**
+    * How likely the entity was recognized.
+    */
+   score: number;
+   /**
+    * The string-wise offset of the start of the entity. Useful to disambiguate if the word occurs multiple times.
+    */
+   start: number;
+   /**
+    * The string that was captured
+    */
+   word: string;
+ }
+ /**
+  * Usually used for sentence parsing, either grammatical, or Named Entity Recognition (NER) to understand keywords contained within text. Recommended model: dbmdz/bert-large-cased-finetuned-conll03-english
+  */
+ export function tokenClassification(
+   args: TokenClassificationArgs,
+   options?: Options
+ ): Promise<TokenClassificationOutput>;
+ export type TranslationArgs = BaseArgs & {
+   /**
+    * A string to be translated
+    */
+   inputs: string;
+ };
+ export interface TranslationOutput {
+   /**
+    * The string after translation
+    */
+   translation_text: string;
+ }
+ /**
+  * This task is well known for translating text from one language to another. Recommended model: Helsinki-NLP/opus-mt-ru-en.
+  */
+ export function translation(args: TranslationArgs, options?: Options): Promise<TranslationOutput>;
+ export type ZeroShotClassificationArgs = BaseArgs & {
+   /**
+    * A string or list of strings
+    */
+   inputs: string | string[];
+   parameters: {
+     /**
+      * A list of strings that are potential classes for inputs (max 10 candidate_labels; for more, simply run multiple requests, as results are going to be misleading when using too many candidate_labels anyway). If you want to keep the exact same scores, you can run with multi_label=true and do the scaling on your end.
+      */
+     candidate_labels: string[];
+     /**
+      * (Default: false) Boolean that is set to true if classes can overlap
+      */
+     multi_label?: boolean;
+   };
+ };
+ export type ZeroShotClassificationOutput = ZeroShotClassificationOutputValue[];
+ export interface ZeroShotClassificationOutputValue {
+   labels: string[];
+   scores: number[];
+   sequence: string;
+ }
+ /**
+  * This task is super useful for trying out classification with zero code: you simply pass a sentence/paragraph and the possible labels for that sentence, and you get a result. Recommended model: facebook/bart-large-mnli.
+  */
+ export function zeroShotClassification(
+   args: ZeroShotClassificationArgs,
+   options?: Options
+ ): Promise<ZeroShotClassificationOutput>;
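
A hedged sketch of a zero-shot call; one output value is returned per input string, with `labels` and `scores` aligned index-by-index:

```ts
import { zeroShotClassification } from "@huggingface/inference";

const [result] = await zeroShotClassification({
  model: "facebook/bart-large-mnli",
  inputs: "I have a problem with my iphone that needs to be resolved asap!!",
  parameters: { candidate_labels: ["urgent", "not urgent", "phone", "tablet"] },
});
console.log(result.labels[0], result.scores[0]);
```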
+ export type TabularRegressionArgs = BaseArgs & {
+   inputs: {
+     /**
+      * A table of data represented as a dict of lists where entries are headers and the lists are all the values; all lists must have the same size.
+      */
+     data: Record<string, string[]>;
+   };
+ };
+ /**
+  * A list of predicted values for each row
+  */
+ export type TabularRegressionOutput = number[];
+ /**
+  * Predicts target value for a given set of features in tabular form.
+  * Typically, you will want to train a regression model on your training data and use it with your new data of the same format.
+  * Example model: scikit-learn/Fish-Weight
+  */
+ export function tabularRegression(
+   args: TabularRegressionArgs,
+   options?: Options
+ ): Promise<TabularRegressionOutput>;
+ export class HfInference {
+   constructor(accessToken?: string, defaultOptions?: Options);
+   /**
+    * Returns a copy of HfInference tied to a specified endpoint.
+    */
+   endpoint(endpointUrl: string): HfInferenceEndpoint;
+   /**
+    * This task reads some audio input and outputs the likelihood of classes.
+    * Recommended model: superb/hubert-large-superb-er
+    */
+   audioClassification(
+     args: Omit<AudioClassificationArgs, 'accessToken'>,
+     options?: Options
+   ): Promise<AudioClassificationReturn>;
+   /**
+    * This task reads some audio input and outputs the words spoken in the audio file.
+    * Recommended model (English language): facebook/wav2vec2-large-960h-lv60-self
+    */
+   automaticSpeechRecognition(
+     args: Omit<AutomaticSpeechRecognitionArgs, 'accessToken'>,
+     options?: Options
+   ): Promise<AutomaticSpeechRecognitionOutput>;
+   /**
+    * This task synthesizes audio of a voice pronouncing a given text.
+    * Recommended model: espnet/kan-bayashi_ljspeech_vits
+    */
+   textToSpeech(args: Omit<TextToSpeechArgs, 'accessToken'>, options?: Options): Promise<TextToSpeechOutput>;
+   /**
+    * Primitive to make custom calls to the inference API
+    */
+   request<T>(
+     args: Omit<RequestArgs, 'accessToken'>,
+     options?: Options & {
+       /** For internal HF use, which is why it's not exposed in {@link Options} */
+       includeCredentials?: boolean;
+     }
+   ): Promise<T>;
+   /**
+    * Primitive to make custom inference calls that expect server-sent events, and returns the response through a generator
+    */
+   streamingRequest<T>(
+     args: Omit<RequestArgs, 'accessToken'>,
+     options?: Options & {
+       /** For internal HF use, which is why it's not exposed in {@link Options} */
+       includeCredentials?: boolean;
+     }
+   ): AsyncGenerator<T>;
+   /**
+    * This task reads some image input and outputs the likelihood of classes.
+    * Recommended model: google/vit-base-patch16-224
+    */
+   imageClassification(
+     args: Omit<ImageClassificationArgs, 'accessToken'>,
+     options?: Options
+   ): Promise<ImageClassificationOutput>;
+   /**
+    * This task reads some image input and outputs the likelihood of classes & bounding boxes of detected objects.
+    * Recommended model: facebook/detr-resnet-50-panoptic
+    */
+   imageSegmentation(
+     args: Omit<ImageSegmentationArgs, 'accessToken'>,
+     options?: Options
+   ): Promise<ImageSegmentationOutput>;
+   /**
+    * This task reads an image input (optionally guided by a text prompt) and outputs an image.
+    * Recommended model: lllyasviel/sd-controlnet-depth
+    */
+   imageToImage(args: Omit<ImageToImageArgs, 'accessToken'>, options?: Options): Promise<ImageToImageOutput>;
+   /**
+    * This task reads some image input and outputs the text caption.
+    */
+   imageToText(args: Omit<ImageToTextArgs, 'accessToken'>, options?: Options): Promise<ImageToTextOutput>;
+   /**
+    * This task reads some image input and outputs the likelihood of classes & bounding boxes of detected objects.
+    * Recommended model: facebook/detr-resnet-50
+    */
+   objectDetection(args: Omit<ObjectDetectionArgs, 'accessToken'>, options?: Options): Promise<ObjectDetectionOutput>;
+   /**
+    * This task reads some text input and outputs an image.
+    * Recommended model: stabilityai/stable-diffusion-2
+    */
+   textToImage(args: Omit<TextToImageArgs, 'accessToken'>, options?: Options): Promise<TextToImageOutput>;
+   /**
+    * Answers a question on a document image. Recommended model: impira/layoutlm-document-qa.
+    */
+   documentQuestionAnswering(
+     args: Omit<DocumentQuestionAnsweringArgs, 'accessToken'>,
+     options?: Options
+   ): Promise<DocumentQuestionAnsweringOutput>;
+   /**
+    * Answers a question on an image. Recommended model: dandelin/vilt-b32-finetuned-vqa.
+    */
+   visualQuestionAnswering(
+     args: Omit<VisualQuestionAnsweringArgs, 'accessToken'>,
+     options?: Options
+   ): Promise<VisualQuestionAnsweringOutput>;
+   /**
+    * This task corresponds to any chatbot-like structure. Models tend to have shorter max_length, so please check carefully whether a given model suits your needs when you require long-range dependencies. Recommended model: microsoft/DialoGPT-large.
+    */
+   conversational(args: Omit<ConversationalArgs, 'accessToken'>, options?: Options): Promise<ConversationalOutput>;
+   /**
+    * This task reads some text and outputs raw float values, which are usually consumed as part of a semantic database/semantic search.
+    */
+   featureExtraction(
+     args: Omit<FeatureExtractionArgs, 'accessToken'>,
+     options?: Options
+   ): Promise<FeatureExtractionOutput>;
+   /**
+    * Tries to fill in a hole with a missing word (token, to be precise). That’s the base task for BERT models.
+    */
+   fillMask(args: Omit<FillMaskArgs, 'accessToken'>, options?: Options): Promise<FillMaskOutput>;
+   /**
+    * Want to have a nice know-it-all bot that can answer any question? Recommended model: deepset/roberta-base-squad2
+    */
+   questionAnswering(
+     args: Omit<QuestionAnsweringArgs, 'accessToken'>,
+     options?: Options
+   ): Promise<QuestionAnsweringOutput>;
+   /**
+    * Calculate the semantic similarity between one text and a list of other sentences by comparing their embeddings.
+    */
+   sentenceSimilarity(
+     args: Omit<SentenceSimilarityArgs, 'accessToken'>,
+     options?: Options
+   ): Promise<SentenceSimilarityOutput>;
+   /**
+    * This task is well known for summarizing longer text into shorter text. Be careful: some models have a maximum input length, which means the summarizer cannot handle full books, for instance. Choose your model carefully.
+    */
+   summarization(args: Omit<SummarizationArgs, 'accessToken'>, options?: Options): Promise<SummarizationOutput>;
+   /**
+    * Don’t know SQL? Don’t want to dive into a large spreadsheet? Ask questions in plain English! Recommended model: google/tapas-base-finetuned-wtq.
+    */
+   tableQuestionAnswering(
+     args: Omit<TableQuestionAnsweringArgs, 'accessToken'>,
+     options?: Options
+   ): Promise<TableQuestionAnsweringOutput>;
+   /**
+    * Usually used for sentiment analysis, this will output the likelihood of classes of an input. Recommended model: distilbert-base-uncased-finetuned-sst-2-english
+    */
+   textClassification(
+     args: Omit<TextClassificationArgs, 'accessToken'>,
+     options?: Options
+   ): Promise<TextClassificationOutput>;
+   /**
+    * Use to continue text from a prompt. This is a very generic task. Recommended model: gpt2 (it’s a simple model, but fun to play with).
+    */
+   textGeneration(args: Omit<TextGenerationArgs, 'accessToken'>, options?: Options): Promise<TextGenerationOutput>;
+   /**
+    * Use to continue text from a prompt. Same as `textGeneration`, but returns a generator that can be read one token at a time.
+    */
+   textGenerationStream(
+     args: Omit<TextGenerationArgs, 'accessToken'>,
+     options?: Options
+   ): AsyncGenerator<TextGenerationStreamOutput>;
+   /**
+    * Usually used for sentence parsing, either grammatical, or Named Entity Recognition (NER) to understand keywords contained within text. Recommended model: dbmdz/bert-large-cased-finetuned-conll03-english
+    */
+   tokenClassification(
+     args: Omit<TokenClassificationArgs, 'accessToken'>,
+     options?: Options
+   ): Promise<TokenClassificationOutput>;
+   /**
+    * This task is well known for translating text from one language to another. Recommended model: Helsinki-NLP/opus-mt-ru-en.
+    */
+   translation(args: Omit<TranslationArgs, 'accessToken'>, options?: Options): Promise<TranslationOutput>;
+   /**
+    * This task is super useful for trying out classification with zero code: you simply pass a sentence/paragraph and the possible labels for that sentence, and you get a result. Recommended model: facebook/bart-large-mnli.
+    */
+   zeroShotClassification(
+     args: Omit<ZeroShotClassificationArgs, 'accessToken'>,
+     options?: Options
+   ): Promise<ZeroShotClassificationOutput>;
+   /**
+    * Predicts target value for a given set of features in tabular form.
+    * Typically, you will want to train a regression model on your training data and use it with your new data of the same format.
+    * Example model: scikit-learn/Fish-Weight
+    */
+   tabularRegression(
+     args: Omit<TabularRegressionArgs, 'accessToken'>,
+     options?: Options
+   ): Promise<TabularRegressionOutput>;
+ }
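
The class mirrors the free functions but binds the access token once; `endpoint()` rebinds all calls to a dedicated Inference Endpoint URL, so the per-call `model` argument disappears from the method signatures. A sketch (the endpoint URL below is a placeholder):

```ts
import { HfInference } from "@huggingface/inference";

const hf = new HfInference("hf_...");
const { translation_text } = await hf.translation({
  model: "Helsinki-NLP/opus-mt-ru-en",
  inputs: "Меня зовут Вольфганг и я живу в Берлине",
});

// Bind to a dedicated endpoint; note `model` is no longer needed per call.
const gpt2 = hf.endpoint("https://xyz.endpoints.huggingface.cloud/gpt2"); // placeholder URL
const { generated_text } = await gpt2.textGeneration({ inputs: "The answer to the universe is" });
```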
+ export class HfInferenceEndpoint {
+   constructor(endpointUrl: string, accessToken?: string, defaultOptions?: Options);
+   /**
+    * This task reads some audio input and outputs the likelihood of classes.
+    * Recommended model: superb/hubert-large-superb-er
+    */
+   audioClassification(
+     args: Omit<AudioClassificationArgs, 'accessToken' | 'model'>,
+     options?: Options
+   ): Promise<AudioClassificationReturn>;
+   /**
+    * This task reads some audio input and outputs the words spoken in the audio file.
+    * Recommended model (English language): facebook/wav2vec2-large-960h-lv60-self
+    */
+   automaticSpeechRecognition(
+     args: Omit<AutomaticSpeechRecognitionArgs, 'accessToken' | 'model'>,
+     options?: Options
+   ): Promise<AutomaticSpeechRecognitionOutput>;
+   /**
+    * This task synthesizes audio of a voice pronouncing a given text.
+    * Recommended model: espnet/kan-bayashi_ljspeech_vits
+    */
+   textToSpeech(args: Omit<TextToSpeechArgs, 'accessToken' | 'model'>, options?: Options): Promise<TextToSpeechOutput>;
+   /**
+    * Primitive to make custom calls to the inference API
+    */
+   request<T>(
+     args: Omit<RequestArgs, 'accessToken' | 'model'>,
+     options?: Options & {
+       /** For internal HF use, which is why it's not exposed in {@link Options} */
+       includeCredentials?: boolean;
+     }
+   ): Promise<T>;
+   /**
+    * Primitive to make custom inference calls that expect server-sent events, and returns the response through a generator
+    */
+   streamingRequest<T>(
+     args: Omit<RequestArgs, 'accessToken' | 'model'>,
+     options?: Options & {
+       /** For internal HF use, which is why it's not exposed in {@link Options} */
+       includeCredentials?: boolean;
+     }
+   ): AsyncGenerator<T>;
+   /**
+    * This task reads some image input and outputs the likelihood of classes.
+    * Recommended model: google/vit-base-patch16-224
+    */
+   imageClassification(
+     args: Omit<ImageClassificationArgs, 'accessToken' | 'model'>,
+     options?: Options
+   ): Promise<ImageClassificationOutput>;
+   /**
+    * This task reads some image input and outputs the likelihood of classes & bounding boxes of detected objects.
+    * Recommended model: facebook/detr-resnet-50-panoptic
+    */
+   imageSegmentation(
+     args: Omit<ImageSegmentationArgs, 'accessToken' | 'model'>,
+     options?: Options
+   ): Promise<ImageSegmentationOutput>;
+   /**
+    * This task reads an image input (optionally guided by a text prompt) and outputs an image.
+    * Recommended model: lllyasviel/sd-controlnet-depth
+    */
+   imageToImage(args: Omit<ImageToImageArgs, 'accessToken' | 'model'>, options?: Options): Promise<ImageToImageOutput>;
+   /**
+    * This task reads some image input and outputs the text caption.
+    */
+   imageToText(args: Omit<ImageToTextArgs, 'accessToken' | 'model'>, options?: Options): Promise<ImageToTextOutput>;
+   /**
+    * This task reads some image input and outputs the likelihood of classes & bounding boxes of detected objects.
+    * Recommended model: facebook/detr-resnet-50
+    */
+   objectDetection(args: Omit<ObjectDetectionArgs, 'accessToken' | 'model'>, options?: Options): Promise<ObjectDetectionOutput>;
+   /**
+    * This task reads some text input and outputs an image.
+    * Recommended model: stabilityai/stable-diffusion-2
+    */
+   textToImage(args: Omit<TextToImageArgs, 'accessToken' | 'model'>, options?: Options): Promise<TextToImageOutput>;
+   /**
+    * Answers a question on a document image. Recommended model: impira/layoutlm-document-qa.
+    */
+   documentQuestionAnswering(
+     args: Omit<DocumentQuestionAnsweringArgs, 'accessToken' | 'model'>,
+     options?: Options
+   ): Promise<DocumentQuestionAnsweringOutput>;
+   /**
+    * Answers a question on an image. Recommended model: dandelin/vilt-b32-finetuned-vqa.
+    */
+   visualQuestionAnswering(
+     args: Omit<VisualQuestionAnsweringArgs, 'accessToken' | 'model'>,
+     options?: Options
+   ): Promise<VisualQuestionAnsweringOutput>;
+   /**
+    * This task corresponds to any chatbot-like structure. Models tend to have shorter max_length, so please check carefully whether a given model suits your needs when you require long-range dependencies. Recommended model: microsoft/DialoGPT-large.
+    */
+   conversational(args: Omit<ConversationalArgs, 'accessToken' | 'model'>, options?: Options): Promise<ConversationalOutput>;
+   /**
+    * This task reads some text and outputs raw float values, which are usually consumed as part of a semantic database/semantic search.
+    */
+   featureExtraction(
+     args: Omit<FeatureExtractionArgs, 'accessToken' | 'model'>,
+     options?: Options
+   ): Promise<FeatureExtractionOutput>;
+   /**
+    * Tries to fill in a hole with a missing word (token, to be precise). That’s the base task for BERT models.
+    */
+   fillMask(args: Omit<FillMaskArgs, 'accessToken' | 'model'>, options?: Options): Promise<FillMaskOutput>;
+   /**
+    * Want to have a nice know-it-all bot that can answer any question? Recommended model: deepset/roberta-base-squad2
+    */
+   questionAnswering(
+     args: Omit<QuestionAnsweringArgs, 'accessToken' | 'model'>,
+     options?: Options
+   ): Promise<QuestionAnsweringOutput>;
+   /**
+    * Calculate the semantic similarity between one text and a list of other sentences by comparing their embeddings.
+    */
+   sentenceSimilarity(
+     args: Omit<SentenceSimilarityArgs, 'accessToken' | 'model'>,
+     options?: Options
+   ): Promise<SentenceSimilarityOutput>;
+   /**
+    * This task is well known for summarizing longer text into shorter text. Be careful: some models have a maximum input length, which means the summarizer cannot handle full books, for instance. Choose your model carefully.
+    */
+   summarization(args: Omit<SummarizationArgs, 'accessToken' | 'model'>, options?: Options): Promise<SummarizationOutput>;
+   /**
+    * Don’t know SQL? Don’t want to dive into a large spreadsheet? Ask questions in plain English! Recommended model: google/tapas-base-finetuned-wtq.
+    */
+   tableQuestionAnswering(
+     args: Omit<TableQuestionAnsweringArgs, 'accessToken' | 'model'>,
+     options?: Options
+   ): Promise<TableQuestionAnsweringOutput>;
+   /**
+    * Usually used for sentiment analysis, this will output the likelihood of classes of an input. Recommended model: distilbert-base-uncased-finetuned-sst-2-english
+    */
+   textClassification(
+     args: Omit<TextClassificationArgs, 'accessToken' | 'model'>,
+     options?: Options
+   ): Promise<TextClassificationOutput>;
+   /**
+    * Use to continue text from a prompt. This is a very generic task. Recommended model: gpt2 (it’s a simple model, but fun to play with).
+    */
+   textGeneration(args: Omit<TextGenerationArgs, 'accessToken' | 'model'>, options?: Options): Promise<TextGenerationOutput>;
+   /**
+    * Use to continue text from a prompt. Same as `textGeneration`, but returns a generator that can be read one token at a time.
+    */
+   textGenerationStream(
+     args: Omit<TextGenerationArgs, 'accessToken' | 'model'>,
+     options?: Options
+   ): AsyncGenerator<TextGenerationStreamOutput>;
+   /**
+    * Usually used for sentence parsing, either grammatical, or Named Entity Recognition (NER) to understand keywords contained within text. Recommended model: dbmdz/bert-large-cased-finetuned-conll03-english
+    */
+   tokenClassification(
+     args: Omit<TokenClassificationArgs, 'accessToken' | 'model'>,
+     options?: Options
+   ): Promise<TokenClassificationOutput>;
+   /**
+    * This task is well known for translating text from one language to another. Recommended model: Helsinki-NLP/opus-mt-ru-en.
+    */
+   translation(args: Omit<TranslationArgs, 'accessToken' | 'model'>, options?: Options): Promise<TranslationOutput>;
+   /**
+    * This task is super useful for trying out classification with zero code: you simply pass a sentence/paragraph and the possible labels for that sentence, and you get a result. Recommended model: facebook/bart-large-mnli.
+    */
+   zeroShotClassification(
+     args: Omit<ZeroShotClassificationArgs, 'accessToken' | 'model'>,
+     options?: Options
+   ): Promise<ZeroShotClassificationOutput>;
+   /**
+    * Predicts target value for a given set of features in tabular form.
+    * Typically, you will want to train a regression model on your training data and use it with your new data of the same format.
+    * Example model: scikit-learn/Fish-Weight
+    */
+   tabularRegression(
+     args: Omit<TabularRegressionArgs, 'accessToken' | 'model'>,
+     options?: Options
+   ): Promise<TabularRegressionOutput>;
+ }
package/dist/index.js CHANGED
@@ -1,3 +1,4 @@
+ /// <reference path="./index.d.ts" />
  "use strict";
  var __defProp = Object.defineProperty;
  var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
@@ -22,6 +23,7 @@ var src_exports = {};
  __export(src_exports, {
    HfInference: () => HfInference,
    HfInferenceEndpoint: () => HfInferenceEndpoint,
+   InferenceOutputError: () => InferenceOutputError,
    audioClassification: () => audioClassification,
    automaticSpeechRecognition: () => automaticSpeechRecognition,
    conversational: () => conversational,
@@ -704,6 +706,7 @@ var HfInferenceEndpoint = class {
  0 && (module.exports = {
    HfInference,
    HfInferenceEndpoint,
+   InferenceOutputError,
    audioClassification,
    automaticSpeechRecognition,
    conversational,
package/dist/index.mjs CHANGED
@@ -1,3 +1,4 @@
+ /// <reference path="./index.d.ts" />
  var __defProp = Object.defineProperty;
  var __export = (target, all) => {
    for (var name in all)
@@ -655,6 +656,7 @@ var HfInferenceEndpoint = class {
  export {
    HfInference,
    HfInferenceEndpoint,
+   InferenceOutputError,
    audioClassification,
    automaticSpeechRecognition,
    conversational,
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
    "name": "@huggingface/inference",
-   "version": "2.3.0",
+   "version": "2.3.2",
    "packageManager": "pnpm@8.3.1",
    "license": "MIT",
    "author": "Tim Mikeladze <tim.mikeladze@gmail.com>",
@@ -29,32 +29,32 @@
      "src"
    ],
    "source": "src/index.ts",
-   "types": "src/index.ts",
+   "types": "./dist/index.d.ts",
    "main": "./dist/index.js",
    "module": "./dist/index.mjs",
    "exports": {
-     "types": "./src/index.ts",
+     "types": "./dist/index.d.ts",
      "require": "./dist/index.js",
      "import": "./dist/index.mjs"
    },
    "devDependencies": {
      "@types/node": "18.13.0",
+     "ts-node": "^10.9.1",
      "typescript": "^5.0.4",
      "vite": "^4.1.4",
-     "vitest": "^0.29.8",
-     "@huggingface/shared": "1.0.0"
+     "vitest": "^0.29.8"
    },
    "bundledDependencies": [
      "@huggingface/shared"
    ],
    "resolutions": {},
    "scripts": {
-     "build": "tsup src/index.ts --format cjs,esm --clean",
+     "build": "tsup src/index.ts --format cjs,esm --clean && pnpm run dts",
+     "dts": "ts-node scripts/generate-dts.ts",
      "lint": "eslint --quiet --fix --ext .cjs,.ts .",
      "lint:check": "eslint --ext .cjs,.ts .",
      "format": "prettier --write .",
      "format:check": "prettier --check .",
-     "preversion": "pnpm --filter doc-internal run fix-cdn-versions && git add ../../README.md",
      "test": "vitest run --config vitest.config.ts",
      "test:browser": "vitest run --browser.name=chrome --browser.headless --config vitest.config.ts",
      "type-check": "tsc"
package/src/index.ts CHANGED
@@ -1,3 +1,4 @@
  export { HfInference, HfInferenceEndpoint } from "./HfInference";
+ export { InferenceOutputError } from "./lib/InferenceOutputError";
  export * from "./types";
  export * from "./tasks";
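
The newly exported error class lets callers distinguish responses the library could not validate; a minimal sketch, assuming (as the class name suggests) that it is thrown when the API output does not match the expected task shape:

```ts
import { HfInference, InferenceOutputError } from "@huggingface/inference";

const hf = new HfInference();
try {
  await hf.translation({ model: "Helsinki-NLP/opus-mt-ru-en", inputs: "привет" });
} catch (err) {
  if (err instanceof InferenceOutputError) {
    // The API responded, but not with the shape the task expected (assumption).
    console.error("Unexpected output:", err.message);
  } else {
    throw err;
  }
}
```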
package/src/utils/pick.ts CHANGED
@@ -1,10 +1,7 @@
  /**
   * Return copy of object, only keeping allowlisted properties.
-  *
-  * This doesn't add {p: undefined} anymore, for props not in the o object.
   */
  export function pick<T, K extends keyof T>(o: T, props: K[] | ReadonlyArray<K>): Pick<T, K> {
-   // inspired by stackoverflow.com/questions/25553910/one-liner-to-take-some-properties-from-object-in-es-6
    return Object.assign(
      {},
      ...props.map((prop) => {