@huggingface/tasks 0.2.2 → 0.3.1

This diff shows the contents of publicly released package versions as published to their public registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in those registries.
Files changed (79)
  1. package/README.md +1 -1
  2. package/dist/index.cjs +3144 -3085
  3. package/dist/index.d.ts +441 -74
  4. package/dist/index.js +3143 -3084
  5. package/package.json +1 -1
  6. package/src/index.ts +2 -5
  7. package/src/library-to-tasks.ts +1 -1
  8. package/src/model-libraries-downloads.ts +20 -0
  9. package/src/{library-ui-elements.ts → model-libraries-snippets.ts} +46 -292
  10. package/src/model-libraries.ts +375 -44
  11. package/src/pipelines.ts +14 -8
  12. package/src/tasks/audio-classification/inference.ts +4 -4
  13. package/src/tasks/audio-classification/spec/input.json +4 -4
  14. package/src/tasks/audio-classification/spec/output.json +1 -12
  15. package/src/tasks/automatic-speech-recognition/inference.ts +35 -30
  16. package/src/tasks/automatic-speech-recognition/spec/input.json +3 -3
  17. package/src/tasks/automatic-speech-recognition/spec/output.json +30 -28
  18. package/src/tasks/common-definitions.json +25 -17
  19. package/src/tasks/depth-estimation/inference.ts +10 -10
  20. package/src/tasks/depth-estimation/spec/input.json +3 -8
  21. package/src/tasks/depth-estimation/spec/output.json +9 -3
  22. package/src/tasks/document-question-answering/inference.ts +16 -8
  23. package/src/tasks/document-question-answering/spec/input.json +9 -9
  24. package/src/tasks/document-question-answering/spec/output.json +2 -2
  25. package/src/tasks/feature-extraction/inference.ts +1 -1
  26. package/src/tasks/feature-extraction/spec/input.json +2 -2
  27. package/src/tasks/fill-mask/inference.ts +4 -3
  28. package/src/tasks/fill-mask/spec/input.json +3 -3
  29. package/src/tasks/fill-mask/spec/output.json +1 -1
  30. package/src/tasks/image-classification/inference.ts +3 -3
  31. package/src/tasks/image-classification/spec/input.json +4 -4
  32. package/src/tasks/image-segmentation/inference.ts +3 -3
  33. package/src/tasks/image-segmentation/spec/input.json +4 -4
  34. package/src/tasks/image-to-image/inference.ts +5 -5
  35. package/src/tasks/image-to-image/spec/input.json +9 -7
  36. package/src/tasks/image-to-text/inference.ts +25 -20
  37. package/src/tasks/image-to-text/spec/input.json +3 -3
  38. package/src/tasks/image-to-text/spec/output.json +8 -11
  39. package/src/tasks/index.ts +2 -0
  40. package/src/tasks/object-detection/inference.ts +1 -1
  41. package/src/tasks/object-detection/spec/input.json +2 -2
  42. package/src/tasks/placeholder/spec/input.json +4 -4
  43. package/src/tasks/placeholder/spec/output.json +1 -1
  44. package/src/tasks/question-answering/inference.ts +8 -8
  45. package/src/tasks/question-answering/spec/input.json +9 -9
  46. package/src/tasks/sentence-similarity/inference.ts +1 -1
  47. package/src/tasks/sentence-similarity/spec/input.json +2 -2
  48. package/src/tasks/summarization/inference.ts +5 -4
  49. package/src/tasks/table-question-answering/inference.ts +1 -1
  50. package/src/tasks/table-question-answering/spec/input.json +8 -3
  51. package/src/tasks/text-classification/inference.ts +3 -3
  52. package/src/tasks/text-classification/spec/input.json +4 -4
  53. package/src/tasks/text-generation/inference.ts +123 -14
  54. package/src/tasks/text-generation/spec/input.json +28 -12
  55. package/src/tasks/text-generation/spec/output.json +112 -9
  56. package/src/tasks/text-to-audio/inference.ts +24 -19
  57. package/src/tasks/text-to-audio/spec/input.json +2 -2
  58. package/src/tasks/text-to-audio/spec/output.json +10 -13
  59. package/src/tasks/text-to-image/inference.ts +6 -8
  60. package/src/tasks/text-to-image/spec/input.json +9 -7
  61. package/src/tasks/text-to-image/spec/output.json +7 -9
  62. package/src/tasks/text-to-speech/inference.ts +18 -17
  63. package/src/tasks/text2text-generation/inference.ts +10 -8
  64. package/src/tasks/text2text-generation/spec/input.json +4 -4
  65. package/src/tasks/text2text-generation/spec/output.json +8 -11
  66. package/src/tasks/token-classification/inference.ts +4 -4
  67. package/src/tasks/token-classification/spec/input.json +4 -4
  68. package/src/tasks/token-classification/spec/output.json +1 -1
  69. package/src/tasks/translation/inference.ts +5 -4
  70. package/src/tasks/video-classification/inference.ts +5 -5
  71. package/src/tasks/video-classification/spec/input.json +6 -6
  72. package/src/tasks/visual-question-answering/inference.ts +2 -2
  73. package/src/tasks/visual-question-answering/spec/input.json +3 -3
  74. package/src/tasks/zero-shot-classification/inference.ts +3 -3
  75. package/src/tasks/zero-shot-classification/spec/input.json +4 -4
  76. package/src/tasks/zero-shot-image-classification/inference.ts +2 -2
  77. package/src/tasks/zero-shot-image-classification/spec/input.json +3 -3
  78. package/src/tasks/zero-shot-object-detection/inference.ts +1 -1
  79. package/src/tasks/zero-shot-object-detection/spec/input.json +2 -2
package/dist/index.d.ts CHANGED
@@ -1,50 +1,3 @@
- /**
-  * Add your new library here.
-  *
-  * This is for modeling (= architectures) libraries, not for file formats (like ONNX, etc).
-  * File formats live in an enum inside the internal codebase.
-  */
- declare enum ModelLibrary {
-     "adapter-transformers" = "Adapters",
-     "allennlp" = "allenNLP",
-     "asteroid" = "Asteroid",
-     "bertopic" = "BERTopic",
-     "diffusers" = "Diffusers",
-     "doctr" = "docTR",
-     "espnet" = "ESPnet",
-     "fairseq" = "Fairseq",
-     "flair" = "Flair",
-     "keras" = "Keras",
-     "k2" = "K2",
-     "mlx" = "MLX",
-     "nemo" = "NeMo",
-     "open_clip" = "OpenCLIP",
-     "paddlenlp" = "PaddleNLP",
-     "peft" = "PEFT",
-     "pyannote-audio" = "pyannote.audio",
-     "sample-factory" = "Sample Factory",
-     "sentence-transformers" = "Sentence Transformers",
-     "setfit" = "SetFit",
-     "sklearn" = "Scikit-learn",
-     "spacy" = "spaCy",
-     "span-marker" = "SpanMarker",
-     "speechbrain" = "speechbrain",
-     "tensorflowtts" = "TensorFlowTTS",
-     "timm" = "Timm",
-     "fastai" = "fastai",
-     "transformers" = "Transformers",
-     "transformers.js" = "Transformers.js",
-     "stanza" = "Stanza",
-     "fasttext" = "fastText",
-     "stable-baselines3" = "Stable-Baselines3",
-     "ml-agents" = "Unity ML-Agents",
-     "pythae" = "Pythae",
-     "mindspore" = "MindSpore",
-     "unity-sentis" = "Unity Sentis"
- }
- type ModelLibraryKey = keyof typeof ModelLibrary;
- declare const ALL_DISPLAY_MODEL_LIBRARY_KEYS: ("speechbrain" | "fastai" | "adapter-transformers" | "allennlp" | "asteroid" | "bertopic" | "diffusers" | "doctr" | "espnet" | "fairseq" | "flair" | "keras" | "k2" | "mlx" | "nemo" | "open_clip" | "paddlenlp" | "peft" | "pyannote-audio" | "sample-factory" | "sentence-transformers" | "setfit" | "sklearn" | "spacy" | "span-marker" | "tensorflowtts" | "timm" | "transformers" | "transformers.js" | "stanza" | "fasttext" | "stable-baselines3" | "ml-agents" | "pythae" | "mindspore" | "unity-sentis")[];
-
  declare const MODALITIES: readonly ["cv", "nlp", "audio", "tabular", "multimodal", "rl", "other"];
  type Modality = (typeof MODALITIES)[number];
  declare const MODALITY_LABELS: {
@@ -162,7 +115,7 @@ declare const PIPELINE_DATA: {
      };
      "feature-extraction": {
          name: string;
-         modality: "multimodal";
+         modality: "nlp";
          color: "red";
      };
      "text-generation": {
@@ -265,7 +218,7 @@ declare const PIPELINE_DATA: {
      };
      "text-to-image": {
          name: string;
-         modality: "multimodal";
+         modality: "cv";
          color: "yellow";
      };
      "image-to-text": {
@@ -274,7 +227,7 @@ declare const PIPELINE_DATA: {
              type: string;
              name: string;
          }[];
-         modality: "multimodal";
+         modality: "cv";
          color: "red";
      };
      "image-to-image": {
@@ -288,7 +241,7 @@ declare const PIPELINE_DATA: {
      };
      "image-to-video": {
          name: string;
-         modality: "multimodal";
+         modality: "cv";
          color: "indigo";
      };
      "unconditional-image-generation": {
@@ -381,9 +334,15 @@ declare const PIPELINE_DATA: {
      };
      "text-to-video": {
          name: string;
-         modality: "multimodal";
+         modality: "cv";
          color: "green";
      };
+     "image-text-to-text": {
+         name: string;
+         modality: "multimodal";
+         color: "red";
+         hideInDatasets: true;
+     };
      "visual-question-answering": {
          name: string;
          subtasks: {
@@ -410,7 +369,7 @@ declare const PIPELINE_DATA: {
      };
      "graph-ml": {
          name: string;
-         modality: "multimodal";
+         modality: "other";
          color: "green";
      };
      "mask-generation": {
@@ -425,12 +384,12 @@ declare const PIPELINE_DATA: {
      };
      "text-to-3d": {
          name: string;
-         modality: "multimodal";
+         modality: "cv";
          color: "yellow";
      };
      "image-to-3d": {
          name: string;
-         modality: "multimodal";
+         modality: "cv";
          color: "green";
      };
      other: {
@@ -442,18 +401,9 @@ declare const PIPELINE_DATA: {
      };
  };
  type PipelineType = keyof typeof PIPELINE_DATA;
- declare const PIPELINE_TYPES: ("other" | "text-classification" | "token-classification" | "table-question-answering" | "question-answering" | "zero-shot-classification" | "translation" | "summarization" | "conversational" | "feature-extraction" | "text-generation" | "text2text-generation" | "fill-mask" | "sentence-similarity" | "text-to-speech" | "text-to-audio" | "automatic-speech-recognition" | "audio-to-audio" | "audio-classification" | "voice-activity-detection" | "depth-estimation" | "image-classification" | "object-detection" | "image-segmentation" | "text-to-image" | "image-to-text" | "image-to-image" | "image-to-video" | "unconditional-image-generation" | "video-classification" | "reinforcement-learning" | "robotics" | "tabular-classification" | "tabular-regression" | "tabular-to-text" | "table-to-text" | "multiple-choice" | "text-retrieval" | "time-series-forecasting" | "text-to-video" | "visual-question-answering" | "document-question-answering" | "zero-shot-image-classification" | "graph-ml" | "mask-generation" | "zero-shot-object-detection" | "text-to-3d" | "image-to-3d")[];
+ declare const PIPELINE_TYPES: ("other" | "text-classification" | "token-classification" | "table-question-answering" | "question-answering" | "zero-shot-classification" | "translation" | "summarization" | "conversational" | "feature-extraction" | "text-generation" | "text2text-generation" | "fill-mask" | "sentence-similarity" | "text-to-speech" | "text-to-audio" | "automatic-speech-recognition" | "audio-to-audio" | "audio-classification" | "voice-activity-detection" | "depth-estimation" | "image-classification" | "object-detection" | "image-segmentation" | "text-to-image" | "image-to-text" | "image-to-image" | "image-to-video" | "unconditional-image-generation" | "video-classification" | "reinforcement-learning" | "robotics" | "tabular-classification" | "tabular-regression" | "tabular-to-text" | "table-to-text" | "multiple-choice" | "text-retrieval" | "time-series-forecasting" | "text-to-video" | "image-text-to-text" | "visual-question-answering" | "document-question-answering" | "zero-shot-image-classification" | "graph-ml" | "mask-generation" | "zero-shot-object-detection" | "text-to-3d" | "image-to-3d")[];
  declare const SUBTASK_TYPES: string[];
- declare const PIPELINE_TYPES_SET: Set<"other" | "text-classification" | "token-classification" | "table-question-answering" | "question-answering" | "zero-shot-classification" | "translation" | "summarization" | "conversational" | "feature-extraction" | "text-generation" | "text2text-generation" | "fill-mask" | "sentence-similarity" | "text-to-speech" | "text-to-audio" | "automatic-speech-recognition" | "audio-to-audio" | "audio-classification" | "voice-activity-detection" | "depth-estimation" | "image-classification" | "object-detection" | "image-segmentation" | "text-to-image" | "image-to-text" | "image-to-image" | "image-to-video" | "unconditional-image-generation" | "video-classification" | "reinforcement-learning" | "robotics" | "tabular-classification" | "tabular-regression" | "tabular-to-text" | "table-to-text" | "multiple-choice" | "text-retrieval" | "time-series-forecasting" | "text-to-video" | "visual-question-answering" | "document-question-answering" | "zero-shot-image-classification" | "graph-ml" | "mask-generation" | "zero-shot-object-detection" | "text-to-3d" | "image-to-3d">;
-
- /**
-  * Mapping from library name (excluding Transformers) to its supported tasks.
-  * Inference Endpoints (serverless) should be disabled for all other (library, task) pairs beyond this mapping.
-  * As an exception, we assume Transformers supports all inference tasks.
-  * This mapping is generated automatically by "python-api-export-tasks" action in huggingface/api-inference-community repo upon merge.
-  * Ref: https://github.com/huggingface/api-inference-community/pull/158
-  */
- declare const LIBRARY_TASK_MAPPING_EXCLUDING_TRANSFORMERS: Partial<Record<ModelLibraryKey, PipelineType[]>>;
+ declare const PIPELINE_TYPES_SET: Set<"other" | "text-classification" | "token-classification" | "table-question-answering" | "question-answering" | "zero-shot-classification" | "translation" | "summarization" | "conversational" | "feature-extraction" | "text-generation" | "text2text-generation" | "fill-mask" | "sentence-similarity" | "text-to-speech" | "text-to-audio" | "automatic-speech-recognition" | "audio-to-audio" | "audio-classification" | "voice-activity-detection" | "depth-estimation" | "image-classification" | "object-detection" | "image-segmentation" | "text-to-image" | "image-to-text" | "image-to-image" | "image-to-video" | "unconditional-image-generation" | "video-classification" | "reinforcement-learning" | "robotics" | "tabular-classification" | "tabular-regression" | "tabular-to-text" | "table-to-text" | "multiple-choice" | "text-retrieval" | "time-series-forecasting" | "text-to-video" | "image-text-to-text" | "visual-question-answering" | "document-question-answering" | "zero-shot-image-classification" | "graph-ml" | "mask-generation" | "zero-shot-object-detection" | "text-to-3d" | "image-to-3d">;

  /**
   * See default-widget-inputs.ts for the default widget inputs, this files only contains the types
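
Note: the consumer-visible effect of this hunk is that "image-text-to-text" becomes a valid PipelineType in 0.3.1. A minimal sketch of how the exported set is typically used to validate arbitrary strings (the isPipelineType helper is our own illustration, not part of the package):

    import { PIPELINE_TYPES_SET, type PipelineType } from "@huggingface/tasks";

    // Hypothetical helper: narrow an arbitrary string to PipelineType.
    function isPipelineType(value: string): value is PipelineType {
        return PIPELINE_TYPES_SET.has(value as PipelineType);
    }

    isPipelineType("image-text-to-text"); // true in 0.3.1, false in 0.2.2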
@@ -655,21 +605,48 @@ interface TransformersInfo {
      processor?: string;
  }

+ /**
+  * This file contains the (simplified) types used
+  * to represent queries that are made to Elastic
+  * in order to count number of model downloads
+  *
+  * Read this doc about download stats on the Hub:
+  *
+  * https://huggingface.co/docs/hub/models-download-stats
+  *
+  * see also:
+  * https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-bool-query.html
+  */
+ type ElasticBoolQueryFilter = {
+     term?: {
+         path: string;
+     };
+ } | {
+     terms?: {
+         path: string[];
+     };
+ } | {
+     wildcard?: {
+         path: string;
+     };
+ };
+
  /**
   * Elements configurable by a model library.
   */
  interface LibraryUiElement {
      /**
-      * Name displayed on the main
+      * Pretty name of the library.
+      * displayed in tags, and on the main
       * call-to-action button on the model page.
       */
-     btnLabel: string;
+     prettyLabel: string;
      /**
-      * Repo name
+      * Repo name of the library's (usually on GitHub) code repo
       */
      repoName: string;
      /**
-      * URL to library's repo
+      * URL to library's (usually on GitHub) code repo
       */
      repoUrl: string;
      /**
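
Note: the new ElasticBoolQueryFilter is a union of three single-key shapes mirroring Elastic's term, terms, and wildcard queries; it is declared here but not re-exported from the package. A sketch of literals that satisfy each branch (the file paths are illustrative, not taken from this diff):

    // Count a download when exactly this file is fetched.
    const exactFile = { term: { path: "model.safetensors" } };

    // Count a download when any one of these files is fetched.
    const anyOfFiles = { terms: { path: ["config.json", "config.yaml"] } };

    // Count a download when a fetched path matches this pattern.
    const byPattern = { wildcard: { path: "*.nemo" } };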
@@ -677,11 +654,401 @@ interface LibraryUiElement {
       */
      docsUrl?: string;
      /**
-      * Code snippet displayed on model page
+      * Code snippet(s) displayed on model page
+      */
+     snippets?: (model: ModelData) => string[];
+     /**
+      * Elastic query used to count this library's model downloads
+      *
+      * By default, those files are counted:
+      * "config.json", "config.yaml", "hyperparams.yaml", "meta.yaml"
+      */
+     countDownloads?: ElasticBoolQueryFilter;
+     /**
+      * should we display this library in hf.co/models filter
+      * (only for popular libraries with > 100 models)
       */
-     snippets: (model: ModelData) => string[];
+     filter?: boolean;
  }
- declare const MODEL_LIBRARIES_UI_ELEMENTS: Partial<Record<ModelLibraryKey, LibraryUiElement>>;
+ /**
+  * Add your new library here.
+  *
+  * This is for modeling (= architectures) libraries, not for file formats (like ONNX, etc).
+  * (unlike libraries, file formats live in an enum inside the internal codebase.)
+  *
+  * Doc on how to add a library to the Hub:
+  *
+  * https://huggingface.co/docs/hub/models-adding-libraries
+  *
+  * /!\ IMPORTANT
+  *
+  * The key you choose is the tag your models have in their library_name on the Hub.
+  */
+ declare const MODEL_LIBRARIES_UI_ELEMENTS: {
+     "adapter-transformers": {
+         prettyLabel: string;
+         repoName: string;
+         repoUrl: string;
+         docsUrl: string;
+         snippets: (model: ModelData) => string[];
+         filter: true;
+         countDownloads: {
+             term: {
+                 path: string;
+             };
+         };
+     };
+     allennlp: {
+         prettyLabel: string;
+         repoName: string;
+         repoUrl: string;
+         docsUrl: string;
+         snippets: (model: ModelData) => string[];
+         filter: true;
+     };
+     asteroid: {
+         prettyLabel: string;
+         repoName: string;
+         repoUrl: string;
+         docsUrl: string;
+         snippets: (model: ModelData) => string[];
+         filter: true;
+         countDownloads: {
+             term: {
+                 path: string;
+             };
+         };
+     };
+     bertopic: {
+         prettyLabel: string;
+         repoName: string;
+         repoUrl: string;
+         snippets: (model: ModelData) => string[];
+         filter: true;
+     };
+     diffusers: {
+         prettyLabel: string;
+         repoName: string;
+         repoUrl: string;
+         docsUrl: string;
+         snippets: (model: ModelData) => string[];
+         filter: true;
+     };
+     doctr: {
+         prettyLabel: string;
+         repoName: string;
+         repoUrl: string;
+     };
+     espnet: {
+         prettyLabel: string;
+         repoName: string;
+         repoUrl: string;
+         docsUrl: string;
+         snippets: (model: ModelData) => string[];
+         filter: true;
+     };
+     fairseq: {
+         prettyLabel: string;
+         repoName: string;
+         repoUrl: string;
+         snippets: (model: ModelData) => string[];
+         filter: true;
+     };
+     fastai: {
+         prettyLabel: string;
+         repoName: string;
+         repoUrl: string;
+         docsUrl: string;
+         snippets: (model: ModelData) => string[];
+         filter: true;
+     };
+     fasttext: {
+         prettyLabel: string;
+         repoName: string;
+         repoUrl: string;
+         snippets: (model: ModelData) => string[];
+         filter: true;
+     };
+     flair: {
+         prettyLabel: string;
+         repoName: string;
+         repoUrl: string;
+         docsUrl: string;
+         snippets: (model: ModelData) => string[];
+         filter: true;
+         countDownloads: {
+             term: {
+                 path: string;
+             };
+         };
+     };
+     keras: {
+         prettyLabel: string;
+         repoName: string;
+         repoUrl: string;
+         docsUrl: string;
+         snippets: (model: ModelData) => string[];
+         filter: true;
+         countDownloads: {
+             term: {
+                 path: string;
+             };
+         };
+     };
+     k2: {
+         prettyLabel: string;
+         repoName: string;
+         repoUrl: string;
+     };
+     mindspore: {
+         prettyLabel: string;
+         repoName: string;
+         repoUrl: string;
+     };
+     "ml-agents": {
+         prettyLabel: string;
+         repoName: string;
+         repoUrl: string;
+         docsUrl: string;
+         snippets: (model: ModelData) => string[];
+         filter: true;
+         countDownloads: {
+             wildcard: {
+                 path: string;
+             };
+         };
+     };
+     mlx: {
+         prettyLabel: string;
+         repoName: string;
+         repoUrl: string;
+         snippets: (model: ModelData) => string[];
+         filter: true;
+     };
+     nemo: {
+         prettyLabel: string;
+         repoName: string;
+         repoUrl: string;
+         snippets: (model: ModelData) => string[];
+         filter: true;
+         countDownloads: {
+             wildcard: {
+                 path: string;
+             };
+         };
+     };
+     open_clip: {
+         prettyLabel: string;
+         repoName: string;
+         repoUrl: string;
+         snippets: (model: ModelData) => string[];
+         filter: true;
+         countDownloads: {
+             wildcard: {
+                 path: string;
+             };
+         };
+     };
+     paddlenlp: {
+         prettyLabel: string;
+         repoName: string;
+         repoUrl: string;
+         docsUrl: string;
+         snippets: (model: ModelData) => string[];
+         filter: true;
+         countDownloads: {
+             term: {
+                 path: string;
+             };
+         };
+     };
+     peft: {
+         prettyLabel: string;
+         repoName: string;
+         repoUrl: string;
+         snippets: (model: ModelData) => string[];
+         filter: true;
+         countDownloads: {
+             term: {
+                 path: string;
+             };
+         };
+     };
+     "pyannote-audio": {
+         prettyLabel: string;
+         repoName: string;
+         repoUrl: string;
+         snippets: (model: ModelData) => string[];
+         filter: true;
+     };
+     pythae: {
+         prettyLabel: string;
+         repoName: string;
+         repoUrl: string;
+         snippets: (model: ModelData) => string[];
+         filter: true;
+     };
+     "sample-factory": {
+         prettyLabel: string;
+         repoName: string;
+         repoUrl: string;
+         docsUrl: string;
+         snippets: (model: ModelData) => string[];
+         filter: true;
+         countDownloads: {
+             term: {
+                 path: string;
+             };
+         };
+     };
+     "sentence-transformers": {
+         prettyLabel: string;
+         repoName: string;
+         repoUrl: string;
+         docsUrl: string;
+         snippets: (model: ModelData) => string[];
+         filter: true;
+     };
+     setfit: {
+         prettyLabel: string;
+         repoName: string;
+         repoUrl: string;
+         docsUrl: string;
+         snippets: (model: ModelData) => string[];
+         filter: true;
+     };
+     sklearn: {
+         prettyLabel: string;
+         repoName: string;
+         repoUrl: string;
+         snippets: (model: ModelData) => string[];
+         filter: true;
+         countDownloads: {
+             term: {
+                 path: string;
+             };
+         };
+     };
+     spacy: {
+         prettyLabel: string;
+         repoName: string;
+         repoUrl: string;
+         docsUrl: string;
+         snippets: (model: ModelData) => string[];
+         filter: true;
+         countDownloads: {
+             wildcard: {
+                 path: string;
+             };
+         };
+     };
+     "span-marker": {
+         prettyLabel: string;
+         repoName: string;
+         repoUrl: string;
+         docsUrl: string;
+         snippets: (model: ModelData) => string[];
+         filter: true;
+     };
+     speechbrain: {
+         prettyLabel: string;
+         repoName: string;
+         repoUrl: string;
+         docsUrl: string;
+         snippets: (model: ModelData) => string[];
+         filter: true;
+         countDownloads: {
+             term: {
+                 path: string;
+             };
+         };
+     };
+     "stable-baselines3": {
+         prettyLabel: string;
+         repoName: string;
+         repoUrl: string;
+         docsUrl: string;
+         snippets: (model: ModelData) => string[];
+         filter: true;
+         countDownloads: {
+             wildcard: {
+                 path: string;
+             };
+         };
+     };
+     stanza: {
+         prettyLabel: string;
+         repoName: string;
+         repoUrl: string;
+         docsUrl: string;
+         snippets: (model: ModelData) => string[];
+         filter: true;
+         countDownloads: {
+             term: {
+                 path: string;
+             };
+         };
+     };
+     tensorflowtts: {
+         prettyLabel: string;
+         repoName: string;
+         repoUrl: string;
+         snippets: (model: ModelData) => string[];
+     };
+     timm: {
+         prettyLabel: string;
+         repoName: string;
+         repoUrl: string;
+         docsUrl: string;
+         snippets: (model: ModelData) => string[];
+         filter: true;
+         countDownloads: {
+             terms: {
+                 path: string[];
+             };
+         };
+     };
+     transformers: {
+         prettyLabel: string;
+         repoName: string;
+         repoUrl: string;
+         docsUrl: string;
+         snippets: (model: ModelData) => string[];
+         filter: true;
+     };
+     "transformers.js": {
+         prettyLabel: string;
+         repoName: string;
+         repoUrl: string;
+         docsUrl: string;
+         snippets: (model: ModelData) => string[];
+         filter: true;
+     };
+     "unity-sentis": {
+         prettyLabel: string;
+         repoName: string;
+         repoUrl: string;
+         snippets: () => string[];
+         filter: true;
+         countDownloads: {
+             wildcard: {
+                 path: string;
+             };
+         };
+     };
+ };
+ type ModelLibraryKey = keyof typeof MODEL_LIBRARIES_UI_ELEMENTS;
+ declare const ALL_MODEL_LIBRARY_KEYS: ("sklearn" | "adapter-transformers" | "allennlp" | "asteroid" | "bertopic" | "diffusers" | "doctr" | "espnet" | "fairseq" | "fastai" | "fasttext" | "flair" | "keras" | "k2" | "mindspore" | "ml-agents" | "mlx" | "nemo" | "open_clip" | "paddlenlp" | "peft" | "pyannote-audio" | "pythae" | "sample-factory" | "sentence-transformers" | "setfit" | "spacy" | "span-marker" | "speechbrain" | "stable-baselines3" | "stanza" | "tensorflowtts" | "timm" | "transformers" | "transformers.js" | "unity-sentis")[];
+ declare const ALL_DISPLAY_MODEL_LIBRARY_KEYS: ("sklearn" | "adapter-transformers" | "allennlp" | "asteroid" | "bertopic" | "diffusers" | "doctr" | "espnet" | "fairseq" | "fastai" | "fasttext" | "flair" | "keras" | "k2" | "mindspore" | "ml-agents" | "mlx" | "nemo" | "open_clip" | "paddlenlp" | "peft" | "pyannote-audio" | "pythae" | "sample-factory" | "sentence-transformers" | "setfit" | "spacy" | "span-marker" | "speechbrain" | "stable-baselines3" | "stanza" | "tensorflowtts" | "timm" | "transformers" | "transformers.js" | "unity-sentis")[];
+
+ /**
+  * Mapping from library name (excluding Transformers) to its supported tasks.
+  * Inference API (serverless) should be disabled for all other (library, task) pairs beyond this mapping.
+  * As an exception, we assume Transformers supports all inference tasks.
+  * This mapping is generated automatically by "python-api-export-tasks" action in huggingface/api-inference-community repo upon merge.
+  * Ref: https://github.com/huggingface/api-inference-community/pull/158
+  */
+ declare const LIBRARY_TASK_MAPPING_EXCLUDING_TRANSFORMERS: Partial<Record<ModelLibraryKey, PipelineType[]>>;

  type PerLanguageMapping = Map<PipelineType, string[] | WidgetExample[]>;
  declare const MAPPING_DEFAULT_WIDGET: Map<string, PerLanguageMapping>;
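
Note: taken together, the btnLabel → prettyLabel rename, the now-optional snippets, and the new countDownloads and filter fields mean a library registration would look roughly like this (a hypothetical "my-library" entry sketched against the updated LibraryUiElement interface; it is not part of this diff):

    import type { ModelData } from "@huggingface/tasks";

    // Hypothetical entry shaped like the 0.3.1 LibraryUiElement interface.
    const myLibrary = {
        prettyLabel: "My Library", // was `btnLabel` in 0.2.2
        repoName: "my-library",
        repoUrl: "https://github.com/example/my-library",
        docsUrl: "https://example.com/my-library/docs",
        snippets: (model: ModelData): string[] => [
            `from my_library import load\npipe = load("${model.id}")`,
        ],
        // Override the default counted files ("config.json", "config.yaml",
        // "hyperparams.yaml", "meta.yaml") with a library-specific one.
        countDownloads: { term: { path: "my_library.json" } },
        filter: false, // only enable once the library has > 100 models on the Hub
    };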
@@ -851,4 +1218,4 @@ declare namespace index {
      };
  }

- export { ALL_DISPLAY_MODEL_LIBRARY_KEYS, ExampleRepo, InferenceDisplayability, LIBRARY_TASK_MAPPING_EXCLUDING_TRANSFORMERS, LibraryUiElement, MAPPING_DEFAULT_WIDGET, MODALITIES, MODALITY_LABELS, MODEL_LIBRARIES_UI_ELEMENTS, Modality, ModelData, ModelLibrary, ModelLibraryKey, PIPELINE_DATA, PIPELINE_TYPES, PIPELINE_TYPES_SET, PipelineData, PipelineType, SUBTASK_TYPES, TASKS_DATA, TASKS_MODEL_LIBRARIES, TaskData, TaskDataCustom, TaskDemo, TaskDemoEntry, TransformersInfo, WidgetExample, WidgetExampleAssetAndPromptInput, WidgetExampleAssetAndTextInput, WidgetExampleAssetAndZeroShotInput, WidgetExampleAssetInput, WidgetExampleAttribute, WidgetExampleOutput, WidgetExampleOutputAnswerScore, WidgetExampleOutputLabels, WidgetExampleOutputText, WidgetExampleOutputUrl, WidgetExampleSentenceSimilarityInput, WidgetExampleStructuredDataInput, WidgetExampleTableDataInput, WidgetExampleTextAndContextInput, WidgetExampleTextAndTableInput, WidgetExampleTextInput, WidgetExampleZeroShotTextInput, index as snippets };
+ export { ALL_DISPLAY_MODEL_LIBRARY_KEYS, ALL_MODEL_LIBRARY_KEYS, ExampleRepo, InferenceDisplayability, LIBRARY_TASK_MAPPING_EXCLUDING_TRANSFORMERS, LibraryUiElement, MAPPING_DEFAULT_WIDGET, MODALITIES, MODALITY_LABELS, MODEL_LIBRARIES_UI_ELEMENTS, Modality, ModelData, ModelLibraryKey, PIPELINE_DATA, PIPELINE_TYPES, PIPELINE_TYPES_SET, PipelineData, PipelineType, SUBTASK_TYPES, TASKS_DATA, TASKS_MODEL_LIBRARIES, TaskData, TaskDataCustom, TaskDemo, TaskDemoEntry, TransformersInfo, WidgetExample, WidgetExampleAssetAndPromptInput, WidgetExampleAssetAndTextInput, WidgetExampleAssetAndZeroShotInput, WidgetExampleAssetInput, WidgetExampleAttribute, WidgetExampleOutput, WidgetExampleOutputAnswerScore, WidgetExampleOutputLabels, WidgetExampleOutputText, WidgetExampleOutputUrl, WidgetExampleSentenceSimilarityInput, WidgetExampleStructuredDataInput, WidgetExampleTableDataInput, WidgetExampleTextAndContextInput, WidgetExampleTextAndTableInput, WidgetExampleTextInput, WidgetExampleZeroShotTextInput, index as snippets };
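
Note: the export surface gains ALL_MODEL_LIBRARY_KEYS, and ModelLibraryKey is now derived from the keys of MODEL_LIBRARIES_UI_ELEMENTS rather than from the removed ModelLibrary enum, so 0.2.2 code importing that enum has to migrate. A short consumer-side sketch:

    import {
        ALL_MODEL_LIBRARY_KEYS,
        MODEL_LIBRARIES_UI_ELEMENTS,
        type ModelLibraryKey,
    } from "@huggingface/tasks";

    const key: ModelLibraryKey = "transformers";

    // Where 0.2.2 code read display names off the ModelLibrary enum,
    // 0.3.1 code reads prettyLabel off the UI-elements map instead.
    console.log(MODEL_LIBRARIES_UI_ELEMENTS[key].prettyLabel);

    console.log(ALL_MODEL_LIBRARY_KEYS.length); // 36 libraries in 0.3.1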