@huggingface/tasks 0.0.6 → 0.0.8

This diff shows the changes between publicly released versions of the package, as they appear in their public registry. It is provided for informational purposes only.
Files changed (86)
  1. package/README.md +16 -2
  2. package/dist/index.d.ts +381 -5
  3. package/dist/index.js +1986 -77
  4. package/dist/index.mjs +1985 -76
  5. package/package.json +2 -4
  6. package/src/default-widget-inputs.ts +718 -0
  7. package/src/index.ts +35 -4
  8. package/src/library-to-tasks.ts +47 -0
  9. package/src/library-ui-elements.ts +765 -0
  10. package/src/model-data.ts +239 -0
  11. package/src/pipelines.ts +39 -0
  12. package/src/snippets/curl.ts +63 -0
  13. package/src/snippets/index.ts +6 -0
  14. package/src/snippets/inputs.ts +144 -0
  15. package/src/snippets/js.ts +150 -0
  16. package/src/snippets/python.ts +155 -0
  17. package/src/{audio-classification → tasks/audio-classification}/about.md +2 -1
  18. package/src/{audio-classification → tasks/audio-classification}/data.ts +3 -3
  19. package/src/{audio-to-audio → tasks/audio-to-audio}/data.ts +1 -1
  20. package/src/{automatic-speech-recognition → tasks/automatic-speech-recognition}/about.md +3 -2
  21. package/src/{automatic-speech-recognition → tasks/automatic-speech-recognition}/data.ts +6 -6
  22. package/src/{conversational → tasks/conversational}/data.ts +1 -1
  23. package/src/{depth-estimation → tasks/depth-estimation}/data.ts +1 -1
  24. package/src/{document-question-answering → tasks/document-question-answering}/data.ts +1 -1
  25. package/src/{feature-extraction → tasks/feature-extraction}/data.ts +2 -7
  26. package/src/{fill-mask → tasks/fill-mask}/data.ts +1 -1
  27. package/src/{image-classification → tasks/image-classification}/data.ts +1 -1
  28. package/src/{image-segmentation → tasks/image-segmentation}/data.ts +1 -1
  29. package/src/{image-to-image → tasks/image-to-image}/about.md +8 -7
  30. package/src/{image-to-image → tasks/image-to-image}/data.ts +1 -1
  31. package/src/{image-to-text → tasks/image-to-text}/data.ts +1 -1
  32. package/src/{tasksData.ts → tasks/index.ts} +144 -15
  33. package/src/{object-detection → tasks/object-detection}/data.ts +1 -1
  34. package/src/{placeholder → tasks/placeholder}/data.ts +1 -1
  35. package/src/{question-answering → tasks/question-answering}/data.ts +1 -1
  36. package/src/{reinforcement-learning → tasks/reinforcement-learning}/data.ts +1 -1
  37. package/src/{sentence-similarity → tasks/sentence-similarity}/data.ts +1 -1
  38. package/src/{summarization → tasks/summarization}/data.ts +1 -1
  39. package/src/{table-question-answering → tasks/table-question-answering}/data.ts +1 -1
  40. package/src/{tabular-classification → tasks/tabular-classification}/data.ts +1 -1
  41. package/src/{tabular-regression → tasks/tabular-regression}/data.ts +1 -1
  42. package/src/{text-classification → tasks/text-classification}/data.ts +1 -1
  43. package/src/{text-generation → tasks/text-generation}/about.md +13 -3
  44. package/src/{text-generation → tasks/text-generation}/data.ts +2 -2
  45. package/src/{text-to-image → tasks/text-to-image}/data.ts +1 -1
  46. package/src/{text-to-speech → tasks/text-to-speech}/about.md +2 -1
  47. package/src/{text-to-speech → tasks/text-to-speech}/data.ts +4 -4
  48. package/src/{text-to-video → tasks/text-to-video}/data.ts +1 -1
  49. package/src/{token-classification → tasks/token-classification}/data.ts +1 -1
  50. package/src/{translation → tasks/translation}/data.ts +1 -1
  51. package/src/{unconditional-image-generation → tasks/unconditional-image-generation}/data.ts +1 -1
  52. package/src/{video-classification → tasks/video-classification}/about.md +8 -28
  53. package/src/{video-classification → tasks/video-classification}/data.ts +1 -1
  54. package/src/{visual-question-answering → tasks/visual-question-answering}/data.ts +1 -1
  55. package/src/{zero-shot-classification → tasks/zero-shot-classification}/data.ts +1 -1
  56. package/src/{zero-shot-image-classification → tasks/zero-shot-image-classification}/data.ts +1 -1
  57. package/src/Types.ts +0 -64
  58. package/src/const.ts +0 -59
  59. /package/src/{modelLibraries.ts → model-libraries.ts} +0 -0
  60. /package/src/{audio-to-audio → tasks/audio-to-audio}/about.md +0 -0
  61. /package/src/{conversational → tasks/conversational}/about.md +0 -0
  62. /package/src/{depth-estimation → tasks/depth-estimation}/about.md +0 -0
  63. /package/src/{document-question-answering → tasks/document-question-answering}/about.md +0 -0
  64. /package/src/{feature-extraction → tasks/feature-extraction}/about.md +0 -0
  65. /package/src/{fill-mask → tasks/fill-mask}/about.md +0 -0
  66. /package/src/{image-classification → tasks/image-classification}/about.md +0 -0
  67. /package/src/{image-segmentation → tasks/image-segmentation}/about.md +0 -0
  68. /package/src/{image-to-text → tasks/image-to-text}/about.md +0 -0
  69. /package/src/{object-detection → tasks/object-detection}/about.md +0 -0
  70. /package/src/{placeholder → tasks/placeholder}/about.md +0 -0
  71. /package/src/{question-answering → tasks/question-answering}/about.md +0 -0
  72. /package/src/{reinforcement-learning → tasks/reinforcement-learning}/about.md +0 -0
  73. /package/src/{sentence-similarity → tasks/sentence-similarity}/about.md +0 -0
  74. /package/src/{summarization → tasks/summarization}/about.md +0 -0
  75. /package/src/{table-question-answering → tasks/table-question-answering}/about.md +0 -0
  76. /package/src/{tabular-classification → tasks/tabular-classification}/about.md +0 -0
  77. /package/src/{tabular-regression → tasks/tabular-regression}/about.md +0 -0
  78. /package/src/{text-classification → tasks/text-classification}/about.md +0 -0
  79. /package/src/{text-to-image → tasks/text-to-image}/about.md +0 -0
  80. /package/src/{text-to-video → tasks/text-to-video}/about.md +0 -0
  81. /package/src/{token-classification → tasks/token-classification}/about.md +0 -0
  82. /package/src/{translation → tasks/translation}/about.md +0 -0
  83. /package/src/{unconditional-image-generation → tasks/unconditional-image-generation}/about.md +0 -0
  84. /package/src/{visual-question-answering → tasks/visual-question-answering}/about.md +0 -0
  85. /package/src/{zero-shot-classification → tasks/zero-shot-classification}/about.md +0 -0
  86. /package/src/{zero-shot-image-classification → tasks/zero-shot-image-classification}/about.md +0 -0
package/README.md CHANGED
@@ -9,7 +9,7 @@ The Task pages are made to lower the barrier of entry to understand a task that
  The task pages avoid jargon to let everyone understand the documentation, and if specific terminology is needed, it is explained on the most basic level possible. This is important to understand before contributing to Tasks: at the end of every task page, the user is expected to be able to find and pull a model from the Hub and use it on their data and see if it works for their use case to come up with a proof of concept.

  ## How to Contribute
- You can open a pull request to contribute a new documentation about a new task. Under `src` we have a folder for every task that contains two files, `about.md` and `data.ts`. `about.md` contains the markdown part of the page, use cases, resources and minimal code block to infer a model that belongs to the task. `data.ts` contains redirections to canonical models and datasets, metrics, the schema of the task and the information the inference widget needs.
+ You can open a pull request to contribute a new documentation about a new task. Under `src/tasks` we have a folder for every task that contains two files, `about.md` and `data.ts`. `about.md` contains the markdown part of the page, use cases, resources and minimal code block to infer a model that belongs to the task. `data.ts` contains redirections to canonical models and datasets, metrics, the schema of the task and the information the inference widget needs.

  ![Anatomy of a Task Page](https://huggingface.co/datasets/huggingfacejs/tasks/resolve/main/contribution-guide/anatomy.png)

@@ -17,4 +17,18 @@ We have a [`dataset`](https://huggingface.co/datasets/huggingfacejs/tasks) that

  ![Libraries of a Task](https://huggingface.co/datasets/huggingfacejs/tasks/resolve/main/contribution-guide/libraries.png)

- This might seem overwhelming, but you don't necessarily need to add all of these in one pull request or on your own, you can simply contribute one section. Feel free to ask for help whenever you need.
+ This might seem overwhelming, but you don't necessarily need to add all of these in one pull request or on your own, you can simply contribute one section. Feel free to ask for help whenever you need.
+
+ ## Other data
+
+ This package contains the definition files (written in Typescript) for the huggingface.co hub's:
+
+ - **pipeline types** a.k.a. **task types** (used to determine which widget to display on the model page, and which inference API to run)
+ - **default widget inputs** (when they aren't provided in the model card)
+ - definitions and UI elements for **third party libraries**.
+
+ Please add to any of those definitions by opening a PR. Thanks 🔥
+
+ ⚠️ The hub's definitive doc is at https://huggingface.co/docs/hub.
+
+ ## Feedback (feature requests, bugs, etc.) is super welcome 💙💚💛💜♥️🧡
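The `src/tasks` layout described in the README pairs with the new `TaskDataCustom` type introduced in this release (see the `index.d.ts` diff below): each task folder's `data.ts` default-exports a `TaskDataCustom` object. The following is a minimal, hypothetical sketch of such a file; only `widgetModels`, `youtubeId?`, and the `ExampleRepo` shape (`{ description, id }`) are confirmed by the typings in this diff, so the `datasets`/`models` field names, the import path, and the repo ids are assumptions, and the real `TaskData` interface has more members than shown here.

```ts
// src/tasks/my-new-task/data.ts — hypothetical sketch, abridged
import type { TaskDataCustom } from "..";

const taskData: TaskDataCustom = {
	// Assumed fields, following the ExampleRepo shape ({ description, id })
	// visible in the typings: canonical repos surfaced on the task page.
	datasets: [{ description: "A benchmark dataset for this task.", id: "user/dataset_name" }],
	models: [{ description: "A strong baseline model for this task.", id: "user/repo_name" }],
	// Confirmed by the index.d.ts diff: models backing the inference widget,
	// plus an optional explainer video (youtubeId?: string), omitted here.
	widgetModels: ["user/repo_name"],
};

export default taskData;
```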
package/dist/index.d.ts CHANGED
@@ -276,9 +276,18 @@ declare const PIPELINE_DATA: {
      };
      "image-to-image": {
          name: string;
+         subtasks: {
+             type: string;
+             name: string;
+         }[];
          modality: "cv";
          color: "indigo";
      };
+     "image-to-video": {
+         name: string;
+         modality: "multimodal";
+         color: "indigo";
+     };
      "unconditional-image-generation": {
          name: string;
          modality: "cv";
@@ -401,6 +410,26 @@ declare const PIPELINE_DATA: {
          modality: "multimodal";
          color: "green";
      };
+     "mask-generation": {
+         name: string;
+         modality: "cv";
+         color: "indigo";
+     };
+     "zero-shot-object-detection": {
+         name: string;
+         modality: "cv";
+         color: "yellow";
+     };
+     "text-to-3d": {
+         name: string;
+         modality: "multimodal";
+         color: "yellow";
+     };
+     "image-to-3d": {
+         name: string;
+         modality: "multimodal";
+         color: "green";
+     };
      other: {
          name: string;
          modality: "other";
@@ -410,10 +439,251 @@ declare const PIPELINE_DATA: {
      };
  };
  type PipelineType = keyof typeof PIPELINE_DATA;
- declare const PIPELINE_TYPES: ("other" | "text-classification" | "token-classification" | "table-question-answering" | "question-answering" | "zero-shot-classification" | "translation" | "summarization" | "conversational" | "feature-extraction" | "text-generation" | "text2text-generation" | "fill-mask" | "sentence-similarity" | "text-to-speech" | "text-to-audio" | "automatic-speech-recognition" | "audio-to-audio" | "audio-classification" | "voice-activity-detection" | "depth-estimation" | "image-classification" | "object-detection" | "image-segmentation" | "text-to-image" | "image-to-text" | "image-to-image" | "unconditional-image-generation" | "video-classification" | "reinforcement-learning" | "robotics" | "tabular-classification" | "tabular-regression" | "tabular-to-text" | "table-to-text" | "multiple-choice" | "text-retrieval" | "time-series-forecasting" | "text-to-video" | "visual-question-answering" | "document-question-answering" | "zero-shot-image-classification" | "graph-ml")[];
+ declare const PIPELINE_TYPES: ("other" | "text-classification" | "token-classification" | "table-question-answering" | "question-answering" | "zero-shot-classification" | "translation" | "summarization" | "conversational" | "feature-extraction" | "text-generation" | "text2text-generation" | "fill-mask" | "sentence-similarity" | "text-to-speech" | "text-to-audio" | "automatic-speech-recognition" | "audio-to-audio" | "audio-classification" | "voice-activity-detection" | "depth-estimation" | "image-classification" | "object-detection" | "image-segmentation" | "text-to-image" | "image-to-text" | "image-to-image" | "image-to-video" | "unconditional-image-generation" | "video-classification" | "reinforcement-learning" | "robotics" | "tabular-classification" | "tabular-regression" | "tabular-to-text" | "table-to-text" | "multiple-choice" | "text-retrieval" | "time-series-forecasting" | "text-to-video" | "visual-question-answering" | "document-question-answering" | "zero-shot-image-classification" | "graph-ml" | "mask-generation" | "zero-shot-object-detection" | "text-to-3d" | "image-to-3d")[];
  declare const SUBTASK_TYPES: string[];
- declare const PIPELINE_TYPES_SET: Set<"other" | "text-classification" | "token-classification" | "table-question-answering" | "question-answering" | "zero-shot-classification" | "translation" | "summarization" | "conversational" | "feature-extraction" | "text-generation" | "text2text-generation" | "fill-mask" | "sentence-similarity" | "text-to-speech" | "text-to-audio" | "automatic-speech-recognition" | "audio-to-audio" | "audio-classification" | "voice-activity-detection" | "depth-estimation" | "image-classification" | "object-detection" | "image-segmentation" | "text-to-image" | "image-to-text" | "image-to-image" | "unconditional-image-generation" | "video-classification" | "reinforcement-learning" | "robotics" | "tabular-classification" | "tabular-regression" | "tabular-to-text" | "table-to-text" | "multiple-choice" | "text-retrieval" | "time-series-forecasting" | "text-to-video" | "visual-question-answering" | "document-question-answering" | "zero-shot-image-classification" | "graph-ml">;
+ declare const PIPELINE_TYPES_SET: Set<"other" | "text-classification" | "token-classification" | "table-question-answering" | "question-answering" | "zero-shot-classification" | "translation" | "summarization" | "conversational" | "feature-extraction" | "text-generation" | "text2text-generation" | "fill-mask" | "sentence-similarity" | "text-to-speech" | "text-to-audio" | "automatic-speech-recognition" | "audio-to-audio" | "audio-classification" | "voice-activity-detection" | "depth-estimation" | "image-classification" | "object-detection" | "image-segmentation" | "text-to-image" | "image-to-text" | "image-to-image" | "image-to-video" | "unconditional-image-generation" | "video-classification" | "reinforcement-learning" | "robotics" | "tabular-classification" | "tabular-regression" | "tabular-to-text" | "table-to-text" | "multiple-choice" | "text-retrieval" | "time-series-forecasting" | "text-to-video" | "visual-question-answering" | "document-question-answering" | "zero-shot-image-classification" | "graph-ml" | "mask-generation" | "zero-shot-object-detection" | "text-to-3d" | "image-to-3d">;
+
+ /**
+  * Mapping from library name (excluding Transformers) to its supported tasks.
+  * Inference API should be disabled for all other (library, task) pairs beyond this mapping.
+  * As an exception, we assume Transformers supports all inference tasks.
+  * This mapping is generated automatically by "python-api-export-tasks" action in huggingface/api-inference-community repo upon merge.
+  * Ref: https://github.com/huggingface/api-inference-community/pull/158
+  */
+ declare const LIBRARY_TASK_MAPPING_EXCLUDING_TRANSFORMERS: Partial<Record<ModelLibraryKey, PipelineType[]>>;
+
+ type TableData = Record<string, (string | number)[]>;
+ type WidgetExampleOutputLabels = Array<{
+     label: string;
+     score: number;
+ }>;
+ interface WidgetExampleOutputAnswerScore {
+     answer: string;
+     score: number;
+ }
+ interface WidgetExampleOutputText {
+     text: string;
+ }
+ interface WidgetExampleOutputUrl {
+     url: string;
+ }
+ type WidgetExampleOutput = WidgetExampleOutputLabels | WidgetExampleOutputAnswerScore | WidgetExampleOutputText | WidgetExampleOutputUrl;
+ interface WidgetExampleBase<TOutput> {
+     example_title?: string;
+     group?: string;
+     /**
+      * Potential overrides to API parameters for this specific example
+      * (takes precedences over the model card metadata's inference.parameters)
+      */
+     parameters?: {
+         aggregation_strategy?: string;
+         top_k?: number;
+         top_p?: number;
+         temperature?: number;
+         max_new_tokens?: number;
+         do_sample?: boolean;
+         negative_prompt?: string;
+         guidance_scale?: number;
+         num_inference_steps?: number;
+     };
+     /**
+      * Optional output
+      */
+     output?: TOutput;
+ }
+ interface WidgetExampleTextInput<TOutput = WidgetExampleOutput> extends WidgetExampleBase<TOutput> {
+     text: string;
+ }
+ interface WidgetExampleTextAndContextInput<TOutput = WidgetExampleOutput> extends WidgetExampleTextInput<TOutput> {
+     context: string;
+ }
+ interface WidgetExampleTextAndTableInput<TOutput = WidgetExampleOutput> extends WidgetExampleTextInput<TOutput> {
+     table: TableData;
+ }
+ interface WidgetExampleAssetInput<TOutput = WidgetExampleOutput> extends WidgetExampleBase<TOutput> {
+     src: string;
+ }
+ interface WidgetExampleAssetAndPromptInput<TOutput = WidgetExampleOutput> extends WidgetExampleAssetInput<TOutput> {
+     prompt: string;
+ }
+ type WidgetExampleAssetAndTextInput<TOutput = WidgetExampleOutput> = WidgetExampleAssetInput<TOutput> & WidgetExampleTextInput<TOutput>;
+ type WidgetExampleAssetAndZeroShotInput<TOutput = WidgetExampleOutput> = WidgetExampleAssetInput<TOutput> & WidgetExampleZeroShotTextInput<TOutput>;
+ interface WidgetExampleStructuredDataInput<TOutput = WidgetExampleOutput> extends WidgetExampleBase<TOutput> {
+     structured_data: TableData;
+ }
+ interface WidgetExampleTableDataInput<TOutput = WidgetExampleOutput> extends WidgetExampleBase<TOutput> {
+     table: TableData;
+ }
+ interface WidgetExampleZeroShotTextInput<TOutput = WidgetExampleOutput> extends WidgetExampleTextInput<TOutput> {
+     text: string;
+     candidate_labels: string;
+     multi_class: boolean;
+ }
+ interface WidgetExampleSentenceSimilarityInput<TOutput = WidgetExampleOutput> extends WidgetExampleBase<TOutput> {
+     source_sentence: string;
+     sentences: string[];
+ }
+ type WidgetExample<TOutput = WidgetExampleOutput> = WidgetExampleTextInput<TOutput> | WidgetExampleTextAndContextInput<TOutput> | WidgetExampleTextAndTableInput<TOutput> | WidgetExampleAssetInput<TOutput> | WidgetExampleAssetAndPromptInput<TOutput> | WidgetExampleAssetAndTextInput<TOutput> | WidgetExampleAssetAndZeroShotInput<TOutput> | WidgetExampleStructuredDataInput<TOutput> | WidgetExampleTableDataInput<TOutput> | WidgetExampleZeroShotTextInput<TOutput> | WidgetExampleSentenceSimilarityInput<TOutput>;
+ type KeysOfUnion<T> = T extends unknown ? keyof T : never;
+ type WidgetExampleAttribute = KeysOfUnion<WidgetExample>;
+ declare enum InferenceDisplayability {
+     /**
+      * Yes
+      */
+     Yes = "Yes",
+     /**
+      * And then, all the possible reasons why it's no:
+      */
+     ExplicitOptOut = "ExplicitOptOut",
+     CustomCode = "CustomCode",
+     LibraryNotDetected = "LibraryNotDetected",
+     PipelineNotDetected = "PipelineNotDetected",
+     PipelineLibraryPairNotSupported = "PipelineLibraryPairNotSupported"
+ }
+ /**
+  * Public interface for model metadata
+  */
+ interface ModelData {
+     /**
+      * id of model (e.g. 'user/repo_name')
+      */
+     id: string;
+     /**
+      * Kept for backward compatibility
+      */
+     modelId?: string;
+     /**
+      * Whether or not to enable inference widget for this model
+      */
+     inference: InferenceDisplayability;
+     /**
+      * is this model private?
+      */
+     private?: boolean;
+     /**
+      * this dictionary has useful information about the model configuration
+      */
+     config?: Record<string, unknown> & {
+         adapter_transformers?: {
+             model_class?: string;
+             model_name?: string;
+         };
+         architectures?: string[];
+         sklearn?: {
+             filename?: string;
+             model_format?: string;
+         };
+         speechbrain?: {
+             interface?: string;
+         };
+         peft?: {
+             base_model_name?: string;
+             task_type?: string;
+         };
+     };
+     /**
+      * all the model tags
+      */
+     tags?: string[];
+     /**
+      * transformers-specific info to display in the code sample.
+      */
+     transformersInfo?: TransformersInfo;
+     /**
+      * Pipeline type
+      */
+     pipeline_tag?: PipelineType | undefined;
+     /**
+      * for relevant models, get mask token
+      */
+     mask_token?: string | undefined;
+     /**
+      * Example data that will be fed into the widget.
+      *
+      * can be set in the model card metadata (under `widget`),
+      * or by default in `DefaultWidget.ts`
+      */
+     widgetData?: WidgetExample[] | undefined;
+     /**
+      * Parameters that will be used by the widget when calling Inference API
+      * https://huggingface.co/docs/api-inference/detailed_parameters
+      *
+      * can be set in the model card metadata (under `inference/parameters`)
+      * Example:
+      * inference:
+      *     parameters:
+      *         key: val
+      */
+     cardData?: {
+         inference?: boolean | {
+             parameters?: Record<string, unknown>;
+         };
+         base_model?: string;
+     };
+     /**
+      * Library name
+      * Example: transformers, SpeechBrain, Stanza, etc.
+      */
+     library_name?: string;
+ }
+ /**
+  * transformers-specific info to display in the code sample.
+  */
+ interface TransformersInfo {
+     /**
+      * e.g. AutoModelForSequenceClassification
+      */
+     auto_model: string;
+     /**
+      * if set in config.json's auto_map
+      */
+     custom_class?: string;
+     /**
+      * e.g. text-classification
+      */
+     pipeline_tag?: PipelineType;
+     /**
+      * e.g. "AutoTokenizer" | "AutoFeatureExtractor" | "AutoProcessor"
+      */
+     processor?: string;
+ }

+ /**
+  * Elements configurable by a model library.
+  */
+ interface LibraryUiElement {
+     /**
+      * Name displayed on the main
+      * call-to-action button on the model page.
+      */
+     btnLabel: string;
+     /**
+      * Repo name
+      */
+     repoName: string;
+     /**
+      * URL to library's repo
+      */
+     repoUrl: string;
+     /**
+      * URL to library's docs
+      */
+     docsUrl?: string;
+     /**
+      * Code snippet displayed on model page
+      */
+     snippets: (model: ModelData) => string[];
+ }
+ declare const MODEL_LIBRARIES_UI_ELEMENTS: Partial<Record<ModelLibraryKey, LibraryUiElement>>;
+
+ type PerLanguageMapping = Map<PipelineType, string[] | WidgetExample[]>;
+ declare const MAPPING_DEFAULT_WIDGET: Map<string, PerLanguageMapping>;
+
+ /**
+  * Model libraries compatible with each ML task
+  */
+ declare const TASKS_MODEL_LIBRARIES: Record<PipelineType, ModelLibraryKey[]>;
+ declare const TASKS_DATA: Record<PipelineType, TaskData | undefined>;
  interface ExampleRepo {
      description: string;
      id: string;
@@ -464,12 +734,118 @@ interface TaskData {
      widgetModels: string[];
      youtubeId?: string;
  }
-
- declare const TASKS_DATA: Record<PipelineType, TaskData | undefined>;
+ type TaskDataCustom = Omit<TaskData, "id" | "label" | "libraries">;

  declare const TAG_NFAA_CONTENT = "not-for-all-audiences";
  declare const OTHER_TAGS_SUGGESTIONS: string[];
  declare const TAG_TEXT_GENERATION_INFERENCE = "text-generation-inference";
  declare const TAG_CUSTOM_CODE = "custom_code";

- export { ALL_DISPLAY_MODEL_LIBRARY_KEYS, ExampleRepo, MODALITIES, MODALITY_LABELS, Modality, ModelLibrary, ModelLibraryKey, OTHER_TAGS_SUGGESTIONS, PIPELINE_DATA, PIPELINE_TYPES, PIPELINE_TYPES_SET, PipelineData, PipelineType, SUBTASK_TYPES, TAG_CUSTOM_CODE, TAG_NFAA_CONTENT, TAG_TEXT_GENERATION_INFERENCE, TASKS_DATA, TaskData, TaskDemo, TaskDemoEntry };
+ declare function getModelInputSnippet(model: ModelData, noWrap?: boolean, noQuotes?: boolean): string;
+
+ declare const inputs_getModelInputSnippet: typeof getModelInputSnippet;
+ declare namespace inputs {
+     export {
+         inputs_getModelInputSnippet as getModelInputSnippet,
+     };
+ }
+
+ declare const snippetBasic$2: (model: ModelData, accessToken: string) => string;
+ declare const snippetZeroShotClassification$2: (model: ModelData, accessToken: string) => string;
+ declare const snippetFile$2: (model: ModelData, accessToken: string) => string;
+ declare const curlSnippets: Partial<Record<PipelineType, (model: ModelData, accessToken: string) => string>>;
+ declare function getCurlInferenceSnippet(model: ModelData, accessToken: string): string;
+ declare function hasCurlInferenceSnippet(model: ModelData): boolean;
+
+ declare const curl_curlSnippets: typeof curlSnippets;
+ declare const curl_getCurlInferenceSnippet: typeof getCurlInferenceSnippet;
+ declare const curl_hasCurlInferenceSnippet: typeof hasCurlInferenceSnippet;
+ declare namespace curl {
+     export {
+         curl_curlSnippets as curlSnippets,
+         curl_getCurlInferenceSnippet as getCurlInferenceSnippet,
+         curl_hasCurlInferenceSnippet as hasCurlInferenceSnippet,
+         snippetBasic$2 as snippetBasic,
+         snippetFile$2 as snippetFile,
+         snippetZeroShotClassification$2 as snippetZeroShotClassification,
+     };
+ }
+
+ declare const snippetZeroShotClassification$1: (model: ModelData) => string;
+ declare const snippetZeroShotImageClassification: (model: ModelData) => string;
+ declare const snippetBasic$1: (model: ModelData) => string;
+ declare const snippetFile$1: (model: ModelData) => string;
+ declare const snippetTextToImage$1: (model: ModelData) => string;
+ declare const snippetTabular: (model: ModelData) => string;
+ declare const snippetTextToAudio$1: (model: ModelData) => string;
+ declare const snippetDocumentQuestionAnswering: (model: ModelData) => string;
+ declare const pythonSnippets: Partial<Record<PipelineType, (model: ModelData) => string>>;
+ declare function getPythonInferenceSnippet(model: ModelData, accessToken: string): string;
+ declare function hasPythonInferenceSnippet(model: ModelData): boolean;
+
+ declare const python_getPythonInferenceSnippet: typeof getPythonInferenceSnippet;
+ declare const python_hasPythonInferenceSnippet: typeof hasPythonInferenceSnippet;
+ declare const python_pythonSnippets: typeof pythonSnippets;
+ declare const python_snippetDocumentQuestionAnswering: typeof snippetDocumentQuestionAnswering;
+ declare const python_snippetTabular: typeof snippetTabular;
+ declare const python_snippetZeroShotImageClassification: typeof snippetZeroShotImageClassification;
+ declare namespace python {
+     export {
+         python_getPythonInferenceSnippet as getPythonInferenceSnippet,
+         python_hasPythonInferenceSnippet as hasPythonInferenceSnippet,
+         python_pythonSnippets as pythonSnippets,
+         snippetBasic$1 as snippetBasic,
+         python_snippetDocumentQuestionAnswering as snippetDocumentQuestionAnswering,
+         snippetFile$1 as snippetFile,
+         python_snippetTabular as snippetTabular,
+         snippetTextToAudio$1 as snippetTextToAudio,
+         snippetTextToImage$1 as snippetTextToImage,
+         snippetZeroShotClassification$1 as snippetZeroShotClassification,
+         python_snippetZeroShotImageClassification as snippetZeroShotImageClassification,
+     };
+ }
+
+ declare const snippetBasic: (model: ModelData, accessToken: string) => string;
+ declare const snippetZeroShotClassification: (model: ModelData, accessToken: string) => string;
+ declare const snippetTextToImage: (model: ModelData, accessToken: string) => string;
+ declare const snippetTextToAudio: (model: ModelData, accessToken: string) => string;
+ declare const snippetFile: (model: ModelData, accessToken: string) => string;
+ declare const jsSnippets: Partial<Record<PipelineType, (model: ModelData, accessToken: string) => string>>;
+ declare function getJsInferenceSnippet(model: ModelData, accessToken: string): string;
+ declare function hasJsInferenceSnippet(model: ModelData): boolean;
+
+ declare const js_getJsInferenceSnippet: typeof getJsInferenceSnippet;
+ declare const js_hasJsInferenceSnippet: typeof hasJsInferenceSnippet;
+ declare const js_jsSnippets: typeof jsSnippets;
+ declare const js_snippetBasic: typeof snippetBasic;
+ declare const js_snippetFile: typeof snippetFile;
+ declare const js_snippetTextToAudio: typeof snippetTextToAudio;
+ declare const js_snippetTextToImage: typeof snippetTextToImage;
+ declare const js_snippetZeroShotClassification: typeof snippetZeroShotClassification;
+ declare namespace js {
+     export {
+         js_getJsInferenceSnippet as getJsInferenceSnippet,
+         js_hasJsInferenceSnippet as hasJsInferenceSnippet,
+         js_jsSnippets as jsSnippets,
+         js_snippetBasic as snippetBasic,
+         js_snippetFile as snippetFile,
+         js_snippetTextToAudio as snippetTextToAudio,
+         js_snippetTextToImage as snippetTextToImage,
+         js_snippetZeroShotClassification as snippetZeroShotClassification,
+     };
+ }
+
+ declare const index_curl: typeof curl;
+ declare const index_inputs: typeof inputs;
+ declare const index_js: typeof js;
+ declare const index_python: typeof python;
+ declare namespace index {
+     export {
+         index_curl as curl,
+         index_inputs as inputs,
+         index_js as js,
+         index_python as python,
+     };
+ }
+
+ export { ALL_DISPLAY_MODEL_LIBRARY_KEYS, ExampleRepo, InferenceDisplayability, LIBRARY_TASK_MAPPING_EXCLUDING_TRANSFORMERS, LibraryUiElement, MAPPING_DEFAULT_WIDGET, MODALITIES, MODALITY_LABELS, MODEL_LIBRARIES_UI_ELEMENTS, Modality, ModelData, ModelLibrary, ModelLibraryKey, OTHER_TAGS_SUGGESTIONS, PIPELINE_DATA, PIPELINE_TYPES, PIPELINE_TYPES_SET, PipelineData, PipelineType, SUBTASK_TYPES, TAG_CUSTOM_CODE, TAG_NFAA_CONTENT, TAG_TEXT_GENERATION_INFERENCE, TASKS_DATA, TASKS_MODEL_LIBRARIES, TaskData, TaskDataCustom, TaskDemo, TaskDemoEntry, TransformersInfo, WidgetExample, WidgetExampleAssetAndPromptInput, WidgetExampleAssetAndTextInput, WidgetExampleAssetAndZeroShotInput, WidgetExampleAssetInput, WidgetExampleAttribute, WidgetExampleOutput, WidgetExampleOutputAnswerScore, WidgetExampleOutputLabels, WidgetExampleOutputText, WidgetExampleOutputUrl, WidgetExampleSentenceSimilarityInput, WidgetExampleStructuredDataInput, WidgetExampleTableDataInput, WidgetExampleTextAndContextInput, WidgetExampleTextAndTableInput, WidgetExampleTextInput, WidgetExampleZeroShotTextInput, index as snippets };
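Taken together, the new exports let a consumer go from a `ModelData` object to a ready-made inference code sample. A minimal sketch based on the declarations above (the model id, pipeline tag, and access token are placeholders, not values from the package):

```ts
import { snippets, InferenceDisplayability, type ModelData } from "@huggingface/tasks";

// Placeholder model metadata; per the ModelData interface above,
// `id` and `inference` are the only required fields.
const model: ModelData = {
	id: "gpt2",
	pipeline_tag: "text-generation",
	inference: InferenceDisplayability.Yes,
};

// Each snippet namespace pairs a has* guard with a get* generator.
if (snippets.curl.hasCurlInferenceSnippet(model)) {
	console.log(snippets.curl.getCurlInferenceSnippet(model, "hf_***"));
}
if (snippets.js.hasJsInferenceSnippet(model)) {
	console.log(snippets.js.getJsInferenceSnippet(model, "hf_***"));
}
```

A companion helper, `snippets.inputs.getModelInputSnippet(model)`, is exported alongside these for producing example inputs.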