@huggingface/tasks 0.3.0 → 0.3.2

package/dist/index.d.ts CHANGED
@@ -104,18 +104,9 @@ declare const PIPELINE_DATA: {
         modality: "nlp";
         color: "indigo";
     };
-    conversational: {
-        name: string;
-        subtasks: {
-            type: string;
-            name: string;
-        }[];
-        modality: "nlp";
-        color: "green";
-    };
     "feature-extraction": {
         name: string;
-        modality: "multimodal";
+        modality: "nlp";
         color: "red";
     };
     "text-generation": {
@@ -218,7 +209,7 @@ declare const PIPELINE_DATA: {
     };
     "text-to-image": {
         name: string;
-        modality: "multimodal";
+        modality: "cv";
         color: "yellow";
     };
     "image-to-text": {
@@ -227,7 +218,7 @@ declare const PIPELINE_DATA: {
             type: string;
             name: string;
         }[];
-        modality: "multimodal";
+        modality: "cv";
         color: "red";
     };
     "image-to-image": {
@@ -241,7 +232,7 @@ declare const PIPELINE_DATA: {
     };
     "image-to-video": {
         name: string;
-        modality: "multimodal";
+        modality: "cv";
         color: "indigo";
     };
     "unconditional-image-generation": {
@@ -334,9 +325,15 @@ declare const PIPELINE_DATA: {
     };
     "text-to-video": {
         name: string;
-        modality: "multimodal";
+        modality: "cv";
         color: "green";
     };
+    "image-text-to-text": {
+        name: string;
+        modality: "multimodal";
+        color: "red";
+        hideInDatasets: true;
+    };
     "visual-question-answering": {
         name: string;
         subtasks: {
@@ -363,7 +360,7 @@ declare const PIPELINE_DATA: {
     };
     "graph-ml": {
         name: string;
-        modality: "multimodal";
+        modality: "other";
         color: "green";
     };
     "mask-generation": {
@@ -378,14 +375,19 @@ declare const PIPELINE_DATA: {
     };
     "text-to-3d": {
         name: string;
-        modality: "multimodal";
+        modality: "cv";
         color: "yellow";
     };
     "image-to-3d": {
         name: string;
-        modality: "multimodal";
+        modality: "cv";
         color: "green";
     };
+    "image-feature-extraction": {
+        name: string;
+        modality: "cv";
+        color: "indigo";
+    };
     other: {
         name: string;
         modality: "other";
@@ -395,9 +397,10 @@ declare const PIPELINE_DATA: {
     };
 };
 type PipelineType = keyof typeof PIPELINE_DATA;
-declare const PIPELINE_TYPES: ("other" | "text-classification" | "token-classification" | "table-question-answering" | "question-answering" | "zero-shot-classification" | "translation" | "summarization" | "conversational" | "feature-extraction" | "text-generation" | "text2text-generation" | "fill-mask" | "sentence-similarity" | "text-to-speech" | "text-to-audio" | "automatic-speech-recognition" | "audio-to-audio" | "audio-classification" | "voice-activity-detection" | "depth-estimation" | "image-classification" | "object-detection" | "image-segmentation" | "text-to-image" | "image-to-text" | "image-to-image" | "image-to-video" | "unconditional-image-generation" | "video-classification" | "reinforcement-learning" | "robotics" | "tabular-classification" | "tabular-regression" | "tabular-to-text" | "table-to-text" | "multiple-choice" | "text-retrieval" | "time-series-forecasting" | "text-to-video" | "visual-question-answering" | "document-question-answering" | "zero-shot-image-classification" | "graph-ml" | "mask-generation" | "zero-shot-object-detection" | "text-to-3d" | "image-to-3d")[];
+type WidgetType = PipelineType | "conversational";
+declare const PIPELINE_TYPES: ("other" | "text-classification" | "token-classification" | "table-question-answering" | "question-answering" | "zero-shot-classification" | "translation" | "summarization" | "feature-extraction" | "text-generation" | "text2text-generation" | "fill-mask" | "sentence-similarity" | "text-to-speech" | "text-to-audio" | "automatic-speech-recognition" | "audio-to-audio" | "audio-classification" | "voice-activity-detection" | "depth-estimation" | "image-classification" | "object-detection" | "image-segmentation" | "text-to-image" | "image-to-text" | "image-to-image" | "image-to-video" | "unconditional-image-generation" | "video-classification" | "reinforcement-learning" | "robotics" | "tabular-classification" | "tabular-regression" | "tabular-to-text" | "table-to-text" | "multiple-choice" | "text-retrieval" | "time-series-forecasting" | "text-to-video" | "image-text-to-text" | "visual-question-answering" | "document-question-answering" | "zero-shot-image-classification" | "graph-ml" | "mask-generation" | "zero-shot-object-detection" | "text-to-3d" | "image-to-3d" | "image-feature-extraction")[];
 declare const SUBTASK_TYPES: string[];
-declare const PIPELINE_TYPES_SET: Set<"other" | "text-classification" | "token-classification" | "table-question-answering" | "question-answering" | "zero-shot-classification" | "translation" | "summarization" | "conversational" | "feature-extraction" | "text-generation" | "text2text-generation" | "fill-mask" | "sentence-similarity" | "text-to-speech" | "text-to-audio" | "automatic-speech-recognition" | "audio-to-audio" | "audio-classification" | "voice-activity-detection" | "depth-estimation" | "image-classification" | "object-detection" | "image-segmentation" | "text-to-image" | "image-to-text" | "image-to-image" | "image-to-video" | "unconditional-image-generation" | "video-classification" | "reinforcement-learning" | "robotics" | "tabular-classification" | "tabular-regression" | "tabular-to-text" | "table-to-text" | "multiple-choice" | "text-retrieval" | "time-series-forecasting" | "text-to-video" | "visual-question-answering" | "document-question-answering" | "zero-shot-image-classification" | "graph-ml" | "mask-generation" | "zero-shot-object-detection" | "text-to-3d" | "image-to-3d">;
+declare const PIPELINE_TYPES_SET: Set<"other" | "text-classification" | "token-classification" | "table-question-answering" | "question-answering" | "zero-shot-classification" | "translation" | "summarization" | "feature-extraction" | "text-generation" | "text2text-generation" | "fill-mask" | "sentence-similarity" | "text-to-speech" | "text-to-audio" | "automatic-speech-recognition" | "audio-to-audio" | "audio-classification" | "voice-activity-detection" | "depth-estimation" | "image-classification" | "object-detection" | "image-segmentation" | "text-to-image" | "image-to-text" | "image-to-image" | "image-to-video" | "unconditional-image-generation" | "video-classification" | "reinforcement-learning" | "robotics" | "tabular-classification" | "tabular-regression" | "tabular-to-text" | "table-to-text" | "multiple-choice" | "text-retrieval" | "time-series-forecasting" | "text-to-video" | "image-text-to-text" | "visual-question-answering" | "document-question-answering" | "zero-shot-image-classification" | "graph-ml" | "mask-generation" | "zero-shot-object-detection" | "text-to-3d" | "image-to-3d" | "image-feature-extraction">;
 
 /**
  * See default-widget-inputs.ts for the default widget inputs, this files only contains the types
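
`conversational` is dropped from `PipelineType` and survives only as a widget-level concept through the new `WidgetType` union. A hedged sketch of how a consumer might branch on that distinction (the `isPipelineType` helper is illustrative, not part of the package):

```ts
import { PIPELINE_TYPES_SET, type PipelineType, type WidgetType } from "@huggingface/tasks";

// "conversational" is a valid WidgetType but no longer a PipelineType,
// so widget code can accept it while pipeline-level code narrows it away.
function isPipelineType(value: WidgetType): value is PipelineType {
    return PIPELINE_TYPES_SET.has(value as PipelineType);
}

console.log(isPipelineType("text-generation")); // true
console.log(isPipelineType("conversational")); // false
```
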
@@ -477,6 +480,21 @@ type WidgetExample<TOutput = WidgetExampleOutput> = WidgetExampleTextInput<TOutp
 type KeysOfUnion<T> = T extends unknown ? keyof T : never;
 type WidgetExampleAttribute = KeysOfUnion<WidgetExample>;
 
+declare const SPECIAL_TOKENS_ATTRIBUTES: readonly ["bos_token", "eos_token", "unk_token", "sep_token", "pad_token", "cls_token", "mask_token"];
+/**
+ * Public interface for a tokenizer's special tokens mapping
+ */
+type SpecialTokensMap = {
+    [key in (typeof SPECIAL_TOKENS_ATTRIBUTES)[number]]?: string;
+};
+/**
+ * Public interface for tokenizer config
+ */
+interface TokenizerConfig extends SpecialTokensMap {
+    use_default_system_prompt?: boolean;
+    chat_template?: string;
+}
+
 declare enum InferenceDisplayability {
     /**
      * Yes
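
The new `SpecialTokensMap`/`TokenizerConfig` types model the subset of a `tokenizer_config.json` that this package cares about: the special tokens listed in `SPECIAL_TOKENS_ATTRIBUTES` plus `chat_template` and `use_default_system_prompt`. A sketch of values that satisfy the type (the literal token strings are made up for illustration):

```ts
import type { TokenizerConfig } from "@huggingface/tasks";

// Hypothetical tokenizer_config.json subset that type-checks against TokenizerConfig.
const tokenizerConfig: TokenizerConfig = {
    bos_token: "<s>",
    eos_token: "</s>",
    unk_token: "<unk>",
    use_default_system_prompt: true,
    chat_template: "{% for message in messages %}{{ message.content }}{% endfor %}",
};

// Every special token is optional, so a partial config is also valid.
const minimal: TokenizerConfig = { eos_token: "<|endoftext|>" };
```
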
@@ -531,6 +549,7 @@ interface ModelData {
             base_model_name?: string;
             task_type?: string;
         };
+        tokenizer?: TokenizerConfig;
     };
     /**
      * all the model tags
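
`ModelData["config"]` now optionally carries that tokenizer config. A minimal, hedged sketch of reading the chat template from hub metadata (the `modelData` literal is a stand-in for whatever the Hub API actually returns, hence the cast):

```ts
import type { ModelData } from "@huggingface/tasks";

// Hypothetical partial ModelData, cast because only the fields used here are filled in.
const modelData = {
    id: "example-org/example-chat-model",
    config: {
        tokenizer: { eos_token: "</s>", chat_template: "{{ messages }}" },
    },
} as unknown as ModelData;

// Optional chaining keeps this safe for models without tokenizer metadata.
const chatTemplate = modelData.config?.tokenizer?.chat_template;
console.log(chatTemplate ?? "no chat template declared");
```
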
@@ -1044,7 +1063,7 @@ declare const ALL_DISPLAY_MODEL_LIBRARY_KEYS: ("sklearn" | "adapter-transformers
  */
 declare const LIBRARY_TASK_MAPPING_EXCLUDING_TRANSFORMERS: Partial<Record<ModelLibraryKey, PipelineType[]>>;
 
-type PerLanguageMapping = Map<PipelineType, string[] | WidgetExample[]>;
+type PerLanguageMapping = Map<WidgetType, string[] | WidgetExample[]>;
 declare const MAPPING_DEFAULT_WIDGET: Map<string, PerLanguageMapping>;
 
 /**
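
Re-keying `PerLanguageMapping` on `WidgetType` is what lets `MAPPING_DEFAULT_WIDGET` keep `conversational` entries even though the pipeline type is gone. Since `PerLanguageMapping` itself is not exported, this sketch re-declares the alias locally; the example inputs are illustrative:

```ts
import type { WidgetExample, WidgetType } from "@huggingface/tasks";

// Mirrors the declared alias: Map<WidgetType, string[] | WidgetExample[]>.
type PerLanguageMapping = Map<WidgetType, string[] | WidgetExample[]>;

// "conversational" is only assignable as a key because the map is keyed by WidgetType.
const en: PerLanguageMapping = new Map<WidgetType, string[] | WidgetExample[]>([
    ["text-generation", ["My name is Julien and I like to"]],
    ["conversational", ["Hello, how are you today?"]],
]);
console.log(en.size); // 2
```
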
@@ -1212,4 +1231,4 @@ declare namespace index {
     };
 }
 
-export { ALL_DISPLAY_MODEL_LIBRARY_KEYS, ALL_MODEL_LIBRARY_KEYS, ExampleRepo, InferenceDisplayability, LIBRARY_TASK_MAPPING_EXCLUDING_TRANSFORMERS, LibraryUiElement, MAPPING_DEFAULT_WIDGET, MODALITIES, MODALITY_LABELS, MODEL_LIBRARIES_UI_ELEMENTS, Modality, ModelData, ModelLibraryKey, PIPELINE_DATA, PIPELINE_TYPES, PIPELINE_TYPES_SET, PipelineData, PipelineType, SUBTASK_TYPES, TASKS_DATA, TASKS_MODEL_LIBRARIES, TaskData, TaskDataCustom, TaskDemo, TaskDemoEntry, TransformersInfo, WidgetExample, WidgetExampleAssetAndPromptInput, WidgetExampleAssetAndTextInput, WidgetExampleAssetAndZeroShotInput, WidgetExampleAssetInput, WidgetExampleAttribute, WidgetExampleOutput, WidgetExampleOutputAnswerScore, WidgetExampleOutputLabels, WidgetExampleOutputText, WidgetExampleOutputUrl, WidgetExampleSentenceSimilarityInput, WidgetExampleStructuredDataInput, WidgetExampleTableDataInput, WidgetExampleTextAndContextInput, WidgetExampleTextAndTableInput, WidgetExampleTextInput, WidgetExampleZeroShotTextInput, index as snippets };
+export { ALL_DISPLAY_MODEL_LIBRARY_KEYS, ALL_MODEL_LIBRARY_KEYS, ExampleRepo, InferenceDisplayability, LIBRARY_TASK_MAPPING_EXCLUDING_TRANSFORMERS, LibraryUiElement, MAPPING_DEFAULT_WIDGET, MODALITIES, MODALITY_LABELS, MODEL_LIBRARIES_UI_ELEMENTS, Modality, ModelData, ModelLibraryKey, PIPELINE_DATA, PIPELINE_TYPES, PIPELINE_TYPES_SET, PipelineData, PipelineType, SPECIAL_TOKENS_ATTRIBUTES, SUBTASK_TYPES, SpecialTokensMap, TASKS_DATA, TASKS_MODEL_LIBRARIES, TaskData, TaskDataCustom, TaskDemo, TaskDemoEntry, TokenizerConfig, TransformersInfo, WidgetExample, WidgetExampleAssetAndPromptInput, WidgetExampleAssetAndTextInput, WidgetExampleAssetAndZeroShotInput, WidgetExampleAssetInput, WidgetExampleAttribute, WidgetExampleOutput, WidgetExampleOutputAnswerScore, WidgetExampleOutputLabels, WidgetExampleOutputText, WidgetExampleOutputUrl, WidgetExampleSentenceSimilarityInput, WidgetExampleStructuredDataInput, WidgetExampleTableDataInput, WidgetExampleTextAndContextInput, WidgetExampleTextAndTableInput, WidgetExampleTextInput, WidgetExampleZeroShotTextInput, WidgetType, index as snippets };