@huggingface/tasks 0.3.4 → 0.4.0

package/dist/index.cjs CHANGED
@@ -4222,7 +4222,7 @@ model = joblib.load(
 };
 var sklearn = (model) => {
   if (model.tags?.includes("skops")) {
-    const skopsmodelFile = model.config?.sklearn?.filename;
+    const skopsmodelFile = model.config?.sklearn?.model?.file;
     const skopssaveFormat = model.config?.sklearn?.model_format;
     if (!skopsmodelFile) {
       return [`# \u26A0\uFE0F Model filename not specified in config.json`];
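The skops filename moves from a flat `filename` key to a nested `model.file` entry in config.json. A minimal sketch of the shape the new read expects (the filename and format values are made up for illustration):

    // 0.3.4 read { "sklearn": { "filename": ... } }; 0.4.0 reads the nested shape below.
    const config = { sklearn: { model: { file: "model.skops" }, model_format: "skops" } };
    const skopsmodelFile = config.sklearn?.model?.file; // "model.skops"
    // A config still using the old flat key yields undefined here, so the snippet
    // falls back to the "Model filename not specified in config.json" warning.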
@@ -4292,7 +4292,7 @@ var speechBrainMethod = (speechbrainInterface) => {
   }
 };
 var speechbrain = (model) => {
-  const speechbrainInterface = model.config?.speechbrain?.interface;
+  const speechbrainInterface = model.config?.speechbrain?.speechbrain_interface;
   if (speechbrainInterface === void 0) {
     return [`# interface not specified in config.json`];
   }
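The SpeechBrain lookup key is likewise renamed, from `interface` to `speechbrain_interface`. A sketch with a hypothetical interface value ("EncoderClassifier" is illustrative):

    const model = { config: { speechbrain: { speechbrain_interface: "EncoderClassifier" } } };
    const speechbrainInterface = model.config?.speechbrain?.speechbrain_interface;
    // "EncoderClassifier"; a config still using the old `interface` key now reads as
    // undefined and triggers the "# interface not specified" fallback above.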
@@ -4370,7 +4370,7 @@ var peftTask = (peftTaskType) => {
   }
 };
 var peft = (model) => {
-  const { base_model_name: peftBaseModel, task_type: peftTaskType } = model.config?.peft ?? {};
+  const { base_model_name_or_path: peftBaseModel, task_type: peftTaskType } = model.config?.peft ?? {};
   const pefttask = peftTask(peftTaskType);
   if (!pefttask) {
     return [`Task type is invalid.`];
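`base_model_name_or_path` is the field PEFT itself writes to adapter_config.json, so this rename lets the snippet resolve the base model for real adapter repos. A sketch (the repo id is illustrative):

    const peftConfig = { base_model_name_or_path: "bigscience/bloom-560m", task_type: "CAUSAL_LM" };
    const { base_model_name_or_path: peftBaseModel, task_type: peftTaskType } = peftConfig ?? {};
    // peftBaseModel === "bigscience/bloom-560m"; peftTaskType feeds peftTask() above.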
package/dist/index.d.ts CHANGED
@@ -444,6 +444,13 @@ interface WidgetExampleBase<TOutput> {
      */
     output?: TOutput;
 }
+interface ChatMessage {
+    role: "user" | "assistant" | "system";
+    content: string;
+}
+interface WidgetExampleChatInput<TOutput = WidgetExampleOutput> extends WidgetExampleBase<TOutput> {
+    messages: ChatMessage[];
+}
 interface WidgetExampleTextInput<TOutput = WidgetExampleOutput> extends WidgetExampleBase<TOutput> {
     text: string;
 }
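These two declarations add conversational widget examples. A sketch of a conforming example object (contents are illustrative; the import follows the package's export list below):

    import type { WidgetExampleChatInput } from "@huggingface/tasks";

    const example: WidgetExampleChatInput = {
        messages: [
            { role: "system", content: "You are a helpful assistant." },
            { role: "user", content: "Summarize this diff in one line." },
        ],
    };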
@@ -476,7 +483,7 @@ interface WidgetExampleSentenceSimilarityInput<TOutput = WidgetExampleOutput> ex
     source_sentence: string;
     sentences: string[];
 }
-type WidgetExample<TOutput = WidgetExampleOutput> = WidgetExampleTextInput<TOutput> | WidgetExampleTextAndContextInput<TOutput> | WidgetExampleTextAndTableInput<TOutput> | WidgetExampleAssetInput<TOutput> | WidgetExampleAssetAndPromptInput<TOutput> | WidgetExampleAssetAndTextInput<TOutput> | WidgetExampleAssetAndZeroShotInput<TOutput> | WidgetExampleStructuredDataInput<TOutput> | WidgetExampleTableDataInput<TOutput> | WidgetExampleZeroShotTextInput<TOutput> | WidgetExampleSentenceSimilarityInput<TOutput>;
+type WidgetExample<TOutput = WidgetExampleOutput> = WidgetExampleChatInput<TOutput> | WidgetExampleTextInput<TOutput> | WidgetExampleTextAndContextInput<TOutput> | WidgetExampleTextAndTableInput<TOutput> | WidgetExampleAssetInput<TOutput> | WidgetExampleAssetAndPromptInput<TOutput> | WidgetExampleAssetAndTextInput<TOutput> | WidgetExampleAssetAndZeroShotInput<TOutput> | WidgetExampleStructuredDataInput<TOutput> | WidgetExampleTableDataInput<TOutput> | WidgetExampleZeroShotTextInput<TOutput> | WidgetExampleSentenceSimilarityInput<TOutput>;
 type KeysOfUnion<T> = T extends unknown ? keyof T : never;
 type WidgetExampleAttribute = KeysOfUnion<WidgetExample>;
@@ -532,24 +539,46 @@ interface ModelData {
     /**
      * this dictionary has useful information about the model configuration
      */
-    config?: Record<string, unknown> & {
+    config?: {
+        architectures?: string[];
+        /**
+         * Dict of AutoModel or Auto… class name to local import path in the repo
+         */
+        auto_map?: {
+            /**
+             * String Property
+             */
+            [x: string]: string;
+        };
+        model_type?: string;
+        quantization_config?: {
+            bits?: number;
+            load_in_4bit?: boolean;
+            load_in_8bit?: boolean;
+        };
+        tokenizer_config?: TokenizerConfig;
         adapter_transformers?: {
-            model_class?: string;
             model_name?: string;
+            model_class?: string;
+        };
+        diffusers?: {
+            _class_name?: string;
         };
-        architectures?: string[];
         sklearn?: {
-            filename?: string;
+            model?: {
+                file?: string;
+            };
             model_format?: string;
         };
         speechbrain?: {
-            interface?: string;
+            speechbrain_interface?: string;
+            vocoder_interface?: string;
+            vocoder_model_id?: string;
         };
         peft?: {
-            base_model_name?: string;
+            base_model_name_or_path?: string;
             task_type?: string;
         };
-        tokenizer_config?: TokenizerConfig;
     };
     /**
      * all the model tags
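Dropping the `Record<string, unknown>` intersection closes the config type: only the listed keys now type-check, and auto_map, model_type, quantization_config, and diffusers gain typed shapes (tokenizer_config and adapter_transformers are reordered, not changed). A sketch of consuming the new quantization fields (the helper name and values are ours, not the library's):

    import type { ModelData } from "@huggingface/tasks";

    function quantizationLabel(model: ModelData): string {
        const q = model.config?.quantization_config;
        if (q?.load_in_4bit) return "4-bit";
        if (q?.load_in_8bit) return "8-bit";
        return q?.bits !== undefined ? `${q.bits}-bit` : "full precision";
    }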
@@ -3219,4 +3248,4 @@ declare namespace index {
 };
 }

-export { ALL_DISPLAY_MODEL_LIBRARY_KEYS, ALL_MODEL_LIBRARY_KEYS, AudioClassificationInput, AudioClassificationOutput, AudioClassificationOutputElement, AudioClassificationParameters, AutomaticSpeechRecognitionInput, AutomaticSpeechRecognitionOutput, AutomaticSpeechRecognitionOutputChunk, AutomaticSpeechRecognitionParameters, BoundingBox, ClassificationOutputTransform$1 as ClassificationOutputTransform, DepthEstimationInput, DepthEstimationOutput, DocumentQuestionAnsweringInput, DocumentQuestionAnsweringInputData, DocumentQuestionAnsweringOutput, DocumentQuestionAnsweringOutputElement, DocumentQuestionAnsweringParameters, EarlyStoppingUnion$2 as EarlyStoppingUnion, ExampleRepo, FeatureExtractionInput, FeatureExtractionOutput, FillMaskInput, FillMaskOutput, FillMaskOutputElement, FillMaskParameters, FinishReason, GenerationParameters$2 as GenerationParameters, ImageClassificationInput, ImageClassificationOutput, ImageClassificationOutputElement, ImageClassificationParameters, ImageSegmentationInput, ImageSegmentationOutput, ImageSegmentationOutputElement, ImageSegmentationParameters, ImageSegmentationSubtask, ImageToImageInput, ImageToImageOutput, ImageToImageParameters, ImageToTextInput, ImageToTextOutput, ImageToTextParameters, InferenceDisplayability, LIBRARY_TASK_MAPPING_EXCLUDING_TRANSFORMERS, LibraryUiElement, MAPPING_DEFAULT_WIDGET, MODALITIES, MODALITY_LABELS, MODEL_LIBRARIES_UI_ELEMENTS, Modality, ModelData, ModelLibraryKey, ObjectDetectionInput, ObjectDetectionOutput, ObjectDetectionOutputElement, ObjectDetectionParameters, PIPELINE_DATA, PIPELINE_TYPES, PIPELINE_TYPES_SET, PipelineData, PipelineType, PrefillToken, QuestionAnsweringInput, QuestionAnsweringInputData, QuestionAnsweringOutput, QuestionAnsweringOutputElement, QuestionAnsweringParameters, SPECIAL_TOKENS_ATTRIBUTES, SUBTASK_TYPES, SentenceSimilarityInput, SentenceSimilarityInputData, SentenceSimilarityOutput, SpecialTokensMap, SummarizationInput, SummarizationOutput, TASKS_DATA, TASKS_MODEL_LIBRARIES, TableQuestionAnsweringInput, TableQuestionAnsweringInputData, TableQuestionAnsweringOutput, TableQuestionAnsweringOutputElement, TargetSize$1 as TargetSize, TaskData, TaskDataCustom, TaskDemo, TaskDemoEntry, Text2TextGenerationParameters, Text2TextGenerationTruncationStrategy, TextClassificationInput, TextClassificationOutput, TextClassificationOutputElement, TextClassificationParameters, TextGenerationInput, TextGenerationOutput, TextGenerationOutputDetails, TextGenerationParameters, TextGenerationSequenceDetails, TextToAudioParameters, TextToImageInput, TextToImageOutput, TextToImageParameters, TextToSpeechInput, TextToSpeechOutput, Token, TokenClassificationAggregationStrategy, TokenClassificationInput, TokenClassificationOutput, TokenClassificationOutputElement, TokenClassificationParameters, TokenizerConfig, TransformersInfo, TranslationInput, TranslationOutput, VideoClassificationInput, VideoClassificationOutput, VideoClassificationOutputElement, VideoClassificationParameters, VisualQuestionAnsweringInput, VisualQuestionAnsweringInputData, VisualQuestionAnsweringOutput, VisualQuestionAnsweringOutputElement, VisualQuestionAnsweringParameters, WidgetExample, WidgetExampleAssetAndPromptInput, WidgetExampleAssetAndTextInput, WidgetExampleAssetAndZeroShotInput, WidgetExampleAssetInput, WidgetExampleAttribute, WidgetExampleOutput, WidgetExampleOutputAnswerScore, WidgetExampleOutputLabels, WidgetExampleOutputText, WidgetExampleOutputUrl, WidgetExampleSentenceSimilarityInput, WidgetExampleStructuredDataInput, WidgetExampleTableDataInput, WidgetExampleTextAndContextInput, WidgetExampleTextAndTableInput, WidgetExampleTextInput, WidgetExampleZeroShotTextInput, WidgetType, WordBox, ZeroShotClassificationInput, ZeroShotClassificationInputData, ZeroShotClassificationOutput, ZeroShotClassificationOutputElement, ZeroShotClassificationParameters, ZeroShotImageClassificationInput, ZeroShotImageClassificationInputData, ZeroShotImageClassificationOutput, ZeroShotImageClassificationOutputElement, ZeroShotImageClassificationParameters, ZeroShotObjectDetectionInput, ZeroShotObjectDetectionInputData, ZeroShotObjectDetectionOutput, ZeroShotObjectDetectionOutputElement, index as snippets };
+export { ALL_DISPLAY_MODEL_LIBRARY_KEYS, ALL_MODEL_LIBRARY_KEYS, AudioClassificationInput, AudioClassificationOutput, AudioClassificationOutputElement, AudioClassificationParameters, AutomaticSpeechRecognitionInput, AutomaticSpeechRecognitionOutput, AutomaticSpeechRecognitionOutputChunk, AutomaticSpeechRecognitionParameters, BoundingBox, ChatMessage, ClassificationOutputTransform$1 as ClassificationOutputTransform, DepthEstimationInput, DepthEstimationOutput, DocumentQuestionAnsweringInput, DocumentQuestionAnsweringInputData, DocumentQuestionAnsweringOutput, DocumentQuestionAnsweringOutputElement, DocumentQuestionAnsweringParameters, EarlyStoppingUnion$2 as EarlyStoppingUnion, ExampleRepo, FeatureExtractionInput, FeatureExtractionOutput, FillMaskInput, FillMaskOutput, FillMaskOutputElement, FillMaskParameters, FinishReason, GenerationParameters$2 as GenerationParameters, ImageClassificationInput, ImageClassificationOutput, ImageClassificationOutputElement, ImageClassificationParameters, ImageSegmentationInput, ImageSegmentationOutput, ImageSegmentationOutputElement, ImageSegmentationParameters, ImageSegmentationSubtask, ImageToImageInput, ImageToImageOutput, ImageToImageParameters, ImageToTextInput, ImageToTextOutput, ImageToTextParameters, InferenceDisplayability, LIBRARY_TASK_MAPPING_EXCLUDING_TRANSFORMERS, LibraryUiElement, MAPPING_DEFAULT_WIDGET, MODALITIES, MODALITY_LABELS, MODEL_LIBRARIES_UI_ELEMENTS, Modality, ModelData, ModelLibraryKey, ObjectDetectionInput, ObjectDetectionOutput, ObjectDetectionOutputElement, ObjectDetectionParameters, PIPELINE_DATA, PIPELINE_TYPES, PIPELINE_TYPES_SET, PipelineData, PipelineType, PrefillToken, QuestionAnsweringInput, QuestionAnsweringInputData, QuestionAnsweringOutput, QuestionAnsweringOutputElement, QuestionAnsweringParameters, SPECIAL_TOKENS_ATTRIBUTES, SUBTASK_TYPES, SentenceSimilarityInput, SentenceSimilarityInputData, SentenceSimilarityOutput, SpecialTokensMap, SummarizationInput, SummarizationOutput, TASKS_DATA, TASKS_MODEL_LIBRARIES, TableQuestionAnsweringInput, TableQuestionAnsweringInputData, TableQuestionAnsweringOutput, TableQuestionAnsweringOutputElement, TargetSize$1 as TargetSize, TaskData, TaskDataCustom, TaskDemo, TaskDemoEntry, Text2TextGenerationParameters, Text2TextGenerationTruncationStrategy, TextClassificationInput, TextClassificationOutput, TextClassificationOutputElement, TextClassificationParameters, TextGenerationInput, TextGenerationOutput, TextGenerationOutputDetails, TextGenerationParameters, TextGenerationSequenceDetails, TextToAudioParameters, TextToImageInput, TextToImageOutput, TextToImageParameters, TextToSpeechInput, TextToSpeechOutput, Token, TokenClassificationAggregationStrategy, TokenClassificationInput, TokenClassificationOutput, TokenClassificationOutputElement, TokenClassificationParameters, TokenizerConfig, TransformersInfo, TranslationInput, TranslationOutput, VideoClassificationInput, VideoClassificationOutput, VideoClassificationOutputElement, VideoClassificationParameters, VisualQuestionAnsweringInput, VisualQuestionAnsweringInputData, VisualQuestionAnsweringOutput, VisualQuestionAnsweringOutputElement, VisualQuestionAnsweringParameters, WidgetExample, WidgetExampleAssetAndPromptInput, WidgetExampleAssetAndTextInput, WidgetExampleAssetAndZeroShotInput, WidgetExampleAssetInput, WidgetExampleAttribute, WidgetExampleChatInput, WidgetExampleOutput, WidgetExampleOutputAnswerScore, WidgetExampleOutputLabels, WidgetExampleOutputText, WidgetExampleOutputUrl, WidgetExampleSentenceSimilarityInput, WidgetExampleStructuredDataInput, WidgetExampleTableDataInput, WidgetExampleTextAndContextInput, WidgetExampleTextAndTableInput, WidgetExampleTextInput, WidgetExampleZeroShotTextInput, WidgetType, WordBox, ZeroShotClassificationInput, ZeroShotClassificationInputData, ZeroShotClassificationOutput, ZeroShotClassificationOutputElement, ZeroShotClassificationParameters, ZeroShotImageClassificationInput, ZeroShotImageClassificationInputData, ZeroShotImageClassificationOutput, ZeroShotImageClassificationOutputElement, ZeroShotImageClassificationParameters, ZeroShotObjectDetectionInput, ZeroShotObjectDetectionInputData, ZeroShotObjectDetectionOutput, ZeroShotObjectDetectionOutputElement, index as snippets };
package/dist/index.js CHANGED
@@ -4187,7 +4187,7 @@ model = joblib.load(
 };
 var sklearn = (model) => {
   if (model.tags?.includes("skops")) {
-    const skopsmodelFile = model.config?.sklearn?.filename;
+    const skopsmodelFile = model.config?.sklearn?.model?.file;
     const skopssaveFormat = model.config?.sklearn?.model_format;
     if (!skopsmodelFile) {
       return [`# \u26A0\uFE0F Model filename not specified in config.json`];
@@ -4257,7 +4257,7 @@ var speechBrainMethod = (speechbrainInterface) => {
   }
 };
 var speechbrain = (model) => {
-  const speechbrainInterface = model.config?.speechbrain?.interface;
+  const speechbrainInterface = model.config?.speechbrain?.speechbrain_interface;
   if (speechbrainInterface === void 0) {
     return [`# interface not specified in config.json`];
   }
@@ -4335,7 +4335,7 @@ var peftTask = (peftTaskType) => {
   }
 };
 var peft = (model) => {
-  const { base_model_name: peftBaseModel, task_type: peftTaskType } = model.config?.peft ?? {};
+  const { base_model_name_or_path: peftBaseModel, task_type: peftTaskType } = model.config?.peft ?? {};
   const pefttask = peftTask(peftTaskType);
   if (!pefttask) {
     return [`Task type is invalid.`];
package/package.json CHANGED
@@ -1,7 +1,7 @@
 {
   "name": "@huggingface/tasks",
   "packageManager": "pnpm@8.10.5",
-  "version": "0.3.4",
+  "version": "0.4.0",
   "description": "List of ML tasks for huggingface.co/tasks",
   "repository": "https://github.com/huggingface/huggingface.js.git",
   "publishConfig": {
package/src/index.ts CHANGED
@@ -19,12 +19,14 @@ export type { LibraryUiElement, ModelLibraryKey } from "./model-libraries";
 export type { ModelData, TransformersInfo } from "./model-data";
 export type { SpecialTokensMap, TokenizerConfig } from "./tokenizer-data";
 export type {
+	ChatMessage,
 	WidgetExample,
 	WidgetExampleAttribute,
 	WidgetExampleAssetAndPromptInput,
 	WidgetExampleAssetAndTextInput,
 	WidgetExampleAssetAndZeroShotInput,
 	WidgetExampleAssetInput,
+	WidgetExampleChatInput,
 	WidgetExampleSentenceSimilarityInput,
 	WidgetExampleStructuredDataInput,
 	WidgetExampleTableDataInput,
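With these re-exports, both new types are importable from the package root. A minimal consumer-side sketch:

    import type { ChatMessage } from "@huggingface/tasks";

    const history: ChatMessage[] = [
        { role: "user", content: "Hi!" },
        { role: "assistant", content: "Hello! How can I help?" },
    ];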
package/src/model-data.ts CHANGED
@@ -40,21 +40,46 @@ export interface ModelData {
 	/**
 	 * this dictionary has useful information about the model configuration
 	 */
-	config?: Record<string, unknown> & {
-		adapter_transformers?: { model_class?: string; model_name?: string };
+	config?: {
 		architectures?: string[];
+		/**
+		 * Dict of AutoModel or Auto… class name to local import path in the repo
+		 */
+		auto_map?: {
+			/**
+			 * String Property
+			 */
+			[x: string]: string;
+		};
+		model_type?: string;
+		quantization_config?: {
+			bits?: number;
+			load_in_4bit?: boolean;
+			load_in_8bit?: boolean;
+		};
+		tokenizer_config?: TokenizerConfig;
+		adapter_transformers?: {
+			model_name?: string;
+			model_class?: string;
+		};
+		diffusers?: {
+			_class_name?: string;
+		};
 		sklearn?: {
-			filename?: string;
+			model?: {
+				file?: string;
+			};
 			model_format?: string;
 		};
 		speechbrain?: {
-			interface?: string;
+			speechbrain_interface?: string;
+			vocoder_interface?: string;
+			vocoder_model_id?: string;
 		};
 		peft?: {
-			base_model_name?: string;
+			base_model_name_or_path?: string;
 			task_type?: string;
 		};
-		tokenizer_config?: TokenizerConfig;
 	};
 	/**
 	 * all the model tags
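This is the source of the .d.ts change above. For the newly typed entries, hypothetical config.json fragments would type-check like this (all class and file names here are invented for illustration):

    import type { ModelData } from "@huggingface/tasks";

    // Custom-code repo: auto_map points Auto-class names at files in the repo.
    const remoteCodeConfig: ModelData["config"] = {
        model_type: "llama",
        auto_map: { AutoModelForCausalLM: "modeling_custom.CustomModelForCausalLM" },
    };
    // Diffusers repo: the pipeline class name as stored in model_index.json.
    const diffusersConfig: ModelData["config"] = {
        diffusers: { _class_name: "StableDiffusionPipeline" },
    };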
package/src/model-libraries-snippets.ts CHANGED
@@ -293,7 +293,7 @@ model = joblib.load(
 export const sklearn = (model: ModelData): string[] => {
 	if (model.tags?.includes("skops")) {
-		const skopsmodelFile = model.config?.sklearn?.filename;
+		const skopsmodelFile = model.config?.sklearn?.model?.file;
 		const skopssaveFormat = model.config?.sklearn?.model_format;
 		if (!skopsmodelFile) {
 			return [`# ⚠️ Model filename not specified in config.json`];
@@ -372,7 +372,7 @@ const speechBrainMethod = (speechbrainInterface: string) => {
 };

 export const speechbrain = (model: ModelData): string[] => {
-	const speechbrainInterface = model.config?.speechbrain?.interface;
+	const speechbrainInterface = model.config?.speechbrain?.speechbrain_interface;
 	if (speechbrainInterface === undefined) {
 		return [`# interface not specified in config.json`];
 	}
@@ -465,7 +465,7 @@ const peftTask = (peftTaskType?: string) => {
 };

 export const peft = (model: ModelData): string[] => {
-	const { base_model_name: peftBaseModel, task_type: peftTaskType } = model.config?.peft ?? {};
+	const { base_model_name_or_path: peftBaseModel, task_type: peftTaskType } = model.config?.peft ?? {};
 	const pefttask = peftTask(peftTaskType);
 	if (!pefttask) {
 		return [`Task type is invalid.`];
package/src/widget-example.ts CHANGED
@@ -51,6 +51,15 @@ export interface WidgetExampleBase<TOutput> {
 	output?: TOutput;
 }

+export interface ChatMessage {
+	role: "user" | "assistant" | "system";
+	content: string;
+}
+
+export interface WidgetExampleChatInput<TOutput = WidgetExampleOutput> extends WidgetExampleBase<TOutput> {
+	messages: ChatMessage[];
+}
+
 export interface WidgetExampleTextInput<TOutput = WidgetExampleOutput> extends WidgetExampleBase<TOutput> {
 	text: string;
 }
@@ -101,6 +110,7 @@ export interface WidgetExampleSentenceSimilarityInput<TOutput = WidgetExampleOut
 //#endregion

 export type WidgetExample<TOutput = WidgetExampleOutput> =
+	| WidgetExampleChatInput<TOutput>
 	| WidgetExampleTextInput<TOutput>
 	| WidgetExampleTextAndContextInput<TOutput>
 	| WidgetExampleTextAndTableInput<TOutput>
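Since `messages` is unique to WidgetExampleChatInput across the union, a key check narrows WidgetExample. A sketch of how a widget renderer might discriminate chat examples (the guard function is ours, not part of the package):

    import type { WidgetExample, WidgetExampleChatInput } from "@huggingface/tasks";

    function isChatExample(example: WidgetExample): example is WidgetExampleChatInput {
        return "messages" in example;
    }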