@huggingface/tasks 0.0.3 → 0.0.5
This diff shows the changes between publicly released versions of the package as they appear in its public registry, and is provided for informational purposes only.
- package/LICENSE +21 -0
- package/README.md +20 -0
- package/dist/index.d.ts +368 -46
- package/dist/index.js +117 -41
- package/dist/{index.cjs → index.mjs} +84 -67
- package/package.json +43 -33
- package/src/Types.ts +49 -43
- package/src/audio-classification/about.md +5 -5
- package/src/audio-classification/data.ts +11 -11
- package/src/audio-to-audio/about.md +4 -3
- package/src/audio-to-audio/data.ts +18 -15
- package/src/automatic-speech-recognition/about.md +5 -4
- package/src/automatic-speech-recognition/data.ts +18 -17
- package/src/const.ts +52 -44
- package/src/conversational/about.md +9 -9
- package/src/conversational/data.ts +22 -18
- package/src/depth-estimation/about.md +1 -3
- package/src/depth-estimation/data.ts +11 -11
- package/src/document-question-answering/about.md +1 -2
- package/src/document-question-answering/data.ts +22 -19
- package/src/feature-extraction/about.md +2 -3
- package/src/feature-extraction/data.ts +12 -15
- package/src/fill-mask/about.md +1 -1
- package/src/fill-mask/data.ts +16 -14
- package/src/image-classification/about.md +5 -3
- package/src/image-classification/data.ts +15 -15
- package/src/image-segmentation/about.md +4 -4
- package/src/image-segmentation/data.ts +26 -23
- package/src/image-to-image/about.md +10 -12
- package/src/image-to-image/data.ts +31 -27
- package/src/image-to-text/about.md +13 -6
- package/src/image-to-text/data.ts +20 -21
- package/src/index.ts +11 -0
- package/src/modelLibraries.ts +43 -0
- package/src/object-detection/about.md +2 -1
- package/src/object-detection/data.ts +20 -17
- package/src/pipelines.ts +619 -0
- package/src/placeholder/about.md +3 -3
- package/src/placeholder/data.ts +8 -8
- package/src/question-answering/about.md +1 -1
- package/src/question-answering/data.ts +21 -19
- package/src/reinforcement-learning/about.md +167 -176
- package/src/reinforcement-learning/data.ts +75 -78
- package/src/sentence-similarity/data.ts +29 -28
- package/src/summarization/about.md +6 -5
- package/src/summarization/data.ts +23 -20
- package/src/table-question-answering/about.md +5 -5
- package/src/table-question-answering/data.ts +35 -39
- package/src/tabular-classification/about.md +4 -6
- package/src/tabular-classification/data.ts +11 -12
- package/src/tabular-regression/about.md +14 -18
- package/src/tabular-regression/data.ts +10 -11
- package/src/tasksData.ts +47 -50
- package/src/text-classification/about.md +5 -4
- package/src/text-classification/data.ts +21 -20
- package/src/text-generation/about.md +7 -6
- package/src/text-generation/data.ts +36 -34
- package/src/text-to-image/about.md +19 -18
- package/src/text-to-image/data.ts +32 -26
- package/src/text-to-speech/about.md +4 -5
- package/src/text-to-speech/data.ts +16 -17
- package/src/text-to-video/about.md +41 -36
- package/src/text-to-video/data.ts +43 -38
- package/src/token-classification/about.md +1 -3
- package/src/token-classification/data.ts +26 -25
- package/src/translation/about.md +4 -4
- package/src/translation/data.ts +21 -21
- package/src/unconditional-image-generation/about.md +10 -5
- package/src/unconditional-image-generation/data.ts +26 -20
- package/src/video-classification/about.md +5 -1
- package/src/video-classification/data.ts +14 -14
- package/src/visual-question-answering/about.md +8 -3
- package/src/visual-question-answering/data.ts +22 -19
- package/src/zero-shot-classification/about.md +5 -4
- package/src/zero-shot-classification/data.ts +20 -20
- package/src/zero-shot-image-classification/about.md +17 -9
- package/src/zero-shot-image-classification/data.ts +12 -14
- package/tsconfig.json +18 -0
- package/assets/audio-classification/audio.wav +0 -0
- package/assets/audio-to-audio/input.wav +0 -0
- package/assets/audio-to-audio/label-0.wav +0 -0
- package/assets/audio-to-audio/label-1.wav +0 -0
- package/assets/automatic-speech-recognition/input.flac +0 -0
- package/assets/automatic-speech-recognition/wav2vec2.png +0 -0
- package/assets/contribution-guide/anatomy.png +0 -0
- package/assets/contribution-guide/libraries.png +0 -0
- package/assets/depth-estimation/depth-estimation-input.jpg +0 -0
- package/assets/depth-estimation/depth-estimation-output.png +0 -0
- package/assets/document-question-answering/document-question-answering-input.png +0 -0
- package/assets/image-classification/image-classification-input.jpeg +0 -0
- package/assets/image-segmentation/image-segmentation-input.jpeg +0 -0
- package/assets/image-segmentation/image-segmentation-output.png +0 -0
- package/assets/image-to-image/image-to-image-input.jpeg +0 -0
- package/assets/image-to-image/image-to-image-output.png +0 -0
- package/assets/image-to-image/pix2pix_examples.jpg +0 -0
- package/assets/image-to-text/savanna.jpg +0 -0
- package/assets/object-detection/object-detection-input.jpg +0 -0
- package/assets/object-detection/object-detection-output.jpg +0 -0
- package/assets/table-question-answering/tableQA.jpg +0 -0
- package/assets/text-to-image/image.jpeg +0 -0
- package/assets/text-to-speech/audio.wav +0 -0
- package/assets/text-to-video/text-to-video-output.gif +0 -0
- package/assets/unconditional-image-generation/unconditional-image-generation-output.jpeg +0 -0
- package/assets/video-classification/video-classification-input.gif +0 -0
- package/assets/visual-question-answering/elephant.jpeg +0 -0
- package/assets/zero-shot-image-classification/image-classification-input.jpeg +0 -0
- package/dist/index.d.cts +0 -145
package/dist/index.js
CHANGED
@@ -1,8 +1,46 @@
-
-
-
-
-var
+"use strict";
+var __defProp = Object.defineProperty;
+var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
+var __getOwnPropNames = Object.getOwnPropertyNames;
+var __hasOwnProp = Object.prototype.hasOwnProperty;
+var __export = (target, all) => {
+  for (var name in all)
+    __defProp(target, name, { get: all[name], enumerable: true });
+};
+var __copyProps = (to, from, except, desc) => {
+  if (from && typeof from === "object" || typeof from === "function") {
+    for (let key of __getOwnPropNames(from))
+      if (!__hasOwnProp.call(to, key) && key !== except)
+        __defProp(to, key, { get: () => from[key], enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable });
+  }
+  return to;
+};
+var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);
+
+// src/index.ts
+var src_exports = {};
+__export(src_exports, {
+  MODALITIES: () => MODALITIES,
+  MODALITY_LABELS: () => MODALITY_LABELS,
+  ModelLibrary: () => ModelLibrary,
+  PIPELINE_DATA: () => PIPELINE_DATA,
+  PIPELINE_TYPES: () => PIPELINE_TYPES,
+  TASKS_DATA: () => TASKS_DATA
+});
+module.exports = __toCommonJS(src_exports);
+
+// src/pipelines.ts
+var MODALITIES = ["cv", "nlp", "audio", "tabular", "multimodal", "rl", "other"];
+var MODALITY_LABELS = {
+  multimodal: "Multimodal",
+  nlp: "Natural Language Processing",
+  audio: "Audio",
+  cv: "Computer Vision",
+  rl: "Reinforcement Learning",
+  tabular: "Tabular",
+  other: "Other"
+};
+var PIPELINE_DATA = {
   "text-classification": {
     name: "Text Classification",
     subtasks: [
@@ -138,12 +176,12 @@ var PIPELINE_DATA = ensureRecordOfPipelines({
     modality: "nlp",
     color: "yellow"
   },
-
+  translation: {
     name: "Translation",
     modality: "nlp",
     color: "green"
   },
-
+  summarization: {
     name: "Summarization",
     subtasks: [
       {
@@ -158,7 +196,7 @@ var PIPELINE_DATA = ensureRecordOfPipelines({
     modality: "nlp",
     color: "indigo"
   },
-
+  conversational: {
     name: "Conversational",
     subtasks: [
       {
@@ -386,7 +424,7 @@ var PIPELINE_DATA = ensureRecordOfPipelines({
     modality: "rl",
     color: "red"
   },
-
+  robotics: {
     name: "Robotics",
     modality: "rl",
     subtasks: [
@@ -539,19 +577,15 @@ var PIPELINE_DATA = ensureRecordOfPipelines({
     modality: "multimodal",
     color: "green"
   },
-
+  other: {
     name: "Other",
     modality: "other",
     color: "blue",
     hideInModels: true,
     hideInDatasets: true
   }
-}
-var
-var ALL_PIPELINE_TYPES_SET = new Set(ALL_PIPELINE_TYPES);
-var ALL_SUBTASKS = Object.values(PIPELINE_DATA).flatMap((data) => data.subtasks ?? []);
-var ALL_SUBTASK_TYPES = ALL_SUBTASKS.map((s) => s.type);
-var ALL_SUBTASK_TYPES_SET = new Set(ALL_SUBTASK_TYPES);
+};
+var PIPELINE_TYPES = Object.keys(PIPELINE_DATA);
 
 // src/audio-classification/data.ts
 var taskData = {
@@ -1839,19 +1873,9 @@ var taskData19 = {
         ],
         type: "tabular"
       },
-      {
-        label: "Question",
-        content: "What is the number of reigns for Harley Race?",
-        type: "text"
-      }
+      { label: "Question", content: "What is the number of reigns for Harley Race?", type: "text" }
     ],
-    outputs: [
-      {
-        label: "Result",
-        content: "7",
-        type: "text"
-      }
-    ]
+    outputs: [{ label: "Result", content: "7", type: "text" }]
   },
   metrics: [
     {
@@ -2978,7 +3002,7 @@ var TASKS_MODEL_LIBRARIES = {
   "audio-classification": ["speechbrain", "transformers"],
   "audio-to-audio": ["asteroid", "speechbrain"],
   "automatic-speech-recognition": ["espnet", "nemo", "speechbrain", "transformers", "transformers.js"],
-
+  conversational: ["transformers"],
   "depth-estimation": ["transformers"],
   "document-question-answering": ["transformers"],
   "feature-extraction": ["sentence-transformers", "transformers", "transformers.js"],
@@ -2991,12 +3015,12 @@ var TASKS_MODEL_LIBRARIES = {
   "video-classification": [],
   "multiple-choice": ["transformers"],
   "object-detection": ["transformers", "transformers.js"],
-
+  other: [],
   "question-answering": ["adapter-transformers", "allennlp", "transformers", "transformers.js"],
-
+  robotics: [],
   "reinforcement-learning": ["transformers", "stable-baselines3", "ml-agents", "sample-factory"],
   "sentence-similarity": ["sentence-transformers", "spacy", "transformers.js"],
-
+  summarization: ["transformers", "transformers.js"],
   "table-question-answering": ["transformers"],
   "table-to-text": ["transformers"],
   "tabular-classification": ["sklearn"],
@@ -3011,8 +3035,16 @@ var TASKS_MODEL_LIBRARIES = {
   "text-to-video": [],
   "text2text-generation": ["transformers", "transformers.js"],
   "time-series-forecasting": [],
-  "token-classification": [
-
+  "token-classification": [
+    "adapter-transformers",
+    "flair",
+    "spacy",
+    "span-marker",
+    "stanza",
+    "transformers",
+    "transformers.js"
+  ],
+  translation: ["transformers", "transformers.js"],
   "unconditional-image-generation": [],
   "visual-question-answering": [],
   "voice-activity-detection": [],
@@ -3025,7 +3057,7 @@ var TASKS_DATA = {
   "audio-classification": getData("audio-classification", data_default),
   "audio-to-audio": getData("audio-to-audio", data_default2),
   "automatic-speech-recognition": getData("automatic-speech-recognition", data_default3),
-
+  conversational: getData("conversational", data_default4),
   "depth-estimation": getData("depth-estimation", data_default13),
   "document-question-answering": getData("document-question-answering", data_default5),
   "feature-extraction": getData("feature-extraction", data_default6),
@@ -3038,12 +3070,12 @@ var TASKS_DATA = {
   "multiple-choice": void 0,
   "object-detection": getData("object-detection", data_default12),
   "video-classification": getData("video-classification", data_default30),
-
+  other: void 0,
   "question-answering": getData("question-answering", data_default16),
   "reinforcement-learning": getData("reinforcement-learning", data_default15),
-
+  robotics: void 0,
   "sentence-similarity": getData("sentence-similarity", data_default17),
-
+  summarization: getData("summarization", data_default18),
   "table-question-answering": getData("table-question-answering", data_default19),
   "table-to-text": void 0,
   "tabular-classification": getData("tabular-classification", data_default20),
@@ -3059,7 +3091,7 @@ var TASKS_DATA = {
   "text2text-generation": void 0,
   "time-series-forecasting": void 0,
   "token-classification": getData("token-classification", data_default24),
-
+  translation: getData("translation", data_default25),
   "unconditional-image-generation": getData("unconditional-image-generation", data_default29),
   "visual-question-answering": getData("visual-question-answering", data_default31),
   "voice-activity-detection": void 0,
@@ -3074,6 +3106,50 @@ function getData(type, partialTaskData = data_default14) {
     libraries: TASKS_MODEL_LIBRARIES[type]
   };
 }
-
+
+// src/modelLibraries.ts
+var ModelLibrary = /* @__PURE__ */ ((ModelLibrary2) => {
+  ModelLibrary2["adapter-transformers"] = "Adapter Transformers";
+  ModelLibrary2["allennlp"] = "allenNLP";
+  ModelLibrary2["asteroid"] = "Asteroid";
+  ModelLibrary2["bertopic"] = "BERTopic";
+  ModelLibrary2["diffusers"] = "Diffusers";
+  ModelLibrary2["doctr"] = "docTR";
+  ModelLibrary2["espnet"] = "ESPnet";
+  ModelLibrary2["fairseq"] = "Fairseq";
+  ModelLibrary2["flair"] = "Flair";
+  ModelLibrary2["keras"] = "Keras";
+  ModelLibrary2["k2"] = "K2";
+  ModelLibrary2["nemo"] = "NeMo";
+  ModelLibrary2["open_clip"] = "OpenCLIP";
+  ModelLibrary2["paddlenlp"] = "PaddleNLP";
+  ModelLibrary2["peft"] = "PEFT";
+  ModelLibrary2["pyannote-audio"] = "pyannote.audio";
+  ModelLibrary2["sample-factory"] = "Sample Factory";
+  ModelLibrary2["sentence-transformers"] = "Sentence Transformers";
+  ModelLibrary2["sklearn"] = "Scikit-learn";
+  ModelLibrary2["spacy"] = "spaCy";
+  ModelLibrary2["span-marker"] = "SpanMarker";
+  ModelLibrary2["speechbrain"] = "speechbrain";
+  ModelLibrary2["tensorflowtts"] = "TensorFlowTTS";
+  ModelLibrary2["timm"] = "Timm";
+  ModelLibrary2["fastai"] = "fastai";
+  ModelLibrary2["transformers"] = "Transformers";
+  ModelLibrary2["transformers.js"] = "Transformers.js";
+  ModelLibrary2["stanza"] = "Stanza";
+  ModelLibrary2["fasttext"] = "fastText";
+  ModelLibrary2["stable-baselines3"] = "Stable-Baselines3";
+  ModelLibrary2["ml-agents"] = "ML-Agents";
+  ModelLibrary2["pythae"] = "Pythae";
+  ModelLibrary2["mindspore"] = "MindSpore";
+  return ModelLibrary2;
+})(ModelLibrary || {});
+// Annotate the CommonJS export names for ESM import in node:
+0 && (module.exports = {
+  MODALITIES,
+  MODALITY_LABELS,
+  ModelLibrary,
+  PIPELINE_DATA,
+  PIPELINE_TYPES,
   TASKS_DATA
-};
+});
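Functionally, the 0.0.3 CommonJS bundle exported only TASKS_DATA; 0.0.5 adds MODALITIES, MODALITY_LABELS, ModelLibrary, PIPELINE_DATA, and PIPELINE_TYPES to the public surface. A minimal consumer sketch in TypeScript, assuming only the shapes visible in this diff (each PIPELINE_DATA entry carries a name and a modality, plus optional color/subtasks):

import { MODALITY_LABELS, PIPELINE_DATA, PIPELINE_TYPES } from "@huggingface/tasks";

// Group every pipeline type under its human-readable modality label,
// e.g. "Natural Language Processing" -> ["text-classification", "translation", ...].
const byModality: Record<string, string[]> = {};
for (const type of PIPELINE_TYPES) {
  const label = MODALITY_LABELS[PIPELINE_DATA[type].modality];
  (byModality[label] ??= []).push(type);
}
console.log(byModality);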
package/dist/{index.cjs → index.mjs}
RENAMED
@@ -1,34 +1,15 @@
-
-var
-var
-
-
-
-
-
+// src/pipelines.ts
+var MODALITIES = ["cv", "nlp", "audio", "tabular", "multimodal", "rl", "other"];
+var MODALITY_LABELS = {
+  multimodal: "Multimodal",
+  nlp: "Natural Language Processing",
+  audio: "Audio",
+  cv: "Computer Vision",
+  rl: "Reinforcement Learning",
+  tabular: "Tabular",
+  other: "Other"
 };
-var
-  if (from && typeof from === "object" || typeof from === "function") {
-    for (let key of __getOwnPropNames(from))
-      if (!__hasOwnProp.call(to, key) && key !== except)
-        __defProp(to, key, { get: () => from[key], enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable });
-  }
-  return to;
-};
-var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);
-
-// src/index.ts
-var src_exports = {};
-__export(src_exports, {
-  TASKS_DATA: () => TASKS_DATA
-});
-module.exports = __toCommonJS(src_exports);
-
-// ../js/src/lib/interfaces/Types.ts
-function ensureRecordOfPipelines(record) {
-  return record;
-}
-var PIPELINE_DATA = ensureRecordOfPipelines({
+var PIPELINE_DATA = {
   "text-classification": {
     name: "Text Classification",
     subtasks: [
@@ -164,12 +145,12 @@ var PIPELINE_DATA = ensureRecordOfPipelines({
     modality: "nlp",
     color: "yellow"
   },
-
+  translation: {
     name: "Translation",
     modality: "nlp",
     color: "green"
   },
-
+  summarization: {
     name: "Summarization",
     subtasks: [
       {
@@ -184,7 +165,7 @@ var PIPELINE_DATA = ensureRecordOfPipelines({
     modality: "nlp",
     color: "indigo"
   },
-
+  conversational: {
     name: "Conversational",
     subtasks: [
       {
@@ -412,7 +393,7 @@ var PIPELINE_DATA = ensureRecordOfPipelines({
     modality: "rl",
     color: "red"
   },
-
+  robotics: {
     name: "Robotics",
     modality: "rl",
     subtasks: [
@@ -565,19 +546,15 @@ var PIPELINE_DATA = ensureRecordOfPipelines({
     modality: "multimodal",
     color: "green"
   },
-
+  other: {
     name: "Other",
     modality: "other",
     color: "blue",
     hideInModels: true,
     hideInDatasets: true
   }
-}
-var
-var ALL_PIPELINE_TYPES_SET = new Set(ALL_PIPELINE_TYPES);
-var ALL_SUBTASKS = Object.values(PIPELINE_DATA).flatMap((data) => data.subtasks ?? []);
-var ALL_SUBTASK_TYPES = ALL_SUBTASKS.map((s) => s.type);
-var ALL_SUBTASK_TYPES_SET = new Set(ALL_SUBTASK_TYPES);
+};
+var PIPELINE_TYPES = Object.keys(PIPELINE_DATA);
 
 // src/audio-classification/data.ts
 var taskData = {
@@ -1865,19 +1842,9 @@ var taskData19 = {
         ],
         type: "tabular"
       },
-      {
-        label: "Question",
-        content: "What is the number of reigns for Harley Race?",
-        type: "text"
-      }
+      { label: "Question", content: "What is the number of reigns for Harley Race?", type: "text" }
     ],
-    outputs: [
-      {
-        label: "Result",
-        content: "7",
-        type: "text"
-      }
-    ]
+    outputs: [{ label: "Result", content: "7", type: "text" }]
   },
   metrics: [
     {
@@ -3004,7 +2971,7 @@ var TASKS_MODEL_LIBRARIES = {
   "audio-classification": ["speechbrain", "transformers"],
   "audio-to-audio": ["asteroid", "speechbrain"],
   "automatic-speech-recognition": ["espnet", "nemo", "speechbrain", "transformers", "transformers.js"],
-
+  conversational: ["transformers"],
   "depth-estimation": ["transformers"],
   "document-question-answering": ["transformers"],
   "feature-extraction": ["sentence-transformers", "transformers", "transformers.js"],
@@ -3017,12 +2984,12 @@ var TASKS_MODEL_LIBRARIES = {
   "video-classification": [],
   "multiple-choice": ["transformers"],
   "object-detection": ["transformers", "transformers.js"],
-
+  other: [],
   "question-answering": ["adapter-transformers", "allennlp", "transformers", "transformers.js"],
-
+  robotics: [],
   "reinforcement-learning": ["transformers", "stable-baselines3", "ml-agents", "sample-factory"],
   "sentence-similarity": ["sentence-transformers", "spacy", "transformers.js"],
-
+  summarization: ["transformers", "transformers.js"],
   "table-question-answering": ["transformers"],
   "table-to-text": ["transformers"],
   "tabular-classification": ["sklearn"],
@@ -3037,8 +3004,16 @@ var TASKS_MODEL_LIBRARIES = {
   "text-to-video": [],
   "text2text-generation": ["transformers", "transformers.js"],
   "time-series-forecasting": [],
-  "token-classification": [
-
+  "token-classification": [
+    "adapter-transformers",
+    "flair",
+    "spacy",
+    "span-marker",
+    "stanza",
+    "transformers",
+    "transformers.js"
+  ],
+  translation: ["transformers", "transformers.js"],
   "unconditional-image-generation": [],
   "visual-question-answering": [],
   "voice-activity-detection": [],
@@ -3051,7 +3026,7 @@ var TASKS_DATA = {
   "audio-classification": getData("audio-classification", data_default),
   "audio-to-audio": getData("audio-to-audio", data_default2),
   "automatic-speech-recognition": getData("automatic-speech-recognition", data_default3),
-
+  conversational: getData("conversational", data_default4),
   "depth-estimation": getData("depth-estimation", data_default13),
   "document-question-answering": getData("document-question-answering", data_default5),
   "feature-extraction": getData("feature-extraction", data_default6),
@@ -3064,12 +3039,12 @@ var TASKS_DATA = {
   "multiple-choice": void 0,
   "object-detection": getData("object-detection", data_default12),
   "video-classification": getData("video-classification", data_default30),
-
+  other: void 0,
   "question-answering": getData("question-answering", data_default16),
   "reinforcement-learning": getData("reinforcement-learning", data_default15),
-
+  robotics: void 0,
   "sentence-similarity": getData("sentence-similarity", data_default17),
-
+  summarization: getData("summarization", data_default18),
   "table-question-answering": getData("table-question-answering", data_default19),
   "table-to-text": void 0,
   "tabular-classification": getData("tabular-classification", data_default20),
@@ -3085,7 +3060,7 @@ var TASKS_DATA = {
   "text2text-generation": void 0,
   "time-series-forecasting": void 0,
   "token-classification": getData("token-classification", data_default24),
-
+  translation: getData("translation", data_default25),
   "unconditional-image-generation": getData("unconditional-image-generation", data_default29),
   "visual-question-answering": getData("visual-question-answering", data_default31),
   "voice-activity-detection": void 0,
@@ -3100,7 +3075,49 @@ function getData(type, partialTaskData = data_default14) {
     libraries: TASKS_MODEL_LIBRARIES[type]
   };
 }
-
-
+
+// src/modelLibraries.ts
+var ModelLibrary = /* @__PURE__ */ ((ModelLibrary2) => {
+  ModelLibrary2["adapter-transformers"] = "Adapter Transformers";
+  ModelLibrary2["allennlp"] = "allenNLP";
+  ModelLibrary2["asteroid"] = "Asteroid";
+  ModelLibrary2["bertopic"] = "BERTopic";
+  ModelLibrary2["diffusers"] = "Diffusers";
+  ModelLibrary2["doctr"] = "docTR";
+  ModelLibrary2["espnet"] = "ESPnet";
+  ModelLibrary2["fairseq"] = "Fairseq";
+  ModelLibrary2["flair"] = "Flair";
+  ModelLibrary2["keras"] = "Keras";
+  ModelLibrary2["k2"] = "K2";
+  ModelLibrary2["nemo"] = "NeMo";
+  ModelLibrary2["open_clip"] = "OpenCLIP";
+  ModelLibrary2["paddlenlp"] = "PaddleNLP";
+  ModelLibrary2["peft"] = "PEFT";
+  ModelLibrary2["pyannote-audio"] = "pyannote.audio";
+  ModelLibrary2["sample-factory"] = "Sample Factory";
+  ModelLibrary2["sentence-transformers"] = "Sentence Transformers";
+  ModelLibrary2["sklearn"] = "Scikit-learn";
+  ModelLibrary2["spacy"] = "spaCy";
+  ModelLibrary2["span-marker"] = "SpanMarker";
+  ModelLibrary2["speechbrain"] = "speechbrain";
+  ModelLibrary2["tensorflowtts"] = "TensorFlowTTS";
+  ModelLibrary2["timm"] = "Timm";
+  ModelLibrary2["fastai"] = "fastai";
+  ModelLibrary2["transformers"] = "Transformers";
+  ModelLibrary2["transformers.js"] = "Transformers.js";
+  ModelLibrary2["stanza"] = "Stanza";
+  ModelLibrary2["fasttext"] = "fastText";
+  ModelLibrary2["stable-baselines3"] = "Stable-Baselines3";
+  ModelLibrary2["ml-agents"] = "ML-Agents";
+  ModelLibrary2["pythae"] = "Pythae";
+  ModelLibrary2["mindspore"] = "MindSpore";
+  return ModelLibrary2;
+})(ModelLibrary || {});
+export {
+  MODALITIES,
+  MODALITY_LABELS,
+  ModelLibrary,
+  PIPELINE_DATA,
+  PIPELINE_TYPES,
   TASKS_DATA
-}
+};
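The ESM build also moves from dist/index.cjs to dist/index.mjs and, like the CommonJS build, drops the ensureRecordOfPipelines wrapper together with the ALL_PIPELINE_TYPES_SET / ALL_SUBTASKS / ALL_SUBTASK_TYPES_SET helpers; PIPELINE_TYPES is now derived directly via Object.keys(PIPELINE_DATA). A consumer that relied on the removed validation sets can rebuild them from the public exports. A sketch, not part of the package:

import { PIPELINE_DATA, PIPELINE_TYPES } from "@huggingface/tasks";

// Equivalent of the removed ALL_PIPELINE_TYPES_SET: O(1) membership checks.
const PIPELINE_TYPES_SET = new Set<string>(PIPELINE_TYPES);
const isPipelineType = (value: string): boolean => PIPELINE_TYPES_SET.has(value);

// Equivalent of the removed ALL_SUBTASK_TYPES_SET, using the same
// `data.subtasks ?? []` fallback the 0.0.3 code used.
const SUBTASK_TYPES_SET = new Set(
  Object.values(PIPELINE_DATA)
    .flatMap((data) => data.subtasks ?? [])
    .map((s) => s.type)
);

console.log(isPipelineType("translation")); // true
console.log(isPipelineType("not-a-task")); // false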
package/package.json
CHANGED
@@ -1,34 +1,44 @@
 {
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+  "name": "@huggingface/tasks",
+  "packageManager": "pnpm@8.10.5",
+  "version": "0.0.5",
+  "description": "List of ML tasks for huggingface.co/tasks",
+  "repository": "https://github.com/huggingface/huggingface.js.git",
+  "publishConfig": {
+    "access": "public"
+  },
+  "main": "./dist/index.js",
+  "module": "./dist/index.mjs",
+  "types": "./dist/index.d.ts",
+  "exports": {
+    ".": {
+      "types": "./dist/index.d.ts",
+      "require": "./dist/index.js",
+      "import": "./dist/index.mjs"
+    }
+  },
+  "source": "src/index.ts",
+  "files": [
+    "dist",
+    "src",
+    "tsconfig.json"
+  ],
+  "keywords": [
+    "huggingface",
+    "hub",
+    "languages"
+  ],
+  "author": "Hugging Face",
+  "license": "MIT",
+  "devDependencies": {
+    "typescript": "^5.0.4"
+  },
+  "scripts": {
+    "lint": "eslint --quiet --fix --ext .cjs,.ts .",
+    "lint:check": "eslint --ext .cjs,.ts .",
+    "format": "prettier --write .",
+    "format:check": "prettier --check .",
+    "build": "tsup src/index.ts --format cjs,esm --clean --dts",
+    "check": "tsc"
+  }
+}