@huggingface/tasks 0.0.3 → 0.0.4
- package/LICENSE +21 -0
- package/README.md +20 -0
- package/dist/index.d.ts +358 -46
- package/dist/index.js +103 -41
- package/dist/{index.cjs → index.mjs} +73 -68
- package/package.json +43 -33
- package/src/Types.ts +49 -43
- package/src/audio-classification/about.md +5 -5
- package/src/audio-classification/data.ts +11 -11
- package/src/audio-to-audio/about.md +4 -3
- package/src/audio-to-audio/data.ts +18 -15
- package/src/automatic-speech-recognition/about.md +5 -4
- package/src/automatic-speech-recognition/data.ts +18 -17
- package/src/const.ts +52 -44
- package/src/conversational/about.md +9 -9
- package/src/conversational/data.ts +22 -18
- package/src/depth-estimation/about.md +1 -3
- package/src/depth-estimation/data.ts +11 -11
- package/src/document-question-answering/about.md +1 -2
- package/src/document-question-answering/data.ts +22 -19
- package/src/feature-extraction/about.md +2 -3
- package/src/feature-extraction/data.ts +12 -15
- package/src/fill-mask/about.md +1 -1
- package/src/fill-mask/data.ts +16 -14
- package/src/image-classification/about.md +5 -3
- package/src/image-classification/data.ts +15 -15
- package/src/image-segmentation/about.md +4 -4
- package/src/image-segmentation/data.ts +26 -23
- package/src/image-to-image/about.md +8 -10
- package/src/image-to-image/data.ts +31 -27
- package/src/image-to-text/about.md +13 -6
- package/src/image-to-text/data.ts +20 -21
- package/src/index.ts +2 -0
- package/src/modelLibraries.ts +43 -0
- package/src/object-detection/about.md +2 -1
- package/src/object-detection/data.ts +20 -17
- package/src/pipelines.ts +608 -0
- package/src/placeholder/about.md +3 -3
- package/src/placeholder/data.ts +8 -8
- package/src/question-answering/about.md +1 -1
- package/src/question-answering/data.ts +21 -19
- package/src/reinforcement-learning/about.md +167 -176
- package/src/reinforcement-learning/data.ts +75 -78
- package/src/sentence-similarity/data.ts +29 -28
- package/src/summarization/about.md +6 -5
- package/src/summarization/data.ts +23 -20
- package/src/table-question-answering/about.md +5 -5
- package/src/table-question-answering/data.ts +35 -39
- package/src/tabular-classification/about.md +4 -6
- package/src/tabular-classification/data.ts +11 -12
- package/src/tabular-regression/about.md +14 -18
- package/src/tabular-regression/data.ts +10 -11
- package/src/tasksData.ts +47 -50
- package/src/text-classification/about.md +5 -4
- package/src/text-classification/data.ts +21 -20
- package/src/text-generation/about.md +7 -6
- package/src/text-generation/data.ts +36 -34
- package/src/text-to-image/about.md +19 -18
- package/src/text-to-image/data.ts +32 -26
- package/src/text-to-speech/about.md +4 -5
- package/src/text-to-speech/data.ts +16 -17
- package/src/text-to-video/about.md +41 -36
- package/src/text-to-video/data.ts +43 -38
- package/src/token-classification/about.md +1 -3
- package/src/token-classification/data.ts +26 -25
- package/src/translation/about.md +4 -4
- package/src/translation/data.ts +21 -21
- package/src/unconditional-image-generation/about.md +10 -5
- package/src/unconditional-image-generation/data.ts +26 -20
- package/src/video-classification/about.md +5 -1
- package/src/video-classification/data.ts +14 -14
- package/src/visual-question-answering/about.md +8 -3
- package/src/visual-question-answering/data.ts +22 -19
- package/src/zero-shot-classification/about.md +5 -4
- package/src/zero-shot-classification/data.ts +20 -20
- package/src/zero-shot-image-classification/about.md +17 -9
- package/src/zero-shot-image-classification/data.ts +12 -14
- package/tsconfig.json +18 -0
- package/assets/audio-classification/audio.wav +0 -0
- package/assets/audio-to-audio/input.wav +0 -0
- package/assets/audio-to-audio/label-0.wav +0 -0
- package/assets/audio-to-audio/label-1.wav +0 -0
- package/assets/automatic-speech-recognition/input.flac +0 -0
- package/assets/automatic-speech-recognition/wav2vec2.png +0 -0
- package/assets/contribution-guide/anatomy.png +0 -0
- package/assets/contribution-guide/libraries.png +0 -0
- package/assets/depth-estimation/depth-estimation-input.jpg +0 -0
- package/assets/depth-estimation/depth-estimation-output.png +0 -0
- package/assets/document-question-answering/document-question-answering-input.png +0 -0
- package/assets/image-classification/image-classification-input.jpeg +0 -0
- package/assets/image-segmentation/image-segmentation-input.jpeg +0 -0
- package/assets/image-segmentation/image-segmentation-output.png +0 -0
- package/assets/image-to-image/image-to-image-input.jpeg +0 -0
- package/assets/image-to-image/image-to-image-output.png +0 -0
- package/assets/image-to-image/pix2pix_examples.jpg +0 -0
- package/assets/image-to-text/savanna.jpg +0 -0
- package/assets/object-detection/object-detection-input.jpg +0 -0
- package/assets/object-detection/object-detection-output.jpg +0 -0
- package/assets/table-question-answering/tableQA.jpg +0 -0
- package/assets/text-to-image/image.jpeg +0 -0
- package/assets/text-to-speech/audio.wav +0 -0
- package/assets/text-to-video/text-to-video-output.gif +0 -0
- package/assets/unconditional-image-generation/unconditional-image-generation-output.jpeg +0 -0
- package/assets/video-classification/video-classification-input.gif +0 -0
- package/assets/visual-question-answering/elephant.jpeg +0 -0
- package/assets/zero-shot-image-classification/image-classification-input.jpeg +0 -0
- package/dist/index.d.cts +0 -145
package/src/pipelines.ts
ADDED
@@ -0,0 +1,608 @@
+export const MODALITIES = ["cv", "nlp", "audio", "tabular", "multimodal", "rl", "other"] as const;
+
+export type Modality = (typeof MODALITIES)[number];
+
+/**
+ * Public interface for a sub task.
+ *
+ * This can be used in a model card's `model-index` metadata.
+ * and is more granular classification that can grow significantly
+ * over time as new tasks are added.
+ */
+export interface SubTask {
+	/**
+	 * type of the task (e.g. audio-source-separation)
+	 */
+	type: string;
+	/**
+	 * displayed name of the task (e.g. Audio Source Separation)
+	 */
+	name: string;
+}
+
+/**
+ * Public interface for a PipelineData.
+ *
+ * This information corresponds to a pipeline type (aka task)
+ * in the Hub.
+ */
+export interface PipelineData {
+	/**
+	 * displayed name of the task (e.g. Text Classification)
+	 */
+	name: string;
+	subtasks?: SubTask[];
+	modality: Modality;
+	/**
+	 * color for the tag icon.
+	 */
+	color: "blue" | "green" | "indigo" | "orange" | "red" | "yellow";
+	/**
+	 * whether to hide in /models filters
+	 */
+	hideInModels?: boolean;
+	/**
+	 * whether to hide in /datasets filters
+	 */
+	hideInDatasets?: boolean;
+}
+
+/// Coarse-grained taxonomy of tasks
+///
+/// This type is used in multiple places in the Hugging Face
+/// ecosystem:
+/// - To determine which widget to show.
+/// - To determine which endpoint of Inference API to use.
+/// - As filters at the left of models and datasets page.
+///
+/// Note that this is sensitive to order.
+/// For each domain, the order should be of decreasing specificity.
+/// This will impact the default pipeline tag of a model when not
+/// specified.
+export const PIPELINE_DATA = {
+	"text-classification": {
+		name: "Text Classification",
+		subtasks: [
+			{
+				type: "acceptability-classification",
+				name: "Acceptability Classification",
+			},
+			{
+				type: "entity-linking-classification",
+				name: "Entity Linking Classification",
+			},
+			{
+				type: "fact-checking",
+				name: "Fact Checking",
+			},
+			{
+				type: "intent-classification",
+				name: "Intent Classification",
+			},
+			{
+				type: "language-identification",
+				name: "Language Identification",
+			},
+			{
+				type: "multi-class-classification",
+				name: "Multi Class Classification",
+			},
+			{
+				type: "multi-label-classification",
+				name: "Multi Label Classification",
+			},
+			{
+				type: "multi-input-text-classification",
+				name: "Multi-input Text Classification",
+			},
+			{
+				type: "natural-language-inference",
+				name: "Natural Language Inference",
+			},
+			{
+				type: "semantic-similarity-classification",
+				name: "Semantic Similarity Classification",
+			},
+			{
+				type: "sentiment-classification",
+				name: "Sentiment Classification",
+			},
+			{
+				type: "topic-classification",
+				name: "Topic Classification",
+			},
+			{
+				type: "semantic-similarity-scoring",
+				name: "Semantic Similarity Scoring",
+			},
+			{
+				type: "sentiment-scoring",
+				name: "Sentiment Scoring",
+			},
+			{
+				type: "sentiment-analysis",
+				name: "Sentiment Analysis",
+			},
+			{
+				type: "hate-speech-detection",
+				name: "Hate Speech Detection",
+			},
+			{
+				type: "text-scoring",
+				name: "Text Scoring",
+			},
+		],
+		modality: "nlp",
+		color: "orange",
+	},
+	"token-classification": {
+		name: "Token Classification",
+		subtasks: [
+			{
+				type: "named-entity-recognition",
+				name: "Named Entity Recognition",
+			},
+			{
+				type: "part-of-speech",
+				name: "Part of Speech",
+			},
+			{
+				type: "parsing",
+				name: "Parsing",
+			},
+			{
+				type: "lemmatization",
+				name: "Lemmatization",
+			},
+			{
+				type: "word-sense-disambiguation",
+				name: "Word Sense Disambiguation",
+			},
+			{
+				type: "coreference-resolution",
+				name: "Coreference-resolution",
+			},
+		],
+		modality: "nlp",
+		color: "blue",
+	},
+	"table-question-answering": {
+		name: "Table Question Answering",
+		modality: "nlp",
+		color: "green",
+	},
+	"question-answering": {
+		name: "Question Answering",
+		subtasks: [
+			{
+				type: "extractive-qa",
+				name: "Extractive QA",
+			},
+			{
+				type: "open-domain-qa",
+				name: "Open Domain QA",
+			},
+			{
+				type: "closed-domain-qa",
+				name: "Closed Domain QA",
+			},
+		],
+		modality: "nlp",
+		color: "blue",
+	},
+	"zero-shot-classification": {
+		name: "Zero-Shot Classification",
+		modality: "nlp",
+		color: "yellow",
+	},
+	translation: {
+		name: "Translation",
+		modality: "nlp",
+		color: "green",
+	},
+	summarization: {
+		name: "Summarization",
+		subtasks: [
+			{
+				type: "news-articles-summarization",
+				name: "News Articles Summarization",
+			},
+			{
+				type: "news-articles-headline-generation",
+				name: "News Articles Headline Generation",
+			},
+		],
+		modality: "nlp",
+		color: "indigo",
+	},
+	conversational: {
+		name: "Conversational",
+		subtasks: [
+			{
+				type: "dialogue-generation",
+				name: "Dialogue Generation",
+			},
+		],
+		modality: "nlp",
+		color: "green",
+	},
+	"feature-extraction": {
+		name: "Feature Extraction",
+		modality: "multimodal",
+		color: "red",
+	},
+	"text-generation": {
+		name: "Text Generation",
+		subtasks: [
+			{
+				type: "dialogue-modeling",
+				name: "Dialogue Modeling",
+			},
+			{
+				type: "language-modeling",
+				name: "Language Modeling",
+			},
+		],
+		modality: "nlp",
+		color: "indigo",
+	},
+	"text2text-generation": {
+		name: "Text2Text Generation",
+		subtasks: [
+			{
+				type: "text-simplification",
+				name: "Text simplification",
+			},
+			{
+				type: "explanation-generation",
+				name: "Explanation Generation",
+			},
+			{
+				type: "abstractive-qa",
+				name: "Abstractive QA",
+			},
+			{
+				type: "open-domain-abstractive-qa",
+				name: "Open Domain Abstractive QA",
+			},
+			{
+				type: "closed-domain-qa",
+				name: "Closed Domain QA",
+			},
+			{
+				type: "open-book-qa",
+				name: "Open Book QA",
+			},
+			{
+				type: "closed-book-qa",
+				name: "Closed Book QA",
+			},
+		],
+		modality: "nlp",
+		color: "indigo",
+	},
+	"fill-mask": {
+		name: "Fill-Mask",
+		subtasks: [
+			{
+				type: "slot-filling",
+				name: "Slot Filling",
+			},
+			{
+				type: "masked-language-modeling",
+				name: "Masked Language Modeling",
+			},
+		],
+		modality: "nlp",
+		color: "red",
+	},
+	"sentence-similarity": {
+		name: "Sentence Similarity",
+		modality: "nlp",
+		color: "yellow",
+	},
+	"text-to-speech": {
+		name: "Text-to-Speech",
+		modality: "audio",
+		color: "yellow",
+	},
+	"text-to-audio": {
+		name: "Text-to-Audio",
+		modality: "audio",
+		color: "yellow",
+	},
+	"automatic-speech-recognition": {
+		name: "Automatic Speech Recognition",
+		modality: "audio",
+		color: "yellow",
+	},
+	"audio-to-audio": {
+		name: "Audio-to-Audio",
+		modality: "audio",
+		color: "blue",
+	},
+	"audio-classification": {
+		name: "Audio Classification",
+		subtasks: [
+			{
+				type: "keyword-spotting",
+				name: "Keyword Spotting",
+			},
+			{
+				type: "speaker-identification",
+				name: "Speaker Identification",
+			},
+			{
+				type: "audio-intent-classification",
+				name: "Audio Intent Classification",
+			},
+			{
+				type: "audio-emotion-recognition",
+				name: "Audio Emotion Recognition",
+			},
+			{
+				type: "audio-language-identification",
+				name: "Audio Language Identification",
+			},
+		],
+		modality: "audio",
+		color: "green",
+	},
+	"voice-activity-detection": {
+		name: "Voice Activity Detection",
+		modality: "audio",
+		color: "red",
+	},
+	"depth-estimation": {
+		name: "Depth Estimation",
+		modality: "cv",
+		color: "yellow",
+	},
+	"image-classification": {
+		name: "Image Classification",
+		subtasks: [
+			{
+				type: "multi-label-image-classification",
+				name: "Multi Label Image Classification",
+			},
+			{
+				type: "multi-class-image-classification",
+				name: "Multi Class Image Classification",
+			},
+		],
+		modality: "cv",
+		color: "blue",
+	},
+	"object-detection": {
+		name: "Object Detection",
+		subtasks: [
+			{
+				type: "face-detection",
+				name: "Face Detection",
+			},
+			{
+				type: "vehicle-detection",
+				name: "Vehicle Detection",
+			},
+		],
+		modality: "cv",
+		color: "yellow",
+	},
+	"image-segmentation": {
+		name: "Image Segmentation",
+		subtasks: [
+			{
+				type: "instance-segmentation",
+				name: "Instance Segmentation",
+			},
+			{
+				type: "semantic-segmentation",
+				name: "Semantic Segmentation",
+			},
+			{
+				type: "panoptic-segmentation",
+				name: "Panoptic Segmentation",
+			},
+		],
+		modality: "cv",
+		color: "green",
+	},
+	"text-to-image": {
+		name: "Text-to-Image",
+		modality: "multimodal",
+		color: "yellow",
+	},
+	"image-to-text": {
+		name: "Image-to-Text",
+		subtasks: [
+			{
+				type: "image-captioning",
+				name: "Image Captioning",
+			},
+		],
+		modality: "multimodal",
+		color: "red",
+	},
+	"image-to-image": {
+		name: "Image-to-Image",
+		modality: "cv",
+		color: "indigo",
+	},
+	"unconditional-image-generation": {
+		name: "Unconditional Image Generation",
+		modality: "cv",
+		color: "green",
+	},
+	"video-classification": {
+		name: "Video Classification",
+		modality: "cv",
+		color: "blue",
+	},
+	"reinforcement-learning": {
+		name: "Reinforcement Learning",
+		modality: "rl",
+		color: "red",
+	},
+	robotics: {
+		name: "Robotics",
+		modality: "rl",
+		subtasks: [
+			{
+				type: "grasping",
+				name: "Grasping",
+			},
+			{
+				type: "task-planning",
+				name: "Task Planning",
+			},
+		],
+		color: "blue",
+	},
+	"tabular-classification": {
+		name: "Tabular Classification",
+		modality: "tabular",
+		subtasks: [
+			{
+				type: "tabular-multi-class-classification",
+				name: "Tabular Multi Class Classification",
+			},
+			{
+				type: "tabular-multi-label-classification",
+				name: "Tabular Multi Label Classification",
+			},
+		],
+		color: "blue",
+	},
+	"tabular-regression": {
+		name: "Tabular Regression",
+		modality: "tabular",
+		subtasks: [
+			{
+				type: "tabular-single-column-regression",
+				name: "Tabular Single Column Regression",
+			},
+		],
+		color: "blue",
+	},
+	"tabular-to-text": {
+		name: "Tabular to Text",
+		modality: "tabular",
+		subtasks: [
+			{
+				type: "rdf-to-text",
+				name: "RDF to text",
+			},
+		],
+		color: "blue",
+		hideInModels: true,
+	},
+	"table-to-text": {
+		name: "Table to Text",
+		modality: "nlp",
+		color: "blue",
+		hideInModels: true,
+	},
+	"multiple-choice": {
+		name: "Multiple Choice",
+		subtasks: [
+			{
+				type: "multiple-choice-qa",
+				name: "Multiple Choice QA",
+			},
+			{
+				type: "multiple-choice-coreference-resolution",
+				name: "Multiple Choice Coreference Resolution",
+			},
+		],
+		modality: "nlp",
+		color: "blue",
+		hideInModels: true,
+	},
+	"text-retrieval": {
+		name: "Text Retrieval",
+		subtasks: [
+			{
+				type: "document-retrieval",
+				name: "Document Retrieval",
+			},
+			{
+				type: "utterance-retrieval",
+				name: "Utterance Retrieval",
+			},
+			{
+				type: "entity-linking-retrieval",
+				name: "Entity Linking Retrieval",
+			},
+			{
+				type: "fact-checking-retrieval",
+				name: "Fact Checking Retrieval",
+			},
+		],
+		modality: "nlp",
+		color: "indigo",
+		hideInModels: true,
+	},
+	"time-series-forecasting": {
+		name: "Time Series Forecasting",
+		modality: "tabular",
+		subtasks: [
+			{
+				type: "univariate-time-series-forecasting",
+				name: "Univariate Time Series Forecasting",
+			},
+			{
+				type: "multivariate-time-series-forecasting",
+				name: "Multivariate Time Series Forecasting",
+			},
+		],
+		color: "blue",
+		hideInModels: true,
+	},
+	"text-to-video": {
+		name: "Text-to-Video",
+		modality: "multimodal",
+		color: "green",
+	},
+	"visual-question-answering": {
+		name: "Visual Question Answering",
+		subtasks: [
+			{
+				type: "visual-question-answering",
+				name: "Visual Question Answering",
+			},
+		],
+		modality: "multimodal",
+		color: "red",
+	},
+	"document-question-answering": {
+		name: "Document Question Answering",
+		subtasks: [
+			{
+				type: "document-question-answering",
+				name: "Document Question Answering",
+			},
+		],
+		modality: "multimodal",
+		color: "blue",
+		hideInDatasets: true,
+	},
+	"zero-shot-image-classification": {
+		name: "Zero-Shot Image Classification",
+		modality: "cv",
+		color: "yellow",
+	},
+	"graph-ml": {
+		name: "Graph Machine Learning",
+		modality: "multimodal",
+		color: "green",
+	},
+	other: {
+		name: "Other",
+		modality: "other",
+		color: "blue",
+		hideInModels: true,
+		hideInDatasets: true,
+	},
+} satisfies Record<string, PipelineData>;
+
+export type PipelineType = keyof typeof PIPELINE_DATA;
package/src/placeholder/about.md
CHANGED
@@ -1,10 +1,10 @@
 ## Use Cases
 
-You can contribute this area with common use cases of the task!
+You can contribute this area with common use cases of the task!
 
-## Task Variants
+## Task Variants
 
-This place can be filled with variants of this task if there's any.
+This place can be filled with variants of this task if there's any.
 
 ## Inference
 
package/src/placeholder/data.ts
CHANGED
@@ -2,17 +2,17 @@ import type { TaskDataCustom } from "../Types";
 
 const taskData: TaskDataCustom = {
 	datasets: [],
-	demo:
-		inputs:
+	demo: {
+		inputs: [],
 		outputs: [],
 	},
 	isPlaceholder: true,
-	metrics:
-	models:
-	spaces:
-	summary:
-	widgetModels:
-	youtubeId:
+	metrics: [],
+	models: [],
+	spaces: [],
+	summary: "",
+	widgetModels: [],
+	youtubeId: undefined,
 };
 
 export default taskData;
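The 0.0.3 placeholder had bare keys (`demo:`, `metrics:`, …), which is not valid TypeScript; 0.0.4 gives every field an explicit empty value. For contrast, here is a sketch of how a hypothetical non-placeholder task module might fill the same fields — the inner shapes of the `datasets`, `demo`, `metrics`, and `models` entries are illustrative assumptions, not taken from this diff:

```ts
import type { TaskDataCustom } from "../Types";

// Hypothetical filled-in task module; all ids and descriptions are examples only.
const taskData: TaskDataCustom = {
	datasets: [{ id: "squad", description: "A crowd-sourced extractive QA benchmark." }],
	demo: {
		inputs: [{ label: "Question", content: "Where do I live?", type: "text" }],
		outputs: [{ label: "Answer", content: "In Paris.", type: "text" }],
	},
	metrics: [{ id: "exact_match", description: "Share of predictions that match the reference exactly." }],
	models: [{ id: "deepset/roberta-base-squad2", description: "A solid baseline for extractive QA." }],
	spaces: [],
	summary: "Question answering models extract an answer to a question from a given context.",
	widgetModels: ["deepset/roberta-base-squad2"],
	youtubeId: undefined,
};

export default taskData;
```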
package/src/question-answering/about.md
CHANGED
@@ -53,4 +53,4 @@ Would you like to learn more about QA? Awesome! Here are some curated resources
 
 ### Documentation
 
-- [Question answering task guide](https://huggingface.co/docs/transformers/tasks/question_answering)
+- [Question answering task guide](https://huggingface.co/docs/transformers/tasks/question_answering)