@huggingface/tasks 0.9.0 → 0.10.0

This diff shows the changes between two publicly released versions of the package, as they appear in their public registry. It is provided for informational purposes only.
Files changed (190)
  1. package/README.md +12 -14
  2. package/dist/index.cjs +91 -18
  3. package/dist/index.js +90 -18
  4. package/dist/scripts/inference-codegen.d.ts +2 -0
  5. package/dist/scripts/inference-codegen.d.ts.map +1 -0
  6. package/dist/scripts/inference-tgi-import.d.ts +2 -0
  7. package/dist/scripts/inference-tgi-import.d.ts.map +1 -0
  8. package/dist/src/default-widget-inputs.d.ts +6 -0
  9. package/dist/src/default-widget-inputs.d.ts.map +1 -0
  10. package/dist/src/index.d.ts +17 -0
  11. package/dist/src/index.d.ts.map +1 -0
  12. package/dist/src/library-to-tasks.d.ts +11 -0
  13. package/dist/src/library-to-tasks.d.ts.map +1 -0
  14. package/dist/src/local-apps.d.ts +104 -0
  15. package/dist/src/local-apps.d.ts.map +1 -0
  16. package/dist/src/model-data.d.ts +144 -0
  17. package/dist/src/model-data.d.ts.map +1 -0
  18. package/dist/src/model-libraries-downloads.d.ts +26 -0
  19. package/dist/src/model-libraries-downloads.d.ts.map +1 -0
  20. package/dist/src/model-libraries-snippets.d.ts +43 -0
  21. package/dist/src/model-libraries-snippets.d.ts.map +1 -0
  22. package/dist/src/model-libraries.d.ts +490 -0
  23. package/dist/src/model-libraries.d.ts.map +1 -0
  24. package/dist/src/pipelines.d.ts +404 -0
  25. package/dist/src/pipelines.d.ts.map +1 -0
  26. package/dist/src/snippets/curl.d.ts +9 -0
  27. package/dist/src/snippets/curl.d.ts.map +1 -0
  28. package/dist/src/snippets/index.d.ts +6 -0
  29. package/dist/src/snippets/index.d.ts.map +1 -0
  30. package/dist/src/snippets/inputs.d.ts +3 -0
  31. package/dist/src/snippets/inputs.d.ts.map +1 -0
  32. package/dist/src/snippets/js.d.ts +11 -0
  33. package/dist/src/snippets/js.d.ts.map +1 -0
  34. package/dist/src/snippets/python.d.ts +14 -0
  35. package/dist/src/snippets/python.d.ts.map +1 -0
  36. package/dist/src/snippets/types.d.ts +8 -0
  37. package/dist/src/snippets/types.d.ts.map +1 -0
  38. package/dist/src/tasks/audio-classification/data.d.ts +4 -0
  39. package/dist/src/tasks/audio-classification/data.d.ts.map +1 -0
  40. package/dist/src/tasks/audio-classification/inference.d.ts +52 -0
  41. package/dist/src/tasks/audio-classification/inference.d.ts.map +1 -0
  42. package/dist/src/tasks/audio-to-audio/data.d.ts +4 -0
  43. package/dist/src/tasks/audio-to-audio/data.d.ts.map +1 -0
  44. package/dist/src/tasks/automatic-speech-recognition/data.d.ts +4 -0
  45. package/dist/src/tasks/automatic-speech-recognition/data.d.ts.map +1 -0
  46. package/dist/src/tasks/automatic-speech-recognition/inference.d.ts +154 -0
  47. package/dist/src/tasks/automatic-speech-recognition/inference.d.ts.map +1 -0
  48. package/dist/src/tasks/chat-completion/inference.d.ts +254 -0
  49. package/dist/src/tasks/chat-completion/inference.d.ts.map +1 -0
  50. package/dist/src/tasks/depth-estimation/data.d.ts +4 -0
  51. package/dist/src/tasks/depth-estimation/data.d.ts.map +1 -0
  52. package/dist/src/tasks/depth-estimation/inference.d.ts +36 -0
  53. package/dist/src/tasks/depth-estimation/inference.d.ts.map +1 -0
  54. package/dist/src/tasks/document-question-answering/data.d.ts +4 -0
  55. package/dist/src/tasks/document-question-answering/data.d.ts.map +1 -0
  56. package/dist/src/tasks/document-question-answering/inference.d.ts +111 -0
  57. package/dist/src/tasks/document-question-answering/inference.d.ts.map +1 -0
  58. package/dist/src/tasks/feature-extraction/data.d.ts +4 -0
  59. package/dist/src/tasks/feature-extraction/data.d.ts.map +1 -0
  60. package/dist/src/tasks/feature-extraction/inference.d.ts +23 -0
  61. package/dist/src/tasks/feature-extraction/inference.d.ts.map +1 -0
  62. package/dist/src/tasks/fill-mask/data.d.ts +4 -0
  63. package/dist/src/tasks/fill-mask/data.d.ts.map +1 -0
  64. package/dist/src/tasks/fill-mask/inference.d.ts +63 -0
  65. package/dist/src/tasks/fill-mask/inference.d.ts.map +1 -0
  66. package/dist/src/tasks/image-classification/data.d.ts +4 -0
  67. package/dist/src/tasks/image-classification/data.d.ts.map +1 -0
  68. package/dist/src/tasks/image-classification/inference.d.ts +52 -0
  69. package/dist/src/tasks/image-classification/inference.d.ts.map +1 -0
  70. package/dist/src/tasks/image-feature-extraction/data.d.ts +4 -0
  71. package/dist/src/tasks/image-feature-extraction/data.d.ts.map +1 -0
  72. package/dist/src/tasks/image-segmentation/data.d.ts +4 -0
  73. package/dist/src/tasks/image-segmentation/data.d.ts.map +1 -0
  74. package/dist/src/tasks/image-segmentation/inference.d.ts +66 -0
  75. package/dist/src/tasks/image-segmentation/inference.d.ts.map +1 -0
  76. package/dist/src/tasks/image-to-image/data.d.ts +4 -0
  77. package/dist/src/tasks/image-to-image/data.d.ts.map +1 -0
  78. package/dist/src/tasks/image-to-image/inference.d.ts +64 -0
  79. package/dist/src/tasks/image-to-image/inference.d.ts.map +1 -0
  80. package/dist/src/tasks/image-to-text/data.d.ts +4 -0
  81. package/dist/src/tasks/image-to-text/data.d.ts.map +1 -0
  82. package/dist/src/tasks/image-to-text/inference.d.ts +139 -0
  83. package/dist/src/tasks/image-to-text/inference.d.ts.map +1 -0
  84. package/dist/src/tasks/index.d.ts +87 -0
  85. package/dist/src/tasks/index.d.ts.map +1 -0
  86. package/dist/src/tasks/mask-generation/data.d.ts +4 -0
  87. package/dist/src/tasks/mask-generation/data.d.ts.map +1 -0
  88. package/dist/src/tasks/object-detection/data.d.ts +4 -0
  89. package/dist/src/tasks/object-detection/data.d.ts.map +1 -0
  90. package/dist/src/tasks/object-detection/inference.d.ts +63 -0
  91. package/dist/src/tasks/object-detection/inference.d.ts.map +1 -0
  92. package/dist/src/tasks/placeholder/data.d.ts +4 -0
  93. package/dist/src/tasks/placeholder/data.d.ts.map +1 -0
  94. package/dist/src/tasks/question-answering/data.d.ts +4 -0
  95. package/dist/src/tasks/question-answering/data.d.ts.map +1 -0
  96. package/dist/src/tasks/question-answering/inference.d.ts +100 -0
  97. package/dist/src/tasks/question-answering/inference.d.ts.map +1 -0
  98. package/dist/src/tasks/reinforcement-learning/data.d.ts +4 -0
  99. package/dist/src/tasks/reinforcement-learning/data.d.ts.map +1 -0
  100. package/dist/src/tasks/sentence-similarity/data.d.ts +4 -0
  101. package/dist/src/tasks/sentence-similarity/data.d.ts.map +1 -0
  102. package/dist/src/tasks/sentence-similarity/inference.d.ts +32 -0
  103. package/dist/src/tasks/sentence-similarity/inference.d.ts.map +1 -0
  104. package/dist/src/tasks/summarization/data.d.ts +4 -0
  105. package/dist/src/tasks/summarization/data.d.ts.map +1 -0
  106. package/dist/src/tasks/summarization/inference.d.ts +55 -0
  107. package/dist/src/tasks/summarization/inference.d.ts.map +1 -0
  108. package/dist/src/tasks/table-question-answering/data.d.ts +4 -0
  109. package/dist/src/tasks/table-question-answering/data.d.ts.map +1 -0
  110. package/dist/src/tasks/table-question-answering/inference.d.ts +62 -0
  111. package/dist/src/tasks/table-question-answering/inference.d.ts.map +1 -0
  112. package/dist/src/tasks/tabular-classification/data.d.ts +4 -0
  113. package/dist/src/tasks/tabular-classification/data.d.ts.map +1 -0
  114. package/dist/src/tasks/tabular-regression/data.d.ts +4 -0
  115. package/dist/src/tasks/tabular-regression/data.d.ts.map +1 -0
  116. package/dist/src/tasks/text-classification/data.d.ts +4 -0
  117. package/dist/src/tasks/text-classification/data.d.ts.map +1 -0
  118. package/dist/src/tasks/text-classification/inference.d.ts +52 -0
  119. package/dist/src/tasks/text-classification/inference.d.ts.map +1 -0
  120. package/dist/src/tasks/text-generation/data.d.ts +4 -0
  121. package/dist/src/tasks/text-generation/data.d.ts.map +1 -0
  122. package/dist/src/tasks/text-generation/inference.d.ts +126 -0
  123. package/dist/src/tasks/text-generation/inference.d.ts.map +1 -0
  124. package/dist/src/tasks/text-to-audio/inference.d.ts +139 -0
  125. package/dist/src/tasks/text-to-audio/inference.d.ts.map +1 -0
  126. package/dist/src/tasks/text-to-image/data.d.ts +4 -0
  127. package/dist/src/tasks/text-to-image/data.d.ts.map +1 -0
  128. package/dist/src/tasks/text-to-image/inference.d.ts +68 -0
  129. package/dist/src/tasks/text-to-image/inference.d.ts.map +1 -0
  130. package/dist/src/tasks/text-to-speech/data.d.ts +4 -0
  131. package/dist/src/tasks/text-to-speech/data.d.ts.map +1 -0
  132. package/dist/src/tasks/text-to-speech/inference.d.ts +143 -0
  133. package/dist/src/tasks/text-to-speech/inference.d.ts.map +1 -0
  134. package/dist/src/tasks/text-to-video/data.d.ts +4 -0
  135. package/dist/src/tasks/text-to-video/data.d.ts.map +1 -0
  136. package/dist/src/tasks/text2text-generation/inference.d.ts +54 -0
  137. package/dist/src/tasks/text2text-generation/inference.d.ts.map +1 -0
  138. package/dist/src/tasks/token-classification/data.d.ts +4 -0
  139. package/dist/src/tasks/token-classification/data.d.ts.map +1 -0
  140. package/dist/src/tasks/token-classification/inference.d.ts +83 -0
  141. package/dist/src/tasks/token-classification/inference.d.ts.map +1 -0
  142. package/dist/src/tasks/translation/data.d.ts +4 -0
  143. package/dist/src/tasks/translation/data.d.ts.map +1 -0
  144. package/dist/src/tasks/translation/inference.d.ts +55 -0
  145. package/dist/src/tasks/translation/inference.d.ts.map +1 -0
  146. package/dist/src/tasks/unconditional-image-generation/data.d.ts +4 -0
  147. package/dist/src/tasks/unconditional-image-generation/data.d.ts.map +1 -0
  148. package/dist/src/tasks/video-classification/data.d.ts +4 -0
  149. package/dist/src/tasks/video-classification/data.d.ts.map +1 -0
  150. package/dist/src/tasks/video-classification/inference.d.ts +60 -0
  151. package/dist/src/tasks/video-classification/inference.d.ts.map +1 -0
  152. package/dist/src/tasks/visual-question-answering/data.d.ts +4 -0
  153. package/dist/src/tasks/visual-question-answering/data.d.ts.map +1 -0
  154. package/dist/src/tasks/visual-question-answering/inference.d.ts +64 -0
  155. package/dist/src/tasks/visual-question-answering/inference.d.ts.map +1 -0
  156. package/dist/src/tasks/zero-shot-classification/data.d.ts +4 -0
  157. package/dist/src/tasks/zero-shot-classification/data.d.ts.map +1 -0
  158. package/dist/src/tasks/zero-shot-classification/inference.d.ts +68 -0
  159. package/dist/src/tasks/zero-shot-classification/inference.d.ts.map +1 -0
  160. package/dist/src/tasks/zero-shot-image-classification/data.d.ts +4 -0
  161. package/dist/src/tasks/zero-shot-image-classification/data.d.ts.map +1 -0
  162. package/dist/src/tasks/zero-shot-image-classification/inference.d.ts +62 -0
  163. package/dist/src/tasks/zero-shot-image-classification/inference.d.ts.map +1 -0
  164. package/dist/src/tasks/zero-shot-object-detection/data.d.ts +4 -0
  165. package/dist/src/tasks/zero-shot-object-detection/data.d.ts.map +1 -0
  166. package/dist/src/tasks/zero-shot-object-detection/inference.d.ts +67 -0
  167. package/dist/src/tasks/zero-shot-object-detection/inference.d.ts.map +1 -0
  168. package/dist/src/tokenizer-data.d.ts +26 -0
  169. package/dist/src/tokenizer-data.d.ts.map +1 -0
  170. package/dist/src/widget-example.d.ts +86 -0
  171. package/dist/src/widget-example.d.ts.map +1 -0
  172. package/package.json +11 -6
  173. package/src/index.ts +3 -0
  174. package/src/local-apps.ts +119 -0
  175. package/src/model-data.ts +1 -5
  176. package/src/model-libraries-snippets.ts +21 -18
  177. package/src/model-libraries.ts +9 -0
  178. package/src/tasks/chat-completion/inference.ts +204 -85
  179. package/src/tasks/chat-completion/spec/input.json +198 -34
  180. package/src/tasks/chat-completion/spec/output.json +178 -40
  181. package/src/tasks/chat-completion/spec/stream_output.json +170 -0
  182. package/src/tasks/index.ts +7 -8
  183. package/src/tasks/text-generation/inference.ts +58 -170
  184. package/src/tasks/text-generation/spec/input.json +130 -29
  185. package/src/tasks/text-generation/spec/output.json +104 -90
  186. package/src/tasks/text-generation/spec/stream_output.json +97 -0
  187. package/tsconfig.json +3 -1
  188. package/dist/index.d.ts +0 -3531
  189. package/src/tasks/chat-completion/spec/output_stream.json +0 -48
  190. package/src/tasks/text-generation/spec/output_stream.json +0 -47
package/dist/src/pipelines.d.ts
@@ -0,0 +1,404 @@
+ export declare const MODALITIES: readonly ["cv", "nlp", "audio", "tabular", "multimodal", "rl", "other"];
+ export type Modality = (typeof MODALITIES)[number];
+ export declare const MODALITY_LABELS: {
+     multimodal: string;
+     nlp: string;
+     audio: string;
+     cv: string;
+     rl: string;
+     tabular: string;
+     other: string;
+ };
+ /**
+  * Public interface for a sub task.
+  *
+  * This can be used in a model card's `model-index` metadata.
+  * and is more granular classification that can grow significantly
+  * over time as new tasks are added.
+  */
+ export interface SubTask {
+     /**
+      * type of the task (e.g. audio-source-separation)
+      */
+     type: string;
+     /**
+      * displayed name of the task (e.g. Audio Source Separation)
+      */
+     name: string;
+ }
+ /**
+  * Public interface for a PipelineData.
+  *
+  * This information corresponds to a pipeline type (aka task)
+  * in the Hub.
+  */
+ export interface PipelineData {
+     /**
+      * displayed name of the task (e.g. Text Classification)
+      */
+     name: string;
+     subtasks?: SubTask[];
+     modality: Modality;
+     /**
+      * color for the tag icon.
+      */
+     color: "blue" | "green" | "indigo" | "orange" | "red" | "yellow";
+     /**
+      * whether to hide in /models filters
+      */
+     hideInModels?: boolean;
+     /**
+      * whether to hide in /datasets filters
+      */
+     hideInDatasets?: boolean;
+ }
+ export declare const PIPELINE_DATA: {
+     "text-classification": {
+         name: string;
+         subtasks: {
+             type: string;
+             name: string;
+         }[];
+         modality: "nlp";
+         color: "orange";
+     };
+     "token-classification": {
+         name: string;
+         subtasks: {
+             type: string;
+             name: string;
+         }[];
+         modality: "nlp";
+         color: "blue";
+     };
+     "table-question-answering": {
+         name: string;
+         modality: "nlp";
+         color: "green";
+     };
+     "question-answering": {
+         name: string;
+         subtasks: {
+             type: string;
+             name: string;
+         }[];
+         modality: "nlp";
+         color: "blue";
+     };
+     "zero-shot-classification": {
+         name: string;
+         modality: "nlp";
+         color: "yellow";
+     };
+     translation: {
+         name: string;
+         modality: "nlp";
+         color: "green";
+     };
+     summarization: {
+         name: string;
+         subtasks: {
+             type: string;
+             name: string;
+         }[];
+         modality: "nlp";
+         color: "indigo";
+     };
+     "feature-extraction": {
+         name: string;
+         modality: "nlp";
+         color: "red";
+     };
+     "text-generation": {
+         name: string;
+         subtasks: {
+             type: string;
+             name: string;
+         }[];
+         modality: "nlp";
+         color: "indigo";
+     };
+     "text2text-generation": {
+         name: string;
+         subtasks: {
+             type: string;
+             name: string;
+         }[];
+         modality: "nlp";
+         color: "indigo";
+     };
+     "fill-mask": {
+         name: string;
+         subtasks: {
+             type: string;
+             name: string;
+         }[];
+         modality: "nlp";
+         color: "red";
+     };
+     "sentence-similarity": {
+         name: string;
+         modality: "nlp";
+         color: "yellow";
+     };
+     "text-to-speech": {
+         name: string;
+         modality: "audio";
+         color: "yellow";
+     };
+     "text-to-audio": {
+         name: string;
+         modality: "audio";
+         color: "yellow";
+     };
+     "automatic-speech-recognition": {
+         name: string;
+         modality: "audio";
+         color: "yellow";
+     };
+     "audio-to-audio": {
+         name: string;
+         modality: "audio";
+         color: "blue";
+     };
+     "audio-classification": {
+         name: string;
+         subtasks: {
+             type: string;
+             name: string;
+         }[];
+         modality: "audio";
+         color: "green";
+     };
+     "voice-activity-detection": {
+         name: string;
+         modality: "audio";
+         color: "red";
+     };
+     "depth-estimation": {
+         name: string;
+         modality: "cv";
+         color: "yellow";
+     };
+     "image-classification": {
+         name: string;
+         subtasks: {
+             type: string;
+             name: string;
+         }[];
+         modality: "cv";
+         color: "blue";
+     };
+     "object-detection": {
+         name: string;
+         subtasks: {
+             type: string;
+             name: string;
+         }[];
+         modality: "cv";
+         color: "yellow";
+     };
+     "image-segmentation": {
+         name: string;
+         subtasks: {
+             type: string;
+             name: string;
+         }[];
+         modality: "cv";
+         color: "green";
+     };
+     "text-to-image": {
+         name: string;
+         modality: "cv";
+         color: "yellow";
+     };
+     "image-to-text": {
+         name: string;
+         subtasks: {
+             type: string;
+             name: string;
+         }[];
+         modality: "cv";
+         color: "red";
+     };
+     "image-to-image": {
+         name: string;
+         subtasks: {
+             type: string;
+             name: string;
+         }[];
+         modality: "cv";
+         color: "indigo";
+     };
+     "image-to-video": {
+         name: string;
+         modality: "cv";
+         color: "indigo";
+     };
+     "unconditional-image-generation": {
+         name: string;
+         modality: "cv";
+         color: "green";
+     };
+     "video-classification": {
+         name: string;
+         modality: "cv";
+         color: "blue";
+     };
+     "reinforcement-learning": {
+         name: string;
+         modality: "rl";
+         color: "red";
+     };
+     robotics: {
+         name: string;
+         modality: "rl";
+         subtasks: {
+             type: string;
+             name: string;
+         }[];
+         color: "blue";
+     };
+     "tabular-classification": {
+         name: string;
+         modality: "tabular";
+         subtasks: {
+             type: string;
+             name: string;
+         }[];
+         color: "blue";
+     };
+     "tabular-regression": {
+         name: string;
+         modality: "tabular";
+         subtasks: {
+             type: string;
+             name: string;
+         }[];
+         color: "blue";
+     };
+     "tabular-to-text": {
+         name: string;
+         modality: "tabular";
+         subtasks: {
+             type: string;
+             name: string;
+         }[];
+         color: "blue";
+         hideInModels: true;
+     };
+     "table-to-text": {
+         name: string;
+         modality: "nlp";
+         color: "blue";
+         hideInModels: true;
+     };
+     "multiple-choice": {
+         name: string;
+         subtasks: {
+             type: string;
+             name: string;
+         }[];
+         modality: "nlp";
+         color: "blue";
+         hideInModels: true;
+     };
+     "text-retrieval": {
+         name: string;
+         subtasks: {
+             type: string;
+             name: string;
+         }[];
+         modality: "nlp";
+         color: "indigo";
+         hideInModels: true;
+     };
+     "time-series-forecasting": {
+         name: string;
+         modality: "tabular";
+         subtasks: {
+             type: string;
+             name: string;
+         }[];
+         color: "blue";
+         hideInModels: true;
+     };
+     "text-to-video": {
+         name: string;
+         modality: "cv";
+         color: "green";
+     };
+     "image-text-to-text": {
+         name: string;
+         modality: "multimodal";
+         color: "red";
+         hideInDatasets: true;
+     };
+     "visual-question-answering": {
+         name: string;
+         subtasks: {
+             type: string;
+             name: string;
+         }[];
+         modality: "multimodal";
+         color: "red";
+     };
+     "document-question-answering": {
+         name: string;
+         subtasks: {
+             type: string;
+             name: string;
+         }[];
+         modality: "multimodal";
+         color: "blue";
+         hideInDatasets: true;
+     };
+     "zero-shot-image-classification": {
+         name: string;
+         modality: "cv";
+         color: "yellow";
+     };
+     "graph-ml": {
+         name: string;
+         modality: "other";
+         color: "green";
+     };
+     "mask-generation": {
+         name: string;
+         modality: "cv";
+         color: "indigo";
+     };
+     "zero-shot-object-detection": {
+         name: string;
+         modality: "cv";
+         color: "yellow";
+     };
+     "text-to-3d": {
+         name: string;
+         modality: "cv";
+         color: "yellow";
+     };
+     "image-to-3d": {
+         name: string;
+         modality: "cv";
+         color: "green";
+     };
+     "image-feature-extraction": {
+         name: string;
+         modality: "cv";
+         color: "indigo";
+     };
+     other: {
+         name: string;
+         modality: "other";
+         color: "blue";
+         hideInModels: true;
+         hideInDatasets: true;
+     };
+ };
+ export type PipelineType = keyof typeof PIPELINE_DATA;
+ export type WidgetType = PipelineType | "conversational";
+ export declare const PIPELINE_TYPES: ("other" | "text-classification" | "token-classification" | "table-question-answering" | "question-answering" | "zero-shot-classification" | "translation" | "summarization" | "feature-extraction" | "text-generation" | "text2text-generation" | "fill-mask" | "sentence-similarity" | "text-to-speech" | "text-to-audio" | "automatic-speech-recognition" | "audio-to-audio" | "audio-classification" | "voice-activity-detection" | "depth-estimation" | "image-classification" | "object-detection" | "image-segmentation" | "text-to-image" | "image-to-text" | "image-to-image" | "image-to-video" | "unconditional-image-generation" | "video-classification" | "reinforcement-learning" | "robotics" | "tabular-classification" | "tabular-regression" | "tabular-to-text" | "table-to-text" | "multiple-choice" | "text-retrieval" | "time-series-forecasting" | "text-to-video" | "image-text-to-text" | "visual-question-answering" | "document-question-answering" | "zero-shot-image-classification" | "graph-ml" | "mask-generation" | "zero-shot-object-detection" | "text-to-3d" | "image-to-3d" | "image-feature-extraction")[];
+ export declare const SUBTASK_TYPES: string[];
+ export declare const PIPELINE_TYPES_SET: Set<"other" | "text-classification" | "token-classification" | "table-question-answering" | "question-answering" | "zero-shot-classification" | "translation" | "summarization" | "feature-extraction" | "text-generation" | "text2text-generation" | "fill-mask" | "sentence-similarity" | "text-to-speech" | "text-to-audio" | "automatic-speech-recognition" | "audio-to-audio" | "audio-classification" | "voice-activity-detection" | "depth-estimation" | "image-classification" | "object-detection" | "image-segmentation" | "text-to-image" | "image-to-text" | "image-to-image" | "image-to-video" | "unconditional-image-generation" | "video-classification" | "reinforcement-learning" | "robotics" | "tabular-classification" | "tabular-regression" | "tabular-to-text" | "table-to-text" | "multiple-choice" | "text-retrieval" | "time-series-forecasting" | "text-to-video" | "image-text-to-text" | "visual-question-answering" | "document-question-answering" | "zero-shot-image-classification" | "graph-ml" | "mask-generation" | "zero-shot-object-detection" | "text-to-3d" | "image-to-3d" | "image-feature-extraction">;
+ //# sourceMappingURL=pipelines.d.ts.map
package/dist/src/pipelines.d.ts.map
@@ -0,0 +1 @@
+ {"version":3,"file":"pipelines.d.ts","sourceRoot":"","sources":["../../src/pipelines.ts"],"names":[],"mappings":"AAAA,eAAO,MAAM,UAAU,yEAA0E,CAAC;AAElG,MAAM,MAAM,QAAQ,GAAG,CAAC,OAAO,UAAU,CAAC,CAAC,MAAM,CAAC,CAAC;AAEnD,eAAO,MAAM,eAAe;;;;;;;;CAQQ,CAAC;AAErC;;;;;;GAMG;AACH,MAAM,WAAW,OAAO;IACvB;;OAEG;IACH,IAAI,EAAE,MAAM,CAAC;IACb;;OAEG;IACH,IAAI,EAAE,MAAM,CAAC;CACb;AAED;;;;;GAKG;AACH,MAAM,WAAW,YAAY;IAC5B;;OAEG;IACH,IAAI,EAAE,MAAM,CAAC;IACb,QAAQ,CAAC,EAAE,OAAO,EAAE,CAAC;IACrB,QAAQ,EAAE,QAAQ,CAAC;IACnB;;OAEG;IACH,KAAK,EAAE,MAAM,GAAG,OAAO,GAAG,QAAQ,GAAG,QAAQ,GAAG,KAAK,GAAG,QAAQ,CAAC;IACjE;;OAEG;IACH,YAAY,CAAC,EAAE,OAAO,CAAC;IACvB;;OAEG;IACH,cAAc,CAAC,EAAE,OAAO,CAAC;CACzB;AAcD,eAAO,MAAM,aAAa;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;CA+kBc,CAAC;AAEzC,MAAM,MAAM,YAAY,GAAG,MAAM,OAAO,aAAa,CAAC;AAEtD,MAAM,MAAM,UAAU,GAAG,YAAY,GAAG,gBAAgB,CAAC;AAEzD,eAAO,MAAM,cAAc,mlCAA+C,CAAC;AAE3E,eAAO,MAAM,aAAa,UAEN,CAAC;AAErB,eAAO,MAAM,kBAAkB,olCAA0B,CAAC"}
package/dist/src/snippets/curl.d.ts
@@ -0,0 +1,9 @@
+ import type { PipelineType } from "../pipelines.js";
+ import type { ModelDataMinimal } from "./types.js";
+ export declare const snippetBasic: (model: ModelDataMinimal, accessToken: string) => string;
+ export declare const snippetZeroShotClassification: (model: ModelDataMinimal, accessToken: string) => string;
+ export declare const snippetFile: (model: ModelDataMinimal, accessToken: string) => string;
+ export declare const curlSnippets: Partial<Record<PipelineType, (model: ModelDataMinimal, accessToken: string) => string>>;
+ export declare function getCurlInferenceSnippet(model: ModelDataMinimal, accessToken: string): string;
+ export declare function hasCurlInferenceSnippet(model: Pick<ModelDataMinimal, "pipeline_tag">): boolean;
+ //# sourceMappingURL=curl.d.ts.map
package/dist/src/snippets/curl.d.ts.map
@@ -0,0 +1 @@
+ {"version":3,"file":"curl.d.ts","sourceRoot":"","sources":["../../../src/snippets/curl.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,YAAY,EAAE,MAAM,iBAAiB,CAAC;AAEpD,OAAO,KAAK,EAAE,gBAAgB,EAAE,MAAM,YAAY,CAAC;AAEnD,eAAO,MAAM,YAAY,UAAW,gBAAgB,eAAe,MAAM,KAAG,MAM3E,CAAC;AAEF,eAAO,MAAM,6BAA6B,UAAW,gBAAgB,eAAe,MAAM,KAAG,MAM5F,CAAC;AAEF,eAAO,MAAM,WAAW,UAAW,gBAAgB,eAAe,MAAM,KAAG,MAK1E,CAAC;AAEF,eAAO,MAAM,YAAY,EAAE,OAAO,CAAC,MAAM,CAAC,YAAY,EAAE,CAAC,KAAK,EAAE,gBAAgB,EAAE,WAAW,EAAE,MAAM,KAAK,MAAM,CAAC,CAwBhH,CAAC;AAEF,wBAAgB,uBAAuB,CAAC,KAAK,EAAE,gBAAgB,EAAE,WAAW,EAAE,MAAM,GAAG,MAAM,CAI5F;AAED,wBAAgB,uBAAuB,CAAC,KAAK,EAAE,IAAI,CAAC,gBAAgB,EAAE,cAAc,CAAC,GAAG,OAAO,CAE9F"}
package/dist/src/snippets/index.d.ts
@@ -0,0 +1,6 @@
+ import * as inputs from "./inputs";
+ import * as curl from "./curl";
+ import * as python from "./python";
+ import * as js from "./js";
+ export { inputs, curl, python, js };
+ //# sourceMappingURL=index.d.ts.map
package/dist/src/snippets/index.d.ts.map
@@ -0,0 +1 @@
+ {"version":3,"file":"index.d.ts","sourceRoot":"","sources":["../../../src/snippets/index.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,MAAM,MAAM,UAAU,CAAC;AACnC,OAAO,KAAK,IAAI,MAAM,QAAQ,CAAC;AAC/B,OAAO,KAAK,MAAM,MAAM,UAAU,CAAC;AACnC,OAAO,KAAK,EAAE,MAAM,MAAM,CAAC;AAE3B,OAAO,EAAE,MAAM,EAAE,IAAI,EAAE,MAAM,EAAE,EAAE,EAAE,CAAC"}
package/dist/src/snippets/inputs.d.ts
@@ -0,0 +1,3 @@
+ import type { ModelDataMinimal } from "./types";
+ export declare function getModelInputSnippet(model: ModelDataMinimal, noWrap?: boolean, noQuotes?: boolean): string;
+ //# sourceMappingURL=inputs.d.ts.map
package/dist/src/snippets/inputs.d.ts.map
@@ -0,0 +1 @@
+ {"version":3,"file":"inputs.d.ts","sourceRoot":"","sources":["../../../src/snippets/inputs.ts"],"names":[],"mappings":"AACA,OAAO,KAAK,EAAE,gBAAgB,EAAE,MAAM,SAAS,CAAC;AAqHhD,wBAAgB,oBAAoB,CAAC,KAAK,EAAE,gBAAgB,EAAE,MAAM,UAAQ,EAAE,QAAQ,UAAQ,GAAG,MAAM,CAiBtG"}
package/dist/src/snippets/js.d.ts
@@ -0,0 +1,11 @@
+ import type { PipelineType } from "../pipelines.js";
+ import type { ModelDataMinimal } from "./types.js";
+ export declare const snippetBasic: (model: ModelDataMinimal, accessToken: string) => string;
+ export declare const snippetZeroShotClassification: (model: ModelDataMinimal, accessToken: string) => string;
+ export declare const snippetTextToImage: (model: ModelDataMinimal, accessToken: string) => string;
+ export declare const snippetTextToAudio: (model: ModelDataMinimal, accessToken: string) => string;
+ export declare const snippetFile: (model: ModelDataMinimal, accessToken: string) => string;
+ export declare const jsSnippets: Partial<Record<PipelineType, (model: ModelDataMinimal, accessToken: string) => string>>;
+ export declare function getJsInferenceSnippet(model: ModelDataMinimal, accessToken: string): string;
+ export declare function hasJsInferenceSnippet(model: ModelDataMinimal): boolean;
+ //# sourceMappingURL=js.d.ts.map
package/dist/src/snippets/js.d.ts.map
@@ -0,0 +1 @@
+ {"version":3,"file":"js.d.ts","sourceRoot":"","sources":["../../../src/snippets/js.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,YAAY,EAAE,MAAM,iBAAiB,CAAC;AAEpD,OAAO,KAAK,EAAE,gBAAgB,EAAE,MAAM,YAAY,CAAC;AAEnD,eAAO,MAAM,YAAY,UAAW,gBAAgB,eAAe,MAAM,KAAG,MAgBxE,CAAC;AAEL,eAAO,MAAM,6BAA6B,UAAW,gBAAgB,eAAe,MAAM,KAAG,MAkBzF,CAAC;AAEL,eAAO,MAAM,kBAAkB,UAAW,gBAAgB,eAAe,MAAM,KAAG,MAe9E,CAAC;AAEL,eAAO,MAAM,kBAAkB,UAAW,gBAAgB,eAAe,MAAM,KAAG,MAkCjF,CAAC;AAEF,eAAO,MAAM,WAAW,UAAW,gBAAgB,eAAe,MAAM,KAAG,MAiBvE,CAAC;AAEL,eAAO,MAAM,UAAU,EAAE,OAAO,CAAC,MAAM,CAAC,YAAY,EAAE,CAAC,KAAK,EAAE,gBAAgB,EAAE,WAAW,EAAE,MAAM,KAAK,MAAM,CAAC,CAwB9G,CAAC;AAEF,wBAAgB,qBAAqB,CAAC,KAAK,EAAE,gBAAgB,EAAE,WAAW,EAAE,MAAM,GAAG,MAAM,CAI1F;AAED,wBAAgB,qBAAqB,CAAC,KAAK,EAAE,gBAAgB,GAAG,OAAO,CAEtE"}
package/dist/src/snippets/python.d.ts
@@ -0,0 +1,14 @@
+ import type { PipelineType } from "../pipelines.js";
+ import type { ModelDataMinimal } from "./types.js";
+ export declare const snippetZeroShotClassification: (model: ModelDataMinimal) => string;
+ export declare const snippetZeroShotImageClassification: (model: ModelDataMinimal) => string;
+ export declare const snippetBasic: (model: ModelDataMinimal) => string;
+ export declare const snippetFile: (model: ModelDataMinimal) => string;
+ export declare const snippetTextToImage: (model: ModelDataMinimal) => string;
+ export declare const snippetTabular: (model: ModelDataMinimal) => string;
+ export declare const snippetTextToAudio: (model: ModelDataMinimal) => string;
+ export declare const snippetDocumentQuestionAnswering: (model: ModelDataMinimal) => string;
+ export declare const pythonSnippets: Partial<Record<PipelineType, (model: ModelDataMinimal) => string>>;
+ export declare function getPythonInferenceSnippet(model: ModelDataMinimal, accessToken: string): string;
+ export declare function hasPythonInferenceSnippet(model: ModelDataMinimal): boolean;
+ //# sourceMappingURL=python.d.ts.map
package/dist/src/snippets/python.d.ts.map
@@ -0,0 +1 @@
+ {"version":3,"file":"python.d.ts","sourceRoot":"","sources":["../../../src/snippets/python.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,YAAY,EAAE,MAAM,iBAAiB,CAAC;AAEpD,OAAO,KAAK,EAAE,gBAAgB,EAAE,MAAM,YAAY,CAAC;AAEnD,eAAO,MAAM,6BAA6B,UAAW,gBAAgB,KAAG,MAQrE,CAAC;AAEJ,eAAO,MAAM,kCAAkC,UAAW,gBAAgB,KAAG,MAc1E,CAAC;AAEJ,eAAO,MAAM,YAAY,UAAW,gBAAgB,KAAG,MAOpD,CAAC;AAEJ,eAAO,MAAM,WAAW,UAAW,gBAAgB,KAAG,MAOP,CAAC;AAEhD,eAAO,MAAM,kBAAkB,UAAW,gBAAgB,KAAG,MAUjB,CAAC;AAE7C,eAAO,MAAM,cAAc,UAAW,gBAAgB,KAAG,MAMtD,CAAC;AAEJ,eAAO,MAAM,kBAAkB,UAAW,gBAAgB,KAAG,MA2B5D,CAAC;AAEF,eAAO,MAAM,gCAAgC,UAAW,gBAAgB,KAAG,MAUxE,CAAC;AAEJ,eAAO,MAAM,cAAc,EAAE,OAAO,CAAC,MAAM,CAAC,YAAY,EAAE,CAAC,KAAK,EAAE,gBAAgB,KAAK,MAAM,CAAC,CA4B7F,CAAC;AAEF,wBAAgB,yBAAyB,CAAC,KAAK,EAAE,gBAAgB,EAAE,WAAW,EAAE,MAAM,GAAG,MAAM,CAU9F;AAED,wBAAgB,yBAAyB,CAAC,KAAK,EAAE,gBAAgB,GAAG,OAAO,CAE1E"}
package/dist/src/snippets/types.d.ts
@@ -0,0 +1,8 @@
+ import type { ModelData } from "../model-data";
+ /**
+  * Minimal model data required for snippets.
+  *
+  * Add more fields as needed.
+  */
+ export type ModelDataMinimal = Pick<ModelData, "id" | "pipeline_tag" | "mask_token" | "library_name">;
+ //# sourceMappingURL=types.d.ts.map
package/dist/src/snippets/types.d.ts.map
@@ -0,0 +1 @@
+ {"version":3,"file":"types.d.ts","sourceRoot":"","sources":["../../../src/snippets/types.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,SAAS,EAAE,MAAM,eAAe,CAAC;AAE/C;;;;GAIG;AACH,MAAM,MAAM,gBAAgB,GAAG,IAAI,CAAC,SAAS,EAAE,IAAI,GAAG,cAAc,GAAG,YAAY,GAAG,cAAc,CAAC,CAAC"}
package/dist/src/tasks/audio-classification/data.d.ts
@@ -0,0 +1,4 @@
+ import type { TaskDataCustom } from "..";
+ declare const taskData: TaskDataCustom;
+ export default taskData;
+ //# sourceMappingURL=data.d.ts.map
package/dist/src/tasks/audio-classification/data.d.ts.map
@@ -0,0 +1 @@
+ {"version":3,"file":"data.d.ts","sourceRoot":"","sources":["../../../../src/tasks/audio-classification/data.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,cAAc,EAAE,MAAM,IAAI,CAAC;AAEzC,QAAA,MAAM,QAAQ,EAAE,cAwEf,CAAC;AAEF,eAAe,QAAQ,CAAC"}
package/dist/src/tasks/audio-classification/inference.d.ts
@@ -0,0 +1,52 @@
+ /**
+  * Inference code generated from the JSON schema spec in ./spec
+  *
+  * Using src/scripts/inference-codegen
+  */
+ /**
+  * Inputs for Audio Classification inference
+  */
+ export interface AudioClassificationInput {
+     /**
+      * The input audio data
+      */
+     inputs: unknown;
+     /**
+      * Additional inference parameters
+      */
+     parameters?: AudioClassificationParameters;
+     [property: string]: unknown;
+ }
+ /**
+  * Additional inference parameters
+  *
+  * Additional inference parameters for Audio Classification
+  */
+ export interface AudioClassificationParameters {
+     function_to_apply?: ClassificationOutputTransform;
+     /**
+      * When specified, limits the output to the top K most probable classes.
+      */
+     top_k?: number;
+     [property: string]: unknown;
+ }
+ /**
+  * The function to apply to the model outputs in order to retrieve the scores.
+  */
+ export type ClassificationOutputTransform = "sigmoid" | "softmax" | "none";
+ export type AudioClassificationOutput = AudioClassificationOutputElement[];
+ /**
+  * Outputs for Audio Classification inference
+  */
+ export interface AudioClassificationOutputElement {
+     /**
+      * The predicted class label.
+      */
+     label: string;
+     /**
+      * The corresponding probability.
+      */
+     score: number;
+     [property: string]: unknown;
+ }
+ //# sourceMappingURL=inference.d.ts.map
package/dist/src/tasks/audio-classification/inference.d.ts.map
@@ -0,0 +1 @@
+ {"version":3,"file":"inference.d.ts","sourceRoot":"","sources":["../../../../src/tasks/audio-classification/inference.ts"],"names":[],"mappings":"AAAA;;;;GAIG;AACH;;GAEG;AACH,MAAM,WAAW,wBAAwB;IACxC;;OAEG;IACH,MAAM,EAAE,OAAO,CAAC;IAChB;;OAEG;IACH,UAAU,CAAC,EAAE,6BAA6B,CAAC;IAC3C,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC;CAC5B;AACD;;;;GAIG;AACH,MAAM,WAAW,6BAA6B;IAC7C,iBAAiB,CAAC,EAAE,6BAA6B,CAAC;IAClD;;OAEG;IACH,KAAK,CAAC,EAAE,MAAM,CAAC;IACf,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC;CAC5B;AACD;;GAEG;AACH,MAAM,MAAM,6BAA6B,GAAG,SAAS,GAAG,SAAS,GAAG,MAAM,CAAC;AAC3E,MAAM,MAAM,yBAAyB,GAAG,gCAAgC,EAAE,CAAC;AAC3E;;GAEG;AACH,MAAM,WAAW,gCAAgC;IAChD;;OAEG;IACH,KAAK,EAAE,MAAM,CAAC;IACd;;OAEG;IACH,KAAK,EAAE,MAAM,CAAC;IACd,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC;CAC5B"}
package/dist/src/tasks/audio-to-audio/data.d.ts
@@ -0,0 +1,4 @@
+ import type { TaskDataCustom } from "..";
+ declare const taskData: TaskDataCustom;
+ export default taskData;
+ //# sourceMappingURL=data.d.ts.map
package/dist/src/tasks/audio-to-audio/data.d.ts.map
@@ -0,0 +1 @@
+ {"version":3,"file":"data.d.ts","sourceRoot":"","sources":["../../../../src/tasks/audio-to-audio/data.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,cAAc,EAAE,MAAM,IAAI,CAAC;AAEzC,QAAA,MAAM,QAAQ,EAAE,cA6Df,CAAC;AAEF,eAAe,QAAQ,CAAC"}
package/dist/src/tasks/automatic-speech-recognition/data.d.ts
@@ -0,0 +1,4 @@
+ import type { TaskDataCustom } from "..";
+ declare const taskData: TaskDataCustom;
+ export default taskData;
+ //# sourceMappingURL=data.d.ts.map
package/dist/src/tasks/automatic-speech-recognition/data.d.ts.map
@@ -0,0 +1 @@
+ {"version":3,"file":"data.d.ts","sourceRoot":"","sources":["../../../../src/tasks/automatic-speech-recognition/data.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,cAAc,EAAE,MAAM,IAAI,CAAC;AAEzC,QAAA,MAAM,QAAQ,EAAE,cAyEf,CAAC;AAEF,eAAe,QAAQ,CAAC"}