@huggingface/tasks 0.2.1 → 0.3.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (123)
  1. package/README.md +1 -1
  2. package/dist/{index.mjs → index.cjs} +2695 -2497
  3. package/dist/index.d.ts +427 -65
  4. package/dist/index.js +2660 -2532
  5. package/package.json +13 -8
  6. package/src/index.ts +2 -5
  7. package/src/library-to-tasks.ts +1 -1
  8. package/src/model-data.ts +1 -1
  9. package/src/model-libraries-downloads.ts +20 -0
  10. package/src/{library-ui-elements.ts → model-libraries-snippets.ts} +50 -296
  11. package/src/model-libraries.ts +375 -44
  12. package/src/pipelines.ts +1 -1
  13. package/src/tasks/audio-classification/about.md +1 -1
  14. package/src/tasks/audio-classification/inference.ts +51 -0
  15. package/src/tasks/audio-classification/spec/input.json +34 -0
  16. package/src/tasks/audio-classification/spec/output.json +10 -0
  17. package/src/tasks/audio-to-audio/about.md +1 -1
  18. package/src/tasks/automatic-speech-recognition/about.md +4 -2
  19. package/src/tasks/automatic-speech-recognition/inference.ts +159 -0
  20. package/src/tasks/automatic-speech-recognition/spec/input.json +34 -0
  21. package/src/tasks/automatic-speech-recognition/spec/output.json +38 -0
  22. package/src/tasks/common-definitions.json +117 -0
  23. package/src/tasks/depth-estimation/data.ts +8 -4
  24. package/src/tasks/depth-estimation/inference.ts +35 -0
  25. package/src/tasks/depth-estimation/spec/input.json +25 -0
  26. package/src/tasks/depth-estimation/spec/output.json +16 -0
  27. package/src/tasks/document-question-answering/inference.ts +110 -0
  28. package/src/tasks/document-question-answering/spec/input.json +85 -0
  29. package/src/tasks/document-question-answering/spec/output.json +36 -0
  30. package/src/tasks/feature-extraction/inference.ts +22 -0
  31. package/src/tasks/feature-extraction/spec/input.json +26 -0
  32. package/src/tasks/feature-extraction/spec/output.json +7 -0
  33. package/src/tasks/fill-mask/inference.ts +62 -0
  34. package/src/tasks/fill-mask/spec/input.json +38 -0
  35. package/src/tasks/fill-mask/spec/output.json +29 -0
  36. package/src/tasks/image-classification/inference.ts +51 -0
  37. package/src/tasks/image-classification/spec/input.json +34 -0
  38. package/src/tasks/image-classification/spec/output.json +10 -0
  39. package/src/tasks/image-segmentation/inference.ts +65 -0
  40. package/src/tasks/image-segmentation/spec/input.json +54 -0
  41. package/src/tasks/image-segmentation/spec/output.json +25 -0
  42. package/src/tasks/image-to-image/inference.ts +67 -0
  43. package/src/tasks/image-to-image/spec/input.json +54 -0
  44. package/src/tasks/image-to-image/spec/output.json +12 -0
  45. package/src/tasks/image-to-text/inference.ts +143 -0
  46. package/src/tasks/image-to-text/spec/input.json +34 -0
  47. package/src/tasks/image-to-text/spec/output.json +14 -0
  48. package/src/tasks/index.ts +5 -2
  49. package/src/tasks/mask-generation/about.md +65 -0
  50. package/src/tasks/mask-generation/data.ts +42 -5
  51. package/src/tasks/object-detection/inference.ts +62 -0
  52. package/src/tasks/object-detection/spec/input.json +30 -0
  53. package/src/tasks/object-detection/spec/output.json +46 -0
  54. package/src/tasks/placeholder/data.ts +3 -0
  55. package/src/tasks/placeholder/spec/input.json +35 -0
  56. package/src/tasks/placeholder/spec/output.json +17 -0
  57. package/src/tasks/question-answering/inference.ts +99 -0
  58. package/src/tasks/question-answering/spec/input.json +67 -0
  59. package/src/tasks/question-answering/spec/output.json +29 -0
  60. package/src/tasks/sentence-similarity/about.md +2 -2
  61. package/src/tasks/sentence-similarity/inference.ts +32 -0
  62. package/src/tasks/sentence-similarity/spec/input.json +40 -0
  63. package/src/tasks/sentence-similarity/spec/output.json +12 -0
  64. package/src/tasks/summarization/data.ts +1 -0
  65. package/src/tasks/summarization/inference.ts +59 -0
  66. package/src/tasks/summarization/spec/input.json +7 -0
  67. package/src/tasks/summarization/spec/output.json +7 -0
  68. package/src/tasks/table-question-answering/inference.ts +61 -0
  69. package/src/tasks/table-question-answering/spec/input.json +44 -0
  70. package/src/tasks/table-question-answering/spec/output.json +40 -0
  71. package/src/tasks/tabular-classification/about.md +1 -1
  72. package/src/tasks/tabular-regression/about.md +1 -1
  73. package/src/tasks/text-classification/about.md +1 -0
  74. package/src/tasks/text-classification/inference.ts +51 -0
  75. package/src/tasks/text-classification/spec/input.json +35 -0
  76. package/src/tasks/text-classification/spec/output.json +10 -0
  77. package/src/tasks/text-generation/about.md +24 -13
  78. package/src/tasks/text-generation/data.ts +22 -38
  79. package/src/tasks/text-generation/inference.ts +194 -0
  80. package/src/tasks/text-generation/spec/input.json +90 -0
  81. package/src/tasks/text-generation/spec/output.json +120 -0
  82. package/src/tasks/text-to-audio/inference.ts +143 -0
  83. package/src/tasks/text-to-audio/spec/input.json +31 -0
  84. package/src/tasks/text-to-audio/spec/output.json +17 -0
  85. package/src/tasks/text-to-image/about.md +11 -2
  86. package/src/tasks/text-to-image/data.ts +6 -2
  87. package/src/tasks/text-to-image/inference.ts +71 -0
  88. package/src/tasks/text-to-image/spec/input.json +59 -0
  89. package/src/tasks/text-to-image/spec/output.json +13 -0
  90. package/src/tasks/text-to-speech/about.md +4 -2
  91. package/src/tasks/text-to-speech/data.ts +1 -0
  92. package/src/tasks/text-to-speech/inference.ts +147 -0
  93. package/src/tasks/text-to-speech/spec/input.json +7 -0
  94. package/src/tasks/text-to-speech/spec/output.json +7 -0
  95. package/src/tasks/text2text-generation/inference.ts +55 -0
  96. package/src/tasks/text2text-generation/spec/input.json +55 -0
  97. package/src/tasks/text2text-generation/spec/output.json +14 -0
  98. package/src/tasks/token-classification/inference.ts +82 -0
  99. package/src/tasks/token-classification/spec/input.json +65 -0
  100. package/src/tasks/token-classification/spec/output.json +33 -0
  101. package/src/tasks/translation/data.ts +1 -0
  102. package/src/tasks/translation/inference.ts +59 -0
  103. package/src/tasks/translation/spec/input.json +7 -0
  104. package/src/tasks/translation/spec/output.json +7 -0
  105. package/src/tasks/video-classification/inference.ts +59 -0
  106. package/src/tasks/video-classification/spec/input.json +42 -0
  107. package/src/tasks/video-classification/spec/output.json +10 -0
  108. package/src/tasks/visual-question-answering/inference.ts +63 -0
  109. package/src/tasks/visual-question-answering/spec/input.json +41 -0
  110. package/src/tasks/visual-question-answering/spec/output.json +21 -0
  111. package/src/tasks/zero-shot-classification/inference.ts +67 -0
  112. package/src/tasks/zero-shot-classification/spec/input.json +50 -0
  113. package/src/tasks/zero-shot-classification/spec/output.json +10 -0
  114. package/src/tasks/zero-shot-image-classification/data.ts +8 -5
  115. package/src/tasks/zero-shot-image-classification/inference.ts +61 -0
  116. package/src/tasks/zero-shot-image-classification/spec/input.json +45 -0
  117. package/src/tasks/zero-shot-image-classification/spec/output.json +10 -0
  118. package/src/tasks/zero-shot-object-detection/about.md +6 -0
  119. package/src/tasks/zero-shot-object-detection/data.ts +6 -1
  120. package/src/tasks/zero-shot-object-detection/inference.ts +66 -0
  121. package/src/tasks/zero-shot-object-detection/spec/input.json +40 -0
  122. package/src/tasks/zero-shot-object-detection/spec/output.json +47 -0
  123. package/tsconfig.json +3 -3
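
The diff shown below is for file 10, package/src/{library-ui-elements.ts → model-libraries-snippets.ts}, the headline refactor of this release: the `LibraryUiElement` interface and the `MODEL_LIBRARIES_UI_ELEMENTS` registry move out of this file (into package/src/model-libraries.ts, file 11), and each per-library snippet function gains an `export` and an explicit `: string[]` return type so it can be imported directly. A minimal consumer sketch of the new layout (assumed usage, not code from this diff; the in-repo import paths and the model fields are illustrative):

// Hypothetical consumer of the 0.3.0 layout (sketch, assumptions noted above).
import type { ModelData } from "./src/model-data";
import { timm } from "./src/model-libraries-snippets";

// Minimal ModelData for illustration: the timm helper only reads `id`.
const model: ModelData = { id: "timm/resnet50.a1_in1k" };

// Snippet helpers now return string[] directly instead of being reachable
// only through MODEL_LIBRARIES_UI_ELEMENTS[lib].snippets:
const [snippet] = timm(model);
console.log(snippet);
// import timm
//
// model = timm.create_model("hf_hub:timm/resnet50.a1_in1k", pretrained=True)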
@@ -1,32 +1,6 @@
  import type { ModelData } from "./model-data";
- import type { ModelLibraryKey } from "./model-libraries";
-
- /**
- * Elements configurable by a model library.
- */
- export interface LibraryUiElement {
- /**
- * Name displayed on the main
- * call-to-action button on the model page.
- */
- btnLabel: string;
- /**
- * Repo name
- */
- repoName: string;
- /**
- * URL to library's repo
- */
- repoUrl: string;
- /**
- * URL to library's docs
- */
- docsUrl?: string;
- /**
- * Code snippet displayed on model page
- */
- snippets: (model: ModelData) => string[];
- }
+
+ const TAG_CUSTOM_CODE = "custom_code";

  function nameWithoutNamespace(modelId: string): string {
  const splitted = modelId.split("/");
@@ -35,11 +9,11 @@ function nameWithoutNamespace(modelId: string): string {

  //#region snippets

- const adapter_transformers = (model: ModelData) => [
- `from transformers import ${model.config?.adapter_transformers?.model_class}
+ export const adapters = (model: ModelData): string[] => [
+ `from adapters import AutoAdapterModel

- model = ${model.config?.adapter_transformers?.model_class}.from_pretrained("${model.config?.adapter_transformers?.model_name}")
- model.load_adapter("${model.id}", source="hf")`,
+ model = AutoAdapterModel.from_pretrained("${model.config?.adapter_transformers?.model_name}")
+ model.load_adapter("${model.id}", set_active=True)`,
  ];

  const allennlpUnknown = (model: ModelData) => [
@@ -58,14 +32,14 @@ predictor_input = {"passage": "My name is Wolfgang and I live in Berlin", "quest
  predictions = predictor.predict_json(predictor_input)`,
  ];

- const allennlp = (model: ModelData) => {
+ export const allennlp = (model: ModelData): string[] => {
  if (model.tags?.includes("question-answering")) {
  return allennlpQuestionAnswering(model);
  }
  return allennlpUnknown(model);
  };

- const asteroid = (model: ModelData) => [
+ export const asteroid = (model: ModelData): string[] => [
  `from asteroid.models import BaseModel

  model = BaseModel.from_pretrained("${model.id}")`,
@@ -75,7 +49,7 @@ function get_base_diffusers_model(model: ModelData): string {
  return model.cardData?.base_model?.toString() ?? "fill-in-base-model";
  }

- const bertopic = (model: ModelData) => [
+ export const bertopic = (model: ModelData): string[] => [
  `from bertopic import BERTopic

  model = BERTopic.load("${model.id}")`,
@@ -110,7 +84,7 @@ pipeline = DiffusionPipeline.from_pretrained("${get_base_diffusers_model(model)}
  pipeline.load_textual_inversion("${model.id}")`,
  ];

- const diffusers = (model: ModelData) => {
+ export const diffusers = (model: ModelData): string[] => {
  if (model.tags?.includes("controlnet")) {
  return diffusers_controlnet(model);
  } else if (model.tags?.includes("lora")) {
@@ -122,7 +96,7 @@ const diffusers = (model: ModelData) => {
  }
  };

- const espnetTTS = (model: ModelData) => [
+ export const espnetTTS = (model: ModelData): string[] => [
  `from espnet2.bin.tts_inference import Text2Speech

  model = Text2Speech.from_pretrained("${model.id}")
@@ -130,7 +104,7 @@ model = Text2Speech.from_pretrained("${model.id}")
  speech, *_ = model("text to generate speech from")`,
  ];

- const espnetASR = (model: ModelData) => [
+ export const espnetASR = (model: ModelData): string[] => [
  `from espnet2.bin.asr_inference import Speech2Text

  model = Speech2Text.from_pretrained(
@@ -143,7 +117,7 @@ text, *_ = model(speech)[0]`,

  const espnetUnknown = () => [`unknown model type (must be text-to-speech or automatic-speech-recognition)`];

- const espnet = (model: ModelData) => {
+ export const espnet = (model: ModelData): string[] => {
  if (model.tags?.includes("text-to-speech")) {
  return espnetTTS(model);
  } else if (model.tags?.includes("automatic-speech-recognition")) {
@@ -152,7 +126,7 @@ const espnet = (model: ModelData) => {
  return espnetUnknown();
  };

- const fairseq = (model: ModelData) => [
+ export const fairseq = (model: ModelData): string[] => [
  `from fairseq.checkpoint_utils import load_model_ensemble_and_task_from_hf_hub

  models, cfg, task = load_model_ensemble_and_task_from_hf_hub(
@@ -160,27 +134,27 @@ models, cfg, task = load_model_ensemble_and_task_from_hf_hub(
  )`,
  ];

- const flair = (model: ModelData) => [
+ export const flair = (model: ModelData): string[] => [
  `from flair.models import SequenceTagger

  tagger = SequenceTagger.load("${model.id}")`,
  ];

- const keras = (model: ModelData) => [
+ export const keras = (model: ModelData): string[] => [
  `from huggingface_hub import from_pretrained_keras

  model = from_pretrained_keras("${model.id}")
  `,
  ];

- const open_clip = (model: ModelData) => [
+ export const open_clip = (model: ModelData): string[] => [
  `import open_clip

  model, preprocess_train, preprocess_val = open_clip.create_model_and_transforms('hf-hub:${model.id}')
  tokenizer = open_clip.get_tokenizer('hf-hub:${model.id}')`,
  ];

- const paddlenlp = (model: ModelData) => {
+ export const paddlenlp = (model: ModelData): string[] => {
  if (model.config?.architectures?.[0]) {
  const architecture = model.config.architectures[0];
  return [
@@ -204,7 +178,7 @@ const paddlenlp = (model: ModelData) => {
  }
  };

- const pyannote_audio_pipeline = (model: ModelData) => [
+ export const pyannote_audio_pipeline = (model: ModelData): string[] => [
  `from pyannote.audio import Pipeline

  pipeline = Pipeline.from_pretrained("${model.id}")
@@ -221,7 +195,7 @@ waveform, sample_rate = Audio().crop("file.wav", excerpt)
  pipeline({"waveform": waveform, "sample_rate": sample_rate})`,
  ];

- const pyannote_audio_model = (model: ModelData) => [
+ const pyannote_audio_model = (model: ModelData): string[] => [
  `from pyannote.audio import Model, Inference

  model = Model.from_pretrained("${model.id}")
@@ -236,14 +210,14 @@ excerpt = Segment(start=2.0, end=5.0)
  inference.crop("file.wav", excerpt)`,
  ];

- const pyannote_audio = (model: ModelData) => {
+ export const pyannote_audio = (model: ModelData): string[] => {
  if (model.tags?.includes("pyannote-audio-pipeline")) {
  return pyannote_audio_pipeline(model);
  }
  return pyannote_audio_model(model);
  };

- const tensorflowttsTextToMel = (model: ModelData) => [
+ const tensorflowttsTextToMel = (model: ModelData): string[] => [
  `from tensorflow_tts.inference import AutoProcessor, TFAutoModel

  processor = AutoProcessor.from_pretrained("${model.id}")
@@ -251,7 +225,7 @@ model = TFAutoModel.from_pretrained("${model.id}")
  `,
  ];

- const tensorflowttsMelToWav = (model: ModelData) => [
+ const tensorflowttsMelToWav = (model: ModelData): string[] => [
  `from tensorflow_tts.inference import TFAutoModel

  model = TFAutoModel.from_pretrained("${model.id}")
@@ -259,14 +233,14 @@ audios = model.inference(mels)
  `,
  ];

- const tensorflowttsUnknown = (model: ModelData) => [
+ const tensorflowttsUnknown = (model: ModelData): string[] => [
  `from tensorflow_tts.inference import TFAutoModel

  model = TFAutoModel.from_pretrained("${model.id}")
  `,
  ];

- const tensorflowtts = (model: ModelData) => {
+ export const tensorflowtts = (model: ModelData): string[] => {
  if (model.tags?.includes("text-to-mel")) {
  return tensorflowttsTextToMel(model);
  } else if (model.tags?.includes("mel-to-wav")) {
@@ -275,7 +249,7 @@ const tensorflowtts = (model: ModelData) => {
  return tensorflowttsUnknown(model);
  };

- const timm = (model: ModelData) => [
+ export const timm = (model: ModelData): string[] => [
  `import timm

  model = timm.create_model("hf_hub:${model.id}", pretrained=True)`,
@@ -317,7 +291,7 @@ model = joblib.load(
  ];
  };

- const sklearn = (model: ModelData) => {
+ export const sklearn = (model: ModelData): string[] => {
  if (model.tags?.includes("skops")) {
  const skopsmodelFile = model.config?.sklearn?.filename;
  const skopssaveFormat = model.config?.sklearn?.model_format;
@@ -334,29 +308,29 @@ const sklearn = (model: ModelData) => {
  }
  };

- const fastai = (model: ModelData) => [
+ export const fastai = (model: ModelData): string[] => [
  `from huggingface_hub import from_pretrained_fastai

  learn = from_pretrained_fastai("${model.id}")`,
  ];

- const sampleFactory = (model: ModelData) => [
+ export const sampleFactory = (model: ModelData): string[] => [
  `python -m sample_factory.huggingface.load_from_hub -r ${model.id} -d ./train_dir`,
  ];

- const sentenceTransformers = (model: ModelData) => [
+ export const sentenceTransformers = (model: ModelData): string[] => [
  `from sentence_transformers import SentenceTransformer

  model = SentenceTransformer("${model.id}")`,
  ];

- const setfit = (model: ModelData) => [
+ export const setfit = (model: ModelData): string[] => [
  `from setfit import SetFitModel

  model = SetFitModel.from_pretrained("${model.id}")`,
  ];

- const spacy = (model: ModelData) => [
+ export const spacy = (model: ModelData): string[] => [
  `!pip install https://huggingface.co/${model.id}/resolve/main/${nameWithoutNamespace(model.id)}-any-py3-none-any.whl

  # Using spacy.load().
@@ -368,13 +342,13 @@ import ${nameWithoutNamespace(model.id)}
  nlp = ${nameWithoutNamespace(model.id)}.load()`,
  ];

- const span_marker = (model: ModelData) => [
+ export const span_marker = (model: ModelData): string[] => [
  `from span_marker import SpanMarkerModel

  model = SpanMarkerModel.from_pretrained("${model.id}")`,
  ];

- const stanza = (model: ModelData) => [
+ export const stanza = (model: ModelData): string[] => [
  `import stanza

  stanza.download("${nameWithoutNamespace(model.id).replace("stanza-", "")}")
@@ -397,7 +371,7 @@ const speechBrainMethod = (speechbrainInterface: string) => {
  }
  };

- const speechbrain = (model: ModelData) => {
+ export const speechbrain = (model: ModelData): string[] => {
  const speechbrainInterface = model.config?.speechbrain?.interface;
  if (speechbrainInterface === undefined) {
  return [`# interface not specified in config.json`];
@@ -417,12 +391,12 @@ model.${speechbrainMethod}("file.wav")`,
  ];
  };

- const transformers = (model: ModelData) => {
+ export const transformers = (model: ModelData): string[] => {
  const info = model.transformersInfo;
  if (!info) {
  return [`# ⚠️ Type of model unknown`];
  }
- const remote_code_snippet = info.custom_class ? ", trust_remote_code=True" : "";
+ const remote_code_snippet = model.tags?.includes(TAG_CUSTOM_CODE) ? ", trust_remote_code=True" : "";

  let autoSnippet: string;
  if (info.processor) {
@@ -459,7 +433,7 @@ const transformers = (model: ModelData) => {
  return [autoSnippet];
  };

- const transformersJS = (model: ModelData) => {
+ export const transformersJS = (model: ModelData): string[] => {
  if (!model.pipeline_tag) {
  return [`// ⚠️ Unknown pipeline tag`];
  }
@@ -490,7 +464,7 @@ const peftTask = (peftTaskType?: string) => {
  }
  };

- const peft = (model: ModelData) => {
+ export const peft = (model: ModelData): string[] => {
  const { base_model_name: peftBaseModel, task_type: peftTaskType } = model.config?.peft ?? {};
  const pefttask = peftTask(peftTaskType);
  if (!pefttask) {
@@ -510,14 +484,14 @@ model = PeftModel.from_pretrained(model, "${model.id}")`,
  ];
  };

- const fasttext = (model: ModelData) => [
+ export const fasttext = (model: ModelData): string[] => [
  `from huggingface_hub import hf_hub_download
  import fasttext

  model = fasttext.load_model(hf_hub_download("${model.id}", "model.bin"))`,
  ];

- const stableBaselines3 = (model: ModelData) => [
+ export const stableBaselines3 = (model: ModelData): string[] => [
  `from huggingface_sb3 import load_from_hub
  checkpoint = load_from_hub(
  repo_id="${model.id}",
@@ -539,24 +513,26 @@ transcriptions = asr_model.transcribe(["file.wav"])`,
  }
  };

- const mlAgents = (model: ModelData) => [`mlagents-load-from-hf --repo-id="${model.id}" --local-dir="./downloads"`];
+ export const mlAgents = (model: ModelData): string[] => [
+ `mlagents-load-from-hf --repo-id="${model.id}" --local-dir="./downloads"`,
+ ];

- const sentis = (model: ModelData) => [
+ export const sentis = (/* model: ModelData */): string[] => [
  `string modelName = "[Your model name here].sentis";
  Model model = ModelLoader.Load(Application.streamingAssetsPath + "/" + modelName);
  IWorker engine = WorkerFactory.CreateWorker(BackendType.GPUCompute, model);
  // Please see provided C# file for more details
- `
+ `,
  ];

- const mlx = (model: ModelData) => [
+ export const mlx = (model: ModelData): string[] => [
  `pip install huggingface_hub hf_transfer

- export HF_HUB_ENABLE_HF_TRANSFER=1
+ export HF_HUB_ENABLE_HF_TRANSFER=1
  huggingface-cli download --local-dir ${nameWithoutNamespace(model.id)} ${model.id}`,
  ];

- const nemo = (model: ModelData) => {
+ export const nemo = (model: ModelData): string[] => {
  let command: string[] | undefined = undefined;
  // Resolve the tag to a nemo domain/sub-domain
  if (model.tags?.includes("automatic-speech-recognition")) {
@@ -566,232 +542,10 @@ const nemo = (model: ModelData) => {
  return command ?? [`# tag did not correspond to a valid NeMo domain.`];
  };

- const pythae = (model: ModelData) => [
+ export const pythae = (model: ModelData): string[] => [
  `from pythae.models import AutoModel

  model = AutoModel.load_from_hf_hub("${model.id}")`,
  ];

  //#endregion
-
- export const MODEL_LIBRARIES_UI_ELEMENTS: Partial<Record<ModelLibraryKey, LibraryUiElement>> = {
- "adapter-transformers": {
- btnLabel: "Adapter Transformers",
- repoName: "adapter-transformers",
- repoUrl: "https://github.com/Adapter-Hub/adapter-transformers",
- docsUrl: "https://huggingface.co/docs/hub/adapter-transformers",
- snippets: adapter_transformers,
- },
- allennlp: {
- btnLabel: "AllenNLP",
- repoName: "AllenNLP",
- repoUrl: "https://github.com/allenai/allennlp",
- docsUrl: "https://huggingface.co/docs/hub/allennlp",
- snippets: allennlp,
- },
- asteroid: {
- btnLabel: "Asteroid",
- repoName: "Asteroid",
- repoUrl: "https://github.com/asteroid-team/asteroid",
- docsUrl: "https://huggingface.co/docs/hub/asteroid",
- snippets: asteroid,
- },
- bertopic: {
- btnLabel: "BERTopic",
- repoName: "BERTopic",
- repoUrl: "https://github.com/MaartenGr/BERTopic",
- snippets: bertopic,
- },
- diffusers: {
- btnLabel: "Diffusers",
- repoName: "🤗/diffusers",
- repoUrl: "https://github.com/huggingface/diffusers",
- docsUrl: "https://huggingface.co/docs/hub/diffusers",
- snippets: diffusers,
- },
- espnet: {
- btnLabel: "ESPnet",
- repoName: "ESPnet",
- repoUrl: "https://github.com/espnet/espnet",
- docsUrl: "https://huggingface.co/docs/hub/espnet",
- snippets: espnet,
- },
- fairseq: {
- btnLabel: "Fairseq",
- repoName: "fairseq",
- repoUrl: "https://github.com/pytorch/fairseq",
- snippets: fairseq,
- },
- flair: {
- btnLabel: "Flair",
- repoName: "Flair",
- repoUrl: "https://github.com/flairNLP/flair",
- docsUrl: "https://huggingface.co/docs/hub/flair",
- snippets: flair,
- },
- keras: {
- btnLabel: "Keras",
- repoName: "Keras",
- repoUrl: "https://github.com/keras-team/keras",
- docsUrl: "https://huggingface.co/docs/hub/keras",
- snippets: keras,
- },
- mlx: {
- btnLabel: "MLX",
- repoName: "MLX",
- repoUrl: "https://github.com/ml-explore/mlx-examples/tree/main",
- snippets: mlx,
- },
- nemo: {
- btnLabel: "NeMo",
- repoName: "NeMo",
- repoUrl: "https://github.com/NVIDIA/NeMo",
- snippets: nemo,
- },
- open_clip: {
- btnLabel: "OpenCLIP",
- repoName: "OpenCLIP",
- repoUrl: "https://github.com/mlfoundations/open_clip",
- snippets: open_clip,
- },
- paddlenlp: {
- btnLabel: "paddlenlp",
- repoName: "PaddleNLP",
- repoUrl: "https://github.com/PaddlePaddle/PaddleNLP",
- docsUrl: "https://huggingface.co/docs/hub/paddlenlp",
- snippets: paddlenlp,
- },
- peft: {
- btnLabel: "PEFT",
- repoName: "PEFT",
- repoUrl: "https://github.com/huggingface/peft",
- snippets: peft,
- },
- "pyannote-audio": {
- btnLabel: "pyannote.audio",
- repoName: "pyannote-audio",
- repoUrl: "https://github.com/pyannote/pyannote-audio",
- snippets: pyannote_audio,
- },
- "sentence-transformers": {
- btnLabel: "sentence-transformers",
- repoName: "sentence-transformers",
- repoUrl: "https://github.com/UKPLab/sentence-transformers",
- docsUrl: "https://huggingface.co/docs/hub/sentence-transformers",
- snippets: sentenceTransformers,
- },
- setfit: {
- btnLabel: "setfit",
- repoName: "setfit",
- repoUrl: "https://github.com/huggingface/setfit",
- docsUrl: "https://huggingface.co/docs/hub/setfit",
- snippets: setfit,
- },
- sklearn: {
- btnLabel: "Scikit-learn",
- repoName: "Scikit-learn",
- repoUrl: "https://github.com/scikit-learn/scikit-learn",
- snippets: sklearn,
- },
- fastai: {
- btnLabel: "fastai",
- repoName: "fastai",
- repoUrl: "https://github.com/fastai/fastai",
- docsUrl: "https://huggingface.co/docs/hub/fastai",
- snippets: fastai,
- },
- spacy: {
- btnLabel: "spaCy",
- repoName: "spaCy",
- repoUrl: "https://github.com/explosion/spaCy",
- docsUrl: "https://huggingface.co/docs/hub/spacy",
- snippets: spacy,
- },
- "span-marker": {
- btnLabel: "SpanMarker",
- repoName: "SpanMarkerNER",
- repoUrl: "https://github.com/tomaarsen/SpanMarkerNER",
- docsUrl: "https://huggingface.co/docs/hub/span_marker",
- snippets: span_marker,
- },
- speechbrain: {
- btnLabel: "speechbrain",
- repoName: "speechbrain",
- repoUrl: "https://github.com/speechbrain/speechbrain",
- docsUrl: "https://huggingface.co/docs/hub/speechbrain",
- snippets: speechbrain,
- },
- stanza: {
- btnLabel: "Stanza",
- repoName: "stanza",
- repoUrl: "https://github.com/stanfordnlp/stanza",
- docsUrl: "https://huggingface.co/docs/hub/stanza",
- snippets: stanza,
- },
- tensorflowtts: {
- btnLabel: "TensorFlowTTS",
- repoName: "TensorFlowTTS",
- repoUrl: "https://github.com/TensorSpeech/TensorFlowTTS",
- snippets: tensorflowtts,
- },
- timm: {
- btnLabel: "timm",
- repoName: "pytorch-image-models",
- repoUrl: "https://github.com/rwightman/pytorch-image-models",
- docsUrl: "https://huggingface.co/docs/hub/timm",
- snippets: timm,
- },
- transformers: {
- btnLabel: "Transformers",
- repoName: "🤗/transformers",
- repoUrl: "https://github.com/huggingface/transformers",
- docsUrl: "https://huggingface.co/docs/hub/transformers",
- snippets: transformers,
- },
- "transformers.js": {
- btnLabel: "Transformers.js",
- repoName: "transformers.js",
- repoUrl: "https://github.com/xenova/transformers.js",
- docsUrl: "https://huggingface.co/docs/hub/transformers-js",
- snippets: transformersJS,
- },
- fasttext: {
- btnLabel: "fastText",
- repoName: "fastText",
- repoUrl: "https://fasttext.cc/",
- snippets: fasttext,
- },
- "sample-factory": {
- btnLabel: "sample-factory",
- repoName: "sample-factory",
- repoUrl: "https://github.com/alex-petrenko/sample-factory",
- docsUrl: "https://huggingface.co/docs/hub/sample-factory",
- snippets: sampleFactory,
- },
- "stable-baselines3": {
- btnLabel: "stable-baselines3",
- repoName: "stable-baselines3",
- repoUrl: "https://github.com/huggingface/huggingface_sb3",
- docsUrl: "https://huggingface.co/docs/hub/stable-baselines3",
- snippets: stableBaselines3,
- },
- "ml-agents": {
- btnLabel: "ml-agents",
- repoName: "ml-agents",
- repoUrl: "https://github.com/Unity-Technologies/ml-agents",
- docsUrl: "https://huggingface.co/docs/hub/ml-agents",
- snippets: mlAgents,
- },
- "unity-sentis": {
- btnLabel: "unity-sentis",
- repoName: "unity-sentis",
- repoUrl: "https://github.com/Unity-Technologies/sentis-samples",
- snippets: sentis,
- },
- pythae: {
- btnLabel: "pythae",
- repoName: "pythae",
- repoUrl: "https://github.com/clementchadebec/benchmark_VAE",
- snippets: pythae,
- },
- } as const;
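
One behavioral change in this diff is easy to miss: the transformers snippet now decides whether to emit `trust_remote_code=True` from the model's `custom_code` tag (via the new `TAG_CUSTOM_CODE` constant) instead of from `transformersInfo.custom_class`. A self-contained sketch of the new rule follows; `remoteCodeArg` is a hypothetical name introduced here for illustration, the library computes this inline:

// Sketch of the 0.3.0 trust_remote_code rule (illustrative, not library code).
const TAG_CUSTOM_CODE = "custom_code";

function remoteCodeArg(tags?: string[]): string {
  // 0.2.1 keyed this off transformersInfo.custom_class; 0.3.0 keys it off the tag.
  return tags?.includes(TAG_CUSTOM_CODE) ? ", trust_remote_code=True" : "";
}

// A model tagged ["transformers", "custom_code"] now renders, e.g.:
// model = AutoModel.from_pretrained("<model-id>", trust_remote_code=True)
console.log(remoteCodeArg(["transformers", "custom_code"])); // ", trust_remote_code=True"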