@huggingface/tasks 0.13.14 → 0.13.16

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (100)
  1. package/dist/commonjs/hardware.d.ts +4 -1
  2. package/dist/commonjs/hardware.d.ts.map +1 -1
  3. package/dist/commonjs/hardware.js +3 -0
  4. package/dist/commonjs/local-apps.js +9 -9
  5. package/dist/commonjs/local-apps.spec.js +2 -8
  6. package/dist/commonjs/model-libraries.d.ts +7 -1
  7. package/dist/commonjs/model-libraries.d.ts.map +1 -1
  8. package/dist/commonjs/model-libraries.js +7 -1
  9. package/dist/commonjs/tasks/audio-to-audio/data.d.ts.map +1 -1
  10. package/dist/commonjs/tasks/audio-to-audio/data.js +0 -4
  11. package/dist/commonjs/tasks/fill-mask/data.js +2 -2
  12. package/dist/commonjs/tasks/image-classification/data.d.ts.map +1 -1
  13. package/dist/commonjs/tasks/image-classification/data.js +2 -3
  14. package/dist/commonjs/tasks/image-feature-extraction/data.d.ts.map +1 -1
  15. package/dist/commonjs/tasks/image-feature-extraction/data.js +8 -3
  16. package/dist/commonjs/tasks/image-text-to-text/data.d.ts.map +1 -1
  17. package/dist/commonjs/tasks/image-text-to-text/data.js +19 -11
  18. package/dist/commonjs/tasks/image-to-3d/data.js +4 -4
  19. package/dist/commonjs/tasks/image-to-image/data.d.ts.map +1 -1
  20. package/dist/commonjs/tasks/image-to-image/data.js +12 -4
  21. package/dist/commonjs/tasks/index.js +1 -1
  22. package/dist/commonjs/tasks/keypoint-detection/data.d.ts.map +1 -1
  23. package/dist/commonjs/tasks/keypoint-detection/data.js +4 -0
  24. package/dist/commonjs/tasks/object-detection/data.d.ts.map +1 -1
  25. package/dist/commonjs/tasks/object-detection/data.js +6 -2
  26. package/dist/commonjs/tasks/sentence-similarity/data.d.ts.map +1 -1
  27. package/dist/commonjs/tasks/sentence-similarity/data.js +5 -1
  28. package/dist/commonjs/tasks/text-generation/data.d.ts.map +1 -1
  29. package/dist/commonjs/tasks/text-generation/data.js +17 -13
  30. package/dist/commonjs/tasks/text-to-image/data.d.ts.map +1 -1
  31. package/dist/commonjs/tasks/text-to-image/data.js +4 -0
  32. package/dist/commonjs/tasks/text-to-speech/data.d.ts.map +1 -1
  33. package/dist/commonjs/tasks/text-to-speech/data.js +9 -9
  34. package/dist/commonjs/tasks/text-to-video/data.js +5 -5
  35. package/dist/commonjs/tasks/video-text-to-text/data.js +4 -4
  36. package/dist/commonjs/tasks/zero-shot-classification/data.js +2 -2
  37. package/dist/commonjs/tasks/zero-shot-image-classification/data.d.ts.map +1 -1
  38. package/dist/commonjs/tasks/zero-shot-image-classification/data.js +8 -4
  39. package/dist/esm/hardware.d.ts +4 -1
  40. package/dist/esm/hardware.d.ts.map +1 -1
  41. package/dist/esm/hardware.js +3 -0
  42. package/dist/esm/local-apps.js +9 -9
  43. package/dist/esm/local-apps.spec.js +2 -8
  44. package/dist/esm/model-libraries.d.ts +7 -1
  45. package/dist/esm/model-libraries.d.ts.map +1 -1
  46. package/dist/esm/model-libraries.js +7 -1
  47. package/dist/esm/tasks/audio-to-audio/data.d.ts.map +1 -1
  48. package/dist/esm/tasks/audio-to-audio/data.js +0 -4
  49. package/dist/esm/tasks/fill-mask/data.js +2 -2
  50. package/dist/esm/tasks/image-classification/data.d.ts.map +1 -1
  51. package/dist/esm/tasks/image-classification/data.js +2 -3
  52. package/dist/esm/tasks/image-feature-extraction/data.d.ts.map +1 -1
  53. package/dist/esm/tasks/image-feature-extraction/data.js +8 -3
  54. package/dist/esm/tasks/image-text-to-text/data.d.ts.map +1 -1
  55. package/dist/esm/tasks/image-text-to-text/data.js +19 -11
  56. package/dist/esm/tasks/image-to-3d/data.js +4 -4
  57. package/dist/esm/tasks/image-to-image/data.d.ts.map +1 -1
  58. package/dist/esm/tasks/image-to-image/data.js +12 -4
  59. package/dist/esm/tasks/index.js +1 -1
  60. package/dist/esm/tasks/keypoint-detection/data.d.ts.map +1 -1
  61. package/dist/esm/tasks/keypoint-detection/data.js +4 -0
  62. package/dist/esm/tasks/object-detection/data.d.ts.map +1 -1
  63. package/dist/esm/tasks/object-detection/data.js +6 -2
  64. package/dist/esm/tasks/sentence-similarity/data.d.ts.map +1 -1
  65. package/dist/esm/tasks/sentence-similarity/data.js +5 -1
  66. package/dist/esm/tasks/text-generation/data.d.ts.map +1 -1
  67. package/dist/esm/tasks/text-generation/data.js +17 -13
  68. package/dist/esm/tasks/text-to-image/data.d.ts.map +1 -1
  69. package/dist/esm/tasks/text-to-image/data.js +4 -0
  70. package/dist/esm/tasks/text-to-speech/data.d.ts.map +1 -1
  71. package/dist/esm/tasks/text-to-speech/data.js +9 -9
  72. package/dist/esm/tasks/text-to-video/data.js +5 -5
  73. package/dist/esm/tasks/video-text-to-text/data.js +4 -4
  74. package/dist/esm/tasks/zero-shot-classification/data.js +2 -2
  75. package/dist/esm/tasks/zero-shot-image-classification/data.d.ts.map +1 -1
  76. package/dist/esm/tasks/zero-shot-image-classification/data.js +8 -4
  77. package/package.json +1 -1
  78. package/src/hardware.ts +4 -1
  79. package/src/local-apps.spec.ts +2 -8
  80. package/src/local-apps.ts +9 -9
  81. package/src/model-libraries.ts +7 -1
  82. package/src/tasks/audio-to-audio/data.ts +0 -4
  83. package/src/tasks/fill-mask/data.ts +2 -2
  84. package/src/tasks/image-classification/data.ts +2 -3
  85. package/src/tasks/image-feature-extraction/data.ts +8 -3
  86. package/src/tasks/image-text-to-text/about.md +8 -3
  87. package/src/tasks/image-text-to-text/data.ts +19 -11
  88. package/src/tasks/image-to-3d/data.ts +4 -4
  89. package/src/tasks/image-to-image/data.ts +12 -5
  90. package/src/tasks/index.ts +1 -1
  91. package/src/tasks/keypoint-detection/data.ts +4 -0
  92. package/src/tasks/object-detection/data.ts +6 -2
  93. package/src/tasks/sentence-similarity/data.ts +5 -1
  94. package/src/tasks/text-generation/data.ts +17 -14
  95. package/src/tasks/text-to-image/data.ts +4 -0
  96. package/src/tasks/text-to-speech/data.ts +9 -10
  97. package/src/tasks/text-to-video/data.ts +5 -5
  98. package/src/tasks/video-text-to-text/data.ts +4 -4
  99. package/src/tasks/zero-shot-classification/data.ts +2 -2
  100. package/src/tasks/zero-shot-image-classification/data.ts +8 -4

package/dist/commonjs/hardware.d.ts
@@ -12,7 +12,7 @@ export declare const TFLOPS_THRESHOLD_WHITE_HOUSE_CLUSTER: number;
  export declare const TFLOPS_THRESHOLD_EU_AI_ACT_MODEL_TRAINING_TOTAL: number;
  export interface HardwareSpec {
      /**
-      * Approximate value, in FP16 whenever possible.
+      * Approximate value, in FP16 whenever possible for GPUs and FP32 for CPUs.
       * This is only approximate/theoretical and shouldn't be taken too seriously.
       * Currently the CPU values are from cpu-monkey.com
       * while the GPU values are from techpowerup.com
@@ -416,6 +416,9 @@ export declare const SKUS: {
      "EPYC 1st Generation (Naples)": {
          tflops: number;
      };
+     "Ryzen Zen 4 7000 (Threadripper)": {
+         tflops: number;
+     };
      "Ryzen Zen4 7000 (Ryzen 9)": {
          tflops: number;
      };

package/dist/commonjs/hardware.d.ts.map
@@ -1 +1 @@
- {"version":3,"file":"hardware.d.ts","sourceRoot":"","sources":["../../src/hardware.ts"],"names":[],"mappings":"AAAA;;;GAGG;AACH,eAAO,MAAM,iDAAiD,QAAW,CAAC;AAC1E,eAAO,MAAM,yDAAyD,QAAW,CAAC;AAClF,eAAO,MAAM,oCAAoC,QAAU,CAAC;AAE5D;;;GAGG;AACH,eAAO,MAAM,+CAA+C,QAAW,CAAC;AAExE,MAAM,WAAW,YAAY;IAC5B;;;;;;;;;OASG;IACH,MAAM,EAAE,MAAM,CAAC;IACf;;;OAGG;IACH,MAAM,CAAC,EAAE,MAAM,EAAE,CAAC;CAClB;AAED,eAAO,MAAM,sBAAsB,UAAqD,CAAC;AAEzF,eAAO,MAAM,IAAI;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;CAkeuD,CAAC;AAEzE,MAAM,MAAM,OAAO,GAAG,MAAM,OAAO,IAAI,CAAC"}
+ {"version":3,"file":"hardware.d.ts","sourceRoot":"","sources":["../../src/hardware.ts"],"names":[],"mappings":"AAAA;;;GAGG;AACH,eAAO,MAAM,iDAAiD,QAAW,CAAC;AAC1E,eAAO,MAAM,yDAAyD,QAAW,CAAC;AAClF,eAAO,MAAM,oCAAoC,QAAU,CAAC;AAE5D;;;GAGG;AACH,eAAO,MAAM,+CAA+C,QAAW,CAAC;AAExE,MAAM,WAAW,YAAY;IAC5B;;;;;;;;;OASG;IACH,MAAM,EAAE,MAAM,CAAC;IACf;;;OAGG;IACH,MAAM,CAAC,EAAE,MAAM,EAAE,CAAC;CAClB;AAED,eAAO,MAAM,sBAAsB,UAAqD,CAAC;AAEzF,eAAO,MAAM,IAAI;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;CAqeuD,CAAC;AAEzE,MAAM,MAAM,OAAO,GAAG,MAAM,OAAO,IAAI,CAAC"}

package/dist/commonjs/hardware.js
@@ -401,6 +401,9 @@ exports.SKUS = {
      "EPYC 1st Generation (Naples)": {
          tflops: 0.6,
      },
+     "Ryzen Zen 4 7000 (Threadripper)": {
+         tflops: 10.0,
+     },
      "Ryzen Zen4 7000 (Ryzen 9)": {
          tflops: 0.56,
      },
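
The SKUS table groups HardwareSpec entries, each carrying an approximate tflops figure (FP16 for GPUs, FP32 for CPUs per the updated doc comment). A minimal lookup sketch, assuming SKUS and HardwareSpec are re-exported from the package root and deliberately not assuming how deeply the table nests its categories:

```ts
import { SKUS, type HardwareSpec } from "@huggingface/tasks";

// Recursively walk the SKUS tree for an entry with the given name.
// The exact category/vendor nesting is an assumption, hence the generic walk.
function findSku(name: string, node: unknown = SKUS): HardwareSpec | undefined {
	if (typeof node !== "object" || node === null) return undefined;
	for (const [key, value] of Object.entries(node as Record<string, unknown>)) {
		if (key === name) return value as HardwareSpec;
		const nested = findSku(name, value);
		if (nested) return nested;
	}
	return undefined;
}

// The entry added in this release:
console.log(findSku("Ryzen Zen 4 7000 (Threadripper)")?.tflops); // 10
```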

package/dist/commonjs/local-apps.js
@@ -29,17 +29,17 @@ function isMlxModel(model) {
      return model.tags.includes("mlx");
  }
  const snippetLlamacpp = (model, filepath) => {
+     let tagName = "";
+     if (filepath) {
+         const quantLabel = (0, gguf_js_1.parseGGUFQuantLabel)(filepath);
+         tagName = quantLabel ? `:${quantLabel}` : "";
+     }
      const command = (binary) => {
-         const snippet = [
-             "# Load and run the model:",
-             `${binary} \\`,
-             ` --hf-repo "${model.id}" \\`,
-             ` --hf-file ${filepath ?? "{{GGUF_FILE}}"} \\`,
-             ` -p "${model.tags.includes("conversational") ? "You are a helpful assistant" : "Once upon a time,"}"`,
-         ];
-         if (model.tags.includes("conversational")) {
+         const snippet = ["# Load and run the model:", `${binary} -hf ${model.id}${tagName}`];
+         if (!model.tags.includes("conversational")) {
+             // for non-conversational models, add a prompt
              snippet[snippet.length - 1] += " \\";
-             snippet.push(" --conversation");
+             snippet.push(' -p "Once upon a time,"');
          }
          return snippet.join("\n");
      };
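
The rewritten snippetLlamacpp switches from the old `--hf-repo`/`--hf-file` invocation to llama.cpp's shorter `-hf <repo>[:<quant>]` form and only appends a `-p` prompt for non-conversational models. A self-contained sketch of the same logic; the quant label is passed in directly here, whereas the real code derives it from the GGUF filename via parseGGUFQuantLabel:

```ts
// Sketch of the new snippet behaviour; model ids and the quant label are example values.
function llamaCppCommand(modelId: string, tags: string[], quantLabel?: string): string {
	const lines = ["# Load and run the model:", `llama-cli -hf ${modelId}${quantLabel ? `:${quantLabel}` : ""}`];
	if (!tags.includes("conversational")) {
		// non-conversational models still get an explicit prompt
		lines[lines.length - 1] += " \\";
		lines.push(' -p "Once upon a time,"');
	}
	return lines.join("\n");
}

console.log(llamaCppCommand("bartowski/Llama-3.2-3B-Instruct-GGUF", ["conversational"]));
// # Load and run the model:
// llama-cli -hf bartowski/Llama-3.2-3B-Instruct-GGUF

console.log(llamaCppCommand("mlabonne/gemma-2b-GGUF", [], "Q4_K_M"));
// # Load and run the model:
// llama-cli -hf mlabonne/gemma-2b-GGUF:Q4_K_M \
//  -p "Once upon a time,"
```

The spec changes below update the llama.cpp test expectations to exactly this shorter output.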

package/dist/commonjs/local-apps.spec.js
@@ -12,11 +12,7 @@ const local_apps_js_1 = require("./local-apps.js");
          };
          const snippet = snippetFunc(model);
          (0, vitest_1.expect)(snippet[0].content).toEqual(`# Load and run the model:
- llama-cli \\
- --hf-repo "bartowski/Llama-3.2-3B-Instruct-GGUF" \\
- --hf-file {{GGUF_FILE}} \\
- -p "You are a helpful assistant" \\
- --conversation`);
+ llama-cli -hf bartowski/Llama-3.2-3B-Instruct-GGUF`);
      });
      (0, vitest_1.it)("llama.cpp non-conversational", async () => {
          const { snippet: snippetFunc } = local_apps_js_1.LOCAL_APPS["llama.cpp"];
@@ -27,9 +23,7 @@ llama-cli \\
          };
          const snippet = snippetFunc(model);
          (0, vitest_1.expect)(snippet[0].content).toEqual(`# Load and run the model:
- llama-cli \\
- --hf-repo "mlabonne/gemma-2b-GGUF" \\
- --hf-file {{GGUF_FILE}} \\
+ llama-cli -hf mlabonne/gemma-2b-GGUF \\
  -p "Once upon a time,"`);
      });
      (0, vitest_1.it)("vLLM conversational llm", async () => {

package/dist/commonjs/model-libraries.d.ts
@@ -465,6 +465,12 @@ export declare const MODEL_LIBRARIES_UI_ELEMENTS: {
          countDownloads: string;
          snippets: () => string[];
      };
+     mitie: {
+         prettyLabel: string;
+         repoName: string;
+         repoUrl: string;
+         countDownloads: string;
+     };
      "ml-agents": {
          prettyLabel: string;
          repoName: string;
@@ -878,5 +884,5 @@ export declare const MODEL_LIBRARIES_UI_ELEMENTS: {
  };
  export type ModelLibraryKey = keyof typeof MODEL_LIBRARIES_UI_ELEMENTS;
  export declare const ALL_MODEL_LIBRARY_KEYS: ModelLibraryKey[];
- export declare const ALL_DISPLAY_MODEL_LIBRARY_KEYS: ("adapter-transformers" | "allennlp" | "anemoi" | "asteroid" | "audiocraft" | "audioseal" | "bertopic" | "big_vision" | "birder" | "birefnet" | "bm25s" | "champ" | "chat_tts" | "colpali" | "cosmos" | "cxr-foundation" | "deepforest" | "depth-anything-v2" | "depth-pro" | "derm-foundation" | "diffree" | "diffusers" | "diffusionkit" | "doctr" | "cartesia_pytorch" | "cartesia_mlx" | "clipscope" | "cosyvoice" | "cotracker" | "edsnlp" | "elm" | "espnet" | "fairseq" | "fastai" | "fasttext" | "flair" | "gemma.cpp" | "gliner" | "glyph-byt5" | "grok" | "hallo" | "hezar" | "htrflow" | "hunyuan-dit" | "imstoucan" | "keras" | "tf-keras" | "keras-nlp" | "keras-hub" | "k2" | "liveportrait" | "llama-cpp-python" | "mini-omni2" | "mindspore" | "mamba-ssm" | "mars5-tts" | "mesh-anything" | "ml-agents" | "mlx" | "mlx-image" | "mlc-llm" | "model2vec" | "moshi" | "nemo" | "open-oasis" | "open_clip" | "paddlenlp" | "peft" | "pxia" | "pyannote-audio" | "py-feat" | "pythae" | "recurrentgemma" | "relik" | "refiners" | "reverb" | "saelens" | "sam2" | "sample-factory" | "sapiens" | "sentence-transformers" | "setfit" | "sklearn" | "spacy" | "span-marker" | "speechbrain" | "ssr-speech" | "stable-audio-tools" | "diffusion-single-file" | "seed-story" | "soloaudio" | "stable-baselines3" | "stanza" | "f5-tts" | "genmo" | "tensorflowtts" | "tabpfn" | "tic-clip" | "timesfm" | "timm" | "transformers" | "transformers.js" | "trellis" | "ultralytics" | "unity-sentis" | "sana" | "vfi-mamba" | "voicecraft" | "whisperkit" | "yolov10" | "3dtopia-xl")[];
+ export declare const ALL_DISPLAY_MODEL_LIBRARY_KEYS: ("adapter-transformers" | "allennlp" | "anemoi" | "asteroid" | "audiocraft" | "audioseal" | "bertopic" | "big_vision" | "birder" | "birefnet" | "bm25s" | "champ" | "chat_tts" | "colpali" | "cosmos" | "cxr-foundation" | "deepforest" | "depth-anything-v2" | "depth-pro" | "derm-foundation" | "diffree" | "diffusers" | "diffusionkit" | "doctr" | "cartesia_pytorch" | "cartesia_mlx" | "clipscope" | "cosyvoice" | "cotracker" | "edsnlp" | "elm" | "espnet" | "fairseq" | "fastai" | "fasttext" | "flair" | "gemma.cpp" | "gliner" | "glyph-byt5" | "grok" | "hallo" | "hezar" | "htrflow" | "hunyuan-dit" | "imstoucan" | "keras" | "tf-keras" | "keras-nlp" | "keras-hub" | "k2" | "liveportrait" | "llama-cpp-python" | "mini-omni2" | "mindspore" | "mamba-ssm" | "mars5-tts" | "mesh-anything" | "mitie" | "ml-agents" | "mlx" | "mlx-image" | "mlc-llm" | "model2vec" | "moshi" | "nemo" | "open-oasis" | "open_clip" | "paddlenlp" | "peft" | "pxia" | "pyannote-audio" | "py-feat" | "pythae" | "recurrentgemma" | "relik" | "refiners" | "reverb" | "saelens" | "sam2" | "sample-factory" | "sapiens" | "sentence-transformers" | "setfit" | "sklearn" | "spacy" | "span-marker" | "speechbrain" | "ssr-speech" | "stable-audio-tools" | "diffusion-single-file" | "seed-story" | "soloaudio" | "stable-baselines3" | "stanza" | "f5-tts" | "genmo" | "tensorflowtts" | "tabpfn" | "tic-clip" | "timesfm" | "timm" | "transformers" | "transformers.js" | "trellis" | "ultralytics" | "unity-sentis" | "sana" | "vfi-mamba" | "voicecraft" | "whisperkit" | "yolov10" | "3dtopia-xl")[];
  //# sourceMappingURL=model-libraries.d.ts.map

package/dist/commonjs/model-libraries.d.ts.map
@@ -1 +1 @@
- {"version":3,"file":"model-libraries.d.ts","sourceRoot":"","sources":["../../src/model-libraries.ts"],"names":[],"mappings":"AACA,OAAO,KAAK,EAAE,SAAS,EAAE,MAAM,iBAAiB,CAAC;AACjD,OAAO,KAAK,EAAE,kBAAkB,EAAE,MAAM,gCAAgC,CAAC;AAEzE;;GAEG;AACH,MAAM,WAAW,gBAAgB;IAChC;;;;OAIG;IACH,WAAW,EAAE,MAAM,CAAC;IACpB;;OAEG;IACH,QAAQ,EAAE,MAAM,CAAC;IACjB;;OAEG;IACH,OAAO,EAAE,MAAM,CAAC;IAChB;;OAEG;IACH,OAAO,CAAC,EAAE,MAAM,CAAC;IACjB;;OAEG;IACH,QAAQ,CAAC,EAAE,CAAC,KAAK,EAAE,SAAS,KAAK,MAAM,EAAE,CAAC;IAC1C;;;;;OAKG;IACH,cAAc,CAAC,EAAE,kBAAkB,CAAC;IACpC;;;OAGG;IACH,MAAM,CAAC,EAAE,OAAO,CAAC;CACjB;AAED;;;;;;;;;;;;;GAaG;AAEH,eAAO,MAAM,2BAA2B;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;CAyzBI,CAAC;AAE7C,MAAM,MAAM,eAAe,GAAG,MAAM,OAAO,2BAA2B,CAAC;AAEvE,eAAO,MAAM,sBAAsB,EAA+C,eAAe,EAAE,CAAC;AAEpG,eAAO,MAAM,8BAA8B,+/CAQ1B,CAAC"}
+ {"version":3,"file":"model-libraries.d.ts","sourceRoot":"","sources":["../../src/model-libraries.ts"],"names":[],"mappings":"AACA,OAAO,KAAK,EAAE,SAAS,EAAE,MAAM,iBAAiB,CAAC;AACjD,OAAO,KAAK,EAAE,kBAAkB,EAAE,MAAM,gCAAgC,CAAC;AAEzE;;GAEG;AACH,MAAM,WAAW,gBAAgB;IAChC;;;;OAIG;IACH,WAAW,EAAE,MAAM,CAAC;IACpB;;OAEG;IACH,QAAQ,EAAE,MAAM,CAAC;IACjB;;OAEG;IACH,OAAO,EAAE,MAAM,CAAC;IAChB;;OAEG;IACH,OAAO,CAAC,EAAE,MAAM,CAAC;IACjB;;OAEG;IACH,QAAQ,CAAC,EAAE,CAAC,KAAK,EAAE,SAAS,KAAK,MAAM,EAAE,CAAC;IAC1C;;;;;OAKG;IACH,cAAc,CAAC,EAAE,kBAAkB,CAAC;IACpC;;;OAGG;IACH,MAAM,CAAC,EAAE,OAAO,CAAC;CACjB;AAED;;;;;;;;;;;;;GAaG;AAEH,eAAO,MAAM,2BAA2B;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;CA+zBI,CAAC;AAE7C,MAAM,MAAM,eAAe,GAAG,MAAM,OAAO,2BAA2B,CAAC;AAEvE,eAAO,MAAM,sBAAsB,EAA+C,eAAe,EAAE,CAAC;AAEpG,eAAO,MAAM,8BAA8B,ygDAQ1B,CAAC"}

package/dist/commonjs/model-libraries.js
@@ -453,6 +453,12 @@ exports.MODEL_LIBRARIES_UI_ELEMENTS = {
          countDownloads: `path:"MeshAnything_350m.pth"`,
          snippets: snippets.mesh_anything,
      },
+     mitie: {
+         prettyLabel: "MITIE",
+         repoName: "MITIE",
+         repoUrl: "https://github.com/mit-nlp/MITIE",
+         countDownloads: `path_filename:"total_word_feature_extractor"`,
+     },
      "ml-agents": {
          prettyLabel: "ml-agents",
          repoName: "ml-agents",
@@ -754,7 +760,7 @@ exports.MODEL_LIBRARIES_UI_ELEMENTS = {
      tabpfn: {
          prettyLabel: "TabPFN",
          repoName: "TabPFN",
-         repoUrl: "https://github.com/PriorLabs/TabPFN"
+         repoUrl: "https://github.com/PriorLabs/TabPFN",
      },
      "tic-clip": {
          prettyLabel: "TiC-CLIP",

package/dist/commonjs/tasks/audio-to-audio/data.d.ts.map
@@ -1 +1 @@
- {"version":3,"file":"data.d.ts","sourceRoot":"","sources":["../../../../src/tasks/audio-to-audio/data.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,cAAc,EAAE,MAAM,aAAa,CAAC;AAElD,QAAA,MAAM,QAAQ,EAAE,cAiEf,CAAC;AAEF,eAAe,QAAQ,CAAC"}
+ {"version":3,"file":"data.d.ts","sourceRoot":"","sources":["../../../../src/tasks/audio-to-audio/data.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,cAAc,EAAE,MAAM,aAAa,CAAC;AAElD,QAAA,MAAM,QAAQ,EAAE,cA6Df,CAAC;AAEF,eAAe,QAAQ,CAAC"}

package/dist/commonjs/tasks/audio-to-audio/data.js
@@ -36,10 +36,6 @@ const taskData = {
          },
      ],
      models: [
-         {
-             description: "A solid model of audio source separation.",
-             id: "speechbrain/sepformer-wham",
-         },
          {
              description: "A speech enhancement model.",
              id: "ResembleAI/resemble-enhance",

package/dist/commonjs/tasks/fill-mask/data.js
@@ -59,8 +59,8 @@ const taskData = {
      ],
      models: [
          {
-             description: "The famous BERT model.",
-             id: "google-bert/bert-base-uncased",
+             description: "State-of-the-art masked language model.",
+             id: "answerdotai/ModernBERT-large",
          },
          {
              description: "A multilingual model trained on 100 languages.",

package/dist/commonjs/tasks/image-classification/data.d.ts.map
@@ -1 +1 @@
- {"version":3,"file":"data.d.ts","sourceRoot":"","sources":["../../../../src/tasks/image-classification/data.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,cAAc,EAAE,MAAM,aAAa,CAAC;AAElD,QAAA,MAAM,QAAQ,EAAE,cAmFf,CAAC;AAEF,eAAe,QAAQ,CAAC"}
+ {"version":3,"file":"data.d.ts","sourceRoot":"","sources":["../../../../src/tasks/image-classification/data.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,cAAc,EAAE,MAAM,aAAa,CAAC;AAElD,QAAA,MAAM,QAAQ,EAAE,cAkFf,CAAC;AAEF,eAAe,QAAQ,CAAC"}

package/dist/commonjs/tasks/image-classification/data.js
@@ -74,9 +74,8 @@ const taskData = {
      ],
      spaces: [
          {
-             // TO DO: write description
-             description: "An application that classifies what a given image is about.",
-             id: "nielsr/perceiver-image-classification",
+             description: "A leaderboard to evaluate different image classification models.",
+             id: "timm/leaderboard",
          },
      ],
      summary: "Image classification is the task of assigning a label or class to an entire image. Images are expected to have only one class for each image. Image classification models take an image as input and return a prediction about which class the image belongs to.",

package/dist/commonjs/tasks/image-feature-extraction/data.d.ts.map
@@ -1 +1 @@
- {"version":3,"file":"data.d.ts","sourceRoot":"","sources":["../../../../src/tasks/image-feature-extraction/data.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,cAAc,EAAE,MAAM,aAAa,CAAC;AAElD,QAAA,MAAM,QAAQ,EAAE,cAsDf,CAAC;AAEF,eAAe,QAAQ,CAAC"}
+ {"version":3,"file":"data.d.ts","sourceRoot":"","sources":["../../../../src/tasks/image-feature-extraction/data.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,cAAc,EAAE,MAAM,aAAa,CAAC;AAElD,QAAA,MAAM,QAAQ,EAAE,cA2Df,CAAC;AAEF,eAAe,QAAQ,CAAC"}

package/dist/commonjs/tasks/image-feature-extraction/data.js
@@ -42,15 +42,20 @@ const taskData = {
              id: "facebook/dino-vitb16",
          },
          {
-             description: "Strong image feature extraction model made for information retrieval from documents.",
-             id: "vidore/colpali",
+             description: "Cutting-edge image feature extraction model.",
+             id: "apple/aimv2-large-patch14-336-distilled",
          },
          {
              description: "Strong image feature extraction model that can be used on images and documents.",
              id: "OpenGVLab/InternViT-6B-448px-V1-2",
          },
      ],
-     spaces: [],
+     spaces: [
+         {
+             description: "A leaderboard to evaluate different image-feature-extraction models on classification performances",
+             id: "timm/leaderboard",
+         },
+     ],
      summary: "Image feature extraction is the task of extracting features learnt in a computer vision model.",
      widgetModels: [],
  };

package/dist/commonjs/tasks/image-text-to-text/data.d.ts.map
@@ -1 +1 @@
- {"version":3,"file":"data.d.ts","sourceRoot":"","sources":["../../../../src/tasks/image-text-to-text/data.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,cAAc,EAAE,MAAM,aAAa,CAAC;AAElD,QAAA,MAAM,QAAQ,EAAE,cAiGf,CAAC;AAEF,eAAe,QAAQ,CAAC"}
+ {"version":3,"file":"data.d.ts","sourceRoot":"","sources":["../../../../src/tasks/image-text-to-text/data.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,cAAc,EAAE,MAAM,aAAa,CAAC;AAElD,QAAA,MAAM,QAAQ,EAAE,cAyGf,CAAC;AAEF,eAAe,QAAQ,CAAC"}

package/dist/commonjs/tasks/image-text-to-text/data.js
@@ -7,8 +7,8 @@ const taskData = {
              id: "liuhaotian/LLaVA-Instruct-150K",
          },
          {
-             description: "Conversation turns where questions involve image and text.",
-             id: "liuhaotian/LLaVA-Pretrain",
+             description: "Collection of image-text pairs on scientific topics.",
+             id: "DAMO-NLP-SG/multimodal_textbook",
          },
          {
              description: "A collection of datasets made for model fine-tuning.",
@@ -42,11 +42,15 @@ const taskData = {
      metrics: [],
      models: [
          {
-             description: "Powerful vision language model with great visual understanding and reasoning capabilities.",
-             id: "meta-llama/Llama-3.2-11B-Vision-Instruct",
+             description: "Small and efficient yet powerful vision language model.",
+             id: "HuggingFaceTB/SmolVLM-Instruct",
          },
          {
-             description: "Cutting-edge vision language models.",
+             description: "A screenshot understanding model used to control computers.",
+             id: "showlab/ShowUI-2B",
+         },
+         {
+             description: "Cutting-edge vision language model.",
              id: "allenai/Molmo-7B-D-0924",
          },
          {
@@ -58,8 +62,8 @@ const taskData = {
              id: "Qwen/Qwen2-VL-7B-Instruct",
          },
          {
-             description: "Strong image-text-to-text model.",
-             id: "mistralai/Pixtral-12B-2409",
+             description: "Image-text-to-text model with reasoning capabilities.",
+             id: "Qwen/QVQ-72B-Preview",
          },
          {
              description: "Strong image-text-to-text model focused on documents.",
@@ -83,14 +87,18 @@ const taskData = {
              description: "An image-text-to-text application focused on documents.",
              id: "stepfun-ai/GOT_official_online_demo",
          },
-         {
-             description: "An application to compare outputs of different vision language models.",
-             id: "merve/compare_VLMs",
-         },
          {
              description: "An application for chatting with an image-text-to-text model.",
              id: "GanymedeNil/Qwen2-VL-7B",
          },
+         {
+             description: "An application that parses screenshots into actions.",
+             id: "showlab/ShowUI",
+         },
+         {
+             description: "An application that detects gaze.",
+             id: "smoondream/gaze-demo",
+         },
      ],
      summary: "Image-text-to-text models take in an image and text prompt and output text. These models are also called vision-language models, or VLMs. The difference from image-to-text models is that these models take an additional text input, not restricting the model to certain use cases like image captioning, and may also be trained to accept a conversation as input.",
      widgetModels: ["meta-llama/Llama-3.2-11B-Vision-Instruct"],

package/dist/commonjs/tasks/image-to-3d/data.js
@@ -41,8 +41,8 @@ const taskData = {
              id: "hwjiang/Real3D",
          },
          {
-             description: "Generative 3D gaussian splatting model.",
-             id: "ashawkey/LGM",
+             description: "Consistent image-to-3d generation model.",
+             id: "stabilityai/stable-point-aware-3d",
          },
      ],
      spaces: [
@@ -55,8 +55,8 @@ const taskData = {
              id: "TencentARC/InstantMesh",
          },
          {
-             description: "Image-to-3D demo with mesh outputs.",
-             id: "stabilityai/TripoSR",
+             description: "Image-to-3D demo.",
+             id: "stabilityai/stable-point-aware-3d",
          },
          {
              description: "Image-to-3D demo with mesh outputs.",

package/dist/commonjs/tasks/image-to-image/data.d.ts.map
@@ -1 +1 @@
- {"version":3,"file":"data.d.ts","sourceRoot":"","sources":["../../../../src/tasks/image-to-image/data.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,cAAc,EAAE,MAAM,aAAa,CAAC;AAElD,QAAA,MAAM,QAAQ,EAAE,cAgGf,CAAC;AAEF,eAAe,QAAQ,CAAC"}
+ {"version":3,"file":"data.d.ts","sourceRoot":"","sources":["../../../../src/tasks/image-to-image/data.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,cAAc,EAAE,MAAM,aAAa,CAAC;AAElD,QAAA,MAAM,QAAQ,EAAE,cAuGf,CAAC;AAEF,eAAe,QAAQ,CAAC"}

package/dist/commonjs/tasks/image-to-image/data.js
@@ -10,6 +10,10 @@ const taskData = {
              description: "Multiple images of celebrities, used for facial expression translation",
              id: "huggan/CelebA-faces",
          },
+         {
+             description: "12M image-caption pairs.",
+             id: "Spawning/PD12M",
+         },
      ],
      demo: {
          inputs: [
@@ -50,16 +54,20 @@ const taskData = {
              id: "keras-io/super-resolution",
          },
          {
-             description: "A model that creates a set of variations of the input image in the style of DALL-E using Stable Diffusion.",
-             id: "lambdalabs/sd-image-variations-diffusers",
+             description: "A model for applying edits to images through image controls.",
+             id: "Yuanshi/OminiControl",
          },
          {
              description: "A model that generates images based on segments in the input image and the text prompt.",
              id: "mfidabel/controlnet-segment-anything",
          },
          {
-             description: "A model that takes an image and an instruction to edit the image.",
-             id: "timbrooks/instruct-pix2pix",
+             description: "Strong model for inpainting and outpainting.",
+             id: "black-forest-labs/FLUX.1-Fill-dev",
+         },
+         {
+             description: "Strong model for image editing using depth maps.",
+             id: "black-forest-labs/FLUX.1-Depth-dev-lora",
          },
      ],
      spaces: [

package/dist/commonjs/tasks/index.js
@@ -69,7 +69,7 @@ exports.TASKS_MODEL_LIBRARIES = {
      "video-classification": ["transformers"],
      "mask-generation": ["transformers"],
      "multiple-choice": ["transformers"],
-     "object-detection": ["transformers", "transformers.js"],
+     "object-detection": ["transformers", "transformers.js", "ultralytics"],
      other: [],
      "question-answering": ["adapter-transformers", "allennlp", "transformers", "transformers.js"],
      robotics: [],
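
TASKS_MODEL_LIBRARIES maps each task id to the libraries whose models can serve it; this release adds ultralytics under object detection. A sketch, assuming the constant is re-exported from the package root:

```ts
import { TASKS_MODEL_LIBRARIES } from "@huggingface/tasks";

// ultralytics now appears alongside transformers and transformers.js:
console.log(TASKS_MODEL_LIBRARIES["object-detection"]);
// ["transformers", "transformers.js", "ultralytics"]
```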

package/dist/commonjs/tasks/keypoint-detection/data.d.ts.map
@@ -1 +1 @@
- {"version":3,"file":"data.d.ts","sourceRoot":"","sources":["../../../../src/tasks/keypoint-detection/data.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,cAAc,EAAE,MAAM,aAAa,CAAC;AAElD,QAAA,MAAM,QAAQ,EAAE,cA6Cf,CAAC;AAEF,eAAe,QAAQ,CAAC"}
+ {"version":3,"file":"data.d.ts","sourceRoot":"","sources":["../../../../src/tasks/keypoint-detection/data.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,cAAc,EAAE,MAAM,aAAa,CAAC;AAElD,QAAA,MAAM,QAAQ,EAAE,cAiDf,CAAC;AAEF,eAAe,QAAQ,CAAC"}

package/dist/commonjs/tasks/keypoint-detection/data.js
@@ -31,6 +31,10 @@ const taskData = {
              description: "Strong keypoint detection model used to detect human pose.",
              id: "facebook/sapiens-pose-1b",
          },
+         {
+             description: "Powerful keypoint detection model used to detect human pose.",
+             id: "usyd-community/vitpose-plus-base",
+         },
      ],
      spaces: [
          {

package/dist/commonjs/tasks/object-detection/data.d.ts.map
@@ -1 +1 @@
- {"version":3,"file":"data.d.ts","sourceRoot":"","sources":["../../../../src/tasks/object-detection/data.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,cAAc,EAAE,MAAM,aAAa,CAAC;AAElD,QAAA,MAAM,QAAQ,EAAE,cAiFf,CAAC;AAEF,eAAe,QAAQ,CAAC"}
+ {"version":3,"file":"data.d.ts","sourceRoot":"","sources":["../../../../src/tasks/object-detection/data.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,cAAc,EAAE,MAAM,aAAa,CAAC;AAElD,QAAA,MAAM,QAAQ,EAAE,cAqFf,CAAC;AAEF,eAAe,QAAQ,CAAC"}

package/dist/commonjs/tasks/object-detection/data.js
@@ -52,6 +52,10 @@ const taskData = {
              description: "Fast and accurate object detection model trained on COCO and Object365 datasets.",
              id: "PekingU/rtdetr_r18vd_coco_o365",
          },
+         {
+             description: "Object detection model for low-lying objects.",
+             id: "StephanST/WALDO30",
+         },
      ],
      spaces: [
          {
@@ -63,8 +67,8 @@ const taskData = {
              id: "Gradio-Blocks/Object-Detection-With-DETR-and-YOLOS",
          },
          {
-             description: "An application that shows multiple cutting edge techniques for object detection and tracking.",
-             id: "kadirnar/torchyolo",
+             description: "A cutting-edge object detection application.",
+             id: "Ultralytics/YOLO11",
          },
          {
              description: "An object tracking, segmentation and inpainting application.",

package/dist/commonjs/tasks/sentence-similarity/data.d.ts.map
@@ -1 +1 @@
- {"version":3,"file":"data.d.ts","sourceRoot":"","sources":["../../../../src/tasks/sentence-similarity/data.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,cAAc,EAAE,MAAM,aAAa,CAAC;AAElD,QAAA,MAAM,QAAQ,EAAE,cAgGf,CAAC;AAEF,eAAe,QAAQ,CAAC"}
+ {"version":3,"file":"data.d.ts","sourceRoot":"","sources":["../../../../src/tasks/sentence-similarity/data.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,cAAc,EAAE,MAAM,aAAa,CAAC;AAElD,QAAA,MAAM,QAAQ,EAAE,cAoGf,CAAC;AAEF,eAAe,QAAQ,CAAC"}

package/dist/commonjs/tasks/sentence-similarity/data.js
@@ -66,9 +66,13 @@ const taskData = {
              id: "sentence-transformers/all-mpnet-base-v2",
          },
          {
-             description: "A multilingual robust sentence similarity model..",
+             description: "A multilingual robust sentence similarity model.",
              id: "BAAI/bge-m3",
          },
+         {
+             description: "A robust sentence similarity model.",
+             id: "HIT-TMG/KaLM-embedding-multilingual-mini-instruct-v1.5",
+         },
      ],
      spaces: [
          {

package/dist/commonjs/tasks/text-generation/data.d.ts.map
@@ -1 +1 @@
- {"version":3,"file":"data.d.ts","sourceRoot":"","sources":["../../../../src/tasks/text-generation/data.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,cAAc,EAAE,MAAM,aAAa,CAAC;AAElD,QAAA,MAAM,QAAQ,EAAE,cA6Gf,CAAC;AAEF,eAAe,QAAQ,CAAC"}
+ {"version":3,"file":"data.d.ts","sourceRoot":"","sources":["../../../../src/tasks/text-generation/data.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,cAAc,EAAE,MAAM,aAAa,CAAC;AAElD,QAAA,MAAM,QAAQ,EAAE,cAgHf,CAAC;AAEF,eAAe,QAAQ,CAAC"}

package/dist/commonjs/tasks/text-generation/data.js
@@ -3,24 +3,28 @@ Object.defineProperty(exports, "__esModule", { value: true });
  const taskData = {
      datasets: [
          {
-             description: "A large multilingual dataset of text crawled from the web.",
-             id: "mc4",
+             description: "Multilingual dataset used to evaluate text generation models.",
+             id: "CohereForAI/Global-MMLU",
          },
          {
-             description: "Diverse open-source data consisting of 22 smaller high-quality datasets. It was used to train GPT-Neo.",
-             id: "the_pile",
+             description: "High quality multilingual data used to train text-generation models.",
+             id: "HuggingFaceFW/fineweb-2",
          },
          {
              description: "Truly open-source, curated and cleaned dialogue dataset.",
              id: "HuggingFaceH4/ultrachat_200k",
          },
          {
-             description: "An instruction dataset with preference ratings on responses.",
-             id: "openbmb/UltraFeedback",
+             description: "A multilingual instruction dataset with preference ratings on responses.",
+             id: "allenai/tulu-3-sft-mixture",
          },
          {
              description: "A large synthetic dataset for alignment of text generation models.",
-             id: "argilla/magpie-ultra-v0.1",
+             id: "HuggingFaceTB/smoltalk",
+         },
+         {
+             description: "A dataset made for training text generation models solving math questions.",
+             id: "HuggingFaceTB/finemath",
          },
      ],
      demo: {
@@ -59,20 +63,20 @@ const taskData = {
              id: "meta-llama/Meta-Llama-3.1-8B-Instruct",
          },
          {
-             description: "Small yet powerful text generation model.",
-             id: "microsoft/Phi-3-mini-4k-instruct",
+             description: "Powerful text generation model by Microsoft.",
+             id: "microsoft/phi-4",
          },
          {
-             description: "A very powerful model that can solve mathematical problems.",
-             id: "AI-MO/NuminaMath-7B-TIR",
+             description: "A very powerful model with reasoning capabilities.",
+             id: "PowerInfer/SmallThinker-3B-Preview",
          },
          {
              description: "Strong text generation model to follow instructions.",
              id: "Qwen/Qwen2.5-7B-Instruct",
          },
          {
-             description: "Very strong open-source large language model.",
-             id: "nvidia/Llama-3.1-Nemotron-70B-Instruct",
+             description: "Text generation model used to write code.",
+             id: "Qwen/Qwen2.5-Coder-32B-Instruct",
          },
      ],
      spaces: [

package/dist/commonjs/tasks/text-to-image/data.d.ts.map
@@ -1 +1 @@
- {"version":3,"file":"data.d.ts","sourceRoot":"","sources":["../../../../src/tasks/text-to-image/data.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,cAAc,EAAE,MAAM,aAAa,CAAC;AAElD,QAAA,MAAM,QAAQ,EAAE,cA+Ff,CAAC;AAEF,eAAe,QAAQ,CAAC"}
+ {"version":3,"file":"data.d.ts","sourceRoot":"","sources":["../../../../src/tasks/text-to-image/data.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,cAAc,EAAE,MAAM,aAAa,CAAC;AAElD,QAAA,MAAM,QAAQ,EAAE,cAmGf,CAAC;AAEF,eAAe,QAAQ,CAAC"}

package/dist/commonjs/tasks/text-to-image/data.js
@@ -10,6 +10,10 @@ const taskData = {
              description: "Conceptual Captions is a dataset consisting of ~3.3M images annotated with captions.",
              id: "conceptual_captions",
          },
+         {
+             description: "12M image-caption pairs.",
+             id: "Spawning/PD12M",
+         },
      ],
      demo: {
          inputs: [

package/dist/commonjs/tasks/text-to-speech/data.d.ts.map
@@ -1 +1 @@
- {"version":3,"file":"data.d.ts","sourceRoot":"","sources":["../../../../src/tasks/text-to-speech/data.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,cAAc,EAAE,MAAM,aAAa,CAAC;AAElD,QAAA,MAAM,QAAQ,EAAE,cAkFf,CAAC;AAEF,eAAe,QAAQ,CAAC"}
+ {"version":3,"file":"data.d.ts","sourceRoot":"","sources":["../../../../src/tasks/text-to-speech/data.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,cAAc,EAAE,MAAM,aAAa,CAAC;AAElD,QAAA,MAAM,QAAQ,EAAE,cAiFf,CAAC;AAEF,eAAe,QAAQ,CAAC"}

package/dist/commonjs/tasks/text-to-speech/data.js
@@ -12,7 +12,7 @@ const taskData = {
              id: "mythicinfinity/libritts_r",
          },
          {
-             description: "Mulit-lingual dataset.",
+             description: "Multi-lingual dataset.",
              id: "facebook/multilingual_librispeech",
          },
      ],
@@ -48,25 +48,25 @@ const taskData = {
          },
          {
              description: "A massively multi-lingual TTS model.",
-             id: "coqui/XTTS-v2",
+             id: "fishaudio/fish-speech-1.5",
          },
          {
              description: "A powerful TTS model.",
-             id: "amphion/MaskGCT",
+             id: "OuteAI/OuteTTS-0.1-350M",
          },
          {
-             description: "A Llama based TTS model.",
-             id: "OuteAI/OuteTTS-0.1-350M",
+             description: "Small yet powerful TTS model.",
+             id: "hexgrad/Kokoro-82M",
          },
      ],
      spaces: [
          {
-             description: "An application for generate highly realistic, multilingual speech.",
-             id: "suno/bark",
+             description: "An application for generate high quality speech in different languages.",
+             id: "hexgrad/Kokoro-TTS",
          },
          {
-             description: "An application on XTTS, a voice generation model that lets you clone voices into different languages.",
-             id: "coqui/xtts",
+             description: "A multilingual text-to-speech application.",
+             id: "fishaudio/fish-speech-1",
          },
          {
              description: "An application that generates speech in different styles in English and Chinese.",

package/dist/commonjs/tasks/text-to-video/data.js
@@ -63,15 +63,15 @@ const taskData = {
      models: [
          {
              description: "A strong model for consistent video generation.",
-             id: "rain1011/pyramid-flow-sd3",
+             id: "tencent/HunyuanVideo",
          },
          {
-             description: "A robust model for text-to-video generation.",
-             id: "VideoCrafter/VideoCrafter2",
+             description: "A text-to-video model with high fidelity motion and strong prompt adherence.",
+             id: "Lightricks/LTX-Video",
          },
          {
-             description: "A cutting-edge text-to-video generation model.",
-             id: "TIGER-Lab/T2V-Turbo-V2",
+             description: "A text-to-video model focusing on physics-aware applications like robotics.",
+             id: "nvidia/Cosmos-1.0-Diffusion-7B-Text2World",
          },
      ],
      spaces: [

package/dist/commonjs/tasks/video-text-to-text/data.js
@@ -38,12 +38,12 @@ const taskData = {
      metrics: [],
      models: [
          {
-             description: "A robust video-text-to-text model that can take in image and video inputs.",
-             id: "llava-hf/llava-onevision-qwen2-72b-ov-hf",
+             description: "A robust video-text-to-text model.",
+             id: "Vision-CAIR/LongVU_Qwen2_7B",
          },
          {
-             description: "Large and powerful video-text-to-text model that can take in image and video inputs.",
-             id: "llava-hf/LLaVA-NeXT-Video-34B-hf",
+             description: "Strong video-text-to-text model with reasoning capabilities.",
+             id: "GoodiesHere/Apollo-LMMs-Apollo-7B-t32",
          },
      ],
      spaces: [