@huggingface/tasks 0.19.66 → 0.19.68

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (89) hide show
  1. package/dist/commonjs/dataset-libraries.d.ts +6 -0
  2. package/dist/commonjs/dataset-libraries.d.ts.map +1 -1
  3. package/dist/commonjs/dataset-libraries.js +6 -0
  4. package/dist/commonjs/hardware.d.ts +4 -0
  5. package/dist/commonjs/hardware.d.ts.map +1 -1
  6. package/dist/commonjs/hardware.js +5 -1
  7. package/dist/commonjs/local-apps.d.ts +0 -7
  8. package/dist/commonjs/local-apps.d.ts.map +1 -1
  9. package/dist/commonjs/local-apps.js +0 -7
  10. package/dist/commonjs/model-libraries-snippets.d.ts +1 -0
  11. package/dist/commonjs/model-libraries-snippets.d.ts.map +1 -1
  12. package/dist/commonjs/model-libraries-snippets.js +16 -3
  13. package/dist/commonjs/model-libraries.d.ts +64 -29
  14. package/dist/commonjs/model-libraries.d.ts.map +1 -1
  15. package/dist/commonjs/model-libraries.js +68 -33
  16. package/dist/commonjs/pipelines.d.ts +9 -1
  17. package/dist/commonjs/pipelines.d.ts.map +1 -1
  18. package/dist/commonjs/pipelines.js +8 -0
  19. package/dist/commonjs/snippets/inputs.d.ts.map +1 -1
  20. package/dist/commonjs/snippets/inputs.js +10 -0
  21. package/dist/commonjs/tasks/image-text-to-image/data.d.ts +4 -0
  22. package/dist/commonjs/tasks/image-text-to-image/data.d.ts.map +1 -0
  23. package/dist/commonjs/tasks/image-text-to-image/data.js +50 -0
  24. package/dist/commonjs/tasks/image-text-to-image/inference.d.ts +76 -0
  25. package/dist/commonjs/tasks/image-text-to-image/inference.d.ts.map +1 -0
  26. package/dist/commonjs/tasks/image-text-to-image/inference.js +2 -0
  27. package/dist/commonjs/tasks/image-text-to-video/data.d.ts +4 -0
  28. package/dist/commonjs/tasks/image-text-to-video/data.d.ts.map +1 -0
  29. package/dist/commonjs/tasks/image-text-to-video/data.js +50 -0
  30. package/dist/commonjs/tasks/image-text-to-video/inference.d.ts +78 -0
  31. package/dist/commonjs/tasks/image-text-to-video/inference.d.ts.map +1 -0
  32. package/dist/commonjs/tasks/image-text-to-video/inference.js +2 -0
  33. package/dist/commonjs/tasks/index.d.ts +2 -0
  34. package/dist/commonjs/tasks/index.d.ts.map +1 -1
  35. package/dist/commonjs/tasks/index.js +72 -66
  36. package/dist/esm/dataset-libraries.d.ts +6 -0
  37. package/dist/esm/dataset-libraries.d.ts.map +1 -1
  38. package/dist/esm/dataset-libraries.js +6 -0
  39. package/dist/esm/hardware.d.ts +4 -0
  40. package/dist/esm/hardware.d.ts.map +1 -1
  41. package/dist/esm/hardware.js +5 -1
  42. package/dist/esm/local-apps.d.ts +0 -7
  43. package/dist/esm/local-apps.d.ts.map +1 -1
  44. package/dist/esm/local-apps.js +0 -7
  45. package/dist/esm/model-libraries-snippets.d.ts +1 -0
  46. package/dist/esm/model-libraries-snippets.d.ts.map +1 -1
  47. package/dist/esm/model-libraries-snippets.js +12 -0
  48. package/dist/esm/model-libraries.d.ts +64 -29
  49. package/dist/esm/model-libraries.d.ts.map +1 -1
  50. package/dist/esm/model-libraries.js +68 -33
  51. package/dist/esm/pipelines.d.ts +9 -1
  52. package/dist/esm/pipelines.d.ts.map +1 -1
  53. package/dist/esm/pipelines.js +8 -0
  54. package/dist/esm/snippets/inputs.d.ts.map +1 -1
  55. package/dist/esm/snippets/inputs.js +10 -0
  56. package/dist/esm/tasks/image-text-to-image/data.d.ts +4 -0
  57. package/dist/esm/tasks/image-text-to-image/data.d.ts.map +1 -0
  58. package/dist/esm/tasks/image-text-to-image/data.js +48 -0
  59. package/dist/esm/tasks/image-text-to-image/inference.d.ts +76 -0
  60. package/dist/esm/tasks/image-text-to-image/inference.d.ts.map +1 -0
  61. package/dist/esm/tasks/image-text-to-image/inference.js +1 -0
  62. package/dist/esm/tasks/image-text-to-video/data.d.ts +4 -0
  63. package/dist/esm/tasks/image-text-to-video/data.d.ts.map +1 -0
  64. package/dist/esm/tasks/image-text-to-video/data.js +48 -0
  65. package/dist/esm/tasks/image-text-to-video/inference.d.ts +78 -0
  66. package/dist/esm/tasks/image-text-to-video/inference.d.ts.map +1 -0
  67. package/dist/esm/tasks/image-text-to-video/inference.js +1 -0
  68. package/dist/esm/tasks/index.d.ts +2 -0
  69. package/dist/esm/tasks/index.d.ts.map +1 -1
  70. package/dist/esm/tasks/index.js +6 -0
  71. package/package.json +1 -1
  72. package/src/dataset-libraries.ts +6 -0
  73. package/src/hardware.ts +5 -1
  74. package/src/local-apps.ts +0 -7
  75. package/src/model-libraries-snippets.ts +13 -0
  76. package/src/model-libraries.ts +68 -33
  77. package/src/pipelines.ts +8 -0
  78. package/src/snippets/inputs.ts +12 -0
  79. package/src/tasks/image-text-to-image/about.md +73 -0
  80. package/src/tasks/image-text-to-image/data.ts +54 -0
  81. package/src/tasks/image-text-to-image/inference.ts +75 -0
  82. package/src/tasks/image-text-to-image/spec/input.json +59 -0
  83. package/src/tasks/image-text-to-image/spec/output.json +13 -0
  84. package/src/tasks/image-text-to-video/about.md +71 -0
  85. package/src/tasks/image-text-to-video/data.ts +54 -0
  86. package/src/tasks/image-text-to-video/inference.ts +77 -0
  87. package/src/tasks/image-text-to-video/spec/input.json +63 -0
  88. package/src/tasks/image-text-to-video/spec/output.json +13 -0
  89. package/src/tasks/index.ts +16 -0
@@ -0,0 +1,50 @@
1
+ "use strict";
2
+ Object.defineProperty(exports, "__esModule", { value: true });
3
+ const taskData = {
4
+ datasets: [],
5
+ demo: {
6
+ inputs: [
7
+ {
8
+ filename: "image-text-to-image-input.jpeg",
9
+ type: "img",
10
+ },
11
+ {
12
+ label: "Input",
13
+ content: "A city above clouds, pastel colors, Victorian style",
14
+ type: "text",
15
+ },
16
+ ],
17
+ outputs: [
18
+ {
19
+ filename: "image-text-to-image-output.png",
20
+ type: "img",
21
+ },
22
+ ],
23
+ },
24
+ metrics: [
25
+ {
26
+ description: "The Fréchet Inception Distance (FID) calculates the distance between the distributions of synthetic and real samples. A lower FID score indicates better similarity between the distributions of real and generated images.",
27
+ id: "FID",
28
+ },
29
+ {
30
+ description: "CLIP Score measures the similarity between the generated image and the text prompt using CLIP embeddings. A higher score indicates better alignment with the text prompt.",
31
+ id: "CLIP",
32
+ },
33
+ ],
34
+ models: [
35
+ {
36
+ description: "A powerful model for image-text-to-image generation.",
37
+ id: "black-forest-labs/FLUX.2-dev",
38
+ },
39
+ ],
40
+ spaces: [
41
+ {
42
+ description: "An application for image-text-to-image generation.",
43
+ id: "black-forest-labs/FLUX.2-dev",
44
+ },
45
+ ],
46
+ summary: "Image-text-to-image models take an image and a text prompt as input and generate a new image based on the reference image and text instructions. These models are useful for image editing, style transfer, image variations, and guided image generation tasks.",
47
+ widgetModels: ["black-forest-labs/FLUX.2-dev"],
48
+ youtubeId: undefined,
49
+ };
50
+ exports.default = taskData;
@@ -0,0 +1,76 @@
1
+ /**
2
+ * Inference code generated from the JSON schema spec in ./spec
3
+ *
4
+ * Using src/scripts/inference-codegen
5
+ */
6
+ /**
7
+ * Inputs for Image Text To Image inference. Either inputs (image) or prompt (in parameters)
8
+ * must be provided, or both.
9
+ */
10
+ export interface ImageTextToImageInput {
11
+ /**
12
+ * The input image data as a base64-encoded string. If no `parameters` are provided, you can
13
+ * also provide the image data as a raw bytes payload. Either this or prompt must be
14
+ * provided.
15
+ */
16
+ inputs?: Blob;
17
+ /**
18
+ * Additional inference parameters for Image Text To Image
19
+ */
20
+ parameters?: ImageTextToImageParameters;
21
+ [property: string]: unknown;
22
+ }
23
+ /**
24
+ * Additional inference parameters for Image Text To Image
25
+ */
26
+ export interface ImageTextToImageParameters {
27
+ /**
28
+ * For diffusion models. A higher guidance scale value encourages the model to generate
29
+ * images closely linked to the text prompt at the expense of lower image quality.
30
+ */
31
+ guidance_scale?: number;
32
+ /**
33
+ * One prompt to guide what NOT to include in image generation.
34
+ */
35
+ negative_prompt?: string;
36
+ /**
37
+ * For diffusion models. The number of denoising steps. More denoising steps usually lead to
38
+ * a higher quality image at the expense of slower inference.
39
+ */
40
+ num_inference_steps?: number;
41
+ /**
42
+ * The text prompt to guide the image generation. Either this or inputs (image) must be
43
+ * provided.
44
+ */
45
+ prompt?: string;
46
+ /**
47
+ * Seed for the random number generator.
48
+ */
49
+ seed?: number;
50
+ /**
51
+ * The size in pixels of the output image. This parameter is only supported by some
52
+ * providers and for specific models. It will be ignored when unsupported.
53
+ */
54
+ target_size?: TargetSize;
55
+ [property: string]: unknown;
56
+ }
57
+ /**
58
+ * The size in pixels of the output image. This parameter is only supported by some
59
+ * providers and for specific models. It will be ignored when unsupported.
60
+ */
61
+ export interface TargetSize {
62
+ height: number;
63
+ width: number;
64
+ [property: string]: unknown;
65
+ }
66
+ /**
67
+ * Outputs of inference for the Image Text To Image task
68
+ */
69
+ export interface ImageTextToImageOutput {
70
+ /**
71
+ * The generated image returned as raw bytes in the payload.
72
+ */
73
+ image: unknown;
74
+ [property: string]: unknown;
75
+ }
76
+ //# sourceMappingURL=inference.d.ts.map
@@ -0,0 +1 @@
1
+ {"version":3,"file":"inference.d.ts","sourceRoot":"","sources":["../../../../src/tasks/image-text-to-image/inference.ts"],"names":[],"mappings":"AAAA;;;;GAIG;AACH;;;GAGG;AACH,MAAM,WAAW,qBAAqB;IACrC;;;;OAIG;IACH,MAAM,CAAC,EAAE,IAAI,CAAC;IACd;;OAEG;IACH,UAAU,CAAC,EAAE,0BAA0B,CAAC;IACxC,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC;CAC5B;AACD;;GAEG;AACH,MAAM,WAAW,0BAA0B;IAC1C;;;OAGG;IACH,cAAc,CAAC,EAAE,MAAM,CAAC;IACxB;;OAEG;IACH,eAAe,CAAC,EAAE,MAAM,CAAC;IACzB;;;OAGG;IACH,mBAAmB,CAAC,EAAE,MAAM,CAAC;IAC7B;;;OAGG;IACH,MAAM,CAAC,EAAE,MAAM,CAAC;IAChB;;OAEG;IACH,IAAI,CAAC,EAAE,MAAM,CAAC;IACd;;;OAGG;IACH,WAAW,CAAC,EAAE,UAAU,CAAC;IACzB,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC;CAC5B;AACD;;;GAGG;AACH,MAAM,WAAW,UAAU;IAC1B,MAAM,EAAE,MAAM,CAAC;IACf,KAAK,EAAE,MAAM,CAAC;IACd,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC;CAC5B;AACD;;GAEG;AACH,MAAM,WAAW,sBAAsB;IACtC;;OAEG;IACH,KAAK,EAAE,OAAO,CAAC;IACf,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC;CAC5B"}
@@ -0,0 +1,2 @@
1
+ "use strict";
2
+ Object.defineProperty(exports, "__esModule", { value: true });
@@ -0,0 +1,4 @@
1
+ import type { TaskDataCustom } from "../index.js";
2
+ declare const taskData: TaskDataCustom;
3
+ export default taskData;
4
+ //# sourceMappingURL=data.d.ts.map
@@ -0,0 +1 @@
1
+ {"version":3,"file":"data.d.ts","sourceRoot":"","sources":["../../../../src/tasks/image-text-to-video/data.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,cAAc,EAAE,MAAM,aAAa,CAAC;AAElD,QAAA,MAAM,QAAQ,EAAE,cAiDf,CAAC;AAEF,eAAe,QAAQ,CAAC"}
@@ -0,0 +1,50 @@
1
+ "use strict";
2
+ Object.defineProperty(exports, "__esModule", { value: true });
3
+ const taskData = {
4
+ datasets: [],
5
+ demo: {
6
+ inputs: [
7
+ {
8
+ filename: "image-text-to-video-input.jpg",
9
+ type: "img",
10
+ },
11
+ {
12
+ label: "Input",
13
+ content: "Darth Vader is surfing on the waves.",
14
+ type: "text",
15
+ },
16
+ ],
17
+ outputs: [
18
+ {
19
+ filename: "image-text-to-video-output.gif",
20
+ type: "img",
21
+ },
22
+ ],
23
+ },
24
+ metrics: [
25
+ {
26
+ description: "Frechet Video Distance uses a model that captures coherence for changes in frames and the quality of each frame. A smaller score indicates better video generation.",
27
+ id: "fvd",
28
+ },
29
+ {
30
+ description: "CLIPSIM measures similarity between video frames and text using an image-text similarity model. A higher score indicates better video generation.",
31
+ id: "clipsim",
32
+ },
33
+ ],
34
+ models: [
35
+ {
36
+ description: "A powerful model for image-text-to-video generation.",
37
+ id: "Lightricks/LTX-Video",
38
+ },
39
+ ],
40
+ spaces: [
41
+ {
42
+ description: "An application for image-text-to-video generation.",
43
+ id: "Lightricks/ltx-video-distilled",
44
+ },
45
+ ],
46
+ summary: "Image-text-to-video models take a reference image and text instructions as input and generate a video based on them. These models are useful for animating still images, creating dynamic content from static references, and generating videos with specific motion or transformation guidance.",
47
+ widgetModels: ["Lightricks/LTX-Video"],
48
+ youtubeId: undefined,
49
+ };
50
+ exports.default = taskData;
@@ -0,0 +1,78 @@
1
+ /**
2
+ * Inference code generated from the JSON schema spec in ./spec
3
+ *
4
+ * Using src/scripts/inference-codegen
5
+ */
6
+ /**
7
+ * Inputs for Image Text To Video inference. Either inputs (image) or prompt (in parameters)
8
+ * must be provided, or both.
9
+ */
10
+ export interface ImageTextToVideoInput {
11
+ /**
12
+ * The input image data as a base64-encoded string. If no `parameters` are provided, you can
13
+ * also provide the image data as a raw bytes payload. Either this or prompt must be
14
+ * provided.
15
+ */
16
+ inputs?: Blob;
17
+ /**
18
+ * Additional inference parameters for Image Text To Video
19
+ */
20
+ parameters?: ImageTextToVideoParameters;
21
+ [property: string]: unknown;
22
+ }
23
+ /**
24
+ * Additional inference parameters for Image Text To Video
25
+ */
26
+ export interface ImageTextToVideoParameters {
27
+ /**
28
+ * For diffusion models. A higher guidance scale value encourages the model to generate
29
+ * videos closely linked to the text prompt at the expense of lower image quality.
30
+ */
31
+ guidance_scale?: number;
32
+ /**
33
+ * One prompt to guide what NOT to include in video generation.
34
+ */
35
+ negative_prompt?: string;
36
+ /**
37
+ * The num_frames parameter determines how many video frames are generated.
38
+ */
39
+ num_frames?: number;
40
+ /**
41
+ * The number of denoising steps. More denoising steps usually lead to a higher quality
42
+ * video at the expense of slower inference.
43
+ */
44
+ num_inference_steps?: number;
45
+ /**
46
+ * The text prompt to guide the video generation. Either this or inputs (image) must be
47
+ * provided.
48
+ */
49
+ prompt?: string;
50
+ /**
51
+ * Seed for the random number generator.
52
+ */
53
+ seed?: number;
54
+ /**
55
+ * The size in pixels of the output video frames.
56
+ */
57
+ target_size?: TargetSize;
58
+ [property: string]: unknown;
59
+ }
60
+ /**
61
+ * The size in pixels of the output video frames.
62
+ */
63
+ export interface TargetSize {
64
+ height: number;
65
+ width: number;
66
+ [property: string]: unknown;
67
+ }
68
+ /**
69
+ * Outputs of inference for the Image Text To Video task
70
+ */
71
+ export interface ImageTextToVideoOutput {
72
+ /**
73
+ * The generated video returned as raw bytes in the payload.
74
+ */
75
+ video: unknown;
76
+ [property: string]: unknown;
77
+ }
78
+ //# sourceMappingURL=inference.d.ts.map
@@ -0,0 +1 @@
1
+ {"version":3,"file":"inference.d.ts","sourceRoot":"","sources":["../../../../src/tasks/image-text-to-video/inference.ts"],"names":[],"mappings":"AAAA;;;;GAIG;AACH;;;GAGG;AACH,MAAM,WAAW,qBAAqB;IACrC;;;;OAIG;IACH,MAAM,CAAC,EAAE,IAAI,CAAC;IACd;;OAEG;IACH,UAAU,CAAC,EAAE,0BAA0B,CAAC;IACxC,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC;CAC5B;AACD;;GAEG;AACH,MAAM,WAAW,0BAA0B;IAC1C;;;OAGG;IACH,cAAc,CAAC,EAAE,MAAM,CAAC;IACxB;;OAEG;IACH,eAAe,CAAC,EAAE,MAAM,CAAC;IACzB;;OAEG;IACH,UAAU,CAAC,EAAE,MAAM,CAAC;IACpB;;;OAGG;IACH,mBAAmB,CAAC,EAAE,MAAM,CAAC;IAC7B;;;OAGG;IACH,MAAM,CAAC,EAAE,MAAM,CAAC;IAChB;;OAEG;IACH,IAAI,CAAC,EAAE,MAAM,CAAC;IACd;;OAEG;IACH,WAAW,CAAC,EAAE,UAAU,CAAC;IACzB,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC;CAC5B;AACD;;GAEG;AACH,MAAM,WAAW,UAAU;IAC1B,MAAM,EAAE,MAAM,CAAC;IACf,KAAK,EAAE,MAAM,CAAC;IACd,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC;CAC5B;AACD;;GAEG;AACH,MAAM,WAAW,sBAAsB;IACtC;;OAEG;IACH,KAAK,EAAE,OAAO,CAAC;IACf,CAAC,QAAQ,EAAE,MAAM,GAAG,OAAO,CAAC;CAC5B"}
@@ -0,0 +1,2 @@
1
+ "use strict";
2
+ Object.defineProperty(exports, "__esModule", { value: true });
@@ -10,6 +10,8 @@ export type * from "./image-to-image/inference.js";
10
10
  export type { ImageToTextInput, ImageToTextOutput, ImageToTextParameters } from "./image-to-text/inference.js";
11
11
  export type * from "./image-segmentation/inference.js";
12
12
  export type { ImageToVideoInput, ImageToVideoOutput, ImageToVideoParameters } from "./image-to-video/inference.js";
13
+ export type { ImageTextToImageInput, ImageTextToImageOutput, ImageTextToImageParameters, } from "./image-text-to-image/inference.js";
14
+ export type { ImageTextToVideoInput, ImageTextToVideoOutput, ImageTextToVideoParameters, } from "./image-text-to-video/inference.js";
13
15
  export type * from "./object-detection/inference.js";
14
16
  export type * from "./depth-estimation/inference.js";
15
17
  export type * from "./question-answering/inference.js";
@@ -1 +1 @@
1
- {"version":3,"file":"index.d.ts","sourceRoot":"","sources":["../../../src/tasks/index.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,YAAY,EAAE,MAAM,iBAAiB,CAAC;AAkDpD,mBAAmB,qCAAqC,CAAC;AACzD,mBAAmB,6CAA6C,CAAC;AACjE,YAAY,EACX,mBAAmB,EACnB,0BAA0B,EAC1B,mCAAmC,EACnC,oBAAoB,EACpB,4BAA4B,EAC5B,2BAA2B,EAC3B,0BAA0B,EAC1B,gCAAgC,EAChC,+BAA+B,GAC/B,MAAM,gCAAgC,CAAC;AACxC,mBAAmB,4CAA4C,CAAC;AAChE,mBAAmB,mCAAmC,CAAC;AACvD,mBAAmB,0BAA0B,CAAC;AAC9C,YAAY,EACX,wBAAwB,EACxB,yBAAyB,EACzB,gCAAgC,EAChC,6BAA6B,GAC7B,MAAM,qCAAqC,CAAC;AAC7C,mBAAmB,+BAA+B,CAAC;AACnD,YAAY,EAAE,gBAAgB,EAAE,iBAAiB,EAAE,qBAAqB,EAAE,MAAM,8BAA8B,CAAC;AAC/G,mBAAmB,mCAAmC,CAAC;AACvD,YAAY,EAAE,iBAAiB,EAAE,kBAAkB,EAAE,sBAAsB,EAAE,MAAM,+BAA+B,CAAC;AACnH,mBAAmB,iCAAiC,CAAC;AACrD,mBAAmB,iCAAiC,CAAC;AACrD,mBAAmB,mCAAmC,CAAC;AACvD,mBAAmB,oCAAoC,CAAC;AACxD,mBAAmB,8BAA8B,CAAC;AAClD,mBAAmB,yCAAyC,CAAC;AAC7D,YAAY,EAAE,gBAAgB,EAAE,iBAAiB,EAAE,qBAAqB,EAAE,MAAM,8BAA8B,CAAC;AAC/G,YAAY,EAAE,qBAAqB,EAAE,iBAAiB,EAAE,gBAAgB,EAAE,MAAM,8BAA8B,CAAC;AAC/G,YAAY,EAAE,sBAAsB,EAAE,iBAAiB,EAAE,kBAAkB,EAAE,MAAM,+BAA+B,CAAC;AACnH,mBAAmB,qCAAqC,CAAC;AACzD,YAAY,EAAE,gBAAgB,EAAE,iBAAiB,EAAE,MAAM,4BAA4B,CAAC;AACtF,YAAY,EACX,6BAA6B,EAC7B,uBAAuB,EACvB,wBAAwB,EACxB,+BAA+B,EAC/B,4BAA4B,GAC5B,MAAM,oCAAoC,CAAC;AAC5C,YAAY,EACX,gCAAgC,EAChC,gCAAgC,EAChC,mBAAmB,EACnB,oBAAoB,EACpB,2BAA2B,EAC3B,qCAAqC,EACrC,kCAAkC,EAClC,yBAAyB,EACzB,uCAAuC,EACvC,0BAA0B,GAC1B,MAAM,gCAAgC,CAAC;AACxC,mBAAmB,qCAAqC,CAAC;AACzD,mBAAmB,0CAA0C,CAAC;AAC9D,mBAAmB,yCAAyC,CAAC;AAC7D,mBAAmB,+CAA+C,CAAC;AACnE,YAAY,EACX,WAAW,EACX,4BAA4B,EAC5B,6BAA6B,EAC7B,oCAAoC,GACpC,MAAM,2CAA2C,CAAC;AAEnD,OAAO,KAAK,EAAE,eAAe,EAAE,MAAM,uBAAuB,CAAC;AAC7D;;GAEG;AACH,eAAO,MAAM,qBAAqB,EAAE,MAAM,CAAC,YAAY,EAAE,eAAe,EAAE,CAgEzE,CAAC;AAoBF,eAAO,MAAM,UAAU,EAAE,MAAM,CAAC,YAAY,EAAE,QAAQ,GAAG,SAAS,CAwDxD,CAAC;AAEX,MAAM,WAAW,WAAW;IAC3B,WAAW,EAAE,MAAM,CAAC;IACpB,EAAE,EAAE,MAAM,CAAC;CACX;AAED,MAAM,MAAM,aAAa,GACtB;IACA,QAAQ,EAAE,MAAM,CAAC;IACjB,IAAI,EAAE,OAAO,CAAC;CACb,GACD;IACA,IAAI,EAAE,KAAK,CAAC;QACX,KAAK,EAAE,
MAAM,CAAC;QACd,KAAK,EAAE,MAAM,CAAC;KACd,CAAC,CAAC;IACH,IAAI,EAAE,OAAO,CAAC;CACb,GACD;IACA,QAAQ,EAAE,MAAM,CAAC;IACjB,IAAI,EAAE,KAAK,CAAC;CACX,GACD;IACA,KAAK,EAAE,MAAM,EAAE,EAAE,CAAC;IAClB,IAAI,EAAE,SAAS,CAAC;CACf,GACD;IACA,OAAO,EAAE,MAAM,CAAC;IAChB,KAAK,EAAE,MAAM,CAAC;IACd,IAAI,EAAE,MAAM,CAAC;CACZ,GACD;IACA,IAAI,EAAE,MAAM,CAAC;IACb,MAAM,EAAE,KAAK,CAAC;QACb,GAAG,EAAE,MAAM,CAAC;QACZ,KAAK,EAAE,MAAM,CAAC;QACd,IAAI,EAAE,MAAM,CAAC;KACb,CAAC,CAAC;IACH,IAAI,EAAE,kBAAkB,CAAC;CACxB,CAAC;AAEL,MAAM,WAAW,QAAQ;IACxB,MAAM,EAAE,aAAa,EAAE,CAAC;IACxB,OAAO,EAAE,aAAa,EAAE,CAAC;CACzB;AAED,MAAM,WAAW,QAAQ;IACxB,QAAQ,EAAE,WAAW,EAAE,CAAC;IACxB,IAAI,EAAE,QAAQ,CAAC;IACf,EAAE,EAAE,YAAY,CAAC;IACjB,WAAW,CAAC,EAAE,YAAY,CAAC;IAC3B,aAAa,CAAC,EAAE,OAAO,CAAC;IACxB,KAAK,EAAE,MAAM,CAAC;IACd,SAAS,EAAE,eAAe,EAAE,CAAC;IAC7B,OAAO,EAAE,WAAW,EAAE,CAAC;IACvB,MAAM,EAAE,WAAW,EAAE,CAAC;IACtB,MAAM,EAAE,WAAW,EAAE,CAAC;IACtB,OAAO,EAAE,MAAM,CAAC;IAChB,YAAY,EAAE,MAAM,EAAE,CAAC;IACvB,SAAS,CAAC,EAAE,MAAM,CAAC;CACnB;AAED,MAAM,MAAM,cAAc,GAAG,IAAI,CAAC,QAAQ,EAAE,IAAI,GAAG,OAAO,GAAG,WAAW,CAAC,CAAC"}
1
+ {"version":3,"file":"index.d.ts","sourceRoot":"","sources":["../../../src/tasks/index.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,YAAY,EAAE,MAAM,iBAAiB,CAAC;AAoDpD,mBAAmB,qCAAqC,CAAC;AACzD,mBAAmB,6CAA6C,CAAC;AACjE,YAAY,EACX,mBAAmB,EACnB,0BAA0B,EAC1B,mCAAmC,EACnC,oBAAoB,EACpB,4BAA4B,EAC5B,2BAA2B,EAC3B,0BAA0B,EAC1B,gCAAgC,EAChC,+BAA+B,GAC/B,MAAM,gCAAgC,CAAC;AACxC,mBAAmB,4CAA4C,CAAC;AAChE,mBAAmB,mCAAmC,CAAC;AACvD,mBAAmB,0BAA0B,CAAC;AAC9C,YAAY,EACX,wBAAwB,EACxB,yBAAyB,EACzB,gCAAgC,EAChC,6BAA6B,GAC7B,MAAM,qCAAqC,CAAC;AAC7C,mBAAmB,+BAA+B,CAAC;AACnD,YAAY,EAAE,gBAAgB,EAAE,iBAAiB,EAAE,qBAAqB,EAAE,MAAM,8BAA8B,CAAC;AAC/G,mBAAmB,mCAAmC,CAAC;AACvD,YAAY,EAAE,iBAAiB,EAAE,kBAAkB,EAAE,sBAAsB,EAAE,MAAM,+BAA+B,CAAC;AACnH,YAAY,EACX,qBAAqB,EACrB,sBAAsB,EACtB,0BAA0B,GAC1B,MAAM,oCAAoC,CAAC;AAC5C,YAAY,EACX,qBAAqB,EACrB,sBAAsB,EACtB,0BAA0B,GAC1B,MAAM,oCAAoC,CAAC;AAC5C,mBAAmB,iCAAiC,CAAC;AACrD,mBAAmB,iCAAiC,CAAC;AACrD,mBAAmB,mCAAmC,CAAC;AACvD,mBAAmB,oCAAoC,CAAC;AACxD,mBAAmB,8BAA8B,CAAC;AAClD,mBAAmB,yCAAyC,CAAC;AAC7D,YAAY,EAAE,gBAAgB,EAAE,iBAAiB,EAAE,qBAAqB,EAAE,MAAM,8BAA8B,CAAC;AAC/G,YAAY,EAAE,qBAAqB,EAAE,iBAAiB,EAAE,gBAAgB,EAAE,MAAM,8BAA8B,CAAC;AAC/G,YAAY,EAAE,sBAAsB,EAAE,iBAAiB,EAAE,kBAAkB,EAAE,MAAM,+BAA+B,CAAC;AACnH,mBAAmB,qCAAqC,CAAC;AACzD,YAAY,EAAE,gBAAgB,EAAE,iBAAiB,EAAE,MAAM,4BAA4B,CAAC;AACtF,YAAY,EACX,6BAA6B,EAC7B,uBAAuB,EACvB,wBAAwB,EACxB,+BAA+B,EAC/B,4BAA4B,GAC5B,MAAM,oCAAoC,CAAC;AAC5C,YAAY,EACX,gCAAgC,EAChC,gCAAgC,EAChC,mBAAmB,EACnB,oBAAoB,EACpB,2BAA2B,EAC3B,qCAAqC,EACrC,kCAAkC,EAClC,yBAAyB,EACzB,uCAAuC,EACvC,0BAA0B,GAC1B,MAAM,gCAAgC,CAAC;AACxC,mBAAmB,qCAAqC,CAAC;AACzD,mBAAmB,0CAA0C,CAAC;AAC9D,mBAAmB,yCAAyC,CAAC;AAC7D,mBAAmB,+CAA+C,CAAC;AACnE,YAAY,EACX,WAAW,EACX,4BAA4B,EAC5B,6BAA6B,EAC7B,oCAAoC,GACpC,MAAM,2CAA2C,CAAC;AAEnD,OAAO,KAAK,EAAE,eAAe,EAAE,MAAM,uBAAuB,CAAC;AAC7D;;GAEG;AACH,eAAO,MAAM,qBAAqB,EAAE,MAAM,CAAC,YAAY,EAAE,eAAe,EAAE,CAkEzE,CAAC;AAoBF,eAAO,MAAM,UAAU,EAAE,MAAM,CAAC,YAAY,EAAE,QAAQ,GAAG,SAAS,CA0DxD,CAAC;AAEX,MAAM,WAAW,WAAW;IAC3B,WAAW,EAAE,MAAM,CAAC;IACpB,EAAE,EAA
E,MAAM,CAAC;CACX;AAED,MAAM,MAAM,aAAa,GACtB;IACA,QAAQ,EAAE,MAAM,CAAC;IACjB,IAAI,EAAE,OAAO,CAAC;CACb,GACD;IACA,IAAI,EAAE,KAAK,CAAC;QACX,KAAK,EAAE,MAAM,CAAC;QACd,KAAK,EAAE,MAAM,CAAC;KACd,CAAC,CAAC;IACH,IAAI,EAAE,OAAO,CAAC;CACb,GACD;IACA,QAAQ,EAAE,MAAM,CAAC;IACjB,IAAI,EAAE,KAAK,CAAC;CACX,GACD;IACA,KAAK,EAAE,MAAM,EAAE,EAAE,CAAC;IAClB,IAAI,EAAE,SAAS,CAAC;CACf,GACD;IACA,OAAO,EAAE,MAAM,CAAC;IAChB,KAAK,EAAE,MAAM,CAAC;IACd,IAAI,EAAE,MAAM,CAAC;CACZ,GACD;IACA,IAAI,EAAE,MAAM,CAAC;IACb,MAAM,EAAE,KAAK,CAAC;QACb,GAAG,EAAE,MAAM,CAAC;QACZ,KAAK,EAAE,MAAM,CAAC;QACd,IAAI,EAAE,MAAM,CAAC;KACb,CAAC,CAAC;IACH,IAAI,EAAE,kBAAkB,CAAC;CACxB,CAAC;AAEL,MAAM,WAAW,QAAQ;IACxB,MAAM,EAAE,aAAa,EAAE,CAAC;IACxB,OAAO,EAAE,aAAa,EAAE,CAAC;CACzB;AAED,MAAM,WAAW,QAAQ;IACxB,QAAQ,EAAE,WAAW,EAAE,CAAC;IACxB,IAAI,EAAE,QAAQ,CAAC;IACf,EAAE,EAAE,YAAY,CAAC;IACjB,WAAW,CAAC,EAAE,YAAY,CAAC;IAC3B,aAAa,CAAC,EAAE,OAAO,CAAC;IACxB,KAAK,EAAE,MAAM,CAAC;IACd,SAAS,EAAE,eAAe,EAAE,CAAC;IAC7B,OAAO,EAAE,WAAW,EAAE,CAAC;IACvB,MAAM,EAAE,WAAW,EAAE,CAAC;IACtB,MAAM,EAAE,WAAW,EAAE,CAAC;IACtB,OAAO,EAAE,MAAM,CAAC;IAChB,YAAY,EAAE,MAAM,EAAE,CAAC;IACvB,SAAS,CAAC,EAAE,MAAM,CAAC;CACnB;AAED,MAAM,MAAM,cAAc,GAAG,IAAI,CAAC,QAAQ,EAAE,IAAI,GAAG,OAAO,GAAG,WAAW,CAAC,CAAC"}
@@ -18,39 +18,41 @@ const data_js_10 = __importDefault(require("./image-feature-extraction/data.js")
18
18
  const data_js_11 = __importDefault(require("./image-to-image/data.js"));
19
19
  const data_js_12 = __importDefault(require("./image-to-text/data.js"));
20
20
  const data_js_13 = __importDefault(require("./image-text-to-text/data.js"));
21
- const data_js_14 = __importDefault(require("./image-segmentation/data.js"));
22
- const data_js_15 = __importDefault(require("./image-to-video/data.js"));
23
- const data_js_16 = __importDefault(require("./mask-generation/data.js"));
24
- const data_js_17 = __importDefault(require("./object-detection/data.js"));
25
- const data_js_18 = __importDefault(require("./depth-estimation/data.js"));
26
- const data_js_19 = __importDefault(require("./placeholder/data.js"));
27
- const data_js_20 = __importDefault(require("./reinforcement-learning/data.js"));
28
- const data_js_21 = __importDefault(require("./question-answering/data.js"));
29
- const data_js_22 = __importDefault(require("./sentence-similarity/data.js"));
30
- const data_js_23 = __importDefault(require("./summarization/data.js"));
31
- const data_js_24 = __importDefault(require("./table-question-answering/data.js"));
32
- const data_js_25 = __importDefault(require("./tabular-classification/data.js"));
33
- const data_js_26 = __importDefault(require("./tabular-regression/data.js"));
34
- const data_js_27 = __importDefault(require("./text-to-image/data.js"));
35
- const data_js_28 = __importDefault(require("./text-to-speech/data.js"));
36
- const data_js_29 = __importDefault(require("./token-classification/data.js"));
37
- const data_js_30 = __importDefault(require("./translation/data.js"));
38
- const data_js_31 = __importDefault(require("./text-classification/data.js"));
39
- const data_js_32 = __importDefault(require("./text-generation/data.js"));
40
- const data_js_33 = __importDefault(require("./text-ranking/data.js"));
41
- const data_js_34 = __importDefault(require("./text-to-video/data.js"));
42
- const data_js_35 = __importDefault(require("./unconditional-image-generation/data.js"));
43
- const data_js_36 = __importDefault(require("./video-classification/data.js"));
44
- const data_js_37 = __importDefault(require("./visual-document-retrieval/data.js"));
45
- const data_js_38 = __importDefault(require("./visual-question-answering/data.js"));
46
- const data_js_39 = __importDefault(require("./zero-shot-classification/data.js"));
47
- const data_js_40 = __importDefault(require("./zero-shot-image-classification/data.js"));
48
- const data_js_41 = __importDefault(require("./zero-shot-object-detection/data.js"));
49
- const data_js_42 = __importDefault(require("./image-to-3d/data.js"));
50
- const data_js_43 = __importDefault(require("./text-to-3d/data.js"));
51
- const data_js_44 = __importDefault(require("./keypoint-detection/data.js"));
52
- const data_js_45 = __importDefault(require("./video-text-to-text/data.js"));
53
- const data_js_46 = __importDefault(require("./video-to-video/data.js"));
21
+ const data_js_14 = __importDefault(require("./image-text-to-image/data.js"));
22
+ const data_js_15 = __importDefault(require("./image-text-to-video/data.js"));
23
+ const data_js_16 = __importDefault(require("./image-segmentation/data.js"));
24
+ const data_js_17 = __importDefault(require("./image-to-video/data.js"));
25
+ const data_js_18 = __importDefault(require("./mask-generation/data.js"));
26
+ const data_js_19 = __importDefault(require("./object-detection/data.js"));
27
+ const data_js_20 = __importDefault(require("./depth-estimation/data.js"));
28
+ const data_js_21 = __importDefault(require("./placeholder/data.js"));
29
+ const data_js_22 = __importDefault(require("./reinforcement-learning/data.js"));
30
+ const data_js_23 = __importDefault(require("./question-answering/data.js"));
31
+ const data_js_24 = __importDefault(require("./sentence-similarity/data.js"));
32
+ const data_js_25 = __importDefault(require("./summarization/data.js"));
33
+ const data_js_26 = __importDefault(require("./table-question-answering/data.js"));
34
+ const data_js_27 = __importDefault(require("./tabular-classification/data.js"));
35
+ const data_js_28 = __importDefault(require("./tabular-regression/data.js"));
36
+ const data_js_29 = __importDefault(require("./text-to-image/data.js"));
37
+ const data_js_30 = __importDefault(require("./text-to-speech/data.js"));
38
+ const data_js_31 = __importDefault(require("./token-classification/data.js"));
39
+ const data_js_32 = __importDefault(require("./translation/data.js"));
40
+ const data_js_33 = __importDefault(require("./text-classification/data.js"));
41
+ const data_js_34 = __importDefault(require("./text-generation/data.js"));
42
+ const data_js_35 = __importDefault(require("./text-ranking/data.js"));
43
+ const data_js_36 = __importDefault(require("./text-to-video/data.js"));
44
+ const data_js_37 = __importDefault(require("./unconditional-image-generation/data.js"));
45
+ const data_js_38 = __importDefault(require("./video-classification/data.js"));
46
+ const data_js_39 = __importDefault(require("./visual-document-retrieval/data.js"));
47
+ const data_js_40 = __importDefault(require("./visual-question-answering/data.js"));
48
+ const data_js_41 = __importDefault(require("./zero-shot-classification/data.js"));
49
+ const data_js_42 = __importDefault(require("./zero-shot-image-classification/data.js"));
50
+ const data_js_43 = __importDefault(require("./zero-shot-object-detection/data.js"));
51
+ const data_js_44 = __importDefault(require("./image-to-3d/data.js"));
52
+ const data_js_45 = __importDefault(require("./text-to-3d/data.js"));
53
+ const data_js_46 = __importDefault(require("./keypoint-detection/data.js"));
54
+ const data_js_47 = __importDefault(require("./video-text-to-text/data.js"));
55
+ const data_js_48 = __importDefault(require("./video-to-video/data.js"));
54
56
  /**
55
57
  * Model libraries compatible with each ML task
56
58
  */
@@ -68,6 +70,8 @@ exports.TASKS_MODEL_LIBRARIES = {
68
70
  "image-feature-extraction": ["timm", "transformers"],
69
71
  "image-segmentation": ["transformers", "transformers.js"],
70
72
  "image-text-to-text": ["transformers"],
73
+ "image-text-to-image": ["diffusers"],
74
+ "image-text-to-video": ["diffusers"],
71
75
  "image-to-image": ["diffusers", "transformers", "transformers.js"],
72
76
  "image-to-text": ["transformers", "transformers.js"],
73
77
  "image-to-video": ["diffusers"],
@@ -124,7 +128,7 @@ exports.TASKS_MODEL_LIBRARIES = {
124
128
  * If the partialTaskData argument is left undefined,
125
129
  * the default placeholder data will be used.
126
130
  */
127
- function getData(type, partialTaskData = data_js_19.default) {
131
+ function getData(type, partialTaskData = data_js_21.default) {
128
132
  return {
129
133
  ...partialTaskData,
130
134
  id: type,
@@ -142,54 +146,56 @@ exports.TASKS_DATA = {
142
146
  "audio-to-audio": getData("audio-to-audio", data_js_4.default),
143
147
  "audio-text-to-text": getData("audio-text-to-text", data_js_3.default),
144
148
  "automatic-speech-recognition": getData("automatic-speech-recognition", data_js_5.default),
145
- "depth-estimation": getData("depth-estimation", data_js_18.default),
149
+ "depth-estimation": getData("depth-estimation", data_js_20.default),
146
150
  "document-question-answering": getData("document-question-answering", data_js_6.default),
147
- "visual-document-retrieval": getData("visual-document-retrieval", data_js_37.default),
151
+ "visual-document-retrieval": getData("visual-document-retrieval", data_js_39.default),
148
152
  "feature-extraction": getData("feature-extraction", data_js_7.default),
149
153
  "fill-mask": getData("fill-mask", data_js_8.default),
150
154
  "graph-ml": undefined,
151
155
  "image-classification": getData("image-classification", data_js_9.default),
152
156
  "image-feature-extraction": getData("image-feature-extraction", data_js_10.default),
153
- "image-segmentation": getData("image-segmentation", data_js_14.default),
157
+ "image-segmentation": getData("image-segmentation", data_js_16.default),
154
158
  "image-to-image": getData("image-to-image", data_js_11.default),
155
159
  "image-text-to-text": getData("image-text-to-text", data_js_13.default),
160
+ "image-text-to-image": getData("image-text-to-image", data_js_14.default),
161
+ "image-text-to-video": getData("image-text-to-video", data_js_15.default),
156
162
  "image-to-text": getData("image-to-text", data_js_12.default),
157
- "image-to-video": getData("image-to-video", data_js_15.default),
158
- "keypoint-detection": getData("keypoint-detection", data_js_44.default),
159
- "mask-generation": getData("mask-generation", data_js_16.default),
163
+ "image-to-video": getData("image-to-video", data_js_17.default),
164
+ "keypoint-detection": getData("keypoint-detection", data_js_46.default),
165
+ "mask-generation": getData("mask-generation", data_js_18.default),
160
166
  "multiple-choice": undefined,
161
- "object-detection": getData("object-detection", data_js_17.default),
162
- "video-classification": getData("video-classification", data_js_36.default),
167
+ "object-detection": getData("object-detection", data_js_19.default),
168
+ "video-classification": getData("video-classification", data_js_38.default),
163
169
  other: undefined,
164
- "question-answering": getData("question-answering", data_js_21.default),
165
- "reinforcement-learning": getData("reinforcement-learning", data_js_20.default),
170
+ "question-answering": getData("question-answering", data_js_23.default),
171
+ "reinforcement-learning": getData("reinforcement-learning", data_js_22.default),
166
172
  robotics: undefined,
167
- "sentence-similarity": getData("sentence-similarity", data_js_22.default),
168
- summarization: getData("summarization", data_js_23.default),
169
- "table-question-answering": getData("table-question-answering", data_js_24.default),
173
+ "sentence-similarity": getData("sentence-similarity", data_js_24.default),
174
+ summarization: getData("summarization", data_js_25.default),
175
+ "table-question-answering": getData("table-question-answering", data_js_26.default),
170
176
  "table-to-text": undefined,
171
- "tabular-classification": getData("tabular-classification", data_js_25.default),
172
- "tabular-regression": getData("tabular-regression", data_js_26.default),
177
+ "tabular-classification": getData("tabular-classification", data_js_27.default),
178
+ "tabular-regression": getData("tabular-regression", data_js_28.default),
173
179
  "tabular-to-text": undefined,
174
- "text-classification": getData("text-classification", data_js_31.default),
175
- "text-generation": getData("text-generation", data_js_32.default),
176
- "text-ranking": getData("text-ranking", data_js_33.default),
180
+ "text-classification": getData("text-classification", data_js_33.default),
181
+ "text-generation": getData("text-generation", data_js_34.default),
182
+ "text-ranking": getData("text-ranking", data_js_35.default),
177
183
  "text-retrieval": undefined,
178
- "text-to-image": getData("text-to-image", data_js_27.default),
179
- "text-to-speech": getData("text-to-speech", data_js_28.default),
184
+ "text-to-image": getData("text-to-image", data_js_29.default),
185
+ "text-to-speech": getData("text-to-speech", data_js_30.default),
180
186
  "text-to-audio": undefined,
181
- "text-to-video": getData("text-to-video", data_js_34.default),
187
+ "text-to-video": getData("text-to-video", data_js_36.default),
182
188
  "time-series-forecasting": undefined,
183
- "token-classification": getData("token-classification", data_js_29.default),
184
- translation: getData("translation", data_js_30.default),
185
- "unconditional-image-generation": getData("unconditional-image-generation", data_js_35.default),
186
- "video-text-to-text": getData("video-text-to-text", data_js_45.default),
187
- "video-to-video": getData("video-to-video", data_js_46.default),
188
- "visual-question-answering": getData("visual-question-answering", data_js_38.default),
189
+ "token-classification": getData("token-classification", data_js_31.default),
190
+ translation: getData("translation", data_js_32.default),
191
+ "unconditional-image-generation": getData("unconditional-image-generation", data_js_37.default),
192
+ "video-text-to-text": getData("video-text-to-text", data_js_47.default),
193
+ "video-to-video": getData("video-to-video", data_js_48.default),
194
+ "visual-question-answering": getData("visual-question-answering", data_js_40.default),
189
195
  "voice-activity-detection": undefined,
190
- "zero-shot-classification": getData("zero-shot-classification", data_js_39.default),
191
- "zero-shot-image-classification": getData("zero-shot-image-classification", data_js_40.default),
192
- "zero-shot-object-detection": getData("zero-shot-object-detection", data_js_41.default),
193
- "text-to-3d": getData("text-to-3d", data_js_43.default),
194
- "image-to-3d": getData("image-to-3d", data_js_42.default),
196
+ "zero-shot-classification": getData("zero-shot-classification", data_js_41.default),
197
+ "zero-shot-image-classification": getData("zero-shot-image-classification", data_js_42.default),
198
+ "zero-shot-object-detection": getData("zero-shot-object-detection", data_js_43.default),
199
+ "text-to-3d": getData("text-to-3d", data_js_45.default),
200
+ "image-to-3d": getData("image-to-3d", data_js_44.default),
195
201
  };
@@ -82,6 +82,12 @@ export declare const DATASET_LIBRARIES_UI_ELEMENTS: {
82
82
  repoUrl: string;
83
83
  docsUrl: string;
84
84
  };
85
+ datadesigner: {
86
+ prettyLabel: string;
87
+ repoName: string;
88
+ repoUrl: string;
89
+ docsUrl: string;
90
+ };
85
91
  };
86
92
  export type DatasetLibraryKey = keyof typeof DATASET_LIBRARIES_UI_ELEMENTS;
87
93
  //# sourceMappingURL=dataset-libraries.d.ts.map
@@ -1 +1 @@
1
- {"version":3,"file":"dataset-libraries.d.ts","sourceRoot":"","sources":["../../src/dataset-libraries.ts"],"names":[],"mappings":"AAAA;;GAEG;AACH,MAAM,WAAW,uBAAuB;IACvC;;;;OAIG;IACH,WAAW,EAAE,MAAM,CAAC;IACpB;;OAEG;IACH,QAAQ,EAAE,MAAM,CAAC;IACjB;;OAEG;IACH,OAAO,EAAE,MAAM,CAAC;IAChB;;OAEG;IACH,OAAO,CAAC,EAAE,MAAM,CAAC;CACjB;AAED,eAAO,MAAM,6BAA6B;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;CA6DS,CAAC;AAGpD,MAAM,MAAM,iBAAiB,GAAG,MAAM,OAAO,6BAA6B,CAAC"}
1
+ {"version":3,"file":"dataset-libraries.d.ts","sourceRoot":"","sources":["../../src/dataset-libraries.ts"],"names":[],"mappings":"AAAA;;GAEG;AACH,MAAM,WAAW,uBAAuB;IACvC;;;;OAIG;IACH,WAAW,EAAE,MAAM,CAAC;IACpB;;OAEG;IACH,QAAQ,EAAE,MAAM,CAAC;IACjB;;OAEG;IACH,OAAO,EAAE,MAAM,CAAC;IAChB;;OAEG;IACH,OAAO,CAAC,EAAE,MAAM,CAAC;CACjB;AAED,eAAO,MAAM,6BAA6B;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;CAmES,CAAC;AAGpD,MAAM,MAAM,iBAAiB,GAAG,MAAM,OAAO,6BAA6B,CAAC"}
@@ -59,4 +59,10 @@ export const DATASET_LIBRARIES_UI_ELEMENTS = {
59
59
  repoUrl: "https://github.com/duckdb/duckdb",
60
60
  docsUrl: "https://huggingface.co/docs/hub/datasets-duckdb",
61
61
  },
62
+ datadesigner: {
63
+ prettyLabel: "NeMo Data Designer",
64
+ repoName: "datadesigner",
65
+ repoUrl: "https://github.com/NVIDIA-NeMo/DataDesigner",
66
+ docsUrl: "https://nvidia-nemo.github.io/DataDesigner/",
67
+ },
62
68
  };
@@ -60,6 +60,10 @@ export declare const SKUS: {
60
60
  tflops: number;
61
61
  memory: number[];
62
62
  };
63
+ GB10: {
64
+ tflops: number;
65
+ memory: number[];
66
+ };
63
67
  "RTX PRO 6000 WS": {
64
68
  tflops: number;
65
69
  memory: number[];
@@ -1 +1 @@
1
- {"version":3,"file":"hardware.d.ts","sourceRoot":"","sources":["../../src/hardware.ts"],"names":[],"mappings":"AAAA;;;GAGG;AACH,eAAO,MAAM,iDAAiD,QAAW,CAAC;AAC1E,eAAO,MAAM,yDAAyD,QAAW,CAAC;AAClF,eAAO,MAAM,oCAAoC,QAAU,CAAC;AAE5D;;;GAGG;AACH,eAAO,MAAM,+CAA+C,QAAW,CAAC;AAExE,MAAM,WAAW,YAAY;IAC5B;;;;;;;;;OASG;IACH,MAAM,EAAE,MAAM,CAAC;IACf;;;OAGG;IACH,MAAM,CAAC,EAAE,MAAM,EAAE,CAAC;CAClB;AAED,eAAO,MAAM,sBAAsB,UAElC,CAAC;AAEF,eAAO,MAAM,IAAI;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;CAytBuD,CAAC;AAEzE,MAAM,MAAM,OAAO,GAAG,MAAM,OAAO,IAAI,CAAC"}
1
+ {"version":3,"file":"hardware.d.ts","sourceRoot":"","sources":["../../src/hardware.ts"],"names":[],"mappings":"AAAA;;;GAGG;AACH,eAAO,MAAM,iDAAiD,QAAW,CAAC;AAC1E,eAAO,MAAM,yDAAyD,QAAW,CAAC;AAClF,eAAO,MAAM,oCAAoC,QAAU,CAAC;AAE5D;;;GAGG;AACH,eAAO,MAAM,+CAA+C,QAAW,CAAC;AAExE,MAAM,WAAW,YAAY;IAC5B;;;;;;;;;OASG;IACH,MAAM,EAAE,MAAM,CAAC;IACf;;;OAGG;IACH,MAAM,CAAC,EAAE,MAAM,EAAE,CAAC;CAClB;AAED,eAAO,MAAM,sBAAsB,UAElC,CAAC;AAEF,eAAO,MAAM,IAAI;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;CA6tBuD,CAAC;AAEzE,MAAM,MAAM,OAAO,GAAG,MAAM,OAAO,IAAI,CAAC"}
@@ -44,6 +44,10 @@ export const SKUS = {
44
44
  tflops: 30.29,
45
45
  memory: [24],
46
46
  },
47
+ GB10: {
48
+ tflops: 29.71,
49
+ memory: [128],
50
+ },
47
51
  "RTX PRO 6000 WS": {
48
52
  tflops: 126,
49
53
  memory: [96],
@@ -286,7 +290,7 @@ export const SKUS = {
286
290
  },
287
291
  "RTX 3050 Mobile": {
288
292
  tflops: 7.639,
289
- memory: [6],
293
+ memory: [4, 6],
290
294
  },
291
295
  "RTX 2060": {
292
296
  tflops: 12.9,
@@ -182,13 +182,6 @@ export declare const LOCAL_APPS: {
182
182
  displayOnModelPage: (model: ModelData) => boolean;
183
183
  deeplink: (model: ModelData) => URL;
184
184
  };
185
- invoke: {
186
- prettyLabel: string;
187
- docsUrl: string;
188
- mainTask: "text-to-image";
189
- displayOnModelPage: (model: ModelData) => boolean;
190
- deeplink: (model: ModelData) => URL;
191
- };
192
185
  ollama: {
193
186
  prettyLabel: string;
194
187
  docsUrl: string;
@@ -1 +1 @@
1
- {"version":3,"file":"local-apps.d.ts","sourceRoot":"","sources":["../../src/local-apps.ts"],"names":[],"mappings":"AACA,OAAO,KAAK,EAAE,SAAS,EAAE,MAAM,iBAAiB,CAAC;AACjD,OAAO,KAAK,EAAE,YAAY,EAAE,MAAM,gBAAgB,CAAC;AAKnD,MAAM,WAAW,eAAe;IAC/B;;OAEG;IACH,KAAK,EAAE,MAAM,CAAC;IACd;;OAEG;IACH,KAAK,CAAC,EAAE,MAAM,CAAC;IACf;;OAEG;IACH,OAAO,EAAE,MAAM,GAAG,MAAM,EAAE,CAAC;CAC3B;AAED;;GAEG;AACH,MAAM,MAAM,QAAQ,GAAG;IACtB;;OAEG;IACH,WAAW,EAAE,MAAM,CAAC;IACpB;;OAEG;IACH,OAAO,EAAE,MAAM,CAAC;IAChB;;OAEG;IACH,QAAQ,EAAE,YAAY,CAAC;IACvB;;OAEG;IACH,SAAS,CAAC,EAAE,OAAO,CAAC;IAEpB,UAAU,CAAC,EAAE,OAAO,CAAC;IACrB;;OAEG;IACH,kBAAkB,EAAE,CAAC,KAAK,EAAE,SAAS,KAAK,OAAO,CAAC;CAClD,GAAG,CACD;IACA;;OAEG;IACH,QAAQ,EAAE,CAAC,KAAK,EAAE,SAAS,EAAE,QAAQ,CAAC,EAAE,MAAM,KAAK,GAAG,CAAC;CACtD,GACD;IACA;;;;OAIG;IACH,OAAO,EAAE,CAAC,KAAK,EAAE,SAAS,EAAE,QAAQ,CAAC,EAAE,MAAM,KAAK,MAAM,GAAG,MAAM,EAAE,GAAG,eAAe,GAAG,eAAe,EAAE,CAAC;CACzG,CACH,CAAC;AAqBF,iBAAS,UAAU,CAAC,KAAK,EAAE,SAAS,GAAG,OAAO,CAE7C;AAED,iBAAS,mBAAmB,CAAC,KAAK,EAAE,SAAS,WAE5C;AAiRD;;;;;;;;;;GAUG;AACH,eAAO,MAAM,UAAU;;;;;;yBAvQS,SAAS,aAAa,MAAM,KAAG,eAAe,EAAE;;;;;;;yBAsCzC,SAAS,aAAa,MAAM,KAAG,eAAe,EAAE;;;;;;oCAoPzD,SAAS;yBA3MX,SAAS,KAAG,eAAe,EAAE;;;;;;;yBA4F5B,SAAS,KAAG,eAAe,EAAE;;;;;;;yBA7B/B,SAAS,KAAG,eAAe,EAAE;;;;;;;;;;;;;;yBAtFzB,SAAS,aAAa,MAAM,KAAG,eAAe,EAAE;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;yBAJjD,SAAS,aAAa,MAAM,KAAG,MAAM;;;;;;;yBA0J1B,SAAS,aAAa,MAAM,KAAG,MAAM;;;;;;;yBAI9C,SAAS,aAAa,MAAM,KAAG,eAAe,EAAE;;CAkO5C,CAAC;AAErC,MAAM,MAAM,WAAW,GAAG,MAAM,OAAO,UAAU,CAAC"}
1
+ {"version":3,"file":"local-apps.d.ts","sourceRoot":"","sources":["../../src/local-apps.ts"],"names":[],"mappings":"AACA,OAAO,KAAK,EAAE,SAAS,EAAE,MAAM,iBAAiB,CAAC;AACjD,OAAO,KAAK,EAAE,YAAY,EAAE,MAAM,gBAAgB,CAAC;AAKnD,MAAM,WAAW,eAAe;IAC/B;;OAEG;IACH,KAAK,EAAE,MAAM,CAAC;IACd;;OAEG;IACH,KAAK,CAAC,EAAE,MAAM,CAAC;IACf;;OAEG;IACH,OAAO,EAAE,MAAM,GAAG,MAAM,EAAE,CAAC;CAC3B;AAED;;GAEG;AACH,MAAM,MAAM,QAAQ,GAAG;IACtB;;OAEG;IACH,WAAW,EAAE,MAAM,CAAC;IACpB;;OAEG;IACH,OAAO,EAAE,MAAM,CAAC;IAChB;;OAEG;IACH,QAAQ,EAAE,YAAY,CAAC;IACvB;;OAEG;IACH,SAAS,CAAC,EAAE,OAAO,CAAC;IAEpB,UAAU,CAAC,EAAE,OAAO,CAAC;IACrB;;OAEG;IACH,kBAAkB,EAAE,CAAC,KAAK,EAAE,SAAS,KAAK,OAAO,CAAC;CAClD,GAAG,CACD;IACA;;OAEG;IACH,QAAQ,EAAE,CAAC,KAAK,EAAE,SAAS,EAAE,QAAQ,CAAC,EAAE,MAAM,KAAK,GAAG,CAAC;CACtD,GACD;IACA;;;;OAIG;IACH,OAAO,EAAE,CAAC,KAAK,EAAE,SAAS,EAAE,QAAQ,CAAC,EAAE,MAAM,KAAK,MAAM,GAAG,MAAM,EAAE,GAAG,eAAe,GAAG,eAAe,EAAE,CAAC;CACzG,CACH,CAAC;AAqBF,iBAAS,UAAU,CAAC,KAAK,EAAE,SAAS,GAAG,OAAO,CAE7C;AAED,iBAAS,mBAAmB,CAAC,KAAK,EAAE,SAAS,WAE5C;AAiRD;;;;;;;;;;GAUG;AACH,eAAO,MAAM,UAAU;;;;;;yBAvQS,SAAS,aAAa,MAAM,KAAG,eAAe,EAAE;;;;;;;yBAsCzC,SAAS,aAAa,MAAM,KAAG,eAAe,EAAE;;;;;;oCAoPzD,SAAS;yBA3MX,SAAS,KAAG,eAAe,EAAE;;;;;;;yBA4F5B,SAAS,KAAG,eAAe,EAAE;;;;;;;yBA7B/B,SAAS,KAAG,eAAe,EAAE;;;;;;;;;;;;;;yBAtFzB,SAAS,aAAa,MAAM,KAAG,eAAe,EAAE;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;yBAJjD,SAAS,aAAa,MAAM,KAAG,MAAM;;;;;;;yBA0J1B,SAAS,aAAa,MAAM,KAAG,MAAM;;;;;;;yBAI9C,SAAS,aAAa,MAAM,KAAG,eAAe,EAAE;;CA2N5C,CAAC;AAErC,MAAM,MAAM,WAAW,GAAG,MAAM,OAAO,UAAU,CAAC"}
@@ -427,13 +427,6 @@ export const LOCAL_APPS = {
427
427
  displayOnModelPage: (model) => model.tags.includes("coreml") && model.tags.includes("joyfusion") && model.pipeline_tag === "text-to-image",
428
428
  deeplink: (model) => new URL(`https://joyfusion.app/import_from_hf?repo_id=${model.id}`),
429
429
  },
430
- invoke: {
431
- prettyLabel: "Invoke",
432
- docsUrl: "https://github.com/invoke-ai/InvokeAI",
433
- mainTask: "text-to-image",
434
- displayOnModelPage: (model) => model.library_name === "diffusers" && model.pipeline_tag === "text-to-image",
435
- deeplink: (model) => new URL(`https://models.invoke.ai/huggingface/${model.id}`),
436
- },
437
430
  ollama: {
438
431
  prettyLabel: "Ollama",
439
432
  docsUrl: "https://ollama.com",
@@ -15,6 +15,7 @@ export declare const depth_anything_v2: (model: ModelData) => string[];
15
15
  export declare const depth_pro: (model: ModelData) => string[];
16
16
  export declare const derm_foundation: () => string[];
17
17
  export declare const dia: (model: ModelData) => string[];
18
+ export declare const dia2: (model: ModelData) => string[];
18
19
  export declare const describe_anything: (model: ModelData) => string[];
19
20
  export declare const diffusers: (model: ModelData) => string[];
20
21
  export declare const diffusionkit: (model: ModelData) => string[];